author     henrike@webrtc.org <henrike@webrtc.org@4adac7df-926f-26a2-2b94-8c16560cd09d>  2013-09-10 18:24:07 +0000
committer  henrike@webrtc.org <henrike@webrtc.org@4adac7df-926f-26a2-2b94-8c16560cd09d>  2013-09-10 18:24:07 +0000
commit     f46fff61745ab9bf19a7075a105e5d96308c5526 (patch)
tree       33a02441c8a51621600911b92895846109f98d70
parent     dadb2a18d18963ec55d5bfa85694fe96301f3239 (diff)
download   webrtc-f46fff61745ab9bf19a7075a105e5d96308c5526.tar.gz
OpenSL (not default): Enables low latency audio on Android.
BUG=1669
R=andrew@webrtc.org, fischman@webrtc.org, niklas.enbom@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/2032004

git-svn-id: http://webrtc.googlecode.com/svn/trunk/webrtc@4719 4adac7df-926f-26a2-2b94-8c16560cd09d
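The change reduces the monolithic OpenSL ES audio device to a thin facade over two new components, OpenSlesOutput and OpenSlesInput, as the first file in the diff below shows. Here is a minimal self-contained sketch of that delegation pattern; the stub component classes are hypothetical stand-ins, and only the facade shape, the input_(id, &output_) constructor wiring, and the error-combining idiom are taken from the patch.

#include <cstdint>

// Hypothetical stand-ins for OpenSlesOutput/OpenSlesInput; only the pieces
// needed to show the delegation idiom are sketched.
class OutputSketch {
 public:
  explicit OutputSketch(int32_t id) : id_(id) {}
  int32_t Init() { return 0; }       // 0 on success, -1 on failure.
  int32_t Terminate() { return 0; }
 private:
  int32_t id_;
};

class InputSketch {
 public:
  // The patch's constructor passes the output component's address to the
  // input component (input_(id, &output_)); the reason is not shown in
  // this excerpt.
  InputSketch(int32_t id, OutputSketch* output) : id_(id), output_(output) {}
  int32_t Init() { return 0; }
  int32_t Terminate() { return 0; }
 private:
  int32_t id_;
  OutputSketch* output_;
};

class FacadeSketch {
 public:
  explicit FacadeSketch(int32_t id) : output_(id), input_(id, &output_) {}

  // Each side returns 0 on success and -1 on failure. Because -1 has all
  // bits set in two's complement, OR-ing the results yields -1 if either
  // call failed and 0 only if both succeeded -- the idiom the patch uses
  // in Init() and Terminate().
  int32_t Init() { return output_.Init() | input_.Init(); }
  int32_t Terminate() { return output_.Terminate() | input_.Terminate(); }

 private:
  OutputSketch output_;  // Declared before input_ so it is constructed
  InputSketch input_;    // first; input_ captures &output_.
};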
-rw-r--r--  modules/audio_device/android/audio_device_opensles_android.cc | 1422
-rw-r--r--  modules/audio_device/android/audio_device_opensles_android.h | 268
-rw-r--r--  modules/audio_device/android/audio_manager_jni.cc | 151
-rw-r--r--  modules/audio_device/android/audio_manager_jni.h | 73
-rw-r--r--  modules/audio_device/android/fine_audio_buffer.cc | 88
-rw-r--r--  modules/audio_device/android/fine_audio_buffer.h | 66
-rw-r--r--  modules/audio_device/android/fine_audio_buffer_unittest.cc | 106
-rw-r--r--  modules/audio_device/android/java/src/org/webrtc/voiceengine/AudioManagerAndroid.java | 72
-rw-r--r--  modules/audio_device/android/low_latency_event.h | 65
-rw-r--r--  modules/audio_device/android/low_latency_event_posix.cc | 97
-rw-r--r--  modules/audio_device/android/low_latency_event_unittest.cc | 96
-rw-r--r--  modules/audio_device/android/opensles_common.cc | 37
-rw-r--r--  modules/audio_device/android/opensles_common.h | 37
-rw-r--r--  modules/audio_device/android/opensles_input.cc | 504
-rw-r--r--  modules/audio_device/android/opensles_input.h | 214
-rw-r--r--  modules/audio_device/android/opensles_output.cc | 565
-rw-r--r--  modules/audio_device/android/opensles_output.h | 236
-rw-r--r--  modules/audio_device/android/single_rw_fifo.cc | 86
-rw-r--r--  modules/audio_device/android/single_rw_fifo.h | 49
-rw-r--r--  modules/audio_device/android/single_rw_fifo_unittest.cc | 126
-rw-r--r--  modules/audio_device/audio_device.gypi | 38
-rw-r--r--  modules/audio_device/audio_device_buffer.h | 10
-rw-r--r--  modules/audio_device/mock_audio_device_buffer.h | 30
-rw-r--r--  system_wrappers/interface/atomic32.h | 5
-rw-r--r--  system_wrappers/source/atomic32_mac.cc | 4
-rw-r--r--  system_wrappers/source/atomic32_posix.cc | 4
-rw-r--r--  system_wrappers/source/atomic32_win.cc | 4
-rw-r--r--  video_engine/test/android/jni/vie_android_java_api.cc | 4
-rw-r--r--  voice_engine/voice_engine_impl.cc | 7
29 files changed, 2997 insertions, 1467 deletions
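Most of the new machinery sits in the files added above: fine_audio_buffer.* bridges the device's native buffer size and WebRTC's 10 ms frames, low_latency_event_* provides a wakeup primitive for the audio threads, and single_rw_fifo.* passes buffers between the OpenSL callback and the worker thread without locking. The FIFO source itself is not part of this excerpt; the sketch below is a generic illustration of why a single-reader/single-writer FIFO can stay lock-free on the audio callback path, written with standard C++ atomics as an assumption for clarity (the diffstat's atomic32 changes suggest the real code uses WebRTC's own atomic wrappers).

#include <atomic>
#include <cstddef>

// Generic single-producer/single-consumer ring buffer (kCapacity >= 2;
// one slot is left empty to distinguish full from empty). With exactly
// one reader and one writer, each index is modified by only one thread,
// so acquire/release atomics suffice and neither side ever blocks.
template <typename T, size_t kCapacity>
class SpscFifoSketch {
 public:
  bool Push(const T& item) {  // Producer thread only.
    const size_t head = head_.load(std::memory_order_relaxed);
    const size_t next = (head + 1) % kCapacity;
    if (next == tail_.load(std::memory_order_acquire))
      return false;  // Full; drop rather than block the audio callback.
    buf_[head] = item;
    head_.store(next, std::memory_order_release);  // Publish the slot.
    return true;
  }

  bool Pop(T* item) {  // Consumer thread only.
    const size_t tail = tail_.load(std::memory_order_relaxed);
    if (tail == head_.load(std::memory_order_acquire))
      return false;  // Empty.
    *item = buf_[tail];
    tail_.store((tail + 1) % kCapacity, std::memory_order_release);
    return true;
  }

 private:
  T buf_[kCapacity];
  std::atomic<size_t> head_{0};  // Next slot to write (producer-owned).
  std::atomic<size_t> tail_{0};  // Next slot to read (consumer-owned).
};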
diff --git a/modules/audio_device/android/audio_device_opensles_android.cc b/modules/audio_device/android/audio_device_opensles_android.cc
index d5d1276e..f386eaff 100644
--- a/modules/audio_device/android/audio_device_opensles_android.cc
+++ b/modules/audio_device/android/audio_device_opensles_android.cc
@@ -10,1421 +10,371 @@
#include "webrtc/modules/audio_device/android/audio_device_opensles_android.h"
-#ifdef WEBRTC_ANDROID_DEBUG
-#include <android/log.h>
-#endif
-#include <sys/resource.h>
-#include <sys/syscall.h>
-#include <sys/time.h>
-#include <time.h>
-
-#include "webrtc/modules/audio_device/audio_device_utility.h"
-#include "webrtc/system_wrappers/interface/event_wrapper.h"
-#include "webrtc/system_wrappers/interface/thread_wrapper.h"
-#include "webrtc/system_wrappers/interface/trace.h"
-
-#ifdef WEBRTC_ANDROID_DEBUG
-#define WEBRTC_OPENSL_TRACE(a, b, c, ...) \
- __android_log_print(ANDROID_LOG_DEBUG, "WebRTC OpenSLES", __VA_ARGS__)
-#else
-#define WEBRTC_OPENSL_TRACE WEBRTC_TRACE
-#endif
+#include "webrtc/modules/audio_device/android/opensles_input.h"
+#include "webrtc/modules/audio_device/android/opensles_output.h"
namespace webrtc {
AudioDeviceAndroidOpenSLES::AudioDeviceAndroidOpenSLES(const int32_t id)
- : voe_audio_buffer_(NULL),
- crit_sect_(*CriticalSectionWrapper::CreateCriticalSection()),
- id_(id),
- sles_engine_(NULL),
- sles_player_(NULL),
- sles_engine_itf_(NULL),
- sles_player_itf_(NULL),
- sles_player_sbq_itf_(NULL),
- sles_output_mixer_(NULL),
- sles_speaker_volume_(NULL),
- sles_recorder_(NULL),
- sles_recorder_itf_(NULL),
- sles_recorder_sbq_itf_(NULL),
- sles_mic_volume_(NULL),
- mic_dev_id_(0),
- play_warning_(0),
- play_error_(0),
- rec_warning_(0),
- rec_error_(0),
- is_recording_dev_specified_(false),
- is_playout_dev_specified_(false),
- is_initialized_(false),
- is_recording_(false),
- is_playing_(false),
- is_rec_initialized_(false),
- is_play_initialized_(false),
- is_mic_initialized_(false),
- is_speaker_initialized_(false),
- playout_delay_(0),
- recording_delay_(0),
- agc_enabled_(false),
- rec_timer_(*EventWrapper::Create()),
- mic_sampling_rate_(N_REC_SAMPLES_PER_SEC * 1000),
- speaker_sampling_rate_(N_PLAY_SAMPLES_PER_SEC * 1000),
- max_speaker_vol_(0),
- min_speaker_vol_(0),
- loundspeaker_on_(false) {
- WEBRTC_OPENSL_TRACE(kTraceMemory, kTraceAudioDevice, id, "%s created",
- __FUNCTION__);
- memset(rec_buf_, 0, sizeof(rec_buf_));
- memset(play_buf_, 0, sizeof(play_buf_));
+ : output_(id),
+ input_(id, &output_) {
}
AudioDeviceAndroidOpenSLES::~AudioDeviceAndroidOpenSLES() {
- WEBRTC_OPENSL_TRACE(kTraceMemory, kTraceAudioDevice, id_, "%s destroyed",
- __FUNCTION__);
-
- Terminate();
-
- delete &crit_sect_;
- delete &rec_timer_;
-}
-
-void AudioDeviceAndroidOpenSLES::AttachAudioBuffer(
- AudioDeviceBuffer* audioBuffer) {
-
- CriticalSectionScoped lock(&crit_sect_);
-
- voe_audio_buffer_ = audioBuffer;
-
- // Inform the AudioBuffer about default settings for this implementation.
- voe_audio_buffer_->SetRecordingSampleRate(N_REC_SAMPLES_PER_SEC);
- voe_audio_buffer_->SetPlayoutSampleRate(N_PLAY_SAMPLES_PER_SEC);
- voe_audio_buffer_->SetRecordingChannels(N_REC_CHANNELS);
- voe_audio_buffer_->SetPlayoutChannels(N_PLAY_CHANNELS);
}
int32_t AudioDeviceAndroidOpenSLES::ActiveAudioLayer(
- AudioDeviceModule::AudioLayer& audioLayer) const {
-
- audioLayer = AudioDeviceModule::kPlatformDefaultAudio;
-
+ AudioDeviceModule::AudioLayer& audioLayer) const { // NOLINT
return 0;
}
int32_t AudioDeviceAndroidOpenSLES::Init() {
- CriticalSectionScoped lock(&crit_sect_);
-
- if (is_initialized_)
- return 0;
-
- SLEngineOption EngineOption[] = {
- { SL_ENGINEOPTION_THREADSAFE, static_cast<SLuint32>(SL_BOOLEAN_TRUE) },
- };
- int32_t res = slCreateEngine(&sles_engine_, 1, EngineOption, 0, NULL, NULL);
-
- if (res != SL_RESULT_SUCCESS) {
- WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
- " failed to create SL Engine Object");
- return -1;
- }
-
- // Realizing the SL Engine in synchronous mode.
- if ((*sles_engine_)->Realize(sles_engine_, SL_BOOLEAN_FALSE)
- != SL_RESULT_SUCCESS) {
- WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
- " failed to Realize SL Engine");
- return -1;
- }
-
- if ((*sles_engine_)->GetInterface(
- sles_engine_,
- SL_IID_ENGINE,
- &sles_engine_itf_) != SL_RESULT_SUCCESS) {
- WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
- " failed to get SL Engine interface");
- return -1;
- }
-
- // Check the sample rate to be used for playback and recording
- if (InitSampleRate() != 0) {
- WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
- "%s: Failed to init samplerate", __FUNCTION__);
- return -1;
- }
-
- // Set the audio device buffer sampling rate, we assume we get the same
- // for play and record.
- if (voe_audio_buffer_->SetRecordingSampleRate(mic_sampling_rate_) < 0) {
- WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
- " Could not set mic audio device buffer "
- "sampling rate (%d)", mic_sampling_rate_);
- }
- if (voe_audio_buffer_->SetPlayoutSampleRate(speaker_sampling_rate_) < 0) {
- WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
- " Could not set speaker audio device buffer "
- "sampling rate (%d)", speaker_sampling_rate_);
- }
-
- is_initialized_ = true;
-
- return 0;
+ return output_.Init() | input_.Init();
}
-int32_t AudioDeviceAndroidOpenSLES::Terminate() {
- CriticalSectionScoped lock(&crit_sect_);
-
- if (!is_initialized_)
- return 0;
-
- // RECORDING
- StopRecording();
-
- is_mic_initialized_ = false;
- is_recording_dev_specified_ = false;
-
- // PLAYOUT
- StopPlayout();
-
- if (sles_engine_ != NULL) {
- (*sles_engine_)->Destroy(sles_engine_);
- sles_engine_ = NULL;
- sles_engine_itf_ = NULL;
- }
-
- is_initialized_ = false;
- return 0;
+int32_t AudioDeviceAndroidOpenSLES::Terminate() {
+ return output_.Terminate() | input_.Terminate();
}
bool AudioDeviceAndroidOpenSLES::Initialized() const {
- return (is_initialized_);
+ return output_.Initialized() && input_.Initialized();
}
-int32_t AudioDeviceAndroidOpenSLES::SpeakerIsAvailable(
- bool& available) {
- // We always assume it's available
- available = true;
-
- return 0;
+int16_t AudioDeviceAndroidOpenSLES::PlayoutDevices() {
+ return output_.PlayoutDevices();
}
-int32_t AudioDeviceAndroidOpenSLES::InitSpeaker() {
- CriticalSectionScoped lock(&crit_sect_);
-
- if (is_playing_) {
- WEBRTC_OPENSL_TRACE(kTraceWarning, kTraceAudioDevice, id_,
- " Playout already started");
- return -1;
- }
-
- if (!is_playout_dev_specified_) {
- WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
- " Playout device is not specified");
- return -1;
- }
+int16_t AudioDeviceAndroidOpenSLES::RecordingDevices() {
+ return input_.RecordingDevices();
+}
- // Nothing needs to be done here, we use a flag to have consistent
- // behavior with other platforms.
- is_speaker_initialized_ = true;
+int32_t AudioDeviceAndroidOpenSLES::PlayoutDeviceName(
+ uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) {
+ return output_.PlayoutDeviceName(index, name, guid);
+}
- return 0;
+int32_t AudioDeviceAndroidOpenSLES::RecordingDeviceName(
+ uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) {
+ return input_.RecordingDeviceName(index, name, guid);
}
-int32_t AudioDeviceAndroidOpenSLES::MicrophoneIsAvailable(
- bool& available) {
- // We always assume it's available.
- available = true;
- return 0;
+int32_t AudioDeviceAndroidOpenSLES::SetPlayoutDevice(uint16_t index) {
+ return output_.SetPlayoutDevice(index);
}
-int32_t AudioDeviceAndroidOpenSLES::InitMicrophone() {
- CriticalSectionScoped lock(&crit_sect_);
- if (is_recording_) {
- WEBRTC_OPENSL_TRACE(kTraceWarning, kTraceAudioDevice, id_,
- " Recording already started");
- return -1;
- }
- if (!is_recording_dev_specified_) {
- WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
- " Recording device is not specified");
- return -1;
- }
-
- // Nothing needs to be done here, we use a flag to have consistent
- // behavior with other platforms.
- is_mic_initialized_ = true;
- return 0;
+int32_t AudioDeviceAndroidOpenSLES::SetPlayoutDevice(
+ AudioDeviceModule::WindowsDeviceType device) {
+ return output_.SetPlayoutDevice(device);
}
-bool AudioDeviceAndroidOpenSLES::SpeakerIsInitialized() const {
- return is_speaker_initialized_;
+int32_t AudioDeviceAndroidOpenSLES::SetRecordingDevice(uint16_t index) {
+ return input_.SetRecordingDevice(index);
}
-bool AudioDeviceAndroidOpenSLES::MicrophoneIsInitialized() const {
- return is_mic_initialized_;
+int32_t AudioDeviceAndroidOpenSLES::SetRecordingDevice(
+ AudioDeviceModule::WindowsDeviceType device) {
+ return input_.SetRecordingDevice(device);
}
-int32_t AudioDeviceAndroidOpenSLES::SpeakerVolumeIsAvailable(
- bool& available) {
- available = true; // We assume we are always be able to set/get volume.
- return 0;
+int32_t AudioDeviceAndroidOpenSLES::PlayoutIsAvailable(
+ bool& available) { // NOLINT
+ return output_.PlayoutIsAvailable(available);
}
-int32_t AudioDeviceAndroidOpenSLES::SetSpeakerVolume(
- uint32_t volume) {
- if (!is_speaker_initialized_) {
- WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
- " Speaker not initialized");
- return -1;
- }
-
- if (sles_engine_ == NULL) {
- WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
- "SetSpeakerVolume, SL Engine object doesnt exist");
- return -1;
- }
-
- if (sles_engine_itf_ == NULL) {
- if ((*sles_engine_)->GetInterface(
- sles_engine_,
- SL_IID_ENGINE,
- &sles_engine_itf_) != SL_RESULT_SUCCESS) {
- WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
- " failed to GetInterface SL Engine Interface");
- return -1;
- }
- }
- return 0;
+int32_t AudioDeviceAndroidOpenSLES::InitPlayout() {
+ return output_.InitPlayout();
}
-int32_t AudioDeviceAndroidOpenSLES::SpeakerVolume(
- uint32_t& volume) const {
- return 0;
+bool AudioDeviceAndroidOpenSLES::PlayoutIsInitialized() const {
+ return output_.PlayoutIsInitialized();
}
-int32_t AudioDeviceAndroidOpenSLES::SetWaveOutVolume(
- uint16_t volumeLeft,
- uint16_t volumeRight) {
- WEBRTC_OPENSL_TRACE(kTraceWarning, kTraceAudioDevice, id_,
- " API call not supported on this platform");
- return -1;
+int32_t AudioDeviceAndroidOpenSLES::RecordingIsAvailable(
+ bool& available) { // NOLINT
+ return input_.RecordingIsAvailable(available);
}
-int32_t AudioDeviceAndroidOpenSLES::WaveOutVolume(
- uint16_t& volumeLeft,
- uint16_t& volumeRight) const {
- WEBRTC_OPENSL_TRACE(kTraceWarning, kTraceAudioDevice, id_,
- " API call not supported on this platform");
- return -1;
+int32_t AudioDeviceAndroidOpenSLES::InitRecording() {
+ return input_.InitRecording();
}
-int32_t AudioDeviceAndroidOpenSLES::MaxSpeakerVolume(
- uint32_t& maxVolume) const {
- if (!is_speaker_initialized_) {
- WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
- " Speaker not initialized");
- return -1;
- }
+bool AudioDeviceAndroidOpenSLES::RecordingIsInitialized() const {
+ return input_.RecordingIsInitialized();
+}
- maxVolume = max_speaker_vol_;
+int32_t AudioDeviceAndroidOpenSLES::StartPlayout() {
+ return output_.StartPlayout();
+}
- return 0;
+int32_t AudioDeviceAndroidOpenSLES::StopPlayout() {
+ return output_.StopPlayout();
}
-int32_t AudioDeviceAndroidOpenSLES::MinSpeakerVolume(
- uint32_t& minVolume) const {
- if (!is_speaker_initialized_) {
- WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
- " Speaker not initialized");
- return -1;
- }
- minVolume = min_speaker_vol_;
- return 0;
+bool AudioDeviceAndroidOpenSLES::Playing() const {
+ return output_.Playing();
}
-int32_t AudioDeviceAndroidOpenSLES::SpeakerVolumeStepSize(
- uint16_t& stepSize) const {
- if (!is_speaker_initialized_) {
- WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
- " Speaker not initialized");
- return -1;
- }
- stepSize = 1;
- return 0;
+int32_t AudioDeviceAndroidOpenSLES::StartRecording() {
+ return input_.StartRecording();
}
-int32_t AudioDeviceAndroidOpenSLES::SpeakerMuteIsAvailable(
- bool& available) {
- available = false; // Speaker mute not supported on Android.
- return 0;
+int32_t AudioDeviceAndroidOpenSLES::StopRecording() {
+ return input_.StopRecording();
}
-int32_t AudioDeviceAndroidOpenSLES::SetSpeakerMute(bool enable) {
- WEBRTC_OPENSL_TRACE(kTraceWarning, kTraceAudioDevice, id_,
- " API call not supported on this platform");
- return -1;
+bool AudioDeviceAndroidOpenSLES::Recording() const {
+ return input_.Recording();
}
-int32_t AudioDeviceAndroidOpenSLES::SpeakerMute(
- bool& enabled) const {
- WEBRTC_OPENSL_TRACE(kTraceWarning, kTraceAudioDevice, id_,
- " API call not supported on this platform");
- return -1;
+int32_t AudioDeviceAndroidOpenSLES::SetAGC(bool enable) {
+ return input_.SetAGC(enable);
}
-int32_t AudioDeviceAndroidOpenSLES::MicrophoneMuteIsAvailable(
- bool& available) {
- available = false; // Mic mute not supported on Android
- return 0;
+bool AudioDeviceAndroidOpenSLES::AGC() const {
+ return input_.AGC();
}
-int32_t AudioDeviceAndroidOpenSLES::SetMicrophoneMute(bool enable) {
- WEBRTC_OPENSL_TRACE(kTraceWarning, kTraceAudioDevice, id_,
- " API call not supported on this platform");
+int32_t AudioDeviceAndroidOpenSLES::SetWaveOutVolume(uint16_t volumeLeft,
+ uint16_t volumeRight) {
return -1;
}
-int32_t AudioDeviceAndroidOpenSLES::MicrophoneMute(
- bool& enabled) const {
- WEBRTC_OPENSL_TRACE(kTraceWarning, kTraceAudioDevice, id_,
- " API call not supported on this platform");
+int32_t AudioDeviceAndroidOpenSLES::WaveOutVolume(
+ uint16_t& volumeLeft, // NOLINT
+ uint16_t& volumeRight) const { // NOLINT
return -1;
}
-int32_t AudioDeviceAndroidOpenSLES::MicrophoneBoostIsAvailable(
- bool& available) {
- available = false; // Mic boost not supported on Android.
- return 0;
+int32_t AudioDeviceAndroidOpenSLES::SpeakerIsAvailable(
+ bool& available) { // NOLINT
+ return output_.SpeakerIsAvailable(available);
}
-int32_t AudioDeviceAndroidOpenSLES::SetMicrophoneBoost(bool enable) {
- if (!is_mic_initialized_) {
- WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
- " Microphone not initialized");
- return -1;
- }
- if (enable) {
- WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
- " Enabling not available");
- return -1;
- }
- return 0;
+int32_t AudioDeviceAndroidOpenSLES::InitSpeaker() {
+ return output_.InitSpeaker();
}
-int32_t AudioDeviceAndroidOpenSLES::MicrophoneBoost(
- bool& enabled) const {
- if (!is_mic_initialized_) {
- WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
- " Microphone not initialized");
- return -1;
- }
- enabled = false;
- return 0;
+bool AudioDeviceAndroidOpenSLES::SpeakerIsInitialized() const {
+ return output_.SpeakerIsInitialized();
}
-int32_t AudioDeviceAndroidOpenSLES::StereoRecordingIsAvailable(
- bool& available) {
- available = false; // Stereo recording not supported on Android.
- return 0;
+int32_t AudioDeviceAndroidOpenSLES::MicrophoneIsAvailable(
+ bool& available) { // NOLINT
+ return input_.MicrophoneIsAvailable(available);
}
-int32_t AudioDeviceAndroidOpenSLES::SetStereoRecording(bool enable) {
- if (enable) {
- WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
- " Enabling not available");
- return -1;
- }
- return 0;
+int32_t AudioDeviceAndroidOpenSLES::InitMicrophone() {
+ return input_.InitMicrophone();
}
-int32_t AudioDeviceAndroidOpenSLES::StereoRecording(
- bool& enabled) const {
- enabled = false;
- return 0;
+bool AudioDeviceAndroidOpenSLES::MicrophoneIsInitialized() const {
+ return input_.MicrophoneIsInitialized();
}
-int32_t AudioDeviceAndroidOpenSLES::StereoPlayoutIsAvailable(
- bool& available) {
- // TODO(leozwang): This api is called before initplayout, we need
- // to detect audio device to find out if stereo is supported or not.
- available = false;
- return 0;
+int32_t AudioDeviceAndroidOpenSLES::SpeakerVolumeIsAvailable(
+ bool& available) { // NOLINT
+ return output_.SpeakerVolumeIsAvailable(available);
}
-int32_t AudioDeviceAndroidOpenSLES::SetStereoPlayout(bool enable) {
- if (enable) {
- return 0;
- } else {
- // TODO(leozwang): Enforce mono.
- return 0;
- }
+int32_t AudioDeviceAndroidOpenSLES::SetSpeakerVolume(uint32_t volume) {
+ return output_.SetSpeakerVolume(volume);
}
-int32_t AudioDeviceAndroidOpenSLES::StereoPlayout(
- bool& enabled) const {
- enabled = (player_pcm_.numChannels == 2 ? true : false);
- return 0;
+int32_t AudioDeviceAndroidOpenSLES::SpeakerVolume(
+ uint32_t& volume) const { // NOLINT
+ return output_.SpeakerVolume(volume);
}
+int32_t AudioDeviceAndroidOpenSLES::MaxSpeakerVolume(
+ uint32_t& maxVolume) const { // NOLINT
+ return output_.MaxSpeakerVolume(maxVolume);
+}
-int32_t AudioDeviceAndroidOpenSLES::SetAGC(bool enable) {
- agc_enabled_ = enable;
- return 0;
+int32_t AudioDeviceAndroidOpenSLES::MinSpeakerVolume(
+ uint32_t& minVolume) const { // NOLINT
+ return output_.MinSpeakerVolume(minVolume);
}
-bool AudioDeviceAndroidOpenSLES::AGC() const {
- return agc_enabled_;
+int32_t AudioDeviceAndroidOpenSLES::SpeakerVolumeStepSize(
+ uint16_t& stepSize) const { // NOLINT
+ return output_.SpeakerVolumeStepSize(stepSize);
}
int32_t AudioDeviceAndroidOpenSLES::MicrophoneVolumeIsAvailable(
- bool& available) {
- available = true;
- return 0;
+ bool& available) { // NOLINT
+ return input_.MicrophoneVolumeIsAvailable(available);
}
-int32_t AudioDeviceAndroidOpenSLES::SetMicrophoneVolume(
- uint32_t volume) {
- WEBRTC_OPENSL_TRACE(kTraceWarning, kTraceAudioDevice, id_,
- " OpenSL doesn't support contolling Mic volume yet");
- // TODO(leozwang): Add microphone volume control when OpenSL apis
- // are available.
- return 0;
+int32_t AudioDeviceAndroidOpenSLES::SetMicrophoneVolume(uint32_t volume) {
+ return input_.SetMicrophoneVolume(volume);
}
int32_t AudioDeviceAndroidOpenSLES::MicrophoneVolume(
- uint32_t& volume) const {
- return -1;
+ uint32_t& volume) const { // NOLINT
+ return input_.MicrophoneVolume(volume);
}
int32_t AudioDeviceAndroidOpenSLES::MaxMicrophoneVolume(
- uint32_t& maxVolume) const {
- return 0;
+ uint32_t& maxVolume) const { // NOLINT
+ return input_.MaxMicrophoneVolume(maxVolume);
}
int32_t AudioDeviceAndroidOpenSLES::MinMicrophoneVolume(
- uint32_t& minVolume) const {
- minVolume = 0;
- return 0;
+ uint32_t& minVolume) const { // NOLINT
+ return input_.MinMicrophoneVolume(minVolume);
}
int32_t AudioDeviceAndroidOpenSLES::MicrophoneVolumeStepSize(
- uint16_t& stepSize) const {
- stepSize = 1;
- return 0;
-}
-
-int16_t AudioDeviceAndroidOpenSLES::PlayoutDevices() {
- return 1;
-}
-
-int32_t AudioDeviceAndroidOpenSLES::SetPlayoutDevice(
- uint16_t index) {
- if (is_play_initialized_) {
- WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
- " Playout already initialized");
- return -1;
- }
- if (0 != index) {
- WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
- " Device index is out of range [0,0]");
- return -1;
- }
-
- // Do nothing but set a flag, this is to have consistent behaviour
- // with other platforms.
- is_playout_dev_specified_ = true;
-
- return 0;
+ uint16_t& stepSize) const { // NOLINT
+ return input_.MicrophoneVolumeStepSize(stepSize);
}
-int32_t AudioDeviceAndroidOpenSLES::SetPlayoutDevice(
- AudioDeviceModule::WindowsDeviceType device) {
-
- WEBRTC_OPENSL_TRACE(kTraceWarning, kTraceAudioDevice, id_,
- " API call not supported on this platform");
- return -1;
+int32_t AudioDeviceAndroidOpenSLES::SpeakerMuteIsAvailable(
+ bool& available) { // NOLINT
+ return output_.SpeakerMuteIsAvailable(available);
}
-int32_t AudioDeviceAndroidOpenSLES::PlayoutDeviceName(
- uint16_t index,
- char name[kAdmMaxDeviceNameSize],
- char guid[kAdmMaxGuidSize]) {
- if (0 != index) {
- WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
- " Device index is out of range [0,0]");
- return -1;
- }
-
- // Return empty string
- memset(name, 0, kAdmMaxDeviceNameSize);
-
- if (guid) {
- memset(guid, 0, kAdmMaxGuidSize);
- }
- return 0;
+int32_t AudioDeviceAndroidOpenSLES::SetSpeakerMute(bool enable) {
+ return output_.SetSpeakerMute(enable);
}
-int32_t AudioDeviceAndroidOpenSLES::RecordingDeviceName(
- uint16_t index,
- char name[kAdmMaxDeviceNameSize],
- char guid[kAdmMaxGuidSize]) {
- if (0 != index) {
- WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
- " Device index is out of range [0,0]");
- return -1;
- }
-
- // Return empty string
- memset(name, 0, kAdmMaxDeviceNameSize);
-
- if (guid) {
- memset(guid, 0, kAdmMaxGuidSize);
- }
- return 0;
+int32_t AudioDeviceAndroidOpenSLES::SpeakerMute(
+ bool& enabled) const { // NOLINT
+ return output_.SpeakerMute(enabled);
}
-int16_t AudioDeviceAndroidOpenSLES::RecordingDevices() {
- return 1;
+int32_t AudioDeviceAndroidOpenSLES::MicrophoneMuteIsAvailable(
+ bool& available) { // NOLINT
+ return input_.MicrophoneMuteIsAvailable(available);
}
-int32_t AudioDeviceAndroidOpenSLES::SetRecordingDevice(
- uint16_t index) {
- if (is_rec_initialized_) {
- WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
- " Recording already initialized");
- return -1;
- }
-
- if (0 != index) {
- WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
- " Device index is out of range [0,0]");
- return -1;
- }
-
- // Do nothing but set a flag, this is to have consistent behaviour with
- // other platforms.
- is_recording_dev_specified_ = true;
- return 0;
+int32_t AudioDeviceAndroidOpenSLES::SetMicrophoneMute(bool enable) {
+ return input_.SetMicrophoneMute(enable);
}
-int32_t AudioDeviceAndroidOpenSLES::SetRecordingDevice(
- AudioDeviceModule::WindowsDeviceType device) {
- WEBRTC_OPENSL_TRACE(kTraceWarning, kTraceAudioDevice, id_,
- " API call not supported on this platform");
- return -1;
+int32_t AudioDeviceAndroidOpenSLES::MicrophoneMute(
+ bool& enabled) const { // NOLINT
+ return input_.MicrophoneMute(enabled);
}
-int32_t AudioDeviceAndroidOpenSLES::PlayoutIsAvailable(
- bool& available) {
- available = false;
- int32_t res = InitPlayout();
- StopPlayout();
- if (res != -1) {
- available = true;
- }
- return res;
+int32_t AudioDeviceAndroidOpenSLES::MicrophoneBoostIsAvailable(
+ bool& available) { // NOLINT
+ return input_.MicrophoneBoostIsAvailable(available);
}
-int32_t AudioDeviceAndroidOpenSLES::RecordingIsAvailable(
- bool& available) {
- available = false;
- int32_t res = InitRecording();
- StopRecording();
- if (res != -1) {
- available = true;
- }
- return res;
+int32_t AudioDeviceAndroidOpenSLES::SetMicrophoneBoost(bool enable) {
+ return input_.SetMicrophoneBoost(enable);
}
-int32_t AudioDeviceAndroidOpenSLES::InitPlayout() {
- CriticalSectionScoped lock(&crit_sect_);
- if (!is_initialized_) {
- WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
- " Not initialized");
- return -1;
- }
-
- if (is_playing_) {
- WEBRTC_OPENSL_TRACE(kTraceWarning, kTraceAudioDevice, id_,
- " Playout already started");
- return -1;
- }
-
- if (!is_playout_dev_specified_) {
- WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
- " Playout device is not specified");
- return -1;
- }
-
- if (is_play_initialized_) {
- WEBRTC_OPENSL_TRACE(kTraceInfo, kTraceAudioDevice, id_,
- " Playout already initialized");
- return 0;
- }
-
- // Initialize the speaker
- if (InitSpeaker() == -1) {
- WEBRTC_OPENSL_TRACE(kTraceWarning, kTraceAudioDevice, id_,
- " InitSpeaker() failed");
- }
-
- if (sles_engine_ == NULL || sles_engine_itf_ == NULL) {
- WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
- " SLObject or Engiine is NULL");
- return -1;
- }
-
- SLDataLocator_AndroidSimpleBufferQueue simple_buf_queue = {
- SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE,
- static_cast<SLuint32>(N_PLAY_QUEUE_BUFFERS)
- };
- SLDataSource audio_source = { &simple_buf_queue, &player_pcm_ };
- SLDataLocator_OutputMix locator_outputmix;
- SLDataSink audio_sink = { &locator_outputmix, NULL };
-
- // Create Output Mix object to be used by player.
- int32_t res = -1;
- res = (*sles_engine_itf_)->CreateOutputMix(sles_engine_itf_,
- &sles_output_mixer_,
- 0,
- NULL,
- NULL);
- if (res != SL_RESULT_SUCCESS) {
- WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
- " failed to get SL Output Mix object");
- return -1;
- }
- // Realizing the Output Mix object in synchronous mode.
- res = (*sles_output_mixer_)->Realize(sles_output_mixer_, SL_BOOLEAN_FALSE);
- if (res != SL_RESULT_SUCCESS) {
- WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
- " failed to realize SL Output Mix object");
- return -1;
- }
-
- // The code below can be moved to startplayout instead
- // Setup the data source structure for the buffer queue.
- player_pcm_.formatType = SL_DATAFORMAT_PCM;
- player_pcm_.numChannels = N_PLAY_CHANNELS;
- if (speaker_sampling_rate_ == 44000) {
- player_pcm_.samplesPerSec = 44100 * 1000;
- } else {
- player_pcm_.samplesPerSec = speaker_sampling_rate_ * 1000;
- }
- player_pcm_.bitsPerSample = SL_PCMSAMPLEFORMAT_FIXED_16;
- player_pcm_.containerSize = SL_PCMSAMPLEFORMAT_FIXED_16;
- if (1 == player_pcm_.numChannels) {
- player_pcm_.channelMask = SL_SPEAKER_FRONT_CENTER;
- } else if (2 == player_pcm_.numChannels) {
- player_pcm_.channelMask = SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT;
- } else {
- WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
- " %d player channels not supported", N_PLAY_CHANNELS);
- }
-
- player_pcm_.endianness = SL_BYTEORDER_LITTLEENDIAN;
- // Setup the data sink structure.
- locator_outputmix.locatorType = SL_DATALOCATOR_OUTPUTMIX;
- locator_outputmix.outputMix = sles_output_mixer_;
-
- SLInterfaceID ids[N_MAX_INTERFACES] = {
- SL_IID_BUFFERQUEUE, SL_IID_ANDROIDCONFIGURATION };
- SLboolean req[N_MAX_INTERFACES] = {
- SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE };
- res = (*sles_engine_itf_)->CreateAudioPlayer(sles_engine_itf_,
- &sles_player_, &audio_source,
- &audio_sink, 2, ids, req);
- if (res != SL_RESULT_SUCCESS) {
- WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
- " failed to create AudioPlayer");
- return -1;
- }
-
- // Realizing the player in synchronous mode.
- res = (*sles_player_)->Realize(sles_player_, SL_BOOLEAN_FALSE);
- if (res != SL_RESULT_SUCCESS) {
- WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
- " failed to realize the player");
- return -1;
- }
- res = (*sles_player_)->GetInterface(
- sles_player_, SL_IID_PLAY,
- static_cast<void*>(&sles_player_itf_));
- if (res != SL_RESULT_SUCCESS) {
- WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
- " failed to get Player interface");
- return -1;
- }
- res = (*sles_player_)->GetInterface(
- sles_player_, SL_IID_BUFFERQUEUE,
- static_cast<void*>(&sles_player_sbq_itf_));
- if (res != SL_RESULT_SUCCESS) {
- WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
- " failed to get Player SimpleBufferQueue interface");
- return -1;
- }
-
- // Setup to receive buffer queue event callbacks
- res = (*sles_player_sbq_itf_)->RegisterCallback(
- sles_player_sbq_itf_,
- PlayerSimpleBufferQueueCallback,
- this);
- if (res != SL_RESULT_SUCCESS) {
- WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
- " failed to register Player Callback");
- return -1;
- }
- is_play_initialized_ = true;
- return 0;
+int32_t AudioDeviceAndroidOpenSLES::MicrophoneBoost(
+ bool& enabled) const { // NOLINT
+ return input_.MicrophoneBoost(enabled);
}
-int32_t AudioDeviceAndroidOpenSLES::InitRecording() {
- CriticalSectionScoped lock(&crit_sect_);
-
- if (!is_initialized_) {
- WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
- " Not initialized");
- return -1;
- }
-
- if (is_recording_) {
- WEBRTC_OPENSL_TRACE(kTraceWarning, kTraceAudioDevice, id_,
- " Recording already started");
- return -1;
- }
-
- if (!is_recording_dev_specified_) {
- WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
- " Recording device is not specified");
- return -1;
- }
-
- if (is_rec_initialized_) {
- WEBRTC_OPENSL_TRACE(kTraceInfo, kTraceAudioDevice, id_,
- " Recording already initialized");
- return 0;
- }
-
- // Initialize the microphone
- if (InitMicrophone() == -1) {
- WEBRTC_OPENSL_TRACE(kTraceWarning, kTraceAudioDevice, id_,
- " InitMicrophone() failed");
- }
-
- if (sles_engine_ == NULL || sles_engine_itf_ == NULL) {
- WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
- " Recording object is NULL");
- return -1;
- }
-
- SLDataLocator_IODevice micLocator = {
- SL_DATALOCATOR_IODEVICE, SL_IODEVICE_AUDIOINPUT,
- SL_DEFAULTDEVICEID_AUDIOINPUT, NULL };
- SLDataSource audio_source = { &micLocator, NULL };
- SLDataLocator_AndroidSimpleBufferQueue simple_buf_queue = {
- SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE,
- static_cast<SLuint32>(N_REC_QUEUE_BUFFERS)
- };
- SLDataSink audio_sink = { &simple_buf_queue, &record_pcm_ };
-
- // Setup the format of the content in the buffer queue
- record_pcm_.formatType = SL_DATAFORMAT_PCM;
- record_pcm_.numChannels = N_REC_CHANNELS;
- if (mic_sampling_rate_ == 44000) {
- record_pcm_.samplesPerSec = 44100 * 1000;
- } else {
- record_pcm_.samplesPerSec = mic_sampling_rate_ * 1000;
- }
- record_pcm_.bitsPerSample = SL_PCMSAMPLEFORMAT_FIXED_16;
- record_pcm_.containerSize = SL_PCMSAMPLEFORMAT_FIXED_16;
- if (1 == record_pcm_.numChannels) {
- record_pcm_.channelMask = SL_SPEAKER_FRONT_CENTER;
- } else if (2 == record_pcm_.numChannels) {
- record_pcm_.channelMask = SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT;
- } else {
- WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
- " %d rec channels not supported", N_REC_CHANNELS);
- }
- record_pcm_.endianness = SL_BYTEORDER_LITTLEENDIAN;
-
- const SLInterfaceID id[2] = {
- SL_IID_ANDROIDSIMPLEBUFFERQUEUE, SL_IID_ANDROIDCONFIGURATION };
- const SLboolean req[2] = {
- SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE };
- int32_t res = -1;
- res = (*sles_engine_itf_)->CreateAudioRecorder(sles_engine_itf_,
- &sles_recorder_,
- &audio_source,
- &audio_sink,
- 2,
- id,
- req);
- if (res != SL_RESULT_SUCCESS) {
- WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
- " failed to create Recorder");
- return -1;
- }
-
- // Realizing the recorder in synchronous mode.
- res = (*sles_recorder_)->Realize(sles_recorder_, SL_BOOLEAN_FALSE);
- if (res != SL_RESULT_SUCCESS) {
- WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
- " failed to realize Recorder");
- return -1;
- }
-
- // Get the RECORD interface - it is an implicit interface
- res = (*sles_recorder_)->GetInterface(
- sles_recorder_, SL_IID_RECORD,
- static_cast<void*>(&sles_recorder_itf_));
- if (res != SL_RESULT_SUCCESS) {
- WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
- " failed to get Recorder interface");
- return -1;
- }
-
- // Get the simpleBufferQueue interface
- res = (*sles_recorder_)->GetInterface(
- sles_recorder_,
- SL_IID_ANDROIDSIMPLEBUFFERQUEUE,
- static_cast<void*>(&sles_recorder_sbq_itf_));
- if (res != SL_RESULT_SUCCESS) {
- WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
- " failed to get Recorder Simple Buffer Queue");
- return -1;
- }
-
- // Setup to receive buffer queue event callbacks
- res = (*sles_recorder_sbq_itf_)->RegisterCallback(
- sles_recorder_sbq_itf_,
- RecorderSimpleBufferQueueCallback,
- this);
- if (res != SL_RESULT_SUCCESS) {
- WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
- " failed to register Recorder Callback");
- return -1;
- }
-
- is_rec_initialized_ = true;
- return 0;
+int32_t AudioDeviceAndroidOpenSLES::StereoPlayoutIsAvailable(
+ bool& available) { // NOLINT
+ return output_.StereoPlayoutIsAvailable(available);
}
-int32_t AudioDeviceAndroidOpenSLES::StartRecording() {
- CriticalSectionScoped lock(&crit_sect_);
-
- if (!is_rec_initialized_) {
- WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
- " Recording not initialized");
- return -1;
- }
-
- if (is_recording_) {
- WEBRTC_OPENSL_TRACE(kTraceInfo, kTraceAudioDevice, id_,
- " Recording already started");
- return 0;
- }
-
- if (sles_recorder_itf_ == NULL) {
- WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
- " RecordITF is NULL");
- return -1;
- }
-
- if (sles_recorder_sbq_itf_ == NULL) {
- WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
- " Recorder Simple Buffer Queue is NULL");
- return -1;
- }
-
- memset(rec_buf_, 0, sizeof(rec_buf_));
- memset(rec_voe_buf_, 0, sizeof(rec_voe_buf_));
- uint32_t num_bytes =
- N_REC_CHANNELS * sizeof(int16_t) * mic_sampling_rate_ / 100;
-
- while (!rec_queue_.empty())
- rec_queue_.pop();
- while (!rec_voe_audio_queue_.empty())
- rec_voe_audio_queue_.pop();
- while (!rec_voe_ready_queue_.empty())
- rec_voe_ready_queue_.pop();
-
- for (int i = 0; i < N_REC_QUEUE_BUFFERS; ++i) {
- rec_voe_ready_queue_.push(rec_voe_buf_[i]);
- }
-
- int32_t res = -1;
- for (int i = 0; i < N_REC_QUEUE_BUFFERS; ++i) {
- // We assign 10ms buffer to each queue, size given in bytes.
- res = (*sles_recorder_sbq_itf_)->Enqueue(
- sles_recorder_sbq_itf_,
- static_cast<void*>(rec_buf_[i]),
- num_bytes);
- if (res != SL_RESULT_SUCCESS) {
- WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
- "Recorder Enqueue failed:%d,%d", i, res);
- break;
- } else {
- rec_queue_.push(rec_buf_[i]);
- }
- }
-
- // Record the audio
- res = (*sles_recorder_itf_)->SetRecordState(sles_recorder_itf_,
- SL_RECORDSTATE_RECORDING);
- if (res != SL_RESULT_SUCCESS) {
- WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
- " failed to start recording");
- return -1;
- }
-
- // Start rec thread and playout thread
- rec_thread_ = ThreadWrapper::CreateThread(
- RecThreadFunc,
- this,
- kRealtimePriority,
- "opensl_capture_thread");
- if (rec_thread_ == NULL) {
- WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, id_,
- " failed to create the rec audio thread");
- return -1;
- }
-
- unsigned int thread_id = 0;
- if (!rec_thread_->Start(thread_id)) {
- WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, id_,
- " failed to start the rec audio thread");
- delete rec_thread_;
- rec_thread_ = NULL;
- return -1;
- }
- rec_thread_id_ = thread_id;
-
- is_recording_ = true;
- return 0;
+int32_t AudioDeviceAndroidOpenSLES::SetStereoPlayout(bool enable) {
+ return output_.SetStereoPlayout(enable);
}
-int32_t AudioDeviceAndroidOpenSLES::StopRecording() {
- // Stop the recording thread
- // Cannot be under lock, risk of deadlock
- if (rec_thread_) {
- if (rec_thread_->Stop()) {
- delete rec_thread_;
- rec_thread_ = NULL;
- } else {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, id_,
- "Failed to stop recording thread ");
- }
- }
-
- CriticalSectionScoped lock(&crit_sect_);
-
- if (!is_rec_initialized_) {
- WEBRTC_OPENSL_TRACE(kTraceInfo, kTraceAudioDevice, id_,
- " Recording is not initialized");
- return 0;
- }
-
- int32_t res = (*sles_recorder_itf_)->SetRecordState(
- sles_recorder_itf_,
- SL_RECORDSTATE_STOPPED);
- if (res != SL_RESULT_SUCCESS) {
- WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
- " failed to stop recording");
- }
-
- res = (*sles_recorder_sbq_itf_)->Clear(sles_recorder_sbq_itf_);
- if (res != SL_RESULT_SUCCESS) {
- WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
- " failed to clear recorder buffer queue");
- }
-
- // Destroy the recorder object
- (*sles_recorder_)->Destroy(sles_recorder_);
-
- is_rec_initialized_ = false;
- is_recording_ = false;
- rec_warning_ = 0;
- rec_error_ = 0;
- sles_recorder_ = NULL;
- sles_recorder_itf_ = NULL;
- sles_recorder_sbq_itf_ = NULL;
-
- return 0;
+int32_t AudioDeviceAndroidOpenSLES::StereoPlayout(
+ bool& enabled) const { // NOLINT
+ return output_.StereoPlayout(enabled);
}
-bool AudioDeviceAndroidOpenSLES::RecordingIsInitialized() const {
- return is_rec_initialized_;
+int32_t AudioDeviceAndroidOpenSLES::StereoRecordingIsAvailable(
+ bool& available) { // NOLINT
+ return input_.StereoRecordingIsAvailable(available);
}
-bool AudioDeviceAndroidOpenSLES::Recording() const {
- return is_recording_;
+int32_t AudioDeviceAndroidOpenSLES::SetStereoRecording(bool enable) {
+ return input_.SetStereoRecording(enable);
}
-bool AudioDeviceAndroidOpenSLES::PlayoutIsInitialized() const {
- return is_play_initialized_;
+int32_t AudioDeviceAndroidOpenSLES::StereoRecording(
+ bool& enabled) const { // NOLINT
+ return input_.StereoRecording(enabled);
}
-int32_t AudioDeviceAndroidOpenSLES::StartPlayout() {
- int i;
- CriticalSectionScoped lock(&crit_sect_);
-
- if (!is_play_initialized_) {
- WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
- " Playout not initialized");
- return -1;
- }
-
- if (is_playing_) {
- WEBRTC_OPENSL_TRACE(kTraceInfo, kTraceAudioDevice, id_,
- " Playout already started");
- return 0;
- }
-
- if (sles_player_itf_ == NULL) {
- WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
- " PlayItf is NULL");
- return -1;
- }
- if (sles_player_sbq_itf_ == NULL) {
- WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
- " PlayerSimpleBufferQueue is NULL");
- return -1;
- }
-
- uint32_t num_bytes =
- N_PLAY_CHANNELS * sizeof(int16_t) * speaker_sampling_rate_ / 100;
-
- memset(play_buf_, 0, sizeof(play_buf_));
-
- while (!play_queue_.empty())
- play_queue_.pop();
-
- int32_t res = -1;
- for (i = 0; i < std::min(2, static_cast<int>(N_PLAY_QUEUE_BUFFERS)); ++i) {
- res = (*sles_player_sbq_itf_)->Enqueue(
- sles_player_sbq_itf_,
- static_cast<void*>(play_buf_[i]),
- num_bytes);
- if (res != SL_RESULT_SUCCESS) {
- WEBRTC_OPENSL_TRACE(kTraceWarning, kTraceAudioDevice, id_,
- " player simpler buffer Enqueue failed:%d,%d",
- i, res);
- break;
- } else {
- play_queue_.push(play_buf_[i]);
- }
- }
-
- res = (*sles_player_itf_)->SetPlayState(
- sles_player_itf_, SL_PLAYSTATE_PLAYING);
- if (res != SL_RESULT_SUCCESS) {
- WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
- " failed to start playout");
- return -1;
- }
-
- play_warning_ = 0;
- play_error_ = 0;
- is_playing_ = true;
-
- return 0;
+int32_t AudioDeviceAndroidOpenSLES::SetPlayoutBuffer(
+ const AudioDeviceModule::BufferType type,
+ uint16_t sizeMS) {
+ return output_.SetPlayoutBuffer(type, sizeMS);
}
-int32_t AudioDeviceAndroidOpenSLES::StopPlayout() {
-
- CriticalSectionScoped lock(&crit_sect_);
- if (!is_play_initialized_) {
- WEBRTC_OPENSL_TRACE(kTraceInfo, kTraceAudioDevice, id_,
- " Playout is not initialized");
- return 0;
- }
-
- if (sles_player_itf_) {
- // Make sure player is stopped
- int32_t res = (*sles_player_itf_)->SetPlayState(sles_player_itf_,
- SL_PLAYSTATE_STOPPED);
- if (res != SL_RESULT_SUCCESS) {
- WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
- " failed to stop playout");
- }
- }
-
- if (sles_player_sbq_itf_) {
- int32_t res = (*sles_player_sbq_itf_)->Clear(sles_player_sbq_itf_);
- if (res != SL_RESULT_SUCCESS) {
- WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
- " failed to clear player buffer queue");
- }
- }
-
- if (sles_player_) {
- // Destroy the player
- (*sles_player_)->Destroy(sles_player_);
- }
-
- if (sles_output_mixer_) {
- // Destroy Output Mix object
- (*sles_output_mixer_)->Destroy(sles_output_mixer_);
- }
-
- is_play_initialized_ = false;
- is_playing_ = false;
- play_warning_ = 0;
- play_error_ = 0;
- sles_player_sbq_itf_ = NULL;
- sles_player_itf_ = NULL;
- sles_player_ = NULL;
- sles_output_mixer_ = NULL;
- return 0;
+int32_t AudioDeviceAndroidOpenSLES::PlayoutBuffer(
+ AudioDeviceModule::BufferType& type,
+ uint16_t& sizeMS) const { // NOLINT
+ return output_.PlayoutBuffer(type, sizeMS);
}
int32_t AudioDeviceAndroidOpenSLES::PlayoutDelay(
- uint16_t& delayMS) const {
- delayMS = playout_delay_;
- return 0;
+ uint16_t& delayMS) const { // NOLINT
+ return output_.PlayoutDelay(delayMS);
}
int32_t AudioDeviceAndroidOpenSLES::RecordingDelay(
- uint16_t& delayMS) const {
- delayMS = recording_delay_;
- return 0;
-}
-
-bool AudioDeviceAndroidOpenSLES::Playing() const {
- return is_playing_;
-}
-
-int32_t AudioDeviceAndroidOpenSLES::SetPlayoutBuffer(
- const AudioDeviceModule::BufferType type,
- uint16_t sizeMS) {
- WEBRTC_OPENSL_TRACE(kTraceWarning, kTraceAudioDevice, id_,
- " API call not supported on this platform");
- return -1;
-}
-
-int32_t AudioDeviceAndroidOpenSLES::PlayoutBuffer(
- AudioDeviceModule::BufferType& type,
- uint16_t& sizeMS) const {
- type = AudioDeviceModule::kAdaptiveBufferSize;
- sizeMS = playout_delay_; // Set to current playout delay
- return 0;
+ uint16_t& delayMS) const { // NOLINT
+ return input_.RecordingDelay(delayMS);
}
int32_t AudioDeviceAndroidOpenSLES::CPULoad(
- uint16_t& load) const {
- WEBRTC_OPENSL_TRACE(kTraceWarning, kTraceAudioDevice, id_,
- " API call not supported on this platform");
+ uint16_t& load) const { // NOLINT
return -1;
}
bool AudioDeviceAndroidOpenSLES::PlayoutWarning() const {
- return (play_warning_ > 0);
+ return output_.PlayoutWarning();
}
bool AudioDeviceAndroidOpenSLES::PlayoutError() const {
- return (play_error_ > 0);
+ return output_.PlayoutError();
}
bool AudioDeviceAndroidOpenSLES::RecordingWarning() const {
- return (rec_warning_ > 0);
+ return input_.RecordingWarning();
}
bool AudioDeviceAndroidOpenSLES::RecordingError() const {
- return (rec_error_ > 0);
+ return input_.RecordingError();
}
void AudioDeviceAndroidOpenSLES::ClearPlayoutWarning() {
- play_warning_ = 0;
+ return output_.ClearPlayoutWarning();
}
void AudioDeviceAndroidOpenSLES::ClearPlayoutError() {
- play_error_ = 0;
+ return output_.ClearPlayoutError();
}
void AudioDeviceAndroidOpenSLES::ClearRecordingWarning() {
- rec_warning_ = 0;
+ return input_.ClearRecordingWarning();
}
void AudioDeviceAndroidOpenSLES::ClearRecordingError() {
- rec_error_ = 0;
+ return input_.ClearRecordingError();
}
-int32_t AudioDeviceAndroidOpenSLES::SetLoudspeakerStatus(bool enable) {
- loundspeaker_on_ = enable;
- return 0;
+void AudioDeviceAndroidOpenSLES::AttachAudioBuffer(
+ AudioDeviceBuffer* audioBuffer) {
+ output_.AttachAudioBuffer(audioBuffer);
+ input_.AttachAudioBuffer(audioBuffer);
}
-int32_t AudioDeviceAndroidOpenSLES::GetLoudspeakerStatus(
- bool& enabled) const {
- enabled = loundspeaker_on_;
- return 0;
+int32_t AudioDeviceAndroidOpenSLES::SetLoudspeakerStatus(bool enable) {
+ return output_.SetLoudspeakerStatus(enable);
}
-void AudioDeviceAndroidOpenSLES::PlayerSimpleBufferQueueCallback(
- SLAndroidSimpleBufferQueueItf queue_itf,
- void* p_context) {
- AudioDeviceAndroidOpenSLES* audio_device =
- static_cast<AudioDeviceAndroidOpenSLES*> (p_context);
- audio_device->PlayerSimpleBufferQueueCallbackHandler(queue_itf);
-}
-
-void AudioDeviceAndroidOpenSLES::PlayerSimpleBufferQueueCallbackHandler(
- SLAndroidSimpleBufferQueueItf queue_itf) {
- if (is_playing_) {
- const unsigned int num_samples = speaker_sampling_rate_ / 100;
- const unsigned int num_bytes =
- N_PLAY_CHANNELS * num_samples * sizeof(int16_t);
- int8_t buf[PLAY_MAX_TEMP_BUF_SIZE_PER_10ms];
- int8_t* audio;
-
- audio = play_queue_.front();
- play_queue_.pop();
-
- int num_out = voe_audio_buffer_->RequestPlayoutData(num_samples);
- num_out = voe_audio_buffer_->GetPlayoutData(buf);
- if (num_samples != static_cast<unsigned int>(num_out)) {
- WEBRTC_OPENSL_TRACE(kTraceWarning, kTraceAudioDevice, id_,
- "num (%u) != num_out (%d)", num_samples, num_out);
- play_warning_ = 1;
- }
- memcpy(audio, buf, num_bytes);
- UpdatePlayoutDelay(num_out);
-
- int res = (*queue_itf)->Enqueue(queue_itf,
- audio,
- num_bytes);
- if (res != SL_RESULT_SUCCESS) {
- WEBRTC_OPENSL_TRACE(kTraceWarning, kTraceAudioDevice, id_,
- " player callback Enqueue failed, %d", res);
- play_warning_ = 1;
- } else {
- play_queue_.push(audio);
- }
- }
-}
-
-bool AudioDeviceAndroidOpenSLES::RecThreadFunc(void* context) {
- return (static_cast<AudioDeviceAndroidOpenSLES*>(
- context)->RecThreadFuncImpl());
-}
-
-void AudioDeviceAndroidOpenSLES::RecorderSimpleBufferQueueCallback(
- SLAndroidSimpleBufferQueueItf queue_itf,
- void* p_context) {
- AudioDeviceAndroidOpenSLES* audio_device =
- static_cast<AudioDeviceAndroidOpenSLES*>(p_context);
- audio_device->RecorderSimpleBufferQueueCallbackHandler(queue_itf);
-}
-
-bool AudioDeviceAndroidOpenSLES::RecThreadFuncImpl() {
- if (is_recording_) {
- // TODO(leozwang): Add seting correct scheduling and thread priority.
-
- const unsigned int num_samples = mic_sampling_rate_ / 100;
- const unsigned int num_bytes =
- N_REC_CHANNELS * num_samples * sizeof(int16_t);
- const unsigned int total_bytes = num_bytes;
- int8_t buf[REC_MAX_TEMP_BUF_SIZE_PER_10ms];
-
- {
- CriticalSectionScoped lock(&crit_sect_);
- if (rec_voe_audio_queue_.size() <= 0) {
- rec_timer_.Wait(1);
- return true;
- }
-
- int8_t* audio = rec_voe_audio_queue_.front();
- rec_voe_audio_queue_.pop();
- memcpy(buf, audio, total_bytes);
- memset(audio, 0, total_bytes);
- rec_voe_ready_queue_.push(audio);
- }
-
- UpdateRecordingDelay();
- voe_audio_buffer_->SetRecordedBuffer(buf, num_samples);
- voe_audio_buffer_->SetVQEData(playout_delay_, recording_delay_, 0);
- voe_audio_buffer_->DeliverRecordedData();
- }
-
- return true;
-}
-
-void AudioDeviceAndroidOpenSLES::RecorderSimpleBufferQueueCallbackHandler(
- SLAndroidSimpleBufferQueueItf queue_itf) {
- if (is_recording_) {
- const unsigned int num_samples = mic_sampling_rate_ / 100;
- const unsigned int num_bytes =
- N_REC_CHANNELS * num_samples * sizeof(int16_t);
- const unsigned int total_bytes = num_bytes;
- int8_t* audio;
-
- {
- CriticalSectionScoped lock(&crit_sect_);
- audio = rec_queue_.front();
- rec_queue_.pop();
- rec_voe_audio_queue_.push(audio);
-
- if (rec_voe_ready_queue_.size() <= 0) {
- // Log Error.
- rec_error_ = 1;
- WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
- " Audio Rec thread buffers underrun");
- } else {
- audio = rec_voe_ready_queue_.front();
- rec_voe_ready_queue_.pop();
- }
- }
-
- int32_t res = (*queue_itf)->Enqueue(queue_itf,
- audio,
- total_bytes);
- if (res != SL_RESULT_SUCCESS) {
- WEBRTC_OPENSL_TRACE(kTraceWarning, kTraceAudioDevice, id_,
- " recorder callback Enqueue failed, %d", res);
- rec_warning_ = 1;
- return;
- } else {
- rec_queue_.push(audio);
- }
-
- // TODO(leozwang): OpenSL ES doesn't support AudioRecorder
- // volume control now, add it when it's ready.
- }
-}
-
-void AudioDeviceAndroidOpenSLES::CheckErr(SLresult res) {
- if (res != SL_RESULT_SUCCESS) {
- WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
- " AudioDeviceAndroidOpenSLES::CheckErr(%d)", res);
- exit(-1);
- }
-}
-
-void AudioDeviceAndroidOpenSLES::UpdatePlayoutDelay(
- uint32_t nSamplePlayed) {
- // TODO(leozwang): Add accurate delay estimat.
- playout_delay_ = (N_PLAY_QUEUE_BUFFERS - 0.5) * 10 +
- N_PLAY_QUEUE_BUFFERS * nSamplePlayed / (speaker_sampling_rate_ / 1000);
-}
-
-void AudioDeviceAndroidOpenSLES::UpdateRecordingDelay() {
- // TODO(leozwang): Add accurate delay estimat.
- recording_delay_ = 10;
- const uint32_t noSamp10ms = mic_sampling_rate_ / 100;
- recording_delay_ += (N_REC_QUEUE_BUFFERS * noSamp10ms) /
- (mic_sampling_rate_ / 1000);
-}
-
-int32_t AudioDeviceAndroidOpenSLES::InitSampleRate() {
- if (sles_engine_ == NULL) {
- WEBRTC_OPENSL_TRACE(kTraceError, kTraceAudioDevice, id_,
- " SL Object is NULL");
- return -1;
- }
-
- mic_sampling_rate_ = N_REC_SAMPLES_PER_SEC;
- speaker_sampling_rate_ = N_PLAY_SAMPLES_PER_SEC;
-
- WEBRTC_OPENSL_TRACE(kTraceStateInfo, kTraceAudioDevice, id_,
- " mic sample rate (%d), speaker sample rate (%d)",
- mic_sampling_rate_, speaker_sampling_rate_);
- return 0;
+int32_t AudioDeviceAndroidOpenSLES::GetLoudspeakerStatus(
+ bool& enable) const { // NOLINT
+ return output_.GetLoudspeakerStatus(enable);
}
} // namespace webrtc
diff --git a/modules/audio_device/android/audio_device_opensles_android.h b/modules/audio_device/android/audio_device_opensles_android.h
index c33a0343..1231e11b 100644
--- a/modules/audio_device/android/audio_device_opensles_android.h
+++ b/modules/audio_device/android/audio_device_opensles_android.h
@@ -8,97 +8,54 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef SRC_MODULES_AUDIO_DEVICE_ANDROID_AUDIO_DEVICE_OPENSLES_ANDROID_H_
-#define SRC_MODULES_AUDIO_DEVICE_ANDROID_AUDIO_DEVICE_OPENSLES_ANDROID_H_
-
-#include <jni.h>
-#include <stdio.h>
-#include <stdlib.h>
-
-#include <SLES/OpenSLES.h>
-#include <SLES/OpenSLES_Android.h>
-#include <SLES/OpenSLES_AndroidConfiguration.h>
-
-#include <queue>
+#ifndef WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_AUDIO_DEVICE_OPENSLES_ANDROID_H_
+#define WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_AUDIO_DEVICE_OPENSLES_ANDROID_H_
#include "webrtc/modules/audio_device/audio_device_generic.h"
-#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
+#include "webrtc/modules/audio_device/android/opensles_input.h"
+#include "webrtc/modules/audio_device/android/opensles_output.h"
namespace webrtc {
-class EventWrapper;
-
-const uint32_t N_MAX_INTERFACES = 3;
-const uint32_t N_MAX_OUTPUT_DEVICES = 6;
-const uint32_t N_MAX_INPUT_DEVICES = 3;
-
-const uint32_t N_REC_SAMPLES_PER_SEC = 16000; // Default fs
-const uint32_t N_PLAY_SAMPLES_PER_SEC = 16000; // Default fs
-
-const uint32_t N_REC_CHANNELS = 1;
-const uint32_t N_PLAY_CHANNELS = 1;
-
-const uint32_t REC_BUF_SIZE_IN_SAMPLES = 480;
-const uint32_t PLAY_BUF_SIZE_IN_SAMPLES = 480;
-
-const uint32_t REC_MAX_TEMP_BUF_SIZE_PER_10ms =
- N_REC_CHANNELS * REC_BUF_SIZE_IN_SAMPLES * sizeof(int16_t);
-
-const uint32_t PLAY_MAX_TEMP_BUF_SIZE_PER_10ms =
- N_PLAY_CHANNELS * PLAY_BUF_SIZE_IN_SAMPLES * sizeof(int16_t);
-
-// Number of the buffers in playout queue
-const uint16_t N_PLAY_QUEUE_BUFFERS = 8;
-// Number of buffers in recording queue
-// TODO(xian): Reduce the numbers of buffers to improve the latency.
-const uint16_t N_REC_QUEUE_BUFFERS = 8;
-// Some values returned from getMinBufferSize
-// (Nexus S playout 72ms, recording 64ms)
-// (Galaxy, 167ms, 44ms)
-// (Nexus 7, 72ms, 48ms)
-// (Xoom 92ms, 40ms)
-
-class ThreadWrapper;
-
-class AudioDeviceAndroidOpenSLES: public AudioDeviceGeneric {
+// Implements the interface of AudioDeviceGeneric. OpenSlesOutput and
+// OpenSlesInput are the implementations.
+class AudioDeviceAndroidOpenSLES : public AudioDeviceGeneric {
public:
explicit AudioDeviceAndroidOpenSLES(const int32_t id);
- ~AudioDeviceAndroidOpenSLES();
+ virtual ~AudioDeviceAndroidOpenSLES();
// Retrieve the currently utilized audio layer
- virtual int32_t
- ActiveAudioLayer(AudioDeviceModule::AudioLayer& audioLayer) const; // NOLINT
+ virtual int32_t ActiveAudioLayer(
+ AudioDeviceModule::AudioLayer& audioLayer) const; // NOLINT
// Main initializaton and termination
virtual int32_t Init();
- virtual int32_t Terminate();
+ virtual int32_t Terminate();
virtual bool Initialized() const;
// Device enumeration
virtual int16_t PlayoutDevices();
virtual int16_t RecordingDevices();
- virtual int32_t
- PlayoutDeviceName(uint16_t index,
- char name[kAdmMaxDeviceNameSize],
- char guid[kAdmMaxGuidSize]);
- virtual int32_t
- RecordingDeviceName(uint16_t index,
- char name[kAdmMaxDeviceNameSize],
- char guid[kAdmMaxGuidSize]);
+ virtual int32_t PlayoutDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]);
+ virtual int32_t RecordingDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]);
// Device selection
virtual int32_t SetPlayoutDevice(uint16_t index);
- virtual int32_t
- SetPlayoutDevice(AudioDeviceModule::WindowsDeviceType device);
+ virtual int32_t SetPlayoutDevice(
+ AudioDeviceModule::WindowsDeviceType device);
virtual int32_t SetRecordingDevice(uint16_t index);
- virtual int32_t
- SetRecordingDevice(AudioDeviceModule::WindowsDeviceType device);
+ virtual int32_t SetRecordingDevice(
+ AudioDeviceModule::WindowsDeviceType device);
// Audio transport initialization
- virtual int32_t PlayoutIsAvailable(bool& available); // NOLINT
+ virtual int32_t PlayoutIsAvailable(bool& available);
virtual int32_t InitPlayout();
virtual bool PlayoutIsInitialized() const;
- virtual int32_t RecordingIsAvailable(bool& available); // NOLINT
+ virtual int32_t RecordingIsAvailable(bool& available);
virtual int32_t InitRecording();
virtual bool RecordingIsInitialized() const;
@@ -115,80 +72,67 @@ class AudioDeviceAndroidOpenSLES: public AudioDeviceGeneric {
virtual bool AGC() const;
// Volume control based on the Windows Wave API (Windows only)
- virtual int32_t SetWaveOutVolume(uint16_t volumeLeft, uint16_t volumeRight);
+ virtual int32_t SetWaveOutVolume(uint16_t volumeLeft,
+ uint16_t volumeRight);
virtual int32_t WaveOutVolume(
uint16_t& volumeLeft, // NOLINT
uint16_t& volumeRight) const; // NOLINT
// Audio mixer initialization
- virtual int32_t SpeakerIsAvailable(bool& available); // NOLINT
+ virtual int32_t SpeakerIsAvailable(bool& available);
virtual int32_t InitSpeaker();
virtual bool SpeakerIsInitialized() const;
- virtual int32_t MicrophoneIsAvailable(
- bool& available);
+ virtual int32_t MicrophoneIsAvailable(bool& available);
virtual int32_t InitMicrophone();
virtual bool MicrophoneIsInitialized() const;
// Speaker volume controls
- virtual int32_t SpeakerVolumeIsAvailable(
- bool& available); // NOLINT
+ virtual int32_t SpeakerVolumeIsAvailable(bool& available);
virtual int32_t SetSpeakerVolume(uint32_t volume);
- virtual int32_t SpeakerVolume(
- uint32_t& volume) const; // NOLINT
- virtual int32_t MaxSpeakerVolume(
- uint32_t& maxVolume) const; // NOLINT
- virtual int32_t MinSpeakerVolume(
- uint32_t& minVolume) const; // NOLINT
- virtual int32_t SpeakerVolumeStepSize(
- uint16_t& stepSize) const; // NOLINT
+ virtual int32_t SpeakerVolume(uint32_t& volume) const;
+ virtual int32_t MaxSpeakerVolume(uint32_t& maxVolume) const;
+ virtual int32_t MinSpeakerVolume(uint32_t& minVolume) const;
+ virtual int32_t SpeakerVolumeStepSize(uint16_t& stepSize) const;
// Microphone volume controls
- virtual int32_t MicrophoneVolumeIsAvailable(
- bool& available); // NOLINT
+ virtual int32_t MicrophoneVolumeIsAvailable(bool& available);
virtual int32_t SetMicrophoneVolume(uint32_t volume);
- virtual int32_t MicrophoneVolume(
- uint32_t& volume) const; // NOLINT
- virtual int32_t MaxMicrophoneVolume(
- uint32_t& maxVolume) const; // NOLINT
- virtual int32_t MinMicrophoneVolume(
- uint32_t& minVolume) const; // NOLINT
- virtual int32_t
- MicrophoneVolumeStepSize(uint16_t& stepSize) const; // NOLINT
+ virtual int32_t MicrophoneVolume(uint32_t& volume) const;
+ virtual int32_t MaxMicrophoneVolume(uint32_t& maxVolume) const;
+ virtual int32_t MinMicrophoneVolume(uint32_t& minVolume) const;
+ virtual int32_t MicrophoneVolumeStepSize(
+ uint16_t& stepSize) const;
// Speaker mute control
- virtual int32_t SpeakerMuteIsAvailable(bool& available); // NOLINT
+ virtual int32_t SpeakerMuteIsAvailable(bool& available);
virtual int32_t SetSpeakerMute(bool enable);
- virtual int32_t SpeakerMute(bool& enabled) const; // NOLINT
+ virtual int32_t SpeakerMute(bool& enabled) const;
// Microphone mute control
- virtual int32_t MicrophoneMuteIsAvailable(bool& available); // NOLINT
+ virtual int32_t MicrophoneMuteIsAvailable(bool& available);
virtual int32_t SetMicrophoneMute(bool enable);
- virtual int32_t MicrophoneMute(bool& enabled) const; // NOLINT
+ virtual int32_t MicrophoneMute(bool& enabled) const;
// Microphone boost control
- virtual int32_t MicrophoneBoostIsAvailable(bool& available); // NOLINT
+ virtual int32_t MicrophoneBoostIsAvailable(bool& available);
virtual int32_t SetMicrophoneBoost(bool enable);
- virtual int32_t MicrophoneBoost(bool& enabled) const; // NOLINT
+ virtual int32_t MicrophoneBoost(bool& enabled) const;
// Stereo support
- virtual int32_t StereoPlayoutIsAvailable(bool& available); // NOLINT
+ virtual int32_t StereoPlayoutIsAvailable(bool& available);
virtual int32_t SetStereoPlayout(bool enable);
- virtual int32_t StereoPlayout(bool& enabled) const; // NOLINT
- virtual int32_t StereoRecordingIsAvailable(bool& available); // NOLINT
+ virtual int32_t StereoPlayout(bool& enabled) const;
+ virtual int32_t StereoRecordingIsAvailable(bool& available);
virtual int32_t SetStereoRecording(bool enable);
- virtual int32_t StereoRecording(bool& enabled) const; // NOLINT
+ virtual int32_t StereoRecording(bool& enabled) const;
// Delay information and control
- virtual int32_t
- SetPlayoutBuffer(const AudioDeviceModule::BufferType type,
- uint16_t sizeMS);
- virtual int32_t PlayoutBuffer(
- AudioDeviceModule::BufferType& type, // NOLINT
- uint16_t& sizeMS) const;
- virtual int32_t PlayoutDelay(
- uint16_t& delayMS) const; // NOLINT
- virtual int32_t RecordingDelay(
- uint16_t& delayMS) const; // NOLINT
+ virtual int32_t SetPlayoutBuffer(const AudioDeviceModule::BufferType type,
+ uint16_t sizeMS);
+ virtual int32_t PlayoutBuffer(AudioDeviceModule::BufferType& type,
+ uint16_t& sizeMS) const;
+ virtual int32_t PlayoutDelay(uint16_t& delayMS) const;
+ virtual int32_t RecordingDelay(uint16_t& delayMS) const;
// CPU load
virtual int32_t CPULoad(uint16_t& load) const; // NOLINT
@@ -208,109 +152,13 @@ class AudioDeviceAndroidOpenSLES: public AudioDeviceGeneric {
// Speaker audio routing
virtual int32_t SetLoudspeakerStatus(bool enable);
- virtual int32_t GetLoudspeakerStatus(bool& enable) const; // NOLINT
+ virtual int32_t GetLoudspeakerStatus(bool& enable) const;
private:
- // Lock
- void Lock() {
- crit_sect_.Enter();
- };
- void UnLock() {
- crit_sect_.Leave();
- };
-
- static void PlayerSimpleBufferQueueCallback(
- SLAndroidSimpleBufferQueueItf queueItf,
- void *pContext);
- static void RecorderSimpleBufferQueueCallback(
- SLAndroidSimpleBufferQueueItf queueItf,
- void *pContext);
- void PlayerSimpleBufferQueueCallbackHandler(
- SLAndroidSimpleBufferQueueItf queueItf);
- void RecorderSimpleBufferQueueCallbackHandler(
- SLAndroidSimpleBufferQueueItf queueItf);
- void CheckErr(SLresult res);
-
- // Delay updates
- void UpdateRecordingDelay();
- void UpdatePlayoutDelay(uint32_t nSamplePlayed);
-
- // Init
- int32_t InitSampleRate();
-
- // Misc
- AudioDeviceBuffer* voe_audio_buffer_;
- CriticalSectionWrapper& crit_sect_;
- int32_t id_;
-
- // audio unit
- SLObjectItf sles_engine_;
-
- // playout device
- SLObjectItf sles_player_;
- SLEngineItf sles_engine_itf_;
- SLPlayItf sles_player_itf_;
- SLAndroidSimpleBufferQueueItf sles_player_sbq_itf_;
- SLObjectItf sles_output_mixer_;
- SLVolumeItf sles_speaker_volume_;
-
- // recording device
- SLObjectItf sles_recorder_;
- SLRecordItf sles_recorder_itf_;
- SLAndroidSimpleBufferQueueItf sles_recorder_sbq_itf_;
- SLDeviceVolumeItf sles_mic_volume_;
- uint32_t mic_dev_id_;
-
- uint32_t play_warning_, play_error_;
- uint32_t rec_warning_, rec_error_;
-
- // States
- bool is_recording_dev_specified_;
- bool is_playout_dev_specified_;
- bool is_initialized_;
- bool is_recording_;
- bool is_playing_;
- bool is_rec_initialized_;
- bool is_play_initialized_;
- bool is_mic_initialized_;
- bool is_speaker_initialized_;
-
- // Delay
- uint16_t playout_delay_;
- uint16_t recording_delay_;
-
- // AGC state
- bool agc_enabled_;
-
- // Threads
- ThreadWrapper* rec_thread_;
- uint32_t rec_thread_id_;
- static bool RecThreadFunc(void* context);
- bool RecThreadFuncImpl();
- EventWrapper& rec_timer_;
-
- uint32_t mic_sampling_rate_;
- uint32_t speaker_sampling_rate_;
- uint32_t max_speaker_vol_;
- uint32_t min_speaker_vol_;
- bool loundspeaker_on_;
-
- SLDataFormat_PCM player_pcm_;
- SLDataFormat_PCM record_pcm_;
-
- std::queue<int8_t*> rec_queue_;
- std::queue<int8_t*> rec_voe_audio_queue_;
- std::queue<int8_t*> rec_voe_ready_queue_;
- int8_t rec_buf_[N_REC_QUEUE_BUFFERS][
- N_REC_CHANNELS * sizeof(int16_t) * REC_BUF_SIZE_IN_SAMPLES];
- int8_t rec_voe_buf_[N_REC_QUEUE_BUFFERS][
- N_REC_CHANNELS * sizeof(int16_t) * REC_BUF_SIZE_IN_SAMPLES];
-
- std::queue<int8_t*> play_queue_;
- int8_t play_buf_[N_PLAY_QUEUE_BUFFERS][
- N_PLAY_CHANNELS * sizeof(int16_t) * PLAY_BUF_SIZE_IN_SAMPLES];
+ OpenSlesOutput output_;
+ OpenSlesInput input_;
};
} // namespace webrtc
-#endif // SRC_MODULES_AUDIO_DEVICE_ANDROID_AUDIO_DEVICE_OPENSLES_ANDROID_H_
+#endif // WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_AUDIO_DEVICE_OPENSLES_ANDROID_H_
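The hunk above reduces the old monolithic device class to a thin facade over the new OpenSlesOutput and OpenSlesInput members. A minimal sketch of the forwarding this implies — the bodies here are illustrative assumptions; the real ones live in the rewritten audio_device_opensles_android.cc:

int32_t AudioDeviceAndroidOpenSLES::InitRecording() {
  return input_.InitRecording();  // Capture side delegates to OpenSlesInput.
}

int32_t AudioDeviceAndroidOpenSLES::PlayoutDelay(uint16_t& delayMS) const {
  return output_.PlayoutDelay(delayMS);  // Render side delegates to output.
}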
diff --git a/modules/audio_device/android/audio_manager_jni.cc b/modules/audio_device/android/audio_manager_jni.cc
new file mode 100644
index 00000000..b8dec2a9
--- /dev/null
+++ b/modules/audio_device/android/audio_manager_jni.cc
@@ -0,0 +1,151 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_device/android/audio_manager_jni.h"
+
+#include <assert.h>
+
+#include "webrtc/system_wrappers/interface/trace.h"
+
+namespace {
+
+class AttachThreadScoped {
+ public:
+ explicit AttachThreadScoped(JavaVM* jvm)
+ : attached_(false), jvm_(jvm), env_(NULL) {
+ jint ret_val = jvm->GetEnv(reinterpret_cast<void**>(&env_),
+ REQUIRED_JNI_VERSION);
+ if (ret_val == JNI_EDETACHED) {
+ // Attach the thread to the Java VM.
+ ret_val = jvm_->AttachCurrentThread(&env_, NULL);
+ attached_ = ret_val == JNI_OK;
+ assert(attached_);
+ }
+ }
+ ~AttachThreadScoped() {
+ if (attached_ && (jvm_->DetachCurrentThread() < 0)) {
+ assert(false);
+ }
+ }
+
+ JNIEnv* env() { return env_; }
+
+ private:
+ bool attached_;
+ JavaVM* jvm_;
+ JNIEnv* env_;
+};
+
+} // namespace
+
+namespace webrtc {
+
+static JavaVM* g_jvm_ = NULL;
+static JNIEnv* g_jni_env_ = NULL;
+static jobject g_context_ = NULL;
+static jclass g_audio_manager_class_ = NULL;
+static jobject g_audio_manager_ = NULL;
+
+AudioManagerJni::AudioManagerJni()
+ : low_latency_supported_(false),
+ native_output_sample_rate_(0),
+ native_buffer_size_(0) {
+ if (!HasDeviceObjects()) {
+ assert(false);
+ }
+ AttachThreadScoped ats(g_jvm_);
+ JNIEnv* env = ats.env();
+ assert(env && "Unsupported JNI version!");
+ CreateInstance(env);
+ // Pre-store device specific values.
+ SetLowLatencySupported(env);
+ SetNativeOutputSampleRate(env);
+ SetNativeFrameSize(env);
+}
+
+void AudioManagerJni::SetAndroidAudioDeviceObjects(void* jvm, void* env,
+ void* context) {
+ assert(jvm);
+ assert(env);
+ assert(context);
+
+ // Store global Java VM variables to be accessed by API calls.
+ g_jvm_ = reinterpret_cast<JavaVM*>(jvm);
+ g_jni_env_ = reinterpret_cast<JNIEnv*>(env);
+ g_context_ = g_jni_env_->NewGlobalRef(reinterpret_cast<jobject>(context));
+
+ // FindClass must be made in this function since this function's contract
+ // requires it to be called by a Java thread.
+ // See
+ // http://developer.android.com/training/articles/perf-jni.html#faq_FindClass
+ // as to why this is necessary.
+ // Get the AudioManagerAndroid class object.
+ jclass javaAmClassLocal = g_jni_env_->FindClass(
+ "org/webrtc/voiceengine/AudioManagerAndroid");
+ assert(javaAmClassLocal);
+
+ // Create a global reference such that the class object is not recycled by
+ // the garbage collector.
+ g_audio_manager_class_ = reinterpret_cast<jclass>(
+ g_jni_env_->NewGlobalRef(javaAmClassLocal));
+ assert(g_audio_manager_class_);
+}
+
+void AudioManagerJni::ClearAndroidAudioDeviceObjects() {
+ g_jni_env_->DeleteGlobalRef(g_audio_manager_class_);
+ g_audio_manager_class_ = NULL;
+ g_jni_env_->DeleteGlobalRef(g_context_);
+ g_context_ = NULL;
+ g_jni_env_->DeleteGlobalRef(g_audio_manager_);
+ g_audio_manager_ = NULL;
+ g_jni_env_ = NULL;
+ g_jvm_ = NULL;
+}
+
+void AudioManagerJni::SetLowLatencySupported(JNIEnv* env) {
+ jmethodID id = LookUpMethodId(env, "isAudioLowLatencySupported", "()Z");
+ low_latency_supported_ = env->CallBooleanMethod(g_audio_manager_, id);
+}
+
+void AudioManagerJni::SetNativeOutputSampleRate(JNIEnv* env) {
+ jmethodID id = LookUpMethodId(env, "getNativeOutputSampleRate", "()I");
+ native_output_sample_rate_ = env->CallIntMethod(g_audio_manager_, id);
+}
+
+void AudioManagerJni::SetNativeFrameSize(JNIEnv* env) {
+ jmethodID id = LookUpMethodId(env,
+ "getAudioLowLatencyOutputFrameSize", "()I");
+ native_buffer_size_ = env->CallIntMethod(g_audio_manager_, id);
+}
+
+bool AudioManagerJni::HasDeviceObjects() {
+ return g_jvm_ && g_jni_env_ && g_context_ && g_audio_manager_class_;
+}
+
+jmethodID AudioManagerJni::LookUpMethodId(JNIEnv* env,
+ const char* method_name,
+ const char* method_signature) {
+ jmethodID ret_val = env->GetMethodID(g_audio_manager_class_, method_name,
+ method_signature);
+ assert(ret_val);
+ return ret_val;
+}
+
+void AudioManagerJni::CreateInstance(JNIEnv* env) {
+ // Get the method ID for the constructor taking Context.
+ jmethodID id = LookUpMethodId(env, "<init>", "(Landroid/content/Context;)V");
+ g_audio_manager_ = env->NewObject(g_audio_manager_class_, id, g_context_);
+ // Create a global reference so that the instance is accessible until no
+ // longer needed.
+ g_audio_manager_ = env->NewGlobalRef(g_audio_manager_);
+ assert(g_audio_manager_);
+}
+
+} // namespace webrtc
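SetAndroidAudioDeviceObjects must run on a Java thread for FindClass to resolve application classes. A hypothetical registration sketch (not part of this CL; g_application_context is an assumed global set up elsewhere via NewGlobalRef) — JNI_OnLoad is a natural fit since it always runs on a Java thread:

#include <jni.h>

#include "webrtc/modules/audio_device/android/audio_manager_jni.h"

static jobject g_application_context = NULL;  // Assumed to be set elsewhere.

jint JNI_OnLoad(JavaVM* jvm, void* reserved) {
  JNIEnv* env = NULL;
  if (jvm->GetEnv(reinterpret_cast<void**>(&env), JNI_VERSION_1_4) != JNI_OK)
    return -1;
  webrtc::AudioManagerJni::SetAndroidAudioDeviceObjects(jvm, env,
                                                        g_application_context);
  return JNI_VERSION_1_4;
}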
diff --git a/modules/audio_device/android/audio_manager_jni.h b/modules/audio_device/android/audio_manager_jni.h
new file mode 100644
index 00000000..6f85e72d
--- /dev/null
+++ b/modules/audio_device/android/audio_manager_jni.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Android APIs used to access Java functionality needed to enable low latency
+// audio.
+
+#ifndef WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_AUDIO_MANAGER_JNI_H_
+#define WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_AUDIO_MANAGER_JNI_H_
+
+#include <jni.h>
+
+namespace webrtc {
+
+#define REQUIRED_JNI_VERSION JNI_VERSION_1_4
+
+class AudioManagerJni {
+ public:
+ AudioManagerJni();
+ ~AudioManagerJni() {}
+
+ // SetAndroidAudioDeviceObjects must only be called once unless there has
+ // been a subsequent call to ClearAndroidAudioDeviceObjects. For each
+ // call to ClearAndroidAudioDeviceObjects, SetAndroidAudioDeviceObjects may
+ // be called once more.
+ // This function must be called by a Java thread as calling it from a thread
+ // created by the native application will prevent FindClass from working. See
+ // http://developer.android.com/training/articles/perf-jni.html#faq_FindClass
+ // for more details.
+ // It has to be called before this class' APIs can be used successfully.
+ // After a call to ClearAndroidAudioDeviceObjects, this class' APIs will
+ // fail until SetAndroidAudioDeviceObjects is called again.
+ static void SetAndroidAudioDeviceObjects(void* jvm, void* env,
+ void* context);
+ // This function must be called when the AudioManagerJni class is no
+ // longer needed. It frees up the global references acquired in
+ // SetAndroidAudioDeviceObjects.
+ static void ClearAndroidAudioDeviceObjects();
+
+ bool low_latency_supported() { return low_latency_supported_; }
+ int native_output_sample_rate() { return native_output_sample_rate_; }
+ int native_buffer_size() { return native_buffer_size_; }
+
+ private:
+ bool HasDeviceObjects();
+
+ // The following functions assume that the calling thread has been attached.
+ void SetLowLatencySupported(JNIEnv* env);
+ void SetNativeOutputSampleRate(JNIEnv* env);
+ void SetNativeFrameSize(JNIEnv* env);
+
+ jmethodID LookUpMethodId(JNIEnv* env, const char* method_name,
+ const char* method_signature);
+
+ void CreateInstance(JNIEnv* env);
+
+ // Whether low latency audio is supported, the native output sample rate and
+ // the audio buffer size do not change during the lifetime of the process,
+ // so the values might as well be cached at initialization.
+ bool low_latency_supported_;
+ int native_output_sample_rate_;
+ int native_buffer_size_;
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_AUDIO_MANAGER_JNI_H_
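Once the static objects are registered, constructing an AudioManagerJni caches the three device properties. A usage sketch (the values in the comments are examples, not guarantees):

#include "webrtc/modules/audio_device/android/audio_manager_jni.h"

void QueryDeviceAudioProperties() {
  webrtc::AudioManagerJni am;  // Attaches to the VM if needed, then caches.
  bool fast = am.low_latency_supported();        // FEATURE_AUDIO_LOW_LATENCY.
  int rate_hz = am.native_output_sample_rate();  // e.g. 44100 or 48000.
  int frames = am.native_buffer_size();          // e.g. 256 frames.
  (void)fast; (void)rate_hz; (void)frames;
}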
diff --git a/modules/audio_device/android/fine_audio_buffer.cc b/modules/audio_device/android/fine_audio_buffer.cc
new file mode 100644
index 00000000..ee566799
--- /dev/null
+++ b/modules/audio_device/android/fine_audio_buffer.cc
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_device/android/fine_audio_buffer.h"
+
+#include <assert.h>
+#include <memory.h>
+#include <stdio.h>
+#include <algorithm>
+
+#include "webrtc/modules/audio_device/audio_device_buffer.h"
+
+namespace webrtc {
+
+FineAudioBuffer::FineAudioBuffer(AudioDeviceBuffer* device_buffer,
+ int desired_frame_size_bytes,
+ int sample_rate)
+ : device_buffer_(device_buffer),
+ desired_frame_size_bytes_(desired_frame_size_bytes),
+ sample_rate_(sample_rate),
+ samples_per_10_ms_(sample_rate_ * 10 / 1000),
+ bytes_per_10_ms_(samples_per_10_ms_ * sizeof(int16_t)),
+ cached_buffer_start_(0),
+ cached_bytes_(0) {
+ cache_buffer_.reset(new int8_t[bytes_per_10_ms_]);
+}
+
+FineAudioBuffer::~FineAudioBuffer() {
+}
+
+int FineAudioBuffer::RequiredBufferSizeBytes() {
+ // It is possible that we store the desired frame size - 1 samples. Since new
+ // audio frames are pulled in chunks of 10ms we will need a buffer that can
+ // hold desired_frame_size - 1 + 10ms of data. We omit the - 1.
+ return desired_frame_size_bytes_ + bytes_per_10_ms_;
+}
+
+void FineAudioBuffer::GetBufferData(int8_t* buffer) {
+ if (desired_frame_size_bytes_ <= cached_bytes_) {
+ memcpy(buffer, &cache_buffer_.get()[cached_buffer_start_],
+ desired_frame_size_bytes_);
+ cached_buffer_start_ += desired_frame_size_bytes_;
+ cached_bytes_ -= desired_frame_size_bytes_;
+ assert(cached_buffer_start_ + cached_bytes_ < bytes_per_10_ms_);
+ return;
+ }
+ memcpy(buffer, &cache_buffer_.get()[cached_buffer_start_], cached_bytes_);
+ // Push another n*10ms of audio to |buffer|. n > 1 if
+ // |desired_frame_size_bytes_| is greater than 10ms of audio. Note that we
+ // write the audio after the cached bytes copied earlier.
+ int8_t* unwritten_buffer = &buffer[cached_bytes_];
+ int bytes_left = desired_frame_size_bytes_ - cached_bytes_;
+ // Ceiling of integer division: 1 + ((x - 1) / y)
+ int number_of_requests = 1 + (bytes_left - 1) / (bytes_per_10_ms_);
+ for (int i = 0; i < number_of_requests; ++i) {
+ device_buffer_->RequestPlayoutData(samples_per_10_ms_);
+ int num_out = device_buffer_->GetPlayoutData(unwritten_buffer);
+ if (num_out != samples_per_10_ms_) {
+ assert(num_out == 0);
+ cached_bytes_ = 0;
+ return;
+ }
+ unwritten_buffer += bytes_per_10_ms_;
+ assert(bytes_left >= 0);
+ bytes_left -= bytes_per_10_ms_;
+ }
+ assert(bytes_left <= 0);
+ // Put the samples that were written to |buffer| but are not used in the
+ // cache.
+ int cache_location = desired_frame_size_bytes_;
+ int8_t* cache_ptr = &buffer[cache_location];
+ cached_bytes_ = number_of_requests * bytes_per_10_ms_ -
+ (desired_frame_size_bytes_ - cached_bytes_);
+ // If cached_bytes_ is larger than the cache buffer, uninitialized memory
+ // will be read.
+ assert(cached_bytes_ <= bytes_per_10_ms_);
+ assert(-bytes_left == cached_bytes_);
+ cached_buffer_start_ = 0;
+ memcpy(cache_buffer_.get(), cache_ptr, cached_bytes_);
+}
+
+} // namespace webrtc
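A minimal sanity check of the caching arithmetic above, under assumed values: 44.1 kHz mono (882 bytes per 10 ms) and a desired frame of 960 bytes. It mirrors only GetBufferData's bookkeeping, not the actual copying:

#include <assert.h>

int main() {
  const int bytes_per_10_ms = 441 * 2;  // 10 ms of 16-bit mono at 44.1 kHz.
  const int desired = 960;              // Bytes returned per GetBufferData.
  int cached = 0;                       // Mirrors cached_bytes_.
  for (int frame = 0; frame < 5; ++frame) {
    if (desired <= cached) {
      cached -= desired;  // Frame served entirely from the cache.
      continue;
    }
    int bytes_left = desired - cached;
    // Ceiling of integer division, as in GetBufferData.
    int requests = 1 + (bytes_left - 1) / bytes_per_10_ms;
    cached = requests * bytes_per_10_ms - bytes_left;
    assert(cached <= bytes_per_10_ms);  // Same invariant as asserted above.
  }
  return 0;  // Iterations cache 804, 726, 648, 570 and 492 bytes in turn.
}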
diff --git a/modules/audio_device/android/fine_audio_buffer.h b/modules/audio_device/android/fine_audio_buffer.h
new file mode 100644
index 00000000..597b8aaa
--- /dev/null
+++ b/modules/audio_device/android/fine_audio_buffer.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_FINE_AUDIO_BUFFER_H_
+#define WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_FINE_AUDIO_BUFFER_H_
+
+#include "webrtc/system_wrappers/interface/scoped_ptr.h"
+#include "webrtc/typedefs.h"
+
+namespace webrtc {
+
+class AudioDeviceBuffer;
+
+// FineAudioBuffer takes an AudioDeviceBuffer which delivers audio data
+// corresponding to 10ms of data. It then allows for this data to be pulled in
+// a finer or coarser granularity. I.e. interacting with this class instead of
+// directly with the AudioDeviceBuffer one can ask for any number of audio data
+// samples.
+class FineAudioBuffer {
+ public:
+ // |device_buffer| is a buffer that provides 10ms of audio data.
+ // |desired_frame_size_bytes| is the number of bytes of audio data
+ // (not samples) |GetBufferData| should return on success.
+ // |sample_rate| is the sample rate of the audio data. This is needed because
+ // |device_buffer| delivers 10ms of data. Given the sample rate the number
+ // of samples can be calculated.
+ FineAudioBuffer(AudioDeviceBuffer* device_buffer,
+ int desired_frame_size_bytes,
+ int sample_rate);
+ ~FineAudioBuffer();
+
+ // Returns the required size of |buffer| when calling GetBufferData. If the
+ // buffer is smaller, memory trampling will happen.
+ // |desired_frame_size_bytes| and |sample_rate| are as described in the
+ // constructor.
+ int RequiredBufferSizeBytes();
+
+ // |buffer| must be of equal or greater size than what is returned by
+ // RequiredBufferSize. This is to avoid unnecessary memcpy.
+ void GetBufferData(int8_t* buffer);
+
+ private:
+ // Device buffer that provides 10ms chunks of data.
+ AudioDeviceBuffer* device_buffer_;
+ int desired_frame_size_bytes_;  // Bytes delivered per GetBufferData call.
+ int sample_rate_;
+ int samples_per_10_ms_;
+ // Convenience parameter to avoid converting from samples to bytes.
+ int bytes_per_10_ms_;
+
+ // Storage for samples that are not yet asked for.
+ scoped_array<int8_t> cache_buffer_;
+ int cached_buffer_start_; // Location of first unread sample.
+ int cached_bytes_; // Number of bytes stored in cache.
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_FINE_AUDIO_BUFFER_H_
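A minimal usage sketch, assuming |adb| points at an initialized AudioDeviceBuffer delivering 44.1 kHz mono audio (in this CL the real caller is the OpenSL output path):

#include "webrtc/modules/audio_device/android/fine_audio_buffer.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"

void PullOneFrame(webrtc::AudioDeviceBuffer* adb) {
  // 480 samples * 2 bytes: a frame size that does not divide 10 ms evenly.
  webrtc::FineAudioBuffer fine(adb, 960, 44100);
  webrtc::scoped_array<int8_t> buf(
      new int8_t[fine.RequiredBufferSizeBytes()]);
  fine.GetBufferData(buf.get());  // Always fills exactly 960 bytes.
}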
diff --git a/modules/audio_device/android/fine_audio_buffer_unittest.cc b/modules/audio_device/android/fine_audio_buffer_unittest.cc
new file mode 100644
index 00000000..69ba741d
--- /dev/null
+++ b/modules/audio_device/android/fine_audio_buffer_unittest.cc
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_device/android/fine_audio_buffer.h"
+
+#include <limits.h>
+#include <memory>
+
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/modules/audio_device/mock_audio_device_buffer.h"
+#include "webrtc/system_wrappers/interface/scoped_ptr.h"
+
+using ::testing::_;
+using ::testing::InSequence;
+using ::testing::Return;
+
+namespace webrtc {
+
+// The fake audio data is 0,1,..SCHAR_MAX-1,0,1,... This is to make it easy
+// to detect errors. This function verifies that the buffers contain such data.
+// E.g. if there are two buffers of size 3, buffer 1 would contain 0,1,2 and
+// buffer 2 would contain 3,4,5. Note that SCHAR_MAX is 127 so wrap-around
+// will happen.
+// |buffer| is the audio buffer to verify.
+bool VerifyBuffer(const int8_t* buffer, int buffer_number, int size) {
+ int start_value = (buffer_number * size) % SCHAR_MAX;
+ for (int i = 0; i < size; ++i) {
+ if (buffer[i] != (i + start_value) % SCHAR_MAX) {
+ return false;
+ }
+ }
+ return true;
+}
+
+// This function replaces GetPlayoutData when it's called (which is done
+// implicitly when calling GetBufferData). It writes the sequence
+// 0,1,..SCHAR_MAX-1,0,1,... to the buffer. Note that this is likely a buffer of
+// different size than the one VerifyBuffer verifies.
+// |iteration| is the number of calls made to UpdateBuffer prior to this call.
+// |samples_per_10_ms| is the number of samples that should be written to the
+// buffer (|arg0|).
+ACTION_P2(UpdateBuffer, iteration, samples_per_10_ms) {
+ int8_t* buffer = static_cast<int8_t*>(arg0);
+ int bytes_per_10_ms = samples_per_10_ms * static_cast<int>(sizeof(int16_t));
+ int start_value = (iteration * bytes_per_10_ms) % SCHAR_MAX;
+ for (int i = 0; i < bytes_per_10_ms; ++i) {
+ buffer[i] = (i + start_value) % SCHAR_MAX;
+ }
+ return samples_per_10_ms;
+}
+
+void RunFineBufferTest(int sample_rate, int frame_size_in_samples) {
+ const int kSamplesPer10Ms = sample_rate * 10 / 1000;
+ const int kFrameSizeBytes = frame_size_in_samples *
+ static_cast<int>(sizeof(int16_t));
+ const int kNumberOfFrames = 5;
+ // Ceiling of integer division: 1 + ((x - 1) / y)
+ const int kNumberOfUpdateBufferCalls =
+ 1 + ((kNumberOfFrames * frame_size_in_samples - 1) / kSamplesPer10Ms);
+
+ MockAudioDeviceBuffer audio_device_buffer;
+ EXPECT_CALL(audio_device_buffer, RequestPlayoutData(_))
+ .WillRepeatedly(Return(kSamplesPer10Ms));
+ {
+ InSequence s;
+ for (int i = 0; i < kNumberOfUpdateBufferCalls; ++i) {
+ EXPECT_CALL(audio_device_buffer, GetPlayoutData(_))
+ .WillOnce(UpdateBuffer(i, kSamplesPer10Ms))
+ .RetiresOnSaturation();
+ }
+ }
+ FineAudioBuffer fine_buffer(&audio_device_buffer, kFrameSizeBytes,
+ sample_rate);
+
+ scoped_array<int8_t> out_buffer;
+ out_buffer.reset(
+ new int8_t[fine_buffer.RequiredBufferSizeBytes()]);
+ for (int i = 0; i < kNumberOfFrames; ++i) {
+ fine_buffer.GetBufferData(out_buffer.get());
+ EXPECT_TRUE(VerifyBuffer(out_buffer.get(), i, kFrameSizeBytes));
+ }
+}
+
+TEST(FineBufferTest, BufferLessThan10ms) {
+ const int kSampleRate = 44100;
+ const int kSamplesPer10Ms = kSampleRate * 10 / 1000;
+ const int kFrameSizeSamples = kSamplesPer10Ms - 50;
+ RunFineBufferTest(kSampleRate, kFrameSizeSamples);
+}
+
+TEST(FineBufferTest, GreaterThan10ms) {
+ const int kSampleRate = 44100;
+ const int kSamplesPer10Ms = kSampleRate * 10 / 1000;
+ const int kFrameSizeSamples = kSamplesPer10Ms + 50;
+ RunFineBufferTest(kSampleRate, kFrameSizeSamples);
+}
+
+} // namespace webrtc
diff --git a/modules/audio_device/android/java/src/org/webrtc/voiceengine/AudioManagerAndroid.java b/modules/audio_device/android/java/src/org/webrtc/voiceengine/AudioManagerAndroid.java
new file mode 100644
index 00000000..040acfbb
--- /dev/null
+++ b/modules/audio_device/android/java/src/org/webrtc/voiceengine/AudioManagerAndroid.java
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// The functions in this file are called from native code. They can still be
+// accessed even though they are declared private.
+
+package org.webrtc.voiceengine;
+
+import android.content.Context;
+import android.content.pm.PackageManager;
+import android.media.AudioManager;
+
+class AudioManagerAndroid {
+ // Most Google-led devices use 44.1 kHz as the default sampling rate, and
+ // 44.1 kHz is also widely used on other Android devices.
+ private static final int DEFAULT_SAMPLING_RATE = 44100;
+ // Arbitrarily chosen frame size that is close to the value returned on a
+ // Nexus 4. Used as the default when
+ // getProperty(PROPERTY_OUTPUT_FRAMES_PER_BUFFER) fails.
+ private static final int DEFAULT_FRAMES_PER_BUFFER = 256;
+
+ private int mNativeOutputSampleRate;
+ private boolean mAudioLowLatencySupported;
+ private int mAudioLowLatencyOutputFrameSize;
+
+
+ @SuppressWarnings("unused")
+ private AudioManagerAndroid(Context context) {
+ AudioManager audioManager = (AudioManager)
+ context.getSystemService(Context.AUDIO_SERVICE);
+
+ mNativeOutputSampleRate = DEFAULT_SAMPLING_RATE;
+ if (android.os.Build.VERSION.SDK_INT >=
+ android.os.Build.VERSION_CODES.JELLY_BEAN_MR1) {
+ String sampleRateString = audioManager.getProperty(
+ AudioManager.PROPERTY_OUTPUT_SAMPLE_RATE);
+ if (sampleRateString != null) {
+ mNativeOutputSampleRate = Integer.parseInt(sampleRateString);
+ }
+ }
+ mAudioLowLatencySupported = context.getPackageManager().hasSystemFeature(
+ PackageManager.FEATURE_AUDIO_LOW_LATENCY);
+ mAudioLowLatencyOutputFrameSize = DEFAULT_FRAMES_PER_BUFFER;
+ String framesPerBuffer = audioManager.getProperty(
+ AudioManager.PROPERTY_OUTPUT_FRAMES_PER_BUFFER);
+ if (framesPerBuffer != null) {
+ mAudioLowLatencyOutputFrameSize = Integer.parseInt(framesPerBuffer);
+ }
+ }
+
+ @SuppressWarnings("unused")
+ private int getNativeOutputSampleRate() {
+ return mNativeOutputSampleRate;
+ }
+
+ @SuppressWarnings("unused")
+ private boolean isAudioLowLatencySupported() {
+ return mAudioLowLatencySupported;
+ }
+
+ @SuppressWarnings("unused")
+ private int getAudioLowLatencyOutputFrameSize() {
+ return mAudioLowLatencyOutputFrameSize;
+ }
+}
\ No newline at end of file
diff --git a/modules/audio_device/android/low_latency_event.h b/modules/audio_device/android/low_latency_event.h
new file mode 100644
index 00000000..a19483d5
--- /dev/null
+++ b/modules/audio_device/android/low_latency_event.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_LOW_LATENCY_EVENT_H_
+#define WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_LOW_LATENCY_EVENT_H_
+
+#include <errno.h>
+#include <limits.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <sys/ioctl.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+
+namespace webrtc {
+
+// Implementation of event for single waiter, single signal threads. Event
+// is sticky.
+class LowLatencyEvent {
+ public:
+ LowLatencyEvent();
+ ~LowLatencyEvent();
+
+ // Readies the event. Must be called before signaling or waiting for event.
+ // Returns true on success.
+ bool Start();
+ // Shuts down the event and releases threads calling WaitOnEvent. Once
+ // stopped SignalEvent and WaitOnEvent will have no effect. Start can be
+ // called to re-enable the event.
+ // Returns true on success.
+ bool Stop();
+
+ // Releases thread calling WaitOnEvent in a sticky fashion.
+ void SignalEvent(int event_id, int event_msg);
+ // Waits until SignalEvent or Stop is called.
+ void WaitOnEvent(int* event_id, int* event_msg);
+
+ private:
+ typedef int Handle;
+ static const Handle kInvalidHandle;
+ static const int kReadHandle;
+ static const int kWriteHandle;
+
+ // Closes the handle. Returns true on success.
+ static bool Close(Handle* handle);
+
+ // SignalEvent and WaitOnEvent are actually read/write to file descriptors.
+ // Write is signal.
+ void WriteFd(int message_id, int message);
+ // Read is wait.
+ void ReadFd(int* message_id, int* message);
+
+ Handle handles_[2];
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_LOW_LATENCY_EVENT_H_
diff --git a/modules/audio_device/android/low_latency_event_posix.cc b/modules/audio_device/android/low_latency_event_posix.cc
new file mode 100644
index 00000000..4e0c88a4
--- /dev/null
+++ b/modules/audio_device/android/low_latency_event_posix.cc
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_device/android/low_latency_event.h"
+
+#include <assert.h>
+#include <string.h>
+#include <unistd.h>
+
+#define HANDLE_EINTR(x) ({ \
+ typeof(x) eintr_wrapper_result; \
+ do { \
+ eintr_wrapper_result = (x); \
+ } while (eintr_wrapper_result == -1 && errno == EINTR); \
+ eintr_wrapper_result; \
+ })
+
+namespace webrtc {
+
+const LowLatencyEvent::Handle LowLatencyEvent::kInvalidHandle = -1;
+const int LowLatencyEvent::kReadHandle = 0;
+const int LowLatencyEvent::kWriteHandle = 1;
+
+LowLatencyEvent::LowLatencyEvent() {
+ handles_[kReadHandle] = kInvalidHandle;
+ handles_[kWriteHandle] = kInvalidHandle;
+}
+
+LowLatencyEvent::~LowLatencyEvent() {
+ Stop();
+}
+
+bool LowLatencyEvent::Start() {
+ assert(handles_[kReadHandle] == kInvalidHandle);
+ assert(handles_[kWriteHandle] == kInvalidHandle);
+
+ return socketpair(AF_UNIX, SOCK_STREAM, 0, handles_) == 0;
+}
+
+bool LowLatencyEvent::Stop() {
+ bool ret = Close(&handles_[kReadHandle]) && Close(&handles_[kWriteHandle]);
+ handles_[kReadHandle] = kInvalidHandle;
+ handles_[kWriteHandle] = kInvalidHandle;
+ return ret;
+}
+
+void LowLatencyEvent::SignalEvent(int event_id, int event_msg) {
+ WriteFd(event_id, event_msg);
+}
+
+void LowLatencyEvent::WaitOnEvent(int* event_id, int* event_msg) {
+ ReadFd(event_id, event_msg);
+}
+
+bool LowLatencyEvent::Close(Handle* handle) {
+ if (*handle == kInvalidHandle) {
+ return false;
+ }
+ int retval = HANDLE_EINTR(close(*handle));
+ *handle = kInvalidHandle;
+ return retval == 0;
+}
+
+void LowLatencyEvent::WriteFd(int message_id, int message) {
+ char buffer[sizeof(message_id) + sizeof(message)];
+ size_t bytes = sizeof(buffer);
+ memcpy(buffer, &message_id, sizeof(message_id));
+ memcpy(&buffer[sizeof(message_id)], &message, sizeof(message));
+ ssize_t bytes_written = HANDLE_EINTR(write(handles_[kWriteHandle], buffer,
+ bytes));
+ if (bytes_written != static_cast<ssize_t>(bytes)) {
+ assert(false);
+ }
+}
+
+void LowLatencyEvent::ReadFd(int* message_id, int* message) {
+ // Size from the pointees so it matches the payload written by WriteFd.
+ char buffer[sizeof(*message_id) + sizeof(*message)];
+ size_t bytes = sizeof(buffer);
+ ssize_t bytes_read = HANDLE_EINTR(read(handles_[kReadHandle], buffer, bytes));
+ if (bytes_read == 0) {
+ *message_id = 0;
+ *message = 0;
+ return;
+ } else if (bytes_read == static_cast<ssize_t>(bytes)) {
+ memcpy(message_id, buffer, sizeof(*message_id));
+ memcpy(message, &buffer[sizeof(*message_id)], sizeof(*message));
+ } else {
+ assert(false);
+ }
+}
+
+} // namespace webrtc
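A minimal round-trip sketch of the event (single-threaded here for brevity; in the patch the writer is the OpenSL callback thread and the reader is the processing thread):

#include "webrtc/modules/audio_device/android/low_latency_event.h"

void EventRoundTrip() {
  webrtc::LowLatencyEvent event;
  if (!event.Start())
    return;
  event.SignalEvent(1, 42);  // Write side: 8 bytes onto the socketpair.
  int id = 0;
  int msg = 0;
  event.WaitOnEvent(&id, &msg);  // Read side: returns immediately here.
  // id == 1 and msg == 42; the event is sticky, as noted in the header.
  event.Stop();  // Releases any thread still blocked in WaitOnEvent.
}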
diff --git a/modules/audio_device/android/low_latency_event_unittest.cc b/modules/audio_device/android/low_latency_event_unittest.cc
new file mode 100644
index 00000000..07269598
--- /dev/null
+++ b/modules/audio_device/android/low_latency_event_unittest.cc
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_device/android/low_latency_event.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/system_wrappers/interface/scoped_ptr.h"
+#include "webrtc/system_wrappers/interface/sleep.h"
+#include "webrtc/system_wrappers/interface/thread_wrapper.h"
+
+namespace webrtc {
+
+static const int kEventMsg = 1;
+
+class LowLatencyEventTest : public testing::Test {
+ public:
+ LowLatencyEventTest()
+ : process_thread_(ThreadWrapper::CreateThread(CbThread,
+ this,
+ kRealtimePriority,
+ "test_thread")),
+ terminated_(false),
+ iteration_count_(0),
+ allowed_iterations_(0) {
+ EXPECT_TRUE(event_.Start());
+ Start();
+ }
+ ~LowLatencyEventTest() {
+ EXPECT_GE(allowed_iterations_, 1);
+ EXPECT_GE(iteration_count_, 1);
+ Stop();
+ }
+
+ void AllowOneIteration() {
+ ++allowed_iterations_;
+ event_.SignalEvent(allowed_iterations_, kEventMsg);
+ }
+
+ private:
+ void Start() {
+ unsigned int thread_id = 0;
+ EXPECT_TRUE(process_thread_->Start(thread_id));
+ }
+ void Stop() {
+ terminated_ = true;
+ event_.Stop();
+ process_thread_->Stop();
+ }
+
+ static bool CbThread(void* context) {
+ return reinterpret_cast<LowLatencyEventTest*>(context)->CbThreadImpl();
+ }
+ bool CbThreadImpl() {
+ int allowed_iterations;
+ int message;
+ ++iteration_count_;
+ event_.WaitOnEvent(&allowed_iterations, &message);
+ EXPECT_EQ(iteration_count_, allowed_iterations);
+ EXPECT_EQ(message, kEventMsg);
+ return !terminated_;
+ }
+
+ LowLatencyEvent event_;
+
+ scoped_ptr<ThreadWrapper> process_thread_;
+ bool terminated_;
+ int iteration_count_;
+ int allowed_iterations_;
+};
+
+
+TEST_F(LowLatencyEventTest, TriggerEvent) {
+ for (int i = 0; i < 3; ++i) {
+ AllowOneIteration();
+ }
+}
+
+// Events trigger in less than 3ms. Wait for 3 ms to ensure there are no
+// spurious wakeups.
+TEST_F(LowLatencyEventTest, NoTriggerEvent) {
+ SleepMs(3);
+ // If there were spurious wakeups either the wakeups would have triggered a
+ // failure as we haven't allowed an iteration yet. Or the wakeup happened
+ // to signal 0, 0 in which case the mismatch will be discovered when allowing
+ // an iteration to happen.
+ AllowOneIteration();
+}
+
+} // namespace webrtc
diff --git a/modules/audio_device/android/opensles_common.cc b/modules/audio_device/android/opensles_common.cc
new file mode 100644
index 00000000..be70e44d
--- /dev/null
+++ b/modules/audio_device/android/opensles_common.cc
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_device/android/opensles_common.h"
+
+#include <assert.h>
+
+namespace webrtc_opensl {
+
+SLDataFormat_PCM CreatePcmConfiguration(int sample_rate) {
+ SLDataFormat_PCM configuration;
+ configuration.formatType = SL_DATAFORMAT_PCM;
+ configuration.numChannels = kNumChannels;
+ // According to the OpenSL ES documentation in the NDK, samplesPerSec is
+ // actually in units of milliHz, despite the misleading name. The
+ // documentation further recommends using the SL_SAMPLINGRATE_* constants,
+ // but that would lead to a lot of boilerplate code, so it is not done here.
+ configuration.samplesPerSec = sample_rate * 1000;
+ configuration.bitsPerSample = SL_PCMSAMPLEFORMAT_FIXED_16;
+ configuration.containerSize = SL_PCMSAMPLEFORMAT_FIXED_16;
+ configuration.channelMask = SL_SPEAKER_FRONT_CENTER;
+ if (2 == configuration.numChannels) {
+ configuration.channelMask =
+ SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT;
+ }
+ configuration.endianness = SL_BYTEORDER_LITTLEENDIAN;
+ return configuration;
+}
+
+} // namespace webrtc_opensl
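A short illustration of the milliHz convention noted above; SL_SAMPLINGRATE_44_1 is the NDK constant form of 44.1 kHz:

#include <assert.h>
#include <SLES/OpenSLES.h>

#include "webrtc/modules/audio_device/android/opensles_common.h"

void MilliHzExample() {
  SLDataFormat_PCM format = webrtc_opensl::CreatePcmConfiguration(44100);
  // 44100 Hz is declared as 44100 * 1000 "samples per second".
  assert(format.samplesPerSec == SL_SAMPLINGRATE_44_1);  // == 44100000
}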
diff --git a/modules/audio_device/android/opensles_common.h b/modules/audio_device/android/opensles_common.h
new file mode 100644
index 00000000..e1521718
--- /dev/null
+++ b/modules/audio_device/android/opensles_common.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_COMMON_H_
+#define WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_COMMON_H_
+
+#include <SLES/OpenSLES.h>
+
+namespace webrtc_opensl {
+
+enum {
+ kDefaultSampleRate = 44100,
+ kNumChannels = 1
+};
+
+
+class PlayoutDelayProvider {
+ public:
+ virtual int PlayoutDelayMs() = 0;
+
+ protected:
+ PlayoutDelayProvider() {}
+ virtual ~PlayoutDelayProvider() {}
+};
+
+SLDataFormat_PCM CreatePcmConfiguration(int sample_rate);
+
+} // namespace webrtc_opensl
+
+#endif // WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_COMMON_H_
diff --git a/modules/audio_device/android/opensles_input.cc b/modules/audio_device/android/opensles_input.cc
new file mode 100644
index 00000000..e8466854
--- /dev/null
+++ b/modules/audio_device/android/opensles_input.cc
@@ -0,0 +1,504 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_device/android/opensles_input.h"
+
+#include <assert.h>
+#include <string.h>
+
+#include "webrtc/modules/audio_device/android/single_rw_fifo.h"
+#include "webrtc/modules/audio_device/audio_device_buffer.h"
+#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
+#include "webrtc/system_wrappers/interface/thread_wrapper.h"
+#include "webrtc/system_wrappers/interface/trace.h"
+
+using webrtc_opensl::kDefaultSampleRate;
+using webrtc_opensl::kNumChannels;
+
+#define VOID_RETURN
+#define OPENSL_RETURN_ON_FAILURE(op, ret_val) \
+ do { \
+ SLresult err = (op); \
+ if (err != SL_RESULT_SUCCESS) { \
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, id_, \
+ "OpenSL error: %d", err); \
+ assert(false); \
+ return ret_val; \
+ } \
+ } while (0)
+
+static const SLEngineOption kOption[] = {
+ { SL_ENGINEOPTION_THREADSAFE, static_cast<SLuint32>(SL_BOOLEAN_TRUE) },
+};
+
+enum {
+ kNoOverrun,
+ kOverrun,
+};
+
+namespace webrtc {
+
+OpenSlesInput::OpenSlesInput(
+ const int32_t id,
+ webrtc_opensl::PlayoutDelayProvider* delay_provider)
+ : id_(id),
+ delay_provider_(delay_provider),
+ initialized_(false),
+ mic_initialized_(false),
+ rec_initialized_(false),
+ crit_sect_(CriticalSectionWrapper::CreateCriticalSection()),
+ recording_(false),
+ num_fifo_buffers_needed_(0),
+ number_overruns_(0),
+ sles_engine_(NULL),
+ sles_engine_itf_(NULL),
+ sles_recorder_(NULL),
+ sles_recorder_itf_(NULL),
+ sles_recorder_sbq_itf_(NULL),
+ audio_buffer_(NULL),
+ active_queue_(0),
+ agc_enabled_(false),
+ recording_delay_(0) {
+}
+
+OpenSlesInput::~OpenSlesInput() {
+}
+
+int32_t OpenSlesInput::Init() {
+ assert(!initialized_);
+
+ // Set up OpenSL engine.
+ OPENSL_RETURN_ON_FAILURE(slCreateEngine(&sles_engine_, 1, kOption, 0,
+ NULL, NULL),
+ -1);
+ OPENSL_RETURN_ON_FAILURE((*sles_engine_)->Realize(sles_engine_,
+ SL_BOOLEAN_FALSE),
+ -1);
+ OPENSL_RETURN_ON_FAILURE((*sles_engine_)->GetInterface(sles_engine_,
+ SL_IID_ENGINE,
+ &sles_engine_itf_),
+ -1);
+
+ if (InitSampleRate() != 0) {
+ return -1;
+ }
+ AllocateBuffers();
+ initialized_ = true;
+ return 0;
+}
+
+int32_t OpenSlesInput::Terminate() {
+ // It is assumed that the caller has stopped recording before terminating.
+ assert(!recording_);
+ (*sles_engine_)->Destroy(sles_engine_);
+ initialized_ = false;
+ mic_initialized_ = false;
+ rec_initialized_ = false;
+ return 0;
+}
+
+int32_t OpenSlesInput::RecordingDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) {
+ assert(index == 0);
+ // Empty strings.
+ name[0] = '\0';
+ guid[0] = '\0';
+ return 0;
+}
+
+int32_t OpenSlesInput::SetRecordingDevice(uint16_t index) {
+ assert(index == 0);
+ return 0;
+}
+
+int32_t OpenSlesInput::RecordingIsAvailable(bool& available) { // NOLINT
+ available = true;
+ return 0;
+}
+
+int32_t OpenSlesInput::InitRecording() {
+ assert(initialized_);
+ assert(!rec_initialized_);
+ rec_initialized_ = true;
+ return 0;
+}
+
+int32_t OpenSlesInput::StartRecording() {
+ assert(rec_initialized_);
+ assert(!recording_);
+ if (!CreateAudioRecorder()) {
+ return -1;
+ }
+ // Setup to receive buffer queue event callbacks.
+ OPENSL_RETURN_ON_FAILURE(
+ (*sles_recorder_sbq_itf_)->RegisterCallback(
+ sles_recorder_sbq_itf_,
+ RecorderSimpleBufferQueueCallback,
+ this),
+ -1);
+
+ if (!EnqueueAllBuffers()) {
+ return -1;
+ }
+
+ {
+ // The lock prevents the compiler from e.g. optimizing the code into
+ // recording_ = StartCbThreads(), which would not have been thread safe.
+ CriticalSectionScoped lock(crit_sect_.get());
+ recording_ = true;
+ }
+ if (!StartCbThreads()) {
+ recording_ = false;
+ return -1;
+ }
+ return 0;
+}
+
+int32_t OpenSlesInput::StopRecording() {
+ StopCbThreads();
+ DestroyAudioRecorder();
+ return 0;
+}
+
+int32_t OpenSlesInput::SetAGC(bool enable) {
+ agc_enabled_ = enable;
+ return 0;
+}
+
+int32_t OpenSlesInput::MicrophoneIsAvailable(bool& available) { // NOLINT
+ available = true;
+ return 0;
+}
+
+int32_t OpenSlesInput::InitMicrophone() {
+ assert(initialized_);
+ assert(!recording_);
+ mic_initialized_ = true;
+ return 0;
+}
+
+int32_t OpenSlesInput::MicrophoneVolumeIsAvailable(bool& available) { // NOLINT
+ available = false;
+ return 0;
+}
+
+int32_t OpenSlesInput::MinMicrophoneVolume(
+ uint32_t& minVolume) const { // NOLINT
+ minVolume = 0;
+ return 0;
+}
+
+int32_t OpenSlesInput::MicrophoneVolumeStepSize(
+ uint16_t& stepSize) const {
+ stepSize = 1;
+ return 0;
+}
+
+int32_t OpenSlesInput::MicrophoneMuteIsAvailable(bool& available) { // NOLINT
+ available = false; // Mic mute not supported on Android
+ return 0;
+}
+
+int32_t OpenSlesInput::MicrophoneBoostIsAvailable(bool& available) { // NOLINT
+ available = false; // Mic boost not supported on Android.
+ return 0;
+}
+
+int32_t OpenSlesInput::SetMicrophoneBoost(bool enable) {
+ assert(false);
+ return -1; // Not supported
+}
+
+int32_t OpenSlesInput::MicrophoneBoost(bool& enabled) const { // NOLINT
+ assert(false);
+ return -1; // Not supported
+}
+
+int32_t OpenSlesInput::StereoRecordingIsAvailable(bool& available) { // NOLINT
+ available = false; // Stereo recording not supported on Android.
+ return 0;
+}
+
+int32_t OpenSlesInput::StereoRecording(bool& enabled) const { // NOLINT
+ enabled = false;
+ return 0;
+}
+
+int32_t OpenSlesInput::RecordingDelay(uint16_t& delayMS) const { // NOLINT
+ delayMS = recording_delay_;
+ return 0;
+}
+
+void OpenSlesInput::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
+ audio_buffer_ = audioBuffer;
+}
+
+int32_t OpenSlesInput::InitSampleRate() {
+ audio_buffer_->SetRecordingSampleRate(kDefaultSampleRate);
+ audio_buffer_->SetRecordingChannels(kNumChannels);
+ UpdateRecordingDelay();
+ return 0;
+}
+
+void OpenSlesInput::UpdateRecordingDelay() {
+ // TODO(hellner): Add accurate delay estimate.
+ // On average half the current buffer will have been filled with audio.
+ int outstanding_samples =
+ (TotalBuffersUsed() - 0.5) * kDefaultBufSizeInSamples;
+ recording_delay_ = outstanding_samples / (kDefaultSampleRate / 1000);
+}
+
+void OpenSlesInput::CalculateNumFifoBuffersNeeded() {
+ // Buffer size is 10ms of data.
+ num_fifo_buffers_needed_ = kNum10MsToBuffer;
+}
+
+void OpenSlesInput::AllocateBuffers() {
+ // Allocate FIFO to handle passing buffers between processing and OpenSL
+ // threads.
+ CalculateNumFifoBuffersNeeded();
+ assert(num_fifo_buffers_needed_ > 0);
+ fifo_.reset(new SingleRwFifo(num_fifo_buffers_needed_));
+
+ // Allocate the memory area to be used.
+ rec_buf_.reset(new scoped_array<int8_t>[TotalBuffersUsed()]);
+ for (int i = 0; i < TotalBuffersUsed(); ++i) {
+ rec_buf_[i].reset(new int8_t[kDefaultBufSizeInBytes]);
+ }
+}
+
+int OpenSlesInput::TotalBuffersUsed() const {
+ return num_fifo_buffers_needed_ + kNumOpenSlBuffers;
+}
+
+bool OpenSlesInput::EnqueueAllBuffers() {
+ active_queue_ = 0;
+ number_overruns_ = 0;
+ for (int i = 0; i < kNumOpenSlBuffers; ++i) {
+ memset(rec_buf_[i].get(), 0, kDefaultBufSizeInBytes);
+ OPENSL_RETURN_ON_FAILURE(
+ (*sles_recorder_sbq_itf_)->Enqueue(
+ sles_recorder_sbq_itf_,
+ reinterpret_cast<void*>(rec_buf_[i].get()),
+ kDefaultBufSizeInBytes),
+ false);
+ }
+ // In case of overrun the fifo will be at capacity. On the first enqueue no
+ // audio can have been returned yet, meaning the fifo must be empty. Any
+ // other values are unexpected.
+ assert(fifo_->size() == fifo_->capacity() ||
+ fifo_->size() == 0);
+ // OpenSL recording has been stopped. I.e. only this thread is touching
+ // |fifo_|.
+ while (fifo_->size() != 0) {
+ // Clear the fifo.
+ fifo_->Pop();
+ }
+ return true;
+}
+
+bool OpenSlesInput::CreateAudioRecorder() {
+ if (!event_.Start()) {
+ assert(false);
+ return false;
+ }
+ SLDataLocator_IODevice micLocator = {
+ SL_DATALOCATOR_IODEVICE, SL_IODEVICE_AUDIOINPUT,
+ SL_DEFAULTDEVICEID_AUDIOINPUT, NULL };
+ SLDataSource audio_source = { &micLocator, NULL };
+
+ SLDataLocator_AndroidSimpleBufferQueue simple_buf_queue = {
+ SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE,
+ static_cast<SLuint32>(TotalBuffersUsed())
+ };
+ SLDataFormat_PCM configuration =
+ webrtc_opensl::CreatePcmConfiguration(kDefaultSampleRate);
+ SLDataSink audio_sink = { &simple_buf_queue, &configuration };
+
+ // The simple buffer queue and Android configuration interfaces are needed.
+ // Note the interfaces still need to be initialized. This only tells OpenSL
+ // that the interfaces will be needed at some point.
+ const SLInterfaceID id[kNumInterfaces] = {
+ SL_IID_ANDROIDSIMPLEBUFFERQUEUE, SL_IID_ANDROIDCONFIGURATION };
+ const SLboolean req[kNumInterfaces] = {
+ SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE };
+ OPENSL_RETURN_ON_FAILURE(
+ (*sles_engine_itf_)->CreateAudioRecorder(sles_engine_itf_,
+ &sles_recorder_,
+ &audio_source,
+ &audio_sink,
+ kNumInterfaces,
+ id,
+ req),
+ false);
+
+ // Realize the recorder in synchronous mode.
+ OPENSL_RETURN_ON_FAILURE((*sles_recorder_)->Realize(sles_recorder_,
+ SL_BOOLEAN_FALSE),
+ false);
+ OPENSL_RETURN_ON_FAILURE(
+ (*sles_recorder_)->GetInterface(sles_recorder_, SL_IID_RECORD,
+ static_cast<void*>(&sles_recorder_itf_)),
+ false);
+ OPENSL_RETURN_ON_FAILURE(
+ (*sles_recorder_)->GetInterface(
+ sles_recorder_,
+ SL_IID_ANDROIDSIMPLEBUFFERQUEUE,
+ static_cast<void*>(&sles_recorder_sbq_itf_)),
+ false);
+ return true;
+}
+
+void OpenSlesInput::DestroyAudioRecorder() {
+ event_.Stop();
+ if (sles_recorder_sbq_itf_) {
+ // Release all buffers currently queued up.
+ OPENSL_RETURN_ON_FAILURE(
+ (*sles_recorder_sbq_itf_)->Clear(sles_recorder_sbq_itf_),
+ VOID_RETURN);
+ sles_recorder_sbq_itf_ = NULL;
+ }
+ sles_recorder_itf_ = NULL;
+
+ if (sles_recorder_) {
+ (*sles_recorder_)->Destroy(sles_recorder_);
+ sles_recorder_ = NULL;
+ }
+}
+
+bool OpenSlesInput::HandleOverrun(int event_id, int event_msg) {
+ if (!recording_) {
+ return false;
+ }
+ if (event_id == kNoOverrun) {
+ return false;
+ }
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, id_, "Audio overrun");
+ assert(event_id == kOverrun);
+ assert(event_msg > 0);
+ // Wait for all enqueued buffers to be flushed.
+ if (event_msg != kNumOpenSlBuffers) {
+ return true;
+ }
+ // All buffers passed to OpenSL have been flushed. Restart the audio from
+ // scratch.
+ // No need to check sles_recorder_itf_ as recording_ would be false before it
+ // is set to NULL.
+ OPENSL_RETURN_ON_FAILURE(
+ (*sles_recorder_itf_)->SetRecordState(sles_recorder_itf_,
+ SL_RECORDSTATE_STOPPED),
+ true);
+ EnqueueAllBuffers();
+ OPENSL_RETURN_ON_FAILURE(
+ (*sles_recorder_itf_)->SetRecordState(sles_recorder_itf_,
+ SL_RECORDSTATE_RECORDING),
+ true);
+ return true;
+}
+
+void OpenSlesInput::RecorderSimpleBufferQueueCallback(
+ SLAndroidSimpleBufferQueueItf queue_itf,
+ void* context) {
+ OpenSlesInput* audio_device = reinterpret_cast<OpenSlesInput*>(context);
+ audio_device->RecorderSimpleBufferQueueCallbackHandler(queue_itf);
+}
+
+void OpenSlesInput::RecorderSimpleBufferQueueCallbackHandler(
+ SLAndroidSimpleBufferQueueItf queue_itf) {
+ if (fifo_->size() >= fifo_->capacity() || number_overruns_ > 0) {
+ ++number_overruns_;
+ event_.SignalEvent(kOverrun, number_overruns_);
+ return;
+ }
+ int8_t* audio = rec_buf_[active_queue_].get();
+ // There is at least one spot available in the fifo.
+ fifo_->Push(audio);
+ active_queue_ = (active_queue_ + 1) % TotalBuffersUsed();
+ event_.SignalEvent(kNoOverrun, 0);
+ // active_queue_ is indexing the next buffer to record to. Since the current
+ // buffer has been recorded it means that the buffer index
+ // kNumOpenSlBuffers - 1 past |active_queue_| contains the next free buffer.
+ // Since |fifo_| wasn't at capacity, at least one buffer is free to be used.
+ int next_free_buffer =
+ (active_queue_ + kNumOpenSlBuffers - 1) % TotalBuffersUsed();
+ OPENSL_RETURN_ON_FAILURE(
+ (*sles_recorder_sbq_itf_)->Enqueue(
+ sles_recorder_sbq_itf_,
+ reinterpret_cast<void*>(rec_buf_[next_free_buffer].get()),
+ kDefaultBufSizeInBytes),
+ VOID_RETURN);
+}
+
+bool OpenSlesInput::StartCbThreads() {
+ rec_thread_.reset(ThreadWrapper::CreateThread(CbThread,
+ this,
+ kRealtimePriority,
+ "opensl_rec_thread"));
+ assert(rec_thread_.get());
+ unsigned int thread_id = 0;
+ if (!rec_thread_->Start(thread_id)) {
+ assert(false);
+ return false;
+ }
+ OPENSL_RETURN_ON_FAILURE(
+ (*sles_recorder_itf_)->SetRecordState(sles_recorder_itf_,
+ SL_RECORDSTATE_RECORDING),
+ false);
+ return true;
+}
+
+void OpenSlesInput::StopCbThreads() {
+ {
+ CriticalSectionScoped lock(crit_sect_.get());
+ recording_ = false;
+ }
+ if (sles_recorder_itf_) {
+ OPENSL_RETURN_ON_FAILURE(
+ (*sles_recorder_itf_)->SetRecordState(sles_recorder_itf_,
+ SL_RECORDSTATE_STOPPED),
+ VOID_RETURN);
+ }
+ if (rec_thread_.get() == NULL) {
+ return;
+ }
+ event_.Stop();
+ if (rec_thread_->Stop()) {
+ rec_thread_.reset();
+ } else {
+ assert(false);
+ }
+}
+
+bool OpenSlesInput::CbThread(void* context) {
+ return reinterpret_cast<OpenSlesInput*>(context)->CbThreadImpl();
+}
+
+bool OpenSlesInput::CbThreadImpl() {
+ int event_id;
+ int event_msg;
+ // event_ must not be waited on while a lock has been taken.
+ event_.WaitOnEvent(&event_id, &event_msg);
+
+ CriticalSectionScoped lock(crit_sect_.get());
+ if (HandleOverrun(event_id, event_msg)) {
+ return recording_;
+ }
+ // If the fifo_ has audio data process it.
+ while (fifo_->size() > 0 && recording_) {
+ int8_t* audio = fifo_->Pop();
+ audio_buffer_->SetRecordedBuffer(audio, kDefaultBufSizeInSamples);
+ audio_buffer_->SetVQEData(delay_provider_->PlayoutDelayMs(),
+ recording_delay_, 0);
+ audio_buffer_->DeliverRecordedData();
+ }
+ return recording_;
+}
+
+} // namespace webrtc
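Worked numbers for the callback's ring arithmetic and the delay estimate, using the constants from opensles_input.h below (kNumOpenSlBuffers = 2, kNum10MsToBuffer = 3, 441-sample buffers at 44.1 kHz); illustrative only:

#include <stdio.h>

int main() {
  const int kNumOpenSlBuffers = 2;
  const int kTotalBuffers = kNumOpenSlBuffers + 3;  // TotalBuffersUsed() == 5.
  for (int active = 0; active < kTotalBuffers; ++active) {
    // Same expression as in RecorderSimpleBufferQueueCallbackHandler.
    int next_free = (active + kNumOpenSlBuffers - 1) % kTotalBuffers;
    printf("active_queue_=%d -> next free buffer=%d\n", active, next_free);
  }
  // UpdateRecordingDelay: on average (5 - 0.5) buffers of 441 samples are
  // outstanding, i.e. 4.5 * 441 / 44.1 = 45 ms.
  return 0;
}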
diff --git a/modules/audio_device/android/opensles_input.h b/modules/audio_device/android/opensles_input.h
new file mode 100644
index 00000000..d9d41eec
--- /dev/null
+++ b/modules/audio_device/android/opensles_input.h
@@ -0,0 +1,214 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_INPUT_H_
+#define WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_INPUT_H_
+
+#include <SLES/OpenSLES.h>
+#include <SLES/OpenSLES_Android.h>
+#include <SLES/OpenSLES_AndroidConfiguration.h>
+
+#include "webrtc/modules/audio_device/android/low_latency_event.h"
+#include "webrtc/modules/audio_device/android/opensles_common.h"
+#include "webrtc/modules/audio_device/include/audio_device.h"
+#include "webrtc/modules/audio_device/include/audio_device_defines.h"
+#include "webrtc/system_wrappers/interface/scoped_ptr.h"
+
+namespace webrtc {
+
+class AudioDeviceBuffer;
+class CriticalSectionWrapper;
+class PlayoutDelayProvider;
+class SingleRwFifo;
+class ThreadWrapper;
+
+// OpenSL implementation that facilitates capturing PCM data from an Android
+// device's microphone.
+// This class is thread-compatible. I.e. given an instance of this class,
+// calls to non-const methods require exclusive access to the object.
+class OpenSlesInput {
+ public:
+ OpenSlesInput(const int32_t id,
+ webrtc_opensl::PlayoutDelayProvider* delay_provider);
+ ~OpenSlesInput();
+
+ // Main initializaton and termination
+ int32_t Init();
+ int32_t Terminate();
+ bool Initialized() const { return initialized_; }
+
+ // Device enumeration
+ int16_t RecordingDevices() { return 1; }
+ int32_t RecordingDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]);
+
+ // Device selection
+ int32_t SetRecordingDevice(uint16_t index);
+ int32_t SetRecordingDevice(
+ AudioDeviceModule::WindowsDeviceType device) { return -1; }
+
+ // Audio transport initialization
+ int32_t RecordingIsAvailable(bool& available); // NOLINT
+ int32_t InitRecording();
+ bool RecordingIsInitialized() const { return rec_initialized_; }
+
+ // Audio transport control
+ int32_t StartRecording();
+ int32_t StopRecording();
+ bool Recording() const { return recording_; }
+
+ // Microphone Automatic Gain Control (AGC)
+ int32_t SetAGC(bool enable);
+ bool AGC() const { return agc_enabled_; }
+
+ // Audio mixer initialization
+ int32_t MicrophoneIsAvailable(bool& available); // NOLINT
+ int32_t InitMicrophone();
+ bool MicrophoneIsInitialized() const { return mic_initialized_; }
+
+ // Microphone volume controls
+ int32_t MicrophoneVolumeIsAvailable(bool& available); // NOLINT
+ // TODO(leozwang): Add microphone volume control when OpenSL APIs
+ // are available.
+ int32_t SetMicrophoneVolume(uint32_t volume) { return 0; }
+ int32_t MicrophoneVolume(uint32_t& volume) const { return -1; } // NOLINT
+ int32_t MaxMicrophoneVolume(
+ uint32_t& maxVolume) const { return 0; } // NOLINT
+ int32_t MinMicrophoneVolume(uint32_t& minVolume) const; // NOLINT
+ int32_t MicrophoneVolumeStepSize(
+ uint16_t& stepSize) const; // NOLINT
+
+ // Microphone mute control
+ int32_t MicrophoneMuteIsAvailable(bool& available); // NOLINT
+ int32_t SetMicrophoneMute(bool enable) { return -1; }
+ int32_t MicrophoneMute(bool& enabled) const { return -1; } // NOLINT
+
+ // Microphone boost control
+ int32_t MicrophoneBoostIsAvailable(bool& available); // NOLINT
+ int32_t SetMicrophoneBoost(bool enable);
+ int32_t MicrophoneBoost(bool& enabled) const; // NOLINT
+
+ // Stereo support
+ int32_t StereoRecordingIsAvailable(bool& available); // NOLINT
+ int32_t SetStereoRecording(bool enable) { return -1; }
+ int32_t StereoRecording(bool& enabled) const; // NOLINT
+
+ // Delay information and control
+ int32_t RecordingDelay(uint16_t& delayMS) const; // NOLINT
+
+ bool RecordingWarning() const { return false; }
+ bool RecordingError() const { return false; }
+ void ClearRecordingWarning() {}
+ void ClearRecordingError() {}
+
+ // Attach audio buffer
+ void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer);
+
+ private:
+ enum {
+ kNumInterfaces = 2,
+ kDefaultBufSizeInSamples = webrtc_opensl::kDefaultSampleRate * 10 / 1000,
+ kDefaultBufSizeInBytes =
+ webrtc_opensl::kNumChannels * kDefaultBufSizeInSamples * sizeof(int16_t),
+    // Keep as few OpenSL buffers as possible to avoid wasting memory. Two is
+    // the minimum for playout; keep two for recording as well.
+ kNumOpenSlBuffers = 2,
+ kNum10MsToBuffer = 3,
+ };
+
+ int32_t InitSampleRate();
+ void UpdateRecordingDelay();
+ void CalculateNumFifoBuffersNeeded();
+ void AllocateBuffers();
+ int TotalBuffersUsed() const;
+ bool EnqueueAllBuffers();
+  // This function also configures the audio recorder, e.g. the sample rate to
+  // use, so it should be called when starting recording.
+ bool CreateAudioRecorder();
+ void DestroyAudioRecorder();
+
+  // When an overrun happens, more frames will have been received from OpenSL
+  // than the desired number of buffers. It would be possible to expand the
+  // number of buffers on the fly, but that would greatly increase the
+  // complexity of this code. HandleOverrun gracefully handles the scenario by
+  // restarting recording, throwing away all pending audio data. This will
+  // sound like a click; overruns are also logged to identify these types of
+  // clicks.
+  // This function returns true if there has been an overrun. Further
+  // processing of audio data should be avoided until this function returns
+  // false again.
+  // The function needs to be protected by |crit_sect_|.
+ bool HandleOverrun(int event_id, int event_msg);
+
+ static void RecorderSimpleBufferQueueCallback(
+ SLAndroidSimpleBufferQueueItf queueItf,
+ void* pContext);
+ // This function must not take any locks or do any heavy work. It is a
+ // requirement for the OpenSL implementation to work as intended. The reason
+ // for this is that taking locks exposes the OpenSL thread to the risk of
+ // priority inversion.
+ void RecorderSimpleBufferQueueCallbackHandler(
+ SLAndroidSimpleBufferQueueItf queueItf);
+
+ bool StartCbThreads();
+ void StopCbThreads();
+ static bool CbThread(void* context);
+  // This function must be protected against data races with threads calling
+  // this class's public functions. It is a requirement for this class to be
+  // thread-compatible.
+ bool CbThreadImpl();
+
+ int id_;
+ webrtc_opensl::PlayoutDelayProvider* delay_provider_;
+ bool initialized_;
+ bool mic_initialized_;
+ bool rec_initialized_;
+
+ // Members that are read/write accessed concurrently by the process thread and
+ // threads calling public functions of this class.
+ scoped_ptr<ThreadWrapper> rec_thread_; // Processing thread
+ scoped_ptr<CriticalSectionWrapper> crit_sect_;
+  // This member controls the starting and stopping of recording audio from
+  // the device.
+ bool recording_;
+
+ // Only one thread, T1, may push and only one thread, T2, may pull. T1 may or
+ // may not be the same thread as T2. T2 is the process thread and T1 is the
+ // OpenSL thread.
+ scoped_ptr<SingleRwFifo> fifo_;
+ int num_fifo_buffers_needed_;
+ LowLatencyEvent event_;
+ int number_overruns_;
+
+ // OpenSL handles
+ SLObjectItf sles_engine_;
+ SLEngineItf sles_engine_itf_;
+ SLObjectItf sles_recorder_;
+ SLRecordItf sles_recorder_itf_;
+ SLAndroidSimpleBufferQueueItf sles_recorder_sbq_itf_;
+
+ // Audio buffers
+ AudioDeviceBuffer* audio_buffer_;
+ // Holds all allocated memory such that it is deallocated properly.
+ scoped_array<scoped_array<int8_t> > rec_buf_;
+ // Index in |rec_buf_| pointing to the audio buffer that will be ready the
+ // next time RecorderSimpleBufferQueueCallbackHandler is invoked.
+ // Ready means buffer contains audio data from the device.
+ int active_queue_;
+
+ // Audio settings
+ bool agc_enabled_;
+
+ // Audio status
+ uint16_t recording_delay_;
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_INPUT_H_
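The "thread-compatible" contract stated in the class comment means the class does no internal locking on behalf of its callers: any threads invoking non-const methods must serialize those calls themselves. A minimal sketch of what that implies for a caller, reusing the same system_wrappers primitives this patch uses elsewhere (GuardedRecorder is hypothetical, not part of this change):

  // Assumes critical_section_wrapper.h, scoped_ptr.h and opensles_input.h.
  class GuardedRecorder {
   public:
    explicit GuardedRecorder(webrtc::OpenSlesInput* input)
        : crit_(webrtc::CriticalSectionWrapper::CreateCriticalSection()),
          input_(input) {}
    int32_t StartRecording() {
      // Exclusive access, as the thread-compatible contract requires.
      webrtc::CriticalSectionScoped lock(crit_.get());
      return input_->StartRecording();
    }
    int32_t StopRecording() {
      webrtc::CriticalSectionScoped lock(crit_.get());
      return input_->StopRecording();
    }
   private:
    webrtc::scoped_ptr<webrtc::CriticalSectionWrapper> crit_;
    webrtc::OpenSlesInput* const input_;
  };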
diff --git a/modules/audio_device/android/opensles_output.cc b/modules/audio_device/android/opensles_output.cc
new file mode 100644
index 00000000..24504f8e
--- /dev/null
+++ b/modules/audio_device/android/opensles_output.cc
@@ -0,0 +1,565 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_device/android/opensles_output.h"
+
+#include <assert.h>
+
+#include "webrtc/modules/audio_device/android/fine_audio_buffer.h"
+#include "webrtc/modules/audio_device/android/single_rw_fifo.h"
+#include "webrtc/modules/audio_device/audio_device_buffer.h"
+#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
+#include "webrtc/system_wrappers/interface/thread_wrapper.h"
+#include "webrtc/system_wrappers/interface/trace.h"
+
+using webrtc_opensl::kDefaultSampleRate;
+using webrtc_opensl::kNumChannels;
+
+#define VOID_RETURN
+#define OPENSL_RETURN_ON_FAILURE(op, ret_val) \
+ do { \
+ SLresult err = (op); \
+ if (err != SL_RESULT_SUCCESS) { \
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, id_, \
+ "OpenSL error: %d", err); \
+ assert(false); \
+ return ret_val; \
+ } \
+ } while (0)
+
+static const SLEngineOption kOption[] = {
+ { SL_ENGINEOPTION_THREADSAFE, static_cast<SLuint32>(SL_BOOLEAN_TRUE) },
+};
+
+enum {
+ kNoUnderrun,
+ kUnderrun,
+};
+
+namespace webrtc {
+
+OpenSlesOutput::OpenSlesOutput(const int32_t id)
+ : id_(id),
+ initialized_(false),
+ speaker_initialized_(false),
+ play_initialized_(false),
+ crit_sect_(CriticalSectionWrapper::CreateCriticalSection()),
+ playing_(false),
+ num_fifo_buffers_needed_(0),
+ number_underruns_(0),
+ sles_engine_(NULL),
+ sles_engine_itf_(NULL),
+ sles_player_(NULL),
+ sles_player_itf_(NULL),
+ sles_player_sbq_itf_(NULL),
+ sles_output_mixer_(NULL),
+ audio_buffer_(NULL),
+ active_queue_(0),
+ speaker_sampling_rate_(kDefaultSampleRate),
+ buffer_size_samples_(0),
+ buffer_size_bytes_(0),
+ playout_delay_(0) {
+}
+
+OpenSlesOutput::~OpenSlesOutput() {
+}
+
+int32_t OpenSlesOutput::Init() {
+ assert(!initialized_);
+
+  // Set up the OpenSL engine.
+ OPENSL_RETURN_ON_FAILURE(slCreateEngine(&sles_engine_, 1, kOption, 0,
+ NULL, NULL),
+ -1);
+ OPENSL_RETURN_ON_FAILURE((*sles_engine_)->Realize(sles_engine_,
+ SL_BOOLEAN_FALSE),
+ -1);
+ OPENSL_RETURN_ON_FAILURE((*sles_engine_)->GetInterface(sles_engine_,
+ SL_IID_ENGINE,
+ &sles_engine_itf_),
+ -1);
+  // Set up the OpenSL output mix.
+ OPENSL_RETURN_ON_FAILURE(
+ (*sles_engine_itf_)->CreateOutputMix(sles_engine_itf_,
+ &sles_output_mixer_,
+ 0,
+ NULL,
+ NULL),
+ -1);
+ OPENSL_RETURN_ON_FAILURE(
+ (*sles_output_mixer_)->Realize(sles_output_mixer_,
+ SL_BOOLEAN_FALSE),
+ -1);
+
+ if (!InitSampleRate()) {
+ return -1;
+ }
+ AllocateBuffers();
+ initialized_ = true;
+ return 0;
+}
+
+int32_t OpenSlesOutput::Terminate() {
+  // It is assumed that the caller has stopped playout before terminating.
+ assert(!playing_);
+ (*sles_engine_)->Destroy(sles_engine_);
+ initialized_ = false;
+ speaker_initialized_ = false;
+ play_initialized_ = false;
+ return 0;
+}
+
+int32_t OpenSlesOutput::PlayoutDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) {
+ assert(index == 0);
+ // Empty strings.
+ name[0] = '\0';
+ guid[0] = '\0';
+ return 0;
+}
+
+int32_t OpenSlesOutput::SetPlayoutDevice(uint16_t index) {
+ assert(index == 0);
+ return 0;
+}
+
+int32_t OpenSlesOutput::PlayoutIsAvailable(bool& available) { // NOLINT
+ available = true;
+ return 0;
+}
+
+int32_t OpenSlesOutput::InitPlayout() {
+ assert(initialized_);
+ assert(!play_initialized_);
+ play_initialized_ = true;
+ return 0;
+}
+
+int32_t OpenSlesOutput::StartPlayout() {
+ assert(play_initialized_);
+ assert(!playing_);
+ if (!CreateAudioPlayer()) {
+ return -1;
+ }
+
+ // Register callback to receive enqueued buffers.
+ OPENSL_RETURN_ON_FAILURE(
+ (*sles_player_sbq_itf_)->RegisterCallback(sles_player_sbq_itf_,
+ PlayerSimpleBufferQueueCallback,
+ this),
+ -1);
+ if (!EnqueueAllBuffers()) {
+ return -1;
+ }
+
+ {
+    // To prevent the compiler from e.g. optimizing the code into
+    // playing_ = StartCbThreads(), which would not have been thread safe.
+ CriticalSectionScoped lock(crit_sect_.get());
+ playing_ = true;
+ }
+ if (!StartCbThreads()) {
+ playing_ = false;
+ }
+ return 0;
+}
+
+int32_t OpenSlesOutput::StopPlayout() {
+ StopCbThreads();
+ DestroyAudioPlayer();
+ return 0;
+}
+
+int32_t OpenSlesOutput::SpeakerIsAvailable(bool& available) { // NOLINT
+ available = true;
+ return 0;
+}
+
+int32_t OpenSlesOutput::InitSpeaker() {
+ assert(!playing_);
+ speaker_initialized_ = true;
+ return 0;
+}
+
+int32_t OpenSlesOutput::SpeakerVolumeIsAvailable(bool& available) { // NOLINT
+ available = true;
+ return 0;
+}
+
+int32_t OpenSlesOutput::SetSpeakerVolume(uint32_t volume) {
+ assert(speaker_initialized_);
+ assert(initialized_);
+ // TODO(hellner): implement.
+ return 0;
+}
+
+int32_t OpenSlesOutput::MaxSpeakerVolume(uint32_t& maxVolume) const { // NOLINT
+ assert(speaker_initialized_);
+ assert(initialized_);
+ // TODO(hellner): implement.
+ maxVolume = 0;
+ return 0;
+}
+
+int32_t OpenSlesOutput::MinSpeakerVolume(uint32_t& minVolume) const { // NOLINT
+ assert(speaker_initialized_);
+ assert(initialized_);
+ // TODO(hellner): implement.
+ minVolume = 0;
+ return 0;
+}
+
+int32_t OpenSlesOutput::SpeakerVolumeStepSize(
+ uint16_t& stepSize) const { // NOLINT
+ assert(speaker_initialized_);
+ stepSize = 1;
+ return 0;
+}
+
+int32_t OpenSlesOutput::SpeakerMuteIsAvailable(bool& available) { // NOLINT
+ available = false;
+ return 0;
+}
+
+int32_t OpenSlesOutput::StereoPlayoutIsAvailable(bool& available) { // NOLINT
+ available = false;
+ return 0;
+}
+
+int32_t OpenSlesOutput::SetStereoPlayout(bool enable) {
+ if (enable) {
+ assert(false);
+ return -1;
+ }
+ return 0;
+}
+
+int32_t OpenSlesOutput::StereoPlayout(bool& enabled) const { // NOLINT
+ enabled = kNumChannels == 2;
+ return 0;
+}
+
+int32_t OpenSlesOutput::PlayoutBuffer(
+ AudioDeviceModule::BufferType& type, // NOLINT
+ uint16_t& sizeMS) const { // NOLINT
+ type = AudioDeviceModule::kAdaptiveBufferSize;
+ sizeMS = playout_delay_;
+ return 0;
+}
+
+int32_t OpenSlesOutput::PlayoutDelay(uint16_t& delayMS) const { // NOLINT
+ delayMS = playout_delay_;
+ return 0;
+}
+
+void OpenSlesOutput::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
+ audio_buffer_ = audioBuffer;
+}
+
+int32_t OpenSlesOutput::SetLoudspeakerStatus(bool enable) {
+ return 0;
+}
+
+int32_t OpenSlesOutput::GetLoudspeakerStatus(bool& enabled) const { // NOLINT
+ enabled = true;
+ return 0;
+}
+
+int OpenSlesOutput::PlayoutDelayMs() {
+ return playout_delay_;
+}
+
+bool OpenSlesOutput::InitSampleRate() {
+ if (!SetLowLatency()) {
+ speaker_sampling_rate_ = kDefaultSampleRate;
+ // Default is to use 10ms buffers.
+ buffer_size_samples_ = speaker_sampling_rate_ * 10 / 1000;
+ }
+ if (audio_buffer_->SetPlayoutSampleRate(speaker_sampling_rate_) < 0) {
+ return false;
+ }
+ if (audio_buffer_->SetPlayoutChannels(kNumChannels) < 0) {
+ return false;
+ }
+ UpdatePlayoutDelay();
+ return true;
+}
+
+void OpenSlesOutput::UpdatePlayoutDelay() {
+ // TODO(hellner): Add accurate delay estimate.
+ // On average half the current buffer will have been played out.
+ int outstanding_samples = (TotalBuffersUsed() - 0.5) * buffer_size_samples_;
+ playout_delay_ = outstanding_samples / (speaker_sampling_rate_ / 1000);
+}
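The estimate above assumes that, on average, half of the currently playing buffer has already been consumed. A quick numeric check with assumed values (4 FIFO buffers plus 2 OpenSL buffers of 441 samples each at 44100 Hz; none of these numbers are mandated by the patch):

  #include <assert.h>
  #include <stdint.h>

  int main() {
    const int total_buffers = 6;          // TotalBuffersUsed(): 4 FIFO + 2 OpenSL.
    const int buffer_size_samples = 441;  // 10 ms at 44.1 kHz mono.
    const uint32_t sample_rate = 44100;
    const int outstanding_samples =
        static_cast<int>((total_buffers - 0.5) * buffer_size_samples);
    const uint16_t delay_ms = outstanding_samples / (sample_rate / 1000);
    assert(delay_ms == 55);  // 2425 / 44 == 55 ms.
    return 0;
  }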
+
+bool OpenSlesOutput::SetLowLatency() {
+ if (!audio_manager_.low_latency_supported()) {
+ return false;
+ }
+ buffer_size_samples_ = audio_manager_.native_buffer_size();
+ assert(buffer_size_samples_ > 0);
+ speaker_sampling_rate_ = audio_manager_.native_output_sample_rate();
+ assert(speaker_sampling_rate_ > 0);
+ return true;
+}
+
+void OpenSlesOutput::CalculateNumFifoBuffersNeeded() {
+ int number_of_bytes_needed =
+ (speaker_sampling_rate_ * kNumChannels * sizeof(int16_t)) * 10 / 1000;
+
+ // Ceiling of integer division: 1 + ((x - 1) / y)
+ int buffers_per_10_ms =
+ 1 + ((number_of_bytes_needed - 1) / buffer_size_bytes_);
+ // |num_fifo_buffers_needed_| is a multiple of 10ms of buffered up audio.
+ num_fifo_buffers_needed_ = kNum10MsToBuffer * buffers_per_10_ms;
+}
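To make the ceiling division concrete, a worked example with assumed values (44100 Hz mono, 16-bit samples, and a 960-byte native buffer; the real sizes come from the audio manager at runtime):

  #include <assert.h>

  int main() {
    const int sample_rate = 44100, channels = 1, bytes_per_sample = 2;
    const int bytes_needed =
        sample_rate * channels * bytes_per_sample * 10 / 1000;  // 882 per 10 ms.
    const int buffer_size_bytes = 960;  // Assumed native low-latency size.
    const int buffers_per_10_ms = 1 + (bytes_needed - 1) / buffer_size_bytes;
    assert(buffers_per_10_ms == 1);
    // With kNum10MsToBuffer == 4 the FIFO is sized at 4 native buffers.
    assert(4 * buffers_per_10_ms == 4);
    return 0;
  }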
+
+void OpenSlesOutput::AllocateBuffers() {
+ // Allocate fine buffer to provide frames of the desired size.
+ buffer_size_bytes_ = buffer_size_samples_ * kNumChannels * sizeof(int16_t);
+ fine_buffer_.reset(new FineAudioBuffer(audio_buffer_, buffer_size_bytes_,
+ speaker_sampling_rate_));
+
+  // Allocate FIFO to handle passing buffers between the processing and
+  // OpenSL threads.
+ CalculateNumFifoBuffersNeeded(); // Needs |buffer_size_bytes_| to be known
+ assert(num_fifo_buffers_needed_ > 0);
+ fifo_.reset(new SingleRwFifo(num_fifo_buffers_needed_));
+
+ // Allocate the memory area to be used.
+ play_buf_.reset(new scoped_array<int8_t>[TotalBuffersUsed()]);
+ int required_buffer_size = fine_buffer_->RequiredBufferSizeBytes();
+ for (int i = 0; i < TotalBuffersUsed(); ++i) {
+ play_buf_[i].reset(new int8_t[required_buffer_size]);
+ }
+}
+
+int OpenSlesOutput::TotalBuffersUsed() const {
+ return num_fifo_buffers_needed_ + kNumOpenSlBuffers;
+}
+
+bool OpenSlesOutput::EnqueueAllBuffers() {
+ active_queue_ = 0;
+ number_underruns_ = 0;
+ for (int i = 0; i < kNumOpenSlBuffers; ++i) {
+ memset(play_buf_[i].get(), 0, buffer_size_bytes_);
+ OPENSL_RETURN_ON_FAILURE(
+ (*sles_player_sbq_itf_)->Enqueue(
+ sles_player_sbq_itf_,
+ reinterpret_cast<void*>(play_buf_[i].get()),
+ buffer_size_bytes_),
+ false);
+ }
+  // OpenSL playout has been stopped, i.e. only this thread is touching
+  // |fifo_|.
+ while (fifo_->size() != 0) {
+ // Underrun might have happened when pushing new buffers to the FIFO.
+ fifo_->Pop();
+ }
+ for (int i = kNumOpenSlBuffers; i < TotalBuffersUsed(); ++i) {
+ memset(play_buf_[i].get(), 0, buffer_size_bytes_);
+ fifo_->Push(play_buf_[i].get());
+ }
+ return true;
+}
+
+bool OpenSlesOutput::CreateAudioPlayer() {
+ if (!event_.Start()) {
+ assert(false);
+ return false;
+ }
+ SLDataLocator_AndroidSimpleBufferQueue simple_buf_queue = {
+ SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE,
+ static_cast<SLuint32>(kNumOpenSlBuffers)
+ };
+ SLDataFormat_PCM configuration =
+ webrtc_opensl::CreatePcmConfiguration(speaker_sampling_rate_);
+ SLDataSource audio_source = { &simple_buf_queue, &configuration };
+
+ SLDataLocator_OutputMix locator_outputmix;
+  // Set up the data sink structure.
+ locator_outputmix.locatorType = SL_DATALOCATOR_OUTPUTMIX;
+ locator_outputmix.outputMix = sles_output_mixer_;
+ SLDataSink audio_sink = { &locator_outputmix, NULL };
+
+  // Interfaces for streaming audio data, setting volume and Android
+  // configuration are needed.
+  // Note the interfaces still need to be initialized. This only tells OpenSL
+  // that the interfaces will be needed at some point.
+ SLInterfaceID ids[kNumInterfaces] = {
+ SL_IID_BUFFERQUEUE, SL_IID_VOLUME, SL_IID_ANDROIDCONFIGURATION };
+ SLboolean req[kNumInterfaces] = {
+ SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE };
+ OPENSL_RETURN_ON_FAILURE(
+ (*sles_engine_itf_)->CreateAudioPlayer(sles_engine_itf_, &sles_player_,
+ &audio_source, &audio_sink,
+ kNumInterfaces, ids, req),
+ false);
+ // Realize the player in synchronous mode.
+ OPENSL_RETURN_ON_FAILURE((*sles_player_)->Realize(sles_player_,
+ SL_BOOLEAN_FALSE),
+ false);
+ OPENSL_RETURN_ON_FAILURE(
+ (*sles_player_)->GetInterface(sles_player_, SL_IID_PLAY,
+ &sles_player_itf_),
+ false);
+ OPENSL_RETURN_ON_FAILURE(
+ (*sles_player_)->GetInterface(sles_player_, SL_IID_BUFFERQUEUE,
+ &sles_player_sbq_itf_),
+ false);
+ return true;
+}
+
+void OpenSlesOutput::DestroyAudioPlayer() {
+ SLAndroidSimpleBufferQueueItf sles_player_sbq_itf = sles_player_sbq_itf_;
+ {
+ CriticalSectionScoped lock(crit_sect_.get());
+ sles_player_sbq_itf_ = NULL;
+ sles_player_itf_ = NULL;
+ }
+ event_.Stop();
+ if (sles_player_sbq_itf) {
+ // Release all buffers currently queued up.
+ OPENSL_RETURN_ON_FAILURE(
+ (*sles_player_sbq_itf)->Clear(sles_player_sbq_itf),
+ VOID_RETURN);
+ }
+
+ if (sles_player_) {
+ (*sles_player_)->Destroy(sles_player_);
+ sles_player_ = NULL;
+ }
+
+ if (sles_output_mixer_) {
+ (*sles_output_mixer_)->Destroy(sles_output_mixer_);
+ sles_output_mixer_ = NULL;
+ }
+}
+
+bool OpenSlesOutput::HandleUnderrun(int event_id, int event_msg) {
+ if (!playing_) {
+ return false;
+ }
+ if (event_id == kNoUnderrun) {
+ return false;
+ }
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, id_, "Audio underrun");
+ assert(event_id == kUnderrun);
+ assert(event_msg > 0);
+ // Wait for all enqueued buffers to be flushed.
+ if (event_msg != kNumOpenSlBuffers) {
+ return true;
+ }
+ // All buffers have been flushed. Restart the audio from scratch.
+ // No need to check sles_player_itf_ as playing_ would be false before it is
+ // set to NULL.
+ OPENSL_RETURN_ON_FAILURE(
+ (*sles_player_itf_)->SetPlayState(sles_player_itf_,
+ SL_PLAYSTATE_STOPPED),
+ true);
+ EnqueueAllBuffers();
+ OPENSL_RETURN_ON_FAILURE(
+ (*sles_player_itf_)->SetPlayState(sles_player_itf_,
+ SL_PLAYSTATE_PLAYING),
+ true);
+ return true;
+}
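The restart condition above is easy to misread: the callback keeps signalling (kUnderrun, ++number_underruns_) while starved, and the player is only stopped and re-primed once |event_msg| shows that every enqueued OpenSL buffer has drained. A standalone sketch of just that decision logic (with kNumOpenSlBuffers == 2 as in this file; InUnderrun is illustrative, not the patch's code):

  #include <assert.h>

  enum { kNoUnderrun, kUnderrun };
  const int kNumOpenSlBuffers = 2;

  // Returns true while in the underrun state; |*restart| is set on the call
  // where the last enqueued OpenSL buffer has been flushed.
  bool InUnderrun(int event_id, int event_msg, bool* restart) {
    *restart = (event_id == kUnderrun && event_msg == kNumOpenSlBuffers);
    return event_id == kUnderrun;
  }

  int main() {
    bool restart = false;
    assert(InUnderrun(kUnderrun, 1, &restart) && !restart);  // Wait for drain.
    assert(InUnderrun(kUnderrun, 2, &restart) && restart);   // Drained: restart.
    assert(!InUnderrun(kNoUnderrun, 0, &restart));           // Healthy again.
    return 0;
  }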
+
+void OpenSlesOutput::PlayerSimpleBufferQueueCallback(
+ SLAndroidSimpleBufferQueueItf sles_player_sbq_itf,
+ void* p_context) {
+ OpenSlesOutput* audio_device = reinterpret_cast<OpenSlesOutput*>(p_context);
+ audio_device->PlayerSimpleBufferQueueCallbackHandler(sles_player_sbq_itf);
+}
+
+void OpenSlesOutput::PlayerSimpleBufferQueueCallbackHandler(
+ SLAndroidSimpleBufferQueueItf sles_player_sbq_itf) {
+ if (fifo_->size() <= 0 || number_underruns_ > 0) {
+ ++number_underruns_;
+ event_.SignalEvent(kUnderrun, number_underruns_);
+ return;
+ }
+ int8_t* audio = fifo_->Pop();
+ if (audio)
+ OPENSL_RETURN_ON_FAILURE(
+ (*sles_player_sbq_itf)->Enqueue(sles_player_sbq_itf,
+ audio,
+ buffer_size_bytes_),
+ VOID_RETURN);
+ event_.SignalEvent(kNoUnderrun, 0);
+}
+
+bool OpenSlesOutput::StartCbThreads() {
+ play_thread_.reset(ThreadWrapper::CreateThread(CbThread,
+ this,
+ kRealtimePriority,
+ "opensl_play_thread"));
+ assert(play_thread_.get());
+ OPENSL_RETURN_ON_FAILURE(
+ (*sles_player_itf_)->SetPlayState(sles_player_itf_,
+ SL_PLAYSTATE_PLAYING),
+ false);
+
+ unsigned int thread_id = 0;
+ if (!play_thread_->Start(thread_id)) {
+ assert(false);
+ return false;
+ }
+ return true;
+}
+
+void OpenSlesOutput::StopCbThreads() {
+ {
+ CriticalSectionScoped lock(crit_sect_.get());
+ playing_ = false;
+ }
+ if (sles_player_itf_) {
+ OPENSL_RETURN_ON_FAILURE(
+ (*sles_player_itf_)->SetPlayState(sles_player_itf_,
+ SL_PLAYSTATE_STOPPED),
+ VOID_RETURN);
+ }
+ if (play_thread_.get() == NULL) {
+ return;
+ }
+ event_.Stop();
+ if (play_thread_->Stop()) {
+ play_thread_.reset();
+ } else {
+ assert(false);
+ }
+}
+
+bool OpenSlesOutput::CbThread(void* context) {
+ return reinterpret_cast<OpenSlesOutput*>(context)->CbThreadImpl();
+}
+
+bool OpenSlesOutput::CbThreadImpl() {
+ assert(fine_buffer_.get() != NULL);
+ int event_id;
+ int event_msg;
+ // event_ must not be waited on while a lock has been taken.
+ event_.WaitOnEvent(&event_id, &event_msg);
+
+ CriticalSectionScoped lock(crit_sect_.get());
+ if (HandleUnderrun(event_id, event_msg)) {
+ return playing_;
+ }
+  // If |fifo_| is not full, the next buffer in |play_buf_| must be free.
+ while (fifo_->size() < num_fifo_buffers_needed_ && playing_) {
+ int8_t* audio = play_buf_[active_queue_].get();
+ fine_buffer_->GetBufferData(audio);
+ fifo_->Push(audio);
+ active_queue_ = (active_queue_ + 1) % TotalBuffersUsed();
+ }
+ return playing_;
+}
+
+} // namespace webrtc
diff --git a/modules/audio_device/android/opensles_output.h b/modules/audio_device/android/opensles_output.h
new file mode 100644
index 00000000..bf20cf64
--- /dev/null
+++ b/modules/audio_device/android/opensles_output.h
@@ -0,0 +1,236 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_OUTPUT_H_
+#define WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_OUTPUT_H_
+
+#include <SLES/OpenSLES.h>
+#include <SLES/OpenSLES_Android.h>
+#include <SLES/OpenSLES_AndroidConfiguration.h>
+
+#include "webrtc/modules/audio_device/android/audio_manager_jni.h"
+#include "webrtc/modules/audio_device/android/low_latency_event.h"
+#include "webrtc/modules/audio_device/android/opensles_common.h"
+#include "webrtc/modules/audio_device/include/audio_device_defines.h"
+#include "webrtc/modules/audio_device/include/audio_device.h"
+#include "webrtc/system_wrappers/interface/scoped_ptr.h"
+
+namespace webrtc {
+
+class AudioDeviceBuffer;
+class CriticalSectionWrapper;
+class FineAudioBuffer;
+class SingleRwFifo;
+class ThreadWrapper;
+
+// OpenSL implementation that facilitates playing PCM data to an Android
+// device.
+// This class is thread-compatible, i.e. given an instance of this class,
+// calls to non-const methods require exclusive access to the object.
+class OpenSlesOutput : public webrtc_opensl::PlayoutDelayProvider {
+ public:
+ explicit OpenSlesOutput(const int32_t id);
+ virtual ~OpenSlesOutput();
+
+  // Main initialization and termination
+ int32_t Init();
+ int32_t Terminate();
+ bool Initialized() const { return initialized_; }
+
+ // Device enumeration
+ int16_t PlayoutDevices() { return 1; }
+
+ int32_t PlayoutDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]);
+
+ // Device selection
+ int32_t SetPlayoutDevice(uint16_t index);
+ int32_t SetPlayoutDevice(
+ AudioDeviceModule::WindowsDeviceType device) { return 0; }
+
+ // Audio transport initialization
+ int32_t PlayoutIsAvailable(bool& available); // NOLINT
+ int32_t InitPlayout();
+ bool PlayoutIsInitialized() const { return play_initialized_; }
+
+ // Audio transport control
+ int32_t StartPlayout();
+ int32_t StopPlayout();
+ bool Playing() const { return playing_; }
+
+ // Audio mixer initialization
+ int32_t SpeakerIsAvailable(bool& available); // NOLINT
+ int32_t InitSpeaker();
+ bool SpeakerIsInitialized() const { return speaker_initialized_; }
+
+ // Speaker volume controls
+ int32_t SpeakerVolumeIsAvailable(bool& available); // NOLINT
+ int32_t SetSpeakerVolume(uint32_t volume);
+ int32_t SpeakerVolume(uint32_t& volume) const { return 0; } // NOLINT
+ int32_t MaxSpeakerVolume(uint32_t& maxVolume) const; // NOLINT
+ int32_t MinSpeakerVolume(uint32_t& minVolume) const; // NOLINT
+ int32_t SpeakerVolumeStepSize(uint16_t& stepSize) const; // NOLINT
+
+ // Speaker mute control
+ int32_t SpeakerMuteIsAvailable(bool& available); // NOLINT
+ int32_t SetSpeakerMute(bool enable) { return -1; }
+ int32_t SpeakerMute(bool& enabled) const { return -1; } // NOLINT
+
+
+ // Stereo support
+ int32_t StereoPlayoutIsAvailable(bool& available); // NOLINT
+ int32_t SetStereoPlayout(bool enable);
+ int32_t StereoPlayout(bool& enabled) const; // NOLINT
+
+ // Delay information and control
+ int32_t SetPlayoutBuffer(const AudioDeviceModule::BufferType type,
+ uint16_t sizeMS) { return -1; }
+ int32_t PlayoutBuffer(AudioDeviceModule::BufferType& type, // NOLINT
+ uint16_t& sizeMS) const;
+ int32_t PlayoutDelay(uint16_t& delayMS) const; // NOLINT
+
+
+ // Error and warning information
+ bool PlayoutWarning() const { return false; }
+ bool PlayoutError() const { return false; }
+ void ClearPlayoutWarning() {}
+ void ClearPlayoutError() {}
+
+ // Attach audio buffer
+ void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer);
+
+ // Speaker audio routing
+ int32_t SetLoudspeakerStatus(bool enable);
+ int32_t GetLoudspeakerStatus(bool& enable) const; // NOLINT
+
+ protected:
+ virtual int PlayoutDelayMs();
+
+ private:
+ enum {
+ kNumInterfaces = 3,
+    // TODO(xians): Reduce the number of buffers to improve the latency.
+    //              Currently 30ms worth of buffers are needed due to audio
+    //              pipeline processing jitter. Note: kNumOpenSlBuffers must
+    //              not be changed.
+    // According to the OpenSL ES documentation in the NDK: the lower output
+    // latency path is used only if the application requests a buffer count of
+    // 2 or more. Use the minimum number of buffers to keep delay as low as
+    // possible.
+ kNumOpenSlBuffers = 2,
+    // NetEq delivers frames on a 10ms basis. This means that every 10ms there
+    // will be a time-consuming task. Keeping 10ms worth of buffers ensures
+    // that there is 10ms to perform the time-consuming task without running
+    // into underflow.
+    // In addition to the 10ms that needs to be stored for NetEq processing,
+    // there will be jitter in the audio pipeline due to the acquisition of
+    // locks.
+    // Note: The buffers in the OpenSL queue do not count towards the 10ms of
+    // frames needed since OpenSL needs to have them ready for playout.
+ kNum10MsToBuffer = 4,
+ };
+
+ bool InitSampleRate();
+ bool SetLowLatency();
+ void UpdatePlayoutDelay();
+  // It might be possible to dynamically add or remove buffers based on how
+  // close to depletion the fifo is. Few buffers means low delay; too few
+  // buffers will cause underrun. Dynamically changing the number of buffers
+  // would greatly increase code complexity.
+ void CalculateNumFifoBuffersNeeded();
+ void AllocateBuffers();
+ int TotalBuffersUsed() const;
+ bool EnqueueAllBuffers();
+  // This function also configures the audio player, e.g. the sample rate to
+  // use, so it should be called when starting playout.
+ bool CreateAudioPlayer();
+ void DestroyAudioPlayer();
+
+  // When an underrun happens there won't be a new frame ready for playout
+  // that can be retrieved yet. Since the OpenSL thread must return ASAP,
+  // one less buffer will be queued with OpenSL. This function handles that
+  // case gracefully by restarting the audio, pushing silent frames to OpenSL
+  // for playout. This will sound like a click. Underruns are also logged to
+  // make it possible to identify these types of audio artifacts.
+  // This function returns true if there has been an underrun. Further
+  // processing of audio data should be avoided until this function returns
+  // false again.
+  // The function needs to be protected by |crit_sect_|.
+ bool HandleUnderrun(int event_id, int event_msg);
+
+ static void PlayerSimpleBufferQueueCallback(
+ SLAndroidSimpleBufferQueueItf queueItf,
+ void* pContext);
+ // This function must not take any locks or do any heavy work. It is a
+ // requirement for the OpenSL implementation to work as intended. The reason
+ // for this is that taking locks exposes the OpenSL thread to the risk of
+ // priority inversion.
+ void PlayerSimpleBufferQueueCallbackHandler(
+ SLAndroidSimpleBufferQueueItf queueItf);
+
+ bool StartCbThreads();
+ void StopCbThreads();
+ static bool CbThread(void* context);
+  // This function must be protected against data races with threads calling
+  // this class's public functions. It is a requirement for this class to be
+  // thread-compatible.
+ bool CbThreadImpl();
+
+ // Java API handle
+ AudioManagerJni audio_manager_;
+
+ int id_;
+ bool initialized_;
+ bool speaker_initialized_;
+ bool play_initialized_;
+
+ // Members that are read/write accessed concurrently by the process thread and
+ // threads calling public functions of this class.
+ scoped_ptr<ThreadWrapper> play_thread_; // Processing thread
+ scoped_ptr<CriticalSectionWrapper> crit_sect_;
+  // This member controls the starting and stopping of playing audio to
+  // the device.
+ bool playing_;
+
+ // Only one thread, T1, may push and only one thread, T2, may pull. T1 may or
+ // may not be the same thread as T2. T1 is the process thread and T2 is the
+ // OpenSL thread.
+ scoped_ptr<SingleRwFifo> fifo_;
+ int num_fifo_buffers_needed_;
+ LowLatencyEvent event_;
+ int number_underruns_;
+
+ // OpenSL handles
+ SLObjectItf sles_engine_;
+ SLEngineItf sles_engine_itf_;
+ SLObjectItf sles_player_;
+ SLPlayItf sles_player_itf_;
+ SLAndroidSimpleBufferQueueItf sles_player_sbq_itf_;
+ SLObjectItf sles_output_mixer_;
+
+ // Audio buffers
+ AudioDeviceBuffer* audio_buffer_;
+ scoped_ptr<FineAudioBuffer> fine_buffer_;
+ scoped_array<scoped_array<int8_t> > play_buf_;
+  // Index in |play_buf_| pointing to the audio buffer that will be ready the
+  // next time PlayerSimpleBufferQueueCallbackHandler is invoked.
+  // Ready means the buffer is ready to be played out to the device.
+ int active_queue_;
+
+ // Audio settings
+ uint32_t speaker_sampling_rate_;
+ int buffer_size_samples_;
+ int buffer_size_bytes_;
+
+ // Audio status
+ uint16_t playout_delay_;
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_OUTPUT_H_
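Note that OpenSlesOutput inherits webrtc_opensl::PlayoutDelayProvider, which is exactly the type OpenSlesInput's constructor takes. The intended wiring is presumably along these lines (an illustrative sketch; the ids and object lifetimes here are assumptions, not part of the patch):

  #include "webrtc/modules/audio_device/android/opensles_input.h"
  #include "webrtc/modules/audio_device/android/opensles_output.h"

  void CreateFullDuplexPath() {
    webrtc::OpenSlesOutput output(0);
    // The output side doubles as the playout-delay provider the input side
    // consumes; recording delay reported upstream typically includes it.
    webrtc::OpenSlesInput input(0, &output);
    // ... Init(), AttachAudioBuffer(), StartPlayout(), StartRecording(), etc.
  }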
diff --git a/modules/audio_device/android/single_rw_fifo.cc b/modules/audio_device/android/single_rw_fifo.cc
new file mode 100644
index 00000000..29a13517
--- /dev/null
+++ b/modules/audio_device/android/single_rw_fifo.cc
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_device/android/single_rw_fifo.h"
+
+#if !defined(__ARMEL__)
+// ARM specific due to the implementation of MemoryBarrier.
+#error trying to compile ARM code for non-ARM target
+#endif
+
+static int UpdatePos(int pos, int capacity) {
+ return (pos + 1) % capacity;
+}
+
+namespace webrtc {
+
+namespace subtle {
+
+// From http://src.chromium.org/viewvc/chrome/trunk/src/base/atomicops_internals_arm_gcc.h
+// Note that it is only the MemoryBarrier function that makes this code ARM
+// specific. By borrowing other MemoryBarrier implementations, it could be
+// extended to more platforms.
+inline void MemoryBarrier() {
+ // Note: This is a function call, which is also an implicit compiler
+ // barrier.
+ typedef void (*KernelMemoryBarrierFunc)();
+ ((KernelMemoryBarrierFunc)0xffff0fa0)();
+}
+
+} // namespace subtle
+
+SingleRwFifo::SingleRwFifo(int capacity)
+ : capacity_(capacity),
+ size_(0),
+ read_pos_(0),
+ write_pos_(0) {
+ queue_.reset(new int8_t*[capacity_]);
+}
+
+SingleRwFifo::~SingleRwFifo() {
+}
+
+void SingleRwFifo::Push(int8_t* mem) {
+ assert(mem);
+
+ // Ensure that there is space for the new data in the FIFO.
+ // Note there is only one writer meaning that the other thread is guaranteed
+ // only to decrease the size.
+ const int free_slots = capacity() - size();
+ if (free_slots <= 0) {
+ // Size can be queried outside of the Push function. The caller is assumed
+ // to ensure that Push will be successful before calling it.
+ assert(false);
+ return;
+ }
+ queue_[write_pos_] = mem;
+  // The memory barrier ensures that the buffer pointer is written before
+  // |size_| is incremented.
+ subtle::MemoryBarrier();
+ ++size_;
+ write_pos_ = UpdatePos(write_pos_, capacity());
+}
+
+int8_t* SingleRwFifo::Pop() {
+ int8_t* ret_val = NULL;
+ if (size() <= 0) {
+    // Size can be queried outside of the Pop function. The caller is assumed
+    // to ensure that Pop will be successful before calling it.
+ assert(false);
+ return ret_val;
+ }
+ ret_val = queue_[read_pos_];
+  // The memory barrier ensures that the buffer pointer has been read before
+  // |size_| is decremented.
+ subtle::MemoryBarrier();
+ --size_;
+ read_pos_ = UpdatePos(read_pos_, capacity());
+ return ret_val;
+}
+
+} // namespace webrtc
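The correctness argument here rests on exactly one producer, exactly one consumer, and a full barrier between touching the slot and updating |size_| (via the ARM Linux kernel's user-space barrier helper at 0xffff0fa0). For comparison, the same shape in portable C++11 atomics, where a seq_cst read-modify-write supplies the equivalent ordering (a sketch only; this tree predates C++11 usage):

  #include <atomic>
  #include <cassert>
  #include <cstdint>
  #include <vector>

  class SpscFifo {
   public:
    explicit SpscFifo(size_t capacity)
        : queue_(capacity), size_(0), read_pos_(0), write_pos_(0) {}

    void Push(int8_t* mem) {  // Producer thread only.
      assert(size_.load() < queue_.size());
      queue_[write_pos_] = mem;  // Write the slot first...
      size_.fetch_add(1);        // ...then publish it (seq_cst RMW).
      write_pos_ = (write_pos_ + 1) % queue_.size();
    }

    int8_t* Pop() {  // Consumer thread only.
      assert(size_.load() > 0);
      int8_t* ret = queue_[read_pos_];  // Read the slot first...
      size_.fetch_sub(1);               // ...then return it to the producer.
      read_pos_ = (read_pos_ + 1) % queue_.size();
      return ret;
    }

   private:
    std::vector<int8_t*> queue_;
    std::atomic<size_t> size_;
    size_t read_pos_;
    size_t write_pos_;
  };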
diff --git a/modules/audio_device/android/single_rw_fifo.h b/modules/audio_device/android/single_rw_fifo.h
new file mode 100644
index 00000000..a1fcfaab
--- /dev/null
+++ b/modules/audio_device/android/single_rw_fifo.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_SINGLE_RW_FIFO_H_
+#define WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_SINGLE_RW_FIFO_H_
+
+#include "webrtc/system_wrappers/interface/atomic32.h"
+#include "webrtc/system_wrappers/interface/scoped_ptr.h"
+#include "webrtc/typedefs.h"
+
+namespace webrtc {
+
+// Implements a lock-free FIFO loosely based on
+// http://src.chromium.org/viewvc/chrome/trunk/src/media/base/audio_fifo.cc
+// Note that this class assumes there is one producer (writer) and one
+// consumer (reader) thread.
+class SingleRwFifo {
+ public:
+ explicit SingleRwFifo(int capacity);
+ ~SingleRwFifo();
+
+ void Push(int8_t* mem);
+ int8_t* Pop();
+
+ void Clear();
+
+ int size() { return size_.Value(); }
+ int capacity() const { return capacity_; }
+
+ private:
+ scoped_array<int8_t*> queue_;
+ int capacity_;
+
+ Atomic32 size_;
+
+ int read_pos_;
+ int write_pos_;
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_SINGLE_RW_FIFO_H_
diff --git a/modules/audio_device/android/single_rw_fifo_unittest.cc b/modules/audio_device/android/single_rw_fifo_unittest.cc
new file mode 100644
index 00000000..c722c275
--- /dev/null
+++ b/modules/audio_device/android/single_rw_fifo_unittest.cc
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_device/android/single_rw_fifo.h"
+
+#include <list>
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/system_wrappers/interface/scoped_ptr.h"
+
+namespace webrtc {
+
+class SingleRwFifoTest : public testing::Test {
+ public:
+ enum {
+    // Uninteresting as it does not affect the test.
+ kBufferSize = 8,
+ kCapacity = 6,
+ };
+
+ SingleRwFifoTest() : fifo_(kCapacity), pushed_(0), available_(0) {
+ }
+ virtual ~SingleRwFifoTest() {}
+
+ void SetUp() {
+ for (int8_t i = 0; i < kCapacity; ++i) {
+ // Create memory area.
+ buffer_[i].reset(new int8_t[kBufferSize]);
+      // Set the first byte in the buffer to the order in which it was
+      // created; this allows us to e.g. check that the buffers don't get
+      // re-arranged.
+ buffer_[i][0] = i;
+ // Queue used by test.
+ memory_queue_.push_back(buffer_[i].get());
+ }
+ available_ = kCapacity;
+ VerifySizes();
+ }
+
+ void Push(int number_of_buffers) {
+ for (int8_t i = 0; i < number_of_buffers; ++i) {
+ int8_t* data = memory_queue_.front();
+ memory_queue_.pop_front();
+ fifo_.Push(data);
+ --available_;
+ ++pushed_;
+ }
+ VerifySizes();
+ VerifyOrdering();
+ }
+ void Pop(int number_of_buffers) {
+ for (int8_t i = 0; i < number_of_buffers; ++i) {
+ int8_t* data = fifo_.Pop();
+ memory_queue_.push_back(data);
+ ++available_;
+ --pushed_;
+ }
+ VerifySizes();
+ VerifyOrdering();
+ }
+
+ void VerifyOrdering() const {
+ std::list<int8_t*>::const_iterator iter = memory_queue_.begin();
+ if (iter == memory_queue_.end()) {
+ return;
+ }
+ int8_t previous_index = DataToElementIndex(*iter);
+ ++iter;
+ for (; iter != memory_queue_.end(); ++iter) {
+ int8_t current_index = DataToElementIndex(*iter);
+ EXPECT_EQ(current_index, ++previous_index % kCapacity);
+ }
+ }
+
+ void VerifySizes() {
+ EXPECT_EQ(available_, static_cast<int>(memory_queue_.size()));
+ EXPECT_EQ(pushed_, fifo_.size());
+ }
+
+ int8_t DataToElementIndex(int8_t* data) const {
+ return data[0];
+ }
+
+ protected:
+ SingleRwFifo fifo_;
+ // Memory area for proper de-allocation.
+ scoped_array<int8_t> buffer_[kCapacity];
+ std::list<int8_t*> memory_queue_;
+
+ int pushed_;
+ int available_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(SingleRwFifoTest);
+};
+
+TEST_F(SingleRwFifoTest, Construct) {
+ // All verifications are done in SetUp.
+}
+
+TEST_F(SingleRwFifoTest, Push) {
+ Push(kCapacity);
+}
+
+TEST_F(SingleRwFifoTest, Pop) {
+ // Push all available.
+ Push(available_);
+
+ // Test border cases:
+ // At capacity
+ Pop(1);
+ Push(1);
+
+ // At minimal capacity
+ Pop(pushed_);
+ Push(1);
+ Pop(1);
+}
+
+} // namespace webrtc
diff --git a/modules/audio_device/audio_device.gypi b/modules/audio_device/audio_device.gypi
index c3741540..b156cd1b 100644
--- a/modules/audio_device/audio_device.gypi
+++ b/modules/audio_device/audio_device.gypi
@@ -125,6 +125,20 @@
'sources': [
'android/audio_device_opensles_android.cc',
'android/audio_device_opensles_android.h',
+ 'android/audio_manager_jni.cc',
+ 'android/audio_manager_jni.h',
+ 'android/fine_audio_buffer.cc',
+ 'android/fine_audio_buffer.h',
+ 'android/low_latency_event_posix.cc',
+ 'android/low_latency_event.h',
+ 'android/opensles_common.cc',
+ 'android/opensles_common.h',
+ 'android/opensles_input.cc',
+ 'android/opensles_input.h',
+ 'android/opensles_output.cc',
+ 'android/opensles_output.h',
+ 'android/single_rw_fifo.cc',
+ 'android/single_rw_fifo.h',
],
}, {
'sources': [
@@ -240,6 +254,30 @@
],
}],
],
+ 'conditions': [
+ ['OS=="android" and enable_android_opensl==1', {
+ 'targets': [
+ {
+ 'target_name': 'audio_device_unittest',
+ 'type': 'executable',
+ 'dependencies': [
+ 'audio_device',
+ 'webrtc_utility',
+ '<(DEPTH)/testing/gmock.gyp:gmock',
+ '<(DEPTH)/testing/gtest.gyp:gtest',
+ '<(webrtc_root)/system_wrappers/source/system_wrappers.gyp:system_wrappers',
+ '<(webrtc_root)/test/test.gyp:test_support_main',
+ ],
+ 'sources': [
+ 'android/fine_audio_buffer_unittest.cc',
+ 'android/low_latency_event_unittest.cc',
+ 'android/single_rw_fifo_unittest.cc',
+ 'mock/mock_audio_device_buffer.h',
+ ],
+ },
+ ],
+ }],
+ ],
}], # include_tests
],
}
diff --git a/modules/audio_device/audio_device_buffer.h b/modules/audio_device/audio_device_buffer.h
index 90e07de8..77aaac7f 100644
--- a/modules/audio_device/audio_device_buffer.h
+++ b/modules/audio_device/audio_device_buffer.h
@@ -29,6 +29,9 @@ class MediaFile;
class AudioDeviceBuffer
{
public:
+ AudioDeviceBuffer();
+ virtual ~AudioDeviceBuffer();
+
void SetId(uint32_t id);
int32_t RegisterAudioCallback(AudioTransport* audioCallback);
@@ -57,8 +60,8 @@ public:
int32_t DeliverRecordedData();
uint32_t NewMicLevel() const;
- int32_t RequestPlayoutData(uint32_t nSamples);
- int32_t GetPlayoutData(void* audioBuffer);
+ virtual int32_t RequestPlayoutData(uint32_t nSamples);
+ virtual int32_t GetPlayoutData(void* audioBuffer);
int32_t StartInputFileRecording(
const char fileName[kAdmMaxFileNameSize]);
@@ -69,9 +72,6 @@ public:
int32_t SetTypingStatus(bool typingStatus);
- AudioDeviceBuffer();
- ~AudioDeviceBuffer();
-
private:
int32_t _id;
CriticalSectionWrapper& _critSect;
diff --git a/modules/audio_device/mock_audio_device_buffer.h b/modules/audio_device/mock_audio_device_buffer.h
new file mode 100644
index 00000000..b9e66f7d
--- /dev/null
+++ b/modules/audio_device/mock_audio_device_buffer.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_DEVICE_MOCK_AUDIO_DEVICE_BUFFER_H_
+#define WEBRTC_MODULES_AUDIO_DEVICE_MOCK_AUDIO_DEVICE_BUFFER_H_
+
+#include "testing/gmock/include/gmock/gmock.h"
+#include "webrtc/modules/audio_device/audio_device_buffer.h"
+
+namespace webrtc {
+
+class MockAudioDeviceBuffer : public AudioDeviceBuffer {
+ public:
+ MockAudioDeviceBuffer() {}
+ virtual ~MockAudioDeviceBuffer() {}
+
+ MOCK_METHOD1(RequestPlayoutData, int32_t(uint32_t nSamples));
+ MOCK_METHOD1(GetPlayoutData, int32_t(void* audioBuffer));
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_AUDIO_DEVICE_MOCK_AUDIO_DEVICE_BUFFER_H_
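This mock is the reason RequestPlayoutData() and GetPlayoutData() were made virtual in audio_device_buffer.h above. Typical usage in a test looks roughly like this (the test name and sample counts are illustrative, not taken from the patch):

  #include "testing/gmock/include/gmock/gmock.h"
  #include "testing/gtest/include/gtest/gtest.h"
  #include "webrtc/modules/audio_device/mock_audio_device_buffer.h"

  using ::testing::_;
  using ::testing::Return;

  TEST(MockAudioDeviceBufferExample, ServesTenMsChunks) {
    webrtc::MockAudioDeviceBuffer mock;
    // 441 samples is 10 ms at an assumed 44.1 kHz mono rate.
    EXPECT_CALL(mock, RequestPlayoutData(441)).WillRepeatedly(Return(441));
    EXPECT_CALL(mock, GetPlayoutData(_)).WillRepeatedly(Return(441));
    // Code under test (e.g. FineAudioBuffer) would pull from |mock| here.
    EXPECT_EQ(441, mock.RequestPlayoutData(441));
  }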
diff --git a/system_wrappers/interface/atomic32.h b/system_wrappers/interface/atomic32.h
index ab29f97a..08ab4f25 100644
--- a/system_wrappers/interface/atomic32.h
+++ b/system_wrappers/interface/atomic32.h
@@ -11,7 +11,6 @@
// Atomic, system independent 32-bit integer. Unless you know what you're
// doing, use locks instead! :-)
//
-// Note: uses full memory barriers.
// Note: assumes 32-bit (or higher) system
#ifndef WEBRTC_SYSTEM_WRAPPERS_INTERFACE_ATOMIC32_H_
#define WEBRTC_SYSTEM_WRAPPERS_INTERFACE_ATOMIC32_H_
@@ -42,7 +41,9 @@ class Atomic32 {
// Sets the value atomically to new_value if the value equals compare value.
// The function returns true if the exchange happened.
bool CompareExchange(int32_t new_value, int32_t compare_value);
- int32_t Value() const;
+ int32_t Value() {
+ return *this += 0;
+ }
private:
// Disable the + and - operator since it's unclear what these operations
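The rewritten Value() is a small trick worth spelling out: "*this += 0" turns a plain read into an atomic read-modify-write, so the read goes through the same barriered path as the other operations on all three backends (OSAtomic*Barrier, __sync_*, Interlocked*), which is also why the per-platform Value() bodies are deleted below. In C++11 terms:

  #include <atomic>
  #include <cstdint>

  int32_t BarrieredValue(std::atomic<int32_t>& v) {
    return v.fetch_add(0);  // A seq_cst RMW of +0: a read with full ordering.
  }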
diff --git a/system_wrappers/source/atomic32_mac.cc b/system_wrappers/source/atomic32_mac.cc
index f03c1fbb..d3728465 100644
--- a/system_wrappers/source/atomic32_mac.cc
+++ b/system_wrappers/source/atomic32_mac.cc
@@ -46,8 +46,4 @@ bool Atomic32::CompareExchange(int32_t new_value, int32_t compare_value) {
return OSAtomicCompareAndSwap32Barrier(compare_value, new_value, &value_);
}
-int32_t Atomic32::Value() const {
- return value_;
-}
-
} // namespace webrtc
diff --git a/system_wrappers/source/atomic32_posix.cc b/system_wrappers/source/atomic32_posix.cc
index 614f3333..e6a491c5 100644
--- a/system_wrappers/source/atomic32_posix.cc
+++ b/system_wrappers/source/atomic32_posix.cc
@@ -50,8 +50,4 @@ bool Atomic32::CompareExchange(int32_t new_value, int32_t compare_value) {
return __sync_bool_compare_and_swap(&value_, compare_value, new_value);
}
-int32_t Atomic32::Value() const {
- return value_;
-}
-
} // namespace webrtc
diff --git a/system_wrappers/source/atomic32_win.cc b/system_wrappers/source/atomic32_win.cc
index 2f0cf182..5dd07092 100644
--- a/system_wrappers/source/atomic32_win.cc
+++ b/system_wrappers/source/atomic32_win.cc
@@ -59,8 +59,4 @@ bool Atomic32::CompareExchange(int32_t new_value, int32_t compare_value) {
return (old_value == compare_value);
}
-int32_t Atomic32::Value() const {
- return value_;
-}
-
} // namespace webrtc
diff --git a/video_engine/test/android/jni/vie_android_java_api.cc b/video_engine/test/android/jni/vie_android_java_api.cc
index 34551073..d1475d20 100644
--- a/video_engine/test/android/jni/vie_android_java_api.cc
+++ b/video_engine/test/android/jni/vie_android_java_api.cc
@@ -1627,7 +1627,9 @@ JNIEXPORT jobjectArray JNICALL Java_org_webrtc_videoengineapp_ViEAndroidJavaAPI_
codecToList.plname, codecToList.pltype,
codecToList.plfreq, codecToList.pacsize,
codecToList.channels, codecToList.rate);
- assert(written >= 0 && written < static_cast<int>(sizeof(info)));
+ if (written < 0 || written >= static_cast<int>(sizeof(info))) {
+ assert(false);
+ }
__android_log_print(ANDROID_LOG_DEBUG, WEBRTC_LOG_TAG,
"VoiceEgnine Codec[%d] %s", i, info);
env->SetObjectArrayElement(ret, i, env->NewStringUTF( info ));
diff --git a/voice_engine/voice_engine_impl.cc b/voice_engine/voice_engine_impl.cc
index e01a2cd1..4c923ebc 100644
--- a/voice_engine/voice_engine_impl.cc
+++ b/voice_engine/voice_engine_impl.cc
@@ -8,9 +8,13 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#if defined(WEBRTC_ANDROID) && !defined(WEBRTC_ANDROID_OPENSLES)
+#if defined(WEBRTC_ANDROID)
+#if defined(WEBRTC_ANDROID_OPENSLES)
+#include "webrtc/modules/audio_device/android/audio_manager_jni.h"
+#else
#include "webrtc/modules/audio_device/android/audio_device_jni_android.h"
#endif
+#endif
#include "webrtc/system_wrappers/interface/trace.h"
#include "webrtc/voice_engine/voice_engine_impl.h"
@@ -142,6 +146,7 @@ int VoiceEngine::SetAndroidObjects(void* javaVM, void* env, void* context)
{
#ifdef WEBRTC_ANDROID
#ifdef WEBRTC_ANDROID_OPENSLES
+ AudioManagerJni::SetAndroidAudioDeviceObjects(javaVM, env, context);
return 0;
#else
return AudioDeviceAndroidJni::SetAndroidAudioDeviceObjects(