path: root/voice_engine
author     andrew@webrtc.org <andrew@webrtc.org@4adac7df-926f-26a2-2b94-8c16560cd09d>  2013-03-05 01:12:49 +0000
committer  andrew@webrtc.org <andrew@webrtc.org@4adac7df-926f-26a2-2b94-8c16560cd09d>  2013-03-05 01:12:49 +0000
commit     b79627b0f3cf072721f3ae4a584a4f90edba2d1b (patch)
tree       869e205cae65ff502da4f9456113aae89e44b18f /voice_engine
parent     d432df47411ffb2152cbdffec242af7c34cfe7e0 (diff)
download   webrtc-b79627b0f3cf072721f3ae4a584a4f90edba2d1b.tar.gz
Expose the capture-side AudioProcessing object and allow it to be injected.
* Clean up the configuration code, including removing most of the weird defines.
* Add a unit test.

Review URL: https://webrtc-codereview.appspot.com/1152005

git-svn-id: http://webrtc.googlecode.com/svn/trunk/webrtc@3605 4adac7df-926f-26a2-2b94-8c16560cd09d
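
Usage sketch (not part of this change, modeled on the voe_base_unittest.cc added below): a caller can now hand VoiceEngine its own capture-side AudioProcessing at Init() time and retrieve it afterwards via VoEBase::audio_processing(). The FakeAudioDeviceModule and include paths are assumed to match the new test; any production ADM would be passed the same way, and passing NULL for either parameter keeps the old behavior of VoiceEngine creating the module itself.

#include <cassert>

#include "webrtc/modules/audio_device/include/fake_audio_device.h"
#include "webrtc/modules/audio_processing/include/audio_processing.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
#include "webrtc/voice_engine/include/voe_base.h"

void InjectCaptureAudioProcessing() {
  webrtc::VoiceEngine* voe = webrtc::VoiceEngine::Create();
  webrtc::VoEBase* base = webrtc::VoEBase::GetInterface(voe);
  webrtc::scoped_ptr<webrtc::FakeAudioDeviceModule> adm(
      new webrtc::FakeAudioDeviceModule);

  // VoiceEngine takes ownership of the injected AudioProcessing.
  webrtc::AudioProcessing* audioproc = webrtc::AudioProcessing::Create(0);
  if (base->Init(adm.get(), audioproc) != 0) {
    // Handle the failure, e.g. via base->LastError().
  }

  // The injected instance is now exposed for capture-side configuration.
  assert(base->audio_processing() == audioproc);

  base->Terminate();
  base->Release();
  webrtc::VoiceEngine::Delete(voe);
}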
Diffstat (limited to 'voice_engine')
-rw-r--r--  voice_engine/include/voe_base.h            |  23
-rw-r--r--  voice_engine/shared_data.cc                |  11
-rw-r--r--  voice_engine/shared_data.h                 |  19
-rw-r--r--  voice_engine/voe_audio_processing_impl.cc  |  33
-rw-r--r--  voice_engine/voe_base_impl.cc              | 211
-rw-r--r--  voice_engine/voe_base_impl.h               |   7
-rw-r--r--  voice_engine/voe_base_unittest.cc          |  50
-rw-r--r--  voice_engine/voe_codec_unittest.cc         | 319
-rw-r--r--  voice_engine/voice_engine_core.gypi        |   1
-rw-r--r--  voice_engine/voice_engine_defines.h        |  69
10 files changed, 181 insertions, 562 deletions
diff --git a/voice_engine/include/voe_base.h b/voice_engine/include/voe_base.h
index b26f73a9..cb6fb3fb 100644
--- a/voice_engine/include/voe_base.h
+++ b/voice_engine/include/voe_base.h
@@ -40,6 +40,7 @@
namespace webrtc {
class AudioDeviceModule;
+class AudioProcessing;
const int kVoEDefault = -1;
@@ -100,7 +101,7 @@ public:
// Releases the VoEBase sub-API and decreases an internal reference
// counter. Returns the new reference count. This value should be zero
- // for all sub-API:s before the VoiceEngine object can be safely deleted.
+ // for all sub-APIs before the VoiceEngine object can be safely deleted.
virtual int Release() = 0;
// Installs the observer class to enable runtime error control and
@@ -111,12 +112,21 @@ public:
// and warning notifications.
virtual int DeRegisterVoiceEngineObserver() = 0;
- // Initiates all common parts of the VoiceEngine; e.g. all
+ // Initializes all common parts of the VoiceEngine; e.g. all
// encoders/decoders, the sound card and core receiving components.
- // This method also makes it possible to install a user-defined
- // external Audio Device Module (ADM) which implements all the audio
- // layer functionality in a separate (reference counted) module.
- virtual int Init(AudioDeviceModule* external_adm = NULL) = 0;
+ // This method also makes it possible to install some user-defined external
+ // modules:
+ // - The Audio Device Module (ADM) which implements all the audio layer
+ // functionality in a separate (reference counted) module.
+ // - The AudioProcessing module handles capture-side processing. VoiceEngine
+ // takes ownership of this object.
+ // If NULL is passed for any of these, VoiceEngine will create its own.
+ // TODO(ajm): Remove default NULLs.
+ virtual int Init(AudioDeviceModule* external_adm = NULL,
+ AudioProcessing* audioproc = NULL) = 0;
+
+ // Returns NULL before Init() is called.
+ virtual AudioProcessing* audio_processing() = 0;
// Terminates all VoiceEngine functions and releases allocated resources.
virtual int Terminate() = 0;
@@ -180,7 +190,6 @@ public:
// Gets the last VoiceEngine error code.
virtual int LastError() = 0;
-
// Stops or resumes playout and transmission on a temporary basis.
virtual int SetOnHoldStatus(int channel, bool enable,
OnHoldModes mode = kHoldSendAndPlay) = 0;
diff --git a/voice_engine/shared_data.cc b/voice_engine/shared_data.cc
index 7bea1e00..0426ecaa 100644
--- a/voice_engine/shared_data.cc
+++ b/voice_engine/shared_data.cc
@@ -29,7 +29,7 @@ SharedData::SharedData() :
_channelManager(_gInstanceCounter),
_engineStatistics(_gInstanceCounter),
_audioDevicePtr(NULL),
- _audioProcessingModulePtr(NULL),
+ audioproc_(NULL),
_moduleProcessThreadPtr(ProcessThread::CreateProcessThread()),
_externalRecording(false),
_externalPlayout(false)
@@ -56,7 +56,6 @@ SharedData::~SharedData()
if (_audioDevicePtr) {
_audioDevicePtr->Release();
}
- AudioProcessing::Destroy(_audioProcessingModulePtr);
delete _apiCritPtr;
ProcessThread::DestroyProcessThread(_moduleProcessThreadPtr);
Trace::ReturnTrace();
@@ -72,10 +71,10 @@ void SharedData::set_audio_device(AudioDeviceModule* audio_device)
_audioDevicePtr = audio_device;
}
-void SharedData::set_audio_processing(AudioProcessing* audio_processing) {
- if (_audioProcessingModulePtr)
- AudioProcessing::Destroy(_audioProcessingModulePtr);
- _audioProcessingModulePtr = audio_processing;
+void SharedData::set_audio_processing(AudioProcessing* audioproc) {
+ audioproc_.reset(audioproc);
+ _transmitMixerPtr->SetAudioProcessingModule(audioproc);
+ _outputMixerPtr->SetAudioProcessingModule(audioproc);
}
WebRtc_UWord16 SharedData::NumOfSendingChannels()
diff --git a/voice_engine/shared_data.h b/voice_engine/shared_data.h
index 191e369e..cce63686 100644
--- a/voice_engine/shared_data.h
+++ b/voice_engine/shared_data.h
@@ -11,14 +11,13 @@
#ifndef WEBRTC_VOICE_ENGINE_SHARED_DATA_H
#define WEBRTC_VOICE_ENGINE_SHARED_DATA_H
-#include "voice_engine_defines.h"
-
-#include "channel_manager.h"
-#include "statistics.h"
-#include "process_thread.h"
-
-#include "audio_device.h"
-#include "audio_processing.h"
+#include "webrtc/modules/audio_device/include/audio_device.h"
+#include "webrtc/modules/audio_processing/include/audio_processing.h"
+#include "webrtc/modules/utility/interface/process_thread.h"
+#include "webrtc/system_wrappers/interface/scoped_ptr.h"
+#include "webrtc/voice_engine/channel_manager.h"
+#include "webrtc/voice_engine/statistics.h"
+#include "webrtc/voice_engine/voice_engine_defines.h"
class ProcessThread;
@@ -39,7 +38,7 @@ public:
ChannelManager& channel_manager() { return _channelManager; }
AudioDeviceModule* audio_device() { return _audioDevicePtr; }
void set_audio_device(AudioDeviceModule* audio_device);
- AudioProcessing* audio_processing() { return _audioProcessingModulePtr; }
+ AudioProcessing* audio_processing() { return audioproc_.get(); }
void set_audio_processing(AudioProcessing* audio_processing);
TransmitMixer* transmit_mixer() { return _transmitMixerPtr; }
OutputMixer* output_mixer() { return _outputMixerPtr; }
@@ -72,7 +71,7 @@ protected:
AudioDeviceModule* _audioDevicePtr;
OutputMixer* _outputMixerPtr;
TransmitMixer* _transmitMixerPtr;
- AudioProcessing* _audioProcessingModulePtr;
+ scoped_ptr<AudioProcessing> audioproc_;
ProcessThread* _moduleProcessThreadPtr;
bool _externalRecording;
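
Ownership note, as a minimal hypothetical sketch (Owner/Widget are illustration-only names, not WebRTC classes): SharedData now holds the APM in a scoped_ptr, so set_audio_processing() adopts the raw pointer and deletes any previously installed instance, and the set_audio_processing(NULL) call in TerminateInternal() is what finally releases it.

#include "webrtc/system_wrappers/interface/scoped_ptr.h"

struct Widget { int value; };

class Owner {
 public:
  // Adopts |w|; any previously owned Widget is deleted here.
  void set_widget(Widget* w) { widget_.reset(w); }
  // Hands out a non-owning pointer; NULL until a Widget is installed.
  Widget* widget() { return widget_.get(); }
 private:
  webrtc::scoped_ptr<Widget> widget_;
};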
diff --git a/voice_engine/voe_audio_processing_impl.cc b/voice_engine/voe_audio_processing_impl.cc
index a9780f27..dd1195c1 100644
--- a/voice_engine/voe_audio_processing_impl.cc
+++ b/voice_engine/voe_audio_processing_impl.cc
@@ -79,11 +79,10 @@ int VoEAudioProcessingImpl::SetNsStatus(bool enable, NsModes mode) {
return -1;
}
- NoiseSuppression::Level nsLevel(
- (NoiseSuppression::Level)WEBRTC_VOICE_ENGINE_NS_DEFAULT_MODE);
+ NoiseSuppression::Level nsLevel = kDefaultNsMode;
switch (mode) {
case kNsDefault:
- nsLevel = (NoiseSuppression::Level)WEBRTC_VOICE_ENGINE_NS_DEFAULT_MODE;
+ nsLevel = kDefaultNsMode;
break;
case kNsUnchanged:
nsLevel = _shared->audio_processing()->noise_suppression()->level();
@@ -134,14 +133,9 @@ int VoEAudioProcessingImpl::GetNsStatus(bool& enabled, NsModes& mode) {
return -1;
}
- bool enable(false);
- NoiseSuppression::Level nsLevel(
- (NoiseSuppression::Level)WEBRTC_VOICE_ENGINE_NS_DEFAULT_MODE);
-
- enable = _shared->audio_processing()->noise_suppression()->is_enabled();
- nsLevel = _shared->audio_processing()->noise_suppression()->level();
-
- enabled = enable;
+ enabled = _shared->audio_processing()->noise_suppression()->is_enabled();
+ NoiseSuppression::Level nsLevel =
+ _shared->audio_processing()->noise_suppression()->level();
switch (nsLevel) {
case NoiseSuppression::kLow:
@@ -185,14 +179,13 @@ int VoEAudioProcessingImpl::SetAgcStatus(bool enable, AgcModes mode) {
}
#endif
- GainControl::Mode agcMode(
- (GainControl::Mode)WEBRTC_VOICE_ENGINE_AGC_DEFAULT_MODE);
+ GainControl::Mode agcMode = kDefaultAgcMode;
switch (mode) {
case kAgcDefault:
- agcMode = (GainControl::Mode)WEBRTC_VOICE_ENGINE_AGC_DEFAULT_MODE;
+ agcMode = kDefaultAgcMode;
break;
case kAgcUnchanged:
- agcMode = _shared->audio_processing()->gain_control()->mode();;
+ agcMode = _shared->audio_processing()->gain_control()->mode();
break;
case kAgcFixedDigital:
agcMode = GainControl::kFixedDigital;
@@ -244,14 +237,8 @@ int VoEAudioProcessingImpl::GetAgcStatus(bool& enabled, AgcModes& mode) {
return -1;
}
- bool enable(false);
- GainControl::Mode agcMode(
- (GainControl::Mode)WEBRTC_VOICE_ENGINE_AGC_DEFAULT_MODE);
-
- enable = _shared->audio_processing()->gain_control()->is_enabled();
- agcMode = _shared->audio_processing()->gain_control()->mode();
-
- enabled = enable;
+ enabled = _shared->audio_processing()->gain_control()->is_enabled();
+ GainControl::Mode agcMode = _shared->audio_processing()->gain_control()->mode();
switch (agcMode) {
case GainControl::kFixedDigital:
diff --git a/voice_engine/voe_base_impl.cc b/voice_engine/voe_base_impl.cc
index 28eb35cd..1d5cf71b 100644
--- a/voice_engine/voe_base_impl.cc
+++ b/voice_engine/voe_base_impl.cc
@@ -330,7 +330,8 @@ int VoEBaseImpl::DeRegisterVoiceEngineObserver()
return 0;
}
-int VoEBaseImpl::Init(AudioDeviceModule* external_adm)
+int VoEBaseImpl::Init(AudioDeviceModule* external_adm,
+ AudioProcessing* audioproc)
{
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
"Init(external_adm=0x%p)", external_adm);
@@ -486,146 +487,73 @@ int VoEBaseImpl::Init(AudioDeviceModule* external_adm)
"Init() failed to set mono/stereo recording mode");
}
- // APM initialization done after sound card since we need
- // to know if we support stereo recording or not.
-
- // Create the AudioProcessing Module if it does not exist.
-
- if (_shared->audio_processing() == NULL)
- {
- _shared->set_audio_processing(AudioProcessing::Create(
- VoEId(_shared->instance_id(), -1)));
- if (_shared->audio_processing() == NULL)
- {
- _shared->SetLastError(VE_NO_MEMORY, kTraceCritical,
- "Init() failed to create the AP module");
- return -1;
- }
- // Ensure that mixers in both directions has access to the created APM
- _shared->transmit_mixer()->SetAudioProcessingModule(
- _shared->audio_processing());
- _shared->output_mixer()->SetAudioProcessingModule(
- _shared->audio_processing());
-
- if (_shared->audio_processing()->echo_cancellation()->
- set_device_sample_rate_hz(
- kVoiceEngineAudioProcessingDeviceSampleRateHz))
- {
- _shared->SetLastError(VE_APM_ERROR, kTraceError,
- "Init() failed to set the device sample rate to 48K for AP "
- " module");
- return -1;
- }
- // Using 8 kHz as inital Fs. Might be changed already at first call.
- if (_shared->audio_processing()->set_sample_rate_hz(8000))
- {
- _shared->SetLastError(VE_APM_ERROR, kTraceError,
- "Init() failed to set the sample rate to 8K for AP module");
- return -1;
- }
+ if (!audioproc) {
+ audioproc = AudioProcessing::Create(VoEId(_shared->instance_id(), -1));
+ if (!audioproc) {
+ LOG(LS_ERROR) << "Failed to create AudioProcessing.";
+ _shared->SetLastError(VE_NO_MEMORY);
+ return -1;
+ }
+ }
+ _shared->set_audio_processing(audioproc);
- // Assume mono until the audio frames are received from the capture
- // device, at which point this can be updated.
- if (_shared->audio_processing()->set_num_channels(1, 1) != 0)
- {
- _shared->SetLastError(VE_SOUNDCARD_ERROR, kTraceError,
- "Init() failed to set channels for the primary audio stream");
- return -1;
- }
+ // Set the error state for any failures in this block.
+ _shared->SetLastError(VE_APM_ERROR);
+ if (audioproc->echo_cancellation()->set_device_sample_rate_hz(48000)) {
+ LOG_FERR1(LS_ERROR, set_device_sample_rate_hz, 48000);
+ return -1;
+ }
+ // Assume 16 kHz mono until the audio frames are received from the capture
+ // device, at which point this can be updated.
+ if (audioproc->set_sample_rate_hz(16000)) {
+ LOG_FERR1(LS_ERROR, set_sample_rate_hz, 16000);
+ return -1;
+ }
+ if (audioproc->set_num_channels(1, 1) != 0) {
+ LOG_FERR2(LS_ERROR, set_num_channels, 1, 1);
+ return -1;
+ }
+ if (audioproc->set_num_reverse_channels(1) != 0) {
+ LOG_FERR1(LS_ERROR, set_num_reverse_channels, 1);
+ return -1;
+ }
- if (_shared->audio_processing()->set_num_reverse_channels(1) != 0)
- {
- _shared->SetLastError(VE_SOUNDCARD_ERROR, kTraceError,
- "Init() failed to set channels for the primary audio stream");
- return -1;
- }
- // high-pass filter
- if (_shared->audio_processing()->high_pass_filter()->Enable(
- WEBRTC_VOICE_ENGINE_HP_DEFAULT_STATE) != 0)
- {
- _shared->SetLastError(VE_APM_ERROR, kTraceError,
- "Init() failed to set the high-pass filter for AP module");
- return -1;
- }
- // Echo Cancellation
- if (_shared->audio_processing()->echo_cancellation()->
- enable_drift_compensation(false) != 0)
- {
- _shared->SetLastError(VE_APM_ERROR, kTraceError,
- "Init() failed to set drift compensation for AP module");
- return -1;
- }
- if (_shared->audio_processing()->echo_cancellation()->Enable(
- WEBRTC_VOICE_ENGINE_EC_DEFAULT_STATE))
- {
- _shared->SetLastError(VE_APM_ERROR, kTraceError,
- "Init() failed to set echo cancellation state for AP module");
- return -1;
- }
- // Noise Reduction
- if (_shared->audio_processing()->noise_suppression()->set_level(
- (NoiseSuppression::Level) WEBRTC_VOICE_ENGINE_NS_DEFAULT_MODE)
- != 0)
- {
- _shared->SetLastError(VE_APM_ERROR, kTraceError,
- "Init() failed to set noise reduction level for AP module");
- return -1;
- }
- if (_shared->audio_processing()->noise_suppression()->Enable(
- WEBRTC_VOICE_ENGINE_NS_DEFAULT_STATE) != 0)
- {
- _shared->SetLastError(VE_APM_ERROR, kTraceError,
- "Init() failed to set noise reduction state for AP module");
- return -1;
- }
- // Automatic Gain control
- if (_shared->audio_processing()->gain_control()->
- set_analog_level_limits(kMinVolumeLevel,kMaxVolumeLevel) != 0)
- {
- _shared->SetLastError(VE_APM_ERROR, kTraceError,
- "Init() failed to set AGC analog level for AP module");
- return -1;
- }
- if (_shared->audio_processing()->gain_control()->set_mode(
- (GainControl::Mode) WEBRTC_VOICE_ENGINE_AGC_DEFAULT_MODE)
- != 0)
- {
- _shared->SetLastError(VE_APM_ERROR, kTraceError,
- "Init() failed to set AGC mode for AP module");
- return -1;
- }
- if (_shared->audio_processing()->gain_control()->Enable(
- WEBRTC_VOICE_ENGINE_AGC_DEFAULT_STATE)
- != 0)
- {
- _shared->SetLastError(VE_APM_ERROR, kTraceError,
- "Init() failed to set AGC state for AP module");
- return -1;
- }
- // VAD
- if (_shared->audio_processing()->voice_detection()->Enable(
- WEBRTC_VOICE_ENGINE_VAD_DEFAULT_STATE)
- != 0)
- {
- _shared->SetLastError(VE_APM_ERROR, kTraceError,
- "Init() failed to set VAD state for AP module");
- return -1;
- }
+ // Configure AudioProcessing components. All are disabled by default.
+ if (audioproc->high_pass_filter()->Enable(true) != 0) {
+ LOG_FERR1(LS_ERROR, high_pass_filter()->Enable, true);
+ return -1;
+ }
+ if (audioproc->echo_cancellation()->enable_drift_compensation(false) != 0) {
+ LOG_FERR1(LS_ERROR, enable_drift_compensation, false);
+ return -1;
+ }
+ if (audioproc->noise_suppression()->set_level(kDefaultNsMode) != 0) {
+ LOG_FERR1(LS_ERROR, noise_suppression()->set_level, kDefaultNsMode);
+ return -1;
+ }
+ GainControl* agc = audioproc->gain_control();
+ if (agc->set_analog_level_limits(kMinVolumeLevel, kMaxVolumeLevel) != 0) {
+ LOG_FERR2(LS_ERROR, agc->set_analog_level_limits, kMinVolumeLevel,
+ kMaxVolumeLevel);
+ return -1;
+ }
+ if (agc->set_mode(kDefaultAgcMode) != 0) {
+ LOG_FERR1(LS_ERROR, agc->set_mode, kDefaultAgcMode);
+ return -1;
}
+ if (agc->Enable(kDefaultAgcState) != 0) {
+ LOG_FERR1(LS_ERROR, agc->Enable, kDefaultAgcState);
+ return -1;
+ }
+ _shared->SetLastError(0); // Clear error state.
- // Set default AGC mode for the ADM
#ifdef WEBRTC_VOICE_ENGINE_AGC
- bool enable(false);
- if (_shared->audio_processing()->gain_control()->mode()
- != GainControl::kFixedDigital)
- {
- enable = _shared->audio_processing()->gain_control()->is_enabled();
- // Only set the AGC mode for the ADM when Adaptive AGC mode is selected
- if (_shared->audio_device()->SetAGC(enable) != 0)
- {
- _shared->SetLastError(VE_AUDIO_DEVICE_MODULE_ERROR,
- kTraceError, "Init() failed to set default AGC mode in ADM 0");
- }
+ bool agc_enabled = agc->mode() == GainControl::kAdaptiveAnalog &&
+ agc->is_enabled();
+ if (_shared->audio_device()->SetAGC(agc_enabled) != 0) {
+ LOG_FERR1(LS_ERROR, audio_device()->SetAGC, agc_enabled);
+ _shared->SetLastError(VE_AUDIO_DEVICE_MODULE_ERROR);
+ return -1;
}
#endif
@@ -1578,9 +1506,7 @@ WebRtc_Word32 VoEBaseImpl::TerminateInternal()
}
}
- // Audio Device Module
-
- if (_shared->audio_device() != NULL)
+ if (_shared->audio_device())
{
if (_shared->audio_device()->StopPlayout() != 0)
{
@@ -1607,15 +1533,10 @@ WebRtc_Word32 VoEBaseImpl::TerminateInternal()
_shared->SetLastError(VE_AUDIO_DEVICE_MODULE_ERROR, kTraceError,
"TerminateInternal() failed to terminate the ADM");
}
-
_shared->set_audio_device(NULL);
}
- // AP module
-
- if (_shared->audio_processing() != NULL)
- {
- _shared->transmit_mixer()->SetAudioProcessingModule(NULL);
+ if (_shared->audio_processing()) {
_shared->set_audio_processing(NULL);
}
diff --git a/voice_engine/voe_base_impl.h b/voice_engine/voe_base_impl.h
index 0eb44fa3..3ff29e92 100644
--- a/voice_engine/voe_base_impl.h
+++ b/voice_engine/voe_base_impl.h
@@ -30,7 +30,11 @@ public:
virtual int DeRegisterVoiceEngineObserver();
- virtual int Init(AudioDeviceModule* external_adm = NULL);
+ virtual int Init(AudioDeviceModule* external_adm = NULL,
+ AudioProcessing* audioproc = NULL);
+ virtual AudioProcessing* audio_processing() {
+ return _shared->audio_processing();
+ }
virtual int Terminate();
@@ -79,7 +83,6 @@ public:
virtual int GetNetEQBGNMode(int channel, NetEqBgnModes& mode);
-
virtual int SetOnHoldStatus(int channel,
bool enable,
OnHoldModes mode = kHoldSendAndPlay);
diff --git a/voice_engine/voe_base_unittest.cc b/voice_engine/voe_base_unittest.cc
new file mode 100644
index 00000000..144c8417
--- /dev/null
+++ b/voice_engine/voe_base_unittest.cc
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/voice_engine/include/voe_base.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/modules/audio_processing/include/audio_processing.h"
+#include "webrtc/modules/audio_device/include/fake_audio_device.h"
+#include "webrtc/system_wrappers/interface/scoped_ptr.h"
+
+namespace webrtc {
+
+class VoEBaseTest : public ::testing::Test {
+ protected:
+ VoEBaseTest() :
+ voe_(VoiceEngine::Create()),
+ base_(VoEBase::GetInterface(voe_)),
+ adm_(new FakeAudioDeviceModule) {
+ }
+
+ ~VoEBaseTest() {
+ base_->Release();
+ VoiceEngine::Delete(voe_);
+ }
+
+ VoiceEngine* voe_;
+ VoEBase* base_;
+ scoped_ptr<FakeAudioDeviceModule> adm_;
+};
+
+TEST_F(VoEBaseTest, AcceptsAudioProcessingPtr) {
+ AudioProcessing* audioproc = AudioProcessing::Create(0);
+ EXPECT_EQ(0, base_->Init(adm_.get(), audioproc));
+ EXPECT_EQ(audioproc, base_->audio_processing());
+}
+
+TEST_F(VoEBaseTest, AudioProcessingCreatedAfterInit) {
+ EXPECT_TRUE(base_->audio_processing() == NULL);
+ EXPECT_EQ(0, base_->Init(adm_.get(), NULL));
+ EXPECT_TRUE(base_->audio_processing() != NULL);
+}
+
+} // namespace webrtc
diff --git a/voice_engine/voe_codec_unittest.cc b/voice_engine/voe_codec_unittest.cc
index e473ddf1..2e8fe619 100644
--- a/voice_engine/voe_codec_unittest.cc
+++ b/voice_engine/voe_codec_unittest.cc
@@ -11,8 +11,7 @@
#include "webrtc/voice_engine/include/voe_codec.h"
#include "gtest/gtest.h"
-#include "webrtc/modules/audio_device/include/audio_device.h"
-#include "webrtc/modules/audio_device/include/audio_device_defines.h"
+#include "webrtc/modules/audio_device/include/fake_audio_device.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
#include "webrtc/voice_engine/include/voe_base.h"
#include "webrtc/voice_engine/include/voe_hardware.h"
@@ -22,322 +21,6 @@ namespace webrtc {
namespace voe {
namespace {
-
-class FakeAudioDeviceModule : public AudioDeviceModule {
- public:
- FakeAudioDeviceModule() {}
- ~FakeAudioDeviceModule() {}
- virtual int32_t AddRef() { return 0; }
- virtual int32_t Release() { return 0; }
- virtual int32_t RegisterEventObserver(AudioDeviceObserver* eventCallback) {
- return 0;
- }
- virtual int32_t RegisterAudioCallback(AudioTransport* audioCallback) {
- return 0;
- }
- virtual int32_t Init() { return 0; }
- virtual int32_t SpeakerIsAvailable(bool* available) {
- *available = true;
- return 0;
- }
- virtual int32_t InitSpeaker() { return 0; }
- virtual int32_t SetPlayoutDevice(uint16_t index) { return 0; }
- virtual int32_t SetPlayoutDevice(WindowsDeviceType device) { return 0; }
- virtual int32_t SetStereoPlayout(bool enable) { return 0; }
- virtual int32_t StopPlayout() { return 0; }
- virtual int32_t MicrophoneIsAvailable(bool* available) {
- *available = true;
- return 0;
- }
- virtual int32_t InitMicrophone() { return 0; }
- virtual int32_t SetRecordingDevice(uint16_t index) { return 0; }
- virtual int32_t SetRecordingDevice(WindowsDeviceType device) { return 0; }
- virtual int32_t SetStereoRecording(bool enable) { return 0; }
- virtual int32_t SetAGC(bool enable) { return 0; }
- virtual int32_t StopRecording() { return 0; }
- virtual int32_t TimeUntilNextProcess() { return 0; }
- virtual int32_t Process() { return 0; }
- virtual int32_t Terminate() { return 0; }
-
- virtual int32_t ActiveAudioLayer(AudioLayer* audioLayer) const {
- assert(false);
- return 0;
- }
- virtual ErrorCode LastError() const {
- assert(false);
- return kAdmErrNone;
- }
- virtual bool Initialized() const {
- assert(false);
- return true;
- }
- virtual int16_t PlayoutDevices() {
- assert(false);
- return 0;
- }
- virtual int16_t RecordingDevices() {
- assert(false);
- return 0;
- }
- virtual int32_t PlayoutDeviceName(uint16_t index,
- char name[kAdmMaxDeviceNameSize],
- char guid[kAdmMaxGuidSize]) {
- assert(false);
- return 0;
- }
- virtual int32_t RecordingDeviceName(uint16_t index,
- char name[kAdmMaxDeviceNameSize],
- char guid[kAdmMaxGuidSize]) {
- assert(false);
- return 0;
- }
- virtual int32_t PlayoutIsAvailable(bool* available) {
- assert(false);
- return 0;
- }
- virtual int32_t InitPlayout() {
- assert(false);
- return 0;
- }
- virtual bool PlayoutIsInitialized() const {
- assert(false);
- return true;
- }
- virtual int32_t RecordingIsAvailable(bool* available) {
- assert(false);
- return 0;
- }
- virtual int32_t InitRecording() {
- assert(false);
- return 0;
- }
- virtual bool RecordingIsInitialized() const {
- assert(false);
- return true;
- }
- virtual int32_t StartPlayout() {
- assert(false);
- return 0;
- }
- virtual bool Playing() const {
- assert(false);
- return false;
- }
- virtual int32_t StartRecording() {
- assert(false);
- return 0;
- }
- virtual bool Recording() const {
- assert(false);
- return false;
- }
- virtual bool AGC() const {
- assert(false);
- return true;
- }
- virtual int32_t SetWaveOutVolume(uint16_t volumeLeft,
- uint16_t volumeRight) {
- assert(false);
- return 0;
- }
- virtual int32_t WaveOutVolume(uint16_t* volumeLeft,
- uint16_t* volumeRight) const {
- assert(false);
- return 0;
- }
- virtual bool SpeakerIsInitialized() const {
- assert(false);
- return true;
- }
- virtual bool MicrophoneIsInitialized() const {
- assert(false);
- return true;
- }
- virtual int32_t SpeakerVolumeIsAvailable(bool* available) {
- assert(false);
- return 0;
- }
- virtual int32_t SetSpeakerVolume(uint32_t volume) {
- assert(false);
- return 0;
- }
- virtual int32_t SpeakerVolume(uint32_t* volume) const {
- assert(false);
- return 0;
- }
- virtual int32_t MaxSpeakerVolume(uint32_t* maxVolume) const {
- assert(false);
- return 0;
- }
- virtual int32_t MinSpeakerVolume(uint32_t* minVolume) const {
- assert(false);
- return 0;
- }
- virtual int32_t SpeakerVolumeStepSize(uint16_t* stepSize) const {
- assert(false);
- return 0;
- }
- virtual int32_t MicrophoneVolumeIsAvailable(bool* available) {
- assert(false);
- return 0;
- }
- virtual int32_t SetMicrophoneVolume(uint32_t volume) {
- assert(false);
- return 0;
- }
- virtual int32_t MicrophoneVolume(uint32_t* volume) const {
- assert(false);
- return 0;
- }
- virtual int32_t MaxMicrophoneVolume(uint32_t* maxVolume) const {
- assert(false);
- return 0;
- }
- virtual int32_t MinMicrophoneVolume(uint32_t* minVolume) const {
- assert(false);
- return 0;
- }
- virtual int32_t MicrophoneVolumeStepSize(uint16_t* stepSize) const {
- assert(false);
- return 0;
- }
- virtual int32_t SpeakerMuteIsAvailable(bool* available) {
- assert(false);
- return 0;
- }
- virtual int32_t SetSpeakerMute(bool enable) {
- assert(false);
- return 0;
- }
- virtual int32_t SpeakerMute(bool* enabled) const {
- assert(false);
- return 0;
- }
- virtual int32_t MicrophoneMuteIsAvailable(bool* available) {
- assert(false);
- return 0;
- }
- virtual int32_t SetMicrophoneMute(bool enable) {
- assert(false);
- return 0;
- }
- virtual int32_t MicrophoneMute(bool* enabled) const {
- assert(false);
- return 0;
- }
- virtual int32_t MicrophoneBoostIsAvailable(bool* available) {
- assert(false);
- return 0;
- }
- virtual int32_t SetMicrophoneBoost(bool enable) {
- assert(false);
- return 0;
- }
- virtual int32_t MicrophoneBoost(bool* enabled) const {
- assert(false);
- return 0;
- }
- virtual int32_t StereoPlayoutIsAvailable(bool* available) const {
- *available = false;
- return 0;
- }
- virtual int32_t StereoPlayout(bool* enabled) const {
- assert(false);
- return 0;
- }
- virtual int32_t StereoRecordingIsAvailable(bool* available) const {
- *available = false;
- return 0;
- }
- virtual int32_t StereoRecording(bool* enabled) const {
- assert(false);
- return 0;
- }
- virtual int32_t SetRecordingChannel(const ChannelType channel) {
- assert(false);
- return 0;
- }
- virtual int32_t RecordingChannel(ChannelType* channel) const {
- assert(false);
- return 0;
- }
- virtual int32_t SetPlayoutBuffer(const BufferType type,
- uint16_t sizeMS = 0) {
- assert(false);
- return 0;
- }
- virtual int32_t PlayoutBuffer(BufferType* type, uint16_t* sizeMS) const {
- assert(false);
- return 0;
- }
- virtual int32_t PlayoutDelay(uint16_t* delayMS) const {
- assert(false);
- return 0;
- }
- virtual int32_t RecordingDelay(uint16_t* delayMS) const {
- assert(false);
- return 0;
- }
- virtual int32_t CPULoad(uint16_t* load) const {
- assert(false);
- return 0;
- }
- virtual int32_t StartRawOutputFileRecording(
- const char pcmFileNameUTF8[kAdmMaxFileNameSize]) {
- assert(false);
- return 0;
- }
- virtual int32_t StopRawOutputFileRecording() {
- assert(false);
- return 0;
- }
- virtual int32_t StartRawInputFileRecording(
- const char pcmFileNameUTF8[kAdmMaxFileNameSize]) {
- assert(false);
- return 0;
- }
- virtual int32_t StopRawInputFileRecording() {
- assert(false);
- return 0;
- }
- virtual int32_t SetRecordingSampleRate(const uint32_t samplesPerSec) {
- assert(false);
- return 0;
- }
- virtual int32_t RecordingSampleRate(uint32_t* samplesPerSec) const {
- assert(false);
- return 0;
- }
- virtual int32_t SetPlayoutSampleRate(const uint32_t samplesPerSec) {
- assert(false);
- return 0;
- }
- virtual int32_t PlayoutSampleRate(uint32_t* samplesPerSec) const {
- assert(false);
- return 0;
- }
- virtual int32_t ResetAudioDevice() {
- assert(false);
- return 0;
- }
- virtual int32_t SetLoudspeakerStatus(bool enable) {
- assert(false);
- return 0;
- }
- virtual int32_t GetLoudspeakerStatus(bool* enabled) const {
- assert(false);
- return 0;
- }
- virtual int32_t EnableBuiltInAEC(bool enable) {
- assert(false);
- return -1;
- }
- virtual bool BuiltInAECIsEnabled() const {
- assert(false);
- return false;
- }
-};
-
class VoECodecTest : public ::testing::Test {
protected:
VoECodecTest()
diff --git a/voice_engine/voice_engine_core.gypi b/voice_engine/voice_engine_core.gypi
index 3d99be72..1c28c0d4 100644
--- a/voice_engine/voice_engine_core.gypi
+++ b/voice_engine/voice_engine_core.gypi
@@ -146,6 +146,7 @@
'output_mixer_unittest.cc',
'transmit_mixer_unittest.cc',
'voe_audio_processing_unittest.cc',
+ 'voe_base_unittest.cc',
'voe_codec_unittest.cc',
],
},
diff --git a/voice_engine/voice_engine_defines.h b/voice_engine/voice_engine_defines.h
index a542c3e9..e5c9af9d 100644
--- a/voice_engine/voice_engine_defines.h
+++ b/voice_engine/voice_engine_defines.h
@@ -18,6 +18,7 @@
#include "webrtc/common_types.h"
#include "webrtc/engine_configurations.h"
+#include "webrtc/modules/audio_processing/include/audio_processing.h"
#include "webrtc/system_wrappers/interface/logging.h"
// ----------------------------------------------------------------------------
@@ -77,7 +78,19 @@ enum { kVoiceEngineMaxSrtpTagAuthNullLength = 12 };
enum { kVoiceEngineMaxSrtpKeyAuthNullLength = 256 };
// Audio processing
-enum { kVoiceEngineAudioProcessingDeviceSampleRateHz = 48000 };
+const NoiseSuppression::Level kDefaultNsMode = NoiseSuppression::kModerate;
+const GainControl::Mode kDefaultAgcMode =
+#if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS)
+ GainControl::kAdaptiveDigital;
+#else
+ GainControl::kAdaptiveAnalog;
+#endif
+const bool kDefaultAgcState =
+#if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS)
+ false;
+#else
+ true;
+#endif
// Codec
// Min init target rate for iSAC-wb
@@ -129,31 +142,15 @@ enum { kVoiceEngineMaxRtpExtensionId = 14 };
} // namespace webrtc
-// TODO(andrew): we shouldn't be using the precompiler for this.
+// TODO(ajm): we shouldn't be using the precompiler for this.
// Use enums or bools as appropriate.
-#define WEBRTC_AUDIO_PROCESSING_OFF false
-
-#define WEBRTC_VOICE_ENGINE_HP_DEFAULT_STATE true
- // AudioProcessing HP is ON
-#define WEBRTC_VOICE_ENGINE_NS_DEFAULT_STATE WEBRTC_AUDIO_PROCESSING_OFF
- // AudioProcessing NS off
-#define WEBRTC_VOICE_ENGINE_AGC_DEFAULT_STATE true
- // AudioProcessing AGC on
-#define WEBRTC_VOICE_ENGINE_EC_DEFAULT_STATE WEBRTC_AUDIO_PROCESSING_OFF
- // AudioProcessing EC off
-#define WEBRTC_VOICE_ENGINE_VAD_DEFAULT_STATE WEBRTC_AUDIO_PROCESSING_OFF
- // AudioProcessing off
-#define WEBRTC_VOICE_ENGINE_RX_AGC_DEFAULT_STATE WEBRTC_AUDIO_PROCESSING_OFF
+#define WEBRTC_VOICE_ENGINE_RX_AGC_DEFAULT_STATE false
// AudioProcessing RX AGC off
-#define WEBRTC_VOICE_ENGINE_RX_NS_DEFAULT_STATE WEBRTC_AUDIO_PROCESSING_OFF
+#define WEBRTC_VOICE_ENGINE_RX_NS_DEFAULT_STATE false
// AudioProcessing RX NS off
-#define WEBRTC_VOICE_ENGINE_RX_HP_DEFAULT_STATE WEBRTC_AUDIO_PROCESSING_OFF
+#define WEBRTC_VOICE_ENGINE_RX_HP_DEFAULT_STATE false
// AudioProcessing RX High Pass Filter off
-#define WEBRTC_VOICE_ENGINE_NS_DEFAULT_MODE NoiseSuppression::kModerate
- // AudioProcessing NS moderate suppression
-#define WEBRTC_VOICE_ENGINE_AGC_DEFAULT_MODE GainControl::kAdaptiveAnalog
- // AudioProcessing AGC analog digital combined
#define WEBRTC_VOICE_ENGINE_RX_AGC_DEFAULT_MODE GainControl::kAdaptiveDigital
// AudioProcessing AGC mode
#define WEBRTC_VOICE_ENGINE_RX_NS_DEFAULT_MODE NoiseSuppression::kModerate
@@ -346,22 +343,6 @@ inline int VoEChannelId(const int moduleId)
#undef WEBRTC_CONFERENCING
#undef WEBRTC_TYPING_DETECTION
- // Default audio processing states
- #undef WEBRTC_VOICE_ENGINE_NS_DEFAULT_STATE
- #undef WEBRTC_VOICE_ENGINE_AGC_DEFAULT_STATE
- #undef WEBRTC_VOICE_ENGINE_EC_DEFAULT_STATE
- #define WEBRTC_VOICE_ENGINE_NS_DEFAULT_STATE WEBRTC_AUDIO_PROCESSING_OFF
- #define WEBRTC_VOICE_ENGINE_AGC_DEFAULT_STATE WEBRTC_AUDIO_PROCESSING_OFF
- #define WEBRTC_VOICE_ENGINE_EC_DEFAULT_STATE WEBRTC_AUDIO_PROCESSING_OFF
-
- // Default audio processing modes
- #undef WEBRTC_VOICE_ENGINE_NS_DEFAULT_MODE
- #undef WEBRTC_VOICE_ENGINE_AGC_DEFAULT_MODE
- #define WEBRTC_VOICE_ENGINE_NS_DEFAULT_MODE \
- NoiseSuppression::kModerate
- #define WEBRTC_VOICE_ENGINE_AGC_DEFAULT_MODE \
- GainControl::kAdaptiveDigital
-
#define ANDROID_NOT_SUPPORTED(stat) NOT_SUPPORTED(stat)
#else // LINUX PC
@@ -444,20 +425,6 @@ inline int VoEChannelId(const int moduleId)
#undef WEBRTC_CODEC_ISAC
#undef WEBRTC_VOE_EXTERNAL_REC_AND_PLAYOUT
- #undef WEBRTC_VOICE_ENGINE_NS_DEFAULT_STATE
- #undef WEBRTC_VOICE_ENGINE_AGC_DEFAULT_STATE
- #undef WEBRTC_VOICE_ENGINE_EC_DEFAULT_STATE
- #define WEBRTC_VOICE_ENGINE_NS_DEFAULT_STATE WEBRTC_AUDIO_PROCESSING_OFF
- #define WEBRTC_VOICE_ENGINE_AGC_DEFAULT_STATE WEBRTC_AUDIO_PROCESSING_OFF
- #define WEBRTC_VOICE_ENGINE_EC_DEFAULT_STATE WEBRTC_AUDIO_PROCESSING_OFF
-
- #undef WEBRTC_VOICE_ENGINE_NS_DEFAULT_MODE
- #undef WEBRTC_VOICE_ENGINE_AGC_DEFAULT_MODE
- #define WEBRTC_VOICE_ENGINE_NS_DEFAULT_MODE \
- NoiseSuppression::kModerate
- #define WEBRTC_VOICE_ENGINE_AGC_DEFAULT_MODE \
- GainControl::kAdaptiveDigital
-
#define IPHONE_NOT_SUPPORTED(stat) NOT_SUPPORTED(stat)
#else // Non-iPhone