Diffstat (limited to 'src/modules/audio_processing/main')
-rw-r--r--  src/modules/audio_processing/main/apm_tests.gyp | 60
-rw-r--r--  src/modules/audio_processing/main/interface/audio_processing.h | 564
-rw-r--r--  src/modules/audio_processing/main/source/Android.mk | 72
-rw-r--r--  src/modules/audio_processing/main/source/apm.gyp | 77
-rw-r--r--  src/modules/audio_processing/main/source/audio_buffer.cc | 278
-rw-r--r--  src/modules/audio_processing/main/source/audio_buffer.h | 68
-rw-r--r--  src/modules/audio_processing/main/source/audio_processing_impl.cc | 636
-rw-r--r--  src/modules/audio_processing/main/source/audio_processing_impl.h | 109
-rw-r--r--  src/modules/audio_processing/main/source/echo_cancellation_impl.cc | 348
-rw-r--r--  src/modules/audio_processing/main/source/echo_cancellation_impl.h | 72
-rw-r--r--  src/modules/audio_processing/main/source/echo_control_mobile_impl.cc | 245
-rw-r--r--  src/modules/audio_processing/main/source/echo_control_mobile_impl.h | 59
-rw-r--r--  src/modules/audio_processing/main/source/gain_control_impl.cc | 391
-rw-r--r--  src/modules/audio_processing/main/source/gain_control_impl.h | 80
-rw-r--r--  src/modules/audio_processing/main/source/high_pass_filter_impl.cc | 180
-rw-r--r--  src/modules/audio_processing/main/source/high_pass_filter_impl.h | 51
-rw-r--r--  src/modules/audio_processing/main/source/level_estimator_impl.cc | 182
-rw-r--r--  src/modules/audio_processing/main/source/level_estimator_impl.h | 53
-rw-r--r--  src/modules/audio_processing/main/source/noise_suppression_impl.cc | 179
-rw-r--r--  src/modules/audio_processing/main/source/noise_suppression_impl.h | 54
-rw-r--r--  src/modules/audio_processing/main/source/processing_component.cc | 112
-rw-r--r--  src/modules/audio_processing/main/source/processing_component.h | 63
-rw-r--r--  src/modules/audio_processing/main/source/splitting_filter.cc | 33
-rw-r--r--  src/modules/audio_processing/main/source/splitting_filter.h | 63
-rw-r--r--  src/modules/audio_processing/main/source/voice_detection_impl.cc | 202
-rw-r--r--  src/modules/audio_processing/main/source/voice_detection_impl.h | 63
-rw-r--r--  src/modules/audio_processing/main/test/android/apmtest/AndroidManifest.xml | 30
-rw-r--r--  src/modules/audio_processing/main/test/android/apmtest/default.properties | 11
-rw-r--r--  src/modules/audio_processing/main/test/android/apmtest/jni/Android.mk | 26
-rw-r--r--  src/modules/audio_processing/main/test/android/apmtest/jni/Application.mk | 1
-rw-r--r--  src/modules/audio_processing/main/test/android/apmtest/jni/main.c | 307
-rw-r--r--  src/modules/audio_processing/main/test/android/apmtest/res/values/strings.xml | 4
-rw-r--r--  src/modules/audio_processing/main/test/process_test/Android.mk | 48
-rw-r--r--  src/modules/audio_processing/main/test/process_test/apmtest.m | 360
-rw-r--r--  src/modules/audio_processing/main/test/process_test/process_test.cc | 628
-rw-r--r--  src/modules/audio_processing/main/test/unit_test/Android.mk | 49
-rw-r--r--  src/modules/audio_processing/main/test/unit_test/audio_processing_unittest.pb.cc | 1111
-rw-r--r--  src/modules/audio_processing/main/test/unit_test/audio_processing_unittest.pb.h | 862
-rw-r--r--  src/modules/audio_processing/main/test/unit_test/audio_processing_unittest.proto | 33
-rw-r--r--  src/modules/audio_processing/main/test/unit_test/unit_test.cc | 881
40 files changed, 8645 insertions, 0 deletions
diff --git a/src/modules/audio_processing/main/apm_tests.gyp b/src/modules/audio_processing/main/apm_tests.gyp
new file mode 100644
index 0000000000..441abebb49
--- /dev/null
+++ b/src/modules/audio_processing/main/apm_tests.gyp
@@ -0,0 +1,60 @@
+# Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+{
+ 'includes': [
+ '../../../common_settings.gypi',
+ ],
+ 'targets': [
+ {
+ 'target_name': 'unit_test',
+ 'type': 'executable',
+ 'dependencies': [
+ 'source/apm.gyp:audio_processing',
+ '../../../system_wrappers/source/system_wrappers.gyp:system_wrappers',
+ '../../../common_audio/signal_processing_library/main/source/spl.gyp:spl',
+
+ '../../../../testing/gtest.gyp:gtest',
+ '../../../../testing/gtest.gyp:gtest_main',
+ '../../../../third_party/protobuf/protobuf.gyp:protobuf_lite',
+ ],
+ 'include_dirs': [
+ '../../../../testing/gtest/include',
+ ],
+ 'sources': [
+ 'test/unit_test/unit_test.cc',
+ 'test/unit_test/audio_processing_unittest.pb.cc',
+ 'test/unit_test/audio_processing_unittest.pb.h',
+ ],
+ },
+ {
+ 'target_name': 'process_test',
+ 'type': 'executable',
+ 'dependencies': [
+ 'source/apm.gyp:audio_processing',
+ '../../../system_wrappers/source/system_wrappers.gyp:system_wrappers',
+
+ '../../../../testing/gtest.gyp:gtest',
+ '../../../../testing/gtest.gyp:gtest_main',
+ ],
+ 'include_dirs': [
+ '../../../../testing/gtest/include',
+ ],
+ 'sources': [
+ 'test/process_test/process_test.cc',
+ ],
+ },
+
+ ],
+}
+
+# Local Variables:
+# tab-width:2
+# indent-tabs-mode:nil
+# End:
+# vim: set expandtab tabstop=2 shiftwidth=2:
diff --git a/src/modules/audio_processing/main/interface/audio_processing.h b/src/modules/audio_processing/main/interface/audio_processing.h
new file mode 100644
index 0000000000..dc9c2325a5
--- /dev/null
+++ b/src/modules/audio_processing/main/interface/audio_processing.h
@@ -0,0 +1,564 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_INTERFACE_AUDIO_PROCESSING_H_
+#define WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_INTERFACE_AUDIO_PROCESSING_H_
+
+#include "typedefs.h"
+#include "module.h"
+
+namespace webrtc {
+
+class AudioFrame;
+class EchoCancellation;
+class EchoControlMobile;
+class GainControl;
+class HighPassFilter;
+class LevelEstimator;
+class NoiseSuppression;
+class VoiceDetection;
+
+// The Audio Processing Module (APM) provides a collection of voice processing
+// components designed for real-time communications software.
+//
+// APM operates on two audio streams on a frame-by-frame basis. Frames of the
+// primary stream, on which all processing is applied, are passed to
+// |ProcessStream()|. Frames of the reverse direction stream, which are used for
+// analysis by some components, are passed to |AnalyzeReverseStream()|. On the
+// client-side, this will typically be the near-end (capture) and far-end
+// (render) streams, respectively. APM should be placed in the signal chain as
+// close to the audio hardware abstraction layer (HAL) as possible.
+//
+// On the server-side, the reverse stream will normally not be used, with
+// processing occurring on each incoming stream.
+//
+// Component interfaces follow a similar pattern and are accessed through
+// corresponding getters in APM. All components are disabled at create-time,
+// with default settings that are recommended for most situations. New settings
+// can be applied without enabling a component. Enabling a component triggers
+// memory allocation and initialization to allow it to start processing the
+// streams.
+//
+// Thread safety is provided with the following assumptions to reduce locking
+// overhead:
+// 1. The stream getters and setters are called from the same thread as
+// ProcessStream(). More precisely, stream functions are never called
+// concurrently with ProcessStream().
+// 2. Parameter getters are never called concurrently with the corresponding
+// setter.
+//
+// APM accepts only 16-bit linear PCM audio data in frames of 10 ms. Multiple
+// channels should be interleaved.
+//
+// Usage example, omitting error checking:
+// AudioProcessing* apm = AudioProcessing::Create(0);
+// apm->set_sample_rate_hz(32000); // Super-wideband processing.
+//
+// // Mono capture and stereo render.
+// apm->set_num_channels(1, 1);
+// apm->set_num_reverse_channels(2);
+//
+// apm->high_pass_filter()->Enable(true);
+//
+// apm->echo_cancellation()->enable_drift_compensation(false);
+// apm->echo_cancellation()->Enable(true);
+//
+// apm->noise_suppression()->set_level(kHigh);
+// apm->noise_suppression()->Enable(true);
+//
+// apm->gain_control()->set_analog_level_limits(0, 255);
+// apm->gain_control()->set_mode(kAdaptiveAnalog);
+// apm->gain_control()->Enable(true);
+//
+// apm->voice_detection()->Enable(true);
+//
+// // Start a voice call...
+//
+// // ... Render frame arrives bound for the audio HAL ...
+// apm->AnalyzeReverseStream(render_frame);
+//
+// // ... Capture frame arrives from the audio HAL ...
+// // Call required set_stream_ functions.
+// apm->set_stream_delay_ms(delay_ms);
+// apm->gain_control()->set_stream_analog_level(analog_level);
+//
+// apm->ProcessStream(capture_frame);
+//
+// // Call required stream_ functions.
+// analog_level = apm->gain_control()->stream_analog_level();
+// has_voice = apm->voice_detection()->stream_has_voice();
+//
+// Repeat render and capture processing for the duration of the call...
+// // Start a new call...
+// apm->Initialize();
+//
+// // Close the application...
+// AudioProcessing::Destroy(apm);
+// apm = NULL;
+//
+class AudioProcessing : public Module {
+ public:
+ // Creates an APM instance, with identifier |id|. Use one instance for every
+ // primary audio stream requiring processing. On the client-side, this would
+ // typically be one instance for the near-end stream, and additional instances
+ // for each far-end stream which requires processing. On the server-side,
+ // this would typically be one instance for every incoming stream.
+ static AudioProcessing* Create(int id);
+
+ // Destroys an |apm| instance.
+ static void Destroy(AudioProcessing* apm);
+
+ // Initializes internal states, while retaining all user settings. This
+ // should be called before beginning to process a new audio stream. However,
+ // it is not necessary to call before processing the first stream after
+ // creation.
+ virtual int Initialize() = 0;
+
+ // Sets the sample |rate| in Hz for both the primary and reverse audio
+ // streams. 8000, 16000 or 32000 Hz are permitted.
+ virtual int set_sample_rate_hz(int rate) = 0;
+ virtual int sample_rate_hz() const = 0;
+
+ // Sets the number of channels for the primary audio stream. Input frames must
+ // contain a number of channels given by |input_channels|, while output frames
+ // will be returned with the number of channels given by |output_channels|.
+ virtual int set_num_channels(int input_channels, int output_channels) = 0;
+ virtual int num_input_channels() const = 0;
+ virtual int num_output_channels() const = 0;
+
+ // Sets the number of channels for the reverse audio stream. Input frames must
+ // contain a number of channels given by |channels|.
+ virtual int set_num_reverse_channels(int channels) = 0;
+ virtual int num_reverse_channels() const = 0;
+
+ // Processes a 10 ms |frame| of the primary audio stream. On the client-side,
+ // this is the near-end (or captured) audio.
+ //
+ // If needed for enabled functionality, any function with the set_stream_ tag
+ // must be called prior to processing the current frame. Any getter function
+ // with the stream_ tag which is needed should be called after processing.
+ //
+ // The |_frequencyInHz|, |_audioChannel|, and |_payloadDataLengthInSamples|
+ // members of |frame| must be valid, and correspond to settings supplied
+ // to APM.
+ virtual int ProcessStream(AudioFrame* frame) = 0;
+
+ // Analyzes a 10 ms |frame| of the reverse direction audio stream. The frame
+ // will not be modified. On the client-side, this is the far-end (or to be
+ // rendered) audio.
+ //
+ // It is only necessary to provide this if echo processing is enabled, as the
+ // reverse stream forms the echo reference signal. It is recommended, but not
+ // necessary, to provide it if gain control is enabled. On the server-side
+ // this typically will not be used. If you're not sure what to pass in here,
+ // chances are you don't need to use it.
+ //
+ // The |_frequencyInHz|, |_audioChannel|, and |_payloadDataLengthInSamples|
+ // members of |frame| must be valid.
+ //
+ // TODO(ajm): add const to input; requires an implementation fix.
+ virtual int AnalyzeReverseStream(AudioFrame* frame) = 0;
+
+ // This must be called if and only if echo processing is enabled.
+ //
+ // Sets the |delay| in ms between AnalyzeReverseStream() receiving a far-end
+ // frame and ProcessStream() receiving a near-end frame containing the
+ // corresponding echo. On the client-side this can be expressed as
+ // delay = (t_render - t_analyze) + (t_process - t_capture)
+ // where,
+ // - t_analyze is the time a frame is passed to AnalyzeReverseStream() and
+ // t_render is the time the first sample of the same frame is rendered by
+ // the audio hardware.
+ // - t_capture is the time the first sample of a frame is captured by the
+ // audio hardware and t_process is the time the same frame is passed to
+ // ProcessStream().
+ virtual int set_stream_delay_ms(int delay) = 0;
+ virtual int stream_delay_ms() const = 0;
+
+ // Starts recording debugging information to a file specified by |filename|,
+ // a NULL-terminated string. If there is an ongoing recording, the old file
+ // will be closed, and recording will continue in the newly specified file.
+ // An already existing file will be overwritten without warning.
+ static const int kMaxFilenameSize = 1024;
+ virtual int StartDebugRecording(const char filename[kMaxFilenameSize]) = 0;
+
+ // Stops recording debugging information, and closes the file. Recording
+ // cannot be resumed in the same file (without overwriting it).
+ virtual int StopDebugRecording() = 0;
+
+ // These provide access to the component interfaces and should never return
+ // NULL. The pointers will be valid for the lifetime of the APM instance.
+ // The memory for these objects is entirely managed internally.
+ virtual EchoCancellation* echo_cancellation() const = 0;
+ virtual EchoControlMobile* echo_control_mobile() const = 0;
+ virtual GainControl* gain_control() const = 0;
+ virtual HighPassFilter* high_pass_filter() const = 0;
+ virtual LevelEstimator* level_estimator() const = 0;
+ virtual NoiseSuppression* noise_suppression() const = 0;
+ virtual VoiceDetection* voice_detection() const = 0;
+
+ struct Statistic {
+ int instant; // Instantaneous value.
+ int average; // Long-term average.
+ int maximum; // Long-term maximum.
+ int minimum; // Long-term minimum.
+ };
+
+ // Fatal errors.
+ enum Errors {
+ kNoError = 0,
+ kUnspecifiedError = -1,
+ kCreationFailedError = -2,
+ kUnsupportedComponentError = -3,
+ kUnsupportedFunctionError = -4,
+ kNullPointerError = -5,
+ kBadParameterError = -6,
+ kBadSampleRateError = -7,
+ kBadDataLengthError = -8,
+ kBadNumberChannelsError = -9,
+ kFileError = -10,
+ kStreamParameterNotSetError = -11,
+ kNotEnabledError = -12
+ };
+
+ // Warnings are non-fatal.
+ enum Warnings {
+ // This results when a set_stream_ parameter is out of range. Processing
+ // will continue, but the parameter may have been truncated.
+ kBadStreamParameterWarning = -13,
+ };
+
+ // Inherited from Module.
+ virtual WebRtc_Word32 TimeUntilNextProcess() { return -1; };
+ virtual WebRtc_Word32 Process() { return -1; };
+
+ protected:
+ virtual ~AudioProcessing() {};
+};
+
+// The acoustic echo cancellation (AEC) component provides better performance
+// than AECM but also requires more processing power and is dependent on delay
+// stability and reporting accuracy. As such it is well-suited and recommended
+// for PC and IP phone applications.
+//
+// Not recommended to be enabled on the server-side.
+class EchoCancellation {
+ public:
+ // EchoCancellation and EchoControlMobile may not be enabled simultaneously.
+ // Enabling one will disable the other.
+ virtual int Enable(bool enable) = 0;
+ virtual bool is_enabled() const = 0;
+
+ // Differences in clock speed on the primary and reverse streams can impact
+ // the AEC performance. On the client-side, this could be seen when different
+ // render and capture devices are used, particularly with webcams.
+ //
+ // This enables a compensation mechanism, and requires that
+ // |set_device_sample_rate_hz()| and |set_stream_drift_samples()| be called.
+ virtual int enable_drift_compensation(bool enable) = 0;
+ virtual bool is_drift_compensation_enabled() const = 0;
+
+ // Provides the sampling rate of the audio devices. It is assumed the render
+ // and capture devices use the same nominal sample rate. Required if and only
+ // if drift compensation is enabled.
+ virtual int set_device_sample_rate_hz(int rate) = 0;
+ virtual int device_sample_rate_hz() const = 0;
+
+ // Sets the difference between the number of samples rendered and captured by
+ // the audio devices since the last call to |ProcessStream()|. Must be called
+ // if and only if drift compensation is enabled, prior to |ProcessStream()|.
+ virtual int set_stream_drift_samples(int drift) = 0;
+ virtual int stream_drift_samples() const = 0;
+
+ enum SuppressionLevel {
+ kLowSuppression,
+ kModerateSuppression,
+ kHighSuppression
+ };
+
+ // Sets the aggressiveness of the suppressor. A higher level trades off
+ // double-talk performance for increased echo suppression.
+ virtual int set_suppression_level(SuppressionLevel level) = 0;
+ virtual SuppressionLevel suppression_level() const = 0;
+
+ // Returns false if the current frame almost certainly contains no echo
+ // and true if it _might_ contain echo.
+ virtual bool stream_has_echo() const = 0;
+
+ // Enables the computation of various echo metrics. These are obtained
+ // through |GetMetrics()|.
+ virtual int enable_metrics(bool enable) = 0;
+ virtual bool are_metrics_enabled() const = 0;
+
+ // Each statistic is reported in dB.
+ // P_far: Far-end (render) signal power.
+ // P_echo: Near-end (capture) echo signal power.
+ // P_out: Signal power at the output of the AEC.
+ // P_a: Internal signal power at the point before the AEC's non-linear
+ // processor.
+ struct Metrics {
+ // RERL = ERL + ERLE
+ AudioProcessing::Statistic residual_echo_return_loss;
+
+ // ERL = 10log_10(P_far / P_echo)
+ AudioProcessing::Statistic echo_return_loss;
+
+ // ERLE = 10log_10(P_echo / P_out)
+ AudioProcessing::Statistic echo_return_loss_enhancement;
+
+ // (Pre non-linear processing suppression) A_NLP = 10log_10(P_echo / P_a)
+ AudioProcessing::Statistic a_nlp;
+ };
+
+ // TODO(ajm): discuss the metrics update period.
+ virtual int GetMetrics(Metrics* metrics) = 0;
+
+ protected:
+ virtual ~EchoCancellation() {};
+};
+
+// The acoustic echo control for mobile (AECM) component is a low complexity
+// robust option intended for use on mobile devices.
+//
+// Not recommended to be enabled on the server-side.
+class EchoControlMobile {
+ public:
+ // EchoCancellation and EchoControlMobile may not be enabled simultaneously.
+ // Enabling one will disable the other.
+ virtual int Enable(bool enable) = 0;
+ virtual bool is_enabled() const = 0;
+
+ // Recommended settings for particular audio routes. In general, the louder
+ // the echo is expected to be, the higher this value should be set. The
+ // preferred setting may vary from device to device.
+ enum RoutingMode {
+ kQuietEarpieceOrHeadset,
+ kEarpiece,
+ kLoudEarpiece,
+ kSpeakerphone,
+ kLoudSpeakerphone
+ };
+
+ // Sets echo control appropriate for the audio routing |mode| on the device.
+ // It can and should be updated during a call if the audio routing changes.
+ virtual int set_routing_mode(RoutingMode mode) = 0;
+ virtual RoutingMode routing_mode() const = 0;
+
+ // Comfort noise replaces suppressed background noise to maintain a
+ // consistent signal level.
+ virtual int enable_comfort_noise(bool enable) = 0;
+ virtual bool is_comfort_noise_enabled() const = 0;
+
+ protected:
+ virtual ~EchoControlMobile() {};
+};
+
+// The automatic gain control (AGC) component brings the signal to an
+// appropriate range. This is done by applying a digital gain directly and, in
+// the analog mode, prescribing an analog gain to be applied at the audio HAL.
+//
+// Recommended to be enabled on the client-side.
+class GainControl {
+ public:
+ virtual int Enable(bool enable) = 0;
+ virtual bool is_enabled() const = 0;
+
+ // When an analog mode is set, this must be called prior to |ProcessStream()|
+ // to pass the current analog level from the audio HAL. Must be within the
+ // range provided to |set_analog_level_limits()|.
+ virtual int set_stream_analog_level(int level) = 0;
+
+ // When an analog mode is set, this should be called after |ProcessStream()|
+ // to obtain the recommended new analog level for the audio HAL. It is the
+ // user's responsibility to apply this level.
+ virtual int stream_analog_level() = 0;
+
+ enum Mode {
+ // Adaptive mode intended for use if an analog volume control is available
+ // on the capture device. It will require the user to provide coupling
+ // between the OS mixer controls and AGC through the |stream_analog_level()|
+ // functions.
+ //
+ // It consists of an analog gain prescription for the audio device and a
+ // digital compression stage.
+ kAdaptiveAnalog,
+
+ // Adaptive mode intended for situations in which an analog volume control
+ // is unavailable. It operates in a similar fashion to the adaptive analog
+ // mode, but with scaling instead applied in the digital domain. As with
+ // the analog mode, it additionally uses a digital compression stage.
+ kAdaptiveDigital,
+
+ // Fixed mode which enables only the digital compression stage also used by
+ // the two adaptive modes.
+ //
+ // It is distinguished from the adaptive modes by considering only a
+ // short time-window of the input signal. It applies a fixed gain through
+ // most of the input level range, and compresses (gradually reduces gain
+ // with increasing level) the input signal at higher levels. This mode is
+ // preferred on embedded devices where the capture signal level is
+ // predictable, so that a known gain can be applied.
+ kFixedDigital
+ };
+
+ virtual int set_mode(Mode mode) = 0;
+ virtual Mode mode() const = 0;
+
+ // Sets the target peak |level| (or envelope) of the AGC in dBFs (decibels
+ // from digital full-scale). The convention is to use positive values. For
+ // instance, passing in a value of 3 corresponds to -3 dBFs, or a target
+ // level 3 dB below full-scale. Limited to [0, 31].
+ //
+ // TODO(ajm): use a negative value here instead, if/when VoE will similarly
+ // update its interface.
+ virtual int set_target_level_dbfs(int level) = 0;
+ virtual int target_level_dbfs() const = 0;
+
+ // Sets the maximum |gain| the digital compression stage may apply, in dB. A
+ // higher number corresponds to greater compression, while a value of 0 will
+ // leave the signal uncompressed. Limited to [0, 90].
+ virtual int set_compression_gain_db(int gain) = 0;
+ virtual int compression_gain_db() const = 0;
+
+ // When enabled, the compression stage will hard limit the signal to the
+ // target level. Otherwise, the signal will be compressed but not limited
+ // above the target level.
+ virtual int enable_limiter(bool enable) = 0;
+ virtual bool is_limiter_enabled() const = 0;
+
+ // Sets the |minimum| and |maximum| analog levels of the audio capture device.
+ // Must be set if and only if an analog mode is used. Limited to [0, 65535].
+ virtual int set_analog_level_limits(int minimum,
+ int maximum) = 0;
+ virtual int analog_level_minimum() const = 0;
+ virtual int analog_level_maximum() const = 0;
+
+ // Returns true if the AGC has detected a saturation event (period where the
+ // signal reaches digital full-scale) in the current frame and the analog
+ // level cannot be reduced.
+ //
+ // This could be used as an indicator to reduce or disable analog mic gain at
+ // the audio HAL.
+ virtual bool stream_is_saturated() const = 0;
+
+ protected:
+ virtual ~GainControl() {};
+};
+
+// A filtering component which removes DC offset and low-frequency noise.
+// Recommended to be enabled on the client-side.
+class HighPassFilter {
+ public:
+ virtual int Enable(bool enable) = 0;
+ virtual bool is_enabled() const = 0;
+
+ protected:
+ virtual ~HighPassFilter() {};
+};
+
+// An estimation component used to retrieve level metrics.
+class LevelEstimator {
+ public:
+ virtual int Enable(bool enable) = 0;
+ virtual bool is_enabled() const = 0;
+
+ // The metrics are reported in dBFs calculated as:
+ // Level = 10log_10(P_s / P_max) [dBFs], where
+ // P_s is the signal power and P_max is the maximum possible (or peak)
+ // power. With 16-bit signals, P_max = (2^15)^2.
+ struct Metrics {
+ AudioProcessing::Statistic signal; // Overall signal level.
+ AudioProcessing::Statistic speech; // Speech level.
+ AudioProcessing::Statistic noise; // Noise level.
+ };
+
+ virtual int GetMetrics(Metrics* metrics, Metrics* reverse_metrics) = 0;
+
+ //virtual int enable_noise_warning(bool enable) = 0;
+ //bool is_noise_warning_enabled() const = 0;
+ //virtual bool stream_has_high_noise() const = 0;
+
+ protected:
+ virtual ~LevelEstimator() {};
+};
+
+// The noise suppression (NS) component attempts to remove noise while
+// retaining speech.
+//
+// Recommended to be enabled on the client-side.
+class NoiseSuppression {
+ public:
+ virtual int Enable(bool enable) = 0;
+ virtual bool is_enabled() const = 0;
+
+ // Determines the aggressiveness of the suppression. Increasing the level
+ // will reduce the noise level at the expense of a higher speech distortion.
+ enum Level {
+ kLow,
+ kModerate,
+ kHigh,
+ kVeryHigh
+ };
+
+ virtual int set_level(Level level) = 0;
+ virtual Level level() const = 0;
+
+ protected:
+ virtual ~NoiseSuppression() {};
+};
+
+// The voice activity detection (VAD) component analyzes the stream to
+// determine if voice is present. A facility is also provided to pass in an
+// external VAD decision.
+class VoiceDetection {
+ public:
+ virtual int Enable(bool enable) = 0;
+ virtual bool is_enabled() const = 0;
+
+ // Returns true if voice is detected in the current frame. Should be called
+ // after |ProcessStream()|.
+ virtual bool stream_has_voice() const = 0;
+
+ // Some of the APM functionality requires a VAD decision. In the case that
+ // a decision is externally available for the current frame, it can be passed
+ // in here, before |ProcessStream()| is called.
+ //
+ // VoiceDetection does _not_ need to be enabled to use this. If it happens to
+ // be enabled, detection will be skipped for any frame in which an external
+ // VAD decision is provided.
+ virtual int set_stream_has_voice(bool has_voice) = 0;
+
+ // Specifies the likelihood that a frame will be declared to contain voice.
+ // A higher value makes it more likely that speech will not be clipped, at
+ // the expense of more noise being detected as voice.
+ enum Likelihood {
+ kVeryLowLikelihood,
+ kLowLikelihood,
+ kModerateLikelihood,
+ kHighLikelihood
+ };
+
+ virtual int set_likelihood(Likelihood likelihood) = 0;
+ virtual Likelihood likelihood() const = 0;
+
+ // Sets the |size| of the frames in ms on which the VAD will operate. Larger
+ // frames will improve detection accuracy, but reduce the frequency of
+ // updates.
+ //
+ // This does not impact the size of frames passed to |ProcessStream()|.
+ virtual int set_frame_size_ms(int size) = 0;
+ virtual int frame_size_ms() const = 0;
+
+ protected:
+ virtual ~VoiceDetection() {};
+};
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_INTERFACE_AUDIO_PROCESSING_H_
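As a worked example of the call sequence documented in this header, the following sketch drives one 10 ms render/capture iteration, including the stream delay computed from the four timestamps defined in the set_stream_delay_ms() comment. It is a minimal sketch only: the frames, the timestamps, and the surrounding HAL plumbing are assumptions; only the calls on |apm| come from the interface above.

// Minimal sketch of one 10 ms render/capture iteration against the APM
// interface. Frames and timestamps are assumed to be supplied by the caller;
// error handling is reduced to early returns.
#include "audio_processing.h"
#include "module_common_types.h"

int ProcessOneIteration(webrtc::AudioProcessing* apm,
                        webrtc::AudioFrame* render_frame,
                        webrtc::AudioFrame* capture_frame,
                        int t_render_ms, int t_analyze_ms,
                        int t_process_ms, int t_capture_ms,
                        int* analog_level) {
  using webrtc::AudioProcessing;

  // Render frame bound for the audio HAL; forms the echo reference.
  int err = apm->AnalyzeReverseStream(render_frame);
  if (err != AudioProcessing::kNoError) return err;

  // Required set_stream_ calls, prior to ProcessStream(). The delay is as
  // defined above: (t_render - t_analyze) + (t_process - t_capture).
  int delay_ms = (t_render_ms - t_analyze_ms) + (t_process_ms - t_capture_ms);
  err = apm->set_stream_delay_ms(delay_ms);
  if (err == AudioProcessing::kBadStreamParameterWarning) {
    // Non-fatal: the delay was out of range and has been truncated.
  } else if (err != AudioProcessing::kNoError) {
    return err;
  }
  err = apm->gain_control()->set_stream_analog_level(*analog_level);
  if (err != AudioProcessing::kNoError) return err;

  err = apm->ProcessStream(capture_frame);
  if (err != AudioProcessing::kNoError) return err;

  // Required stream_ getters, after ProcessStream().
  *analog_level = apm->gain_control()->stream_analog_level();
  // Assumes voice_detection() was enabled at setup.
  bool has_voice = apm->voice_detection()->stream_has_voice();
  (void)has_voice;  // e.g. drive a talk indicator in the application.
  return AudioProcessing::kNoError;
}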
diff --git a/src/modules/audio_processing/main/source/Android.mk b/src/modules/audio_processing/main/source/Android.mk
new file mode 100644
index 0000000000..12c7bc758a
--- /dev/null
+++ b/src/modules/audio_processing/main/source/Android.mk
@@ -0,0 +1,72 @@
+# Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+LOCAL_PATH := $(call my-dir)
+
+include $(CLEAR_VARS)
+
+LOCAL_ARM_MODE := arm
+LOCAL_MODULE := libwebrtc_apm
+LOCAL_MODULE_TAGS := optional
+LOCAL_CPP_EXTENSION := .cc
+LOCAL_GENERATED_SOURCES :=
+LOCAL_SRC_FILES := audio_buffer.cc \
+ audio_processing_impl.cc \
+ echo_cancellation_impl.cc \
+ echo_control_mobile_impl.cc \
+ gain_control_impl.cc \
+ high_pass_filter_impl.cc \
+ level_estimator_impl.cc \
+ noise_suppression_impl.cc \
+ splitting_filter.cc \
+ processing_component.cc \
+ voice_detection_impl.cc
+
+# Flags passed to both C and C++ files.
+MY_CFLAGS :=
+MY_CFLAGS_C :=
+MY_DEFS := '-DNO_TCMALLOC' \
+ '-DNO_HEAPCHECKER' \
+ '-DWEBRTC_TARGET_PC' \
+ '-DWEBRTC_LINUX' \
+ '-DWEBRTC_THREAD_RR' \
+ '-DWEBRTC_ANDROID' \
+ '-DANDROID' \
+ '-DWEBRTC_NS_FIXED'
+# floating point
+# -DWEBRTC_NS_FLOAT'
+LOCAL_CFLAGS := $(MY_CFLAGS_C) $(MY_CFLAGS) $(MY_DEFS)
+
+# Include paths placed before CFLAGS/CPPFLAGS
+LOCAL_C_INCLUDES := $(LOCAL_PATH)/../../../.. \
+ $(LOCAL_PATH)/../interface \
+ $(LOCAL_PATH)/../../../interface \
+ $(LOCAL_PATH)/../../../../system_wrappers/interface \
+ $(LOCAL_PATH)/../../aec/main/interface \
+ $(LOCAL_PATH)/../../aecm/main/interface \
+ $(LOCAL_PATH)/../../agc/main/interface \
+ $(LOCAL_PATH)/../../ns/main/interface \
+ $(LOCAL_PATH)/../../../../common_audio/signal_processing_library/main/interface \
+ $(LOCAL_PATH)/../../../../common_audio/vad/main/interface
+
+# Flags passed to only C++ (and not C) files.
+LOCAL_CPPFLAGS :=
+
+LOCAL_LDFLAGS :=
+
+LOCAL_STATIC_LIBRARIES :=
+
+LOCAL_SHARED_LIBRARIES := libcutils \
+ libdl \
+ libstlport
+
+LOCAL_ADDITIONAL_DEPENDENCIES :=
+
+include external/stlport/libstlport.mk
+include $(BUILD_STATIC_LIBRARY)
+
diff --git a/src/modules/audio_processing/main/source/apm.gyp b/src/modules/audio_processing/main/source/apm.gyp
new file mode 100644
index 0000000000..93811c71f9
--- /dev/null
+++ b/src/modules/audio_processing/main/source/apm.gyp
@@ -0,0 +1,77 @@
+# Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+{
+ 'includes': [
+ '../../../../common_settings.gypi', # Common settings
+ ],
+ 'targets': [
+ {
+ 'target_name': 'audio_processing',
+ 'type': '<(library)',
+ 'conditions': [
+ ['prefer_fixed_point==1', {
+ 'dependencies': ['../../ns/main/source/ns.gyp:ns_fix'],
+ 'defines': ['WEBRTC_NS_FIXED'],
+ }, { # else: prefer_fixed_point==0
+ 'dependencies': ['../../ns/main/source/ns.gyp:ns'],
+ 'defines': ['WEBRTC_NS_FLOAT'],
+ }],
+ ],
+ 'dependencies': [
+ '../../../../system_wrappers/source/system_wrappers.gyp:system_wrappers',
+ '../../aec/main/source/aec.gyp:aec',
+ '../../aecm/main/source/aecm.gyp:aecm',
+ '../../agc/main/source/agc.gyp:agc',
+ '../../../../common_audio/signal_processing_library/main/source/spl.gyp:spl',
+ '../../../../common_audio/vad/main/source/vad.gyp:vad',
+ ],
+ 'include_dirs': [
+ '../interface',
+ '../../../interface',
+ ],
+ 'direct_dependent_settings': {
+ 'include_dirs': [
+ '../interface',
+ '../../../interface',
+ ],
+ },
+ 'sources': [
+ '../interface/audio_processing.h',
+ 'audio_buffer.cc',
+ 'audio_buffer.h',
+ 'audio_processing_impl.cc',
+ 'audio_processing_impl.h',
+ 'echo_cancellation_impl.cc',
+ 'echo_cancellation_impl.h',
+ 'echo_control_mobile_impl.cc',
+ 'echo_control_mobile_impl.h',
+ 'gain_control_impl.cc',
+ 'gain_control_impl.h',
+ 'high_pass_filter_impl.cc',
+ 'high_pass_filter_impl.h',
+ 'level_estimator_impl.cc',
+ 'level_estimator_impl.h',
+ 'noise_suppression_impl.cc',
+ 'noise_suppression_impl.h',
+ 'splitting_filter.cc',
+ 'splitting_filter.h',
+ 'processing_component.cc',
+ 'processing_component.h',
+ 'voice_detection_impl.cc',
+ 'voice_detection_impl.h',
+ ],
+ },
+ ],
+}
+
+# Local Variables:
+# tab-width:2
+# indent-tabs-mode:nil
+# End:
+# vim: set expandtab tabstop=2 shiftwidth=2:
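The prefer_fixed_point condition above is what drives the WEBRTC_NS_FIXED / WEBRTC_NS_FLOAT defines (the Android.mk in this change builds with WEBRTC_NS_FIXED). Below is a hedged sketch of how a translation unit might branch on them; the header and type names are illustrative placeholders, not the module's actual NS API.

// Illustration only: selecting a noise suppression backend from the defines
// set by apm.gyp / Android.mk. Header and type names here are hypothetical.
#if defined(WEBRTC_NS_FIXED)
#include "noise_suppression_x.h"      // hypothetical fixed-point interface
typedef struct NsxState NsState;      // hypothetical
#elif defined(WEBRTC_NS_FLOAT)
#include "noise_suppression.h"        // hypothetical floating-point interface
typedef struct NsFloatState NsState;  // hypothetical
#else
#error "Exactly one of WEBRTC_NS_FIXED or WEBRTC_NS_FLOAT must be defined."
#endif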
diff --git a/src/modules/audio_processing/main/source/audio_buffer.cc b/src/modules/audio_processing/main/source/audio_buffer.cc
new file mode 100644
index 0000000000..6b20fcecee
--- /dev/null
+++ b/src/modules/audio_processing/main/source/audio_buffer.cc
@@ -0,0 +1,278 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "audio_buffer.h"
+
+#include "module_common_types.h"
+
+namespace webrtc {
+namespace {
+
+enum {
+ kSamplesPer8kHzChannel = 80,
+ kSamplesPer16kHzChannel = 160,
+ kSamplesPer32kHzChannel = 320
+};
+
+void StereoToMono(const WebRtc_Word16* left, const WebRtc_Word16* right,
+ WebRtc_Word16* out, int samples_per_channel) {
+ WebRtc_Word32 data_int32 = 0;
+ for (int i = 0; i < samples_per_channel; i++) {
+ data_int32 = (left[i] + right[i]) >> 1;
+ if (data_int32 > 32767) {
+ data_int32 = 32767;
+ } else if (data_int32 < -32768) {
+ data_int32 = -32768;
+ }
+
+ out[i] = static_cast<WebRtc_Word16>(data_int32);
+ }
+}
+} // namespace
+
+struct AudioChannel {
+ AudioChannel() {
+ memset(data, 0, sizeof(data));
+ }
+
+ WebRtc_Word16 data[kSamplesPer32kHzChannel];
+};
+
+struct SplitAudioChannel {
+ SplitAudioChannel() {
+ memset(low_pass_data, 0, sizeof(low_pass_data));
+ memset(high_pass_data, 0, sizeof(high_pass_data));
+ memset(analysis_filter_state1, 0, sizeof(analysis_filter_state1));
+ memset(analysis_filter_state2, 0, sizeof(analysis_filter_state2));
+ memset(synthesis_filter_state1, 0, sizeof(synthesis_filter_state1));
+ memset(synthesis_filter_state2, 0, sizeof(synthesis_filter_state2));
+ }
+
+ WebRtc_Word16 low_pass_data[kSamplesPer16kHzChannel];
+ WebRtc_Word16 high_pass_data[kSamplesPer16kHzChannel];
+
+ WebRtc_Word32 analysis_filter_state1[6];
+ WebRtc_Word32 analysis_filter_state2[6];
+ WebRtc_Word32 synthesis_filter_state1[6];
+ WebRtc_Word32 synthesis_filter_state2[6];
+};
+
+// TODO(ajm): check range of input parameters?
+AudioBuffer::AudioBuffer(WebRtc_Word32 max_num_channels,
+ WebRtc_Word32 samples_per_channel)
+ : max_num_channels_(max_num_channels),
+ num_channels_(0),
+ num_mixed_channels_(0),
+ num_mixed_low_pass_channels_(0),
+ samples_per_channel_(samples_per_channel),
+ samples_per_split_channel_(samples_per_channel),
+ reference_copied_(false),
+ data_(NULL),
+ channels_(NULL),
+ split_channels_(NULL),
+ mixed_low_pass_channels_(NULL),
+ low_pass_reference_channels_(NULL) {
+ if (max_num_channels_ > 1) {
+ channels_ = new AudioChannel[max_num_channels_];
+ mixed_low_pass_channels_ = new AudioChannel[max_num_channels_];
+ }
+ low_pass_reference_channels_ = new AudioChannel[max_num_channels_];
+
+ if (samples_per_channel_ == kSamplesPer32kHzChannel) {
+ split_channels_ = new SplitAudioChannel[max_num_channels_];
+ samples_per_split_channel_ = kSamplesPer16kHzChannel;
+ }
+}
+
+AudioBuffer::~AudioBuffer() {
+ if (channels_ != NULL) {
+ delete [] channels_;
+ }
+
+ if (mixed_low_pass_channels_ != NULL) {
+ delete [] mixed_low_pass_channels_;
+ }
+
+ if (low_pass_reference_channels_ != NULL) {
+ delete [] low_pass_reference_channels_;
+ }
+
+ if (split_channels_ != NULL) {
+ delete [] split_channels_;
+ }
+}
+
+WebRtc_Word16* AudioBuffer::data(WebRtc_Word32 channel) const {
+ assert(channel >= 0 && channel < num_channels_);
+ if (data_ != NULL) {
+ return data_;
+ }
+
+ return channels_[channel].data;
+}
+
+WebRtc_Word16* AudioBuffer::low_pass_split_data(WebRtc_Word32 channel) const {
+ assert(channel >= 0 && channel < num_channels_);
+ if (split_channels_ == NULL) {
+ return data(channel);
+ }
+
+ return split_channels_[channel].low_pass_data;
+}
+
+WebRtc_Word16* AudioBuffer::high_pass_split_data(WebRtc_Word32 channel) const {
+ assert(channel >= 0 && channel < num_channels_);
+ if (split_channels_ == NULL) {
+ return NULL;
+ }
+
+ return split_channels_[channel].high_pass_data;
+}
+
+WebRtc_Word16* AudioBuffer::mixed_low_pass_data(WebRtc_Word32 channel) const {
+ assert(channel >= 0 && channel < num_mixed_low_pass_channels_);
+
+ return mixed_low_pass_channels_[channel].data;
+}
+
+WebRtc_Word16* AudioBuffer::low_pass_reference(WebRtc_Word32 channel) const {
+ assert(channel >= 0 && channel < num_channels_);
+ if (!reference_copied_) {
+ return NULL;
+ }
+
+ return low_pass_reference_channels_[channel].data;
+}
+
+WebRtc_Word32* AudioBuffer::analysis_filter_state1(WebRtc_Word32 channel) const {
+ assert(channel >= 0 && channel < num_channels_);
+ return split_channels_[channel].analysis_filter_state1;
+}
+
+WebRtc_Word32* AudioBuffer::analysis_filter_state2(WebRtc_Word32 channel) const {
+ assert(channel >= 0 && channel < num_channels_);
+ return split_channels_[channel].analysis_filter_state2;
+}
+
+WebRtc_Word32* AudioBuffer::synthesis_filter_state1(WebRtc_Word32 channel) const {
+ assert(channel >= 0 && channel < num_channels_);
+ return split_channels_[channel].synthesis_filter_state1;
+}
+
+WebRtc_Word32* AudioBuffer::synthesis_filter_state2(WebRtc_Word32 channel) const {
+ assert(channel >= 0 && channel < num_channels_);
+ return split_channels_[channel].synthesis_filter_state2;
+}
+
+WebRtc_Word32 AudioBuffer::num_channels() const {
+ return num_channels_;
+}
+
+WebRtc_Word32 AudioBuffer::samples_per_channel() const {
+ return samples_per_channel_;
+}
+
+WebRtc_Word32 AudioBuffer::samples_per_split_channel() const {
+ return samples_per_split_channel_;
+}
+
+// TODO(ajm): Do deinterleaving and mixing in one step?
+void AudioBuffer::DeinterleaveFrom(AudioFrame* audioFrame) {
+ assert(audioFrame->_audioChannel <= max_num_channels_);
+ assert(audioFrame->_payloadDataLengthInSamples == samples_per_channel_);
+
+ num_channels_ = audioFrame->_audioChannel;
+ num_mixed_channels_ = 0;
+ num_mixed_low_pass_channels_ = 0;
+ reference_copied_ = false;
+
+ if (num_channels_ == 1) {
+ // We can get away with a pointer assignment in this case.
+ data_ = audioFrame->_payloadData;
+ return;
+ }
+
+ for (int i = 0; i < num_channels_; i++) {
+ WebRtc_Word16* deinterleaved = channels_[i].data;
+ WebRtc_Word16* interleaved = audioFrame->_payloadData;
+ WebRtc_Word32 interleaved_idx = i;
+ for (int j = 0; j < samples_per_channel_; j++) {
+ deinterleaved[j] = interleaved[interleaved_idx];
+ interleaved_idx += num_channels_;
+ }
+ }
+}
+
+void AudioBuffer::InterleaveTo(AudioFrame* audioFrame) const {
+ assert(audioFrame->_audioChannel == num_channels_);
+ assert(audioFrame->_payloadDataLengthInSamples == samples_per_channel_);
+
+ if (num_channels_ == 1) {
+ if (num_mixed_channels_ == 1) {
+ memcpy(audioFrame->_payloadData,
+ channels_[0].data,
+ sizeof(WebRtc_Word16) * samples_per_channel_);
+ } else {
+ // These should point to the same buffer in this case.
+ assert(data_ == audioFrame->_payloadData);
+ }
+
+ return;
+ }
+
+ for (int i = 0; i < num_channels_; i++) {
+ WebRtc_Word16* deinterleaved = channels_[i].data;
+ WebRtc_Word16* interleaved = audioFrame->_payloadData;
+ WebRtc_Word32 interleaved_idx = i;
+ for (int j = 0; j < samples_per_channel_; j++) {
+ interleaved[interleaved_idx] = deinterleaved[j];
+ interleaved_idx += num_channels_;
+ }
+ }
+}
+
+// TODO(ajm): would be good to support the no-mix case with pointer assignment.
+// TODO(ajm): handle mixing to multiple channels?
+void AudioBuffer::Mix(WebRtc_Word32 num_mixed_channels) {
+ // We currently only support the stereo to mono case.
+ assert(num_channels_ == 2);
+ assert(num_mixed_channels == 1);
+
+ StereoToMono(channels_[0].data,
+ channels_[1].data,
+ channels_[0].data,
+ samples_per_channel_);
+
+ num_channels_ = num_mixed_channels;
+ num_mixed_channels_ = num_mixed_channels;
+}
+
+void AudioBuffer::CopyAndMixLowPass(WebRtc_Word32 num_mixed_channels) {
+ // We currently only support the stereo to mono case.
+ assert(num_channels_ == 2);
+ assert(num_mixed_channels == 1);
+
+ StereoToMono(low_pass_split_data(0),
+ low_pass_split_data(1),
+ mixed_low_pass_channels_[0].data,
+ samples_per_split_channel_);
+
+ num_mixed_low_pass_channels_ = num_mixed_channels;
+}
+
+void AudioBuffer::CopyLowPassToReference() {
+ reference_copied_ = true;
+ for (int i = 0; i < num_channels_; i++) {
+ memcpy(low_pass_reference_channels_[i].data,
+ low_pass_split_data(i),
+ sizeof(WebRtc_Word16) * samples_per_split_channel_);
+ }
+}
+} // namespace webrtc
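DeinterleaveFrom()/InterleaveTo() above convert between the interleaved frame layout (L0 R0 L1 R1 ...) and per-channel arrays, and Mix() reuses StereoToMono(). A self-contained sketch of both transformations, using int16_t in place of WebRtc_Word16:

// Standalone sketch of the layout handled by AudioBuffer: deinterleaving an
// interleaved stereo frame and averaging it to mono as StereoToMono() does.
#include <stdint.h>
#include <stdio.h>

int main(void) {
  enum { kChannels = 2, kSamples = 4 };
  const int16_t interleaved[kChannels * kSamples] = {10, -10, 20, -20,
                                                     30, -30, 32767, 32767};
  int16_t channels[kChannels][kSamples];

  // Deinterleave: channel i occupies indices i, i + kChannels, ...
  for (int i = 0; i < kChannels; i++) {
    for (int j = 0; j < kSamples; j++) {
      channels[i][j] = interleaved[j * kChannels + i];
    }
  }

  // Average to mono with saturation, as in StereoToMono(). With two int16
  // inputs the average already fits in int16; the clamp is defensive.
  int16_t mono[kSamples];
  for (int j = 0; j < kSamples; j++) {
    int32_t mixed = (channels[0][j] + channels[1][j]) >> 1;
    if (mixed > 32767) mixed = 32767;
    if (mixed < -32768) mixed = -32768;
    mono[j] = (int16_t)mixed;
  }

  for (int j = 0; j < kSamples; j++) {
    printf("%d ", mono[j]);  // Prints: 0 0 0 32767
  }
  printf("\n");
  return 0;
}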
diff --git a/src/modules/audio_processing/main/source/audio_buffer.h b/src/modules/audio_processing/main/source/audio_buffer.h
new file mode 100644
index 0000000000..15f850b67b
--- /dev/null
+++ b/src/modules/audio_processing/main/source/audio_buffer.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_AUDIO_BUFFER_H_
+#define WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_AUDIO_BUFFER_H_
+
+#include "typedefs.h"
+
+
+namespace webrtc {
+
+struct AudioChannel;
+struct SplitAudioChannel;
+class AudioFrame;
+
+class AudioBuffer {
+ public:
+ AudioBuffer(WebRtc_Word32 max_num_channels, WebRtc_Word32 samples_per_channel);
+ virtual ~AudioBuffer();
+
+ WebRtc_Word32 num_channels() const;
+ WebRtc_Word32 samples_per_channel() const;
+ WebRtc_Word32 samples_per_split_channel() const;
+
+ WebRtc_Word16* data(WebRtc_Word32 channel) const;
+ WebRtc_Word16* low_pass_split_data(WebRtc_Word32 channel) const;
+ WebRtc_Word16* high_pass_split_data(WebRtc_Word32 channel) const;
+ WebRtc_Word16* mixed_low_pass_data(WebRtc_Word32 channel) const;
+ WebRtc_Word16* low_pass_reference(WebRtc_Word32 channel) const;
+
+ WebRtc_Word32* analysis_filter_state1(WebRtc_Word32 channel) const;
+ WebRtc_Word32* analysis_filter_state2(WebRtc_Word32 channel) const;
+ WebRtc_Word32* synthesis_filter_state1(WebRtc_Word32 channel) const;
+ WebRtc_Word32* synthesis_filter_state2(WebRtc_Word32 channel) const;
+
+ void DeinterleaveFrom(AudioFrame* audioFrame);
+ void InterleaveTo(AudioFrame* audioFrame) const;
+ void Mix(WebRtc_Word32 num_mixed_channels);
+ void CopyAndMixLowPass(WebRtc_Word32 num_mixed_channels);
+ void CopyLowPassToReference();
+
+ private:
+ const WebRtc_Word32 max_num_channels_;
+ WebRtc_Word32 num_channels_;
+ WebRtc_Word32 num_mixed_channels_;
+ WebRtc_Word32 num_mixed_low_pass_channels_;
+ const WebRtc_Word32 samples_per_channel_;
+ WebRtc_Word32 samples_per_split_channel_;
+ bool reference_copied_;
+
+ WebRtc_Word16* data_;
+ // TODO(ajm): Prefer to make these vectors if permitted...
+ AudioChannel* channels_;
+ SplitAudioChannel* split_channels_;
+ // TODO(ajm): improve this, we don't need the full 32 kHz space here.
+ AudioChannel* mixed_low_pass_channels_;
+ AudioChannel* low_pass_reference_channels_;
+};
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_AUDIO_BUFFER_H_
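To make the geometry above concrete: at 32 kHz a 10 ms frame is 320 samples per channel, and only then does the constructor allocate split channels, halving to 160 samples per band. A short usage sketch under that assumption:

// Sketch: AudioBuffer geometry for super-wideband (32 kHz) stereo.
// 10 ms at 32 kHz = 320 samples per channel; the analysis filter splits
// each channel into 160-sample low and high bands at 16 kHz.
webrtc::AudioBuffer capture(2 /* max_num_channels */,
                            320 /* samples_per_channel */);
// capture.samples_per_channel()       == 320
// capture.samples_per_split_channel() == 160
// At 8 or 16 kHz no split buffers are allocated, and
// low_pass_split_data(ch) simply aliases data(ch).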
diff --git a/src/modules/audio_processing/main/source/audio_processing_impl.cc b/src/modules/audio_processing/main/source/audio_processing_impl.cc
new file mode 100644
index 0000000000..6440e36ec4
--- /dev/null
+++ b/src/modules/audio_processing/main/source/audio_processing_impl.cc
@@ -0,0 +1,636 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "audio_processing_impl.h"
+
+#include <cassert>
+
+#include "module_common_types.h"
+
+#include "critical_section_wrapper.h"
+#include "file_wrapper.h"
+
+#include "audio_buffer.h"
+#include "echo_cancellation_impl.h"
+#include "echo_control_mobile_impl.h"
+#include "high_pass_filter_impl.h"
+#include "gain_control_impl.h"
+#include "level_estimator_impl.h"
+#include "noise_suppression_impl.h"
+#include "processing_component.h"
+#include "splitting_filter.h"
+#include "voice_detection_impl.h"
+
+namespace webrtc {
+namespace {
+
+enum Events {
+ kInitializeEvent,
+ kRenderEvent,
+ kCaptureEvent
+};
+
+const char kMagicNumber[] = "#!vqetrace1.2";
+} // namespace
+
+AudioProcessing* AudioProcessing::Create(int id) {
+ /*WEBRTC_TRACE(webrtc::kTraceModuleCall,
+ webrtc::kTraceAudioProcessing,
+ id,
+ "AudioProcessing::Create()");*/
+
+ AudioProcessingImpl* apm = new AudioProcessingImpl(id);
+ if (apm->Initialize() != kNoError) {
+ delete apm;
+ apm = NULL;
+ }
+
+ return apm;
+}
+
+void AudioProcessing::Destroy(AudioProcessing* apm) {
+ delete static_cast<AudioProcessingImpl*>(apm);
+}
+
+AudioProcessingImpl::AudioProcessingImpl(int id)
+ : id_(id),
+ echo_cancellation_(NULL),
+ echo_control_mobile_(NULL),
+ gain_control_(NULL),
+ high_pass_filter_(NULL),
+ level_estimator_(NULL),
+ noise_suppression_(NULL),
+ voice_detection_(NULL),
+ debug_file_(FileWrapper::Create()),
+ crit_(CriticalSectionWrapper::CreateCriticalSection()),
+ render_audio_(NULL),
+ capture_audio_(NULL),
+ sample_rate_hz_(kSampleRate16kHz),
+ split_sample_rate_hz_(kSampleRate16kHz),
+ samples_per_channel_(sample_rate_hz_ / 100),
+ stream_delay_ms_(0),
+ was_stream_delay_set_(false),
+ num_render_input_channels_(1),
+ num_capture_input_channels_(1),
+ num_capture_output_channels_(1) {
+
+ echo_cancellation_ = new EchoCancellationImpl(this);
+ component_list_.push_back(echo_cancellation_);
+
+ echo_control_mobile_ = new EchoControlMobileImpl(this);
+ component_list_.push_back(echo_control_mobile_);
+
+ gain_control_ = new GainControlImpl(this);
+ component_list_.push_back(gain_control_);
+
+ high_pass_filter_ = new HighPassFilterImpl(this);
+ component_list_.push_back(high_pass_filter_);
+
+ level_estimator_ = new LevelEstimatorImpl(this);
+ component_list_.push_back(level_estimator_);
+
+ noise_suppression_ = new NoiseSuppressionImpl(this);
+ component_list_.push_back(noise_suppression_);
+
+ voice_detection_ = new VoiceDetectionImpl(this);
+ component_list_.push_back(voice_detection_);
+}
+
+AudioProcessingImpl::~AudioProcessingImpl() {
+ while (!component_list_.empty()) {
+ ProcessingComponent* component = component_list_.front();
+ component->Destroy();
+ delete component;
+ component_list_.pop_front();
+ }
+
+ if (debug_file_->Open()) {
+ debug_file_->CloseFile();
+ }
+ delete debug_file_;
+ debug_file_ = NULL;
+
+ delete crit_;
+ crit_ = NULL;
+
+ if (render_audio_ != NULL) {
+ delete render_audio_;
+ render_audio_ = NULL;
+ }
+
+ if (capture_audio_ != NULL) {
+ delete capture_audio_;
+ capture_audio_ = NULL;
+ }
+}
+
+CriticalSectionWrapper* AudioProcessingImpl::crit() const {
+ return crit_;
+}
+
+int AudioProcessingImpl::split_sample_rate_hz() const {
+ return split_sample_rate_hz_;
+}
+
+int AudioProcessingImpl::Initialize() {
+ CriticalSectionScoped crit_scoped(*crit_);
+ return InitializeLocked();
+}
+
+int AudioProcessingImpl::InitializeLocked() {
+ if (render_audio_ != NULL) {
+ delete render_audio_;
+ render_audio_ = NULL;
+ }
+
+ if (capture_audio_ != NULL) {
+ delete capture_audio_;
+ capture_audio_ = NULL;
+ }
+
+ render_audio_ = new AudioBuffer(num_render_input_channels_,
+ samples_per_channel_);
+ capture_audio_ = new AudioBuffer(num_capture_input_channels_,
+ samples_per_channel_);
+
+ was_stream_delay_set_ = false;
+
+ // Initialize all components.
+ std::list<ProcessingComponent*>::iterator it;
+ for (it = component_list_.begin(); it != component_list_.end(); it++) {
+ int err = (*it)->Initialize();
+ if (err != kNoError) {
+ return err;
+ }
+ }
+
+ return kNoError;
+}
+
+int AudioProcessingImpl::set_sample_rate_hz(int rate) {
+ CriticalSectionScoped crit_scoped(*crit_);
+ if (rate != kSampleRate8kHz &&
+ rate != kSampleRate16kHz &&
+ rate != kSampleRate32kHz) {
+ return kBadParameterError;
+ }
+
+ sample_rate_hz_ = rate;
+ samples_per_channel_ = rate / 100;
+
+ if (sample_rate_hz_ == kSampleRate32kHz) {
+ split_sample_rate_hz_ = kSampleRate16kHz;
+ } else {
+ split_sample_rate_hz_ = sample_rate_hz_;
+ }
+
+ return InitializeLocked();
+}
+
+int AudioProcessingImpl::sample_rate_hz() const {
+ return sample_rate_hz_;
+}
+
+int AudioProcessingImpl::set_num_reverse_channels(int channels) {
+ CriticalSectionScoped crit_scoped(*crit_);
+ // Only stereo supported currently.
+ if (channels > 2 || channels < 1) {
+ return kBadParameterError;
+ }
+
+ num_render_input_channels_ = channels;
+
+ return InitializeLocked();
+}
+
+int AudioProcessingImpl::num_reverse_channels() const {
+ return num_render_input_channels_;
+}
+
+int AudioProcessingImpl::set_num_channels(
+ int input_channels,
+ int output_channels) {
+ CriticalSectionScoped crit_scoped(*crit_);
+ if (output_channels > input_channels) {
+ return kBadParameterError;
+ }
+
+ // Only stereo supported currently.
+ if (input_channels > 2 || input_channels < 1) {
+ return kBadParameterError;
+ }
+
+ if (output_channels > 2 || output_channels < 1) {
+ return kBadParameterError;
+ }
+
+ num_capture_input_channels_ = input_channels;
+ num_capture_output_channels_ = output_channels;
+
+ return InitializeLocked();
+}
+
+int AudioProcessingImpl::num_input_channels() const {
+ return num_capture_input_channels_;
+}
+
+int AudioProcessingImpl::num_output_channels() const {
+ return num_capture_output_channels_;
+}
+
+int AudioProcessingImpl::ProcessStream(AudioFrame* frame) {
+ CriticalSectionScoped crit_scoped(*crit_);
+ int err = kNoError;
+
+ if (frame == NULL) {
+ return kNullPointerError;
+ }
+
+ if (frame->_frequencyInHz !=
+ static_cast<WebRtc_UWord32>(sample_rate_hz_)) {
+ return kBadSampleRateError;
+ }
+
+ if (frame->_audioChannel != num_capture_input_channels_) {
+ return kBadNumberChannelsError;
+ }
+
+ if (frame->_payloadDataLengthInSamples != samples_per_channel_) {
+ return kBadDataLengthError;
+ }
+
+ if (debug_file_->Open()) {
+ WebRtc_UWord8 event = kCaptureEvent;
+ if (!debug_file_->Write(&event, sizeof(event))) {
+ return kFileError;
+ }
+
+ if (!debug_file_->Write(&frame->_frequencyInHz,
+ sizeof(frame->_frequencyInHz))) {
+ return kFileError;
+ }
+
+ if (!debug_file_->Write(&frame->_audioChannel,
+ sizeof(frame->_audioChannel))) {
+ return kFileError;
+ }
+
+ if (!debug_file_->Write(&frame->_payloadDataLengthInSamples,
+ sizeof(frame->_payloadDataLengthInSamples))) {
+ return kFileError;
+ }
+
+ if (!debug_file_->Write(frame->_payloadData,
+ sizeof(WebRtc_Word16) * frame->_payloadDataLengthInSamples *
+ frame->_audioChannel)) {
+ return kFileError;
+ }
+ }
+
+ capture_audio_->DeinterleaveFrom(frame);
+
+ // TODO(ajm): experiment with mixing and AEC placement.
+ if (num_capture_output_channels_ < num_capture_input_channels_) {
+ capture_audio_->Mix(num_capture_output_channels_);
+
+ frame->_audioChannel = num_capture_output_channels_;
+ }
+
+ if (sample_rate_hz_ == kSampleRate32kHz) {
+ // Use the current (possibly mixed-down) channel count; the mix above may
+ // have reduced it below num_capture_input_channels_.
+ for (int i = 0; i < capture_audio_->num_channels(); i++) {
+ // Split into a low and high band.
+ SplittingFilterAnalysis(capture_audio_->data(i),
+ capture_audio_->low_pass_split_data(i),
+ capture_audio_->high_pass_split_data(i),
+ capture_audio_->analysis_filter_state1(i),
+ capture_audio_->analysis_filter_state2(i));
+ }
+ }
+
+ err = high_pass_filter_->ProcessCaptureAudio(capture_audio_);
+ if (err != kNoError) {
+ return err;
+ }
+
+ err = gain_control_->AnalyzeCaptureAudio(capture_audio_);
+ if (err != kNoError) {
+ return err;
+ }
+
+ err = echo_cancellation_->ProcessCaptureAudio(capture_audio_);
+ if (err != kNoError) {
+ return err;
+ }
+
+ if (echo_control_mobile_->is_enabled() &&
+ noise_suppression_->is_enabled()) {
+ capture_audio_->CopyLowPassToReference();
+ }
+
+ err = noise_suppression_->ProcessCaptureAudio(capture_audio_);
+ if (err != kNoError) {
+ return err;
+ }
+
+ err = echo_control_mobile_->ProcessCaptureAudio(capture_audio_);
+ if (err != kNoError) {
+ return err;
+ }
+
+ err = voice_detection_->ProcessCaptureAudio(capture_audio_);
+ if (err != kNoError) {
+ return err;
+ }
+
+ err = gain_control_->ProcessCaptureAudio(capture_audio_);
+ if (err != kNoError) {
+ return err;
+ }
+
+ //err = level_estimator_->ProcessCaptureAudio(capture_audio_);
+ //if (err != kNoError) {
+ // return err;
+ //}
+
+ if (sample_rate_hz_ == kSampleRate32kHz) {
+ for (int i = 0; i < num_capture_output_channels_; i++) {
+ // Recombine low and high bands.
+ SplittingFilterSynthesis(capture_audio_->low_pass_split_data(i),
+ capture_audio_->high_pass_split_data(i),
+ capture_audio_->data(i),
+ capture_audio_->synthesis_filter_state1(i),
+ capture_audio_->synthesis_filter_state2(i));
+ }
+ }
+
+ capture_audio_->InterleaveTo(frame);
+
+ return kNoError;
+}
+
+int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) {
+ CriticalSectionScoped crit_scoped(*crit_);
+ int err = kNoError;
+
+ if (frame == NULL) {
+ return kNullPointerError;
+ }
+
+ if (frame->_frequencyInHz !=
+ static_cast<WebRtc_UWord32>(sample_rate_hz_)) {
+ return kBadSampleRateError;
+ }
+
+ if (frame->_audioChannel != num_render_input_channels_) {
+ return kBadNumberChannelsError;
+ }
+
+ if (frame->_payloadDataLengthInSamples != samples_per_channel_) {
+ return kBadDataLengthError;
+ }
+
+ if (debug_file_->Open()) {
+ WebRtc_UWord8 event = kRenderEvent;
+ if (!debug_file_->Write(&event, sizeof(event))) {
+ return kFileError;
+ }
+
+ if (!debug_file_->Write(&frame->_frequencyInHz,
+ sizeof(frame->_frequencyInHz))) {
+ return kFileError;
+ }
+
+ if (!debug_file_->Write(&frame->_audioChannel,
+ sizeof(frame->_audioChannel))) {
+ return kFileError;
+ }
+
+ if (!debug_file_->Write(&frame->_payloadDataLengthInSamples,
+ sizeof(frame->_payloadDataLengthInSamples))) {
+ return kFileError;
+ }
+
+ if (!debug_file_->Write(frame->_payloadData,
+ sizeof(WebRtc_Word16) * frame->_payloadDataLengthInSamples *
+ frame->_audioChannel)) {
+ return kFileError;
+ }
+ }
+
+ render_audio_->DeinterleaveFrom(frame);
+
+ // TODO(ajm): turn the splitting filter into a component?
+ if (sample_rate_hz_ == kSampleRate32kHz) {
+ for (int i = 0; i < num_render_input_channels_; i++) {
+ // Split into low and high band.
+ SplittingFilterAnalysis(render_audio_->data(i),
+ render_audio_->low_pass_split_data(i),
+ render_audio_->high_pass_split_data(i),
+ render_audio_->analysis_filter_state1(i),
+ render_audio_->analysis_filter_state2(i));
+ }
+ }
+
+ // TODO(ajm): warnings possible from components?
+ err = echo_cancellation_->ProcessRenderAudio(render_audio_);
+ if (err != kNoError) {
+ return err;
+ }
+
+ err = echo_control_mobile_->ProcessRenderAudio(render_audio_);
+ if (err != kNoError) {
+ return err;
+ }
+
+ err = gain_control_->ProcessRenderAudio(render_audio_);
+ if (err != kNoError) {
+ return err;
+ }
+
+ //err = level_estimator_->AnalyzeReverseStream(render_audio_);
+ //if (err != kNoError) {
+ // return err;
+ //}
+
+ was_stream_delay_set_ = false;
+ return err; // TODO(ajm): this is for returning warnings; necessary?
+}
+
+int AudioProcessingImpl::set_stream_delay_ms(int delay) {
+ was_stream_delay_set_ = true;
+ if (delay < 0) {
+ return kBadParameterError;
+ }
+
+ // TODO(ajm): the max is rather arbitrarily chosen; investigate.
+ if (delay > 500) {
+ stream_delay_ms_ = 500;
+ return kBadStreamParameterWarning;
+ }
+
+ stream_delay_ms_ = delay;
+ return kNoError;
+}
+
+int AudioProcessingImpl::stream_delay_ms() const {
+ return stream_delay_ms_;
+}
+
+bool AudioProcessingImpl::was_stream_delay_set() const {
+ return was_stream_delay_set_;
+}
+
+int AudioProcessingImpl::StartDebugRecording(
+ const char filename[AudioProcessing::kMaxFilenameSize]) {
+ CriticalSectionScoped crit_scoped(*crit_);
+ assert(kMaxFilenameSize == FileWrapper::kMaxFileNameSize);
+
+ if (filename == NULL) {
+ return kNullPointerError;
+ }
+
+ // Stop any ongoing recording.
+ if (debug_file_->Open()) {
+ if (debug_file_->CloseFile() == -1) {
+ return kFileError;
+ }
+ }
+
+ if (debug_file_->OpenFile(filename, false) == -1) {
+ debug_file_->CloseFile();
+ return kFileError;
+ }
+
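+  // The recording starts with a text magic number line; everything after it
+  // is a sequence of binary event records, each a one-byte event id followed
+  // by its payload.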
+ if (debug_file_->WriteText("%s\n", kMagicNumber) == -1) {
+ debug_file_->CloseFile();
+ return kFileError;
+ }
+
+ // TODO(ajm): should we do this? If so, we need the number of channels etc.
+ // Record the default sample rate.
+ WebRtc_UWord8 event = kInitializeEvent;
+ if (!debug_file_->Write(&event, sizeof(event))) {
+ return kFileError;
+ }
+
+ if (!debug_file_->Write(&sample_rate_hz_, sizeof(sample_rate_hz_))) {
+ return kFileError;
+ }
+
+ return kNoError;
+}
+
+int AudioProcessingImpl::StopDebugRecording() {
+ CriticalSectionScoped crit_scoped(*crit_);
+ // We just return if recording hasn't started.
+ if (debug_file_->Open()) {
+ if (debug_file_->CloseFile() == -1) {
+ return kFileError;
+ }
+ }
+
+ return kNoError;
+}
+
+EchoCancellation* AudioProcessingImpl::echo_cancellation() const {
+ return echo_cancellation_;
+}
+
+EchoControlMobile* AudioProcessingImpl::echo_control_mobile() const {
+ return echo_control_mobile_;
+}
+
+GainControl* AudioProcessingImpl::gain_control() const {
+ return gain_control_;
+}
+
+HighPassFilter* AudioProcessingImpl::high_pass_filter() const {
+ return high_pass_filter_;
+}
+
+LevelEstimator* AudioProcessingImpl::level_estimator() const {
+ return level_estimator_;
+}
+
+NoiseSuppression* AudioProcessingImpl::noise_suppression() const {
+ return noise_suppression_;
+}
+
+VoiceDetection* AudioProcessingImpl::voice_detection() const {
+ return voice_detection_;
+}
+
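+// Writes the module version string followed by each component's version
+// (newline-separated) into |version| starting at |position|, updating
+// |bytes_remaining| and |position| to reflect the bytes consumed.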
+WebRtc_Word32 AudioProcessingImpl::Version(WebRtc_Word8* version,
+ WebRtc_UWord32& bytes_remaining, WebRtc_UWord32& position) const {
+ if (version == NULL) {
+ /*WEBRTC_TRACE(webrtc::kTraceError,
+ webrtc::kTraceAudioProcessing,
+ -1,
+ "Null version pointer");*/
+ return kNullPointerError;
+ }
+ memset(&version[position], 0, bytes_remaining);
+
+ WebRtc_Word8 my_version[] = "AudioProcessing 1.0.0";
+  // strlen() excludes the null terminator; termination is guaranteed by the
+  // memset() above.
+ WebRtc_UWord32 length = static_cast<WebRtc_UWord32>(strlen(my_version));
+ if (bytes_remaining < length) {
+ /*WEBRTC_TRACE(webrtc::kTraceError,
+ webrtc::kTraceAudioProcessing,
+ -1,
+ "Buffer of insufficient length");*/
+ return kBadParameterError;
+ }
+ memcpy(&version[position], my_version, length);
+ bytes_remaining -= length;
+ position += length;
+
+ std::list<ProcessingComponent*>::const_iterator it;
+ for (it = component_list_.begin(); it != component_list_.end(); it++) {
+ char component_version[256];
+ strcpy(component_version, "\n");
+ int err = (*it)->get_version(&component_version[1],
+ sizeof(component_version) - 1);
+ if (err != kNoError) {
+ return err;
+ }
+    if (component_version[1] == '\0') {
+      // Assume the component has no version info if the string is empty.
+      continue;
+    }
+
+ length = static_cast<WebRtc_UWord32>(strlen(component_version));
+ if (bytes_remaining < length) {
+ /*WEBRTC_TRACE(webrtc::kTraceError,
+ webrtc::kTraceAudioProcessing,
+ -1,
+ "Buffer of insufficient length");*/
+ return kBadParameterError;
+ }
+ memcpy(&version[position], component_version, length);
+ bytes_remaining -= length;
+ position += length;
+ }
+
+ return kNoError;
+}
+
+WebRtc_Word32 AudioProcessingImpl::ChangeUniqueId(const WebRtc_Word32 id) {
+ CriticalSectionScoped crit_scoped(*crit_);
+ /*WEBRTC_TRACE(webrtc::kTraceModuleCall,
+ webrtc::kTraceAudioProcessing,
+ id_,
+ "ChangeUniqueId(new id = %d)",
+ id);*/
+ id_ = id;
+
+ return kNoError;
+}
+} // namespace webrtc
diff --git a/src/modules/audio_processing/main/source/audio_processing_impl.h b/src/modules/audio_processing/main/source/audio_processing_impl.h
new file mode 100644
index 0000000000..9707bde248
--- /dev/null
+++ b/src/modules/audio_processing/main/source/audio_processing_impl.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_AUDIO_PROCESSING_IMPL_H_
+#define WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_AUDIO_PROCESSING_IMPL_H_
+
+#include <list>
+
+#include "audio_processing.h"
+
+namespace webrtc {
+class CriticalSectionWrapper;
+class FileWrapper;
+
+class AudioBuffer;
+class EchoCancellationImpl;
+class EchoControlMobileImpl;
+class GainControlImpl;
+class HighPassFilterImpl;
+class LevelEstimatorImpl;
+class NoiseSuppressionImpl;
+class ProcessingComponent;
+class VoiceDetectionImpl;
+
+class AudioProcessingImpl : public AudioProcessing {
+ public:
+ enum {
+ kSampleRate8kHz = 8000,
+ kSampleRate16kHz = 16000,
+ kSampleRate32kHz = 32000
+ };
+
+ explicit AudioProcessingImpl(int id);
+ virtual ~AudioProcessingImpl();
+
+ CriticalSectionWrapper* crit() const;
+
+ int split_sample_rate_hz() const;
+ bool was_stream_delay_set() const;
+
+ // AudioProcessing methods.
+ virtual int Initialize();
+ virtual int InitializeLocked();
+ virtual int set_sample_rate_hz(int rate);
+ virtual int sample_rate_hz() const;
+ virtual int set_num_channels(int input_channels, int output_channels);
+ virtual int num_input_channels() const;
+ virtual int num_output_channels() const;
+ virtual int set_num_reverse_channels(int channels);
+ virtual int num_reverse_channels() const;
+ virtual int ProcessStream(AudioFrame* frame);
+ virtual int AnalyzeReverseStream(AudioFrame* frame);
+ virtual int set_stream_delay_ms(int delay);
+ virtual int stream_delay_ms() const;
+ virtual int StartDebugRecording(const char filename[kMaxFilenameSize]);
+ virtual int StopDebugRecording();
+ virtual EchoCancellation* echo_cancellation() const;
+ virtual EchoControlMobile* echo_control_mobile() const;
+ virtual GainControl* gain_control() const;
+ virtual HighPassFilter* high_pass_filter() const;
+ virtual LevelEstimator* level_estimator() const;
+ virtual NoiseSuppression* noise_suppression() const;
+ virtual VoiceDetection* voice_detection() const;
+
+ // Module methods.
+  virtual WebRtc_Word32 Version(WebRtc_Word8* version,
+                                WebRtc_UWord32& bytes_remaining,
+                                WebRtc_UWord32& position) const;
+ virtual WebRtc_Word32 ChangeUniqueId(const WebRtc_Word32 id);
+
+ private:
+ int id_;
+
+ EchoCancellationImpl* echo_cancellation_;
+ EchoControlMobileImpl* echo_control_mobile_;
+ GainControlImpl* gain_control_;
+ HighPassFilterImpl* high_pass_filter_;
+ LevelEstimatorImpl* level_estimator_;
+ NoiseSuppressionImpl* noise_suppression_;
+ VoiceDetectionImpl* voice_detection_;
+
+ std::list<ProcessingComponent*> component_list_;
+
+ FileWrapper* debug_file_;
+ CriticalSectionWrapper* crit_;
+
+ AudioBuffer* render_audio_;
+ AudioBuffer* capture_audio_;
+
+ int sample_rate_hz_;
+ int split_sample_rate_hz_;
+ int samples_per_channel_;
+ int stream_delay_ms_;
+ bool was_stream_delay_set_;
+
+ int num_render_input_channels_;
+ int num_capture_input_channels_;
+ int num_capture_output_channels_;
+};
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_AUDIO_PROCESSING_IMPL_H_
diff --git a/src/modules/audio_processing/main/source/echo_cancellation_impl.cc b/src/modules/audio_processing/main/source/echo_cancellation_impl.cc
new file mode 100644
index 0000000000..886d5f158c
--- /dev/null
+++ b/src/modules/audio_processing/main/source/echo_cancellation_impl.cc
@@ -0,0 +1,348 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "echo_cancellation_impl.h"
+
+#include <cassert>
+#include <string.h>
+
+#include "critical_section_wrapper.h"
+#include "echo_cancellation.h"
+
+#include "audio_processing_impl.h"
+#include "audio_buffer.h"
+
+namespace webrtc {
+
+typedef void Handle;
+
+namespace {
+WebRtc_Word16 MapSetting(EchoCancellation::SuppressionLevel level) {
+ switch (level) {
+ case EchoCancellation::kLowSuppression:
+ return kAecNlpConservative;
+ case EchoCancellation::kModerateSuppression:
+ return kAecNlpModerate;
+ case EchoCancellation::kHighSuppression:
+ return kAecNlpAggressive;
+ default:
+ return -1;
+ }
+}
+
+int MapError(int err) {
+  switch (err) {
+    case AEC_UNSUPPORTED_FUNCTION_ERROR:
+      return AudioProcessing::kUnsupportedFunctionError;
+    case AEC_BAD_PARAMETER_ERROR:
+      return AudioProcessing::kBadParameterError;
+    case AEC_BAD_PARAMETER_WARNING:
+      return AudioProcessing::kBadStreamParameterWarning;
+    default:
+      // AEC_UNSPECIFIED_ERROR
+      // AEC_UNINITIALIZED_ERROR
+      // AEC_NULL_POINTER_ERROR
+      return AudioProcessing::kUnspecifiedError;
+  }
+}
+} // namespace
+
+EchoCancellationImpl::EchoCancellationImpl(const AudioProcessingImpl* apm)
+ : ProcessingComponent(apm),
+ apm_(apm),
+ drift_compensation_enabled_(false),
+ metrics_enabled_(false),
+ suppression_level_(kModerateSuppression),
+ device_sample_rate_hz_(48000),
+ stream_drift_samples_(0),
+ was_stream_drift_set_(false),
+ stream_has_echo_(false) {}
+
+EchoCancellationImpl::~EchoCancellationImpl() {}
+
+int EchoCancellationImpl::ProcessRenderAudio(const AudioBuffer* audio) {
+ if (!is_component_enabled()) {
+ return apm_->kNoError;
+ }
+
+ assert(audio->samples_per_split_channel() <= 160);
+ assert(audio->num_channels() == apm_->num_reverse_channels());
+
+ int err = apm_->kNoError;
+
+ // The ordering convention must be followed to pass to the correct AEC.
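+  // Handles are laid out with the capture (output) channel as the outer
+  // index and the reverse (render) channel as the inner index:
+  //   handle_index == capture_channel * num_reverse_channels() + render_channel
+  // ProcessCaptureAudio() iterates in the same order.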
+ size_t handle_index = 0;
+ for (int i = 0; i < apm_->num_output_channels(); i++) {
+ for (int j = 0; j < audio->num_channels(); j++) {
+ Handle* my_handle = static_cast<Handle*>(handle(handle_index));
+ err = WebRtcAec_BufferFarend(
+ my_handle,
+ audio->low_pass_split_data(j),
+ static_cast<WebRtc_Word16>(audio->samples_per_split_channel()));
+
+ if (err != apm_->kNoError) {
+ return GetHandleError(my_handle); // TODO(ajm): warning possible?
+ }
+
+ handle_index++;
+ }
+ }
+
+ return apm_->kNoError;
+}
+
+int EchoCancellationImpl::ProcessCaptureAudio(AudioBuffer* audio) {
+ if (!is_component_enabled()) {
+ return apm_->kNoError;
+ }
+
+ if (!apm_->was_stream_delay_set()) {
+ return apm_->kStreamParameterNotSetError;
+ }
+
+ if (drift_compensation_enabled_ && !was_stream_drift_set_) {
+ return apm_->kStreamParameterNotSetError;
+ }
+
+ assert(audio->samples_per_split_channel() <= 160);
+ assert(audio->num_channels() == apm_->num_output_channels());
+
+ int err = apm_->kNoError;
+
+ // The ordering convention must be followed to pass to the correct AEC.
+ size_t handle_index = 0;
+ stream_has_echo_ = false;
+ for (int i = 0; i < audio->num_channels(); i++) {
+ for (int j = 0; j < apm_->num_reverse_channels(); j++) {
+      Handle* my_handle = static_cast<Handle*>(handle(handle_index));
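+      // The split bands are filtered in place: the same buffers serve as
+      // both the near-end input and the output.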
+ err = WebRtcAec_Process(
+ my_handle,
+ audio->low_pass_split_data(i),
+ audio->high_pass_split_data(i),
+ audio->low_pass_split_data(i),
+ audio->high_pass_split_data(i),
+ static_cast<WebRtc_Word16>(audio->samples_per_split_channel()),
+ apm_->stream_delay_ms(),
+ stream_drift_samples_);
+
+ if (err != apm_->kNoError) {
+ err = GetHandleError(my_handle);
+ // TODO(ajm): Figure out how to return warnings properly.
+ if (err != apm_->kBadStreamParameterWarning) {
+ return err;
+ }
+ }
+
+ WebRtc_Word16 status = 0;
+ err = WebRtcAec_get_echo_status(my_handle, &status);
+ if (err != apm_->kNoError) {
+ return GetHandleError(my_handle);
+ }
+
+ if (status == 1) {
+ stream_has_echo_ = true;
+ }
+
+ handle_index++;
+ }
+ }
+
+ was_stream_drift_set_ = false;
+ return apm_->kNoError;
+}
+
+int EchoCancellationImpl::Enable(bool enable) {
+ CriticalSectionScoped crit_scoped(*apm_->crit());
+ // Ensure AEC and AECM are not both enabled.
+ if (enable && apm_->echo_control_mobile()->is_enabled()) {
+ return apm_->kBadParameterError;
+ }
+
+ return EnableComponent(enable);
+}
+
+bool EchoCancellationImpl::is_enabled() const {
+ return is_component_enabled();
+}
+
+int EchoCancellationImpl::set_suppression_level(SuppressionLevel level) {
+ CriticalSectionScoped crit_scoped(*apm_->crit());
+ if (MapSetting(level) == -1) {
+ return apm_->kBadParameterError;
+ }
+
+ suppression_level_ = level;
+ return Configure();
+}
+
+EchoCancellation::SuppressionLevel EchoCancellationImpl::suppression_level()
+ const {
+ return suppression_level_;
+}
+
+int EchoCancellationImpl::enable_drift_compensation(bool enable) {
+ CriticalSectionScoped crit_scoped(*apm_->crit());
+ drift_compensation_enabled_ = enable;
+ return Configure();
+}
+
+bool EchoCancellationImpl::is_drift_compensation_enabled() const {
+ return drift_compensation_enabled_;
+}
+
+int EchoCancellationImpl::set_device_sample_rate_hz(int rate) {
+ CriticalSectionScoped crit_scoped(*apm_->crit());
+ if (rate < 8000 || rate > 96000) {
+ return apm_->kBadParameterError;
+ }
+
+ device_sample_rate_hz_ = rate;
+ return Initialize();
+}
+
+int EchoCancellationImpl::device_sample_rate_hz() const {
+ return device_sample_rate_hz_;
+}
+
+int EchoCancellationImpl::set_stream_drift_samples(int drift) {
+ was_stream_drift_set_ = true;
+ stream_drift_samples_ = drift;
+ return apm_->kNoError;
+}
+
+int EchoCancellationImpl::stream_drift_samples() const {
+ return stream_drift_samples_;
+}
+
+int EchoCancellationImpl::enable_metrics(bool enable) {
+ CriticalSectionScoped crit_scoped(*apm_->crit());
+ metrics_enabled_ = enable;
+ return Configure();
+}
+
+bool EchoCancellationImpl::are_metrics_enabled() const {
+ return metrics_enabled_;
+}
+
+// TODO(ajm): we currently just use the metrics from the first AEC. Think more
+// about the best way to extend this to multi-channel.
+int EchoCancellationImpl::GetMetrics(Metrics* metrics) {
+ CriticalSectionScoped crit_scoped(*apm_->crit());
+ if (metrics == NULL) {
+ return apm_->kNullPointerError;
+ }
+
+ if (!is_component_enabled() || !metrics_enabled_) {
+ return apm_->kNotEnabledError;
+ }
+
+ AecMetrics my_metrics;
+ memset(&my_metrics, 0, sizeof(my_metrics));
+ memset(metrics, 0, sizeof(Metrics));
+
+ Handle* my_handle = static_cast<Handle*>(handle(0));
+ int err = WebRtcAec_GetMetrics(my_handle, &my_metrics);
+ if (err != apm_->kNoError) {
+ return GetHandleError(my_handle);
+ }
+
+ metrics->residual_echo_return_loss.instant = my_metrics.rerl.instant;
+ metrics->residual_echo_return_loss.average = my_metrics.rerl.average;
+ metrics->residual_echo_return_loss.maximum = my_metrics.rerl.max;
+ metrics->residual_echo_return_loss.minimum = my_metrics.rerl.min;
+
+ metrics->echo_return_loss.instant = my_metrics.erl.instant;
+ metrics->echo_return_loss.average = my_metrics.erl.average;
+ metrics->echo_return_loss.maximum = my_metrics.erl.max;
+ metrics->echo_return_loss.minimum = my_metrics.erl.min;
+
+ metrics->echo_return_loss_enhancement.instant = my_metrics.erle.instant;
+ metrics->echo_return_loss_enhancement.average = my_metrics.erle.average;
+ metrics->echo_return_loss_enhancement.maximum = my_metrics.erle.max;
+ metrics->echo_return_loss_enhancement.minimum = my_metrics.erle.min;
+
+ metrics->a_nlp.instant = my_metrics.aNlp.instant;
+ metrics->a_nlp.average = my_metrics.aNlp.average;
+ metrics->a_nlp.maximum = my_metrics.aNlp.max;
+ metrics->a_nlp.minimum = my_metrics.aNlp.min;
+
+ return apm_->kNoError;
+}
+
+bool EchoCancellationImpl::stream_has_echo() const {
+ return stream_has_echo_;
+}
+
+int EchoCancellationImpl::Initialize() {
+ int err = ProcessingComponent::Initialize();
+ if (err != apm_->kNoError || !is_component_enabled()) {
+ return err;
+ }
+
+ was_stream_drift_set_ = false;
+
+ return apm_->kNoError;
+}
+
+int EchoCancellationImpl::get_version(char* version,
+ int version_len_bytes) const {
+ if (WebRtcAec_get_version(version, version_len_bytes) != 0) {
+ return apm_->kBadParameterError;
+ }
+
+ return apm_->kNoError;
+}
+
+void* EchoCancellationImpl::CreateHandle() const {
+ Handle* handle = NULL;
+ if (WebRtcAec_Create(&handle) != apm_->kNoError) {
+ handle = NULL;
+ } else {
+ assert(handle != NULL);
+ }
+
+ return handle;
+}
+
+int EchoCancellationImpl::DestroyHandle(void* handle) const {
+ assert(handle != NULL);
+ return WebRtcAec_Free(static_cast<Handle*>(handle));
+}
+
+int EchoCancellationImpl::InitializeHandle(void* handle) const {
+ assert(handle != NULL);
+ return WebRtcAec_Init(static_cast<Handle*>(handle),
+ apm_->sample_rate_hz(),
+ device_sample_rate_hz_);
+}
+
+int EchoCancellationImpl::ConfigureHandle(void* handle) const {
+ assert(handle != NULL);
+ AecConfig config;
+ config.metricsMode = metrics_enabled_;
+ config.nlpMode = MapSetting(suppression_level_);
+ config.skewMode = drift_compensation_enabled_;
+
+ return WebRtcAec_set_config(static_cast<Handle*>(handle), config);
+}
+
+int EchoCancellationImpl::num_handles_required() const {
+ return apm_->num_output_channels() *
+ apm_->num_reverse_channels();
+}
+
+int EchoCancellationImpl::GetHandleError(void* handle) const {
+ assert(handle != NULL);
+ return MapError(WebRtcAec_get_error_code(static_cast<Handle*>(handle)));
+}
+} // namespace webrtc
diff --git a/src/modules/audio_processing/main/source/echo_cancellation_impl.h b/src/modules/audio_processing/main/source/echo_cancellation_impl.h
new file mode 100644
index 0000000000..380a69849f
--- /dev/null
+++ b/src/modules/audio_processing/main/source/echo_cancellation_impl.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_ECHO_CANCELLATION_IMPL_H_
+#define WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_ECHO_CANCELLATION_IMPL_H_
+
+#include "audio_processing.h"
+#include "processing_component.h"
+
+namespace webrtc {
+class AudioProcessingImpl;
+class AudioBuffer;
+
+class EchoCancellationImpl : public EchoCancellation,
+ public ProcessingComponent {
+ public:
+ explicit EchoCancellationImpl(const AudioProcessingImpl* apm);
+ virtual ~EchoCancellationImpl();
+
+ int ProcessRenderAudio(const AudioBuffer* audio);
+ int ProcessCaptureAudio(AudioBuffer* audio);
+
+ // EchoCancellation implementation.
+ virtual bool is_enabled() const;
+
+ // ProcessingComponent implementation.
+ virtual int Initialize();
+ virtual int get_version(char* version, int version_len_bytes) const;
+
+ private:
+ // EchoCancellation implementation.
+ virtual int Enable(bool enable);
+ virtual int enable_drift_compensation(bool enable);
+ virtual bool is_drift_compensation_enabled() const;
+ virtual int set_device_sample_rate_hz(int rate);
+ virtual int device_sample_rate_hz() const;
+ virtual int set_stream_drift_samples(int drift);
+ virtual int stream_drift_samples() const;
+ virtual int set_suppression_level(SuppressionLevel level);
+ virtual SuppressionLevel suppression_level() const;
+ virtual int enable_metrics(bool enable);
+ virtual bool are_metrics_enabled() const;
+ virtual bool stream_has_echo() const;
+ virtual int GetMetrics(Metrics* metrics);
+
+ // ProcessingComponent implementation.
+ virtual void* CreateHandle() const;
+ virtual int InitializeHandle(void* handle) const;
+ virtual int ConfigureHandle(void* handle) const;
+ virtual int DestroyHandle(void* handle) const;
+ virtual int num_handles_required() const;
+ virtual int GetHandleError(void* handle) const;
+
+ const AudioProcessingImpl* apm_;
+ bool drift_compensation_enabled_;
+ bool metrics_enabled_;
+ SuppressionLevel suppression_level_;
+ int device_sample_rate_hz_;
+ int stream_drift_samples_;
+ bool was_stream_drift_set_;
+ bool stream_has_echo_;
+};
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_ECHO_CANCELLATION_IMPL_H_
diff --git a/src/modules/audio_processing/main/source/echo_control_mobile_impl.cc b/src/modules/audio_processing/main/source/echo_control_mobile_impl.cc
new file mode 100644
index 0000000000..1cd2502e2f
--- /dev/null
+++ b/src/modules/audio_processing/main/source/echo_control_mobile_impl.cc
@@ -0,0 +1,245 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "echo_control_mobile_impl.h"
+
+#include <cassert>
+
+#include "critical_section_wrapper.h"
+#include "echo_control_mobile.h"
+
+#include "audio_processing_impl.h"
+#include "audio_buffer.h"
+
+namespace webrtc {
+
+typedef void Handle;
+
+namespace {
+WebRtc_Word16 MapSetting(EchoControlMobile::RoutingMode mode) {
+ switch (mode) {
+ case EchoControlMobile::kQuietEarpieceOrHeadset:
+ return 0;
+ case EchoControlMobile::kEarpiece:
+ return 1;
+ case EchoControlMobile::kLoudEarpiece:
+ return 2;
+ case EchoControlMobile::kSpeakerphone:
+ return 3;
+ case EchoControlMobile::kLoudSpeakerphone:
+ return 4;
+ default:
+ return -1;
+ }
+}
+
+int MapError(int err) {
+ switch (err) {
+ case AECM_UNSUPPORTED_FUNCTION_ERROR:
+ return AudioProcessing::kUnsupportedFunctionError;
+ case AECM_BAD_PARAMETER_ERROR:
+ return AudioProcessing::kBadParameterError;
+ case AECM_BAD_PARAMETER_WARNING:
+ return AudioProcessing::kBadStreamParameterWarning;
+ default:
+      // AECM_UNSPECIFIED_ERROR
+      // AECM_UNINITIALIZED_ERROR
+      // AECM_NULL_POINTER_ERROR
+ return AudioProcessing::kUnspecifiedError;
+ }
+}
+} // namespace
+
+EchoControlMobileImpl::EchoControlMobileImpl(const AudioProcessingImpl* apm)
+ : ProcessingComponent(apm),
+ apm_(apm),
+ routing_mode_(kSpeakerphone),
+ comfort_noise_enabled_(true) {}
+
+EchoControlMobileImpl::~EchoControlMobileImpl() {}
+
+int EchoControlMobileImpl::ProcessRenderAudio(const AudioBuffer* audio) {
+ if (!is_component_enabled()) {
+ return apm_->kNoError;
+ }
+
+ assert(audio->samples_per_split_channel() <= 160);
+ assert(audio->num_channels() == apm_->num_reverse_channels());
+
+ int err = apm_->kNoError;
+
+ // The ordering convention must be followed to pass to the correct AECM.
+ size_t handle_index = 0;
+ for (int i = 0; i < apm_->num_output_channels(); i++) {
+ for (int j = 0; j < audio->num_channels(); j++) {
+ Handle* my_handle = static_cast<Handle*>(handle(handle_index));
+ err = WebRtcAecm_BufferFarend(
+ my_handle,
+ audio->low_pass_split_data(j),
+ static_cast<WebRtc_Word16>(audio->samples_per_split_channel()));
+
+ if (err != apm_->kNoError) {
+ return GetHandleError(my_handle); // TODO(ajm): warning possible?
+ }
+
+ handle_index++;
+ }
+ }
+
+ return apm_->kNoError;
+}
+
+int EchoControlMobileImpl::ProcessCaptureAudio(AudioBuffer* audio) {
+ if (!is_component_enabled()) {
+ return apm_->kNoError;
+ }
+
+ if (!apm_->was_stream_delay_set()) {
+ return apm_->kStreamParameterNotSetError;
+ }
+
+ assert(audio->samples_per_split_channel() <= 160);
+ assert(audio->num_channels() == apm_->num_output_channels());
+
+ int err = apm_->kNoError;
+
+ // The ordering convention must be followed to pass to the correct AECM.
+ size_t handle_index = 0;
+ for (int i = 0; i < audio->num_channels(); i++) {
+ // TODO(ajm): improve how this works, possibly inside AECM.
+ // This is kind of hacked up.
+ WebRtc_Word16* noisy = audio->low_pass_reference(i);
+ WebRtc_Word16* clean = audio->low_pass_split_data(i);
+ if (noisy == NULL) {
+ noisy = clean;
+ clean = NULL;
+ }
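+    // When a low-pass reference exists, AECM receives both the unprocessed
+    // (noisy) band and the processed (clean) band; otherwise the processed
+    // band is passed as the noisy input and no clean input is given.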
+ for (int j = 0; j < apm_->num_reverse_channels(); j++) {
+ Handle* my_handle = static_cast<Handle*>(handle(handle_index));
+ err = WebRtcAecm_Process(
+ my_handle,
+ noisy,
+ clean,
+ audio->low_pass_split_data(i),
+ static_cast<WebRtc_Word16>(audio->samples_per_split_channel()),
+ apm_->stream_delay_ms());
+
+ if (err != apm_->kNoError) {
+ return GetHandleError(my_handle); // TODO(ajm): warning possible?
+ }
+
+ handle_index++;
+ }
+ }
+
+ return apm_->kNoError;
+}
+
+int EchoControlMobileImpl::Enable(bool enable) {
+ CriticalSectionScoped crit_scoped(*apm_->crit());
+ // Ensure AEC and AECM are not both enabled.
+ if (enable && apm_->echo_cancellation()->is_enabled()) {
+ return apm_->kBadParameterError;
+ }
+
+ return EnableComponent(enable);
+}
+
+bool EchoControlMobileImpl::is_enabled() const {
+ return is_component_enabled();
+}
+
+int EchoControlMobileImpl::set_routing_mode(RoutingMode mode) {
+ CriticalSectionScoped crit_scoped(*apm_->crit());
+ if (MapSetting(mode) == -1) {
+ return apm_->kBadParameterError;
+ }
+
+ routing_mode_ = mode;
+ return Configure();
+}
+
+EchoControlMobile::RoutingMode EchoControlMobileImpl::routing_mode()
+ const {
+ return routing_mode_;
+}
+
+int EchoControlMobileImpl::enable_comfort_noise(bool enable) {
+ CriticalSectionScoped crit_scoped(*apm_->crit());
+ comfort_noise_enabled_ = enable;
+ return Configure();
+}
+
+bool EchoControlMobileImpl::is_comfort_noise_enabled() const {
+ return comfort_noise_enabled_;
+}
+
+int EchoControlMobileImpl::Initialize() {
+ if (!is_component_enabled()) {
+ return apm_->kNoError;
+ }
+
+ if (apm_->sample_rate_hz() == apm_->kSampleRate32kHz) {
+ // AECM doesn't support super-wideband.
+ return apm_->kBadSampleRateError;
+ }
+
+ return ProcessingComponent::Initialize();
+}
+
+int EchoControlMobileImpl::get_version(char* version,
+ int version_len_bytes) const {
+ if (WebRtcAecm_get_version(version, version_len_bytes) != 0) {
+ return apm_->kBadParameterError;
+ }
+
+ return apm_->kNoError;
+}
+
+void* EchoControlMobileImpl::CreateHandle() const {
+ Handle* handle = NULL;
+ if (WebRtcAecm_Create(&handle) != apm_->kNoError) {
+ handle = NULL;
+ } else {
+ assert(handle != NULL);
+ }
+
+ return handle;
+}
+
+int EchoControlMobileImpl::DestroyHandle(void* handle) const {
+ return WebRtcAecm_Free(static_cast<Handle*>(handle));
+}
+
+int EchoControlMobileImpl::InitializeHandle(void* handle) const {
+ return WebRtcAecm_Init(static_cast<Handle*>(handle),
+ apm_->sample_rate_hz(),
+ 48000); // Dummy value. This isn't actually
+ // required by AECM.
+}
+
+int EchoControlMobileImpl::ConfigureHandle(void* handle) const {
+ AecmConfig config;
+ config.cngMode = comfort_noise_enabled_;
+ config.echoMode = MapSetting(routing_mode_);
+
+ return WebRtcAecm_set_config(static_cast<Handle*>(handle), config);
+}
+
+int EchoControlMobileImpl::num_handles_required() const {
+ return apm_->num_output_channels() *
+ apm_->num_reverse_channels();
+}
+
+int EchoControlMobileImpl::GetHandleError(void* handle) const {
+ assert(handle != NULL);
+ return MapError(WebRtcAecm_get_error_code(static_cast<Handle*>(handle)));
+}
+} // namespace webrtc
diff --git a/src/modules/audio_processing/main/source/echo_control_mobile_impl.h b/src/modules/audio_processing/main/source/echo_control_mobile_impl.h
new file mode 100644
index 0000000000..2fd624810a
--- /dev/null
+++ b/src/modules/audio_processing/main/source/echo_control_mobile_impl.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_ECHO_CONTROL_MOBILE_IMPL_H_
+#define WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_ECHO_CONTROL_MOBILE_IMPL_H_
+
+#include "audio_processing.h"
+#include "processing_component.h"
+
+namespace webrtc {
+class AudioProcessingImpl;
+class AudioBuffer;
+
+class EchoControlMobileImpl : public EchoControlMobile,
+ public ProcessingComponent {
+ public:
+ explicit EchoControlMobileImpl(const AudioProcessingImpl* apm);
+ virtual ~EchoControlMobileImpl();
+
+ int ProcessRenderAudio(const AudioBuffer* audio);
+ int ProcessCaptureAudio(AudioBuffer* audio);
+
+ // EchoControlMobile implementation.
+ virtual bool is_enabled() const;
+
+ // ProcessingComponent implementation.
+ virtual int Initialize();
+ virtual int get_version(char* version, int version_len_bytes) const;
+
+ private:
+ // EchoControlMobile implementation.
+ virtual int Enable(bool enable);
+ virtual int set_routing_mode(RoutingMode mode);
+ virtual RoutingMode routing_mode() const;
+ virtual int enable_comfort_noise(bool enable);
+ virtual bool is_comfort_noise_enabled() const;
+
+ // ProcessingComponent implementation.
+ virtual void* CreateHandle() const;
+ virtual int InitializeHandle(void* handle) const;
+ virtual int ConfigureHandle(void* handle) const;
+ virtual int DestroyHandle(void* handle) const;
+ virtual int num_handles_required() const;
+ virtual int GetHandleError(void* handle) const;
+
+ const AudioProcessingImpl* apm_;
+ RoutingMode routing_mode_;
+ bool comfort_noise_enabled_;
+};
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_ECHO_CONTROL_MOBILE_IMPL_H_
diff --git a/src/modules/audio_processing/main/source/gain_control_impl.cc b/src/modules/audio_processing/main/source/gain_control_impl.cc
new file mode 100644
index 0000000000..dc3e565589
--- /dev/null
+++ b/src/modules/audio_processing/main/source/gain_control_impl.cc
@@ -0,0 +1,391 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "gain_control_impl.h"
+
+#include <cassert>
+
+#include "critical_section_wrapper.h"
+#include "gain_control.h"
+
+#include "audio_processing_impl.h"
+#include "audio_buffer.h"
+
+namespace webrtc {
+
+typedef void Handle;
+
+/*template <class T>
+class GainControlHandle : public ComponentHandle<T> {
+ public:
+ GainControlHandle();
+ virtual ~GainControlHandle();
+
+ virtual int Create();
+ virtual T* ptr() const;
+
+ private:
+ T* handle;
+};*/
+
+namespace {
+WebRtc_Word16 MapSetting(GainControl::Mode mode) {
+  switch (mode) {
+    case GainControl::kAdaptiveAnalog:
+      return kAgcModeAdaptiveAnalog;
+    case GainControl::kAdaptiveDigital:
+      return kAgcModeAdaptiveDigital;
+    case GainControl::kFixedDigital:
+      return kAgcModeFixedDigital;
+    default:
+      return -1;
+  }
+}
+} // namespace
+
+GainControlImpl::GainControlImpl(const AudioProcessingImpl* apm)
+ : ProcessingComponent(apm),
+ apm_(apm),
+ mode_(kAdaptiveAnalog),
+ minimum_capture_level_(0),
+ maximum_capture_level_(255),
+ limiter_enabled_(true),
+ target_level_dbfs_(3),
+ compression_gain_db_(9),
+ analog_capture_level_(0),
+ was_analog_level_set_(false),
+ stream_is_saturated_(false) {}
+
+GainControlImpl::~GainControlImpl() {}
+
+int GainControlImpl::ProcessRenderAudio(AudioBuffer* audio) {
+ if (!is_component_enabled()) {
+ return apm_->kNoError;
+ }
+
+ assert(audio->samples_per_split_channel() <= 160);
+
+ WebRtc_Word16* mixed_data = audio->low_pass_split_data(0);
+ if (audio->num_channels() > 1) {
+ audio->CopyAndMixLowPass(1);
+ mixed_data = audio->mixed_low_pass_data(0);
+ }
+
+ for (int i = 0; i < num_handles(); i++) {
+ Handle* my_handle = static_cast<Handle*>(handle(i));
+ int err = WebRtcAgc_AddFarend(
+ my_handle,
+ mixed_data,
+ static_cast<WebRtc_Word16>(audio->samples_per_split_channel()));
+
+ if (err != apm_->kNoError) {
+ return GetHandleError(my_handle);
+ }
+ }
+
+ return apm_->kNoError;
+}
+
+int GainControlImpl::AnalyzeCaptureAudio(AudioBuffer* audio) {
+ if (!is_component_enabled()) {
+ return apm_->kNoError;
+ }
+
+ assert(audio->samples_per_split_channel() <= 160);
+ assert(audio->num_channels() == num_handles());
+
+ int err = apm_->kNoError;
+
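+  // In kAdaptiveAnalog the application controls a real analog mic volume and
+  // the AGC only analyzes the signal here; in kAdaptiveDigital the AGC
+  // simulates an analog volume via WebRtcAgc_VirtualMic() and tracks it in
+  // capture_levels_.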
+ if (mode_ == kAdaptiveAnalog) {
+ for (int i = 0; i < num_handles(); i++) {
+ Handle* my_handle = static_cast<Handle*>(handle(i));
+ err = WebRtcAgc_AddMic(
+ my_handle,
+ audio->low_pass_split_data(i),
+ audio->high_pass_split_data(i),
+ static_cast<WebRtc_Word16>(audio->samples_per_split_channel()));
+
+ if (err != apm_->kNoError) {
+ return GetHandleError(my_handle);
+ }
+ }
+  } else if (mode_ == kAdaptiveDigital) {
+ for (int i = 0; i < num_handles(); i++) {
+ Handle* my_handle = static_cast<Handle*>(handle(i));
+ WebRtc_Word32 capture_level_out = 0;
+
+ err = WebRtcAgc_VirtualMic(
+ my_handle,
+ audio->low_pass_split_data(i),
+ audio->high_pass_split_data(i),
+ static_cast<WebRtc_Word16>(audio->samples_per_split_channel()),
+ //capture_levels_[i],
+ analog_capture_level_,
+ &capture_level_out);
+
+ capture_levels_[i] = capture_level_out;
+
+ if (err != apm_->kNoError) {
+ return GetHandleError(my_handle);
+ }
+    }
+ }
+
+ return apm_->kNoError;
+}
+
+int GainControlImpl::ProcessCaptureAudio(AudioBuffer* audio) {
+ if (!is_component_enabled()) {
+ return apm_->kNoError;
+ }
+
+ if (mode_ == kAdaptiveAnalog && !was_analog_level_set_) {
+ return apm_->kStreamParameterNotSetError;
+ }
+
+ assert(audio->samples_per_split_channel() <= 160);
+ assert(audio->num_channels() == num_handles());
+
+ stream_is_saturated_ = false;
+ for (int i = 0; i < num_handles(); i++) {
+ Handle* my_handle = static_cast<Handle*>(handle(i));
+ WebRtc_Word32 capture_level_out = 0;
+ WebRtc_UWord8 saturation_warning = 0;
+
+ int err = WebRtcAgc_Process(
+ my_handle,
+ audio->low_pass_split_data(i),
+ audio->high_pass_split_data(i),
+ static_cast<WebRtc_Word16>(audio->samples_per_split_channel()),
+ audio->low_pass_split_data(i),
+ audio->high_pass_split_data(i),
+ capture_levels_[i],
+ &capture_level_out,
+ apm_->echo_cancellation()->stream_has_echo(),
+ &saturation_warning);
+
+ if (err != apm_->kNoError) {
+ return GetHandleError(my_handle);
+ }
+
+ capture_levels_[i] = capture_level_out;
+ if (saturation_warning == 1) {
+ stream_is_saturated_ = true;
+ }
+ }
+
+ if (mode_ == kAdaptiveAnalog) {
+ // Take the analog level to be the average across the handles.
+ analog_capture_level_ = 0;
+ for (int i = 0; i < num_handles(); i++) {
+ analog_capture_level_ += capture_levels_[i];
+ }
+
+ analog_capture_level_ /= num_handles();
+ }
+
+ was_analog_level_set_ = false;
+ return apm_->kNoError;
+}
+
+// TODO(ajm): ensure this is called under kAdaptiveAnalog.
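+// In kAdaptiveAnalog the expected capture-side call pattern is roughly
+// (sketch):
+//   apm->gain_control()->set_stream_analog_level(current_mic_volume);
+//   apm->ProcessStream(frame);
+//   current_mic_volume = apm->gain_control()->stream_analog_level();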
+int GainControlImpl::set_stream_analog_level(int level) {
+ was_analog_level_set_ = true;
+ if (level < minimum_capture_level_ || level > maximum_capture_level_) {
+ return apm_->kBadParameterError;
+ }
+
+ if (mode_ == kAdaptiveAnalog) {
+ if (level != analog_capture_level_) {
+ // The analog level has been changed; update our internal levels.
+ capture_levels_.assign(num_handles(), level);
+ }
+ }
+ analog_capture_level_ = level;
+
+ return apm_->kNoError;
+}
+
+int GainControlImpl::stream_analog_level() {
+ // TODO(ajm): enable this assertion?
+ //assert(mode_ == kAdaptiveAnalog);
+
+ return analog_capture_level_;
+}
+
+int GainControlImpl::Enable(bool enable) {
+ CriticalSectionScoped crit_scoped(*apm_->crit());
+ return EnableComponent(enable);
+}
+
+bool GainControlImpl::is_enabled() const {
+ return is_component_enabled();
+}
+
+int GainControlImpl::set_mode(Mode mode) {
+ CriticalSectionScoped crit_scoped(*apm_->crit());
+ if (MapSetting(mode) == -1) {
+ return apm_->kBadParameterError;
+ }
+
+ mode_ = mode;
+ return Initialize();
+}
+
+GainControl::Mode GainControlImpl::mode() const {
+ return mode_;
+}
+
+int GainControlImpl::set_analog_level_limits(int minimum,
+ int maximum) {
+ CriticalSectionScoped crit_scoped(*apm_->crit());
+ if (minimum < 0) {
+ return apm_->kBadParameterError;
+ }
+
+ if (maximum > 65535) {
+ return apm_->kBadParameterError;
+ }
+
+ if (maximum < minimum) {
+ return apm_->kBadParameterError;
+ }
+
+ minimum_capture_level_ = minimum;
+ maximum_capture_level_ = maximum;
+
+ return Initialize();
+}
+
+int GainControlImpl::analog_level_minimum() const {
+ return minimum_capture_level_;
+}
+
+int GainControlImpl::analog_level_maximum() const {
+ return maximum_capture_level_;
+}
+
+bool GainControlImpl::stream_is_saturated() const {
+ return stream_is_saturated_;
+}
+
+int GainControlImpl::set_target_level_dbfs(int level) {
+ CriticalSectionScoped crit_scoped(*apm_->crit());
+ if (level > 31 || level < 0) {
+ return apm_->kBadParameterError;
+ }
+
+ target_level_dbfs_ = level;
+ return Configure();
+}
+
+int GainControlImpl::target_level_dbfs() const {
+ return target_level_dbfs_;
+}
+
+int GainControlImpl::set_compression_gain_db(int gain) {
+ CriticalSectionScoped crit_scoped(*apm_->crit());
+ if (gain < 0 || gain > 90) {
+ return apm_->kBadParameterError;
+ }
+
+ compression_gain_db_ = gain;
+ return Configure();
+}
+
+int GainControlImpl::compression_gain_db() const {
+ return compression_gain_db_;
+}
+
+int GainControlImpl::enable_limiter(bool enable) {
+ CriticalSectionScoped crit_scoped(*apm_->crit());
+ limiter_enabled_ = enable;
+ return Configure();
+}
+
+bool GainControlImpl::is_limiter_enabled() const {
+ return limiter_enabled_;
+}
+
+int GainControlImpl::Initialize() {
+ int err = ProcessingComponent::Initialize();
+ if (err != apm_->kNoError || !is_component_enabled()) {
+ return err;
+ }
+
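+  // Start at half the level range; this equals the midpoint of
+  // [minimum, maximum] only when minimum_capture_level_ is 0 (the default).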
+ analog_capture_level_ =
+ (maximum_capture_level_ - minimum_capture_level_) >> 1;
+ capture_levels_.assign(num_handles(), analog_capture_level_);
+ was_analog_level_set_ = false;
+
+ return apm_->kNoError;
+}
+
+int GainControlImpl::get_version(char* version, int version_len_bytes) const {
+ if (WebRtcAgc_Version(version, version_len_bytes) != 0) {
+ return apm_->kBadParameterError;
+ }
+
+ return apm_->kNoError;
+}
+
+void* GainControlImpl::CreateHandle() const {
+ Handle* handle = NULL;
+ if (WebRtcAgc_Create(&handle) != apm_->kNoError) {
+ handle = NULL;
+ } else {
+ assert(handle != NULL);
+ }
+
+ return handle;
+}
+
+int GainControlImpl::DestroyHandle(void* handle) const {
+ return WebRtcAgc_Free(static_cast<Handle*>(handle));
+}
+
+int GainControlImpl::InitializeHandle(void* handle) const {
+ return WebRtcAgc_Init(static_cast<Handle*>(handle),
+ minimum_capture_level_,
+ maximum_capture_level_,
+ MapSetting(mode_),
+ apm_->sample_rate_hz());
+}
+
+int GainControlImpl::ConfigureHandle(void* handle) const {
+ WebRtcAgc_config_t config;
+ // TODO(ajm): Flip the sign here (since AGC expects a positive value) if we
+ // change the interface.
+ //assert(target_level_dbfs_ <= 0);
+ //config.targetLevelDbfs = static_cast<WebRtc_Word16>(-target_level_dbfs_);
+ config.targetLevelDbfs = static_cast<WebRtc_Word16>(target_level_dbfs_);
+ config.compressionGaindB =
+ static_cast<WebRtc_Word16>(compression_gain_db_);
+ config.limiterEnable = limiter_enabled_;
+
+ return WebRtcAgc_set_config(static_cast<Handle*>(handle), config);
+}
+
+int GainControlImpl::num_handles_required() const {
+ return apm_->num_output_channels();
+}
+
+int GainControlImpl::GetHandleError(void* handle) const {
+ // The AGC has no get_error() function.
+ // (Despite listing errors in its interface...)
+ assert(handle != NULL);
+ return apm_->kUnspecifiedError;
+}
+} // namespace webrtc
diff --git a/src/modules/audio_processing/main/source/gain_control_impl.h b/src/modules/audio_processing/main/source/gain_control_impl.h
new file mode 100644
index 0000000000..a11d606f45
--- /dev/null
+++ b/src/modules/audio_processing/main/source/gain_control_impl.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_GAIN_CONTROL_IMPL_H_
+#define WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_GAIN_CONTROL_IMPL_H_
+
+#include <vector>
+
+#include "audio_processing.h"
+#include "processing_component.h"
+
+namespace webrtc {
+class AudioProcessingImpl;
+class AudioBuffer;
+
+class GainControlImpl : public GainControl,
+ public ProcessingComponent {
+ public:
+ explicit GainControlImpl(const AudioProcessingImpl* apm);
+ virtual ~GainControlImpl();
+
+ int ProcessRenderAudio(AudioBuffer* audio);
+ int AnalyzeCaptureAudio(AudioBuffer* audio);
+ int ProcessCaptureAudio(AudioBuffer* audio);
+
+ // ProcessingComponent implementation.
+ virtual int Initialize();
+ virtual int get_version(char* version, int version_len_bytes) const;
+
+ // GainControl implementation.
+ virtual bool is_enabled() const;
+
+ private:
+ // GainControl implementation.
+ virtual int Enable(bool enable);
+ virtual int set_stream_analog_level(int level);
+ virtual int stream_analog_level();
+ virtual int set_mode(Mode mode);
+ virtual Mode mode() const;
+ virtual int set_target_level_dbfs(int level);
+ virtual int target_level_dbfs() const;
+ virtual int set_compression_gain_db(int gain);
+ virtual int compression_gain_db() const;
+ virtual int enable_limiter(bool enable);
+ virtual bool is_limiter_enabled() const;
+ virtual int set_analog_level_limits(int minimum, int maximum);
+ virtual int analog_level_minimum() const;
+ virtual int analog_level_maximum() const;
+ virtual bool stream_is_saturated() const;
+
+ // ProcessingComponent implementation.
+ virtual void* CreateHandle() const;
+ virtual int InitializeHandle(void* handle) const;
+ virtual int ConfigureHandle(void* handle) const;
+ virtual int DestroyHandle(void* handle) const;
+ virtual int num_handles_required() const;
+ virtual int GetHandleError(void* handle) const;
+
+ const AudioProcessingImpl* apm_;
+ Mode mode_;
+ int minimum_capture_level_;
+ int maximum_capture_level_;
+ bool limiter_enabled_;
+ int target_level_dbfs_;
+ int compression_gain_db_;
+ std::vector<int> capture_levels_;
+ int analog_capture_level_;
+ bool was_analog_level_set_;
+ bool stream_is_saturated_;
+};
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_GAIN_CONTROL_IMPL_H_
diff --git a/src/modules/audio_processing/main/source/high_pass_filter_impl.cc b/src/modules/audio_processing/main/source/high_pass_filter_impl.cc
new file mode 100644
index 0000000000..fa6d5d5ece
--- /dev/null
+++ b/src/modules/audio_processing/main/source/high_pass_filter_impl.cc
@@ -0,0 +1,180 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "high_pass_filter_impl.h"
+
+#include <cassert>
+#include <cstring>
+
+#include "critical_section_wrapper.h"
+#include "typedefs.h"
+#include "signal_processing_library.h"
+
+#include "audio_processing_impl.h"
+#include "audio_buffer.h"
+
+namespace webrtc {
+namespace {
+const WebRtc_Word16 kFilterCoefficients8kHz[5] =
+ {3798, -7596, 3798, 7807, -3733};
+
+const WebRtc_Word16 kFilterCoefficients[5] =
+ {4012, -8024, 4012, 8002, -3913};
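+// Both sets are Q12 coefficients ordered {b0, b1, b2, -a1, -a2} for the
+// second-order difference equation evaluated in Filter() below.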
+
+struct FilterState {
+ WebRtc_Word16 y[4];
+ WebRtc_Word16 x[2];
+ const WebRtc_Word16* ba;
+};
+
+int InitializeFilter(FilterState* hpf, int sample_rate_hz) {
+ assert(hpf != NULL);
+
+ if (sample_rate_hz == AudioProcessingImpl::kSampleRate8kHz) {
+ hpf->ba = kFilterCoefficients8kHz;
+ } else {
+ hpf->ba = kFilterCoefficients;
+ }
+
+ WebRtcSpl_MemSetW16(hpf->x, 0, 2);
+ WebRtcSpl_MemSetW16(hpf->y, 0, 4);
+
+ return AudioProcessing::kNoError;
+}
+
+int Filter(FilterState* hpf, WebRtc_Word16* data, int length) {
+ assert(hpf != NULL);
+
+ WebRtc_Word32 tmp_int32 = 0;
+ WebRtc_Word16* y = hpf->y;
+ WebRtc_Word16* x = hpf->x;
+ const WebRtc_Word16* ba = hpf->ba;
+
+ for (int i = 0; i < length; i++) {
+ // y[i] = b[0] * x[i] + b[1] * x[i-1] + b[2] * x[i-2]
+ // + -a[1] * y[i-1] + -a[2] * y[i-2];
+
+ tmp_int32 =
+ WEBRTC_SPL_MUL_16_16(y[1], ba[3]); // -a[1] * y[i-1] (low part)
+ tmp_int32 +=
+ WEBRTC_SPL_MUL_16_16(y[3], ba[4]); // -a[2] * y[i-2] (low part)
+ tmp_int32 = (tmp_int32 >> 15);
+ tmp_int32 +=
+ WEBRTC_SPL_MUL_16_16(y[0], ba[3]); // -a[1] * y[i-1] (high part)
+ tmp_int32 +=
+ WEBRTC_SPL_MUL_16_16(y[2], ba[4]); // -a[2] * y[i-2] (high part)
+ tmp_int32 = (tmp_int32 << 1);
+
+ tmp_int32 += WEBRTC_SPL_MUL_16_16(data[i], ba[0]); // b[0]*x[0]
+ tmp_int32 += WEBRTC_SPL_MUL_16_16(x[0], ba[1]); // b[1]*x[i-1]
+ tmp_int32 += WEBRTC_SPL_MUL_16_16(x[1], ba[2]); // b[2]*x[i-2]
+
+ // Update state (input part)
+ x[1] = x[0];
+ x[0] = data[i];
+
+ // Update state (filtered part)
+ y[2] = y[0];
+ y[3] = y[1];
+ y[0] = static_cast<WebRtc_Word16>(tmp_int32 >> 13);
+ y[1] = static_cast<WebRtc_Word16>((tmp_int32 -
+ WEBRTC_SPL_LSHIFT_W32(static_cast<WebRtc_Word32>(y[0]), 13)) << 2);
+
+ // Rounding in Q12, i.e. add 2^11
+ tmp_int32 += 2048;
+
+ // Saturate (to 2^27) so that the HP filtered signal does not overflow
+ tmp_int32 = WEBRTC_SPL_SAT(static_cast<WebRtc_Word32>(134217727),
+ tmp_int32,
+ static_cast<WebRtc_Word32>(-134217728));
+
+ // Convert back to Q0 and use rounding
+ data[i] = (WebRtc_Word16)WEBRTC_SPL_RSHIFT_W32(tmp_int32, 12);
+  }
+
+ return AudioProcessing::kNoError;
+}
+} // namespace
+
+typedef FilterState Handle;
+
+HighPassFilterImpl::HighPassFilterImpl(const AudioProcessingImpl* apm)
+ : ProcessingComponent(apm),
+ apm_(apm) {}
+
+HighPassFilterImpl::~HighPassFilterImpl() {}
+
+int HighPassFilterImpl::ProcessCaptureAudio(AudioBuffer* audio) {
+ int err = apm_->kNoError;
+
+ if (!is_component_enabled()) {
+ return apm_->kNoError;
+ }
+
+ assert(audio->samples_per_split_channel() <= 160);
+
+ for (int i = 0; i < num_handles(); i++) {
+ Handle* my_handle = static_cast<Handle*>(handle(i));
+ err = Filter(my_handle,
+ audio->low_pass_split_data(i),
+ audio->samples_per_split_channel());
+
+ if (err != apm_->kNoError) {
+ return GetHandleError(my_handle);
+ }
+ }
+
+ return apm_->kNoError;
+}
+
+int HighPassFilterImpl::Enable(bool enable) {
+ CriticalSectionScoped crit_scoped(*apm_->crit());
+ return EnableComponent(enable);
+}
+
+bool HighPassFilterImpl::is_enabled() const {
+ return is_component_enabled();
+}
+
+int HighPassFilterImpl::get_version(char* version,
+ int version_len_bytes) const {
+ // An empty string is used to indicate no version information.
+ memset(version, 0, version_len_bytes);
+ return apm_->kNoError;
+}
+
+void* HighPassFilterImpl::CreateHandle() const {
+ return new FilterState;
+}
+
+int HighPassFilterImpl::DestroyHandle(void* handle) const {
+ delete static_cast<Handle*>(handle);
+ return apm_->kNoError;
+}
+
+int HighPassFilterImpl::InitializeHandle(void* handle) const {
+ return InitializeFilter(static_cast<Handle*>(handle),
+ apm_->sample_rate_hz());
+}
+
+int HighPassFilterImpl::ConfigureHandle(void* /*handle*/) const {
+ return apm_->kNoError; // Not configurable.
+}
+
+int HighPassFilterImpl::num_handles_required() const {
+ return apm_->num_output_channels();
+}
+
+int HighPassFilterImpl::GetHandleError(void* handle) const {
+ // The component has no detailed errors.
+ assert(handle != NULL);
+ return apm_->kUnspecifiedError;
+}
+} // namespace webrtc
diff --git a/src/modules/audio_processing/main/source/high_pass_filter_impl.h b/src/modules/audio_processing/main/source/high_pass_filter_impl.h
new file mode 100644
index 0000000000..4c23754270
--- /dev/null
+++ b/src/modules/audio_processing/main/source/high_pass_filter_impl.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_HIGH_PASS_FILTER_IMPL_H_
+#define WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_HIGH_PASS_FILTER_IMPL_H_
+
+#include "audio_processing.h"
+#include "processing_component.h"
+
+namespace webrtc {
+class AudioProcessingImpl;
+class AudioBuffer;
+
+class HighPassFilterImpl : public HighPassFilter,
+ public ProcessingComponent {
+ public:
+ explicit HighPassFilterImpl(const AudioProcessingImpl* apm);
+ virtual ~HighPassFilterImpl();
+
+ int ProcessCaptureAudio(AudioBuffer* audio);
+
+ // HighPassFilter implementation.
+ virtual bool is_enabled() const;
+
+ // ProcessingComponent implementation.
+ virtual int get_version(char* version, int version_len_bytes) const;
+
+ private:
+ // HighPassFilter implementation.
+ virtual int Enable(bool enable);
+
+ // ProcessingComponent implementation.
+ virtual void* CreateHandle() const;
+ virtual int InitializeHandle(void* handle) const;
+ virtual int ConfigureHandle(void* handle) const;
+ virtual int DestroyHandle(void* handle) const;
+ virtual int num_handles_required() const;
+ virtual int GetHandleError(void* handle) const;
+
+ const AudioProcessingImpl* apm_;
+};
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_HIGH_PASS_FILTER_IMPL_H_
diff --git a/src/modules/audio_processing/main/source/level_estimator_impl.cc b/src/modules/audio_processing/main/source/level_estimator_impl.cc
new file mode 100644
index 0000000000..799a9624f7
--- /dev/null
+++ b/src/modules/audio_processing/main/source/level_estimator_impl.cc
@@ -0,0 +1,182 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "level_estimator_impl.h"
+
+#include <cassert>
+#include <cstring>
+
+#include "critical_section_wrapper.h"
+
+#include "audio_processing_impl.h"
+#include "audio_buffer.h"
+
+// TODO(ajm): implement the underlying level estimator component.
+
+namespace webrtc {
+
+typedef void Handle;
+
+namespace {
+/*int EstimateLevel(AudioBuffer* audio, Handle* my_handle) {
+ assert(audio->samples_per_split_channel() <= 160);
+
+ WebRtc_Word16* mixed_data = audio->low_pass_split_data(0);
+ if (audio->num_channels() > 1) {
+ audio->CopyAndMixLowPass(1);
+ mixed_data = audio->mixed_low_pass_data(0);
+ }
+
+ int err = UpdateLvlEst(my_handle,
+ mixed_data,
+ audio->samples_per_split_channel());
+ if (err != AudioProcessing::kNoError) {
+ return GetHandleError(my_handle);
+ }
+
+ return AudioProcessing::kNoError;
+}
+
+int GetMetricsLocal(Handle* my_handle, LevelEstimator::Metrics* metrics) {
+ level_t levels;
+ memset(&levels, 0, sizeof(levels));
+
+ int err = ExportLevels(my_handle, &levels, 2);
+ if (err != AudioProcessing::kNoError) {
+ return err;
+ }
+ metrics->signal.instant = levels.instant;
+ metrics->signal.average = levels.average;
+ metrics->signal.maximum = levels.max;
+ metrics->signal.minimum = levels.min;
+
+ err = ExportLevels(my_handle, &levels, 1);
+ if (err != AudioProcessing::kNoError) {
+ return err;
+ }
+ metrics->speech.instant = levels.instant;
+ metrics->speech.average = levels.average;
+ metrics->speech.maximum = levels.max;
+ metrics->speech.minimum = levels.min;
+
+ err = ExportLevels(my_handle, &levels, 0);
+ if (err != AudioProcessing::kNoError) {
+ return err;
+ }
+ metrics->noise.instant = levels.instant;
+ metrics->noise.average = levels.average;
+ metrics->noise.maximum = levels.max;
+ metrics->noise.minimum = levels.min;
+
+ return AudioProcessing::kNoError;
+}*/
+} // namespace
+
+LevelEstimatorImpl::LevelEstimatorImpl(const AudioProcessingImpl* apm)
+ : ProcessingComponent(apm),
+ apm_(apm) {}
+
+LevelEstimatorImpl::~LevelEstimatorImpl() {}
+
+int LevelEstimatorImpl::AnalyzeReverseStream(AudioBuffer* /*audio*/) {
+ return apm_->kUnsupportedComponentError;
+ /*if (!is_component_enabled()) {
+ return apm_->kNoError;
+ }
+
+ return EstimateLevel(audio, static_cast<Handle*>(handle(1)));*/
+}
+
+int LevelEstimatorImpl::ProcessCaptureAudio(AudioBuffer* /*audio*/) {
+ return apm_->kUnsupportedComponentError;
+ /*if (!is_component_enabled()) {
+ return apm_->kNoError;
+ }
+
+ return EstimateLevel(audio, static_cast<Handle*>(handle(0)));*/
+}
+
+int LevelEstimatorImpl::Enable(bool /*enable*/) {
+ CriticalSectionScoped crit_scoped(*apm_->crit());
+ return apm_->kUnsupportedComponentError;
+ //return EnableComponent(enable);
+}
+
+bool LevelEstimatorImpl::is_enabled() const {
+ return is_component_enabled();
+}
+
+int LevelEstimatorImpl::GetMetrics(LevelEstimator::Metrics* /*metrics*/,
+ LevelEstimator::Metrics* /*reverse_metrics*/) {
+ return apm_->kUnsupportedComponentError;
+ /*if (!is_component_enabled()) {
+ return apm_->kNotEnabledError;
+ }
+
+ int err = GetMetricsLocal(static_cast<Handle*>(handle(0)), metrics);
+ if (err != apm_->kNoError) {
+ return err;
+ }
+
+ err = GetMetricsLocal(static_cast<Handle*>(handle(1)), reverse_metrics);
+ if (err != apm_->kNoError) {
+ return err;
+ }
+
+ return apm_->kNoError;*/
+}
+
+int LevelEstimatorImpl::get_version(char* version,
+ int version_len_bytes) const {
+ // An empty string is used to indicate no version information.
+ memset(version, 0, version_len_bytes);
+ return apm_->kNoError;
+}
+
+void* LevelEstimatorImpl::CreateHandle() const {
+ Handle* handle = NULL;
+ /*if (CreateLvlEst(&handle) != apm_->kNoError) {
+ handle = NULL;
+ } else {
+ assert(handle != NULL);
+ }*/
+
+ return handle;
+}
+
+int LevelEstimatorImpl::DestroyHandle(void* /*handle*/) const {
+ return apm_->kUnsupportedComponentError;
+ //return FreeLvlEst(static_cast<Handle*>(handle));
+}
+
+int LevelEstimatorImpl::InitializeHandle(void* /*handle*/) const {
+ return apm_->kUnsupportedComponentError;
+ /*const double kIntervalSeconds = 1.5;
+ return InitLvlEst(static_cast<Handle*>(handle),
+ apm_->sample_rate_hz(),
+ kIntervalSeconds);*/
+}
+
+int LevelEstimatorImpl::ConfigureHandle(void* /*handle*/) const {
+ return apm_->kUnsupportedComponentError;
+ //return apm_->kNoError;
+}
+
+int LevelEstimatorImpl::num_handles_required() const {
+ return apm_->kUnsupportedComponentError;
+ //return 2;
+}
+
+int LevelEstimatorImpl::GetHandleError(void* handle) const {
+ // The component has no detailed errors.
+ assert(handle != NULL);
+ return apm_->kUnspecifiedError;
+}
+} // namespace webrtc
diff --git a/src/modules/audio_processing/main/source/level_estimator_impl.h b/src/modules/audio_processing/main/source/level_estimator_impl.h
new file mode 100644
index 0000000000..1515722df4
--- /dev/null
+++ b/src/modules/audio_processing/main/source/level_estimator_impl.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_LEVEL_ESTIMATOR_IMPL_H_
+#define WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_LEVEL_ESTIMATOR_IMPL_H_
+
+#include "audio_processing.h"
+#include "processing_component.h"
+
+namespace webrtc {
+class AudioProcessingImpl;
+class AudioBuffer;
+
+class LevelEstimatorImpl : public LevelEstimator,
+ public ProcessingComponent {
+ public:
+ explicit LevelEstimatorImpl(const AudioProcessingImpl* apm);
+ virtual ~LevelEstimatorImpl();
+
+ int AnalyzeReverseStream(AudioBuffer* audio);
+ int ProcessCaptureAudio(AudioBuffer* audio);
+
+ // LevelEstimator implementation.
+ virtual bool is_enabled() const;
+
+ // ProcessingComponent implementation.
+ virtual int get_version(char* version, int version_len_bytes) const;
+
+ private:
+ // LevelEstimator implementation.
+ virtual int Enable(bool enable);
+ virtual int GetMetrics(Metrics* metrics, Metrics* reverse_metrics);
+
+ // ProcessingComponent implementation.
+ virtual void* CreateHandle() const;
+ virtual int InitializeHandle(void* handle) const;
+ virtual int ConfigureHandle(void* handle) const;
+ virtual int DestroyHandle(void* handle) const;
+ virtual int num_handles_required() const;
+ virtual int GetHandleError(void* handle) const;
+
+ const AudioProcessingImpl* apm_;
+};
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_LEVEL_ESTIMATOR_IMPL_H_
diff --git a/src/modules/audio_processing/main/source/noise_suppression_impl.cc b/src/modules/audio_processing/main/source/noise_suppression_impl.cc
new file mode 100644
index 0000000000..f899f350ca
--- /dev/null
+++ b/src/modules/audio_processing/main/source/noise_suppression_impl.cc
@@ -0,0 +1,179 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "noise_suppression_impl.h"
+
+#include <cassert>
+
+#include "critical_section_wrapper.h"
+#if defined(WEBRTC_NS_FLOAT)
+#include "noise_suppression.h"
+#elif defined(WEBRTC_NS_FIXED)
+#include "noise_suppression_x.h"
+#endif
+
+#include "audio_processing_impl.h"
+#include "audio_buffer.h"
+
+namespace webrtc {
+
+#if defined(WEBRTC_NS_FLOAT)
+typedef NsHandle Handle;
+#elif defined(WEBRTC_NS_FIXED)
+typedef NsxHandle Handle;
+#endif
+
+namespace {
+int MapSetting(NoiseSuppression::Level level) {
+ switch (level) {
+ case NoiseSuppression::kLow:
+ return 0;
+ case NoiseSuppression::kModerate:
+ return 1;
+ case NoiseSuppression::kHigh:
+ return 2;
+ case NoiseSuppression::kVeryHigh:
+ return 3;
+ default:
+ return -1;
+ }
+}
+} // namespace
+
+NoiseSuppressionImpl::NoiseSuppressionImpl(const AudioProcessingImpl* apm)
+ : ProcessingComponent(apm),
+ apm_(apm),
+ level_(kModerate) {}
+
+NoiseSuppressionImpl::~NoiseSuppressionImpl() {}
+
+int NoiseSuppressionImpl::ProcessCaptureAudio(AudioBuffer* audio) {
+ int err = apm_->kNoError;
+
+ if (!is_component_enabled()) {
+ return apm_->kNoError;
+ }
+ assert(audio->samples_per_split_channel() <= 160);
+ assert(audio->num_channels() == num_handles());
+
+  for (int i = 0; i < num_handles(); i++) {
+    Handle* my_handle = static_cast<Handle*>(handle(i));
+#if defined(WEBRTC_NS_FLOAT)
+    err = WebRtcNs_Process(my_handle,
+                           audio->low_pass_split_data(i),
+                           audio->high_pass_split_data(i),
+                           audio->low_pass_split_data(i),
+                           audio->high_pass_split_data(i));
+#elif defined(WEBRTC_NS_FIXED)
+    err = WebRtcNsx_Process(my_handle,
+                            audio->low_pass_split_data(i),
+                            audio->high_pass_split_data(i),
+                            audio->low_pass_split_data(i),
+                            audio->high_pass_split_data(i));
+#endif
+
+ if (err != apm_->kNoError) {
+ return GetHandleError(my_handle);
+ }
+ }
+
+ return apm_->kNoError;
+}
+
+int NoiseSuppressionImpl::Enable(bool enable) {
+ CriticalSectionScoped crit_scoped(*apm_->crit());
+ return EnableComponent(enable);
+}
+
+bool NoiseSuppressionImpl::is_enabled() const {
+ return is_component_enabled();
+}
+
+int NoiseSuppressionImpl::set_level(Level level) {
+ CriticalSectionScoped crit_scoped(*apm_->crit());
+ if (MapSetting(level) == -1) {
+ return apm_->kBadParameterError;
+ }
+
+ level_ = level;
+ return Configure();
+}
+
+NoiseSuppression::Level NoiseSuppressionImpl::level() const {
+ return level_;
+}
+
+int NoiseSuppressionImpl::get_version(char* version,
+ int version_len_bytes) const {
+#if defined(WEBRTC_NS_FLOAT)
+ if (WebRtcNs_get_version(version, version_len_bytes) != 0)
+#elif defined(WEBRTC_NS_FIXED)
+ if (WebRtcNsx_get_version(version, version_len_bytes) != 0)
+#endif
+ {
+ return apm_->kBadParameterError;
+ }
+
+ return apm_->kNoError;
+}
+
+void* NoiseSuppressionImpl::CreateHandle() const {
+ Handle* handle = NULL;
+#if defined(WEBRTC_NS_FLOAT)
+ if (WebRtcNs_Create(&handle) != apm_->kNoError)
+#elif defined(WEBRTC_NS_FIXED)
+ if (WebRtcNsx_Create(&handle) != apm_->kNoError)
+#endif
+ {
+ handle = NULL;
+ } else {
+ assert(handle != NULL);
+ }
+
+ return handle;
+}
+
+int NoiseSuppressionImpl::DestroyHandle(void* handle) const {
+#if defined(WEBRTC_NS_FLOAT)
+ return WebRtcNs_Free(static_cast<Handle*>(handle));
+#elif defined(WEBRTC_NS_FIXED)
+ return WebRtcNsx_Free(static_cast<Handle*>(handle));
+#endif
+}
+
+int NoiseSuppressionImpl::InitializeHandle(void* handle) const {
+#if defined(WEBRTC_NS_FLOAT)
+ return WebRtcNs_Init(static_cast<Handle*>(handle), apm_->sample_rate_hz());
+#elif defined(WEBRTC_NS_FIXED)
+ return WebRtcNsx_Init(static_cast<Handle*>(handle), apm_->sample_rate_hz());
+#endif
+}
+
+int NoiseSuppressionImpl::ConfigureHandle(void* handle) const {
+#if defined(WEBRTC_NS_FLOAT)
+ return WebRtcNs_set_policy(static_cast<Handle*>(handle),
+ MapSetting(level_));
+#elif defined(WEBRTC_NS_FIXED)
+ return WebRtcNsx_set_policy(static_cast<Handle*>(handle),
+ MapSetting(level_));
+#endif
+}
+
+int NoiseSuppressionImpl::num_handles_required() const {
+ return apm_->num_output_channels();
+}
+
+int NoiseSuppressionImpl::GetHandleError(void* handle) const {
+ // The NS has no get_error() function.
+ assert(handle != NULL);
+ return apm_->kUnspecifiedError;
+}
+} // namespace webrtc
+
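Editor's note: MapSetting() above folds the public NoiseSuppression::Level enum into the 0–3 policy integer consumed by WebRtcNs_set_policy()/WebRtcNsx_set_policy(). A minimal usage sketch, assuming an AudioProcessing* `apm` as created in process_test.cc later in this change:

    webrtc::NoiseSuppression* ns = apm->noise_suppression();
    int err = ns->Enable(true);  // lazily creates one handle per output channel
    if (err == apm->kNoError) {
      // kHigh maps to policy 2; kVeryHigh maps to 3 (see MapSetting()).
      err = ns->set_level(webrtc::NoiseSuppression::kHigh);
    }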
diff --git a/src/modules/audio_processing/main/source/noise_suppression_impl.h b/src/modules/audio_processing/main/source/noise_suppression_impl.h
new file mode 100644
index 0000000000..c9ff9b31af
--- /dev/null
+++ b/src/modules/audio_processing/main/source/noise_suppression_impl.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_NOISE_SUPPRESSION_IMPL_H_
+#define WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_NOISE_SUPPRESSION_IMPL_H_
+
+#include "audio_processing.h"
+#include "processing_component.h"
+
+namespace webrtc {
+class AudioProcessingImpl;
+class AudioBuffer;
+
+class NoiseSuppressionImpl : public NoiseSuppression,
+ public ProcessingComponent {
+ public:
+ explicit NoiseSuppressionImpl(const AudioProcessingImpl* apm);
+ virtual ~NoiseSuppressionImpl();
+
+ int ProcessCaptureAudio(AudioBuffer* audio);
+
+ // NoiseSuppression implementation.
+ virtual bool is_enabled() const;
+
+ // ProcessingComponent implementation.
+ virtual int get_version(char* version, int version_len_bytes) const;
+
+ private:
+ // NoiseSuppression implementation.
+ virtual int Enable(bool enable);
+ virtual int set_level(Level level);
+ virtual Level level() const;
+
+ // ProcessingComponent implementation.
+ virtual void* CreateHandle() const;
+ virtual int InitializeHandle(void* handle) const;
+ virtual int ConfigureHandle(void* handle) const;
+ virtual int DestroyHandle(void* handle) const;
+ virtual int num_handles_required() const;
+ virtual int GetHandleError(void* handle) const;
+
+ const AudioProcessingImpl* apm_;
+ Level level_;
+};
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_NOISE_SUPPRESSION_IMPL_H_
diff --git a/src/modules/audio_processing/main/source/processing_component.cc b/src/modules/audio_processing/main/source/processing_component.cc
new file mode 100644
index 0000000000..9ac125794c
--- /dev/null
+++ b/src/modules/audio_processing/main/source/processing_component.cc
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "processing_component.h"
+
+#include <cassert>
+
+#include "audio_processing_impl.h"
+
+namespace webrtc {
+
+ProcessingComponent::ProcessingComponent(const AudioProcessingImpl* apm)
+ : apm_(apm),
+ initialized_(false),
+ enabled_(false),
+ num_handles_(0) {}
+
+ProcessingComponent::~ProcessingComponent() {
+ assert(initialized_ == false);
+}
+
+int ProcessingComponent::Destroy() {
+ while (!handles_.empty()) {
+ DestroyHandle(handles_.back());
+ handles_.pop_back();
+ }
+ initialized_ = false;
+
+ return apm_->kNoError;
+}
+
+int ProcessingComponent::EnableComponent(bool enable) {
+ if (enable && !enabled_) {
+ enabled_ = enable; // Must be set before Initialize() is called.
+
+ int err = Initialize();
+ if (err != apm_->kNoError) {
+ enabled_ = false;
+ return err;
+ }
+ } else {
+ enabled_ = enable;
+ }
+
+ return apm_->kNoError;
+}
+
+bool ProcessingComponent::is_component_enabled() const {
+ return enabled_;
+}
+
+void* ProcessingComponent::handle(int index) const {
+ assert(index < num_handles_);
+ return handles_[index];
+}
+
+int ProcessingComponent::num_handles() const {
+ return num_handles_;
+}
+
+int ProcessingComponent::Initialize() {
+ if (!enabled_) {
+ return apm_->kNoError;
+ }
+
+ num_handles_ = num_handles_required();
+ if (num_handles_ > static_cast<int>(handles_.size())) {
+ handles_.resize(num_handles_, NULL);
+ }
+
+ assert(static_cast<int>(handles_.size()) >= num_handles_);
+ for (int i = 0; i < num_handles_; i++) {
+ if (handles_[i] == NULL) {
+ handles_[i] = CreateHandle();
+ if (handles_[i] == NULL) {
+ return apm_->kCreationFailedError;
+ }
+ }
+
+ int err = InitializeHandle(handles_[i]);
+ if (err != apm_->kNoError) {
+ return GetHandleError(handles_[i]);
+ }
+ }
+
+ initialized_ = true;
+ return Configure();
+}
+
+int ProcessingComponent::Configure() {
+ if (!initialized_) {
+ return apm_->kNoError;
+ }
+
+ assert(static_cast<int>(handles_.size()) >= num_handles_);
+ for (int i = 0; i < num_handles_; i++) {
+ int err = ConfigureHandle(handles_[i]);
+ if (err != apm_->kNoError) {
+ return GetHandleError(handles_[i]);
+ }
+ }
+
+ return apm_->kNoError;
+}
+} // namespace webrtc
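Editor's note: ProcessingComponent is a template-method base class — Initialize() owns handle creation and reuse (growing the vector lazily when num_handles_required() increases) and Configure() re-applies settings, while subclasses supply only the six private primitives. A hypothetical minimal subclass sketching that contract (the Toy* names are illustrative, not part of WebRTC; assumes processing_component.h and <cstring> are included):

    struct ToyState { int configured; };  // hypothetical per-handle state

    class ToyComponent : public webrtc::ProcessingComponent {
     public:
      explicit ToyComponent(const webrtc::AudioProcessingImpl* apm)
          : ProcessingComponent(apm) {}
      virtual int get_version(char* version, int version_len_bytes) const {
        memset(version, 0, version_len_bytes);  // no version info
        return 0;  // kNoError
      }
     private:
      virtual void* CreateHandle() const { return new ToyState(); }
      virtual int InitializeHandle(void* handle) const {
        static_cast<ToyState*>(handle)->configured = 0;  // reset state
        return 0;
      }
      virtual int ConfigureHandle(void* handle) const {
        static_cast<ToyState*>(handle)->configured = 1;  // apply settings
        return 0;
      }
      virtual int DestroyHandle(void* handle) const {
        delete static_cast<ToyState*>(handle);
        return 0;
      }
      virtual int num_handles_required() const { return 1; }
      virtual int GetHandleError(void* /*handle*/) const { return -1; }
    };

EnableComponent(true) then drives Initialize(), which creates any missing handles before Configure() pushes the current settings to each of them.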
diff --git a/src/modules/audio_processing/main/source/processing_component.h b/src/modules/audio_processing/main/source/processing_component.h
new file mode 100644
index 0000000000..3d8a02bd3e
--- /dev/null
+++ b/src/modules/audio_processing/main/source/processing_component.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_PROCESSING_COMPONENT_H_
+#define WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_PROCESSING_COMPONENT_H_
+
+#include <vector>
+
+#include "audio_processing.h"
+
+namespace webrtc {
+class AudioProcessingImpl;
+
+/*template <class T>
+class ComponentHandle {
+ public:
+ ComponentHandle();
+ virtual ~ComponentHandle();
+
+ virtual int Create() = 0;
+ virtual T* ptr() const = 0;
+};*/
+
+class ProcessingComponent {
+ public:
+ explicit ProcessingComponent(const AudioProcessingImpl* apm);
+ virtual ~ProcessingComponent();
+
+ virtual int Initialize();
+ virtual int Destroy();
+ virtual int get_version(char* version, int version_len_bytes) const = 0;
+
+ protected:
+ virtual int Configure();
+ int EnableComponent(bool enable);
+ bool is_component_enabled() const;
+ void* handle(int index) const;
+ int num_handles() const;
+
+ private:
+ virtual void* CreateHandle() const = 0;
+ virtual int InitializeHandle(void* handle) const = 0;
+ virtual int ConfigureHandle(void* handle) const = 0;
+ virtual int DestroyHandle(void* handle) const = 0;
+ virtual int num_handles_required() const = 0;
+ virtual int GetHandleError(void* handle) const = 0;
+
+ const AudioProcessingImpl* apm_;
+ std::vector<void*> handles_;
+ bool initialized_;
+ bool enabled_;
+ int num_handles_;
+};
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_PROCESSING_COMPONENT_H_
diff --git a/src/modules/audio_processing/main/source/splitting_filter.cc b/src/modules/audio_processing/main/source/splitting_filter.cc
new file mode 100644
index 0000000000..1526141cc9
--- /dev/null
+++ b/src/modules/audio_processing/main/source/splitting_filter.cc
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "splitting_filter.h"
+#include "signal_processing_library.h"
+
+namespace webrtc {
+
+void SplittingFilterAnalysis(const WebRtc_Word16* in_data,
+                             WebRtc_Word16* low_band,
+                             WebRtc_Word16* high_band,
+                             WebRtc_Word32* filter_state1,
+                             WebRtc_Word32* filter_state2) {
+  WebRtcSpl_AnalysisQMF(in_data, low_band, high_band,
+                        filter_state1, filter_state2);
+}
+
+void SplittingFilterSynthesis(const WebRtc_Word16* low_band,
+                              const WebRtc_Word16* high_band,
+                              WebRtc_Word16* out_data,
+                              WebRtc_Word32* filt_state1,
+                              WebRtc_Word32* filt_state2) {
+  WebRtcSpl_SynthesisQMF(low_band, high_band, out_data,
+                         filt_state1, filt_state2);
+}
+} // namespace webrtc
diff --git a/src/modules/audio_processing/main/source/splitting_filter.h b/src/modules/audio_processing/main/source/splitting_filter.h
new file mode 100644
index 0000000000..661bfb2f6e
--- /dev/null
+++ b/src/modules/audio_processing/main/source/splitting_filter.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_SPLITTING_FILTER_H_
+#define WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_SPLITTING_FILTER_H_
+
+#include "typedefs.h"
+#include "signal_processing_library.h"
+
+namespace webrtc {
+/*
+ * SplittingFilterbank_analysisQMF(...)
+ *
+ * Splits a super-wb signal into two subbands: 0-8 kHz and 8-16 kHz.
+ *
+ * Input:
+ * - in_data : super-wb audio signal
+ *
+ * Input & Output:
+ * - filt_state1: Filter state for first all-pass filter
+ * - filt_state2: Filter state for second all-pass filter
+ *
+ * Output:
+ *      - low_band  : The signal from the 0-8 kHz band
+ *      - high_band : The signal from the 8-16 kHz band
+ */
+void SplittingFilterAnalysis(const WebRtc_Word16* in_data,
+ WebRtc_Word16* low_band,
+ WebRtc_Word16* high_band,
+ WebRtc_Word32* filt_state1,
+ WebRtc_Word32* filt_state2);
+
+/*
+ * SplittingFilterbank_synthesisQMF(...)
+ *
+ * Combines the two subbands (0-8 and 8-16 kHz) into a super-wb signal.
+ *
+ * Input:
+ * - low_band : The signal with the 0-8 kHz band
+ * - high_band : The signal with the 8-16 kHz band
+ *
+ * Input & Output:
+ * - filt_state1: Filter state for first all-pass filter
+ * - filt_state2: Filter state for second all-pass filter
+ *
+ * Output:
+ * - out_data : super-wb speech signal
+ */
+void SplittingFilterSynthesis(const WebRtc_Word16* low_band,
+ const WebRtc_Word16* high_band,
+ WebRtc_Word16* out_data,
+ WebRtc_Word32* filt_state1,
+ WebRtc_Word32* filt_state2);
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_SPLITTING_FILTER_H_
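Editor's note: a hedged round-trip sketch for one 10 ms super-wb frame (320 samples at 32 kHz). The six-word state arrays are an assumption carried over from how this module declares its QMF state elsewhere (audio_buffer.cc); confirm against signal_processing_library.h before reuse:

    WebRtc_Word16 in_data[320] = {0};   // 10 ms at 32 kHz
    WebRtc_Word16 low_band[160];
    WebRtc_Word16 high_band[160];
    WebRtc_Word16 out_data[320];
    WebRtc_Word32 a_state1[6] = {0};    // analysis filter states (assumed size)
    WebRtc_Word32 a_state2[6] = {0};
    WebRtc_Word32 s_state1[6] = {0};    // synthesis filter states (assumed size)
    WebRtc_Word32 s_state2[6] = {0};

    webrtc::SplittingFilterAnalysis(in_data, low_band, high_band,
                                    a_state1, a_state2);
    // ... per-band processing (NS, AEC, HPF) operates on the split data ...
    webrtc::SplittingFilterSynthesis(low_band, high_band, out_data,
                                     s_state1, s_state2);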
diff --git a/src/modules/audio_processing/main/source/voice_detection_impl.cc b/src/modules/audio_processing/main/source/voice_detection_impl.cc
new file mode 100644
index 0000000000..3eb446e911
--- /dev/null
+++ b/src/modules/audio_processing/main/source/voice_detection_impl.cc
@@ -0,0 +1,202 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "voice_detection_impl.h"
+
+#include <cassert>
+
+#include "critical_section_wrapper.h"
+#include "webrtc_vad.h"
+
+#include "audio_processing_impl.h"
+#include "audio_buffer.h"
+
+namespace webrtc {
+
+typedef VadInst Handle;
+
+namespace {
+WebRtc_Word16 MapSetting(VoiceDetection::Likelihood likelihood) {
+  switch (likelihood) {
+    case VoiceDetection::kVeryLowLikelihood:
+      return 3;
+    case VoiceDetection::kLowLikelihood:
+      return 2;
+    case VoiceDetection::kModerateLikelihood:
+      return 1;
+    case VoiceDetection::kHighLikelihood:
+      return 0;
+    default:
+      return -1;
+  }
+}
+} // namespace
+
+VoiceDetectionImpl::VoiceDetectionImpl(const AudioProcessingImpl* apm)
+ : ProcessingComponent(apm),
+ apm_(apm),
+ stream_has_voice_(false),
+ using_external_vad_(false),
+ likelihood_(kLowLikelihood),
+ frame_size_ms_(10),
+ frame_size_samples_(0) {}
+
+VoiceDetectionImpl::~VoiceDetectionImpl() {}
+
+int VoiceDetectionImpl::ProcessCaptureAudio(AudioBuffer* audio) {
+ if (!is_component_enabled()) {
+ return apm_->kNoError;
+ }
+
+ if (using_external_vad_) {
+ using_external_vad_ = false;
+ return apm_->kNoError;
+ }
+ assert(audio->samples_per_split_channel() <= 160);
+
+ WebRtc_Word16* mixed_data = audio->low_pass_split_data(0);
+ if (audio->num_channels() > 1) {
+ audio->CopyAndMixLowPass(1);
+ mixed_data = audio->mixed_low_pass_data(0);
+ }
+
+ // TODO(ajm): concatenate data in frame buffer here.
+
+  int vad_ret_val = WebRtcVad_Process(static_cast<Handle*>(handle(0)),
+                                      apm_->split_sample_rate_hz(),
+                                      mixed_data,
+                                      frame_size_samples_);
+
+ if (vad_ret_val == 0) {
+ stream_has_voice_ = false;
+ } else if (vad_ret_val == 1) {
+ stream_has_voice_ = true;
+ } else {
+ return apm_->kUnspecifiedError;
+ }
+
+ return apm_->kNoError;
+}
+
+int VoiceDetectionImpl::Enable(bool enable) {
+ CriticalSectionScoped crit_scoped(*apm_->crit());
+ return EnableComponent(enable);
+}
+
+bool VoiceDetectionImpl::is_enabled() const {
+ return is_component_enabled();
+}
+
+int VoiceDetectionImpl::set_stream_has_voice(bool has_voice) {
+ using_external_vad_ = true;
+ stream_has_voice_ = has_voice;
+ return apm_->kNoError;
+}
+
+bool VoiceDetectionImpl::stream_has_voice() const {
+ // TODO(ajm): enable this assertion?
+ //assert(using_external_vad_ || is_component_enabled());
+ return stream_has_voice_;
+}
+
+int VoiceDetectionImpl::set_likelihood(VoiceDetection::Likelihood likelihood) {
+ CriticalSectionScoped crit_scoped(*apm_->crit());
+ if (MapSetting(likelihood) == -1) {
+ return apm_->kBadParameterError;
+ }
+
+ likelihood_ = likelihood;
+ return Configure();
+}
+
+VoiceDetection::Likelihood VoiceDetectionImpl::likelihood() const {
+ return likelihood_;
+}
+
+int VoiceDetectionImpl::set_frame_size_ms(int size) {
+ CriticalSectionScoped crit_scoped(*apm_->crit());
+ assert(size == 10); // TODO(ajm): remove when supported.
+ if (size != 10 &&
+ size != 20 &&
+ size != 30) {
+ return apm_->kBadParameterError;
+ }
+
+ frame_size_ms_ = size;
+
+ return Initialize();
+}
+
+int VoiceDetectionImpl::frame_size_ms() const {
+ return frame_size_ms_;
+}
+
+int VoiceDetectionImpl::Initialize() {
+ int err = ProcessingComponent::Initialize();
+ if (err != apm_->kNoError || !is_component_enabled()) {
+ return err;
+ }
+
+ using_external_vad_ = false;
+ frame_size_samples_ = frame_size_ms_ * (apm_->split_sample_rate_hz() / 1000);
+  // TODO(ajm): initialize frame buffer here.
+
+ return apm_->kNoError;
+}
+
+int VoiceDetectionImpl::get_version(char* version,
+ int version_len_bytes) const {
+ if (WebRtcVad_get_version(version, version_len_bytes) != 0) {
+ return apm_->kBadParameterError;
+ }
+
+ return apm_->kNoError;
+}
+
+void* VoiceDetectionImpl::CreateHandle() const {
+ Handle* handle = NULL;
+ if (WebRtcVad_Create(&handle) != apm_->kNoError) {
+ handle = NULL;
+ } else {
+ assert(handle != NULL);
+ }
+
+ return handle;
+}
+
+int VoiceDetectionImpl::DestroyHandle(void* handle) const {
+ return WebRtcVad_Free(static_cast<Handle*>(handle));
+}
+
+int VoiceDetectionImpl::InitializeHandle(void* handle) const {
+ return WebRtcVad_Init(static_cast<Handle*>(handle));
+}
+
+int VoiceDetectionImpl::ConfigureHandle(void* handle) const {
+ return WebRtcVad_set_mode(static_cast<Handle*>(handle),
+ MapSetting(likelihood_));
+}
+
+int VoiceDetectionImpl::num_handles_required() const {
+ return 1;
+}
+
+int VoiceDetectionImpl::GetHandleError(void* handle) const {
+ // The VAD has no get_error() function.
+ assert(handle != NULL);
+ return apm_->kUnspecifiedError;
+}
+} // namespace webrtc
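Editor's note: set_stream_has_voice() is one-shot by design — ProcessCaptureAudio() consumes using_external_vad_ and clears it, so an external VAD must re-assert its decision before every capture-side ProcessStream() call. A hedged per-frame sketch (`apm` and `frame` set up as in process_test.cc below; RunMyVad is a hypothetical external detector):

    bool external_decision = RunMyVad(frame);  // hypothetical detector
    apm->voice_detection()->set_stream_has_voice(external_decision);
    apm->ProcessStream(&frame);  // internal WebRtcVad_Process() is skipped
    bool voice = apm->voice_detection()->stream_has_voice();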
diff --git a/src/modules/audio_processing/main/source/voice_detection_impl.h b/src/modules/audio_processing/main/source/voice_detection_impl.h
new file mode 100644
index 0000000000..ef212d11b9
--- /dev/null
+++ b/src/modules/audio_processing/main/source/voice_detection_impl.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_VOICE_DETECTION_IMPL_H_
+#define WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_VOICE_DETECTION_IMPL_H_
+
+#include "audio_processing.h"
+#include "processing_component.h"
+
+namespace webrtc {
+class AudioProcessingImpl;
+class AudioBuffer;
+
+class VoiceDetectionImpl : public VoiceDetection,
+ public ProcessingComponent {
+ public:
+ explicit VoiceDetectionImpl(const AudioProcessingImpl* apm);
+ virtual ~VoiceDetectionImpl();
+
+ int ProcessCaptureAudio(AudioBuffer* audio);
+
+ // VoiceDetection implementation.
+ virtual bool is_enabled() const;
+
+ // ProcessingComponent implementation.
+ virtual int Initialize();
+ virtual int get_version(char* version, int version_len_bytes) const;
+
+ private:
+ // VoiceDetection implementation.
+ virtual int Enable(bool enable);
+ virtual int set_stream_has_voice(bool has_voice);
+ virtual bool stream_has_voice() const;
+ virtual int set_likelihood(Likelihood likelihood);
+ virtual Likelihood likelihood() const;
+ virtual int set_frame_size_ms(int size);
+ virtual int frame_size_ms() const;
+
+ // ProcessingComponent implementation.
+ virtual void* CreateHandle() const;
+ virtual int InitializeHandle(void* handle) const;
+ virtual int ConfigureHandle(void* handle) const;
+ virtual int DestroyHandle(void* handle) const;
+ virtual int num_handles_required() const;
+ virtual int GetHandleError(void* handle) const;
+
+ const AudioProcessingImpl* apm_;
+ bool stream_has_voice_;
+ bool using_external_vad_;
+ Likelihood likelihood_;
+ int frame_size_ms_;
+ int frame_size_samples_;
+};
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_VOICE_DETECTION_IMPL_H_
diff --git a/src/modules/audio_processing/main/test/android/apmtest/AndroidManifest.xml b/src/modules/audio_processing/main/test/android/apmtest/AndroidManifest.xml
new file mode 100644
index 0000000000..c6063b3d76
--- /dev/null
+++ b/src/modules/audio_processing/main/test/android/apmtest/AndroidManifest.xml
@@ -0,0 +1,30 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- BEGIN_INCLUDE(manifest) -->
+<manifest xmlns:android="http://schemas.android.com/apk/res/android"
+ package="com.example.native_activity"
+ android:versionCode="1"
+ android:versionName="1.0">
+
+ <!-- This is the platform API where NativeActivity was introduced. -->
+ <uses-sdk android:minSdkVersion="8" />
+
+ <!-- This .apk has no Java code itself, so set hasCode to false. -->
+ <application android:label="@string/app_name" android:hasCode="false" android:debuggable="true">
+
+ <!-- Our activity is the built-in NativeActivity framework class.
+ This will take care of integrating with our NDK code. -->
+ <activity android:name="android.app.NativeActivity"
+ android:label="@string/app_name"
+ android:configChanges="orientation|keyboardHidden">
+      <!-- Tell NativeActivity the name of our .so -->
+ <meta-data android:name="android.app.lib_name"
+ android:value="apmtest-activity" />
+ <intent-filter>
+ <action android:name="android.intent.action.MAIN" />
+ <category android:name="android.intent.category.LAUNCHER" />
+ </intent-filter>
+ </activity>
+ </application>
+
+</manifest>
+<!-- END_INCLUDE(manifest) -->
diff --git a/src/modules/audio_processing/main/test/android/apmtest/default.properties b/src/modules/audio_processing/main/test/android/apmtest/default.properties
new file mode 100644
index 0000000000..9a2c9f6c88
--- /dev/null
+++ b/src/modules/audio_processing/main/test/android/apmtest/default.properties
@@ -0,0 +1,11 @@
+# This file is automatically generated by Android Tools.
+# Do not modify this file -- YOUR CHANGES WILL BE ERASED!
+#
+# This file must be checked in Version Control Systems.
+#
+# To customize properties used by the Ant build system use,
+# "build.properties", and override values to adapt the script to your
+# project structure.
+
+# Project target.
+target=android-9
diff --git a/src/modules/audio_processing/main/test/android/apmtest/jni/Android.mk b/src/modules/audio_processing/main/test/android/apmtest/jni/Android.mk
new file mode 100644
index 0000000000..eaf3c9d86f
--- /dev/null
+++ b/src/modules/audio_processing/main/test/android/apmtest/jni/Android.mk
@@ -0,0 +1,26 @@
+# Copyright (C) 2010 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+LOCAL_PATH := $(call my-dir)
+
+include $(CLEAR_VARS)
+
+LOCAL_MODULE := apmtest-activity
+LOCAL_SRC_FILES := main.c
+LOCAL_LDLIBS := -llog -landroid -lEGL -lGLESv1_CM
+LOCAL_STATIC_LIBRARIES := android_native_app_glue
+
+include $(BUILD_SHARED_LIBRARY)
+
+$(call import-module,android/native_app_glue)
diff --git a/src/modules/audio_processing/main/test/android/apmtest/jni/Application.mk b/src/modules/audio_processing/main/test/android/apmtest/jni/Application.mk
new file mode 100644
index 0000000000..22d188e595
--- /dev/null
+++ b/src/modules/audio_processing/main/test/android/apmtest/jni/Application.mk
@@ -0,0 +1 @@
+APP_PLATFORM := android-9
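Editor's note: a hedged build recipe for this test app, assuming the standard NDK/Ant toolchain implied by default.properties (target android-9) and jni/Android.mk; exact commands depend on the local SDK/NDK installation:

    cd src/modules/audio_processing/main/test/android/apmtest
    ndk-build     # builds libapmtest-activity.so from jni/main.c
    ant debug     # packages the NativeActivity apk (the app has no Java code)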
diff --git a/src/modules/audio_processing/main/test/android/apmtest/jni/main.c b/src/modules/audio_processing/main/test/android/apmtest/jni/main.c
new file mode 100644
index 0000000000..2e19635683
--- /dev/null
+++ b/src/modules/audio_processing/main/test/android/apmtest/jni/main.c
@@ -0,0 +1,307 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+//BEGIN_INCLUDE(all)
+#include <jni.h>
+#include <errno.h>
+#include <stdlib.h>  // malloc() is used for the saved state below
+#include <string.h>  // memset() is used to zero the engine struct
+
+#include <EGL/egl.h>
+#include <GLES/gl.h>
+
+#include <android/sensor.h>
+#include <android/log.h>
+#include <android_native_app_glue.h>
+
+#define LOGI(...) ((void)__android_log_print(ANDROID_LOG_INFO, "native-activity", __VA_ARGS__))
+#define LOGW(...) ((void)__android_log_print(ANDROID_LOG_WARN, "native-activity", __VA_ARGS__))
+
+/**
+ * Our saved state data.
+ */
+struct saved_state {
+ float angle;
+ int32_t x;
+ int32_t y;
+};
+
+/**
+ * Shared state for our app.
+ */
+struct engine {
+ struct android_app* app;
+
+ ASensorManager* sensorManager;
+ const ASensor* accelerometerSensor;
+ ASensorEventQueue* sensorEventQueue;
+
+ int animating;
+ EGLDisplay display;
+ EGLSurface surface;
+ EGLContext context;
+ int32_t width;
+ int32_t height;
+ struct saved_state state;
+};
+
+/**
+ * Initialize an EGL context for the current display.
+ */
+static int engine_init_display(struct engine* engine) {
+ // initialize OpenGL ES and EGL
+
+ /*
+ * Here specify the attributes of the desired configuration.
+ * Below, we select an EGLConfig with at least 8 bits per color
+ * component compatible with on-screen windows
+ */
+ const EGLint attribs[] = {
+ EGL_SURFACE_TYPE, EGL_WINDOW_BIT,
+ EGL_BLUE_SIZE, 8,
+ EGL_GREEN_SIZE, 8,
+ EGL_RED_SIZE, 8,
+ EGL_NONE
+ };
+ EGLint w, h, dummy, format;
+ EGLint numConfigs;
+ EGLConfig config;
+ EGLSurface surface;
+ EGLContext context;
+
+ EGLDisplay display = eglGetDisplay(EGL_DEFAULT_DISPLAY);
+
+ eglInitialize(display, 0, 0);
+
+ /* Here, the application chooses the configuration it desires. In this
+ * sample, we have a very simplified selection process, where we pick
+ * the first EGLConfig that matches our criteria */
+ eglChooseConfig(display, attribs, &config, 1, &numConfigs);
+
+ /* EGL_NATIVE_VISUAL_ID is an attribute of the EGLConfig that is
+ * guaranteed to be accepted by ANativeWindow_setBuffersGeometry().
+ * As soon as we picked a EGLConfig, we can safely reconfigure the
+ * ANativeWindow buffers to match, using EGL_NATIVE_VISUAL_ID. */
+ eglGetConfigAttrib(display, config, EGL_NATIVE_VISUAL_ID, &format);
+
+ ANativeWindow_setBuffersGeometry(engine->app->window, 0, 0, format);
+
+ surface = eglCreateWindowSurface(display, config, engine->app->window, NULL);
+ context = eglCreateContext(display, config, NULL, NULL);
+
+ if (eglMakeCurrent(display, surface, surface, context) == EGL_FALSE) {
+ LOGW("Unable to eglMakeCurrent");
+ return -1;
+ }
+
+ eglQuerySurface(display, surface, EGL_WIDTH, &w);
+ eglQuerySurface(display, surface, EGL_HEIGHT, &h);
+
+ engine->display = display;
+ engine->context = context;
+ engine->surface = surface;
+ engine->width = w;
+ engine->height = h;
+ engine->state.angle = 0;
+
+ // Initialize GL state.
+ glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_FASTEST);
+ glEnable(GL_CULL_FACE);
+ glShadeModel(GL_SMOOTH);
+ glDisable(GL_DEPTH_TEST);
+
+ return 0;
+}
+
+/**
+ * Just draw the current frame in the display.
+ */
+static void engine_draw_frame(struct engine* engine) {
+ if (engine->display == NULL) {
+ // No display.
+ return;
+ }
+
+ // Just fill the screen with a color.
+ glClearColor(((float)engine->state.x)/engine->width, engine->state.angle,
+ ((float)engine->state.y)/engine->height, 1);
+ glClear(GL_COLOR_BUFFER_BIT);
+
+ eglSwapBuffers(engine->display, engine->surface);
+}
+
+/**
+ * Tear down the EGL context currently associated with the display.
+ */
+static void engine_term_display(struct engine* engine) {
+ if (engine->display != EGL_NO_DISPLAY) {
+ eglMakeCurrent(engine->display, EGL_NO_SURFACE, EGL_NO_SURFACE, EGL_NO_CONTEXT);
+ if (engine->context != EGL_NO_CONTEXT) {
+ eglDestroyContext(engine->display, engine->context);
+ }
+ if (engine->surface != EGL_NO_SURFACE) {
+ eglDestroySurface(engine->display, engine->surface);
+ }
+ eglTerminate(engine->display);
+ }
+ engine->animating = 0;
+ engine->display = EGL_NO_DISPLAY;
+ engine->context = EGL_NO_CONTEXT;
+ engine->surface = EGL_NO_SURFACE;
+}
+
+/**
+ * Process the next input event.
+ */
+static int32_t engine_handle_input(struct android_app* app, AInputEvent* event) {
+ struct engine* engine = (struct engine*)app->userData;
+ if (AInputEvent_getType(event) == AINPUT_EVENT_TYPE_MOTION) {
+ engine->animating = 1;
+ engine->state.x = AMotionEvent_getX(event, 0);
+ engine->state.y = AMotionEvent_getY(event, 0);
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * Process the next main command.
+ */
+static void engine_handle_cmd(struct android_app* app, int32_t cmd) {
+ struct engine* engine = (struct engine*)app->userData;
+ switch (cmd) {
+ case APP_CMD_SAVE_STATE:
+ // The system has asked us to save our current state. Do so.
+ engine->app->savedState = malloc(sizeof(struct saved_state));
+ *((struct saved_state*)engine->app->savedState) = engine->state;
+ engine->app->savedStateSize = sizeof(struct saved_state);
+ break;
+ case APP_CMD_INIT_WINDOW:
+ // The window is being shown, get it ready.
+ if (engine->app->window != NULL) {
+ engine_init_display(engine);
+ engine_draw_frame(engine);
+ }
+ break;
+ case APP_CMD_TERM_WINDOW:
+ // The window is being hidden or closed, clean it up.
+ engine_term_display(engine);
+ break;
+ case APP_CMD_GAINED_FOCUS:
+ // When our app gains focus, we start monitoring the accelerometer.
+ if (engine->accelerometerSensor != NULL) {
+ ASensorEventQueue_enableSensor(engine->sensorEventQueue,
+ engine->accelerometerSensor);
+ // We'd like to get 60 events per second (in us).
+ ASensorEventQueue_setEventRate(engine->sensorEventQueue,
+ engine->accelerometerSensor, (1000L/60)*1000);
+ }
+ break;
+ case APP_CMD_LOST_FOCUS:
+ // When our app loses focus, we stop monitoring the accelerometer.
+ // This is to avoid consuming battery while not being used.
+ if (engine->accelerometerSensor != NULL) {
+ ASensorEventQueue_disableSensor(engine->sensorEventQueue,
+ engine->accelerometerSensor);
+ }
+ // Also stop animating.
+ engine->animating = 0;
+ engine_draw_frame(engine);
+ break;
+ }
+}
+
+/**
+ * This is the main entry point of a native application that is using
+ * android_native_app_glue. It runs in its own thread, with its own
+ * event loop for receiving input events and doing other things.
+ */
+void android_main(struct android_app* state) {
+ struct engine engine;
+
+ // Make sure glue isn't stripped.
+ app_dummy();
+
+ memset(&engine, 0, sizeof(engine));
+ state->userData = &engine;
+ state->onAppCmd = engine_handle_cmd;
+ state->onInputEvent = engine_handle_input;
+ engine.app = state;
+
+ // Prepare to monitor accelerometer
+ engine.sensorManager = ASensorManager_getInstance();
+ engine.accelerometerSensor = ASensorManager_getDefaultSensor(engine.sensorManager,
+ ASENSOR_TYPE_ACCELEROMETER);
+ engine.sensorEventQueue = ASensorManager_createEventQueue(engine.sensorManager,
+ state->looper, LOOPER_ID_USER, NULL, NULL);
+
+ if (state->savedState != NULL) {
+ // We are starting with a previous saved state; restore from it.
+ engine.state = *(struct saved_state*)state->savedState;
+ }
+
+ // loop waiting for stuff to do.
+
+ while (1) {
+ // Read all pending events.
+ int ident;
+ int events;
+ struct android_poll_source* source;
+
+ // If not animating, we will block forever waiting for events.
+ // If animating, we loop until all events are read, then continue
+ // to draw the next frame of animation.
+ while ((ident=ALooper_pollAll(engine.animating ? 0 : -1, NULL, &events,
+ (void**)&source)) >= 0) {
+
+ // Process this event.
+ if (source != NULL) {
+ source->process(state, source);
+ }
+
+ // If a sensor has data, process it now.
+ if (ident == LOOPER_ID_USER) {
+ if (engine.accelerometerSensor != NULL) {
+ ASensorEvent event;
+ while (ASensorEventQueue_getEvents(engine.sensorEventQueue,
+ &event, 1) > 0) {
+ LOGI("accelerometer: x=%f y=%f z=%f",
+ event.acceleration.x, event.acceleration.y,
+ event.acceleration.z);
+ }
+ }
+ }
+
+ // Check if we are exiting.
+ if (state->destroyRequested != 0) {
+ engine_term_display(&engine);
+ return;
+ }
+ }
+
+ if (engine.animating) {
+ // Done with events; draw next animation frame.
+ engine.state.angle += .01f;
+ if (engine.state.angle > 1) {
+ engine.state.angle = 0;
+ }
+
+ // Drawing is throttled to the screen update rate, so there
+ // is no need to do timing here.
+ engine_draw_frame(&engine);
+ }
+ }
+}
+//END_INCLUDE(all)
diff --git a/src/modules/audio_processing/main/test/android/apmtest/res/values/strings.xml b/src/modules/audio_processing/main/test/android/apmtest/res/values/strings.xml
new file mode 100644
index 0000000000..d0bd0f3051
--- /dev/null
+++ b/src/modules/audio_processing/main/test/android/apmtest/res/values/strings.xml
@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="utf-8"?>
+<resources>
+ <string name="app_name">apmtest</string>
+</resources>
diff --git a/src/modules/audio_processing/main/test/process_test/Android.mk b/src/modules/audio_processing/main/test/process_test/Android.mk
new file mode 100644
index 0000000000..23080aab23
--- /dev/null
+++ b/src/modules/audio_processing/main/test/process_test/Android.mk
@@ -0,0 +1,48 @@
+# Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+LOCAL_PATH:= $(call my-dir)
+
+# apm test app
+
+include $(CLEAR_VARS)
+
+LOCAL_MODULE_TAGS := tests
+LOCAL_CPP_EXTENSION := .cc
+LOCAL_SRC_FILES:= \
+ process_test.cc
+
+# Flags passed to both C and C++ files.
+LOCAL_CFLAGS := \
+ '-DWEBRTC_TARGET_PC' \
+ '-DWEBRTC_LINUX' \
+ '-DWEBRTC_THREAD_RR' \
+ '-DWEBRTC_ANDROID' \
+ '-DANDROID'
+
+LOCAL_CPPFLAGS :=
+LOCAL_LDFLAGS :=
+LOCAL_C_INCLUDES := \
+ external/gtest/include \
+ $(LOCAL_PATH)/../../../../../system_wrappers/interface \
+ $(LOCAL_PATH)/../../interface \
+ $(LOCAL_PATH)/../../../../interface \
+ $(LOCAL_PATH)/../../../../..
+
+LOCAL_STATIC_LIBRARIES := \
+ libgtest
+
+LOCAL_SHARED_LIBRARIES := \
+ libutils \
+ libstlport \
+ libwebrtc_audio_preprocessing
+
+LOCAL_MODULE:= webrtc_apm_process_test
+
+include external/stlport/libstlport.mk
+include $(BUILD_EXECUTABLE)
diff --git a/src/modules/audio_processing/main/test/process_test/apmtest.m b/src/modules/audio_processing/main/test/process_test/apmtest.m
new file mode 100644
index 0000000000..6152bb5a9a
--- /dev/null
+++ b/src/modules/audio_processing/main/test/process_test/apmtest.m
@@ -0,0 +1,360 @@
+function apmtest(task, testname, casenumber, legacy)
+%APMTEST is a tool to process APM file sets and easily display the output.
+% APMTEST(TASK, TESTNAME, CASENUMBER) performs one of several TASKs:
+% 'test' Processes the files to produce test output.
+% 'list' Prints a list of cases in the test set, preceded by their
+% CASENUMBERs.
+% 'show' Uses spclab to show the test case specified by the
+% CASENUMBER parameter.
+%
+% using a set of test files determined by TESTNAME:
+% 'all' All tests.
+% 'apm' The standard APM test set (default).
+% 'apmm' The mobile APM test set.
+% 'aec' The AEC test set.
+% 'aecm' The AECM test set.
+% 'agc' The AGC test set.
+% 'ns' The NS test set.
+% 'vad' The VAD test set.
+%
+% CASENUMBER can be used to select a single test case. Omit CASENUMBER,
+% or set to zero, to use all test cases.
+%
+
+if nargin < 4
+ % Set to true to run old VQE recordings.
+ legacy = false;
+end
+
+if nargin < 3
+ casenumber = 0;
+end
+
+if nargin < 2
+ task = 'test';
+end
+
+if nargin < 1
+ testname = 'all';
+end
+
+if ~strcmp(task, 'test') && ~strcmp(task, 'list') && ~strcmp(task, 'show')
+ error(['TASK ' task ' is not recognized']);
+end
+
+if casenumber == 0 && strcmp(task, 'show')
+ error(['CASENUMBER must be specified for TASK ' task]);
+end
+
+filepath = 'data/';
+inpath = [filepath 'input/'];
+outpath = [filepath 'output/'];
+refpath = [filepath 'reference/'];
+
+% Temporary
+if legacy
+ refpath = [filepath 'output/'];
+ outpath = [filepath 'reference/'];
+end
+
+if strcmp(testname, 'all')
+ tests = {'apm','apmm','aec','aecm','agc','ns','vad'};
+else
+ tests = {testname};
+end
+
+if legacy
+ progname = '/usr/local/google/p4/dev/depot/test';
+else
+ progname = './process_test';
+end
+
+global farFile;
+global nearFile;
+global eventFile;
+global delayFile;
+global driftFile;
+
+if legacy
+ farFile = 'vqeFar.pcm';
+ nearFile = 'vqeNear.pcm';
+ eventFile = 'vqeEvent.dat';
+ delayFile = 'vqeBuf.dat';
+ driftFile = 'vqeDrift.dat';
+else
+ farFile = 'apm_far.pcm';
+ nearFile = 'apm_near.pcm';
+ eventFile = 'apm_event.dat';
+ delayFile = 'apm_delay.dat';
+ driftFile = 'apm_drift.dat';
+end
+
+simulateMode = false;
+nErr = 0;
+nCases = 0;
+for i=1:length(tests)
+ simulateMode = false;
+
+ if strcmp(tests{i}, 'apm')
+ testdir = ['apm/'];
+ outfile = ['out'];
+ if legacy
+ opt = ['-ec 1 -agc 2 -nc 2 -vad 3'];
+ else
+ opt = ['--no_progress -hpf' ...
+ ' -aec --drift_compensation -agc --fixed_digital' ...
+ ' -ns --ns_moderate -vad'];
+ end
+
+ elseif strcmp(tests{i}, 'apm-swb')
+ simulateMode = true;
+ testdir = ['apm-swb/'];
+ outfile = ['out'];
+ if legacy
+ opt = ['-fs 32000 -ec 1 -agc 2 -nc 2'];
+ else
+ opt = ['--no_progress -fs 32000 -hpf' ...
+ ' -aec --drift_compensation -agc --adaptive_digital' ...
+ ' -ns --ns_moderate -vad'];
+ end
+ elseif strcmp(tests{i}, 'apmm')
+ testdir = ['apmm/'];
+ outfile = ['out'];
+ opt = ['-aec --drift_compensation -agc --fixed_digital -hpf -ns ' ...
+ '--ns_moderate'];
+
+ else
+ error(['TESTNAME ' tests{i} ' is not recognized']);
+ end
+
+  % Use per-case paths here; appending testdir to the shared variables would
+  % make the paths accumulate across loop iterations when TESTNAME is 'all'.
+  incase = [inpath testdir];
+  outcase = [outpath testdir];
+  refcase = [refpath testdir];
+
+  if ~exist(incase,'dir')
+    error(['Input directory ' incase ' does not exist']);
+  end
+
+  if ~exist(refcase,'dir')
+    warning(['Reference directory ' refcase ' does not exist']);
+  end
+
+  [status, errMsg] = mkdir(outcase);
+  if (status == 0)
+    error(errMsg);
+  end
+
+  [nErr, nCases] = recurseDir(incase, outcase, refcase, outfile, ...
+      progname, opt, simulateMode, nErr, nCases, task, casenumber, legacy);
+
+ if strcmp(task, 'test') || strcmp(task, 'show')
+ system(['rm ' farFile]);
+ system(['rm ' nearFile]);
+ if simulateMode == false
+ system(['rm ' eventFile]);
+ system(['rm ' delayFile]);
+ system(['rm ' driftFile]);
+ end
+ end
+end
+
+if ~strcmp(task, 'list')
+ if nErr == 0
+    fprintf(1, '\nAll files are bit-exact to reference\n');
+ else
+ fprintf(1, '\n%d files are NOT bit-exact to reference\n', nErr);
+ end
+end
+
+
+function [nErrOut, nCases] = recurseDir(inpath, outpath, refpath, ...
+ outfile, progname, opt, simulateMode, nErr, nCases, task, casenumber, ...
+ legacy)
+
+global farFile;
+global nearFile;
+global eventFile;
+global delayFile;
+global driftFile;
+
+dirs = dir(inpath);
+nDirs = 0;
+nErrOut = nErr;
+for i=3:length(dirs) % skip . and ..
+ nDirs = nDirs + dirs(i).isdir;
+end
+
+
+if nDirs == 0
+ nCases = nCases + 1;
+
+ if casenumber == nCases || casenumber == 0
+
+ if strcmp(task, 'list')
+ fprintf([num2str(nCases) '. ' outfile '\n'])
+ else
+ vadoutfile = ['vad_' outfile '.dat'];
+ outfile = [outfile '.pcm'];
+
+ % Check for VAD test
+ vadTest = 0;
+ if ~isempty(findstr(opt, '-vad'))
+ vadTest = 1;
+ if legacy
+ opt = [opt ' ' outpath vadoutfile];
+ else
+ opt = [opt ' --vad_out_file ' outpath vadoutfile];
+ end
+ end
+
+ if exist([inpath 'vqeFar.pcm'])
+ system(['ln -s -f ' inpath 'vqeFar.pcm ' farFile]);
+ elseif exist([inpath 'apm_far.pcm'])
+ system(['ln -s -f ' inpath 'apm_far.pcm ' farFile]);
+ end
+
+ if exist([inpath 'vqeNear.pcm'])
+ system(['ln -s -f ' inpath 'vqeNear.pcm ' nearFile]);
+ elseif exist([inpath 'apm_near.pcm'])
+ system(['ln -s -f ' inpath 'apm_near.pcm ' nearFile]);
+ end
+
+ if exist([inpath 'vqeEvent.dat'])
+ system(['ln -s -f ' inpath 'vqeEvent.dat ' eventFile]);
+    elseif exist([inpath 'apm_event.dat'])
+ system(['ln -s -f ' inpath 'apm_event.dat ' eventFile]);
+ end
+
+ if exist([inpath 'vqeBuf.dat'])
+ system(['ln -s -f ' inpath 'vqeBuf.dat ' delayFile]);
+    elseif exist([inpath 'apm_delay.dat'])
+ system(['ln -s -f ' inpath 'apm_delay.dat ' delayFile]);
+ end
+
+ if exist([inpath 'vqeSkew.dat'])
+ system(['ln -s -f ' inpath 'vqeSkew.dat ' driftFile]);
+ elseif exist([inpath 'vqeDrift.dat'])
+ system(['ln -s -f ' inpath 'vqeDrift.dat ' driftFile]);
+ elseif exist([inpath 'apm_drift.dat'])
+ system(['ln -s -f ' inpath 'apm_drift.dat ' driftFile]);
+ end
+
+ if simulateMode == false
+ command = [progname ' -o ' outpath outfile ' ' opt];
+ else
+ if legacy
+ inputCmd = [' -in ' nearFile];
+ else
+ inputCmd = [' -i ' nearFile];
+ end
+
+ if exist([farFile])
+ if legacy
+ inputCmd = [' -if ' farFile inputCmd];
+ else
+ inputCmd = [' -ir ' farFile inputCmd];
+ end
+ end
+ command = [progname inputCmd ' -o ' outpath outfile ' ' opt];
+ end
+ % This prevents MATLAB from using its own C libraries.
+ shellcmd = ['bash -c "unset LD_LIBRARY_PATH;'];
+ fprintf([command '\n']);
+ [status, result] = system([shellcmd command '"']);
+ fprintf(result);
+
+ fprintf(['Reference file: ' refpath outfile '\n']);
+
+ if vadTest == 1
+ equal_to_ref = are_files_equal([outpath vadoutfile], ...
+ [refpath vadoutfile], ...
+ 'int8');
+ if ~equal_to_ref
+ nErr = nErr + 1;
+ end
+ end
+
+ [equal_to_ref, diffvector] = are_files_equal([outpath outfile], ...
+ [refpath outfile], ...
+ 'int16');
+ if ~equal_to_ref
+ nErr = nErr + 1;
+ end
+
+ if strcmp(task, 'show')
+ % Assume the last init gives the sample rate of interest.
+ str_idx = strfind(result, 'Sample rate:');
+ fs = str2num(result(str_idx(end) + 13:str_idx(end) + 17));
+ fprintf('Using %d Hz\n', fs);
+
+ if exist([farFile])
+ spclab(fs, farFile, nearFile, [refpath outfile], ...
+ [outpath outfile], diffvector);
+ %spclab(fs, diffvector);
+ else
+ spclab(fs, nearFile, [refpath outfile], [outpath outfile], ...
+ diffvector);
+ %spclab(fs, diffvector);
+ end
+
+ if vadTest == 1
+ spclab([refpath vadoutfile], [outpath vadoutfile]);
+ end
+ end
+ end
+ end
+else
+
+ for i=3:length(dirs)
+ if dirs(i).isdir
+ [nErr, nCases] = recurseDir([inpath dirs(i).name '/'], outpath, ...
+ refpath,[outfile '_' dirs(i).name], progname, opt, ...
+ simulateMode, nErr, nCases, task, casenumber, legacy);
+ end
+ end
+end
+nErrOut = nErr;
+
+function [are_equal, diffvector] = ...
+    are_files_equal(newfile, reffile, precision)
+
+are_equal = false;
+diffvector = 0;
+if ~exist(newfile,'file')
+ warning(['Output file ' newfile ' does not exist']);
+ return
+end
+
+if ~exist(reffile,'file')
+ warning(['Reference file ' reffile ' does not exist']);
+ return
+end
+
+fid = fopen(newfile,'rb');
+new = fread(fid,inf,precision);
+fclose(fid);
+
+fid = fopen(reffile,'rb');
+ref = fread(fid,inf,precision);
+fclose(fid);
+
+if length(new) ~= length(ref)
+ warning('Reference is not the same length as output');
+ minlength = min(length(new), length(ref));
+ new = new(1:minlength);
+ ref = ref(1:minlength);
+end
+diffvector = new - ref;
+
+if isequal(new, ref)
+ fprintf([newfile ' is bit-exact to reference\n']);
+ are_equal = true;
+else
+ if isempty(new)
+ warning([newfile ' is empty']);
+ return
+ end
+ snr = snrseg(new,ref,80);
+ fprintf('\n');
+ are_equal = false;
+end
diff --git a/src/modules/audio_processing/main/test/process_test/process_test.cc b/src/modules/audio_processing/main/test/process_test/process_test.cc
new file mode 100644
index 0000000000..c62345fcf0
--- /dev/null
+++ b/src/modules/audio_processing/main/test/process_test/process_test.cc
@@ -0,0 +1,628 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <sys/stat.h>  // stat() is used below on all targets, not only Android.
+
+#include "tick_util.h"
+#include "gtest/gtest.h"
+#include "module_common_types.h"
+
+#include "audio_processing.h"
+
+#include "cpu_features_wrapper.h"
+
+using webrtc::AudioFrame;
+using webrtc::TickInterval;
+using webrtc::TickTime;
+
+using webrtc::AudioProcessing;
+using webrtc::GainControl;
+using webrtc::NoiseSuppression;
+
+void usage() {
+ printf(
+ "Usage: process_test [options] [-ir REVERSE_FILE] [-i PRIMARY_FILE]\n");
+ printf(
+ " [-o OUT_FILE]\n");
+ printf(
+ "process_test is a test application for AudioProcessing.\n\n"
+ "When -ir or -i is specified the files will be processed directly in a\n"
+ "simulation mode. Otherwise the full set of test files is expected to be\n"
+ "present in the working directory.\n");
+ printf("\n");
+ printf("Options\n");
+ printf("General configuration:\n");
+ printf(" -fs SAMPLE_RATE_HZ\n");
+ printf(" -ch CHANNELS_IN CHANNELS_OUT\n");
+ printf(" -rch REVERSE_CHANNELS\n");
+ printf("\n");
+ printf("Component configuration:\n");
+ printf(
+ "All components are disabled by default. Each block below begins with a\n"
+ "flag to enable the component with default settings. The subsequent flags\n"
+ "in the block are used to provide configuration settings.\n");
+ printf("\n -aec Echo cancellation\n");
+ printf(" --drift_compensation\n");
+ printf(" --no_drift_compensation\n");
+ printf("\n -aecm Echo control mobile\n");
+ printf("\n -agc Gain control\n");
+ printf(" --analog\n");
+ printf(" --adaptive_digital\n");
+ printf(" --fixed_digital\n");
+ printf(" --target_level LEVEL\n");
+ printf(" --compression_gain GAIN\n");
+ printf(" --limiter\n");
+ printf(" --no_limiter\n");
+ printf("\n -hpf High pass filter\n");
+ printf("\n -ns Noise suppression\n");
+ printf(" --ns_low\n");
+ printf(" --ns_moderate\n");
+ printf(" --ns_high\n");
+ printf(" --ns_very_high\n");
+ printf("\n -vad Voice activity detection\n");
+  printf("  --vad_out_file FILE\n");
+ printf("\n");
+ printf("Modifiers:\n");
+ printf(" --perf Measure performance.\n");
+ printf(" --quiet Suppress text output.\n");
+ printf(" --no_progress Suppress progress.\n");
+ printf(" --version Print version information and exit.\n");
+}
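+
+// Editor's note (hedged example): a typical simulation-mode invocation
+// assembled from the flags listed in usage() above, with placeholder
+// file names:
+//
+//   ./process_test -fs 32000 -i near.pcm -ir far.pcm -o out.pcm \
+//       -aec --drift_compensation -ns --ns_high -vad --vad_out_file vad.dat
+//
+// Passing -i or -ir switches the tool into simulation mode; without them it
+// expects the full apm_*.pcm / apm_*.dat file set in the working directory.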
+
+// gtest's ASSERT_* macros can only be used in functions returning void, so
+// main() delegates the real work to this helper.
+void void_main(int argc, char* argv[]) {
+ if (argc > 1 && strcmp(argv[1], "--help") == 0) {
+ usage();
+ return;
+ }
+
+ if (argc < 2) {
+ printf("Did you mean to run without arguments?\n");
+ printf("Try `process_test --help' for more information.\n\n");
+ }
+
+ AudioProcessing* apm = AudioProcessing::Create(0);
+ ASSERT_TRUE(apm != NULL);
+
+ WebRtc_Word8 version[1024];
+ WebRtc_UWord32 version_bytes_remaining = sizeof(version);
+ WebRtc_UWord32 version_position = 0;
+
+ const char* far_filename = NULL;
+ const char* near_filename = NULL;
+ const char* out_filename = NULL;
+ const char* vad_out_filename = NULL;
+
+ int32_t sample_rate_hz = 16000;
+ int32_t device_sample_rate_hz = 16000;
+
+ int num_capture_input_channels = 1;
+ int num_capture_output_channels = 1;
+ int num_render_channels = 1;
+
+ int samples_per_channel = sample_rate_hz / 100;
+
+ bool simulating = false;
+ bool perf_testing = false;
+ bool verbose = true;
+ bool progress = true;
+ //bool interleaved = true;
+
+ for (int i = 1; i < argc; i++) {
+ if (strcmp(argv[i], "-ir") == 0) {
+ i++;
+ ASSERT_LT(i, argc) << "Specify filename after -ir";
+ far_filename = argv[i];
+ simulating = true;
+
+ } else if (strcmp(argv[i], "-i") == 0) {
+ i++;
+ ASSERT_LT(i, argc) << "Specify filename after -i";
+ near_filename = argv[i];
+ simulating = true;
+
+ } else if (strcmp(argv[i], "-o") == 0) {
+ i++;
+ ASSERT_LT(i, argc) << "Specify filename after -o";
+ out_filename = argv[i];
+
+ } else if (strcmp(argv[i], "-fs") == 0) {
+ i++;
+ ASSERT_LT(i, argc) << "Specify sample rate after -fs";
+ ASSERT_EQ(1, sscanf(argv[i], "%d", &sample_rate_hz));
+ samples_per_channel = sample_rate_hz / 100;
+
+ ASSERT_EQ(apm->kNoError,
+ apm->set_sample_rate_hz(sample_rate_hz));
+
+ } else if (strcmp(argv[i], "-ch") == 0) {
+ i++;
+ ASSERT_LT(i + 1, argc) << "Specify number of channels after -ch";
+ ASSERT_EQ(1, sscanf(argv[i], "%d", &num_capture_input_channels));
+ i++;
+ ASSERT_EQ(1, sscanf(argv[i], "%d", &num_capture_output_channels));
+
+ ASSERT_EQ(apm->kNoError,
+ apm->set_num_channels(num_capture_input_channels,
+ num_capture_output_channels));
+
+ } else if (strcmp(argv[i], "-rch") == 0) {
+ i++;
+ ASSERT_LT(i, argc) << "Specify number of channels after -rch";
+ ASSERT_EQ(1, sscanf(argv[i], "%d", &num_render_channels));
+
+ ASSERT_EQ(apm->kNoError,
+ apm->set_num_reverse_channels(num_render_channels));
+
+ } else if (strcmp(argv[i], "-aec") == 0) {
+ ASSERT_EQ(apm->kNoError, apm->echo_cancellation()->Enable(true));
+
+ } else if (strcmp(argv[i], "-noasm") == 0) {
+ WebRtc_GetCPUInfo = WebRtc_GetCPUInfoNoASM;
+
+ } else if (strcmp(argv[i], "--drift_compensation") == 0) {
+ ASSERT_EQ(apm->kNoError, apm->echo_cancellation()->Enable(true));
+ // TODO(ajm): this is enabled in the VQE test app by default. Investigate
+ // why it can give better performance despite passing zeros.
+ ASSERT_EQ(apm->kNoError,
+ apm->echo_cancellation()->enable_drift_compensation(true));
+ } else if (strcmp(argv[i], "--no_drift_compensation") == 0) {
+ ASSERT_EQ(apm->kNoError, apm->echo_cancellation()->Enable(true));
+ ASSERT_EQ(apm->kNoError,
+ apm->echo_cancellation()->enable_drift_compensation(false));
+
+ } else if (strcmp(argv[i], "-aecm") == 0) {
+ ASSERT_EQ(apm->kNoError, apm->echo_control_mobile()->Enable(true));
+
+ } else if (strcmp(argv[i], "-agc") == 0) {
+ ASSERT_EQ(apm->kNoError, apm->gain_control()->Enable(true));
+
+ } else if (strcmp(argv[i], "--analog") == 0) {
+ ASSERT_EQ(apm->kNoError, apm->gain_control()->Enable(true));
+ ASSERT_EQ(apm->kNoError,
+ apm->gain_control()->set_mode(GainControl::kAdaptiveAnalog));
+
+ } else if (strcmp(argv[i], "--adaptive_digital") == 0) {
+ ASSERT_EQ(apm->kNoError, apm->gain_control()->Enable(true));
+ ASSERT_EQ(apm->kNoError,
+ apm->gain_control()->set_mode(GainControl::kAdaptiveDigital));
+
+ } else if (strcmp(argv[i], "--fixed_digital") == 0) {
+ ASSERT_EQ(apm->kNoError, apm->gain_control()->Enable(true));
+ ASSERT_EQ(apm->kNoError,
+ apm->gain_control()->set_mode(GainControl::kFixedDigital));
+
+ } else if (strcmp(argv[i], "--target_level") == 0) {
+ i++;
+ int level;
+ ASSERT_EQ(1, sscanf(argv[i], "%d", &level));
+
+ ASSERT_EQ(apm->kNoError, apm->gain_control()->Enable(true));
+ ASSERT_EQ(apm->kNoError,
+ apm->gain_control()->set_target_level_dbfs(level));
+
+ } else if (strcmp(argv[i], "--compression_gain") == 0) {
+ i++;
+ int gain;
+ ASSERT_EQ(1, sscanf(argv[i], "%d", &gain));
+
+ ASSERT_EQ(apm->kNoError, apm->gain_control()->Enable(true));
+ ASSERT_EQ(apm->kNoError,
+ apm->gain_control()->set_compression_gain_db(gain));
+
+ } else if (strcmp(argv[i], "--limiter") == 0) {
+ ASSERT_EQ(apm->kNoError, apm->gain_control()->Enable(true));
+ ASSERT_EQ(apm->kNoError,
+ apm->gain_control()->enable_limiter(true));
+
+ } else if (strcmp(argv[i], "--no_limiter") == 0) {
+ ASSERT_EQ(apm->kNoError, apm->gain_control()->Enable(true));
+ ASSERT_EQ(apm->kNoError,
+ apm->gain_control()->enable_limiter(false));
+
+ } else if (strcmp(argv[i], "-hpf") == 0) {
+ ASSERT_EQ(apm->kNoError, apm->high_pass_filter()->Enable(true));
+
+ } else if (strcmp(argv[i], "-ns") == 0) {
+ ASSERT_EQ(apm->kNoError, apm->noise_suppression()->Enable(true));
+
+ } else if (strcmp(argv[i], "--ns_low") == 0) {
+ ASSERT_EQ(apm->kNoError, apm->noise_suppression()->Enable(true));
+ ASSERT_EQ(apm->kNoError,
+ apm->noise_suppression()->set_level(NoiseSuppression::kLow));
+
+ } else if (strcmp(argv[i], "--ns_moderate") == 0) {
+ ASSERT_EQ(apm->kNoError, apm->noise_suppression()->Enable(true));
+ ASSERT_EQ(apm->kNoError,
+ apm->noise_suppression()->set_level(NoiseSuppression::kModerate));
+
+ } else if (strcmp(argv[i], "--ns_high") == 0) {
+ ASSERT_EQ(apm->kNoError, apm->noise_suppression()->Enable(true));
+ ASSERT_EQ(apm->kNoError,
+ apm->noise_suppression()->set_level(NoiseSuppression::kHigh));
+
+ } else if (strcmp(argv[i], "--ns_very_high") == 0) {
+ ASSERT_EQ(apm->kNoError, apm->noise_suppression()->Enable(true));
+ ASSERT_EQ(apm->kNoError,
+ apm->noise_suppression()->set_level(NoiseSuppression::kVeryHigh));
+
+ } else if (strcmp(argv[i], "-vad") == 0) {
+ ASSERT_EQ(apm->kNoError, apm->voice_detection()->Enable(true));
+
+ } else if (strcmp(argv[i], "--vad_out_file") == 0) {
+ i++;
+ ASSERT_LT(i, argc) << "Specify filename after --vad_out_file";
+ vad_out_filename = argv[i];
+
+ } else if (strcmp(argv[i], "--perf") == 0) {
+ perf_testing = true;
+
+ } else if (strcmp(argv[i], "--quiet") == 0) {
+ verbose = false;
+ progress = false;
+
+ } else if (strcmp(argv[i], "--no_progress") == 0) {
+ progress = false;
+
+ } else if (strcmp(argv[i], "--version") == 0) {
+ ASSERT_EQ(apm->kNoError, apm->Version(version,
+ version_bytes_remaining,
+ version_position));
+ printf("%s\n", version);
+ return;
+
+ } else {
+ FAIL() << "Unrecognized argument " << argv[i];
+ }
+ }
+
+ if (verbose) {
+ printf("Sample rate: %d Hz\n", sample_rate_hz);
+ printf("Primary channels: %d (in), %d (out)\n",
+ num_capture_input_channels,
+ num_capture_output_channels);
+ printf("Reverse channels: %d \n", num_render_channels);
+ }
+
+ const char far_file_default[] = "apm_far.pcm";
+ const char near_file_default[] = "apm_near.pcm";
+ const char out_file_default[] = "out.pcm";
+ const char event_filename[] = "apm_event.dat";
+ const char delay_filename[] = "apm_delay.dat";
+ const char drift_filename[] = "apm_drift.dat";
+ const char vad_file_default[] = "vad_out.dat";
+
+ if (!simulating) {
+ far_filename = far_file_default;
+ near_filename = near_file_default;
+ }
+
+ if (out_filename == NULL) {
+ out_filename = out_file_default;
+ }
+
+ if (vad_out_filename == NULL) {
+ vad_out_filename = vad_file_default;
+ }
+
+ FILE* far_file = NULL;
+ FILE* near_file = NULL;
+ FILE* out_file = NULL;
+ FILE* event_file = NULL;
+ FILE* delay_file = NULL;
+ FILE* drift_file = NULL;
+ FILE* vad_out_file = NULL;
+
+ if (far_filename != NULL) {
+ far_file = fopen(far_filename, "rb");
+ ASSERT_TRUE(NULL != far_file) << "Unable to open far-end audio file "
+ << far_filename;
+ }
+
+ near_file = fopen(near_filename, "rb");
+ ASSERT_TRUE(NULL != near_file) << "Unable to open near-end audio file "
+ << near_filename;
+  struct stat st;
+  ASSERT_EQ(0, stat(near_filename, &st)) << "Unable to stat near-end file "
+                                         << near_filename;
+  int near_size_samples = st.st_size / sizeof(int16_t);
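+  // Total number of near-end samples, used only for the progress
+  // percentage printed in the capture loop below.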
+
+ out_file = fopen(out_filename, "wb");
+ ASSERT_TRUE(NULL != out_file) << "Unable to open output audio file "
+ << out_filename;
+
+ if (!simulating) {
+ event_file = fopen(event_filename, "rb");
+ ASSERT_TRUE(NULL != event_file) << "Unable to open event file "
+ << event_filename;
+
+ delay_file = fopen(delay_filename, "rb");
+    ASSERT_TRUE(NULL != delay_file) << "Unable to open delay file "
+ << delay_filename;
+
+ drift_file = fopen(drift_filename, "rb");
+ ASSERT_TRUE(NULL != drift_file) << "Unable to open drift file "
+ << drift_filename;
+ }
+
+ if (apm->voice_detection()->is_enabled()) {
+ vad_out_file = fopen(vad_out_filename, "wb");
+ ASSERT_TRUE(NULL != vad_out_file) << "Unable to open VAD output file "
+ << vad_out_file;
+ }
+
+ enum Events {
+ kInitializeEvent,
+ kRenderEvent,
+ kCaptureEvent,
+ kResetEventDeprecated
+ };
+ int16_t event = 0;
+ size_t read_count = 0;
+ int reverse_count = 0;
+ int primary_count = 0;
+ int near_read_samples = 0;
+ TickInterval acc_ticks;
+
+ AudioFrame far_frame;
+ far_frame._frequencyInHz = sample_rate_hz;
+
+ AudioFrame near_frame;
+ near_frame._frequencyInHz = sample_rate_hz;
+
+ int delay_ms = 0;
+ int drift_samples = 0;
+ int capture_level = 127;
+ int8_t stream_has_voice = 0;
+
+ TickTime t0 = TickTime::Now();
+ TickTime t1 = t0;
+ WebRtc_Word64 max_time_us = 0;
+ WebRtc_Word64 max_time_reverse_us = 0;
+  WebRtc_Word64 min_time_us = 1000000;
+  WebRtc_Word64 min_time_reverse_us = 1000000;
+
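+  // Main replay loop. In simulation mode, render and capture events
+  // alternate (capture only, if no far-end file was given) until an input
+  // file runs out; otherwise the loop is driven by the recorded event file.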
+ while (simulating || feof(event_file) == 0) {
+ std::ostringstream trace_stream;
+ trace_stream << "Processed frames: " << reverse_count << " (reverse), "
+ << primary_count << " (primary)";
+ SCOPED_TRACE(trace_stream.str());
+
+ if (simulating) {
+ if (far_file == NULL) {
+ event = kCaptureEvent;
+ } else {
+ if (event == kRenderEvent) {
+ event = kCaptureEvent;
+ } else {
+ event = kRenderEvent;
+ }
+ }
+ } else {
+ read_count = fread(&event, sizeof(event), 1, event_file);
+      if (read_count != 1) {
+        break;  // End of the event file; this is expected.
+      }
+ }
+
+ if (event == kInitializeEvent || event == kResetEventDeprecated) {
+ ASSERT_EQ(1u,
+ fread(&sample_rate_hz, sizeof(sample_rate_hz), 1, event_file));
+ samples_per_channel = sample_rate_hz / 100;
+
+ ASSERT_EQ(1u,
+ fread(&device_sample_rate_hz,
+ sizeof(device_sample_rate_hz),
+ 1,
+ event_file));
+
+ ASSERT_EQ(apm->kNoError,
+ apm->set_sample_rate_hz(sample_rate_hz));
+
+ ASSERT_EQ(apm->kNoError,
+ apm->echo_cancellation()->set_device_sample_rate_hz(
+ device_sample_rate_hz));
+
+ far_frame._frequencyInHz = sample_rate_hz;
+ near_frame._frequencyInHz = sample_rate_hz;
+
+ if (verbose) {
+ printf("Init at frame: %d (primary), %d (reverse)\n",
+ primary_count, reverse_count);
+ printf(" Sample rate: %d Hz\n", sample_rate_hz);
+ }
+
+ } else if (event == kRenderEvent) {
+ reverse_count++;
+ far_frame._audioChannel = num_render_channels;
+ far_frame._payloadDataLengthInSamples =
+ num_render_channels * samples_per_channel;
+
+ read_count = fread(far_frame._payloadData,
+ sizeof(WebRtc_Word16),
+ far_frame._payloadDataLengthInSamples,
+ far_file);
+
+ if (simulating) {
+ if (read_count != far_frame._payloadDataLengthInSamples) {
+ break; // This is expected.
+ }
+ } else {
+ ASSERT_EQ(read_count,
+ far_frame._payloadDataLengthInSamples);
+ }
+
+ if (perf_testing) {
+ t0 = TickTime::Now();
+ }
+
+ ASSERT_EQ(apm->kNoError,
+ apm->AnalyzeReverseStream(&far_frame));
+
+ if (perf_testing) {
+ t1 = TickTime::Now();
+ TickInterval tick_diff = t1 - t0;
+ acc_ticks += tick_diff;
+ if (tick_diff.Microseconds() > max_time_reverse_us) {
+ max_time_reverse_us = tick_diff.Microseconds();
+ }
+ if (tick_diff.Microseconds() < min_time_reverse_us) {
+ min_time_reverse_us = tick_diff.Microseconds();
+ }
+ }
+
+ } else if (event == kCaptureEvent) {
+ primary_count++;
+ near_frame._audioChannel = num_capture_input_channels;
+ near_frame._payloadDataLengthInSamples =
+ num_capture_input_channels * samples_per_channel;
+
+ read_count = fread(near_frame._payloadData,
+ sizeof(WebRtc_Word16),
+ near_frame._payloadDataLengthInSamples,
+ near_file);
+
+ near_read_samples += read_count;
+ if (progress && primary_count % 100 == 0) {
+ printf("%.0f%% complete\r",
+ (near_read_samples * 100.0) / near_size_samples);
+ fflush(stdout);
+ }
+ if (simulating) {
+ if (read_count != near_frame._payloadDataLengthInSamples) {
+ break; // This is expected.
+ }
+
+ delay_ms = 0;
+ drift_samples = 0;
+ } else {
+ ASSERT_EQ(read_count,
+ near_frame._payloadDataLengthInSamples);
+
+        // TODO(ajm): can sizeof(delay_ms) be used with the current files?
+ ASSERT_EQ(1u,
+ fread(&delay_ms, 2, 1, delay_file));
+ ASSERT_EQ(1u,
+ fread(&drift_samples, sizeof(drift_samples), 1, drift_file));
+ }
+
+ if (perf_testing) {
+ t0 = TickTime::Now();
+ }
+
+ // TODO(ajm): fake an analog gain while simulating.
+
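+      // Per-frame stream metadata (analog level, delay, drift) must be set
+      // before each ProcessStream() call; the possibly adjusted analog
+      // level is read back afterwards.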
+ int capture_level_in = capture_level;
+ ASSERT_EQ(apm->kNoError,
+ apm->gain_control()->set_stream_analog_level(capture_level));
+ ASSERT_EQ(apm->kNoError,
+ apm->set_stream_delay_ms(delay_ms));
+ ASSERT_EQ(apm->kNoError,
+ apm->echo_cancellation()->set_stream_drift_samples(drift_samples));
+
+ int err = apm->ProcessStream(&near_frame);
+ if (err == apm->kBadStreamParameterWarning) {
+ printf("Bad parameter warning. %s\n", trace_stream.str().c_str());
+ }
+ ASSERT_TRUE(err == apm->kNoError ||
+ err == apm->kBadStreamParameterWarning);
+
+ capture_level = apm->gain_control()->stream_analog_level();
+
+ stream_has_voice =
+ static_cast<int8_t>(apm->voice_detection()->stream_has_voice());
+ if (vad_out_file != NULL) {
+ ASSERT_EQ(1u, fwrite(&stream_has_voice,
+ sizeof(stream_has_voice),
+ 1,
+ vad_out_file));
+ }
+
+ if (apm->gain_control()->mode() != GainControl::kAdaptiveAnalog) {
+ ASSERT_EQ(capture_level_in, capture_level);
+ }
+
+ if (perf_testing) {
+ t1 = TickTime::Now();
+ TickInterval tick_diff = t1 - t0;
+ acc_ticks += tick_diff;
+ if (tick_diff.Microseconds() > max_time_us) {
+ max_time_us = tick_diff.Microseconds();
+ }
+ if (tick_diff.Microseconds() < min_time_us) {
+ min_time_us = tick_diff.Microseconds();
+ }
+ }
+
+ ASSERT_EQ(near_frame._payloadDataLengthInSamples,
+ fwrite(near_frame._payloadData,
+ sizeof(WebRtc_Word16),
+ near_frame._payloadDataLengthInSamples,
+ out_file));
+ }
+ else {
+ FAIL() << "Event " << event << " is unrecognized";
+ }
+ }
+
+ if (verbose) {
+ printf("\nProcessed frames: %d (primary), %d (reverse)\n",
+ primary_count, reverse_count);
+ }
+
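+  // Sanity-check that each input file was consumed in full; leftover data
+  // would indicate the output is out of sync with the inputs.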
+ int8_t temp_int8;
+ if (far_file != NULL) {
+ read_count = fread(&temp_int8, sizeof(temp_int8), 1, far_file);
+ EXPECT_NE(0, feof(far_file)) << "Far-end file not fully processed";
+ }
+ read_count = fread(&temp_int8, sizeof(temp_int8), 1, near_file);
+ EXPECT_NE(0, feof(near_file)) << "Near-end file not fully processed";
+
+ if (!simulating) {
+ read_count = fread(&temp_int8, sizeof(temp_int8), 1, event_file);
+ EXPECT_NE(0, feof(event_file)) << "Event file not fully processed";
+ read_count = fread(&temp_int8, sizeof(temp_int8), 1, delay_file);
+ EXPECT_NE(0, feof(delay_file)) << "Delay file not fully processed";
+ read_count = fread(&temp_int8, sizeof(temp_int8), 1, drift_file);
+ EXPECT_NE(0, feof(drift_file)) << "Drift file not fully processed";
+ }
+
+ if (perf_testing) {
+ if (primary_count > 0) {
+ WebRtc_Word64 exec_time = acc_ticks.Milliseconds();
+ printf("\nTotal time: %.3f s, file time: %.2f s\n",
+ exec_time * 0.001, primary_count * 0.01);
+ printf("Time per frame: %.3f ms (average), %.3f ms (max),"
+ " %.3f ms (min)\n",
+ (exec_time * 1.0) / primary_count,
+ (max_time_us + max_time_reverse_us) / 1000.0,
+ (min_time_us + min_time_reverse_us) / 1000.0);
+ } else {
+ printf("Warning: no capture frames\n");
+ }
+ }
+
+ AudioProcessing::Destroy(apm);
+ apm = NULL;
+}
+
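+// A typical invocation in simulation mode might look like the following
+// (input/output file flags are handled earlier in the argument loop; the
+// flag combination here is illustrative only):
+//   process_test -agc --analog -ns --ns_moderate --perf
+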
+int main(int argc, char* argv[]) {
+ void_main(argc, argv);
+
+ return 0;
+}
diff --git a/src/modules/audio_processing/main/test/unit_test/Android.mk b/src/modules/audio_processing/main/test/unit_test/Android.mk
new file mode 100644
index 0000000000..b2029cfb4d
--- /dev/null
+++ b/src/modules/audio_processing/main/test/unit_test/Android.mk
@@ -0,0 +1,49 @@
+# Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+LOCAL_PATH:= $(call my-dir)
+
+# apm test app
+
+include $(CLEAR_VARS)
+
+LOCAL_MODULE_TAGS := tests
+LOCAL_CPP_EXTENSION := .cc
+LOCAL_SRC_FILES:= \
+ unit_test.cc
+
+# Flags passed to both C and C++ files.
+LOCAL_CFLAGS := \
+ '-DWEBRTC_TARGET_PC' \
+ '-DWEBRTC_LINUX' \
+ '-DWEBRTC_THREAD_RR' \
+ '-DWEBRTC_ANDROID' \
+ '-DANDROID'
+
+LOCAL_CPPFLAGS :=
+LOCAL_LDFLAGS :=
+LOCAL_C_INCLUDES := \
+ external/gtest/include \
+ $(LOCAL_PATH)/../../../../../system_wrappers/interface \
+ $(LOCAL_PATH)/../../../../../common_audio/signal_processing_library/main/interface \
+ $(LOCAL_PATH)/../../interface \
+ $(LOCAL_PATH)/../../../../interface \
+ $(LOCAL_PATH)/../../../../..
+
+LOCAL_STATIC_LIBRARIES := \
+ libgtest
+
+LOCAL_SHARED_LIBRARIES := \
+ libutils \
+ libstlport \
+ libwebrtc_audio_preprocessing
+
+LOCAL_MODULE:= webrtc_apm_unit_test
+
+include external/stlport/libstlport.mk
+include $(BUILD_EXECUTABLE)
diff --git a/src/modules/audio_processing/main/test/unit_test/audio_processing_unittest.pb.cc b/src/modules/audio_processing/main/test/unit_test/audio_processing_unittest.pb.cc
new file mode 100644
index 0000000000..c82ffdb43e
--- /dev/null
+++ b/src/modules/audio_processing/main/test/unit_test/audio_processing_unittest.pb.cc
@@ -0,0 +1,1111 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+
+#define INTERNAL_SUPPRESS_PROTOBUF_FIELD_DEPRECATION
+#include "audio_processing_unittest.pb.h"
+
+#include <algorithm>
+
+#include <google/protobuf/stubs/once.h>
+#include <google/protobuf/io/coded_stream.h>
+#include <google/protobuf/wire_format_lite_inl.h>
+// @@protoc_insertion_point(includes)
+
+namespace audio_processing_unittest {
+
+void protobuf_ShutdownFile_audio_5fprocessing_5funittest_2eproto() {
+ delete Test::default_instance_;
+ delete Test_Statistic::default_instance_;
+ delete Test_EchoMetrics::default_instance_;
+ delete OutputData::default_instance_;
+}
+
+void protobuf_AddDesc_audio_5fprocessing_5funittest_2eproto() {
+ static bool already_here = false;
+ if (already_here) return;
+ already_here = true;
+ GOOGLE_PROTOBUF_VERIFY_VERSION;
+
+ Test::default_instance_ = new Test();
+ Test_Statistic::default_instance_ = new Test_Statistic();
+ Test_EchoMetrics::default_instance_ = new Test_EchoMetrics();
+ OutputData::default_instance_ = new OutputData();
+ Test::default_instance_->InitAsDefaultInstance();
+ Test_Statistic::default_instance_->InitAsDefaultInstance();
+ Test_EchoMetrics::default_instance_->InitAsDefaultInstance();
+ OutputData::default_instance_->InitAsDefaultInstance();
+ ::google::protobuf::internal::OnShutdown(&protobuf_ShutdownFile_audio_5fprocessing_5funittest_2eproto);
+}
+
+// Force AddDescriptors() to be called at static initialization time.
+struct StaticDescriptorInitializer_audio_5fprocessing_5funittest_2eproto {
+ StaticDescriptorInitializer_audio_5fprocessing_5funittest_2eproto() {
+ protobuf_AddDesc_audio_5fprocessing_5funittest_2eproto();
+ }
+} static_descriptor_initializer_audio_5fprocessing_5funittest_2eproto_;
+
+
+// ===================================================================
+
+#ifndef _MSC_VER
+const int Test_Statistic::kInstantFieldNumber;
+const int Test_Statistic::kAverageFieldNumber;
+const int Test_Statistic::kMaximumFieldNumber;
+const int Test_Statistic::kMinimumFieldNumber;
+#endif // !_MSC_VER
+
+Test_Statistic::Test_Statistic()
+ : ::google::protobuf::MessageLite() {
+ SharedCtor();
+}
+
+void Test_Statistic::InitAsDefaultInstance() {
+}
+
+Test_Statistic::Test_Statistic(const Test_Statistic& from)
+ : ::google::protobuf::MessageLite() {
+ SharedCtor();
+ MergeFrom(from);
+}
+
+void Test_Statistic::SharedCtor() {
+ _cached_size_ = 0;
+ instant_ = 0;
+ average_ = 0;
+ maximum_ = 0;
+ minimum_ = 0;
+ ::memset(_has_bits_, 0, sizeof(_has_bits_));
+}
+
+Test_Statistic::~Test_Statistic() {
+ SharedDtor();
+}
+
+void Test_Statistic::SharedDtor() {
+ if (this != default_instance_) {
+ }
+}
+
+void Test_Statistic::SetCachedSize(int size) const {
+ GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
+ _cached_size_ = size;
+ GOOGLE_SAFE_CONCURRENT_WRITES_END();
+}
+const Test_Statistic& Test_Statistic::default_instance() {
+ if (default_instance_ == NULL) protobuf_AddDesc_audio_5fprocessing_5funittest_2eproto(); return *default_instance_;
+}
+
+Test_Statistic* Test_Statistic::default_instance_ = NULL;
+
+Test_Statistic* Test_Statistic::New() const {
+ return new Test_Statistic;
+}
+
+void Test_Statistic::Clear() {
+ if (_has_bits_[0 / 32] & (0xffu << (0 % 32))) {
+ instant_ = 0;
+ average_ = 0;
+ maximum_ = 0;
+ minimum_ = 0;
+ }
+ ::memset(_has_bits_, 0, sizeof(_has_bits_));
+}
+
+bool Test_Statistic::MergePartialFromCodedStream(
+ ::google::protobuf::io::CodedInputStream* input) {
+#define DO_(EXPRESSION) if (!(EXPRESSION)) return false
+ ::google::protobuf::uint32 tag;
+ while ((tag = input->ReadTag()) != 0) {
+ switch (::google::protobuf::internal::WireFormatLite::GetTagFieldNumber(tag)) {
+ // optional int32 instant = 1;
+ case 1: {
+ if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+ ::google::protobuf::internal::WireFormatLite::WIRETYPE_VARINT) {
+ DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
+ ::google::protobuf::int32, ::google::protobuf::internal::WireFormatLite::TYPE_INT32>(
+ input, &instant_)));
+ set_has_instant();
+ } else {
+ goto handle_uninterpreted;
+ }
+ if (input->ExpectTag(16)) goto parse_average;
+ break;
+ }
+
+ // optional int32 average = 2;
+ case 2: {
+ if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+ ::google::protobuf::internal::WireFormatLite::WIRETYPE_VARINT) {
+ parse_average:
+ DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
+ ::google::protobuf::int32, ::google::protobuf::internal::WireFormatLite::TYPE_INT32>(
+ input, &average_)));
+ set_has_average();
+ } else {
+ goto handle_uninterpreted;
+ }
+ if (input->ExpectTag(24)) goto parse_maximum;
+ break;
+ }
+
+ // optional int32 maximum = 3;
+ case 3: {
+ if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+ ::google::protobuf::internal::WireFormatLite::WIRETYPE_VARINT) {
+ parse_maximum:
+ DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
+ ::google::protobuf::int32, ::google::protobuf::internal::WireFormatLite::TYPE_INT32>(
+ input, &maximum_)));
+ set_has_maximum();
+ } else {
+ goto handle_uninterpreted;
+ }
+ if (input->ExpectTag(32)) goto parse_minimum;
+ break;
+ }
+
+ // optional int32 minimum = 4;
+ case 4: {
+ if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+ ::google::protobuf::internal::WireFormatLite::WIRETYPE_VARINT) {
+ parse_minimum:
+ DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
+ ::google::protobuf::int32, ::google::protobuf::internal::WireFormatLite::TYPE_INT32>(
+ input, &minimum_)));
+ set_has_minimum();
+ } else {
+ goto handle_uninterpreted;
+ }
+ if (input->ExpectAtEnd()) return true;
+ break;
+ }
+
+ default: {
+ handle_uninterpreted:
+ if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+ ::google::protobuf::internal::WireFormatLite::WIRETYPE_END_GROUP) {
+ return true;
+ }
+ DO_(::google::protobuf::internal::WireFormatLite::SkipField(input, tag, NULL));
+ break;
+ }
+ }
+ }
+ return true;
+#undef DO_
+}
+
+void Test_Statistic::SerializeWithCachedSizes(
+ ::google::protobuf::io::CodedOutputStream* output) const {
+ // optional int32 instant = 1;
+ if (has_instant()) {
+ ::google::protobuf::internal::WireFormatLite::WriteInt32(1, this->instant(), output);
+ }
+
+ // optional int32 average = 2;
+ if (has_average()) {
+ ::google::protobuf::internal::WireFormatLite::WriteInt32(2, this->average(), output);
+ }
+
+ // optional int32 maximum = 3;
+ if (has_maximum()) {
+ ::google::protobuf::internal::WireFormatLite::WriteInt32(3, this->maximum(), output);
+ }
+
+ // optional int32 minimum = 4;
+ if (has_minimum()) {
+ ::google::protobuf::internal::WireFormatLite::WriteInt32(4, this->minimum(), output);
+ }
+
+}
+
+int Test_Statistic::ByteSize() const {
+ int total_size = 0;
+
+ if (_has_bits_[0 / 32] & (0xffu << (0 % 32))) {
+ // optional int32 instant = 1;
+ if (has_instant()) {
+ total_size += 1 +
+ ::google::protobuf::internal::WireFormatLite::Int32Size(
+ this->instant());
+ }
+
+ // optional int32 average = 2;
+ if (has_average()) {
+ total_size += 1 +
+ ::google::protobuf::internal::WireFormatLite::Int32Size(
+ this->average());
+ }
+
+ // optional int32 maximum = 3;
+ if (has_maximum()) {
+ total_size += 1 +
+ ::google::protobuf::internal::WireFormatLite::Int32Size(
+ this->maximum());
+ }
+
+ // optional int32 minimum = 4;
+ if (has_minimum()) {
+ total_size += 1 +
+ ::google::protobuf::internal::WireFormatLite::Int32Size(
+ this->minimum());
+ }
+
+ }
+ GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
+ _cached_size_ = total_size;
+ GOOGLE_SAFE_CONCURRENT_WRITES_END();
+ return total_size;
+}
+
+void Test_Statistic::CheckTypeAndMergeFrom(
+ const ::google::protobuf::MessageLite& from) {
+ MergeFrom(*::google::protobuf::down_cast<const Test_Statistic*>(&from));
+}
+
+void Test_Statistic::MergeFrom(const Test_Statistic& from) {
+ GOOGLE_CHECK_NE(&from, this);
+ if (from._has_bits_[0 / 32] & (0xffu << (0 % 32))) {
+ if (from.has_instant()) {
+ set_instant(from.instant());
+ }
+ if (from.has_average()) {
+ set_average(from.average());
+ }
+ if (from.has_maximum()) {
+ set_maximum(from.maximum());
+ }
+ if (from.has_minimum()) {
+ set_minimum(from.minimum());
+ }
+ }
+}
+
+void Test_Statistic::CopyFrom(const Test_Statistic& from) {
+ if (&from == this) return;
+ Clear();
+ MergeFrom(from);
+}
+
+bool Test_Statistic::IsInitialized() const {
+
+ return true;
+}
+
+void Test_Statistic::Swap(Test_Statistic* other) {
+ if (other != this) {
+ std::swap(instant_, other->instant_);
+ std::swap(average_, other->average_);
+ std::swap(maximum_, other->maximum_);
+ std::swap(minimum_, other->minimum_);
+ std::swap(_has_bits_[0], other->_has_bits_[0]);
+ std::swap(_cached_size_, other->_cached_size_);
+ }
+}
+
+::std::string Test_Statistic::GetTypeName() const {
+ return "audio_processing_unittest.Test.Statistic";
+}
+
+
+// -------------------------------------------------------------------
+
+#ifndef _MSC_VER
+const int Test_EchoMetrics::kResidualEchoReturnLossFieldNumber;
+const int Test_EchoMetrics::kEchoReturnLossFieldNumber;
+const int Test_EchoMetrics::kEchoReturnLossEnhancementFieldNumber;
+const int Test_EchoMetrics::kANlpFieldNumber;
+#endif // !_MSC_VER
+
+Test_EchoMetrics::Test_EchoMetrics()
+ : ::google::protobuf::MessageLite() {
+ SharedCtor();
+}
+
+void Test_EchoMetrics::InitAsDefaultInstance() {
+ residualechoreturnloss_ = const_cast< ::audio_processing_unittest::Test_Statistic*>(&::audio_processing_unittest::Test_Statistic::default_instance());
+ echoreturnloss_ = const_cast< ::audio_processing_unittest::Test_Statistic*>(&::audio_processing_unittest::Test_Statistic::default_instance());
+ echoreturnlossenhancement_ = const_cast< ::audio_processing_unittest::Test_Statistic*>(&::audio_processing_unittest::Test_Statistic::default_instance());
+ anlp_ = const_cast< ::audio_processing_unittest::Test_Statistic*>(&::audio_processing_unittest::Test_Statistic::default_instance());
+}
+
+Test_EchoMetrics::Test_EchoMetrics(const Test_EchoMetrics& from)
+ : ::google::protobuf::MessageLite() {
+ SharedCtor();
+ MergeFrom(from);
+}
+
+void Test_EchoMetrics::SharedCtor() {
+ _cached_size_ = 0;
+ residualechoreturnloss_ = NULL;
+ echoreturnloss_ = NULL;
+ echoreturnlossenhancement_ = NULL;
+ anlp_ = NULL;
+ ::memset(_has_bits_, 0, sizeof(_has_bits_));
+}
+
+Test_EchoMetrics::~Test_EchoMetrics() {
+ SharedDtor();
+}
+
+void Test_EchoMetrics::SharedDtor() {
+ if (this != default_instance_) {
+ delete residualechoreturnloss_;
+ delete echoreturnloss_;
+ delete echoreturnlossenhancement_;
+ delete anlp_;
+ }
+}
+
+void Test_EchoMetrics::SetCachedSize(int size) const {
+ GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
+ _cached_size_ = size;
+ GOOGLE_SAFE_CONCURRENT_WRITES_END();
+}
+const Test_EchoMetrics& Test_EchoMetrics::default_instance() {
+ if (default_instance_ == NULL) protobuf_AddDesc_audio_5fprocessing_5funittest_2eproto(); return *default_instance_;
+}
+
+Test_EchoMetrics* Test_EchoMetrics::default_instance_ = NULL;
+
+Test_EchoMetrics* Test_EchoMetrics::New() const {
+ return new Test_EchoMetrics;
+}
+
+void Test_EchoMetrics::Clear() {
+ if (_has_bits_[0 / 32] & (0xffu << (0 % 32))) {
+ if (has_residualechoreturnloss()) {
+ if (residualechoreturnloss_ != NULL) residualechoreturnloss_->::audio_processing_unittest::Test_Statistic::Clear();
+ }
+ if (has_echoreturnloss()) {
+ if (echoreturnloss_ != NULL) echoreturnloss_->::audio_processing_unittest::Test_Statistic::Clear();
+ }
+ if (has_echoreturnlossenhancement()) {
+ if (echoreturnlossenhancement_ != NULL) echoreturnlossenhancement_->::audio_processing_unittest::Test_Statistic::Clear();
+ }
+ if (has_anlp()) {
+ if (anlp_ != NULL) anlp_->::audio_processing_unittest::Test_Statistic::Clear();
+ }
+ }
+ ::memset(_has_bits_, 0, sizeof(_has_bits_));
+}
+
+bool Test_EchoMetrics::MergePartialFromCodedStream(
+ ::google::protobuf::io::CodedInputStream* input) {
+#define DO_(EXPRESSION) if (!(EXPRESSION)) return false
+ ::google::protobuf::uint32 tag;
+ while ((tag = input->ReadTag()) != 0) {
+ switch (::google::protobuf::internal::WireFormatLite::GetTagFieldNumber(tag)) {
+ // optional .audio_processing_unittest.Test.Statistic residualEchoReturnLoss = 1;
+ case 1: {
+ if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+ ::google::protobuf::internal::WireFormatLite::WIRETYPE_LENGTH_DELIMITED) {
+ DO_(::google::protobuf::internal::WireFormatLite::ReadMessageNoVirtual(
+ input, mutable_residualechoreturnloss()));
+ } else {
+ goto handle_uninterpreted;
+ }
+ if (input->ExpectTag(18)) goto parse_echoReturnLoss;
+ break;
+ }
+
+ // optional .audio_processing_unittest.Test.Statistic echoReturnLoss = 2;
+ case 2: {
+ if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+ ::google::protobuf::internal::WireFormatLite::WIRETYPE_LENGTH_DELIMITED) {
+ parse_echoReturnLoss:
+ DO_(::google::protobuf::internal::WireFormatLite::ReadMessageNoVirtual(
+ input, mutable_echoreturnloss()));
+ } else {
+ goto handle_uninterpreted;
+ }
+ if (input->ExpectTag(26)) goto parse_echoReturnLossEnhancement;
+ break;
+ }
+
+ // optional .audio_processing_unittest.Test.Statistic echoReturnLossEnhancement = 3;
+ case 3: {
+ if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+ ::google::protobuf::internal::WireFormatLite::WIRETYPE_LENGTH_DELIMITED) {
+ parse_echoReturnLossEnhancement:
+ DO_(::google::protobuf::internal::WireFormatLite::ReadMessageNoVirtual(
+ input, mutable_echoreturnlossenhancement()));
+ } else {
+ goto handle_uninterpreted;
+ }
+ if (input->ExpectTag(34)) goto parse_aNlp;
+ break;
+ }
+
+ // optional .audio_processing_unittest.Test.Statistic aNlp = 4;
+ case 4: {
+ if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+ ::google::protobuf::internal::WireFormatLite::WIRETYPE_LENGTH_DELIMITED) {
+ parse_aNlp:
+ DO_(::google::protobuf::internal::WireFormatLite::ReadMessageNoVirtual(
+ input, mutable_anlp()));
+ } else {
+ goto handle_uninterpreted;
+ }
+ if (input->ExpectAtEnd()) return true;
+ break;
+ }
+
+ default: {
+ handle_uninterpreted:
+ if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+ ::google::protobuf::internal::WireFormatLite::WIRETYPE_END_GROUP) {
+ return true;
+ }
+ DO_(::google::protobuf::internal::WireFormatLite::SkipField(input, tag, NULL));
+ break;
+ }
+ }
+ }
+ return true;
+#undef DO_
+}
+
+void Test_EchoMetrics::SerializeWithCachedSizes(
+ ::google::protobuf::io::CodedOutputStream* output) const {
+ // optional .audio_processing_unittest.Test.Statistic residualEchoReturnLoss = 1;
+ if (has_residualechoreturnloss()) {
+ ::google::protobuf::internal::WireFormatLite::WriteMessage(
+ 1, this->residualechoreturnloss(), output);
+ }
+
+ // optional .audio_processing_unittest.Test.Statistic echoReturnLoss = 2;
+ if (has_echoreturnloss()) {
+ ::google::protobuf::internal::WireFormatLite::WriteMessage(
+ 2, this->echoreturnloss(), output);
+ }
+
+ // optional .audio_processing_unittest.Test.Statistic echoReturnLossEnhancement = 3;
+ if (has_echoreturnlossenhancement()) {
+ ::google::protobuf::internal::WireFormatLite::WriteMessage(
+ 3, this->echoreturnlossenhancement(), output);
+ }
+
+ // optional .audio_processing_unittest.Test.Statistic aNlp = 4;
+ if (has_anlp()) {
+ ::google::protobuf::internal::WireFormatLite::WriteMessage(
+ 4, this->anlp(), output);
+ }
+
+}
+
+int Test_EchoMetrics::ByteSize() const {
+ int total_size = 0;
+
+ if (_has_bits_[0 / 32] & (0xffu << (0 % 32))) {
+ // optional .audio_processing_unittest.Test.Statistic residualEchoReturnLoss = 1;
+ if (has_residualechoreturnloss()) {
+ total_size += 1 +
+ ::google::protobuf::internal::WireFormatLite::MessageSizeNoVirtual(
+ this->residualechoreturnloss());
+ }
+
+ // optional .audio_processing_unittest.Test.Statistic echoReturnLoss = 2;
+ if (has_echoreturnloss()) {
+ total_size += 1 +
+ ::google::protobuf::internal::WireFormatLite::MessageSizeNoVirtual(
+ this->echoreturnloss());
+ }
+
+ // optional .audio_processing_unittest.Test.Statistic echoReturnLossEnhancement = 3;
+ if (has_echoreturnlossenhancement()) {
+ total_size += 1 +
+ ::google::protobuf::internal::WireFormatLite::MessageSizeNoVirtual(
+ this->echoreturnlossenhancement());
+ }
+
+ // optional .audio_processing_unittest.Test.Statistic aNlp = 4;
+ if (has_anlp()) {
+ total_size += 1 +
+ ::google::protobuf::internal::WireFormatLite::MessageSizeNoVirtual(
+ this->anlp());
+ }
+
+ }
+ GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
+ _cached_size_ = total_size;
+ GOOGLE_SAFE_CONCURRENT_WRITES_END();
+ return total_size;
+}
+
+void Test_EchoMetrics::CheckTypeAndMergeFrom(
+ const ::google::protobuf::MessageLite& from) {
+ MergeFrom(*::google::protobuf::down_cast<const Test_EchoMetrics*>(&from));
+}
+
+void Test_EchoMetrics::MergeFrom(const Test_EchoMetrics& from) {
+ GOOGLE_CHECK_NE(&from, this);
+ if (from._has_bits_[0 / 32] & (0xffu << (0 % 32))) {
+ if (from.has_residualechoreturnloss()) {
+ mutable_residualechoreturnloss()->::audio_processing_unittest::Test_Statistic::MergeFrom(from.residualechoreturnloss());
+ }
+ if (from.has_echoreturnloss()) {
+ mutable_echoreturnloss()->::audio_processing_unittest::Test_Statistic::MergeFrom(from.echoreturnloss());
+ }
+ if (from.has_echoreturnlossenhancement()) {
+ mutable_echoreturnlossenhancement()->::audio_processing_unittest::Test_Statistic::MergeFrom(from.echoreturnlossenhancement());
+ }
+ if (from.has_anlp()) {
+ mutable_anlp()->::audio_processing_unittest::Test_Statistic::MergeFrom(from.anlp());
+ }
+ }
+}
+
+void Test_EchoMetrics::CopyFrom(const Test_EchoMetrics& from) {
+ if (&from == this) return;
+ Clear();
+ MergeFrom(from);
+}
+
+bool Test_EchoMetrics::IsInitialized() const {
+
+ return true;
+}
+
+void Test_EchoMetrics::Swap(Test_EchoMetrics* other) {
+ if (other != this) {
+ std::swap(residualechoreturnloss_, other->residualechoreturnloss_);
+ std::swap(echoreturnloss_, other->echoreturnloss_);
+ std::swap(echoreturnlossenhancement_, other->echoreturnlossenhancement_);
+ std::swap(anlp_, other->anlp_);
+ std::swap(_has_bits_[0], other->_has_bits_[0]);
+ std::swap(_cached_size_, other->_cached_size_);
+ }
+}
+
+::std::string Test_EchoMetrics::GetTypeName() const {
+ return "audio_processing_unittest.Test.EchoMetrics";
+}
+
+
+// -------------------------------------------------------------------
+
+#ifndef _MSC_VER
+const int Test::kNumReverseChannelsFieldNumber;
+const int Test::kNumChannelsFieldNumber;
+const int Test::kSampleRateFieldNumber;
+const int Test::kHasEchoCountFieldNumber;
+const int Test::kHasVoiceCountFieldNumber;
+const int Test::kIsSaturatedCountFieldNumber;
+const int Test::kEchoMetricsFieldNumber;
+#endif // !_MSC_VER
+
+Test::Test()
+ : ::google::protobuf::MessageLite() {
+ SharedCtor();
+}
+
+void Test::InitAsDefaultInstance() {
+ echometrics_ = const_cast< ::audio_processing_unittest::Test_EchoMetrics*>(&::audio_processing_unittest::Test_EchoMetrics::default_instance());
+}
+
+Test::Test(const Test& from)
+ : ::google::protobuf::MessageLite() {
+ SharedCtor();
+ MergeFrom(from);
+}
+
+void Test::SharedCtor() {
+ _cached_size_ = 0;
+ numreversechannels_ = 0;
+ numchannels_ = 0;
+ samplerate_ = 0;
+ hasechocount_ = 0;
+ hasvoicecount_ = 0;
+ issaturatedcount_ = 0;
+ echometrics_ = NULL;
+ ::memset(_has_bits_, 0, sizeof(_has_bits_));
+}
+
+Test::~Test() {
+ SharedDtor();
+}
+
+void Test::SharedDtor() {
+ if (this != default_instance_) {
+ delete echometrics_;
+ }
+}
+
+void Test::SetCachedSize(int size) const {
+ GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
+ _cached_size_ = size;
+ GOOGLE_SAFE_CONCURRENT_WRITES_END();
+}
+const Test& Test::default_instance() {
+ if (default_instance_ == NULL) protobuf_AddDesc_audio_5fprocessing_5funittest_2eproto(); return *default_instance_;
+}
+
+Test* Test::default_instance_ = NULL;
+
+Test* Test::New() const {
+ return new Test;
+}
+
+void Test::Clear() {
+ if (_has_bits_[0 / 32] & (0xffu << (0 % 32))) {
+ numreversechannels_ = 0;
+ numchannels_ = 0;
+ samplerate_ = 0;
+ hasechocount_ = 0;
+ hasvoicecount_ = 0;
+ issaturatedcount_ = 0;
+ if (has_echometrics()) {
+ if (echometrics_ != NULL) echometrics_->::audio_processing_unittest::Test_EchoMetrics::Clear();
+ }
+ }
+ ::memset(_has_bits_, 0, sizeof(_has_bits_));
+}
+
+bool Test::MergePartialFromCodedStream(
+ ::google::protobuf::io::CodedInputStream* input) {
+#define DO_(EXPRESSION) if (!(EXPRESSION)) return false
+ ::google::protobuf::uint32 tag;
+ while ((tag = input->ReadTag()) != 0) {
+ switch (::google::protobuf::internal::WireFormatLite::GetTagFieldNumber(tag)) {
+ // optional int32 numReverseChannels = 1;
+ case 1: {
+ if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+ ::google::protobuf::internal::WireFormatLite::WIRETYPE_VARINT) {
+ DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
+ ::google::protobuf::int32, ::google::protobuf::internal::WireFormatLite::TYPE_INT32>(
+ input, &numreversechannels_)));
+ set_has_numreversechannels();
+ } else {
+ goto handle_uninterpreted;
+ }
+ if (input->ExpectTag(16)) goto parse_numChannels;
+ break;
+ }
+
+ // optional int32 numChannels = 2;
+ case 2: {
+ if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+ ::google::protobuf::internal::WireFormatLite::WIRETYPE_VARINT) {
+ parse_numChannels:
+ DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
+ ::google::protobuf::int32, ::google::protobuf::internal::WireFormatLite::TYPE_INT32>(
+ input, &numchannels_)));
+ set_has_numchannels();
+ } else {
+ goto handle_uninterpreted;
+ }
+ if (input->ExpectTag(24)) goto parse_sampleRate;
+ break;
+ }
+
+ // optional int32 sampleRate = 3;
+ case 3: {
+ if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+ ::google::protobuf::internal::WireFormatLite::WIRETYPE_VARINT) {
+ parse_sampleRate:
+ DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
+ ::google::protobuf::int32, ::google::protobuf::internal::WireFormatLite::TYPE_INT32>(
+ input, &samplerate_)));
+ set_has_samplerate();
+ } else {
+ goto handle_uninterpreted;
+ }
+ if (input->ExpectTag(32)) goto parse_hasEchoCount;
+ break;
+ }
+
+ // optional int32 hasEchoCount = 4;
+ case 4: {
+ if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+ ::google::protobuf::internal::WireFormatLite::WIRETYPE_VARINT) {
+ parse_hasEchoCount:
+ DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
+ ::google::protobuf::int32, ::google::protobuf::internal::WireFormatLite::TYPE_INT32>(
+ input, &hasechocount_)));
+ set_has_hasechocount();
+ } else {
+ goto handle_uninterpreted;
+ }
+ if (input->ExpectTag(40)) goto parse_hasVoiceCount;
+ break;
+ }
+
+ // optional int32 hasVoiceCount = 5;
+ case 5: {
+ if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+ ::google::protobuf::internal::WireFormatLite::WIRETYPE_VARINT) {
+ parse_hasVoiceCount:
+ DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
+ ::google::protobuf::int32, ::google::protobuf::internal::WireFormatLite::TYPE_INT32>(
+ input, &hasvoicecount_)));
+ set_has_hasvoicecount();
+ } else {
+ goto handle_uninterpreted;
+ }
+ if (input->ExpectTag(48)) goto parse_isSaturatedCount;
+ break;
+ }
+
+ // optional int32 isSaturatedCount = 6;
+ case 6: {
+ if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+ ::google::protobuf::internal::WireFormatLite::WIRETYPE_VARINT) {
+ parse_isSaturatedCount:
+ DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
+ ::google::protobuf::int32, ::google::protobuf::internal::WireFormatLite::TYPE_INT32>(
+ input, &issaturatedcount_)));
+ set_has_issaturatedcount();
+ } else {
+ goto handle_uninterpreted;
+ }
+ if (input->ExpectTag(58)) goto parse_echoMetrics;
+ break;
+ }
+
+ // optional .audio_processing_unittest.Test.EchoMetrics echoMetrics = 7;
+ case 7: {
+ if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+ ::google::protobuf::internal::WireFormatLite::WIRETYPE_LENGTH_DELIMITED) {
+ parse_echoMetrics:
+ DO_(::google::protobuf::internal::WireFormatLite::ReadMessageNoVirtual(
+ input, mutable_echometrics()));
+ } else {
+ goto handle_uninterpreted;
+ }
+ if (input->ExpectAtEnd()) return true;
+ break;
+ }
+
+ default: {
+ handle_uninterpreted:
+ if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+ ::google::protobuf::internal::WireFormatLite::WIRETYPE_END_GROUP) {
+ return true;
+ }
+ DO_(::google::protobuf::internal::WireFormatLite::SkipField(input, tag, NULL));
+ break;
+ }
+ }
+ }
+ return true;
+#undef DO_
+}
+
+void Test::SerializeWithCachedSizes(
+ ::google::protobuf::io::CodedOutputStream* output) const {
+ // optional int32 numReverseChannels = 1;
+ if (has_numreversechannels()) {
+ ::google::protobuf::internal::WireFormatLite::WriteInt32(1, this->numreversechannels(), output);
+ }
+
+ // optional int32 numChannels = 2;
+ if (has_numchannels()) {
+ ::google::protobuf::internal::WireFormatLite::WriteInt32(2, this->numchannels(), output);
+ }
+
+ // optional int32 sampleRate = 3;
+ if (has_samplerate()) {
+ ::google::protobuf::internal::WireFormatLite::WriteInt32(3, this->samplerate(), output);
+ }
+
+ // optional int32 hasEchoCount = 4;
+ if (has_hasechocount()) {
+ ::google::protobuf::internal::WireFormatLite::WriteInt32(4, this->hasechocount(), output);
+ }
+
+ // optional int32 hasVoiceCount = 5;
+ if (has_hasvoicecount()) {
+ ::google::protobuf::internal::WireFormatLite::WriteInt32(5, this->hasvoicecount(), output);
+ }
+
+ // optional int32 isSaturatedCount = 6;
+ if (has_issaturatedcount()) {
+ ::google::protobuf::internal::WireFormatLite::WriteInt32(6, this->issaturatedcount(), output);
+ }
+
+ // optional .audio_processing_unittest.Test.EchoMetrics echoMetrics = 7;
+ if (has_echometrics()) {
+ ::google::protobuf::internal::WireFormatLite::WriteMessage(
+ 7, this->echometrics(), output);
+ }
+
+}
+
+int Test::ByteSize() const {
+ int total_size = 0;
+
+ if (_has_bits_[0 / 32] & (0xffu << (0 % 32))) {
+ // optional int32 numReverseChannels = 1;
+ if (has_numreversechannels()) {
+ total_size += 1 +
+ ::google::protobuf::internal::WireFormatLite::Int32Size(
+ this->numreversechannels());
+ }
+
+ // optional int32 numChannels = 2;
+ if (has_numchannels()) {
+ total_size += 1 +
+ ::google::protobuf::internal::WireFormatLite::Int32Size(
+ this->numchannels());
+ }
+
+ // optional int32 sampleRate = 3;
+ if (has_samplerate()) {
+ total_size += 1 +
+ ::google::protobuf::internal::WireFormatLite::Int32Size(
+ this->samplerate());
+ }
+
+ // optional int32 hasEchoCount = 4;
+ if (has_hasechocount()) {
+ total_size += 1 +
+ ::google::protobuf::internal::WireFormatLite::Int32Size(
+ this->hasechocount());
+ }
+
+ // optional int32 hasVoiceCount = 5;
+ if (has_hasvoicecount()) {
+ total_size += 1 +
+ ::google::protobuf::internal::WireFormatLite::Int32Size(
+ this->hasvoicecount());
+ }
+
+ // optional int32 isSaturatedCount = 6;
+ if (has_issaturatedcount()) {
+ total_size += 1 +
+ ::google::protobuf::internal::WireFormatLite::Int32Size(
+ this->issaturatedcount());
+ }
+
+ // optional .audio_processing_unittest.Test.EchoMetrics echoMetrics = 7;
+ if (has_echometrics()) {
+ total_size += 1 +
+ ::google::protobuf::internal::WireFormatLite::MessageSizeNoVirtual(
+ this->echometrics());
+ }
+
+ }
+ GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
+ _cached_size_ = total_size;
+ GOOGLE_SAFE_CONCURRENT_WRITES_END();
+ return total_size;
+}
+
+void Test::CheckTypeAndMergeFrom(
+ const ::google::protobuf::MessageLite& from) {
+ MergeFrom(*::google::protobuf::down_cast<const Test*>(&from));
+}
+
+void Test::MergeFrom(const Test& from) {
+ GOOGLE_CHECK_NE(&from, this);
+ if (from._has_bits_[0 / 32] & (0xffu << (0 % 32))) {
+ if (from.has_numreversechannels()) {
+ set_numreversechannels(from.numreversechannels());
+ }
+ if (from.has_numchannels()) {
+ set_numchannels(from.numchannels());
+ }
+ if (from.has_samplerate()) {
+ set_samplerate(from.samplerate());
+ }
+ if (from.has_hasechocount()) {
+ set_hasechocount(from.hasechocount());
+ }
+ if (from.has_hasvoicecount()) {
+ set_hasvoicecount(from.hasvoicecount());
+ }
+ if (from.has_issaturatedcount()) {
+ set_issaturatedcount(from.issaturatedcount());
+ }
+ if (from.has_echometrics()) {
+ mutable_echometrics()->::audio_processing_unittest::Test_EchoMetrics::MergeFrom(from.echometrics());
+ }
+ }
+}
+
+void Test::CopyFrom(const Test& from) {
+ if (&from == this) return;
+ Clear();
+ MergeFrom(from);
+}
+
+bool Test::IsInitialized() const {
+
+ return true;
+}
+
+void Test::Swap(Test* other) {
+ if (other != this) {
+ std::swap(numreversechannels_, other->numreversechannels_);
+ std::swap(numchannels_, other->numchannels_);
+ std::swap(samplerate_, other->samplerate_);
+ std::swap(hasechocount_, other->hasechocount_);
+ std::swap(hasvoicecount_, other->hasvoicecount_);
+ std::swap(issaturatedcount_, other->issaturatedcount_);
+ std::swap(echometrics_, other->echometrics_);
+ std::swap(_has_bits_[0], other->_has_bits_[0]);
+ std::swap(_cached_size_, other->_cached_size_);
+ }
+}
+
+::std::string Test::GetTypeName() const {
+ return "audio_processing_unittest.Test";
+}
+
+
+// ===================================================================
+
+#ifndef _MSC_VER
+const int OutputData::kTestFieldNumber;
+#endif // !_MSC_VER
+
+OutputData::OutputData()
+ : ::google::protobuf::MessageLite() {
+ SharedCtor();
+}
+
+void OutputData::InitAsDefaultInstance() {
+}
+
+OutputData::OutputData(const OutputData& from)
+ : ::google::protobuf::MessageLite() {
+ SharedCtor();
+ MergeFrom(from);
+}
+
+void OutputData::SharedCtor() {
+ _cached_size_ = 0;
+ ::memset(_has_bits_, 0, sizeof(_has_bits_));
+}
+
+OutputData::~OutputData() {
+ SharedDtor();
+}
+
+void OutputData::SharedDtor() {
+ if (this != default_instance_) {
+ }
+}
+
+void OutputData::SetCachedSize(int size) const {
+ GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
+ _cached_size_ = size;
+ GOOGLE_SAFE_CONCURRENT_WRITES_END();
+}
+const OutputData& OutputData::default_instance() {
+ if (default_instance_ == NULL) protobuf_AddDesc_audio_5fprocessing_5funittest_2eproto(); return *default_instance_;
+}
+
+OutputData* OutputData::default_instance_ = NULL;
+
+OutputData* OutputData::New() const {
+ return new OutputData;
+}
+
+void OutputData::Clear() {
+ test_.Clear();
+ ::memset(_has_bits_, 0, sizeof(_has_bits_));
+}
+
+bool OutputData::MergePartialFromCodedStream(
+ ::google::protobuf::io::CodedInputStream* input) {
+#define DO_(EXPRESSION) if (!(EXPRESSION)) return false
+ ::google::protobuf::uint32 tag;
+ while ((tag = input->ReadTag()) != 0) {
+ switch (::google::protobuf::internal::WireFormatLite::GetTagFieldNumber(tag)) {
+ // repeated .audio_processing_unittest.Test test = 1;
+ case 1: {
+ if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+ ::google::protobuf::internal::WireFormatLite::WIRETYPE_LENGTH_DELIMITED) {
+ parse_test:
+ DO_(::google::protobuf::internal::WireFormatLite::ReadMessageNoVirtual(
+ input, add_test()));
+ } else {
+ goto handle_uninterpreted;
+ }
+ if (input->ExpectTag(10)) goto parse_test;
+ if (input->ExpectAtEnd()) return true;
+ break;
+ }
+
+ default: {
+ handle_uninterpreted:
+ if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+ ::google::protobuf::internal::WireFormatLite::WIRETYPE_END_GROUP) {
+ return true;
+ }
+ DO_(::google::protobuf::internal::WireFormatLite::SkipField(input, tag, NULL));
+ break;
+ }
+ }
+ }
+ return true;
+#undef DO_
+}
+
+void OutputData::SerializeWithCachedSizes(
+ ::google::protobuf::io::CodedOutputStream* output) const {
+ // repeated .audio_processing_unittest.Test test = 1;
+ for (int i = 0; i < this->test_size(); i++) {
+ ::google::protobuf::internal::WireFormatLite::WriteMessage(
+ 1, this->test(i), output);
+ }
+
+}
+
+int OutputData::ByteSize() const {
+ int total_size = 0;
+
+ // repeated .audio_processing_unittest.Test test = 1;
+ total_size += 1 * this->test_size();
+ for (int i = 0; i < this->test_size(); i++) {
+ total_size +=
+ ::google::protobuf::internal::WireFormatLite::MessageSizeNoVirtual(
+ this->test(i));
+ }
+
+ GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
+ _cached_size_ = total_size;
+ GOOGLE_SAFE_CONCURRENT_WRITES_END();
+ return total_size;
+}
+
+void OutputData::CheckTypeAndMergeFrom(
+ const ::google::protobuf::MessageLite& from) {
+ MergeFrom(*::google::protobuf::down_cast<const OutputData*>(&from));
+}
+
+void OutputData::MergeFrom(const OutputData& from) {
+ GOOGLE_CHECK_NE(&from, this);
+ test_.MergeFrom(from.test_);
+}
+
+void OutputData::CopyFrom(const OutputData& from) {
+ if (&from == this) return;
+ Clear();
+ MergeFrom(from);
+}
+
+bool OutputData::IsInitialized() const {
+
+ return true;
+}
+
+void OutputData::Swap(OutputData* other) {
+ if (other != this) {
+ test_.Swap(&other->test_);
+ std::swap(_has_bits_[0], other->_has_bits_[0]);
+ std::swap(_cached_size_, other->_cached_size_);
+ }
+}
+
+::std::string OutputData::GetTypeName() const {
+ return "audio_processing_unittest.OutputData";
+}
+
+
+// @@protoc_insertion_point(namespace_scope)
+
+} // namespace audio_processing_unittest
+
+// @@protoc_insertion_point(global_scope)
diff --git a/src/modules/audio_processing/main/test/unit_test/audio_processing_unittest.pb.h b/src/modules/audio_processing/main/test/unit_test/audio_processing_unittest.pb.h
new file mode 100644
index 0000000000..34c21b2f40
--- /dev/null
+++ b/src/modules/audio_processing/main/test/unit_test/audio_processing_unittest.pb.h
@@ -0,0 +1,862 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: audio_processing_unittest.proto
+
+#ifndef PROTOBUF_audio_5fprocessing_5funittest_2eproto__INCLUDED
+#define PROTOBUF_audio_5fprocessing_5funittest_2eproto__INCLUDED
+
+#include <string>
+
+#include <google/protobuf/stubs/common.h>
+
+#if GOOGLE_PROTOBUF_VERSION < 2004000
+#error This file was generated by a newer version of protoc which is
+#error incompatible with your Protocol Buffer headers. Please update
+#error your headers.
+#endif
+#if 2004000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION
+#error This file was generated by an older version of protoc which is
+#error incompatible with your Protocol Buffer headers. Please
+#error regenerate this file with a newer version of protoc.
+#endif
+
+#include <google/protobuf/generated_message_util.h>
+#include <google/protobuf/repeated_field.h>
+#include <google/protobuf/extension_set.h>
+// @@protoc_insertion_point(includes)
+
+namespace audio_processing_unittest {
+
+// Internal implementation detail -- do not call these.
+void protobuf_AddDesc_audio_5fprocessing_5funittest_2eproto();
+void protobuf_AssignDesc_audio_5fprocessing_5funittest_2eproto();
+void protobuf_ShutdownFile_audio_5fprocessing_5funittest_2eproto();
+
+class Test;
+class Test_Statistic;
+class Test_EchoMetrics;
+class OutputData;
+
+// ===================================================================
+
+class Test_Statistic : public ::google::protobuf::MessageLite {
+ public:
+ Test_Statistic();
+ virtual ~Test_Statistic();
+
+ Test_Statistic(const Test_Statistic& from);
+
+ inline Test_Statistic& operator=(const Test_Statistic& from) {
+ CopyFrom(from);
+ return *this;
+ }
+
+ static const Test_Statistic& default_instance();
+
+ void Swap(Test_Statistic* other);
+
+ // implements Message ----------------------------------------------
+
+ Test_Statistic* New() const;
+ void CheckTypeAndMergeFrom(const ::google::protobuf::MessageLite& from);
+ void CopyFrom(const Test_Statistic& from);
+ void MergeFrom(const Test_Statistic& from);
+ void Clear();
+ bool IsInitialized() const;
+
+ int ByteSize() const;
+ bool MergePartialFromCodedStream(
+ ::google::protobuf::io::CodedInputStream* input);
+ void SerializeWithCachedSizes(
+ ::google::protobuf::io::CodedOutputStream* output) const;
+ int GetCachedSize() const { return _cached_size_; }
+ private:
+ void SharedCtor();
+ void SharedDtor();
+ void SetCachedSize(int size) const;
+ public:
+
+ ::std::string GetTypeName() const;
+
+ // nested types ----------------------------------------------------
+
+ // accessors -------------------------------------------------------
+
+ // optional int32 instant = 1;
+ inline bool has_instant() const;
+ inline void clear_instant();
+ static const int kInstantFieldNumber = 1;
+ inline ::google::protobuf::int32 instant() const;
+ inline void set_instant(::google::protobuf::int32 value);
+
+ // optional int32 average = 2;
+ inline bool has_average() const;
+ inline void clear_average();
+ static const int kAverageFieldNumber = 2;
+ inline ::google::protobuf::int32 average() const;
+ inline void set_average(::google::protobuf::int32 value);
+
+ // optional int32 maximum = 3;
+ inline bool has_maximum() const;
+ inline void clear_maximum();
+ static const int kMaximumFieldNumber = 3;
+ inline ::google::protobuf::int32 maximum() const;
+ inline void set_maximum(::google::protobuf::int32 value);
+
+ // optional int32 minimum = 4;
+ inline bool has_minimum() const;
+ inline void clear_minimum();
+ static const int kMinimumFieldNumber = 4;
+ inline ::google::protobuf::int32 minimum() const;
+ inline void set_minimum(::google::protobuf::int32 value);
+
+ // @@protoc_insertion_point(class_scope:audio_processing_unittest.Test.Statistic)
+ private:
+ inline void set_has_instant();
+ inline void clear_has_instant();
+ inline void set_has_average();
+ inline void clear_has_average();
+ inline void set_has_maximum();
+ inline void clear_has_maximum();
+ inline void set_has_minimum();
+ inline void clear_has_minimum();
+
+ ::google::protobuf::int32 instant_;
+ ::google::protobuf::int32 average_;
+ ::google::protobuf::int32 maximum_;
+ ::google::protobuf::int32 minimum_;
+
+ mutable int _cached_size_;
+ ::google::protobuf::uint32 _has_bits_[(4 + 31) / 32];
+
+ friend void protobuf_AddDesc_audio_5fprocessing_5funittest_2eproto();
+ friend void protobuf_AssignDesc_audio_5fprocessing_5funittest_2eproto();
+ friend void protobuf_ShutdownFile_audio_5fprocessing_5funittest_2eproto();
+
+ void InitAsDefaultInstance();
+ static Test_Statistic* default_instance_;
+};
+// -------------------------------------------------------------------
+
+class Test_EchoMetrics : public ::google::protobuf::MessageLite {
+ public:
+ Test_EchoMetrics();
+ virtual ~Test_EchoMetrics();
+
+ Test_EchoMetrics(const Test_EchoMetrics& from);
+
+ inline Test_EchoMetrics& operator=(const Test_EchoMetrics& from) {
+ CopyFrom(from);
+ return *this;
+ }
+
+ static const Test_EchoMetrics& default_instance();
+
+ void Swap(Test_EchoMetrics* other);
+
+ // implements Message ----------------------------------------------
+
+ Test_EchoMetrics* New() const;
+ void CheckTypeAndMergeFrom(const ::google::protobuf::MessageLite& from);
+ void CopyFrom(const Test_EchoMetrics& from);
+ void MergeFrom(const Test_EchoMetrics& from);
+ void Clear();
+ bool IsInitialized() const;
+
+ int ByteSize() const;
+ bool MergePartialFromCodedStream(
+ ::google::protobuf::io::CodedInputStream* input);
+ void SerializeWithCachedSizes(
+ ::google::protobuf::io::CodedOutputStream* output) const;
+ int GetCachedSize() const { return _cached_size_; }
+ private:
+ void SharedCtor();
+ void SharedDtor();
+ void SetCachedSize(int size) const;
+ public:
+
+ ::std::string GetTypeName() const;
+
+ // nested types ----------------------------------------------------
+
+ // accessors -------------------------------------------------------
+
+ // optional .audio_processing_unittest.Test.Statistic residualEchoReturnLoss = 1;
+ inline bool has_residualechoreturnloss() const;
+ inline void clear_residualechoreturnloss();
+ static const int kResidualEchoReturnLossFieldNumber = 1;
+ inline const ::audio_processing_unittest::Test_Statistic& residualechoreturnloss() const;
+ inline ::audio_processing_unittest::Test_Statistic* mutable_residualechoreturnloss();
+ inline ::audio_processing_unittest::Test_Statistic* release_residualechoreturnloss();
+
+ // optional .audio_processing_unittest.Test.Statistic echoReturnLoss = 2;
+ inline bool has_echoreturnloss() const;
+ inline void clear_echoreturnloss();
+ static const int kEchoReturnLossFieldNumber = 2;
+ inline const ::audio_processing_unittest::Test_Statistic& echoreturnloss() const;
+ inline ::audio_processing_unittest::Test_Statistic* mutable_echoreturnloss();
+ inline ::audio_processing_unittest::Test_Statistic* release_echoreturnloss();
+
+ // optional .audio_processing_unittest.Test.Statistic echoReturnLossEnhancement = 3;
+ inline bool has_echoreturnlossenhancement() const;
+ inline void clear_echoreturnlossenhancement();
+ static const int kEchoReturnLossEnhancementFieldNumber = 3;
+ inline const ::audio_processing_unittest::Test_Statistic& echoreturnlossenhancement() const;
+ inline ::audio_processing_unittest::Test_Statistic* mutable_echoreturnlossenhancement();
+ inline ::audio_processing_unittest::Test_Statistic* release_echoreturnlossenhancement();
+
+ // optional .audio_processing_unittest.Test.Statistic aNlp = 4;
+ inline bool has_anlp() const;
+ inline void clear_anlp();
+ static const int kANlpFieldNumber = 4;
+ inline const ::audio_processing_unittest::Test_Statistic& anlp() const;
+ inline ::audio_processing_unittest::Test_Statistic* mutable_anlp();
+ inline ::audio_processing_unittest::Test_Statistic* release_anlp();
+
+ // @@protoc_insertion_point(class_scope:audio_processing_unittest.Test.EchoMetrics)
+ private:
+ inline void set_has_residualechoreturnloss();
+ inline void clear_has_residualechoreturnloss();
+ inline void set_has_echoreturnloss();
+ inline void clear_has_echoreturnloss();
+ inline void set_has_echoreturnlossenhancement();
+ inline void clear_has_echoreturnlossenhancement();
+ inline void set_has_anlp();
+ inline void clear_has_anlp();
+
+ ::audio_processing_unittest::Test_Statistic* residualechoreturnloss_;
+ ::audio_processing_unittest::Test_Statistic* echoreturnloss_;
+ ::audio_processing_unittest::Test_Statistic* echoreturnlossenhancement_;
+ ::audio_processing_unittest::Test_Statistic* anlp_;
+
+ mutable int _cached_size_;
+ ::google::protobuf::uint32 _has_bits_[(4 + 31) / 32];
+
+ friend void protobuf_AddDesc_audio_5fprocessing_5funittest_2eproto();
+ friend void protobuf_AssignDesc_audio_5fprocessing_5funittest_2eproto();
+ friend void protobuf_ShutdownFile_audio_5fprocessing_5funittest_2eproto();
+
+ void InitAsDefaultInstance();
+ static Test_EchoMetrics* default_instance_;
+};
+// -------------------------------------------------------------------
+
+class Test : public ::google::protobuf::MessageLite {
+ public:
+ Test();
+ virtual ~Test();
+
+ Test(const Test& from);
+
+ inline Test& operator=(const Test& from) {
+ CopyFrom(from);
+ return *this;
+ }
+
+ static const Test& default_instance();
+
+ void Swap(Test* other);
+
+ // implements Message ----------------------------------------------
+
+ Test* New() const;
+ void CheckTypeAndMergeFrom(const ::google::protobuf::MessageLite& from);
+ void CopyFrom(const Test& from);
+ void MergeFrom(const Test& from);
+ void Clear();
+ bool IsInitialized() const;
+
+ int ByteSize() const;
+ bool MergePartialFromCodedStream(
+ ::google::protobuf::io::CodedInputStream* input);
+ void SerializeWithCachedSizes(
+ ::google::protobuf::io::CodedOutputStream* output) const;
+ int GetCachedSize() const { return _cached_size_; }
+ private:
+ void SharedCtor();
+ void SharedDtor();
+ void SetCachedSize(int size) const;
+ public:
+
+ ::std::string GetTypeName() const;
+
+ // nested types ----------------------------------------------------
+
+ typedef Test_Statistic Statistic;
+ typedef Test_EchoMetrics EchoMetrics;
+
+ // accessors -------------------------------------------------------
+
+ // optional int32 numReverseChannels = 1;
+ inline bool has_numreversechannels() const;
+ inline void clear_numreversechannels();
+ static const int kNumReverseChannelsFieldNumber = 1;
+ inline ::google::protobuf::int32 numreversechannels() const;
+ inline void set_numreversechannels(::google::protobuf::int32 value);
+
+ // optional int32 numChannels = 2;
+ inline bool has_numchannels() const;
+ inline void clear_numchannels();
+ static const int kNumChannelsFieldNumber = 2;
+ inline ::google::protobuf::int32 numchannels() const;
+ inline void set_numchannels(::google::protobuf::int32 value);
+
+ // optional int32 sampleRate = 3;
+ inline bool has_samplerate() const;
+ inline void clear_samplerate();
+ static const int kSampleRateFieldNumber = 3;
+ inline ::google::protobuf::int32 samplerate() const;
+ inline void set_samplerate(::google::protobuf::int32 value);
+
+ // optional int32 hasEchoCount = 4;
+ inline bool has_hasechocount() const;
+ inline void clear_hasechocount();
+ static const int kHasEchoCountFieldNumber = 4;
+ inline ::google::protobuf::int32 hasechocount() const;
+ inline void set_hasechocount(::google::protobuf::int32 value);
+
+ // optional int32 hasVoiceCount = 5;
+ inline bool has_hasvoicecount() const;
+ inline void clear_hasvoicecount();
+ static const int kHasVoiceCountFieldNumber = 5;
+ inline ::google::protobuf::int32 hasvoicecount() const;
+ inline void set_hasvoicecount(::google::protobuf::int32 value);
+
+ // optional int32 isSaturatedCount = 6;
+ inline bool has_issaturatedcount() const;
+ inline void clear_issaturatedcount();
+ static const int kIsSaturatedCountFieldNumber = 6;
+ inline ::google::protobuf::int32 issaturatedcount() const;
+ inline void set_issaturatedcount(::google::protobuf::int32 value);
+
+ // optional .audio_processing_unittest.Test.EchoMetrics echoMetrics = 7;
+ inline bool has_echometrics() const;
+ inline void clear_echometrics();
+ static const int kEchoMetricsFieldNumber = 7;
+ inline const ::audio_processing_unittest::Test_EchoMetrics& echometrics() const;
+ inline ::audio_processing_unittest::Test_EchoMetrics* mutable_echometrics();
+ inline ::audio_processing_unittest::Test_EchoMetrics* release_echometrics();
+
+ // @@protoc_insertion_point(class_scope:audio_processing_unittest.Test)
+ private:
+ inline void set_has_numreversechannels();
+ inline void clear_has_numreversechannels();
+ inline void set_has_numchannels();
+ inline void clear_has_numchannels();
+ inline void set_has_samplerate();
+ inline void clear_has_samplerate();
+ inline void set_has_hasechocount();
+ inline void clear_has_hasechocount();
+ inline void set_has_hasvoicecount();
+ inline void clear_has_hasvoicecount();
+ inline void set_has_issaturatedcount();
+ inline void clear_has_issaturatedcount();
+ inline void set_has_echometrics();
+ inline void clear_has_echometrics();
+
+ ::google::protobuf::int32 numreversechannels_;
+ ::google::protobuf::int32 numchannels_;
+ ::google::protobuf::int32 samplerate_;
+ ::google::protobuf::int32 hasechocount_;
+ ::google::protobuf::int32 hasvoicecount_;
+ ::google::protobuf::int32 issaturatedcount_;
+ ::audio_processing_unittest::Test_EchoMetrics* echometrics_;
+
+ mutable int _cached_size_;
+ ::google::protobuf::uint32 _has_bits_[(7 + 31) / 32];
+
+ friend void protobuf_AddDesc_audio_5fprocessing_5funittest_2eproto();
+ friend void protobuf_AssignDesc_audio_5fprocessing_5funittest_2eproto();
+ friend void protobuf_ShutdownFile_audio_5fprocessing_5funittest_2eproto();
+
+ void InitAsDefaultInstance();
+ static Test* default_instance_;
+};
+// -------------------------------------------------------------------
+
+class OutputData : public ::google::protobuf::MessageLite {
+ public:
+ OutputData();
+ virtual ~OutputData();
+
+ OutputData(const OutputData& from);
+
+ inline OutputData& operator=(const OutputData& from) {
+ CopyFrom(from);
+ return *this;
+ }
+
+ static const OutputData& default_instance();
+
+ void Swap(OutputData* other);
+
+ // implements Message ----------------------------------------------
+
+ OutputData* New() const;
+ void CheckTypeAndMergeFrom(const ::google::protobuf::MessageLite& from);
+ void CopyFrom(const OutputData& from);
+ void MergeFrom(const OutputData& from);
+ void Clear();
+ bool IsInitialized() const;
+
+ int ByteSize() const;
+ bool MergePartialFromCodedStream(
+ ::google::protobuf::io::CodedInputStream* input);
+ void SerializeWithCachedSizes(
+ ::google::protobuf::io::CodedOutputStream* output) const;
+ int GetCachedSize() const { return _cached_size_; }
+ private:
+ void SharedCtor();
+ void SharedDtor();
+ void SetCachedSize(int size) const;
+ public:
+
+ ::std::string GetTypeName() const;
+
+ // nested types ----------------------------------------------------
+
+ // accessors -------------------------------------------------------
+
+ // repeated .audio_processing_unittest.Test test = 1;
+ inline int test_size() const;
+ inline void clear_test();
+ static const int kTestFieldNumber = 1;
+ inline const ::audio_processing_unittest::Test& test(int index) const;
+ inline ::audio_processing_unittest::Test* mutable_test(int index);
+ inline ::audio_processing_unittest::Test* add_test();
+ inline const ::google::protobuf::RepeatedPtrField< ::audio_processing_unittest::Test >&
+ test() const;
+ inline ::google::protobuf::RepeatedPtrField< ::audio_processing_unittest::Test >*
+ mutable_test();
+
+ // @@protoc_insertion_point(class_scope:audio_processing_unittest.OutputData)
+ private:
+
+ ::google::protobuf::RepeatedPtrField< ::audio_processing_unittest::Test > test_;
+
+ mutable int _cached_size_;
+ ::google::protobuf::uint32 _has_bits_[(1 + 31) / 32];
+
+ friend void protobuf_AddDesc_audio_5fprocessing_5funittest_2eproto();
+ friend void protobuf_AssignDesc_audio_5fprocessing_5funittest_2eproto();
+ friend void protobuf_ShutdownFile_audio_5fprocessing_5funittest_2eproto();
+
+ void InitAsDefaultInstance();
+ static OutputData* default_instance_;
+};
+// ===================================================================
+
+
+// ===================================================================
+
+// Test_Statistic
+
+// optional int32 instant = 1;
+inline bool Test_Statistic::has_instant() const {
+ return (_has_bits_[0] & 0x00000001u) != 0;
+}
+inline void Test_Statistic::set_has_instant() {
+ _has_bits_[0] |= 0x00000001u;
+}
+inline void Test_Statistic::clear_has_instant() {
+ _has_bits_[0] &= ~0x00000001u;
+}
+inline void Test_Statistic::clear_instant() {
+ instant_ = 0;
+ clear_has_instant();
+}
+inline ::google::protobuf::int32 Test_Statistic::instant() const {
+ return instant_;
+}
+inline void Test_Statistic::set_instant(::google::protobuf::int32 value) {
+ set_has_instant();
+ instant_ = value;
+}
+
+// optional int32 average = 2;
+inline bool Test_Statistic::has_average() const {
+ return (_has_bits_[0] & 0x00000002u) != 0;
+}
+inline void Test_Statistic::set_has_average() {
+ _has_bits_[0] |= 0x00000002u;
+}
+inline void Test_Statistic::clear_has_average() {
+ _has_bits_[0] &= ~0x00000002u;
+}
+inline void Test_Statistic::clear_average() {
+ average_ = 0;
+ clear_has_average();
+}
+inline ::google::protobuf::int32 Test_Statistic::average() const {
+ return average_;
+}
+inline void Test_Statistic::set_average(::google::protobuf::int32 value) {
+ set_has_average();
+ average_ = value;
+}
+
+// optional int32 maximum = 3;
+inline bool Test_Statistic::has_maximum() const {
+ return (_has_bits_[0] & 0x00000004u) != 0;
+}
+inline void Test_Statistic::set_has_maximum() {
+ _has_bits_[0] |= 0x00000004u;
+}
+inline void Test_Statistic::clear_has_maximum() {
+ _has_bits_[0] &= ~0x00000004u;
+}
+inline void Test_Statistic::clear_maximum() {
+ maximum_ = 0;
+ clear_has_maximum();
+}
+inline ::google::protobuf::int32 Test_Statistic::maximum() const {
+ return maximum_;
+}
+inline void Test_Statistic::set_maximum(::google::protobuf::int32 value) {
+ set_has_maximum();
+ maximum_ = value;
+}
+
+// optional int32 minimum = 4;
+inline bool Test_Statistic::has_minimum() const {
+ return (_has_bits_[0] & 0x00000008u) != 0;
+}
+inline void Test_Statistic::set_has_minimum() {
+ _has_bits_[0] |= 0x00000008u;
+}
+inline void Test_Statistic::clear_has_minimum() {
+ _has_bits_[0] &= ~0x00000008u;
+}
+inline void Test_Statistic::clear_minimum() {
+ minimum_ = 0;
+ clear_has_minimum();
+}
+inline ::google::protobuf::int32 Test_Statistic::minimum() const {
+ return minimum_;
+}
+inline void Test_Statistic::set_minimum(::google::protobuf::int32 value) {
+ set_has_minimum();
+ minimum_ = value;
+}
+
+// -------------------------------------------------------------------
+
+// Test_EchoMetrics
+
+// optional .audio_processing_unittest.Test.Statistic residualEchoReturnLoss = 1;
+inline bool Test_EchoMetrics::has_residualechoreturnloss() const {
+ return (_has_bits_[0] & 0x00000001u) != 0;
+}
+inline void Test_EchoMetrics::set_has_residualechoreturnloss() {
+ _has_bits_[0] |= 0x00000001u;
+}
+inline void Test_EchoMetrics::clear_has_residualechoreturnloss() {
+ _has_bits_[0] &= ~0x00000001u;
+}
+inline void Test_EchoMetrics::clear_residualechoreturnloss() {
+ if (residualechoreturnloss_ != NULL) residualechoreturnloss_->::audio_processing_unittest::Test_Statistic::Clear();
+ clear_has_residualechoreturnloss();
+}
+inline const ::audio_processing_unittest::Test_Statistic& Test_EchoMetrics::residualechoreturnloss() const {
+ return residualechoreturnloss_ != NULL ? *residualechoreturnloss_ : *default_instance_->residualechoreturnloss_;
+}
+inline ::audio_processing_unittest::Test_Statistic* Test_EchoMetrics::mutable_residualechoreturnloss() {
+ set_has_residualechoreturnloss();
+ if (residualechoreturnloss_ == NULL) residualechoreturnloss_ = new ::audio_processing_unittest::Test_Statistic;
+ return residualechoreturnloss_;
+}
+inline ::audio_processing_unittest::Test_Statistic* Test_EchoMetrics::release_residualechoreturnloss() {
+ clear_has_residualechoreturnloss();
+ ::audio_processing_unittest::Test_Statistic* temp = residualechoreturnloss_;
+ residualechoreturnloss_ = NULL;
+ return temp;
+}
+
+// optional .audio_processing_unittest.Test.Statistic echoReturnLoss = 2;
+inline bool Test_EchoMetrics::has_echoreturnloss() const {
+ return (_has_bits_[0] & 0x00000002u) != 0;
+}
+inline void Test_EchoMetrics::set_has_echoreturnloss() {
+ _has_bits_[0] |= 0x00000002u;
+}
+inline void Test_EchoMetrics::clear_has_echoreturnloss() {
+ _has_bits_[0] &= ~0x00000002u;
+}
+inline void Test_EchoMetrics::clear_echoreturnloss() {
+ if (echoreturnloss_ != NULL) echoreturnloss_->::audio_processing_unittest::Test_Statistic::Clear();
+ clear_has_echoreturnloss();
+}
+inline const ::audio_processing_unittest::Test_Statistic& Test_EchoMetrics::echoreturnloss() const {
+ return echoreturnloss_ != NULL ? *echoreturnloss_ : *default_instance_->echoreturnloss_;
+}
+inline ::audio_processing_unittest::Test_Statistic* Test_EchoMetrics::mutable_echoreturnloss() {
+ set_has_echoreturnloss();
+ if (echoreturnloss_ == NULL) echoreturnloss_ = new ::audio_processing_unittest::Test_Statistic;
+ return echoreturnloss_;
+}
+inline ::audio_processing_unittest::Test_Statistic* Test_EchoMetrics::release_echoreturnloss() {
+ clear_has_echoreturnloss();
+ ::audio_processing_unittest::Test_Statistic* temp = echoreturnloss_;
+ echoreturnloss_ = NULL;
+ return temp;
+}
+
+// optional .audio_processing_unittest.Test.Statistic echoReturnLossEnhancement = 3;
+inline bool Test_EchoMetrics::has_echoreturnlossenhancement() const {
+ return (_has_bits_[0] & 0x00000004u) != 0;
+}
+inline void Test_EchoMetrics::set_has_echoreturnlossenhancement() {
+ _has_bits_[0] |= 0x00000004u;
+}
+inline void Test_EchoMetrics::clear_has_echoreturnlossenhancement() {
+ _has_bits_[0] &= ~0x00000004u;
+}
+inline void Test_EchoMetrics::clear_echoreturnlossenhancement() {
+ if (echoreturnlossenhancement_ != NULL) echoreturnlossenhancement_->::audio_processing_unittest::Test_Statistic::Clear();
+ clear_has_echoreturnlossenhancement();
+}
+inline const ::audio_processing_unittest::Test_Statistic& Test_EchoMetrics::echoreturnlossenhancement() const {
+ return echoreturnlossenhancement_ != NULL ? *echoreturnlossenhancement_ : *default_instance_->echoreturnlossenhancement_;
+}
+inline ::audio_processing_unittest::Test_Statistic* Test_EchoMetrics::mutable_echoreturnlossenhancement() {
+ set_has_echoreturnlossenhancement();
+ if (echoreturnlossenhancement_ == NULL) echoreturnlossenhancement_ = new ::audio_processing_unittest::Test_Statistic;
+ return echoreturnlossenhancement_;
+}
+inline ::audio_processing_unittest::Test_Statistic* Test_EchoMetrics::release_echoreturnlossenhancement() {
+ clear_has_echoreturnlossenhancement();
+ ::audio_processing_unittest::Test_Statistic* temp = echoreturnlossenhancement_;
+ echoreturnlossenhancement_ = NULL;
+ return temp;
+}
+
+// optional .audio_processing_unittest.Test.Statistic aNlp = 4;
+inline bool Test_EchoMetrics::has_anlp() const {
+ return (_has_bits_[0] & 0x00000008u) != 0;
+}
+inline void Test_EchoMetrics::set_has_anlp() {
+ _has_bits_[0] |= 0x00000008u;
+}
+inline void Test_EchoMetrics::clear_has_anlp() {
+ _has_bits_[0] &= ~0x00000008u;
+}
+inline void Test_EchoMetrics::clear_anlp() {
+ if (anlp_ != NULL) anlp_->::audio_processing_unittest::Test_Statistic::Clear();
+ clear_has_anlp();
+}
+inline const ::audio_processing_unittest::Test_Statistic& Test_EchoMetrics::anlp() const {
+ return anlp_ != NULL ? *anlp_ : *default_instance_->anlp_;
+}
+inline ::audio_processing_unittest::Test_Statistic* Test_EchoMetrics::mutable_anlp() {
+ set_has_anlp();
+ if (anlp_ == NULL) anlp_ = new ::audio_processing_unittest::Test_Statistic;
+ return anlp_;
+}
+inline ::audio_processing_unittest::Test_Statistic* Test_EchoMetrics::release_anlp() {
+ clear_has_anlp();
+ ::audio_processing_unittest::Test_Statistic* temp = anlp_;
+ anlp_ = NULL;
+ return temp;
+}
+
+// -------------------------------------------------------------------
+
+// Test
+
+// optional int32 numReverseChannels = 1;
+inline bool Test::has_numreversechannels() const {
+ return (_has_bits_[0] & 0x00000001u) != 0;
+}
+inline void Test::set_has_numreversechannels() {
+ _has_bits_[0] |= 0x00000001u;
+}
+inline void Test::clear_has_numreversechannels() {
+ _has_bits_[0] &= ~0x00000001u;
+}
+inline void Test::clear_numreversechannels() {
+ numreversechannels_ = 0;
+ clear_has_numreversechannels();
+}
+inline ::google::protobuf::int32 Test::numreversechannels() const {
+ return numreversechannels_;
+}
+inline void Test::set_numreversechannels(::google::protobuf::int32 value) {
+ set_has_numreversechannels();
+ numreversechannels_ = value;
+}
+
+// optional int32 numChannels = 2;
+inline bool Test::has_numchannels() const {
+ return (_has_bits_[0] & 0x00000002u) != 0;
+}
+inline void Test::set_has_numchannels() {
+ _has_bits_[0] |= 0x00000002u;
+}
+inline void Test::clear_has_numchannels() {
+ _has_bits_[0] &= ~0x00000002u;
+}
+inline void Test::clear_numchannels() {
+ numchannels_ = 0;
+ clear_has_numchannels();
+}
+inline ::google::protobuf::int32 Test::numchannels() const {
+ return numchannels_;
+}
+inline void Test::set_numchannels(::google::protobuf::int32 value) {
+ set_has_numchannels();
+ numchannels_ = value;
+}
+
+// optional int32 sampleRate = 3;
+inline bool Test::has_samplerate() const {
+ return (_has_bits_[0] & 0x00000004u) != 0;
+}
+inline void Test::set_has_samplerate() {
+ _has_bits_[0] |= 0x00000004u;
+}
+inline void Test::clear_has_samplerate() {
+ _has_bits_[0] &= ~0x00000004u;
+}
+inline void Test::clear_samplerate() {
+ samplerate_ = 0;
+ clear_has_samplerate();
+}
+inline ::google::protobuf::int32 Test::samplerate() const {
+ return samplerate_;
+}
+inline void Test::set_samplerate(::google::protobuf::int32 value) {
+ set_has_samplerate();
+ samplerate_ = value;
+}
+
+// optional int32 hasEchoCount = 4;
+inline bool Test::has_hasechocount() const {
+ return (_has_bits_[0] & 0x00000008u) != 0;
+}
+inline void Test::set_has_hasechocount() {
+ _has_bits_[0] |= 0x00000008u;
+}
+inline void Test::clear_has_hasechocount() {
+ _has_bits_[0] &= ~0x00000008u;
+}
+inline void Test::clear_hasechocount() {
+ hasechocount_ = 0;
+ clear_has_hasechocount();
+}
+inline ::google::protobuf::int32 Test::hasechocount() const {
+ return hasechocount_;
+}
+inline void Test::set_hasechocount(::google::protobuf::int32 value) {
+ set_has_hasechocount();
+ hasechocount_ = value;
+}
+
+// optional int32 hasVoiceCount = 5;
+inline bool Test::has_hasvoicecount() const {
+ return (_has_bits_[0] & 0x00000010u) != 0;
+}
+inline void Test::set_has_hasvoicecount() {
+ _has_bits_[0] |= 0x00000010u;
+}
+inline void Test::clear_has_hasvoicecount() {
+ _has_bits_[0] &= ~0x00000010u;
+}
+inline void Test::clear_hasvoicecount() {
+ hasvoicecount_ = 0;
+ clear_has_hasvoicecount();
+}
+inline ::google::protobuf::int32 Test::hasvoicecount() const {
+ return hasvoicecount_;
+}
+inline void Test::set_hasvoicecount(::google::protobuf::int32 value) {
+ set_has_hasvoicecount();
+ hasvoicecount_ = value;
+}
+
+// optional int32 isSaturatedCount = 6;
+inline bool Test::has_issaturatedcount() const {
+ return (_has_bits_[0] & 0x00000020u) != 0;
+}
+inline void Test::set_has_issaturatedcount() {
+ _has_bits_[0] |= 0x00000020u;
+}
+inline void Test::clear_has_issaturatedcount() {
+ _has_bits_[0] &= ~0x00000020u;
+}
+inline void Test::clear_issaturatedcount() {
+ issaturatedcount_ = 0;
+ clear_has_issaturatedcount();
+}
+inline ::google::protobuf::int32 Test::issaturatedcount() const {
+ return issaturatedcount_;
+}
+inline void Test::set_issaturatedcount(::google::protobuf::int32 value) {
+ set_has_issaturatedcount();
+ issaturatedcount_ = value;
+}
+
+// optional .audio_processing_unittest.Test.EchoMetrics echoMetrics = 7;
+inline bool Test::has_echometrics() const {
+ return (_has_bits_[0] & 0x00000040u) != 0;
+}
+inline void Test::set_has_echometrics() {
+ _has_bits_[0] |= 0x00000040u;
+}
+inline void Test::clear_has_echometrics() {
+ _has_bits_[0] &= ~0x00000040u;
+}
+inline void Test::clear_echometrics() {
+ if (echometrics_ != NULL) echometrics_->::audio_processing_unittest::Test_EchoMetrics::Clear();
+ clear_has_echometrics();
+}
+inline const ::audio_processing_unittest::Test_EchoMetrics& Test::echometrics() const {
+ return echometrics_ != NULL ? *echometrics_ : *default_instance_->echometrics_;
+}
+inline ::audio_processing_unittest::Test_EchoMetrics* Test::mutable_echometrics() {
+ set_has_echometrics();
+ if (echometrics_ == NULL) echometrics_ = new ::audio_processing_unittest::Test_EchoMetrics;
+ return echometrics_;
+}
+inline ::audio_processing_unittest::Test_EchoMetrics* Test::release_echometrics() {
+ clear_has_echometrics();
+ ::audio_processing_unittest::Test_EchoMetrics* temp = echometrics_;
+ echometrics_ = NULL;
+ return temp;
+}
+
+// -------------------------------------------------------------------
+
+// OutputData
+
+// repeated .audio_processing_unittest.Test test = 1;
+inline int OutputData::test_size() const {
+ return test_.size();
+}
+inline void OutputData::clear_test() {
+ test_.Clear();
+}
+inline const ::audio_processing_unittest::Test& OutputData::test(int index) const {
+ return test_.Get(index);
+}
+inline ::audio_processing_unittest::Test* OutputData::mutable_test(int index) {
+ return test_.Mutable(index);
+}
+inline ::audio_processing_unittest::Test* OutputData::add_test() {
+ return test_.Add();
+}
+inline const ::google::protobuf::RepeatedPtrField< ::audio_processing_unittest::Test >&
+OutputData::test() const {
+ return test_;
+}
+inline ::google::protobuf::RepeatedPtrField< ::audio_processing_unittest::Test >*
+OutputData::mutable_test() {
+ return &test_;
+}
+
+
+// @@protoc_insertion_point(namespace_scope)
+
+} // namespace audio_processing_unittest
+
+// @@protoc_insertion_point(global_scope)
+
+#endif // PROTOBUF_audio_5fprocessing_5funittest_2eproto__INCLUDED
diff --git a/src/modules/audio_processing/main/test/unit_test/audio_processing_unittest.proto b/src/modules/audio_processing/main/test/unit_test/audio_processing_unittest.proto
new file mode 100644
index 0000000000..8520e64f22
--- /dev/null
+++ b/src/modules/audio_processing/main/test/unit_test/audio_processing_unittest.proto
@@ -0,0 +1,33 @@
+package audio_processing_unittest;
+option optimize_for = LITE_RUNTIME;
+
+message Test {
+ optional int32 numReverseChannels = 1;
+ optional int32 numChannels = 2;
+ optional int32 sampleRate = 3;
+
+ optional int32 hasEchoCount = 4;
+ optional int32 hasVoiceCount = 5;
+ optional int32 isSaturatedCount = 6;
+
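+  // One metric snapshot: the instantaneous value plus the average, maximum
+  // and minimum values recorded over the test run.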
+ message Statistic {
+ optional int32 instant = 1;
+ optional int32 average = 2;
+ optional int32 maximum = 3;
+ optional int32 minimum = 4;
+ }
+
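+  // Reference echo metrics, intended to mirror EchoCancellation::GetMetrics()
+  // output (not yet written by the unit test; see the TODO there).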
+ message EchoMetrics {
+ optional Statistic residualEchoReturnLoss = 1;
+ optional Statistic echoReturnLoss = 2;
+ optional Statistic echoReturnLossEnhancement = 3;
+ optional Statistic aNlp = 4;
+ }
+
+ optional EchoMetrics echoMetrics = 7;
+}
+
+message OutputData {
+ repeated Test test = 1;
+}
+
diff --git a/src/modules/audio_processing/main/test/unit_test/unit_test.cc b/src/modules/audio_processing/main/test/unit_test/unit_test.cc
new file mode 100644
index 0000000000..3a6fce5a3f
--- /dev/null
+++ b/src/modules/audio_processing/main/test/unit_test/unit_test.cc
@@ -0,0 +1,881 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <cstdio>
+
+#include <gtest/gtest.h>
+
+#include "audio_processing.h"
+#include "audio_processing_unittest.pb.h"
+#include "event_wrapper.h"
+#include "module_common_types.h"
+#include "thread_wrapper.h"
+#include "trace.h"
+#include "signal_processing_library.h"
+
+using webrtc::AudioProcessing;
+using webrtc::AudioFrame;
+using webrtc::GainControl;
+using webrtc::NoiseSuppression;
+using webrtc::EchoCancellation;
+using webrtc::EventWrapper;
+using webrtc::Trace;
+using webrtc::LevelEstimator;
+using webrtc::EchoControlMobile;
+using webrtc::VoiceDetection;
+
+namespace {
+// When true (the typical case), the test output is compared against results
+// previously stored to file. Pass the command-line switch --write_output_data
+// to set this to false and regenerate the stored results instead.
+bool global_read_output_data = true;
+
+class ApmEnvironment : public ::testing::Environment {
+ public:
+ virtual void SetUp() {
+ Trace::CreateTrace();
+ ASSERT_EQ(0, Trace::SetTraceFile("apm_trace.txt"));
+ }
+
+ virtual void TearDown() {
+ Trace::ReturnTrace();
+ }
+};
+
+class ApmTest : public ::testing::Test {
+ protected:
+ ApmTest();
+ virtual void SetUp();
+ virtual void TearDown();
+
+ webrtc::AudioProcessing* apm_;
+ webrtc::AudioFrame* frame_;
+ webrtc::AudioFrame* revframe_;
+ FILE* far_file_;
+ FILE* near_file_;
+ bool update_output_data_;
+};
+
+ApmTest::ApmTest()
+    : apm_(NULL),
+      frame_(NULL),
+      revframe_(NULL),
+      far_file_(NULL),
+      near_file_(NULL),
+      update_output_data_(false) {}
+
+void ApmTest::SetUp() {
+ apm_ = AudioProcessing::Create(0);
+ ASSERT_TRUE(apm_ != NULL);
+
+ frame_ = new AudioFrame();
+ revframe_ = new AudioFrame();
+
+ ASSERT_EQ(apm_->kNoError, apm_->set_sample_rate_hz(32000));
+ ASSERT_EQ(apm_->kNoError, apm_->set_num_channels(2, 2));
+ ASSERT_EQ(apm_->kNoError, apm_->set_num_reverse_channels(2));
+
+ frame_->_payloadDataLengthInSamples = 320;
+ frame_->_audioChannel = 2;
+ frame_->_frequencyInHz = 32000;
+ revframe_->_payloadDataLengthInSamples = 320;
+ revframe_->_audioChannel = 2;
+ revframe_->_frequencyInHz = 32000;
+
+ far_file_ = fopen("aec_far.pcm", "rb");
+ ASSERT_TRUE(far_file_ != NULL) << "Could not open input file aec_far.pcm\n";
+ near_file_ = fopen("aec_near.pcm", "rb");
+ ASSERT_TRUE(near_file_ != NULL) << "Could not open input file aec_near.pcm\n";
+}
+
+void ApmTest::TearDown() {
+ if (frame_) {
+ delete frame_;
+ }
+ frame_ = NULL;
+
+ if (revframe_) {
+ delete revframe_;
+ }
+ revframe_ = NULL;
+
+ if (far_file_) {
+ ASSERT_EQ(0, fclose(far_file_));
+ }
+ far_file_ = NULL;
+
+ if (near_file_) {
+ ASSERT_EQ(0, fclose(near_file_));
+ }
+ near_file_ = NULL;
+
+ if (apm_ != NULL) {
+ AudioProcessing::Destroy(apm_);
+ }
+ apm_ = NULL;
+}
+
+void MixStereoToMono(WebRtc_Word16* stereo,
+ WebRtc_Word16* mono,
+ int numSamples) {
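+  // Average each pair of interleaved left/right samples into one mono sample.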
+ for (int i = 0; i < numSamples; i++) {
+    int mixed = (static_cast<int>(stereo[i * 2]) +
+                 static_cast<int>(stereo[i * 2 + 1])) >> 1;
+    mono[i] = static_cast<WebRtc_Word16>(mixed);
+ }
+}
+
+void WriteMessageLiteToFile(const char* filename,
+ const ::google::protobuf::MessageLite& message) {
+ assert(filename != NULL);
+
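+  // On-disk format: a native-endian int giving the message size in bytes,
+  // followed by the serialized message itself.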
+ FILE* file = fopen(filename, "wb");
+ ASSERT_TRUE(file != NULL) << "Could not open " << filename;
+ int size = message.ByteSize();
+ ASSERT_GT(size, 0);
+ unsigned char* array = new unsigned char[size];
+ ASSERT_TRUE(message.SerializeToArray(array, size));
+
+ ASSERT_EQ(1, fwrite(&size, sizeof(int), 1, file));
+ ASSERT_EQ(size, fwrite(array, sizeof(unsigned char), size, file));
+
+ delete [] array;
+ fclose(file);
+}
+
+void ReadMessageLiteFromFile(const char* filename,
+ ::google::protobuf::MessageLite* message) {
+ assert(filename != NULL);
+ assert(message != NULL);
+
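+  // Reads the size-prefixed format produced by WriteMessageLiteToFile().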
+ FILE* file = fopen(filename, "rb");
+ ASSERT_TRUE(file != NULL) << "Could not open " << filename;
+ int size = 0;
+ ASSERT_EQ(1, fread(&size, sizeof(int), 1, file));
+ ASSERT_GT(size, 0);
+ unsigned char* array = new unsigned char[size];
+ ASSERT_EQ(size, fread(array, sizeof(unsigned char), size, file));
+
+ ASSERT_TRUE(message->ParseFromArray(array, size));
+
+ delete [] array;
+ fclose(file);
+}
+
+struct ThreadData {
+ ThreadData(int thread_num_, AudioProcessing* ap_)
+ : thread_num(thread_num_),
+ error(false),
+ ap(ap_) {}
+ int thread_num;
+ bool error;
+ AudioProcessing* ap;
+};
+
+// Don't use GTest here; non-thread-safe on Windows (as of 1.5.0).
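+// Even-numbered threads exercise the reverse (render) path while odd-numbered
+// threads exercise the primary (capture) path, stressing APM's locking from
+// both sides.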
+bool DeadlockProc(void* thread_object) {
+ ThreadData* thread_data = static_cast<ThreadData*>(thread_object);
+ AudioProcessing* ap = thread_data->ap;
+ int err = ap->kNoError;
+
+ AudioFrame primary_frame;
+ AudioFrame reverse_frame;
+ primary_frame._payloadDataLengthInSamples = 320;
+ primary_frame._audioChannel = 2;
+ primary_frame._frequencyInHz = 32000;
+ reverse_frame._payloadDataLengthInSamples = 320;
+ reverse_frame._audioChannel = 2;
+ reverse_frame._frequencyInHz = 32000;
+
+ ap->echo_cancellation()->Enable(true);
+ ap->gain_control()->Enable(true);
+ ap->high_pass_filter()->Enable(true);
+ ap->level_estimator()->Enable(true);
+ ap->noise_suppression()->Enable(true);
+ ap->voice_detection()->Enable(true);
+
+ if (thread_data->thread_num % 2 == 0) {
+ err = ap->AnalyzeReverseStream(&reverse_frame);
+ if (err != ap->kNoError) {
+ printf("Error in AnalyzeReverseStream(): %d\n", err);
+ thread_data->error = true;
+ return false;
+ }
+ }
+
+ if (thread_data->thread_num % 2 == 1) {
+ ap->set_stream_delay_ms(0);
+ ap->echo_cancellation()->set_stream_drift_samples(0);
+ ap->gain_control()->set_stream_analog_level(0);
+ err = ap->ProcessStream(&primary_frame);
+ if (err == ap->kStreamParameterNotSetError) {
+ printf("Expected kStreamParameterNotSetError in ProcessStream(): %d\n",
+ err);
+ } else if (err != ap->kNoError) {
+ printf("Error in ProcessStream(): %d\n", err);
+ thread_data->error = true;
+ return false;
+ }
+ ap->gain_control()->stream_analog_level();
+ }
+
+ EventWrapper* event = EventWrapper::Create();
+ event->Wait(1);
+ delete event;
+ event = NULL;
+
+ return true;
+}
+
+/*TEST_F(ApmTest, Deadlock) {
+ const int num_threads = 16;
+ std::vector<ThreadWrapper*> threads(num_threads);
+ std::vector<ThreadData*> thread_data(num_threads);
+
+ ASSERT_EQ(apm_->kNoError, apm_->set_sample_rate_hz(32000));
+ ASSERT_EQ(apm_->kNoError, apm_->set_num_channels(2, 2));
+ ASSERT_EQ(apm_->kNoError, apm_->set_num_reverse_channels(2));
+
+ for (int i = 0; i < num_threads; i++) {
+ thread_data[i] = new ThreadData(i, apm_);
+ threads[i] = ThreadWrapper::CreateThread(DeadlockProc,
+ thread_data[i],
+ kNormalPriority,
+ 0);
+ ASSERT_TRUE(threads[i] != NULL);
+ unsigned int thread_id = 0;
+ threads[i]->Start(thread_id);
+ }
+
+ EventWrapper* event = EventWrapper::Create();
+ ASSERT_EQ(kEventTimeout, event->Wait(5000));
+ delete event;
+ event = NULL;
+
+ for (int i = 0; i < num_threads; i++) {
+ // This will return false if the thread has deadlocked.
+ ASSERT_TRUE(threads[i]->Stop());
+ ASSERT_FALSE(thread_data[i]->error);
+ delete threads[i];
+ threads[i] = NULL;
+ delete thread_data[i];
+ thread_data[i] = NULL;
+ }
+}*/
+
+TEST_F(ApmTest, StreamParameters) {
+ // No errors when the components are disabled.
+ EXPECT_EQ(apm_->kNoError,
+ apm_->ProcessStream(frame_));
+
+  // Missing AGC level
+ EXPECT_EQ(apm_->kNoError, apm_->Initialize());
+ EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(true));
+ EXPECT_EQ(apm_->kStreamParameterNotSetError,
+ apm_->ProcessStream(frame_));
+ EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
+ EXPECT_EQ(apm_->kNoError,
+ apm_->echo_cancellation()->set_stream_drift_samples(0));
+ EXPECT_EQ(apm_->kStreamParameterNotSetError,
+ apm_->ProcessStream(frame_));
+ EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(false));
+
+ // Missing delay
+ EXPECT_EQ(apm_->kNoError, apm_->Initialize());
+ EXPECT_EQ(apm_->kNoError, apm_->echo_cancellation()->Enable(true));
+ EXPECT_EQ(apm_->kStreamParameterNotSetError,
+ apm_->ProcessStream(frame_));
+ EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(true));
+ EXPECT_EQ(apm_->kNoError,
+ apm_->echo_cancellation()->set_stream_drift_samples(0));
+ EXPECT_EQ(apm_->kNoError,
+ apm_->gain_control()->set_stream_analog_level(127));
+ EXPECT_EQ(apm_->kStreamParameterNotSetError,
+ apm_->ProcessStream(frame_));
+ EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(false));
+
+ // Missing drift
+ EXPECT_EQ(apm_->kNoError, apm_->Initialize());
+ EXPECT_EQ(apm_->kNoError,
+ apm_->echo_cancellation()->enable_drift_compensation(true));
+ EXPECT_EQ(apm_->kStreamParameterNotSetError,
+ apm_->ProcessStream(frame_));
+ EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(true));
+ EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
+ EXPECT_EQ(apm_->kNoError,
+ apm_->gain_control()->set_stream_analog_level(127));
+ EXPECT_EQ(apm_->kStreamParameterNotSetError,
+ apm_->ProcessStream(frame_));
+
+ // No stream parameters
+ EXPECT_EQ(apm_->kNoError, apm_->Initialize());
+ EXPECT_EQ(apm_->kNoError,
+ apm_->AnalyzeReverseStream(revframe_));
+ EXPECT_EQ(apm_->kStreamParameterNotSetError,
+ apm_->ProcessStream(frame_));
+
+ // All there
+ EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(true));
+ EXPECT_EQ(apm_->kNoError, apm_->Initialize());
+ EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
+ EXPECT_EQ(apm_->kNoError,
+ apm_->echo_cancellation()->set_stream_drift_samples(0));
+ EXPECT_EQ(apm_->kNoError,
+ apm_->gain_control()->set_stream_analog_level(127));
+ EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+}
+
+TEST_F(ApmTest, Channels) {
+  // Testing invalid numbers of channels
+ EXPECT_EQ(apm_->kBadParameterError, apm_->set_num_channels(0, 1));
+ EXPECT_EQ(apm_->kBadParameterError, apm_->set_num_channels(1, 0));
+ EXPECT_EQ(apm_->kBadParameterError, apm_->set_num_channels(3, 1));
+ EXPECT_EQ(apm_->kBadParameterError, apm_->set_num_channels(1, 3));
+ EXPECT_EQ(apm_->kBadParameterError, apm_->set_num_reverse_channels(0));
+ EXPECT_EQ(apm_->kBadParameterError, apm_->set_num_reverse_channels(3));
+  // Testing valid numbers of channels
+ for (int i = 1; i < 3; i++) {
+ for (int j = 1; j < 3; j++) {
+ if (j > i) {
+ EXPECT_EQ(apm_->kBadParameterError, apm_->set_num_channels(i, j));
+ } else {
+ EXPECT_EQ(apm_->kNoError, apm_->set_num_channels(i, j));
+ EXPECT_EQ(j, apm_->num_output_channels());
+ }
+ }
+ EXPECT_EQ(i, apm_->num_input_channels());
+ EXPECT_EQ(apm_->kNoError, apm_->set_num_reverse_channels(i));
+ EXPECT_EQ(i, apm_->num_reverse_channels());
+ }
+}
+
+TEST_F(ApmTest, SampleRates) {
+ // Testing invalid sample rates
+ EXPECT_EQ(apm_->kBadParameterError, apm_->set_sample_rate_hz(10000));
+ // Testing valid sample rates
+ int fs[] = {8000, 16000, 32000};
+ for (size_t i = 0; i < sizeof(fs) / sizeof(*fs); i++) {
+ EXPECT_EQ(apm_->kNoError, apm_->set_sample_rate_hz(fs[i]));
+ EXPECT_EQ(fs[i], apm_->sample_rate_hz());
+ }
+}
+
+TEST_F(ApmTest, Process) {
+ GOOGLE_PROTOBUF_VERIFY_VERSION;
+ audio_processing_unittest::OutputData output_data;
+
+ if (global_read_output_data) {
+    ReadMessageLiteFromFile("output_data.pb", &output_data);
+  } else {
+ // We don't have a file; add the required tests to the protobuf.
+ int rev_ch[] = {1, 2};
+ int ch[] = {1, 2};
+ int fs[] = {8000, 16000, 32000};
+ for (size_t i = 0; i < sizeof(rev_ch) / sizeof(*rev_ch); i++) {
+ for (size_t j = 0; j < sizeof(ch) / sizeof(*ch); j++) {
+ for (size_t k = 0; k < sizeof(fs) / sizeof(*fs); k++) {
+ audio_processing_unittest::Test* test = output_data.add_test();
+ test->set_numreversechannels(rev_ch[i]);
+ test->set_numchannels(ch[j]);
+ test->set_samplerate(fs[k]);
+ }
+ }
+ }
+ }
+
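+  // Enable every component under test. The level estimator is left disabled;
+  // it is not yet supported (see the LevelEstimator test below).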
+ EXPECT_EQ(apm_->kNoError,
+ apm_->echo_cancellation()->enable_drift_compensation(true));
+ EXPECT_EQ(apm_->kNoError,
+ apm_->echo_cancellation()->enable_metrics(true));
+ EXPECT_EQ(apm_->kNoError, apm_->echo_cancellation()->Enable(true));
+
+ EXPECT_EQ(apm_->kNoError,
+ apm_->gain_control()->set_mode(GainControl::kAdaptiveAnalog));
+ EXPECT_EQ(apm_->kNoError,
+ apm_->gain_control()->set_analog_level_limits(0, 255));
+ EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(true));
+
+ EXPECT_EQ(apm_->kNoError,
+ apm_->high_pass_filter()->Enable(true));
+
+ //EXPECT_EQ(apm_->kNoError,
+ // apm_->level_estimator()->Enable(true));
+
+ EXPECT_EQ(apm_->kNoError,
+ apm_->noise_suppression()->Enable(true));
+
+ EXPECT_EQ(apm_->kNoError,
+ apm_->voice_detection()->Enable(true));
+
+ for (int i = 0; i < output_data.test_size(); i++) {
+ printf("Running test %d of %d...\n", i + 1, output_data.test_size());
+
+ audio_processing_unittest::Test* test = output_data.mutable_test(i);
+ const int num_samples = test->samplerate() / 100;
+ revframe_->_payloadDataLengthInSamples = num_samples;
+ revframe_->_audioChannel = test->numreversechannels();
+ revframe_->_frequencyInHz = test->samplerate();
+ frame_->_payloadDataLengthInSamples = num_samples;
+ frame_->_audioChannel = test->numchannels();
+ frame_->_frequencyInHz = test->samplerate();
+
+ EXPECT_EQ(apm_->kNoError, apm_->Initialize());
+ ASSERT_EQ(apm_->kNoError, apm_->set_sample_rate_hz(test->samplerate()));
+ ASSERT_EQ(apm_->kNoError, apm_->set_num_channels(frame_->_audioChannel,
+ frame_->_audioChannel));
+ ASSERT_EQ(apm_->kNoError,
+ apm_->set_num_reverse_channels(revframe_->_audioChannel));
+
+ int has_echo_count = 0;
+ int has_voice_count = 0;
+ int is_saturated_count = 0;
+
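+    // Feed the far- and near-end files through APM one frame at a time,
+    // tallying the detector decisions, until either file hits EOF.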
+ while (1) {
+ WebRtc_Word16 temp_data[640];
+ int analog_level = 127;
+
+ // Read far-end frame
+ size_t read_count = fread(temp_data,
+ sizeof(WebRtc_Word16),
+ num_samples * 2,
+ far_file_);
+ if (read_count != static_cast<size_t>(num_samples * 2)) {
+ // Check that the file really ended.
+ ASSERT_NE(0, feof(far_file_));
+ break; // This is expected.
+ }
+
+ if (revframe_->_audioChannel == 1) {
+ MixStereoToMono(temp_data, revframe_->_payloadData,
+ revframe_->_payloadDataLengthInSamples);
+ } else {
+ memcpy(revframe_->_payloadData,
+ &temp_data[0],
+ sizeof(WebRtc_Word16) * read_count);
+ }
+
+ EXPECT_EQ(apm_->kNoError,
+ apm_->AnalyzeReverseStream(revframe_));
+
+ EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(0));
+ EXPECT_EQ(apm_->kNoError,
+ apm_->echo_cancellation()->set_stream_drift_samples(0));
+ EXPECT_EQ(apm_->kNoError,
+ apm_->gain_control()->set_stream_analog_level(analog_level));
+
+ // Read near-end frame
+ read_count = fread(temp_data,
+ sizeof(WebRtc_Word16),
+ num_samples * 2,
+ near_file_);
+ if (read_count != static_cast<size_t>(num_samples * 2)) {
+ // Check that the file really ended.
+ ASSERT_NE(0, feof(near_file_));
+ break; // This is expected.
+ }
+
+ if (frame_->_audioChannel == 1) {
+ MixStereoToMono(temp_data, frame_->_payloadData, num_samples);
+ } else {
+ memcpy(frame_->_payloadData,
+ &temp_data[0],
+ sizeof(WebRtc_Word16) * read_count);
+ }
+
+ EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+
+ if (apm_->echo_cancellation()->stream_has_echo()) {
+ has_echo_count++;
+ }
+
+ analog_level = apm_->gain_control()->stream_analog_level();
+ if (apm_->gain_control()->stream_is_saturated()) {
+ is_saturated_count++;
+ }
+ if (apm_->voice_detection()->stream_has_voice()) {
+ has_voice_count++;
+ }
+ }
+
+ //<-- Statistics -->
+ //LevelEstimator::Metrics far_metrics;
+ //LevelEstimator::Metrics near_metrics;
+ //EchoCancellation::Metrics echo_metrics;
+ //LevelEstimator::Metrics far_metrics_ref_;
+ //LevelEstimator::Metrics near_metrics_ref_;
+ //EchoCancellation::Metrics echo_metrics_ref_;
+ //EXPECT_EQ(apm_->kNoError,
+ // apm_->echo_cancellation()->GetMetrics(&echo_metrics));
+ //EXPECT_EQ(apm_->kNoError,
+ // apm_->level_estimator()->GetMetrics(&near_metrics,
+
+ // TODO(ajm): check echo metrics and output audio.
+ if (global_read_output_data) {
+ EXPECT_EQ(has_echo_count,
+ test->hasechocount());
+ EXPECT_EQ(has_voice_count,
+ test->hasvoicecount());
+ EXPECT_EQ(is_saturated_count,
+ test->issaturatedcount());
+ } else {
+ test->set_hasechocount(has_echo_count);
+ test->set_hasvoicecount(has_voice_count);
+ test->set_issaturatedcount(is_saturated_count);
+ }
+
+ rewind(far_file_);
+ rewind(near_file_);
+ }
+
+ if (!global_read_output_data) {
+ WriteMessageLiteToFile("output_data.pb", output_data);
+ }
+
+ google::protobuf::ShutdownProtobufLibrary();
+}
+
+TEST_F(ApmTest, EchoCancellation) {
+ EXPECT_EQ(apm_->kNoError,
+ apm_->echo_cancellation()->enable_drift_compensation(true));
+ EXPECT_TRUE(apm_->echo_cancellation()->is_drift_compensation_enabled());
+ EXPECT_EQ(apm_->kNoError,
+ apm_->echo_cancellation()->enable_drift_compensation(false));
+ EXPECT_FALSE(apm_->echo_cancellation()->is_drift_compensation_enabled());
+
+ EXPECT_EQ(apm_->kBadParameterError,
+ apm_->echo_cancellation()->set_device_sample_rate_hz(4000));
+ EXPECT_EQ(apm_->kBadParameterError,
+ apm_->echo_cancellation()->set_device_sample_rate_hz(100000));
+
+ int rate[] = {16000, 44100, 48000};
+ for (size_t i = 0; i < sizeof(rate)/sizeof(*rate); i++) {
+ EXPECT_EQ(apm_->kNoError,
+ apm_->echo_cancellation()->set_device_sample_rate_hz(rate[i]));
+ EXPECT_EQ(rate[i],
+ apm_->echo_cancellation()->device_sample_rate_hz());
+ }
+
+ EXPECT_EQ(apm_->kBadParameterError,
+ apm_->echo_cancellation()->set_suppression_level(
+ static_cast<EchoCancellation::SuppressionLevel>(-1)));
+
+ EXPECT_EQ(apm_->kBadParameterError,
+ apm_->echo_cancellation()->set_suppression_level(
+ static_cast<EchoCancellation::SuppressionLevel>(4)));
+
+ EchoCancellation::SuppressionLevel level[] = {
+ EchoCancellation::kLowSuppression,
+ EchoCancellation::kModerateSuppression,
+ EchoCancellation::kHighSuppression,
+ };
+ for (size_t i = 0; i < sizeof(level)/sizeof(*level); i++) {
+ EXPECT_EQ(apm_->kNoError,
+ apm_->echo_cancellation()->set_suppression_level(level[i]));
+ EXPECT_EQ(level[i],
+ apm_->echo_cancellation()->suppression_level());
+ }
+
+ EchoCancellation::Metrics metrics;
+ EXPECT_EQ(apm_->kNotEnabledError,
+ apm_->echo_cancellation()->GetMetrics(&metrics));
+
+ EXPECT_EQ(apm_->kNoError,
+ apm_->echo_cancellation()->enable_metrics(true));
+ EXPECT_TRUE(apm_->echo_cancellation()->are_metrics_enabled());
+ EXPECT_EQ(apm_->kNoError,
+ apm_->echo_cancellation()->enable_metrics(false));
+ EXPECT_FALSE(apm_->echo_cancellation()->are_metrics_enabled());
+
+ EXPECT_EQ(apm_->kNoError, apm_->echo_cancellation()->Enable(true));
+ EXPECT_TRUE(apm_->echo_cancellation()->is_enabled());
+ EXPECT_EQ(apm_->kNoError, apm_->echo_cancellation()->Enable(false));
+ EXPECT_FALSE(apm_->echo_cancellation()->is_enabled());
+}
+
+TEST_F(ApmTest, EchoControlMobile) {
+  // AECM does not support super-wideband (32 kHz).
+ EXPECT_EQ(apm_->kNoError, apm_->set_sample_rate_hz(32000));
+  EXPECT_EQ(apm_->kBadSampleRateError,
+            apm_->echo_control_mobile()->Enable(true));
+ EXPECT_EQ(apm_->kNoError, apm_->set_sample_rate_hz(16000));
+ // Turn AECM on (and AEC off)
+ EXPECT_EQ(apm_->kNoError, apm_->echo_control_mobile()->Enable(true));
+ EXPECT_TRUE(apm_->echo_control_mobile()->is_enabled());
+
+ EXPECT_EQ(apm_->kBadParameterError,
+ apm_->echo_control_mobile()->set_routing_mode(
+ static_cast<EchoControlMobile::RoutingMode>(-1)));
+ EXPECT_EQ(apm_->kBadParameterError,
+ apm_->echo_control_mobile()->set_routing_mode(
+ static_cast<EchoControlMobile::RoutingMode>(5)));
+
+ // Toggle routing modes
+ EchoControlMobile::RoutingMode mode[] = {
+ EchoControlMobile::kQuietEarpieceOrHeadset,
+ EchoControlMobile::kEarpiece,
+ EchoControlMobile::kLoudEarpiece,
+ EchoControlMobile::kSpeakerphone,
+ EchoControlMobile::kLoudSpeakerphone,
+ };
+ for (size_t i = 0; i < sizeof(mode)/sizeof(*mode); i++) {
+ EXPECT_EQ(apm_->kNoError,
+ apm_->echo_control_mobile()->set_routing_mode(mode[i]));
+ EXPECT_EQ(mode[i],
+ apm_->echo_control_mobile()->routing_mode());
+ }
+ // Turn comfort noise off/on
+ EXPECT_EQ(apm_->kNoError,
+ apm_->echo_control_mobile()->enable_comfort_noise(false));
+ EXPECT_FALSE(apm_->echo_control_mobile()->is_comfort_noise_enabled());
+ EXPECT_EQ(apm_->kNoError,
+ apm_->echo_control_mobile()->enable_comfort_noise(true));
+ EXPECT_TRUE(apm_->echo_control_mobile()->is_comfort_noise_enabled());
+ // Turn AECM off
+ EXPECT_EQ(apm_->kNoError, apm_->echo_control_mobile()->Enable(false));
+ EXPECT_FALSE(apm_->echo_control_mobile()->is_enabled());
+}
+
+TEST_F(ApmTest, GainControl) {
+ // Testing gain modes
+ EXPECT_EQ(apm_->kBadParameterError,
+ apm_->gain_control()->set_mode(static_cast<GainControl::Mode>(-1)));
+
+ EXPECT_EQ(apm_->kBadParameterError,
+ apm_->gain_control()->set_mode(static_cast<GainControl::Mode>(3)));
+
+ EXPECT_EQ(apm_->kNoError,
+ apm_->gain_control()->set_mode(
+ apm_->gain_control()->mode()));
+
+ GainControl::Mode mode[] = {
+ GainControl::kAdaptiveAnalog,
+ GainControl::kAdaptiveDigital,
+ GainControl::kFixedDigital
+ };
+ for (size_t i = 0; i < sizeof(mode)/sizeof(*mode); i++) {
+ EXPECT_EQ(apm_->kNoError,
+ apm_->gain_control()->set_mode(mode[i]));
+ EXPECT_EQ(mode[i], apm_->gain_control()->mode());
+ }
+ // Testing invalid target levels
+ EXPECT_EQ(apm_->kBadParameterError,
+ apm_->gain_control()->set_target_level_dbfs(-3));
+ EXPECT_EQ(apm_->kBadParameterError,
+ apm_->gain_control()->set_target_level_dbfs(-40));
+ // Testing valid target levels
+ EXPECT_EQ(apm_->kNoError,
+ apm_->gain_control()->set_target_level_dbfs(
+ apm_->gain_control()->target_level_dbfs()));
+
+ int level_dbfs[] = {0, 6, 31};
+ for (size_t i = 0; i < sizeof(level_dbfs)/sizeof(*level_dbfs); i++) {
+ EXPECT_EQ(apm_->kNoError,
+ apm_->gain_control()->set_target_level_dbfs(level_dbfs[i]));
+ EXPECT_EQ(level_dbfs[i], apm_->gain_control()->target_level_dbfs());
+ }
+
+ // Testing invalid compression gains
+ EXPECT_EQ(apm_->kBadParameterError,
+ apm_->gain_control()->set_compression_gain_db(-1));
+ EXPECT_EQ(apm_->kBadParameterError,
+ apm_->gain_control()->set_compression_gain_db(100));
+
+ // Testing valid compression gains
+ EXPECT_EQ(apm_->kNoError,
+ apm_->gain_control()->set_compression_gain_db(
+ apm_->gain_control()->compression_gain_db()));
+
+ int gain_db[] = {0, 10, 90};
+ for (size_t i = 0; i < sizeof(gain_db)/sizeof(*gain_db); i++) {
+ EXPECT_EQ(apm_->kNoError,
+ apm_->gain_control()->set_compression_gain_db(gain_db[i]));
+ EXPECT_EQ(gain_db[i], apm_->gain_control()->compression_gain_db());
+ }
+
+ // Testing limiter off/on
+ EXPECT_EQ(apm_->kNoError, apm_->gain_control()->enable_limiter(false));
+ EXPECT_FALSE(apm_->gain_control()->is_limiter_enabled());
+ EXPECT_EQ(apm_->kNoError, apm_->gain_control()->enable_limiter(true));
+ EXPECT_TRUE(apm_->gain_control()->is_limiter_enabled());
+
+ // Testing invalid level limits
+ EXPECT_EQ(apm_->kBadParameterError,
+ apm_->gain_control()->set_analog_level_limits(-1, 512));
+ EXPECT_EQ(apm_->kBadParameterError,
+ apm_->gain_control()->set_analog_level_limits(100000, 512));
+ EXPECT_EQ(apm_->kBadParameterError,
+ apm_->gain_control()->set_analog_level_limits(512, -1));
+ EXPECT_EQ(apm_->kBadParameterError,
+ apm_->gain_control()->set_analog_level_limits(512, 100000));
+ EXPECT_EQ(apm_->kBadParameterError,
+ apm_->gain_control()->set_analog_level_limits(512, 255));
+
+ // Testing valid level limits
+ EXPECT_EQ(apm_->kNoError,
+ apm_->gain_control()->set_analog_level_limits(
+ apm_->gain_control()->analog_level_minimum(),
+ apm_->gain_control()->analog_level_maximum()));
+
+ int min_level[] = {0, 255, 1024};
+ for (size_t i = 0; i < sizeof(min_level)/sizeof(*min_level); i++) {
+ EXPECT_EQ(apm_->kNoError,
+ apm_->gain_control()->set_analog_level_limits(min_level[i], 1024));
+ EXPECT_EQ(min_level[i], apm_->gain_control()->analog_level_minimum());
+ }
+
+ int max_level[] = {0, 1024, 65535};
+  for (size_t i = 0; i < sizeof(max_level)/sizeof(*max_level); i++) {
+ EXPECT_EQ(apm_->kNoError,
+ apm_->gain_control()->set_analog_level_limits(0, max_level[i]));
+ EXPECT_EQ(max_level[i], apm_->gain_control()->analog_level_maximum());
+ }
+
+ // TODO(ajm): stream_is_saturated() and stream_analog_level()
+
+ // Turn AGC off
+ EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(false));
+ EXPECT_FALSE(apm_->gain_control()->is_enabled());
+}
+
+TEST_F(ApmTest, NoiseSuppression) {
+  // Testing invalid suppression levels
+ EXPECT_EQ(apm_->kBadParameterError,
+ apm_->noise_suppression()->set_level(
+ static_cast<NoiseSuppression::Level>(-1)));
+
+ EXPECT_EQ(apm_->kBadParameterError,
+ apm_->noise_suppression()->set_level(
+ static_cast<NoiseSuppression::Level>(5)));
+
+  // Testing valid suppression levels
+ NoiseSuppression::Level level[] = {
+ NoiseSuppression::kLow,
+ NoiseSuppression::kModerate,
+ NoiseSuppression::kHigh,
+ NoiseSuppression::kVeryHigh
+ };
+ for (size_t i = 0; i < sizeof(level)/sizeof(*level); i++) {
+ EXPECT_EQ(apm_->kNoError,
+ apm_->noise_suppression()->set_level(level[i]));
+ EXPECT_EQ(level[i], apm_->noise_suppression()->level());
+ }
+
+  // Turning NS on/off
+ EXPECT_EQ(apm_->kNoError, apm_->noise_suppression()->Enable(true));
+ EXPECT_TRUE(apm_->noise_suppression()->is_enabled());
+ EXPECT_EQ(apm_->kNoError, apm_->noise_suppression()->Enable(false));
+ EXPECT_FALSE(apm_->noise_suppression()->is_enabled());
+}
+
+TEST_F(ApmTest, HighPassFilter) {
+  // Turning HP filter on/off
+ EXPECT_EQ(apm_->kNoError, apm_->high_pass_filter()->Enable(true));
+ EXPECT_TRUE(apm_->high_pass_filter()->is_enabled());
+ EXPECT_EQ(apm_->kNoError, apm_->high_pass_filter()->Enable(false));
+ EXPECT_FALSE(apm_->high_pass_filter()->is_enabled());
+}
+
+TEST_F(ApmTest, LevelEstimator) {
+  // The level estimator is not yet supported; enabling or disabling it fails.
+ EXPECT_EQ(apm_->kUnsupportedComponentError,
+ apm_->level_estimator()->Enable(true));
+ EXPECT_FALSE(apm_->level_estimator()->is_enabled());
+ EXPECT_EQ(apm_->kUnsupportedComponentError,
+ apm_->level_estimator()->Enable(false));
+ EXPECT_FALSE(apm_->level_estimator()->is_enabled());
+}
+
+TEST_F(ApmTest, VoiceDetection) {
+ // Test external VAD
+ EXPECT_EQ(apm_->kNoError,
+ apm_->voice_detection()->set_stream_has_voice(true));
+ EXPECT_TRUE(apm_->voice_detection()->stream_has_voice());
+ EXPECT_EQ(apm_->kNoError,
+ apm_->voice_detection()->set_stream_has_voice(false));
+ EXPECT_FALSE(apm_->voice_detection()->stream_has_voice());
+
+  // Testing invalid likelihoods
+ EXPECT_EQ(apm_->kBadParameterError,
+ apm_->voice_detection()->set_likelihood(
+ static_cast<VoiceDetection::Likelihood>(-1)));
+
+ EXPECT_EQ(apm_->kBadParameterError,
+ apm_->voice_detection()->set_likelihood(
+ static_cast<VoiceDetection::Likelihood>(5)));
+
+  // Testing valid likelihoods
+ VoiceDetection::Likelihood likelihood[] = {
+ VoiceDetection::kVeryLowLikelihood,
+ VoiceDetection::kLowLikelihood,
+ VoiceDetection::kModerateLikelihood,
+ VoiceDetection::kHighLikelihood
+ };
+ for (size_t i = 0; i < sizeof(likelihood)/sizeof(*likelihood); i++) {
+ EXPECT_EQ(apm_->kNoError,
+ apm_->voice_detection()->set_likelihood(likelihood[i]));
+ EXPECT_EQ(likelihood[i], apm_->voice_detection()->likelihood());
+ }
+
+ /* TODO(bjornv): Enable once VAD supports other frame lengths than 10 ms
+  // Testing invalid frame sizes
+ EXPECT_EQ(apm_->kBadParameterError,
+ apm_->voice_detection()->set_frame_size_ms(12));
+
+  // Testing valid frame sizes
+ for (int i = 10; i <= 30; i += 10) {
+ EXPECT_EQ(apm_->kNoError,
+ apm_->voice_detection()->set_frame_size_ms(i));
+ EXPECT_EQ(i, apm_->voice_detection()->frame_size_ms());
+ }
+ */
+
+  // Turning VAD on/off
+ EXPECT_EQ(apm_->kNoError, apm_->voice_detection()->Enable(true));
+ EXPECT_TRUE(apm_->voice_detection()->is_enabled());
+ EXPECT_EQ(apm_->kNoError, apm_->voice_detection()->Enable(false));
+ EXPECT_FALSE(apm_->voice_detection()->is_enabled());
+
+ // TODO(bjornv): Add tests for streamed voice; stream_has_voice()
+}
+
+// Below are some test ideas borrowed from the video processing module (VPM).
+
+/*TEST_F(VideoProcessingModuleTest, GetVersionTest)
+{
+}
+
+TEST_F(VideoProcessingModuleTest, HandleNullBuffer)
+{
+}
+
+TEST_F(VideoProcessingModuleTest, HandleBadSize)
+{
+}
+
+TEST_F(VideoProcessingModuleTest, IdenticalResultsAfterReset)
+{
+}
+*/
+} // namespace
+
+int main(int argc, char** argv) {
+ ::testing::InitGoogleTest(&argc, argv);
+ ApmEnvironment* env = new ApmEnvironment; // GTest takes ownership.
+ ::testing::AddGlobalTestEnvironment(env);
+
+ for (int i = 1; i < argc; i++) {
+ if (strcmp(argv[i], "--write_output_data") == 0) {
+ global_read_output_data = false;
+ }
+ }
+
+ return RUN_ALL_TESTS();
+}