aboutsummaryrefslogtreecommitdiff
path: root/src/modules/audio_processing/test
diff options
context:
space:
mode:
Diffstat (limited to 'src/modules/audio_processing/test')
-rw-r--r--src/modules/audio_processing/test/android/apmtest/AndroidManifest.xml30
-rw-r--r--src/modules/audio_processing/test/android/apmtest/default.properties11
-rw-r--r--src/modules/audio_processing/test/android/apmtest/jni/Android.mk26
-rw-r--r--src/modules/audio_processing/test/android/apmtest/jni/Application.mk1
-rw-r--r--src/modules/audio_processing/test/android/apmtest/jni/main.c307
-rw-r--r--src/modules/audio_processing/test/android/apmtest/res/values/strings.xml4
-rw-r--r--src/modules/audio_processing/test/apmtest.m355
-rw-r--r--src/modules/audio_processing/test/process_test.cc964
-rw-r--r--src/modules/audio_processing/test/unit_test.cc1256
-rw-r--r--src/modules/audio_processing/test/unittest.proto52
-rw-r--r--src/modules/audio_processing/test/unpack.cc216
11 files changed, 3222 insertions, 0 deletions
diff --git a/src/modules/audio_processing/test/android/apmtest/AndroidManifest.xml b/src/modules/audio_processing/test/android/apmtest/AndroidManifest.xml
new file mode 100644
index 0000000000..c6063b3d76
--- /dev/null
+++ b/src/modules/audio_processing/test/android/apmtest/AndroidManifest.xml
@@ -0,0 +1,30 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- BEGIN_INCLUDE(manifest) -->
+<manifest xmlns:android="http://schemas.android.com/apk/res/android"
+ package="com.example.native_activity"
+ android:versionCode="1"
+ android:versionName="1.0">
+
+ <!-- This is the platform API where NativeActivity was introduced. -->
+ <uses-sdk android:minSdkVersion="8" />
+
+ <!-- This .apk has no Java code itself, so set hasCode to false. -->
+ <application android:label="@string/app_name" android:hasCode="false" android:debuggable="true">
+
+ <!-- Our activity is the built-in NativeActivity framework class.
+ This will take care of integrating with our NDK code. -->
+ <activity android:name="android.app.NativeActivity"
+ android:label="@string/app_name"
+ android:configChanges="orientation|keyboardHidden">
+ <!-- Tell NativeActivity the name of our .so -->
+ <meta-data android:name="android.app.lib_name"
+ android:value="apmtest-activity" />
+ <intent-filter>
+ <action android:name="android.intent.action.MAIN" />
+ <category android:name="android.intent.category.LAUNCHER" />
+ </intent-filter>
+ </activity>
+ </application>
+
+</manifest>
+<!-- END_INCLUDE(manifest) -->
diff --git a/src/modules/audio_processing/test/android/apmtest/default.properties b/src/modules/audio_processing/test/android/apmtest/default.properties
new file mode 100644
index 0000000000..9a2c9f6c88
--- /dev/null
+++ b/src/modules/audio_processing/test/android/apmtest/default.properties
@@ -0,0 +1,11 @@
+# This file is automatically generated by Android Tools.
+# Do not modify this file -- YOUR CHANGES WILL BE ERASED!
+#
+# This file must be checked in Version Control Systems.
+#
+# To customize properties used by the Ant build system use,
+# "build.properties", and override values to adapt the script to your
+# project structure.
+
+# Project target.
+target=android-9
diff --git a/src/modules/audio_processing/test/android/apmtest/jni/Android.mk b/src/modules/audio_processing/test/android/apmtest/jni/Android.mk
new file mode 100644
index 0000000000..eaf3c9d86f
--- /dev/null
+++ b/src/modules/audio_processing/test/android/apmtest/jni/Android.mk
@@ -0,0 +1,26 @@
+# Copyright (C) 2010 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+LOCAL_PATH := $(call my-dir)
+
+include $(CLEAR_VARS)
+
+LOCAL_MODULE := apmtest-activity
+LOCAL_SRC_FILES := main.c
+LOCAL_LDLIBS := -llog -landroid -lEGL -lGLESv1_CM
+LOCAL_STATIC_LIBRARIES := android_native_app_glue
+
+include $(BUILD_SHARED_LIBRARY)
+
+$(call import-module,android/native_app_glue)
diff --git a/src/modules/audio_processing/test/android/apmtest/jni/Application.mk b/src/modules/audio_processing/test/android/apmtest/jni/Application.mk
new file mode 100644
index 0000000000..22d188e595
--- /dev/null
+++ b/src/modules/audio_processing/test/android/apmtest/jni/Application.mk
@@ -0,0 +1 @@
+APP_PLATFORM := android-9
diff --git a/src/modules/audio_processing/test/android/apmtest/jni/main.c b/src/modules/audio_processing/test/android/apmtest/jni/main.c
new file mode 100644
index 0000000000..2e19635683
--- /dev/null
+++ b/src/modules/audio_processing/test/android/apmtest/jni/main.c
@@ -0,0 +1,307 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+//BEGIN_INCLUDE(all)
+#include <jni.h>
+#include <errno.h>
+
+#include <EGL/egl.h>
+#include <GLES/gl.h>
+
+#include <android/sensor.h>
+#include <android/log.h>
+#include <android_native_app_glue.h>
+
+#define LOGI(...) ((void)__android_log_print(ANDROID_LOG_INFO, "native-activity", __VA_ARGS__))
+#define LOGW(...) ((void)__android_log_print(ANDROID_LOG_WARN, "native-activity", __VA_ARGS__))
+
+/**
+ * Our saved state data.
+ */
+struct saved_state {
+ float angle;
+ int32_t x;
+ int32_t y;
+};
+
+/**
+ * Shared state for our app.
+ */
+struct engine {
+ struct android_app* app;
+
+ ASensorManager* sensorManager;
+ const ASensor* accelerometerSensor;
+ ASensorEventQueue* sensorEventQueue;
+
+ int animating;
+ EGLDisplay display;
+ EGLSurface surface;
+ EGLContext context;
+ int32_t width;
+ int32_t height;
+ struct saved_state state;
+};
+
+/**
+ * Initialize an EGL context for the current display.
+ */
+static int engine_init_display(struct engine* engine) {
+ // initialize OpenGL ES and EGL
+
+ /*
+ * Here specify the attributes of the desired configuration.
+ * Below, we select an EGLConfig with at least 8 bits per color
+ * component compatible with on-screen windows
+ */
+ const EGLint attribs[] = {
+ EGL_SURFACE_TYPE, EGL_WINDOW_BIT,
+ EGL_BLUE_SIZE, 8,
+ EGL_GREEN_SIZE, 8,
+ EGL_RED_SIZE, 8,
+ EGL_NONE
+ };
+ EGLint w, h, dummy, format;
+ EGLint numConfigs;
+ EGLConfig config;
+ EGLSurface surface;
+ EGLContext context;
+
+ EGLDisplay display = eglGetDisplay(EGL_DEFAULT_DISPLAY);
+
+ eglInitialize(display, 0, 0);
+
+ /* Here, the application chooses the configuration it desires. In this
+ * sample, we have a very simplified selection process, where we pick
+ * the first EGLConfig that matches our criteria */
+ eglChooseConfig(display, attribs, &config, 1, &numConfigs);
+
+ /* EGL_NATIVE_VISUAL_ID is an attribute of the EGLConfig that is
+ * guaranteed to be accepted by ANativeWindow_setBuffersGeometry().
+ * As soon as we picked a EGLConfig, we can safely reconfigure the
+ * ANativeWindow buffers to match, using EGL_NATIVE_VISUAL_ID. */
+ eglGetConfigAttrib(display, config, EGL_NATIVE_VISUAL_ID, &format);
+
+ ANativeWindow_setBuffersGeometry(engine->app->window, 0, 0, format);
+
+ surface = eglCreateWindowSurface(display, config, engine->app->window, NULL);
+ context = eglCreateContext(display, config, NULL, NULL);
+
+ if (eglMakeCurrent(display, surface, surface, context) == EGL_FALSE) {
+ LOGW("Unable to eglMakeCurrent");
+ return -1;
+ }
+
+ eglQuerySurface(display, surface, EGL_WIDTH, &w);
+ eglQuerySurface(display, surface, EGL_HEIGHT, &h);
+
+ engine->display = display;
+ engine->context = context;
+ engine->surface = surface;
+ engine->width = w;
+ engine->height = h;
+ engine->state.angle = 0;
+
+ // Initialize GL state.
+ glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_FASTEST);
+ glEnable(GL_CULL_FACE);
+ glShadeModel(GL_SMOOTH);
+ glDisable(GL_DEPTH_TEST);
+
+ return 0;
+}
+
+/**
+ * Just draw the current frame in the display.
+ */
+static void engine_draw_frame(struct engine* engine) {
+ if (engine->display == NULL) {
+ // No display.
+ return;
+ }
+
+ // Just fill the screen with a color.
+ glClearColor(((float)engine->state.x)/engine->width, engine->state.angle,
+ ((float)engine->state.y)/engine->height, 1);
+ glClear(GL_COLOR_BUFFER_BIT);
+
+ eglSwapBuffers(engine->display, engine->surface);
+}
+
+/**
+ * Tear down the EGL context currently associated with the display.
+ */
+static void engine_term_display(struct engine* engine) {
+ if (engine->display != EGL_NO_DISPLAY) {
+ eglMakeCurrent(engine->display, EGL_NO_SURFACE, EGL_NO_SURFACE, EGL_NO_CONTEXT);
+ if (engine->context != EGL_NO_CONTEXT) {
+ eglDestroyContext(engine->display, engine->context);
+ }
+ if (engine->surface != EGL_NO_SURFACE) {
+ eglDestroySurface(engine->display, engine->surface);
+ }
+ eglTerminate(engine->display);
+ }
+ engine->animating = 0;
+ engine->display = EGL_NO_DISPLAY;
+ engine->context = EGL_NO_CONTEXT;
+ engine->surface = EGL_NO_SURFACE;
+}
+
+/**
+ * Process the next input event.
+ */
+static int32_t engine_handle_input(struct android_app* app, AInputEvent* event) {
+ struct engine* engine = (struct engine*)app->userData;
+ if (AInputEvent_getType(event) == AINPUT_EVENT_TYPE_MOTION) {
+ engine->animating = 1;
+ engine->state.x = AMotionEvent_getX(event, 0);
+ engine->state.y = AMotionEvent_getY(event, 0);
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * Process the next main command.
+ */
+static void engine_handle_cmd(struct android_app* app, int32_t cmd) {
+ struct engine* engine = (struct engine*)app->userData;
+ switch (cmd) {
+ case APP_CMD_SAVE_STATE:
+ // The system has asked us to save our current state. Do so.
+ engine->app->savedState = malloc(sizeof(struct saved_state));
+ *((struct saved_state*)engine->app->savedState) = engine->state;
+ engine->app->savedStateSize = sizeof(struct saved_state);
+ break;
+ case APP_CMD_INIT_WINDOW:
+ // The window is being shown, get it ready.
+ if (engine->app->window != NULL) {
+ engine_init_display(engine);
+ engine_draw_frame(engine);
+ }
+ break;
+ case APP_CMD_TERM_WINDOW:
+ // The window is being hidden or closed, clean it up.
+ engine_term_display(engine);
+ break;
+ case APP_CMD_GAINED_FOCUS:
+ // When our app gains focus, we start monitoring the accelerometer.
+ if (engine->accelerometerSensor != NULL) {
+ ASensorEventQueue_enableSensor(engine->sensorEventQueue,
+ engine->accelerometerSensor);
+ // We'd like to get 60 events per second (in us).
+ ASensorEventQueue_setEventRate(engine->sensorEventQueue,
+ engine->accelerometerSensor, (1000L/60)*1000);
+ }
+ break;
+ case APP_CMD_LOST_FOCUS:
+ // When our app loses focus, we stop monitoring the accelerometer.
+ // This is to avoid consuming battery while not being used.
+ if (engine->accelerometerSensor != NULL) {
+ ASensorEventQueue_disableSensor(engine->sensorEventQueue,
+ engine->accelerometerSensor);
+ }
+ // Also stop animating.
+ engine->animating = 0;
+ engine_draw_frame(engine);
+ break;
+ }
+}
+
+/**
+ * This is the main entry point of a native application that is using
+ * android_native_app_glue. It runs in its own thread, with its own
+ * event loop for receiving input events and doing other things.
+ */
+void android_main(struct android_app* state) {
+ struct engine engine;
+
+ // Make sure glue isn't stripped.
+ app_dummy();
+
+ memset(&engine, 0, sizeof(engine));
+ state->userData = &engine;
+ state->onAppCmd = engine_handle_cmd;
+ state->onInputEvent = engine_handle_input;
+ engine.app = state;
+
+ // Prepare to monitor accelerometer
+ engine.sensorManager = ASensorManager_getInstance();
+ engine.accelerometerSensor = ASensorManager_getDefaultSensor(engine.sensorManager,
+ ASENSOR_TYPE_ACCELEROMETER);
+ engine.sensorEventQueue = ASensorManager_createEventQueue(engine.sensorManager,
+ state->looper, LOOPER_ID_USER, NULL, NULL);
+
+ if (state->savedState != NULL) {
+ // We are starting with a previous saved state; restore from it.
+ engine.state = *(struct saved_state*)state->savedState;
+ }
+
+ // loop waiting for stuff to do.
+
+ while (1) {
+ // Read all pending events.
+ int ident;
+ int events;
+ struct android_poll_source* source;
+
+ // If not animating, we will block forever waiting for events.
+ // If animating, we loop until all events are read, then continue
+ // to draw the next frame of animation.
+ while ((ident=ALooper_pollAll(engine.animating ? 0 : -1, NULL, &events,
+ (void**)&source)) >= 0) {
+
+ // Process this event.
+ if (source != NULL) {
+ source->process(state, source);
+ }
+
+ // If a sensor has data, process it now.
+ if (ident == LOOPER_ID_USER) {
+ if (engine.accelerometerSensor != NULL) {
+ ASensorEvent event;
+ while (ASensorEventQueue_getEvents(engine.sensorEventQueue,
+ &event, 1) > 0) {
+ LOGI("accelerometer: x=%f y=%f z=%f",
+ event.acceleration.x, event.acceleration.y,
+ event.acceleration.z);
+ }
+ }
+ }
+
+ // Check if we are exiting.
+ if (state->destroyRequested != 0) {
+ engine_term_display(&engine);
+ return;
+ }
+ }
+
+ if (engine.animating) {
+ // Done with events; draw next animation frame.
+ engine.state.angle += .01f;
+ if (engine.state.angle > 1) {
+ engine.state.angle = 0;
+ }
+
+ // Drawing is throttled to the screen update rate, so there
+ // is no need to do timing here.
+ engine_draw_frame(&engine);
+ }
+ }
+}
+//END_INCLUDE(all)
diff --git a/src/modules/audio_processing/test/android/apmtest/res/values/strings.xml b/src/modules/audio_processing/test/android/apmtest/res/values/strings.xml
new file mode 100644
index 0000000000..d0bd0f3051
--- /dev/null
+++ b/src/modules/audio_processing/test/android/apmtest/res/values/strings.xml
@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="utf-8"?>
+<resources>
+ <string name="app_name">apmtest</string>
+</resources>
diff --git a/src/modules/audio_processing/test/apmtest.m b/src/modules/audio_processing/test/apmtest.m
new file mode 100644
index 0000000000..3172cd1562
--- /dev/null
+++ b/src/modules/audio_processing/test/apmtest.m
@@ -0,0 +1,355 @@
+function apmtest(task, testname, filepath, casenumber, legacy)
+%APMTEST is a tool to process APM file sets and easily display the output.
+% APMTEST(TASK, TESTNAME, CASENUMBER) performs one of several TASKs:
+% 'test' Processes the files to produce test output.
+% 'list' Prints a list of cases in the test set, preceded by their
+% CASENUMBERs.
+% 'show' Uses spclab to show the test case specified by the
+% CASENUMBER parameter.
+%
+% using a set of test files determined by TESTNAME:
+% 'all' All tests.
+% 'apm' The standard APM test set (default).
+% 'apmm' The mobile APM test set.
+% 'aec' The AEC test set.
+% 'aecm' The AECM test set.
+% 'agc' The AGC test set.
+% 'ns' The NS test set.
+% 'vad' The VAD test set.
+%
+% FILEPATH specifies the path to the test data files.
+%
+% CASENUMBER can be used to select a single test case. Omit CASENUMBER,
+% or set to zero, to use all test cases.
+%
+
+if nargin < 5 || isempty(legacy)
+ % Set to true to run old VQE recordings.
+ legacy = false;
+end
+
+if nargin < 4 || isempty(casenumber)
+ casenumber = 0;
+end
+
+if nargin < 3 || isempty(filepath)
+ filepath = 'data/';
+end
+
+if nargin < 2 || isempty(testname)
+ testname = 'all';
+end
+
+if nargin < 1 || isempty(task)
+ task = 'test';
+end
+
+if ~strcmp(task, 'test') && ~strcmp(task, 'list') && ~strcmp(task, 'show')
+ error(['TASK ' task ' is not recognized']);
+end
+
+if casenumber == 0 && strcmp(task, 'show')
+ error(['CASENUMBER must be specified for TASK ' task]);
+end
+
+inpath = [filepath 'input/'];
+outpath = [filepath 'output/'];
+refpath = [filepath 'reference/'];
+
+if strcmp(testname, 'all')
+ tests = {'apm','apmm','aec','aecm','agc','ns','vad'};
+else
+ tests = {testname};
+end
+
+if legacy
+ progname = './test';
+else
+ progname = './process_test';
+end
+
+global farFile;
+global nearFile;
+global eventFile;
+global delayFile;
+global driftFile;
+
+if legacy
+ farFile = 'vqeFar.pcm';
+ nearFile = 'vqeNear.pcm';
+ eventFile = 'vqeEvent.dat';
+ delayFile = 'vqeBuf.dat';
+ driftFile = 'vqeDrift.dat';
+else
+ farFile = 'apm_far.pcm';
+ nearFile = 'apm_near.pcm';
+ eventFile = 'apm_event.dat';
+ delayFile = 'apm_delay.dat';
+ driftFile = 'apm_drift.dat';
+end
+
+simulateMode = false;
+nErr = 0;
+nCases = 0;
+for i=1:length(tests)
+ simulateMode = false;
+
+ if strcmp(tests{i}, 'apm')
+ testdir = ['apm/'];
+ outfile = ['out'];
+ if legacy
+ opt = ['-ec 1 -agc 2 -nc 2 -vad 3'];
+ else
+ opt = ['--no_progress -hpf' ...
+ ' -aec --drift_compensation -agc --fixed_digital' ...
+ ' -ns --ns_moderate -vad'];
+ end
+
+ elseif strcmp(tests{i}, 'apm-swb')
+ simulateMode = true;
+ testdir = ['apm-swb/'];
+ outfile = ['out'];
+ if legacy
+ opt = ['-fs 32000 -ec 1 -agc 2 -nc 2'];
+ else
+ opt = ['--no_progress -fs 32000 -hpf' ...
+ ' -aec --drift_compensation -agc --adaptive_digital' ...
+ ' -ns --ns_moderate -vad'];
+ end
+ elseif strcmp(tests{i}, 'apmm')
+ testdir = ['apmm/'];
+ outfile = ['out'];
+ opt = ['-aec --drift_compensation -agc --fixed_digital -hpf -ns ' ...
+ '--ns_moderate'];
+
+ else
+ error(['TESTNAME ' tests{i} ' is not recognized']);
+ end
+
+ inpathtest = [inpath testdir];
+ outpathtest = [outpath testdir];
+ refpathtest = [refpath testdir];
+
+ if ~exist(inpathtest,'dir')
+ error(['Input directory ' inpathtest ' does not exist']);
+ end
+
+ if ~exist(refpathtest,'dir')
+ warning(['Reference directory ' refpathtest ' does not exist']);
+ end
+
+ [status, errMsg] = mkdir(outpathtest);
+ if (status == 0)
+ error(errMsg);
+ end
+
+ [nErr, nCases] = recurseDir(inpathtest, outpathtest, refpathtest, outfile, ...
+ progname, opt, simulateMode, nErr, nCases, task, casenumber, legacy);
+
+ if strcmp(task, 'test') || strcmp(task, 'show')
+ system(['rm ' farFile]);
+ system(['rm ' nearFile]);
+ if simulateMode == false
+ system(['rm ' eventFile]);
+ system(['rm ' delayFile]);
+ system(['rm ' driftFile]);
+ end
+ end
+end
+
+if ~strcmp(task, 'list')
+ if nErr == 0
+ fprintf(1, '\nAll files are bit-exact to reference\n', nErr);
+ else
+ fprintf(1, '\n%d files are NOT bit-exact to reference\n', nErr);
+ end
+end
+
+
+function [nErrOut, nCases] = recurseDir(inpath, outpath, refpath, ...
+ outfile, progname, opt, simulateMode, nErr, nCases, task, casenumber, ...
+ legacy)
+
+global farFile;
+global nearFile;
+global eventFile;
+global delayFile;
+global driftFile;
+
+dirs = dir(inpath);
+nDirs = 0;
+nErrOut = nErr;
+for i=3:length(dirs) % skip . and ..
+ nDirs = nDirs + dirs(i).isdir;
+end
+
+
+if nDirs == 0
+ nCases = nCases + 1;
+
+ if casenumber == nCases || casenumber == 0
+
+ if strcmp(task, 'list')
+ fprintf([num2str(nCases) '. ' outfile '\n'])
+ else
+ vadoutfile = ['vad_' outfile '.dat'];
+ outfile = [outfile '.pcm'];
+
+ % Check for VAD test
+ vadTest = 0;
+ if ~isempty(findstr(opt, '-vad'))
+ vadTest = 1;
+ if legacy
+ opt = [opt ' ' outpath vadoutfile];
+ else
+ opt = [opt ' --vad_out_file ' outpath vadoutfile];
+ end
+ end
+
+ if exist([inpath 'vqeFar.pcm'])
+ system(['ln -s -f ' inpath 'vqeFar.pcm ' farFile]);
+ elseif exist([inpath 'apm_far.pcm'])
+ system(['ln -s -f ' inpath 'apm_far.pcm ' farFile]);
+ end
+
+ if exist([inpath 'vqeNear.pcm'])
+ system(['ln -s -f ' inpath 'vqeNear.pcm ' nearFile]);
+ elseif exist([inpath 'apm_near.pcm'])
+ system(['ln -s -f ' inpath 'apm_near.pcm ' nearFile]);
+ end
+
+ if exist([inpath 'vqeEvent.dat'])
+ system(['ln -s -f ' inpath 'vqeEvent.dat ' eventFile]);
+ elseif exist([inpath 'apm_event.dat'])
+ system(['ln -s -f ' inpath 'apm_event.dat ' eventFile]);
+ end
+
+ if exist([inpath 'vqeBuf.dat'])
+ system(['ln -s -f ' inpath 'vqeBuf.dat ' delayFile]);
+ elseif exist([inpath 'apm_delay.dat'])
+ system(['ln -s -f ' inpath 'apm_delay.dat ' delayFile]);
+ end
+
+ if exist([inpath 'vqeSkew.dat'])
+ system(['ln -s -f ' inpath 'vqeSkew.dat ' driftFile]);
+ elseif exist([inpath 'vqeDrift.dat'])
+ system(['ln -s -f ' inpath 'vqeDrift.dat ' driftFile]);
+ elseif exist([inpath 'apm_drift.dat'])
+ system(['ln -s -f ' inpath 'apm_drift.dat ' driftFile]);
+ end
+
+ if simulateMode == false
+ command = [progname ' -o ' outpath outfile ' ' opt];
+ else
+ if legacy
+ inputCmd = [' -in ' nearFile];
+ else
+ inputCmd = [' -i ' nearFile];
+ end
+
+ if exist([farFile])
+ if legacy
+ inputCmd = [' -if ' farFile inputCmd];
+ else
+ inputCmd = [' -ir ' farFile inputCmd];
+ end
+ end
+ command = [progname inputCmd ' -o ' outpath outfile ' ' opt];
+ end
+ % This prevents MATLAB from using its own C libraries.
+ shellcmd = ['bash -c "unset LD_LIBRARY_PATH;'];
+ fprintf([command '\n']);
+ [status, result] = system([shellcmd command '"']);
+ fprintf(result);
+
+ fprintf(['Reference file: ' refpath outfile '\n']);
+
+ if vadTest == 1
+ equal_to_ref = are_files_equal([outpath vadoutfile], ...
+ [refpath vadoutfile], ...
+ 'int8');
+ if ~equal_to_ref
+ nErr = nErr + 1;
+ end
+ end
+
+ [equal_to_ref, diffvector] = are_files_equal([outpath outfile], ...
+ [refpath outfile], ...
+ 'int16');
+ if ~equal_to_ref
+ nErr = nErr + 1;
+ end
+
+ if strcmp(task, 'show')
+ % Assume the last init gives the sample rate of interest.
+ str_idx = strfind(result, 'Sample rate:');
+ fs = str2num(result(str_idx(end) + 13:str_idx(end) + 17));
+ fprintf('Using %d Hz\n', fs);
+
+ if exist([farFile])
+ spclab(fs, farFile, nearFile, [refpath outfile], ...
+ [outpath outfile], diffvector);
+ %spclab(fs, diffvector);
+ else
+ spclab(fs, nearFile, [refpath outfile], [outpath outfile], ...
+ diffvector);
+ %spclab(fs, diffvector);
+ end
+ end
+ end
+ end
+else
+
+ for i=3:length(dirs)
+ if dirs(i).isdir
+ [nErr, nCases] = recurseDir([inpath dirs(i).name '/'], outpath, ...
+ refpath,[outfile '_' dirs(i).name], progname, opt, ...
+ simulateMode, nErr, nCases, task, casenumber, legacy);
+ end
+ end
+end
+nErrOut = nErr;
+
+function [are_equal, diffvector] = ...
+ are_files_equal(newfile, reffile, precision, diffvector)
+
+are_equal = false;
+diffvector = 0;
+if ~exist(newfile,'file')
+ warning(['Output file ' newfile ' does not exist']);
+ return
+end
+
+if ~exist(reffile,'file')
+ warning(['Reference file ' reffile ' does not exist']);
+ return
+end
+
+fid = fopen(newfile,'rb');
+new = fread(fid,inf,precision);
+fclose(fid);
+
+fid = fopen(reffile,'rb');
+ref = fread(fid,inf,precision);
+fclose(fid);
+
+if length(new) ~= length(ref)
+ warning('Reference is not the same length as output');
+ minlength = min(length(new), length(ref));
+ new = new(1:minlength);
+ ref = ref(1:minlength);
+end
+diffvector = new - ref;
+
+if isequal(new, ref)
+ fprintf([newfile ' is bit-exact to reference\n']);
+ are_equal = true;
+else
+ if isempty(new)
+ warning([newfile ' is empty']);
+ return
+ end
+ snr = snrseg(new,ref,80);
+ fprintf('\n');
+ are_equal = false;
+end
diff --git a/src/modules/audio_processing/test/process_test.cc b/src/modules/audio_processing/test/process_test.cc
new file mode 100644
index 0000000000..2023ddb13d
--- /dev/null
+++ b/src/modules/audio_processing/test/process_test.cc
@@ -0,0 +1,964 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#ifdef WEBRTC_ANDROID
+#include <sys/stat.h>
+#endif
+
+#include "gtest/gtest.h"
+
+#include "audio_processing.h"
+#include "cpu_features_wrapper.h"
+#include "module_common_types.h"
+#include "scoped_ptr.h"
+#include "tick_util.h"
+#ifdef WEBRTC_ANDROID
+#include "external/webrtc/src/modules/audio_processing/debug.pb.h"
+#else
+#include "webrtc/audio_processing/debug.pb.h"
+#endif
+
+using webrtc::AudioFrame;
+using webrtc::AudioProcessing;
+using webrtc::EchoCancellation;
+using webrtc::GainControl;
+using webrtc::NoiseSuppression;
+using webrtc::scoped_array;
+using webrtc::TickInterval;
+using webrtc::TickTime;
+
+using webrtc::audioproc::Event;
+using webrtc::audioproc::Init;
+using webrtc::audioproc::ReverseStream;
+using webrtc::audioproc::Stream;
+
+namespace {
+// Returns true on success, false on error or end-of-file.
+bool ReadMessageFromFile(FILE* file,
+ ::google::protobuf::MessageLite* msg) {
+ // The "wire format" for the size is little-endian.
+ // Assume process_test is running on a little-endian machine.
+ int32_t size = 0;
+ if (fread(&size, sizeof(int32_t), 1, file) != 1) {
+ return false;
+ }
+ if (size <= 0) {
+ return false;
+ }
+ const size_t usize = static_cast<size_t>(size);
+
+ scoped_array<char> array(new char[usize]);
+ if (fread(array.get(), sizeof(char), usize, file) != usize) {
+ return false;
+ }
+
+ msg->Clear();
+ return msg->ParseFromArray(array.get(), usize);
+}
+
+void PrintStat(const AudioProcessing::Statistic& stat) {
+ printf("%d, %d, %d\n", stat.average,
+ stat.maximum,
+ stat.minimum);
+}
+
+void usage() {
+ printf(
+ "Usage: process_test [options] [-pb PROTOBUF_FILE]\n"
+ " [-ir REVERSE_FILE] [-i PRIMARY_FILE] [-o OUT_FILE]\n");
+ printf(
+ "process_test is a test application for AudioProcessing.\n\n"
+ "When a protobuf debug file is available, specify it with -pb.\n"
+ "Alternately, when -ir or -i is used, the specified files will be\n"
+ "processed directly in a simulation mode. Otherwise the full set of\n"
+ "legacy test files is expected to be present in the working directory.\n");
+ printf("\n");
+ printf("Options\n");
+ printf("General configuration (only used for the simulation mode):\n");
+ printf(" -fs SAMPLE_RATE_HZ\n");
+ printf(" -ch CHANNELS_IN CHANNELS_OUT\n");
+ printf(" -rch REVERSE_CHANNELS\n");
+ printf("\n");
+ printf("Component configuration:\n");
+ printf(
+ "All components are disabled by default. Each block below begins with a\n"
+ "flag to enable the component with default settings. The subsequent flags\n"
+ "in the block are used to provide configuration settings.\n");
+ printf("\n -aec Echo cancellation\n");
+ printf(" --drift_compensation\n");
+ printf(" --no_drift_compensation\n");
+ printf(" --no_echo_metrics\n");
+ printf(" --no_delay_logging\n");
+ printf("\n -aecm Echo control mobile\n");
+ printf(" --aecm_echo_path_in_file FILE\n");
+ printf(" --aecm_echo_path_out_file FILE\n");
+ printf("\n -agc Gain control\n");
+ printf(" --analog\n");
+ printf(" --adaptive_digital\n");
+ printf(" --fixed_digital\n");
+ printf(" --target_level LEVEL\n");
+ printf(" --compression_gain GAIN\n");
+ printf(" --limiter\n");
+ printf(" --no_limiter\n");
+ printf("\n -hpf High pass filter\n");
+ printf("\n -ns Noise suppression\n");
+ printf(" --ns_low\n");
+ printf(" --ns_moderate\n");
+ printf(" --ns_high\n");
+ printf(" --ns_very_high\n");
+ printf("\n -vad Voice activity detection\n");
+ printf(" --vad_out_file FILE\n");
+ printf("\n Level metrics (enabled by default)\n");
+ printf(" --no_level_metrics\n");
+ printf("\n");
+ printf("Modifiers:\n");
+ printf(" --noasm Disable SSE optimization.\n");
+ printf(" --delay DELAY Add DELAY ms to input value.\n");
+ printf(" --perf Measure performance.\n");
+ printf(" --quiet Suppress text output.\n");
+ printf(" --no_progress Suppress progress.\n");
+ printf(" --debug_file FILE Dump a debug recording.\n");
+}
+
+// void function for gtest.
+void void_main(int argc, char* argv[]) {
+ if (argc > 1 && strcmp(argv[1], "--help") == 0) {
+ usage();
+ return;
+ }
+
+ if (argc < 2) {
+ printf("Did you mean to run without arguments?\n");
+ printf("Try `process_test --help' for more information.\n\n");
+ }
+
+ AudioProcessing* apm = AudioProcessing::Create(0);
+ ASSERT_TRUE(apm != NULL);
+
+ const char* pb_filename = NULL;
+ const char* far_filename = NULL;
+ const char* near_filename = NULL;
+ const char* out_filename = NULL;
+ const char* vad_out_filename = NULL;
+ const char* aecm_echo_path_in_filename = NULL;
+ const char* aecm_echo_path_out_filename = NULL;
+
+ int32_t sample_rate_hz = 16000;
+ int32_t device_sample_rate_hz = 16000;
+
+ int num_capture_input_channels = 1;
+ int num_capture_output_channels = 1;
+ int num_render_channels = 1;
+
+ int samples_per_channel = sample_rate_hz / 100;
+
+ bool simulating = false;
+ bool perf_testing = false;
+ bool verbose = true;
+ bool progress = true;
+ int extra_delay_ms = 0;
+ //bool interleaved = true;
+
+ ASSERT_EQ(apm->kNoError, apm->level_estimator()->Enable(true));
+ for (int i = 1; i < argc; i++) {
+ if (strcmp(argv[i], "-pb") == 0) {
+ i++;
+ ASSERT_LT(i, argc) << "Specify protobuf filename after -pb";
+ pb_filename = argv[i];
+
+ } else if (strcmp(argv[i], "-ir") == 0) {
+ i++;
+ ASSERT_LT(i, argc) << "Specify filename after -ir";
+ far_filename = argv[i];
+ simulating = true;
+
+ } else if (strcmp(argv[i], "-i") == 0) {
+ i++;
+ ASSERT_LT(i, argc) << "Specify filename after -i";
+ near_filename = argv[i];
+ simulating = true;
+
+ } else if (strcmp(argv[i], "-o") == 0) {
+ i++;
+ ASSERT_LT(i, argc) << "Specify filename after -o";
+ out_filename = argv[i];
+
+ } else if (strcmp(argv[i], "-fs") == 0) {
+ i++;
+ ASSERT_LT(i, argc) << "Specify sample rate after -fs";
+ ASSERT_EQ(1, sscanf(argv[i], "%d", &sample_rate_hz));
+ samples_per_channel = sample_rate_hz / 100;
+
+ ASSERT_EQ(apm->kNoError,
+ apm->set_sample_rate_hz(sample_rate_hz));
+
+ } else if (strcmp(argv[i], "-ch") == 0) {
+ i++;
+ ASSERT_LT(i + 1, argc) << "Specify number of channels after -ch";
+ ASSERT_EQ(1, sscanf(argv[i], "%d", &num_capture_input_channels));
+ i++;
+ ASSERT_EQ(1, sscanf(argv[i], "%d", &num_capture_output_channels));
+
+ ASSERT_EQ(apm->kNoError,
+ apm->set_num_channels(num_capture_input_channels,
+ num_capture_output_channels));
+
+ } else if (strcmp(argv[i], "-rch") == 0) {
+ i++;
+ ASSERT_LT(i, argc) << "Specify number of channels after -rch";
+ ASSERT_EQ(1, sscanf(argv[i], "%d", &num_render_channels));
+
+ ASSERT_EQ(apm->kNoError,
+ apm->set_num_reverse_channels(num_render_channels));
+
+ } else if (strcmp(argv[i], "-aec") == 0) {
+ ASSERT_EQ(apm->kNoError, apm->echo_cancellation()->Enable(true));
+ ASSERT_EQ(apm->kNoError,
+ apm->echo_cancellation()->enable_metrics(true));
+ ASSERT_EQ(apm->kNoError,
+ apm->echo_cancellation()->enable_delay_logging(true));
+
+ } else if (strcmp(argv[i], "--drift_compensation") == 0) {
+ ASSERT_EQ(apm->kNoError, apm->echo_cancellation()->Enable(true));
+ // TODO(ajm): this is enabled in the VQE test app by default. Investigate
+ // why it can give better performance despite passing zeros.
+ ASSERT_EQ(apm->kNoError,
+ apm->echo_cancellation()->enable_drift_compensation(true));
+ } else if (strcmp(argv[i], "--no_drift_compensation") == 0) {
+ ASSERT_EQ(apm->kNoError, apm->echo_cancellation()->Enable(true));
+ ASSERT_EQ(apm->kNoError,
+ apm->echo_cancellation()->enable_drift_compensation(false));
+
+ } else if (strcmp(argv[i], "--no_echo_metrics") == 0) {
+ ASSERT_EQ(apm->kNoError, apm->echo_cancellation()->Enable(true));
+ ASSERT_EQ(apm->kNoError,
+ apm->echo_cancellation()->enable_metrics(false));
+
+ } else if (strcmp(argv[i], "--no_delay_logging") == 0) {
+ ASSERT_EQ(apm->kNoError, apm->echo_cancellation()->Enable(true));
+ ASSERT_EQ(apm->kNoError,
+ apm->echo_cancellation()->enable_delay_logging(false));
+
+ } else if (strcmp(argv[i], "--no_level_metrics") == 0) {
+ ASSERT_EQ(apm->kNoError, apm->level_estimator()->Enable(false));
+
+ } else if (strcmp(argv[i], "-aecm") == 0) {
+ ASSERT_EQ(apm->kNoError, apm->echo_control_mobile()->Enable(true));
+
+ } else if (strcmp(argv[i], "--aecm_echo_path_in_file") == 0) {
+ i++;
+ ASSERT_LT(i, argc) << "Specify filename after --aecm_echo_path_in_file";
+ aecm_echo_path_in_filename = argv[i];
+
+ } else if (strcmp(argv[i], "--aecm_echo_path_out_file") == 0) {
+ i++;
+ ASSERT_LT(i, argc) << "Specify filename after --aecm_echo_path_out_file";
+ aecm_echo_path_out_filename = argv[i];
+
+ } else if (strcmp(argv[i], "-agc") == 0) {
+ ASSERT_EQ(apm->kNoError, apm->gain_control()->Enable(true));
+
+ } else if (strcmp(argv[i], "--analog") == 0) {
+ ASSERT_EQ(apm->kNoError, apm->gain_control()->Enable(true));
+ ASSERT_EQ(apm->kNoError,
+ apm->gain_control()->set_mode(GainControl::kAdaptiveAnalog));
+
+ } else if (strcmp(argv[i], "--adaptive_digital") == 0) {
+ ASSERT_EQ(apm->kNoError, apm->gain_control()->Enable(true));
+ ASSERT_EQ(apm->kNoError,
+ apm->gain_control()->set_mode(GainControl::kAdaptiveDigital));
+
+ } else if (strcmp(argv[i], "--fixed_digital") == 0) {
+ ASSERT_EQ(apm->kNoError, apm->gain_control()->Enable(true));
+ ASSERT_EQ(apm->kNoError,
+ apm->gain_control()->set_mode(GainControl::kFixedDigital));
+
+ } else if (strcmp(argv[i], "--target_level") == 0) {
+ i++;
+ int level;
+ ASSERT_EQ(1, sscanf(argv[i], "%d", &level));
+
+ ASSERT_EQ(apm->kNoError, apm->gain_control()->Enable(true));
+ ASSERT_EQ(apm->kNoError,
+ apm->gain_control()->set_target_level_dbfs(level));
+
+ } else if (strcmp(argv[i], "--compression_gain") == 0) {
+ i++;
+ int gain;
+ ASSERT_EQ(1, sscanf(argv[i], "%d", &gain));
+
+ ASSERT_EQ(apm->kNoError, apm->gain_control()->Enable(true));
+ ASSERT_EQ(apm->kNoError,
+ apm->gain_control()->set_compression_gain_db(gain));
+
+ } else if (strcmp(argv[i], "--limiter") == 0) {
+ ASSERT_EQ(apm->kNoError, apm->gain_control()->Enable(true));
+ ASSERT_EQ(apm->kNoError,
+ apm->gain_control()->enable_limiter(true));
+
+ } else if (strcmp(argv[i], "--no_limiter") == 0) {
+ ASSERT_EQ(apm->kNoError, apm->gain_control()->Enable(true));
+ ASSERT_EQ(apm->kNoError,
+ apm->gain_control()->enable_limiter(false));
+
+ } else if (strcmp(argv[i], "-hpf") == 0) {
+ ASSERT_EQ(apm->kNoError, apm->high_pass_filter()->Enable(true));
+
+ } else if (strcmp(argv[i], "-ns") == 0) {
+ ASSERT_EQ(apm->kNoError, apm->noise_suppression()->Enable(true));
+
+ } else if (strcmp(argv[i], "--ns_low") == 0) {
+ ASSERT_EQ(apm->kNoError, apm->noise_suppression()->Enable(true));
+ ASSERT_EQ(apm->kNoError,
+ apm->noise_suppression()->set_level(NoiseSuppression::kLow));
+
+ } else if (strcmp(argv[i], "--ns_moderate") == 0) {
+ ASSERT_EQ(apm->kNoError, apm->noise_suppression()->Enable(true));
+ ASSERT_EQ(apm->kNoError,
+ apm->noise_suppression()->set_level(NoiseSuppression::kModerate));
+
+ } else if (strcmp(argv[i], "--ns_high") == 0) {
+ ASSERT_EQ(apm->kNoError, apm->noise_suppression()->Enable(true));
+ ASSERT_EQ(apm->kNoError,
+ apm->noise_suppression()->set_level(NoiseSuppression::kHigh));
+
+ } else if (strcmp(argv[i], "--ns_very_high") == 0) {
+ ASSERT_EQ(apm->kNoError, apm->noise_suppression()->Enable(true));
+ ASSERT_EQ(apm->kNoError,
+ apm->noise_suppression()->set_level(NoiseSuppression::kVeryHigh));
+
+ } else if (strcmp(argv[i], "-vad") == 0) {
+ ASSERT_EQ(apm->kNoError, apm->voice_detection()->Enable(true));
+
+ } else if (strcmp(argv[i], "--vad_out_file") == 0) {
+ i++;
+ ASSERT_LT(i, argc) << "Specify filename after --vad_out_file";
+ vad_out_filename = argv[i];
+
+ } else if (strcmp(argv[i], "--noasm") == 0) {
+ WebRtc_GetCPUInfo = WebRtc_GetCPUInfoNoASM;
+ // We need to reinitialize here if components have already been enabled.
+ ASSERT_EQ(apm->kNoError, apm->Initialize());
+
+ } else if (strcmp(argv[i], "--delay") == 0) {
+ i++;
+ ASSERT_EQ(1, sscanf(argv[i], "%d", &extra_delay_ms));
+
+ } else if (strcmp(argv[i], "--perf") == 0) {
+ perf_testing = true;
+
+ } else if (strcmp(argv[i], "--quiet") == 0) {
+ verbose = false;
+ progress = false;
+
+ } else if (strcmp(argv[i], "--no_progress") == 0) {
+ progress = false;
+
+ } else if (strcmp(argv[i], "--debug_file") == 0) {
+ i++;
+ ASSERT_LT(i, argc) << "Specify filename after --debug_file";
+ ASSERT_EQ(apm->kNoError, apm->StartDebugRecording(argv[i]));
+ } else {
+ FAIL() << "Unrecognized argument " << argv[i];
+ }
+ }
+ // If we're reading a protobuf file, ensure a simulation hasn't also
+ // been requested (which makes no sense...)
+ ASSERT_FALSE(pb_filename && simulating);
+
+ if (verbose) {
+ printf("Sample rate: %d Hz\n", sample_rate_hz);
+ printf("Primary channels: %d (in), %d (out)\n",
+ num_capture_input_channels,
+ num_capture_output_channels);
+ printf("Reverse channels: %d \n", num_render_channels);
+ }
+
+ const char far_file_default[] = "apm_far.pcm";
+ const char near_file_default[] = "apm_near.pcm";
+ const char out_file_default[] = "out.pcm";
+ const char event_filename[] = "apm_event.dat";
+ const char delay_filename[] = "apm_delay.dat";
+ const char drift_filename[] = "apm_drift.dat";
+ const char vad_file_default[] = "vad_out.dat";
+
+ if (!simulating) {
+ far_filename = far_file_default;
+ near_filename = near_file_default;
+ }
+
+ if (!out_filename) {
+ out_filename = out_file_default;
+ }
+
+ if (!vad_out_filename) {
+ vad_out_filename = vad_file_default;
+ }
+
+ FILE* pb_file = NULL;
+ FILE* far_file = NULL;
+ FILE* near_file = NULL;
+ FILE* out_file = NULL;
+ FILE* event_file = NULL;
+ FILE* delay_file = NULL;
+ FILE* drift_file = NULL;
+ FILE* vad_out_file = NULL;
+ FILE* aecm_echo_path_in_file = NULL;
+ FILE* aecm_echo_path_out_file = NULL;
+
+ if (pb_filename) {
+ pb_file = fopen(pb_filename, "rb");
+ ASSERT_TRUE(NULL != pb_file) << "Unable to open protobuf file "
+ << pb_filename;
+ } else {
+ if (far_filename) {
+ far_file = fopen(far_filename, "rb");
+ ASSERT_TRUE(NULL != far_file) << "Unable to open far-end audio file "
+ << far_filename;
+ }
+
+ near_file = fopen(near_filename, "rb");
+ ASSERT_TRUE(NULL != near_file) << "Unable to open near-end audio file "
+ << near_filename;
+ if (!simulating) {
+ event_file = fopen(event_filename, "rb");
+ ASSERT_TRUE(NULL != event_file) << "Unable to open event file "
+ << event_filename;
+
+ delay_file = fopen(delay_filename, "rb");
+ ASSERT_TRUE(NULL != delay_file) << "Unable to open buffer file "
+ << delay_filename;
+
+ drift_file = fopen(drift_filename, "rb");
+ ASSERT_TRUE(NULL != drift_file) << "Unable to open drift file "
+ << drift_filename;
+ }
+ }
+
+ out_file = fopen(out_filename, "wb");
+ ASSERT_TRUE(NULL != out_file) << "Unable to open output audio file "
+ << out_filename;
+
+ int near_size_bytes = 0;
+ if (pb_file) {
+ struct stat st;
+ stat(pb_filename, &st);
+ // Crude estimate, but should be good enough.
+ near_size_bytes = st.st_size / 3;
+ } else {
+ struct stat st;
+ stat(near_filename, &st);
+ near_size_bytes = st.st_size;
+ }
+
+ if (apm->voice_detection()->is_enabled()) {
+ vad_out_file = fopen(vad_out_filename, "wb");
+ ASSERT_TRUE(NULL != vad_out_file) << "Unable to open VAD output file "
+ << vad_out_file;
+ }
+
+ if (aecm_echo_path_in_filename != NULL) {
+ aecm_echo_path_in_file = fopen(aecm_echo_path_in_filename, "rb");
+ ASSERT_TRUE(NULL != aecm_echo_path_in_file) << "Unable to open file "
+ << aecm_echo_path_in_filename;
+
+ const size_t path_size =
+ apm->echo_control_mobile()->echo_path_size_bytes();
+ scoped_array<char> echo_path(new char[path_size]);
+ ASSERT_EQ(path_size, fread(echo_path.get(),
+ sizeof(char),
+ path_size,
+ aecm_echo_path_in_file));
+ EXPECT_EQ(apm->kNoError,
+ apm->echo_control_mobile()->SetEchoPath(echo_path.get(),
+ path_size));
+ fclose(aecm_echo_path_in_file);
+ aecm_echo_path_in_file = NULL;
+ }
+
+ if (aecm_echo_path_out_filename != NULL) {
+ aecm_echo_path_out_file = fopen(aecm_echo_path_out_filename, "wb");
+ ASSERT_TRUE(NULL != aecm_echo_path_out_file) << "Unable to open file "
+ << aecm_echo_path_out_filename;
+ }
+
+ size_t read_count = 0;
+ int reverse_count = 0;
+ int primary_count = 0;
+ int near_read_bytes = 0;
+ TickInterval acc_ticks;
+
+ AudioFrame far_frame;
+ AudioFrame near_frame;
+
+ int delay_ms = 0;
+ int drift_samples = 0;
+ int capture_level = 127;
+ int8_t stream_has_voice = 0;
+
+ TickTime t0 = TickTime::Now();
+ TickTime t1 = t0;
+ WebRtc_Word64 max_time_us = 0;
+ WebRtc_Word64 max_time_reverse_us = 0;
+ WebRtc_Word64 min_time_us = 1e6;
+ WebRtc_Word64 min_time_reverse_us = 1e6;
+
+ // TODO(ajm): Ideally we would refactor this block into separate functions,
+ // but for now we want to share the variables.
+ if (pb_file) {
+ Event event_msg;
+ while (ReadMessageFromFile(pb_file, &event_msg)) {
+ std::ostringstream trace_stream;
+ trace_stream << "Processed frames: " << reverse_count << " (reverse), "
+ << primary_count << " (primary)";
+ SCOPED_TRACE(trace_stream.str());
+
+ if (event_msg.type() == Event::INIT) {
+ ASSERT_TRUE(event_msg.has_init());
+ const Init msg = event_msg.init();
+
+ ASSERT_TRUE(msg.has_sample_rate());
+ ASSERT_EQ(apm->kNoError,
+ apm->set_sample_rate_hz(msg.sample_rate()));
+
+ ASSERT_TRUE(msg.has_device_sample_rate());
+ ASSERT_EQ(apm->kNoError,
+ apm->echo_cancellation()->set_device_sample_rate_hz(
+ msg.device_sample_rate()));
+
+ ASSERT_TRUE(msg.has_num_input_channels());
+ ASSERT_TRUE(msg.has_num_output_channels());
+ ASSERT_EQ(apm->kNoError,
+ apm->set_num_channels(msg.num_input_channels(),
+ msg.num_output_channels()));
+
+ ASSERT_TRUE(msg.has_num_reverse_channels());
+ ASSERT_EQ(apm->kNoError,
+ apm->set_num_reverse_channels(msg.num_reverse_channels()));
+
+ samples_per_channel = msg.sample_rate() / 100;
+ far_frame._frequencyInHz = msg.sample_rate();
+ far_frame._payloadDataLengthInSamples = samples_per_channel;
+ far_frame._audioChannel = msg.num_reverse_channels();
+ near_frame._frequencyInHz = msg.sample_rate();
+ near_frame._payloadDataLengthInSamples = samples_per_channel;
+
+ if (verbose) {
+ printf("Init at frame: %d (primary), %d (reverse)\n",
+ primary_count, reverse_count);
+ printf(" Sample rate: %d Hz\n", msg.sample_rate());
+ printf(" Primary channels: %d (in), %d (out)\n",
+ msg.num_input_channels(),
+ msg.num_output_channels());
+ printf(" Reverse channels: %d \n", msg.num_reverse_channels());
+ }
+
+ } else if (event_msg.type() == Event::REVERSE_STREAM) {
+ ASSERT_TRUE(event_msg.has_reverse_stream());
+ const ReverseStream msg = event_msg.reverse_stream();
+ reverse_count++;
+
+ ASSERT_TRUE(msg.has_data());
+ ASSERT_EQ(sizeof(int16_t) * samples_per_channel *
+ far_frame._audioChannel, msg.data().size());
+ memcpy(far_frame._payloadData, msg.data().data(), msg.data().size());
+
+ if (perf_testing) {
+ t0 = TickTime::Now();
+ }
+
+ ASSERT_EQ(apm->kNoError,
+ apm->AnalyzeReverseStream(&far_frame));
+
+ if (perf_testing) {
+ t1 = TickTime::Now();
+ TickInterval tick_diff = t1 - t0;
+ acc_ticks += tick_diff;
+ if (tick_diff.Microseconds() > max_time_reverse_us) {
+ max_time_reverse_us = tick_diff.Microseconds();
+ }
+ if (tick_diff.Microseconds() < min_time_reverse_us) {
+ min_time_reverse_us = tick_diff.Microseconds();
+ }
+ }
+
+ } else if (event_msg.type() == Event::STREAM) {
+ ASSERT_TRUE(event_msg.has_stream());
+ const Stream msg = event_msg.stream();
+ primary_count++;
+
+ // ProcessStream could have changed this for the output frame.
+ near_frame._audioChannel = apm->num_input_channels();
+
+ ASSERT_TRUE(msg.has_input_data());
+ ASSERT_EQ(sizeof(int16_t) * samples_per_channel *
+ near_frame._audioChannel, msg.input_data().size());
+ memcpy(near_frame._payloadData,
+ msg.input_data().data(),
+ msg.input_data().size());
+
+ near_read_bytes += msg.input_data().size();
+ if (progress && primary_count % 100 == 0) {
+ printf("%.0f%% complete\r",
+ (near_read_bytes * 100.0) / near_size_bytes);
+ fflush(stdout);
+ }
+
+ if (perf_testing) {
+ t0 = TickTime::Now();
+ }
+
+ ASSERT_EQ(apm->kNoError,
+ apm->gain_control()->set_stream_analog_level(msg.level()));
+ ASSERT_EQ(apm->kNoError,
+ apm->set_stream_delay_ms(msg.delay() + extra_delay_ms));
+ ASSERT_EQ(apm->kNoError,
+ apm->echo_cancellation()->set_stream_drift_samples(msg.drift()));
+
+ int err = apm->ProcessStream(&near_frame);
+ if (err == apm->kBadStreamParameterWarning) {
+ printf("Bad parameter warning. %s\n", trace_stream.str().c_str());
+ }
+ ASSERT_TRUE(err == apm->kNoError ||
+ err == apm->kBadStreamParameterWarning);
+ ASSERT_TRUE(near_frame._audioChannel == apm->num_output_channels());
+
+ capture_level = apm->gain_control()->stream_analog_level();
+
+ stream_has_voice =
+ static_cast<int8_t>(apm->voice_detection()->stream_has_voice());
+ if (vad_out_file != NULL) {
+ ASSERT_EQ(1u, fwrite(&stream_has_voice,
+ sizeof(stream_has_voice),
+ 1,
+ vad_out_file));
+ }
+
+ if (apm->gain_control()->mode() != GainControl::kAdaptiveAnalog) {
+ ASSERT_EQ(msg.level(), capture_level);
+ }
+
+ if (perf_testing) {
+ t1 = TickTime::Now();
+ TickInterval tick_diff = t1 - t0;
+ acc_ticks += tick_diff;
+ if (tick_diff.Microseconds() > max_time_us) {
+ max_time_us = tick_diff.Microseconds();
+ }
+ if (tick_diff.Microseconds() < min_time_us) {
+ min_time_us = tick_diff.Microseconds();
+ }
+ }
+
+ size_t size = samples_per_channel * near_frame._audioChannel;
+ ASSERT_EQ(size, fwrite(near_frame._payloadData,
+ sizeof(int16_t),
+ size,
+ out_file));
+ }
+ }
+
+ ASSERT_TRUE(feof(pb_file));
+
+ } else {
+ enum Events {
+ kInitializeEvent,
+ kRenderEvent,
+ kCaptureEvent,
+ kResetEventDeprecated
+ };
+ int16_t event = 0;
+ while (simulating || feof(event_file) == 0) {
+ std::ostringstream trace_stream;
+ trace_stream << "Processed frames: " << reverse_count << " (reverse), "
+ << primary_count << " (primary)";
+ SCOPED_TRACE(trace_stream.str());
+
+ if (simulating) {
+ if (far_file == NULL) {
+ event = kCaptureEvent;
+ } else {
+ if (event == kRenderEvent) {
+ event = kCaptureEvent;
+ } else {
+ event = kRenderEvent;
+ }
+ }
+ } else {
+ read_count = fread(&event, sizeof(event), 1, event_file);
+ if (read_count != 1) {
+ break;
+ }
+ }
+
+ far_frame._frequencyInHz = sample_rate_hz;
+ far_frame._payloadDataLengthInSamples = samples_per_channel;
+ far_frame._audioChannel = num_render_channels;
+ near_frame._frequencyInHz = sample_rate_hz;
+ near_frame._payloadDataLengthInSamples = samples_per_channel;
+
+ if (event == kInitializeEvent || event == kResetEventDeprecated) {
+ ASSERT_EQ(1u,
+ fread(&sample_rate_hz, sizeof(sample_rate_hz), 1, event_file));
+ samples_per_channel = sample_rate_hz / 100;
+
+ ASSERT_EQ(1u,
+ fread(&device_sample_rate_hz,
+ sizeof(device_sample_rate_hz),
+ 1,
+ event_file));
+
+ ASSERT_EQ(apm->kNoError,
+ apm->set_sample_rate_hz(sample_rate_hz));
+
+ ASSERT_EQ(apm->kNoError,
+ apm->echo_cancellation()->set_device_sample_rate_hz(
+ device_sample_rate_hz));
+
+ far_frame._frequencyInHz = sample_rate_hz;
+ far_frame._payloadDataLengthInSamples = samples_per_channel;
+ far_frame._audioChannel = num_render_channels;
+ near_frame._frequencyInHz = sample_rate_hz;
+ near_frame._payloadDataLengthInSamples = samples_per_channel;
+
+ if (verbose) {
+ printf("Init at frame: %d (primary), %d (reverse)\n",
+ primary_count, reverse_count);
+ printf(" Sample rate: %d Hz\n", sample_rate_hz);
+ }
+
+ } else if (event == kRenderEvent) {
+ reverse_count++;
+
+ size_t size = samples_per_channel * num_render_channels;
+ read_count = fread(far_frame._payloadData,
+ sizeof(int16_t),
+ size,
+ far_file);
+
+ if (simulating) {
+ if (read_count != size) {
+ // Read an equal amount from the near file to avoid errors due to
+ // not reaching end-of-file.
+ EXPECT_EQ(0, fseek(near_file, read_count * sizeof(int16_t),
+ SEEK_CUR));
+ break; // This is expected.
+ }
+ } else {
+ ASSERT_EQ(size, read_count);
+ }
+
+ if (perf_testing) {
+ t0 = TickTime::Now();
+ }
+
+ ASSERT_EQ(apm->kNoError,
+ apm->AnalyzeReverseStream(&far_frame));
+
+ if (perf_testing) {
+ t1 = TickTime::Now();
+ TickInterval tick_diff = t1 - t0;
+ acc_ticks += tick_diff;
+ if (tick_diff.Microseconds() > max_time_reverse_us) {
+ max_time_reverse_us = tick_diff.Microseconds();
+ }
+ if (tick_diff.Microseconds() < min_time_reverse_us) {
+ min_time_reverse_us = tick_diff.Microseconds();
+ }
+ }
+
+ } else if (event == kCaptureEvent) {
+ primary_count++;
+ near_frame._audioChannel = num_capture_input_channels;
+
+ size_t size = samples_per_channel * num_capture_input_channels;
+ read_count = fread(near_frame._payloadData,
+ sizeof(int16_t),
+ size,
+ near_file);
+
+ near_read_bytes += read_count * sizeof(int16_t);
+ if (progress && primary_count % 100 == 0) {
+ printf("%.0f%% complete\r",
+ (near_read_bytes * 100.0) / near_size_bytes);
+ fflush(stdout);
+ }
+ if (simulating) {
+ if (read_count != size) {
+ break; // This is expected.
+ }
+
+ delay_ms = 0;
+ drift_samples = 0;
+ } else {
+ ASSERT_EQ(size, read_count);
+
+ // TODO(ajm): sizeof(delay_ms) for current files?
+ ASSERT_EQ(1u,
+ fread(&delay_ms, 2, 1, delay_file));
+ ASSERT_EQ(1u,
+ fread(&drift_samples, sizeof(drift_samples), 1, drift_file));
+ }
+
+ if (perf_testing) {
+ t0 = TickTime::Now();
+ }
+
+ // TODO(ajm): fake an analog gain while simulating.
+
+ int capture_level_in = capture_level;
+ ASSERT_EQ(apm->kNoError,
+ apm->gain_control()->set_stream_analog_level(capture_level));
+ ASSERT_EQ(apm->kNoError,
+ apm->set_stream_delay_ms(delay_ms + extra_delay_ms));
+ ASSERT_EQ(apm->kNoError,
+ apm->echo_cancellation()->set_stream_drift_samples(drift_samples));
+
+ int err = apm->ProcessStream(&near_frame);
+ if (err == apm->kBadStreamParameterWarning) {
+ printf("Bad parameter warning. %s\n", trace_stream.str().c_str());
+ }
+ ASSERT_TRUE(err == apm->kNoError ||
+ err == apm->kBadStreamParameterWarning);
+ ASSERT_TRUE(near_frame._audioChannel == apm->num_output_channels());
+
+ capture_level = apm->gain_control()->stream_analog_level();
+
+ stream_has_voice =
+ static_cast<int8_t>(apm->voice_detection()->stream_has_voice());
+ if (vad_out_file != NULL) {
+ ASSERT_EQ(1u, fwrite(&stream_has_voice,
+ sizeof(stream_has_voice),
+ 1,
+ vad_out_file));
+ }
+
+ if (apm->gain_control()->mode() != GainControl::kAdaptiveAnalog) {
+ ASSERT_EQ(capture_level_in, capture_level);
+ }
+
+ if (perf_testing) {
+ t1 = TickTime::Now();
+ TickInterval tick_diff = t1 - t0;
+ acc_ticks += tick_diff;
+ if (tick_diff.Microseconds() > max_time_us) {
+ max_time_us = tick_diff.Microseconds();
+ }
+ if (tick_diff.Microseconds() < min_time_us) {
+ min_time_us = tick_diff.Microseconds();
+ }
+ }
+
+ size = samples_per_channel * near_frame._audioChannel;
+ ASSERT_EQ(size, fwrite(near_frame._payloadData,
+ sizeof(int16_t),
+ size,
+ out_file));
+ }
+ else {
+ FAIL() << "Event " << event << " is unrecognized";
+ }
+ }
+ }
+ printf("100%% complete\r");
+
+ if (aecm_echo_path_out_file != NULL) {
+ const size_t path_size =
+ apm->echo_control_mobile()->echo_path_size_bytes();
+ scoped_array<char> echo_path(new char[path_size]);
+ apm->echo_control_mobile()->GetEchoPath(echo_path.get(), path_size);
+ ASSERT_EQ(path_size, fwrite(echo_path.get(),
+ sizeof(char),
+ path_size,
+ aecm_echo_path_out_file));
+ fclose(aecm_echo_path_out_file);
+ aecm_echo_path_out_file = NULL;
+ }
+
+ if (verbose) {
+ printf("\nProcessed frames: %d (primary), %d (reverse)\n",
+ primary_count, reverse_count);
+
+ if (apm->level_estimator()->is_enabled()) {
+ printf("\n--Level metrics--\n");
+ printf("RMS: %d dBFS\n", -apm->level_estimator()->RMS());
+ }
+ if (apm->echo_cancellation()->are_metrics_enabled()) {
+ EchoCancellation::Metrics metrics;
+ apm->echo_cancellation()->GetMetrics(&metrics);
+ printf("\n--Echo metrics--\n");
+ printf("(avg, max, min)\n");
+ printf("ERL: ");
+ PrintStat(metrics.echo_return_loss);
+ printf("ERLE: ");
+ PrintStat(metrics.echo_return_loss_enhancement);
+ printf("ANLP: ");
+ PrintStat(metrics.a_nlp);
+ }
+ if (apm->echo_cancellation()->is_delay_logging_enabled()) {
+ int median = 0;
+ int std = 0;
+ apm->echo_cancellation()->GetDelayMetrics(&median, &std);
+ printf("\n--Delay metrics--\n");
+ printf("Median: %3d\n", median);
+ printf("Standard deviation: %3d\n", std);
+ }
+ }
+
+ if (!pb_file) {
+ int8_t temp_int8;
+ if (far_file) {
+ read_count = fread(&temp_int8, sizeof(temp_int8), 1, far_file);
+ EXPECT_NE(0, feof(far_file)) << "Far-end file not fully processed";
+ }
+
+ read_count = fread(&temp_int8, sizeof(temp_int8), 1, near_file);
+ EXPECT_NE(0, feof(near_file)) << "Near-end file not fully processed";
+
+ if (!simulating) {
+ read_count = fread(&temp_int8, sizeof(temp_int8), 1, event_file);
+ EXPECT_NE(0, feof(event_file)) << "Event file not fully processed";
+ read_count = fread(&temp_int8, sizeof(temp_int8), 1, delay_file);
+ EXPECT_NE(0, feof(delay_file)) << "Delay file not fully processed";
+ read_count = fread(&temp_int8, sizeof(temp_int8), 1, drift_file);
+ EXPECT_NE(0, feof(drift_file)) << "Drift file not fully processed";
+ }
+ }
+
+ if (perf_testing) {
+ if (primary_count > 0) {
+ WebRtc_Word64 exec_time = acc_ticks.Milliseconds();
+ printf("\nTotal time: %.3f s, file time: %.2f s\n",
+ exec_time * 0.001, primary_count * 0.01);
+ printf("Time per frame: %.3f ms (average), %.3f ms (max),"
+ " %.3f ms (min)\n",
+ (exec_time * 1.0) / primary_count,
+ (max_time_us + max_time_reverse_us) / 1000.0,
+ (min_time_us + min_time_reverse_us) / 1000.0);
+ } else {
+ printf("Warning: no capture frames\n");
+ }
+ }
+
+ AudioProcessing::Destroy(apm);
+ apm = NULL;
+}
+} // namespace
+
+// Test entry point. Delegates all argument parsing and processing to
+// void_main() (defined above in the anonymous namespace), then tears down
+// the protobuf library's internal allocations.
+int main(int argc, char* argv[])
+{
+ void_main(argc, argv);
+
+ // Optional, but removes memory leak noise from Valgrind.
+ google::protobuf::ShutdownProtobufLibrary();
+ return 0;
+}
diff --git a/src/modules/audio_processing/test/unit_test.cc b/src/modules/audio_processing/test/unit_test.cc
new file mode 100644
index 0000000000..6fe59059e1
--- /dev/null
+++ b/src/modules/audio_processing/test/unit_test.cc
@@ -0,0 +1,1256 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdio.h>
+
+#include "gtest/gtest.h"
+
+#include "audio_processing.h"
+#include "event_wrapper.h"
+#include "module_common_types.h"
+#include "scoped_ptr.h"
+#include "signal_processing_library.h"
+#include "testsupport/fileutils.h"
+#include "thread_wrapper.h"
+#include "trace.h"
+#ifdef WEBRTC_ANDROID
+#include "external/webrtc/src/modules/audio_processing/test/unittest.pb.h"
+#else
+#include "webrtc/audio_processing/unittest.pb.h"
+#endif
+
+using webrtc::AudioProcessing;
+using webrtc::AudioFrame;
+using webrtc::GainControl;
+using webrtc::NoiseSuppression;
+using webrtc::EchoCancellation;
+using webrtc::EventWrapper;
+using webrtc::scoped_array;
+using webrtc::Trace;
+using webrtc::LevelEstimator;
+using webrtc::EchoCancellation;
+using webrtc::EchoControlMobile;
+using webrtc::VoiceDetection;
+
+namespace {
+// When false, this will compare the output data with the results stored to
+// file. This is the typical case. When the file should be updated, it can
+// be set to true with the command-line switch --write_output_data.
+bool write_output_data = false;
+
+// gtest fixture for the AudioProcessing unit tests. Per-test state (the APM
+// instance, audio frames, and input files) is created in SetUp() and torn
+// down in TearDown(); tracing is shared across the whole test case.
+class ApmTest : public ::testing::Test {
+ protected:
+ ApmTest();
+ virtual void SetUp();
+ virtual void TearDown();
+
+ // Runs once before the first test: routes WebRTC trace output to a file
+ // in the test output directory.
+ static void SetUpTestCase() {
+ Trace::CreateTrace();
+ std::string trace_filename = webrtc::test::OutputPath() +
+ "audioproc_trace.txt";
+ ASSERT_EQ(0, Trace::SetTraceFile(trace_filename.c_str()));
+ }
+
+ // Runs once after the last test: releases the trace singleton.
+ static void TearDownTestCase() {
+ Trace::ReturnTrace();
+ }
+ // Path to where the resource files to be used for this test are located.
+ const std::string resource_path;
+ // Reference output file; which one is chosen at compile time by the
+ // fixed/float profile macros (see the constructor).
+ const std::string output_filename;
+ webrtc::AudioProcessing* apm_;    // Owned; created in SetUp().
+ webrtc::AudioFrame* frame_;       // Owned capture-side frame.
+ webrtc::AudioFrame* revframe_;    // Owned render (reverse) frame.
+ FILE* far_file_;                  // Far-end PCM input; owned.
+ FILE* near_file_;                 // Near-end PCM input; owned.
+};
+
+// Initializes paths; all pointers start NULL and are populated in SetUp().
+// Note: resource_path must be declared before output_filename in the class
+// so this initialization order is well-defined.
+ApmTest::ApmTest()
+ : resource_path(webrtc::test::ProjectRootPath() +
+ "test/data/audio_processing/"),
+#if defined(WEBRTC_APM_UNIT_TEST_FIXED_PROFILE)
+ output_filename(resource_path + "output_data_fixed.pb"),
+#elif defined(WEBRTC_APM_UNIT_TEST_FLOAT_PROFILE)
+ output_filename(resource_path + "output_data_float.pb"),
+#endif
+ // NOTE(review): if neither profile macro is defined, output_filename is
+ // default-constructed (empty) — presumably the build always defines one;
+ // confirm against the build files.
+ apm_(NULL),
+ frame_(NULL),
+ revframe_(NULL),
+ far_file_(NULL),
+ near_file_(NULL) {}
+
+// Creates a fresh APM configured for 32 kHz stereo (320 samples per channel
+// = one 10 ms frame) and opens the far-/near-end PCM resource files.
+void ApmTest::SetUp() {
+ apm_ = AudioProcessing::Create(0);
+ ASSERT_TRUE(apm_ != NULL);
+
+ frame_ = new AudioFrame();
+ revframe_ = new AudioFrame();
+
+ ASSERT_EQ(apm_->kNoError, apm_->set_sample_rate_hz(32000));
+ ASSERT_EQ(apm_->kNoError, apm_->set_num_channels(2, 2));
+ ASSERT_EQ(apm_->kNoError, apm_->set_num_reverse_channels(2));
+
+ // Frame metadata must match the APM configuration above.
+ frame_->_payloadDataLengthInSamples = 320;
+ frame_->_audioChannel = 2;
+ frame_->_frequencyInHz = 32000;
+ revframe_->_payloadDataLengthInSamples = 320;
+ revframe_->_audioChannel = 2;
+ revframe_->_frequencyInHz = 32000;
+
+ std::string input_filename = resource_path + "aec_far.pcm";
+ far_file_ = fopen(input_filename.c_str(), "rb");
+ ASSERT_TRUE(far_file_ != NULL) << "Could not open input file " <<
+ input_filename << "\n";
+ input_filename = resource_path + "aec_near.pcm";
+ near_file_ = fopen(input_filename.c_str(), "rb");
+ ASSERT_TRUE(near_file_ != NULL) << "Could not open input file " <<
+ input_filename << "\n";
+}
+
+// Releases everything SetUp() acquired. Each pointer is reset to NULL so a
+// partially-failed SetUp() cannot cause a double free on the next test.
+// (The null checks before delete are defensive only; delete NULL is a no-op.)
+void ApmTest::TearDown() {
+ if (frame_) {
+ delete frame_;
+ }
+ frame_ = NULL;
+
+ if (revframe_) {
+ delete revframe_;
+ }
+ revframe_ = NULL;
+
+ if (far_file_) {
+ ASSERT_EQ(0, fclose(far_file_));
+ }
+ far_file_ = NULL;
+
+ if (near_file_) {
+ ASSERT_EQ(0, fclose(near_file_));
+ }
+ near_file_ = NULL;
+
+ if (apm_ != NULL) {
+ AudioProcessing::Destroy(apm_);
+ }
+ apm_ = NULL;
+}
+
+// Downmixes interleaved stereo samples (L0 R0 L1 R1 ...) to mono by
+// averaging each L/R pair. The sum is formed in 32 bits before the
+// right-shift, so the intermediate cannot overflow and the result always
+// fits back into int16_t. |mono| must hold samples_per_channel values.
+void MixStereoToMono(const int16_t* stereo,
+ int16_t* mono,
+ int samples_per_channel) {
+ for (int i = 0; i < samples_per_channel; i++) {
+ int32_t int32 = (static_cast<int32_t>(stereo[i * 2]) +
+ static_cast<int32_t>(stereo[i * 2 + 1])) >> 1;
+ mono[i] = static_cast<int16_t>(int32);
+ }
+}
+
+// Returns the larger of |a| and |b| (local helper; mirrors std::max).
+template <class T>
+T MaxValue(T a, T b) {
+ return a > b ? a : b;
+}
+
+// Returns the absolute value of |a|.
+// NOTE(review): for the most negative value of a signed T (e.g. int16_t
+// -32768), -a is not representable in T — callers should avoid passing it.
+template <class T>
+T AbsValue(T a) {
+ return a > 0 ? a : -a;
+}
+
+// Fills every sample of |frame| (all channels, interleaved) with |value|.
+void SetFrameTo(AudioFrame* frame, int16_t value) {
+ for (int i = 0; i < frame->_payloadDataLengthInSamples * frame->_audioChannel;
+ ++i) {
+ frame->_payloadData[i] = value;
+ }
+}
+
+// Returns the largest absolute sample value across all channels of |frame|.
+// Reads element 0 unconditionally, so the frame must contain at least one
+// sample. Inherits AbsValue's caveat for a sample equal to -32768.
+int16_t MaxAudioFrame(const AudioFrame& frame) {
+ const int length = frame._payloadDataLengthInSamples * frame._audioChannel;
+ int16_t max = AbsValue(frame._payloadData[0]);
+ for (int i = 1; i < length; i++) {
+ max = MaxValue(max, AbsValue(frame._payloadData[i]));
+ }
+
+ return max;
+}
+
+// Returns true when the two frames have identical dimensions (sample count
+// and channel count) and bit-exact payload data. Other AudioFrame metadata
+// (e.g. sample rate) is deliberately not compared.
+bool FrameDataAreEqual(const AudioFrame& frame1, const AudioFrame& frame2) {
+ if (frame1._payloadDataLengthInSamples !=
+ frame2._payloadDataLengthInSamples) {
+ return false;
+ }
+ if (frame1._audioChannel !=
+ frame2._audioChannel) {
+ return false;
+ }
+ if (memcmp(frame1._payloadData, frame2._payloadData,
+ frame1._payloadDataLengthInSamples * frame1._audioChannel *
+ sizeof(int16_t))) {
+ return false;
+ }
+ return true;
+}
+
+// Compares a freshly computed Statistic against the reference values stored
+// in the protobuf message. Uses EXPECT (non-fatal) so all four fields are
+// reported even if an earlier one mismatches.
+void TestStats(const AudioProcessing::Statistic& test,
+ const webrtc::audioproc::Test::Statistic& reference) {
+ EXPECT_EQ(reference.instant(), test.instant);
+ EXPECT_EQ(reference.average(), test.average);
+ EXPECT_EQ(reference.maximum(), test.maximum);
+ EXPECT_EQ(reference.minimum(), test.minimum);
+}
+
+// Copies a computed Statistic into the protobuf reference message — the
+// inverse of TestStats(), used when regenerating the stored output data.
+void WriteStatsMessage(const AudioProcessing::Statistic& output,
+ webrtc::audioproc::Test::Statistic* message) {
+ message->set_instant(output.instant);
+ message->set_average(output.average);
+ message->set_maximum(output.maximum);
+ message->set_minimum(output.minimum);
+}
+
+// Serializes |message| to |filename| as a host-endian int length prefix
+// followed by the raw wire bytes (the format ReadMessageLiteFromFile
+// expects). Fatal-asserts on any failure.
+void WriteMessageLiteToFile(const std::string filename,
+                            const ::google::protobuf::MessageLite& message) {
+  FILE* file = fopen(filename.c_str(), "wb");
+  ASSERT_TRUE(file != NULL) << "Could not open " << filename;
+  int size = message.ByteSize();
+  ASSERT_GT(size, 0);
+  // scoped_array releases the buffer even when a fatal ASSERT_* returns
+  // early below; the raw new[]/delete[] pair in the original leaked on
+  // those paths.
+  scoped_array<unsigned char> array(new unsigned char[size]);
+  ASSERT_TRUE(message.SerializeToArray(array.get(), size));
+
+  ASSERT_EQ(1u, fwrite(&size, sizeof(int), 1, file));
+  ASSERT_EQ(static_cast<size_t>(size),
+            fwrite(array.get(), sizeof(unsigned char), size, file));
+
+  // fclose flushes buffered output; check it like TearDown() does.
+  ASSERT_EQ(0, fclose(file));
+}
+
+// Reads a length-prefixed protobuf message (as written by
+// WriteMessageLiteToFile) from |filename| into |message|.
+// Fatal-asserts on open, read, or parse failure.
+void ReadMessageLiteFromFile(const std::string filename,
+                             ::google::protobuf::MessageLite* message) {
+  assert(message != NULL);
+
+  FILE* file = fopen(filename.c_str(), "rb");
+  ASSERT_TRUE(file != NULL) << "Could not open " << filename;
+  int size = 0;
+  ASSERT_EQ(1u, fread(&size, sizeof(int), 1, file));
+  ASSERT_GT(size, 0);
+  // scoped_array releases the buffer even when a fatal ASSERT_* returns
+  // early below; the raw new[]/delete[] pair in the original leaked on
+  // those paths.
+  scoped_array<unsigned char> array(new unsigned char[size]);
+  ASSERT_EQ(static_cast<size_t>(size),
+            fread(array.get(), sizeof(unsigned char), size, file));
+
+  ASSERT_TRUE(message->ParseFromArray(array.get(), size));
+
+  fclose(file);
+}
+
+// Per-thread context for the (currently disabled) deadlock test. Passed to
+// DeadlockProc(); |error| is set by the worker and inspected by the main
+// thread after join. The AudioProcessing instance is shared, not owned.
+struct ThreadData {
+ ThreadData(int thread_num_, AudioProcessing* ap_)
+ : thread_num(thread_num_),
+ error(false),
+ ap(ap_) {}
+ int thread_num;
+ bool error;
+ AudioProcessing* ap;
+};
+
+// Don't use GTest here; non-thread-safe on Windows (as of 1.5.0).
+// Worker body for the deadlock test: even-numbered threads hammer the render
+// path (AnalyzeReverseStream), odd-numbered threads the capture path
+// (ProcessStream), all against one shared AudioProcessing instance.
+// Returns true to be re-invoked by the thread wrapper; returns false (and
+// sets thread_data->error) on an unexpected APM error. Errors are reported
+// via ThreadData rather than gtest macros because gtest is not thread-safe
+// on Windows (see the comment above this function).
+bool DeadlockProc(void* thread_object) {
+ ThreadData* thread_data = static_cast<ThreadData*>(thread_object);
+ AudioProcessing* ap = thread_data->ap;
+ int err = ap->kNoError;
+
+ // 10 ms, 32 kHz stereo frames, matching the fixture configuration.
+ AudioFrame primary_frame;
+ AudioFrame reverse_frame;
+ primary_frame._payloadDataLengthInSamples = 320;
+ primary_frame._audioChannel = 2;
+ primary_frame._frequencyInHz = 32000;
+ reverse_frame._payloadDataLengthInSamples = 320;
+ reverse_frame._audioChannel = 2;
+ reverse_frame._frequencyInHz = 32000;
+
+ // Return codes of these Enable() calls are deliberately not checked here;
+ // any resulting processing error surfaces below.
+ ap->echo_cancellation()->Enable(true);
+ ap->gain_control()->Enable(true);
+ ap->high_pass_filter()->Enable(true);
+ ap->level_estimator()->Enable(true);
+ ap->noise_suppression()->Enable(true);
+ ap->voice_detection()->Enable(true);
+
+ if (thread_data->thread_num % 2 == 0) {
+ err = ap->AnalyzeReverseStream(&reverse_frame);
+ if (err != ap->kNoError) {
+ printf("Error in AnalyzeReverseStream(): %d\n", err);
+ thread_data->error = true;
+ return false;
+ }
+ }
+
+ if (thread_data->thread_num % 2 == 1) {
+ // Stream parameters must be set before each ProcessStream() call.
+ ap->set_stream_delay_ms(0);
+ ap->echo_cancellation()->set_stream_drift_samples(0);
+ ap->gain_control()->set_stream_analog_level(0);
+ err = ap->ProcessStream(&primary_frame);
+ if (err == ap->kStreamParameterNotSetError) {
+ // Expected when another thread re-initialized the APM between the
+ // parameter calls and ProcessStream(); not a failure.
+ printf("Expected kStreamParameterNotSetError in ProcessStream(): %d\n",
+ err);
+ } else if (err != ap->kNoError) {
+ printf("Error in ProcessStream(): %d\n", err);
+ thread_data->error = true;
+ return false;
+ }
+ ap->gain_control()->stream_analog_level();
+ }
+
+ // Brief sleep (1 ms) so the threads interleave rather than spin.
+ EventWrapper* event = EventWrapper::Create();
+ event->Wait(1);
+ delete event;
+ event = NULL;
+
+ return true;
+}
+
+/*TEST_F(ApmTest, Deadlock) {
+ const int num_threads = 16;
+ std::vector<ThreadWrapper*> threads(num_threads);
+ std::vector<ThreadData*> thread_data(num_threads);
+
+ ASSERT_EQ(apm_->kNoError, apm_->set_sample_rate_hz(32000));
+ ASSERT_EQ(apm_->kNoError, apm_->set_num_channels(2, 2));
+ ASSERT_EQ(apm_->kNoError, apm_->set_num_reverse_channels(2));
+
+ for (int i = 0; i < num_threads; i++) {
+ thread_data[i] = new ThreadData(i, apm_);
+ threads[i] = ThreadWrapper::CreateThread(DeadlockProc,
+ thread_data[i],
+ kNormalPriority,
+ 0);
+ ASSERT_TRUE(threads[i] != NULL);
+ unsigned int thread_id = 0;
+ threads[i]->Start(thread_id);
+ }
+
+ EventWrapper* event = EventWrapper::Create();
+ ASSERT_EQ(kEventTimeout, event->Wait(5000));
+ delete event;
+ event = NULL;
+
+ for (int i = 0; i < num_threads; i++) {
+ // This will return false if the thread has deadlocked.
+ ASSERT_TRUE(threads[i]->Stop());
+ ASSERT_FALSE(thread_data[i]->error);
+ delete threads[i];
+ threads[i] = NULL;
+ delete thread_data[i];
+ thread_data[i] = NULL;
+ }
+}*/
+
+TEST_F(ApmTest, StreamParameters) {
+ // No errors when the components are disabled.
+ EXPECT_EQ(apm_->kNoError,
+ apm_->ProcessStream(frame_));
+
+ // -- Missing AGC level --
+ EXPECT_EQ(apm_->kNoError, apm_->Initialize());
+ EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(true));
+ EXPECT_EQ(apm_->kStreamParameterNotSetError, apm_->ProcessStream(frame_));
+
+ // Resets after successful ProcessStream().
+ EXPECT_EQ(apm_->kNoError,
+ apm_->gain_control()->set_stream_analog_level(127));
+ EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+ EXPECT_EQ(apm_->kStreamParameterNotSetError, apm_->ProcessStream(frame_));
+
+ // Other stream parameters set correctly.
+ EXPECT_EQ(apm_->kNoError, apm_->echo_cancellation()->Enable(true));
+ EXPECT_EQ(apm_->kNoError,
+ apm_->echo_cancellation()->enable_drift_compensation(true));
+ EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
+ EXPECT_EQ(apm_->kNoError,
+ apm_->echo_cancellation()->set_stream_drift_samples(0));
+ EXPECT_EQ(apm_->kStreamParameterNotSetError,
+ apm_->ProcessStream(frame_));
+ EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(false));
+ EXPECT_EQ(apm_->kNoError,
+ apm_->echo_cancellation()->enable_drift_compensation(false));
+
+ // -- Missing delay --
+ EXPECT_EQ(apm_->kNoError, apm_->Initialize());
+ EXPECT_EQ(apm_->kNoError, apm_->echo_cancellation()->Enable(true));
+ EXPECT_EQ(apm_->kStreamParameterNotSetError, apm_->ProcessStream(frame_));
+
+ // Resets after successful ProcessStream().
+ EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
+ EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+ EXPECT_EQ(apm_->kStreamParameterNotSetError, apm_->ProcessStream(frame_));
+
+ // Other stream parameters set correctly.
+ EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(true));
+ EXPECT_EQ(apm_->kNoError,
+ apm_->echo_cancellation()->enable_drift_compensation(true));
+ EXPECT_EQ(apm_->kNoError,
+ apm_->echo_cancellation()->set_stream_drift_samples(0));
+ EXPECT_EQ(apm_->kNoError,
+ apm_->gain_control()->set_stream_analog_level(127));
+ EXPECT_EQ(apm_->kStreamParameterNotSetError, apm_->ProcessStream(frame_));
+ EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(false));
+
+ // -- Missing drift --
+ EXPECT_EQ(apm_->kNoError, apm_->Initialize());
+ EXPECT_EQ(apm_->kStreamParameterNotSetError, apm_->ProcessStream(frame_));
+
+ // Resets after successful ProcessStream().
+ EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
+ EXPECT_EQ(apm_->kNoError,
+ apm_->echo_cancellation()->set_stream_drift_samples(0));
+ EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+ EXPECT_EQ(apm_->kStreamParameterNotSetError, apm_->ProcessStream(frame_));
+
+ // Other stream parameters set correctly.
+ EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(true));
+ EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
+ EXPECT_EQ(apm_->kNoError,
+ apm_->gain_control()->set_stream_analog_level(127));
+ EXPECT_EQ(apm_->kStreamParameterNotSetError, apm_->ProcessStream(frame_));
+
+ // -- No stream parameters --
+ EXPECT_EQ(apm_->kNoError, apm_->Initialize());
+ EXPECT_EQ(apm_->kNoError,
+ apm_->AnalyzeReverseStream(revframe_));
+ EXPECT_EQ(apm_->kStreamParameterNotSetError,
+ apm_->ProcessStream(frame_));
+
+ // -- All there --
+ EXPECT_EQ(apm_->kNoError, apm_->Initialize());
+ EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
+ EXPECT_EQ(apm_->kNoError,
+ apm_->echo_cancellation()->set_stream_drift_samples(0));
+ EXPECT_EQ(apm_->kNoError,
+ apm_->gain_control()->set_stream_analog_level(127));
+ EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+}
+
+TEST_F(ApmTest, Channels) {
+ // Testing number of invalid channels
+ EXPECT_EQ(apm_->kBadParameterError, apm_->set_num_channels(0, 1));
+ EXPECT_EQ(apm_->kBadParameterError, apm_->set_num_channels(1, 0));
+ EXPECT_EQ(apm_->kBadParameterError, apm_->set_num_channels(3, 1));
+ EXPECT_EQ(apm_->kBadParameterError, apm_->set_num_channels(1, 3));
+ EXPECT_EQ(apm_->kBadParameterError, apm_->set_num_reverse_channels(0));
+ EXPECT_EQ(apm_->kBadParameterError, apm_->set_num_reverse_channels(3));
+ // Testing number of valid channels
+ for (int i = 1; i < 3; i++) {
+ for (int j = 1; j < 3; j++) {
+ if (j > i) {
+ EXPECT_EQ(apm_->kBadParameterError, apm_->set_num_channels(i, j));
+ } else {
+ EXPECT_EQ(apm_->kNoError, apm_->set_num_channels(i, j));
+ EXPECT_EQ(j, apm_->num_output_channels());
+ }
+ }
+ EXPECT_EQ(i, apm_->num_input_channels());
+ EXPECT_EQ(apm_->kNoError, apm_->set_num_reverse_channels(i));
+ EXPECT_EQ(i, apm_->num_reverse_channels());
+ }
+}
+
+TEST_F(ApmTest, SampleRates) {
+ // Testing invalid sample rates
+ EXPECT_EQ(apm_->kBadParameterError, apm_->set_sample_rate_hz(10000));
+ // Testing valid sample rates
+ int fs[] = {8000, 16000, 32000};
+ for (size_t i = 0; i < sizeof(fs) / sizeof(*fs); i++) {
+ EXPECT_EQ(apm_->kNoError, apm_->set_sample_rate_hz(fs[i]));
+ EXPECT_EQ(fs[i], apm_->sample_rate_hz());
+ }
+}
+
+
+TEST_F(ApmTest, EchoCancellation) {
+ EXPECT_EQ(apm_->kNoError,
+ apm_->echo_cancellation()->enable_drift_compensation(true));
+ EXPECT_TRUE(apm_->echo_cancellation()->is_drift_compensation_enabled());
+ EXPECT_EQ(apm_->kNoError,
+ apm_->echo_cancellation()->enable_drift_compensation(false));
+ EXPECT_FALSE(apm_->echo_cancellation()->is_drift_compensation_enabled());
+
+ EXPECT_EQ(apm_->kBadParameterError,
+ apm_->echo_cancellation()->set_device_sample_rate_hz(4000));
+ EXPECT_EQ(apm_->kBadParameterError,
+ apm_->echo_cancellation()->set_device_sample_rate_hz(100000));
+
+ int rate[] = {16000, 44100, 48000};
+ for (size_t i = 0; i < sizeof(rate)/sizeof(*rate); i++) {
+ EXPECT_EQ(apm_->kNoError,
+ apm_->echo_cancellation()->set_device_sample_rate_hz(rate[i]));
+ EXPECT_EQ(rate[i],
+ apm_->echo_cancellation()->device_sample_rate_hz());
+ }
+
+ EXPECT_EQ(apm_->kBadParameterError,
+ apm_->echo_cancellation()->set_suppression_level(
+ static_cast<EchoCancellation::SuppressionLevel>(-1)));
+
+ EXPECT_EQ(apm_->kBadParameterError,
+ apm_->echo_cancellation()->set_suppression_level(
+ static_cast<EchoCancellation::SuppressionLevel>(4)));
+
+ EchoCancellation::SuppressionLevel level[] = {
+ EchoCancellation::kLowSuppression,
+ EchoCancellation::kModerateSuppression,
+ EchoCancellation::kHighSuppression,
+ };
+ for (size_t i = 0; i < sizeof(level)/sizeof(*level); i++) {
+ EXPECT_EQ(apm_->kNoError,
+ apm_->echo_cancellation()->set_suppression_level(level[i]));
+ EXPECT_EQ(level[i],
+ apm_->echo_cancellation()->suppression_level());
+ }
+
+ EchoCancellation::Metrics metrics;
+ EXPECT_EQ(apm_->kNotEnabledError,
+ apm_->echo_cancellation()->GetMetrics(&metrics));
+
+ EXPECT_EQ(apm_->kNoError,
+ apm_->echo_cancellation()->enable_metrics(true));
+ EXPECT_TRUE(apm_->echo_cancellation()->are_metrics_enabled());
+ EXPECT_EQ(apm_->kNoError,
+ apm_->echo_cancellation()->enable_metrics(false));
+ EXPECT_FALSE(apm_->echo_cancellation()->are_metrics_enabled());
+
+ int median = 0;
+ int std = 0;
+ EXPECT_EQ(apm_->kNotEnabledError,
+ apm_->echo_cancellation()->GetDelayMetrics(&median, &std));
+
+ EXPECT_EQ(apm_->kNoError,
+ apm_->echo_cancellation()->enable_delay_logging(true));
+ EXPECT_TRUE(apm_->echo_cancellation()->is_delay_logging_enabled());
+ EXPECT_EQ(apm_->kNoError,
+ apm_->echo_cancellation()->enable_delay_logging(false));
+ EXPECT_FALSE(apm_->echo_cancellation()->is_delay_logging_enabled());
+
+ EXPECT_EQ(apm_->kNoError, apm_->echo_cancellation()->Enable(true));
+ EXPECT_TRUE(apm_->echo_cancellation()->is_enabled());
+ EXPECT_EQ(apm_->kNoError, apm_->echo_cancellation()->Enable(false));
+ EXPECT_FALSE(apm_->echo_cancellation()->is_enabled());
+}
+
+TEST_F(ApmTest, EchoControlMobile) {
+ // AECM won't use super-wideband.
+ EXPECT_EQ(apm_->kNoError, apm_->set_sample_rate_hz(32000));
+ EXPECT_EQ(apm_->kBadSampleRateError, apm_->echo_control_mobile()->Enable(true));
+ EXPECT_EQ(apm_->kNoError, apm_->set_sample_rate_hz(16000));
+ // Turn AECM on (and AEC off)
+ EXPECT_EQ(apm_->kNoError, apm_->echo_control_mobile()->Enable(true));
+ EXPECT_TRUE(apm_->echo_control_mobile()->is_enabled());
+
+ EXPECT_EQ(apm_->kBadParameterError,
+ apm_->echo_control_mobile()->set_routing_mode(
+ static_cast<EchoControlMobile::RoutingMode>(-1)));
+ EXPECT_EQ(apm_->kBadParameterError,
+ apm_->echo_control_mobile()->set_routing_mode(
+ static_cast<EchoControlMobile::RoutingMode>(5)));
+
+ // Toggle routing modes
+ EchoControlMobile::RoutingMode mode[] = {
+ EchoControlMobile::kQuietEarpieceOrHeadset,
+ EchoControlMobile::kEarpiece,
+ EchoControlMobile::kLoudEarpiece,
+ EchoControlMobile::kSpeakerphone,
+ EchoControlMobile::kLoudSpeakerphone,
+ };
+ for (size_t i = 0; i < sizeof(mode)/sizeof(*mode); i++) {
+ EXPECT_EQ(apm_->kNoError,
+ apm_->echo_control_mobile()->set_routing_mode(mode[i]));
+ EXPECT_EQ(mode[i],
+ apm_->echo_control_mobile()->routing_mode());
+ }
+ // Turn comfort noise off/on
+ EXPECT_EQ(apm_->kNoError,
+ apm_->echo_control_mobile()->enable_comfort_noise(false));
+ EXPECT_FALSE(apm_->echo_control_mobile()->is_comfort_noise_enabled());
+ EXPECT_EQ(apm_->kNoError,
+ apm_->echo_control_mobile()->enable_comfort_noise(true));
+ EXPECT_TRUE(apm_->echo_control_mobile()->is_comfort_noise_enabled());
+ // Set and get echo path
+ const size_t echo_path_size =
+ apm_->echo_control_mobile()->echo_path_size_bytes();
+ scoped_array<char> echo_path_in(new char[echo_path_size]);
+ scoped_array<char> echo_path_out(new char[echo_path_size]);
+ EXPECT_EQ(apm_->kNullPointerError,
+ apm_->echo_control_mobile()->SetEchoPath(NULL, echo_path_size));
+ EXPECT_EQ(apm_->kNullPointerError,
+ apm_->echo_control_mobile()->GetEchoPath(NULL, echo_path_size));
+ EXPECT_EQ(apm_->kBadParameterError,
+ apm_->echo_control_mobile()->GetEchoPath(echo_path_out.get(), 1));
+ EXPECT_EQ(apm_->kNoError,
+ apm_->echo_control_mobile()->GetEchoPath(echo_path_out.get(),
+ echo_path_size));
+ for (size_t i = 0; i < echo_path_size; i++) {
+ echo_path_in[i] = echo_path_out[i] + 1;
+ }
+ EXPECT_EQ(apm_->kBadParameterError,
+ apm_->echo_control_mobile()->SetEchoPath(echo_path_in.get(), 1));
+ EXPECT_EQ(apm_->kNoError,
+ apm_->echo_control_mobile()->SetEchoPath(echo_path_in.get(),
+ echo_path_size));
+ EXPECT_EQ(apm_->kNoError,
+ apm_->echo_control_mobile()->GetEchoPath(echo_path_out.get(),
+ echo_path_size));
+ for (size_t i = 0; i < echo_path_size; i++) {
+ EXPECT_EQ(echo_path_in[i], echo_path_out[i]);
+ }
+ // Turn AECM off
+ EXPECT_EQ(apm_->kNoError, apm_->echo_control_mobile()->Enable(false));
+ EXPECT_FALSE(apm_->echo_control_mobile()->is_enabled());
+}
+
+TEST_F(ApmTest, GainControl) {
+ // Testing gain modes
+ EXPECT_EQ(apm_->kBadParameterError,
+ apm_->gain_control()->set_mode(static_cast<GainControl::Mode>(-1)));
+
+ EXPECT_EQ(apm_->kBadParameterError,
+ apm_->gain_control()->set_mode(static_cast<GainControl::Mode>(3)));
+
+ EXPECT_EQ(apm_->kNoError,
+ apm_->gain_control()->set_mode(
+ apm_->gain_control()->mode()));
+
+ GainControl::Mode mode[] = {
+ GainControl::kAdaptiveAnalog,
+ GainControl::kAdaptiveDigital,
+ GainControl::kFixedDigital
+ };
+ for (size_t i = 0; i < sizeof(mode)/sizeof(*mode); i++) {
+ EXPECT_EQ(apm_->kNoError,
+ apm_->gain_control()->set_mode(mode[i]));
+ EXPECT_EQ(mode[i], apm_->gain_control()->mode());
+ }
+ // Testing invalid target levels
+ EXPECT_EQ(apm_->kBadParameterError,
+ apm_->gain_control()->set_target_level_dbfs(-3));
+ EXPECT_EQ(apm_->kBadParameterError,
+ apm_->gain_control()->set_target_level_dbfs(-40));
+ // Testing valid target levels
+ EXPECT_EQ(apm_->kNoError,
+ apm_->gain_control()->set_target_level_dbfs(
+ apm_->gain_control()->target_level_dbfs()));
+
+ int level_dbfs[] = {0, 6, 31};
+ for (size_t i = 0; i < sizeof(level_dbfs)/sizeof(*level_dbfs); i++) {
+ EXPECT_EQ(apm_->kNoError,
+ apm_->gain_control()->set_target_level_dbfs(level_dbfs[i]));
+ EXPECT_EQ(level_dbfs[i], apm_->gain_control()->target_level_dbfs());
+ }
+
+ // Testing invalid compression gains
+ EXPECT_EQ(apm_->kBadParameterError,
+ apm_->gain_control()->set_compression_gain_db(-1));
+ EXPECT_EQ(apm_->kBadParameterError,
+ apm_->gain_control()->set_compression_gain_db(100));
+
+ // Testing valid compression gains
+ EXPECT_EQ(apm_->kNoError,
+ apm_->gain_control()->set_compression_gain_db(
+ apm_->gain_control()->compression_gain_db()));
+
+ int gain_db[] = {0, 10, 90};
+ for (size_t i = 0; i < sizeof(gain_db)/sizeof(*gain_db); i++) {
+ EXPECT_EQ(apm_->kNoError,
+ apm_->gain_control()->set_compression_gain_db(gain_db[i]));
+ EXPECT_EQ(gain_db[i], apm_->gain_control()->compression_gain_db());
+ }
+
+ // Testing limiter off/on
+ EXPECT_EQ(apm_->kNoError, apm_->gain_control()->enable_limiter(false));
+ EXPECT_FALSE(apm_->gain_control()->is_limiter_enabled());
+ EXPECT_EQ(apm_->kNoError, apm_->gain_control()->enable_limiter(true));
+ EXPECT_TRUE(apm_->gain_control()->is_limiter_enabled());
+
+ // Testing invalid level limits
+ EXPECT_EQ(apm_->kBadParameterError,
+ apm_->gain_control()->set_analog_level_limits(-1, 512));
+ EXPECT_EQ(apm_->kBadParameterError,
+ apm_->gain_control()->set_analog_level_limits(100000, 512));
+ EXPECT_EQ(apm_->kBadParameterError,
+ apm_->gain_control()->set_analog_level_limits(512, -1));
+ EXPECT_EQ(apm_->kBadParameterError,
+ apm_->gain_control()->set_analog_level_limits(512, 100000));
+ EXPECT_EQ(apm_->kBadParameterError,
+ apm_->gain_control()->set_analog_level_limits(512, 255));
+
+ // Testing valid level limits
+ EXPECT_EQ(apm_->kNoError,
+ apm_->gain_control()->set_analog_level_limits(
+ apm_->gain_control()->analog_level_minimum(),
+ apm_->gain_control()->analog_level_maximum()));
+
+ int min_level[] = {0, 255, 1024};
+ for (size_t i = 0; i < sizeof(min_level)/sizeof(*min_level); i++) {
+ EXPECT_EQ(apm_->kNoError,
+ apm_->gain_control()->set_analog_level_limits(min_level[i], 1024));
+ EXPECT_EQ(min_level[i], apm_->gain_control()->analog_level_minimum());
+ }
+
+ int max_level[] = {0, 1024, 65535};
+  for (size_t i = 0; i < sizeof(max_level)/sizeof(*max_level); i++) {
+ EXPECT_EQ(apm_->kNoError,
+ apm_->gain_control()->set_analog_level_limits(0, max_level[i]));
+ EXPECT_EQ(max_level[i], apm_->gain_control()->analog_level_maximum());
+ }
+
+ // TODO(ajm): stream_is_saturated() and stream_analog_level()
+
+ // Turn AGC off
+ EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(false));
+ EXPECT_FALSE(apm_->gain_control()->is_enabled());
+}
+
+TEST_F(ApmTest, NoiseSuppression) {
+  // Testing invalid suppression levels
+ EXPECT_EQ(apm_->kBadParameterError,
+ apm_->noise_suppression()->set_level(
+ static_cast<NoiseSuppression::Level>(-1)));
+
+ EXPECT_EQ(apm_->kBadParameterError,
+ apm_->noise_suppression()->set_level(
+ static_cast<NoiseSuppression::Level>(5)));
+
+  // Testing valid suppression levels
+ NoiseSuppression::Level level[] = {
+ NoiseSuppression::kLow,
+ NoiseSuppression::kModerate,
+ NoiseSuppression::kHigh,
+ NoiseSuppression::kVeryHigh
+ };
+ for (size_t i = 0; i < sizeof(level)/sizeof(*level); i++) {
+ EXPECT_EQ(apm_->kNoError,
+ apm_->noise_suppression()->set_level(level[i]));
+ EXPECT_EQ(level[i], apm_->noise_suppression()->level());
+ }
+
+  // Turning NS on/off
+ EXPECT_EQ(apm_->kNoError, apm_->noise_suppression()->Enable(true));
+ EXPECT_TRUE(apm_->noise_suppression()->is_enabled());
+ EXPECT_EQ(apm_->kNoError, apm_->noise_suppression()->Enable(false));
+ EXPECT_FALSE(apm_->noise_suppression()->is_enabled());
+}
+
+TEST_F(ApmTest, HighPassFilter) {
+  // Turning HP filter on/off
+ EXPECT_EQ(apm_->kNoError, apm_->high_pass_filter()->Enable(true));
+ EXPECT_TRUE(apm_->high_pass_filter()->is_enabled());
+ EXPECT_EQ(apm_->kNoError, apm_->high_pass_filter()->Enable(false));
+ EXPECT_FALSE(apm_->high_pass_filter()->is_enabled());
+}
+
+TEST_F(ApmTest, LevelEstimator) {
+ // Turning level estimator on/off
+ EXPECT_EQ(apm_->kNoError, apm_->level_estimator()->Enable(false));
+ EXPECT_FALSE(apm_->level_estimator()->is_enabled());
+
+ EXPECT_EQ(apm_->kNotEnabledError, apm_->level_estimator()->RMS());
+
+ EXPECT_EQ(apm_->kNoError, apm_->level_estimator()->Enable(true));
+ EXPECT_TRUE(apm_->level_estimator()->is_enabled());
+
+ // Run this test in wideband; in super-wb, the splitting filter distorts the
+ // audio enough to cause deviation from the expectation for small values.
+ EXPECT_EQ(apm_->kNoError, apm_->set_sample_rate_hz(16000));
+ frame_->_payloadDataLengthInSamples = 160;
+ frame_->_audioChannel = 2;
+ frame_->_frequencyInHz = 16000;
+
+ // Min value if no frames have been processed.
+ EXPECT_EQ(127, apm_->level_estimator()->RMS());
+
+ // Min value on zero frames.
+ SetFrameTo(frame_, 0);
+ EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+ EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+ EXPECT_EQ(127, apm_->level_estimator()->RMS());
+
+ // Try a few RMS values.
+ // (These also test that the value resets after retrieving it.)
+ SetFrameTo(frame_, 32767);
+ EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+ EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+ EXPECT_EQ(0, apm_->level_estimator()->RMS());
+
+ SetFrameTo(frame_, 30000);
+ EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+ EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+ EXPECT_EQ(1, apm_->level_estimator()->RMS());
+
+ SetFrameTo(frame_, 10000);
+ EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+ EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+ EXPECT_EQ(10, apm_->level_estimator()->RMS());
+
+ SetFrameTo(frame_, 10);
+ EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+ EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+ EXPECT_EQ(70, apm_->level_estimator()->RMS());
+
+ // Min value if _energy == 0.
+ SetFrameTo(frame_, 10000);
+ uint32_t energy = frame_->_energy; // Save default to restore below.
+ frame_->_energy = 0;
+ EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+ EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+ EXPECT_EQ(127, apm_->level_estimator()->RMS());
+ frame_->_energy = energy;
+
+ // Verify reset after enable/disable.
+ SetFrameTo(frame_, 32767);
+ EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+ EXPECT_EQ(apm_->kNoError, apm_->level_estimator()->Enable(false));
+ EXPECT_EQ(apm_->kNoError, apm_->level_estimator()->Enable(true));
+ SetFrameTo(frame_, 1);
+ EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+ EXPECT_EQ(90, apm_->level_estimator()->RMS());
+
+ // Verify reset after initialize.
+ SetFrameTo(frame_, 32767);
+ EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+ EXPECT_EQ(apm_->kNoError, apm_->Initialize());
+ SetFrameTo(frame_, 1);
+ EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+ EXPECT_EQ(90, apm_->level_estimator()->RMS());
+}
+
+TEST_F(ApmTest, VoiceDetection) {
+ // Test external VAD
+ EXPECT_EQ(apm_->kNoError,
+ apm_->voice_detection()->set_stream_has_voice(true));
+ EXPECT_TRUE(apm_->voice_detection()->stream_has_voice());
+ EXPECT_EQ(apm_->kNoError,
+ apm_->voice_detection()->set_stream_has_voice(false));
+ EXPECT_FALSE(apm_->voice_detection()->stream_has_voice());
+
+  // Testing invalid likelihoods
+ EXPECT_EQ(apm_->kBadParameterError,
+ apm_->voice_detection()->set_likelihood(
+ static_cast<VoiceDetection::Likelihood>(-1)));
+
+ EXPECT_EQ(apm_->kBadParameterError,
+ apm_->voice_detection()->set_likelihood(
+ static_cast<VoiceDetection::Likelihood>(5)));
+
+  // Testing valid likelihoods
+ VoiceDetection::Likelihood likelihood[] = {
+ VoiceDetection::kVeryLowLikelihood,
+ VoiceDetection::kLowLikelihood,
+ VoiceDetection::kModerateLikelihood,
+ VoiceDetection::kHighLikelihood
+ };
+ for (size_t i = 0; i < sizeof(likelihood)/sizeof(*likelihood); i++) {
+ EXPECT_EQ(apm_->kNoError,
+ apm_->voice_detection()->set_likelihood(likelihood[i]));
+ EXPECT_EQ(likelihood[i], apm_->voice_detection()->likelihood());
+ }
+
+ /* TODO(bjornv): Enable once VAD supports other frame lengths than 10 ms
+  // Testing invalid frame sizes
+ EXPECT_EQ(apm_->kBadParameterError,
+ apm_->voice_detection()->set_frame_size_ms(12));
+
+  // Testing valid frame sizes
+ for (int i = 10; i <= 30; i += 10) {
+ EXPECT_EQ(apm_->kNoError,
+ apm_->voice_detection()->set_frame_size_ms(i));
+ EXPECT_EQ(i, apm_->voice_detection()->frame_size_ms());
+ }
+ */
+
+  // Turning VAD on/off
+ EXPECT_EQ(apm_->kNoError, apm_->voice_detection()->Enable(true));
+ EXPECT_TRUE(apm_->voice_detection()->is_enabled());
+ EXPECT_EQ(apm_->kNoError, apm_->voice_detection()->Enable(false));
+ EXPECT_FALSE(apm_->voice_detection()->is_enabled());
+
+ // Test that AudioFrame activity is maintained when VAD is disabled.
+ EXPECT_EQ(apm_->kNoError, apm_->voice_detection()->Enable(false));
+ AudioFrame::VADActivity activity[] = {
+ AudioFrame::kVadActive,
+ AudioFrame::kVadPassive,
+ AudioFrame::kVadUnknown
+ };
+ for (size_t i = 0; i < sizeof(activity)/sizeof(*activity); i++) {
+ frame_->_vadActivity = activity[i];
+ EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+ EXPECT_EQ(activity[i], frame_->_vadActivity);
+ }
+
+ // Test that AudioFrame activity is set when VAD is enabled.
+ EXPECT_EQ(apm_->kNoError, apm_->voice_detection()->Enable(true));
+ frame_->_vadActivity = AudioFrame::kVadUnknown;
+ EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+ EXPECT_NE(AudioFrame::kVadUnknown, frame_->_vadActivity);
+
+ // TODO(bjornv): Add tests for streamed voice; stream_has_voice()
+}
+
+TEST_F(ApmTest, SplittingFilter) {
+ // Verify the filter is not active through undistorted audio when:
+ // 1. No components are enabled...
+ SetFrameTo(frame_, 1000);
+ AudioFrame frame_copy = *frame_;
+ EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+ EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+ EXPECT_TRUE(FrameDataAreEqual(*frame_, frame_copy));
+
+ // 2. Only the level estimator is enabled...
+ SetFrameTo(frame_, 1000);
+ frame_copy = *frame_;
+ EXPECT_EQ(apm_->kNoError, apm_->level_estimator()->Enable(true));
+ EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+ EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+ EXPECT_TRUE(FrameDataAreEqual(*frame_, frame_copy));
+ EXPECT_EQ(apm_->kNoError, apm_->level_estimator()->Enable(false));
+
+ // 3. Only VAD is enabled...
+ SetFrameTo(frame_, 1000);
+ frame_copy = *frame_;
+ EXPECT_EQ(apm_->kNoError, apm_->voice_detection()->Enable(true));
+ EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+ EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+ EXPECT_TRUE(FrameDataAreEqual(*frame_, frame_copy));
+ EXPECT_EQ(apm_->kNoError, apm_->voice_detection()->Enable(false));
+
+ // 4. Both VAD and the level estimator are enabled...
+ SetFrameTo(frame_, 1000);
+ frame_copy = *frame_;
+ EXPECT_EQ(apm_->kNoError, apm_->level_estimator()->Enable(true));
+ EXPECT_EQ(apm_->kNoError, apm_->voice_detection()->Enable(true));
+ EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+ EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+ EXPECT_TRUE(FrameDataAreEqual(*frame_, frame_copy));
+ EXPECT_EQ(apm_->kNoError, apm_->level_estimator()->Enable(false));
+ EXPECT_EQ(apm_->kNoError, apm_->voice_detection()->Enable(false));
+
+ // 5. Not using super-wb.
+ EXPECT_EQ(apm_->kNoError, apm_->set_sample_rate_hz(16000));
+ frame_->_payloadDataLengthInSamples = 160;
+ frame_->_audioChannel = 2;
+ frame_->_frequencyInHz = 16000;
+ // Enable AEC, which would require the filter in super-wb. We rely on the
+ // first few frames of data being unaffected by the AEC.
+ // TODO(andrew): This test, and the one below, rely rather tenuously on the
+ // behavior of the AEC. Think of something more robust.
+ EXPECT_EQ(apm_->kNoError, apm_->echo_cancellation()->Enable(true));
+ SetFrameTo(frame_, 1000);
+ frame_copy = *frame_;
+ EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(0));
+ EXPECT_EQ(apm_->kNoError,
+ apm_->echo_cancellation()->set_stream_drift_samples(0));
+ EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+ EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(0));
+ EXPECT_EQ(apm_->kNoError,
+ apm_->echo_cancellation()->set_stream_drift_samples(0));
+ EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+ EXPECT_TRUE(FrameDataAreEqual(*frame_, frame_copy));
+
+ // Check the test is valid. We should have distortion from the filter
+ // when AEC is enabled (which won't affect the audio).
+ EXPECT_EQ(apm_->kNoError, apm_->set_sample_rate_hz(32000));
+ frame_->_payloadDataLengthInSamples = 320;
+ frame_->_audioChannel = 2;
+ frame_->_frequencyInHz = 32000;
+ SetFrameTo(frame_, 1000);
+ frame_copy = *frame_;
+ EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(0));
+ EXPECT_EQ(apm_->kNoError,
+ apm_->echo_cancellation()->set_stream_drift_samples(0));
+ EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+ EXPECT_FALSE(FrameDataAreEqual(*frame_, frame_copy));
+}
+
+// TODO(andrew): expand test to verify output.
+TEST_F(ApmTest, DebugDump) {
+ const std::string filename = webrtc::test::OutputPath() + "debug.aec";
+ EXPECT_EQ(apm_->kNullPointerError, apm_->StartDebugRecording(NULL));
+
+#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
+ // Stopping without having started should be OK.
+ EXPECT_EQ(apm_->kNoError, apm_->StopDebugRecording());
+
+ EXPECT_EQ(apm_->kNoError, apm_->StartDebugRecording(filename.c_str()));
+ EXPECT_EQ(apm_->kNoError, apm_->AnalyzeReverseStream(revframe_));
+ EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+ EXPECT_EQ(apm_->kNoError, apm_->StopDebugRecording());
+
+ // Verify the file has been written.
+ ASSERT_TRUE(fopen(filename.c_str(), "r") != NULL);
+ // Clean it up.
+ ASSERT_EQ(0, remove(filename.c_str()));
+#else
+ EXPECT_EQ(apm_->kUnsupportedFunctionError,
+ apm_->StartDebugRecording(filename.c_str()));
+ EXPECT_EQ(apm_->kUnsupportedFunctionError, apm_->StopDebugRecording());
+
+ // Verify the file has NOT been written.
+ ASSERT_TRUE(fopen(filename.c_str(), "r") == NULL);
+#endif // WEBRTC_AUDIOPROC_DEBUG_DUMP
+}
+
+TEST_F(ApmTest, Process) {
+ GOOGLE_PROTOBUF_VERIFY_VERSION;
+ webrtc::audioproc::OutputData output_data;
+
+ if (!write_output_data) {
+ ReadMessageLiteFromFile(output_filename, &output_data);
+ } else {
+ // We don't have a file; add the required tests to the protobuf.
+ // TODO(ajm): vary the output channels as well?
+ const int channels[] = {1, 2};
+ const size_t channels_size = sizeof(channels) / sizeof(*channels);
+#if defined(WEBRTC_APM_UNIT_TEST_FIXED_PROFILE)
+ // AECM doesn't support super-wb.
+ const int sample_rates[] = {8000, 16000};
+#elif defined(WEBRTC_APM_UNIT_TEST_FLOAT_PROFILE)
+ const int sample_rates[] = {8000, 16000, 32000};
+#endif
+ const size_t sample_rates_size = sizeof(sample_rates) / sizeof(*sample_rates);
+ for (size_t i = 0; i < channels_size; i++) {
+ for (size_t j = 0; j < channels_size; j++) {
+ for (size_t k = 0; k < sample_rates_size; k++) {
+ webrtc::audioproc::Test* test = output_data.add_test();
+ test->set_num_reverse_channels(channels[i]);
+ test->set_num_input_channels(channels[j]);
+ test->set_num_output_channels(channels[j]);
+ test->set_sample_rate(sample_rates[k]);
+ }
+ }
+ }
+ }
+
+#if defined(WEBRTC_APM_UNIT_TEST_FIXED_PROFILE)
+ EXPECT_EQ(apm_->kNoError, apm_->set_sample_rate_hz(16000));
+ EXPECT_EQ(apm_->kNoError, apm_->echo_control_mobile()->Enable(true));
+
+ EXPECT_EQ(apm_->kNoError,
+ apm_->gain_control()->set_mode(GainControl::kAdaptiveDigital));
+ EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(true));
+#elif defined(WEBRTC_APM_UNIT_TEST_FLOAT_PROFILE)
+ EXPECT_EQ(apm_->kNoError,
+ apm_->echo_cancellation()->enable_drift_compensation(true));
+ EXPECT_EQ(apm_->kNoError,
+ apm_->echo_cancellation()->enable_metrics(true));
+ EXPECT_EQ(apm_->kNoError,
+ apm_->echo_cancellation()->enable_delay_logging(true));
+ EXPECT_EQ(apm_->kNoError, apm_->echo_cancellation()->Enable(true));
+
+ EXPECT_EQ(apm_->kNoError,
+ apm_->gain_control()->set_mode(GainControl::kAdaptiveAnalog));
+ EXPECT_EQ(apm_->kNoError,
+ apm_->gain_control()->set_analog_level_limits(0, 255));
+ EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(true));
+#endif
+
+ EXPECT_EQ(apm_->kNoError,
+ apm_->high_pass_filter()->Enable(true));
+
+ EXPECT_EQ(apm_->kNoError,
+ apm_->level_estimator()->Enable(true));
+
+ EXPECT_EQ(apm_->kNoError,
+ apm_->noise_suppression()->Enable(true));
+
+ EXPECT_EQ(apm_->kNoError,
+ apm_->voice_detection()->Enable(true));
+
+ for (int i = 0; i < output_data.test_size(); i++) {
+ printf("Running test %d of %d...\n", i + 1, output_data.test_size());
+
+ webrtc::audioproc::Test* test = output_data.mutable_test(i);
+ const int samples_per_channel = test->sample_rate() / 100;
+ revframe_->_payloadDataLengthInSamples = samples_per_channel;
+ revframe_->_audioChannel = test->num_reverse_channels();
+ revframe_->_frequencyInHz = test->sample_rate();
+ frame_->_payloadDataLengthInSamples = samples_per_channel;
+ frame_->_audioChannel = test->num_input_channels();
+ frame_->_frequencyInHz = test->sample_rate();
+
+ EXPECT_EQ(apm_->kNoError, apm_->Initialize());
+ ASSERT_EQ(apm_->kNoError, apm_->set_sample_rate_hz(test->sample_rate()));
+ ASSERT_EQ(apm_->kNoError, apm_->set_num_channels(frame_->_audioChannel,
+ frame_->_audioChannel));
+ ASSERT_EQ(apm_->kNoError,
+ apm_->set_num_reverse_channels(revframe_->_audioChannel));
+
+ int frame_count = 0;
+ int has_echo_count = 0;
+ int has_voice_count = 0;
+ int is_saturated_count = 0;
+ int analog_level = 127;
+ int analog_level_average = 0;
+ int max_output_average = 0;
+
+ while (1) {
+ // Read far-end frame
+ const size_t frame_size = samples_per_channel * 2;
+ size_t read_count = fread(revframe_->_payloadData,
+ sizeof(int16_t),
+ frame_size,
+ far_file_);
+ if (read_count != frame_size) {
+ // Check that the file really ended.
+ ASSERT_NE(0, feof(far_file_));
+ break; // This is expected.
+ }
+
+ if (revframe_->_audioChannel == 1) {
+ MixStereoToMono(revframe_->_payloadData, revframe_->_payloadData,
+ samples_per_channel);
+ }
+
+ EXPECT_EQ(apm_->kNoError, apm_->AnalyzeReverseStream(revframe_));
+
+ EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(0));
+ EXPECT_EQ(apm_->kNoError,
+ apm_->echo_cancellation()->set_stream_drift_samples(0));
+ EXPECT_EQ(apm_->kNoError,
+ apm_->gain_control()->set_stream_analog_level(analog_level));
+
+ // Read near-end frame
+ read_count = fread(frame_->_payloadData,
+ sizeof(int16_t),
+ frame_size,
+ near_file_);
+ if (read_count != frame_size) {
+ // Check that the file really ended.
+ ASSERT_NE(0, feof(near_file_));
+ break; // This is expected.
+ }
+
+ if (frame_->_audioChannel == 1) {
+ MixStereoToMono(frame_->_payloadData, frame_->_payloadData,
+ samples_per_channel);
+ }
+ frame_->_vadActivity = AudioFrame::kVadUnknown;
+
+ EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+
+ max_output_average += MaxAudioFrame(*frame_);
+
+ if (apm_->echo_cancellation()->stream_has_echo()) {
+ has_echo_count++;
+ }
+
+ analog_level = apm_->gain_control()->stream_analog_level();
+ analog_level_average += analog_level;
+ if (apm_->gain_control()->stream_is_saturated()) {
+ is_saturated_count++;
+ }
+ if (apm_->voice_detection()->stream_has_voice()) {
+ has_voice_count++;
+ EXPECT_EQ(AudioFrame::kVadActive, frame_->_vadActivity);
+ } else {
+ EXPECT_EQ(AudioFrame::kVadPassive, frame_->_vadActivity);
+ }
+
+ frame_count++;
+ }
+ max_output_average /= frame_count;
+ analog_level_average /= frame_count;
+
+#if defined(WEBRTC_APM_UNIT_TEST_FLOAT_PROFILE)
+ EchoCancellation::Metrics echo_metrics;
+ EXPECT_EQ(apm_->kNoError,
+ apm_->echo_cancellation()->GetMetrics(&echo_metrics));
+ int median = 0;
+ int std = 0;
+ EXPECT_EQ(apm_->kNoError,
+ apm_->echo_cancellation()->GetDelayMetrics(&median, &std));
+
+ int rms_level = apm_->level_estimator()->RMS();
+ EXPECT_LE(0, rms_level);
+ EXPECT_GE(127, rms_level);
+#endif
+
+ if (!write_output_data) {
+ EXPECT_EQ(test->has_echo_count(), has_echo_count);
+ EXPECT_EQ(test->has_voice_count(), has_voice_count);
+ EXPECT_EQ(test->is_saturated_count(), is_saturated_count);
+
+ EXPECT_EQ(test->analog_level_average(), analog_level_average);
+ EXPECT_EQ(test->max_output_average(), max_output_average);
+
+#if defined(WEBRTC_APM_UNIT_TEST_FLOAT_PROFILE)
+ webrtc::audioproc::Test::EchoMetrics reference =
+ test->echo_metrics();
+ TestStats(echo_metrics.residual_echo_return_loss,
+ reference.residual_echo_return_loss());
+ TestStats(echo_metrics.echo_return_loss,
+ reference.echo_return_loss());
+ TestStats(echo_metrics.echo_return_loss_enhancement,
+ reference.echo_return_loss_enhancement());
+ TestStats(echo_metrics.a_nlp,
+ reference.a_nlp());
+
+ webrtc::audioproc::Test::DelayMetrics reference_delay =
+ test->delay_metrics();
+ EXPECT_EQ(reference_delay.median(), median);
+ EXPECT_EQ(reference_delay.std(), std);
+
+ EXPECT_EQ(test->rms_level(), rms_level);
+#endif
+ } else {
+ test->set_has_echo_count(has_echo_count);
+ test->set_has_voice_count(has_voice_count);
+ test->set_is_saturated_count(is_saturated_count);
+
+ test->set_analog_level_average(analog_level_average);
+ test->set_max_output_average(max_output_average);
+
+#if defined(WEBRTC_APM_UNIT_TEST_FLOAT_PROFILE)
+ webrtc::audioproc::Test::EchoMetrics* message =
+ test->mutable_echo_metrics();
+ WriteStatsMessage(echo_metrics.residual_echo_return_loss,
+ message->mutable_residual_echo_return_loss());
+ WriteStatsMessage(echo_metrics.echo_return_loss,
+ message->mutable_echo_return_loss());
+ WriteStatsMessage(echo_metrics.echo_return_loss_enhancement,
+ message->mutable_echo_return_loss_enhancement());
+ WriteStatsMessage(echo_metrics.a_nlp,
+ message->mutable_a_nlp());
+
+ webrtc::audioproc::Test::DelayMetrics* message_delay =
+ test->mutable_delay_metrics();
+ message_delay->set_median(median);
+ message_delay->set_std(std);
+
+ test->set_rms_level(rms_level);
+#endif
+ }
+
+ rewind(far_file_);
+ rewind(near_file_);
+ }
+
+ if (write_output_data) {
+ WriteMessageLiteToFile(output_filename, output_data);
+ }
+}
+} // namespace
+
+int main(int argc, char** argv) {
+ ::testing::InitGoogleTest(&argc, argv);
+
+ for (int i = 1; i < argc; i++) {
+ if (strcmp(argv[i], "--write_output_data") == 0) {
+ write_output_data = true;
+ }
+ }
+
+ int err = RUN_ALL_TESTS();
+
+ // Optional, but removes memory leak noise from Valgrind.
+ google::protobuf::ShutdownProtobufLibrary();
+ return err;
+}
diff --git a/src/modules/audio_processing/test/unittest.proto b/src/modules/audio_processing/test/unittest.proto
new file mode 100644
index 0000000000..67ba722b3a
--- /dev/null
+++ b/src/modules/audio_processing/test/unittest.proto
@@ -0,0 +1,52 @@
+syntax = "proto2";
+option optimize_for = LITE_RUNTIME;
+package webrtc.audioproc;
+
+// Reference results for one audio_processing unit-test configuration:
+// the channel counts / sample rate exercised, plus the statistics the
+// test run accumulated for that configuration (see unit_test.cc).
+message Test {
+ optional int32 num_reverse_channels = 1;
+ optional int32 num_input_channels = 2;
+ optional int32 num_output_channels = 3;
+ optional int32 sample_rate = 4;
+
+ // Placeholder for per-frame data; currently carries no fields.
+ message Frame {
+ }
+
+ repeated Frame frame = 5;
+
+ optional int32 analog_level_average = 6;
+ optional int32 max_output_average = 7;
+
+ // Frame counts accumulated over the processed stream.
+ optional int32 has_echo_count = 8;
+ optional int32 has_voice_count = 9;
+ optional int32 is_saturated_count = 10;
+
+ // One tracked metric summarized as instantaneous, average, and
+ // extreme values (mirrors the stats struct written by
+ // WriteStatsMessage / checked by TestStats in unit_test.cc).
+ message Statistic {
+ optional int32 instant = 1;
+ optional int32 average = 2;
+ optional int32 maximum = 3;
+ optional int32 minimum = 4;
+ }
+
+ message EchoMetrics {
+ optional Statistic residual_echo_return_loss = 1;
+ optional Statistic echo_return_loss = 2;
+ optional Statistic echo_return_loss_enhancement = 3;
+ optional Statistic a_nlp = 4;
+ }
+
+ optional EchoMetrics echo_metrics = 11;
+
+ message DelayMetrics {
+ optional int32 median = 1;
+ optional int32 std = 2;
+ }
+
+ optional DelayMetrics delay_metrics = 12;
+
+ optional int32 rms_level = 13;
+}
+
+// Top-level container: one Test entry per configuration exercised.
+message OutputData {
+ repeated Test test = 1;
+}
+
diff --git a/src/modules/audio_processing/test/unpack.cc b/src/modules/audio_processing/test/unpack.cc
new file mode 100644
index 0000000000..23371317d7
--- /dev/null
+++ b/src/modules/audio_processing/test/unpack.cc
@@ -0,0 +1,216 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Commandline tool to unpack audioproc debug files.
+//
+// The debug files are dumped as protobuf blobs. For analysis, it's necessary
+// to unpack the file into its component parts: audio and other data.
+
+#include <stdio.h>
+
+#include "google/gflags.h"
+#include "scoped_ptr.h"
+#include "typedefs.h"
+#include "webrtc/audio_processing/debug.pb.h"
+
+using webrtc::scoped_array;
+
+using webrtc::audioproc::Event;
+using webrtc::audioproc::ReverseStream;
+using webrtc::audioproc::Stream;
+using webrtc::audioproc::Init;
+
+// TODO(andrew): unpack more of the data.
+// Output filenames for each unpacked component. The delay/drift/level
+// files are written only when --full is given (see FLAGS_full in main).
+DEFINE_string(input_file, "input.pcm", "The name of the input stream file.");
+DEFINE_string(output_file, "ref_out.pcm",
+ "The name of the reference output stream file.");
+DEFINE_string(reverse_file, "reverse.pcm",
+ "The name of the reverse input stream file.");
+DEFINE_string(delay_file, "delay.int32", "The name of the delay file.");
+DEFINE_string(drift_file, "drift.int32", "The name of the drift file.");
+DEFINE_string(level_file, "level.int32", "The name of the level file.");
+DEFINE_string(settings_file, "settings.txt", "The name of the settings file.");
+DEFINE_bool(full, false,
+ "Unpack the full set of files (normally not needed).");
+
+// TODO(andrew): move this to a helper class to share with process_test.cc?
+// Reads one length-prefixed protobuf message from |file| into |msg|: a
+// little-endian int32 byte count followed by that many bytes of serialized
+// message data (presumably written by process_test.cc in the same layout —
+// verify against its writer).
+// Returns true on success; false on read error, short read, corrupt size,
+// parse failure, or end-of-file.
+bool ReadMessageFromFile(FILE* file,
+ ::google::protobuf::MessageLite* msg) {
+ // The "wire format" for the size is little-endian.
+ // Assume process_test is running on a little-endian machine.
+ int32_t size = 0;
+ if (fread(&size, sizeof(int32_t), 1, file) != 1) {
+ return false; // Clean EOF and a read error are indistinguishable here.
+ }
+ if (size <= 0) {
+ return false; // A non-positive length indicates a corrupt file.
+ }
+ const size_t usize = static_cast<size_t>(size);
+
+ scoped_array<char> array(new char[usize]);
+ if (fread(array.get(), sizeof(char), usize, file) != usize) {
+ return false;
+ }
+
+ msg->Clear();
+ return msg->ParseFromArray(array.get(), usize);
+}
+
+// Writes |size| bytes from |data| to |file|, treating an empty payload as
+// success. The previous inline checks used fwrite(ptr, size, 1, f) != 1,
+// which misreports an error when size == 0 because fwrite then returns 0.
+// Prints an error naming |filename| and returns false on a real failure.
+static bool WriteData(const void* data, size_t size, FILE* file,
+                      const std::string& filename) {
+  if (size == 0) {
+    return true;
+  }
+  if (fwrite(data, size, 1, file) != 1) {
+    printf("Error when writing to %s\n", filename.c_str());
+    return false;
+  }
+  return true;
+}
+
+int main(int argc, char* argv[]) {
+  std::string program_name = argv[0];
+  std::string usage = "Commandline tool to unpack audioproc debug files.\n"
+                      "Example usage:\n" + program_name + " debug_dump.pb\n";
+  google::SetUsageMessage(usage);
+  google::ParseCommandLineFlags(&argc, &argv, true);
+
+  if (argc < 2) {
+    printf("%s", google::ProgramUsage());
+    return 1;
+  }
+
+  // On the error paths below we return immediately; process exit reclaims
+  // any files already opened.
+  FILE* debug_file = fopen(argv[1], "rb");
+  if (debug_file == NULL) {
+    printf("Unable to open %s\n", argv[1]);
+    return 1;
+  }
+  FILE* input_file = fopen(FLAGS_input_file.c_str(), "wb");
+  if (input_file == NULL) {
+    printf("Unable to open %s\n", FLAGS_input_file.c_str());
+    return 1;
+  }
+  FILE* output_file = fopen(FLAGS_output_file.c_str(), "wb");
+  if (output_file == NULL) {
+    printf("Unable to open %s\n", FLAGS_output_file.c_str());
+    return 1;
+  }
+  FILE* reverse_file = fopen(FLAGS_reverse_file.c_str(), "wb");
+  if (reverse_file == NULL) {
+    printf("Unable to open %s\n", FLAGS_reverse_file.c_str());
+    return 1;
+  }
+  FILE* settings_file = fopen(FLAGS_settings_file.c_str(), "wb");
+  if (settings_file == NULL) {
+    printf("Unable to open %s\n", FLAGS_settings_file.c_str());
+    return 1;
+  }
+
+  // The full set of dump files is only produced on request.
+  FILE* delay_file = NULL;
+  FILE* drift_file = NULL;
+  FILE* level_file = NULL;
+  if (FLAGS_full) {
+    delay_file = fopen(FLAGS_delay_file.c_str(), "wb");
+    if (delay_file == NULL) {
+      printf("Unable to open %s\n", FLAGS_delay_file.c_str());
+      return 1;
+    }
+    drift_file = fopen(FLAGS_drift_file.c_str(), "wb");
+    if (drift_file == NULL) {
+      printf("Unable to open %s\n", FLAGS_drift_file.c_str());
+      return 1;
+    }
+    level_file = fopen(FLAGS_level_file.c_str(), "wb");
+    if (level_file == NULL) {
+      printf("Unable to open %s\n", FLAGS_level_file.c_str());
+      return 1;
+    }
+  }
+
+  // Demultiplex the event stream: audio payloads go to the pcm files,
+  // per-frame ints to the --full files, and Init records to settings.
+  Event event_msg;
+  int frame_count = 0;
+  while (ReadMessageFromFile(debug_file, &event_msg)) {
+    if (event_msg.type() == Event::REVERSE_STREAM) {
+      if (!event_msg.has_reverse_stream()) {
+        printf("Corrupted input file: ReverseStream missing.\n");
+        return 1;
+      }
+
+      // Const reference: avoids copying the message and its audio payload.
+      const ReverseStream& msg = event_msg.reverse_stream();
+      if (msg.has_data() &&
+          !WriteData(msg.data().data(), msg.data().size(), reverse_file,
+                     FLAGS_reverse_file)) {
+        return 1;
+      }
+    } else if (event_msg.type() == Event::STREAM) {
+      frame_count++;
+      if (!event_msg.has_stream()) {
+        printf("Corrupted input file: Stream missing.\n");
+        return 1;
+      }
+
+      const Stream& msg = event_msg.stream();
+      if (msg.has_input_data() &&
+          !WriteData(msg.input_data().data(), msg.input_data().size(),
+                     input_file, FLAGS_input_file)) {
+        return 1;
+      }
+
+      if (msg.has_output_data() &&
+          !WriteData(msg.output_data().data(), msg.output_data().size(),
+                     output_file, FLAGS_output_file)) {
+        return 1;
+      }
+
+      if (FLAGS_full) {
+        if (msg.has_delay()) {
+          const int32_t delay = msg.delay();
+          if (!WriteData(&delay, sizeof(delay), delay_file,
+                         FLAGS_delay_file)) {
+            return 1;
+          }
+        }
+
+        if (msg.has_drift()) {
+          const int32_t drift = msg.drift();
+          if (!WriteData(&drift, sizeof(drift), drift_file,
+                         FLAGS_drift_file)) {
+            return 1;
+          }
+        }
+
+        if (msg.has_level()) {
+          const int32_t level = msg.level();
+          if (!WriteData(&level, sizeof(level), level_file,
+                         FLAGS_level_file)) {
+            return 1;
+          }
+        }
+      }
+    } else if (event_msg.type() == Event::INIT) {
+      if (!event_msg.has_init()) {
+        printf("Corrupted input file: Init missing.\n");
+        return 1;
+      }
+
+      const Init& msg = event_msg.init();
+      // These should print out zeros if they're missing.
+      fprintf(settings_file, "Init at frame: %d\n", frame_count);
+      fprintf(settings_file, " Sample rate: %d\n", msg.sample_rate());
+      fprintf(settings_file, " Device sample rate: %d\n",
+              msg.device_sample_rate());
+      fprintf(settings_file, " Input channels: %d\n",
+              msg.num_input_channels());
+      fprintf(settings_file, " Output channels: %d\n",
+              msg.num_output_channels());
+      fprintf(settings_file, " Reverse channels: %d\n",
+              msg.num_reverse_channels());
+
+      fprintf(settings_file, "\n");
+    }
+    // Unknown event types are skipped for forward compatibility.
+  }
+
+  // Close everything so buffered output is flushed deterministically.
+  fclose(debug_file);
+  fclose(input_file);
+  fclose(output_file);
+  fclose(reverse_file);
+  fclose(settings_file);
+  if (delay_file != NULL) fclose(delay_file);
+  if (drift_file != NULL) fclose(drift_file);
+  if (level_file != NULL) fclose(level_file);
+
+  return 0;
+}