Diffstat (limited to 'webrtc/modules/audio_coding/test')
-rw-r--r--  webrtc/modules/audio_coding/test/ACMTest.h | 21
-rw-r--r--  webrtc/modules/audio_coding/test/APITest.cc | 1104
-rw-r--r--  webrtc/modules/audio_coding/test/APITest.h | 163
-rw-r--r--  webrtc/modules/audio_coding/test/Channel.cc | 424
-rw-r--r--  webrtc/modules/audio_coding/test/Channel.h | 130
-rw-r--r--  webrtc/modules/audio_coding/test/EncodeDecodeTest.cc | 355
-rw-r--r--  webrtc/modules/audio_coding/test/EncodeDecodeTest.h | 123
-rw-r--r--  webrtc/modules/audio_coding/test/PCMFile.cc | 221
-rw-r--r--  webrtc/modules/audio_coding/test/PCMFile.h | 80
-rw-r--r--  webrtc/modules/audio_coding/test/PacketLossTest.cc | 167
-rw-r--r--  webrtc/modules/audio_coding/test/PacketLossTest.h | 67
-rw-r--r--  webrtc/modules/audio_coding/test/RTPFile.cc | 227
-rw-r--r--  webrtc/modules/audio_coding/test/RTPFile.h | 126
-rw-r--r--  webrtc/modules/audio_coding/test/SpatialAudio.cc | 196
-rw-r--r--  webrtc/modules/audio_coding/test/SpatialAudio.h | 47
-rw-r--r--  webrtc/modules/audio_coding/test/TestAllCodecs.cc | 489
-rw-r--r--  webrtc/modules/audio_coding/test/TestAllCodecs.h | 84
-rw-r--r--  webrtc/modules/audio_coding/test/TestRedFec.cc | 480
-rw-r--r--  webrtc/modules/audio_coding/test/TestRedFec.h | 51
-rw-r--r--  webrtc/modules/audio_coding/test/TestStereo.cc | 844
-rw-r--r--  webrtc/modules/audio_coding/test/TestStereo.h | 117
-rw-r--r--  webrtc/modules/audio_coding/test/TestVADDTX.cc | 276
-rw-r--r--  webrtc/modules/audio_coding/test/TestVADDTX.h | 102
-rw-r--r--  webrtc/modules/audio_coding/test/Tester.cc | 181
-rw-r--r--  webrtc/modules/audio_coding/test/TimedTrace.cc | 58
-rw-r--r--  webrtc/modules/audio_coding/test/TimedTrace.h | 36
-rw-r--r--  webrtc/modules/audio_coding/test/TwoWayCommunication.cc | 299
-rw-r--r--  webrtc/modules/audio_coding/test/TwoWayCommunication.h | 60
-rw-r--r--  webrtc/modules/audio_coding/test/delay_test.cc | 265
-rw-r--r--  webrtc/modules/audio_coding/test/iSACTest.cc | 343
-rw-r--r--  webrtc/modules/audio_coding/test/iSACTest.h | 79
-rw-r--r--  webrtc/modules/audio_coding/test/insert_packet_with_timing.cc | 307
-rw-r--r--  webrtc/modules/audio_coding/test/opus_test.cc | 383
-rw-r--r--  webrtc/modules/audio_coding/test/opus_test.h | 60
-rw-r--r--  webrtc/modules/audio_coding/test/target_delay_unittest.cc | 249
-rw-r--r--  webrtc/modules/audio_coding/test/utility.cc | 303
-rw-r--r--  webrtc/modules/audio_coding/test/utility.h | 139
37 files changed, 8656 insertions, 0 deletions
diff --git a/webrtc/modules/audio_coding/test/ACMTest.h b/webrtc/modules/audio_coding/test/ACMTest.h
new file mode 100644
index 0000000000..d7e87d34ba
--- /dev/null
+++ b/webrtc/modules/audio_coding/test/ACMTest.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_CODING_TEST_ACMTEST_H_
+#define WEBRTC_MODULES_AUDIO_CODING_TEST_ACMTEST_H_
+
+class ACMTest {
+ public:
+ ACMTest() {}
+ virtual ~ACMTest() {}
+ virtual void Perform() = 0;
+};
+
+#endif // WEBRTC_MODULES_AUDIO_CODING_TEST_ACMTEST_H_
diff --git a/webrtc/modules/audio_coding/test/APITest.cc b/webrtc/modules/audio_coding/test/APITest.cc
new file mode 100644
index 0000000000..bf04d7c825
--- /dev/null
+++ b/webrtc/modules/audio_coding/test/APITest.cc
@@ -0,0 +1,1104 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_coding/test/APITest.h"
+
+#include <ctype.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <iostream>
+#include <ostream>
+#include <string>
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/base/platform_thread.h"
+#include "webrtc/common.h"
+#include "webrtc/common_types.h"
+#include "webrtc/engine_configurations.h"
+#include "webrtc/modules/audio_coding/acm2/acm_common_defs.h"
+#include "webrtc/modules/audio_coding/test/utility.h"
+#include "webrtc/system_wrappers/include/event_wrapper.h"
+#include "webrtc/system_wrappers/include/tick_util.h"
+#include "webrtc/system_wrappers/include/trace.h"
+#include "webrtc/test/testsupport/fileutils.h"
+
+namespace webrtc {
+
+#define TEST_DURATION_SEC 600
+#define NUMBER_OF_SENDER_TESTS 6
+#define MAX_FILE_NAME_LENGTH_BYTE 500
+
+void APITest::Wait(uint32_t waitLengthMs) {
+ if (_randomTest) {
+ return;
+ } else {
+ EventWrapper* myEvent = EventWrapper::Create();
+ myEvent->Wait(waitLengthMs);
+ delete myEvent;
+ return;
+ }
+}
+
+APITest::APITest(const Config& config)
+ : _acmA(AudioCodingModule::Create(1)),
+ _acmB(AudioCodingModule::Create(2)),
+ _channel_A2B(NULL),
+ _channel_B2A(NULL),
+ _writeToFile(true),
+ _pullEventA(NULL),
+ _pushEventA(NULL),
+ _processEventA(NULL),
+ _apiEventA(NULL),
+ _pullEventB(NULL),
+ _pushEventB(NULL),
+ _processEventB(NULL),
+ _apiEventB(NULL),
+ _codecCntrA(0),
+ _codecCntrB(0),
+ _thereIsEncoderA(false),
+ _thereIsEncoderB(false),
+ _thereIsDecoderA(false),
+ _thereIsDecoderB(false),
+ _sendVADA(false),
+ _sendDTXA(false),
+ _sendVADModeA(VADNormal),
+ _sendVADB(false),
+ _sendDTXB(false),
+ _sendVADModeB(VADNormal),
+ _minDelayA(0),
+ _minDelayB(0),
+ _dotPositionA(0),
+ _dotMoveDirectionA(1),
+ _dotPositionB(39),
+ _dotMoveDirectionB(-1),
+ _vadCallbackA(NULL),
+ _vadCallbackB(NULL),
+ _apiTestRWLock(*RWLockWrapper::CreateRWLock()),
+ _randomTest(false),
+ _testNumA(0),
+ _testNumB(1) {
+ int n;
+ for (n = 0; n < 32; n++) {
+ _payloadUsed[n] = false;
+ }
+
+ _movingDot[40] = '\0';
+
+ for (int n = 0; n < 40; n++) {
+ _movingDot[n] = ' ';
+ }
+}
+
+APITest::~APITest() {
+ DELETE_POINTER(_channel_A2B);
+ DELETE_POINTER(_channel_B2A);
+
+ DELETE_POINTER(_pushEventA);
+ DELETE_POINTER(_pullEventA);
+ DELETE_POINTER(_processEventA);
+ DELETE_POINTER(_apiEventA);
+
+ DELETE_POINTER(_pushEventB);
+ DELETE_POINTER(_pullEventB);
+ DELETE_POINTER(_processEventB);
+ DELETE_POINTER(_apiEventB);
+
+ _inFileA.Close();
+ _outFileA.Close();
+
+ _inFileB.Close();
+ _outFileB.Close();
+
+ DELETE_POINTER(_vadCallbackA);
+ DELETE_POINTER(_vadCallbackB);
+
+ delete &_apiTestRWLock;
+}
+
+int16_t APITest::SetUp() {
+ CodecInst dummyCodec;
+ int lastPayloadType = 0;
+
+ int16_t numCodecs = _acmA->NumberOfCodecs();
+ for (uint8_t n = 0; n < numCodecs; n++) {
+ AudioCodingModule::Codec(n, &dummyCodec);
+ if ((STR_CASE_CMP(dummyCodec.plname, "CN") == 0)
+ && (dummyCodec.plfreq == 32000)) {
+ continue;
+ }
+
+ printf("Register Receive Codec %s ", dummyCodec.plname);
+
+ if ((n != 0) && !FixedPayloadTypeCodec(dummyCodec.plname)) {
+ // Check registration with an already occupied payload type
+ int currentPayloadType = dummyCodec.pltype;
+ dummyCodec.pltype = 97; //lastPayloadType;
+ CHECK_ERROR(_acmB->RegisterReceiveCodec(dummyCodec));
+ dummyCodec.pltype = currentPayloadType;
+ }
+
+ if ((n < numCodecs - 1) && !FixedPayloadTypeCodec(dummyCodec.plname)) {
+      // Test whether re-registration works.
+ CodecInst nextCodec;
+ int currentPayloadType = dummyCodec.pltype;
+ AudioCodingModule::Codec(n + 1, &nextCodec);
+ dummyCodec.pltype = nextCodec.pltype;
+ if (!FixedPayloadTypeCodec(nextCodec.plname)) {
+ _acmB->RegisterReceiveCodec(dummyCodec);
+ }
+ dummyCodec.pltype = currentPayloadType;
+ }
+
+ if ((n < numCodecs - 1) && !FixedPayloadTypeCodec(dummyCodec.plname)) {
+      // Test whether un-registration works.
+ CodecInst nextCodec;
+ AudioCodingModule::Codec(n + 1, &nextCodec);
+ nextCodec.pltype = dummyCodec.pltype;
+ if (!FixedPayloadTypeCodec(nextCodec.plname)) {
+ CHECK_ERROR_MT(_acmA->RegisterReceiveCodec(nextCodec));
+ CHECK_ERROR_MT(_acmA->UnregisterReceiveCodec(nextCodec.pltype));
+ }
+ }
+
+ CHECK_ERROR_MT(_acmA->RegisterReceiveCodec(dummyCodec));
+ printf(" side A done!");
+ CHECK_ERROR_MT(_acmB->RegisterReceiveCodec(dummyCodec));
+ printf(" side B done!\n");
+
+ if (!strcmp(dummyCodec.plname, "CN")) {
+ CHECK_ERROR_MT(_acmA->RegisterSendCodec(dummyCodec));
+ CHECK_ERROR_MT(_acmB->RegisterSendCodec(dummyCodec));
+ }
+ lastPayloadType = dummyCodec.pltype;
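+    // Dynamic payload types 96-127 map onto slots 0-31 of _payloadUsed.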
+ if ((lastPayloadType >= 96) && (lastPayloadType <= 127)) {
+ _payloadUsed[lastPayloadType - 96] = true;
+ }
+ }
+ _thereIsDecoderA = true;
+ _thereIsDecoderB = true;
+
+ // Register Send Codec
+ AudioCodingModule::Codec((uint8_t) _codecCntrA, &dummyCodec);
+ CHECK_ERROR_MT(_acmA->RegisterSendCodec(dummyCodec));
+ _thereIsEncoderA = true;
+ //
+ AudioCodingModule::Codec((uint8_t) _codecCntrB, &dummyCodec);
+ CHECK_ERROR_MT(_acmB->RegisterSendCodec(dummyCodec));
+ _thereIsEncoderB = true;
+
+ uint16_t frequencyHz;
+
+ printf("\n\nAPI Test\n");
+ printf("========\n");
+ printf("Hit enter to accept the default values indicated in []\n\n");
+
+ //--- Input A
+ std::string file_name = webrtc::test::ResourcePath(
+ "audio_coding/testfile32kHz", "pcm");
+ frequencyHz = 32000;
+ printf("Enter input file at side A [%s]: ", file_name.c_str());
+ PCMFile::ChooseFile(&file_name, 499, &frequencyHz);
+ _inFileA.Open(file_name, frequencyHz, "rb", true);
+
+ //--- Output A
+ std::string out_file_a = webrtc::test::OutputPath() + "outA.pcm";
+ printf("Enter output file at side A [%s]: ", out_file_a.c_str());
+ PCMFile::ChooseFile(&out_file_a, 499, &frequencyHz);
+ _outFileA.Open(out_file_a, frequencyHz, "wb");
+
+ //--- Input B
+ file_name = webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm");
+ printf("\n\nEnter input file at side B [%s]: ", file_name.c_str());
+ PCMFile::ChooseFile(&file_name, 499, &frequencyHz);
+ _inFileB.Open(file_name, frequencyHz, "rb", true);
+
+ //--- Output B
+ std::string out_file_b = webrtc::test::OutputPath() + "outB.pcm";
+ printf("Enter output file at side B [%s]: ", out_file_b.c_str());
+ PCMFile::ChooseFile(&out_file_b, 499, &frequencyHz);
+ _outFileB.Open(out_file_b, frequencyHz, "wb");
+
+ //--- Set A-to-B channel
+ _channel_A2B = new Channel(2);
+ CHECK_ERROR_MT(_acmA->RegisterTransportCallback(_channel_A2B));
+ _channel_A2B->RegisterReceiverACM(_acmB.get());
+
+ //--- Set B-to-A channel
+ _channel_B2A = new Channel(1);
+ CHECK_ERROR_MT(_acmB->RegisterTransportCallback(_channel_B2A));
+ _channel_B2A->RegisterReceiverACM(_acmA.get());
+
+ //--- EVENT TIMERS
+ // A
+ _pullEventA = EventTimerWrapper::Create();
+ _pushEventA = EventTimerWrapper::Create();
+ _processEventA = EventTimerWrapper::Create();
+ _apiEventA = EventWrapper::Create();
+ // B
+ _pullEventB = EventTimerWrapper::Create();
+ _pushEventB = EventTimerWrapper::Create();
+ _processEventB = EventTimerWrapper::Create();
+ _apiEventB = EventWrapper::Create();
+
+ //--- I/O params
+ // A
+ _outFreqHzA = _outFileA.SamplingFrequency();
+ // B
+ _outFreqHzB = _outFileB.SamplingFrequency();
+
+ //Trace::SetEncryptedTraceFile("ACMAPITestEncrypted.txt");
+
+ char print[11];
+
+ // Create a trace file.
+ Trace::CreateTrace();
+ Trace::SetTraceFile(
+ (webrtc::test::OutputPath() + "acm_api_trace.txt").c_str());
+
+ printf("\nRandom Test (y/n)?");
+ EXPECT_TRUE(fgets(print, 10, stdin) != NULL);
+ print[10] = '\0';
+ if (strstr(print, "y") != NULL) {
+ _randomTest = true;
+ _verbose = false;
+ _writeToFile = false;
+ } else {
+ _randomTest = false;
+ printf("\nPrint Tests (y/n)? ");
+ EXPECT_TRUE(fgets(print, 10, stdin) != NULL);
+ print[10] = '\0';
+ if (strstr(print, "y") == NULL) {
+ EXPECT_TRUE(freopen("APITest_log.txt", "w", stdout) != 0);
+ _verbose = false;
+ }
+ }
+
+ _vadCallbackA = new VADCallback;
+ _vadCallbackB = new VADCallback;
+
+ return 0;
+}
+
+bool APITest::PushAudioThreadA(void* obj) {
+ return static_cast<APITest*>(obj)->PushAudioRunA();
+}
+
+bool APITest::PushAudioThreadB(void* obj) {
+ return static_cast<APITest*>(obj)->PushAudioRunB();
+}
+
+bool APITest::PullAudioThreadA(void* obj) {
+ return static_cast<APITest*>(obj)->PullAudioRunA();
+}
+
+bool APITest::PullAudioThreadB(void* obj) {
+ return static_cast<APITest*>(obj)->PullAudioRunB();
+}
+
+bool APITest::ProcessThreadA(void* obj) {
+ return static_cast<APITest*>(obj)->ProcessRunA();
+}
+
+bool APITest::ProcessThreadB(void* obj) {
+ return static_cast<APITest*>(obj)->ProcessRunB();
+}
+
+bool APITest::APIThreadA(void* obj) {
+ return static_cast<APITest*>(obj)->APIRunA();
+}
+
+bool APITest::APIThreadB(void* obj) {
+ return static_cast<APITest*>(obj)->APIRunB();
+}
+
+bool APITest::PullAudioRunA() {
+ _pullEventA->Wait(100);
+ AudioFrame audioFrame;
+ if (_acmA->PlayoutData10Ms(_outFreqHzA, &audioFrame) < 0) {
+ bool thereIsDecoder;
+ {
+ ReadLockScoped rl(_apiTestRWLock);
+ thereIsDecoder = _thereIsDecoderA;
+ }
+ if (thereIsDecoder) {
+ fprintf(stderr, "\n>>>>>> cannot pull audio A <<<<<<<< \n");
+ }
+ } else {
+ if (_writeToFile) {
+ _outFileA.Write10MsData(audioFrame);
+ }
+ }
+ return true;
+}
+
+bool APITest::PullAudioRunB() {
+ _pullEventB->Wait(100);
+ AudioFrame audioFrame;
+ if (_acmB->PlayoutData10Ms(_outFreqHzB, &audioFrame) < 0) {
+ bool thereIsDecoder;
+ {
+ ReadLockScoped rl(_apiTestRWLock);
+ thereIsDecoder = _thereIsDecoderB;
+ }
+ if (thereIsDecoder) {
+ fprintf(stderr, "\n>>>>>> cannot pull audio B <<<<<<<< \n");
+ fprintf(stderr, "%d %d\n", _testNumA, _testNumB);
+ }
+ } else {
+ if (_writeToFile) {
+ _outFileB.Write10MsData(audioFrame);
+ }
+ }
+ return true;
+}
+
+bool APITest::PushAudioRunA() {
+ _pushEventA->Wait(100);
+ AudioFrame audioFrame;
+ _inFileA.Read10MsData(audioFrame);
+ if (_acmA->Add10MsData(audioFrame) < 0) {
+ bool thereIsEncoder;
+ {
+ ReadLockScoped rl(_apiTestRWLock);
+ thereIsEncoder = _thereIsEncoderA;
+ }
+ if (thereIsEncoder) {
+ fprintf(stderr, "\n>>>> add10MsData at A failed <<<<\n");
+ }
+ }
+ return true;
+}
+
+bool APITest::PushAudioRunB() {
+ _pushEventB->Wait(100);
+ AudioFrame audioFrame;
+ _inFileB.Read10MsData(audioFrame);
+ if (_acmB->Add10MsData(audioFrame) < 0) {
+ bool thereIsEncoder;
+ {
+ ReadLockScoped rl(_apiTestRWLock);
+ thereIsEncoder = _thereIsEncoderB;
+ }
+
+ if (thereIsEncoder) {
+ fprintf(stderr, "\n>>>> cannot add audio to B <<<<");
+ }
+ }
+
+ return true;
+}
+
+bool APITest::ProcessRunA() {
+ _processEventA->Wait(100);
+ return true;
+}
+
+bool APITest::ProcessRunB() {
+ _processEventB->Wait(100);
+ return true;
+}
+
+/*
+ * On side A we test the APIs that are related to the sender side.
+ */
+
+void APITest::RunTest(char thread) {
+ int testNum;
+ {
+ WriteLockScoped cs(_apiTestRWLock);
+ if (thread == 'A') {
+ _testNumA = (_testNumB + 1 + (rand() % 3)) % 4;
+ testNum = _testNumA;
+
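+      // Bounce side A's progress dot back and forth within columns 0-19
+      // (side B uses columns 20-39).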
+ _movingDot[_dotPositionA] = ' ';
+ if (_dotPositionA == 0) {
+ _dotMoveDirectionA = 1;
+ }
+ if (_dotPositionA == 19) {
+ _dotMoveDirectionA = -1;
+ }
+ _dotPositionA += _dotMoveDirectionA;
+ _movingDot[_dotPositionA] = (_dotMoveDirectionA > 0) ? '>' : '<';
+ } else {
+ _testNumB = (_testNumA + 1 + (rand() % 3)) % 4;
+ testNum = _testNumB;
+
+ _movingDot[_dotPositionB] = ' ';
+ if (_dotPositionB == 20) {
+ _dotMoveDirectionB = 1;
+ }
+ if (_dotPositionB == 39) {
+ _dotMoveDirectionB = -1;
+ }
+ _dotPositionB += _dotMoveDirectionB;
+ _movingDot[_dotPositionB] = (_dotMoveDirectionB > 0) ? '>' : '<';
+ }
+ //fprintf(stderr, "%c: %d \n", thread, testNum);
+ //fflush(stderr);
+ }
+ switch (testNum) {
+ case 0:
+ CurrentCodec('A');
+ ChangeCodec('A');
+ break;
+ case 1:
+ if (!_randomTest) {
+ fprintf(stdout, "\nTesting Delay ...\n");
+ }
+ TestDelay('A');
+ break;
+ case 2:
+ TestSendVAD('A');
+ break;
+ case 3:
+ TestRegisteration('A');
+ break;
+ default:
+ fprintf(stderr, "Wrong Test Number\n");
+ getc(stdin);
+ exit(1);
+ }
+}
+
+bool APITest::APIRunA() {
+ _apiEventA->Wait(50);
+
+ bool randomTest;
+ {
+ ReadLockScoped rl(_apiTestRWLock);
+ randomTest = _randomTest;
+ }
+ if (randomTest) {
+ RunTest('A');
+ } else {
+ CurrentCodec('A');
+ ChangeCodec('A');
+ if (_codecCntrA == 0) {
+ fprintf(stdout, "\nTesting Delay ...\n");
+ TestDelay('A');
+ }
+ // VAD TEST
+ TestSendVAD('A');
+ TestRegisteration('A');
+ }
+ return true;
+}
+
+bool APITest::APIRunB() {
+ _apiEventB->Wait(50);
+ bool randomTest;
+ {
+ ReadLockScoped rl(_apiTestRWLock);
+ randomTest = _randomTest;
+ }
+ //_apiEventB->Wait(2000);
+ if (randomTest) {
+ RunTest('B');
+ }
+
+ return true;
+}
+
+void APITest::Perform() {
+ SetUp();
+
+ //--- THREADS
+ // A
+ // PUSH
+ rtc::PlatformThread myPushAudioThreadA(PushAudioThreadA, this,
+ "PushAudioThreadA");
+ myPushAudioThreadA.Start();
+ // PULL
+ rtc::PlatformThread myPullAudioThreadA(PullAudioThreadA, this,
+ "PullAudioThreadA");
+ myPullAudioThreadA.Start();
+ // Process
+ rtc::PlatformThread myProcessThreadA(ProcessThreadA, this, "ProcessThreadA");
+ myProcessThreadA.Start();
+ // API
+ rtc::PlatformThread myAPIThreadA(APIThreadA, this, "APIThreadA");
+ myAPIThreadA.Start();
+ // B
+ // PUSH
+ rtc::PlatformThread myPushAudioThreadB(PushAudioThreadB, this,
+ "PushAudioThreadB");
+ myPushAudioThreadB.Start();
+ // PULL
+ rtc::PlatformThread myPullAudioThreadB(PullAudioThreadB, this,
+ "PullAudioThreadB");
+ myPullAudioThreadB.Start();
+ // Process
+ rtc::PlatformThread myProcessThreadB(ProcessThreadB, this, "ProcessThreadB");
+ myProcessThreadB.Start();
+ // API
+ rtc::PlatformThread myAPIThreadB(APIThreadB, this, "APIThreadB");
+ myAPIThreadB.Start();
+
+ //_apiEventA->StartTimer(true, 5000);
+ //_apiEventB->StartTimer(true, 5000);
+
+ _processEventA->StartTimer(true, 10);
+ _processEventB->StartTimer(true, 10);
+
+ _pullEventA->StartTimer(true, 10);
+ _pullEventB->StartTimer(true, 10);
+
+ _pushEventA->StartTimer(true, 10);
+ _pushEventB->StartTimer(true, 10);
+
+ // Keep main thread waiting for sender/receiver
+ // threads to complete
+ EventWrapper* completeEvent = EventWrapper::Create();
+ uint64_t startTime = TickTime::MillisecondTimestamp();
+ uint64_t currentTime;
+  // Run the test for 2 minutes (120000 ms).
+ do {
+ {
+ //ReadLockScoped rl(_apiTestRWLock);
+ //fprintf(stderr, "\r%s", _movingDot);
+ }
+ //fflush(stderr);
+ completeEvent->Wait(50);
+ currentTime = TickTime::MillisecondTimestamp();
+ } while ((currentTime - startTime) < 120000);
+
+ //completeEvent->Wait(0xFFFFFFFF);
+ //(unsigned long)((unsigned long)TEST_DURATION_SEC * (unsigned long)1000));
+ delete completeEvent;
+
+ myPushAudioThreadA.Stop();
+ myPullAudioThreadA.Stop();
+ myProcessThreadA.Stop();
+ myAPIThreadA.Stop();
+
+ myPushAudioThreadB.Stop();
+ myPullAudioThreadB.Stop();
+ myProcessThreadB.Stop();
+ myAPIThreadB.Stop();
+}
+
+void APITest::CheckVADStatus(char side) {
+
+ bool dtxEnabled;
+ bool vadEnabled;
+ ACMVADMode vadMode;
+
+ if (side == 'A') {
+ _acmA->VAD(&dtxEnabled, &vadEnabled, &vadMode);
+ _acmA->RegisterVADCallback(NULL);
+ _vadCallbackA->Reset();
+ _acmA->RegisterVADCallback(_vadCallbackA);
+
+ if (!_randomTest) {
+ if (_verbose) {
+ fprintf(stdout, "DTX %3s, VAD %3s, Mode %d", dtxEnabled ? "ON" : "OFF",
+ vadEnabled ? "ON" : "OFF", (int) vadMode);
+ Wait(5000);
+ fprintf(stdout, " => bit-rate %3.0f kbps\n", _channel_A2B->BitRate());
+ } else {
+ Wait(5000);
+ fprintf(stdout, "DTX %3s, VAD %3s, Mode %d => bit-rate %3.0f kbps\n",
+ dtxEnabled ? "ON" : "OFF", vadEnabled ? "ON" : "OFF",
+ (int) vadMode, _channel_A2B->BitRate());
+ }
+ _vadCallbackA->PrintFrameTypes();
+ }
+
+ if (dtxEnabled != _sendDTXA) {
+ fprintf(stderr, ">>> Error Enabling DTX <<<\n");
+ }
+ if ((vadEnabled != _sendVADA) && (!dtxEnabled)) {
+ fprintf(stderr, ">>> Error Enabling VAD <<<\n");
+ }
+ if ((vadMode != _sendVADModeA) && vadEnabled) {
+ fprintf(stderr, ">>> Error setting VAD-mode <<<\n");
+ }
+ } else {
+ _acmB->VAD(&dtxEnabled, &vadEnabled, &vadMode);
+
+ _acmB->RegisterVADCallback(NULL);
+ _vadCallbackB->Reset();
+ _acmB->RegisterVADCallback(_vadCallbackB);
+
+ if (!_randomTest) {
+ if (_verbose) {
+ fprintf(stdout, "DTX %3s, VAD %3s, Mode %d", dtxEnabled ? "ON" : "OFF",
+ vadEnabled ? "ON" : "OFF", (int) vadMode);
+ Wait(5000);
+ fprintf(stdout, " => bit-rate %3.0f kbps\n", _channel_B2A->BitRate());
+ } else {
+ Wait(5000);
+ fprintf(stdout, "DTX %3s, VAD %3s, Mode %d => bit-rate %3.0f kbps\n",
+ dtxEnabled ? "ON" : "OFF", vadEnabled ? "ON" : "OFF",
+ (int) vadMode, _channel_B2A->BitRate());
+ }
+ _vadCallbackB->PrintFrameTypes();
+ }
+
+ if (dtxEnabled != _sendDTXB) {
+ fprintf(stderr, ">>> Error Enabling DTX <<<\n");
+ }
+ if ((vadEnabled != _sendVADB) && (!dtxEnabled)) {
+ fprintf(stderr, ">>> Error Enabling VAD <<<\n");
+ }
+ if ((vadMode != _sendVADModeB) && vadEnabled) {
+ fprintf(stderr, ">>> Error setting VAD-mode <<<\n");
+ }
+ }
+}
+
+// Set Min delay, get delay, playout timestamp
+void APITest::TestDelay(char side) {
+ AudioCodingModule* myACM;
+ Channel* myChannel;
+ int32_t* myMinDelay;
+ EventTimerWrapper* myEvent = EventTimerWrapper::Create();
+
+ uint32_t inTimestamp = 0;
+ uint32_t outTimestamp = 0;
+ double estimDelay = 0;
+
+ double averageEstimDelay = 0;
+ double averageDelay = 0;
+
+ CircularBuffer estimDelayCB(100);
+ estimDelayCB.SetArithMean(true);
+
+ if (side == 'A') {
+ myACM = _acmA.get();
+ myChannel = _channel_B2A;
+ myMinDelay = &_minDelayA;
+ } else {
+ myACM = _acmB.get();
+ myChannel = _channel_A2B;
+ myMinDelay = &_minDelayB;
+ }
+
+ CHECK_ERROR_MT(myACM->SetMinimumPlayoutDelay(*myMinDelay));
+
+ inTimestamp = myChannel->LastInTimestamp();
+ CHECK_ERROR_MT(myACM->PlayoutTimestamp(&outTimestamp));
+
+ if (!_randomTest) {
+ myEvent->StartTimer(true, 30);
+ int n = 0;
+ int settlePoint = 5000;
+ while (n < settlePoint + 400) {
+ myEvent->Wait(1000);
+
+ inTimestamp = myChannel->LastInTimestamp();
+ CHECK_ERROR_MT(myACM->PlayoutTimestamp(&outTimestamp));
+
+ //std::cout << outTimestamp << std::endl << std::flush;
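+      // Timestamps are in samples, so dividing by samples-per-ms
+      // (ReceiveFrequency() / 1000) gives the estimated delay in milliseconds.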
+ estimDelay = (double) ((uint32_t)(inTimestamp - outTimestamp))
+ / ((double) myACM->ReceiveFrequency() / 1000.0);
+
+ estimDelayCB.Update(estimDelay);
+
+ estimDelayCB.ArithMean(averageEstimDelay);
+ //printf("\n %6.1f \n", estimDelay);
+ //std::cout << " " << std::flush;
+
+ if (_verbose) {
+ fprintf(stdout,
+                "\rExpected: %4d, retrieved: %6.1f, measured: %6.1f",
+ *myMinDelay, averageDelay, averageEstimDelay);
+ std::cout << " " << std::flush;
+ }
+ if ((averageDelay > *myMinDelay) && (n < settlePoint)) {
+ settlePoint = n;
+ }
+ n++;
+ }
+ myEvent->StopTimer();
+ }
+
+ if ((!_verbose) && (!_randomTest)) {
+ fprintf(stdout, "\nExpected: %4d, retreived: %6.1f, measured: %6.1f",
+ *myMinDelay, averageDelay, averageEstimDelay);
+ }
+
+ *myMinDelay = (rand() % 1000) + 1;
+
+ NetworkStatistics networkStat;
+ CHECK_ERROR_MT(myACM->GetNetworkStatistics(&networkStat));
+
+ if (!_randomTest) {
+ fprintf(stdout, "\n\nJitter Statistics at Side %c\n", side);
+ fprintf(stdout, "--------------------------------------\n");
+ fprintf(stdout, "buffer-size............. %d\n",
+ networkStat.currentBufferSize);
+ fprintf(stdout, "Preferred buffer-size... %d\n",
+ networkStat.preferredBufferSize);
+ fprintf(stdout, "Peaky jitter mode........%d\n",
+ networkStat.jitterPeaksFound);
+ fprintf(stdout, "packet-size rate........ %d\n",
+ networkStat.currentPacketLossRate);
+ fprintf(stdout, "discard rate............ %d\n",
+ networkStat.currentDiscardRate);
+ fprintf(stdout, "expand rate............. %d\n",
+ networkStat.currentExpandRate);
+ fprintf(stdout, "speech expand rate...... %d\n",
+ networkStat.currentSpeechExpandRate);
+ fprintf(stdout, "Preemptive rate......... %d\n",
+ networkStat.currentPreemptiveRate);
+ fprintf(stdout, "Accelerate rate......... %d\n",
+ networkStat.currentAccelerateRate);
+ fprintf(stdout, "Secondary decoded rate.. %d\n",
+ networkStat.currentSecondaryDecodedRate);
+ fprintf(stdout, "Clock-drift............. %d\n", networkStat.clockDriftPPM);
+ fprintf(stdout, "Mean waiting time....... %d\n",
+ networkStat.meanWaitingTimeMs);
+ fprintf(stdout, "Median waiting time..... %d\n",
+ networkStat.medianWaitingTimeMs);
+ fprintf(stdout, "Min waiting time........ %d\n",
+ networkStat.minWaitingTimeMs);
+ fprintf(stdout, "Max waiting time........ %d\n",
+ networkStat.maxWaitingTimeMs);
+ }
+
+ CHECK_ERROR_MT(myACM->SetMinimumPlayoutDelay(*myMinDelay));
+
+ if (!_randomTest) {
+ myEvent->Wait(500);
+ fprintf(stdout, "\n");
+ fprintf(stdout, "\n");
+ }
+ delete myEvent;
+}
+
+// Unregister a codec & register again.
+void APITest::TestRegisteration(char sendSide) {
+ AudioCodingModule* sendACM;
+ AudioCodingModule* receiveACM;
+ bool* thereIsDecoder;
+ EventWrapper* myEvent = EventWrapper::Create();
+
+ if (!_randomTest) {
+ fprintf(stdout, "\n\n");
+ fprintf(stdout,
+ "---------------------------------------------------------\n");
+ fprintf(stdout, " Unregister/register Receive Codec\n");
+ fprintf(stdout,
+ "---------------------------------------------------------\n");
+ }
+
+ switch (sendSide) {
+ case 'A': {
+ sendACM = _acmA.get();
+ receiveACM = _acmB.get();
+ thereIsDecoder = &_thereIsDecoderB;
+ break;
+ }
+ case 'B': {
+ sendACM = _acmB.get();
+ receiveACM = _acmA.get();
+ thereIsDecoder = &_thereIsDecoderA;
+ break;
+ }
+ default:
+ fprintf(stderr, "Invalid sender-side in TestRegistration(%c)\n",
+ sendSide);
+ exit(-1);
+ }
+
+ auto myCodec = sendACM->SendCodec();
+ if (!myCodec) {
+ CodecInst ci;
+ AudioCodingModule::Codec(_codecCntrA, &ci);
+ myCodec = rtc::Optional<CodecInst>(ci);
+ }
+
+ if (!_randomTest) {
+ fprintf(stdout, "Unregistering reveive codec, NO AUDIO.\n");
+ fflush (stdout);
+ }
+ {
+ WriteLockScoped wl(_apiTestRWLock);
+ *thereIsDecoder = false;
+ }
+ //myEvent->Wait(20);
+ CHECK_ERROR_MT(receiveACM->UnregisterReceiveCodec(myCodec->pltype));
+ Wait(1000);
+
+ int currentPayload = myCodec->pltype;
+
+ if (!FixedPayloadTypeCodec(myCodec->plname)) {
+ int32_t i;
+ for (i = 0; i < 32; i++) {
+ if (!_payloadUsed[i]) {
+ if (!_randomTest) {
+ fprintf(stdout,
+ "Register receive codec with new Payload, AUDIO BACK.\n");
+ }
+ //myCodec->pltype = i + 96;
+ //CHECK_ERROR_MT(receiveACM->RegisterReceiveCodec(*myCodec));
+ //CHECK_ERROR_MT(sendACM->RegisterSendCodec(*myCodec));
+ //myEvent->Wait(20);
+ //{
+ // WriteLockScoped wl(_apiTestRWLock);
+ // *thereIsDecoder = true;
+ //}
+ Wait(1000);
+
+ if (!_randomTest) {
+          fprintf(stdout, "Unregistering receive codec, NO AUDIO.\n");
+ }
+ //{
+ // WriteLockScoped wl(_apiTestRWLock);
+ // *thereIsDecoder = false;
+ //}
+ //myEvent->Wait(20);
+ //CHECK_ERROR_MT(receiveACM->UnregisterReceiveCodec(myCodec->pltype));
+ Wait(1000);
+
+ myCodec->pltype = currentPayload;
+ if (!_randomTest) {
+ fprintf(stdout,
+ "Register receive codec with default Payload, AUDIO BACK.\n");
+ fflush (stdout);
+ }
+ CHECK_ERROR_MT(receiveACM->RegisterReceiveCodec(*myCodec));
+ //CHECK_ERROR_MT(sendACM->RegisterSendCodec(*myCodec));
+ myEvent->Wait(20);
+ {
+ WriteLockScoped wl(_apiTestRWLock);
+ *thereIsDecoder = true;
+ }
+ Wait(1000);
+
+ break;
+ }
+ }
+ if (i == 32) {
+ CHECK_ERROR_MT(receiveACM->RegisterReceiveCodec(*myCodec));
+ {
+ WriteLockScoped wl(_apiTestRWLock);
+ *thereIsDecoder = true;
+ }
+ }
+ } else {
+ if (!_randomTest) {
+ fprintf(stdout,
+ "Register receive codec with fixed Payload, AUDIO BACK.\n");
+ fflush (stdout);
+ }
+ CHECK_ERROR_MT(receiveACM->RegisterReceiveCodec(*myCodec));
+ //CHECK_ERROR_MT(receiveACM->UnregisterReceiveCodec(myCodec->pltype));
+ //CHECK_ERROR_MT(receiveACM->RegisterReceiveCodec(*myCodec));
+ myEvent->Wait(20);
+ {
+ WriteLockScoped wl(_apiTestRWLock);
+ *thereIsDecoder = true;
+ }
+ }
+ delete myEvent;
+ if (!_randomTest) {
+ fprintf(stdout,
+ "---------------------------------------------------------\n");
+ }
+}
+
+void APITest::TestSendVAD(char side) {
+ if (_randomTest) {
+ return;
+ }
+
+ bool* vad;
+ bool* dtx;
+ ACMVADMode* mode;
+ Channel* myChannel;
+ AudioCodingModule* myACM;
+
+ CodecInst myCodec;
+ if (!_randomTest) {
+ fprintf(stdout, "\n\n");
+ fprintf(stdout, "-----------------------------------------------\n");
+ fprintf(stdout, " Test VAD API\n");
+ fprintf(stdout, "-----------------------------------------------\n");
+ }
+
+ if (side == 'A') {
+ AudioCodingModule::Codec(_codecCntrA, &myCodec);
+ vad = &_sendVADA;
+ dtx = &_sendDTXA;
+ mode = &_sendVADModeA;
+ myChannel = _channel_A2B;
+ myACM = _acmA.get();
+ } else {
+ AudioCodingModule::Codec(_codecCntrB, &myCodec);
+ vad = &_sendVADB;
+ dtx = &_sendDTXB;
+ mode = &_sendVADModeB;
+ myChannel = _channel_B2A;
+ myACM = _acmB.get();
+ }
+
+ CheckVADStatus(side);
+ if (!_randomTest) {
+ fprintf(stdout, "\n\n");
+ }
+
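+  // Cycle through the VAD modes with VAD/DTX enabled; once VADVeryAggr is
+  // reached, disable VAD/DTX and start over from VADNormal.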
+ switch (*mode) {
+ case VADNormal:
+ *vad = true;
+ *dtx = true;
+ *mode = VADAggr;
+ break;
+ case VADLowBitrate:
+ *vad = true;
+ *dtx = true;
+ *mode = VADVeryAggr;
+ break;
+ case VADAggr:
+ *vad = true;
+ *dtx = true;
+ *mode = VADLowBitrate;
+ break;
+ case VADVeryAggr:
+ *vad = false;
+ *dtx = false;
+ *mode = VADNormal;
+ break;
+ default:
+ *mode = VADNormal;
+ }
+
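+  // DTX is not used with super-wideband (32 kHz) codecs in this test.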
+ *dtx = (myCodec.plfreq == 32000) ? false : *dtx;
+
+ CHECK_ERROR_MT(myACM->SetVAD(*dtx, *vad, *mode));
+ myChannel->ResetStats();
+
+ CheckVADStatus(side);
+ if (!_randomTest) {
+ fprintf(stdout, "\n");
+ fprintf(stdout, "-----------------------------------------------\n");
+ }
+
+ // Fault Test
+ CHECK_PROTECTED_MT(myACM->SetVAD(false, true, (ACMVADMode) - 1));
+ CHECK_PROTECTED_MT(myACM->SetVAD(false, true, (ACMVADMode) 4));
+
+}
+
+void APITest::CurrentCodec(char side) {
+ auto myCodec = (side == 'A' ? _acmA : _acmB)->SendCodec();
+
+ if (!_randomTest) {
+ fprintf(stdout, "\n\n");
+ fprintf(stdout, "Send codec in Side A\n");
+ fprintf(stdout, "----------------------------\n");
+ fprintf(stdout, "Name................. %s\n", myCodec->plname);
+ fprintf(stdout, "Sampling Frequency... %d\n", myCodec->plfreq);
+ fprintf(stdout, "Rate................. %d\n", myCodec->rate);
+ fprintf(stdout, "Payload-type......... %d\n", myCodec->pltype);
+ fprintf(stdout, "Packet-size.......... %d\n", myCodec->pacsize);
+ }
+
+ Wait(100);
+}
+
+void APITest::ChangeCodec(char side) {
+ CodecInst myCodec;
+ AudioCodingModule* myACM;
+ uint8_t* codecCntr;
+ bool* thereIsEncoder;
+ bool* vad;
+ bool* dtx;
+ ACMVADMode* mode;
+ Channel* myChannel;
+ // Reset and Wait
+ if (!_randomTest) {
+ fprintf(stdout, "Reset Encoder Side A \n");
+ }
+ if (side == 'A') {
+ myACM = _acmA.get();
+ codecCntr = &_codecCntrA;
+ {
+ WriteLockScoped wl(_apiTestRWLock);
+ thereIsEncoder = &_thereIsEncoderA;
+ }
+ vad = &_sendVADA;
+ dtx = &_sendDTXA;
+ mode = &_sendVADModeA;
+ myChannel = _channel_A2B;
+ } else {
+ myACM = _acmB.get();
+ codecCntr = &_codecCntrB;
+ {
+ WriteLockScoped wl(_apiTestRWLock);
+ thereIsEncoder = &_thereIsEncoderB;
+ }
+ vad = &_sendVADB;
+ dtx = &_sendDTXB;
+ mode = &_sendVADModeB;
+ myChannel = _channel_B2A;
+ }
+
+ Wait(100);
+
+ // Register the next codec
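+  // Skip CN, telephone-event and RED when cycling through the send codecs.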
+ do {
+ *codecCntr =
+ (*codecCntr < AudioCodingModule::NumberOfCodecs() - 1) ?
+ (*codecCntr + 1) : 0;
+
+ if (*codecCntr == 0) {
+ //printf("Initialize Sender Side A \n");
+ {
+ WriteLockScoped wl(_apiTestRWLock);
+ *thereIsEncoder = false;
+ }
+      // After initialization the CN codecs are lost; re-register them.
+ if (AudioCodingModule::Codec("CN", &myCodec, 8000, 1) >= 0) {
+ CHECK_ERROR_MT(myACM->RegisterSendCodec(myCodec));
+ }
+ if (AudioCodingModule::Codec("CN", &myCodec, 16000, 1) >= 0) {
+ CHECK_ERROR_MT(myACM->RegisterSendCodec(myCodec));
+ }
+ // VAD & DTX are disabled after initialization
+ *vad = false;
+ *dtx = false;
+ _writeToFile = false;
+ }
+
+ AudioCodingModule::Codec(*codecCntr, &myCodec);
+ } while (!STR_CASE_CMP(myCodec.plname, "CN")
+ || !STR_CASE_CMP(myCodec.plname, "telephone-event")
+ || !STR_CASE_CMP(myCodec.plname, "RED"));
+
+ if (!_randomTest) {
+ fprintf(stdout,"\n=====================================================\n");
+ fprintf(stdout, " Registering New Codec %s, %d kHz, %d kbps\n",
+ myCodec.plname, myCodec.plfreq / 1000, myCodec.rate / 1000);
+ }
+ //std::cout<< std::flush;
+
+  // No DTX for super-wideband codecs at this point.
+ if (myCodec.plfreq == 32000) {
+ *dtx = false;
+ CHECK_ERROR_MT(myACM->SetVAD(*dtx, *vad, *mode));
+
+ }
+
+ CHECK_ERROR_MT(myACM->RegisterSendCodec(myCodec));
+ myChannel->ResetStats();
+ {
+ WriteLockScoped wl(_apiTestRWLock);
+ *thereIsEncoder = true;
+ }
+ Wait(500);
+}
+
+} // namespace webrtc
diff --git a/webrtc/modules/audio_coding/test/APITest.h b/webrtc/modules/audio_coding/test/APITest.h
new file mode 100644
index 0000000000..a1937c2b00
--- /dev/null
+++ b/webrtc/modules/audio_coding/test/APITest.h
@@ -0,0 +1,163 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_CODING_TEST_APITEST_H_
+#define WEBRTC_MODULES_AUDIO_CODING_TEST_APITEST_H_
+
+#include "webrtc/base/scoped_ptr.h"
+#include "webrtc/modules/audio_coding/include/audio_coding_module.h"
+#include "webrtc/modules/audio_coding/test/ACMTest.h"
+#include "webrtc/modules/audio_coding/test/Channel.h"
+#include "webrtc/modules/audio_coding/test/PCMFile.h"
+#include "webrtc/modules/audio_coding/test/utility.h"
+#include "webrtc/system_wrappers/include/event_wrapper.h"
+#include "webrtc/system_wrappers/include/rw_lock_wrapper.h"
+
+namespace webrtc {
+
+class Config;
+
+enum APITESTAction {
+ TEST_CHANGE_CODEC_ONLY = 0,
+ DTX_TEST = 1
+};
+
+class APITest : public ACMTest {
+ public:
+ explicit APITest(const Config& config);
+ ~APITest();
+
+ void Perform();
+ private:
+ int16_t SetUp();
+
+ static bool PushAudioThreadA(void* obj);
+ static bool PullAudioThreadA(void* obj);
+ static bool ProcessThreadA(void* obj);
+ static bool APIThreadA(void* obj);
+
+ static bool PushAudioThreadB(void* obj);
+ static bool PullAudioThreadB(void* obj);
+ static bool ProcessThreadB(void* obj);
+ static bool APIThreadB(void* obj);
+
+ void CheckVADStatus(char side);
+
+ // Set Min delay, get delay, playout timestamp
+ void TestDelay(char side);
+
+ // Unregister a codec & register again.
+ void TestRegisteration(char side);
+
+ // Playout Mode, background noise mode.
+ // Receiver Frequency, playout frequency.
+ void TestPlayout(char receiveSide);
+
+ //
+ void TestSendVAD(char side);
+
+ void CurrentCodec(char side);
+
+ void ChangeCodec(char side);
+
+ void Wait(uint32_t waitLengthMs);
+
+ void RunTest(char thread);
+
+ bool PushAudioRunA();
+ bool PullAudioRunA();
+ bool ProcessRunA();
+ bool APIRunA();
+
+ bool PullAudioRunB();
+ bool PushAudioRunB();
+ bool ProcessRunB();
+ bool APIRunB();
+
+ //--- ACMs
+ rtc::scoped_ptr<AudioCodingModule> _acmA;
+ rtc::scoped_ptr<AudioCodingModule> _acmB;
+
+ //--- Channels
+ Channel* _channel_A2B;
+ Channel* _channel_B2A;
+
+ //--- I/O files
+ // A
+ PCMFile _inFileA;
+ PCMFile _outFileA;
+ // B
+ PCMFile _outFileB;
+ PCMFile _inFileB;
+
+ //--- I/O params
+ // A
+ int32_t _outFreqHzA;
+ // B
+ int32_t _outFreqHzB;
+
+  // Whether to write to file. We might skip writing to file if we run the
+  // test for a long time.
+ bool _writeToFile;
+ //--- Events
+ // A
+ EventTimerWrapper* _pullEventA; // pulling data from ACM
+ EventTimerWrapper* _pushEventA; // pushing data to ACM
+ EventTimerWrapper* _processEventA; // process
+ EventWrapper* _apiEventA; // API calls
+ // B
+ EventTimerWrapper* _pullEventB; // pulling data from ACM
+ EventTimerWrapper* _pushEventB; // pushing data to ACM
+ EventTimerWrapper* _processEventB; // process
+ EventWrapper* _apiEventB; // API calls
+
+  // Keep track of the codec on each side.
+ uint8_t _codecCntrA;
+ uint8_t _codecCntrB;
+
+  // Set to true when an encoder/decoder is registered on the corresponding
+  // side.
+ bool _thereIsEncoderA;
+ bool _thereIsEncoderB;
+ bool _thereIsDecoderA;
+ bool _thereIsDecoderB;
+
+ bool _sendVADA;
+ bool _sendDTXA;
+ ACMVADMode _sendVADModeA;
+
+ bool _sendVADB;
+ bool _sendDTXB;
+ ACMVADMode _sendVADModeB;
+
+ int32_t _minDelayA;
+ int32_t _minDelayB;
+ bool _payloadUsed[32];
+
+ bool _verbose;
+
+ int _dotPositionA;
+ int _dotMoveDirectionA;
+ int _dotPositionB;
+ int _dotMoveDirectionB;
+
+ char _movingDot[41];
+
+ VADCallback* _vadCallbackA;
+ VADCallback* _vadCallbackB;
+ RWLockWrapper& _apiTestRWLock;
+ bool _randomTest;
+ int _testNumA;
+ int _testNumB;
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_AUDIO_CODING_TEST_APITEST_H_
diff --git a/webrtc/modules/audio_coding/test/Channel.cc b/webrtc/modules/audio_coding/test/Channel.cc
new file mode 100644
index 0000000000..31521fe1e3
--- /dev/null
+++ b/webrtc/modules/audio_coding/test/Channel.cc
@@ -0,0 +1,424 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_coding/test/Channel.h"
+
+#include <assert.h>
+#include <iostream>
+
+#include "webrtc/base/format_macros.h"
+#include "webrtc/system_wrappers/include/tick_util.h"
+#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
+
+namespace webrtc {
+
+int32_t Channel::SendData(FrameType frameType,
+ uint8_t payloadType,
+ uint32_t timeStamp,
+ const uint8_t* payloadData,
+ size_t payloadSize,
+ const RTPFragmentationHeader* fragmentation) {
+ WebRtcRTPHeader rtpInfo;
+ int32_t status;
+ size_t payloadDataSize = payloadSize;
+
+ rtpInfo.header.markerBit = false;
+ rtpInfo.header.ssrc = 0;
+ rtpInfo.header.sequenceNumber = (external_sequence_number_ < 0) ?
+ _seqNo++ : static_cast<uint16_t>(external_sequence_number_);
+ rtpInfo.header.payloadType = payloadType;
+ rtpInfo.header.timestamp = (external_send_timestamp_ < 0) ? timeStamp :
+ static_cast<uint32_t>(external_send_timestamp_);
+
+ if (frameType == kAudioFrameCN) {
+ rtpInfo.type.Audio.isCNG = true;
+ } else {
+ rtpInfo.type.Audio.isCNG = false;
+ }
+ if (frameType == kEmptyFrame) {
+ // When frame is empty, we should not transmit it. The frame size of the
+ // next non-empty frame will be based on the previous frame size.
+ _useLastFrameSize = _lastFrameSizeSample > 0;
+ return 0;
+ }
+
+ rtpInfo.type.Audio.channel = 1;
+ // Treat fragmentation separately
+ if (fragmentation != NULL) {
+    // If the silence was too long for the 14-bit timestamp offset of the RED
+    // header, send only the new data; otherwise include the redundant block.
+ if ((fragmentation->fragmentationVectorSize == 2) &&
+ (fragmentation->fragmentationTimeDiff[1] <= 0x3fff)) {
+      // Set the F bit (0x80): another block header follows this one.
+ _payloadData[0] = 0x80 + fragmentation->fragmentationPlType[1];
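+      // Pack the 14-bit timestamp offset and the 10-bit length of the
+      // redundant block into the remaining three bytes of the RED header.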
+ size_t REDheader = (fragmentation->fragmentationTimeDiff[1] << 10) +
+ fragmentation->fragmentationLength[1];
+ _payloadData[1] = uint8_t((REDheader >> 16) & 0x000000FF);
+ _payloadData[2] = uint8_t((REDheader >> 8) & 0x000000FF);
+ _payloadData[3] = uint8_t(REDheader & 0x000000FF);
+
+ _payloadData[4] = fragmentation->fragmentationPlType[0];
+ // copy the RED data
+ memcpy(_payloadData + 5,
+ payloadData + fragmentation->fragmentationOffset[1],
+ fragmentation->fragmentationLength[1]);
+ // copy the normal data
+ memcpy(_payloadData + 5 + fragmentation->fragmentationLength[1],
+ payloadData + fragmentation->fragmentationOffset[0],
+ fragmentation->fragmentationLength[0]);
+ payloadDataSize += 5;
+ } else {
+ // single block (newest one)
+ memcpy(_payloadData, payloadData + fragmentation->fragmentationOffset[0],
+ fragmentation->fragmentationLength[0]);
+ payloadDataSize = fragmentation->fragmentationLength[0];
+ rtpInfo.header.payloadType = fragmentation->fragmentationPlType[0];
+ }
+ } else {
+ memcpy(_payloadData, payloadData, payloadDataSize);
+ if (_isStereo) {
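+      // For stereo, save the left channel's RTP header and reuse it for the
+      // right channel so both share the same sequence number and timestamp.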
+ if (_leftChannel) {
+ memcpy(&_rtpInfo, &rtpInfo, sizeof(WebRtcRTPHeader));
+ _leftChannel = false;
+ rtpInfo.type.Audio.channel = 1;
+ } else {
+ memcpy(&rtpInfo, &_rtpInfo, sizeof(WebRtcRTPHeader));
+ _leftChannel = true;
+ rtpInfo.type.Audio.channel = 2;
+ }
+ }
+ }
+
+ _channelCritSect->Enter();
+ if (_saveBitStream) {
+ //fwrite(payloadData, sizeof(uint8_t), payloadSize, _bitStreamFile);
+ }
+
+ if (!_isStereo) {
+ CalcStatistics(rtpInfo, payloadSize);
+ }
+ _useLastFrameSize = false;
+ _lastInTimestamp = timeStamp;
+ _totalBytes += payloadDataSize;
+ _channelCritSect->Leave();
+
+ if (_useFECTestWithPacketLoss) {
+ _packetLoss += 1;
+ if (_packetLoss == 3) {
+ _packetLoss = 0;
+ return 0;
+ }
+ }
+
+ if (num_packets_to_drop_ > 0) {
+ num_packets_to_drop_--;
+ return 0;
+ }
+
+ status = _receiverACM->IncomingPacket(_payloadData, payloadDataSize, rtpInfo);
+
+ return status;
+}
+
+// TODO(turajs): rewrite this method.
+void Channel::CalcStatistics(WebRtcRTPHeader& rtpInfo, size_t payloadSize) {
+ int n;
+ if ((rtpInfo.header.payloadType != _lastPayloadType)
+ && (_lastPayloadType != -1)) {
+    // The payload type has changed. Terminate the calculations on the
+    // previous payload type; we ignore the last packet of that payload type
+    // just to make things easier.
+ for (n = 0; n < MAX_NUM_PAYLOADS; n++) {
+ if (_lastPayloadType == _payloadStats[n].payloadType) {
+ _payloadStats[n].newPacket = true;
+ break;
+ }
+ }
+ }
+ _lastPayloadType = rtpInfo.header.payloadType;
+
+ bool newPayload = true;
+ ACMTestPayloadStats* currentPayloadStr = NULL;
+ for (n = 0; n < MAX_NUM_PAYLOADS; n++) {
+ if (rtpInfo.header.payloadType == _payloadStats[n].payloadType) {
+ newPayload = false;
+ currentPayloadStr = &_payloadStats[n];
+ break;
+ }
+ }
+
+ if (!newPayload) {
+ if (!currentPayloadStr->newPacket) {
+ if (!_useLastFrameSize) {
+ _lastFrameSizeSample = (uint32_t) ((uint32_t) rtpInfo.header.timestamp -
+ (uint32_t) currentPayloadStr->lastTimestamp);
+ }
+ assert(_lastFrameSizeSample > 0);
+ int k = 0;
+ for (; k < MAX_NUM_FRAMESIZES; ++k) {
+ if ((currentPayloadStr->frameSizeStats[k].frameSizeSample ==
+ _lastFrameSizeSample) ||
+ (currentPayloadStr->frameSizeStats[k].frameSizeSample == 0)) {
+ break;
+ }
+ }
+ if (k == MAX_NUM_FRAMESIZES) {
+ // New frame size found but no space to count statistics on it. Skip it.
+ printf("No memory to store statistics for payload %d : frame size %d\n",
+ _lastPayloadType, _lastFrameSizeSample);
+ return;
+ }
+ ACMTestFrameSizeStats* currentFrameSizeStats = &(currentPayloadStr
+ ->frameSizeStats[k]);
+ currentFrameSizeStats->frameSizeSample = (int16_t) _lastFrameSizeSample;
+
+ // increment the number of encoded samples.
+ currentFrameSizeStats->totalEncodedSamples += _lastFrameSizeSample;
+      // Increment the number of received packets.
+ currentFrameSizeStats->numPackets++;
+      // Increment the total number of bytes (this is based on the previous
+      // payload; we don't know the frame size of the current payload).
+ currentFrameSizeStats->totalPayloadLenByte += currentPayloadStr
+ ->lastPayloadLenByte;
+      // Store the maximum payload size (this is based on the previous
+      // payload; we don't know the frame size of the current payload).
+ if (currentFrameSizeStats->maxPayloadLen
+ < currentPayloadStr->lastPayloadLenByte) {
+ currentFrameSizeStats->maxPayloadLen = currentPayloadStr
+ ->lastPayloadLenByte;
+ }
+ // store the current values for the next time
+ currentPayloadStr->lastTimestamp = rtpInfo.header.timestamp;
+ currentPayloadStr->lastPayloadLenByte = payloadSize;
+ } else {
+ currentPayloadStr->newPacket = false;
+ currentPayloadStr->lastPayloadLenByte = payloadSize;
+ currentPayloadStr->lastTimestamp = rtpInfo.header.timestamp;
+ currentPayloadStr->payloadType = rtpInfo.header.payloadType;
+ memset(currentPayloadStr->frameSizeStats, 0, MAX_NUM_FRAMESIZES *
+ sizeof(ACMTestFrameSizeStats));
+ }
+ } else {
+ n = 0;
+ while (_payloadStats[n].payloadType != -1) {
+ n++;
+ }
+ // first packet
+ _payloadStats[n].newPacket = false;
+ _payloadStats[n].lastPayloadLenByte = payloadSize;
+ _payloadStats[n].lastTimestamp = rtpInfo.header.timestamp;
+ _payloadStats[n].payloadType = rtpInfo.header.payloadType;
+ memset(_payloadStats[n].frameSizeStats, 0, MAX_NUM_FRAMESIZES *
+ sizeof(ACMTestFrameSizeStats));
+ }
+}
+
+Channel::Channel(int16_t chID)
+ : _receiverACM(NULL),
+ _seqNo(0),
+ _channelCritSect(CriticalSectionWrapper::CreateCriticalSection()),
+ _bitStreamFile(NULL),
+ _saveBitStream(false),
+ _lastPayloadType(-1),
+ _isStereo(false),
+ _leftChannel(true),
+ _lastInTimestamp(0),
+ _useLastFrameSize(false),
+ _lastFrameSizeSample(0),
+ _packetLoss(0),
+ _useFECTestWithPacketLoss(false),
+ _beginTime(TickTime::MillisecondTimestamp()),
+ _totalBytes(0),
+ external_send_timestamp_(-1),
+ external_sequence_number_(-1),
+ num_packets_to_drop_(0) {
+ int n;
+ int k;
+ for (n = 0; n < MAX_NUM_PAYLOADS; n++) {
+ _payloadStats[n].payloadType = -1;
+ _payloadStats[n].newPacket = true;
+ for (k = 0; k < MAX_NUM_FRAMESIZES; k++) {
+ _payloadStats[n].frameSizeStats[k].frameSizeSample = 0;
+ _payloadStats[n].frameSizeStats[k].maxPayloadLen = 0;
+ _payloadStats[n].frameSizeStats[k].numPackets = 0;
+ _payloadStats[n].frameSizeStats[k].totalPayloadLenByte = 0;
+ _payloadStats[n].frameSizeStats[k].totalEncodedSamples = 0;
+ }
+ }
+ if (chID >= 0) {
+ _saveBitStream = true;
+ char bitStreamFileName[500];
+ sprintf(bitStreamFileName, "bitStream_%d.dat", chID);
+ _bitStreamFile = fopen(bitStreamFileName, "wb");
+ } else {
+ _saveBitStream = false;
+ }
+}
+
+Channel::~Channel() {
+ delete _channelCritSect;
+}
+
+void Channel::RegisterReceiverACM(AudioCodingModule* acm) {
+ _receiverACM = acm;
+ return;
+}
+
+void Channel::ResetStats() {
+ int n;
+ int k;
+ _channelCritSect->Enter();
+ _lastPayloadType = -1;
+ for (n = 0; n < MAX_NUM_PAYLOADS; n++) {
+ _payloadStats[n].payloadType = -1;
+ _payloadStats[n].newPacket = true;
+ for (k = 0; k < MAX_NUM_FRAMESIZES; k++) {
+ _payloadStats[n].frameSizeStats[k].frameSizeSample = 0;
+ _payloadStats[n].frameSizeStats[k].maxPayloadLen = 0;
+ _payloadStats[n].frameSizeStats[k].numPackets = 0;
+ _payloadStats[n].frameSizeStats[k].totalPayloadLenByte = 0;
+ _payloadStats[n].frameSizeStats[k].totalEncodedSamples = 0;
+ }
+ }
+ _beginTime = TickTime::MillisecondTimestamp();
+ _totalBytes = 0;
+ _channelCritSect->Leave();
+}
+
+int16_t Channel::Stats(CodecInst& codecInst,
+ ACMTestPayloadStats& payloadStats) {
+ _channelCritSect->Enter();
+ int n;
+ payloadStats.payloadType = -1;
+ for (n = 0; n < MAX_NUM_PAYLOADS; n++) {
+ if (_payloadStats[n].payloadType == codecInst.pltype) {
+ memcpy(&payloadStats, &_payloadStats[n], sizeof(ACMTestPayloadStats));
+ break;
+ }
+ }
+ if (payloadStats.payloadType == -1) {
+ _channelCritSect->Leave();
+ return -1;
+ }
+ for (n = 0; n < MAX_NUM_FRAMESIZES; n++) {
+ if (payloadStats.frameSizeStats[n].frameSizeSample == 0) {
+ _channelCritSect->Leave();
+ return 0;
+ }
+ payloadStats.frameSizeStats[n].usageLenSec = (double) payloadStats
+ .frameSizeStats[n].totalEncodedSamples / (double) codecInst.plfreq;
+
+ payloadStats.frameSizeStats[n].rateBitPerSec =
+ payloadStats.frameSizeStats[n].totalPayloadLenByte * 8
+ / payloadStats.frameSizeStats[n].usageLenSec;
+
+ }
+ _channelCritSect->Leave();
+ return 0;
+}
+
+void Channel::Stats(uint32_t* numPackets) {
+ _channelCritSect->Enter();
+ int k;
+ int n;
+ memset(numPackets, 0, MAX_NUM_PAYLOADS * sizeof(uint32_t));
+ for (k = 0; k < MAX_NUM_PAYLOADS; k++) {
+ if (_payloadStats[k].payloadType == -1) {
+ break;
+ }
+ numPackets[k] = 0;
+ for (n = 0; n < MAX_NUM_FRAMESIZES; n++) {
+ if (_payloadStats[k].frameSizeStats[n].frameSizeSample == 0) {
+ break;
+ }
+ numPackets[k] += _payloadStats[k].frameSizeStats[n].numPackets;
+ }
+ }
+ _channelCritSect->Leave();
+}
+
+void Channel::Stats(uint8_t* payloadType, uint32_t* payloadLenByte) {
+ _channelCritSect->Enter();
+
+ int k;
+ int n;
+ memset(payloadLenByte, 0, MAX_NUM_PAYLOADS * sizeof(uint32_t));
+ for (k = 0; k < MAX_NUM_PAYLOADS; k++) {
+ if (_payloadStats[k].payloadType == -1) {
+ break;
+ }
+ payloadType[k] = (uint8_t) _payloadStats[k].payloadType;
+ payloadLenByte[k] = 0;
+ for (n = 0; n < MAX_NUM_FRAMESIZES; n++) {
+ if (_payloadStats[k].frameSizeStats[n].frameSizeSample == 0) {
+ break;
+ }
+ payloadLenByte[k] += (uint16_t) _payloadStats[k].frameSizeStats[n]
+ .totalPayloadLenByte;
+ }
+ }
+
+ _channelCritSect->Leave();
+}
+
+void Channel::PrintStats(CodecInst& codecInst) {
+ ACMTestPayloadStats payloadStats;
+ Stats(codecInst, payloadStats);
+ printf("%s %d kHz\n", codecInst.plname, codecInst.plfreq / 1000);
+ printf("=====================================================\n");
+ if (payloadStats.payloadType == -1) {
+ printf("No Packets are sent with payload-type %d (%s)\n\n",
+ codecInst.pltype, codecInst.plname);
+ return;
+ }
+ for (int k = 0; k < MAX_NUM_FRAMESIZES; k++) {
+ if (payloadStats.frameSizeStats[k].frameSizeSample == 0) {
+ break;
+ }
+ printf("Frame-size.................... %d samples\n",
+ payloadStats.frameSizeStats[k].frameSizeSample);
+ printf("Average Rate.................. %.0f bits/sec\n",
+ payloadStats.frameSizeStats[k].rateBitPerSec);
+ printf("Maximum Payload-Size.......... %" PRIuS " Bytes\n",
+ payloadStats.frameSizeStats[k].maxPayloadLen);
+ printf(
+ "Maximum Instantaneous Rate.... %.0f bits/sec\n",
+ ((double) payloadStats.frameSizeStats[k].maxPayloadLen * 8.0
+ * (double) codecInst.plfreq)
+ / (double) payloadStats.frameSizeStats[k].frameSizeSample);
+ printf("Number of Packets............. %u\n",
+ (unsigned int) payloadStats.frameSizeStats[k].numPackets);
+ printf("Duration...................... %0.3f sec\n\n",
+ payloadStats.frameSizeStats[k].usageLenSec);
+
+ }
+
+}
+
+uint32_t Channel::LastInTimestamp() {
+ uint32_t timestamp;
+ _channelCritSect->Enter();
+ timestamp = _lastInTimestamp;
+ _channelCritSect->Leave();
+ return timestamp;
+}
+
+double Channel::BitRate() {
+ double rate;
+ uint64_t currTime = TickTime::MillisecondTimestamp();
+ _channelCritSect->Enter();
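+  // Total bytes * 8 divided by elapsed milliseconds gives kilobits/second.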
+ rate = ((double) _totalBytes * 8.0) / (double) (currTime - _beginTime);
+ _channelCritSect->Leave();
+ return rate;
+}
+
+} // namespace webrtc
diff --git a/webrtc/modules/audio_coding/test/Channel.h b/webrtc/modules/audio_coding/test/Channel.h
new file mode 100644
index 0000000000..b047aa9909
--- /dev/null
+++ b/webrtc/modules/audio_coding/test/Channel.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_CODING_TEST_CHANNEL_H_
+#define WEBRTC_MODULES_AUDIO_CODING_TEST_CHANNEL_H_
+
+#include <stdio.h>
+
+#include "webrtc/modules/audio_coding/include/audio_coding_module.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/typedefs.h"
+
+namespace webrtc {
+
+class CriticalSectionWrapper;
+
+#define MAX_NUM_PAYLOADS 50
+#define MAX_NUM_FRAMESIZES 6
+
+// TODO(turajs): Write constructor for this structure.
+struct ACMTestFrameSizeStats {
+ uint16_t frameSizeSample;
+ size_t maxPayloadLen;
+ uint32_t numPackets;
+ uint64_t totalPayloadLenByte;
+ uint64_t totalEncodedSamples;
+ double rateBitPerSec;
+ double usageLenSec;
+};
+
+// TODO(turajs): Write constructor for this structure.
+struct ACMTestPayloadStats {
+ bool newPacket;
+ int16_t payloadType;
+ size_t lastPayloadLenByte;
+ uint32_t lastTimestamp;
+ ACMTestFrameSizeStats frameSizeStats[MAX_NUM_FRAMESIZES];
+};
+
+class Channel : public AudioPacketizationCallback {
+ public:
+
+ Channel(int16_t chID = -1);
+ ~Channel();
+
+ int32_t SendData(FrameType frameType,
+ uint8_t payloadType,
+ uint32_t timeStamp,
+ const uint8_t* payloadData,
+ size_t payloadSize,
+ const RTPFragmentationHeader* fragmentation) override;
+
+ void RegisterReceiverACM(AudioCodingModule *acm);
+
+ void ResetStats();
+
+ int16_t Stats(CodecInst& codecInst, ACMTestPayloadStats& payloadStats);
+
+ void Stats(uint32_t* numPackets);
+
+ void Stats(uint8_t* payloadType, uint32_t* payloadLenByte);
+
+ void PrintStats(CodecInst& codecInst);
+
+ void SetIsStereo(bool isStereo) {
+ _isStereo = isStereo;
+ }
+
+ uint32_t LastInTimestamp();
+
+ void SetFECTestWithPacketLoss(bool usePacketLoss) {
+ _useFECTestWithPacketLoss = usePacketLoss;
+ }
+
+ double BitRate();
+
+ void set_send_timestamp(uint32_t new_send_ts) {
+ external_send_timestamp_ = new_send_ts;
+ }
+
+ void set_sequence_number(uint16_t new_sequence_number) {
+ external_sequence_number_ = new_sequence_number;
+ }
+
+ void set_num_packets_to_drop(int new_num_packets_to_drop) {
+ num_packets_to_drop_ = new_num_packets_to_drop;
+ }
+
+ private:
+ void CalcStatistics(WebRtcRTPHeader& rtpInfo, size_t payloadSize);
+
+ AudioCodingModule* _receiverACM;
+ uint16_t _seqNo;
+  // 60 msec * 32 samples(max)/msec * 2 descriptions (maybe) * 2 bytes/sample.
+ uint8_t _payloadData[60 * 32 * 2 * 2];
+
+ CriticalSectionWrapper* _channelCritSect;
+ FILE* _bitStreamFile;
+ bool _saveBitStream;
+ int16_t _lastPayloadType;
+ ACMTestPayloadStats _payloadStats[MAX_NUM_PAYLOADS];
+ bool _isStereo;
+ WebRtcRTPHeader _rtpInfo;
+ bool _leftChannel;
+ uint32_t _lastInTimestamp;
+ bool _useLastFrameSize;
+ uint32_t _lastFrameSizeSample;
+ // FEC Test variables
+ int16_t _packetLoss;
+ bool _useFECTestWithPacketLoss;
+ uint64_t _beginTime;
+ uint64_t _totalBytes;
+
+ // External timing info, defaulted to -1. Only used if they are
+ // non-negative.
+ int64_t external_send_timestamp_;
+ int32_t external_sequence_number_;
+ int num_packets_to_drop_;
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_AUDIO_CODING_TEST_CHANNEL_H_
diff --git a/webrtc/modules/audio_coding/test/EncodeDecodeTest.cc b/webrtc/modules/audio_coding/test/EncodeDecodeTest.cc
new file mode 100644
index 0000000000..ba3c8d9ad2
--- /dev/null
+++ b/webrtc/modules/audio_coding/test/EncodeDecodeTest.cc
@@ -0,0 +1,355 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_coding/test/EncodeDecodeTest.h"
+
+#include <sstream>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/base/scoped_ptr.h"
+#include "webrtc/common_types.h"
+#include "webrtc/modules/audio_coding/include/audio_coding_module.h"
+#include "webrtc/modules/audio_coding/acm2/acm_common_defs.h"
+#include "webrtc/modules/audio_coding/test/utility.h"
+#include "webrtc/system_wrappers/include/trace.h"
+#include "webrtc/test/testsupport/fileutils.h"
+
+namespace webrtc {
+
+TestPacketization::TestPacketization(RTPStream *rtpStream, uint16_t frequency)
+ : _rtpStream(rtpStream),
+ _frequency(frequency),
+ _seqNo(0) {
+}
+
+TestPacketization::~TestPacketization() {
+}
+
+int32_t TestPacketization::SendData(
+ const FrameType /* frameType */, const uint8_t payloadType,
+ const uint32_t timeStamp, const uint8_t* payloadData,
+ const size_t payloadSize,
+ const RTPFragmentationHeader* /* fragmentation */) {
+ _rtpStream->Write(payloadType, timeStamp, _seqNo++, payloadData, payloadSize,
+ _frequency);
+ return 1;
+}
+
+Sender::Sender()
+ : _acm(NULL),
+ _pcmFile(),
+ _audioFrame(),
+ _packetization(NULL) {
+}
+
+void Sender::Setup(AudioCodingModule *acm, RTPStream *rtpStream,
+ std::string in_file_name, int sample_rate, size_t channels) {
+ struct CodecInst sendCodec;
+ int noOfCodecs = acm->NumberOfCodecs();
+ int codecNo;
+
+ // Open input file
+ const std::string file_name = webrtc::test::ResourcePath(in_file_name, "pcm");
+ _pcmFile.Open(file_name, sample_rate, "rb");
+ if (channels == 2) {
+ _pcmFile.ReadStereo(true);
+ }
+ // Set test length to 500 ms (50 blocks of 10 ms each).
+ _pcmFile.SetNum10MsBlocksToRead(50);
+ // Fast-forward 1 second (100 blocks) since the file starts with silence.
+ _pcmFile.FastForward(100);
+
+ // Set the codec for the current test.
+ if ((testMode == 0) || (testMode == 1)) {
+ // Set the codec id.
+ codecNo = codeId;
+ } else {
+ // Choose codec on command line.
+ printf("List of supported codec.\n");
+ for (int n = 0; n < noOfCodecs; n++) {
+ EXPECT_EQ(0, acm->Codec(n, &sendCodec));
+ printf("%d %s\n", n, sendCodec.plname);
+ }
+ printf("Choose your codec:");
+ ASSERT_GT(scanf("%d", &codecNo), 0);
+ }
+
+ EXPECT_EQ(0, acm->Codec(codecNo, &sendCodec));
+
+ sendCodec.channels = channels;
+
+ EXPECT_EQ(0, acm->RegisterSendCodec(sendCodec));
+ _packetization = new TestPacketization(rtpStream, sendCodec.plfreq);
+ EXPECT_EQ(0, acm->RegisterTransportCallback(_packetization));
+
+ _acm = acm;
+}
+
+void Sender::Teardown() {
+ _pcmFile.Close();
+ delete _packetization;
+}
+
+bool Sender::Add10MsData() {
+ if (!_pcmFile.EndOfFile()) {
+ EXPECT_GT(_pcmFile.Read10MsData(_audioFrame), 0);
+ int32_t ok = _acm->Add10MsData(_audioFrame);
+ EXPECT_GE(ok, 0);
+    return ok >= 0;
+ }
+ return false;
+}
+
+void Sender::Run() {
+ while (true) {
+ if (!Add10MsData()) {
+ break;
+ }
+ }
+}
+
+Receiver::Receiver()
+ : _playoutLengthSmpls(WEBRTC_10MS_PCM_AUDIO),
+ _payloadSizeBytes(MAX_INCOMING_PAYLOAD) {
+}
+
+void Receiver::Setup(AudioCodingModule *acm, RTPStream *rtpStream,
+ std::string out_file_name, size_t channels) {
+ struct CodecInst recvCodec = CodecInst();
+ int noOfCodecs;
+ EXPECT_EQ(0, acm->InitializeReceiver());
+
+ noOfCodecs = acm->NumberOfCodecs();
+ for (int i = 0; i < noOfCodecs; i++) {
+ EXPECT_EQ(0, acm->Codec(i, &recvCodec));
+ if (recvCodec.channels == channels)
+ EXPECT_EQ(0, acm->RegisterReceiveCodec(recvCodec));
+ // Forces mono/stereo for Opus.
+ if (!strcmp(recvCodec.plname, "opus")) {
+ recvCodec.channels = channels;
+ EXPECT_EQ(0, acm->RegisterReceiveCodec(recvCodec));
+ }
+ }
+
+ int playSampFreq;
+ std::string file_name;
+ std::stringstream file_stream;
+ file_stream << webrtc::test::OutputPath() << out_file_name
+ << static_cast<int>(codeId) << ".pcm";
+ file_name = file_stream.str();
+ _rtpStream = rtpStream;
+
+ if (testMode == 1) {
+ playSampFreq = recvCodec.plfreq;
+ _pcmFile.Open(file_name, recvCodec.plfreq, "wb+");
+ } else if (testMode == 0) {
+ playSampFreq = 32000;
+ _pcmFile.Open(file_name, 32000, "wb+");
+ } else {
+ printf("\nValid output frequencies:\n");
+ printf("8000\n16000\n32000\n-1,");
+ printf("which means output frequency equal to received signal frequency");
+ printf("\n\nChoose output sampling frequency: ");
+ ASSERT_GT(scanf("%d", &playSampFreq), 0);
+ file_name = webrtc::test::OutputPath() + out_file_name + ".pcm";
+ _pcmFile.Open(file_name, playSampFreq, "wb+");
+ }
+
+ _realPayloadSizeBytes = 0;
+ _playoutBuffer = new int16_t[WEBRTC_10MS_PCM_AUDIO];
+ _frequency = playSampFreq;
+ _acm = acm;
+ _firstTime = true;
+}
+
+void Receiver::Teardown() {
+ delete[] _playoutBuffer;
+ _pcmFile.Close();
+ if (testMode > 1) {
+ Trace::ReturnTrace();
+ }
+}
+
+bool Receiver::IncomingPacket() {
+ if (!_rtpStream->EndOfFile()) {
+ if (_firstTime) {
+ _firstTime = false;
+ _realPayloadSizeBytes = _rtpStream->Read(&_rtpInfo, _incomingPayload,
+ _payloadSizeBytes, &_nextTime);
+ if (_realPayloadSizeBytes == 0) {
+ if (_rtpStream->EndOfFile()) {
+ _firstTime = true;
+ return true;
+ } else {
+ return false;
+ }
+ }
+ }
+
+ EXPECT_EQ(0, _acm->IncomingPacket(_incomingPayload, _realPayloadSizeBytes,
+ _rtpInfo));
+ _realPayloadSizeBytes = _rtpStream->Read(&_rtpInfo, _incomingPayload,
+ _payloadSizeBytes, &_nextTime);
+ if (_realPayloadSizeBytes == 0 && _rtpStream->EndOfFile()) {
+ _firstTime = true;
+ }
+ }
+ return true;
+}
+
+bool Receiver::PlayoutData() {
+ AudioFrame audioFrame;
+
+  int32_t ok = _acm->PlayoutData10Ms(_frequency, &audioFrame);
+  EXPECT_EQ(0, ok);
+  if (ok < 0) {
+ return false;
+ }
+ if (_playoutLengthSmpls == 0) {
+ return false;
+ }
+ _pcmFile.Write10MsData(audioFrame.data_,
+ audioFrame.samples_per_channel_ * audioFrame.num_channels_);
+ return true;
+}
+
+void Receiver::Run() {
+ uint8_t counter500Ms = 50;
+ uint32_t clock = 0;
+
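+  // |clock| advances in 1 ms steps: packets are handed to the ACM when their
+  // offset is due, and 10 ms of audio is pulled out every 10 ticks. After the
+  // RTP file ends the loop keeps running briefly to play out buffered audio.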
+ while (counter500Ms > 0) {
+ if (clock == 0 || clock >= _nextTime) {
+ EXPECT_TRUE(IncomingPacket());
+ if (clock == 0) {
+ clock = _nextTime;
+ }
+ }
+ if ((clock % 10) == 0) {
+ if (!PlayoutData()) {
+ clock++;
+ continue;
+ }
+ }
+ if (_rtpStream->EndOfFile()) {
+ counter500Ms--;
+ }
+ clock++;
+ }
+}
+
+EncodeDecodeTest::EncodeDecodeTest() {
+ _testMode = 2;
+ Trace::CreateTrace();
+ Trace::SetTraceFile(
+ (webrtc::test::OutputPath() + "acm_encdec_trace.txt").c_str());
+}
+
+EncodeDecodeTest::EncodeDecodeTest(int testMode) {
+  // testMode == 0 for autotest
+  // testMode == 1 for testing all codecs/parameters
+  // testMode > 1 for specific user-input test (as it was used before)
+ _testMode = testMode;
+ if (_testMode != 0) {
+ Trace::CreateTrace();
+ Trace::SetTraceFile(
+ (webrtc::test::OutputPath() + "acm_encdec_trace.txt").c_str());
+ }
+}
+
+void EncodeDecodeTest::Perform() {
+ int numCodecs = 1;
+ int codePars[3]; // Frequency, packet size, rate.
+  int numPars[52]; // Number of codec parameter sets (freq, pacsize, rate)
+ // to test, for a given codec.
+
+ codePars[0] = 0;
+ codePars[1] = 0;
+ codePars[2] = 0;
+
+ rtc::scoped_ptr<AudioCodingModule> acm(AudioCodingModule::Create(0));
+ struct CodecInst sendCodecTmp;
+ numCodecs = acm->NumberOfCodecs();
+
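+  // In modes 0 and 1, skip codecs that cannot be run as mono senders here
+  // (telephone-event, CN, RED and stereo entries) by giving them zero
+  // parameter sets; mode 2 tests a single codec chosen interactively.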
+ if (_testMode != 2) {
+ for (int n = 0; n < numCodecs; n++) {
+ EXPECT_EQ(0, acm->Codec(n, &sendCodecTmp));
+ if (STR_CASE_CMP(sendCodecTmp.plname, "telephone-event") == 0) {
+ numPars[n] = 0;
+ } else if (STR_CASE_CMP(sendCodecTmp.plname, "cn") == 0) {
+ numPars[n] = 0;
+ } else if (STR_CASE_CMP(sendCodecTmp.plname, "red") == 0) {
+ numPars[n] = 0;
+ } else if (sendCodecTmp.channels == 2) {
+ numPars[n] = 0;
+ } else {
+ numPars[n] = 1;
+ }
+ }
+ } else {
+ numCodecs = 1;
+ numPars[0] = 1;
+ }
+
+ _receiver.testMode = _testMode;
+
+ // Loop over all mono codecs:
+ for (int codeId = 0; codeId < numCodecs; codeId++) {
+ // Only encode using real mono encoders, not telephone-event and cng.
+ for (int loopPars = 1; loopPars <= numPars[codeId]; loopPars++) {
+ // Encode all data to file.
+ std::string fileName = EncodeToFile(1, codeId, codePars, _testMode);
+
+ RTPFile rtpFile;
+ rtpFile.Open(fileName.c_str(), "rb");
+
+ _receiver.codeId = codeId;
+
+ rtpFile.ReadHeader();
+ _receiver.Setup(acm.get(), &rtpFile, "encodeDecode_out", 1);
+ _receiver.Run();
+ _receiver.Teardown();
+ rtpFile.Close();
+ }
+ }
+
+ // End tracing.
+ if (_testMode == 1) {
+ Trace::ReturnTrace();
+ }
+}
+
+std::string EncodeDecodeTest::EncodeToFile(int fileType,
+ int codeId,
+ int* codePars,
+ int testMode) {
+ rtc::scoped_ptr<AudioCodingModule> acm(AudioCodingModule::Create(1));
+ RTPFile rtpFile;
+ std::string fileName = webrtc::test::TempFilename(webrtc::test::OutputPath(),
+ "encode_decode_rtp");
+ rtpFile.Open(fileName.c_str(), "wb+");
+ rtpFile.WriteHeader();
+
+ // Store for auto_test and logging.
+ _sender.testMode = testMode;
+ _sender.codeId = codeId;
+
+ _sender.Setup(acm.get(), &rtpFile, "audio_coding/testfile32kHz", 32000, 1);
+ if (acm->SendCodec()) {
+ _sender.Run();
+ }
+ _sender.Teardown();
+ rtpFile.Close();
+
+ return fileName;
+}
+
+} // namespace webrtc
diff --git a/webrtc/modules/audio_coding/test/EncodeDecodeTest.h b/webrtc/modules/audio_coding/test/EncodeDecodeTest.h
new file mode 100644
index 0000000000..f9a9a5bb52
--- /dev/null
+++ b/webrtc/modules/audio_coding/test/EncodeDecodeTest.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_CODING_TEST_ENCODEDECODETEST_H_
+#define WEBRTC_MODULES_AUDIO_CODING_TEST_ENCODEDECODETEST_H_
+
+#include <stdio.h>
+#include <string.h>
+
+#include "webrtc/modules/audio_coding/include/audio_coding_module.h"
+#include "webrtc/modules/audio_coding/test/ACMTest.h"
+#include "webrtc/modules/audio_coding/test/PCMFile.h"
+#include "webrtc/modules/audio_coding/test/RTPFile.h"
+#include "webrtc/typedefs.h"
+
+namespace webrtc {
+
+#define MAX_INCOMING_PAYLOAD 8096
+
+// TestPacketization callback which writes the encoded payloads to file
+class TestPacketization : public AudioPacketizationCallback {
+ public:
+ TestPacketization(RTPStream *rtpStream, uint16_t frequency);
+ ~TestPacketization();
+ int32_t SendData(const FrameType frameType,
+ const uint8_t payloadType,
+ const uint32_t timeStamp,
+ const uint8_t* payloadData,
+ const size_t payloadSize,
+ const RTPFragmentationHeader* fragmentation) override;
+
+ private:
+ static void MakeRTPheader(uint8_t* rtpHeader, uint8_t payloadType,
+ int16_t seqNo, uint32_t timeStamp, uint32_t ssrc);
+ RTPStream* _rtpStream;
+ int32_t _frequency;
+ int16_t _seqNo;
+};
+
+class Sender {
+ public:
+ Sender();
+ void Setup(AudioCodingModule *acm, RTPStream *rtpStream,
+ std::string in_file_name, int sample_rate, size_t channels);
+ void Teardown();
+ void Run();
+ bool Add10MsData();
+
+  // For auto_test and logging.
+ uint8_t testMode;
+ uint8_t codeId;
+
+ protected:
+ AudioCodingModule* _acm;
+
+ private:
+ PCMFile _pcmFile;
+ AudioFrame _audioFrame;
+ TestPacketization* _packetization;
+};
+
+class Receiver {
+ public:
+ Receiver();
+  virtual ~Receiver() {}
+ void Setup(AudioCodingModule *acm, RTPStream *rtpStream,
+ std::string out_file_name, size_t channels);
+ void Teardown();
+ void Run();
+ virtual bool IncomingPacket();
+ bool PlayoutData();
+
+  // For auto_test and logging.
+ uint8_t codeId;
+ uint8_t testMode;
+
+ private:
+ PCMFile _pcmFile;
+ int16_t* _playoutBuffer;
+ uint16_t _playoutLengthSmpls;
+ int32_t _frequency;
+ bool _firstTime;
+
+ protected:
+ AudioCodingModule* _acm;
+ uint8_t _incomingPayload[MAX_INCOMING_PAYLOAD];
+ RTPStream* _rtpStream;
+ WebRtcRTPHeader _rtpInfo;
+ size_t _realPayloadSizeBytes;
+ size_t _payloadSizeBytes;
+ uint32_t _nextTime;
+};
+
+class EncodeDecodeTest : public ACMTest {
+ public:
+ EncodeDecodeTest();
+ explicit EncodeDecodeTest(int testMode);
+ void Perform() override;
+
+ uint16_t _playoutFreq;
+ uint8_t _testMode;
+
+ private:
+ std::string EncodeToFile(int fileType,
+ int codeId,
+ int* codePars,
+ int testMode);
+
+ protected:
+ Sender _sender;
+ Receiver _receiver;
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_AUDIO_CODING_TEST_ENCODEDECODETEST_H_
diff --git a/webrtc/modules/audio_coding/test/PCMFile.cc b/webrtc/modules/audio_coding/test/PCMFile.cc
new file mode 100644
index 0000000000..9289d73baa
--- /dev/null
+++ b/webrtc/modules/audio_coding/test/PCMFile.cc
@@ -0,0 +1,221 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_coding/test/PCMFile.h"
+
+#include <ctype.h>
+#include <stdio.h>
+#include <string.h>
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/modules/include/module_common_types.h"
+
+namespace webrtc {
+
+#define MAX_FILE_NAME_LENGTH_BYTE 500
+
+PCMFile::PCMFile()
+ : pcm_file_(NULL),
+ samples_10ms_(160),
+ frequency_(16000),
+ end_of_file_(false),
+ auto_rewind_(false),
+ rewinded_(false),
+ read_stereo_(false),
+ save_stereo_(false) {
+ timestamp_ = (((uint32_t) rand() & 0x0000FFFF) << 16) |
+ ((uint32_t) rand() & 0x0000FFFF);
+}
+
+PCMFile::PCMFile(uint32_t timestamp)
+ : pcm_file_(NULL),
+ samples_10ms_(160),
+ frequency_(16000),
+ end_of_file_(false),
+ auto_rewind_(false),
+ rewinded_(false),
+ read_stereo_(false),
+ save_stereo_(false) {
+ timestamp_ = timestamp;
+}
+
+int16_t PCMFile::ChooseFile(std::string* file_name, int16_t max_len,
+ uint16_t* frequency_hz) {
+ char tmp_name[MAX_FILE_NAME_LENGTH_BYTE];
+
+ EXPECT_TRUE(fgets(tmp_name, MAX_FILE_NAME_LENGTH_BYTE, stdin) != NULL);
+ tmp_name[MAX_FILE_NAME_LENGTH_BYTE - 1] = '\0';
+ int16_t n = 0;
+
+  // Remove leading spaces and control characters.
+ while ((isspace(tmp_name[n]) || iscntrl(tmp_name[n])) && (tmp_name[n] != 0)
+ && (n < MAX_FILE_NAME_LENGTH_BYTE)) {
+ n++;
+ }
+ if (n > 0) {
+ memmove(tmp_name, &tmp_name[n], MAX_FILE_NAME_LENGTH_BYTE - n);
+ }
+
+  // Remove trailing spaces and control characters.
+ n = (int16_t)(strlen(tmp_name) - 1);
+ if (n >= 0) {
+ while ((isspace(tmp_name[n]) || iscntrl(tmp_name[n])) && (n >= 0)) {
+ n--;
+ }
+ }
+ if (n >= 0) {
+ tmp_name[n + 1] = '\0';
+ }
+
+ int16_t len = (int16_t) strlen(tmp_name);
+ if (len > max_len) {
+ return -1;
+ }
+ if (len > 0) {
+ std::string tmp_string(tmp_name, len + 1);
+ *file_name = tmp_string;
+ }
+ printf("Enter the sampling frequency (in Hz) of the above file [%u]: ",
+ *frequency_hz);
+ EXPECT_TRUE(fgets(tmp_name, 10, stdin) != NULL);
+ uint16_t tmp_frequency = (uint16_t) atoi(tmp_name);
+ if (tmp_frequency > 0) {
+ *frequency_hz = tmp_frequency;
+ }
+ return 0;
+}
+
+void PCMFile::Open(const std::string& file_name, uint16_t frequency,
+ const char* mode, bool auto_rewind) {
+ if ((pcm_file_ = fopen(file_name.c_str(), mode)) == NULL) {
+ printf("Cannot open file %s.\n", file_name.c_str());
+ ADD_FAILURE() << "Unable to read file";
+ }
+ frequency_ = frequency;
+ samples_10ms_ = (uint16_t)(frequency_ / 100);
+ auto_rewind_ = auto_rewind;
+ end_of_file_ = false;
+ rewinded_ = false;
+}
+
+int32_t PCMFile::SamplingFrequency() const {
+ return frequency_;
+}
+
+uint16_t PCMFile::PayloadLength10Ms() const {
+ return samples_10ms_;
+}
+
+int32_t PCMFile::Read10MsData(AudioFrame& audio_frame) {
+ uint16_t channels = 1;
+ if (read_stereo_) {
+ channels = 2;
+ }
+
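+  // Read one 10 ms block. If the file ends mid-block, the remainder is
+  // zero-padded and the file is either rewound (auto_rewind_) or flagged as
+  // ended.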
+ int32_t payload_size = (int32_t) fread(audio_frame.data_, sizeof(uint16_t),
+ samples_10ms_ * channels, pcm_file_);
+ if (payload_size < samples_10ms_ * channels) {
+ for (int k = payload_size; k < samples_10ms_ * channels; k++) {
+ audio_frame.data_[k] = 0;
+ }
+ if (auto_rewind_) {
+ rewind(pcm_file_);
+ rewinded_ = true;
+ } else {
+ end_of_file_ = true;
+ }
+ }
+ audio_frame.samples_per_channel_ = samples_10ms_;
+ audio_frame.sample_rate_hz_ = frequency_;
+ audio_frame.num_channels_ = channels;
+ audio_frame.timestamp_ = timestamp_;
+ timestamp_ += samples_10ms_;
+ ++blocks_read_;
+ if (num_10ms_blocks_to_read_ && blocks_read_ >= *num_10ms_blocks_to_read_)
+ end_of_file_ = true;
+ return samples_10ms_;
+}
+
+void PCMFile::Write10MsData(AudioFrame& audio_frame) {
+ if (audio_frame.num_channels_ == 1) {
+ if (!save_stereo_) {
+ if (fwrite(audio_frame.data_, sizeof(uint16_t),
+ audio_frame.samples_per_channel_, pcm_file_) !=
+ static_cast<size_t>(audio_frame.samples_per_channel_)) {
+ return;
+ }
+ } else {
+ int16_t* stereo_audio = new int16_t[2 * audio_frame.samples_per_channel_];
+ for (size_t k = 0; k < audio_frame.samples_per_channel_; k++) {
+ stereo_audio[k << 1] = audio_frame.data_[k];
+ stereo_audio[(k << 1) + 1] = audio_frame.data_[k];
+ }
+      if (fwrite(stereo_audio, sizeof(int16_t),
+                 2 * audio_frame.samples_per_channel_, pcm_file_) !=
+          static_cast<size_t>(2 * audio_frame.samples_per_channel_)) {
+        // Free the temporary buffer before the early return to avoid a leak.
+        delete[] stereo_audio;
+        return;
+      }
+      delete[] stereo_audio;
+ }
+ } else {
+ if (fwrite(audio_frame.data_, sizeof(int16_t),
+ audio_frame.num_channels_ * audio_frame.samples_per_channel_,
+ pcm_file_) !=
+ static_cast<size_t>(audio_frame.num_channels_ *
+ audio_frame.samples_per_channel_)) {
+ return;
+ }
+ }
+}
+
+void PCMFile::Write10MsData(int16_t* playout_buffer, size_t length_smpls) {
+ if (fwrite(playout_buffer, sizeof(uint16_t), length_smpls, pcm_file_) !=
+ length_smpls) {
+ return;
+ }
+}
+
+void PCMFile::Close() {
+ fclose(pcm_file_);
+ pcm_file_ = NULL;
+ blocks_read_ = 0;
+}
+
+void PCMFile::FastForward(int num_10ms_blocks) {
+ const int channels = read_stereo_ ? 2 : 1;
+ long num_bytes_to_move =
+ num_10ms_blocks * sizeof(int16_t) * samples_10ms_ * channels;
+ int error = fseek(pcm_file_, num_bytes_to_move, SEEK_CUR);
+ RTC_DCHECK_EQ(error, 0);
+}
+
+void PCMFile::Rewind() {
+ rewind(pcm_file_);
+ end_of_file_ = false;
+ blocks_read_ = 0;
+}
+
+bool PCMFile::Rewinded() {
+ return rewinded_;
+}
+
+void PCMFile::SaveStereo(bool is_stereo) {
+ save_stereo_ = is_stereo;
+}
+
+void PCMFile::ReadStereo(bool is_stereo) {
+ read_stereo_ = is_stereo;
+}
+
+void PCMFile::SetNum10MsBlocksToRead(int value) {
+ num_10ms_blocks_to_read_ = rtc::Optional<int>(value);
+}
+
+} // namespace webrtc
diff --git a/webrtc/modules/audio_coding/test/PCMFile.h b/webrtc/modules/audio_coding/test/PCMFile.h
new file mode 100644
index 0000000000..840933a1bd
--- /dev/null
+++ b/webrtc/modules/audio_coding/test/PCMFile.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_CODING_TEST_PCMFILE_H_
+#define WEBRTC_MODULES_AUDIO_CODING_TEST_PCMFILE_H_
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <string>
+
+#include "webrtc/base/optional.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/typedefs.h"
+
+namespace webrtc {
+
+class PCMFile {
+ public:
+ PCMFile();
+  explicit PCMFile(uint32_t timestamp);
+ ~PCMFile() {
+ if (pcm_file_ != NULL) {
+ fclose(pcm_file_);
+ }
+ }
+
+ void Open(const std::string& filename, uint16_t frequency, const char* mode,
+ bool auto_rewind = false);
+
+ int32_t Read10MsData(AudioFrame& audio_frame);
+
+ void Write10MsData(int16_t *playout_buffer, size_t length_smpls);
+ void Write10MsData(AudioFrame& audio_frame);
+
+ uint16_t PayloadLength10Ms() const;
+ int32_t SamplingFrequency() const;
+ void Close();
+ bool EndOfFile() const {
+ return end_of_file_;
+ }
+ // Moves forward the specified number of 10 ms blocks. If a limit has been set
+ // with SetNum10MsBlocksToRead, fast-forwarding does not count towards this
+ // limit.
+ void FastForward(int num_10ms_blocks);
+ void Rewind();
+ static int16_t ChooseFile(std::string* file_name, int16_t max_len,
+ uint16_t* frequency_hz);
+ bool Rewinded();
+ void SaveStereo(bool is_stereo = true);
+ void ReadStereo(bool is_stereo = true);
+ // If set, the reading will stop after the specified number of blocks have
+ // been read. When that has happened, EndOfFile() will return true. Calling
+ // Rewind() will reset the counter and start over.
+ void SetNum10MsBlocksToRead(int value);
+
+ private:
+ FILE* pcm_file_;
+ uint16_t samples_10ms_;
+ int32_t frequency_;
+ bool end_of_file_;
+ bool auto_rewind_;
+ bool rewinded_;
+ uint32_t timestamp_;
+ bool read_stereo_;
+ bool save_stereo_;
+ rtc::Optional<int> num_10ms_blocks_to_read_;
+ int blocks_read_ = 0;
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_AUDIO_CODING_TEST_PCMFILE_H_
diff --git a/webrtc/modules/audio_coding/test/PacketLossTest.cc b/webrtc/modules/audio_coding/test/PacketLossTest.cc
new file mode 100644
index 0000000000..ad3e83403e
--- /dev/null
+++ b/webrtc/modules/audio_coding/test/PacketLossTest.cc
@@ -0,0 +1,167 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_coding/test/PacketLossTest.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/common.h"
+#include "webrtc/test/testsupport/fileutils.h"
+
+namespace webrtc {
+
+ReceiverWithPacketLoss::ReceiverWithPacketLoss()
+ : loss_rate_(0),
+ burst_length_(1),
+ packet_counter_(0),
+ lost_packet_counter_(0),
+ burst_lost_counter_(burst_length_) {
+}
+
+void ReceiverWithPacketLoss::Setup(AudioCodingModule *acm,
+ RTPStream *rtpStream,
+ std::string out_file_name,
+ int channels,
+ int loss_rate,
+ int burst_length) {
+ loss_rate_ = loss_rate;
+ burst_length_ = burst_length;
+  // Start with a full burst counter so the first packet is never dropped.
+  burst_lost_counter_ = burst_length_;
+ std::stringstream ss;
+ ss << out_file_name << "_" << loss_rate_ << "_" << burst_length_ << "_";
+ Receiver::Setup(acm, rtpStream, ss.str(), channels);
+}
+
+bool ReceiverWithPacketLoss::IncomingPacket() {
+ if (!_rtpStream->EndOfFile()) {
+ if (packet_counter_ == 0) {
+ _realPayloadSizeBytes = _rtpStream->Read(&_rtpInfo, _incomingPayload,
+ _payloadSizeBytes, &_nextTime);
+ if (_realPayloadSizeBytes == 0) {
+ if (_rtpStream->EndOfFile()) {
+ packet_counter_ = 0;
+ return true;
+ } else {
+ return false;
+ }
+ }
+ }
+
+ if (!PacketLost()) {
+ _acm->IncomingPacket(_incomingPayload, _realPayloadSizeBytes, _rtpInfo);
+ }
+ packet_counter_++;
+ _realPayloadSizeBytes = _rtpStream->Read(&_rtpInfo, _incomingPayload,
+ _payloadSizeBytes, &_nextTime);
+ if (_realPayloadSizeBytes == 0 && _rtpStream->EndOfFile()) {
+ packet_counter_ = 0;
+ lost_packet_counter_ = 0;
+ }
+ }
+ return true;
+}
+
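+// Decides whether the current packet is dropped. Losses come in bursts of
+// |burst_length_| packets; a new burst is started whenever the realized loss
+// rate falls below |loss_rate_| percent.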
+bool ReceiverWithPacketLoss::PacketLost() {
+ if (burst_lost_counter_ < burst_length_) {
+ lost_packet_counter_++;
+ burst_lost_counter_++;
+ return true;
+ }
+
+ if (lost_packet_counter_ * 100 < loss_rate_ * packet_counter_) {
+ lost_packet_counter_++;
+ burst_lost_counter_ = 1;
+ return true;
+ }
+ return false;
+}
+
+SenderWithFEC::SenderWithFEC()
+ : expected_loss_rate_(0) {
+}
+
+void SenderWithFEC::Setup(AudioCodingModule *acm, RTPStream *rtpStream,
+ std::string in_file_name, int sample_rate,
+ int channels, int expected_loss_rate) {
+ Sender::Setup(acm, rtpStream, in_file_name, sample_rate, channels);
+ EXPECT_TRUE(SetFEC(true));
+ EXPECT_TRUE(SetPacketLossRate(expected_loss_rate));
+}
+
+bool SenderWithFEC::SetFEC(bool enable_fec) {
+ if (_acm->SetCodecFEC(enable_fec) == 0) {
+ return true;
+ }
+ return false;
+}
+
+bool SenderWithFEC::SetPacketLossRate(int expected_loss_rate) {
+ if (_acm->SetPacketLossRate(expected_loss_rate) == 0) {
+ expected_loss_rate_ = expected_loss_rate;
+ return true;
+ }
+ return false;
+}
+
+PacketLossTest::PacketLossTest(int channels, int expected_loss_rate,
+ int actual_loss_rate, int burst_length)
+ : channels_(channels),
+ in_file_name_(channels_ == 1 ? "audio_coding/testfile32kHz" :
+ "audio_coding/teststereo32kHz"),
+ sample_rate_hz_(32000),
+ sender_(new SenderWithFEC),
+ receiver_(new ReceiverWithPacketLoss),
+ expected_loss_rate_(expected_loss_rate),
+ actual_loss_rate_(actual_loss_rate),
+ burst_length_(burst_length) {
+}
+
+void PacketLossTest::Perform() {
+#ifndef WEBRTC_CODEC_OPUS
+ return;
+#else
+ rtc::scoped_ptr<AudioCodingModule> acm(AudioCodingModule::Create(0));
+
+ int codec_id = acm->Codec("opus", 48000, channels_);
+
+ RTPFile rtpFile;
+ std::string fileName = webrtc::test::TempFilename(webrtc::test::OutputPath(),
+ "packet_loss_test");
+
+ // Encode to file
+ rtpFile.Open(fileName.c_str(), "wb+");
+ rtpFile.WriteHeader();
+
+ sender_->testMode = 0;
+ sender_->codeId = codec_id;
+
+ sender_->Setup(acm.get(), &rtpFile, in_file_name_, sample_rate_hz_, channels_,
+ expected_loss_rate_);
+ if (acm->SendCodec()) {
+ sender_->Run();
+ }
+ sender_->Teardown();
+ rtpFile.Close();
+
+ // Decode to file
+ rtpFile.Open(fileName.c_str(), "rb");
+ rtpFile.ReadHeader();
+
+ receiver_->testMode = 0;
+ receiver_->codeId = codec_id;
+
+ receiver_->Setup(acm.get(), &rtpFile, "packetLoss_out", channels_,
+ actual_loss_rate_, burst_length_);
+ receiver_->Run();
+ receiver_->Teardown();
+ rtpFile.Close();
+#endif
+}
+
+} // namespace webrtc
diff --git a/webrtc/modules/audio_coding/test/PacketLossTest.h b/webrtc/modules/audio_coding/test/PacketLossTest.h
new file mode 100644
index 0000000000..f3570ae1ca
--- /dev/null
+++ b/webrtc/modules/audio_coding/test/PacketLossTest.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_CODING_TEST_PACKETLOSSTEST_H_
+#define WEBRTC_MODULES_AUDIO_CODING_TEST_PACKETLOSSTEST_H_
+
+#include <string>
+
+#include "webrtc/base/scoped_ptr.h"
+#include "webrtc/modules/audio_coding/test/EncodeDecodeTest.h"
+
+namespace webrtc {
+
+class ReceiverWithPacketLoss : public Receiver {
+ public:
+ ReceiverWithPacketLoss();
+ void Setup(AudioCodingModule *acm, RTPStream *rtpStream,
+ std::string out_file_name, int channels, int loss_rate,
+ int burst_length);
+ bool IncomingPacket() override;
+
+ protected:
+ bool PacketLost();
+ int loss_rate_;
+ int burst_length_;
+ int packet_counter_;
+ int lost_packet_counter_;
+ int burst_lost_counter_;
+};
+
+class SenderWithFEC : public Sender {
+ public:
+ SenderWithFEC();
+ void Setup(AudioCodingModule *acm, RTPStream *rtpStream,
+ std::string in_file_name, int sample_rate, int channels,
+ int expected_loss_rate);
+ bool SetPacketLossRate(int expected_loss_rate);
+ bool SetFEC(bool enable_fec);
+ protected:
+ int expected_loss_rate_;
+};
+
+class PacketLossTest : public ACMTest {
+ public:
+  PacketLossTest(int channels, int expected_loss_rate, int actual_loss_rate,
+                 int burst_length);
+  void Perform() override;
+ protected:
+ int channels_;
+ std::string in_file_name_;
+ int sample_rate_hz_;
+ rtc::scoped_ptr<SenderWithFEC> sender_;
+ rtc::scoped_ptr<ReceiverWithPacketLoss> receiver_;
+ int expected_loss_rate_;
+ int actual_loss_rate_;
+ int burst_length_;
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_AUDIO_CODING_TEST_PACKETLOSSTEST_H_
diff --git a/webrtc/modules/audio_coding/test/RTPFile.cc b/webrtc/modules/audio_coding/test/RTPFile.cc
new file mode 100644
index 0000000000..60777178c6
--- /dev/null
+++ b/webrtc/modules/audio_coding/test/RTPFile.cc
@@ -0,0 +1,227 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "RTPFile.h"
+
+#include <stdlib.h>
+#include <limits>
+
+#ifdef WIN32
+# include <Winsock2.h>
+#else
+# include <arpa/inet.h>
+#endif
+
+#include "audio_coding_module.h"
+#include "engine_configurations.h"
+#include "webrtc/system_wrappers/include/rw_lock_wrapper.h"
+// TODO(tlegrand): Consider removing usage of gtest.
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace webrtc {
+
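+// Parses the fixed 12-byte RTP header: payload type from byte 1 (the marker
+// bit is assumed to be zero), sequence number from bytes 2-3, timestamp from
+// bytes 4-7 and SSRC from bytes 8-11, all in network byte order.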
+void RTPStream::ParseRTPHeader(WebRtcRTPHeader* rtpInfo,
+ const uint8_t* rtpHeader) {
+ rtpInfo->header.payloadType = rtpHeader[1];
+ rtpInfo->header.sequenceNumber = (static_cast<uint16_t>(rtpHeader[2]) << 8) |
+ rtpHeader[3];
+ rtpInfo->header.timestamp = (static_cast<uint32_t>(rtpHeader[4]) << 24) |
+ (static_cast<uint32_t>(rtpHeader[5]) << 16) |
+ (static_cast<uint32_t>(rtpHeader[6]) << 8) | rtpHeader[7];
+ rtpInfo->header.ssrc = (static_cast<uint32_t>(rtpHeader[8]) << 24) |
+ (static_cast<uint32_t>(rtpHeader[9]) << 16) |
+ (static_cast<uint32_t>(rtpHeader[10]) << 8) | rtpHeader[11];
+}
+
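+// Writes a 12-byte RTP header: version 2 with no padding, extension or CSRCs
+// (0x80), the payload type with the marker bit cleared, then sequence number,
+// timestamp and SSRC in network byte order.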
+void RTPStream::MakeRTPheader(uint8_t* rtpHeader, uint8_t payloadType,
+ int16_t seqNo, uint32_t timeStamp,
+ uint32_t ssrc) {
+ rtpHeader[0] = 0x80;
+ rtpHeader[1] = payloadType;
+ rtpHeader[2] = (seqNo >> 8) & 0xFF;
+ rtpHeader[3] = seqNo & 0xFF;
+ rtpHeader[4] = timeStamp >> 24;
+ rtpHeader[5] = (timeStamp >> 16) & 0xFF;
+ rtpHeader[6] = (timeStamp >> 8) & 0xFF;
+ rtpHeader[7] = timeStamp & 0xFF;
+ rtpHeader[8] = ssrc >> 24;
+ rtpHeader[9] = (ssrc >> 16) & 0xFF;
+ rtpHeader[10] = (ssrc >> 8) & 0xFF;
+ rtpHeader[11] = ssrc & 0xFF;
+}
+
+RTPPacket::RTPPacket(uint8_t payloadType, uint32_t timeStamp, int16_t seqNo,
+ const uint8_t* payloadData, size_t payloadSize,
+ uint32_t frequency)
+    : payloadType(payloadType),
+      timeStamp(timeStamp),
+      seqNo(seqNo),
+      payloadData(NULL),  // Safe to delete[] when payloadSize == 0.
+      payloadSize(payloadSize),
+      frequency(frequency) {
+ if (payloadSize > 0) {
+ this->payloadData = new uint8_t[payloadSize];
+ memcpy(this->payloadData, payloadData, payloadSize);
+ }
+}
+
+RTPPacket::~RTPPacket() {
+ delete[] payloadData;
+}
+
+RTPBuffer::RTPBuffer() {
+ _queueRWLock = RWLockWrapper::CreateRWLock();
+}
+
+RTPBuffer::~RTPBuffer() {
+ delete _queueRWLock;
+}
+
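+// Copies the packet into the FIFO queue under the write lock; Read() later
+// pops packets in the same order.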
+void RTPBuffer::Write(const uint8_t payloadType, const uint32_t timeStamp,
+ const int16_t seqNo, const uint8_t* payloadData,
+ const size_t payloadSize, uint32_t frequency) {
+ RTPPacket *packet = new RTPPacket(payloadType, timeStamp, seqNo, payloadData,
+ payloadSize, frequency);
+ _queueRWLock->AcquireLockExclusive();
+ _rtpQueue.push(packet);
+ _queueRWLock->ReleaseLockExclusive();
+}
+
+size_t RTPBuffer::Read(WebRtcRTPHeader* rtpInfo, uint8_t* payloadData,
+ size_t payloadSize, uint32_t* offset) {
+  // Take the lock exclusively, since front() and pop() mutate the queue.
+  _queueRWLock->AcquireLockExclusive();
+  RTPPacket *packet = _rtpQueue.front();
+  _rtpQueue.pop();
+  _queueRWLock->ReleaseLockExclusive();
+ rtpInfo->header.markerBit = 1;
+ rtpInfo->header.payloadType = packet->payloadType;
+ rtpInfo->header.sequenceNumber = packet->seqNo;
+ rtpInfo->header.ssrc = 0;
+ rtpInfo->header.timestamp = packet->timeStamp;
+ if (packet->payloadSize > 0 && payloadSize >= packet->payloadSize) {
+ memcpy(payloadData, packet->payloadData, packet->payloadSize);
+ } else {
+ return 0;
+ }
+ *offset = (packet->timeStamp / (packet->frequency / 1000));
+
+ return packet->payloadSize;
+}
+
+bool RTPBuffer::EndOfFile() const {
+ _queueRWLock->AcquireLockShared();
+ bool eof = _rtpQueue.empty();
+ _queueRWLock->ReleaseLockShared();
+ return eof;
+}
+
+void RTPFile::Open(const char *filename, const char *mode) {
+  if ((_rtpFile = fopen(filename, mode)) == NULL) {
+    printf("Cannot open file %s.\n", filename);
+    ADD_FAILURE() << "Unable to open file";
+ exit(1);
+ }
+}
+
+void RTPFile::Close() {
+ if (_rtpFile != NULL) {
+ fclose(_rtpFile);
+ _rtpFile = NULL;
+ }
+}
+
+void RTPFile::WriteHeader() {
+  // Write a file header that NetEQ and rtpplay can parse: a magic line
+  // followed by start time (sec, usec), source, port and padding fields.
+  fprintf(_rtpFile, "#!RTPencode%s\n", "1.0");
+  uint32_t dummy_variable = 0;
+  // The fields should be in network byte order, but that does not matter
+  // when they are all zero.
+ EXPECT_EQ(1u, fwrite(&dummy_variable, 4, 1, _rtpFile));
+ EXPECT_EQ(1u, fwrite(&dummy_variable, 4, 1, _rtpFile));
+ EXPECT_EQ(1u, fwrite(&dummy_variable, 4, 1, _rtpFile));
+ EXPECT_EQ(1u, fwrite(&dummy_variable, 2, 1, _rtpFile));
+ EXPECT_EQ(1u, fwrite(&dummy_variable, 2, 1, _rtpFile));
+ fflush(_rtpFile);
+}
+
+void RTPFile::ReadHeader() {
+ uint32_t start_sec, start_usec, source;
+ uint16_t port, padding;
+ char fileHeader[40];
+ EXPECT_TRUE(fgets(fileHeader, 40, _rtpFile) != 0);
+ EXPECT_EQ(1u, fread(&start_sec, 4, 1, _rtpFile));
+ start_sec = ntohl(start_sec);
+ EXPECT_EQ(1u, fread(&start_usec, 4, 1, _rtpFile));
+ start_usec = ntohl(start_usec);
+ EXPECT_EQ(1u, fread(&source, 4, 1, _rtpFile));
+ source = ntohl(source);
+ EXPECT_EQ(1u, fread(&port, 2, 1, _rtpFile));
+ port = ntohs(port);
+ EXPECT_EQ(1u, fread(&padding, 2, 1, _rtpFile));
+ padding = ntohs(padding);
+}
+
+void RTPFile::Write(const uint8_t payloadType, const uint32_t timeStamp,
+ const int16_t seqNo, const uint8_t* payloadData,
+ const size_t payloadSize, uint32_t frequency) {
+  // Write one packet record: record length, RTP packet length, offset in
+  // ms, the 12-byte RTP header and finally the payload.
+ uint8_t rtpHeader[12];
+ MakeRTPheader(rtpHeader, payloadType, seqNo, timeStamp, 0);
+ ASSERT_LE(12 + payloadSize + 8, std::numeric_limits<u_short>::max());
+ uint16_t lengthBytes = htons(static_cast<u_short>(12 + payloadSize + 8));
+ uint16_t plen = htons(static_cast<u_short>(12 + payloadSize));
+ uint32_t offsetMs;
+
+ offsetMs = (timeStamp / (frequency / 1000));
+ offsetMs = htonl(offsetMs);
+ EXPECT_EQ(1u, fwrite(&lengthBytes, 2, 1, _rtpFile));
+ EXPECT_EQ(1u, fwrite(&plen, 2, 1, _rtpFile));
+ EXPECT_EQ(1u, fwrite(&offsetMs, 4, 1, _rtpFile));
+ EXPECT_EQ(1u, fwrite(&rtpHeader, 12, 1, _rtpFile));
+ EXPECT_EQ(payloadSize, fwrite(payloadData, 1, payloadSize, _rtpFile));
+}
+
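+// Reads one packet record from the dump file. Returns the payload size, or 0
+// at end-of-file or when the record is empty or malformed.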
+size_t RTPFile::Read(WebRtcRTPHeader* rtpInfo, uint8_t* payloadData,
+ size_t payloadSize, uint32_t* offset) {
+ uint16_t lengthBytes;
+ uint16_t plen;
+ uint8_t rtpHeader[12];
+ size_t read_len = fread(&lengthBytes, 2, 1, _rtpFile);
+ /* Check if we have reached end of file. */
+ if ((read_len == 0) && feof(_rtpFile)) {
+ _rtpEOF = true;
+ return 0;
+ }
+ EXPECT_EQ(1u, fread(&plen, 2, 1, _rtpFile));
+ EXPECT_EQ(1u, fread(offset, 4, 1, _rtpFile));
+ lengthBytes = ntohs(lengthBytes);
+ plen = ntohs(plen);
+ *offset = ntohl(*offset);
+ EXPECT_GT(plen, 11);
+
+ EXPECT_EQ(1u, fread(rtpHeader, 12, 1, _rtpFile));
+ ParseRTPHeader(rtpInfo, rtpHeader);
+ rtpInfo->type.Audio.isCNG = false;
+ rtpInfo->type.Audio.channel = 1;
+ EXPECT_EQ(lengthBytes, plen + 8);
+
+ if (plen == 0) {
+ return 0;
+ }
+ if (lengthBytes < 20) {
+ return 0;
+ }
+ if (payloadSize < static_cast<size_t>((lengthBytes - 20))) {
+ return 0;
+ }
+ lengthBytes -= 20;
+ EXPECT_EQ(lengthBytes, fread(payloadData, 1, lengthBytes, _rtpFile));
+ return lengthBytes;
+}
+
+} // namespace webrtc
diff --git a/webrtc/modules/audio_coding/test/RTPFile.h b/webrtc/modules/audio_coding/test/RTPFile.h
new file mode 100644
index 0000000000..696d41ebd2
--- /dev/null
+++ b/webrtc/modules/audio_coding/test/RTPFile.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_CODING_TEST_RTPFILE_H_
+#define WEBRTC_MODULES_AUDIO_CODING_TEST_RTPFILE_H_
+
+#include <stdio.h>
+#include <queue>
+
+#include "webrtc/modules/audio_coding/include/audio_coding_module.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/system_wrappers/include/rw_lock_wrapper.h"
+#include "webrtc/typedefs.h"
+
+namespace webrtc {
+
+class RTPStream {
+ public:
+ virtual ~RTPStream() {
+ }
+
+ virtual void Write(const uint8_t payloadType, const uint32_t timeStamp,
+ const int16_t seqNo, const uint8_t* payloadData,
+ const size_t payloadSize, uint32_t frequency) = 0;
+
+  // Returns the packet's payload size. A return value of zero means either
+  // end-of-stream (when EndOfFile() is true) or an error.
+ virtual size_t Read(WebRtcRTPHeader* rtpInfo, uint8_t* payloadData,
+ size_t payloadSize, uint32_t* offset) = 0;
+ virtual bool EndOfFile() const = 0;
+
+ protected:
+ void MakeRTPheader(uint8_t* rtpHeader, uint8_t payloadType, int16_t seqNo,
+ uint32_t timeStamp, uint32_t ssrc);
+
+ void ParseRTPHeader(WebRtcRTPHeader* rtpInfo, const uint8_t* rtpHeader);
+};
+
+class RTPPacket {
+ public:
+ RTPPacket(uint8_t payloadType, uint32_t timeStamp, int16_t seqNo,
+ const uint8_t* payloadData, size_t payloadSize,
+ uint32_t frequency);
+
+ ~RTPPacket();
+
+ uint8_t payloadType;
+ uint32_t timeStamp;
+ int16_t seqNo;
+ uint8_t* payloadData;
+ size_t payloadSize;
+ uint32_t frequency;
+};
+
+class RTPBuffer : public RTPStream {
+ public:
+ RTPBuffer();
+
+ ~RTPBuffer();
+
+ void Write(const uint8_t payloadType,
+ const uint32_t timeStamp,
+ const int16_t seqNo,
+ const uint8_t* payloadData,
+ const size_t payloadSize,
+ uint32_t frequency) override;
+
+ size_t Read(WebRtcRTPHeader* rtpInfo,
+ uint8_t* payloadData,
+ size_t payloadSize,
+ uint32_t* offset) override;
+
+ bool EndOfFile() const override;
+
+ private:
+ RWLockWrapper* _queueRWLock;
+ std::queue<RTPPacket *> _rtpQueue;
+};
+
+class RTPFile : public RTPStream {
+ public:
+ ~RTPFile() {
+ }
+
+ RTPFile()
+ : _rtpFile(NULL),
+ _rtpEOF(false) {
+ }
+
+ void Open(const char *outFilename, const char *mode);
+
+ void Close();
+
+ void WriteHeader();
+
+ void ReadHeader();
+
+ void Write(const uint8_t payloadType,
+ const uint32_t timeStamp,
+ const int16_t seqNo,
+ const uint8_t* payloadData,
+ const size_t payloadSize,
+ uint32_t frequency) override;
+
+ size_t Read(WebRtcRTPHeader* rtpInfo,
+ uint8_t* payloadData,
+ size_t payloadSize,
+ uint32_t* offset) override;
+
+ bool EndOfFile() const override { return _rtpEOF; }
+
+ private:
+ FILE* _rtpFile;
+ bool _rtpEOF;
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_AUDIO_CODING_TEST_RTPFILE_H_
diff --git a/webrtc/modules/audio_coding/test/SpatialAudio.cc b/webrtc/modules/audio_coding/test/SpatialAudio.cc
new file mode 100644
index 0000000000..c9f8080826
--- /dev/null
+++ b/webrtc/modules/audio_coding/test/SpatialAudio.cc
@@ -0,0 +1,196 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdio.h>
+#include <string.h>
+
+#include <math.h>
+
+#include "webrtc/common_types.h"
+#include "webrtc/modules/audio_coding/test/SpatialAudio.h"
+#include "webrtc/system_wrappers/include/trace.h"
+#include "webrtc/system_wrappers/include/trace.h"
+#include "webrtc/test/testsupport/fileutils.h"
+
+namespace webrtc {
+
+#define NUM_PANN_COEFFS 10
+
+SpatialAudio::SpatialAudio(int testMode)
+ : _acmLeft(AudioCodingModule::Create(1)),
+ _acmRight(AudioCodingModule::Create(2)),
+ _acmReceiver(AudioCodingModule::Create(3)),
+ _testMode(testMode) {
+}
+
+SpatialAudio::~SpatialAudio() {
+ delete _channel;
+ _inFile.Close();
+ _outFile.Close();
+}
+
+int16_t SpatialAudio::Setup() {
+ _channel = new Channel;
+
+ // Register callback for the sender side.
+ CHECK_ERROR(_acmLeft->RegisterTransportCallback(_channel));
+ CHECK_ERROR(_acmRight->RegisterTransportCallback(_channel));
+ // Register the receiver ACM in channel
+ _channel->RegisterReceiverACM(_acmReceiver.get());
+
+ uint16_t sampFreqHz = 32000;
+
+ const std::string file_name = webrtc::test::ResourcePath(
+ "audio_coding/testfile32kHz", "pcm");
+ _inFile.Open(file_name, sampFreqHz, "rb", false);
+
+ std::string output_file = webrtc::test::OutputPath()
+ + "out_spatial_autotest.pcm";
+ if (_testMode == 1) {
+ output_file = webrtc::test::OutputPath() + "testspatial_out.pcm";
+ printf("\n");
+ printf("Enter the output file [%s]: ", output_file.c_str());
+ PCMFile::ChooseFile(&output_file, MAX_FILE_NAME_LENGTH_BYTE, &sampFreqHz);
+ } else {
+ output_file = webrtc::test::OutputPath() + "testspatial_out.pcm";
+ }
+ _outFile.Open(output_file, sampFreqHz, "wb", false);
+ _outFile.SaveStereo(true);
+
+  // Register all available codecs as receiving codecs.
+ CodecInst codecInst;
+ int status;
+ uint8_t num_encoders = _acmReceiver->NumberOfCodecs();
+  // Query each codec and register it as a receive codec.
+ for (uint8_t n = 0; n < num_encoders; n++) {
+ status = _acmReceiver->Codec(n, &codecInst);
+ if (status < 0) {
+ printf("Error in Codec(), no matching codec found");
+ }
+ status = _acmReceiver->RegisterReceiveCodec(codecInst);
+ if (status < 0) {
+ printf("Error in RegisterReceiveCodec() for payload type %d",
+ codecInst.pltype);
+ }
+ }
+
+ return 0;
+}
+
+void SpatialAudio::Perform() {
+ if (_testMode == 0) {
+ printf("Running SpatialAudio Test");
+ WEBRTC_TRACE(webrtc::kTraceStateInfo, webrtc::kTraceAudioCoding, -1,
+ "---------- SpatialAudio ----------");
+ }
+
+ Setup();
+
+ CodecInst codecInst;
+ _acmLeft->Codec((uint8_t) 1, &codecInst);
+ CHECK_ERROR(_acmLeft->RegisterSendCodec(codecInst));
+ EncodeDecode();
+
+ int16_t pannCntr = 0;
+
+ double leftPanning[NUM_PANN_COEFFS] = { 1.00, 0.95, 0.90, 0.85, 0.80, 0.75,
+ 0.70, 0.60, 0.55, 0.50 };
+ double rightPanning[NUM_PANN_COEFFS] = { 0.50, 0.55, 0.60, 0.70, 0.75, 0.80,
+ 0.85, 0.90, 0.95, 1.00 };
+
+ while ((pannCntr + 1) < NUM_PANN_COEFFS) {
+ _acmLeft->Codec((uint8_t) 0, &codecInst);
+ codecInst.pacsize = 480;
+ CHECK_ERROR(_acmLeft->RegisterSendCodec(codecInst));
+ CHECK_ERROR(_acmRight->RegisterSendCodec(codecInst));
+
+ EncodeDecode(leftPanning[pannCntr], rightPanning[pannCntr]);
+ pannCntr++;
+
+ // Change codec
+ _acmLeft->Codec((uint8_t) 3, &codecInst);
+ codecInst.pacsize = 320;
+ CHECK_ERROR(_acmLeft->RegisterSendCodec(codecInst));
+ CHECK_ERROR(_acmRight->RegisterSendCodec(codecInst));
+
+ EncodeDecode(leftPanning[pannCntr], rightPanning[pannCntr]);
+ pannCntr++;
+ if (_testMode == 0) {
+ printf(".");
+ }
+ }
+
+ _acmLeft->Codec((uint8_t) 4, &codecInst);
+ CHECK_ERROR(_acmLeft->RegisterSendCodec(codecInst));
+ EncodeDecode();
+
+ _acmLeft->Codec((uint8_t) 0, &codecInst);
+ codecInst.pacsize = 480;
+ CHECK_ERROR(_acmLeft->RegisterSendCodec(codecInst));
+ CHECK_ERROR(_acmRight->RegisterSendCodec(codecInst));
+ pannCntr = NUM_PANN_COEFFS - 1;
+ while (pannCntr >= 0) {
+ EncodeDecode(leftPanning[pannCntr], rightPanning[pannCntr]);
+ pannCntr--;
+ if (_testMode == 0) {
+ printf(".");
+ }
+ }
+ if (_testMode == 0) {
+ printf("Done!\n");
+ }
+}
+
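+// Encodes one pass of the input file as a panned stereo pair: the left ACM
+// gets the input scaled by leftPanning, and the same buffer is then rescaled
+// by rightPanning / leftPanning (net gain rightPanning) for the right ACM.
+// The receiver's stereo output is written to the output file.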
+void SpatialAudio::EncodeDecode(const double leftPanning,
+ const double rightPanning) {
+ AudioFrame audioFrame;
+ int32_t outFileSampFreq = _outFile.SamplingFrequency();
+
+ const double rightToLeftRatio = rightPanning / leftPanning;
+
+ _channel->SetIsStereo(true);
+
+ while (!_inFile.EndOfFile()) {
+ _inFile.Read10MsData(audioFrame);
+ for (size_t n = 0; n < audioFrame.samples_per_channel_; n++) {
+ audioFrame.data_[n] = (int16_t) floor(
+ audioFrame.data_[n] * leftPanning + 0.5);
+ }
+ CHECK_ERROR(_acmLeft->Add10MsData(audioFrame));
+
+ for (size_t n = 0; n < audioFrame.samples_per_channel_; n++) {
+ audioFrame.data_[n] = (int16_t) floor(
+ audioFrame.data_[n] * rightToLeftRatio + 0.5);
+ }
+ CHECK_ERROR(_acmRight->Add10MsData(audioFrame));
+
+ CHECK_ERROR(_acmReceiver->PlayoutData10Ms(outFileSampFreq, &audioFrame));
+ _outFile.Write10MsData(audioFrame);
+ }
+ _inFile.Rewind();
+}
+
+void SpatialAudio::EncodeDecode() {
+ AudioFrame audioFrame;
+ int32_t outFileSampFreq = _outFile.SamplingFrequency();
+
+ _channel->SetIsStereo(false);
+
+ while (!_inFile.EndOfFile()) {
+ _inFile.Read10MsData(audioFrame);
+ CHECK_ERROR(_acmLeft->Add10MsData(audioFrame));
+
+ CHECK_ERROR(_acmReceiver->PlayoutData10Ms(outFileSampFreq, &audioFrame));
+ _outFile.Write10MsData(audioFrame);
+ }
+ _inFile.Rewind();
+}
+
+} // namespace webrtc
diff --git a/webrtc/modules/audio_coding/test/SpatialAudio.h b/webrtc/modules/audio_coding/test/SpatialAudio.h
new file mode 100644
index 0000000000..3548cc98eb
--- /dev/null
+++ b/webrtc/modules/audio_coding/test/SpatialAudio.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_CODING_TEST_SPATIALAUDIO_H_
+#define WEBRTC_MODULES_AUDIO_CODING_TEST_SPATIALAUDIO_H_
+
+#include "webrtc/base/scoped_ptr.h"
+#include "webrtc/modules/audio_coding/include/audio_coding_module.h"
+#include "webrtc/modules/audio_coding/test/ACMTest.h"
+#include "webrtc/modules/audio_coding/test/Channel.h"
+#include "webrtc/modules/audio_coding/test/PCMFile.h"
+#include "webrtc/modules/audio_coding/test/utility.h"
+
+#define MAX_FILE_NAME_LENGTH_BYTE 500
+
+namespace webrtc {
+
+class SpatialAudio : public ACMTest {
+ public:
+  explicit SpatialAudio(int testMode);
+ ~SpatialAudio();
+
+ void Perform();
+ private:
+ int16_t Setup();
+ void EncodeDecode(double leftPanning, double rightPanning);
+ void EncodeDecode();
+
+ rtc::scoped_ptr<AudioCodingModule> _acmLeft;
+ rtc::scoped_ptr<AudioCodingModule> _acmRight;
+ rtc::scoped_ptr<AudioCodingModule> _acmReceiver;
+ Channel* _channel;
+ PCMFile _inFile;
+ PCMFile _outFile;
+ int _testMode;
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_AUDIO_CODING_TEST_SPATIALAUDIO_H_
diff --git a/webrtc/modules/audio_coding/test/TestAllCodecs.cc b/webrtc/modules/audio_coding/test/TestAllCodecs.cc
new file mode 100644
index 0000000000..bacfd37188
--- /dev/null
+++ b/webrtc/modules/audio_coding/test/TestAllCodecs.cc
@@ -0,0 +1,489 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_coding/test/TestAllCodecs.h"
+
+#include <cstdio>
+#include <limits>
+#include <string>
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+#include "webrtc/common_types.h"
+#include "webrtc/engine_configurations.h"
+#include "webrtc/modules/audio_coding/include/audio_coding_module.h"
+#include "webrtc/modules/audio_coding/include/audio_coding_module_typedefs.h"
+#include "webrtc/modules/audio_coding/test/utility.h"
+#include "webrtc/system_wrappers/include/trace.h"
+#include "webrtc/test/testsupport/fileutils.h"
+#include "webrtc/typedefs.h"
+
+// Description of the test:
+// In this test we set up a one-way communication channel from a participant
+// called "a" to a participant called "b".
+// a -> channel_a_to_b -> b
+//
+// The test loops through all available mono codecs: it encodes at "a",
+// sends over the channel, and decodes at "b".
+
+namespace {
+const size_t kVariableSize = std::numeric_limits<size_t>::max();
+}
+
+namespace webrtc {
+
+// Class for simulating packet handling.
+TestPack::TestPack()
+ : receiver_acm_(NULL),
+ sequence_number_(0),
+ timestamp_diff_(0),
+ last_in_timestamp_(0),
+ total_bytes_(0),
+ payload_size_(0) {
+}
+
+TestPack::~TestPack() {
+}
+
+void TestPack::RegisterReceiverACM(AudioCodingModule* acm) {
+ receiver_acm_ = acm;
+ return;
+}
+
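+// Emulates the network path: wraps the payload in a minimal RTP header and
+// hands it directly to the receiving ACM, while recording payload size,
+// timestamp step and total bytes for later validation. Empty frames are
+// dropped.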
+int32_t TestPack::SendData(FrameType frame_type, uint8_t payload_type,
+ uint32_t timestamp, const uint8_t* payload_data,
+ size_t payload_size,
+ const RTPFragmentationHeader* fragmentation) {
+ WebRtcRTPHeader rtp_info;
+ int32_t status;
+
+ rtp_info.header.markerBit = false;
+ rtp_info.header.ssrc = 0;
+ rtp_info.header.sequenceNumber = sequence_number_++;
+ rtp_info.header.payloadType = payload_type;
+ rtp_info.header.timestamp = timestamp;
+ if (frame_type == kAudioFrameCN) {
+ rtp_info.type.Audio.isCNG = true;
+ } else {
+ rtp_info.type.Audio.isCNG = false;
+ }
+ if (frame_type == kEmptyFrame) {
+ // Skip this frame.
+ return 0;
+ }
+
+ // Only run mono for all test cases.
+ rtp_info.type.Audio.channel = 1;
+ memcpy(payload_data_, payload_data, payload_size);
+
+ status = receiver_acm_->IncomingPacket(payload_data_, payload_size, rtp_info);
+
+ payload_size_ = payload_size;
+ timestamp_diff_ = timestamp - last_in_timestamp_;
+ last_in_timestamp_ = timestamp;
+ total_bytes_ += payload_size;
+ return status;
+}
+
+size_t TestPack::payload_size() {
+ return payload_size_;
+}
+
+uint32_t TestPack::timestamp_diff() {
+ return timestamp_diff_;
+}
+
+void TestPack::reset_payload_size() {
+ payload_size_ = 0;
+}
+
+TestAllCodecs::TestAllCodecs(int test_mode)
+ : acm_a_(AudioCodingModule::Create(0)),
+ acm_b_(AudioCodingModule::Create(1)),
+ channel_a_to_b_(NULL),
+ test_count_(0),
+ packet_size_samples_(0),
+ packet_size_bytes_(0) {
+ // test_mode = 0 for silent test (auto test)
+ test_mode_ = test_mode;
+}
+
+TestAllCodecs::~TestAllCodecs() {
+ if (channel_a_to_b_ != NULL) {
+ delete channel_a_to_b_;
+ channel_a_to_b_ = NULL;
+ }
+}
+
+void TestAllCodecs::Perform() {
+ const std::string file_name = webrtc::test::ResourcePath(
+ "audio_coding/testfile32kHz", "pcm");
+ infile_a_.Open(file_name, 32000, "rb");
+
+ if (test_mode_ == 0) {
+ WEBRTC_TRACE(kTraceStateInfo, kTraceAudioCoding, -1,
+ "---------- TestAllCodecs ----------");
+ }
+
+ acm_a_->InitializeReceiver();
+ acm_b_->InitializeReceiver();
+
+ uint8_t num_encoders = acm_a_->NumberOfCodecs();
+ CodecInst my_codec_param;
+ for (uint8_t n = 0; n < num_encoders; n++) {
+ acm_b_->Codec(n, &my_codec_param);
+ if (!strcmp(my_codec_param.plname, "opus")) {
+ my_codec_param.channels = 1;
+ }
+ acm_b_->RegisterReceiveCodec(my_codec_param);
+ }
+
+ // Create and connect the channel
+ channel_a_to_b_ = new TestPack;
+ acm_a_->RegisterTransportCallback(channel_a_to_b_);
+ channel_a_to_b_->RegisterReceiverACM(acm_b_.get());
+
+ // All codecs are tested for all allowed sampling frequencies, rates and
+ // packet sizes.
+#ifdef WEBRTC_CODEC_G722
+ if (test_mode_ != 0) {
+ printf("===============================================================\n");
+ }
+ test_count_++;
+ OpenOutFile(test_count_);
+ char codec_g722[] = "G722";
+ RegisterSendCodec('A', codec_g722, 16000, 64000, 160, 0);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_g722, 16000, 64000, 320, 0);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_g722, 16000, 64000, 480, 0);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_g722, 16000, 64000, 640, 0);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_g722, 16000, 64000, 800, 0);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_g722, 16000, 64000, 960, 0);
+ Run(channel_a_to_b_);
+ outfile_b_.Close();
+#endif
+#ifdef WEBRTC_CODEC_ILBC
+ if (test_mode_ != 0) {
+ printf("===============================================================\n");
+ }
+ test_count_++;
+ OpenOutFile(test_count_);
+ char codec_ilbc[] = "ILBC";
+ RegisterSendCodec('A', codec_ilbc, 8000, 13300, 240, 0);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_ilbc, 8000, 13300, 480, 0);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_ilbc, 8000, 15200, 160, 0);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_ilbc, 8000, 15200, 320, 0);
+ Run(channel_a_to_b_);
+ outfile_b_.Close();
+#endif
+#if (defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX))
+ if (test_mode_ != 0) {
+ printf("===============================================================\n");
+ }
+ test_count_++;
+ OpenOutFile(test_count_);
+ char codec_isac[] = "ISAC";
+ RegisterSendCodec('A', codec_isac, 16000, -1, 480, kVariableSize);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_isac, 16000, -1, 960, kVariableSize);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_isac, 16000, 15000, 480, kVariableSize);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_isac, 16000, 32000, 960, kVariableSize);
+ Run(channel_a_to_b_);
+ outfile_b_.Close();
+#endif
+#ifdef WEBRTC_CODEC_ISAC
+ if (test_mode_ != 0) {
+ printf("===============================================================\n");
+ }
+ test_count_++;
+ OpenOutFile(test_count_);
+ RegisterSendCodec('A', codec_isac, 32000, -1, 960, kVariableSize);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_isac, 32000, 56000, 960, kVariableSize);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_isac, 32000, 37000, 960, kVariableSize);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_isac, 32000, 32000, 960, kVariableSize);
+ Run(channel_a_to_b_);
+ outfile_b_.Close();
+#endif
+ if (test_mode_ != 0) {
+ printf("===============================================================\n");
+ }
+ test_count_++;
+ OpenOutFile(test_count_);
+ char codec_l16[] = "L16";
+ RegisterSendCodec('A', codec_l16, 8000, 128000, 80, 0);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_l16, 8000, 128000, 160, 0);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_l16, 8000, 128000, 240, 0);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_l16, 8000, 128000, 320, 0);
+ Run(channel_a_to_b_);
+ outfile_b_.Close();
+ if (test_mode_ != 0) {
+ printf("===============================================================\n");
+ }
+ test_count_++;
+ OpenOutFile(test_count_);
+ RegisterSendCodec('A', codec_l16, 16000, 256000, 160, 0);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_l16, 16000, 256000, 320, 0);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_l16, 16000, 256000, 480, 0);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_l16, 16000, 256000, 640, 0);
+ Run(channel_a_to_b_);
+ outfile_b_.Close();
+ if (test_mode_ != 0) {
+ printf("===============================================================\n");
+ }
+ test_count_++;
+ OpenOutFile(test_count_);
+ RegisterSendCodec('A', codec_l16, 32000, 512000, 320, 0);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_l16, 32000, 512000, 640, 0);
+ Run(channel_a_to_b_);
+ outfile_b_.Close();
+ if (test_mode_ != 0) {
+ printf("===============================================================\n");
+ }
+ test_count_++;
+ OpenOutFile(test_count_);
+ char codec_pcma[] = "PCMA";
+ RegisterSendCodec('A', codec_pcma, 8000, 64000, 80, 0);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_pcma, 8000, 64000, 160, 0);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_pcma, 8000, 64000, 240, 0);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_pcma, 8000, 64000, 320, 0);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_pcma, 8000, 64000, 400, 0);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_pcma, 8000, 64000, 480, 0);
+ Run(channel_a_to_b_);
+ if (test_mode_ != 0) {
+ printf("===============================================================\n");
+ }
+ char codec_pcmu[] = "PCMU";
+ RegisterSendCodec('A', codec_pcmu, 8000, 64000, 80, 0);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_pcmu, 8000, 64000, 160, 0);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_pcmu, 8000, 64000, 240, 0);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_pcmu, 8000, 64000, 320, 0);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_pcmu, 8000, 64000, 400, 0);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_pcmu, 8000, 64000, 480, 0);
+ Run(channel_a_to_b_);
+ outfile_b_.Close();
+#ifdef WEBRTC_CODEC_OPUS
+ if (test_mode_ != 0) {
+ printf("===============================================================\n");
+ }
+ test_count_++;
+ OpenOutFile(test_count_);
+ char codec_opus[] = "OPUS";
+ RegisterSendCodec('A', codec_opus, 48000, 6000, 480, kVariableSize);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_opus, 48000, 20000, 480*2, kVariableSize);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_opus, 48000, 32000, 480*4, kVariableSize);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_opus, 48000, 48000, 480, kVariableSize);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_opus, 48000, 64000, 480*4, kVariableSize);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_opus, 48000, 96000, 480*6, kVariableSize);
+ Run(channel_a_to_b_);
+ RegisterSendCodec('A', codec_opus, 48000, 500000, 480*2, kVariableSize);
+ Run(channel_a_to_b_);
+ outfile_b_.Close();
+#endif
+ if (test_mode_ != 0) {
+ printf("===============================================================\n");
+
+ /* Print out all codecs that were not tested in the run */
+ printf("The following codecs was not included in the test:\n");
+#ifndef WEBRTC_CODEC_G722
+ printf(" G.722\n");
+#endif
+#ifndef WEBRTC_CODEC_ILBC
+ printf(" iLBC\n");
+#endif
+#ifndef WEBRTC_CODEC_ISAC
+ printf(" ISAC float\n");
+#endif
+#ifndef WEBRTC_CODEC_ISACFX
+ printf(" ISAC fix\n");
+#endif
+
+ printf("\nTo complete the test, listen to the %d number of output files.\n",
+ test_count_);
+ }
+}
+
+// Register a codec to use in the test.
+//
+// Input: side - which ACM to use, 'A' or 'B'
+//        codec_name - name to use when registering the codec
+//        sampling_freq_hz - sampling frequency in Hertz
+//        rate - bitrate in bits per second
+//        packet_size - packet size in samples
+//        extra_byte - extra bytes needed on top of the rate-derived size,
+//                     e.g. an internal header; set to kVariableSize if the
+//                     codec has a variable rate
+void TestAllCodecs::RegisterSendCodec(char side, char* codec_name,
+ int32_t sampling_freq_hz, int rate,
+ int packet_size, size_t extra_byte) {
+ if (test_mode_ != 0) {
+ // Print out codec and settings.
+ printf("codec: %s Freq: %d Rate: %d PackSize: %d\n", codec_name,
+ sampling_freq_hz, rate, packet_size);
+ }
+
+ // Store packet-size in samples, used to validate the received packet.
+ // If G.722, store half the size to compensate for the timestamp bug in the
+ // RFC for G.722.
+ // If iSAC runs in adaptive mode, packet size in samples can change on the
+ // fly, so we exclude this test by setting |packet_size_samples_| to -1.
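+  // For example, G.722 registered with packet_size = 320 samples (20 ms at
+  // 16 kHz) is expected to advance the RTP timestamp by only 160 per packet.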
+ if (!strcmp(codec_name, "G722")) {
+ packet_size_samples_ = packet_size / 2;
+ } else if (!strcmp(codec_name, "ISAC") && (rate == -1)) {
+ packet_size_samples_ = -1;
+ } else {
+ packet_size_samples_ = packet_size;
+ }
+
+  // Store the expected packet size in bytes, used to validate the received
+  // packet. For variable-rate codecs (extra_byte == kVariableSize), set it to
+  // kVariableSize.
+ if (extra_byte != kVariableSize) {
+ // Add 0.875 to always round up to a whole byte
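+    // Worked example (illustrative): PCMU at rate = 64000 bit/s with
+    // packet_size = 160 samples at 8 kHz gives
+    // 160 * 64000 / (8000 * 8) = 160 bytes per packet.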
+ packet_size_bytes_ = static_cast<size_t>(
+ static_cast<float>(packet_size * rate) /
+ static_cast<float>(sampling_freq_hz * 8) + 0.875) + extra_byte;
+ } else {
+ // Packets will have a variable size.
+ packet_size_bytes_ = kVariableSize;
+ }
+
+ // Set pointer to the ACM where to register the codec.
+ AudioCodingModule* my_acm = NULL;
+ switch (side) {
+ case 'A': {
+ my_acm = acm_a_.get();
+ break;
+ }
+ case 'B': {
+ my_acm = acm_b_.get();
+ break;
+ }
+ default: {
+ break;
+ }
+ }
+ ASSERT_TRUE(my_acm != NULL);
+
+ // Get all codec parameters before registering
+ CodecInst my_codec_param;
+ CHECK_ERROR(AudioCodingModule::Codec(codec_name, &my_codec_param,
+ sampling_freq_hz, 1));
+ my_codec_param.rate = rate;
+ my_codec_param.pacsize = packet_size;
+ CHECK_ERROR(my_acm->RegisterSendCodec(my_codec_param));
+}
+
+void TestAllCodecs::Run(TestPack* channel) {
+ AudioFrame audio_frame;
+
+ int32_t out_freq_hz = outfile_b_.SamplingFrequency();
+ size_t receive_size;
+ uint32_t timestamp_diff;
+ channel->reset_payload_size();
+ int error_count = 0;
+ int counter = 0;
+ // Set test length to 500 ms (50 blocks of 10 ms each).
+ infile_a_.SetNum10MsBlocksToRead(50);
+ // Fast-forward 1 second (100 blocks) since the file starts with silence.
+ infile_a_.FastForward(100);
+
+ while (!infile_a_.EndOfFile()) {
+ // Add 10 msec to ACM.
+ infile_a_.Read10MsData(audio_frame);
+ CHECK_ERROR(acm_a_->Add10MsData(audio_frame));
+
+ // Verify that the received packet size matches the settings.
+ receive_size = channel->payload_size();
+ if (receive_size) {
+ if ((receive_size != packet_size_bytes_) &&
+ (packet_size_bytes_ != kVariableSize)) {
+ error_count++;
+ }
+
+ // Verify that the timestamp is updated with expected length. The counter
+ // is used to avoid problems when switching codec or frame size in the
+ // test.
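+      // For example, a 160-sample PCMU packet should advance the RTP
+      // timestamp by 160 on every received packet.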
+ timestamp_diff = channel->timestamp_diff();
+ if ((counter > 10) &&
+ (static_cast<int>(timestamp_diff) != packet_size_samples_) &&
+ (packet_size_samples_ > -1))
+ error_count++;
+ }
+
+ // Run received side of ACM.
+ CHECK_ERROR(acm_b_->PlayoutData10Ms(out_freq_hz, &audio_frame));
+
+ // Write output speech to file.
+ outfile_b_.Write10MsData(audio_frame.data_,
+ audio_frame.samples_per_channel_);
+
+ // Update loop counter
+ counter++;
+ }
+
+ EXPECT_EQ(0, error_count);
+
+ if (infile_a_.EndOfFile()) {
+ infile_a_.Rewind();
+ }
+}
+
+void TestAllCodecs::OpenOutFile(int test_number) {
+ std::string filename = webrtc::test::OutputPath();
+ std::ostringstream test_number_str;
+ test_number_str << test_number;
+ filename += "testallcodecs_out_";
+ filename += test_number_str.str();
+ filename += ".pcm";
+ outfile_b_.Open(filename, 32000, "wb");
+}
+
+void TestAllCodecs::DisplaySendReceiveCodec() {
+ CodecInst my_codec_param;
+ printf("%s -> ", acm_a_->SendCodec()->plname);
+ acm_b_->ReceiveCodec(&my_codec_param);
+ printf("%s\n", my_codec_param.plname);
+}
+
+} // namespace webrtc
diff --git a/webrtc/modules/audio_coding/test/TestAllCodecs.h b/webrtc/modules/audio_coding/test/TestAllCodecs.h
new file mode 100644
index 0000000000..e79bd69faa
--- /dev/null
+++ b/webrtc/modules/audio_coding/test/TestAllCodecs.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_CODING_TEST_TESTALLCODECS_H_
+#define WEBRTC_MODULES_AUDIO_CODING_TEST_TESTALLCODECS_H_
+
+#include "webrtc/base/scoped_ptr.h"
+#include "webrtc/modules/audio_coding/test/ACMTest.h"
+#include "webrtc/modules/audio_coding/test/Channel.h"
+#include "webrtc/modules/audio_coding/test/PCMFile.h"
+#include "webrtc/typedefs.h"
+
+namespace webrtc {
+
+class Config;
+
+class TestPack : public AudioPacketizationCallback {
+ public:
+ TestPack();
+ ~TestPack();
+
+ void RegisterReceiverACM(AudioCodingModule* acm);
+
+ int32_t SendData(FrameType frame_type,
+ uint8_t payload_type,
+ uint32_t timestamp,
+ const uint8_t* payload_data,
+ size_t payload_size,
+ const RTPFragmentationHeader* fragmentation) override;
+
+ size_t payload_size();
+ uint32_t timestamp_diff();
+ void reset_payload_size();
+
+ private:
+ AudioCodingModule* receiver_acm_;
+ uint16_t sequence_number_;
+ uint8_t payload_data_[60 * 32 * 2 * 2];
+ uint32_t timestamp_diff_;
+ uint32_t last_in_timestamp_;
+ uint64_t total_bytes_;
+ size_t payload_size_;
+};
+
+class TestAllCodecs : public ACMTest {
+ public:
+ explicit TestAllCodecs(int test_mode);
+ ~TestAllCodecs();
+
+ void Perform() override;
+
+ private:
+ // The default value of '-1' indicates that the registration is based only on
+ // codec name, and a sampling frequency matching is not required.
+  // This is useful for codecs which support several sampling frequencies.
+ // Note! Only mono mode is tested in this test.
+ void RegisterSendCodec(char side, char* codec_name, int32_t sampling_freq_hz,
+ int rate, int packet_size, size_t extra_byte);
+
+ void Run(TestPack* channel);
+ void OpenOutFile(int test_number);
+ void DisplaySendReceiveCodec();
+
+ int test_mode_;
+ rtc::scoped_ptr<AudioCodingModule> acm_a_;
+ rtc::scoped_ptr<AudioCodingModule> acm_b_;
+ TestPack* channel_a_to_b_;
+ PCMFile infile_a_;
+ PCMFile outfile_b_;
+ int test_count_;
+ int packet_size_samples_;
+ size_t packet_size_bytes_;
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_AUDIO_CODING_TEST_TESTALLCODECS_H_
diff --git a/webrtc/modules/audio_coding/test/TestRedFec.cc b/webrtc/modules/audio_coding/test/TestRedFec.cc
new file mode 100644
index 0000000000..a1bdc04e53
--- /dev/null
+++ b/webrtc/modules/audio_coding/test/TestRedFec.cc
@@ -0,0 +1,480 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_coding/test/TestRedFec.h"
+
+#include <assert.h>
+
+#include "webrtc/common.h"
+#include "webrtc/common_types.h"
+#include "webrtc/engine_configurations.h"
+#include "webrtc/modules/audio_coding/include/audio_coding_module_typedefs.h"
+#include "webrtc/modules/audio_coding/test/utility.h"
+#include "webrtc/system_wrappers/include/trace.h"
+#include "webrtc/test/testsupport/fileutils.h"
+
+#ifdef SUPPORT_RED_WB
+#undef SUPPORT_RED_WB
+#endif
+
+#ifdef SUPPORT_RED_SWB
+#undef SUPPORT_RED_SWB
+#endif
+
+#ifdef SUPPORT_RED_FB
+#undef SUPPORT_RED_FB
+#endif
+
+namespace webrtc {
+
+namespace {
+ const char kNameL16[] = "L16";
+ const char kNamePCMU[] = "PCMU";
+ const char kNameCN[] = "CN";
+ const char kNameRED[] = "RED";
+
+ // These three are only used by code #ifdeffed on WEBRTC_CODEC_G722.
+#ifdef WEBRTC_CODEC_G722
+ const char kNameISAC[] = "ISAC";
+ const char kNameG722[] = "G722";
+ const char kNameOPUS[] = "opus";
+#endif
+}
+
+TestRedFec::TestRedFec()
+ : _acmA(AudioCodingModule::Create(0)),
+ _acmB(AudioCodingModule::Create(1)),
+ _channelA2B(NULL),
+ _testCntr(0) {
+}
+
+TestRedFec::~TestRedFec() {
+ if (_channelA2B != NULL) {
+ delete _channelA2B;
+ _channelA2B = NULL;
+ }
+}
+
+void TestRedFec::Perform() {
+ const std::string file_name = webrtc::test::ResourcePath(
+ "audio_coding/testfile32kHz", "pcm");
+ _inFileA.Open(file_name, 32000, "rb");
+
+ ASSERT_EQ(0, _acmA->InitializeReceiver());
+ ASSERT_EQ(0, _acmB->InitializeReceiver());
+
+ uint8_t numEncoders = _acmA->NumberOfCodecs();
+ CodecInst myCodecParam;
+ for (uint8_t n = 0; n < numEncoders; n++) {
+ EXPECT_EQ(0, _acmB->Codec(n, &myCodecParam));
+ // Default number of channels is 2 for opus, so we change to 1 in this test.
+ if (!strcmp(myCodecParam.plname, "opus")) {
+ myCodecParam.channels = 1;
+ }
+ EXPECT_EQ(0, _acmB->RegisterReceiveCodec(myCodecParam));
+ }
+
+ // Create and connect the channel
+ _channelA2B = new Channel;
+ _acmA->RegisterTransportCallback(_channelA2B);
+ _channelA2B->RegisterReceiverACM(_acmB.get());
+
+ EXPECT_EQ(0, RegisterSendCodec('A', kNameL16, 8000));
+ EXPECT_EQ(0, RegisterSendCodec('A', kNameCN, 8000));
+ EXPECT_EQ(0, RegisterSendCodec('A', kNameRED));
+ EXPECT_EQ(0, SetVAD(true, true, VADAggr));
+ EXPECT_EQ(0, _acmA->SetREDStatus(true));
+ EXPECT_TRUE(_acmA->REDStatus());
+
+ OpenOutFile(_testCntr);
+ Run();
+ _outFileB.Close();
+
+ RegisterSendCodec('A', kNamePCMU, 8000);
+ // Switch to another 8 kHz codec, RED should remain switched on.
+ EXPECT_TRUE(_acmA->REDStatus());
+ OpenOutFile(_testCntr);
+ Run();
+ _outFileB.Close();
+
+#ifndef WEBRTC_CODEC_G722
+ EXPECT_TRUE(false);
+ printf("G722 needs to be activated to run this test\n");
+ return;
+#else
+ EXPECT_EQ(0, RegisterSendCodec('A', kNameG722, 16000));
+ EXPECT_EQ(0, RegisterSendCodec('A', kNameCN, 16000));
+
+#ifdef SUPPORT_RED_WB
+ // Switch codec, RED should remain.
+ EXPECT_TRUE(_acmA->REDStatus());
+#else
+ // Switch to a 16 kHz codec, RED should have been switched off.
+ EXPECT_FALSE(_acmA->REDStatus());
+#endif
+
+ OpenOutFile(_testCntr);
+ EXPECT_EQ(0, SetVAD(true, true, VADAggr));
+ EXPECT_EQ(0, _acmA->SetREDStatus(false));
+ EXPECT_FALSE(_acmA->REDStatus());
+ Run();
+#ifdef SUPPORT_RED_WB
+ EXPECT_EQ(0, _acmA->SetREDStatus(true));
+ EXPECT_TRUE(_acmA->REDStatus());
+#else
+ EXPECT_EQ(-1, _acmA->SetREDStatus(true));
+ EXPECT_FALSE(_acmA->REDStatus());
+#endif
+ Run();
+ _outFileB.Close();
+
+ RegisterSendCodec('A', kNameISAC, 16000);
+
+#ifdef SUPPORT_RED_WB
+ // Switch codec, RED should remain.
+ EXPECT_TRUE(_acmA->REDStatus());
+#else
+ EXPECT_FALSE(_acmA->REDStatus());
+#endif
+
+ OpenOutFile(_testCntr);
+ EXPECT_EQ(0, SetVAD(true, true, VADVeryAggr));
+ EXPECT_EQ(0, _acmA->SetREDStatus(false));
+ EXPECT_FALSE(_acmA->REDStatus());
+ Run();
+ _outFileB.Close();
+
+#ifdef SUPPORT_RED_WB
+ EXPECT_EQ(0, _acmA->SetREDStatus(true));
+ EXPECT_TRUE(_acmA->REDStatus());
+#else
+ EXPECT_EQ(-1, _acmA->SetREDStatus(true));
+ EXPECT_FALSE(_acmA->REDStatus());
+#endif
+ OpenOutFile(_testCntr);
+ Run();
+ _outFileB.Close();
+
+ RegisterSendCodec('A', kNameISAC, 32000);
+
+#if defined(SUPPORT_RED_SWB) && defined(SUPPORT_RED_WB)
+ // Switch codec, RED should remain.
+ EXPECT_TRUE(_acmA->REDStatus());
+#else
+ // Switch to a 32 kHz codec, RED should have been switched off.
+ EXPECT_FALSE(_acmA->REDStatus());
+#endif
+
+ OpenOutFile(_testCntr);
+ EXPECT_EQ(0, SetVAD(true, true, VADVeryAggr));
+ EXPECT_EQ(0, _acmA->SetREDStatus(false));
+ EXPECT_FALSE(_acmA->REDStatus());
+ Run();
+ _outFileB.Close();
+
+#ifdef SUPPORT_RED_SWB
+ EXPECT_EQ(0, _acmA->SetREDStatus(true));
+ EXPECT_TRUE(_acmA->REDStatus());
+#else
+ EXPECT_EQ(-1, _acmA->SetREDStatus(true));
+ EXPECT_FALSE(_acmA->REDStatus());
+#endif
+ OpenOutFile(_testCntr);
+ Run();
+ _outFileB.Close();
+
+ RegisterSendCodec('A', kNameISAC, 32000);
+ EXPECT_EQ(0, SetVAD(false, false, VADNormal));
+
+#if defined(SUPPORT_RED_SWB) && defined(SUPPORT_RED_WB)
+ OpenOutFile(_testCntr);
+ EXPECT_EQ(0, _acmA->SetREDStatus(true));
+ EXPECT_TRUE(_acmA->REDStatus());
+ Run();
+
+ RegisterSendCodec('A', kNameISAC, 16000);
+ EXPECT_TRUE(_acmA->REDStatus());
+ Run();
+
+ RegisterSendCodec('A', kNameISAC, 32000);
+ EXPECT_TRUE(_acmA->REDStatus());
+ Run();
+
+ RegisterSendCodec('A', kNameISAC, 16000);
+ EXPECT_TRUE(_acmA->REDStatus());
+ Run();
+ _outFileB.Close();
+#else
+ EXPECT_EQ(-1, _acmA->SetREDStatus(true));
+ EXPECT_FALSE(_acmA->REDStatus());
+#endif
+
+ _channelA2B->SetFECTestWithPacketLoss(true);
+ // Following tests are under packet losses.
+
+ EXPECT_EQ(0, RegisterSendCodec('A', kNameG722));
+ EXPECT_EQ(0, RegisterSendCodec('A', kNameCN, 16000));
+
+#if defined(SUPPORT_RED_WB) && defined(SUPPORT_RED_SWB)
+ // Switch codec, RED should remain.
+ EXPECT_TRUE(_acmA->REDStatus());
+#else
+ // Switch to a 16 kHz codec, RED should have been switched off.
+ EXPECT_FALSE(_acmA->REDStatus());
+#endif
+
+ OpenOutFile(_testCntr);
+ EXPECT_EQ(0, SetVAD(true, true, VADAggr));
+ EXPECT_EQ(0, _acmA->SetREDStatus(false));
+ EXPECT_FALSE(_acmA->REDStatus());
+ Run();
+ _outFileB.Close();
+
+#ifdef SUPPORT_RED_WB
+ EXPECT_EQ(0, _acmA->SetREDStatus(true));
+ EXPECT_TRUE(_acmA->REDStatus());
+#else
+ EXPECT_EQ(-1, _acmA->SetREDStatus(true));
+ EXPECT_FALSE(_acmA->REDStatus());
+#endif
+ OpenOutFile(_testCntr);
+ Run();
+ _outFileB.Close();
+
+ RegisterSendCodec('A', kNameISAC, 16000);
+
+#ifdef SUPPORT_RED_WB
+ // Switch codec, RED should remain.
+ EXPECT_TRUE(_acmA->REDStatus());
+#else
+ // Switch to a 16 kHz codec, RED should have been switched off.
+ EXPECT_FALSE(_acmA->REDStatus());
+#endif
+
+ OpenOutFile(_testCntr);
+ EXPECT_EQ(0, SetVAD(true, true, VADVeryAggr));
+ EXPECT_EQ(0, _acmA->SetREDStatus(false));
+ EXPECT_FALSE(_acmA->REDStatus());
+ Run();
+ _outFileB.Close();
+#ifdef SUPPORT_RED_WB
+ EXPECT_EQ(0, _acmA->SetREDStatus(true));
+ EXPECT_TRUE(_acmA->REDStatus());
+#else
+ EXPECT_EQ(-1, _acmA->SetREDStatus(true));
+ EXPECT_FALSE(_acmA->REDStatus());
+#endif
+ OpenOutFile(_testCntr);
+ Run();
+ _outFileB.Close();
+
+ RegisterSendCodec('A', kNameISAC, 32000);
+
+#if defined(SUPPORT_RED_SWB) && defined(SUPPORT_RED_WB)
+ // Switch codec, RED should remain.
+ EXPECT_TRUE(_acmA->REDStatus());
+#else
+ // Switch to a 32 kHz codec, RED should have been switched off.
+ EXPECT_FALSE(_acmA->REDStatus());
+#endif
+
+ OpenOutFile(_testCntr);
+ EXPECT_EQ(0, SetVAD(true, true, VADVeryAggr));
+ EXPECT_EQ(0, _acmA->SetREDStatus(false));
+ EXPECT_FALSE(_acmA->REDStatus());
+#ifdef SUPPORT_RED_SWB
+ EXPECT_EQ(0, _acmA->SetREDStatus(true));
+ EXPECT_TRUE(_acmA->REDStatus());
+#else
+ EXPECT_EQ(-1, _acmA->SetREDStatus(true));
+ EXPECT_FALSE(_acmA->REDStatus());
+#endif
+ OpenOutFile(_testCntr);
+ Run();
+ _outFileB.Close();
+
+ RegisterSendCodec('A', kNameISAC, 32000);
+ EXPECT_EQ(0, SetVAD(false, false, VADNormal));
+#if defined(SUPPORT_RED_SWB) && defined(SUPPORT_RED_WB)
+ OpenOutFile(_testCntr);
+ EXPECT_EQ(0, _acmA->SetREDStatus(true));
+ EXPECT_TRUE(_acmA->REDStatus());
+ Run();
+
+ RegisterSendCodec('A', kNameISAC, 16000);
+ EXPECT_TRUE(_acmA->REDStatus());
+ Run();
+
+ RegisterSendCodec('A', kNameISAC, 32000);
+ EXPECT_TRUE(_acmA->REDStatus());
+ Run();
+
+ RegisterSendCodec('A', kNameISAC, 16000);
+ EXPECT_TRUE(_acmA->REDStatus());
+ Run();
+ _outFileB.Close();
+#else
+ EXPECT_EQ(-1, _acmA->SetREDStatus(true));
+ EXPECT_FALSE(_acmA->REDStatus());
+#endif
+
+#ifndef WEBRTC_CODEC_OPUS
+ EXPECT_TRUE(false);
+ printf("Opus needs to be activated to run this test\n");
+ return;
+#endif
+
+ RegisterSendCodec('A', kNameOPUS, 48000);
+
+#if defined(SUPPORT_RED_FB) && defined(SUPPORT_RED_SWB) &&\
+ defined(SUPPORT_RED_WB)
+ // Switch to codec, RED should remain switched on.
+ EXPECT_TRUE(_acmA->REDStatus());
+#else
+ EXPECT_FALSE(_acmA->REDStatus());
+#endif
+
+ // _channelA2B imposes 25% packet loss rate.
+ EXPECT_EQ(0, _acmA->SetPacketLossRate(25));
+
+#ifdef SUPPORT_RED_FB
+ EXPECT_EQ(0, _acmA->SetREDStatus(true));
+ EXPECT_TRUE(_acmA->REDStatus());
+ // Codec FEC and RED are mutually exclusive.
+ EXPECT_EQ(-1, _acmA->SetCodecFEC(true));
+
+ EXPECT_EQ(0, _acmA->SetREDStatus(false));
+ EXPECT_EQ(0, _acmA->SetCodecFEC(true));
+
+ // Codec FEC and RED are mutually exclusive.
+ EXPECT_EQ(-1, _acmA->SetREDStatus(true));
+#else
+ EXPECT_EQ(-1, _acmA->SetREDStatus(true));
+ EXPECT_FALSE(_acmA->REDStatus());
+ EXPECT_EQ(0, _acmA->SetCodecFEC(true));
+#endif
+
+ EXPECT_TRUE(_acmA->CodecFEC());
+ OpenOutFile(_testCntr);
+ Run();
+
+ // Switch to L16 with RED.
+ RegisterSendCodec('A', kNameL16, 8000);
+ EXPECT_EQ(0, SetVAD(false, false, VADNormal));
+
+ // L16 does not support FEC, so FEC should be turned off automatically.
+ EXPECT_FALSE(_acmA->CodecFEC());
+
+ EXPECT_EQ(0, _acmA->SetREDStatus(true));
+ EXPECT_TRUE(_acmA->REDStatus());
+ Run();
+
+ // Switch to Opus again.
+ RegisterSendCodec('A', kNameOPUS, 48000);
+#ifdef SUPPORT_RED_FB
+ // Switch to codec, RED should remain switched on.
+ EXPECT_TRUE(_acmA->REDStatus());
+#else
+ EXPECT_FALSE(_acmA->REDStatus());
+#endif
+ EXPECT_EQ(0, _acmA->SetREDStatus(false));
+ EXPECT_EQ(0, _acmA->SetCodecFEC(false));
+ Run();
+
+ EXPECT_EQ(0, _acmA->SetCodecFEC(true));
+ _outFileB.Close();
+
+  // These codecs do not support internal FEC, so enabling FEC should fail.
+ RegisterSendCodec('A', kNameG722, 16000);
+ EXPECT_FALSE(_acmA->REDStatus());
+ EXPECT_EQ(-1, _acmA->SetCodecFEC(true));
+ EXPECT_FALSE(_acmA->CodecFEC());
+
+ RegisterSendCodec('A', kNameISAC, 16000);
+ EXPECT_FALSE(_acmA->REDStatus());
+ EXPECT_EQ(-1, _acmA->SetCodecFEC(true));
+ EXPECT_FALSE(_acmA->CodecFEC());
+
+  // These codecs do not support internal FEC; disabling FEC does not trigger
+  // a failure.
+ RegisterSendCodec('A', kNameG722, 16000);
+ EXPECT_FALSE(_acmA->REDStatus());
+ EXPECT_EQ(0, _acmA->SetCodecFEC(false));
+ EXPECT_FALSE(_acmA->CodecFEC());
+
+ RegisterSendCodec('A', kNameISAC, 16000);
+ EXPECT_FALSE(_acmA->REDStatus());
+ EXPECT_EQ(0, _acmA->SetCodecFEC(false));
+ EXPECT_FALSE(_acmA->CodecFEC());
+
+#endif // defined(WEBRTC_CODEC_G722)
+}
+
+int32_t TestRedFec::SetVAD(bool enableDTX, bool enableVAD, ACMVADMode vadMode) {
+ return _acmA->SetVAD(enableDTX, enableVAD, vadMode);
+}
+
+int16_t TestRedFec::RegisterSendCodec(char side, const char* codecName,
+ int32_t samplingFreqHz) {
+ std::cout << std::flush;
+ AudioCodingModule* myACM;
+ switch (side) {
+ case 'A': {
+ myACM = _acmA.get();
+ break;
+ }
+ case 'B': {
+ myACM = _acmB.get();
+ break;
+ }
+ default:
+ return -1;
+ }
+
+ if (myACM == NULL) {
+ assert(false);
+ return -1;
+ }
+ CodecInst myCodecParam;
+ EXPECT_GT(AudioCodingModule::Codec(codecName, &myCodecParam,
+ samplingFreqHz, 1), -1);
+ EXPECT_GT(myACM->RegisterSendCodec(myCodecParam), -1);
+
+ // Initialization was successful.
+ return 0;
+}
+
+void TestRedFec::Run() {
+ AudioFrame audioFrame;
+ int32_t outFreqHzB = _outFileB.SamplingFrequency();
+ // Set test length to 500 ms (50 blocks of 10 ms each).
+ _inFileA.SetNum10MsBlocksToRead(50);
+ // Fast-forward 1 second (100 blocks) since the file starts with silence.
+ _inFileA.FastForward(100);
+
+ while (!_inFileA.EndOfFile()) {
+ EXPECT_GT(_inFileA.Read10MsData(audioFrame), 0);
+ EXPECT_GE(_acmA->Add10MsData(audioFrame), 0);
+ EXPECT_EQ(0, _acmB->PlayoutData10Ms(outFreqHzB, &audioFrame));
+ _outFileB.Write10MsData(audioFrame.data_, audioFrame.samples_per_channel_);
+ }
+ _inFileA.Rewind();
+}
+
+void TestRedFec::OpenOutFile(int16_t test_number) {
+ std::string file_name;
+ std::stringstream file_stream;
+ file_stream << webrtc::test::OutputPath();
+ file_stream << "TestRedFec_outFile_";
+ file_stream << test_number << ".pcm";
+ file_name = file_stream.str();
+ _outFileB.Open(file_name, 16000, "wb");
+}
+
+} // namespace webrtc
diff --git a/webrtc/modules/audio_coding/test/TestRedFec.h b/webrtc/modules/audio_coding/test/TestRedFec.h
new file mode 100644
index 0000000000..6343d8e374
--- /dev/null
+++ b/webrtc/modules/audio_coding/test/TestRedFec.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_CODING_TEST_TESTREDFEC_H_
+#define WEBRTC_MODULES_AUDIO_CODING_TEST_TESTREDFEC_H_
+
+#include <string>
+#include "webrtc/base/scoped_ptr.h"
+#include "webrtc/modules/audio_coding/test/ACMTest.h"
+#include "webrtc/modules/audio_coding/test/Channel.h"
+#include "webrtc/modules/audio_coding/test/PCMFile.h"
+
+namespace webrtc {
+
+class Config;
+
+class TestRedFec : public ACMTest {
+ public:
+  TestRedFec();
+ ~TestRedFec();
+
+ void Perform();
+ private:
+  // The default value of '-1' indicates that the registration is based only on
+  // the codec name, and a matching sampling frequency is not required. This is
+  // useful for codecs which support several sampling frequencies.
+ int16_t RegisterSendCodec(char side, const char* codecName,
+ int32_t sampFreqHz = -1);
+ void Run();
+ void OpenOutFile(int16_t testNumber);
+ int32_t SetVAD(bool enableDTX, bool enableVAD, ACMVADMode vadMode);
+ rtc::scoped_ptr<AudioCodingModule> _acmA;
+ rtc::scoped_ptr<AudioCodingModule> _acmB;
+
+ Channel* _channelA2B;
+
+ PCMFile _inFileA;
+ PCMFile _outFileB;
+ int16_t _testCntr;
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_AUDIO_CODING_TEST_TESTREDFEC_H_
diff --git a/webrtc/modules/audio_coding/test/TestStereo.cc b/webrtc/modules/audio_coding/test/TestStereo.cc
new file mode 100644
index 0000000000..9bf560d323
--- /dev/null
+++ b/webrtc/modules/audio_coding/test/TestStereo.cc
@@ -0,0 +1,844 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_coding/test/TestStereo.h"
+
+#include <assert.h>
+
+#include <string>
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/common_types.h"
+#include "webrtc/engine_configurations.h"
+#include "webrtc/modules/audio_coding/include/audio_coding_module_typedefs.h"
+#include "webrtc/modules/audio_coding/test/utility.h"
+#include "webrtc/system_wrappers/include/trace.h"
+#include "webrtc/test/testsupport/fileutils.h"
+
+namespace webrtc {
+
+// Class for simulating packet handling
+TestPackStereo::TestPackStereo()
+ : receiver_acm_(NULL),
+ seq_no_(0),
+ timestamp_diff_(0),
+ last_in_timestamp_(0),
+ total_bytes_(0),
+ payload_size_(0),
+ codec_mode_(kNotSet),
+ lost_packet_(false) {
+}
+
+TestPackStereo::~TestPackStereo() {
+}
+
+void TestPackStereo::RegisterReceiverACM(AudioCodingModule* acm) {
+ receiver_acm_ = acm;
+ return;
+}
+
+int32_t TestPackStereo::SendData(const FrameType frame_type,
+ const uint8_t payload_type,
+ const uint32_t timestamp,
+ const uint8_t* payload_data,
+ const size_t payload_size,
+ const RTPFragmentationHeader* fragmentation) {
+ WebRtcRTPHeader rtp_info;
+ int32_t status = 0;
+
+ rtp_info.header.markerBit = false;
+ rtp_info.header.ssrc = 0;
+ rtp_info.header.sequenceNumber = seq_no_++;
+ rtp_info.header.payloadType = payload_type;
+ rtp_info.header.timestamp = timestamp;
+ if (frame_type == kEmptyFrame) {
+ // Skip this frame
+ return 0;
+ }
+
+ if (lost_packet_ == false) {
+ if (frame_type != kAudioFrameCN) {
+ rtp_info.type.Audio.isCNG = false;
+ rtp_info.type.Audio.channel = static_cast<int>(codec_mode_);
+ } else {
+ rtp_info.type.Audio.isCNG = true;
+ rtp_info.type.Audio.channel = static_cast<int>(kMono);
+ }
+ status = receiver_acm_->IncomingPacket(payload_data, payload_size,
+ rtp_info);
+
+ if (frame_type != kAudioFrameCN) {
+ payload_size_ = static_cast<int>(payload_size);
+ } else {
+ payload_size_ = -1;
+ }
+
+ timestamp_diff_ = timestamp - last_in_timestamp_;
+ last_in_timestamp_ = timestamp;
+ total_bytes_ += payload_size;
+ }
+ return status;
+}
+
+uint16_t TestPackStereo::payload_size() {
+ return static_cast<uint16_t>(payload_size_);
+}
+
+uint32_t TestPackStereo::timestamp_diff() {
+ return timestamp_diff_;
+}
+
+void TestPackStereo::reset_payload_size() {
+ payload_size_ = 0;
+}
+
+void TestPackStereo::set_codec_mode(enum StereoMonoMode mode) {
+ codec_mode_ = mode;
+}
+
+void TestPackStereo::set_lost_packet(bool lost) {
+ lost_packet_ = lost;
+}
+
+TestStereo::TestStereo(int test_mode)
+ : acm_a_(AudioCodingModule::Create(0)),
+ acm_b_(AudioCodingModule::Create(1)),
+ channel_a2b_(NULL),
+ test_cntr_(0),
+ pack_size_samp_(0),
+ pack_size_bytes_(0),
+ counter_(0)
+#ifdef WEBRTC_CODEC_G722
+ , g722_pltype_(0)
+#endif
+ , l16_8khz_pltype_(-1)
+ , l16_16khz_pltype_(-1)
+ , l16_32khz_pltype_(-1)
+#ifdef PCMA_AND_PCMU
+ , pcma_pltype_(-1)
+ , pcmu_pltype_(-1)
+#endif
+#ifdef WEBRTC_CODEC_OPUS
+ , opus_pltype_(-1)
+#endif
+ {
+ // test_mode = 0 for silent test (auto test)
+ test_mode_ = test_mode;
+}
+
+TestStereo::~TestStereo() {
+ if (channel_a2b_ != NULL) {
+ delete channel_a2b_;
+ channel_a2b_ = NULL;
+ }
+}
+
+void TestStereo::Perform() {
+ uint16_t frequency_hz;
+ int audio_channels;
+ int codec_channels;
+ bool dtx;
+ bool vad;
+ ACMVADMode vad_mode;
+
+ // Open both mono and stereo test files in 32 kHz.
+ const std::string file_name_stereo = webrtc::test::ResourcePath(
+ "audio_coding/teststereo32kHz", "pcm");
+ const std::string file_name_mono = webrtc::test::ResourcePath(
+ "audio_coding/testfile32kHz", "pcm");
+ frequency_hz = 32000;
+ in_file_stereo_ = new PCMFile();
+ in_file_mono_ = new PCMFile();
+ in_file_stereo_->Open(file_name_stereo, frequency_hz, "rb");
+ in_file_stereo_->ReadStereo(true);
+ in_file_mono_->Open(file_name_mono, frequency_hz, "rb");
+ in_file_mono_->ReadStereo(false);
+
+ // Create and initialize two ACMs, one for each side of a one-to-one call.
+ ASSERT_TRUE((acm_a_.get() != NULL) && (acm_b_.get() != NULL));
+ EXPECT_EQ(0, acm_a_->InitializeReceiver());
+ EXPECT_EQ(0, acm_b_->InitializeReceiver());
+
+  // Register all available codecs as receiving codecs.
+ uint8_t num_encoders = acm_a_->NumberOfCodecs();
+ CodecInst my_codec_param;
+ for (uint8_t n = 0; n < num_encoders; n++) {
+ EXPECT_EQ(0, acm_b_->Codec(n, &my_codec_param));
+ EXPECT_EQ(0, acm_b_->RegisterReceiveCodec(my_codec_param));
+ }
+
+ // Test that unregister all receive codecs works.
+ for (uint8_t n = 0; n < num_encoders; n++) {
+ EXPECT_EQ(0, acm_b_->Codec(n, &my_codec_param));
+ EXPECT_EQ(0, acm_b_->UnregisterReceiveCodec(my_codec_param.pltype));
+ }
+
+  // Register all available codecs as receiving codecs once more.
+ for (uint8_t n = 0; n < num_encoders; n++) {
+ EXPECT_EQ(0, acm_b_->Codec(n, &my_codec_param));
+ EXPECT_EQ(0, acm_b_->RegisterReceiveCodec(my_codec_param));
+ }
+
+ // Create and connect the channel.
+ channel_a2b_ = new TestPackStereo;
+ EXPECT_EQ(0, acm_a_->RegisterTransportCallback(channel_a2b_));
+ channel_a2b_->RegisterReceiverACM(acm_b_.get());
+
+ // Start with setting VAD/DTX, before we know we will send stereo.
+ // Continue with setting a stereo codec as send codec and verify that
+ // VAD/DTX gets turned off.
+ EXPECT_EQ(0, acm_a_->SetVAD(true, true, VADNormal));
+ EXPECT_EQ(0, acm_a_->VAD(&dtx, &vad, &vad_mode));
+ EXPECT_TRUE(dtx);
+ EXPECT_TRUE(vad);
+ char codec_pcma_temp[] = "PCMA";
+ RegisterSendCodec('A', codec_pcma_temp, 8000, 64000, 80, 2, pcma_pltype_);
+ EXPECT_EQ(0, acm_a_->VAD(&dtx, &vad, &vad_mode));
+ EXPECT_FALSE(dtx);
+ EXPECT_FALSE(vad);
+ if (test_mode_ != 0) {
+ printf("\n");
+ }
+
+ //
+ // Test Stereo-To-Stereo for all codecs.
+ //
+ audio_channels = 2;
+ codec_channels = 2;
+
+ // All codecs are tested for all allowed sampling frequencies, rates and
+ // packet sizes.
+#ifdef WEBRTC_CODEC_G722
+ if (test_mode_ != 0) {
+ printf("===========================================================\n");
+ printf("Test number: %d\n", test_cntr_ + 1);
+ printf("Test type: Stereo-to-stereo\n");
+ }
+ channel_a2b_->set_codec_mode(kStereo);
+ test_cntr_++;
+ OpenOutFile(test_cntr_);
+ char codec_g722[] = "G722";
+ RegisterSendCodec('A', codec_g722, 16000, 64000, 160, codec_channels,
+ g722_pltype_);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ RegisterSendCodec('A', codec_g722, 16000, 64000, 320, codec_channels,
+ g722_pltype_);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ RegisterSendCodec('A', codec_g722, 16000, 64000, 480, codec_channels,
+ g722_pltype_);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ RegisterSendCodec('A', codec_g722, 16000, 64000, 640, codec_channels,
+ g722_pltype_);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ RegisterSendCodec('A', codec_g722, 16000, 64000, 800, codec_channels,
+ g722_pltype_);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ RegisterSendCodec('A', codec_g722, 16000, 64000, 960, codec_channels,
+ g722_pltype_);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ out_file_.Close();
+#endif
+ if (test_mode_ != 0) {
+ printf("===========================================================\n");
+ printf("Test number: %d\n", test_cntr_ + 1);
+ printf("Test type: Stereo-to-stereo\n");
+ }
+ channel_a2b_->set_codec_mode(kStereo);
+ test_cntr_++;
+ OpenOutFile(test_cntr_);
+ char codec_l16[] = "L16";
+ RegisterSendCodec('A', codec_l16, 8000, 128000, 80, codec_channels,
+ l16_8khz_pltype_);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ RegisterSendCodec('A', codec_l16, 8000, 128000, 160, codec_channels,
+ l16_8khz_pltype_);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ RegisterSendCodec('A', codec_l16, 8000, 128000, 240, codec_channels,
+ l16_8khz_pltype_);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ RegisterSendCodec('A', codec_l16, 8000, 128000, 320, codec_channels,
+ l16_8khz_pltype_);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ out_file_.Close();
+
+ if (test_mode_ != 0) {
+ printf("===========================================================\n");
+ printf("Test number: %d\n", test_cntr_ + 1);
+ printf("Test type: Stereo-to-stereo\n");
+ }
+ test_cntr_++;
+ OpenOutFile(test_cntr_);
+ RegisterSendCodec('A', codec_l16, 16000, 256000, 160, codec_channels,
+ l16_16khz_pltype_);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ RegisterSendCodec('A', codec_l16, 16000, 256000, 320, codec_channels,
+ l16_16khz_pltype_);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ RegisterSendCodec('A', codec_l16, 16000, 256000, 480, codec_channels,
+ l16_16khz_pltype_);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ RegisterSendCodec('A', codec_l16, 16000, 256000, 640, codec_channels,
+ l16_16khz_pltype_);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ out_file_.Close();
+
+ if (test_mode_ != 0) {
+ printf("===========================================================\n");
+ printf("Test number: %d\n", test_cntr_ + 1);
+ printf("Test type: Stereo-to-stereo\n");
+ }
+ test_cntr_++;
+ OpenOutFile(test_cntr_);
+ RegisterSendCodec('A', codec_l16, 32000, 512000, 320, codec_channels,
+ l16_32khz_pltype_);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ RegisterSendCodec('A', codec_l16, 32000, 512000, 640, codec_channels,
+ l16_32khz_pltype_);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ out_file_.Close();
+#ifdef PCMA_AND_PCMU
+ if (test_mode_ != 0) {
+ printf("===========================================================\n");
+ printf("Test number: %d\n", test_cntr_ + 1);
+ printf("Test type: Stereo-to-stereo\n");
+ }
+ channel_a2b_->set_codec_mode(kStereo);
+ audio_channels = 2;
+ codec_channels = 2;
+ test_cntr_++;
+ OpenOutFile(test_cntr_);
+ char codec_pcma[] = "PCMA";
+ RegisterSendCodec('A', codec_pcma, 8000, 64000, 80, codec_channels,
+ pcma_pltype_);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ RegisterSendCodec('A', codec_pcma, 8000, 64000, 160, codec_channels,
+ pcma_pltype_);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ RegisterSendCodec('A', codec_pcma, 8000, 64000, 240, codec_channels,
+ pcma_pltype_);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ RegisterSendCodec('A', codec_pcma, 8000, 64000, 320, codec_channels,
+ pcma_pltype_);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ RegisterSendCodec('A', codec_pcma, 8000, 64000, 400, codec_channels,
+ pcma_pltype_);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ RegisterSendCodec('A', codec_pcma, 8000, 64000, 480, codec_channels,
+ pcma_pltype_);
+ Run(channel_a2b_, audio_channels, codec_channels);
+
+ // Test that VAD/DTX cannot be turned on while sending stereo.
+ EXPECT_EQ(-1, acm_a_->SetVAD(true, true, VADNormal));
+ EXPECT_EQ(0, acm_a_->VAD(&dtx, &vad, &vad_mode));
+ EXPECT_FALSE(dtx);
+ EXPECT_FALSE(vad);
+ EXPECT_EQ(0, acm_a_->SetVAD(false, false, VADNormal));
+ EXPECT_EQ(0, acm_a_->VAD(&dtx, &vad, &vad_mode));
+ EXPECT_FALSE(dtx);
+ EXPECT_FALSE(vad);
+
+ out_file_.Close();
+ if (test_mode_ != 0) {
+ printf("===========================================================\n");
+ printf("Test number: %d\n", test_cntr_ + 1);
+ printf("Test type: Stereo-to-stereo\n");
+ }
+ test_cntr_++;
+ OpenOutFile(test_cntr_);
+ char codec_pcmu[] = "PCMU";
+ RegisterSendCodec('A', codec_pcmu, 8000, 64000, 80, codec_channels,
+ pcmu_pltype_);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ RegisterSendCodec('A', codec_pcmu, 8000, 64000, 160, codec_channels,
+ pcmu_pltype_);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ RegisterSendCodec('A', codec_pcmu, 8000, 64000, 240, codec_channels,
+ pcmu_pltype_);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ RegisterSendCodec('A', codec_pcmu, 8000, 64000, 320, codec_channels,
+ pcmu_pltype_);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ RegisterSendCodec('A', codec_pcmu, 8000, 64000, 400, codec_channels,
+ pcmu_pltype_);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ RegisterSendCodec('A', codec_pcmu, 8000, 64000, 480, codec_channels,
+ pcmu_pltype_);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ out_file_.Close();
+#endif
+#ifdef WEBRTC_CODEC_OPUS
+ if (test_mode_ != 0) {
+ printf("===========================================================\n");
+ printf("Test number: %d\n", test_cntr_ + 1);
+ printf("Test type: Stereo-to-stereo\n");
+ }
+ channel_a2b_->set_codec_mode(kStereo);
+ audio_channels = 2;
+ codec_channels = 2;
+ test_cntr_++;
+ OpenOutFile(test_cntr_);
+
+ char codec_opus[] = "opus";
+ // Run Opus with 10 ms frame size.
+ RegisterSendCodec('A', codec_opus, 48000, 64000, 480, codec_channels,
+ opus_pltype_);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ // Run Opus with 20 ms frame size.
+ RegisterSendCodec('A', codec_opus, 48000, 64000, 480*2, codec_channels,
+ opus_pltype_);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ // Run Opus with 40 ms frame size.
+ RegisterSendCodec('A', codec_opus, 48000, 64000, 480*4, codec_channels,
+ opus_pltype_);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ // Run Opus with 60 ms frame size.
+ RegisterSendCodec('A', codec_opus, 48000, 64000, 480*6, codec_channels,
+ opus_pltype_);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ // Run Opus with 20 ms frame size and different bitrates.
+ RegisterSendCodec('A', codec_opus, 48000, 40000, 960, codec_channels,
+ opus_pltype_);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ RegisterSendCodec('A', codec_opus, 48000, 510000, 960, codec_channels,
+ opus_pltype_);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ out_file_.Close();
+#endif
+ //
+ // Test Mono-To-Stereo for all codecs.
+ //
+ audio_channels = 1;
+ codec_channels = 2;
+
+#ifdef WEBRTC_CODEC_G722
+ if (test_mode_ != 0) {
+ printf("===============================================================\n");
+ printf("Test number: %d\n", test_cntr_ + 1);
+ printf("Test type: Mono-to-stereo\n");
+ }
+ test_cntr_++;
+ channel_a2b_->set_codec_mode(kStereo);
+ OpenOutFile(test_cntr_);
+ RegisterSendCodec('A', codec_g722, 16000, 64000, 160, codec_channels,
+ g722_pltype_);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ out_file_.Close();
+#endif
+ if (test_mode_ != 0) {
+ printf("===============================================================\n");
+ printf("Test number: %d\n", test_cntr_ + 1);
+ printf("Test type: Mono-to-stereo\n");
+ }
+ test_cntr_++;
+ channel_a2b_->set_codec_mode(kStereo);
+ OpenOutFile(test_cntr_);
+ RegisterSendCodec('A', codec_l16, 8000, 128000, 80, codec_channels,
+ l16_8khz_pltype_);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ out_file_.Close();
+ if (test_mode_ != 0) {
+ printf("===============================================================\n");
+ printf("Test number: %d\n", test_cntr_ + 1);
+ printf("Test type: Mono-to-stereo\n");
+ }
+ test_cntr_++;
+ OpenOutFile(test_cntr_);
+ RegisterSendCodec('A', codec_l16, 16000, 256000, 160, codec_channels,
+ l16_16khz_pltype_);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ out_file_.Close();
+ if (test_mode_ != 0) {
+ printf("===============================================================\n");
+ printf("Test number: %d\n", test_cntr_ + 1);
+ printf("Test type: Mono-to-stereo\n");
+ }
+ test_cntr_++;
+ OpenOutFile(test_cntr_);
+ RegisterSendCodec('A', codec_l16, 32000, 512000, 320, codec_channels,
+ l16_32khz_pltype_);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ out_file_.Close();
+#ifdef PCMA_AND_PCMU
+ if (test_mode_ != 0) {
+ printf("===============================================================\n");
+ printf("Test number: %d\n", test_cntr_ + 1);
+ printf("Test type: Mono-to-stereo\n");
+ }
+ test_cntr_++;
+ channel_a2b_->set_codec_mode(kStereo);
+ OpenOutFile(test_cntr_);
+ RegisterSendCodec('A', codec_pcmu, 8000, 64000, 80, codec_channels,
+ pcmu_pltype_);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ RegisterSendCodec('A', codec_pcma, 8000, 64000, 80, codec_channels,
+ pcma_pltype_);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ out_file_.Close();
+#endif
+#ifdef WEBRTC_CODEC_OPUS
+ if (test_mode_ != 0) {
+ printf("===============================================================\n");
+ printf("Test number: %d\n", test_cntr_ + 1);
+ printf("Test type: Mono-to-stereo\n");
+ }
+
+ // Keep encode and decode in stereo.
+ test_cntr_++;
+ channel_a2b_->set_codec_mode(kStereo);
+ OpenOutFile(test_cntr_);
+ RegisterSendCodec('A', codec_opus, 48000, 64000, 960, codec_channels,
+ opus_pltype_);
+ Run(channel_a2b_, audio_channels, codec_channels);
+
+ // Encode in mono, decode in stereo mode.
+ RegisterSendCodec('A', codec_opus, 48000, 64000, 960, 1, opus_pltype_);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ out_file_.Close();
+#endif
+
+ //
+ // Test Stereo-To-Mono for all codecs.
+ //
+ audio_channels = 2;
+ codec_channels = 1;
+ channel_a2b_->set_codec_mode(kMono);
+
+#ifdef WEBRTC_CODEC_G722
+ // Run stereo audio and mono codec.
+ if (test_mode_ != 0) {
+ printf("===============================================================\n");
+ printf("Test number: %d\n", test_cntr_ + 1);
+ printf("Test type: Stereo-to-mono\n");
+ }
+ test_cntr_++;
+ OpenOutFile(test_cntr_);
+ RegisterSendCodec('A', codec_g722, 16000, 64000, 160, codec_channels,
+ g722_pltype_);
+
+ // Make sure it is possible to set VAD/CNG, now that we are sending mono
+ // again.
+ EXPECT_EQ(0, acm_a_->SetVAD(true, true, VADNormal));
+ EXPECT_EQ(0, acm_a_->VAD(&dtx, &vad, &vad_mode));
+ EXPECT_TRUE(dtx);
+ EXPECT_TRUE(vad);
+ EXPECT_EQ(0, acm_a_->SetVAD(false, false, VADNormal));
+ Run(channel_a2b_, audio_channels, codec_channels);
+ out_file_.Close();
+#endif
+ if (test_mode_ != 0) {
+ printf("===============================================================\n");
+ printf("Test number: %d\n", test_cntr_ + 1);
+ printf("Test type: Stereo-to-mono\n");
+ }
+ test_cntr_++;
+ OpenOutFile(test_cntr_);
+ RegisterSendCodec('A', codec_l16, 8000, 128000, 80, codec_channels,
+ l16_8khz_pltype_);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ out_file_.Close();
+ if (test_mode_ != 0) {
+ printf("===============================================================\n");
+ printf("Test number: %d\n", test_cntr_ + 1);
+ printf("Test type: Stereo-to-mono\n");
+ }
+ test_cntr_++;
+ OpenOutFile(test_cntr_);
+ RegisterSendCodec('A', codec_l16, 16000, 256000, 160, codec_channels,
+ l16_16khz_pltype_);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ out_file_.Close();
+ if (test_mode_ != 0) {
+ printf("==============================================================\n");
+ printf("Test number: %d\n", test_cntr_ + 1);
+ printf("Test type: Stereo-to-mono\n");
+ }
+ test_cntr_++;
+ OpenOutFile(test_cntr_);
+ RegisterSendCodec('A', codec_l16, 32000, 512000, 320, codec_channels,
+ l16_32khz_pltype_);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ out_file_.Close();
+#ifdef PCMA_AND_PCMU
+ if (test_mode_ != 0) {
+ printf("===============================================================\n");
+ printf("Test number: %d\n", test_cntr_ + 1);
+ printf("Test type: Stereo-to-mono\n");
+ }
+ test_cntr_++;
+ OpenOutFile(test_cntr_);
+ RegisterSendCodec('A', codec_pcmu, 8000, 64000, 80, codec_channels,
+ pcmu_pltype_);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ RegisterSendCodec('A', codec_pcma, 8000, 64000, 80, codec_channels,
+ pcma_pltype_);
+ Run(channel_a2b_, audio_channels, codec_channels);
+ out_file_.Close();
+#endif
+#ifdef WEBRTC_CODEC_OPUS
+ if (test_mode_ != 0) {
+ printf("===============================================================\n");
+ printf("Test number: %d\n", test_cntr_ + 1);
+ printf("Test type: Stereo-to-mono\n");
+ }
+ test_cntr_++;
+ OpenOutFile(test_cntr_);
+ // Encode and decode in mono.
+ RegisterSendCodec('A', codec_opus, 48000, 32000, 960, codec_channels,
+ opus_pltype_);
+ CodecInst opus_codec_param;
+ for (uint8_t n = 0; n < num_encoders; n++) {
+ EXPECT_EQ(0, acm_b_->Codec(n, &opus_codec_param));
+ if (!strcmp(opus_codec_param.plname, "opus")) {
+ opus_codec_param.channels = 1;
+ EXPECT_EQ(0, acm_b_->RegisterReceiveCodec(opus_codec_param));
+ break;
+ }
+ }
+ Run(channel_a2b_, audio_channels, codec_channels);
+
+ // Encode in stereo, decode in mono.
+ RegisterSendCodec('A', codec_opus, 48000, 32000, 960, 2, opus_pltype_);
+ Run(channel_a2b_, audio_channels, codec_channels);
+
+ out_file_.Close();
+
+ // Test switching between decoding mono and stereo for Opus.
+
+ // Decode in mono.
+ test_cntr_++;
+ OpenOutFile(test_cntr_);
+ if (test_mode_ != 0) {
+ // Print out codec and settings
+ printf("Test number: %d\nCodec: Opus Freq: 48000 Rate :32000 PackSize: 960"
+ " Decode: mono\n", test_cntr_);
+ }
+ Run(channel_a2b_, audio_channels, codec_channels);
+ out_file_.Close();
+ // Decode in stereo.
+ test_cntr_++;
+ OpenOutFile(test_cntr_);
+ if (test_mode_ != 0) {
+ // Print out codec and settings
+ printf("Test number: %d\nCodec: Opus Freq: 48000 Rate :32000 PackSize: 960"
+ " Decode: stereo\n", test_cntr_);
+ }
+ opus_codec_param.channels = 2;
+ EXPECT_EQ(0, acm_b_->RegisterReceiveCodec(opus_codec_param));
+ Run(channel_a2b_, audio_channels, 2);
+ out_file_.Close();
+ // Decode in mono.
+ test_cntr_++;
+ OpenOutFile(test_cntr_);
+ if (test_mode_ != 0) {
+ // Print out codec and settings
+ printf("Test number: %d\nCodec: Opus Freq: 48000 Rate :32000 PackSize: 960"
+ " Decode: mono\n", test_cntr_);
+ }
+ opus_codec_param.channels = 1;
+ EXPECT_EQ(0, acm_b_->RegisterReceiveCodec(opus_codec_param));
+ Run(channel_a2b_, audio_channels, codec_channels);
+ out_file_.Close();
+
+#endif
+
+ // Print out which codecs were tested, and which were not, in the run.
+ if (test_mode_ != 0) {
+ printf("\nThe following codecs was INCLUDED in the test:\n");
+#ifdef WEBRTC_CODEC_G722
+ printf(" G.722\n");
+#endif
+ printf(" PCM16\n");
+ printf(" G.711\n");
+#ifdef WEBRTC_CODEC_OPUS
+ printf(" Opus\n");
+#endif
+ printf("\nTo complete the test, listen to the %d number of output "
+ "files.\n",
+ test_cntr_);
+ }
+
+ // Delete the file pointers.
+ delete in_file_stereo_;
+ delete in_file_mono_;
+}
+
+// Register the codec to use in the test.
+//
+// Input:   side            - which ACM to use, 'A' or 'B'
+//          codec_name      - name to use when registering the codec
+//          sampling_freq_hz - sampling frequency in Hertz
+//          rate            - bitrate in bits per second
+// pack_size - packet size in samples
+// channels - number of channels; 1 for mono, 2 for stereo
+// payload_type - payload type for the codec
+void TestStereo::RegisterSendCodec(char side, char* codec_name,
+ int32_t sampling_freq_hz, int rate,
+ int pack_size, int channels,
+ int payload_type) {
+ if (test_mode_ != 0) {
+ // Print out codec and settings
+ printf("Codec: %s Freq: %d Rate: %d PackSize: %d\n", codec_name,
+ sampling_freq_hz, rate, pack_size);
+ }
+
+ // Store packet size in samples, used to validate the received packet
+ pack_size_samp_ = pack_size;
+
+ // Store the expected packet size in bytes, used to validate the received
+ // packet. Add 0.875 to always round up to a whole byte.
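+  // Worked example (illustrative): L16 at 8 kHz registered with
+  // rate = 128000 bit/s and pack_size = 80 samples gives
+  // 80 * 128000 / (8000 * 8) = 160 bytes; Run() multiplies this by the
+  // number of channels when checking received stereo packets.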
+ pack_size_bytes_ = (uint16_t)(static_cast<float>(pack_size * rate) /
+ static_cast<float>(sampling_freq_hz * 8) +
+ 0.875);
+
+ // Set pointer to the ACM where to register the codec
+ AudioCodingModule* my_acm = NULL;
+ switch (side) {
+ case 'A': {
+ my_acm = acm_a_.get();
+ break;
+ }
+ case 'B': {
+ my_acm = acm_b_.get();
+ break;
+ }
+ default:
+ break;
+ }
+ ASSERT_TRUE(my_acm != NULL);
+
+ CodecInst my_codec_param;
+ // Get all codec parameters before registering
+ EXPECT_GT(AudioCodingModule::Codec(codec_name, &my_codec_param,
+ sampling_freq_hz, channels), -1);
+ my_codec_param.rate = rate;
+ my_codec_param.pacsize = pack_size;
+ EXPECT_EQ(0, my_acm->RegisterSendCodec(my_codec_param));
+
+ send_codec_name_ = codec_name;
+}
+
+void TestStereo::Run(TestPackStereo* channel, int in_channels, int out_channels,
+ int percent_loss) {
+ AudioFrame audio_frame;
+
+ int32_t out_freq_hz_b = out_file_.SamplingFrequency();
+ uint16_t rec_size;
+ uint32_t time_stamp_diff;
+ channel->reset_payload_size();
+ int error_count = 0;
+ int variable_bytes = 0;
+ int variable_packets = 0;
+ // Set test length to 500 ms (50 blocks of 10 ms each).
+ in_file_mono_->SetNum10MsBlocksToRead(50);
+ in_file_stereo_->SetNum10MsBlocksToRead(50);
+ // Fast-forward 1 second (100 blocks) since the files start with silence.
+ in_file_stereo_->FastForward(100);
+ in_file_mono_->FastForward(100);
+
+ while (1) {
+    // Simulate packet loss by setting |lost_packet_| to true in
+    // |percent_loss| percent of the loops.
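+    // For example, with percent_loss = 20 the counter wraps at
+    // floor(100 / 20 + 0.5) = 5, so roughly every fifth packet is marked lost.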
+ if (percent_loss > 0) {
+ if (counter_ == floor((100 / percent_loss) + 0.5)) {
+ counter_ = 0;
+ channel->set_lost_packet(true);
+ } else {
+ channel->set_lost_packet(false);
+ }
+ counter_++;
+ }
+
+ // Add 10 msec to ACM
+ if (in_channels == 1) {
+ if (in_file_mono_->EndOfFile()) {
+ break;
+ }
+ in_file_mono_->Read10MsData(audio_frame);
+ } else {
+ if (in_file_stereo_->EndOfFile()) {
+ break;
+ }
+ in_file_stereo_->Read10MsData(audio_frame);
+ }
+ EXPECT_GE(acm_a_->Add10MsData(audio_frame), 0);
+
+ // Verify that the received packet size matches the settings.
+ rec_size = channel->payload_size();
+    if ((0 < rec_size) && (rec_size < 65535)) {
+ if (strcmp(send_codec_name_, "opus") == 0) {
+ // Opus is a variable rate codec, hence calculate the average packet
+ // size, and later make sure the average is in the right range.
+ variable_bytes += rec_size;
+ variable_packets++;
+ } else {
+ // For fixed rate codecs, check that packet size is correct.
+ if ((rec_size != pack_size_bytes_ * out_channels)
+ && (pack_size_bytes_ < 65535)) {
+ error_count++;
+ }
+ }
+ // Verify that the timestamp is updated with expected length
+ time_stamp_diff = channel->timestamp_diff();
+ if ((counter_ > 10) && (time_stamp_diff != pack_size_samp_)) {
+ error_count++;
+ }
+ }
+
+ // Run received side of ACM
+ EXPECT_EQ(0, acm_b_->PlayoutData10Ms(out_freq_hz_b, &audio_frame));
+
+ // Write output speech to file
+ out_file_.Write10MsData(
+ audio_frame.data_,
+ audio_frame.samples_per_channel_ * audio_frame.num_channels_);
+ }
+
+ EXPECT_EQ(0, error_count);
+
+ // Check that packet size is in the right range for variable rate codecs,
+ // such as Opus.
+ if (variable_packets > 0) {
+ variable_bytes /= variable_packets;
+ EXPECT_NEAR(variable_bytes, pack_size_bytes_, 18);
+ }
+
+ if (in_file_mono_->EndOfFile()) {
+ in_file_mono_->Rewind();
+ }
+ if (in_file_stereo_->EndOfFile()) {
+ in_file_stereo_->Rewind();
+ }
+ // Reset in case we ended with a lost packet
+ channel->set_lost_packet(false);
+}
+
+void TestStereo::OpenOutFile(int16_t test_number) {
+ std::string file_name;
+ std::stringstream file_stream;
+ file_stream << webrtc::test::OutputPath() << "teststereo_out_" << test_number
+ << ".pcm";
+ file_name = file_stream.str();
+ out_file_.Open(file_name, 32000, "wb");
+}
+
+void TestStereo::DisplaySendReceiveCodec() {
+ auto send_codec = acm_a_->SendCodec();
+ if (test_mode_ != 0) {
+ ASSERT_TRUE(send_codec);
+ printf("%s -> ", send_codec->plname);
+ }
+ CodecInst receive_codec;
+ acm_b_->ReceiveCodec(&receive_codec);
+ if (test_mode_ != 0) {
+ printf("%s\n", receive_codec.plname);
+ }
+}
+
+} // namespace webrtc
diff --git a/webrtc/modules/audio_coding/test/TestStereo.h b/webrtc/modules/audio_coding/test/TestStereo.h
new file mode 100644
index 0000000000..4526be6960
--- /dev/null
+++ b/webrtc/modules/audio_coding/test/TestStereo.h
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_CODING_TEST_TESTSTEREO_H_
+#define WEBRTC_MODULES_AUDIO_CODING_TEST_TESTSTEREO_H_
+
+#include <math.h>
+
+#include "webrtc/base/scoped_ptr.h"
+#include "webrtc/modules/audio_coding/test/ACMTest.h"
+#include "webrtc/modules/audio_coding/test/Channel.h"
+#include "webrtc/modules/audio_coding/test/PCMFile.h"
+
+#define PCMA_AND_PCMU
+
+namespace webrtc {
+
+enum StereoMonoMode {
+ kNotSet,
+ kMono,
+ kStereo
+};
+
+class TestPackStereo : public AudioPacketizationCallback {
+ public:
+ TestPackStereo();
+ ~TestPackStereo();
+
+ void RegisterReceiverACM(AudioCodingModule* acm);
+
+ int32_t SendData(const FrameType frame_type,
+ const uint8_t payload_type,
+ const uint32_t timestamp,
+ const uint8_t* payload_data,
+ const size_t payload_size,
+ const RTPFragmentationHeader* fragmentation) override;
+
+ uint16_t payload_size();
+ uint32_t timestamp_diff();
+ void reset_payload_size();
+ void set_codec_mode(StereoMonoMode mode);
+ void set_lost_packet(bool lost);
+
+ private:
+ AudioCodingModule* receiver_acm_;
+ int16_t seq_no_;
+ uint32_t timestamp_diff_;
+ uint32_t last_in_timestamp_;
+ uint64_t total_bytes_;
+ int payload_size_;
+ StereoMonoMode codec_mode_;
+ // Simulate packet losses
+ bool lost_packet_;
+};
+
+class TestStereo : public ACMTest {
+ public:
+ explicit TestStereo(int test_mode);
+ ~TestStereo();
+
+ void Perform() override;
+
+ private:
+  // The default value of '-1' indicates that the registration is based only on
+  // the codec name, and a matching sampling frequency is not required. This is
+  // useful for codecs which support several sampling frequencies.
+ void RegisterSendCodec(char side, char* codec_name, int32_t samp_freq_hz,
+ int rate, int pack_size, int channels,
+ int payload_type);
+
+ void Run(TestPackStereo* channel, int in_channels, int out_channels,
+ int percent_loss = 0);
+ void OpenOutFile(int16_t test_number);
+ void DisplaySendReceiveCodec();
+
+ int test_mode_;
+
+ rtc::scoped_ptr<AudioCodingModule> acm_a_;
+ rtc::scoped_ptr<AudioCodingModule> acm_b_;
+
+ TestPackStereo* channel_a2b_;
+
+ PCMFile* in_file_stereo_;
+ PCMFile* in_file_mono_;
+ PCMFile out_file_;
+ int16_t test_cntr_;
+ uint16_t pack_size_samp_;
+ uint16_t pack_size_bytes_;
+ int counter_;
+ char* send_codec_name_;
+
+ // Payload types for stereo codecs and CNG
+#ifdef WEBRTC_CODEC_G722
+ int g722_pltype_;
+#endif
+ int l16_8khz_pltype_;
+ int l16_16khz_pltype_;
+ int l16_32khz_pltype_;
+#ifdef PCMA_AND_PCMU
+ int pcma_pltype_;
+ int pcmu_pltype_;
+#endif
+#ifdef WEBRTC_CODEC_OPUS
+ int opus_pltype_;
+#endif
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_AUDIO_CODING_TEST_TESTSTEREO_H_
diff --git a/webrtc/modules/audio_coding/test/TestVADDTX.cc b/webrtc/modules/audio_coding/test/TestVADDTX.cc
new file mode 100644
index 0000000000..229dc2d474
--- /dev/null
+++ b/webrtc/modules/audio_coding/test/TestVADDTX.cc
@@ -0,0 +1,276 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_coding/test/TestVADDTX.h"
+
+#include <string>
+
+#include "webrtc/engine_configurations.h"
+#include "webrtc/modules/audio_coding/test/PCMFile.h"
+#include "webrtc/modules/audio_coding/test/utility.h"
+#include "webrtc/test/testsupport/fileutils.h"
+
+namespace webrtc {
+
+#ifdef WEBRTC_CODEC_ISAC
+const CodecInst kIsacWb = {103, "ISAC", 16000, 480, 1, 32000};
+const CodecInst kIsacSwb = {104, "ISAC", 32000, 960, 1, 56000};
+#endif
+
+#ifdef WEBRTC_CODEC_ILBC
+const CodecInst kIlbc = {102, "ILBC", 8000, 240, 1, 13300};
+#endif
+
+#ifdef WEBRTC_CODEC_OPUS
+const CodecInst kOpus = {120, "opus", 48000, 960, 1, 64000};
+const CodecInst kOpusStereo = {120, "opus", 48000, 960, 2, 64000};
+#endif
+
+ActivityMonitor::ActivityMonitor() {
+ ResetStatistics();
+}
+
+int32_t ActivityMonitor::InFrameType(FrameType frame_type) {
+ counter_[frame_type]++;
+ return 0;
+}
+
+void ActivityMonitor::PrintStatistics() {
+ printf("\n");
+ printf("kEmptyFrame %u\n", counter_[kEmptyFrame]);
+ printf("kAudioFrameSpeech %u\n", counter_[kAudioFrameSpeech]);
+ printf("kAudioFrameCN %u\n", counter_[kAudioFrameCN]);
+ printf("kVideoFrameKey %u\n", counter_[kVideoFrameKey]);
+ printf("kVideoFrameDelta %u\n", counter_[kVideoFrameDelta]);
+ printf("\n\n");
+}
+
+void ActivityMonitor::ResetStatistics() {
+ memset(counter_, 0, sizeof(counter_));
+}
+
+void ActivityMonitor::GetStatistics(uint32_t* counter) {
+ memcpy(counter, counter_, sizeof(counter_));
+}
+
+TestVadDtx::TestVadDtx()
+ : acm_send_(AudioCodingModule::Create(0)),
+ acm_receive_(AudioCodingModule::Create(1)),
+ channel_(new Channel),
+ monitor_(new ActivityMonitor) {
+ EXPECT_EQ(0, acm_send_->RegisterTransportCallback(channel_.get()));
+ channel_->RegisterReceiverACM(acm_receive_.get());
+ EXPECT_EQ(0, acm_send_->RegisterVADCallback(monitor_.get()));
+}
+
+void TestVadDtx::RegisterCodec(CodecInst codec_param) {
+ // Set the codec for sending and receiving.
+ EXPECT_EQ(0, acm_send_->RegisterSendCodec(codec_param));
+ EXPECT_EQ(0, acm_receive_->RegisterReceiveCodec(codec_param));
+ channel_->SetIsStereo(codec_param.channels > 1);
+}
+
+// Encode a file and check that the number of times each frame type occurs
+// follows the expectation.
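+// |expects| is indexed by FrameType (kEmptyFrame, kAudioFrameSpeech,
+// kAudioFrameCN, kVideoFrameKey, kVideoFrameDelta): an entry of 0 requires
+// that no frame of that type occurs, 1 requires at least one occurrence, and
+// any other value (e.g. -1) skips the check.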
+void TestVadDtx::Run(std::string in_filename, int frequency, int channels,
+ std::string out_filename, bool append,
+ const int* expects) {
+ monitor_->ResetStatistics();
+
+ PCMFile in_file;
+ in_file.Open(in_filename, frequency, "rb");
+ in_file.ReadStereo(channels > 1);
+ // Set test length to 1000 ms (100 blocks of 10 ms each).
+ in_file.SetNum10MsBlocksToRead(100);
+  // Fast-forward the input file 500 ms (50 blocks). The first second of the
+  // file is silence, but we want to keep half of it to test silence periods.
+ in_file.FastForward(50);
+
+ PCMFile out_file;
+ if (append) {
+ out_file.Open(out_filename, kOutputFreqHz, "ab");
+ } else {
+ out_file.Open(out_filename, kOutputFreqHz, "wb");
+ }
+
+ uint16_t frame_size_samples = in_file.PayloadLength10Ms();
+ uint32_t time_stamp = 0x12345678;
+ AudioFrame audio_frame;
+ while (!in_file.EndOfFile()) {
+ in_file.Read10MsData(audio_frame);
+ audio_frame.timestamp_ = time_stamp;
+ time_stamp += frame_size_samples;
+ EXPECT_GE(acm_send_->Add10MsData(audio_frame), 0);
+ acm_receive_->PlayoutData10Ms(kOutputFreqHz, &audio_frame);
+ out_file.Write10MsData(audio_frame);
+ }
+
+ in_file.Close();
+ out_file.Close();
+
+#ifdef PRINT_STAT
+ monitor_->PrintStatistics();
+#endif
+
+ uint32_t stats[5];
+ monitor_->GetStatistics(stats);
+ monitor_->ResetStatistics();
+
+  for (size_t i = 0; i < sizeof(stats) / sizeof(stats[0]); ++i) {
+    const uint32_t st = stats[i];
+ switch (expects[i]) {
+ case 0: {
+ EXPECT_EQ(0u, st) << "stats[" << i << "] error.";
+ break;
+ }
+ case 1: {
+ EXPECT_GT(st, 0u) << "stats[" << i << "] error.";
+ break;
+ }
+ }
+ }
+}
+
+// Following is the implementation of TestWebRtcVadDtx.
+TestWebRtcVadDtx::TestWebRtcVadDtx()
+ : vad_enabled_(false),
+ dtx_enabled_(false),
+ output_file_num_(0) {
+}
+
+void TestWebRtcVadDtx::Perform() {
+ // Go through various test cases.
+#ifdef WEBRTC_CODEC_ISAC
+ // Register iSAC WB as send codec
+ RegisterCodec(kIsacWb);
+ RunTestCases();
+
+ // Register iSAC SWB as send codec
+ RegisterCodec(kIsacSwb);
+ RunTestCases();
+#endif
+
+#ifdef WEBRTC_CODEC_ILBC
+ // Register iLBC as send codec
+ RegisterCodec(kIlbc);
+ RunTestCases();
+#endif
+
+#ifdef WEBRTC_CODEC_OPUS
+ // Register Opus as send codec
+ RegisterCodec(kOpus);
+ RunTestCases();
+#endif
+}
+
+// Test various configurations on VAD/DTX.
+void TestWebRtcVadDtx::RunTestCases() {
+ // #1 DTX = OFF, VAD = OFF, VADNormal
+ SetVAD(false, false, VADNormal);
+ Test(true);
+
+ // #2 DTX = ON, VAD = ON, VADAggr
+ SetVAD(true, true, VADAggr);
+ Test(false);
+
+ // #3 DTX = ON, VAD = ON, VADLowBitrate
+ SetVAD(true, true, VADLowBitrate);
+ Test(false);
+
+ // #4 DTX = ON, VAD = ON, VADVeryAggr
+ SetVAD(true, true, VADVeryAggr);
+ Test(false);
+
+ // #5 DTX = ON, VAD = ON, VADNormal
+ SetVAD(true, true, VADNormal);
+ Test(false);
+}
+
+// Set the expectation and run the test.
+void TestWebRtcVadDtx::Test(bool new_outfile) {
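+  // expects[] follows the packet-type ordering documented in TestVADDTX.h:
+  // ignore empty frames, require speech packets, require CN packets exactly
+  // when DTX is enabled, and require no video frame types.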
+ int expects[] = {-1, 1, dtx_enabled_, 0, 0};
+ if (new_outfile) {
+ output_file_num_++;
+ }
+ std::stringstream out_filename;
+ out_filename << webrtc::test::OutputPath()
+ << "testWebRtcVadDtx_outFile_"
+ << output_file_num_
+ << ".pcm";
+ Run(webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm"),
+ 32000, 1, out_filename.str(), !new_outfile, expects);
+}
+
+void TestWebRtcVadDtx::SetVAD(bool enable_dtx, bool enable_vad,
+ ACMVADMode vad_mode) {
+ ACMVADMode mode;
+ EXPECT_EQ(0, acm_send_->SetVAD(enable_dtx, enable_vad, vad_mode));
+ EXPECT_EQ(0, acm_send_->VAD(&dtx_enabled_, &vad_enabled_, &mode));
+
+ auto codec_param = acm_send_->SendCodec();
+ ASSERT_TRUE(codec_param);
+ if (STR_CASE_CMP(codec_param->plname, "opus") == 0) {
+ // If send codec is Opus, WebRTC VAD/DTX cannot be used.
+ enable_dtx = enable_vad = false;
+ }
+
+  EXPECT_EQ(dtx_enabled_, enable_dtx); // DTX should be set as expected.
+
+ if (dtx_enabled_) {
+ EXPECT_TRUE(vad_enabled_); // WebRTC DTX cannot run without WebRTC VAD.
+ } else {
+ // Using no DTX should not affect setting of VAD.
+ EXPECT_EQ(enable_vad, vad_enabled_);
+ }
+}
+
+// Following is the implementation of TestOpusDtx.
+void TestOpusDtx::Perform() {
+#ifdef WEBRTC_CODEC_ISAC
+ // If we set other codec than Opus, DTX cannot be switched on.
+ RegisterCodec(kIsacWb);
+ EXPECT_EQ(-1, acm_send_->EnableOpusDtx());
+ EXPECT_EQ(0, acm_send_->DisableOpusDtx());
+#endif
+
+#ifdef WEBRTC_CODEC_OPUS
+ int expects[] = {0, 1, 0, 0, 0};
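+  // With Opus DTX disabled we expect speech packets and no empty frames; the
+  // kEmptyFrame entry is flipped to 1 below whenever DTX is enabled.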
+
+ // Register Opus as send codec
+ std::string out_filename = webrtc::test::OutputPath() +
+ "testOpusDtx_outFile_mono.pcm";
+ RegisterCodec(kOpus);
+ EXPECT_EQ(0, acm_send_->DisableOpusDtx());
+
+ Run(webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm"),
+ 32000, 1, out_filename, false, expects);
+
+ EXPECT_EQ(0, acm_send_->EnableOpusDtx());
+ expects[kEmptyFrame] = 1;
+ Run(webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm"),
+ 32000, 1, out_filename, true, expects);
+
+ // Register stereo Opus as send codec
+ out_filename = webrtc::test::OutputPath() + "testOpusDtx_outFile_stereo.pcm";
+ RegisterCodec(kOpusStereo);
+ EXPECT_EQ(0, acm_send_->DisableOpusDtx());
+ expects[kEmptyFrame] = 0;
+ Run(webrtc::test::ResourcePath("audio_coding/teststereo32kHz", "pcm"),
+ 32000, 2, out_filename, false, expects);
+
+ EXPECT_EQ(0, acm_send_->EnableOpusDtx());
+
+ expects[kEmptyFrame] = 1;
+ Run(webrtc::test::ResourcePath("audio_coding/teststereo32kHz", "pcm"),
+ 32000, 2, out_filename, true, expects);
+#endif
+}
+
+} // namespace webrtc
diff --git a/webrtc/modules/audio_coding/test/TestVADDTX.h b/webrtc/modules/audio_coding/test/TestVADDTX.h
new file mode 100644
index 0000000000..1e7f0ef4d7
--- /dev/null
+++ b/webrtc/modules/audio_coding/test/TestVADDTX.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_CODING_TEST_TESTVADDTX_H_
+#define WEBRTC_MODULES_AUDIO_CODING_TEST_TESTVADDTX_H_
+
+
+#include "webrtc/base/scoped_ptr.h"
+#include "webrtc/common_types.h"
+#include "webrtc/modules/audio_coding/include/audio_coding_module.h"
+#include "webrtc/modules/audio_coding/include/audio_coding_module_typedefs.h"
+#include "webrtc/modules/audio_coding/test/ACMTest.h"
+#include "webrtc/modules/audio_coding/test/Channel.h"
+
+namespace webrtc {
+
+class ActivityMonitor : public ACMVADCallback {
+ public:
+ ActivityMonitor();
+ int32_t InFrameType(FrameType frame_type);
+ void PrintStatistics();
+ void ResetStatistics();
+ void GetStatistics(uint32_t* stats);
+ private:
+ // 0 - kEmptyFrame
+ // 1 - kAudioFrameSpeech
+ // 2 - kAudioFrameCN
+ // 3 - kVideoFrameKey (not used by audio)
+ // 4 - kVideoFrameDelta (not used by audio)
+ uint32_t counter_[5];
+};
+
+
+// TestVadDtx verifies that VAD/DTX perform as they should. It runs through an
+// audio file and checks whether the occurrence of the various packet types
+// follows expectation. TestVadDtx relies on a derived class to implement
+// Perform() and put the test together.
+class TestVadDtx : public ACMTest {
+ public:
+ static const int kOutputFreqHz = 16000;
+
+ TestVadDtx();
+
+ virtual void Perform() = 0;
+
+ protected:
+ void RegisterCodec(CodecInst codec_param);
+
+  // Encode a file and check whether the counts of the various packet types
+  // match the expectation. Saves the decoded result to a file.
+  // expects[x] means
+  // -1 : do not care,
+  // 0 : there have been no packets of type |x|,
+  // 1 : there have been packets of type |x|,
+  // where |x| indicates one of the following packet types:
+ // 0 - kEmptyFrame
+ // 1 - kAudioFrameSpeech
+ // 2 - kAudioFrameCN
+ // 3 - kVideoFrameKey (not used by audio)
+ // 4 - kVideoFrameDelta (not used by audio)
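+  // For example, {0, 1, 0, 0, 0} requires speech packets and no empty frames,
+  // while {1, 1, 0, 0, 0} additionally requires empty (DTX) frames; see
+  // TestOpusDtx for both.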
+ void Run(std::string in_filename, int frequency, int channels,
+ std::string out_filename, bool append, const int* expects);
+
+ rtc::scoped_ptr<AudioCodingModule> acm_send_;
+ rtc::scoped_ptr<AudioCodingModule> acm_receive_;
+ rtc::scoped_ptr<Channel> channel_;
+ rtc::scoped_ptr<ActivityMonitor> monitor_;
+};
+
+// TestWebRtcVadDtx verifies that the WebRTC VAD/DTX perform as they should.
+class TestWebRtcVadDtx final : public TestVadDtx {
+ public:
+ TestWebRtcVadDtx();
+
+ void Perform() override;
+
+ private:
+ void RunTestCases();
+ void Test(bool new_outfile);
+ void SetVAD(bool enable_dtx, bool enable_vad, ACMVADMode vad_mode);
+
+ bool vad_enabled_;
+ bool dtx_enabled_;
+ int output_file_num_;
+};
+
+// TestOpusDtx verifies that the Opus DTX performs as it should.
+class TestOpusDtx final : public TestVadDtx {
+ public:
+ void Perform() override;
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_AUDIO_CODING_TEST_TESTVADDTX_H_
diff --git a/webrtc/modules/audio_coding/test/Tester.cc b/webrtc/modules/audio_coding/test/Tester.cc
new file mode 100644
index 0000000000..a27f0bc58b
--- /dev/null
+++ b/webrtc/modules/audio_coding/test/Tester.cc
@@ -0,0 +1,181 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdio.h>
+#include <string>
+#include <vector>
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/modules/audio_coding/include/audio_coding_module.h"
+#include "webrtc/modules/audio_coding/test/APITest.h"
+#include "webrtc/modules/audio_coding/test/EncodeDecodeTest.h"
+#include "webrtc/modules/audio_coding/test/iSACTest.h"
+#include "webrtc/modules/audio_coding/test/opus_test.h"
+#include "webrtc/modules/audio_coding/test/PacketLossTest.h"
+#include "webrtc/modules/audio_coding/test/TestAllCodecs.h"
+#include "webrtc/modules/audio_coding/test/TestRedFec.h"
+#include "webrtc/modules/audio_coding/test/TestStereo.h"
+#include "webrtc/modules/audio_coding/test/TestVADDTX.h"
+#include "webrtc/modules/audio_coding/test/TwoWayCommunication.h"
+#include "webrtc/system_wrappers/include/trace.h"
+#include "webrtc/test/testsupport/fileutils.h"
+
+using webrtc::Trace;
+
+// This parameter describes how to run the tests. It is normally set to 0, in
+// which case all tests run in quiet mode.
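+// Non-zero values make some tests (e.g. TwoWayCommunication and ISACTest) run
+// interactively and/or print extra statistics.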
+#define ACM_TEST_MODE 0
+
+TEST(AudioCodingModuleTest, TestAllCodecs) {
+ Trace::CreateTrace();
+ Trace::SetTraceFile((webrtc::test::OutputPath() +
+ "acm_allcodecs_trace.txt").c_str());
+ webrtc::TestAllCodecs(ACM_TEST_MODE).Perform();
+ Trace::ReturnTrace();
+}
+
+#if defined(WEBRTC_ANDROID)
+TEST(AudioCodingModuleTest, DISABLED_TestEncodeDecode) {
+#else
+TEST(AudioCodingModuleTest, TestEncodeDecode) {
+#endif
+ Trace::CreateTrace();
+ Trace::SetTraceFile((webrtc::test::OutputPath() +
+ "acm_encodedecode_trace.txt").c_str());
+ webrtc::EncodeDecodeTest(ACM_TEST_MODE).Perform();
+ Trace::ReturnTrace();
+}
+
+#if defined(WEBRTC_CODEC_RED)
+#if defined(WEBRTC_ANDROID)
+TEST(AudioCodingModuleTest, DISABLED_TestRedFec) {
+#else
+TEST(AudioCodingModuleTest, TestRedFec) {
+#endif
+ Trace::CreateTrace();
+ Trace::SetTraceFile((webrtc::test::OutputPath() +
+ "acm_fec_trace.txt").c_str());
+ webrtc::TestRedFec().Perform();
+ Trace::ReturnTrace();
+}
+#endif
+
+#if defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)
+#if defined(WEBRTC_ANDROID)
+TEST(AudioCodingModuleTest, DISABLED_TestIsac) {
+#else
+TEST(AudioCodingModuleTest, TestIsac) {
+#endif
+ Trace::CreateTrace();
+ Trace::SetTraceFile((webrtc::test::OutputPath() +
+ "acm_isac_trace.txt").c_str());
+ webrtc::ISACTest(ACM_TEST_MODE).Perform();
+ Trace::ReturnTrace();
+}
+#endif
+
+#if (defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)) && \
+ defined(WEBRTC_CODEC_ILBC) && defined(WEBRTC_CODEC_G722)
+#if defined(WEBRTC_ANDROID)
+TEST(AudioCodingModuleTest, DISABLED_TwoWayCommunication) {
+#else
+TEST(AudioCodingModuleTest, TwoWayCommunication) {
+#endif
+ Trace::CreateTrace();
+ Trace::SetTraceFile((webrtc::test::OutputPath() +
+ "acm_twowaycom_trace.txt").c_str());
+ webrtc::TwoWayCommunication(ACM_TEST_MODE).Perform();
+ Trace::ReturnTrace();
+}
+#endif
+
+#if defined(WEBRTC_ANDROID)
+TEST(AudioCodingModuleTest, DISABLED_TestStereo) {
+#else
+TEST(AudioCodingModuleTest, TestStereo) {
+#endif
+ Trace::CreateTrace();
+ Trace::SetTraceFile((webrtc::test::OutputPath() +
+ "acm_stereo_trace.txt").c_str());
+ webrtc::TestStereo(ACM_TEST_MODE).Perform();
+ Trace::ReturnTrace();
+}
+
+#if defined(WEBRTC_ANDROID)
+TEST(AudioCodingModuleTest, DISABLED_TestWebRtcVadDtx) {
+#else
+TEST(AudioCodingModuleTest, TestWebRtcVadDtx) {
+#endif
+ Trace::CreateTrace();
+ Trace::SetTraceFile((webrtc::test::OutputPath() +
+ "acm_vaddtx_trace.txt").c_str());
+ webrtc::TestWebRtcVadDtx().Perform();
+ Trace::ReturnTrace();
+}
+
+TEST(AudioCodingModuleTest, TestOpusDtx) {
+ Trace::CreateTrace();
+ Trace::SetTraceFile((webrtc::test::OutputPath() +
+ "acm_opusdtx_trace.txt").c_str());
+ webrtc::TestOpusDtx().Perform();
+ Trace::ReturnTrace();
+}
+
+TEST(AudioCodingModuleTest, TestOpus) {
+ Trace::CreateTrace();
+ Trace::SetTraceFile((webrtc::test::OutputPath() +
+ "acm_opus_trace.txt").c_str());
+ webrtc::OpusTest().Perform();
+ Trace::ReturnTrace();
+}
+
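+// The PacketLossTest arguments are (channels, expected loss rate, actual loss
+// rate, burst length); see PacketLossTest.h.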
+TEST(AudioCodingModuleTest, TestPacketLoss) {
+ Trace::CreateTrace();
+ Trace::SetTraceFile((webrtc::test::OutputPath() +
+ "acm_packetloss_trace.txt").c_str());
+ webrtc::PacketLossTest(1, 10, 10, 1).Perform();
+ Trace::ReturnTrace();
+}
+
+TEST(AudioCodingModuleTest, TestPacketLossBurst) {
+ Trace::CreateTrace();
+ Trace::SetTraceFile((webrtc::test::OutputPath() +
+ "acm_packetloss_burst_trace.txt").c_str());
+ webrtc::PacketLossTest(1, 10, 10, 2).Perform();
+ Trace::ReturnTrace();
+}
+
+TEST(AudioCodingModuleTest, TestPacketLossStereo) {
+ Trace::CreateTrace();
+ Trace::SetTraceFile((webrtc::test::OutputPath() +
+ "acm_packetloss_trace.txt").c_str());
+ webrtc::PacketLossTest(2, 10, 10, 1).Perform();
+ Trace::ReturnTrace();
+}
+
+TEST(AudioCodingModuleTest, TestPacketLossStereoBurst) {
+ Trace::CreateTrace();
+ Trace::SetTraceFile((webrtc::test::OutputPath() +
+ "acm_packetloss_burst_trace.txt").c_str());
+ webrtc::PacketLossTest(2, 10, 10, 2).Perform();
+ Trace::ReturnTrace();
+}
+
+// The full API test is too long to run automatically on bots, but can be used
+// for offline testing. User interaction is needed.
+#ifdef ACM_TEST_FULL_API
+ TEST(AudioCodingModuleTest, TestAPI) {
+ Trace::CreateTrace();
+ Trace::SetTraceFile((webrtc::test::OutputPath() +
+ "acm_apitest_trace.txt").c_str());
+ webrtc::APITest().Perform();
+ Trace::ReturnTrace();
+ }
+#endif
diff --git a/webrtc/modules/audio_coding/test/TimedTrace.cc b/webrtc/modules/audio_coding/test/TimedTrace.cc
new file mode 100644
index 0000000000..ff9b5eeb76
--- /dev/null
+++ b/webrtc/modules/audio_coding/test/TimedTrace.cc
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "TimedTrace.h"
+#include <math.h>
+
+double TimedTrace::_timeEllapsedSec = 0;
+FILE* TimedTrace::_timedTraceFile = NULL;
+
+TimedTrace::TimedTrace() {
+
+}
+
+TimedTrace::~TimedTrace() {
+ if (_timedTraceFile != NULL) {
+ fclose(_timedTraceFile);
+ }
+ _timedTraceFile = NULL;
+}
+
+int16_t TimedTrace::SetUp(char* fileName) {
+ if (_timedTraceFile == NULL) {
+ _timedTraceFile = fopen(fileName, "w");
+ }
+ if (_timedTraceFile == NULL) {
+ return -1;
+ }
+ return 0;
+}
+
+void TimedTrace::SetTimeEllapsed(double timeEllapsedSec) {
+ _timeEllapsedSec = timeEllapsedSec;
+}
+
+double TimedTrace::TimeEllapsed() {
+ return _timeEllapsedSec;
+}
+
+void TimedTrace::Tick10Msec() {
+ _timeEllapsedSec += 0.010;
+}
+
+void TimedTrace::TimedLogg(char* message) {
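+  // Each log line contains the total elapsed time in seconds, the same time
+  // as minutes:seconds, and then the message.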
+ unsigned int minutes = (uint32_t) floor(_timeEllapsedSec / 60.0);
+ double seconds = _timeEllapsedSec - minutes * 60;
+ //char myFormat[100] = "%8.2f, %3u:%05.2f: %s\n";
+ if (_timedTraceFile != NULL) {
+ fprintf(_timedTraceFile, "%8.2f, %3u:%05.2f: %s\n", _timeEllapsedSec,
+ minutes, seconds, message);
+ }
+}
diff --git a/webrtc/modules/audio_coding/test/TimedTrace.h b/webrtc/modules/audio_coding/test/TimedTrace.h
new file mode 100644
index 0000000000..0793eb0c0c
--- /dev/null
+++ b/webrtc/modules/audio_coding/test/TimedTrace.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_CODING_TEST_TIMEDTRACE_H_
+#define WEBRTC_MODULES_AUDIO_CODING_TEST_TIMEDTRACE_H_
+
+#include "webrtc/typedefs.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+class TimedTrace {
+ public:
+ TimedTrace();
+ ~TimedTrace();
+
+ void SetTimeEllapsed(double myTime);
+ double TimeEllapsed();
+ void Tick10Msec();
+ int16_t SetUp(char* fileName);
+ void TimedLogg(char* message);
+
+ private:
+ static double _timeEllapsedSec;
+ static FILE* _timedTraceFile;
+
+};
+
+#endif // WEBRTC_MODULES_AUDIO_CODING_TEST_TIMEDTRACE_H_
diff --git a/webrtc/modules/audio_coding/test/TwoWayCommunication.cc b/webrtc/modules/audio_coding/test/TwoWayCommunication.cc
new file mode 100644
index 0000000000..56e136bd34
--- /dev/null
+++ b/webrtc/modules/audio_coding/test/TwoWayCommunication.cc
@@ -0,0 +1,299 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "TwoWayCommunication.h"
+
+#include <ctype.h>
+#include <stdio.h>
+#include <string.h>
+
+#ifdef WIN32
+#include <Windows.h>
+#endif
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/engine_configurations.h"
+#include "webrtc/common_types.h"
+#include "webrtc/modules/audio_coding/test/PCMFile.h"
+#include "webrtc/modules/audio_coding/test/utility.h"
+#include "webrtc/system_wrappers/include/trace.h"
+#include "webrtc/test/testsupport/fileutils.h"
+
+namespace webrtc {
+
+#define MAX_FILE_NAME_LENGTH_BYTE 500
+
+TwoWayCommunication::TwoWayCommunication(int testMode)
+ : _acmA(AudioCodingModule::Create(1)),
+ _acmRefA(AudioCodingModule::Create(3)),
+ _testMode(testMode) {
+ AudioCodingModule::Config config;
+ // The clicks will be more obvious in FAX mode. TODO(henrik.lundin) Really?
+ config.neteq_config.playout_mode = kPlayoutFax;
+ config.id = 2;
+ _acmB.reset(AudioCodingModule::Create(config));
+ config.id = 4;
+ _acmRefB.reset(AudioCodingModule::Create(config));
+}
+
+TwoWayCommunication::~TwoWayCommunication() {
+ delete _channel_A2B;
+ delete _channel_B2A;
+ delete _channelRef_A2B;
+ delete _channelRef_B2A;
+#ifdef WEBRTC_DTMF_DETECTION
+ if (_dtmfDetectorA != NULL) {
+ delete _dtmfDetectorA;
+ }
+ if (_dtmfDetectorB != NULL) {
+ delete _dtmfDetectorB;
+ }
+#endif
+ _inFileA.Close();
+ _inFileB.Close();
+ _outFileA.Close();
+ _outFileB.Close();
+ _outFileRefA.Close();
+ _outFileRefB.Close();
+}
+
+void TwoWayCommunication::ChooseCodec(uint8_t* codecID_A,
+ uint8_t* codecID_B) {
+ rtc::scoped_ptr<AudioCodingModule> tmpACM(AudioCodingModule::Create(0));
+ uint8_t noCodec = tmpACM->NumberOfCodecs();
+ CodecInst codecInst;
+ printf("List of Supported Codecs\n");
+ printf("========================\n");
+ for (uint8_t codecCntr = 0; codecCntr < noCodec; codecCntr++) {
+ EXPECT_EQ(tmpACM->Codec(codecCntr, &codecInst), 0);
+ printf("%d- %s\n", codecCntr, codecInst.plname);
+ }
+ printf("\nChoose a send codec for side A [0]: ");
+ char myStr[15] = "";
+ EXPECT_TRUE(fgets(myStr, 10, stdin) != NULL);
+ *codecID_A = (uint8_t) atoi(myStr);
+
+ printf("\nChoose a send codec for side B [0]: ");
+ EXPECT_TRUE(fgets(myStr, 10, stdin) != NULL);
+ *codecID_B = (uint8_t) atoi(myStr);
+
+ printf("\n");
+}
+
+void TwoWayCommunication::SetUp() {
+ uint8_t codecID_A;
+ uint8_t codecID_B;
+
+ ChooseCodec(&codecID_A, &codecID_B);
+ CodecInst codecInst_A;
+ CodecInst codecInst_B;
+ CodecInst dummyCodec;
+ EXPECT_EQ(0, _acmA->Codec(codecID_A, &codecInst_A));
+ EXPECT_EQ(0, _acmB->Codec(codecID_B, &codecInst_B));
+ EXPECT_EQ(0, _acmA->Codec(6, &dummyCodec));
+
+ //--- Set A codecs
+ EXPECT_EQ(0, _acmA->RegisterSendCodec(codecInst_A));
+ EXPECT_EQ(0, _acmA->RegisterReceiveCodec(codecInst_B));
+ //--- Set ref-A codecs
+ EXPECT_EQ(0, _acmRefA->RegisterSendCodec(codecInst_A));
+ EXPECT_EQ(0, _acmRefA->RegisterReceiveCodec(codecInst_B));
+
+ //--- Set B codecs
+ EXPECT_EQ(0, _acmB->RegisterSendCodec(codecInst_B));
+ EXPECT_EQ(0, _acmB->RegisterReceiveCodec(codecInst_A));
+
+ //--- Set ref-B codecs
+ EXPECT_EQ(0, _acmRefB->RegisterSendCodec(codecInst_B));
+ EXPECT_EQ(0, _acmRefB->RegisterReceiveCodec(codecInst_A));
+
+ uint16_t frequencyHz;
+
+ //--- Input A
+ std::string in_file_name = webrtc::test::ResourcePath(
+ "audio_coding/testfile32kHz", "pcm");
+ frequencyHz = 32000;
+ printf("Enter input file at side A [%s]: ", in_file_name.c_str());
+ PCMFile::ChooseFile(&in_file_name, 499, &frequencyHz);
+ _inFileA.Open(in_file_name, frequencyHz, "rb");
+
+ //--- Output A
+ std::string out_file_a = webrtc::test::OutputPath() + "outA.pcm";
+ printf("Output file at side A: %s\n", out_file_a.c_str());
+ printf("Sampling frequency (in Hz) of the above file: %u\n", frequencyHz);
+ _outFileA.Open(out_file_a, frequencyHz, "wb");
+ std::string ref_file_name = webrtc::test::OutputPath() + "ref_outA.pcm";
+ _outFileRefA.Open(ref_file_name, frequencyHz, "wb");
+
+ //--- Input B
+ in_file_name = webrtc::test::ResourcePath("audio_coding/testfile32kHz",
+ "pcm");
+ frequencyHz = 32000;
+ printf("\n\nEnter input file at side B [%s]: ", in_file_name.c_str());
+ PCMFile::ChooseFile(&in_file_name, 499, &frequencyHz);
+ _inFileB.Open(in_file_name, frequencyHz, "rb");
+
+ //--- Output B
+ std::string out_file_b = webrtc::test::OutputPath() + "outB.pcm";
+ printf("Output file at side B: %s\n", out_file_b.c_str());
+ printf("Sampling frequency (in Hz) of the above file: %u\n", frequencyHz);
+ _outFileB.Open(out_file_b, frequencyHz, "wb");
+ ref_file_name = webrtc::test::OutputPath() + "ref_outB.pcm";
+ _outFileRefB.Open(ref_file_name, frequencyHz, "wb");
+
+ //--- Set A-to-B channel
+ _channel_A2B = new Channel;
+ _acmA->RegisterTransportCallback(_channel_A2B);
+ _channel_A2B->RegisterReceiverACM(_acmB.get());
+ //--- Do the same for the reference
+ _channelRef_A2B = new Channel;
+ _acmRefA->RegisterTransportCallback(_channelRef_A2B);
+ _channelRef_A2B->RegisterReceiverACM(_acmRefB.get());
+
+ //--- Set B-to-A channel
+ _channel_B2A = new Channel;
+ _acmB->RegisterTransportCallback(_channel_B2A);
+ _channel_B2A->RegisterReceiverACM(_acmA.get());
+ //--- Do the same for reference
+ _channelRef_B2A = new Channel;
+ _acmRefB->RegisterTransportCallback(_channelRef_B2A);
+ _channelRef_B2A->RegisterReceiverACM(_acmRefA.get());
+}
+
+void TwoWayCommunication::SetUpAutotest() {
+ CodecInst codecInst_A;
+ CodecInst codecInst_B;
+ CodecInst dummyCodec;
+
+ EXPECT_EQ(0, _acmA->Codec("ISAC", &codecInst_A, 16000, 1));
+ EXPECT_EQ(0, _acmB->Codec("L16", &codecInst_B, 8000, 1));
+ EXPECT_EQ(0, _acmA->Codec(6, &dummyCodec));
+
+ //--- Set A codecs
+ EXPECT_EQ(0, _acmA->RegisterSendCodec(codecInst_A));
+ EXPECT_EQ(0, _acmA->RegisterReceiveCodec(codecInst_B));
+
+ //--- Set ref-A codecs
+ EXPECT_GT(_acmRefA->RegisterSendCodec(codecInst_A), -1);
+ EXPECT_GT(_acmRefA->RegisterReceiveCodec(codecInst_B), -1);
+
+ //--- Set B codecs
+ EXPECT_GT(_acmB->RegisterSendCodec(codecInst_B), -1);
+ EXPECT_GT(_acmB->RegisterReceiveCodec(codecInst_A), -1);
+
+ //--- Set ref-B codecs
+ EXPECT_EQ(0, _acmRefB->RegisterSendCodec(codecInst_B));
+ EXPECT_EQ(0, _acmRefB->RegisterReceiveCodec(codecInst_A));
+
+ uint16_t frequencyHz;
+
+ //--- Input A and B
+ std::string in_file_name = webrtc::test::ResourcePath(
+ "audio_coding/testfile32kHz", "pcm");
+ frequencyHz = 16000;
+ _inFileA.Open(in_file_name, frequencyHz, "rb");
+ _inFileB.Open(in_file_name, frequencyHz, "rb");
+
+ //--- Output A
+ std::string output_file_a = webrtc::test::OutputPath() + "outAutotestA.pcm";
+ frequencyHz = 16000;
+ _outFileA.Open(output_file_a, frequencyHz, "wb");
+ std::string output_ref_file_a = webrtc::test::OutputPath()
+ + "ref_outAutotestA.pcm";
+ _outFileRefA.Open(output_ref_file_a, frequencyHz, "wb");
+
+ //--- Output B
+ std::string output_file_b = webrtc::test::OutputPath() + "outAutotestB.pcm";
+ frequencyHz = 16000;
+ _outFileB.Open(output_file_b, frequencyHz, "wb");
+ std::string output_ref_file_b = webrtc::test::OutputPath()
+ + "ref_outAutotestB.pcm";
+ _outFileRefB.Open(output_ref_file_b, frequencyHz, "wb");
+
+ //--- Set A-to-B channel
+ _channel_A2B = new Channel;
+ _acmA->RegisterTransportCallback(_channel_A2B);
+ _channel_A2B->RegisterReceiverACM(_acmB.get());
+ //--- Do the same for the reference
+ _channelRef_A2B = new Channel;
+ _acmRefA->RegisterTransportCallback(_channelRef_A2B);
+ _channelRef_A2B->RegisterReceiverACM(_acmRefB.get());
+
+ //--- Set B-to-A channel
+ _channel_B2A = new Channel;
+ _acmB->RegisterTransportCallback(_channel_B2A);
+ _channel_B2A->RegisterReceiverACM(_acmA.get());
+ //--- Do the same for reference
+ _channelRef_B2A = new Channel;
+ _acmRefB->RegisterTransportCallback(_channelRef_B2A);
+ _channelRef_B2A->RegisterReceiverACM(_acmRefA.get());
+}
+
+void TwoWayCommunication::Perform() {
+ if (_testMode == 0) {
+ SetUpAutotest();
+ } else {
+ SetUp();
+ }
+ unsigned int msecPassed = 0;
+ unsigned int secPassed = 0;
+
+ int32_t outFreqHzA = _outFileA.SamplingFrequency();
+ int32_t outFreqHzB = _outFileB.SamplingFrequency();
+
+ AudioFrame audioFrame;
+
+ auto codecInst_B = _acmB->SendCodec();
+ ASSERT_TRUE(codecInst_B);
+
+  // The following loop tests that the code can handle misuse of the APIs. In
+  // the middle of a session, with data flowing between the two sides A and B,
+  // APIs are called and the code should keep running and be able to recover.
+ while (!_inFileA.EndOfFile() && !_inFileB.EndOfFile()) {
+ msecPassed += 10;
+ EXPECT_GT(_inFileA.Read10MsData(audioFrame), 0);
+ EXPECT_GE(_acmA->Add10MsData(audioFrame), 0);
+ EXPECT_GE(_acmRefA->Add10MsData(audioFrame), 0);
+
+ EXPECT_GT(_inFileB.Read10MsData(audioFrame), 0);
+
+ EXPECT_GE(_acmB->Add10MsData(audioFrame), 0);
+ EXPECT_GE(_acmRefB->Add10MsData(audioFrame), 0);
+ EXPECT_EQ(0, _acmA->PlayoutData10Ms(outFreqHzA, &audioFrame));
+ _outFileA.Write10MsData(audioFrame);
+ EXPECT_EQ(0, _acmRefA->PlayoutData10Ms(outFreqHzA, &audioFrame));
+ _outFileRefA.Write10MsData(audioFrame);
+ EXPECT_EQ(0, _acmB->PlayoutData10Ms(outFreqHzB, &audioFrame));
+ _outFileB.Write10MsData(audioFrame);
+ EXPECT_EQ(0, _acmRefB->PlayoutData10Ms(outFreqHzB, &audioFrame));
+ _outFileRefB.Write10MsData(audioFrame);
+
+ // Update time counters each time a second of data has passed.
+ if (msecPassed >= 1000) {
+ msecPassed = 0;
+ secPassed++;
+ }
+ // Re-register send codec on side B.
+ if (((secPassed % 5) == 4) && (msecPassed >= 990)) {
+ EXPECT_EQ(0, _acmB->RegisterSendCodec(*codecInst_B));
+ EXPECT_TRUE(_acmB->SendCodec());
+ }
+ // Initialize receiver on side A.
+ if (((secPassed % 7) == 6) && (msecPassed == 0))
+ EXPECT_EQ(0, _acmA->InitializeReceiver());
+ // Re-register codec on side A.
+ if (((secPassed % 7) == 6) && (msecPassed >= 990)) {
+ EXPECT_EQ(0, _acmA->RegisterReceiveCodec(*codecInst_B));
+ }
+ }
+}
+
+} // namespace webrtc
diff --git a/webrtc/modules/audio_coding/test/TwoWayCommunication.h b/webrtc/modules/audio_coding/test/TwoWayCommunication.h
new file mode 100644
index 0000000000..77639935da
--- /dev/null
+++ b/webrtc/modules/audio_coding/test/TwoWayCommunication.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_CODING_TEST_TWOWAYCOMMUNICATION_H_
+#define WEBRTC_MODULES_AUDIO_CODING_TEST_TWOWAYCOMMUNICATION_H_
+
+#include "webrtc/base/scoped_ptr.h"
+#include "webrtc/modules/audio_coding/include/audio_coding_module.h"
+#include "webrtc/modules/audio_coding/test/ACMTest.h"
+#include "webrtc/modules/audio_coding/test/Channel.h"
+#include "webrtc/modules/audio_coding/test/PCMFile.h"
+#include "webrtc/modules/audio_coding/test/utility.h"
+
+namespace webrtc {
+
+class TwoWayCommunication : public ACMTest {
+ public:
+ explicit TwoWayCommunication(int testMode);
+ ~TwoWayCommunication();
+
+ void Perform();
+ private:
+ void ChooseCodec(uint8_t* codecID_A, uint8_t* codecID_B);
+ void SetUp();
+ void SetUpAutotest();
+
+ rtc::scoped_ptr<AudioCodingModule> _acmA;
+ rtc::scoped_ptr<AudioCodingModule> _acmB;
+
+ rtc::scoped_ptr<AudioCodingModule> _acmRefA;
+ rtc::scoped_ptr<AudioCodingModule> _acmRefB;
+
+ Channel* _channel_A2B;
+ Channel* _channel_B2A;
+
+ Channel* _channelRef_A2B;
+ Channel* _channelRef_B2A;
+
+ PCMFile _inFileA;
+ PCMFile _inFileB;
+
+ PCMFile _outFileA;
+ PCMFile _outFileB;
+
+ PCMFile _outFileRefA;
+ PCMFile _outFileRefB;
+
+ int _testMode;
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_AUDIO_CODING_TEST_TWOWAYCOMMUNICATION_H_
diff --git a/webrtc/modules/audio_coding/test/delay_test.cc b/webrtc/modules/audio_coding/test/delay_test.cc
new file mode 100644
index 0000000000..a8c137f501
--- /dev/null
+++ b/webrtc/modules/audio_coding/test/delay_test.cc
@@ -0,0 +1,265 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+#include <math.h>
+
+#include <iostream>
+
+#include "gflags/gflags.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/base/scoped_ptr.h"
+#include "webrtc/common.h"
+#include "webrtc/common_types.h"
+#include "webrtc/engine_configurations.h"
+#include "webrtc/modules/audio_coding/include/audio_coding_module.h"
+#include "webrtc/modules/audio_coding/include/audio_coding_module_typedefs.h"
+#include "webrtc/modules/audio_coding/acm2/acm_common_defs.h"
+#include "webrtc/modules/audio_coding/test/Channel.h"
+#include "webrtc/modules/audio_coding/test/PCMFile.h"
+#include "webrtc/modules/audio_coding/test/utility.h"
+#include "webrtc/system_wrappers/include/event_wrapper.h"
+#include "webrtc/test/testsupport/fileutils.h"
+
+DEFINE_string(codec, "isac", "Codec Name");
+DEFINE_int32(sample_rate_hz, 16000, "Sampling rate in Hertz.");
+DEFINE_int32(num_channels, 1, "Number of Channels.");
+DEFINE_string(input_file, "", "Input file, PCM16 32 kHz, optional.");
+DEFINE_int32(delay, 0, "Delay in milliseconds.");
+DEFINE_bool(dtx, false, "Enable DTX at the sender side.");
+DEFINE_bool(packet_loss, false, "Apply packet loss, c.f. Channel{.cc, .h}.");
+DEFINE_bool(fec, false, "Use Forward Error Correction (FEC).");
+
+namespace webrtc {
+
+namespace {
+
+struct CodecSettings {
+ char name[50];
+ int sample_rate_hz;
+ int num_channels;
+};
+
+struct AcmSettings {
+ bool dtx;
+ bool fec;
+};
+
+struct TestSettings {
+ CodecSettings codec;
+ AcmSettings acm;
+ bool packet_loss;
+};
+
+} // namespace
+
+class DelayTest {
+ public:
+ DelayTest()
+ : acm_a_(AudioCodingModule::Create(0)),
+ acm_b_(AudioCodingModule::Create(1)),
+ channel_a2b_(new Channel),
+ test_cntr_(0),
+ encoding_sample_rate_hz_(8000) {}
+
+ ~DelayTest() {
+ if (channel_a2b_ != NULL) {
+ delete channel_a2b_;
+ channel_a2b_ = NULL;
+ }
+ in_file_a_.Close();
+ }
+
+ void Initialize() {
+ test_cntr_ = 0;
+ std::string file_name = webrtc::test::ResourcePath(
+ "audio_coding/testfile32kHz", "pcm");
+ if (FLAGS_input_file.size() > 0)
+ file_name = FLAGS_input_file;
+ in_file_a_.Open(file_name, 32000, "rb");
+ ASSERT_EQ(0, acm_a_->InitializeReceiver()) <<
+ "Couldn't initialize receiver.\n";
+ ASSERT_EQ(0, acm_b_->InitializeReceiver()) <<
+ "Couldn't initialize receiver.\n";
+
+ if (FLAGS_delay > 0) {
+ ASSERT_EQ(0, acm_b_->SetMinimumPlayoutDelay(FLAGS_delay)) <<
+ "Failed to set minimum delay.\n";
+ }
+
+ int num_encoders = acm_a_->NumberOfCodecs();
+ CodecInst my_codec_param;
+ for (int n = 0; n < num_encoders; n++) {
+ EXPECT_EQ(0, acm_b_->Codec(n, &my_codec_param)) <<
+ "Failed to get codec.";
+ if (STR_CASE_CMP(my_codec_param.plname, "opus") == 0)
+ my_codec_param.channels = 1;
+ else if (my_codec_param.channels > 1)
+ continue;
+ if (STR_CASE_CMP(my_codec_param.plname, "CN") == 0 &&
+ my_codec_param.plfreq == 48000)
+ continue;
+ if (STR_CASE_CMP(my_codec_param.plname, "telephone-event") == 0)
+ continue;
+ ASSERT_EQ(0, acm_b_->RegisterReceiveCodec(my_codec_param)) <<
+ "Couldn't register receive codec.\n";
+ }
+
+ // Create and connect the channel
+ ASSERT_EQ(0, acm_a_->RegisterTransportCallback(channel_a2b_)) <<
+ "Couldn't register Transport callback.\n";
+ channel_a2b_->RegisterReceiverACM(acm_b_.get());
+ }
+
+ void Perform(const TestSettings* config, size_t num_tests, int duration_sec,
+ const char* output_prefix) {
+ for (size_t n = 0; n < num_tests; ++n) {
+ ApplyConfig(config[n]);
+ Run(duration_sec, output_prefix);
+ }
+ }
+
+ private:
+ void ApplyConfig(const TestSettings& config) {
+ printf("====================================\n");
+ printf("Test %d \n"
+           "Codec: %s, %d Hz, %d channel(s)\n"
+ "ACM: DTX %s, FEC %s\n"
+ "Channel: %s\n",
+ ++test_cntr_, config.codec.name, config.codec.sample_rate_hz,
+ config.codec.num_channels, config.acm.dtx ? "on" : "off",
+ config.acm.fec ? "on" : "off",
+ config.packet_loss ? "with packet-loss" : "no packet-loss");
+ SendCodec(config.codec);
+ ConfigAcm(config.acm);
+ ConfigChannel(config.packet_loss);
+ }
+
+ void SendCodec(const CodecSettings& config) {
+ CodecInst my_codec_param;
+ ASSERT_EQ(0, AudioCodingModule::Codec(
+ config.name, &my_codec_param, config.sample_rate_hz,
+ config.num_channels)) << "Specified codec is not supported.\n";
+
+ encoding_sample_rate_hz_ = my_codec_param.plfreq;
+ ASSERT_EQ(0, acm_a_->RegisterSendCodec(my_codec_param)) <<
+ "Failed to register send-codec.\n";
+ }
+
+ void ConfigAcm(const AcmSettings& config) {
+ ASSERT_EQ(0, acm_a_->SetVAD(config.dtx, config.dtx, VADAggr)) <<
+ "Failed to set VAD.\n";
+ ASSERT_EQ(0, acm_a_->SetREDStatus(config.fec)) <<
+ "Failed to set RED.\n";
+ }
+
+ void ConfigChannel(bool packet_loss) {
+ channel_a2b_->SetFECTestWithPacketLoss(packet_loss);
+ }
+
+ void OpenOutFile(const char* output_id) {
+ std::stringstream file_stream;
+ file_stream << "delay_test_" << FLAGS_codec << "_" << FLAGS_sample_rate_hz
+ << "Hz" << "_" << FLAGS_delay << "ms.pcm";
+ std::cout << "Output file: " << file_stream.str() << std::endl << std::endl;
+ std::string file_name = webrtc::test::OutputPath() + file_stream.str();
+ out_file_b_.Open(file_name.c_str(), 32000, "wb");
+ }
+
+ void Run(int duration_sec, const char* output_prefix) {
+ OpenOutFile(output_prefix);
+ AudioFrame audio_frame;
+ uint32_t out_freq_hz_b = out_file_b_.SamplingFrequency();
+
+ int num_frames = 0;
+ int in_file_frames = 0;
+ uint32_t playout_ts;
+ uint32_t received_ts;
+ double average_delay = 0;
+ double inst_delay_sec = 0;
+ while (num_frames < (duration_sec * 100)) {
+ if (in_file_a_.EndOfFile()) {
+ in_file_a_.Rewind();
+ }
+
+      // Print delay information every 64 frames.
+ if ((num_frames & 0x3F) == 0x3F) {
+ NetworkStatistics statistics;
+ acm_b_->GetNetworkStatistics(&statistics);
+ fprintf(stdout, "delay: min=%3d max=%3d mean=%3d median=%3d"
+ " ts-based average = %6.3f, "
+ "curr buff-lev = %4u opt buff-lev = %4u \n",
+ statistics.minWaitingTimeMs, statistics.maxWaitingTimeMs,
+ statistics.meanWaitingTimeMs, statistics.medianWaitingTimeMs,
+ average_delay, statistics.currentBufferSize,
+ statistics.preferredBufferSize);
+        fflush(stdout);
+ }
+
+ in_file_a_.Read10MsData(audio_frame);
+ ASSERT_GE(acm_a_->Add10MsData(audio_frame), 0);
+ ASSERT_EQ(0, acm_b_->PlayoutData10Ms(out_freq_hz_b, &audio_frame));
+ out_file_b_.Write10MsData(
+ audio_frame.data_,
+ audio_frame.samples_per_channel_ * audio_frame.num_channels_);
+ acm_b_->PlayoutTimestamp(&playout_ts);
+ received_ts = channel_a2b_->LastInTimestamp();
+ inst_delay_sec = static_cast<uint32_t>(received_ts - playout_ts)
+ / static_cast<double>(encoding_sample_rate_hz_);
+
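+      // Exponentially smooth the delay estimate: keep 95% of the history and
+      // blend in 5% of the newest instantaneous delay sample.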
+ if (num_frames > 10)
+ average_delay = 0.95 * average_delay + 0.05 * inst_delay_sec;
+
+ ++num_frames;
+ ++in_file_frames;
+ }
+ out_file_b_.Close();
+ }
+
+ rtc::scoped_ptr<AudioCodingModule> acm_a_;
+ rtc::scoped_ptr<AudioCodingModule> acm_b_;
+
+ Channel* channel_a2b_;
+
+ PCMFile in_file_a_;
+ PCMFile out_file_b_;
+ int test_cntr_;
+ int encoding_sample_rate_hz_;
+};
+
+} // namespace webrtc
+
+int main(int argc, char* argv[]) {
+ google::ParseCommandLineFlags(&argc, &argv, true);
+ webrtc::TestSettings test_setting;
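+  // Note that FLAGS_codec is copied into CodecSettings::name, which holds at
+  // most 50 bytes.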
+ strcpy(test_setting.codec.name, FLAGS_codec.c_str());
+
+ if (FLAGS_sample_rate_hz != 8000 &&
+ FLAGS_sample_rate_hz != 16000 &&
+ FLAGS_sample_rate_hz != 32000 &&
+ FLAGS_sample_rate_hz != 48000) {
+ std::cout << "Invalid sampling rate.\n";
+ return 1;
+ }
+ test_setting.codec.sample_rate_hz = FLAGS_sample_rate_hz;
+ if (FLAGS_num_channels < 1 || FLAGS_num_channels > 2) {
+ std::cout << "Only mono and stereo are supported.\n";
+ return 1;
+ }
+ test_setting.codec.num_channels = FLAGS_num_channels;
+ test_setting.acm.dtx = FLAGS_dtx;
+ test_setting.acm.fec = FLAGS_fec;
+ test_setting.packet_loss = FLAGS_packet_loss;
+
+ webrtc::DelayTest delay_test;
+ delay_test.Initialize();
+ delay_test.Perform(&test_setting, 1, 240, "delay_test");
+ return 0;
+}
diff --git a/webrtc/modules/audio_coding/test/iSACTest.cc b/webrtc/modules/audio_coding/test/iSACTest.cc
new file mode 100644
index 0000000000..9f223fb81f
--- /dev/null
+++ b/webrtc/modules/audio_coding/test/iSACTest.cc
@@ -0,0 +1,343 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_coding/test/iSACTest.h"
+
+#include <ctype.h>
+#include <stdio.h>
+#include <string.h>
+
+#if _WIN32
+#include <windows.h>
+#elif WEBRTC_LINUX
+#include <time.h>
+#else
+#include <sys/time.h>
+#include <time.h>
+#endif
+
+#include "webrtc/modules/audio_coding/acm2/acm_common_defs.h"
+#include "webrtc/modules/audio_coding/test/utility.h"
+#include "webrtc/system_wrappers/include/event_wrapper.h"
+#include "webrtc/system_wrappers/include/tick_util.h"
+#include "webrtc/system_wrappers/include/trace.h"
+#include "webrtc/test/testsupport/fileutils.h"
+
+namespace webrtc {
+
+void SetISACConfigDefault(ACMTestISACConfig& isacConfig) {
+ isacConfig.currentRateBitPerSec = 0;
+ isacConfig.currentFrameSizeMsec = 0;
+ isacConfig.encodingMode = -1;
+ isacConfig.initRateBitPerSec = 0;
+ isacConfig.initFrameSizeInMsec = 0;
+ isacConfig.enforceFrameSize = false;
+ return;
+}
+
+int16_t SetISAConfig(ACMTestISACConfig& isacConfig, AudioCodingModule* acm,
+ int testMode) {
+
+ if ((isacConfig.currentRateBitPerSec != 0)
+ || (isacConfig.currentFrameSizeMsec != 0)) {
+ auto sendCodec = acm->SendCodec();
+ EXPECT_TRUE(sendCodec);
+ if (isacConfig.currentRateBitPerSec < 0) {
+ // Register iSAC in adaptive (channel-dependent) mode.
+ sendCodec->rate = -1;
+ EXPECT_EQ(0, acm->RegisterSendCodec(*sendCodec));
+ } else {
+ if (isacConfig.currentRateBitPerSec != 0) {
+ sendCodec->rate = isacConfig.currentRateBitPerSec;
+ }
+ if (isacConfig.currentFrameSizeMsec != 0) {
+ sendCodec->pacsize = isacConfig.currentFrameSizeMsec
+ * (sendCodec->plfreq / 1000);
+ }
+ EXPECT_EQ(0, acm->RegisterSendCodec(*sendCodec));
+ }
+ }
+
+ return 0;
+}
+
+ISACTest::ISACTest(int testMode)
+ : _acmA(AudioCodingModule::Create(1)),
+ _acmB(AudioCodingModule::Create(2)),
+ _testMode(testMode) {}
+
+ISACTest::~ISACTest() {}
+
+void ISACTest::Setup() {
+ int codecCntr;
+ CodecInst codecParam;
+
+ for (codecCntr = 0; codecCntr < AudioCodingModule::NumberOfCodecs();
+ codecCntr++) {
+ EXPECT_EQ(0, AudioCodingModule::Codec(codecCntr, &codecParam));
+ if (!STR_CASE_CMP(codecParam.plname, "ISAC")
+ && codecParam.plfreq == 16000) {
+ memcpy(&_paramISAC16kHz, &codecParam, sizeof(CodecInst));
+ _idISAC16kHz = codecCntr;
+ }
+ if (!STR_CASE_CMP(codecParam.plname, "ISAC")
+ && codecParam.plfreq == 32000) {
+ memcpy(&_paramISAC32kHz, &codecParam, sizeof(CodecInst));
+ _idISAC32kHz = codecCntr;
+ }
+ }
+
+  // Register both iSAC-wb and iSAC-swb on both sides as receive codecs.
+ EXPECT_EQ(0, _acmA->RegisterReceiveCodec(_paramISAC16kHz));
+ EXPECT_EQ(0, _acmA->RegisterReceiveCodec(_paramISAC32kHz));
+ EXPECT_EQ(0, _acmB->RegisterReceiveCodec(_paramISAC16kHz));
+ EXPECT_EQ(0, _acmB->RegisterReceiveCodec(_paramISAC32kHz));
+
+ //--- Set A-to-B channel
+ _channel_A2B.reset(new Channel);
+ EXPECT_EQ(0, _acmA->RegisterTransportCallback(_channel_A2B.get()));
+ _channel_A2B->RegisterReceiverACM(_acmB.get());
+
+ //--- Set B-to-A channel
+ _channel_B2A.reset(new Channel);
+ EXPECT_EQ(0, _acmB->RegisterTransportCallback(_channel_B2A.get()));
+ _channel_B2A->RegisterReceiverACM(_acmA.get());
+
+ file_name_swb_ = webrtc::test::ResourcePath("audio_coding/testfile32kHz",
+ "pcm");
+
+ EXPECT_EQ(0, _acmB->RegisterSendCodec(_paramISAC16kHz));
+ EXPECT_EQ(0, _acmA->RegisterSendCodec(_paramISAC32kHz));
+
+ _inFileA.Open(file_name_swb_, 32000, "rb");
+ // Set test length to 500 ms (50 blocks of 10 ms each).
+ _inFileA.SetNum10MsBlocksToRead(50);
+ // Fast-forward 1 second (100 blocks) since the files start with silence.
+ _inFileA.FastForward(100);
+ std::string fileNameA = webrtc::test::OutputPath() + "testisac_a.pcm";
+ std::string fileNameB = webrtc::test::OutputPath() + "testisac_b.pcm";
+ _outFileA.Open(fileNameA, 32000, "wb");
+ _outFileB.Open(fileNameB, 32000, "wb");
+
+ while (!_inFileA.EndOfFile()) {
+ Run10ms();
+ }
+ CodecInst receiveCodec;
+ EXPECT_EQ(0, _acmA->ReceiveCodec(&receiveCodec));
+ EXPECT_EQ(0, _acmB->ReceiveCodec(&receiveCodec));
+
+ _inFileA.Close();
+ _outFileA.Close();
+ _outFileB.Close();
+}
+
+void ISACTest::Perform() {
+ Setup();
+
+ int16_t testNr = 0;
+ ACMTestISACConfig wbISACConfig;
+ ACMTestISACConfig swbISACConfig;
+
+ SetISACConfigDefault(wbISACConfig);
+ SetISACConfigDefault(swbISACConfig);
+
+ wbISACConfig.currentRateBitPerSec = -1;
+ swbISACConfig.currentRateBitPerSec = -1;
+ testNr++;
+ EncodeDecode(testNr, wbISACConfig, swbISACConfig);
+
+ if (_testMode != 0) {
+ SetISACConfigDefault(wbISACConfig);
+ SetISACConfigDefault(swbISACConfig);
+
+ wbISACConfig.currentRateBitPerSec = -1;
+ swbISACConfig.currentRateBitPerSec = -1;
+ wbISACConfig.initRateBitPerSec = 13000;
+ wbISACConfig.initFrameSizeInMsec = 60;
+ swbISACConfig.initRateBitPerSec = 20000;
+ swbISACConfig.initFrameSizeInMsec = 30;
+ testNr++;
+ EncodeDecode(testNr, wbISACConfig, swbISACConfig);
+
+ SetISACConfigDefault(wbISACConfig);
+ SetISACConfigDefault(swbISACConfig);
+
+ wbISACConfig.currentRateBitPerSec = 20000;
+ swbISACConfig.currentRateBitPerSec = 48000;
+ testNr++;
+ EncodeDecode(testNr, wbISACConfig, swbISACConfig);
+
+ wbISACConfig.currentRateBitPerSec = 16000;
+ swbISACConfig.currentRateBitPerSec = 30000;
+ wbISACConfig.currentFrameSizeMsec = 60;
+ testNr++;
+ EncodeDecode(testNr, wbISACConfig, swbISACConfig);
+ }
+
+ SetISACConfigDefault(wbISACConfig);
+ SetISACConfigDefault(swbISACConfig);
+ testNr++;
+ EncodeDecode(testNr, wbISACConfig, swbISACConfig);
+
+ testNr++;
+ if (_testMode == 0) {
+ SwitchingSamplingRate(testNr, 4);
+ } else {
+ SwitchingSamplingRate(testNr, 80);
+ }
+}
+
+void ISACTest::Run10ms() {
+ AudioFrame audioFrame;
+ EXPECT_GT(_inFileA.Read10MsData(audioFrame), 0);
+ EXPECT_GE(_acmA->Add10MsData(audioFrame), 0);
+ EXPECT_GE(_acmB->Add10MsData(audioFrame), 0);
+ EXPECT_EQ(0, _acmA->PlayoutData10Ms(32000, &audioFrame));
+ _outFileA.Write10MsData(audioFrame);
+ EXPECT_EQ(0, _acmB->PlayoutData10Ms(32000, &audioFrame));
+ _outFileB.Write10MsData(audioFrame);
+}
+
+void ISACTest::EncodeDecode(int testNr, ACMTestISACConfig& wbISACConfig,
+ ACMTestISACConfig& swbISACConfig) {
+ // Files in Side A and B
+ _inFileA.Open(file_name_swb_, 32000, "rb", true);
+ _inFileB.Open(file_name_swb_, 32000, "rb", true);
+
+ std::string file_name_out;
+ std::stringstream file_stream_a;
+ std::stringstream file_stream_b;
+ file_stream_a << webrtc::test::OutputPath();
+ file_stream_b << webrtc::test::OutputPath();
+ file_stream_a << "out_iSACTest_A_" << testNr << ".pcm";
+ file_stream_b << "out_iSACTest_B_" << testNr << ".pcm";
+ file_name_out = file_stream_a.str();
+ _outFileA.Open(file_name_out, 32000, "wb");
+ file_name_out = file_stream_b.str();
+ _outFileB.Open(file_name_out, 32000, "wb");
+
+ EXPECT_EQ(0, _acmA->RegisterSendCodec(_paramISAC16kHz));
+ EXPECT_EQ(0, _acmA->RegisterSendCodec(_paramISAC32kHz));
+ EXPECT_EQ(0, _acmB->RegisterSendCodec(_paramISAC32kHz));
+ EXPECT_EQ(0, _acmB->RegisterSendCodec(_paramISAC16kHz));
+
+ // Side A is sending super-wideband, and side B is sending wideband.
+ SetISAConfig(swbISACConfig, _acmA.get(), _testMode);
+ SetISAConfig(wbISACConfig, _acmB.get(), _testMode);
+
+ bool adaptiveMode = false;
+ if ((swbISACConfig.currentRateBitPerSec == -1)
+ || (wbISACConfig.currentRateBitPerSec == -1)) {
+ adaptiveMode = true;
+ }
+ _myTimer.Reset();
+ _channel_A2B->ResetStats();
+ _channel_B2A->ResetStats();
+
+ char currentTime[500];
+ EventTimerWrapper* myEvent = EventTimerWrapper::Create();
+ EXPECT_TRUE(myEvent->StartTimer(true, 10));
+ while (!(_inFileA.EndOfFile() || _inFileA.Rewinded())) {
+ Run10ms();
+ _myTimer.Tick10ms();
+ _myTimer.CurrentTimeHMS(currentTime);
+
+ if ((adaptiveMode) && (_testMode != 0)) {
+ myEvent->Wait(5000);
+ EXPECT_TRUE(_acmA->SendCodec());
+ EXPECT_TRUE(_acmB->SendCodec());
+ }
+ }
+
+ if (_testMode != 0) {
+ printf("\n\nSide A statistics\n\n");
+ _channel_A2B->PrintStats(_paramISAC32kHz);
+
+ printf("\n\nSide B statistics\n\n");
+ _channel_B2A->PrintStats(_paramISAC16kHz);
+ }
+
+ _channel_A2B->ResetStats();
+ _channel_B2A->ResetStats();
+
+ _outFileA.Close();
+ _outFileB.Close();
+ _inFileA.Close();
+ _inFileB.Close();
+}
+
+void ISACTest::SwitchingSamplingRate(int testNr, int maxSampRateChange) {
+  // Files for sides A and B.
+ _inFileA.Open(file_name_swb_, 32000, "rb");
+ _inFileB.Open(file_name_swb_, 32000, "rb");
+
+ std::string file_name_out;
+ std::stringstream file_stream_a;
+ std::stringstream file_stream_b;
+ file_stream_a << webrtc::test::OutputPath();
+ file_stream_b << webrtc::test::OutputPath();
+ file_stream_a << "out_iSACTest_A_" << testNr << ".pcm";
+ file_stream_b << "out_iSACTest_B_" << testNr << ".pcm";
+ file_name_out = file_stream_a.str();
+ _outFileA.Open(file_name_out, 32000, "wb");
+ file_name_out = file_stream_b.str();
+ _outFileB.Open(file_name_out, 32000, "wb");
+
+  // Start with side A sending super-wideband and side B sending wideband.
+ // Toggle sending wideband/super-wideband in this test.
+ EXPECT_EQ(0, _acmA->RegisterSendCodec(_paramISAC32kHz));
+ EXPECT_EQ(0, _acmB->RegisterSendCodec(_paramISAC16kHz));
+
+ int numSendCodecChanged = 0;
+ _myTimer.Reset();
+ char currentTime[50];
+ while (numSendCodecChanged < (maxSampRateChange << 1)) {
+ Run10ms();
+ _myTimer.Tick10ms();
+ _myTimer.CurrentTimeHMS(currentTime);
+ if (_testMode == 2)
+ printf("\r%s", currentTime);
+ if (_inFileA.EndOfFile()) {
+ if (_inFileA.SamplingFrequency() == 16000) {
+ // Switch side A to send super-wideband.
+ _inFileA.Close();
+ _inFileA.Open(file_name_swb_, 32000, "rb");
+ EXPECT_EQ(0, _acmA->RegisterSendCodec(_paramISAC32kHz));
+ } else {
+ // Switch side A to send wideband.
+ _inFileA.Close();
+ _inFileA.Open(file_name_swb_, 32000, "rb");
+ EXPECT_EQ(0, _acmA->RegisterSendCodec(_paramISAC16kHz));
+ }
+ numSendCodecChanged++;
+ }
+
+ if (_inFileB.EndOfFile()) {
+ if (_inFileB.SamplingFrequency() == 16000) {
+ // Switch side B to send super-wideband.
+ _inFileB.Close();
+ _inFileB.Open(file_name_swb_, 32000, "rb");
+ EXPECT_EQ(0, _acmB->RegisterSendCodec(_paramISAC32kHz));
+ } else {
+ // Switch side B to send wideband.
+ _inFileB.Close();
+ _inFileB.Open(file_name_swb_, 32000, "rb");
+ EXPECT_EQ(0, _acmB->RegisterSendCodec(_paramISAC16kHz));
+ }
+ numSendCodecChanged++;
+ }
+ }
+ _outFileA.Close();
+ _outFileB.Close();
+ _inFileA.Close();
+ _inFileB.Close();
+}
+
+} // namespace webrtc
diff --git a/webrtc/modules/audio_coding/test/iSACTest.h b/webrtc/modules/audio_coding/test/iSACTest.h
new file mode 100644
index 0000000000..c5bb515437
--- /dev/null
+++ b/webrtc/modules/audio_coding/test/iSACTest.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_CODING_TEST_ISACTEST_H_
+#define WEBRTC_MODULES_AUDIO_CODING_TEST_ISACTEST_H_
+
+#include <string.h>
+
+#include "webrtc/base/scoped_ptr.h"
+#include "webrtc/common_types.h"
+#include "webrtc/modules/audio_coding/include/audio_coding_module.h"
+#include "webrtc/modules/audio_coding/test/ACMTest.h"
+#include "webrtc/modules/audio_coding/test/Channel.h"
+#include "webrtc/modules/audio_coding/test/PCMFile.h"
+#include "webrtc/modules/audio_coding/test/utility.h"
+
+#define MAX_FILE_NAME_LENGTH_BYTE 500
+#define NO_OF_CLIENTS 15
+
+namespace webrtc {
+
+struct ACMTestISACConfig {
+ int32_t currentRateBitPerSec;
+ int16_t currentFrameSizeMsec;
+ int16_t encodingMode;
+ uint32_t initRateBitPerSec;
+ int16_t initFrameSizeInMsec;
+ bool enforceFrameSize;
+};
+
+class ISACTest : public ACMTest {
+ public:
+ explicit ISACTest(int testMode);
+ ~ISACTest();
+
+ void Perform();
+ private:
+ void Setup();
+
+ void Run10ms();
+
+ void EncodeDecode(int testNr, ACMTestISACConfig& wbISACConfig,
+ ACMTestISACConfig& swbISACConfig);
+
+ void SwitchingSamplingRate(int testNr, int maxSampRateChange);
+
+ rtc::scoped_ptr<AudioCodingModule> _acmA;
+ rtc::scoped_ptr<AudioCodingModule> _acmB;
+
+ rtc::scoped_ptr<Channel> _channel_A2B;
+ rtc::scoped_ptr<Channel> _channel_B2A;
+
+ PCMFile _inFileA;
+ PCMFile _inFileB;
+
+ PCMFile _outFileA;
+ PCMFile _outFileB;
+
+ uint8_t _idISAC16kHz;
+ uint8_t _idISAC32kHz;
+ CodecInst _paramISAC16kHz;
+ CodecInst _paramISAC32kHz;
+
+ std::string file_name_swb_;
+
+ ACMTestTimer _myTimer;
+ int _testMode;
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_AUDIO_CODING_TEST_ISACTEST_H_
diff --git a/webrtc/modules/audio_coding/test/insert_packet_with_timing.cc b/webrtc/modules/audio_coding/test/insert_packet_with_timing.cc
new file mode 100644
index 0000000000..481df55ffd
--- /dev/null
+++ b/webrtc/modules/audio_coding/test/insert_packet_with_timing.cc
@@ -0,0 +1,307 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdio.h>
+
+#include "gflags/gflags.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/base/scoped_ptr.h"
+#include "webrtc/common_types.h"
+#include "webrtc/modules/audio_coding/include/audio_coding_module.h"
+#include "webrtc/modules/audio_coding/test/Channel.h"
+#include "webrtc/modules/audio_coding/test/PCMFile.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/system_wrappers/include/clock.h"
+#include "webrtc/test/testsupport/fileutils.h"
+
+// Codec.
+DEFINE_string(codec, "opus", "Codec Name");
+DEFINE_int32(codec_sample_rate_hz, 48000, "Sampling rate in Hertz.");
+DEFINE_int32(codec_channels, 1, "Number of channels of the codec.");
+
+// PCM input/output.
+DEFINE_string(input, "", "Input PCM file at 16 kHz.");
+DEFINE_bool(input_stereo, false, "Input is stereo.");
+DEFINE_int32(input_fs_hz, 32000, "Input sample rate Hz.");
+DEFINE_string(output, "insert_rtp_with_timing_out.pcm", "OutputFile");
+DEFINE_int32(output_fs_hz, 32000, "Output sample rate Hz");
+
+// Timing files
+DEFINE_string(seq_num, "seq_num", "Sequence number file.");
+DEFINE_string(send_ts, "send_timestamp", "Send timestamp file.");
+DEFINE_string(receive_ts, "last_rec_timestamp", "Receive timestamp file");
+
+// Delay logging
+DEFINE_string(delay, "", "Log for delay.");
+
+// Other setups
+DEFINE_bool(verbose, false, "Verbosity.");
+DEFINE_double(loss_rate, 0, "Rate of packet loss < 1");
+
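+// Bit flags reported back from TickOneMillisecond() to indicate which actions
+// took place during the tick.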
+const int32_t kAudioPlayedOut = 0x00000001;
+const int32_t kPacketPushedIn = 0x00000001 << 1;
+const int kPlayoutPeriodMs = 10;
+
+namespace webrtc {
+
+class InsertPacketWithTiming {
+ public:
+ InsertPacketWithTiming()
+ : sender_clock_(new SimulatedClock(0)),
+ receiver_clock_(new SimulatedClock(0)),
+ send_acm_(AudioCodingModule::Create(0, sender_clock_)),
+ receive_acm_(AudioCodingModule::Create(0, receiver_clock_)),
+ channel_(new Channel),
+ seq_num_fid_(fopen(FLAGS_seq_num.c_str(), "rt")),
+ send_ts_fid_(fopen(FLAGS_send_ts.c_str(), "rt")),
+ receive_ts_fid_(fopen(FLAGS_receive_ts.c_str(), "rt")),
+ pcm_out_fid_(fopen(FLAGS_output.c_str(), "wb")),
+ samples_in_1ms_(48),
+ num_10ms_in_codec_frame_(2), // Typical 20 ms frames.
+ time_to_insert_packet_ms_(3), // An arbitrary offset on pushing packet.
+ next_receive_ts_(0),
+ time_to_playout_audio_ms_(kPlayoutPeriodMs),
+ loss_threshold_(0),
+ playout_timing_fid_(fopen("playout_timing.txt", "wt")) {}
+
+ void SetUp() {
+ ASSERT_TRUE(sender_clock_ != NULL);
+ ASSERT_TRUE(receiver_clock_ != NULL);
+
+ ASSERT_TRUE(send_acm_.get() != NULL);
+ ASSERT_TRUE(receive_acm_.get() != NULL);
+ ASSERT_TRUE(channel_ != NULL);
+
+ ASSERT_TRUE(seq_num_fid_ != NULL);
+ ASSERT_TRUE(send_ts_fid_ != NULL);
+ ASSERT_TRUE(receive_ts_fid_ != NULL);
+
+ ASSERT_TRUE(playout_timing_fid_ != NULL);
+
+ next_receive_ts_ = ReceiveTimestamp();
+
+ CodecInst codec;
+ ASSERT_EQ(0, AudioCodingModule::Codec(FLAGS_codec.c_str(), &codec,
+ FLAGS_codec_sample_rate_hz,
+ FLAGS_codec_channels));
+ ASSERT_EQ(0, receive_acm_->InitializeReceiver());
+ ASSERT_EQ(0, send_acm_->RegisterSendCodec(codec));
+ ASSERT_EQ(0, receive_acm_->RegisterReceiveCodec(codec));
+
+ // Set codec-dependent parameters.
+ samples_in_1ms_ = codec.plfreq / 1000;
+ num_10ms_in_codec_frame_ = codec.pacsize / (codec.plfreq / 100);
+
+ channel_->RegisterReceiverACM(receive_acm_.get());
+ send_acm_->RegisterTransportCallback(channel_);
+
+ if (FLAGS_input.size() == 0) {
+ std::string file_name = test::ResourcePath("audio_coding/testfile32kHz",
+ "pcm");
+ pcm_in_fid_.Open(file_name, 32000, "r", true); // auto-rewind
+ std::cout << "Input file " << file_name << " 32 kHz mono." << std::endl;
+ } else {
+ pcm_in_fid_.Open(FLAGS_input, static_cast<uint16_t>(FLAGS_input_fs_hz),
+ "r", true); // auto-rewind
+      std::cout << "Input file " << FLAGS_input << " at " << FLAGS_input_fs_hz
+ << " Hz in " << ((FLAGS_input_stereo) ? "stereo." : "mono.")
+ << std::endl;
+ pcm_in_fid_.ReadStereo(FLAGS_input_stereo);
+ }
+
+ ASSERT_TRUE(pcm_out_fid_ != NULL);
+ std::cout << "Output file " << FLAGS_output << " at " << FLAGS_output_fs_hz
+ << " Hz." << std::endl;
+
+ // Other setups
+ if (FLAGS_loss_rate > 0)
+ loss_threshold_ = RAND_MAX * FLAGS_loss_rate;
+ else
+ loss_threshold_ = 0;
+ }
+
+ void TickOneMillisecond(uint32_t* action) {
+ // One millisecond passed.
+ time_to_insert_packet_ms_--;
+ time_to_playout_audio_ms_--;
+ sender_clock_->AdvanceTimeMilliseconds(1);
+ receiver_clock_->AdvanceTimeMilliseconds(1);
+
+ // Reset action.
+ *action = 0;
+
+ // Is it time to pull audio?
+ if (time_to_playout_audio_ms_ == 0) {
+ time_to_playout_audio_ms_ = kPlayoutPeriodMs;
+ receive_acm_->PlayoutData10Ms(static_cast<int>(FLAGS_output_fs_hz),
+ &frame_);
+ fwrite(frame_.data_, sizeof(frame_.data_[0]),
+ frame_.samples_per_channel_ * frame_.num_channels_, pcm_out_fid_);
+ *action |= kAudioPlayedOut;
+ }
+
+ // Is it time to push in next packet?
+ if (time_to_insert_packet_ms_ <= .5) {
+ *action |= kPacketPushedIn;
+
+ // Update time-to-insert packet.
+ uint32_t t = next_receive_ts_;
+ next_receive_ts_ = ReceiveTimestamp();
+ time_to_insert_packet_ms_ += static_cast<float>(next_receive_ts_ - t) /
+ samples_in_1ms_;
+
+ // Push in just enough audio.
+ for (int n = 0; n < num_10ms_in_codec_frame_; n++) {
+ pcm_in_fid_.Read10MsData(frame_);
+ EXPECT_GE(send_acm_->Add10MsData(frame_), 0);
+ }
+
+ // Set the parameters for the packet to be pushed in receiver ACM right
+ // now.
+ uint32_t ts = SendTimestamp();
+ int seq_num = SequenceNumber();
+ bool lost = false;
+ channel_->set_send_timestamp(ts);
+ channel_->set_sequence_number(seq_num);
+ if (loss_threshold_ > 0 && rand() < loss_threshold_) {
+ channel_->set_num_packets_to_drop(1);
+ lost = true;
+ }
+
+ if (FLAGS_verbose) {
+ if (!lost) {
+ std::cout << "\nInserting packet number " << seq_num
+ << " timestamp " << ts << std::endl;
+ } else {
+ std::cout << "\nLost packet number " << seq_num
+ << " timestamp " << ts << std::endl;
+ }
+ }
+ }
+ }
+
+ void TearDown() {
+ delete channel_;
+
+ fclose(seq_num_fid_);
+ fclose(send_ts_fid_);
+ fclose(receive_ts_fid_);
+ fclose(pcm_out_fid_);
+ pcm_in_fid_.Close();
+ }
+
+ ~InsertPacketWithTiming() {
+ delete sender_clock_;
+ delete receiver_clock_;
+ }
+
+  // Returns true as long as there is more data to simulate.
+ bool HasPackets() {
+ if (feof(seq_num_fid_) || feof(send_ts_fid_) || feof(receive_ts_fid_))
+ return false;
+ return true;
+ }
+
+ // Jitter buffer delay.
+ void Delay(int* optimal_delay, int* current_delay) {
+ NetworkStatistics statistics;
+ receive_acm_->GetNetworkStatistics(&statistics);
+ *optimal_delay = statistics.preferredBufferSize;
+ *current_delay = statistics.currentBufferSize;
+ }
+
+ private:
+ uint32_t SendTimestamp() {
+ uint32_t t;
+ EXPECT_EQ(1, fscanf(send_ts_fid_, "%u\n", &t));
+ return t;
+ }
+
+ uint32_t ReceiveTimestamp() {
+ uint32_t t;
+ EXPECT_EQ(1, fscanf(receive_ts_fid_, "%u\n", &t));
+ return t;
+ }
+
+ int SequenceNumber() {
+ int n;
+ EXPECT_EQ(1, fscanf(seq_num_fid_, "%d\n", &n));
+ return n;
+ }
+
+  // These clocks are created by this class and deleted in its destructor; the
+  // associated ACMs only hold pointers to them.
+ SimulatedClock* sender_clock_;
+ SimulatedClock* receiver_clock_;
+
+ rtc::scoped_ptr<AudioCodingModule> send_acm_;
+ rtc::scoped_ptr<AudioCodingModule> receive_acm_;
+ Channel* channel_;
+
+ FILE* seq_num_fid_; // Input (text), one sequence number per line.
+ FILE* send_ts_fid_; // Input (text), one send timestamp per line.
+ FILE* receive_ts_fid_; // Input (text), one receive timestamp per line.
+ FILE* pcm_out_fid_; // Output PCM16.
+
+ PCMFile pcm_in_fid_; // Input PCM16.
+
+ int samples_in_1ms_;
+
+ // TODO(turajs): this can be computed from the send timestamp, but there is
+ // some complication to account for lost and reordered packets.
+ int num_10ms_in_codec_frame_;
+
+ float time_to_insert_packet_ms_;
+ uint32_t next_receive_ts_;
+ uint32_t time_to_playout_audio_ms_;
+
+ AudioFrame frame_;
+
+ double loss_threshold_;
+
+ // Output (text), sequence number, playout timestamp, time (ms) of playout,
+ // per line.
+ FILE* playout_timing_fid_;
+};
+
+} // namespace webrtc
+
+int main(int argc, char* argv[]) {
+ google::ParseCommandLineFlags(&argc, &argv, true);
+ webrtc::InsertPacketWithTiming test;
+ test.SetUp();
+
+ FILE* delay_log = NULL;
+ if (FLAGS_delay.size() > 0) {
+ delay_log = fopen(FLAGS_delay.c_str(), "wt");
+ if (delay_log == NULL) {
+ std::cout << "Cannot open the file to log delay values." << std::endl;
+ exit(1);
+ }
+ }
+
+ uint32_t action_taken;
+ int optimal_delay_ms;
+ int current_delay_ms;
+ while (test.HasPackets()) {
+ test.TickOneMillisecond(&action_taken);
+
+ if (action_taken != 0) {
+ test.Delay(&optimal_delay_ms, &current_delay_ms);
+ if (delay_log != NULL) {
+ fprintf(delay_log, "%3d %3d\n", optimal_delay_ms, current_delay_ms);
+ }
+ }
+ }
+ std::cout << std::endl;
+ test.TearDown();
+ if (delay_log != NULL)
+ fclose(delay_log);
+}
diff --git a/webrtc/modules/audio_coding/test/opus_test.cc b/webrtc/modules/audio_coding/test/opus_test.cc
new file mode 100644
index 0000000000..104b5e587b
--- /dev/null
+++ b/webrtc/modules/audio_coding/test/opus_test.cc
@@ -0,0 +1,383 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_coding/test/opus_test.h"
+
+#include <assert.h>
+
+#include <string>
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/common_types.h"
+#include "webrtc/engine_configurations.h"
+#include "webrtc/modules/audio_coding/codecs/opus/opus_interface.h"
+#include "webrtc/modules/audio_coding/include/audio_coding_module_typedefs.h"
+#include "webrtc/modules/audio_coding/test/TestStereo.h"
+#include "webrtc/modules/audio_coding/test/utility.h"
+#include "webrtc/system_wrappers/include/trace.h"
+#include "webrtc/test/testsupport/fileutils.h"
+
+namespace webrtc {
+
+OpusTest::OpusTest()
+    : acm_receiver_(AudioCodingModule::Create(0)),
+      channel_a2b_(NULL),
+      counter_(0),
+      payload_type_(255),
+      rtp_timestamp_(0),
+      opus_mono_encoder_(NULL),
+      opus_stereo_encoder_(NULL),
+      opus_mono_decoder_(NULL),
+      opus_stereo_decoder_(NULL) {}
+
+OpusTest::~OpusTest() {
+ if (channel_a2b_ != NULL) {
+ delete channel_a2b_;
+ channel_a2b_ = NULL;
+ }
+ if (opus_mono_encoder_ != NULL) {
+ WebRtcOpus_EncoderFree(opus_mono_encoder_);
+ opus_mono_encoder_ = NULL;
+ }
+ if (opus_stereo_encoder_ != NULL) {
+ WebRtcOpus_EncoderFree(opus_stereo_encoder_);
+ opus_stereo_encoder_ = NULL;
+ }
+ if (opus_mono_decoder_ != NULL) {
+ WebRtcOpus_DecoderFree(opus_mono_decoder_);
+ opus_mono_decoder_ = NULL;
+ }
+ if (opus_stereo_decoder_ != NULL) {
+ WebRtcOpus_DecoderFree(opus_stereo_decoder_);
+ opus_stereo_decoder_ = NULL;
+ }
+}
+
+void OpusTest::Perform() {
+#ifndef WEBRTC_CODEC_OPUS
+  // Opus isn't defined, so exit.
+ return;
+#else
+ uint16_t frequency_hz;
+ size_t audio_channels;
+ int16_t test_cntr = 0;
+
+ // Open both mono and stereo test files in 32 kHz.
+ const std::string file_name_stereo =
+ webrtc::test::ResourcePath("audio_coding/teststereo32kHz", "pcm");
+ const std::string file_name_mono =
+ webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm");
+ frequency_hz = 32000;
+ in_file_stereo_.Open(file_name_stereo, frequency_hz, "rb");
+ in_file_stereo_.ReadStereo(true);
+ in_file_mono_.Open(file_name_mono, frequency_hz, "rb");
+ in_file_mono_.ReadStereo(false);
+
+ // Create Opus encoders for mono and stereo.
+ ASSERT_GT(WebRtcOpus_EncoderCreate(&opus_mono_encoder_, 1, 0), -1);
+ ASSERT_GT(WebRtcOpus_EncoderCreate(&opus_stereo_encoder_, 2, 1), -1);
+
+ // Create Opus decoders for mono and stereo for stand-alone testing of Opus.
+ ASSERT_GT(WebRtcOpus_DecoderCreate(&opus_mono_decoder_, 1), -1);
+ ASSERT_GT(WebRtcOpus_DecoderCreate(&opus_stereo_decoder_, 2), -1);
+ WebRtcOpus_DecoderInit(opus_mono_decoder_);
+ WebRtcOpus_DecoderInit(opus_stereo_decoder_);
+
+ ASSERT_TRUE(acm_receiver_.get() != NULL);
+ EXPECT_EQ(0, acm_receiver_->InitializeReceiver());
+
+ // Register Opus stereo as receiving codec.
+ CodecInst opus_codec_param;
+ int codec_id = acm_receiver_->Codec("opus", 48000, 2);
+ EXPECT_EQ(0, acm_receiver_->Codec(codec_id, &opus_codec_param));
+ payload_type_ = opus_codec_param.pltype;
+ EXPECT_EQ(0, acm_receiver_->RegisterReceiveCodec(opus_codec_param));
+
+ // Create and connect the channel.
+ channel_a2b_ = new TestPackStereo;
+ channel_a2b_->RegisterReceiverACM(acm_receiver_.get());
+
+ //
+ // Test Stereo.
+ //
+
+ channel_a2b_->set_codec_mode(kStereo);
+ audio_channels = 2;
+ test_cntr++;
+ OpenOutFile(test_cntr);
+
+ // Run Opus with 2.5 ms frame size.
+ Run(channel_a2b_, audio_channels, 64000, 120);
+
+ // Run Opus with 5 ms frame size.
+ Run(channel_a2b_, audio_channels, 64000, 240);
+
+ // Run Opus with 10 ms frame size.
+ Run(channel_a2b_, audio_channels, 64000, 480);
+
+ // Run Opus with 20 ms frame size.
+ Run(channel_a2b_, audio_channels, 64000, 960);
+
+ // Run Opus with 40 ms frame size.
+ Run(channel_a2b_, audio_channels, 64000, 1920);
+
+ // Run Opus with 60 ms frame size.
+ Run(channel_a2b_, audio_channels, 64000, 2880);
+
+ out_file_.Close();
+ out_file_standalone_.Close();
+
+ //
+ // Test Opus stereo with packet-losses.
+ //
+
+ test_cntr++;
+ OpenOutFile(test_cntr);
+
+ // Run Opus with 20 ms frame size, 1% packet loss.
+ Run(channel_a2b_, audio_channels, 64000, 960, 1);
+
+ // Run Opus with 20 ms frame size, 5% packet loss.
+ Run(channel_a2b_, audio_channels, 64000, 960, 5);
+
+ // Run Opus with 20 ms frame size, 10% packet loss.
+ Run(channel_a2b_, audio_channels, 64000, 960, 10);
+
+ out_file_.Close();
+ out_file_standalone_.Close();
+
+ //
+ // Test Mono.
+ //
+ channel_a2b_->set_codec_mode(kMono);
+ audio_channels = 1;
+ test_cntr++;
+ OpenOutFile(test_cntr);
+
+ // Register Opus mono as receiving codec.
+ opus_codec_param.channels = 1;
+ EXPECT_EQ(0, acm_receiver_->RegisterReceiveCodec(opus_codec_param));
+
+ // Run Opus with 2.5 ms frame size.
+ Run(channel_a2b_, audio_channels, 32000, 120);
+
+ // Run Opus with 5 ms frame size.
+ Run(channel_a2b_, audio_channels, 32000, 240);
+
+ // Run Opus with 10 ms frame size.
+ Run(channel_a2b_, audio_channels, 32000, 480);
+
+ // Run Opus with 20 ms frame size.
+ Run(channel_a2b_, audio_channels, 32000, 960);
+
+ // Run Opus with 40 ms frame size.
+ Run(channel_a2b_, audio_channels, 32000, 1920);
+
+ // Run Opus with 60 ms frame size.
+ Run(channel_a2b_, audio_channels, 32000, 2880);
+
+ out_file_.Close();
+ out_file_standalone_.Close();
+
+ //
+ // Test Opus mono with packet-losses.
+ //
+ test_cntr++;
+ OpenOutFile(test_cntr);
+
+ // Run Opus with 20 ms frame size, 1% packet loss.
+ Run(channel_a2b_, audio_channels, 64000, 960, 1);
+
+ // Run Opus with 20 ms frame size, 5% packet loss.
+ Run(channel_a2b_, audio_channels, 64000, 960, 5);
+
+ // Run Opus with 20 ms frame size, 10% packet loss.
+ Run(channel_a2b_, audio_channels, 64000, 960, 10);
+
+ // Close the files.
+ in_file_stereo_.Close();
+ in_file_mono_.Close();
+ out_file_.Close();
+ out_file_standalone_.Close();
+#endif
+}
+
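+// Encodes the input file with the stand-alone Opus encoder and runs both the
+// stand-alone Opus decoder and the ACM receiver on the resulting packets.
+// |channels| is 1 (mono) or 2 (stereo), |bitrate| is given in bits per second,
+// |frame_length| is the frame size in samples per channel at 48 kHz, and
+// |percent_loss| is the simulated packet-loss rate in percent.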
+void OpusTest::Run(TestPackStereo* channel, size_t channels, int bitrate,
+ size_t frame_length, int percent_loss) {
+ AudioFrame audio_frame;
+ int32_t out_freq_hz_b = out_file_.SamplingFrequency();
+ const size_t kBufferSizeSamples = 480 * 12 * 2; // 120 ms stereo audio.
+ int16_t audio[kBufferSizeSamples];
+ int16_t out_audio[kBufferSizeSamples];
+ int16_t audio_type;
+ size_t written_samples = 0;
+ size_t read_samples = 0;
+ size_t decoded_samples = 0;
+ bool first_packet = true;
+ uint32_t start_time_stamp = 0;
+
+ channel->reset_payload_size();
+ counter_ = 0;
+
+ // Set encoder rate.
+ EXPECT_EQ(0, WebRtcOpus_SetBitRate(opus_mono_encoder_, bitrate));
+ EXPECT_EQ(0, WebRtcOpus_SetBitRate(opus_stereo_encoder_, bitrate));
+
+#if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS) || defined(WEBRTC_ARCH_ARM)
+  // On Android, iOS and ARM, use a lower complexity setting by default.
+ const int kOpusComplexity5 = 5;
+ EXPECT_EQ(0, WebRtcOpus_SetComplexity(opus_mono_encoder_, kOpusComplexity5));
+ EXPECT_EQ(0, WebRtcOpus_SetComplexity(opus_stereo_encoder_,
+ kOpusComplexity5));
+#endif
+
+ // Fast-forward 1 second (100 blocks) since the files start with silence.
+ in_file_stereo_.FastForward(100);
+ in_file_mono_.FastForward(100);
+
+ // Limit the runtime to 1000 blocks of 10 ms each.
+  for (size_t audio_length = 0; audio_length < 10000; audio_length += 10) {
+ bool lost_packet = false;
+
+ // Get 10 msec of audio.
+ if (channels == 1) {
+ if (in_file_mono_.EndOfFile()) {
+ break;
+ }
+ in_file_mono_.Read10MsData(audio_frame);
+ } else {
+ if (in_file_stereo_.EndOfFile()) {
+ break;
+ }
+ in_file_stereo_.Read10MsData(audio_frame);
+ }
+
+ // If input audio is sampled at 32 kHz, resampling to 48 kHz is required.
+ EXPECT_EQ(480,
+ resampler_.Resample10Msec(audio_frame.data_,
+ audio_frame.sample_rate_hz_,
+ 48000,
+ channels,
+ kBufferSizeSamples - written_samples,
+ &audio[written_samples]));
+ written_samples += 480 * channels;
+
+ // Sometimes we need to loop over the audio vector to produce the right
+ // number of packets.
+ size_t loop_encode = (written_samples - read_samples) /
+ (channels * frame_length);
+
+ if (loop_encode > 0) {
+ const size_t kMaxBytes = 1000; // Maximum number of bytes for one packet.
+ size_t bitstream_len_byte;
+ uint8_t bitstream[kMaxBytes];
+ for (size_t i = 0; i < loop_encode; i++) {
+ int bitstream_len_byte_int = WebRtcOpus_Encode(
+ (channels == 1) ? opus_mono_encoder_ : opus_stereo_encoder_,
+ &audio[read_samples], frame_length, kMaxBytes, bitstream);
+ ASSERT_GE(bitstream_len_byte_int, 0);
+ bitstream_len_byte = static_cast<size_t>(bitstream_len_byte_int);
+
+          // Simulate packet loss by flagging the packet as lost in
+          // |percent_loss| percent of the loops.
+ // TODO(tlegrand): Move handling of loss simulation to TestPackStereo.
+ if (percent_loss > 0) {
+ if (counter_ == floor((100 / percent_loss) + 0.5)) {
+ counter_ = 0;
+ lost_packet = true;
+ channel->set_lost_packet(true);
+ } else {
+ lost_packet = false;
+ channel->set_lost_packet(false);
+ }
+ counter_++;
+ }
+
+ // Run stand-alone Opus decoder, or decode PLC.
+ if (channels == 1) {
+ if (!lost_packet) {
+ decoded_samples += WebRtcOpus_Decode(
+ opus_mono_decoder_, bitstream, bitstream_len_byte,
+ &out_audio[decoded_samples * channels], &audio_type);
+ } else {
+ decoded_samples += WebRtcOpus_DecodePlc(
+ opus_mono_decoder_, &out_audio[decoded_samples * channels], 1);
+ }
+ } else {
+ if (!lost_packet) {
+ decoded_samples += WebRtcOpus_Decode(
+ opus_stereo_decoder_, bitstream, bitstream_len_byte,
+ &out_audio[decoded_samples * channels], &audio_type);
+ } else {
+ decoded_samples += WebRtcOpus_DecodePlc(
+ opus_stereo_decoder_, &out_audio[decoded_samples * channels],
+ 1);
+ }
+ }
+
+ // Send data to the channel. "channel" will handle the loss simulation.
+ channel->SendData(kAudioFrameSpeech, payload_type_, rtp_timestamp_,
+ bitstream, bitstream_len_byte, NULL);
+ if (first_packet) {
+ first_packet = false;
+ start_time_stamp = rtp_timestamp_;
+ }
+ rtp_timestamp_ += static_cast<uint32_t>(frame_length);
+ read_samples += frame_length * channels;
+ }
+ if (read_samples == written_samples) {
+ read_samples = 0;
+ written_samples = 0;
+ }
+ }
+
+ // Run received side of ACM.
+ ASSERT_EQ(0, acm_receiver_->PlayoutData10Ms(out_freq_hz_b, &audio_frame));
+
+ // Write output speech to file.
+ out_file_.Write10MsData(
+ audio_frame.data_,
+ audio_frame.samples_per_channel_ * audio_frame.num_channels_);
+
+ // Write stand-alone speech to file.
+ out_file_standalone_.Write10MsData(out_audio, decoded_samples * channels);
+
+ if (audio_frame.timestamp_ > start_time_stamp) {
+ // Number of channels should be the same for both stand-alone and
+ // ACM-decoding.
+ EXPECT_EQ(audio_frame.num_channels_, channels);
+ }
+
+ decoded_samples = 0;
+ }
+
+ if (in_file_mono_.EndOfFile()) {
+ in_file_mono_.Rewind();
+ }
+ if (in_file_stereo_.EndOfFile()) {
+ in_file_stereo_.Rewind();
+ }
+ // Reset in case we ended with a lost packet.
+ channel->set_lost_packet(false);
+}
+
+void OpusTest::OpenOutFile(int test_number) {
+ std::string file_name;
+ std::stringstream file_stream;
+ file_stream << webrtc::test::OutputPath() << "opustest_out_"
+ << test_number << ".pcm";
+ file_name = file_stream.str();
+ out_file_.Open(file_name, 48000, "wb");
+ file_stream.str("");
+ file_name = file_stream.str();
+ file_stream << webrtc::test::OutputPath() << "opusstandalone_out_"
+ << test_number << ".pcm";
+ file_name = file_stream.str();
+ out_file_standalone_.Open(file_name, 48000, "wb");
+}
+
+} // namespace webrtc
diff --git a/webrtc/modules/audio_coding/test/opus_test.h b/webrtc/modules/audio_coding/test/opus_test.h
new file mode 100644
index 0000000000..93c9ffb263
--- /dev/null
+++ b/webrtc/modules/audio_coding/test/opus_test.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_CODING_TEST_OPUS_TEST_H_
+#define WEBRTC_MODULES_AUDIO_CODING_TEST_OPUS_TEST_H_
+
+#include <math.h>
+
+#include "webrtc/base/scoped_ptr.h"
+#include "webrtc/modules/audio_coding/codecs/opus/opus_interface.h"
+#include "webrtc/modules/audio_coding/acm2/acm_resampler.h"
+#include "webrtc/modules/audio_coding/test/ACMTest.h"
+#include "webrtc/modules/audio_coding/test/Channel.h"
+#include "webrtc/modules/audio_coding/test/PCMFile.h"
+#include "webrtc/modules/audio_coding/test/TestStereo.h"
+
+namespace webrtc {
+
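+// Exercises Opus both through the ACM receive path and through the
+// stand-alone WebRtcOpus encoder/decoder, writing the decoded output of each
+// path to its own file.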
+class OpusTest : public ACMTest {
+ public:
+ OpusTest();
+ ~OpusTest();
+
+ void Perform();
+
+ private:
+ void Run(TestPackStereo* channel,
+ size_t channels,
+ int bitrate,
+ size_t frame_length,
+ int percent_loss = 0);
+
+ void OpenOutFile(int test_number);
+
+ rtc::scoped_ptr<AudioCodingModule> acm_receiver_;
+ TestPackStereo* channel_a2b_;
+ PCMFile in_file_stereo_;
+ PCMFile in_file_mono_;
+ PCMFile out_file_;
+ PCMFile out_file_standalone_;
+ int counter_;
+ uint8_t payload_type_;
+ uint32_t rtp_timestamp_;
+ acm2::ACMResampler resampler_;
+ WebRtcOpusEncInst* opus_mono_encoder_;
+ WebRtcOpusEncInst* opus_stereo_encoder_;
+ WebRtcOpusDecInst* opus_mono_decoder_;
+ WebRtcOpusDecInst* opus_stereo_decoder_;
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_AUDIO_CODING_TEST_OPUS_TEST_H_
diff --git a/webrtc/modules/audio_coding/test/target_delay_unittest.cc b/webrtc/modules/audio_coding/test/target_delay_unittest.cc
new file mode 100644
index 0000000000..195e9d8145
--- /dev/null
+++ b/webrtc/modules/audio_coding/test/target_delay_unittest.cc
@@ -0,0 +1,249 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/base/scoped_ptr.h"
+#include "webrtc/common_types.h"
+#include "webrtc/modules/audio_coding/codecs/pcm16b/pcm16b.h"
+#include "webrtc/modules/audio_coding/include/audio_coding_module.h"
+#include "webrtc/modules/audio_coding/test/utility.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/system_wrappers/include/sleep.h"
+#include "webrtc/test/testsupport/fileutils.h"
+
+namespace webrtc {
+
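+// Tests how the minimum and maximum playout-delay settings affect the jitter
+// buffer's preferred (optimal) delay, using L16 packets pushed with
+// controlled timing.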
+class TargetDelayTest : public ::testing::Test {
+ protected:
+ TargetDelayTest() : acm_(AudioCodingModule::Create(0)) {}
+
+ ~TargetDelayTest() {}
+
+ void SetUp() {
+ EXPECT_TRUE(acm_.get() != NULL);
+
+ CodecInst codec;
+ ASSERT_EQ(0, AudioCodingModule::Codec("L16", &codec, kSampleRateHz, 1));
+ ASSERT_EQ(0, acm_->InitializeReceiver());
+ ASSERT_EQ(0, acm_->RegisterReceiveCodec(codec));
+
+ rtp_info_.header.payloadType = codec.pltype;
+ rtp_info_.header.timestamp = 0;
+ rtp_info_.header.ssrc = 0x12345678;
+ rtp_info_.header.markerBit = false;
+ rtp_info_.header.sequenceNumber = 0;
+ rtp_info_.type.Audio.channel = 1;
+ rtp_info_.type.Audio.isCNG = false;
+ rtp_info_.frameType = kAudioFrameSpeech;
+
+ int16_t audio[kFrameSizeSamples];
+ const int kRange = 0x7FF; // 2047, easy for masking.
+ for (size_t n = 0; n < kFrameSizeSamples; ++n)
+ audio[n] = (rand() & kRange) - kRange / 2;
+ WebRtcPcm16b_Encode(audio, kFrameSizeSamples, payload_);
+ }
+
+ void OutOfRangeInput() {
+ EXPECT_EQ(-1, SetMinimumDelay(-1));
+ EXPECT_EQ(-1, SetMinimumDelay(10001));
+ }
+
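+  // With no minimum delay set, the optimal buffer size should grow once
+  // jitter is introduced and end up close to the least required delay.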
+ void NoTargetDelayBufferSizeChanges() {
+ for (int n = 0; n < 30; ++n) // Run enough iterations.
+ Run(true);
+ int clean_optimal_delay = GetCurrentOptimalDelayMs();
+ Run(false); // Run with jitter.
+ int jittery_optimal_delay = GetCurrentOptimalDelayMs();
+ EXPECT_GT(jittery_optimal_delay, clean_optimal_delay);
+ int required_delay = RequiredDelay();
+ EXPECT_GT(required_delay, 0);
+ EXPECT_NEAR(required_delay, jittery_optimal_delay, 1);
+ }
+
+ void WithTargetDelayBufferNotChanging() {
+ // A target delay that is one packet larger than jitter.
+ const int kTargetDelayMs = (kInterarrivalJitterPacket + 1) *
+ kNum10msPerFrame * 10;
+ ASSERT_EQ(0, SetMinimumDelay(kTargetDelayMs));
+ for (int n = 0; n < 30; ++n) // Run enough iterations to fill the buffer.
+ Run(true);
+ int clean_optimal_delay = GetCurrentOptimalDelayMs();
+ EXPECT_EQ(kTargetDelayMs, clean_optimal_delay);
+ Run(false); // Run with jitter.
+ int jittery_optimal_delay = GetCurrentOptimalDelayMs();
+ EXPECT_EQ(jittery_optimal_delay, clean_optimal_delay);
+ }
+
+ void RequiredDelayAtCorrectRange() {
+ for (int n = 0; n < 30; ++n) // Run clean and store delay.
+ Run(true);
+ int clean_optimal_delay = GetCurrentOptimalDelayMs();
+
+ // A relatively large delay.
+ const int kTargetDelayMs = (kInterarrivalJitterPacket + 10) *
+ kNum10msPerFrame * 10;
+ ASSERT_EQ(0, SetMinimumDelay(kTargetDelayMs));
+ for (int n = 0; n < 300; ++n) // Run enough iterations to fill the buffer.
+ Run(true);
+ Run(false); // Run with jitter.
+
+ int jittery_optimal_delay = GetCurrentOptimalDelayMs();
+ EXPECT_EQ(kTargetDelayMs, jittery_optimal_delay);
+
+ int required_delay = RequiredDelay();
+
+    // Check that |required_delay| is in the correct range.
+ EXPECT_GT(required_delay, 0);
+ EXPECT_GT(jittery_optimal_delay, required_delay);
+ EXPECT_GT(required_delay, clean_optimal_delay);
+
+ // A tighter check for the value of |required_delay|.
+ // The jitter forces a delay of
+ // |kInterarrivalJitterPacket * kNum10msPerFrame * 10| milliseconds. So we
+    // expect |required_delay| to be close to that.
+ EXPECT_NEAR(kInterarrivalJitterPacket * kNum10msPerFrame * 10,
+ required_delay, 1);
+ }
+
+ void TargetDelayBufferMinMax() {
+ const int kTargetMinDelayMs = kNum10msPerFrame * 10;
+ ASSERT_EQ(0, SetMinimumDelay(kTargetMinDelayMs));
+ for (int m = 0; m < 30; ++m) // Run enough iterations to fill the buffer.
+ Run(true);
+ int clean_optimal_delay = GetCurrentOptimalDelayMs();
+ EXPECT_EQ(kTargetMinDelayMs, clean_optimal_delay);
+
+ const int kTargetMaxDelayMs = 2 * (kNum10msPerFrame * 10);
+ ASSERT_EQ(0, SetMaximumDelay(kTargetMaxDelayMs));
+ for (int n = 0; n < 30; ++n) // Run enough iterations to fill the buffer.
+ Run(false);
+
+ int capped_optimal_delay = GetCurrentOptimalDelayMs();
+ EXPECT_EQ(kTargetMaxDelayMs, capped_optimal_delay);
+ }
+
+ private:
+ static const int kSampleRateHz = 16000;
+ static const int kNum10msPerFrame = 2;
+ static const size_t kFrameSizeSamples = 320; // 20 ms @ 16 kHz.
+ // payload-len = frame-samples * 2 bytes/sample.
+ static const int kPayloadLenBytes = 320 * 2;
+  // Inter-arrival time, in number of packets, in a jittery channel; a value
+  // of one means no jitter.
+ static const int kInterarrivalJitterPacket = 2;
+
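+  // Inserts one encoded packet into the ACM, advancing the RTP timestamp and
+  // sequence number.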
+ void Push() {
+ rtp_info_.header.timestamp += kFrameSizeSamples;
+ rtp_info_.header.sequenceNumber++;
+ ASSERT_EQ(0, acm_->IncomingPacket(payload_, kFrameSizeSamples * 2,
+ rtp_info_));
+ }
+
+ // Pull audio equivalent to the amount of audio in one RTP packet.
+ void Pull() {
+ AudioFrame frame;
+ for (int k = 0; k < kNum10msPerFrame; ++k) { // Pull one frame.
+ ASSERT_EQ(0, acm_->PlayoutData10Ms(-1, &frame));
+      // ASSERT_TRUE is used here because ASSERT_EQ generated a compile error.
+ ASSERT_TRUE(kSampleRateHz == frame.sample_rate_hz_);
+ ASSERT_EQ(1u, frame.num_channels_);
+ ASSERT_TRUE(kSampleRateHz / 100 == frame.samples_per_channel_);
+ }
+ }
+
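+  // Feeds packets and pulls audio. If |clean| is false, extra bursts are
+  // added in which one packet covers |kInterarrivalJitterPacket| pull
+  // periods, simulating inter-arrival jitter.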
+ void Run(bool clean) {
+ for (int n = 0; n < 10; ++n) {
+ for (int m = 0; m < 5; ++m) {
+ Push();
+ Pull();
+ }
+
+ if (!clean) {
+ for (int m = 0; m < 10; ++m) { // Long enough to trigger delay change.
+ Push();
+ for (int n = 0; n < kInterarrivalJitterPacket; ++n)
+ Pull();
+ }
+ }
+ }
+ }
+
+ int SetMinimumDelay(int delay_ms) {
+ return acm_->SetMinimumPlayoutDelay(delay_ms);
+ }
+
+ int SetMaximumDelay(int delay_ms) {
+ return acm_->SetMaximumPlayoutDelay(delay_ms);
+ }
+
+ int GetCurrentOptimalDelayMs() {
+ NetworkStatistics stats;
+ acm_->GetNetworkStatistics(&stats);
+ return stats.preferredBufferSize;
+ }
+
+ int RequiredDelay() {
+ return acm_->LeastRequiredDelayMs();
+ }
+
+ rtc::scoped_ptr<AudioCodingModule> acm_;
+ WebRtcRTPHeader rtp_info_;
+ uint8_t payload_[kPayloadLenBytes];
+};
+
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_OutOfRangeInput DISABLED_OutOfRangeInput
+#else
+#define MAYBE_OutOfRangeInput OutOfRangeInput
+#endif
+TEST_F(TargetDelayTest, MAYBE_OutOfRangeInput) {
+ OutOfRangeInput();
+}
+
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_NoTargetDelayBufferSizeChanges \
+ DISABLED_NoTargetDelayBufferSizeChanges
+#else
+#define MAYBE_NoTargetDelayBufferSizeChanges NoTargetDelayBufferSizeChanges
+#endif
+TEST_F(TargetDelayTest, MAYBE_NoTargetDelayBufferSizeChanges) {
+ NoTargetDelayBufferSizeChanges();
+}
+
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_WithTargetDelayBufferNotChanging \
+ DISABLED_WithTargetDelayBufferNotChanging
+#else
+#define MAYBE_WithTargetDelayBufferNotChanging WithTargetDelayBufferNotChanging
+#endif
+TEST_F(TargetDelayTest, MAYBE_WithTargetDelayBufferNotChanging) {
+ WithTargetDelayBufferNotChanging();
+}
+
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_RequiredDelayAtCorrectRange DISABLED_RequiredDelayAtCorrectRange
+#else
+#define MAYBE_RequiredDelayAtCorrectRange RequiredDelayAtCorrectRange
+#endif
+TEST_F(TargetDelayTest, MAYBE_RequiredDelayAtCorrectRange) {
+ RequiredDelayAtCorrectRange();
+}
+
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_TargetDelayBufferMinMax DISABLED_TargetDelayBufferMinMax
+#else
+#define MAYBE_TargetDelayBufferMinMax TargetDelayBufferMinMax
+#endif
+TEST_F(TargetDelayTest, MAYBE_TargetDelayBufferMinMax) {
+ TargetDelayBufferMinMax();
+}
+
+} // namespace webrtc
+
diff --git a/webrtc/modules/audio_coding/test/utility.cc b/webrtc/modules/audio_coding/test/utility.cc
new file mode 100644
index 0000000000..89368bce51
--- /dev/null
+++ b/webrtc/modules/audio_coding/test/utility.cc
@@ -0,0 +1,303 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "utility.h"
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/common.h"
+#include "webrtc/common_types.h"
+#include "webrtc/modules/audio_coding/include/audio_coding_module.h"
+#include "webrtc/modules/audio_coding/acm2/acm_common_defs.h"
+
+#define NUM_CODECS_WITH_FIXED_PAYLOAD_TYPE 13
+
+namespace webrtc {
+
+ACMTestTimer::ACMTestTimer()
+ : _msec(0),
+ _sec(0),
+ _min(0),
+ _hour(0) {
+ return;
+}
+
+ACMTestTimer::~ACMTestTimer() {
+ return;
+}
+
+void ACMTestTimer::Reset() {
+ _msec = 0;
+ _sec = 0;
+ _min = 0;
+ _hour = 0;
+ return;
+}
+void ACMTestTimer::Tick10ms() {
+ _msec += 10;
+ Adjust();
+ return;
+}
+
+void ACMTestTimer::Tick1ms() {
+ _msec++;
+ Adjust();
+ return;
+}
+
+void ACMTestTimer::Tick100ms() {
+ _msec += 100;
+ Adjust();
+ return;
+}
+
+void ACMTestTimer::Tick1sec() {
+ _sec++;
+ Adjust();
+ return;
+}
+
+void ACMTestTimer::CurrentTimeHMS(char* currTime) {
+ sprintf(currTime, "%4lu:%02u:%06.3f", _hour, _min,
+ (double) _sec + (double) _msec / 1000.);
+ return;
+}
+
+void ACMTestTimer::CurrentTime(unsigned long& h, unsigned char& m,
+ unsigned char& s, unsigned short& ms) {
+ h = _hour;
+ m = _min;
+ s = _sec;
+ ms = _msec;
+ return;
+}
+
+void ACMTestTimer::Adjust() {
+ unsigned int n;
+ if (_msec >= 1000) {
+ n = _msec / 1000;
+ _msec -= (1000 * n);
+ _sec += n;
+ }
+ if (_sec >= 60) {
+ n = _sec / 60;
+ _sec -= (n * 60);
+ _min += n;
+ }
+ if (_min >= 60) {
+ n = _min / 60;
+ _min -= (n * 60);
+ _hour += n;
+ }
+}
+
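+// Prints the list of supported codecs and reads the user's choice from stdin
+// into |codecInst|.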
+int16_t ChooseCodec(CodecInst& codecInst) {
+  PrintCodecs();
+ uint8_t noCodec = AudioCodingModule::NumberOfCodecs();
+ int8_t codecID;
+ bool outOfRange = false;
+ char myStr[15] = "";
+ do {
+ printf("\nChoose a codec [0]: ");
+ EXPECT_TRUE(fgets(myStr, 10, stdin) != NULL);
+ codecID = atoi(myStr);
+ if ((codecID < 0) || (codecID >= noCodec)) {
+ printf("\nOut of range.\n");
+ outOfRange = true;
+ }
+ } while (outOfRange);
+
+ CHECK_ERROR(AudioCodingModule::Codec((uint8_t )codecID, &codecInst));
+ return 0;
+}
+
+void PrintCodecs() {
+ uint8_t noCodec = AudioCodingModule::NumberOfCodecs();
+
+ CodecInst codecInst;
+ printf("No Name [Hz] [bps]\n");
+ for (uint8_t codecCntr = 0; codecCntr < noCodec; codecCntr++) {
+ AudioCodingModule::Codec(codecCntr, &codecInst);
+ printf("%2d- %-18s %5d %6d\n", codecCntr, codecInst.plname,
+ codecInst.plfreq, codecInst.rate);
+ }
+
+}
+
+CircularBuffer::CircularBuffer(uint32_t len)
+ : _buff(NULL),
+ _idx(0),
+ _buffIsFull(false),
+ _calcAvg(false),
+ _calcVar(false),
+ _sum(0),
+ _sumSqr(0) {
+ _buff = new double[len];
+ if (_buff == NULL) {
+ _buffLen = 0;
+ } else {
+ for (uint32_t n = 0; n < len; n++) {
+ _buff[n] = 0;
+ }
+ _buffLen = len;
+ }
+}
+
+CircularBuffer::~CircularBuffer() {
+ if (_buff != NULL) {
+ delete[] _buff;
+ _buff = NULL;
+ }
+}
+
+void CircularBuffer::Update(const double newVal) {
+ assert(_buffLen > 0);
+
+  // Store the value that is going to be overwritten.
+  double oldVal = _buff[_idx];
+  // Record the new value.
+  _buff[_idx] = newVal;
+  // Increment the index to point to where we would write next.
+  _idx++;
+  // It is a circular buffer; if we are at the end, cycle back to the
+  // beginning.
+  if (_idx >= _buffLen) {
+    // Flag that the buffer is filled up.
+    _buffIsFull = true;
+    _idx = 0;
+  }
+
+  // Update the running statistics.
+  if (_calcAvg) {
+    // For the average we have to update the sum.
+    _sum += (newVal - oldVal);
+  }
+
+  if (_calcVar) {
+    // To calculate variance we have to update the sum of squares.
+    _sumSqr += (double) (newVal - oldVal) * (double) (newVal + oldVal);
+  }
+}
+
+void CircularBuffer::SetArithMean(bool enable) {
+ assert(_buffLen > 0);
+
+ if (enable && !_calcAvg) {
+ uint32_t lim;
+ if (_buffIsFull) {
+ lim = _buffLen;
+ } else {
+ lim = _idx;
+ }
+ _sum = 0;
+ for (uint32_t n = 0; n < lim; n++) {
+ _sum += _buff[n];
+ }
+ }
+ _calcAvg = enable;
+}
+
+void CircularBuffer::SetVariance(bool enable) {
+ assert(_buffLen > 0);
+
+ if (enable && !_calcVar) {
+ uint32_t lim;
+ if (_buffIsFull) {
+ lim = _buffLen;
+ } else {
+ lim = _idx;
+ }
+ _sumSqr = 0;
+ for (uint32_t n = 0; n < lim; n++) {
+ _sumSqr += _buff[n] * _buff[n];
+ }
+ }
+  _calcVar = enable;
+}
+
+int16_t CircularBuffer::ArithMean(double& mean) {
+ assert(_buffLen > 0);
+
+  if (_buffIsFull) {
+    mean = _sum / (double) _buffLen;
+    return 0;
+  } else {
+    if (_idx > 0) {
+      mean = _sum / (double) _idx;
+      return 0;
+    } else {
+      return -1;
+    }
+  }
+}
+
+int16_t CircularBuffer::Variance(double& var) {
+ assert(_buffLen > 0);
+
+ if (_buffIsFull) {
+ var = _sumSqr / (double) _buffLen;
+ return 0;
+ } else {
+ if (_idx > 0) {
+ var = _sumSqr / (double) _idx;
+ return 0;
+ } else {
+ return -1;
+ }
+ }
+}
+
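+// Returns true if |payloadName| is a codec with a fixed RTP payload type.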
+bool FixedPayloadTypeCodec(const char* payloadName) {
+ char fixPayloadTypeCodecs[NUM_CODECS_WITH_FIXED_PAYLOAD_TYPE][32] = { "PCMU",
+ "PCMA", "GSM", "G723", "DVI4", "LPC", "PCMA", "G722", "QCELP", "CN",
+ "MPA", "G728", "G729" };
+
+ for (int n = 0; n < NUM_CODECS_WITH_FIXED_PAYLOAD_TYPE; n++) {
+ if (!STR_CASE_CMP(payloadName, fixPayloadTypeCodecs[n])) {
+ return true;
+ }
+ }
+ return false;
+}
+
+void VADCallback::Reset() {
+ memset(_numFrameTypes, 0, sizeof(_numFrameTypes));
+}
+
+VADCallback::VADCallback() {
+ memset(_numFrameTypes, 0, sizeof(_numFrameTypes));
+}
+
+void VADCallback::PrintFrameTypes() {
+ printf("kEmptyFrame......... %d\n", _numFrameTypes[kEmptyFrame]);
+ printf("kAudioFrameSpeech... %d\n", _numFrameTypes[kAudioFrameSpeech]);
+ printf("kAudioFrameCN....... %d\n", _numFrameTypes[kAudioFrameCN]);
+ printf("kVideoFrameKey...... %d\n", _numFrameTypes[kVideoFrameKey]);
+ printf("kVideoFrameDelta.... %d\n", _numFrameTypes[kVideoFrameDelta]);
+}
+
+int32_t VADCallback::InFrameType(FrameType frame_type) {
+ _numFrameTypes[frame_type]++;
+ return 0;
+}
+
+} // namespace webrtc
diff --git a/webrtc/modules/audio_coding/test/utility.h b/webrtc/modules/audio_coding/test/utility.h
new file mode 100644
index 0000000000..23869be7ed
--- /dev/null
+++ b/webrtc/modules/audio_coding/test/utility.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_CODING_TEST_UTILITY_H_
+#define WEBRTC_MODULES_AUDIO_CODING_TEST_UTILITY_H_
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/modules/audio_coding/include/audio_coding_module.h"
+
+namespace webrtc {
+
+//-----------------------------
+#define CHECK_ERROR(f) \
+ do { \
+ EXPECT_GE(f, 0) << "Error Calling API"; \
+ } while(0)
+
+//-----------------------------
+#define CHECK_PROTECTED(f) \
+ do { \
+ if (f >= 0) { \
+ ADD_FAILURE() << "Error Calling API"; \
+ } else { \
+ printf("An expected error is caught.\n"); \
+ } \
+ } while(0)
+
+//----------------------------
+#define CHECK_ERROR_MT(f) \
+ do { \
+ if (f < 0) { \
+ fprintf(stderr, "Error Calling API in file %s at line %d \n", \
+ __FILE__, __LINE__); \
+ } \
+ } while(0)
+
+//----------------------------
+#define CHECK_PROTECTED_MT(f) \
+ do { \
+ if (f >= 0) { \
+ fprintf(stderr, "Error Calling API in file %s at line %d \n", \
+ __FILE__, __LINE__); \
+ } else { \
+ printf("An expected error is caught.\n"); \
+ } \
+ } while(0)
+
+#define DELETE_POINTER(p) \
+ do { \
+ if (p != NULL) { \
+ delete p; \
+ p = NULL; \
+ } \
+ } while(0)
+
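+// Keeps track of simulated time in hours, minutes, seconds and milliseconds,
+// advanced in ticks of 1 ms up to 1 s.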
+class ACMTestTimer {
+ public:
+ ACMTestTimer();
+ ~ACMTestTimer();
+
+ void Reset();
+ void Tick10ms();
+ void Tick1ms();
+ void Tick100ms();
+ void Tick1sec();
+ void CurrentTimeHMS(char* currTime);
+ void CurrentTime(unsigned long& h, unsigned char& m, unsigned char& s,
+ unsigned short& ms);
+
+ private:
+ void Adjust();
+
+ unsigned short _msec;
+ unsigned char _sec;
+ unsigned char _min;
+ unsigned long _hour;
+};
+
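+// Fixed-length circular buffer that can maintain a running sum and sum of
+// squares of its contents for arithmetic-mean and variance queries.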
+class CircularBuffer {
+ public:
+ CircularBuffer(uint32_t len);
+ ~CircularBuffer();
+
+ void SetArithMean(bool enable);
+ void SetVariance(bool enable);
+
+ void Update(const double newVal);
+ void IsBufferFull();
+
+ int16_t Variance(double& var);
+ int16_t ArithMean(double& mean);
+
+ protected:
+ double* _buff;
+ uint32_t _idx;
+ uint32_t _buffLen;
+
+ bool _buffIsFull;
+ bool _calcAvg;
+ bool _calcVar;
+ double _sum;
+ double _sumSqr;
+};
+
+int16_t ChooseCodec(CodecInst& codecInst);
+
+void PrintCodecs();
+
+bool FixedPayloadTypeCodec(const char* payloadName);
+
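+// Counts the number of frames of each FrameType reported through the
+// InFrameType() callback.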
+class VADCallback : public ACMVADCallback {
+ public:
+ VADCallback();
+ ~VADCallback() {
+ }
+
+ int32_t InFrameType(FrameType frame_type);
+
+ void PrintFrameTypes();
+ void Reset();
+
+ private:
+ uint32_t _numFrameTypes[5];
+};
+
+void UseLegacyAcm(webrtc::Config* config);
+
+void UseNewAcm(webrtc::Config* config);
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_AUDIO_CODING_TEST_UTILITY_H_