aboutsummaryrefslogtreecommitdiff
path: root/webrtc/modules/audio_processing/test/audio_processing_unittest.cc
diff options
context:
space:
mode:
Diffstat (limited to 'webrtc/modules/audio_processing/test/audio_processing_unittest.cc')
-rw-r--r--webrtc/modules/audio_processing/test/audio_processing_unittest.cc237
1 file changed, 115 insertions, 122 deletions
diff --git a/webrtc/modules/audio_processing/test/audio_processing_unittest.cc b/webrtc/modules/audio_processing/test/audio_processing_unittest.cc
index c013a369fe..324cb7bec6 100644
--- a/webrtc/modules/audio_processing/test/audio_processing_unittest.cc
+++ b/webrtc/modules/audio_processing/test/audio_processing_unittest.cc
@@ -14,6 +14,7 @@
#include <limits>
#include <queue>
+#include "webrtc/base/arraysize.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/common_audio/include/audio_util.h"
#include "webrtc/common_audio/resampler/include/push_resampler.h"
@@ -23,11 +24,10 @@
#include "webrtc/modules/audio_processing/include/audio_processing.h"
#include "webrtc/modules/audio_processing/test/protobuf_utils.h"
#include "webrtc/modules/audio_processing/test/test_utils.h"
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/system_wrappers/include/event_wrapper.h"
#include "webrtc/system_wrappers/include/trace.h"
#include "webrtc/test/testsupport/fileutils.h"
-#include "webrtc/test/testsupport/gtest_disable.h"
#ifdef WEBRTC_ANDROID_PLATFORM_BUILD
#include "gtest/gtest.h"
#include "external/webrtc/webrtc/modules/audio_processing/test/unittest.pb.h"
@@ -53,11 +53,8 @@ namespace {
// file. This is the typical case. When the file should be updated, it can
// be set to true with the command-line switch --write_ref_data.
bool write_ref_data = false;
-const int kChannels[] = {1, 2};
-const size_t kChannelsSize = sizeof(kChannels) / sizeof(*kChannels);
-
+const google::protobuf::int32 kChannels[] = {1, 2};
const int kSampleRates[] = {8000, 16000, 32000, 48000};
-const size_t kSampleRatesSize = sizeof(kSampleRates) / sizeof(*kSampleRates);
#if defined(WEBRTC_AUDIOPROC_FIXED_PROFILE)
// AECM doesn't support super-wb.
@@ -65,8 +62,6 @@ const int kProcessSampleRates[] = {8000, 16000};
#elif defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
const int kProcessSampleRates[] = {8000, 16000, 32000, 48000};
#endif
-const size_t kProcessSampleRatesSize = sizeof(kProcessSampleRates) /
- sizeof(*kProcessSampleRates);
enum StreamDirection { kForward = 0, kReverse };
@@ -77,7 +72,7 @@ void ConvertToFloat(const int16_t* int_data, ChannelBuffer<float>* cb) {
cb->num_frames(),
cb->num_channels(),
cb_int.channels());
- for (int i = 0; i < cb->num_channels(); ++i) {
+ for (size_t i = 0; i < cb->num_channels(); ++i) {
S16ToFloat(cb_int.channels()[i],
cb->num_frames(),
cb->channels()[i]);
@@ -89,7 +84,7 @@ void ConvertToFloat(const AudioFrame& frame, ChannelBuffer<float>* cb) {
}
// Number of channels including the keyboard channel.
-int TotalChannelsFromLayout(AudioProcessing::ChannelLayout layout) {
+size_t TotalChannelsFromLayout(AudioProcessing::ChannelLayout layout) {
switch (layout) {
case AudioProcessing::kMono:
return 1;
@@ -100,7 +95,7 @@ int TotalChannelsFromLayout(AudioProcessing::ChannelLayout layout) {
return 3;
}
assert(false);
- return -1;
+ return 0;
}
int TruncateToMultipleOf10(int value) {
@@ -108,25 +103,25 @@ int TruncateToMultipleOf10(int value) {
}
void MixStereoToMono(const float* stereo, float* mono,
- int samples_per_channel) {
- for (int i = 0; i < samples_per_channel; ++i)
+ size_t samples_per_channel) {
+ for (size_t i = 0; i < samples_per_channel; ++i)
mono[i] = (stereo[i * 2] + stereo[i * 2 + 1]) / 2;
}
void MixStereoToMono(const int16_t* stereo, int16_t* mono,
- int samples_per_channel) {
- for (int i = 0; i < samples_per_channel; ++i)
+ size_t samples_per_channel) {
+ for (size_t i = 0; i < samples_per_channel; ++i)
mono[i] = (stereo[i * 2] + stereo[i * 2 + 1]) >> 1;
}
-void CopyLeftToRightChannel(int16_t* stereo, int samples_per_channel) {
- for (int i = 0; i < samples_per_channel; i++) {
+void CopyLeftToRightChannel(int16_t* stereo, size_t samples_per_channel) {
+ for (size_t i = 0; i < samples_per_channel; i++) {
stereo[i * 2 + 1] = stereo[i * 2];
}
}
-void VerifyChannelsAreEqual(int16_t* stereo, int samples_per_channel) {
- for (int i = 0; i < samples_per_channel; i++) {
+void VerifyChannelsAreEqual(int16_t* stereo, size_t samples_per_channel) {
+ for (size_t i = 0; i < samples_per_channel; i++) {
EXPECT_EQ(stereo[i * 2 + 1], stereo[i * 2]);
}
}
@@ -139,7 +134,7 @@ void SetFrameTo(AudioFrame* frame, int16_t value) {
}
void SetFrameTo(AudioFrame* frame, int16_t left, int16_t right) {
- ASSERT_EQ(2, frame->num_channels_);
+ ASSERT_EQ(2u, frame->num_channels_);
for (size_t i = 0; i < frame->samples_per_channel_ * 2; i += 2) {
frame->data_[i] = left;
frame->data_[i + 1] = right;
@@ -199,9 +194,9 @@ T AbsValue(T a) {
}
int16_t MaxAudioFrame(const AudioFrame& frame) {
- const int length = frame.samples_per_channel_ * frame.num_channels_;
+ const size_t length = frame.samples_per_channel_ * frame.num_channels_;
int16_t max_data = AbsValue(frame.data_[0]);
- for (int i = 1; i < length; i++) {
+ for (size_t i = 1; i < length; i++) {
max_data = std::max(max_data, AbsValue(frame.data_[i]));
}
@@ -264,10 +259,10 @@ std::string OutputFilePath(std::string name,
int output_rate,
int reverse_input_rate,
int reverse_output_rate,
- int num_input_channels,
- int num_output_channels,
- int num_reverse_input_channels,
- int num_reverse_output_channels,
+ size_t num_input_channels,
+ size_t num_output_channels,
+ size_t num_reverse_input_channels,
+ size_t num_reverse_output_channels,
StreamDirection file_direction) {
std::ostringstream ss;
ss << name << "_i" << num_input_channels << "_" << input_rate / 1000 << "_ir"
@@ -362,9 +357,9 @@ class ApmTest : public ::testing::Test {
void Init(int sample_rate_hz,
int output_sample_rate_hz,
int reverse_sample_rate_hz,
- int num_input_channels,
- int num_output_channels,
- int num_reverse_channels,
+ size_t num_input_channels,
+ size_t num_output_channels,
+ size_t num_reverse_channels,
bool open_output_file);
void Init(AudioProcessing* ap);
void EnableAllComponents();
@@ -377,12 +372,12 @@ class ApmTest : public ::testing::Test {
void ProcessDelayVerificationTest(int delay_ms, int system_delay_ms,
int delay_min, int delay_max);
void TestChangingChannelsInt16Interface(
- int num_channels,
+ size_t num_channels,
AudioProcessing::Error expected_return);
- void TestChangingForwardChannels(int num_in_channels,
- int num_out_channels,
+ void TestChangingForwardChannels(size_t num_in_channels,
+ size_t num_out_channels,
AudioProcessing::Error expected_return);
- void TestChangingReverseChannels(int num_rev_channels,
+ void TestChangingReverseChannels(size_t num_rev_channels,
AudioProcessing::Error expected_return);
void RunQuantizedVolumeDoesNotGetStuckTest(int sample_rate);
void RunManualVolumeChangeIsPossibleTest(int sample_rate);
@@ -403,7 +398,7 @@ class ApmTest : public ::testing::Test {
rtc::scoped_ptr<ChannelBuffer<float> > float_cb_;
rtc::scoped_ptr<ChannelBuffer<float> > revfloat_cb_;
int output_sample_rate_hz_;
- int num_output_channels_;
+ size_t num_output_channels_;
FILE* far_file_;
FILE* near_file_;
FILE* out_file_;
@@ -487,9 +482,9 @@ void ApmTest::Init(AudioProcessing* ap) {
void ApmTest::Init(int sample_rate_hz,
int output_sample_rate_hz,
int reverse_sample_rate_hz,
- int num_input_channels,
- int num_output_channels,
- int num_reverse_channels,
+ size_t num_input_channels,
+ size_t num_output_channels,
+ size_t num_reverse_channels,
bool open_output_file) {
SetContainerFormat(sample_rate_hz, num_input_channels, frame_, &float_cb_);
output_sample_rate_hz_ = output_sample_rate_hz;
@@ -821,7 +816,7 @@ TEST_F(ApmTest, DelayOffsetWithLimitsIsSetProperly) {
}
void ApmTest::TestChangingChannelsInt16Interface(
- int num_channels,
+ size_t num_channels,
AudioProcessing::Error expected_return) {
frame_->num_channels_ = num_channels;
EXPECT_EQ(expected_return, apm_->ProcessStream(frame_));
@@ -829,8 +824,8 @@ void ApmTest::TestChangingChannelsInt16Interface(
}
void ApmTest::TestChangingForwardChannels(
- int num_in_channels,
- int num_out_channels,
+ size_t num_in_channels,
+ size_t num_out_channels,
AudioProcessing::Error expected_return) {
const StreamConfig input_stream = {frame_->sample_rate_hz_, num_in_channels};
const StreamConfig output_stream = {output_sample_rate_hz_, num_out_channels};
@@ -841,7 +836,7 @@ void ApmTest::TestChangingForwardChannels(
}
void ApmTest::TestChangingReverseChannels(
- int num_rev_channels,
+ size_t num_rev_channels,
AudioProcessing::Error expected_return) {
const ProcessingConfig processing_config = {
{{frame_->sample_rate_hz_, apm_->num_input_channels()},
@@ -862,11 +857,11 @@ TEST_F(ApmTest, ChannelsInt16Interface) {
TestChangingChannelsInt16Interface(0, apm_->kBadNumberChannelsError);
- for (int i = 1; i < 4; i++) {
+ for (size_t i = 1; i < 4; i++) {
TestChangingChannelsInt16Interface(i, kNoErr);
EXPECT_EQ(i, apm_->num_input_channels());
// We always force the number of reverse channels used for processing to 1.
- EXPECT_EQ(1, apm_->num_reverse_channels());
+ EXPECT_EQ(1u, apm_->num_reverse_channels());
}
}
@@ -877,8 +872,8 @@ TEST_F(ApmTest, Channels) {
TestChangingForwardChannels(0, 1, apm_->kBadNumberChannelsError);
TestChangingReverseChannels(0, apm_->kBadNumberChannelsError);
- for (int i = 1; i < 4; ++i) {
- for (int j = 0; j < 1; ++j) {
+ for (size_t i = 1; i < 4; ++i) {
+ for (size_t j = 0; j < 1; ++j) {
// Output channels must be one or match input channels.
if (j == 1 || i == j) {
TestChangingForwardChannels(i, j, kNoErr);
@@ -887,7 +882,7 @@ TEST_F(ApmTest, Channels) {
EXPECT_EQ(i, apm_->num_input_channels());
EXPECT_EQ(j, apm_->num_output_channels());
// The number of reverse channels used for processing is always 1.
- EXPECT_EQ(1, apm_->num_reverse_channels());
+ EXPECT_EQ(1u, apm_->num_reverse_channels());
} else {
TestChangingForwardChannels(i, j,
AudioProcessing::kBadNumberChannelsError);
@@ -902,7 +897,7 @@ TEST_F(ApmTest, SampleRatesInt) {
EXPECT_EQ(apm_->kBadSampleRateError, ProcessStreamChooser(kIntFormat));
// Testing valid sample rates
int fs[] = {8000, 16000, 32000, 48000};
- for (size_t i = 0; i < sizeof(fs) / sizeof(*fs); i++) {
+ for (size_t i = 0; i < arraysize(fs); i++) {
SetContainerFormat(fs[i], 2, frame_, &float_cb_);
EXPECT_NOERR(ProcessStreamChooser(kIntFormat));
}
@@ -921,7 +916,7 @@ TEST_F(ApmTest, EchoCancellation) {
EchoCancellation::kModerateSuppression,
EchoCancellation::kHighSuppression,
};
- for (size_t i = 0; i < sizeof(level)/sizeof(*level); i++) {
+ for (size_t i = 0; i < arraysize(level); i++) {
EXPECT_EQ(apm_->kNoError,
apm_->echo_cancellation()->set_suppression_level(level[i]));
EXPECT_EQ(level[i],
@@ -998,7 +993,7 @@ TEST_F(ApmTest, DISABLED_EchoCancellationReportsCorrectDelays) {
// Test a couple of corner cases and verify that the estimated delay is
// within a valid region (set to +-1.5 blocks). Note that these cases are
// sampling frequency dependent.
- for (size_t i = 0; i < kProcessSampleRatesSize; i++) {
+ for (size_t i = 0; i < arraysize(kProcessSampleRates); i++) {
Init(kProcessSampleRates[i],
kProcessSampleRates[i],
kProcessSampleRates[i],
@@ -1070,7 +1065,7 @@ TEST_F(ApmTest, EchoControlMobile) {
EchoControlMobile::kSpeakerphone,
EchoControlMobile::kLoudSpeakerphone,
};
- for (size_t i = 0; i < sizeof(mode)/sizeof(*mode); i++) {
+ for (size_t i = 0; i < arraysize(mode); i++) {
EXPECT_EQ(apm_->kNoError,
apm_->echo_control_mobile()->set_routing_mode(mode[i]));
EXPECT_EQ(mode[i],
@@ -1135,7 +1130,7 @@ TEST_F(ApmTest, GainControl) {
GainControl::kAdaptiveDigital,
GainControl::kFixedDigital
};
- for (size_t i = 0; i < sizeof(mode)/sizeof(*mode); i++) {
+ for (size_t i = 0; i < arraysize(mode); i++) {
EXPECT_EQ(apm_->kNoError,
apm_->gain_control()->set_mode(mode[i]));
EXPECT_EQ(mode[i], apm_->gain_control()->mode());
@@ -1151,7 +1146,7 @@ TEST_F(ApmTest, GainControl) {
apm_->gain_control()->target_level_dbfs()));
int level_dbfs[] = {0, 6, 31};
- for (size_t i = 0; i < sizeof(level_dbfs)/sizeof(*level_dbfs); i++) {
+ for (size_t i = 0; i < arraysize(level_dbfs); i++) {
EXPECT_EQ(apm_->kNoError,
apm_->gain_control()->set_target_level_dbfs(level_dbfs[i]));
EXPECT_EQ(level_dbfs[i], apm_->gain_control()->target_level_dbfs());
@@ -1169,7 +1164,7 @@ TEST_F(ApmTest, GainControl) {
apm_->gain_control()->compression_gain_db()));
int gain_db[] = {0, 10, 90};
- for (size_t i = 0; i < sizeof(gain_db)/sizeof(*gain_db); i++) {
+ for (size_t i = 0; i < arraysize(gain_db); i++) {
EXPECT_EQ(apm_->kNoError,
apm_->gain_control()->set_compression_gain_db(gain_db[i]));
EXPECT_EQ(gain_db[i], apm_->gain_control()->compression_gain_db());
@@ -1200,14 +1195,14 @@ TEST_F(ApmTest, GainControl) {
apm_->gain_control()->analog_level_maximum()));
int min_level[] = {0, 255, 1024};
- for (size_t i = 0; i < sizeof(min_level)/sizeof(*min_level); i++) {
+ for (size_t i = 0; i < arraysize(min_level); i++) {
EXPECT_EQ(apm_->kNoError,
apm_->gain_control()->set_analog_level_limits(min_level[i], 1024));
EXPECT_EQ(min_level[i], apm_->gain_control()->analog_level_minimum());
}
int max_level[] = {0, 1024, 65535};
- for (size_t i = 0; i < sizeof(min_level)/sizeof(*min_level); i++) {
+ for (size_t i = 0; i < arraysize(min_level); i++) {
EXPECT_EQ(apm_->kNoError,
apm_->gain_control()->set_analog_level_limits(0, max_level[i]));
EXPECT_EQ(max_level[i], apm_->gain_control()->analog_level_maximum());
@@ -1246,7 +1241,7 @@ void ApmTest::RunQuantizedVolumeDoesNotGetStuckTest(int sample_rate) {
// Verifies that despite volume slider quantization, the AGC can continue to
// increase its volume.
TEST_F(ApmTest, QuantizedVolumeDoesNotGetStuck) {
- for (size_t i = 0; i < kSampleRatesSize; ++i) {
+ for (size_t i = 0; i < arraysize(kSampleRates); ++i) {
RunQuantizedVolumeDoesNotGetStuckTest(kSampleRates[i]);
}
}
@@ -1291,7 +1286,7 @@ void ApmTest::RunManualVolumeChangeIsPossibleTest(int sample_rate) {
}
TEST_F(ApmTest, ManualVolumeChangeIsPossible) {
- for (size_t i = 0; i < kSampleRatesSize; ++i) {
+ for (size_t i = 0; i < arraysize(kSampleRates); ++i) {
RunManualVolumeChangeIsPossibleTest(kSampleRates[i]);
}
}
@@ -1299,11 +1294,11 @@ TEST_F(ApmTest, ManualVolumeChangeIsPossible) {
#if !defined(WEBRTC_ANDROID) && !defined(WEBRTC_IOS)
TEST_F(ApmTest, AgcOnlyAdaptsWhenTargetSignalIsPresent) {
const int kSampleRateHz = 16000;
- const int kSamplesPerChannel =
- AudioProcessing::kChunkSizeMs * kSampleRateHz / 1000;
- const int kNumInputChannels = 2;
- const int kNumOutputChannels = 1;
- const int kNumChunks = 700;
+ const size_t kSamplesPerChannel =
+ static_cast<size_t>(AudioProcessing::kChunkSizeMs * kSampleRateHz / 1000);
+ const size_t kNumInputChannels = 2;
+ const size_t kNumOutputChannels = 1;
+ const size_t kNumChunks = 700;
const float kScaleFactor = 0.25f;
Config config;
std::vector<webrtc::Point> geometry;
@@ -1317,8 +1312,8 @@ TEST_F(ApmTest, AgcOnlyAdaptsWhenTargetSignalIsPresent) {
EXPECT_EQ(kNoErr, apm->gain_control()->Enable(true));
ChannelBuffer<float> src_buf(kSamplesPerChannel, kNumInputChannels);
ChannelBuffer<float> dest_buf(kSamplesPerChannel, kNumOutputChannels);
- const int max_length = kSamplesPerChannel * std::max(kNumInputChannels,
- kNumOutputChannels);
+ const size_t max_length = kSamplesPerChannel * std::max(kNumInputChannels,
+ kNumOutputChannels);
rtc::scoped_ptr<int16_t[]> int_data(new int16_t[max_length]);
rtc::scoped_ptr<float[]> float_data(new float[max_length]);
std::string filename = ResourceFilePath("far", kSampleRateHz);
@@ -1330,13 +1325,13 @@ TEST_F(ApmTest, AgcOnlyAdaptsWhenTargetSignalIsPresent) {
bool is_target = false;
EXPECT_CALL(*beamformer, is_target_present())
.WillRepeatedly(testing::ReturnPointee(&is_target));
- for (int i = 0; i < kNumChunks; ++i) {
+ for (size_t i = 0; i < kNumChunks; ++i) {
ASSERT_TRUE(ReadChunk(far_file,
int_data.get(),
float_data.get(),
&src_buf));
- for (int j = 0; j < kNumInputChannels; ++j) {
- for (int k = 0; k < kSamplesPerChannel; ++k) {
+ for (size_t j = 0; j < kNumInputChannels; ++j) {
+ for (size_t k = 0; k < kSamplesPerChannel; ++k) {
src_buf.channels()[j][k] *= kScaleFactor;
}
}
@@ -1355,13 +1350,13 @@ TEST_F(ApmTest, AgcOnlyAdaptsWhenTargetSignalIsPresent) {
apm->gain_control()->compression_gain_db());
rewind(far_file);
is_target = true;
- for (int i = 0; i < kNumChunks; ++i) {
+ for (size_t i = 0; i < kNumChunks; ++i) {
ASSERT_TRUE(ReadChunk(far_file,
int_data.get(),
float_data.get(),
&src_buf));
- for (int j = 0; j < kNumInputChannels; ++j) {
- for (int k = 0; k < kSamplesPerChannel; ++k) {
+ for (size_t j = 0; j < kNumInputChannels; ++j) {
+ for (size_t k = 0; k < kSamplesPerChannel; ++k) {
src_buf.channels()[j][k] *= kScaleFactor;
}
}
@@ -1390,7 +1385,7 @@ TEST_F(ApmTest, NoiseSuppression) {
NoiseSuppression::kHigh,
NoiseSuppression::kVeryHigh
};
- for (size_t i = 0; i < sizeof(level)/sizeof(*level); i++) {
+ for (size_t i = 0; i < arraysize(level); i++) {
EXPECT_EQ(apm_->kNoError,
apm_->noise_suppression()->set_level(level[i]));
EXPECT_EQ(level[i], apm_->noise_suppression()->level());
@@ -1492,7 +1487,7 @@ TEST_F(ApmTest, VoiceDetection) {
VoiceDetection::kModerateLikelihood,
VoiceDetection::kHighLikelihood
};
- for (size_t i = 0; i < sizeof(likelihood)/sizeof(*likelihood); i++) {
+ for (size_t i = 0; i < arraysize(likelihood); i++) {
EXPECT_EQ(apm_->kNoError,
apm_->voice_detection()->set_likelihood(likelihood[i]));
EXPECT_EQ(likelihood[i], apm_->voice_detection()->likelihood());
@@ -1524,7 +1519,7 @@ TEST_F(ApmTest, VoiceDetection) {
AudioFrame::kVadPassive,
AudioFrame::kVadUnknown
};
- for (size_t i = 0; i < sizeof(activity)/sizeof(*activity); i++) {
+ for (size_t i = 0; i < arraysize(activity); i++) {
frame_->vad_activity_ = activity[i];
EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
EXPECT_EQ(activity[i], frame_->vad_activity_);
@@ -1550,7 +1545,7 @@ TEST_F(ApmTest, AllProcessingDisabledByDefault) {
}
TEST_F(ApmTest, NoProcessingWhenAllComponentsDisabled) {
- for (size_t i = 0; i < kSampleRatesSize; i++) {
+ for (size_t i = 0; i < arraysize(kSampleRates); i++) {
Init(kSampleRates[i], kSampleRates[i], kSampleRates[i], 2, 2, 2, false);
SetFrameTo(frame_, 1000, 2000);
AudioFrame frame_copy;
@@ -1602,7 +1597,7 @@ TEST_F(ApmTest, NoProcessingWhenAllComponentsDisabledFloat) {
TEST_F(ApmTest, IdenticalInputChannelsResultInIdenticalOutputChannels) {
EnableAllComponents();
- for (size_t i = 0; i < kProcessSampleRatesSize; i++) {
+ for (size_t i = 0; i < arraysize(kProcessSampleRates); i++) {
Init(kProcessSampleRates[i],
kProcessSampleRates[i],
kProcessSampleRates[i],
@@ -1751,7 +1746,8 @@ void ApmTest::ProcessDebugDump(const std::string& in_filename,
const audioproc::ReverseStream msg = event_msg.reverse_stream();
if (msg.channel_size() > 0) {
- ASSERT_EQ(revframe_->num_channels_, msg.channel_size());
+ ASSERT_EQ(revframe_->num_channels_,
+ static_cast<size_t>(msg.channel_size()));
for (int i = 0; i < msg.channel_size(); ++i) {
memcpy(revfloat_cb_->channels()[i],
msg.channel(i).data(),
@@ -1781,7 +1777,8 @@ void ApmTest::ProcessDebugDump(const std::string& in_filename,
}
if (msg.input_channel_size() > 0) {
- ASSERT_EQ(frame_->num_channels_, msg.input_channel_size());
+ ASSERT_EQ(frame_->num_channels_,
+ static_cast<size_t>(msg.input_channel_size()));
for (int i = 0; i < msg.input_channel_size(); ++i) {
memcpy(float_cb_->channels()[i],
msg.input_channel(i).data(),
@@ -1939,11 +1936,14 @@ TEST_F(ApmTest, FloatAndIntInterfacesGiveSimilarResults) {
if (test->num_input_channels() != test->num_output_channels())
continue;
- const int num_render_channels = test->num_reverse_channels();
- const int num_input_channels = test->num_input_channels();
- const int num_output_channels = test->num_output_channels();
- const int samples_per_channel = test->sample_rate() *
- AudioProcessing::kChunkSizeMs / 1000;
+ const size_t num_render_channels =
+ static_cast<size_t>(test->num_reverse_channels());
+ const size_t num_input_channels =
+ static_cast<size_t>(test->num_input_channels());
+ const size_t num_output_channels =
+ static_cast<size_t>(test->num_output_channels());
+ const size_t samples_per_channel = static_cast<size_t>(
+ test->sample_rate() * AudioProcessing::kChunkSizeMs / 1000);
Init(test->sample_rate(), test->sample_rate(), test->sample_rate(),
num_input_channels, num_output_channels, num_render_channels, true);
@@ -1984,7 +1984,7 @@ TEST_F(ApmTest, FloatAndIntInterfacesGiveSimilarResults) {
test->sample_rate(),
LayoutFromChannels(num_output_channels),
float_cb_->channels()));
- for (int j = 0; j < num_output_channels; ++j) {
+ for (size_t j = 0; j < num_output_channels; ++j) {
FloatToS16(float_cb_->channels()[j],
samples_per_channel,
output_cb.channels()[j]);
@@ -2017,7 +2017,7 @@ TEST_F(ApmTest, FloatAndIntInterfacesGiveSimilarResults) {
0.01);
// Reset in case of downmixing.
- frame_->num_channels_ = test->num_input_channels();
+ frame_->num_channels_ = static_cast<size_t>(test->num_input_channels());
}
rewind(far_file_);
rewind(near_file_);
@@ -2035,9 +2035,9 @@ TEST_F(ApmTest, Process) {
OpenFileAndReadMessage(ref_filename_, &ref_data);
} else {
// Write the desired tests to the protobuf reference file.
- for (size_t i = 0; i < kChannelsSize; i++) {
- for (size_t j = 0; j < kChannelsSize; j++) {
- for (size_t l = 0; l < kProcessSampleRatesSize; l++) {
+ for (size_t i = 0; i < arraysize(kChannels); i++) {
+ for (size_t j = 0; j < arraysize(kChannels); j++) {
+ for (size_t l = 0; l < arraysize(kProcessSampleRates); l++) {
audioproc::Test* test = ref_data.add_test();
test->set_num_reverse_channels(kChannels[i]);
test->set_num_input_channels(kChannels[j]);
@@ -2078,9 +2078,9 @@ TEST_F(ApmTest, Process) {
Init(test->sample_rate(),
test->sample_rate(),
test->sample_rate(),
- test->num_input_channels(),
- test->num_output_channels(),
- test->num_reverse_channels(),
+ static_cast<size_t>(test->num_input_channels()),
+ static_cast<size_t>(test->num_output_channels()),
+ static_cast<size_t>(test->num_reverse_channels()),
true);
int frame_count = 0;
@@ -2105,7 +2105,8 @@ TEST_F(ApmTest, Process) {
EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
// Ensure the frame was downmixed properly.
- EXPECT_EQ(test->num_output_channels(), frame_->num_channels_);
+ EXPECT_EQ(static_cast<size_t>(test->num_output_channels()),
+ frame_->num_channels_);
max_output_average += MaxAudioFrame(*frame_);
@@ -2135,7 +2136,7 @@ TEST_F(ApmTest, Process) {
ASSERT_EQ(frame_size, write_count);
// Reset in case of downmixing.
- frame_->num_channels_ = test->num_input_channels();
+ frame_->num_channels_ = static_cast<size_t>(test->num_input_channels());
frame_count++;
}
max_output_average /= frame_count;
@@ -2264,12 +2265,11 @@ TEST_F(ApmTest, NoErrorsWithKeyboardChannel) {
{AudioProcessing::kStereoAndKeyboard, AudioProcessing::kMono},
{AudioProcessing::kStereoAndKeyboard, AudioProcessing::kStereo},
};
- size_t channel_format_size = sizeof(cf) / sizeof(*cf);
rtc::scoped_ptr<AudioProcessing> ap(AudioProcessing::Create());
// Enable one component just to ensure some processing takes place.
ap->noise_suppression()->Enable(true);
- for (size_t i = 0; i < channel_format_size; ++i) {
+ for (size_t i = 0; i < arraysize(cf); ++i) {
const int in_rate = 44100;
const int out_rate = 48000;
ChannelBuffer<float> in_cb(SamplesFromRate(in_rate),
@@ -2296,7 +2296,7 @@ TEST_F(ApmTest, NoErrorsWithKeyboardChannel) {
// error results to the supplied accumulators.
void UpdateBestSNR(const float* ref,
const float* test,
- int length,
+ size_t length,
int expected_delay,
double* variance_acc,
double* sq_error_acc) {
@@ -2308,7 +2308,7 @@ void UpdateBestSNR(const float* ref,
++delay) {
double sq_error = 0;
double variance = 0;
- for (int i = 0; i < length - delay; ++i) {
+ for (size_t i = 0; i < length - delay; ++i) {
double error = test[i + delay] - ref[i];
sq_error += error * error;
variance += ref[i] * ref[i];
@@ -2360,14 +2360,10 @@ class AudioProcessingTest
static void SetUpTestCase() {
// Create all needed output reference files.
const int kNativeRates[] = {8000, 16000, 32000, 48000};
- const size_t kNativeRatesSize =
- sizeof(kNativeRates) / sizeof(*kNativeRates);
- const int kNumChannels[] = {1, 2};
- const size_t kNumChannelsSize =
- sizeof(kNumChannels) / sizeof(*kNumChannels);
- for (size_t i = 0; i < kNativeRatesSize; ++i) {
- for (size_t j = 0; j < kNumChannelsSize; ++j) {
- for (size_t k = 0; k < kNumChannelsSize; ++k) {
+ const size_t kNumChannels[] = {1, 2};
+ for (size_t i = 0; i < arraysize(kNativeRates); ++i) {
+ for (size_t j = 0; j < arraysize(kNumChannels); ++j) {
+ for (size_t k = 0; k < arraysize(kNumChannels); ++k) {
// The reference files always have matching input and output channels.
ProcessFormat(kNativeRates[i], kNativeRates[i], kNativeRates[i],
kNativeRates[i], kNumChannels[j], kNumChannels[j],
@@ -2388,10 +2384,10 @@ class AudioProcessingTest
int output_rate,
int reverse_input_rate,
int reverse_output_rate,
- int num_input_channels,
- int num_output_channels,
- int num_reverse_input_channels,
- int num_reverse_output_channels,
+ size_t num_input_channels,
+ size_t num_output_channels,
+ size_t num_reverse_input_channels,
+ size_t num_reverse_output_channels,
std::string output_file_prefix) {
Config config;
config.Set<ExperimentalAgc>(new ExperimentalAgc(false));
@@ -2466,18 +2462,19 @@ class AudioProcessingTest
// Dump forward output to file.
Interleave(out_cb.channels(), out_cb.num_frames(), out_cb.num_channels(),
float_data.get());
- int out_length = out_cb.num_channels() * out_cb.num_frames();
+ size_t out_length = out_cb.num_channels() * out_cb.num_frames();
- ASSERT_EQ(static_cast<size_t>(out_length),
+ ASSERT_EQ(out_length,
fwrite(float_data.get(), sizeof(float_data[0]),
out_length, out_file));
// Dump reverse output to file.
Interleave(rev_out_cb.channels(), rev_out_cb.num_frames(),
rev_out_cb.num_channels(), float_data.get());
- int rev_out_length = rev_out_cb.num_channels() * rev_out_cb.num_frames();
+ size_t rev_out_length =
+ rev_out_cb.num_channels() * rev_out_cb.num_frames();
- ASSERT_EQ(static_cast<size_t>(rev_out_length),
+ ASSERT_EQ(rev_out_length,
fwrite(float_data.get(), sizeof(float_data[0]), rev_out_length,
rev_out_file));
@@ -2513,9 +2510,8 @@ TEST_P(AudioProcessingTest, Formats) {
{2, 2, 1, 1},
{2, 2, 2, 2},
};
- size_t channel_format_size = sizeof(cf) / sizeof(*cf);
- for (size_t i = 0; i < channel_format_size; ++i) {
+ for (size_t i = 0; i < arraysize(cf); ++i) {
ProcessFormat(input_rate_, output_rate_, reverse_input_rate_,
reverse_output_rate_, cf[i].num_input, cf[i].num_output,
cf[i].num_reverse_input, cf[i].num_reverse_output, "out");
@@ -2565,8 +2561,8 @@ TEST_P(AudioProcessingTest, Formats) {
ASSERT_TRUE(out_file != NULL);
ASSERT_TRUE(ref_file != NULL);
- const int ref_length = SamplesFromRate(ref_rate) * out_num;
- const int out_length = SamplesFromRate(out_rate) * out_num;
+ const size_t ref_length = SamplesFromRate(ref_rate) * out_num;
+ const size_t out_length = SamplesFromRate(out_rate) * out_num;
// Data from the reference file.
rtc::scoped_ptr<float[]> ref_data(new float[ref_length]);
// Data from the output file.
@@ -2606,8 +2602,9 @@ TEST_P(AudioProcessingTest, Formats) {
if (out_rate != ref_rate) {
// Resample the output back to its internal processing rate if
// necessary.
- ASSERT_EQ(ref_length, resampler.Resample(out_ptr, out_length,
- cmp_data.get(), ref_length));
+ ASSERT_EQ(ref_length,
+ static_cast<size_t>(resampler.Resample(
+ out_ptr, out_length, cmp_data.get(), ref_length)));
out_ptr = cmp_data.get();
}
@@ -2752,9 +2749,5 @@ INSTANTIATE_TEST_CASE_P(
std::tr1::make_tuple(16000, 16000, 16000, 16000, 0, 0)));
#endif
-// TODO(henrike): re-implement functionality lost when removing the old main
-// function. See
-// https://code.google.com/p/webrtc/issues/detail?id=1981
-
} // namespace
} // namespace webrtc