author     pkasting <pkasting@chromium.org>  2016-01-08 13:50:27 -0800
committer  Commit bot <commit-bot@chromium.org>  2016-01-08 21:50:32 +0000
commit     25702cb1628941427fa55e528f53483f239ae011 (patch)
tree       508edfcb88a7099815dd335e1ea79ab265463d6a
parent     5de688ed341fc7b6a558c1d44b489af14646c2e4 (diff)
Misc. small cleanups.
* Better param names
* Avoid using negative values for (bogus) placeholder channel counts (mostly
  in tests). Since channels will be changing to size_t, negative values will
  be illegal; it's sufficient to use 0 in these cases.
* Use arraysize()
* Use size_t for counting frames, samples, blocks, buffers, and bytes -- most
  of these are already size_t in most places; this just fixes some stragglers
* reinterpret_cast<int64_t>(void*) is not necessarily safe; use uintptr_t
  instead
* Remove unnecessary code, e.g. dead code, needlessly long/repetitive code,
  or function overrides that exactly match the base definition
* Fix indenting
* Use uint32_t for timestamps (matching how it's already a uint32_t in most
  places)
* Spelling
* RTC_CHECK_EQ(expected, actual)
* Rewrap
* Use .empty()
* Be more pedantic about matching int/int32_t
* Remove pointless consts on input parameters to functions
* Add missing sanity checks

All this was found in the course of constructing
https://codereview.webrtc.org/1316523002/ , and is being landed separately
first.

BUG=none
TEST=none

Review URL: https://codereview.webrtc.org/1534193008

Cr-Commit-Position: refs/heads/master@{#11191}
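Two of the patterns above are worth a concrete illustration. The following is
a minimal standalone sketch (not WebRTC code) of the arraysize() idiom,
mirroring webrtc/base/arraysize.h, and of checking pointer alignment through
uintptr_t, which, unlike int64_t, is guaranteed to round-trip an object
pointer:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>

    // Compile-time element count: sizeof() of a reference to a char array
    // of length N. Unlike sizeof(a) / sizeof(*a), it fails to compile when
    // handed a pointer instead of an array.
    template <typename T, size_t N>
    char (&ArraySizeHelper(T (&array)[N]))[N];
    #define arraysize(array) (sizeof(ArraySizeHelper(array)))

    int main() {
      // size_t loop index bounded by arraysize(), as in the converted tests.
      const int kSampleRates[] = {8000, 16000, 32000, 44100, 48000};
      for (size_t i = 0; i < arraysize(kSampleRates); ++i)
        printf("%d Hz\n", kSampleRates[i]);

      // Alignment check via uintptr_t: reinterpret_cast of a pointer to
      // int64_t is only implementation-defined, while uintptr_t is
      // specified to hold any object pointer.
      void* buffer = malloc(64);
      const uintptr_t ptr_value = reinterpret_cast<uintptr_t>(buffer);
      printf("32-byte aligned: %s\n", ptr_value % 32 == 0 ? "yes" : "no");
      free(buffer);
      return 0;
    }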
-rw-r--r--  talk/media/base/codec.cc | 40
-rw-r--r--  talk/media/base/codec.h | 16
-rw-r--r--  webrtc/audio/audio_receive_stream_unittest.cc | 2
-rw-r--r--  webrtc/audio/audio_send_stream_unittest.cc | 2
-rw-r--r--  webrtc/common_audio/audio_converter_unittest.cc | 21
-rw-r--r--  webrtc/common_audio/blocker_unittest.cc | 69
-rw-r--r--  webrtc/common_audio/lapped_transform_unittest.cc | 30
-rw-r--r--  webrtc/common_audio/real_fourier.cc | 2
-rw-r--r--  webrtc/common_audio/real_fourier.h | 2
-rw-r--r--  webrtc/common_audio/real_fourier_unittest.cc | 8
-rw-r--r--  webrtc/common_audio/wav_file.cc | 16
-rw-r--r--  webrtc/common_audio/wav_file.h | 14
-rw-r--r--  webrtc/common_audio/wav_file_unittest.cc | 16
-rw-r--r--  webrtc/common_audio/wav_header.cc | 47
-rw-r--r--  webrtc/common_audio/wav_header.h | 12
-rw-r--r--  webrtc/common_audio/wav_header_unittest.cc | 18
-rw-r--r--  webrtc/modules/audio_coding/acm2/acm_codec_database.cc | 4
-rw-r--r--  webrtc/modules/audio_coding/acm2/acm_resampler.cc | 3
-rw-r--r--  webrtc/modules/audio_coding/codecs/g711/audio_encoder_pcm.cc | 7
-rw-r--r--  webrtc/modules/audio_coding/codecs/g711/audio_encoder_pcm.h | 6
-rw-r--r--  webrtc/modules/audio_coding/codecs/isac/fix/test/isac_speed_test.cc | 2
-rw-r--r--  webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.cc | 15
-rw-r--r--  webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.h | 4
-rw-r--r--  webrtc/modules/audio_coding/codecs/opus/opus_speed_test.cc | 2
-rw-r--r--  webrtc/modules/audio_coding/codecs/pcm16b/audio_encoder_pcm16b.cc | 2
-rw-r--r--  webrtc/modules/audio_coding/codecs/pcm16b/audio_encoder_pcm16b.h | 2
-rw-r--r--  webrtc/modules/audio_coding/codecs/tools/audio_codec_speed_test.cc | 9
-rw-r--r--  webrtc/modules/audio_coding/codecs/tools/audio_codec_speed_test.h | 4
-rw-r--r--  webrtc/modules/audio_coding/include/audio_coding_module.h | 4
-rw-r--r--  webrtc/modules/audio_coding/test/opus_test.cc | 21
-rw-r--r--  webrtc/modules/audio_coding/test/opus_test.h | 7
-rw-r--r--  webrtc/modules/audio_device/dummy/file_audio_device.cc | 12
-rw-r--r--  webrtc/modules/audio_device/test/audio_device_test_api.cc | 23
-rw-r--r--  webrtc/modules/audio_device/test/func_test_manager.cc | 26
-rw-r--r--  webrtc/modules/audio_device/test/func_test_manager.h | 22
-rw-r--r--  webrtc/modules/audio_processing/audio_buffer.cc | 2
-rw-r--r--  webrtc/modules/audio_processing/audio_processing_impl_locking_unittest.cc | 2
-rw-r--r--  webrtc/modules/audio_processing/beamformer/nonlinear_beamformer.cc | 10
-rw-r--r--  webrtc/modules/audio_processing/common.h | 2
-rw-r--r--  webrtc/modules/audio_processing/echo_cancellation_impl.cc | 7
-rw-r--r--  webrtc/modules/audio_processing/echo_control_mobile_impl.cc | 7
-rw-r--r--  webrtc/modules/audio_processing/gain_control_impl.cc | 4
-rw-r--r--  webrtc/modules/audio_processing/noise_suppression_impl.cc | 2
-rw-r--r--  webrtc/modules/audio_processing/test/audio_processing_unittest.cc | 128
-rw-r--r--  webrtc/modules/audio_processing/test/test_utils.cc | 6
-rw-r--r--  webrtc/modules/audio_processing/test/test_utils.h | 8
-rw-r--r--  webrtc/modules/audio_processing/test/unpack.cc | 15
-rw-r--r--  webrtc/modules/media_file/media_file_utility.cc | 334
-rw-r--r--  webrtc/modules/media_file/media_file_utility.h | 22
-rw-r--r--  webrtc/system_wrappers/include/aligned_array.h | 12
-rw-r--r--  webrtc/system_wrappers/source/aligned_array_unittest.cc | 2
51 files changed, 445 insertions(+), 608 deletions(-)
diff --git a/talk/media/base/codec.cc b/talk/media/base/codec.cc
index 5fbae2c45e..101d2beaf3 100644
--- a/talk/media/base/codec.cc
+++ b/talk/media/base/codec.cc
@@ -163,13 +163,15 @@ void Codec::IntersectFeedbackParams(const Codec& other) {
feedback_params.Intersect(other.feedback_params);
}
-AudioCodec::AudioCodec(int pt,
- const std::string& nm,
- int cr,
- int br,
- int cs,
- int pr)
- : Codec(pt, nm, cr, pr), bitrate(br), channels(cs) {
+AudioCodec::AudioCodec(int id,
+ const std::string& name,
+ int clockrate,
+ int bitrate,
+ int channels,
+ int preference)
+ : Codec(id, name, clockrate, preference),
+ bitrate(bitrate),
+ channels(channels) {
}
AudioCodec::AudioCodec() : Codec(), bitrate(0), channels(0) {
@@ -219,20 +221,20 @@ std::string VideoCodec::ToString() const {
return os.str();
}
-VideoCodec::VideoCodec(int pt,
- const std::string& nm,
- int w,
- int h,
- int fr,
- int pr)
- : Codec(pt, nm, kVideoCodecClockrate, pr),
- width(w),
- height(h),
- framerate(fr) {
+VideoCodec::VideoCodec(int id,
+ const std::string& name,
+ int width,
+ int height,
+ int framerate,
+ int preference)
+ : Codec(id, name, kVideoCodecClockrate, preference),
+ width(width),
+ height(height),
+ framerate(framerate) {
}
-VideoCodec::VideoCodec(int pt, const std::string& nm)
- : Codec(pt, nm, kVideoCodecClockrate, 0),
+VideoCodec::VideoCodec(int id, const std::string& name)
+ : Codec(id, name, kVideoCodecClockrate, 0),
width(0),
height(0),
framerate(0) {
diff --git a/talk/media/base/codec.h b/talk/media/base/codec.h
index ea6c5415dc..fcdb6ea512 100644
--- a/talk/media/base/codec.h
+++ b/talk/media/base/codec.h
@@ -131,7 +131,12 @@ struct AudioCodec : public Codec {
int channels;
// Creates a codec with the given parameters.
- AudioCodec(int pt, const std::string& nm, int cr, int br, int cs, int pr);
+ AudioCodec(int id,
+ const std::string& name,
+ int clockrate,
+ int bitrate,
+ int channels,
+ int preference);
// Creates an empty codec.
AudioCodec();
AudioCodec(const AudioCodec& c);
@@ -161,8 +166,13 @@ struct VideoCodec : public Codec {
int framerate;
// Creates a codec with the given parameters.
- VideoCodec(int pt, const std::string& nm, int w, int h, int fr, int pr);
- VideoCodec(int pt, const std::string& nm);
+ VideoCodec(int id,
+ const std::string& name,
+ int width,
+ int height,
+ int framerate,
+ int preference);
+ VideoCodec(int id, const std::string& name);
// Creates an empty codec.
VideoCodec();
VideoCodec(const VideoCodec& c);
diff --git a/webrtc/audio/audio_receive_stream_unittest.cc b/webrtc/audio/audio_receive_stream_unittest.cc
index bfd8dcaa33..01e8a2b76f 100644
--- a/webrtc/audio/audio_receive_stream_unittest.cc
+++ b/webrtc/audio/audio_receive_stream_unittest.cc
@@ -49,7 +49,7 @@ const unsigned int kSpeechOutputLevel = 99;
const CallStatistics kCallStats = {
345, 678, 901, 234, -12, 3456, 7890, 567, 890, 123};
const CodecInst kCodecInst = {
- 123, "codec_name_recv", 96000, -187, -198, -103};
+ 123, "codec_name_recv", 96000, -187, 0, -103};
const NetworkStatistics kNetworkStats = {
123, 456, false, 0, 0, 789, 12, 345, 678, 901, -1, -1, -1, -1, -1, 0};
const AudioDecodingCallStats kAudioDecodeStats = MakeAudioDecodeStatsForTest();
diff --git a/webrtc/audio/audio_send_stream_unittest.cc b/webrtc/audio/audio_send_stream_unittest.cc
index 3b1b44af34..08ff9a6de9 100644
--- a/webrtc/audio/audio_send_stream_unittest.cc
+++ b/webrtc/audio/audio_send_stream_unittest.cc
@@ -43,7 +43,7 @@ const int kEchoReturnLossEnhancement = 101;
const unsigned int kSpeechInputLevel = 96;
const CallStatistics kCallStats = {
1345, 1678, 1901, 1234, 112, 13456, 17890, 1567, -1890, -1123};
-const CodecInst kCodecInst = {-121, "codec_name_send", 48000, -231, -451, -671};
+const CodecInst kCodecInst = {-121, "codec_name_send", 48000, -231, 0, -671};
const ReportBlock kReportBlock = {456, 780, 123, 567, 890, 132, 143, 13354};
const int kTelephoneEventPayloadType = 123;
const uint8_t kTelephoneEventCode = 45;
diff --git a/webrtc/common_audio/audio_converter_unittest.cc b/webrtc/common_audio/audio_converter_unittest.cc
index c85b96e285..e373d78b46 100644
--- a/webrtc/common_audio/audio_converter_unittest.cc
+++ b/webrtc/common_audio/audio_converter_unittest.cc
@@ -13,6 +13,7 @@
#include <vector>
#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/base/arraysize.h"
#include "webrtc/base/format_macros.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/common_audio/audio_converter.h"
@@ -24,11 +25,11 @@ namespace webrtc {
typedef rtc::scoped_ptr<ChannelBuffer<float>> ScopedBuffer;
// Sets the signal value to increase by |data| with every sample.
-ScopedBuffer CreateBuffer(const std::vector<float>& data, int frames) {
+ScopedBuffer CreateBuffer(const std::vector<float>& data, size_t frames) {
const int num_channels = static_cast<int>(data.size());
ScopedBuffer sb(new ChannelBuffer<float>(frames, num_channels));
for (int i = 0; i < num_channels; ++i)
- for (int j = 0; j < frames; ++j)
+ for (size_t j = 0; j < frames; ++j)
sb->channels()[i][j] = data[i] * j;
return sb;
}
@@ -96,8 +97,8 @@ void RunAudioConverterTest(int src_channels,
const float dst_left = resampling_factor * kSrcLeft;
const float dst_right = resampling_factor * kSrcRight;
const float dst_mono = (dst_left + dst_right) / 2;
- const int src_frames = src_sample_rate_hz / 100;
- const int dst_frames = dst_sample_rate_hz / 100;
+ const size_t src_frames = static_cast<size_t>(src_sample_rate_hz / 100);
+ const size_t dst_frames = static_cast<size_t>(dst_sample_rate_hz / 100);
std::vector<float> src_data(1, kSrcLeft);
if (src_channels == 2)
@@ -141,13 +142,13 @@ void RunAudioConverterTest(int src_channels,
TEST(AudioConverterTest, ConversionsPassSNRThreshold) {
const int kSampleRates[] = {8000, 16000, 32000, 44100, 48000};
- const int kSampleRatesSize = sizeof(kSampleRates) / sizeof(*kSampleRates);
const int kChannels[] = {1, 2};
- const int kChannelsSize = sizeof(kChannels) / sizeof(*kChannels);
- for (int src_rate = 0; src_rate < kSampleRatesSize; ++src_rate) {
- for (int dst_rate = 0; dst_rate < kSampleRatesSize; ++dst_rate) {
- for (int src_channel = 0; src_channel < kChannelsSize; ++src_channel) {
- for (int dst_channel = 0; dst_channel < kChannelsSize; ++dst_channel) {
+ for (size_t src_rate = 0; src_rate < arraysize(kSampleRates); ++src_rate) {
+ for (size_t dst_rate = 0; dst_rate < arraysize(kSampleRates); ++dst_rate) {
+ for (size_t src_channel = 0; src_channel < arraysize(kChannels);
+ ++src_channel) {
+ for (size_t dst_channel = 0; dst_channel < arraysize(kChannels);
+ ++dst_channel) {
RunAudioConverterTest(kChannels[src_channel], kSampleRates[src_rate],
kChannels[dst_channel], kSampleRates[dst_rate]);
}
diff --git a/webrtc/common_audio/blocker_unittest.cc b/webrtc/common_audio/blocker_unittest.cc
index 397e269239..065c09ed7f 100644
--- a/webrtc/common_audio/blocker_unittest.cc
+++ b/webrtc/common_audio/blocker_unittest.cc
@@ -11,6 +11,7 @@
#include "webrtc/common_audio/blocker.h"
#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/base/arraysize.h"
namespace {
@@ -56,16 +57,16 @@ namespace webrtc {
class BlockerTest : public ::testing::Test {
protected:
void RunTest(Blocker* blocker,
- int chunk_size,
- int num_frames,
+ size_t chunk_size,
+ size_t num_frames,
const float* const* input,
float* const* input_chunk,
float* const* output,
float* const* output_chunk,
int num_input_channels,
int num_output_channels) {
- int start = 0;
- int end = chunk_size - 1;
+ size_t start = 0;
+ size_t end = chunk_size - 1;
while (end < num_frames) {
CopyTo(input_chunk, 0, start, num_input_channels, chunk_size, input);
blocker->ProcessChunk(input_chunk,
@@ -75,17 +76,17 @@ class BlockerTest : public ::testing::Test {
output_chunk);
CopyTo(output, start, 0, num_output_channels, chunk_size, output_chunk);
- start = start + chunk_size;
- end = end + chunk_size;
+ start += chunk_size;
+ end += chunk_size;
}
}
void ValidateSignalEquality(const float* const* expected,
const float* const* actual,
int num_channels,
- int num_frames) {
+ size_t num_frames) {
for (int i = 0; i < num_channels; ++i) {
- for (int j = 0; j < num_frames; ++j) {
+ for (size_t j = 0; j < num_frames; ++j) {
EXPECT_FLOAT_EQ(expected[i][j], actual[i][j]);
}
}
@@ -93,10 +94,10 @@ class BlockerTest : public ::testing::Test {
void ValidateInitialDelay(const float* const* output,
int num_channels,
- int num_frames,
- int initial_delay) {
+ size_t num_frames,
+ size_t initial_delay) {
for (int i = 0; i < num_channels; ++i) {
- for (int j = 0; j < num_frames; ++j) {
+ for (size_t j = 0; j < num_frames; ++j) {
if (j < initial_delay) {
EXPECT_FLOAT_EQ(output[i][j], 0.f);
} else {
@@ -107,10 +108,10 @@ class BlockerTest : public ::testing::Test {
}
static void CopyTo(float* const* dst,
- int start_index_dst,
- int start_index_src,
+ size_t start_index_dst,
+ size_t start_index_src,
int num_channels,
- int num_frames,
+ size_t num_frames,
const float* const* src) {
for (int i = 0; i < num_channels; ++i) {
memcpy(&dst[i][start_index_dst],
@@ -123,10 +124,10 @@ class BlockerTest : public ::testing::Test {
TEST_F(BlockerTest, TestBlockerMutuallyPrimeChunkandBlockSize) {
const int kNumInputChannels = 3;
const int kNumOutputChannels = 2;
- const int kNumFrames = 10;
- const int kBlockSize = 4;
- const int kChunkSize = 5;
- const int kShiftAmount = 2;
+ const size_t kNumFrames = 10;
+ const size_t kBlockSize = 4;
+ const size_t kChunkSize = 5;
+ const size_t kShiftAmount = 2;
const float kInput[kNumInputChannels][kNumFrames] = {
{1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
@@ -176,10 +177,10 @@ TEST_F(BlockerTest, TestBlockerMutuallyPrimeChunkandBlockSize) {
TEST_F(BlockerTest, TestBlockerMutuallyPrimeShiftAndBlockSize) {
const int kNumInputChannels = 3;
const int kNumOutputChannels = 2;
- const int kNumFrames = 12;
- const int kBlockSize = 4;
- const int kChunkSize = 6;
- const int kShiftAmount = 3;
+ const size_t kNumFrames = 12;
+ const size_t kBlockSize = 4;
+ const size_t kChunkSize = 6;
+ const size_t kShiftAmount = 3;
const float kInput[kNumInputChannels][kNumFrames] = {
{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
@@ -229,10 +230,10 @@ TEST_F(BlockerTest, TestBlockerMutuallyPrimeShiftAndBlockSize) {
TEST_F(BlockerTest, TestBlockerNoOverlap) {
const int kNumInputChannels = 3;
const int kNumOutputChannels = 2;
- const int kNumFrames = 12;
- const int kBlockSize = 4;
- const int kChunkSize = 4;
- const int kShiftAmount = 4;
+ const size_t kNumFrames = 12;
+ const size_t kBlockSize = 4;
+ const size_t kChunkSize = 4;
+ const size_t kShiftAmount = 4;
const float kInput[kNumInputChannels][kNumFrames] = {
{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
@@ -282,19 +283,19 @@ TEST_F(BlockerTest, TestBlockerNoOverlap) {
TEST_F(BlockerTest, InitialDelaysAreMinimum) {
const int kNumInputChannels = 3;
const int kNumOutputChannels = 2;
- const int kNumFrames = 1280;
- const int kChunkSize[] =
+ const size_t kNumFrames = 1280;
+ const size_t kChunkSize[] =
{80, 80, 80, 80, 80, 80, 160, 160, 160, 160, 160, 160};
- const int kBlockSize[] =
+ const size_t kBlockSize[] =
{64, 64, 64, 128, 128, 128, 128, 128, 128, 256, 256, 256};
- const int kShiftAmount[] =
+ const size_t kShiftAmount[] =
{16, 32, 64, 32, 64, 128, 32, 64, 128, 64, 128, 256};
- const int kInitialDelay[] =
+ const size_t kInitialDelay[] =
{48, 48, 48, 112, 112, 112, 96, 96, 96, 224, 224, 224};
float input[kNumInputChannels][kNumFrames];
for (int i = 0; i < kNumInputChannels; ++i) {
- for (int j = 0; j < kNumFrames; ++j) {
+ for (size_t j = 0; j < kNumFrames; ++j) {
input[i][j] = i + 1;
}
}
@@ -305,9 +306,9 @@ TEST_F(BlockerTest, InitialDelaysAreMinimum) {
CopyBlockerCallback callback;
- for (size_t i = 0; i < (sizeof(kChunkSize) / sizeof(*kChunkSize)); ++i) {
+ for (size_t i = 0; i < arraysize(kChunkSize); ++i) {
rtc::scoped_ptr<float[]> window(new float[kBlockSize[i]]);
- for (int j = 0; j < kBlockSize[i]; ++j) {
+ for (size_t j = 0; j < kBlockSize[i]; ++j) {
window[j] = 1.f;
}
diff --git a/webrtc/common_audio/lapped_transform_unittest.cc b/webrtc/common_audio/lapped_transform_unittest.cc
index f688cc240a..eb1c80f281 100644
--- a/webrtc/common_audio/lapped_transform_unittest.cc
+++ b/webrtc/common_audio/lapped_transform_unittest.cc
@@ -36,12 +36,12 @@ class NoopCallback : public webrtc::LappedTransform::Callback {
++block_num_;
}
- int block_num() {
+ size_t block_num() {
return block_num_;
}
private:
- int block_num_;
+ size_t block_num_;
};
class FftCheckerCallback : public webrtc::LappedTransform::Callback {
@@ -69,12 +69,12 @@ class FftCheckerCallback : public webrtc::LappedTransform::Callback {
}
}
- int block_num() {
+ size_t block_num() {
return block_num_;
}
private:
- int block_num_;
+ size_t block_num_;
};
void SetFloatArray(float value, int rows, int cols, float* const* array) {
@@ -91,9 +91,9 @@ namespace webrtc {
TEST(LappedTransformTest, Windowless) {
const int kChannels = 3;
- const int kChunkLength = 512;
- const int kBlockLength = 64;
- const int kShiftAmount = 64;
+ const size_t kChunkLength = 512;
+ const size_t kBlockLength = 64;
+ const size_t kShiftAmount = 64;
NoopCallback noop;
// Rectangular window.
@@ -119,7 +119,7 @@ TEST(LappedTransformTest, Windowless) {
trans.ProcessChunk(in_chunk, out_chunk);
for (int i = 0; i < kChannels; ++i) {
- for (int j = 0; j < kChunkLength; ++j) {
+ for (size_t j = 0; j < kChunkLength; ++j) {
ASSERT_NEAR(out_chunk[i][j], 2.0f, 1e-5f);
}
}
@@ -128,9 +128,9 @@ TEST(LappedTransformTest, Windowless) {
}
TEST(LappedTransformTest, IdentityProcessor) {
- const int kChunkLength = 512;
- const int kBlockLength = 64;
- const int kShiftAmount = 32;
+ const size_t kChunkLength = 512;
+ const size_t kBlockLength = 64;
+ const size_t kShiftAmount = 32;
NoopCallback noop;
// Identity window for |overlap = block_size / 2|.
@@ -149,7 +149,7 @@ TEST(LappedTransformTest, IdentityProcessor) {
trans.ProcessChunk(&in_chunk, &out_chunk);
- for (int i = 0; i < kChunkLength; ++i) {
+ for (size_t i = 0; i < kChunkLength; ++i) {
ASSERT_NEAR(out_chunk[i],
(i < kBlockLength - kShiftAmount) ? 0.0f : 2.0f,
1e-5f);
@@ -159,8 +159,8 @@ TEST(LappedTransformTest, IdentityProcessor) {
}
TEST(LappedTransformTest, Callbacks) {
- const int kChunkLength = 512;
- const int kBlockLength = 64;
+ const size_t kChunkLength = 512;
+ const size_t kBlockLength = 64;
FftCheckerCallback call;
// Rectangular window.
@@ -183,7 +183,7 @@ TEST(LappedTransformTest, Callbacks) {
}
TEST(LappedTransformTest, chunk_length) {
- const int kBlockLength = 64;
+ const size_t kBlockLength = 64;
FftCheckerCallback call;
const float window[kBlockLength] = {};
diff --git a/webrtc/common_audio/real_fourier.cc b/webrtc/common_audio/real_fourier.cc
index fef3c60c4c..55ec49cba2 100644
--- a/webrtc/common_audio/real_fourier.cc
+++ b/webrtc/common_audio/real_fourier.cc
@@ -19,7 +19,7 @@ namespace webrtc {
using std::complex;
-const int RealFourier::kFftBufferAlignment = 32;
+const size_t RealFourier::kFftBufferAlignment = 32;
rtc::scoped_ptr<RealFourier> RealFourier::Create(int fft_order) {
#if defined(RTC_USE_OPENMAX_DL)
diff --git a/webrtc/common_audio/real_fourier.h b/webrtc/common_audio/real_fourier.h
index ce3bbff679..0be56a58b0 100644
--- a/webrtc/common_audio/real_fourier.h
+++ b/webrtc/common_audio/real_fourier.h
@@ -30,7 +30,7 @@ class RealFourier {
fft_cplx_scoper;
// The alignment required for all input and output buffers, in bytes.
- static const int kFftBufferAlignment;
+ static const size_t kFftBufferAlignment;
// Construct a wrapper instance for the given input order, which must be
// between 1 and kMaxFftOrder, inclusively.
diff --git a/webrtc/common_audio/real_fourier_unittest.cc b/webrtc/common_audio/real_fourier_unittest.cc
index 5c8542138b..eb5880ee8a 100644
--- a/webrtc/common_audio/real_fourier_unittest.cc
+++ b/webrtc/common_audio/real_fourier_unittest.cc
@@ -26,15 +26,15 @@ TEST(RealFourierStaticsTest, AllocatorAlignment) {
RealFourier::fft_real_scoper real;
real = RealFourier::AllocRealBuffer(3);
ASSERT_TRUE(real.get() != nullptr);
- int64_t ptr_value = reinterpret_cast<int64_t>(real.get());
- EXPECT_EQ(0, ptr_value % RealFourier::kFftBufferAlignment);
+ uintptr_t ptr_value = reinterpret_cast<uintptr_t>(real.get());
+ EXPECT_EQ(0u, ptr_value % RealFourier::kFftBufferAlignment);
}
{
RealFourier::fft_cplx_scoper cplx;
cplx = RealFourier::AllocCplxBuffer(3);
ASSERT_TRUE(cplx.get() != nullptr);
- int64_t ptr_value = reinterpret_cast<int64_t>(cplx.get());
- EXPECT_EQ(0, ptr_value % RealFourier::kFftBufferAlignment);
+ uintptr_t ptr_value = reinterpret_cast<uintptr_t>(cplx.get());
+ EXPECT_EQ(0u, ptr_value % RealFourier::kFftBufferAlignment);
}
}
diff --git a/webrtc/common_audio/wav_file.cc b/webrtc/common_audio/wav_file.cc
index ac11bcdd7b..27d335c543 100644
--- a/webrtc/common_audio/wav_file.cc
+++ b/webrtc/common_audio/wav_file.cc
@@ -24,7 +24,7 @@ namespace webrtc {
// We write 16-bit PCM WAV files.
static const WavFormat kWavFormat = kWavFormatPcm;
-static const int kBytesPerSample = 2;
+static const size_t kBytesPerSample = 2;
// Doesn't take ownership of the file handle and won't close it.
class ReadableWavFile : public ReadableWav {
@@ -52,7 +52,7 @@ WavReader::WavReader(const std::string& filename)
ReadableWavFile readable(file_handle_);
WavFormat format;
- int bytes_per_sample;
+ size_t bytes_per_sample;
RTC_CHECK(ReadWavHeader(&readable, &num_channels_, &sample_rate_, &format,
&bytes_per_sample, &num_samples_));
num_samples_remaining_ = num_samples_;
@@ -69,14 +69,13 @@ size_t WavReader::ReadSamples(size_t num_samples, int16_t* samples) {
#error "Need to convert samples to big-endian when reading from WAV file"
#endif
// There could be metadata after the audio; ensure we don't read it.
- num_samples = std::min(rtc::checked_cast<uint32_t>(num_samples),
- num_samples_remaining_);
+ num_samples = std::min(num_samples, num_samples_remaining_);
const size_t read =
fread(samples, sizeof(*samples), num_samples, file_handle_);
// If we didn't read what was requested, ensure we've reached the EOF.
RTC_CHECK(read == num_samples || feof(file_handle_));
RTC_CHECK_LE(read, num_samples_remaining_);
- num_samples_remaining_ -= rtc::checked_cast<uint32_t>(read);
+ num_samples_remaining_ -= read;
return read;
}
@@ -126,9 +125,8 @@ void WavWriter::WriteSamples(const int16_t* samples, size_t num_samples) {
const size_t written =
fwrite(samples, sizeof(*samples), num_samples, file_handle_);
RTC_CHECK_EQ(num_samples, written);
- num_samples_ += static_cast<uint32_t>(written);
- RTC_CHECK(written <= std::numeric_limits<uint32_t>::max() ||
- num_samples_ >= written); // detect uint32_t overflow
+ num_samples_ += written;
+ RTC_CHECK(num_samples_ >= written); // detect size_t overflow
}
void WavWriter::WriteSamples(const float* samples, size_t num_samples) {
@@ -178,6 +176,6 @@ int rtc_WavNumChannels(const rtc_WavWriter* wf) {
return reinterpret_cast<const webrtc::WavWriter*>(wf)->num_channels();
}
-uint32_t rtc_WavNumSamples(const rtc_WavWriter* wf) {
+size_t rtc_WavNumSamples(const rtc_WavWriter* wf) {
return reinterpret_cast<const webrtc::WavWriter*>(wf)->num_samples();
}
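
Aside: the RTC_CHECK(num_samples_ >= written) added above is the standard
wraparound test for unsigned arithmetic: for unsigned a and b, a + b ends up
smaller than b exactly when the addition wrapped. A minimal standalone sketch
(not WebRTC code):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    int main() {
      size_t num_samples = SIZE_MAX - 1;  // pretend the counter is near max
      const size_t written = 5;
      num_samples += written;             // wraps around to 3
      if (num_samples < written)
        printf("size_t overflow detected\n");
      return 0;
    }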
diff --git a/webrtc/common_audio/wav_file.h b/webrtc/common_audio/wav_file.h
index 42b0618e9c..eb2ce1e31d 100644
--- a/webrtc/common_audio/wav_file.h
+++ b/webrtc/common_audio/wav_file.h
@@ -28,7 +28,7 @@ class WavFile {
virtual int sample_rate() const = 0;
virtual int num_channels() const = 0;
- virtual uint32_t num_samples() const = 0;
+ virtual size_t num_samples() const = 0;
// Returns a human-readable string containing the audio format.
std::string FormatAsString() const;
@@ -52,13 +52,13 @@ class WavWriter final : public WavFile {
int sample_rate() const override { return sample_rate_; }
int num_channels() const override { return num_channels_; }
- uint32_t num_samples() const override { return num_samples_; }
+ size_t num_samples() const override { return num_samples_; }
private:
void Close();
const int sample_rate_;
const int num_channels_;
- uint32_t num_samples_; // Total number of samples written to file.
+ size_t num_samples_; // Total number of samples written to file.
FILE* file_handle_; // Output file, owned by this class
RTC_DISALLOW_COPY_AND_ASSIGN(WavWriter);
@@ -80,14 +80,14 @@ class WavReader final : public WavFile {
int sample_rate() const override { return sample_rate_; }
int num_channels() const override { return num_channels_; }
- uint32_t num_samples() const override { return num_samples_; }
+ size_t num_samples() const override { return num_samples_; }
private:
void Close();
int sample_rate_;
int num_channels_;
- uint32_t num_samples_; // Total number of samples in the file.
- uint32_t num_samples_remaining_;
+ size_t num_samples_; // Total number of samples in the file.
+ size_t num_samples_remaining_;
FILE* file_handle_; // Input file, owned by this class.
RTC_DISALLOW_COPY_AND_ASSIGN(WavReader);
@@ -109,7 +109,7 @@ void rtc_WavWriteSamples(rtc_WavWriter* wf,
size_t num_samples);
int rtc_WavSampleRate(const rtc_WavWriter* wf);
int rtc_WavNumChannels(const rtc_WavWriter* wf);
-uint32_t rtc_WavNumSamples(const rtc_WavWriter* wf);
+size_t rtc_WavNumSamples(const rtc_WavWriter* wf);
#ifdef __cplusplus
} // extern "C"
diff --git a/webrtc/common_audio/wav_file_unittest.cc b/webrtc/common_audio/wav_file_unittest.cc
index 78b0a34de9..3732079f4f 100644
--- a/webrtc/common_audio/wav_file_unittest.cc
+++ b/webrtc/common_audio/wav_file_unittest.cc
@@ -26,7 +26,7 @@ static const float kSamples[] = {0.0, 10.0, 4e4, -1e9};
// Write a tiny WAV file with the C++ interface and verify the result.
TEST(WavWriterTest, CPP) {
const std::string outfile = test::OutputPath() + "wavtest1.wav";
- static const uint32_t kNumSamples = 3;
+ static const size_t kNumSamples = 3;
{
WavWriter w(outfile, 14099, 1);
EXPECT_EQ(14099, w.sample_rate());
@@ -64,10 +64,10 @@ TEST(WavWriterTest, CPP) {
0xff, 0x7f, // third sample: 4e4 (saturated)
kMetadata[0], kMetadata[1],
};
- static const int kContentSize =
+ static const size_t kContentSize =
kWavHeaderSize + kNumSamples * sizeof(int16_t) + sizeof(kMetadata);
static_assert(sizeof(kExpectedContents) == kContentSize, "content size");
- EXPECT_EQ(size_t(kContentSize), test::GetFileSize(outfile));
+ EXPECT_EQ(kContentSize, test::GetFileSize(outfile));
FILE* f = fopen(outfile.c_str(), "rb");
ASSERT_TRUE(f);
uint8_t contents[kContentSize];
@@ -95,7 +95,7 @@ TEST(WavWriterTest, C) {
EXPECT_EQ(11904, rtc_WavSampleRate(w));
EXPECT_EQ(2, rtc_WavNumChannels(w));
EXPECT_EQ(0u, rtc_WavNumSamples(w));
- static const uint32_t kNumSamples = 4;
+ static const size_t kNumSamples = 4;
rtc_WavWriteSamples(w, &kSamples[0], 2);
EXPECT_EQ(2u, rtc_WavNumSamples(w));
rtc_WavWriteSamples(w, &kSamples[2], kNumSamples - 2);
@@ -120,10 +120,10 @@ TEST(WavWriterTest, C) {
0xff, 0x7f, // third sample: 4e4 (saturated)
0, 0x80, // fourth sample: -1e9 (saturated)
};
- static const int kContentSize =
+ static const size_t kContentSize =
kWavHeaderSize + kNumSamples * sizeof(int16_t);
static_assert(sizeof(kExpectedContents) == kContentSize, "content size");
- EXPECT_EQ(size_t(kContentSize), test::GetFileSize(outfile));
+ EXPECT_EQ(kContentSize, test::GetFileSize(outfile));
FILE* f = fopen(outfile.c_str(), "rb");
ASSERT_TRUE(f);
uint8_t contents[kContentSize];
@@ -137,9 +137,9 @@ TEST(WavWriterTest, LargeFile) {
std::string outfile = test::OutputPath() + "wavtest3.wav";
static const int kSampleRate = 8000;
static const int kNumChannels = 2;
- static const uint32_t kNumSamples = 3 * kSampleRate * kNumChannels;
+ static const size_t kNumSamples = 3 * kSampleRate * kNumChannels;
float samples[kNumSamples];
- for (uint32_t i = 0; i < kNumSamples; i += kNumChannels) {
+ for (size_t i = 0; i < kNumSamples; i += kNumChannels) {
// A nice periodic beeping sound.
static const double kToneHz = 440;
const double t = static_cast<double>(i) / (kNumChannels * kSampleRate);
diff --git a/webrtc/common_audio/wav_header.cc b/webrtc/common_audio/wav_header.cc
index 61cfffe62c..d2aa426414 100644
--- a/webrtc/common_audio/wav_header.cc
+++ b/webrtc/common_audio/wav_header.cc
@@ -62,12 +62,12 @@ static_assert(sizeof(WavHeader) == kWavHeaderSize, "no padding in header");
bool CheckWavParameters(int num_channels,
int sample_rate,
WavFormat format,
- int bytes_per_sample,
- uint32_t num_samples) {
+ size_t bytes_per_sample,
+ size_t num_samples) {
// num_channels, sample_rate, and bytes_per_sample must be positive, must fit
// in their respective fields, and their product must fit in the 32-bit
// ByteRate field.
- if (num_channels <= 0 || sample_rate <= 0 || bytes_per_sample <= 0)
+ if (num_channels <= 0 || sample_rate <= 0 || bytes_per_sample == 0)
return false;
if (static_cast<uint64_t>(sample_rate) > std::numeric_limits<uint32_t>::max())
return false;
@@ -99,10 +99,9 @@ bool CheckWavParameters(int num_channels,
// The number of bytes in the file, not counting the first ChunkHeader, must
// be less than 2^32; otherwise, the ChunkSize field overflows.
- const uint32_t max_samples =
- (std::numeric_limits<uint32_t>::max()
- - (kWavHeaderSize - sizeof(ChunkHeader))) /
- bytes_per_sample;
+ const size_t header_size = kWavHeaderSize - sizeof(ChunkHeader);
+ const size_t max_samples =
+ (std::numeric_limits<uint32_t>::max() - header_size) / bytes_per_sample;
if (num_samples > max_samples)
return false;
@@ -132,30 +131,31 @@ static inline std::string ReadFourCC(uint32_t x) {
#error "Write be-to-le conversion functions"
#endif
-static inline uint32_t RiffChunkSize(uint32_t bytes_in_payload) {
- return bytes_in_payload + kWavHeaderSize - sizeof(ChunkHeader);
+static inline uint32_t RiffChunkSize(size_t bytes_in_payload) {
+ return static_cast<uint32_t>(
+ bytes_in_payload + kWavHeaderSize - sizeof(ChunkHeader));
}
static inline uint32_t ByteRate(int num_channels, int sample_rate,
- int bytes_per_sample) {
- return static_cast<uint32_t>(num_channels) * sample_rate * bytes_per_sample;
+ size_t bytes_per_sample) {
+ return static_cast<uint32_t>(num_channels * sample_rate * bytes_per_sample);
}
-static inline uint16_t BlockAlign(int num_channels, int bytes_per_sample) {
- return num_channels * bytes_per_sample;
+static inline uint16_t BlockAlign(int num_channels, size_t bytes_per_sample) {
+ return static_cast<uint16_t>(num_channels * bytes_per_sample);
}
void WriteWavHeader(uint8_t* buf,
int num_channels,
int sample_rate,
WavFormat format,
- int bytes_per_sample,
- uint32_t num_samples) {
+ size_t bytes_per_sample,
+ size_t num_samples) {
RTC_CHECK(CheckWavParameters(num_channels, sample_rate, format,
bytes_per_sample, num_samples));
WavHeader header;
- const uint32_t bytes_in_payload = bytes_per_sample * num_samples;
+ const size_t bytes_in_payload = bytes_per_sample * num_samples;
WriteFourCC(&header.riff.header.ID, 'R', 'I', 'F', 'F');
WriteLE32(&header.riff.header.Size, RiffChunkSize(bytes_in_payload));
@@ -164,15 +164,16 @@ void WriteWavHeader(uint8_t* buf,
WriteFourCC(&header.fmt.header.ID, 'f', 'm', 't', ' ');
WriteLE32(&header.fmt.header.Size, kFmtSubchunkSize);
WriteLE16(&header.fmt.AudioFormat, format);
- WriteLE16(&header.fmt.NumChannels, num_channels);
+ WriteLE16(&header.fmt.NumChannels, static_cast<uint16_t>(num_channels));
WriteLE32(&header.fmt.SampleRate, sample_rate);
WriteLE32(&header.fmt.ByteRate, ByteRate(num_channels, sample_rate,
bytes_per_sample));
WriteLE16(&header.fmt.BlockAlign, BlockAlign(num_channels, bytes_per_sample));
- WriteLE16(&header.fmt.BitsPerSample, 8 * bytes_per_sample);
+ WriteLE16(&header.fmt.BitsPerSample,
+ static_cast<uint16_t>(8 * bytes_per_sample));
WriteFourCC(&header.data.header.ID, 'd', 'a', 't', 'a');
- WriteLE32(&header.data.header.Size, bytes_in_payload);
+ WriteLE32(&header.data.header.Size, static_cast<uint32_t>(bytes_in_payload));
// Do an extra copy rather than writing everything to buf directly, since buf
// might not be correctly aligned.
@@ -183,8 +184,8 @@ bool ReadWavHeader(ReadableWav* readable,
int* num_channels,
int* sample_rate,
WavFormat* format,
- int* bytes_per_sample,
- uint32_t* num_samples) {
+ size_t* bytes_per_sample,
+ size_t* num_samples) {
WavHeader header;
if (readable->Read(&header, kWavHeaderSize - sizeof(header.data)) !=
kWavHeaderSize - sizeof(header.data))
@@ -210,8 +211,8 @@ bool ReadWavHeader(ReadableWav* readable,
*num_channels = ReadLE16(header.fmt.NumChannels);
*sample_rate = ReadLE32(header.fmt.SampleRate);
*bytes_per_sample = ReadLE16(header.fmt.BitsPerSample) / 8;
- const uint32_t bytes_in_payload = ReadLE32(header.data.header.Size);
- if (*bytes_per_sample <= 0)
+ const size_t bytes_in_payload = ReadLE32(header.data.header.Size);
+ if (*bytes_per_sample == 0)
return false;
*num_samples = bytes_in_payload / *bytes_per_sample;
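
Aside: the max_samples bound above exists because the RIFF ChunkSize field is
a uint32_t covering everything after the first ChunkHeader. A quick check of
the arithmetic, assuming the canonical 44-byte PCM WAV header and an 8-byte
ChunkHeader (so 36 header bytes count against ChunkSize):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    int main() {
      const size_t header_size = 44 - 8;  // header bytes inside the RIFF chunk
      const size_t bytes_per_sample = 2;  // 16-bit PCM
      const size_t max_samples = (UINT32_MAX - header_size) / bytes_per_sample;
      printf("max 16-bit samples per file: %zu\n", max_samples);  // 2147483629
      return 0;
    }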
diff --git a/webrtc/common_audio/wav_header.h b/webrtc/common_audio/wav_header.h
index 1a0fd7c81d..65b7792992 100644
--- a/webrtc/common_audio/wav_header.h
+++ b/webrtc/common_audio/wav_header.h
@@ -35,8 +35,8 @@ enum WavFormat {
bool CheckWavParameters(int num_channels,
int sample_rate,
WavFormat format,
- int bytes_per_sample,
- uint32_t num_samples);
+ size_t bytes_per_sample,
+ size_t num_samples);
// Write a kWavHeaderSize bytes long WAV header to buf. The payload that
// follows the header is supposed to have the specified number of interleaved
@@ -46,8 +46,8 @@ void WriteWavHeader(uint8_t* buf,
int num_channels,
int sample_rate,
WavFormat format,
- int bytes_per_sample,
- uint32_t num_samples);
+ size_t bytes_per_sample,
+ size_t num_samples);
// Read a WAV header from an implemented ReadableWav and parse the values into
// the provided output parameters. ReadableWav is used because the header can
@@ -56,8 +56,8 @@ bool ReadWavHeader(ReadableWav* readable,
int* num_channels,
int* sample_rate,
WavFormat* format,
- int* bytes_per_sample,
- uint32_t* num_samples);
+ size_t* bytes_per_sample,
+ size_t* num_samples);
} // namespace webrtc
diff --git a/webrtc/common_audio/wav_header_unittest.cc b/webrtc/common_audio/wav_header_unittest.cc
index e03cb303aa..226d788204 100644
--- a/webrtc/common_audio/wav_header_unittest.cc
+++ b/webrtc/common_audio/wav_header_unittest.cc
@@ -70,7 +70,7 @@ TEST(WavHeaderTest, CheckWavParameters) {
// Try some really stupid values for one parameter at a time.
EXPECT_TRUE(CheckWavParameters(1, 8000, kWavFormatPcm, 1, 0));
EXPECT_FALSE(CheckWavParameters(0, 8000, kWavFormatPcm, 1, 0));
- EXPECT_FALSE(CheckWavParameters(-1, 8000, kWavFormatPcm, 1, 0));
+ EXPECT_FALSE(CheckWavParameters(0x10000, 8000, kWavFormatPcm, 1, 0));
EXPECT_FALSE(CheckWavParameters(1, 0, kWavFormatPcm, 1, 0));
EXPECT_FALSE(CheckWavParameters(1, 8000, WavFormat(0), 1, 0));
EXPECT_FALSE(CheckWavParameters(1, 8000, kWavFormatPcm, 0, 0));
@@ -94,8 +94,8 @@ TEST(WavHeaderTest, ReadWavHeaderWithErrors) {
int num_channels = 0;
int sample_rate = 0;
WavFormat format = kWavFormatPcm;
- int bytes_per_sample = 0;
- uint32_t num_samples = 0;
+ size_t bytes_per_sample = 0;
+ size_t num_samples = 0;
// Test a few ways the header can be invalid. We start with the valid header
// used in WriteAndReadWavHeader, and invalidate one field per test. The
@@ -271,8 +271,8 @@ TEST(WavHeaderTest, WriteAndReadWavHeader) {
int num_channels = 0;
int sample_rate = 0;
WavFormat format = kWavFormatPcm;
- int bytes_per_sample = 0;
- uint32_t num_samples = 0;
+ size_t bytes_per_sample = 0;
+ size_t num_samples = 0;
ReadableWavBuffer r(buf + 4, sizeof(buf) - 8);
EXPECT_TRUE(
ReadWavHeader(&r, &num_channels, &sample_rate, &format,
@@ -280,7 +280,7 @@ TEST(WavHeaderTest, WriteAndReadWavHeader) {
EXPECT_EQ(17, num_channels);
EXPECT_EQ(12345, sample_rate);
EXPECT_EQ(kWavFormatALaw, format);
- EXPECT_EQ(1, bytes_per_sample);
+ EXPECT_EQ(1u, bytes_per_sample);
EXPECT_EQ(123457689u, num_samples);
}
@@ -307,8 +307,8 @@ TEST(WavHeaderTest, ReadAtypicalWavHeader) {
int num_channels = 0;
int sample_rate = 0;
WavFormat format = kWavFormatPcm;
- int bytes_per_sample = 0;
- uint32_t num_samples = 0;
+ size_t bytes_per_sample = 0;
+ size_t num_samples = 0;
ReadableWavBuffer r(kBuf, sizeof(kBuf));
EXPECT_TRUE(
ReadWavHeader(&r, &num_channels, &sample_rate, &format,
@@ -316,7 +316,7 @@ TEST(WavHeaderTest, ReadAtypicalWavHeader) {
EXPECT_EQ(17, num_channels);
EXPECT_EQ(12345, sample_rate);
EXPECT_EQ(kWavFormatALaw, format);
- EXPECT_EQ(1, bytes_per_sample);
+ EXPECT_EQ(1u, bytes_per_sample);
EXPECT_EQ(123457689u, num_samples);
}
diff --git a/webrtc/modules/audio_coding/acm2/acm_codec_database.cc b/webrtc/modules/audio_coding/acm2/acm_codec_database.cc
index 8d4072fae4..bbd4509c25 100644
--- a/webrtc/modules/audio_coding/acm2/acm_codec_database.cc
+++ b/webrtc/modules/audio_coding/acm2/acm_codec_database.cc
@@ -106,7 +106,7 @@ const CodecInst ACMCodecDB::database_[] = {
{127, "red", 8000, 0, 1, 0},
#endif
// To prevent compile errors due to trailing commas.
- {-1, "Null", -1, -1, -1, -1}
+ {-1, "Null", -1, -1, 0, -1}
};
// Create database with all codec settings at compile time.
@@ -162,7 +162,7 @@ const ACMCodecDB::CodecSettings ACMCodecDB::codec_settings_[] = {
{1, {0}, 0, 1},
#endif
// To prevent compile errors due to trailing commas.
- {-1, {-1}, -1, -1}
+ {-1, {-1}, -1, 0}
};
// Create a database of all NetEQ decoders at compile time.
diff --git a/webrtc/modules/audio_coding/acm2/acm_resampler.cc b/webrtc/modules/audio_coding/acm2/acm_resampler.cc
index 5df87d2a19..d7ceb8ac9f 100644
--- a/webrtc/modules/audio_coding/acm2/acm_resampler.cc
+++ b/webrtc/modules/audio_coding/acm2/acm_resampler.cc
@@ -32,7 +32,6 @@ int ACMResampler::Resample10Msec(const int16_t* in_audio,
size_t out_capacity_samples,
int16_t* out_audio) {
size_t in_length = static_cast<size_t>(in_freq_hz * num_audio_channels / 100);
- int out_length = out_freq_hz * num_audio_channels / 100;
if (in_freq_hz == out_freq_hz) {
if (out_capacity_samples < in_length) {
assert(false);
@@ -49,7 +48,7 @@ int ACMResampler::Resample10Msec(const int16_t* in_audio,
return -1;
}
- out_length =
+ int out_length =
resampler_.Resample(in_audio, in_length, out_audio, out_capacity_samples);
if (out_length == -1) {
LOG(LS_ERROR) << "Resample(" << in_audio << ", " << in_length << ", "
diff --git a/webrtc/modules/audio_coding/codecs/g711/audio_encoder_pcm.cc b/webrtc/modules/audio_coding/codecs/g711/audio_encoder_pcm.cc
index 210791cbd9..26c7838861 100644
--- a/webrtc/modules/audio_coding/codecs/g711/audio_encoder_pcm.cc
+++ b/webrtc/modules/audio_coding/codecs/g711/audio_encoder_pcm.cc
@@ -83,7 +83,8 @@ size_t AudioEncoderPcm::Max10MsFramesInAPacket() const {
}
int AudioEncoderPcm::GetTargetBitrate() const {
- return 8 * BytesPerSample() * SampleRateHz() * NumChannels();
+ return static_cast<int>(
+ 8 * BytesPerSample() * SampleRateHz() * NumChannels());
}
AudioEncoder::EncodedInfo AudioEncoderPcm::EncodeInternal(
@@ -122,7 +123,7 @@ size_t AudioEncoderPcmA::EncodeCall(const int16_t* audio,
return WebRtcG711_EncodeA(audio, input_len, encoded);
}
-int AudioEncoderPcmA::BytesPerSample() const {
+size_t AudioEncoderPcmA::BytesPerSample() const {
return 1;
}
@@ -135,7 +136,7 @@ size_t AudioEncoderPcmU::EncodeCall(const int16_t* audio,
return WebRtcG711_EncodeU(audio, input_len, encoded);
}
-int AudioEncoderPcmU::BytesPerSample() const {
+size_t AudioEncoderPcmU::BytesPerSample() const {
return 1;
}
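
Aside: the new static_cast<int> in GetTargetBitrate() is needed because
BytesPerSample() now returns size_t, so the whole product is computed in
size_t and returning it from an int function would be a narrowing conversion.
A standalone sketch of the promotion, with illustrative G.711-style values
(1 byte per sample, 8000 Hz, mono):

    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t bytes_per_sample = 1;  // the size_t factor
      const int sample_rate_hz = 8000;
      const int num_channels = 1;
      // Usual arithmetic conversions: one size_t operand makes the whole
      // product size_t, so handing it back as int needs an explicit cast.
      const size_t bitrate =
          8 * bytes_per_sample * sample_rate_hz * num_channels;
      printf("target bitrate: %d bps\n", static_cast<int>(bitrate));  // 64000
      return 0;
    }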
diff --git a/webrtc/modules/audio_coding/codecs/g711/audio_encoder_pcm.h b/webrtc/modules/audio_coding/codecs/g711/audio_encoder_pcm.h
index fd996dca75..6891cbdc3a 100644
--- a/webrtc/modules/audio_coding/codecs/g711/audio_encoder_pcm.h
+++ b/webrtc/modules/audio_coding/codecs/g711/audio_encoder_pcm.h
@@ -54,7 +54,7 @@ class AudioEncoderPcm : public AudioEncoder {
size_t input_len,
uint8_t* encoded) = 0;
- virtual int BytesPerSample() const = 0;
+ virtual size_t BytesPerSample() const = 0;
private:
const int sample_rate_hz_;
@@ -83,7 +83,7 @@ class AudioEncoderPcmA final : public AudioEncoderPcm {
size_t input_len,
uint8_t* encoded) override;
- int BytesPerSample() const override;
+ size_t BytesPerSample() const override;
private:
static const int kSampleRateHz = 8000;
@@ -105,7 +105,7 @@ class AudioEncoderPcmU final : public AudioEncoderPcm {
size_t input_len,
uint8_t* encoded) override;
- int BytesPerSample() const override;
+ size_t BytesPerSample() const override;
private:
static const int kSampleRateHz = 8000;
diff --git a/webrtc/modules/audio_coding/codecs/isac/fix/test/isac_speed_test.cc b/webrtc/modules/audio_coding/codecs/isac/fix/test/isac_speed_test.cc
index 632a4fe825..32f36c5261 100644
--- a/webrtc/modules/audio_coding/codecs/isac/fix/test/isac_speed_test.cc
+++ b/webrtc/modules/audio_coding/codecs/isac/fix/test/isac_speed_test.cc
@@ -92,7 +92,7 @@ float IsacSpeedTest::DecodeABlock(const uint8_t* bit_stream,
value = WebRtcIsacfix_Decode(ISACFIX_main_inst_, bit_stream, encoded_bytes,
out_data, &audio_type);
clocks = clock() - clocks;
- EXPECT_EQ(output_length_sample_, value);
+ EXPECT_EQ(output_length_sample_, static_cast<size_t>(value));
return 1000.0 * clocks / CLOCKS_PER_SEC;
}
diff --git a/webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.cc b/webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.cc
index 3e7d3ec738..0806bb81d9 100644
--- a/webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.cc
+++ b/webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.cc
@@ -137,15 +137,14 @@ AudioEncoder::EncodedInfo AudioEncoderOpus::EncodeInternal(
uint8_t* encoded) {
if (input_buffer_.empty())
first_timestamp_in_buffer_ = rtp_timestamp;
- RTC_DCHECK_EQ(static_cast<size_t>(SamplesPer10msFrame()), audio.size());
+ RTC_DCHECK_EQ(SamplesPer10msFrame(), audio.size());
input_buffer_.insert(input_buffer_.end(), audio.cbegin(), audio.cend());
if (input_buffer_.size() <
- (static_cast<size_t>(Num10msFramesPerPacket()) * SamplesPer10msFrame())) {
+ (Num10msFramesPerPacket() * SamplesPer10msFrame())) {
return EncodedInfo();
}
- RTC_CHECK_EQ(
- input_buffer_.size(),
- static_cast<size_t>(Num10msFramesPerPacket()) * SamplesPer10msFrame());
+ RTC_CHECK_EQ(input_buffer_.size(),
+ Num10msFramesPerPacket() * SamplesPer10msFrame());
int status = WebRtcOpus_Encode(
inst_, &input_buffer_[0],
rtc::CheckedDivExact(input_buffer_.size(),
@@ -214,11 +213,11 @@ void AudioEncoderOpus::SetTargetBitrate(int bits_per_second) {
RTC_CHECK_EQ(0, WebRtcOpus_SetBitRate(inst_, config_.bitrate_bps));
}
-int AudioEncoderOpus::Num10msFramesPerPacket() const {
- return rtc::CheckedDivExact(config_.frame_size_ms, 10);
+size_t AudioEncoderOpus::Num10msFramesPerPacket() const {
+ return static_cast<size_t>(rtc::CheckedDivExact(config_.frame_size_ms, 10));
}
-int AudioEncoderOpus::SamplesPer10msFrame() const {
+size_t AudioEncoderOpus::SamplesPer10msFrame() const {
return rtc::CheckedDivExact(kSampleRateHz, 100) * config_.num_channels;
}
diff --git a/webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.h b/webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.h
index 36011fab74..f37e344d4d 100644
--- a/webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.h
+++ b/webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.h
@@ -85,8 +85,8 @@ class AudioEncoderOpus final : public AudioEncoder {
bool dtx_enabled() const { return config_.dtx_enabled; }
private:
- int Num10msFramesPerPacket() const;
- int SamplesPer10msFrame() const;
+ size_t Num10msFramesPerPacket() const;
+ size_t SamplesPer10msFrame() const;
bool RecreateEncoderInstance(const Config& config);
Config config_;
diff --git a/webrtc/modules/audio_coding/codecs/opus/opus_speed_test.cc b/webrtc/modules/audio_coding/codecs/opus/opus_speed_test.cc
index f95cc7145d..4d1aa42c89 100644
--- a/webrtc/modules/audio_coding/codecs/opus/opus_speed_test.cc
+++ b/webrtc/modules/audio_coding/codecs/opus/opus_speed_test.cc
@@ -77,7 +77,7 @@ float OpusSpeedTest::DecodeABlock(const uint8_t* bit_stream,
value = WebRtcOpus_Decode(opus_decoder_, bit_stream, encoded_bytes, out_data,
&audio_type);
clocks = clock() - clocks;
- EXPECT_EQ(output_length_sample_, value);
+ EXPECT_EQ(output_length_sample_, static_cast<size_t>(value));
return 1000.0 * clocks / CLOCKS_PER_SEC;
}
diff --git a/webrtc/modules/audio_coding/codecs/pcm16b/audio_encoder_pcm16b.cc b/webrtc/modules/audio_coding/codecs/pcm16b/audio_encoder_pcm16b.cc
index 50d2041b83..f4d4022302 100644
--- a/webrtc/modules/audio_coding/codecs/pcm16b/audio_encoder_pcm16b.cc
+++ b/webrtc/modules/audio_coding/codecs/pcm16b/audio_encoder_pcm16b.cc
@@ -22,7 +22,7 @@ size_t AudioEncoderPcm16B::EncodeCall(const int16_t* audio,
return WebRtcPcm16b_Encode(audio, input_len, encoded);
}
-int AudioEncoderPcm16B::BytesPerSample() const {
+size_t AudioEncoderPcm16B::BytesPerSample() const {
return 2;
}
diff --git a/webrtc/modules/audio_coding/codecs/pcm16b/audio_encoder_pcm16b.h b/webrtc/modules/audio_coding/codecs/pcm16b/audio_encoder_pcm16b.h
index 3645a6f718..68ca2da77e 100644
--- a/webrtc/modules/audio_coding/codecs/pcm16b/audio_encoder_pcm16b.h
+++ b/webrtc/modules/audio_coding/codecs/pcm16b/audio_encoder_pcm16b.h
@@ -37,7 +37,7 @@ class AudioEncoderPcm16B final : public AudioEncoderPcm {
size_t input_len,
uint8_t* encoded) override;
- int BytesPerSample() const override;
+ size_t BytesPerSample() const override;
private:
RTC_DISALLOW_COPY_AND_ASSIGN(AudioEncoderPcm16B);
diff --git a/webrtc/modules/audio_coding/codecs/tools/audio_codec_speed_test.cc b/webrtc/modules/audio_coding/codecs/tools/audio_codec_speed_test.cc
index 3395721f8b..07a15ff578 100644
--- a/webrtc/modules/audio_coding/codecs/tools/audio_codec_speed_test.cc
+++ b/webrtc/modules/audio_coding/codecs/tools/audio_codec_speed_test.cc
@@ -23,8 +23,10 @@ AudioCodecSpeedTest::AudioCodecSpeedTest(int block_duration_ms,
: block_duration_ms_(block_duration_ms),
input_sampling_khz_(input_sampling_khz),
output_sampling_khz_(output_sampling_khz),
- input_length_sample_(block_duration_ms_ * input_sampling_khz_),
- output_length_sample_(block_duration_ms_ * output_sampling_khz_),
+ input_length_sample_(
+ static_cast<size_t>(block_duration_ms_ * input_sampling_khz_)),
+ output_length_sample_(
+ static_cast<size_t>(block_duration_ms_ * output_sampling_khz_)),
data_pointer_(0),
loop_length_samples_(0),
max_bytes_(0),
@@ -65,8 +67,7 @@ void AudioCodecSpeedTest::SetUp() {
memcpy(&in_data_[loop_length_samples_], &in_data_[0],
input_length_sample_ * channels_ * sizeof(int16_t));
- max_bytes_ =
- static_cast<size_t>(input_length_sample_ * channels_ * sizeof(int16_t));
+ max_bytes_ = input_length_sample_ * channels_ * sizeof(int16_t);
out_data_.reset(new int16_t[output_length_sample_ * channels_]);
bit_stream_.reset(new uint8_t[max_bytes_]);
diff --git a/webrtc/modules/audio_coding/codecs/tools/audio_codec_speed_test.h b/webrtc/modules/audio_coding/codecs/tools/audio_codec_speed_test.h
index 2736c2912e..b5aef75e95 100644
--- a/webrtc/modules/audio_coding/codecs/tools/audio_codec_speed_test.h
+++ b/webrtc/modules/audio_coding/codecs/tools/audio_codec_speed_test.h
@@ -55,10 +55,10 @@ class AudioCodecSpeedTest : public testing::TestWithParam<coding_param> {
int output_sampling_khz_;
// Number of samples-per-channel in a frame.
- int input_length_sample_;
+ size_t input_length_sample_;
// Expected output number of samples-per-channel in a frame.
- int output_length_sample_;
+ size_t output_length_sample_;
rtc::scoped_ptr<int16_t[]> in_data_;
rtc::scoped_ptr<int16_t[]> out_data_;
diff --git a/webrtc/modules/audio_coding/include/audio_coding_module.h b/webrtc/modules/audio_coding/include/audio_coding_module.h
index f5af65a13d..52fe383a88 100644
--- a/webrtc/modules/audio_coding/include/audio_coding_module.h
+++ b/webrtc/modules/audio_coding/include/audio_coding_module.h
@@ -134,7 +134,7 @@ class AudioCodingModule {
// 0 if succeeded.
//
static int Codec(const char* payload_name, CodecInst* codec,
- int sampling_freq_hz, int channels);
+ int sampling_freq_hz, int channels);
///////////////////////////////////////////////////////////////////////////
// int32_t Codec()
@@ -153,7 +153,7 @@ class AudioCodingModule {
// -1 if the codec is not found.
//
static int Codec(const char* payload_name, int sampling_freq_hz,
- int channels);
+ int channels);
///////////////////////////////////////////////////////////////////////////
// bool IsCodecValid()
diff --git a/webrtc/modules/audio_coding/test/opus_test.cc b/webrtc/modules/audio_coding/test/opus_test.cc
index a68db910f5..466db9faa2 100644
--- a/webrtc/modules/audio_coding/test/opus_test.cc
+++ b/webrtc/modules/audio_coding/test/opus_test.cc
@@ -206,16 +206,16 @@ void OpusTest::Perform() {
}
void OpusTest::Run(TestPackStereo* channel, int channels, int bitrate,
- int frame_length, int percent_loss) {
+ size_t frame_length, int percent_loss) {
AudioFrame audio_frame;
int32_t out_freq_hz_b = out_file_.SamplingFrequency();
- const int kBufferSizeSamples = 480 * 12 * 2; // Can hold 120 ms stereo audio.
+ const size_t kBufferSizeSamples = 480 * 12 * 2; // 120 ms stereo audio.
int16_t audio[kBufferSizeSamples];
int16_t out_audio[kBufferSizeSamples];
int16_t audio_type;
- int written_samples = 0;
- int read_samples = 0;
- int decoded_samples = 0;
+ size_t written_samples = 0;
+ size_t read_samples = 0;
+ size_t decoded_samples = 0;
bool first_packet = true;
uint32_t start_time_stamp = 0;
@@ -268,14 +268,14 @@ void OpusTest::Run(TestPackStereo* channel, int channels, int bitrate,
// Sometimes we need to loop over the audio vector to produce the right
// number of packets.
- int loop_encode = (written_samples - read_samples) /
+ size_t loop_encode = (written_samples - read_samples) /
(channels * frame_length);
if (loop_encode > 0) {
- const int kMaxBytes = 1000; // Maximum number of bytes for one packet.
+ const size_t kMaxBytes = 1000; // Maximum number of bytes for one packet.
size_t bitstream_len_byte;
uint8_t bitstream[kMaxBytes];
- for (int i = 0; i < loop_encode; i++) {
+ for (size_t i = 0; i < loop_encode; i++) {
int bitstream_len_byte_int = WebRtcOpus_Encode(
(channels == 1) ? opus_mono_encoder_ : opus_stereo_encoder_,
&audio[read_samples], frame_length, kMaxBytes, bitstream);
@@ -326,7 +326,7 @@ void OpusTest::Run(TestPackStereo* channel, int channels, int bitrate,
first_packet = false;
start_time_stamp = rtp_timestamp_;
}
- rtp_timestamp_ += frame_length;
+ rtp_timestamp_ += static_cast<uint32_t>(frame_length);
read_samples += frame_length * channels;
}
if (read_samples == written_samples) {
@@ -344,8 +344,7 @@ void OpusTest::Run(TestPackStereo* channel, int channels, int bitrate,
audio_frame.samples_per_channel_ * audio_frame.num_channels_);
// Write stand-alone speech to file.
- out_file_standalone_.Write10MsData(
- out_audio, static_cast<size_t>(decoded_samples) * channels);
+ out_file_standalone_.Write10MsData(out_audio, decoded_samples * channels);
if (audio_frame.timestamp_ > start_time_stamp) {
// Number of channels should be the same for both stand-alone and
diff --git a/webrtc/modules/audio_coding/test/opus_test.h b/webrtc/modules/audio_coding/test/opus_test.h
index 090c8fa9dd..88ef0ecdfb 100644
--- a/webrtc/modules/audio_coding/test/opus_test.h
+++ b/webrtc/modules/audio_coding/test/opus_test.h
@@ -31,7 +31,10 @@ class OpusTest : public ACMTest {
void Perform();
private:
- void Run(TestPackStereo* channel, int channels, int bitrate, int frame_length,
+ void Run(TestPackStereo* channel,
+ int channels,
+ int bitrate,
+ size_t frame_length,
int percent_loss = 0);
void OpenOutFile(int test_number);
@@ -44,7 +47,7 @@ class OpusTest : public ACMTest {
PCMFile out_file_standalone_;
int counter_;
uint8_t payload_type_;
- int rtp_timestamp_;
+ uint32_t rtp_timestamp_;
acm2::ACMResampler resampler_;
WebRtcOpusEncInst* opus_mono_encoder_;
WebRtcOpusEncInst* opus_stereo_encoder_;
diff --git a/webrtc/modules/audio_device/dummy/file_audio_device.cc b/webrtc/modules/audio_device/dummy/file_audio_device.cc
index ea432af203..ea61ce64a7 100644
--- a/webrtc/modules/audio_device/dummy/file_audio_device.cc
+++ b/webrtc/modules/audio_device/dummy/file_audio_device.cc
@@ -17,10 +17,10 @@ const int kRecordingFixedSampleRate = 48000;
const int kRecordingNumChannels = 2;
const int kPlayoutFixedSampleRate = 48000;
const int kPlayoutNumChannels = 2;
-const int kPlayoutBufferSize = kPlayoutFixedSampleRate / 100
- * kPlayoutNumChannels * 2;
-const int kRecordingBufferSize = kRecordingFixedSampleRate / 100
- * kRecordingNumChannels * 2;
+const size_t kPlayoutBufferSize =
+ kPlayoutFixedSampleRate / 100 * kPlayoutNumChannels * 2;
+const size_t kRecordingBufferSize =
+ kRecordingFixedSampleRate / 100 * kRecordingNumChannels * 2;
FileAudioDevice::FileAudioDevice(const int32_t id,
const char* inputFilename,
@@ -194,9 +194,7 @@ int32_t FileAudioDevice::StartPlayout() {
_playoutFramesLeft = 0;
if (!_playoutBuffer) {
- _playoutBuffer = new int8_t[2 *
- kPlayoutNumChannels *
- kPlayoutFixedSampleRate/100];
+ _playoutBuffer = new int8_t[kPlayoutBufferSize];
}
if (!_playoutBuffer) {
_playing = false;
diff --git a/webrtc/modules/audio_device/test/audio_device_test_api.cc b/webrtc/modules/audio_device/test/audio_device_test_api.cc
index 2e015cd9b3..2bbdfed594 100644
--- a/webrtc/modules/audio_device/test/audio_device_test_api.cc
+++ b/webrtc/modules/audio_device/test/audio_device_test_api.cc
@@ -128,29 +128,6 @@ class AudioTransportAPI: public AudioTransport {
return 0;
}
- int OnDataAvailable(const int voe_channels[],
- int number_of_voe_channels,
- const int16_t* audio_data,
- int sample_rate,
- int number_of_channels,
- size_t number_of_frames,
- int audio_delay_milliseconds,
- int current_volume,
- bool key_pressed,
- bool need_audio_processing) override {
- return 0;
- }
-
- void PushCaptureData(int voe_channel, const void* audio_data,
- int bits_per_sample, int sample_rate,
- int number_of_channels,
- size_t number_of_frames) override {}
-
- void PullRenderData(int bits_per_sample, int sample_rate,
- int number_of_channels, size_t number_of_frames,
- void* audio_data,
- int64_t* elapsed_time_ms,
- int64_t* ntp_time_ms) override {}
private:
uint32_t rec_count_;
uint32_t play_count_;
diff --git a/webrtc/modules/audio_device/test/func_test_manager.cc b/webrtc/modules/audio_device/test/func_test_manager.cc
index 241c072d82..b358e5013d 100644
--- a/webrtc/modules/audio_device/test/func_test_manager.cc
+++ b/webrtc/modules/audio_device/test/func_test_manager.cc
@@ -573,32 +573,6 @@ int32_t AudioTransportImpl::NeedMorePlayData(
return 0;
}
-int AudioTransportImpl::OnDataAvailable(const int voe_channels[],
- int number_of_voe_channels,
- const int16_t* audio_data,
- int sample_rate,
- int number_of_channels,
- size_t number_of_frames,
- int audio_delay_milliseconds,
- int current_volume,
- bool key_pressed,
- bool need_audio_processing) {
- return 0;
-}
-
-void AudioTransportImpl::PushCaptureData(int voe_channel,
- const void* audio_data,
- int bits_per_sample, int sample_rate,
- int number_of_channels,
- size_t number_of_frames) {}
-
-void AudioTransportImpl::PullRenderData(int bits_per_sample, int sample_rate,
- int number_of_channels,
- size_t number_of_frames,
- void* audio_data,
- int64_t* elapsed_time_ms,
- int64_t* ntp_time_ms) {}
-
FuncTestManager::FuncTestManager() :
_audioDevice(NULL),
_audioEventObserver(NULL),
diff --git a/webrtc/modules/audio_device/test/func_test_manager.h b/webrtc/modules/audio_device/test/func_test_manager.h
index 6ef13490d8..187070965d 100644
--- a/webrtc/modules/audio_device/test/func_test_manager.h
+++ b/webrtc/modules/audio_device/test/func_test_manager.h
@@ -105,28 +105,6 @@ public:
int64_t* elapsed_time_ms,
int64_t* ntp_time_ms) override;
- int OnDataAvailable(const int voe_channels[],
- int number_of_voe_channels,
- const int16_t* audio_data,
- int sample_rate,
- int number_of_channels,
- size_t number_of_frames,
- int audio_delay_milliseconds,
- int current_volume,
- bool key_pressed,
- bool need_audio_processing) override;
-
- void PushCaptureData(int voe_channel, const void* audio_data,
- int bits_per_sample, int sample_rate,
- int number_of_channels,
- size_t number_of_frames) override;
-
- void PullRenderData(int bits_per_sample, int sample_rate,
- int number_of_channels, size_t number_of_frames,
- void* audio_data,
- int64_t* elapsed_time_ms,
- int64_t* ntp_time_ms) override;
-
AudioTransportImpl(AudioDeviceModule* audioDevice);
~AudioTransportImpl();
diff --git a/webrtc/modules/audio_processing/audio_buffer.cc b/webrtc/modules/audio_processing/audio_buffer.cc
index 81790a159b..c1c4061f48 100644
--- a/webrtc/modules/audio_processing/audio_buffer.cc
+++ b/webrtc/modules/audio_processing/audio_buffer.cc
@@ -26,7 +26,7 @@ const size_t kSamplesPer48kHzChannel = 480;
int KeyboardChannelIndex(const StreamConfig& stream_config) {
if (!stream_config.has_keyboard()) {
assert(false);
- return -1;
+ return 0;
}
return stream_config.num_channels();
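Returning 0 instead of -1 matches the planned migration of channel counts and indices to size_t, where a negative sentinel becomes illegal; the assert above still catches the misuse in debug builds. A sketch of why 0 is the safer placeholder (the accessor here is illustrative, not from this file):

    // The result is used as an index, so once it becomes size_t a negative
    // value would wrap to a huge index; 0 at least stays in range.
    const int keyboard = KeyboardChannelIndex(stream_config);
    const float* keyboard_data = input.channels()[keyboard];  // hypothetical call site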
diff --git a/webrtc/modules/audio_processing/audio_processing_impl_locking_unittest.cc b/webrtc/modules/audio_processing/audio_processing_impl_locking_unittest.cc
index cd4b31c2c8..7e96774466 100644
--- a/webrtc/modules/audio_processing/audio_processing_impl_locking_unittest.cc
+++ b/webrtc/modules/audio_processing/audio_processing_impl_locking_unittest.cc
@@ -478,7 +478,7 @@ void PopulateAudioFrame(AudioFrame* frame,
ASSERT_GT(amplitude, 0);
ASSERT_LE(amplitude, 32767);
for (int ch = 0; ch < frame->num_channels_; ch++) {
- for (int k = 0; k < static_cast<int>(frame->samples_per_channel_); k++) {
+ for (size_t k = 0; k < frame->samples_per_channel_; k++) {
// Store random 16 bit number between -(amplitude+1) and
// amplitude.
frame->data_[k * ch] =
diff --git a/webrtc/modules/audio_processing/beamformer/nonlinear_beamformer.cc b/webrtc/modules/audio_processing/beamformer/nonlinear_beamformer.cc
index 029fa089fc..0544104b79 100644
--- a/webrtc/modules/audio_processing/beamformer/nonlinear_beamformer.cc
+++ b/webrtc/modules/audio_processing/beamformer/nonlinear_beamformer.cc
@@ -163,7 +163,7 @@ void TransposedConjugatedProduct(const ComplexMatrix<float>& in,
}
std::vector<Point> GetCenteredArray(std::vector<Point> array_geometry) {
- for (int dim = 0; dim < 3; ++dim) {
+ for (size_t dim = 0; dim < 3; ++dim) {
float center = 0.f;
for (size_t i = 0; i < array_geometry.size(); ++i) {
center += array_geometry[i].c[dim];
@@ -379,7 +379,7 @@ void NonlinearBeamformer::ProcessChunk(const ChannelBuffer<float>& input,
(high_pass_postfilter_mask_ - old_high_pass_mask) /
input.num_frames_per_band();
// Apply the smoothed high-pass mask to the first channel of each band.
- // This can be done because the effct of the linear beamformer is negligible
+ // This can be done because the effect of the linear beamformer is negligible
// compared to the post-filter.
for (size_t i = 1; i < input.num_bands(); ++i) {
float smoothed_mask = old_high_pass_mask;
@@ -412,9 +412,9 @@ void NonlinearBeamformer::ProcessAudioBlock(const complex_f* const* input,
size_t num_freq_bins,
int num_output_channels,
complex_f* const* output) {
- RTC_CHECK_EQ(num_freq_bins, kNumFreqBins);
- RTC_CHECK_EQ(num_input_channels, num_input_channels_);
- RTC_CHECK_EQ(num_output_channels, 1);
+ RTC_CHECK_EQ(kNumFreqBins, num_freq_bins);
+ RTC_CHECK_EQ(num_input_channels_, num_input_channels);
+ RTC_CHECK_EQ(1, num_output_channels);
// Calculating the post-filter masks. Note that we need two for each
// frequency bin to account for the positive and negative interferer
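The swapped arguments follow the RTC_CHECK_EQ(expected, actual) convention from the commit description, so a failure message attributes the two values correctly, the same pattern as gtest's EXPECT_EQ. Schematically:

    // Expected (constant) first, actual (runtime value) second:
    RTC_CHECK_EQ(kNumFreqBins, num_freq_bins);
    // not: RTC_CHECK_EQ(num_freq_bins, kNumFreqBins);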
diff --git a/webrtc/modules/audio_processing/common.h b/webrtc/modules/audio_processing/common.h
index ed8a0544c3..cf0d8b772f 100644
--- a/webrtc/modules/audio_processing/common.h
+++ b/webrtc/modules/audio_processing/common.h
@@ -27,7 +27,7 @@ static inline int ChannelsFromLayout(AudioProcessing::ChannelLayout layout) {
return 2;
}
assert(false);
- return -1;
+ return 0;
}
} // namespace webrtc
diff --git a/webrtc/modules/audio_processing/echo_cancellation_impl.cc b/webrtc/modules/audio_processing/echo_cancellation_impl.cc
index 6d0373d758..bdcad200f2 100644
--- a/webrtc/modules/audio_processing/echo_cancellation_impl.cc
+++ b/webrtc/modules/audio_processing/echo_cancellation_impl.cc
@@ -142,8 +142,8 @@ void EchoCancellationImpl::ReadQueuedRenderData() {
while (render_signal_queue_->Remove(&capture_queue_buffer_)) {
size_t handle_index = 0;
- int buffer_index = 0;
- const int num_frames_per_band =
+ size_t buffer_index = 0;
+ const size_t num_frames_per_band =
capture_queue_buffer_.size() /
(apm_->num_output_channels() * apm_->num_reverse_channels());
for (int i = 0; i < apm_->num_output_channels(); i++) {
@@ -491,8 +491,7 @@ int EchoCancellationImpl::ConfigureHandle(void* handle) const {
int EchoCancellationImpl::num_handles_required() const {
// Not locked as it only relies on APM public API which is threadsafe.
- return apm_->num_output_channels() *
- apm_->num_reverse_channels();
+ return apm_->num_output_channels() * apm_->num_reverse_channels();
}
int EchoCancellationImpl::GetHandleError(void* handle) const {
diff --git a/webrtc/modules/audio_processing/echo_control_mobile_impl.cc b/webrtc/modules/audio_processing/echo_control_mobile_impl.cc
index 5ff7bd728d..a39528efd3 100644
--- a/webrtc/modules/audio_processing/echo_control_mobile_impl.cc
+++ b/webrtc/modules/audio_processing/echo_control_mobile_impl.cc
@@ -147,8 +147,8 @@ void EchoControlMobileImpl::ReadQueuedRenderData() {
while (render_signal_queue_->Remove(&capture_queue_buffer_)) {
size_t handle_index = 0;
- int buffer_index = 0;
- const int num_frames_per_band =
+ size_t buffer_index = 0;
+ const size_t num_frames_per_band =
capture_queue_buffer_.size() /
(apm_->num_output_channels() * apm_->num_reverse_channels());
for (int i = 0; i < apm_->num_output_channels(); i++) {
@@ -396,8 +396,7 @@ int EchoControlMobileImpl::ConfigureHandle(void* handle) const {
int EchoControlMobileImpl::num_handles_required() const {
// Not locked as it only relies on APM public API which is threadsafe.
- return apm_->num_output_channels() *
- apm_->num_reverse_channels();
+ return apm_->num_output_channels() * apm_->num_reverse_channels();
}
int EchoControlMobileImpl::GetHandleError(void* handle) const {
diff --git a/webrtc/modules/audio_processing/gain_control_impl.cc b/webrtc/modules/audio_processing/gain_control_impl.cc
index c8175dc01f..b9b35648aa 100644
--- a/webrtc/modules/audio_processing/gain_control_impl.cc
+++ b/webrtc/modules/audio_processing/gain_control_impl.cc
@@ -111,8 +111,8 @@ void GainControlImpl::ReadQueuedRenderData() {
}
while (render_signal_queue_->Remove(&capture_queue_buffer_)) {
- int buffer_index = 0;
- const int num_frames_per_band =
+ size_t buffer_index = 0;
+ const size_t num_frames_per_band =
capture_queue_buffer_.size() / num_handles();
for (int i = 0; i < num_handles(); i++) {
Handle* my_handle = static_cast<Handle*>(handle(i));
diff --git a/webrtc/modules/audio_processing/noise_suppression_impl.cc b/webrtc/modules/audio_processing/noise_suppression_impl.cc
index ee0deecbdf..a40cd631b6 100644
--- a/webrtc/modules/audio_processing/noise_suppression_impl.cc
+++ b/webrtc/modules/audio_processing/noise_suppression_impl.cc
@@ -165,7 +165,7 @@ float NoiseSuppressionImpl::speech_probability() const {
probability_average +=
WebRtcNs_prior_speech_probability(suppressor->state());
}
- if (suppressors_.size() > 0) {
+ if (!suppressors_.empty()) {
probability_average /= suppressors_.size();
}
return probability_average;
diff --git a/webrtc/modules/audio_processing/test/audio_processing_unittest.cc b/webrtc/modules/audio_processing/test/audio_processing_unittest.cc
index d4bb8aa513..6eae1e5b94 100644
--- a/webrtc/modules/audio_processing/test/audio_processing_unittest.cc
+++ b/webrtc/modules/audio_processing/test/audio_processing_unittest.cc
@@ -14,6 +14,7 @@
#include <limits>
#include <queue>
+#include "webrtc/base/arraysize.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/common_audio/include/audio_util.h"
#include "webrtc/common_audio/resampler/include/push_resampler.h"
@@ -49,11 +50,8 @@ namespace {
// file. This is the typical case. When the file should be updated, it can
// be set to true with the command-line switch --write_ref_data.
bool write_ref_data = false;
-const int kChannels[] = {1, 2};
-const size_t kChannelsSize = sizeof(kChannels) / sizeof(*kChannels);
-
+const google::protobuf::int32 kChannels[] = {1, 2};
const int kSampleRates[] = {8000, 16000, 32000, 48000};
-const size_t kSampleRatesSize = sizeof(kSampleRates) / sizeof(*kSampleRates);
#if defined(WEBRTC_AUDIOPROC_FIXED_PROFILE)
// AECM doesn't support super-wb.
@@ -61,8 +59,6 @@ const int kProcessSampleRates[] = {8000, 16000};
#elif defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
const int kProcessSampleRates[] = {8000, 16000, 32000, 48000};
#endif
-const size_t kProcessSampleRatesSize = sizeof(kProcessSampleRates) /
- sizeof(*kProcessSampleRates);
enum StreamDirection { kForward = 0, kReverse };
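arraysize() replaces the sizeof(a)/sizeof(*a) idiom throughout this file. For reference, it is the usual compile-time template trick; a sketch along the lines of webrtc/base/arraysize.h (the authoritative definition lives there):

    // Yields the element count of a real C array as a constant expression,
    // and fails to compile if given a pointer, whereas the sizeof division
    // silently returns nonsense in that case.
    template <typename T, size_t N>
    char (&ArraySizeHelper(T (&array)[N]))[N];
    #define arraysize(array) (sizeof(ArraySizeHelper(array)))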
@@ -96,7 +92,7 @@ int TotalChannelsFromLayout(AudioProcessing::ChannelLayout layout) {
return 3;
}
assert(false);
- return -1;
+ return 0;
}
int TruncateToMultipleOf10(int value) {
@@ -104,25 +100,25 @@ int TruncateToMultipleOf10(int value) {
}
void MixStereoToMono(const float* stereo, float* mono,
- int samples_per_channel) {
- for (int i = 0; i < samples_per_channel; ++i)
+ size_t samples_per_channel) {
+ for (size_t i = 0; i < samples_per_channel; ++i)
mono[i] = (stereo[i * 2] + stereo[i * 2 + 1]) / 2;
}
void MixStereoToMono(const int16_t* stereo, int16_t* mono,
- int samples_per_channel) {
- for (int i = 0; i < samples_per_channel; ++i)
+ size_t samples_per_channel) {
+ for (size_t i = 0; i < samples_per_channel; ++i)
mono[i] = (stereo[i * 2] + stereo[i * 2 + 1]) >> 1;
}
-void CopyLeftToRightChannel(int16_t* stereo, int samples_per_channel) {
- for (int i = 0; i < samples_per_channel; i++) {
+void CopyLeftToRightChannel(int16_t* stereo, size_t samples_per_channel) {
+ for (size_t i = 0; i < samples_per_channel; i++) {
stereo[i * 2 + 1] = stereo[i * 2];
}
}
-void VerifyChannelsAreEqual(int16_t* stereo, int samples_per_channel) {
- for (int i = 0; i < samples_per_channel; i++) {
+void VerifyChannelsAreEqual(int16_t* stereo, size_t samples_per_channel) {
+ for (size_t i = 0; i < samples_per_channel; i++) {
EXPECT_EQ(stereo[i * 2 + 1], stereo[i * 2]);
}
}
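These helpers now take the sample count as size_t, matching frame->samples_per_channel_ and removing the int loop counters. Call sites are unchanged; a minimal sketch:

    int16_t stereo[320];  // 160 interleaved L/R pairs, filled elsewhere
    int16_t mono[160];
    MixStereoToMono(stereo, mono, 160u);  // the count parameter is size_t now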
@@ -195,9 +191,9 @@ T AbsValue(T a) {
}
int16_t MaxAudioFrame(const AudioFrame& frame) {
- const int length = frame.samples_per_channel_ * frame.num_channels_;
+ const size_t length = frame.samples_per_channel_ * frame.num_channels_;
int16_t max_data = AbsValue(frame.data_[0]);
- for (int i = 1; i < length; i++) {
+ for (size_t i = 1; i < length; i++) {
max_data = std::max(max_data, AbsValue(frame.data_[i]));
}
@@ -898,7 +894,7 @@ TEST_F(ApmTest, SampleRatesInt) {
EXPECT_EQ(apm_->kBadSampleRateError, ProcessStreamChooser(kIntFormat));
// Testing valid sample rates
int fs[] = {8000, 16000, 32000, 48000};
- for (size_t i = 0; i < sizeof(fs) / sizeof(*fs); i++) {
+ for (size_t i = 0; i < arraysize(fs); i++) {
SetContainerFormat(fs[i], 2, frame_, &float_cb_);
EXPECT_NOERR(ProcessStreamChooser(kIntFormat));
}
@@ -917,7 +913,7 @@ TEST_F(ApmTest, EchoCancellation) {
EchoCancellation::kModerateSuppression,
EchoCancellation::kHighSuppression,
};
- for (size_t i = 0; i < sizeof(level)/sizeof(*level); i++) {
+ for (size_t i = 0; i < arraysize(level); i++) {
EXPECT_EQ(apm_->kNoError,
apm_->echo_cancellation()->set_suppression_level(level[i]));
EXPECT_EQ(level[i],
@@ -994,7 +990,7 @@ TEST_F(ApmTest, DISABLED_EchoCancellationReportsCorrectDelays) {
// Test a couple of corner cases and verify that the estimated delay is
// within a valid region (set to +-1.5 blocks). Note that these cases are
// sampling frequency dependent.
- for (size_t i = 0; i < kProcessSampleRatesSize; i++) {
+ for (size_t i = 0; i < arraysize(kProcessSampleRates); i++) {
Init(kProcessSampleRates[i],
kProcessSampleRates[i],
kProcessSampleRates[i],
@@ -1066,7 +1062,7 @@ TEST_F(ApmTest, EchoControlMobile) {
EchoControlMobile::kSpeakerphone,
EchoControlMobile::kLoudSpeakerphone,
};
- for (size_t i = 0; i < sizeof(mode)/sizeof(*mode); i++) {
+ for (size_t i = 0; i < arraysize(mode); i++) {
EXPECT_EQ(apm_->kNoError,
apm_->echo_control_mobile()->set_routing_mode(mode[i]));
EXPECT_EQ(mode[i],
@@ -1131,7 +1127,7 @@ TEST_F(ApmTest, GainControl) {
GainControl::kAdaptiveDigital,
GainControl::kFixedDigital
};
- for (size_t i = 0; i < sizeof(mode)/sizeof(*mode); i++) {
+ for (size_t i = 0; i < arraysize(mode); i++) {
EXPECT_EQ(apm_->kNoError,
apm_->gain_control()->set_mode(mode[i]));
EXPECT_EQ(mode[i], apm_->gain_control()->mode());
@@ -1147,7 +1143,7 @@ TEST_F(ApmTest, GainControl) {
apm_->gain_control()->target_level_dbfs()));
int level_dbfs[] = {0, 6, 31};
- for (size_t i = 0; i < sizeof(level_dbfs)/sizeof(*level_dbfs); i++) {
+ for (size_t i = 0; i < arraysize(level_dbfs); i++) {
EXPECT_EQ(apm_->kNoError,
apm_->gain_control()->set_target_level_dbfs(level_dbfs[i]));
EXPECT_EQ(level_dbfs[i], apm_->gain_control()->target_level_dbfs());
@@ -1165,7 +1161,7 @@ TEST_F(ApmTest, GainControl) {
apm_->gain_control()->compression_gain_db()));
int gain_db[] = {0, 10, 90};
- for (size_t i = 0; i < sizeof(gain_db)/sizeof(*gain_db); i++) {
+ for (size_t i = 0; i < arraysize(gain_db); i++) {
EXPECT_EQ(apm_->kNoError,
apm_->gain_control()->set_compression_gain_db(gain_db[i]));
EXPECT_EQ(gain_db[i], apm_->gain_control()->compression_gain_db());
@@ -1196,14 +1192,14 @@ TEST_F(ApmTest, GainControl) {
apm_->gain_control()->analog_level_maximum()));
int min_level[] = {0, 255, 1024};
- for (size_t i = 0; i < sizeof(min_level)/sizeof(*min_level); i++) {
+ for (size_t i = 0; i < arraysize(min_level); i++) {
EXPECT_EQ(apm_->kNoError,
apm_->gain_control()->set_analog_level_limits(min_level[i], 1024));
EXPECT_EQ(min_level[i], apm_->gain_control()->analog_level_minimum());
}
int max_level[] = {0, 1024, 65535};
- for (size_t i = 0; i < sizeof(min_level)/sizeof(*min_level); i++) {
+ for (size_t i = 0; i < arraysize(min_level); i++) {
EXPECT_EQ(apm_->kNoError,
apm_->gain_control()->set_analog_level_limits(0, max_level[i]));
EXPECT_EQ(max_level[i], apm_->gain_control()->analog_level_maximum());
@@ -1242,7 +1238,7 @@ void ApmTest::RunQuantizedVolumeDoesNotGetStuckTest(int sample_rate) {
// Verifies that despite volume slider quantization, the AGC can continue to
// increase its volume.
TEST_F(ApmTest, QuantizedVolumeDoesNotGetStuck) {
- for (size_t i = 0; i < kSampleRatesSize; ++i) {
+ for (size_t i = 0; i < arraysize(kSampleRates); ++i) {
RunQuantizedVolumeDoesNotGetStuckTest(kSampleRates[i]);
}
}
@@ -1287,7 +1283,7 @@ void ApmTest::RunManualVolumeChangeIsPossibleTest(int sample_rate) {
}
TEST_F(ApmTest, ManualVolumeChangeIsPossible) {
- for (size_t i = 0; i < kSampleRatesSize; ++i) {
+ for (size_t i = 0; i < arraysize(kSampleRates); ++i) {
RunManualVolumeChangeIsPossibleTest(kSampleRates[i]);
}
}
@@ -1295,11 +1291,11 @@ TEST_F(ApmTest, ManualVolumeChangeIsPossible) {
#if !defined(WEBRTC_ANDROID) && !defined(WEBRTC_IOS)
TEST_F(ApmTest, AgcOnlyAdaptsWhenTargetSignalIsPresent) {
const int kSampleRateHz = 16000;
- const int kSamplesPerChannel =
- AudioProcessing::kChunkSizeMs * kSampleRateHz / 1000;
+ const size_t kSamplesPerChannel =
+ static_cast<size_t>(AudioProcessing::kChunkSizeMs * kSampleRateHz / 1000);
const int kNumInputChannels = 2;
const int kNumOutputChannels = 1;
- const int kNumChunks = 700;
+ const size_t kNumChunks = 700;
const float kScaleFactor = 0.25f;
Config config;
std::vector<webrtc::Point> geometry;
@@ -1313,8 +1309,8 @@ TEST_F(ApmTest, AgcOnlyAdaptsWhenTargetSignalIsPresent) {
EXPECT_EQ(kNoErr, apm->gain_control()->Enable(true));
ChannelBuffer<float> src_buf(kSamplesPerChannel, kNumInputChannels);
ChannelBuffer<float> dest_buf(kSamplesPerChannel, kNumOutputChannels);
- const int max_length = kSamplesPerChannel * std::max(kNumInputChannels,
- kNumOutputChannels);
+ const size_t max_length = kSamplesPerChannel * std::max(kNumInputChannels,
+ kNumOutputChannels);
rtc::scoped_ptr<int16_t[]> int_data(new int16_t[max_length]);
rtc::scoped_ptr<float[]> float_data(new float[max_length]);
std::string filename = ResourceFilePath("far", kSampleRateHz);
@@ -1326,13 +1322,13 @@ TEST_F(ApmTest, AgcOnlyAdaptsWhenTargetSignalIsPresent) {
bool is_target = false;
EXPECT_CALL(*beamformer, is_target_present())
.WillRepeatedly(testing::ReturnPointee(&is_target));
- for (int i = 0; i < kNumChunks; ++i) {
+ for (size_t i = 0; i < kNumChunks; ++i) {
ASSERT_TRUE(ReadChunk(far_file,
int_data.get(),
float_data.get(),
&src_buf));
for (int j = 0; j < kNumInputChannels; ++j) {
- for (int k = 0; k < kSamplesPerChannel; ++k) {
+ for (size_t k = 0; k < kSamplesPerChannel; ++k) {
src_buf.channels()[j][k] *= kScaleFactor;
}
}
@@ -1351,13 +1347,13 @@ TEST_F(ApmTest, AgcOnlyAdaptsWhenTargetSignalIsPresent) {
apm->gain_control()->compression_gain_db());
rewind(far_file);
is_target = true;
- for (int i = 0; i < kNumChunks; ++i) {
+ for (size_t i = 0; i < kNumChunks; ++i) {
ASSERT_TRUE(ReadChunk(far_file,
int_data.get(),
float_data.get(),
&src_buf));
for (int j = 0; j < kNumInputChannels; ++j) {
- for (int k = 0; k < kSamplesPerChannel; ++k) {
+ for (size_t k = 0; k < kSamplesPerChannel; ++k) {
src_buf.channels()[j][k] *= kScaleFactor;
}
}
@@ -1386,7 +1382,7 @@ TEST_F(ApmTest, NoiseSuppression) {
NoiseSuppression::kHigh,
NoiseSuppression::kVeryHigh
};
- for (size_t i = 0; i < sizeof(level)/sizeof(*level); i++) {
+ for (size_t i = 0; i < arraysize(level); i++) {
EXPECT_EQ(apm_->kNoError,
apm_->noise_suppression()->set_level(level[i]));
EXPECT_EQ(level[i], apm_->noise_suppression()->level());
@@ -1488,7 +1484,7 @@ TEST_F(ApmTest, VoiceDetection) {
VoiceDetection::kModerateLikelihood,
VoiceDetection::kHighLikelihood
};
- for (size_t i = 0; i < sizeof(likelihood)/sizeof(*likelihood); i++) {
+ for (size_t i = 0; i < arraysize(likelihood); i++) {
EXPECT_EQ(apm_->kNoError,
apm_->voice_detection()->set_likelihood(likelihood[i]));
EXPECT_EQ(likelihood[i], apm_->voice_detection()->likelihood());
@@ -1520,7 +1516,7 @@ TEST_F(ApmTest, VoiceDetection) {
AudioFrame::kVadPassive,
AudioFrame::kVadUnknown
};
- for (size_t i = 0; i < sizeof(activity)/sizeof(*activity); i++) {
+ for (size_t i = 0; i < arraysize(activity); i++) {
frame_->vad_activity_ = activity[i];
EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
EXPECT_EQ(activity[i], frame_->vad_activity_);
@@ -1546,7 +1542,7 @@ TEST_F(ApmTest, AllProcessingDisabledByDefault) {
}
TEST_F(ApmTest, NoProcessingWhenAllComponentsDisabled) {
- for (size_t i = 0; i < kSampleRatesSize; i++) {
+ for (size_t i = 0; i < arraysize(kSampleRates); i++) {
Init(kSampleRates[i], kSampleRates[i], kSampleRates[i], 2, 2, 2, false);
SetFrameTo(frame_, 1000, 2000);
AudioFrame frame_copy;
@@ -1598,7 +1594,7 @@ TEST_F(ApmTest, NoProcessingWhenAllComponentsDisabledFloat) {
TEST_F(ApmTest, IdenticalInputChannelsResultInIdenticalOutputChannels) {
EnableAllComponents();
- for (size_t i = 0; i < kProcessSampleRatesSize; i++) {
+ for (size_t i = 0; i < arraysize(kProcessSampleRates); i++) {
Init(kProcessSampleRates[i],
kProcessSampleRates[i],
kProcessSampleRates[i],
@@ -1937,8 +1933,8 @@ TEST_F(ApmTest, FloatAndIntInterfacesGiveSimilarResults) {
const int num_render_channels = test->num_reverse_channels();
const int num_input_channels = test->num_input_channels();
const int num_output_channels = test->num_output_channels();
- const int samples_per_channel = test->sample_rate() *
- AudioProcessing::kChunkSizeMs / 1000;
+ const size_t samples_per_channel = static_cast<size_t>(
+ test->sample_rate() * AudioProcessing::kChunkSizeMs / 1000);
Init(test->sample_rate(), test->sample_rate(), test->sample_rate(),
num_input_channels, num_output_channels, num_render_channels, true);
@@ -2030,9 +2026,9 @@ TEST_F(ApmTest, Process) {
OpenFileAndReadMessage(ref_filename_, &ref_data);
} else {
// Write the desired tests to the protobuf reference file.
- for (size_t i = 0; i < kChannelsSize; i++) {
- for (size_t j = 0; j < kChannelsSize; j++) {
- for (size_t l = 0; l < kProcessSampleRatesSize; l++) {
+ for (size_t i = 0; i < arraysize(kChannels); i++) {
+ for (size_t j = 0; j < arraysize(kChannels); j++) {
+ for (size_t l = 0; l < arraysize(kProcessSampleRates); l++) {
audioproc::Test* test = ref_data.add_test();
test->set_num_reverse_channels(kChannels[i]);
test->set_num_input_channels(kChannels[j]);
@@ -2259,12 +2255,11 @@ TEST_F(ApmTest, NoErrorsWithKeyboardChannel) {
{AudioProcessing::kStereoAndKeyboard, AudioProcessing::kMono},
{AudioProcessing::kStereoAndKeyboard, AudioProcessing::kStereo},
};
- size_t channel_format_size = sizeof(cf) / sizeof(*cf);
rtc::scoped_ptr<AudioProcessing> ap(AudioProcessing::Create());
// Enable one component just to ensure some processing takes place.
ap->noise_suppression()->Enable(true);
- for (size_t i = 0; i < channel_format_size; ++i) {
+ for (size_t i = 0; i < arraysize(cf); ++i) {
const int in_rate = 44100;
const int out_rate = 48000;
ChannelBuffer<float> in_cb(SamplesFromRate(in_rate),
@@ -2291,7 +2286,7 @@ TEST_F(ApmTest, NoErrorsWithKeyboardChannel) {
// error results to the supplied accumulators.
void UpdateBestSNR(const float* ref,
const float* test,
- int length,
+ size_t length,
int expected_delay,
double* variance_acc,
double* sq_error_acc) {
@@ -2303,7 +2298,7 @@ void UpdateBestSNR(const float* ref,
++delay) {
double sq_error = 0;
double variance = 0;
- for (int i = 0; i < length - delay; ++i) {
+ for (size_t i = 0; i < length - delay; ++i) {
double error = test[i + delay] - ref[i];
sq_error += error * error;
variance += ref[i] * ref[i];
@@ -2355,14 +2350,10 @@ class AudioProcessingTest
static void SetUpTestCase() {
// Create all needed output reference files.
const int kNativeRates[] = {8000, 16000, 32000, 48000};
- const size_t kNativeRatesSize =
- sizeof(kNativeRates) / sizeof(*kNativeRates);
const int kNumChannels[] = {1, 2};
- const size_t kNumChannelsSize =
- sizeof(kNumChannels) / sizeof(*kNumChannels);
- for (size_t i = 0; i < kNativeRatesSize; ++i) {
- for (size_t j = 0; j < kNumChannelsSize; ++j) {
- for (size_t k = 0; k < kNumChannelsSize; ++k) {
+ for (size_t i = 0; i < arraysize(kNativeRates); ++i) {
+ for (size_t j = 0; j < arraysize(kNumChannels); ++j) {
+ for (size_t k = 0; k < arraysize(kNumChannels); ++k) {
// The reference files always have matching input and output channels.
ProcessFormat(kNativeRates[i], kNativeRates[i], kNativeRates[i],
kNativeRates[i], kNumChannels[j], kNumChannels[j],
@@ -2461,18 +2452,19 @@ class AudioProcessingTest
// Dump forward output to file.
Interleave(out_cb.channels(), out_cb.num_frames(), out_cb.num_channels(),
float_data.get());
- int out_length = out_cb.num_channels() * out_cb.num_frames();
+ size_t out_length = out_cb.num_channels() * out_cb.num_frames();
- ASSERT_EQ(static_cast<size_t>(out_length),
+ ASSERT_EQ(out_length,
fwrite(float_data.get(), sizeof(float_data[0]),
out_length, out_file));
// Dump reverse output to file.
Interleave(rev_out_cb.channels(), rev_out_cb.num_frames(),
rev_out_cb.num_channels(), float_data.get());
- int rev_out_length = rev_out_cb.num_channels() * rev_out_cb.num_frames();
+ size_t rev_out_length =
+ rev_out_cb.num_channels() * rev_out_cb.num_frames();
- ASSERT_EQ(static_cast<size_t>(rev_out_length),
+ ASSERT_EQ(rev_out_length,
fwrite(float_data.get(), sizeof(float_data[0]), rev_out_length,
rev_out_file));
@@ -2508,9 +2500,8 @@ TEST_P(AudioProcessingTest, Formats) {
{2, 2, 1, 1},
{2, 2, 2, 2},
};
- size_t channel_format_size = sizeof(cf) / sizeof(*cf);
- for (size_t i = 0; i < channel_format_size; ++i) {
+ for (size_t i = 0; i < arraysize(cf); ++i) {
ProcessFormat(input_rate_, output_rate_, reverse_input_rate_,
reverse_output_rate_, cf[i].num_input, cf[i].num_output,
cf[i].num_reverse_input, cf[i].num_reverse_output, "out");
@@ -2560,8 +2551,8 @@ TEST_P(AudioProcessingTest, Formats) {
ASSERT_TRUE(out_file != NULL);
ASSERT_TRUE(ref_file != NULL);
- const int ref_length = SamplesFromRate(ref_rate) * out_num;
- const int out_length = SamplesFromRate(out_rate) * out_num;
+ const size_t ref_length = SamplesFromRate(ref_rate) * out_num;
+ const size_t out_length = SamplesFromRate(out_rate) * out_num;
// Data from the reference file.
rtc::scoped_ptr<float[]> ref_data(new float[ref_length]);
// Data from the output file.
@@ -2601,8 +2592,9 @@ TEST_P(AudioProcessingTest, Formats) {
if (out_rate != ref_rate) {
// Resample the output back to its internal processing rate if
// necessary.
- ASSERT_EQ(ref_length, resampler.Resample(out_ptr, out_length,
- cmp_data.get(), ref_length));
+ ASSERT_EQ(ref_length,
+ static_cast<size_t>(resampler.Resample(
+ out_ptr, out_length, cmp_data.get(), ref_length)));
out_ptr = cmp_data.get();
}
diff --git a/webrtc/modules/audio_processing/test/test_utils.cc b/webrtc/modules/audio_processing/test/test_utils.cc
index 25181a1c4d..74f8b73882 100644
--- a/webrtc/modules/audio_processing/test/test_utils.cc
+++ b/webrtc/modules/audio_processing/test/test_utils.cc
@@ -75,7 +75,7 @@ void WriteIntData(const int16_t* data,
}
void WriteFloatData(const float* const* data,
- int samples_per_channel,
+ size_t samples_per_channel,
int num_channels,
WavWriter* wav_file,
RawFile* raw_file) {
@@ -105,8 +105,8 @@ FILE* OpenFile(const std::string& filename, const char* mode) {
return file;
}
-int SamplesFromRate(int rate) {
- return AudioProcessing::kChunkSizeMs * rate / 1000;
+size_t SamplesFromRate(int rate) {
+ return static_cast<size_t>(AudioProcessing::kChunkSizeMs * rate / 1000);
}
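SamplesFromRate() now returns size_t; the arithmetic is the standard 10 ms chunk (kChunkSizeMs is 10). For example:

    // 48000 Hz: 10 * 48000 / 1000 = 480 samples per 10 ms chunk.
    size_t samples = SamplesFromRate(48000);  // 480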
void SetFrameSampleRate(AudioFrame* frame,
diff --git a/webrtc/modules/audio_processing/test/test_utils.h b/webrtc/modules/audio_processing/test/test_utils.h
index 93a0138c16..291e03e5f4 100644
--- a/webrtc/modules/audio_processing/test/test_utils.h
+++ b/webrtc/modules/audio_processing/test/test_utils.h
@@ -78,7 +78,7 @@ void WriteIntData(const int16_t* data,
RawFile* raw_file);
void WriteFloatData(const float* const* data,
- int samples_per_channel,
+ size_t samples_per_channel,
int num_channels,
WavWriter* wav_file,
RawFile* raw_file);
@@ -86,7 +86,7 @@ void WriteFloatData(const float* const* data,
// Exits on failure; do not use in unit tests.
FILE* OpenFile(const std::string& filename, const char* mode);
-int SamplesFromRate(int rate);
+size_t SamplesFromRate(int rate);
void SetFrameSampleRate(AudioFrame* frame,
int sample_rate_hz);
@@ -104,11 +104,11 @@ void SetContainerFormat(int sample_rate_hz,
AudioProcessing::ChannelLayout LayoutFromChannels(int num_channels);
template <typename T>
-float ComputeSNR(const T* ref, const T* test, int length, float* variance) {
+float ComputeSNR(const T* ref, const T* test, size_t length, float* variance) {
float mse = 0;
float mean = 0;
*variance = 0;
- for (int i = 0; i < length; ++i) {
+ for (size_t i = 0; i < length; ++i) {
T error = ref[i] - test[i];
mse += error * error;
*variance += ref[i] * ref[i];
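ComputeSNR() now takes a size_t length, consistent with the buffer lengths computed in the Formats test above. A sketch of a call site reusing those names (ref_data, out_ptr, ref_length):

    float variance = 0.f;
    // ref and test buffers are the same length; length is size_t now.
    float snr = ComputeSNR(ref_data.get(), out_ptr, ref_length, &variance);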
diff --git a/webrtc/modules/audio_processing/test/unpack.cc b/webrtc/modules/audio_processing/test/unpack.cc
index 24578e240c..cd9205e16f 100644
--- a/webrtc/modules/audio_processing/test/unpack.cc
+++ b/webrtc/modules/audio_processing/test/unpack.cc
@@ -76,9 +76,9 @@ int do_main(int argc, char* argv[]) {
Event event_msg;
int frame_count = 0;
- int reverse_samples_per_channel = 0;
- int input_samples_per_channel = 0;
- int output_samples_per_channel = 0;
+ size_t reverse_samples_per_channel = 0;
+ size_t input_samples_per_channel = 0;
+ size_t output_samples_per_channel = 0;
int num_reverse_channels = 0;
int num_input_channels = 0;
int num_output_channels = 0;
@@ -283,9 +283,12 @@ int do_main(int argc, char* argv[]) {
output_sample_rate = input_sample_rate;
}
- reverse_samples_per_channel = reverse_sample_rate / 100;
- input_samples_per_channel = input_sample_rate / 100;
- output_samples_per_channel = output_sample_rate / 100;
+ reverse_samples_per_channel =
+ static_cast<size_t>(reverse_sample_rate / 100);
+ input_samples_per_channel =
+ static_cast<size_t>(input_sample_rate / 100);
+ output_samples_per_channel =
+ static_cast<size_t>(output_sample_rate / 100);
if (!FLAGS_raw) {
// The WAV files need to be reset every time, because they can't change
diff --git a/webrtc/modules/media_file/media_file_utility.cc b/webrtc/modules/media_file/media_file_utility.cc
index babfa6378b..1c2f7fd4c5 100644
--- a/webrtc/modules/media_file/media_file_utility.cc
+++ b/webrtc/modules/media_file/media_file_utility.cc
@@ -38,8 +38,8 @@ struct WAVE_RIFF_header
// the chunk size (16, 18 or 40 byte)
struct WAVE_CHUNK_header
{
- int8_t fmt_ckID[4];
- int32_t fmt_ckSize;
+ int8_t fmt_ckID[4];
+ uint32_t fmt_ckSize;
};
} // unnamed namespace
@@ -79,15 +79,15 @@ int32_t ModuleFileUtility::ReadWavHeader(InStream& wav)
// TODO (hellner): tmpStr and tmpStr2 seem unnecessary here.
char tmpStr[6] = "FOUR";
unsigned char tmpStr2[4];
- int32_t i, len;
+ size_t i;
bool dataFound = false;
bool fmtFound = false;
int8_t dummyRead;
_dataSize = 0;
- len = wav.Read(&RIFFheaderObj, sizeof(WAVE_RIFF_header));
- if(len != sizeof(WAVE_RIFF_header))
+ int len = wav.Read(&RIFFheaderObj, sizeof(WAVE_RIFF_header));
+ if (len != static_cast<int>(sizeof(WAVE_RIFF_header)))
{
WEBRTC_TRACE(kTraceError, kTraceFile, _id,
"Not a wave file (too short)");
@@ -123,14 +123,13 @@ int32_t ModuleFileUtility::ReadWavHeader(InStream& wav)
// in a subroutine.
memcpy(tmpStr2, &CHUNKheaderObj.fmt_ckSize, 4);
CHUNKheaderObj.fmt_ckSize =
- (int32_t) ((uint32_t) tmpStr2[0] +
- (((uint32_t)tmpStr2[1])<<8) +
- (((uint32_t)tmpStr2[2])<<16) +
- (((uint32_t)tmpStr2[3])<<24));
+ (uint32_t)tmpStr2[0] + (((uint32_t)tmpStr2[1]) << 8) +
+ (((uint32_t)tmpStr2[2]) << 16) + (((uint32_t)tmpStr2[3]) << 24);
memcpy(tmpStr, CHUNKheaderObj.fmt_ckID, 4);
- while ((len == sizeof(WAVE_CHUNK_header)) && (!fmtFound || !dataFound))
+ while ((len == static_cast<int>(sizeof(WAVE_CHUNK_header))) &&
+ (!fmtFound || !dataFound))
{
if(strcmp(tmpStr, "fmt ") == 0)
{
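The byte-assembly expression decodes fmt_ckSize as little-endian independent of host byte order; with the field now uint32_t, the old int32_t round trip is gone. The same idea as a named helper (a sketch, not code from this change):

    // Assemble a little-endian 32-bit value from four raw bytes.
    static uint32_t ReadLE32(const unsigned char b[4]) {
      return static_cast<uint32_t>(b[0]) |
             (static_cast<uint32_t>(b[1]) << 8) |
             (static_cast<uint32_t>(b[2]) << 16) |
             (static_cast<uint32_t>(b[3]) << 24);
    }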
@@ -164,9 +163,14 @@ int32_t ModuleFileUtility::ReadWavHeader(InStream& wav)
(int16_t) ((uint32_t)tmpStr2[0] +
(((uint32_t)tmpStr2[1])<<8));
+ if (CHUNKheaderObj.fmt_ckSize < sizeof(WAVE_FMTINFO_header))
+ {
+ WEBRTC_TRACE(kTraceError, kTraceFile, _id,
+ "Chunk size is too small");
+ return -1;
+ }
for (i = 0;
- i < (CHUNKheaderObj.fmt_ckSize -
- (int32_t)sizeof(WAVE_FMTINFO_header));
+ i < CHUNKheaderObj.fmt_ckSize - sizeof(WAVE_FMTINFO_header);
i++)
{
len = wav.Read(&dummyRead, 1);
@@ -187,7 +191,7 @@ int32_t ModuleFileUtility::ReadWavHeader(InStream& wav)
}
else
{
- for (i = 0; i < (CHUNKheaderObj.fmt_ckSize); i++)
+ for (i = 0; i < CHUNKheaderObj.fmt_ckSize; i++)
{
len = wav.Read(&dummyRead, 1);
if(len != 1)
@@ -203,10 +207,8 @@ int32_t ModuleFileUtility::ReadWavHeader(InStream& wav)
memcpy(tmpStr2, &CHUNKheaderObj.fmt_ckSize, 4);
CHUNKheaderObj.fmt_ckSize =
- (int32_t) ((uint32_t)tmpStr2[0] +
- (((uint32_t)tmpStr2[1])<<8) +
- (((uint32_t)tmpStr2[2])<<16) +
- (((uint32_t)tmpStr2[3])<<24));
+ (uint32_t)tmpStr2[0] + (((uint32_t)tmpStr2[1]) << 8) +
+ (((uint32_t)tmpStr2[2]) << 16) + (((uint32_t)tmpStr2[3]) << 24);
memcpy(tmpStr, CHUNKheaderObj.fmt_ckID, 4);
}
@@ -241,30 +243,12 @@ int32_t ModuleFileUtility::ReadWavHeader(InStream& wav)
}
// Calculate the number of bytes that 10 ms of audio data correspond to.
- if(_wavFormatObj.formatTag == kWavFormatPcm)
- {
- // TODO (hellner): integer division for 22050 and 11025 would yield
- // the same result as the else statement. Remove those
- // special cases?
- if(_wavFormatObj.nSamplesPerSec == 44100)
- {
- _readSizeBytes = 440 * _wavFormatObj.nChannels *
- (_wavFormatObj.nBitsPerSample / 8);
- } else if(_wavFormatObj.nSamplesPerSec == 22050) {
- _readSizeBytes = 220 * _wavFormatObj.nChannels *
- (_wavFormatObj.nBitsPerSample / 8);
- } else if(_wavFormatObj.nSamplesPerSec == 11025) {
- _readSizeBytes = 110 * _wavFormatObj.nChannels *
- (_wavFormatObj.nBitsPerSample / 8);
- } else {
- _readSizeBytes = (_wavFormatObj.nSamplesPerSec/100) *
- _wavFormatObj.nChannels * (_wavFormatObj.nBitsPerSample / 8);
- }
-
- } else {
- _readSizeBytes = (_wavFormatObj.nSamplesPerSec/100) *
- _wavFormatObj.nChannels * (_wavFormatObj.nBitsPerSample / 8);
- }
+ size_t samples_per_10ms =
+ ((_wavFormatObj.formatTag == kWavFormatPcm) &&
+ (_wavFormatObj.nSamplesPerSec == 44100)) ?
+ 440 : static_cast<size_t>(_wavFormatObj.nSamplesPerSec / 100);
+ _readSizeBytes = samples_per_10ms * _wavFormatObj.nChannels *
+ (_wavFormatObj.nBitsPerSample / 8);
return 0;
}
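The collapsed expression preserves one deliberate quirk: PCM at 44100 Hz keeps its legacy 440-sample frame rather than the 441 that nSamplesPerSec / 100 would give. The old 22050 and 11025 branches were redundant, as the removed TODO suspected, since integer division already yields their values:

    // 44100 Hz PCM: 440 samples          (legacy special case, kept)
    // 22050 Hz:     22050 / 100 = 220    (same as the removed branch)
    // 11025 Hz:     11025 / 100 = 110    (same as the removed branch)
    // 48000 Hz:     48000 / 100 = 480
    // _readSizeBytes = samples_per_10ms * nChannels * (nBitsPerSample / 8)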
@@ -376,15 +360,15 @@ int32_t ModuleFileUtility::InitWavReading(InStream& wav,
if(start > 0)
{
uint8_t dummy[WAV_MAX_BUFFER_SIZE];
- int32_t readLength;
+ int readLength;
if(_readSizeBytes <= WAV_MAX_BUFFER_SIZE)
{
while (_playoutPositionMs < start)
{
readLength = wav.Read(dummy, _readSizeBytes);
- if(readLength == _readSizeBytes)
+ if(readLength == static_cast<int>(_readSizeBytes))
{
- _readPos += readLength;
+ _readPos += _readSizeBytes;
_playoutPositionMs += 10;
}
else // Must have reached EOF before start position!
@@ -406,7 +390,7 @@ int32_t ModuleFileUtility::InitWavReading(InStream& wav,
{
return -1;
}
- _bytesPerSample = _wavFormatObj.nBitsPerSample / 8;
+ _bytesPerSample = static_cast<size_t>(_wavFormatObj.nBitsPerSample / 8);
_startPointInMs = start;
@@ -431,9 +415,9 @@ int32_t ModuleFileUtility::ReadWavDataAsMono(
bufferSize);
// The number of bytes that should be read from file.
- const uint32_t totalBytesNeeded = _readSizeBytes;
+ const size_t totalBytesNeeded = _readSizeBytes;
// The number of bytes that will be written to outData.
- const uint32_t bytesRequested = (codec_info_.channels == 2) ?
+ const size_t bytesRequested = (codec_info_.channels == 2) ?
totalBytesNeeded >> 1 : totalBytesNeeded;
if(bufferSize < bytesRequested)
{
@@ -472,7 +456,7 @@ int32_t ModuleFileUtility::ReadWavDataAsMono(
// Output data should be mono.
if(codec_info_.channels == 2)
{
- for (uint32_t i = 0; i < bytesRequested / _bytesPerSample; i++)
+ for (size_t i = 0; i < bytesRequested / _bytesPerSample; i++)
{
// Sample value is the average of left and right buffer rounded to
// closest integer value. Note samples can be either 1 or 2 bytes.
@@ -490,7 +474,7 @@ int32_t ModuleFileUtility::ReadWavDataAsMono(
}
memcpy(outData, _tempData, bytesRequested);
}
- return bytesRequested;
+ return static_cast<int32_t>(bytesRequested);
}
int32_t ModuleFileUtility::ReadWavDataAsStereo(
@@ -534,10 +518,10 @@ int32_t ModuleFileUtility::ReadWavDataAsStereo(
}
// The number of bytes that should be read from file.
- const uint32_t totalBytesNeeded = _readSizeBytes;
+ const size_t totalBytesNeeded = _readSizeBytes;
// The number of bytes that will be written to the left and the right
// buffers.
- const uint32_t bytesRequested = totalBytesNeeded >> 1;
+ const size_t bytesRequested = totalBytesNeeded >> 1;
if(bufferSize < bytesRequested)
{
WEBRTC_TRACE(kTraceError, kTraceFile, _id,
@@ -558,7 +542,7 @@ int32_t ModuleFileUtility::ReadWavDataAsStereo(
// either 1 or 2 bytes
if(_bytesPerSample == 1)
{
- for (uint32_t i = 0; i < bytesRequested; i++)
+ for (size_t i = 0; i < bytesRequested; i++)
{
outDataLeft[i] = _tempData[2 * i];
outDataRight[i] = _tempData[(2 * i) + 1];
@@ -572,35 +556,29 @@ int32_t ModuleFileUtility::ReadWavDataAsStereo(
outDataRight);
// Bytes requested to samples requested.
- uint32_t sampleCount = bytesRequested >> 1;
- for (uint32_t i = 0; i < sampleCount; i++)
+ size_t sampleCount = bytesRequested >> 1;
+ for (size_t i = 0; i < sampleCount; i++)
{
outLeft[i] = sampleData[2 * i];
outRight[i] = sampleData[(2 * i) + 1];
}
} else {
WEBRTC_TRACE(kTraceError, kTraceFile, _id,
- "ReadWavStereoData: unsupported sample size %d!",
+ "ReadWavStereoData: unsupported sample size %" PRIuS "!",
_bytesPerSample);
assert(false);
return -1;
}
- return bytesRequested;
+ return static_cast<int32_t>(bytesRequested);
}
-int32_t ModuleFileUtility::ReadWavData(
- InStream& wav,
- uint8_t* buffer,
- const uint32_t dataLengthInBytes)
+int32_t ModuleFileUtility::ReadWavData(InStream& wav,
+ uint8_t* buffer,
+ size_t dataLengthInBytes)
{
- WEBRTC_TRACE(
- kTraceStream,
- kTraceFile,
- _id,
- "ModuleFileUtility::ReadWavData(wav= 0x%x, buffer= 0x%x, dataLen= %ld)",
- &wav,
- buffer,
- dataLengthInBytes);
+ WEBRTC_TRACE(kTraceStream, kTraceFile, _id,
+ "ModuleFileUtility::ReadWavData(wav= 0x%x, buffer= 0x%x, "
+ "dataLen= %" PRIuS ")", &wav, buffer, dataLengthInBytes);
if(buffer == NULL)
@@ -613,7 +591,7 @@ int32_t ModuleFileUtility::ReadWavData(
// Make sure that a read won't return too few samples.
// TODO (hellner): why not read the remaining bytes needed from the start
// of the file?
- if((_dataSize - _readPos) < (int32_t)dataLengthInBytes)
+ if(_dataSize < (_readPos + dataLengthInBytes))
{
// Rewind() being -1 may be due to the file not supposed to be looped.
if(wav.Rewind() == -1)
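With _dataSize and _readPos now size_t, the old form (_dataSize - _readPos) < dataLengthInBytes would wrap to a huge value whenever _readPos exceeded _dataSize; rewriting the test as an addition keeps the unsigned comparison safe (assuming the sum itself cannot overflow size_t, which holds for these file sizes). Schematically:

    // Unsigned subtraction wraps: size_t(2) - size_t(5) == SIZE_MAX - 2,
    // which would compare as "plenty of data left". The additive form does not:
    if (_dataSize < _readPos + dataLengthInBytes) {
      // fewer than dataLengthInBytes bytes remain; rewind or bail out
    }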
@@ -696,7 +674,8 @@ int32_t ModuleFileUtility::InitWavWriting(OutStream& wav,
{
return -1;
}
- }else if(STR_CASE_CMP(codecInst.plname, "PCMA") == 0)
+ }
+ else if(STR_CASE_CMP(codecInst.plname, "PCMA") == 0)
{
_bytesPerSample = 1;
if(WriteWavHeader(wav, 8000, _bytesPerSample, channels, kWavFormatALaw,
@@ -729,15 +708,9 @@ int32_t ModuleFileUtility::WriteWavData(OutStream& out,
const int8_t* buffer,
const size_t dataLength)
{
- WEBRTC_TRACE(
- kTraceStream,
- kTraceFile,
- _id,
- "ModuleFileUtility::WriteWavData(out= 0x%x, buf= 0x%x, dataLen= %" PRIuS
- ")",
- &out,
- buffer,
- dataLength);
+ WEBRTC_TRACE(kTraceStream, kTraceFile, _id,
+ "ModuleFileUtility::WriteWavData(out= 0x%x, buf= 0x%x, "
+ "dataLen= %" PRIuS ")", &out, buffer, dataLength);
if(buffer == NULL)
{
@@ -757,19 +730,19 @@ int32_t ModuleFileUtility::WriteWavData(OutStream& out,
int32_t ModuleFileUtility::WriteWavHeader(
OutStream& wav,
- const uint32_t freq,
- const uint32_t bytesPerSample,
- const uint32_t channels,
- const uint32_t format,
- const uint32_t lengthInBytes)
+ uint32_t freq,
+ size_t bytesPerSample,
+ uint32_t channels,
+ uint32_t format,
+ size_t lengthInBytes)
{
// Frame size in bytes for 10 ms of audio.
// TODO (hellner): 44.1 kHz has 440 samples frame size. Doesn't seem to
// be taken into consideration here!
- const int32_t frameSize = (freq / 100) * channels;
+ const size_t frameSize = (freq / 100) * channels;
// Calculate the number of full frames that the wave file contain.
- const int32_t dataLengthInBytes = frameSize * (lengthInBytes / frameSize);
+ const size_t dataLengthInBytes = frameSize * (lengthInBytes / frameSize);
uint8_t buf[kWavHeaderSize];
webrtc::WriteWavHeader(buf, channels, freq, static_cast<WavFormat>(format),
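frameSize and dataLengthInBytes are now size_t; the divide-then-multiply still rounds the payload down to whole 10 ms frames. Worked through:

    // freq = 8000, channels = 1  ->  frameSize = 80
    // lengthInBytes = 1000       ->  80 * (1000 / 80) = 80 * 12 = 960
    // i.e. the trailing 40 bytes that do not fill a frame are dropped.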
@@ -785,8 +758,7 @@ int32_t ModuleFileUtility::UpdateWavHeader(OutStream& wav)
{
return -1;
}
- uint32_t channels = (codec_info_.channels == 0) ?
- 1 : codec_info_.channels;
+ uint32_t channels = (codec_info_.channels == 0) ? 1 : codec_info_.channels;
if(STR_CASE_CMP(codec_info_.plname, "L16") == 0)
{
@@ -839,22 +811,17 @@ int32_t ModuleFileUtility::ReadPreEncodedData(
int8_t* outData,
const size_t bufferSize)
{
- WEBRTC_TRACE(
- kTraceStream,
- kTraceFile,
- _id,
- "ModuleFileUtility::ReadPreEncodedData(in= 0x%x, outData= 0x%x, "
- "bufferSize= %" PRIuS ")",
- &in,
- outData,
- bufferSize);
+ WEBRTC_TRACE(kTraceStream, kTraceFile, _id,
+ "ModuleFileUtility::ReadPreEncodedData(in= 0x%x, "
+ "outData= 0x%x, bufferSize= %" PRIuS ")", &in, outData,
+ bufferSize);
if(outData == NULL)
{
WEBRTC_TRACE(kTraceError, kTraceFile, _id, "output buffer NULL");
}
- uint32_t frameLen;
+ size_t frameLen;
uint8_t buf[64];
// Each frame has a two byte header containing the frame length.
int32_t res = in.Read(buf, 2);
@@ -874,12 +841,9 @@ int32_t ModuleFileUtility::ReadPreEncodedData(
frameLen = buf[0] + buf[1] * 256;
if(bufferSize < frameLen)
{
- WEBRTC_TRACE(
- kTraceError,
- kTraceFile,
- _id,
- "buffer not large enough to read %d bytes of pre-encoded data!",
- frameLen);
+ WEBRTC_TRACE(kTraceError, kTraceFile, _id,
+ "buffer not large enough to read %" PRIuS " bytes of "
+ "pre-encoded data!", frameLen);
return -1;
}
return in.Read(outData, frameLen);
@@ -906,15 +870,10 @@ int32_t ModuleFileUtility::WritePreEncodedData(
const int8_t* buffer,
const size_t dataLength)
{
- WEBRTC_TRACE(
- kTraceStream,
- kTraceFile,
- _id,
- "ModuleFileUtility::WritePreEncodedData(out= 0x%x, inData= 0x%x, "
- "dataLen= %" PRIuS ")",
- &out,
- buffer,
- dataLength);
+ WEBRTC_TRACE(kTraceStream, kTraceFile, _id,
+ "ModuleFileUtility::WritePreEncodedData(out= 0x%x, "
+ "inData= 0x%x, dataLen= %" PRIuS ")", &out, buffer,
+ dataLength);
if(buffer == NULL)
{
@@ -945,15 +904,9 @@ int32_t ModuleFileUtility::InitCompressedReading(
const uint32_t start,
const uint32_t stop)
{
- WEBRTC_TRACE(
- kTraceDebug,
- kTraceFile,
- _id,
- "ModuleFileUtility::InitCompressedReading(in= 0x%x, start= %d,\
- stop= %d)",
- &in,
- start,
- stop);
+ WEBRTC_TRACE(kTraceDebug, kTraceFile, _id,
+ "ModuleFileUtility::InitCompressedReading(in= 0x%x, "
+ "start= %d, stop= %d)", &in, start, stop);
#if defined(WEBRTC_CODEC_ILBC)
int16_t read_len = 0;
@@ -976,9 +929,8 @@ int32_t ModuleFileUtility::InitCompressedReading(
if(cnt==64)
{
return -1;
- } else {
- buf[cnt]=0;
}
+ buf[cnt]=0;
#ifdef WEBRTC_CODEC_ILBC
if(!strcmp("#!iLBC20\n", buf))
@@ -996,14 +948,11 @@ int32_t ModuleFileUtility::InitCompressedReading(
while (_playoutPositionMs <= _startPointInMs)
{
read_len = in.Read(buf, 38);
- if(read_len == 38)
- {
- _playoutPositionMs += 20;
- }
- else
+ if(read_len != 38)
{
return -1;
}
+ _playoutPositionMs += 20;
}
}
}
@@ -1023,14 +972,11 @@ int32_t ModuleFileUtility::InitCompressedReading(
while (_playoutPositionMs <= _startPointInMs)
{
read_len = in.Read(buf, 50);
- if(read_len == 50)
- {
- _playoutPositionMs += 20;
- }
- else
+ if(read_len != 50)
{
return -1;
}
+ _playoutPositionMs += 20;
}
}
}
@@ -1047,17 +993,11 @@ int32_t ModuleFileUtility::ReadCompressedData(InStream& in,
int8_t* outData,
size_t bufferSize)
{
- WEBRTC_TRACE(
- kTraceStream,
- kTraceFile,
- _id,
- "ModuleFileUtility::ReadCompressedData(in=0x%x, outData=0x%x, bytes=%"
- PRIuS ")",
- &in,
- outData,
- bufferSize);
+ WEBRTC_TRACE(kTraceStream, kTraceFile, _id,
+ "ModuleFileUtility::ReadCompressedData(in=0x%x, outData=0x%x, "
+ "bytes=%" PRIuS ")", &in, outData, bufferSize);
- uint32_t bytesRead = 0;
+ int bytesRead = 0;
if(! _reading)
{
@@ -1069,7 +1009,7 @@ int32_t ModuleFileUtility::ReadCompressedData(InStream& in,
if((_codecId == kCodecIlbc20Ms) ||
(_codecId == kCodecIlbc30Ms))
{
- uint32_t byteSize = 0;
+ size_t byteSize = 0;
if(_codecId == kCodecIlbc30Ms)
{
byteSize = 50;
@@ -1081,20 +1021,20 @@ int32_t ModuleFileUtility::ReadCompressedData(InStream& in,
if(bufferSize < byteSize)
{
WEBRTC_TRACE(kTraceError, kTraceFile, _id,
- "output buffer is too short to read ILBC compressed\
- data.");
+ "output buffer is too short to read ILBC compressed "
+ "data.");
assert(false);
return -1;
}
bytesRead = in.Read(outData, byteSize);
- if(bytesRead != byteSize)
+ if(bytesRead != static_cast<int>(byteSize))
{
if(!in.Rewind())
{
InitCompressedReading(in, _startPointInMs, _stopPointInMs);
bytesRead = in.Read(outData, byteSize);
- if(bytesRead != byteSize)
+ if(bytesRead != static_cast<int>(byteSize))
{
_reading = false;
return -1;
@@ -1136,9 +1076,8 @@ int32_t ModuleFileUtility::InitCompressedWriting(
const CodecInst& codecInst)
{
WEBRTC_TRACE(kTraceDebug, kTraceFile, _id,
- "ModuleFileUtility::InitCompressedWriting(out= 0x%x,\
- codecName= %s)",
- &out, codecInst.plname);
+ "ModuleFileUtility::InitCompressedWriting(out= 0x%x, "
+ "codecName= %s)", &out, codecInst.plname);
_writing = false;
@@ -1177,15 +1116,9 @@ int32_t ModuleFileUtility::WriteCompressedData(
const int8_t* buffer,
const size_t dataLength)
{
- WEBRTC_TRACE(
- kTraceStream,
- kTraceFile,
- _id,
- "ModuleFileUtility::WriteCompressedData(out= 0x%x, buf= 0x%x, "
- "dataLen= %" PRIuS ")",
- &out,
- buffer,
- dataLength);
+ WEBRTC_TRACE(kTraceStream, kTraceFile, _id,
+ "ModuleFileUtility::WriteCompressedData(out= 0x%x, buf= 0x%x, "
+ "dataLen= %" PRIuS ")", &out, buffer, dataLength);
if(buffer == NULL)
{
@@ -1204,19 +1137,12 @@ int32_t ModuleFileUtility::InitPCMReading(InStream& pcm,
const uint32_t stop,
uint32_t freq)
{
- WEBRTC_TRACE(
- kTraceInfo,
- kTraceFile,
- _id,
- "ModuleFileUtility::InitPCMReading(pcm= 0x%x, start=%d, stop=%d,\
- freq=%d)",
- &pcm,
- start,
- stop,
- freq);
+ WEBRTC_TRACE(kTraceInfo, kTraceFile, _id,
+ "ModuleFileUtility::InitPCMReading(pcm= 0x%x, start=%d, "
+ "stop=%d, freq=%d)", &pcm, start, stop, freq);
int8_t dummy[320];
- int32_t read_len;
+ int read_len;
_playoutPositionMs = 0;
_startPointInMs = start;
@@ -1261,14 +1187,11 @@ int32_t ModuleFileUtility::InitPCMReading(InStream& pcm,
while (_playoutPositionMs < _startPointInMs)
{
read_len = pcm.Read(dummy, _readSizeBytes);
- if(read_len == _readSizeBytes)
+ if(read_len != static_cast<int>(_readSizeBytes))
{
- _playoutPositionMs += 10;
- }
- else // Must have reached EOF before start position!
- {
- return -1;
+ return -1; // Must have reached EOF before start position!
}
+ _playoutPositionMs += 10;
}
}
_reading = true;
@@ -1279,23 +1202,17 @@ int32_t ModuleFileUtility::ReadPCMData(InStream& pcm,
int8_t* outData,
size_t bufferSize)
{
- WEBRTC_TRACE(
- kTraceStream,
- kTraceFile,
- _id,
- "ModuleFileUtility::ReadPCMData(pcm= 0x%x, outData= 0x%x, bufSize= %"
- PRIuS ")",
- &pcm,
- outData,
- bufferSize);
+ WEBRTC_TRACE(kTraceStream, kTraceFile, _id,
+ "ModuleFileUtility::ReadPCMData(pcm= 0x%x, outData= 0x%x, "
+ "bufSize= %" PRIuS ")", &pcm, outData, bufferSize);
if(outData == NULL)
{
- WEBRTC_TRACE(kTraceError, kTraceFile, _id,"buffer NULL");
+ WEBRTC_TRACE(kTraceError, kTraceFile, _id, "buffer NULL");
}
// Readsize for 10ms of audio data (2 bytes per sample).
- uint32_t bytesRequested = 2 * codec_info_.plfreq / 100;
+ size_t bytesRequested = static_cast<size_t>(2 * codec_info_.plfreq / 100);
if(bufferSize < bytesRequested)
{
WEBRTC_TRACE(kTraceError, kTraceFile, _id,
@@ -1304,8 +1221,8 @@ int32_t ModuleFileUtility::ReadPCMData(InStream& pcm,
return -1;
}
- uint32_t bytesRead = pcm.Read(outData, bytesRequested);
- if(bytesRead < bytesRequested)
+ int bytesRead = pcm.Read(outData, bytesRequested);
+ if(bytesRead < static_cast<int>(bytesRequested))
{
if(pcm.Rewind() == -1)
{
@@ -1320,9 +1237,9 @@ int32_t ModuleFileUtility::ReadPCMData(InStream& pcm,
}
else
{
- int32_t rest = bytesRequested - bytesRead;
- int32_t len = pcm.Read(&(outData[bytesRead]), rest);
- if(len == rest)
+ size_t rest = bytesRequested - bytesRead;
+ int len = pcm.Read(&(outData[bytesRead]), rest);
+ if(len == static_cast<int>(rest))
{
bytesRead += len;
}
@@ -1334,7 +1251,7 @@ int32_t ModuleFileUtility::ReadPCMData(InStream& pcm,
if(bytesRead <= 0)
{
WEBRTC_TRACE(kTraceError, kTraceFile, _id,
- "ReadPCMData: Failed to rewind audio file.");
+ "ReadPCMData: Failed to rewind audio file.");
return -1;
}
}
@@ -1343,7 +1260,7 @@ int32_t ModuleFileUtility::ReadPCMData(InStream& pcm,
if(bytesRead <= 0)
{
WEBRTC_TRACE(kTraceStream, kTraceFile, _id,
- "ReadPCMData: end of file");
+ "ReadPCMData: end of file");
return -1;
}
_playoutPositionMs += 10;
@@ -1414,15 +1331,9 @@ int32_t ModuleFileUtility::WritePCMData(OutStream& out,
const int8_t* buffer,
const size_t dataLength)
{
- WEBRTC_TRACE(
- kTraceStream,
- kTraceFile,
- _id,
- "ModuleFileUtility::WritePCMData(out= 0x%x, buf= 0x%x, dataLen= %" PRIuS
- ")",
- &out,
- buffer,
- dataLength);
+ WEBRTC_TRACE(kTraceStream, kTraceFile, _id,
+ "ModuleFileUtility::WritePCMData(out= 0x%x, buf= 0x%x, "
+ "dataLen= %" PRIuS ")", &out, buffer, dataLength);
if(buffer == NULL)
{
@@ -1585,7 +1496,7 @@ int32_t ModuleFileUtility::FileDurationMs(const char* fileName,
case kFileFormatCompressedFile:
{
int32_t cnt = 0;
- int32_t read_len = 0;
+ int read_len = 0;
char buf[64];
do
{
@@ -1642,15 +1553,8 @@ int32_t ModuleFileUtility::FileDurationMs(const char* fileName,
uint32_t ModuleFileUtility::PlayoutPositionMs()
{
WEBRTC_TRACE(kTraceStream, kTraceFile, _id,
- "ModuleFileUtility::PlayoutPosition()");
+ "ModuleFileUtility::PlayoutPosition()");
- if(_reading)
- {
- return _playoutPositionMs;
- }
- else
- {
- return 0;
- }
+ return _reading ? _playoutPositionMs : 0;
}
} // namespace webrtc
diff --git a/webrtc/modules/media_file/media_file_utility.h b/webrtc/modules/media_file/media_file_utility.h
index 8f543bafd3..46ec3407b7 100644
--- a/webrtc/modules/media_file/media_file_utility.h
+++ b/webrtc/modules/media_file/media_file_utility.h
@@ -176,7 +176,7 @@ public:
private:
// Biggest WAV frame supported is 10 ms at 48kHz of 2 channel, 16 bit audio.
- enum{WAV_MAX_BUFFER_SIZE = 480*2*2};
+ static const size_t WAV_MAX_BUFFER_SIZE = 480 * 2 * 2;
int32_t InitWavCodec(uint32_t samplesPerSec,
@@ -194,16 +194,16 @@ private:
// stereo. format is the encode format (e.g. PCMU, PCMA, PCM etc).
// lengthInBytes is the number of bytes the audio samples are using up.
int32_t WriteWavHeader(OutStream& stream,
- const uint32_t freqInHz,
- const uint32_t bytesPerSample,
- const uint32_t channels,
- const uint32_t format,
- const uint32_t lengthInBytes);
+ uint32_t freqInHz,
+ size_t bytesPerSample,
+ uint32_t channels,
+ uint32_t format,
+ size_t lengthInBytes);
// Put dataLengthInBytes of audio data from stream into the audioBuffer.
// The return value is the number of bytes written to audioBuffer.
int32_t ReadWavData(InStream& stream, uint8_t* audioBuffer,
- const uint32_t dataLengthInBytes);
+ size_t dataLengthInBytes);
// Update the current audio codec being used for reading or writing
// according to codecInst.
@@ -254,10 +254,10 @@ private:
// TODO (hellner): why store multiple formats. Just store either codec_info_
// or _wavFormatObj and supply conversion functions.
WAVE_FMTINFO_header _wavFormatObj;
- int32_t _dataSize; // Chunk size if reading a WAV file
+ size_t _dataSize; // Chunk size if reading a WAV file
// Number of bytes to read. I.e. frame size in bytes. May be multiple
// chunks if reading WAV.
- int32_t _readSizeBytes;
+ size_t _readSizeBytes;
int32_t _id;
@@ -270,8 +270,8 @@ private:
MediaFileUtility_CodecType _codecId;
// The amount of bytes, on average, used for one audio sample.
- int32_t _bytesPerSample;
- int32_t _readPos;
+ size_t _bytesPerSample;
+ size_t _readPos;
// Only reading or writing can be enabled, not both.
bool _reading;
diff --git a/webrtc/system_wrappers/include/aligned_array.h b/webrtc/system_wrappers/include/aligned_array.h
index 7cd182c296..e985e88357 100644
--- a/webrtc/system_wrappers/include/aligned_array.h
+++ b/webrtc/system_wrappers/include/aligned_array.h
@@ -20,16 +20,15 @@ namespace webrtc {
// aligned to the given byte alignment.
template<typename T> class AlignedArray {
public:
- AlignedArray(int rows, size_t cols, int alignment)
+ AlignedArray(int rows, size_t cols, size_t alignment)
: rows_(rows),
- cols_(cols),
- alignment_(alignment) {
- RTC_CHECK_GT(alignment_, 0);
+ cols_(cols) {
+ RTC_CHECK_GT(alignment, 0u);
head_row_ = static_cast<T**>(AlignedMalloc(rows_ * sizeof(*head_row_),
- alignment_));
+ alignment));
for (int i = 0; i < rows_; ++i) {
head_row_[i] = static_cast<T*>(AlignedMalloc(cols_ * sizeof(**head_row_),
- alignment_));
+ alignment));
}
}
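The alignment is only needed during construction, so the member is dropped; it is also size_t now, and RTC_CHECK_GT compares it against an unsigned 0u to avoid a signed/unsigned mismatch. Usage is unchanged; a sketch (the row accessor name is assumed from the class interface, not shown in this hunk):

    // 4 rows of 256 floats, each row 16-byte aligned for SIMD loads.
    AlignedArray<float> array(4, 256, 16);
    float* row0 = array.Row(0);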
@@ -79,7 +78,6 @@ template<typename T> class AlignedArray {
private:
int rows_;
size_t cols_;
- int alignment_;
T** head_row_;
};
diff --git a/webrtc/system_wrappers/source/aligned_array_unittest.cc b/webrtc/system_wrappers/source/aligned_array_unittest.cc
index 8d898af03e..eb3ad880e6 100644
--- a/webrtc/system_wrappers/source/aligned_array_unittest.cc
+++ b/webrtc/system_wrappers/source/aligned_array_unittest.cc
@@ -16,7 +16,7 @@
namespace {
-bool IsAligned(const void* ptr, int alignment) {
+bool IsAligned(const void* ptr, size_t alignment) {
return reinterpret_cast<uintptr_t>(ptr) % alignment == 0;
}
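Using size_t for the alignment matches the constructor change above, and reinterpret_cast to uintptr_t is the portable way to inspect a pointer's address bits (the commit notes that casting a pointer through int64_t is not necessarily safe). A sketch of the check in use, assuming the system_wrappers AlignedMalloc(size, alignment) helper:

    void* p = webrtc::AlignedMalloc(64, 32);
    EXPECT_TRUE(IsAligned(p, 32));  // address % 32 == 0
    webrtc::AlignedFree(p);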