author     Android Chromium Automerger <chromium-automerger@android>  2014-03-05 21:10:29 +0000
committer  Android Chromium Automerger <chromium-automerger@android>  2014-03-05 21:10:29 +0000
commit     430268aefa5210f16b74664a45329acc1d9103d1 (patch)
tree       9de1bbc14156ed69bf2625c7b47cec59726a1ac6
parent     812991021b19f10ab72c86913b66685a23259bf1 (diff)
parent     bc55f91693c7aa050f6420dae22f651ef7b03f6d (diff)
Merge third_party/webrtc from https://chromium.googlesource.com/external/webrtc/trunk/webrtc.git at bc55f91693c7aa050f6420dae22f651ef7b03f6d
This commit was generated by merge_from_chromium.py.

Change-Id: I5b5bfe0cb7b0aeb5a56a446c3b0e2b0f6d2c7483
-rw-r--r--  common_audio/audio_util.cc  31
-rw-r--r--  common_audio/audio_util_unittest.cc  47
-rw-r--r--  common_audio/include/audio_util.h  78
-rw-r--r--  modules/audio_coding/neteq/webrtc_neteq_unittest.cc  1
-rw-r--r--  modules/audio_coding/neteq4/neteq_stereo_unittest.cc  1
-rw-r--r--  modules/audio_processing/aec/aec_core.c  2
-rw-r--r--  modules/audio_processing/agc/digital_agc.c  7
-rw-r--r--  modules/audio_processing/audio_buffer.cc  47
-rw-r--r--  modules/audio_processing/audio_buffer.h  11
-rw-r--r--  modules/audio_processing/audio_processing.gypi  1
-rw-r--r--  modules/audio_processing/audio_processing_impl.cc  278
-rw-r--r--  modules/audio_processing/audio_processing_impl.h  31
-rw-r--r--  modules/audio_processing/debug.proto  14
-rw-r--r--  modules/audio_processing/echo_cancellation_impl.cc  30
-rw-r--r--  modules/audio_processing/echo_cancellation_impl.h  19
-rw-r--r--  modules/audio_processing/echo_cancellation_impl_wrapper.h  35
-rw-r--r--  modules/audio_processing/echo_control_mobile_impl.cc  17
-rw-r--r--  modules/audio_processing/echo_control_mobile_impl.h  9
-rw-r--r--  modules/audio_processing/gain_control_impl.cc  22
-rw-r--r--  modules/audio_processing/gain_control_impl.h  9
-rw-r--r--  modules/audio_processing/high_pass_filter_impl.cc  15
-rw-r--r--  modules/audio_processing/high_pass_filter_impl.h  8
-rw-r--r--  modules/audio_processing/include/audio_processing.h  36
-rw-r--r--  modules/audio_processing/include/mock_audio_processing.h  19
-rw-r--r--  modules/audio_processing/level_estimator_impl.cc  11
-rw-r--r--  modules/audio_processing/level_estimator_impl.h  10
-rw-r--r--  modules/audio_processing/noise_suppression_impl.cc  15
-rw-r--r--  modules/audio_processing/noise_suppression_impl.h  10
-rw-r--r--  modules/audio_processing/processing_component.cc  27
-rw-r--r--  modules/audio_processing/processing_component.h  6
-rw-r--r--  modules/audio_processing/test/audio_processing_unittest.cc  349
-rw-r--r--  modules/audio_processing/voice_detection_impl.cc  16
-rw-r--r--  modules/audio_processing/voice_detection_impl.h  8
-rw-r--r--  modules/pacing/include/paced_sender.h  1
-rw-r--r--  modules/pacing/paced_sender_unittest.cc  3
-rw-r--r--  modules/remote_bitrate_estimator/overuse_detector.cc  6
-rw-r--r--  modules/video_capture/android/java/src/org/webrtc/videoengine/VideoCaptureAndroid.java  26
-rw-r--r--  modules/video_coding/main/source/codec_database.cc  2
-rw-r--r--  modules/video_coding/main/source/jitter_estimator.cc  4
-rw-r--r--  modules/video_coding/main/source/receiver.cc  4
-rw-r--r--  modules/video_coding/main/source/rtt_filter.cc  2
-rw-r--r--  modules/video_coding/main/source/video_sender_unittest.cc  2
-rwxr-xr-x  test/buildbot_tests.py  3
-rw-r--r--  typedefs.h  2
-rw-r--r--  video/call_perf_tests.cc  2
-rw-r--r--  video_engine/include/vie_rtp_rtcp.h  7
-rw-r--r--  video_engine/vie_encoder.cc  4
-rw-r--r--  video_engine/vie_encoder.h  1
-rw-r--r--  video_engine/vie_rtp_rtcp_impl.cc  19
-rw-r--r--  video_engine/vie_rtp_rtcp_impl.h  2
50 files changed, 868 insertions(+), 442 deletions(-)
diff --git a/common_audio/audio_util.cc b/common_audio/audio_util.cc
index a6114fdf..0c961e1a 100644
--- a/common_audio/audio_util.cc
+++ b/common_audio/audio_util.cc
@@ -14,28 +14,19 @@
namespace webrtc {
-void Deinterleave(const int16_t* interleaved, int samples_per_channel,
- int num_channels, int16_t** deinterleaved) {
- for (int i = 0; i < num_channels; i++) {
- int16_t* channel = deinterleaved[i];
- int interleaved_idx = i;
- for (int j = 0; j < samples_per_channel; j++) {
- channel[j] = interleaved[interleaved_idx];
- interleaved_idx += num_channels;
- }
- }
+void RoundToInt16(const float* src, int size, int16_t* dest) {
+ for (int i = 0; i < size; ++i)
+ dest[i] = RoundToInt16(src[i]);
}
-void Interleave(const int16_t* const* deinterleaved, int samples_per_channel,
- int num_channels, int16_t* interleaved) {
- for (int i = 0; i < num_channels; ++i) {
- const int16_t* channel = deinterleaved[i];
- int interleaved_idx = i;
- for (int j = 0; j < samples_per_channel; j++) {
- interleaved[interleaved_idx] = channel[j];
- interleaved_idx += num_channels;
- }
- }
+void ScaleAndRoundToInt16(const float* src, int size, int16_t* dest) {
+ for (int i = 0; i < size; ++i)
+ dest[i] = ScaleAndRoundToInt16(src[i]);
+}
+
+void ScaleToFloat(const int16_t* src, int size, float* dest) {
+ for (int i = 0; i < size; ++i)
+ dest[i] = ScaleToFloat(src[i]);
}
} // namespace webrtc
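The array helpers above are thin loops over the per-sample functions declared in audio_util.h. A minimal round-trip sketch built on them (standalone; buffer contents are illustrative, not from the commit):

#include <cstdio>

#include "webrtc/common_audio/include/audio_util.h"

int main() {
  const int kSize = 4;
  const int16_t pcm[kSize] = {0, 1, -16384, 32767};
  float normalized[kSize];
  int16_t restored[kSize];
  // int16 -> float in [-1, 1], then back to full-range int16 with clamping.
  webrtc::ScaleToFloat(pcm, kSize, normalized);
  webrtc::ScaleAndRoundToInt16(normalized, kSize, restored);
  for (int i = 0; i < kSize; ++i)
    std::printf("%d -> %f -> %d\n", pcm[i], normalized[i], restored[i]);
  return 0;
}

Note the asymmetric scaling: positive samples use 1/32767 and negative samples 1/32768, which is why the round trip is lossless at both extremes (as the unit tests below verify).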
diff --git a/common_audio/audio_util_unittest.cc b/common_audio/audio_util_unittest.cc
index 7e8dee38..bf9ad812 100644
--- a/common_audio/audio_util_unittest.cc
+++ b/common_audio/audio_util_unittest.cc
@@ -16,25 +16,46 @@ namespace webrtc {
void ExpectArraysEq(const int16_t* ref, const int16_t* test, int length) {
for (int i = 0; i < length; ++i) {
- EXPECT_EQ(test[i], ref[i]);
+ EXPECT_EQ(ref[i], test[i]);
}
}
-TEST(AudioUtilTest, Clamp) {
- EXPECT_EQ(1000.f, ClampInt16(1000.f));
- EXPECT_EQ(32767.f, ClampInt16(32767.5f));
- EXPECT_EQ(-32768.f, ClampInt16(-32768.5f));
+void ExpectArraysEq(const float* ref, const float* test, int length) {
+ for (int i = 0; i < length; ++i) {
+ EXPECT_FLOAT_EQ(ref[i], test[i]);
+ }
}
-TEST(AudioUtilTest, Round) {
+TEST(AudioUtilTest, RoundToInt16) {
const int kSize = 7;
const float kInput[kSize] = {
0.f, 0.4f, 0.5f, -0.4f, -0.5f, 32768.f, -32769.f};
const int16_t kReference[kSize] = {0, 0, 1, 0, -1, 32767, -32768};
int16_t output[kSize];
RoundToInt16(kInput, kSize, output);
- for (int n = 0; n < kSize; ++n)
- EXPECT_EQ(kReference[n], output[n]);
+ ExpectArraysEq(kReference, output, kSize);
+}
+
+TEST(AudioUtilTest, ScaleAndRoundToInt16) {
+ const int kSize = 9;
+ const float kInput[kSize] = {
+ 0.f, 0.4f / 32767.f, 0.6f / 32767.f, -0.4f / 32768.f, -0.6f / 32768.f,
+ 1.f, -1.f, 1.1f, -1.1f};
+ const int16_t kReference[kSize] = {
+ 0, 0, 1, 0, -1, 32767, -32768, 32767, -32768};
+ int16_t output[kSize];
+ ScaleAndRoundToInt16(kInput, kSize, output);
+ ExpectArraysEq(kReference, output, kSize);
+}
+
+TEST(AudioUtilTest, ScaleToFloat) {
+ const int kSize = 7;
+ const int16_t kInput[kSize] = {0, 1, -1, 16384, -16384, 32767, -32768};
+ const float kReference[kSize] = {
+ 0.f, 1.f / 32767.f, -1.f / 32768.f, 16384.f / 32767.f, -0.5f, 1.f, -1.f};
+ float output[kSize];
+ ScaleToFloat(kInput, kSize, output);
+ ExpectArraysEq(kReference, output, kSize);
}
TEST(AudioUtilTest, InterleavingStereo) {
@@ -47,12 +68,12 @@ TEST(AudioUtilTest, InterleavingStereo) {
Deinterleave(kInterleaved, kSamplesPerChannel, kNumChannels, deinterleaved);
const int16_t kRefLeft[] = {2, 4, 8, 16};
const int16_t kRefRight[] = {3, 9, 27, 81};
- ExpectArraysEq(left, kRefLeft, kSamplesPerChannel);
- ExpectArraysEq(right, kRefRight, kSamplesPerChannel);
+ ExpectArraysEq(kRefLeft, left, kSamplesPerChannel);
+ ExpectArraysEq(kRefRight, right, kSamplesPerChannel);
int16_t interleaved[kLength];
Interleave(deinterleaved, kSamplesPerChannel, kNumChannels, interleaved);
- ExpectArraysEq(interleaved, kInterleaved, kLength);
+ ExpectArraysEq(kInterleaved, interleaved, kLength);
}
TEST(AudioUtilTest, InterleavingMonoIsIdentical) {
@@ -62,11 +83,11 @@ TEST(AudioUtilTest, InterleavingMonoIsIdentical) {
int16_t mono[kSamplesPerChannel];
int16_t* deinterleaved[] = {mono};
Deinterleave(kInterleaved, kSamplesPerChannel, kNumChannels, deinterleaved);
- ExpectArraysEq(mono, kInterleaved, kSamplesPerChannel);
+ ExpectArraysEq(kInterleaved, mono, kSamplesPerChannel);
int16_t interleaved[kSamplesPerChannel];
Interleave(deinterleaved, kSamplesPerChannel, kNumChannels, interleaved);
- ExpectArraysEq(interleaved, mono, kSamplesPerChannel);
+ ExpectArraysEq(mono, interleaved, kSamplesPerChannel);
}
} // namespace webrtc
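The reference/test argument swaps in this file align ExpectArraysEq with gtest's convention of the era that the expected value comes first, so failure output is labeled correctly. A small illustration (test name hypothetical):

#include "testing/gtest/include/gtest/gtest.h"

TEST(ConventionExample, ExpectedFirst) {
  const int kExpected = 1;
  int actual = 2;
  // On failure, gtest prints the first argument as the expected value and
  // the second as the actual one; swapped arguments produce a misleading
  // message even though the comparison itself is symmetric.
  EXPECT_EQ(kExpected, actual);
}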
diff --git a/common_audio/include/audio_util.h b/common_audio/include/audio_util.h
index 1e8f8d61..18fdbe2a 100644
--- a/common_audio/include/audio_util.h
+++ b/common_audio/include/audio_util.h
@@ -11,43 +11,83 @@
#ifndef WEBRTC_COMMON_AUDIO_INCLUDE_AUDIO_UTIL_H_
#define WEBRTC_COMMON_AUDIO_INCLUDE_AUDIO_UTIL_H_
+#include <limits>
+
+#include "webrtc/system_wrappers/interface/scoped_ptr.h"
#include "webrtc/typedefs.h"
namespace webrtc {
-// Clamp the floating |value| to the range representable by an int16_t.
-static inline float ClampInt16(float value) {
- const float kMaxInt16 = 32767.f;
- const float kMinInt16 = -32768.f;
- return value < kMinInt16 ? kMinInt16 :
- (value > kMaxInt16 ? kMaxInt16 : value);
+typedef std::numeric_limits<int16_t> limits_int16;
+
+static inline int16_t RoundToInt16(float v) {
+ const float kMaxRound = limits_int16::max() - 0.5f;
+ const float kMinRound = limits_int16::min() + 0.5f;
+ if (v > 0)
+ return v >= kMaxRound ? limits_int16::max() :
+ static_cast<int16_t>(v + 0.5f);
+ return v <= kMinRound ? limits_int16::min() :
+ static_cast<int16_t>(v - 0.5f);
}
-// Round |value| to the closest int16.
-static inline int16_t RoundToInt16(float value) {
- return static_cast<int16_t>(
- value > 0 ? (value >= 32766.5 ? 32767 : value + 0.5f)
- : (value <= -32767.5 ? -32768 : value - 0.5f));
+// Scale (from [-1, 1]) and round to full-range int16 with clamping.
+static inline int16_t ScaleAndRoundToInt16(float v) {
+ if (v > 0)
+ return v >= 1 ? limits_int16::max() :
+ static_cast<int16_t>(v * limits_int16::max() + 0.5f);
+ return v <= -1 ? limits_int16::min() :
+ static_cast<int16_t>(-v * limits_int16::min() - 0.5f);
}
-// Round |size| elements of |src| to the closest int16 and writes to |dest|.
-static inline void RoundToInt16(const float* src, int size, int16_t* dest) {
- for (int i = 0; i < size; ++i)
- dest[i] = RoundToInt16(src[i]);
+// Scale to float [-1, 1].
+static inline float ScaleToFloat(int16_t v) {
+ const float kMaxInt16Inverse = 1.f / limits_int16::max();
+ const float kMinInt16Inverse = 1.f / limits_int16::min();
+ return v * (v > 0 ? kMaxInt16Inverse : -kMinInt16Inverse);
}
+// Round |size| elements of |src| to int16 with clamping and write to |dest|.
+void RoundToInt16(const float* src, int size, int16_t* dest);
+
+// Scale (from [-1, 1]) and round |size| elements of |src| to full-range int16
+// with clamping and write to |dest|.
+void ScaleAndRoundToInt16(const float* src, int size, int16_t* dest);
+
+// Scale |size| elements of |src| to float [-1, 1] and write to |dest|.
+void ScaleToFloat(const int16_t* src, int size, float* dest);
+
// Deinterleave audio from |interleaved| to the channel buffers pointed to
// by |deinterleaved|. There must be sufficient space allocated in the
// |deinterleaved| buffers (|num_channels| buffers with |samples_per_channel|
// per buffer).
-void Deinterleave(const int16_t* interleaved, int samples_per_channel,
- int num_channels, int16_t** deinterleaved);
+template <typename T>
+void Deinterleave(const T* interleaved, int samples_per_channel,
+ int num_channels, T** deinterleaved) {
+ for (int i = 0; i < num_channels; ++i) {
+ T* channel = deinterleaved[i];
+ int interleaved_idx = i;
+ for (int j = 0; j < samples_per_channel; ++j) {
+ channel[j] = interleaved[interleaved_idx];
+ interleaved_idx += num_channels;
+ }
+ }
+}
// Interleave audio from the channel buffers pointed to by |deinterleaved| to
// |interleaved|. There must be sufficient space allocated in |interleaved|
// (|samples_per_channel| * |num_channels|).
-void Interleave(const int16_t* const* deinterleaved, int samples_per_channel,
- int num_channels, int16_t* interleaved);
+template <typename T>
+void Interleave(const T* const* deinterleaved, int samples_per_channel,
+ int num_channels, T* interleaved) {
+ for (int i = 0; i < num_channels; ++i) {
+ const T* channel = deinterleaved[i];
+ int interleaved_idx = i;
+ for (int j = 0; j < samples_per_channel; ++j) {
+ interleaved[interleaved_idx] = channel[j];
+ interleaved_idx += num_channels;
+ }
+ }
+}
} // namespace webrtc
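Turning Deinterleave/Interleave into templates lets the same code serve the new float path as well as the existing int16 one. A minimal sketch with float stereo data (buffer names are illustrative):

#include "webrtc/common_audio/include/audio_util.h"

void SplitAndRejoinStereo() {
  const int kSamplesPerChannel = 4;
  const int kNumChannels = 2;
  float interleaved[] = {0.1f, -0.1f, 0.2f, -0.2f, 0.3f, -0.3f, 0.4f, -0.4f};
  float left[kSamplesPerChannel];
  float right[kSamplesPerChannel];
  float* deinterleaved[] = {left, right};
  // L/R samples alternate in |interleaved|; split them per channel...
  webrtc::Deinterleave(interleaved, kSamplesPerChannel, kNumChannels,
                       deinterleaved);
  // ...and weave them back together.
  webrtc::Interleave(deinterleaved, kSamplesPerChannel, kNumChannels,
                     interleaved);
}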
diff --git a/modules/audio_coding/neteq/webrtc_neteq_unittest.cc b/modules/audio_coding/neteq/webrtc_neteq_unittest.cc
index c37f8990..c6adf036 100644
--- a/modules/audio_coding/neteq/webrtc_neteq_unittest.cc
+++ b/modules/audio_coding/neteq/webrtc_neteq_unittest.cc
@@ -17,6 +17,7 @@
#include <stdlib.h>
#include <string.h> // memset
+#include <algorithm>
#include <set>
#include <sstream>
#include <string>
diff --git a/modules/audio_coding/neteq4/neteq_stereo_unittest.cc b/modules/audio_coding/neteq4/neteq_stereo_unittest.cc
index d6c4150e..df212db2 100644
--- a/modules/audio_coding/neteq4/neteq_stereo_unittest.cc
+++ b/modules/audio_coding/neteq4/neteq_stereo_unittest.cc
@@ -10,6 +10,7 @@
// Test to verify correct stereo and multi-channel operation.
+#include <algorithm>
#include <string>
#include <list>
diff --git a/modules/audio_processing/aec/aec_core.c b/modules/audio_processing/aec/aec_core.c
index 9efa00d7..3f3d2c08 100644
--- a/modules/audio_processing/aec/aec_core.c
+++ b/modules/audio_processing/aec/aec_core.c
@@ -741,7 +741,7 @@ int WebRtcAec_GetDelayMetricsCore(AecCore* self, int* median, int* std) {
// Calculate the L1 norm, with median value as central moment.
for (i = 0; i < kHistorySizeBlocks; i++) {
- l1_norm += (float)(fabs(i - my_median) * self->delay_histogram[i]);
+ l1_norm += (float)abs(i - my_median) * self->delay_histogram[i];
}
*std = (int)(l1_norm / (float)num_delay_values + 0.5f) * kMsPerBlock;
diff --git a/modules/audio_processing/agc/digital_agc.c b/modules/audio_processing/agc/digital_agc.c
index 00565dd7..faef9141 100644
--- a/modules/audio_processing/agc/digital_agc.c
+++ b/modules/audio_processing/agc/digital_agc.c
@@ -288,12 +288,7 @@ int32_t WebRtcAgc_InitDigital(DigitalAgc_t *stt, int16_t agcMode)
int32_t WebRtcAgc_AddFarendToDigital(DigitalAgc_t *stt, const int16_t *in_far,
int16_t nrSamples)
{
- // Check for valid pointer
- if (&stt->vadFarend == NULL)
- {
- return -1;
- }
-
+ assert(stt != NULL);
// VAD for far end
WebRtcAgc_ProcessVad(&stt->vadFarend, in_far, nrSamples);
diff --git a/modules/audio_processing/audio_buffer.cc b/modules/audio_processing/audio_buffer.cc
index 048d0487..90824770 100644
--- a/modules/audio_processing/audio_buffer.cc
+++ b/modules/audio_processing/audio_buffer.cc
@@ -10,6 +10,7 @@
#include "webrtc/modules/audio_processing/audio_buffer.h"
+#include "webrtc/common_audio/include/audio_util.h"
#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
namespace webrtc {
@@ -79,11 +80,9 @@ AudioBuffer::AudioBuffer(int max_num_channels,
mixed_channels_(NULL),
mixed_low_pass_channels_(NULL),
low_pass_reference_channels_(NULL) {
- if (max_num_channels_ > 1) {
- channels_.reset(new AudioChannel[max_num_channels_]);
- mixed_channels_.reset(new AudioChannel[max_num_channels_]);
- mixed_low_pass_channels_.reset(new AudioChannel[max_num_channels_]);
- }
+ channels_.reset(new AudioChannel[max_num_channels_]);
+ mixed_channels_.reset(new AudioChannel[max_num_channels_]);
+ mixed_low_pass_channels_.reset(new AudioChannel[max_num_channels_]);
low_pass_reference_channels_.reset(new AudioChannel[max_num_channels_]);
if (samples_per_channel_ == kSamplesPer32kHzChannel) {
@@ -94,6 +93,17 @@ AudioBuffer::AudioBuffer(int max_num_channels,
AudioBuffer::~AudioBuffer() {}
+void AudioBuffer::InitForNewData(int num_channels) {
+ num_channels_ = num_channels;
+ data_ = NULL;
+ data_was_mixed_ = false;
+ num_mixed_channels_ = 0;
+ num_mixed_low_pass_channels_ = 0;
+ reference_copied_ = false;
+ activity_ = AudioFrame::kVadUnknown;
+ is_muted_ = false;
+}
+
int16_t* AudioBuffer::data(int channel) const {
assert(channel >= 0 && channel < num_channels_);
if (data_ != NULL) {
@@ -191,13 +201,8 @@ void AudioBuffer::DeinterleaveFrom(AudioFrame* frame) {
assert(frame->num_channels_ <= max_num_channels_);
assert(frame->samples_per_channel_ == samples_per_channel_);
- num_channels_ = frame->num_channels_;
- data_was_mixed_ = false;
- num_mixed_channels_ = 0;
- num_mixed_low_pass_channels_ = 0;
- reference_copied_ = false;
+ InitForNewData(frame->num_channels_);
activity_ = frame->vad_activity_;
- is_muted_ = false;
if (frame->energy_ == 0) {
is_muted_ = true;
}
@@ -252,6 +257,26 @@ void AudioBuffer::InterleaveTo(AudioFrame* frame, bool data_changed) const {
}
}
+void AudioBuffer::CopyFrom(const float* const* data, int samples_per_channel,
+ int num_channels) {
+ assert(num_channels <= max_num_channels_);
+ assert(samples_per_channel == samples_per_channel_);
+
+ InitForNewData(num_channels);
+ for (int i = 0; i < num_channels_; ++i) {
+ ScaleAndRoundToInt16(data[i], samples_per_channel, channels_[i].data);
+ }
+}
+
+void AudioBuffer::CopyTo(int samples_per_channel, int num_channels,
+ float* const* data) const {
+ assert(num_channels == num_channels_);
+ assert(samples_per_channel == samples_per_channel_);
+ for (int i = 0; i < num_channels_; ++i) {
+ ScaleToFloat(channels_[i].data, samples_per_channel, data[i]);
+ }
+}
+
// TODO(andrew): would be good to support the no-mix case with pointer
// assignment.
// TODO(andrew): handle mixing to multiple channels?
diff --git a/modules/audio_processing/audio_buffer.h b/modules/audio_processing/audio_buffer.h
index 2638bef6..1030fec3 100644
--- a/modules/audio_processing/audio_buffer.h
+++ b/modules/audio_processing/audio_buffer.h
@@ -46,17 +46,28 @@ class AudioBuffer {
bool is_muted() const;
+ // Use for int16 interleaved data.
void DeinterleaveFrom(AudioFrame* audioFrame);
void InterleaveTo(AudioFrame* audioFrame) const;
// If |data_changed| is false, only the non-audio data members will be copied
// to |frame|.
void InterleaveTo(AudioFrame* frame, bool data_changed) const;
+
+ // Use for float deinterleaved data.
+ void CopyFrom(const float* const* data, int samples_per_channel,
+ int num_channels);
+ void CopyTo(int samples_per_channel, int num_channels,
+ float* const* data) const;
+
void Mix(int num_mixed_channels);
void CopyAndMix(int num_mixed_channels);
void CopyAndMixLowPass(int num_mixed_channels);
void CopyLowPassToReference();
private:
+ // Called from DeinterleaveFrom() and CopyFrom().
+ void InitForNewData(int num_channels);
+
const int max_num_channels_;
int num_channels_;
int num_mixed_channels_;
diff --git a/modules/audio_processing/audio_processing.gypi b/modules/audio_processing/audio_processing.gypi
index da612174..920cbca9 100644
--- a/modules/audio_processing/audio_processing.gypi
+++ b/modules/audio_processing/audio_processing.gypi
@@ -56,7 +56,6 @@
'audio_processing_impl.h',
'echo_cancellation_impl.cc',
'echo_cancellation_impl.h',
- 'echo_cancellation_impl_wrapper.h',
'echo_control_mobile_impl.cc',
'echo_control_mobile_impl.h',
'gain_control_impl.cc',
diff --git a/modules/audio_processing/audio_processing_impl.cc b/modules/audio_processing/audio_processing_impl.cc
index 48297fcd..272c786d 100644
--- a/modules/audio_processing/audio_processing_impl.cc
+++ b/modules/audio_processing/audio_processing_impl.cc
@@ -12,9 +12,10 @@
#include <assert.h>
+#include "webrtc/common_audio/include/audio_util.h"
#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
#include "webrtc/modules/audio_processing/audio_buffer.h"
-#include "webrtc/modules/audio_processing/echo_cancellation_impl_wrapper.h"
+#include "webrtc/modules/audio_processing/echo_cancellation_impl.h"
#include "webrtc/modules/audio_processing/echo_control_mobile_impl.h"
#include "webrtc/modules/audio_processing/gain_control_impl.h"
#include "webrtc/modules/audio_processing/high_pass_filter_impl.h"
@@ -37,8 +38,6 @@
#endif
#endif // WEBRTC_AUDIOPROC_DEBUG_DUMP
-static const int kChunkSizeMs = 10;
-
#define RETURN_ON_ERR(expr) \
do { \
int err = expr; \
@@ -48,6 +47,24 @@ static const int kChunkSizeMs = 10;
} while (0)
namespace webrtc {
+namespace {
+
+const int kChunkSizeMs = 10;
+
+int ChannelsFromLayout(AudioProcessing::ChannelLayout layout) {
+ switch (layout) {
+ case AudioProcessing::kMono:
+ case AudioProcessing::kMonoAndKeyboard:
+ return 1;
+ case AudioProcessing::kStereo:
+ case AudioProcessing::kStereoAndKeyboard:
+ return 2;
+ }
+ assert(false);
+ return -1;
+}
+
+} // namespace
// Throughout webrtc, it's assumed that success is represented by zero.
COMPILE_ASSERT(AudioProcessing::kNoError == 0, no_error_must_be_zero);
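Much of the churn below is mechanical: repeated "err = ...; if (err != kNoError) return err;" blocks collapse into the RETURN_ON_ERR macro defined above (its middle lines fall outside the hunk shown). A self-contained sketch of the pattern, with hypothetical stage functions and 0 standing in for kNoError:

#define RETURN_ON_ERR(expr) \
  do {                      \
    int err = expr;         \
    if (err != 0) {         \
      return err;           \
    }                       \
  } while (0)

int StageOne();
int StageTwo();

// Each stage short-circuits the pipeline on the first non-zero error code.
// The do/while (0) wrapper makes the macro expand to a single statement,
// so it nests safely inside unbraced if/else.
int ProcessAll() {
  RETURN_ON_ERR(StageOne());
  RETURN_ON_ERR(StageTwo());
  return 0;
}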
@@ -97,25 +114,25 @@ AudioProcessingImpl::AudioProcessingImpl(const Config& config)
num_output_channels_(1),
output_will_be_muted_(false),
key_pressed_(false) {
- echo_cancellation_ = EchoCancellationImplWrapper::Create(this);
+ echo_cancellation_ = new EchoCancellationImpl(this, crit_);
component_list_.push_back(echo_cancellation_);
- echo_control_mobile_ = new EchoControlMobileImpl(this);
+ echo_control_mobile_ = new EchoControlMobileImpl(this, crit_);
component_list_.push_back(echo_control_mobile_);
- gain_control_ = new GainControlImpl(this);
+ gain_control_ = new GainControlImpl(this, crit_);
component_list_.push_back(gain_control_);
- high_pass_filter_ = new HighPassFilterImpl(this);
+ high_pass_filter_ = new HighPassFilterImpl(this, crit_);
component_list_.push_back(high_pass_filter_);
- level_estimator_ = new LevelEstimatorImpl(this);
+ level_estimator_ = new LevelEstimatorImpl(this, crit_);
component_list_.push_back(level_estimator_);
- noise_suppression_ = new NoiseSuppressionImpl(this);
+ noise_suppression_ = new NoiseSuppressionImpl(this, crit_);
component_list_.push_back(noise_suppression_);
- voice_detection_ = new VoiceDetectionImpl(this);
+ voice_detection_ = new VoiceDetectionImpl(this, crit_);
component_list_.push_back(voice_detection_);
SetExtraOptions(config);
@@ -152,10 +169,6 @@ AudioProcessingImpl::~AudioProcessingImpl() {
crit_ = NULL;
}
-CriticalSectionWrapper* AudioProcessingImpl::crit() const {
- return crit_;
-}
-
int AudioProcessingImpl::split_sample_rate_hz() const {
return split_sample_rate_hz_;
}
@@ -303,6 +316,8 @@ bool AudioProcessingImpl::output_will_be_muted() const {
return output_will_be_muted_;
}
+// Calls InitializeLocked() if any of the audio parameters have changed from
+// their current values.
int AudioProcessingImpl::MaybeInitializeLocked(int sample_rate_hz,
int num_input_channels, int num_output_channels, int num_reverse_channels) {
if (sample_rate_hz == sample_rate_hz_ &&
@@ -346,15 +361,62 @@ int AudioProcessingImpl::MaybeInitializeLocked(int sample_rate_hz,
return InitializeLocked();
}
-int AudioProcessingImpl::ProcessStream(AudioFrame* frame) {
+int AudioProcessingImpl::ProcessStream(float* const* data,
+ int samples_per_channel,
+ int sample_rate_hz,
+ ChannelLayout input_layout,
+ ChannelLayout output_layout) {
CriticalSectionScoped crit_scoped(crit_);
- int err = kNoError;
+ if (!data) {
+ return kNullPointerError;
+ }
- if (frame == NULL) {
+ const int num_input_channels = ChannelsFromLayout(input_layout);
+ // TODO(ajm): We now always set the output channels equal to the input
+ // channels here. Restore the ability to downmix.
+ RETURN_ON_ERR(MaybeInitializeLocked(sample_rate_hz,
+ num_input_channels, num_input_channels, num_reverse_channels_));
+ if (samples_per_channel != samples_per_channel_) {
+ return kBadDataLengthError;
+ }
+
+#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
+ if (debug_file_->Open()) {
+ event_msg_->set_type(audioproc::Event::STREAM);
+ audioproc::Stream* msg = event_msg_->mutable_stream();
+ const size_t channel_size = sizeof(float) * samples_per_channel;
+ for (int i = 0; i < num_input_channels; ++i)
+ msg->set_input_channel(i, data[i], channel_size);
+ }
+#endif
+
+ capture_audio_->CopyFrom(data, samples_per_channel, num_output_channels_);
+ RETURN_ON_ERR(ProcessStreamLocked());
+ if (output_copy_needed(is_data_processed())) {
+ capture_audio_->CopyTo(samples_per_channel, num_output_channels_, data);
+ }
+
+#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
+ if (debug_file_->Open()) {
+ audioproc::Stream* msg = event_msg_->mutable_stream();
+ const size_t channel_size = sizeof(float) * samples_per_channel;
+ for (int i = 0; i < num_output_channels_; ++i)
+ msg->set_output_channel(i, data[i], channel_size);
+ RETURN_ON_ERR(WriteMessageToDebugFile());
+ }
+#endif
+
+ return kNoError;
+}
+
+int AudioProcessingImpl::ProcessStream(AudioFrame* frame) {
+ CriticalSectionScoped crit_scoped(crit_);
+ if (!frame) {
return kNullPointerError;
}
+
// TODO(ajm): We now always set the output channels equal to the input
- // channels here. Remove the ability to downmix entirely.
+ // channels here. Restore the ability to downmix.
RETURN_ON_ERR(MaybeInitializeLocked(frame->sample_rate_hz_,
frame->num_channels_, frame->num_channels_, num_reverse_channels_));
if (frame->samples_per_channel_ != samples_per_channel_) {
@@ -369,20 +431,42 @@ int AudioProcessingImpl::ProcessStream(AudioFrame* frame) {
frame->samples_per_channel_ *
frame->num_channels_;
msg->set_input_data(frame->data_, data_size);
- msg->set_delay(stream_delay_ms_);
- msg->set_drift(echo_cancellation_->stream_drift_samples());
- msg->set_level(gain_control_->stream_analog_level());
- msg->set_keypress(key_pressed_);
}
#endif
capture_audio_->DeinterleaveFrom(frame);
-
- // TODO(ajm): experiment with mixing and AEC placement.
if (num_output_channels_ < num_input_channels_) {
capture_audio_->Mix(num_output_channels_);
frame->num_channels_ = num_output_channels_;
}
+ RETURN_ON_ERR(ProcessStreamLocked());
+ capture_audio_->InterleaveTo(frame, output_copy_needed(is_data_processed()));
+
+#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
+ if (debug_file_->Open()) {
+ audioproc::Stream* msg = event_msg_->mutable_stream();
+ const size_t data_size = sizeof(int16_t) *
+ frame->samples_per_channel_ *
+ frame->num_channels_;
+ msg->set_output_data(frame->data_, data_size);
+ RETURN_ON_ERR(WriteMessageToDebugFile());
+ }
+#endif
+
+ return kNoError;
+}
+
+
+int AudioProcessingImpl::ProcessStreamLocked() {
+#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
+ if (debug_file_->Open()) {
+ audioproc::Stream* msg = event_msg_->mutable_stream();
+ msg->set_delay(stream_delay_ms_);
+ msg->set_drift(echo_cancellation_->stream_drift_samples());
+ msg->set_level(gain_control_->stream_analog_level());
+ msg->set_keypress(key_pressed_);
+ }
+#endif
bool data_processed = is_data_processed();
if (analysis_needed(data_processed)) {
@@ -397,45 +481,18 @@ int AudioProcessingImpl::ProcessStream(AudioFrame* frame) {
}
}
- err = high_pass_filter_->ProcessCaptureAudio(capture_audio_);
- if (err != kNoError) {
- return err;
- }
-
- err = gain_control_->AnalyzeCaptureAudio(capture_audio_);
- if (err != kNoError) {
- return err;
- }
-
- err = echo_cancellation_->ProcessCaptureAudio(capture_audio_);
- if (err != kNoError) {
- return err;
- }
+ RETURN_ON_ERR(high_pass_filter_->ProcessCaptureAudio(capture_audio_));
+ RETURN_ON_ERR(gain_control_->AnalyzeCaptureAudio(capture_audio_));
+ RETURN_ON_ERR(echo_cancellation_->ProcessCaptureAudio(capture_audio_));
if (echo_control_mobile_->is_enabled() &&
noise_suppression_->is_enabled()) {
capture_audio_->CopyLowPassToReference();
}
-
- err = noise_suppression_->ProcessCaptureAudio(capture_audio_);
- if (err != kNoError) {
- return err;
- }
-
- err = echo_control_mobile_->ProcessCaptureAudio(capture_audio_);
- if (err != kNoError) {
- return err;
- }
-
- err = voice_detection_->ProcessCaptureAudio(capture_audio_);
- if (err != kNoError) {
- return err;
- }
-
- err = gain_control_->ProcessCaptureAudio(capture_audio_);
- if (err != kNoError) {
- return err;
- }
+ RETURN_ON_ERR(noise_suppression_->ProcessCaptureAudio(capture_audio_));
+ RETURN_ON_ERR(echo_control_mobile_->ProcessCaptureAudio(capture_audio_));
+ RETURN_ON_ERR(voice_detection_->ProcessCaptureAudio(capture_audio_));
+ RETURN_ON_ERR(gain_control_->ProcessCaptureAudio(capture_audio_));
if (synthesis_needed(data_processed)) {
for (int i = 0; i < num_output_channels_; i++) {
@@ -450,38 +507,48 @@ int AudioProcessingImpl::ProcessStream(AudioFrame* frame) {
}
// The level estimator operates on the recombined data.
- err = level_estimator_->ProcessStream(capture_audio_);
- if (err != kNoError) {
- return err;
+ RETURN_ON_ERR(level_estimator_->ProcessStream(capture_audio_));
+
+ was_stream_delay_set_ = false;
+ return kNoError;
+}
+
+int AudioProcessingImpl::AnalyzeReverseStream(const float* const* data,
+ int samples_per_channel,
+ int sample_rate_hz,
+ ChannelLayout layout) {
+ CriticalSectionScoped crit_scoped(crit_);
+ if (data == NULL) {
+ return kNullPointerError;
+ }
+ if (sample_rate_hz != sample_rate_hz_) {
+ return kBadSampleRateError;
}
- capture_audio_->InterleaveTo(frame, interleave_needed(data_processed));
+ const int num_channels = ChannelsFromLayout(layout);
+ RETURN_ON_ERR(MaybeInitializeLocked(sample_rate_hz_, num_input_channels_,
+ num_output_channels_, num_channels));
+ if (samples_per_channel != samples_per_channel_) {
+ return kBadDataLengthError;
+ }
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
if (debug_file_->Open()) {
- audioproc::Stream* msg = event_msg_->mutable_stream();
- const size_t data_size = sizeof(int16_t) *
- frame->samples_per_channel_ *
- frame->num_channels_;
- msg->set_output_data(frame->data_, data_size);
- err = WriteMessageToDebugFile();
- if (err != kNoError) {
- return err;
- }
+ event_msg_->set_type(audioproc::Event::REVERSE_STREAM);
+ audioproc::ReverseStream* msg = event_msg_->mutable_reverse_stream();
+ const size_t channel_size = sizeof(float) * samples_per_channel;
+ for (int i = 0; i < num_channels; ++i)
+ msg->set_channel(i, data[i], channel_size);
+ RETURN_ON_ERR(WriteMessageToDebugFile());
}
#endif
- was_stream_delay_set_ = false;
- return kNoError;
+ render_audio_->CopyFrom(data, samples_per_channel, num_channels);
+ return AnalyzeReverseStreamLocked();
}
-// TODO(ajm): Have AnalyzeReverseStream accept sample rates not matching the
-// primary stream and convert ourselves rather than having the user manage it.
-// We can be smarter and use the splitting filter when appropriate. Similarly,
-// perform downmixing here.
int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) {
CriticalSectionScoped crit_scoped(crit_);
- int err = kNoError;
if (frame == NULL) {
return kNullPointerError;
}
@@ -490,6 +557,9 @@ int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) {
}
RETURN_ON_ERR(MaybeInitializeLocked(sample_rate_hz_, num_input_channels_,
num_output_channels_, frame->num_channels_));
+ if (frame->samples_per_channel_ != samples_per_channel_) {
+ return kBadDataLengthError;
+ }
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
if (debug_file_->Open()) {
@@ -499,15 +569,19 @@ int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) {
frame->samples_per_channel_ *
frame->num_channels_;
msg->set_data(frame->data_, data_size);
- err = WriteMessageToDebugFile();
- if (err != kNoError) {
- return err;
- }
+ RETURN_ON_ERR(WriteMessageToDebugFile());
}
#endif
render_audio_->DeinterleaveFrom(frame);
+ return AnalyzeReverseStreamLocked();
+}
+// TODO(ajm): Have AnalyzeReverseStream accept sample rates not matching the
+// primary stream and convert ourselves rather than having the user manage it.
+// We can be smarter and use the splitting filter when appropriate. Similarly,
+// perform downmixing here.
+int AudioProcessingImpl::AnalyzeReverseStreamLocked() {
if (sample_rate_hz_ == kSampleRate32kHz) {
for (int i = 0; i < num_reverse_channels_; i++) {
// Split into low and high band.
@@ -520,23 +594,11 @@ int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) {
}
}
- // TODO(ajm): warnings possible from components?
- err = echo_cancellation_->ProcessRenderAudio(render_audio_);
- if (err != kNoError) {
- return err;
- }
-
- err = echo_control_mobile_->ProcessRenderAudio(render_audio_);
- if (err != kNoError) {
- return err;
- }
-
- err = gain_control_->ProcessRenderAudio(render_audio_);
- if (err != kNoError) {
- return err;
- }
+ RETURN_ON_ERR(echo_cancellation_->ProcessRenderAudio(render_audio_));
+ RETURN_ON_ERR(echo_control_mobile_->ProcessRenderAudio(render_audio_));
+ RETURN_ON_ERR(gain_control_->ProcessRenderAudio(render_audio_));
- return err; // TODO(ajm): this is for returning warnings; necessary?
+ return kNoError;
}
int AudioProcessingImpl::set_stream_delay_ms(int delay) {
@@ -567,6 +629,14 @@ bool AudioProcessingImpl::was_stream_delay_set() const {
return was_stream_delay_set_;
}
+void AudioProcessingImpl::set_stream_key_pressed(bool key_pressed) {
+ key_pressed_ = key_pressed;
+}
+
+bool AudioProcessingImpl::stream_key_pressed() const {
+ return key_pressed_;
+}
+
void AudioProcessingImpl::set_delay_offset_ms(int offset) {
CriticalSectionScoped crit_scoped(crit_);
delay_offset_ms_ = offset;
@@ -576,14 +646,6 @@ int AudioProcessingImpl::delay_offset_ms() const {
return delay_offset_ms_;
}
-void AudioProcessingImpl::set_stream_key_pressed(bool key_pressed) {
- key_pressed_ = key_pressed;
-}
-
-bool AudioProcessingImpl::stream_key_pressed() const {
- return key_pressed_;
-}
-
int AudioProcessingImpl::StartDebugRecording(
const char filename[AudioProcessing::kMaxFilenameSize]) {
CriticalSectionScoped crit_scoped(crit_);
@@ -714,7 +776,7 @@ bool AudioProcessingImpl::is_data_processed() const {
return true;
}
-bool AudioProcessingImpl::interleave_needed(bool is_data_processed) const {
+bool AudioProcessingImpl::output_copy_needed(bool is_data_processed) const {
// Check if we've upmixed or downmixed the audio.
return (num_output_channels_ != num_input_channels_ || is_data_processed);
}
@@ -759,7 +821,7 @@ int AudioProcessingImpl::WriteMessageToDebugFile() {
event_msg_->Clear();
- return 0;
+ return kNoError;
}
int AudioProcessingImpl::WriteInitMessage() {
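The shape of the refactor above: each public entry point takes the critical section, validates and copies its input, then delegates to a shared private *Locked() pipeline used by both the int16 and float paths. A schematic of that pattern, with std::mutex standing in for WebRTC's CriticalSectionWrapper and all processing elided:

#include <mutex>

class Processor {
 public:
  // Both public overloads acquire the lock and convert their input format,
  // then run the same pipeline.
  int ProcessInt16(const short* interleaved) {
    std::lock_guard<std::mutex> lock(mutex_);
    // ... deinterleave |interleaved| into the internal buffer ...
    return ProcessLocked();
  }

  int ProcessFloat(const float* const* deinterleaved) {
    std::lock_guard<std::mutex> lock(mutex_);
    // ... scale and copy |deinterleaved| into the internal buffer ...
    return ProcessLocked();
  }

 private:
  // Runs with |mutex_| held; the format-agnostic processing lives here once.
  int ProcessLocked() { return 0; }

  std::mutex mutex_;
};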
diff --git a/modules/audio_processing/audio_processing_impl.h b/modules/audio_processing/audio_processing_impl.h
index 09e2192a..2f6385f0 100644
--- a/modules/audio_processing/audio_processing_impl.h
+++ b/modules/audio_processing/audio_processing_impl.h
@@ -21,7 +21,7 @@
namespace webrtc {
class AudioBuffer;
class CriticalSectionWrapper;
-class EchoCancellationImplWrapper;
+class EchoCancellationImpl;
class EchoControlMobileImpl;
class FileWrapper;
class GainControlImpl;
@@ -41,20 +41,9 @@ class Event;
class AudioProcessingImpl : public AudioProcessing {
public:
- enum {
- kSampleRate8kHz = 8000,
- kSampleRate16kHz = 16000,
- kSampleRate32kHz = 32000
- };
-
explicit AudioProcessingImpl(const Config& config);
virtual ~AudioProcessingImpl();
- CriticalSectionWrapper* crit() const;
-
- int split_sample_rate_hz() const;
- bool was_stream_delay_set() const;
-
// AudioProcessing methods.
virtual int Initialize() OVERRIDE;
virtual void SetExtraOptions(const Config& config) OVERRIDE;
@@ -64,6 +53,7 @@ class AudioProcessingImpl : public AudioProcessing {
}
virtual int set_sample_rate_hz(int rate) OVERRIDE;
virtual int sample_rate_hz() const OVERRIDE;
+ virtual int split_sample_rate_hz() const OVERRIDE;
virtual int set_num_channels(int input_channels,
int output_channels) OVERRIDE;
virtual int num_input_channels() const OVERRIDE;
@@ -73,9 +63,19 @@ class AudioProcessingImpl : public AudioProcessing {
virtual void set_output_will_be_muted(bool muted) OVERRIDE;
virtual bool output_will_be_muted() const OVERRIDE;
virtual int ProcessStream(AudioFrame* frame) OVERRIDE;
+ virtual int ProcessStream(float* const* data,
+ int samples_per_channel,
+ int sample_rate_hz,
+ ChannelLayout input_layout,
+ ChannelLayout output_layout) OVERRIDE;
virtual int AnalyzeReverseStream(AudioFrame* frame) OVERRIDE;
+ virtual int AnalyzeReverseStream(const float* const* data,
+ int samples_per_channel,
+ int sample_rate_hz,
+ ChannelLayout layout) OVERRIDE;
virtual int set_stream_delay_ms(int delay) OVERRIDE;
virtual int stream_delay_ms() const OVERRIDE;
+ virtual bool was_stream_delay_set() const OVERRIDE;
virtual void set_delay_offset_ms(int offset) OVERRIDE;
virtual int delay_offset_ms() const OVERRIDE;
virtual void set_stream_key_pressed(bool key_pressed) OVERRIDE;
@@ -98,12 +98,15 @@ class AudioProcessingImpl : public AudioProcessing {
private:
int MaybeInitializeLocked(int sample_rate_hz, int num_input_channels,
int num_output_channels, int num_reverse_channels);
+ int ProcessStreamLocked();
+ int AnalyzeReverseStreamLocked();
+
bool is_data_processed() const;
- bool interleave_needed(bool is_data_processed) const;
+ bool output_copy_needed(bool is_data_processed) const;
bool synthesis_needed(bool is_data_processed) const;
bool analysis_needed(bool is_data_processed) const;
- EchoCancellationImplWrapper* echo_cancellation_;
+ EchoCancellationImpl* echo_cancellation_;
EchoControlMobileImpl* echo_control_mobile_;
GainControlImpl* gain_control_;
HighPassFilterImpl* high_pass_filter_;
diff --git a/modules/audio_processing/debug.proto b/modules/audio_processing/debug.proto
index fb8e79a2..6042d906 100644
--- a/modules/audio_processing/debug.proto
+++ b/modules/audio_processing/debug.proto
@@ -10,17 +10,31 @@ message Init {
optional int32 num_reverse_channels = 5;
}
+// May contain interleaved or deinterleaved data, but don't store both formats.
message ReverseStream {
+ // int16 interleaved data.
optional bytes data = 1;
+
+ // float deinterleaved data, where each repeated element points to a single
+ // channel buffer of data.
+ repeated bytes channel = 2;
}
+// May contain interleaved or deinterleaved data, but don't store both formats.
message Stream {
+ // int16 interleaved data.
optional bytes input_data = 1;
optional bytes output_data = 2;
+
optional int32 delay = 3;
optional sint32 drift = 4;
optional int32 level = 5;
optional bool keypress = 6;
+
+ // float deinterleaved data, where each repeated element points to a single
+ // channel buffer of data.
+ repeated bytes input_channel = 7;
+ repeated bytes output_channel = 8;
}
message Event {
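The new repeated fields let a debug dump carry one float buffer per channel instead of a single interleaved blob. A hedged sketch of populating them through the standard protobuf-generated C++ API (generated header path and the audioproc package are assumptions based on the code above):

#include "webrtc/modules/audio_processing/debug.pb.h"  // Generated header; path assumed.

void DumpFloatStream(const float* const* data, int samples_per_channel,
                     int num_channels) {
  audioproc::Stream msg;
  const size_t channel_size = sizeof(float) * samples_per_channel;
  // One repeated |input_channel| entry per channel buffer.
  for (int i = 0; i < num_channels; ++i)
    msg.add_input_channel(data[i], channel_size);
  // ... serialize |msg| to the debug file ...
}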
diff --git a/modules/audio_processing/echo_cancellation_impl.cc b/modules/audio_processing/echo_cancellation_impl.cc
index 8fa86c3b..0d6d159b 100644
--- a/modules/audio_processing/echo_cancellation_impl.cc
+++ b/modules/audio_processing/echo_cancellation_impl.cc
@@ -18,7 +18,6 @@ extern "C" {
}
#include "webrtc/modules/audio_processing/aec/include/echo_cancellation.h"
#include "webrtc/modules/audio_processing/audio_buffer.h"
-#include "webrtc/modules/audio_processing/audio_processing_impl.h"
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
namespace webrtc {
@@ -56,14 +55,11 @@ AudioProcessing::Error MapError(int err) {
}
} // namespace
-EchoCancellationImplWrapper* EchoCancellationImplWrapper::Create(
- const AudioProcessingImpl* audioproc) {
- return new EchoCancellationImpl(audioproc);
-}
-
-EchoCancellationImpl::EchoCancellationImpl(const AudioProcessingImpl* apm)
- : ProcessingComponent(apm),
+EchoCancellationImpl::EchoCancellationImpl(const AudioProcessing* apm,
+ CriticalSectionWrapper* crit)
+ : ProcessingComponent(),
apm_(apm),
+ crit_(crit),
drift_compensation_enabled_(false),
metrics_enabled_(false),
suppression_level_(kModerateSuppression),
@@ -168,7 +164,7 @@ int EchoCancellationImpl::ProcessCaptureAudio(AudioBuffer* audio) {
}
int EchoCancellationImpl::Enable(bool enable) {
- CriticalSectionScoped crit_scoped(apm_->crit());
+ CriticalSectionScoped crit_scoped(crit_);
// Ensure AEC and AECM are not both enabled.
if (enable && apm_->echo_control_mobile()->is_enabled()) {
return apm_->kBadParameterError;
@@ -182,7 +178,7 @@ bool EchoCancellationImpl::is_enabled() const {
}
int EchoCancellationImpl::set_suppression_level(SuppressionLevel level) {
- CriticalSectionScoped crit_scoped(apm_->crit());
+ CriticalSectionScoped crit_scoped(crit_);
if (MapSetting(level) == -1) {
return apm_->kBadParameterError;
}
@@ -197,7 +193,7 @@ EchoCancellation::SuppressionLevel EchoCancellationImpl::suppression_level()
}
int EchoCancellationImpl::enable_drift_compensation(bool enable) {
- CriticalSectionScoped crit_scoped(apm_->crit());
+ CriticalSectionScoped crit_scoped(crit_);
drift_compensation_enabled_ = enable;
return Configure();
}
@@ -207,7 +203,7 @@ bool EchoCancellationImpl::is_drift_compensation_enabled() const {
}
int EchoCancellationImpl::set_device_sample_rate_hz(int rate) {
- CriticalSectionScoped crit_scoped(apm_->crit());
+ CriticalSectionScoped crit_scoped(crit_);
if (rate < 8000 || rate > 96000) {
return apm_->kBadParameterError;
}
@@ -230,7 +226,7 @@ int EchoCancellationImpl::stream_drift_samples() const {
}
int EchoCancellationImpl::enable_metrics(bool enable) {
- CriticalSectionScoped crit_scoped(apm_->crit());
+ CriticalSectionScoped crit_scoped(crit_);
metrics_enabled_ = enable;
return Configure();
}
@@ -242,7 +238,7 @@ bool EchoCancellationImpl::are_metrics_enabled() const {
// TODO(ajm): we currently just use the metrics from the first AEC. Think more
// about the best way to extend this to multi-channel.
int EchoCancellationImpl::GetMetrics(Metrics* metrics) {
- CriticalSectionScoped crit_scoped(apm_->crit());
+ CriticalSectionScoped crit_scoped(crit_);
if (metrics == NULL) {
return apm_->kNullPointerError;
}
@@ -289,7 +285,7 @@ bool EchoCancellationImpl::stream_has_echo() const {
}
int EchoCancellationImpl::enable_delay_logging(bool enable) {
- CriticalSectionScoped crit_scoped(apm_->crit());
+ CriticalSectionScoped crit_scoped(crit_);
delay_logging_enabled_ = enable;
return Configure();
}
@@ -300,7 +296,7 @@ bool EchoCancellationImpl::is_delay_logging_enabled() const {
// TODO(bjornv): How should we handle the multi-channel case?
int EchoCancellationImpl::GetDelayMetrics(int* median, int* std) {
- CriticalSectionScoped crit_scoped(apm_->crit());
+ CriticalSectionScoped crit_scoped(crit_);
if (median == NULL) {
return apm_->kNullPointerError;
}
@@ -322,7 +318,7 @@ int EchoCancellationImpl::GetDelayMetrics(int* median, int* std) {
}
struct AecCore* EchoCancellationImpl::aec_core() const {
- CriticalSectionScoped crit_scoped(apm_->crit());
+ CriticalSectionScoped crit_scoped(crit_);
if (!is_component_enabled()) {
return NULL;
}
diff --git a/modules/audio_processing/echo_cancellation_impl.h b/modules/audio_processing/echo_cancellation_impl.h
index 3ab0ce26..f5572b94 100644
--- a/modules/audio_processing/echo_cancellation_impl.h
+++ b/modules/audio_processing/echo_cancellation_impl.h
@@ -11,21 +11,23 @@
#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_ECHO_CANCELLATION_IMPL_H_
#define WEBRTC_MODULES_AUDIO_PROCESSING_ECHO_CANCELLATION_IMPL_H_
-#include "webrtc/modules/audio_processing/echo_cancellation_impl_wrapper.h"
+#include "webrtc/modules/audio_processing/include/audio_processing.h"
+#include "webrtc/modules/audio_processing/processing_component.h"
namespace webrtc {
-class AudioProcessingImpl;
class AudioBuffer;
+class CriticalSectionWrapper;
-class EchoCancellationImpl : public EchoCancellationImplWrapper {
+class EchoCancellationImpl : public EchoCancellation,
+ public ProcessingComponent {
public:
- explicit EchoCancellationImpl(const AudioProcessingImpl* apm);
+ EchoCancellationImpl(const AudioProcessing* apm,
+ CriticalSectionWrapper* crit);
virtual ~EchoCancellationImpl();
- // EchoCancellationImplWrapper implementation.
- virtual int ProcessRenderAudio(const AudioBuffer* audio) OVERRIDE;
- virtual int ProcessCaptureAudio(AudioBuffer* audio) OVERRIDE;
+ int ProcessRenderAudio(const AudioBuffer* audio);
+ int ProcessCaptureAudio(AudioBuffer* audio);
// EchoCancellation implementation.
virtual bool is_enabled() const OVERRIDE;
@@ -62,7 +64,8 @@ class EchoCancellationImpl : public EchoCancellationImplWrapper {
virtual int num_handles_required() const OVERRIDE;
virtual int GetHandleError(void* handle) const OVERRIDE;
- const AudioProcessingImpl* apm_;
+ const AudioProcessing* apm_;
+ CriticalSectionWrapper* crit_;
bool drift_compensation_enabled_;
bool metrics_enabled_;
SuppressionLevel suppression_level_;
diff --git a/modules/audio_processing/echo_cancellation_impl_wrapper.h b/modules/audio_processing/echo_cancellation_impl_wrapper.h
deleted file mode 100644
index f1c03f32..00000000
--- a/modules/audio_processing/echo_cancellation_impl_wrapper.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_ECHO_CANCELLATION_IMPL_WRAPPER_H_
-#define WEBRTC_MODULES_AUDIO_PROCESSING_ECHO_CANCELLATION_IMPL_WRAPPER_H_
-
-#include "webrtc/modules/audio_processing/include/audio_processing.h"
-#include "webrtc/modules/audio_processing/processing_component.h"
-
-namespace webrtc {
-
-class AudioProcessingImpl;
-class AudioBuffer;
-
-class EchoCancellationImplWrapper : public virtual EchoCancellation,
- public virtual ProcessingComponent {
- public:
- static EchoCancellationImplWrapper* Create(
- const AudioProcessingImpl* audioproc);
- virtual ~EchoCancellationImplWrapper() {}
-
- virtual int ProcessRenderAudio(const AudioBuffer* audio) = 0;
- virtual int ProcessCaptureAudio(AudioBuffer* audio) = 0;
-};
-
-} // namespace webrtc
-
-#endif // WEBRTC_MODULES_AUDIO_PROCESSING_ECHO_CANCELLATION_IMPL_WRAPPER_H_
diff --git a/modules/audio_processing/echo_control_mobile_impl.cc b/modules/audio_processing/echo_control_mobile_impl.cc
index f7853814..8434b617 100644
--- a/modules/audio_processing/echo_control_mobile_impl.cc
+++ b/modules/audio_processing/echo_control_mobile_impl.cc
@@ -15,7 +15,6 @@
#include "webrtc/modules/audio_processing/aecm/include/echo_control_mobile.h"
#include "webrtc/modules/audio_processing/audio_buffer.h"
-#include "webrtc/modules/audio_processing/audio_processing_impl.h"
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
#include "webrtc/system_wrappers/interface/logging.h"
@@ -63,9 +62,11 @@ size_t EchoControlMobile::echo_path_size_bytes() {
return WebRtcAecm_echo_path_size_bytes();
}
-EchoControlMobileImpl::EchoControlMobileImpl(const AudioProcessingImpl* apm)
- : ProcessingComponent(apm),
+EchoControlMobileImpl::EchoControlMobileImpl(const AudioProcessing* apm,
+ CriticalSectionWrapper* crit)
+ : ProcessingComponent(),
apm_(apm),
+ crit_(crit),
routing_mode_(kSpeakerphone),
comfort_noise_enabled_(true),
external_echo_path_(NULL) {}
@@ -155,7 +156,7 @@ int EchoControlMobileImpl::ProcessCaptureAudio(AudioBuffer* audio) {
}
int EchoControlMobileImpl::Enable(bool enable) {
- CriticalSectionScoped crit_scoped(apm_->crit());
+ CriticalSectionScoped crit_scoped(crit_);
// Ensure AEC and AECM are not both enabled.
if (enable && apm_->echo_cancellation()->is_enabled()) {
return apm_->kBadParameterError;
@@ -169,7 +170,7 @@ bool EchoControlMobileImpl::is_enabled() const {
}
int EchoControlMobileImpl::set_routing_mode(RoutingMode mode) {
- CriticalSectionScoped crit_scoped(apm_->crit());
+ CriticalSectionScoped crit_scoped(crit_);
if (MapSetting(mode) == -1) {
return apm_->kBadParameterError;
}
@@ -184,7 +185,7 @@ EchoControlMobile::RoutingMode EchoControlMobileImpl::routing_mode()
}
int EchoControlMobileImpl::enable_comfort_noise(bool enable) {
- CriticalSectionScoped crit_scoped(apm_->crit());
+ CriticalSectionScoped crit_scoped(crit_);
comfort_noise_enabled_ = enable;
return Configure();
}
@@ -195,7 +196,7 @@ bool EchoControlMobileImpl::is_comfort_noise_enabled() const {
int EchoControlMobileImpl::SetEchoPath(const void* echo_path,
size_t size_bytes) {
- CriticalSectionScoped crit_scoped(apm_->crit());
+ CriticalSectionScoped crit_scoped(crit_);
if (echo_path == NULL) {
return apm_->kNullPointerError;
}
@@ -214,7 +215,7 @@ int EchoControlMobileImpl::SetEchoPath(const void* echo_path,
int EchoControlMobileImpl::GetEchoPath(void* echo_path,
size_t size_bytes) const {
- CriticalSectionScoped crit_scoped(apm_->crit());
+ CriticalSectionScoped crit_scoped(crit_);
if (echo_path == NULL) {
return apm_->kNullPointerError;
}
diff --git a/modules/audio_processing/echo_control_mobile_impl.h b/modules/audio_processing/echo_control_mobile_impl.h
index 5eefab0a..f00d59bc 100644
--- a/modules/audio_processing/echo_control_mobile_impl.h
+++ b/modules/audio_processing/echo_control_mobile_impl.h
@@ -15,13 +15,15 @@
#include "webrtc/modules/audio_processing/processing_component.h"
namespace webrtc {
-class AudioProcessingImpl;
+
class AudioBuffer;
+class CriticalSectionWrapper;
class EchoControlMobileImpl : public EchoControlMobile,
public ProcessingComponent {
public:
- explicit EchoControlMobileImpl(const AudioProcessingImpl* apm);
+ EchoControlMobileImpl(const AudioProcessing* apm,
+ CriticalSectionWrapper* crit);
virtual ~EchoControlMobileImpl();
int ProcessRenderAudio(const AudioBuffer* audio);
@@ -51,7 +53,8 @@ class EchoControlMobileImpl : public EchoControlMobile,
virtual int num_handles_required() const OVERRIDE;
virtual int GetHandleError(void* handle) const OVERRIDE;
- const AudioProcessingImpl* apm_;
+ const AudioProcessing* apm_;
+ CriticalSectionWrapper* crit_;
RoutingMode routing_mode_;
bool comfort_noise_enabled_;
unsigned char* external_echo_path_;
diff --git a/modules/audio_processing/gain_control_impl.cc b/modules/audio_processing/gain_control_impl.cc
index a6cd6842..59532210 100644
--- a/modules/audio_processing/gain_control_impl.cc
+++ b/modules/audio_processing/gain_control_impl.cc
@@ -12,12 +12,10 @@
#include <assert.h>
+#include "webrtc/modules/audio_processing/audio_buffer.h"
#include "webrtc/modules/audio_processing/agc/include/gain_control.h"
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
-#include "webrtc/modules/audio_processing/audio_buffer.h"
-#include "webrtc/modules/audio_processing/audio_processing_impl.h"
-
namespace webrtc {
typedef void Handle;
@@ -37,9 +35,11 @@ int16_t MapSetting(GainControl::Mode mode) {
}
} // namespace
-GainControlImpl::GainControlImpl(const AudioProcessingImpl* apm)
- : ProcessingComponent(apm),
+GainControlImpl::GainControlImpl(const AudioProcessing* apm,
+ CriticalSectionWrapper* crit)
+ : ProcessingComponent(),
apm_(apm),
+ crit_(crit),
mode_(kAdaptiveAnalog),
minimum_capture_level_(0),
maximum_capture_level_(255),
@@ -203,7 +203,7 @@ int GainControlImpl::stream_analog_level() {
}
int GainControlImpl::Enable(bool enable) {
- CriticalSectionScoped crit_scoped(apm_->crit());
+ CriticalSectionScoped crit_scoped(crit_);
return EnableComponent(enable);
}
@@ -212,7 +212,7 @@ bool GainControlImpl::is_enabled() const {
}
int GainControlImpl::set_mode(Mode mode) {
- CriticalSectionScoped crit_scoped(apm_->crit());
+ CriticalSectionScoped crit_scoped(crit_);
if (MapSetting(mode) == -1) {
return apm_->kBadParameterError;
}
@@ -227,7 +227,7 @@ GainControl::Mode GainControlImpl::mode() const {
int GainControlImpl::set_analog_level_limits(int minimum,
int maximum) {
- CriticalSectionScoped crit_scoped(apm_->crit());
+ CriticalSectionScoped crit_scoped(crit_);
if (minimum < 0) {
return apm_->kBadParameterError;
}
@@ -259,7 +259,7 @@ bool GainControlImpl::stream_is_saturated() const {
}
int GainControlImpl::set_target_level_dbfs(int level) {
- CriticalSectionScoped crit_scoped(apm_->crit());
+ CriticalSectionScoped crit_scoped(crit_);
if (level > 31 || level < 0) {
return apm_->kBadParameterError;
}
@@ -273,7 +273,7 @@ int GainControlImpl::target_level_dbfs() const {
}
int GainControlImpl::set_compression_gain_db(int gain) {
- CriticalSectionScoped crit_scoped(apm_->crit());
+ CriticalSectionScoped crit_scoped(crit_);
if (gain < 0 || gain > 90) {
return apm_->kBadParameterError;
}
@@ -287,7 +287,7 @@ int GainControlImpl::compression_gain_db() const {
}
int GainControlImpl::enable_limiter(bool enable) {
- CriticalSectionScoped crit_scoped(apm_->crit());
+ CriticalSectionScoped crit_scoped(crit_);
limiter_enabled_ = enable;
return Configure();
}
diff --git a/modules/audio_processing/gain_control_impl.h b/modules/audio_processing/gain_control_impl.h
index 2de02f6e..e1669ccc 100644
--- a/modules/audio_processing/gain_control_impl.h
+++ b/modules/audio_processing/gain_control_impl.h
@@ -17,13 +17,15 @@
#include "webrtc/modules/audio_processing/processing_component.h"
namespace webrtc {
-class AudioProcessingImpl;
+
class AudioBuffer;
+class CriticalSectionWrapper;
class GainControlImpl : public GainControl,
public ProcessingComponent {
public:
- explicit GainControlImpl(const AudioProcessingImpl* apm);
+ GainControlImpl(const AudioProcessing* apm,
+ CriticalSectionWrapper* crit);
virtual ~GainControlImpl();
int ProcessRenderAudio(AudioBuffer* audio);
@@ -62,7 +64,8 @@ class GainControlImpl : public GainControl,
virtual int num_handles_required() const OVERRIDE;
virtual int GetHandleError(void* handle) const OVERRIDE;
- const AudioProcessingImpl* apm_;
+ const AudioProcessing* apm_;
+ CriticalSectionWrapper* crit_;
Mode mode_;
int minimum_capture_level_;
int maximum_capture_level_;
diff --git a/modules/audio_processing/high_pass_filter_impl.cc b/modules/audio_processing/high_pass_filter_impl.cc
index da217037..d4836ef5 100644
--- a/modules/audio_processing/high_pass_filter_impl.cc
+++ b/modules/audio_processing/high_pass_filter_impl.cc
@@ -13,11 +13,10 @@
#include <assert.h>
#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
+#include "webrtc/modules/audio_processing/audio_buffer.h"
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
#include "webrtc/typedefs.h"
-#include "webrtc/modules/audio_processing/audio_buffer.h"
-#include "webrtc/modules/audio_processing/audio_processing_impl.h"
namespace webrtc {
namespace {
@@ -36,7 +35,7 @@ struct FilterState {
int InitializeFilter(FilterState* hpf, int sample_rate_hz) {
assert(hpf != NULL);
- if (sample_rate_hz == AudioProcessingImpl::kSampleRate8kHz) {
+ if (sample_rate_hz == AudioProcessing::kSampleRate8kHz) {
hpf->ba = kFilterCoefficients8kHz;
} else {
hpf->ba = kFilterCoefficients;
@@ -105,9 +104,11 @@ int Filter(FilterState* hpf, int16_t* data, int length) {
typedef FilterState Handle;
-HighPassFilterImpl::HighPassFilterImpl(const AudioProcessingImpl* apm)
- : ProcessingComponent(apm),
- apm_(apm) {}
+HighPassFilterImpl::HighPassFilterImpl(const AudioProcessing* apm,
+ CriticalSectionWrapper* crit)
+ : ProcessingComponent(),
+ apm_(apm),
+ crit_(crit) {}
HighPassFilterImpl::~HighPassFilterImpl() {}
@@ -135,7 +136,7 @@ int HighPassFilterImpl::ProcessCaptureAudio(AudioBuffer* audio) {
}
int HighPassFilterImpl::Enable(bool enable) {
- CriticalSectionScoped crit_scoped(apm_->crit());
+ CriticalSectionScoped crit_scoped(crit_);
return EnableComponent(enable);
}
diff --git a/modules/audio_processing/high_pass_filter_impl.h b/modules/audio_processing/high_pass_filter_impl.h
index 7e11ea9c..1796e77f 100644
--- a/modules/audio_processing/high_pass_filter_impl.h
+++ b/modules/audio_processing/high_pass_filter_impl.h
@@ -15,13 +15,14 @@
#include "webrtc/modules/audio_processing/processing_component.h"
namespace webrtc {
-class AudioProcessingImpl;
+
class AudioBuffer;
+class CriticalSectionWrapper;
class HighPassFilterImpl : public HighPassFilter,
public ProcessingComponent {
public:
- explicit HighPassFilterImpl(const AudioProcessingImpl* apm);
+ HighPassFilterImpl(const AudioProcessing* apm, CriticalSectionWrapper* crit);
virtual ~HighPassFilterImpl();
int ProcessCaptureAudio(AudioBuffer* audio);
@@ -41,7 +42,8 @@ class HighPassFilterImpl : public HighPassFilter,
virtual int num_handles_required() const OVERRIDE;
virtual int GetHandleError(void* handle) const OVERRIDE;
- const AudioProcessingImpl* apm_;
+ const AudioProcessing* apm_;
+ CriticalSectionWrapper* crit_;
};
} // namespace webrtc
diff --git a/modules/audio_processing/include/audio_processing.h b/modules/audio_processing/include/audio_processing.h
index c34baacb..394bef8e 100644
--- a/modules/audio_processing/include/audio_processing.h
+++ b/modules/audio_processing/include/audio_processing.h
@@ -135,6 +135,16 @@ struct ExperimentalAgc {
//
class AudioProcessing {
public:
+ enum ChannelLayout {
+ kMono,
+ // Left, right.
+ kStereo,
+ // Mono, keyboard mic.
+ kMonoAndKeyboard,
+ // Left, right, keyboard mic.
+ kStereoAndKeyboard
+ };
+
// Creates an APM instance. Use one instance for every primary audio stream
// requiring processing. On the client-side, this would typically be one
// instance for the near-end stream, and additional instances for each far-end
@@ -168,6 +178,7 @@ class AudioProcessing {
// streams. 8000, 16000 or 32000 Hz are permitted.
virtual int set_sample_rate_hz(int rate) = 0;
virtual int sample_rate_hz() const = 0;
+ virtual int split_sample_rate_hz() const = 0;
// DEPRECATED: It is now possible to modify the number of channels directly in
// a call to |ProcessStream|.
@@ -204,6 +215,17 @@ class AudioProcessing {
// method, it will trigger an initialization.
virtual int ProcessStream(AudioFrame* frame) = 0;
+ // Accepts deinterleaved float audio in the range [-1, 1]. Each element
+ // of |data| points to a channel buffer, arranged according to
+ // |input_layout|. At output, the channels will be arranged according to
+ // |output_layout|.
+ // TODO(ajm): Output layout conversion does not yet work.
+ virtual int ProcessStream(float* const* data,
+ int samples_per_channel,
+ int sample_rate_hz,
+ ChannelLayout input_layout,
+ ChannelLayout output_layout) = 0;
+
// Analyzes a 10 ms |frame| of the reverse direction audio stream. The frame
// will not be modified. On the client side, this is the far-end (or to be
// rendered) audio.
@@ -221,6 +243,13 @@ class AudioProcessing {
// TODO(ajm): add const to input; requires an implementation fix.
virtual int AnalyzeReverseStream(AudioFrame* frame) = 0;
+ // Accepts deinterleaved float audio in the range [-1, 1]. Each element
+ // of |data| points to a channel buffer, arranged according to |layout|.
+ virtual int AnalyzeReverseStream(const float* const* data,
+ int samples_per_channel,
+ int sample_rate_hz,
+ ChannelLayout layout) = 0;
+
// This must be called if and only if echo processing is enabled.
//
// Sets the |delay| in ms between AnalyzeReverseStream() receiving a far-end
@@ -236,6 +265,7 @@ class AudioProcessing {
// ProcessStream().
virtual int set_stream_delay_ms(int delay) = 0;
virtual int stream_delay_ms() const = 0;
+ virtual bool was_stream_delay_set() const = 0;
// Call to signal that a key press occurred (true) or did not occur (false)
// with this chunk of audio.
@@ -304,6 +334,12 @@ class AudioProcessing {
// will continue, but the parameter may have been truncated.
kBadStreamParameterWarning = -13
};
+
+ enum {
+ kSampleRate8kHz = 8000,
+ kSampleRate16kHz = 16000,
+ kSampleRate32kHz = 32000
+ };
};
// The acoustic echo cancellation (AEC) component provides better performance
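
Taken together, the additions above define a complete float path. A minimal caller-side sketch, assuming one 10 ms stereo chunk at 32 kHz processed in place; Create(config), ChannelLayout, and the rate constants come from this header, while the buffer setup and include paths are illustrative:

    #include <vector>

    #include "webrtc/common.h"
    #include "webrtc/modules/audio_processing/include/audio_processing.h"
    #include "webrtc/system_wrappers/interface/scoped_ptr.h"

    int main() {
      webrtc::Config config;
      webrtc::scoped_ptr<webrtc::AudioProcessing> apm(
          webrtc::AudioProcessing::Create(config));

      const int kRate = webrtc::AudioProcessing::kSampleRate32kHz;
      const int kSamplesPerChannel = kRate / 100;  // One 10 ms chunk.
      std::vector<float> left(kSamplesPerChannel);   // Deinterleaved, [-1, 1].
      std::vector<float> right(kSamplesPerChannel);
      float* channels[] = {&left[0], &right[0]};

      apm->set_stream_delay_ms(0);
      return apm->ProcessStream(channels,
                                kSamplesPerChannel,
                                kRate,
                                webrtc::AudioProcessing::kStereo,   // input
                                webrtc::AudioProcessing::kStereo);  // output
    }
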
diff --git a/modules/audio_processing/include/mock_audio_processing.h b/modules/audio_processing/include/mock_audio_processing.h
index 762f2123..96abb6a1 100644
--- a/modules/audio_processing/include/mock_audio_processing.h
+++ b/modules/audio_processing/include/mock_audio_processing.h
@@ -191,6 +191,8 @@ class MockAudioProcessing : public AudioProcessing {
int(int rate));
MOCK_CONST_METHOD0(sample_rate_hz,
int());
+ MOCK_CONST_METHOD0(split_sample_rate_hz,
+ int());
MOCK_METHOD2(set_num_channels,
int(int input_channels, int output_channels));
MOCK_CONST_METHOD0(num_input_channels,
@@ -207,12 +209,21 @@ class MockAudioProcessing : public AudioProcessing {
bool());
MOCK_METHOD1(ProcessStream,
int(AudioFrame* frame));
+ MOCK_METHOD5(ProcessStream,
+ int(float* const* data, int frames, int sample_rate_hz,
+ ChannelLayout input_layout,
+ ChannelLayout output_layout));
MOCK_METHOD1(AnalyzeReverseStream,
int(AudioFrame* frame));
+ MOCK_METHOD4(AnalyzeReverseStream,
+ int(const float* const* data, int frames, int sample_rate_hz,
+ ChannelLayout input_layout));
MOCK_METHOD1(set_stream_delay_ms,
int(int delay));
MOCK_CONST_METHOD0(stream_delay_ms,
int());
+ MOCK_CONST_METHOD0(was_stream_delay_set,
+ bool());
MOCK_METHOD1(set_stream_key_pressed,
void(bool key_pressed));
MOCK_CONST_METHOD0(stream_key_pressed,
@@ -238,16 +249,16 @@ class MockAudioProcessing : public AudioProcessing {
}
virtual MockHighPassFilter* high_pass_filter() const {
return high_pass_filter_.get();
- };
+ }
virtual MockLevelEstimator* level_estimator() const {
return level_estimator_.get();
- };
+ }
virtual MockNoiseSuppression* noise_suppression() const {
return noise_suppression_.get();
- };
+ }
virtual MockVoiceDetection* voice_detection() const {
return voice_detection_.get();
- };
+ }
private:
scoped_ptr<MockEchoCancellation> echo_cancellation_;
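
With the new mock methods in place, a test can set expectations on the float overload; gmock selects the right ProcessStream by matcher arity. A hypothetical snippet (the mock is assumed to live in namespace webrtc like the rest of the header, and the frame counts are illustrative):

    using ::testing::_;
    using ::testing::Return;

    webrtc::MockAudioProcessing mock_apm;
    EXPECT_CALL(mock_apm,
                ProcessStream(_, 320, 32000,
                              webrtc::AudioProcessing::kStereo,
                              webrtc::AudioProcessing::kStereo))
        .WillOnce(Return(webrtc::AudioProcessing::kNoError));
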
diff --git a/modules/audio_processing/level_estimator_impl.cc b/modules/audio_processing/level_estimator_impl.cc
index 29dbdfc7..a512ef15 100644
--- a/modules/audio_processing/level_estimator_impl.cc
+++ b/modules/audio_processing/level_estimator_impl.cc
@@ -15,7 +15,6 @@
#include <string.h>
#include "webrtc/modules/audio_processing/audio_buffer.h"
-#include "webrtc/modules/audio_processing/audio_processing_impl.h"
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
namespace webrtc {
@@ -84,9 +83,11 @@ class Level {
};
} // namespace
-LevelEstimatorImpl::LevelEstimatorImpl(const AudioProcessingImpl* apm)
- : ProcessingComponent(apm),
- apm_(apm) {}
+LevelEstimatorImpl::LevelEstimatorImpl(const AudioProcessing* apm,
+ CriticalSectionWrapper* crit)
+ : ProcessingComponent(),
+ apm_(apm),
+ crit_(crit) {}
LevelEstimatorImpl::~LevelEstimatorImpl() {}
@@ -113,7 +114,7 @@ int LevelEstimatorImpl::ProcessStream(AudioBuffer* audio) {
}
int LevelEstimatorImpl::Enable(bool enable) {
- CriticalSectionScoped crit_scoped(apm_->crit());
+ CriticalSectionScoped crit_scoped(crit_);
return EnableComponent(enable);
}
diff --git a/modules/audio_processing/level_estimator_impl.h b/modules/audio_processing/level_estimator_impl.h
index 20dc18dc..2490d793 100644
--- a/modules/audio_processing/level_estimator_impl.h
+++ b/modules/audio_processing/level_estimator_impl.h
@@ -15,13 +15,15 @@
#include "webrtc/modules/audio_processing/processing_component.h"
namespace webrtc {
-class AudioProcessingImpl;
+
class AudioBuffer;
+class CriticalSectionWrapper;
class LevelEstimatorImpl : public LevelEstimator,
public ProcessingComponent {
public:
- explicit LevelEstimatorImpl(const AudioProcessingImpl* apm);
+ LevelEstimatorImpl(const AudioProcessing* apm,
+ CriticalSectionWrapper* crit);
virtual ~LevelEstimatorImpl();
int ProcessStream(AudioBuffer* audio);
@@ -42,8 +44,10 @@ class LevelEstimatorImpl : public LevelEstimator,
virtual int num_handles_required() const OVERRIDE;
virtual int GetHandleError(void* handle) const OVERRIDE;
- const AudioProcessingImpl* apm_;
+ const AudioProcessing* apm_;
+ CriticalSectionWrapper* crit_;
};
+
} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_PROCESSING_LEVEL_ESTIMATOR_IMPL_H_
diff --git a/modules/audio_processing/noise_suppression_impl.cc b/modules/audio_processing/noise_suppression_impl.cc
index 41c11b1c..9ecbf8d3 100644
--- a/modules/audio_processing/noise_suppression_impl.cc
+++ b/modules/audio_processing/noise_suppression_impl.cc
@@ -12,15 +12,14 @@
#include <assert.h>
-#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
+#include "webrtc/modules/audio_processing/audio_buffer.h"
#if defined(WEBRTC_NS_FLOAT)
#include "webrtc/modules/audio_processing/ns/include/noise_suppression.h"
#elif defined(WEBRTC_NS_FIXED)
#include "webrtc/modules/audio_processing/ns/include/noise_suppression_x.h"
#endif
+#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
-#include "webrtc/modules/audio_processing/audio_buffer.h"
-#include "webrtc/modules/audio_processing/audio_processing_impl.h"
namespace webrtc {
@@ -47,9 +46,11 @@ int MapSetting(NoiseSuppression::Level level) {
}
} // namespace
-NoiseSuppressionImpl::NoiseSuppressionImpl(const AudioProcessingImpl* apm)
- : ProcessingComponent(apm),
+NoiseSuppressionImpl::NoiseSuppressionImpl(const AudioProcessing* apm,
+ CriticalSectionWrapper* crit)
+ : ProcessingComponent(),
apm_(apm),
+ crit_(crit),
level_(kModerate) {}
NoiseSuppressionImpl::~NoiseSuppressionImpl() {}
@@ -88,7 +89,7 @@ int NoiseSuppressionImpl::ProcessCaptureAudio(AudioBuffer* audio) {
}
int NoiseSuppressionImpl::Enable(bool enable) {
- CriticalSectionScoped crit_scoped(apm_->crit());
+ CriticalSectionScoped crit_scoped(crit_);
return EnableComponent(enable);
}
@@ -97,7 +98,7 @@ bool NoiseSuppressionImpl::is_enabled() const {
}
int NoiseSuppressionImpl::set_level(Level level) {
- CriticalSectionScoped crit_scoped(apm_->crit());
+ CriticalSectionScoped crit_scoped(crit_);
if (MapSetting(level) == -1) {
return apm_->kBadParameterError;
}
diff --git a/modules/audio_processing/noise_suppression_impl.h b/modules/audio_processing/noise_suppression_impl.h
index f6dd8cbd..46b7be2f 100644
--- a/modules/audio_processing/noise_suppression_impl.h
+++ b/modules/audio_processing/noise_suppression_impl.h
@@ -15,13 +15,15 @@
#include "webrtc/modules/audio_processing/processing_component.h"
namespace webrtc {
-class AudioProcessingImpl;
+
class AudioBuffer;
+class CriticalSectionWrapper;
class NoiseSuppressionImpl : public NoiseSuppression,
public ProcessingComponent {
public:
- explicit NoiseSuppressionImpl(const AudioProcessingImpl* apm);
+ NoiseSuppressionImpl(const AudioProcessing* apm,
+ CriticalSectionWrapper* crit);
virtual ~NoiseSuppressionImpl();
int ProcessCaptureAudio(AudioBuffer* audio);
@@ -44,9 +46,11 @@ class NoiseSuppressionImpl : public NoiseSuppression,
virtual int num_handles_required() const OVERRIDE;
virtual int GetHandleError(void* handle) const OVERRIDE;
- const AudioProcessingImpl* apm_;
+ const AudioProcessing* apm_;
+ CriticalSectionWrapper* crit_;
Level level_;
};
+
} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_PROCESSING_NOISE_SUPPRESSION_IMPL_H_
diff --git a/modules/audio_processing/processing_component.cc b/modules/audio_processing/processing_component.cc
index 23bf2257..9e16d7c4 100644
--- a/modules/audio_processing/processing_component.cc
+++ b/modules/audio_processing/processing_component.cc
@@ -12,15 +12,12 @@
#include <assert.h>
-#include "webrtc/modules/audio_processing/audio_processing_impl.h"
+#include "webrtc/modules/audio_processing/include/audio_processing.h"
namespace webrtc {
-ProcessingComponent::ProcessingComponent() {}
-
-ProcessingComponent::ProcessingComponent(const AudioProcessingImpl* apm)
- : apm_(apm),
- initialized_(false),
+ProcessingComponent::ProcessingComponent()
+ : initialized_(false),
enabled_(false),
num_handles_(0) {}
@@ -35,7 +32,7 @@ int ProcessingComponent::Destroy() {
}
initialized_ = false;
- return apm_->kNoError;
+ return AudioProcessing::kNoError;
}
int ProcessingComponent::EnableComponent(bool enable) {
@@ -43,7 +40,7 @@ int ProcessingComponent::EnableComponent(bool enable) {
enabled_ = enable; // Must be set before Initialize() is called.
int err = Initialize();
- if (err != apm_->kNoError) {
+ if (err != AudioProcessing::kNoError) {
enabled_ = false;
return err;
}
@@ -51,7 +48,7 @@ int ProcessingComponent::EnableComponent(bool enable) {
enabled_ = enable;
}
- return apm_->kNoError;
+ return AudioProcessing::kNoError;
}
bool ProcessingComponent::is_component_enabled() const {
@@ -69,7 +66,7 @@ int ProcessingComponent::num_handles() const {
int ProcessingComponent::Initialize() {
if (!enabled_) {
- return apm_->kNoError;
+ return AudioProcessing::kNoError;
}
num_handles_ = num_handles_required();
@@ -82,12 +79,12 @@ int ProcessingComponent::Initialize() {
if (handles_[i] == NULL) {
handles_[i] = CreateHandle();
if (handles_[i] == NULL) {
- return apm_->kCreationFailedError;
+ return AudioProcessing::kCreationFailedError;
}
}
int err = InitializeHandle(handles_[i]);
- if (err != apm_->kNoError) {
+ if (err != AudioProcessing::kNoError) {
return GetHandleError(handles_[i]);
}
}
@@ -98,17 +95,17 @@ int ProcessingComponent::Initialize() {
int ProcessingComponent::Configure() {
if (!initialized_) {
- return apm_->kNoError;
+ return AudioProcessing::kNoError;
}
assert(static_cast<int>(handles_.size()) >= num_handles_);
for (int i = 0; i < num_handles_; i++) {
int err = ConfigureHandle(handles_[i]);
- if (err != apm_->kNoError) {
+ if (err != AudioProcessing::kNoError) {
return GetHandleError(handles_[i]);
}
}
- return apm_->kNoError;
+ return AudioProcessing::kNoError;
}
} // namespace webrtc
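
The net effect in this file: error codes are reached as enum constants on the AudioProcessing interface rather than through a stored apm_ pointer, which is what lets ProcessingComponent drop its back-pointer entirely. A trivial sketch of the equivalence (the helper is hypothetical):

    #include "webrtc/modules/audio_processing/include/audio_processing.h"

    // Old style needed an instance: return apm_->kNoError;
    // New style is purely static:
    int NoError() {
      return webrtc::AudioProcessing::kNoError;
    }
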
diff --git a/modules/audio_processing/processing_component.h b/modules/audio_processing/processing_component.h
index c090d222..27400998 100644
--- a/modules/audio_processing/processing_component.h
+++ b/modules/audio_processing/processing_component.h
@@ -13,16 +13,13 @@
#include <vector>
-#include "webrtc/modules/audio_processing/include/audio_processing.h"
+#include "webrtc/common.h"
namespace webrtc {
-class AudioProcessingImpl;
-
class ProcessingComponent {
public:
ProcessingComponent();
- explicit ProcessingComponent(const AudioProcessingImpl* apm);
virtual ~ProcessingComponent();
virtual int Initialize();
@@ -45,7 +42,6 @@ class ProcessingComponent {
virtual int num_handles_required() const = 0;
virtual int GetHandleError(void* handle) const = 0;
- const AudioProcessingImpl* apm_;
std::vector<void*> handles_;
bool initialized_;
bool enabled_;
diff --git a/modules/audio_processing/test/audio_processing_unittest.cc b/modules/audio_processing/test/audio_processing_unittest.cc
index 8f8dad0c..c66c12ef 100644
--- a/modules/audio_processing/test/audio_processing_unittest.cc
+++ b/modules/audio_processing/test/audio_processing_unittest.cc
@@ -36,8 +36,11 @@
# define WEBRTC_AUDIOPROC_BIT_EXACT
#endif
+#define EXPECT_NOERR(expr) EXPECT_EQ(AudioProcessing::kNoError, expr)
+
namespace webrtc {
namespace {
+
// TODO(bjornv): This is not feasible until the functionality has been
// re-implemented; see comment at the bottom of this file.
// When false, this will compare the output data with the results stored to
@@ -61,6 +64,28 @@ const int kProcessSampleRates[] = {8000, 16000, 32000};
const size_t kProcessSampleRatesSize = sizeof(kProcessSampleRates) /
sizeof(*kProcessSampleRates);
+// Helper to encapsulate a contiguous data buffer with access to a pointer
+// array of the deinterleaved channels.
+template <typename T>
+class ChannelBuffer {
+ public:
+ ChannelBuffer(int samples_per_channel, int num_channels)
+ : data_(new T[samples_per_channel * num_channels]),
+ channels_(new T*[num_channels]) {
+ memset(data_.get(), 0, sizeof(T) * samples_per_channel * num_channels);
+ for (int i = 0; i < num_channels; ++i)
+ channels_[i] = &data_[i * samples_per_channel];
+ }
+ ~ChannelBuffer() {}
+
+ T* data() { return data_.get(); }
+ T** channels() { return channels_.get(); }
+
+ private:
+ scoped_ptr<T[]> data_;
+ scoped_ptr<T*[]> channels_;
+};
+
int TruncateToMultipleOf10(int value) {
return (value / 10) * 10;
}
@@ -104,27 +129,61 @@ void SetFrameTo(AudioFrame* frame, int16_t left, int16_t right) {
void ScaleFrame(AudioFrame* frame, float scale) {
for (int i = 0; i < frame->samples_per_channel_ * frame->num_channels_; ++i) {
- frame->data_[i] = RoundToInt16(ClampInt16(frame->data_[i] * scale));
+ frame->data_[i] = RoundToInt16(frame->data_[i] * scale);
}
}
bool FrameDataAreEqual(const AudioFrame& frame1, const AudioFrame& frame2) {
- if (frame1.samples_per_channel_ !=
- frame2.samples_per_channel_) {
+ if (frame1.samples_per_channel_ != frame2.samples_per_channel_) {
return false;
}
- if (frame1.num_channels_ !=
- frame2.num_channels_) {
+ if (frame1.num_channels_ != frame2.num_channels_) {
return false;
}
if (memcmp(frame1.data_, frame2.data_,
frame1.samples_per_channel_ * frame1.num_channels_ *
- sizeof(int16_t))) {
+ sizeof(int16_t))) {
return false;
}
return true;
}
+AudioProcessing::ChannelLayout LayoutFromChannels(int num_channels) {
+ switch (num_channels) {
+ case 1:
+ return AudioProcessing::kMono;
+ case 2:
+ return AudioProcessing::kStereo;
+ default:
+ assert(false);
+ return AudioProcessing::kMono;
+ }
+}
+
+void EnableAllAPComponents(AudioProcessing* ap) {
+#if defined(WEBRTC_AUDIOPROC_FIXED_PROFILE)
+ EXPECT_NOERR(ap->echo_control_mobile()->Enable(true));
+
+ EXPECT_NOERR(ap->gain_control()->set_mode(GainControl::kAdaptiveDigital));
+ EXPECT_NOERR(ap->gain_control()->Enable(true));
+#elif defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
+ EXPECT_NOERR(ap->echo_cancellation()->enable_drift_compensation(true));
+ EXPECT_NOERR(ap->echo_cancellation()->enable_metrics(true));
+ EXPECT_NOERR(ap->echo_cancellation()->enable_delay_logging(true));
+ EXPECT_NOERR(ap->echo_cancellation()->Enable(true));
+
+ EXPECT_NOERR(ap->gain_control()->set_mode(GainControl::kAdaptiveAnalog));
+ EXPECT_NOERR(ap->gain_control()->set_analog_level_limits(0, 255));
+ EXPECT_NOERR(ap->gain_control()->Enable(true));
+#endif
+
+ EXPECT_NOERR(ap->high_pass_filter()->Enable(true));
+ EXPECT_NOERR(ap->level_estimator()->Enable(true));
+ EXPECT_NOERR(ap->noise_suppression()->Enable(true));
+
+ EXPECT_NOERR(ap->voice_detection()->Enable(true));
+}
+
#ifdef WEBRTC_AUDIOPROC_BIT_EXACT
// These functions are only used by the bit-exact test.
template <class T>
@@ -176,6 +235,7 @@ void WriteMessageLiteToFile(const std::string filename,
delete [] array;
fclose(file);
}
+#endif // WEBRTC_AUDIOPROC_BIT_EXACT
void ReadMessageLiteFromFile(const std::string filename,
::google::protobuf::MessageLite* message) {
@@ -195,7 +255,6 @@ void ReadMessageLiteFromFile(const std::string filename,
delete [] array;
fclose(file);
}
-#endif // WEBRTC_AUDIOPROC_BIT_EXACT
class ApmTest : public ::testing::Test {
protected:
@@ -216,6 +275,7 @@ class ApmTest : public ::testing::Test {
void Init(int sample_rate_hz, int num_reverse_channels,
int num_input_channels, int num_output_channels,
bool open_output_file);
+ void Init(AudioProcessing* ap);
std::string ResourceFilePath(std::string name, int sample_rate_hz);
std::string OutputFilePath(std::string name,
int sample_rate_hz,
@@ -224,7 +284,10 @@ class ApmTest : public ::testing::Test {
int num_output_channels);
void EnableAllComponents();
bool ReadFrame(FILE* file, AudioFrame* frame);
+ bool ReadFrame(FILE* file, AudioFrame* frame, ChannelBuffer<float>* cb);
void ReadFrameWithRewind(FILE* file, AudioFrame* frame);
+ void ReadFrameWithRewind(FILE* file, AudioFrame* frame,
+ ChannelBuffer<float>* cb);
void ProcessWithDefaultStreamParameters(AudioFrame* frame);
void ProcessDelayVerificationTest(int delay_ms, int system_delay_ms,
int delay_min, int delay_max);
@@ -232,6 +295,10 @@ class ApmTest : public ::testing::Test {
AudioProcessing::Error expected_return);
void RunQuantizedVolumeDoesNotGetStuckTest(int sample_rate);
void RunManualVolumeChangeIsPossibleTest(int sample_rate);
+ void StreamParametersTest(bool int_format);
+ void SampleRatesTest(bool int_format);
+ int ProcessStreamChooser(bool int_format);
+ int AnalyzeReverseStreamChooser(bool int_format);
const std::string output_path_;
const std::string ref_path_;
@@ -239,6 +306,8 @@ class ApmTest : public ::testing::Test {
scoped_ptr<AudioProcessing> apm_;
AudioFrame* frame_;
AudioFrame* revframe_;
+ scoped_ptr<ChannelBuffer<float> > float_cb_;
+ scoped_ptr<ChannelBuffer<float> > revfloat_cb_;
FILE* far_file_;
FILE* near_file_;
FILE* out_file_;
@@ -330,6 +399,14 @@ std::string ApmTest::OutputFilePath(std::string name,
return output_path_ + ss.str();
}
+void ApmTest::Init(AudioProcessing* ap) {
+ // Make one process call to ensure the audio parameters are set. It might
+ // result in a stream error which we can safely ignore.
+ int err = ap->ProcessStream(frame_);
+ ASSERT_TRUE(err == kNoErr || err == ap->kStreamParameterNotSetError);
+ ASSERT_EQ(ap->kNoError, ap->Initialize());
+}
+
void ApmTest::Init(int sample_rate_hz, int num_reverse_channels,
int num_input_channels, int num_output_channels,
bool open_output_file) {
@@ -338,15 +415,15 @@ void ApmTest::Init(int sample_rate_hz, int num_reverse_channels,
frame_->samples_per_channel_ = samples_per_channel;
frame_->num_channels_ = num_input_channels;
frame_->sample_rate_hz_ = sample_rate_hz;
+ float_cb_.reset(new ChannelBuffer<float>(samples_per_channel,
+ num_input_channels));
revframe_->samples_per_channel_ = samples_per_channel;
revframe_->num_channels_ = num_reverse_channels;
revframe_->sample_rate_hz_ = sample_rate_hz;
+ revfloat_cb_.reset(new ChannelBuffer<float>(samples_per_channel,
+ num_reverse_channels));
- // Make one process call to ensure the audio parameters are set. It might
- // result in a stream error which we can safely ignore.
- int err = apm_->ProcessStream(frame_);
- ASSERT_TRUE(err == kNoErr || err == apm_->kStreamParameterNotSetError);
- ASSERT_EQ(apm_->kNoError, apm_->Initialize());
+ Init(apm_.get());
if (far_file_) {
ASSERT_EQ(0, fclose(far_file_));
@@ -377,42 +454,11 @@ void ApmTest::Init(int sample_rate_hz, int num_reverse_channels,
}
void ApmTest::EnableAllComponents() {
-#if defined(WEBRTC_AUDIOPROC_FIXED_PROFILE)
- EXPECT_EQ(apm_->kNoError, apm_->echo_control_mobile()->Enable(true));
-
- EXPECT_EQ(apm_->kNoError,
- apm_->gain_control()->set_mode(GainControl::kAdaptiveDigital));
- EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(true));
-#elif defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
- EXPECT_EQ(apm_->kNoError,
- apm_->echo_cancellation()->enable_drift_compensation(true));
- EXPECT_EQ(apm_->kNoError,
- apm_->echo_cancellation()->enable_metrics(true));
- EXPECT_EQ(apm_->kNoError,
- apm_->echo_cancellation()->enable_delay_logging(true));
- EXPECT_EQ(apm_->kNoError, apm_->echo_cancellation()->Enable(true));
-
- EXPECT_EQ(apm_->kNoError,
- apm_->gain_control()->set_mode(GainControl::kAdaptiveAnalog));
- EXPECT_EQ(apm_->kNoError,
- apm_->gain_control()->set_analog_level_limits(0, 255));
- EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(true));
-#endif
-
- EXPECT_EQ(apm_->kNoError,
- apm_->high_pass_filter()->Enable(true));
-
- EXPECT_EQ(apm_->kNoError,
- apm_->level_estimator()->Enable(true));
-
- EXPECT_EQ(apm_->kNoError,
- apm_->noise_suppression()->Enable(true));
-
- EXPECT_EQ(apm_->kNoError,
- apm_->voice_detection()->Enable(true));
+ EnableAllAPComponents(apm_.get());
}
-bool ApmTest::ReadFrame(FILE* file, AudioFrame* frame) {
+bool ApmTest::ReadFrame(FILE* file, AudioFrame* frame,
+ ChannelBuffer<float>* cb) {
// The files always contain stereo audio.
size_t frame_size = frame->samples_per_channel_ * 2;
size_t read_count = fread(frame->data_,
@@ -430,18 +476,39 @@ bool ApmTest::ReadFrame(FILE* file, AudioFrame* frame) {
frame->samples_per_channel_);
}
+ // Convert to deinterleaved float.
+ if (cb) {
+ ChannelBuffer<int16_t> cb_int(frame->samples_per_channel_,
+ frame->num_channels_);
+ Deinterleave(frame->data_,
+ frame->samples_per_channel_,
+ frame->num_channels_,
+ cb_int.channels());
+ ScaleToFloat(cb_int.data(),
+ frame->samples_per_channel_ * frame->num_channels_,
+ cb->data());
+ }
return true;
}
+bool ApmTest::ReadFrame(FILE* file, AudioFrame* frame) {
+ return ReadFrame(file, frame, NULL);
+}
+
// If the end of the file has been reached, rewind it and attempt to read the
// frame again.
-void ApmTest::ReadFrameWithRewind(FILE* file, AudioFrame* frame) {
- if (!ReadFrame(near_file_, frame_)) {
+void ApmTest::ReadFrameWithRewind(FILE* file, AudioFrame* frame,
+ ChannelBuffer<float>* cb) {
+  if (!ReadFrame(file, frame, cb)) {
-    rewind(near_file_);
+    rewind(file);
-    ASSERT_TRUE(ReadFrame(near_file_, frame_));
+    ASSERT_TRUE(ReadFrame(file, frame, cb));
}
}
+void ApmTest::ReadFrameWithRewind(FILE* file, AudioFrame* frame) {
+ ReadFrameWithRewind(file, frame, NULL);
+}
+
void ApmTest::ProcessWithDefaultStreamParameters(AudioFrame* frame) {
EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(0));
apm_->echo_cancellation()->set_stream_drift_samples(0);
@@ -450,6 +517,30 @@ void ApmTest::ProcessWithDefaultStreamParameters(AudioFrame* frame) {
EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame));
}
+int ApmTest::ProcessStreamChooser(bool int_format) {
+ if (int_format) {
+ return apm_->ProcessStream(frame_);
+ }
+ // TODO(ajm): Update to match the number of output channels when supported.
+ return apm_->ProcessStream(float_cb_->channels(),
+ frame_->samples_per_channel_,
+ frame_->sample_rate_hz_,
+ LayoutFromChannels(frame_->num_channels_),
+ LayoutFromChannels(frame_->num_channels_));
+}
+
+int ApmTest::AnalyzeReverseStreamChooser(bool int_format) {
+ if (int_format) {
+ return apm_->AnalyzeReverseStream(revframe_);
+ }
+ // TODO(ajm): Update to match the number of output channels when supported.
+ return apm_->AnalyzeReverseStream(
+ revfloat_cb_->channels(),
+ revframe_->samples_per_channel_,
+ revframe_->sample_rate_hz_,
+ LayoutFromChannels(revframe_->num_channels_));
+}
+
void ApmTest::ProcessDelayVerificationTest(int delay_ms, int system_delay_ms,
int delay_min, int delay_max) {
// The |revframe_| and |frame_| should include the proper frame information,
@@ -537,20 +628,21 @@ void ApmTest::ProcessDelayVerificationTest(int delay_ms, int system_delay_ms,
EXPECT_LE(expected_median_low, median);
}
-TEST_F(ApmTest, StreamParameters) {
+void ApmTest::StreamParametersTest(bool int_format) {
// No errors when the components are disabled.
- EXPECT_EQ(apm_->kNoError,
- apm_->ProcessStream(frame_));
+ EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(int_format));
// -- Missing AGC level --
EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(true));
- EXPECT_EQ(apm_->kStreamParameterNotSetError, apm_->ProcessStream(frame_));
+ EXPECT_EQ(apm_->kStreamParameterNotSetError,
+ ProcessStreamChooser(int_format));
// Resets after successful ProcessStream().
EXPECT_EQ(apm_->kNoError,
apm_->gain_control()->set_stream_analog_level(127));
- EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
- EXPECT_EQ(apm_->kStreamParameterNotSetError, apm_->ProcessStream(frame_));
+ EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(int_format));
+ EXPECT_EQ(apm_->kStreamParameterNotSetError,
+ ProcessStreamChooser(int_format));
// Other stream parameters set correctly.
EXPECT_EQ(apm_->kNoError, apm_->echo_cancellation()->Enable(true));
@@ -559,20 +651,22 @@ TEST_F(ApmTest, StreamParameters) {
EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
apm_->echo_cancellation()->set_stream_drift_samples(0);
EXPECT_EQ(apm_->kStreamParameterNotSetError,
- apm_->ProcessStream(frame_));
+ ProcessStreamChooser(int_format));
EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(false));
EXPECT_EQ(apm_->kNoError,
apm_->echo_cancellation()->enable_drift_compensation(false));
// -- Missing delay --
EXPECT_EQ(apm_->kNoError, apm_->echo_cancellation()->Enable(true));
- EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
- EXPECT_EQ(apm_->kStreamParameterNotSetError, apm_->ProcessStream(frame_));
+ EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(int_format));
+ EXPECT_EQ(apm_->kStreamParameterNotSetError,
+ ProcessStreamChooser(int_format));
// Resets after successful ProcessStream().
EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
- EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
- EXPECT_EQ(apm_->kStreamParameterNotSetError, apm_->ProcessStream(frame_));
+ EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(int_format));
+ EXPECT_EQ(apm_->kStreamParameterNotSetError,
+ ProcessStreamChooser(int_format));
// Other stream parameters set correctly.
EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(true));
@@ -581,37 +675,49 @@ TEST_F(ApmTest, StreamParameters) {
apm_->echo_cancellation()->set_stream_drift_samples(0);
EXPECT_EQ(apm_->kNoError,
apm_->gain_control()->set_stream_analog_level(127));
- EXPECT_EQ(apm_->kStreamParameterNotSetError, apm_->ProcessStream(frame_));
+ EXPECT_EQ(apm_->kStreamParameterNotSetError,
+ ProcessStreamChooser(int_format));
EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(false));
// -- Missing drift --
- EXPECT_EQ(apm_->kStreamParameterNotSetError, apm_->ProcessStream(frame_));
+ EXPECT_EQ(apm_->kStreamParameterNotSetError,
+ ProcessStreamChooser(int_format));
// Resets after successful ProcessStream().
EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
apm_->echo_cancellation()->set_stream_drift_samples(0);
- EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
- EXPECT_EQ(apm_->kStreamParameterNotSetError, apm_->ProcessStream(frame_));
+ EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(int_format));
+ EXPECT_EQ(apm_->kStreamParameterNotSetError,
+ ProcessStreamChooser(int_format));
// Other stream parameters set correctly.
EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(true));
EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
EXPECT_EQ(apm_->kNoError,
apm_->gain_control()->set_stream_analog_level(127));
- EXPECT_EQ(apm_->kStreamParameterNotSetError, apm_->ProcessStream(frame_));
+ EXPECT_EQ(apm_->kStreamParameterNotSetError,
+ ProcessStreamChooser(int_format));
// -- No stream parameters --
EXPECT_EQ(apm_->kNoError,
- apm_->AnalyzeReverseStream(revframe_));
+ AnalyzeReverseStreamChooser(int_format));
EXPECT_EQ(apm_->kStreamParameterNotSetError,
- apm_->ProcessStream(frame_));
+ ProcessStreamChooser(int_format));
// -- All there --
EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
apm_->echo_cancellation()->set_stream_drift_samples(0);
EXPECT_EQ(apm_->kNoError,
apm_->gain_control()->set_stream_analog_level(127));
- EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+ EXPECT_EQ(apm_->kNoError, ProcessStreamChooser(int_format));
+}
+
+TEST_F(ApmTest, StreamParametersInt) {
+ StreamParametersTest(true);
+}
+
+TEST_F(ApmTest, StreamParametersFloat) {
+ StreamParametersTest(false);
}
TEST_F(ApmTest, DefaultDelayOffsetIsZero) {
@@ -657,19 +763,27 @@ TEST_F(ApmTest, Channels) {
}
}
-TEST_F(ApmTest, SampleRates) {
+void ApmTest::SampleRatesTest(bool int_format) {
// Testing invalid sample rates
SetFrameSampleRate(frame_, 10000);
- EXPECT_EQ(apm_->kBadSampleRateError, apm_->ProcessStream(frame_));
+ EXPECT_EQ(apm_->kBadSampleRateError, ProcessStreamChooser(int_format));
// Testing valid sample rates
int fs[] = {8000, 16000, 32000};
for (size_t i = 0; i < sizeof(fs) / sizeof(*fs); i++) {
SetFrameSampleRate(frame_, fs[i]);
- EXPECT_EQ(kNoErr, apm_->ProcessStream(frame_));
+ EXPECT_EQ(kNoErr, ProcessStreamChooser(int_format));
EXPECT_EQ(fs[i], apm_->sample_rate_hz());
}
}
+TEST_F(ApmTest, SampleRatesInt) {
+ SampleRatesTest(true);
+}
+
+TEST_F(ApmTest, SampleRatesFloat) {
+ SampleRatesTest(false);
+}
+
TEST_F(ApmTest, EchoCancellation) {
EXPECT_EQ(apm_->kNoError,
apm_->echo_cancellation()->enable_drift_compensation(true));
@@ -1256,13 +1370,11 @@ TEST_F(ApmTest, IdenticalInputChannelsResultInIdenticalOutputChannels) {
int analog_level = 127;
EXPECT_EQ(0, feof(far_file_));
EXPECT_EQ(0, feof(near_file_));
- while (1) {
- if (!ReadFrame(far_file_, revframe_)) break;
+ while (ReadFrame(far_file_, revframe_) && ReadFrame(near_file_, frame_)) {
CopyLeftToRightChannel(revframe_->data_, revframe_->samples_per_channel_);
EXPECT_EQ(apm_->kNoError, apm_->AnalyzeReverseStream(revframe_));
- if (!ReadFrame(near_file_, frame_)) break;
CopyLeftToRightChannel(frame_->data_, frame_->samples_per_channel_);
frame_->vad_activity_ = AudioFrame::kVadUnknown;
@@ -1416,6 +1528,90 @@ TEST_F(ApmTest, DebugDumpFromFileHandle) {
#endif // WEBRTC_AUDIOPROC_DEBUG_DUMP
}
+TEST_F(ApmTest, FloatAndIntInterfacesGiveIdenticalResults) {
+ audioproc::OutputData ref_data;
+ ReadMessageLiteFromFile(ref_filename_, &ref_data);
+
+ Config config;
+ config.Set<ExperimentalAgc>(new ExperimentalAgc(false));
+ scoped_ptr<AudioProcessing> fapm(AudioProcessing::Create(config));
+ EnableAllComponents();
+ EnableAllAPComponents(fapm.get());
+ for (int i = 0; i < ref_data.test_size(); i++) {
+ printf("Running test %d of %d...\n", i + 1, ref_data.test_size());
+
+ audioproc::Test* test = ref_data.mutable_test(i);
+ // TODO(ajm): Restore downmixing test cases.
+ if (test->num_input_channels() != test->num_output_channels())
+ continue;
+
+ const int num_render_channels = test->num_reverse_channels();
+ const int num_input_channels = test->num_input_channels();
+ const int num_output_channels = test->num_output_channels();
+ const int samples_per_channel = test->sample_rate() * kChunkSizeMs / 1000;
+ const int output_length = samples_per_channel * num_output_channels;
+
+ Init(test->sample_rate(), num_render_channels, num_input_channels,
+ num_output_channels, true);
+ Init(fapm.get());
+
+ ChannelBuffer<int16_t> output_cb(samples_per_channel, num_input_channels);
+ scoped_ptr<int16_t[]> output_int16(new int16_t[output_length]);
+
+ int analog_level = 127;
+ while (ReadFrame(far_file_, revframe_, revfloat_cb_.get()) &&
+ ReadFrame(near_file_, frame_, float_cb_.get())) {
+ frame_->vad_activity_ = AudioFrame::kVadUnknown;
+
+ EXPECT_NOERR(apm_->AnalyzeReverseStream(revframe_));
+ EXPECT_NOERR(fapm->AnalyzeReverseStream(
+ revfloat_cb_->channels(),
+ samples_per_channel,
+ test->sample_rate(),
+ LayoutFromChannels(num_render_channels)));
+
+ EXPECT_NOERR(apm_->set_stream_delay_ms(0));
+ EXPECT_NOERR(fapm->set_stream_delay_ms(0));
+ apm_->echo_cancellation()->set_stream_drift_samples(0);
+ fapm->echo_cancellation()->set_stream_drift_samples(0);
+ EXPECT_NOERR(apm_->gain_control()->set_stream_analog_level(analog_level));
+ EXPECT_NOERR(fapm->gain_control()->set_stream_analog_level(analog_level));
+
+ EXPECT_NOERR(apm_->ProcessStream(frame_));
+ EXPECT_NOERR(fapm->ProcessStream(
+ float_cb_->channels(),
+ samples_per_channel,
+ test->sample_rate(),
+ LayoutFromChannels(num_input_channels),
+ LayoutFromChannels(num_output_channels)));
+
+ // Convert to interleaved int16.
+ ScaleAndRoundToInt16(float_cb_->data(), output_length, output_cb.data());
+ Interleave(output_cb.channels(),
+ samples_per_channel,
+ num_output_channels,
+ output_int16.get());
+ // Verify float and int16 paths produce identical output.
+ EXPECT_EQ(0, memcmp(frame_->data_, output_int16.get(),
+ output_length * sizeof(int16_t)));
+
+ analog_level = fapm->gain_control()->stream_analog_level();
+ EXPECT_EQ(apm_->gain_control()->stream_analog_level(),
+ fapm->gain_control()->stream_analog_level());
+ EXPECT_EQ(apm_->echo_cancellation()->stream_has_echo(),
+ fapm->echo_cancellation()->stream_has_echo());
+ EXPECT_EQ(apm_->voice_detection()->stream_has_voice(),
+ fapm->voice_detection()->stream_has_voice());
+ EXPECT_EQ(apm_->noise_suppression()->speech_probability(),
+ fapm->noise_suppression()->speech_probability());
+
+ // Reset in case of downmixing.
+ frame_->num_channels_ = test->num_input_channels();
+ }
+ rewind(far_file_);
+ rewind(near_file_);
+ }
+}
+
// TODO(andrew): Add a test to process a few frames with different combinations
// of enabled components.
@@ -1466,11 +1662,9 @@ TEST_F(ApmTest, DISABLED_ON_ANDROID(Process)) {
int max_output_average = 0;
float ns_speech_prob_average = 0.0f;
- while (1) {
- if (!ReadFrame(far_file_, revframe_)) break;
+ while (ReadFrame(far_file_, revframe_) && ReadFrame(near_file_, frame_)) {
EXPECT_EQ(apm_->kNoError, apm_->AnalyzeReverseStream(revframe_));
- if (!ReadFrame(near_file_, frame_)) break;
frame_->vad_activity_ = AudioFrame::kVadUnknown;
EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(0));
@@ -1479,6 +1673,7 @@ TEST_F(ApmTest, DISABLED_ON_ANDROID(Process)) {
apm_->gain_control()->set_stream_analog_level(analog_level));
EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
+
// Ensure the frame was downmixed properly.
EXPECT_EQ(test->num_output_channels(), frame_->num_channels_);
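
The conversions driving the float/int comparison test above follow one round trip: interleaved int16 is split into per-channel buffers, scaled into [-1, 1] floats, then scaled and re-interleaved. A fragment-style sketch using the same helpers with arbitrary sample values, as it would appear inside this test file (ChannelBuffer is the test-local class defined earlier):

    void RoundTripExample() {
      const int kSamples = 4;
      const int kChannels = 2;
      int16_t interleaved[kSamples * kChannels] = {100, -100, 200, -200,
                                                   300, -300, 400, -400};

      ChannelBuffer<int16_t> cb_int(kSamples, kChannels);
      ChannelBuffer<float> cb_float(kSamples, kChannels);
      Deinterleave(interleaved, kSamples, kChannels, cb_int.channels());
      ScaleToFloat(cb_int.data(), kSamples * kChannels, cb_float.data());
      // Float processing would happen here, on cb_float.channels().
      ScaleAndRoundToInt16(cb_float.data(), kSamples * kChannels,
                           cb_int.data());
      Interleave(cb_int.channels(), kSamples, kChannels, interleaved);
    }
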
diff --git a/modules/audio_processing/voice_detection_impl.cc b/modules/audio_processing/voice_detection_impl.cc
index d41547c8..1b1dd8b8 100644
--- a/modules/audio_processing/voice_detection_impl.cc
+++ b/modules/audio_processing/voice_detection_impl.cc
@@ -13,10 +13,8 @@
#include <assert.h>
#include "webrtc/common_audio/vad/include/webrtc_vad.h"
-#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
-
#include "webrtc/modules/audio_processing/audio_buffer.h"
-#include "webrtc/modules/audio_processing/audio_processing_impl.h"
+#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
namespace webrtc {
@@ -39,9 +37,11 @@ int MapSetting(VoiceDetection::Likelihood likelihood) {
}
} // namespace
-VoiceDetectionImpl::VoiceDetectionImpl(const AudioProcessingImpl* apm)
- : ProcessingComponent(apm),
+VoiceDetectionImpl::VoiceDetectionImpl(const AudioProcessing* apm,
+ CriticalSectionWrapper* crit)
+ : ProcessingComponent(),
apm_(apm),
+ crit_(crit),
stream_has_voice_(false),
using_external_vad_(false),
likelihood_(kLowLikelihood),
@@ -87,7 +87,7 @@ int VoiceDetectionImpl::ProcessCaptureAudio(AudioBuffer* audio) {
}
int VoiceDetectionImpl::Enable(bool enable) {
- CriticalSectionScoped crit_scoped(apm_->crit());
+ CriticalSectionScoped crit_scoped(crit_);
return EnableComponent(enable);
}
@@ -108,7 +108,7 @@ bool VoiceDetectionImpl::stream_has_voice() const {
}
int VoiceDetectionImpl::set_likelihood(VoiceDetection::Likelihood likelihood) {
- CriticalSectionScoped crit_scoped(apm_->crit());
+ CriticalSectionScoped crit_scoped(crit_);
if (MapSetting(likelihood) == -1) {
return apm_->kBadParameterError;
}
@@ -122,7 +122,7 @@ VoiceDetection::Likelihood VoiceDetectionImpl::likelihood() const {
}
int VoiceDetectionImpl::set_frame_size_ms(int size) {
- CriticalSectionScoped crit_scoped(apm_->crit());
+ CriticalSectionScoped crit_scoped(crit_);
assert(size == 10); // TODO(ajm): remove when supported.
if (size != 10 &&
size != 20 &&
diff --git a/modules/audio_processing/voice_detection_impl.h b/modules/audio_processing/voice_detection_impl.h
index f8f50e84..5d06517a 100644
--- a/modules/audio_processing/voice_detection_impl.h
+++ b/modules/audio_processing/voice_detection_impl.h
@@ -15,13 +15,14 @@
#include "webrtc/modules/audio_processing/processing_component.h"
namespace webrtc {
-class AudioProcessingImpl;
+
class AudioBuffer;
+class CriticalSectionWrapper;
class VoiceDetectionImpl : public VoiceDetection,
public ProcessingComponent {
public:
- explicit VoiceDetectionImpl(const AudioProcessingImpl* apm);
+ VoiceDetectionImpl(const AudioProcessing* apm, CriticalSectionWrapper* crit);
virtual ~VoiceDetectionImpl();
int ProcessCaptureAudio(AudioBuffer* audio);
@@ -50,7 +51,8 @@ class VoiceDetectionImpl : public VoiceDetection,
virtual int num_handles_required() const OVERRIDE;
virtual int GetHandleError(void* handle) const OVERRIDE;
- const AudioProcessingImpl* apm_;
+ const AudioProcessing* apm_;
+ CriticalSectionWrapper* crit_;
bool stream_has_voice_;
bool using_external_vad_;
Likelihood likelihood_;
diff --git a/modules/pacing/include/paced_sender.h b/modules/pacing/include/paced_sender.h
index 04546900..45c89b77 100644
--- a/modules/pacing/include/paced_sender.h
+++ b/modules/pacing/include/paced_sender.h
@@ -49,6 +49,7 @@ class PacedSender : public Module {
bool retransmission) = 0;
// Called when it's a good time to send padding data.
virtual int TimeToSendPadding(int bytes) = 0;
+
protected:
virtual ~Callback() {}
};
diff --git a/modules/pacing/paced_sender_unittest.cc b/modules/pacing/paced_sender_unittest.cc
index f8dcdfc6..a99101db 100644
--- a/modules/pacing/paced_sender_unittest.cc
+++ b/modules/pacing/paced_sender_unittest.cc
@@ -421,7 +421,8 @@ TEST_F(PacedSenderTest, Pause) {
EXPECT_EQ(0, send_bucket_->TimeUntilNextProcess());
EXPECT_EQ(0, send_bucket_->Process());
- EXPECT_CALL(callback_, TimeToSendPacket(_, _, second_capture_time_ms, false))
+ EXPECT_CALL(
+ callback_, TimeToSendPacket(_, _, second_capture_time_ms, false))
.Times(1)
.WillRepeatedly(Return(true));
EXPECT_EQ(5, send_bucket_->TimeUntilNextProcess());
diff --git a/modules/remote_bitrate_estimator/overuse_detector.cc b/modules/remote_bitrate_estimator/overuse_detector.cc
index 86f6cb8e..4ac042f5 100644
--- a/modules/remote_bitrate_estimator/overuse_detector.cc
+++ b/modules/remote_bitrate_estimator/overuse_detector.cc
@@ -249,10 +249,10 @@ void OveruseDetector::UpdateKalman(int64_t t_delta,
const double residual = t_ts_delta - slope_*h[0] - offset_;
const bool stable_state =
- (BWE_MIN(num_of_deltas_, 60) * fabsf(offset_) < threshold_);
+ (BWE_MIN(num_of_deltas_, 60) * fabs(offset_) < threshold_);
// We try to filter out very late frames. For instance, periodic key
// frames don't fit the Gaussian model well.
- if (fabsf(residual) < 3 * sqrt(var_noise_)) {
+ if (fabs(residual) < 3 * sqrt(var_noise_)) {
UpdateNoiseEstimate(residual, min_frame_period, stable_state);
} else {
UpdateNoiseEstimate(3 * sqrt(var_noise_), min_frame_period, stable_state);
@@ -358,7 +358,7 @@ BandwidthUsage OveruseDetector::Detect(double ts_delta) {
return kBwNormal;
}
const double T = BWE_MIN(num_of_deltas_, 60) * offset_;
- if (fabsf(T) > threshold_) {
+ if (fabs(T) > threshold_) {
if (offset_ > 0) {
if (time_over_using_ == -1) {
// Initialize the timer. Assume that we've been
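
The fabsf-to-fabs changes here, like the abs and labs fixes in the video_coding and call_perf files below, all correct the same class of bug: the arguments are doubles (or 64-bit integers), and the narrower C function silently converts them first. A standalone demonstration of the fabsf case:

    #include <math.h>
    #include <stdio.h>

    int main() {
      double offset = 1e-9;  // A plausible estimator offset.
      // fabsf() converts its argument to float (~7 significant digits)
      // before taking the absolute value; fabs() keeps double precision.
      printf("fabsf: %.17g\n", static_cast<double>(fabsf(offset)));
      printf("fabs:  %.17g\n", fabs(offset));
      return 0;
    }
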
diff --git a/modules/video_capture/android/java/src/org/webrtc/videoengine/VideoCaptureAndroid.java b/modules/video_capture/android/java/src/org/webrtc/videoengine/VideoCaptureAndroid.java
index 691531f8..dfe63adf 100644
--- a/modules/video_capture/android/java/src/org/webrtc/videoengine/VideoCaptureAndroid.java
+++ b/modules/video_capture/android/java/src/org/webrtc/videoengine/VideoCaptureAndroid.java
@@ -31,7 +31,9 @@ import android.view.SurfaceHolder.Callback;
// the entry points to this class are all synchronized. This shouldn't present
// a performance bottleneck because only onPreviewFrame() is called more than
// once (and is called serially on a single thread), so the lock should be
-// uncontended.
+// uncontended. Note that each of these synchronized methods must check
+// |camera| for null, since the method may have blocked while stopCapture()
+// ran and set |camera| to null.
public class VideoCaptureAndroid implements PreviewCallback, Callback {
private final static String TAG = "WEBRTC-JC";
@@ -149,7 +151,13 @@ public class VideoCaptureAndroid implements PreviewCallback, Callback {
private native void ProvideCameraFrame(
byte[] data, int length, long captureObject);
- public synchronized void onPreviewFrame(byte[] data, Camera camera) {
+ public synchronized void onPreviewFrame(byte[] data, Camera callbackCamera) {
+ if (camera == null) {
+ return;
+ }
+ if (camera != callbackCamera) {
+ throw new RuntimeException("Unexpected camera in callback!");
+ }
ProvideCameraFrame(data, data.length, native_capturer);
camera.addCallbackBuffer(data);
}
@@ -184,10 +192,11 @@ public class VideoCaptureAndroid implements PreviewCallback, Callback {
public synchronized void surfaceCreated(SurfaceHolder holder) {
Log.d(TAG, "VideoCaptureAndroid::surfaceCreated");
+ if (camera == null) {
+ return;
+ }
try {
- if (camera != null) {
- camera.setPreviewDisplay(holder);
- }
+ camera.setPreviewDisplay(holder);
} catch (IOException e) {
throw new RuntimeException(e);
}
@@ -195,10 +204,11 @@ public class VideoCaptureAndroid implements PreviewCallback, Callback {
public synchronized void surfaceDestroyed(SurfaceHolder holder) {
Log.d(TAG, "VideoCaptureAndroid::surfaceDestroyed");
+ if (camera == null) {
+ return;
+ }
try {
- if (camera != null) {
- camera.setPreviewDisplay(null);
- }
+ camera.setPreviewDisplay(null);
} catch (IOException e) {
throw new RuntimeException(e);
}
diff --git a/modules/video_coding/main/source/codec_database.cc b/modules/video_coding/main/source/codec_database.cc
index 60794f27..3ff8c762 100644
--- a/modules/video_coding/main/source/codec_database.cc
+++ b/modules/video_coding/main/source/codec_database.cc
@@ -160,7 +160,7 @@ bool VCMCodecDataBase::SetSendCodec(
if (max_payload_size <= 0) {
max_payload_size = kDefaultPayloadSize;
}
- if (number_of_cores <= 0 || number_of_cores > 32) {
+ if (number_of_cores <= 0) {
return false;
}
if (send_codec->plType <= 0) {
diff --git a/modules/video_coding/main/source/jitter_estimator.cc b/modules/video_coding/main/source/jitter_estimator.cc
index deb03630..083a9e69 100644
--- a/modules/video_coding/main/source/jitter_estimator.cc
+++ b/modules/video_coding/main/source/jitter_estimator.cc
@@ -162,7 +162,7 @@ VCMJitterEstimator::UpdateEstimate(int64_t frameDelayMS, uint32_t frameSizeBytes
// deviation is probably due to an incorrect line slope.
double deviation = DeviationFromExpectedDelay(frameDelayMS, deltaFS);
- if (abs(deviation) < _numStdDevDelayOutlier * sqrt(_varNoise) ||
+ if (fabs(deviation) < _numStdDevDelayOutlier * sqrt(_varNoise) ||
frameSizeBytes > _avgFrameSize + _numStdDevFrameSizeOutlier * sqrt(_varFrameSize))
{
// Update the variance of the deviation from the
@@ -257,7 +257,7 @@ VCMJitterEstimator::KalmanEstimateChannel(int64_t frameDelayMS,
{
return;
}
- double sigma = (300.0 * exp(-abs(static_cast<double>(deltaFSBytes)) /
+ double sigma = (300.0 * exp(-fabs(static_cast<double>(deltaFSBytes)) /
(1e0 * _maxFrameSize)) + 1) * sqrt(_varNoise);
if (sigma < 1.0)
{
diff --git a/modules/video_coding/main/source/receiver.cc b/modules/video_coding/main/source/receiver.cc
index ae13ddd4..e3fc0cea 100644
--- a/modules/video_coding/main/source/receiver.cc
+++ b/modules/video_coding/main/source/receiver.cc
@@ -156,12 +156,12 @@ VCMEncodedFrame* VCMReceiver::FrameForDecoding(
// Assume that render timing errors are due to changes in the video stream.
if (next_render_time_ms < 0) {
timing_error = true;
- } else if (abs(next_render_time_ms - now_ms) > max_video_delay_ms_) {
+ } else if (labs(next_render_time_ms - now_ms) > max_video_delay_ms_) {
WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceVideoCoding,
VCMId(vcm_id_, receiver_id_),
"This frame is out of our delay bounds, resetting jitter "
"buffer: %d > %d",
- static_cast<int>(abs(next_render_time_ms - now_ms)),
+ static_cast<int>(labs(next_render_time_ms - now_ms)),
max_video_delay_ms_);
timing_error = true;
} else if (static_cast<int>(timing_->TargetVideoDelay()) >
diff --git a/modules/video_coding/main/source/rtt_filter.cc b/modules/video_coding/main/source/rtt_filter.cc
index 25d89e54..473c8869 100644
--- a/modules/video_coding/main/source/rtt_filter.cc
+++ b/modules/video_coding/main/source/rtt_filter.cc
@@ -114,7 +114,7 @@ bool
VCMRttFilter::JumpDetection(uint32_t rttMs)
{
double diffFromAvg = _avgRtt - rttMs;
- if (abs(diffFromAvg) > _jumpStdDevs * sqrt(_varRtt))
+ if (fabs(diffFromAvg) > _jumpStdDevs * sqrt(_varRtt))
{
int diffSign = (diffFromAvg >= 0) ? 1 : -1;
int jumpCountSign = (_jumpCount >= 0) ? 1 : -1;
diff --git a/modules/video_coding/main/source/video_sender_unittest.cc b/modules/video_coding/main/source/video_sender_unittest.cc
index 196eff31..ca11ec47 100644
--- a/modules/video_coding/main/source/video_sender_unittest.cc
+++ b/modules/video_coding/main/source/video_sender_unittest.cc
@@ -52,7 +52,7 @@ struct Vp8StreamInfo {
MATCHER_P(MatchesVp8StreamInfo, expected, "") {
bool res = true;
for (int tl = 0; tl < kMaxNumberOfTemporalLayers; ++tl) {
- if (abs(expected.framerate_fps[tl] - arg.framerate_fps[tl]) > 0.5) {
+ if (fabs(expected.framerate_fps[tl] - arg.framerate_fps[tl]) > 0.5) {
*result_listener << " framerate_fps[" << tl
<< "] = " << arg.framerate_fps[tl] << " (expected "
<< expected.framerate_fps[tl] << ") ";
diff --git a/test/buildbot_tests.py b/test/buildbot_tests.py
index 1c267965..da038d7f 100755
--- a/test/buildbot_tests.py
+++ b/test/buildbot_tests.py
@@ -114,9 +114,6 @@ def main():
print 'Running WebRTC Buildbot tests: %s' % options.test
for test in options.test:
- if test == 'libjingle_peerconnection_java_unittest':
- print 'Skipping disabled test: %s, see webrtc:2960' % test
- continue
cmd_line = test_dict[test]
env = os.environ.copy()
if test in _CUSTOM_ENV:
diff --git a/typedefs.h b/typedefs.h
index fc43c9af..16e2a9fb 100644
--- a/typedefs.h
+++ b/typedefs.h
@@ -68,7 +68,7 @@
#if !defined(_MSC_VER)
#include <stdint.h>
#else
-// Define C99 equivalent types, since MSVC doesn't provide stdint.h.
+// Define C99 equivalent types, since pre-2010 MSVC doesn't provide stdint.h.
typedef signed char int8_t;
typedef signed short int16_t;
typedef signed int int32_t;
diff --git a/video/call_perf_tests.cc b/video/call_perf_tests.cc
index 4766ff81..e4de8777 100644
--- a/video/call_perf_tests.cc
+++ b/video/call_perf_tests.cc
@@ -184,7 +184,7 @@ class VideoRtcpAndSyncObserver : public SyncRtcpObserver, public VideoRenderer {
// estimated as being synchronized. We don't want to trigger on those.
if (time_since_creation < kStartupTimeMs)
return;
- if (abs(latest_audio_ntp - latest_video_ntp) < kInSyncThresholdMs) {
+ if (labs(latest_audio_ntp - latest_video_ntp) < kInSyncThresholdMs) {
if (first_time_in_sync_ == -1) {
first_time_in_sync_ = now_ms;
webrtc::test::PrintResult("sync_convergence_time",
diff --git a/video_engine/include/vie_rtp_rtcp.h b/video_engine/include/vie_rtp_rtcp.h
index c76d2bb2..a358e480 100644
--- a/video_engine/include/vie_rtp_rtcp.h
+++ b/video_engine/include/vie_rtp_rtcp.h
@@ -405,6 +405,13 @@ class WEBRTC_DLLEXPORT ViERTP_RTCP {
const int video_channel,
ReceiveBandwidthEstimatorStats* output) const { return -1; }
+ // This function gets the PacedSender queuing delay for the last sent frame.
+ // TODO(jiayl): remove the default impl when libjingle is updated.
+ virtual int GetPacerQueuingDelayMs(
+ const int video_channel, int* delay_ms) const {
+ return -1;
+ }
+
// This function enables capturing of RTP packets to a binary file on a
// specific channel and for a given direction. The file can later be
// replayed using e.g. RTP Tools rtpplay since the binary file format is
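
A caller-side sketch for the new accessor; rtp_rtcp is assumed to be a valid ViERTP_RTCP interface pointer and channel an existing video channel id (the implementation below returns 0 on success, -1 on a bad channel):

    int delay_ms = 0;
    if (rtp_rtcp->GetPacerQueuingDelayMs(channel, &delay_ms) == 0) {
      // delay_ms now holds the PacedSender queueing delay, in milliseconds,
      // for the last frame sent on this channel.
    }
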
diff --git a/video_engine/vie_encoder.cc b/video_engine/vie_encoder.cc
index 6be93506..dae1c1a5 100644
--- a/video_engine/vie_encoder.cc
+++ b/video_engine/vie_encoder.cc
@@ -734,6 +734,10 @@ int32_t ViEEncoder::SendCodecStatistics(
return 0;
}
+int32_t ViEEncoder::PacerQueuingDelayMs() const {
+ return paced_sender_->QueueInMs();
+}
+
int32_t ViEEncoder::EstimatedSendBandwidth(
uint32_t* available_bandwidth) const {
WEBRTC_TRACE(kTraceInfo, kTraceVideo, ViEId(engine_id_, channel_id_), "%s",
diff --git a/video_engine/vie_encoder.h b/video_engine/vie_encoder.h
index 24bd7202..d3271c31 100644
--- a/video_engine/vie_encoder.h
+++ b/video_engine/vie_encoder.h
@@ -109,6 +109,7 @@ class ViEEncoder
int32_t SendCodecStatistics(uint32_t* num_key_frames,
uint32_t* num_delta_frames);
+ int PacerQueuingDelayMs() const;
int32_t EstimatedSendBandwidth(
uint32_t* available_bandwidth) const;
diff --git a/video_engine/vie_rtp_rtcp_impl.cc b/video_engine/vie_rtp_rtcp_impl.cc
index 627c5308..54afa93c 100644
--- a/video_engine/vie_rtp_rtcp_impl.cc
+++ b/video_engine/vie_rtp_rtcp_impl.cc
@@ -1040,6 +1040,25 @@ int ViERTP_RTCPImpl::GetReceiveBandwidthEstimatorStats(
return 0;
}
+int ViERTP_RTCPImpl::GetPacerQueuingDelayMs(
+ const int video_channel, int* delay_ms) const {
+ WEBRTC_TRACE(kTraceApiCall, kTraceVideo,
+ ViEId(shared_data_->instance_id(), video_channel),
+ "%s(channel: %d)", __FUNCTION__, video_channel);
+ ViEChannelManagerScoped cs(*(shared_data_->channel_manager()));
+ ViEEncoder* vie_encoder = cs.Encoder(video_channel);
+ if (!vie_encoder) {
+ WEBRTC_TRACE(kTraceError, kTraceVideo,
+ ViEId(shared_data_->instance_id(), video_channel),
+ "%s: Could not get encoder for channel %d", __FUNCTION__,
+ video_channel);
+ shared_data_->SetLastError(kViERtpRtcpInvalidChannelId);
+ return -1;
+ }
+ *delay_ms = vie_encoder->PacerQueuingDelayMs();
+ return 0;
+}
+
int ViERTP_RTCPImpl::StartRTPDump(const int video_channel,
const char file_nameUTF8[1024],
RTPDirections direction) {
diff --git a/video_engine/vie_rtp_rtcp_impl.h b/video_engine/vie_rtp_rtcp_impl.h
index 0341d74c..227fa5e4 100644
--- a/video_engine/vie_rtp_rtcp_impl.h
+++ b/video_engine/vie_rtp_rtcp_impl.h
@@ -116,6 +116,8 @@ class ViERTP_RTCPImpl
unsigned int* estimated_bandwidth) const;
virtual int GetReceiveBandwidthEstimatorStats(
const int video_channel, ReceiveBandwidthEstimatorStats* output) const;
+ virtual int GetPacerQueuingDelayMs(const int video_channel,
+ int* delay_ms) const;
virtual int StartRTPDump(const int video_channel,
const char file_nameUTF8[1024],
RTPDirections direction);