summary refs log tree commit diff
diff options
context:
space:
mode:
author	aluebs@webrtc.org <aluebs@webrtc.org@4adac7df-926f-26a2-2b94-8c16560cd09d>	2014-07-17 08:27:39 +0000
committer	aluebs@webrtc.org <aluebs@webrtc.org@4adac7df-926f-26a2-2b94-8c16560cd09d>	2014-07-17 08:27:39 +0000
commit	eb15100c9bdb4c97ffda2c05a934aab270795c27 (patch)
tree	995f40b8d748eeac9ff5c766e21c0acd2934e067
parent	7036325d02854edf67e02610a94ad60801322566 (diff)
download	webrtc-eb15100c9bdb4c97ffda2c05a934aab270795c27.tar.gz
Simplify AudioBuffer::mixed_low_pass_data API
R=andrew@webrtc.org, kwiberg@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/21869004

git-svn-id: http://webrtc.googlecode.com/svn/trunk/webrtc@6715 4adac7df-926f-26a2-2b94-8c16560cd09d
-rw-r--r--	modules/audio_processing/audio_buffer.cc	71
-rw-r--r--	modules/audio_processing/audio_buffer.h	13
-rw-r--r--	modules/audio_processing/gain_control_impl.cc	8
-rw-r--r--	modules/audio_processing/voice_detection_impl.cc	8
4 files changed, 31 insertions, 69 deletions
diff --git a/modules/audio_processing/audio_buffer.cc b/modules/audio_processing/audio_buffer.cc
index 7f579b0d..7eac7ecf 100644
--- a/modules/audio_processing/audio_buffer.cc
+++ b/modules/audio_processing/audio_buffer.cc
@@ -51,7 +51,6 @@ int KeyboardChannelIndex(AudioProcessing::ChannelLayout layout) {
return -1;
}
-
void StereoToMono(const float* left, const float* right, float* out,
int samples_per_channel) {
for (int i = 0; i < samples_per_channel; ++i) {
@@ -155,8 +154,7 @@ AudioBuffer::AudioBuffer(int input_samples_per_channel,
num_proc_channels_(num_process_channels),
output_samples_per_channel_(output_samples_per_channel),
samples_per_split_channel_(proc_samples_per_channel_),
- num_mixed_channels_(0),
- num_mixed_low_pass_channels_(0),
+ mixed_low_pass_valid_(false),
reference_copied_(false),
activity_(AudioFrame::kVadUnknown),
keyboard_data_(NULL),
@@ -278,8 +276,7 @@ void AudioBuffer::CopyTo(int samples_per_channel,
void AudioBuffer::InitForNewData() {
keyboard_data_ = NULL;
- num_mixed_channels_ = 0;
- num_mixed_low_pass_channels_ = 0;
+ mixed_low_pass_valid_ = false;
reference_copied_ = false;
activity_ = AudioFrame::kVadUnknown;
}
@@ -289,6 +286,7 @@ const int16_t* AudioBuffer::data(int channel) const {
}
int16_t* AudioBuffer::data(int channel) {
+ mixed_low_pass_valid_ = false;
const AudioBuffer* t = this;
return const_cast<int16_t*>(t->data(channel));
}
@@ -298,6 +296,7 @@ const float* AudioBuffer::data_f(int channel) const {
}
float* AudioBuffer::data_f(int channel) {
+ mixed_low_pass_valid_ = false;
const AudioBuffer* t = this;
return const_cast<float*>(t->data_f(channel));
}
@@ -308,6 +307,7 @@ const int16_t* AudioBuffer::low_pass_split_data(int channel) const {
}
int16_t* AudioBuffer::low_pass_split_data(int channel) {
+ mixed_low_pass_valid_ = false;
const AudioBuffer* t = this;
return const_cast<int16_t*>(t->low_pass_split_data(channel));
}
@@ -318,6 +318,7 @@ const float* AudioBuffer::low_pass_split_data_f(int channel) const {
}
float* AudioBuffer::low_pass_split_data_f(int channel) {
+ mixed_low_pass_valid_ = false;
const AudioBuffer* t = this;
return const_cast<float*>(t->low_pass_split_data_f(channel));
}
@@ -341,12 +342,26 @@ float* AudioBuffer::high_pass_split_data_f(int channel) {
return const_cast<float*>(t->high_pass_split_data_f(channel));
}
-const int16_t* AudioBuffer::mixed_data(int channel) const {
- return mixed_channels_->channel(channel);
-}
+const int16_t* AudioBuffer::mixed_low_pass_data() {
+ // Currently only mixing stereo to mono is supported.
+ assert(num_proc_channels_ == 1 || num_proc_channels_ == 2);
-const int16_t* AudioBuffer::mixed_low_pass_data(int channel) const {
- return mixed_low_pass_channels_->channel(channel);
+ if (num_proc_channels_ == 1) {
+ return low_pass_split_data(0);
+ }
+
+ if (!mixed_low_pass_valid_) {
+ if (!mixed_low_pass_channels_.get()) {
+ mixed_low_pass_channels_.reset(
+ new ChannelBuffer<int16_t>(samples_per_split_channel_, 1));
+ }
+ StereoToMono(low_pass_split_data(0),
+ low_pass_split_data(1),
+ mixed_low_pass_channels_->data(),
+ samples_per_split_channel_);
+ mixed_low_pass_valid_ = true;
+ }
+ return mixed_low_pass_channels_->data();
}
const int16_t* AudioBuffer::low_pass_reference(int channel) const {
@@ -433,42 +448,6 @@ void AudioBuffer::InterleaveTo(AudioFrame* frame, bool data_changed) const {
}
}
-void AudioBuffer::CopyAndMix(int num_mixed_channels) {
- // We currently only support the stereo to mono case.
- assert(num_proc_channels_ == 2);
- assert(num_mixed_channels == 1);
- if (!mixed_channels_.get()) {
- mixed_channels_.reset(
- new ChannelBuffer<int16_t>(proc_samples_per_channel_,
- num_mixed_channels));
- }
-
- StereoToMono(channels_->ibuf()->channel(0),
- channels_->ibuf()->channel(1),
- mixed_channels_->channel(0),
- proc_samples_per_channel_);
-
- num_mixed_channels_ = num_mixed_channels;
-}
-
-void AudioBuffer::CopyAndMixLowPass(int num_mixed_channels) {
- // We currently only support the stereo to mono case.
- assert(num_proc_channels_ == 2);
- assert(num_mixed_channels == 1);
- if (!mixed_low_pass_channels_.get()) {
- mixed_low_pass_channels_.reset(
- new ChannelBuffer<int16_t>(samples_per_split_channel_,
- num_mixed_channels));
- }
-
- StereoToMono(low_pass_split_data(0),
- low_pass_split_data(1),
- mixed_low_pass_channels_->channel(0),
- samples_per_split_channel_);
-
- num_mixed_low_pass_channels_ = num_mixed_channels;
-}
-
void AudioBuffer::CopyLowPassToReference() {
reference_copied_ = true;
if (!low_pass_reference_channels_.get()) {
diff --git a/modules/audio_processing/audio_buffer.h b/modules/audio_processing/audio_buffer.h
index db24e959..6b1a46f9 100644
--- a/modules/audio_processing/audio_buffer.h
+++ b/modules/audio_processing/audio_buffer.h
@@ -63,8 +63,9 @@ class AudioBuffer {
const int16_t* low_pass_split_data(int channel) const;
int16_t* high_pass_split_data(int channel);
const int16_t* high_pass_split_data(int channel) const;
- const int16_t* mixed_data(int channel) const;
- const int16_t* mixed_low_pass_data(int channel) const;
+ // Returns a pointer to the low-pass data downmixed to mono. If this data
+ // isn't already available it re-calculates it.
+ const int16_t* mixed_low_pass_data();
const int16_t* low_pass_reference(int channel) const;
// Float versions of the accessors, with automatic conversion back and forth
@@ -85,7 +86,6 @@ class AudioBuffer {
// Use for int16 interleaved data.
void DeinterleaveFrom(AudioFrame* audioFrame);
- void InterleaveTo(AudioFrame* audioFrame) const;
// If |data_changed| is false, only the non-audio data members will be copied
// to |frame|.
void InterleaveTo(AudioFrame* frame, bool data_changed) const;
@@ -97,9 +97,6 @@ class AudioBuffer {
void CopyTo(int samples_per_channel,
AudioProcessing::ChannelLayout layout,
float* const* data);
-
- void CopyAndMix(int num_mixed_channels);
- void CopyAndMixLowPass(int num_mixed_channels);
void CopyLowPassToReference();
private:
@@ -112,8 +109,7 @@ class AudioBuffer {
const int num_proc_channels_;
const int output_samples_per_channel_;
int samples_per_split_channel_;
- int num_mixed_channels_;
- int num_mixed_low_pass_channels_;
+ bool mixed_low_pass_valid_;
bool reference_copied_;
AudioFrame::VADActivity activity_;
@@ -121,7 +117,6 @@ class AudioBuffer {
scoped_ptr<IFChannelBuffer> channels_;
scoped_ptr<SplitChannelBuffer> split_channels_;
scoped_ptr<SplitFilterStates[]> filter_states_;
- scoped_ptr<ChannelBuffer<int16_t> > mixed_channels_;
scoped_ptr<ChannelBuffer<int16_t> > mixed_low_pass_channels_;
scoped_ptr<ChannelBuffer<int16_t> > low_pass_reference_channels_;
scoped_ptr<ChannelBuffer<float> > input_buffer_;
diff --git a/modules/audio_processing/gain_control_impl.cc b/modules/audio_processing/gain_control_impl.cc
index a67b67ec..cf7df169 100644
--- a/modules/audio_processing/gain_control_impl.cc
+++ b/modules/audio_processing/gain_control_impl.cc
@@ -59,17 +59,11 @@ int GainControlImpl::ProcessRenderAudio(AudioBuffer* audio) {
assert(audio->samples_per_split_channel() <= 160);
- const int16_t* mixed_data = audio->low_pass_split_data(0);
- if (audio->num_channels() > 1) {
- audio->CopyAndMixLowPass(1);
- mixed_data = audio->mixed_low_pass_data(0);
- }
-
for (int i = 0; i < num_handles(); i++) {
Handle* my_handle = static_cast<Handle*>(handle(i));
int err = WebRtcAgc_AddFarend(
my_handle,
- mixed_data,
+ audio->mixed_low_pass_data(),
static_cast<int16_t>(audio->samples_per_split_channel()));
if (err != apm_->kNoError) {
diff --git a/modules/audio_processing/voice_detection_impl.cc b/modules/audio_processing/voice_detection_impl.cc
index c6e497ff..31336b41 100644
--- a/modules/audio_processing/voice_detection_impl.cc
+++ b/modules/audio_processing/voice_detection_impl.cc
@@ -61,17 +61,11 @@ int VoiceDetectionImpl::ProcessCaptureAudio(AudioBuffer* audio) {
}
assert(audio->samples_per_split_channel() <= 160);
- const int16_t* mixed_data = audio->low_pass_split_data(0);
- if (audio->num_channels() > 1) {
- audio->CopyAndMixLowPass(1);
- mixed_data = audio->mixed_low_pass_data(0);
- }
-
// TODO(ajm): concatenate data in frame buffer here.
int vad_ret = WebRtcVad_Process(static_cast<Handle*>(handle(0)),
apm_->proc_split_sample_rate_hz(),
- mixed_data,
+ audio->mixed_low_pass_data(),
frame_size_samples_);
if (vad_ret == 0) {
stream_has_voice_ = false;