summaryrefslogtreecommitdiff
path: root/voice_engine/channel.cc
diff options
context:
space:
mode:
authorandrew@webrtc.org <andrew@webrtc.org@4adac7df-926f-26a2-2b94-8c16560cd09d>2014-04-03 21:56:01 +0000
committerandrew@webrtc.org <andrew@webrtc.org@4adac7df-926f-26a2-2b94-8c16560cd09d>2014-04-03 21:56:01 +0000
commitf7c73b531c9f2aca2adb87044613a7b7fa94de84 (patch)
tree4e437ae32b38d2a07a05052b5d94fc6eb36710ac /voice_engine/channel.cc
parentb5a182a9321ff847a24620c5e991e1749362549a (diff)
downloadwebrtc-f7c73b531c9f2aca2adb87044613a7b7fa94de84.tar.gz
Consolidate audio conversion from Channel and TransmitMixer.
Replace the two versions with a single DownConvertToCodecFormat. As mentioned in comments, this could be further consolidated with RemixAndResample but we should write a full audio converter class in that case.

Along the way:
- Fix the bug present in Channel::Demultiplex with mono input and a stereo codec.
- Remove the 32 kHz max from the OnDataAvailable path. This avoids a 48 -> 32 -> 48 conversion when VoE is passed 48 kHz audio; instead we get a straight pass-through to ACM. The 32 kHz conversion is still needed in the RecordedDataIsAvailable path until APM natively supports 48 kHz.
- Merge resampler improvements from ACM1 to ACM2. This allows ACM to handle 44.1 kHz audio passed to VoE and was originally done here: https://webrtc-codereview.appspot.com/1590004
- Reuse the RemixAndResample unit tests for DownConvertToCodecFormat.
- Remove unused functions from utility.cc.

BUG=3155,3000,b/12867572
TESTED=voe_cmd_test using both the OnDataAvailable and RecordedDataIsAvailable paths, with a captured audio format of all combinations of {44.1,48} kHz and {1,2} channels, running through all codecs, and finally using both ACM1 and ACM2.

R=henrika@webrtc.org, turaj@webrtc.org, xians@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/11019005

git-svn-id: http://webrtc.googlecode.com/svn/trunk/webrtc@5843 4adac7df-926f-26a2-2b94-8c16560cd09d
Diffstat (limited to 'voice_engine/channel.cc')
-rw-r--r--voice_engine/channel.cc79
1 file changed, 22 insertions, 57 deletions
diff --git a/voice_engine/channel.cc b/voice_engine/channel.cc
index bbea25e4..026b6b3c 100644
--- a/voice_engine/channel.cc
+++ b/voice_engine/channel.cc
@@ -4150,61 +4150,26 @@ Channel::Demultiplex(const AudioFrame& audioFrame)
return 0;
}
-// TODO(xians): This method borrows quite some code from
-// TransmitMixer::GenerateAudioFrame(), refactor these two methods and reduce
-// code duplication.
void Channel::Demultiplex(const int16_t* audio_data,
int sample_rate,
int number_of_frames,
int number_of_channels) {
- // The highest sample rate that WebRTC supports for mono audio is 96kHz.
- static const int kMaxNumberOfFrames = 960;
- assert(number_of_frames <= kMaxNumberOfFrames);
-
- // Get the send codec information for doing resampling or downmixing later on.
CodecInst codec;
GetSendCodec(codec);
- assert(codec.channels == 1 || codec.channels == 2);
- int support_sample_rate = std::min(32000,
- std::min(sample_rate, codec.plfreq));
-
- // Downmix the data to mono if needed.
- const int16_t* audio_ptr = audio_data;
- if (number_of_channels == 2 && codec.channels == 1) {
- if (!mono_recording_audio_.get())
- mono_recording_audio_.reset(new int16_t[kMaxNumberOfFrames]);
-
- AudioFrameOperations::StereoToMono(audio_data, number_of_frames,
- mono_recording_audio_.get());
- audio_ptr = mono_recording_audio_.get();
- }
- // Resample the data to the sample rate that the codec is using.
- if (input_resampler_.InitializeIfNeeded(sample_rate,
- support_sample_rate,
- codec.channels)) {
- WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId, -1),
- "Channel::Demultiplex() unable to resample");
- return;
+ if (!mono_recording_audio_.get()) {
+ // Temporary space for DownConvertToCodecFormat.
+ mono_recording_audio_.reset(new int16_t[kMaxMonoDataSizeSamples]);
}
-
- int out_length = input_resampler_.Resample(audio_ptr,
- number_of_frames * codec.channels,
- _audioFrame.data_,
- AudioFrame::kMaxDataSizeSamples);
- if (out_length == -1) {
- WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId, -1),
- "Channel::Demultiplex() resampling failed");
- return;
- }
-
- _audioFrame.samples_per_channel_ = out_length / codec.channels;
- _audioFrame.timestamp_ = -1;
- _audioFrame.sample_rate_hz_ = support_sample_rate;
- _audioFrame.speech_type_ = AudioFrame::kNormalSpeech;
- _audioFrame.vad_activity_ = AudioFrame::kVadUnknown;
- _audioFrame.num_channels_ = codec.channels;
- _audioFrame.id_ = _channelId;
+ DownConvertToCodecFormat(audio_data,
+ number_of_frames,
+ number_of_channels,
+ sample_rate,
+ codec.channels,
+ codec.plfreq,
+ mono_recording_audio_.get(),
+ &input_resampler_,
+ &_audioFrame);
}
uint32_t
@@ -4694,11 +4659,11 @@ Channel::MixOrReplaceAudioWithFile(int mixingFrequency)
{
// Currently file stream is always mono.
// TODO(xians): Change the code when FilePlayer supports real stereo.
- Utility::MixWithSat(_audioFrame.data_,
- _audioFrame.num_channels_,
- fileBuffer.get(),
- 1,
- fileSamples);
+ MixWithSat(_audioFrame.data_,
+ _audioFrame.num_channels_,
+ fileBuffer.get(),
+ 1,
+ fileSamples);
}
else
{
@@ -4754,11 +4719,11 @@ Channel::MixAudioWithFile(AudioFrame& audioFrame,
{
// Currently file stream is always mono.
// TODO(xians): Change the code when FilePlayer supports real stereo.
- Utility::MixWithSat(audioFrame.data_,
- audioFrame.num_channels_,
- fileBuffer.get(),
- 1,
- fileSamples);
+ MixWithSat(audioFrame.data_,
+ audioFrame.num_channels_,
+ fileBuffer.get(),
+ 1,
+ fileSamples);
}
else
{