about summary refs log tree commit diff
diff options
context:
space:
mode:
authoraluebs <aluebs@webrtc.org>2016-01-11 18:04:30 -0800
committerCommit bot <commit-bot@chromium.org>2016-01-12 02:04:33 +0000
commit2a34688f86517bfd3745e131e4e5d2b7a924f46a (patch)
tree070aba1deabea47757a10bbedabfef1b5cc633e8
parent2bc63a1dd3188402f4da1cdb6ed24fc058819304 (diff)
downloadwebrtc-2a34688f86517bfd3745e131e4e5d2b7a924f46a.tar.gz
Make Beamforming dynamically settable for Android platform builds
Review URL: https://codereview.webrtc.org/1563493005
Cr-Commit-Position: refs/heads/master@{#11213}
-rw-r--r--webrtc/modules/audio_processing/audio_processing_impl.cc41
-rw-r--r--webrtc/modules/audio_processing/audio_processing_impl.h24
2 files changed, 38 insertions, 27 deletions
diff --git a/webrtc/modules/audio_processing/audio_processing_impl.cc b/webrtc/modules/audio_processing/audio_processing_impl.cc
index c0c5e8a465..67709b215f 100644
--- a/webrtc/modules/audio_processing/audio_processing_impl.cc
+++ b/webrtc/modules/audio_processing/audio_processing_impl.cc
@@ -214,21 +214,21 @@ AudioProcessingImpl::AudioProcessingImpl(const Config& config,
: public_submodules_(new ApmPublicSubmodules()),
private_submodules_(new ApmPrivateSubmodules(beamformer)),
constants_(config.Get<ExperimentalAgc>().startup_min_volume,
- config.Get<Beamforming>().array_geometry,
- config.Get<Beamforming>().target_direction,
#if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS)
false,
#else
config.Get<ExperimentalAgc>().enabled,
#endif
- config.Get<Intelligibility>().enabled,
- config.Get<Beamforming>().enabled),
+ config.Get<Intelligibility>().enabled),
#if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS)
- capture_(false)
+ capture_(false,
#else
- capture_(config.Get<ExperimentalNs>().enabled)
+ capture_(config.Get<ExperimentalNs>().enabled,
#endif
+ config.Get<Beamforming>().enabled,
+ config.Get<Beamforming>().array_geometry,
+ config.Get<Beamforming>().target_direction)
{
{
rtc::CritScope cs_render(&crit_render_);
@@ -345,7 +345,7 @@ int AudioProcessingImpl::MaybeInitialize(
int AudioProcessingImpl::InitializeLocked() {
const int fwd_audio_buffer_channels =
- constants_.beamformer_enabled
+ capture_.beamformer_enabled
? formats_.api_format.input_stream().num_channels()
: formats_.api_format.output_stream().num_channels();
const int rev_audio_buffer_out_num_frames =
@@ -428,9 +428,9 @@ int AudioProcessingImpl::InitializeLocked(const ProcessingConfig& config) {
return kBadNumberChannelsError;
}
- if (constants_.beamformer_enabled && (static_cast<size_t>(num_in_channels) !=
- constants_.array_geometry.size() ||
- num_out_channels > 1)) {
+ if (capture_.beamformer_enabled &&
+ (static_cast<size_t>(num_in_channels) != capture_.array_geometry.size() ||
+ num_out_channels > 1)) {
return kBadNumberChannelsError;
}
@@ -498,6 +498,17 @@ void AudioProcessingImpl::SetExtraOptions(const Config& config) {
config.Get<ExperimentalNs>().enabled;
InitializeTransient();
}
+
+#ifdef WEBRTC_ANDROID_PLATFORM_BUILD
+ if (capture_.beamformer_enabled != config.Get<Beamforming>().enabled) {
+ capture_.beamformer_enabled = config.Get<Beamforming>().enabled;
+ if (config.Get<Beamforming>().array_geometry.size() > 1) {
+ capture_.array_geometry = config.Get<Beamforming>().array_geometry;
+ }
+ capture_.target_direction = config.Get<Beamforming>().target_direction;
+ InitializeBeamformer();
+ }
+#endif // WEBRTC_ANDROID_PLATFORM_BUILD
}
int AudioProcessingImpl::input_sample_rate_hz() const {
@@ -760,7 +771,7 @@ int AudioProcessingImpl::ProcessStreamLocked() {
ca->num_channels());
}
- if (constants_.beamformer_enabled) {
+ if (capture_.beamformer_enabled) {
private_submodules_->beamformer->ProcessChunk(*ca->split_data_f(),
ca->split_data_f());
ca->set_num_channels(1);
@@ -782,7 +793,7 @@ int AudioProcessingImpl::ProcessStreamLocked() {
if (constants_.use_new_agc &&
public_submodules_->gain_control->is_enabled() &&
- (!constants_.beamformer_enabled ||
+ (!capture_.beamformer_enabled ||
private_submodules_->beamformer->is_target_present())) {
private_submodules_->agc_manager->Process(
ca->split_bands_const(0)[kBand0To8kHz], ca->num_frames_per_band(),
@@ -1172,7 +1183,7 @@ VoiceDetection* AudioProcessingImpl::voice_detection() const {
}
bool AudioProcessingImpl::is_data_processed() const {
- if (constants_.beamformer_enabled) {
+ if (capture_.beamformer_enabled) {
return true;
}
@@ -1287,10 +1298,10 @@ void AudioProcessingImpl::InitializeTransient() {
}
void AudioProcessingImpl::InitializeBeamformer() {
- if (constants_.beamformer_enabled) {
+ if (capture_.beamformer_enabled) {
if (!private_submodules_->beamformer) {
private_submodules_->beamformer.reset(new NonlinearBeamformer(
- constants_.array_geometry, constants_.target_direction));
+ capture_.array_geometry, capture_.target_direction));
}
private_submodules_->beamformer->Initialize(kChunkSizeMs,
capture_nonlocked_.split_rate);
diff --git a/webrtc/modules/audio_processing/audio_processing_impl.h b/webrtc/modules/audio_processing/audio_processing_impl.h
index 3506ac4dc0..39f87acd1b 100644
--- a/webrtc/modules/audio_processing/audio_processing_impl.h
+++ b/webrtc/modules/audio_processing/audio_processing_impl.h
@@ -267,28 +267,22 @@ class AudioProcessingImpl : public AudioProcessing {
// APM constants.
const struct ApmConstants {
ApmConstants(int agc_startup_min_volume,
- const std::vector<Point> array_geometry,
- SphericalPointf target_direction,
bool use_new_agc,
- bool intelligibility_enabled,
- bool beamformer_enabled)
+ bool intelligibility_enabled)
: // Format of processing streams at input/output call sites.
agc_startup_min_volume(agc_startup_min_volume),
- array_geometry(array_geometry),
- target_direction(target_direction),
use_new_agc(use_new_agc),
- intelligibility_enabled(intelligibility_enabled),
- beamformer_enabled(beamformer_enabled) {}
+ intelligibility_enabled(intelligibility_enabled) {}
int agc_startup_min_volume;
- std::vector<Point> array_geometry;
- SphericalPointf target_direction;
bool use_new_agc;
bool intelligibility_enabled;
- bool beamformer_enabled;
} constants_;
struct ApmCaptureState {
- ApmCaptureState(bool transient_suppressor_enabled)
+ ApmCaptureState(bool transient_suppressor_enabled,
+ bool beamformer_enabled,
+ const std::vector<Point>& array_geometry,
+ SphericalPointf target_direction)
: aec_system_delay_jumps(-1),
delay_offset_ms(0),
was_stream_delay_set(false),
@@ -298,6 +292,9 @@ class AudioProcessingImpl : public AudioProcessing {
output_will_be_muted(false),
key_pressed(false),
transient_suppressor_enabled(transient_suppressor_enabled),
+ beamformer_enabled(beamformer_enabled),
+ array_geometry(array_geometry),
+ target_direction(target_direction),
fwd_proc_format(kSampleRate16kHz),
split_rate(kSampleRate16kHz) {}
int aec_system_delay_jumps;
@@ -309,6 +306,9 @@ class AudioProcessingImpl : public AudioProcessing {
bool output_will_be_muted;
bool key_pressed;
bool transient_suppressor_enabled;
+ bool beamformer_enabled;
+ std::vector<Point> array_geometry;
+ SphericalPointf target_direction;
rtc::scoped_ptr<AudioBuffer> capture_audio;
// Only the rate and samples fields of fwd_proc_format_ are used because the
// forward processing number of channels is mutable and is tracked by the