author     Per Åhgren <peah@webrtc.org>  2020-04-26 23:56:17 +0200
committer  Commit Bot <commit-bot@chromium.org>  2020-04-26 23:06:44 +0000
commit     cc73ed3e70d1968f8e2bc365880d8273028e14a6 (patch)
tree       e26c3f0be0eeac5eadd10d8e65499729d1864959
parent     86bd33a1e77efc85383c61589dacd4310fdcb50e (diff)
APM: Add build flag to allow building WebRTC without APM
This CL adds a build flag to allow building the non-test parts of WebRTC
without the audio processing module. The CL also ensures that the WebRTC code
correctly handles the case when no APM is available.

Bug: webrtc:5298
Change-Id: I5c8b5d1f7115e5cce2af4c2b5ff701fa1c54e49e
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/171509
Commit-Queue: Per Åhgren <peah@webrtc.org>
Reviewed-by: Sam Zackrisson <saza@webrtc.org>
Reviewed-by: Karl Wiberg <kwiberg@webrtc.org>
Reviewed-by: Mirko Bonadei <mbonadei@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#31133}
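Usage note (not part of the CL): the GN argument is rtc_exclude_audio_processing_module, presumably declared in the webrtc.gni change listed below; when set to true it adds the WEBRTC_EXCLUDE_AUDIO_PROCESSING_MODULE define (see the BUILD.gn hunk below), and audio pipelines are then built with a null AudioProcessing pointer. A minimal sketch of such a configuration, modeled on the updated unit tests in this CL; the helper name and the adm parameter are made up for illustration:

    #include "call/audio_state.h"
    #include "modules/audio_device/include/audio_device.h"
    #include "modules/audio_mixer/audio_mixer_impl.h"

    // Hypothetical helper, not from the CL: builds an AudioState with no APM.
    rtc::scoped_refptr<webrtc::AudioState> CreateAudioStateWithoutApm(
        rtc::scoped_refptr<webrtc::AudioDeviceModule> adm) {
      webrtc::AudioState::Config config;
      config.audio_mixer = webrtc::AudioMixerImpl::Create();
      config.audio_processing = nullptr;  // APM excluded from this build.
      config.audio_device_module = adm;
      return webrtc::AudioState::Create(config);
    }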
-rw-r--r--  BUILD.gn | 4
-rw-r--r--  audio/audio_receive_stream_unittest.cc | 334
-rw-r--r--  audio/audio_send_stream.cc | 8
-rw-r--r--  audio/audio_send_stream_unittest.cc | 718
-rw-r--r--  audio/audio_state.cc | 1
-rw-r--r--  audio/audio_state_unittest.cc | 299
-rw-r--r--  audio/audio_transport_impl.cc | 23
-rw-r--r--  call/call_unittest.cc | 407
-rw-r--r--  media/engine/webrtc_voice_engine.cc | 41
-rw-r--r--  media/engine/webrtc_voice_engine_unittest.cc | 1074
-rw-r--r--  modules/audio_processing/BUILD.gn | 76
-rw-r--r--  modules/audio_processing/aec_dump/BUILD.gn | 78
-rw-r--r--  modules/audio_processing/aec_dump/aec_dump_integration_test.cc | 3
-rw-r--r--  modules/audio_processing/audio_processing_builder_impl.cc | 51
-rw-r--r--  modules/audio_processing/audio_processing_impl.cc | 50
-rw-r--r--  modules/audio_processing/audio_processing_impl_locking_unittest.cc | 3
-rw-r--r--  modules/audio_processing/audio_processing_impl_unittest.cc | 12
-rw-r--r--  modules/audio_processing/audio_processing_performance_unittest.cc | 11
-rw-r--r--  modules/audio_processing/audio_processing_unittest.cc | 34
-rw-r--r--  modules/audio_processing/include/audio_processing.h | 25
-rw-r--r--  modules/audio_processing/test/audio_processing_builder_for_testing.cc | 68
-rw-r--r--  modules/audio_processing/test/audio_processing_builder_for_testing.h | 81
-rw-r--r--  modules/audio_processing/test/debug_dump_replayer.cc | 3
-rw-r--r--  modules/audio_processing/test/debug_dump_test.cc | 3
-rw-r--r--  pc/BUILD.gn | 1
-rw-r--r--  pc/peer_connection_integrationtest.cc | 7
-rw-r--r--  test/fuzzers/BUILD.gn | 1
-rw-r--r--  test/fuzzers/audio_processing_configs_fuzzer.cc | 3
-rw-r--r--  test/pc/e2e/test_peer.h | 6
-rw-r--r--  test/pc/e2e/test_peer_factory.cc | 2
-rw-r--r--  webrtc.gni | 3
31 files changed, 1957 insertions, 1473 deletions
diff --git a/BUILD.gn b/BUILD.gn
index 16ff6970bd..85c428d08c 100644
--- a/BUILD.gn
+++ b/BUILD.gn
@@ -281,6 +281,10 @@ config("common_config") {
defines += [ "WEBRTC_EXCLUDE_TRANSIENT_SUPPRESSOR" ]
}
+ if (rtc_exclude_audio_processing_module) {
+ defines += [ "WEBRTC_EXCLUDE_AUDIO_PROCESSING_MODULE" ]
+ }
+
cflags = []
if (build_with_chromium) {
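For non-test code, the define added above is what allows dropping the APM dependency entirely. A purely hypothetical sketch of the compile-time guard, assuming the builder API in modules/audio_processing/include/audio_processing.h; the CL's actual changes to audio_processing_builder_impl.cc and the other listed files are not reproduced in this excerpt:

    #include "modules/audio_processing/include/audio_processing.h"

    // Hypothetical helper, not taken from the CL: yields no APM when the
    // module is excluded from the build.
    webrtc::AudioProcessing* MaybeCreateApm() {
    #ifdef WEBRTC_EXCLUDE_AUDIO_PROCESSING_MODULE
      return nullptr;  // Callers must cope with running without an APM.
    #else
      return webrtc::AudioProcessingBuilder().Create();
    #endif
    }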
diff --git a/audio/audio_receive_stream_unittest.cc b/audio/audio_receive_stream_unittest.cc
index 186eb1c67d..7759dd1e72 100644
--- a/audio/audio_receive_stream_unittest.cc
+++ b/audio/audio_receive_stream_unittest.cc
@@ -75,15 +75,21 @@ const NetworkStatistics kNetworkStats = {
const AudioDecodingCallStats kAudioDecodeStats = MakeAudioDecodeStatsForTest();
struct ConfigHelper {
- ConfigHelper() : ConfigHelper(new rtc::RefCountedObject<MockAudioMixer>()) {}
+ explicit ConfigHelper(bool use_null_audio_processing)
+ : ConfigHelper(new rtc::RefCountedObject<MockAudioMixer>(),
+ use_null_audio_processing) {}
- explicit ConfigHelper(rtc::scoped_refptr<MockAudioMixer> audio_mixer)
+ ConfigHelper(rtc::scoped_refptr<MockAudioMixer> audio_mixer,
+ bool use_null_audio_processing)
: audio_mixer_(audio_mixer) {
using ::testing::Invoke;
AudioState::Config config;
config.audio_mixer = audio_mixer_;
- config.audio_processing = new rtc::RefCountedObject<MockAudioProcessing>();
+ config.audio_processing =
+ use_null_audio_processing
+ ? nullptr
+ : new rtc::RefCountedObject<MockAudioProcessing>();
config.audio_device_module =
new rtc::RefCountedObject<testing::NiceMock<MockAudioDeviceModule>>();
audio_state_ = AudioState::Create(config);
@@ -230,182 +236,200 @@ TEST(AudioReceiveStreamTest, ConfigToString) {
}
TEST(AudioReceiveStreamTest, ConstructDestruct) {
- ConfigHelper helper;
- auto recv_stream = helper.CreateAudioReceiveStream();
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(use_null_audio_processing);
+ auto recv_stream = helper.CreateAudioReceiveStream();
+ }
}
TEST(AudioReceiveStreamTest, ReceiveRtpPacket) {
- ConfigHelper helper;
- helper.config().rtp.transport_cc = true;
- auto recv_stream = helper.CreateAudioReceiveStream();
- const int kTransportSequenceNumberValue = 1234;
- std::vector<uint8_t> rtp_packet = CreateRtpHeaderWithOneByteExtension(
- kTransportSequenceNumberId, kTransportSequenceNumberValue, 2);
- constexpr int64_t packet_time_us = 5678000;
-
- RtpPacketReceived parsed_packet;
- ASSERT_TRUE(parsed_packet.Parse(&rtp_packet[0], rtp_packet.size()));
- parsed_packet.set_arrival_time_ms((packet_time_us + 500) / 1000);
-
- EXPECT_CALL(*helper.channel_receive(),
- OnRtpPacket(::testing::Ref(parsed_packet)));
-
- recv_stream->OnRtpPacket(parsed_packet);
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(use_null_audio_processing);
+ helper.config().rtp.transport_cc = true;
+ auto recv_stream = helper.CreateAudioReceiveStream();
+ const int kTransportSequenceNumberValue = 1234;
+ std::vector<uint8_t> rtp_packet = CreateRtpHeaderWithOneByteExtension(
+ kTransportSequenceNumberId, kTransportSequenceNumberValue, 2);
+ constexpr int64_t packet_time_us = 5678000;
+
+ RtpPacketReceived parsed_packet;
+ ASSERT_TRUE(parsed_packet.Parse(&rtp_packet[0], rtp_packet.size()));
+ parsed_packet.set_arrival_time_ms((packet_time_us + 500) / 1000);
+
+ EXPECT_CALL(*helper.channel_receive(),
+ OnRtpPacket(::testing::Ref(parsed_packet)));
+
+ recv_stream->OnRtpPacket(parsed_packet);
+ }
}
TEST(AudioReceiveStreamTest, ReceiveRtcpPacket) {
- ConfigHelper helper;
- helper.config().rtp.transport_cc = true;
- auto recv_stream = helper.CreateAudioReceiveStream();
- std::vector<uint8_t> rtcp_packet = CreateRtcpSenderReport();
- EXPECT_CALL(*helper.channel_receive(),
- ReceivedRTCPPacket(&rtcp_packet[0], rtcp_packet.size()))
- .WillOnce(Return());
- recv_stream->DeliverRtcp(&rtcp_packet[0], rtcp_packet.size());
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(use_null_audio_processing);
+ helper.config().rtp.transport_cc = true;
+ auto recv_stream = helper.CreateAudioReceiveStream();
+ std::vector<uint8_t> rtcp_packet = CreateRtcpSenderReport();
+ EXPECT_CALL(*helper.channel_receive(),
+ ReceivedRTCPPacket(&rtcp_packet[0], rtcp_packet.size()))
+ .WillOnce(Return());
+ recv_stream->DeliverRtcp(&rtcp_packet[0], rtcp_packet.size());
+ }
}
TEST(AudioReceiveStreamTest, GetStats) {
- ConfigHelper helper;
- auto recv_stream = helper.CreateAudioReceiveStream();
- helper.SetupMockForGetStats();
- AudioReceiveStream::Stats stats = recv_stream->GetStats();
- EXPECT_EQ(kRemoteSsrc, stats.remote_ssrc);
- EXPECT_EQ(kCallStats.payload_bytes_rcvd, stats.payload_bytes_rcvd);
- EXPECT_EQ(kCallStats.header_and_padding_bytes_rcvd,
- stats.header_and_padding_bytes_rcvd);
- EXPECT_EQ(static_cast<uint32_t>(kCallStats.packetsReceived),
- stats.packets_rcvd);
- EXPECT_EQ(kCallStats.cumulativeLost, stats.packets_lost);
- EXPECT_EQ(kReceiveCodec.second.name, stats.codec_name);
- EXPECT_EQ(
- kCallStats.jitterSamples / (kReceiveCodec.second.clockrate_hz / 1000),
- stats.jitter_ms);
- EXPECT_EQ(kNetworkStats.currentBufferSize, stats.jitter_buffer_ms);
- EXPECT_EQ(kNetworkStats.preferredBufferSize,
- stats.jitter_buffer_preferred_ms);
- EXPECT_EQ(static_cast<uint32_t>(kJitterBufferDelay + kPlayoutBufferDelay),
- stats.delay_estimate_ms);
- EXPECT_EQ(static_cast<int32_t>(kSpeechOutputLevel), stats.audio_level);
- EXPECT_EQ(kTotalOutputEnergy, stats.total_output_energy);
- EXPECT_EQ(kNetworkStats.totalSamplesReceived, stats.total_samples_received);
- EXPECT_EQ(kTotalOutputDuration, stats.total_output_duration);
- EXPECT_EQ(kNetworkStats.concealedSamples, stats.concealed_samples);
- EXPECT_EQ(kNetworkStats.concealmentEvents, stats.concealment_events);
- EXPECT_EQ(static_cast<double>(kNetworkStats.jitterBufferDelayMs) /
- static_cast<double>(rtc::kNumMillisecsPerSec),
- stats.jitter_buffer_delay_seconds);
- EXPECT_EQ(kNetworkStats.jitterBufferEmittedCount,
- stats.jitter_buffer_emitted_count);
- EXPECT_EQ(static_cast<double>(kNetworkStats.jitterBufferTargetDelayMs) /
- static_cast<double>(rtc::kNumMillisecsPerSec),
- stats.jitter_buffer_target_delay_seconds);
- EXPECT_EQ(Q14ToFloat(kNetworkStats.currentExpandRate), stats.expand_rate);
- EXPECT_EQ(Q14ToFloat(kNetworkStats.currentSpeechExpandRate),
- stats.speech_expand_rate);
- EXPECT_EQ(Q14ToFloat(kNetworkStats.currentSecondaryDecodedRate),
- stats.secondary_decoded_rate);
- EXPECT_EQ(Q14ToFloat(kNetworkStats.currentSecondaryDiscardedRate),
- stats.secondary_discarded_rate);
- EXPECT_EQ(Q14ToFloat(kNetworkStats.currentAccelerateRate),
- stats.accelerate_rate);
- EXPECT_EQ(Q14ToFloat(kNetworkStats.currentPreemptiveRate),
- stats.preemptive_expand_rate);
- EXPECT_EQ(kAudioDecodeStats.calls_to_silence_generator,
- stats.decoding_calls_to_silence_generator);
- EXPECT_EQ(kAudioDecodeStats.calls_to_neteq, stats.decoding_calls_to_neteq);
- EXPECT_EQ(kAudioDecodeStats.decoded_normal, stats.decoding_normal);
- EXPECT_EQ(kAudioDecodeStats.decoded_neteq_plc, stats.decoding_plc);
- EXPECT_EQ(kAudioDecodeStats.decoded_codec_plc, stats.decoding_codec_plc);
- EXPECT_EQ(kAudioDecodeStats.decoded_cng, stats.decoding_cng);
- EXPECT_EQ(kAudioDecodeStats.decoded_plc_cng, stats.decoding_plc_cng);
- EXPECT_EQ(kAudioDecodeStats.decoded_muted_output,
- stats.decoding_muted_output);
- EXPECT_EQ(kCallStats.capture_start_ntp_time_ms_,
- stats.capture_start_ntp_time_ms);
- EXPECT_EQ(kPlayoutNtpTimestampMs, stats.estimated_playout_ntp_timestamp_ms);
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(use_null_audio_processing);
+ auto recv_stream = helper.CreateAudioReceiveStream();
+ helper.SetupMockForGetStats();
+ AudioReceiveStream::Stats stats = recv_stream->GetStats();
+ EXPECT_EQ(kRemoteSsrc, stats.remote_ssrc);
+ EXPECT_EQ(kCallStats.payload_bytes_rcvd, stats.payload_bytes_rcvd);
+ EXPECT_EQ(kCallStats.header_and_padding_bytes_rcvd,
+ stats.header_and_padding_bytes_rcvd);
+ EXPECT_EQ(static_cast<uint32_t>(kCallStats.packetsReceived),
+ stats.packets_rcvd);
+ EXPECT_EQ(kCallStats.cumulativeLost, stats.packets_lost);
+ EXPECT_EQ(kReceiveCodec.second.name, stats.codec_name);
+ EXPECT_EQ(
+ kCallStats.jitterSamples / (kReceiveCodec.second.clockrate_hz / 1000),
+ stats.jitter_ms);
+ EXPECT_EQ(kNetworkStats.currentBufferSize, stats.jitter_buffer_ms);
+ EXPECT_EQ(kNetworkStats.preferredBufferSize,
+ stats.jitter_buffer_preferred_ms);
+ EXPECT_EQ(static_cast<uint32_t>(kJitterBufferDelay + kPlayoutBufferDelay),
+ stats.delay_estimate_ms);
+ EXPECT_EQ(static_cast<int32_t>(kSpeechOutputLevel), stats.audio_level);
+ EXPECT_EQ(kTotalOutputEnergy, stats.total_output_energy);
+ EXPECT_EQ(kNetworkStats.totalSamplesReceived, stats.total_samples_received);
+ EXPECT_EQ(kTotalOutputDuration, stats.total_output_duration);
+ EXPECT_EQ(kNetworkStats.concealedSamples, stats.concealed_samples);
+ EXPECT_EQ(kNetworkStats.concealmentEvents, stats.concealment_events);
+ EXPECT_EQ(static_cast<double>(kNetworkStats.jitterBufferDelayMs) /
+ static_cast<double>(rtc::kNumMillisecsPerSec),
+ stats.jitter_buffer_delay_seconds);
+ EXPECT_EQ(kNetworkStats.jitterBufferEmittedCount,
+ stats.jitter_buffer_emitted_count);
+ EXPECT_EQ(static_cast<double>(kNetworkStats.jitterBufferTargetDelayMs) /
+ static_cast<double>(rtc::kNumMillisecsPerSec),
+ stats.jitter_buffer_target_delay_seconds);
+ EXPECT_EQ(Q14ToFloat(kNetworkStats.currentExpandRate), stats.expand_rate);
+ EXPECT_EQ(Q14ToFloat(kNetworkStats.currentSpeechExpandRate),
+ stats.speech_expand_rate);
+ EXPECT_EQ(Q14ToFloat(kNetworkStats.currentSecondaryDecodedRate),
+ stats.secondary_decoded_rate);
+ EXPECT_EQ(Q14ToFloat(kNetworkStats.currentSecondaryDiscardedRate),
+ stats.secondary_discarded_rate);
+ EXPECT_EQ(Q14ToFloat(kNetworkStats.currentAccelerateRate),
+ stats.accelerate_rate);
+ EXPECT_EQ(Q14ToFloat(kNetworkStats.currentPreemptiveRate),
+ stats.preemptive_expand_rate);
+ EXPECT_EQ(kAudioDecodeStats.calls_to_silence_generator,
+ stats.decoding_calls_to_silence_generator);
+ EXPECT_EQ(kAudioDecodeStats.calls_to_neteq, stats.decoding_calls_to_neteq);
+ EXPECT_EQ(kAudioDecodeStats.decoded_normal, stats.decoding_normal);
+ EXPECT_EQ(kAudioDecodeStats.decoded_neteq_plc, stats.decoding_plc);
+ EXPECT_EQ(kAudioDecodeStats.decoded_codec_plc, stats.decoding_codec_plc);
+ EXPECT_EQ(kAudioDecodeStats.decoded_cng, stats.decoding_cng);
+ EXPECT_EQ(kAudioDecodeStats.decoded_plc_cng, stats.decoding_plc_cng);
+ EXPECT_EQ(kAudioDecodeStats.decoded_muted_output,
+ stats.decoding_muted_output);
+ EXPECT_EQ(kCallStats.capture_start_ntp_time_ms_,
+ stats.capture_start_ntp_time_ms);
+ EXPECT_EQ(kPlayoutNtpTimestampMs, stats.estimated_playout_ntp_timestamp_ms);
+ }
}
TEST(AudioReceiveStreamTest, SetGain) {
- ConfigHelper helper;
- auto recv_stream = helper.CreateAudioReceiveStream();
- EXPECT_CALL(*helper.channel_receive(),
- SetChannelOutputVolumeScaling(FloatEq(0.765f)));
- recv_stream->SetGain(0.765f);
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(use_null_audio_processing);
+ auto recv_stream = helper.CreateAudioReceiveStream();
+ EXPECT_CALL(*helper.channel_receive(),
+ SetChannelOutputVolumeScaling(FloatEq(0.765f)));
+ recv_stream->SetGain(0.765f);
+ }
}
TEST(AudioReceiveStreamTest, StreamsShouldBeAddedToMixerOnceOnStart) {
- ConfigHelper helper1;
- ConfigHelper helper2(helper1.audio_mixer());
- auto recv_stream1 = helper1.CreateAudioReceiveStream();
- auto recv_stream2 = helper2.CreateAudioReceiveStream();
-
- EXPECT_CALL(*helper1.channel_receive(), StartPlayout()).Times(1);
- EXPECT_CALL(*helper2.channel_receive(), StartPlayout()).Times(1);
- EXPECT_CALL(*helper1.channel_receive(), StopPlayout()).Times(1);
- EXPECT_CALL(*helper2.channel_receive(), StopPlayout()).Times(1);
- EXPECT_CALL(*helper1.audio_mixer(), AddSource(recv_stream1.get()))
- .WillOnce(Return(true));
- EXPECT_CALL(*helper1.audio_mixer(), AddSource(recv_stream2.get()))
- .WillOnce(Return(true));
- EXPECT_CALL(*helper1.audio_mixer(), RemoveSource(recv_stream1.get()))
- .Times(1);
- EXPECT_CALL(*helper1.audio_mixer(), RemoveSource(recv_stream2.get()))
- .Times(1);
-
- recv_stream1->Start();
- recv_stream2->Start();
-
- // One more should not result in any more mixer sources added.
- recv_stream1->Start();
-
- // Stop stream before it is being destructed.
- recv_stream2->Stop();
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper1(use_null_audio_processing);
+ ConfigHelper helper2(helper1.audio_mixer(), use_null_audio_processing);
+ auto recv_stream1 = helper1.CreateAudioReceiveStream();
+ auto recv_stream2 = helper2.CreateAudioReceiveStream();
+
+ EXPECT_CALL(*helper1.channel_receive(), StartPlayout()).Times(1);
+ EXPECT_CALL(*helper2.channel_receive(), StartPlayout()).Times(1);
+ EXPECT_CALL(*helper1.channel_receive(), StopPlayout()).Times(1);
+ EXPECT_CALL(*helper2.channel_receive(), StopPlayout()).Times(1);
+ EXPECT_CALL(*helper1.audio_mixer(), AddSource(recv_stream1.get()))
+ .WillOnce(Return(true));
+ EXPECT_CALL(*helper1.audio_mixer(), AddSource(recv_stream2.get()))
+ .WillOnce(Return(true));
+ EXPECT_CALL(*helper1.audio_mixer(), RemoveSource(recv_stream1.get()))
+ .Times(1);
+ EXPECT_CALL(*helper1.audio_mixer(), RemoveSource(recv_stream2.get()))
+ .Times(1);
+
+ recv_stream1->Start();
+ recv_stream2->Start();
+
+ // One more should not result in any more mixer sources added.
+ recv_stream1->Start();
+
+ // Stop stream before it is being destructed.
+ recv_stream2->Stop();
+ }
}
TEST(AudioReceiveStreamTest, ReconfigureWithSameConfig) {
- ConfigHelper helper;
- auto recv_stream = helper.CreateAudioReceiveStream();
- recv_stream->Reconfigure(helper.config());
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(use_null_audio_processing);
+ auto recv_stream = helper.CreateAudioReceiveStream();
+ recv_stream->Reconfigure(helper.config());
+ }
}
TEST(AudioReceiveStreamTest, ReconfigureWithUpdatedConfig) {
- ConfigHelper helper;
- auto recv_stream = helper.CreateAudioReceiveStream();
-
- auto new_config = helper.config();
- new_config.rtp.nack.rtp_history_ms = 300 + 20;
- new_config.rtp.extensions.clear();
- new_config.rtp.extensions.push_back(
- RtpExtension(RtpExtension::kAudioLevelUri, kAudioLevelId + 1));
- new_config.rtp.extensions.push_back(
- RtpExtension(RtpExtension::kTransportSequenceNumberUri,
- kTransportSequenceNumberId + 1));
- new_config.decoder_map.emplace(1, SdpAudioFormat("foo", 8000, 1));
-
- MockChannelReceive& channel_receive = *helper.channel_receive();
- EXPECT_CALL(channel_receive, SetNACKStatus(true, 15 + 1)).Times(1);
- EXPECT_CALL(channel_receive, SetReceiveCodecs(new_config.decoder_map));
-
- recv_stream->Reconfigure(new_config);
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(use_null_audio_processing);
+ auto recv_stream = helper.CreateAudioReceiveStream();
+
+ auto new_config = helper.config();
+ new_config.rtp.nack.rtp_history_ms = 300 + 20;
+ new_config.rtp.extensions.clear();
+ new_config.rtp.extensions.push_back(
+ RtpExtension(RtpExtension::kAudioLevelUri, kAudioLevelId + 1));
+ new_config.rtp.extensions.push_back(
+ RtpExtension(RtpExtension::kTransportSequenceNumberUri,
+ kTransportSequenceNumberId + 1));
+ new_config.decoder_map.emplace(1, SdpAudioFormat("foo", 8000, 1));
+
+ MockChannelReceive& channel_receive = *helper.channel_receive();
+ EXPECT_CALL(channel_receive, SetNACKStatus(true, 15 + 1)).Times(1);
+ EXPECT_CALL(channel_receive, SetReceiveCodecs(new_config.decoder_map));
+
+ recv_stream->Reconfigure(new_config);
+ }
}
TEST(AudioReceiveStreamTest, ReconfigureWithFrameDecryptor) {
- ConfigHelper helper;
- auto recv_stream = helper.CreateAudioReceiveStream();
-
- auto new_config_0 = helper.config();
- rtc::scoped_refptr<FrameDecryptorInterface> mock_frame_decryptor_0(
- new rtc::RefCountedObject<MockFrameDecryptor>());
- new_config_0.frame_decryptor = mock_frame_decryptor_0;
-
- recv_stream->Reconfigure(new_config_0);
-
- auto new_config_1 = helper.config();
- rtc::scoped_refptr<FrameDecryptorInterface> mock_frame_decryptor_1(
- new rtc::RefCountedObject<MockFrameDecryptor>());
- new_config_1.frame_decryptor = mock_frame_decryptor_1;
- new_config_1.crypto_options.sframe.require_frame_encryption = true;
- recv_stream->Reconfigure(new_config_1);
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(use_null_audio_processing);
+ auto recv_stream = helper.CreateAudioReceiveStream();
+
+ auto new_config_0 = helper.config();
+ rtc::scoped_refptr<FrameDecryptorInterface> mock_frame_decryptor_0(
+ new rtc::RefCountedObject<MockFrameDecryptor>());
+ new_config_0.frame_decryptor = mock_frame_decryptor_0;
+
+ recv_stream->Reconfigure(new_config_0);
+
+ auto new_config_1 = helper.config();
+ rtc::scoped_refptr<FrameDecryptorInterface> mock_frame_decryptor_1(
+ new rtc::RefCountedObject<MockFrameDecryptor>());
+ new_config_1.frame_decryptor = mock_frame_decryptor_1;
+ new_config_1.crypto_options.sframe.require_frame_encryption = true;
+ recv_stream->Reconfigure(new_config_1);
+ }
}
} // namespace test
diff --git a/audio/audio_send_stream.cc b/audio/audio_send_stream.cc
index acdc73b9a9..a44b55f95f 100644
--- a/audio/audio_send_stream.cc
+++ b/audio/audio_send_stream.cc
@@ -490,9 +490,11 @@ webrtc::AudioSendStream::Stats AudioSendStream::GetStats(
stats.typing_noise_detected = audio_state()->typing_noise_detected();
stats.ana_statistics = channel_send_->GetANAStatistics();
- RTC_DCHECK(audio_state_->audio_processing());
- stats.apm_statistics =
- audio_state_->audio_processing()->GetStatistics(has_remote_tracks);
+
+ AudioProcessing* ap = audio_state_->audio_processing();
+ if (ap) {
+ stats.apm_statistics = ap->GetStatistics(has_remote_tracks);
+ }
stats.report_block_datas = std::move(call_stats.report_block_datas);
diff --git a/audio/audio_send_stream_unittest.cc b/audio/audio_send_stream_unittest.cc
index de1f2fe007..8f33d29f70 100644
--- a/audio/audio_send_stream_unittest.cc
+++ b/audio/audio_send_stream_unittest.cc
@@ -141,11 +141,16 @@ rtc::scoped_refptr<MockAudioEncoderFactory> SetupEncoderFactoryMock() {
}
struct ConfigHelper {
- ConfigHelper(bool audio_bwe_enabled, bool expect_set_encoder_call)
+ ConfigHelper(bool audio_bwe_enabled,
+ bool expect_set_encoder_call,
+ bool use_null_audio_processing)
: clock_(1000000),
task_queue_factory_(CreateDefaultTaskQueueFactory()),
stream_config_(/*send_transport=*/nullptr),
- audio_processing_(new rtc::RefCountedObject<MockAudioProcessing>()),
+ audio_processing_(
+ use_null_audio_processing
+ ? nullptr
+ : new rtc::RefCountedObject<MockAudioProcessing>()),
bitrate_allocator_(&limit_observer_),
worker_queue_(task_queue_factory_->CreateTaskQueue(
"ConfigHelper_worker_queue",
@@ -273,7 +278,7 @@ struct ConfigHelper {
.WillOnce(Return(true));
}
- void SetupMockForGetStats() {
+ void SetupMockForGetStats(bool use_null_audio_processing) {
using ::testing::DoAll;
using ::testing::SetArgPointee;
using ::testing::SetArgReferee;
@@ -305,10 +310,13 @@ struct ConfigHelper {
audio_processing_stats_.residual_echo_likelihood = kResidualEchoLikelihood;
audio_processing_stats_.residual_echo_likelihood_recent_max =
kResidualEchoLikelihoodMax;
-
- EXPECT_CALL(*audio_processing_, GetStatistics(true))
- .WillRepeatedly(Return(audio_processing_stats_));
+ if (!use_null_audio_processing) {
+ ASSERT_TRUE(audio_processing_);
+ EXPECT_CALL(*audio_processing_, GetStatistics(true))
+ .WillRepeatedly(Return(audio_processing_stats_));
+ }
}
+
TaskQueueForTest* worker() { return &worker_queue_; }
private:
@@ -381,235 +389,270 @@ TEST(AudioSendStreamTest, ConfigToString) {
}
TEST(AudioSendStreamTest, ConstructDestruct) {
- ConfigHelper helper(false, true);
- auto send_stream = helper.CreateAudioSendStream();
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(false, true, use_null_audio_processing);
+ auto send_stream = helper.CreateAudioSendStream();
+ }
}
TEST(AudioSendStreamTest, SendTelephoneEvent) {
- ConfigHelper helper(false, true);
- auto send_stream = helper.CreateAudioSendStream();
- helper.SetupMockForSendTelephoneEvent();
- EXPECT_TRUE(send_stream->SendTelephoneEvent(
- kTelephoneEventPayloadType, kTelephoneEventPayloadFrequency,
- kTelephoneEventCode, kTelephoneEventDuration));
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(false, true, use_null_audio_processing);
+ auto send_stream = helper.CreateAudioSendStream();
+ helper.SetupMockForSendTelephoneEvent();
+ EXPECT_TRUE(send_stream->SendTelephoneEvent(
+ kTelephoneEventPayloadType, kTelephoneEventPayloadFrequency,
+ kTelephoneEventCode, kTelephoneEventDuration));
+ }
}
TEST(AudioSendStreamTest, SetMuted) {
- ConfigHelper helper(false, true);
- auto send_stream = helper.CreateAudioSendStream();
- EXPECT_CALL(*helper.channel_send(), SetInputMute(true));
- send_stream->SetMuted(true);
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(false, true, use_null_audio_processing);
+ auto send_stream = helper.CreateAudioSendStream();
+ EXPECT_CALL(*helper.channel_send(), SetInputMute(true));
+ send_stream->SetMuted(true);
+ }
}
TEST(AudioSendStreamTest, AudioBweCorrectObjectsOnChannelProxy) {
ScopedFieldTrials field_trials("WebRTC-Audio-SendSideBwe/Enabled/");
- ConfigHelper helper(true, true);
- auto send_stream = helper.CreateAudioSendStream();
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(true, true, use_null_audio_processing);
+ auto send_stream = helper.CreateAudioSendStream();
+ }
}
TEST(AudioSendStreamTest, NoAudioBweCorrectObjectsOnChannelProxy) {
- ConfigHelper helper(false, true);
- auto send_stream = helper.CreateAudioSendStream();
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(false, true, use_null_audio_processing);
+ auto send_stream = helper.CreateAudioSendStream();
+ }
}
TEST(AudioSendStreamTest, GetStats) {
- ConfigHelper helper(false, true);
- auto send_stream = helper.CreateAudioSendStream();
- helper.SetupMockForGetStats();
- AudioSendStream::Stats stats = send_stream->GetStats(true);
- EXPECT_EQ(kSsrc, stats.local_ssrc);
- EXPECT_EQ(kCallStats.payload_bytes_sent, stats.payload_bytes_sent);
- EXPECT_EQ(kCallStats.header_and_padding_bytes_sent,
- stats.header_and_padding_bytes_sent);
- EXPECT_EQ(kCallStats.packetsSent, stats.packets_sent);
- EXPECT_EQ(kReportBlock.cumulative_num_packets_lost, stats.packets_lost);
- EXPECT_EQ(Q8ToFloat(kReportBlock.fraction_lost), stats.fraction_lost);
- EXPECT_EQ(kIsacFormat.name, stats.codec_name);
- EXPECT_EQ(static_cast<int32_t>(kReportBlock.interarrival_jitter /
- (kIsacFormat.clockrate_hz / 1000)),
- stats.jitter_ms);
- EXPECT_EQ(kCallStats.rttMs, stats.rtt_ms);
- EXPECT_EQ(0, stats.audio_level);
- EXPECT_EQ(0, stats.total_input_energy);
- EXPECT_EQ(0, stats.total_input_duration);
- EXPECT_EQ(kEchoDelayMedian, stats.apm_statistics.delay_median_ms);
- EXPECT_EQ(kEchoDelayStdDev, stats.apm_statistics.delay_standard_deviation_ms);
- EXPECT_EQ(kEchoReturnLoss, stats.apm_statistics.echo_return_loss);
- EXPECT_EQ(kEchoReturnLossEnhancement,
- stats.apm_statistics.echo_return_loss_enhancement);
- EXPECT_EQ(kDivergentFilterFraction,
- stats.apm_statistics.divergent_filter_fraction);
- EXPECT_EQ(kResidualEchoLikelihood,
- stats.apm_statistics.residual_echo_likelihood);
- EXPECT_EQ(kResidualEchoLikelihoodMax,
- stats.apm_statistics.residual_echo_likelihood_recent_max);
- EXPECT_FALSE(stats.typing_noise_detected);
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(false, true, use_null_audio_processing);
+ auto send_stream = helper.CreateAudioSendStream();
+ helper.SetupMockForGetStats(use_null_audio_processing);
+ AudioSendStream::Stats stats = send_stream->GetStats(true);
+ EXPECT_EQ(kSsrc, stats.local_ssrc);
+ EXPECT_EQ(kCallStats.payload_bytes_sent, stats.payload_bytes_sent);
+ EXPECT_EQ(kCallStats.header_and_padding_bytes_sent,
+ stats.header_and_padding_bytes_sent);
+ EXPECT_EQ(kCallStats.packetsSent, stats.packets_sent);
+ EXPECT_EQ(kReportBlock.cumulative_num_packets_lost, stats.packets_lost);
+ EXPECT_EQ(Q8ToFloat(kReportBlock.fraction_lost), stats.fraction_lost);
+ EXPECT_EQ(kIsacFormat.name, stats.codec_name);
+ EXPECT_EQ(static_cast<int32_t>(kReportBlock.interarrival_jitter /
+ (kIsacFormat.clockrate_hz / 1000)),
+ stats.jitter_ms);
+ EXPECT_EQ(kCallStats.rttMs, stats.rtt_ms);
+ EXPECT_EQ(0, stats.audio_level);
+ EXPECT_EQ(0, stats.total_input_energy);
+ EXPECT_EQ(0, stats.total_input_duration);
+
+ if (!use_null_audio_processing) {
+ EXPECT_EQ(kEchoDelayMedian, stats.apm_statistics.delay_median_ms);
+ EXPECT_EQ(kEchoDelayStdDev,
+ stats.apm_statistics.delay_standard_deviation_ms);
+ EXPECT_EQ(kEchoReturnLoss, stats.apm_statistics.echo_return_loss);
+ EXPECT_EQ(kEchoReturnLossEnhancement,
+ stats.apm_statistics.echo_return_loss_enhancement);
+ EXPECT_EQ(kDivergentFilterFraction,
+ stats.apm_statistics.divergent_filter_fraction);
+ EXPECT_EQ(kResidualEchoLikelihood,
+ stats.apm_statistics.residual_echo_likelihood);
+ EXPECT_EQ(kResidualEchoLikelihoodMax,
+ stats.apm_statistics.residual_echo_likelihood_recent_max);
+ EXPECT_FALSE(stats.typing_noise_detected);
+ }
+ }
}
TEST(AudioSendStreamTest, GetStatsAudioLevel) {
- ConfigHelper helper(false, true);
- auto send_stream = helper.CreateAudioSendStream();
- helper.SetupMockForGetStats();
- EXPECT_CALL(*helper.channel_send(), ProcessAndEncodeAudioForMock(_))
- .Times(AnyNumber());
-
- constexpr int kSampleRateHz = 48000;
- constexpr size_t kNumChannels = 1;
-
- constexpr int16_t kSilentAudioLevel = 0;
- constexpr int16_t kMaxAudioLevel = 32767; // Audio level is [0,32767].
- constexpr int kAudioFrameDurationMs = 10;
-
- // Process 10 audio frames (100 ms) of silence. After this, on the next
- // (11-th) frame, the audio level will be updated with the maximum audio level
- // of the first 11 frames. See AudioLevel.
- for (size_t i = 0; i < 10; ++i) {
- send_stream->SendAudioData(CreateAudioFrame1kHzSineWave(
- kSilentAudioLevel, kAudioFrameDurationMs, kSampleRateHz, kNumChannels));
- }
- AudioSendStream::Stats stats = send_stream->GetStats();
- EXPECT_EQ(kSilentAudioLevel, stats.audio_level);
- EXPECT_NEAR(0.0f, stats.total_input_energy, kTolerance);
- EXPECT_NEAR(0.1f, stats.total_input_duration, kTolerance); // 100 ms = 0.1 s
-
- // Process 10 audio frames (100 ms) of maximum audio level.
- // Note that AudioLevel updates the audio level every 11th frame, processing
- // 10 frames above was needed to see a non-zero audio level here.
- for (size_t i = 0; i < 10; ++i) {
- send_stream->SendAudioData(CreateAudioFrame1kHzSineWave(
- kMaxAudioLevel, kAudioFrameDurationMs, kSampleRateHz, kNumChannels));
- }
- stats = send_stream->GetStats();
- EXPECT_EQ(kMaxAudioLevel, stats.audio_level);
- // Energy increases by energy*duration, where energy is audio level in [0,1].
- EXPECT_NEAR(0.1f, stats.total_input_energy, kTolerance); // 0.1 s of max
- EXPECT_NEAR(0.2f, stats.total_input_duration, kTolerance); // 200 ms = 0.2 s
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(false, true, use_null_audio_processing);
+ auto send_stream = helper.CreateAudioSendStream();
+ helper.SetupMockForGetStats(use_null_audio_processing);
+ EXPECT_CALL(*helper.channel_send(), ProcessAndEncodeAudioForMock(_))
+ .Times(AnyNumber());
+
+ constexpr int kSampleRateHz = 48000;
+ constexpr size_t kNumChannels = 1;
+
+ constexpr int16_t kSilentAudioLevel = 0;
+ constexpr int16_t kMaxAudioLevel = 32767; // Audio level is [0,32767].
+ constexpr int kAudioFrameDurationMs = 10;
+
+ // Process 10 audio frames (100 ms) of silence. After this, on the next
+ // (11-th) frame, the audio level will be updated with the maximum audio
+ // level of the first 11 frames. See AudioLevel.
+ for (size_t i = 0; i < 10; ++i) {
+ send_stream->SendAudioData(
+ CreateAudioFrame1kHzSineWave(kSilentAudioLevel, kAudioFrameDurationMs,
+ kSampleRateHz, kNumChannels));
+ }
+ AudioSendStream::Stats stats = send_stream->GetStats();
+ EXPECT_EQ(kSilentAudioLevel, stats.audio_level);
+ EXPECT_NEAR(0.0f, stats.total_input_energy, kTolerance);
+ EXPECT_NEAR(0.1f, stats.total_input_duration,
+ kTolerance); // 100 ms = 0.1 s
+
+ // Process 10 audio frames (100 ms) of maximum audio level.
+ // Note that AudioLevel updates the audio level every 11th frame, processing
+ // 10 frames above was needed to see a non-zero audio level here.
+ for (size_t i = 0; i < 10; ++i) {
+ send_stream->SendAudioData(CreateAudioFrame1kHzSineWave(
+ kMaxAudioLevel, kAudioFrameDurationMs, kSampleRateHz, kNumChannels));
+ }
+ stats = send_stream->GetStats();
+ EXPECT_EQ(kMaxAudioLevel, stats.audio_level);
+ // Energy increases by energy*duration, where energy is audio level in
+ // [0,1].
+ EXPECT_NEAR(0.1f, stats.total_input_energy, kTolerance); // 0.1 s of max
+ EXPECT_NEAR(0.2f, stats.total_input_duration,
+ kTolerance); // 200 ms = 0.2 s
+ }
}
TEST(AudioSendStreamTest, SendCodecAppliesAudioNetworkAdaptor) {
- ConfigHelper helper(false, true);
- helper.config().send_codec_spec =
- AudioSendStream::Config::SendCodecSpec(0, kOpusFormat);
- const std::string kAnaConfigString = "abcde";
- const std::string kAnaReconfigString = "12345";
-
- helper.config().rtp.extensions.push_back(RtpExtension(
- RtpExtension::kTransportSequenceNumberUri, kTransportSequenceNumberId));
- helper.config().audio_network_adaptor_config = kAnaConfigString;
-
- EXPECT_CALL(helper.mock_encoder_factory(), MakeAudioEncoderMock(_, _, _, _))
- .WillOnce(Invoke([&kAnaConfigString, &kAnaReconfigString](
- int payload_type, const SdpAudioFormat& format,
- absl::optional<AudioCodecPairId> codec_pair_id,
- std::unique_ptr<AudioEncoder>* return_value) {
- auto mock_encoder = SetupAudioEncoderMock(payload_type, format);
- EXPECT_CALL(*mock_encoder,
- EnableAudioNetworkAdaptor(StrEq(kAnaConfigString), _))
- .WillOnce(Return(true));
- EXPECT_CALL(*mock_encoder,
- EnableAudioNetworkAdaptor(StrEq(kAnaReconfigString), _))
- .WillOnce(Return(true));
- *return_value = std::move(mock_encoder);
- }));
-
- auto send_stream = helper.CreateAudioSendStream();
-
- auto stream_config = helper.config();
- stream_config.audio_network_adaptor_config = kAnaReconfigString;
-
- send_stream->Reconfigure(stream_config);
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(false, true, use_null_audio_processing);
+ helper.config().send_codec_spec =
+ AudioSendStream::Config::SendCodecSpec(0, kOpusFormat);
+ const std::string kAnaConfigString = "abcde";
+ const std::string kAnaReconfigString = "12345";
+
+ helper.config().rtp.extensions.push_back(RtpExtension(
+ RtpExtension::kTransportSequenceNumberUri, kTransportSequenceNumberId));
+ helper.config().audio_network_adaptor_config = kAnaConfigString;
+
+ EXPECT_CALL(helper.mock_encoder_factory(), MakeAudioEncoderMock(_, _, _, _))
+ .WillOnce(Invoke([&kAnaConfigString, &kAnaReconfigString](
+ int payload_type, const SdpAudioFormat& format,
+ absl::optional<AudioCodecPairId> codec_pair_id,
+ std::unique_ptr<AudioEncoder>* return_value) {
+ auto mock_encoder = SetupAudioEncoderMock(payload_type, format);
+ EXPECT_CALL(*mock_encoder,
+ EnableAudioNetworkAdaptor(StrEq(kAnaConfigString), _))
+ .WillOnce(Return(true));
+ EXPECT_CALL(*mock_encoder,
+ EnableAudioNetworkAdaptor(StrEq(kAnaReconfigString), _))
+ .WillOnce(Return(true));
+ *return_value = std::move(mock_encoder);
+ }));
+
+ auto send_stream = helper.CreateAudioSendStream();
+
+ auto stream_config = helper.config();
+ stream_config.audio_network_adaptor_config = kAnaReconfigString;
+
+ send_stream->Reconfigure(stream_config);
+ }
}
// VAD is applied when codec is mono and the CNG frequency matches the codec
// clock rate.
TEST(AudioSendStreamTest, SendCodecCanApplyVad) {
- ConfigHelper helper(false, false);
- helper.config().send_codec_spec =
- AudioSendStream::Config::SendCodecSpec(9, kG722Format);
- helper.config().send_codec_spec->cng_payload_type = 105;
- using ::testing::Invoke;
- std::unique_ptr<AudioEncoder> stolen_encoder;
- EXPECT_CALL(*helper.channel_send(), SetEncoderForMock(_, _))
- .WillOnce(
- Invoke([&stolen_encoder](int payload_type,
- std::unique_ptr<AudioEncoder>* encoder) {
- stolen_encoder = std::move(*encoder);
- return true;
- }));
- EXPECT_CALL(*helper.channel_send(), RegisterCngPayloadType(105, 8000));
-
- auto send_stream = helper.CreateAudioSendStream();
-
- // We cannot truly determine if the encoder created is an AudioEncoderCng. It
- // is the only reasonable implementation that will return something from
- // ReclaimContainedEncoders, though.
- ASSERT_TRUE(stolen_encoder);
- EXPECT_FALSE(stolen_encoder->ReclaimContainedEncoders().empty());
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(false, false, use_null_audio_processing);
+ helper.config().send_codec_spec =
+ AudioSendStream::Config::SendCodecSpec(9, kG722Format);
+ helper.config().send_codec_spec->cng_payload_type = 105;
+ using ::testing::Invoke;
+ std::unique_ptr<AudioEncoder> stolen_encoder;
+ EXPECT_CALL(*helper.channel_send(), SetEncoderForMock(_, _))
+ .WillOnce(
+ Invoke([&stolen_encoder](int payload_type,
+ std::unique_ptr<AudioEncoder>* encoder) {
+ stolen_encoder = std::move(*encoder);
+ return true;
+ }));
+ EXPECT_CALL(*helper.channel_send(), RegisterCngPayloadType(105, 8000));
+
+ auto send_stream = helper.CreateAudioSendStream();
+
+ // We cannot truly determine if the encoder created is an AudioEncoderCng.
+ // It is the only reasonable implementation that will return something from
+ // ReclaimContainedEncoders, though.
+ ASSERT_TRUE(stolen_encoder);
+ EXPECT_FALSE(stolen_encoder->ReclaimContainedEncoders().empty());
+ }
}
TEST(AudioSendStreamTest, DoesNotPassHigherBitrateThanMaxBitrate) {
- ConfigHelper helper(false, true);
- auto send_stream = helper.CreateAudioSendStream();
- EXPECT_CALL(*helper.channel_send(),
- OnBitrateAllocation(Field(
- &BitrateAllocationUpdate::target_bitrate,
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(false, true, use_null_audio_processing);
+ auto send_stream = helper.CreateAudioSendStream();
+ EXPECT_CALL(
+ *helper.channel_send(),
+ OnBitrateAllocation(
+ Field(&BitrateAllocationUpdate::target_bitrate,
Eq(DataRate::BitsPerSec(helper.config().max_bitrate_bps)))));
- BitrateAllocationUpdate update;
- update.target_bitrate =
- DataRate::BitsPerSec(helper.config().max_bitrate_bps + 5000);
- update.packet_loss_ratio = 0;
- update.round_trip_time = TimeDelta::Millis(50);
- update.bwe_period = TimeDelta::Millis(6000);
- helper.worker()->SendTask([&] { send_stream->OnBitrateUpdated(update); },
- RTC_FROM_HERE);
+ BitrateAllocationUpdate update;
+ update.target_bitrate =
+ DataRate::BitsPerSec(helper.config().max_bitrate_bps + 5000);
+ update.packet_loss_ratio = 0;
+ update.round_trip_time = TimeDelta::Millis(50);
+ update.bwe_period = TimeDelta::Millis(6000);
+ helper.worker()->SendTask([&] { send_stream->OnBitrateUpdated(update); },
+ RTC_FROM_HERE);
+ }
}
TEST(AudioSendStreamTest, SSBweTargetInRangeRespected) {
ScopedFieldTrials field_trials("WebRTC-Audio-SendSideBwe/Enabled/");
- ConfigHelper helper(true, true);
- auto send_stream = helper.CreateAudioSendStream();
- EXPECT_CALL(
- *helper.channel_send(),
- OnBitrateAllocation(Field(
- &BitrateAllocationUpdate::target_bitrate,
- Eq(DataRate::BitsPerSec(helper.config().max_bitrate_bps - 5000)))));
- BitrateAllocationUpdate update;
- update.target_bitrate =
- DataRate::BitsPerSec(helper.config().max_bitrate_bps - 5000);
- helper.worker()->SendTask([&] { send_stream->OnBitrateUpdated(update); },
- RTC_FROM_HERE);
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(true, true, use_null_audio_processing);
+ auto send_stream = helper.CreateAudioSendStream();
+ EXPECT_CALL(
+ *helper.channel_send(),
+ OnBitrateAllocation(Field(
+ &BitrateAllocationUpdate::target_bitrate,
+ Eq(DataRate::BitsPerSec(helper.config().max_bitrate_bps - 5000)))));
+ BitrateAllocationUpdate update;
+ update.target_bitrate =
+ DataRate::BitsPerSec(helper.config().max_bitrate_bps - 5000);
+ helper.worker()->SendTask([&] { send_stream->OnBitrateUpdated(update); },
+ RTC_FROM_HERE);
+ }
}
TEST(AudioSendStreamTest, SSBweFieldTrialMinRespected) {
ScopedFieldTrials field_trials(
"WebRTC-Audio-SendSideBwe/Enabled/"
"WebRTC-Audio-Allocation/min:6kbps,max:64kbps/");
- ConfigHelper helper(true, true);
- auto send_stream = helper.CreateAudioSendStream();
- EXPECT_CALL(
- *helper.channel_send(),
- OnBitrateAllocation(Field(&BitrateAllocationUpdate::target_bitrate,
- Eq(DataRate::KilobitsPerSec(6)))));
- BitrateAllocationUpdate update;
- update.target_bitrate = DataRate::KilobitsPerSec(1);
- helper.worker()->SendTask([&] { send_stream->OnBitrateUpdated(update); },
- RTC_FROM_HERE);
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(true, true, use_null_audio_processing);
+ auto send_stream = helper.CreateAudioSendStream();
+ EXPECT_CALL(
+ *helper.channel_send(),
+ OnBitrateAllocation(Field(&BitrateAllocationUpdate::target_bitrate,
+ Eq(DataRate::KilobitsPerSec(6)))));
+ BitrateAllocationUpdate update;
+ update.target_bitrate = DataRate::KilobitsPerSec(1);
+ helper.worker()->SendTask([&] { send_stream->OnBitrateUpdated(update); },
+ RTC_FROM_HERE);
+ }
}
TEST(AudioSendStreamTest, SSBweFieldTrialMaxRespected) {
ScopedFieldTrials field_trials(
"WebRTC-Audio-SendSideBwe/Enabled/"
"WebRTC-Audio-Allocation/min:6kbps,max:64kbps/");
- ConfigHelper helper(true, true);
- auto send_stream = helper.CreateAudioSendStream();
- EXPECT_CALL(
- *helper.channel_send(),
- OnBitrateAllocation(Field(&BitrateAllocationUpdate::target_bitrate,
- Eq(DataRate::KilobitsPerSec(64)))));
- BitrateAllocationUpdate update;
- update.target_bitrate = DataRate::KilobitsPerSec(128);
- helper.worker()->SendTask([&] { send_stream->OnBitrateUpdated(update); },
- RTC_FROM_HERE);
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(true, true, use_null_audio_processing);
+ auto send_stream = helper.CreateAudioSendStream();
+ EXPECT_CALL(
+ *helper.channel_send(),
+ OnBitrateAllocation(Field(&BitrateAllocationUpdate::target_bitrate,
+ Eq(DataRate::KilobitsPerSec(64)))));
+ BitrateAllocationUpdate update;
+ update.target_bitrate = DataRate::KilobitsPerSec(128);
+ helper.worker()->SendTask([&] { send_stream->OnBitrateUpdated(update); },
+ RTC_FROM_HERE);
+ }
}
TEST(AudioSendStreamTest, SSBweWithOverhead) {
@@ -617,19 +660,22 @@ TEST(AudioSendStreamTest, SSBweWithOverhead) {
"WebRTC-Audio-SendSideBwe/Enabled/"
"WebRTC-SendSideBwe-WithOverhead/Enabled/"
"WebRTC-Audio-LegacyOverhead/Disabled/");
- ConfigHelper helper(true, true);
- auto send_stream = helper.CreateAudioSendStream();
- EXPECT_CALL(*helper.channel_send(), CallEncoder(_)).Times(1);
- send_stream->OnOverheadChanged(kOverheadPerPacket.bytes<size_t>());
- const DataRate bitrate =
- DataRate::BitsPerSec(helper.config().max_bitrate_bps) + kMaxOverheadRate;
- EXPECT_CALL(*helper.channel_send(),
- OnBitrateAllocation(Field(
- &BitrateAllocationUpdate::target_bitrate, Eq(bitrate))));
- BitrateAllocationUpdate update;
- update.target_bitrate = bitrate;
- helper.worker()->SendTask([&] { send_stream->OnBitrateUpdated(update); },
- RTC_FROM_HERE);
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(true, true, use_null_audio_processing);
+ auto send_stream = helper.CreateAudioSendStream();
+ EXPECT_CALL(*helper.channel_send(), CallEncoder(_)).Times(1);
+ send_stream->OnOverheadChanged(kOverheadPerPacket.bytes<size_t>());
+ const DataRate bitrate =
+ DataRate::BitsPerSec(helper.config().max_bitrate_bps) +
+ kMaxOverheadRate;
+ EXPECT_CALL(*helper.channel_send(),
+ OnBitrateAllocation(Field(
+ &BitrateAllocationUpdate::target_bitrate, Eq(bitrate))));
+ BitrateAllocationUpdate update;
+ update.target_bitrate = bitrate;
+ helper.worker()->SendTask([&] { send_stream->OnBitrateUpdated(update); },
+ RTC_FROM_HERE);
+ }
}
TEST(AudioSendStreamTest, SSBweWithOverheadMinRespected) {
@@ -638,18 +684,20 @@ TEST(AudioSendStreamTest, SSBweWithOverheadMinRespected) {
"WebRTC-SendSideBwe-WithOverhead/Enabled/"
"WebRTC-Audio-LegacyOverhead/Disabled/"
"WebRTC-Audio-Allocation/min:6kbps,max:64kbps/");
- ConfigHelper helper(true, true);
- auto send_stream = helper.CreateAudioSendStream();
- EXPECT_CALL(*helper.channel_send(), CallEncoder(_)).Times(1);
- send_stream->OnOverheadChanged(kOverheadPerPacket.bytes<size_t>());
- const DataRate bitrate = DataRate::KilobitsPerSec(6) + kMinOverheadRate;
- EXPECT_CALL(*helper.channel_send(),
- OnBitrateAllocation(Field(
- &BitrateAllocationUpdate::target_bitrate, Eq(bitrate))));
- BitrateAllocationUpdate update;
- update.target_bitrate = DataRate::KilobitsPerSec(1);
- helper.worker()->SendTask([&] { send_stream->OnBitrateUpdated(update); },
- RTC_FROM_HERE);
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(true, true, use_null_audio_processing);
+ auto send_stream = helper.CreateAudioSendStream();
+ EXPECT_CALL(*helper.channel_send(), CallEncoder(_)).Times(1);
+ send_stream->OnOverheadChanged(kOverheadPerPacket.bytes<size_t>());
+ const DataRate bitrate = DataRate::KilobitsPerSec(6) + kMinOverheadRate;
+ EXPECT_CALL(*helper.channel_send(),
+ OnBitrateAllocation(Field(
+ &BitrateAllocationUpdate::target_bitrate, Eq(bitrate))));
+ BitrateAllocationUpdate update;
+ update.target_bitrate = DataRate::KilobitsPerSec(1);
+ helper.worker()->SendTask([&] { send_stream->OnBitrateUpdated(update); },
+ RTC_FROM_HERE);
+ }
}
TEST(AudioSendStreamTest, SSBweWithOverheadMaxRespected) {
@@ -658,152 +706,172 @@ TEST(AudioSendStreamTest, SSBweWithOverheadMaxRespected) {
"WebRTC-SendSideBwe-WithOverhead/Enabled/"
"WebRTC-Audio-LegacyOverhead/Disabled/"
"WebRTC-Audio-Allocation/min:6kbps,max:64kbps/");
- ConfigHelper helper(true, true);
- auto send_stream = helper.CreateAudioSendStream();
- EXPECT_CALL(*helper.channel_send(), CallEncoder(_)).Times(1);
- send_stream->OnOverheadChanged(kOverheadPerPacket.bytes<size_t>());
- const DataRate bitrate = DataRate::KilobitsPerSec(64) + kMaxOverheadRate;
- EXPECT_CALL(*helper.channel_send(),
- OnBitrateAllocation(Field(
- &BitrateAllocationUpdate::target_bitrate, Eq(bitrate))));
- BitrateAllocationUpdate update;
- update.target_bitrate = DataRate::KilobitsPerSec(128);
- helper.worker()->SendTask([&] { send_stream->OnBitrateUpdated(update); },
- RTC_FROM_HERE);
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(true, true, use_null_audio_processing);
+ auto send_stream = helper.CreateAudioSendStream();
+ EXPECT_CALL(*helper.channel_send(), CallEncoder(_)).Times(1);
+ send_stream->OnOverheadChanged(kOverheadPerPacket.bytes<size_t>());
+ const DataRate bitrate = DataRate::KilobitsPerSec(64) + kMaxOverheadRate;
+ EXPECT_CALL(*helper.channel_send(),
+ OnBitrateAllocation(Field(
+ &BitrateAllocationUpdate::target_bitrate, Eq(bitrate))));
+ BitrateAllocationUpdate update;
+ update.target_bitrate = DataRate::KilobitsPerSec(128);
+ helper.worker()->SendTask([&] { send_stream->OnBitrateUpdated(update); },
+ RTC_FROM_HERE);
+ }
}
TEST(AudioSendStreamTest, ProbingIntervalOnBitrateUpdated) {
- ConfigHelper helper(false, true);
- auto send_stream = helper.CreateAudioSendStream();
-
- EXPECT_CALL(*helper.channel_send(),
- OnBitrateAllocation(Field(&BitrateAllocationUpdate::bwe_period,
- Eq(TimeDelta::Millis(5000)))));
- BitrateAllocationUpdate update;
- update.target_bitrate =
- DataRate::BitsPerSec(helper.config().max_bitrate_bps + 5000);
- update.packet_loss_ratio = 0;
- update.round_trip_time = TimeDelta::Millis(50);
- update.bwe_period = TimeDelta::Millis(5000);
- helper.worker()->SendTask([&] { send_stream->OnBitrateUpdated(update); },
- RTC_FROM_HERE);
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(false, true, use_null_audio_processing);
+ auto send_stream = helper.CreateAudioSendStream();
+
+ EXPECT_CALL(*helper.channel_send(),
+ OnBitrateAllocation(Field(&BitrateAllocationUpdate::bwe_period,
+ Eq(TimeDelta::Millis(5000)))));
+ BitrateAllocationUpdate update;
+ update.target_bitrate =
+ DataRate::BitsPerSec(helper.config().max_bitrate_bps + 5000);
+ update.packet_loss_ratio = 0;
+ update.round_trip_time = TimeDelta::Millis(50);
+ update.bwe_period = TimeDelta::Millis(5000);
+ helper.worker()->SendTask([&] { send_stream->OnBitrateUpdated(update); },
+ RTC_FROM_HERE);
+ }
}
// Test that AudioSendStream doesn't recreate the encoder unnecessarily.
TEST(AudioSendStreamTest, DontRecreateEncoder) {
- ConfigHelper helper(false, false);
- // WillOnce is (currently) the default used by ConfigHelper if asked to set an
- // expectation for SetEncoder. Since this behavior is essential for this test
- // to be correct, it's instead set-up manually here. Otherwise a simple change
- // to ConfigHelper (say to WillRepeatedly) would silently make this test
- // useless.
- EXPECT_CALL(*helper.channel_send(), SetEncoderForMock(_, _))
- .WillOnce(Return());
-
- EXPECT_CALL(*helper.channel_send(), RegisterCngPayloadType(105, 8000));
-
- helper.config().send_codec_spec =
- AudioSendStream::Config::SendCodecSpec(9, kG722Format);
- helper.config().send_codec_spec->cng_payload_type = 105;
- auto send_stream = helper.CreateAudioSendStream();
- send_stream->Reconfigure(helper.config());
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(false, false, use_null_audio_processing);
+ // WillOnce is (currently) the default used by ConfigHelper if asked to set
+ // an expectation for SetEncoder. Since this behavior is essential for this
+ // test to be correct, it's instead set-up manually here. Otherwise a simple
+ // change to ConfigHelper (say to WillRepeatedly) would silently make this
+ // test useless.
+ EXPECT_CALL(*helper.channel_send(), SetEncoderForMock(_, _))
+ .WillOnce(Return());
+
+ EXPECT_CALL(*helper.channel_send(), RegisterCngPayloadType(105, 8000));
+
+ helper.config().send_codec_spec =
+ AudioSendStream::Config::SendCodecSpec(9, kG722Format);
+ helper.config().send_codec_spec->cng_payload_type = 105;
+ auto send_stream = helper.CreateAudioSendStream();
+ send_stream->Reconfigure(helper.config());
+ }
}
TEST(AudioSendStreamTest, ReconfigureTransportCcResetsFirst) {
ScopedFieldTrials field_trials("WebRTC-Audio-SendSideBwe/Enabled/");
- ConfigHelper helper(false, true);
- auto send_stream = helper.CreateAudioSendStream();
- auto new_config = helper.config();
- ConfigHelper::AddBweToConfig(&new_config);
-
- EXPECT_CALL(*helper.rtp_rtcp(),
- RegisterRtpHeaderExtension(TransportSequenceNumber::kUri,
- kTransportSequenceNumberId))
- .Times(1);
- {
- ::testing::InSequence seq;
- EXPECT_CALL(*helper.channel_send(), ResetSenderCongestionControlObjects())
- .Times(1);
- EXPECT_CALL(*helper.channel_send(), RegisterSenderCongestionControlObjects(
- helper.transport(), Ne(nullptr)))
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(false, true, use_null_audio_processing);
+ auto send_stream = helper.CreateAudioSendStream();
+ auto new_config = helper.config();
+ ConfigHelper::AddBweToConfig(&new_config);
+
+ EXPECT_CALL(*helper.rtp_rtcp(),
+ RegisterRtpHeaderExtension(TransportSequenceNumber::kUri,
+ kTransportSequenceNumberId))
.Times(1);
- }
+ {
+ ::testing::InSequence seq;
+ EXPECT_CALL(*helper.channel_send(), ResetSenderCongestionControlObjects())
+ .Times(1);
+ EXPECT_CALL(*helper.channel_send(),
+ RegisterSenderCongestionControlObjects(helper.transport(),
+ Ne(nullptr)))
+ .Times(1);
+ }
- send_stream->Reconfigure(new_config);
+ send_stream->Reconfigure(new_config);
+ }
}
TEST(AudioSendStreamTest, OnTransportOverheadChanged) {
- ConfigHelper helper(false, true);
- auto send_stream = helper.CreateAudioSendStream();
- auto new_config = helper.config();
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(false, true, use_null_audio_processing);
+ auto send_stream = helper.CreateAudioSendStream();
+ auto new_config = helper.config();
- // CallEncoder will be called on overhead change.
- EXPECT_CALL(*helper.channel_send(), CallEncoder(::testing::_)).Times(1);
+ // CallEncoder will be called on overhead change.
+ EXPECT_CALL(*helper.channel_send(), CallEncoder(::testing::_)).Times(1);
- const size_t transport_overhead_per_packet_bytes = 333;
- send_stream->SetTransportOverhead(transport_overhead_per_packet_bytes);
+ const size_t transport_overhead_per_packet_bytes = 333;
+ send_stream->SetTransportOverhead(transport_overhead_per_packet_bytes);
- EXPECT_EQ(transport_overhead_per_packet_bytes,
- send_stream->TestOnlyGetPerPacketOverheadBytes());
+ EXPECT_EQ(transport_overhead_per_packet_bytes,
+ send_stream->TestOnlyGetPerPacketOverheadBytes());
+ }
}
TEST(AudioSendStreamTest, OnAudioOverheadChanged) {
- ConfigHelper helper(false, true);
- auto send_stream = helper.CreateAudioSendStream();
- auto new_config = helper.config();
-
- // CallEncoder will be called on overhead change.
- EXPECT_CALL(*helper.channel_send(), CallEncoder(::testing::_)).Times(1);
-
- const size_t audio_overhead_per_packet_bytes = 555;
- send_stream->OnOverheadChanged(audio_overhead_per_packet_bytes);
- EXPECT_EQ(audio_overhead_per_packet_bytes,
- send_stream->TestOnlyGetPerPacketOverheadBytes());
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(false, true, use_null_audio_processing);
+ auto send_stream = helper.CreateAudioSendStream();
+ auto new_config = helper.config();
+
+ // CallEncoder will be called on overhead change.
+ EXPECT_CALL(*helper.channel_send(), CallEncoder(::testing::_)).Times(1);
+
+ const size_t audio_overhead_per_packet_bytes = 555;
+ send_stream->OnOverheadChanged(audio_overhead_per_packet_bytes);
+ EXPECT_EQ(audio_overhead_per_packet_bytes,
+ send_stream->TestOnlyGetPerPacketOverheadBytes());
+ }
}
TEST(AudioSendStreamTest, OnAudioAndTransportOverheadChanged) {
- ConfigHelper helper(false, true);
- auto send_stream = helper.CreateAudioSendStream();
- auto new_config = helper.config();
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(false, true, use_null_audio_processing);
+ auto send_stream = helper.CreateAudioSendStream();
+ auto new_config = helper.config();
- // CallEncoder will be called when each of overhead changes.
- EXPECT_CALL(*helper.channel_send(), CallEncoder(::testing::_)).Times(2);
+ // CallEncoder will be called when each of overhead changes.
+ EXPECT_CALL(*helper.channel_send(), CallEncoder(::testing::_)).Times(2);
- const size_t transport_overhead_per_packet_bytes = 333;
- send_stream->SetTransportOverhead(transport_overhead_per_packet_bytes);
+ const size_t transport_overhead_per_packet_bytes = 333;
+ send_stream->SetTransportOverhead(transport_overhead_per_packet_bytes);
- const size_t audio_overhead_per_packet_bytes = 555;
- send_stream->OnOverheadChanged(audio_overhead_per_packet_bytes);
+ const size_t audio_overhead_per_packet_bytes = 555;
+ send_stream->OnOverheadChanged(audio_overhead_per_packet_bytes);
- EXPECT_EQ(
- transport_overhead_per_packet_bytes + audio_overhead_per_packet_bytes,
- send_stream->TestOnlyGetPerPacketOverheadBytes());
+ EXPECT_EQ(
+ transport_overhead_per_packet_bytes + audio_overhead_per_packet_bytes,
+ send_stream->TestOnlyGetPerPacketOverheadBytes());
+ }
}
// Validates that reconfiguring the AudioSendStream with a Frame encryptor
// correctly reconfigures on the object without crashing.
TEST(AudioSendStreamTest, ReconfigureWithFrameEncryptor) {
- ConfigHelper helper(false, true);
- auto send_stream = helper.CreateAudioSendStream();
- auto new_config = helper.config();
-
- rtc::scoped_refptr<FrameEncryptorInterface> mock_frame_encryptor_0(
- new rtc::RefCountedObject<MockFrameEncryptor>());
- new_config.frame_encryptor = mock_frame_encryptor_0;
- EXPECT_CALL(*helper.channel_send(), SetFrameEncryptor(Ne(nullptr))).Times(1);
- send_stream->Reconfigure(new_config);
-
- // Not updating the frame encryptor shouldn't force it to reconfigure.
- EXPECT_CALL(*helper.channel_send(), SetFrameEncryptor(_)).Times(0);
- send_stream->Reconfigure(new_config);
-
- // Updating frame encryptor to a new object should force a call to the proxy.
- rtc::scoped_refptr<FrameEncryptorInterface> mock_frame_encryptor_1(
- new rtc::RefCountedObject<MockFrameEncryptor>());
- new_config.frame_encryptor = mock_frame_encryptor_1;
- new_config.crypto_options.sframe.require_frame_encryption = true;
- EXPECT_CALL(*helper.channel_send(), SetFrameEncryptor(Ne(nullptr))).Times(1);
- send_stream->Reconfigure(new_config);
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(false, true, use_null_audio_processing);
+ auto send_stream = helper.CreateAudioSendStream();
+ auto new_config = helper.config();
+
+ rtc::scoped_refptr<FrameEncryptorInterface> mock_frame_encryptor_0(
+ new rtc::RefCountedObject<MockFrameEncryptor>());
+ new_config.frame_encryptor = mock_frame_encryptor_0;
+ EXPECT_CALL(*helper.channel_send(), SetFrameEncryptor(Ne(nullptr)))
+ .Times(1);
+ send_stream->Reconfigure(new_config);
+
+ // Not updating the frame encryptor shouldn't force it to reconfigure.
+ EXPECT_CALL(*helper.channel_send(), SetFrameEncryptor(_)).Times(0);
+ send_stream->Reconfigure(new_config);
+
+ // Updating frame encryptor to a new object should force a call to the
+ // proxy.
+ rtc::scoped_refptr<FrameEncryptorInterface> mock_frame_encryptor_1(
+ new rtc::RefCountedObject<MockFrameEncryptor>());
+ new_config.frame_encryptor = mock_frame_encryptor_1;
+ new_config.crypto_options.sframe.require_frame_encryption = true;
+ EXPECT_CALL(*helper.channel_send(), SetFrameEncryptor(Ne(nullptr)))
+ .Times(1);
+ send_stream->Reconfigure(new_config);
+ }
}
} // namespace test
} // namespace webrtc
diff --git a/audio/audio_state.cc b/audio/audio_state.cc
index 1a4fd77ed2..73366e20a8 100644
--- a/audio/audio_state.cc
+++ b/audio/audio_state.cc
@@ -41,7 +41,6 @@ AudioState::~AudioState() {
}
AudioProcessing* AudioState::audio_processing() {
- RTC_DCHECK(config_.audio_processing);
return config_.audio_processing.get();
}
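With the DCHECK removed, audio_processing() may legitimately return null, so call sites check the pointer before using it; the audio_send_stream.cc hunk above shows the pattern, roughly:

    // Sketch of the caller-side pattern used in this CL.
    AudioProcessing* ap = audio_state_->audio_processing();
    if (ap) {
      // Only collect APM statistics when an APM is actually in use.
      stats.apm_statistics = ap->GetStatistics(has_remote_tracks);
    }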
diff --git a/audio/audio_state_unittest.cc b/audio/audio_state_unittest.cc
index 2a1018c120..76e08c549c 100644
--- a/audio/audio_state_unittest.cc
+++ b/audio/audio_state_unittest.cc
@@ -31,10 +31,14 @@ constexpr int kSampleRate = 16000;
constexpr int kNumberOfChannels = 1;
struct ConfigHelper {
- ConfigHelper() : audio_mixer(AudioMixerImpl::Create()) {
+ explicit ConfigHelper(bool use_null_audio_processing)
+ : audio_mixer(AudioMixerImpl::Create()) {
audio_state_config.audio_mixer = audio_mixer;
audio_state_config.audio_processing =
- new rtc::RefCountedObject<testing::NiceMock<MockAudioProcessing>>();
+ use_null_audio_processing
+ ? nullptr
+ : new rtc::RefCountedObject<
+ testing::NiceMock<MockAudioProcessing>>();
audio_state_config.audio_device_module =
new rtc::RefCountedObject<MockAudioDeviceModule>();
}
@@ -88,162 +92,183 @@ std::vector<uint32_t> ComputeChannelLevels(AudioFrame* audio_frame) {
} // namespace
TEST(AudioStateTest, Create) {
- ConfigHelper helper;
- auto audio_state = AudioState::Create(helper.config());
- EXPECT_TRUE(audio_state.get());
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(use_null_audio_processing);
+ auto audio_state = AudioState::Create(helper.config());
+ EXPECT_TRUE(audio_state.get());
+ }
}
TEST(AudioStateTest, ConstructDestruct) {
- ConfigHelper helper;
- rtc::scoped_refptr<internal::AudioState> audio_state(
- new rtc::RefCountedObject<internal::AudioState>(helper.config()));
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(use_null_audio_processing);
+ rtc::scoped_refptr<internal::AudioState> audio_state(
+ new rtc::RefCountedObject<internal::AudioState>(helper.config()));
+ }
}
TEST(AudioStateTest, RecordedAudioArrivesAtSingleStream) {
- ConfigHelper helper;
- rtc::scoped_refptr<internal::AudioState> audio_state(
- new rtc::RefCountedObject<internal::AudioState>(helper.config()));
-
- MockAudioSendStream stream;
- audio_state->AddSendingStream(&stream, 8000, 2);
-
- EXPECT_CALL(
- stream,
- SendAudioDataForMock(::testing::AllOf(
- ::testing::Field(&AudioFrame::sample_rate_hz_, ::testing::Eq(8000)),
- ::testing::Field(&AudioFrame::num_channels_, ::testing::Eq(2u)))))
- .WillOnce(
- // Verify that channels are not swapped by default.
- ::testing::Invoke([](AudioFrame* audio_frame) {
- auto levels = ComputeChannelLevels(audio_frame);
- EXPECT_LT(0u, levels[0]);
- EXPECT_EQ(0u, levels[1]);
- }));
- MockAudioProcessing* ap =
- static_cast<MockAudioProcessing*>(audio_state->audio_processing());
- EXPECT_CALL(*ap, set_stream_delay_ms(0));
- EXPECT_CALL(*ap, set_stream_key_pressed(false));
- EXPECT_CALL(*ap, ProcessStream(_, _, _, Matcher<int16_t*>(_)));
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(use_null_audio_processing);
+ rtc::scoped_refptr<internal::AudioState> audio_state(
+ new rtc::RefCountedObject<internal::AudioState>(helper.config()));
+
+ MockAudioSendStream stream;
+ audio_state->AddSendingStream(&stream, 8000, 2);
+
+ EXPECT_CALL(
+ stream,
+ SendAudioDataForMock(::testing::AllOf(
+ ::testing::Field(&AudioFrame::sample_rate_hz_, ::testing::Eq(8000)),
+ ::testing::Field(&AudioFrame::num_channels_, ::testing::Eq(2u)))))
+ .WillOnce(
+ // Verify that channels are not swapped by default.
+ ::testing::Invoke([](AudioFrame* audio_frame) {
+ auto levels = ComputeChannelLevels(audio_frame);
+ EXPECT_LT(0u, levels[0]);
+ EXPECT_EQ(0u, levels[1]);
+ }));
+ MockAudioProcessing* ap = use_null_audio_processing
+ ? nullptr
+ : static_cast<MockAudioProcessing*>(
+ audio_state->audio_processing());
+ if (ap) {
+ EXPECT_CALL(*ap, set_stream_delay_ms(0));
+ EXPECT_CALL(*ap, set_stream_key_pressed(false));
+ EXPECT_CALL(*ap, ProcessStream(_, _, _, Matcher<int16_t*>(_)));
+ }
- constexpr int kSampleRate = 16000;
- constexpr size_t kNumChannels = 2;
- auto audio_data = Create10msTestData(kSampleRate, kNumChannels);
- uint32_t new_mic_level = 667;
- audio_state->audio_transport()->RecordedDataIsAvailable(
- &audio_data[0], kSampleRate / 100, kNumChannels * 2, kNumChannels,
- kSampleRate, 0, 0, 0, false, new_mic_level);
- EXPECT_EQ(667u, new_mic_level);
-
- audio_state->RemoveSendingStream(&stream);
+ constexpr int kSampleRate = 16000;
+ constexpr size_t kNumChannels = 2;
+ auto audio_data = Create10msTestData(kSampleRate, kNumChannels);
+ uint32_t new_mic_level = 667;
+ audio_state->audio_transport()->RecordedDataIsAvailable(
+ &audio_data[0], kSampleRate / 100, kNumChannels * 2, kNumChannels,
+ kSampleRate, 0, 0, 0, false, new_mic_level);
+ EXPECT_EQ(667u, new_mic_level);
+
+ audio_state->RemoveSendingStream(&stream);
+ }
}
TEST(AudioStateTest, RecordedAudioArrivesAtMultipleStreams) {
- ConfigHelper helper;
- rtc::scoped_refptr<internal::AudioState> audio_state(
- new rtc::RefCountedObject<internal::AudioState>(helper.config()));
-
- MockAudioSendStream stream_1;
- MockAudioSendStream stream_2;
- audio_state->AddSendingStream(&stream_1, 8001, 2);
- audio_state->AddSendingStream(&stream_2, 32000, 1);
-
- EXPECT_CALL(
- stream_1,
- SendAudioDataForMock(::testing::AllOf(
- ::testing::Field(&AudioFrame::sample_rate_hz_, ::testing::Eq(16000)),
- ::testing::Field(&AudioFrame::num_channels_, ::testing::Eq(1u)))))
- .WillOnce(
- // Verify that there is output signal.
- ::testing::Invoke([](AudioFrame* audio_frame) {
- auto levels = ComputeChannelLevels(audio_frame);
- EXPECT_LT(0u, levels[0]);
- }));
- EXPECT_CALL(
- stream_2,
- SendAudioDataForMock(::testing::AllOf(
- ::testing::Field(&AudioFrame::sample_rate_hz_, ::testing::Eq(16000)),
- ::testing::Field(&AudioFrame::num_channels_, ::testing::Eq(1u)))))
- .WillOnce(
- // Verify that there is output signal.
- ::testing::Invoke([](AudioFrame* audio_frame) {
- auto levels = ComputeChannelLevels(audio_frame);
- EXPECT_LT(0u, levels[0]);
- }));
- MockAudioProcessing* ap =
- static_cast<MockAudioProcessing*>(audio_state->audio_processing());
- EXPECT_CALL(*ap, set_stream_delay_ms(5));
- EXPECT_CALL(*ap, set_stream_key_pressed(true));
- EXPECT_CALL(*ap, ProcessStream(_, _, _, Matcher<int16_t*>(_)));
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(use_null_audio_processing);
+ rtc::scoped_refptr<internal::AudioState> audio_state(
+ new rtc::RefCountedObject<internal::AudioState>(helper.config()));
+
+ MockAudioSendStream stream_1;
+ MockAudioSendStream stream_2;
+ audio_state->AddSendingStream(&stream_1, 8001, 2);
+ audio_state->AddSendingStream(&stream_2, 32000, 1);
+
+ EXPECT_CALL(
+ stream_1,
+ SendAudioDataForMock(::testing::AllOf(
+ ::testing::Field(&AudioFrame::sample_rate_hz_,
+ ::testing::Eq(16000)),
+ ::testing::Field(&AudioFrame::num_channels_, ::testing::Eq(1u)))))
+ .WillOnce(
+ // Verify that there is output signal.
+ ::testing::Invoke([](AudioFrame* audio_frame) {
+ auto levels = ComputeChannelLevels(audio_frame);
+ EXPECT_LT(0u, levels[0]);
+ }));
+ EXPECT_CALL(
+ stream_2,
+ SendAudioDataForMock(::testing::AllOf(
+ ::testing::Field(&AudioFrame::sample_rate_hz_,
+ ::testing::Eq(16000)),
+ ::testing::Field(&AudioFrame::num_channels_, ::testing::Eq(1u)))))
+ .WillOnce(
+ // Verify that there is output signal.
+ ::testing::Invoke([](AudioFrame* audio_frame) {
+ auto levels = ComputeChannelLevels(audio_frame);
+ EXPECT_LT(0u, levels[0]);
+ }));
+ MockAudioProcessing* ap =
+ static_cast<MockAudioProcessing*>(audio_state->audio_processing());
+ if (ap) {
+ EXPECT_CALL(*ap, set_stream_delay_ms(5));
+ EXPECT_CALL(*ap, set_stream_key_pressed(true));
+ EXPECT_CALL(*ap, ProcessStream(_, _, _, Matcher<int16_t*>(_)));
+ }
- constexpr int kSampleRate = 16000;
- constexpr size_t kNumChannels = 1;
- auto audio_data = Create10msTestData(kSampleRate, kNumChannels);
- uint32_t new_mic_level = 667;
- audio_state->audio_transport()->RecordedDataIsAvailable(
- &audio_data[0], kSampleRate / 100, kNumChannels * 2, kNumChannels,
- kSampleRate, 5, 0, 0, true, new_mic_level);
- EXPECT_EQ(667u, new_mic_level);
-
- audio_state->RemoveSendingStream(&stream_1);
- audio_state->RemoveSendingStream(&stream_2);
+ constexpr int kSampleRate = 16000;
+ constexpr size_t kNumChannels = 1;
+ auto audio_data = Create10msTestData(kSampleRate, kNumChannels);
+ uint32_t new_mic_level = 667;
+ audio_state->audio_transport()->RecordedDataIsAvailable(
+ &audio_data[0], kSampleRate / 100, kNumChannels * 2, kNumChannels,
+ kSampleRate, 5, 0, 0, true, new_mic_level);
+ EXPECT_EQ(667u, new_mic_level);
+
+ audio_state->RemoveSendingStream(&stream_1);
+ audio_state->RemoveSendingStream(&stream_2);
+ }
}
TEST(AudioStateTest, EnableChannelSwap) {
constexpr int kSampleRate = 16000;
constexpr size_t kNumChannels = 2;
- ConfigHelper helper;
- rtc::scoped_refptr<internal::AudioState> audio_state(
- new rtc::RefCountedObject<internal::AudioState>(helper.config()));
-
- audio_state->SetStereoChannelSwapping(true);
-
- MockAudioSendStream stream;
- audio_state->AddSendingStream(&stream, kSampleRate, kNumChannels);
-
- EXPECT_CALL(stream, SendAudioDataForMock(_))
- .WillOnce(
- // Verify that channels are swapped.
- ::testing::Invoke([](AudioFrame* audio_frame) {
- auto levels = ComputeChannelLevels(audio_frame);
- EXPECT_EQ(0u, levels[0]);
- EXPECT_LT(0u, levels[1]);
- }));
-
- auto audio_data = Create10msTestData(kSampleRate, kNumChannels);
- uint32_t new_mic_level = 667;
- audio_state->audio_transport()->RecordedDataIsAvailable(
- &audio_data[0], kSampleRate / 100, kNumChannels * 2, kNumChannels,
- kSampleRate, 0, 0, 0, false, new_mic_level);
- EXPECT_EQ(667u, new_mic_level);
-
- audio_state->RemoveSendingStream(&stream);
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(use_null_audio_processing);
+ rtc::scoped_refptr<internal::AudioState> audio_state(
+ new rtc::RefCountedObject<internal::AudioState>(helper.config()));
+
+ audio_state->SetStereoChannelSwapping(true);
+
+ MockAudioSendStream stream;
+ audio_state->AddSendingStream(&stream, kSampleRate, kNumChannels);
+
+ EXPECT_CALL(stream, SendAudioDataForMock(_))
+ .WillOnce(
+ // Verify that channels are swapped.
+ ::testing::Invoke([](AudioFrame* audio_frame) {
+ auto levels = ComputeChannelLevels(audio_frame);
+ EXPECT_EQ(0u, levels[0]);
+ EXPECT_LT(0u, levels[1]);
+ }));
+
+ auto audio_data = Create10msTestData(kSampleRate, kNumChannels);
+ uint32_t new_mic_level = 667;
+ audio_state->audio_transport()->RecordedDataIsAvailable(
+ &audio_data[0], kSampleRate / 100, kNumChannels * 2, kNumChannels,
+ kSampleRate, 0, 0, 0, false, new_mic_level);
+ EXPECT_EQ(667u, new_mic_level);
+
+ audio_state->RemoveSendingStream(&stream);
+ }
}
TEST(AudioStateTest,
QueryingTransportForAudioShouldResultInGetAudioCallOnMixerSource) {
- ConfigHelper helper;
- auto audio_state = AudioState::Create(helper.config());
-
- FakeAudioSource fake_source;
- helper.mixer()->AddSource(&fake_source);
-
- EXPECT_CALL(fake_source, GetAudioFrameWithInfo(_, _))
- .WillOnce(
- ::testing::Invoke([](int sample_rate_hz, AudioFrame* audio_frame) {
- audio_frame->sample_rate_hz_ = sample_rate_hz;
- audio_frame->samples_per_channel_ = sample_rate_hz / 100;
- audio_frame->num_channels_ = kNumberOfChannels;
- return AudioMixer::Source::AudioFrameInfo::kNormal;
- }));
-
- int16_t audio_buffer[kSampleRate / 100 * kNumberOfChannels];
- size_t n_samples_out;
- int64_t elapsed_time_ms;
- int64_t ntp_time_ms;
- audio_state->audio_transport()->NeedMorePlayData(
- kSampleRate / 100, kNumberOfChannels * 2, kNumberOfChannels, kSampleRate,
- audio_buffer, n_samples_out, &elapsed_time_ms, &ntp_time_ms);
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(use_null_audio_processing);
+ auto audio_state = AudioState::Create(helper.config());
+
+ FakeAudioSource fake_source;
+ helper.mixer()->AddSource(&fake_source);
+
+ EXPECT_CALL(fake_source, GetAudioFrameWithInfo(_, _))
+ .WillOnce(
+ ::testing::Invoke([](int sample_rate_hz, AudioFrame* audio_frame) {
+ audio_frame->sample_rate_hz_ = sample_rate_hz;
+ audio_frame->samples_per_channel_ = sample_rate_hz / 100;
+ audio_frame->num_channels_ = kNumberOfChannels;
+ return AudioMixer::Source::AudioFrameInfo::kNormal;
+ }));
+
+ int16_t audio_buffer[kSampleRate / 100 * kNumberOfChannels];
+ size_t n_samples_out;
+ int64_t elapsed_time_ms;
+ int64_t ntp_time_ms;
+ audio_state->audio_transport()->NeedMorePlayData(
+ kSampleRate / 100, kNumberOfChannels * 2, kNumberOfChannels,
+ kSampleRate, audio_buffer, n_samples_out, &elapsed_time_ms,
+ &ntp_time_ms);
+ }
}
} // namespace test
} // namespace webrtc
diff --git a/audio/audio_transport_impl.cc b/audio/audio_transport_impl.cc
index a61ea73102..7648fb948f 100644
--- a/audio/audio_transport_impl.cc
+++ b/audio/audio_transport_impl.cc
@@ -49,13 +49,15 @@ void ProcessCaptureFrame(uint32_t delay_ms,
bool swap_stereo_channels,
AudioProcessing* audio_processing,
AudioFrame* audio_frame) {
- RTC_DCHECK(audio_processing);
RTC_DCHECK(audio_frame);
- audio_processing->set_stream_delay_ms(delay_ms);
- audio_processing->set_stream_key_pressed(key_pressed);
- int error = ProcessAudioFrame(audio_processing, audio_frame);
+ if (audio_processing) {
+ audio_processing->set_stream_delay_ms(delay_ms);
+ audio_processing->set_stream_key_pressed(key_pressed);
+ int error = ProcessAudioFrame(audio_processing, audio_frame);
+
+ RTC_DCHECK_EQ(0, error) << "ProcessStream() error: " << error;
+ }
- RTC_DCHECK_EQ(0, error) << "ProcessStream() error: " << error;
if (swap_stereo_channels) {
AudioFrameOperations::SwapStereoChannels(audio_frame);
}
@@ -85,7 +87,6 @@ AudioTransportImpl::AudioTransportImpl(AudioMixer* mixer,
AudioProcessing* audio_processing)
: audio_processing_(audio_processing), mixer_(mixer) {
RTC_DCHECK(mixer);
- RTC_DCHECK(audio_processing);
}
AudioTransportImpl::~AudioTransportImpl() {}
@@ -137,7 +138,8 @@ int32_t AudioTransportImpl::RecordedDataIsAvailable(
// if we're using this feature or not.
// TODO(solenberg): GetConfig() takes a lock. Work around that.
bool typing_detected = false;
- if (audio_processing_->GetConfig().voice_detection.enabled) {
+ if (audio_processing_ &&
+ audio_processing_->GetConfig().voice_detection.enabled) {
if (audio_frame->vad_activity_ != AudioFrame::kVadUnknown) {
bool vad_active = audio_frame->vad_activity_ == AudioFrame::kVadActive;
typing_detected = typing_detection_.Process(key_pressed, vad_active);
@@ -192,8 +194,11 @@ int32_t AudioTransportImpl::NeedMorePlayData(const size_t nSamples,
*elapsed_time_ms = mixed_frame_.elapsed_time_ms_;
*ntp_time_ms = mixed_frame_.ntp_time_ms_;
- const auto error = ProcessReverseAudioFrame(audio_processing_, &mixed_frame_);
- RTC_DCHECK_EQ(error, AudioProcessing::kNoError);
+ if (audio_processing_) {
+ const auto error =
+ ProcessReverseAudioFrame(audio_processing_, &mixed_frame_);
+ RTC_DCHECK_EQ(error, AudioProcessing::kNoError);
+ }
nSamplesOut = Resample(mixed_frame_, samplesPerSec, &render_resampler_,
static_cast<int16_t*>(audioSamples));
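
The shape of these changes is that the APM acts as an optional stage: when it is absent the processing call is skipped, while the rest of the capture path (channel swapping, resampling, encoding) still runs. A self-contained sketch of that idiom, with Frame and CaptureProcessor as hypothetical stand-ins rather than WebRTC types:

#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>

struct Frame {
  std::vector<int16_t> samples;  // Interleaved stereo samples.
};

struct CaptureProcessor {
  void Process(Frame*) { /* echo cancellation, noise suppression, ... */ }
};

void HandleCapturedFrame(CaptureProcessor* processor,
                         bool swap_stereo_channels,
                         Frame* frame) {
  if (processor) {
    processor->Process(frame);  // Skipped entirely when no APM is present.
  }
  if (swap_stereo_channels) {
    // The remaining steps run whether or not a processor was available.
    for (std::size_t i = 0; i + 1 < frame->samples.size(); i += 2) {
      std::swap(frame->samples[i], frame->samples[i + 1]);
    }
  }
}
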
diff --git a/call/call_unittest.cc b/call/call_unittest.cc
index a8cf534f90..cf727d4044 100644
--- a/call/call_unittest.cc
+++ b/call/call_unittest.cc
@@ -35,13 +35,15 @@
namespace {
struct CallHelper {
- CallHelper() {
+ explicit CallHelper(bool use_null_audio_processing) {
task_queue_factory_ = webrtc::CreateDefaultTaskQueueFactory();
webrtc::AudioState::Config audio_state_config;
audio_state_config.audio_mixer =
new rtc::RefCountedObject<webrtc::test::MockAudioMixer>();
audio_state_config.audio_processing =
- new rtc::RefCountedObject<webrtc::test::MockAudioProcessing>();
+ use_null_audio_processing
+ ? nullptr
+ : new rtc::RefCountedObject<webrtc::test::MockAudioProcessing>();
audio_state_config.audio_device_module =
new rtc::RefCountedObject<webrtc::test::MockAudioDeviceModule>();
webrtc::Call::Config config(&event_log_);
@@ -64,236 +66,261 @@ struct CallHelper {
namespace webrtc {
TEST(CallTest, ConstructDestruct) {
- CallHelper call;
+ for (bool use_null_audio_processing : {false, true}) {
+ CallHelper call(use_null_audio_processing);
+ }
}
TEST(CallTest, CreateDestroy_AudioSendStream) {
- CallHelper call;
- MockTransport send_transport;
- AudioSendStream::Config config(&send_transport);
- config.rtp.ssrc = 42;
- AudioSendStream* stream = call->CreateAudioSendStream(config);
- EXPECT_NE(stream, nullptr);
- call->DestroyAudioSendStream(stream);
+ for (bool use_null_audio_processing : {false, true}) {
+ CallHelper call(use_null_audio_processing);
+ MockTransport send_transport;
+ AudioSendStream::Config config(&send_transport);
+ config.rtp.ssrc = 42;
+ AudioSendStream* stream = call->CreateAudioSendStream(config);
+ EXPECT_NE(stream, nullptr);
+ call->DestroyAudioSendStream(stream);
+ }
}
TEST(CallTest, CreateDestroy_AudioReceiveStream) {
- CallHelper call;
- AudioReceiveStream::Config config;
- MockTransport rtcp_send_transport;
- config.rtp.remote_ssrc = 42;
- config.rtcp_send_transport = &rtcp_send_transport;
- config.decoder_factory =
- new rtc::RefCountedObject<webrtc::MockAudioDecoderFactory>();
- AudioReceiveStream* stream = call->CreateAudioReceiveStream(config);
- EXPECT_NE(stream, nullptr);
- call->DestroyAudioReceiveStream(stream);
+ for (bool use_null_audio_processing : {false, true}) {
+ CallHelper call(use_null_audio_processing);
+ AudioReceiveStream::Config config;
+ MockTransport rtcp_send_transport;
+ config.rtp.remote_ssrc = 42;
+ config.rtcp_send_transport = &rtcp_send_transport;
+ config.decoder_factory =
+ new rtc::RefCountedObject<webrtc::MockAudioDecoderFactory>();
+ AudioReceiveStream* stream = call->CreateAudioReceiveStream(config);
+ EXPECT_NE(stream, nullptr);
+ call->DestroyAudioReceiveStream(stream);
+ }
}
TEST(CallTest, CreateDestroy_AudioSendStreams) {
- CallHelper call;
- MockTransport send_transport;
- AudioSendStream::Config config(&send_transport);
- std::list<AudioSendStream*> streams;
- for (int i = 0; i < 2; ++i) {
- for (uint32_t ssrc = 0; ssrc < 1234567; ssrc += 34567) {
- config.rtp.ssrc = ssrc;
- AudioSendStream* stream = call->CreateAudioSendStream(config);
- EXPECT_NE(stream, nullptr);
- if (ssrc & 1) {
- streams.push_back(stream);
- } else {
- streams.push_front(stream);
+ for (bool use_null_audio_processing : {false, true}) {
+ CallHelper call(use_null_audio_processing);
+ MockTransport send_transport;
+ AudioSendStream::Config config(&send_transport);
+ std::list<AudioSendStream*> streams;
+ for (int i = 0; i < 2; ++i) {
+ for (uint32_t ssrc = 0; ssrc < 1234567; ssrc += 34567) {
+ config.rtp.ssrc = ssrc;
+ AudioSendStream* stream = call->CreateAudioSendStream(config);
+ EXPECT_NE(stream, nullptr);
+ if (ssrc & 1) {
+ streams.push_back(stream);
+ } else {
+ streams.push_front(stream);
+ }
}
+ for (auto s : streams) {
+ call->DestroyAudioSendStream(s);
+ }
+ streams.clear();
}
- for (auto s : streams) {
- call->DestroyAudioSendStream(s);
- }
- streams.clear();
}
}
TEST(CallTest, CreateDestroy_AudioReceiveStreams) {
- CallHelper call;
- AudioReceiveStream::Config config;
- MockTransport rtcp_send_transport;
- config.rtcp_send_transport = &rtcp_send_transport;
- config.decoder_factory =
- new rtc::RefCountedObject<webrtc::MockAudioDecoderFactory>();
- std::list<AudioReceiveStream*> streams;
- for (int i = 0; i < 2; ++i) {
- for (uint32_t ssrc = 0; ssrc < 1234567; ssrc += 34567) {
- config.rtp.remote_ssrc = ssrc;
- AudioReceiveStream* stream = call->CreateAudioReceiveStream(config);
- EXPECT_NE(stream, nullptr);
- if (ssrc & 1) {
- streams.push_back(stream);
- } else {
- streams.push_front(stream);
+ for (bool use_null_audio_processing : {false, true}) {
+ CallHelper call(use_null_audio_processing);
+ AudioReceiveStream::Config config;
+ MockTransport rtcp_send_transport;
+ config.rtcp_send_transport = &rtcp_send_transport;
+ config.decoder_factory =
+ new rtc::RefCountedObject<webrtc::MockAudioDecoderFactory>();
+ std::list<AudioReceiveStream*> streams;
+ for (int i = 0; i < 2; ++i) {
+ for (uint32_t ssrc = 0; ssrc < 1234567; ssrc += 34567) {
+ config.rtp.remote_ssrc = ssrc;
+ AudioReceiveStream* stream = call->CreateAudioReceiveStream(config);
+ EXPECT_NE(stream, nullptr);
+ if (ssrc & 1) {
+ streams.push_back(stream);
+ } else {
+ streams.push_front(stream);
+ }
}
+ for (auto s : streams) {
+ call->DestroyAudioReceiveStream(s);
+ }
+ streams.clear();
}
- for (auto s : streams) {
- call->DestroyAudioReceiveStream(s);
- }
- streams.clear();
}
}
TEST(CallTest, CreateDestroy_AssociateAudioSendReceiveStreams_RecvFirst) {
- CallHelper call;
- AudioReceiveStream::Config recv_config;
- MockTransport rtcp_send_transport;
- recv_config.rtp.remote_ssrc = 42;
- recv_config.rtp.local_ssrc = 777;
- recv_config.rtcp_send_transport = &rtcp_send_transport;
- recv_config.decoder_factory =
- new rtc::RefCountedObject<webrtc::MockAudioDecoderFactory>();
- AudioReceiveStream* recv_stream = call->CreateAudioReceiveStream(recv_config);
- EXPECT_NE(recv_stream, nullptr);
-
- MockTransport send_transport;
- AudioSendStream::Config send_config(&send_transport);
- send_config.rtp.ssrc = 777;
- AudioSendStream* send_stream = call->CreateAudioSendStream(send_config);
- EXPECT_NE(send_stream, nullptr);
-
- internal::AudioReceiveStream* internal_recv_stream =
- static_cast<internal::AudioReceiveStream*>(recv_stream);
- EXPECT_EQ(send_stream,
- internal_recv_stream->GetAssociatedSendStreamForTesting());
-
- call->DestroyAudioSendStream(send_stream);
- EXPECT_EQ(nullptr, internal_recv_stream->GetAssociatedSendStreamForTesting());
-
- call->DestroyAudioReceiveStream(recv_stream);
+ for (bool use_null_audio_processing : {false, true}) {
+ CallHelper call(use_null_audio_processing);
+ AudioReceiveStream::Config recv_config;
+ MockTransport rtcp_send_transport;
+ recv_config.rtp.remote_ssrc = 42;
+ recv_config.rtp.local_ssrc = 777;
+ recv_config.rtcp_send_transport = &rtcp_send_transport;
+ recv_config.decoder_factory =
+ new rtc::RefCountedObject<webrtc::MockAudioDecoderFactory>();
+ AudioReceiveStream* recv_stream =
+ call->CreateAudioReceiveStream(recv_config);
+ EXPECT_NE(recv_stream, nullptr);
+
+ MockTransport send_transport;
+ AudioSendStream::Config send_config(&send_transport);
+ send_config.rtp.ssrc = 777;
+ AudioSendStream* send_stream = call->CreateAudioSendStream(send_config);
+ EXPECT_NE(send_stream, nullptr);
+
+ internal::AudioReceiveStream* internal_recv_stream =
+ static_cast<internal::AudioReceiveStream*>(recv_stream);
+ EXPECT_EQ(send_stream,
+ internal_recv_stream->GetAssociatedSendStreamForTesting());
+
+ call->DestroyAudioSendStream(send_stream);
+ EXPECT_EQ(nullptr,
+ internal_recv_stream->GetAssociatedSendStreamForTesting());
+
+ call->DestroyAudioReceiveStream(recv_stream);
+ }
}
TEST(CallTest, CreateDestroy_AssociateAudioSendReceiveStreams_SendFirst) {
- CallHelper call;
- MockTransport send_transport;
- AudioSendStream::Config send_config(&send_transport);
- send_config.rtp.ssrc = 777;
- AudioSendStream* send_stream = call->CreateAudioSendStream(send_config);
- EXPECT_NE(send_stream, nullptr);
-
- AudioReceiveStream::Config recv_config;
- MockTransport rtcp_send_transport;
- recv_config.rtp.remote_ssrc = 42;
- recv_config.rtp.local_ssrc = 777;
- recv_config.rtcp_send_transport = &rtcp_send_transport;
- recv_config.decoder_factory =
- new rtc::RefCountedObject<webrtc::MockAudioDecoderFactory>();
- AudioReceiveStream* recv_stream = call->CreateAudioReceiveStream(recv_config);
- EXPECT_NE(recv_stream, nullptr);
-
- internal::AudioReceiveStream* internal_recv_stream =
- static_cast<internal::AudioReceiveStream*>(recv_stream);
- EXPECT_EQ(send_stream,
- internal_recv_stream->GetAssociatedSendStreamForTesting());
-
- call->DestroyAudioReceiveStream(recv_stream);
-
- call->DestroyAudioSendStream(send_stream);
+ for (bool use_null_audio_processing : {false, true}) {
+ CallHelper call(use_null_audio_processing);
+ MockTransport send_transport;
+ AudioSendStream::Config send_config(&send_transport);
+ send_config.rtp.ssrc = 777;
+ AudioSendStream* send_stream = call->CreateAudioSendStream(send_config);
+ EXPECT_NE(send_stream, nullptr);
+
+ AudioReceiveStream::Config recv_config;
+ MockTransport rtcp_send_transport;
+ recv_config.rtp.remote_ssrc = 42;
+ recv_config.rtp.local_ssrc = 777;
+ recv_config.rtcp_send_transport = &rtcp_send_transport;
+ recv_config.decoder_factory =
+ new rtc::RefCountedObject<webrtc::MockAudioDecoderFactory>();
+ AudioReceiveStream* recv_stream =
+ call->CreateAudioReceiveStream(recv_config);
+ EXPECT_NE(recv_stream, nullptr);
+
+ internal::AudioReceiveStream* internal_recv_stream =
+ static_cast<internal::AudioReceiveStream*>(recv_stream);
+ EXPECT_EQ(send_stream,
+ internal_recv_stream->GetAssociatedSendStreamForTesting());
+
+ call->DestroyAudioReceiveStream(recv_stream);
+
+ call->DestroyAudioSendStream(send_stream);
+ }
}
TEST(CallTest, CreateDestroy_FlexfecReceiveStream) {
- CallHelper call;
- MockTransport rtcp_send_transport;
- FlexfecReceiveStream::Config config(&rtcp_send_transport);
- config.payload_type = 118;
- config.remote_ssrc = 38837212;
- config.protected_media_ssrcs = {27273};
-
- FlexfecReceiveStream* stream = call->CreateFlexfecReceiveStream(config);
- EXPECT_NE(stream, nullptr);
- call->DestroyFlexfecReceiveStream(stream);
+ for (bool use_null_audio_processing : {false, true}) {
+ CallHelper call(use_null_audio_processing);
+ MockTransport rtcp_send_transport;
+ FlexfecReceiveStream::Config config(&rtcp_send_transport);
+ config.payload_type = 118;
+ config.remote_ssrc = 38837212;
+ config.protected_media_ssrcs = {27273};
+
+ FlexfecReceiveStream* stream = call->CreateFlexfecReceiveStream(config);
+ EXPECT_NE(stream, nullptr);
+ call->DestroyFlexfecReceiveStream(stream);
+ }
}
TEST(CallTest, CreateDestroy_FlexfecReceiveStreams) {
- CallHelper call;
- MockTransport rtcp_send_transport;
- FlexfecReceiveStream::Config config(&rtcp_send_transport);
- config.payload_type = 118;
- std::list<FlexfecReceiveStream*> streams;
-
- for (int i = 0; i < 2; ++i) {
- for (uint32_t ssrc = 0; ssrc < 1234567; ssrc += 34567) {
- config.remote_ssrc = ssrc;
- config.protected_media_ssrcs = {ssrc + 1};
- FlexfecReceiveStream* stream = call->CreateFlexfecReceiveStream(config);
- EXPECT_NE(stream, nullptr);
- if (ssrc & 1) {
- streams.push_back(stream);
- } else {
- streams.push_front(stream);
+ for (bool use_null_audio_processing : {false, true}) {
+ CallHelper call(use_null_audio_processing);
+ MockTransport rtcp_send_transport;
+ FlexfecReceiveStream::Config config(&rtcp_send_transport);
+ config.payload_type = 118;
+ std::list<FlexfecReceiveStream*> streams;
+
+ for (int i = 0; i < 2; ++i) {
+ for (uint32_t ssrc = 0; ssrc < 1234567; ssrc += 34567) {
+ config.remote_ssrc = ssrc;
+ config.protected_media_ssrcs = {ssrc + 1};
+ FlexfecReceiveStream* stream = call->CreateFlexfecReceiveStream(config);
+ EXPECT_NE(stream, nullptr);
+ if (ssrc & 1) {
+ streams.push_back(stream);
+ } else {
+ streams.push_front(stream);
+ }
}
+ for (auto s : streams) {
+ call->DestroyFlexfecReceiveStream(s);
+ }
+ streams.clear();
}
- for (auto s : streams) {
- call->DestroyFlexfecReceiveStream(s);
- }
- streams.clear();
}
}
TEST(CallTest, MultipleFlexfecReceiveStreamsProtectingSingleVideoStream) {
- CallHelper call;
- MockTransport rtcp_send_transport;
- FlexfecReceiveStream::Config config(&rtcp_send_transport);
- config.payload_type = 118;
- config.protected_media_ssrcs = {1324234};
- FlexfecReceiveStream* stream;
- std::list<FlexfecReceiveStream*> streams;
-
- config.remote_ssrc = 838383;
- stream = call->CreateFlexfecReceiveStream(config);
- EXPECT_NE(stream, nullptr);
- streams.push_back(stream);
-
- config.remote_ssrc = 424993;
- stream = call->CreateFlexfecReceiveStream(config);
- EXPECT_NE(stream, nullptr);
- streams.push_back(stream);
-
- config.remote_ssrc = 99383;
- stream = call->CreateFlexfecReceiveStream(config);
- EXPECT_NE(stream, nullptr);
- streams.push_back(stream);
-
- config.remote_ssrc = 5548;
- stream = call->CreateFlexfecReceiveStream(config);
- EXPECT_NE(stream, nullptr);
- streams.push_back(stream);
-
- for (auto s : streams) {
- call->DestroyFlexfecReceiveStream(s);
+ for (bool use_null_audio_processing : {false, true}) {
+ CallHelper call(use_null_audio_processing);
+ MockTransport rtcp_send_transport;
+ FlexfecReceiveStream::Config config(&rtcp_send_transport);
+ config.payload_type = 118;
+ config.protected_media_ssrcs = {1324234};
+ FlexfecReceiveStream* stream;
+ std::list<FlexfecReceiveStream*> streams;
+
+ config.remote_ssrc = 838383;
+ stream = call->CreateFlexfecReceiveStream(config);
+ EXPECT_NE(stream, nullptr);
+ streams.push_back(stream);
+
+ config.remote_ssrc = 424993;
+ stream = call->CreateFlexfecReceiveStream(config);
+ EXPECT_NE(stream, nullptr);
+ streams.push_back(stream);
+
+ config.remote_ssrc = 99383;
+ stream = call->CreateFlexfecReceiveStream(config);
+ EXPECT_NE(stream, nullptr);
+ streams.push_back(stream);
+
+ config.remote_ssrc = 5548;
+ stream = call->CreateFlexfecReceiveStream(config);
+ EXPECT_NE(stream, nullptr);
+ streams.push_back(stream);
+
+ for (auto s : streams) {
+ call->DestroyFlexfecReceiveStream(s);
+ }
}
}
TEST(CallTest, RecreatingAudioStreamWithSameSsrcReusesRtpState) {
constexpr uint32_t kSSRC = 12345;
- CallHelper call;
+ for (bool use_null_audio_processing : {false, true}) {
+ CallHelper call(use_null_audio_processing);
- auto create_stream_and_get_rtp_state = [&](uint32_t ssrc) {
- MockTransport send_transport;
- AudioSendStream::Config config(&send_transport);
- config.rtp.ssrc = ssrc;
- AudioSendStream* stream = call->CreateAudioSendStream(config);
- const RtpState rtp_state =
- static_cast<internal::AudioSendStream*>(stream)->GetRtpState();
- call->DestroyAudioSendStream(stream);
- return rtp_state;
- };
-
- const RtpState rtp_state1 = create_stream_and_get_rtp_state(kSSRC);
- const RtpState rtp_state2 = create_stream_and_get_rtp_state(kSSRC);
-
- EXPECT_EQ(rtp_state1.sequence_number, rtp_state2.sequence_number);
- EXPECT_EQ(rtp_state1.start_timestamp, rtp_state2.start_timestamp);
- EXPECT_EQ(rtp_state1.timestamp, rtp_state2.timestamp);
- EXPECT_EQ(rtp_state1.capture_time_ms, rtp_state2.capture_time_ms);
- EXPECT_EQ(rtp_state1.last_timestamp_time_ms,
- rtp_state2.last_timestamp_time_ms);
- EXPECT_EQ(rtp_state1.media_has_been_sent, rtp_state2.media_has_been_sent);
+ auto create_stream_and_get_rtp_state = [&](uint32_t ssrc) {
+ MockTransport send_transport;
+ AudioSendStream::Config config(&send_transport);
+ config.rtp.ssrc = ssrc;
+ AudioSendStream* stream = call->CreateAudioSendStream(config);
+ const RtpState rtp_state =
+ static_cast<internal::AudioSendStream*>(stream)->GetRtpState();
+ call->DestroyAudioSendStream(stream);
+ return rtp_state;
+ };
+
+ const RtpState rtp_state1 = create_stream_and_get_rtp_state(kSSRC);
+ const RtpState rtp_state2 = create_stream_and_get_rtp_state(kSSRC);
+
+ EXPECT_EQ(rtp_state1.sequence_number, rtp_state2.sequence_number);
+ EXPECT_EQ(rtp_state1.start_timestamp, rtp_state2.start_timestamp);
+ EXPECT_EQ(rtp_state1.timestamp, rtp_state2.timestamp);
+ EXPECT_EQ(rtp_state1.capture_time_ms, rtp_state2.capture_time_ms);
+ EXPECT_EQ(rtp_state1.last_timestamp_time_ms,
+ rtp_state2.last_timestamp_time_ms);
+ EXPECT_EQ(rtp_state1.media_has_been_sent, rtp_state2.media_has_been_sent);
+ }
}
} // namespace webrtc
diff --git a/media/engine/webrtc_voice_engine.cc b/media/engine/webrtc_voice_engine.cc
index 42109e86a8..47bfa7d812 100644
--- a/media/engine/webrtc_voice_engine.cc
+++ b/media/engine/webrtc_voice_engine.cc
@@ -206,7 +206,6 @@ WebRtcVoiceEngine::WebRtcVoiceEngine(
RTC_LOG(LS_INFO) << "WebRtcVoiceEngine::WebRtcVoiceEngine";
RTC_DCHECK(decoder_factory);
RTC_DCHECK(encoder_factory);
- RTC_DCHECK(audio_processing);
// The rest of our initialization will happen in Init.
}
@@ -458,6 +457,14 @@ bool WebRtcVoiceEngine::ApplyOptions(const AudioOptions& options_in) {
*options.audio_jitter_buffer_enable_rtx_handling;
}
+ webrtc::AudioProcessing* ap = apm();
+ if (!ap) {
+ RTC_LOG(LS_INFO)
+ << "No audio processing module present. No software-provided effects "
+ "(AEC, NS, AGC, ...) are activated";
+ return true;
+ }
+
webrtc::Config config;
if (options.experimental_ns) {
@@ -469,7 +476,7 @@ bool WebRtcVoiceEngine::ApplyOptions(const AudioOptions& options_in) {
new webrtc::ExperimentalNs(*experimental_ns_));
}
- webrtc::AudioProcessing::Config apm_config = apm()->GetConfig();
+ webrtc::AudioProcessing::Config apm_config = ap->GetConfig();
if (options.echo_cancellation) {
apm_config.echo_canceller.enabled = *options.echo_cancellation;
@@ -524,8 +531,8 @@ bool WebRtcVoiceEngine::ApplyOptions(const AudioOptions& options_in) {
apm_config.voice_detection.enabled = *options.typing_detection;
}
- apm()->SetExtraOptions(config);
- apm()->ApplyConfig(apm_config);
+ ap->SetExtraOptions(config);
+ ap->ApplyConfig(apm_config);
return true;
}
@@ -571,18 +578,34 @@ void WebRtcVoiceEngine::UnregisterChannel(WebRtcVoiceMediaChannel* channel) {
bool WebRtcVoiceEngine::StartAecDump(webrtc::FileWrapper file,
int64_t max_size_bytes) {
RTC_DCHECK(worker_thread_checker_.IsCurrent());
+
+ webrtc::AudioProcessing* ap = apm();
+ if (!ap) {
+ RTC_LOG(LS_WARNING)
+ << "Attempting to start aecdump when no audio processing module is "
+ "present, hence no aecdump is started.";
+ return false;
+ }
+
auto aec_dump = webrtc::AecDumpFactory::Create(
std::move(file), max_size_bytes, low_priority_worker_queue_.get());
if (!aec_dump) {
return false;
}
- apm()->AttachAecDump(std::move(aec_dump));
+
+ ap->AttachAecDump(std::move(aec_dump));
return true;
}
void WebRtcVoiceEngine::StopAecDump() {
RTC_DCHECK(worker_thread_checker_.IsCurrent());
- apm()->DetachAecDump();
+ webrtc::AudioProcessing* ap = apm();
+ if (ap) {
+ ap->DetachAecDump();
+ } else {
+ RTC_LOG(LS_WARNING) << "Attempting to stop aecdump when no audio "
+ "processing module is present";
+ }
}
webrtc::AudioDeviceModule* WebRtcVoiceEngine::adm() {
@@ -593,7 +616,6 @@ webrtc::AudioDeviceModule* WebRtcVoiceEngine::adm() {
webrtc::AudioProcessing* WebRtcVoiceEngine::apm() const {
RTC_DCHECK(worker_thread_checker_.IsCurrent());
- RTC_DCHECK(apm_);
return apm_.get();
}
@@ -2141,7 +2163,10 @@ bool WebRtcVoiceMediaChannel::MuteStream(uint32_t ssrc, bool muted) {
for (const auto& kv : send_streams_) {
all_muted = all_muted && kv.second->muted();
}
- engine()->apm()->set_output_will_be_muted(all_muted);
+ webrtc::AudioProcessing* ap = engine()->apm();
+ if (ap) {
+ ap->set_output_will_be_muted(all_muted);
+ }
return true;
}
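
With these guards in place, StartAecDump() reports failure instead of crashing when the build has no audio processing module. The sketch below is a hypothetical caller-side wrapper (MaybeStartAecDump is not a WebRTC API) that degrades gracefully; the include paths are assumptions, and the call is expected to run on the engine's worker thread as the engine requires.

#include <utility>

#include "media/engine/webrtc_voice_engine.h"
#include "rtc_base/logging.h"

// Illustrative wrapper only; relies on StartAecDump() returning false when
// no audio processing module is compiled in.
bool MaybeStartAecDump(cricket::WebRtcVoiceEngine* engine,
                       webrtc::FileWrapper file,
                       int64_t max_size_bytes) {
  if (!engine->StartAecDump(std::move(file), max_size_bytes)) {
    RTC_LOG(LS_INFO) << "AEC dump not started: no audio processing module.";
    return false;
  }
  return true;
}
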
diff --git a/media/engine/webrtc_voice_engine_unittest.cc b/media/engine/webrtc_voice_engine_unittest.cc
index 5b6ed3a527..e7ebf8940f 100644
--- a/media/engine/webrtc_voice_engine_unittest.cc
+++ b/media/engine/webrtc_voice_engine_unittest.cc
@@ -139,25 +139,31 @@ void AdmSetupExpectations(webrtc::test::MockAudioDeviceModule* adm) {
// Tests that our stub library "works".
TEST(WebRtcVoiceEngineTestStubLibrary, StartupShutdown) {
- std::unique_ptr<webrtc::TaskQueueFactory> task_queue_factory =
- webrtc::CreateDefaultTaskQueueFactory();
- rtc::scoped_refptr<webrtc::test::MockAudioDeviceModule> adm =
- webrtc::test::MockAudioDeviceModule::CreateStrict();
- AdmSetupExpectations(adm);
- rtc::scoped_refptr<StrictMock<webrtc::test::MockAudioProcessing>> apm =
- new rtc::RefCountedObject<
- StrictMock<webrtc::test::MockAudioProcessing>>();
- webrtc::AudioProcessing::Config apm_config;
- EXPECT_CALL(*apm, GetConfig()).WillRepeatedly(ReturnPointee(&apm_config));
- EXPECT_CALL(*apm, ApplyConfig(_)).WillRepeatedly(SaveArg<0>(&apm_config));
- EXPECT_CALL(*apm, SetExtraOptions(::testing::_));
- EXPECT_CALL(*apm, DetachAecDump());
- {
- cricket::WebRtcVoiceEngine engine(
- task_queue_factory.get(), adm,
- webrtc::MockAudioEncoderFactory::CreateUnusedFactory(),
- webrtc::MockAudioDecoderFactory::CreateUnusedFactory(), nullptr, apm);
- engine.Init();
+ for (bool use_null_apm : {false, true}) {
+ std::unique_ptr<webrtc::TaskQueueFactory> task_queue_factory =
+ webrtc::CreateDefaultTaskQueueFactory();
+ rtc::scoped_refptr<webrtc::test::MockAudioDeviceModule> adm =
+ webrtc::test::MockAudioDeviceModule::CreateStrict();
+ AdmSetupExpectations(adm);
+ rtc::scoped_refptr<StrictMock<webrtc::test::MockAudioProcessing>> apm =
+ use_null_apm ? nullptr
+ : new rtc::RefCountedObject<
+ StrictMock<webrtc::test::MockAudioProcessing>>();
+
+ webrtc::AudioProcessing::Config apm_config;
+ if (!use_null_apm) {
+ EXPECT_CALL(*apm, GetConfig()).WillRepeatedly(ReturnPointee(&apm_config));
+ EXPECT_CALL(*apm, ApplyConfig(_)).WillRepeatedly(SaveArg<0>(&apm_config));
+ EXPECT_CALL(*apm, SetExtraOptions(::testing::_));
+ EXPECT_CALL(*apm, DetachAecDump());
+ }
+ {
+ cricket::WebRtcVoiceEngine engine(
+ task_queue_factory.get(), adm,
+ webrtc::MockAudioEncoderFactory::CreateUnusedFactory(),
+ webrtc::MockAudioDecoderFactory::CreateUnusedFactory(), nullptr, apm);
+ engine.Init();
+ }
}
}
@@ -170,24 +176,33 @@ class FakeAudioSource : public cricket::AudioSource {
void SetSink(Sink* sink) override {}
};
-class WebRtcVoiceEngineTestFake : public ::testing::Test {
+class WebRtcVoiceEngineTestFake : public ::testing::TestWithParam<bool> {
public:
WebRtcVoiceEngineTestFake() : WebRtcVoiceEngineTestFake("") {}
explicit WebRtcVoiceEngineTestFake(const char* field_trials)
- : task_queue_factory_(webrtc::CreateDefaultTaskQueueFactory()),
+ : use_null_apm_(GetParam()),
+ task_queue_factory_(webrtc::CreateDefaultTaskQueueFactory()),
adm_(webrtc::test::MockAudioDeviceModule::CreateStrict()),
- apm_(new rtc::RefCountedObject<
- StrictMock<webrtc::test::MockAudioProcessing>>()),
+ apm_(use_null_apm_
+ ? nullptr
+ : new rtc::RefCountedObject<
+ StrictMock<webrtc::test::MockAudioProcessing>>()),
call_(),
override_field_trials_(field_trials) {
// AudioDeviceModule.
AdmSetupExpectations(adm_);
- // AudioProcessing.
- EXPECT_CALL(*apm_, GetConfig()).WillRepeatedly(ReturnPointee(&apm_config_));
- EXPECT_CALL(*apm_, ApplyConfig(_)).WillRepeatedly(SaveArg<0>(&apm_config_));
- EXPECT_CALL(*apm_, SetExtraOptions(::testing::_));
- EXPECT_CALL(*apm_, DetachAecDump());
+
+ if (!use_null_apm_) {
+ // AudioProcessing.
+ EXPECT_CALL(*apm_, GetConfig())
+ .WillRepeatedly(ReturnPointee(&apm_config_));
+ EXPECT_CALL(*apm_, ApplyConfig(_))
+ .WillRepeatedly(SaveArg<0>(&apm_config_));
+ EXPECT_CALL(*apm_, SetExtraOptions(::testing::_));
+ EXPECT_CALL(*apm_, DetachAecDump());
+ }
+
// Default Options.
// TODO(kwiberg): We should use mock factories here, but a bunch of
// the tests here probe the specific set of codecs provided by the builtin
@@ -201,18 +216,22 @@ class WebRtcVoiceEngineTestFake : public ::testing::Test {
send_parameters_.codecs.push_back(kPcmuCodec);
recv_parameters_.codecs.push_back(kPcmuCodec);
- // Default Options.
- VerifyEchoCancellationSettings(/*enabled=*/true);
- EXPECT_TRUE(IsHighPassFilterEnabled());
- EXPECT_TRUE(IsTypingDetectionEnabled());
- EXPECT_TRUE(apm_config_.noise_suppression.enabled);
- EXPECT_EQ(apm_config_.noise_suppression.level, kDefaultNsLevel);
- VerifyGainControlEnabledCorrectly();
- VerifyGainControlDefaultSettings();
+ if (!use_null_apm_) {
+ // Default Options.
+ VerifyEchoCancellationSettings(/*enabled=*/true);
+ EXPECT_TRUE(IsHighPassFilterEnabled());
+ EXPECT_TRUE(IsTypingDetectionEnabled());
+ EXPECT_TRUE(apm_config_.noise_suppression.enabled);
+ EXPECT_EQ(apm_config_.noise_suppression.level, kDefaultNsLevel);
+ VerifyGainControlEnabledCorrectly();
+ VerifyGainControlDefaultSettings();
+ }
}
bool SetupChannel() {
- EXPECT_CALL(*apm_, SetExtraOptions(::testing::_));
+ if (!use_null_apm_) {
+ EXPECT_CALL(*apm_, SetExtraOptions(::testing::_));
+ }
channel_ = engine_->CreateMediaChannel(&call_, cricket::MediaConfig(),
cricket::AudioOptions(),
webrtc::CryptoOptions());
@@ -237,7 +256,9 @@ class WebRtcVoiceEngineTestFake : public ::testing::Test {
if (!channel_->AddSendStream(sp)) {
return false;
}
- EXPECT_CALL(*apm_, set_output_will_be_muted(false));
+ if (!use_null_apm_) {
+ EXPECT_CALL(*apm_, set_output_will_be_muted(false));
+ }
return channel_->SetAudioSend(kSsrcX, true, nullptr, &fake_source_);
}
@@ -288,13 +309,17 @@ class WebRtcVoiceEngineTestFake : public ::testing::Test {
EXPECT_CALL(*adm_, RecordingIsInitialized()).WillOnce(Return(false));
EXPECT_CALL(*adm_, Recording()).WillOnce(Return(false));
EXPECT_CALL(*adm_, InitRecording()).WillOnce(Return(0));
- EXPECT_CALL(*apm_, SetExtraOptions(::testing::_));
+ if (!use_null_apm_) {
+ EXPECT_CALL(*apm_, SetExtraOptions(::testing::_));
+ }
}
channel_->SetSend(enable);
}
void SetSendParameters(const cricket::AudioSendParameters& params) {
- EXPECT_CALL(*apm_, SetExtraOptions(::testing::_));
+ if (!use_null_apm_) {
+ EXPECT_CALL(*apm_, SetExtraOptions(::testing::_));
+ }
ASSERT_TRUE(channel_);
EXPECT_TRUE(channel_->SetSendParameters(params));
}
@@ -303,10 +328,12 @@ class WebRtcVoiceEngineTestFake : public ::testing::Test {
bool enable,
cricket::AudioSource* source,
const cricket::AudioOptions* options = nullptr) {
- EXPECT_CALL(*apm_, set_output_will_be_muted(!enable));
ASSERT_TRUE(channel_);
- if (enable && options) {
- EXPECT_CALL(*apm_, SetExtraOptions(::testing::_));
+ if (!use_null_apm_) {
+ EXPECT_CALL(*apm_, set_output_will_be_muted(!enable));
+ if (enable && options) {
+ EXPECT_CALL(*apm_, SetExtraOptions(::testing::_));
+ }
}
EXPECT_TRUE(channel_->SetAudioSend(ssrc, enable, options, source));
}
@@ -773,6 +800,7 @@ class WebRtcVoiceEngineTestFake : public ::testing::Test {
}
protected:
+ const bool use_null_apm_;
std::unique_ptr<webrtc::TaskQueueFactory> task_queue_factory_;
rtc::scoped_refptr<webrtc::test::MockAudioDeviceModule> adm_;
rtc::scoped_refptr<StrictMock<webrtc::test::MockAudioProcessing>> apm_;
@@ -788,13 +816,17 @@ class WebRtcVoiceEngineTestFake : public ::testing::Test {
webrtc::test::ScopedFieldTrials override_field_trials_;
};
+INSTANTIATE_TEST_SUITE_P(TestBothWithAndWithoutNullApm,
+ WebRtcVoiceEngineTestFake,
+ ::testing::Values(false, true));
+
// Tests that we can create and destroy a channel.
-TEST_F(WebRtcVoiceEngineTestFake, CreateMediaChannel) {
+TEST_P(WebRtcVoiceEngineTestFake, CreateMediaChannel) {
EXPECT_TRUE(SetupChannel());
}
// Test that we can add a send stream and that it has the correct defaults.
-TEST_F(WebRtcVoiceEngineTestFake, CreateSendStream) {
+TEST_P(WebRtcVoiceEngineTestFake, CreateSendStream) {
EXPECT_TRUE(SetupChannel());
EXPECT_TRUE(
channel_->AddSendStream(cricket::StreamParams::CreateLegacy(kSsrcX)));
@@ -807,7 +839,7 @@ TEST_F(WebRtcVoiceEngineTestFake, CreateSendStream) {
}
// Test that we can add a receive stream and that it has the correct defaults.
-TEST_F(WebRtcVoiceEngineTestFake, CreateRecvStream) {
+TEST_P(WebRtcVoiceEngineTestFake, CreateRecvStream) {
EXPECT_TRUE(SetupChannel());
EXPECT_TRUE(AddRecvStream(kSsrcX));
const webrtc::AudioReceiveStream::Config& config =
@@ -821,7 +853,7 @@ TEST_F(WebRtcVoiceEngineTestFake, CreateRecvStream) {
EXPECT_EQ("", config.sync_group);
}
-TEST_F(WebRtcVoiceEngineTestFake, OpusSupportsTransportCc) {
+TEST_P(WebRtcVoiceEngineTestFake, OpusSupportsTransportCc) {
const std::vector<cricket::AudioCodec>& codecs = engine_->send_codecs();
bool opus_found = false;
for (const cricket::AudioCodec& codec : codecs) {
@@ -834,7 +866,7 @@ TEST_F(WebRtcVoiceEngineTestFake, OpusSupportsTransportCc) {
}
// Test that we set our inbound codecs properly, including changing PT.
-TEST_F(WebRtcVoiceEngineTestFake, SetRecvCodecs) {
+TEST_P(WebRtcVoiceEngineTestFake, SetRecvCodecs) {
EXPECT_TRUE(SetupChannel());
cricket::AudioRecvParameters parameters;
parameters.codecs.push_back(kIsacCodec);
@@ -854,7 +886,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetRecvCodecs) {
}
// Test that we fail to set an unknown inbound codec.
-TEST_F(WebRtcVoiceEngineTestFake, SetRecvCodecsUnsupportedCodec) {
+TEST_P(WebRtcVoiceEngineTestFake, SetRecvCodecsUnsupportedCodec) {
EXPECT_TRUE(SetupChannel());
cricket::AudioRecvParameters parameters;
parameters.codecs.push_back(kIsacCodec);
@@ -863,7 +895,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetRecvCodecsUnsupportedCodec) {
}
// Test that we fail if we have duplicate types in the inbound list.
-TEST_F(WebRtcVoiceEngineTestFake, SetRecvCodecsDuplicatePayloadType) {
+TEST_P(WebRtcVoiceEngineTestFake, SetRecvCodecsDuplicatePayloadType) {
EXPECT_TRUE(SetupChannel());
cricket::AudioRecvParameters parameters;
parameters.codecs.push_back(kIsacCodec);
@@ -873,7 +905,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetRecvCodecsDuplicatePayloadType) {
}
// Test that we can decode OPUS without stereo parameters.
-TEST_F(WebRtcVoiceEngineTestFake, SetRecvCodecsWithOpusNoStereo) {
+TEST_P(WebRtcVoiceEngineTestFake, SetRecvCodecsWithOpusNoStereo) {
EXPECT_TRUE(SetupChannel());
cricket::AudioRecvParameters parameters;
parameters.codecs.push_back(kIsacCodec);
@@ -889,7 +921,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetRecvCodecsWithOpusNoStereo) {
}
// Test that we can decode OPUS with stereo = 0.
-TEST_F(WebRtcVoiceEngineTestFake, SetRecvCodecsWithOpus0Stereo) {
+TEST_P(WebRtcVoiceEngineTestFake, SetRecvCodecsWithOpus0Stereo) {
EXPECT_TRUE(SetupChannel());
cricket::AudioRecvParameters parameters;
parameters.codecs.push_back(kIsacCodec);
@@ -906,7 +938,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetRecvCodecsWithOpus0Stereo) {
}
// Test that we can decode OPUS with stereo = 1.
-TEST_F(WebRtcVoiceEngineTestFake, SetRecvCodecsWithOpus1Stereo) {
+TEST_P(WebRtcVoiceEngineTestFake, SetRecvCodecsWithOpus1Stereo) {
EXPECT_TRUE(SetupChannel());
cricket::AudioRecvParameters parameters;
parameters.codecs.push_back(kIsacCodec);
@@ -923,7 +955,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetRecvCodecsWithOpus1Stereo) {
}
// Test that changes to recv codecs are applied to all streams.
-TEST_F(WebRtcVoiceEngineTestFake, SetRecvCodecsWithMultipleStreams) {
+TEST_P(WebRtcVoiceEngineTestFake, SetRecvCodecsWithMultipleStreams) {
EXPECT_TRUE(SetupChannel());
cricket::AudioRecvParameters parameters;
parameters.codecs.push_back(kIsacCodec);
@@ -944,7 +976,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetRecvCodecsWithMultipleStreams) {
}
}
-TEST_F(WebRtcVoiceEngineTestFake, SetRecvCodecsAfterAddingStreams) {
+TEST_P(WebRtcVoiceEngineTestFake, SetRecvCodecsAfterAddingStreams) {
EXPECT_TRUE(SetupRecvStream());
cricket::AudioRecvParameters parameters;
parameters.codecs.push_back(kIsacCodec);
@@ -957,7 +989,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetRecvCodecsAfterAddingStreams) {
}
// Test that we can apply the same set of codecs again while playing.
-TEST_F(WebRtcVoiceEngineTestFake, SetRecvCodecsWhilePlaying) {
+TEST_P(WebRtcVoiceEngineTestFake, SetRecvCodecsWhilePlaying) {
EXPECT_TRUE(SetupRecvStream());
cricket::AudioRecvParameters parameters;
parameters.codecs.push_back(kIsacCodec);
@@ -974,7 +1006,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetRecvCodecsWhilePlaying) {
}
// Test that we can add a codec while playing.
-TEST_F(WebRtcVoiceEngineTestFake, AddRecvCodecsWhilePlaying) {
+TEST_P(WebRtcVoiceEngineTestFake, AddRecvCodecsWhilePlaying) {
EXPECT_TRUE(SetupRecvStream());
cricket::AudioRecvParameters parameters;
parameters.codecs.push_back(kIsacCodec);
@@ -989,7 +1021,7 @@ TEST_F(WebRtcVoiceEngineTestFake, AddRecvCodecsWhilePlaying) {
// Test that we accept adding the same codec with a different payload type.
// See: https://bugs.chromium.org/p/webrtc/issues/detail?id=5847
-TEST_F(WebRtcVoiceEngineTestFake, ChangeRecvCodecPayloadType) {
+TEST_P(WebRtcVoiceEngineTestFake, ChangeRecvCodecPayloadType) {
EXPECT_TRUE(SetupRecvStream());
cricket::AudioRecvParameters parameters;
parameters.codecs.push_back(kIsacCodec);
@@ -999,7 +1031,7 @@ TEST_F(WebRtcVoiceEngineTestFake, ChangeRecvCodecPayloadType) {
EXPECT_TRUE(channel_->SetRecvParameters(parameters));
}
-TEST_F(WebRtcVoiceEngineTestFake, SetSendBandwidthAuto) {
+TEST_P(WebRtcVoiceEngineTestFake, SetSendBandwidthAuto) {
EXPECT_TRUE(SetupSendStream());
// Test that when autobw is enabled, bitrate is kept as the default
@@ -1016,7 +1048,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetSendBandwidthAuto) {
TestMaxSendBandwidth(kOpusCodec, -1, true, 32000);
}
-TEST_F(WebRtcVoiceEngineTestFake, SetMaxSendBandwidthMultiRateAsCaller) {
+TEST_P(WebRtcVoiceEngineTestFake, SetMaxSendBandwidthMultiRateAsCaller) {
EXPECT_TRUE(SetupSendStream());
// ISAC, default bitrate == 32000.
@@ -1031,7 +1063,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetMaxSendBandwidthMultiRateAsCaller) {
TestMaxSendBandwidth(kOpusCodec, 600000, true, 510000);
}
-TEST_F(WebRtcVoiceEngineTestFake, SetMaxSendBandwidthFixedRateAsCaller) {
+TEST_P(WebRtcVoiceEngineTestFake, SetMaxSendBandwidthFixedRateAsCaller) {
EXPECT_TRUE(SetupSendStream());
// Test that we can only set a maximum bitrate for a fixed-rate codec
@@ -1047,7 +1079,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetMaxSendBandwidthFixedRateAsCaller) {
TestMaxSendBandwidth(kPcmuCodec, 64001, true, 64000);
}
-TEST_F(WebRtcVoiceEngineTestFake, SetMaxSendBandwidthMultiRateAsCallee) {
+TEST_P(WebRtcVoiceEngineTestFake, SetMaxSendBandwidthMultiRateAsCallee) {
EXPECT_TRUE(SetupChannel());
const int kDesiredBitrate = 128000;
cricket::AudioSendParameters parameters;
@@ -1064,7 +1096,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetMaxSendBandwidthMultiRateAsCallee) {
// Test that bitrate cannot be set for CBR codecs.
// Bitrate is ignored if it is higher than the fixed bitrate.
// Bitrate less than the fixed bitrate is an error.
-TEST_F(WebRtcVoiceEngineTestFake, SetMaxSendBandwidthCbr) {
+TEST_P(WebRtcVoiceEngineTestFake, SetMaxSendBandwidthCbr) {
EXPECT_TRUE(SetupSendStream());
// PCMU, default bitrate == 64000.
@@ -1082,7 +1114,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetMaxSendBandwidthCbr) {
// Test that the per-stream bitrate limit and the global
// bitrate limit both apply.
-TEST_F(WebRtcVoiceEngineTestFake, SetMaxBitratePerStream) {
+TEST_P(WebRtcVoiceEngineTestFake, SetMaxBitratePerStream) {
EXPECT_TRUE(SetupSendStream());
// opus, default bitrate == 32000.
@@ -1104,7 +1136,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetMaxBitratePerStream) {
// Test that an attempt to set RtpParameters for a stream that does not exist
// fails.
-TEST_F(WebRtcVoiceEngineTestFake, CannotSetMaxBitrateForNonexistentStream) {
+TEST_P(WebRtcVoiceEngineTestFake, CannotSetMaxBitrateForNonexistentStream) {
EXPECT_TRUE(SetupChannel());
webrtc::RtpParameters nonexistent_parameters =
channel_->GetRtpSendParameters(kSsrcX);
@@ -1115,7 +1147,7 @@ TEST_F(WebRtcVoiceEngineTestFake, CannotSetMaxBitrateForNonexistentStream) {
channel_->SetRtpSendParameters(kSsrcX, nonexistent_parameters).ok());
}
-TEST_F(WebRtcVoiceEngineTestFake,
+TEST_P(WebRtcVoiceEngineTestFake,
CannotSetRtpSendParametersWithIncorrectNumberOfEncodings) {
// This test verifies that setting RtpParameters succeeds only if
// the structure contains exactly one encoding.
@@ -1133,7 +1165,7 @@ TEST_F(WebRtcVoiceEngineTestFake,
}
// Changing the SSRC through RtpParameters is not allowed.
-TEST_F(WebRtcVoiceEngineTestFake, CannotSetSsrcInRtpSendParameters) {
+TEST_P(WebRtcVoiceEngineTestFake, CannotSetSsrcInRtpSendParameters) {
EXPECT_TRUE(SetupSendStream());
webrtc::RtpParameters parameters = channel_->GetRtpSendParameters(kSsrcX);
parameters.encodings[0].ssrc = 0xdeadbeef;
@@ -1142,7 +1174,7 @@ TEST_F(WebRtcVoiceEngineTestFake, CannotSetSsrcInRtpSendParameters) {
// Test that a stream will not be sending if its encoding is made
// inactive through SetRtpSendParameters.
-TEST_F(WebRtcVoiceEngineTestFake, SetRtpParametersEncodingsActive) {
+TEST_P(WebRtcVoiceEngineTestFake, SetRtpParametersEncodingsActive) {
EXPECT_TRUE(SetupSendStream());
SetSend(true);
EXPECT_TRUE(GetSendStream(kSsrcX).IsSending());
@@ -1164,7 +1196,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetRtpParametersEncodingsActive) {
// Test that SetRtpSendParameters configures the correct encoding channel for
// each SSRC.
-TEST_F(WebRtcVoiceEngineTestFake, RtpParametersArePerStream) {
+TEST_P(WebRtcVoiceEngineTestFake, RtpParametersArePerStream) {
SetupForMultiSendStream();
// Create send streams.
for (uint32_t ssrc : kSsrcs4) {
@@ -1192,7 +1224,7 @@ TEST_F(WebRtcVoiceEngineTestFake, RtpParametersArePerStream) {
}
// Test that GetRtpSendParameters returns the currently configured codecs.
-TEST_F(WebRtcVoiceEngineTestFake, GetRtpSendParametersCodecs) {
+TEST_P(WebRtcVoiceEngineTestFake, GetRtpSendParametersCodecs) {
EXPECT_TRUE(SetupSendStream());
cricket::AudioSendParameters parameters;
parameters.codecs.push_back(kIsacCodec);
@@ -1206,7 +1238,7 @@ TEST_F(WebRtcVoiceEngineTestFake, GetRtpSendParametersCodecs) {
}
// Test that GetRtpSendParameters returns the currently configured RTCP CNAME.
-TEST_F(WebRtcVoiceEngineTestFake, GetRtpSendParametersRtcpCname) {
+TEST_P(WebRtcVoiceEngineTestFake, GetRtpSendParametersRtcpCname) {
cricket::StreamParams params = cricket::StreamParams::CreateLegacy(kSsrcX);
params.cname = "rtcpcname";
EXPECT_TRUE(SetupSendStream(params));
@@ -1215,7 +1247,7 @@ TEST_F(WebRtcVoiceEngineTestFake, GetRtpSendParametersRtcpCname) {
EXPECT_STREQ("rtcpcname", rtp_parameters.rtcp.cname.c_str());
}
-TEST_F(WebRtcVoiceEngineTestFake,
+TEST_P(WebRtcVoiceEngineTestFake,
DetectRtpSendParameterHeaderExtensionsChange) {
EXPECT_TRUE(SetupSendStream());
@@ -1230,7 +1262,7 @@ TEST_F(WebRtcVoiceEngineTestFake,
}
// Test that GetRtpSendParameters returns an SSRC.
-TEST_F(WebRtcVoiceEngineTestFake, GetRtpSendParametersSsrc) {
+TEST_P(WebRtcVoiceEngineTestFake, GetRtpSendParametersSsrc) {
EXPECT_TRUE(SetupSendStream());
webrtc::RtpParameters rtp_parameters = channel_->GetRtpSendParameters(kSsrcX);
ASSERT_EQ(1u, rtp_parameters.encodings.size());
@@ -1238,7 +1270,7 @@ TEST_F(WebRtcVoiceEngineTestFake, GetRtpSendParametersSsrc) {
}
// Test that if we set/get parameters multiple times, we get the same results.
-TEST_F(WebRtcVoiceEngineTestFake, SetAndGetRtpSendParameters) {
+TEST_P(WebRtcVoiceEngineTestFake, SetAndGetRtpSendParameters) {
EXPECT_TRUE(SetupSendStream());
cricket::AudioSendParameters parameters;
parameters.codecs.push_back(kIsacCodec);
@@ -1257,7 +1289,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetAndGetRtpSendParameters) {
// Test that max_bitrate_bps in send stream config gets updated correctly when
// SetRtpSendParameters is called.
-TEST_F(WebRtcVoiceEngineTestFake, SetRtpSendParameterUpdatesMaxBitrate) {
+TEST_P(WebRtcVoiceEngineTestFake, SetRtpSendParameterUpdatesMaxBitrate) {
webrtc::test::ScopedFieldTrials override_field_trials(
"WebRTC-Audio-SendSideBwe/Enabled/");
EXPECT_TRUE(SetupSendStream());
@@ -1279,7 +1311,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetRtpSendParameterUpdatesMaxBitrate) {
// Tests that when RTCRtpEncodingParameters.bitrate_priority gets set to
// a value <= 0, setting the parameters returns false.
-TEST_F(WebRtcVoiceEngineTestFake, SetRtpSendParameterInvalidBitratePriority) {
+TEST_P(WebRtcVoiceEngineTestFake, SetRtpSendParameterInvalidBitratePriority) {
EXPECT_TRUE(SetupSendStream());
webrtc::RtpParameters rtp_parameters = channel_->GetRtpSendParameters(kSsrcX);
EXPECT_EQ(1UL, rtp_parameters.encodings.size());
@@ -1294,7 +1326,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetRtpSendParameterInvalidBitratePriority) {
// Test that the bitrate_priority in the send stream config gets updated when
// SetRtpSendParameters is set for the VoiceMediaChannel.
-TEST_F(WebRtcVoiceEngineTestFake, SetRtpSendParameterUpdatesBitratePriority) {
+TEST_P(WebRtcVoiceEngineTestFake, SetRtpSendParameterUpdatesBitratePriority) {
EXPECT_TRUE(SetupSendStream());
webrtc::RtpParameters rtp_parameters = channel_->GetRtpSendParameters(kSsrcX);
@@ -1314,7 +1346,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetRtpSendParameterUpdatesBitratePriority) {
}
// Test that GetRtpReceiveParameters returns the currently configured codecs.
-TEST_F(WebRtcVoiceEngineTestFake, GetRtpReceiveParametersCodecs) {
+TEST_P(WebRtcVoiceEngineTestFake, GetRtpReceiveParametersCodecs) {
EXPECT_TRUE(SetupRecvStream());
cricket::AudioRecvParameters parameters;
parameters.codecs.push_back(kIsacCodec);
@@ -1329,7 +1361,7 @@ TEST_F(WebRtcVoiceEngineTestFake, GetRtpReceiveParametersCodecs) {
}
// Test that GetRtpReceiveParameters returns an SSRC.
-TEST_F(WebRtcVoiceEngineTestFake, GetRtpReceiveParametersSsrc) {
+TEST_P(WebRtcVoiceEngineTestFake, GetRtpReceiveParametersSsrc) {
EXPECT_TRUE(SetupRecvStream());
webrtc::RtpParameters rtp_parameters =
channel_->GetRtpReceiveParameters(kSsrcX);
@@ -1338,7 +1370,7 @@ TEST_F(WebRtcVoiceEngineTestFake, GetRtpReceiveParametersSsrc) {
}
// Test that if we set/get parameters multiple times, we get the same results.
-TEST_F(WebRtcVoiceEngineTestFake, SetAndGetRtpReceiveParameters) {
+TEST_P(WebRtcVoiceEngineTestFake, SetAndGetRtpReceiveParameters) {
EXPECT_TRUE(SetupRecvStream());
cricket::AudioRecvParameters parameters;
parameters.codecs.push_back(kIsacCodec);
@@ -1358,7 +1390,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetAndGetRtpReceiveParameters) {
// aren't signaled. It should return an empty "RtpEncodingParameters" when
// configured to receive an unsignaled stream and no packets have been received
// yet, and start returning the SSRC once a packet has been received.
-TEST_F(WebRtcVoiceEngineTestFake, GetRtpReceiveParametersWithUnsignaledSsrc) {
+TEST_P(WebRtcVoiceEngineTestFake, GetRtpReceiveParametersWithUnsignaledSsrc) {
ASSERT_TRUE(SetupChannel());
// Call necessary methods to configure receiving a default stream as
// soon as it arrives.
@@ -1392,7 +1424,7 @@ TEST_F(WebRtcVoiceEngineTestFake, GetRtpReceiveParametersWithUnsignaledSsrc) {
}
// Test that we apply codecs properly.
-TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecs) {
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecs) {
EXPECT_TRUE(SetupSendStream());
cricket::AudioSendParameters parameters;
parameters.codecs.push_back(kIsacCodec);
@@ -1412,7 +1444,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecs) {
// Test that WebRtcVoiceEngine reconfigures, rather than recreates its
// AudioSendStream.
-TEST_F(WebRtcVoiceEngineTestFake, DontRecreateSendStream) {
+TEST_P(WebRtcVoiceEngineTestFake, DontRecreateSendStream) {
EXPECT_TRUE(SetupSendStream());
cricket::AudioSendParameters parameters;
parameters.codecs.push_back(kIsacCodec);
@@ -1433,7 +1465,7 @@ TEST_F(WebRtcVoiceEngineTestFake, DontRecreateSendStream) {
// tests should be available in AudioEncoderOpusTest.
// Test that if clockrate is not 48000 for opus, we fail.
-TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecOpusBadClockrate) {
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecOpusBadClockrate) {
EXPECT_TRUE(SetupSendStream());
cricket::AudioSendParameters parameters;
parameters.codecs.push_back(kOpusCodec);
@@ -1443,7 +1475,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecOpusBadClockrate) {
}
// Test that if channels=0 for opus, we fail.
-TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecOpusBad0ChannelsNoStereo) {
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecOpusBad0ChannelsNoStereo) {
EXPECT_TRUE(SetupSendStream());
cricket::AudioSendParameters parameters;
parameters.codecs.push_back(kOpusCodec);
@@ -1453,7 +1485,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecOpusBad0ChannelsNoStereo) {
}
// Test that if channels=0 and stereo=1 for opus, we fail.
-TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecOpusBad0Channels1Stereo) {
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecOpusBad0Channels1Stereo) {
EXPECT_TRUE(SetupSendStream());
cricket::AudioSendParameters parameters;
parameters.codecs.push_back(kOpusCodec);
@@ -1464,7 +1496,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecOpusBad0Channels1Stereo) {
}
// Test that if channel is 1 for opus and there's no stereo, we fail.
-TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecOpus1ChannelNoStereo) {
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecOpus1ChannelNoStereo) {
EXPECT_TRUE(SetupSendStream());
cricket::AudioSendParameters parameters;
parameters.codecs.push_back(kOpusCodec);
@@ -1474,7 +1506,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecOpus1ChannelNoStereo) {
}
// Test that if channel is 1 for opus and stereo=0, we fail.
-TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecOpusBad1Channel0Stereo) {
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecOpusBad1Channel0Stereo) {
EXPECT_TRUE(SetupSendStream());
cricket::AudioSendParameters parameters;
parameters.codecs.push_back(kOpusCodec);
@@ -1485,7 +1517,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecOpusBad1Channel0Stereo) {
}
// Test that if channel is 1 for opus and stereo=1, we fail.
-TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecOpusBad1Channel1Stereo) {
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecOpusBad1Channel1Stereo) {
EXPECT_TRUE(SetupSendStream());
cricket::AudioSendParameters parameters;
parameters.codecs.push_back(kOpusCodec);
@@ -1496,7 +1528,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecOpusBad1Channel1Stereo) {
}
// Test that with bitrate=0 and no stereo, bitrate is 32000.
-TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecOpusGood0BitrateNoStereo) {
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecOpusGood0BitrateNoStereo) {
EXPECT_TRUE(SetupSendStream());
cricket::AudioSendParameters parameters;
parameters.codecs.push_back(kOpusCodec);
@@ -1506,7 +1538,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecOpusGood0BitrateNoStereo) {
}
// Test that with bitrate=0 and stereo=0, bitrate is 32000.
-TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecOpusGood0Bitrate0Stereo) {
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecOpusGood0Bitrate0Stereo) {
EXPECT_TRUE(SetupSendStream());
cricket::AudioSendParameters parameters;
parameters.codecs.push_back(kOpusCodec);
@@ -1517,7 +1549,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecOpusGood0Bitrate0Stereo) {
}
// Test that with bitrate=invalid and stereo=0, bitrate is 32000.
-TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecOpusGoodXBitrate0Stereo) {
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecOpusGoodXBitrate0Stereo) {
EXPECT_TRUE(SetupSendStream());
cricket::AudioSendParameters parameters;
parameters.codecs.push_back(kOpusCodec);
@@ -1533,7 +1565,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecOpusGoodXBitrate0Stereo) {
}
// Test that with bitrate=0 and stereo=1, bitrate is 64000.
-TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecOpusGood0Bitrate1Stereo) {
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecOpusGood0Bitrate1Stereo) {
EXPECT_TRUE(SetupSendStream());
cricket::AudioSendParameters parameters;
parameters.codecs.push_back(kOpusCodec);
@@ -1544,7 +1576,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecOpusGood0Bitrate1Stereo) {
}
// Test that with bitrate=invalid and stereo=1, bitrate is 64000.
-TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecOpusGoodXBitrate1Stereo) {
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecOpusGoodXBitrate1Stereo) {
EXPECT_TRUE(SetupSendStream());
cricket::AudioSendParameters parameters;
parameters.codecs.push_back(kOpusCodec);
@@ -1560,7 +1592,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecOpusGoodXBitrate1Stereo) {
}
// Test that with bitrate=N and stereo unset, bitrate is N.
-TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecOpusGoodNBitrateNoStereo) {
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecOpusGoodNBitrateNoStereo) {
EXPECT_TRUE(SetupSendStream());
cricket::AudioSendParameters parameters;
parameters.codecs.push_back(kOpusCodec);
@@ -1575,7 +1607,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecOpusGoodNBitrateNoStereo) {
}
// Test that with bitrate=N and stereo=0, bitrate is N.
-TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecOpusGoodNBitrate0Stereo) {
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecOpusGoodNBitrate0Stereo) {
EXPECT_TRUE(SetupSendStream());
cricket::AudioSendParameters parameters;
parameters.codecs.push_back(kOpusCodec);
@@ -1586,7 +1618,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecOpusGoodNBitrate0Stereo) {
}
// Test that with bitrate=N and without any parameters, bitrate is N.
-TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecOpusGoodNBitrateNoParameters) {
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecOpusGoodNBitrateNoParameters) {
EXPECT_TRUE(SetupSendStream());
cricket::AudioSendParameters parameters;
parameters.codecs.push_back(kOpusCodec);
@@ -1596,7 +1628,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecOpusGoodNBitrateNoParameters) {
}
// Test that with bitrate=N and stereo=1, bitrate is N.
-TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecOpusGoodNBitrate1Stereo) {
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecOpusGoodNBitrate1Stereo) {
EXPECT_TRUE(SetupSendStream());
cricket::AudioSendParameters parameters;
parameters.codecs.push_back(kOpusCodec);
@@ -1606,25 +1638,25 @@ TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecOpusGoodNBitrate1Stereo) {
CheckSendCodecBitrate(kSsrcX, "opus", 30000);
}
-TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecsWithBitrates) {
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecsWithBitrates) {
SetSendCodecsShouldWorkForBitrates("100", 100000, "150", 150000, "200",
200000);
}
-TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecsWithHighMaxBitrate) {
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecsWithHighMaxBitrate) {
SetSendCodecsShouldWorkForBitrates("", 0, "", -1, "10000", 10000000);
}
-TEST_F(WebRtcVoiceEngineTestFake,
+TEST_P(WebRtcVoiceEngineTestFake,
SetSendCodecsWithoutBitratesUsesCorrectDefaults) {
SetSendCodecsShouldWorkForBitrates("", 0, "", -1, "", -1);
}
-TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecsCapsMinAndStartBitrate) {
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecsCapsMinAndStartBitrate) {
SetSendCodecsShouldWorkForBitrates("-1", 0, "-100", -1, "", -1);
}
-TEST_F(WebRtcVoiceEngineTestFake, SetMaxSendBandwidthForAudioDoesntAffectBwe) {
+TEST_P(WebRtcVoiceEngineTestFake, SetMaxSendBandwidthForAudioDoesntAffectBwe) {
SetSendCodecsShouldWorkForBitrates("100", 100000, "150", 150000, "200",
200000);
send_parameters_.max_bandwidth_bps = 100000;
@@ -1639,7 +1671,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetMaxSendBandwidthForAudioDoesntAffectBwe) {
}
// Test that we can enable NACK with opus as callee.
-TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecEnableNackAsCallee) {
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecEnableNackAsCallee) {
EXPECT_TRUE(SetupRecvStream());
cricket::AudioSendParameters parameters;
parameters.codecs.push_back(kOpusCodec);
@@ -1655,7 +1687,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecEnableNackAsCallee) {
}
// Test that we can enable NACK on receive streams.
-TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecEnableNackRecvStreams) {
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecEnableNackRecvStreams) {
EXPECT_TRUE(SetupSendStream());
EXPECT_TRUE(AddRecvStream(kSsrcY));
cricket::AudioSendParameters parameters;
@@ -1668,7 +1700,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecEnableNackRecvStreams) {
}
// Test that we can disable NACK on receive streams.
-TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecDisableNackRecvStreams) {
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecDisableNackRecvStreams) {
EXPECT_TRUE(SetupSendStream());
EXPECT_TRUE(AddRecvStream(kSsrcY));
cricket::AudioSendParameters parameters;
@@ -1685,7 +1717,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecDisableNackRecvStreams) {
}
// Test that NACK is enabled on a new receive stream.
-TEST_F(WebRtcVoiceEngineTestFake, AddRecvStreamEnableNack) {
+TEST_P(WebRtcVoiceEngineTestFake, AddRecvStreamEnableNack) {
EXPECT_TRUE(SetupSendStream());
cricket::AudioSendParameters parameters;
parameters.codecs.push_back(kIsacCodec);
@@ -1700,7 +1732,7 @@ TEST_F(WebRtcVoiceEngineTestFake, AddRecvStreamEnableNack) {
EXPECT_EQ(kRtpHistoryMs, GetRecvStreamConfig(kSsrcZ).rtp.nack.rtp_history_ms);
}
-TEST_F(WebRtcVoiceEngineTestFake, TransportCcCanBeEnabledAndDisabled) {
+TEST_P(WebRtcVoiceEngineTestFake, TransportCcCanBeEnabledAndDisabled) {
EXPECT_TRUE(SetupChannel());
cricket::AudioSendParameters send_parameters;
send_parameters.codecs.push_back(kOpusCodec);
@@ -1723,7 +1755,7 @@ TEST_F(WebRtcVoiceEngineTestFake, TransportCcCanBeEnabledAndDisabled) {
}
// Test that we can switch back and forth between Opus and ISAC with CN.
-TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecsIsacOpusSwitching) {
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecsIsacOpusSwitching) {
EXPECT_TRUE(SetupSendStream());
cricket::AudioSendParameters opus_parameters;
@@ -1755,7 +1787,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecsIsacOpusSwitching) {
}
// Test that we handle various ways of specifying bitrate.
-TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecsBitrate) {
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecsBitrate) {
EXPECT_TRUE(SetupSendStream());
cricket::AudioSendParameters parameters;
parameters.codecs.push_back(kIsacCodec); // bitrate == 32000
@@ -1814,7 +1846,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecsBitrate) {
}
// Test that we fail if no codecs are specified.
-TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecsNoCodecs) {
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecsNoCodecs) {
EXPECT_TRUE(SetupSendStream());
cricket::AudioSendParameters parameters;
EXPECT_FALSE(channel_->SetSendParameters(parameters));
@@ -1822,7 +1854,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecsNoCodecs) {
// Test that we can set send codecs even with telephone-event codec as the first
// one on the list.
-TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecsDTMFOnTop) {
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecsDTMFOnTop) {
EXPECT_TRUE(SetupSendStream());
cricket::AudioSendParameters parameters;
parameters.codecs.push_back(kTelephoneEventCodec1);
@@ -1839,7 +1871,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecsDTMFOnTop) {
}
// Test that CanInsertDtmf() is governed by the send flag
-TEST_F(WebRtcVoiceEngineTestFake, DTMFControlledBySendFlag) {
+TEST_P(WebRtcVoiceEngineTestFake, DTMFControlledBySendFlag) {
EXPECT_TRUE(SetupSendStream());
cricket::AudioSendParameters parameters;
parameters.codecs.push_back(kTelephoneEventCodec1);
@@ -1855,7 +1887,7 @@ TEST_F(WebRtcVoiceEngineTestFake, DTMFControlledBySendFlag) {
}
// Test that payload type range is limited for telephone-event codec.
-TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecsDTMFPayloadTypeOutOfRange) {
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecsDTMFPayloadTypeOutOfRange) {
EXPECT_TRUE(SetupSendStream());
cricket::AudioSendParameters parameters;
parameters.codecs.push_back(kTelephoneEventCodec2);
@@ -1878,7 +1910,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecsDTMFPayloadTypeOutOfRange) {
// Test that we can set send codecs even with CN codec as the first
// one on the list.
-TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecsCNOnTop) {
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecsCNOnTop) {
EXPECT_TRUE(SetupSendStream());
cricket::AudioSendParameters parameters;
parameters.codecs.push_back(kCn16000Codec);
@@ -1894,7 +1926,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecsCNOnTop) {
}
// Test that we set VAD and DTMF types correctly as caller.
-TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecsCNandDTMFAsCaller) {
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecsCNandDTMFAsCaller) {
EXPECT_TRUE(SetupSendStream());
cricket::AudioSendParameters parameters;
parameters.codecs.push_back(kIsacCodec);
@@ -1917,7 +1949,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecsCNandDTMFAsCaller) {
}
// Test that we set VAD and DTMF types correctly as callee.
-TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecsCNandDTMFAsCallee) {
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecsCNandDTMFAsCallee) {
EXPECT_TRUE(SetupChannel());
cricket::AudioSendParameters parameters;
parameters.codecs.push_back(kIsacCodec);
@@ -1944,7 +1976,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecsCNandDTMFAsCallee) {
// Test that we only apply VAD if we have a CN codec that matches the
// send codec clockrate.
-TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecsCNNoMatch) {
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecsCNNoMatch) {
EXPECT_TRUE(SetupSendStream());
cricket::AudioSendParameters parameters;
// Set ISAC(16K) and CN(16K). VAD should be activated.
@@ -1986,7 +2018,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecsCNNoMatch) {
}
// Test that we perform case-insensitive matching of codec names.
-TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecsCaseInsensitive) {
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecsCaseInsensitive) {
EXPECT_TRUE(SetupSendStream());
cricket::AudioSendParameters parameters;
parameters.codecs.push_back(kIsacCodec);
@@ -2014,7 +2046,7 @@ class WebRtcVoiceEngineWithSendSideBweTest : public WebRtcVoiceEngineTestFake {
: WebRtcVoiceEngineTestFake("WebRTC-Audio-SendSideBwe/Enabled/") {}
};
-TEST_F(WebRtcVoiceEngineWithSendSideBweTest,
+TEST_P(WebRtcVoiceEngineWithSendSideBweTest,
SupportsTransportSequenceNumberHeaderExtension) {
const std::vector<webrtc::RtpExtension> header_extensions =
GetDefaultEnabledRtpHeaderExtensions(*engine_);
@@ -2025,25 +2057,25 @@ TEST_F(WebRtcVoiceEngineWithSendSideBweTest,
}
// Test support for audio level header extension.
-TEST_F(WebRtcVoiceEngineTestFake, SendAudioLevelHeaderExtensions) {
+TEST_P(WebRtcVoiceEngineTestFake, SendAudioLevelHeaderExtensions) {
TestSetSendRtpHeaderExtensions(webrtc::RtpExtension::kAudioLevelUri);
}
-TEST_F(WebRtcVoiceEngineTestFake, RecvAudioLevelHeaderExtensions) {
+TEST_P(WebRtcVoiceEngineTestFake, RecvAudioLevelHeaderExtensions) {
TestSetRecvRtpHeaderExtensions(webrtc::RtpExtension::kAudioLevelUri);
}
// Test support for transport sequence number header extension.
-TEST_F(WebRtcVoiceEngineTestFake, SendTransportSequenceNumberHeaderExtensions) {
+TEST_P(WebRtcVoiceEngineTestFake, SendTransportSequenceNumberHeaderExtensions) {
TestSetSendRtpHeaderExtensions(
webrtc::RtpExtension::kTransportSequenceNumberUri);
}
-TEST_F(WebRtcVoiceEngineTestFake, RecvTransportSequenceNumberHeaderExtensions) {
+TEST_P(WebRtcVoiceEngineTestFake, RecvTransportSequenceNumberHeaderExtensions) {
TestSetRecvRtpHeaderExtensions(
webrtc::RtpExtension::kTransportSequenceNumberUri);
}
// Test that we can create a channel and start sending on it.
-TEST_F(WebRtcVoiceEngineTestFake, Send) {
+TEST_P(WebRtcVoiceEngineTestFake, Send) {
EXPECT_TRUE(SetupSendStream());
SetSendParameters(send_parameters_);
SetSend(true);
@@ -2054,7 +2086,7 @@ TEST_F(WebRtcVoiceEngineTestFake, Send) {
// Test that a channel will send if and only if it has a source and is enabled
// for sending.
-TEST_F(WebRtcVoiceEngineTestFake, SendStateWithAndWithoutSource) {
+TEST_P(WebRtcVoiceEngineTestFake, SendStateWithAndWithoutSource) {
EXPECT_TRUE(SetupSendStream());
SetSendParameters(send_parameters_);
SetAudioSend(kSsrcX, true, nullptr);
@@ -2067,7 +2099,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SendStateWithAndWithoutSource) {
}
// Test that a channel is muted/unmuted.
-TEST_F(WebRtcVoiceEngineTestFake, SendStateMuteUnmute) {
+TEST_P(WebRtcVoiceEngineTestFake, SendStateMuteUnmute) {
EXPECT_TRUE(SetupSendStream());
SetSendParameters(send_parameters_);
EXPECT_FALSE(GetSendStream(kSsrcX).muted());
@@ -2078,7 +2110,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SendStateMuteUnmute) {
}
// Test that SetSendParameters() does not alter a stream's send state.
-TEST_F(WebRtcVoiceEngineTestFake, SendStateWhenStreamsAreRecreated) {
+TEST_P(WebRtcVoiceEngineTestFake, SendStateWhenStreamsAreRecreated) {
EXPECT_TRUE(SetupSendStream());
EXPECT_FALSE(GetSendStream(kSsrcX).IsSending());
@@ -2103,7 +2135,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SendStateWhenStreamsAreRecreated) {
}
// Test that we can create a channel and start playing out on it.
-TEST_F(WebRtcVoiceEngineTestFake, Playout) {
+TEST_P(WebRtcVoiceEngineTestFake, Playout) {
EXPECT_TRUE(SetupRecvStream());
EXPECT_TRUE(channel_->SetRecvParameters(recv_parameters_));
channel_->SetPlayout(true);
@@ -2113,7 +2145,7 @@ TEST_F(WebRtcVoiceEngineTestFake, Playout) {
}
// Test that we can add and remove send streams.
-TEST_F(WebRtcVoiceEngineTestFake, CreateAndDeleteMultipleSendStreams) {
+TEST_P(WebRtcVoiceEngineTestFake, CreateAndDeleteMultipleSendStreams) {
SetupForMultiSendStream();
// Set the global state for sending.
@@ -2138,7 +2170,7 @@ TEST_F(WebRtcVoiceEngineTestFake, CreateAndDeleteMultipleSendStreams) {
}
// Test SetSendCodecs correctly configure the codecs in all send streams.
-TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecsWithMultipleSendStreams) {
+TEST_P(WebRtcVoiceEngineTestFake, SetSendCodecsWithMultipleSendStreams) {
SetupForMultiSendStream();
// Create send streams.
@@ -2177,7 +2209,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecsWithMultipleSendStreams) {
}
// Test we can SetSend on all send streams correctly.
-TEST_F(WebRtcVoiceEngineTestFake, SetSendWithMultipleSendStreams) {
+TEST_P(WebRtcVoiceEngineTestFake, SetSendWithMultipleSendStreams) {
SetupForMultiSendStream();
// Create the send channels and they should be in a "not sending" state.
@@ -2204,7 +2236,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetSendWithMultipleSendStreams) {
}
// Test we can set the correct statistics on all send streams.
-TEST_F(WebRtcVoiceEngineTestFake, GetStatsWithMultipleSendStreams) {
+TEST_P(WebRtcVoiceEngineTestFake, GetStatsWithMultipleSendStreams) {
SetupForMultiSendStream();
// Create send streams.
@@ -2267,7 +2299,7 @@ TEST_F(WebRtcVoiceEngineTestFake, GetStatsWithMultipleSendStreams) {
// Test that we can add and remove receive streams, and do proper send/playout.
// We can receive on multiple streams while sending one stream.
-TEST_F(WebRtcVoiceEngineTestFake, PlayoutWithMultipleStreams) {
+TEST_P(WebRtcVoiceEngineTestFake, PlayoutWithMultipleStreams) {
EXPECT_TRUE(SetupSendStream());
// Start playout without a receive stream.
@@ -2306,46 +2338,59 @@ TEST_F(WebRtcVoiceEngineTestFake, PlayoutWithMultipleStreams) {
EXPECT_TRUE(channel_->RemoveRecvStream(kSsrcY));
}
-TEST_F(WebRtcVoiceEngineTestFake, TxAgcConfigViaOptions) {
+TEST_P(WebRtcVoiceEngineTestFake, TxAgcConfigViaOptions) {
EXPECT_TRUE(SetupSendStream());
EXPECT_CALL(*adm_, BuiltInAGCIsAvailable())
.Times(::testing::AtLeast(1))
.WillRepeatedly(Return(false));
- const auto& agc_config = apm_config_.gain_controller1;
- // Ensure default options.
- VerifyGainControlEnabledCorrectly();
- VerifyGainControlDefaultSettings();
+ if (!use_null_apm_) {
+ // Ensure default options.
+ VerifyGainControlEnabledCorrectly();
+ VerifyGainControlDefaultSettings();
+ }
+
+ const auto& agc_config = apm_config_.gain_controller1;
send_parameters_.options.auto_gain_control = false;
SetSendParameters(send_parameters_);
- EXPECT_FALSE(agc_config.enabled);
+ if (!use_null_apm_) {
+ EXPECT_FALSE(agc_config.enabled);
+ }
send_parameters_.options.auto_gain_control = absl::nullopt;
send_parameters_.options.tx_agc_target_dbov = 5;
SetSendParameters(send_parameters_);
- EXPECT_EQ(5, agc_config.target_level_dbfs);
+ if (!use_null_apm_) {
+ EXPECT_EQ(5, agc_config.target_level_dbfs);
+ }
send_parameters_.options.tx_agc_target_dbov = absl::nullopt;
send_parameters_.options.tx_agc_digital_compression_gain = 10;
SetSendParameters(send_parameters_);
- EXPECT_EQ(10, agc_config.compression_gain_db);
+ if (!use_null_apm_) {
+ EXPECT_EQ(10, agc_config.compression_gain_db);
+ }
send_parameters_.options.tx_agc_digital_compression_gain = absl::nullopt;
send_parameters_.options.tx_agc_limiter = false;
SetSendParameters(send_parameters_);
- EXPECT_FALSE(agc_config.enable_limiter);
+ if (!use_null_apm_) {
+ EXPECT_FALSE(agc_config.enable_limiter);
+ }
send_parameters_.options.tx_agc_limiter = absl::nullopt;
SetSendParameters(send_parameters_);
- // Expect all options to have been preserved.
- EXPECT_FALSE(agc_config.enabled);
- EXPECT_EQ(5, agc_config.target_level_dbfs);
- EXPECT_EQ(10, agc_config.compression_gain_db);
- EXPECT_FALSE(agc_config.enable_limiter);
+ if (!use_null_apm_) {
+ // Expect all options to have been preserved.
+ EXPECT_FALSE(agc_config.enabled);
+ EXPECT_EQ(5, agc_config.target_level_dbfs);
+ EXPECT_EQ(10, agc_config.compression_gain_db);
+ EXPECT_FALSE(agc_config.enable_limiter);
+ }
}
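The if (!use_null_apm_) guards above illustrate the pattern used throughout the rest of this file: options are still applied via SetSendParameters() in both arms, but assertions that read back apm_config_ (or call the APM-inspecting helpers) only run when an AudioProcessing instance exists. A condensed, hypothetical helper showing the same idea:

// Hypothetical condensation of the guard pattern (not part of the patch):
// verify an APM setting only when an APM is actually attached.
void ExpectAgcDisabledIfApmPresent(
    bool use_null_apm,
    const webrtc::AudioProcessing::Config& apm_config) {
  if (use_null_apm) {
    return;  // Null APM: the option must still be accepted, but there is no
             // gain controller configuration to inspect.
  }
  EXPECT_FALSE(apm_config.gain_controller1.enabled);
}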
-TEST_F(WebRtcVoiceEngineTestFake, SetAudioNetworkAdaptorViaOptions) {
+TEST_P(WebRtcVoiceEngineTestFake, SetAudioNetworkAdaptorViaOptions) {
EXPECT_TRUE(SetupSendStream());
send_parameters_.options.audio_network_adaptor = true;
send_parameters_.options.audio_network_adaptor_config = {"1234"};
@@ -2354,7 +2399,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetAudioNetworkAdaptorViaOptions) {
GetAudioNetworkAdaptorConfig(kSsrcX));
}
-TEST_F(WebRtcVoiceEngineTestFake, AudioSendResetAudioNetworkAdaptor) {
+TEST_P(WebRtcVoiceEngineTestFake, AudioSendResetAudioNetworkAdaptor) {
EXPECT_TRUE(SetupSendStream());
send_parameters_.options.audio_network_adaptor = true;
send_parameters_.options.audio_network_adaptor_config = {"1234"};
@@ -2367,7 +2412,7 @@ TEST_F(WebRtcVoiceEngineTestFake, AudioSendResetAudioNetworkAdaptor) {
EXPECT_EQ(absl::nullopt, GetAudioNetworkAdaptorConfig(kSsrcX));
}
-TEST_F(WebRtcVoiceEngineTestFake, AudioNetworkAdaptorNotGetOverridden) {
+TEST_P(WebRtcVoiceEngineTestFake, AudioNetworkAdaptorNotGetOverridden) {
EXPECT_TRUE(SetupSendStream());
send_parameters_.options.audio_network_adaptor = true;
send_parameters_.options.audio_network_adaptor_config = {"1234"};
@@ -2398,12 +2443,12 @@ class WebRtcVoiceEngineWithSendSideBweWithOverheadTest
// Test that we can set the outgoing SSRC properly.
// SSRC is set in SetupSendStream() by calling AddSendStream.
-TEST_F(WebRtcVoiceEngineTestFake, SetSendSsrc) {
+TEST_P(WebRtcVoiceEngineTestFake, SetSendSsrc) {
EXPECT_TRUE(SetupSendStream());
EXPECT_TRUE(call_.GetAudioSendStream(kSsrcX));
}
-TEST_F(WebRtcVoiceEngineTestFake, GetStats) {
+TEST_P(WebRtcVoiceEngineTestFake, GetStats) {
// Setup. We need send codec to be set to get all stats.
EXPECT_TRUE(SetupSendStream());
// SetupSendStream adds a send stream with kSsrcX, so the receive
@@ -2464,7 +2509,7 @@ TEST_F(WebRtcVoiceEngineTestFake, GetStats) {
// Test that we can set the outgoing SSRC properly with multiple streams.
// SSRC is set in SetupSendStream() by calling AddSendStream.
-TEST_F(WebRtcVoiceEngineTestFake, SetSendSsrcWithMultipleStreams) {
+TEST_P(WebRtcVoiceEngineTestFake, SetSendSsrcWithMultipleStreams) {
EXPECT_TRUE(SetupSendStream());
EXPECT_TRUE(call_.GetAudioSendStream(kSsrcX));
EXPECT_TRUE(AddRecvStream(kSsrcY));
@@ -2473,7 +2518,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetSendSsrcWithMultipleStreams) {
// Test that the local SSRC is the same on sending and receiving channels if the
// receive channel is created before the send channel.
-TEST_F(WebRtcVoiceEngineTestFake, SetSendSsrcAfterCreatingReceiveChannel) {
+TEST_P(WebRtcVoiceEngineTestFake, SetSendSsrcAfterCreatingReceiveChannel) {
EXPECT_TRUE(SetupChannel());
EXPECT_TRUE(AddRecvStream(kSsrcY));
EXPECT_TRUE(
@@ -2483,7 +2528,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetSendSsrcAfterCreatingReceiveChannel) {
}
// Test that we can properly receive packets.
-TEST_F(WebRtcVoiceEngineTestFake, Recv) {
+TEST_P(WebRtcVoiceEngineTestFake, Recv) {
EXPECT_TRUE(SetupChannel());
EXPECT_TRUE(AddRecvStream(1));
DeliverPacket(kPcmuFrame, sizeof(kPcmuFrame));
@@ -2493,7 +2538,7 @@ TEST_F(WebRtcVoiceEngineTestFake, Recv) {
}
// Test that we can properly receive packets on multiple streams.
-TEST_F(WebRtcVoiceEngineTestFake, RecvWithMultipleStreams) {
+TEST_P(WebRtcVoiceEngineTestFake, RecvWithMultipleStreams) {
EXPECT_TRUE(SetupChannel());
const uint32_t ssrc1 = 1;
const uint32_t ssrc2 = 2;
@@ -2545,7 +2590,7 @@ TEST_F(WebRtcVoiceEngineTestFake, RecvWithMultipleStreams) {
}
// Test that receiving on an unsignaled stream works (a stream is created).
-TEST_F(WebRtcVoiceEngineTestFake, RecvUnsignaled) {
+TEST_P(WebRtcVoiceEngineTestFake, RecvUnsignaled) {
EXPECT_TRUE(SetupChannel());
EXPECT_EQ(0u, call_.GetAudioReceiveStreams().size());
@@ -2559,7 +2604,7 @@ TEST_F(WebRtcVoiceEngineTestFake, RecvUnsignaled) {
// Tests that when we add a stream without SSRCs, but contains a stream_id
// that it is stored and its stream id is later used when the first packet
// arrives to properly create a receive stream with a sync label.
-TEST_F(WebRtcVoiceEngineTestFake, RecvUnsignaledSsrcWithSignaledStreamId) {
+TEST_P(WebRtcVoiceEngineTestFake, RecvUnsignaledSsrcWithSignaledStreamId) {
const char kSyncLabel[] = "sync_label";
EXPECT_TRUE(SetupChannel());
cricket::StreamParams unsignaled_stream;
@@ -2591,7 +2636,7 @@ TEST_F(WebRtcVoiceEngineTestFake, RecvUnsignaledSsrcWithSignaledStreamId) {
// Test that receiving N unsignaled stream works (streams will be created), and
// that packets are forwarded to them all.
-TEST_F(WebRtcVoiceEngineTestFake, RecvMultipleUnsignaled) {
+TEST_P(WebRtcVoiceEngineTestFake, RecvMultipleUnsignaled) {
EXPECT_TRUE(SetupChannel());
unsigned char packet[sizeof(kPcmuFrame)];
memcpy(packet, kPcmuFrame, sizeof(kPcmuFrame));
@@ -2637,7 +2682,7 @@ TEST_F(WebRtcVoiceEngineTestFake, RecvMultipleUnsignaled) {
// Test that a default channel is created even after a signaled stream has been
// added, and that this stream will get any packets for unknown SSRCs.
-TEST_F(WebRtcVoiceEngineTestFake, RecvUnsignaledAfterSignaled) {
+TEST_P(WebRtcVoiceEngineTestFake, RecvUnsignaledAfterSignaled) {
EXPECT_TRUE(SetupChannel());
unsigned char packet[sizeof(kPcmuFrame)];
memcpy(packet, kPcmuFrame, sizeof(kPcmuFrame));
@@ -2672,7 +2717,7 @@ TEST_F(WebRtcVoiceEngineTestFake, RecvUnsignaledAfterSignaled) {
// Two tests to verify that adding a receive stream with the same SSRC as a
// previously added unsignaled stream will only recreate underlying stream
// objects if the stream parameters have changed.
-TEST_F(WebRtcVoiceEngineTestFake, AddRecvStreamAfterUnsignaled_NoRecreate) {
+TEST_P(WebRtcVoiceEngineTestFake, AddRecvStreamAfterUnsignaled_NoRecreate) {
EXPECT_TRUE(SetupChannel());
// Spawn unsignaled stream with SSRC=1.
@@ -2691,7 +2736,7 @@ TEST_F(WebRtcVoiceEngineTestFake, AddRecvStreamAfterUnsignaled_NoRecreate) {
EXPECT_EQ(audio_receive_stream_id, streams.front()->id());
}
-TEST_F(WebRtcVoiceEngineTestFake, AddRecvStreamAfterUnsignaled_Recreate) {
+TEST_P(WebRtcVoiceEngineTestFake, AddRecvStreamAfterUnsignaled_Recreate) {
EXPECT_TRUE(SetupChannel());
// Spawn unsignaled stream with SSRC=1.
@@ -2714,14 +2759,14 @@ TEST_F(WebRtcVoiceEngineTestFake, AddRecvStreamAfterUnsignaled_Recreate) {
}
// Test that AddRecvStream creates new stream.
-TEST_F(WebRtcVoiceEngineTestFake, AddRecvStream) {
+TEST_P(WebRtcVoiceEngineTestFake, AddRecvStream) {
EXPECT_TRUE(SetupRecvStream());
EXPECT_TRUE(AddRecvStream(1));
}
// Test that after adding a recv stream, we do not decode more codecs than
// those previously passed into SetRecvCodecs.
-TEST_F(WebRtcVoiceEngineTestFake, AddRecvStreamUnsupportedCodec) {
+TEST_P(WebRtcVoiceEngineTestFake, AddRecvStreamUnsupportedCodec) {
EXPECT_TRUE(SetupSendStream());
cricket::AudioRecvParameters parameters;
parameters.codecs.push_back(kIsacCodec);
@@ -2735,7 +2780,7 @@ TEST_F(WebRtcVoiceEngineTestFake, AddRecvStreamUnsupportedCodec) {
// Test that we properly clean up any streams that were added, even if
// not explicitly removed.
-TEST_F(WebRtcVoiceEngineTestFake, StreamCleanup) {
+TEST_P(WebRtcVoiceEngineTestFake, StreamCleanup) {
EXPECT_TRUE(SetupSendStream());
SetSendParameters(send_parameters_);
EXPECT_TRUE(AddRecvStream(1));
@@ -2749,52 +2794,52 @@ TEST_F(WebRtcVoiceEngineTestFake, StreamCleanup) {
EXPECT_EQ(0u, call_.GetAudioReceiveStreams().size());
}
-TEST_F(WebRtcVoiceEngineTestFake, TestAddRecvStreamSuccessWithZeroSsrc) {
+TEST_P(WebRtcVoiceEngineTestFake, TestAddRecvStreamSuccessWithZeroSsrc) {
EXPECT_TRUE(SetupSendStream());
EXPECT_TRUE(AddRecvStream(0));
}
-TEST_F(WebRtcVoiceEngineTestFake, TestAddRecvStreamFailWithSameSsrc) {
+TEST_P(WebRtcVoiceEngineTestFake, TestAddRecvStreamFailWithSameSsrc) {
EXPECT_TRUE(SetupChannel());
EXPECT_TRUE(AddRecvStream(1));
EXPECT_FALSE(AddRecvStream(1));
}
// Test the InsertDtmf on default send stream as caller.
-TEST_F(WebRtcVoiceEngineTestFake, InsertDtmfOnDefaultSendStreamAsCaller) {
+TEST_P(WebRtcVoiceEngineTestFake, InsertDtmfOnDefaultSendStreamAsCaller) {
TestInsertDtmf(0, true, kTelephoneEventCodec1);
}
// Test the InsertDtmf on default send stream as callee
-TEST_F(WebRtcVoiceEngineTestFake, InsertDtmfOnDefaultSendStreamAsCallee) {
+TEST_P(WebRtcVoiceEngineTestFake, InsertDtmfOnDefaultSendStreamAsCallee) {
TestInsertDtmf(0, false, kTelephoneEventCodec2);
}
// Test the InsertDtmf on specified send stream as caller.
-TEST_F(WebRtcVoiceEngineTestFake, InsertDtmfOnSendStreamAsCaller) {
+TEST_P(WebRtcVoiceEngineTestFake, InsertDtmfOnSendStreamAsCaller) {
TestInsertDtmf(kSsrcX, true, kTelephoneEventCodec2);
}
// Test the InsertDtmf on specified send stream as callee.
-TEST_F(WebRtcVoiceEngineTestFake, InsertDtmfOnSendStreamAsCallee) {
+TEST_P(WebRtcVoiceEngineTestFake, InsertDtmfOnSendStreamAsCallee) {
TestInsertDtmf(kSsrcX, false, kTelephoneEventCodec1);
}
// Test propagation of extmap allow mixed setting.
-TEST_F(WebRtcVoiceEngineTestFake, SetExtmapAllowMixedAsCaller) {
+TEST_P(WebRtcVoiceEngineTestFake, SetExtmapAllowMixedAsCaller) {
TestExtmapAllowMixedCaller(/*extmap_allow_mixed=*/true);
}
-TEST_F(WebRtcVoiceEngineTestFake, SetExtmapAllowMixedDisabledAsCaller) {
+TEST_P(WebRtcVoiceEngineTestFake, SetExtmapAllowMixedDisabledAsCaller) {
TestExtmapAllowMixedCaller(/*extmap_allow_mixed=*/false);
}
-TEST_F(WebRtcVoiceEngineTestFake, SetExtmapAllowMixedAsCallee) {
+TEST_P(WebRtcVoiceEngineTestFake, SetExtmapAllowMixedAsCallee) {
TestExtmapAllowMixedCallee(/*extmap_allow_mixed=*/true);
}
-TEST_F(WebRtcVoiceEngineTestFake, SetExtmapAllowMixedDisabledAsCallee) {
+TEST_P(WebRtcVoiceEngineTestFake, SetExtmapAllowMixedDisabledAsCallee) {
TestExtmapAllowMixedCallee(/*extmap_allow_mixed=*/false);
}
-TEST_F(WebRtcVoiceEngineTestFake, SetAudioOptions) {
+TEST_P(WebRtcVoiceEngineTestFake, SetAudioOptions) {
EXPECT_TRUE(SetupSendStream());
EXPECT_TRUE(AddRecvStream(kSsrcY));
EXPECT_CALL(*adm_, BuiltInAECIsAvailable())
@@ -2813,42 +2858,56 @@ TEST_F(WebRtcVoiceEngineTestFake, SetAudioOptions) {
// Nothing set in AudioOptions, so everything should be as default.
send_parameters_.options = cricket::AudioOptions();
SetSendParameters(send_parameters_);
- VerifyEchoCancellationSettings(/*enabled=*/true);
- EXPECT_TRUE(IsHighPassFilterEnabled());
- EXPECT_TRUE(IsTypingDetectionEnabled());
+ if (!use_null_apm_) {
+ VerifyEchoCancellationSettings(/*enabled=*/true);
+ EXPECT_TRUE(IsHighPassFilterEnabled());
+ EXPECT_TRUE(IsTypingDetectionEnabled());
+ }
EXPECT_EQ(200u, GetRecvStreamConfig(kSsrcY).jitter_buffer_max_packets);
EXPECT_FALSE(GetRecvStreamConfig(kSsrcY).jitter_buffer_fast_accelerate);
// Turn typing detection off.
send_parameters_.options.typing_detection = false;
SetSendParameters(send_parameters_);
- EXPECT_FALSE(IsTypingDetectionEnabled());
+ if (!use_null_apm_) {
+ EXPECT_FALSE(IsTypingDetectionEnabled());
+ }
// Leave typing detection unchanged, but non-default.
send_parameters_.options.typing_detection = absl::nullopt;
SetSendParameters(send_parameters_);
- EXPECT_FALSE(IsTypingDetectionEnabled());
+ if (!use_null_apm_) {
+ EXPECT_FALSE(IsTypingDetectionEnabled());
+ }
// Turn typing detection on.
send_parameters_.options.typing_detection = true;
SetSendParameters(send_parameters_);
- EXPECT_TRUE(IsTypingDetectionEnabled());
+ if (!use_null_apm_) {
+ EXPECT_TRUE(IsTypingDetectionEnabled());
+ }
// Turn echo cancellation off
send_parameters_.options.echo_cancellation = false;
SetSendParameters(send_parameters_);
- VerifyEchoCancellationSettings(/*enabled=*/false);
+ if (!use_null_apm_) {
+ VerifyEchoCancellationSettings(/*enabled=*/false);
+ }
// Turn echo cancellation back on, with settings, and make sure
// nothing else changed.
send_parameters_.options.echo_cancellation = true;
SetSendParameters(send_parameters_);
- VerifyEchoCancellationSettings(/*enabled=*/true);
+ if (!use_null_apm_) {
+ VerifyEchoCancellationSettings(/*enabled=*/true);
+ }
// Turn off echo cancellation and delay agnostic aec.
send_parameters_.options.echo_cancellation = false;
SetSendParameters(send_parameters_);
- VerifyEchoCancellationSettings(/*enabled=*/false);
+ if (!use_null_apm_) {
+ VerifyEchoCancellationSettings(/*enabled=*/false);
+ }
// Restore AEC to be on to work with the following tests.
send_parameters_.options.echo_cancellation = true;
@@ -2857,51 +2916,62 @@ TEST_F(WebRtcVoiceEngineTestFake, SetAudioOptions) {
// Turn off AGC
send_parameters_.options.auto_gain_control = false;
SetSendParameters(send_parameters_);
- VerifyEchoCancellationSettings(/*enabled=*/true);
- EXPECT_FALSE(apm_config_.gain_controller1.enabled);
+ if (!use_null_apm_) {
+ VerifyEchoCancellationSettings(/*enabled=*/true);
+ EXPECT_FALSE(apm_config_.gain_controller1.enabled);
+ }
// Turn AGC back on
send_parameters_.options.auto_gain_control = true;
SetSendParameters(send_parameters_);
- VerifyEchoCancellationSettings(/*enabled=*/true);
- EXPECT_TRUE(apm_config_.gain_controller1.enabled);
+ if (!use_null_apm_) {
+ VerifyEchoCancellationSettings(/*enabled=*/true);
+ EXPECT_TRUE(apm_config_.gain_controller1.enabled);
+ }
// Turn off other options.
send_parameters_.options.noise_suppression = false;
send_parameters_.options.highpass_filter = false;
send_parameters_.options.stereo_swapping = true;
SetSendParameters(send_parameters_);
- VerifyEchoCancellationSettings(/*enabled=*/true);
- EXPECT_FALSE(IsHighPassFilterEnabled());
- EXPECT_TRUE(apm_config_.gain_controller1.enabled);
- EXPECT_FALSE(apm_config_.noise_suppression.enabled);
- EXPECT_EQ(apm_config_.noise_suppression.level, kDefaultNsLevel);
+ if (!use_null_apm_) {
+ VerifyEchoCancellationSettings(/*enabled=*/true);
+ EXPECT_FALSE(IsHighPassFilterEnabled());
+ EXPECT_TRUE(apm_config_.gain_controller1.enabled);
+ EXPECT_FALSE(apm_config_.noise_suppression.enabled);
+ EXPECT_EQ(apm_config_.noise_suppression.level, kDefaultNsLevel);
+ }
// Set options again to ensure it has no impact.
SetSendParameters(send_parameters_);
- VerifyEchoCancellationSettings(/*enabled=*/true);
- EXPECT_TRUE(apm_config_.gain_controller1.enabled);
- EXPECT_FALSE(apm_config_.noise_suppression.enabled);
- EXPECT_EQ(apm_config_.noise_suppression.level, kDefaultNsLevel);
+ if (!use_null_apm_) {
+ VerifyEchoCancellationSettings(/*enabled=*/true);
+ EXPECT_TRUE(apm_config_.gain_controller1.enabled);
+ EXPECT_FALSE(apm_config_.noise_suppression.enabled);
+ EXPECT_EQ(apm_config_.noise_suppression.level, kDefaultNsLevel);
+ }
}
-TEST_F(WebRtcVoiceEngineTestFake, SetOptionOverridesViaChannels) {
+TEST_P(WebRtcVoiceEngineTestFake, SetOptionOverridesViaChannels) {
EXPECT_TRUE(SetupSendStream());
EXPECT_CALL(*adm_, BuiltInAECIsAvailable())
- .Times(8)
+ .Times(use_null_apm_ ? 4 : 8)
.WillRepeatedly(Return(false));
EXPECT_CALL(*adm_, BuiltInAGCIsAvailable())
- .Times(8)
+ .Times(use_null_apm_ ? 7 : 8)
.WillRepeatedly(Return(false));
EXPECT_CALL(*adm_, BuiltInNSIsAvailable())
- .Times(8)
+ .Times(use_null_apm_ ? 5 : 8)
.WillRepeatedly(Return(false));
EXPECT_CALL(*adm_, RecordingIsInitialized())
.Times(2)
.WillRepeatedly(Return(false));
+
EXPECT_CALL(*adm_, Recording()).Times(2).WillRepeatedly(Return(false));
EXPECT_CALL(*adm_, InitRecording()).Times(2).WillRepeatedly(Return(0));
- EXPECT_CALL(*apm_, SetExtraOptions(::testing::_)).Times(10);
+ if (!use_null_apm_) {
+ EXPECT_CALL(*apm_, SetExtraOptions(::testing::_)).Times(10);
+ }
std::unique_ptr<cricket::WebRtcVoiceMediaChannel> channel1(
static_cast<cricket::WebRtcVoiceMediaChannel*>(
@@ -2928,60 +2998,72 @@ TEST_F(WebRtcVoiceEngineTestFake, SetOptionOverridesViaChannels) {
parameters_options_all.options.auto_gain_control = true;
parameters_options_all.options.noise_suppression = true;
EXPECT_TRUE(channel1->SetSendParameters(parameters_options_all));
- VerifyEchoCancellationSettings(/*enabled=*/true);
- VerifyGainControlEnabledCorrectly();
- EXPECT_TRUE(apm_config_.noise_suppression.enabled);
- EXPECT_EQ(apm_config_.noise_suppression.level, kDefaultNsLevel);
- EXPECT_EQ(parameters_options_all.options, channel1->options());
- EXPECT_TRUE(channel2->SetSendParameters(parameters_options_all));
- VerifyEchoCancellationSettings(/*enabled=*/true);
- VerifyGainControlEnabledCorrectly();
- EXPECT_EQ(parameters_options_all.options, channel2->options());
+ if (!use_null_apm_) {
+ VerifyEchoCancellationSettings(/*enabled=*/true);
+ VerifyGainControlEnabledCorrectly();
+ EXPECT_TRUE(apm_config_.noise_suppression.enabled);
+ EXPECT_EQ(apm_config_.noise_suppression.level, kDefaultNsLevel);
+ EXPECT_EQ(parameters_options_all.options, channel1->options());
+ EXPECT_TRUE(channel2->SetSendParameters(parameters_options_all));
+ VerifyEchoCancellationSettings(/*enabled=*/true);
+ VerifyGainControlEnabledCorrectly();
+ EXPECT_EQ(parameters_options_all.options, channel2->options());
+ }
// unset NS
cricket::AudioSendParameters parameters_options_no_ns = send_parameters_;
parameters_options_no_ns.options.noise_suppression = false;
EXPECT_TRUE(channel1->SetSendParameters(parameters_options_no_ns));
- VerifyEchoCancellationSettings(/*enabled=*/true);
- EXPECT_FALSE(apm_config_.noise_suppression.enabled);
- EXPECT_EQ(apm_config_.noise_suppression.level, kDefaultNsLevel);
- VerifyGainControlEnabledCorrectly();
cricket::AudioOptions expected_options = parameters_options_all.options;
- expected_options.echo_cancellation = true;
- expected_options.auto_gain_control = true;
- expected_options.noise_suppression = false;
- EXPECT_EQ(expected_options, channel1->options());
+ if (!use_null_apm_) {
+ VerifyEchoCancellationSettings(/*enabled=*/true);
+ EXPECT_FALSE(apm_config_.noise_suppression.enabled);
+ EXPECT_EQ(apm_config_.noise_suppression.level, kDefaultNsLevel);
+ VerifyGainControlEnabledCorrectly();
+ expected_options.echo_cancellation = true;
+ expected_options.auto_gain_control = true;
+ expected_options.noise_suppression = false;
+ EXPECT_EQ(expected_options, channel1->options());
+ }
// unset AGC
cricket::AudioSendParameters parameters_options_no_agc = send_parameters_;
parameters_options_no_agc.options.auto_gain_control = false;
EXPECT_TRUE(channel2->SetSendParameters(parameters_options_no_agc));
- VerifyEchoCancellationSettings(/*enabled=*/true);
- EXPECT_FALSE(apm_config_.gain_controller1.enabled);
- EXPECT_TRUE(apm_config_.noise_suppression.enabled);
- EXPECT_EQ(apm_config_.noise_suppression.level, kDefaultNsLevel);
- expected_options.echo_cancellation = true;
- expected_options.auto_gain_control = false;
- expected_options.noise_suppression = true;
- EXPECT_EQ(expected_options, channel2->options());
+ if (!use_null_apm_) {
+ VerifyEchoCancellationSettings(/*enabled=*/true);
+ EXPECT_FALSE(apm_config_.gain_controller1.enabled);
+ EXPECT_TRUE(apm_config_.noise_suppression.enabled);
+ EXPECT_EQ(apm_config_.noise_suppression.level, kDefaultNsLevel);
+ expected_options.echo_cancellation = true;
+ expected_options.auto_gain_control = false;
+ expected_options.noise_suppression = true;
+ EXPECT_EQ(expected_options, channel2->options());
+ }
EXPECT_TRUE(channel_->SetSendParameters(parameters_options_all));
- VerifyEchoCancellationSettings(/*enabled=*/true);
- VerifyGainControlEnabledCorrectly();
- EXPECT_TRUE(apm_config_.noise_suppression.enabled);
- EXPECT_EQ(apm_config_.noise_suppression.level, kDefaultNsLevel);
+ if (!use_null_apm_) {
+ VerifyEchoCancellationSettings(/*enabled=*/true);
+ VerifyGainControlEnabledCorrectly();
+ EXPECT_TRUE(apm_config_.noise_suppression.enabled);
+ EXPECT_EQ(apm_config_.noise_suppression.level, kDefaultNsLevel);
+ }
channel1->SetSend(true);
- VerifyEchoCancellationSettings(/*enabled=*/true);
- VerifyGainControlEnabledCorrectly();
- EXPECT_FALSE(apm_config_.noise_suppression.enabled);
- EXPECT_EQ(apm_config_.noise_suppression.level, kDefaultNsLevel);
+ if (!use_null_apm_) {
+ VerifyEchoCancellationSettings(/*enabled=*/true);
+ VerifyGainControlEnabledCorrectly();
+ EXPECT_FALSE(apm_config_.noise_suppression.enabled);
+ EXPECT_EQ(apm_config_.noise_suppression.level, kDefaultNsLevel);
+ }
channel2->SetSend(true);
- VerifyEchoCancellationSettings(/*enabled=*/true);
- EXPECT_FALSE(apm_config_.gain_controller1.enabled);
- EXPECT_TRUE(apm_config_.noise_suppression.enabled);
- EXPECT_EQ(apm_config_.noise_suppression.level, kDefaultNsLevel);
+ if (!use_null_apm_) {
+ VerifyEchoCancellationSettings(/*enabled=*/true);
+ EXPECT_FALSE(apm_config_.gain_controller1.enabled);
+ EXPECT_TRUE(apm_config_.noise_suppression.enabled);
+ EXPECT_EQ(apm_config_.noise_suppression.level, kDefaultNsLevel);
+ }
// Make sure settings take effect while we are sending.
cricket::AudioSendParameters parameters_options_no_agc_nor_ns =
@@ -2989,25 +3071,29 @@ TEST_F(WebRtcVoiceEngineTestFake, SetOptionOverridesViaChannels) {
parameters_options_no_agc_nor_ns.options.auto_gain_control = false;
parameters_options_no_agc_nor_ns.options.noise_suppression = false;
EXPECT_TRUE(channel2->SetSendParameters(parameters_options_no_agc_nor_ns));
- VerifyEchoCancellationSettings(/*enabled=*/true);
- EXPECT_FALSE(apm_config_.gain_controller1.enabled);
- EXPECT_FALSE(apm_config_.noise_suppression.enabled);
- EXPECT_EQ(apm_config_.noise_suppression.level, kDefaultNsLevel);
- expected_options.echo_cancellation = true;
- expected_options.auto_gain_control = false;
- expected_options.noise_suppression = false;
- EXPECT_EQ(expected_options, channel2->options());
+ if (!use_null_apm_) {
+ VerifyEchoCancellationSettings(/*enabled=*/true);
+ EXPECT_FALSE(apm_config_.gain_controller1.enabled);
+ EXPECT_FALSE(apm_config_.noise_suppression.enabled);
+ EXPECT_EQ(apm_config_.noise_suppression.level, kDefaultNsLevel);
+ expected_options.echo_cancellation = true;
+ expected_options.auto_gain_control = false;
+ expected_options.noise_suppression = false;
+ EXPECT_EQ(expected_options, channel2->options());
+ }
}
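A related detail in the SetOptionOverridesViaChannels hunk above: the EXPECT_CALL cardinalities on the mock ADM become parameter-dependent, and the SetExtraOptions expectation is guarded entirely, presumably because no mock APM is created in the null-APM arm and fewer option-handling paths reach the device module without an APM. The exact counts come from the production code paths; the snippet below only restates the shape of such a parameterized expectation.

// Mock-call cardinality parameterized on the APM variant, as in the hunk above.
EXPECT_CALL(*adm_, BuiltInAECIsAvailable())
    .Times(use_null_apm_ ? 4 : 8)
    .WillRepeatedly(::testing::Return(false));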
// This test verifies DSCP settings are properly applied on voice media channel.
-TEST_F(WebRtcVoiceEngineTestFake, TestSetDscpOptions) {
+TEST_P(WebRtcVoiceEngineTestFake, TestSetDscpOptions) {
EXPECT_TRUE(SetupSendStream());
cricket::FakeNetworkInterface network_interface;
cricket::MediaConfig config;
std::unique_ptr<cricket::WebRtcVoiceMediaChannel> channel;
webrtc::RtpParameters parameters;
- EXPECT_CALL(*apm_, SetExtraOptions(::testing::_)).Times(3);
+ if (!use_null_apm_) {
+ EXPECT_CALL(*apm_, SetExtraOptions(::testing::_)).Times(3);
+ }
channel.reset(static_cast<cricket::WebRtcVoiceMediaChannel*>(
engine_->CreateMediaChannel(&call_, config, cricket::AudioOptions(),
@@ -3055,7 +3141,7 @@ TEST_F(WebRtcVoiceEngineTestFake, TestSetDscpOptions) {
channel->SetInterface(nullptr, webrtc::MediaTransportConfig());
}
-TEST_F(WebRtcVoiceEngineTestFake, SetOutputVolume) {
+TEST_P(WebRtcVoiceEngineTestFake, SetOutputVolume) {
EXPECT_TRUE(SetupChannel());
EXPECT_FALSE(channel_->SetOutputVolume(kSsrcY, 0.5));
cricket::StreamParams stream;
@@ -3066,7 +3152,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetOutputVolume) {
EXPECT_DOUBLE_EQ(3, GetRecvStream(kSsrcY).gain());
}
-TEST_F(WebRtcVoiceEngineTestFake, SetOutputVolumeUnsignaledRecvStream) {
+TEST_P(WebRtcVoiceEngineTestFake, SetOutputVolumeUnsignaledRecvStream) {
EXPECT_TRUE(SetupChannel());
// Spawn an unsignaled stream by sending a packet - gain should be 1.
@@ -3100,7 +3186,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetOutputVolumeUnsignaledRecvStream) {
EXPECT_DOUBLE_EQ(4, GetRecvStream(kSsrcX).gain());
}
-TEST_F(WebRtcVoiceEngineTestFake, BaseMinimumPlayoutDelayMs) {
+TEST_P(WebRtcVoiceEngineTestFake, BaseMinimumPlayoutDelayMs) {
EXPECT_TRUE(SetupChannel());
EXPECT_FALSE(channel_->SetBaseMinimumPlayoutDelayMs(kSsrcY, 200));
EXPECT_FALSE(channel_->GetBaseMinimumPlayoutDelayMs(kSsrcY).has_value());
@@ -3113,7 +3199,7 @@ TEST_F(WebRtcVoiceEngineTestFake, BaseMinimumPlayoutDelayMs) {
EXPECT_EQ(300, GetRecvStream(kSsrcY).base_mininum_playout_delay_ms());
}
-TEST_F(WebRtcVoiceEngineTestFake,
+TEST_P(WebRtcVoiceEngineTestFake,
BaseMinimumPlayoutDelayMsUnsignaledRecvStream) {
// Here base minimum delay is abbreviated to delay in comments for shortness.
EXPECT_TRUE(SetupChannel());
@@ -3159,7 +3245,7 @@ TEST_F(WebRtcVoiceEngineTestFake,
EXPECT_FALSE(channel_->GetBaseMinimumPlayoutDelayMs(kSsrcY).has_value());
}
-TEST_F(WebRtcVoiceEngineTestFake, SetsSyncGroupFromStreamId) {
+TEST_P(WebRtcVoiceEngineTestFake, SetsSyncGroupFromStreamId) {
const uint32_t kAudioSsrc = 123;
const std::string kStreamId = "AvSyncLabel";
@@ -3183,7 +3269,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetsSyncGroupFromStreamId) {
// TODO(solenberg): Remove, once recv streams are configured through Call.
// (This is then covered by TestSetRecvRtpHeaderExtensions.)
-TEST_F(WebRtcVoiceEngineTestFake, ConfiguresAudioReceiveStreamRtpExtensions) {
+TEST_P(WebRtcVoiceEngineTestFake, ConfiguresAudioReceiveStreamRtpExtensions) {
// Test that setting the header extensions results in the expected state
// changes on an associated Call.
std::vector<uint32_t> ssrcs;
@@ -3234,7 +3320,7 @@ TEST_F(WebRtcVoiceEngineTestFake, ConfiguresAudioReceiveStreamRtpExtensions) {
}
}
-TEST_F(WebRtcVoiceEngineTestFake, DeliverAudioPacket_Call) {
+TEST_P(WebRtcVoiceEngineTestFake, DeliverAudioPacket_Call) {
// Test that packets are forwarded to the Call when configured accordingly.
const uint32_t kAudioSsrc = 1;
rtc::CopyOnWriteBuffer kPcmuPacket(kPcmuFrame, sizeof(kPcmuFrame));
@@ -3261,7 +3347,7 @@ TEST_F(WebRtcVoiceEngineTestFake, DeliverAudioPacket_Call) {
// All receive channels should be associated with the first send channel,
// since they do not send RTCP SR.
-TEST_F(WebRtcVoiceEngineTestFake, AssociateFirstSendChannel_SendCreatedFirst) {
+TEST_P(WebRtcVoiceEngineTestFake, AssociateFirstSendChannel_SendCreatedFirst) {
EXPECT_TRUE(SetupSendStream());
EXPECT_TRUE(AddRecvStream(kSsrcY));
EXPECT_EQ(kSsrcX, GetRecvStreamConfig(kSsrcY).rtp.local_ssrc);
@@ -3272,7 +3358,7 @@ TEST_F(WebRtcVoiceEngineTestFake, AssociateFirstSendChannel_SendCreatedFirst) {
EXPECT_EQ(kSsrcX, GetRecvStreamConfig(kSsrcW).rtp.local_ssrc);
}
-TEST_F(WebRtcVoiceEngineTestFake, AssociateFirstSendChannel_RecvCreatedFirst) {
+TEST_P(WebRtcVoiceEngineTestFake, AssociateFirstSendChannel_RecvCreatedFirst) {
EXPECT_TRUE(SetupRecvStream());
EXPECT_EQ(0xFA17FA17u, GetRecvStreamConfig(kSsrcX).rtp.local_ssrc);
EXPECT_TRUE(
@@ -3286,7 +3372,7 @@ TEST_F(WebRtcVoiceEngineTestFake, AssociateFirstSendChannel_RecvCreatedFirst) {
EXPECT_EQ(kSsrcY, GetRecvStreamConfig(kSsrcZ).rtp.local_ssrc);
}
-TEST_F(WebRtcVoiceEngineTestFake, SetRawAudioSink) {
+TEST_P(WebRtcVoiceEngineTestFake, SetRawAudioSink) {
EXPECT_TRUE(SetupChannel());
std::unique_ptr<FakeAudioSink> fake_sink_1(new FakeAudioSink());
std::unique_ptr<FakeAudioSink> fake_sink_2(new FakeAudioSink());
@@ -3305,7 +3391,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetRawAudioSink) {
EXPECT_EQ(nullptr, GetRecvStream(kSsrcX).sink());
}
-TEST_F(WebRtcVoiceEngineTestFake, SetRawAudioSinkUnsignaledRecvStream) {
+TEST_P(WebRtcVoiceEngineTestFake, SetRawAudioSinkUnsignaledRecvStream) {
EXPECT_TRUE(SetupChannel());
std::unique_ptr<FakeAudioSink> fake_sink_1(new FakeAudioSink());
std::unique_ptr<FakeAudioSink> fake_sink_2(new FakeAudioSink());
@@ -3371,7 +3457,7 @@ TEST_F(WebRtcVoiceEngineTestFake, SetRawAudioSinkUnsignaledRecvStream) {
// Test that, just like the video channel, the voice channel communicates the
// network state to the call.
-TEST_F(WebRtcVoiceEngineTestFake, OnReadyToSendSignalsNetworkState) {
+TEST_P(WebRtcVoiceEngineTestFake, OnReadyToSendSignalsNetworkState) {
EXPECT_TRUE(SetupChannel());
EXPECT_EQ(webrtc::kNetworkUp,
@@ -3393,7 +3479,7 @@ TEST_F(WebRtcVoiceEngineTestFake, OnReadyToSendSignalsNetworkState) {
}
// Test that playout is still started after changing parameters
-TEST_F(WebRtcVoiceEngineTestFake, PreservePlayoutWhenRecreateRecvStream) {
+TEST_P(WebRtcVoiceEngineTestFake, PreservePlayoutWhenRecreateRecvStream) {
SetupRecvStream();
channel_->SetPlayout(true);
EXPECT_TRUE(GetRecvStream(kSsrcX).started());
@@ -3409,7 +3495,7 @@ TEST_F(WebRtcVoiceEngineTestFake, PreservePlayoutWhenRecreateRecvStream) {
// Tests when GetSources is called with non-existing ssrc, it will return an
// empty list of RtpSource without crashing.
-TEST_F(WebRtcVoiceEngineTestFake, GetSourcesWithNonExistingSsrc) {
+TEST_P(WebRtcVoiceEngineTestFake, GetSourcesWithNonExistingSsrc) {
// Set up a recv stream with |kSsrcX|.
SetupRecvStream();
cricket::WebRtcVoiceMediaChannel* media_channel =
@@ -3421,43 +3507,15 @@ TEST_F(WebRtcVoiceEngineTestFake, GetSourcesWithNonExistingSsrc) {
// Tests that the library initializes and shuts down properly.
TEST(WebRtcVoiceEngineTest, StartupShutdown) {
- // If the VoiceEngine wants to gather available codecs early, that's fine but
- // we never want it to create a decoder at this stage.
- std::unique_ptr<webrtc::TaskQueueFactory> task_queue_factory =
- webrtc::CreateDefaultTaskQueueFactory();
- rtc::scoped_refptr<webrtc::test::MockAudioDeviceModule> adm =
- webrtc::test::MockAudioDeviceModule::CreateNice();
- rtc::scoped_refptr<webrtc::AudioProcessing> apm =
- webrtc::AudioProcessingBuilder().Create();
- cricket::WebRtcVoiceEngine engine(
- task_queue_factory.get(), adm,
- webrtc::MockAudioEncoderFactory::CreateUnusedFactory(),
- webrtc::MockAudioDecoderFactory::CreateUnusedFactory(), nullptr, apm);
- engine.Init();
- webrtc::RtcEventLogNull event_log;
- webrtc::Call::Config call_config(&event_log);
- webrtc::FieldTrialBasedConfig field_trials;
- call_config.trials = &field_trials;
- call_config.task_queue_factory = task_queue_factory.get();
- auto call = absl::WrapUnique(webrtc::Call::Create(call_config));
- cricket::VoiceMediaChannel* channel = engine.CreateMediaChannel(
- call.get(), cricket::MediaConfig(), cricket::AudioOptions(),
- webrtc::CryptoOptions());
- EXPECT_TRUE(channel != nullptr);
- delete channel;
-}
-
-// Tests that reference counting on the external ADM is correct.
-TEST(WebRtcVoiceEngineTest, StartupShutdownWithExternalADM) {
- std::unique_ptr<webrtc::TaskQueueFactory> task_queue_factory =
- webrtc::CreateDefaultTaskQueueFactory();
- rtc::scoped_refptr<rtc::RefCountedObject<
- ::testing::NiceMock<webrtc::test::MockAudioDeviceModule>>>
- adm(new rtc::RefCountedObject<
- ::testing::NiceMock<webrtc::test::MockAudioDeviceModule>>());
- {
+ for (bool use_null_apm : {false, true}) {
+ // If the VoiceEngine wants to gather available codecs early, that's fine
+ // but we never want it to create a decoder at this stage.
+ std::unique_ptr<webrtc::TaskQueueFactory> task_queue_factory =
+ webrtc::CreateDefaultTaskQueueFactory();
+ rtc::scoped_refptr<webrtc::test::MockAudioDeviceModule> adm =
+ webrtc::test::MockAudioDeviceModule::CreateNice();
rtc::scoped_refptr<webrtc::AudioProcessing> apm =
- webrtc::AudioProcessingBuilder().Create();
+ use_null_apm ? nullptr : webrtc::AudioProcessingBuilder().Create();
cricket::WebRtcVoiceEngine engine(
task_queue_factory.get(), adm,
webrtc::MockAudioEncoderFactory::CreateUnusedFactory(),
@@ -3475,212 +3533,252 @@ TEST(WebRtcVoiceEngineTest, StartupShutdownWithExternalADM) {
EXPECT_TRUE(channel != nullptr);
delete channel;
}
- // The engine/channel should have dropped their references.
- EXPECT_TRUE(adm->HasOneRef());
+}
+
+// Tests that reference counting on the external ADM is correct.
+TEST(WebRtcVoiceEngineTest, StartupShutdownWithExternalADM) {
+ for (bool use_null_apm : {false, true}) {
+ std::unique_ptr<webrtc::TaskQueueFactory> task_queue_factory =
+ webrtc::CreateDefaultTaskQueueFactory();
+ rtc::scoped_refptr<rtc::RefCountedObject<
+ ::testing::NiceMock<webrtc::test::MockAudioDeviceModule>>>
+ adm(new rtc::RefCountedObject<
+ ::testing::NiceMock<webrtc::test::MockAudioDeviceModule>>());
+ {
+ rtc::scoped_refptr<webrtc::AudioProcessing> apm =
+ use_null_apm ? nullptr : webrtc::AudioProcessingBuilder().Create();
+ cricket::WebRtcVoiceEngine engine(
+ task_queue_factory.get(), adm,
+ webrtc::MockAudioEncoderFactory::CreateUnusedFactory(),
+ webrtc::MockAudioDecoderFactory::CreateUnusedFactory(), nullptr, apm);
+ engine.Init();
+ webrtc::RtcEventLogNull event_log;
+ webrtc::Call::Config call_config(&event_log);
+ webrtc::FieldTrialBasedConfig field_trials;
+ call_config.trials = &field_trials;
+ call_config.task_queue_factory = task_queue_factory.get();
+ auto call = absl::WrapUnique(webrtc::Call::Create(call_config));
+ cricket::VoiceMediaChannel* channel = engine.CreateMediaChannel(
+ call.get(), cricket::MediaConfig(), cricket::AudioOptions(),
+ webrtc::CryptoOptions());
+ EXPECT_TRUE(channel != nullptr);
+ delete channel;
+ }
+ // The engine/channel should have dropped their references.
+ EXPECT_TRUE(adm->HasOneRef());
+ }
}
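The standalone TEST() cases cannot read a fixture parameter, so the patch wraps their bodies in an explicit loop over both APM variants, as shown above for StartupShutdown and StartupShutdownWithExternalADM. The shape of that rewrite, with a hypothetical test name and the engine/call plumbing elided:

// Hypothetical shape of the loop the patch adds to the standalone tests.
TEST(WebRtcVoiceEngineTest, HypotheticalNullApmVariant) {
  for (bool use_null_apm : {false, true}) {
    rtc::scoped_refptr<webrtc::AudioProcessing> apm =
        use_null_apm ? nullptr : webrtc::AudioProcessingBuilder().Create();
    // ...build cricket::WebRtcVoiceEngine with |apm| and run the original
    // test body unchanged; it must behave identically in both iterations.
  }
}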
// Verify the payload id of common audio codecs, including CN, ISAC, and G722.
TEST(WebRtcVoiceEngineTest, HasCorrectPayloadTypeMapping) {
- std::unique_ptr<webrtc::TaskQueueFactory> task_queue_factory =
- webrtc::CreateDefaultTaskQueueFactory();
- // TODO(ossu): Why are the payload types of codecs with non-static payload
- // type assignments checked here? It shouldn't really matter.
- rtc::scoped_refptr<webrtc::test::MockAudioDeviceModule> adm =
- webrtc::test::MockAudioDeviceModule::CreateNice();
- rtc::scoped_refptr<webrtc::AudioProcessing> apm =
- webrtc::AudioProcessingBuilder().Create();
- cricket::WebRtcVoiceEngine engine(
- task_queue_factory.get(), adm,
- webrtc::MockAudioEncoderFactory::CreateUnusedFactory(),
- webrtc::MockAudioDecoderFactory::CreateUnusedFactory(), nullptr, apm);
- engine.Init();
- for (const cricket::AudioCodec& codec : engine.send_codecs()) {
- auto is_codec = [&codec](const char* name, int clockrate = 0) {
- return absl::EqualsIgnoreCase(codec.name, name) &&
- (clockrate == 0 || codec.clockrate == clockrate);
- };
- if (is_codec("CN", 16000)) {
- EXPECT_EQ(105, codec.id);
- } else if (is_codec("CN", 32000)) {
- EXPECT_EQ(106, codec.id);
- } else if (is_codec("ISAC", 16000)) {
- EXPECT_EQ(103, codec.id);
- } else if (is_codec("ISAC", 32000)) {
- EXPECT_EQ(104, codec.id);
- } else if (is_codec("G722", 8000)) {
- EXPECT_EQ(9, codec.id);
- } else if (is_codec("telephone-event", 8000)) {
- EXPECT_EQ(126, codec.id);
- // TODO(solenberg): 16k, 32k, 48k DTMF should be dynamically assigned.
- // Remove these checks once both send and receive side assigns payload
- // types dynamically.
- } else if (is_codec("telephone-event", 16000)) {
- EXPECT_EQ(113, codec.id);
- } else if (is_codec("telephone-event", 32000)) {
- EXPECT_EQ(112, codec.id);
- } else if (is_codec("telephone-event", 48000)) {
- EXPECT_EQ(110, codec.id);
- } else if (is_codec("opus")) {
- EXPECT_EQ(111, codec.id);
- ASSERT_TRUE(codec.params.find("minptime") != codec.params.end());
- EXPECT_EQ("10", codec.params.find("minptime")->second);
- ASSERT_TRUE(codec.params.find("useinbandfec") != codec.params.end());
- EXPECT_EQ("1", codec.params.find("useinbandfec")->second);
+ for (bool use_null_apm : {false, true}) {
+ std::unique_ptr<webrtc::TaskQueueFactory> task_queue_factory =
+ webrtc::CreateDefaultTaskQueueFactory();
+ // TODO(ossu): Why are the payload types of codecs with non-static payload
+ // type assignments checked here? It shouldn't really matter.
+ rtc::scoped_refptr<webrtc::test::MockAudioDeviceModule> adm =
+ webrtc::test::MockAudioDeviceModule::CreateNice();
+ rtc::scoped_refptr<webrtc::AudioProcessing> apm =
+ use_null_apm ? nullptr : webrtc::AudioProcessingBuilder().Create();
+ cricket::WebRtcVoiceEngine engine(
+ task_queue_factory.get(), adm,
+ webrtc::MockAudioEncoderFactory::CreateUnusedFactory(),
+ webrtc::MockAudioDecoderFactory::CreateUnusedFactory(), nullptr, apm);
+ engine.Init();
+ for (const cricket::AudioCodec& codec : engine.send_codecs()) {
+ auto is_codec = [&codec](const char* name, int clockrate = 0) {
+ return absl::EqualsIgnoreCase(codec.name, name) &&
+ (clockrate == 0 || codec.clockrate == clockrate);
+ };
+ if (is_codec("CN", 16000)) {
+ EXPECT_EQ(105, codec.id);
+ } else if (is_codec("CN", 32000)) {
+ EXPECT_EQ(106, codec.id);
+ } else if (is_codec("ISAC", 16000)) {
+ EXPECT_EQ(103, codec.id);
+ } else if (is_codec("ISAC", 32000)) {
+ EXPECT_EQ(104, codec.id);
+ } else if (is_codec("G722", 8000)) {
+ EXPECT_EQ(9, codec.id);
+ } else if (is_codec("telephone-event", 8000)) {
+ EXPECT_EQ(126, codec.id);
+ // TODO(solenberg): 16k, 32k, 48k DTMF should be dynamically assigned.
+ // Remove these checks once both send and receive side assigns payload
+ // types dynamically.
+ } else if (is_codec("telephone-event", 16000)) {
+ EXPECT_EQ(113, codec.id);
+ } else if (is_codec("telephone-event", 32000)) {
+ EXPECT_EQ(112, codec.id);
+ } else if (is_codec("telephone-event", 48000)) {
+ EXPECT_EQ(110, codec.id);
+ } else if (is_codec("opus")) {
+ EXPECT_EQ(111, codec.id);
+ ASSERT_TRUE(codec.params.find("minptime") != codec.params.end());
+ EXPECT_EQ("10", codec.params.find("minptime")->second);
+ ASSERT_TRUE(codec.params.find("useinbandfec") != codec.params.end());
+ EXPECT_EQ("1", codec.params.find("useinbandfec")->second);
+ }
}
}
}
// Tests that VoE supports at least 32 channels
TEST(WebRtcVoiceEngineTest, Has32Channels) {
- std::unique_ptr<webrtc::TaskQueueFactory> task_queue_factory =
- webrtc::CreateDefaultTaskQueueFactory();
- rtc::scoped_refptr<webrtc::test::MockAudioDeviceModule> adm =
- webrtc::test::MockAudioDeviceModule::CreateNice();
- rtc::scoped_refptr<webrtc::AudioProcessing> apm =
- webrtc::AudioProcessingBuilder().Create();
- cricket::WebRtcVoiceEngine engine(
- task_queue_factory.get(), adm,
- webrtc::MockAudioEncoderFactory::CreateUnusedFactory(),
- webrtc::MockAudioDecoderFactory::CreateUnusedFactory(), nullptr, apm);
- engine.Init();
- webrtc::RtcEventLogNull event_log;
- webrtc::Call::Config call_config(&event_log);
- webrtc::FieldTrialBasedConfig field_trials;
- call_config.trials = &field_trials;
- call_config.task_queue_factory = task_queue_factory.get();
- auto call = absl::WrapUnique(webrtc::Call::Create(call_config));
-
- cricket::VoiceMediaChannel* channels[32];
- size_t num_channels = 0;
- while (num_channels < arraysize(channels)) {
- cricket::VoiceMediaChannel* channel = engine.CreateMediaChannel(
- call.get(), cricket::MediaConfig(), cricket::AudioOptions(),
- webrtc::CryptoOptions());
- if (!channel)
- break;
- channels[num_channels++] = channel;
- }
+ for (bool use_null_apm : {false, true}) {
+ std::unique_ptr<webrtc::TaskQueueFactory> task_queue_factory =
+ webrtc::CreateDefaultTaskQueueFactory();
+ rtc::scoped_refptr<webrtc::test::MockAudioDeviceModule> adm =
+ webrtc::test::MockAudioDeviceModule::CreateNice();
+ rtc::scoped_refptr<webrtc::AudioProcessing> apm =
+ use_null_apm ? nullptr : webrtc::AudioProcessingBuilder().Create();
+ cricket::WebRtcVoiceEngine engine(
+ task_queue_factory.get(), adm,
+ webrtc::MockAudioEncoderFactory::CreateUnusedFactory(),
+ webrtc::MockAudioDecoderFactory::CreateUnusedFactory(), nullptr, apm);
+ engine.Init();
+ webrtc::RtcEventLogNull event_log;
+ webrtc::Call::Config call_config(&event_log);
+ webrtc::FieldTrialBasedConfig field_trials;
+ call_config.trials = &field_trials;
+ call_config.task_queue_factory = task_queue_factory.get();
+ auto call = absl::WrapUnique(webrtc::Call::Create(call_config));
- size_t expected = arraysize(channels);
- EXPECT_EQ(expected, num_channels);
+ cricket::VoiceMediaChannel* channels[32];
+ size_t num_channels = 0;
+ while (num_channels < arraysize(channels)) {
+ cricket::VoiceMediaChannel* channel = engine.CreateMediaChannel(
+ call.get(), cricket::MediaConfig(), cricket::AudioOptions(),
+ webrtc::CryptoOptions());
+ if (!channel)
+ break;
+ channels[num_channels++] = channel;
+ }
- while (num_channels > 0) {
- delete channels[--num_channels];
+ size_t expected = arraysize(channels);
+ EXPECT_EQ(expected, num_channels);
+
+ while (num_channels > 0) {
+ delete channels[--num_channels];
+ }
}
}
// Test that we set our preferred codecs properly.
TEST(WebRtcVoiceEngineTest, SetRecvCodecs) {
- std::unique_ptr<webrtc::TaskQueueFactory> task_queue_factory =
- webrtc::CreateDefaultTaskQueueFactory();
- // TODO(ossu): I'm not sure of the intent of this test. It's either:
- // - Check that our builtin codecs are usable by Channel.
- // - The codecs provided by the engine is usable by Channel.
- // It does not check that the codecs in the RecvParameters are actually
- // what we sent in - though it's probably reasonable to expect so, if
- // SetRecvParameters returns true.
- // I think it will become clear once audio decoder injection is completed.
- rtc::scoped_refptr<webrtc::test::MockAudioDeviceModule> adm =
- webrtc::test::MockAudioDeviceModule::CreateNice();
- rtc::scoped_refptr<webrtc::AudioProcessing> apm =
- webrtc::AudioProcessingBuilder().Create();
- cricket::WebRtcVoiceEngine engine(
- task_queue_factory.get(), adm,
- webrtc::MockAudioEncoderFactory::CreateUnusedFactory(),
- webrtc::CreateBuiltinAudioDecoderFactory(), nullptr, apm);
- engine.Init();
- webrtc::RtcEventLogNull event_log;
- webrtc::Call::Config call_config(&event_log);
- webrtc::FieldTrialBasedConfig field_trials;
- call_config.trials = &field_trials;
- call_config.task_queue_factory = task_queue_factory.get();
- auto call = absl::WrapUnique(webrtc::Call::Create(call_config));
- cricket::WebRtcVoiceMediaChannel channel(&engine, cricket::MediaConfig(),
- cricket::AudioOptions(),
- webrtc::CryptoOptions(), call.get());
- cricket::AudioRecvParameters parameters;
- parameters.codecs = engine.recv_codecs();
- EXPECT_TRUE(channel.SetRecvParameters(parameters));
+ for (bool use_null_apm : {false, true}) {
+ std::unique_ptr<webrtc::TaskQueueFactory> task_queue_factory =
+ webrtc::CreateDefaultTaskQueueFactory();
+ // TODO(ossu): I'm not sure of the intent of this test. It's either:
+ // - Check that our builtin codecs are usable by Channel.
+ // - The codecs provided by the engine is usable by Channel.
+ // It does not check that the codecs in the RecvParameters are actually
+ // what we sent in - though it's probably reasonable to expect so, if
+ // SetRecvParameters returns true.
+ // I think it will become clear once audio decoder injection is completed.
+ rtc::scoped_refptr<webrtc::test::MockAudioDeviceModule> adm =
+ webrtc::test::MockAudioDeviceModule::CreateNice();
+ rtc::scoped_refptr<webrtc::AudioProcessing> apm =
+ use_null_apm ? nullptr : webrtc::AudioProcessingBuilder().Create();
+ cricket::WebRtcVoiceEngine engine(
+ task_queue_factory.get(), adm,
+ webrtc::MockAudioEncoderFactory::CreateUnusedFactory(),
+ webrtc::CreateBuiltinAudioDecoderFactory(), nullptr, apm);
+ engine.Init();
+ webrtc::RtcEventLogNull event_log;
+ webrtc::Call::Config call_config(&event_log);
+ webrtc::FieldTrialBasedConfig field_trials;
+ call_config.trials = &field_trials;
+ call_config.task_queue_factory = task_queue_factory.get();
+ auto call = absl::WrapUnique(webrtc::Call::Create(call_config));
+ cricket::WebRtcVoiceMediaChannel channel(
+ &engine, cricket::MediaConfig(), cricket::AudioOptions(),
+ webrtc::CryptoOptions(), call.get());
+ cricket::AudioRecvParameters parameters;
+ parameters.codecs = engine.recv_codecs();
+ EXPECT_TRUE(channel.SetRecvParameters(parameters));
+ }
}
TEST(WebRtcVoiceEngineTest, CollectRecvCodecs) {
- std::vector<webrtc::AudioCodecSpec> specs;
- webrtc::AudioCodecSpec spec1{{"codec1", 48000, 2, {{"param1", "value1"}}},
- {48000, 2, 16000, 10000, 20000}};
- spec1.info.allow_comfort_noise = false;
- spec1.info.supports_network_adaption = true;
- specs.push_back(spec1);
- webrtc::AudioCodecSpec spec2{{"codec2", 32000, 1}, {32000, 1, 32000}};
- spec2.info.allow_comfort_noise = false;
- specs.push_back(spec2);
- specs.push_back(webrtc::AudioCodecSpec{
- {"codec3", 16000, 1, {{"param1", "value1b"}, {"param2", "value2"}}},
- {16000, 1, 13300}});
- specs.push_back(
- webrtc::AudioCodecSpec{{"codec4", 8000, 1}, {8000, 1, 64000}});
- specs.push_back(
- webrtc::AudioCodecSpec{{"codec5", 8000, 2}, {8000, 1, 64000}});
-
- std::unique_ptr<webrtc::TaskQueueFactory> task_queue_factory =
- webrtc::CreateDefaultTaskQueueFactory();
- rtc::scoped_refptr<webrtc::MockAudioEncoderFactory> unused_encoder_factory =
- webrtc::MockAudioEncoderFactory::CreateUnusedFactory();
- rtc::scoped_refptr<webrtc::MockAudioDecoderFactory> mock_decoder_factory =
- new rtc::RefCountedObject<webrtc::MockAudioDecoderFactory>;
- EXPECT_CALL(*mock_decoder_factory.get(), GetSupportedDecoders())
- .WillOnce(Return(specs));
- rtc::scoped_refptr<webrtc::test::MockAudioDeviceModule> adm =
- webrtc::test::MockAudioDeviceModule::CreateNice();
-
- rtc::scoped_refptr<webrtc::AudioProcessing> apm =
- webrtc::AudioProcessingBuilder().Create();
- cricket::WebRtcVoiceEngine engine(task_queue_factory.get(), adm,
- unused_encoder_factory,
- mock_decoder_factory, nullptr, apm);
- engine.Init();
- auto codecs = engine.recv_codecs();
- EXPECT_EQ(11u, codecs.size());
-
- // Rather than just ASSERTing that there are enough codecs, ensure that we can
- // check the actual values safely, to provide better test results.
- auto get_codec = [&codecs](size_t index) -> const cricket::AudioCodec& {
- static const cricket::AudioCodec missing_codec(0, "<missing>", 0, 0, 0);
- if (codecs.size() > index)
- return codecs[index];
- return missing_codec;
- };
-
- // Ensure the general codecs are generated first and in order.
- for (size_t i = 0; i != specs.size(); ++i) {
- EXPECT_EQ(specs[i].format.name, get_codec(i).name);
- EXPECT_EQ(specs[i].format.clockrate_hz, get_codec(i).clockrate);
- EXPECT_EQ(specs[i].format.num_channels, get_codec(i).channels);
- EXPECT_EQ(specs[i].format.parameters, get_codec(i).params);
- }
-
- // Find the index of a codec, or -1 if not found, so that we can easily check
- // supplementary codecs are ordered after the general codecs.
- auto find_codec = [&codecs](const webrtc::SdpAudioFormat& format) -> int {
- for (size_t i = 0; i != codecs.size(); ++i) {
- const cricket::AudioCodec& codec = codecs[i];
- if (absl::EqualsIgnoreCase(codec.name, format.name) &&
- codec.clockrate == format.clockrate_hz &&
- codec.channels == format.num_channels) {
- return rtc::checked_cast<int>(i);
- }
+ for (bool use_null_apm : {false, true}) {
+ std::vector<webrtc::AudioCodecSpec> specs;
+ webrtc::AudioCodecSpec spec1{{"codec1", 48000, 2, {{"param1", "value1"}}},
+ {48000, 2, 16000, 10000, 20000}};
+ spec1.info.allow_comfort_noise = false;
+ spec1.info.supports_network_adaption = true;
+ specs.push_back(spec1);
+ webrtc::AudioCodecSpec spec2{{"codec2", 32000, 1}, {32000, 1, 32000}};
+ spec2.info.allow_comfort_noise = false;
+ specs.push_back(spec2);
+ specs.push_back(webrtc::AudioCodecSpec{
+ {"codec3", 16000, 1, {{"param1", "value1b"}, {"param2", "value2"}}},
+ {16000, 1, 13300}});
+ specs.push_back(
+ webrtc::AudioCodecSpec{{"codec4", 8000, 1}, {8000, 1, 64000}});
+ specs.push_back(
+ webrtc::AudioCodecSpec{{"codec5", 8000, 2}, {8000, 1, 64000}});
+
+ std::unique_ptr<webrtc::TaskQueueFactory> task_queue_factory =
+ webrtc::CreateDefaultTaskQueueFactory();
+ rtc::scoped_refptr<webrtc::MockAudioEncoderFactory> unused_encoder_factory =
+ webrtc::MockAudioEncoderFactory::CreateUnusedFactory();
+ rtc::scoped_refptr<webrtc::MockAudioDecoderFactory> mock_decoder_factory =
+ new rtc::RefCountedObject<webrtc::MockAudioDecoderFactory>;
+ EXPECT_CALL(*mock_decoder_factory.get(), GetSupportedDecoders())
+ .WillOnce(Return(specs));
+ rtc::scoped_refptr<webrtc::test::MockAudioDeviceModule> adm =
+ webrtc::test::MockAudioDeviceModule::CreateNice();
+
+ rtc::scoped_refptr<webrtc::AudioProcessing> apm =
+ use_null_apm ? nullptr : webrtc::AudioProcessingBuilder().Create();
+ cricket::WebRtcVoiceEngine engine(task_queue_factory.get(), adm,
+ unused_encoder_factory,
+ mock_decoder_factory, nullptr, apm);
+ engine.Init();
+ auto codecs = engine.recv_codecs();
+ EXPECT_EQ(11u, codecs.size());
+
+ // Rather than just ASSERTing that there are enough codecs, ensure that we
+ // can check the actual values safely, to provide better test results.
+ auto get_codec = [&codecs](size_t index) -> const cricket::AudioCodec& {
+ static const cricket::AudioCodec missing_codec(0, "<missing>", 0, 0, 0);
+ if (codecs.size() > index)
+ return codecs[index];
+ return missing_codec;
+ };
+
+ // Ensure the general codecs are generated first and in order.
+ for (size_t i = 0; i != specs.size(); ++i) {
+ EXPECT_EQ(specs[i].format.name, get_codec(i).name);
+ EXPECT_EQ(specs[i].format.clockrate_hz, get_codec(i).clockrate);
+ EXPECT_EQ(specs[i].format.num_channels, get_codec(i).channels);
+ EXPECT_EQ(specs[i].format.parameters, get_codec(i).params);
}
- return -1;
- };
-
- // Ensure all supplementary codecs are generated last. Their internal ordering
- // is not important.
- // Without this cast, the comparison turned unsigned and, thus, failed for -1.
- const int num_specs = static_cast<int>(specs.size());
- EXPECT_GE(find_codec({"cn", 8000, 1}), num_specs);
- EXPECT_GE(find_codec({"cn", 16000, 1}), num_specs);
- EXPECT_EQ(find_codec({"cn", 32000, 1}), -1);
- EXPECT_GE(find_codec({"telephone-event", 8000, 1}), num_specs);
- EXPECT_GE(find_codec({"telephone-event", 16000, 1}), num_specs);
- EXPECT_GE(find_codec({"telephone-event", 32000, 1}), num_specs);
- EXPECT_GE(find_codec({"telephone-event", 48000, 1}), num_specs);
+
+ // Find the index of a codec, or -1 if not found, so that we can easily
+ // check supplementary codecs are ordered after the general codecs.
+ auto find_codec = [&codecs](const webrtc::SdpAudioFormat& format) -> int {
+ for (size_t i = 0; i != codecs.size(); ++i) {
+ const cricket::AudioCodec& codec = codecs[i];
+ if (absl::EqualsIgnoreCase(codec.name, format.name) &&
+ codec.clockrate == format.clockrate_hz &&
+ codec.channels == format.num_channels) {
+ return rtc::checked_cast<int>(i);
+ }
+ }
+ return -1;
+ };
+
+ // Ensure all supplementary codecs are generated last. Their internal
+ // ordering is not important. Without this cast, the comparison turned
+ // unsigned and, thus, failed for -1.
+ const int num_specs = static_cast<int>(specs.size());
+ EXPECT_GE(find_codec({"cn", 8000, 1}), num_specs);
+ EXPECT_GE(find_codec({"cn", 16000, 1}), num_specs);
+ EXPECT_EQ(find_codec({"cn", 32000, 1}), -1);
+ EXPECT_GE(find_codec({"telephone-event", 8000, 1}), num_specs);
+ EXPECT_GE(find_codec({"telephone-event", 16000, 1}), num_specs);
+ EXPECT_GE(find_codec({"telephone-event", 32000, 1}), num_specs);
+ EXPECT_GE(find_codec({"telephone-event", 48000, 1}), num_specs);
+ }
}
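
Every test in this file now follows the same shape: the body is wrapped in a loop over use_null_apm, and the voice engine is constructed with either a real APM or a null one. A condensed sketch of that pattern, reusing the mock ADM and the unused encoder/decoder factories from the tests above:

for (bool use_null_apm : {false, true}) {
  std::unique_ptr<webrtc::TaskQueueFactory> task_queue_factory =
      webrtc::CreateDefaultTaskQueueFactory();
  rtc::scoped_refptr<webrtc::test::MockAudioDeviceModule> adm =
      webrtc::test::MockAudioDeviceModule::CreateNice();
  rtc::scoped_refptr<webrtc::AudioProcessing> apm =
      use_null_apm ? nullptr : webrtc::AudioProcessingBuilder().Create();
  cricket::WebRtcVoiceEngine engine(
      task_queue_factory.get(), adm,
      webrtc::MockAudioEncoderFactory::CreateUnusedFactory(),
      webrtc::MockAudioDecoderFactory::CreateUnusedFactory(), nullptr, apm);
  engine.Init();
  // Each test body then exercises the engine identically in both iterations;
  // nothing in the body may assume a non-null APM.
}
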
diff --git a/modules/audio_processing/BUILD.gn b/modules/audio_processing/BUILD.gn
index 69f94fa69f..46207aa658 100644
--- a/modules/audio_processing/BUILD.gn
+++ b/modules/audio_processing/BUILD.gn
@@ -116,6 +116,7 @@ rtc_library("audio_processing") {
visibility = [ "*" ]
configs += [ ":apm_debug_dump" ]
sources = [
+ "audio_processing_builder_impl.cc",
"audio_processing_impl.cc",
"audio_processing_impl.h",
"common.h",
@@ -169,6 +170,7 @@ rtc_library("audio_processing") {
"../../rtc_base:deprecation",
"../../rtc_base:gtest_prod",
"../../rtc_base:ignore_wundef",
+ "../../rtc_base:refcount",
"../../rtc_base:safe_minmax",
"../../rtc_base:sanitizer",
"../../rtc_base/system:rtc_export",
@@ -556,41 +558,6 @@ if (rtc_include_tests) {
} # audioproc_f_impl
}
- rtc_library("audioproc_test_utils") {
- visibility = [ "*" ]
- testonly = true
- sources = [
- "test/audio_buffer_tools.cc",
- "test/audio_buffer_tools.h",
- "test/bitexactness_tools.cc",
- "test/bitexactness_tools.h",
- "test/performance_timer.cc",
- "test/performance_timer.h",
- "test/simulator_buffers.cc",
- "test/simulator_buffers.h",
- "test/test_utils.cc",
- "test/test_utils.h",
- ]
-
- deps = [
- ":api",
- ":audio_buffer",
- ":audio_processing",
- "../../api:array_view",
- "../../api/audio:audio_frame_api",
- "../../common_audio",
- "../../rtc_base:checks",
- "../../rtc_base:rtc_base_approved",
- "../../rtc_base/system:arch",
- "../../system_wrappers",
- "../../test:fileutils",
- "../../test:test_support",
- "../audio_coding:neteq_input_audio_tools",
- "//testing/gtest",
- "//third_party/abseil-cpp/absl/types:optional",
- ]
- }
-
if (rtc_enable_protobuf) {
proto_library("audioproc_unittest_proto") {
sources = [ "test/unittest.proto" ]
@@ -629,3 +596,42 @@ if (rtc_include_tests) {
}
}
}
+
+rtc_library("audioproc_test_utils") {
+ visibility = [ "*" ]
+ testonly = true
+ sources = [
+ "test/audio_buffer_tools.cc",
+ "test/audio_buffer_tools.h",
+ "test/audio_processing_builder_for_testing.cc",
+ "test/audio_processing_builder_for_testing.h",
+ "test/bitexactness_tools.cc",
+ "test/bitexactness_tools.h",
+ "test/performance_timer.cc",
+ "test/performance_timer.h",
+ "test/simulator_buffers.cc",
+ "test/simulator_buffers.h",
+ "test/test_utils.cc",
+ "test/test_utils.h",
+ ]
+
+ configs += [ ":apm_debug_dump" ]
+
+ deps = [
+ ":api",
+ ":audio_buffer",
+ ":audio_processing",
+ "../../api:array_view",
+ "../../api/audio:audio_frame_api",
+ "../../common_audio",
+ "../../rtc_base:checks",
+ "../../rtc_base:rtc_base_approved",
+ "../../rtc_base/system:arch",
+ "../../system_wrappers",
+ "../../test:fileutils",
+ "../../test:test_support",
+ "../audio_coding:neteq_input_audio_tools",
+ "//testing/gtest",
+ "//third_party/abseil-cpp/absl/types:optional",
+ ]
+}
diff --git a/modules/audio_processing/aec_dump/BUILD.gn b/modules/audio_processing/aec_dump/BUILD.gn
index 46f002205e..7ba3bc08e0 100644
--- a/modules/audio_processing/aec_dump/BUILD.gn
+++ b/modules/audio_processing/aec_dump/BUILD.gn
@@ -20,31 +20,35 @@ rtc_source_set("aec_dump") {
]
}
-rtc_library("mock_aec_dump") {
- testonly = true
- sources = [
- "mock_aec_dump.cc",
- "mock_aec_dump.h",
- ]
+if (rtc_include_tests) {
+ rtc_library("mock_aec_dump") {
+ testonly = true
+ sources = [
+ "mock_aec_dump.cc",
+ "mock_aec_dump.h",
+ ]
- deps = [
- "../",
- "../../../test:test_support",
- ]
-}
+ deps = [
+ "..:audioproc_test_utils",
+ "../",
+ "../../../test:test_support",
+ ]
+ }
-rtc_library("mock_aec_dump_unittests") {
- testonly = true
- configs += [ "..:apm_debug_dump" ]
- sources = [ "aec_dump_integration_test.cc" ]
+ rtc_library("mock_aec_dump_unittests") {
+ testonly = true
+ configs += [ "..:apm_debug_dump" ]
+ sources = [ "aec_dump_integration_test.cc" ]
- deps = [
- ":mock_aec_dump",
- "..:api",
- "../",
- "../../../rtc_base:rtc_base_approved",
- "//testing/gtest",
- ]
+ deps = [
+ ":mock_aec_dump",
+ "..:api",
+ "..:audioproc_test_utils",
+ "../",
+ "../../../rtc_base:rtc_base_approved",
+ "//testing/gtest",
+ ]
+ }
}
if (rtc_enable_protobuf) {
@@ -75,20 +79,22 @@ if (rtc_enable_protobuf) {
deps += [ "../:audioproc_debug_proto" ]
}
- rtc_library("aec_dump_unittests") {
- testonly = true
- defines = []
- deps = [
- ":aec_dump",
- ":aec_dump_impl",
- "..:audioproc_debug_proto",
- "../",
- "../../../rtc_base:task_queue_for_test",
- "../../../test:fileutils",
- "../../../test:test_support",
- "//testing/gtest",
- ]
- sources = [ "aec_dump_unittest.cc" ]
+ if (rtc_include_tests) {
+ rtc_library("aec_dump_unittests") {
+ testonly = true
+ defines = []
+ deps = [
+ ":aec_dump",
+ ":aec_dump_impl",
+ "..:audioproc_debug_proto",
+ "../",
+ "../../../rtc_base:task_queue_for_test",
+ "../../../test:fileutils",
+ "../../../test:test_support",
+ "//testing/gtest",
+ ]
+ sources = [ "aec_dump_unittest.cc" ]
+ }
}
}
diff --git a/modules/audio_processing/aec_dump/aec_dump_integration_test.cc b/modules/audio_processing/aec_dump/aec_dump_integration_test.cc
index 6d6b46655a..83268b5994 100644
--- a/modules/audio_processing/aec_dump/aec_dump_integration_test.cc
+++ b/modules/audio_processing/aec_dump/aec_dump_integration_test.cc
@@ -15,6 +15,7 @@
#include "modules/audio_processing/aec_dump/mock_aec_dump.h"
#include "modules/audio_processing/audio_processing_impl.h"
#include "modules/audio_processing/include/audio_processing.h"
+#include "modules/audio_processing/test/audio_processing_builder_for_testing.h"
using ::testing::_;
using ::testing::AtLeast;
@@ -25,7 +26,7 @@ namespace {
std::unique_ptr<webrtc::AudioProcessing> CreateAudioProcessing() {
webrtc::Config config;
std::unique_ptr<webrtc::AudioProcessing> apm(
- webrtc::AudioProcessingBuilder().Create(config));
+ webrtc::AudioProcessingBuilderForTesting().Create(config));
RTC_DCHECK(apm);
return apm;
}
diff --git a/modules/audio_processing/audio_processing_builder_impl.cc b/modules/audio_processing/audio_processing_builder_impl.cc
new file mode 100644
index 0000000000..e89bbecc68
--- /dev/null
+++ b/modules/audio_processing/audio_processing_builder_impl.cc
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/include/audio_processing.h"
+
+#include <memory>
+
+#include "modules/audio_processing/audio_processing_impl.h"
+#include "rtc_base/ref_counted_object.h"
+
+namespace webrtc {
+
+AudioProcessingBuilder::AudioProcessingBuilder() = default;
+AudioProcessingBuilder::~AudioProcessingBuilder() = default;
+
+AudioProcessing* AudioProcessingBuilder::Create() {
+ webrtc::Config config;
+ return Create(config);
+}
+
+AudioProcessing* AudioProcessingBuilder::Create(const webrtc::Config& config) {
+#ifdef WEBRTC_EXCLUDE_AUDIO_PROCESSING_MODULE
+
+  // Implementation that returns a null pointer, for use when the APM is
+  // excluded from the build.
+ return nullptr;
+
+#else
+
+ // Standard implementation.
+ AudioProcessingImpl* apm = new rtc::RefCountedObject<AudioProcessingImpl>(
+ config, std::move(capture_post_processing_),
+ std::move(render_pre_processing_), std::move(echo_control_factory_),
+ std::move(echo_detector_), std::move(capture_analyzer_));
+ if (apm->Initialize() != AudioProcessing::kNoError) {
+ delete apm;
+ apm = nullptr;
+ }
+ return apm;
+
+#endif
+}
+
+} // namespace webrtc
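
Because Create() may now return a null pointer, code that builds an APM through the production builder needs a null check. A minimal sketch of the expected call-site pattern (the echo canceller setting is only an illustrative configuration choice, not something this file prescribes):

rtc::scoped_refptr<webrtc::AudioProcessing> apm =
    webrtc::AudioProcessingBuilder().Create();
if (apm) {
  webrtc::AudioProcessing::Config config;
  config.echo_canceller.enabled = true;
  apm->ApplyConfig(config);
}
// When apm is null, the surrounding code must skip audio processing entirely.
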
diff --git a/modules/audio_processing/audio_processing_impl.cc b/modules/audio_processing/audio_processing_impl.cc
index 34e64251b8..bdef059686 100644
--- a/modules/audio_processing/audio_processing_impl.cc
+++ b/modules/audio_processing/audio_processing_impl.cc
@@ -229,56 +229,6 @@ bool AudioProcessingImpl::SubmoduleStates::HighPassFilteringRequired() const {
noise_suppressor_enabled_;
}
-AudioProcessingBuilder::AudioProcessingBuilder() = default;
-AudioProcessingBuilder::~AudioProcessingBuilder() = default;
-
-AudioProcessingBuilder& AudioProcessingBuilder::SetCapturePostProcessing(
- std::unique_ptr<CustomProcessing> capture_post_processing) {
- capture_post_processing_ = std::move(capture_post_processing);
- return *this;
-}
-
-AudioProcessingBuilder& AudioProcessingBuilder::SetRenderPreProcessing(
- std::unique_ptr<CustomProcessing> render_pre_processing) {
- render_pre_processing_ = std::move(render_pre_processing);
- return *this;
-}
-
-AudioProcessingBuilder& AudioProcessingBuilder::SetCaptureAnalyzer(
- std::unique_ptr<CustomAudioAnalyzer> capture_analyzer) {
- capture_analyzer_ = std::move(capture_analyzer);
- return *this;
-}
-
-AudioProcessingBuilder& AudioProcessingBuilder::SetEchoControlFactory(
- std::unique_ptr<EchoControlFactory> echo_control_factory) {
- echo_control_factory_ = std::move(echo_control_factory);
- return *this;
-}
-
-AudioProcessingBuilder& AudioProcessingBuilder::SetEchoDetector(
- rtc::scoped_refptr<EchoDetector> echo_detector) {
- echo_detector_ = std::move(echo_detector);
- return *this;
-}
-
-AudioProcessing* AudioProcessingBuilder::Create() {
- webrtc::Config config;
- return Create(config);
-}
-
-AudioProcessing* AudioProcessingBuilder::Create(const webrtc::Config& config) {
- AudioProcessingImpl* apm = new rtc::RefCountedObject<AudioProcessingImpl>(
- config, std::move(capture_post_processing_),
- std::move(render_pre_processing_), std::move(echo_control_factory_),
- std::move(echo_detector_), std::move(capture_analyzer_));
- if (apm->Initialize() != AudioProcessing::kNoError) {
- delete apm;
- apm = nullptr;
- }
- return apm;
-}
-
AudioProcessingImpl::AudioProcessingImpl(const webrtc::Config& config)
: AudioProcessingImpl(config,
/*capture_post_processor=*/nullptr,
diff --git a/modules/audio_processing/audio_processing_impl_locking_unittest.cc b/modules/audio_processing/audio_processing_impl_locking_unittest.cc
index f1e049d44a..500539405b 100644
--- a/modules/audio_processing/audio_processing_impl_locking_unittest.cc
+++ b/modules/audio_processing/audio_processing_impl_locking_unittest.cc
@@ -14,6 +14,7 @@
#include "api/array_view.h"
#include "modules/audio_processing/audio_processing_impl.h"
+#include "modules/audio_processing/test/audio_processing_builder_for_testing.h"
#include "modules/audio_processing/test/test_utils.h"
#include "rtc_base/critical_section.h"
#include "rtc_base/event.h"
@@ -496,7 +497,7 @@ AudioProcessingImplLockTest::AudioProcessingImplLockTest()
this,
"stats",
rtc::kNormalPriority),
- apm_(AudioProcessingBuilder().Create()),
+ apm_(AudioProcessingBuilderForTesting().Create()),
render_thread_state_(kMaxFrameSize,
&rand_gen_,
&render_call_event_,
diff --git a/modules/audio_processing/audio_processing_impl_unittest.cc b/modules/audio_processing/audio_processing_impl_unittest.cc
index a441e2f208..3c5458d151 100644
--- a/modules/audio_processing/audio_processing_impl_unittest.cc
+++ b/modules/audio_processing/audio_processing_impl_unittest.cc
@@ -15,6 +15,7 @@
#include "api/scoped_refptr.h"
#include "modules/audio_processing/include/audio_processing.h"
+#include "modules/audio_processing/test/audio_processing_builder_for_testing.h"
#include "modules/audio_processing/test/echo_control_mock.h"
#include "modules/audio_processing/test/test_utils.h"
#include "rtc_base/checks.h"
@@ -167,7 +168,8 @@ TEST(AudioProcessingImplTest, AudioParameterChangeTriggersInit) {
}
TEST(AudioProcessingImplTest, UpdateCapturePreGainRuntimeSetting) {
- std::unique_ptr<AudioProcessing> apm(AudioProcessingBuilder().Create());
+ std::unique_ptr<AudioProcessing> apm(
+ AudioProcessingBuilderForTesting().Create());
webrtc::AudioProcessing::Config apm_config;
apm_config.pre_amplifier.enabled = true;
apm_config.pre_amplifier.fixed_gain_factor = 1.f;
@@ -205,7 +207,7 @@ TEST(AudioProcessingImplTest,
const auto* echo_control_factory_ptr = echo_control_factory.get();
std::unique_ptr<AudioProcessing> apm(
- AudioProcessingBuilder()
+ AudioProcessingBuilderForTesting()
.SetEchoControlFactory(std::move(echo_control_factory))
.Create());
// Disable AGC.
@@ -248,7 +250,7 @@ TEST(AudioProcessingImplTest,
const auto* echo_control_factory_ptr = echo_control_factory.get();
std::unique_ptr<AudioProcessing> apm(
- AudioProcessingBuilder()
+ AudioProcessingBuilderForTesting()
.SetEchoControlFactory(std::move(echo_control_factory))
.Create());
webrtc::AudioProcessing::Config apm_config;
@@ -294,7 +296,7 @@ TEST(AudioProcessingImplTest, EchoControllerObservesPlayoutVolumeChange) {
const auto* echo_control_factory_ptr = echo_control_factory.get();
std::unique_ptr<AudioProcessing> apm(
- AudioProcessingBuilder()
+ AudioProcessingBuilderForTesting()
.SetEchoControlFactory(std::move(echo_control_factory))
.Create());
// Disable AGC.
@@ -353,7 +355,7 @@ TEST(AudioProcessingImplTest, RenderPreProcessorBeforeEchoDetector) {
new TestRenderPreProcessor());
// Create APM injecting the test echo detector and render pre-processor.
std::unique_ptr<AudioProcessing> apm(
- AudioProcessingBuilder()
+ AudioProcessingBuilderForTesting()
.SetEchoDetector(test_echo_detector)
.SetRenderPreProcessing(std::move(test_render_pre_processor))
.Create());
diff --git a/modules/audio_processing/audio_processing_performance_unittest.cc b/modules/audio_processing/audio_processing_performance_unittest.cc
index 2ed6f174af..206812b0cb 100644
--- a/modules/audio_processing/audio_processing_performance_unittest.cc
+++ b/modules/audio_processing/audio_processing_performance_unittest.cc
@@ -15,6 +15,7 @@
#include "api/array_view.h"
#include "modules/audio_processing/audio_processing_impl.h"
+#include "modules/audio_processing/test/audio_processing_builder_for_testing.h"
#include "modules/audio_processing/test/test_utils.h"
#include "rtc_base/atomic_ops.h"
#include "rtc_base/event.h"
@@ -486,28 +487,28 @@ class CallSimulator : public ::testing::TestWithParam<SimulationConfig> {
int num_capture_channels = 1;
switch (simulation_config_.simulation_settings) {
case SettingsType::kDefaultApmMobile: {
- apm_.reset(AudioProcessingBuilder().Create());
+ apm_.reset(AudioProcessingBuilderForTesting().Create());
ASSERT_TRUE(!!apm_);
set_default_mobile_apm_runtime_settings(apm_.get());
break;
}
case SettingsType::kDefaultApmDesktop: {
Config config;
- apm_.reset(AudioProcessingBuilder().Create(config));
+ apm_.reset(AudioProcessingBuilderForTesting().Create(config));
ASSERT_TRUE(!!apm_);
set_default_desktop_apm_runtime_settings(apm_.get());
apm_->SetExtraOptions(config);
break;
}
case SettingsType::kAllSubmodulesTurnedOff: {
- apm_.reset(AudioProcessingBuilder().Create());
+ apm_.reset(AudioProcessingBuilderForTesting().Create());
ASSERT_TRUE(!!apm_);
turn_off_default_apm_runtime_settings(apm_.get());
break;
}
case SettingsType::kDefaultApmDesktopWithoutDelayAgnostic: {
Config config;
- apm_.reset(AudioProcessingBuilder().Create(config));
+ apm_.reset(AudioProcessingBuilderForTesting().Create(config));
ASSERT_TRUE(!!apm_);
set_default_desktop_apm_runtime_settings(apm_.get());
apm_->SetExtraOptions(config);
@@ -515,7 +516,7 @@ class CallSimulator : public ::testing::TestWithParam<SimulationConfig> {
}
case SettingsType::kDefaultApmDesktopWithoutExtendedFilter: {
Config config;
- apm_.reset(AudioProcessingBuilder().Create(config));
+ apm_.reset(AudioProcessingBuilderForTesting().Create(config));
ASSERT_TRUE(!!apm_);
set_default_desktop_apm_runtime_settings(apm_.get());
apm_->SetExtraOptions(config);
diff --git a/modules/audio_processing/audio_processing_unittest.cc b/modules/audio_processing/audio_processing_unittest.cc
index cdca7c3524..90413a84be 100644
--- a/modules/audio_processing/audio_processing_unittest.cc
+++ b/modules/audio_processing/audio_processing_unittest.cc
@@ -28,6 +28,7 @@
#include "modules/audio_processing/audio_processing_impl.h"
#include "modules/audio_processing/common.h"
#include "modules/audio_processing/include/mock_audio_processing.h"
+#include "modules/audio_processing/test/audio_processing_builder_for_testing.h"
#include "modules/audio_processing/test/protobuf_utils.h"
#include "modules/audio_processing/test/test_utils.h"
#include "rtc_base/arraysize.h"
@@ -426,7 +427,7 @@ ApmTest::ApmTest()
far_file_(NULL),
near_file_(NULL),
out_file_(NULL) {
- apm_.reset(AudioProcessingBuilder().Create());
+ apm_.reset(AudioProcessingBuilderForTesting().Create());
AudioProcessing::Config apm_config = apm_->GetConfig();
apm_config.gain_controller1.analog_gain_controller.enabled = false;
apm_config.pipeline.maximum_internal_processing_rate = 48000;
@@ -1176,7 +1177,7 @@ TEST_F(ApmTest, NoProcessingWhenAllComponentsDisabledFloat) {
auto src_channels = &src[0];
auto dest_channels = &dest[0];
- apm_.reset(AudioProcessingBuilder().Create());
+ apm_.reset(AudioProcessingBuilderForTesting().Create());
EXPECT_NOERR(apm_->ProcessStream(&src_channels, StreamConfig(sample_rate, 1),
StreamConfig(sample_rate, 1),
&dest_channels));
@@ -1637,7 +1638,7 @@ TEST_F(ApmTest, Process) {
if (test->num_input_channels() != test->num_output_channels())
continue;
- apm_.reset(AudioProcessingBuilder().Create());
+ apm_.reset(AudioProcessingBuilderForTesting().Create());
AudioProcessing::Config apm_config = apm_->GetConfig();
apm_config.gain_controller1.analog_gain_controller.enabled = false;
apm_->ApplyConfig(apm_config);
@@ -1806,7 +1807,8 @@ TEST_F(ApmTest, NoErrorsWithKeyboardChannel) {
{AudioProcessing::kStereoAndKeyboard, AudioProcessing::kStereo},
};
- std::unique_ptr<AudioProcessing> ap(AudioProcessingBuilder().Create());
+ std::unique_ptr<AudioProcessing> ap(
+ AudioProcessingBuilderForTesting().Create());
// Enable one component just to ensure some processing takes place.
AudioProcessing::Config config;
config.noise_suppression.enabled = true;
@@ -1932,7 +1934,8 @@ class AudioProcessingTest
size_t num_reverse_input_channels,
size_t num_reverse_output_channels,
const std::string& output_file_prefix) {
- std::unique_ptr<AudioProcessing> ap(AudioProcessingBuilder().Create());
+ std::unique_ptr<AudioProcessing> ap(
+ AudioProcessingBuilderForTesting().Create());
AudioProcessing::Config apm_config = ap->GetConfig();
apm_config.gain_controller1.analog_gain_controller.enabled = false;
ap->ApplyConfig(apm_config);
@@ -2316,7 +2319,8 @@ void RunApmRateAndChannelTest(
rtc::ArrayView<const int> sample_rates_hz,
rtc::ArrayView<const int> render_channel_counts,
rtc::ArrayView<const int> capture_channel_counts) {
- std::unique_ptr<AudioProcessing> apm(AudioProcessingBuilder().Create());
+ std::unique_ptr<AudioProcessing> apm(
+ AudioProcessingBuilderForTesting().Create());
webrtc::AudioProcessing::Config apm_config;
apm_config.echo_canceller.enabled = true;
apm->ApplyConfig(apm_config);
@@ -2455,7 +2459,7 @@ TEST(ApmConfiguration, EnablePostProcessing) {
auto mock_post_processor =
std::unique_ptr<CustomProcessing>(mock_post_processor_ptr);
rtc::scoped_refptr<AudioProcessing> apm =
- AudioProcessingBuilder()
+ AudioProcessingBuilderForTesting()
.SetCapturePostProcessing(std::move(mock_post_processor))
.Create();
@@ -2477,7 +2481,7 @@ TEST(ApmConfiguration, EnablePreProcessing) {
auto mock_pre_processor =
std::unique_ptr<CustomProcessing>(mock_pre_processor_ptr);
rtc::scoped_refptr<AudioProcessing> apm =
- AudioProcessingBuilder()
+ AudioProcessingBuilderForTesting()
.SetRenderPreProcessing(std::move(mock_pre_processor))
.Create();
@@ -2499,7 +2503,7 @@ TEST(ApmConfiguration, EnableCaptureAnalyzer) {
auto mock_capture_analyzer =
std::unique_ptr<CustomAudioAnalyzer>(mock_capture_analyzer_ptr);
rtc::scoped_refptr<AudioProcessing> apm =
- AudioProcessingBuilder()
+ AudioProcessingBuilderForTesting()
.SetCaptureAnalyzer(std::move(mock_capture_analyzer))
.Create();
@@ -2520,7 +2524,7 @@ TEST(ApmConfiguration, PreProcessingReceivesRuntimeSettings) {
auto mock_pre_processor =
std::unique_ptr<CustomProcessing>(mock_pre_processor_ptr);
rtc::scoped_refptr<AudioProcessing> apm =
- AudioProcessingBuilder()
+ AudioProcessingBuilderForTesting()
.SetRenderPreProcessing(std::move(mock_pre_processor))
.Create();
apm->SetRuntimeSetting(
@@ -2565,7 +2569,7 @@ TEST(ApmConfiguration, EchoControlInjection) {
new MyEchoControlFactory());
rtc::scoped_refptr<AudioProcessing> apm =
- AudioProcessingBuilder()
+ AudioProcessingBuilderForTesting()
.SetEchoControlFactory(std::move(echo_control_factory))
.Create(webrtc_config);
@@ -2589,7 +2593,7 @@ TEST(ApmConfiguration, EchoControlInjection) {
std::unique_ptr<AudioProcessing> CreateApm(bool mobile_aec) {
Config old_config;
std::unique_ptr<AudioProcessing> apm(
- AudioProcessingBuilder().Create(old_config));
+ AudioProcessingBuilderForTesting().Create(old_config));
if (!apm) {
return apm;
}
@@ -2740,7 +2744,8 @@ TEST(ApmStatistics, ReportOutputRmsDbfs) {
ptr[i] = 10000 * ((i % 3) - 1);
}
- std::unique_ptr<AudioProcessing> apm(AudioProcessingBuilder().Create());
+ std::unique_ptr<AudioProcessing> apm(
+ AudioProcessingBuilderForTesting().Create());
apm->Initialize(processing_config);
// If not enabled, no metric should be reported.
@@ -2793,7 +2798,8 @@ TEST(ApmStatistics, ReportHasVoice) {
ptr[i] = 10000 * ((i % 3) - 1);
}
- std::unique_ptr<AudioProcessing> apm(AudioProcessingBuilder().Create());
+ std::unique_ptr<AudioProcessing> apm(
+ AudioProcessingBuilderForTesting().Create());
apm->Initialize(processing_config);
// If not enabled, no metric should be reported.
diff --git a/modules/audio_processing/include/audio_processing.h b/modules/audio_processing/include/audio_processing.h
index 7daac86ee3..9c2b09f2f4 100644
--- a/modules/audio_processing/include/audio_processing.h
+++ b/modules/audio_processing/include/audio_processing.h
@@ -685,19 +685,34 @@ class RTC_EXPORT AudioProcessingBuilder {
~AudioProcessingBuilder();
// The AudioProcessingBuilder takes ownership of the echo_control_factory.
AudioProcessingBuilder& SetEchoControlFactory(
- std::unique_ptr<EchoControlFactory> echo_control_factory);
+ std::unique_ptr<EchoControlFactory> echo_control_factory) {
+ echo_control_factory_ = std::move(echo_control_factory);
+ return *this;
+ }
// The AudioProcessingBuilder takes ownership of the capture_post_processing.
AudioProcessingBuilder& SetCapturePostProcessing(
- std::unique_ptr<CustomProcessing> capture_post_processing);
+ std::unique_ptr<CustomProcessing> capture_post_processing) {
+ capture_post_processing_ = std::move(capture_post_processing);
+ return *this;
+ }
// The AudioProcessingBuilder takes ownership of the render_pre_processing.
AudioProcessingBuilder& SetRenderPreProcessing(
- std::unique_ptr<CustomProcessing> render_pre_processing);
+ std::unique_ptr<CustomProcessing> render_pre_processing) {
+ render_pre_processing_ = std::move(render_pre_processing);
+ return *this;
+ }
// The AudioProcessingBuilder takes ownership of the echo_detector.
AudioProcessingBuilder& SetEchoDetector(
- rtc::scoped_refptr<EchoDetector> echo_detector);
+ rtc::scoped_refptr<EchoDetector> echo_detector) {
+ echo_detector_ = std::move(echo_detector);
+ return *this;
+ }
// The AudioProcessingBuilder takes ownership of the capture_analyzer.
AudioProcessingBuilder& SetCaptureAnalyzer(
- std::unique_ptr<CustomAudioAnalyzer> capture_analyzer);
+ std::unique_ptr<CustomAudioAnalyzer> capture_analyzer) {
+ capture_analyzer_ = std::move(capture_analyzer);
+ return *this;
+ }
// This creates an APM instance using the previously set components. Calling
// the Create function resets the AudioProcessingBuilder to its initial state.
AudioProcessing* Create();
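
With the setter bodies inlined, the builder interface itself is unchanged; a brief usage sketch, where EchoCanceller3Factory is only an example of an injected component and the final null check covers builds with the APM excluded:

#include <memory>
#include <utility>

#include "api/audio/echo_canceller3_factory.h"
#include "modules/audio_processing/include/audio_processing.h"

rtc::scoped_refptr<webrtc::AudioProcessing> BuildApm() {
  auto echo_control_factory = std::make_unique<webrtc::EchoCanceller3Factory>();
  rtc::scoped_refptr<webrtc::AudioProcessing> apm =
      webrtc::AudioProcessingBuilder()
          .SetEchoControlFactory(std::move(echo_control_factory))
          .Create();
  if (!apm) {
    // Built with WEBRTC_EXCLUDE_AUDIO_PROCESSING_MODULE; run without an APM.
  }
  return apm;
}
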
diff --git a/modules/audio_processing/test/audio_processing_builder_for_testing.cc b/modules/audio_processing/test/audio_processing_builder_for_testing.cc
new file mode 100644
index 0000000000..26ed679d7f
--- /dev/null
+++ b/modules/audio_processing/test/audio_processing_builder_for_testing.cc
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/test/audio_processing_builder_for_testing.h"
+
+#include <memory>
+#include <utility>
+
+#include "modules/audio_processing/audio_processing_impl.h"
+#include "rtc_base/ref_counted_object.h"
+
+namespace webrtc {
+
+AudioProcessingBuilderForTesting::AudioProcessingBuilderForTesting() = default;
+AudioProcessingBuilderForTesting::~AudioProcessingBuilderForTesting() = default;
+
+#ifdef WEBRTC_EXCLUDE_AUDIO_PROCESSING_MODULE
+
+AudioProcessing* AudioProcessingBuilderForTesting::Create() {
+ webrtc::Config config;
+ return Create(config);
+}
+
+AudioProcessing* AudioProcessingBuilderForTesting::Create(
+ const webrtc::Config& config) {
+ AudioProcessingImpl* apm = new rtc::RefCountedObject<AudioProcessingImpl>(
+ config, std::move(capture_post_processing_),
+ std::move(render_pre_processing_), std::move(echo_control_factory_),
+ std::move(echo_detector_), std::move(capture_analyzer_));
+ int error = apm->Initialize();
+ RTC_CHECK_EQ(error, AudioProcessing::kNoError);
+ return apm;
+}
+
+#else
+
+AudioProcessing* AudioProcessingBuilderForTesting::Create() {
+ AudioProcessingBuilder builder;
+ TransferOwnershipsToBuilder(&builder);
+ return builder.Create();
+}
+
+AudioProcessing* AudioProcessingBuilderForTesting::Create(
+ const webrtc::Config& config) {
+ AudioProcessingBuilder builder;
+ TransferOwnershipsToBuilder(&builder);
+ return builder.Create(config);
+}
+
+#endif
+
+void AudioProcessingBuilderForTesting::TransferOwnershipsToBuilder(
+ AudioProcessingBuilder* builder) {
+ builder->SetCapturePostProcessing(std::move(capture_post_processing_));
+ builder->SetRenderPreProcessing(std::move(render_pre_processing_));
+ builder->SetCaptureAnalyzer(std::move(capture_analyzer_));
+ builder->SetEchoControlFactory(std::move(echo_control_factory_));
+ builder->SetEchoDetector(std::move(echo_detector_));
+}
+
+} // namespace webrtc
diff --git a/modules/audio_processing/test/audio_processing_builder_for_testing.h b/modules/audio_processing/test/audio_processing_builder_for_testing.h
new file mode 100644
index 0000000000..a245450d45
--- /dev/null
+++ b/modules/audio_processing/test/audio_processing_builder_for_testing.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_TEST_AUDIO_PROCESSING_BUILDER_FOR_TESTING_H_
+#define MODULES_AUDIO_PROCESSING_TEST_AUDIO_PROCESSING_BUILDER_FOR_TESTING_H_
+
+#include <list>
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "modules/audio_processing/include/audio_processing.h"
+
+namespace webrtc {
+
+// Facilitates building of AudioProcessingImpl for the tests.
+class AudioProcessingBuilderForTesting {
+ public:
+ AudioProcessingBuilderForTesting();
+ ~AudioProcessingBuilderForTesting();
+ // The AudioProcessingBuilderForTesting takes ownership of the
+ // echo_control_factory.
+ AudioProcessingBuilderForTesting& SetEchoControlFactory(
+ std::unique_ptr<EchoControlFactory> echo_control_factory) {
+ echo_control_factory_ = std::move(echo_control_factory);
+ return *this;
+ }
+ // The AudioProcessingBuilderForTesting takes ownership of the
+ // capture_post_processing.
+ AudioProcessingBuilderForTesting& SetCapturePostProcessing(
+ std::unique_ptr<CustomProcessing> capture_post_processing) {
+ capture_post_processing_ = std::move(capture_post_processing);
+ return *this;
+ }
+ // The AudioProcessingBuilderForTesting takes ownership of the
+ // render_pre_processing.
+ AudioProcessingBuilderForTesting& SetRenderPreProcessing(
+ std::unique_ptr<CustomProcessing> render_pre_processing) {
+ render_pre_processing_ = std::move(render_pre_processing);
+ return *this;
+ }
+ // The AudioProcessingBuilderForTesting takes ownership of the echo_detector.
+ AudioProcessingBuilderForTesting& SetEchoDetector(
+ rtc::scoped_refptr<EchoDetector> echo_detector) {
+ echo_detector_ = std::move(echo_detector);
+ return *this;
+ }
+ // The AudioProcessingBuilderForTesting takes ownership of the
+ // capture_analyzer.
+ AudioProcessingBuilderForTesting& SetCaptureAnalyzer(
+ std::unique_ptr<CustomAudioAnalyzer> capture_analyzer) {
+ capture_analyzer_ = std::move(capture_analyzer);
+ return *this;
+ }
+ // This creates an APM instance using the previously set components. Calling
+ // the Create function resets the AudioProcessingBuilderForTesting to its
+ // initial state.
+ AudioProcessing* Create();
+ AudioProcessing* Create(const webrtc::Config& config);
+
+ private:
+  // Transfers ownership of the stored components to a non-testing builder.
+ void TransferOwnershipsToBuilder(AudioProcessingBuilder* builder);
+
+ std::unique_ptr<EchoControlFactory> echo_control_factory_;
+ std::unique_ptr<CustomProcessing> capture_post_processing_;
+ std::unique_ptr<CustomProcessing> render_pre_processing_;
+ rtc::scoped_refptr<EchoDetector> echo_detector_;
+ std::unique_ptr<CustomAudioAnalyzer> capture_analyzer_;
+};
+
+} // namespace webrtc
+
+#endif // MODULES_AUDIO_PROCESSING_TEST_AUDIO_PROCESSING_BUILDER_FOR_TESTING_H_
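
Unlike the production builder, this testing builder is expected to hand back a real APM in both build configurations: with WEBRTC_EXCLUDE_AUDIO_PROCESSING_MODULE set it instantiates AudioProcessingImpl directly (the implementation is still compiled for test targets), and otherwise it forwards its components to the regular AudioProcessingBuilder. A brief usage sketch from a test's point of view:

std::unique_ptr<webrtc::AudioProcessing> apm(
    webrtc::AudioProcessingBuilderForTesting().Create());
RTC_DCHECK(apm);  // Non-null even when the APM is excluded from the main build.
webrtc::AudioProcessing::Config config = apm->GetConfig();
config.gain_controller1.analog_gain_controller.enabled = false;
apm->ApplyConfig(config);
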
diff --git a/modules/audio_processing/test/debug_dump_replayer.cc b/modules/audio_processing/test/debug_dump_replayer.cc
index 26ca4290c3..b8cccd126c 100644
--- a/modules/audio_processing/test/debug_dump_replayer.cc
+++ b/modules/audio_processing/test/debug_dump_replayer.cc
@@ -10,6 +10,7 @@
#include "modules/audio_processing/test/debug_dump_replayer.h"
+#include "modules/audio_processing/test/audio_processing_builder_for_testing.h"
#include "modules/audio_processing/test/protobuf_utils.h"
#include "modules/audio_processing/test/runtime_setting_util.h"
#include "rtc_base/checks.h"
@@ -185,7 +186,7 @@ void DebugDumpReplayer::MaybeRecreateApm(const audioproc::Config& msg) {
// We only create APM once, since changes on these fields should not
// happen in current implementation.
if (!apm_.get()) {
- apm_.reset(AudioProcessingBuilder().Create(config));
+ apm_.reset(AudioProcessingBuilderForTesting().Create(config));
}
}
diff --git a/modules/audio_processing/test/debug_dump_test.cc b/modules/audio_processing/test/debug_dump_test.cc
index 71478a988c..2381d1e8b4 100644
--- a/modules/audio_processing/test/debug_dump_test.cc
+++ b/modules/audio_processing/test/debug_dump_test.cc
@@ -17,6 +17,7 @@
#include "api/audio/echo_canceller3_factory.h"
#include "modules/audio_coding/neteq/tools/resample_input_audio_file.h"
#include "modules/audio_processing/aec_dump/aec_dump_factory.h"
+#include "modules/audio_processing/test/audio_processing_builder_for_testing.h"
#include "modules/audio_processing/test/debug_dump_replayer.h"
#include "modules/audio_processing/test/test_utils.h"
#include "rtc_base/task_queue_for_test.h"
@@ -141,7 +142,7 @@ DebugDumpGenerator::DebugDumpGenerator(const std::string& input_file_name,
enable_pre_amplifier_(enable_pre_amplifier),
worker_queue_("debug_dump_generator_worker_queue"),
dump_file_name_(dump_file_name) {
- AudioProcessingBuilder apm_builder;
+ AudioProcessingBuilderForTesting apm_builder;
apm_.reset(apm_builder.Create(config));
}
diff --git a/pc/BUILD.gn b/pc/BUILD.gn
index 576685c29c..a48a0469d9 100644
--- a/pc/BUILD.gn
+++ b/pc/BUILD.gn
@@ -587,6 +587,7 @@ if (rtc_include_tests) {
"../media:rtc_media_engine_defaults",
"../modules/audio_device:audio_device_api",
"../modules/audio_processing:audio_processing_statistics",
+ "../modules/audio_processing:audioproc_test_utils",
"../modules/rtp_rtcp:rtp_rtcp_format",
"../p2p:fake_ice_transport",
"../p2p:fake_port_allocator",
diff --git a/pc/peer_connection_integrationtest.cc b/pc/peer_connection_integrationtest.cc
index 1575572ce8..f3b4f28360 100644
--- a/pc/peer_connection_integrationtest.cc
+++ b/pc/peer_connection_integrationtest.cc
@@ -36,6 +36,7 @@
#include "media/engine/fake_webrtc_video_engine.h"
#include "media/engine/webrtc_media_engine.h"
#include "media/engine/webrtc_media_engine_defaults.h"
+#include "modules/audio_processing/test/audio_processing_builder_for_testing.h"
#include "p2p/base/fake_ice_transport.h"
#include "p2p/base/mock_async_resolver.h"
#include "p2p/base/p2p_constants.h"
@@ -648,6 +649,12 @@ class PeerConnectionWrapper : public webrtc::PeerConnectionObserver,
media_deps.video_decoder_factory.reset();
}
+ if (!media_deps.audio_processing) {
+      // The standard creation method for the APM returned a null pointer, so
+      // use the builder for testing to create an APM object instead.
+ media_deps.audio_processing = AudioProcessingBuilderForTesting().Create();
+ }
+
pc_factory_dependencies.media_engine =
cricket::CreateMediaEngine(std::move(media_deps));
pc_factory_dependencies.call_factory = webrtc::CreateCallFactory();
diff --git a/test/fuzzers/BUILD.gn b/test/fuzzers/BUILD.gn
index f52797d67f..a7aa058ecb 100644
--- a/test/fuzzers/BUILD.gn
+++ b/test/fuzzers/BUILD.gn
@@ -453,6 +453,7 @@ webrtc_fuzzer_test("audio_processing_fuzzer") {
"../../modules/audio_processing",
"../../modules/audio_processing:api",
"../../modules/audio_processing:audio_buffer",
+ "../../modules/audio_processing:audioproc_test_utils",
"../../modules/audio_processing/aec3",
"../../modules/audio_processing/aec_dump",
"../../modules/audio_processing/aec_dump:aec_dump_impl",
diff --git a/test/fuzzers/audio_processing_configs_fuzzer.cc b/test/fuzzers/audio_processing_configs_fuzzer.cc
index 8fe9ad1c55..d511b7bc0e 100644
--- a/test/fuzzers/audio_processing_configs_fuzzer.cc
+++ b/test/fuzzers/audio_processing_configs_fuzzer.cc
@@ -16,6 +16,7 @@
#include "api/task_queue/default_task_queue_factory.h"
#include "modules/audio_processing/aec_dump/aec_dump_factory.h"
#include "modules/audio_processing/include/audio_processing.h"
+#include "modules/audio_processing/test/audio_processing_builder_for_testing.h"
#include "rtc_base/arraysize.h"
#include "rtc_base/numerics/safe_minmax.h"
#include "rtc_base/task_queue.h"
@@ -108,7 +109,7 @@ std::unique_ptr<AudioProcessing> CreateApm(test::FuzzDataHelper* fuzz_data,
config.Set<ExperimentalNs>(new ExperimentalNs(exp_ns));
std::unique_ptr<AudioProcessing> apm(
- AudioProcessingBuilder()
+ AudioProcessingBuilderForTesting()
.SetEchoControlFactory(std::move(echo_control_factory))
.Create(config));
diff --git a/test/pc/e2e/test_peer.h b/test/pc/e2e/test_peer.h
index cd6435ca40..ae664f4810 100644
--- a/test/pc/e2e/test_peer.h
+++ b/test/pc/e2e/test_peer.h
@@ -33,7 +33,11 @@ class TestPeer final : public PeerConnectionWrapper {
return std::move(video_generators_[i]);
}
- void DetachAecDump() { audio_processing_->DetachAecDump(); }
+ void DetachAecDump() {
+ if (audio_processing_) {
+ audio_processing_->DetachAecDump();
+ }
+ }
// Adds provided |candidates| to the owned peer connection.
bool AddIceCandidates(
diff --git a/test/pc/e2e/test_peer_factory.cc b/test/pc/e2e/test_peer_factory.cc
index 84045091ce..4fc6578894 100644
--- a/test/pc/e2e/test_peer_factory.cc
+++ b/test/pc/e2e/test_peer_factory.cc
@@ -290,7 +290,7 @@ std::unique_ptr<TestPeer> TestPeerFactory::CreateTestPeer(
// Create peer connection factory.
rtc::scoped_refptr<AudioProcessing> audio_processing =
webrtc::AudioProcessingBuilder().Create();
- if (params->aec_dump_path) {
+ if (params->aec_dump_path && audio_processing) {
audio_processing->AttachAecDump(
AecDumpFactory::Create(*params->aec_dump_path, -1, task_queue));
}
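
The same defensive pattern applies wherever test code calls straight into the APM: each call is guarded on the pointer being non-null, as in the two hunks above. A hypothetical helper (not part of this CL) illustrating the pattern:

void MaybeAttachAecDump(webrtc::AudioProcessing* audio_processing,
                        std::unique_ptr<webrtc::AecDump> aec_dump) {
  // With the APM excluded from the build, audio_processing may be null.
  if (audio_processing && aec_dump) {
    audio_processing->AttachAecDump(std::move(aec_dump));
  }
}
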
diff --git a/webrtc.gni b/webrtc.gni
index dcd34007d2..9750d4d507 100644
--- a/webrtc.gni
+++ b/webrtc.gni
@@ -96,6 +96,9 @@ declare_args() {
# should be generated.
apm_debug_dump = false
+  # Set this to true to exclude the audio processing module from the build.
+ rtc_exclude_audio_processing_module = false
+
# Set this to true to enable BWE test logging.
rtc_enable_bwe_test_logging = false