path: root/audio
author     Per Åhgren <peah@webrtc.org>            2020-04-26 23:56:17 +0200
committer  Commit Bot <commit-bot@chromium.org>    2020-04-26 23:06:44 +0000
commit     cc73ed3e70d1968f8e2bc365880d8273028e14a6 (patch)
tree       e26c3f0be0eeac5eadd10d8e65499729d1864959 /audio
parent     86bd33a1e77efc85383c61589dacd4310fdcb50e (diff)
download   webrtc-cc73ed3e70d1968f8e2bc365880d8273028e14a6.tar.gz
APM: Add build flag to allow building WebRTC without APM
This CL adds a build flag to allow building the non-test parts of WebRTC
without the audio processing module. The CL also ensures that the WebRTC
code correctly handles the case when no APM is available.

Bug: webrtc:5298
Change-Id: I5c8b5d1f7115e5cce2af4c2b5ff701fa1c54e49e
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/171509
Commit-Queue: Per Åhgren <peah@webrtc.org>
Reviewed-by: Sam Zackrisson <saza@webrtc.org>
Reviewed-by: Karl Wiberg <kwiberg@webrtc.org>
Reviewed-by: Mirko Bonadei <mbonadei@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#31133}
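At the core of the CL, AudioState::audio_processing() may now return nullptr (the RTC_DCHECK in audio/audio_state.cc is removed below), so callers have to guard against a missing APM before querying it. A minimal, self-contained sketch of that guard pattern, using hypothetical stand-in types rather than the real WebRTC classes; only the null check itself mirrors the audio/audio_send_stream.cc hunk below:

#include <iostream>

// Hypothetical stand-ins for webrtc::AudioProcessing and its statistics,
// used only to illustrate the null-APM guard this CL introduces.
struct ApmStatistics {
  int delay_median_ms = 0;
};

class AudioProcessing {
 public:
  ApmStatistics GetStatistics(bool has_remote_tracks) { return {13}; }
};

struct Stats {
  ApmStatistics apm_statistics;
};

// The APM pointer may be null when WebRTC is built without the audio
// processing module, so statistics are only queried when it exists.
Stats GetStats(AudioProcessing* ap, bool has_remote_tracks) {
  Stats stats;
  if (ap) {
    stats.apm_statistics = ap->GetStatistics(has_remote_tracks);
  }
  return stats;
}

int main() {
  AudioProcessing apm;
  std::cout << GetStats(&apm, true).apm_statistics.delay_median_ms << "\n";     // 13
  std::cout << GetStats(nullptr, true).apm_statistics.delay_median_ms << "\n";  // 0
  return 0;
}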
Diffstat (limited to 'audio')
-rw-r--r--  audio/audio_receive_stream_unittest.cc   334
-rw-r--r--  audio/audio_send_stream.cc                  8
-rw-r--r--  audio/audio_send_stream_unittest.cc       718
-rw-r--r--  audio/audio_state.cc                        1
-rw-r--r--  audio/audio_state_unittest.cc             299
-rw-r--r--  audio/audio_transport_impl.cc              23
6 files changed, 753 insertions(+), 630 deletions(-)
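All of the unittest hunks below apply the same mechanical change: each previously single-configuration test body is wrapped in a loop over both APM configurations, and the test ConfigHelper gains a use_null_audio_processing parameter that selects between a mock APM and nullptr. A small sketch of the pattern with a hypothetical helper (the real helpers are in the diffs below; the test needs to be linked against gtest_main to run):

#include <gtest/gtest.h>

// Hypothetical, trimmed-down analogue of the ConfigHelper changes below:
// the flag decides whether an APM instance is created at all.
struct FakeConfigHelper {
  explicit FakeConfigHelper(bool use_null_audio_processing)
      : has_audio_processing(!use_null_audio_processing) {}
  bool has_audio_processing;
};

TEST(NullApmPatternSketch, RunsWithAndWithoutApm) {
  // Each test body in the diff is wrapped in this loop so that both the
  // "APM present" and "APM absent" configurations are exercised.
  for (bool use_null_audio_processing : {false, true}) {
    FakeConfigHelper helper(use_null_audio_processing);
    EXPECT_EQ(!use_null_audio_processing, helper.has_audio_processing);
  }
}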
diff --git a/audio/audio_receive_stream_unittest.cc b/audio/audio_receive_stream_unittest.cc
index 186eb1c67d..7759dd1e72 100644
--- a/audio/audio_receive_stream_unittest.cc
+++ b/audio/audio_receive_stream_unittest.cc
@@ -75,15 +75,21 @@ const NetworkStatistics kNetworkStats = {
const AudioDecodingCallStats kAudioDecodeStats = MakeAudioDecodeStatsForTest();
struct ConfigHelper {
- ConfigHelper() : ConfigHelper(new rtc::RefCountedObject<MockAudioMixer>()) {}
+ explicit ConfigHelper(bool use_null_audio_processing)
+ : ConfigHelper(new rtc::RefCountedObject<MockAudioMixer>(),
+ use_null_audio_processing) {}
- explicit ConfigHelper(rtc::scoped_refptr<MockAudioMixer> audio_mixer)
+ ConfigHelper(rtc::scoped_refptr<MockAudioMixer> audio_mixer,
+ bool use_null_audio_processing)
: audio_mixer_(audio_mixer) {
using ::testing::Invoke;
AudioState::Config config;
config.audio_mixer = audio_mixer_;
- config.audio_processing = new rtc::RefCountedObject<MockAudioProcessing>();
+ config.audio_processing =
+ use_null_audio_processing
+ ? nullptr
+ : new rtc::RefCountedObject<MockAudioProcessing>();
config.audio_device_module =
new rtc::RefCountedObject<testing::NiceMock<MockAudioDeviceModule>>();
audio_state_ = AudioState::Create(config);
@@ -230,182 +236,200 @@ TEST(AudioReceiveStreamTest, ConfigToString) {
}
TEST(AudioReceiveStreamTest, ConstructDestruct) {
- ConfigHelper helper;
- auto recv_stream = helper.CreateAudioReceiveStream();
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(use_null_audio_processing);
+ auto recv_stream = helper.CreateAudioReceiveStream();
+ }
}
TEST(AudioReceiveStreamTest, ReceiveRtpPacket) {
- ConfigHelper helper;
- helper.config().rtp.transport_cc = true;
- auto recv_stream = helper.CreateAudioReceiveStream();
- const int kTransportSequenceNumberValue = 1234;
- std::vector<uint8_t> rtp_packet = CreateRtpHeaderWithOneByteExtension(
- kTransportSequenceNumberId, kTransportSequenceNumberValue, 2);
- constexpr int64_t packet_time_us = 5678000;
-
- RtpPacketReceived parsed_packet;
- ASSERT_TRUE(parsed_packet.Parse(&rtp_packet[0], rtp_packet.size()));
- parsed_packet.set_arrival_time_ms((packet_time_us + 500) / 1000);
-
- EXPECT_CALL(*helper.channel_receive(),
- OnRtpPacket(::testing::Ref(parsed_packet)));
-
- recv_stream->OnRtpPacket(parsed_packet);
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(use_null_audio_processing);
+ helper.config().rtp.transport_cc = true;
+ auto recv_stream = helper.CreateAudioReceiveStream();
+ const int kTransportSequenceNumberValue = 1234;
+ std::vector<uint8_t> rtp_packet = CreateRtpHeaderWithOneByteExtension(
+ kTransportSequenceNumberId, kTransportSequenceNumberValue, 2);
+ constexpr int64_t packet_time_us = 5678000;
+
+ RtpPacketReceived parsed_packet;
+ ASSERT_TRUE(parsed_packet.Parse(&rtp_packet[0], rtp_packet.size()));
+ parsed_packet.set_arrival_time_ms((packet_time_us + 500) / 1000);
+
+ EXPECT_CALL(*helper.channel_receive(),
+ OnRtpPacket(::testing::Ref(parsed_packet)));
+
+ recv_stream->OnRtpPacket(parsed_packet);
+ }
}
TEST(AudioReceiveStreamTest, ReceiveRtcpPacket) {
- ConfigHelper helper;
- helper.config().rtp.transport_cc = true;
- auto recv_stream = helper.CreateAudioReceiveStream();
- std::vector<uint8_t> rtcp_packet = CreateRtcpSenderReport();
- EXPECT_CALL(*helper.channel_receive(),
- ReceivedRTCPPacket(&rtcp_packet[0], rtcp_packet.size()))
- .WillOnce(Return());
- recv_stream->DeliverRtcp(&rtcp_packet[0], rtcp_packet.size());
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(use_null_audio_processing);
+ helper.config().rtp.transport_cc = true;
+ auto recv_stream = helper.CreateAudioReceiveStream();
+ std::vector<uint8_t> rtcp_packet = CreateRtcpSenderReport();
+ EXPECT_CALL(*helper.channel_receive(),
+ ReceivedRTCPPacket(&rtcp_packet[0], rtcp_packet.size()))
+ .WillOnce(Return());
+ recv_stream->DeliverRtcp(&rtcp_packet[0], rtcp_packet.size());
+ }
}
TEST(AudioReceiveStreamTest, GetStats) {
- ConfigHelper helper;
- auto recv_stream = helper.CreateAudioReceiveStream();
- helper.SetupMockForGetStats();
- AudioReceiveStream::Stats stats = recv_stream->GetStats();
- EXPECT_EQ(kRemoteSsrc, stats.remote_ssrc);
- EXPECT_EQ(kCallStats.payload_bytes_rcvd, stats.payload_bytes_rcvd);
- EXPECT_EQ(kCallStats.header_and_padding_bytes_rcvd,
- stats.header_and_padding_bytes_rcvd);
- EXPECT_EQ(static_cast<uint32_t>(kCallStats.packetsReceived),
- stats.packets_rcvd);
- EXPECT_EQ(kCallStats.cumulativeLost, stats.packets_lost);
- EXPECT_EQ(kReceiveCodec.second.name, stats.codec_name);
- EXPECT_EQ(
- kCallStats.jitterSamples / (kReceiveCodec.second.clockrate_hz / 1000),
- stats.jitter_ms);
- EXPECT_EQ(kNetworkStats.currentBufferSize, stats.jitter_buffer_ms);
- EXPECT_EQ(kNetworkStats.preferredBufferSize,
- stats.jitter_buffer_preferred_ms);
- EXPECT_EQ(static_cast<uint32_t>(kJitterBufferDelay + kPlayoutBufferDelay),
- stats.delay_estimate_ms);
- EXPECT_EQ(static_cast<int32_t>(kSpeechOutputLevel), stats.audio_level);
- EXPECT_EQ(kTotalOutputEnergy, stats.total_output_energy);
- EXPECT_EQ(kNetworkStats.totalSamplesReceived, stats.total_samples_received);
- EXPECT_EQ(kTotalOutputDuration, stats.total_output_duration);
- EXPECT_EQ(kNetworkStats.concealedSamples, stats.concealed_samples);
- EXPECT_EQ(kNetworkStats.concealmentEvents, stats.concealment_events);
- EXPECT_EQ(static_cast<double>(kNetworkStats.jitterBufferDelayMs) /
- static_cast<double>(rtc::kNumMillisecsPerSec),
- stats.jitter_buffer_delay_seconds);
- EXPECT_EQ(kNetworkStats.jitterBufferEmittedCount,
- stats.jitter_buffer_emitted_count);
- EXPECT_EQ(static_cast<double>(kNetworkStats.jitterBufferTargetDelayMs) /
- static_cast<double>(rtc::kNumMillisecsPerSec),
- stats.jitter_buffer_target_delay_seconds);
- EXPECT_EQ(Q14ToFloat(kNetworkStats.currentExpandRate), stats.expand_rate);
- EXPECT_EQ(Q14ToFloat(kNetworkStats.currentSpeechExpandRate),
- stats.speech_expand_rate);
- EXPECT_EQ(Q14ToFloat(kNetworkStats.currentSecondaryDecodedRate),
- stats.secondary_decoded_rate);
- EXPECT_EQ(Q14ToFloat(kNetworkStats.currentSecondaryDiscardedRate),
- stats.secondary_discarded_rate);
- EXPECT_EQ(Q14ToFloat(kNetworkStats.currentAccelerateRate),
- stats.accelerate_rate);
- EXPECT_EQ(Q14ToFloat(kNetworkStats.currentPreemptiveRate),
- stats.preemptive_expand_rate);
- EXPECT_EQ(kAudioDecodeStats.calls_to_silence_generator,
- stats.decoding_calls_to_silence_generator);
- EXPECT_EQ(kAudioDecodeStats.calls_to_neteq, stats.decoding_calls_to_neteq);
- EXPECT_EQ(kAudioDecodeStats.decoded_normal, stats.decoding_normal);
- EXPECT_EQ(kAudioDecodeStats.decoded_neteq_plc, stats.decoding_plc);
- EXPECT_EQ(kAudioDecodeStats.decoded_codec_plc, stats.decoding_codec_plc);
- EXPECT_EQ(kAudioDecodeStats.decoded_cng, stats.decoding_cng);
- EXPECT_EQ(kAudioDecodeStats.decoded_plc_cng, stats.decoding_plc_cng);
- EXPECT_EQ(kAudioDecodeStats.decoded_muted_output,
- stats.decoding_muted_output);
- EXPECT_EQ(kCallStats.capture_start_ntp_time_ms_,
- stats.capture_start_ntp_time_ms);
- EXPECT_EQ(kPlayoutNtpTimestampMs, stats.estimated_playout_ntp_timestamp_ms);
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(use_null_audio_processing);
+ auto recv_stream = helper.CreateAudioReceiveStream();
+ helper.SetupMockForGetStats();
+ AudioReceiveStream::Stats stats = recv_stream->GetStats();
+ EXPECT_EQ(kRemoteSsrc, stats.remote_ssrc);
+ EXPECT_EQ(kCallStats.payload_bytes_rcvd, stats.payload_bytes_rcvd);
+ EXPECT_EQ(kCallStats.header_and_padding_bytes_rcvd,
+ stats.header_and_padding_bytes_rcvd);
+ EXPECT_EQ(static_cast<uint32_t>(kCallStats.packetsReceived),
+ stats.packets_rcvd);
+ EXPECT_EQ(kCallStats.cumulativeLost, stats.packets_lost);
+ EXPECT_EQ(kReceiveCodec.second.name, stats.codec_name);
+ EXPECT_EQ(
+ kCallStats.jitterSamples / (kReceiveCodec.second.clockrate_hz / 1000),
+ stats.jitter_ms);
+ EXPECT_EQ(kNetworkStats.currentBufferSize, stats.jitter_buffer_ms);
+ EXPECT_EQ(kNetworkStats.preferredBufferSize,
+ stats.jitter_buffer_preferred_ms);
+ EXPECT_EQ(static_cast<uint32_t>(kJitterBufferDelay + kPlayoutBufferDelay),
+ stats.delay_estimate_ms);
+ EXPECT_EQ(static_cast<int32_t>(kSpeechOutputLevel), stats.audio_level);
+ EXPECT_EQ(kTotalOutputEnergy, stats.total_output_energy);
+ EXPECT_EQ(kNetworkStats.totalSamplesReceived, stats.total_samples_received);
+ EXPECT_EQ(kTotalOutputDuration, stats.total_output_duration);
+ EXPECT_EQ(kNetworkStats.concealedSamples, stats.concealed_samples);
+ EXPECT_EQ(kNetworkStats.concealmentEvents, stats.concealment_events);
+ EXPECT_EQ(static_cast<double>(kNetworkStats.jitterBufferDelayMs) /
+ static_cast<double>(rtc::kNumMillisecsPerSec),
+ stats.jitter_buffer_delay_seconds);
+ EXPECT_EQ(kNetworkStats.jitterBufferEmittedCount,
+ stats.jitter_buffer_emitted_count);
+ EXPECT_EQ(static_cast<double>(kNetworkStats.jitterBufferTargetDelayMs) /
+ static_cast<double>(rtc::kNumMillisecsPerSec),
+ stats.jitter_buffer_target_delay_seconds);
+ EXPECT_EQ(Q14ToFloat(kNetworkStats.currentExpandRate), stats.expand_rate);
+ EXPECT_EQ(Q14ToFloat(kNetworkStats.currentSpeechExpandRate),
+ stats.speech_expand_rate);
+ EXPECT_EQ(Q14ToFloat(kNetworkStats.currentSecondaryDecodedRate),
+ stats.secondary_decoded_rate);
+ EXPECT_EQ(Q14ToFloat(kNetworkStats.currentSecondaryDiscardedRate),
+ stats.secondary_discarded_rate);
+ EXPECT_EQ(Q14ToFloat(kNetworkStats.currentAccelerateRate),
+ stats.accelerate_rate);
+ EXPECT_EQ(Q14ToFloat(kNetworkStats.currentPreemptiveRate),
+ stats.preemptive_expand_rate);
+ EXPECT_EQ(kAudioDecodeStats.calls_to_silence_generator,
+ stats.decoding_calls_to_silence_generator);
+ EXPECT_EQ(kAudioDecodeStats.calls_to_neteq, stats.decoding_calls_to_neteq);
+ EXPECT_EQ(kAudioDecodeStats.decoded_normal, stats.decoding_normal);
+ EXPECT_EQ(kAudioDecodeStats.decoded_neteq_plc, stats.decoding_plc);
+ EXPECT_EQ(kAudioDecodeStats.decoded_codec_plc, stats.decoding_codec_plc);
+ EXPECT_EQ(kAudioDecodeStats.decoded_cng, stats.decoding_cng);
+ EXPECT_EQ(kAudioDecodeStats.decoded_plc_cng, stats.decoding_plc_cng);
+ EXPECT_EQ(kAudioDecodeStats.decoded_muted_output,
+ stats.decoding_muted_output);
+ EXPECT_EQ(kCallStats.capture_start_ntp_time_ms_,
+ stats.capture_start_ntp_time_ms);
+ EXPECT_EQ(kPlayoutNtpTimestampMs, stats.estimated_playout_ntp_timestamp_ms);
+ }
}
TEST(AudioReceiveStreamTest, SetGain) {
- ConfigHelper helper;
- auto recv_stream = helper.CreateAudioReceiveStream();
- EXPECT_CALL(*helper.channel_receive(),
- SetChannelOutputVolumeScaling(FloatEq(0.765f)));
- recv_stream->SetGain(0.765f);
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(use_null_audio_processing);
+ auto recv_stream = helper.CreateAudioReceiveStream();
+ EXPECT_CALL(*helper.channel_receive(),
+ SetChannelOutputVolumeScaling(FloatEq(0.765f)));
+ recv_stream->SetGain(0.765f);
+ }
}
TEST(AudioReceiveStreamTest, StreamsShouldBeAddedToMixerOnceOnStart) {
- ConfigHelper helper1;
- ConfigHelper helper2(helper1.audio_mixer());
- auto recv_stream1 = helper1.CreateAudioReceiveStream();
- auto recv_stream2 = helper2.CreateAudioReceiveStream();
-
- EXPECT_CALL(*helper1.channel_receive(), StartPlayout()).Times(1);
- EXPECT_CALL(*helper2.channel_receive(), StartPlayout()).Times(1);
- EXPECT_CALL(*helper1.channel_receive(), StopPlayout()).Times(1);
- EXPECT_CALL(*helper2.channel_receive(), StopPlayout()).Times(1);
- EXPECT_CALL(*helper1.audio_mixer(), AddSource(recv_stream1.get()))
- .WillOnce(Return(true));
- EXPECT_CALL(*helper1.audio_mixer(), AddSource(recv_stream2.get()))
- .WillOnce(Return(true));
- EXPECT_CALL(*helper1.audio_mixer(), RemoveSource(recv_stream1.get()))
- .Times(1);
- EXPECT_CALL(*helper1.audio_mixer(), RemoveSource(recv_stream2.get()))
- .Times(1);
-
- recv_stream1->Start();
- recv_stream2->Start();
-
- // One more should not result in any more mixer sources added.
- recv_stream1->Start();
-
- // Stop stream before it is being destructed.
- recv_stream2->Stop();
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper1(use_null_audio_processing);
+ ConfigHelper helper2(helper1.audio_mixer(), use_null_audio_processing);
+ auto recv_stream1 = helper1.CreateAudioReceiveStream();
+ auto recv_stream2 = helper2.CreateAudioReceiveStream();
+
+ EXPECT_CALL(*helper1.channel_receive(), StartPlayout()).Times(1);
+ EXPECT_CALL(*helper2.channel_receive(), StartPlayout()).Times(1);
+ EXPECT_CALL(*helper1.channel_receive(), StopPlayout()).Times(1);
+ EXPECT_CALL(*helper2.channel_receive(), StopPlayout()).Times(1);
+ EXPECT_CALL(*helper1.audio_mixer(), AddSource(recv_stream1.get()))
+ .WillOnce(Return(true));
+ EXPECT_CALL(*helper1.audio_mixer(), AddSource(recv_stream2.get()))
+ .WillOnce(Return(true));
+ EXPECT_CALL(*helper1.audio_mixer(), RemoveSource(recv_stream1.get()))
+ .Times(1);
+ EXPECT_CALL(*helper1.audio_mixer(), RemoveSource(recv_stream2.get()))
+ .Times(1);
+
+ recv_stream1->Start();
+ recv_stream2->Start();
+
+ // One more should not result in any more mixer sources added.
+ recv_stream1->Start();
+
+ // Stop stream before it is being destructed.
+ recv_stream2->Stop();
+ }
}
TEST(AudioReceiveStreamTest, ReconfigureWithSameConfig) {
- ConfigHelper helper;
- auto recv_stream = helper.CreateAudioReceiveStream();
- recv_stream->Reconfigure(helper.config());
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(use_null_audio_processing);
+ auto recv_stream = helper.CreateAudioReceiveStream();
+ recv_stream->Reconfigure(helper.config());
+ }
}
TEST(AudioReceiveStreamTest, ReconfigureWithUpdatedConfig) {
- ConfigHelper helper;
- auto recv_stream = helper.CreateAudioReceiveStream();
-
- auto new_config = helper.config();
- new_config.rtp.nack.rtp_history_ms = 300 + 20;
- new_config.rtp.extensions.clear();
- new_config.rtp.extensions.push_back(
- RtpExtension(RtpExtension::kAudioLevelUri, kAudioLevelId + 1));
- new_config.rtp.extensions.push_back(
- RtpExtension(RtpExtension::kTransportSequenceNumberUri,
- kTransportSequenceNumberId + 1));
- new_config.decoder_map.emplace(1, SdpAudioFormat("foo", 8000, 1));
-
- MockChannelReceive& channel_receive = *helper.channel_receive();
- EXPECT_CALL(channel_receive, SetNACKStatus(true, 15 + 1)).Times(1);
- EXPECT_CALL(channel_receive, SetReceiveCodecs(new_config.decoder_map));
-
- recv_stream->Reconfigure(new_config);
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(use_null_audio_processing);
+ auto recv_stream = helper.CreateAudioReceiveStream();
+
+ auto new_config = helper.config();
+ new_config.rtp.nack.rtp_history_ms = 300 + 20;
+ new_config.rtp.extensions.clear();
+ new_config.rtp.extensions.push_back(
+ RtpExtension(RtpExtension::kAudioLevelUri, kAudioLevelId + 1));
+ new_config.rtp.extensions.push_back(
+ RtpExtension(RtpExtension::kTransportSequenceNumberUri,
+ kTransportSequenceNumberId + 1));
+ new_config.decoder_map.emplace(1, SdpAudioFormat("foo", 8000, 1));
+
+ MockChannelReceive& channel_receive = *helper.channel_receive();
+ EXPECT_CALL(channel_receive, SetNACKStatus(true, 15 + 1)).Times(1);
+ EXPECT_CALL(channel_receive, SetReceiveCodecs(new_config.decoder_map));
+
+ recv_stream->Reconfigure(new_config);
+ }
}
TEST(AudioReceiveStreamTest, ReconfigureWithFrameDecryptor) {
- ConfigHelper helper;
- auto recv_stream = helper.CreateAudioReceiveStream();
-
- auto new_config_0 = helper.config();
- rtc::scoped_refptr<FrameDecryptorInterface> mock_frame_decryptor_0(
- new rtc::RefCountedObject<MockFrameDecryptor>());
- new_config_0.frame_decryptor = mock_frame_decryptor_0;
-
- recv_stream->Reconfigure(new_config_0);
-
- auto new_config_1 = helper.config();
- rtc::scoped_refptr<FrameDecryptorInterface> mock_frame_decryptor_1(
- new rtc::RefCountedObject<MockFrameDecryptor>());
- new_config_1.frame_decryptor = mock_frame_decryptor_1;
- new_config_1.crypto_options.sframe.require_frame_encryption = true;
- recv_stream->Reconfigure(new_config_1);
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(use_null_audio_processing);
+ auto recv_stream = helper.CreateAudioReceiveStream();
+
+ auto new_config_0 = helper.config();
+ rtc::scoped_refptr<FrameDecryptorInterface> mock_frame_decryptor_0(
+ new rtc::RefCountedObject<MockFrameDecryptor>());
+ new_config_0.frame_decryptor = mock_frame_decryptor_0;
+
+ recv_stream->Reconfigure(new_config_0);
+
+ auto new_config_1 = helper.config();
+ rtc::scoped_refptr<FrameDecryptorInterface> mock_frame_decryptor_1(
+ new rtc::RefCountedObject<MockFrameDecryptor>());
+ new_config_1.frame_decryptor = mock_frame_decryptor_1;
+ new_config_1.crypto_options.sframe.require_frame_encryption = true;
+ recv_stream->Reconfigure(new_config_1);
+ }
}
} // namespace test
diff --git a/audio/audio_send_stream.cc b/audio/audio_send_stream.cc
index acdc73b9a9..a44b55f95f 100644
--- a/audio/audio_send_stream.cc
+++ b/audio/audio_send_stream.cc
@@ -490,9 +490,11 @@ webrtc::AudioSendStream::Stats AudioSendStream::GetStats(
stats.typing_noise_detected = audio_state()->typing_noise_detected();
stats.ana_statistics = channel_send_->GetANAStatistics();
- RTC_DCHECK(audio_state_->audio_processing());
- stats.apm_statistics =
- audio_state_->audio_processing()->GetStatistics(has_remote_tracks);
+
+ AudioProcessing* ap = audio_state_->audio_processing();
+ if (ap) {
+ stats.apm_statistics = ap->GetStatistics(has_remote_tracks);
+ }
stats.report_block_datas = std::move(call_stats.report_block_datas);
diff --git a/audio/audio_send_stream_unittest.cc b/audio/audio_send_stream_unittest.cc
index de1f2fe007..8f33d29f70 100644
--- a/audio/audio_send_stream_unittest.cc
+++ b/audio/audio_send_stream_unittest.cc
@@ -141,11 +141,16 @@ rtc::scoped_refptr<MockAudioEncoderFactory> SetupEncoderFactoryMock() {
}
struct ConfigHelper {
- ConfigHelper(bool audio_bwe_enabled, bool expect_set_encoder_call)
+ ConfigHelper(bool audio_bwe_enabled,
+ bool expect_set_encoder_call,
+ bool use_null_audio_processing)
: clock_(1000000),
task_queue_factory_(CreateDefaultTaskQueueFactory()),
stream_config_(/*send_transport=*/nullptr),
- audio_processing_(new rtc::RefCountedObject<MockAudioProcessing>()),
+ audio_processing_(
+ use_null_audio_processing
+ ? nullptr
+ : new rtc::RefCountedObject<MockAudioProcessing>()),
bitrate_allocator_(&limit_observer_),
worker_queue_(task_queue_factory_->CreateTaskQueue(
"ConfigHelper_worker_queue",
@@ -273,7 +278,7 @@ struct ConfigHelper {
.WillOnce(Return(true));
}
- void SetupMockForGetStats() {
+ void SetupMockForGetStats(bool use_null_audio_processing) {
using ::testing::DoAll;
using ::testing::SetArgPointee;
using ::testing::SetArgReferee;
@@ -305,10 +310,13 @@ struct ConfigHelper {
audio_processing_stats_.residual_echo_likelihood = kResidualEchoLikelihood;
audio_processing_stats_.residual_echo_likelihood_recent_max =
kResidualEchoLikelihoodMax;
-
- EXPECT_CALL(*audio_processing_, GetStatistics(true))
- .WillRepeatedly(Return(audio_processing_stats_));
+ if (!use_null_audio_processing) {
+ ASSERT_TRUE(audio_processing_);
+ EXPECT_CALL(*audio_processing_, GetStatistics(true))
+ .WillRepeatedly(Return(audio_processing_stats_));
+ }
}
+
TaskQueueForTest* worker() { return &worker_queue_; }
private:
@@ -381,235 +389,270 @@ TEST(AudioSendStreamTest, ConfigToString) {
}
TEST(AudioSendStreamTest, ConstructDestruct) {
- ConfigHelper helper(false, true);
- auto send_stream = helper.CreateAudioSendStream();
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(false, true, use_null_audio_processing);
+ auto send_stream = helper.CreateAudioSendStream();
+ }
}
TEST(AudioSendStreamTest, SendTelephoneEvent) {
- ConfigHelper helper(false, true);
- auto send_stream = helper.CreateAudioSendStream();
- helper.SetupMockForSendTelephoneEvent();
- EXPECT_TRUE(send_stream->SendTelephoneEvent(
- kTelephoneEventPayloadType, kTelephoneEventPayloadFrequency,
- kTelephoneEventCode, kTelephoneEventDuration));
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(false, true, use_null_audio_processing);
+ auto send_stream = helper.CreateAudioSendStream();
+ helper.SetupMockForSendTelephoneEvent();
+ EXPECT_TRUE(send_stream->SendTelephoneEvent(
+ kTelephoneEventPayloadType, kTelephoneEventPayloadFrequency,
+ kTelephoneEventCode, kTelephoneEventDuration));
+ }
}
TEST(AudioSendStreamTest, SetMuted) {
- ConfigHelper helper(false, true);
- auto send_stream = helper.CreateAudioSendStream();
- EXPECT_CALL(*helper.channel_send(), SetInputMute(true));
- send_stream->SetMuted(true);
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(false, true, use_null_audio_processing);
+ auto send_stream = helper.CreateAudioSendStream();
+ EXPECT_CALL(*helper.channel_send(), SetInputMute(true));
+ send_stream->SetMuted(true);
+ }
}
TEST(AudioSendStreamTest, AudioBweCorrectObjectsOnChannelProxy) {
ScopedFieldTrials field_trials("WebRTC-Audio-SendSideBwe/Enabled/");
- ConfigHelper helper(true, true);
- auto send_stream = helper.CreateAudioSendStream();
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(true, true, use_null_audio_processing);
+ auto send_stream = helper.CreateAudioSendStream();
+ }
}
TEST(AudioSendStreamTest, NoAudioBweCorrectObjectsOnChannelProxy) {
- ConfigHelper helper(false, true);
- auto send_stream = helper.CreateAudioSendStream();
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(false, true, use_null_audio_processing);
+ auto send_stream = helper.CreateAudioSendStream();
+ }
}
TEST(AudioSendStreamTest, GetStats) {
- ConfigHelper helper(false, true);
- auto send_stream = helper.CreateAudioSendStream();
- helper.SetupMockForGetStats();
- AudioSendStream::Stats stats = send_stream->GetStats(true);
- EXPECT_EQ(kSsrc, stats.local_ssrc);
- EXPECT_EQ(kCallStats.payload_bytes_sent, stats.payload_bytes_sent);
- EXPECT_EQ(kCallStats.header_and_padding_bytes_sent,
- stats.header_and_padding_bytes_sent);
- EXPECT_EQ(kCallStats.packetsSent, stats.packets_sent);
- EXPECT_EQ(kReportBlock.cumulative_num_packets_lost, stats.packets_lost);
- EXPECT_EQ(Q8ToFloat(kReportBlock.fraction_lost), stats.fraction_lost);
- EXPECT_EQ(kIsacFormat.name, stats.codec_name);
- EXPECT_EQ(static_cast<int32_t>(kReportBlock.interarrival_jitter /
- (kIsacFormat.clockrate_hz / 1000)),
- stats.jitter_ms);
- EXPECT_EQ(kCallStats.rttMs, stats.rtt_ms);
- EXPECT_EQ(0, stats.audio_level);
- EXPECT_EQ(0, stats.total_input_energy);
- EXPECT_EQ(0, stats.total_input_duration);
- EXPECT_EQ(kEchoDelayMedian, stats.apm_statistics.delay_median_ms);
- EXPECT_EQ(kEchoDelayStdDev, stats.apm_statistics.delay_standard_deviation_ms);
- EXPECT_EQ(kEchoReturnLoss, stats.apm_statistics.echo_return_loss);
- EXPECT_EQ(kEchoReturnLossEnhancement,
- stats.apm_statistics.echo_return_loss_enhancement);
- EXPECT_EQ(kDivergentFilterFraction,
- stats.apm_statistics.divergent_filter_fraction);
- EXPECT_EQ(kResidualEchoLikelihood,
- stats.apm_statistics.residual_echo_likelihood);
- EXPECT_EQ(kResidualEchoLikelihoodMax,
- stats.apm_statistics.residual_echo_likelihood_recent_max);
- EXPECT_FALSE(stats.typing_noise_detected);
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(false, true, use_null_audio_processing);
+ auto send_stream = helper.CreateAudioSendStream();
+ helper.SetupMockForGetStats(use_null_audio_processing);
+ AudioSendStream::Stats stats = send_stream->GetStats(true);
+ EXPECT_EQ(kSsrc, stats.local_ssrc);
+ EXPECT_EQ(kCallStats.payload_bytes_sent, stats.payload_bytes_sent);
+ EXPECT_EQ(kCallStats.header_and_padding_bytes_sent,
+ stats.header_and_padding_bytes_sent);
+ EXPECT_EQ(kCallStats.packetsSent, stats.packets_sent);
+ EXPECT_EQ(kReportBlock.cumulative_num_packets_lost, stats.packets_lost);
+ EXPECT_EQ(Q8ToFloat(kReportBlock.fraction_lost), stats.fraction_lost);
+ EXPECT_EQ(kIsacFormat.name, stats.codec_name);
+ EXPECT_EQ(static_cast<int32_t>(kReportBlock.interarrival_jitter /
+ (kIsacFormat.clockrate_hz / 1000)),
+ stats.jitter_ms);
+ EXPECT_EQ(kCallStats.rttMs, stats.rtt_ms);
+ EXPECT_EQ(0, stats.audio_level);
+ EXPECT_EQ(0, stats.total_input_energy);
+ EXPECT_EQ(0, stats.total_input_duration);
+
+ if (!use_null_audio_processing) {
+ EXPECT_EQ(kEchoDelayMedian, stats.apm_statistics.delay_median_ms);
+ EXPECT_EQ(kEchoDelayStdDev,
+ stats.apm_statistics.delay_standard_deviation_ms);
+ EXPECT_EQ(kEchoReturnLoss, stats.apm_statistics.echo_return_loss);
+ EXPECT_EQ(kEchoReturnLossEnhancement,
+ stats.apm_statistics.echo_return_loss_enhancement);
+ EXPECT_EQ(kDivergentFilterFraction,
+ stats.apm_statistics.divergent_filter_fraction);
+ EXPECT_EQ(kResidualEchoLikelihood,
+ stats.apm_statistics.residual_echo_likelihood);
+ EXPECT_EQ(kResidualEchoLikelihoodMax,
+ stats.apm_statistics.residual_echo_likelihood_recent_max);
+ EXPECT_FALSE(stats.typing_noise_detected);
+ }
+ }
}
TEST(AudioSendStreamTest, GetStatsAudioLevel) {
- ConfigHelper helper(false, true);
- auto send_stream = helper.CreateAudioSendStream();
- helper.SetupMockForGetStats();
- EXPECT_CALL(*helper.channel_send(), ProcessAndEncodeAudioForMock(_))
- .Times(AnyNumber());
-
- constexpr int kSampleRateHz = 48000;
- constexpr size_t kNumChannels = 1;
-
- constexpr int16_t kSilentAudioLevel = 0;
- constexpr int16_t kMaxAudioLevel = 32767; // Audio level is [0,32767].
- constexpr int kAudioFrameDurationMs = 10;
-
- // Process 10 audio frames (100 ms) of silence. After this, on the next
- // (11-th) frame, the audio level will be updated with the maximum audio level
- // of the first 11 frames. See AudioLevel.
- for (size_t i = 0; i < 10; ++i) {
- send_stream->SendAudioData(CreateAudioFrame1kHzSineWave(
- kSilentAudioLevel, kAudioFrameDurationMs, kSampleRateHz, kNumChannels));
- }
- AudioSendStream::Stats stats = send_stream->GetStats();
- EXPECT_EQ(kSilentAudioLevel, stats.audio_level);
- EXPECT_NEAR(0.0f, stats.total_input_energy, kTolerance);
- EXPECT_NEAR(0.1f, stats.total_input_duration, kTolerance); // 100 ms = 0.1 s
-
- // Process 10 audio frames (100 ms) of maximum audio level.
- // Note that AudioLevel updates the audio level every 11th frame, processing
- // 10 frames above was needed to see a non-zero audio level here.
- for (size_t i = 0; i < 10; ++i) {
- send_stream->SendAudioData(CreateAudioFrame1kHzSineWave(
- kMaxAudioLevel, kAudioFrameDurationMs, kSampleRateHz, kNumChannels));
- }
- stats = send_stream->GetStats();
- EXPECT_EQ(kMaxAudioLevel, stats.audio_level);
- // Energy increases by energy*duration, where energy is audio level in [0,1].
- EXPECT_NEAR(0.1f, stats.total_input_energy, kTolerance); // 0.1 s of max
- EXPECT_NEAR(0.2f, stats.total_input_duration, kTolerance); // 200 ms = 0.2 s
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(false, true, use_null_audio_processing);
+ auto send_stream = helper.CreateAudioSendStream();
+ helper.SetupMockForGetStats(use_null_audio_processing);
+ EXPECT_CALL(*helper.channel_send(), ProcessAndEncodeAudioForMock(_))
+ .Times(AnyNumber());
+
+ constexpr int kSampleRateHz = 48000;
+ constexpr size_t kNumChannels = 1;
+
+ constexpr int16_t kSilentAudioLevel = 0;
+ constexpr int16_t kMaxAudioLevel = 32767; // Audio level is [0,32767].
+ constexpr int kAudioFrameDurationMs = 10;
+
+ // Process 10 audio frames (100 ms) of silence. After this, on the next
+ // (11-th) frame, the audio level will be updated with the maximum audio
+ // level of the first 11 frames. See AudioLevel.
+ for (size_t i = 0; i < 10; ++i) {
+ send_stream->SendAudioData(
+ CreateAudioFrame1kHzSineWave(kSilentAudioLevel, kAudioFrameDurationMs,
+ kSampleRateHz, kNumChannels));
+ }
+ AudioSendStream::Stats stats = send_stream->GetStats();
+ EXPECT_EQ(kSilentAudioLevel, stats.audio_level);
+ EXPECT_NEAR(0.0f, stats.total_input_energy, kTolerance);
+ EXPECT_NEAR(0.1f, stats.total_input_duration,
+ kTolerance); // 100 ms = 0.1 s
+
+ // Process 10 audio frames (100 ms) of maximum audio level.
+ // Note that AudioLevel updates the audio level every 11th frame, processing
+ // 10 frames above was needed to see a non-zero audio level here.
+ for (size_t i = 0; i < 10; ++i) {
+ send_stream->SendAudioData(CreateAudioFrame1kHzSineWave(
+ kMaxAudioLevel, kAudioFrameDurationMs, kSampleRateHz, kNumChannels));
+ }
+ stats = send_stream->GetStats();
+ EXPECT_EQ(kMaxAudioLevel, stats.audio_level);
+ // Energy increases by energy*duration, where energy is audio level in
+ // [0,1].
+ EXPECT_NEAR(0.1f, stats.total_input_energy, kTolerance); // 0.1 s of max
+ EXPECT_NEAR(0.2f, stats.total_input_duration,
+ kTolerance); // 200 ms = 0.2 s
+ }
}
TEST(AudioSendStreamTest, SendCodecAppliesAudioNetworkAdaptor) {
- ConfigHelper helper(false, true);
- helper.config().send_codec_spec =
- AudioSendStream::Config::SendCodecSpec(0, kOpusFormat);
- const std::string kAnaConfigString = "abcde";
- const std::string kAnaReconfigString = "12345";
-
- helper.config().rtp.extensions.push_back(RtpExtension(
- RtpExtension::kTransportSequenceNumberUri, kTransportSequenceNumberId));
- helper.config().audio_network_adaptor_config = kAnaConfigString;
-
- EXPECT_CALL(helper.mock_encoder_factory(), MakeAudioEncoderMock(_, _, _, _))
- .WillOnce(Invoke([&kAnaConfigString, &kAnaReconfigString](
- int payload_type, const SdpAudioFormat& format,
- absl::optional<AudioCodecPairId> codec_pair_id,
- std::unique_ptr<AudioEncoder>* return_value) {
- auto mock_encoder = SetupAudioEncoderMock(payload_type, format);
- EXPECT_CALL(*mock_encoder,
- EnableAudioNetworkAdaptor(StrEq(kAnaConfigString), _))
- .WillOnce(Return(true));
- EXPECT_CALL(*mock_encoder,
- EnableAudioNetworkAdaptor(StrEq(kAnaReconfigString), _))
- .WillOnce(Return(true));
- *return_value = std::move(mock_encoder);
- }));
-
- auto send_stream = helper.CreateAudioSendStream();
-
- auto stream_config = helper.config();
- stream_config.audio_network_adaptor_config = kAnaReconfigString;
-
- send_stream->Reconfigure(stream_config);
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(false, true, use_null_audio_processing);
+ helper.config().send_codec_spec =
+ AudioSendStream::Config::SendCodecSpec(0, kOpusFormat);
+ const std::string kAnaConfigString = "abcde";
+ const std::string kAnaReconfigString = "12345";
+
+ helper.config().rtp.extensions.push_back(RtpExtension(
+ RtpExtension::kTransportSequenceNumberUri, kTransportSequenceNumberId));
+ helper.config().audio_network_adaptor_config = kAnaConfigString;
+
+ EXPECT_CALL(helper.mock_encoder_factory(), MakeAudioEncoderMock(_, _, _, _))
+ .WillOnce(Invoke([&kAnaConfigString, &kAnaReconfigString](
+ int payload_type, const SdpAudioFormat& format,
+ absl::optional<AudioCodecPairId> codec_pair_id,
+ std::unique_ptr<AudioEncoder>* return_value) {
+ auto mock_encoder = SetupAudioEncoderMock(payload_type, format);
+ EXPECT_CALL(*mock_encoder,
+ EnableAudioNetworkAdaptor(StrEq(kAnaConfigString), _))
+ .WillOnce(Return(true));
+ EXPECT_CALL(*mock_encoder,
+ EnableAudioNetworkAdaptor(StrEq(kAnaReconfigString), _))
+ .WillOnce(Return(true));
+ *return_value = std::move(mock_encoder);
+ }));
+
+ auto send_stream = helper.CreateAudioSendStream();
+
+ auto stream_config = helper.config();
+ stream_config.audio_network_adaptor_config = kAnaReconfigString;
+
+ send_stream->Reconfigure(stream_config);
+ }
}
// VAD is applied when codec is mono and the CNG frequency matches the codec
// clock rate.
TEST(AudioSendStreamTest, SendCodecCanApplyVad) {
- ConfigHelper helper(false, false);
- helper.config().send_codec_spec =
- AudioSendStream::Config::SendCodecSpec(9, kG722Format);
- helper.config().send_codec_spec->cng_payload_type = 105;
- using ::testing::Invoke;
- std::unique_ptr<AudioEncoder> stolen_encoder;
- EXPECT_CALL(*helper.channel_send(), SetEncoderForMock(_, _))
- .WillOnce(
- Invoke([&stolen_encoder](int payload_type,
- std::unique_ptr<AudioEncoder>* encoder) {
- stolen_encoder = std::move(*encoder);
- return true;
- }));
- EXPECT_CALL(*helper.channel_send(), RegisterCngPayloadType(105, 8000));
-
- auto send_stream = helper.CreateAudioSendStream();
-
- // We cannot truly determine if the encoder created is an AudioEncoderCng. It
- // is the only reasonable implementation that will return something from
- // ReclaimContainedEncoders, though.
- ASSERT_TRUE(stolen_encoder);
- EXPECT_FALSE(stolen_encoder->ReclaimContainedEncoders().empty());
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(false, false, use_null_audio_processing);
+ helper.config().send_codec_spec =
+ AudioSendStream::Config::SendCodecSpec(9, kG722Format);
+ helper.config().send_codec_spec->cng_payload_type = 105;
+ using ::testing::Invoke;
+ std::unique_ptr<AudioEncoder> stolen_encoder;
+ EXPECT_CALL(*helper.channel_send(), SetEncoderForMock(_, _))
+ .WillOnce(
+ Invoke([&stolen_encoder](int payload_type,
+ std::unique_ptr<AudioEncoder>* encoder) {
+ stolen_encoder = std::move(*encoder);
+ return true;
+ }));
+ EXPECT_CALL(*helper.channel_send(), RegisterCngPayloadType(105, 8000));
+
+ auto send_stream = helper.CreateAudioSendStream();
+
+ // We cannot truly determine if the encoder created is an AudioEncoderCng.
+ // It is the only reasonable implementation that will return something from
+ // ReclaimContainedEncoders, though.
+ ASSERT_TRUE(stolen_encoder);
+ EXPECT_FALSE(stolen_encoder->ReclaimContainedEncoders().empty());
+ }
}
TEST(AudioSendStreamTest, DoesNotPassHigherBitrateThanMaxBitrate) {
- ConfigHelper helper(false, true);
- auto send_stream = helper.CreateAudioSendStream();
- EXPECT_CALL(*helper.channel_send(),
- OnBitrateAllocation(Field(
- &BitrateAllocationUpdate::target_bitrate,
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(false, true, use_null_audio_processing);
+ auto send_stream = helper.CreateAudioSendStream();
+ EXPECT_CALL(
+ *helper.channel_send(),
+ OnBitrateAllocation(
+ Field(&BitrateAllocationUpdate::target_bitrate,
Eq(DataRate::BitsPerSec(helper.config().max_bitrate_bps)))));
- BitrateAllocationUpdate update;
- update.target_bitrate =
- DataRate::BitsPerSec(helper.config().max_bitrate_bps + 5000);
- update.packet_loss_ratio = 0;
- update.round_trip_time = TimeDelta::Millis(50);
- update.bwe_period = TimeDelta::Millis(6000);
- helper.worker()->SendTask([&] { send_stream->OnBitrateUpdated(update); },
- RTC_FROM_HERE);
+ BitrateAllocationUpdate update;
+ update.target_bitrate =
+ DataRate::BitsPerSec(helper.config().max_bitrate_bps + 5000);
+ update.packet_loss_ratio = 0;
+ update.round_trip_time = TimeDelta::Millis(50);
+ update.bwe_period = TimeDelta::Millis(6000);
+ helper.worker()->SendTask([&] { send_stream->OnBitrateUpdated(update); },
+ RTC_FROM_HERE);
+ }
}
TEST(AudioSendStreamTest, SSBweTargetInRangeRespected) {
ScopedFieldTrials field_trials("WebRTC-Audio-SendSideBwe/Enabled/");
- ConfigHelper helper(true, true);
- auto send_stream = helper.CreateAudioSendStream();
- EXPECT_CALL(
- *helper.channel_send(),
- OnBitrateAllocation(Field(
- &BitrateAllocationUpdate::target_bitrate,
- Eq(DataRate::BitsPerSec(helper.config().max_bitrate_bps - 5000)))));
- BitrateAllocationUpdate update;
- update.target_bitrate =
- DataRate::BitsPerSec(helper.config().max_bitrate_bps - 5000);
- helper.worker()->SendTask([&] { send_stream->OnBitrateUpdated(update); },
- RTC_FROM_HERE);
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(true, true, use_null_audio_processing);
+ auto send_stream = helper.CreateAudioSendStream();
+ EXPECT_CALL(
+ *helper.channel_send(),
+ OnBitrateAllocation(Field(
+ &BitrateAllocationUpdate::target_bitrate,
+ Eq(DataRate::BitsPerSec(helper.config().max_bitrate_bps - 5000)))));
+ BitrateAllocationUpdate update;
+ update.target_bitrate =
+ DataRate::BitsPerSec(helper.config().max_bitrate_bps - 5000);
+ helper.worker()->SendTask([&] { send_stream->OnBitrateUpdated(update); },
+ RTC_FROM_HERE);
+ }
}
TEST(AudioSendStreamTest, SSBweFieldTrialMinRespected) {
ScopedFieldTrials field_trials(
"WebRTC-Audio-SendSideBwe/Enabled/"
"WebRTC-Audio-Allocation/min:6kbps,max:64kbps/");
- ConfigHelper helper(true, true);
- auto send_stream = helper.CreateAudioSendStream();
- EXPECT_CALL(
- *helper.channel_send(),
- OnBitrateAllocation(Field(&BitrateAllocationUpdate::target_bitrate,
- Eq(DataRate::KilobitsPerSec(6)))));
- BitrateAllocationUpdate update;
- update.target_bitrate = DataRate::KilobitsPerSec(1);
- helper.worker()->SendTask([&] { send_stream->OnBitrateUpdated(update); },
- RTC_FROM_HERE);
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(true, true, use_null_audio_processing);
+ auto send_stream = helper.CreateAudioSendStream();
+ EXPECT_CALL(
+ *helper.channel_send(),
+ OnBitrateAllocation(Field(&BitrateAllocationUpdate::target_bitrate,
+ Eq(DataRate::KilobitsPerSec(6)))));
+ BitrateAllocationUpdate update;
+ update.target_bitrate = DataRate::KilobitsPerSec(1);
+ helper.worker()->SendTask([&] { send_stream->OnBitrateUpdated(update); },
+ RTC_FROM_HERE);
+ }
}
TEST(AudioSendStreamTest, SSBweFieldTrialMaxRespected) {
ScopedFieldTrials field_trials(
"WebRTC-Audio-SendSideBwe/Enabled/"
"WebRTC-Audio-Allocation/min:6kbps,max:64kbps/");
- ConfigHelper helper(true, true);
- auto send_stream = helper.CreateAudioSendStream();
- EXPECT_CALL(
- *helper.channel_send(),
- OnBitrateAllocation(Field(&BitrateAllocationUpdate::target_bitrate,
- Eq(DataRate::KilobitsPerSec(64)))));
- BitrateAllocationUpdate update;
- update.target_bitrate = DataRate::KilobitsPerSec(128);
- helper.worker()->SendTask([&] { send_stream->OnBitrateUpdated(update); },
- RTC_FROM_HERE);
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(true, true, use_null_audio_processing);
+ auto send_stream = helper.CreateAudioSendStream();
+ EXPECT_CALL(
+ *helper.channel_send(),
+ OnBitrateAllocation(Field(&BitrateAllocationUpdate::target_bitrate,
+ Eq(DataRate::KilobitsPerSec(64)))));
+ BitrateAllocationUpdate update;
+ update.target_bitrate = DataRate::KilobitsPerSec(128);
+ helper.worker()->SendTask([&] { send_stream->OnBitrateUpdated(update); },
+ RTC_FROM_HERE);
+ }
}
TEST(AudioSendStreamTest, SSBweWithOverhead) {
@@ -617,19 +660,22 @@ TEST(AudioSendStreamTest, SSBweWithOverhead) {
"WebRTC-Audio-SendSideBwe/Enabled/"
"WebRTC-SendSideBwe-WithOverhead/Enabled/"
"WebRTC-Audio-LegacyOverhead/Disabled/");
- ConfigHelper helper(true, true);
- auto send_stream = helper.CreateAudioSendStream();
- EXPECT_CALL(*helper.channel_send(), CallEncoder(_)).Times(1);
- send_stream->OnOverheadChanged(kOverheadPerPacket.bytes<size_t>());
- const DataRate bitrate =
- DataRate::BitsPerSec(helper.config().max_bitrate_bps) + kMaxOverheadRate;
- EXPECT_CALL(*helper.channel_send(),
- OnBitrateAllocation(Field(
- &BitrateAllocationUpdate::target_bitrate, Eq(bitrate))));
- BitrateAllocationUpdate update;
- update.target_bitrate = bitrate;
- helper.worker()->SendTask([&] { send_stream->OnBitrateUpdated(update); },
- RTC_FROM_HERE);
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(true, true, use_null_audio_processing);
+ auto send_stream = helper.CreateAudioSendStream();
+ EXPECT_CALL(*helper.channel_send(), CallEncoder(_)).Times(1);
+ send_stream->OnOverheadChanged(kOverheadPerPacket.bytes<size_t>());
+ const DataRate bitrate =
+ DataRate::BitsPerSec(helper.config().max_bitrate_bps) +
+ kMaxOverheadRate;
+ EXPECT_CALL(*helper.channel_send(),
+ OnBitrateAllocation(Field(
+ &BitrateAllocationUpdate::target_bitrate, Eq(bitrate))));
+ BitrateAllocationUpdate update;
+ update.target_bitrate = bitrate;
+ helper.worker()->SendTask([&] { send_stream->OnBitrateUpdated(update); },
+ RTC_FROM_HERE);
+ }
}
TEST(AudioSendStreamTest, SSBweWithOverheadMinRespected) {
@@ -638,18 +684,20 @@ TEST(AudioSendStreamTest, SSBweWithOverheadMinRespected) {
"WebRTC-SendSideBwe-WithOverhead/Enabled/"
"WebRTC-Audio-LegacyOverhead/Disabled/"
"WebRTC-Audio-Allocation/min:6kbps,max:64kbps/");
- ConfigHelper helper(true, true);
- auto send_stream = helper.CreateAudioSendStream();
- EXPECT_CALL(*helper.channel_send(), CallEncoder(_)).Times(1);
- send_stream->OnOverheadChanged(kOverheadPerPacket.bytes<size_t>());
- const DataRate bitrate = DataRate::KilobitsPerSec(6) + kMinOverheadRate;
- EXPECT_CALL(*helper.channel_send(),
- OnBitrateAllocation(Field(
- &BitrateAllocationUpdate::target_bitrate, Eq(bitrate))));
- BitrateAllocationUpdate update;
- update.target_bitrate = DataRate::KilobitsPerSec(1);
- helper.worker()->SendTask([&] { send_stream->OnBitrateUpdated(update); },
- RTC_FROM_HERE);
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(true, true, use_null_audio_processing);
+ auto send_stream = helper.CreateAudioSendStream();
+ EXPECT_CALL(*helper.channel_send(), CallEncoder(_)).Times(1);
+ send_stream->OnOverheadChanged(kOverheadPerPacket.bytes<size_t>());
+ const DataRate bitrate = DataRate::KilobitsPerSec(6) + kMinOverheadRate;
+ EXPECT_CALL(*helper.channel_send(),
+ OnBitrateAllocation(Field(
+ &BitrateAllocationUpdate::target_bitrate, Eq(bitrate))));
+ BitrateAllocationUpdate update;
+ update.target_bitrate = DataRate::KilobitsPerSec(1);
+ helper.worker()->SendTask([&] { send_stream->OnBitrateUpdated(update); },
+ RTC_FROM_HERE);
+ }
}
TEST(AudioSendStreamTest, SSBweWithOverheadMaxRespected) {
@@ -658,152 +706,172 @@ TEST(AudioSendStreamTest, SSBweWithOverheadMaxRespected) {
"WebRTC-SendSideBwe-WithOverhead/Enabled/"
"WebRTC-Audio-LegacyOverhead/Disabled/"
"WebRTC-Audio-Allocation/min:6kbps,max:64kbps/");
- ConfigHelper helper(true, true);
- auto send_stream = helper.CreateAudioSendStream();
- EXPECT_CALL(*helper.channel_send(), CallEncoder(_)).Times(1);
- send_stream->OnOverheadChanged(kOverheadPerPacket.bytes<size_t>());
- const DataRate bitrate = DataRate::KilobitsPerSec(64) + kMaxOverheadRate;
- EXPECT_CALL(*helper.channel_send(),
- OnBitrateAllocation(Field(
- &BitrateAllocationUpdate::target_bitrate, Eq(bitrate))));
- BitrateAllocationUpdate update;
- update.target_bitrate = DataRate::KilobitsPerSec(128);
- helper.worker()->SendTask([&] { send_stream->OnBitrateUpdated(update); },
- RTC_FROM_HERE);
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(true, true, use_null_audio_processing);
+ auto send_stream = helper.CreateAudioSendStream();
+ EXPECT_CALL(*helper.channel_send(), CallEncoder(_)).Times(1);
+ send_stream->OnOverheadChanged(kOverheadPerPacket.bytes<size_t>());
+ const DataRate bitrate = DataRate::KilobitsPerSec(64) + kMaxOverheadRate;
+ EXPECT_CALL(*helper.channel_send(),
+ OnBitrateAllocation(Field(
+ &BitrateAllocationUpdate::target_bitrate, Eq(bitrate))));
+ BitrateAllocationUpdate update;
+ update.target_bitrate = DataRate::KilobitsPerSec(128);
+ helper.worker()->SendTask([&] { send_stream->OnBitrateUpdated(update); },
+ RTC_FROM_HERE);
+ }
}
TEST(AudioSendStreamTest, ProbingIntervalOnBitrateUpdated) {
- ConfigHelper helper(false, true);
- auto send_stream = helper.CreateAudioSendStream();
-
- EXPECT_CALL(*helper.channel_send(),
- OnBitrateAllocation(Field(&BitrateAllocationUpdate::bwe_period,
- Eq(TimeDelta::Millis(5000)))));
- BitrateAllocationUpdate update;
- update.target_bitrate =
- DataRate::BitsPerSec(helper.config().max_bitrate_bps + 5000);
- update.packet_loss_ratio = 0;
- update.round_trip_time = TimeDelta::Millis(50);
- update.bwe_period = TimeDelta::Millis(5000);
- helper.worker()->SendTask([&] { send_stream->OnBitrateUpdated(update); },
- RTC_FROM_HERE);
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(false, true, use_null_audio_processing);
+ auto send_stream = helper.CreateAudioSendStream();
+
+ EXPECT_CALL(*helper.channel_send(),
+ OnBitrateAllocation(Field(&BitrateAllocationUpdate::bwe_period,
+ Eq(TimeDelta::Millis(5000)))));
+ BitrateAllocationUpdate update;
+ update.target_bitrate =
+ DataRate::BitsPerSec(helper.config().max_bitrate_bps + 5000);
+ update.packet_loss_ratio = 0;
+ update.round_trip_time = TimeDelta::Millis(50);
+ update.bwe_period = TimeDelta::Millis(5000);
+ helper.worker()->SendTask([&] { send_stream->OnBitrateUpdated(update); },
+ RTC_FROM_HERE);
+ }
}
// Test that AudioSendStream doesn't recreate the encoder unnecessarily.
TEST(AudioSendStreamTest, DontRecreateEncoder) {
- ConfigHelper helper(false, false);
- // WillOnce is (currently) the default used by ConfigHelper if asked to set an
- // expectation for SetEncoder. Since this behavior is essential for this test
- // to be correct, it's instead set-up manually here. Otherwise a simple change
- // to ConfigHelper (say to WillRepeatedly) would silently make this test
- // useless.
- EXPECT_CALL(*helper.channel_send(), SetEncoderForMock(_, _))
- .WillOnce(Return());
-
- EXPECT_CALL(*helper.channel_send(), RegisterCngPayloadType(105, 8000));
-
- helper.config().send_codec_spec =
- AudioSendStream::Config::SendCodecSpec(9, kG722Format);
- helper.config().send_codec_spec->cng_payload_type = 105;
- auto send_stream = helper.CreateAudioSendStream();
- send_stream->Reconfigure(helper.config());
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(false, false, use_null_audio_processing);
+ // WillOnce is (currently) the default used by ConfigHelper if asked to set
+ // an expectation for SetEncoder. Since this behavior is essential for this
+ // test to be correct, it's instead set-up manually here. Otherwise a simple
+ // change to ConfigHelper (say to WillRepeatedly) would silently make this
+ // test useless.
+ EXPECT_CALL(*helper.channel_send(), SetEncoderForMock(_, _))
+ .WillOnce(Return());
+
+ EXPECT_CALL(*helper.channel_send(), RegisterCngPayloadType(105, 8000));
+
+ helper.config().send_codec_spec =
+ AudioSendStream::Config::SendCodecSpec(9, kG722Format);
+ helper.config().send_codec_spec->cng_payload_type = 105;
+ auto send_stream = helper.CreateAudioSendStream();
+ send_stream->Reconfigure(helper.config());
+ }
}
TEST(AudioSendStreamTest, ReconfigureTransportCcResetsFirst) {
ScopedFieldTrials field_trials("WebRTC-Audio-SendSideBwe/Enabled/");
- ConfigHelper helper(false, true);
- auto send_stream = helper.CreateAudioSendStream();
- auto new_config = helper.config();
- ConfigHelper::AddBweToConfig(&new_config);
-
- EXPECT_CALL(*helper.rtp_rtcp(),
- RegisterRtpHeaderExtension(TransportSequenceNumber::kUri,
- kTransportSequenceNumberId))
- .Times(1);
- {
- ::testing::InSequence seq;
- EXPECT_CALL(*helper.channel_send(), ResetSenderCongestionControlObjects())
- .Times(1);
- EXPECT_CALL(*helper.channel_send(), RegisterSenderCongestionControlObjects(
- helper.transport(), Ne(nullptr)))
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(false, true, use_null_audio_processing);
+ auto send_stream = helper.CreateAudioSendStream();
+ auto new_config = helper.config();
+ ConfigHelper::AddBweToConfig(&new_config);
+
+ EXPECT_CALL(*helper.rtp_rtcp(),
+ RegisterRtpHeaderExtension(TransportSequenceNumber::kUri,
+ kTransportSequenceNumberId))
.Times(1);
- }
+ {
+ ::testing::InSequence seq;
+ EXPECT_CALL(*helper.channel_send(), ResetSenderCongestionControlObjects())
+ .Times(1);
+ EXPECT_CALL(*helper.channel_send(),
+ RegisterSenderCongestionControlObjects(helper.transport(),
+ Ne(nullptr)))
+ .Times(1);
+ }
- send_stream->Reconfigure(new_config);
+ send_stream->Reconfigure(new_config);
+ }
}
TEST(AudioSendStreamTest, OnTransportOverheadChanged) {
- ConfigHelper helper(false, true);
- auto send_stream = helper.CreateAudioSendStream();
- auto new_config = helper.config();
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(false, true, use_null_audio_processing);
+ auto send_stream = helper.CreateAudioSendStream();
+ auto new_config = helper.config();
- // CallEncoder will be called on overhead change.
- EXPECT_CALL(*helper.channel_send(), CallEncoder(::testing::_)).Times(1);
+ // CallEncoder will be called on overhead change.
+ EXPECT_CALL(*helper.channel_send(), CallEncoder(::testing::_)).Times(1);
- const size_t transport_overhead_per_packet_bytes = 333;
- send_stream->SetTransportOverhead(transport_overhead_per_packet_bytes);
+ const size_t transport_overhead_per_packet_bytes = 333;
+ send_stream->SetTransportOverhead(transport_overhead_per_packet_bytes);
- EXPECT_EQ(transport_overhead_per_packet_bytes,
- send_stream->TestOnlyGetPerPacketOverheadBytes());
+ EXPECT_EQ(transport_overhead_per_packet_bytes,
+ send_stream->TestOnlyGetPerPacketOverheadBytes());
+ }
}
TEST(AudioSendStreamTest, OnAudioOverheadChanged) {
- ConfigHelper helper(false, true);
- auto send_stream = helper.CreateAudioSendStream();
- auto new_config = helper.config();
-
- // CallEncoder will be called on overhead change.
- EXPECT_CALL(*helper.channel_send(), CallEncoder(::testing::_)).Times(1);
-
- const size_t audio_overhead_per_packet_bytes = 555;
- send_stream->OnOverheadChanged(audio_overhead_per_packet_bytes);
- EXPECT_EQ(audio_overhead_per_packet_bytes,
- send_stream->TestOnlyGetPerPacketOverheadBytes());
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(false, true, use_null_audio_processing);
+ auto send_stream = helper.CreateAudioSendStream();
+ auto new_config = helper.config();
+
+ // CallEncoder will be called on overhead change.
+ EXPECT_CALL(*helper.channel_send(), CallEncoder(::testing::_)).Times(1);
+
+ const size_t audio_overhead_per_packet_bytes = 555;
+ send_stream->OnOverheadChanged(audio_overhead_per_packet_bytes);
+ EXPECT_EQ(audio_overhead_per_packet_bytes,
+ send_stream->TestOnlyGetPerPacketOverheadBytes());
+ }
}
TEST(AudioSendStreamTest, OnAudioAndTransportOverheadChanged) {
- ConfigHelper helper(false, true);
- auto send_stream = helper.CreateAudioSendStream();
- auto new_config = helper.config();
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(false, true, use_null_audio_processing);
+ auto send_stream = helper.CreateAudioSendStream();
+ auto new_config = helper.config();
- // CallEncoder will be called when each of overhead changes.
- EXPECT_CALL(*helper.channel_send(), CallEncoder(::testing::_)).Times(2);
+ // CallEncoder will be called when each of overhead changes.
+ EXPECT_CALL(*helper.channel_send(), CallEncoder(::testing::_)).Times(2);
- const size_t transport_overhead_per_packet_bytes = 333;
- send_stream->SetTransportOverhead(transport_overhead_per_packet_bytes);
+ const size_t transport_overhead_per_packet_bytes = 333;
+ send_stream->SetTransportOverhead(transport_overhead_per_packet_bytes);
- const size_t audio_overhead_per_packet_bytes = 555;
- send_stream->OnOverheadChanged(audio_overhead_per_packet_bytes);
+ const size_t audio_overhead_per_packet_bytes = 555;
+ send_stream->OnOverheadChanged(audio_overhead_per_packet_bytes);
- EXPECT_EQ(
- transport_overhead_per_packet_bytes + audio_overhead_per_packet_bytes,
- send_stream->TestOnlyGetPerPacketOverheadBytes());
+ EXPECT_EQ(
+ transport_overhead_per_packet_bytes + audio_overhead_per_packet_bytes,
+ send_stream->TestOnlyGetPerPacketOverheadBytes());
+ }
}
// Validates that reconfiguring the AudioSendStream with a Frame encryptor
// correctly reconfigures on the object without crashing.
TEST(AudioSendStreamTest, ReconfigureWithFrameEncryptor) {
- ConfigHelper helper(false, true);
- auto send_stream = helper.CreateAudioSendStream();
- auto new_config = helper.config();
-
- rtc::scoped_refptr<FrameEncryptorInterface> mock_frame_encryptor_0(
- new rtc::RefCountedObject<MockFrameEncryptor>());
- new_config.frame_encryptor = mock_frame_encryptor_0;
- EXPECT_CALL(*helper.channel_send(), SetFrameEncryptor(Ne(nullptr))).Times(1);
- send_stream->Reconfigure(new_config);
-
- // Not updating the frame encryptor shouldn't force it to reconfigure.
- EXPECT_CALL(*helper.channel_send(), SetFrameEncryptor(_)).Times(0);
- send_stream->Reconfigure(new_config);
-
- // Updating frame encryptor to a new object should force a call to the proxy.
- rtc::scoped_refptr<FrameEncryptorInterface> mock_frame_encryptor_1(
- new rtc::RefCountedObject<MockFrameEncryptor>());
- new_config.frame_encryptor = mock_frame_encryptor_1;
- new_config.crypto_options.sframe.require_frame_encryption = true;
- EXPECT_CALL(*helper.channel_send(), SetFrameEncryptor(Ne(nullptr))).Times(1);
- send_stream->Reconfigure(new_config);
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(false, true, use_null_audio_processing);
+ auto send_stream = helper.CreateAudioSendStream();
+ auto new_config = helper.config();
+
+ rtc::scoped_refptr<FrameEncryptorInterface> mock_frame_encryptor_0(
+ new rtc::RefCountedObject<MockFrameEncryptor>());
+ new_config.frame_encryptor = mock_frame_encryptor_0;
+ EXPECT_CALL(*helper.channel_send(), SetFrameEncryptor(Ne(nullptr)))
+ .Times(1);
+ send_stream->Reconfigure(new_config);
+
+ // Not updating the frame encryptor shouldn't force it to reconfigure.
+ EXPECT_CALL(*helper.channel_send(), SetFrameEncryptor(_)).Times(0);
+ send_stream->Reconfigure(new_config);
+
+ // Updating frame encryptor to a new object should force a call to the
+ // proxy.
+ rtc::scoped_refptr<FrameEncryptorInterface> mock_frame_encryptor_1(
+ new rtc::RefCountedObject<MockFrameEncryptor>());
+ new_config.frame_encryptor = mock_frame_encryptor_1;
+ new_config.crypto_options.sframe.require_frame_encryption = true;
+ EXPECT_CALL(*helper.channel_send(), SetFrameEncryptor(Ne(nullptr)))
+ .Times(1);
+ send_stream->Reconfigure(new_config);
+ }
}
} // namespace test
} // namespace webrtc
diff --git a/audio/audio_state.cc b/audio/audio_state.cc
index 1a4fd77ed2..73366e20a8 100644
--- a/audio/audio_state.cc
+++ b/audio/audio_state.cc
@@ -41,7 +41,6 @@ AudioState::~AudioState() {
}
AudioProcessing* AudioState::audio_processing() {
- RTC_DCHECK(config_.audio_processing);
return config_.audio_processing.get();
}
diff --git a/audio/audio_state_unittest.cc b/audio/audio_state_unittest.cc
index 2a1018c120..76e08c549c 100644
--- a/audio/audio_state_unittest.cc
+++ b/audio/audio_state_unittest.cc
@@ -31,10 +31,14 @@ constexpr int kSampleRate = 16000;
constexpr int kNumberOfChannels = 1;
struct ConfigHelper {
- ConfigHelper() : audio_mixer(AudioMixerImpl::Create()) {
+ explicit ConfigHelper(bool use_null_audio_processing)
+ : audio_mixer(AudioMixerImpl::Create()) {
audio_state_config.audio_mixer = audio_mixer;
audio_state_config.audio_processing =
- new rtc::RefCountedObject<testing::NiceMock<MockAudioProcessing>>();
+ use_null_audio_processing
+ ? nullptr
+ : new rtc::RefCountedObject<
+ testing::NiceMock<MockAudioProcessing>>();
audio_state_config.audio_device_module =
new rtc::RefCountedObject<MockAudioDeviceModule>();
}
@@ -88,162 +92,183 @@ std::vector<uint32_t> ComputeChannelLevels(AudioFrame* audio_frame) {
} // namespace
TEST(AudioStateTest, Create) {
- ConfigHelper helper;
- auto audio_state = AudioState::Create(helper.config());
- EXPECT_TRUE(audio_state.get());
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(use_null_audio_processing);
+ auto audio_state = AudioState::Create(helper.config());
+ EXPECT_TRUE(audio_state.get());
+ }
}
TEST(AudioStateTest, ConstructDestruct) {
- ConfigHelper helper;
- rtc::scoped_refptr<internal::AudioState> audio_state(
- new rtc::RefCountedObject<internal::AudioState>(helper.config()));
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(use_null_audio_processing);
+ rtc::scoped_refptr<internal::AudioState> audio_state(
+ new rtc::RefCountedObject<internal::AudioState>(helper.config()));
+ }
}
TEST(AudioStateTest, RecordedAudioArrivesAtSingleStream) {
- ConfigHelper helper;
- rtc::scoped_refptr<internal::AudioState> audio_state(
- new rtc::RefCountedObject<internal::AudioState>(helper.config()));
-
- MockAudioSendStream stream;
- audio_state->AddSendingStream(&stream, 8000, 2);
-
- EXPECT_CALL(
- stream,
- SendAudioDataForMock(::testing::AllOf(
- ::testing::Field(&AudioFrame::sample_rate_hz_, ::testing::Eq(8000)),
- ::testing::Field(&AudioFrame::num_channels_, ::testing::Eq(2u)))))
- .WillOnce(
- // Verify that channels are not swapped by default.
- ::testing::Invoke([](AudioFrame* audio_frame) {
- auto levels = ComputeChannelLevels(audio_frame);
- EXPECT_LT(0u, levels[0]);
- EXPECT_EQ(0u, levels[1]);
- }));
- MockAudioProcessing* ap =
- static_cast<MockAudioProcessing*>(audio_state->audio_processing());
- EXPECT_CALL(*ap, set_stream_delay_ms(0));
- EXPECT_CALL(*ap, set_stream_key_pressed(false));
- EXPECT_CALL(*ap, ProcessStream(_, _, _, Matcher<int16_t*>(_)));
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(use_null_audio_processing);
+ rtc::scoped_refptr<internal::AudioState> audio_state(
+ new rtc::RefCountedObject<internal::AudioState>(helper.config()));
+
+ MockAudioSendStream stream;
+ audio_state->AddSendingStream(&stream, 8000, 2);
+
+ EXPECT_CALL(
+ stream,
+ SendAudioDataForMock(::testing::AllOf(
+ ::testing::Field(&AudioFrame::sample_rate_hz_, ::testing::Eq(8000)),
+ ::testing::Field(&AudioFrame::num_channels_, ::testing::Eq(2u)))))
+ .WillOnce(
+ // Verify that channels are not swapped by default.
+ ::testing::Invoke([](AudioFrame* audio_frame) {
+ auto levels = ComputeChannelLevels(audio_frame);
+ EXPECT_LT(0u, levels[0]);
+ EXPECT_EQ(0u, levels[1]);
+ }));
+ MockAudioProcessing* ap = use_null_audio_processing
+ ? nullptr
+ : static_cast<MockAudioProcessing*>(
+ audio_state->audio_processing());
+ if (ap) {
+ EXPECT_CALL(*ap, set_stream_delay_ms(0));
+ EXPECT_CALL(*ap, set_stream_key_pressed(false));
+ EXPECT_CALL(*ap, ProcessStream(_, _, _, Matcher<int16_t*>(_)));
+ }
- constexpr int kSampleRate = 16000;
- constexpr size_t kNumChannels = 2;
- auto audio_data = Create10msTestData(kSampleRate, kNumChannels);
- uint32_t new_mic_level = 667;
- audio_state->audio_transport()->RecordedDataIsAvailable(
- &audio_data[0], kSampleRate / 100, kNumChannels * 2, kNumChannels,
- kSampleRate, 0, 0, 0, false, new_mic_level);
- EXPECT_EQ(667u, new_mic_level);
-
- audio_state->RemoveSendingStream(&stream);
+ constexpr int kSampleRate = 16000;
+ constexpr size_t kNumChannels = 2;
+ auto audio_data = Create10msTestData(kSampleRate, kNumChannels);
+ uint32_t new_mic_level = 667;
+ audio_state->audio_transport()->RecordedDataIsAvailable(
+ &audio_data[0], kSampleRate / 100, kNumChannels * 2, kNumChannels,
+ kSampleRate, 0, 0, 0, false, new_mic_level);
+ EXPECT_EQ(667u, new_mic_level);
+
+ audio_state->RemoveSendingStream(&stream);
+ }
}
TEST(AudioStateTest, RecordedAudioArrivesAtMultipleStreams) {
- ConfigHelper helper;
- rtc::scoped_refptr<internal::AudioState> audio_state(
- new rtc::RefCountedObject<internal::AudioState>(helper.config()));
-
- MockAudioSendStream stream_1;
- MockAudioSendStream stream_2;
- audio_state->AddSendingStream(&stream_1, 8001, 2);
- audio_state->AddSendingStream(&stream_2, 32000, 1);
-
- EXPECT_CALL(
- stream_1,
- SendAudioDataForMock(::testing::AllOf(
- ::testing::Field(&AudioFrame::sample_rate_hz_, ::testing::Eq(16000)),
- ::testing::Field(&AudioFrame::num_channels_, ::testing::Eq(1u)))))
- .WillOnce(
- // Verify that there is output signal.
- ::testing::Invoke([](AudioFrame* audio_frame) {
- auto levels = ComputeChannelLevels(audio_frame);
- EXPECT_LT(0u, levels[0]);
- }));
- EXPECT_CALL(
- stream_2,
- SendAudioDataForMock(::testing::AllOf(
- ::testing::Field(&AudioFrame::sample_rate_hz_, ::testing::Eq(16000)),
- ::testing::Field(&AudioFrame::num_channels_, ::testing::Eq(1u)))))
- .WillOnce(
- // Verify that there is output signal.
- ::testing::Invoke([](AudioFrame* audio_frame) {
- auto levels = ComputeChannelLevels(audio_frame);
- EXPECT_LT(0u, levels[0]);
- }));
- MockAudioProcessing* ap =
- static_cast<MockAudioProcessing*>(audio_state->audio_processing());
- EXPECT_CALL(*ap, set_stream_delay_ms(5));
- EXPECT_CALL(*ap, set_stream_key_pressed(true));
- EXPECT_CALL(*ap, ProcessStream(_, _, _, Matcher<int16_t*>(_)));
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(use_null_audio_processing);
+ rtc::scoped_refptr<internal::AudioState> audio_state(
+ new rtc::RefCountedObject<internal::AudioState>(helper.config()));
+
+ MockAudioSendStream stream_1;
+ MockAudioSendStream stream_2;
+ audio_state->AddSendingStream(&stream_1, 8001, 2);
+ audio_state->AddSendingStream(&stream_2, 32000, 1);
+
+ EXPECT_CALL(
+ stream_1,
+ SendAudioDataForMock(::testing::AllOf(
+ ::testing::Field(&AudioFrame::sample_rate_hz_,
+ ::testing::Eq(16000)),
+ ::testing::Field(&AudioFrame::num_channels_, ::testing::Eq(1u)))))
+ .WillOnce(
+ // Verify that there is output signal.
+ ::testing::Invoke([](AudioFrame* audio_frame) {
+ auto levels = ComputeChannelLevels(audio_frame);
+ EXPECT_LT(0u, levels[0]);
+ }));
+ EXPECT_CALL(
+ stream_2,
+ SendAudioDataForMock(::testing::AllOf(
+ ::testing::Field(&AudioFrame::sample_rate_hz_,
+ ::testing::Eq(16000)),
+ ::testing::Field(&AudioFrame::num_channels_, ::testing::Eq(1u)))))
+ .WillOnce(
+ // Verify that there is output signal.
+ ::testing::Invoke([](AudioFrame* audio_frame) {
+ auto levels = ComputeChannelLevels(audio_frame);
+ EXPECT_LT(0u, levels[0]);
+ }));
+ MockAudioProcessing* ap =
+ static_cast<MockAudioProcessing*>(audio_state->audio_processing());
+ if (ap) {
+ EXPECT_CALL(*ap, set_stream_delay_ms(5));
+ EXPECT_CALL(*ap, set_stream_key_pressed(true));
+ EXPECT_CALL(*ap, ProcessStream(_, _, _, Matcher<int16_t*>(_)));
+ }
- constexpr int kSampleRate = 16000;
- constexpr size_t kNumChannels = 1;
- auto audio_data = Create10msTestData(kSampleRate, kNumChannels);
- uint32_t new_mic_level = 667;
- audio_state->audio_transport()->RecordedDataIsAvailable(
- &audio_data[0], kSampleRate / 100, kNumChannels * 2, kNumChannels,
- kSampleRate, 5, 0, 0, true, new_mic_level);
- EXPECT_EQ(667u, new_mic_level);
-
- audio_state->RemoveSendingStream(&stream_1);
- audio_state->RemoveSendingStream(&stream_2);
+ constexpr int kSampleRate = 16000;
+ constexpr size_t kNumChannels = 1;
+ auto audio_data = Create10msTestData(kSampleRate, kNumChannels);
+ uint32_t new_mic_level = 667;
+ audio_state->audio_transport()->RecordedDataIsAvailable(
+ &audio_data[0], kSampleRate / 100, kNumChannels * 2, kNumChannels,
+ kSampleRate, 5, 0, 0, true, new_mic_level);
+ EXPECT_EQ(667u, new_mic_level);
+
+ audio_state->RemoveSendingStream(&stream_1);
+ audio_state->RemoveSendingStream(&stream_2);
+ }
}
TEST(AudioStateTest, EnableChannelSwap) {
constexpr int kSampleRate = 16000;
constexpr size_t kNumChannels = 2;
- ConfigHelper helper;
- rtc::scoped_refptr<internal::AudioState> audio_state(
- new rtc::RefCountedObject<internal::AudioState>(helper.config()));
-
- audio_state->SetStereoChannelSwapping(true);
-
- MockAudioSendStream stream;
- audio_state->AddSendingStream(&stream, kSampleRate, kNumChannels);
-
- EXPECT_CALL(stream, SendAudioDataForMock(_))
- .WillOnce(
- // Verify that channels are swapped.
- ::testing::Invoke([](AudioFrame* audio_frame) {
- auto levels = ComputeChannelLevels(audio_frame);
- EXPECT_EQ(0u, levels[0]);
- EXPECT_LT(0u, levels[1]);
- }));
-
- auto audio_data = Create10msTestData(kSampleRate, kNumChannels);
- uint32_t new_mic_level = 667;
- audio_state->audio_transport()->RecordedDataIsAvailable(
- &audio_data[0], kSampleRate / 100, kNumChannels * 2, kNumChannels,
- kSampleRate, 0, 0, 0, false, new_mic_level);
- EXPECT_EQ(667u, new_mic_level);
-
- audio_state->RemoveSendingStream(&stream);
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(use_null_audio_processing);
+ rtc::scoped_refptr<internal::AudioState> audio_state(
+ new rtc::RefCountedObject<internal::AudioState>(helper.config()));
+
+ audio_state->SetStereoChannelSwapping(true);
+
+ MockAudioSendStream stream;
+ audio_state->AddSendingStream(&stream, kSampleRate, kNumChannels);
+
+ EXPECT_CALL(stream, SendAudioDataForMock(_))
+ .WillOnce(
+ // Verify that channels are swapped.
+ ::testing::Invoke([](AudioFrame* audio_frame) {
+ auto levels = ComputeChannelLevels(audio_frame);
+ EXPECT_EQ(0u, levels[0]);
+ EXPECT_LT(0u, levels[1]);
+ }));
+
+ auto audio_data = Create10msTestData(kSampleRate, kNumChannels);
+ uint32_t new_mic_level = 667;
+ audio_state->audio_transport()->RecordedDataIsAvailable(
+ &audio_data[0], kSampleRate / 100, kNumChannels * 2, kNumChannels,
+ kSampleRate, 0, 0, 0, false, new_mic_level);
+ EXPECT_EQ(667u, new_mic_level);
+
+ audio_state->RemoveSendingStream(&stream);
+ }
}
TEST(AudioStateTest,
QueryingTransportForAudioShouldResultInGetAudioCallOnMixerSource) {
- ConfigHelper helper;
- auto audio_state = AudioState::Create(helper.config());
-
- FakeAudioSource fake_source;
- helper.mixer()->AddSource(&fake_source);
-
- EXPECT_CALL(fake_source, GetAudioFrameWithInfo(_, _))
- .WillOnce(
- ::testing::Invoke([](int sample_rate_hz, AudioFrame* audio_frame) {
- audio_frame->sample_rate_hz_ = sample_rate_hz;
- audio_frame->samples_per_channel_ = sample_rate_hz / 100;
- audio_frame->num_channels_ = kNumberOfChannels;
- return AudioMixer::Source::AudioFrameInfo::kNormal;
- }));
-
- int16_t audio_buffer[kSampleRate / 100 * kNumberOfChannels];
- size_t n_samples_out;
- int64_t elapsed_time_ms;
- int64_t ntp_time_ms;
- audio_state->audio_transport()->NeedMorePlayData(
- kSampleRate / 100, kNumberOfChannels * 2, kNumberOfChannels, kSampleRate,
- audio_buffer, n_samples_out, &elapsed_time_ms, &ntp_time_ms);
+ for (bool use_null_audio_processing : {false, true}) {
+ ConfigHelper helper(use_null_audio_processing);
+ auto audio_state = AudioState::Create(helper.config());
+
+ FakeAudioSource fake_source;
+ helper.mixer()->AddSource(&fake_source);
+
+ EXPECT_CALL(fake_source, GetAudioFrameWithInfo(_, _))
+ .WillOnce(
+ ::testing::Invoke([](int sample_rate_hz, AudioFrame* audio_frame) {
+ audio_frame->sample_rate_hz_ = sample_rate_hz;
+ audio_frame->samples_per_channel_ = sample_rate_hz / 100;
+ audio_frame->num_channels_ = kNumberOfChannels;
+ return AudioMixer::Source::AudioFrameInfo::kNormal;
+ }));
+
+ int16_t audio_buffer[kSampleRate / 100 * kNumberOfChannels];
+ size_t n_samples_out;
+ int64_t elapsed_time_ms;
+ int64_t ntp_time_ms;
+ audio_state->audio_transport()->NeedMorePlayData(
+ kSampleRate / 100, kNumberOfChannels * 2, kNumberOfChannels,
+ kSampleRate, audio_buffer, n_samples_out, &elapsed_time_ms,
+ &ntp_time_ms);
+ }
}
} // namespace test
} // namespace webrtc
diff --git a/audio/audio_transport_impl.cc b/audio/audio_transport_impl.cc
index a61ea73102..7648fb948f 100644
--- a/audio/audio_transport_impl.cc
+++ b/audio/audio_transport_impl.cc
@@ -49,13 +49,15 @@ void ProcessCaptureFrame(uint32_t delay_ms,
bool swap_stereo_channels,
AudioProcessing* audio_processing,
AudioFrame* audio_frame) {
- RTC_DCHECK(audio_processing);
RTC_DCHECK(audio_frame);
- audio_processing->set_stream_delay_ms(delay_ms);
- audio_processing->set_stream_key_pressed(key_pressed);
- int error = ProcessAudioFrame(audio_processing, audio_frame);
+ if (audio_processing) {
+ audio_processing->set_stream_delay_ms(delay_ms);
+ audio_processing->set_stream_key_pressed(key_pressed);
+ int error = ProcessAudioFrame(audio_processing, audio_frame);
+
+ RTC_DCHECK_EQ(0, error) << "ProcessStream() error: " << error;
+ }
- RTC_DCHECK_EQ(0, error) << "ProcessStream() error: " << error;
if (swap_stereo_channels) {
AudioFrameOperations::SwapStereoChannels(audio_frame);
}
@@ -85,7 +87,6 @@ AudioTransportImpl::AudioTransportImpl(AudioMixer* mixer,
AudioProcessing* audio_processing)
: audio_processing_(audio_processing), mixer_(mixer) {
RTC_DCHECK(mixer);
- RTC_DCHECK(audio_processing);
}
AudioTransportImpl::~AudioTransportImpl() {}
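Because the constructor no longer asserts on audio_processing, an AudioTransportImpl can now be built for APM-free configurations. A small usage sketch under that assumption (the function and its name are illustrative only, not part of this CL):

#include "api/scoped_refptr.h"
#include "audio/audio_transport_impl.h"
#include "modules/audio_mixer/audio_mixer_impl.h"

// Illustrative only: an APM-free transport is now a valid configuration.
void CreateTransportWithoutApm() {
  rtc::scoped_refptr<webrtc::AudioMixer> mixer =
      webrtc::AudioMixerImpl::Create();
  webrtc::AudioTransportImpl transport(mixer.get(),
                                       /*audio_processing=*/nullptr);
  // Capture and render callbacks on this transport now skip all APM calls,
  // as shown in the guarded paths below.
}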
@@ -137,7 +138,8 @@ int32_t AudioTransportImpl::RecordedDataIsAvailable(
// if we're using this feature or not.
// TODO(solenberg): GetConfig() takes a lock. Work around that.
bool typing_detected = false;
- if (audio_processing_->GetConfig().voice_detection.enabled) {
+ if (audio_processing_ &&
+ audio_processing_->GetConfig().voice_detection.enabled) {
if (audio_frame->vad_activity_ != AudioFrame::kVadUnknown) {
bool vad_active = audio_frame->vad_activity_ == AudioFrame::kVadActive;
typing_detected = typing_detection_.Process(key_pressed, vad_active);
@@ -192,8 +194,11 @@ int32_t AudioTransportImpl::NeedMorePlayData(const size_t nSamples,
*elapsed_time_ms = mixed_frame_.elapsed_time_ms_;
*ntp_time_ms = mixed_frame_.ntp_time_ms_;
- const auto error = ProcessReverseAudioFrame(audio_processing_, &mixed_frame_);
- RTC_DCHECK_EQ(error, AudioProcessing::kNoError);
+ if (audio_processing_) {
+ const auto error =
+ ProcessReverseAudioFrame(audio_processing_, &mixed_frame_);
+ RTC_DCHECK_EQ(error, AudioProcessing::kNoError);
+ }
nSamplesOut = Resample(mixed_frame_, samplesPerSec, &render_resampler_,
static_cast<int16_t*>(audioSamples));