summaryrefslogtreecommitdiff
path: root/modules
diff options
context:
space:
mode:
authorAndroid Chromium Automerger <chromium-automerger@android>2014-07-03 16:32:54 +0000
committerAndroid Chromium Automerger <chromium-automerger@android>2014-07-03 16:32:54 +0000
commitc7343a33e05ef5f5a7caa3d61a89863348f2dced (patch)
tree9f6f1017e567b8dc0f09368cfea69ad5ffd1102e /modules
parentb3f058479df0aaef58588ab2d623fb302afdc8ca (diff)
parentd13c3753199496aeddc73ec88548da73283c312f (diff)
downloadwebrtc-c7343a33e05ef5f5a7caa3d61a89863348f2dced.tar.gz
Merge third_party/webrtc from https://chromium.googlesource.com/external/webrtc/trunk/webrtc.git at d13c3753199496aeddc73ec88548da73283c312f
This commit was generated by merge_from_chromium.py. Change-Id: Ic6e1b4cf621e39333250662fbbf1833e8467204f
Diffstat (limited to 'modules')
-rw-r--r--modules/audio_coding/main/acm2/acm_receive_test.cc181
-rw-r--r--modules/audio_coding/main/acm2/acm_receive_test.h55
-rw-r--r--modules/audio_coding/main/acm2/audio_coding_module.gypi13
-rw-r--r--modules/audio_coding/main/acm2/audio_coding_module_unittest.cc76
-rw-r--r--modules/audio_coding/neteq/tools/audio_sink.h17
-rw-r--r--modules/audio_coding/neteq/tools/packet_source.h10
-rw-r--r--modules/audio_coding/neteq/tools/rtp_file_source.cc4
-rw-r--r--modules/audio_device/OWNERS1
-rw-r--r--modules/audio_processing/aec/Android.mk3
-rw-r--r--modules/audio_processing/aec/aec_core.c288
-rw-r--r--modules/audio_processing/aec/aec_core_internal.h8
-rw-r--r--modules/audio_processing/aec/aec_rdft.c6
-rw-r--r--modules/audio_processing/aec/aec_rdft.h3
-rw-r--r--modules/audio_processing/aec/aec_rdft_neon.c185
-rw-r--r--modules/audio_processing/audio_processing.gypi1
-rw-r--r--modules/audio_processing/include/audio_processing.h8
-rw-r--r--modules/audio_processing/test/process_test.cc4
-rw-r--r--modules/desktop_capture/BUILD.gn113
-rw-r--r--modules/modules.gyp1
-rw-r--r--modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h1
-rw-r--r--modules/remote_bitrate_estimator/overuse_detector.cc12
-rw-r--r--modules/remote_bitrate_estimator/overuse_detector.h4
-rw-r--r--modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream.cc65
-rw-r--r--modules/remote_bitrate_estimator/remote_bitrate_estimator_unittest_helper.cc9
-rw-r--r--modules/remote_bitrate_estimator/remote_bitrate_estimator_unittest_helper.h1
-rw-r--r--modules/video_capture/OWNERS1
-rw-r--r--modules/video_coding/main/source/media_optimization.cc103
-rw-r--r--modules/video_coding/main/source/media_optimization.h103
-rw-r--r--modules/video_render/OWNERS3
29 files changed, 1038 insertions, 241 deletions
diff --git a/modules/audio_coding/main/acm2/acm_receive_test.cc b/modules/audio_coding/main/acm2/acm_receive_test.cc
new file mode 100644
index 00000000..43d623d1
--- /dev/null
+++ b/modules/audio_coding/main/acm2/acm_receive_test.cc
@@ -0,0 +1,181 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_coding/main/acm2/acm_receive_test.h"
+
+#include <assert.h>
+#include <stdio.h>
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/modules/audio_coding/main/interface/audio_coding_module.h"
+#include "webrtc/modules/audio_coding/neteq/tools/audio_sink.h"
+#include "webrtc/modules/audio_coding/neteq/tools/packet.h"
+#include "webrtc/modules/audio_coding/neteq/tools/packet_source.h"
+
+namespace webrtc {
+namespace test {
+
+namespace {
+// Returns true if the codec should be registered, otherwise false. Changes
+// the number of channels for the Opus codec to always be 1.
+bool ModifyAndUseThisCodec(CodecInst* codec_param) {
+ if (STR_CASE_CMP(codec_param->plname, "CN") == 0 &&
+ codec_param->plfreq == 48000)
+ return false; // Skip 48 kHz comfort noise.
+
+ if (STR_CASE_CMP(codec_param->plname, "telephone-event") == 0)
+ return false; // Skip DTMF.
+
+ if (STR_CASE_CMP(codec_param->plname, "opus") == 0)
+ codec_param->channels = 1; // Always register Opus as mono.
+ else if (codec_param->channels > 1)
+ return false; // Skip all non-mono codecs.
+
+ return true;
+}
+
+// Remaps payload types from ACM's default to those used in the resource file
+// neteq_universal_new.rtp. Returns true if the codec should be registered,
+// otherwise false. The payload types are set as follows (all are mono codecs):
+// PCMu = 0;
+// PCMa = 8;
+// Comfort noise 8 kHz = 13
+// Comfort noise 16 kHz = 98
+// Comfort noise 32 kHz = 99
+// iLBC = 102
+// iSAC wideband = 103
+// iSAC super-wideband = 104
+// iSAC fullband = 124
+// AVT/DTMF = 106
+// RED = 117
+// PCM16b 8 kHz = 93
+// PCM16b 16 kHz = 94
+// PCM16b 32 kHz = 95
+// G.722 = 9
+bool RemapPltypeAndUseThisCodec(const char* plname,
+ int plfreq,
+ int channels,
+ int* pltype) {
+ if (channels != 1)
+ return false; // Don't use non-mono codecs.
+
+ // Re-map pltypes to those used in the NetEq test files.
+ if (STR_CASE_CMP(plname, "PCMU") == 0 && plfreq == 8000) {
+ *pltype = 0;
+ } else if (STR_CASE_CMP(plname, "PCMA") == 0 && plfreq == 8000) {
+ *pltype = 8;
+ } else if (STR_CASE_CMP(plname, "CN") == 0 && plfreq == 8000) {
+ *pltype = 13;
+ } else if (STR_CASE_CMP(plname, "CN") == 0 && plfreq == 16000) {
+ *pltype = 98;
+ } else if (STR_CASE_CMP(plname, "CN") == 0 && plfreq == 32000) {
+ *pltype = 99;
+ } else if (STR_CASE_CMP(plname, "ILBC") == 0) {
+ *pltype = 102;
+ } else if (STR_CASE_CMP(plname, "ISAC") == 0 && plfreq == 16000) {
+ *pltype = 103;
+ } else if (STR_CASE_CMP(plname, "ISAC") == 0 && plfreq == 32000) {
+ *pltype = 104;
+ } else if (STR_CASE_CMP(plname, "ISAC") == 0 && plfreq == 48000) {
+ *pltype = 124;
+ } else if (STR_CASE_CMP(plname, "telephone-event") == 0) {
+ *pltype = 106;
+ } else if (STR_CASE_CMP(plname, "red") == 0) {
+ *pltype = 117;
+ } else if (STR_CASE_CMP(plname, "L16") == 0 && plfreq == 8000) {
+ *pltype = 93;
+ } else if (STR_CASE_CMP(plname, "L16") == 0 && plfreq == 16000) {
+ *pltype = 94;
+ } else if (STR_CASE_CMP(plname, "L16") == 0 && plfreq == 32000) {
+ *pltype = 95;
+ } else if (STR_CASE_CMP(plname, "G722") == 0) {
+ *pltype = 9;
+ } else {
+ // Don't use any other codecs.
+ return false;
+ }
+ return true;
+}
+} // namespace
+
+AcmReceiveTest::AcmReceiveTest(PacketSource* packet_source,
+ AudioSink* audio_sink,
+ int output_freq_hz)
+ : clock_(0),
+ acm_(webrtc::AudioCodingModule::Create(0, &clock_)),
+ packet_source_(packet_source),
+ audio_sink_(audio_sink),
+ output_freq_hz_(output_freq_hz) {
+}
+
+void AcmReceiveTest::RegisterDefaultCodecs() {
+ CodecInst my_codec_param;
+ for (int n = 0; n < acm_->NumberOfCodecs(); n++) {
+ ASSERT_EQ(0, acm_->Codec(n, &my_codec_param)) << "Failed to get codec.";
+ if (ModifyAndUseThisCodec(&my_codec_param)) {
+ ASSERT_EQ(0, acm_->RegisterReceiveCodec(my_codec_param))
+ << "Couldn't register receive codec.\n";
+ }
+ }
+}
+
+void AcmReceiveTest::RegisterNetEqTestCodecs() {
+ CodecInst my_codec_param;
+ for (int n = 0; n < acm_->NumberOfCodecs(); n++) {
+ ASSERT_EQ(0, acm_->Codec(n, &my_codec_param)) << "Failed to get codec.";
+ if (!ModifyAndUseThisCodec(&my_codec_param)) {
+ // Skip this codec.
+ continue;
+ }
+
+ if (RemapPltypeAndUseThisCodec(my_codec_param.plname,
+ my_codec_param.plfreq,
+ my_codec_param.channels,
+ &my_codec_param.pltype)) {
+ ASSERT_EQ(0, acm_->RegisterReceiveCodec(my_codec_param))
+ << "Couldn't register receive codec.\n";
+ }
+ }
+}
+
+void AcmReceiveTest::Run() {
+ for (scoped_ptr<Packet> packet(packet_source_->NextPacket()); packet;
+ packet.reset(packet_source_->NextPacket())) {
+ // Pull audio until time to insert packet.
+ while (clock_.TimeInMilliseconds() < packet->time_ms()) {
+ AudioFrame output_frame;
+ EXPECT_EQ(0, acm_->PlayoutData10Ms(output_freq_hz_, &output_frame));
+ EXPECT_EQ(output_freq_hz_, output_frame.sample_rate_hz_);
+ const int samples_per_block = output_freq_hz_ * 10 / 1000;
+ EXPECT_EQ(samples_per_block, output_frame.samples_per_channel_);
+ EXPECT_EQ(1, output_frame.num_channels_);
+ ASSERT_TRUE(audio_sink_->WriteAudioFrame(output_frame));
+ clock_.AdvanceTimeMilliseconds(10);
+ }
+
+ // Insert packet after converting from RTPHeader to WebRtcRTPHeader.
+ WebRtcRTPHeader header;
+ header.header = packet->header();
+ header.frameType = kAudioFrameSpeech;
+ memset(&header.type.Audio, 0, sizeof(RTPAudioHeader));
+ EXPECT_EQ(0,
+ acm_->IncomingPacket(
+ packet->payload(),
+ static_cast<int32_t>(packet->payload_length_bytes()),
+ header))
+ << "Failure when inserting packet:" << std::endl
+ << " PT = " << static_cast<int>(header.header.payloadType) << std::endl
+ << " TS = " << header.header.timestamp << std::endl
+ << " SN = " << header.header.sequenceNumber;
+ }
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/modules/audio_coding/main/acm2/acm_receive_test.h b/modules/audio_coding/main/acm2/acm_receive_test.h
new file mode 100644
index 00000000..672c9292
--- /dev/null
+++ b/modules/audio_coding/main/acm2/acm_receive_test.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_RECEIVE_TEST_H_
+#define WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_RECEIVE_TEST_H_
+
+#include "webrtc/base/constructormagic.h"
+#include "webrtc/system_wrappers/interface/clock.h"
+#include "webrtc/system_wrappers/interface/scoped_ptr.h"
+
+namespace webrtc {
+class AudioCodingModule;
+struct CodecInst;
+
+namespace test {
+class AudioSink;
+class PacketSource;
+
+class AcmReceiveTest {
+ public:
+ AcmReceiveTest(PacketSource* packet_source,
+ AudioSink* audio_sink,
+ int output_freq_hz);
+ virtual ~AcmReceiveTest() {}
+
+ // Registers the codecs with default parameters from ACM.
+ void RegisterDefaultCodecs();
+
+ // Registers codecs with payload types matching the pre-encoded NetEq test
+ // files.
+ void RegisterNetEqTestCodecs();
+
+ // Runs the test.
+ void Run();
+
+ private:
+ SimulatedClock clock_;
+ scoped_ptr<AudioCodingModule> acm_;
+ PacketSource* packet_source_;
+ AudioSink* audio_sink_;
+ const int output_freq_hz_;
+
+ DISALLOW_COPY_AND_ASSIGN(AcmReceiveTest);
+};
+
+} // namespace test
+} // namespace webrtc
+#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_RECEIVE_TEST_H_
diff --git a/modules/audio_coding/main/acm2/audio_coding_module.gypi b/modules/audio_coding/main/acm2/audio_coding_module.gypi
index 90dad6c5..dccfe682 100644
--- a/modules/audio_coding/main/acm2/audio_coding_module.gypi
+++ b/modules/audio_coding/main/acm2/audio_coding_module.gypi
@@ -117,6 +117,19 @@
['include_tests==1', {
'targets': [
{
+ 'target_name': 'acm_receive_test',
+ 'type': 'static_library',
+ 'dependencies': [
+ 'audio_coding_module',
+ 'neteq_unittest_tools',
+ '<(DEPTH)/testing/gtest.gyp:gtest',
+ ],
+ 'sources': [
+ 'acm_receive_test.cc',
+ 'acm_receive_test.h',
+ ],
+ }, # acm_receive_test
+ {
'target_name': 'delay_test',
'type': 'executable',
'dependencies': [
diff --git a/modules/audio_coding/main/acm2/audio_coding_module_unittest.cc b/modules/audio_coding/main/acm2/audio_coding_module_unittest.cc
index 37cd70e5..a73effb4 100644
--- a/modules/audio_coding/main/acm2/audio_coding_module_unittest.cc
+++ b/modules/audio_coding/main/acm2/audio_coding_module_unittest.cc
@@ -12,9 +12,13 @@
#include <vector>
#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/modules/audio_coding/main/acm2/acm_receive_test.h"
#include "webrtc/modules/audio_coding/main/interface/audio_coding_module.h"
#include "webrtc/modules/audio_coding/main/interface/audio_coding_module_typedefs.h"
+#include "webrtc/modules/audio_coding/neteq/tools/audio_checksum.h"
#include "webrtc/modules/audio_coding/neteq/tools/audio_loop.h"
+#include "webrtc/modules/audio_coding/neteq/tools/output_audio_file.h"
+#include "webrtc/modules/audio_coding/neteq/tools/rtp_file_source.h"
#include "webrtc/modules/interface/module_common_types.h"
#include "webrtc/system_wrappers/interface/clock.h"
#include "webrtc/system_wrappers/interface/compile_assert.h"
@@ -511,4 +515,76 @@ TEST_F(AcmIsacMtTest, DoTest) {
EXPECT_EQ(kEventSignaled, RunTest());
}
+class AcmReceiverBitExactness : public ::testing::Test {
+ protected:
+ void Run(int output_freq_hz, const std::string& checksum_ref) {
+ const std::string input_file_name =
+ webrtc::test::ResourcePath("audio_coding/neteq_universal_new", "rtp");
+ scoped_ptr<test::RtpFileSource> packet_source(
+ test::RtpFileSource::Create(input_file_name));
+#ifdef WEBRTC_ANDROID
+ // Filter out iLBC and iSAC-swb since they are not supported on Android.
+ packet_source->FilterOutPayloadType(102); // iLBC.
+ packet_source->FilterOutPayloadType(104); // iSAC-swb.
+#endif
+
+ test::AudioChecksum checksum;
+ const std::string output_file_name =
+ webrtc::test::OutputPath() +
+ ::testing::UnitTest::GetInstance()
+ ->current_test_info()
+ ->test_case_name() +
+ "_" + ::testing::UnitTest::GetInstance()->current_test_info()->name() +
+ "_output.pcm";
+ test::OutputAudioFile output_file(output_file_name);
+ test::AudioSinkFork output(&checksum, &output_file);
+
+ test::AcmReceiveTest test(packet_source.get(), &output, output_freq_hz);
+ ASSERT_NO_FATAL_FAILURE(test.RegisterNetEqTestCodecs());
+ test.Run();
+
+ std::string checksum_string = checksum.Finish();
+ EXPECT_EQ(checksum_ref, checksum_string);
+ }
+
+ static std::string PlatformChecksum(std::string win64,
+ std::string android,
+ std::string others) {
+#if defined(_WIN32) && defined(WEBRTC_ARCH_64_BITS)
+ return win64;
+#elif defined(WEBRTC_ANDROID)
+ return android;
+#else
+ return others;
+#endif
+ }
+};
+
+TEST_F(AcmReceiverBitExactness, 8kHzOutput) {
+ Run(8000,
+ PlatformChecksum("a53573d9a44a53ea852056e9550fbd53",
+ "7924385273062b9f07aa3d4dff30d601",
+ "c54fd4a532cdb400bca2758d3a941eee"));
+}
+
+TEST_F(AcmReceiverBitExactness, 16kHzOutput) {
+ Run(16000,
+ PlatformChecksum("16ed8ee37bad45de2e1ad2b34c7c3910",
+ "d1d3dde41da936f80fa63d718fbc0fc0",
+ "68a8b57a0672356f846b3cea51e49903"));
+}
+
+TEST_F(AcmReceiverBitExactness, 32kHzOutput) {
+ Run(32000,
+ PlatformChecksum("f0f41f494d5d811f5a1cfce8fd89d9db",
+ "23b82b2605e3aab3d4d9e67dba341355",
+ "f2a69bcdedca515e548cd2c5af75d046"));
+}
+
+TEST_F(AcmReceiverBitExactness, 48kHzOutput) {
+ Run(48000,
+ PlatformChecksum("77730099d995180ab6cb60379d4a9715",
+ "580c2d0b273ffa8fa0796d784908cbdb",
+ "5c1bdee51750e13fbb9413bc9280c0dd"));
+}
} // namespace webrtc
diff --git a/modules/audio_coding/neteq/tools/audio_sink.h b/modules/audio_coding/neteq/tools/audio_sink.h
index 5743c364..474ec1c4 100644
--- a/modules/audio_coding/neteq/tools/audio_sink.h
+++ b/modules/audio_coding/neteq/tools/audio_sink.h
@@ -41,6 +41,23 @@ class AudioSink {
DISALLOW_COPY_AND_ASSIGN(AudioSink);
};
+// Forks the output audio to two AudioSink objects.
+class AudioSinkFork : public AudioSink {
+ public:
+ AudioSinkFork(AudioSink* left, AudioSink* right)
+ : left_sink_(left), right_sink_(right) {}
+
+ virtual bool WriteArray(const int16_t* audio, size_t num_samples) OVERRIDE {
+ return left_sink_->WriteArray(audio, num_samples) &&
+ right_sink_->WriteArray(audio, num_samples);
+ }
+
+ private:
+ AudioSink* left_sink_;
+ AudioSink* right_sink_;
+
+ DISALLOW_COPY_AND_ASSIGN(AudioSinkFork);
+};
} // namespace test
} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_AUDIO_SINK_H_
diff --git a/modules/audio_coding/neteq/tools/packet_source.h b/modules/audio_coding/neteq/tools/packet_source.h
index 669bc14e..ab9ef83e 100644
--- a/modules/audio_coding/neteq/tools/packet_source.h
+++ b/modules/audio_coding/neteq/tools/packet_source.h
@@ -11,7 +11,10 @@
#ifndef WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_PACKET_SOURCE_H_
#define WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_PACKET_SOURCE_H_
+#include <bitset>
+
#include "webrtc/base/constructormagic.h"
+#include "webrtc/typedefs.h"
namespace webrtc {
namespace test {
@@ -28,6 +31,13 @@ class PacketSource {
// depleted, or if an error occurred.
virtual Packet* NextPacket() = 0;
+ virtual void FilterOutPayloadType(uint8_t payload_type) {
+ filter_.set(payload_type, true);
+ }
+
+ protected:
+ std::bitset<128> filter_; // Payload type is 7 bits in the RFC.
+
private:
DISALLOW_COPY_AND_ASSIGN(PacketSource);
};
diff --git a/modules/audio_coding/neteq/tools/rtp_file_source.cc b/modules/audio_coding/neteq/tools/rtp_file_source.cc
index 6490d468..6924a7f2 100644
--- a/modules/audio_coding/neteq/tools/rtp_file_source.cc
+++ b/modules/audio_coding/neteq/tools/rtp_file_source.cc
@@ -92,6 +92,10 @@ Packet* RtpFileSource::NextPacket() {
assert(false);
return NULL;
}
+ if (filter_.test(packet->header().payloadType)) {
+ // This payload type should be filtered out. Continue to the next packet.
+ continue;
+ }
return packet.release();
}
return NULL;
diff --git a/modules/audio_device/OWNERS b/modules/audio_device/OWNERS
index 1fe45940..bb11a4ec 100644
--- a/modules/audio_device/OWNERS
+++ b/modules/audio_device/OWNERS
@@ -1,6 +1,7 @@
henrikg@webrtc.org
henrika@webrtc.org
niklas.enbom@webrtc.org
+tkchin@webrtc.org
xians@webrtc.org
per-file *.isolate=kjellander@webrtc.org
diff --git a/modules/audio_processing/aec/Android.mk b/modules/audio_processing/aec/Android.mk
index 181e87d9..b86ef5f9 100644
--- a/modules/audio_processing/aec/Android.mk
+++ b/modules/audio_processing/aec/Android.mk
@@ -53,6 +53,7 @@ include $(BUILD_STATIC_LIBRARY)
ifeq ($(WEBRTC_BUILD_NEON_LIBS),true)
LOCAL_SRC_FILES += \
- aec_core_neon.c
+ aec_core_neon.c \
+ aec_rdft_neon.c
endif # ifeq ($(WEBRTC_BUILD_NEON_LIBS),true)
diff --git a/modules/audio_processing/aec/aec_core.c b/modules/audio_processing/aec/aec_core.c
index 207c6dc3..2fd298c0 100644
--- a/modules/audio_processing/aec/aec_core.c
+++ b/modules/audio_processing/aec/aec_core.c
@@ -415,11 +415,167 @@ static void OverdriveAndSuppress(AecCore* aec,
}
}
+static int PartitionDelay(const AecCore* aec) {
+ // Measures the energy in each filter partition and returns the partition with
+ // highest energy.
+ // TODO(bjornv): Spread computational cost by computing one partition per
+ // block?
+ float wfEnMax = 0;
+ int i;
+ int delay = 0;
+
+ for (i = 0; i < aec->num_partitions; i++) {
+ int j;
+ int pos = i * PART_LEN1;
+ float wfEn = 0;
+ for (j = 0; j < PART_LEN1; j++) {
+ wfEn += aec->wfBuf[0][pos + j] * aec->wfBuf[0][pos + j] +
+ aec->wfBuf[1][pos + j] * aec->wfBuf[1][pos + j];
+ }
+
+ if (wfEn > wfEnMax) {
+ wfEnMax = wfEn;
+ delay = i;
+ }
+ }
+ return delay;
+}
+
+// Threshold to protect against the ill-effects of a zero far-end.
+static const float kMinFarendPSD = 15;
+
+// Updates the following smoothed Power Spectral Densities (PSD):
+// - sd : near-end
+// - se : residual echo
+// - sx : far-end
+// - sde : cross-PSD of near-end and residual echo
+// - sxd : cross-PSD of near-end and far-end
+//
+// In addition to updating the PSDs, also the filter diverge state is determined
+// upon actions are taken.
+static void SmoothedPSD(AecCore* aec,
+ float efw[2][PART_LEN1],
+ float dfw[2][PART_LEN1],
+ float xfw[2][PART_LEN1]) {
+ // Power estimate smoothing coefficients.
+ const float* ptrGCoh = aec->extended_filter_enabled
+ ? kExtendedSmoothingCoefficients[aec->mult - 1]
+ : kNormalSmoothingCoefficients[aec->mult - 1];
+ int i;
+ float sdSum = 0, seSum = 0;
+
+ for (i = 0; i < PART_LEN1; i++) {
+ aec->sd[i] = ptrGCoh[0] * aec->sd[i] +
+ ptrGCoh[1] * (dfw[0][i] * dfw[0][i] + dfw[1][i] * dfw[1][i]);
+ aec->se[i] = ptrGCoh[0] * aec->se[i] +
+ ptrGCoh[1] * (efw[0][i] * efw[0][i] + efw[1][i] * efw[1][i]);
+ // We threshold here to protect against the ill-effects of a zero farend.
+ // The threshold is not arbitrarily chosen, but balances protection and
+ // adverse interaction with the algorithm's tuning.
+ // TODO(bjornv): investigate further why this is so sensitive.
+ aec->sx[i] =
+ ptrGCoh[0] * aec->sx[i] +
+ ptrGCoh[1] * WEBRTC_SPL_MAX(
+ xfw[0][i] * xfw[0][i] + xfw[1][i] * xfw[1][i], kMinFarendPSD);
+
+ aec->sde[i][0] =
+ ptrGCoh[0] * aec->sde[i][0] +
+ ptrGCoh[1] * (dfw[0][i] * efw[0][i] + dfw[1][i] * efw[1][i]);
+ aec->sde[i][1] =
+ ptrGCoh[0] * aec->sde[i][1] +
+ ptrGCoh[1] * (dfw[0][i] * efw[1][i] - dfw[1][i] * efw[0][i]);
+
+ aec->sxd[i][0] =
+ ptrGCoh[0] * aec->sxd[i][0] +
+ ptrGCoh[1] * (dfw[0][i] * xfw[0][i] + dfw[1][i] * xfw[1][i]);
+ aec->sxd[i][1] =
+ ptrGCoh[0] * aec->sxd[i][1] +
+ ptrGCoh[1] * (dfw[0][i] * xfw[1][i] - dfw[1][i] * xfw[0][i]);
+
+ sdSum += aec->sd[i];
+ seSum += aec->se[i];
+ }
+
+ // Divergent filter safeguard.
+ aec->divergeState = (aec->divergeState ? 1.05f : 1.0f) * seSum > sdSum;
+
+ if (aec->divergeState)
+ memcpy(efw, dfw, sizeof(efw[0][0]) * 2 * PART_LEN1);
+
+ // Reset if error is significantly larger than nearend (13 dB).
+ if (!aec->extended_filter_enabled && seSum > (19.95f * sdSum))
+ memset(aec->wfBuf, 0, sizeof(aec->wfBuf));
+}
+
+// Window time domain data to be used by the fft.
+__inline static void WindowData(float* x_windowed, const float* x) {
+ int i;
+ for (i = 0; i < PART_LEN; i++) {
+ x_windowed[i] = x[i] * sqrtHanning[i];
+ x_windowed[PART_LEN + i] = x[PART_LEN + i] * sqrtHanning[PART_LEN - i];
+ }
+}
+
+// Puts fft output data into a complex valued array.
+__inline static void StoreAsComplex(const float* data,
+ float data_complex[2][PART_LEN1]) {
+ int i;
+ data_complex[0][0] = data[0];
+ data_complex[1][0] = 0;
+ for (i = 1; i < PART_LEN; i++) {
+ data_complex[0][i] = data[2 * i];
+ data_complex[1][i] = data[2 * i + 1];
+ }
+ data_complex[0][PART_LEN] = data[1];
+ data_complex[1][PART_LEN] = 0;
+}
+
+static void SubbandCoherence(AecCore* aec,
+ float efw[2][PART_LEN1],
+ float xfw[2][PART_LEN1],
+ float* fft,
+ float* cohde,
+ float* cohxd) {
+ float dfw[2][PART_LEN1];
+ int i;
+
+ if (aec->delayEstCtr == 0)
+ aec->delayIdx = PartitionDelay(aec);
+
+ // Use delayed far.
+ memcpy(xfw,
+ aec->xfwBuf + aec->delayIdx * PART_LEN1,
+ sizeof(xfw[0][0]) * 2 * PART_LEN1);
+
+ // Windowed near fft
+ WindowData(fft, aec->dBuf);
+ aec_rdft_forward_128(fft);
+ StoreAsComplex(fft, dfw);
+
+ // Windowed error fft
+ WindowData(fft, aec->eBuf);
+ aec_rdft_forward_128(fft);
+ StoreAsComplex(fft, efw);
+
+ SmoothedPSD(aec, efw, dfw, xfw);
+
+ // Subband coherence
+ for (i = 0; i < PART_LEN1; i++) {
+ cohde[i] =
+ (aec->sde[i][0] * aec->sde[i][0] + aec->sde[i][1] * aec->sde[i][1]) /
+ (aec->sd[i] * aec->se[i] + 1e-10f);
+ cohxd[i] =
+ (aec->sxd[i][0] * aec->sxd[i][0] + aec->sxd[i][1] * aec->sxd[i][1]) /
+ (aec->sx[i] * aec->sd[i] + 1e-10f);
+ }
+}
+
WebRtcAec_FilterFar_t WebRtcAec_FilterFar;
WebRtcAec_ScaleErrorSignal_t WebRtcAec_ScaleErrorSignal;
WebRtcAec_FilterAdaptation_t WebRtcAec_FilterAdaptation;
WebRtcAec_OverdriveAndSuppress_t WebRtcAec_OverdriveAndSuppress;
WebRtcAec_ComfortNoise_t WebRtcAec_ComfortNoise;
+WebRtcAec_SubbandCoherence_t WebRtcAec_SubbandCoherence;
int WebRtcAec_InitAec(AecCore* aec, int sampFreq) {
int i;
@@ -571,6 +727,7 @@ int WebRtcAec_InitAec(AecCore* aec, int sampFreq) {
WebRtcAec_FilterAdaptation = FilterAdaptation;
WebRtcAec_OverdriveAndSuppress = OverdriveAndSuppress;
WebRtcAec_ComfortNoise = ComfortNoise;
+ WebRtcAec_SubbandCoherence = SubbandCoherence;
#if defined(WEBRTC_ARCH_X86_FAMILY)
if (WebRtc_GetCPUInfo(kSSE2)) {
@@ -1024,12 +1181,12 @@ static void ProcessBlock(AecCore* aec) {
}
static void NonLinearProcessing(AecCore* aec, float* output, float* outputH) {
- float efw[2][PART_LEN1], dfw[2][PART_LEN1], xfw[2][PART_LEN1];
+ float efw[2][PART_LEN1], xfw[2][PART_LEN1];
complex_t comfortNoiseHband[PART_LEN1];
float fft[PART_LEN2];
float scale, dtmp;
float nlpGainHband;
- int i, j, pos;
+ int i;
// Coherence and non-linear filter
float cohde[PART_LEN1], cohxd[PART_LEN1];
@@ -1040,20 +1197,12 @@ static void NonLinearProcessing(AecCore* aec, float* output, float* outputH) {
const float prefBandQuant = 0.75f, prefBandQuantLow = 0.5f;
const int prefBandSize = kPrefBandSize / aec->mult;
const int minPrefBand = 4 / aec->mult;
-
- // Near and error power sums
- float sdSum = 0, seSum = 0;
-
// Power estimate smoothing coefficients.
- const float* ptrGCoh = aec->extended_filter_enabled
- ? kExtendedSmoothingCoefficients[aec->mult - 1]
- : kNormalSmoothingCoefficients[aec->mult - 1];
const float* min_overdrive = aec->extended_filter_enabled
? kExtendedMinOverDrive
: kNormalMinOverDrive;
// Filter energy
- float wfEnMax = 0, wfEn = 0;
const int delayEstInterval = 10 * aec->mult;
float* xfw_ptr = NULL;
@@ -1068,26 +1217,6 @@ static void NonLinearProcessing(AecCore* aec, float* output, float* outputH) {
nlpGainHband = (float)0.0;
dtmp = (float)0.0;
- // Measure energy in each filter partition to determine delay.
- // TODO: Spread by computing one partition per block?
- if (aec->delayEstCtr == 0) {
- wfEnMax = 0;
- aec->delayIdx = 0;
- for (i = 0; i < aec->num_partitions; i++) {
- pos = i * PART_LEN1;
- wfEn = 0;
- for (j = 0; j < PART_LEN1; j++) {
- wfEn += aec->wfBuf[0][pos + j] * aec->wfBuf[0][pos + j] +
- aec->wfBuf[1][pos + j] * aec->wfBuf[1][pos + j];
- }
-
- if (wfEn > wfEnMax) {
- wfEnMax = wfEn;
- aec->delayIdx = i;
- }
- }
- }
-
// We should always have at least one element stored in |far_buf|.
assert(WebRtc_available_read(aec->far_buf_windowed) > 0);
// NLP
@@ -1098,104 +1227,7 @@ static void NonLinearProcessing(AecCore* aec, float* output, float* outputH) {
// Buffer far.
memcpy(aec->xfwBuf, xfw_ptr, sizeof(float) * 2 * PART_LEN1);
- // Use delayed far.
- memcpy(xfw, aec->xfwBuf + aec->delayIdx * PART_LEN1, sizeof(xfw));
-
- // Windowed near fft
- for (i = 0; i < PART_LEN; i++) {
- fft[i] = aec->dBuf[i] * sqrtHanning[i];
- fft[PART_LEN + i] = aec->dBuf[PART_LEN + i] * sqrtHanning[PART_LEN - i];
- }
- aec_rdft_forward_128(fft);
-
- dfw[1][0] = 0;
- dfw[1][PART_LEN] = 0;
- dfw[0][0] = fft[0];
- dfw[0][PART_LEN] = fft[1];
- for (i = 1; i < PART_LEN; i++) {
- dfw[0][i] = fft[2 * i];
- dfw[1][i] = fft[2 * i + 1];
- }
-
- // Windowed error fft
- for (i = 0; i < PART_LEN; i++) {
- fft[i] = aec->eBuf[i] * sqrtHanning[i];
- fft[PART_LEN + i] = aec->eBuf[PART_LEN + i] * sqrtHanning[PART_LEN - i];
- }
- aec_rdft_forward_128(fft);
- efw[1][0] = 0;
- efw[1][PART_LEN] = 0;
- efw[0][0] = fft[0];
- efw[0][PART_LEN] = fft[1];
- for (i = 1; i < PART_LEN; i++) {
- efw[0][i] = fft[2 * i];
- efw[1][i] = fft[2 * i + 1];
- }
-
- // Smoothed PSD
- for (i = 0; i < PART_LEN1; i++) {
- aec->sd[i] = ptrGCoh[0] * aec->sd[i] +
- ptrGCoh[1] * (dfw[0][i] * dfw[0][i] + dfw[1][i] * dfw[1][i]);
- aec->se[i] = ptrGCoh[0] * aec->se[i] +
- ptrGCoh[1] * (efw[0][i] * efw[0][i] + efw[1][i] * efw[1][i]);
- // We threshold here to protect against the ill-effects of a zero farend.
- // The threshold is not arbitrarily chosen, but balances protection and
- // adverse interaction with the algorithm's tuning.
- // TODO: investigate further why this is so sensitive.
- aec->sx[i] =
- ptrGCoh[0] * aec->sx[i] +
- ptrGCoh[1] *
- WEBRTC_SPL_MAX(xfw[0][i] * xfw[0][i] + xfw[1][i] * xfw[1][i], 15);
-
- aec->sde[i][0] =
- ptrGCoh[0] * aec->sde[i][0] +
- ptrGCoh[1] * (dfw[0][i] * efw[0][i] + dfw[1][i] * efw[1][i]);
- aec->sde[i][1] =
- ptrGCoh[0] * aec->sde[i][1] +
- ptrGCoh[1] * (dfw[0][i] * efw[1][i] - dfw[1][i] * efw[0][i]);
-
- aec->sxd[i][0] =
- ptrGCoh[0] * aec->sxd[i][0] +
- ptrGCoh[1] * (dfw[0][i] * xfw[0][i] + dfw[1][i] * xfw[1][i]);
- aec->sxd[i][1] =
- ptrGCoh[0] * aec->sxd[i][1] +
- ptrGCoh[1] * (dfw[0][i] * xfw[1][i] - dfw[1][i] * xfw[0][i]);
-
- sdSum += aec->sd[i];
- seSum += aec->se[i];
- }
-
- // Divergent filter safeguard.
- if (aec->divergeState == 0) {
- if (seSum > sdSum) {
- aec->divergeState = 1;
- }
- } else {
- if (seSum * 1.05f < sdSum) {
- aec->divergeState = 0;
- }
- }
-
- if (aec->divergeState == 1) {
- memcpy(efw, dfw, sizeof(efw));
- }
-
- if (!aec->extended_filter_enabled) {
- // Reset if error is significantly larger than nearend (13 dB).
- if (seSum > (19.95f * sdSum)) {
- memset(aec->wfBuf, 0, sizeof(aec->wfBuf));
- }
- }
-
- // Subband coherence
- for (i = 0; i < PART_LEN1; i++) {
- cohde[i] =
- (aec->sde[i][0] * aec->sde[i][0] + aec->sde[i][1] * aec->sde[i][1]) /
- (aec->sd[i] * aec->se[i] + 1e-10f);
- cohxd[i] =
- (aec->sxd[i][0] * aec->sxd[i][0] + aec->sxd[i][1] * aec->sxd[i][1]) /
- (aec->sx[i] * aec->sd[i] + 1e-10f);
- }
+ WebRtcAec_SubbandCoherence(aec, efw, xfw, fft, cohde, cohxd);
hNlXdAvg = 0;
for (i = minPrefBand; i < prefBandSize + minPrefBand; i++) {
diff --git a/modules/audio_processing/aec/aec_core_internal.h b/modules/audio_processing/aec/aec_core_internal.h
index 1c560f91..372b4274 100644
--- a/modules/audio_processing/aec/aec_core_internal.h
+++ b/modules/audio_processing/aec/aec_core_internal.h
@@ -170,4 +170,12 @@ typedef void (*WebRtcAec_ComfortNoise_t)(AecCore* aec,
const float* lambda);
extern WebRtcAec_ComfortNoise_t WebRtcAec_ComfortNoise;
+typedef void (*WebRtcAec_SubbandCoherence_t)(AecCore* aec,
+ float efw[2][PART_LEN1],
+ float xfw[2][PART_LEN1],
+ float* fft,
+ float* cohde,
+ float* cohxd);
+extern WebRtcAec_SubbandCoherence_t WebRtcAec_SubbandCoherence;
+
#endif // WEBRTC_MODULES_AUDIO_PROCESSING_AEC_AEC_CORE_INTERNAL_H_
diff --git a/modules/audio_processing/aec/aec_rdft.c b/modules/audio_processing/aec/aec_rdft.c
index 7731b37b..5b1c2210 100644
--- a/modules/audio_processing/aec/aec_rdft.c
+++ b/modules/audio_processing/aec/aec_rdft.c
@@ -287,6 +287,9 @@ static void cft1st_128_C(float* a) {
float wk1r, wk1i, wk2r, wk2i, wk3r, wk3i;
float x0r, x0i, x1r, x1i, x2r, x2i, x3r, x3i;
+ // The processing of the first set of elements was simplified in C to avoid
+ // some operations (multiplication by zero or one, addition of two elements
+ // multiplied by the same weight, ...).
x0r = a[0] + a[2];
x0i = a[1] + a[3];
x1r = a[0] - a[2];
@@ -660,6 +663,9 @@ void aec_rdft_init(void) {
#if defined(MIPS_FPU_LE)
aec_rdft_init_mips();
#endif
+#if defined(WEBRTC_DETECT_ARM_NEON) || defined(WEBRTC_ARCH_ARM_NEON)
+ aec_rdft_init_neon();
+#endif
// init library constants.
makewt_32();
makect_32();
diff --git a/modules/audio_processing/aec/aec_rdft.h b/modules/audio_processing/aec/aec_rdft.h
index 795c57d4..94301601 100644
--- a/modules/audio_processing/aec/aec_rdft.h
+++ b/modules/audio_processing/aec/aec_rdft.h
@@ -60,5 +60,8 @@ void aec_rdft_inverse_128(float* a);
#if defined(MIPS_FPU_LE)
void aec_rdft_init_mips(void);
#endif
+#if defined(WEBRTC_DETECT_ARM_NEON) || defined(WEBRTC_ARCH_ARM_NEON)
+void aec_rdft_init_neon(void);
+#endif
#endif // WEBRTC_MODULES_AUDIO_PROCESSING_AEC_MAIN_SOURCE_AEC_RDFT_H_
diff --git a/modules/audio_processing/aec/aec_rdft_neon.c b/modules/audio_processing/aec/aec_rdft_neon.c
new file mode 100644
index 00000000..80892d26
--- /dev/null
+++ b/modules/audio_processing/aec/aec_rdft_neon.c
@@ -0,0 +1,185 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * The rdft AEC algorithm, neon version of speed-critical functions.
+ *
+ * Based on the sse2 version.
+ */
+
+
+#include "webrtc/modules/audio_processing/aec/aec_rdft.h"
+
+#include <arm_neon.h>
+
+static const ALIGN16_BEG float ALIGN16_END
+ k_swap_sign[4] = {-1.f, 1.f, -1.f, 1.f};
+
+static void cft1st_128_neon(float* a) {
+ const float32x4_t vec_swap_sign = vld1q_f32((float32_t*)k_swap_sign);
+ int j, k2;
+
+ for (k2 = 0, j = 0; j < 128; j += 16, k2 += 4) {
+ float32x4_t a00v = vld1q_f32(&a[j + 0]);
+ float32x4_t a04v = vld1q_f32(&a[j + 4]);
+ float32x4_t a08v = vld1q_f32(&a[j + 8]);
+ float32x4_t a12v = vld1q_f32(&a[j + 12]);
+ float32x4_t a01v = vcombine_f32(vget_low_f32(a00v), vget_low_f32(a08v));
+ float32x4_t a23v = vcombine_f32(vget_high_f32(a00v), vget_high_f32(a08v));
+ float32x4_t a45v = vcombine_f32(vget_low_f32(a04v), vget_low_f32(a12v));
+ float32x4_t a67v = vcombine_f32(vget_high_f32(a04v), vget_high_f32(a12v));
+ const float32x4_t wk1rv = vld1q_f32(&rdft_wk1r[k2]);
+ const float32x4_t wk1iv = vld1q_f32(&rdft_wk1i[k2]);
+ const float32x4_t wk2rv = vld1q_f32(&rdft_wk2r[k2]);
+ const float32x4_t wk2iv = vld1q_f32(&rdft_wk2i[k2]);
+ const float32x4_t wk3rv = vld1q_f32(&rdft_wk3r[k2]);
+ const float32x4_t wk3iv = vld1q_f32(&rdft_wk3i[k2]);
+ float32x4_t x0v = vaddq_f32(a01v, a23v);
+ const float32x4_t x1v = vsubq_f32(a01v, a23v);
+ const float32x4_t x2v = vaddq_f32(a45v, a67v);
+ const float32x4_t x3v = vsubq_f32(a45v, a67v);
+ const float32x4_t x3w = vrev64q_f32(x3v);
+ float32x4_t x0w;
+ a01v = vaddq_f32(x0v, x2v);
+ x0v = vsubq_f32(x0v, x2v);
+ x0w = vrev64q_f32(x0v);
+ a45v = vmulq_f32(wk2rv, x0v);
+ a45v = vmlaq_f32(a45v, wk2iv, x0w);
+ x0v = vmlaq_f32(x1v, x3w, vec_swap_sign);
+ x0w = vrev64q_f32(x0v);
+ a23v = vmulq_f32(wk1rv, x0v);
+ a23v = vmlaq_f32(a23v, wk1iv, x0w);
+ x0v = vmlsq_f32(x1v, x3w, vec_swap_sign);
+ x0w = vrev64q_f32(x0v);
+ a67v = vmulq_f32(wk3rv, x0v);
+ a67v = vmlaq_f32(a67v, wk3iv, x0w);
+ a00v = vcombine_f32(vget_low_f32(a01v), vget_low_f32(a23v));
+ a04v = vcombine_f32(vget_low_f32(a45v), vget_low_f32(a67v));
+ a08v = vcombine_f32(vget_high_f32(a01v), vget_high_f32(a23v));
+ a12v = vcombine_f32(vget_high_f32(a45v), vget_high_f32(a67v));
+ vst1q_f32(&a[j + 0], a00v);
+ vst1q_f32(&a[j + 4], a04v);
+ vst1q_f32(&a[j + 8], a08v);
+ vst1q_f32(&a[j + 12], a12v);
+ }
+}
+
+static void cftmdl_128_neon(float* a) {
+ int j;
+ const int l = 8;
+ const float32x4_t vec_swap_sign = vld1q_f32((float32_t*)k_swap_sign);
+ float32x4_t wk1rv = vld1q_f32(cftmdl_wk1r);
+
+ for (j = 0; j < l; j += 2) {
+ const float32x2_t a_00 = vld1_f32(&a[j + 0]);
+ const float32x2_t a_08 = vld1_f32(&a[j + 8]);
+ const float32x2_t a_32 = vld1_f32(&a[j + 32]);
+ const float32x2_t a_40 = vld1_f32(&a[j + 40]);
+ const float32x4_t a_00_32 = vcombine_f32(a_00, a_32);
+ const float32x4_t a_08_40 = vcombine_f32(a_08, a_40);
+ const float32x4_t x0r0_0i0_0r1_x0i1 = vaddq_f32(a_00_32, a_08_40);
+ const float32x4_t x1r0_1i0_1r1_x1i1 = vsubq_f32(a_00_32, a_08_40);
+ const float32x2_t a_16 = vld1_f32(&a[j + 16]);
+ const float32x2_t a_24 = vld1_f32(&a[j + 24]);
+ const float32x2_t a_48 = vld1_f32(&a[j + 48]);
+ const float32x2_t a_56 = vld1_f32(&a[j + 56]);
+ const float32x4_t a_16_48 = vcombine_f32(a_16, a_48);
+ const float32x4_t a_24_56 = vcombine_f32(a_24, a_56);
+ const float32x4_t x2r0_2i0_2r1_x2i1 = vaddq_f32(a_16_48, a_24_56);
+ const float32x4_t x3r0_3i0_3r1_x3i1 = vsubq_f32(a_16_48, a_24_56);
+ const float32x4_t xx0 = vaddq_f32(x0r0_0i0_0r1_x0i1, x2r0_2i0_2r1_x2i1);
+ const float32x4_t xx1 = vsubq_f32(x0r0_0i0_0r1_x0i1, x2r0_2i0_2r1_x2i1);
+ const float32x4_t x3i0_3r0_3i1_x3r1 = vrev64q_f32(x3r0_3i0_3r1_x3i1);
+ const float32x4_t x1_x3_add =
+ vmlaq_f32(x1r0_1i0_1r1_x1i1, vec_swap_sign, x3i0_3r0_3i1_x3r1);
+ const float32x4_t x1_x3_sub =
+ vmlsq_f32(x1r0_1i0_1r1_x1i1, vec_swap_sign, x3i0_3r0_3i1_x3r1);
+ const float32x2_t yy0_a = vdup_lane_f32(vget_high_f32(x1_x3_add), 0);
+ const float32x2_t yy0_s = vdup_lane_f32(vget_high_f32(x1_x3_sub), 0);
+ const float32x4_t yy0_as = vcombine_f32(yy0_a, yy0_s);
+ const float32x2_t yy1_a = vdup_lane_f32(vget_high_f32(x1_x3_add), 1);
+ const float32x2_t yy1_s = vdup_lane_f32(vget_high_f32(x1_x3_sub), 1);
+ const float32x4_t yy1_as = vcombine_f32(yy1_a, yy1_s);
+ const float32x4_t yy0 = vmlaq_f32(yy0_as, vec_swap_sign, yy1_as);
+ const float32x4_t yy4 = vmulq_f32(wk1rv, yy0);
+ const float32x4_t xx1_rev = vrev64q_f32(xx1);
+ const float32x4_t yy4_rev = vrev64q_f32(yy4);
+
+ vst1_f32(&a[j + 0], vget_low_f32(xx0));
+ vst1_f32(&a[j + 32], vget_high_f32(xx0));
+ vst1_f32(&a[j + 16], vget_low_f32(xx1));
+ vst1_f32(&a[j + 48], vget_high_f32(xx1_rev));
+
+ a[j + 48] = -a[j + 48];
+
+ vst1_f32(&a[j + 8], vget_low_f32(x1_x3_add));
+ vst1_f32(&a[j + 24], vget_low_f32(x1_x3_sub));
+ vst1_f32(&a[j + 40], vget_low_f32(yy4));
+ vst1_f32(&a[j + 56], vget_high_f32(yy4_rev));
+ }
+
+ {
+ const int k = 64;
+ const int k1 = 2;
+ const int k2 = 2 * k1;
+ const float32x4_t wk2rv = vld1q_f32(&rdft_wk2r[k2 + 0]);
+ const float32x4_t wk2iv = vld1q_f32(&rdft_wk2i[k2 + 0]);
+ const float32x4_t wk1iv = vld1q_f32(&rdft_wk1i[k2 + 0]);
+ const float32x4_t wk3rv = vld1q_f32(&rdft_wk3r[k2 + 0]);
+ const float32x4_t wk3iv = vld1q_f32(&rdft_wk3i[k2 + 0]);
+ wk1rv = vld1q_f32(&rdft_wk1r[k2 + 0]);
+ for (j = k; j < l + k; j += 2) {
+ const float32x2_t a_00 = vld1_f32(&a[j + 0]);
+ const float32x2_t a_08 = vld1_f32(&a[j + 8]);
+ const float32x2_t a_32 = vld1_f32(&a[j + 32]);
+ const float32x2_t a_40 = vld1_f32(&a[j + 40]);
+ const float32x4_t a_00_32 = vcombine_f32(a_00, a_32);
+ const float32x4_t a_08_40 = vcombine_f32(a_08, a_40);
+ const float32x4_t x0r0_0i0_0r1_x0i1 = vaddq_f32(a_00_32, a_08_40);
+ const float32x4_t x1r0_1i0_1r1_x1i1 = vsubq_f32(a_00_32, a_08_40);
+ const float32x2_t a_16 = vld1_f32(&a[j + 16]);
+ const float32x2_t a_24 = vld1_f32(&a[j + 24]);
+ const float32x2_t a_48 = vld1_f32(&a[j + 48]);
+ const float32x2_t a_56 = vld1_f32(&a[j + 56]);
+ const float32x4_t a_16_48 = vcombine_f32(a_16, a_48);
+ const float32x4_t a_24_56 = vcombine_f32(a_24, a_56);
+ const float32x4_t x2r0_2i0_2r1_x2i1 = vaddq_f32(a_16_48, a_24_56);
+ const float32x4_t x3r0_3i0_3r1_x3i1 = vsubq_f32(a_16_48, a_24_56);
+ const float32x4_t xx = vaddq_f32(x0r0_0i0_0r1_x0i1, x2r0_2i0_2r1_x2i1);
+ const float32x4_t xx1 = vsubq_f32(x0r0_0i0_0r1_x0i1, x2r0_2i0_2r1_x2i1);
+ const float32x4_t x3i0_3r0_3i1_x3r1 = vrev64q_f32(x3r0_3i0_3r1_x3i1);
+ const float32x4_t x1_x3_add =
+ vmlaq_f32(x1r0_1i0_1r1_x1i1, vec_swap_sign, x3i0_3r0_3i1_x3r1);
+ const float32x4_t x1_x3_sub =
+ vmlsq_f32(x1r0_1i0_1r1_x1i1, vec_swap_sign, x3i0_3r0_3i1_x3r1);
+ float32x4_t xx4 = vmulq_f32(wk2rv, xx1);
+ float32x4_t xx12 = vmulq_f32(wk1rv, x1_x3_add);
+ float32x4_t xx22 = vmulq_f32(wk3rv, x1_x3_sub);
+ xx4 = vmlaq_f32(xx4, wk2iv, vrev64q_f32(xx1));
+ xx12 = vmlaq_f32(xx12, wk1iv, vrev64q_f32(x1_x3_add));
+ xx22 = vmlaq_f32(xx22, wk3iv, vrev64q_f32(x1_x3_sub));
+
+ vst1_f32(&a[j + 0], vget_low_f32(xx));
+ vst1_f32(&a[j + 32], vget_high_f32(xx));
+ vst1_f32(&a[j + 16], vget_low_f32(xx4));
+ vst1_f32(&a[j + 48], vget_high_f32(xx4));
+ vst1_f32(&a[j + 8], vget_low_f32(xx12));
+ vst1_f32(&a[j + 40], vget_high_f32(xx12));
+ vst1_f32(&a[j + 24], vget_low_f32(xx22));
+ vst1_f32(&a[j + 56], vget_high_f32(xx22));
+ }
+ }
+}
+
+void aec_rdft_init_neon(void) {
+ cft1st_128 = cft1st_128_neon;
+ cftmdl_128 = cftmdl_128_neon;
+}
+
diff --git a/modules/audio_processing/audio_processing.gypi b/modules/audio_processing/audio_processing.gypi
index b1d18c5b..98d437f8 100644
--- a/modules/audio_processing/audio_processing.gypi
+++ b/modules/audio_processing/audio_processing.gypi
@@ -200,6 +200,7 @@
],
'sources': [
'aec/aec_core_neon.c',
+ 'aec/aec_rdft_neon.c',
'aecm/aecm_core_neon.c',
'ns/nsx_core_neon.c',
],
diff --git a/modules/audio_processing/include/audio_processing.h b/modules/audio_processing/include/audio_processing.h
index 77c3f3ad..1e494d18 100644
--- a/modules/audio_processing/include/audio_processing.h
+++ b/modules/audio_processing/include/audio_processing.h
@@ -73,6 +73,14 @@ struct ExperimentalAgc {
bool enabled;
};
+// Use to enable experimental noise suppression. It can be set in the
+// constructor or using AudioProcessing::SetExtraOptions().
+struct ExperimentalNs {
+ ExperimentalNs() : enabled(false) {}
+ explicit ExperimentalNs(bool enabled) : enabled(enabled) {}
+ bool enabled;
+};
+
static const int kAudioProcMaxNativeSampleRateHz = 32000;
// The Audio Processing Module (APM) provides a collection of voice processing
diff --git a/modules/audio_processing/test/process_test.cc b/modules/audio_processing/test/process_test.cc
index a36a072c..05f4b77b 100644
--- a/modules/audio_processing/test/process_test.cc
+++ b/modules/audio_processing/test/process_test.cc
@@ -400,7 +400,9 @@ void void_main(int argc, char* argv[]) {
vad_out_filename = argv[i];
} else if (strcmp(argv[i], "-expns") == 0) {
- ASSERT_EQ(apm->kNoError, apm->EnableExperimentalNs(true));
+ Config config;
+ config.Set<ExperimentalNs>(new ExperimentalNs(true));
+ apm->SetExtraOptions(config);
} else if (strcmp(argv[i], "--noasm") == 0) {
WebRtc_GetCPUInfo = WebRtc_GetCPUInfoNoASM;
diff --git a/modules/desktop_capture/BUILD.gn b/modules/desktop_capture/BUILD.gn
index 0ccb18a1..bb219e2b 100644
--- a/modules/desktop_capture/BUILD.gn
+++ b/modules/desktop_capture/BUILD.gn
@@ -6,14 +6,121 @@
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
+import("//build/config/ui.gni")
import("../../build/webrtc.gni")
+use_desktop_capture_differ_sse2 =
+ (!is_ios && (cpu_arch == "x86" || cpu_arch == "x64"))
+
source_set("desktop_capture") {
- # TODO(jiayl): Implement.
+ sources = [
+ "desktop_and_cursor_composer.cc",
+ "desktop_and_cursor_composer.h",
+ "desktop_capture_types.h",
+ "desktop_capturer.h",
+ "desktop_frame.cc",
+ "desktop_frame.h",
+ "desktop_frame_win.cc",
+ "desktop_frame_win.h",
+ "desktop_geometry.cc",
+ "desktop_geometry.h",
+ "desktop_capture_options.h",
+ "desktop_capture_options.cc",
+ "desktop_capturer.h",
+ "desktop_region.cc",
+ "desktop_region.h",
+ "differ.cc",
+ "differ.h",
+ "differ_block.cc",
+ "differ_block.h",
+ "mac/desktop_configuration.h",
+ "mac/desktop_configuration.mm",
+ "mac/desktop_configuration_monitor.h",
+ "mac/desktop_configuration_monitor.cc",
+ "mac/osx_version.h",
+ "mac/osx_version.cc",
+ "mac/scoped_pixel_buffer_object.cc",
+ "mac/scoped_pixel_buffer_object.h",
+ "mouse_cursor.cc",
+ "mouse_cursor.h",
+ "mouse_cursor_monitor.h",
+ "mouse_cursor_monitor_mac.mm",
+ "mouse_cursor_monitor_win.cc",
+ "mouse_cursor_shape.h",
+ "screen_capture_frame_queue.cc",
+ "screen_capture_frame_queue.h",
+ "screen_capturer.cc",
+ "screen_capturer.h",
+ "screen_capturer_helper.cc",
+ "screen_capturer_helper.h",
+ "screen_capturer_mac.mm",
+ "screen_capturer_win.cc",
+ "shared_desktop_frame.cc",
+ "shared_desktop_frame.h",
+ "shared_memory.cc",
+ "shared_memory.h",
+ "win/cursor.cc",
+ "win/cursor.h",
+ "win/desktop.cc",
+ "win/desktop.h",
+ "win/scoped_gdi_object.h",
+ "win/scoped_thread_desktop.cc",
+ "win/scoped_thread_desktop.h",
+ "win/screen_capturer_win_gdi.cc",
+ "win/screen_capturer_win_gdi.h",
+ "win/screen_capturer_win_magnifier.cc",
+ "win/screen_capturer_win_magnifier.h",
+ "win/screen_capture_utils.cc",
+ "win/screen_capture_utils.h",
+ "win/window_capture_utils.cc",
+ "win/window_capture_utils.h",
+ "window_capturer.cc",
+ "window_capturer.h",
+ "window_capturer_mac.mm",
+ "window_capturer_win.cc",
+ ]
+
+ if (use_x11) {
+ sources += [
+ "mouse_cursor_monitor_x11.cc",
+ "screen_capturer_x11.cc",
+ "window_capturer_x11.cc",
+ "x11/shared_x_display.h",
+ "x11/shared_x_display.cc",
+ "x11/x_error_trap.cc",
+ "x11/x_error_trap.h",
+ "x11/x_server_pixel_buffer.cc",
+ "x11/x_server_pixel_buffer.h",
+ ]
+ configs += ["//build/config/linux:x11"]
+ }
+
+ if (!is_win && !is_mac && !use_x11) {
+ sources += [
+ "mouse_cursor_monitor_null.cc",
+ "screen_capturer_null.cc",
+ "window_capturer_null.cc",
+ ]
+ }
+
+ deps = ["../../system_wrappers"]
+
+ if (use_desktop_capture_differ_sse2) {
+ deps += [":desktop_capture_differ_sse2"]
+ }
}
-if (!is_ios && (cpu_arch == "x86" || cpu_arch == "x64")) {
+if (use_desktop_capture_differ_sse2) {
+ # Have to be compiled as a separate target because it needs to be compiled
+ # with SSE2 enabled.
source_set("desktop_capture_differ_sse2") {
- # TODO(jiayl): Implement.
+ sources = [
+ "differ_block_sse2.cc",
+ "differ_block_sse2.h",
+ ]
+
+ if (is_posix && !is_mac) {
+ cflags = ["-msse2"]
+ }
}
}
diff --git a/modules/modules.gyp b/modules/modules.gyp
index 8dec125b..d054fe9e 100644
--- a/modules/modules.gyp
+++ b/modules/modules.gyp
@@ -68,6 +68,7 @@
'<@(audio_coding_defines)',
],
'dependencies': [
+ 'acm_receive_test',
'audio_coding_module',
'audio_processing',
'bitrate_controller',
diff --git a/modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h b/modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h
index 7dc4f270..e61e9035 100644
--- a/modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h
+++ b/modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h
@@ -71,6 +71,7 @@ class RemoteBitrateEstimator : public CallStatsObserver, public Module {
// estimate and the over-use detector. If an over-use is detected the
// remote bitrate estimate will be updated. Note that |payload_size| is the
// packet size excluding headers.
+ // Note that |arrival_time_ms| can be of an arbitrary time base.
virtual void IncomingPacket(int64_t arrival_time_ms,
int payload_size,
const RTPHeader& header) = 0;
diff --git a/modules/remote_bitrate_estimator/overuse_detector.cc b/modules/remote_bitrate_estimator/overuse_detector.cc
index 56a6baa8..9baaa9c9 100644
--- a/modules/remote_bitrate_estimator/overuse_detector.cc
+++ b/modules/remote_bitrate_estimator/overuse_detector.cc
@@ -36,8 +36,7 @@ OveruseDetector::OveruseDetector(const OverUseDetectorOptions& options)
prev_offset_(0.0),
time_over_using_(-1),
over_use_counter_(0),
- hypothesis_(kBwNormal),
- time_of_last_received_packet_(-1) {
+ hypothesis_(kBwNormal) {
memcpy(E_, options_.initial_e, sizeof(E_));
memcpy(process_noise_, options_.initial_process_noise,
sizeof(process_noise_));
@@ -50,8 +49,7 @@ OveruseDetector::~OveruseDetector() {
void OveruseDetector::Update(uint16_t packet_size,
int64_t timestamp_ms,
uint32_t timestamp,
- const int64_t now_ms) {
- time_of_last_received_packet_ = now_ms;
+ const int64_t arrival_time_ms) {
bool new_timestamp = (timestamp != current_frame_.timestamp);
if (timestamp_ms >= 0) {
if (prev_frame_.timestamp_ms == -1 && current_frame_.timestamp_ms == -1) {
@@ -82,7 +80,7 @@ void OveruseDetector::Update(uint16_t packet_size,
}
// Accumulate the frame size
current_frame_.size += packet_size;
- current_frame_.complete_time_ms = now_ms;
+ current_frame_.complete_time_ms = arrival_time_ms;
}
BandwidthUsage OveruseDetector::State() const {
@@ -107,10 +105,6 @@ void OveruseDetector::SetRateControlRegion(RateControlRegion region) {
}
}
-int64_t OveruseDetector::time_of_last_received_packet() const {
- return time_of_last_received_packet_;
-}
-
void OveruseDetector::SwitchTimeBase() {
current_frame_.size = 0;
current_frame_.complete_time_ms = -1;
diff --git a/modules/remote_bitrate_estimator/overuse_detector.h b/modules/remote_bitrate_estimator/overuse_detector.h
index a7e59cc6..9c565e45 100644
--- a/modules/remote_bitrate_estimator/overuse_detector.h
+++ b/modules/remote_bitrate_estimator/overuse_detector.h
@@ -28,11 +28,10 @@ class OveruseDetector {
void Update(uint16_t packet_size,
int64_t timestamp_ms,
uint32_t rtp_timestamp,
- int64_t now_ms);
+ int64_t arrival_time_ms);
BandwidthUsage State() const;
double NoiseVar() const;
void SetRateControlRegion(RateControlRegion region);
- int64_t time_of_last_received_packet() const;
private:
struct FrameSample {
@@ -89,7 +88,6 @@ class OveruseDetector {
double time_over_using_;
uint16_t over_use_counter_;
BandwidthUsage hypothesis_;
- int64_t time_of_last_received_packet_;
};
} // namespace webrtc
diff --git a/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream.cc b/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream.cc
index 577912eb..08422d28 100644
--- a/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream.cc
+++ b/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream.cc
@@ -59,10 +59,27 @@ class RemoteBitrateEstimatorSingleStream : public RemoteBitrateEstimator {
ReceiveBandwidthEstimatorStats* output) const OVERRIDE;
private:
- typedef std::map<unsigned int, OveruseDetector> SsrcOveruseDetectorMap;
+ // Map from SSRC to over-use detector and last incoming packet time in
+ // milliseconds, taken from clock_.
+ typedef std::map<unsigned int, std::pair<OveruseDetector, int64_t> >
+ SsrcOveruseDetectorMap;
+
+ static OveruseDetector* GetDetector(
+ const SsrcOveruseDetectorMap::iterator it) {
+ return &it->second.first;
+ }
+
+ static int64_t GetPacketTimeMs(const SsrcOveruseDetectorMap::iterator it) {
+ return it->second.second;
+ }
+
+ static void SetPacketTimeMs(SsrcOveruseDetectorMap::iterator it,
+ int64_t time_ms) {
+ it->second.second = time_ms;
+ }
// Triggers a new estimate calculation.
- void UpdateEstimate(int64_t time_now);
+ void UpdateEstimate(int64_t now_ms);
void GetSsrcs(std::vector<unsigned int>* ssrcs) const;
@@ -95,6 +112,7 @@ void RemoteBitrateEstimatorSingleStream::IncomingPacket(
uint32_t ssrc = header.ssrc;
uint32_t rtp_timestamp = header.timestamp +
header.extension.transmissionTimeOffset;
+ int64_t now_ms = clock_->TimeInMilliseconds();
CriticalSectionScoped cs(crit_sect_.get());
SsrcOveruseDetectorMap::iterator it = overuse_detectors_.find(ssrc);
if (it == overuse_detectors_.end()) {
@@ -105,22 +123,23 @@ void RemoteBitrateEstimatorSingleStream::IncomingPacket(
// automatically cleaned up when we have one RemoteBitrateEstimator per REMB
// group.
std::pair<SsrcOveruseDetectorMap::iterator, bool> insert_result =
- overuse_detectors_.insert(std::make_pair(ssrc, OveruseDetector(
- OverUseDetectorOptions())));
+ overuse_detectors_.insert(std::make_pair(ssrc,
+ std::make_pair(OveruseDetector(OverUseDetectorOptions()), now_ms)));
it = insert_result.first;
}
- OveruseDetector* overuse_detector = &it->second;
- incoming_bitrate_.Update(payload_size, arrival_time_ms);
+ SetPacketTimeMs(it, now_ms);
+ OveruseDetector* overuse_detector = GetDetector(it);
+ incoming_bitrate_.Update(payload_size, now_ms);
const BandwidthUsage prior_state = overuse_detector->State();
overuse_detector->Update(payload_size, -1, rtp_timestamp, arrival_time_ms);
if (overuse_detector->State() == kBwOverusing) {
- unsigned int incoming_bitrate = incoming_bitrate_.Rate(arrival_time_ms);
+ unsigned int incoming_bitrate = incoming_bitrate_.Rate(now_ms);
if (prior_state != kBwOverusing ||
- remote_rate_.TimeToReduceFurther(arrival_time_ms, incoming_bitrate)) {
+ remote_rate_.TimeToReduceFurther(now_ms, incoming_bitrate)) {
// The first overuse should immediately trigger a new estimate.
// We also have to update the estimate immediately if we are overusing
// and the target bitrate is too high compared to what we are receiving.
- UpdateEstimate(arrival_time_ms);
+ UpdateEstimate(now_ms);
}
}
}
@@ -129,8 +148,9 @@ int32_t RemoteBitrateEstimatorSingleStream::Process() {
if (TimeUntilNextProcess() > 0) {
return 0;
}
- UpdateEstimate(clock_->TimeInMilliseconds());
- last_process_time_ = clock_->TimeInMilliseconds();
+ int64_t now_ms = clock_->TimeInMilliseconds();
+ UpdateEstimate(now_ms);
+ last_process_time_ = now_ms;
return 0;
}
@@ -141,25 +161,24 @@ int32_t RemoteBitrateEstimatorSingleStream::TimeUntilNextProcess() {
return last_process_time_ + kProcessIntervalMs - clock_->TimeInMilliseconds();
}
-void RemoteBitrateEstimatorSingleStream::UpdateEstimate(int64_t time_now) {
+void RemoteBitrateEstimatorSingleStream::UpdateEstimate(int64_t now_ms) {
CriticalSectionScoped cs(crit_sect_.get());
BandwidthUsage bw_state = kBwNormal;
double sum_noise_var = 0.0;
SsrcOveruseDetectorMap::iterator it = overuse_detectors_.begin();
while (it != overuse_detectors_.end()) {
- const int64_t time_of_last_received_packet =
- it->second.time_of_last_received_packet();
- if (time_of_last_received_packet >= 0 &&
- time_now - time_of_last_received_packet > kStreamTimeOutMs) {
+ if (GetPacketTimeMs(it) >= 0 &&
+ now_ms - GetPacketTimeMs(it) > kStreamTimeOutMs) {
// This over-use detector hasn't received packets for |kStreamTimeOutMs|
// milliseconds and is considered stale.
overuse_detectors_.erase(it++);
} else {
- sum_noise_var += it->second.NoiseVar();
+ OveruseDetector* overuse_detector = GetDetector(it);
+ sum_noise_var += overuse_detector->NoiseVar();
// Make sure that we trigger an over-use if any of the over-use detectors
// is detecting over-use.
- if (it->second.State() > bw_state) {
- bw_state = it->second.State();
+ if (overuse_detector->State() > bw_state) {
+ bw_state = overuse_detector->State();
}
++it;
}
@@ -172,17 +191,17 @@ void RemoteBitrateEstimatorSingleStream::UpdateEstimate(int64_t time_now) {
double mean_noise_var = sum_noise_var /
static_cast<double>(overuse_detectors_.size());
const RateControlInput input(bw_state,
- incoming_bitrate_.Rate(time_now),
+ incoming_bitrate_.Rate(now_ms),
mean_noise_var);
- const RateControlRegion region = remote_rate_.Update(&input, time_now);
- unsigned int target_bitrate = remote_rate_.UpdateBandwidthEstimate(time_now);
+ const RateControlRegion region = remote_rate_.Update(&input, now_ms);
+ unsigned int target_bitrate = remote_rate_.UpdateBandwidthEstimate(now_ms);
if (remote_rate_.ValidEstimate()) {
std::vector<unsigned int> ssrcs;
GetSsrcs(&ssrcs);
observer_->OnReceiveBitrateChanged(ssrcs, target_bitrate);
}
for (it = overuse_detectors_.begin(); it != overuse_detectors_.end(); ++it) {
- it->second.SetRateControlRegion(region);
+ GetDetector(it)->SetRateControlRegion(region);
}
}
diff --git a/modules/remote_bitrate_estimator/remote_bitrate_estimator_unittest_helper.cc b/modules/remote_bitrate_estimator/remote_bitrate_estimator_unittest_helper.cc
index dc30d933..1b38a1ea 100644
--- a/modules/remote_bitrate_estimator/remote_bitrate_estimator_unittest_helper.cc
+++ b/modules/remote_bitrate_estimator/remote_bitrate_estimator_unittest_helper.cc
@@ -226,7 +226,8 @@ void RemoteBitrateEstimatorTest::IncomingPacket(uint32_t ssrc,
header.ssrc = ssrc;
header.timestamp = rtp_timestamp;
header.extension.absoluteSendTime = absolute_send_time;
- bitrate_estimator_->IncomingPacket(arrival_time, payload_size, header);
+ bitrate_estimator_->IncomingPacket(arrival_time + kArrivalTimeClockOffsetMs,
+ payload_size, header);
}
// Generates a frame of packets belonging to a stream at a given bitrate and
@@ -245,6 +246,10 @@ bool RemoteBitrateEstimatorTest::GenerateAndProcessFrame(unsigned int ssrc,
while (!packets.empty()) {
testing::RtpStream::RtpPacket* packet = packets.front();
bitrate_observer_->Reset();
+ // The simulated clock should match the time of packet->arrival_time
+ // since both are used in IncomingPacket().
+ clock_.AdvanceTimeMicroseconds(packet->arrival_time -
+ clock_.TimeInMicroseconds());
IncomingPacket(packet->ssrc,
packet->size,
(packet->arrival_time + 500) / 1000,
@@ -256,8 +261,6 @@ bool RemoteBitrateEstimatorTest::GenerateAndProcessFrame(unsigned int ssrc,
overuse = true;
EXPECT_LE(bitrate_observer_->latest_bitrate(), bitrate_bps);
}
- clock_.AdvanceTimeMicroseconds(packet->arrival_time -
- clock_.TimeInMicroseconds());
delete packet;
packets.pop_front();
}
diff --git a/modules/remote_bitrate_estimator/remote_bitrate_estimator_unittest_helper.h b/modules/remote_bitrate_estimator/remote_bitrate_estimator_unittest_helper.h
index 14cfc31c..1d748c57 100644
--- a/modules/remote_bitrate_estimator/remote_bitrate_estimator_unittest_helper.h
+++ b/modules/remote_bitrate_estimator/remote_bitrate_estimator_unittest_helper.h
@@ -198,6 +198,7 @@ class RemoteBitrateEstimatorTest : public ::testing::Test {
unsigned int expected_bitrate_drop_delta);
static const unsigned int kDefaultSsrc;
+ static const int kArrivalTimeClockOffsetMs = 60000;
SimulatedClock clock_; // Time at the receiver.
scoped_ptr<testing::TestBitrateObserver> bitrate_observer_;
diff --git a/modules/video_capture/OWNERS b/modules/video_capture/OWNERS
index ba897650..cc79c075 100644
--- a/modules/video_capture/OWNERS
+++ b/modules/video_capture/OWNERS
@@ -3,6 +3,7 @@ glaznev@webrtc.org
mallinath@webrtc.org
mflodman@webrtc.org
perkj@webrtc.org
+tkchin@webrtc.org
wu@webrtc.org
per-file *.isolate=kjellander@webrtc.org
diff --git a/modules/video_coding/main/source/media_optimization.cc b/modules/video_coding/main/source/media_optimization.cc
index 4dc72253..0d9a4bdf 100644
--- a/modules/video_coding/main/source/media_optimization.cc
+++ b/modules/video_coding/main/source/media_optimization.cc
@@ -75,7 +75,8 @@ struct MediaOptimization::EncodedFrameSample {
};
MediaOptimization::MediaOptimization(Clock* clock)
- : clock_(clock),
+ : crit_sect_(CriticalSectionWrapper::CreateCriticalSection()),
+ clock_(clock),
max_bit_rate_(0),
send_codec_type_(kVideoCodecUnknown),
codec_width_(0),
@@ -113,7 +114,9 @@ MediaOptimization::~MediaOptimization(void) {
}
void MediaOptimization::Reset() {
- SetEncodingData(kVideoCodecUnknown, 0, 0, 0, 0, 0, 0, max_payload_size_);
+ CriticalSectionScoped lock(crit_sect_.get());
+ SetEncodingDataInternal(
+ kVideoCodecUnknown, 0, 0, 0, 0, 0, 0, max_payload_size_);
memset(incoming_frame_times_, -1, sizeof(incoming_frame_times_));
incoming_frame_rate_ = 0.0;
frame_dropper_->Reset();
@@ -145,6 +148,25 @@ void MediaOptimization::SetEncodingData(VideoCodecType send_codec_type,
uint16_t height,
int num_layers,
int32_t mtu) {
+ CriticalSectionScoped lock(crit_sect_.get());
+ SetEncodingDataInternal(send_codec_type,
+ max_bit_rate,
+ frame_rate,
+ target_bitrate,
+ width,
+ height,
+ num_layers,
+ mtu);
+}
+
+void MediaOptimization::SetEncodingDataInternal(VideoCodecType send_codec_type,
+ int32_t max_bit_rate,
+ uint32_t frame_rate,
+ uint32_t target_bitrate,
+ uint16_t width,
+ uint16_t height,
+ int num_layers,
+ int32_t mtu) {
// Everything codec specific should be reset here since this means the codec
// has changed. If native dimension values have changed, then either user
// initiated change, or QM initiated change. Will be able to determine only
@@ -181,6 +203,7 @@ uint32_t MediaOptimization::SetTargetRates(
uint32_t round_trip_time_ms,
VCMProtectionCallback* protection_callback,
VCMQMSettingsCallback* qmsettings_callback) {
+ CriticalSectionScoped lock(crit_sect_.get());
// TODO(holmer): Consider putting this threshold only on the video bitrate,
// and not on protection.
if (max_bit_rate_ > 0 &&
@@ -194,7 +217,7 @@ uint32_t MediaOptimization::SetTargetRates(
loss_prot_logic_->UpdateResidualPacketLoss(static_cast<float>(fraction_lost));
// Get frame rate for encoder: this is the actual/sent frame rate.
- float actual_frame_rate = SentFrameRate();
+ float actual_frame_rate = SentFrameRateInternal();
// Sanity check.
if (actual_frame_rate < 1.0) {
@@ -297,6 +320,7 @@ uint32_t MediaOptimization::SetTargetRates(
void MediaOptimization::EnableProtectionMethod(bool enable,
VCMProtectionMethodEnum method) {
+ CriticalSectionScoped lock(crit_sect_.get());
bool updated = false;
if (enable) {
updated = loss_prot_logic_->SetMethod(method);
@@ -309,17 +333,28 @@ void MediaOptimization::EnableProtectionMethod(bool enable,
}
uint32_t MediaOptimization::InputFrameRate() {
+ CriticalSectionScoped lock(crit_sect_.get());
+ return InputFrameRateInternal();
+}
+
+uint32_t MediaOptimization::InputFrameRateInternal() {
ProcessIncomingFrameRate(clock_->TimeInMilliseconds());
return uint32_t(incoming_frame_rate_ + 0.5f);
}
uint32_t MediaOptimization::SentFrameRate() {
+ CriticalSectionScoped lock(crit_sect_.get());
+ return SentFrameRateInternal();
+}
+
+uint32_t MediaOptimization::SentFrameRateInternal() {
PurgeOldFrameSamples(clock_->TimeInMilliseconds());
UpdateSentFramerate();
return avg_sent_framerate_;
}
uint32_t MediaOptimization::SentBitRate() {
+ CriticalSectionScoped lock(crit_sect_.get());
const int64_t now_ms = clock_->TimeInMilliseconds();
PurgeOldFrameSamples(now_ms);
UpdateSentBitrate(now_ms);
@@ -327,6 +362,7 @@ uint32_t MediaOptimization::SentBitRate() {
}
VCMFrameCount MediaOptimization::SentFrameCount() {
+ CriticalSectionScoped lock(crit_sect_.get());
VCMFrameCount count;
count.numDeltaFrames = delta_frame_cnt_;
count.numKeyFrames = key_frame_cnt_;
@@ -336,6 +372,7 @@ VCMFrameCount MediaOptimization::SentFrameCount() {
int32_t MediaOptimization::UpdateWithEncodedData(int encoded_length,
uint32_t timestamp,
FrameType encoded_frame_type) {
+ CriticalSectionScoped lock(crit_sect_.get());
const int64_t now_ms = clock_->TimeInMilliseconds();
PurgeOldFrameSamples(now_ms);
if (encoded_frame_samples_.size() > 0 &&
@@ -386,22 +423,55 @@ int32_t MediaOptimization::UpdateWithEncodedData(int encoded_length,
return VCM_OK;
}
-void MediaOptimization::EnableQM(bool enable) { enable_qm_ = enable; }
+void MediaOptimization::EnableQM(bool enable) {
+ CriticalSectionScoped lock(crit_sect_.get());
+ enable_qm_ = enable;
+}
void MediaOptimization::EnableFrameDropper(bool enable) {
+ CriticalSectionScoped lock(crit_sect_.get());
frame_dropper_->Enable(enable);
}
+void MediaOptimization::SuspendBelowMinBitrate(int threshold_bps,
+ int window_bps) {
+ CriticalSectionScoped lock(crit_sect_.get());
+ assert(threshold_bps > 0 && window_bps >= 0);
+ suspension_threshold_bps_ = threshold_bps;
+ suspension_window_bps_ = window_bps;
+ suspension_enabled_ = true;
+ video_suspended_ = false;
+}
+
+bool MediaOptimization::IsVideoSuspended() const {
+ CriticalSectionScoped lock(crit_sect_.get());
+ return video_suspended_;
+}
+
bool MediaOptimization::DropFrame() {
+ CriticalSectionScoped lock(crit_sect_.get());
UpdateIncomingFrameRate();
// Leak appropriate number of bytes.
- frame_dropper_->Leak((uint32_t)(InputFrameRate() + 0.5f));
+ frame_dropper_->Leak((uint32_t)(InputFrameRateInternal() + 0.5f));
if (video_suspended_) {
return true; // Drop all frames when muted.
}
return frame_dropper_->DropFrame();
}
+void MediaOptimization::UpdateContentData(
+ const VideoContentMetrics* content_metrics) {
+ CriticalSectionScoped lock(crit_sect_.get());
+ // Updating content metrics.
+ if (content_metrics == NULL) {
+ // Disable QM if metrics are NULL.
+ enable_qm_ = false;
+ qm_resolution_->Reset();
+ } else {
+ content_->UpdateContentData(content_metrics);
+ }
+}
+
void MediaOptimization::UpdateIncomingFrameRate() {
int64_t now = clock_->TimeInMilliseconds();
if (incoming_frame_times_[0] == 0) {
@@ -416,18 +486,6 @@ void MediaOptimization::UpdateIncomingFrameRate() {
ProcessIncomingFrameRate(now);
}
-void MediaOptimization::UpdateContentData(
- const VideoContentMetrics* content_metrics) {
- // Updating content metrics.
- if (content_metrics == NULL) {
- // Disable QM if metrics are NULL.
- enable_qm_ = false;
- qm_resolution_->Reset();
- } else {
- content_->UpdateContentData(content_metrics);
- }
-}
-
int32_t MediaOptimization::SelectQuality(
VCMQMSettingsCallback* video_qmsettings_callback) {
// Reset quantities for QM select.
@@ -458,17 +516,6 @@ int32_t MediaOptimization::SelectQuality(
return VCM_OK;
}
-void MediaOptimization::SuspendBelowMinBitrate(int threshold_bps,
- int window_bps) {
- assert(threshold_bps > 0 && window_bps >= 0);
- suspension_threshold_bps_ = threshold_bps;
- suspension_window_bps_ = window_bps;
- suspension_enabled_ = true;
- video_suspended_ = false;
-}
-
-bool MediaOptimization::IsVideoSuspended() const { return video_suspended_; }
-
void MediaOptimization::PurgeOldFrameSamples(int64_t now_ms) {
while (!encoded_frame_samples_.empty()) {
if (now_ms - encoded_frame_samples_.front().time_complete_ms >
diff --git a/modules/video_coding/main/source/media_optimization.h b/modules/video_coding/main/source/media_optimization.h
index 35a49712..df3fbb64 100644
--- a/modules/video_coding/main/source/media_optimization.h
+++ b/modules/video_coding/main/source/media_optimization.h
@@ -17,6 +17,7 @@
#include "webrtc/modules/video_coding/main/interface/video_coding.h"
#include "webrtc/modules/video_coding/main/source/media_opt_util.h"
#include "webrtc/modules/video_coding/main/source/qm_select.h"
+#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
namespace webrtc {
@@ -28,7 +29,6 @@ class VCMContentMetricsProcessing;
namespace media_optimization {
-// TODO(andresp): Make thread safe.
class MediaOptimization {
public:
explicit MediaOptimization(Clock* clock);
@@ -100,59 +100,80 @@ class MediaOptimization {
struct EncodedFrameSample;
typedef std::list<EncodedFrameSample> FrameSampleList;
- void UpdateIncomingFrameRate();
- void PurgeOldFrameSamples(int64_t now_ms);
- void UpdateSentBitrate(int64_t now_ms);
- void UpdateSentFramerate();
+ void UpdateIncomingFrameRate() EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+ void PurgeOldFrameSamples(int64_t now_ms)
+ EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+ void UpdateSentBitrate(int64_t now_ms) EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+ void UpdateSentFramerate() EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
// Computes new Quality Mode.
- int32_t SelectQuality(VCMQMSettingsCallback* qmsettings_callback);
+ int32_t SelectQuality(VCMQMSettingsCallback* qmsettings_callback)
+ EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
// Verifies if QM settings differ from default, i.e. if an update is required.
// Computes actual values, as will be sent to the encoder.
bool QMUpdate(VCMResolutionScale* qm,
- VCMQMSettingsCallback* qmsettings_callback);
+ VCMQMSettingsCallback* qmsettings_callback)
+ EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
// Checks if we should make a QM change. Return true if yes, false otherwise.
- bool CheckStatusForQMchange();
+ bool CheckStatusForQMchange() EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
- void ProcessIncomingFrameRate(int64_t now);
+ void ProcessIncomingFrameRate(int64_t now)
+ EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
// Checks conditions for suspending the video. The method compares
// |target_bit_rate_| with the threshold values for suspension, and changes
// the state of |video_suspended_| accordingly.
- void CheckSuspendConditions();
-
- Clock* clock_;
- int32_t max_bit_rate_;
- VideoCodecType send_codec_type_;
- uint16_t codec_width_;
- uint16_t codec_height_;
- float user_frame_rate_;
- scoped_ptr<FrameDropper> frame_dropper_;
- scoped_ptr<VCMLossProtectionLogic> loss_prot_logic_;
- uint8_t fraction_lost_;
- uint32_t send_statistics_[4];
- uint32_t send_statistics_zero_encode_;
- int32_t max_payload_size_;
- int target_bit_rate_;
- float incoming_frame_rate_;
- int64_t incoming_frame_times_[kFrameCountHistorySize];
- bool enable_qm_;
- std::list<EncodedFrameSample> encoded_frame_samples_;
- uint32_t avg_sent_bit_rate_bps_;
- uint32_t avg_sent_framerate_;
- uint32_t key_frame_cnt_;
- uint32_t delta_frame_cnt_;
- scoped_ptr<VCMContentMetricsProcessing> content_;
- scoped_ptr<VCMQmResolution> qm_resolution_;
- int64_t last_qm_update_time_;
- int64_t last_change_time_; // Content/user triggered.
- int num_layers_;
- bool suspension_enabled_;
- bool video_suspended_;
- int suspension_threshold_bps_;
- int suspension_window_bps_;
+ void CheckSuspendConditions() EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+
+ void SetEncodingDataInternal(VideoCodecType send_codec_type,
+ int32_t max_bit_rate,
+ uint32_t frame_rate,
+ uint32_t bit_rate,
+ uint16_t width,
+ uint16_t height,
+ int num_temporal_layers,
+ int32_t mtu)
+ EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+
+ uint32_t InputFrameRateInternal() EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+
+ uint32_t SentFrameRateInternal() EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+
+ // Protect all members.
+ scoped_ptr<CriticalSectionWrapper> crit_sect_;
+
+ Clock* clock_ GUARDED_BY(crit_sect_);
+ int32_t max_bit_rate_ GUARDED_BY(crit_sect_);
+ VideoCodecType send_codec_type_ GUARDED_BY(crit_sect_);
+ uint16_t codec_width_ GUARDED_BY(crit_sect_);
+ uint16_t codec_height_ GUARDED_BY(crit_sect_);
+ float user_frame_rate_ GUARDED_BY(crit_sect_);
+ scoped_ptr<FrameDropper> frame_dropper_ GUARDED_BY(crit_sect_);
+ scoped_ptr<VCMLossProtectionLogic> loss_prot_logic_ GUARDED_BY(crit_sect_);
+ uint8_t fraction_lost_ GUARDED_BY(crit_sect_);
+ uint32_t send_statistics_[4] GUARDED_BY(crit_sect_);
+ uint32_t send_statistics_zero_encode_ GUARDED_BY(crit_sect_);
+ int32_t max_payload_size_ GUARDED_BY(crit_sect_);
+ int target_bit_rate_ GUARDED_BY(crit_sect_);
+ float incoming_frame_rate_ GUARDED_BY(crit_sect_);
+ int64_t incoming_frame_times_[kFrameCountHistorySize] GUARDED_BY(crit_sect_);
+ bool enable_qm_ GUARDED_BY(crit_sect_);
+ std::list<EncodedFrameSample> encoded_frame_samples_ GUARDED_BY(crit_sect_);
+ uint32_t avg_sent_bit_rate_bps_ GUARDED_BY(crit_sect_);
+ uint32_t avg_sent_framerate_ GUARDED_BY(crit_sect_);
+ uint32_t key_frame_cnt_ GUARDED_BY(crit_sect_);
+ uint32_t delta_frame_cnt_ GUARDED_BY(crit_sect_);
+ scoped_ptr<VCMContentMetricsProcessing> content_ GUARDED_BY(crit_sect_);
+ scoped_ptr<VCMQmResolution> qm_resolution_ GUARDED_BY(crit_sect_);
+ int64_t last_qm_update_time_ GUARDED_BY(crit_sect_);
+ int64_t last_change_time_ GUARDED_BY(crit_sect_); // Content/user triggered.
+ int num_layers_ GUARDED_BY(crit_sect_);
+ bool suspension_enabled_ GUARDED_BY(crit_sect_);
+ bool video_suspended_ GUARDED_BY(crit_sect_);
+ int suspension_threshold_bps_ GUARDED_BY(crit_sect_);
+ int suspension_window_bps_ GUARDED_BY(crit_sect_);
};
} // namespace media_optimization
} // namespace webrtc
diff --git a/modules/video_render/OWNERS b/modules/video_render/OWNERS
index da941f40..71d24677 100644
--- a/modules/video_render/OWNERS
+++ b/modules/video_render/OWNERS
@@ -1,8 +1,9 @@
fischman@webrtc.org
+mallinath@webrtc.org
mflodman@webrtc.org
perkj@webrtc.org
+tkchin@webrtc.org
wu@webrtc.org
-mallinath@webrtc.org
per-file *.isolate=kjellander@webrtc.org