author    Android Chromium Automerger <chromium-automerger@android>  2014-07-29 12:29:13 +0000
committer Android Chromium Automerger <chromium-automerger@android>  2014-07-29 12:29:13 +0000
commit    fa5085428e6672f7f9e88a652e3c4a7f9c93d116 (patch)
tree      ef453f1196cff48f99c0b0f45a95c11a4fd49b97 /modules
parent    f3d27028ce9a280c02c286f9b98adf1e476c03d2 (diff)
parent    4a1b3e3a69d349b0d3e91f607f24e02d8b975688 (diff)
Merge third_party/webrtc from https://chromium.googlesource.com/external/webrtc/trunk/webrtc.git at 4a1b3e3a69d349b0d3e91f607f24e02d8b975688
This commit was generated by merge_from_chromium.py.
Change-Id: Iada7abd78f123301a98db982a6272cd9487de72f
Diffstat (limited to 'modules')
-rw-r--r--  modules/audio_coding/codecs/isac/fix/source/codec.h | 10
-rw-r--r--  modules/audio_coding/codecs/isac/fix/source/filterbank_internal.h | 21
-rw-r--r--  modules/audio_coding/codecs/isac/fix/source/filterbanks.c | 11
-rw-r--r--  modules/audio_coding/codecs/isac/fix/source/filterbanks_mips.c | 172
-rw-r--r--  modules/audio_coding/codecs/isac/fix/source/filterbanks_unittest.cc | 7
-rw-r--r--  modules/audio_coding/codecs/isac/fix/source/isacfix.c | 11
-rw-r--r--  modules/audio_coding/codecs/isac/fix/source/isacfix.gypi | 6
-rw-r--r--  modules/audio_coding/codecs/isac/fix/source/lpc_masking_model.h | 9
-rw-r--r--  modules/audio_coding/codecs/isac/fix/source/lpc_masking_model_mips.c | 237
-rw-r--r--  modules/audio_coding/codecs/isac/fix/source/pitch_estimator.c | 133
-rw-r--r--  modules/audio_coding/codecs/isac/fix/source/pitch_estimator.h | 4
-rw-r--r--  modules/audio_coding/codecs/isac/fix/source/pitch_estimator_c.c | 122
-rw-r--r--  modules/audio_coding/codecs/isac/fix/source/pitch_estimator_mips.c | 196
-rw-r--r--  modules/audio_coding/codecs/isac/fix/source/transform.c | 78
-rw-r--r--  modules/audio_coding/codecs/isac/fix/source/transform_mips.c | 1287
-rw-r--r--  modules/audio_coding/codecs/isac/fix/source/transform_tables.c | 111
-rw-r--r--  modules/audio_coding/codecs/opus/opus_fec_test.cc | 35
-rw-r--r--  modules/audio_coding/codecs/opus/opus_interface.c | 259
-rw-r--r--  modules/audio_coding/codecs/opus/opus_speed_test.cc | 7
-rw-r--r--  modules/audio_coding/codecs/opus/opus_unittest.cc | 241
-rw-r--r--  modules/audio_coding/main/acm2/audio_coding_module_impl.cc | 10
-rw-r--r--  modules/audio_coding/main/test/opus_test.cc | 18
-rw-r--r--  modules/audio_coding/neteq/audio_decoder.cc | 2
-rw-r--r--  modules/audio_coding/neteq/audio_decoder_unittest.cc | 82
-rw-r--r--  modules/audio_coding/neteq/payload_splitter_unittest.cc | 2
-rw-r--r--  modules/audio_coding/neteq/test/neteq_opus_fec_quality_test.cc | 7
-rw-r--r--  modules/audio_coding/neteq/timestamp_scaler.cc | 2
-rw-r--r--  modules/audio_coding/neteq/timestamp_scaler_unittest.cc | 13
-rw-r--r--  modules/audio_coding/neteq/tools/neteq_quality_test.cc | 4
-rw-r--r--  modules/audio_coding/neteq/tools/neteq_quality_test.h | 4
-rw-r--r--  modules/audio_conference_mixer/source/audio_conference_mixer_impl.cc | 40
-rw-r--r--  modules/audio_conference_mixer/source/audio_conference_mixer_impl.h | 3
-rw-r--r--  modules/audio_device/audio_device.gypi | 17
-rw-r--r--  modules/audio_device/audio_device_impl.cc | 8
-rw-r--r--  modules/audio_device/audio_device_utility.h | 16
-rw-r--r--  modules/audio_device/ios/audio_device_ios.h | 439
-rw-r--r--  modules/audio_device/ios/audio_device_ios.mm (renamed from modules/audio_device/ios/audio_device_ios.cc) | 480
-rw-r--r--  modules/audio_device/ios/audio_device_utility_ios.cc | 11
-rw-r--r--  modules/audio_device/ios/audio_device_utility_ios.h | 18
-rw-r--r--  modules/audio_processing/aecm/aecm_core_mips.c | 2
-rw-r--r--  modules/audio_processing/audio_buffer.cc | 173
-rw-r--r--  modules/audio_processing/audio_buffer.h | 21
-rw-r--r--  modules/audio_processing/common.h | 8
-rw-r--r--  modules/audio_processing/gain_control_impl.cc | 8
-rw-r--r--  modules/audio_processing/include/audio_processing.h | 3
-rw-r--r--  modules/audio_processing/utility/ring_buffer_unittest.cc | 4
-rw-r--r--  modules/audio_processing/voice_detection_impl.cc | 8
-rw-r--r--  modules/desktop_capture/win/cursor.cc | 2
-rw-r--r--  modules/media_file/source/media_file_unittest.cc | 48
-rw-r--r--  modules/modules.gyp | 1
-rw-r--r--  modules/pacing/include/paced_sender.h | 4
-rw-r--r--  modules/pacing/paced_sender.cc | 27
-rw-r--r--  modules/rtp_rtcp/interface/rtp_rtcp_defines.h | 4
-rw-r--r--  modules/rtp_rtcp/source/rtcp_format_remb_unittest.cc | 6
-rw-r--r--  modules/rtp_rtcp/source/rtcp_receiver.cc | 14
-rw-r--r--  modules/rtp_rtcp/source/rtcp_receiver.h | 10
-rw-r--r--  modules/rtp_rtcp/source/rtcp_sender.cc | 27
-rw-r--r--  modules/rtp_rtcp/source/rtcp_sender.h | 5
-rw-r--r--  modules/rtp_rtcp/source/rtcp_sender_unittest.cc | 18
-rw-r--r--  modules/rtp_rtcp/source/rtp_header_extension.cc | 10
-rw-r--r--  modules/rtp_rtcp/source/rtp_rtcp_impl.cc | 95
-rw-r--r--  modules/rtp_rtcp/source/rtp_rtcp_impl.h | 10
-rw-r--r--  modules/rtp_rtcp/source/rtp_rtcp_impl_unittest.cc | 10
-rw-r--r--  modules/rtp_rtcp/source/rtp_sender.cc | 126
-rw-r--r--  modules/rtp_rtcp/source/rtp_sender.h | 15
-rw-r--r--  modules/rtp_rtcp/source/rtp_sender_unittest.cc | 52
-rw-r--r--  modules/rtp_rtcp/source/ssrc_database.cc | 93
-rw-r--r--  modules/rtp_rtcp/source/ssrc_database.h | 9
-rw-r--r--  modules/video_capture/ios/rtc_video_capture_ios_objc.mm | 9
-rw-r--r--  modules/video_coding/BUILD.gn | 2
-rw-r--r--  modules/video_coding/main/source/media_opt_util.cc | 6
-rw-r--r--  modules/video_coding/main/source/media_opt_util.h | 44
-rw-r--r--  modules/video_coding/utility/exp_filter.cc | 60
-rw-r--r--  modules/video_coding/utility/frame_dropper.cc | 32
-rw-r--r--  modules/video_coding/utility/include/exp_filter.h | 58
-rw-r--r--  modules/video_coding/utility/include/frame_dropper.h | 36
-rw-r--r--  modules/video_coding/utility/video_coding_utility.gyp | 2
-rw-r--r--  modules/video_processing/BUILD.gn | 2
-rw-r--r--  modules/video_processing/OWNERS | 5
-rw-r--r--  modules/video_processing/main/OWNERS | 4
-rw-r--r--  modules/video_processing/main/interface/video_processing.h | 11
-rw-r--r--  modules/video_processing/main/source/Android.mk | 1
-rw-r--r--  modules/video_processing/main/source/denoising.cc | 156
-rw-r--r--  modules/video_processing/main/source/denoising.h | 42
-rw-r--r--  modules/video_processing/main/source/video_processing.gypi | 2
-rw-r--r--  modules/video_processing/main/source/video_processing_impl.cc | 8
-rw-r--r--  modules/video_processing/main/source/video_processing_impl.h | 4
-rw-r--r--  modules/video_processing/main/test/unit_test/denoising_test.cc | 136
-rw-r--r--  modules/video_processing/main/test/unit_test/video_processing_unittest.cc | 17
89 files changed, 3405 insertions, 2178 deletions
diff --git a/modules/audio_coding/codecs/isac/fix/source/codec.h b/modules/audio_coding/codecs/isac/fix/source/codec.h
index 2f649324..a38c6e56 100644
--- a/modules/audio_coding/codecs/isac/fix/source/codec.h
+++ b/modules/audio_coding/codecs/isac/fix/source/codec.h
@@ -101,6 +101,16 @@ void WebRtcIsacfix_Spec2TimeNeon(int16_t* inreQ7,
int32_t* outre2Q16);
#endif
+#if defined(MIPS32_LE)
+void WebRtcIsacfix_Time2SpecMIPS(int16_t* inre1Q9,
+ int16_t* inre2Q9,
+ int16_t* outre,
+ int16_t* outim);
+void WebRtcIsacfix_Spec2TimeMIPS(int16_t* inreQ7,
+ int16_t* inimQ7,
+ int32_t* outre1Q16,
+ int32_t* outre2Q16);
+#endif
/* filterbank functions */
diff --git a/modules/audio_coding/codecs/isac/fix/source/filterbank_internal.h b/modules/audio_coding/codecs/isac/fix/source/filterbank_internal.h
index 3fefc1a5..7a5f7462 100644
--- a/modules/audio_coding/codecs/isac/fix/source/filterbank_internal.h
+++ b/modules/audio_coding/codecs/isac/fix/source/filterbank_internal.h
@@ -23,10 +23,23 @@ extern "C" {
* coefficient: Input.
* state: Input/output, filter state, in Q4.
*/
-void WebRtcIsacfix_HighpassFilterFixDec32(int16_t *io,
- int16_t len,
- const int16_t *coefficient,
- int32_t *state);
+typedef void (*HighpassFilterFixDec32)(int16_t* io,
+ int16_t len,
+ const int16_t* coefficient,
+ int32_t* state);
+extern HighpassFilterFixDec32 WebRtcIsacfix_HighpassFilterFixDec32;
+
+void WebRtcIsacfix_HighpassFilterFixDec32C(int16_t* io,
+ int16_t len,
+ const int16_t* coefficient,
+ int32_t* state);
+
+#if defined(MIPS_DSP_R1_LE)
+void WebRtcIsacfix_HighpassFilterFixDec32MIPS(int16_t* io,
+ int16_t len,
+ const int16_t* coefficient,
+ int32_t* state);
+#endif
typedef void (*AllpassFilter2FixDec16)(
int16_t *data_ch1, // Input and output in channel 1, in Q0
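
The hunk above converts WebRtcIsacfix_HighpassFilterFixDec32 from a plain function into a runtime dispatch point: the header now declares a function-pointer type, an extern pointer carrying the old name, and per-platform prototypes. A minimal sketch of the pattern using the names from this patch (the condensed binding helper below is hypothetical; the real assignments happen in WebRtcIsacfix_EncoderInit and WebRtcIsacfix_InitMIPS, shown later in this patch):

/* Header side: one pointer type, one extern pointer, concrete variants. */
typedef void (*HighpassFilterFixDec32)(int16_t* io, int16_t len,
                                       const int16_t* coefficient,
                                       int32_t* state);
extern HighpassFilterFixDec32 WebRtcIsacfix_HighpassFilterFixDec32;

/* One .c file defines the pointer; initialization binds it once. */
HighpassFilterFixDec32 WebRtcIsacfix_HighpassFilterFixDec32;

static void BindHighpassFilter(void) {  /* hypothetical condensation */
  WebRtcIsacfix_HighpassFilterFixDec32 = WebRtcIsacfix_HighpassFilterFixDec32C;
#if defined(MIPS_DSP_R1_LE)
  WebRtcIsacfix_HighpassFilterFixDec32 =
      WebRtcIsacfix_HighpassFilterFixDec32MIPS;
#endif
}

Call sites are unchanged: WebRtcIsacfix_HighpassFilterFixDec32(io, len, coefficient, state) works the same whether the pointer holds the C or the MIPS routine.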
diff --git a/modules/audio_coding/codecs/isac/fix/source/filterbanks.c b/modules/audio_coding/codecs/isac/fix/source/filterbanks.c
index 64557e13..1928a7cb 100644
--- a/modules/audio_coding/codecs/isac/fix/source/filterbanks.c
+++ b/modules/audio_coding/codecs/isac/fix/source/filterbanks.c
@@ -86,10 +86,13 @@ void WebRtcIsacfix_AllpassFilter2FixDec16C(
filter_state_ch2[1] = state1_ch2;
}
-void WebRtcIsacfix_HighpassFilterFixDec32(int16_t *io,
- int16_t len,
- const int16_t *coefficient,
- int32_t *state)
+// Declare a function pointer.
+HighpassFilterFixDec32 WebRtcIsacfix_HighpassFilterFixDec32;
+
+void WebRtcIsacfix_HighpassFilterFixDec32C(int16_t *io,
+ int16_t len,
+ const int16_t *coefficient,
+ int32_t *state)
{
int k;
int32_t a1 = 0, b1 = 0, c = 0, in = 0;
diff --git a/modules/audio_coding/codecs/isac/fix/source/filterbanks_mips.c b/modules/audio_coding/codecs/isac/fix/source/filterbanks_mips.c
index 1887745b..4dd70cf6 100644
--- a/modules/audio_coding/codecs/isac/fix/source/filterbanks_mips.c
+++ b/modules/audio_coding/codecs/isac/fix/source/filterbanks_mips.c
@@ -10,26 +10,26 @@
#include "webrtc/modules/audio_coding/codecs/isac/fix/source/filterbank_internal.h"
-// WebRtcIsacfix_AllpassFilter2FixDec16 function optimized for MIPSDSP platform
-// Bit-exact with WebRtcIsacfix_AllpassFilter2FixDec16C from filterbanks.c
+// WebRtcIsacfix_AllpassFilter2FixDec16 function optimized for MIPSDSP platform.
+// Bit-exact with WebRtcIsacfix_AllpassFilter2FixDec16C from filterbanks.c.
void WebRtcIsacfix_AllpassFilter2FixDec16MIPS(
- int16_t *data_ch1, // Input and output in channel 1, in Q0
- int16_t *data_ch2, // Input and output in channel 2, in Q0
- const int16_t *factor_ch1, // Scaling factor for channel 1, in Q15
- const int16_t *factor_ch2, // Scaling factor for channel 2, in Q15
- const int length, // Length of the data buffers
- int32_t *filter_state_ch1, // Filter state for channel 1, in Q16
- int32_t *filter_state_ch2) { // Filter state for channel 2, in Q16
+ int16_t* data_ch1, // Input and output in channel 1, in Q0.
+ int16_t* data_ch2, // Input and output in channel 2, in Q0.
+ const int16_t* factor_ch1, // Scaling factor for channel 1, in Q15.
+ const int16_t* factor_ch2, // Scaling factor for channel 2, in Q15.
+ const int length, // Length of the data buffers.
+ int32_t* filter_state_ch1, // Filter state for channel 1, in Q16.
+ int32_t* filter_state_ch2) { // Filter state for channel 2, in Q16.
- int32_t st0_ch1, st1_ch1; // channel1 state variables
- int32_t st0_ch2, st1_ch2; // channel2 state variables
- int32_t f_ch10, f_ch11, f_ch20, f_ch21; // factor variables
- int32_t r0, r1, r2, r3, r4, r5; // temporary ragister variables
+ int32_t st0_ch1, st1_ch1; // channel1 state variables.
+ int32_t st0_ch2, st1_ch2; // channel2 state variables.
+ int32_t f_ch10, f_ch11, f_ch20, f_ch21; // factor variables.
+ int32_t r0, r1, r2, r3, r4, r5; // temporary register variables.
__asm __volatile (
".set push \n\t"
".set noreorder \n\t"
- // Load all the state and factor variables
+ // Load all the state and factor variables.
"lh %[f_ch10], 0(%[factor_ch1]) \n\t"
"lh %[f_ch20], 0(%[factor_ch2]) \n\t"
"lh %[f_ch11], 2(%[factor_ch1]) \n\t"
@@ -38,7 +38,7 @@ void WebRtcIsacfix_AllpassFilter2FixDec16MIPS(
"lw %[st1_ch1], 4(%[filter_state_ch1]) \n\t"
"lw %[st0_ch2], 0(%[filter_state_ch2]) \n\t"
"lw %[st1_ch2], 4(%[filter_state_ch2]) \n\t"
- // Allpass filtering loop
+ // Allpass filtering loop.
"1: \n\t"
"lh %[r0], 0(%[data_ch1]) \n\t"
"lh %[r1], 0(%[data_ch2]) \n\t"
@@ -80,7 +80,7 @@ void WebRtcIsacfix_AllpassFilter2FixDec16MIPS(
"subq_s.w %[st1_ch2], %[r3], %[r1] \n\t"
"bgtz %[length], 1b \n\t"
" addiu %[data_ch2], %[data_ch2], 2 \n\t"
- // Store channel states
+ // Store channel states.
"sw %[st0_ch1], 0(%[filter_state_ch1]) \n\t"
"sw %[st1_ch1], 4(%[filter_state_ch1]) \n\t"
"sw %[st0_ch2], 0(%[filter_state_ch2]) \n\t"
@@ -100,3 +100,143 @@ void WebRtcIsacfix_AllpassFilter2FixDec16MIPS(
: "memory", "hi", "lo"
);
}
+
+// WebRtcIsacfix_HighpassFilterFixDec32 function optimized for MIPSDSP platform.
+// Bit-exact with WebRtcIsacfix_HighpassFilterFixDec32C from filterbanks.c.
+void WebRtcIsacfix_HighpassFilterFixDec32MIPS(int16_t* io,
+ int16_t len,
+ const int16_t* coefficient,
+ int32_t* state) {
+ int k;
+ int32_t a1, a2, b1, b2, in;
+ int32_t state0 = state[0];
+ int32_t state1 = state[1];
+
+ int32_t c0, c1, c2, c3;
+ int32_t c4, c5, c6, c7;
+ int32_t state0_lo, state0_hi;
+ int32_t state1_lo, state1_hi;
+ int32_t t0, t1, t2, t3, t4, t5;
+
+ __asm __volatile (
+ "lh %[c0], 0(%[coeff_ptr]) \n\t"
+ "lh %[c1], 2(%[coeff_ptr]) \n\t"
+ "lh %[c2], 4(%[coeff_ptr]) \n\t"
+ "lh %[c3], 6(%[coeff_ptr]) \n\t"
+ "sra %[state0_hi], %[state0], 16 \n\t"
+ "sra %[state1_hi], %[state1], 16 \n\t"
+ "andi %[state0_lo], %[state0], 0xFFFF \n\t"
+ "andi %[state1_lo], %[state1], 0xFFFF \n\t"
+ "lh %[c4], 8(%[coeff_ptr]) \n\t"
+ "lh %[c5], 10(%[coeff_ptr]) \n\t"
+ "lh %[c6], 12(%[coeff_ptr]) \n\t"
+ "lh %[c7], 14(%[coeff_ptr]) \n\t"
+ "sra %[state0_lo], %[state0_lo], 1 \n\t"
+ "sra %[state1_lo], %[state1_lo], 1 \n\t"
+ : [c0] "=&r" (c0), [c1] "=&r" (c1), [c2] "=&r" (c2), [c3] "=&r" (c3),
+ [c4] "=&r" (c4), [c5] "=&r" (c5), [c6] "=&r" (c6), [c7] "=&r" (c7),
+ [state0_hi] "=&r" (state0_hi), [state0_lo] "=&r" (state0_lo),
+ [state1_hi] "=&r" (state1_hi), [state1_lo] "=&r" (state1_lo)
+ : [coeff_ptr] "r" (coefficient), [state0] "r" (state0),
+ [state1] "r" (state1)
+ : "memory"
+ );
+
+ for (k = 0; k < len; k++) {
+ in = (int32_t)io[k];
+
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "mul %[t2], %[c4], %[state0_lo] \n\t"
+ "mul %[t0], %[c5], %[state0_lo] \n\t"
+ "mul %[t1], %[c4], %[state0_hi] \n\t"
+ "mul %[a1], %[c5], %[state0_hi] \n\t"
+ "mul %[t5], %[c6], %[state1_lo] \n\t"
+ "mul %[t3], %[c7], %[state1_lo] \n\t"
+ "mul %[t4], %[c6], %[state1_hi] \n\t"
+ "mul %[b1], %[c7], %[state1_hi] \n\t"
+ "shra_r.w %[t2], %[t2], 15 \n\t"
+ "shra_r.w %[t0], %[t0], 15 \n\t"
+ "addu %[t1], %[t1], %[t2] \n\t"
+ "addu %[a1], %[a1], %[t0] \n\t"
+ "sra %[t1], %[t1], 16 \n\t"
+ "addu %[a1], %[a1], %[t1] \n\t"
+ "shra_r.w %[t5], %[t5], 15 \n\t"
+ "shra_r.w %[t3], %[t3], 15 \n\t"
+ "addu %[t4], %[t4], %[t5] \n\t"
+ "addu %[b1], %[b1], %[t3] \n\t"
+ "sra %[t4], %[t4], 16 \n\t"
+ "addu %[b1], %[b1], %[t4] \n\t"
+ "mul %[t2], %[c0], %[state0_lo] \n\t"
+ "mul %[t0], %[c1], %[state0_lo] \n\t"
+ "mul %[t1], %[c0], %[state0_hi] \n\t"
+ "mul %[a2], %[c1], %[state0_hi] \n\t"
+ "mul %[t5], %[c2], %[state1_lo] \n\t"
+ "mul %[t3], %[c3], %[state1_lo] \n\t"
+ "mul %[t4], %[c2], %[state1_hi] \n\t"
+ "mul %[b2], %[c3], %[state1_hi] \n\t"
+ "shra_r.w %[t2], %[t2], 15 \n\t"
+ "shra_r.w %[t0], %[t0], 15 \n\t"
+ "addu %[t1], %[t1], %[t2] \n\t"
+ "addu %[a2], %[a2], %[t0] \n\t"
+ "sra %[t1], %[t1], 16 \n\t"
+ "addu %[a2], %[a2], %[t1] \n\t"
+ "shra_r.w %[t5], %[t5], 15 \n\t"
+ "shra_r.w %[t3], %[t3], 15 \n\t"
+ "addu %[t4], %[t4], %[t5] \n\t"
+ "addu %[b2], %[b2], %[t3] \n\t"
+ "sra %[t4], %[t4], 16 \n\t"
+ "addu %[b2], %[b2], %[t4] \n\t"
+ "addu %[a1], %[a1], %[b1] \n\t"
+ "sra %[a1], %[a1], 7 \n\t"
+ "addu %[a1], %[a1], %[in] \n\t"
+ "sll %[t0], %[in], 2 \n\t"
+ "addu %[a2], %[a2], %[b2] \n\t"
+ "subu %[t0], %[t0], %[a2] \n\t"
+ "shll_s.w %[a1], %[a1], 16 \n\t"
+ "shll_s.w %[t0], %[t0], 2 \n\t"
+ "sra %[a1], %[a1], 16 \n\t"
+ "addu %[state1_hi], %[state0_hi], $0 \n\t"
+ "addu %[state1_lo], %[state0_lo], $0 \n\t"
+ "sra %[state0_hi], %[t0], 16 \n\t"
+ "andi %[state0_lo], %[t0], 0xFFFF \n\t"
+ "sra %[state0_lo], %[state0_lo], 1 \n\t"
+ ".set pop \n\t"
+ : [a1] "=&r" (a1), [b1] "=&r" (b1), [a2] "=&r" (a2), [b2] "=&r" (b2),
+ [state0_hi] "+r" (state0_hi), [state0_lo] "+r" (state0_lo),
+ [state1_hi] "+r" (state1_hi), [state1_lo] "+r" (state1_lo),
+ [t0] "=&r" (t0), [t1] "=&r" (t1), [t2] "=&r" (t2),
+ [t3] "=&r" (t3), [t4] "=&r" (t4), [t5] "=&r" (t5)
+ : [c0] "r" (c0), [c1] "r" (c1), [c2] "r" (c2), [c3] "r" (c3),
+ [c4] "r" (c4), [c5] "r" (c5), [c6] "r" (c6), [c7] "r" (c7),
+ [in] "r" (in)
+ : "hi", "lo"
+ );
+ io[k] = (int16_t)a1;
+ }
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+#if !defined(MIPS_DSP_R2_LE)
+ "sll %[state0_hi], %[state0_hi], 16 \n\t"
+ "sll %[state0_lo], %[state0_lo], 1 \n\t"
+ "sll %[state1_hi], %[state1_hi], 16 \n\t"
+ "sll %[state1_lo], %[state1_lo], 1 \n\t"
+ "or %[state0_hi], %[state0_hi], %[state0_lo] \n\t"
+ "or %[state1_hi], %[state1_hi], %[state1_lo] \n\t"
+#else
+ "sll %[state0_lo], %[state0_lo], 1 \n\t"
+ "sll %[state1_lo], %[state1_lo], 1 \n\t"
+ "precr_sra.ph.w %[state0_hi], %[state0_lo], 0 \n\t"
+ "precr_sra.ph.w %[state1_hi], %[state1_lo], 0 \n\t"
+#endif
+ "sw %[state0_hi], 0(%[state]) \n\t"
+ "sw %[state1_hi], 4(%[state]) \n\t"
+ ".set pop \n\t"
+ : [state0_hi] "+r" (state0_hi), [state0_lo] "+r" (state0_lo),
+ [state1_hi] "+r" (state1_hi), [state1_lo] "+r" (state1_lo)
+ : [state] "r" (state)
+ : "memory"
+ );
+}
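
The highpass routine above keeps each 32-bit Q4 state word split into a signed high half and a 15-bit low half (the prologue's sra/andi/sra sequence), so every partial product of a 16-bit coefficient with a state half fits in 32 bits; this mirrors the 16x32 multiply convention of the WEBRTC_SPL macros used by the C version. In C terms, the split and the non-DSPR2 rejoin amount to this sketch (illustrative only, not part of the patch):

int32_t s = state[0];
int32_t hi = s >> 16;            /* signed high 16 bits */
int32_t lo = (s & 0xFFFF) >> 1;  /* low 16 bits, kept as 15 bits */
/* ... per-sample filtering uses hi and lo in 16x16-bit multiplies ... */
state[0] = (hi << 16) | (lo << 1);  /* rejoin on exit */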
diff --git a/modules/audio_coding/codecs/isac/fix/source/filterbanks_unittest.cc b/modules/audio_coding/codecs/isac/fix/source/filterbanks_unittest.cc
index d7484277..d15318a7 100644
--- a/modules/audio_coding/codecs/isac/fix/source/filterbanks_unittest.cc
+++ b/modules/audio_coding/codecs/isac/fix/source/filterbanks_unittest.cc
@@ -86,6 +86,13 @@ TEST_F(FilterBanksTest, HighpassFilterFixDec32Test) {
-1280, -8554, -14496, -7561, -23541, -27263, -30560, -32768, -3441, -32768,
25203, -27550, 22419};
#endif
+ HighpassFilterFixDec32 WebRtcIsacfix_HighpassFilterFixDec32;
+#if defined(MIPS_DSP_R1_LE)
+ WebRtcIsacfix_HighpassFilterFixDec32 =
+ WebRtcIsacfix_HighpassFilterFixDec32MIPS;
+#else
+ WebRtcIsacfix_HighpassFilterFixDec32 = WebRtcIsacfix_HighpassFilterFixDec32C;
+#endif
for (int i = 0; i < kSamples; i++) {
in[i] = WEBRTC_SPL_WORD32_MAX / (i + 1);
diff --git a/modules/audio_coding/codecs/isac/fix/source/isacfix.c b/modules/audio_coding/codecs/isac/fix/source/isacfix.c
index 76359080..887a7ba2 100644
--- a/modules/audio_coding/codecs/isac/fix/source/isacfix.c
+++ b/modules/audio_coding/codecs/isac/fix/source/isacfix.c
@@ -209,9 +209,17 @@ static void WebRtcIsacfix_InitNeon(void) {
static void WebRtcIsacfix_InitMIPS(void) {
WebRtcIsacfix_AutocorrFix = WebRtcIsacfix_AutocorrMIPS;
WebRtcIsacfix_FilterMaLoopFix = WebRtcIsacfix_FilterMaLoopMIPS;
+ WebRtcIsacfix_Spec2Time = WebRtcIsacfix_Spec2TimeMIPS;
+ WebRtcIsacfix_Time2Spec = WebRtcIsacfix_Time2SpecMIPS;
#if defined(MIPS_DSP_R1_LE)
WebRtcIsacfix_AllpassFilter2FixDec16 =
WebRtcIsacfix_AllpassFilter2FixDec16MIPS;
+ WebRtcIsacfix_HighpassFilterFixDec32 =
+ WebRtcIsacfix_HighpassFilterFixDec32MIPS;
+#endif
+#if defined(MIPS_DSP_R2_LE)
+ WebRtcIsacfix_CalculateResidualEnergy =
+ WebRtcIsacfix_CalculateResidualEnergyMIPS;
#endif
}
#endif
@@ -300,10 +308,11 @@ int16_t WebRtcIsacfix_EncoderInit(ISACFIX_MainStruct *ISAC_main_inst,
WebRtcIsacfix_CalculateResidualEnergy =
WebRtcIsacfix_CalculateResidualEnergyC;
WebRtcIsacfix_AllpassFilter2FixDec16 = WebRtcIsacfix_AllpassFilter2FixDec16C;
+ WebRtcIsacfix_HighpassFilterFixDec32 = WebRtcIsacfix_HighpassFilterFixDec32C;
WebRtcIsacfix_Time2Spec = WebRtcIsacfix_Time2SpecC;
WebRtcIsacfix_Spec2Time = WebRtcIsacfix_Spec2TimeC;
WebRtcIsacfix_MatrixProduct1 = WebRtcIsacfix_MatrixProduct1C;
- WebRtcIsacfix_MatrixProduct2 = WebRtcIsacfix_MatrixProduct2C ;
+ WebRtcIsacfix_MatrixProduct2 = WebRtcIsacfix_MatrixProduct2C;
#ifdef WEBRTC_DETECT_ARM_NEON
if ((WebRtc_GetCPUFeaturesARM() & kCPUFeatureNEON) != 0) {
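
Initialization order is what keeps the new pointers safe: WebRtcIsacfix_EncoderInit first binds every dispatch pointer to its C reference routine, and only afterwards lets a platform hook override the subset it actually optimizes, so builds without the MIPS DSP extensions keep valid defaults. A condensed sketch of that flow (structure simplified; that the hook runs under a MIPS32_LE guard is an assumption based on the codec.h prototypes, since the call site is outside this hunk):

/* Inside WebRtcIsacfix_EncoderInit: C defaults first ... */
WebRtcIsacfix_HighpassFilterFixDec32 = WebRtcIsacfix_HighpassFilterFixDec32C;
WebRtcIsacfix_Time2Spec = WebRtcIsacfix_Time2SpecC;
WebRtcIsacfix_Spec2Time = WebRtcIsacfix_Spec2TimeC;
/* ... then the platform hook overrides what it has: */
#if defined(MIPS32_LE)
WebRtcIsacfix_InitMIPS();  /* rebinds Time2Spec/Spec2Time always, plus the
                              highpass filter under MIPS_DSP_R1_LE and the
                              residual-energy routine under MIPS_DSP_R2_LE */
#endif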
diff --git a/modules/audio_coding/codecs/isac/fix/source/isacfix.gypi b/modules/audio_coding/codecs/isac/fix/source/isacfix.gypi
index a18a803d..e5aade65 100644
--- a/modules/audio_coding/codecs/isac/fix/source/isacfix.gypi
+++ b/modules/audio_coding/codecs/isac/fix/source/isacfix.gypi
@@ -47,12 +47,14 @@
'lpc_masking_model.c',
'lpc_tables.c',
'pitch_estimator.c',
+ 'pitch_estimator_c.c',
'pitch_filter.c',
'pitch_filter_c.c',
'pitch_gain_tables.c',
'pitch_lag_tables.c',
'spectrum_ar_model_tables.c',
'transform.c',
+ 'transform_tables.c',
'arith_routins.h',
'bandwidth_estimator.h',
'codec.h',
@@ -89,9 +91,12 @@
'sources': [
'filters_mips.c',
'lattice_mips.c',
+ 'pitch_estimator_mips.c',
+ 'transform_mips.c',
],
'sources!': [
'lattice_c.c',
+ 'pitch_estimator_c.c',
],
'conditions': [
['mips_dsp_rev>0', {
@@ -101,6 +106,7 @@
}],
['mips_dsp_rev>1', {
'sources': [
+ 'lpc_masking_model_mips.c',
'pitch_filter_mips.c',
],
'sources!': [
diff --git a/modules/audio_coding/codecs/isac/fix/source/lpc_masking_model.h b/modules/audio_coding/codecs/isac/fix/source/lpc_masking_model.h
index 72e0cfc4..1270c142 100644
--- a/modules/audio_coding/codecs/isac/fix/source/lpc_masking_model.h
+++ b/modules/audio_coding/codecs/isac/fix/source/lpc_masking_model.h
@@ -62,6 +62,15 @@ int32_t WebRtcIsacfix_CalculateResidualEnergyNeon(int lpc_order,
int* q_val_residual_energy);
#endif
+#if defined(MIPS_DSP_R2_LE)
+int32_t WebRtcIsacfix_CalculateResidualEnergyMIPS(int lpc_order,
+ int32_t q_val_corr,
+ int q_val_polynomial,
+ int16_t* a_polynomial,
+ int32_t* corr_coeffs,
+ int* q_val_residual_energy);
+#endif
+
#ifdef __cplusplus
} /* extern "C" */
#endif
diff --git a/modules/audio_coding/codecs/isac/fix/source/lpc_masking_model_mips.c b/modules/audio_coding/codecs/isac/fix/source/lpc_masking_model_mips.c
new file mode 100644
index 00000000..55602b97
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/lpc_masking_model_mips.c
@@ -0,0 +1,237 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_coding/codecs/isac/fix/source/lpc_masking_model.h"
+
+// MIPS DSPR2 optimization for function WebRtcIsacfix_CalculateResidualEnergy
+// Bit-exact with WebRtcIsacfix_CalculateResidualEnergyC from file
+// lpc_masking_model.c
+int32_t WebRtcIsacfix_CalculateResidualEnergyMIPS(int lpc_order,
+ int32_t q_val_corr,
+ int q_val_polynomial,
+ int16_t* a_polynomial,
+ int32_t* corr_coeffs,
+ int* q_val_residual_energy) {
+
+ int i = 0, j = 0;
+ int shift_internal = 0, shift_norm = 0;
+ int32_t tmp32 = 0, word32_high = 0, word32_low = 0, residual_energy = 0;
+ int32_t tmp_corr_c = corr_coeffs[0];
+ int16_t* tmp_a_poly = &a_polynomial[0];
+ int32_t sum64_hi = 0;
+ int32_t sum64_lo = 0;
+
+ for (j = 0; j <= lpc_order; j++) {
+ // For the case of i == 0:
+ // residual_energy +=
+ // a_polynomial[j] * corr_coeffs[i] * a_polynomial[j - i];
+
+ int32_t tmp2, tmp3;
+ int16_t sign_1;
+ int16_t sign_2;
+ int16_t sign_3;
+
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "lh %[tmp2], 0(%[tmp_a_poly]) \n\t"
+ "mul %[tmp32], %[tmp2], %[tmp2] \n\t"
+ "addiu %[tmp_a_poly], %[tmp_a_poly], 2 \n\t"
+ "sra %[sign_2], %[sum64_hi], 31 \n\t"
+ "mult $ac0, %[tmp32], %[tmp_corr_c] \n\t"
+ "shilov $ac0, %[shift_internal] \n\t"
+ "mfhi %[tmp2], $ac0 \n\t"
+ "mflo %[tmp3], $ac0 \n\t"
+ "sra %[sign_1], %[tmp2], 31 \n\t"
+ "xor %[sign_3], %[sign_1], %[sign_2] \n\t"
+ ".set pop \n\t"
+ : [tmp2] "=&r" (tmp2), [tmp3] "=&r" (tmp3), [tmp32] "=&r" (tmp32),
+ [tmp_a_poly] "+r" (tmp_a_poly), [sign_1] "=&r" (sign_1),
+ [sign_3] "=&r" (sign_3), [sign_2] "=&r" (sign_2),
+ [sum64_hi] "+r" (sum64_hi), [sum64_lo] "+r" (sum64_lo)
+ : [tmp_corr_c] "r" (tmp_corr_c), [shift_internal] "r" (shift_internal)
+ : "hi", "lo", "memory"
+ );
+
+ if (sign_3 != 0) {
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "addsc %[sum64_lo], %[sum64_lo], %[tmp3] \n\t"
+ "addwc %[sum64_hi], %[sum64_hi], %[tmp2] \n\t"
+ ".set pop \n\t"
+ : [sum64_hi] "+r" (sum64_hi), [sum64_lo] "+r" (sum64_lo)
+ : [tmp2] "r" (tmp2), [tmp3] "r" (tmp3)
+ : "hi", "lo", "memory"
+ );
+ } else {
+ if (((!(sign_1 || sign_2)) && (0x7FFFFFFF - sum64_hi < tmp2)) ||
+ ((sign_1 && sign_2) && (sum64_hi + tmp2 > 0))) {
+ // Shift right for overflow.
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "addiu %[shift_internal], %[shift_internal], 1 \n\t"
+ "prepend %[sum64_lo], %[sum64_hi], 1 \n\t"
+ "sra %[sum64_hi], %[sum64_hi], 1 \n\t"
+ "prepend %[tmp3], %[tmp2], 1 \n\t"
+ "sra %[tmp2], %[tmp2], 1 \n\t"
+ "addsc %[sum64_lo], %[sum64_lo], %[tmp3] \n\t"
+ "addwc %[sum64_hi], %[sum64_hi], %[tmp2] \n\t"
+ ".set pop \n\t"
+ : [tmp2] "+r" (tmp2), [tmp3] "+r" (tmp3),
+ [shift_internal] "+r" (shift_internal),
+ [sum64_hi] "+r" (sum64_hi), [sum64_lo] "+r" (sum64_lo)
+ :
+ : "hi", "lo", "memory"
+ );
+ } else {
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "addsc %[sum64_lo], %[sum64_lo], %[tmp3] \n\t"
+ "addwc %[sum64_hi], %[sum64_hi], %[tmp2] \n\t"
+ ".set pop \n\t"
+ : [sum64_hi] "+r" (sum64_hi), [sum64_lo] "+r" (sum64_lo)
+ : [tmp2] "r" (tmp2), [tmp3] "r" (tmp3)
+ : "hi", "lo", "memory"
+ );
+ }
+ }
+ }
+
+ for (i = 1; i <= lpc_order; i++) {
+ tmp_corr_c = corr_coeffs[i];
+ int16_t* tmp_a_poly_j = &a_polynomial[i];
+ int16_t* tmp_a_poly_j_i = &a_polynomial[0];
+ for (j = i; j <= lpc_order; j++) {
+ // For the case of i = 1 .. lpc_order:
+ // residual_energy +=
+ // a_polynomial[j] * corr_coeffs[i] * a_polynomial[j - i] * 2;
+
+ int32_t tmp2, tmp3;
+ int16_t sign_1;
+ int16_t sign_2;
+ int16_t sign_3;
+
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "lh %[tmp3], 0(%[tmp_a_poly_j]) \n\t"
+ "lh %[tmp2], 0(%[tmp_a_poly_j_i]) \n\t"
+ "addiu %[tmp_a_poly_j], %[tmp_a_poly_j], 2 \n\t"
+ "addiu %[tmp_a_poly_j_i], %[tmp_a_poly_j_i], 2 \n\t"
+ "mul %[tmp32], %[tmp3], %[tmp2] \n\t"
+ "sll %[tmp32], %[tmp32], 1 \n\t"
+ "mult $ac0, %[tmp32], %[tmp_corr_c] \n\t"
+ "shilov $ac0, %[shift_internal] \n\t"
+ "mfhi %[tmp2], $ac0 \n\t"
+ "mflo %[tmp3], $ac0 \n\t"
+ "sra %[sign_1], %[tmp2], 31 \n\t"
+ "sra %[sign_2], %[sum64_hi], 31 \n\t"
+ "xor %[sign_3], %[sign_1], %[sign_2] \n\t"
+ ".set pop \n\t"
+ : [tmp2] "=&r" (tmp2), [tmp3] "=&r" (tmp3), [tmp32] "=&r" (tmp32),
+ [tmp_a_poly_j] "+r" (tmp_a_poly_j), [sign_1] "=&r" (sign_1),
+ [tmp_a_poly_j_i] "+r" (tmp_a_poly_j_i), [sign_2] "=&r" (sign_2),
+ [sign_3] "=&r" (sign_3), [sum64_hi] "+r" (sum64_hi),
+ [sum64_lo] "+r" (sum64_lo)
+ : [tmp_corr_c] "r" (tmp_corr_c), [shift_internal] "r" (shift_internal)
+ : "hi", "lo", "memory"
+ );
+ if (sign_3 != 0) {
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "addsc %[sum64_lo], %[sum64_lo], %[tmp3] \n\t"
+ "addwc %[sum64_hi], %[sum64_hi], %[tmp2] \n\t"
+ ".set pop \n\t"
+ : [tmp2] "+r" (tmp2), [tmp3] "+r" (tmp3), [sum64_hi] "+r" (sum64_hi),
+ [sum64_lo] "+r" (sum64_lo)
+ :
+ :"memory"
+ );
+ } else {
+ // Test overflow and sum the result.
+ if (((!(sign_1 || sign_2)) && (0x7FFFFFFF - sum64_hi < tmp2)) ||
+ ((sign_1 && sign_2) && (sum64_hi + tmp2 > 0))) {
+ // Shift right for overflow.
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "addiu %[shift_internal], %[shift_internal], 1 \n\t"
+ "prepend %[sum64_lo], %[sum64_hi], 1 \n\t"
+ "sra %[sum64_hi], %[sum64_hi], 1 \n\t"
+ "prepend %[tmp3], %[tmp2], 1 \n\t"
+ "sra %[tmp2], %[tmp2], 1 \n\t"
+ "addsc %[sum64_lo], %[sum64_lo], %[tmp3] \n\t"
+ "addwc %[sum64_hi], %[sum64_hi], %[tmp2] \n\t"
+ ".set pop \n\t"
+ : [tmp2] "+r" (tmp2), [tmp3] "+r" (tmp3),
+ [shift_internal] "+r" (shift_internal),
+ [sum64_hi] "+r" (sum64_hi), [sum64_lo] "+r" (sum64_lo)
+ :
+ : "hi", "lo", "memory"
+ );
+ } else {
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "addsc %[sum64_lo], %[sum64_lo], %[tmp3] \n\t"
+ "addwc %[sum64_hi], %[sum64_hi], %[tmp2] \n\t"
+ ".set pop \n\t"
+ : [tmp2] "+r" (tmp2), [tmp3] "+r" (tmp3),
+ [sum64_hi] "+r" (sum64_hi), [sum64_lo] "+r" (sum64_lo)
+ :
+ : "hi", "lo", "memory"
+ );
+ }
+ }
+ }
+ }
+ word32_high = sum64_hi;
+ word32_low = sum64_lo;
+
+ // Calculate the value of shifting (shift_norm) for the 64-bit sum.
+ if (word32_high != 0) {
+ shift_norm = 32 - WebRtcSpl_NormW32(word32_high);
+ int tmp1;
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "srl %[residual_energy], %[sum64_lo], %[shift_norm] \n\t"
+ "li %[tmp1], 32 \n\t"
+ "subu %[tmp1], %[tmp1], %[shift_norm] \n\t"
+ "sll %[tmp1], %[sum64_hi], %[tmp1] \n\t"
+ "or %[residual_energy], %[residual_energy], %[tmp1] \n\t"
+ ".set pop \n\t"
+ : [residual_energy] "=&r" (residual_energy), [tmp1]"=&r"(tmp1),
+ [sum64_hi] "+r" (sum64_hi), [sum64_lo] "+r" (sum64_lo)
+ : [shift_norm] "r" (shift_norm)
+ : "memory"
+ );
+ } else {
+ if ((word32_low & 0x80000000) != 0) {
+ shift_norm = 1;
+ residual_energy = (uint32_t)word32_low >> 1;
+ } else {
+ shift_norm = WebRtcSpl_NormW32(word32_low);
+ residual_energy = word32_low << shift_norm;
+ shift_norm = -shift_norm;
+ }
+ }
+
+ // Q(q_val_polynomial * 2) * Q(q_val_corr) >> shift_internal >> shift_norm
+ // = Q(q_val_corr - shift_internal - shift_norm + q_val_polynomial * 2)
+ *q_val_residual_energy =
+ q_val_corr - shift_internal - shift_norm + q_val_polynomial * 2;
+
+ return residual_energy;
+}
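
The sign tests around the addsc/addwc pairs above emulate a 64-bit accumulator in a 32-bit register pair: whenever the next high-word addition could overflow, both the running sum and the incoming product are shifted right one bit and shift_internal is incremented, and the final Q-value compensates for every such shift. A portable sketch of the same idea (WouldOverflowHi is a hypothetical placeholder for the inline sign/magnitude test; only the i == 0 terms are shown, and the i >= 1 cross terms are additionally doubled):

int64_t sum = 0;        /* sum64_hi:sum64_lo in the assembly */
int shift_internal = 0;
for (int j = 0; j <= lpc_order; j++) {
  int64_t prod = ((int64_t)a_polynomial[j] * a_polynomial[j] *
                  corr_coeffs[0]) >> shift_internal;
  if (WouldOverflowHi(sum, prod)) {  /* hypothetical helper */
    sum >>= 1;
    prod >>= 1;
    ++shift_internal;
  }
  sum += prod;
}
/* sum is then in Q(q_val_corr + 2 * q_val_polynomial - shift_internal),
   matching the Q-value bookkeeping in the comment above. */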
diff --git a/modules/audio_coding/codecs/isac/fix/source/pitch_estimator.c b/modules/audio_coding/codecs/isac/fix/source/pitch_estimator.c
index 9c4e5875..426b2cf4 100644
--- a/modules/audio_coding/codecs/isac/fix/source/pitch_estimator.c
+++ b/modules/audio_coding/codecs/isac/fix/source/pitch_estimator.c
@@ -29,7 +29,7 @@ static const int16_t kACoefQ12[3] = {
-static __inline int32_t Log2Q8( uint32_t x ) {
+__inline int32_t WebRtcIsacfix_Log2Q8( uint32_t x ) {
int32_t zeros, lg2;
int16_t frac;
@@ -153,109 +153,7 @@ static void FindFour32(int32_t *in, int16_t length, int16_t *bestind)
-static void PCorr2Q32(const int16_t *in, int32_t *logcorQ8)
-{
- int16_t scaling,n,k;
- int32_t ysum32,csum32, lys, lcs;
- int32_t oneQ8;
-
-
- const int16_t *x, *inptr;
-
- oneQ8 = WEBRTC_SPL_LSHIFT_W32((int32_t)1, 8); // 1.00 in Q8
-
- x = in + PITCH_MAX_LAG/2 + 2;
- scaling = WebRtcSpl_GetScalingSquare ((int16_t *) in, PITCH_CORR_LEN2, PITCH_CORR_LEN2);
- ysum32 = 1;
- csum32 = 0;
- x = in + PITCH_MAX_LAG/2 + 2;
- for (n = 0; n < PITCH_CORR_LEN2; n++) {
- ysum32 += WEBRTC_SPL_MUL_16_16_RSFT( (int16_t) in[n],(int16_t) in[n], scaling); // Q0
- csum32 += WEBRTC_SPL_MUL_16_16_RSFT((int16_t) x[n],(int16_t) in[n], scaling); // Q0
- }
-
- logcorQ8 += PITCH_LAG_SPAN2 - 1;
-
- lys=Log2Q8((uint32_t) ysum32); // Q8
- lys=WEBRTC_SPL_RSHIFT_W32(lys, 1); //sqrt(ysum);
-
- if (csum32>0) {
-
- lcs=Log2Q8((uint32_t) csum32); // 2log(csum) in Q8
-
- if (lcs>(lys + oneQ8) ){ // csum/sqrt(ysum) > 2 in Q8
- *logcorQ8 = lcs - lys; // log2(csum/sqrt(ysum))
- } else {
- *logcorQ8 = oneQ8; // 1.00
- }
-
- } else {
- *logcorQ8 = 0;
- }
-
-
- for (k = 1; k < PITCH_LAG_SPAN2; k++) {
- inptr = &in[k];
- ysum32 -= WEBRTC_SPL_MUL_16_16_RSFT( (int16_t) in[k-1],(int16_t) in[k-1], scaling);
- ysum32 += WEBRTC_SPL_MUL_16_16_RSFT( (int16_t) in[PITCH_CORR_LEN2 + k - 1],(int16_t) in[PITCH_CORR_LEN2 + k - 1], scaling);
-
-#ifdef WEBRTC_ARCH_ARM_NEON
- {
- int32_t vbuff[4];
- int32x4_t int_32x4_sum = vmovq_n_s32(0);
- // Can't shift a Neon register to right with a non-constant shift value.
- int32x4_t int_32x4_scale = vdupq_n_s32(-scaling);
- // Assert a codition used in loop unrolling at compile-time.
- COMPILE_ASSERT(PITCH_CORR_LEN2 %4 == 0);
-
- for (n = 0; n < PITCH_CORR_LEN2; n += 4) {
- int16x4_t int_16x4_x = vld1_s16(&x[n]);
- int16x4_t int_16x4_in = vld1_s16(&inptr[n]);
- int32x4_t int_32x4 = vmull_s16(int_16x4_x, int_16x4_in);
- int_32x4 = vshlq_s32(int_32x4, int_32x4_scale);
- int_32x4_sum = vaddq_s32(int_32x4_sum, int_32x4);
- }
-
- // Use vector store to avoid long stall from data trasferring
- // from vector to general register.
- vst1q_s32(vbuff, int_32x4_sum);
- csum32 = vbuff[0] + vbuff[1];
- csum32 += vbuff[2];
- csum32 += vbuff[3];
- }
-#else
- csum32 = 0;
- if(scaling == 0) {
- for (n = 0; n < PITCH_CORR_LEN2; n++) {
- csum32 += x[n] * inptr[n];
- }
- } else {
- for (n = 0; n < PITCH_CORR_LEN2; n++) {
- csum32 += (x[n] * inptr[n]) >> scaling;
- }
- }
-#endif
-
- logcorQ8--;
-
- lys=Log2Q8((uint32_t)ysum32); // Q8
- lys=WEBRTC_SPL_RSHIFT_W32(lys, 1); //sqrt(ysum);
-
- if (csum32>0) {
-
- lcs=Log2Q8((uint32_t) csum32); // 2log(csum) in Q8
-
- if (lcs>(lys + oneQ8) ){ // csum/sqrt(ysum) > 2
- *logcorQ8 = lcs - lys; // log2(csum/sqrt(ysum))
- } else {
- *logcorQ8 = oneQ8; // 1.00
- }
-
- } else {
- *logcorQ8 = 0;
- }
- }
-}
+extern void WebRtcIsacfix_PCorr2Q32(const int16_t *in, int32_t *logcorQ8);
@@ -311,12 +209,13 @@ void WebRtcIsacfix_InitialPitch(const int16_t *in, /* Q0 */
/* compute correlation for first and second half of the frame */
- PCorr2Q32(buf_dec16, crrvecQ8_1);
- PCorr2Q32(buf_dec16 + PITCH_CORR_STEP2, crrvecQ8_2);
+ WebRtcIsacfix_PCorr2Q32(buf_dec16, crrvecQ8_1);
+ WebRtcIsacfix_PCorr2Q32(buf_dec16 + PITCH_CORR_STEP2, crrvecQ8_2);
/* bias towards pitch lag of previous frame */
- tmp32a = Log2Q8((uint32_t) old_lagQ8) - 2304; // log2(0.5*oldlag) in Q8
+ tmp32a = WebRtcIsacfix_Log2Q8((uint32_t) old_lagQ8) - 2304;
+ // log2(0.5*oldlag) in Q8
tmp32b = WEBRTC_SPL_MUL_16_16_RSFT(oldgQ12,oldgQ12, 10); //Q12 & * 4.0;
gain_bias16 = (int16_t) tmp32b; //Q12
if (gain_bias16 > 3276) gain_bias16 = 3276; // 0.8 in Q12
@@ -325,7 +224,7 @@ void WebRtcIsacfix_InitialPitch(const int16_t *in, /* Q0 */
for (k = 0; k < PITCH_LAG_SPAN2; k++)
{
if (crrvecQ8_1[k]>0) {
- tmp32b = Log2Q8((uint32_t) (k + (PITCH_MIN_LAG/2-2)));
+ tmp32b = WebRtcIsacfix_Log2Q8((uint32_t) (k + (PITCH_MIN_LAG/2-2)));
tmp16a = (int16_t) (tmp32b - tmp32a); // Q8 & fabs(ratio)<4
tmp32c = WEBRTC_SPL_MUL_16_16_RSFT(tmp16a,tmp16a, 6); //Q10
tmp16b = (int16_t) tmp32c; // Q10 & <8
@@ -334,7 +233,8 @@ void WebRtcIsacfix_InitialPitch(const int16_t *in, /* Q0 */
tmp16d = Exp2Q10((int16_t) -tmp16c); //Q10
tmp32c = WEBRTC_SPL_MUL_16_16_RSFT(gain_bias16,tmp16d,13); // Q10 & * 0.5
bias16 = (int16_t) (1024 + tmp32c); // Q10
- tmp32b = Log2Q8((uint32_t) bias16) - 2560; // Q10 in -> Q8 out with 10*2^8 offset
+ tmp32b = WebRtcIsacfix_Log2Q8((uint32_t)bias16) - 2560;
+ // Q10 in -> Q8 out with 10*2^8 offset
crrvecQ8_1[k] += tmp32b ; // -10*2^8 offset
}
}
@@ -407,7 +307,7 @@ void WebRtcIsacfix_InitialPitch(const int16_t *in, /* Q0 */
xq[0] = WEBRTC_SPL_LSHIFT_W32(xq[0], 8);
Intrp1DQ8(xq, fxq, yq, fyq);
- tmp32a= Log2Q8((uint32_t) *yq) - 2048; // offset 8*2^8
+ tmp32a= WebRtcIsacfix_Log2Q8((uint32_t) *yq) - 2048; // offset 8*2^8
/* Bias towards short lags */
/* log(pow(0.8, log(2.0 * *y )))/log(2.0) */
tmp32b= WEBRTC_SPL_MUL_16_16_RSFT((int16_t) tmp32a, -42, 8);
@@ -437,10 +337,13 @@ void WebRtcIsacfix_InitialPitch(const int16_t *in, /* Q0 */
tmp32b = (int32_t) (WEBRTC_SPL_LSHIFT_W32(tmp32a, 1)) - ratq; // Q8
tmp32c = WEBRTC_SPL_MUL_16_16_RSFT((int16_t) tmp32b, (int16_t) tmp32b, 8); // Q8
- tmp32b = (int32_t) tmp32c + (int32_t) WEBRTC_SPL_RSHIFT_W32(ratq, 1); // (k-r)^2 + 0.5 * r Q8
- tmp32c = Log2Q8((uint32_t) tmp32a) - 2048; // offset 8*2^8 , log2(0.5*k) Q8
- tmp32d = Log2Q8((uint32_t) tmp32b) - 2048; // offset 8*2^8 , log2(0.5*k) Q8
- tmp32e = tmp32c -tmp32d;
+ tmp32b = (int32_t)tmp32c + (int32_t)WEBRTC_SPL_RSHIFT_W32(ratq, 1);
+ // (k-r)^2 + 0.5 * r Q8
+ tmp32c = WebRtcIsacfix_Log2Q8((uint32_t)tmp32a) - 2048;
+ // offset 8*2^8 , log2(0.5*k) Q8
+ tmp32d = WebRtcIsacfix_Log2Q8((uint32_t)tmp32b) - 2048;
+ // offset 8*2^8 , log2(0.5*k) Q8
+ tmp32e = tmp32c - tmp32d;
cv2q[k] += WEBRTC_SPL_RSHIFT_W32(tmp32e, 1);
@@ -481,7 +384,7 @@ void WebRtcIsacfix_InitialPitch(const int16_t *in, /* Q0 */
/* Bias towards short lags */
/* log(pow(0.8, log(2.0f * *y )))/log(2.0f) */
- tmp32a= Log2Q8((uint32_t) *yq) - 2048; // offset 8*2^8
+ tmp32a= WebRtcIsacfix_Log2Q8((uint32_t) *yq) - 2048; // offset 8*2^8
tmp32b= WEBRTC_SPL_MUL_16_16_RSFT((int16_t) tmp32a, -82, 8);
tmp32c= tmp32b + 256;
*fyq += tmp32c;
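
Throughout this file the correlation math stays in the log2 domain with 8 fractional bits: logcorQ8 = Log2Q8(csum) - Log2Q8(ysum) / 2 is log2(csum / sqrt(ysum)) in Q8. A quick numeric check: for csum = 1024 and ysum = 256, Log2Q8(1024) = 10 * 256 = 2560 and Log2Q8(256) / 2 = 8 * 256 / 2 = 1024, so logcorQ8 = 1536, i.e. 6.0 in Q8, and indeed log2(1024 / sqrt(256)) = log2(64) = 6.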
diff --git a/modules/audio_coding/codecs/isac/fix/source/pitch_estimator.h b/modules/audio_coding/codecs/isac/fix/source/pitch_estimator.h
index 93c81c8e..da401e5f 100644
--- a/modules/audio_coding/codecs/isac/fix/source/pitch_estimator.h
+++ b/modules/audio_coding/codecs/isac/fix/source/pitch_estimator.h
@@ -58,4 +58,8 @@ void WebRtcIsacfix_DecimateAllpass32(const int16_t *in,
int16_t N, /* number of input samples */
int16_t *out); /* array of size N/2 */
+int32_t WebRtcIsacfix_Log2Q8( uint32_t x );
+
+void WebRtcIsacfix_PCorr2Q32(const int16_t* in, int32_t* logcorQ8);
+
#endif /* WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_PITCH_ESTIMATOR_H_ */
diff --git a/modules/audio_coding/codecs/isac/fix/source/pitch_estimator_c.c b/modules/audio_coding/codecs/isac/fix/source/pitch_estimator_c.c
new file mode 100644
index 00000000..82155d27
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/pitch_estimator_c.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_estimator.h"
+
+#ifdef WEBRTC_ARCH_ARM_NEON
+#include <arm_neon.h>
+#endif
+
+#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
+#include "webrtc/system_wrappers/interface/compile_assert_c.h"
+
+extern int32_t WebRtcIsacfix_Log2Q8(uint32_t x);
+
+void WebRtcIsacfix_PCorr2Q32(const int16_t* in, int32_t* logcorQ8) {
+ int16_t scaling,n,k;
+ int32_t ysum32,csum32, lys, lcs;
+ int32_t oneQ8;
+ const int16_t* x;
+ const int16_t* inptr;
+
+ oneQ8 = WEBRTC_SPL_LSHIFT_W32((int32_t)1, 8); // 1.00 in Q8
+
+ x = in + PITCH_MAX_LAG / 2 + 2;
+ scaling = WebRtcSpl_GetScalingSquare((int16_t*)in,
+ PITCH_CORR_LEN2,
+ PITCH_CORR_LEN2);
+ ysum32 = 1;
+ csum32 = 0;
+ x = in + PITCH_MAX_LAG / 2 + 2;
+ for (n = 0; n < PITCH_CORR_LEN2; n++) {
+ ysum32 += WEBRTC_SPL_MUL_16_16_RSFT((int16_t)in[n],
+ (int16_t)in[n],
+ scaling); // Q0
+ csum32 += WEBRTC_SPL_MUL_16_16_RSFT((int16_t)x[n],
+ (int16_t)in[n],
+ scaling); // Q0
+ }
+ logcorQ8 += PITCH_LAG_SPAN2 - 1;
+ lys = WebRtcIsacfix_Log2Q8((uint32_t)ysum32); // Q8
+ lys = WEBRTC_SPL_RSHIFT_W32(lys, 1); //sqrt(ysum);
+ if (csum32 > 0) {
+ lcs = WebRtcIsacfix_Log2Q8((uint32_t)csum32); // 2log(csum) in Q8
+ if (lcs > (lys + oneQ8)) { // csum/sqrt(ysum) > 2 in Q8
+ *logcorQ8 = lcs - lys; // log2(csum/sqrt(ysum))
+ } else {
+ *logcorQ8 = oneQ8; // 1.00
+ }
+ } else {
+ *logcorQ8 = 0;
+ }
+
+
+ for (k = 1; k < PITCH_LAG_SPAN2; k++) {
+ inptr = &in[k];
+ ysum32 -= WEBRTC_SPL_MUL_16_16_RSFT((int16_t)in[k - 1],
+ (int16_t)in[k - 1],
+ scaling);
+ ysum32 += WEBRTC_SPL_MUL_16_16_RSFT((int16_t)in[PITCH_CORR_LEN2 + k - 1],
+ (int16_t)in[PITCH_CORR_LEN2 + k - 1],
+ scaling);
+#ifdef WEBRTC_ARCH_ARM_NEON
+ {
+ int32_t vbuff[4];
+ int32x4_t int_32x4_sum = vmovq_n_s32(0);
+ // Can't shift a Neon register to right with a non-constant shift value.
+ int32x4_t int_32x4_scale = vdupq_n_s32(-scaling);
+ // Assert a condition used in loop unrolling at compile-time.
+ COMPILE_ASSERT(PITCH_CORR_LEN2 %4 == 0);
+
+ for (n = 0; n < PITCH_CORR_LEN2; n += 4) {
+ int16x4_t int_16x4_x = vld1_s16(&x[n]);
+ int16x4_t int_16x4_in = vld1_s16(&inptr[n]);
+ int32x4_t int_32x4 = vmull_s16(int_16x4_x, int_16x4_in);
+ int_32x4 = vshlq_s32(int_32x4, int_32x4_scale);
+ int_32x4_sum = vaddq_s32(int_32x4_sum, int_32x4);
+ }
+
+ // Use vector store to avoid long stall from data transferring
+ // from vector to general register.
+ vst1q_s32(vbuff, int_32x4_sum);
+ csum32 = vbuff[0] + vbuff[1];
+ csum32 += vbuff[2];
+ csum32 += vbuff[3];
+ }
+#else
+ csum32 = 0;
+ if(scaling == 0) {
+ for (n = 0; n < PITCH_CORR_LEN2; n++) {
+ csum32 += x[n] * inptr[n];
+ }
+ } else {
+ for (n = 0; n < PITCH_CORR_LEN2; n++) {
+ csum32 += (x[n] * inptr[n]) >> scaling;
+ }
+ }
+#endif
+
+ logcorQ8--;
+
+ lys = WebRtcIsacfix_Log2Q8((uint32_t)ysum32); // Q8
+ lys = WEBRTC_SPL_RSHIFT_W32(lys, 1); //sqrt(ysum);
+
+ if (csum32 > 0) {
+ lcs = WebRtcIsacfix_Log2Q8((uint32_t)csum32); // 2log(csum) in Q8
+ if (lcs > (lys + oneQ8)) { // csum/sqrt(ysum) > 2
+ *logcorQ8 = lcs - lys; // log2(csum/sqrt(ysum))
+ } else {
+ *logcorQ8 = oneQ8; // 1.00
+ }
+ } else {
+ *logcorQ8 = 0;
+ }
+ }
+}
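
The scaling value from WebRtcSpl_GetScalingSquare guards the 32-bit accumulators in this function: with |in[n]| <= 2^15 each product in[n] * in[n] can reach 2^30, and summing PITCH_CORR_LEN2 of them can exceed INT32_MAX, so each product is shifted right by scaling bits before accumulation; the scaling == 0 fast path and the NEON vshlq_s32 with a negative shift implement the same guard. As a rough bound, N products of magnitude up to 2^30 need about 30 + log2(N) bits, so any scaling >= log2(N) - 1 keeps the sum inside a signed 32-bit range; the helper presumably also accounts for the actual peak |in[n]|, so quiet signals need little or no shift.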
diff --git a/modules/audio_coding/codecs/isac/fix/source/pitch_estimator_mips.c b/modules/audio_coding/codecs/isac/fix/source/pitch_estimator_mips.c
new file mode 100644
index 00000000..fa426e98
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/pitch_estimator_mips.c
@@ -0,0 +1,196 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_estimator.h"
+#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
+#include "webrtc/system_wrappers/interface/compile_assert_c.h"
+
+extern int32_t WebRtcIsacfix_Log2Q8(uint32_t x);
+
+void WebRtcIsacfix_PCorr2Q32(const int16_t* in, int32_t* logcorQ8) {
+ int16_t scaling,n,k;
+ int32_t ysum32,csum32, lys, lcs;
+ int32_t oneQ8;
+ const int16_t* x;
+ const int16_t* inptr;
+
+ oneQ8 = WEBRTC_SPL_LSHIFT_W32((int32_t)1, 8); // 1.00 in Q8
+ x = in + PITCH_MAX_LAG / 2 + 2;
+ scaling = WebRtcSpl_GetScalingSquare((int16_t*)in,
+ PITCH_CORR_LEN2,
+ PITCH_CORR_LEN2);
+ ysum32 = 1;
+ csum32 = 0;
+ x = in + PITCH_MAX_LAG / 2 + 2;
+ {
+ const int16_t* tmp_x = x;
+ const int16_t* tmp_in = in;
+ int32_t tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8;
+ n = PITCH_CORR_LEN2;
+ COMPILE_ASSERT(PITCH_CORR_LEN2 % 4 == 0);
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "1: \n\t"
+ "lh %[tmp1], 0(%[tmp_in]) \n\t"
+ "lh %[tmp2], 2(%[tmp_in]) \n\t"
+ "lh %[tmp3], 4(%[tmp_in]) \n\t"
+ "lh %[tmp4], 6(%[tmp_in]) \n\t"
+ "lh %[tmp5], 0(%[tmp_x]) \n\t"
+ "lh %[tmp6], 2(%[tmp_x]) \n\t"
+ "lh %[tmp7], 4(%[tmp_x]) \n\t"
+ "lh %[tmp8], 6(%[tmp_x]) \n\t"
+ "mul %[tmp5], %[tmp1], %[tmp5] \n\t"
+ "mul %[tmp1], %[tmp1], %[tmp1] \n\t"
+ "mul %[tmp6], %[tmp2], %[tmp6] \n\t"
+ "mul %[tmp2], %[tmp2], %[tmp2] \n\t"
+ "mul %[tmp7], %[tmp3], %[tmp7] \n\t"
+ "mul %[tmp3], %[tmp3], %[tmp3] \n\t"
+ "mul %[tmp8], %[tmp4], %[tmp8] \n\t"
+ "mul %[tmp4], %[tmp4], %[tmp4] \n\t"
+ "addiu %[n], %[n], -4 \n\t"
+ "srav %[tmp5], %[tmp5], %[scaling] \n\t"
+ "srav %[tmp1], %[tmp1], %[scaling] \n\t"
+ "srav %[tmp6], %[tmp6], %[scaling] \n\t"
+ "srav %[tmp2], %[tmp2], %[scaling] \n\t"
+ "srav %[tmp7], %[tmp7], %[scaling] \n\t"
+ "srav %[tmp3], %[tmp3], %[scaling] \n\t"
+ "srav %[tmp8], %[tmp8], %[scaling] \n\t"
+ "srav %[tmp4], %[tmp4], %[scaling] \n\t"
+ "addu %[ysum32], %[ysum32], %[tmp1] \n\t"
+ "addu %[csum32], %[csum32], %[tmp5] \n\t"
+ "addu %[ysum32], %[ysum32], %[tmp2] \n\t"
+ "addu %[csum32], %[csum32], %[tmp6] \n\t"
+ "addu %[ysum32], %[ysum32], %[tmp3] \n\t"
+ "addu %[csum32], %[csum32], %[tmp7] \n\t"
+ "addu %[ysum32], %[ysum32], %[tmp4] \n\t"
+ "addu %[csum32], %[csum32], %[tmp8] \n\t"
+ "addiu %[tmp_in], %[tmp_in], 8 \n\t"
+ "bgtz %[n], 1b \n\t"
+ " addiu %[tmp_x], %[tmp_x], 8 \n\t"
+ ".set pop \n\t"
+ : [tmp1] "=&r" (tmp1), [tmp2] "=&r" (tmp2), [tmp3] "=&r" (tmp3),
+ [tmp4] "=&r" (tmp4), [tmp5] "=&r" (tmp5), [tmp6] "=&r" (tmp6),
+ [tmp7] "=&r" (tmp7), [tmp8] "=&r" (tmp8), [tmp_in] "+r" (tmp_in),
+ [ysum32] "+r" (ysum32), [tmp_x] "+r" (tmp_x), [csum32] "+r" (csum32),
+ [n] "+r" (n)
+ : [scaling] "r" (scaling)
+ : "memory", "hi", "lo"
+ );
+ }
+ logcorQ8 += PITCH_LAG_SPAN2 - 1;
+ lys = WebRtcIsacfix_Log2Q8((uint32_t)ysum32); // Q8
+ lys = WEBRTC_SPL_RSHIFT_W32(lys, 1); //sqrt(ysum);
+ if (csum32 > 0) {
+ lcs = WebRtcIsacfix_Log2Q8((uint32_t)csum32); // 2log(csum) in Q8
+ if (lcs > (lys + oneQ8)) { // csum/sqrt(ysum) > 2 in Q8
+ *logcorQ8 = lcs - lys; // log2(csum/sqrt(ysum))
+ } else {
+ *logcorQ8 = oneQ8; // 1.00
+ }
+ } else {
+ *logcorQ8 = 0;
+ }
+
+ for (k = 1; k < PITCH_LAG_SPAN2; k++) {
+ inptr = &in[k];
+ const int16_t* tmp_in1 = &in[k - 1];
+ const int16_t* tmp_in2 = &in[PITCH_CORR_LEN2 + k - 1];
+ const int16_t* tmp_x = x;
+ int32_t tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8;
+ n = PITCH_CORR_LEN2;
+ csum32 = 0;
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "lh %[tmp1], 0(%[tmp_in1]) \n\t"
+ "lh %[tmp2], 0(%[tmp_in2]) \n\t"
+ "mul %[tmp1], %[tmp1], %[tmp1] \n\t"
+ "mul %[tmp2], %[tmp2], %[tmp2] \n\t"
+ "srav %[tmp1], %[tmp1], %[scaling] \n\t"
+ "srav %[tmp2], %[tmp2], %[scaling] \n\t"
+ "subu %[ysum32], %[ysum32], %[tmp1] \n\t"
+ "bnez %[scaling], 2f \n\t"
+ " addu %[ysum32], %[ysum32], %[tmp2] \n\t"
+ "1: \n\t"
+ "lh %[tmp1], 0(%[inptr]) \n\t"
+ "lh %[tmp2], 0(%[tmp_x]) \n\t"
+ "lh %[tmp3], 2(%[inptr]) \n\t"
+ "lh %[tmp4], 2(%[tmp_x]) \n\t"
+ "lh %[tmp5], 4(%[inptr]) \n\t"
+ "lh %[tmp6], 4(%[tmp_x]) \n\t"
+ "lh %[tmp7], 6(%[inptr]) \n\t"
+ "lh %[tmp8], 6(%[tmp_x]) \n\t"
+ "mul %[tmp1], %[tmp1], %[tmp2] \n\t"
+ "mul %[tmp2], %[tmp3], %[tmp4] \n\t"
+ "mul %[tmp3], %[tmp5], %[tmp6] \n\t"
+ "mul %[tmp4], %[tmp7], %[tmp8] \n\t"
+ "addiu %[n], %[n], -4 \n\t"
+ "addiu %[inptr], %[inptr], 8 \n\t"
+ "addiu %[tmp_x], %[tmp_x], 8 \n\t"
+ "addu %[csum32], %[csum32], %[tmp1] \n\t"
+ "addu %[csum32], %[csum32], %[tmp2] \n\t"
+ "addu %[csum32], %[csum32], %[tmp3] \n\t"
+ "bgtz %[n], 1b \n\t"
+ " addu %[csum32], %[csum32], %[tmp4] \n\t"
+ "b 3f \n\t"
+ " nop \n\t"
+ "2: \n\t"
+ "lh %[tmp1], 0(%[inptr]) \n\t"
+ "lh %[tmp2], 0(%[tmp_x]) \n\t"
+ "lh %[tmp3], 2(%[inptr]) \n\t"
+ "lh %[tmp4], 2(%[tmp_x]) \n\t"
+ "lh %[tmp5], 4(%[inptr]) \n\t"
+ "lh %[tmp6], 4(%[tmp_x]) \n\t"
+ "lh %[tmp7], 6(%[inptr]) \n\t"
+ "lh %[tmp8], 6(%[tmp_x]) \n\t"
+ "mul %[tmp1], %[tmp1], %[tmp2] \n\t"
+ "mul %[tmp2], %[tmp3], %[tmp4] \n\t"
+ "mul %[tmp3], %[tmp5], %[tmp6] \n\t"
+ "mul %[tmp4], %[tmp7], %[tmp8] \n\t"
+ "addiu %[n], %[n], -4 \n\t"
+ "addiu %[inptr], %[inptr], 8 \n\t"
+ "addiu %[tmp_x], %[tmp_x], 8 \n\t"
+ "srav %[tmp1], %[tmp1], %[scaling] \n\t"
+ "srav %[tmp2], %[tmp2], %[scaling] \n\t"
+ "srav %[tmp3], %[tmp3], %[scaling] \n\t"
+ "srav %[tmp4], %[tmp4], %[scaling] \n\t"
+ "addu %[csum32], %[csum32], %[tmp1] \n\t"
+ "addu %[csum32], %[csum32], %[tmp2] \n\t"
+ "addu %[csum32], %[csum32], %[tmp3] \n\t"
+ "bgtz %[n], 2b \n\t"
+ " addu %[csum32], %[csum32], %[tmp4] \n\t"
+ "3: \n\t"
+ ".set pop \n\t"
+ : [tmp1] "=&r" (tmp1), [tmp2] "=&r" (tmp2), [tmp3] "=&r" (tmp3),
+ [tmp4] "=&r" (tmp4), [tmp5] "=&r" (tmp5), [tmp6] "=&r" (tmp6),
+ [tmp7] "=&r" (tmp7), [tmp8] "=&r" (tmp8), [inptr] "+r" (inptr),
+ [csum32] "+r" (csum32), [tmp_x] "+r" (tmp_x), [ysum32] "+r" (ysum32),
+ [n] "+r" (n)
+ : [tmp_in1] "r" (tmp_in1), [tmp_in2] "r" (tmp_in2),
+ [scaling] "r" (scaling)
+ : "memory", "hi", "lo"
+ );
+
+ logcorQ8--;
+ lys = WebRtcIsacfix_Log2Q8((uint32_t)ysum32); // Q8
+ lys = WEBRTC_SPL_RSHIFT_W32(lys, 1); //sqrt(ysum);
+ if (csum32 > 0) {
+ lcs = WebRtcIsacfix_Log2Q8((uint32_t)csum32); // 2log(csum) in Q8
+ if (lcs > (lys + oneQ8)) { // csum/sqrt(ysum) > 2
+ *logcorQ8 = lcs - lys; // log2(csum/sqrt(ysum))
+ } else {
+ *logcorQ8 = oneQ8; // 1.00
+ }
+ } else {
+ *logcorQ8 = 0;
+ }
+ }
+}
diff --git a/modules/audio_coding/codecs/isac/fix/source/transform.c b/modules/audio_coding/codecs/isac/fix/source/transform.c
index 67e513c7..24ccc821 100644
--- a/modules/audio_coding/codecs/isac/fix/source/transform.c
+++ b/modules/audio_coding/codecs/isac/fix/source/transform.c
@@ -19,89 +19,13 @@
#include "webrtc/modules/audio_coding/codecs/isac/fix/source/fft.h"
#include "webrtc/modules/audio_coding/codecs/isac/fix/source/settings.h"
-#if (defined WEBRTC_DETECT_ARM_NEON || defined WEBRTC_ARCH_ARM_NEON)
-/* Tables are defined in ARM assembly files. */
+/* Tables are defined in transform_tables.c file or ARM assembly files. */
/* Cosine table 1 in Q14 */
extern const int16_t WebRtcIsacfix_kCosTab1[FRAMESAMPLES/2];
/* Sine table 1 in Q14 */
extern const int16_t WebRtcIsacfix_kSinTab1[FRAMESAMPLES/2];
/* Sine table 2 in Q14 */
extern const int16_t WebRtcIsacfix_kSinTab2[FRAMESAMPLES/4];
-#else
-/* Cosine table 1 in Q14 */
-static const int16_t WebRtcIsacfix_kCosTab1[FRAMESAMPLES/2] = {
- 16384, 16383, 16378, 16371, 16362, 16349, 16333, 16315, 16294, 16270,
- 16244, 16214, 16182, 16147, 16110, 16069, 16026, 15980, 15931, 15880,
- 15826, 15769, 15709, 15647, 15582, 15515, 15444, 15371, 15296, 15218,
- 15137, 15053, 14968, 14879, 14788, 14694, 14598, 14500, 14399, 14295,
- 14189, 14081, 13970, 13856, 13741, 13623, 13502, 13380, 13255, 13128,
- 12998, 12867, 12733, 12597, 12458, 12318, 12176, 12031, 11885, 11736,
- 11585, 11433, 11278, 11121, 10963, 10803, 10641, 10477, 10311, 10143,
- 9974, 9803, 9630, 9456, 9280, 9102, 8923, 8743, 8561, 8377,
- 8192, 8006, 7818, 7629, 7438, 7246, 7053, 6859, 6664, 6467,
- 6270, 6071, 5872, 5671, 5469, 5266, 5063, 4859, 4653, 4447,
- 4240, 4033, 3825, 3616, 3406, 3196, 2986, 2775, 2563, 2351,
- 2139, 1926, 1713, 1499, 1285, 1072, 857, 643, 429, 214,
- 0, -214, -429, -643, -857, -1072, -1285, -1499, -1713, -1926,
- -2139, -2351, -2563, -2775, -2986, -3196, -3406, -3616, -3825, -4033,
- -4240, -4447, -4653, -4859, -5063, -5266, -5469, -5671, -5872, -6071,
- -6270, -6467, -6664, -6859, -7053, -7246, -7438, -7629, -7818, -8006,
- -8192, -8377, -8561, -8743, -8923, -9102, -9280, -9456, -9630, -9803,
- -9974, -10143, -10311, -10477, -10641, -10803, -10963, -11121, -11278, -11433,
- -11585, -11736, -11885, -12031, -12176, -12318, -12458, -12597, -12733,
- -12867, -12998, -13128, -13255, -13380, -13502, -13623, -13741, -13856,
- -13970, -14081, -14189, -14295, -14399, -14500, -14598, -14694, -14788,
- -14879, -14968, -15053, -15137, -15218, -15296, -15371, -15444, -15515,
- -15582, -15647, -15709, -15769, -15826, -15880, -15931, -15980, -16026,
- -16069, -16110, -16147, -16182, -16214, -16244, -16270, -16294, -16315,
- -16333, -16349, -16362, -16371, -16378, -16383
-};
-
-/* Sine table 1 in Q14 */
-static const int16_t WebRtcIsacfix_kSinTab1[FRAMESAMPLES/2] = {
- 0, 214, 429, 643, 857, 1072, 1285, 1499, 1713, 1926,
- 2139, 2351, 2563, 2775, 2986, 3196, 3406, 3616, 3825, 4033,
- 4240, 4447, 4653, 4859, 5063, 5266, 5469, 5671, 5872, 6071,
- 6270, 6467, 6664, 6859, 7053, 7246, 7438, 7629, 7818, 8006,
- 8192, 8377, 8561, 8743, 8923, 9102, 9280, 9456, 9630, 9803,
- 9974, 10143, 10311, 10477, 10641, 10803, 10963, 11121, 11278, 11433,
- 11585, 11736, 11885, 12031, 12176, 12318, 12458, 12597, 12733, 12867,
- 12998, 13128, 13255, 13380, 13502, 13623, 13741, 13856, 13970, 14081,
- 14189, 14295, 14399, 14500, 14598, 14694, 14788, 14879, 14968, 15053,
- 15137, 15218, 15296, 15371, 15444, 15515, 15582, 15647, 15709, 15769,
- 15826, 15880, 15931, 15980, 16026, 16069, 16110, 16147, 16182, 16214,
- 16244, 16270, 16294, 16315, 16333, 16349, 16362, 16371, 16378, 16383,
- 16384, 16383, 16378, 16371, 16362, 16349, 16333, 16315, 16294, 16270,
- 16244, 16214, 16182, 16147, 16110, 16069, 16026, 15980, 15931, 15880,
- 15826, 15769, 15709, 15647, 15582, 15515, 15444, 15371, 15296, 15218,
- 15137, 15053, 14968, 14879, 14788, 14694, 14598, 14500, 14399, 14295,
- 14189, 14081, 13970, 13856, 13741, 13623, 13502, 13380, 13255, 13128,
- 12998, 12867, 12733, 12597, 12458, 12318, 12176, 12031, 11885, 11736,
- 11585, 11433, 11278, 11121, 10963, 10803, 10641, 10477, 10311, 10143,
- 9974, 9803, 9630, 9456, 9280, 9102, 8923, 8743, 8561, 8377,
- 8192, 8006, 7818, 7629, 7438, 7246, 7053, 6859, 6664, 6467,
- 6270, 6071, 5872, 5671, 5469, 5266, 5063, 4859, 4653, 4447,
- 4240, 4033, 3825, 3616, 3406, 3196, 2986, 2775, 2563, 2351,
- 2139, 1926, 1713, 1499, 1285, 1072, 857, 643, 429, 214
-};
-
-
-/* Sine table 2 in Q14 */
-static const int16_t WebRtcIsacfix_kSinTab2[FRAMESAMPLES/4] = {
- 16384, -16381, 16375, -16367, 16356, -16342, 16325, -16305, 16283, -16257,
- 16229, -16199, 16165, -16129, 16090, -16048, 16003, -15956, 15906, -15853,
- 15798, -15739, 15679, -15615, 15549, -15480, 15408, -15334, 15257, -15178,
- 15095, -15011, 14924, -14834, 14741, -14647, 14549, -14449, 14347, -14242,
- 14135, -14025, 13913, -13799, 13682, -13563, 13441, -13318, 13192, -13063,
- 12933, -12800, 12665, -12528, 12389, -12247, 12104, -11958, 11810, -11661,
- 11509, -11356, 11200, -11042, 10883, -10722, 10559, -10394, 10227, -10059,
- 9889, -9717, 9543, -9368, 9191, -9013, 8833, -8652, 8469, -8285,
- 8099, -7912, 7723, -7534, 7342, -7150, 6957, -6762, 6566, -6369,
- 6171, -5971, 5771, -5570, 5368, -5165, 4961, -4756, 4550, -4344,
- 4137, -3929, 3720, -3511, 3301, -3091, 2880, -2669, 2457, -2245,
- 2032, -1819, 1606, -1392, 1179, -965, 750, -536, 322, -107
-};
-#endif // WEBRTC_DETECT_ARM_NEON || WEBRTC_ARCH_ARM_NEON
void WebRtcIsacfix_Time2SpecC(int16_t *inre1Q9,
int16_t *inre2Q9,
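
With this change the Q14 trigonometric tables live in a single translation unit, transform_tables.c, shared by the C, ARM, and MIPS backends instead of being duplicated per platform. Each kCosTab1 entry is round(2^14 * cos(2 * pi * k / FRAMESAMPLES)); a generation sketch, assuming FRAMESAMPLES == 480 as in fixed-point iSAC (illustrative, not the project's actual table generator):

#include <math.h>
#include <stdint.h>

/* Fills tab with Q14 cosine samples; reproduces the values of
   WebRtcIsacfix_kCosTab1 listed in the removed block above
   (e.g. k = 1 -> 16384 * cos(2*pi/480) = 16382.6 -> 16383). */
static void GenCosTab1Q14(int16_t tab[240]) {
  for (int k = 0; k < 240; ++k) {
    tab[k] = (int16_t)lround(16384.0 * cos(2.0 * M_PI * k / 480.0));
  }
}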
diff --git a/modules/audio_coding/codecs/isac/fix/source/transform_mips.c b/modules/audio_coding/codecs/isac/fix/source/transform_mips.c
new file mode 100644
index 00000000..bf95ee57
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/transform_mips.c
@@ -0,0 +1,1287 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_coding/codecs/isac/fix/source/codec.h"
+#include "webrtc/modules/audio_coding/codecs/isac/fix/source/fft.h"
+#include "webrtc/modules/audio_coding/codecs/isac/fix/source/settings.h"
+
+// The tables are defined in transform_tables.c file.
+extern const int16_t WebRtcIsacfix_kCosTab1[FRAMESAMPLES/2];
+extern const int16_t WebRtcIsacfix_kSinTab1[FRAMESAMPLES/2];
+extern const int16_t WebRtcIsacfix_kCosTab2[FRAMESAMPLES/4];
+extern const int16_t WebRtcIsacfix_kSinTab2[FRAMESAMPLES/4];
+
+// MIPS DSPr2 version of the WebRtcIsacfix_Time2Spec function
+// is not bit-exact with the C version.
+// The accuracy of the MIPS DSPr2 version is the same or better.
+void WebRtcIsacfix_Time2SpecMIPS(int16_t* inre1Q9,
+ int16_t* inre2Q9,
+ int16_t* outreQ7,
+ int16_t* outimQ7) {
+ int k = FRAMESAMPLES / 2;
+ int32_t tmpreQ16[FRAMESAMPLES / 2], tmpimQ16[FRAMESAMPLES / 2];
+ int32_t r0, r1, r2, r3, r4, r5, r6, r7, r8, r9;
+ int32_t inre1, inre2, tmpre, tmpim, factor, max, max1;
+ int16_t* cosptr;
+ int16_t* sinptr;
+
+ cosptr = (int16_t*)WebRtcIsacfix_kCosTab1;
+ sinptr = (int16_t*)WebRtcIsacfix_kSinTab1;
+
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "addiu %[inre1], %[inre1Q9], 0 \n\t"
+ "addiu %[inre2], %[inre2Q9], 0 \n\t"
+ "addiu %[tmpre], %[tmpreQ16], 0 \n\t"
+ "addiu %[tmpim], %[tmpimQ16], 0 \n\t"
+ "addiu %[factor], $zero, 16921 \n\t"
+ "mul %[max], $zero, $zero \n\t"
+ // Multiply with complex exponentials and combine into one complex vector.
+ // Also, calculate the maximal absolute value in the same loop.
+ "1: \n\t"
+#if defined(MIPS_DSP_R2_LE)
+ "lwl %[r0], 0(%[inre1]) \n\t"
+ "lwl %[r2], 0(%[cosptr]) \n\t"
+ "lwl %[r3], 0(%[sinptr]) \n\t"
+ "lwl %[r1], 0(%[inre2]) \n\t"
+ "lwr %[r0], 0(%[inre1]) \n\t"
+ "lwr %[r2], 0(%[cosptr]) \n\t"
+ "lwr %[r3], 0(%[sinptr]) \n\t"
+ "lwr %[r1], 0(%[inre2]) \n\t"
+ "muleq_s.w.phr %[r4], %[r2], %[r0] \n\t"
+ "muleq_s.w.phr %[r5], %[r3], %[r0] \n\t"
+ "muleq_s.w.phr %[r6], %[r3], %[r1] \n\t"
+ "muleq_s.w.phr %[r7], %[r2], %[r1] \n\t"
+ "muleq_s.w.phl %[r8], %[r2], %[r0] \n\t"
+ "muleq_s.w.phl %[r0], %[r3], %[r0] \n\t"
+ "muleq_s.w.phl %[r3], %[r3], %[r1] \n\t"
+ "muleq_s.w.phl %[r1], %[r2], %[r1] \n\t"
+ "addiu %[k], %[k], -2 \n\t"
+ "addu %[r4], %[r4], %[r6] \n\t"
+ "subu %[r5], %[r7], %[r5] \n\t"
+ "sra %[r4], %[r4], 8 \n\t"
+ "sra %[r5], %[r5], 8 \n\t"
+ "mult $ac0, %[factor], %[r4] \n\t"
+ "mult $ac1, %[factor], %[r5] \n\t"
+ "addu %[r3], %[r8], %[r3] \n\t"
+ "subu %[r0], %[r1], %[r0] \n\t"
+ "sra %[r3], %[r3], 8 \n\t"
+ "sra %[r0], %[r0], 8 \n\t"
+ "mult $ac2, %[factor], %[r3] \n\t"
+ "mult $ac3, %[factor], %[r0] \n\t"
+ "extr_r.w %[r4], $ac0, 16 \n\t"
+ "extr_r.w %[r5], $ac1, 16 \n\t"
+ "addiu %[inre1], %[inre1], 4 \n\t"
+ "addiu %[inre2], %[inre2], 4 \n\t"
+ "extr_r.w %[r6], $ac2, 16 \n\t"
+ "extr_r.w %[r7], $ac3, 16 \n\t"
+ "addiu %[cosptr], %[cosptr], 4 \n\t"
+ "addiu %[sinptr], %[sinptr], 4 \n\t"
+ "shra_r.w %[r4], %[r4], 3 \n\t"
+ "shra_r.w %[r5], %[r5], 3 \n\t"
+ "sw %[r4], 0(%[tmpre]) \n\t"
+ "absq_s.w %[r4], %[r4] \n\t"
+ "sw %[r5], 0(%[tmpim]) \n\t"
+ "absq_s.w %[r5], %[r5] \n\t"
+ "shra_r.w %[r6], %[r6], 3 \n\t"
+ "shra_r.w %[r7], %[r7], 3 \n\t"
+ "sw %[r6], 4(%[tmpre]) \n\t"
+ "absq_s.w %[r6], %[r6] \n\t"
+ "sw %[r7], 4(%[tmpim]) \n\t"
+ "absq_s.w %[r7], %[r7] \n\t"
+ "slt %[r0], %[r4], %[r5] \n\t"
+ "movn %[r4], %[r5], %[r0] \n\t"
+ "slt %[r1], %[r6], %[r7] \n\t"
+ "movn %[r6], %[r7], %[r1] \n\t"
+ "slt %[r0], %[max], %[r4] \n\t"
+ "movn %[max], %[r4], %[r0] \n\t"
+ "slt %[r1], %[max], %[r6] \n\t"
+ "movn %[max], %[r6], %[r1] \n\t"
+ "addiu %[tmpre], %[tmpre], 8 \n\t"
+ "bgtz %[k], 1b \n\t"
+ " addiu %[tmpim], %[tmpim], 8 \n\t"
+#else // #if defined(MIPS_DSP_R2_LE)
+ "lh %[r0], 0(%[inre1]) \n\t"
+ "lh %[r1], 0(%[inre2]) \n\t"
+ "lh %[r2], 0(%[cosptr]) \n\t"
+ "lh %[r3], 0(%[sinptr]) \n\t"
+ "addiu %[k], %[k], -1 \n\t"
+ "mul %[r4], %[r0], %[r2] \n\t"
+ "mul %[r5], %[r1], %[r3] \n\t"
+ "mul %[r0], %[r0], %[r3] \n\t"
+ "mul %[r2], %[r1], %[r2] \n\t"
+ "addiu %[inre1], %[inre1], 2 \n\t"
+ "addiu %[inre2], %[inre2], 2 \n\t"
+ "addiu %[cosptr], %[cosptr], 2 \n\t"
+ "addiu %[sinptr], %[sinptr], 2 \n\t"
+ "addu %[r1], %[r4], %[r5] \n\t"
+ "sra %[r1], %[r1], 7 \n\t"
+ "sra %[r3], %[r1], 16 \n\t"
+ "andi %[r1], %[r1], 0xFFFF \n\t"
+ "sra %[r1], %[r1], 1 \n\t"
+ "mul %[r1], %[factor], %[r1] \n\t"
+ "mul %[r3], %[factor], %[r3] \n\t"
+ "subu %[r0], %[r2], %[r0] \n\t"
+ "sra %[r0], %[r0], 7 \n\t"
+ "sra %[r2], %[r0], 16 \n\t"
+ "andi %[r0], %[r0], 0xFFFF \n\t"
+ "sra %[r0], %[r0], 1 \n\t"
+ "mul %[r0], %[factor], %[r0] \n\t"
+ "mul %[r2], %[factor], %[r2] \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r1], %[r1], 15 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r1], %[r1], 0x4000 \n\t"
+ "sra %[r1], %[r1], 15 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "addu %[r1], %[r3], %[r1] \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r1], %[r1], 3 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r1], %[r1], 4 \n\t"
+ "sra %[r1], %[r1], 3 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "sw %[r1], 0(%[tmpre]) \n\t"
+ "addiu %[tmpre], %[tmpre], 4 \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "absq_s.w %[r1], %[r1] \n\t"
+ "shra_r.w %[r0], %[r0], 15 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "negu %[r4], %[r1] \n\t"
+ "slt %[r3], %[r1], $zero \n\t"
+ "movn %[r1], %[r4], %[r3] \n\t"
+ "addiu %[r0], %[r0], 0x4000 \n\t"
+ "sra %[r0], %[r0], 15 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "addu %[r0], %[r0], %[r2] \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r0], %[r0], 3 \n\t"
+ "sw %[r0], 0(%[tmpim]) \n\t"
+ "absq_s.w %[r0], %[r0] \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r0], %[r0], 4 \n\t"
+ "sra %[r0], %[r0], 3 \n\t"
+ "sw %[r0], 0(%[tmpim]) \n\t"
+ "negu %[r2], %[r0] \n\t"
+ "slt %[r3], %[r0], $zero \n\t"
+ "movn %[r0], %[r2], %[r3] \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "slt %[r2], %[max], %[r1] \n\t"
+ "movn %[max], %[r1], %[r2] \n\t"
+ "slt %[r2], %[max], %[r0] \n\t"
+ "movn %[max], %[r0], %[r2] \n\t"
+ "bgtz %[k], 1b \n\t"
+ " addiu %[tmpim], %[tmpim], 4 \n\t"
+#endif // #if defined(MIPS_DSP_R2_LE)
+    // Calculate WebRtcSpl_NormW32(max).
+    // If max ends up >= 0, we should shift max steps to the left, and the
+    // domain will be Q(16+max). If max ends up < 0, we should shift -max
+    // steps to the right, and the domain will again be Q(16+max).
+ "clz %[max], %[max] \n\t"
+ "addiu %[max], %[max], -25 \n\t"
+ ".set pop \n\t"
+ : [k] "+r" (k), [inre1] "=&r" (inre1), [inre2] "=&r" (inre2),
+ [r0] "=&r" (r0), [r1] "=&r" (r1), [r2] "=&r" (r2),
+ [r3] "=&r" (r3), [r4] "=&r" (r4), [tmpre] "=&r" (tmpre),
+ [tmpim] "=&r" (tmpim), [max] "=&r" (max), [factor] "=&r" (factor),
+#if defined(MIPS_DSP_R2_LE)
+ [r6] "=&r" (r6), [r7] "=&r" (r7), [r8] "=&r" (r8),
+#endif // #if defined(MIPS_DSP_R2_LE)
+ [r5] "=&r" (r5)
+ : [inre1Q9] "r" (inre1Q9), [inre2Q9] "r" (inre2Q9),
+ [tmpreQ16] "r" (tmpreQ16), [tmpimQ16] "r" (tmpimQ16),
+ [cosptr] "r" (cosptr), [sinptr] "r" (sinptr)
+    : "hi", "lo", "memory"
+#if defined(MIPS_DSP_R2_LE)
+      , "$ac1hi", "$ac1lo", "$ac2hi", "$ac2lo", "$ac3hi", "$ac3lo"
+#endif // #if defined(MIPS_DSP_R2_LE)
+  );
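
For readers following the fixed-point bookkeeping, here is a C sketch of the per-sample math in the loop above, mirroring the shifts in the non-DSP path. The helper name and the Q-format annotations are our assumptions; 16921 is the factor loaded above. This is a sketch, not the reference implementation (the file itself notes the versions are not bit-exact).

#include <stdint.h>

// Hypothetical C equivalent of the pre-twiddle loop above: inputs are Q9,
// twiddles Q14, and the outputs land in Q16 after scaling by factor = 16921.
static void Time2SpecPreTwiddle(const int16_t* inre1Q9,
                                const int16_t* inre2Q9,
                                const int16_t* cosQ14,
                                const int16_t* sinQ14,
                                int32_t* tmpreQ16,
                                int32_t* tmpimQ16) {
  int k;
  for (k = 0; k < FRAMESAMPLES / 2; k++) {
    // Q9 * Q14 = Q23; >> 7 lands in Q16.
    int32_t xr = ((int32_t)inre1Q9[k] * cosQ14[k] +
                  (int32_t)inre2Q9[k] * sinQ14[k]) >> 7;
    int32_t xi = ((int32_t)inre2Q9[k] * cosQ14[k] -
                  (int32_t)inre1Q9[k] * sinQ14[k]) >> 7;
    // Scale by 16921 (assumed to be 0.5/sqrt(240) in Q19), with rounding.
    tmpreQ16[k] = (int32_t)(((int64_t)16921 * xr + (1 << 18)) >> 19);
    tmpimQ16[k] = (int32_t)(((int64_t)16921 * xi + (1 << 18)) >> 19);
  }
}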
+
+ // "Fastest" vectors
+ k = FRAMESAMPLES / 4;
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "addiu %[tmpre], %[tmpreQ16], 0 \n\t"
+ "addiu %[tmpim], %[tmpimQ16], 0 \n\t"
+ "addiu %[inre1], %[inre1Q9], 0 \n\t"
+ "addiu %[inre2], %[inre2Q9], 0 \n\t"
+    "bltz       %[max],     2f                             \n\t"
+ " subu %[max1], $zero, %[max] \n\t"
+ "1: \n\t"
+ "lw %[r0], 0(%[tmpre]) \n\t"
+ "lw %[r1], 0(%[tmpim]) \n\t"
+ "lw %[r2], 4(%[tmpre]) \n\t"
+ "lw %[r3], 4(%[tmpim]) \n\t"
+ "addiu %[k], %[k], -1 \n\t"
+ "sllv %[r0], %[r0], %[max] \n\t"
+ "sllv %[r1], %[r1], %[max] \n\t"
+ "sllv %[r2], %[r2], %[max] \n\t"
+ "sllv %[r3], %[r3], %[max] \n\t"
+ "addiu %[tmpre], %[tmpre], 8 \n\t"
+ "addiu %[tmpim], %[tmpim], 8 \n\t"
+ "sh %[r0], 0(%[inre1]) \n\t"
+ "sh %[r1], 0(%[inre2]) \n\t"
+ "sh %[r2], 2(%[inre1]) \n\t"
+ "sh %[r3], 2(%[inre2]) \n\t"
+ "addiu %[inre1], %[inre1], 4 \n\t"
+ "bgtz %[k], 1b \n\t"
+ " addiu %[inre2], %[inre2], 4 \n\t"
+ "b 4f \n\t"
+ " nop \n\t"
+ "2: \n\t"
+#if !defined(MIPS_DSP_R1_LE)
+ "addiu %[r4], %[max1], -1 \n\t"
+ "addiu %[r5], $zero, 1 \n\t"
+ "sllv %[r4], %[r5], %[r4] \n\t"
+#endif // #if !defined(MIPS_DSP_R1_LE)
+ "3: \n\t"
+ "lw %[r0], 0(%[tmpre]) \n\t"
+ "lw %[r1], 0(%[tmpim]) \n\t"
+ "lw %[r2], 4(%[tmpre]) \n\t"
+ "lw %[r3], 4(%[tmpim]) \n\t"
+ "addiu %[k], %[k], -1 \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shrav_r.w %[r0], %[r0], %[max1] \n\t"
+ "shrav_r.w %[r1], %[r1], %[max1] \n\t"
+ "shrav_r.w %[r2], %[r2], %[max1] \n\t"
+ "shrav_r.w %[r3], %[r3], %[max1] \n\t"
+#else // #if !defined(MIPS_DSP_R1_LE)
+ "addu %[r0], %[r0], %[r4] \n\t"
+ "addu %[r1], %[r1], %[r4] \n\t"
+ "addu %[r2], %[r2], %[r4] \n\t"
+ "addu %[r3], %[r3], %[r4] \n\t"
+ "srav %[r0], %[r0], %[max1] \n\t"
+ "srav %[r1], %[r1], %[max1] \n\t"
+ "srav %[r2], %[r2], %[max1] \n\t"
+ "srav %[r3], %[r3], %[max1] \n\t"
+#endif // #if !defined(MIPS_DSP_R1_LE)
+ "addiu %[tmpre], %[tmpre], 8 \n\t"
+ "addiu %[tmpim], %[tmpim], 8 \n\t"
+ "sh %[r0], 0(%[inre1]) \n\t"
+ "sh %[r1], 0(%[inre2]) \n\t"
+ "sh %[r2], 2(%[inre1]) \n\t"
+ "sh %[r3], 2(%[inre2]) \n\t"
+ "addiu %[inre1], %[inre1], 4 \n\t"
+ "bgtz %[k], 3b \n\t"
+ " addiu %[inre2], %[inre2], 4 \n\t"
+ "4: \n\t"
+ ".set pop \n\t"
+ : [tmpre] "=&r" (tmpre), [tmpim] "=&r" (tmpim), [inre1] "=&r" (inre1),
+ [inre2] "=&r" (inre2), [k] "+r" (k), [max1] "=&r" (max1),
+#if !defined(MIPS_DSP_R1_LE)
+ [r4] "=&r" (r4), [r5] "=&r" (r5),
+#endif // #if !defined(MIPS_DSP_R1_LE)
+ [r0] "=&r" (r0), [r1] "=&r" (r1), [r2] "=&r" (r2), [r3] "=&r" (r3)
+ : [tmpreQ16] "r" (tmpreQ16), [tmpimQ16] "r" (tmpimQ16),
+ [inre1Q9] "r" (inre1Q9), [inre2Q9] "r" (inre2Q9), [max] "r" (max)
+ : "memory"
+ );
+
+ // Get DFT
+ WebRtcIsacfix_FftRadix16Fastest(inre1Q9, inre2Q9, -1); // real call
+
+  // The "Fastest" vector processing, the use of symmetry to separate into
+  // two complex vectors, and the centering of frames in time around zero
+  // are all merged into one loop.
+ cosptr = (int16_t*)WebRtcIsacfix_kCosTab2;
+ sinptr = (int16_t*)WebRtcIsacfix_kSinTab2;
+ k = FRAMESAMPLES / 4;
+  factor = FRAMESAMPLES - 2; // Byte offset of array member FRAMESAMPLES / 2 - 1.
+
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "addiu %[inre1], %[inre1Q9], 0 \n\t"
+ "addiu %[inre2], %[inre2Q9], 0 \n\t"
+ "addiu %[tmpre], %[outreQ7], 0 \n\t"
+ "addiu %[tmpim], %[outimQ7], 0 \n\t"
+ "bltz %[max], 2f \n\t"
+ " subu %[max1], $zero, %[max] \n\t"
+ "1: \n\t"
+#if !defined(MIPS_DSP_R1_LE)
+ "addu %[r4], %[inre1], %[offset] \n\t"
+ "addu %[r5], %[inre2], %[offset] \n\t"
+#endif // #if !defined(MIPS_DSP_R1_LE)
+ "lh %[r0], 0(%[inre1]) \n\t"
+ "lh %[r1], 0(%[inre2]) \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "lhx %[r2], %[offset](%[inre1]) \n\t"
+ "lhx %[r3], %[offset](%[inre2]) \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "lh %[r2], 0(%[r4]) \n\t"
+ "lh %[r3], 0(%[r5]) \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "srav %[r0], %[r0], %[max] \n\t"
+ "srav %[r1], %[r1], %[max] \n\t"
+ "srav %[r2], %[r2], %[max] \n\t"
+ "srav %[r3], %[r3], %[max] \n\t"
+ "addu %[r4], %[r0], %[r2] \n\t"
+ "subu %[r0], %[r2], %[r0] \n\t"
+ "subu %[r2], %[r1], %[r3] \n\t"
+ "addu %[r1], %[r1], %[r3] \n\t"
+ "lh %[r3], 0(%[cosptr]) \n\t"
+ "lh %[r5], 0(%[sinptr]) \n\t"
+ "andi %[r6], %[r4], 0xFFFF \n\t"
+ "sra %[r4], %[r4], 16 \n\t"
+ "mul %[r7], %[r3], %[r6] \n\t"
+ "mul %[r8], %[r3], %[r4] \n\t"
+ "mul %[r6], %[r5], %[r6] \n\t"
+ "mul %[r4], %[r5], %[r4] \n\t"
+ "addiu %[k], %[k], -1 \n\t"
+ "addiu %[inre1], %[inre1], 2 \n\t"
+ "addiu %[inre2], %[inre2], 2 \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r7], %[r7], 14 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r7], %[r7], 0x2000 \n\t"
+ "sra %[r7], %[r7], 14 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "sll %[r8], %[r8], 2 \n\t"
+ "addu %[r8], %[r8], %[r7] \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r6], %[r6], 14 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r6], %[r6], 0x2000 \n\t"
+ "sra %[r6], %[r6], 14 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "sll %[r4], %[r4], 2 \n\t"
+ "addu %[r4], %[r4], %[r6] \n\t"
+ "andi %[r6], %[r2], 0xFFFF \n\t"
+ "sra %[r2], %[r2], 16 \n\t"
+ "mul %[r7], %[r5], %[r6] \n\t"
+ "mul %[r9], %[r5], %[r2] \n\t"
+ "mul %[r6], %[r3], %[r6] \n\t"
+ "mul %[r2], %[r3], %[r2] \n\t"
+ "addiu %[cosptr], %[cosptr], 2 \n\t"
+ "addiu %[sinptr], %[sinptr], 2 \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r7], %[r7], 14 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r7], %[r7], 0x2000 \n\t"
+ "sra %[r7], %[r7], 14 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "sll %[r9], %[r9], 2 \n\t"
+ "addu %[r9], %[r7], %[r9] \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r6], %[r6], 14 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r6], %[r6], 0x2000 \n\t"
+ "sra %[r6], %[r6], 14 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "sll %[r2], %[r2], 2 \n\t"
+ "addu %[r2], %[r6], %[r2] \n\t"
+ "subu %[r8], %[r8], %[r9] \n\t"
+ "sra %[r8], %[r8], 9 \n\t"
+ "addu %[r2], %[r4], %[r2] \n\t"
+ "sra %[r2], %[r2], 9 \n\t"
+ "sh %[r8], 0(%[tmpre]) \n\t"
+ "sh %[r2], 0(%[tmpim]) \n\t"
+
+ "andi %[r4], %[r1], 0xFFFF \n\t"
+ "sra %[r1], %[r1], 16 \n\t"
+ "andi %[r6], %[r0], 0xFFFF \n\t"
+ "sra %[r0], %[r0], 16 \n\t"
+ "mul %[r7], %[r5], %[r4] \n\t"
+ "mul %[r9], %[r5], %[r1] \n\t"
+ "mul %[r4], %[r3], %[r4] \n\t"
+ "mul %[r1], %[r3], %[r1] \n\t"
+ "mul %[r8], %[r3], %[r0] \n\t"
+ "mul %[r3], %[r3], %[r6] \n\t"
+ "mul %[r6], %[r5], %[r6] \n\t"
+ "mul %[r0], %[r5], %[r0] \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r7], %[r7], 14 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r7], %[r7], 0x2000 \n\t"
+ "sra %[r7], %[r7], 14 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "sll %[r9], %[r9], 2 \n\t"
+ "addu %[r9], %[r9], %[r7] \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r4], %[r4], 14 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r4], %[r4], 0x2000 \n\t"
+ "sra %[r4], %[r4], 14 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "sll %[r1], %[r1], 2 \n\t"
+ "addu %[r1], %[r1], %[r4] \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r3], %[r3], 14 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r3], %[r3], 0x2000 \n\t"
+ "sra %[r3], %[r3], 14 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "sll %[r8], %[r8], 2 \n\t"
+ "addu %[r8], %[r8], %[r3] \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r6], %[r6], 14 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r6], %[r6], 0x2000 \n\t"
+ "sra %[r6], %[r6], 14 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "sll %[r0], %[r0], 2 \n\t"
+ "addu %[r0], %[r0], %[r6] \n\t"
+ "addu %[r3], %[tmpre], %[offset] \n\t"
+ "addu %[r2], %[tmpim], %[offset] \n\t"
+ "addu %[r9], %[r9], %[r8] \n\t"
+ "negu %[r9], %[r9] \n\t"
+ "sra %[r9], %[r9], 9 \n\t"
+    "subu       %[r0],      %[r0],        %[r1]          \n\t"
+    "sra        %[r0],      %[r0],        9              \n\t"
+ "addiu %[offset], %[offset], -4 \n\t"
+ "sh %[r9], 0(%[r3]) \n\t"
+ "sh %[r0], 0(%[r2]) \n\t"
+ "addiu %[tmpre], %[tmpre], 2 \n\t"
+ "bgtz %[k], 1b \n\t"
+ " addiu %[tmpim], %[tmpim], 2 \n\t"
+ "b 3f \n\t"
+ " nop \n\t"
+ "2: \n\t"
+#if !defined(MIPS_DSP_R1_LE)
+ "addu %[r4], %[inre1], %[offset] \n\t"
+ "addu %[r5], %[inre2], %[offset] \n\t"
+#endif // #if !defined(MIPS_DSP_R1_LE)
+ "lh %[r0], 0(%[inre1]) \n\t"
+ "lh %[r1], 0(%[inre2]) \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "lhx %[r2], %[offset](%[inre1]) \n\t"
+ "lhx %[r3], %[offset](%[inre2]) \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "lh %[r2], 0(%[r4]) \n\t"
+ "lh %[r3], 0(%[r5]) \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "sllv %[r0], %[r0], %[max1] \n\t"
+ "sllv %[r1], %[r1], %[max1] \n\t"
+ "sllv %[r2], %[r2], %[max1] \n\t"
+ "sllv %[r3], %[r3], %[max1] \n\t"
+ "addu %[r4], %[r0], %[r2] \n\t"
+ "subu %[r0], %[r2], %[r0] \n\t"
+ "subu %[r2], %[r1], %[r3] \n\t"
+ "addu %[r1], %[r1], %[r3] \n\t"
+ "lh %[r3], 0(%[cosptr]) \n\t"
+ "lh %[r5], 0(%[sinptr]) \n\t"
+ "andi %[r6], %[r4], 0xFFFF \n\t"
+ "sra %[r4], %[r4], 16 \n\t"
+ "mul %[r7], %[r3], %[r6] \n\t"
+ "mul %[r8], %[r3], %[r4] \n\t"
+ "mul %[r6], %[r5], %[r6] \n\t"
+ "mul %[r4], %[r5], %[r4] \n\t"
+ "addiu %[k], %[k], -1 \n\t"
+ "addiu %[inre1], %[inre1], 2 \n\t"
+ "addiu %[inre2], %[inre2], 2 \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r7], %[r7], 14 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r7], %[r7], 0x2000 \n\t"
+ "sra %[r7], %[r7], 14 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "sll %[r8], %[r8], 2 \n\t"
+ "addu %[r8], %[r8], %[r7] \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r6], %[r6], 14 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r6], %[r6], 0x2000 \n\t"
+ "sra %[r6], %[r6], 14 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "sll %[r4], %[r4], 2 \n\t"
+ "addu %[r4], %[r4], %[r6] \n\t"
+ "andi %[r6], %[r2], 0xFFFF \n\t"
+ "sra %[r2], %[r2], 16 \n\t"
+ "mul %[r7], %[r5], %[r6] \n\t"
+ "mul %[r9], %[r5], %[r2] \n\t"
+ "mul %[r6], %[r3], %[r6] \n\t"
+ "mul %[r2], %[r3], %[r2] \n\t"
+ "addiu %[cosptr], %[cosptr], 2 \n\t"
+ "addiu %[sinptr], %[sinptr], 2 \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r7], %[r7], 14 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r7], %[r7], 0x2000 \n\t"
+ "sra %[r7], %[r7], 14 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "sll %[r9], %[r9], 2 \n\t"
+ "addu %[r9], %[r7], %[r9] \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r6], %[r6], 14 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r6], %[r6], 0x2000 \n\t"
+ "sra %[r6], %[r6], 14 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "sll %[r2], %[r2], 2 \n\t"
+ "addu %[r2], %[r6], %[r2] \n\t"
+ "subu %[r8], %[r8], %[r9] \n\t"
+ "sra %[r8], %[r8], 9 \n\t"
+ "addu %[r2], %[r4], %[r2] \n\t"
+ "sra %[r2], %[r2], 9 \n\t"
+ "sh %[r8], 0(%[tmpre]) \n\t"
+ "sh %[r2], 0(%[tmpim]) \n\t"
+ "andi %[r4], %[r1], 0xFFFF \n\t"
+ "sra %[r1], %[r1], 16 \n\t"
+ "andi %[r6], %[r0], 0xFFFF \n\t"
+ "sra %[r0], %[r0], 16 \n\t"
+ "mul %[r7], %[r5], %[r4] \n\t"
+ "mul %[r9], %[r5], %[r1] \n\t"
+ "mul %[r4], %[r3], %[r4] \n\t"
+ "mul %[r1], %[r3], %[r1] \n\t"
+ "mul %[r8], %[r3], %[r0] \n\t"
+ "mul %[r3], %[r3], %[r6] \n\t"
+ "mul %[r6], %[r5], %[r6] \n\t"
+ "mul %[r0], %[r5], %[r0] \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r7], %[r7], 14 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r7], %[r7], 0x2000 \n\t"
+ "sra %[r7], %[r7], 14 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "sll %[r9], %[r9], 2 \n\t"
+ "addu %[r9], %[r9], %[r7] \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r4], %[r4], 14 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r4], %[r4], 0x2000 \n\t"
+ "sra %[r4], %[r4], 14 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "sll %[r1], %[r1], 2 \n\t"
+ "addu %[r1], %[r1], %[r4] \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r3], %[r3], 14 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r3], %[r3], 0x2000 \n\t"
+ "sra %[r3], %[r3], 14 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "sll %[r8], %[r8], 2 \n\t"
+ "addu %[r8], %[r8], %[r3] \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r6], %[r6], 14 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r6], %[r6], 0x2000 \n\t"
+ "sra %[r6], %[r6], 14 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "sll %[r0], %[r0], 2 \n\t"
+ "addu %[r0], %[r0], %[r6] \n\t"
+ "addu %[r3], %[tmpre], %[offset] \n\t"
+ "addu %[r2], %[tmpim], %[offset] \n\t"
+ "addu %[r9], %[r9], %[r8] \n\t"
+ "negu %[r9], %[r9] \n\t"
+ "sra %[r9], %[r9], 9 \n\t"
+ "subu %[r0], %[r0], %[r1] \n\t"
+ "sra %[r0], %[r0], 9 \n\t"
+ "addiu %[offset], %[offset], -4 \n\t"
+ "sh %[r9], 0(%[r3]) \n\t"
+ "sh %[r0], 0(%[r2]) \n\t"
+ "addiu %[tmpre], %[tmpre], 2 \n\t"
+ "bgtz %[k], 2b \n\t"
+ " addiu %[tmpim], %[tmpim], 2 \n\t"
+ "3: \n\t"
+ ".set pop \n\t"
+ : [inre1] "=&r" (inre1), [inre2] "=&r" (inre2), [tmpre] "=&r" (tmpre),
+ [tmpim] "=&r" (tmpim), [offset] "+r" (factor), [k] "+r" (k),
+ [r0] "=&r" (r0), [r1] "=&r" (r1), [r2] "=&r" (r2), [r3] "=&r" (r3),
+ [r4] "=&r" (r4), [r5] "=&r" (r5), [r6] "=&r" (r6), [r7] "=&r" (r7),
+ [r8] "=&r" (r8), [r9] "=&r" (r9), [max1] "=&r" (max1)
+ : [inre1Q9] "r" (inre1Q9), [inre2Q9] "r" (inre2Q9),
+ [outreQ7] "r" (outreQ7), [outimQ7] "r" (outimQ7),
+ [max] "r" (max), [cosptr] "r" (cosptr), [sinptr] "r" (sinptr)
+ : "hi", "lo", "memory"
+ );
+}
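
Both transform functions end their first loop with the same clz/addiu pair; below is a portable sketch of that normalization step. The helper name is hypothetical, -25 is the constant used above, and the zero case follows the MIPS definition of clz.

#include <stdint.h>

// Sketch of the shift computed by "clz %[max]; addiu %[max], %[max], -25":
// a positive result means "shift left that many steps", a negative one
// means "shift right".
static int NormalizationShift(uint32_t max_abs) {
  int leading_zeros = 0;
  if (max_abs == 0) {
    leading_zeros = 32;  // clz of zero is 32 on MIPS.
  } else {
    while ((max_abs & 0x80000000u) == 0) {
      leading_zeros++;
      max_abs <<= 1;
    }
  }
  return leading_zeros - 25;
}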
+
+void WebRtcIsacfix_Spec2TimeMIPS(int16_t* inreQ7,
+                                 int16_t* inimQ7,
+                                 int32_t* outre1Q16,
+                                 int32_t* outre2Q16) {
+ int k = FRAMESAMPLES / 4;
+ int16_t* inre;
+ int16_t* inim;
+ int32_t* outre1;
+ int32_t* outre2;
+ int16_t* cosptr = (int16_t*)WebRtcIsacfix_kCosTab2;
+ int16_t* sinptr = (int16_t*)WebRtcIsacfix_kSinTab2;
+ int32_t r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, max, max1;
+#if defined(MIPS_DSP_R1_LE)
+ int32_t offset = FRAMESAMPLES - 4;
+#else // #if defined(MIPS_DSP_R1_LE)
+ int32_t offset = FRAMESAMPLES - 2;
+#endif // #if defined(MIPS_DSP_R1_LE)
+
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "addiu %[inre], %[inreQ7], 0 \n\t"
+    "addiu      %[inim],    %[inimQ7],    0              \n\t"
+ "addiu %[outre1], %[outre1Q16], 0 \n\t"
+ "addiu %[outre2], %[outre2Q16], 0 \n\t"
+ "mul %[max], $zero, $zero \n\t"
+ "1: \n\t"
+#if defined(MIPS_DSP_R1_LE)
+    // Process two samples per iteration, avoiding a left shift before the
+    // multiplication. The MaxAbsValueW32 function is inlined into the loop.
+ "addu %[r8], %[inre], %[offset] \n\t"
+ "addu %[r9], %[inim], %[offset] \n\t"
+ "lwl %[r4], 0(%[r8]) \n\t"
+ "lwl %[r5], 0(%[r9]) \n\t"
+ "lwl %[r0], 0(%[inre]) \n\t"
+ "lwl %[r1], 0(%[inim]) \n\t"
+ "lwl %[r2], 0(%[cosptr]) \n\t"
+ "lwl %[r3], 0(%[sinptr]) \n\t"
+ "lwr %[r4], 0(%[r8]) \n\t"
+ "lwr %[r5], 0(%[r9]) \n\t"
+ "lwr %[r0], 0(%[inre]) \n\t"
+ "lwr %[r1], 0(%[inim]) \n\t"
+ "lwr %[r2], 0(%[cosptr]) \n\t"
+ "lwr %[r3], 0(%[sinptr]) \n\t"
+ "packrl.ph %[r4], %[r4], %[r4] \n\t"
+ "packrl.ph %[r5], %[r5], %[r5] \n\t"
+ "muleq_s.w.phr %[r6], %[r0], %[r2] \n\t"
+ "muleq_s.w.phr %[r7], %[r1], %[r3] \n\t"
+ "muleq_s.w.phr %[r8], %[r4], %[r2] \n\t"
+ "muleq_s.w.phr %[r9], %[r5], %[r3] \n\t"
+ "addiu %[k], %[k], -2 \n\t"
+ "addiu %[cosptr], %[cosptr], 4 \n\t"
+ "addiu %[sinptr], %[sinptr], 4 \n\t"
+ "addiu %[inre], %[inre], 4 \n\t"
+ "addiu %[inim], %[inim], 4 \n\t"
+ "shra_r.w %[r6], %[r6], 6 \n\t"
+ "shra_r.w %[r7], %[r7], 6 \n\t"
+ "shra_r.w %[r8], %[r8], 6 \n\t"
+ "shra_r.w %[r9], %[r9], 6 \n\t"
+ "addu %[r6], %[r6], %[r7] \n\t"
+ "subu %[r9], %[r9], %[r8] \n\t"
+ "subu %[r7], %[r6], %[r9] \n\t"
+ "addu %[r6], %[r6], %[r9] \n\t"
+ "sll %[r10], %[offset], 1 \n\t"
+ "addu %[r10], %[outre1], %[r10] \n\t"
+ "sw %[r7], 0(%[outre1]) \n\t"
+ "absq_s.w %[r7], %[r7] \n\t"
+ "sw %[r6], 4(%[r10]) \n\t"
+ "absq_s.w %[r6], %[r6] \n\t"
+ "slt %[r8], %[max], %[r7] \n\t"
+ "movn %[max], %[r7], %[r8] \n\t"
+ "slt %[r8], %[max], %[r6] \n\t"
+ "movn %[max], %[r6], %[r8] \n\t"
+ "muleq_s.w.phl %[r6], %[r0], %[r2] \n\t"
+ "muleq_s.w.phl %[r7], %[r1], %[r3] \n\t"
+ "muleq_s.w.phl %[r8], %[r4], %[r2] \n\t"
+ "muleq_s.w.phl %[r9], %[r5], %[r3] \n\t"
+ "shra_r.w %[r6], %[r6], 6 \n\t"
+ "shra_r.w %[r7], %[r7], 6 \n\t"
+ "shra_r.w %[r8], %[r8], 6 \n\t"
+ "shra_r.w %[r9], %[r9], 6 \n\t"
+ "addu %[r6], %[r6], %[r7] \n\t"
+ "subu %[r9], %[r9], %[r8] \n\t"
+ "subu %[r7], %[r6], %[r9] \n\t"
+ "addu %[r6], %[r6], %[r9] \n\t"
+ "sw %[r7], 4(%[outre1]) \n\t"
+ "absq_s.w %[r7], %[r7] \n\t"
+ "sw %[r6], 0(%[r10]) \n\t"
+ "absq_s.w %[r6], %[r6] \n\t"
+ "slt %[r8], %[max], %[r7] \n\t"
+ "movn %[max], %[r7], %[r8] \n\t"
+ "slt %[r8], %[max], %[r6] \n\t"
+ "movn %[max], %[r6], %[r8] \n\t"
+ "muleq_s.w.phr %[r6], %[r1], %[r2] \n\t"
+ "muleq_s.w.phr %[r7], %[r0], %[r3] \n\t"
+ "muleq_s.w.phr %[r8], %[r5], %[r2] \n\t"
+ "muleq_s.w.phr %[r9], %[r4], %[r3] \n\t"
+ "addiu %[outre1], %[outre1], 8 \n\t"
+ "shra_r.w %[r6], %[r6], 6 \n\t"
+ "shra_r.w %[r7], %[r7], 6 \n\t"
+ "shra_r.w %[r8], %[r8], 6 \n\t"
+ "shra_r.w %[r9], %[r9], 6 \n\t"
+ "subu %[r6], %[r6], %[r7] \n\t"
+ "addu %[r9], %[r9], %[r8] \n\t"
+ "subu %[r7], %[r6], %[r9] \n\t"
+ "addu %[r6], %[r9], %[r6] \n\t"
+ "negu %[r6], %[r6] \n\t"
+ "sll %[r10], %[offset], 1 \n\t"
+ "addu %[r10], %[outre2], %[r10] \n\t"
+ "sw %[r7], 0(%[outre2]) \n\t"
+ "absq_s.w %[r7], %[r7] \n\t"
+ "sw %[r6], 4(%[r10]) \n\t"
+ "absq_s.w %[r6], %[r6] \n\t"
+ "slt %[r8], %[max], %[r7] \n\t"
+ "movn %[max], %[r7], %[r8] \n\t"
+ "slt %[r8], %[max], %[r6] \n\t"
+ "movn %[max], %[r6], %[r8] \n\t"
+ "muleq_s.w.phl %[r6], %[r1], %[r2] \n\t"
+ "muleq_s.w.phl %[r7], %[r0], %[r3] \n\t"
+ "muleq_s.w.phl %[r8], %[r5], %[r2] \n\t"
+ "muleq_s.w.phl %[r9], %[r4], %[r3] \n\t"
+ "addiu %[offset], %[offset], -8 \n\t"
+ "shra_r.w %[r6], %[r6], 6 \n\t"
+ "shra_r.w %[r7], %[r7], 6 \n\t"
+ "shra_r.w %[r8], %[r8], 6 \n\t"
+ "shra_r.w %[r9], %[r9], 6 \n\t"
+ "subu %[r6], %[r6], %[r7] \n\t"
+ "addu %[r9], %[r9], %[r8] \n\t"
+ "subu %[r7], %[r6], %[r9] \n\t"
+ "addu %[r6], %[r9], %[r6] \n\t"
+ "negu %[r6], %[r6] \n\t"
+ "sw %[r7], 4(%[outre2]) \n\t"
+ "absq_s.w %[r7], %[r7] \n\t"
+ "sw %[r6], 0(%[r10]) \n\t"
+ "absq_s.w %[r6], %[r6] \n\t"
+ "slt %[r8], %[max], %[r7] \n\t"
+ "movn %[max], %[r7], %[r8] \n\t"
+ "slt %[r8], %[max], %[r6] \n\t"
+ "movn %[max], %[r6], %[r8] \n\t"
+ "bgtz %[k], 1b \n\t"
+ " addiu %[outre2], %[outre2], 8 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "lh %[r0], 0(%[inre]) \n\t"
+ "lh %[r1], 0(%[inim]) \n\t"
+ "lh %[r4], 0(%[cosptr]) \n\t"
+ "lh %[r5], 0(%[sinptr]) \n\t"
+ "addiu %[k], %[k], -1 \n\t"
+ "mul %[r2], %[r0], %[r4] \n\t"
+ "mul %[r0], %[r0], %[r5] \n\t"
+ "mul %[r3], %[r1], %[r5] \n\t"
+ "mul %[r1], %[r1], %[r4] \n\t"
+ "addiu %[cosptr], %[cosptr], 2 \n\t"
+ "addiu %[sinptr], %[sinptr], 2 \n\t"
+ "addu %[r8], %[inre], %[offset] \n\t"
+ "addu %[r9], %[inim], %[offset] \n\t"
+ "addiu %[r2], %[r2], 16 \n\t"
+ "sra %[r2], %[r2], 5 \n\t"
+ "addiu %[r0], %[r0], 16 \n\t"
+ "sra %[r0], %[r0], 5 \n\t"
+ "addiu %[r3], %[r3], 16 \n\t"
+ "sra %[r3], %[r3], 5 \n\t"
+ "lh %[r6], 0(%[r8]) \n\t"
+ "lh %[r7], 0(%[r9]) \n\t"
+ "addiu %[r1], %[r1], 16 \n\t"
+ "sra %[r1], %[r1], 5 \n\t"
+ "mul %[r8], %[r7], %[r4] \n\t"
+ "mul %[r7], %[r7], %[r5] \n\t"
+ "mul %[r9], %[r6], %[r4] \n\t"
+ "mul %[r6], %[r6], %[r5] \n\t"
+ "addu %[r2], %[r2], %[r3] \n\t"
+ "subu %[r1], %[r1], %[r0] \n\t"
+ "sll %[r0], %[offset], 1 \n\t"
+ "addu %[r4], %[outre1], %[r0] \n\t"
+ "addu %[r5], %[outre2], %[r0] \n\t"
+ "addiu %[r8], %[r8], 16 \n\t"
+ "sra %[r8], %[r8], 5 \n\t"
+ "addiu %[r7], %[r7], 16 \n\t"
+ "sra %[r7], %[r7], 5 \n\t"
+ "addiu %[r6], %[r6], 16 \n\t"
+ "sra %[r6], %[r6], 5 \n\t"
+ "addiu %[r9], %[r9], 16 \n\t"
+ "sra %[r9], %[r9], 5 \n\t"
+ "addu %[r8], %[r8], %[r6] \n\t"
+ "negu %[r8], %[r8] \n\t"
+ "subu %[r7], %[r7], %[r9] \n\t"
+ "subu %[r6], %[r2], %[r7] \n\t"
+ "addu %[r0], %[r2], %[r7] \n\t"
+ "addu %[r3], %[r1], %[r8] \n\t"
+ "subu %[r1], %[r8], %[r1] \n\t"
+ "sw %[r6], 0(%[outre1]) \n\t"
+ "sw %[r0], 0(%[r4]) \n\t"
+ "sw %[r3], 0(%[outre2]) \n\t"
+ "sw %[r1], 0(%[r5]) \n\t"
+ "addiu %[outre1], %[outre1], 4 \n\t"
+ "addiu %[offset], %[offset], -4 \n\t"
+ "addiu %[inre], %[inre], 2 \n\t"
+ "addiu %[inim], %[inim], 2 \n\t"
+ // Inlined WebRtcSpl_MaxAbsValueW32
+ "negu %[r5], %[r6] \n\t"
+ "slt %[r2], %[r6], $zero \n\t"
+ "movn %[r6], %[r5], %[r2] \n\t"
+ "negu %[r5], %[r0] \n\t"
+ "slt %[r2], %[r0], $zero \n\t"
+ "movn %[r0], %[r5], %[r2] \n\t"
+ "negu %[r5], %[r3] \n\t"
+ "slt %[r2], %[r3], $zero \n\t"
+ "movn %[r3], %[r5], %[r2] \n\t"
+ "negu %[r5], %[r1] \n\t"
+ "slt %[r2], %[r1], $zero \n\t"
+ "movn %[r1], %[r5], %[r2] \n\t"
+ "slt %[r2], %[r6], %[r0] \n\t"
+ "slt %[r5], %[r3], %[r1] \n\t"
+ "movn %[r6], %[r0], %[r2] \n\t"
+ "movn %[r3], %[r1], %[r5] \n\t"
+ "slt %[r2], %[r6], %[r3] \n\t"
+ "movn %[r6], %[r3], %[r2] \n\t"
+ "slt %[r2], %[max], %[r6] \n\t"
+ "movn %[max], %[r6], %[r2] \n\t"
+ "bgtz %[k], 1b \n\t"
+ " addiu %[outre2], %[outre2], 4 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "clz %[max], %[max] \n\t"
+ "addiu %[max], %[max], -25 \n\t"
+ ".set pop \n\t"
+ : [inre] "=&r" (inre), [inim] "=&r" (inim),
+ [outre1] "=&r" (outre1), [outre2] "=&r" (outre2),
+ [offset] "+r" (offset), [k] "+r" (k), [r0] "=&r" (r0),
+ [r1] "=&r" (r1), [r2] "=&r" (r2), [r3] "=&r" (r3),
+ [r4] "=&r" (r4), [r5] "=&r" (r5), [r6] "=&r" (r6),
+ [r7] "=&r" (r7), [r10] "=&r" (r10),
+ [r8] "=&r" (r8), [r9] "=&r" (r9), [max] "=&r" (max)
+ : [inreQ7] "r" (inreQ7), [inimQ7] "r" (inimQ7),
+ [cosptr] "r" (cosptr), [sinptr] "r" (sinptr),
+ [outre1Q16] "r" (outre1Q16), [outre2Q16] "r" (outre2Q16)
+ : "hi", "lo", "memory"
+ );
+
+ // "Fastest" vectors
+ k = FRAMESAMPLES / 4;
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "addiu %[inre], %[inreQ7], 0 \n\t"
+ "addiu %[inim], %[inimQ7], 0 \n\t"
+ "addiu %[outre1], %[outre1Q16], 0 \n\t"
+ "addiu %[outre2], %[outre2Q16], 0 \n\t"
+ "bltz %[max], 2f \n\t"
+ " subu %[max1], $zero, %[max] \n\t"
+ "1: \n\t"
+ "lw %[r0], 0(%[outre1]) \n\t"
+ "lw %[r1], 0(%[outre2]) \n\t"
+ "lw %[r2], 4(%[outre1]) \n\t"
+ "lw %[r3], 4(%[outre2]) \n\t"
+ "sllv %[r0], %[r0], %[max] \n\t"
+ "sllv %[r1], %[r1], %[max] \n\t"
+ "sllv %[r2], %[r2], %[max] \n\t"
+ "sllv %[r3], %[r3], %[max] \n\t"
+ "addiu %[k], %[k], -1 \n\t"
+ "addiu %[outre1], %[outre1], 8 \n\t"
+ "addiu %[outre2], %[outre2], 8 \n\t"
+ "sh %[r0], 0(%[inre]) \n\t"
+ "sh %[r1], 0(%[inim]) \n\t"
+ "sh %[r2], 2(%[inre]) \n\t"
+ "sh %[r3], 2(%[inim]) \n\t"
+ "addiu %[inre], %[inre], 4 \n\t"
+ "bgtz %[k], 1b \n\t"
+ " addiu %[inim], %[inim], 4 \n\t"
+ "b 4f \n\t"
+ " nop \n\t"
+ "2: \n\t"
+#if !defined(MIPS_DSP_R1_LE)
+ "addiu %[r4], $zero, 1 \n\t"
+ "addiu %[r5], %[max1], -1 \n\t"
+ "sllv %[r4], %[r4], %[r5] \n\t"
+#endif // #if !defined(MIPS_DSP_R1_LE)
+ "3: \n\t"
+ "lw %[r0], 0(%[outre1]) \n\t"
+ "lw %[r1], 0(%[outre2]) \n\t"
+ "lw %[r2], 4(%[outre1]) \n\t"
+ "lw %[r3], 4(%[outre2]) \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shrav_r.w %[r0], %[r0], %[max1] \n\t"
+ "shrav_r.w %[r1], %[r1], %[max1] \n\t"
+ "shrav_r.w %[r2], %[r2], %[max1] \n\t"
+ "shrav_r.w %[r3], %[r3], %[max1] \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addu %[r0], %[r0], %[r4] \n\t"
+ "addu %[r1], %[r1], %[r4] \n\t"
+ "addu %[r2], %[r2], %[r4] \n\t"
+ "addu %[r3], %[r3], %[r4] \n\t"
+ "srav %[r0], %[r0], %[max1] \n\t"
+ "srav %[r1], %[r1], %[max1] \n\t"
+ "srav %[r2], %[r2], %[max1] \n\t"
+ "srav %[r3], %[r3], %[max1] \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[outre1], %[outre1], 8 \n\t"
+ "addiu %[outre2], %[outre2], 8 \n\t"
+ "sh %[r0], 0(%[inre]) \n\t"
+ "sh %[r1], 0(%[inim]) \n\t"
+ "sh %[r2], 2(%[inre]) \n\t"
+ "sh %[r3], 2(%[inim]) \n\t"
+ "addiu %[k], %[k], -1 \n\t"
+ "addiu %[inre], %[inre], 4 \n\t"
+ "bgtz %[k], 3b \n\t"
+ " addiu %[inim], %[inim], 4 \n\t"
+ "4: \n\t"
+ ".set pop \n\t"
+ : [k] "+r" (k), [max1] "=&r" (max1), [r0] "=&r" (r0),
+ [inre] "=&r" (inre), [inim] "=&r" (inim),
+ [outre1] "=&r" (outre1), [outre2] "=&r" (outre2),
+#if !defined(MIPS_DSP_R1_LE)
+ [r4] "=&r" (r4), [r5] "=&r" (r5),
+#endif // #if !defined(MIPS_DSP_R1_LE)
+ [r1] "=&r" (r1), [r2] "=&r" (r2), [r3] "=&r" (r3)
+ : [max] "r" (max), [inreQ7] "r" (inreQ7),
+ [inimQ7] "r" (inimQ7), [outre1Q16] "r" (outre1Q16),
+ [outre2Q16] "r" (outre2Q16)
+ : "memory"
+ );
+
+ WebRtcIsacfix_FftRadix16Fastest(inreQ7, inimQ7, 1); // real call
+
+  // All the remaining processing is done inside a single loop to avoid
+  // unnecessary memory accesses. The MIPS DSPr2 version processes two
+  // samples at a time.
+ cosptr = (int16_t*)WebRtcIsacfix_kCosTab1;
+ sinptr = (int16_t*)WebRtcIsacfix_kSinTab1;
+ k = FRAMESAMPLES / 2;
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "addiu %[inre], %[inreQ7], 0 \n\t"
+ "addiu %[inim], %[inimQ7], 0 \n\t"
+ "addiu %[outre1], %[outre1Q16], 0 \n\t"
+ "addiu %[outre2], %[outre2Q16], 0 \n\t"
+ "addiu %[r4], $zero, 273 \n\t"
+ "addiu %[r5], $zero, 31727 \n\t"
+#if defined(MIPS_DSP_R2_LE)
+ "addiu %[max], %[max], 16 \n\t"
+ "replv.ph %[r4], %[r4] \n\t"
+#endif // #if defined(MIPS_DSP_R2_LE)
+ "bltz %[max], 2f \n\t"
+ " subu %[max1], $zero, %[max] \n\t"
+#if defined(MIPS_DSP_R2_LE)
+ "addiu %[max], %[max], 1 \n\t"
+#endif // #if defined(MIPS_DSP_R2_LE)
+ "1: \n\t"
+#if defined(MIPS_DSP_R2_LE)
+ "lwl %[r0], 0(%[inre]) \n\t"
+ "lwl %[r1], 0(%[inim]) \n\t"
+ "lh %[r2], 0(%[cosptr]) \n\t"
+ "lwr %[r0], 0(%[inre]) \n\t"
+ "lwr %[r1], 0(%[inim]) \n\t"
+ "lh %[r3], 0(%[sinptr]) \n\t"
+ "muleq_s.w.phr %[r6], %[r0], %[r4] \n\t"
+ "muleq_s.w.phr %[r7], %[r1], %[r4] \n\t"
+ "muleq_s.w.phl %[r0], %[r0], %[r4] \n\t"
+ "muleq_s.w.phl %[r1], %[r1], %[r4] \n\t"
+ "addiu %[k], %[k], -2 \n\t"
+ "addiu %[inre], %[inre], 4 \n\t"
+ "addiu %[inim], %[inim], 4 \n\t"
+ "shrav_r.w %[r6], %[r6], %[max] \n\t"
+ "shrav_r.w %[r7], %[r7], %[max] \n\t"
+ "mult $ac0, %[r2], %[r6] \n\t"
+ "mult $ac1, %[r3], %[r7] \n\t"
+ "mult $ac2, %[r2], %[r7] \n\t"
+ "mult $ac3, %[r3], %[r6] \n\t"
+ "lh %[r2], 2(%[cosptr]) \n\t"
+ "lh %[r3], 2(%[sinptr]) \n\t"
+ "extr_r.w %[r6], $ac0, 14 \n\t"
+ "extr_r.w %[r7], $ac1, 14 \n\t"
+ "extr_r.w %[r8], $ac2, 14 \n\t"
+ "extr_r.w %[r9], $ac3, 14 \n\t"
+ "shrav_r.w %[r0], %[r0], %[max] \n\t"
+ "shrav_r.w %[r1], %[r1], %[max] \n\t"
+ "mult $ac0, %[r2], %[r0] \n\t"
+ "mult $ac1, %[r3], %[r1] \n\t"
+ "mult $ac2, %[r2], %[r1] \n\t"
+ "mult $ac3, %[r3], %[r0] \n\t"
+ "addiu %[cosptr], %[cosptr], 4 \n\t"
+ "extr_r.w %[r0], $ac0, 14 \n\t"
+ "extr_r.w %[r1], $ac1, 14 \n\t"
+ "extr_r.w %[r2], $ac2, 14 \n\t"
+ "extr_r.w %[r3], $ac3, 14 \n\t"
+ "subu %[r6], %[r6], %[r7] \n\t"
+ "addu %[r8], %[r8], %[r9] \n\t"
+ "mult $ac0, %[r5], %[r6] \n\t"
+ "mult $ac1, %[r5], %[r8] \n\t"
+ "addiu %[sinptr], %[sinptr], 4 \n\t"
+ "subu %[r0], %[r0], %[r1] \n\t"
+ "addu %[r2], %[r2], %[r3] \n\t"
+ "extr_r.w %[r1], $ac0, 11 \n\t"
+ "extr_r.w %[r3], $ac1, 11 \n\t"
+ "mult $ac2, %[r5], %[r0] \n\t"
+ "mult $ac3, %[r5], %[r2] \n\t"
+ "sw %[r1], 0(%[outre1]) \n\t"
+ "sw %[r3], 0(%[outre2]) \n\t"
+ "addiu %[outre1], %[outre1], 8 \n\t"
+ "extr_r.w %[r0], $ac2, 11 \n\t"
+ "extr_r.w %[r2], $ac3, 11 \n\t"
+ "sw %[r0], -4(%[outre1]) \n\t"
+ "sw %[r2], 4(%[outre2]) \n\t"
+ "bgtz %[k], 1b \n\t"
+ " addiu %[outre2], %[outre2], 8 \n\t"
+    "b          3f                                 \n\t"
+    " nop                                          \n\t"
+#else // #if defined(MIPS_DSP_R2_LE)
+ "lh %[r0], 0(%[inre]) \n\t"
+ "lh %[r1], 0(%[inim]) \n\t"
+ "addiu %[k], %[k], -1 \n\t"
+ "srav %[r0], %[r0], %[max] \n\t"
+ "srav %[r1], %[r1], %[max] \n\t"
+ "sra %[r2], %[r0], 16 \n\t"
+ "andi %[r0], %[r0], 0xFFFF \n\t"
+ "sra %[r0], %[r0], 1 \n\t"
+ "sra %[r3], %[r1], 16 \n\t"
+ "andi %[r1], %[r1], 0xFFFF \n\t"
+ "sra %[r1], %[r1], 1 \n\t"
+ "mul %[r2], %[r2], %[r4] \n\t"
+ "mul %[r0], %[r0], %[r4] \n\t"
+ "mul %[r3], %[r3], %[r4] \n\t"
+ "mul %[r1], %[r1], %[r4] \n\t"
+ "addiu %[inre], %[inre], 2 \n\t"
+ "addiu %[inim], %[inim], 2 \n\t"
+ "lh %[r6], 0(%[cosptr]) \n\t"
+ "lh %[r7], 0(%[sinptr]) \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r0], %[r0], 15 \n\t"
+ "shra_r.w %[r1], %[r1], 15 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r0], %[r0], 0x4000 \n\t"
+ "addiu %[r1], %[r1], 0x4000 \n\t"
+ "sra %[r0], %[r0], 15 \n\t"
+ "sra %[r1], %[r1], 15 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "addu %[r0], %[r2], %[r0] \n\t"
+ "addu %[r1], %[r3], %[r1] \n\t"
+ "sra %[r2], %[r0], 16 \n\t"
+ "andi %[r0], %[r0], 0xFFFF \n\t"
+ "mul %[r9], %[r2], %[r6] \n\t"
+ "mul %[r2], %[r2], %[r7] \n\t"
+ "mul %[r8], %[r0], %[r6] \n\t"
+ "mul %[r0], %[r0], %[r7] \n\t"
+    "sra        %[r3],      %[r1],        16             \n\t"
+ "andi %[r1], %[r1], 0xFFFF \n\t"
+ "sll %[r9], %[r9], 2 \n\t"
+ "sll %[r2], %[r2], 2 \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r8], %[r8], 14 \n\t"
+ "shra_r.w %[r0], %[r0], 14 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r8], %[r8], 0x2000 \n\t"
+ "addiu %[r0], %[r0], 0x2000 \n\t"
+ "sra %[r8], %[r8], 14 \n\t"
+ "sra %[r0], %[r0], 14 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "addu %[r9], %[r9], %[r8] \n\t"
+ "addu %[r2], %[r2], %[r0] \n\t"
+ "mul %[r0], %[r3], %[r6] \n\t"
+ "mul %[r3], %[r3], %[r7] \n\t"
+ "mul %[r8], %[r1], %[r6] \n\t"
+    "mul        %[r1],      %[r1],        %[r7]          \n\t"
+ "addiu %[cosptr], %[cosptr], 2 \n\t"
+ "addiu %[sinptr], %[sinptr], 2 \n\t"
+ "sll %[r0], %[r0], 2 \n\t"
+ "sll %[r3], %[r3], 2 \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r8], %[r8], 14 \n\t"
+ "shra_r.w %[r1], %[r1], 14 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r8], %[r8], 0x2000 \n\t"
+ "addiu %[r1], %[r1], 0x2000 \n\t"
+ "sra %[r8], %[r8], 14 \n\t"
+ "sra %[r1], %[r1], 14 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "addu %[r0], %[r0], %[r8] \n\t"
+ "addu %[r3], %[r3], %[r1] \n\t"
+ "subu %[r9], %[r9], %[r3] \n\t"
+ "addu %[r0], %[r0], %[r2] \n\t"
+ "sra %[r1], %[r9], 16 \n\t"
+ "andi %[r9], %[r9], 0xFFFF \n\t"
+ "mul %[r1], %[r1], %[r5] \n\t"
+ "mul %[r9], %[r9], %[r5] \n\t"
+ "sra %[r2], %[r0], 16 \n\t"
+ "andi %[r0], %[r0], 0xFFFF \n\t"
+ "mul %[r2], %[r2], %[r5] \n\t"
+ "mul %[r0], %[r0], %[r5] \n\t"
+ "sll %[r1], %[r1], 5 \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r9], %[r9], 11 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r9], %[r9], 0x400 \n\t"
+ "sra %[r9], %[r9], 11 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "addu %[r1], %[r1], %[r9] \n\t"
+ "sll %[r2], %[r2], 5 \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r0], %[r0], 11 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r0], %[r0], 0x400 \n\t"
+ "sra %[r0], %[r0], 11 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "addu %[r0], %[r0], %[r2] \n\t"
+ "sw %[r1], 0(%[outre1]) \n\t"
+ "addiu %[outre1], %[outre1], 4 \n\t"
+ "sw %[r0], 0(%[outre2]) \n\t"
+ "bgtz %[k], 1b \n\t"
+ " addiu %[outre2], %[outre2], 4 \n\t"
+ "b 3f \n\t"
+ " nop \n\t"
+#endif // #if defined(MIPS_DSP_R2_LE)
+ "2: \n\t"
+#if defined(MIPS_DSP_R2_LE)
+ "addiu %[max1], %[max1], -1 \n\t"
+ "21: \n\t"
+ "lwl %[r0], 0(%[inre]) \n\t"
+ "lwl %[r1], 0(%[inim]) \n\t"
+ "lh %[r2], 0(%[cosptr]) \n\t"
+ "lwr %[r0], 0(%[inre]) \n\t"
+ "lwr %[r1], 0(%[inim]) \n\t"
+ "lh %[r3], 0(%[sinptr]) \n\t"
+ "muleq_s.w.phr %[r6], %[r0], %[r4] \n\t"
+ "muleq_s.w.phr %[r7], %[r1], %[r4] \n\t"
+ "muleq_s.w.phl %[r0], %[r0], %[r4] \n\t"
+ "muleq_s.w.phl %[r1], %[r1], %[r4] \n\t"
+ "addiu %[k], %[k], -2 \n\t"
+ "addiu %[inre], %[inre], 4 \n\t"
+ "addiu %[inim], %[inim], 4 \n\t"
+ "sllv %[r6], %[r6], %[max1] \n\t"
+ "sllv %[r7], %[r7], %[max1] \n\t"
+ "mult $ac0, %[r2], %[r6] \n\t"
+ "mult $ac1, %[r3], %[r7] \n\t"
+ "mult $ac2, %[r2], %[r7] \n\t"
+ "mult $ac3, %[r3], %[r6] \n\t"
+ "lh %[r2], 2(%[cosptr]) \n\t"
+ "lh %[r3], 2(%[sinptr]) \n\t"
+ "extr_r.w %[r6], $ac0, 14 \n\t"
+ "extr_r.w %[r7], $ac1, 14 \n\t"
+ "extr_r.w %[r8], $ac2, 14 \n\t"
+ "extr_r.w %[r9], $ac3, 14 \n\t"
+ "sllv %[r0], %[r0], %[max1] \n\t"
+ "sllv %[r1], %[r1], %[max1] \n\t"
+ "mult $ac0, %[r2], %[r0] \n\t"
+ "mult $ac1, %[r3], %[r1] \n\t"
+ "mult $ac2, %[r2], %[r1] \n\t"
+ "mult $ac3, %[r3], %[r0] \n\t"
+ "addiu %[cosptr], %[cosptr], 4 \n\t"
+ "extr_r.w %[r0], $ac0, 14 \n\t"
+ "extr_r.w %[r1], $ac1, 14 \n\t"
+ "extr_r.w %[r2], $ac2, 14 \n\t"
+ "extr_r.w %[r3], $ac3, 14 \n\t"
+ "subu %[r6], %[r6], %[r7] \n\t"
+ "addu %[r8], %[r8], %[r9] \n\t"
+ "mult $ac0, %[r5], %[r6] \n\t"
+ "mult $ac1, %[r5], %[r8] \n\t"
+ "addiu %[sinptr], %[sinptr], 4 \n\t"
+ "subu %[r0], %[r0], %[r1] \n\t"
+ "addu %[r2], %[r2], %[r3] \n\t"
+ "extr_r.w %[r1], $ac0, 11 \n\t"
+ "extr_r.w %[r3], $ac1, 11 \n\t"
+ "mult $ac2, %[r5], %[r0] \n\t"
+ "mult $ac3, %[r5], %[r2] \n\t"
+ "sw %[r1], 0(%[outre1]) \n\t"
+ "sw %[r3], 0(%[outre2]) \n\t"
+ "addiu %[outre1], %[outre1], 8 \n\t"
+ "extr_r.w %[r0], $ac2, 11 \n\t"
+ "extr_r.w %[r2], $ac3, 11 \n\t"
+ "sw %[r0], -4(%[outre1]) \n\t"
+ "sw %[r2], 4(%[outre2]) \n\t"
+ "bgtz %[k], 21b \n\t"
+ " addiu %[outre2], %[outre2], 8 \n\t"
+ "b 3f \n\t"
+ " nop \n\t"
+#else // #if defined(MIPS_DSP_R2_LE)
+ "lh %[r0], 0(%[inre]) \n\t"
+ "lh %[r1], 0(%[inim]) \n\t"
+ "addiu %[k], %[k], -1 \n\t"
+ "sllv %[r0], %[r0], %[max1] \n\t"
+ "sllv %[r1], %[r1], %[max1] \n\t"
+ "sra %[r2], %[r0], 16 \n\t"
+ "andi %[r0], %[r0], 0xFFFF \n\t"
+ "sra %[r0], %[r0], 1 \n\t"
+ "sra %[r3], %[r1], 16 \n\t"
+ "andi %[r1], %[r1], 0xFFFF \n\t"
+ "sra %[r1], %[r1], 1 \n\t"
+ "mul %[r2], %[r2], %[r4] \n\t"
+ "mul %[r0], %[r0], %[r4] \n\t"
+ "mul %[r3], %[r3], %[r4] \n\t"
+ "mul %[r1], %[r1], %[r4] \n\t"
+ "addiu %[inre], %[inre], 2 \n\t"
+ "addiu %[inim], %[inim], 2 \n\t"
+ "lh %[r6], 0(%[cosptr]) \n\t"
+ "lh %[r7], 0(%[sinptr]) \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r0], %[r0], 15 \n\t"
+ "shra_r.w %[r1], %[r1], 15 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r0], %[r0], 0x4000 \n\t"
+ "addiu %[r1], %[r1], 0x4000 \n\t"
+ "sra %[r0], %[r0], 15 \n\t"
+ "sra %[r1], %[r1], 15 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "addu %[r0], %[r2], %[r0] \n\t"
+ "addu %[r1], %[r3], %[r1] \n\t"
+ "sra %[r2], %[r0], 16 \n\t"
+ "andi %[r0], %[r0], 0xFFFF \n\t"
+ "mul %[r9], %[r2], %[r6] \n\t"
+ "mul %[r2], %[r2], %[r7] \n\t"
+ "mul %[r8], %[r0], %[r6] \n\t"
+ "mul %[r0], %[r0], %[r7] \n\t"
+ "sra %[r3], %[r1], 16 \n\t"
+ "andi %[r1], %[r1], 0xFFFF \n\t"
+ "sll %[r9], %[r9], 2 \n\t"
+ "sll %[r2], %[r2], 2 \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r8], %[r8], 14 \n\t"
+ "shra_r.w %[r0], %[r0], 14 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r8], %[r8], 0x2000 \n\t"
+ "addiu %[r0], %[r0], 0x2000 \n\t"
+ "sra %[r8], %[r8], 14 \n\t"
+ "sra %[r0], %[r0], 14 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "addu %[r9], %[r9], %[r8] \n\t"
+ "addu %[r2], %[r2], %[r0] \n\t"
+ "mul %[r0], %[r3], %[r6] \n\t"
+ "mul %[r3], %[r3], %[r7] \n\t"
+ "mul %[r8], %[r1], %[r6] \n\t"
+ "mul %[r1], %[r1], %[r7] \n\t"
+ "addiu %[cosptr], %[cosptr], 2 \n\t"
+ "addiu %[sinptr], %[sinptr], 2 \n\t"
+ "sll %[r0], %[r0], 2 \n\t"
+ "sll %[r3], %[r3], 2 \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r8], %[r8], 14 \n\t"
+ "shra_r.w %[r1], %[r1], 14 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r8], %[r8], 0x2000 \n\t"
+ "addiu %[r1], %[r1], 0x2000 \n\t"
+ "sra %[r8], %[r8], 14 \n\t"
+ "sra %[r1], %[r1], 14 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "addu %[r0], %[r0], %[r8] \n\t"
+ "addu %[r3], %[r3], %[r1] \n\t"
+ "subu %[r9], %[r9], %[r3] \n\t"
+ "addu %[r0], %[r0], %[r2] \n\t"
+ "sra %[r1], %[r9], 16 \n\t"
+ "andi %[r9], %[r9], 0xFFFF \n\t"
+ "mul %[r1], %[r1], %[r5] \n\t"
+ "mul %[r9], %[r9], %[r5] \n\t"
+ "sra %[r2], %[r0], 16 \n\t"
+ "andi %[r0], %[r0], 0xFFFF \n\t"
+ "mul %[r2], %[r2], %[r5] \n\t"
+ "mul %[r0], %[r0], %[r5] \n\t"
+ "sll %[r1], %[r1], 5 \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r9], %[r9], 11 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r9], %[r9], 0x400 \n\t"
+ "sra %[r9], %[r9], 11 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "addu %[r1], %[r1], %[r9] \n\t"
+ "sll %[r2], %[r2], 5 \n\t"
+#if defined(MIPS_DSP_R1_LE)
+ "shra_r.w %[r0], %[r0], 11 \n\t"
+#else // #if defined(MIPS_DSP_R1_LE)
+ "addiu %[r0], %[r0], 0x400 \n\t"
+ "sra %[r0], %[r0], 11 \n\t"
+#endif // #if defined(MIPS_DSP_R1_LE)
+ "addu %[r0], %[r0], %[r2] \n\t"
+ "sw %[r1], 0(%[outre1]) \n\t"
+ "addiu %[outre1], %[outre1], 4 \n\t"
+ "sw %[r0], 0(%[outre2]) \n\t"
+ "bgtz %[k], 2b \n\t"
+ " addiu %[outre2], %[outre2], 4 \n\t"
+#endif // #if defined(MIPS_DSP_R2_LE)
+ "3: \n\t"
+ ".set pop \n\t"
+ : [k] "+r" (k), [r0] "=&r" (r0), [r1] "=&r" (r1),
+ [r2] "=&r" (r2), [r3] "=&r" (r3), [r4] "=&r" (r4),
+ [r5] "=&r" (r5), [r6] "=&r" (r6), [r7] "=&r" (r7),
+ [r8] "=&r" (r8), [r9] "=&r" (r9), [max1] "=&r" (max1),
+ [inre] "=&r" (inre), [inim] "=&r" (inim),
+ [outre1] "=&r" (outre1), [outre2] "=&r" (outre2)
+ : [max] "r" (max), [inreQ7] "r" (inreQ7),
+ [inimQ7] "r" (inimQ7), [cosptr] "r" (cosptr),
+ [sinptr] "r" (sinptr), [outre1Q16] "r" (outre1Q16),
+ [outre2Q16] "r" (outre2Q16)
+ : "hi", "lo", "memory"
+#if defined(MIPS_DSP_R2_LE)
+ , "$ac1hi", "$ac1lo", "$ac2hi", "$ac2lo", "$ac3hi", "$ac3lo"
+#endif // #if defined(MIPS_DSP_R2_LE)
+ );
+}
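
The magic constants in the inline assembly above are consistent with the following readings (our interpretation; the source does not document them here): with a half-frame of N = FRAMESAMPLES / 2 = 240 samples, 16921 ~= round(0.5 / sqrt(240) * 2^19), 273 ~= round(2^16 / 240), and 31727 ~= round(sqrt(240) * 2^11). A quick stand-alone check:

#include <math.h>
#include <stdio.h>

int main(void) {
  const double n = 240.0;  // Assumes FRAMESAMPLES == 480.
  printf("%.0f\n", floor(0.5 / sqrt(n) * 524288.0 + 0.5));  // 16921 (Q19)
  printf("%.0f\n", floor(65536.0 / n + 0.5));               // 273   (Q16)
  printf("%.0f\n", floor(sqrt(n) * 2048.0 + 0.5));          // 31727 (Q11)
  return 0;
}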
diff --git a/modules/audio_coding/codecs/isac/fix/source/transform_tables.c b/modules/audio_coding/codecs/isac/fix/source/transform_tables.c
new file mode 100644
index 00000000..ee96b8e3
--- /dev/null
+++ b/modules/audio_coding/codecs/isac/fix/source/transform_tables.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * This file contains the trigonometric look-up tables used in the
+ * transform functions WebRtcIsacfix_Time2Spec and WebRtcIsacfix_Spec2Time.
+ */
+
+#include "webrtc/modules/audio_coding/codecs/isac/fix/source/settings.h"
+#include "webrtc/typedefs.h"
+
+#if !(defined WEBRTC_DETECT_ARM_NEON || defined WEBRTC_ARCH_ARM_NEON)
+/* Cosine table 1 in Q14. */
+const int16_t WebRtcIsacfix_kCosTab1[FRAMESAMPLES/2] = {
+ 16384, 16383, 16378, 16371, 16362, 16349, 16333, 16315, 16294, 16270,
+ 16244, 16214, 16182, 16147, 16110, 16069, 16026, 15980, 15931, 15880,
+ 15826, 15769, 15709, 15647, 15582, 15515, 15444, 15371, 15296, 15218,
+ 15137, 15053, 14968, 14879, 14788, 14694, 14598, 14500, 14399, 14295,
+ 14189, 14081, 13970, 13856, 13741, 13623, 13502, 13380, 13255, 13128,
+ 12998, 12867, 12733, 12597, 12458, 12318, 12176, 12031, 11885, 11736,
+ 11585, 11433, 11278, 11121, 10963, 10803, 10641, 10477, 10311, 10143,
+ 9974, 9803, 9630, 9456, 9280, 9102, 8923, 8743, 8561, 8377,
+ 8192, 8006, 7818, 7629, 7438, 7246, 7053, 6859, 6664, 6467,
+ 6270, 6071, 5872, 5671, 5469, 5266, 5063, 4859, 4653, 4447,
+ 4240, 4033, 3825, 3616, 3406, 3196, 2986, 2775, 2563, 2351,
+ 2139, 1926, 1713, 1499, 1285, 1072, 857, 643, 429, 214,
+ 0, -214, -429, -643, -857, -1072, -1285, -1499, -1713, -1926,
+ -2139, -2351, -2563, -2775, -2986, -3196, -3406, -3616, -3825, -4033,
+ -4240, -4447, -4653, -4859, -5063, -5266, -5469, -5671, -5872, -6071,
+ -6270, -6467, -6664, -6859, -7053, -7246, -7438, -7629, -7818, -8006,
+ -8192, -8377, -8561, -8743, -8923, -9102, -9280, -9456, -9630, -9803,
+ -9974, -10143, -10311, -10477, -10641, -10803, -10963, -11121, -11278, -11433,
+ -11585, -11736, -11885, -12031, -12176, -12318, -12458, -12597, -12733,
+ -12867, -12998, -13128, -13255, -13380, -13502, -13623, -13741, -13856,
+ -13970, -14081, -14189, -14295, -14399, -14500, -14598, -14694, -14788,
+ -14879, -14968, -15053, -15137, -15218, -15296, -15371, -15444, -15515,
+ -15582, -15647, -15709, -15769, -15826, -15880, -15931, -15980, -16026,
+ -16069, -16110, -16147, -16182, -16214, -16244, -16270, -16294, -16315,
+ -16333, -16349, -16362, -16371, -16378, -16383
+};
+
+/* Sine table 1 in Q14. */
+const int16_t WebRtcIsacfix_kSinTab1[FRAMESAMPLES/2] = {
+ 0, 214, 429, 643, 857, 1072, 1285, 1499, 1713, 1926,
+ 2139, 2351, 2563, 2775, 2986, 3196, 3406, 3616, 3825, 4033,
+ 4240, 4447, 4653, 4859, 5063, 5266, 5469, 5671, 5872, 6071,
+ 6270, 6467, 6664, 6859, 7053, 7246, 7438, 7629, 7818, 8006,
+ 8192, 8377, 8561, 8743, 8923, 9102, 9280, 9456, 9630, 9803,
+ 9974, 10143, 10311, 10477, 10641, 10803, 10963, 11121, 11278, 11433,
+ 11585, 11736, 11885, 12031, 12176, 12318, 12458, 12597, 12733, 12867,
+ 12998, 13128, 13255, 13380, 13502, 13623, 13741, 13856, 13970, 14081,
+ 14189, 14295, 14399, 14500, 14598, 14694, 14788, 14879, 14968, 15053,
+ 15137, 15218, 15296, 15371, 15444, 15515, 15582, 15647, 15709, 15769,
+ 15826, 15880, 15931, 15980, 16026, 16069, 16110, 16147, 16182, 16214,
+ 16244, 16270, 16294, 16315, 16333, 16349, 16362, 16371, 16378, 16383,
+ 16384, 16383, 16378, 16371, 16362, 16349, 16333, 16315, 16294, 16270,
+ 16244, 16214, 16182, 16147, 16110, 16069, 16026, 15980, 15931, 15880,
+ 15826, 15769, 15709, 15647, 15582, 15515, 15444, 15371, 15296, 15218,
+ 15137, 15053, 14968, 14879, 14788, 14694, 14598, 14500, 14399, 14295,
+ 14189, 14081, 13970, 13856, 13741, 13623, 13502, 13380, 13255, 13128,
+ 12998, 12867, 12733, 12597, 12458, 12318, 12176, 12031, 11885, 11736,
+ 11585, 11433, 11278, 11121, 10963, 10803, 10641, 10477, 10311, 10143,
+ 9974, 9803, 9630, 9456, 9280, 9102, 8923, 8743, 8561, 8377,
+ 8192, 8006, 7818, 7629, 7438, 7246, 7053, 6859, 6664, 6467,
+ 6270, 6071, 5872, 5671, 5469, 5266, 5063, 4859, 4653, 4447,
+ 4240, 4033, 3825, 3616, 3406, 3196, 2986, 2775, 2563, 2351,
+ 2139, 1926, 1713, 1499, 1285, 1072, 857, 643, 429, 214
+};
+
+/* Sine table 2 in Q14. */
+const int16_t WebRtcIsacfix_kSinTab2[FRAMESAMPLES/4] = {
+ 16384, -16381, 16375, -16367, 16356, -16342, 16325, -16305, 16283, -16257,
+ 16229, -16199, 16165, -16129, 16090, -16048, 16003, -15956, 15906, -15853,
+ 15798, -15739, 15679, -15615, 15549, -15480, 15408, -15334, 15257, -15178,
+ 15095, -15011, 14924, -14834, 14741, -14647, 14549, -14449, 14347, -14242,
+ 14135, -14025, 13913, -13799, 13682, -13563, 13441, -13318, 13192, -13063,
+ 12933, -12800, 12665, -12528, 12389, -12247, 12104, -11958, 11810, -11661,
+ 11509, -11356, 11200, -11042, 10883, -10722, 10559, -10394, 10227, -10059,
+ 9889, -9717, 9543, -9368, 9191, -9013, 8833, -8652, 8469, -8285,
+ 8099, -7912, 7723, -7534, 7342, -7150, 6957, -6762, 6566, -6369,
+ 6171, -5971, 5771, -5570, 5368, -5165, 4961, -4756, 4550, -4344,
+ 4137, -3929, 3720, -3511, 3301, -3091, 2880, -2669, 2457, -2245,
+ 2032, -1819, 1606, -1392, 1179, -965, 750, -536, 322, -107
+};
+#endif
+
+#if defined(MIPS32_LE)
+/* Cosine table 2 in Q14. Used only on MIPS platforms. */
+const int16_t WebRtcIsacfix_kCosTab2[FRAMESAMPLES/4] = {
+ 107, -322, 536, -750, 965, -1179, 1392, -1606, 1819, -2032,
+ 2245, -2457, 2669, -2880, 3091, -3301, 3511, -3720, 3929, -4137,
+ 4344, -4550, 4756, -4961, 5165, -5368, 5570, -5771, 5971, -6171,
+ 6369, -6566, 6762, -6957, 7150, -7342, 7534, -7723, 7912, -8099,
+ 8285, -8469, 8652, -8833, 9013, -9191, 9368, -9543, 9717, -9889,
+ 10059, -10227, 10394, -10559, 10722, -10883, 11042, -11200, 11356, -11509,
+ 11661, -11810, 11958, -12104, 12247, -12389, 12528, -12665, 12800, -12933,
+ 13063, -13192, 13318, -13441, 13563, -13682, 13799, -13913, 14025, -14135,
+ 14242, -14347, 14449, -14549, 14647, -14741, 14834, -14924, 15011, -15095,
+ 15178, -15257, 15334, -15408, 15480, -15549, 15615, -15679, 15739, -15798,
+ 15853, -15906, 15956, -16003, 16048, -16090, 16129, -16165, 16199, -16229,
+ 16257, -16283, 16305, -16325, 16342, -16356, 16367, -16375, 16381, -16384
+};
+#endif
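
The first table pair matches Q14 samples of one half period, i.e. entry k equals round(2^14 * cos(pi * k / 240)) and round(2^14 * sin(pi * k / 240)); the second pair additionally carries an alternating sign and is not covered here. A hypothetical generator for the first pair, assuming FRAMESAMPLES == 480:

#include <math.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  int k;
  for (k = 0; k < 240; k++) {  // FRAMESAMPLES / 2 entries.
    double arg = M_PI * k / 240.0;
    int16_t c = (int16_t)floor(cos(arg) * 16384.0 + 0.5);
    int16_t s = (int16_t)floor(sin(arg) * 16384.0 + 0.5);
    printf("%6d, %6d\n", c, s);  // Reproduces kCosTab1 / kSinTab1.
  }
  return 0;
}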
diff --git a/modules/audio_coding/codecs/opus/opus_fec_test.cc b/modules/audio_coding/codecs/opus/opus_fec_test.cc
index fb4cb04f..ee027e80 100644
--- a/modules/audio_coding/codecs/opus/opus_fec_test.cc
+++ b/modules/audio_coding/codecs/opus/opus_fec_test.cc
@@ -32,8 +32,7 @@ struct mode {
};
const int kOpusBlockDurationMs = 20;
-const int kOpusInputSamplingKhz = 48;
-const int kOpusOutputSamplingKhz = 32;
+const int kOpusSamplingKhz = 48;
class OpusFecTest : public TestWithParam<coding_param> {
protected:
@@ -47,14 +46,8 @@ class OpusFecTest : public TestWithParam<coding_param> {
virtual void DecodeABlock(bool lost_previous, bool lost_current);
int block_duration_ms_;
- int input_sampling_khz_;
- int output_sampling_khz_;
-
- // Number of samples-per-channel in a frame.
- int input_length_sample_;
-
- // Expected output number of samples-per-channel in a frame.
- int output_length_sample_;
+ int sampling_khz_;
+ int block_length_sample_;
int channels_;
int bit_rate_;
@@ -91,7 +84,7 @@ void OpusFecTest::SetUp() {
// Allocate memory to contain the whole file.
in_data_.reset(new int16_t[loop_length_samples_ +
- input_length_sample_ * channels_]);
+ block_length_sample_ * channels_]);
// Copy the file into the buffer.
ASSERT_EQ(fread(&in_data_[0], sizeof(int16_t), loop_length_samples_, fp),
@@ -104,12 +97,12 @@ void OpusFecTest::SetUp() {
// beginning of the array. Audio frames cross the end of the excerpt always
// appear as a continuum of memory.
memcpy(&in_data_[loop_length_samples_], &in_data_[0],
- input_length_sample_ * channels_ * sizeof(int16_t));
+ block_length_sample_ * channels_ * sizeof(int16_t));
// Maximum number of bytes in output bitstream.
- max_bytes_ = input_length_sample_ * channels_ * sizeof(int16_t);
+ max_bytes_ = block_length_sample_ * channels_ * sizeof(int16_t);
- out_data_.reset(new int16_t[2 * output_length_sample_ * channels_]);
+ out_data_.reset(new int16_t[2 * block_length_sample_ * channels_]);
bit_stream_.reset(new uint8_t[max_bytes_]);
// Create encoder memory.
@@ -127,10 +120,8 @@ void OpusFecTest::TearDown() {
OpusFecTest::OpusFecTest()
: block_duration_ms_(kOpusBlockDurationMs),
- input_sampling_khz_(kOpusInputSamplingKhz),
- output_sampling_khz_(kOpusOutputSamplingKhz),
- input_length_sample_(block_duration_ms_ * input_sampling_khz_),
- output_length_sample_(block_duration_ms_ * output_sampling_khz_),
+ sampling_khz_(kOpusSamplingKhz),
+ block_length_sample_(block_duration_ms_ * sampling_khz_),
data_pointer_(0),
max_bytes_(0),
encoded_bytes_(0),
@@ -141,7 +132,7 @@ OpusFecTest::OpusFecTest()
void OpusFecTest::EncodeABlock() {
int16_t value = WebRtcOpus_Encode(opus_encoder_,
&in_data_[data_pointer_],
- input_length_sample_,
+ block_length_sample_,
max_bytes_, &bit_stream_[0]);
EXPECT_GT(value, 0);
@@ -162,7 +153,7 @@ void OpusFecTest::DecodeABlock(bool lost_previous, bool lost_current) {
} else {
value_1 = WebRtcOpus_DecodePlc(opus_decoder_, &out_data_[0], 1);
}
- EXPECT_EQ(output_length_sample_, value_1);
+ EXPECT_EQ(block_length_sample_, value_1);
}
if (!lost_current) {
@@ -171,7 +162,7 @@ void OpusFecTest::DecodeABlock(bool lost_previous, bool lost_current) {
encoded_bytes_,
&out_data_[value_1 * channels_],
&audio_type);
- EXPECT_EQ(output_length_sample_, value_2);
+ EXPECT_EQ(block_length_sample_, value_2);
}
}
@@ -224,7 +215,7 @@ TEST_P(OpusFecTest, RandomPacketLossTest) {
// |data_pointer_| is incremented and wrapped across
// |loop_length_samples_|.
- data_pointer_ = (data_pointer_ + input_length_sample_ * channels_) %
+ data_pointer_ = (data_pointer_ + block_length_sample_ * channels_) %
loop_length_samples_;
}
if (mode_set[i].fec) {
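
Since decoding now stays at 48 kHz end to end, one block length serves both the encode and decode paths; a minimal check of the arithmetic behind the new member, using the constants defined in this test:

#include <assert.h>

int main(void) {
  const int kOpusBlockDurationMs = 20;
  const int kOpusSamplingKhz = 48;
  // block_length_sample_ = 20 ms * 48 kHz = 960 samples per channel,
  // matching kWebRtcOpusDefaultFrameSize in opus_interface.c.
  assert(kOpusBlockDurationMs * kOpusSamplingKhz == 960);
  return 0;
}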
diff --git a/modules/audio_coding/codecs/opus/opus_interface.c b/modules/audio_coding/codecs/opus/opus_interface.c
index 24fc4fc4..ea535ea9 100644
--- a/modules/audio_coding/codecs/opus/opus_interface.c
+++ b/modules/audio_coding/codecs/opus/opus_interface.c
@@ -15,9 +15,6 @@
#include "opus.h"
-#include "webrtc/common_audio/signal_processing/resample_by_2_internal.h"
-#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
-
enum {
/* Maximum supported frame size in WebRTC is 60 ms. */
kWebRtcOpusMaxEncodeFrameSizeMs = 60,
@@ -31,17 +28,6 @@ enum {
* milliseconds. */
kWebRtcOpusMaxFrameSizePerChannel = 48 * kWebRtcOpusMaxDecodeFrameSizeMs,
- /* Maximum sample count per frame is 48 kHz * maximum frame size in
- * milliseconds * maximum number of channels. */
- kWebRtcOpusMaxFrameSize = kWebRtcOpusMaxFrameSizePerChannel * 2,
-
- /* Maximum sample count per channel for output resampled to 32 kHz,
- * 32 kHz * maximum frame size in milliseconds. */
- kWebRtcOpusMaxFrameSizePerChannel32kHz = 32 * kWebRtcOpusMaxDecodeFrameSizeMs,
-
- /* Number of samples in resampler state. */
- kWebRtcOpusStateSize = 7,
-
/* Default frame size, 20 ms @ 48 kHz, in samples (for one channel). */
kWebRtcOpusDefaultFrameSize = 960,
};
@@ -143,8 +129,6 @@ int16_t WebRtcOpus_SetComplexity(OpusEncInst* inst, int32_t complexity) {
}
struct WebRtcOpusDecInst {
- int16_t state_48_32_left[8];
- int16_t state_48_32_right[8];
OpusDecoder* decoder_left;
OpusDecoder* decoder_right;
int prev_decoded_samples;
@@ -205,8 +189,6 @@ int WebRtcOpus_DecoderChannels(OpusDecInst* inst) {
int16_t WebRtcOpus_DecoderInitNew(OpusDecInst* inst) {
int error = opus_decoder_ctl(inst->decoder_left, OPUS_RESET_STATE);
if (error == OPUS_OK) {
- memset(inst->state_48_32_left, 0, sizeof(inst->state_48_32_left));
- memset(inst->state_48_32_right, 0, sizeof(inst->state_48_32_right));
return 0;
}
return -1;
@@ -215,7 +197,6 @@ int16_t WebRtcOpus_DecoderInitNew(OpusDecInst* inst) {
int16_t WebRtcOpus_DecoderInit(OpusDecInst* inst) {
int error = opus_decoder_ctl(inst->decoder_left, OPUS_RESET_STATE);
if (error == OPUS_OK) {
- memset(inst->state_48_32_left, 0, sizeof(inst->state_48_32_left));
return 0;
}
return -1;
@@ -224,7 +205,6 @@ int16_t WebRtcOpus_DecoderInit(OpusDecInst* inst) {
int16_t WebRtcOpus_DecoderInitSlave(OpusDecInst* inst) {
int error = opus_decoder_ctl(inst->decoder_right, OPUS_RESET_STATE);
if (error == OPUS_OK) {
- memset(inst->state_48_32_right, 0, sizeof(inst->state_48_32_right));
return 0;
}
return -1;
@@ -267,124 +247,29 @@ static int DecodeFec(OpusDecoder* inst, const int16_t* encoded,
return -1;
}
-/* Resample from 48 to 32 kHz. Length of state is assumed to be
- * kWebRtcOpusStateSize (7).
- */
-static int WebRtcOpus_Resample48to32(const int16_t* samples_in, int length,
- int16_t* state, int16_t* samples_out) {
- int i;
- int blocks;
- int16_t output_samples;
- int32_t buffer32[kWebRtcOpusMaxFrameSizePerChannel + kWebRtcOpusStateSize];
-
- /* Resample from 48 kHz to 32 kHz. */
- for (i = 0; i < kWebRtcOpusStateSize; i++) {
- buffer32[i] = state[i];
- state[i] = samples_in[length - kWebRtcOpusStateSize + i];
- }
- for (i = 0; i < length; i++) {
- buffer32[kWebRtcOpusStateSize + i] = samples_in[i];
- }
- /* Resampling 3 samples to 2. Function divides the input in |blocks| number
- * of 3-sample groups, and output is |blocks| number of 2-sample groups.
- * When this is removed, the compensation in WebRtcOpus_DurationEst should be
- * removed too. */
- blocks = length / 3;
- WebRtcSpl_Resample48khzTo32khz(buffer32, buffer32, blocks);
- output_samples = (int16_t) (blocks * 2);
- WebRtcSpl_VectorBitShiftW32ToW16(samples_out, output_samples, buffer32, 15);
-
- return output_samples;
-}
-
-static int WebRtcOpus_DeInterleaveResample(OpusDecInst* inst, int16_t* input,
- int sample_pairs, int16_t* output) {
- int i;
- int16_t buffer_left[kWebRtcOpusMaxFrameSizePerChannel];
- int16_t buffer_right[kWebRtcOpusMaxFrameSizePerChannel];
- int16_t buffer_out[kWebRtcOpusMaxFrameSizePerChannel32kHz];
- int resampled_samples;
-
- /* De-interleave the signal in left and right channel. */
- for (i = 0; i < sample_pairs; i++) {
- /* Take every second sample, starting at the first sample. */
- buffer_left[i] = input[i * 2];
- buffer_right[i] = input[i * 2 + 1];
- }
-
- /* Resample from 48 kHz to 32 kHz for left channel. */
- resampled_samples = WebRtcOpus_Resample48to32(
- buffer_left, sample_pairs, inst->state_48_32_left, buffer_out);
-
- /* Add samples interleaved to output vector. */
- for (i = 0; i < resampled_samples; i++) {
- output[i * 2] = buffer_out[i];
- }
-
- /* Resample from 48 kHz to 32 kHz for right channel. */
- resampled_samples = WebRtcOpus_Resample48to32(
- buffer_right, sample_pairs, inst->state_48_32_right, buffer_out);
-
- /* Add samples interleaved to output vector. */
- for (i = 0; i < resampled_samples; i++) {
- output[i * 2 + 1] = buffer_out[i];
- }
-
- return resampled_samples;
-}
-
int16_t WebRtcOpus_DecodeNew(OpusDecInst* inst, const uint8_t* encoded,
int16_t encoded_bytes, int16_t* decoded,
int16_t* audio_type) {
- /* |buffer| is big enough for 120 ms (the largest Opus packet size) of stereo
- * audio at 48 kHz. */
- int16_t buffer[kWebRtcOpusMaxFrameSize];
int16_t* coded = (int16_t*)encoded;
int decoded_samples;
- int resampled_samples;
-
- /* If mono case, just do a regular call to the decoder.
- * If stereo, we need to de-interleave the stereo output into blocks with
- * left and right channel. Each block is resampled to 32 kHz, and then
- * interleaved again. */
- /* Decode to a temporary buffer. */
decoded_samples = DecodeNative(inst->decoder_left, coded, encoded_bytes,
kWebRtcOpusMaxFrameSizePerChannel,
- buffer, audio_type);
+ decoded, audio_type);
if (decoded_samples < 0) {
return -1;
}
- if (inst->channels == 2) {
- /* De-interleave and resample. */
- resampled_samples = WebRtcOpus_DeInterleaveResample(inst,
- buffer,
- decoded_samples,
- decoded);
- } else {
- /* Resample from 48 kHz to 32 kHz. Filter state memory for left channel is
- * used for mono signals. */
- resampled_samples = WebRtcOpus_Resample48to32(buffer,
- decoded_samples,
- inst->state_48_32_left,
- decoded);
- }
-
/* Update decoded sample memory, to be used by the PLC in case of losses. */
inst->prev_decoded_samples = decoded_samples;
- return resampled_samples;
+ return decoded_samples;
}
int16_t WebRtcOpus_Decode(OpusDecInst* inst, const int16_t* encoded,
int16_t encoded_bytes, int16_t* decoded,
int16_t* audio_type) {
- /* |buffer16| is big enough for 120 ms (the largestOpus packet size) of
- * stereo audio at 48 kHz. */
- int16_t buffer16[kWebRtcOpusMaxFrameSize];
int decoded_samples;
- int16_t output_samples;
int i;
/* If mono case, just do a regular call to the decoder.
@@ -393,120 +278,82 @@ int16_t WebRtcOpus_Decode(OpusDecInst* inst, const int16_t* encoded,
* This is to make stereo work with the current setup of NetEQ, which
* requires two calls to the decoder to produce stereo. */
- /* Decode to a temporary buffer. */
decoded_samples = DecodeNative(inst->decoder_left, encoded, encoded_bytes,
- kWebRtcOpusMaxFrameSizePerChannel, buffer16,
+ kWebRtcOpusMaxFrameSizePerChannel, decoded,
audio_type);
if (decoded_samples < 0) {
return -1;
}
if (inst->channels == 2) {
/* The parameter |decoded_samples| holds the number of sample pairs, in
- * case of stereo. Number of samples in |buffer16| equals |decoded_samples|
+ * case of stereo. Number of samples in |decoded| equals |decoded_samples|
* times 2. */
for (i = 0; i < decoded_samples; i++) {
/* Take every second sample, starting at the first sample. This gives
* the left channel. */
- buffer16[i] = buffer16[i * 2];
+ decoded[i] = decoded[i * 2];
}
}
- /* Resample from 48 kHz to 32 kHz. */
- output_samples = WebRtcOpus_Resample48to32(buffer16, decoded_samples,
- inst->state_48_32_left, decoded);
-
/* Update decoded sample memory, to be used by the PLC in case of losses. */
inst->prev_decoded_samples = decoded_samples;
- return output_samples;
+ return decoded_samples;
}
int16_t WebRtcOpus_DecodeSlave(OpusDecInst* inst, const int16_t* encoded,
int16_t encoded_bytes, int16_t* decoded,
int16_t* audio_type) {
- /* |buffer16| is big enough for 120 ms (the largestOpus packet size) of
- * stereo audio at 48 kHz. */
- int16_t buffer16[kWebRtcOpusMaxFrameSize];
int decoded_samples;
- int16_t output_samples;
int i;
- /* Decode to a temporary buffer. */
decoded_samples = DecodeNative(inst->decoder_right, encoded, encoded_bytes,
- kWebRtcOpusMaxFrameSizePerChannel, buffer16,
+ kWebRtcOpusMaxFrameSizePerChannel, decoded,
audio_type);
if (decoded_samples < 0) {
return -1;
}
if (inst->channels == 2) {
/* The parameter |decoded_samples| holds the number of sample pairs, in
- * case of stereo. Number of samples in |buffer16| equals |decoded_samples|
+ * case of stereo. Number of samples in |decoded| equals |decoded_samples|
* times 2. */
for (i = 0; i < decoded_samples; i++) {
/* Take every second sample, starting at the second sample. This gives
* the right channel. */
- buffer16[i] = buffer16[i * 2 + 1];
+ decoded[i] = decoded[i * 2 + 1];
}
} else {
/* Decode slave should never be called for mono packets. */
return -1;
}
- /* Resample from 48 kHz to 32 kHz. */
- output_samples = WebRtcOpus_Resample48to32(buffer16, decoded_samples,
- inst->state_48_32_right, decoded);
- return output_samples;
+ return decoded_samples;
}
int16_t WebRtcOpus_DecodePlc(OpusDecInst* inst, int16_t* decoded,
int16_t number_of_lost_frames) {
- int16_t buffer[kWebRtcOpusMaxFrameSize];
int16_t audio_type = 0;
int decoded_samples;
- int resampled_samples;
int plc_samples;
- /* If mono case, just do a regular call to the plc function, before
- * resampling.
- * If stereo, we need to de-interleave the stereo output into blocks with
- * left and right channel. Each block is resampled to 32 kHz, and then
- * interleaved again. */
-
- /* Decode to a temporary buffer. The number of samples we ask for is
- * |number_of_lost_frames| times |prev_decoded_samples_|. Limit the number
- * of samples to maximum |kWebRtcOpusMaxFrameSizePerChannel|. */
+ /* The number of samples we ask for is |number_of_lost_frames| times
+ * |prev_decoded_samples_|. Limit the number of samples to maximum
+ * |kWebRtcOpusMaxFrameSizePerChannel|. */
plc_samples = number_of_lost_frames * inst->prev_decoded_samples;
plc_samples = (plc_samples <= kWebRtcOpusMaxFrameSizePerChannel) ?
plc_samples : kWebRtcOpusMaxFrameSizePerChannel;
decoded_samples = DecodeNative(inst->decoder_left, NULL, 0, plc_samples,
- buffer, &audio_type);
+ decoded, &audio_type);
if (decoded_samples < 0) {
return -1;
}
- if (inst->channels == 2) {
- /* De-interleave and resample. */
- resampled_samples = WebRtcOpus_DeInterleaveResample(inst,
- buffer,
- decoded_samples,
- decoded);
- } else {
- /* Resample from 48 kHz to 32 kHz. Filter state memory for left channel is
- * used for mono signals. */
- resampled_samples = WebRtcOpus_Resample48to32(buffer,
- decoded_samples,
- inst->state_48_32_left,
- decoded);
- }
-
- return resampled_samples;
+ return decoded_samples;
}
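
The PLC request size is |number_of_lost_frames| times |prev_decoded_samples|, clamped to the per-channel maximum. Assuming the 120 ms limit implied by the removed comments (48 kHz * 120 ms = 5760 samples per channel), the clamp plays out like this:

    /* Worked example of the PLC sample-count clamp above. */
    int prev_decoded_samples = 960;  /* Last good frame was 20 ms. */
    int max_per_channel = 5760;      /* Assumed: 120 ms at 48 kHz. */
    int lost_frames = 3;
    int plc_samples = lost_frames * prev_decoded_samples;  /* 2880. */
    if (plc_samples > max_per_channel)
      plc_samples = max_per_channel;  /* Would clamp at 7 or more frames. */
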
int16_t WebRtcOpus_DecodePlcMaster(OpusDecInst* inst, int16_t* decoded,
int16_t number_of_lost_frames) {
- int16_t buffer[kWebRtcOpusMaxFrameSize];
int decoded_samples;
- int resampled_samples;
int16_t audio_type = 0;
int plc_samples;
int i;
@@ -517,42 +364,35 @@ int16_t WebRtcOpus_DecodePlcMaster(OpusDecInst* inst, int16_t* decoded,
* output. This is to make stereo work with the current setup of NetEQ, which
* requires two calls to the decoder to produce stereo. */
- /* Decode to a temporary buffer. The number of samples we ask for is
- * |number_of_lost_frames| times |prev_decoded_samples_|. Limit the number
- * of samples to maximum |kWebRtcOpusMaxFrameSizePerChannel|. */
+ /* The number of samples we ask for is |number_of_lost_frames| times
+ * |prev_decoded_samples_|. Limit the number of samples to maximum
+ * |kWebRtcOpusMaxFrameSizePerChannel|. */
plc_samples = number_of_lost_frames * inst->prev_decoded_samples;
plc_samples = (plc_samples <= kWebRtcOpusMaxFrameSizePerChannel) ?
plc_samples : kWebRtcOpusMaxFrameSizePerChannel;
decoded_samples = DecodeNative(inst->decoder_left, NULL, 0, plc_samples,
- buffer, &audio_type);
+ decoded, &audio_type);
if (decoded_samples < 0) {
return -1;
}
if (inst->channels == 2) {
/* The parameter |decoded_samples| holds the number of sample pairs, in
- * case of stereo. The original number of samples in |buffer| equals
+ * case of stereo. The original number of samples in |decoded| equals
* |decoded_samples| times 2. */
for (i = 0; i < decoded_samples; i++) {
/* Take every second sample, starting at the first sample. This gives
* the left channel. */
- buffer[i] = buffer[i * 2];
+ decoded[i] = decoded[i * 2];
}
}
- /* Resample from 48 kHz to 32 kHz for left channel. */
- resampled_samples = WebRtcOpus_Resample48to32(buffer,
- decoded_samples,
- inst->state_48_32_left,
- decoded);
- return resampled_samples;
+ return decoded_samples;
}
int16_t WebRtcOpus_DecodePlcSlave(OpusDecInst* inst, int16_t* decoded,
int16_t number_of_lost_frames) {
- int16_t buffer[kWebRtcOpusMaxFrameSize];
int decoded_samples;
- int resampled_samples;
int16_t audio_type = 0;
int plc_samples;
int i;
@@ -563,44 +403,35 @@ int16_t WebRtcOpus_DecodePlcSlave(OpusDecInst* inst, int16_t* decoded,
return -1;
}
- /* Decode to a temporary buffer. The number of samples we ask for is
- * |number_of_lost_frames| times |prev_decoded_samples_|. Limit the number
- * of samples to maximum |kWebRtcOpusMaxFrameSizePerChannel|. */
+ /* The number of samples we ask for is |number_of_lost_frames| times
+ * |prev_decoded_samples_|. Limit the number of samples to maximum
+ * |kWebRtcOpusMaxFrameSizePerChannel|. */
plc_samples = number_of_lost_frames * inst->prev_decoded_samples;
plc_samples = (plc_samples <= kWebRtcOpusMaxFrameSizePerChannel)
? plc_samples : kWebRtcOpusMaxFrameSizePerChannel;
decoded_samples = DecodeNative(inst->decoder_right, NULL, 0, plc_samples,
- buffer, &audio_type);
+ decoded, &audio_type);
if (decoded_samples < 0) {
return -1;
}
/* The parameter |decoded_samples| holds the number of sample pairs.
- * The original number of samples in |buffer| equals |decoded_samples|
+ * The original number of samples in |decoded| equals |decoded_samples|
* times 2. */
for (i = 0; i < decoded_samples; i++) {
/* Take every second sample, starting at the second sample. This gives
* the right channel. */
- buffer[i] = buffer[i * 2 + 1];
+ decoded[i] = decoded[i * 2 + 1];
}
- /* Resample from 48 kHz to 32 kHz for left channel. */
- resampled_samples = WebRtcOpus_Resample48to32(buffer,
- decoded_samples,
- inst->state_48_32_right,
- decoded);
- return resampled_samples;
+ return decoded_samples;
}
int16_t WebRtcOpus_DecodeFec(OpusDecInst* inst, const uint8_t* encoded,
int16_t encoded_bytes, int16_t* decoded,
int16_t* audio_type) {
- /* |buffer| is big enough for 120 ms (the largest Opus packet size) of stereo
- * audio at 48 kHz. */
- int16_t buffer[kWebRtcOpusMaxFrameSize];
int16_t* coded = (int16_t*)encoded;
int decoded_samples;
- int resampled_samples;
int fec_samples;
if (WebRtcOpus_PacketHasFec(encoded, encoded_bytes) != 1) {
@@ -609,33 +440,13 @@ int16_t WebRtcOpus_DecodeFec(OpusDecInst* inst, const uint8_t* encoded,
fec_samples = opus_packet_get_samples_per_frame(encoded, 48000);
- /* Decode to a temporary buffer. */
decoded_samples = DecodeFec(inst->decoder_left, coded, encoded_bytes,
- fec_samples, buffer, audio_type);
+ fec_samples, decoded, audio_type);
if (decoded_samples < 0) {
return -1;
}
- /* If mono case, just do a regular call to the decoder.
- * If stereo, we need to de-interleave the stereo output into blocks with
- * left and right channel. Each block is resampled to 32 kHz, and then
- * interleaved again. */
- if (inst->channels == 2) {
- /* De-interleave and resample. */
- resampled_samples = WebRtcOpus_DeInterleaveResample(inst,
- buffer,
- decoded_samples,
- decoded);
- } else {
- /* Resample from 48 kHz to 32 kHz. Filter state memory for left channel is
- * used for mono signals. */
- resampled_samples = WebRtcOpus_Resample48to32(buffer,
- decoded_samples,
- inst->state_48_32_left,
- decoded);
- }
-
- return resampled_samples;
+ return decoded_samples;
}
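
The FEC path decodes the redundant copy of the previous frame that Opus embeds in the current packet; opus_packet_get_samples_per_frame() reports the frame duration directly in 48 kHz samples, so no compensation is needed here either. A hedged caller-side sketch, where |dec|, |packet| and |bytes| are as in the decode sketch above:

    int16_t fec_pcm[5760];  /* Assumed max: 120 ms mono at 48 kHz. */
    int16_t fec_type;

    if (WebRtcOpus_PacketHasFec(packet, bytes) == 1) {
      /* Recover the frame lost just before this packet arrived. */
      int16_t fec_samples = WebRtcOpus_DecodeFec(dec, packet, bytes, fec_pcm,
                                                 &fec_type);
      /* |fec_samples| is per channel at 48 kHz, e.g. 960 for 20 ms. */
    }
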
int WebRtcOpus_DurationEst(OpusDecInst* inst,
@@ -652,10 +463,6 @@ int WebRtcOpus_DurationEst(OpusDecInst* inst,
/* Invalid payload duration. */
return 0;
}
- /* Compensate for the down-sampling from 48 kHz to 32 kHz.
- * This should be removed when the resampling in WebRtcOpus_Decode is
- * removed. */
- samples = samples * 2 / 3;
return samples;
}
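
Dropping the 2/3 compensation means the duration estimate is now expressed in 48 kHz samples, matching the RTP clock rate. For a 20 ms packet:

    /* Duration estimate for a 20 ms packet, after and before this change. */
    int samples_now = 48 * 20;                 /* 960, at the 48 kHz clock. */
    int samples_before = samples_now * 2 / 3;  /* 640, at the old 32 kHz. */
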
@@ -671,10 +478,6 @@ int WebRtcOpus_FecDurationEst(const uint8_t* payload,
/* Invalid payload duration. */
return 0;
}
- /* Compensate for the down-sampling from 48 kHz to 32 kHz.
- * This should be removed when the resampling in WebRtcOpus_Decode is
- * removed. */
- samples = samples * 2 / 3;
return samples;
}
diff --git a/modules/audio_coding/codecs/opus/opus_speed_test.cc b/modules/audio_coding/codecs/opus/opus_speed_test.cc
index 16099c6d..e2439cf5 100644
--- a/modules/audio_coding/codecs/opus/opus_speed_test.cc
+++ b/modules/audio_coding/codecs/opus/opus_speed_test.cc
@@ -18,8 +18,7 @@ using ::testing::ValuesIn;
namespace webrtc {
static const int kOpusBlockDurationMs = 20;
-static const int kOpusInputSamplingKhz = 48;
-static const int kOpustOutputSamplingKhz = 32;
+static const int kOpusSamplingKhz = 48;
class OpusSpeedTest : public AudioCodecSpeedTest {
protected:
@@ -36,8 +35,8 @@ class OpusSpeedTest : public AudioCodecSpeedTest {
OpusSpeedTest::OpusSpeedTest()
: AudioCodecSpeedTest(kOpusBlockDurationMs,
- kOpusInputSamplingKhz,
- kOpustOutputSamplingKhz),
+ kOpusSamplingKhz,
+ kOpusSamplingKhz),
opus_encoder_(NULL),
opus_decoder_(NULL) {
}
diff --git a/modules/audio_coding/codecs/opus/opus_unittest.cc b/modules/audio_coding/codecs/opus/opus_unittest.cc
index ed876cd1..2ec77a53 100644
--- a/modules/audio_coding/codecs/opus/opus_unittest.cc
+++ b/modules/audio_coding/codecs/opus/opus_unittest.cc
@@ -19,9 +19,13 @@ struct WebRtcOpusDecInst;
namespace webrtc {
// Number of samples in a 60 ms stereo frame, sampled at 48 kHz.
-const int kOpusNumberOfSamples = 480 * 6 * 2;
+const int kOpusMaxFrameSamples = 48 * 60 * 2;
// Maximum number of bytes in output bitstream.
const size_t kMaxBytes = 1000;
+// Number of samples-per-channel in a 20 ms frame, sampled at 48 kHz.
+const int kOpus20msFrameSamples = 48 * 20;
+// Number of samples-per-channel in a 10 ms frame, sampled at 48 kHz.
+const int kOpus10msFrameSamples = 48 * 10;
class OpusTest : public ::testing::Test {
protected:
@@ -35,8 +39,8 @@ class OpusTest : public ::testing::Test {
WebRtcOpusDecInst* opus_stereo_decoder_;
WebRtcOpusDecInst* opus_stereo_decoder_new_;
- int16_t speech_data_[kOpusNumberOfSamples];
- int16_t output_data_[kOpusNumberOfSamples];
+ int16_t speech_data_[kOpusMaxFrameSamples];
+ int16_t output_data_[kOpusMaxFrameSamples];
uint8_t bitstream_[kMaxBytes];
};
@@ -50,17 +54,14 @@ OpusTest::OpusTest()
}
void OpusTest::SetUp() {
- // Read some samples from a speech file, to be used in the encode test.
- // In this test we do not care that the sampling frequency of the file is
- // really 32000 Hz. We pretend that it is 48000 Hz.
FILE* input_file;
const std::string file_name =
- webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm");
+ webrtc::test::ResourcePath("audio_coding/speech_mono_32_48kHz", "pcm");
input_file = fopen(file_name.c_str(), "rb");
ASSERT_TRUE(input_file != NULL);
- ASSERT_EQ(kOpusNumberOfSamples,
+ ASSERT_EQ(kOpusMaxFrameSamples,
static_cast<int32_t>(fread(speech_data_, sizeof(int16_t),
- kOpusNumberOfSamples, input_file)));
+ kOpusMaxFrameSamples, input_file)));
fclose(input_file);
input_file = NULL;
}
@@ -114,21 +115,24 @@ TEST_F(OpusTest, OpusEncodeDecodeMono) {
// Encode & decode.
int16_t encoded_bytes;
int16_t audio_type;
- int16_t output_data_decode_new[kOpusNumberOfSamples];
- int16_t output_data_decode[kOpusNumberOfSamples];
+ int16_t output_data_decode_new[kOpusMaxFrameSamples];
+ int16_t output_data_decode[kOpusMaxFrameSamples];
int16_t* coded = reinterpret_cast<int16_t*>(bitstream_);
- encoded_bytes = WebRtcOpus_Encode(opus_mono_encoder_, speech_data_, 960,
- kMaxBytes, bitstream_);
- EXPECT_EQ(640, WebRtcOpus_DecodeNew(opus_mono_decoder_new_, bitstream_,
- encoded_bytes, output_data_decode_new,
- &audio_type));
- EXPECT_EQ(640, WebRtcOpus_Decode(opus_mono_decoder_, coded,
- encoded_bytes, output_data_decode,
- &audio_type));
+ encoded_bytes = WebRtcOpus_Encode(opus_mono_encoder_, speech_data_,
+ kOpus20msFrameSamples, kMaxBytes,
+ bitstream_);
+ EXPECT_EQ(kOpus20msFrameSamples,
+ WebRtcOpus_DecodeNew(opus_mono_decoder_new_, bitstream_,
+ encoded_bytes, output_data_decode_new,
+ &audio_type));
+ EXPECT_EQ(kOpus20msFrameSamples,
+ WebRtcOpus_Decode(opus_mono_decoder_, coded,
+ encoded_bytes, output_data_decode,
+ &audio_type));
// Data in |output_data_decode_new| should be the same as in
// |output_data_decode|.
- for (int i = 0; i < 640; i++) {
+ for (int i = 0; i < kOpus20msFrameSamples; i++) {
EXPECT_EQ(output_data_decode_new[i], output_data_decode[i]);
}
@@ -154,26 +158,30 @@ TEST_F(OpusTest, OpusEncodeDecodeStereo) {
// Encode & decode.
int16_t encoded_bytes;
int16_t audio_type;
- int16_t output_data_decode_new[kOpusNumberOfSamples];
- int16_t output_data_decode[kOpusNumberOfSamples];
- int16_t output_data_decode_slave[kOpusNumberOfSamples];
+ int16_t output_data_decode_new[kOpusMaxFrameSamples];
+ int16_t output_data_decode[kOpusMaxFrameSamples];
+ int16_t output_data_decode_slave[kOpusMaxFrameSamples];
int16_t* coded = reinterpret_cast<int16_t*>(bitstream_);
- encoded_bytes = WebRtcOpus_Encode(opus_stereo_encoder_, speech_data_, 960,
- kMaxBytes, bitstream_);
- EXPECT_EQ(640, WebRtcOpus_DecodeNew(opus_stereo_decoder_new_, bitstream_,
- encoded_bytes, output_data_decode_new,
- &audio_type));
- EXPECT_EQ(640, WebRtcOpus_Decode(opus_stereo_decoder_, coded,
- encoded_bytes, output_data_decode,
+ encoded_bytes = WebRtcOpus_Encode(opus_stereo_encoder_, speech_data_,
+ kOpus20msFrameSamples, kMaxBytes,
+ bitstream_);
+ EXPECT_EQ(kOpus20msFrameSamples,
+ WebRtcOpus_DecodeNew(opus_stereo_decoder_new_, bitstream_,
+ encoded_bytes, output_data_decode_new,
+ &audio_type));
+ EXPECT_EQ(kOpus20msFrameSamples,
+ WebRtcOpus_Decode(opus_stereo_decoder_, coded,
+ encoded_bytes, output_data_decode,
+ &audio_type));
+ EXPECT_EQ(kOpus20msFrameSamples,
+ WebRtcOpus_DecodeSlave(opus_stereo_decoder_, coded,
+ encoded_bytes, output_data_decode_slave,
&audio_type));
- EXPECT_EQ(640, WebRtcOpus_DecodeSlave(opus_stereo_decoder_, coded,
- encoded_bytes, output_data_decode_slave,
- &audio_type));
// Data in |output_data_decode_new| should be the same as in
// |output_data_decode| and |output_data_decode_slave| interleaved to a
// stereo signal.
- for (int i = 0; i < 640; i++) {
+ for (int i = 0; i < kOpus20msFrameSamples; i++) {
EXPECT_EQ(output_data_decode_new[i * 2], output_data_decode[i]);
EXPECT_EQ(output_data_decode_new[i * 2 + 1], output_data_decode_slave[i]);
}
@@ -234,26 +242,30 @@ TEST_F(OpusTest, OpusDecodeInit) {
// Encode & decode.
int16_t encoded_bytes;
int16_t audio_type;
- int16_t output_data_decode_new[kOpusNumberOfSamples];
- int16_t output_data_decode[kOpusNumberOfSamples];
- int16_t output_data_decode_slave[kOpusNumberOfSamples];
+ int16_t output_data_decode_new[kOpusMaxFrameSamples];
+ int16_t output_data_decode[kOpusMaxFrameSamples];
+ int16_t output_data_decode_slave[kOpusMaxFrameSamples];
int16_t* coded = reinterpret_cast<int16_t*>(bitstream_);
- encoded_bytes = WebRtcOpus_Encode(opus_stereo_encoder_, speech_data_, 960,
- kMaxBytes, bitstream_);
- EXPECT_EQ(640, WebRtcOpus_DecodeNew(opus_stereo_decoder_new_, bitstream_,
- encoded_bytes, output_data_decode_new,
- &audio_type));
- EXPECT_EQ(640, WebRtcOpus_Decode(opus_stereo_decoder_, coded,
- encoded_bytes, output_data_decode,
+ encoded_bytes = WebRtcOpus_Encode(opus_stereo_encoder_, speech_data_,
+ kOpus20msFrameSamples, kMaxBytes,
+ bitstream_);
+ EXPECT_EQ(kOpus20msFrameSamples,
+ WebRtcOpus_DecodeNew(opus_stereo_decoder_new_, bitstream_,
+ encoded_bytes, output_data_decode_new,
+ &audio_type));
+ EXPECT_EQ(kOpus20msFrameSamples,
+ WebRtcOpus_Decode(opus_stereo_decoder_, coded,
+ encoded_bytes, output_data_decode,
+ &audio_type));
+ EXPECT_EQ(kOpus20msFrameSamples,
+ WebRtcOpus_DecodeSlave(opus_stereo_decoder_, coded,
+ encoded_bytes, output_data_decode_slave,
&audio_type));
- EXPECT_EQ(640, WebRtcOpus_DecodeSlave(opus_stereo_decoder_, coded,
- encoded_bytes, output_data_decode_slave,
- &audio_type));
// Data in |output_data_decode_new| should be the same as in
// |output_data_decode| and |output_data_decode_slave| interleaved to a
// stereo signal.
- for (int i = 0; i < 640; i++) {
+ for (int i = 0; i < kOpus20msFrameSamples; i++) {
EXPECT_EQ(output_data_decode_new[i * 2], output_data_decode[i]);
EXPECT_EQ(output_data_decode_new[i * 2 + 1], output_data_decode_slave[i]);
}
@@ -262,20 +274,23 @@ TEST_F(OpusTest, OpusDecodeInit) {
EXPECT_EQ(0, WebRtcOpus_DecoderInit(opus_stereo_decoder_));
EXPECT_EQ(0, WebRtcOpus_DecoderInitSlave(opus_stereo_decoder_));
- EXPECT_EQ(640, WebRtcOpus_DecodeNew(opus_stereo_decoder_new_, bitstream_,
- encoded_bytes, output_data_decode_new,
- &audio_type));
- EXPECT_EQ(640, WebRtcOpus_Decode(opus_stereo_decoder_, coded,
- encoded_bytes, output_data_decode,
+ EXPECT_EQ(kOpus20msFrameSamples,
+ WebRtcOpus_DecodeNew(opus_stereo_decoder_new_, bitstream_,
+ encoded_bytes, output_data_decode_new,
+ &audio_type));
+ EXPECT_EQ(kOpus20msFrameSamples,
+ WebRtcOpus_Decode(opus_stereo_decoder_, coded,
+ encoded_bytes, output_data_decode,
+ &audio_type));
+ EXPECT_EQ(kOpus20msFrameSamples,
+ WebRtcOpus_DecodeSlave(opus_stereo_decoder_, coded,
+ encoded_bytes, output_data_decode_slave,
&audio_type));
- EXPECT_EQ(640, WebRtcOpus_DecodeSlave(opus_stereo_decoder_, coded,
- encoded_bytes, output_data_decode_slave,
- &audio_type));
// Data in |output_data_decode_new| should be the same as in
// |output_data_decode| and |output_data_decode_slave| interleaved to a
// stereo signal.
- for (int i = 0; i < 640; i++) {
+ for (int i = 0; i < kOpus20msFrameSamples; i++) {
EXPECT_EQ(output_data_decode_new[i * 2], output_data_decode[i]);
EXPECT_EQ(output_data_decode_new[i * 2 + 1], output_data_decode_slave[i]);
}
@@ -344,27 +359,31 @@ TEST_F(OpusTest, OpusDecodePlcMono) {
// Encode & decode.
int16_t encoded_bytes;
int16_t audio_type;
- int16_t output_data_decode_new[kOpusNumberOfSamples];
- int16_t output_data_decode[kOpusNumberOfSamples];
+ int16_t output_data_decode_new[kOpusMaxFrameSamples];
+ int16_t output_data_decode[kOpusMaxFrameSamples];
int16_t* coded = reinterpret_cast<int16_t*>(bitstream_);
- encoded_bytes = WebRtcOpus_Encode(opus_mono_encoder_, speech_data_, 960,
- kMaxBytes, bitstream_);
- EXPECT_EQ(640, WebRtcOpus_DecodeNew(opus_mono_decoder_new_, bitstream_,
- encoded_bytes, output_data_decode_new,
- &audio_type));
- EXPECT_EQ(640, WebRtcOpus_Decode(opus_mono_decoder_, coded,
- encoded_bytes, output_data_decode,
- &audio_type));
+ encoded_bytes = WebRtcOpus_Encode(opus_mono_encoder_, speech_data_,
+ kOpus20msFrameSamples, kMaxBytes,
+ bitstream_);
+ EXPECT_EQ(kOpus20msFrameSamples,
+ WebRtcOpus_DecodeNew(opus_mono_decoder_new_, bitstream_,
+ encoded_bytes, output_data_decode_new,
+ &audio_type));
+ EXPECT_EQ(kOpus20msFrameSamples,
+ WebRtcOpus_Decode(opus_mono_decoder_, coded,
+ encoded_bytes, output_data_decode,
+ &audio_type));
// Call decoder PLC for both versions of the decoder.
- int16_t plc_buffer[kOpusNumberOfSamples];
- int16_t plc_buffer_new[kOpusNumberOfSamples];
- EXPECT_EQ(640, WebRtcOpus_DecodePlcMaster(opus_mono_decoder_, plc_buffer, 1));
- EXPECT_EQ(640, WebRtcOpus_DecodePlc(opus_mono_decoder_new_,
- plc_buffer_new, 1));
+ int16_t plc_buffer[kOpusMaxFrameSamples];
+ int16_t plc_buffer_new[kOpusMaxFrameSamples];
+ EXPECT_EQ(kOpus20msFrameSamples,
+ WebRtcOpus_DecodePlcMaster(opus_mono_decoder_, plc_buffer, 1));
+ EXPECT_EQ(kOpus20msFrameSamples,
+ WebRtcOpus_DecodePlc(opus_mono_decoder_new_, plc_buffer_new, 1));
// Data in |plc_buffer| should be the same as in |plc_buffer_new|.
- for (int i = 0; i < 640; i++) {
+ for (int i = 0; i < kOpus20msFrameSamples; i++) {
EXPECT_EQ(plc_buffer[i], plc_buffer_new[i]);
}
@@ -391,36 +410,42 @@ TEST_F(OpusTest, OpusDecodePlcStereo) {
// Encode & decode.
int16_t encoded_bytes;
int16_t audio_type;
- int16_t output_data_decode_new[kOpusNumberOfSamples];
- int16_t output_data_decode[kOpusNumberOfSamples];
- int16_t output_data_decode_slave[kOpusNumberOfSamples];
+ int16_t output_data_decode_new[kOpusMaxFrameSamples];
+ int16_t output_data_decode[kOpusMaxFrameSamples];
+ int16_t output_data_decode_slave[kOpusMaxFrameSamples];
int16_t* coded = reinterpret_cast<int16_t*>(bitstream_);
- encoded_bytes = WebRtcOpus_Encode(opus_stereo_encoder_, speech_data_, 960,
- kMaxBytes, bitstream_);
- EXPECT_EQ(640, WebRtcOpus_DecodeNew(opus_stereo_decoder_new_, bitstream_,
- encoded_bytes, output_data_decode_new,
- &audio_type));
- EXPECT_EQ(640, WebRtcOpus_Decode(opus_stereo_decoder_, coded,
- encoded_bytes, output_data_decode,
+ encoded_bytes = WebRtcOpus_Encode(opus_stereo_encoder_, speech_data_,
+ kOpus20msFrameSamples, kMaxBytes,
+ bitstream_);
+ EXPECT_EQ(kOpus20msFrameSamples,
+ WebRtcOpus_DecodeNew(opus_stereo_decoder_new_, bitstream_,
+ encoded_bytes, output_data_decode_new,
+ &audio_type));
+ EXPECT_EQ(kOpus20msFrameSamples,
+ WebRtcOpus_Decode(opus_stereo_decoder_, coded,
+ encoded_bytes, output_data_decode,
+ &audio_type));
+ EXPECT_EQ(kOpus20msFrameSamples,
+ WebRtcOpus_DecodeSlave(opus_stereo_decoder_, coded,
+ encoded_bytes,
+ output_data_decode_slave,
&audio_type));
- EXPECT_EQ(640, WebRtcOpus_DecodeSlave(opus_stereo_decoder_, coded,
- encoded_bytes,
- output_data_decode_slave,
- &audio_type));
// Call decoder PLC for both versions of the decoder.
- int16_t plc_buffer_left[kOpusNumberOfSamples];
- int16_t plc_buffer_right[kOpusNumberOfSamples];
- int16_t plc_buffer_new[kOpusNumberOfSamples];
- EXPECT_EQ(640, WebRtcOpus_DecodePlcMaster(opus_stereo_decoder_,
- plc_buffer_left, 1));
- EXPECT_EQ(640, WebRtcOpus_DecodePlcSlave(opus_stereo_decoder_,
- plc_buffer_right, 1));
- EXPECT_EQ(640, WebRtcOpus_DecodePlc(opus_stereo_decoder_new_, plc_buffer_new,
- 1));
+ int16_t plc_buffer_left[kOpusMaxFrameSamples];
+ int16_t plc_buffer_right[kOpusMaxFrameSamples];
+ int16_t plc_buffer_new[kOpusMaxFrameSamples];
+ EXPECT_EQ(kOpus20msFrameSamples,
+ WebRtcOpus_DecodePlcMaster(opus_stereo_decoder_,
+ plc_buffer_left, 1));
+ EXPECT_EQ(kOpus20msFrameSamples,
+ WebRtcOpus_DecodePlcSlave(opus_stereo_decoder_,
+ plc_buffer_right, 1));
+ EXPECT_EQ(kOpus20msFrameSamples,
+ WebRtcOpus_DecodePlc(opus_stereo_decoder_new_, plc_buffer_new, 1));
// Data in |plc_buffer_left| and |plc_buffer_right| should be the same as the
// interleaved samples in |plc_buffer_new|.
- for (int i = 0, j = 0; i < 640; i++) {
+ for (int i = 0, j = 0; i < kOpus20msFrameSamples; i++) {
EXPECT_EQ(plc_buffer_left[i], plc_buffer_new[j++]);
EXPECT_EQ(plc_buffer_right[i], plc_buffer_new[j++]);
}
@@ -437,21 +462,23 @@ TEST_F(OpusTest, OpusDurationEstimation) {
EXPECT_EQ(0, WebRtcOpus_EncoderCreate(&opus_stereo_encoder_, 2));
EXPECT_EQ(0, WebRtcOpus_DecoderCreate(&opus_stereo_decoder_, 2));
- // Encode with different packet sizes (input 48 kHz, output in 32 kHz).
int16_t encoded_bytes;
// 10 ms.
- encoded_bytes = WebRtcOpus_Encode(opus_stereo_encoder_, speech_data_, 480,
- kMaxBytes, bitstream_);
- EXPECT_EQ(320, WebRtcOpus_DurationEst(opus_stereo_decoder_, bitstream_,
- encoded_bytes));
+ encoded_bytes = WebRtcOpus_Encode(opus_stereo_encoder_, speech_data_,
+ kOpus10msFrameSamples, kMaxBytes,
+ bitstream_);
+ EXPECT_EQ(kOpus10msFrameSamples,
+ WebRtcOpus_DurationEst(opus_stereo_decoder_, bitstream_,
+ encoded_bytes));
// 20 ms
- encoded_bytes = WebRtcOpus_Encode(opus_stereo_encoder_, speech_data_, 960,
- kMaxBytes, bitstream_);
- EXPECT_EQ(640, WebRtcOpus_DurationEst(opus_stereo_decoder_, bitstream_,
- encoded_bytes));
-
+ encoded_bytes = WebRtcOpus_Encode(opus_stereo_encoder_, speech_data_,
+ kOpus20msFrameSamples, kMaxBytes,
+ bitstream_);
+ EXPECT_EQ(kOpus20msFrameSamples,
+ WebRtcOpus_DurationEst(opus_stereo_decoder_, bitstream_,
+ encoded_bytes));
// Free memory.
EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_stereo_encoder_));
diff --git a/modules/audio_coding/main/acm2/audio_coding_module_impl.cc b/modules/audio_coding/main/acm2/audio_coding_module_impl.cc
index f2410b7d..26f5b542 100644
--- a/modules/audio_coding/main/acm2/audio_coding_module_impl.cc
+++ b/modules/audio_coding/main/acm2/audio_coding_module_impl.cc
@@ -1618,14 +1618,8 @@ int AudioCodingModuleImpl::ReceiveFrequency() const {
int codec_id = receiver_.last_audio_codec_id();
- int sample_rate_hz;
- if (codec_id < 0)
- sample_rate_hz = receiver_.current_sample_rate_hz();
- else
- sample_rate_hz = ACMCodecDB::database_[codec_id].plfreq;
-
- // TODO(tlegrand): Remove this option when we have full 48 kHz support.
- return (sample_rate_hz > 32000) ? 32000 : sample_rate_hz;
+ return codec_id < 0 ? receiver_.current_sample_rate_hz() :
+ ACMCodecDB::database_[codec_id].plfreq;
}
// Get current playout frequency.
diff --git a/modules/audio_coding/main/test/opus_test.cc b/modules/audio_coding/main/test/opus_test.cc
index 261eb613..398d59da 100644
--- a/modules/audio_coding/main/test/opus_test.cc
+++ b/modules/audio_coding/main/test/opus_test.cc
@@ -218,6 +218,8 @@ void OpusTest::Run(TestPackStereo* channel, int channels, int bitrate,
int written_samples = 0;
int read_samples = 0;
int decoded_samples = 0;
+ bool first_packet = true;
+ uint32_t start_time_stamp = 0;
channel->reset_payload_size();
counter_ = 0;
@@ -324,6 +326,10 @@ void OpusTest::Run(TestPackStereo* channel, int channels, int bitrate,
// Send data to the channel. "channel" will handle the loss simulation.
channel->SendData(kAudioFrameSpeech, payload_type_, rtp_timestamp_,
bitstream, bitstream_len_byte, NULL);
+ if (first_packet) {
+ first_packet = false;
+ start_time_stamp = rtp_timestamp_;
+ }
rtp_timestamp_ += frame_length;
read_samples += frame_length * channels;
}
@@ -344,9 +350,11 @@ void OpusTest::Run(TestPackStereo* channel, int channels, int bitrate,
// Write stand-alone speech to file.
out_file_standalone_.Write10MsData(out_audio, decoded_samples * channels);
- // Number of channels should be the same for both stand-alone and
- // ACM-decoding.
- EXPECT_EQ(audio_frame.num_channels_, channels);
+ if (audio_frame.timestamp_ > start_time_stamp) {
+ // Number of channels should be the same for both stand-alone and
+ // ACM-decoding.
+ EXPECT_EQ(audio_frame.num_channels_, channels);
+ }
decoded_samples = 0;
}
@@ -367,13 +375,13 @@ void OpusTest::OpenOutFile(int test_number) {
file_stream << webrtc::test::OutputPath() << "opustest_out_"
<< test_number << ".pcm";
file_name = file_stream.str();
- out_file_.Open(file_name, 32000, "wb");
+ out_file_.Open(file_name, 48000, "wb");
file_stream.str("");
file_name = file_stream.str();
file_stream << webrtc::test::OutputPath() << "opusstandalone_out_"
<< test_number << ".pcm";
file_name = file_stream.str();
- out_file_standalone_.Open(file_name, 32000, "wb");
+ out_file_standalone_.Open(file_name, 48000, "wb");
}
} // namespace webrtc
diff --git a/modules/audio_coding/neteq/audio_decoder.cc b/modules/audio_coding/neteq/audio_decoder.cc
index f539bb2e..0fdaa44b 100644
--- a/modules/audio_coding/neteq/audio_decoder.cc
+++ b/modules/audio_coding/neteq/audio_decoder.cc
@@ -162,7 +162,7 @@ int AudioDecoder::CodecSampleRateHz(NetEqDecoder codec_type) {
#ifdef WEBRTC_CODEC_OPUS
case kDecoderOpus:
case kDecoderOpus_2ch: {
- return 32000;
+ return 48000;
}
#endif
case kDecoderCNGswb48kHz: {
diff --git a/modules/audio_coding/neteq/audio_decoder_unittest.cc b/modules/audio_coding/neteq/audio_decoder_unittest.cc
index 05684ac7..687a733c 100644
--- a/modules/audio_coding/neteq/audio_decoder_unittest.cc
+++ b/modules/audio_coding/neteq/audio_decoder_unittest.cc
@@ -602,7 +602,7 @@ class AudioDecoderCeltStereoTest : public AudioDecoderTest {
class AudioDecoderOpusTest : public AudioDecoderTest {
protected:
AudioDecoderOpusTest() : AudioDecoderTest() {
- frame_size_ = 320;
+ frame_size_ = 480;
data_length_ = 10 * frame_size_;
decoder_ = new AudioDecoderOpus(kDecoderOpus);
assert(decoder_);
@@ -613,75 +613,69 @@ class AudioDecoderOpusTest : public AudioDecoderTest {
WebRtcOpus_EncoderFree(encoder_);
}
- virtual void InitEncoder() {}
-
- virtual int EncodeFrame(const int16_t* input, size_t input_len_samples,
- uint8_t* output) {
+ virtual void SetUp() OVERRIDE {
+ AudioDecoderTest::SetUp();
// Upsample from 32 to 48 kHz.
+ // Opus is a 48 kHz codec, but the input file is 32 kHz, so the data read
+ // in |AudioDecoderTest::SetUp| has to be upsampled.
+ // |AudioDecoderTest::SetUp| has read |data_length_| samples, which is more
+ // than necessary after upsampling; the tail of what was read is left
+ // unused, and the buffer is overwritten with the resampled data.
Resampler rs;
rs.Reset(32000, 48000, kResamplerSynchronous);
- const int max_resamp_len_samples = static_cast<int>(input_len_samples) *
- 3 / 2;
- int16_t* resamp_input = new int16_t[max_resamp_len_samples];
+ const int before_resamp_len_samples = static_cast<int>(data_length_) * 2
+ / 3;
+ int16_t* before_resamp_input = new int16_t[before_resamp_len_samples];
+ memcpy(before_resamp_input, input_,
+ sizeof(int16_t) * before_resamp_len_samples);
int resamp_len_samples;
- EXPECT_EQ(0, rs.Push(input, static_cast<int>(input_len_samples),
- resamp_input, max_resamp_len_samples,
+ EXPECT_EQ(0, rs.Push(before_resamp_input, before_resamp_len_samples,
+ input_, static_cast<int>(data_length_),
resamp_len_samples));
- EXPECT_EQ(max_resamp_len_samples, resamp_len_samples);
- int enc_len_bytes =
- WebRtcOpus_Encode(encoder_, resamp_input, resamp_len_samples,
- static_cast<int>(data_length_), output);
+ EXPECT_EQ(static_cast<int>(data_length_), resamp_len_samples);
+ delete[] before_resamp_input;
+ }
+
+ virtual void InitEncoder() {}
+
+ virtual int EncodeFrame(const int16_t* input, size_t input_len_samples,
+ uint8_t* output) OVERRIDE {
+ int enc_len_bytes = WebRtcOpus_Encode(encoder_, const_cast<int16_t*>(input),
+ static_cast<int16_t>(input_len_samples),
+ static_cast<int16_t>(data_length_), output);
EXPECT_GT(enc_len_bytes, 0);
- delete [] resamp_input;
return enc_len_bytes;
}
OpusEncInst* encoder_;
};
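
The arithmetic behind the in-place upsampling in SetUp(): of the |data_length_| samples read from the 32 kHz file, only the first two-thirds are consumed, because upsampling by 48/32 = 3/2 expands them back to exactly |data_length_| samples. With this test's numbers:

    #include <assert.h>
    #include <stddef.h>

    /* Length bookkeeping for the in-place 32 -> 48 kHz upsampling above. */
    int main(void) {
      size_t data_length = 10 * 480;        /* data_length_ = 10 * frame_size_. */
      size_t before = data_length * 2 / 3;  /* 3200 source samples consumed. */
      assert(before * 3 / 2 == data_length);  /* Output refills the buffer. */
      return 0;
    }
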
-class AudioDecoderOpusStereoTest : public AudioDecoderTest {
+class AudioDecoderOpusStereoTest : public AudioDecoderOpusTest {
protected:
- AudioDecoderOpusStereoTest() : AudioDecoderTest() {
+ AudioDecoderOpusStereoTest() : AudioDecoderOpusTest() {
channels_ = 2;
- frame_size_ = 320;
- data_length_ = 10 * frame_size_;
+ WebRtcOpus_EncoderFree(encoder_);
+ delete decoder_;
decoder_ = new AudioDecoderOpus(kDecoderOpus_2ch);
assert(decoder_);
WebRtcOpus_EncoderCreate(&encoder_, 2);
}
- ~AudioDecoderOpusStereoTest() {
- WebRtcOpus_EncoderFree(encoder_);
- }
-
- virtual void InitEncoder() {}
-
virtual int EncodeFrame(const int16_t* input, size_t input_len_samples,
- uint8_t* output) {
+ uint8_t* output) OVERRIDE {
// Create stereo by duplicating each sample in |input|.
const int input_stereo_samples = static_cast<int>(input_len_samples) * 2;
int16_t* input_stereo = new int16_t[input_stereo_samples];
for (size_t i = 0; i < input_len_samples; i++)
input_stereo[i * 2] = input_stereo[i * 2 + 1] = input[i];
- // Upsample from 32 to 48 kHz.
- Resampler rs;
- rs.Reset(32000, 48000, kResamplerSynchronousStereo);
- const int max_resamp_len_samples = input_stereo_samples * 3 / 2;
- int16_t* resamp_input = new int16_t[max_resamp_len_samples];
- int resamp_len_samples;
- EXPECT_EQ(0, rs.Push(input_stereo, input_stereo_samples, resamp_input,
- max_resamp_len_samples, resamp_len_samples));
- EXPECT_EQ(max_resamp_len_samples, resamp_len_samples);
- int enc_len_bytes =
- WebRtcOpus_Encode(encoder_, resamp_input, resamp_len_samples / 2,
- static_cast<int16_t>(data_length_), output);
+
+ int enc_len_bytes = WebRtcOpus_Encode(
+ encoder_, input_stereo, static_cast<int16_t>(input_len_samples),
+ static_cast<int16_t>(data_length_), output);
EXPECT_GT(enc_len_bytes, 0);
- delete [] resamp_input;
- delete [] input_stereo;
+ delete[] input_stereo;
return enc_len_bytes;
}
-
- OpusEncInst* encoder_;
};
TEST_F(AudioDecoderPcmUTest, EncodeDecode) {
@@ -871,11 +865,11 @@ TEST(AudioDecoder, CodecSampleRateHz) {
EXPECT_EQ(8000, AudioDecoder::CodecSampleRateHz(kDecoderCNGnb));
EXPECT_EQ(16000, AudioDecoder::CodecSampleRateHz(kDecoderCNGwb));
EXPECT_EQ(32000, AudioDecoder::CodecSampleRateHz(kDecoderCNGswb32kHz));
+ EXPECT_EQ(48000, AudioDecoder::CodecSampleRateHz(kDecoderOpus));
+ EXPECT_EQ(48000, AudioDecoder::CodecSampleRateHz(kDecoderOpus_2ch));
// TODO(tlegrand): Change 32000 to 48000 below once ACM has 48 kHz support.
EXPECT_EQ(32000, AudioDecoder::CodecSampleRateHz(kDecoderCNGswb48kHz));
EXPECT_EQ(-1, AudioDecoder::CodecSampleRateHz(kDecoderArbitrary));
- EXPECT_EQ(32000, AudioDecoder::CodecSampleRateHz(kDecoderOpus));
- EXPECT_EQ(32000, AudioDecoder::CodecSampleRateHz(kDecoderOpus_2ch));
#ifdef WEBRTC_CODEC_CELT
EXPECT_EQ(32000, AudioDecoder::CodecSampleRateHz(kDecoderCELT_32));
EXPECT_EQ(32000, AudioDecoder::CodecSampleRateHz(kDecoderCELT_32_2ch));
diff --git a/modules/audio_coding/neteq/payload_splitter_unittest.cc b/modules/audio_coding/neteq/payload_splitter_unittest.cc
index 5cde1bda..9d0aaa1d 100644
--- a/modules/audio_coding/neteq/payload_splitter_unittest.cc
+++ b/modules/audio_coding/neteq/payload_splitter_unittest.cc
@@ -743,7 +743,7 @@ TEST(FecPayloadSplitter, MixedPayload) {
// Check first packet.
packet = packet_list.front();
EXPECT_EQ(0, packet->header.payloadType);
- EXPECT_EQ(kBaseTimestamp - 20 * 32, packet->header.timestamp);
+ EXPECT_EQ(kBaseTimestamp - 20 * 48, packet->header.timestamp);
EXPECT_EQ(10, packet->payload_length);
EXPECT_FALSE(packet->primary);
delete [] packet->payload;
diff --git a/modules/audio_coding/neteq/test/neteq_opus_fec_quality_test.cc b/modules/audio_coding/neteq/test/neteq_opus_fec_quality_test.cc
index e8fd06a4..dee99b87 100644
--- a/modules/audio_coding/neteq/test/neteq_opus_fec_quality_test.cc
+++ b/modules/audio_coding/neteq/test/neteq_opus_fec_quality_test.cc
@@ -21,8 +21,7 @@ namespace webrtc {
namespace test {
static const int kOpusBlockDurationMs = 20;
-static const int kOpusInputSamplingKhz = 48;
-static const int kOpusOutputSamplingKhz = 32;
+static const int kOpusSamplingKhz = 48;
// Define switch for input file name.
static bool ValidateInFilename(const char* flagname, const string& value) {
@@ -128,8 +127,8 @@ class NetEqOpusFecQualityTest : public NetEqQualityTest {
};
NetEqOpusFecQualityTest::NetEqOpusFecQualityTest()
- : NetEqQualityTest(kOpusBlockDurationMs, kOpusInputSamplingKhz,
- kOpusOutputSamplingKhz,
+ : NetEqQualityTest(kOpusBlockDurationMs, kOpusSamplingKhz,
+ kOpusSamplingKhz,
(FLAGS_channels == 1) ? kDecoderOpus : kDecoderOpus_2ch,
FLAGS_channels,
FLAGS_in_filename,
diff --git a/modules/audio_coding/neteq/timestamp_scaler.cc b/modules/audio_coding/neteq/timestamp_scaler.cc
index 01890136..1809324b 100644
--- a/modules/audio_coding/neteq/timestamp_scaler.cc
+++ b/modules/audio_coding/neteq/timestamp_scaler.cc
@@ -48,8 +48,6 @@ uint32_t TimestampScaler::ToInternal(uint32_t external_timestamp,
denominator_ = 1;
break;
}
- case kDecoderOpus:
- case kDecoderOpus_2ch:
case kDecoderISACfb:
case kDecoderCNGswb48kHz: {
// Use timestamp scaling with factor 2/3 (32 kHz sample rate, but RTP
diff --git a/modules/audio_coding/neteq/timestamp_scaler_unittest.cc b/modules/audio_coding/neteq/timestamp_scaler_unittest.cc
index 8cbbfa39..1cbbf7f3 100644
--- a/modules/audio_coding/neteq/timestamp_scaler_unittest.cc
+++ b/modules/audio_coding/neteq/timestamp_scaler_unittest.cc
@@ -252,10 +252,14 @@ TEST(TimestampScaler, TestG722Reset) {
EXPECT_CALL(db, Die()); // Called when database object is deleted.
}
+// TODO(minyue): This test has become trivial since Opus no longer needs a
+// timestamp scaler, so it may be removed in the future. There is no harm in
+// keeping it, since it serves as a test case for the situation of a trivial
+// timestamp scaler.
TEST(TimestampScaler, TestOpusLargeStep) {
MockDecoderDatabase db;
DecoderDatabase::DecoderInfo info;
- info.codec_type = kDecoderOpus; // Uses a factor 2/3 scaling.
+ info.codec_type = kDecoderOpus;
static const uint8_t kRtpPayloadType = 17;
EXPECT_CALL(db, GetDecoderInfo(kRtpPayloadType))
.WillRepeatedly(Return(&info));
@@ -273,8 +277,7 @@ TEST(TimestampScaler, TestOpusLargeStep) {
scaler.ToInternal(external_timestamp, kRtpPayloadType));
// Scale back.
EXPECT_EQ(external_timestamp, scaler.ToExternal(internal_timestamp));
- // Internal timestamp should be incremented with twice the step.
- internal_timestamp += 2 * kStep / 3;
+ internal_timestamp += kStep;
}
EXPECT_CALL(db, Die()); // Called when database object is deleted.
@@ -283,7 +286,7 @@ TEST(TimestampScaler, TestOpusLargeStep) {
TEST(TimestampScaler, TestIsacFbLargeStep) {
MockDecoderDatabase db;
DecoderDatabase::DecoderInfo info;
- info.codec_type = kDecoderISACfb; // Uses a factor 2/3 scaling.
+ info.codec_type = kDecoderISACfb;
static const uint8_t kRtpPayloadType = 17;
EXPECT_CALL(db, GetDecoderInfo(kRtpPayloadType))
.WillRepeatedly(Return(&info));
@@ -301,7 +304,7 @@ TEST(TimestampScaler, TestIsacFbLargeStep) {
scaler.ToInternal(external_timestamp, kRtpPayloadType));
// Scale back.
EXPECT_EQ(external_timestamp, scaler.ToExternal(internal_timestamp));
- // Internal timestamp should be incremented with twice the step.
+ // Internal timestamp should be incremented by two-thirds of the step.
internal_timestamp += 2 * kStep / 3;
}
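
Both tests exercise the same rational mapping: internal = reference + (external - reference) * numerator / denominator. For 32 kHz codecs carried on a 48 kHz RTP clock the factor is 2/3, so an external step of 960 becomes an internal step of 640; Opus, now a true 48 kHz codec end to end, gets the identity mapping. A minimal sketch, assuming the reference-pair form the scaler uses:

    #include <stdint.h>

    /* Sketch of rational timestamp scaling. Anchoring at a reference pair
     * keeps unsigned wrap-around arithmetic correct. */
    typedef struct {
      uint32_t external_ref;
      uint32_t internal_ref;
      uint32_t numerator;   /* 2 for the 2/3 case; 1 for Opus after this CL. */
      uint32_t denominator; /* 3 for the 2/3 case; 1 for Opus after this CL. */
    } ScalerSketch;

    static uint32_t ToInternal(const ScalerSketch* s, uint32_t external) {
      uint32_t diff = external - s->external_ref;  /* Wrap-safe subtraction. */
      return s->internal_ref + diff * s->numerator / s->denominator;
    }
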
diff --git a/modules/audio_coding/neteq/tools/neteq_quality_test.cc b/modules/audio_coding/neteq/tools/neteq_quality_test.cc
index a80b1f88..83288557 100644
--- a/modules/audio_coding/neteq/tools/neteq_quality_test.cc
+++ b/modules/audio_coding/neteq/tools/neteq_quality_test.cc
@@ -158,7 +158,7 @@ bool NoLoss::Lost() {
return false;
}
-UniformLoss::UniformLoss(int loss_rate)
+UniformLoss::UniformLoss(double loss_rate)
: loss_rate_(loss_rate) {
}
@@ -204,8 +204,6 @@ void NetEqQualityTest::SetUp() {
// a full packet duration is drawn with a loss, |unit_loss_rate| fulfills
// (1 - unit_loss_rate) ^ (block_duration_ms_ / kPacketLossTimeUnitMs) ==
// 1 - packet_loss_rate.
- // |unit_loss_rate| is usually small. To increase its resolution, we
- // magnify it by |RAND_MAX|.
double unit_loss_rate = (1.0f - pow(1.0f - 0.01f * packet_loss_rate_,
1.0f / units));
loss_model_.reset(new UniformLoss(unit_loss_rate));
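
The surviving math solves (1 - u)^units = 1 - p for the per-unit loss probability u, where units = block_duration_ms_ / kPacketLossTimeUnitMs (the unit constant is not shown in this hunk; 10 ms is assumed below). For 10% packet loss and 20 ms blocks:

    #include <math.h>
    #include <stdio.h>

    int main(void) {
      double packet_loss_rate = 10.0;  /* Percent, as in the test flags. */
      int units = 20 / 10;             /* block_duration_ms / unit_ms, assumed. */
      double u = 1.0 - pow(1.0 - 0.01 * packet_loss_rate, 1.0 / units);
      printf("unit loss rate: %f\n", u);  /* 1 - sqrt(0.9) ~= 0.0513. */
      return 0;
    }
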
diff --git a/modules/audio_coding/neteq/tools/neteq_quality_test.h b/modules/audio_coding/neteq/tools/neteq_quality_test.h
index 75d19ae6..e0a43b6f 100644
--- a/modules/audio_coding/neteq/tools/neteq_quality_test.h
+++ b/modules/audio_coding/neteq/tools/neteq_quality_test.h
@@ -38,9 +38,10 @@ class NoLoss : public LossModel {
class UniformLoss : public LossModel {
public:
- UniformLoss(int loss_rate);
+ UniformLoss(double loss_rate);
virtual bool Lost() OVERRIDE;
void set_loss_rate(double loss_rate) { loss_rate_ = loss_rate; }
+
private:
double loss_rate_;
};
@@ -49,6 +50,7 @@ class GilbertElliotLoss : public LossModel {
public:
GilbertElliotLoss(double prob_trans_11, double prob_trans_01);
virtual bool Lost() OVERRIDE;
+
private:
// Prob. of losing current packet, when previous packet is lost.
double prob_trans_11_;
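
GilbertElliotLoss is a two-state Markov model: |prob_trans_11| is the probability of losing a packet when the previous one was lost, |prob_trans_01| when it was received. The implementation is outside this hunk; a hedged sketch of what Lost() amounts to:

    #include <stdbool.h>
    #include <stdlib.h>

    /* Sketch of a two-state (Gilbert-Elliott) loss model; not the actual
     * implementation, which this diff does not show. */
    typedef struct {
      double prob_trans_11;  /* P(lost | previous lost). */
      double prob_trans_01;  /* P(lost | previous received). */
      bool previous_lost;
    } GESketch;

    static bool GELost(GESketch* m) {
      double p = m->previous_lost ? m->prob_trans_11 : m->prob_trans_01;
      m->previous_lost = ((double)rand() / RAND_MAX) < p;
      return m->previous_lost;
    }
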
diff --git a/modules/audio_conference_mixer/source/audio_conference_mixer_impl.cc b/modules/audio_conference_mixer/source/audio_conference_mixer_impl.cc
index 26ef3e88..6ef6166b 100644
--- a/modules/audio_conference_mixer/source/audio_conference_mixer_impl.cc
+++ b/modules/audio_conference_mixer/source/audio_conference_mixer_impl.cc
@@ -32,10 +32,13 @@ typedef std::list<ParticipantFramePair*> ParticipantFramePairList;
// stereo at most.
//
// TODO(andrew): consider not modifying |frame| here.
-void MixFrames(AudioFrame* mixed_frame, AudioFrame* frame) {
+void MixFrames(AudioFrame* mixed_frame, AudioFrame* frame, bool use_limiter) {
assert(mixed_frame->num_channels_ >= frame->num_channels_);
- // Divide by two to avoid saturation in the mixing.
- *frame >>= 1;
+ if (use_limiter) {
+ // Divide by two to avoid saturation in the mixing.
+ // This is only meaningful if the limiter will be used.
+ *frame >>= 1;
+ }
if (mixed_frame->num_channels_ > frame->num_channels_) {
// We only support mono-to-stereo.
assert(mixed_frame->num_channels_ == 2 &&
@@ -131,6 +134,7 @@ AudioConferenceMixerImpl::AudioConferenceMixerImpl(int id)
_participantList(),
_additionalParticipantList(),
_numMixedParticipants(0),
+ use_limiter_(true),
_timeStamp(0),
_timeScheduler(kProcessPeriodicityInMs),
_mixedAudioLevel(),
@@ -308,6 +312,11 @@ int32_t AudioConferenceMixerImpl::Process() {
_timeStamp += _sampleSize;
+ // We only use the limiter if it supports the output sample rate and
+ // we're actually mixing multiple streams.
+ use_limiter_ = _numMixedParticipants > 1 &&
+ _outputFrequency <= kAudioProcMaxNativeSampleRateHz;
+
MixFromList(*mixedAudio, &mixList);
MixAnonomouslyFromList(*mixedAudio, &additionalFramesList);
MixAnonomouslyFromList(*mixedAudio, &rampOutList);
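
The limiter decision changes the gain structure of the mix: with two or more participants at a rate the audio processing module supports, each incoming frame is halved for one bit of headroom and the limiter later restores level; otherwise frames now pass through at unity gain, replacing the old CopyFrom() special case for a single participant. The headroom trick in MixFrames amounts to:

    #include <stdint.h>

    /* Sketch: accumulate one participant frame into the mix buffer. Each
     * incoming frame is pre-halved when the limiter is active, so the sum of
     * two frames keeps one bit of headroom for the limiter to restore. The
     * saturating add stands in for AudioFrame's += operator. */
    static int16_t SatAdd16(int16_t a, int16_t b) {
      int32_t s = (int32_t)a + b;
      if (s > 32767) s = 32767;
      if (s < -32768) s = -32768;
      return (int16_t)s;
    }

    static void MixSamples(int16_t* mixed, const int16_t* frame, int n,
                           int use_limiter) {
      int i;
      for (i = 0; i < n; ++i) {
        int16_t s = use_limiter ? (int16_t)(frame[i] >> 1) : frame[i];
        mixed[i] = SatAdd16(mixed[i], s);
      }
    }
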
@@ -946,16 +955,8 @@ int32_t AudioConferenceMixerImpl::MixFromList(
if(audioFrameList->empty()) return 0;
uint32_t position = 0;
- if(_numMixedParticipants == 1) {
- // No mixing required here; skip the saturation protection.
- AudioFrame* audioFrame = audioFrameList->front();
- mixedAudio.CopyFrom(*audioFrame);
- SetParticipantStatistics(&_scratchMixedParticipants[position],
- *audioFrame);
- return 0;
- }
- if (audioFrameList->size() == 1) {
+ if (_numMixedParticipants == 1) {
mixedAudio.timestamp_ = audioFrameList->front()->timestamp_;
mixedAudio.elapsed_time_ms_ = audioFrameList->front()->elapsed_time_ms_;
} else {
@@ -979,7 +980,7 @@ int32_t AudioConferenceMixerImpl::MixFromList(
assert(false);
position = 0;
}
- MixFrames(&mixedAudio, (*iter));
+ MixFrames(&mixedAudio, (*iter), use_limiter_);
SetParticipantStatistics(&_scratchMixedParticipants[position],
**iter);
@@ -999,24 +1000,17 @@ int32_t AudioConferenceMixerImpl::MixAnonomouslyFromList(
if(audioFrameList->empty()) return 0;
- if(_numMixedParticipants == 1) {
- // No mixing required here; skip the saturation protection.
- AudioFrame* audioFrame = audioFrameList->front();
- mixedAudio.CopyFrom(*audioFrame);
- return 0;
- }
-
for (AudioFrameList::const_iterator iter = audioFrameList->begin();
iter != audioFrameList->end();
++iter) {
- MixFrames(&mixedAudio, *iter);
+ MixFrames(&mixedAudio, *iter, use_limiter_);
}
return 0;
}
bool AudioConferenceMixerImpl::LimitMixedAudio(AudioFrame& mixedAudio) {
- if(_numMixedParticipants == 1) {
- return true;
+ if (!use_limiter_) {
+ return true;
}
// Smoothly limit the mixed frame.
diff --git a/modules/audio_conference_mixer/source/audio_conference_mixer_impl.h b/modules/audio_conference_mixer/source/audio_conference_mixer_impl.h
index 31dc71e5..44f4ff04 100644
--- a/modules/audio_conference_mixer/source/audio_conference_mixer_impl.h
+++ b/modules/audio_conference_mixer/source/audio_conference_mixer_impl.h
@@ -192,6 +192,9 @@ private:
MixerParticipantList _additionalParticipantList;
size_t _numMixedParticipants;
+ // Determines if we will use a limiter for clipping protection during
+ // mixing.
+ bool use_limiter_;
uint32_t _timeStamp;
diff --git a/modules/audio_device/audio_device.gypi b/modules/audio_device/audio_device.gypi
index a64856b5..23f417f9 100644
--- a/modules/audio_device/audio_device.gypi
+++ b/modules/audio_device/audio_device.gypi
@@ -98,7 +98,7 @@
'linux/audio_mixer_manager_alsa_linux.h',
'linux/latebindingsymboltable_linux.cc',
'linux/latebindingsymboltable_linux.h',
- 'ios/audio_device_ios.cc',
+ 'ios/audio_device_ios.mm',
'ios/audio_device_ios.h',
'ios/audio_device_utility_ios.cc',
'ios/audio_device_utility_ios.h',
@@ -175,7 +175,7 @@
}],
],
}],
- ['OS=="mac" or OS=="ios"', {
+ ['OS=="mac"', {
'link_settings': {
'libraries': [
'$(SDKROOT)/System/Library/Frameworks/AudioToolbox.framework',
@@ -183,6 +183,19 @@
],
},
}],
+ ['OS=="ios"', {
+ 'xcode_settings': {
+ 'CLANG_ENABLE_OBJC_ARC': 'YES',
+ },
+ 'link_settings': {
+ 'xcode_settings': {
+ 'OTHER_LDFLAGS': [
+ '-framework AudioToolbox',
+ '-framework AVFoundation',
+ ],
+ },
+ },
+ }],
['OS=="win"', {
'link_settings': {
'libraries': [
diff --git a/modules/audio_device/audio_device_impl.cc b/modules/audio_device/audio_device_impl.cc
index 58411e3b..a8145603 100644
--- a/modules/audio_device/audio_device_impl.cc
+++ b/modules/audio_device/audio_device_impl.cc
@@ -349,15 +349,15 @@ int32_t AudioDeviceModuleImpl::CreatePlatformSpecificObjects()
#if defined(WEBRTC_IOS)
if (audioLayer == kPlatformDefaultAudio)
{
- // Create *iPhone Audio* implementation
- ptrAudioDevice = new AudioDeviceIPhone(Id());
+ // Create iOS Audio Device implementation.
+ ptrAudioDevice = new AudioDeviceIOS(Id());
WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "iPhone Audio APIs will be utilized");
}
if (ptrAudioDevice != NULL)
{
- // Create the Mac implementation of the Device Utility.
- ptrAudioDeviceUtility = new AudioDeviceUtilityIPhone(Id());
+ // Create iOS Device Utility implementation.
+ ptrAudioDeviceUtility = new AudioDeviceUtilityIOS(Id());
}
// END #if defined(WEBRTC_IOS)
diff --git a/modules/audio_device/audio_device_utility.h b/modules/audio_device/audio_device_utility.h
index eb3e623e..ebe06d1f 100644
--- a/modules/audio_device/audio_device_utility.h
+++ b/modules/audio_device/audio_device_utility.h
@@ -18,15 +18,15 @@ namespace webrtc
class AudioDeviceUtility
{
-public:
- static uint32_t GetTimeInMS();
- static void WaitForKey();
- static bool StringCompare(const char* str1,
- const char* str2,
- const uint32_t length);
- virtual int32_t Init() = 0;
+ public:
+ static uint32_t GetTimeInMS();
+ static void WaitForKey();
+ static bool StringCompare(const char* str1,
+ const char* str2,
+ const uint32_t length);
+ virtual int32_t Init() = 0;
- virtual ~AudioDeviceUtility() {}
+ virtual ~AudioDeviceUtility() {}
};
} // namespace webrtc
diff --git a/modules/audio_device/ios/audio_device_ios.h b/modules/audio_device/ios/audio_device_ios.h
index 011b6acf..2a48845e 100644
--- a/modules/audio_device/ios/audio_device_ios.h
+++ b/modules/audio_device/ios/audio_device_ios.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_IPHONE_H
-#define WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_IPHONE_H
+#ifndef WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_IOS_H
+#define WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_IOS_H
#include <AudioUnit/AudioUnit.h>
@@ -32,244 +32,241 @@ const uint32_t ENGINE_PLAY_BUF_SIZE_IN_SAMPLES = (N_PLAY_SAMPLES_PER_SEC / 100);
// Number of 10 ms recording blocks in recording buffer
const uint16_t N_REC_BUFFERS = 20;
-class AudioDeviceIPhone : public AudioDeviceGeneric {
-public:
- AudioDeviceIPhone(const int32_t id);
- ~AudioDeviceIPhone();
-
- // Retrieve the currently utilized audio layer
- virtual int32_t
- ActiveAudioLayer(AudioDeviceModule::AudioLayer& audioLayer) const;
-
- // Main initializaton and termination
- virtual int32_t Init();
- virtual int32_t Terminate();
- virtual bool Initialized() const;
-
- // Device enumeration
- virtual int16_t PlayoutDevices();
- virtual int16_t RecordingDevices();
- virtual int32_t PlayoutDeviceName(uint16_t index,
+class AudioDeviceIOS : public AudioDeviceGeneric {
+ public:
+ AudioDeviceIOS(const int32_t id);
+ ~AudioDeviceIOS();
+
+ // Retrieve the currently utilized audio layer
+ virtual int32_t ActiveAudioLayer(
+ AudioDeviceModule::AudioLayer& audioLayer) const;
+
+ // Main initialization and termination
+ virtual int32_t Init();
+ virtual int32_t Terminate();
+ virtual bool Initialized() const;
+
+ // Device enumeration
+ virtual int16_t PlayoutDevices();
+ virtual int16_t RecordingDevices();
+ virtual int32_t PlayoutDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]);
+ virtual int32_t RecordingDeviceName(uint16_t index,
char name[kAdmMaxDeviceNameSize],
char guid[kAdmMaxGuidSize]);
- virtual int32_t RecordingDeviceName(uint16_t index,
- char name[kAdmMaxDeviceNameSize],
- char guid[kAdmMaxGuidSize]);
-
- // Device selection
- virtual int32_t SetPlayoutDevice(uint16_t index);
- virtual int32_t
- SetPlayoutDevice(AudioDeviceModule::WindowsDeviceType device);
- virtual int32_t SetRecordingDevice(uint16_t index);
- virtual int32_t SetRecordingDevice(
- AudioDeviceModule::WindowsDeviceType device);
-
- // Audio transport initialization
- virtual int32_t PlayoutIsAvailable(bool& available);
- virtual int32_t InitPlayout();
- virtual bool PlayoutIsInitialized() const;
- virtual int32_t RecordingIsAvailable(bool& available);
- virtual int32_t InitRecording();
- virtual bool RecordingIsInitialized() const;
-
- // Audio transport control
- virtual int32_t StartPlayout();
- virtual int32_t StopPlayout();
- virtual bool Playing() const;
- virtual int32_t StartRecording();
- virtual int32_t StopRecording();
- virtual bool Recording() const;
-
- // Microphone Automatic Gain Control (AGC)
- virtual int32_t SetAGC(bool enable);
- virtual bool AGC() const;
-
- // Volume control based on the Windows Wave API (Windows only)
- virtual int32_t SetWaveOutVolume(uint16_t volumeLeft, uint16_t volumeRight);
- virtual int32_t WaveOutVolume(uint16_t& volumeLeft,
- uint16_t& volumeRight) const;
-
- // Audio mixer initialization
- virtual int32_t InitSpeaker();
- virtual bool SpeakerIsInitialized() const;
- virtual int32_t InitMicrophone();
- virtual bool MicrophoneIsInitialized() const;
-
- // Speaker volume controls
- virtual int32_t SpeakerVolumeIsAvailable(bool& available);
- virtual int32_t SetSpeakerVolume(uint32_t volume);
- virtual int32_t SpeakerVolume(uint32_t& volume) const;
- virtual int32_t MaxSpeakerVolume(uint32_t& maxVolume) const;
- virtual int32_t MinSpeakerVolume(uint32_t& minVolume) const;
- virtual int32_t SpeakerVolumeStepSize(uint16_t& stepSize) const;
-
- // Microphone volume controls
- virtual int32_t MicrophoneVolumeIsAvailable(bool& available);
- virtual int32_t SetMicrophoneVolume(uint32_t volume);
- virtual int32_t MicrophoneVolume(uint32_t& volume) const;
- virtual int32_t MaxMicrophoneVolume(uint32_t& maxVolume) const;
- virtual int32_t MinMicrophoneVolume(uint32_t& minVolume) const;
- virtual int32_t
- MicrophoneVolumeStepSize(uint16_t& stepSize) const;
-
- // Microphone mute control
- virtual int32_t MicrophoneMuteIsAvailable(bool& available);
- virtual int32_t SetMicrophoneMute(bool enable);
- virtual int32_t MicrophoneMute(bool& enabled) const;
-
- // Speaker mute control
- virtual int32_t SpeakerMuteIsAvailable(bool& available);
- virtual int32_t SetSpeakerMute(bool enable);
- virtual int32_t SpeakerMute(bool& enabled) const;
-
- // Microphone boost control
- virtual int32_t MicrophoneBoostIsAvailable(bool& available);
- virtual int32_t SetMicrophoneBoost(bool enable);
- virtual int32_t MicrophoneBoost(bool& enabled) const;
-
- // Stereo support
- virtual int32_t StereoPlayoutIsAvailable(bool& available);
- virtual int32_t SetStereoPlayout(bool enable);
- virtual int32_t StereoPlayout(bool& enabled) const;
- virtual int32_t StereoRecordingIsAvailable(bool& available);
- virtual int32_t SetStereoRecording(bool enable);
- virtual int32_t StereoRecording(bool& enabled) const;
-
- // Delay information and control
- virtual int32_t
- SetPlayoutBuffer(const AudioDeviceModule::BufferType type,
- uint16_t sizeMS);
- virtual int32_t PlayoutBuffer(AudioDeviceModule::BufferType& type,
- uint16_t& sizeMS) const;
- virtual int32_t PlayoutDelay(uint16_t& delayMS) const;
- virtual int32_t RecordingDelay(uint16_t& delayMS) const;
-
- // CPU load
- virtual int32_t CPULoad(uint16_t& load) const;
-
-public:
- virtual bool PlayoutWarning() const;
- virtual bool PlayoutError() const;
- virtual bool RecordingWarning() const;
- virtual bool RecordingError() const;
- virtual void ClearPlayoutWarning();
- virtual void ClearPlayoutError();
- virtual void ClearRecordingWarning();
- virtual void ClearRecordingError();
-
-public:
- virtual void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer);
-
- // Reset Audio Deivce (for mobile devices only)
- virtual int32_t ResetAudioDevice();
-
- // enable or disable loud speaker (for iphone only)
- virtual int32_t SetLoudspeakerStatus(bool enable);
- virtual int32_t GetLoudspeakerStatus(bool& enabled) const;
-
-private:
- void Lock() {
- _critSect.Enter();
- }
-
- void UnLock() {
- _critSect.Leave();
- }
-
- int32_t Id() {
- return _id;
- }
-
- // Init and shutdown
- int32_t InitPlayOrRecord();
- int32_t ShutdownPlayOrRecord();
-
- void UpdateRecordingDelay();
- void UpdatePlayoutDelay();
-
- static OSStatus RecordProcess(void *inRefCon,
- AudioUnitRenderActionFlags *ioActionFlags,
- const AudioTimeStamp *timeStamp,
- UInt32 inBusNumber,
- UInt32 inNumberFrames,
- AudioBufferList *ioData);
-
- static OSStatus PlayoutProcess(void *inRefCon,
- AudioUnitRenderActionFlags *ioActionFlags,
- const AudioTimeStamp *timeStamp,
- UInt32 inBusNumber,
- UInt32 inNumberFrames,
- AudioBufferList *ioData);
-
- OSStatus RecordProcessImpl(AudioUnitRenderActionFlags *ioActionFlags,
- const AudioTimeStamp *timeStamp,
- uint32_t inBusNumber,
- uint32_t inNumberFrames);
-
- OSStatus PlayoutProcessImpl(uint32_t inNumberFrames,
+
+ // Device selection
+ virtual int32_t SetPlayoutDevice(uint16_t index);
+ virtual int32_t SetPlayoutDevice(AudioDeviceModule::WindowsDeviceType device);
+ virtual int32_t SetRecordingDevice(uint16_t index);
+ virtual int32_t SetRecordingDevice(
+ AudioDeviceModule::WindowsDeviceType device);
+
+ // Audio transport initialization
+ virtual int32_t PlayoutIsAvailable(bool& available);
+ virtual int32_t InitPlayout();
+ virtual bool PlayoutIsInitialized() const;
+ virtual int32_t RecordingIsAvailable(bool& available);
+ virtual int32_t InitRecording();
+ virtual bool RecordingIsInitialized() const;
+
+ // Audio transport control
+ virtual int32_t StartPlayout();
+ virtual int32_t StopPlayout();
+ virtual bool Playing() const;
+ virtual int32_t StartRecording();
+ virtual int32_t StopRecording();
+ virtual bool Recording() const;
+
+ // Microphone Automatic Gain Control (AGC)
+ virtual int32_t SetAGC(bool enable);
+ virtual bool AGC() const;
+
+ // Volume control based on the Windows Wave API (Windows only)
+ virtual int32_t SetWaveOutVolume(uint16_t volumeLeft, uint16_t volumeRight);
+ virtual int32_t WaveOutVolume(uint16_t& volumeLeft,
+ uint16_t& volumeRight) const;
+
+ // Audio mixer initialization
+ virtual int32_t InitSpeaker();
+ virtual bool SpeakerIsInitialized() const;
+ virtual int32_t InitMicrophone();
+ virtual bool MicrophoneIsInitialized() const;
+
+ // Speaker volume controls
+ virtual int32_t SpeakerVolumeIsAvailable(bool& available);
+ virtual int32_t SetSpeakerVolume(uint32_t volume);
+ virtual int32_t SpeakerVolume(uint32_t& volume) const;
+ virtual int32_t MaxSpeakerVolume(uint32_t& maxVolume) const;
+ virtual int32_t MinSpeakerVolume(uint32_t& minVolume) const;
+ virtual int32_t SpeakerVolumeStepSize(uint16_t& stepSize) const;
+
+ // Microphone volume controls
+ virtual int32_t MicrophoneVolumeIsAvailable(bool& available);
+ virtual int32_t SetMicrophoneVolume(uint32_t volume);
+ virtual int32_t MicrophoneVolume(uint32_t& volume) const;
+ virtual int32_t MaxMicrophoneVolume(uint32_t& maxVolume) const;
+ virtual int32_t MinMicrophoneVolume(uint32_t& minVolume) const;
+ virtual int32_t MicrophoneVolumeStepSize(uint16_t& stepSize) const;
+
+ // Microphone mute control
+ virtual int32_t MicrophoneMuteIsAvailable(bool& available);
+ virtual int32_t SetMicrophoneMute(bool enable);
+ virtual int32_t MicrophoneMute(bool& enabled) const;
+
+ // Speaker mute control
+ virtual int32_t SpeakerMuteIsAvailable(bool& available);
+ virtual int32_t SetSpeakerMute(bool enable);
+ virtual int32_t SpeakerMute(bool& enabled) const;
+
+ // Microphone boost control
+ virtual int32_t MicrophoneBoostIsAvailable(bool& available);
+ virtual int32_t SetMicrophoneBoost(bool enable);
+ virtual int32_t MicrophoneBoost(bool& enabled) const;
+
+ // Stereo support
+ virtual int32_t StereoPlayoutIsAvailable(bool& available);
+ virtual int32_t SetStereoPlayout(bool enable);
+ virtual int32_t StereoPlayout(bool& enabled) const;
+ virtual int32_t StereoRecordingIsAvailable(bool& available);
+ virtual int32_t SetStereoRecording(bool enable);
+ virtual int32_t StereoRecording(bool& enabled) const;
+
+ // Delay information and control
+ virtual int32_t SetPlayoutBuffer(const AudioDeviceModule::BufferType type,
+ uint16_t sizeMS);
+ virtual int32_t PlayoutBuffer(AudioDeviceModule::BufferType& type,
+ uint16_t& sizeMS) const;
+ virtual int32_t PlayoutDelay(uint16_t& delayMS) const;
+ virtual int32_t RecordingDelay(uint16_t& delayMS) const;
+
+ // CPU load
+ virtual int32_t CPULoad(uint16_t& load) const;
+
+ public:
+ virtual bool PlayoutWarning() const;
+ virtual bool PlayoutError() const;
+ virtual bool RecordingWarning() const;
+ virtual bool RecordingError() const;
+ virtual void ClearPlayoutWarning();
+ virtual void ClearPlayoutError();
+ virtual void ClearRecordingWarning();
+ virtual void ClearRecordingError();
+
+ public:
+ virtual void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer);
+
+ // Reset Audio Device (for mobile devices only)
+ virtual int32_t ResetAudioDevice();
+
+ // Enable or disable loudspeaker (for iPhone only)
+ virtual int32_t SetLoudspeakerStatus(bool enable);
+ virtual int32_t GetLoudspeakerStatus(bool& enabled) const;
+
+ private:
+ void Lock() {
+ _critSect.Enter();
+ }
+
+ void UnLock() {
+ _critSect.Leave();
+ }
+
+ int32_t Id() {
+ return _id;
+ }
+
+ // Init and shutdown
+ int32_t InitPlayOrRecord();
+ int32_t ShutdownPlayOrRecord();
+
+ void UpdateRecordingDelay();
+ void UpdatePlayoutDelay();
+
+ static OSStatus RecordProcess(void *inRefCon,
+ AudioUnitRenderActionFlags *ioActionFlags,
+ const AudioTimeStamp *timeStamp,
+ UInt32 inBusNumber,
+ UInt32 inNumberFrames,
AudioBufferList *ioData);
- static bool RunCapture(void* ptrThis);
- bool CaptureWorkerThread();
+ static OSStatus PlayoutProcess(void *inRefCon,
+ AudioUnitRenderActionFlags *ioActionFlags,
+ const AudioTimeStamp *timeStamp,
+ UInt32 inBusNumber,
+ UInt32 inNumberFrames,
+ AudioBufferList *ioData);
+
+ OSStatus RecordProcessImpl(AudioUnitRenderActionFlags *ioActionFlags,
+ const AudioTimeStamp *timeStamp,
+ uint32_t inBusNumber,
+ uint32_t inNumberFrames);
+
+ OSStatus PlayoutProcessImpl(uint32_t inNumberFrames,
+ AudioBufferList *ioData);
+
+ static bool RunCapture(void* ptrThis);
+ bool CaptureWorkerThread();
-private:
- AudioDeviceBuffer* _ptrAudioBuffer;
+ private:
+ AudioDeviceBuffer* _ptrAudioBuffer;
- CriticalSectionWrapper& _critSect;
+ CriticalSectionWrapper& _critSect;
- ThreadWrapper* _captureWorkerThread;
- uint32_t _captureWorkerThreadId;
+ ThreadWrapper* _captureWorkerThread;
+ uint32_t _captureWorkerThreadId;
- int32_t _id;
+ int32_t _id;
- AudioUnit _auVoiceProcessing;
+ AudioUnit _auVoiceProcessing;
+ void* _audioInterruptionObserver;
-private:
- bool _initialized;
- bool _isShutDown;
- bool _recording;
- bool _playing;
- bool _recIsInitialized;
- bool _playIsInitialized;
+ private:
+ bool _initialized;
+ bool _isShutDown;
+ bool _recording;
+ bool _playing;
+ bool _recIsInitialized;
+ bool _playIsInitialized;
- bool _recordingDeviceIsSpecified;
- bool _playoutDeviceIsSpecified;
- bool _micIsInitialized;
- bool _speakerIsInitialized;
+ bool _recordingDeviceIsSpecified;
+ bool _playoutDeviceIsSpecified;
+ bool _micIsInitialized;
+ bool _speakerIsInitialized;
- bool _AGC;
+ bool _AGC;
- // The sampling rate to use with Audio Device Buffer
- uint32_t _adbSampFreq;
+ // The sampling rate to use with Audio Device Buffer
+ uint32_t _adbSampFreq;
- // Delay calculation
- uint32_t _recordingDelay;
- uint32_t _playoutDelay;
- uint32_t _playoutDelayMeasurementCounter;
- uint32_t _recordingDelayHWAndOS;
- uint32_t _recordingDelayMeasurementCounter;
+ // Delay calculation
+ uint32_t _recordingDelay;
+ uint32_t _playoutDelay;
+ uint32_t _playoutDelayMeasurementCounter;
+ uint32_t _recordingDelayHWAndOS;
+ uint32_t _recordingDelayMeasurementCounter;
- // Errors and warnings count
- uint16_t _playWarning;
- uint16_t _playError;
- uint16_t _recWarning;
- uint16_t _recError;
+ // Errors and warnings count
+ uint16_t _playWarning;
+ uint16_t _playError;
+ uint16_t _recWarning;
+ uint16_t _recError;
- // Playout buffer, needed for 44.0 / 44.1 kHz mismatch
- int16_t _playoutBuffer[ENGINE_PLAY_BUF_SIZE_IN_SAMPLES];
- uint32_t _playoutBufferUsed; // How much is filled
+ // Playout buffer, needed for 44.0 / 44.1 kHz mismatch
+ int16_t _playoutBuffer[ENGINE_PLAY_BUF_SIZE_IN_SAMPLES];
+ uint32_t _playoutBufferUsed; // How much is filled
- // Recording buffers
- int16_t
- _recordingBuffer[N_REC_BUFFERS][ENGINE_REC_BUF_SIZE_IN_SAMPLES];
- uint32_t _recordingLength[N_REC_BUFFERS];
- uint32_t _recordingSeqNumber[N_REC_BUFFERS];
- uint32_t _recordingCurrentSeq;
+ // Recording buffers
+ int16_t _recordingBuffer[N_REC_BUFFERS][ENGINE_REC_BUF_SIZE_IN_SAMPLES];
+ uint32_t _recordingLength[N_REC_BUFFERS];
+ uint32_t _recordingSeqNumber[N_REC_BUFFERS];
+ uint32_t _recordingCurrentSeq;
- // Current total size all data in buffers, used for delay estimate
- uint32_t _recordingBufferTotalSize;
+ // Current total size of all data in buffers, used for delay estimate
+ uint32_t _recordingBufferTotalSize;
};
} // namespace webrtc
-#endif // MODULES_AUDIO_DEVICE_MAIN_SOURCE_MAC_AUDIO_DEVICE_IPHONE_H_
+#endif // WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_IOS_H
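Note: the rewritten header stores the interruption observer (added below as _audioInterruptionObserver) behind a void* rather than an id so that pure C++ translation units can still include it. A minimal sketch of the ARC bridging pattern this relies on; ObserverHolder is a hypothetical name, not part of the patch, and the file must be compiled as Objective-C++ with -fobjc-arc:

#import <Foundation/Foundation.h>

// Holds an ARC-managed object through a raw void* member.
// __bridge_retained moves ownership out of ARC so the object stays alive
// while only the raw pointer references it; __bridge_transfer hands
// ownership back so ARC releases it normally.
class ObserverHolder {
 public:
  ObserverHolder() : ptr_(NULL) {}
  void Store(id observer) {
    ptr_ = (__bridge_retained void*)observer;
  }
  void Clear() {
    if (ptr_ != NULL) {
      id observer = (__bridge_transfer id)ptr_;
      (void)observer;  // Released by ARC when it goes out of scope.
      ptr_ = NULL;
    }
  }
 private:
  void* ptr_;  // Really an id; void* keeps C++-only includers compiling.
};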
diff --git a/modules/audio_device/ios/audio_device_ios.cc b/modules/audio_device/ios/audio_device_ios.mm
index 7a7189a2..19dcfdfb 100644
--- a/modules/audio_device/ios/audio_device_ios.cc
+++ b/modules/audio_device/ios/audio_device_ios.mm
@@ -8,7 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include <AudioToolbox/AudioServices.h> // AudioSession
+#import <AVFoundation/AVFoundation.h>
+#import <Foundation/Foundation.h>
#include "webrtc/modules/audio_device/ios/audio_device_ios.h"
@@ -16,7 +17,7 @@
#include "webrtc/system_wrappers/interface/trace.h"
namespace webrtc {
-AudioDeviceIPhone::AudioDeviceIPhone(const int32_t id)
+AudioDeviceIOS::AudioDeviceIOS(const int32_t id)
:
_ptrAudioBuffer(NULL),
_critSect(*CriticalSectionWrapper::CreateCriticalSection()),
@@ -24,6 +25,7 @@ AudioDeviceIPhone::AudioDeviceIPhone(const int32_t id)
_captureWorkerThreadId(0),
_id(id),
_auVoiceProcessing(NULL),
+ _audioInterruptionObserver(NULL),
_initialized(false),
_isShutDown(false),
_recording(false),
@@ -57,7 +59,7 @@ AudioDeviceIPhone::AudioDeviceIPhone(const int32_t id)
memset(_recordingSeqNumber, 0, sizeof(_recordingSeqNumber));
}
-AudioDeviceIPhone::~AudioDeviceIPhone() {
+AudioDeviceIOS::~AudioDeviceIOS() {
WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id,
"%s destroyed", __FUNCTION__);
@@ -71,7 +73,7 @@ AudioDeviceIPhone::~AudioDeviceIPhone() {
// API
// ============================================================================
-void AudioDeviceIPhone::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
+void AudioDeviceIOS::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
@@ -86,7 +88,7 @@ void AudioDeviceIPhone::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
_ptrAudioBuffer->SetPlayoutChannels(N_PLAY_CHANNELS);
}
-int32_t AudioDeviceIPhone::ActiveAudioLayer(
+int32_t AudioDeviceIOS::ActiveAudioLayer(
AudioDeviceModule::AudioLayer& audioLayer) const {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
@@ -94,7 +96,7 @@ int32_t AudioDeviceIPhone::ActiveAudioLayer(
return 0;
}
-int32_t AudioDeviceIPhone::Init() {
+int32_t AudioDeviceIOS::Init() {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
@@ -137,7 +139,7 @@ int32_t AudioDeviceIPhone::Init() {
return 0;
}
-int32_t AudioDeviceIPhone::Terminate() {
+int32_t AudioDeviceIOS::Terminate() {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
@@ -169,13 +171,13 @@ int32_t AudioDeviceIPhone::Terminate() {
return 0;
}
-bool AudioDeviceIPhone::Initialized() const {
+bool AudioDeviceIOS::Initialized() const {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
return (_initialized);
}
-int32_t AudioDeviceIPhone::InitSpeaker() {
+int32_t AudioDeviceIOS::InitSpeaker() {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
@@ -205,7 +207,7 @@ int32_t AudioDeviceIPhone::InitSpeaker() {
return 0;
}
-int32_t AudioDeviceIPhone::InitMicrophone() {
+int32_t AudioDeviceIOS::InitMicrophone() {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
@@ -236,19 +238,19 @@ int32_t AudioDeviceIPhone::InitMicrophone() {
return 0;
}
-bool AudioDeviceIPhone::SpeakerIsInitialized() const {
+bool AudioDeviceIOS::SpeakerIsInitialized() const {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
return _speakerIsInitialized;
}
-bool AudioDeviceIPhone::MicrophoneIsInitialized() const {
+bool AudioDeviceIOS::MicrophoneIsInitialized() const {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
return _micIsInitialized;
}
-int32_t AudioDeviceIPhone::SpeakerVolumeIsAvailable(bool& available) {
+int32_t AudioDeviceIOS::SpeakerVolumeIsAvailable(bool& available) {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
@@ -257,16 +259,16 @@ int32_t AudioDeviceIPhone::SpeakerVolumeIsAvailable(bool& available) {
return 0;
}
-int32_t AudioDeviceIPhone::SetSpeakerVolume(uint32_t volume) {
+int32_t AudioDeviceIOS::SetSpeakerVolume(uint32_t volume) {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "AudioDeviceIPhone::SetSpeakerVolume(volume=%u)", volume);
+ "AudioDeviceIOS::SetSpeakerVolume(volume=%u)", volume);
WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
" API call not supported on this platform");
return -1;
}
-int32_t AudioDeviceIPhone::SpeakerVolume(uint32_t& volume) const {
+int32_t AudioDeviceIOS::SpeakerVolume(uint32_t& volume) const {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
@@ -276,13 +278,13 @@ int32_t AudioDeviceIPhone::SpeakerVolume(uint32_t& volume) const {
}
int32_t
- AudioDeviceIPhone::SetWaveOutVolume(uint16_t volumeLeft,
- uint16_t volumeRight) {
+ AudioDeviceIOS::SetWaveOutVolume(uint16_t volumeLeft,
+ uint16_t volumeRight) {
WEBRTC_TRACE(
kTraceModuleCall,
kTraceAudioDevice,
_id,
- "AudioDeviceIPhone::SetWaveOutVolume(volumeLeft=%u, volumeRight=%u)",
+ "AudioDeviceIOS::SetWaveOutVolume(volumeLeft=%u, volumeRight=%u)",
volumeLeft, volumeRight);
WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
@@ -292,7 +294,7 @@ int32_t
}
int32_t
-AudioDeviceIPhone::WaveOutVolume(uint16_t& /*volumeLeft*/,
+AudioDeviceIOS::WaveOutVolume(uint16_t& /*volumeLeft*/,
uint16_t& /*volumeRight*/) const {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
@@ -303,7 +305,7 @@ AudioDeviceIPhone::WaveOutVolume(uint16_t& /*volumeLeft*/,
}
int32_t
- AudioDeviceIPhone::MaxSpeakerVolume(uint32_t& maxVolume) const {
+ AudioDeviceIOS::MaxSpeakerVolume(uint32_t& maxVolume) const {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
@@ -312,7 +314,7 @@ int32_t
return -1;
}
-int32_t AudioDeviceIPhone::MinSpeakerVolume(
+int32_t AudioDeviceIOS::MinSpeakerVolume(
uint32_t& minVolume) const {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
@@ -323,7 +325,7 @@ int32_t AudioDeviceIPhone::MinSpeakerVolume(
}
int32_t
- AudioDeviceIPhone::SpeakerVolumeStepSize(uint16_t& stepSize) const {
+ AudioDeviceIOS::SpeakerVolumeStepSize(uint16_t& stepSize) const {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
@@ -332,7 +334,7 @@ int32_t
return -1;
}
-int32_t AudioDeviceIPhone::SpeakerMuteIsAvailable(bool& available) {
+int32_t AudioDeviceIOS::SpeakerMuteIsAvailable(bool& available) {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
@@ -341,7 +343,7 @@ int32_t AudioDeviceIPhone::SpeakerMuteIsAvailable(bool& available) {
return 0;
}
-int32_t AudioDeviceIPhone::SetSpeakerMute(bool enable) {
+int32_t AudioDeviceIOS::SetSpeakerMute(bool enable) {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
@@ -350,7 +352,7 @@ int32_t AudioDeviceIPhone::SetSpeakerMute(bool enable) {
return -1;
}
-int32_t AudioDeviceIPhone::SpeakerMute(bool& enabled) const {
+int32_t AudioDeviceIOS::SpeakerMute(bool& enabled) const {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
@@ -359,7 +361,7 @@ int32_t AudioDeviceIPhone::SpeakerMute(bool& enabled) const {
return -1;
}
-int32_t AudioDeviceIPhone::MicrophoneMuteIsAvailable(bool& available) {
+int32_t AudioDeviceIOS::MicrophoneMuteIsAvailable(bool& available) {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
@@ -368,7 +370,7 @@ int32_t AudioDeviceIPhone::MicrophoneMuteIsAvailable(bool& available) {
return 0;
}
-int32_t AudioDeviceIPhone::SetMicrophoneMute(bool enable) {
+int32_t AudioDeviceIOS::SetMicrophoneMute(bool enable) {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
@@ -377,7 +379,7 @@ int32_t AudioDeviceIPhone::SetMicrophoneMute(bool enable) {
return -1;
}
-int32_t AudioDeviceIPhone::MicrophoneMute(bool& enabled) const {
+int32_t AudioDeviceIOS::MicrophoneMute(bool& enabled) const {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
@@ -386,7 +388,7 @@ int32_t AudioDeviceIPhone::MicrophoneMute(bool& enabled) const {
return -1;
}
-int32_t AudioDeviceIPhone::MicrophoneBoostIsAvailable(bool& available) {
+int32_t AudioDeviceIOS::MicrophoneBoostIsAvailable(bool& available) {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
@@ -395,9 +397,9 @@ int32_t AudioDeviceIPhone::MicrophoneBoostIsAvailable(bool& available) {
return 0;
}
-int32_t AudioDeviceIPhone::SetMicrophoneBoost(bool enable) {
+int32_t AudioDeviceIOS::SetMicrophoneBoost(bool enable) {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "AudioDeviceIPhone::SetMicrophoneBoost(enable=%u)", enable);
+ "AudioDeviceIOS::SetMicrophoneBoost(enable=%u)", enable);
if (!_micIsInitialized) {
WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
@@ -414,7 +416,7 @@ int32_t AudioDeviceIPhone::SetMicrophoneBoost(bool enable) {
return 0;
}
-int32_t AudioDeviceIPhone::MicrophoneBoost(bool& enabled) const {
+int32_t AudioDeviceIOS::MicrophoneBoost(bool& enabled) const {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
if (!_micIsInitialized) {
@@ -428,7 +430,7 @@ int32_t AudioDeviceIPhone::MicrophoneBoost(bool& enabled) const {
return 0;
}
-int32_t AudioDeviceIPhone::StereoRecordingIsAvailable(bool& available) {
+int32_t AudioDeviceIOS::StereoRecordingIsAvailable(bool& available) {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
@@ -437,9 +439,9 @@ int32_t AudioDeviceIPhone::StereoRecordingIsAvailable(bool& available) {
return 0;
}
-int32_t AudioDeviceIPhone::SetStereoRecording(bool enable) {
+int32_t AudioDeviceIOS::SetStereoRecording(bool enable) {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "AudioDeviceIPhone::SetStereoRecording(enable=%u)", enable);
+ "AudioDeviceIOS::SetStereoRecording(enable=%u)", enable);
if (enable) {
WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
@@ -449,7 +451,7 @@ int32_t AudioDeviceIPhone::SetStereoRecording(bool enable) {
return 0;
}
-int32_t AudioDeviceIPhone::StereoRecording(bool& enabled) const {
+int32_t AudioDeviceIOS::StereoRecording(bool& enabled) const {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
@@ -457,7 +459,7 @@ int32_t AudioDeviceIPhone::StereoRecording(bool& enabled) const {
return 0;
}
-int32_t AudioDeviceIPhone::StereoPlayoutIsAvailable(bool& available) {
+int32_t AudioDeviceIOS::StereoPlayoutIsAvailable(bool& available) {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
@@ -466,9 +468,9 @@ int32_t AudioDeviceIPhone::StereoPlayoutIsAvailable(bool& available) {
return 0;
}
-int32_t AudioDeviceIPhone::SetStereoPlayout(bool enable) {
+int32_t AudioDeviceIOS::SetStereoPlayout(bool enable) {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "AudioDeviceIPhone::SetStereoPlayout(enable=%u)", enable);
+ "AudioDeviceIOS::SetStereoPlayout(enable=%u)", enable);
if (enable) {
WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
@@ -478,7 +480,7 @@ int32_t AudioDeviceIPhone::SetStereoPlayout(bool enable) {
return 0;
}
-int32_t AudioDeviceIPhone::StereoPlayout(bool& enabled) const {
+int32_t AudioDeviceIOS::StereoPlayout(bool& enabled) const {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
@@ -486,23 +488,23 @@ int32_t AudioDeviceIPhone::StereoPlayout(bool& enabled) const {
return 0;
}
-int32_t AudioDeviceIPhone::SetAGC(bool enable) {
+int32_t AudioDeviceIOS::SetAGC(bool enable) {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "AudioDeviceIPhone::SetAGC(enable=%d)", enable);
+ "AudioDeviceIOS::SetAGC(enable=%d)", enable);
_AGC = enable;
return 0;
}
-bool AudioDeviceIPhone::AGC() const {
+bool AudioDeviceIOS::AGC() const {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
return _AGC;
}
-int32_t AudioDeviceIPhone::MicrophoneVolumeIsAvailable(bool& available) {
+int32_t AudioDeviceIOS::MicrophoneVolumeIsAvailable(bool& available) {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
@@ -511,9 +513,9 @@ int32_t AudioDeviceIPhone::MicrophoneVolumeIsAvailable(bool& available) {
return 0;
}
-int32_t AudioDeviceIPhone::SetMicrophoneVolume(uint32_t volume) {
+int32_t AudioDeviceIOS::SetMicrophoneVolume(uint32_t volume) {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "AudioDeviceIPhone::SetMicrophoneVolume(volume=%u)", volume);
+ "AudioDeviceIOS::SetMicrophoneVolume(volume=%u)", volume);
WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
" API call not supported on this platform");
@@ -521,7 +523,7 @@ int32_t AudioDeviceIPhone::SetMicrophoneVolume(uint32_t volume) {
}
int32_t
- AudioDeviceIPhone::MicrophoneVolume(uint32_t& volume) const {
+ AudioDeviceIOS::MicrophoneVolume(uint32_t& volume) const {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
@@ -531,7 +533,7 @@ int32_t
}
int32_t
- AudioDeviceIPhone::MaxMicrophoneVolume(uint32_t& maxVolume) const {
+ AudioDeviceIOS::MaxMicrophoneVolume(uint32_t& maxVolume) const {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
@@ -541,7 +543,7 @@ int32_t
}
int32_t
- AudioDeviceIPhone::MinMicrophoneVolume(uint32_t& minVolume) const {
+ AudioDeviceIOS::MinMicrophoneVolume(uint32_t& minVolume) const {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
@@ -551,7 +553,7 @@ int32_t
}
int32_t
- AudioDeviceIPhone::MicrophoneVolumeStepSize(
+ AudioDeviceIOS::MicrophoneVolumeStepSize(
uint16_t& stepSize) const {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
@@ -561,16 +563,16 @@ int32_t
return -1;
}
-int16_t AudioDeviceIPhone::PlayoutDevices() {
+int16_t AudioDeviceIOS::PlayoutDevices() {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
return (int16_t)1;
}
-int32_t AudioDeviceIPhone::SetPlayoutDevice(uint16_t index) {
+int32_t AudioDeviceIOS::SetPlayoutDevice(uint16_t index) {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "AudioDeviceIPhone::SetPlayoutDevice(index=%u)", index);
+ "AudioDeviceIOS::SetPlayoutDevice(index=%u)", index);
if (_playIsInitialized) {
WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
@@ -589,18 +591,18 @@ int32_t AudioDeviceIPhone::SetPlayoutDevice(uint16_t index) {
}
int32_t
- AudioDeviceIPhone::SetPlayoutDevice(AudioDeviceModule::WindowsDeviceType) {
+ AudioDeviceIOS::SetPlayoutDevice(AudioDeviceModule::WindowsDeviceType) {
WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
"WindowsDeviceType not supported");
return -1;
}
int32_t
- AudioDeviceIPhone::PlayoutDeviceName(uint16_t index,
+ AudioDeviceIOS::PlayoutDeviceName(uint16_t index,
char name[kAdmMaxDeviceNameSize],
char guid[kAdmMaxGuidSize]) {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "AudioDeviceIPhone::PlayoutDeviceName(index=%u)", index);
+ "AudioDeviceIOS::PlayoutDeviceName(index=%u)", index);
if (index != 0) {
return -1;
@@ -615,11 +617,11 @@ int32_t
}
int32_t
- AudioDeviceIPhone::RecordingDeviceName(uint16_t index,
+ AudioDeviceIOS::RecordingDeviceName(uint16_t index,
char name[kAdmMaxDeviceNameSize],
char guid[kAdmMaxGuidSize]) {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "AudioDeviceIPhone::RecordingDeviceName(index=%u)", index);
+ "AudioDeviceIOS::RecordingDeviceName(index=%u)", index);
if (index != 0) {
return -1;
@@ -633,15 +635,15 @@ int32_t
return 0;
}
-int16_t AudioDeviceIPhone::RecordingDevices() {
+int16_t AudioDeviceIOS::RecordingDevices() {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
return (int16_t)1;
}
-int32_t AudioDeviceIPhone::SetRecordingDevice(uint16_t index) {
+int32_t AudioDeviceIOS::SetRecordingDevice(uint16_t index) {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "AudioDeviceIPhone::SetRecordingDevice(index=%u)", index);
+ "AudioDeviceIOS::SetRecordingDevice(index=%u)", index);
if (_recIsInitialized) {
WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
@@ -661,7 +663,7 @@ int32_t AudioDeviceIPhone::SetRecordingDevice(uint16_t index) {
}
int32_t
- AudioDeviceIPhone::SetRecordingDevice(
+ AudioDeviceIOS::SetRecordingDevice(
AudioDeviceModule::WindowsDeviceType) {
WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
"WindowsDeviceType not supported");
@@ -671,52 +673,55 @@ int32_t
// ----------------------------------------------------------------------------
// SetLoudspeakerStatus
//
-// Overrides the receiver playout route to speaker instead. See
-// kAudioSessionProperty_OverrideCategoryDefaultToSpeaker in CoreAudio
-// documentation.
+// Change the default receiver playout route to speaker.
+//
// ----------------------------------------------------------------------------
-int32_t AudioDeviceIPhone::SetLoudspeakerStatus(bool enable) {
+int32_t AudioDeviceIOS::SetLoudspeakerStatus(bool enable) {
WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- "AudioDeviceIPhone::SetLoudspeakerStatus(enable=%d)", enable);
-
- UInt32 doChangeDefaultRoute = enable ? 1 : 0;
- OSStatus err = AudioSessionSetProperty(
- kAudioSessionProperty_OverrideCategoryDefaultToSpeaker,
- sizeof(doChangeDefaultRoute), &doChangeDefaultRoute);
+ "AudioDeviceIOS::SetLoudspeakerStatus(enable=%d)", enable);
+
+ AVAudioSession* session = [AVAudioSession sharedInstance];
+ NSString* category = session.category;
+ AVAudioSessionCategoryOptions options = session.categoryOptions;
+ // Respect the old category options if the category is already
+ // AVAudioSessionCategoryPlayAndRecord. Otherwise reset them, since the
+ // old options might not be valid for the new category.
+ if ([category isEqualToString:AVAudioSessionCategoryPlayAndRecord]) {
+ if (enable) {
+ options |= AVAudioSessionCategoryOptionDefaultToSpeaker;
+ } else {
+ options &= ~AVAudioSessionCategoryOptionDefaultToSpeaker;
+ }
+ } else {
+ options = AVAudioSessionCategoryOptionDefaultToSpeaker;
+ }
- if (err != noErr) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- "Error changing default output route " \
- "(only available on iOS 3.1 or later)");
- return -1;
+ NSError* error = nil;
+ [session setCategory:AVAudioSessionCategoryPlayAndRecord
+ withOptions:options
+ error:&error];
+ if (error != nil) {
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+ "Error changing default output route ");
+ return -1;
}
return 0;
}
-int32_t AudioDeviceIPhone::GetLoudspeakerStatus(bool &enabled) const {
+int32_t AudioDeviceIOS::GetLoudspeakerStatus(bool &enabled) const {
WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- "AudioDeviceIPhone::SetLoudspeakerStatus(enabled=?)");
-
- UInt32 route(0);
- UInt32 size = sizeof(route);
- OSStatus err = AudioSessionGetProperty(
- kAudioSessionProperty_OverrideCategoryDefaultToSpeaker,
- &size, &route);
- if (err != noErr) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- "Error changing default output route " \
- "(only available on iOS 3.1 or later)");
- return -1;
- }
+ "AudioDeviceIOS::SetLoudspeakerStatus(enabled=?)");
- enabled = route == 1 ? true: false;
+ AVAudioSession* session = [AVAudioSession sharedInstance];
+ AVAudioSessionCategoryOptions options = session.categoryOptions;
+ enabled = options & AVAudioSessionCategoryOptionDefaultToSpeaker;
return 0;
}
-int32_t AudioDeviceIPhone::PlayoutIsAvailable(bool& available) {
+int32_t AudioDeviceIOS::PlayoutIsAvailable(bool& available) {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
available = false;
@@ -734,7 +739,7 @@ int32_t AudioDeviceIPhone::PlayoutIsAvailable(bool& available) {
return 0;
}
-int32_t AudioDeviceIPhone::RecordingIsAvailable(bool& available) {
+int32_t AudioDeviceIOS::RecordingIsAvailable(bool& available) {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
available = false;
@@ -752,7 +757,7 @@ int32_t AudioDeviceIPhone::RecordingIsAvailable(bool& available) {
return 0;
}
-int32_t AudioDeviceIPhone::InitPlayout() {
+int32_t AudioDeviceIOS::InitPlayout() {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
CriticalSectionScoped lock(&_critSect);
@@ -803,12 +808,12 @@ int32_t AudioDeviceIPhone::InitPlayout() {
return 0;
}
-bool AudioDeviceIPhone::PlayoutIsInitialized() const {
+bool AudioDeviceIOS::PlayoutIsInitialized() const {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
return (_playIsInitialized);
}
-int32_t AudioDeviceIPhone::InitRecording() {
+int32_t AudioDeviceIOS::InitRecording() {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
CriticalSectionScoped lock(&_critSect);
@@ -861,12 +866,12 @@ int32_t AudioDeviceIPhone::InitRecording() {
return 0;
}
-bool AudioDeviceIPhone::RecordingIsInitialized() const {
+bool AudioDeviceIOS::RecordingIsInitialized() const {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
return (_recIsInitialized);
}
-int32_t AudioDeviceIPhone::StartRecording() {
+int32_t AudioDeviceIOS::StartRecording() {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
CriticalSectionScoped lock(&_critSect);
@@ -913,7 +918,7 @@ int32_t AudioDeviceIPhone::StartRecording() {
return 0;
}
-int32_t AudioDeviceIPhone::StopRecording() {
+int32_t AudioDeviceIOS::StopRecording() {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
CriticalSectionScoped lock(&_critSect);
@@ -937,12 +942,12 @@ int32_t AudioDeviceIPhone::StopRecording() {
return 0;
}
-bool AudioDeviceIPhone::Recording() const {
+bool AudioDeviceIOS::Recording() const {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
return (_recording);
}
-int32_t AudioDeviceIPhone::StartPlayout() {
+int32_t AudioDeviceIOS::StartPlayout() {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
// This lock is (among other things) needed to avoid concurrency issues
@@ -988,7 +993,7 @@ int32_t AudioDeviceIPhone::StartPlayout() {
return 0;
}
-int32_t AudioDeviceIPhone::StopPlayout() {
+int32_t AudioDeviceIOS::StopPlayout() {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
CriticalSectionScoped lock(&_critSect);
@@ -1012,7 +1017,7 @@ int32_t AudioDeviceIPhone::StopPlayout() {
return 0;
}
-bool AudioDeviceIPhone::Playing() const {
+bool AudioDeviceIOS::Playing() const {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
return (_playing);
@@ -1025,7 +1030,7 @@ bool AudioDeviceIPhone::Playing() const {
// and set enable states after shutdown to same as current.
// In capture thread audio device will be shutdown, then started again.
// ----------------------------------------------------------------------------
-int32_t AudioDeviceIPhone::ResetAudioDevice() {
+int32_t AudioDeviceIOS::ResetAudioDevice() {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
CriticalSectionScoped lock(&_critSect);
@@ -1067,22 +1072,21 @@ int32_t AudioDeviceIPhone::ResetAudioDevice() {
return 0;
}
-int32_t AudioDeviceIPhone::PlayoutDelay(uint16_t& delayMS) const {
+int32_t AudioDeviceIOS::PlayoutDelay(uint16_t& delayMS) const {
delayMS = _playoutDelay;
return 0;
}
-int32_t AudioDeviceIPhone::RecordingDelay(uint16_t& delayMS) const {
+int32_t AudioDeviceIOS::RecordingDelay(uint16_t& delayMS) const {
delayMS = _recordingDelay;
return 0;
}
int32_t
- AudioDeviceIPhone::SetPlayoutBuffer(
- const AudioDeviceModule::BufferType type,
- uint16_t sizeMS) {
+ AudioDeviceIOS::SetPlayoutBuffer(const AudioDeviceModule::BufferType type,
+ uint16_t sizeMS) {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "AudioDeviceIPhone::SetPlayoutBuffer(type=%u, sizeMS=%u)",
+ "AudioDeviceIOS::SetPlayoutBuffer(type=%u, sizeMS=%u)",
type, sizeMS);
WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
@@ -1091,7 +1095,7 @@ int32_t
}
int32_t
- AudioDeviceIPhone::PlayoutBuffer(AudioDeviceModule::BufferType& type,
+ AudioDeviceIOS::PlayoutBuffer(AudioDeviceModule::BufferType& type,
uint16_t& sizeMS) const {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
@@ -1102,7 +1106,7 @@ int32_t
return 0;
}
-int32_t AudioDeviceIPhone::CPULoad(uint16_t& /*load*/) const {
+int32_t AudioDeviceIOS::CPULoad(uint16_t& /*load*/) const {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
@@ -1110,35 +1114,35 @@ int32_t AudioDeviceIPhone::CPULoad(uint16_t& /*load*/) const {
return -1;
}
-bool AudioDeviceIPhone::PlayoutWarning() const {
+bool AudioDeviceIOS::PlayoutWarning() const {
return (_playWarning > 0);
}
-bool AudioDeviceIPhone::PlayoutError() const {
+bool AudioDeviceIOS::PlayoutError() const {
return (_playError > 0);
}
-bool AudioDeviceIPhone::RecordingWarning() const {
+bool AudioDeviceIOS::RecordingWarning() const {
return (_recWarning > 0);
}
-bool AudioDeviceIPhone::RecordingError() const {
+bool AudioDeviceIOS::RecordingError() const {
return (_recError > 0);
}
-void AudioDeviceIPhone::ClearPlayoutWarning() {
+void AudioDeviceIOS::ClearPlayoutWarning() {
_playWarning = 0;
}
-void AudioDeviceIPhone::ClearPlayoutError() {
+void AudioDeviceIOS::ClearPlayoutError() {
_playError = 0;
}
-void AudioDeviceIPhone::ClearRecordingWarning() {
+void AudioDeviceIOS::ClearRecordingWarning() {
_recWarning = 0;
}
-void AudioDeviceIPhone::ClearRecordingError() {
+void AudioDeviceIOS::ClearRecordingError() {
_recError = 0;
}
@@ -1146,7 +1150,7 @@ void AudioDeviceIPhone::ClearRecordingError() {
// Private Methods
// ============================================================================
-int32_t AudioDeviceIPhone::InitPlayOrRecord() {
+int32_t AudioDeviceIOS::InitPlayOrRecord() {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
OSStatus result = -1;
@@ -1187,18 +1191,32 @@ int32_t AudioDeviceIPhone::InitPlayOrRecord() {
}
// Set preferred hardware sample rate to 16 kHz
- Float64 sampleRate(16000.0);
- result = AudioSessionSetProperty(
- kAudioSessionProperty_PreferredHardwareSampleRate,
- sizeof(sampleRate), &sampleRate);
- if (0 != result) {
+ NSError* error = nil;
+ AVAudioSession* session = [AVAudioSession sharedInstance];
+ Float64 preferredSampleRate(16000.0);
+ [session setPreferredSampleRate:preferredSampleRate
+ error:&error];
+ if (error != nil) {
+ const char* errorString = [[error localizedDescription] UTF8String];
WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- "Could not set preferred sample rate (result=%d)", result);
+ "Could not set preferred sample rate: %s", errorString);
+ }
+ error = nil;
+ [session setMode:AVAudioSessionModeVoiceChat
+ error:&error];
+ if (error != nil) {
+ const char* errorString = [[error localizedDescription] UTF8String];
+ WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+ "Could not set mode: %s", errorString);
+ }
+ error = nil;
+ [session setCategory:AVAudioSessionCategoryPlayAndRecord
+ error:&error];
+ if (error != nil) {
+ const char* errorString = [[error localizedDescription] UTF8String];
+ WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+ "Could not set category: %s", errorString);
}
-
- uint32_t voiceChat = kAudioSessionMode_VoiceChat;
- AudioSessionSetProperty(kAudioSessionProperty_Mode,
- sizeof(voiceChat), &voiceChat);
//////////////////////
// Setup Voice Processing Audio Unit
@@ -1293,7 +1311,7 @@ int32_t AudioDeviceIPhone::InitPlayOrRecord() {
" Audio Unit playout opened in sampling rate %f",
playoutDesc.mSampleRate);
- playoutDesc.mSampleRate = sampleRate;
+ playoutDesc.mSampleRate = preferredSampleRate;
// Store the sampling frequency to use towards the Audio Device Buffer
// todo: Add 48 kHz (increase buffer sizes). Other fs?
@@ -1362,7 +1380,7 @@ int32_t AudioDeviceIPhone::InitPlayOrRecord() {
" Audio Unit recording opened in sampling rate %f",
recordingDesc.mSampleRate);
- recordingDesc.mSampleRate = sampleRate;
+ recordingDesc.mSampleRate = preferredSampleRate;
// Set stream format for out/1 (use same sampling frequency as for in/1)
recordingDesc.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger
@@ -1392,25 +1410,73 @@ int32_t AudioDeviceIPhone::InitPlayOrRecord() {
}
// Get hardware sample rate for logging (see if we get what we asked for)
- Float64 hardwareSampleRate = 0.0;
- size = sizeof(hardwareSampleRate);
- result = AudioSessionGetProperty(
- kAudioSessionProperty_CurrentHardwareSampleRate, &size,
- &hardwareSampleRate);
- if (0 != result) {
- WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
- " Could not get current HW sample rate (result=%d)", result);
- }
+ double sampleRate = session.sampleRate;
WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
" Current HW sample rate is %f, ADB sample rate is %d",
- hardwareSampleRate, _adbSampFreq);
+ sampleRate, _adbSampFreq);
+
+ // Listen to audio interruptions.
+ NSNotificationCenter* center = [NSNotificationCenter defaultCenter];
+ id observer =
+ [center addObserverForName:AVAudioSessionInterruptionNotification
+ object:nil
+ queue:[NSOperationQueue mainQueue]
+ usingBlock:^(NSNotification* notification) {
+ NSNumber* typeNumber =
+ [notification userInfo][AVAudioSessionInterruptionTypeKey];
+ AVAudioSessionInterruptionType type =
+ (AVAudioSessionInterruptionType)[typeNumber unsignedIntegerValue];
+ switch (type) {
+ case AVAudioSessionInterruptionTypeBegan:
+ // At this point our audio session has been deactivated and the
+ // audio unit render callbacks no longer occur. Nothing to do.
+ break;
+ case AVAudioSessionInterruptionTypeEnded: {
+ NSError* error = nil;
+ AVAudioSession* session = [AVAudioSession sharedInstance];
+ [session setActive:YES
+ error:&error];
+ if (error != nil) {
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+ "Error activating audio session");
+ }
+ // After an interruption the audio unit render callbacks don't
+ // resume automatically, so we restart the unit manually here.
+ AudioOutputUnitStop(_auVoiceProcessing);
+ AudioOutputUnitStart(_auVoiceProcessing);
+ break;
+ }
+ }
+ }];
+ // Increment refcount on observer using ARC bridge. Instance variable is a
+ // void* instead of an id because header is included in other pure C++
+ // files.
+ _audioInterruptionObserver = (__bridge_retained void*)observer;
+
+ // Activate audio session.
+ error = nil;
+ [session setActive:YES
+ error:&error];
+ if (error != nil) {
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+ "Error activating audio session");
+ }
return 0;
}
-int32_t AudioDeviceIPhone::ShutdownPlayOrRecord() {
+int32_t AudioDeviceIOS::ShutdownPlayOrRecord() {
WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s", __FUNCTION__);
+ if (_audioInterruptionObserver != NULL) {
+ NSNotificationCenter* center = [NSNotificationCenter defaultCenter];
+ // Transfer ownership of observer back to ARC, which will dealloc the
+ // observer once it exits this scope.
+ id observer = (__bridge_transfer id)_audioInterruptionObserver;
+ [center removeObserver:observer];
+ _audioInterruptionObserver = NULL;
+ }
+
// Close and delete AU
OSStatus result = -1;
if (NULL != _auVoiceProcessing) {
@@ -1435,13 +1501,13 @@ int32_t AudioDeviceIPhone::ShutdownPlayOrRecord() {
// ============================================================================
OSStatus
- AudioDeviceIPhone::RecordProcess(void *inRefCon,
- AudioUnitRenderActionFlags *ioActionFlags,
- const AudioTimeStamp *inTimeStamp,
- UInt32 inBusNumber,
- UInt32 inNumberFrames,
- AudioBufferList *ioData) {
- AudioDeviceIPhone* ptrThis = static_cast<AudioDeviceIPhone*>(inRefCon);
+ AudioDeviceIOS::RecordProcess(void *inRefCon,
+ AudioUnitRenderActionFlags *ioActionFlags,
+ const AudioTimeStamp *inTimeStamp,
+ UInt32 inBusNumber,
+ UInt32 inNumberFrames,
+ AudioBufferList *ioData) {
+ AudioDeviceIOS* ptrThis = static_cast<AudioDeviceIOS*>(inRefCon);
return ptrThis->RecordProcessImpl(ioActionFlags,
inTimeStamp,
@@ -1451,11 +1517,10 @@ OSStatus
OSStatus
- AudioDeviceIPhone::RecordProcessImpl(
- AudioUnitRenderActionFlags *ioActionFlags,
- const AudioTimeStamp *inTimeStamp,
- uint32_t inBusNumber,
- uint32_t inNumberFrames) {
+ AudioDeviceIOS::RecordProcessImpl(AudioUnitRenderActionFlags *ioActionFlags,
+ const AudioTimeStamp *inTimeStamp,
+ uint32_t inBusNumber,
+ uint32_t inNumberFrames) {
// Setup some basic stuff
// Use temp buffer not to lock up recording buffer more than necessary
// todo: Make dataTmp a member variable with static size that holds
@@ -1561,20 +1626,20 @@ OSStatus
}
OSStatus
- AudioDeviceIPhone::PlayoutProcess(void *inRefCon,
- AudioUnitRenderActionFlags *ioActionFlags,
- const AudioTimeStamp *inTimeStamp,
- UInt32 inBusNumber,
- UInt32 inNumberFrames,
- AudioBufferList *ioData) {
- AudioDeviceIPhone* ptrThis = static_cast<AudioDeviceIPhone*>(inRefCon);
+ AudioDeviceIOS::PlayoutProcess(void *inRefCon,
+ AudioUnitRenderActionFlags *ioActionFlags,
+ const AudioTimeStamp *inTimeStamp,
+ UInt32 inBusNumber,
+ UInt32 inNumberFrames,
+ AudioBufferList *ioData) {
+ AudioDeviceIOS* ptrThis = static_cast<AudioDeviceIOS*>(inRefCon);
return ptrThis->PlayoutProcessImpl(inNumberFrames, ioData);
}
OSStatus
- AudioDeviceIPhone::PlayoutProcessImpl(uint32_t inNumberFrames,
- AudioBufferList *ioData) {
+ AudioDeviceIOS::PlayoutProcessImpl(uint32_t inNumberFrames,
+ AudioBufferList *ioData) {
// Setup some basic stuff
// assert(sizeof(short) == 2); // Assumption for implementation
@@ -1677,7 +1742,7 @@ OSStatus
return 0;
}
-void AudioDeviceIPhone::UpdatePlayoutDelay() {
+void AudioDeviceIOS::UpdatePlayoutDelay() {
++_playoutDelayMeasurementCounter;
if (_playoutDelayMeasurementCounter >= 100) {
@@ -1686,36 +1751,25 @@ void AudioDeviceIPhone::UpdatePlayoutDelay() {
// Since this is eventually rounded to integral ms, add 0.5ms
// here to get round-to-nearest-int behavior instead of
// truncation.
- float totalDelaySeconds = 0.0005;
+ double totalDelaySeconds = 0.0005;
// HW output latency
- Float32 f32(0);
- UInt32 size = sizeof(f32);
- OSStatus result = AudioSessionGetProperty(
- kAudioSessionProperty_CurrentHardwareOutputLatency, &size, &f32);
- if (0 != result) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- "error HW latency (result=%d)", result);
- }
- assert(f32 >= 0);
- totalDelaySeconds += f32;
+ AVAudioSession* session = [AVAudioSession sharedInstance];
+ double latency = session.outputLatency;
+ assert(latency >= 0);
+ totalDelaySeconds += latency;
// HW buffer duration
- f32 = 0;
- result = AudioSessionGetProperty(
- kAudioSessionProperty_CurrentHardwareIOBufferDuration, &size, &f32);
- if (0 != result) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- "error HW buffer duration (result=%d)", result);
- }
- assert(f32 >= 0);
- totalDelaySeconds += f32;
+ double ioBufferDuration = session.IOBufferDuration;
+ assert(ioBufferDuration >= 0);
+ totalDelaySeconds += ioBufferDuration;
// AU latency
Float64 f64(0);
- size = sizeof(f64);
- result = AudioUnitGetProperty(_auVoiceProcessing,
- kAudioUnitProperty_Latency, kAudioUnitScope_Global, 0, &f64, &size);
+ UInt32 size = sizeof(f64);
+ OSStatus result = AudioUnitGetProperty(
+ _auVoiceProcessing, kAudioUnitProperty_Latency,
+ kAudioUnitScope_Global, 0, &f64, &size);
if (0 != result) {
WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
"error AU latency (result=%d)", result);
@@ -1733,7 +1787,7 @@ void AudioDeviceIPhone::UpdatePlayoutDelay() {
// todo: Add playout buffer?
}
-void AudioDeviceIPhone::UpdateRecordingDelay() {
+void AudioDeviceIOS::UpdateRecordingDelay() {
++_recordingDelayMeasurementCounter;
if (_recordingDelayMeasurementCounter >= 100) {
@@ -1742,37 +1796,25 @@ void AudioDeviceIPhone::UpdateRecordingDelay() {
// Since this is eventually rounded to integral ms, add 0.5ms
// here to get round-to-nearest-int behavior instead of
// truncation.
- float totalDelaySeconds = 0.0005;
+ double totalDelaySeconds = 0.0005;
// HW input latency
- Float32 f32(0);
- UInt32 size = sizeof(f32);
- OSStatus result = AudioSessionGetProperty(
- kAudioSessionProperty_CurrentHardwareInputLatency, &size, &f32);
- if (0 != result) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- "error HW latency (result=%d)", result);
- }
- assert(f32 >= 0);
- totalDelaySeconds += f32;
+ AVAudioSession* session = [AVAudioSession sharedInstance];
+ double latency = session.inputLatency;
+ assert(latency >= 0);
+ totalDelaySeconds += latency;
// HW buffer duration
- f32 = 0;
- result = AudioSessionGetProperty(
- kAudioSessionProperty_CurrentHardwareIOBufferDuration, &size, &f32);
- if (0 != result) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- "error HW buffer duration (result=%d)", result);
- }
- assert(f32 >= 0);
- totalDelaySeconds += f32;
+ double ioBufferDuration = session.IOBufferDuration;
+ assert(ioBufferDuration >= 0);
+ totalDelaySeconds += ioBufferDuration;
// AU latency
Float64 f64(0);
- size = sizeof(f64);
- result = AudioUnitGetProperty(_auVoiceProcessing,
- kAudioUnitProperty_Latency,
- kAudioUnitScope_Global, 0, &f64, &size);
+ UInt32 size = sizeof(f64);
+ OSStatus result = AudioUnitGetProperty(
+ _auVoiceProcessing, kAudioUnitProperty_Latency,
+ kAudioUnitScope_Global, 0, &f64, &size);
if (0 != result) {
WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
"error AU latency (result=%d)", result);
@@ -1799,11 +1841,11 @@ void AudioDeviceIPhone::UpdateRecordingDelay() {
}
}
-bool AudioDeviceIPhone::RunCapture(void* ptrThis) {
- return static_cast<AudioDeviceIPhone*>(ptrThis)->CaptureWorkerThread();
+bool AudioDeviceIOS::RunCapture(void* ptrThis) {
+ return static_cast<AudioDeviceIOS*>(ptrThis)->CaptureWorkerThread();
}
-bool AudioDeviceIPhone::CaptureWorkerThread() {
+bool AudioDeviceIOS::CaptureWorkerThread() {
if (_recording) {
int bufPos = 0;
unsigned int lowestSeq = 0;
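Note: throughout this file the deprecated AudioSession C API is replaced with AVAudioSession. The speaker-override logic above reduces to toggling a single category option; a standalone sketch of the same flow (SetSpeakerEnabled is a hypothetical helper name, assuming iOS 6 or later):

#import <AVFoundation/AVFoundation.h>

// Mirrors AudioDeviceIOS::SetLoudspeakerStatus(): keep the existing
// options when the category is already PlayAndRecord, otherwise start
// from just the speaker override, then (re)apply the category.
static BOOL SetSpeakerEnabled(BOOL enable, NSError** error) {
  AVAudioSession* session = [AVAudioSession sharedInstance];
  AVAudioSessionCategoryOptions options = session.categoryOptions;
  if ([session.category
          isEqualToString:AVAudioSessionCategoryPlayAndRecord]) {
    if (enable) {
      options |= AVAudioSessionCategoryOptionDefaultToSpeaker;
    } else {
      options &= ~AVAudioSessionCategoryOptionDefaultToSpeaker;
    }
  } else {
    options = AVAudioSessionCategoryOptionDefaultToSpeaker;
  }
  return [session setCategory:AVAudioSessionCategoryPlayAndRecord
                  withOptions:options
                        error:error];
}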
diff --git a/modules/audio_device/ios/audio_device_utility_ios.cc b/modules/audio_device/ios/audio_device_utility_ios.cc
index 6bbceb92..33628175 100644
--- a/modules/audio_device/ios/audio_device_utility_ios.cc
+++ b/modules/audio_device/ios/audio_device_utility_ios.cc
@@ -15,7 +15,7 @@
#include "webrtc/system_wrappers/interface/trace.h"
namespace webrtc {
-AudioDeviceUtilityIPhone::AudioDeviceUtilityIPhone(const int32_t id)
+AudioDeviceUtilityIOS::AudioDeviceUtilityIOS(const int32_t id)
:
_critSect(*CriticalSectionWrapper::CreateCriticalSection()),
_id(id),
@@ -24,15 +24,16 @@ AudioDeviceUtilityIPhone::AudioDeviceUtilityIPhone(const int32_t id)
"%s created", __FUNCTION__);
}
-AudioDeviceUtilityIPhone::~AudioDeviceUtilityIPhone() {
+AudioDeviceUtilityIOS::~AudioDeviceUtilityIOS() {
WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id,
"%s destroyed", __FUNCTION__);
- CriticalSectionScoped lock(&_critSect);
-
+ {
+ CriticalSectionScoped lock(&_critSect);
+ }
delete &_critSect;
}
-int32_t AudioDeviceUtilityIPhone::Init() {
+int32_t AudioDeviceUtilityIOS::Init() {
WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
"%s", __FUNCTION__);
diff --git a/modules/audio_device/ios/audio_device_utility_ios.h b/modules/audio_device/ios/audio_device_utility_ios.h
index 081ab82d..16948685 100644
--- a/modules/audio_device/ios/audio_device_utility_ios.h
+++ b/modules/audio_device/ios/audio_device_utility_ios.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_UTILITY_IPHONE_H
-#define WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_UTILITY_IPHONE_H
+#ifndef WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_UTILITY_IOS_H
+#define WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_UTILITY_IOS_H
#include "webrtc/modules/audio_device/audio_device_utility.h"
#include "webrtc/modules/audio_device/include/audio_device.h"
@@ -17,15 +17,15 @@
namespace webrtc {
class CriticalSectionWrapper;
-class AudioDeviceUtilityIPhone: public AudioDeviceUtility {
-public:
- AudioDeviceUtilityIPhone(const int32_t id);
- AudioDeviceUtilityIPhone();
- virtual ~AudioDeviceUtilityIPhone();
+class AudioDeviceUtilityIOS: public AudioDeviceUtility {
+ public:
+ AudioDeviceUtilityIOS(const int32_t id);
+ AudioDeviceUtilityIOS();
+ virtual ~AudioDeviceUtilityIOS();
virtual int32_t Init();
-private:
+ private:
CriticalSectionWrapper& _critSect;
int32_t _id;
AudioDeviceModule::ErrorCode _lastError;
@@ -33,4 +33,4 @@ private:
} // namespace webrtc
-#endif // WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_UTILITY_IPHONE_H
+#endif // WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_UTILITY_IOS_H
diff --git a/modules/audio_processing/aecm/aecm_core_mips.c b/modules/audio_processing/aecm/aecm_core_mips.c
index 31f232b8..4c925ca2 100644
--- a/modules/audio_processing/aecm/aecm_core_mips.c
+++ b/modules/audio_processing/aecm/aecm_core_mips.c
@@ -1485,7 +1485,7 @@ static void ComfortNoise(AecmCore_t* aecm,
"mul %[tmp321], %[tmp321], %[tmp161] \n\t"
"sra %[nrsh1], %[tmp32], 14 \n\t"
"sra %[nrsh2], %[tmp321], 14 \n\t"
- : [nrsh1] "=r" (nrsh1), [nrsh2] "=r" (nrsh2)
+ : [nrsh1] "=&r" (nrsh1), [nrsh2] "=r" (nrsh2)
: [tmp16] "r" (tmp16), [tmp161] "r" (tmp161), [tmp32] "r" (tmp32),
[tmp321] "r" (tmp321)
: "memory", "hi", "lo"
diff --git a/modules/audio_processing/audio_buffer.cc b/modules/audio_processing/audio_buffer.cc
index 35e1eb7c..fb2c200e 100644
--- a/modules/audio_processing/audio_buffer.cc
+++ b/modules/audio_processing/audio_buffer.cc
@@ -51,7 +51,6 @@ int KeyboardChannelIndex(AudioProcessing::ChannelLayout layout) {
return -1;
}
-
void StereoToMono(const float* left, const float* right, float* out,
int samples_per_channel) {
for (int i = 0; i < samples_per_channel; ++i) {
@@ -71,8 +70,9 @@ void StereoToMono(const int16_t* left, const int16_t* right, int16_t* out,
// One int16_t and one float ChannelBuffer that are kept in sync. The sync is
// broken when someone requests write access to either ChannelBuffer, and
// reestablished when someone requests the outdated ChannelBuffer. It is
-// therefore safe to use the return value of ibuf() and fbuf() until the next
-// call to the other method.
+// therefore safe to use the return value of ibuf_const() and fbuf_const()
+// until the next call to ibuf() or fbuf(), and the return value of ibuf() and
+// fbuf() until the next call to any of the other functions.
class IFChannelBuffer {
public:
IFChannelBuffer(int samples_per_channel, int num_channels)
@@ -81,19 +81,24 @@ class IFChannelBuffer {
fvalid_(true),
fbuf_(samples_per_channel, num_channels) {}
- ChannelBuffer<int16_t>* ibuf() {
+ ChannelBuffer<int16_t>* ibuf() { return ibuf(false); }
+ ChannelBuffer<float>* fbuf() { return fbuf(false); }
+ const ChannelBuffer<int16_t>* ibuf_const() { return ibuf(true); }
+ const ChannelBuffer<float>* fbuf_const() { return fbuf(true); }
+
+ private:
+ ChannelBuffer<int16_t>* ibuf(bool readonly) {
RefreshI();
- fvalid_ = false;
+ fvalid_ = readonly;
return &ibuf_;
}
- ChannelBuffer<float>* fbuf() {
+ ChannelBuffer<float>* fbuf(bool readonly) {
RefreshF();
- ivalid_ = false;
+ ivalid_ = readonly;
return &fbuf_;
}
- private:
void RefreshF() {
if (!fvalid_) {
assert(ivalid_);
@@ -126,24 +131,6 @@ class IFChannelBuffer {
ChannelBuffer<float> fbuf_;
};
-class SplitChannelBuffer {
- public:
- SplitChannelBuffer(int samples_per_split_channel, int num_channels)
- : low_(samples_per_split_channel, num_channels),
- high_(samples_per_split_channel, num_channels) {
- }
- ~SplitChannelBuffer() {}
-
- int16_t* low_channel(int i) { return low_.ibuf()->channel(i); }
- int16_t* high_channel(int i) { return high_.ibuf()->channel(i); }
- float* low_channel_f(int i) { return low_.fbuf()->channel(i); }
- float* high_channel_f(int i) { return high_.fbuf()->channel(i); }
-
- private:
- IFChannelBuffer low_;
- IFChannelBuffer high_;
-};
-
AudioBuffer::AudioBuffer(int input_samples_per_channel,
int num_input_channels,
int process_samples_per_channel,
@@ -155,8 +142,7 @@ AudioBuffer::AudioBuffer(int input_samples_per_channel,
num_proc_channels_(num_process_channels),
output_samples_per_channel_(output_samples_per_channel),
samples_per_split_channel_(proc_samples_per_channel_),
- num_mixed_channels_(0),
- num_mixed_low_pass_channels_(0),
+ mixed_low_pass_valid_(false),
reference_copied_(false),
activity_(AudioFrame::kVadUnknown),
keyboard_data_(NULL),
@@ -200,8 +186,10 @@ AudioBuffer::AudioBuffer(int input_samples_per_channel,
if (proc_samples_per_channel_ == kSamplesPer32kHzChannel) {
samples_per_split_channel_ = kSamplesPer16kHzChannel;
- split_channels_.reset(new SplitChannelBuffer(samples_per_split_channel_,
- num_proc_channels_));
+ split_channels_low_.reset(new IFChannelBuffer(samples_per_split_channel_,
+ num_proc_channels_));
+ split_channels_high_.reset(new IFChannelBuffer(samples_per_split_channel_,
+ num_proc_channels_));
filter_states_.reset(new SplitFilterStates[num_proc_channels_]);
}
}
@@ -278,89 +266,102 @@ void AudioBuffer::CopyTo(int samples_per_channel,
void AudioBuffer::InitForNewData() {
keyboard_data_ = NULL;
- num_mixed_channels_ = 0;
- num_mixed_low_pass_channels_ = 0;
+ mixed_low_pass_valid_ = false;
reference_copied_ = false;
activity_ = AudioFrame::kVadUnknown;
}
const int16_t* AudioBuffer::data(int channel) const {
- assert(channel >= 0 && channel < num_proc_channels_);
- return channels_->ibuf()->channel(channel);
+ return channels_->ibuf_const()->channel(channel);
}
int16_t* AudioBuffer::data(int channel) {
- const AudioBuffer* t = this;
- return const_cast<int16_t*>(t->data(channel));
+ mixed_low_pass_valid_ = false;
+ return channels_->ibuf()->channel(channel);
}
const float* AudioBuffer::data_f(int channel) const {
- assert(channel >= 0 && channel < num_proc_channels_);
- return channels_->fbuf()->channel(channel);
+ return channels_->fbuf_const()->channel(channel);
}
float* AudioBuffer::data_f(int channel) {
- const AudioBuffer* t = this;
- return const_cast<float*>(t->data_f(channel));
+ mixed_low_pass_valid_ = false;
+ return channels_->fbuf()->channel(channel);
}
const int16_t* AudioBuffer::low_pass_split_data(int channel) const {
- assert(channel >= 0 && channel < num_proc_channels_);
- return split_channels_.get() ? split_channels_->low_channel(channel)
- : data(channel);
+ return split_channels_low_.get()
+ ? split_channels_low_->ibuf_const()->channel(channel)
+ : data(channel);
}
int16_t* AudioBuffer::low_pass_split_data(int channel) {
- const AudioBuffer* t = this;
- return const_cast<int16_t*>(t->low_pass_split_data(channel));
+ mixed_low_pass_valid_ = false;
+ return split_channels_low_.get()
+ ? split_channels_low_->ibuf()->channel(channel)
+ : data(channel);
}
const float* AudioBuffer::low_pass_split_data_f(int channel) const {
- assert(channel >= 0 && channel < num_proc_channels_);
- return split_channels_.get() ? split_channels_->low_channel_f(channel)
- : data_f(channel);
+ return split_channels_low_.get()
+ ? split_channels_low_->fbuf_const()->channel(channel)
+ : data_f(channel);
}
float* AudioBuffer::low_pass_split_data_f(int channel) {
- const AudioBuffer* t = this;
- return const_cast<float*>(t->low_pass_split_data_f(channel));
+ mixed_low_pass_valid_ = false;
+ return split_channels_low_.get()
+ ? split_channels_low_->fbuf()->channel(channel)
+ : data_f(channel);
}
const int16_t* AudioBuffer::high_pass_split_data(int channel) const {
- assert(channel >= 0 && channel < num_proc_channels_);
- return split_channels_.get() ? split_channels_->high_channel(channel) : NULL;
+ return split_channels_high_.get()
+ ? split_channels_high_->ibuf_const()->channel(channel)
+ : NULL;
}
int16_t* AudioBuffer::high_pass_split_data(int channel) {
- const AudioBuffer* t = this;
- return const_cast<int16_t*>(t->high_pass_split_data(channel));
+ return split_channels_high_.get()
+ ? split_channels_high_->ibuf()->channel(channel)
+ : NULL;
}
const float* AudioBuffer::high_pass_split_data_f(int channel) const {
- assert(channel >= 0 && channel < num_proc_channels_);
- return split_channels_.get() ? split_channels_->high_channel_f(channel)
- : NULL;
+ return split_channels_high_.get()
+ ? split_channels_high_->fbuf_const()->channel(channel)
+ : NULL;
}
float* AudioBuffer::high_pass_split_data_f(int channel) {
- const AudioBuffer* t = this;
- return const_cast<float*>(t->high_pass_split_data_f(channel));
+ return split_channels_high_.get()
+ ? split_channels_high_->fbuf()->channel(channel)
+ : NULL;
}
-const int16_t* AudioBuffer::mixed_data(int channel) const {
- assert(channel >= 0 && channel < num_mixed_channels_);
-
- return mixed_channels_->channel(channel);
-}
+const int16_t* AudioBuffer::mixed_low_pass_data() {
+ // Currently only mixing stereo to mono is supported.
+ assert(num_proc_channels_ == 1 || num_proc_channels_ == 2);
-const int16_t* AudioBuffer::mixed_low_pass_data(int channel) const {
- assert(channel >= 0 && channel < num_mixed_low_pass_channels_);
+ if (num_proc_channels_ == 1) {
+ return low_pass_split_data(0);
+ }
- return mixed_low_pass_channels_->channel(channel);
+ if (!mixed_low_pass_valid_) {
+ if (!mixed_low_pass_channels_.get()) {
+ mixed_low_pass_channels_.reset(
+ new ChannelBuffer<int16_t>(samples_per_split_channel_, 1));
+ }
+ StereoToMono(low_pass_split_data(0),
+ low_pass_split_data(1),
+ mixed_low_pass_channels_->data(),
+ samples_per_split_channel_);
+ mixed_low_pass_valid_ = true;
+ }
+ return mixed_low_pass_channels_->data();
}
const int16_t* AudioBuffer::low_pass_reference(int channel) const {
- assert(channel >= 0 && channel < num_proc_channels_);
if (!reference_copied_) {
return NULL;
}
@@ -444,42 +445,6 @@ void AudioBuffer::InterleaveTo(AudioFrame* frame, bool data_changed) const {
}
}
-void AudioBuffer::CopyAndMix(int num_mixed_channels) {
- // We currently only support the stereo to mono case.
- assert(num_proc_channels_ == 2);
- assert(num_mixed_channels == 1);
- if (!mixed_channels_.get()) {
- mixed_channels_.reset(
- new ChannelBuffer<int16_t>(proc_samples_per_channel_,
- num_mixed_channels));
- }
-
- StereoToMono(channels_->ibuf()->channel(0),
- channels_->ibuf()->channel(1),
- mixed_channels_->channel(0),
- proc_samples_per_channel_);
-
- num_mixed_channels_ = num_mixed_channels;
-}
-
-void AudioBuffer::CopyAndMixLowPass(int num_mixed_channels) {
- // We currently only support the stereo to mono case.
- assert(num_proc_channels_ == 2);
- assert(num_mixed_channels == 1);
- if (!mixed_low_pass_channels_.get()) {
- mixed_low_pass_channels_.reset(
- new ChannelBuffer<int16_t>(samples_per_split_channel_,
- num_mixed_channels));
- }
-
- StereoToMono(low_pass_split_data(0),
- low_pass_split_data(1),
- mixed_low_pass_channels_->channel(0),
- samples_per_split_channel_);
-
- num_mixed_low_pass_channels_ = num_mixed_channels;
-}
-
void AudioBuffer::CopyLowPassToReference() {
reference_copied_ = true;
if (!low_pass_reference_channels_.get()) {
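Note: the IFChannelBuffer rework above lets const accessors refresh one representation without invalidating the other, which is what makes the caching behind mixed_low_pass_valid_ safe. A simplified standalone sketch of the scheme (DualBuffer is illustrative, not the WebRTC class; the conversions omit the rounding and clamping the real code performs):

#include <cassert>
#include <cstdint>
#include <vector>

// Two views of the same samples; each side carries a validity flag.
// Read access refreshes a stale side, write access marks the other stale.
class DualBuffer {
 public:
  explicit DualBuffer(size_t n)
      : ivalid_(true), fvalid_(true), i_(n), f_(n) {}

  const std::vector<int16_t>& iread() { RefreshI(); return i_; }
  const std::vector<float>& fread() { RefreshF(); return f_; }
  std::vector<int16_t>& iwrite() { RefreshI(); fvalid_ = false; return i_; }
  std::vector<float>& fwrite() { RefreshF(); ivalid_ = false; return f_; }

 private:
  void RefreshI() {
    if (!ivalid_) {
      assert(fvalid_);
      for (size_t k = 0; k < i_.size(); ++k)
        i_[k] = static_cast<int16_t>(f_[k]);
      ivalid_ = true;
    }
  }
  void RefreshF() {
    if (!fvalid_) {
      assert(ivalid_);
      for (size_t k = 0; k < f_.size(); ++k)
        f_[k] = static_cast<float>(i_[k]);
      fvalid_ = true;
    }
  }
  bool ivalid_;
  bool fvalid_;
  std::vector<int16_t> i_;
  std::vector<float> f_;
};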
diff --git a/modules/audio_processing/audio_buffer.h b/modules/audio_processing/audio_buffer.h
index db24e959..acf5753c 100644
--- a/modules/audio_processing/audio_buffer.h
+++ b/modules/audio_processing/audio_buffer.h
@@ -23,7 +23,6 @@
namespace webrtc {
class PushSincResampler;
-class SplitChannelBuffer;
class IFChannelBuffer;
struct SplitFilterStates {
@@ -56,15 +55,18 @@ class AudioBuffer {
int samples_per_split_channel() const;
int samples_per_keyboard_channel() const;
- // It can be assumed that channels are stored contiguously.
+ // Sample array accessors. Channels are guaranteed to be stored contiguously
+ // in memory. Prefer to use the const variants of each accessor when
+ // possible, since they incur less float<->int16 conversion overhead.
int16_t* data(int channel);
const int16_t* data(int channel) const;
int16_t* low_pass_split_data(int channel);
const int16_t* low_pass_split_data(int channel) const;
int16_t* high_pass_split_data(int channel);
const int16_t* high_pass_split_data(int channel) const;
- const int16_t* mixed_data(int channel) const;
- const int16_t* mixed_low_pass_data(int channel) const;
+ // Returns a pointer to the low-pass data downmixed to mono. If the data
+ // isn't already available, it is recalculated.
+ const int16_t* mixed_low_pass_data();
const int16_t* low_pass_reference(int channel) const;
// Float versions of the accessors, with automatic conversion back and forth
@@ -85,7 +87,6 @@ class AudioBuffer {
// Use for int16 interleaved data.
void DeinterleaveFrom(AudioFrame* audioFrame);
- void InterleaveTo(AudioFrame* audioFrame) const;
// If |data_changed| is false, only the non-audio data members will be copied
// to |frame|.
void InterleaveTo(AudioFrame* frame, bool data_changed) const;
@@ -97,9 +98,6 @@ class AudioBuffer {
void CopyTo(int samples_per_channel,
AudioProcessing::ChannelLayout layout,
float* const* data);
-
- void CopyAndMix(int num_mixed_channels);
- void CopyAndMixLowPass(int num_mixed_channels);
void CopyLowPassToReference();
private:
@@ -112,16 +110,15 @@ class AudioBuffer {
const int num_proc_channels_;
const int output_samples_per_channel_;
int samples_per_split_channel_;
- int num_mixed_channels_;
- int num_mixed_low_pass_channels_;
+ bool mixed_low_pass_valid_;
bool reference_copied_;
AudioFrame::VADActivity activity_;
const float* keyboard_data_;
scoped_ptr<IFChannelBuffer> channels_;
- scoped_ptr<SplitChannelBuffer> split_channels_;
+ scoped_ptr<IFChannelBuffer> split_channels_low_;
+ scoped_ptr<IFChannelBuffer> split_channels_high_;
scoped_ptr<SplitFilterStates[]> filter_states_;
- scoped_ptr<ChannelBuffer<int16_t> > mixed_channels_;
scoped_ptr<ChannelBuffer<int16_t> > mixed_low_pass_channels_;
scoped_ptr<ChannelBuffer<int16_t> > low_pass_reference_channels_;
scoped_ptr<ChannelBuffer<float> > input_buffer_;
diff --git a/modules/audio_processing/common.h b/modules/audio_processing/common.h
index 42454df2..98e36cb0 100644
--- a/modules/audio_processing/common.h
+++ b/modules/audio_processing/common.h
@@ -54,10 +54,14 @@ class ChannelBuffer {
}
T* data() { return data_.get(); }
- T* channel(int i) {
- assert(i < num_channels_);
+ const T* channel(int i) const {
+ assert(i >= 0 && i < num_channels_);
return channels_[i];
}
+ T* channel(int i) {
+ const ChannelBuffer<T>* t = this;
+ return const_cast<T*>(t->channel(i));
+ }
T** channels() { return channels_.get(); }
int samples_per_channel() { return samples_per_channel_; }
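The channel() overloads above use the common C++ idiom of implementing the non-const accessor in terms of the const one, so the bounds check lives in a single place. A self-contained sketch of the same pattern:

#include <cassert>

class IntBuffer {
 public:
  const int* at(int i) const {
    assert(i >= 0 && i < size_);  // The one and only bounds check.
    return &data_[i];
  }
  int* at(int i) {
    // Delegate to the const overload; casting constness back off is safe
    // because *this is known to be non-const in this scope.
    const IntBuffer* self = this;
    return const_cast<int*>(self->at(i));
  }

 private:
  int data_[8] = {};
  int size_ = 8;
};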
diff --git a/modules/audio_processing/gain_control_impl.cc b/modules/audio_processing/gain_control_impl.cc
index a67b67ec..cf7df169 100644
--- a/modules/audio_processing/gain_control_impl.cc
+++ b/modules/audio_processing/gain_control_impl.cc
@@ -59,17 +59,11 @@ int GainControlImpl::ProcessRenderAudio(AudioBuffer* audio) {
assert(audio->samples_per_split_channel() <= 160);
- const int16_t* mixed_data = audio->low_pass_split_data(0);
- if (audio->num_channels() > 1) {
- audio->CopyAndMixLowPass(1);
- mixed_data = audio->mixed_low_pass_data(0);
- }
-
for (int i = 0; i < num_handles(); i++) {
Handle* my_handle = static_cast<Handle*>(handle(i));
int err = WebRtcAgc_AddFarend(
my_handle,
- mixed_data,
+ audio->mixed_low_pass_data(),
static_cast<int16_t>(audio->samples_per_split_channel()));
if (err != apm_->kNoError) {
diff --git a/modules/audio_processing/include/audio_processing.h b/modules/audio_processing/include/audio_processing.h
index 6f4cc9e7..30f0d9c5 100644
--- a/modules/audio_processing/include/audio_processing.h
+++ b/modules/audio_processing/include/audio_processing.h
@@ -209,9 +209,6 @@ class AudioProcessing {
// ensures the options are applied immediately.
virtual void SetExtraOptions(const Config& config) = 0;
- virtual int EnableExperimentalNs(bool enable) { return kNoError; }
- virtual bool experimental_ns_enabled() const { return false; }
-
// DEPRECATED.
// TODO(ajm): Remove after Chromium has upgraded to using Initialize().
virtual int set_sample_rate_hz(int rate) = 0;
diff --git a/modules/audio_processing/utility/ring_buffer_unittest.cc b/modules/audio_processing/utility/ring_buffer_unittest.cc
index 5dacf0b8..f5c36c2a 100644
--- a/modules/audio_processing/utility/ring_buffer_unittest.cc
+++ b/modules/audio_processing/utility/ring_buffer_unittest.cc
@@ -52,8 +52,8 @@ static int CheckIncrementingData(int* data, int num_elements,
// We use ASSERTs in this test to avoid obscuring the seed in the case of a
// failure.
static void RandomStressTest(int** data_ptr) {
- const int kNumTests = 100;
- const int kNumOps = 10000;
+ const int kNumTests = 10;
+ const int kNumOps = 1000;
const int kMaxBufferSize = 1000;
unsigned int seed = time(NULL);
diff --git a/modules/audio_processing/voice_detection_impl.cc b/modules/audio_processing/voice_detection_impl.cc
index c6e497ff..31336b41 100644
--- a/modules/audio_processing/voice_detection_impl.cc
+++ b/modules/audio_processing/voice_detection_impl.cc
@@ -61,17 +61,11 @@ int VoiceDetectionImpl::ProcessCaptureAudio(AudioBuffer* audio) {
}
assert(audio->samples_per_split_channel() <= 160);
- const int16_t* mixed_data = audio->low_pass_split_data(0);
- if (audio->num_channels() > 1) {
- audio->CopyAndMixLowPass(1);
- mixed_data = audio->mixed_low_pass_data(0);
- }
-
// TODO(ajm): concatenate data in frame buffer here.
int vad_ret = WebRtcVad_Process(static_cast<Handle*>(handle(0)),
apm_->proc_split_sample_rate_hz(),
- mixed_data,
+ audio->mixed_low_pass_data(),
frame_size_samples_);
if (vad_ret == 0) {
stream_has_voice_ = false;
diff --git a/modules/desktop_capture/win/cursor.cc b/modules/desktop_capture/win/cursor.cc
index 00055c44..e3c272ce 100644
--- a/modules/desktop_capture/win/cursor.cc
+++ b/modules/desktop_capture/win/cursor.cc
@@ -197,7 +197,7 @@ MouseCursor* CreateMouseCursorFromHCursor(HDC dc, HCURSOR cursor) {
// The XOR mask becomes the color bitmap.
memcpy(
- image->data(), mask_plane + (width * height), image->stride() * width);
+ image->data(), mask_plane + (width * height), image->stride() * height);
}
// Reconstruct transparency from the mask if the color image does not have
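The one-line fix above corrects the size of the XOR-mask copy: a bitmap occupies stride bytes per row times the number of rows, so the byte count scales with height, not width. In sketch form:

#include <cstddef>

// Byte size of a bitmap buffer: bytes per row (stride) times row count.
static size_t BitmapBytes(int stride_bytes, int height_rows) {
  return static_cast<size_t>(stride_bytes) * height_rows;
}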
diff --git a/modules/media_file/source/media_file_unittest.cc b/modules/media_file/source/media_file_unittest.cc
index d658dc2c..56d3544c 100644
--- a/modules/media_file/source/media_file_unittest.cc
+++ b/modules/media_file/source/media_file_unittest.cc
@@ -10,6 +10,7 @@
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/modules/media_file/interface/media_file.h"
+#include "webrtc/system_wrappers/interface/compile_assert.h"
#include "webrtc/system_wrappers/interface/sleep.h"
#include "webrtc/test/testsupport/fileutils.h"
#include "webrtc/test/testsupport/gtest_disable.h"
@@ -45,3 +46,50 @@ TEST_F(MediaFileTest, DISABLED_ON_ANDROID(StartPlayingAudioFileWithoutError)) {
ASSERT_EQ(0, media_file_->StopPlaying());
}
+
+TEST_F(MediaFileTest, WriteWavFile) {
+ // Write file.
+ static const int kHeaderSize = 44;
+ static const int kPayloadSize = 320;
+ webrtc::CodecInst codec = {0, "L16", 16000, kPayloadSize, 1};
+ std::string outfile = webrtc::test::OutputPath() + "wavtest.wav";
+ ASSERT_EQ(0,
+ media_file_->StartRecordingAudioFile(
+ outfile.c_str(), webrtc::kFileFormatWavFile, codec));
+ static const int8_t kFakeData[kPayloadSize] = {0};
+ ASSERT_EQ(0, media_file_->IncomingAudioData(kFakeData, kPayloadSize));
+ ASSERT_EQ(0, media_file_->StopRecording());
+
+ // Check the file we just wrote.
+ static const uint8_t kExpectedHeader[] = {
+ 'R', 'I', 'F', 'F',
+ 0x64, 0x1, 0, 0, // size of whole file - 8: 320 + 44 - 8
+ 'W', 'A', 'V', 'E',
+ 'f', 'm', 't', ' ',
+ 0x10, 0, 0, 0, // size of fmt block - 8: 24 - 8
+ 0x1, 0, // format: PCM (1)
+ 0x1, 0, // channels: 1
+ 0x80, 0x3e, 0, 0, // sample rate: 16000
+ 0, 0x7d, 0, 0, // byte rate: 2 * 16000
+ 0x2, 0, // block align: NumChannels * BytesPerSample
+ 0x10, 0, // bits per sample: 2 * 8
+ 'd', 'a', 't', 'a',
+ 0x40, 0x1, 0, 0, // size of payload: 320
+ };
+ COMPILE_ASSERT(sizeof(kExpectedHeader) == kHeaderSize, header_size);
+
+ EXPECT_EQ(size_t(kHeaderSize + kPayloadSize),
+ webrtc::test::GetFileSize(outfile));
+ FILE* f = fopen(outfile.c_str(), "rb");
+ ASSERT_TRUE(f);
+
+ uint8_t header[kHeaderSize];
+ ASSERT_EQ(1u, fread(header, kHeaderSize, 1, f));
+ EXPECT_EQ(0, memcmp(kExpectedHeader, header, kHeaderSize));
+
+ uint8_t payload[kPayloadSize];
+ ASSERT_EQ(1u, fread(payload, kPayloadSize, 1, f));
+ EXPECT_EQ(0, memcmp(kFakeData, payload, kPayloadSize));
+
+ EXPECT_EQ(0, fclose(f));
+}
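The derived fields in kExpectedHeader follow directly from the codec parameters (16 kHz, mono, 16-bit PCM). A small sketch of the arithmetic, for reference only; the real header is written by the media file module:

#include <cstdio>

int main() {
  const int kSampleRate = 16000;
  const int kChannels = 1;
  const int kBytesPerSample = 2;  // 16-bit PCM.
  const int kPayloadSize = 320;
  const int kHeaderSize = 44;

  const int byte_rate = kSampleRate * kChannels * kBytesPerSample;  // 32000
  const int block_align = kChannels * kBytesPerSample;              // 2
  const int riff_size = kPayloadSize + kHeaderSize - 8;             // 356
  printf("byte_rate=%d block_align=%d riff_size=%d\n",
         byte_rate, block_align, riff_size);
  return 0;
}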
diff --git a/modules/modules.gyp b/modules/modules.gyp
index d054fe9e..2a3ba743 100644
--- a/modules/modules.gyp
+++ b/modules/modules.gyp
@@ -249,7 +249,6 @@
'video_processing/main/test/unit_test/color_enhancement_test.cc',
'video_processing/main/test/unit_test/content_metrics_test.cc',
'video_processing/main/test/unit_test/deflickering_test.cc',
- 'video_processing/main/test/unit_test/denoising_test.cc',
'video_processing/main/test/unit_test/video_processing_unittest.cc',
'video_processing/main/test/unit_test/video_processing_unittest.h',
],
diff --git a/modules/pacing/include/paced_sender.h b/modules/pacing/include/paced_sender.h
index b9151a5f..ddd8e53b 100644
--- a/modules/pacing/include/paced_sender.h
+++ b/modules/pacing/include/paced_sender.h
@@ -146,8 +146,8 @@ class PacedSender : public Module {
scoped_ptr<paced_sender::IntervalBudget> padding_budget_
GUARDED_BY(critsect_);
- int64_t time_last_update_ GUARDED_BY(critsect_);
- int64_t time_last_send_ GUARDED_BY(critsect_);
+ int64_t time_last_update_us_ GUARDED_BY(critsect_);
+ int64_t time_last_send_us_ GUARDED_BY(critsect_);
int64_t capture_time_ms_last_queued_ GUARDED_BY(critsect_);
int64_t capture_time_ms_last_sent_ GUARDED_BY(critsect_);
diff --git a/modules/pacing/paced_sender.cc b/modules/pacing/paced_sender.cc
index 52e9cfb4..6204a9a0 100644
--- a/modules/pacing/paced_sender.cc
+++ b/modules/pacing/paced_sender.cc
@@ -31,12 +31,11 @@ const int kMaxIntervalTimeMs = 30;
// Max time that the first packet in the queue can sit in the queue if no
// packets are sent, regardless of buffer state. In practice only in effect at
// low bitrates (less than 320 kbits/s).
-const int kMaxQueueTimeWithoutSendingMs = 30;
+const int kMaxQueueTimeWithoutSendingUs = 30000;
} // namespace
namespace webrtc {
-
namespace paced_sender {
struct Packet {
Packet(uint32_t ssrc,
@@ -142,7 +141,7 @@ PacedSender::PacedSender(Clock* clock,
max_queue_length_ms_(kDefaultMaxQueueLengthMs),
media_budget_(new paced_sender::IntervalBudget(max_bitrate_kbps)),
padding_budget_(new paced_sender::IntervalBudget(min_bitrate_kbps)),
- time_last_update_(clock->TimeInMilliseconds()),
+ time_last_update_us_(clock->TimeInMicroseconds()),
capture_time_ms_last_queued_(0),
capture_time_ms_last_sent_(0),
high_priority_packets_(new paced_sender::PacketList),
@@ -151,8 +150,7 @@ PacedSender::PacedSender(Clock* clock,
UpdateBytesPerInterval(kMinPacketLimitMs);
}
-PacedSender::~PacedSender() {
-}
+PacedSender::~PacedSender() {}
void PacedSender::Pause() {
CriticalSectionScoped cs(critsect_.get());
@@ -248,7 +246,8 @@ int PacedSender::QueueInMs() const {
int32_t PacedSender::TimeUntilNextProcess() {
CriticalSectionScoped cs(critsect_.get());
- int64_t elapsed_time_ms = clock_->TimeInMilliseconds() - time_last_update_;
+ int64_t elapsed_time_ms = (clock_->TimeInMicroseconds() -
+ time_last_update_us_ + 500) / 1000;
if (elapsed_time_ms <= 0) {
return kMinPacketLimitMs;
}
@@ -259,10 +258,10 @@ int32_t PacedSender::TimeUntilNextProcess() {
}
int32_t PacedSender::Process() {
- int64_t now = clock_->TimeInMilliseconds();
+ int64_t now_us = clock_->TimeInMicroseconds();
CriticalSectionScoped cs(critsect_.get());
- int elapsed_time_ms = now - time_last_update_;
- time_last_update_ = now;
+ int elapsed_time_ms = (now_us - time_last_update_us_ + 500) / 1000;
+ time_last_update_us_ = now_us;
if (!enabled_) {
return 0;
}
@@ -291,7 +290,6 @@ int32_t PacedSender::Process() {
return 0;
}
-// MUST have critsect_ when calling.
bool PacedSender::SendPacketFromList(paced_sender::PacketList* packet_list)
EXCLUSIVE_LOCKS_REQUIRED(critsect_.get()) {
paced_sender::Packet packet = GetNextPacketFromList(packet_list);
@@ -322,20 +320,18 @@ bool PacedSender::SendPacketFromList(paced_sender::PacketList* packet_list)
return true;
}
-// MUST have critsect_ when calling.
void PacedSender::UpdateBytesPerInterval(uint32_t delta_time_ms) {
media_budget_->IncreaseBudget(delta_time_ms);
padding_budget_->IncreaseBudget(delta_time_ms);
}
-// MUST have critsect_ when calling.
bool PacedSender::ShouldSendNextPacket(paced_sender::PacketList** packet_list) {
*packet_list = NULL;
if (media_budget_->bytes_remaining() <= 0) {
// All bytes consumed for this interval.
// Check if we have not sent in a too long time.
- if (clock_->TimeInMilliseconds() - time_last_send_ >
- kMaxQueueTimeWithoutSendingMs) {
+ if (clock_->TimeInMicroseconds() - time_last_send_us_ >
+ kMaxQueueTimeWithoutSendingUs) {
if (!high_priority_packets_->empty()) {
*packet_list = high_priority_packets_.get();
return true;
@@ -386,9 +382,8 @@ paced_sender::Packet PacedSender::GetNextPacketFromList(
return packet;
}
-// MUST have critsect_ when calling.
void PacedSender::UpdateMediaBytesSent(int num_bytes) {
- time_last_send_ = clock_->TimeInMilliseconds();
+ time_last_send_us_ = clock_->TimeInMicroseconds();
media_budget_->UseBudget(num_bytes);
padding_budget_->UseBudget(num_bytes);
}
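The pacer now keeps its bookkeeping in microseconds and converts back to milliseconds with (delta_us + 500) / 1000, which rounds to the nearest millisecond instead of truncating. A sketch of the difference:

#include <cstdint>
#include <cstdio>

// Round a microsecond delta to the nearest millisecond; plain integer
// division would truncate and systematically under-report elapsed time.
static int64_t UsToNearestMs(int64_t delta_us) {
  return (delta_us + 500) / 1000;
}

int main() {
  printf("%lld\n", (long long)UsToNearestMs(600));   // 1 (truncation: 0)
  printf("%lld\n", (long long)UsToNearestMs(1500));  // 2 (truncation: 1)
  return 0;
}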
diff --git a/modules/rtp_rtcp/interface/rtp_rtcp_defines.h b/modules/rtp_rtcp/interface/rtp_rtcp_defines.h
index e1bec5fc..99808439 100644
--- a/modules/rtp_rtcp/interface/rtp_rtcp_defines.h
+++ b/modules/rtp_rtcp/interface/rtp_rtcp_defines.h
@@ -198,12 +198,14 @@ struct RtpState {
start_timestamp(0),
timestamp(0),
capture_time_ms(-1),
- last_timestamp_time_ms(-1) {}
+ last_timestamp_time_ms(-1),
+ media_has_been_sent(false) {}
uint16_t sequence_number;
uint32_t start_timestamp;
uint32_t timestamp;
int64_t capture_time_ms;
int64_t last_timestamp_time_ms;
+ bool media_has_been_sent;
};
class RtpData
diff --git a/modules/rtp_rtcp/source/rtcp_format_remb_unittest.cc b/modules/rtp_rtcp/source/rtcp_format_remb_unittest.cc
index 0514277f..5e580a34 100644
--- a/modules/rtp_rtcp/source/rtcp_format_remb_unittest.cc
+++ b/modules/rtp_rtcp/source/rtcp_format_remb_unittest.cc
@@ -121,7 +121,8 @@ TEST_F(RtcpFormatRembTest, TestNonCompund) {
uint32_t SSRC = 456789;
EXPECT_EQ(0, rtcp_sender_->SetRTCPStatus(kRtcpNonCompound));
EXPECT_EQ(0, rtcp_sender_->SetREMBData(1234, 1, &SSRC));
- RTCPSender::FeedbackState feedback_state(dummy_rtp_rtcp_impl_);
+ RTCPSender::FeedbackState feedback_state =
+ dummy_rtp_rtcp_impl_->GetFeedbackState();
EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state, kRtcpRemb));
}
@@ -129,7 +130,8 @@ TEST_F(RtcpFormatRembTest, TestCompund) {
uint32_t SSRCs[2] = {456789, 98765};
EXPECT_EQ(0, rtcp_sender_->SetRTCPStatus(kRtcpCompound));
EXPECT_EQ(0, rtcp_sender_->SetREMBData(1234, 2, SSRCs));
- RTCPSender::FeedbackState feedback_state(dummy_rtp_rtcp_impl_);
+ RTCPSender::FeedbackState feedback_state =
+ dummy_rtp_rtcp_impl_->GetFeedbackState();
EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state, kRtcpRemb));
}
} // namespace
diff --git a/modules/rtp_rtcp/source/rtcp_receiver.cc b/modules/rtp_rtcp/source/rtcp_receiver.cc
index b38ae1f0..54b991bf 100644
--- a/modules/rtp_rtcp/source/rtcp_receiver.cc
+++ b/modules/rtp_rtcp/source/rtcp_receiver.cc
@@ -224,12 +224,12 @@ bool RTCPReceiver::GetAndResetXrRrRtt(uint16_t* rtt_ms) {
return true;
}
-int32_t
-RTCPReceiver::NTP(uint32_t *ReceivedNTPsecs,
- uint32_t *ReceivedNTPfrac,
- uint32_t *RTCPArrivalTimeSecs,
- uint32_t *RTCPArrivalTimeFrac,
- uint32_t *rtcp_timestamp) const
+// TODO(pbos): Make this fail when we haven't received NTP.
+bool RTCPReceiver::NTP(uint32_t* ReceivedNTPsecs,
+ uint32_t* ReceivedNTPfrac,
+ uint32_t* RTCPArrivalTimeSecs,
+ uint32_t* RTCPArrivalTimeFrac,
+ uint32_t* rtcp_timestamp) const
{
CriticalSectionScoped lock(_criticalSectionRTCPReceiver);
if(ReceivedNTPsecs)
@@ -251,7 +251,7 @@ RTCPReceiver::NTP(uint32_t *ReceivedNTPsecs,
if (rtcp_timestamp) {
*rtcp_timestamp = _remoteSenderInfo.RTPtimeStamp;
}
- return 0;
+ return true;
}
bool RTCPReceiver::LastReceivedXrReferenceTimeInfo(
diff --git a/modules/rtp_rtcp/source/rtcp_receiver.h b/modules/rtp_rtcp/source/rtcp_receiver.h
index ebffb7cf..84eb24c7 100644
--- a/modules/rtp_rtcp/source/rtcp_receiver.h
+++ b/modules/rtp_rtcp/source/rtcp_receiver.h
@@ -63,11 +63,11 @@ public:
char cName[RTCP_CNAME_SIZE]) const;
// get received NTP
- int32_t NTP(uint32_t *ReceivedNTPsecs,
- uint32_t *ReceivedNTPfrac,
- uint32_t *RTCPArrivalTimeSecs,
- uint32_t *RTCPArrivalTimeFrac,
- uint32_t *rtcp_timestamp) const;
+ bool NTP(uint32_t* ReceivedNTPsecs,
+ uint32_t* ReceivedNTPfrac,
+ uint32_t* RTCPArrivalTimeSecs,
+ uint32_t* RTCPArrivalTimeFrac,
+ uint32_t* rtcp_timestamp) const;
bool LastReceivedXrReferenceTimeInfo(RtcpReceiveTimeInfo* info) const;
diff --git a/modules/rtp_rtcp/source/rtcp_sender.cc b/modules/rtp_rtcp/source/rtcp_sender.cc
index 2cf7e1cb..1edbee43 100644
--- a/modules/rtp_rtcp/source/rtcp_sender.cc
+++ b/modules/rtp_rtcp/source/rtcp_sender.cc
@@ -65,30 +65,11 @@ std::string NACKStringBuilder::GetResult()
return _stream.str();
}
-RTCPSender::FeedbackState::FeedbackState(ModuleRtpRtcpImpl* module)
- : send_payload_type(module->SendPayloadType()),
- frequency_hz(module->CurrentSendFrequencyHz()),
- packet_count_sent(module->PacketCountSent()),
- byte_count_sent(module->ByteCountSent()),
- module(module) {
- uint32_t last_ntp_secs = 0, last_ntp_frac = 0, last_remote_sr = 0;
- module->LastReceivedNTP(last_ntp_secs, last_ntp_frac, last_remote_sr);
- last_rr_ntp_secs = last_ntp_secs;
- last_rr_ntp_frac = last_ntp_frac;
- remote_sr = last_remote_sr;
-
- has_last_xr_rr = module->LastReceivedXrReferenceTimeInfo(&last_xr_rr);
-
- uint32_t send_bitrate = 0, tmp;
- module->BitrateSent(&send_bitrate, &tmp, &tmp, &tmp);
- this->send_bitrate = send_bitrate;
-}
-
RTCPSender::FeedbackState::FeedbackState()
: send_payload_type(0),
frequency_hz(0),
- packet_count_sent(0),
- byte_count_sent(0),
+ packets_sent(0),
+ media_bytes_sent(0),
send_bitrate(0),
last_rr_ntp_secs(0),
last_rr_ntp_frac(0),
@@ -654,12 +635,12 @@ int32_t RTCPSender::BuildSR(const FeedbackState& feedback_state,
//sender's packet count
RtpUtility::AssignUWord32ToBuffer(rtcpbuffer + pos,
- feedback_state.packet_count_sent);
+ feedback_state.packets_sent);
pos += 4;
//sender's octet count
RtpUtility::AssignUWord32ToBuffer(rtcpbuffer + pos,
- feedback_state.byte_count_sent);
+ feedback_state.media_bytes_sent);
pos += 4;
uint8_t numberOfReportBlocks = 0;
diff --git a/modules/rtp_rtcp/source/rtcp_sender.h b/modules/rtp_rtcp/source/rtcp_sender.h
index fad3b5e3..26c44b04 100644
--- a/modules/rtp_rtcp/source/rtcp_sender.h
+++ b/modules/rtp_rtcp/source/rtcp_sender.h
@@ -51,13 +51,12 @@ class RTCPSender
{
public:
struct FeedbackState {
- explicit FeedbackState(ModuleRtpRtcpImpl* module);
FeedbackState();
uint8_t send_payload_type;
uint32_t frequency_hz;
- uint32_t packet_count_sent;
- uint32_t byte_count_sent;
+ uint32_t packets_sent;
+ uint32_t media_bytes_sent;
uint32_t send_bitrate;
uint32_t last_rr_ntp_secs;
diff --git a/modules/rtp_rtcp/source/rtcp_sender_unittest.cc b/modules/rtp_rtcp/source/rtcp_sender_unittest.cc
index cba1c346..b8d53953 100644
--- a/modules/rtp_rtcp/source/rtcp_sender_unittest.cc
+++ b/modules/rtp_rtcp/source/rtcp_sender_unittest.cc
@@ -337,7 +337,7 @@ class RtcpSenderTest : public ::testing::Test {
TEST_F(RtcpSenderTest, RtcpOff) {
EXPECT_EQ(0, rtcp_sender_->SetRTCPStatus(kRtcpOff));
- RTCPSender::FeedbackState feedback_state(rtp_rtcp_impl_);
+ RTCPSender::FeedbackState feedback_state = rtp_rtcp_impl_->GetFeedbackState();
EXPECT_EQ(-1, rtcp_sender_->SendRTCP(feedback_state, kRtcpSr));
}
@@ -381,7 +381,7 @@ TEST_F(RtcpSenderTest, TestCompound) {
EXPECT_EQ(0, rtcp_sender_->SetIJStatus(true));
EXPECT_EQ(0, rtcp_sender_->SetRTCPStatus(kRtcpCompound));
- RTCPSender::FeedbackState feedback_state(rtp_rtcp_impl_);
+ RTCPSender::FeedbackState feedback_state = rtp_rtcp_impl_->GetFeedbackState();
EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state, kRtcpRr));
// Transmission time offset packet should be received.
@@ -392,7 +392,7 @@ TEST_F(RtcpSenderTest, TestCompound) {
TEST_F(RtcpSenderTest, TestCompound_NoRtpReceived) {
EXPECT_EQ(0, rtcp_sender_->SetIJStatus(true));
EXPECT_EQ(0, rtcp_sender_->SetRTCPStatus(kRtcpCompound));
- RTCPSender::FeedbackState feedback_state(rtp_rtcp_impl_);
+ RTCPSender::FeedbackState feedback_state = rtp_rtcp_impl_->GetFeedbackState();
EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state, kRtcpRr));
// Transmission time offset packet should not be received.
@@ -402,7 +402,7 @@ TEST_F(RtcpSenderTest, TestCompound_NoRtpReceived) {
TEST_F(RtcpSenderTest, TestXrReceiverReferenceTime) {
EXPECT_EQ(0, rtcp_sender_->SetRTCPStatus(kRtcpCompound));
- RTCPSender::FeedbackState feedback_state(rtp_rtcp_impl_);
+ RTCPSender::FeedbackState feedback_state = rtp_rtcp_impl_->GetFeedbackState();
EXPECT_EQ(0, rtcp_sender_->SetSendingStatus(feedback_state, false));
rtcp_sender_->SendRtcpXrReceiverReferenceTime(true);
EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state, kRtcpReport));
@@ -413,7 +413,7 @@ TEST_F(RtcpSenderTest, TestXrReceiverReferenceTime) {
TEST_F(RtcpSenderTest, TestNoXrReceiverReferenceTimeIfSending) {
EXPECT_EQ(0, rtcp_sender_->SetRTCPStatus(kRtcpCompound));
- RTCPSender::FeedbackState feedback_state(rtp_rtcp_impl_);
+ RTCPSender::FeedbackState feedback_state = rtp_rtcp_impl_->GetFeedbackState();
EXPECT_EQ(0, rtcp_sender_->SetSendingStatus(feedback_state, true));
rtcp_sender_->SendRtcpXrReceiverReferenceTime(true);
EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state, kRtcpReport));
@@ -424,7 +424,7 @@ TEST_F(RtcpSenderTest, TestNoXrReceiverReferenceTimeIfSending) {
TEST_F(RtcpSenderTest, TestNoXrReceiverReferenceTimeIfNotEnabled) {
EXPECT_EQ(0, rtcp_sender_->SetRTCPStatus(kRtcpCompound));
- RTCPSender::FeedbackState feedback_state(rtp_rtcp_impl_);
+ RTCPSender::FeedbackState feedback_state = rtp_rtcp_impl_->GetFeedbackState();
EXPECT_EQ(0, rtcp_sender_->SetSendingStatus(feedback_state, false));
rtcp_sender_->SendRtcpXrReceiverReferenceTime(false);
EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state, kRtcpReport));
@@ -435,7 +435,7 @@ TEST_F(RtcpSenderTest, TestNoXrReceiverReferenceTimeIfNotEnabled) {
TEST_F(RtcpSenderTest, TestSendTimeOfXrRrReport) {
EXPECT_EQ(0, rtcp_sender_->SetRTCPStatus(kRtcpCompound));
- RTCPSender::FeedbackState feedback_state(rtp_rtcp_impl_);
+ RTCPSender::FeedbackState feedback_state = rtp_rtcp_impl_->GetFeedbackState();
EXPECT_EQ(0, rtcp_sender_->SetSendingStatus(feedback_state, false));
rtcp_sender_->SendRtcpXrReceiverReferenceTime(true);
uint32_t ntp_sec;
@@ -475,7 +475,7 @@ TEST_F(RtcpSenderTest, SendsTmmbnIfSetAndEmpty) {
TMMBRSet bounding_set;
EXPECT_EQ(0, rtcp_sender_->SetTMMBN(&bounding_set, 3));
ASSERT_EQ(0U, test_transport_->rtcp_packet_info_.rtcpPacketTypeFlags);
- RTCPSender::FeedbackState feedback_state(rtp_rtcp_impl_);
+ RTCPSender::FeedbackState feedback_state = rtp_rtcp_impl_->GetFeedbackState();
EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state,kRtcpSr));
// We now expect the packet to show up in the rtcp_packet_info_ of
// test_transport_.
@@ -498,7 +498,7 @@ TEST_F(RtcpSenderTest, SendsTmmbnIfSetAndValid) {
EXPECT_EQ(0, rtcp_sender_->SetTMMBN(&bounding_set, 3));
ASSERT_EQ(0U, test_transport_->rtcp_packet_info_.rtcpPacketTypeFlags);
- RTCPSender::FeedbackState feedback_state(rtp_rtcp_impl_);
+ RTCPSender::FeedbackState feedback_state = rtp_rtcp_impl_->GetFeedbackState();
EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state, kRtcpSr));
// We now expect the packet to show up in the rtcp_packet_info_ of
// test_transport_.
diff --git a/modules/rtp_rtcp/source/rtp_header_extension.cc b/modules/rtp_rtcp/source/rtp_header_extension.cc
index 2e72d750..9a1836e1 100644
--- a/modules/rtp_rtcp/source/rtp_header_extension.cc
+++ b/modules/rtp_rtcp/source/rtp_header_extension.cc
@@ -65,6 +65,16 @@ int32_t RtpHeaderExtensionMap::Deregister(const RTPExtensionType type) {
return 0;
}
+bool RtpHeaderExtensionMap::IsRegistered(RTPExtensionType type) const {
+ std::map<uint8_t, HeaderExtension*>::const_iterator it =
+ extensionMap_.begin();
+ for (; it != extensionMap_.end(); ++it) {
+ if (it->second->type == type)
+ return true;
+ }
+ return false;
+}
+
int32_t RtpHeaderExtensionMap::GetType(const uint8_t id,
RTPExtensionType* type) const {
assert(type);
diff --git a/modules/rtp_rtcp/source/rtp_rtcp_impl.cc b/modules/rtp_rtcp/source/rtp_rtcp_impl.cc
index 349340f5..1a3b79cb 100644
--- a/modules/rtp_rtcp/source/rtp_rtcp_impl.cc
+++ b/modules/rtp_rtcp/source/rtp_rtcp_impl.cc
@@ -230,8 +230,7 @@ int32_t ModuleRtpRtcpImpl::Process() {
}
if (rtcp_sender_.TimeToSendRTCPReport()) {
- RTCPSender::FeedbackState feedback_state(this);
- rtcp_sender_.SendRTCP(feedback_state, kRtcpReport);
+ rtcp_sender_.SendRTCP(GetFeedbackState(), kRtcpReport);
}
}
@@ -418,12 +417,29 @@ int32_t ModuleRtpRtcpImpl::SetCSRCs(
return 0; // TODO(pwestin): change to void.
}
-uint32_t ModuleRtpRtcpImpl::PacketCountSent() const {
- return rtp_sender_.Packets();
-}
+// TODO(pbos): Handle media and RTX streams separately (separate RTCP
+// feedbacks).
+RTCPSender::FeedbackState ModuleRtpRtcpImpl::GetFeedbackState() {
+ StreamDataCounters rtp_stats;
+ StreamDataCounters rtx_stats;
+ rtp_sender_.GetDataCounters(&rtp_stats, &rtx_stats);
+
+ RTCPSender::FeedbackState state;
+ state.send_payload_type = SendPayloadType();
+ state.frequency_hz = CurrentSendFrequencyHz();
+ state.packets_sent = rtp_stats.packets + rtx_stats.packets;
+ state.media_bytes_sent = rtp_stats.bytes + rtx_stats.bytes;
+ state.module = this;
+
+ LastReceivedNTP(&state.last_rr_ntp_secs,
+ &state.last_rr_ntp_frac,
+ &state.remote_sr);
-uint32_t ModuleRtpRtcpImpl::ByteCountSent() const {
- return rtp_sender_.Bytes();
+ state.has_last_xr_rr = LastReceivedXrReferenceTimeInfo(&state.last_xr_rr);
+
+ uint32_t tmp;
+ BitrateSent(&state.send_bitrate, &tmp, &tmp, &tmp);
+ return state;
}
int ModuleRtpRtcpImpl::CurrentSendFrequencyHz() const {
@@ -433,8 +449,7 @@ int ModuleRtpRtcpImpl::CurrentSendFrequencyHz() const {
int32_t ModuleRtpRtcpImpl::SetSendingStatus(const bool sending) {
if (rtcp_sender_.Sending() != sending) {
// Sends RTCP BYE when going from true to false
- RTCPSender::FeedbackState feedback_state(this);
- if (rtcp_sender_.SetSendingStatus(feedback_state, sending) != 0) {
+ if (rtcp_sender_.SetSendingStatus(GetFeedbackState(), sending) != 0) {
LOG(LS_WARNING) << "Failed to send RTCP BYE";
}
@@ -499,8 +514,7 @@ int32_t ModuleRtpRtcpImpl::SendOutgoingData(
if (!IsDefaultModule()) {
// Don't send RTCP from default module.
if (rtcp_sender_.TimeToSendRTCPReport(kVideoFrameKey == frame_type)) {
- RTCPSender::FeedbackState feedback_state(this);
- rtcp_sender_.SendRTCP(feedback_state, kRtcpReport);
+ rtcp_sender_.SendRTCP(GetFeedbackState(), kRtcpReport);
}
return rtp_sender_.SendOutgoingData(frame_type,
payload_type,
@@ -599,14 +613,10 @@ int ModuleRtpRtcpImpl::TimeToSendPadding(int bytes) {
}
} else {
CriticalSectionScoped lock(critical_section_module_ptrs_.get());
- // Decide what media stream to pad on based on a round-robin scheme.
for (size_t i = 0; i < child_modules_.size(); ++i) {
- padding_index_ = (padding_index_ + 1) % child_modules_.size();
// Send padding on one of the modules sending media.
- if (child_modules_[padding_index_]->SendingMedia() &&
- child_modules_[padding_index_]->rtp_sender_.GetTargetBitrate() > 0) {
- return child_modules_[padding_index_]->rtp_sender_.TimeToSendPadding(
- bytes);
+ if (child_modules_[i]->SendingMedia()) {
+ return child_modules_[i]->rtp_sender_.TimeToSendPadding(bytes);
}
}
}
@@ -751,7 +761,9 @@ int32_t ModuleRtpRtcpImpl::RemoteNTP(
received_ntpfrac,
rtcp_arrival_time_secs,
rtcp_arrival_time_frac,
- rtcp_timestamp);
+ rtcp_timestamp)
+ ? 0
+ : -1;
}
// Get RoundTripTime.
@@ -782,8 +794,7 @@ int32_t ModuleRtpRtcpImpl::ResetSendDataCountersRTP() {
// Force a send of an RTCP packet.
// Normal SR and RR are triggered via the process function.
int32_t ModuleRtpRtcpImpl::SendRTCP(uint32_t rtcp_packet_type) {
- RTCPSender::FeedbackState feedback_state(this);
- return rtcp_sender_.SendRTCP(feedback_state, rtcp_packet_type);
+ return rtcp_sender_.SendRTCP(GetFeedbackState(), rtcp_packet_type);
}
int32_t ModuleRtpRtcpImpl::SetRTCPApplicationSpecificData(
@@ -811,11 +822,17 @@ bool ModuleRtpRtcpImpl::RtcpXrRrtrStatus() const {
int32_t ModuleRtpRtcpImpl::DataCountersRTP(
uint32_t* bytes_sent,
uint32_t* packets_sent) const {
+ StreamDataCounters rtp_stats;
+ StreamDataCounters rtx_stats;
+ rtp_sender_.GetDataCounters(&rtp_stats, &rtx_stats);
+
if (bytes_sent) {
- *bytes_sent = rtp_sender_.Bytes();
+ *bytes_sent = rtp_stats.bytes + rtp_stats.padding_bytes +
+ rtp_stats.header_bytes + rtx_stats.bytes +
+ rtx_stats.padding_bytes + rtx_stats.header_bytes;
}
if (packets_sent) {
- *packets_sent = rtp_sender_.Packets();
+ *packets_sent = rtp_stats.packets + rtx_stats.packets;
}
return 0;
}
@@ -955,9 +972,8 @@ int32_t ModuleRtpRtcpImpl::SendNACK(const uint16_t* nack_list,
}
nack_last_seq_number_sent_ = nack_list[start_id + nackLength - 1];
- RTCPSender::FeedbackState feedback_state(this);
return rtcp_sender_.SendRTCP(
- feedback_state, kRtcpNack, nackLength, &nack_list[start_id]);
+ GetFeedbackState(), kRtcpNack, nackLength, &nack_list[start_id]);
}
// Store the sent packets, needed to answer to a Negative acknowledgment
@@ -1074,9 +1090,8 @@ int32_t ModuleRtpRtcpImpl::RequestKeyFrame() {
int32_t ModuleRtpRtcpImpl::SendRTCPSliceLossIndication(
const uint8_t picture_id) {
- RTCPSender::FeedbackState feedback_state(this);
return rtcp_sender_.SendRTCP(
- feedback_state, kRtcpSli, 0, 0, false, picture_id);
+ GetFeedbackState(), kRtcpSli, 0, 0, false, picture_id);
}
int32_t ModuleRtpRtcpImpl::SetCameraDelay(const int32_t delay_ms) {
@@ -1245,9 +1260,8 @@ void ModuleRtpRtcpImpl::OnRequestSendReport() {
int32_t ModuleRtpRtcpImpl::SendRTCPReferencePictureSelection(
const uint64_t picture_id) {
- RTCPSender::FeedbackState feedback_state(this);
return rtcp_sender_.SendRTCP(
- feedback_state, kRtcpRpsi, 0, 0, false, picture_id);
+ GetFeedbackState(), kRtcpRpsi, 0, 0, false, picture_id);
}
uint32_t ModuleRtpRtcpImpl::SendTimeOfSendReport(
@@ -1274,23 +1288,24 @@ void ModuleRtpRtcpImpl::OnReceivedNACK(
rtp_sender_.OnReceivedNACK(nack_sequence_numbers, rtt);
}
-int32_t ModuleRtpRtcpImpl::LastReceivedNTP(
- uint32_t& rtcp_arrival_time_secs, // When we got the last report.
- uint32_t& rtcp_arrival_time_frac,
- uint32_t& remote_sr) {
+bool ModuleRtpRtcpImpl::LastReceivedNTP(
+ uint32_t* rtcp_arrival_time_secs, // When we got the last report.
+ uint32_t* rtcp_arrival_time_frac,
+ uint32_t* remote_sr) const {
// Remote SR: NTP inside the last received (mid 16 bits from sec and frac).
uint32_t ntp_secs = 0;
uint32_t ntp_frac = 0;
- if (-1 == rtcp_receiver_.NTP(&ntp_secs,
- &ntp_frac,
- &rtcp_arrival_time_secs,
- &rtcp_arrival_time_frac,
- NULL)) {
- return -1;
+ if (!rtcp_receiver_.NTP(&ntp_secs,
+ &ntp_frac,
+ rtcp_arrival_time_secs,
+ rtcp_arrival_time_frac,
+ NULL)) {
+ return false;
}
- remote_sr = ((ntp_secs & 0x0000ffff) << 16) + ((ntp_frac & 0xffff0000) >> 16);
- return 0;
+ *remote_sr =
+ ((ntp_secs & 0x0000ffff) << 16) + ((ntp_frac & 0xffff0000) >> 16);
+ return true;
}
bool ModuleRtpRtcpImpl::LastReceivedXrReferenceTimeInfo(
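The remote_sr value assembled in LastReceivedNTP() is the 32-bit compact NTP timestamp of RFC 3550: the low 16 bits of the NTP seconds word concatenated with the high 16 bits of the fraction word. As a standalone helper:

#include <cstdint>

// RFC 3550 "middle 32 bits" of a 64-bit NTP timestamp, as used in the
// LSR field of receiver reports.
static uint32_t CompactNtp(uint32_t ntp_secs, uint32_t ntp_frac) {
  return ((ntp_secs & 0x0000ffff) << 16) + ((ntp_frac & 0xffff0000) >> 16);
}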
diff --git a/modules/rtp_rtcp/source/rtp_rtcp_impl.h b/modules/rtp_rtcp/source/rtp_rtcp_impl.h
index 7e7ea027..4a23dd40 100644
--- a/modules/rtp_rtcp/source/rtp_rtcp_impl.h
+++ b/modules/rtp_rtcp/source/rtp_rtcp_impl.h
@@ -89,12 +89,10 @@ class ModuleRtpRtcpImpl : public RtpRtcp {
virtual int32_t SetCSRCStatus(const bool include) OVERRIDE;
- virtual uint32_t PacketCountSent() const;
+ virtual RTCPSender::FeedbackState GetFeedbackState();
virtual int CurrentSendFrequencyHz() const;
- virtual uint32_t ByteCountSent() const;
-
virtual void SetRTXSendStatus(const int mode) OVERRIDE;
virtual void RTXSendStatus(int* mode, uint32_t* ssrc,
@@ -328,9 +326,9 @@ class ModuleRtpRtcpImpl : public RtpRtcp {
const FecProtectionParams* delta_params,
const FecProtectionParams* key_params) OVERRIDE;
- virtual int32_t LastReceivedNTP(uint32_t& NTPsecs,
- uint32_t& NTPfrac,
- uint32_t& remote_sr);
+ virtual bool LastReceivedNTP(uint32_t* NTPsecs,
+ uint32_t* NTPfrac,
+ uint32_t* remote_sr) const;
virtual bool LastReceivedXrReferenceTimeInfo(RtcpReceiveTimeInfo* info) const;
diff --git a/modules/rtp_rtcp/source/rtp_rtcp_impl_unittest.cc b/modules/rtp_rtcp/source/rtp_rtcp_impl_unittest.cc
index 930778c3..3101a1fe 100644
--- a/modules/rtp_rtcp/source/rtp_rtcp_impl_unittest.cc
+++ b/modules/rtp_rtcp/source/rtp_rtcp_impl_unittest.cc
@@ -388,7 +388,7 @@ class RtpSendingTest : public ::testing::Test {
VideoCodec codec_;
};
-TEST_F(RtpSendingTest, RoundRobinPadding) {
+TEST_F(RtpSendingTest, DISABLED_RoundRobinPadding) {
// We have to send on an SSRC to be allowed to pad, since a marker bit must
// be sent prior to padding packets.
const uint8_t payload[200] = {0};
@@ -410,9 +410,13 @@ TEST_F(RtpSendingTest, RoundRobinPadding) {
ExpectPadding(expected_padding);
}
-TEST_F(RtpSendingTest, RoundRobinPaddingRtx) {
+TEST_F(RtpSendingTest, DISABLED_RoundRobinPaddingRtx) {
// Enable RTX to allow padding to be sent prior to media.
for (int i = 1; i < codec_.numberOfSimulcastStreams + 1; ++i) {
+ // Abs-send-time is needed to be allowed to send padding prior to media,
+ // as otherwise the timestamp used for BWE will be broken.
+ senders_[i]->RegisterSendRtpHeaderExtension(kRtpExtensionAbsoluteSendTime,
+ 1);
senders_[i]->SetRtxSendPayloadType(96);
senders_[i]->SetRtxSsrc(kSenderRtxSsrc + i);
senders_[i]->SetRTXSendStatus(kRtxRetransmitted);
@@ -436,7 +440,7 @@ TEST_F(RtpSendingTest, RoundRobinPaddingRtx) {
ExpectPadding(expected_padding);
}
-TEST_F(RtpSendingTest, RoundRobinPaddingRtxRedundantPayloads) {
+TEST_F(RtpSendingTest, DISABLED_RoundRobinPaddingRtxRedundantPayloads) {
for (int i = 1; i < codec_.numberOfSimulcastStreams + 1; ++i) {
senders_[i]->SetRtxSendPayloadType(96);
senders_[i]->SetRtxSsrc(kSenderRtxSsrc + i);
diff --git a/modules/rtp_rtcp/source/rtp_sender.cc b/modules/rtp_rtcp/source/rtp_sender.cc
index c24b15a3..eb88be7c 100644
--- a/modules/rtp_rtcp/source/rtp_sender.cc
+++ b/modules/rtp_rtcp/source/rtp_sender.cc
@@ -87,6 +87,7 @@ RTPSender::RTPSender(const int32_t id,
timestamp_(0),
capture_time_ms_(0),
last_timestamp_time_ms_(0),
+ media_has_been_sent_(false),
last_packet_marker_bit_(false),
num_csrcs_(0),
csrcs_(),
@@ -430,14 +431,9 @@ int32_t RTPSender::SendOutgoingData(
"Send", "type", FrameTypeToString(frame_type));
assert(frame_type != kAudioFrameSpeech && frame_type != kAudioFrameCN);
- if (frame_type == kFrameEmpty) {
- if (paced_sender_->Enabled()) {
- // Padding is driven by the pacer and not by the encoder.
- return 0;
- }
- return SendPaddingAccordingToBitrate(payload_type, capture_timestamp,
- capture_time_ms) ? 0 : -1;
- }
+ if (frame_type == kFrameEmpty)
+ return 0;
+
ret_val = video_->SendVideo(video_type, frame_type, payload_type,
capture_timestamp, capture_time_ms,
payload_data, payload_size,
@@ -475,45 +471,6 @@ int RTPSender::SendRedundantPayloads(int payload_type, int bytes_to_send) {
return bytes_to_send - bytes_left;
}
-bool RTPSender::SendPaddingAccordingToBitrate(
- int8_t payload_type, uint32_t capture_timestamp,
- int64_t capture_time_ms) {
- // Current bitrate since last estimate(1 second) averaged with the
- // estimate since then, to get the most up to date bitrate.
- uint32_t current_bitrate = bitrate_sent_.BitrateNow();
- uint32_t target_bitrate = GetTargetBitrate();
- int bitrate_diff = target_bitrate - current_bitrate;
- if (bitrate_diff <= 0) {
- return true;
- }
- int bytes = 0;
- if (current_bitrate == 0) {
- // Start up phase. Send one 33.3 ms batch to start with.
- bytes = (bitrate_diff / 8) / 30;
- } else {
- bytes = (bitrate_diff / 8);
- // Cap at 200 ms of target send data.
- int bytes_cap = target_bitrate / 1000 * 25; // 1000 / 8 / 5.
- if (bytes > bytes_cap) {
- bytes = bytes_cap;
- }
- }
- uint32_t timestamp;
- {
- CriticalSectionScoped cs(send_critsect_);
- // Add the random RTP timestamp offset and store the capture time for
- // later calculation of the send time offset.
- timestamp = start_timestamp_ + capture_timestamp;
- timestamp_ = timestamp;
- capture_time_ms_ = capture_time_ms;
- last_timestamp_time_ms_ = clock_->TimeInMilliseconds();
- }
- int bytes_sent = SendPadData(payload_type, timestamp, capture_time_ms,
- bytes, false, false);
- // We did not manage to send all bytes. Comparing with 31 due to modulus 32.
- return bytes - bytes_sent < 31;
-}
-
int RTPSender::BuildPaddingPacket(uint8_t* packet, int header_length,
int32_t bytes) {
int padding_bytes_in_packet = kMaxPaddingLength;
@@ -536,9 +493,7 @@ int RTPSender::BuildPaddingPacket(uint8_t* packet, int header_length,
int RTPSender::SendPadData(int payload_type,
uint32_t timestamp,
int64_t capture_time_ms,
- int32_t bytes,
- bool force_full_size_packets,
- bool over_rtx) {
+ int32_t bytes) {
// Drop this packet if we're not sending media packets.
if (!SendingMedia()) {
return bytes;
@@ -547,36 +502,34 @@ int RTPSender::SendPadData(int payload_type,
int bytes_sent = 0;
for (; bytes > 0; bytes -= padding_bytes_in_packet) {
// Always send full padding packets.
- if (force_full_size_packets && bytes < kMaxPaddingLength)
+ if (bytes < kMaxPaddingLength)
bytes = kMaxPaddingLength;
- if (bytes < kMaxPaddingLength) {
- if (force_full_size_packets) {
- bytes = kMaxPaddingLength;
- } else {
- // Round to the nearest multiple of 32.
- bytes = (bytes + 16) & 0xffe0;
- }
- }
- if (bytes < 32) {
- // Sanity don't send empty packets.
- break;
- }
+
uint32_t ssrc;
uint16_t sequence_number;
+ bool over_rtx;
{
CriticalSectionScoped cs(send_critsect_);
// Only send padding packets following the last packet of a frame,
// indicated by the marker bit.
- if (!over_rtx && !last_packet_marker_bit_)
- return bytes_sent;
if (rtx_ == kRtxOff) {
+ // Without RTX we can't send padding in the middle of frames.
+ if (!last_packet_marker_bit_)
+ return bytes_sent;
ssrc = ssrc_;
sequence_number = sequence_number_;
++sequence_number_;
+ over_rtx = false;
} else {
+ // Without abs-send-time a media packet must be sent before padding so
+ // that the timestamps used for estimation are correct.
+ if (!media_has_been_sent_ && !rtp_header_extension_map_.IsRegistered(
+ kRtpExtensionAbsoluteSendTime))
+ return bytes_sent;
ssrc = ssrc_rtx_;
sequence_number = sequence_number_rtx_;
++sequence_number_rtx_;
+ over_rtx = true;
}
}
@@ -656,10 +609,13 @@ int32_t RTPSender::ReSendPacket(uint16_t packet_id, uint32_t min_resend_time) {
return length;
}
}
-
- CriticalSectionScoped lock(send_critsect_);
+ int rtx = kRtxOff;
+ {
+ CriticalSectionScoped lock(send_critsect_);
+ rtx = rtx_;
+ }
return PrepareAndSendPacket(data_buffer, length, capture_time_ms,
- (rtx_ & kRtxRetransmitted) > 0, true) ?
+ (rtx & kRtxRetransmitted) > 0, true) ?
length : -1;
}
@@ -852,6 +808,10 @@ bool RTPSender::PrepareAndSendPacket(uint8_t* buffer,
diff_ms);
UpdateAbsoluteSendTime(buffer_to_send_ptr, length, rtp_header, now_ms);
bool ret = SendPacketToNetwork(buffer_to_send_ptr, length);
+ if (ret) {
+ CriticalSectionScoped lock(send_critsect_);
+ media_has_been_sent_ = true;
+ }
UpdateRtpStats(buffer_to_send_ptr, length, rtp_header, send_over_rtx,
is_retransmit);
return ret;
@@ -907,6 +867,7 @@ bool RTPSender::IsFecPacket(const uint8_t* buffer,
}
int RTPSender::TimeToSendPadding(int bytes) {
+ assert(bytes > 0);
int payload_type;
int64_t capture_time_ms;
uint32_t timestamp;
@@ -933,12 +894,8 @@ int RTPSender::TimeToSendPadding(int bytes) {
bytes_sent = SendRedundantPayloads(payload_type, bytes);
bytes -= bytes_sent;
if (bytes > 0) {
- int padding_sent = SendPadData(payload_type,
- timestamp,
- capture_time_ms,
- bytes,
- true,
- rtx != kRtxOff);
+ int padding_sent =
+ SendPadData(payload_type, timestamp, capture_time_ms, bytes);
bytes_sent += padding_sent;
}
return bytes_sent;
@@ -992,6 +949,11 @@ int32_t RTPSender::SendToNetwork(
uint32_t length = payload_length + rtp_header_length;
if (!SendPacketToNetwork(buffer, length))
return -1;
+ assert(payload_length - rtp_header.paddingLength > 0);
+ {
+ CriticalSectionScoped lock(send_critsect_);
+ media_has_been_sent_ = true;
+ }
UpdateRtpStats(buffer, length, rtp_header, false, false);
return 0;
}
@@ -1061,17 +1023,11 @@ void RTPSender::ResetDataCounters() {
}
}
-uint32_t RTPSender::Packets() const {
- CriticalSectionScoped lock(statistics_crit_.get());
- return rtp_stats_.packets + rtx_rtp_stats_.packets;
-}
-
-// Number of sent RTP bytes.
-uint32_t RTPSender::Bytes() const {
+void RTPSender::GetDataCounters(StreamDataCounters* rtp_stats,
+ StreamDataCounters* rtx_stats) const {
CriticalSectionScoped lock(statistics_crit_.get());
- return rtp_stats_.bytes + rtp_stats_.header_bytes + rtp_stats_.padding_bytes +
- rtx_rtp_stats_.bytes + rtx_rtp_stats_.header_bytes +
- rtx_rtp_stats_.padding_bytes;
+ *rtp_stats = rtp_stats_;
+ *rtx_stats = rtx_rtp_stats_;
}
int RTPSender::CreateRTPHeader(
@@ -1732,6 +1688,7 @@ void RTPSender::SetRtpState(const RtpState& rtp_state) {
timestamp_ = rtp_state.timestamp;
capture_time_ms_ = rtp_state.capture_time_ms;
last_timestamp_time_ms_ = rtp_state.last_timestamp_time_ms;
+ media_has_been_sent_ = rtp_state.media_has_been_sent;
}
RtpState RTPSender::GetRtpState() const {
@@ -1743,6 +1700,7 @@ RtpState RTPSender::GetRtpState() const {
state.timestamp = timestamp_;
state.capture_time_ms = capture_time_ms_;
state.last_timestamp_time_ms = last_timestamp_time_ms_;
+ state.media_has_been_sent = media_has_been_sent_;
return state;
}
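The reworked SendPadData() gates padding on two conditions: without RTX, padding may only follow a frame-final packet (marker bit set); over RTX, padding before any media is only allowed when the abs-send-time extension is registered, since the receive-side bandwidth estimator would otherwise see broken timestamps. Condensed into a predicate (a sketch of the decision logic in isolation, not the actual member function):

static bool CanSendPaddingNow(bool rtx_enabled,
                              bool last_packet_had_marker,
                              bool media_has_been_sent,
                              bool abs_send_time_registered) {
  if (!rtx_enabled) {
    // Without RTX, padding must not land in the middle of a frame.
    return last_packet_had_marker;
  }
  // Over RTX, padding before any media needs abs-send-time so the BWE
  // timestamps stay meaningful.
  return media_has_been_sent || abs_send_time_registered;
}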
diff --git a/modules/rtp_rtcp/source/rtp_sender.h b/modules/rtp_rtcp/source/rtp_sender.h
index 4a9e10ed..f3a2bdc1 100644
--- a/modules/rtp_rtcp/source/rtp_sender.h
+++ b/modules/rtp_rtcp/source/rtp_sender.h
@@ -109,11 +109,8 @@ class RTPSender : public RTPSenderInterface, public Bitrate::Observer {
void SetSendingMediaStatus(const bool enabled);
bool SendingMedia() const;
- // Number of sent RTP packets.
- uint32_t Packets() const;
-
- // Number of sent RTP bytes.
- uint32_t Bytes() const;
+ void GetDataCounters(StreamDataCounters* rtp_stats,
+ StreamDataCounters* rtx_stats) const;
void ResetDataCounters();
@@ -270,9 +267,7 @@ class RTPSender : public RTPSenderInterface, public Bitrate::Observer {
int SendPadData(int payload_type,
uint32_t timestamp,
int64_t capture_time_ms,
- int32_t bytes,
- bool force_full_size_packets,
- bool only_pad_after_markerbit);
+ int32_t bytes);
// Called on update of RTP statistics.
void RegisterRtpStatisticsCallback(StreamDataCountersCallback* callback);
@@ -312,9 +307,6 @@ class RTPSender : public RTPSenderInterface, public Bitrate::Observer {
int SendRedundantPayloads(int payload_type, int bytes);
- bool SendPaddingAccordingToBitrate(int8_t payload_type,
- uint32_t capture_timestamp,
- int64_t capture_time_ms);
int BuildPaddingPacket(uint8_t* packet, int header_length, int32_t bytes);
void BuildRtxPacket(uint8_t* buffer, uint16_t* length,
@@ -395,6 +387,7 @@ class RTPSender : public RTPSenderInterface, public Bitrate::Observer {
uint32_t timestamp_ GUARDED_BY(send_critsect_);
int64_t capture_time_ms_ GUARDED_BY(send_critsect_);
int64_t last_timestamp_time_ms_ GUARDED_BY(send_critsect_);
+ bool media_has_been_sent_ GUARDED_BY(send_critsect_);
bool last_packet_marker_bit_ GUARDED_BY(send_critsect_);
uint8_t num_csrcs_ GUARDED_BY(send_critsect_);
uint32_t csrcs_[kRtpCsrcSize] GUARDED_BY(send_critsect_);
diff --git a/modules/rtp_rtcp/source/rtp_sender_unittest.cc b/modules/rtp_rtcp/source/rtp_sender_unittest.cc
index 40b10548..e9b01def 100644
--- a/modules/rtp_rtcp/source/rtp_sender_unittest.cc
+++ b/modules/rtp_rtcp/source/rtp_sender_unittest.cc
@@ -39,6 +39,7 @@ const uint8_t kAudioLevel = 0x5a;
const uint8_t kAudioLevelExtensionId = 9;
const int kAudioPayload = 103;
const uint64_t kStartTime = 123456789;
+const size_t kMaxPaddingSize = 224u;
} // namespace
using testing::_;
@@ -700,7 +701,7 @@ TEST_F(RtpSenderTest, SendRedundantPayloads) {
kAbsoluteSendTimeExtensionId);
rtp_sender_->SetTargetBitrate(300000);
const size_t kNumPayloadSizes = 10;
- const int kPayloadSizes[kNumPayloadSizes] = {500, 550, 600, 650, 700, 750,
+ const size_t kPayloadSizes[kNumPayloadSizes] = {500, 550, 600, 650, 700, 750,
800, 850, 900, 950};
// Send 10 packets of increasing size.
for (size_t i = 0; i < kNumPayloadSizes; ++i) {
@@ -711,25 +712,27 @@ TEST_F(RtpSenderTest, SendRedundantPayloads) {
rtp_sender_->TimeToSendPacket(seq_num++, capture_time_ms, false);
fake_clock_.AdvanceTimeMilliseconds(33);
}
- const int kPaddingPayloadSize = 224;
// The amount of padding to send is too small to send a payload packet.
- EXPECT_CALL(transport, SendPacket(_, _, kPaddingPayloadSize + rtp_header_len))
+ EXPECT_CALL(transport,
+ SendPacket(_, _, kMaxPaddingSize + rtp_header_len))
.WillOnce(testing::ReturnArg<2>());
- EXPECT_EQ(kPaddingPayloadSize, rtp_sender_->TimeToSendPadding(49));
+ EXPECT_EQ(kMaxPaddingSize,
+ static_cast<size_t>(rtp_sender_->TimeToSendPadding(49)));
const int kRtxHeaderSize = 2;
EXPECT_CALL(transport, SendPacket(_, _, kPayloadSizes[0] +
rtp_header_len + kRtxHeaderSize))
.WillOnce(testing::ReturnArg<2>());
- EXPECT_EQ(kPayloadSizes[0], rtp_sender_->TimeToSendPadding(500));
+ EXPECT_EQ(kPayloadSizes[0],
+ static_cast<size_t>(rtp_sender_->TimeToSendPadding(500)));
EXPECT_CALL(transport, SendPacket(_, _, kPayloadSizes[kNumPayloadSizes - 1] +
rtp_header_len + kRtxHeaderSize))
.WillOnce(testing::ReturnArg<2>());
- EXPECT_CALL(transport, SendPacket(_, _, kPaddingPayloadSize + rtp_header_len))
+ EXPECT_CALL(transport, SendPacket(_, _, kMaxPaddingSize + rtp_header_len))
.WillOnce(testing::ReturnArg<2>());
- EXPECT_EQ(kPayloadSizes[kNumPayloadSizes - 1] + kPaddingPayloadSize,
- rtp_sender_->TimeToSendPadding(999));
+ EXPECT_EQ(kPayloadSizes[kNumPayloadSizes - 1] + kMaxPaddingSize,
+ static_cast<size_t>(rtp_sender_->TimeToSendPadding(999)));
}
TEST_F(RtpSenderTest, SendGenericVideo) {
@@ -959,7 +962,6 @@ TEST_F(RtpSenderTest, StreamDataCountersCallbacks) {
const uint8_t kRedPayloadType = 96;
const uint8_t kUlpfecPayloadType = 97;
- const uint32_t kMaxPaddingSize = 224;
char payload_name[RTP_PAYLOAD_NAME_SIZE] = "GENERIC";
const uint8_t payload_type = 127;
ASSERT_EQ(0, rtp_sender_->RegisterPayload(payload_name, payload_type, 90000,
@@ -988,7 +990,7 @@ TEST_F(RtpSenderTest, StreamDataCountersCallbacks) {
// Send padding.
rtp_sender_->TimeToSendPadding(kMaxPaddingSize);
// {bytes = 6, header = 24, padding = 224, packets = 3, retrans = 1, fec = 0}
- EXPECT_TRUE(callback.Matches(ssrc, 6, 24, 224, 3, 1, 0));
+ EXPECT_TRUE(callback.Matches(ssrc, 6, 24, kMaxPaddingSize, 3, 1, 0));
// Send FEC.
rtp_sender_->SetGenericFECStatus(true, kRedPayloadType, kUlpfecPayloadType);
@@ -1003,7 +1005,7 @@ TEST_F(RtpSenderTest, StreamDataCountersCallbacks) {
sizeof(payload), NULL));
// {bytes = 34, header = 48, padding = 224, packets = 5, retrans = 1, fec = 1}
- EXPECT_TRUE(callback.Matches(ssrc, 34, 48, 224, 5, 1, 1));
+ EXPECT_TRUE(callback.Matches(ssrc, 34, 48, kMaxPaddingSize, 5, 1, 1));
rtp_sender_->RegisterRtpStatisticsCallback(NULL);
}
@@ -1093,13 +1095,25 @@ TEST_F(RtpSenderTest, BytesReportedCorrectly) {
sizeof(payload),
0));
- EXPECT_GT(transport_.total_bytes_sent_, 0u);
- EXPECT_EQ(transport_.total_bytes_sent_, rtp_sender_->Bytes());
- size_t last_bytes_sent = transport_.total_bytes_sent_;
-
- rtp_sender_->TimeToSendPadding(42);
-
- EXPECT_GT(transport_.total_bytes_sent_, last_bytes_sent);
- EXPECT_EQ(transport_.total_bytes_sent_, rtp_sender_->Bytes());
+ // Will send 2 full-size padding packets.
+ rtp_sender_->TimeToSendPadding(1);
+ rtp_sender_->TimeToSendPadding(1);
+
+ StreamDataCounters rtp_stats;
+ StreamDataCounters rtx_stats;
+ rtp_sender_->GetDataCounters(&rtp_stats, &rtx_stats);
+
+ // Payload + 1-byte generic header.
+ EXPECT_EQ(rtp_stats.bytes, sizeof(payload) + 1);
+ EXPECT_EQ(rtp_stats.header_bytes, 12u);
+ EXPECT_EQ(rtp_stats.padding_bytes, 0u);
+ EXPECT_EQ(rtx_stats.bytes, 0u);
+ EXPECT_EQ(rtx_stats.header_bytes, 24u);
+ EXPECT_EQ(rtx_stats.padding_bytes, 2 * kMaxPaddingSize);
+
+ EXPECT_EQ(transport_.total_bytes_sent_,
+ rtp_stats.bytes + rtp_stats.header_bytes + rtp_stats.padding_bytes +
+ rtx_stats.bytes + rtx_stats.header_bytes +
+ rtx_stats.padding_bytes);
}
} // namespace webrtc
diff --git a/modules/rtp_rtcp/source/ssrc_database.cc b/modules/rtp_rtcp/source/ssrc_database.cc
index df09b01b..4e230833 100644
--- a/modules/rtp_rtcp/source/ssrc_database.cc
+++ b/modules/rtp_rtcp/source/ssrc_database.cc
@@ -57,45 +57,12 @@ SSRCDatabase::CreateSSRC()
uint32_t ssrc = GenerateRandom();
-#ifndef WEBRTC_NO_STL
-
while(_ssrcMap.find(ssrc) != _ssrcMap.end())
{
ssrc = GenerateRandom();
}
_ssrcMap[ssrc] = 0;
-#else
- if(_sizeOfSSRC <= _numberOfSSRC)
- {
- // allocate more space
- const int newSize = _sizeOfSSRC + 10;
- uint32_t* tempSSRCVector = new uint32_t[newSize];
- memcpy(tempSSRCVector, _ssrcVector, _sizeOfSSRC*sizeof(uint32_t));
- delete [] _ssrcVector;
-
- _ssrcVector = tempSSRCVector;
- _sizeOfSSRC = newSize;
- }
-
- // check if in DB
- if(_ssrcVector)
- {
- for (int i=0; i<_numberOfSSRC; i++)
- {
- if (_ssrcVector[i] == ssrc)
- {
- // we have a match
- i = 0; // start over with a new ssrc
- ssrc = GenerateRandom();
- }
-
- }
- // add to database
- _ssrcVector[_numberOfSSRC] = ssrc;
- _numberOfSSRC++;
- }
-#endif
return ssrc;
}
@@ -103,39 +70,7 @@ int32_t
SSRCDatabase::RegisterSSRC(const uint32_t ssrc)
{
CriticalSectionScoped lock(_critSect);
-
-#ifndef WEBRTC_NO_STL
-
_ssrcMap[ssrc] = 0;
-
-#else
- if(_sizeOfSSRC <= _numberOfSSRC)
- {
- // allocate more space
- const int newSize = _sizeOfSSRC + 10;
- uint32_t* tempSSRCVector = new uint32_t[newSize];
- memcpy(tempSSRCVector, _ssrcVector, _sizeOfSSRC*sizeof(uint32_t));
- delete [] _ssrcVector;
-
- _ssrcVector = tempSSRCVector;
- _sizeOfSSRC = newSize;
- }
- // check if in DB
- if(_ssrcVector)
- {
- for (int i=0; i<_numberOfSSRC; i++)
- {
- if (_ssrcVector[i] == ssrc)
- {
- // we have a match
- return -1;
- }
- }
- // add to database
- _ssrcVector[_numberOfSSRC] = ssrc;
- _numberOfSSRC++;
- }
-#endif
return 0;
}
@@ -143,26 +78,7 @@ int32_t
SSRCDatabase::ReturnSSRC(const uint32_t ssrc)
{
CriticalSectionScoped lock(_critSect);
-
-#ifndef WEBRTC_NO_STL
_ssrcMap.erase(ssrc);
-
-#else
- if(_ssrcVector)
- {
- for (int i=0; i<_numberOfSSRC; i++)
- {
- if (_ssrcVector[i] == ssrc)
- {
- // we have a match
- // remove from database
- _ssrcVector[i] = _ssrcVector[_numberOfSSRC-1];
- _numberOfSSRC--;
- break;
- }
- }
- }
-#endif
return 0;
}
@@ -178,21 +94,12 @@ SSRCDatabase::SSRCDatabase()
srand(tv.tv_usec);
#endif
-#ifdef WEBRTC_NO_STL
- _sizeOfSSRC = 10;
- _numberOfSSRC = 0;
- _ssrcVector = new uint32_t[10];
-#endif
_critSect = CriticalSectionWrapper::CreateCriticalSection();
}
SSRCDatabase::~SSRCDatabase()
{
-#ifdef WEBRTC_NO_STL
- delete [] _ssrcVector;
-#else
_ssrcMap.clear();
-#endif
delete _critSect;
}
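With the WEBRTC_NO_STL branches gone, CreateSSRC() reduces to rejection sampling against a std::map of SSRCs in use. The retained logic, in isolation (using rand() as a stand-in for the module's own random generator):

#include <cstdint>
#include <cstdlib>
#include <map>

static uint32_t CreateSsrcSketch(std::map<uint32_t, uint32_t>* ssrc_map) {
  // Draw candidates until one is unused, then reserve it.
  uint32_t ssrc = static_cast<uint32_t>(rand());
  while (ssrc_map->find(ssrc) != ssrc_map->end())
    ssrc = static_cast<uint32_t>(rand());
  (*ssrc_map)[ssrc] = 0;
  return ssrc;
}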
diff --git a/modules/rtp_rtcp/source/ssrc_database.h b/modules/rtp_rtcp/source/ssrc_database.h
index e1f90e79..2d4932af 100644
--- a/modules/rtp_rtcp/source/ssrc_database.h
+++ b/modules/rtp_rtcp/source/ssrc_database.h
@@ -11,9 +11,7 @@
#ifndef WEBRTC_MODULES_RTP_RTCP_SOURCE_SSRC_DATABASE_H_
#define WEBRTC_MODULES_RTP_RTCP_SOURCE_SSRC_DATABASE_H_
-#ifndef WEBRTC_NO_STL
#include <map>
-#endif
#include "webrtc/system_wrappers/interface/static_instance.h"
#include "webrtc/typedefs.h"
@@ -46,14 +44,7 @@ private:
uint32_t GenerateRandom();
-#ifdef WEBRTC_NO_STL
- int _numberOfSSRC;
- int _sizeOfSSRC;
-
- uint32_t* _ssrcVector;
-#else
std::map<uint32_t, uint32_t> _ssrcMap;
-#endif
CriticalSectionWrapper* _critSect;
};
diff --git a/modules/video_capture/ios/rtc_video_capture_ios_objc.mm b/modules/video_capture/ios/rtc_video_capture_ios_objc.mm
index 641ca241..ac90b72a 100644
--- a/modules/video_capture/ios/rtc_video_capture_ios_objc.mm
+++ b/modules/video_capture/ios/rtc_video_capture_ios_objc.mm
@@ -43,6 +43,12 @@ using namespace webrtc::videocapturemodule;
_owner = owner;
_captureId = captureId;
_captureSession = [[AVCaptureSession alloc] init];
+#if defined(__IPHONE_7_0) && __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_7_0
+ NSString* version = [[UIDevice currentDevice] systemVersion];
+ if ([version integerValue] >= 7) {
+ _captureSession.usesApplicationAudioSession = NO;
+ }
+#endif
_captureChanging = NO;
_captureChangingCondition = [[NSCondition alloc] init];
@@ -217,6 +223,9 @@ using namespace webrtc::videocapturemodule;
return;
switch ([UIApplication sharedApplication].statusBarOrientation) {
case UIInterfaceOrientationPortrait:
+#if defined(__IPHONE_8_0) && __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_8_0
+ case UIInterfaceOrientationUnknown:
+#endif
_connection.videoOrientation = AVCaptureVideoOrientationPortrait;
break;
case UIInterfaceOrientationPortraitUpsideDown:
diff --git a/modules/video_coding/BUILD.gn b/modules/video_coding/BUILD.gn
index 0dc6721b..cbd12936 100644
--- a/modules/video_coding/BUILD.gn
+++ b/modules/video_coding/BUILD.gn
@@ -75,8 +75,6 @@ source_set("video_coding") {
source_set("video_coding_utility") {
sources = [
- "utility/exp_filter.cc",
- "utility/include/exp_filter.h",
"utility/include/frame_dropper.h",
"utility/frame_dropper.cc",
]
diff --git a/modules/video_coding/main/source/media_opt_util.cc b/modules/video_coding/main/source/media_opt_util.cc
index ba86575f..b506a5b5 100644
--- a/modules/video_coding/main/source/media_opt_util.cc
+++ b/modules/video_coding/main/source/media_opt_util.cc
@@ -837,7 +837,7 @@ uint8_t VCMLossProtectionLogic::FilteredLoss(
case kNoFilter:
break;
case kAvgFilter:
- filtered_loss = static_cast<uint8_t> (_lossPr255.Value() + 0.5);
+ filtered_loss = static_cast<uint8_t>(_lossPr255.filtered() + 0.5);
break;
case kMaxFilter:
filtered_loss = MaxFilteredLossPr(nowMs);
@@ -907,8 +907,8 @@ VCMLossProtectionLogic::UpdateMethod()
_currentParameters.keyFrameSize = _keyFrameSize;
_currentParameters.fecRateDelta = _fecRateDelta;
_currentParameters.fecRateKey = _fecRateKey;
- _currentParameters.packetsPerFrame = _packetsPerFrame.Value();
- _currentParameters.packetsPerFrameKey = _packetsPerFrameKey.Value();
+ _currentParameters.packetsPerFrame = _packetsPerFrame.filtered();
+ _currentParameters.packetsPerFrameKey = _packetsPerFrameKey.filtered();
_currentParameters.residualPacketLossFec = _residualPacketLossFec;
_currentParameters.codecWidth = _codecWidth;
_currentParameters.codecHeight = _codecHeight;
diff --git a/modules/video_coding/main/source/media_opt_util.h b/modules/video_coding/main/source/media_opt_util.h
index f39a5780..d421d9e7 100644
--- a/modules/video_coding/main/source/media_opt_util.h
+++ b/modules/video_coding/main/source/media_opt_util.h
@@ -14,9 +14,9 @@
#include <math.h>
#include <stdlib.h>
+#include "webrtc/base/exp_filter.h"
#include "webrtc/modules/video_coding/main/source/internal_defines.h"
#include "webrtc/modules/video_coding/main/source/qm_select.h"
-#include "webrtc/modules/video_coding/utility/include/exp_filter.h"
#include "webrtc/system_wrappers/interface/trace.h"
#include "webrtc/typedefs.h"
@@ -367,27 +367,27 @@ private:
// Sets the available loss protection methods.
void UpdateMaxLossHistory(uint8_t lossPr255, int64_t now);
uint8_t MaxFilteredLossPr(int64_t nowMs) const;
- VCMProtectionMethod* _selectedMethod;
- VCMProtectionParameters _currentParameters;
- uint32_t _rtt;
- float _lossPr;
- float _bitRate;
- float _frameRate;
- float _keyFrameSize;
- uint8_t _fecRateKey;
- uint8_t _fecRateDelta;
- int64_t _lastPrUpdateT;
- int64_t _lastPacketPerFrameUpdateT;
- int64_t _lastPacketPerFrameUpdateTKey;
- VCMExpFilter _lossPr255;
- VCMLossProbabilitySample _lossPrHistory[kLossPrHistorySize];
- uint8_t _shortMaxLossPr255;
- VCMExpFilter _packetsPerFrame;
- VCMExpFilter _packetsPerFrameKey;
- float _residualPacketLossFec;
- uint16_t _codecWidth;
- uint16_t _codecHeight;
- int _numLayers;
+ VCMProtectionMethod* _selectedMethod;
+ VCMProtectionParameters _currentParameters;
+ uint32_t _rtt;
+ float _lossPr;
+ float _bitRate;
+ float _frameRate;
+ float _keyFrameSize;
+ uint8_t _fecRateKey;
+ uint8_t _fecRateDelta;
+ int64_t _lastPrUpdateT;
+ int64_t _lastPacketPerFrameUpdateT;
+ int64_t _lastPacketPerFrameUpdateTKey;
+ rtc::ExpFilter _lossPr255;
+ VCMLossProbabilitySample _lossPrHistory[kLossPrHistorySize];
+ uint8_t _shortMaxLossPr255;
+ rtc::ExpFilter _packetsPerFrame;
+ rtc::ExpFilter _packetsPerFrameKey;
+ float _residualPacketLossFec;
+ uint16_t _codecWidth;
+ uint16_t _codecHeight;
+ int _numLayers;
};
} // namespace media_optimization
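These two hunks are the call-site half of the migration below: the video-coding-local VCMExpFilter is replaced by the shared rtc::ExpFilter from webrtc/base, and the accessor is renamed from Value() to filtered(). A minimal usage sketch, assuming rtc::ExpFilter keeps VCMExpFilter's constructor and Apply() semantics (the alpha of 0.9 is illustrative, not a value taken from this change):

    #include "webrtc/base/exp_filter.h"

    rtc::ExpFilter filter(0.9f);        // filter factor base alpha
    filter.Apply(1.0f, 42.0f);          // y(k) = 0.9 * y(k-1) + 0.1 * x(k)
    float current = filter.filtered();  // renamed from Value()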
diff --git a/modules/video_coding/utility/exp_filter.cc b/modules/video_coding/utility/exp_filter.cc
deleted file mode 100644
index 44f280bc..00000000
--- a/modules/video_coding/utility/exp_filter.cc
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "webrtc/modules/video_coding/utility/include/exp_filter.h"
-
-#include <math.h>
-
-namespace webrtc {
-
-void
-VCMExpFilter::Reset(float alpha)
-{
- _alpha = alpha;
- _filtered = -1.0;
-}
-
-float
-VCMExpFilter::Apply(float exp, float sample)
-{
- if (_filtered == -1.0)
- {
- // Initialize filtered bit rates
- _filtered = sample;
- }
- else if (exp == 1.0)
- {
- _filtered = _alpha * _filtered + (1 - _alpha) * sample;
- }
- else
- {
- float alpha = pow(_alpha, exp);
- _filtered = alpha * _filtered + (1 - alpha) * sample;
- }
- if (_max != -1 && _filtered > _max)
- {
- _filtered = _max;
- }
- return _filtered;
-}
-
-void
-VCMExpFilter::UpdateBase(float alpha)
-{
- _alpha = alpha;
-}
-
-float
-VCMExpFilter::Value() const
-{
- return _filtered;
-}
-
-}
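The deleted Apply() implements the recurrence y(k) = alpha^T * y(k-1) + (1 - alpha^T) * x(k); the exponent T lets callers compensate for irregular sampling by raising alpha to, say, the number of elapsed frames. A worked step with illustrative numbers:

    alpha = 0.5, previous output y(k-1) = 8.0, new sample x(k) = 4.0:
      T = 1:  y(k) = 0.5  * 8.0 + 0.5  * 4.0 = 6.0
      T = 2:  y(k) = 0.25 * 8.0 + 0.75 * 4.0 = 5.0   (alpha^2 = 0.25)
    A first call while _filtered == -1.0 just seeds the filter with the
    sample, and a non-negative _max clamps the output from above.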
diff --git a/modules/video_coding/utility/frame_dropper.cc b/modules/video_coding/utility/frame_dropper.cc
index d3c25fb9..54c8cb8a 100644
--- a/modules/video_coding/utility/frame_dropper.cc
+++ b/modules/video_coding/utility/frame_dropper.cc
@@ -86,25 +86,27 @@ FrameDropper::Fill(uint32_t frameSizeBytes, bool deltaFrame)
{
_keyFrameSizeAvgKbits.Apply(1, frameSizeKbits);
_keyFrameRatio.Apply(1.0, 1.0);
- if (frameSizeKbits > _keyFrameSizeAvgKbits.Value())
+ if (frameSizeKbits > _keyFrameSizeAvgKbits.filtered())
{
// Remove the average key frame size since we
// compensate for key frames when adding delta
// frames.
- frameSizeKbits -= _keyFrameSizeAvgKbits.Value();
+ frameSizeKbits -= _keyFrameSizeAvgKbits.filtered();
}
else
{
// Shouldn't be negative, so zero is the lower bound.
frameSizeKbits = 0;
}
- if (_keyFrameRatio.Value() > 1e-5 && 1 / _keyFrameRatio.Value() < _keyFrameSpreadFrames)
+ if (_keyFrameRatio.filtered() > 1e-5 &&
+ 1 / _keyFrameRatio.filtered() < _keyFrameSpreadFrames)
{
// We are sending key frames more often than our upper bound for
// how much we allow the key frame compensation to be spread
// out in time. Therefore we must use the key frame ratio rather
// than keyFrameSpreadFrames.
- _keyFrameCount = static_cast<int32_t>(1 / _keyFrameRatio.Value() + 0.5);
+ _keyFrameCount =
+ static_cast<int32_t>(1 / _keyFrameRatio.filtered() + 0.5);
}
else
{
@@ -145,13 +147,14 @@ FrameDropper::Leak(uint32_t inputFrameRate)
if (_keyFrameCount > 0)
{
// Perform the key frame compensation
- if (_keyFrameRatio.Value() > 0 && 1 / _keyFrameRatio.Value() < _keyFrameSpreadFrames)
+ if (_keyFrameRatio.filtered() > 0 &&
+ 1 / _keyFrameRatio.filtered() < _keyFrameSpreadFrames)
{
- T -= _keyFrameSizeAvgKbits.Value() * _keyFrameRatio.Value();
+ T -= _keyFrameSizeAvgKbits.filtered() * _keyFrameRatio.filtered();
}
else
{
- T -= _keyFrameSizeAvgKbits.Value() / _keyFrameSpreadFrames;
+ T -= _keyFrameSizeAvgKbits.filtered() / _keyFrameSpreadFrames;
}
_keyFrameCount--;
}
@@ -232,11 +235,11 @@ FrameDropper::DropFrame()
_dropCount = 0;
}
- if (_dropRatio.Value() >= 0.5f) // Drops per keep
+ if (_dropRatio.filtered() >= 0.5f) // Drops per keep
{
// limit is the number of frames we should drop between each kept frame
// to keep our drop ratio. limit is positive in this case.
- float denom = 1.0f - _dropRatio.Value();
+ float denom = 1.0f - _dropRatio.filtered();
if (denom < 1e-5)
{
denom = (float)1e-5;
@@ -252,7 +255,7 @@ FrameDropper::DropFrame()
if (_dropCount < 0)
{
// Reset the _dropCount since it was negative and should be positive.
- if (_dropRatio.Value() > 0.4f)
+ if (_dropRatio.filtered() > 0.4f)
{
_dropCount = -_dropCount;
}
@@ -274,12 +277,13 @@ FrameDropper::DropFrame()
return false;
}
}
- else if (_dropRatio.Value() > 0.0f && _dropRatio.Value() < 0.5f) // Keeps per drop
+ else if (_dropRatio.filtered() > 0.0f &&
+ _dropRatio.filtered() < 0.5f) // Keeps per drop
{
// limit is the number of frames we should keep between each drop
// in order to keep the drop ratio. limit is negative in this case,
// and the _dropCount is also negative.
- float denom = _dropRatio.Value();
+ float denom = _dropRatio.filtered();
if (denom < 1e-5)
{
denom = (float)1e-5;
@@ -289,7 +293,7 @@ FrameDropper::DropFrame()
{
// Reset the _dropCount since we have a positive
// _dropCount, and it should be negative.
- if (_dropRatio.Value() < 0.6f)
+ if (_dropRatio.filtered() < 0.6f)
{
_dropCount = -_dropCount;
}
@@ -350,7 +354,7 @@ FrameDropper::ActualFrameRate(uint32_t inputFrameRate) const
{
return static_cast<float>(inputFrameRate);
}
- return inputFrameRate * (1.0f - _dropRatio.Value());
+ return inputFrameRate * (1.0f - _dropRatio.filtered());
}
// Put a cap on the accumulator, i.e., don't let it grow beyond some level.
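The frame dropper gets the same Value() -> filtered() rename. Its two regimes are easiest to see with numbers: with filtered drop ratio r, r >= 0.5 means roughly r/(1-r) frames dropped per kept frame (limit positive), while 0 < r < 0.5 means roughly (1-r)/r frames kept per dropped frame (limit negative); in both branches denom is floored at 1e-5 to avoid a division by zero. Illustrative values:

    r = 0.75:  denom = 1 - r = 0.25  ->  ~3 frames dropped per kept frame
    r = 0.20:  denom = r     = 0.20  ->  ~4 frames kept per dropped frame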
diff --git a/modules/video_coding/utility/include/exp_filter.h b/modules/video_coding/utility/include/exp_filter.h
deleted file mode 100644
index d8c37a30..00000000
--- a/modules/video_coding/utility/include/exp_filter.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef WEBRTC_MODULES_VIDEO_CODING_UTILITY_INCLUDE_EXP_FILTER_H_
-#define WEBRTC_MODULES_VIDEO_CODING_UTILITY_INCLUDE_EXP_FILTER_H_
-
-namespace webrtc
-{
-
-/**********************/
-/* ExpFilter class */
-/**********************/
-
-class VCMExpFilter
-{
-public:
- VCMExpFilter(float alpha, float max = -1.0) : _alpha(alpha), _filtered(-1.0), _max(max) {}
-
- // Resets the filter to its initial state, and resets alpha to the given value
- //
- // Input:
- // - alpha : the new value of the filter factor base.
- void Reset(float alpha);
-
- // Applies the filter with the given exponent on the provided sample
- //
- // Input:
- // - exp : Exponent T in y(k) = alpha^T * y(k-1) + (1 - alpha^T) * x(k)
- // - sample : x(k) in the above filter equation
- float Apply(float exp, float sample);
-
- // Return current filtered value: y(k)
- //
- // Return value : The current filter output
- float Value() const;
-
- // Change the filter factor base
- //
- // Input:
- // - alpha : The new filter factor base.
- void UpdateBase(float alpha);
-
-private:
- float _alpha; // Filter factor base
- float _filtered; // Current filter output
- const float _max;
-}; // end of ExpFilter class
-
-} // namespace webrtc
-
-#endif // WEBRTC_MODULES_VIDEO_CODING_UTILITY_INCLUDE_EXP_FILTER_H_
diff --git a/modules/video_coding/utility/include/frame_dropper.h b/modules/video_coding/utility/include/frame_dropper.h
index 4c1c168c..8eebd784 100644
--- a/modules/video_coding/utility/include/frame_dropper.h
+++ b/modules/video_coding/utility/include/frame_dropper.h
@@ -11,7 +11,7 @@
#ifndef WEBRTC_MODULES_VIDEO_CODING_UTILITY_INCLUDE_FRAME_DROPPER_H_
#define WEBRTC_MODULES_VIDEO_CODING_UTILITY_INCLUDE_FRAME_DROPPER_H_
-#include "webrtc/modules/video_coding/utility/include/exp_filter.h"
+#include "webrtc/base/exp_filter.h"
#include "webrtc/typedefs.h"
namespace webrtc
@@ -72,23 +72,23 @@ private:
void UpdateRatio();
void CapAccumulator();
- VCMExpFilter _keyFrameSizeAvgKbits;
- VCMExpFilter _keyFrameRatio;
- float _keyFrameSpreadFrames;
- int32_t _keyFrameCount;
- float _accumulator;
- float _accumulatorMax;
- float _targetBitRate;
- bool _dropNext;
- VCMExpFilter _dropRatio;
- int32_t _dropCount;
- float _windowSize;
- float _incoming_frame_rate;
- bool _wasBelowMax;
- bool _enabled;
- bool _fastMode;
- float _cap_buffer_size;
- float _max_time_drops;
+ rtc::ExpFilter _keyFrameSizeAvgKbits;
+ rtc::ExpFilter _keyFrameRatio;
+ float _keyFrameSpreadFrames;
+ int32_t _keyFrameCount;
+ float _accumulator;
+ float _accumulatorMax;
+ float _targetBitRate;
+ bool _dropNext;
+ rtc::ExpFilter _dropRatio;
+ int32_t _dropCount;
+ float _windowSize;
+ float _incoming_frame_rate;
+ bool _wasBelowMax;
+ bool _enabled;
+ bool _fastMode;
+ float _cap_buffer_size;
+ float _max_time_drops;
}; // end of VCMFrameDropper class
} // namespace webrtc
diff --git a/modules/video_coding/utility/video_coding_utility.gyp b/modules/video_coding/utility/video_coding_utility.gyp
index 24f88800..2f0202b1 100644
--- a/modules/video_coding/utility/video_coding_utility.gyp
+++ b/modules/video_coding/utility/video_coding_utility.gyp
@@ -18,9 +18,7 @@
'<(webrtc_root)/system_wrappers/source/system_wrappers.gyp:system_wrappers',
],
'sources': [
- 'include/exp_filter.h',
'include/frame_dropper.h',
- 'exp_filter.cc',
'frame_dropper.cc',
],
},
diff --git a/modules/video_processing/BUILD.gn b/modules/video_processing/BUILD.gn
index 724a9dcb..f9412f9e 100644
--- a/modules/video_processing/BUILD.gn
+++ b/modules/video_processing/BUILD.gn
@@ -25,8 +25,6 @@ source_set("video_processing") {
"main/source/content_analysis.h",
"main/source/deflickering.cc",
"main/source/deflickering.h",
- "main/source/denoising.cc",
- "main/source/denoising.h",
"main/source/frame_preprocessor.cc",
"main/source/frame_preprocessor.h",
"main/source/spatial_resampler.cc",
diff --git a/modules/video_processing/OWNERS b/modules/video_processing/OWNERS
index d5ae8473..037de93f 100644
--- a/modules/video_processing/OWNERS
+++ b/modules/video_processing/OWNERS
@@ -1 +1,6 @@
+stefan@webrtc.org
+mikhal@webrtc.org
+marpan@webrtc.org
+henrik.lundin@webrtc.org
+
per-file BUILD.gn=kjellander@webrtc.org
diff --git a/modules/video_processing/main/OWNERS b/modules/video_processing/main/OWNERS
deleted file mode 100644
index 7183cf21..00000000
--- a/modules/video_processing/main/OWNERS
+++ /dev/null
@@ -1,4 +0,0 @@
-stefan@webrtc.org
-mikhal@webrtc.org
-marpan@webrtc.org
-henrik.lundin@webrtc.org
diff --git a/modules/video_processing/main/interface/video_processing.h b/modules/video_processing/main/interface/video_processing.h
index 817d43d9..4df8812a 100644
--- a/modules/video_processing/main/interface/video_processing.h
+++ b/modules/video_processing/main/interface/video_processing.h
@@ -177,17 +177,6 @@ class VideoProcessingModule : public Module {
virtual int32_t Deflickering(I420VideoFrame* frame, FrameStats* stats) = 0;
/**
- Denoises a video frame. Every frame from the stream should be passed in.
- Has a fixed-point implementation.
-
- \param[in,out] frame
- Pointer to the video frame.
-
- \return The number of modified pixels on success, -1 on failure.
- */
- virtual int32_t Denoising(I420VideoFrame* frame) = 0;
-
- /**
Detects if a video frame is excessively bright or dark. Returns a
warning if this is the case. Multiple frames should be passed in before
expecting a warning. Has a floating-point implementation.
diff --git a/modules/video_processing/main/source/Android.mk b/modules/video_processing/main/source/Android.mk
index 829fa960..62eb3874 100644
--- a/modules/video_processing/main/source/Android.mk
+++ b/modules/video_processing/main/source/Android.mk
@@ -23,7 +23,6 @@ LOCAL_SRC_FILES := \
color_enhancement.cc \
content_analysis.cc \
deflickering.cc \
- denoising.cc \
frame_preprocessor.cc \
spatial_resampler.cc \
video_decimator.cc \
diff --git a/modules/video_processing/main/source/denoising.cc b/modules/video_processing/main/source/denoising.cc
deleted file mode 100644
index 4c8dcb43..00000000
--- a/modules/video_processing/main/source/denoising.cc
+++ /dev/null
@@ -1,156 +0,0 @@
-/*
- * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "webrtc/modules/video_processing/main/source/denoising.h"
-
-#include <string.h>
-
-namespace webrtc {
-// Down-sampling in time (unit: number of frames)
-enum { kSubsamplingTime = 0 };
-// Sub-sampling in width (unit: power of 2).
-enum { kSubsamplingWidth = 0 };
-// Sub-sampling in height (unit: power of 2)
-enum { kSubsamplingHeight = 0 };
-// (Q8) De-noising filter parameter
-enum { kDenoiseFiltParam = 179 };
-// (Q8) 1 - filter parameter
-enum { kDenoiseFiltParamRec = 77 };
-// (Q8) De-noising threshold level
-enum { kDenoiseThreshold = 19200 };
-
-VPMDenoising::VPMDenoising()
- : id_(0),
- moment1_(NULL),
- moment2_(NULL) {
- Reset();
-}
-
-VPMDenoising::~VPMDenoising() {
- if (moment1_) {
- delete [] moment1_;
- moment1_ = NULL;
-  }
-
- if (moment2_) {
- delete [] moment2_;
- moment2_ = NULL;
- }
-}
-
-int32_t VPMDenoising::ChangeUniqueId(const int32_t id) {
- id_ = id;
- return VPM_OK;
-}
-
-void VPMDenoising::Reset() {
- frame_size_ = 0;
- denoise_frame_cnt_ = 0;
-
- if (moment1_) {
- delete [] moment1_;
- moment1_ = NULL;
- }
-
- if (moment2_) {
- delete [] moment2_;
- moment2_ = NULL;
- }
-}
-
-int32_t VPMDenoising::ProcessFrame(I420VideoFrame* frame) {
- assert(frame);
- int32_t thevar;
- int k;
- int jsub, ksub;
- int32_t diff0;
- uint32_t tmp_moment1;
- uint32_t tmp_moment2;
- uint32_t tmp;
- int32_t num_pixels_changed = 0;
-
- if (frame->IsZeroSize()) {
- return VPM_GENERAL_ERROR;
- }
-
- int width = frame->width();
- int height = frame->height();
-
- /* Size of luminance component */
- const uint32_t y_size = height * width;
-
- /* Initialization */
- if (y_size != frame_size_) {
- delete [] moment1_;
- moment1_ = NULL;
-
- delete [] moment2_;
- moment2_ = NULL;
- }
- frame_size_ = y_size;
-
- if (!moment1_) {
- moment1_ = new uint32_t[y_size];
- memset(moment1_, 0, sizeof(uint32_t)*y_size);
- }
-
- if (!moment2_) {
- moment2_ = new uint32_t[y_size];
- memset(moment2_, 0, sizeof(uint32_t)*y_size);
- }
-
- /* Apply de-noising on each pixel, but update variance sub-sampled */
- uint8_t* buffer = frame->buffer(kYPlane);
- for (int i = 0; i < height; i++) { // Collect over height
- k = i * width;
- ksub = ((i >> kSubsamplingHeight) << kSubsamplingHeight) * width;
- for (int j = 0; j < width; j++) { // Collect over width
- jsub = ((j >> kSubsamplingWidth) << kSubsamplingWidth);
- /* Update mean value for every pixel and every frame */
- tmp_moment1 = moment1_[k + j];
- tmp_moment1 *= kDenoiseFiltParam; // Q16
- tmp_moment1 += ((kDenoiseFiltParamRec * ((uint32_t)buffer[k + j])) << 8);
- tmp_moment1 >>= 8; // Q8
- moment1_[k + j] = tmp_moment1;
-
- tmp_moment2 = moment2_[ksub + jsub];
- if ((ksub == k) && (jsub == j) && (denoise_frame_cnt_ == 0)) {
- tmp = ((uint32_t)buffer[k + j] *
- (uint32_t)buffer[k + j]);
- tmp_moment2 *= kDenoiseFiltParam; // Q16
- tmp_moment2 += ((kDenoiseFiltParamRec * tmp) << 8);
- tmp_moment2 >>= 8; // Q8
- }
- moment2_[k + j] = tmp_moment2;
- /* Current event = deviation from mean value */
- diff0 = ((int32_t)buffer[k + j] << 8) - moment1_[k + j];
- /* Recent events = variance (variations over time) */
- thevar = moment2_[k + j];
- thevar -= ((moment1_[k + j] * moment1_[k + j]) >> 8);
- // De-noising criteria, i.e., when should we replace a pixel by its mean.
- // 1) recent events are minor.
- // 2) current events are minor.
- if ((thevar < kDenoiseThreshold)
- && ((diff0 * diff0 >> 8) < kDenoiseThreshold)) {
- // Replace with mean.
- buffer[k + j] = (uint8_t)(moment1_[k + j] >> 8);
- num_pixels_changed++;
- }
- }
- }
-
- denoise_frame_cnt_++;
- if (denoise_frame_cnt_ > kSubsamplingTime)
- denoise_frame_cnt_ = 0;
-
- return num_pixels_changed;
-}
-
-} // namespace
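The removed denoiser keeps per-pixel running first and second moments in Q8, both smoothed by kDenoiseFiltParam/256 (179/256, with kDenoiseFiltParamRec = 256 - 179 = 77 as its complement), estimates the variance as E[x^2] - E[x]^2, and replaces a pixel by its mean only when both that variance and the squared deviation of the current sample fall below kDenoiseThreshold. A plain-float sketch of the criterion, with the Q8 shifts and the spatial/temporal sub-sampling of the second moment dropped for clarity (19200 in Q8 is 75.0):

    // Per-pixel state; the deleted code stores these in Q8 fixed point.
    float m1 = 0.f;                 // running mean,  E[x]
    float m2 = 0.f;                 // running power, E[x^2]
    const float a = 179.f / 256.f;  // kDenoiseFiltParam

    uint8_t Denoise(uint8_t pixel) {
      const float x = pixel;
      m1 = a * m1 + (1.f - a) * x;
      m2 = a * m2 + (1.f - a) * x * x;
      const float var = m2 - m1 * m1;      // recent variation over time
      const float dev = x - m1;            // deviation from the running mean
      if (var < 75.f && dev * dev < 75.f)  // kDenoiseThreshold in Q8 units
        return static_cast<uint8_t>(m1);   // quiet pixel: truncate to the mean
      return pixel;
    }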
diff --git a/modules/video_processing/main/source/denoising.h b/modules/video_processing/main/source/denoising.h
deleted file mode 100644
index 60645fbd..00000000
--- a/modules/video_processing/main/source/denoising.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef WEBRTC_MODULES_VIDEO_PROCESSING_MAIN_SOURCE_DENOISING_H_
-#define WEBRTC_MODULES_VIDEO_PROCESSING_MAIN_SOURCE_DENOISING_H_
-
-#include "webrtc/modules/video_processing/main/interface/video_processing.h"
-#include "webrtc/typedefs.h"
-
-namespace webrtc {
-
-class VPMDenoising {
- public:
- VPMDenoising();
- ~VPMDenoising();
-
- int32_t ChangeUniqueId(int32_t id);
-
- void Reset();
-
- int32_t ProcessFrame(I420VideoFrame* frame);
-
- private:
- int32_t id_;
-
- uint32_t* moment1_; // (Q8) First order moment (mean).
- uint32_t* moment2_; // (Q8) Second order moment.
- uint32_t frame_size_; // Size (# of pixels) of frame.
- int denoise_frame_cnt_; // Counter for subsampling in time.
-};
-
-} // namespace webrtc
-
-#endif // WEBRTC_MODULES_VIDEO_PROCESSING_MAIN_SOURCE_DENOISING_H_
-
diff --git a/modules/video_processing/main/source/video_processing.gypi b/modules/video_processing/main/source/video_processing.gypi
index 7a0279e7..f62aa18a 100644
--- a/modules/video_processing/main/source/video_processing.gypi
+++ b/modules/video_processing/main/source/video_processing.gypi
@@ -31,8 +31,6 @@
'content_analysis.h',
'deflickering.cc',
'deflickering.h',
- 'denoising.cc',
- 'denoising.h',
'frame_preprocessor.cc',
'frame_preprocessor.h',
'spatial_resampler.cc',
diff --git a/modules/video_processing/main/source/video_processing_impl.cc b/modules/video_processing/main/source/video_processing_impl.cc
index 3560030c..8bc5bf0d 100644
--- a/modules/video_processing/main/source/video_processing_impl.cc
+++ b/modules/video_processing/main/source/video_processing_impl.cc
@@ -51,7 +51,6 @@ int32_t VideoProcessingModuleImpl::ChangeUniqueId(const int32_t id) {
id_ = id;
brightness_detection_.ChangeUniqueId(id);
deflickering_.ChangeUniqueId(id);
- denoising_.ChangeUniqueId(id);
frame_pre_processor_.ChangeUniqueId(id);
return VPM_OK;
}
@@ -66,7 +65,6 @@ VideoProcessingModuleImpl::VideoProcessingModuleImpl(const int32_t id)
mutex_(*CriticalSectionWrapper::CreateCriticalSection()) {
brightness_detection_.ChangeUniqueId(id);
deflickering_.ChangeUniqueId(id);
- denoising_.ChangeUniqueId(id);
frame_pre_processor_.ChangeUniqueId(id);
}
@@ -77,7 +75,6 @@ VideoProcessingModuleImpl::~VideoProcessingModuleImpl() {
void VideoProcessingModuleImpl::Reset() {
CriticalSectionScoped mutex(&mutex_);
deflickering_.Reset();
- denoising_.Reset();
brightness_detection_.Reset();
frame_pre_processor_.Reset();
}
@@ -146,11 +143,6 @@ int32_t VideoProcessingModuleImpl::Deflickering(I420VideoFrame* frame,
return deflickering_.ProcessFrame(frame, stats);
}
-int32_t VideoProcessingModuleImpl::Denoising(I420VideoFrame* frame) {
- CriticalSectionScoped mutex(&mutex_);
- return denoising_.ProcessFrame(frame);
-}
-
int32_t VideoProcessingModuleImpl::BrightnessDetection(
const I420VideoFrame& frame,
const FrameStats& stats) {
diff --git a/modules/video_processing/main/source/video_processing_impl.h b/modules/video_processing/main/source/video_processing_impl.h
index deae6ff6..6fe617d8 100644
--- a/modules/video_processing/main/source/video_processing_impl.h
+++ b/modules/video_processing/main/source/video_processing_impl.h
@@ -16,7 +16,6 @@
#include "webrtc/modules/video_processing/main/source/brightness_detection.h"
#include "webrtc/modules/video_processing/main/source/color_enhancement.h"
#include "webrtc/modules/video_processing/main/source/deflickering.h"
-#include "webrtc/modules/video_processing/main/source/denoising.h"
#include "webrtc/modules/video_processing/main/source/frame_preprocessor.h"
namespace webrtc {
@@ -36,8 +35,6 @@ class VideoProcessingModuleImpl : public VideoProcessingModule {
virtual int32_t Deflickering(I420VideoFrame* frame, FrameStats* stats);
- virtual int32_t Denoising(I420VideoFrame* frame);
-
virtual int32_t BrightnessDetection(const I420VideoFrame& frame,
const FrameStats& stats);
@@ -74,7 +71,6 @@ class VideoProcessingModuleImpl : public VideoProcessingModule {
int32_t id_;
CriticalSectionWrapper& mutex_;
VPMDeflickering deflickering_;
- VPMDenoising denoising_;
VPMBrightnessDetection brightness_detection_;
VPMFramePreprocessor frame_pre_processor_;
};
diff --git a/modules/video_processing/main/test/unit_test/denoising_test.cc b/modules/video_processing/main/test/unit_test/denoising_test.cc
deleted file mode 100644
index c00db6ab..00000000
--- a/modules/video_processing/main/test/unit_test/denoising_test.cc
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <stdio.h>
-#include <stdlib.h>
-
-#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
-#include "webrtc/modules/video_processing/main/interface/video_processing.h"
-#include "webrtc/modules/video_processing/main/test/unit_test/video_processing_unittest.h"
-#include "webrtc/system_wrappers/interface/tick_util.h"
-#include "webrtc/test/testsupport/fileutils.h"
-#include "webrtc/test/testsupport/gtest_disable.h"
-
-namespace webrtc {
-
-TEST_F(VideoProcessingModuleTest, DISABLED_ON_ANDROID(Denoising))
-{
- enum { NumRuns = 10 };
- uint32_t frameNum = 0;
-
- int64_t min_runtime = 0;
- int64_t avg_runtime = 0;
-
- const std::string denoise_filename =
- webrtc::test::OutputPath() + "denoise_testfile.yuv";
- FILE* denoiseFile = fopen(denoise_filename.c_str(), "wb");
- ASSERT_TRUE(denoiseFile != NULL) <<
- "Could not open output file: " << denoise_filename << "\n";
-
- const std::string noise_filename =
- webrtc::test::OutputPath() + "noise_testfile.yuv";
- FILE* noiseFile = fopen(noise_filename.c_str(), "wb");
- ASSERT_TRUE(noiseFile != NULL) <<
- "Could not open noisy file: " << noise_filename << "\n";
-
- printf("\nRun time [us / frame]:\n");
- for (uint32_t run_idx = 0; run_idx < NumRuns; run_idx++)
- {
- TickTime t0;
- TickTime t1;
- TickInterval acc_ticks;
- int32_t modifiedPixels = 0;
-
- frameNum = 0;
- scoped_ptr<uint8_t[]> video_buffer(new uint8_t[frame_length_]);
- while (fread(video_buffer.get(), 1, frame_length_, source_file_) ==
- frame_length_)
- {
- EXPECT_EQ(0, ConvertToI420(kI420, video_buffer.get(), 0, 0,
- width_, height_,
- 0, kRotateNone, &video_frame_));
- frameNum++;
- uint8_t* sourceBuffer = video_frame_.buffer(kYPlane);
-
- // Add noise to a part in video stream
- // Random noise
- // TODO: investigate the effectiveness of this test.
-
- for (int ir = 0; ir < height_; ir++)
- {
- uint32_t ik = ir * width_;
- for (int ic = 0; ic < width_; ic++)
- {
- uint8_t r = rand() % 16;
- r -= 8;
- if (ir < height_ / 4)
- r = 0;
- if (ir >= 3 * height_ / 4)
- r = 0;
- if (ic < width_ / 4)
- r = 0;
- if (ic >= 3 * width_ / 4)
- r = 0;
-
- /*uint8_t pixelValue = 0;
- if (ir >= height_ / 2)
- { // Region 3 or 4
- pixelValue = 170;
- }
- if (ic >= width_ / 2)
- { // Region 2 or 4
- pixelValue += 85;
- }
- pixelValue += r;
- sourceBuffer[ik + ic] = pixelValue;
- */
- sourceBuffer[ik + ic] += r;
- }
- }
-
- if (run_idx == 0)
- {
- if (PrintI420VideoFrame(video_frame_, noiseFile) < 0) {
- return;
- }
- }
-
- t0 = TickTime::Now();
- ASSERT_GE(modifiedPixels = vpm_->Denoising(&video_frame_), 0);
- t1 = TickTime::Now();
- acc_ticks += (t1 - t0);
-
- if (run_idx == 0)
- {
-                if (PrintI420VideoFrame(video_frame_, denoiseFile) < 0) {
- return;
- }
- }
- }
- ASSERT_NE(0, feof(source_file_)) << "Error reading source file";
-
- printf("%u\n", static_cast<int>(acc_ticks.Microseconds() / frameNum));
- if (acc_ticks.Microseconds() < min_runtime || run_idx == 0)
- {
- min_runtime = acc_ticks.Microseconds();
- }
- avg_runtime += acc_ticks.Microseconds();
-
- rewind(source_file_);
- }
- ASSERT_EQ(0, fclose(denoiseFile));
- ASSERT_EQ(0, fclose(noiseFile));
- printf("\nAverage run time = %d us / frame\n",
- static_cast<int>(avg_runtime / frameNum / NumRuns));
- printf("Min run time = %d us / frame\n\n",
- static_cast<int>(min_runtime / frameNum));
-}
-
-} // namespace webrtc
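One subtlety in the deleted noise loop: r is a uint8_t, so r -= 8 wraps around rather than going negative, and the later sourceBuffer[ik + ic] += r wraps again; together the two wraps behave like adding a signed offset in [-8, 7] modulo 256. An equivalent, more explicit form of the arithmetic (offset is a name introduced here for illustration):

    const int offset = (rand() % 16) - 8;  // uniform in [-8, 7]
    sourceBuffer[ik + ic] =
        static_cast<uint8_t>(sourceBuffer[ik + ic] + offset);  // mod-256 wrap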
diff --git a/modules/video_processing/main/test/unit_test/video_processing_unittest.cc b/modules/video_processing/main/test/unit_test/video_processing_unittest.cc
index 973552c8..b1245038 100644
--- a/modules/video_processing/main/test/unit_test/video_processing_unittest.cc
+++ b/modules/video_processing/main/test/unit_test/video_processing_unittest.cc
@@ -82,8 +82,6 @@ TEST_F(VideoProcessingModuleTest, HandleNullBuffer) {
EXPECT_EQ(-1, vpm_->Deflickering(&videoFrame, &stats));
- EXPECT_EQ(-1, vpm_->Denoising(&videoFrame));
-
EXPECT_EQ(-3, vpm_->BrightnessDetection(videoFrame, stats));
}
@@ -113,8 +111,6 @@ TEST_F(VideoProcessingModuleTest, HandleBadSize) {
EXPECT_EQ(-1, vpm_->Deflickering(&video_frame_, &stats));
- EXPECT_EQ(-1, vpm_->Denoising(&video_frame_));
-
EXPECT_EQ(-3, vpm_->BrightnessDetection(video_frame_, stats));
EXPECT_EQ(VPM_PARAMETER_ERROR, vpm_->SetTargetResolution(0,0,0));
@@ -145,19 +141,6 @@ TEST_F(VideoProcessingModuleTest, IdenticalResultsAfterReset) {
ASSERT_EQ(frame_length_, fread(video_buffer.get(), 1, frame_length_,
source_file_));
- // Using ConvertToI420 to add stride to the image.
- EXPECT_EQ(0, ConvertToI420(kI420, video_buffer.get(), 0, 0,
- width_, height_,
- 0, kRotateNone, &video_frame_));
- video_frame2.CopyFrame(video_frame_);
- EXPECT_TRUE(CompareFrames(video_frame_, video_frame2));
- ASSERT_GE(vpm_->Denoising(&video_frame_), 0);
- vpm_->Reset();
- ASSERT_GE(vpm_->Denoising(&video_frame2), 0);
- EXPECT_TRUE(CompareFrames(video_frame_, video_frame2));
-
- ASSERT_EQ(frame_length_, fread(video_buffer.get(), 1, frame_length_,
- source_file_));
EXPECT_EQ(0, ConvertToI420(kI420, video_buffer.get(), 0, 0,
width_, height_,
0, kRotateNone, &video_frame_));