diff options
author | bjornv@webrtc.org <bjornv@webrtc.org> | 2014-10-13 13:01:13 +0000 |
---|---|---|
committer | bjornv@webrtc.org <bjornv@webrtc.org> | 2014-10-13 13:01:13 +0000 |
commit | cc05752091dd843c21de2f79db7616d9fbe0dfd5 (patch) | |
tree | 173f585094ce78cc30b41a8d54bdc01c58038d75 | |
parent | 7e367d395f8c8860054d1c5422561cfa4f90b67a (diff) | |
download | webrtc-cc05752091dd843c21de2f79db7616d9fbe0dfd5.tar.gz |
audio_processing: Replaced macro WEBRTC_SPL_RSHIFT_W16 with >>
The implementation of WEBRTC_SPL_RSHIFT_W16 is simply >>. This CL removes the macro usage in audio_processing and signal_processing.
Affected components:
* aecm
* agc
* nsx
Indirectly affected components (through signal_processing changes):
* codecs/cng
* codecs/isac/fix
* codecs/isac/main
BUG=3348,3353
TESTED=locally on Linux and trybots
R=kwiberg@webrtc.org
Review URL: https://webrtc-codereview.appspot.com/28699005
git-svn-id: http://webrtc.googlecode.com/svn/trunk/webrtc@7432 4adac7df-926f-26a2-2b94-8c16560cd09d
-rw-r--r-- | common_audio/signal_processing/refl_coef_to_lpc.c | 4 | ||||
-rw-r--r-- | common_audio/signal_processing/spl_sqrt.c | 12 | ||||
-rw-r--r-- | modules/audio_processing/aecm/aecm_core.c | 4 | ||||
-rw-r--r-- | modules/audio_processing/aecm/aecm_core_c.c | 5 | ||||
-rw-r--r-- | modules/audio_processing/aecm/aecm_core_mips.c | 2 | ||||
-rw-r--r-- | modules/audio_processing/agc/analog_agc.c | 6 | ||||
-rw-r--r-- | modules/audio_processing/agc/digital_agc.c | 2 | ||||
-rw-r--r-- | modules/audio_processing/ns/nsx_core.c | 12 | ||||
-rw-r--r-- | modules/audio_processing/ns/nsx_core_neon.c | 6 |
9 files changed, 24 insertions, 29 deletions
diff --git a/common_audio/signal_processing/refl_coef_to_lpc.c b/common_audio/signal_processing/refl_coef_to_lpc.c index 3d81778c..17055c9c 100644 --- a/common_audio/signal_processing/refl_coef_to_lpc.c +++ b/common_audio/signal_processing/refl_coef_to_lpc.c @@ -27,7 +27,7 @@ void WebRtcSpl_ReflCoefToLpc(const int16_t *k, int use_order, int16_t *a) kptr = k; *a = 4096; // i.e., (Word16_MAX >> 3)+1. *any = *a; - a[1] = WEBRTC_SPL_RSHIFT_W16((*k), 3); + a[1] = *k >> 3; for (m = 1; m < use_order; m++) { @@ -38,7 +38,7 @@ void WebRtcSpl_ReflCoefToLpc(const int16_t *k, int use_order, int16_t *a) anyptr = any; anyptr++; - any[m + 1] = WEBRTC_SPL_RSHIFT_W16((*kptr), 3); + any[m + 1] = *kptr >> 3; for (i = 0; i < m; i++) { *anyptr = (*aptr) diff --git a/common_audio/signal_processing/spl_sqrt.c b/common_audio/signal_processing/spl_sqrt.c index d4f808ca..fff73c03 100644 --- a/common_audio/signal_processing/spl_sqrt.c +++ b/common_audio/signal_processing/spl_sqrt.c @@ -17,6 +17,8 @@ #include "webrtc/common_audio/signal_processing/include/signal_processing_library.h" +#include <assert.h> + int32_t WebRtcSpl_SqrtLocal(int32_t in); int32_t WebRtcSpl_SqrtLocal(int32_t in) @@ -154,15 +156,15 @@ int32_t WebRtcSpl_Sqrt(int32_t value) x_norm = (int16_t)WEBRTC_SPL_RSHIFT_W32(A, 16); // x_norm = AH - nshift = WEBRTC_SPL_RSHIFT_W16(sh, 1); // nshift = sh>>1 - nshift = -nshift; // Negate the power for later de-normalization + nshift = (sh / 2); + assert(nshift >= 0); A = (int32_t)WEBRTC_SPL_LSHIFT_W32((int32_t)x_norm, 16); A = WEBRTC_SPL_ABS_W32(A); // A = abs(x_norm<<16) A = WebRtcSpl_SqrtLocal(A); // A = sqrt(A) - if ((-2 * nshift) == sh) - { // Even shift value case + if (2 * nshift == sh) { + // Even shift value case t16 = (int16_t)WEBRTC_SPL_RSHIFT_W32(A, 16); // t16 = AH @@ -178,7 +180,7 @@ int32_t WebRtcSpl_Sqrt(int32_t value) } A = A & ((int32_t)0x0000ffff); - A = (int32_t)WEBRTC_SPL_SHIFT_W32(A, nshift); // De-normalize the result + A >>= nshift; // De-normalize the result. 
return A; } diff --git a/modules/audio_processing/aecm/aecm_core.c b/modules/audio_processing/aecm/aecm_core.c index 61d4c0bb..c489731f 100644 --- a/modules/audio_processing/aecm/aecm_core.c +++ b/modules/audio_processing/aecm/aecm_core.c @@ -691,10 +691,10 @@ int16_t WebRtcAecm_AsymFilt(const int16_t filtOld, const int16_t inVal, retVal = filtOld; if (filtOld > inVal) { - retVal -= WEBRTC_SPL_RSHIFT_W16(filtOld - inVal, stepSizeNeg); + retVal -= (filtOld - inVal) >> stepSizeNeg; } else { - retVal += WEBRTC_SPL_RSHIFT_W16(inVal - filtOld, stepSizePos); + retVal += (inVal - filtOld) >> stepSizePos; } return retVal; diff --git a/modules/audio_processing/aecm/aecm_core_c.c b/modules/audio_processing/aecm/aecm_core_c.c index f608a285..d656f9b4 100644 --- a/modules/audio_processing/aecm/aecm_core_c.c +++ b/modules/audio_processing/aecm/aecm_core_c.c @@ -483,10 +483,7 @@ int WebRtcAecm_ProcessBlock(AecmCore_t * aecm, if (zeros32 > tmp16no1) { echoEst32Gained = WEBRTC_SPL_UMUL_32_16((uint32_t)aecm->echoFilt[i], - (uint16_t)WEBRTC_SPL_RSHIFT_W16( - supGain, - tmp16no1) - ); + supGain >> tmp16no1); } else { // Result in Q-(RESOLUTION_CHANNEL+RESOLUTION_SUPGAIN-16) diff --git a/modules/audio_processing/aecm/aecm_core_mips.c b/modules/audio_processing/aecm/aecm_core_mips.c index 4a07042f..fdd5b2c9 100644 --- a/modules/audio_processing/aecm/aecm_core_mips.c +++ b/modules/audio_processing/aecm/aecm_core_mips.c @@ -988,7 +988,7 @@ int WebRtcAecm_ProcessBlock(AecmCore_t* aecm, if (zeros32 > tmp16no1) { echoEst32Gained = WEBRTC_SPL_UMUL_32_16( (uint32_t)aecm->echoFilt[i], - (uint16_t)WEBRTC_SPL_RSHIFT_W16(supGain, tmp16no1)); + supGain >> tmp16no1); } else { // Result in Q-(RESOLUTION_CHANNEL+RESOLUTION_SUPGAIN-16) echoEst32Gained = WEBRTC_SPL_UMUL_32_16( diff --git a/modules/audio_processing/agc/analog_agc.c b/modules/audio_processing/agc/analog_agc.c index 357aff0a..93fe987e 100644 --- a/modules/audio_processing/agc/analog_agc.c +++ b/modules/audio_processing/agc/analog_agc.c @@ 
-296,7 +296,7 @@ int WebRtcAgc_AddMic(void *state, int16_t *in_mic, int16_t *in_mic_H, ptr = stt->Rxx16w32_array[0]; } - for (i = 0; i < WEBRTC_SPL_RSHIFT_W16(M, 1); i++) + for (i = 0; i < M / 2; i++) { if (stt->fs == 16000) { @@ -546,7 +546,7 @@ void WebRtcAgc_UpdateAgcThresholds(Agc_t *stt) { /* Lower the analog target level since we have reached its maximum */ zeros = WebRtcSpl_NormW32(stt->Rxx160_LPw32); - stt->targetIdxOffset = WEBRTC_SPL_RSHIFT_W16((3 * zeros) - stt->targetIdx - 2, 2); + stt->targetIdxOffset = (3 * zeros - stt->targetIdx - 2) / 4; } #endif @@ -696,7 +696,7 @@ void WebRtcAgc_SpeakerInactiveCtrl(Agc_t *stt) if (stt->vadMic.stdLongTerm < 4500) { /* Scale between min and max threshold */ - vadThresh += WEBRTC_SPL_RSHIFT_W16(4500 - stt->vadMic.stdLongTerm, 1); + vadThresh += (4500 - stt->vadMic.stdLongTerm) / 2; } /* stt->vadThreshold = (31 * stt->vadThreshold + vadThresh) / 32; */ diff --git a/modules/audio_processing/agc/digital_agc.c b/modules/audio_processing/agc/digital_agc.c index 82a46190..c99a78c2 100644 --- a/modules/audio_processing/agc/digital_agc.c +++ b/modules/audio_processing/agc/digital_agc.c @@ -507,7 +507,7 @@ int32_t WebRtcAgc_ProcessDigital(DigitalAgc_t *stt, const int16_t *in_near, { if (gate < 2500) { - gain_adj = WEBRTC_SPL_RSHIFT_W16(2500 - gate, 5); + gain_adj = (2500 - gate) >> 5; } else { gain_adj = 0; diff --git a/modules/audio_processing/ns/nsx_core.c b/modules/audio_processing/ns/nsx_core.c index ff72883e..d021f689 100644 --- a/modules/audio_processing/ns/nsx_core.c +++ b/modules/audio_processing/ns/nsx_core.c @@ -407,13 +407,11 @@ static void NoiseEstimationC(NsxInst_t* inst, // +=QUANTILE*delta/(inst->counter[s]+1) QUANTILE=0.25, =1 in Q2 // CounterDiv=1/(inst->counter[s]+1) in Q15 tmp16 += 2; - tmp16no1 = WEBRTC_SPL_RSHIFT_W16(tmp16, 2); - inst->noiseEstLogQuantile[offset + i] += tmp16no1; + inst->noiseEstLogQuantile[offset + i] += tmp16 / 4; } else { tmp16 += 1; - tmp16no1 = WEBRTC_SPL_RSHIFT_W16(tmp16, 1); // 
*(1-QUANTILE), in Q2 QUANTILE=0.25, 1-0.25=0.75=3 in Q2 - tmp16no2 = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(tmp16no1, 3, 1); + tmp16no2 = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(tmp16 / 2, 3, 1); inst->noiseEstLogQuantile[offset + i] -= tmp16no2; if (inst->noiseEstLogQuantile[offset + i] < logval) { // This is the smallest fixed point representation we can @@ -611,7 +609,7 @@ void WebRtcNsx_CalcParametricNoiseEstimate(NsxInst_t* inst, // Piecewise linear approximation of 'b' in // 2^(int_part+frac_part) = 2^int_part * (1 + b) // 'b' is given in Q11 and below stored in frac_part. - if (WEBRTC_SPL_RSHIFT_W16(frac_part, 10)) { + if (frac_part >> 10) { // Upper fractional part tmp32no2 = WEBRTC_SPL_MUL_16_16(2048 - frac_part, 1244); // Q21 tmp32no2 = 2048 - WEBRTC_SPL_RSHIFT_W32(tmp32no2, 10); @@ -669,7 +667,7 @@ int32_t WebRtcNsx_InitCore(NsxInst_t* inst, uint32_t fs) { inst->maxLrt = 0x0080000; inst->minLrt = 104858; } - inst->anaLen2 = WEBRTC_SPL_RSHIFT_W16(inst->anaLen, 1); + inst->anaLen2 = inst->anaLen / 2; inst->magnLen = inst->anaLen2 + 1; if (inst->real_fft != NULL) { @@ -1410,7 +1408,7 @@ void WebRtcNsx_DataAnalysis(NsxInst_t* inst, short* speechFrame, uint16_t* magnU tmpU32no1 >>= zeros; } tmp_2_w32 -= (int32_t)WEBRTC_SPL_UMUL_32_16(tmpU32no1, tmp_u16); // Q(11-zeros) - matrix_determinant = WEBRTC_SPL_RSHIFT_W16(matrix_determinant, zeros); // Q(-zeros) + matrix_determinant >>= zeros; // Q(-zeros) tmp_2_w32 = WebRtcSpl_DivW32W16(tmp_2_w32, matrix_determinant); // Q11 tmp_2_w32 += (int32_t)net_norm << 11; // Q11 if (tmp_2_w32 < 0) { diff --git a/modules/audio_processing/ns/nsx_core_neon.c b/modules/audio_processing/ns/nsx_core_neon.c index 0a72a08e..f88a59db 100644 --- a/modules/audio_processing/ns/nsx_core_neon.c +++ b/modules/audio_processing/ns/nsx_core_neon.c @@ -313,13 +313,11 @@ void WebRtcNsx_NoiseEstimationNeon(NsxInst_t* inst, // +=QUANTILE*delta/(inst->counter[s]+1) QUANTILE=0.25, =1 in Q2 // CounterDiv=1/(inst->counter[s]+1) in Q15 tmp16 += 2; - tmp16no1 = 
WEBRTC_SPL_RSHIFT_W16(tmp16, 2); - inst->noiseEstLogQuantile[offset + i] += tmp16no1; + inst->noiseEstLogQuantile[offset + i] += tmp16 / 4; } else { tmp16 += 1; - tmp16no1 = WEBRTC_SPL_RSHIFT_W16(tmp16, 1); // *(1-QUANTILE), in Q2 QUANTILE=0.25, 1-0.25=0.75=3 in Q2 - tmp16no2 = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(tmp16no1, 3, 1); + tmp16no2 = (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(tmp16 / 2, 3, 1); inst->noiseEstLogQuantile[offset + i] -= tmp16no2; if (inst->noiseEstLogQuantile[offset + i] < logval) { // logval is the smallest fixed point representation we can have. |