author     Android Chromium Automerger <chromium-automerger@android>  2014-07-16 14:53:18 +0000
committer  Android Chromium Automerger <chromium-automerger@android>  2014-07-16 14:53:18 +0000
commit     f3d27028ce9a280c02c286f9b98adf1e476c03d2 (patch)
tree       dca7e434477e7454b81894977f75e455af4cc0dc /modules
parent     477e6bce6816b8bb433c2e6c44b377bdc766a3b3 (diff)
parent     82383d9b14ff8e5fedf5a70229eb0ac6b512909a (diff)
download   webrtc-f3d27028ce9a280c02c286f9b98adf1e476c03d2.tar.gz
Merge third_party/webrtc from https://chromium.googlesource.com/external/webrtc/trunk/webrtc.git at 82383d9b14ff8e5fedf5a70229eb0ac6b512909a
This commit was generated by merge_from_chromium.py.

Change-Id: I8c578be801fa38420e875a4a8cef17e7522252e2
Diffstat (limited to 'modules')
-rw-r--r--  modules/audio_coding/codecs/isac/main/source/isac.c | 4
-rw-r--r--  modules/audio_coding/main/acm2/acm_generic_codec.cc | 2
-rw-r--r--  modules/audio_coding/main/acm2/acm_opus.cc | 2
-rw-r--r--  modules/audio_coding/main/acm2/acm_speex.cc | 2
-rw-r--r--  modules/audio_device/win/audio_device_core_win.cc | 6
-rw-r--r--  modules/audio_processing/aec/aec_common.h | 32
-rw-r--r--  modules/audio_processing/aec/aec_core.c | 33
-rw-r--r--  modules/audio_processing/aec/aec_core_internal.h | 1
-rw-r--r--  modules/audio_processing/aec/aec_core_neon.c | 292
-rw-r--r--  modules/audio_processing/aec/aec_core_sse2.c | 6
-rw-r--r--  modules/audio_processing/aec/aec_rdft.h | 10
-rw-r--r--  modules/audio_processing/aec/aec_rdft_neon.c | 112
-rw-r--r--  modules/audio_processing/agc/digital_agc.c | 2
-rw-r--r--  modules/audio_processing/audio_buffer.h | 1
-rw-r--r--  modules/desktop_capture/BUILD.gn | 4
-rw-r--r--  modules/desktop_capture/desktop_capture_options.cc | 2
-rw-r--r--  modules/interface/module_common_types.h | 26
-rw-r--r--  modules/pacing/include/paced_sender.h | 5
-rw-r--r--  modules/pacing/paced_sender.cc | 13
-rw-r--r--  modules/pacing/paced_sender_unittest.cc | 29
-rw-r--r--  modules/remote_bitrate_estimator/BUILD.gn | 2
-rw-r--r--  modules/rtp_rtcp/interface/rtp_rtcp.h | 17
-rw-r--r--  modules/rtp_rtcp/mocks/mock_rtp_rtcp.h | 2
-rw-r--r--  modules/rtp_rtcp/source/rtcp_format_remb_unittest.cc | 1
-rw-r--r--  modules/rtp_rtcp/source/rtcp_sender.cc | 83
-rw-r--r--  modules/rtp_rtcp/source/rtcp_sender.h | 182
-rw-r--r--  modules/rtp_rtcp/source/rtcp_sender_unittest.cc | 1
-rw-r--r--  modules/rtp_rtcp/source/rtp_format_vp8.cc | 2
-rw-r--r--  modules/rtp_rtcp/source/rtp_rtcp_impl.cc | 28
-rw-r--r--  modules/rtp_rtcp/source/rtp_rtcp_impl.h | 7
-rw-r--r--  modules/rtp_rtcp/source/rtp_sender.cc | 100
-rw-r--r--  modules/rtp_rtcp/source/rtp_sender.h | 19
-rw-r--r--  modules/rtp_rtcp/source/rtp_sender_unittest.cc | 52
-rw-r--r--  modules/rtp_rtcp/source/rtp_sender_video.cc | 11
-rw-r--r--  modules/rtp_rtcp/source/tmmbr_help.cc | 279
-rw-r--r--  modules/rtp_rtcp/test/testAPI/test_api.cc | 4
-rw-r--r--  modules/video_capture/android/video_capture_android.cc | 7
-rw-r--r--  modules/video_coding/main/source/timing.cc | 1
-rw-r--r--  modules/video_coding/main/source/timing.h | 29
-rw-r--r--  modules/video_coding/main/source/video_sender_unittest.cc | 12
40 files changed, 878 insertions, 545 deletions
diff --git a/modules/audio_coding/codecs/isac/main/source/isac.c b/modules/audio_coding/codecs/isac/main/source/isac.c
index fa54a8d8..d47eb80b 100644
--- a/modules/audio_coding/codecs/isac/main/source/isac.c
+++ b/modules/audio_coding/codecs/isac/main/source/isac.c
@@ -2243,8 +2243,6 @@ int16_t WebRtcIsac_SetEncSampRate(ISACStruct* ISAC_main_inst,
} else {
ISACUBStruct* instUB = &(instISAC->instUB);
ISACLBStruct* instLB = &(instISAC->instLB);
- double bottleneckLB;
- double bottleneckUB;
int32_t bottleneck = instISAC->bottleneck;
int16_t codingMode = instISAC->codingMode;
int16_t frameSizeMs = instLB->ISACencLB_obj.new_framelength /
@@ -2263,6 +2261,8 @@ int16_t WebRtcIsac_SetEncSampRate(ISACStruct* ISAC_main_inst,
instISAC->maxRateBytesPer30Ms = STREAM_SIZE_MAX_30;
} else if ((encoder_operational_rate == kIsacSuperWideband) &&
(instISAC->encoderSamplingRateKHz == kIsacWideband)) {
+ double bottleneckLB = 0;
+ double bottleneckUB = 0;
if (codingMode == 1) {
WebRtcIsac_RateAllocation(bottleneck, &bottleneckLB, &bottleneckUB,
&(instISAC->bandwidthKHz));
diff --git a/modules/audio_coding/main/acm2/acm_generic_codec.cc b/modules/audio_coding/main/acm2/acm_generic_codec.cc
index d16d1d38..db776d2e 100644
--- a/modules/audio_coding/main/acm2/acm_generic_codec.cc
+++ b/modules/audio_coding/main/acm2/acm_generic_codec.cc
@@ -838,7 +838,7 @@ int16_t ACMGenericCodec::ProcessFrameVADDTX(uint8_t* bitstream,
// Calculate number of samples in 10 ms blocks, and number ms in one frame.
int16_t samples_in_10ms = static_cast<int16_t>(freq_hz / 100);
int32_t frame_len_ms = static_cast<int32_t>(frame_len_smpl_) * 1000 / freq_hz;
- int16_t status;
+ int16_t status = -1;
// Vector for storing maximum 30 ms of mono audio at 48 kHz.
int16_t audio[1440];
diff --git a/modules/audio_coding/main/acm2/acm_opus.cc b/modules/audio_coding/main/acm2/acm_opus.cc
index 544c932f..638c72a9 100644
--- a/modules/audio_coding/main/acm2/acm_opus.cc
+++ b/modules/audio_coding/main/acm2/acm_opus.cc
@@ -80,7 +80,7 @@ ACMOpus::ACMOpus(int16_t codec_id)
if (codec_id_ != ACMCodecDB::kOpus) {
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, unique_id_,
"Wrong codec id for Opus.");
- sample_freq_ = -1;
+ sample_freq_ = 0xFFFF;
bitrate_ = -1;
}
return;
diff --git a/modules/audio_coding/main/acm2/acm_speex.cc b/modules/audio_coding/main/acm2/acm_speex.cc
index 84a0592a..c4d7628d 100644
--- a/modules/audio_coding/main/acm2/acm_speex.cc
+++ b/modules/audio_coding/main/acm2/acm_speex.cc
@@ -30,7 +30,7 @@ ACMSPEEX::ACMSPEEX(int16_t /* codec_id */)
vbr_enabled_(false),
encoding_rate_(-1),
sampling_frequency_(-1),
- samples_in_20ms_audio_(-1) {
+ samples_in_20ms_audio_(0xFFFF) {
return;
}
diff --git a/modules/audio_device/win/audio_device_core_win.cc b/modules/audio_device/win/audio_device_core_win.cc
index 32b5e49a..0a36174b 100644
--- a/modules/audio_device/win/audio_device_core_win.cc
+++ b/modules/audio_device/win/audio_device_core_win.cc
@@ -20,12 +20,6 @@
#include "webrtc/modules/audio_device/audio_device_config.h"
-#if defined(WEBRTC_WINDOWS_CORE_AUDIO_BUILD)
-#pragma message(">> INFO: WEBRTC_WINDOWS_CORE_AUDIO_BUILD is defined")
-#else
-#pragma message(">> INFO: WEBRTC_WINDOWS_CORE_AUDIO_BUILD is *not* defined")
-#endif
-
#ifdef WEBRTC_WINDOWS_CORE_AUDIO_BUILD
#include "webrtc/modules/audio_device/win/audio_device_core_win.h"
diff --git a/modules/audio_processing/aec/aec_common.h b/modules/audio_processing/aec/aec_common.h
new file mode 100644
index 00000000..1e24ca99
--- /dev/null
+++ b/modules/audio_processing/aec/aec_common.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_AEC_AEC_COMMON_H_
+#define WEBRTC_MODULES_AUDIO_PROCESSING_AEC_AEC_COMMON_H_
+
+#include "webrtc/typedefs.h"
+
+#ifdef _MSC_VER /* visual c++ */
+#define ALIGN16_BEG __declspec(align(16))
+#define ALIGN16_END
+#else /* gcc or icc */
+#define ALIGN16_BEG
+#define ALIGN16_END __attribute__((aligned(16)))
+#endif
+
+extern ALIGN16_BEG const float ALIGN16_END WebRtcAec_sqrtHanning[65];
+extern ALIGN16_BEG const float ALIGN16_END WebRtcAec_weightCurve[65];
+extern ALIGN16_BEG const float ALIGN16_END WebRtcAec_overDriveCurve[65];
+extern const float WebRtcAec_kExtendedSmoothingCoefficients[2][2];
+extern const float WebRtcAec_kNormalSmoothingCoefficients[2][2];
+extern const float WebRtcAec_kMinFarendPSD;
+
+#endif // WEBRTC_MODULES_AUDIO_PROCESSING_AEC_AEC_COMMON_H_
+
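The new aec_common.h above centralizes the ALIGN16_BEG/ALIGN16_END macros so aligned tables such as WebRtcAec_sqrtHanning can be shared by the C, SSE2, and NEON paths instead of being redeclared per file. A minimal sketch of the pattern, using a hypothetical table name rather than anything from this change:

/* Sketch only: defining and checking a 16-byte aligned lookup table with the
 * ALIGN16_BEG/ALIGN16_END macros from aec_common.h. kExampleTable is a
 * hypothetical name, not part of the WebRTC code. */
#include <stdint.h>
#include <stdio.h>

#ifdef _MSC_VER /* visual c++ */
#define ALIGN16_BEG __declspec(align(16))
#define ALIGN16_END
#else /* gcc or icc */
#define ALIGN16_BEG
#define ALIGN16_END __attribute__((aligned(16)))
#endif

/* 16-byte alignment lets SIMD loads (e.g. _mm_load_ps or vld1q_f32) read the
 * table without unaligned-access penalties. */
ALIGN16_BEG const float ALIGN16_END kExampleTable[4] = {
  0.0f, 0.25f, 0.5f, 0.75f
};

int main(void) {
  /* Prints 0 when the table is 16-byte aligned. */
  printf("alignment remainder: %u\n",
         (unsigned)((uintptr_t)kExampleTable % 16));
  return 0;
}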
diff --git a/modules/audio_processing/aec/aec_core.c b/modules/audio_processing/aec/aec_core.c
index 2fd298c0..139d37d7 100644
--- a/modules/audio_processing/aec/aec_core.c
+++ b/modules/audio_processing/aec/aec_core.c
@@ -21,6 +21,7 @@
#include <string.h>
#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
+#include "webrtc/modules/audio_processing/aec/aec_common.h"
#include "webrtc/modules/audio_processing/aec/aec_core_internal.h"
#include "webrtc/modules/audio_processing/aec/aec_rdft.h"
#include "webrtc/modules/audio_processing/utility/delay_estimator_wrapper.h"
@@ -45,7 +46,7 @@ static const int freqAvgIc = PART_LEN / 2;
// Matlab code to produce table:
// win = sqrt(hanning(63)); win = [0 ; win(1:32)];
// fprintf(1, '\t%.14f, %.14f, %.14f,\n', win);
-static const float sqrtHanning[65] = {
+ALIGN16_BEG const float ALIGN16_END WebRtcAec_sqrtHanning[65] = {
0.00000000000000f, 0.02454122852291f, 0.04906767432742f, 0.07356456359967f,
0.09801714032956f, 0.12241067519922f, 0.14673047445536f, 0.17096188876030f,
0.19509032201613f, 0.21910124015687f, 0.24298017990326f, 0.26671275747490f,
@@ -99,10 +100,10 @@ static const float kTargetSupp[3] = {-6.9f, -11.5f, -18.4f};
// Two sets of parameters, one for the extended filter mode.
static const float kExtendedMinOverDrive[3] = {3.0f, 6.0f, 15.0f};
static const float kNormalMinOverDrive[3] = {1.0f, 2.0f, 5.0f};
-static const float kExtendedSmoothingCoefficients[2][2] = {{0.9f, 0.1f},
- {0.92f, 0.08f}};
-static const float kNormalSmoothingCoefficients[2][2] = {{0.9f, 0.1f},
- {0.93f, 0.07f}};
+const float WebRtcAec_kExtendedSmoothingCoefficients[2][2] = {{0.9f, 0.1f},
+ {0.92f, 0.08f}};
+const float WebRtcAec_kNormalSmoothingCoefficients[2][2] = {{0.9f, 0.1f},
+ {0.93f, 0.07f}};
// Number of partitions forming the NLP's "preferred" bands.
enum {
@@ -442,7 +443,7 @@ static int PartitionDelay(const AecCore* aec) {
}
// Threshold to protect against the ill-effects of a zero far-end.
-static const float kMinFarendPSD = 15;
+const float WebRtcAec_kMinFarendPSD = 15;
// Updates the following smoothed Power Spectral Densities (PSD):
// - sd : near-end
@@ -459,8 +460,8 @@ static void SmoothedPSD(AecCore* aec,
float xfw[2][PART_LEN1]) {
// Power estimate smoothing coefficients.
const float* ptrGCoh = aec->extended_filter_enabled
- ? kExtendedSmoothingCoefficients[aec->mult - 1]
- : kNormalSmoothingCoefficients[aec->mult - 1];
+ ? WebRtcAec_kExtendedSmoothingCoefficients[aec->mult - 1]
+ : WebRtcAec_kNormalSmoothingCoefficients[aec->mult - 1];
int i;
float sdSum = 0, seSum = 0;
@@ -476,7 +477,8 @@ static void SmoothedPSD(AecCore* aec,
aec->sx[i] =
ptrGCoh[0] * aec->sx[i] +
ptrGCoh[1] * WEBRTC_SPL_MAX(
- xfw[0][i] * xfw[0][i] + xfw[1][i] * xfw[1][i], kMinFarendPSD);
+ xfw[0][i] * xfw[0][i] + xfw[1][i] * xfw[1][i],
+ WebRtcAec_kMinFarendPSD);
aec->sde[i][0] =
ptrGCoh[0] * aec->sde[i][0] +
@@ -511,8 +513,9 @@ static void SmoothedPSD(AecCore* aec,
__inline static void WindowData(float* x_windowed, const float* x) {
int i;
for (i = 0; i < PART_LEN; i++) {
- x_windowed[i] = x[i] * sqrtHanning[i];
- x_windowed[PART_LEN + i] = x[PART_LEN + i] * sqrtHanning[PART_LEN - i];
+ x_windowed[i] = x[i] * WebRtcAec_sqrtHanning[i];
+ x_windowed[PART_LEN + i] =
+ x[PART_LEN + i] * WebRtcAec_sqrtHanning[PART_LEN - i];
}
}
@@ -1347,10 +1350,10 @@ static void NonLinearProcessing(AecCore* aec, float* output, float* outputH) {
scale = 2.0f / PART_LEN2;
for (i = 0; i < PART_LEN; i++) {
fft[i] *= scale; // fft scaling
- fft[i] = fft[i] * sqrtHanning[i] + aec->outBuf[i];
+ fft[i] = fft[i] * WebRtcAec_sqrtHanning[i] + aec->outBuf[i];
fft[PART_LEN + i] *= scale; // fft scaling
- aec->outBuf[i] = fft[PART_LEN + i] * sqrtHanning[PART_LEN - i];
+ aec->outBuf[i] = fft[PART_LEN + i] * WebRtcAec_sqrtHanning[PART_LEN - i];
// Saturate output to keep it in the allowed range.
output[i] = WEBRTC_SPL_SAT(
@@ -1737,8 +1740,8 @@ static void TimeToFrequency(float time_data[PART_LEN2],
// TODO(bjornv): Should we have a different function/wrapper for windowed FFT?
if (window) {
for (i = 0; i < PART_LEN; i++) {
- time_data[i] *= sqrtHanning[i];
- time_data[PART_LEN + i] *= sqrtHanning[PART_LEN - i];
+ time_data[i] *= WebRtcAec_sqrtHanning[i];
+ time_data[PART_LEN + i] *= WebRtcAec_sqrtHanning[PART_LEN - i];
}
}
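The aec_core.c changes above rename the smoothing coefficient tables and the far-end floor so the NEON path can share them; the per-bin update in SmoothedPSD() itself is a first-order recursion with the far-end power clamped from below. A schematic restatement of that update with simplified, hypothetical names:

/* Sketch of the per-bin PSD smoothing used in SmoothedPSD() above:
 *   sx[i] = g0 * sx[i] + g1 * max(|X[i]|^2, min_psd)
 * Variable names are simplified; this is not the actual WebRTC API. */
#include <stddef.h>

static void smooth_farend_psd(float* sx,           /* smoothed far-end PSD */
                              const float* xr,     /* far-end real part */
                              const float* xi,     /* far-end imaginary part */
                              size_t num_bins,
                              float g0, float g1,  /* e.g. 0.9f and 0.1f */
                              float min_psd) {     /* e.g. 15.0f */
  for (size_t i = 0; i < num_bins; ++i) {
    float power = xr[i] * xr[i] + xi[i] * xi[i];
    if (power < min_psd)
      power = min_psd;  /* guard against a zero far-end signal */
    sx[i] = g0 * sx[i] + g1 * power;
  }
}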
diff --git a/modules/audio_processing/aec/aec_core_internal.h b/modules/audio_processing/aec/aec_core_internal.h
index 372b4274..8e5ee5cb 100644
--- a/modules/audio_processing/aec/aec_core_internal.h
+++ b/modules/audio_processing/aec/aec_core_internal.h
@@ -15,6 +15,7 @@
#include <stdio.h>
#endif
+#include "webrtc/modules/audio_processing/aec/aec_common.h"
#include "webrtc/modules/audio_processing/aec/aec_core.h"
#include "webrtc/modules/audio_processing/utility/ring_buffer.h"
#include "webrtc/typedefs.h"
diff --git a/modules/audio_processing/aec/aec_core_neon.c b/modules/audio_processing/aec/aec_core_neon.c
index 13ca47af..a21a954b 100644
--- a/modules/audio_processing/aec/aec_core_neon.c
+++ b/modules/audio_processing/aec/aec_core_neon.c
@@ -14,12 +14,12 @@
* Based on aec_core_sse2.c.
*/
-#include "webrtc/modules/audio_processing/aec/aec_core.h"
-
#include <arm_neon.h>
#include <math.h>
#include <string.h> // memset
+#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
+#include "webrtc/modules/audio_processing/aec/aec_common.h"
#include "webrtc/modules/audio_processing/aec/aec_core_internal.h"
#include "webrtc/modules/audio_processing/aec/aec_rdft.h"
@@ -250,9 +250,6 @@ static void FilterAdaptationNEON(AecCore* aec,
}
}
-extern const float WebRtcAec_weightCurve[65];
-extern const float WebRtcAec_overDriveCurve[65];
-
static float32x4_t vpowq_f32(float32x4_t a, float32x4_t b) {
// a^b = exp2(b * log2(a))
// exp2(x) and log2(x) are calculated using polynomial approximations.
@@ -442,10 +439,295 @@ static void OverdriveAndSuppressNEON(AecCore* aec,
}
}
+static int PartitionDelay(const AecCore* aec) {
+ // Measures the energy in each filter partition and returns the partition with
+ // highest energy.
+ // TODO(bjornv): Spread computational cost by computing one partition per
+ // block?
+ float wfEnMax = 0;
+ int i;
+ int delay = 0;
+
+ for (i = 0; i < aec->num_partitions; i++) {
+ int j;
+ int pos = i * PART_LEN1;
+ float wfEn = 0;
+ float32x4_t vec_wfEn = vdupq_n_f32(0.0f);
+ // vectorized code (four at once)
+ for (j = 0; j + 3 < PART_LEN1; j += 4) {
+ const float32x4_t vec_wfBuf0 = vld1q_f32(&aec->wfBuf[0][pos + j]);
+ const float32x4_t vec_wfBuf1 = vld1q_f32(&aec->wfBuf[1][pos + j]);
+ vec_wfEn = vmlaq_f32(vec_wfEn, vec_wfBuf0, vec_wfBuf0);
+ vec_wfEn = vmlaq_f32(vec_wfEn, vec_wfBuf1, vec_wfBuf1);
+ }
+ {
+ float32x2_t vec_total;
+ // A B C D
+ vec_total = vpadd_f32(vget_low_f32(vec_wfEn), vget_high_f32(vec_wfEn));
+ // A+B C+D
+ vec_total = vpadd_f32(vec_total, vec_total);
+ // A+B+C+D A+B+C+D
+ wfEn = vget_lane_f32(vec_total, 0);
+ }
+
+ // scalar code for the remaining items.
+ for (; j < PART_LEN1; j++) {
+ wfEn += aec->wfBuf[0][pos + j] * aec->wfBuf[0][pos + j] +
+ aec->wfBuf[1][pos + j] * aec->wfBuf[1][pos + j];
+ }
+
+ if (wfEn > wfEnMax) {
+ wfEnMax = wfEn;
+ delay = i;
+ }
+ }
+ return delay;
+}
+
+// Updates the following smoothed Power Spectral Densities (PSD):
+// - sd : near-end
+// - se : residual echo
+// - sx : far-end
+// - sde : cross-PSD of near-end and residual echo
+// - sxd : cross-PSD of near-end and far-end
+//
+// In addition to updating the PSDs, the filter divergence state is determined,
+// and corrective actions are taken based on it.
+static void SmoothedPSD(AecCore* aec,
+ float efw[2][PART_LEN1],
+ float dfw[2][PART_LEN1],
+ float xfw[2][PART_LEN1]) {
+ // Power estimate smoothing coefficients.
+ const float* ptrGCoh = aec->extended_filter_enabled
+ ? WebRtcAec_kExtendedSmoothingCoefficients[aec->mult - 1]
+ : WebRtcAec_kNormalSmoothingCoefficients[aec->mult - 1];
+ int i;
+ float sdSum = 0, seSum = 0;
+ const float32x4_t vec_15 = vdupq_n_f32(WebRtcAec_kMinFarendPSD);
+ float32x4_t vec_sdSum = vdupq_n_f32(0.0f);
+ float32x4_t vec_seSum = vdupq_n_f32(0.0f);
+
+ for (i = 0; i + 3 < PART_LEN1; i += 4) {
+ const float32x4_t vec_dfw0 = vld1q_f32(&dfw[0][i]);
+ const float32x4_t vec_dfw1 = vld1q_f32(&dfw[1][i]);
+ const float32x4_t vec_efw0 = vld1q_f32(&efw[0][i]);
+ const float32x4_t vec_efw1 = vld1q_f32(&efw[1][i]);
+ const float32x4_t vec_xfw0 = vld1q_f32(&xfw[0][i]);
+ const float32x4_t vec_xfw1 = vld1q_f32(&xfw[1][i]);
+ float32x4_t vec_sd = vmulq_n_f32(vld1q_f32(&aec->sd[i]), ptrGCoh[0]);
+ float32x4_t vec_se = vmulq_n_f32(vld1q_f32(&aec->se[i]), ptrGCoh[0]);
+ float32x4_t vec_sx = vmulq_n_f32(vld1q_f32(&aec->sx[i]), ptrGCoh[0]);
+ float32x4_t vec_dfw_sumsq = vmulq_f32(vec_dfw0, vec_dfw0);
+ float32x4_t vec_efw_sumsq = vmulq_f32(vec_efw0, vec_efw0);
+ float32x4_t vec_xfw_sumsq = vmulq_f32(vec_xfw0, vec_xfw0);
+
+ vec_dfw_sumsq = vmlaq_f32(vec_dfw_sumsq, vec_dfw1, vec_dfw1);
+ vec_efw_sumsq = vmlaq_f32(vec_efw_sumsq, vec_efw1, vec_efw1);
+ vec_xfw_sumsq = vmlaq_f32(vec_xfw_sumsq, vec_xfw1, vec_xfw1);
+ vec_xfw_sumsq = vmaxq_f32(vec_xfw_sumsq, vec_15);
+ vec_sd = vmlaq_n_f32(vec_sd, vec_dfw_sumsq, ptrGCoh[1]);
+ vec_se = vmlaq_n_f32(vec_se, vec_efw_sumsq, ptrGCoh[1]);
+ vec_sx = vmlaq_n_f32(vec_sx, vec_xfw_sumsq, ptrGCoh[1]);
+
+ vst1q_f32(&aec->sd[i], vec_sd);
+ vst1q_f32(&aec->se[i], vec_se);
+ vst1q_f32(&aec->sx[i], vec_sx);
+
+ {
+ float32x4x2_t vec_sde = vld2q_f32(&aec->sde[i][0]);
+ float32x4_t vec_dfwefw0011 = vmulq_f32(vec_dfw0, vec_efw0);
+ float32x4_t vec_dfwefw0110 = vmulq_f32(vec_dfw0, vec_efw1);
+ vec_sde.val[0] = vmulq_n_f32(vec_sde.val[0], ptrGCoh[0]);
+ vec_sde.val[1] = vmulq_n_f32(vec_sde.val[1], ptrGCoh[0]);
+ vec_dfwefw0011 = vmlaq_f32(vec_dfwefw0011, vec_dfw1, vec_efw1);
+ vec_dfwefw0110 = vmlsq_f32(vec_dfwefw0110, vec_dfw1, vec_efw0);
+ vec_sde.val[0] = vmlaq_n_f32(vec_sde.val[0], vec_dfwefw0011, ptrGCoh[1]);
+ vec_sde.val[1] = vmlaq_n_f32(vec_sde.val[1], vec_dfwefw0110, ptrGCoh[1]);
+ vst2q_f32(&aec->sde[i][0], vec_sde);
+ }
+
+ {
+ float32x4x2_t vec_sxd = vld2q_f32(&aec->sxd[i][0]);
+ float32x4_t vec_dfwxfw0011 = vmulq_f32(vec_dfw0, vec_xfw0);
+ float32x4_t vec_dfwxfw0110 = vmulq_f32(vec_dfw0, vec_xfw1);
+ vec_sxd.val[0] = vmulq_n_f32(vec_sxd.val[0], ptrGCoh[0]);
+ vec_sxd.val[1] = vmulq_n_f32(vec_sxd.val[1], ptrGCoh[0]);
+ vec_dfwxfw0011 = vmlaq_f32(vec_dfwxfw0011, vec_dfw1, vec_xfw1);
+ vec_dfwxfw0110 = vmlsq_f32(vec_dfwxfw0110, vec_dfw1, vec_xfw0);
+ vec_sxd.val[0] = vmlaq_n_f32(vec_sxd.val[0], vec_dfwxfw0011, ptrGCoh[1]);
+ vec_sxd.val[1] = vmlaq_n_f32(vec_sxd.val[1], vec_dfwxfw0110, ptrGCoh[1]);
+ vst2q_f32(&aec->sxd[i][0], vec_sxd);
+ }
+
+ vec_sdSum = vaddq_f32(vec_sdSum, vec_sd);
+ vec_seSum = vaddq_f32(vec_seSum, vec_se);
+ }
+ {
+ float32x2_t vec_sdSum_total;
+ float32x2_t vec_seSum_total;
+ // A B C D
+ vec_sdSum_total = vpadd_f32(vget_low_f32(vec_sdSum),
+ vget_high_f32(vec_sdSum));
+ vec_seSum_total = vpadd_f32(vget_low_f32(vec_seSum),
+ vget_high_f32(vec_seSum));
+ // A+B C+D
+ vec_sdSum_total = vpadd_f32(vec_sdSum_total, vec_sdSum_total);
+ vec_seSum_total = vpadd_f32(vec_seSum_total, vec_seSum_total);
+ // A+B+C+D A+B+C+D
+ sdSum = vget_lane_f32(vec_sdSum_total, 0);
+ seSum = vget_lane_f32(vec_seSum_total, 0);
+ }
+
+ // scalar code for the remaining items.
+ for (; i < PART_LEN1; i++) {
+ aec->sd[i] = ptrGCoh[0] * aec->sd[i] +
+ ptrGCoh[1] * (dfw[0][i] * dfw[0][i] + dfw[1][i] * dfw[1][i]);
+ aec->se[i] = ptrGCoh[0] * aec->se[i] +
+ ptrGCoh[1] * (efw[0][i] * efw[0][i] + efw[1][i] * efw[1][i]);
+ // We threshold here to protect against the ill-effects of a zero farend.
+ // The threshold is not arbitrarily chosen, but balances protection and
+ // adverse interaction with the algorithm's tuning.
+ // TODO(bjornv): investigate further why this is so sensitive.
+ aec->sx[i] =
+ ptrGCoh[0] * aec->sx[i] +
+ ptrGCoh[1] * WEBRTC_SPL_MAX(
+ xfw[0][i] * xfw[0][i] + xfw[1][i] * xfw[1][i],
+ WebRtcAec_kMinFarendPSD);
+
+ aec->sde[i][0] =
+ ptrGCoh[0] * aec->sde[i][0] +
+ ptrGCoh[1] * (dfw[0][i] * efw[0][i] + dfw[1][i] * efw[1][i]);
+ aec->sde[i][1] =
+ ptrGCoh[0] * aec->sde[i][1] +
+ ptrGCoh[1] * (dfw[0][i] * efw[1][i] - dfw[1][i] * efw[0][i]);
+
+ aec->sxd[i][0] =
+ ptrGCoh[0] * aec->sxd[i][0] +
+ ptrGCoh[1] * (dfw[0][i] * xfw[0][i] + dfw[1][i] * xfw[1][i]);
+ aec->sxd[i][1] =
+ ptrGCoh[0] * aec->sxd[i][1] +
+ ptrGCoh[1] * (dfw[0][i] * xfw[1][i] - dfw[1][i] * xfw[0][i]);
+
+ sdSum += aec->sd[i];
+ seSum += aec->se[i];
+ }
+
+ // Divergent filter safeguard.
+ aec->divergeState = (aec->divergeState ? 1.05f : 1.0f) * seSum > sdSum;
+
+ if (aec->divergeState)
+ memcpy(efw, dfw, sizeof(efw[0][0]) * 2 * PART_LEN1);
+
+ // Reset if error is significantly larger than nearend (13 dB).
+ if (!aec->extended_filter_enabled && seSum > (19.95f * sdSum))
+ memset(aec->wfBuf, 0, sizeof(aec->wfBuf));
+}
+
+// Window time domain data to be used by the fft.
+__inline static void WindowData(float* x_windowed, const float* x) {
+ int i;
+ for (i = 0; i < PART_LEN; i += 4) {
+ const float32x4_t vec_Buf1 = vld1q_f32(&x[i]);
+ const float32x4_t vec_Buf2 = vld1q_f32(&x[PART_LEN + i]);
+ const float32x4_t vec_sqrtHanning = vld1q_f32(&WebRtcAec_sqrtHanning[i]);
+ // A B C D
+ float32x4_t vec_sqrtHanning_rev =
+ vld1q_f32(&WebRtcAec_sqrtHanning[PART_LEN - i - 3]);
+ // B A D C
+ vec_sqrtHanning_rev = vrev64q_f32(vec_sqrtHanning_rev);
+ // D C B A
+ vec_sqrtHanning_rev = vcombine_f32(vget_high_f32(vec_sqrtHanning_rev),
+ vget_low_f32(vec_sqrtHanning_rev));
+ vst1q_f32(&x_windowed[i], vmulq_f32(vec_Buf1, vec_sqrtHanning));
+ vst1q_f32(&x_windowed[PART_LEN + i],
+ vmulq_f32(vec_Buf2, vec_sqrtHanning_rev));
+ }
+}
+
+// Puts fft output data into a complex valued array.
+__inline static void StoreAsComplex(const float* data,
+ float data_complex[2][PART_LEN1]) {
+ int i;
+ for (i = 0; i < PART_LEN; i += 4) {
+ const float32x4x2_t vec_data = vld2q_f32(&data[2 * i]);
+ vst1q_f32(&data_complex[0][i], vec_data.val[0]);
+ vst1q_f32(&data_complex[1][i], vec_data.val[1]);
+ }
+ // fix beginning/end values
+ data_complex[1][0] = 0;
+ data_complex[1][PART_LEN] = 0;
+ data_complex[0][0] = data[0];
+ data_complex[0][PART_LEN] = data[1];
+}
+
+static void SubbandCoherenceNEON(AecCore* aec,
+ float efw[2][PART_LEN1],
+ float xfw[2][PART_LEN1],
+ float* fft,
+ float* cohde,
+ float* cohxd) {
+ float dfw[2][PART_LEN1];
+ int i;
+
+ if (aec->delayEstCtr == 0)
+ aec->delayIdx = PartitionDelay(aec);
+
+ // Use delayed far.
+ memcpy(xfw,
+ aec->xfwBuf + aec->delayIdx * PART_LEN1,
+ sizeof(xfw[0][0]) * 2 * PART_LEN1);
+
+ // Windowed near fft
+ WindowData(fft, aec->dBuf);
+ aec_rdft_forward_128(fft);
+ StoreAsComplex(fft, dfw);
+
+ // Windowed error fft
+ WindowData(fft, aec->eBuf);
+ aec_rdft_forward_128(fft);
+ StoreAsComplex(fft, efw);
+
+ SmoothedPSD(aec, efw, dfw, xfw);
+
+ {
+ const float32x4_t vec_1eminus10 = vdupq_n_f32(1e-10f);
+
+ // Subband coherence
+ for (i = 0; i + 3 < PART_LEN1; i += 4) {
+ const float32x4_t vec_sd = vld1q_f32(&aec->sd[i]);
+ const float32x4_t vec_se = vld1q_f32(&aec->se[i]);
+ const float32x4_t vec_sx = vld1q_f32(&aec->sx[i]);
+ const float32x4_t vec_sdse = vmlaq_f32(vec_1eminus10, vec_sd, vec_se);
+ const float32x4_t vec_sdsx = vmlaq_f32(vec_1eminus10, vec_sd, vec_sx);
+ float32x4x2_t vec_sde = vld2q_f32(&aec->sde[i][0]);
+ float32x4x2_t vec_sxd = vld2q_f32(&aec->sxd[i][0]);
+ float32x4_t vec_cohde = vmulq_f32(vec_sde.val[0], vec_sde.val[0]);
+ float32x4_t vec_cohxd = vmulq_f32(vec_sxd.val[0], vec_sxd.val[0]);
+ vec_cohde = vmlaq_f32(vec_cohde, vec_sde.val[1], vec_sde.val[1]);
+ vec_cohde = vdivq_f32(vec_cohde, vec_sdse);
+ vec_cohxd = vmlaq_f32(vec_cohxd, vec_sxd.val[1], vec_sxd.val[1]);
+ vec_cohxd = vdivq_f32(vec_cohxd, vec_sdsx);
+
+ vst1q_f32(&cohde[i], vec_cohde);
+ vst1q_f32(&cohxd[i], vec_cohxd);
+ }
+ }
+ // scalar code for the remaining items.
+ for (; i < PART_LEN1; i++) {
+ cohde[i] =
+ (aec->sde[i][0] * aec->sde[i][0] + aec->sde[i][1] * aec->sde[i][1]) /
+ (aec->sd[i] * aec->se[i] + 1e-10f);
+ cohxd[i] =
+ (aec->sxd[i][0] * aec->sxd[i][0] + aec->sxd[i][1] * aec->sxd[i][1]) /
+ (aec->sx[i] * aec->sd[i] + 1e-10f);
+ }
+}
+
void WebRtcAec_InitAec_neon(void) {
WebRtcAec_FilterFar = FilterFarNEON;
WebRtcAec_ScaleErrorSignal = ScaleErrorSignalNEON;
WebRtcAec_FilterAdaptation = FilterAdaptationNEON;
WebRtcAec_OverdriveAndSuppress = OverdriveAndSuppressNEON;
+ WebRtcAec_SubbandCoherence = SubbandCoherenceNEON;
}
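Both PartitionDelay() and SmoothedPSD() in the NEON file above collapse a float32x4_t accumulator to a scalar with two pairwise adds before comparing or summing energies. A small sketch of that idiom, assuming an ARM target with NEON available:

/* Sketch of the NEON horizontal-sum idiom used above: two vpadd_f32 steps
 * reduce {A, B, C, D} to A+B+C+D. Builds only with NEON support. */
#include <arm_neon.h>

static float horizontal_sum_f32x4(float32x4_t v) {
  /* {A, B, C, D} -> {A+B, C+D} */
  float32x2_t sum = vpadd_f32(vget_low_f32(v), vget_high_f32(v));
  /* {A+B, C+D} -> {A+B+C+D, A+B+C+D} */
  sum = vpadd_f32(sum, sum);
  return vget_lane_f32(sum, 0);
}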
diff --git a/modules/audio_processing/aec/aec_core_sse2.c b/modules/audio_processing/aec/aec_core_sse2.c
index 1489d26e..4d9b4efe 100644
--- a/modules/audio_processing/aec/aec_core_sse2.c
+++ b/modules/audio_processing/aec/aec_core_sse2.c
@@ -12,12 +12,11 @@
* The core AEC algorithm, SSE2 version of speed-critical functions.
*/
-#include "webrtc/modules/audio_processing/aec/aec_core.h"
-
#include <emmintrin.h>
#include <math.h>
#include <string.h> // memset
+#include "webrtc/modules/audio_processing/aec/aec_common.h"
#include "webrtc/modules/audio_processing/aec/aec_core_internal.h"
#include "webrtc/modules/audio_processing/aec/aec_rdft.h"
@@ -354,9 +353,6 @@ static __m128 mm_pow_ps(__m128 a, __m128 b) {
return a_exp_b;
}
-extern ALIGN16_BEG const float ALIGN16_END WebRtcAec_weightCurve[65];
-extern ALIGN16_BEG const float ALIGN16_END WebRtcAec_overDriveCurve[65];
-
static void OverdriveAndSuppressSSE2(AecCore* aec,
float hNl[PART_LEN1],
const float hNlFb,
diff --git a/modules/audio_processing/aec/aec_rdft.h b/modules/audio_processing/aec/aec_rdft.h
index 94301601..3b339a05 100644
--- a/modules/audio_processing/aec/aec_rdft.h
+++ b/modules/audio_processing/aec/aec_rdft.h
@@ -11,6 +11,8 @@
#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_AEC_MAIN_SOURCE_AEC_RDFT_H_
#define WEBRTC_MODULES_AUDIO_PROCESSING_AEC_MAIN_SOURCE_AEC_RDFT_H_
+#include "webrtc/modules/audio_processing/aec/aec_common.h"
+
// These intrinsics were unavailable before VS 2008.
// TODO(andrew): move to a common file.
#if defined(_MSC_VER) && _MSC_VER < 1500
@@ -19,14 +21,6 @@ static __inline __m128 _mm_castsi128_ps(__m128i a) { return *(__m128*)&a; }
static __inline __m128i _mm_castps_si128(__m128 a) { return *(__m128i*)&a; }
#endif
-#ifdef _MSC_VER /* visual c++ */
-#define ALIGN16_BEG __declspec(align(16))
-#define ALIGN16_END
-#else /* gcc or icc */
-#define ALIGN16_BEG
-#define ALIGN16_END __attribute__((aligned(16)))
-#endif
-
// constants shared by all paths (C, SSE2).
extern float rdft_w[64];
// constants used by the C path.
diff --git a/modules/audio_processing/aec/aec_rdft_neon.c b/modules/audio_processing/aec/aec_rdft_neon.c
index a9c79b7b..43b6a68c 100644
--- a/modules/audio_processing/aec/aec_rdft_neon.c
+++ b/modules/audio_processing/aec/aec_rdft_neon.c
@@ -187,19 +187,18 @@ __inline static float32x4_t reverse_order_f32x4(float32x4_t in) {
static void rftfsub_128_neon(float* a) {
const float* c = rdft_w + 32;
- int j1, j2, k1, k2;
- float wkr, wki, xr, xi, yr, yi;
+ int j1, j2;
const float32x4_t mm_half = vdupq_n_f32(0.5f);
// Vectorized code (four at once).
// Note: commented number are indexes for the first iteration of the loop.
for (j1 = 1, j2 = 2; j2 + 7 < 64; j1 += 4, j2 += 8) {
// Load 'wk'.
- const float32x4_t c_j1 = vld1q_f32(&c[j1]); // 1, 2, 3, 4,
- const float32x4_t c_k1 = vld1q_f32(&c[29 - j1]); // 28, 29, 30, 31,
- const float32x4_t wkrt = vsubq_f32(mm_half, c_k1); // 28, 29, 30, 31,
- const float32x4_t wkr_ = reverse_order_f32x4(wkrt);
- const float32x4_t wki_ = c_j1; // 1, 2, 3, 4,
+ const float32x4_t c_j1 = vld1q_f32(&c[j1]); // 1, 2, 3, 4,
+ const float32x4_t c_k1 = vld1q_f32(&c[29 - j1]); // 28, 29, 30, 31,
+ const float32x4_t wkrt = vsubq_f32(mm_half, c_k1); // 28, 29, 30, 31,
+ const float32x4_t wkr_ = reverse_order_f32x4(wkrt); // 31, 30, 29, 28,
+ const float32x4_t wki_ = c_j1; // 1, 2, 3, 4,
// Load and shuffle 'a'.
// 2, 4, 6, 8, 3, 5, 7, 9
float32x4x2_t a_j2_p = vld2q_f32(&a[0 + j2]);
@@ -250,14 +249,14 @@ static void rftfsub_128_neon(float* a) {
// Scalar code for the remaining items.
for (; j2 < 64; j1 += 1, j2 += 2) {
- k2 = 128 - j2;
- k1 = 32 - j1;
- wkr = 0.5f - c[k1];
- wki = c[j1];
- xr = a[j2 + 0] - a[k2 + 0];
- xi = a[j2 + 1] + a[k2 + 1];
- yr = wkr * xr - wki * xi;
- yi = wkr * xi + wki * xr;
+ const int k2 = 128 - j2;
+ const int k1 = 32 - j1;
+ const float wkr = 0.5f - c[k1];
+ const float wki = c[j1];
+ const float xr = a[j2 + 0] - a[k2 + 0];
+ const float xi = a[j2 + 1] + a[k2 + 1];
+ const float yr = wkr * xr - wki * xi;
+ const float yi = wkr * xi + wki * xr;
a[j2 + 0] -= yr;
a[j2 + 1] -= yi;
a[k2 + 0] += yr;
@@ -265,9 +264,92 @@ static void rftfsub_128_neon(float* a) {
}
}
+static void rftbsub_128_neon(float* a) {
+ const float* c = rdft_w + 32;
+ int j1, j2;
+ const float32x4_t mm_half = vdupq_n_f32(0.5f);
+
+ a[1] = -a[1];
+ // Vectorized code (four at once).
+ // Note: commented number are indexes for the first iteration of the loop.
+ for (j1 = 1, j2 = 2; j2 + 7 < 64; j1 += 4, j2 += 8) {
+ // Load 'wk'.
+ const float32x4_t c_j1 = vld1q_f32(&c[j1]); // 1, 2, 3, 4,
+ const float32x4_t c_k1 = vld1q_f32(&c[29 - j1]); // 28, 29, 30, 31,
+ const float32x4_t wkrt = vsubq_f32(mm_half, c_k1); // 28, 29, 30, 31,
+ const float32x4_t wkr_ = reverse_order_f32x4(wkrt); // 31, 30, 29, 28,
+ const float32x4_t wki_ = c_j1; // 1, 2, 3, 4,
+ // Load and shuffle 'a'.
+ // 2, 4, 6, 8, 3, 5, 7, 9
+ float32x4x2_t a_j2_p = vld2q_f32(&a[0 + j2]);
+ // 120, 122, 124, 126, 121, 123, 125, 127,
+ const float32x4x2_t k2_0_4 = vld2q_f32(&a[122 - j2]);
+ // 126, 124, 122, 120
+ const float32x4_t a_k2_p0 = reverse_order_f32x4(k2_0_4.val[0]);
+ // 127, 125, 123, 121
+ const float32x4_t a_k2_p1 = reverse_order_f32x4(k2_0_4.val[1]);
+ // Calculate 'x'.
+ const float32x4_t xr_ = vsubq_f32(a_j2_p.val[0], a_k2_p0);
+ // 2-126, 4-124, 6-122, 8-120,
+ const float32x4_t xi_ = vaddq_f32(a_j2_p.val[1], a_k2_p1);
+ // 3-127, 5-125, 7-123, 9-121,
+ // Calculate product into 'y'.
+ // yr = wkr * xr - wki * xi;
+ // yi = wkr * xi + wki * xr;
+ const float32x4_t a_ = vmulq_f32(wkr_, xr_);
+ const float32x4_t b_ = vmulq_f32(wki_, xi_);
+ const float32x4_t c_ = vmulq_f32(wkr_, xi_);
+ const float32x4_t d_ = vmulq_f32(wki_, xr_);
+ const float32x4_t yr_ = vaddq_f32(a_, b_); // 2-126, 4-124, 6-122, 8-120,
+ const float32x4_t yi_ = vsubq_f32(c_, d_); // 3-127, 5-125, 7-123, 9-121,
+ // Update 'a'.
+ // a[j2 + 0] -= yr;
+ // a[j2 + 1] -= yi;
+ // a[k2 + 0] += yr;
+ // a[k2 + 1] -= yi;
+ // 126, 124, 122, 120,
+ const float32x4_t a_k2_p0n = vaddq_f32(a_k2_p0, yr_);
+ // 127, 125, 123, 121,
+ const float32x4_t a_k2_p1n = vsubq_f32(yi_, a_k2_p1);
+ // Shuffle in right order and store.
+ // 2, 3, 4, 5, 6, 7, 8, 9,
+ const float32x4_t a_k2_p0nr = vrev64q_f32(a_k2_p0n);
+ const float32x4_t a_k2_p1nr = vrev64q_f32(a_k2_p1n);
+ // 124, 125, 126, 127, 120, 121, 122, 123
+ const float32x4x2_t a_k2_n = vzipq_f32(a_k2_p0nr, a_k2_p1nr);
+ // 2, 4, 6, 8,
+ a_j2_p.val[0] = vsubq_f32(a_j2_p.val[0], yr_);
+ // 3, 5, 7, 9,
+ a_j2_p.val[1] = vsubq_f32(yi_, a_j2_p.val[1]);
+ // 2, 3, 4, 5, 6, 7, 8, 9,
+ vst2q_f32(&a[0 + j2], a_j2_p);
+
+ vst1q_f32(&a[122 - j2], a_k2_n.val[1]);
+ vst1q_f32(&a[126 - j2], a_k2_n.val[0]);
+ }
+
+ // Scalar code for the remaining items.
+ for (; j2 < 64; j1 += 1, j2 += 2) {
+ const int k2 = 128 - j2;
+ const int k1 = 32 - j1;
+ const float wkr = 0.5f - c[k1];
+ const float wki = c[j1];
+ const float xr = a[j2 + 0] - a[k2 + 0];
+ const float xi = a[j2 + 1] + a[k2 + 1];
+ const float yr = wkr * xr + wki * xi;
+ const float yi = wkr * xi - wki * xr;
+ a[j2 + 0] = a[j2 + 0] - yr;
+ a[j2 + 1] = yi - a[j2 + 1];
+ a[k2 + 0] = yr + a[k2 + 0];
+ a[k2 + 1] = yi - a[k2 + 1];
+ }
+ a[65] = -a[65];
+}
+
void aec_rdft_init_neon(void) {
cft1st_128 = cft1st_128_neon;
cftmdl_128 = cftmdl_128_neon;
rftfsub_128 = rftfsub_128_neon;
+ rftbsub_128 = rftbsub_128_neon;
}
diff --git a/modules/audio_processing/agc/digital_agc.c b/modules/audio_processing/agc/digital_agc.c
index 4b169c18..d0f7b10d 100644
--- a/modules/audio_processing/agc/digital_agc.c
+++ b/modules/audio_processing/agc/digital_agc.c
@@ -310,7 +310,7 @@ int32_t WebRtcAgc_ProcessDigital(DigitalAgc_t *stt, const int16_t *in_near,
int32_t gain32, delta;
int16_t logratio;
int16_t lower_thr, upper_thr;
- int16_t zeros, zeros_fast, frac;
+ int16_t zeros = 0, zeros_fast, frac = 0;
int16_t decay;
int16_t gate, gain_adj;
int16_t k, n;
diff --git a/modules/audio_processing/audio_buffer.h b/modules/audio_processing/audio_buffer.h
index 2fab814a..db24e959 100644
--- a/modules/audio_processing/audio_buffer.h
+++ b/modules/audio_processing/audio_buffer.h
@@ -56,6 +56,7 @@ class AudioBuffer {
int samples_per_split_channel() const;
int samples_per_keyboard_channel() const;
+ // It can be assumed that channels are stored contiguously.
int16_t* data(int channel);
const int16_t* data(int channel) const;
int16_t* low_pass_split_data(int channel);
diff --git a/modules/desktop_capture/BUILD.gn b/modules/desktop_capture/BUILD.gn
index be9658f8..55853fb0 100644
--- a/modules/desktop_capture/BUILD.gn
+++ b/modules/desktop_capture/BUILD.gn
@@ -105,6 +105,8 @@ source_set("desktop_capture") {
]
}
+ configs += [ "../../:common_inherited_config"]
+
deps = ["../../system_wrappers"]
if (use_desktop_capture_differ_sse2) {
@@ -121,6 +123,8 @@ if (use_desktop_capture_differ_sse2) {
"differ_block_sse2.h",
]
+ configs += [ "../../:common_inherited_config"]
+
if (is_posix && !is_mac) {
cflags = ["-msse2"]
}
diff --git a/modules/desktop_capture/desktop_capture_options.cc b/modules/desktop_capture/desktop_capture_options.cc
index 105853bf..b7f123b9 100644
--- a/modules/desktop_capture/desktop_capture_options.cc
+++ b/modules/desktop_capture/desktop_capture_options.cc
@@ -35,6 +35,8 @@ DesktopCaptureOptions DesktopCaptureOptions::CreateDefault() {
#endif
#if defined(WEBRTC_MAC) && !defined(WEBRTC_IOS)
result.set_configuration_monitor(new DesktopConfigurationMonitor());
+ result.set_full_screen_chrome_window_detector(
+ new FullScreenChromeWindowDetector());
#endif
return result;
}
diff --git a/modules/interface/module_common_types.h b/modules/interface/module_common_types.h
index e37313c9..398808f7 100644
--- a/modules/interface/module_common_types.h
+++ b/modules/interface/module_common_types.h
@@ -20,11 +20,6 @@
#include "webrtc/common_types.h"
#include "webrtc/typedefs.h"
-#ifdef _WIN32
-// Remove warning "new behavior: elements of array will be default initialized".
-#pragma warning(disable : 4351)
-#endif
-
namespace webrtc {
struct RTPAudioHeader {
@@ -34,21 +29,10 @@ struct RTPAudioHeader {
uint8_t channel; // number of channels 2 = stereo
};
-enum {
- kNoPictureId = -1
-};
-enum {
- kNoTl0PicIdx = -1
-};
-enum {
- kNoTemporalIdx = -1
-};
-enum {
- kNoKeyIdx = -1
-};
-enum {
- kNoSimulcastIdx = 0
-};
+const int16_t kNoPictureId = -1;
+const int16_t kNoTl0PicIdx = -1;
+const uint8_t kNoTemporalIdx = 0xFF;
+const int kNoKeyIdx = -1;
struct RTPVideoHeaderVP8 {
void InitRTPVideoHeaderVP8() {
@@ -67,7 +51,7 @@ struct RTPVideoHeaderVP8 {
// kNoPictureId if PictureID does not exist.
int16_t tl0PicIdx; // TL0PIC_IDX, 8 bits;
// kNoTl0PicIdx means no value provided.
- int8_t temporalIdx; // Temporal layer index, or kNoTemporalIdx.
+ uint8_t temporalIdx; // Temporal layer index, or kNoTemporalIdx.
bool layerSync; // This frame is a layer sync frame.
// Disabled if temporalIdx == kNoTemporalIdx.
int keyIdx; // 5 bits; kNoKeyIdx means not used.
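With temporalIdx widened to uint8_t above, the missing-value sentinel becomes kNoTemporalIdx = 0xFF rather than -1, and callers are expected to compare against the named constant instead of a signed literal. A small illustrative sketch with hypothetical names:

/* Sketch (hypothetical names): with an unsigned 8-bit temporal index, the
 * "not set" sentinel is 0xFF, mirroring the kNoTemporalIdx change above. */
#include <stdint.h>
#include <stdio.h>

static const uint8_t kNoTemporalIdxExample = 0xFF;

int main(void) {
  uint8_t temporal_idx = kNoTemporalIdxExample;  /* field left unset */
  if (temporal_idx == kNoTemporalIdxExample)
    printf("temporal layer index not provided\n");
  /* Note: (temporal_idx == -1) would be false here, since temporal_idx
   * promotes to int 255 while -1 stays -1. */
  return 0;
}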
diff --git a/modules/pacing/include/paced_sender.h b/modules/pacing/include/paced_sender.h
index 55497db3..b9151a5f 100644
--- a/modules/pacing/include/paced_sender.h
+++ b/modules/pacing/include/paced_sender.h
@@ -17,7 +17,6 @@
#include "webrtc/modules/interface/module.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
#include "webrtc/system_wrappers/interface/thread_annotations.h"
-#include "webrtc/system_wrappers/interface/tick_util.h"
#include "webrtc/typedefs.h"
namespace webrtc {
@@ -147,8 +146,8 @@ class PacedSender : public Module {
scoped_ptr<paced_sender::IntervalBudget> padding_budget_
GUARDED_BY(critsect_);
- TickTime time_last_update_ GUARDED_BY(critsect_);
- TickTime time_last_send_ GUARDED_BY(critsect_);
+ int64_t time_last_update_ GUARDED_BY(critsect_);
+ int64_t time_last_send_ GUARDED_BY(critsect_);
int64_t capture_time_ms_last_queued_ GUARDED_BY(critsect_);
int64_t capture_time_ms_last_sent_ GUARDED_BY(critsect_);
diff --git a/modules/pacing/paced_sender.cc b/modules/pacing/paced_sender.cc
index 323cafec..52e9cfb4 100644
--- a/modules/pacing/paced_sender.cc
+++ b/modules/pacing/paced_sender.cc
@@ -142,7 +142,7 @@ PacedSender::PacedSender(Clock* clock,
max_queue_length_ms_(kDefaultMaxQueueLengthMs),
media_budget_(new paced_sender::IntervalBudget(max_bitrate_kbps)),
padding_budget_(new paced_sender::IntervalBudget(min_bitrate_kbps)),
- time_last_update_(TickTime::Now()),
+ time_last_update_(clock->TimeInMilliseconds()),
capture_time_ms_last_queued_(0),
capture_time_ms_last_sent_(0),
high_priority_packets_(new paced_sender::PacketList),
@@ -248,8 +248,7 @@ int PacedSender::QueueInMs() const {
int32_t PacedSender::TimeUntilNextProcess() {
CriticalSectionScoped cs(critsect_.get());
- int64_t elapsed_time_ms =
- (TickTime::Now() - time_last_update_).Milliseconds();
+ int64_t elapsed_time_ms = clock_->TimeInMilliseconds() - time_last_update_;
if (elapsed_time_ms <= 0) {
return kMinPacketLimitMs;
}
@@ -260,9 +259,9 @@ int32_t PacedSender::TimeUntilNextProcess() {
}
int32_t PacedSender::Process() {
- TickTime now = TickTime::Now();
+ int64_t now = clock_->TimeInMilliseconds();
CriticalSectionScoped cs(critsect_.get());
- int elapsed_time_ms = (now - time_last_update_).Milliseconds();
+ int elapsed_time_ms = now - time_last_update_;
time_last_update_ = now;
if (!enabled_) {
return 0;
@@ -335,7 +334,7 @@ bool PacedSender::ShouldSendNextPacket(paced_sender::PacketList** packet_list) {
if (media_budget_->bytes_remaining() <= 0) {
// All bytes consumed for this interval.
// Check if we have not sent in a too long time.
- if ((TickTime::Now() - time_last_send_).Milliseconds() >
+ if (clock_->TimeInMilliseconds() - time_last_send_ >
kMaxQueueTimeWithoutSendingMs) {
if (!high_priority_packets_->empty()) {
*packet_list = high_priority_packets_.get();
@@ -389,7 +388,7 @@ paced_sender::Packet PacedSender::GetNextPacketFromList(
// MUST have critsect_ when calling.
void PacedSender::UpdateMediaBytesSent(int num_bytes) {
- time_last_send_ = TickTime::Now();
+ time_last_send_ = clock_->TimeInMilliseconds();
media_budget_->UseBudget(num_bytes);
padding_budget_->UseBudget(num_bytes);
}
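paced_sender.cc above now takes time from the injected Clock (clock_->TimeInMilliseconds()) instead of the global TickTime, which is why the unit tests below can drop the TickTime::AdvanceFakeClock calls and drive time through the simulated clock alone. A minimal sketch of the injected-clock pattern; the clock struct here is hypothetical, the real webrtc::Clock is a C++ interface:

/* Sketch of the injected-clock pattern adopted above. ExampleClock and
 * ExamplePacer are hypothetical stand-ins, not WebRTC types. */
#include <stdint.h>

typedef struct {
  int64_t (*now_ms)(void* ctx);  /* supplied by production or test code */
  void* ctx;
} ExampleClock;

typedef struct {
  const ExampleClock* clock;
  int64_t time_last_update_ms;
} ExamplePacer;

/* Returns milliseconds elapsed since the last call and advances the
 * bookkeeping, mirroring PacedSender::Process() above. */
static int64_t example_pacer_elapsed_ms(ExamplePacer* pacer) {
  int64_t now = pacer->clock->now_ms(pacer->clock->ctx);
  int64_t elapsed = now - pacer->time_last_update_ms;
  pacer->time_last_update_ms = now;
  return elapsed;
}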
diff --git a/modules/pacing/paced_sender_unittest.cc b/modules/pacing/paced_sender_unittest.cc
index 55188558..14dcdbc5 100644
--- a/modules/pacing/paced_sender_unittest.cc
+++ b/modules/pacing/paced_sender_unittest.cc
@@ -58,7 +58,6 @@ class PacedSenderTest : public ::testing::Test {
protected:
PacedSenderTest() : clock_(123456) {
srand(0);
- TickTime::UseFakeClock(123456);
// Need to initialize PacedSender after we initialize clock.
send_bucket_.reset(
new PacedSender(
@@ -99,10 +98,8 @@ TEST_F(PacedSenderTest, QueuePacket) {
EXPECT_EQ(5, send_bucket_->TimeUntilNextProcess());
EXPECT_CALL(callback_, TimeToSendPadding(_)).Times(0);
clock_.AdvanceTimeMilliseconds(4);
- TickTime::AdvanceFakeClock(4);
EXPECT_EQ(1, send_bucket_->TimeUntilNextProcess());
clock_.AdvanceTimeMilliseconds(1);
- TickTime::AdvanceFakeClock(1);
EXPECT_EQ(0, send_bucket_->TimeUntilNextProcess());
EXPECT_CALL(callback_, TimeToSendPacket(
ssrc, sequence_number++, queued_packet_timestamp, false))
@@ -137,7 +134,6 @@ TEST_F(PacedSenderTest, PaceQueuedPackets) {
for (int k = 0; k < 10; ++k) {
EXPECT_EQ(5, send_bucket_->TimeUntilNextProcess());
clock_.AdvanceTimeMilliseconds(5);
- TickTime::AdvanceFakeClock(5);
EXPECT_CALL(callback_,
TimeToSendPacket(ssrc, _, _, false))
.Times(3)
@@ -147,7 +143,6 @@ TEST_F(PacedSenderTest, PaceQueuedPackets) {
}
EXPECT_EQ(5, send_bucket_->TimeUntilNextProcess());
clock_.AdvanceTimeMilliseconds(5);
- TickTime::AdvanceFakeClock(5);
EXPECT_EQ(0, send_bucket_->TimeUntilNextProcess());
EXPECT_EQ(0, send_bucket_->Process());
SendAndExpectPacket(PacedSender::kNormalPriority, ssrc, sequence_number++,
@@ -185,7 +180,6 @@ TEST_F(PacedSenderTest, PaceQueuedPacketsWithDuplicates) {
for (int k = 0; k < 10; ++k) {
EXPECT_EQ(5, send_bucket_->TimeUntilNextProcess());
clock_.AdvanceTimeMilliseconds(5);
- TickTime::AdvanceFakeClock(5);
for (int i = 0; i < 3; ++i) {
EXPECT_CALL(callback_, TimeToSendPacket(ssrc, queued_sequence_number++,
@@ -199,7 +193,6 @@ TEST_F(PacedSenderTest, PaceQueuedPacketsWithDuplicates) {
}
EXPECT_EQ(5, send_bucket_->TimeUntilNextProcess());
clock_.AdvanceTimeMilliseconds(5);
- TickTime::AdvanceFakeClock(5);
EXPECT_EQ(0, send_bucket_->TimeUntilNextProcess());
EXPECT_EQ(0, send_bucket_->Process());
SendAndExpectPacket(PacedSender::kNormalPriority, ssrc, sequence_number++,
@@ -233,7 +226,6 @@ TEST_F(PacedSenderTest, CanQueuePacketsWithSameSequenceNumberOnDifferentSsrcs) {
false);
clock_.AdvanceTimeMilliseconds(1000);
- TickTime::AdvanceFakeClock(1000);
send_bucket_->Process();
}
@@ -253,7 +245,6 @@ TEST_F(PacedSenderTest, Padding) {
EXPECT_CALL(callback_, TimeToSendPadding(_)).Times(0);
EXPECT_EQ(5, send_bucket_->TimeUntilNextProcess());
clock_.AdvanceTimeMilliseconds(5);
- TickTime::AdvanceFakeClock(5);
EXPECT_EQ(0, send_bucket_->TimeUntilNextProcess());
EXPECT_EQ(0, send_bucket_->Process());
@@ -262,7 +253,6 @@ TEST_F(PacedSenderTest, Padding) {
WillOnce(Return(250));
EXPECT_EQ(5, send_bucket_->TimeUntilNextProcess());
clock_.AdvanceTimeMilliseconds(5);
- TickTime::AdvanceFakeClock(5);
EXPECT_EQ(0, send_bucket_->TimeUntilNextProcess());
EXPECT_EQ(0, send_bucket_->Process());
}
@@ -274,13 +264,11 @@ TEST_F(PacedSenderTest, NoPaddingWhenDisabled) {
EXPECT_CALL(callback_, TimeToSendPadding(_)).Times(0);
EXPECT_EQ(5, send_bucket_->TimeUntilNextProcess());
clock_.AdvanceTimeMilliseconds(5);
- TickTime::AdvanceFakeClock(5);
EXPECT_EQ(0, send_bucket_->TimeUntilNextProcess());
EXPECT_EQ(0, send_bucket_->Process());
EXPECT_CALL(callback_, TimeToSendPadding(_)).Times(0);
EXPECT_EQ(5, send_bucket_->TimeUntilNextProcess());
clock_.AdvanceTimeMilliseconds(5);
- TickTime::AdvanceFakeClock(5);
EXPECT_EQ(0, send_bucket_->TimeUntilNextProcess());
EXPECT_EQ(0, send_bucket_->Process());
}
@@ -297,7 +285,6 @@ TEST_F(PacedSenderTest, VerifyPaddingUpToBitrate) {
SendAndExpectPacket(PacedSender::kNormalPriority, ssrc, sequence_number++,
capture_time_ms, 250, false);
clock_.AdvanceTimeMilliseconds(kTimeStep);
- TickTime::AdvanceFakeClock(kTimeStep);
EXPECT_CALL(callback_, TimeToSendPadding(250)).Times(1).
WillOnce(Return(250));
send_bucket_->Process();
@@ -323,7 +310,6 @@ TEST_F(PacedSenderTest, VerifyAverageBitrateVaryingMediaPayload) {
media_payload, false));
media_bytes += media_payload;
clock_.AdvanceTimeMilliseconds(kTimeStep);
- TickTime::AdvanceFakeClock(kTimeStep);
send_bucket_->Process();
}
EXPECT_NEAR(kTargetBitrate, 8 * (media_bytes + callback.padding_sent()) /
@@ -365,7 +351,6 @@ TEST_F(PacedSenderTest, Priority) {
EXPECT_EQ(5, send_bucket_->TimeUntilNextProcess());
clock_.AdvanceTimeMilliseconds(5);
- TickTime::AdvanceFakeClock(5);
EXPECT_EQ(0, send_bucket_->TimeUntilNextProcess());
EXPECT_EQ(0, send_bucket_->Process());
@@ -376,7 +361,6 @@ TEST_F(PacedSenderTest, Priority) {
EXPECT_EQ(5, send_bucket_->TimeUntilNextProcess());
clock_.AdvanceTimeMilliseconds(5);
- TickTime::AdvanceFakeClock(5);
EXPECT_EQ(0, send_bucket_->TimeUntilNextProcess());
EXPECT_EQ(0, send_bucket_->Process());
}
@@ -408,7 +392,6 @@ TEST_F(PacedSenderTest, Pause) {
ssrc, sequence_number++, capture_time_ms, 250, false));
clock_.AdvanceTimeMilliseconds(10000);
- TickTime::AdvanceFakeClock(10000);
int64_t second_capture_time_ms = clock_.TimeInMilliseconds();
// Expect everything to be queued.
@@ -425,7 +408,6 @@ TEST_F(PacedSenderTest, Pause) {
for (int i = 0; i < 10; ++i) {
clock_.AdvanceTimeMilliseconds(5);
- TickTime::AdvanceFakeClock(5);
EXPECT_EQ(0, send_bucket_->TimeUntilNextProcess());
EXPECT_EQ(0, send_bucket_->Process());
}
@@ -438,7 +420,6 @@ TEST_F(PacedSenderTest, Pause) {
EXPECT_EQ(5, send_bucket_->TimeUntilNextProcess());
clock_.AdvanceTimeMilliseconds(5);
- TickTime::AdvanceFakeClock(5);
EXPECT_EQ(0, send_bucket_->TimeUntilNextProcess());
EXPECT_EQ(0, send_bucket_->Process());
@@ -448,7 +429,6 @@ TEST_F(PacedSenderTest, Pause) {
.WillRepeatedly(Return(true));
EXPECT_EQ(5, send_bucket_->TimeUntilNextProcess());
clock_.AdvanceTimeMilliseconds(5);
- TickTime::AdvanceFakeClock(5);
EXPECT_EQ(0, send_bucket_->TimeUntilNextProcess());
EXPECT_EQ(0, send_bucket_->Process());
EXPECT_EQ(0, send_bucket_->QueueInMs());
@@ -467,7 +447,6 @@ TEST_F(PacedSenderTest, ResendPacket) {
250,
false));
clock_.AdvanceTimeMilliseconds(1);
- TickTime::AdvanceFakeClock(1);
EXPECT_FALSE(send_bucket_->SendPacket(PacedSender::kNormalPriority,
ssrc,
sequence_number + 1,
@@ -475,7 +454,6 @@ TEST_F(PacedSenderTest, ResendPacket) {
250,
false));
clock_.AdvanceTimeMilliseconds(9999);
- TickTime::AdvanceFakeClock(9999);
EXPECT_EQ(clock_.TimeInMilliseconds() - capture_time_ms,
send_bucket_->QueueInMs());
// Fails to send first packet so only one call.
@@ -484,7 +462,6 @@ TEST_F(PacedSenderTest, ResendPacket) {
.Times(1)
.WillOnce(Return(false));
clock_.AdvanceTimeMilliseconds(10000);
- TickTime::AdvanceFakeClock(10000);
send_bucket_->Process();
// Queue remains unchanged.
@@ -501,7 +478,6 @@ TEST_F(PacedSenderTest, ResendPacket) {
.Times(1)
.WillOnce(Return(false));
clock_.AdvanceTimeMilliseconds(10000);
- TickTime::AdvanceFakeClock(10000);
send_bucket_->Process();
// Queue is reduced by 1 packet.
@@ -514,7 +490,6 @@ TEST_F(PacedSenderTest, ResendPacket) {
.Times(1)
.WillOnce(Return(true));
clock_.AdvanceTimeMilliseconds(10000);
- TickTime::AdvanceFakeClock(10000);
send_bucket_->Process();
EXPECT_EQ(0, send_bucket_->QueueInMs());
}
@@ -535,7 +510,6 @@ TEST_F(PacedSenderTest, MaxQueueLength) {
}
clock_.AdvanceTimeMilliseconds(2001);
- TickTime::AdvanceFakeClock(2001);
SendAndExpectPacket(PacedSender::kNormalPriority,
ssrc,
sequence_number++,
@@ -546,7 +520,7 @@ TEST_F(PacedSenderTest, MaxQueueLength) {
send_bucket_->Process();
EXPECT_EQ(0, send_bucket_->QueueInMs());
clock_.AdvanceTimeMilliseconds(31);
- TickTime::AdvanceFakeClock(31);
+
send_bucket_->Process();
}
@@ -564,7 +538,6 @@ TEST_F(PacedSenderTest, QueueTimeGrowsOverTime) {
false);
clock_.AdvanceTimeMilliseconds(500);
- TickTime::AdvanceFakeClock(500);
EXPECT_EQ(500, send_bucket_->QueueInMs());
send_bucket_->Process();
EXPECT_EQ(0, send_bucket_->QueueInMs());
diff --git a/modules/remote_bitrate_estimator/BUILD.gn b/modules/remote_bitrate_estimator/BUILD.gn
index 7ee4c8df..cc643dad 100644
--- a/modules/remote_bitrate_estimator/BUILD.gn
+++ b/modules/remote_bitrate_estimator/BUILD.gn
@@ -14,4 +14,6 @@ source_set("remote_bitrate_estimator") {
"remote_rate_control.cc",
"remote_rate_control.h",
]
+
+ configs += [ "../../:common_inherited_config"]
}
diff --git a/modules/rtp_rtcp/interface/rtp_rtcp.h b/modules/rtp_rtcp/interface/rtp_rtcp.h
index 235ca849..7b0a4f8a 100644
--- a/modules/rtp_rtcp/interface/rtp_rtcp.h
+++ b/modules/rtp_rtcp/interface/rtp_rtcp.h
@@ -47,8 +47,8 @@ class RtpRtcp : public Module {
* intra_frame_callback - Called when the receiver request a intra frame.
* bandwidth_callback - Called when we receive a changed estimate from
* the receiver of out stream.
- * audio_messages - Telehone events. May not be NULL; default callback
- * will do nothing.
+ * audio_messages - Telephone events. May not be NULL; default
+ * callback will do nothing.
* remote_bitrate_estimator - Estimates the bandwidth available for a set of
* streams from the same client.
* paced_sender - Spread any bursts of packets into smaller
@@ -68,6 +68,8 @@ class RtpRtcp : public Module {
RemoteBitrateEstimator* remote_bitrate_estimator;
PacedSender* paced_sender;
BitrateStatisticsObserver* send_bitrate_observer;
+ FrameCountObserver* send_frame_count_observer;
+ SendSideDelayObserver* send_side_delay_observer;
};
/*
@@ -340,10 +342,6 @@ class RtpRtcp : public Module {
virtual int TimeToSendPadding(int bytes) = 0;
- virtual void RegisterSendFrameCountObserver(
- FrameCountObserver* observer) = 0;
- virtual FrameCountObserver* GetSendFrameCountObserver() const = 0;
-
virtual bool GetSendSideDelay(int* avg_send_delay_ms,
int* max_send_delay_ms) const = 0;
@@ -381,13 +379,6 @@ class RtpRtcp : public Module {
virtual int32_t SetCNAME(const char cName[RTCP_CNAME_SIZE]) = 0;
/*
- * Get RTCP CName (i.e unique identifier)
- *
- * return -1 on failure else 0
- */
- virtual int32_t CNAME(char cName[RTCP_CNAME_SIZE]) = 0;
-
- /*
* Get remote CName
*
* return -1 on failure else 0
diff --git a/modules/rtp_rtcp/mocks/mock_rtp_rtcp.h b/modules/rtp_rtcp/mocks/mock_rtp_rtcp.h
index fe20c6a3..c954aa20 100644
--- a/modules/rtp_rtcp/mocks/mock_rtp_rtcp.h
+++ b/modules/rtp_rtcp/mocks/mock_rtp_rtcp.h
@@ -135,8 +135,6 @@ class MockRtpRtcp : public RtpRtcp {
int32_t(const RTCPMethod method));
MOCK_METHOD1(SetCNAME,
int32_t(const char cName[RTCP_CNAME_SIZE]));
- MOCK_METHOD1(CNAME,
- int32_t(char cName[RTCP_CNAME_SIZE]));
MOCK_CONST_METHOD2(RemoteCNAME,
int32_t(const uint32_t remoteSSRC,
char cName[RTCP_CNAME_SIZE]));
diff --git a/modules/rtp_rtcp/source/rtcp_format_remb_unittest.cc b/modules/rtp_rtcp/source/rtcp_format_remb_unittest.cc
index 88463e47..0514277f 100644
--- a/modules/rtp_rtcp/source/rtcp_format_remb_unittest.cc
+++ b/modules/rtp_rtcp/source/rtcp_format_remb_unittest.cc
@@ -97,7 +97,6 @@ void RtcpFormatRembTest::SetUp() {
rtcp_receiver_ = new RTCPReceiver(0, system_clock_, dummy_rtp_rtcp_impl_);
test_transport_ = new TestTransport(rtcp_receiver_);
- EXPECT_EQ(0, rtcp_sender_->Init());
EXPECT_EQ(0, rtcp_sender_->RegisterSendTransport(test_transport_));
}
diff --git a/modules/rtp_rtcp/source/rtcp_sender.cc b/modules/rtp_rtcp/source/rtcp_sender.cc
index b9ab0c1e..2cf7e1cb 100644
--- a/modules/rtp_rtcp/source/rtcp_sender.cc
+++ b/modules/rtp_rtcp/source/rtcp_sender.cc
@@ -188,61 +188,6 @@ RTCPSender::~RTCPSender() {
}
int32_t
-RTCPSender::Init()
-{
- CriticalSectionScoped lock(_criticalSectionRTCPSender);
-
- _method = kRtcpOff;
- _cbTransport = NULL;
- _usingNack = false;
- _sending = false;
- _sendTMMBN = false;
- _TMMBR = false;
- _IJ = false;
- _REMB = false;
- _sendREMB = false;
- last_rtp_timestamp_ = 0;
- last_frame_capture_time_ms_ = -1;
- start_timestamp_ = -1;
- _SSRC = 0;
- _remoteSSRC = 0;
- _cameraDelayMS = 0;
- _sequenceNumberFIR = 0;
- _tmmbr_Send = 0;
- _packetOH_Send = 0;
- _nextTimeToSendRTCP = 0;
- _CSRCs = 0;
- _appSend = false;
- _appSubType = 0;
-
- if(_appData)
- {
- delete [] _appData;
- _appData = NULL;
- }
- _appLength = 0;
-
- xrSendReceiverReferenceTimeEnabled_ = false;
-
- _xrSendVoIPMetric = false;
-
- memset(&_xrVoIPMetric, 0, sizeof(_xrVoIPMetric));
- memset(_CNAME, 0, sizeof(_CNAME));
- memset(_lastSendReport, 0, sizeof(_lastSendReport));
- memset(_lastRTCPTime, 0, sizeof(_lastRTCPTime));
- last_xr_rr_.clear();
-
- memset(&packet_type_counter_, 0, sizeof(packet_type_counter_));
- return 0;
-}
-
-void
-RTCPSender::ChangeUniqueId(const int32_t id)
-{
- _id = id;
-}
-
-int32_t
RTCPSender::RegisterSendTransport(Transport* outgoingTransport)
{
CriticalSectionScoped lock(_criticalSectionTransport);
@@ -330,17 +275,17 @@ RTCPSender::SetREMBData(const uint32_t bitrate,
{
CriticalSectionScoped lock(_criticalSectionRTCPSender);
_rembBitrate = bitrate;
-
+
if(_sizeRembSSRC < numberOfSSRC)
{
delete [] _rembSSRC;
_rembSSRC = new uint32_t[numberOfSSRC];
_sizeRembSSRC = numberOfSSRC;
- }
+ }
_lengthRembSSRC = numberOfSSRC;
for (int i = 0; i < numberOfSSRC; i++)
- {
+ {
_rembSSRC[i] = SSRC[i];
}
_sendREMB = true;
@@ -381,6 +326,7 @@ RTCPSender::SetIJStatus(const bool enable)
}
void RTCPSender::SetStartTimestamp(uint32_t start_timestamp) {
+ CriticalSectionScoped lock(_criticalSectionRTCPSender);
start_timestamp_ = start_timestamp;
}
@@ -431,14 +377,6 @@ RTCPSender::SetCameraDelay(const int32_t delayMS)
return 0;
}
-int32_t RTCPSender::CNAME(char cName[RTCP_CNAME_SIZE]) {
- assert(cName);
- CriticalSectionScoped lock(_criticalSectionRTCPSender);
- cName[RTCP_CNAME_SIZE - 1] = 0;
- strncpy(cName, _CNAME, RTCP_CNAME_SIZE - 1);
- return 0;
-}
-
int32_t RTCPSender::SetCNAME(const char cName[RTCP_CNAME_SIZE]) {
if (!cName)
return -1;
@@ -694,13 +632,9 @@ int32_t RTCPSender::BuildSR(const FeedbackState& feedback_state,
// the frame being captured at this moment. We are calculating that
// timestamp as the last frame's timestamp + the time since the last frame
// was captured.
- {
- // Needs protection since this method is called on the process thread.
- CriticalSectionScoped lock(_criticalSectionRTCPSender);
- RTPtime = start_timestamp_ + last_rtp_timestamp_ + (
- _clock->TimeInMilliseconds() - last_frame_capture_time_ms_) *
- (feedback_state.frequency_hz / 1000);
- }
+ RTPtime = start_timestamp_ + last_rtp_timestamp_ +
+ (_clock->TimeInMilliseconds() - last_frame_capture_time_ms_) *
+ (feedback_state.frequency_hz / 1000);
// Add sender data
// Save for our length field
@@ -1175,7 +1109,7 @@ RTCPSender::BuildREMB(uint8_t* rtcpbuffer, int& pos)
rtcpbuffer[pos++]=(uint8_t)(brMantissa >> 8);
rtcpbuffer[pos++]=(uint8_t)(brMantissa);
- for (int i = 0; i < _lengthRembSSRC; i++)
+ for (int i = 0; i < _lengthRembSSRC; i++)
{
RtpUtility::AssignUWord32ToBuffer(rtcpbuffer + pos, _rembSSRC[i]);
pos += 4;
@@ -2110,6 +2044,7 @@ RTCPSender::SendToNetwork(const uint8_t* dataBuffer,
int32_t
RTCPSender::SetCSRCStatus(const bool include)
{
+ CriticalSectionScoped lock(_criticalSectionRTCPSender);
_includeCSRCs = include;
return 0;
}
diff --git a/modules/rtp_rtcp/source/rtcp_sender.h b/modules/rtp_rtcp/source/rtcp_sender.h
index cbbc32aa..fad3b5e3 100644
--- a/modules/rtp_rtcp/source/rtcp_sender.h
+++ b/modules/rtp_rtcp/source/rtcp_sender.h
@@ -23,6 +23,7 @@
#include "webrtc/modules/rtp_rtcp/source/rtp_utility.h"
#include "webrtc/modules/rtp_rtcp/source/tmmbr_help.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
+#include "webrtc/system_wrappers/interface/thread_annotations.h"
#include "webrtc/typedefs.h"
namespace webrtc {
@@ -74,10 +75,6 @@ public:
ReceiveStatistics* receive_statistics);
virtual ~RTCPSender();
- void ChangeUniqueId(const int32_t id);
-
- int32_t Init();
-
int32_t RegisterSendTransport(Transport* outgoingTransport);
RTCPMethod Status() const;
@@ -100,7 +97,6 @@ public:
int32_t SetCameraDelay(const int32_t delayMS);
- int32_t CNAME(char cName[RTCP_CNAME_SIZE]);
int32_t SetCNAME(const char cName[RTCP_CNAME_SIZE]);
int32_t AddMixedCNAME(const uint32_t SSRC,
@@ -185,13 +181,12 @@ public:
private:
int32_t SendToNetwork(const uint8_t* dataBuffer, const uint16_t length);
- void UpdatePacketRate();
-
int32_t WriteAllReportBlocksToBuffer(uint8_t* rtcpbuffer,
int pos,
uint8_t& numberOfReportBlocks,
const uint32_t NTPsec,
- const uint32_t NTPfrac);
+ const uint32_t NTPfrac)
+ EXCLUSIVE_LOCKS_REQUIRED(_criticalSectionRTCPSender);
int32_t WriteReportBlocksToBuffer(
uint8_t* rtcpbuffer,
@@ -212,12 +207,14 @@ private:
uint8_t* rtcpbuffer,
int& pos,
uint32_t NTPsec,
- uint32_t NTPfrac);
+ uint32_t NTPfrac)
+ EXCLUSIVE_LOCKS_REQUIRED(_criticalSectionRTCPSender);
int32_t BuildRR(uint8_t* rtcpbuffer,
int& pos,
const uint32_t NTPsec,
- const uint32_t NTPfrac);
+ const uint32_t NTPfrac)
+ EXCLUSIVE_LOCKS_REQUIRED(_criticalSectionRTCPSender);
int PrepareRTCP(
const FeedbackState& feedback_state,
@@ -234,117 +231,136 @@ private:
int32_t BuildExtendedJitterReport(
uint8_t* rtcpbuffer,
int& pos,
- const uint32_t jitterTransmissionTimeOffset);
-
- int32_t BuildSDEC(uint8_t* rtcpbuffer, int& pos);
- int32_t BuildPLI(uint8_t* rtcpbuffer, int& pos);
- int32_t BuildREMB(uint8_t* rtcpbuffer, int& pos);
- int32_t BuildTMMBR(ModuleRtpRtcpImpl* module,
- uint8_t* rtcpbuffer,
- int& pos);
- int32_t BuildTMMBN(uint8_t* rtcpbuffer, int& pos);
- int32_t BuildAPP(uint8_t* rtcpbuffer, int& pos);
- int32_t BuildVoIPMetric(uint8_t* rtcpbuffer, int& pos);
- int32_t BuildBYE(uint8_t* rtcpbuffer, int& pos);
- int32_t BuildFIR(uint8_t* rtcpbuffer, int& pos, bool repeat);
- int32_t BuildSLI(uint8_t* rtcpbuffer,
- int& pos,
- const uint8_t pictureID);
+ const uint32_t jitterTransmissionTimeOffset)
+ EXCLUSIVE_LOCKS_REQUIRED(_criticalSectionRTCPSender);
+
+ int32_t BuildSDEC(uint8_t* rtcpbuffer, int& pos)
+ EXCLUSIVE_LOCKS_REQUIRED(_criticalSectionRTCPSender);
+ int32_t BuildPLI(uint8_t* rtcpbuffer, int& pos)
+ EXCLUSIVE_LOCKS_REQUIRED(_criticalSectionRTCPSender);
+ int32_t BuildREMB(uint8_t* rtcpbuffer, int& pos)
+ EXCLUSIVE_LOCKS_REQUIRED(_criticalSectionRTCPSender);
+ int32_t BuildTMMBR(ModuleRtpRtcpImpl* module, uint8_t* rtcpbuffer, int& pos)
+ EXCLUSIVE_LOCKS_REQUIRED(_criticalSectionRTCPSender);
+ int32_t BuildTMMBN(uint8_t* rtcpbuffer, int& pos)
+ EXCLUSIVE_LOCKS_REQUIRED(_criticalSectionRTCPSender);
+ int32_t BuildAPP(uint8_t* rtcpbuffer, int& pos)
+ EXCLUSIVE_LOCKS_REQUIRED(_criticalSectionRTCPSender);
+ int32_t BuildVoIPMetric(uint8_t* rtcpbuffer, int& pos)
+ EXCLUSIVE_LOCKS_REQUIRED(_criticalSectionRTCPSender);
+ int32_t BuildBYE(uint8_t* rtcpbuffer, int& pos)
+ EXCLUSIVE_LOCKS_REQUIRED(_criticalSectionRTCPSender);
+ int32_t BuildFIR(uint8_t* rtcpbuffer, int& pos, bool repeat)
+ EXCLUSIVE_LOCKS_REQUIRED(_criticalSectionRTCPSender);
+ int32_t BuildSLI(uint8_t* rtcpbuffer, int& pos, const uint8_t pictureID)
+ EXCLUSIVE_LOCKS_REQUIRED(_criticalSectionRTCPSender);
int32_t BuildRPSI(uint8_t* rtcpbuffer,
int& pos,
const uint64_t pictureID,
- const uint8_t payloadType);
+ const uint8_t payloadType)
+ EXCLUSIVE_LOCKS_REQUIRED(_criticalSectionRTCPSender);
int32_t BuildNACK(uint8_t* rtcpbuffer,
int& pos,
const int32_t nackSize,
const uint16_t* nackList,
- std::string* nackString);
-
+ std::string* nackString)
+ EXCLUSIVE_LOCKS_REQUIRED(_criticalSectionRTCPSender);
int32_t BuildReceiverReferenceTime(uint8_t* buffer,
int& pos,
uint32_t ntp_sec,
- uint32_t ntp_frac);
+ uint32_t ntp_frac)
+ EXCLUSIVE_LOCKS_REQUIRED(_criticalSectionRTCPSender);
int32_t BuildDlrr(uint8_t* buffer,
int& pos,
- const RtcpReceiveTimeInfo& info);
+ const RtcpReceiveTimeInfo& info)
+ EXCLUSIVE_LOCKS_REQUIRED(_criticalSectionRTCPSender);
private:
- int32_t _id;
+ const int32_t _id;
const bool _audio;
- Clock* _clock;
- RTCPMethod _method;
+ Clock* const _clock;
+ RTCPMethod _method GUARDED_BY(_criticalSectionRTCPSender);
CriticalSectionWrapper* _criticalSectionTransport;
- Transport* _cbTransport;
+ Transport* _cbTransport GUARDED_BY(_criticalSectionTransport);
CriticalSectionWrapper* _criticalSectionRTCPSender;
- bool _usingNack;
- bool _sending;
- bool _sendTMMBN;
- bool _REMB;
- bool _sendREMB;
- bool _TMMBR;
- bool _IJ;
-
- int64_t _nextTimeToSendRTCP;
-
- uint32_t start_timestamp_;
- uint32_t last_rtp_timestamp_;
- int64_t last_frame_capture_time_ms_;
- uint32_t _SSRC;
- uint32_t _remoteSSRC; // SSRC that we receive on our RTP channel
- char _CNAME[RTCP_CNAME_SIZE];
-
-
- ReceiveStatistics* receive_statistics_;
- std::map<uint32_t, RTCPReportBlock*> internal_report_blocks_;
- std::map<uint32_t, RTCPReportBlock*> external_report_blocks_;
- std::map<uint32_t, RTCPUtility::RTCPCnameInformation*> _csrcCNAMEs;
-
- int32_t _cameraDelayMS;
+ bool _usingNack GUARDED_BY(_criticalSectionRTCPSender);
+ bool _sending GUARDED_BY(_criticalSectionRTCPSender);
+ bool _sendTMMBN GUARDED_BY(_criticalSectionRTCPSender);
+ bool _REMB GUARDED_BY(_criticalSectionRTCPSender);
+ bool _sendREMB GUARDED_BY(_criticalSectionRTCPSender);
+ bool _TMMBR GUARDED_BY(_criticalSectionRTCPSender);
+ bool _IJ GUARDED_BY(_criticalSectionRTCPSender);
+
+ int64_t _nextTimeToSendRTCP GUARDED_BY(_criticalSectionRTCPSender);
+
+ uint32_t start_timestamp_ GUARDED_BY(_criticalSectionRTCPSender);
+ uint32_t last_rtp_timestamp_ GUARDED_BY(_criticalSectionRTCPSender);
+ int64_t last_frame_capture_time_ms_ GUARDED_BY(_criticalSectionRTCPSender);
+ uint32_t _SSRC GUARDED_BY(_criticalSectionRTCPSender);
+ // SSRC that we receive on our RTP channel
+ uint32_t _remoteSSRC GUARDED_BY(_criticalSectionRTCPSender);
+ char _CNAME[RTCP_CNAME_SIZE] GUARDED_BY(_criticalSectionRTCPSender);
+
+ ReceiveStatistics* receive_statistics_
+ GUARDED_BY(_criticalSectionRTCPSender);
+ std::map<uint32_t, RTCPReportBlock*> internal_report_blocks_
+ GUARDED_BY(_criticalSectionRTCPSender);
+ std::map<uint32_t, RTCPReportBlock*> external_report_blocks_
+ GUARDED_BY(_criticalSectionRTCPSender);
+ std::map<uint32_t, RTCPUtility::RTCPCnameInformation*> _csrcCNAMEs
+ GUARDED_BY(_criticalSectionRTCPSender);
+
+ int32_t _cameraDelayMS GUARDED_BY(_criticalSectionRTCPSender);
// Sent
- uint32_t _lastSendReport[RTCP_NUMBER_OF_SR]; // allow packet loss and RTT above 1 sec
- uint32_t _lastRTCPTime[RTCP_NUMBER_OF_SR];
+ uint32_t _lastSendReport[RTCP_NUMBER_OF_SR] GUARDED_BY(
+ _criticalSectionRTCPSender); // allow packet loss and RTT above 1 sec
+ uint32_t _lastRTCPTime[RTCP_NUMBER_OF_SR] GUARDED_BY(
+ _criticalSectionRTCPSender);
// Sent XR receiver reference time report.
// <mid ntp (mid 32 bits of the 64 bits NTP timestamp), send time in ms>.
- std::map<uint32_t, int64_t> last_xr_rr_;
+ std::map<uint32_t, int64_t> last_xr_rr_
+ GUARDED_BY(_criticalSectionRTCPSender);
// send CSRCs
- uint8_t _CSRCs;
- uint32_t _CSRC[kRtpCsrcSize];
- bool _includeCSRCs;
+ uint8_t _CSRCs GUARDED_BY(_criticalSectionRTCPSender);
+ uint32_t _CSRC[kRtpCsrcSize] GUARDED_BY(_criticalSectionRTCPSender);
+ bool _includeCSRCs GUARDED_BY(_criticalSectionRTCPSender);
// Full intra request
- uint8_t _sequenceNumberFIR;
+ uint8_t _sequenceNumberFIR GUARDED_BY(_criticalSectionRTCPSender);
- // REMB
- uint8_t _lengthRembSSRC;
- uint8_t _sizeRembSSRC;
- uint32_t* _rembSSRC;
- uint32_t _rembBitrate;
+ // REMB
+ uint8_t _lengthRembSSRC GUARDED_BY(_criticalSectionRTCPSender);
+ uint8_t _sizeRembSSRC GUARDED_BY(_criticalSectionRTCPSender);
+ uint32_t* _rembSSRC GUARDED_BY(_criticalSectionRTCPSender);
+ uint32_t _rembBitrate GUARDED_BY(_criticalSectionRTCPSender);
- TMMBRHelp _tmmbrHelp;
- uint32_t _tmmbr_Send;
- uint32_t _packetOH_Send;
+ TMMBRHelp _tmmbrHelp GUARDED_BY(_criticalSectionRTCPSender);
+ uint32_t _tmmbr_Send GUARDED_BY(_criticalSectionRTCPSender);
+ uint32_t _packetOH_Send GUARDED_BY(_criticalSectionRTCPSender);
// APP
- bool _appSend;
- uint8_t _appSubType;
- uint32_t _appName;
- uint8_t* _appData;
- uint16_t _appLength;
+ bool _appSend GUARDED_BY(_criticalSectionRTCPSender);
+ uint8_t _appSubType GUARDED_BY(_criticalSectionRTCPSender);
+ uint32_t _appName GUARDED_BY(_criticalSectionRTCPSender);
+ uint8_t* _appData GUARDED_BY(_criticalSectionRTCPSender);
+ uint16_t _appLength GUARDED_BY(_criticalSectionRTCPSender);
// True if sending of XR Receiver reference time report is enabled.
- bool xrSendReceiverReferenceTimeEnabled_;
+ bool xrSendReceiverReferenceTimeEnabled_
+ GUARDED_BY(_criticalSectionRTCPSender);
// XR VoIP metric
- bool _xrSendVoIPMetric;
- RTCPVoIPMetric _xrVoIPMetric;
+ bool _xrSendVoIPMetric GUARDED_BY(_criticalSectionRTCPSender);
+ RTCPVoIPMetric _xrVoIPMetric GUARDED_BY(_criticalSectionRTCPSender);
- RtcpPacketTypeCounter packet_type_counter_;
+ RtcpPacketTypeCounter packet_type_counter_
+ GUARDED_BY(_criticalSectionRTCPSender);
};
} // namespace webrtc
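For reference, the GUARDED_BY() and EXCLUSIVE_LOCKS_REQUIRED() macros used throughout the hunk above come from webrtc/system_wrappers/interface/thread_annotations.h and expand to Clang's thread-safety attributes, so building with -Wthread-safety turns missing-lock bugs into compile-time warnings. A minimal self-contained sketch of the pattern, assuming nothing from this change (the Lock and Counter classes and std::mutex are purely illustrative):

// Stand-ins for the macros defined in thread_annotations.h.
#if defined(__clang__)
#define THREAD_ANNOTATION(x) __attribute__((x))
#else
#define THREAD_ANNOTATION(x)
#endif
#define LOCKABLE THREAD_ANNOTATION(lockable)
#define EXCLUSIVE_LOCK_FUNCTION(...) \
  THREAD_ANNOTATION(exclusive_lock_function(__VA_ARGS__))
#define UNLOCK_FUNCTION(...) THREAD_ANNOTATION(unlock_function(__VA_ARGS__))
#define GUARDED_BY(x) THREAD_ANNOTATION(guarded_by(x))
#define EXCLUSIVE_LOCKS_REQUIRED(...) \
  THREAD_ANNOTATION(exclusive_locks_required(__VA_ARGS__))

#include <mutex>

// A lock type the analysis understands (std::mutex used only for the sketch;
// the commit uses CriticalSectionWrapper).
class LOCKABLE Lock {
 public:
  void Enter() EXCLUSIVE_LOCK_FUNCTION() { m_.lock(); }
  void Leave() UNLOCK_FUNCTION() { m_.unlock(); }

 private:
  std::mutex m_;
};

class Counter {
 public:
  void Increment() {
    lock_.Enter();
    IncrementLocked();  // OK: the analysis can prove |lock_| is held here.
    lock_.Leave();
  }

 private:
  // With -Wthread-safety, Clang warns at any call site that cannot prove
  // |lock_| is held when this is called.
  void IncrementLocked() EXCLUSIVE_LOCKS_REQUIRED(lock_) { ++value_; }

  Lock lock_;
  int value_ GUARDED_BY(lock_);  // Only read or written with |lock_| held.
};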
diff --git a/modules/rtp_rtcp/source/rtcp_sender_unittest.cc b/modules/rtp_rtcp/source/rtcp_sender_unittest.cc
index dfb655c5..cba1c346 100644
--- a/modules/rtp_rtcp/source/rtcp_sender_unittest.cc
+++ b/modules/rtp_rtcp/source/rtcp_sender_unittest.cc
@@ -304,7 +304,6 @@ class RtcpSenderTest : public ::testing::Test {
rtcp_receiver_ = new RTCPReceiver(0, &clock_, rtp_rtcp_impl_);
test_transport_->SetRTCPReceiver(rtcp_receiver_);
// Initialize
- EXPECT_EQ(0, rtcp_sender_->Init());
EXPECT_EQ(0, rtcp_sender_->RegisterSendTransport(test_transport_));
}
~RtcpSenderTest() {
diff --git a/modules/rtp_rtcp/source/rtp_format_vp8.cc b/modules/rtp_rtcp/source/rtp_format_vp8.cc
index 360176b3..9c04c25a 100644
--- a/modules/rtp_rtcp/source/rtp_format_vp8.cc
+++ b/modules/rtp_rtcp/source/rtp_format_vp8.cc
@@ -420,7 +420,7 @@ int RtpFormatVp8::WriteTIDAndKeyIdxFields(uint8_t* x_field,
*data_field = 0;
if (TIDFieldPresent()) {
*x_field |= kTBit;
- assert(hdr_info_.temporalIdx >= 0 && hdr_info_.temporalIdx <= 3);
+ assert(hdr_info_.temporalIdx <= 3);
*data_field |= hdr_info_.temporalIdx << 6;
*data_field |= hdr_info_.layerSync ? kYBit : 0;
}
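Dropping the ">= 0" clause from the assert above is consistent with temporalIdx being an unsigned field in this revision (the unit-test hunks later in this diff read it into a uint8_t): comparing an unsigned value against zero is always true and typically draws a tautological-comparison warning. A minimal illustration, with the struct below assumed for the sketch rather than copied from the header:

#include <cassert>
#include <cstdint>

struct Vp8HeaderInfo {
  uint8_t temporalIdx;  // Assumed unsigned, as in the tests later in this diff.
};

void WriteTid(const Vp8HeaderInfo& hdr) {
  // For an unsigned field, 'hdr.temporalIdx >= 0' is always true, so only the
  // upper bound (two TID bits, values 0..3) is worth asserting.
  assert(hdr.temporalIdx <= 3);
}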
diff --git a/modules/rtp_rtcp/source/rtp_rtcp_impl.cc b/modules/rtp_rtcp/source/rtp_rtcp_impl.cc
index 855d51b7..349340f5 100644
--- a/modules/rtp_rtcp/source/rtp_rtcp_impl.cc
+++ b/modules/rtp_rtcp/source/rtp_rtcp_impl.cc
@@ -38,20 +38,21 @@ RtpRtcp::Configuration::Configuration()
audio_messages(NullObjectRtpAudioFeedback()),
remote_bitrate_estimator(NULL),
paced_sender(NULL),
- send_bitrate_observer(NULL) {
+ send_bitrate_observer(NULL),
+ send_frame_count_observer(NULL),
+ send_side_delay_observer(NULL) {
}
RtpRtcp* RtpRtcp::CreateRtpRtcp(const RtpRtcp::Configuration& configuration) {
if (configuration.clock) {
return new ModuleRtpRtcpImpl(configuration);
} else {
+ // No clock implementation provided, use default clock.
RtpRtcp::Configuration configuration_copy;
memcpy(&configuration_copy, &configuration,
sizeof(RtpRtcp::Configuration));
configuration_copy.clock = Clock::GetRealTimeClock();
- ModuleRtpRtcpImpl* rtp_rtcp_instance =
- new ModuleRtpRtcpImpl(configuration_copy);
- return rtp_rtcp_instance;
+ return new ModuleRtpRtcpImpl(configuration_copy);
}
}
@@ -62,7 +63,9 @@ ModuleRtpRtcpImpl::ModuleRtpRtcpImpl(const Configuration& configuration)
configuration.outgoing_transport,
configuration.audio_messages,
configuration.paced_sender,
- configuration.send_bitrate_observer),
+ configuration.send_bitrate_observer,
+ configuration.send_frame_count_observer,
+ configuration.send_side_delay_observer),
rtcp_sender_(configuration.id,
configuration.audio,
configuration.clock,
@@ -82,7 +85,7 @@ ModuleRtpRtcpImpl::ModuleRtpRtcpImpl(const Configuration& configuration)
CriticalSectionWrapper::CreateCriticalSection()),
default_module_(
static_cast<ModuleRtpRtcpImpl*>(configuration.default_module)),
- padding_index_(-1), // Start padding at the first child module.
+ padding_index_(static_cast<size_t>(-1)), // Start padding at first child.
nack_method_(kNackOff),
nack_last_time_sent_full_(0),
nack_last_seq_number_sent_(0),
@@ -722,10 +725,6 @@ int32_t ModuleRtpRtcpImpl::SetCNAME(const char c_name[RTCP_CNAME_SIZE]) {
return rtcp_sender_.SetCNAME(c_name);
}
-int32_t ModuleRtpRtcpImpl::CNAME(char c_name[RTCP_CNAME_SIZE]) {
- return rtcp_sender_.CNAME(c_name);
-}
-
int32_t ModuleRtpRtcpImpl::AddMixedCNAME(
const uint32_t ssrc,
const char c_name[RTCP_CNAME_SIZE]) {
@@ -1350,15 +1349,6 @@ StreamDataCountersCallback*
return rtp_sender_.GetRtpStatisticsCallback();
}
-void ModuleRtpRtcpImpl::RegisterSendFrameCountObserver(
- FrameCountObserver* observer) {
- rtp_sender_.RegisterFrameCountObserver(observer);
-}
-
-FrameCountObserver* ModuleRtpRtcpImpl::GetSendFrameCountObserver() const {
- return rtp_sender_.GetFrameCountObserver();
-}
-
bool ModuleRtpRtcpImpl::IsDefaultModule() const {
CriticalSectionScoped cs(critical_section_module_ptrs_.get());
return !child_modules_.empty();
diff --git a/modules/rtp_rtcp/source/rtp_rtcp_impl.h b/modules/rtp_rtcp/source/rtp_rtcp_impl.h
index b65131fb..7e7ea027 100644
--- a/modules/rtp_rtcp/source/rtp_rtcp_impl.h
+++ b/modules/rtp_rtcp/source/rtp_rtcp_impl.h
@@ -148,9 +148,6 @@ class ModuleRtpRtcpImpl : public RtpRtcp {
// Set RTCP CName.
virtual int32_t SetCNAME(const char c_name[RTCP_CNAME_SIZE]) OVERRIDE;
- // Get RTCP CName.
- virtual int32_t CNAME(char c_name[RTCP_CNAME_SIZE]) OVERRIDE;
-
// Get remote CName.
virtual int32_t RemoteCNAME(const uint32_t remote_ssrc,
char c_name[RTCP_CNAME_SIZE]) const OVERRIDE;
@@ -373,10 +370,6 @@ class ModuleRtpRtcpImpl : public RtpRtcp {
void OnRequestSendReport();
- virtual void RegisterSendFrameCountObserver(
- FrameCountObserver* observer) OVERRIDE;
- virtual FrameCountObserver* GetSendFrameCountObserver() const OVERRIDE;
-
protected:
void RegisterChildModule(RtpRtcp* module);
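The two hunks above (and the matching RTPSender changes that follow) replace the RegisterSendFrameCountObserver()/GetSendFrameCountObserver() setter pair with observers handed in through the Configuration struct at construction time, so the pointers can live as const members and be read without taking a lock. A rough sketch of that construction-injection pattern, with invented names and an invented observer signature:

#include <cstdint>

// Invented observer interface, for illustration only.
class CountObserver {
 public:
  virtual ~CountObserver() {}
  virtual void CountUpdated(uint32_t key_frames, uint32_t delta_frames) = 0;
};

struct Config {
  CountObserver* count_observer = nullptr;  // May be left null.
};

class Sender {
 public:
  explicit Sender(const Config& config)
      : count_observer_(config.count_observer) {}

  void OnFrameSent(uint32_t key_frames, uint32_t delta_frames) {
    // The pointer never changes after construction, so reading it needs no
    // lock; only the counters themselves would need synchronization.
    if (count_observer_)
      count_observer_->CountUpdated(key_frames, delta_frames);
  }

 private:
  CountObserver* const count_observer_;
};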
diff --git a/modules/rtp_rtcp/source/rtp_sender.cc b/modules/rtp_rtcp/source/rtp_sender.cc
index 858fc42a..c24b15a3 100644
--- a/modules/rtp_rtcp/source/rtp_sender.cc
+++ b/modules/rtp_rtcp/source/rtp_sender.cc
@@ -46,7 +46,9 @@ RTPSender::RTPSender(const int32_t id,
Transport* transport,
RtpAudioFeedback* audio_feedback,
PacedSender* paced_sender,
- BitrateStatisticsObserver* bitrate_callback)
+ BitrateStatisticsObserver* bitrate_callback,
+ FrameCountObserver* frame_count_observer,
+ SendSideDelayObserver* send_side_delay_observer)
: clock_(clock),
bitrate_sent_(clock, this),
id_(id),
@@ -71,9 +73,10 @@ RTPSender::RTPSender(const int32_t id,
packet_history_(clock),
// Statistics
statistics_crit_(CriticalSectionWrapper::CreateCriticalSection()),
- frame_count_observer_(NULL),
rtp_stats_callback_(NULL),
bitrate_callback_(bitrate_callback),
+ frame_count_observer_(frame_count_observer),
+ send_side_delay_observer_(send_side_delay_observer),
// RTP variables
start_timestamp_forced_(false),
start_timestamp_(0),
@@ -163,9 +166,7 @@ uint32_t RTPSender::NackOverheadRate() const {
bool RTPSender::GetSendSideDelay(int* avg_send_delay_ms,
int* max_send_delay_ms) const {
- if (!SendingMedia())
- return false;
- CriticalSectionScoped cs(statistics_crit_.get());
+ CriticalSectionScoped lock(statistics_crit_.get());
SendDelayMap::const_iterator it = send_delays_.upper_bound(
clock_->TimeInMilliseconds() - kSendSideDelayWindowMs);
if (it == send_delays_.end())
@@ -508,7 +509,7 @@ bool RTPSender::SendPaddingAccordingToBitrate(
last_timestamp_time_ms_ = clock_->TimeInMilliseconds();
}
int bytes_sent = SendPadData(payload_type, timestamp, capture_time_ms,
- bytes, kDontRetransmit, false, false);
+ bytes, false, false);
// We did not manage to send all bytes. Comparing with 31 due to modulus 32.
return bytes - bytes_sent < 31;
}
@@ -532,10 +533,12 @@ int RTPSender::BuildPaddingPacket(uint8_t* packet, int header_length,
return padding_bytes_in_packet;
}
-int RTPSender::SendPadData(int payload_type, uint32_t timestamp,
- int64_t capture_time_ms, int32_t bytes,
- StorageType store, bool force_full_size_packets,
- bool only_pad_after_markerbit) {
+int RTPSender::SendPadData(int payload_type,
+ uint32_t timestamp,
+ int64_t capture_time_ms,
+ int32_t bytes,
+ bool force_full_size_packets,
+ bool over_rtx) {
// Drop this packet if we're not sending media packets.
if (!SendingMedia()) {
return bytes;
@@ -564,7 +567,7 @@ int RTPSender::SendPadData(int payload_type, uint32_t timestamp,
CriticalSectionScoped cs(send_critsect_);
// Only send padding packets following the last packet of a frame,
// indicated by the marker bit.
- if (only_pad_after_markerbit && !last_packet_marker_bit_)
+ if (!over_rtx && !last_packet_marker_bit_)
return bytes_sent;
if (rtx_ == kRtxOff) {
ssrc = ssrc_;
@@ -578,19 +581,35 @@ int RTPSender::SendPadData(int payload_type, uint32_t timestamp,
}
uint8_t padding_packet[IP_PACKET_SIZE];
- int header_length = CreateRTPHeader(padding_packet, payload_type, ssrc,
- false, timestamp, sequence_number, NULL,
+ int header_length = CreateRTPHeader(padding_packet,
+ payload_type,
+ ssrc,
+ false,
+ timestamp,
+ sequence_number,
+ NULL,
0);
- padding_bytes_in_packet = BuildPaddingPacket(padding_packet, header_length,
- bytes);
- if (0 > SendToNetwork(padding_packet, padding_bytes_in_packet,
- header_length, capture_time_ms, store,
- PacedSender::kLowPriority)) {
- // Error sending the packet.
- break;
+ padding_bytes_in_packet =
+ BuildPaddingPacket(padding_packet, header_length, bytes);
+ int length = padding_bytes_in_packet + header_length;
+ int64_t now_ms = clock_->TimeInMilliseconds();
+
+ RtpUtility::RtpHeaderParser rtp_parser(padding_packet, length);
+ RTPHeader rtp_header;
+ rtp_parser.Parse(rtp_header);
+
+ if (capture_time_ms > 0) {
+ UpdateTransmissionTimeOffset(
+ padding_packet, length, rtp_header, now_ms - capture_time_ms);
}
+
+ UpdateAbsoluteSendTime(padding_packet, length, rtp_header, now_ms);
+ if (!SendPacketToNetwork(padding_packet, length))
+ break;
bytes_sent += padding_bytes_in_packet;
+ UpdateRtpStats(padding_packet, length, rtp_header, over_rtx, false);
}
+
return bytes_sent;
}
@@ -918,9 +937,8 @@ int RTPSender::TimeToSendPadding(int bytes) {
timestamp,
capture_time_ms,
bytes,
- kDontStore,
true,
- rtx == kRtxOff);
+ rtx != kRtxOff);
bytes_sent += padding_sent;
}
return bytes_sent;
@@ -979,10 +997,26 @@ int32_t RTPSender::SendToNetwork(
}
void RTPSender::UpdateDelayStatistics(int64_t capture_time_ms, int64_t now_ms) {
- CriticalSectionScoped cs(statistics_crit_.get());
- send_delays_[now_ms] = now_ms - capture_time_ms;
- send_delays_.erase(send_delays_.begin(),
- send_delays_.lower_bound(now_ms - kSendSideDelayWindowMs));
+ uint32_t ssrc;
+ int avg_delay_ms = 0;
+ int max_delay_ms = 0;
+ {
+ CriticalSectionScoped lock(send_critsect_);
+ ssrc = ssrc_;
+ }
+ {
+ CriticalSectionScoped cs(statistics_crit_.get());
+ // TODO(holmer): Compute this iteratively instead.
+ send_delays_[now_ms] = now_ms - capture_time_ms;
+ send_delays_.erase(send_delays_.begin(),
+ send_delays_.lower_bound(now_ms -
+ kSendSideDelayWindowMs));
+ }
+ if (send_side_delay_observer_ &&
+ GetSendSideDelay(&avg_delay_ms, &max_delay_ms)) {
+ send_side_delay_observer_->SendSideDelayUpdated(avg_delay_ms,
+ max_delay_ms, ssrc);
+ }
}
void RTPSender::ProcessBitrate() {
@@ -1035,7 +1069,9 @@ uint32_t RTPSender::Packets() const {
// Number of sent RTP bytes.
uint32_t RTPSender::Bytes() const {
CriticalSectionScoped lock(statistics_crit_.get());
- return rtp_stats_.bytes + rtx_rtp_stats_.bytes;
+ return rtp_stats_.bytes + rtp_stats_.header_bytes + rtp_stats_.padding_bytes +
+ rtx_rtp_stats_.bytes + rtx_rtp_stats_.header_bytes +
+ rtx_rtp_stats_.padding_bytes;
}
int RTPSender::CreateRTPHeader(
@@ -1664,16 +1700,6 @@ void RTPSender::BuildRtxPacket(uint8_t* buffer, uint16_t* length,
*length += 2;
}
-void RTPSender::RegisterFrameCountObserver(FrameCountObserver* observer) {
- CriticalSectionScoped cs(statistics_crit_.get());
- frame_count_observer_ = observer;
-}
-
-FrameCountObserver* RTPSender::GetFrameCountObserver() const {
- CriticalSectionScoped cs(statistics_crit_.get());
- return frame_count_observer_;
-}
-
void RTPSender::RegisterRtpStatisticsCallback(
StreamDataCountersCallback* callback) {
CriticalSectionScoped cs(statistics_crit_.get());
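UpdateDelayStatistics() above records each packet's send-side delay in a map keyed by send time, trims entries older than kSendSideDelayWindowMs, and then reports the windowed average and maximum through the new SendSideDelayObserver. A compact sketch of that sliding-window bookkeeping, assuming a 1-second window (the class and constant below are illustrative, not the module's code):

#include <cstdint>
#include <map>

namespace {
const int64_t kSendSideDelayWindowMs = 1000;  // Window length assumed for the sketch.
}  // namespace

class SendDelayTracker {
 public:
  // |now_ms| is the send time, |capture_time_ms| the frame's capture time.
  void AddPacket(int64_t capture_time_ms, int64_t now_ms) {
    delays_[now_ms] = now_ms - capture_time_ms;
    // Drop entries that fell out of the window.
    delays_.erase(delays_.begin(),
                  delays_.lower_bound(now_ms - kSendSideDelayWindowMs));
  }

  // Returns false if nothing was sent inside the window.
  bool GetStats(int64_t now_ms, int* avg_ms, int* max_ms) const {
    std::map<int64_t, int64_t>::const_iterator it =
        delays_.upper_bound(now_ms - kSendSideDelayWindowMs);
    if (it == delays_.end())
      return false;
    int64_t sum = 0;
    int64_t max_delay = 0;
    int count = 0;
    for (; it != delays_.end(); ++it) {
      sum += it->second;
      if (it->second > max_delay)
        max_delay = it->second;
      ++count;
    }
    *avg_ms = static_cast<int>(sum / count);
    *max_ms = static_cast<int>(max_delay);
    return true;
  }

 private:
  // send time (ms) -> send-side delay (ms). Packets sent in the same
  // millisecond overwrite each other, matching the map-based approach above.
  std::map<int64_t, int64_t> delays_;
};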
diff --git a/modules/rtp_rtcp/source/rtp_sender.h b/modules/rtp_rtcp/source/rtp_sender.h
index 0cc35cf4..4a9e10ed 100644
--- a/modules/rtp_rtcp/source/rtp_sender.h
+++ b/modules/rtp_rtcp/source/rtp_sender.h
@@ -70,7 +70,9 @@ class RTPSender : public RTPSenderInterface, public Bitrate::Observer {
RTPSender(const int32_t id, const bool audio, Clock *clock,
Transport *transport, RtpAudioFeedback *audio_feedback,
PacedSender *paced_sender,
- BitrateStatisticsObserver* bitrate_callback);
+ BitrateStatisticsObserver* bitrate_callback,
+ FrameCountObserver* frame_count_observer,
+ SendSideDelayObserver* send_side_delay_observer);
virtual ~RTPSender();
void ProcessBitrate();
@@ -265,12 +267,12 @@ class RTPSender : public RTPSenderInterface, public Bitrate::Observer {
int32_t SetFecParameters(const FecProtectionParams *delta_params,
const FecProtectionParams *key_params);
- virtual void RegisterFrameCountObserver(FrameCountObserver* observer);
- virtual FrameCountObserver* GetFrameCountObserver() const;
-
- int SendPadData(int payload_type, uint32_t timestamp, int64_t capture_time_ms,
- int32_t bytes, StorageType store,
- bool force_full_size_packets, bool only_pad_after_markerbit);
+ int SendPadData(int payload_type,
+ uint32_t timestamp,
+ int64_t capture_time_ms,
+ int32_t bytes,
+ bool force_full_size_packets,
+ bool only_pad_after_markerbit);
// Called on update of RTP statistics.
void RegisterRtpStatisticsCallback(StreamDataCountersCallback* callback);
@@ -373,11 +375,12 @@ class RTPSender : public RTPSenderInterface, public Bitrate::Observer {
scoped_ptr<CriticalSectionWrapper> statistics_crit_;
SendDelayMap send_delays_ GUARDED_BY(statistics_crit_);
std::map<FrameType, uint32_t> frame_counts_ GUARDED_BY(statistics_crit_);
- FrameCountObserver* frame_count_observer_ GUARDED_BY(statistics_crit_);
StreamDataCounters rtp_stats_ GUARDED_BY(statistics_crit_);
StreamDataCounters rtx_rtp_stats_ GUARDED_BY(statistics_crit_);
StreamDataCountersCallback* rtp_stats_callback_ GUARDED_BY(statistics_crit_);
BitrateStatisticsObserver* const bitrate_callback_;
+ FrameCountObserver* const frame_count_observer_;
+ SendSideDelayObserver* const send_side_delay_observer_;
// RTP variables
bool start_timestamp_forced_ GUARDED_BY(send_critsect_);
diff --git a/modules/rtp_rtcp/source/rtp_sender_unittest.cc b/modules/rtp_rtcp/source/rtp_sender_unittest.cc
index e08aa202..40b10548 100644
--- a/modules/rtp_rtcp/source/rtp_sender_unittest.cc
+++ b/modules/rtp_rtcp/source/rtp_sender_unittest.cc
@@ -62,13 +62,12 @@ uint64_t ConvertMsToAbsSendTime(int64_t time_ms) {
class LoopbackTransportTest : public webrtc::Transport {
public:
LoopbackTransportTest()
- : packets_sent_(0),
- last_sent_packet_len_(0) {
- }
+ : packets_sent_(0), last_sent_packet_len_(0), total_bytes_sent_(0) {}
virtual int SendPacket(int channel, const void *data, int len) {
packets_sent_++;
memcpy(last_sent_packet_, data, len);
last_sent_packet_len_ = len;
+ total_bytes_sent_ += static_cast<size_t>(len);
return len;
}
virtual int SendRTCPPacket(int channel, const void *data, int len) {
@@ -76,6 +75,7 @@ class LoopbackTransportTest : public webrtc::Transport {
}
int packets_sent_;
int last_sent_packet_len_;
+ size_t total_bytes_sent_;
uint8_t last_sent_packet_[kMaxPacketLength];
};
@@ -94,7 +94,7 @@ class RtpSenderTest : public ::testing::Test {
virtual void SetUp() {
rtp_sender_.reset(new RTPSender(0, false, &fake_clock_, &transport_, NULL,
- &mock_paced_sender_, NULL));
+ &mock_paced_sender_, NULL, NULL, NULL));
rtp_sender_->SetSequenceNumber(kSeqNum);
}
@@ -672,7 +672,7 @@ TEST_F(RtpSenderTest, SendPadding) {
TEST_F(RtpSenderTest, SendRedundantPayloads) {
MockTransport transport;
rtp_sender_.reset(new RTPSender(0, false, &fake_clock_, &transport, NULL,
- &mock_paced_sender_, NULL));
+ &mock_paced_sender_, NULL, NULL, NULL));
rtp_sender_->SetSequenceNumber(kSeqNum);
// Make all packets go through the pacer.
EXPECT_CALL(mock_paced_sender_,
@@ -817,6 +817,9 @@ TEST_F(RtpSenderTest, FrameCountCallbacks) {
uint32_t delta_frames_;
} callback;
+ rtp_sender_.reset(new RTPSender(0, false, &fake_clock_, &transport_, NULL,
+ &mock_paced_sender_, NULL, &callback, NULL));
+
char payload_name[RTP_PAYLOAD_NAME_SIZE] = "GENERIC";
const uint8_t payload_type = 127;
ASSERT_EQ(0, rtp_sender_->RegisterPayload(payload_name, payload_type, 90000,
@@ -825,8 +828,6 @@ TEST_F(RtpSenderTest, FrameCountCallbacks) {
rtp_sender_->SetStorePacketsStatus(true, 1);
uint32_t ssrc = rtp_sender_->SSRC();
- rtp_sender_->RegisterFrameCountObserver(&callback);
-
ASSERT_EQ(0, rtp_sender_->SendOutgoingData(kVideoFrameKey, payload_type, 1234,
4321, payload, sizeof(payload),
NULL));
@@ -845,7 +846,7 @@ TEST_F(RtpSenderTest, FrameCountCallbacks) {
EXPECT_EQ(1U, callback.key_frames_);
EXPECT_EQ(1U, callback.delta_frames_);
- rtp_sender_->RegisterFrameCountObserver(NULL);
+ rtp_sender_.reset();
}
TEST_F(RtpSenderTest, BitrateCallbacks) {
@@ -866,7 +867,7 @@ TEST_F(RtpSenderTest, BitrateCallbacks) {
BitrateStatistics bitrate_;
} callback;
rtp_sender_.reset(new RTPSender(0, false, &fake_clock_, &transport_, NULL,
- &mock_paced_sender_, &callback));
+ &mock_paced_sender_, &callback, NULL, NULL));
// Simulate kNumPackets sent with kPacketInterval ms intervals.
const uint32_t kNumPackets = 15;
@@ -922,7 +923,7 @@ class RtpSenderAudioTest : public RtpSenderTest {
virtual void SetUp() {
payload_ = kAudioPayload;
rtp_sender_.reset(new RTPSender(0, true, &fake_clock_, &transport_, NULL,
- &mock_paced_sender_, NULL));
+ &mock_paced_sender_, NULL, NULL, NULL));
rtp_sender_->SetSequenceNumber(kSeqNum);
}
};
@@ -1070,4 +1071,35 @@ TEST_F(RtpSenderAudioTest, SendAudioWithAudioLevelExtension) {
sizeof(extension)));
}
+TEST_F(RtpSenderTest, BytesReportedCorrectly) {
+ const char* kPayloadName = "GENERIC";
+ const uint8_t kPayloadType = 127;
+ rtp_sender_->SetSSRC(1234);
+ rtp_sender_->SetRtxSsrc(4321);
+ rtp_sender_->SetRtxPayloadType(kPayloadType - 1);
+ rtp_sender_->SetRTXStatus(kRtxRetransmitted | kRtxRedundantPayloads);
+
+ ASSERT_EQ(
+ 0,
+ rtp_sender_->RegisterPayload(kPayloadName, kPayloadType, 90000, 0, 1500));
+ uint8_t payload[] = {47, 11, 32, 93, 89};
+
+ ASSERT_EQ(0,
+ rtp_sender_->SendOutgoingData(kVideoFrameKey,
+ kPayloadType,
+ 1234,
+ 4321,
+ payload,
+ sizeof(payload),
+ 0));
+
+ EXPECT_GT(transport_.total_bytes_sent_, 0u);
+ EXPECT_EQ(transport_.total_bytes_sent_, rtp_sender_->Bytes());
+ size_t last_bytes_sent = transport_.total_bytes_sent_;
+
+ rtp_sender_->TimeToSendPadding(42);
+
+ EXPECT_GT(transport_.total_bytes_sent_, last_bytes_sent);
+ EXPECT_EQ(transport_.total_bytes_sent_, rtp_sender_->Bytes());
+}
} // namespace webrtc
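The new BytesReportedCorrectly test pins down what RTPSender::Bytes() now returns: payload, header, and padding bytes for both the media stream and its RTX stream, i.e. everything handed to the transport, which is why it can be compared against the loopback transport's total_bytes_sent_. A tiny sketch of that accounting (the counter struct below is a stand-in, not the real StreamDataCounters):

#include <cstdint>

// Stand-in for the per-stream byte counters kept by the sender.
struct ByteCounters {
  uint32_t payload_bytes = 0;
  uint32_t header_bytes = 0;
  uint32_t padding_bytes = 0;
};

// Total bytes put on the wire for a media stream plus its RTX stream.
uint32_t TotalBytesSent(const ByteCounters& rtp, const ByteCounters& rtx) {
  return rtp.payload_bytes + rtp.header_bytes + rtp.padding_bytes +
         rtx.payload_bytes + rtx.header_bytes + rtx.padding_bytes;
}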
diff --git a/modules/rtp_rtcp/source/rtp_sender_video.cc b/modules/rtp_rtcp/source/rtp_sender_video.cc
index ea5f7a7e..c53dd21d 100644
--- a/modules/rtp_rtcp/source/rtp_sender_video.cc
+++ b/modules/rtp_rtcp/source/rtp_sender_video.cc
@@ -434,17 +434,18 @@ RTPSenderVideo::SendVP8(const FrameType frameType,
if (rtpTypeHdr->VP8.temporalIdx == 0 &&
!(_retransmissionSettings & kRetransmitBaseLayer)) {
storage = kDontRetransmit;
- }
- if (rtpTypeHdr->VP8.temporalIdx > 0 &&
+ } else if (rtpTypeHdr->VP8.temporalIdx != kNoTemporalIdx &&
!(_retransmissionSettings & kRetransmitHigherLayers)) {
storage = kDontRetransmit;
}
bool last = false;
_numberFirstPartition = 0;
- // |rtpTypeHdr->VP8.temporalIdx| is zero for base layers, or -1 if the field
- // isn't used. We currently only protect base layers.
- bool protect = (rtpTypeHdr->VP8.temporalIdx < 1);
+ // |rtpTypeHdr->VP8.temporalIdx| is zero for base layers, or kNoTemporalIdx
+ // if the field isn't used (so all layers are the base layer). We currently
+ // only protect base layers, so look for these two cases.
+ bool protect = rtpTypeHdr->VP8.temporalIdx == 0 ||
+ rtpTypeHdr->VP8.temporalIdx == kNoTemporalIdx;
while (!last)
{
// Write VP8 Payload Descriptor and VP8 payload.
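The rewritten block above makes two per-packet decisions from the VP8 temporal index, where kNoTemporalIdx means temporal layers are not in use: whether the packet may be retransmitted at all, and whether it is FEC-protected (base layer only). A condensed sketch of the same two checks, with the flag values and the kNoTemporalIdx constant invented for the example:

#include <cstdint>

// Illustrative stand-ins for the real constants.
const uint8_t kNoTemporalIdx = 0xFF;       // Temporal layers not in use.
const int kRetransmitBaseLayer = 1 << 0;
const int kRetransmitHigherLayers = 1 << 1;

// Mirrors the two retransmission checks in the hunk above.
bool AllowRetransmission(uint8_t temporal_idx, int retransmission_settings) {
  if (temporal_idx == 0 &&
      !(retransmission_settings & kRetransmitBaseLayer)) {
    return false;
  } else if (temporal_idx != kNoTemporalIdx &&
             !(retransmission_settings & kRetransmitHigherLayers)) {
    return false;
  }
  return true;
}

bool ProtectWithFec(uint8_t temporal_idx) {
  // Only the base layer is protected; when temporal layers are unused every
  // packet effectively belongs to the base layer.
  return temporal_idx == 0 || temporal_idx == kNoTemporalIdx;
}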
diff --git a/modules/rtp_rtcp/source/tmmbr_help.cc b/modules/rtp_rtcp/source/tmmbr_help.cc
index ecdf6b87..fb1ed625 100644
--- a/modules/rtp_rtcp/source/tmmbr_help.cc
+++ b/modules/rtp_rtcp/source/tmmbr_help.cc
@@ -266,180 +266,177 @@ TMMBRHelp::FindTMMBRBoundingSet(int32_t numCandidates, TMMBRSet& candidateSet)
numBoundingSet++;
}
}
- if (numBoundingSet != 1)
- {
- numBoundingSet = -1;
- }
- } else
+ return (numBoundingSet == 1) ? 1 : -1;
+ }
+
+ // 1. Sort by increasing packetOH
+ for (int i = candidateSet.sizeOfSet() - 1; i >= 0; i--)
{
- // 1. Sort by increasing packetOH
- for (int i = candidateSet.sizeOfSet() - 1; i >= 0; i--)
+ for (int j = 1; j <= i; j++)
{
- for (int j = 1; j <= i; j++)
+ if (candidateSet.PacketOH(j-1) > candidateSet.PacketOH(j))
{
- if (candidateSet.PacketOH(j-1) > candidateSet.PacketOH(j))
- {
- candidateSet.SwapEntries(j-1, j);
- }
+ candidateSet.SwapEntries(j-1, j);
}
}
- // 2. For tuples with same OH, keep the one w/ the lowest bitrate
- for (uint32_t i = 0; i < candidateSet.sizeOfSet(); i++)
+ }
+ // 2. For tuples with same OH, keep the one w/ the lowest bitrate
+ for (uint32_t i = 0; i < candidateSet.sizeOfSet(); i++)
+ {
+ if (candidateSet.Tmmbr(i) > 0)
{
- if (candidateSet.Tmmbr(i) > 0)
+ // get min bitrate for packets w/ same OH
+ uint32_t currentPacketOH = candidateSet.PacketOH(i);
+ uint32_t currentMinTMMBR = candidateSet.Tmmbr(i);
+ uint32_t currentMinIndexTMMBR = i;
+ for (uint32_t j = i+1; j < candidateSet.sizeOfSet(); j++)
{
- // get min bitrate for packets w/ same OH
- uint32_t currentPacketOH = candidateSet.PacketOH(i);
- uint32_t currentMinTMMBR = candidateSet.Tmmbr(i);
- uint32_t currentMinIndexTMMBR = i;
- for (uint32_t j = i+1; j < candidateSet.sizeOfSet(); j++)
+ if(candidateSet.PacketOH(j) == currentPacketOH)
{
- if(candidateSet.PacketOH(j) == currentPacketOH)
+ if(candidateSet.Tmmbr(j) < currentMinTMMBR)
{
- if(candidateSet.Tmmbr(j) < currentMinTMMBR)
- {
- currentMinTMMBR = candidateSet.Tmmbr(j);
- currentMinIndexTMMBR = j;
- }
+ currentMinTMMBR = candidateSet.Tmmbr(j);
+ currentMinIndexTMMBR = j;
}
}
- // keep lowest bitrate
- for (uint32_t j = 0; j < candidateSet.sizeOfSet(); j++)
+ }
+ // keep lowest bitrate
+ for (uint32_t j = 0; j < candidateSet.sizeOfSet(); j++)
+ {
+ if(candidateSet.PacketOH(j) == currentPacketOH
+ && j != currentMinIndexTMMBR)
{
- if(candidateSet.PacketOH(j) == currentPacketOH
- && j != currentMinIndexTMMBR)
- {
- candidateSet.ClearEntry(j);
- }
+ candidateSet.ClearEntry(j);
}
}
}
- // 3. Select and remove tuple w/ lowest tmmbr.
- // (If more than 1, choose the one w/ highest OH).
- uint32_t minTMMBR = 0;
- uint32_t minIndexTMMBR = 0;
- for (uint32_t i = 0; i < candidateSet.sizeOfSet(); i++)
+ }
+ // 3. Select and remove tuple w/ lowest tmmbr.
+ // (If more than 1, choose the one w/ highest OH).
+ uint32_t minTMMBR = 0;
+ uint32_t minIndexTMMBR = 0;
+ for (uint32_t i = 0; i < candidateSet.sizeOfSet(); i++)
+ {
+ if (candidateSet.Tmmbr(i) > 0)
{
- if (candidateSet.Tmmbr(i) > 0)
- {
- minTMMBR = candidateSet.Tmmbr(i);
- minIndexTMMBR = i;
- break;
- }
+ minTMMBR = candidateSet.Tmmbr(i);
+ minIndexTMMBR = i;
+ break;
}
+ }
- for (uint32_t i = 0; i < candidateSet.sizeOfSet(); i++)
+ for (uint32_t i = 0; i < candidateSet.sizeOfSet(); i++)
+ {
+ if (candidateSet.Tmmbr(i) > 0 && candidateSet.Tmmbr(i) <= minTMMBR)
{
- if (candidateSet.Tmmbr(i) > 0 && candidateSet.Tmmbr(i) <= minTMMBR)
- {
- // get min bitrate
- minTMMBR = candidateSet.Tmmbr(i);
- minIndexTMMBR = i;
- }
+ // get min bitrate
+ minTMMBR = candidateSet.Tmmbr(i);
+ minIndexTMMBR = i;
}
- // first member of selected list
- _boundingSet.SetEntry(numBoundingSet,
- candidateSet.Tmmbr(minIndexTMMBR),
- candidateSet.PacketOH(minIndexTMMBR),
- candidateSet.Ssrc(minIndexTMMBR));
-
- // set intersection value
- _ptrIntersectionBoundingSet[numBoundingSet] = 0;
- // calculate its maximum packet rate (where its line crosses x-axis)
- _ptrMaxPRBoundingSet[numBoundingSet]
- = _boundingSet.Tmmbr(numBoundingSet) * 1000
- / float(8 * _boundingSet.PacketOH(numBoundingSet));
- numBoundingSet++;
- // remove from candidate list
- candidateSet.ClearEntry(minIndexTMMBR);
- numCandidates--;
-
- // 4. Discard from candidate list all tuple w/ lower OH
- // (next tuple must be steeper)
- for (uint32_t i = 0; i < candidateSet.sizeOfSet(); i++)
+ }
+ // first member of selected list
+ _boundingSet.SetEntry(numBoundingSet,
+ candidateSet.Tmmbr(minIndexTMMBR),
+ candidateSet.PacketOH(minIndexTMMBR),
+ candidateSet.Ssrc(minIndexTMMBR));
+
+ // set intersection value
+ _ptrIntersectionBoundingSet[numBoundingSet] = 0;
+ // calculate its maximum packet rate (where its line crosses x-axis)
+ _ptrMaxPRBoundingSet[numBoundingSet]
+ = _boundingSet.Tmmbr(numBoundingSet) * 1000
+ / float(8 * _boundingSet.PacketOH(numBoundingSet));
+ numBoundingSet++;
+ // remove from candidate list
+ candidateSet.ClearEntry(minIndexTMMBR);
+ numCandidates--;
+
+ // 4. Discard from candidate list all tuple w/ lower OH
+ // (next tuple must be steeper)
+ for (uint32_t i = 0; i < candidateSet.sizeOfSet(); i++)
+ {
+ if(candidateSet.Tmmbr(i) > 0
+ && candidateSet.PacketOH(i) < _boundingSet.PacketOH(0))
{
- if(candidateSet.Tmmbr(i) > 0
- && candidateSet.PacketOH(i) < _boundingSet.PacketOH(0))
- {
- candidateSet.ClearEntry(i);
- numCandidates--;
- }
+ candidateSet.ClearEntry(i);
+ numCandidates--;
}
+ }
- if (numCandidates == 0)
- {
- // Should be true already:_boundingSet.lengthOfSet = numBoundingSet;
- assert(_boundingSet.lengthOfSet() == numBoundingSet);
- return numBoundingSet;
- }
+ if (numCandidates == 0)
+ {
+ // Should be true already:_boundingSet.lengthOfSet = numBoundingSet;
+ assert(_boundingSet.lengthOfSet() == numBoundingSet);
+ return numBoundingSet;
+ }
- bool getNewCandidate = true;
- int curCandidateTMMBR = 0;
- int curCandidateIndex = 0;
- int curCandidatePacketOH = 0;
- int curCandidateSSRC = 0;
- do
+ bool getNewCandidate = true;
+ int curCandidateTMMBR = 0;
+ int curCandidateIndex = 0;
+ int curCandidatePacketOH = 0;
+ int curCandidateSSRC = 0;
+ do
+ {
+ if (getNewCandidate)
{
- if (getNewCandidate)
+ // 5. Remove first remaining tuple from candidate list
+ for (uint32_t i = 0; i < candidateSet.sizeOfSet(); i++)
{
- // 5. Remove first remaining tuple from candidate list
- for (uint32_t i = 0; i < candidateSet.sizeOfSet(); i++)
+ if (candidateSet.Tmmbr(i) > 0)
{
- if (candidateSet.Tmmbr(i) > 0)
- {
- curCandidateTMMBR = candidateSet.Tmmbr(i);
- curCandidatePacketOH = candidateSet.PacketOH(i);
- curCandidateSSRC = candidateSet.Ssrc(i);
- curCandidateIndex = i;
- candidateSet.ClearEntry(curCandidateIndex);
- break;
- }
+ curCandidateTMMBR = candidateSet.Tmmbr(i);
+ curCandidatePacketOH = candidateSet.PacketOH(i);
+ curCandidateSSRC = candidateSet.Ssrc(i);
+ curCandidateIndex = i;
+ candidateSet.ClearEntry(curCandidateIndex);
+ break;
}
}
+ }
- // 6. Calculate packet rate and intersection of the current
- // line with line of last tuple in selected list
- float packetRate
- = float(curCandidateTMMBR
- - _boundingSet.Tmmbr(numBoundingSet-1))*1000
- / (8*(curCandidatePacketOH
- - _boundingSet.PacketOH(numBoundingSet-1)));
-
- // 7. If the packet rate is equal or lower than intersection of
- // last tuple in selected list,
- // remove last tuple in selected list & go back to step 6
- if(packetRate <= _ptrIntersectionBoundingSet[numBoundingSet-1])
- {
- // remove last tuple and goto step 6
- numBoundingSet--;
- _boundingSet.ClearEntry(numBoundingSet);
- _ptrIntersectionBoundingSet[numBoundingSet] = 0;
- _ptrMaxPRBoundingSet[numBoundingSet] = 0;
- getNewCandidate = false;
- } else
+ // 6. Calculate packet rate and intersection of the current
+ // line with line of last tuple in selected list
+ float packetRate
+ = float(curCandidateTMMBR
+ - _boundingSet.Tmmbr(numBoundingSet-1))*1000
+ / (8*(curCandidatePacketOH
+ - _boundingSet.PacketOH(numBoundingSet-1)));
+
+ // 7. If the packet rate is equal or lower than intersection of
+ // last tuple in selected list,
+ // remove last tuple in selected list & go back to step 6
+ if(packetRate <= _ptrIntersectionBoundingSet[numBoundingSet-1])
+ {
+ // remove last tuple and goto step 6
+ numBoundingSet--;
+ _boundingSet.ClearEntry(numBoundingSet);
+ _ptrIntersectionBoundingSet[numBoundingSet] = 0;
+ _ptrMaxPRBoundingSet[numBoundingSet] = 0;
+ getNewCandidate = false;
+ } else
+ {
+ // 8. If packet rate is lower than maximum packet rate of
+ // last tuple in selected list, add current tuple to selected
+ // list
+ if (packetRate < _ptrMaxPRBoundingSet[numBoundingSet-1])
{
- // 8. If packet rate is lower than maximum packet rate of
- // last tuple in selected list, add current tuple to selected
- // list
- if (packetRate < _ptrMaxPRBoundingSet[numBoundingSet-1])
- {
- _boundingSet.SetEntry(numBoundingSet,
- curCandidateTMMBR,
- curCandidatePacketOH,
- curCandidateSSRC);
- _ptrIntersectionBoundingSet[numBoundingSet] = packetRate;
- _ptrMaxPRBoundingSet[numBoundingSet]
- = _boundingSet.Tmmbr(numBoundingSet)*1000
- / float(8*_boundingSet.PacketOH(numBoundingSet));
- numBoundingSet++;
- }
- numCandidates--;
- getNewCandidate = true;
+ _boundingSet.SetEntry(numBoundingSet,
+ curCandidateTMMBR,
+ curCandidatePacketOH,
+ curCandidateSSRC);
+ _ptrIntersectionBoundingSet[numBoundingSet] = packetRate;
+ _ptrMaxPRBoundingSet[numBoundingSet]
+ = _boundingSet.Tmmbr(numBoundingSet)*1000
+ / float(8*_boundingSet.PacketOH(numBoundingSet));
+ numBoundingSet++;
}
+ numCandidates--;
+ getNewCandidate = true;
+ }
+
+ // 9. Go back to step 5 if any tuple remains in candidate list
+ } while (numCandidates > 0);
- // 9. Go back to step 5 if any tuple remains in candidate list
- } while (numCandidates > 0);
- }
return numBoundingSet;
}
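The loop above (re-indented here after dropping the enclosing else) implements the TMMBR bounding-set algorithm of RFC 5104: each (bitrate, per-packet overhead) tuple defines a line, a tuple's maximum packet rate is where its line crosses the x-axis, and a candidate forces the previous bounding-set entry out when their intersection falls at or below that entry's own intersection. The two formulas used in steps 3 and 6, pulled out into helpers (free functions written for illustration; the factor of 1000 in the source suggests bitrates are stored in kbps, which is assumed here):

#include <cstdint>

// Maximum packet rate of one tuple: the rate at which the whole bitrate
// budget is spent on per-packet overhead (bitrate in kbps, overhead in bytes
// per packet; result in packets per second).
float MaxPacketRate(uint32_t tmmbr_kbps, uint32_t packet_oh_bytes) {
  return tmmbr_kbps * 1000.0f / (8.0f * packet_oh_bytes);
}

// Packet rate at which the lines of the last bounding-set entry and the
// current candidate intersect; used in step 6/7 above to decide whether the
// last entry is dominated and must be removed.
float IntersectionPacketRate(uint32_t prev_tmmbr_kbps, uint32_t prev_oh_bytes,
                             uint32_t cand_tmmbr_kbps, uint32_t cand_oh_bytes) {
  return (static_cast<float>(cand_tmmbr_kbps) - prev_tmmbr_kbps) * 1000.0f /
         (8.0f * (static_cast<float>(cand_oh_bytes) - prev_oh_bytes));
}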
diff --git a/modules/rtp_rtcp/test/testAPI/test_api.cc b/modules/rtp_rtcp/test/testAPI/test_api.cc
index ac2c5ca0..3885eb07 100644
--- a/modules/rtp_rtcp/test/testAPI/test_api.cc
+++ b/modules/rtp_rtcp/test/testAPI/test_api.cc
@@ -99,10 +99,6 @@ TEST_F(RtpRtcpAPITest, RTCP) {
EXPECT_EQ(0, module->SetCNAME("john.doe@test.test"));
- char cName[RTCP_CNAME_SIZE];
- EXPECT_EQ(0, module->CNAME(cName));
- EXPECT_STRCASEEQ(cName, "john.doe@test.test");
-
EXPECT_FALSE(module->TMMBR());
EXPECT_EQ(0, module->SetTMMBRStatus(true));
EXPECT_TRUE(module->TMMBR());
diff --git a/modules/video_capture/android/video_capture_android.cc b/modules/video_capture/android/video_capture_android.cc
index 6f0200e6..4bc14e5a 100644
--- a/modules/video_capture/android/video_capture_android.cc
+++ b/modules/video_capture/android/video_capture_android.cc
@@ -130,6 +130,8 @@ VideoCaptureModule* VideoCaptureImpl::Create(
int32_t VideoCaptureAndroid::OnIncomingFrame(uint8_t* videoFrame,
int32_t videoFrameLength,
int64_t captureTime) {
+ if (!_captureStarted)
+ return 0;
return IncomingFrame(
videoFrame, videoFrameLength, _captureCapability, captureTime);
}
@@ -209,13 +211,16 @@ int32_t VideoCaptureAndroid::StartCapture(
}
int32_t VideoCaptureAndroid::StopCapture() {
- CriticalSectionScoped cs(&_apiCs);
+ _apiCs.Enter();
AttachThreadScoped ats(g_jvm);
JNIEnv* env = ats.env();
memset(&_requestedCapability, 0, sizeof(_requestedCapability));
memset(&_captureCapability, 0, sizeof(_captureCapability));
_captureStarted = false;
+ // Exit critical section to avoid blocking camera thread inside
+ // onIncomingFrame() call.
+ _apiCs.Leave();
jmethodID j_stop =
env->GetMethodID(g_java_capturer_class, "stopCapture", "()Z");
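StopCapture() above trades the scoped lock for explicit Enter()/Leave() so that _apiCs is released before the blocking JNI call into stopCapture(): the camera thread may be inside OnIncomingFrame() waiting for that same lock, and holding it across the blocking call could deadlock the two threads. A generic sketch of the release-before-blocking-call pattern (std::mutex and the class below are illustrative; the real code uses CriticalSectionWrapper and JNI):

#include <mutex>

class Capturer {
 public:
  void Stop() {
    mutex_.lock();
    started_ = false;       // State changes happen under the lock...
    mutex_.unlock();        // ...but the lock is dropped before blocking, so a
    StopPlatformCapture();  // frame callback waiting on |mutex_| cannot
  }                         // deadlock with this thread.

  void OnFrame() {
    std::lock_guard<std::mutex> lock(mutex_);
    if (!started_)
      return;  // Frames arriving after Stop() are ignored.
    // ... deliver the frame ...
  }

 private:
  void StopPlatformCapture() {
    // Stand-in for the blocking call into the platform capturer (a JNI call
    // in the Android implementation touched by this diff).
  }

  std::mutex mutex_;
  bool started_ = true;
};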
diff --git a/modules/video_coding/main/source/timing.cc b/modules/video_coding/main/source/timing.cc
index af0e35c4..2ec149ce 100644
--- a/modules/video_coding/main/source/timing.cc
+++ b/modules/video_coding/main/source/timing.cc
@@ -58,6 +58,7 @@ void VCMTiming::Reset() {
}
void VCMTiming::ResetDecodeTime() {
+ CriticalSectionScoped lock(crit_sect_);
codec_timer_.Reset();
}
diff --git a/modules/video_coding/main/source/timing.h b/modules/video_coding/main/source/timing.h
index 1dca5e60..116639eb 100644
--- a/modules/video_coding/main/source/timing.h
+++ b/modules/video_coding/main/source/timing.h
@@ -13,6 +13,7 @@
#include "webrtc/modules/video_coding/main/source/codec_timer.h"
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
+#include "webrtc/system_wrappers/interface/thread_annotations.h"
#include "webrtc/typedefs.h"
namespace webrtc {
@@ -93,22 +94,24 @@ class VCMTiming {
enum { kDelayMaxChangeMsPerS = 100 };
protected:
- int32_t MaxDecodeTimeMs(FrameType frame_type = kVideoFrameDelta) const;
- int64_t RenderTimeMsInternal(uint32_t frame_timestamp, int64_t now_ms) const;
- uint32_t TargetDelayInternal() const;
+ int32_t MaxDecodeTimeMs(FrameType frame_type = kVideoFrameDelta) const
+ EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+ int64_t RenderTimeMsInternal(uint32_t frame_timestamp, int64_t now_ms) const
+ EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+ uint32_t TargetDelayInternal() const EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
private:
CriticalSectionWrapper* crit_sect_;
- Clock* clock_;
- bool master_;
- TimestampExtrapolator* ts_extrapolator_;
- VCMCodecTimer codec_timer_;
- uint32_t render_delay_ms_;
- uint32_t min_playout_delay_ms_;
- uint32_t jitter_delay_ms_;
- uint32_t current_delay_ms_;
- int last_decode_ms_;
- uint32_t prev_frame_timestamp_;
+ Clock* const clock_;
+ bool master_ GUARDED_BY(crit_sect_);
+ TimestampExtrapolator* ts_extrapolator_ GUARDED_BY(crit_sect_);
+ VCMCodecTimer codec_timer_ GUARDED_BY(crit_sect_);
+ uint32_t render_delay_ms_ GUARDED_BY(crit_sect_);
+ uint32_t min_playout_delay_ms_ GUARDED_BY(crit_sect_);
+ uint32_t jitter_delay_ms_ GUARDED_BY(crit_sect_);
+ uint32_t current_delay_ms_ GUARDED_BY(crit_sect_);
+ int last_decode_ms_ GUARDED_BY(crit_sect_);
+ uint32_t prev_frame_timestamp_ GUARDED_BY(crit_sect_);
};
} // namespace webrtc
diff --git a/modules/video_coding/main/source/video_sender_unittest.cc b/modules/video_coding/main/source/video_sender_unittest.cc
index 67b3e7ae..0b8193b2 100644
--- a/modules/video_coding/main/source/video_sender_unittest.cc
+++ b/modules/video_coding/main/source/video_sender_unittest.cc
@@ -141,10 +141,10 @@ class PacketizationCallback : public VCMPacketizationCallback {
int frames = 0;
for (size_t i = 0; i < frame_data_.size(); ++i) {
EXPECT_EQ(kRtpVideoVp8, frame_data_[i].rtp_video_header.codec);
- if (frame_data_[i].rtp_video_header.codecHeader.VP8.temporalIdx <=
- temporal_layer) {
+ const uint8_t temporal_idx =
+ frame_data_[i].rtp_video_header.codecHeader.VP8.temporalIdx;
+ if (temporal_idx <= temporal_layer || temporal_idx == kNoTemporalIdx)
frames++;
- }
}
return frames;
}
@@ -153,10 +153,10 @@ class PacketizationCallback : public VCMPacketizationCallback {
int payload_size = 0;
for (size_t i = 0; i < frame_data_.size(); ++i) {
EXPECT_EQ(kRtpVideoVp8, frame_data_[i].rtp_video_header.codec);
- if (frame_data_[i].rtp_video_header.codecHeader.VP8.temporalIdx <=
- temporal_layer) {
+ const uint8_t temporal_idx =
+ frame_data_[i].rtp_video_header.codecHeader.VP8.temporalIdx;
+ if (temporal_idx <= temporal_layer || temporal_idx == kNoTemporalIdx)
payload_size += frame_data_[i].payload_size;
- }
}
return payload_size;
}