author      Chih-hung Hsieh <chh@google.com>                          2016-01-20 17:50:13 +0000
committer   android-build-merger <android-build-merger@google.com>   2016-01-20 17:50:13 +0000
commit      b3cb8ab4ede8bb77f0bdef2715efc2c1e6267072 (patch)
tree        28c4cf735dd5bd9cc8f1ccd06fff8a173b20d1cb /webrtc/modules
parent      a4acd9d6bc9b3b033d7d274316e75ee067df8d20 (diff)
parent      9a337512d97e37afc142dee4fd50a41b741a87d2 (diff)
download    webrtc-b3cb8ab4ede8bb77f0bdef2715efc2c1e6267072.tar.gz
Merge "Merge upstream SHA 04cb763"android-cts_7.1_r1android-cts-7.1_r9android-cts-7.1_r8android-cts-7.1_r7android-cts-7.1_r6android-cts-7.1_r5android-cts-7.1_r4android-cts-7.1_r3android-cts-7.1_r29android-cts-7.1_r28android-cts-7.1_r27android-cts-7.1_r26android-cts-7.1_r25android-cts-7.1_r24android-cts-7.1_r23android-cts-7.1_r22android-cts-7.1_r21android-cts-7.1_r20android-cts-7.1_r2android-cts-7.1_r19android-cts-7.1_r18android-cts-7.1_r17android-cts-7.1_r16android-cts-7.1_r15android-cts-7.1_r14android-cts-7.1_r13android-cts-7.1_r12android-cts-7.1_r11android-cts-7.1_r10android-cts-7.1_r1android-cts-7.0_r9android-cts-7.0_r8android-cts-7.0_r7android-cts-7.0_r6android-cts-7.0_r5android-cts-7.0_r4android-cts-7.0_r33android-cts-7.0_r32android-cts-7.0_r31android-cts-7.0_r30android-cts-7.0_r3android-cts-7.0_r29android-cts-7.0_r28android-cts-7.0_r27android-cts-7.0_r26android-cts-7.0_r25android-cts-7.0_r24android-cts-7.0_r23android-cts-7.0_r22android-cts-7.0_r21android-cts-7.0_r20android-cts-7.0_r2android-cts-7.0_r19android-cts-7.0_r18android-cts-7.0_r17android-cts-7.0_r16android-cts-7.0_r15android-cts-7.0_r14android-cts-7.0_r13android-cts-7.0_r12android-cts-7.0_r11android-cts-7.0_r10android-cts-7.0_r1android-7.1.2_r9android-7.1.2_r8android-7.1.2_r6android-7.1.2_r5android-7.1.2_r4android-7.1.2_r39android-7.1.2_r38android-7.1.2_r37android-7.1.2_r36android-7.1.2_r33android-7.1.2_r32android-7.1.2_r30android-7.1.2_r3android-7.1.2_r29android-7.1.2_r28android-7.1.2_r27android-7.1.2_r25android-7.1.2_r24android-7.1.2_r23android-7.1.2_r2android-7.1.2_r19android-7.1.2_r18android-7.1.2_r17android-7.1.2_r16android-7.1.2_r15android-7.1.2_r14android-7.1.2_r13android-7.1.2_r12android-7.1.2_r11android-7.1.2_r10android-7.1.2_r1android-7.1.1_r9android-7.1.1_r8android-7.1.1_r7android-7.1.1_r61android-7.1.1_r60android-7.1.1_r6android-7.1.1_r59android-7.1.1_r58android-7.1.1_r57android-7.1.1_r56android-7.1.1_r55android-7.1.1_r54android-7.1.1_r53android-7.1.1_r52android-7.1.1_r51android-7.1.1_r50android-7.1.1_r49android-7.1.1_r48android-7.1.1_r47android-7.1.1_r46android-7.1.1_r45android-7.1.1_r44android-7.1.1_r43android-7.1.1_r42android-7.1.1_r41android-7.1.1_r40android-7.1.1_r4android-7.1.1_r39android-7.1.1_r38android-7.1.1_r35android-7.1.1_r33android-7.1.1_r32android-7.1.1_r31android-7.1.1_r3android-7.1.1_r28android-7.1.1_r27android-7.1.1_r26android-7.1.1_r25android-7.1.1_r24android-7.1.1_r23android-7.1.1_r22android-7.1.1_r21android-7.1.1_r20android-7.1.1_r2android-7.1.1_r17android-7.1.1_r16android-7.1.1_r15android-7.1.1_r14android-7.1.1_r13android-7.1.1_r12android-7.1.1_r11android-7.1.1_r10android-7.1.1_r1android-7.1.0_r7android-7.1.0_r6android-7.1.0_r5android-7.1.0_r4android-7.1.0_r3android-7.1.0_r2android-7.1.0_r1android-7.0.0_r9android-7.0.0_r8android-7.0.0_r7android-7.0.0_r6android-7.0.0_r5android-7.0.0_r4android-7.0.0_r36android-7.0.0_r35android-7.0.0_r34android-7.0.0_r33android-7.0.0_r32android-7.0.0_r31android-7.0.0_r30android-7.0.0_r3android-7.0.0_r29android-7.0.0_r28android-7.0.0_r27android-7.0.0_r24android-7.0.0_r21android-7.0.0_r19android-7.0.0_r17android-7.0.0_r15android-7.0.0_r14android-7.0.0_r13android-7.0.0_r12android-7.0.0_r11android-7.0.0_r10android-7.0.0_r1nougat-releasenougat-mr2.3-releasenougat-mr2.2-releasenougat-mr2.1-releasenougat-mr2-security-releasenougat-mr2-releasenougat-mr2-pixel-releasenougat-mr2-devnougat-mr1.8-releasenougat-mr1.7-releasenougat-mr1.6-releasenougat-mr1.5-releasenougat-mr1.4-releasenougat-mr1.3-releasenougat-mr1.2-releasenougat-mr1.1-releasenougat-mr1-volantis-releasenougat-
mr1-security-releasenougat-mr1-releasenougat-mr1-flounder-releasenougat-mr1-devnougat-mr1-cts-releasenougat-mr0.5-releasenougat-dr1-releasenougat-devnougat-cts-releasenougat-bugfix-release
am: 9a337512d9

* commit '9a337512d97e37afc142dee4fd50a41b741a87d2': (797 commits)
  Add tests for verifying transport feedback for audio and video.
  Eliminate defines in talk/
  Revert of Update with new default boringssl no-aes cipher suites. Re-enable tests. (patchset #3 id:40001 of https://codereview.webrtc.org/1550773002/ )
  Remove assert which was incorrectly added to TcpPort::OnSentPacket.
  Reland Connect TurnPort and TCPPort to AsyncPacketSocket::SignalSentPacket.
  Update with new default boringssl no-aes cipher suites. Re-enable tests.
  Revert of Connect TurnPort and TCPPort to AsyncPacketSocket::SignalSentPacket. (patchset #3 id:40001 of https://codereview.webrtc.org/1577873003/ )
  Re-land: "Use an explicit identifier in Config"
  Connect TurnPort and TCPPort to AsyncPacketSocket::SignalSentPacket.
  Revert of Delete remnants of non-square pixel support from cricket::VideoFrame. (patchset #1 id:1 of https://codereview.webrtc.org/1586613002/ )
  Remove libfuzzer trybot from default trybot set.
  Add ramp-up tests for transport sequence number with and w/o audio.
  Delete remnants of non-square pixel support from cricket::VideoFrame.
  Fix IPAddress::ToSensitiveString() to avoid dependency on inet_ntop().
  Revert of Storing raw audio sink for default audio track. (patchset #7 id:120001 of https://codereview.chromium.org/1551813002/ )
  Re-enable tests that failed under Linux_Msan.
  Revert of Use an explicit identifier in Config (patchset #4 id:60001 of https://codereview.webrtc.org/1538643004/ )
  Roll chromium_revision 346fea9..099be58 (369082:369139)
  Disable WebRtcVideoChannel2BaseTest.SendManyResizeOnce for TSan
  Add build_protobuf variable.
  ...
Diffstat (limited to 'webrtc/modules')
-rw-r--r--  webrtc/modules/audio_coding/BUILD.gn | 93
-rw-r--r--  webrtc/modules/audio_coding/OWNERS | 2
-rw-r--r--  webrtc/modules/audio_coding/acm2/acm_codec_database.cc (renamed from webrtc/modules/audio_coding/main/acm2/acm_codec_database.cc) | 49
-rw-r--r--  webrtc/modules/audio_coding/acm2/acm_codec_database.h (renamed from webrtc/modules/audio_coding/main/acm2/acm_codec_database.h) | 44
-rw-r--r--  webrtc/modules/audio_coding/acm2/acm_common_defs.h (renamed from webrtc/modules/audio_coding/main/acm2/acm_common_defs.h) | 6
-rw-r--r--  webrtc/modules/audio_coding/acm2/acm_neteq_unittest.cc (renamed from webrtc/modules/audio_coding/main/acm2/acm_neteq_unittest.cc) | 0
-rw-r--r--  webrtc/modules/audio_coding/acm2/acm_receive_test_oldapi.cc (renamed from webrtc/modules/audio_coding/main/acm2/acm_receive_test_oldapi.cc) | 11
-rw-r--r--  webrtc/modules/audio_coding/acm2/acm_receive_test_oldapi.h (renamed from webrtc/modules/audio_coding/main/acm2/acm_receive_test_oldapi.h) | 11
-rw-r--r--  webrtc/modules/audio_coding/acm2/acm_receiver.cc (renamed from webrtc/modules/audio_coding/main/acm2/acm_receiver.cc) | 310
-rw-r--r--  webrtc/modules/audio_coding/acm2/acm_receiver.h (renamed from webrtc/modules/audio_coding/main/acm2/acm_receiver.h) | 93
-rw-r--r--  webrtc/modules/audio_coding/acm2/acm_receiver_unittest_oldapi.cc (renamed from webrtc/modules/audio_coding/main/acm2/acm_receiver_unittest_oldapi.cc) | 104
-rw-r--r--  webrtc/modules/audio_coding/acm2/acm_resampler.cc (renamed from webrtc/modules/audio_coding/main/acm2/acm_resampler.cc) | 23
-rw-r--r--  webrtc/modules/audio_coding/acm2/acm_resampler.h (renamed from webrtc/modules/audio_coding/main/acm2/acm_resampler.h) | 8
-rw-r--r--  webrtc/modules/audio_coding/acm2/acm_send_test_oldapi.cc (renamed from webrtc/modules/audio_coding/main/acm2/acm_send_test_oldapi.cc) | 4
-rw-r--r--  webrtc/modules/audio_coding/acm2/acm_send_test_oldapi.h (renamed from webrtc/modules/audio_coding/main/acm2/acm_send_test_oldapi.h) | 8
-rw-r--r--  webrtc/modules/audio_coding/acm2/audio_coding_module.cc (renamed from webrtc/modules/audio_coding/main/acm2/audio_coding_module.cc) | 19
-rw-r--r--  webrtc/modules/audio_coding/acm2/audio_coding_module_impl.cc (renamed from webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.cc) | 208
-rw-r--r--  webrtc/modules/audio_coding/acm2/audio_coding_module_impl.h (renamed from webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.h) | 26
-rw-r--r--  webrtc/modules/audio_coding/acm2/audio_coding_module_unittest_oldapi.cc (renamed from webrtc/modules/audio_coding/main/acm2/audio_coding_module_unittest_oldapi.cc) | 457
-rw-r--r--  webrtc/modules/audio_coding/acm2/call_statistics.cc (renamed from webrtc/modules/audio_coding/main/acm2/call_statistics.cc) | 2
-rw-r--r--  webrtc/modules/audio_coding/acm2/call_statistics.h (renamed from webrtc/modules/audio_coding/main/acm2/call_statistics.h) | 8
-rw-r--r--  webrtc/modules/audio_coding/acm2/call_statistics_unittest.cc (renamed from webrtc/modules/audio_coding/main/acm2/call_statistics_unittest.cc) | 2
-rw-r--r--  webrtc/modules/audio_coding/acm2/codec_manager.cc | 194
-rw-r--r--  webrtc/modules/audio_coding/acm2/codec_manager.h | 66
-rw-r--r--  webrtc/modules/audio_coding/acm2/codec_manager_unittest.cc | 73
-rw-r--r--  webrtc/modules/audio_coding/acm2/initial_delay_manager.cc (renamed from webrtc/modules/audio_coding/main/acm2/initial_delay_manager.cc) | 2
-rw-r--r--  webrtc/modules/audio_coding/acm2/initial_delay_manager.h (renamed from webrtc/modules/audio_coding/main/acm2/initial_delay_manager.h) | 8
-rw-r--r--  webrtc/modules/audio_coding/acm2/initial_delay_manager_unittest.cc (renamed from webrtc/modules/audio_coding/main/acm2/initial_delay_manager_unittest.cc) | 2
-rw-r--r--  webrtc/modules/audio_coding/acm2/rent_a_codec.cc | 307
-rw-r--r--  webrtc/modules/audio_coding/acm2/rent_a_codec.h | 249
-rw-r--r--  webrtc/modules/audio_coding/acm2/rent_a_codec_unittest.cc (renamed from webrtc/modules/audio_coding/main/acm2/codec_owner_unittest.cc) | 166
-rw-r--r--  webrtc/modules/audio_coding/audio_coding.gypi | 185
-rw-r--r--  webrtc/modules/audio_coding/codecs/audio_decoder.cc | 9
-rw-r--r--  webrtc/modules/audio_coding/codecs/audio_decoder.h | 28
-rw-r--r--  webrtc/modules/audio_coding/codecs/audio_encoder.cc | 17
-rw-r--r--  webrtc/modules/audio_coding/codecs/audio_encoder.h | 10
-rw-r--r--  webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng.cc | 20
-rw-r--r--  webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng.h (renamed from webrtc/modules/audio_coding/codecs/cng/include/audio_encoder_cng.h) | 14
-rw-r--r--  webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng_unittest.cc | 8
-rw-r--r--  webrtc/modules/audio_coding/codecs/cng/cng.gypi | 16
-rw-r--r--  webrtc/modules/audio_coding/codecs/cng/webrtc_cng.h (renamed from webrtc/modules/audio_coding/codecs/cng/include/webrtc_cng.h) | 8
-rw-r--r--  webrtc/modules/audio_coding/codecs/g711/audio_decoder_pcm.cc | 4
-rw-r--r--  webrtc/modules/audio_coding/codecs/g711/audio_decoder_pcm.h (renamed from webrtc/modules/audio_coding/codecs/g711/include/audio_decoder_pcm.h) | 6
-rw-r--r--  webrtc/modules/audio_coding/codecs/g711/audio_encoder_pcm.cc | 34
-rw-r--r--  webrtc/modules/audio_coding/codecs/g711/audio_encoder_pcm.h (renamed from webrtc/modules/audio_coding/codecs/g711/include/audio_encoder_pcm.h) | 20
-rw-r--r--  webrtc/modules/audio_coding/codecs/g711/g711.gypi | 20
-rw-r--r--  webrtc/modules/audio_coding/codecs/g711/g711_interface.h (renamed from webrtc/modules/audio_coding/codecs/g711/include/g711_interface.h) | 6
-rw-r--r--  webrtc/modules/audio_coding/codecs/g711/test/testG711.cc | 2
-rw-r--r--  webrtc/modules/audio_coding/codecs/g722/audio_decoder_g722.cc | 4
-rw-r--r--  webrtc/modules/audio_coding/codecs/g722/audio_decoder_g722.h (renamed from webrtc/modules/audio_coding/codecs/g722/include/audio_decoder_g722.h) | 6
-rw-r--r--  webrtc/modules/audio_coding/codecs/g722/audio_encoder_g722.cc | 22
-rw-r--r--  webrtc/modules/audio_coding/codecs/g722/audio_encoder_g722.h (renamed from webrtc/modules/audio_coding/codecs/g722/include/audio_encoder_g722.h) | 16
-rw-r--r--  webrtc/modules/audio_coding/codecs/g722/g722.gypi | 18
-rw-r--r--  webrtc/modules/audio_coding/codecs/g722/g722_interface.h (renamed from webrtc/modules/audio_coding/codecs/g722/include/g722_interface.h) | 6
-rw-r--r--  webrtc/modules/audio_coding/codecs/g722/test/testG722.cc | 2
-rw-r--r--  webrtc/modules/audio_coding/codecs/ilbc/audio_decoder_ilbc.cc | 4
-rw-r--r--  webrtc/modules/audio_coding/codecs/ilbc/audio_decoder_ilbc.h (renamed from webrtc/modules/audio_coding/codecs/ilbc/include/audio_decoder_ilbc.h) | 6
-rw-r--r--  webrtc/modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.cc | 16
-rw-r--r--  webrtc/modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.h (renamed from webrtc/modules/audio_coding/codecs/ilbc/include/audio_encoder_ilbc.h) | 12
-rw-r--r--  webrtc/modules/audio_coding/codecs/ilbc/ilbc.gypi | 16
-rw-r--r--  webrtc/modules/audio_coding/codecs/ilbc/ilbc.h (renamed from webrtc/modules/audio_coding/codecs/ilbc/include/ilbc.h) | 20
-rw-r--r--  webrtc/modules/audio_coding/codecs/ilbc/test/iLBC_test.c | 2
-rw-r--r--  webrtc/modules/audio_coding/codecs/ilbc/test/iLBC_testLib.c | 2
-rw-r--r--  webrtc/modules/audio_coding/codecs/ilbc/test/iLBC_testprogram.c | 14
-rw-r--r--  webrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t.h | 4
-rw-r--r--  webrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t_impl.h | 6
-rw-r--r--  webrtc/modules/audio_coding/codecs/isac/fix/test/isac_speed_test.cc | 2
-rw-r--r--  webrtc/modules/audio_coding/codecs/isac/fix/test/test_iSACfixfloat.c | 16
-rw-r--r--  webrtc/modules/audio_coding/codecs/isac/isac_test.gypi | 14
-rw-r--r--  webrtc/modules/audio_coding/codecs/isac/isacfix.gypi | 5
-rw-r--r--  webrtc/modules/audio_coding/codecs/isac/main/test/ReleaseTest-API/ReleaseTest-API.cc | 12
-rw-r--r--  webrtc/modules/audio_coding/codecs/mock/mock_audio_encoder.h | 4
-rw-r--r--  webrtc/modules/audio_coding/codecs/opus/audio_decoder_opus.cc | 4
-rw-r--r--  webrtc/modules/audio_coding/codecs/opus/audio_decoder_opus.h (renamed from webrtc/modules/audio_coding/codecs/opus/include/audio_decoder_opus.h) | 8
-rw-r--r--  webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.cc | 28
-rw-r--r--  webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.h (renamed from webrtc/modules/audio_coding/codecs/opus/include/audio_encoder_opus.h) | 18
-rw-r--r--  webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus_unittest.cc | 2
-rw-r--r--  webrtc/modules/audio_coding/codecs/opus/opus.gypi | 12
-rw-r--r--  webrtc/modules/audio_coding/codecs/opus/opus_fec_test.cc | 9
-rw-r--r--  webrtc/modules/audio_coding/codecs/opus/opus_inst.h | 11
-rw-r--r--  webrtc/modules/audio_coding/codecs/opus/opus_interface.c | 115
-rw-r--r--  webrtc/modules/audio_coding/codecs/opus/opus_interface.h (renamed from webrtc/modules/audio_coding/codecs/opus/include/opus_interface.h) | 12
-rw-r--r--  webrtc/modules/audio_coding/codecs/opus/opus_speed_test.cc | 4
-rw-r--r--  webrtc/modules/audio_coding/codecs/opus/opus_unittest.cc | 190
-rw-r--r--  webrtc/modules/audio_coding/codecs/pcm16b/audio_decoder_pcm16b.cc | 4
-rw-r--r--  webrtc/modules/audio_coding/codecs/pcm16b/audio_decoder_pcm16b.h (renamed from webrtc/modules/audio_coding/codecs/pcm16b/include/audio_decoder_pcm16b.h) | 6
-rw-r--r--  webrtc/modules/audio_coding/codecs/pcm16b/audio_encoder_pcm16b.cc | 6
-rw-r--r--  webrtc/modules/audio_coding/codecs/pcm16b/audio_encoder_pcm16b.h (renamed from webrtc/modules/audio_coding/codecs/pcm16b/include/audio_encoder_pcm16b.h) | 12
-rw-r--r--  webrtc/modules/audio_coding/codecs/pcm16b/pcm16b.gypi | 16
-rw-r--r--  webrtc/modules/audio_coding/codecs/pcm16b/pcm16b.h (renamed from webrtc/modules/audio_coding/codecs/pcm16b/include/pcm16b.h) | 6
-rw-r--r--  webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.cc | 9
-rw-r--r--  webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.h | 4
-rw-r--r--  webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red_unittest.cc | 14
-rw-r--r--  webrtc/modules/audio_coding/codecs/tools/audio_codec_speed_test.cc | 12
-rw-r--r--  webrtc/modules/audio_coding/codecs/tools/audio_codec_speed_test.h | 9
-rw-r--r--  webrtc/modules/audio_coding/include/audio_coding_module.h (renamed from webrtc/modules/audio_coding/main/include/audio_coding_module.h) | 54
-rw-r--r--  webrtc/modules/audio_coding/include/audio_coding_module_typedefs.h (renamed from webrtc/modules/audio_coding/main/include/audio_coding_module_typedefs.h) | 8
-rw-r--r--  webrtc/modules/audio_coding/main/acm2/OWNERS | 5
-rw-r--r--  webrtc/modules/audio_coding/main/acm2/codec_manager.cc | 465
-rw-r--r--  webrtc/modules/audio_coding/main/acm2/codec_manager.h | 90
-rw-r--r--  webrtc/modules/audio_coding/main/acm2/codec_owner.cc | 213
-rw-r--r--  webrtc/modules/audio_coding/main/acm2/codec_owner.h | 86
-rw-r--r--  webrtc/modules/audio_coding/main/acm2/rent_a_codec.cc | 70
-rw-r--r--  webrtc/modules/audio_coding/main/acm2/rent_a_codec.h | 162
-rw-r--r--  webrtc/modules/audio_coding/main/audio_coding_module.gypi | 193
-rw-r--r--  webrtc/modules/audio_coding/main/test/initial_delay_unittest.cc | 175
-rw-r--r--  webrtc/modules/audio_coding/neteq/audio_decoder_impl.cc | 12
-rw-r--r--  webrtc/modules/audio_coding/neteq/audio_decoder_impl.h | 6
-rw-r--r--  webrtc/modules/audio_coding/neteq/audio_decoder_unittest.cc | 37
-rw-r--r--  webrtc/modules/audio_coding/neteq/comfort_noise.cc | 2
-rw-r--r--  webrtc/modules/audio_coding/neteq/decision_logic.cc | 3
-rw-r--r--  webrtc/modules/audio_coding/neteq/decision_logic_normal.cc | 2
-rw-r--r--  webrtc/modules/audio_coding/neteq/decoder_database.cc | 14
-rw-r--r--  webrtc/modules/audio_coding/neteq/decoder_database.h | 44
-rw-r--r--  webrtc/modules/audio_coding/neteq/decoder_database_unittest.cc | 56
-rw-r--r--  webrtc/modules/audio_coding/neteq/delay_manager.cc | 2
-rw-r--r--  webrtc/modules/audio_coding/neteq/expand.cc | 2
-rw-r--r--  webrtc/modules/audio_coding/neteq/include/neteq.h | 27
-rw-r--r--  webrtc/modules/audio_coding/neteq/mock/mock_audio_decoder.h | 5
-rw-r--r--  webrtc/modules/audio_coding/neteq/mock/mock_decoder_database.h | 12
-rw-r--r--  webrtc/modules/audio_coding/neteq/mock/mock_external_decoder_pcm16b.h | 10
-rw-r--r--  webrtc/modules/audio_coding/neteq/nack.cc | 2
-rw-r--r--  webrtc/modules/audio_coding/neteq/nack.h | 2
-rw-r--r--  webrtc/modules/audio_coding/neteq/nack_unittest.cc | 2
-rw-r--r--  webrtc/modules/audio_coding/neteq/neteq.cc | 2
-rw-r--r--  webrtc/modules/audio_coding/neteq/neteq_external_decoder_unittest.cc | 44
-rw-r--r--  webrtc/modules/audio_coding/neteq/neteq_impl.cc | 100
-rw-r--r--  webrtc/modules/audio_coding/neteq/neteq_impl.h | 24
-rw-r--r--  webrtc/modules/audio_coding/neteq/neteq_impl_unittest.cc | 237
-rw-r--r--  webrtc/modules/audio_coding/neteq/neteq_network_stats_unittest.cc | 3
-rw-r--r--  webrtc/modules/audio_coding/neteq/neteq_stereo_unittest.cc | 52
-rw-r--r--  webrtc/modules/audio_coding/neteq/neteq_tests.gypi | 17
-rw-r--r--  webrtc/modules/audio_coding/neteq/neteq_unittest.cc | 470
-rw-r--r--  webrtc/modules/audio_coding/neteq/neteq_unittest.proto | 29
-rw-r--r--  webrtc/modules/audio_coding/neteq/normal.cc | 2
-rw-r--r--  webrtc/modules/audio_coding/neteq/packet.h | 2
-rw-r--r--  webrtc/modules/audio_coding/neteq/payload_splitter_unittest.cc | 14
-rw-r--r--  webrtc/modules/audio_coding/neteq/rtcp.cc | 2
-rw-r--r--  webrtc/modules/audio_coding/neteq/statistics_calculator.cc | 8
-rw-r--r--  webrtc/modules/audio_coding/neteq/test/NETEQTEST_RTPpacket.h | 2
-rw-r--r--  webrtc/modules/audio_coding/neteq/test/PayloadTypes.h | 2
-rw-r--r--  webrtc/modules/audio_coding/neteq/test/RTPencode.cc | 63
-rw-r--r--  webrtc/modules/audio_coding/neteq/test/neteq_ilbc_quality_test.cc | 10
-rw-r--r--  webrtc/modules/audio_coding/neteq/test/neteq_isac_quality_test.cc | 2
-rw-r--r--  webrtc/modules/audio_coding/neteq/test/neteq_opus_quality_test.cc | 2
-rw-r--r--  webrtc/modules/audio_coding/neteq/test/neteq_pcmu_quality_test.cc | 10
-rw-r--r--  webrtc/modules/audio_coding/neteq/timestamp_scaler.cc | 14
-rw-r--r--  webrtc/modules/audio_coding/neteq/tools/audio_loop.cc | 7
-rw-r--r--  webrtc/modules/audio_coding/neteq/tools/audio_loop.h | 8
-rw-r--r--  webrtc/modules/audio_coding/neteq/tools/audio_sink.h | 2
-rw-r--r--  webrtc/modules/audio_coding/neteq/tools/constant_pcm_packet_source.cc | 2
-rw-r--r--  webrtc/modules/audio_coding/neteq/tools/neteq_external_decoder_test.cc | 27
-rw-r--r--  webrtc/modules/audio_coding/neteq/tools/neteq_external_decoder_test.h | 11
-rw-r--r--  webrtc/modules/audio_coding/neteq/tools/neteq_performance_test.cc | 32
-rw-r--r--  webrtc/modules/audio_coding/neteq/tools/neteq_quality_test.cc | 14
-rw-r--r--  webrtc/modules/audio_coding/neteq/tools/neteq_quality_test.h | 2
-rw-r--r--  webrtc/modules/audio_coding/neteq/tools/neteq_rtpplay.cc | 72
-rw-r--r--  webrtc/modules/audio_coding/neteq/tools/packet.cc | 4
-rw-r--r--  webrtc/modules/audio_coding/neteq/tools/rtc_event_log_source.cc | 2
-rw-r--r--  webrtc/modules/audio_coding/neteq/tools/rtc_event_log_source.h | 2
-rw-r--r--  webrtc/modules/audio_coding/neteq/tools/rtp_file_source.cc | 14
-rw-r--r--  webrtc/modules/audio_coding/neteq/tools/rtp_file_source.h | 6
-rw-r--r--  webrtc/modules/audio_coding/neteq/tools/rtp_generator.h | 2
-rw-r--r--  webrtc/modules/audio_coding/test/ACMTest.h (renamed from webrtc/modules/audio_coding/main/test/ACMTest.h) | 6
-rw-r--r--  webrtc/modules/audio_coding/test/APITest.cc (renamed from webrtc/modules/audio_coding/main/test/APITest.cc) | 127
-rw-r--r--  webrtc/modules/audio_coding/test/APITest.h (renamed from webrtc/modules/audio_coding/main/test/APITest.h) | 16
-rw-r--r--  webrtc/modules/audio_coding/test/Channel.cc (renamed from webrtc/modules/audio_coding/main/test/Channel.cc) | 2
-rw-r--r--  webrtc/modules/audio_coding/test/Channel.h (renamed from webrtc/modules/audio_coding/main/test/Channel.h) | 10
-rw-r--r--  webrtc/modules/audio_coding/test/EncodeDecodeTest.cc (renamed from webrtc/modules/audio_coding/main/test/EncodeDecodeTest.cc) | 19
-rw-r--r--  webrtc/modules/audio_coding/test/EncodeDecodeTest.h (renamed from webrtc/modules/audio_coding/main/test/EncodeDecodeTest.h) | 18
-rw-r--r--  webrtc/modules/audio_coding/test/PCMFile.cc (renamed from webrtc/modules/audio_coding/main/test/PCMFile.cc) | 21
-rw-r--r--  webrtc/modules/audio_coding/test/PCMFile.h (renamed from webrtc/modules/audio_coding/main/test/PCMFile.h) | 20
-rw-r--r--  webrtc/modules/audio_coding/test/PacketLossTest.cc (renamed from webrtc/modules/audio_coding/main/test/PacketLossTest.cc) | 5
-rw-r--r--  webrtc/modules/audio_coding/test/PacketLossTest.h (renamed from webrtc/modules/audio_coding/main/test/PacketLossTest.h) | 8
-rw-r--r--  webrtc/modules/audio_coding/test/RTPFile.cc (renamed from webrtc/modules/audio_coding/main/test/RTPFile.cc) | 0
-rw-r--r--  webrtc/modules/audio_coding/test/RTPFile.h (renamed from webrtc/modules/audio_coding/main/test/RTPFile.h) | 10
-rw-r--r--  webrtc/modules/audio_coding/test/SpatialAudio.cc (renamed from webrtc/modules/audio_coding/main/test/SpatialAudio.cc) | 2
-rw-r--r--  webrtc/modules/audio_coding/test/SpatialAudio.h (renamed from webrtc/modules/audio_coding/main/test/SpatialAudio.h) | 16
-rw-r--r--  webrtc/modules/audio_coding/test/TestAllCodecs.cc (renamed from webrtc/modules/audio_coding/main/test/TestAllCodecs.cc) | 17
-rw-r--r--  webrtc/modules/audio_coding/test/TestAllCodecs.h (renamed from webrtc/modules/audio_coding/main/test/TestAllCodecs.h) | 12
-rw-r--r--  webrtc/modules/audio_coding/test/TestRedFec.cc (renamed from webrtc/modules/audio_coding/main/test/TestRedFec.cc) | 10
-rw-r--r--  webrtc/modules/audio_coding/test/TestRedFec.h (renamed from webrtc/modules/audio_coding/main/test/TestRedFec.h) | 12
-rw-r--r--  webrtc/modules/audio_coding/test/TestStereo.cc (renamed from webrtc/modules/audio_coding/main/test/TestStereo.cc) | 25
-rw-r--r--  webrtc/modules/audio_coding/test/TestStereo.h (renamed from webrtc/modules/audio_coding/main/test/TestStereo.h) | 12
-rw-r--r--  webrtc/modules/audio_coding/test/TestVADDTX.cc (renamed from webrtc/modules/audio_coding/main/test/TestVADDTX.cc) | 21
-rw-r--r--  webrtc/modules/audio_coding/test/TestVADDTX.h (renamed from webrtc/modules/audio_coding/main/test/TestVADDTX.h) | 14
-rw-r--r--  webrtc/modules/audio_coding/test/Tester.cc (renamed from webrtc/modules/audio_coding/main/test/Tester.cc) | 68
-rw-r--r--  webrtc/modules/audio_coding/test/TimedTrace.cc (renamed from webrtc/modules/audio_coding/main/test/TimedTrace.cc) | 0
-rw-r--r--  webrtc/modules/audio_coding/test/TimedTrace.h (renamed from webrtc/modules/audio_coding/main/test/TimedTrace.h) | 6
-rw-r--r--  webrtc/modules/audio_coding/test/TwoWayCommunication.cc (renamed from webrtc/modules/audio_coding/main/test/TwoWayCommunication.cc) | 16
-rw-r--r--  webrtc/modules/audio_coding/test/TwoWayCommunication.h (renamed from webrtc/modules/audio_coding/main/test/TwoWayCommunication.h) | 16
-rw-r--r--  webrtc/modules/audio_coding/test/delay_test.cc (renamed from webrtc/modules/audio_coding/main/test/delay_test.cc) | 19
-rw-r--r--  webrtc/modules/audio_coding/test/iSACTest.cc (renamed from webrtc/modules/audio_coding/main/test/iSACTest.cc) | 31
-rw-r--r--  webrtc/modules/audio_coding/test/iSACTest.h (renamed from webrtc/modules/audio_coding/main/test/iSACTest.h) | 16
-rw-r--r--  webrtc/modules/audio_coding/test/insert_packet_with_timing.cc (renamed from webrtc/modules/audio_coding/main/test/insert_packet_with_timing.cc) | 12
-rw-r--r--  webrtc/modules/audio_coding/test/opus_test.cc (renamed from webrtc/modules/audio_coding/main/test/opus_test.cc) | 44
-rw-r--r--  webrtc/modules/audio_coding/test/opus_test.h (renamed from webrtc/modules/audio_coding/main/test/opus_test.h) | 25
-rw-r--r--  webrtc/modules/audio_coding/test/target_delay_unittest.cc (renamed from webrtc/modules/audio_coding/main/test/target_delay_unittest.cc) | 48
-rw-r--r--  webrtc/modules/audio_coding/test/utility.cc (renamed from webrtc/modules/audio_coding/main/test/utility.cc) | 4
-rw-r--r--  webrtc/modules/audio_coding/test/utility.h (renamed from webrtc/modules/audio_coding/main/test/utility.h) | 8
-rw-r--r--  webrtc/modules/audio_conference_mixer/BUILD.gn | 8
-rw-r--r--  webrtc/modules/audio_conference_mixer/OWNERS | 7
-rw-r--r--  webrtc/modules/audio_conference_mixer/audio_conference_mixer.gypi | 4
-rw-r--r--  webrtc/modules/audio_conference_mixer/include/audio_conference_mixer.h (renamed from webrtc/modules/audio_conference_mixer/interface/audio_conference_mixer.h) | 12
-rw-r--r--  webrtc/modules/audio_conference_mixer/include/audio_conference_mixer_defines.h (renamed from webrtc/modules/audio_conference_mixer/interface/audio_conference_mixer_defines.h) | 8
-rw-r--r--  webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.cc | 10
-rw-r--r--  webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.h | 4
-rw-r--r--  webrtc/modules/audio_conference_mixer/source/audio_frame_manipulator.cc | 2
-rw-r--r--  webrtc/modules/audio_conference_mixer/test/audio_conference_mixer_unittest.cc | 4
-rw-r--r--  webrtc/modules/audio_device/BUILD.gn | 95
-rw-r--r--  webrtc/modules/audio_device/OWNERS | 2
-rw-r--r--  webrtc/modules/audio_device/android/audio_device_unittest.cc | 14
-rw-r--r--  webrtc/modules/audio_device/android/audio_manager.cc | 31
-rw-r--r--  webrtc/modules/audio_device/android/audio_manager.h | 17
-rw-r--r--  webrtc/modules/audio_device/android/audio_manager_unittest.cc | 8
-rw-r--r--  webrtc/modules/audio_device/android/audio_record_jni.cc | 23
-rw-r--r--  webrtc/modules/audio_device/android/audio_record_jni.h | 6
-rw-r--r--  webrtc/modules/audio_device/android/audio_track_jni.cc | 18
-rw-r--r--  webrtc/modules/audio_device/android/audio_track_jni.h | 4
-rw-r--r--  webrtc/modules/audio_device/android/build_info.cc | 2
-rw-r--r--  webrtc/modules/audio_device/android/build_info.h | 2
-rw-r--r--  webrtc/modules/audio_device/android/ensure_initialized.cc | 3
-rw-r--r--  webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioEffects.java | 28
-rw-r--r--  webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioManager.java | 48
-rw-r--r--  webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioRecord.java | 4
-rw-r--r--  webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioTrack.java | 53
-rw-r--r--  webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioUtils.java | 37
-rw-r--r--  webrtc/modules/audio_device/android/opensles_player.cc | 37
-rw-r--r--  webrtc/modules/audio_device/android/opensles_player.h | 23
-rw-r--r--  webrtc/modules/audio_device/audio_device.gypi | 118
-rw-r--r--  webrtc/modules/audio_device/audio_device_buffer.cc | 10
-rw-r--r--  webrtc/modules/audio_device/audio_device_buffer.h | 12
-rw-r--r--  webrtc/modules/audio_device/dummy/file_audio_device.cc | 66
-rw-r--r--  webrtc/modules/audio_device/dummy/file_audio_device.h | 10
-rw-r--r--  webrtc/modules/audio_device/include/audio_device.h | 2
-rw-r--r--  webrtc/modules/audio_device/include/audio_device_defines.h | 26
-rw-r--r--  webrtc/modules/audio_device/ios/audio_device_ios.h | 5
-rw-r--r--  webrtc/modules/audio_device/ios/audio_device_ios.mm | 350
-rw-r--r--  webrtc/modules/audio_device/ios/audio_device_unittest_ios.cc | 72
-rw-r--r--  webrtc/modules/audio_device/linux/audio_device_alsa_linux.cc | 40
-rw-r--r--  webrtc/modules/audio_device/linux/audio_device_alsa_linux.h | 8
-rw-r--r--  webrtc/modules/audio_device/linux/audio_device_pulse_linux.cc | 36
-rw-r--r--  webrtc/modules/audio_device/linux/audio_device_pulse_linux.h | 9
-rw-r--r--  webrtc/modules/audio_device/mac/audio_device_mac.cc | 4841
-rw-r--r--  webrtc/modules/audio_device/mac/audio_device_mac.h | 587
-rw-r--r--  webrtc/modules/audio_device/mac/audio_mixer_manager_mac.cc | 1907
-rw-r--r--  webrtc/modules/audio_device/mac/audio_mixer_manager_mac.h | 99
-rw-r--r--  webrtc/modules/audio_device/main/interface/audio_device.h | 16
-rw-r--r--  webrtc/modules/audio_device/main/source/OWNERS | 5
-rw-r--r--  webrtc/modules/audio_device/main/source/audio_device.gypi | 14
-rw-r--r--  webrtc/modules/audio_device/test/audio_device_test_api.cc | 40
-rw-r--r--  webrtc/modules/audio_device/test/audio_device_test_defines.h | 2
-rw-r--r--  webrtc/modules/audio_device/test/func_test_manager.cc | 34
-rw-r--r--  webrtc/modules/audio_device/test/func_test_manager.h | 28
-rw-r--r--  webrtc/modules/audio_device/win/audio_device_wave_win.cc | 30
-rw-r--r--  webrtc/modules/audio_device/win/audio_device_wave_win.h | 5
-rw-r--r--  webrtc/modules/audio_processing/BUILD.gn | 8
-rw-r--r--  webrtc/modules/audio_processing/OWNERS | 1
-rw-r--r--  webrtc/modules/audio_processing/aec/aec_core.c | 625
-rw-r--r--  webrtc/modules/audio_processing/aec/aec_core_internal.h | 47
-rw-r--r--  webrtc/modules/audio_processing/aec/aec_core_mips.c | 89
-rw-r--r--  webrtc/modules/audio_processing/aec/aec_core_neon.c | 184
-rw-r--r--  webrtc/modules/audio_processing/aec/aec_core_sse2.c | 193
-rw-r--r--  webrtc/modules/audio_processing/aec/echo_cancellation.c | 139
-rw-r--r--  webrtc/modules/audio_processing/aec/echo_cancellation.h (renamed from webrtc/modules/audio_processing/aec/include/echo_cancellation.h) | 58
-rw-r--r--  webrtc/modules/audio_processing/aec/echo_cancellation_internal.h | 2
-rw-r--r--  webrtc/modules/audio_processing/aec/echo_cancellation_unittest.cc | 2
-rw-r--r--  webrtc/modules/audio_processing/aec/system_delay_unittest.cc | 3
-rw-r--r--  webrtc/modules/audio_processing/aecm/aecm_core.c | 2
-rw-r--r--  webrtc/modules/audio_processing/aecm/aecm_core_c.c | 2
-rw-r--r--  webrtc/modules/audio_processing/aecm/aecm_core_mips.c | 2
-rw-r--r--  webrtc/modules/audio_processing/aecm/echo_control_mobile.c | 160
-rw-r--r--  webrtc/modules/audio_processing/aecm/echo_control_mobile.h (renamed from webrtc/modules/audio_processing/aecm/include/echo_control_mobile.h) | 65
-rw-r--r--  webrtc/modules/audio_processing/agc/agc.cc | 2
-rw-r--r--  webrtc/modules/audio_processing/agc/agc_manager_direct.cc | 15
-rw-r--r--  webrtc/modules/audio_processing/agc/agc_unittest.cc | 2
-rw-r--r--  webrtc/modules/audio_processing/agc/histogram.cc | 2
-rw-r--r--  webrtc/modules/audio_processing/agc/legacy/analog_agc.c | 47
-rw-r--r--  webrtc/modules/audio_processing/agc/legacy/gain_control.h | 14
-rw-r--r--  webrtc/modules/audio_processing/agc/mock_agc.h | 2
-rw-r--r--  webrtc/modules/audio_processing/audio_buffer.cc | 43
-rw-r--r--  webrtc/modules/audio_processing/audio_buffer.h | 24
-rw-r--r--  webrtc/modules/audio_processing/audio_processing.gypi | 8
-rw-r--r--  webrtc/modules/audio_processing/audio_processing_impl.cc | 1099
-rw-r--r--  webrtc/modules/audio_processing/audio_processing_impl.h | 366
-rw-r--r--  webrtc/modules/audio_processing/audio_processing_impl_locking_unittest.cc | 1133
-rw-r--r--  webrtc/modules/audio_processing/audio_processing_impl_unittest.cc | 2
-rw-r--r--  webrtc/modules/audio_processing/audio_processing_performance_unittest.cc | 724
-rw-r--r--  webrtc/modules/audio_processing/audio_processing_tests.gypi | 6
-rw-r--r--  webrtc/modules/audio_processing/beamformer/array_util.cc | 25
-rw-r--r--  webrtc/modules/audio_processing/beamformer/array_util.h | 9
-rw-r--r--  webrtc/modules/audio_processing/beamformer/complex_matrix.h | 10
-rw-r--r--  webrtc/modules/audio_processing/beamformer/covariance_matrix_generator.cc | 14
-rw-r--r--  webrtc/modules/audio_processing/beamformer/matrix.h | 48
-rw-r--r--  webrtc/modules/audio_processing/beamformer/matrix_test_helpers.h | 16
-rw-r--r--  webrtc/modules/audio_processing/beamformer/nonlinear_beamformer.cc | 42
-rw-r--r--  webrtc/modules/audio_processing/beamformer/nonlinear_beamformer.h | 8
-rw-r--r--  webrtc/modules/audio_processing/beamformer/nonlinear_beamformer_test.cc | 5
-rw-r--r--  webrtc/modules/audio_processing/common.h | 4
-rw-r--r--  webrtc/modules/audio_processing/echo_cancellation_impl.cc | 269
-rw-r--r--  webrtc/modules/audio_processing/echo_cancellation_impl.h | 50
-rw-r--r--  webrtc/modules/audio_processing/echo_cancellation_impl_unittest.cc | 1
-rw-r--r--  webrtc/modules/audio_processing/echo_control_mobile_impl.cc | 245
-rw-r--r--  webrtc/modules/audio_processing/echo_control_mobile_impl.h | 40
-rw-r--r--  webrtc/modules/audio_processing/gain_control_impl.cc | 218
-rw-r--r--  webrtc/modules/audio_processing/gain_control_impl.h | 51
-rw-r--r--  webrtc/modules/audio_processing/high_pass_filter_impl.cc | 222
-rw-r--r--  webrtc/modules/audio_processing/high_pass_filter_impl.h | 35
-rw-r--r--  webrtc/modules/audio_processing/include/audio_processing.h | 25
-rw-r--r--  webrtc/modules/audio_processing/include/mock_audio_processing.h | 6
-rw-r--r--  webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.cc | 12
-rw-r--r--  webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.h | 16
-rw-r--r--  webrtc/modules/audio_processing/intelligibility/test/intelligibility_proc.cc | 2
-rw-r--r--  webrtc/modules/audio_processing/level_estimator_impl.cc | 75
-rw-r--r--  webrtc/modules/audio_processing/level_estimator_impl.h | 39
-rw-r--r--  webrtc/modules/audio_processing/logging/aec_logging.h | 22
-rw-r--r--  webrtc/modules/audio_processing/noise_suppression_impl.cc | 220
-rw-r--r--  webrtc/modules/audio_processing/noise_suppression_impl.h | 47
-rw-r--r--  webrtc/modules/audio_processing/ns/noise_suppression.c | 2
-rw-r--r--  webrtc/modules/audio_processing/ns/noise_suppression.h (renamed from webrtc/modules/audio_processing/ns/include/noise_suppression.h) | 6
-rw-r--r--  webrtc/modules/audio_processing/ns/noise_suppression_x.c | 2
-rw-r--r--  webrtc/modules/audio_processing/ns/noise_suppression_x.h (renamed from webrtc/modules/audio_processing/ns/include/noise_suppression_x.h) | 6
-rw-r--r--  webrtc/modules/audio_processing/ns/ns_core.c | 2
-rw-r--r--  webrtc/modules/audio_processing/ns/nsx_core.c | 2
-rw-r--r--  webrtc/modules/audio_processing/ns/nsx_core_c.c | 2
-rw-r--r--  webrtc/modules/audio_processing/ns/nsx_core_mips.c | 2
-rw-r--r--  webrtc/modules/audio_processing/processing_component.cc | 14
-rw-r--r--  webrtc/modules/audio_processing/processing_component.h | 24
-rw-r--r--  webrtc/modules/audio_processing/splitting_filter.cc | 16
-rw-r--r--  webrtc/modules/audio_processing/splitting_filter.h | 2
-rw-r--r--  webrtc/modules/audio_processing/test/audio_file_processor.cc | 180
-rw-r--r--  webrtc/modules/audio_processing/test/audio_file_processor.h | 139
-rw-r--r--  webrtc/modules/audio_processing/test/audio_processing_unittest.cc | 237
-rw-r--r--  webrtc/modules/audio_processing/test/audioproc_float.cc | 217
-rw-r--r--  webrtc/modules/audio_processing/test/debug_dump_test.cc | 612
-rw-r--r--  webrtc/modules/audio_processing/test/process_test.cc | 53
-rw-r--r--  webrtc/modules/audio_processing/test/test_utils.cc | 63
-rw-r--r--  webrtc/modules/audio_processing/test/test_utils.h | 48
-rw-r--r--  webrtc/modules/audio_processing/test/unpack.cc | 37
-rw-r--r--  webrtc/modules/audio_processing/transient/file_utils_unittest.cc | 96
-rw-r--r--  webrtc/modules/audio_processing/transient/transient_detector_unittest.cc | 9
-rw-r--r--  webrtc/modules/audio_processing/transient/transient_suppression_test.cc | 2
-rw-r--r--  webrtc/modules/audio_processing/transient/wpd_tree_unittest.cc | 7
-rw-r--r--  webrtc/modules/audio_processing/typing_detection.h | 2
-rw-r--r--  webrtc/modules/audio_processing/vad/pitch_based_vad.cc | 2
-rw-r--r--  webrtc/modules/audio_processing/vad/standalone_vad.cc | 4
-rw-r--r--  webrtc/modules/audio_processing/vad/standalone_vad_unittest.cc | 9
-rw-r--r--  webrtc/modules/audio_processing/vad/vad_audio_proc.cc | 2
-rw-r--r--  webrtc/modules/audio_processing/vad/vad_audio_proc_unittest.cc | 2
-rw-r--r--  webrtc/modules/audio_processing/vad/voice_activity_detector.cc | 2
-rw-r--r--  webrtc/modules/audio_processing/voice_detection_impl.cc | 184
-rw-r--r--  webrtc/modules/audio_processing/voice_detection_impl.h | 51
-rw-r--r--  webrtc/modules/bitrate_controller/BUILD.gn | 1
-rw-r--r--  webrtc/modules/bitrate_controller/bitrate_allocator.cc | 190
-rw-r--r--  webrtc/modules/bitrate_controller/bitrate_allocator_unittest.cc | 212
-rw-r--r--  webrtc/modules/bitrate_controller/bitrate_controller.gypi | 2
-rw-r--r--  webrtc/modules/bitrate_controller/bitrate_controller_impl.cc | 7
-rw-r--r--  webrtc/modules/bitrate_controller/bitrate_controller_impl.h | 2
-rw-r--r--  webrtc/modules/bitrate_controller/bitrate_controller_unittest.cc | 2
-rw-r--r--  webrtc/modules/bitrate_controller/include/bitrate_allocator.h | 99
-rw-r--r--  webrtc/modules/bitrate_controller/include/bitrate_controller.h | 7
-rw-r--r--  webrtc/modules/bitrate_controller/include/mock/mock_bitrate_controller.h | 30
-rw-r--r--  webrtc/modules/bitrate_controller/send_side_bandwidth_estimation.cc | 44
-rw-r--r--  webrtc/modules/bitrate_controller/send_side_bandwidth_estimation.h | 8
-rw-r--r--  webrtc/modules/desktop_capture/differ_block.cc | 4
-rw-r--r--  webrtc/modules/desktop_capture/screen_capturer_win.cc | 4
-rw-r--r--  webrtc/modules/desktop_capture/win/screen_capturer_win_magnifier.cc | 13
-rw-r--r--  webrtc/modules/desktop_capture/window_capturer_win.cc | 11
-rw-r--r--  webrtc/modules/include/module.h (renamed from webrtc/modules/interface/module.h) | 6
-rw-r--r--  webrtc/modules/include/module_common_types.h (renamed from webrtc/modules/interface/module_common_types.h) | 29
-rw-r--r--  webrtc/modules/media_file/BUILD.gn | 12
-rw-r--r--  webrtc/modules/media_file/OWNERS | 15
-rw-r--r--  webrtc/modules/media_file/media_file.gypi | 12
-rw-r--r--  webrtc/modules/media_file/media_file.h (renamed from webrtc/modules/media_file/interface/media_file.h) | 12
-rw-r--r--  webrtc/modules/media_file/media_file_defines.h (renamed from webrtc/modules/media_file/interface/media_file_defines.h) | 8
-rw-r--r--  webrtc/modules/media_file/media_file_impl.cc (renamed from webrtc/modules/media_file/source/media_file_impl.cc) | 2
-rw-r--r--  webrtc/modules/media_file/media_file_impl.h (renamed from webrtc/modules/media_file/source/media_file_impl.h) | 14
-rw-r--r--  webrtc/modules/media_file/media_file_unittest.cc (renamed from webrtc/modules/media_file/source/media_file_unittest.cc) | 20
-rw-r--r--  webrtc/modules/media_file/media_file_utility.cc (renamed from webrtc/modules/media_file/source/media_file_utility.cc) | 351
-rw-r--r--  webrtc/modules/media_file/media_file_utility.h (renamed from webrtc/modules/media_file/source/media_file_utility.h) | 32
-rw-r--r--  webrtc/modules/media_file/source/OWNERS | 5
-rw-r--r--  webrtc/modules/module_common_types_unittest.cc | 2
-rw-r--r--  webrtc/modules/modules.gyp | 756
-rw-r--r--  webrtc/modules/modules_unittests.isolate | 3
-rw-r--r--  webrtc/modules/pacing/BUILD.gn | 4
-rw-r--r--  webrtc/modules/pacing/bitrate_prober.cc | 4
-rw-r--r--  webrtc/modules/pacing/mock/mock_paced_sender.h (renamed from webrtc/modules/pacing/include/mock/mock_paced_sender.h) | 8
-rw-r--r--  webrtc/modules/pacing/paced_sender.cc | 141
-rw-r--r--  webrtc/modules/pacing/paced_sender.h (renamed from webrtc/modules/pacing/include/paced_sender.h) | 23
-rw-r--r--  webrtc/modules/pacing/paced_sender_unittest.cc | 100
-rw-r--r--  webrtc/modules/pacing/pacing.gypi | 4
-rw-r--r--  webrtc/modules/pacing/packet_router.cc | 6
-rw-r--r--  webrtc/modules/pacing/packet_router.h (renamed from webrtc/modules/pacing/include/packet_router.h) | 10
-rw-r--r--  webrtc/modules/pacing/packet_router_unittest.cc | 4
-rw-r--r--  webrtc/modules/remote_bitrate_estimator/aimd_rate_control.cc | 19
-rw-r--r--  webrtc/modules/remote_bitrate_estimator/aimd_rate_control.h | 2
-rw-r--r--  webrtc/modules/remote_bitrate_estimator/bwe_simulations.cc | 26
-rw-r--r--  webrtc/modules/remote_bitrate_estimator/include/bwe_defines.h | 57
-rw-r--r--  webrtc/modules/remote_bitrate_estimator/include/mock/mock_remote_bitrate_observer.h | 6
-rw-r--r--  webrtc/modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h | 6
-rw-r--r--  webrtc/modules/remote_bitrate_estimator/include/send_time_history.h | 6
-rw-r--r--  webrtc/modules/remote_bitrate_estimator/inter_arrival.cc | 5
-rw-r--r--  webrtc/modules/remote_bitrate_estimator/overuse_detector.cc | 6
-rw-r--r--  webrtc/modules/remote_bitrate_estimator/overuse_detector.h | 2
-rw-r--r--  webrtc/modules/remote_bitrate_estimator/overuse_detector_unittest.cc | 147
-rw-r--r--  webrtc/modules/remote_bitrate_estimator/overuse_estimator.cc | 5
-rw-r--r--  webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_abs_send_time.cc | 7
-rw-r--r--  webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_abs_send_time_unittest.cc | 1
-rw-r--r--  webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream.cc | 33
-rw-r--r--  webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream_unittest.cc | 1
-rw-r--r--  webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_unittest_helper.cc | 21
-rw-r--r--  webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_unittest_helper.h | 1
-rw-r--r--  webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimators_test.cc | 34
-rw-r--r--  webrtc/modules/remote_bitrate_estimator/remote_estimator_proxy.cc | 6
-rw-r--r--  webrtc/modules/remote_bitrate_estimator/remote_estimator_proxy.h | 2
-rw-r--r--  webrtc/modules/remote_bitrate_estimator/remote_estimator_proxy_unittest.cc | 2
-rw-r--r--  webrtc/modules/remote_bitrate_estimator/test/bwe.h | 3
-rw-r--r--  webrtc/modules/remote_bitrate_estimator/test/bwe_test.cc | 17
-rw-r--r--  webrtc/modules/remote_bitrate_estimator/test/bwe_test_baselinefile.h | 2
-rw-r--r--  webrtc/modules/remote_bitrate_estimator/test/bwe_test_fileutils.h | 2
-rw-r--r--  webrtc/modules/remote_bitrate_estimator/test/bwe_test_framework.cc | 13
-rw-r--r--  webrtc/modules/remote_bitrate_estimator/test/bwe_test_framework.h | 19
-rw-r--r--  webrtc/modules/remote_bitrate_estimator/test/bwe_test_framework_unittest.cc | 62
-rw-r--r--  webrtc/modules/remote_bitrate_estimator/test/bwe_test_logging.cc | 28
-rw-r--r--  webrtc/modules/remote_bitrate_estimator/test/bwe_test_logging.h | 28
-rw-r--r--  webrtc/modules/remote_bitrate_estimator/test/bwe_unittest.cc | 3
-rw-r--r--  webrtc/modules/remote_bitrate_estimator/test/estimators/nada.cc | 5
-rw-r--r--  webrtc/modules/remote_bitrate_estimator/test/estimators/nada.h | 2
-rw-r--r--  webrtc/modules/remote_bitrate_estimator/test/estimators/nada_unittest.cc | 7
-rw-r--r--  webrtc/modules/remote_bitrate_estimator/test/estimators/remb.cc | 2
-rw-r--r--  webrtc/modules/remote_bitrate_estimator/test/estimators/tcp.cc | 2
-rw-r--r--  webrtc/modules/remote_bitrate_estimator/test/metric_recorder.cc | 4
-rw-r--r--  webrtc/modules/remote_bitrate_estimator/test/packet.h | 2
-rw-r--r--  webrtc/modules/remote_bitrate_estimator/test/packet_receiver.cc | 4
-rw-r--r--  webrtc/modules/remote_bitrate_estimator/test/packet_sender.cc | 2
-rw-r--r--  webrtc/modules/remote_bitrate_estimator/test/packet_sender.h | 5
-rw-r--r--  webrtc/modules/remote_bitrate_estimator/tools/bwe_rtp.cc | 8
-rw-r--r--  webrtc/modules/remote_bitrate_estimator/tools/bwe_rtp_play.cc | 4
-rw-r--r--  webrtc/modules/remote_bitrate_estimator/tools/rtp_to_text.cc | 4
-rw-r--r--  webrtc/modules/remote_bitrate_estimator/transport_feedback_adapter.cc | 5
-rw-r--r--  webrtc/modules/remote_bitrate_estimator/transport_feedback_adapter.h | 4
-rw-r--r--  webrtc/modules/remote_bitrate_estimator/transport_feedback_adapter_unittest.cc | 4
-rw-r--r--  webrtc/modules/rtp_rtcp/BUILD.gn | 48
-rw-r--r--  webrtc/modules/rtp_rtcp/OWNERS | 17
-rw-r--r--  webrtc/modules/rtp_rtcp/include/fec_receiver.h (renamed from webrtc/modules/rtp_rtcp/interface/fec_receiver.h) | 8
-rw-r--r--  webrtc/modules/rtp_rtcp/include/receive_statistics.h (renamed from webrtc/modules/rtp_rtcp/interface/receive_statistics.h) | 10
-rw-r--r--  webrtc/modules/rtp_rtcp/include/remote_ntp_time_estimator.h (renamed from webrtc/modules/rtp_rtcp/interface/remote_ntp_time_estimator.h) | 6
-rw-r--r--  webrtc/modules/rtp_rtcp/include/rtp_cvo.h (renamed from webrtc/modules/rtp_rtcp/interface/rtp_cvo.h) | 6
-rw-r--r--  webrtc/modules/rtp_rtcp/include/rtp_header_parser.h (renamed from webrtc/modules/rtp_rtcp/interface/rtp_header_parser.h) | 8
-rw-r--r--  webrtc/modules/rtp_rtcp/include/rtp_payload_registry.h (renamed from webrtc/modules/rtp_rtcp/interface/rtp_payload_registry.h) | 32
-rw-r--r--  webrtc/modules/rtp_rtcp/include/rtp_receiver.h (renamed from webrtc/modules/rtp_rtcp/interface/rtp_receiver.h) | 10
-rw-r--r--  webrtc/modules/rtp_rtcp/include/rtp_rtcp.h (renamed from webrtc/modules/rtp_rtcp/interface/rtp_rtcp.h) | 36
-rw-r--r--  webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h (renamed from webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h) | 106
-rw-r--r--  webrtc/modules/rtp_rtcp/mocks/mock_rtp_rtcp.h | 33
-rw-r--r--  webrtc/modules/rtp_rtcp/rtp_rtcp.gypi | 48
-rw-r--r--  webrtc/modules/rtp_rtcp/source/CPPLINT.cfg | 6
-rw-r--r--  webrtc/modules/rtp_rtcp/source/byte_io.h | 15
-rw-r--r--  webrtc/modules/rtp_rtcp/source/dtmf_queue.cc | 6
-rw-r--r--  webrtc/modules/rtp_rtcp/source/fec_private_tables_bursty.h | 6
-rw-r--r--  webrtc/modules/rtp_rtcp/source/fec_private_tables_random.h | 12
-rw-r--r--  webrtc/modules/rtp_rtcp/source/fec_receiver_impl.h | 6
-rw-r--r--  webrtc/modules/rtp_rtcp/source/fec_receiver_unittest.cc | 4
-rw-r--r--  webrtc/modules/rtp_rtcp/source/fec_test_helper.h | 4
-rw-r--r--  webrtc/modules/rtp_rtcp/source/forward_error_correction.cc | 178
-rw-r--r--  webrtc/modules/rtp_rtcp/source/forward_error_correction.h | 2
-rw-r--r--  webrtc/modules/rtp_rtcp/source/forward_error_correction_internal.cc | 106
-rw-r--r--  webrtc/modules/rtp_rtcp/source/h264_bitstream_parser.h | 2
-rw-r--r--  webrtc/modules/rtp_rtcp/source/h264_sps_parser_unittest.cc | 11
-rw-r--r--  webrtc/modules/rtp_rtcp/source/mock/mock_rtp_payload_strategy.h | 12
-rw-r--r--  webrtc/modules/rtp_rtcp/source/nack_rtx_unittest.cc | 68
-rw-r--r--  webrtc/modules/rtp_rtcp/source/producer_fec_unittest.cc | 49
-rw-r--r--  webrtc/modules/rtp_rtcp/source/receive_statistics_impl.cc | 29
-rw-r--r--  webrtc/modules/rtp_rtcp/source/receive_statistics_impl.h | 11
-rw-r--r--  webrtc/modules/rtp_rtcp/source/receive_statistics_unittest.cc | 2
-rw-r--r--  webrtc/modules/rtp_rtcp/source/remote_ntp_time_estimator.cc | 2
-rw-r--r--  webrtc/modules/rtp_rtcp/source/remote_ntp_time_estimator_unittest.cc | 2
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtcp_format_remb_unittest.cc | 17
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtcp_packet.cc | 647
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtcp_packet.h | 692
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtcp_packet/app.cc | 79
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtcp_packet/app.h | 66
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtcp_packet/app_unittest.cc | 81
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtcp_packet/bye.cc | 133
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtcp_packet/bye.h | 63
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtcp_packet/bye_unittest.cc | 173
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtcp_packet/compound_packet.cc | 28
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtcp_packet/compound_packet.h | 41
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtcp_packet/compound_packet_unittest.cc | 157
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtcp_packet/dlrr.cc | 100
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtcp_packet/dlrr.h | 63
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtcp_packet/dlrr_unittest.cc | 102
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtcp_packet/extended_jitter_report.cc | 95
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtcp_packet/extended_jitter_report.h | 63
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtcp_packet/extended_jitter_report_unittest.cc | 98
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtcp_packet/nack.cc | 163
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtcp_packet/nack.h | 63
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtcp_packet/nack_unittest.cc | 190
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtcp_packet/pli.cc | 70
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtcp_packet/pli.h | 49
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtcp_packet/pli_unittest.cc | 66
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtcp_packet/psfb.cc | 45
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtcp_packet/psfb.h | 48
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtcp_packet/receiver_report.cc | 89
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtcp_packet/receiver_report.h | 66
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtcp_packet/receiver_report_unittest.cc | 145
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtcp_packet/report_block.cc | 89
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtcp_packet/report_block.h | 67
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtcp_packet/report_block_unittest.cc | 86
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtcp_packet/rrtr.cc | 49
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtcp_packet/rrtr.h | 49
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtcp_packet/rrtr_unittest.cc | 51
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtcp_packet/rtpfb.cc | 45
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtcp_packet/rtpfb.h | 48
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtcp_packet/sli.cc | 108
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtcp_packet/sli.h | 81
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtcp_packet/sli_unittest.cc | 91
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtcp_packet/tmmbn.cc | 119
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtcp_packet/tmmbn.h | 60
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtcp_packet/tmmbn_unittest.cc | 84
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtcp_packet/tmmbr.cc | 105
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtcp_packet/tmmbr.h | 64
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtcp_packet/tmmbr_unittest.cc | 43
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h | 2
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtcp_packet/voip_metric.cc | 107
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtcp_packet/voip_metric.h | 53
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtcp_packet/voip_metric_unittest.cc | 93
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtcp_packet_unittest.cc | 561
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtcp_receiver.cc | 13
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtcp_receiver.h | 6
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtcp_receiver_help.cc | 3
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtcp_receiver_help.h | 4
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtcp_receiver_unittest.cc | 34
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtcp_sender.cc | 787
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtcp_sender.h | 484
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtcp_utility.cc | 5
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtcp_utility.h | 4
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtp_fec_unittest.cc | 29
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtp_format.h | 4
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtp_format_h264.cc | 2
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtp_format_h264_unittest.cc | 2
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtp_format_video_generic.cc | 2
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtp_format_vp8.h | 2
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtp_format_vp8_test_helper.h | 2
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtp_format_vp9.cc | 72
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtp_format_vp9.h | 2
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtp_format_vp9_unittest.cc | 33
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtp_header_extension.h | 13
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtp_header_extension_unittest.cc | 2
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtp_header_parser.cc | 4
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtp_packet_history.cc | 2
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtp_packet_history.h | 12
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtp_packet_history_unittest.cc | 8
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtp_payload_registry.cc | 30
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtp_payload_registry_unittest.cc | 141
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtp_receiver_audio.h | 20
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtp_receiver_impl.cc | 32
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtp_receiver_impl.h | 8
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtp_receiver_strategy.h | 6
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtp_receiver_video.cc | 4
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtp_receiver_video.h | 2
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtp_rtcp_config.h | 39
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtp_rtcp_impl.cc | 28
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtp_rtcp_impl.h | 14
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtp_rtcp_impl_unittest.cc | 32
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtp_sender.cc | 63
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtp_sender.h | 23
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtp_sender_audio.cc | 450
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtp_sender_audio.h | 171
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtp_sender_unittest.cc | 270
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtp_sender_video.cc | 46
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtp_sender_video.h | 18
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtp_utility.cc | 151
-rw-r--r--  webrtc/modules/rtp_rtcp/source/rtp_utility.h | 114
-rw-r--r--  webrtc/modules/rtp_rtcp/source/ssrc_database.cc | 111
-rw-r--r--  webrtc/modules/rtp_rtcp/source/ssrc_database.h | 48
-rw-r--r--  webrtc/modules/rtp_rtcp/source/time_util.h | 48
-rw-r--r--  webrtc/modules/rtp_rtcp/source/time_util_unittest.cc | 62
-rw-r--r--  webrtc/modules/rtp_rtcp/source/tmmbr_help.cc | 4
-rw-r--r--  webrtc/modules/rtp_rtcp/source/video_codec_information.h | 13
-rw-r--r--  webrtc/modules/rtp_rtcp/source/vp8_partition_aggregator.cc | 36
-rw-r--r--  webrtc/modules/rtp_rtcp/source/vp8_partition_aggregator.h | 4
-rw-r--r--  webrtc/modules/rtp_rtcp/source/vp8_partition_aggregator_unittest.cc | 2
-rw-r--r--  webrtc/modules/rtp_rtcp/test/BWEStandAlone/BWEStandAlone.cc | 199
-rw-r--r--  webrtc/modules/rtp_rtcp/test/BWEStandAlone/MatlabPlot.cc | 1055
-rw-r--r--  webrtc/modules/rtp_rtcp/test/BWEStandAlone/MatlabPlot.h | 170
-rw-r--r--  webrtc/modules/rtp_rtcp/test/BWEStandAlone/TestLoadGenerator.cc | 432
-rw-r--r--  webrtc/modules/rtp_rtcp/test/BWEStandAlone/TestLoadGenerator.h | 149
-rw-r--r--  webrtc/modules/rtp_rtcp/test/BWEStandAlone/TestSenderReceiver.cc | 411
-rw-r--r--  webrtc/modules/rtp_rtcp/test/BWEStandAlone/TestSenderReceiver.h | 153
-rw-r--r--  webrtc/modules/rtp_rtcp/test/bwe_standalone.gypi | 85
-rw-r--r--  webrtc/modules/rtp_rtcp/test/testAPI/test_api.cc | 7
-rw-r--r--  webrtc/modules/rtp_rtcp/test/testAPI/test_api.h | 15
-rw-r--r--  webrtc/modules/rtp_rtcp/test/testAPI/test_api_audio.cc | 23
-rw-r--r--  webrtc/modules/rtp_rtcp/test/testAPI/test_api_rtcp.cc | 29
-rw-r--r--  webrtc/modules/rtp_rtcp/test/testAPI/test_api_video.cc | 6
-rw-r--r--  webrtc/modules/rtp_rtcp/test/testFec/average_residual_loss_xor_codes.h | 7
-rw-r--r--  webrtc/modules/rtp_rtcp/test/testFec/test_fec.cc | 120
-rw-r--r--  webrtc/modules/rtp_rtcp/test/testFec/test_packet_masks_metrics.cc | 27
-rw-r--r--  webrtc/modules/utility/BUILD.gn | 12
-rw-r--r--  webrtc/modules/utility/OWNERS | 5
-rw-r--r--  webrtc/modules/utility/include/audio_frame_operations.h (renamed from webrtc/modules/utility/interface/audio_frame_operations.h) | 6
-rw-r--r--  webrtc/modules/utility/include/file_player.h (renamed from webrtc/modules/utility/interface/file_player.h) | 8
-rw-r--r--  webrtc/modules/utility/include/file_recorder.h (renamed from webrtc/modules/utility/interface/file_recorder.h) | 10
-rw-r--r--  webrtc/modules/utility/include/helpers_android.h (renamed from webrtc/modules/utility/interface/helpers_android.h) | 6
-rw-r--r--  webrtc/modules/utility/include/helpers_ios.h (renamed from webrtc/modules/utility/interface/helpers_ios.h) | 6
-rw-r--r--  webrtc/modules/utility/include/jvm_android.h (renamed from webrtc/modules/utility/interface/jvm_android.h) | 8
-rw-r--r--  webrtc/modules/utility/include/mock/mock_process_thread.h (renamed from webrtc/modules/utility/interface/mock/mock_process_thread.h) | 8
-rw-r--r--  webrtc/modules/utility/include/process_thread.h (renamed from webrtc/modules/utility/interface/process_thread.h) | 6
-rw-r--r--  webrtc/modules/utility/source/audio_frame_operations.cc | 4
-rw-r--r--  webrtc/modules/utility/source/audio_frame_operations_unittest.cc | 4
-rw-r--r--  webrtc/modules/utility/source/coder.cc | 2
-rw-r--r--  webrtc/modules/utility/source/coder.h | 2
-rw-r--r--  webrtc/modules/utility/source/file_player_impl.h | 6
-rw-r--r--  webrtc/modules/utility/source/file_player_unittests.cc | 17
-rw-r--r--  webrtc/modules/utility/source/file_recorder_impl.cc | 2
-rw-r--r--  webrtc/modules/utility/source/file_recorder_impl.h | 10
-rw-r--r--  webrtc/modules/utility/source/helpers_android.cc | 2
-rw-r--r--  webrtc/modules/utility/source/helpers_ios.mm | 2
-rw-r--r--  webrtc/modules/utility/source/jvm_android.cc | 2
-rw-r--r--  webrtc/modules/utility/source/process_thread_impl.cc | 13
-rw-r--r--  webrtc/modules/utility/source/process_thread_impl.h | 7
-rw-r--r--  webrtc/modules/utility/source/process_thread_impl_unittest.cc | 13
-rw-r--r--  webrtc/modules/utility/utility.gypi | 14
-rw-r--r--  webrtc/modules/video_capture/BUILD.gn | 6
-rw-r--r--  webrtc/modules/video_capture/device_info_impl.h | 2
-rw-r--r--  webrtc/modules/video_capture/ios/device_info_ios_objc.h | 2
-rw-r--r--  webrtc/modules/video_capture/linux/video_capture_linux.cc | 6
-rw-r--r--  webrtc/modules/video_capture/linux/video_capture_linux.h | 5
-rw-r--r--  webrtc/modules/video_capture/mac/qtkit/video_capture_qtkit_info.mm | 2
-rw-r--r--  webrtc/modules/video_capture/test/video_capture_unittest.cc | 36
-rw-r--r--  webrtc/modules/video_capture/video_capture.gypi | 23
-rw-r--r--  webrtc/modules/video_capture/video_capture.h (renamed from webrtc/modules/video_capture/include/video_capture.h) | 10
-rw-r--r--  webrtc/modules/video_capture/video_capture_defines.h (renamed from webrtc/modules/video_capture/include/video_capture_defines.h) | 8
-rw-r--r--  webrtc/modules/video_capture/video_capture_factory.cc | 2
-rw-r--r--  webrtc/modules/video_capture/video_capture_factory.h (renamed from webrtc/modules/video_capture/include/video_capture_factory.h) | 8
-rw-r--r--  webrtc/modules/video_capture/video_capture_impl.cc | 2
-rw-r--r--  webrtc/modules/video_capture/video_capture_impl.h | 2
-rw-r--r--  webrtc/modules/video_capture/windows/sink_filter_ds.h | 2
-rw-r--r--  webrtc/modules/video_coding/BUILD.gn | 146
-rw-r--r--  webrtc/modules/video_coding/OWNERS | 5
-rw-r--r--  webrtc/modules/video_coding/codec_database.cc (renamed from webrtc/modules/video_coding/main/source/codec_database.cc) | 265
-rw-r--r--  webrtc/modules/video_coding/codec_database.h (renamed from webrtc/modules/video_coding/main/source/codec_database.h) | 51
-rw-r--r--  webrtc/modules/video_coding/codec_timer.cc | 96
-rw-r--r--  webrtc/modules/video_coding/codec_timer.h | 57
-rw-r--r--  webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_decoder.cc | 19
-rw-r--r--  webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_decoder.h | 2
-rw-r--r--  webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.cc | 43
-rw-r--r--  webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.h | 2
-rw-r--r--  webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu.cc | 9
-rw-r--r--  webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu.h | 17
-rw-r--r--  webrtc/modules/video_coding/codecs/h264/include/h264.h | 2
-rw-r--r--  webrtc/modules/video_coding/codecs/i420/i420.cc | 55
-rw-r--r--  webrtc/modules/video_coding/codecs/i420/include/i420.h | 145
-rw-r--r--  webrtc/modules/video_coding/codecs/interface/mock/mock_video_codec_interface.h | 43
-rw-r--r--  webrtc/modules/video_coding/codecs/interface/video_codec_interface.h | 28
-rw-r--r--  webrtc/modules/video_coding/codecs/interface/video_error_codes.h | 9
-rw-r--r--  webrtc/modules/video_coding/codecs/test/packet_manipulator.cc | 8
-rw-r--r--  webrtc/modules/video_coding/codecs/test/packet_manipulator.h | 13
-rw-r--r--  webrtc/modules/video_coding/codecs/test/packet_manipulator_unittest.cc | 27
-rw-r--r--  webrtc/modules/video_coding/codecs/test/predictive_packet_manipulator.cc | 17
-rw-r--r--  webrtc/modules/video_coding/codecs/test/predictive_packet_manipulator.h | 1
-rw-r--r--  webrtc/modules/video_coding/codecs/test/stats.cc | 69
-rw-r--r--  webrtc/modules/video_coding/codecs/test/stats.h | 2
-rw-r--r--  webrtc/modules/video_coding/codecs/test/stats_unittest.cc | 16
-rw-r--r--  webrtc/modules/video_coding/codecs/test/videoprocessor.cc | 67
-rw-r--r--  webrtc/modules/video_coding/codecs/test/videoprocessor.h | 15
-rw-r--r--  webrtc/modules/video_coding/codecs/test/videoprocessor_integrationtest.cc | 346
-rw-r--r--  webrtc/modules/video_coding/codecs/test/videoprocessor_unittest.cc | 44
-rw-r--r--  webrtc/modules/video_coding/codecs/tools/video_quality_measurement.cc | 320
-rw-r--r--  webrtc/modules/video_coding/codecs/vp8/default_temporal_layers.cc | 26
-rw-r--r--  webrtc/modules/video_coding/codecs/vp8/default_temporal_layers_unittest.cc | 157
-rw-r--r--  webrtc/modules/video_coding/codecs/vp8/include/vp8.h | 9
-rw-r--r--  webrtc/modules/video_coding/codecs/vp8/include/vp8_common_types.h | 14
-rw-r--r--  webrtc/modules/video_coding/codecs/vp8/realtime_temporal_layers.cc | 43
-rw-r--r--  webrtc/modules/video_coding/codecs/vp8/reference_picture_selection.cc | 20
-rw-r--r--  webrtc/modules/video_coding/codecs/vp8/reference_picture_selection_unittest.cc | 40
-rw-r--r--webrtc/modules/video_coding/codecs/vp8/screenshare_layers.cc6
-rw-r--r--webrtc/modules/video_coding/codecs/vp8/screenshare_layers.h2
-rw-r--r--webrtc/modules/video_coding/codecs/vp8/screenshare_layers_unittest.cc4
-rw-r--r--webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.cc58
-rw-r--r--webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.h10
-rw-r--r--webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter_unittest.cc33
-rw-r--r--webrtc/modules/video_coding/codecs/vp8/simulcast_unittest.cc14
-rw-r--r--webrtc/modules/video_coding/codecs/vp8/simulcast_unittest.h282
-rw-r--r--webrtc/modules/video_coding/codecs/vp8/temporal_layers.h5
-rw-r--r--webrtc/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc22
-rw-r--r--webrtc/modules/video_coding/codecs/vp8/vp8_factory.h1
-rw-r--r--webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc262
-rw-r--r--webrtc/modules/video_coding/codecs/vp8/vp8_impl.h24
-rw-r--r--webrtc/modules/video_coding/codecs/vp8/vp8_sequence_coder.cc93
-rw-r--r--webrtc/modules/video_coding/codecs/vp9/include/vp9.h3
-rw-r--r--webrtc/modules/video_coding/codecs/vp9/screenshare_layers.cc93
-rw-r--r--webrtc/modules/video_coding/codecs/vp9/screenshare_layers.h66
-rw-r--r--webrtc/modules/video_coding/codecs/vp9/screenshare_layers_unittest.cc323
-rw-r--r--webrtc/modules/video_coding/codecs/vp9/vp9.gyp32
-rw-r--r--webrtc/modules/video_coding/codecs/vp9/vp9_dummy_impl.cc19
-rw-r--r--webrtc/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.cc2
-rw-r--r--webrtc/modules/video_coding/codecs/vp9/vp9_impl.cc411
-rw-r--r--webrtc/modules/video_coding/codecs/vp9/vp9_impl.h49
-rw-r--r--webrtc/modules/video_coding/content_metrics_processing.cc (renamed from webrtc/modules/video_coding/main/source/content_metrics_processing.cc)49
-rw-r--r--webrtc/modules/video_coding/content_metrics_processing.h (renamed from webrtc/modules/video_coding/main/source/content_metrics_processing.h)16
-rw-r--r--webrtc/modules/video_coding/decoding_state.cc (renamed from webrtc/modules/video_coding/main/source/decoding_state.cc)86
-rw-r--r--webrtc/modules/video_coding/decoding_state.h (renamed from webrtc/modules/video_coding/main/source/decoding_state.h)26
-rw-r--r--webrtc/modules/video_coding/decoding_state_unittest.cc (renamed from webrtc/modules/video_coding/main/source/decoding_state_unittest.cc)260
-rw-r--r--webrtc/modules/video_coding/encoded_frame.cc (renamed from webrtc/modules/video_coding/main/source/encoded_frame.cc)118
-rw-r--r--webrtc/modules/video_coding/encoded_frame.h132
-rw-r--r--webrtc/modules/video_coding/fec_tables_xor.h459
-rw-r--r--webrtc/modules/video_coding/frame_buffer.cc270
-rw-r--r--webrtc/modules/video_coding/frame_buffer.h (renamed from webrtc/modules/video_coding/main/source/frame_buffer.h)18
-rw-r--r--webrtc/modules/video_coding/generic_decoder.cc192
-rw-r--r--webrtc/modules/video_coding/generic_decoder.h111
-rw-r--r--webrtc/modules/video_coding/generic_encoder.cc (renamed from webrtc/modules/video_coding/main/source/generic_encoder.cc)103
-rw-r--r--webrtc/modules/video_coding/generic_encoder.h149
-rw-r--r--webrtc/modules/video_coding/include/mock/mock_vcm_callbacks.h (renamed from webrtc/modules/video_coding/main/interface/mock/mock_vcm_callbacks.h)15
-rw-r--r--webrtc/modules/video_coding/include/mock/mock_video_codec_interface.h81
-rw-r--r--webrtc/modules/video_coding/include/video_codec_interface.h99
-rw-r--r--webrtc/modules/video_coding/include/video_coding.h519
-rw-r--r--webrtc/modules/video_coding/include/video_coding_defines.h (renamed from webrtc/modules/video_coding/main/interface/video_coding_defines.h)99
-rw-r--r--webrtc/modules/video_coding/include/video_error_codes.h32
-rw-r--r--webrtc/modules/video_coding/inter_frame_delay.cc107
-rw-r--r--webrtc/modules/video_coding/inter_frame_delay.h67
-rw-r--r--webrtc/modules/video_coding/internal_defines.h41
-rw-r--r--webrtc/modules/video_coding/jitter_buffer.cc (renamed from webrtc/modules/video_coding/main/source/jitter_buffer.cc)213
-rw-r--r--webrtc/modules/video_coding/jitter_buffer.h (renamed from webrtc/modules/video_coding/main/source/jitter_buffer.h)47
-rw-r--r--webrtc/modules/video_coding/jitter_buffer_common.h (renamed from webrtc/modules/video_coding/main/source/jitter_buffer_common.h)52
-rw-r--r--webrtc/modules/video_coding/jitter_buffer_unittest.cc (renamed from webrtc/modules/video_coding/main/source/jitter_buffer_unittest.cc)488
-rw-r--r--webrtc/modules/video_coding/jitter_estimator.cc443
-rw-r--r--webrtc/modules/video_coding/jitter_estimator.h170
-rw-r--r--webrtc/modules/video_coding/jitter_estimator_tests.cc (renamed from webrtc/modules/video_coding/main/source/jitter_estimator_tests.cc)2
-rw-r--r--webrtc/modules/video_coding/main/interface/video_coding.h544
-rw-r--r--webrtc/modules/video_coding/main/source/OWNERS5
-rw-r--r--webrtc/modules/video_coding/main/source/codec_timer.cc136
-rw-r--r--webrtc/modules/video_coding/main/source/codec_timer.h62
-rw-r--r--webrtc/modules/video_coding/main/source/encoded_frame.h127
-rw-r--r--webrtc/modules/video_coding/main/source/fec_tables_xor.h6481
-rw-r--r--webrtc/modules/video_coding/main/source/frame_buffer.cc297
-rw-r--r--webrtc/modules/video_coding/main/source/generic_decoder.cc198
-rw-r--r--webrtc/modules/video_coding/main/source/generic_decoder.h112
-rw-r--r--webrtc/modules/video_coding/main/source/generic_encoder.h142
-rw-r--r--webrtc/modules/video_coding/main/source/inter_frame_delay.cc114
-rw-r--r--webrtc/modules/video_coding/main/source/inter_frame_delay.h66
-rw-r--r--webrtc/modules/video_coding/main/source/internal_defines.h68
-rw-r--r--webrtc/modules/video_coding/main/source/jitter_estimator.cc482
-rw-r--r--webrtc/modules/video_coding/main/source/jitter_estimator.h165
-rw-r--r--webrtc/modules/video_coding/main/source/media_opt_util.cc774
-rw-r--r--webrtc/modules/video_coding/main/source/media_opt_util.h364
-rw-r--r--webrtc/modules/video_coding/main/source/nack_fec_tables.h126
-rw-r--r--webrtc/modules/video_coding/main/source/packet.h59
-rw-r--r--webrtc/modules/video_coding/main/source/rtt_filter.cc202
-rw-r--r--webrtc/modules/video_coding/main/source/rtt_filter.h68
-rw-r--r--webrtc/modules/video_coding/main/test/video_source.h82
-rw-r--r--webrtc/modules/video_coding/media_opt_util.cc682
-rw-r--r--webrtc/modules/video_coding/media_opt_util.h361
-rw-r--r--webrtc/modules/video_coding/media_optimization.cc (renamed from webrtc/modules/video_coding/main/source/media_optimization.cc)65
-rw-r--r--webrtc/modules/video_coding/media_optimization.h (renamed from webrtc/modules/video_coding/main/source/media_optimization.h)26
-rw-r--r--webrtc/modules/video_coding/media_optimization_unittest.cc (renamed from webrtc/modules/video_coding/main/source/media_optimization_unittest.cc)3
-rw-r--r--webrtc/modules/video_coding/nack_fec_tables.h31
-rw-r--r--webrtc/modules/video_coding/packet.cc (renamed from webrtc/modules/video_coding/main/source/packet.cc)73
-rw-r--r--webrtc/modules/video_coding/packet.h59
-rw-r--r--webrtc/modules/video_coding/qm_select.cc (renamed from webrtc/modules/video_coding/main/source/qm_select.cc)191
-rw-r--r--webrtc/modules/video_coding/qm_select.h (renamed from webrtc/modules/video_coding/main/source/qm_select.h)65
-rw-r--r--webrtc/modules/video_coding/qm_select_data.h (renamed from webrtc/modules/video_coding/main/source/qm_select_data.h)178
-rw-r--r--webrtc/modules/video_coding/qm_select_unittest.cc (renamed from webrtc/modules/video_coding/main/source/qm_select_unittest.cc)234
-rw-r--r--webrtc/modules/video_coding/receiver.cc (renamed from webrtc/modules/video_coding/main/source/receiver.cc)59
-rw-r--r--webrtc/modules/video_coding/receiver.h (renamed from webrtc/modules/video_coding/main/source/receiver.h)26
-rw-r--r--webrtc/modules/video_coding/receiver_unittest.cc (renamed from webrtc/modules/video_coding/main/source/receiver_unittest.cc)143
-rw-r--r--webrtc/modules/video_coding/rtt_filter.cc165
-rw-r--r--webrtc/modules/video_coding/rtt_filter.h66
-rw-r--r--webrtc/modules/video_coding/session_info.cc (renamed from webrtc/modules/video_coding/main/source/session_info.cc)83
-rw-r--r--webrtc/modules/video_coding/session_info.h (renamed from webrtc/modules/video_coding/main/source/session_info.h)18
-rw-r--r--webrtc/modules/video_coding/session_info_unittest.cc (renamed from webrtc/modules/video_coding/main/source/session_info_unittest.cc)146
-rw-r--r--webrtc/modules/video_coding/test/plotJitterEstimate.m (renamed from webrtc/modules/video_coding/main/test/plotJitterEstimate.m)0
-rw-r--r--webrtc/modules/video_coding/test/plotReceiveTrace.m (renamed from webrtc/modules/video_coding/main/test/plotReceiveTrace.m)0
-rw-r--r--webrtc/modules/video_coding/test/plotTimingTest.m (renamed from webrtc/modules/video_coding/main/test/plotTimingTest.m)0
-rw-r--r--webrtc/modules/video_coding/test/receiver_tests.h (renamed from webrtc/modules/video_coding/main/test/receiver_tests.h)20
-rw-r--r--webrtc/modules/video_coding/test/release_test.h (renamed from webrtc/modules/video_coding/main/test/release_test.h)6
-rw-r--r--webrtc/modules/video_coding/test/rtp_player.cc (renamed from webrtc/modules/video_coding/main/test/rtp_player.cc)63
-rw-r--r--webrtc/modules/video_coding/test/rtp_player.h (renamed from webrtc/modules/video_coding/main/test/rtp_player.h)21
-rw-r--r--webrtc/modules/video_coding/test/stream_generator.cc (renamed from webrtc/modules/video_coding/main/source/test/stream_generator.cc)21
-rw-r--r--webrtc/modules/video_coding/test/stream_generator.h (renamed from webrtc/modules/video_coding/main/source/test/stream_generator.h)10
-rw-r--r--webrtc/modules/video_coding/test/subfigure.m (renamed from webrtc/modules/video_coding/main/test/subfigure.m)0
-rw-r--r--webrtc/modules/video_coding/test/test_util.cc (renamed from webrtc/modules/video_coding/main/test/test_util.cc)37
-rw-r--r--webrtc/modules/video_coding/test/test_util.h (renamed from webrtc/modules/video_coding/main/test/test_util.h)18
-rw-r--r--webrtc/modules/video_coding/test/tester_main.cc (renamed from webrtc/modules/video_coding/main/test/tester_main.cc)53
-rw-r--r--webrtc/modules/video_coding/test/vcm_payload_sink_factory.cc (renamed from webrtc/modules/video_coding/main/test/vcm_payload_sink_factory.cc)22
-rw-r--r--webrtc/modules/video_coding/test/vcm_payload_sink_factory.h (renamed from webrtc/modules/video_coding/main/test/vcm_payload_sink_factory.h)15
-rw-r--r--webrtc/modules/video_coding/test/video_rtp_play.cc (renamed from webrtc/modules/video_coding/main/test/video_rtp_play.cc)12
-rw-r--r--webrtc/modules/video_coding/test/video_source.h85
-rw-r--r--webrtc/modules/video_coding/timestamp_map.cc (renamed from webrtc/modules/video_coding/main/source/timestamp_map.cc)12
-rw-r--r--webrtc/modules/video_coding/timestamp_map.h (renamed from webrtc/modules/video_coding/main/source/timestamp_map.h)2
-rw-r--r--webrtc/modules/video_coding/timing.cc (renamed from webrtc/modules/video_coding/main/source/timing.cc)71
-rw-r--r--webrtc/modules/video_coding/timing.h (renamed from webrtc/modules/video_coding/main/source/timing.h)13
-rw-r--r--webrtc/modules/video_coding/timing_unittest.cc (renamed from webrtc/modules/video_coding/main/source/timing_unittest.cc)28
-rw-r--r--webrtc/modules/video_coding/utility/frame_dropper.cc529
-rw-r--r--webrtc/modules/video_coding/utility/frame_dropper.h96
-rw-r--r--webrtc/modules/video_coding/utility/include/frame_dropper.h98
-rw-r--r--webrtc/modules/video_coding/utility/include/mock/mock_frame_dropper.h41
-rw-r--r--webrtc/modules/video_coding/utility/include/vp8_header_parser.h77
-rw-r--r--webrtc/modules/video_coding/utility/mock/mock_frame_dropper.h34
-rw-r--r--webrtc/modules/video_coding/utility/moving_average.h (renamed from webrtc/modules/video_coding/utility/include/moving_average.h)18
-rw-r--r--webrtc/modules/video_coding/utility/qp_parser.cc4
-rw-r--r--webrtc/modules/video_coding/utility/qp_parser.h (renamed from webrtc/modules/video_coding/utility/include/qp_parser.h)2
-rw-r--r--webrtc/modules/video_coding/utility/quality_scaler.cc18
-rw-r--r--webrtc/modules/video_coding/utility/quality_scaler.h (renamed from webrtc/modules/video_coding/utility/include/quality_scaler.h)2
-rw-r--r--webrtc/modules/video_coding/utility/quality_scaler_unittest.cc34
-rw-r--r--webrtc/modules/video_coding/utility/video_coding_utility.gyp10
-rw-r--r--webrtc/modules/video_coding/utility/vp8_header_parser.cc29
-rw-r--r--webrtc/modules/video_coding/utility/vp8_header_parser.h68
-rw-r--r--webrtc/modules/video_coding/video_coding.gypi102
-rw-r--r--webrtc/modules/video_coding/video_coding_impl.cc (renamed from webrtc/modules/video_coding/main/source/video_coding_impl.cc)197
-rw-r--r--webrtc/modules/video_coding/video_coding_impl.h (renamed from webrtc/modules/video_coding/main/source/video_coding_impl.h)45
-rw-r--r--webrtc/modules/video_coding/video_coding_robustness_unittest.cc (renamed from webrtc/modules/video_coding/main/source/video_coding_robustness_unittest.cc)112
-rw-r--r--webrtc/modules/video_coding/video_coding_test.gypi16
-rw-r--r--webrtc/modules/video_coding/video_receiver.cc (renamed from webrtc/modules/video_coding/main/source/video_receiver.cc)141
-rw-r--r--webrtc/modules/video_coding/video_receiver_unittest.cc (renamed from webrtc/modules/video_coding/main/source/video_receiver_unittest.cc)20
-rw-r--r--webrtc/modules/video_coding/video_sender.cc (renamed from webrtc/modules/video_coding/main/source/video_sender.cc)60
-rw-r--r--webrtc/modules/video_coding/video_sender_unittest.cc (renamed from webrtc/modules/video_coding/main/source/video_sender_unittest.cc)76
-rw-r--r--webrtc/modules/video_processing/BUILD.gn65
-rw-r--r--webrtc/modules/video_processing/OWNERS5
-rw-r--r--webrtc/modules/video_processing/brightness_detection.cc (renamed from webrtc/modules/video_processing/main/source/brightness_detection.cc)65
-rw-r--r--webrtc/modules/video_processing/brightness_detection.h (renamed from webrtc/modules/video_processing/main/source/brightness_detection.h)14
-rw-r--r--webrtc/modules/video_processing/content_analysis.cc (renamed from webrtc/modules/video_processing/main/source/content_analysis.cc)85
-rw-r--r--webrtc/modules/video_processing/content_analysis.h (renamed from webrtc/modules/video_processing/main/source/content_analysis.h)16
-rw-r--r--webrtc/modules/video_processing/content_analysis_sse2.cc (renamed from webrtc/modules/video_processing/main/source/content_analysis_sse2.cc)181
-rw-r--r--webrtc/modules/video_processing/deflickering.cc (renamed from webrtc/modules/video_processing/main/source/deflickering.cc)86
-rw-r--r--webrtc/modules/video_processing/deflickering.h (renamed from webrtc/modules/video_processing/main/source/deflickering.h)27
-rw-r--r--webrtc/modules/video_processing/frame_preprocessor.cc (renamed from webrtc/modules/video_processing/main/source/frame_preprocessor.cc)87
-rw-r--r--webrtc/modules/video_processing/frame_preprocessor.h (renamed from webrtc/modules/video_processing/main/source/frame_preprocessor.h)45
-rw-r--r--webrtc/modules/video_processing/include/video_processing.h102
-rw-r--r--webrtc/modules/video_processing/include/video_processing_defines.h (renamed from webrtc/modules/video_processing/main/interface/video_processing_defines.h)28
-rw-r--r--webrtc/modules/video_processing/main/interface/video_processing.h270
-rw-r--r--webrtc/modules/video_processing/main/source/OWNERS5
-rw-r--r--webrtc/modules/video_processing/main/source/brighten.cc45
-rw-r--r--webrtc/modules/video_processing/main/source/brighten.h25
-rw-r--r--webrtc/modules/video_processing/main/source/video_processing_impl.cc183
-rw-r--r--webrtc/modules/video_processing/main/source/video_processing_impl.h75
-rw-r--r--webrtc/modules/video_processing/main/test/unit_test/brightness_detection_test.cc121
-rw-r--r--webrtc/modules/video_processing/main/test/unit_test/deflickering_test.cc100
-rw-r--r--webrtc/modules/video_processing/spatial_resampler.cc (renamed from webrtc/modules/video_processing/main/source/spatial_resampler.cc)27
-rw-r--r--webrtc/modules/video_processing/spatial_resampler.h (renamed from webrtc/modules/video_processing/main/source/spatial_resampler.h)25
-rw-r--r--webrtc/modules/video_processing/test/brightness_detection_test.cc120
-rw-r--r--webrtc/modules/video_processing/test/content_metrics_test.cc (renamed from webrtc/modules/video_processing/main/test/unit_test/content_metrics_test.cc)30
-rw-r--r--webrtc/modules/video_processing/test/createTable.m (renamed from webrtc/modules/video_processing/main/test/unit_test/createTable.m)4
-rw-r--r--webrtc/modules/video_processing/test/deflickering_test.cc98
-rw-r--r--webrtc/modules/video_processing/test/denoiser_test.cc156
-rw-r--r--webrtc/modules/video_processing/test/readYUV420file.m (renamed from webrtc/modules/video_processing/main/test/unit_test/readYUV420file.m)10
-rw-r--r--webrtc/modules/video_processing/test/video_processing_unittest.cc (renamed from webrtc/modules/video_processing/main/test/unit_test/video_processing_unittest.cc)247
-rw-r--r--webrtc/modules/video_processing/test/video_processing_unittest.h (renamed from webrtc/modules/video_processing/main/test/unit_test/video_processing_unittest.h)20
-rw-r--r--webrtc/modules/video_processing/test/writeYUV420file.m (renamed from webrtc/modules/video_processing/main/test/unit_test/writeYUV420file.m)4
-rw-r--r--webrtc/modules/video_processing/util/denoiser_filter.cc54
-rw-r--r--webrtc/modules/video_processing/util/denoiser_filter.h63
-rw-r--r--webrtc/modules/video_processing/util/denoiser_filter_c.cc194
-rw-r--r--webrtc/modules/video_processing/util/denoiser_filter_c.h46
-rw-r--r--webrtc/modules/video_processing/util/denoiser_filter_neon.cc283
-rw-r--r--webrtc/modules/video_processing/util/denoiser_filter_neon.h46
-rw-r--r--webrtc/modules/video_processing/util/denoiser_filter_sse2.cc280
-rw-r--r--webrtc/modules/video_processing/util/denoiser_filter_sse2.h46
-rw-r--r--webrtc/modules/video_processing/util/skin_detection.cc65
-rwxr-xr-xwebrtc/modules/video_processing/util/skin_detection.h28
-rw-r--r--webrtc/modules/video_processing/video_decimator.cc (renamed from webrtc/modules/video_processing/main/source/video_decimator.cc)50
-rw-r--r--webrtc/modules/video_processing/video_decimator.h (renamed from webrtc/modules/video_processing/main/source/video_decimator.h)14
-rw-r--r--webrtc/modules/video_processing/video_denoiser.cc147
-rw-r--r--webrtc/modules/video_processing/video_denoiser.h38
-rw-r--r--webrtc/modules/video_processing/video_processing.gypi62
-rw-r--r--webrtc/modules/video_processing/video_processing_impl.cc179
-rw-r--r--webrtc/modules/video_processing/video_processing_impl.h55
-rw-r--r--webrtc/modules/video_render/BUILD.gn4
-rw-r--r--webrtc/modules/video_render/android/video_render_android_impl.cc17
-rw-r--r--webrtc/modules/video_render/android/video_render_android_impl.h5
-rw-r--r--webrtc/modules/video_render/android/video_render_android_native_opengl2.h2
-rw-r--r--webrtc/modules/video_render/android/video_render_android_surface_view.h2
-rw-r--r--webrtc/modules/video_render/android/video_render_opengles20.h2
-rw-r--r--webrtc/modules/video_render/external/video_render_external_impl.h2
-rw-r--r--webrtc/modules/video_render/i_video_render.h2
-rw-r--r--webrtc/modules/video_render/ios/open_gles20.h2
-rw-r--r--webrtc/modules/video_render/ios/video_render_ios_channel.h2
-rw-r--r--webrtc/modules/video_render/ios/video_render_ios_gles20.h5
-rw-r--r--webrtc/modules/video_render/ios/video_render_ios_gles20.mm8
-rw-r--r--webrtc/modules/video_render/linux/video_x11_channel.h2
-rw-r--r--webrtc/modules/video_render/linux/video_x11_render.h2
-rw-r--r--webrtc/modules/video_render/mac/video_render_agl.cc23
-rw-r--r--webrtc/modules/video_render/mac/video_render_agl.h7
-rw-r--r--webrtc/modules/video_render/mac/video_render_nsopengl.h10
-rw-r--r--webrtc/modules/video_render/mac/video_render_nsopengl.mm36
-rw-r--r--webrtc/modules/video_render/test/testAPI/testAPI.cc10
-rw-r--r--webrtc/modules/video_render/test/testAPI/testAPI.h2
-rw-r--r--webrtc/modules/video_render/test/testAPI/testAPI_mac.mm8
-rw-r--r--webrtc/modules/video_render/video_render.gypi23
-rw-r--r--webrtc/modules/video_render/video_render.h (renamed from webrtc/modules/video_render/include/video_render.h)10
-rw-r--r--webrtc/modules/video_render/video_render_defines.h (renamed from webrtc/modules/video_render/include/video_render_defines.h)10
-rw-r--r--webrtc/modules/video_render/video_render_impl.cc7
-rw-r--r--webrtc/modules/video_render/video_render_impl.h2
-rw-r--r--webrtc/modules/video_render/video_render_internal_impl.cc7
-rw-r--r--webrtc/modules/video_render/windows/i_video_render_win.h2
-rw-r--r--webrtc/modules/video_render/windows/video_render_direct3d9.cc8
-rw-r--r--webrtc/modules/video_render/windows/video_render_direct3d9.h7
896 files changed, 33775 insertions, 36158 deletions
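
Most of the churn above is directory flattening: headers leave the deprecated interface/ and main/ subdirectories for either a per-module include/ directory or the module root. As a hedged sketch of what this means for out-of-module callers, an include of the process-thread header (one of the renames listed above) would change roughly like this:

    // Hypothetical consumer of the utility module, before this change:
    // #include "webrtc/modules/utility/interface/process_thread.h"

    // The same include after the rename listed above:
    #include "webrtc/modules/utility/include/process_thread.h"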
diff --git a/webrtc/modules/audio_coding/BUILD.gn b/webrtc/modules/audio_coding/BUILD.gn
index 839a1439e6..000dd394df 100644
--- a/webrtc/modules/audio_coding/BUILD.gn
+++ b/webrtc/modules/audio_coding/BUILD.gn
@@ -11,10 +11,10 @@ import("../../build/webrtc.gni")
source_set("rent_a_codec") {
sources = [
- "main/acm2/acm_codec_database.cc",
- "main/acm2/acm_codec_database.h",
- "main/acm2/rent_a_codec.cc",
- "main/acm2/rent_a_codec.h",
+ "acm2/acm_codec_database.cc",
+ "acm2/acm_codec_database.h",
+ "acm2/rent_a_codec.cc",
+ "acm2/rent_a_codec.h",
]
configs += [ "../..:common_config" ]
public_configs = [ "../..:common_inherited_config" ]
@@ -44,31 +44,29 @@ source_set("rent_a_codec") {
config("audio_coding_config") {
include_dirs = [
- "main/include",
- "../interface",
+ "include",
+ "../include",
]
}
source_set("audio_coding") {
sources = [
- "main/acm2/acm_common_defs.h",
- "main/acm2/acm_receiver.cc",
- "main/acm2/acm_receiver.h",
- "main/acm2/acm_resampler.cc",
- "main/acm2/acm_resampler.h",
- "main/acm2/audio_coding_module.cc",
- "main/acm2/audio_coding_module_impl.cc",
- "main/acm2/audio_coding_module_impl.h",
- "main/acm2/call_statistics.cc",
- "main/acm2/call_statistics.h",
- "main/acm2/codec_manager.cc",
- "main/acm2/codec_manager.h",
- "main/acm2/codec_owner.cc",
- "main/acm2/codec_owner.h",
- "main/acm2/initial_delay_manager.cc",
- "main/acm2/initial_delay_manager.h",
- "main/include/audio_coding_module.h",
- "main/include/audio_coding_module_typedefs.h",
+ "acm2/acm_common_defs.h",
+ "acm2/acm_receiver.cc",
+ "acm2/acm_receiver.h",
+ "acm2/acm_resampler.cc",
+ "acm2/acm_resampler.h",
+ "acm2/audio_coding_module.cc",
+ "acm2/audio_coding_module_impl.cc",
+ "acm2/audio_coding_module_impl.h",
+ "acm2/call_statistics.cc",
+ "acm2/call_statistics.h",
+ "acm2/codec_manager.cc",
+ "acm2/codec_manager.h",
+ "acm2/initial_delay_manager.cc",
+ "acm2/initial_delay_manager.h",
+ "include/audio_coding_module.h",
+ "include/audio_coding_module_typedefs.h",
]
defines = []
@@ -166,11 +164,11 @@ config("cng_config") {
source_set("cng") {
sources = [
"codecs/cng/audio_encoder_cng.cc",
+ "codecs/cng/audio_encoder_cng.h",
"codecs/cng/cng_helpfuns.c",
"codecs/cng/cng_helpfuns.h",
- "codecs/cng/include/audio_encoder_cng.h",
- "codecs/cng/include/webrtc_cng.h",
"codecs/cng/webrtc_cng.c",
+ "codecs/cng/webrtc_cng.h",
]
configs += [ "../..:common_config" ]
@@ -181,8 +179,8 @@ source_set("cng") {
]
deps = [
- "../../common_audio",
":audio_encoder_interface",
+ "../../common_audio",
]
}
@@ -204,8 +202,8 @@ source_set("red") {
]
deps = [
- "../../common_audio",
":audio_encoder_interface",
+ "../../common_audio",
]
}
@@ -219,13 +217,13 @@ config("g711_config") {
source_set("g711") {
sources = [
"codecs/g711/audio_decoder_pcm.cc",
+ "codecs/g711/audio_decoder_pcm.h",
"codecs/g711/audio_encoder_pcm.cc",
+ "codecs/g711/audio_encoder_pcm.h",
"codecs/g711/g711.c",
"codecs/g711/g711.h",
"codecs/g711/g711_interface.c",
- "codecs/g711/include/audio_decoder_pcm.h",
- "codecs/g711/include/audio_encoder_pcm.h",
- "codecs/g711/include/g711_interface.h",
+ "codecs/g711/g711_interface.h",
]
configs += [ "../..:common_config" ]
@@ -236,6 +234,7 @@ source_set("g711") {
]
deps = [
+ ":audio_decoder_interface",
":audio_encoder_interface",
]
}
@@ -250,14 +249,14 @@ config("g722_config") {
source_set("g722") {
sources = [
"codecs/g722/audio_decoder_g722.cc",
+ "codecs/g722/audio_decoder_g722.h",
"codecs/g722/audio_encoder_g722.cc",
+ "codecs/g722/audio_encoder_g722.h",
"codecs/g722/g722_decode.c",
"codecs/g722/g722_enc_dec.h",
"codecs/g722/g722_encode.c",
"codecs/g722/g722_interface.c",
- "codecs/g722/include/audio_decoder_g722.h",
- "codecs/g722/include/audio_encoder_g722.h",
- "codecs/g722/include/g722_interface.h",
+ "codecs/g722/g722_interface.h",
]
configs += [ "../..:common_config" ]
@@ -268,6 +267,7 @@ source_set("g722") {
]
deps = [
+ ":audio_decoder_interface",
":audio_encoder_interface",
]
}
@@ -286,7 +286,9 @@ source_set("ilbc") {
"codecs/ilbc/abs_quant_loop.c",
"codecs/ilbc/abs_quant_loop.h",
"codecs/ilbc/audio_decoder_ilbc.cc",
+ "codecs/ilbc/audio_decoder_ilbc.h",
"codecs/ilbc/audio_encoder_ilbc.cc",
+ "codecs/ilbc/audio_encoder_ilbc.h",
"codecs/ilbc/augmented_cb_corr.c",
"codecs/ilbc/augmented_cb_corr.h",
"codecs/ilbc/bw_expand.c",
@@ -351,9 +353,7 @@ source_set("ilbc") {
"codecs/ilbc/hp_output.c",
"codecs/ilbc/hp_output.h",
"codecs/ilbc/ilbc.c",
- "codecs/ilbc/include/audio_decoder_ilbc.h",
- "codecs/ilbc/include/audio_encoder_ilbc.h",
- "codecs/ilbc/include/ilbc.h",
+ "codecs/ilbc/ilbc.h",
"codecs/ilbc/index_conv_dec.c",
"codecs/ilbc/index_conv_dec.h",
"codecs/ilbc/index_conv_enc.c",
@@ -434,8 +434,9 @@ source_set("ilbc") {
]
deps = [
- "../../common_audio",
+ ":audio_decoder_interface",
":audio_encoder_interface",
+ "../../common_audio",
]
}
@@ -604,6 +605,7 @@ source_set("isac_fix") {
]
deps = [
+ ":audio_decoder_interface",
":audio_encoder_interface",
":isac_common",
"../../common_audio",
@@ -696,14 +698,15 @@ config("pcm16b_config") {
source_set("pcm16b") {
sources = [
"codecs/pcm16b/audio_decoder_pcm16b.cc",
+ "codecs/pcm16b/audio_decoder_pcm16b.h",
"codecs/pcm16b/audio_encoder_pcm16b.cc",
- "codecs/pcm16b/include/audio_decoder_pcm16b.h",
- "codecs/pcm16b/include/audio_encoder_pcm16b.h",
- "codecs/pcm16b/include/pcm16b.h",
+ "codecs/pcm16b/audio_encoder_pcm16b.h",
"codecs/pcm16b/pcm16b.c",
+ "codecs/pcm16b/pcm16b.h",
]
deps = [
+ ":audio_decoder_interface",
":audio_encoder_interface",
":g711",
]
@@ -723,16 +726,18 @@ config("opus_config") {
source_set("webrtc_opus") {
sources = [
"codecs/opus/audio_decoder_opus.cc",
+ "codecs/opus/audio_decoder_opus.h",
"codecs/opus/audio_encoder_opus.cc",
- "codecs/opus/include/audio_decoder_opus.h",
- "codecs/opus/include/audio_encoder_opus.h",
- "codecs/opus/include/opus_interface.h",
+ "codecs/opus/audio_encoder_opus.h",
"codecs/opus/opus_inst.h",
"codecs/opus/opus_interface.c",
+ "codecs/opus/opus_interface.h",
]
deps = [
+ ":audio_decoder_interface",
":audio_encoder_interface",
+ "../../base:rtc_base_approved",
]
if (rtc_build_opus) {
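
The codec targets follow the same pattern one level down: headers under codecs/<codec>/include/ move up beside their sources, and the targets that now carry decoder headers gain an explicit ":audio_decoder_interface" dependency. A minimal sketch of the matching include update for the Opus encoder header moved in the hunk above (the consuming file itself is hypothetical):

    // Old path, removed above:
    // #include "webrtc/modules/audio_coding/codecs/opus/include/audio_encoder_opus.h"

    // New path, with the header next to opus_interface.c:
    #include "webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.h"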
diff --git a/webrtc/modules/audio_coding/OWNERS b/webrtc/modules/audio_coding/OWNERS
index f43fb1f3ee..77db17d1c5 100644
--- a/webrtc/modules/audio_coding/OWNERS
+++ b/webrtc/modules/audio_coding/OWNERS
@@ -5,8 +5,6 @@ kwiberg@webrtc.org
minyue@webrtc.org
jan.skoglund@webrtc.org
-per-file *.isolate=kjellander@webrtc.org
-
# These are for the common case of adding or renaming files. If you're doing
# structural changes, please get a review from a reviewer in this file.
per-file *.gyp=*
diff --git a/webrtc/modules/audio_coding/main/acm2/acm_codec_database.cc b/webrtc/modules/audio_coding/acm2/acm_codec_database.cc
index f7842ce5b1..5f3c07802b 100644
--- a/webrtc/modules/audio_coding/main/acm2/acm_codec_database.cc
+++ b/webrtc/modules/audio_coding/acm2/acm_codec_database.cc
@@ -15,12 +15,12 @@
// TODO(tlegrand): Change constant input pointers in all functions to constant
// references, where appropriate.
-#include "webrtc/modules/audio_coding/main/acm2/acm_codec_database.h"
+#include "webrtc/modules/audio_coding/acm2/acm_codec_database.h"
#include <assert.h>
#include "webrtc/base/checks.h"
-#include "webrtc/modules/audio_coding/main/acm2/acm_common_defs.h"
+#include "webrtc/modules/audio_coding/acm2/acm_common_defs.h"
#include "webrtc/system_wrappers/include/trace.h"
namespace webrtc {
@@ -29,11 +29,6 @@ namespace acm2 {
namespace {
-// Checks if the bitrate is valid for the codec.
-bool IsRateValid(int codec_id, int rate) {
- return ACMCodecDB::database_[codec_id].rate == rate;
-}
-
// Checks if the bitrate is valid for iSAC.
bool IsISACRateValid(int rate) {
return (rate == -1) || ((rate <= 56000) && (rate >= 10000));
@@ -111,7 +106,7 @@ const CodecInst ACMCodecDB::database_[] = {
{127, "red", 8000, 0, 1, 0},
#endif
// To prevent compile errors due to trailing commas.
- {-1, "Null", -1, -1, -1, -1}
+ {-1, "Null", -1, -1, 0, -1}
};
// Create database with all codec settings at compile time.
@@ -167,7 +162,7 @@ const ACMCodecDB::CodecSettings ACMCodecDB::codec_settings_[] = {
{1, {0}, 0, 1},
#endif
// To prevent compile errors due to trailing commas.
- {-1, {-1}, -1, -1}
+ {-1, {-1}, -1, 0}
};
// Create a database of all NetEQ decoders at compile time.
@@ -214,20 +209,6 @@ const NetEqDecoder ACMCodecDB::neteq_decoders_[] = {
#endif
};
-// Get codec information from database.
-// TODO(tlegrand): replace memcpy with a pointer to the data base memory.
-int ACMCodecDB::Codec(int codec_id, CodecInst* codec_inst) {
- // Error check to see that codec_id is not out of bounds.
- if (static_cast<size_t>(codec_id) >= RentACodec::NumberOfCodecs()) {
- return -1;
- }
-
- // Copy database information for the codec to the output.
- memcpy(codec_inst, &database_[codec_id], sizeof(CodecInst));
-
- return 0;
-}
-
// Enumerator for error codes when asking for codec database id.
enum {
kInvalidCodec = -10,
@@ -249,7 +230,7 @@ int ACMCodecDB::CodecNumber(const CodecInst& codec_inst) {
}
// Checks the validity of payload type
- if (!ValidPayloadType(codec_inst.pltype)) {
+ if (!RentACodec::IsPayloadTypeValid(codec_inst.pltype)) {
return kInvalidPayloadtype;
}
@@ -298,8 +279,7 @@ int ACMCodecDB::CodecNumber(const CodecInst& codec_inst) {
? codec_id : kInvalidRate;
}
- return IsRateValid(codec_id, codec_inst.rate) ?
- codec_id : kInvalidRate;
+ return database_[codec_id].rate == codec_inst.rate ? codec_id : kInvalidRate;
}
// Looks for a matching payload name, frequency, and channels in the
@@ -312,7 +292,9 @@ int ACMCodecDB::CodecId(const CodecInst& codec_inst) {
codec_inst.channels));
}
-int ACMCodecDB::CodecId(const char* payload_name, int frequency, int channels) {
+int ACMCodecDB::CodecId(const char* payload_name,
+ int frequency,
+ size_t channels) {
for (const CodecInst& ci : RentACodec::Database()) {
bool name_match = false;
bool frequency_match = false;
@@ -346,19 +328,6 @@ int ACMCodecDB::ReceiverCodecNumber(const CodecInst& codec_inst) {
return CodecId(codec_inst);
}
-// Returns the codec sampling frequency for codec with id = "codec_id" in
-// database.
-int ACMCodecDB::CodecFreq(int codec_id) {
- const size_t i = static_cast<size_t>(codec_id);
- const auto db = RentACodec::Database();
- return i < db.size() ? db[i].plfreq : -1;
-}
-
-// Checks if the payload type is in the valid range.
-bool ACMCodecDB::ValidPayloadType(int payload_type) {
- return (payload_type >= 0) && (payload_type <= 127);
-}
-
} // namespace acm2
} // namespace webrtc
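
With ACMCodecDB::Codec(), CodecFreq(), and ValidPayloadType() deleted, callers are pointed at RentACodec instead; the replacement calls are visible in this hunk and again in acm_receiver.cc below. A hedged sketch of the equivalents, assuming the RentACodec::Database() and RentACodec::IsPayloadTypeValid() helpers exactly as they appear in this diff:

    #include "webrtc/modules/audio_coding/acm2/rent_a_codec.h"

    // Equivalent of the removed ACMCodecDB::CodecFreq(): index into the codec
    // database and read the sampling frequency; -1 if codec_id is out of range.
    int CodecFreqSketch(int codec_id) {
      const auto db = webrtc::acm2::RentACodec::Database();
      const size_t i = static_cast<size_t>(codec_id);
      return i < db.size() ? db[i].plfreq : -1;
    }

    // Equivalent of the removed ACMCodecDB::ValidPayloadType().
    bool ValidPayloadTypeSketch(int payload_type) {
      return webrtc::acm2::RentACodec::IsPayloadTypeValid(payload_type);
    }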
diff --git a/webrtc/modules/audio_coding/main/acm2/acm_codec_database.h b/webrtc/modules/audio_coding/acm2/acm_codec_database.h
index 84c8846a57..6c2db9cfc8 100644
--- a/webrtc/modules/audio_coding/main/acm2/acm_codec_database.h
+++ b/webrtc/modules/audio_coding/acm2/acm_codec_database.h
@@ -13,12 +13,12 @@
* codecs.
*/
-#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_CODEC_DATABASE_H_
-#define WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_CODEC_DATABASE_H_
+#ifndef WEBRTC_MODULES_AUDIO_CODING_ACM2_ACM_CODEC_DATABASE_H_
+#define WEBRTC_MODULES_AUDIO_CODING_ACM2_ACM_CODEC_DATABASE_H_
#include "webrtc/common_types.h"
#include "webrtc/engine_configurations.h"
-#include "webrtc/modules/audio_coding/main/acm2/rent_a_codec.h"
+#include "webrtc/modules/audio_coding/acm2/rent_a_codec.h"
#include "webrtc/modules/audio_coding/neteq/include/neteq.h"
namespace webrtc {
@@ -48,20 +48,9 @@ class ACMCodecDB {
int num_packet_sizes;
int packet_sizes_samples[kMaxNumPacketSize];
int basic_block_samples;
- int channel_support;
+ size_t channel_support;
};
- // Gets codec information from database at the position in database given by
- // [codec_id].
- // Input:
- // [codec_id] - number that specifies at what position in the database to
- // get the information.
- // Output:
- // [codec_inst] - filled with information about the codec.
- // Return:
- // 0 if successful, otherwise -1.
- static int Codec(int codec_id, CodecInst* codec_inst);
-
// Returns codec id from database, given the information received in the input
// [codec_inst].
// Input:
@@ -71,26 +60,9 @@ class ACMCodecDB {
// codec id if successful, otherwise < 0.
static int CodecNumber(const CodecInst& codec_inst);
static int CodecId(const CodecInst& codec_inst);
- static int CodecId(const char* payload_name, int frequency, int channels);
+ static int CodecId(const char* payload_name, int frequency, size_t channels);
static int ReceiverCodecNumber(const CodecInst& codec_inst);
- // Returns the codec sampling frequency for codec with id = "codec_id" in
- // database.
- // TODO(tlegrand): Check if function is needed, or if we can change
- // to access database directly.
- // Input:
- // [codec_id] - number that specifies at what position in the database to
- // get the information.
- // Return:
- // codec sampling frequency if successful, otherwise -1.
- static int CodecFreq(int codec_id);
-
- // Check if the payload type is valid, meaning that it is in the valid range
- // of 0 to 127.
- // Input:
- // [payload_type] - payload type.
- static bool ValidPayloadType(int payload_type);
-
// Databases with information about the supported codecs
// database_ - stored information about all codecs: payload type, name,
// sampling frequency, packet size in samples, default channel
@@ -101,15 +73,11 @@ class ACMCodecDB {
// neteq_decoders_ - list of supported decoders in NetEQ.
static const CodecInst database_[kMaxNumCodecs];
static const CodecSettings codec_settings_[kMaxNumCodecs];
-
- private:
static const NetEqDecoder neteq_decoders_[kMaxNumCodecs];
-
- friend class RentACodec;
};
} // namespace acm2
} // namespace webrtc
-#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_CODEC_DATABASE_H_
+#endif // WEBRTC_MODULES_AUDIO_CODING_ACM2_ACM_CODEC_DATABASE_H_
diff --git a/webrtc/modules/audio_coding/main/acm2/acm_common_defs.h b/webrtc/modules/audio_coding/acm2/acm_common_defs.h
index 23e3519ed0..483bdd93f1 100644
--- a/webrtc/modules/audio_coding/main/acm2/acm_common_defs.h
+++ b/webrtc/modules/audio_coding/acm2/acm_common_defs.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_COMMON_DEFS_H_
-#define WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_COMMON_DEFS_H_
+#ifndef WEBRTC_MODULES_AUDIO_CODING_ACM2_ACM_COMMON_DEFS_H_
+#define WEBRTC_MODULES_AUDIO_CODING_ACM2_ACM_COMMON_DEFS_H_
#include "webrtc/engine_configurations.h"
@@ -29,4 +29,4 @@ const int kIsacPacSize960 = 960;
} // namespace webrtc
-#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_COMMON_DEFS_H_
+#endif // WEBRTC_MODULES_AUDIO_CODING_ACM2_ACM_COMMON_DEFS_H_
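
Both guard renames above are mechanical: the guard macro is the uppercased file path, so dropping main/ from the path drops the MAIN_ token from the macro. For a hypothetical header at webrtc/modules/audio_coding/acm2/foo.h, the expected shape under this convention is:

    #ifndef WEBRTC_MODULES_AUDIO_CODING_ACM2_FOO_H_
    #define WEBRTC_MODULES_AUDIO_CODING_ACM2_FOO_H_

    // Declarations go here.

    #endif  // WEBRTC_MODULES_AUDIO_CODING_ACM2_FOO_H_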
diff --git a/webrtc/modules/audio_coding/main/acm2/acm_neteq_unittest.cc b/webrtc/modules/audio_coding/acm2/acm_neteq_unittest.cc
index 607b933deb..607b933deb 100644
--- a/webrtc/modules/audio_coding/main/acm2/acm_neteq_unittest.cc
+++ b/webrtc/modules/audio_coding/acm2/acm_neteq_unittest.cc
diff --git a/webrtc/modules/audio_coding/main/acm2/acm_receive_test_oldapi.cc b/webrtc/modules/audio_coding/acm2/acm_receive_test_oldapi.cc
index fdcfdfc22d..855a39e675 100644
--- a/webrtc/modules/audio_coding/main/acm2/acm_receive_test_oldapi.cc
+++ b/webrtc/modules/audio_coding/acm2/acm_receive_test_oldapi.cc
@@ -8,13 +8,13 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/audio_coding/main/acm2/acm_receive_test_oldapi.h"
+#include "webrtc/modules/audio_coding/acm2/acm_receive_test_oldapi.h"
#include <assert.h>
#include <stdio.h>
#include "testing/gtest/include/gtest/gtest.h"
-#include "webrtc/modules/audio_coding/main/include/audio_coding_module.h"
+#include "webrtc/modules/audio_coding/include/audio_coding_module.h"
#include "webrtc/modules/audio_coding/neteq/tools/audio_sink.h"
#include "webrtc/modules/audio_coding/neteq/tools/packet.h"
#include "webrtc/modules/audio_coding/neteq/tools/packet_source.h"
@@ -55,7 +55,7 @@ bool ModifyAndUseThisCodec(CodecInst* codec_param) {
// G.722 = 94
bool RemapPltypeAndUseThisCodec(const char* plname,
int plfreq,
- int channels,
+ size_t channels,
int* pltype) {
if (channels != 1)
return false; // Don't use non-mono codecs.
@@ -144,9 +144,10 @@ int AcmReceiveTestOldApi::RegisterExternalReceiveCodec(
int rtp_payload_type,
AudioDecoder* external_decoder,
int sample_rate_hz,
- int num_channels) {
+ int num_channels,
+ const std::string& name) {
return acm_->RegisterExternalReceiveCodec(rtp_payload_type, external_decoder,
- sample_rate_hz, num_channels);
+ sample_rate_hz, num_channels, name);
}
void AcmReceiveTestOldApi::Run() {
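
Note the channels parameter switching from int to size_t in the hunk above; the same migration runs through acm_receiver.cc below. A hedged call-site sketch against the new RemapPltypeAndUseThisCodec signature (the payload name and frequency are illustrative, not taken from this diff):

    // Channel counts are now size_t end to end; the payload type is remapped
    // in place through the out-parameter.
    int pltype = 0;
    size_t channels = 1;
    bool use_codec = RemapPltypeAndUseThisCodec("G722", 16000, channels, &pltype);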
diff --git a/webrtc/modules/audio_coding/main/acm2/acm_receive_test_oldapi.h b/webrtc/modules/audio_coding/acm2/acm_receive_test_oldapi.h
index 0b5671fe8c..3010ec72b1 100644
--- a/webrtc/modules/audio_coding/main/acm2/acm_receive_test_oldapi.h
+++ b/webrtc/modules/audio_coding/acm2/acm_receive_test_oldapi.h
@@ -8,8 +8,10 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_RECEIVE_TEST_H_
-#define WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_RECEIVE_TEST_H_
+#ifndef WEBRTC_MODULES_AUDIO_CODING_ACM2_ACM_RECEIVE_TEST_OLDAPI_H_
+#define WEBRTC_MODULES_AUDIO_CODING_ACM2_ACM_RECEIVE_TEST_OLDAPI_H_
+
+#include <string>
#include "webrtc/base/constructormagic.h"
#include "webrtc/base/scoped_ptr.h"
@@ -48,7 +50,8 @@ class AcmReceiveTestOldApi {
int RegisterExternalReceiveCodec(int rtp_payload_type,
AudioDecoder* external_decoder,
int sample_rate_hz,
- int num_channels);
+ int num_channels,
+ const std::string& name);
// Runs the test and returns true if successful.
void Run();
@@ -91,4 +94,4 @@ class AcmReceiveTestToggleOutputFreqOldApi : public AcmReceiveTestOldApi {
} // namespace test
} // namespace webrtc
-#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_RECEIVE_TEST_H_
+#endif // WEBRTC_MODULES_AUDIO_CODING_ACM2_ACM_RECEIVE_TEST_OLDAPI_H_
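
RegisterExternalReceiveCodec now threads a decoder name through to NetEq's registry (see AcmReceiver::AddCodec and the RegisterExternalDecoder call below). A hedged sketch of a call updated for the extra argument, where test and decoder are assumed to exist:

    // The trailing std::string is the only change for callers.
    int result = test.RegisterExternalReceiveCodec(
        107,        // rtp_payload_type (illustrative value)
        &decoder,   // external AudioDecoder implementation
        48000,      // sample_rate_hz
        2,          // num_channels
        "opus");    // decoder name, newly required by this change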
diff --git a/webrtc/modules/audio_coding/main/acm2/acm_receiver.cc b/webrtc/modules/audio_coding/acm2/acm_receiver.cc
index cf486ce06a..f45d5d3414 100644
--- a/webrtc/modules/audio_coding/main/acm2/acm_receiver.cc
+++ b/webrtc/modules/audio_coding/acm2/acm_receiver.cc
@@ -8,7 +8,7 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/audio_coding/main/acm2/acm_receiver.h"
+#include "webrtc/modules/audio_coding/acm2/acm_receiver.h"
#include <stdlib.h> // malloc
@@ -21,9 +21,8 @@
#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
#include "webrtc/common_types.h"
#include "webrtc/modules/audio_coding/codecs/audio_decoder.h"
-#include "webrtc/modules/audio_coding/main/acm2/acm_common_defs.h"
-#include "webrtc/modules/audio_coding/main/acm2/acm_resampler.h"
-#include "webrtc/modules/audio_coding/main/acm2/call_statistics.h"
+#include "webrtc/modules/audio_coding/acm2/acm_resampler.h"
+#include "webrtc/modules/audio_coding/acm2/call_statistics.h"
#include "webrtc/modules/audio_coding/neteq/include/neteq.h"
#include "webrtc/system_wrappers/include/clock.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
@@ -124,27 +123,13 @@ AcmReceiver::AcmReceiver(const AudioCodingModule::Config& config)
id_(config.id),
last_audio_decoder_(nullptr),
previous_audio_activity_(AudioFrame::kVadPassive),
- current_sample_rate_hz_(config.neteq_config.sample_rate_hz),
audio_buffer_(new int16_t[AudioFrame::kMaxDataSizeSamples]),
last_audio_buffer_(new int16_t[AudioFrame::kMaxDataSizeSamples]),
neteq_(NetEq::Create(config.neteq_config)),
- vad_enabled_(true),
+ vad_enabled_(config.neteq_config.enable_post_decode_vad),
clock_(config.clock),
- resampled_last_output_frame_(true),
- av_sync_(false),
- initial_delay_manager_(),
- missing_packets_sync_stream_(),
- late_packets_sync_stream_() {
+ resampled_last_output_frame_(true) {
assert(clock_);
-
- // Make sure we are on the same page as NetEq. Post-decode VAD is disabled by
- // default in NetEq4, however, Audio Conference Mixer relies on VAD decision
- // and fails if VAD decision is not provided.
- if (vad_enabled_)
- neteq_->EnableVad();
- else
- neteq_->DisableVad();
-
memset(audio_buffer_.get(), 0, AudioFrame::kMaxDataSizeSamples);
memset(last_audio_buffer_.get(), 0, AudioFrame::kMaxDataSizeSamples);
}
@@ -160,42 +145,6 @@ int AcmReceiver::SetMinimumDelay(int delay_ms) {
return -1;
}
-int AcmReceiver::SetInitialDelay(int delay_ms) {
- if (delay_ms < 0 || delay_ms > 10000) {
- return -1;
- }
- CriticalSectionScoped lock(crit_sect_.get());
-
- if (delay_ms == 0) {
- av_sync_ = false;
- initial_delay_manager_.reset();
- missing_packets_sync_stream_.reset();
- late_packets_sync_stream_.reset();
- neteq_->SetMinimumDelay(0);
- return 0;
- }
-
- if (av_sync_ && initial_delay_manager_->PacketBuffered()) {
- // Too late for this API. Only works before a call is started.
- return -1;
- }
-
- // Most NetEq calls are not within AcmReceiver's critical section, to improve
- // performance. Here, this call has to be placed before the following block,
- // so we keep it inside the critical section. Otherwise, we would have to
- // release |neteq_crit_sect_| and acquire it again, which seems overkill.
- if (!neteq_->SetMinimumDelay(delay_ms))
- return -1;
-
- const int kLatePacketThreshold = 5;
- av_sync_ = true;
- initial_delay_manager_.reset(new InitialDelayManager(delay_ms,
- kLatePacketThreshold));
- missing_packets_sync_stream_.reset(new InitialDelayManager::SyncStream);
- late_packets_sync_stream_.reset(new InitialDelayManager::SyncStream);
- return 0;
-}
-
int AcmReceiver::SetMaximumDelay(int delay_ms) {
if (neteq_->SetMaximumDelay(delay_ms))
return 0;
@@ -207,74 +156,52 @@ int AcmReceiver::LeastRequiredDelayMs() const {
return neteq_->LeastRequiredDelayMs();
}
-int AcmReceiver::current_sample_rate_hz() const {
+rtc::Optional<int> AcmReceiver::last_packet_sample_rate_hz() const {
CriticalSectionScoped lock(crit_sect_.get());
- return current_sample_rate_hz_;
+ return last_packet_sample_rate_hz_;
+}
+
+int AcmReceiver::last_output_sample_rate_hz() const {
+ return neteq_->last_output_sample_rate_hz();
}
int AcmReceiver::InsertPacket(const WebRtcRTPHeader& rtp_header,
- const uint8_t* incoming_payload,
- size_t length_payload) {
+ rtc::ArrayView<const uint8_t> incoming_payload) {
uint32_t receive_timestamp = 0;
- InitialDelayManager::PacketType packet_type =
- InitialDelayManager::kUndefinedPacket;
- bool new_codec = false;
const RTPHeader* header = &rtp_header.header; // Just a shorthand.
{
CriticalSectionScoped lock(crit_sect_.get());
- const Decoder* decoder = RtpHeaderToDecoder(*header, incoming_payload);
+ const Decoder* decoder = RtpHeaderToDecoder(*header, incoming_payload[0]);
if (!decoder) {
LOG_F(LS_ERROR) << "Payload-type "
<< static_cast<int>(header->payloadType)
<< " is not registered.";
return -1;
}
- const int sample_rate_hz = ACMCodecDB::CodecFreq(decoder->acm_codec_id);
+ const int sample_rate_hz = [&decoder] {
+ const auto ci = RentACodec::CodecIdFromIndex(decoder->acm_codec_id);
+ return ci ? RentACodec::CodecInstById(*ci)->plfreq : -1;
+ }();
receive_timestamp = NowInTimestamp(sample_rate_hz);
- if (IsCng(decoder->acm_codec_id)) {
- // If this is a CNG while the audio codec is not mono skip pushing in
- // packets into NetEq.
- if (last_audio_decoder_ && last_audio_decoder_->channels > 1)
+ // If this is a CNG packet while the audio codec is not mono, skip
+ // inserting it into NetEq.
+ if (IsCng(decoder->acm_codec_id) && last_audio_decoder_ &&
+ last_audio_decoder_->channels > 1)
return 0;
- packet_type = InitialDelayManager::kCngPacket;
- } else if (decoder->acm_codec_id ==
- *RentACodec::CodecIndexFromId(RentACodec::CodecId::kAVT)) {
- packet_type = InitialDelayManager::kAvtPacket;
- } else {
- if (decoder != last_audio_decoder_) {
- // This is either the first audio packet or send codec is changed.
- // Therefore, either NetEq buffer is empty or will be flushed when this
- // packet is inserted.
- new_codec = true;
- last_audio_decoder_ = decoder;
- }
- packet_type = InitialDelayManager::kAudioPacket;
+ if (!IsCng(decoder->acm_codec_id) &&
+ decoder->acm_codec_id !=
+ *RentACodec::CodecIndexFromId(RentACodec::CodecId::kAVT)) {
+ last_audio_decoder_ = decoder;
+ last_packet_sample_rate_hz_ = rtc::Optional<int>(decoder->sample_rate_hz);
}
- if (av_sync_) {
- assert(initial_delay_manager_.get());
- assert(missing_packets_sync_stream_.get());
- // This updates |initial_delay_manager_| and specifies a stream of
- // sync-packets, if required to be inserted. We insert the sync-packets
- // when AcmReceiver lock is released and |decoder_lock_| is acquired.
- initial_delay_manager_->UpdateLastReceivedPacket(
- rtp_header, receive_timestamp, packet_type, new_codec, sample_rate_hz,
- missing_packets_sync_stream_.get());
- }
} // |crit_sect_| is released.
- // If |missing_packets_sync_stream_| is allocated then we are in AV-sync and
- // we may need to insert sync-packets. We don't check |av_sync_| as we are
- // outside AcmReceiver's critical section.
- if (missing_packets_sync_stream_.get()) {
- InsertStreamOfSyncPackets(missing_packets_sync_stream_.get());
- }
-
- if (neteq_->InsertPacket(rtp_header, incoming_payload, length_payload,
- receive_timestamp) < 0) {
+ if (neteq_->InsertPacket(rtp_header, incoming_payload, receive_timestamp) <
+ 0) {
LOG(LERROR) << "AcmReceiver::InsertPacket "
<< static_cast<int>(header->payloadType)
<< " Failed to insert packet";
@@ -286,30 +213,7 @@ int AcmReceiver::InsertPacket(const WebRtcRTPHeader& rtp_header,
int AcmReceiver::GetAudio(int desired_freq_hz, AudioFrame* audio_frame) {
enum NetEqOutputType type;
size_t samples_per_channel;
- int num_channels;
- bool return_silence = false;
-
- {
- // Accessing members, take the lock.
- CriticalSectionScoped lock(crit_sect_.get());
-
- if (av_sync_) {
- assert(initial_delay_manager_.get());
- assert(late_packets_sync_stream_.get());
- return_silence = GetSilence(desired_freq_hz, audio_frame);
- uint32_t timestamp_now = NowInTimestamp(current_sample_rate_hz_);
- initial_delay_manager_->LatePackets(timestamp_now,
- late_packets_sync_stream_.get());
- }
- }
-
- // If |late_packets_sync_stream_| is allocated then we have been in AV-sync
- // mode and we might have to insert sync-packets.
- if (late_packets_sync_stream_.get()) {
- InsertStreamOfSyncPackets(late_packets_sync_stream_.get());
- if (return_silence) // Silence generated, don't pull from NetEq.
- return 0;
- }
+ size_t num_channels;
// Accessing members, take the lock.
CriticalSectionScoped lock(crit_sect_.get());
@@ -324,23 +228,18 @@ int AcmReceiver::GetAudio(int desired_freq_hz, AudioFrame* audio_frame) {
return -1;
}
- // NetEq always returns 10 ms of audio.
- current_sample_rate_hz_ = static_cast<int>(samples_per_channel * 100);
+ const int current_sample_rate_hz = neteq_->last_output_sample_rate_hz();
// Update if resampling is required.
- bool need_resampling = (desired_freq_hz != -1) &&
- (current_sample_rate_hz_ != desired_freq_hz);
+ const bool need_resampling =
+ (desired_freq_hz != -1) && (current_sample_rate_hz != desired_freq_hz);
if (need_resampling && !resampled_last_output_frame_) {
// Prime the resampler with the last frame.
int16_t temp_output[AudioFrame::kMaxDataSizeSamples];
- int samples_per_channel_int =
- resampler_.Resample10Msec(last_audio_buffer_.get(),
- current_sample_rate_hz_,
- desired_freq_hz,
- num_channels,
- AudioFrame::kMaxDataSizeSamples,
- temp_output);
+ int samples_per_channel_int = resampler_.Resample10Msec(
+ last_audio_buffer_.get(), current_sample_rate_hz, desired_freq_hz,
+ num_channels, AudioFrame::kMaxDataSizeSamples, temp_output);
if (samples_per_channel_int < 0) {
LOG(LERROR) << "AcmReceiver::GetAudio - "
"Resampling last_audio_buffer_ failed.";
@@ -354,13 +253,9 @@ int AcmReceiver::GetAudio(int desired_freq_hz, AudioFrame* audio_frame) {
// TODO(henrik.lundin) Glitches in the output may appear if the output rate
// from NetEq changes. See WebRTC issue 3923.
if (need_resampling) {
- int samples_per_channel_int =
- resampler_.Resample10Msec(audio_buffer_.get(),
- current_sample_rate_hz_,
- desired_freq_hz,
- num_channels,
- AudioFrame::kMaxDataSizeSamples,
- audio_frame->data_);
+ int samples_per_channel_int = resampler_.Resample10Msec(
+ audio_buffer_.get(), current_sample_rate_hz, desired_freq_hz,
+ num_channels, AudioFrame::kMaxDataSizeSamples, audio_frame->data_);
if (samples_per_channel_int < 0) {
LOG(LERROR) << "AcmReceiver::GetAudio - Resampling audio_buffer_ failed.";
return -1;
@@ -406,16 +301,17 @@ int AcmReceiver::GetAudio(int desired_freq_hz, AudioFrame* audio_frame) {
int32_t AcmReceiver::AddCodec(int acm_codec_id,
uint8_t payload_type,
- int channels,
+ size_t channels,
int sample_rate_hz,
- AudioDecoder* audio_decoder) {
+ AudioDecoder* audio_decoder,
+ const std::string& name) {
const auto neteq_decoder = [acm_codec_id, channels]() -> NetEqDecoder {
if (acm_codec_id == -1)
return NetEqDecoder::kDecoderArbitrary; // External decoder.
- const rtc::Maybe<RentACodec::CodecId> cid =
+ const rtc::Optional<RentACodec::CodecId> cid =
RentACodec::CodecIdFromIndex(acm_codec_id);
RTC_DCHECK(cid) << "Invalid codec index: " << acm_codec_id;
- const rtc::Maybe<NetEqDecoder> ned =
+ const rtc::Optional<NetEqDecoder> ned =
RentACodec::NetEqDecoderFromCodecId(*cid, channels);
RTC_DCHECK(ned) << "Invalid codec ID: " << static_cast<int>(*cid);
return *ned;
@@ -447,10 +343,10 @@ int32_t AcmReceiver::AddCodec(int acm_codec_id,
int ret_val;
if (!audio_decoder) {
- ret_val = neteq_->RegisterPayloadType(neteq_decoder, payload_type);
+ ret_val = neteq_->RegisterPayloadType(neteq_decoder, name, payload_type);
} else {
- ret_val = neteq_->RegisterExternalDecoder(audio_decoder, neteq_decoder,
- payload_type, sample_rate_hz);
+ ret_val = neteq_->RegisterExternalDecoder(
+ audio_decoder, neteq_decoder, name, payload_type, sample_rate_hz);
}
if (ret_val != NetEq::kOK) {
LOG(LERROR) << "AcmReceiver::AddCodec " << acm_codec_id
@@ -503,6 +399,7 @@ int AcmReceiver::RemoveAllCodecs() {
// No codec is registered, invalidate last audio decoder.
last_audio_decoder_ = nullptr;
+ last_packet_sample_rate_hz_ = rtc::Optional<int>();
return ret_val;
}
@@ -516,8 +413,10 @@ int AcmReceiver::RemoveCodec(uint8_t payload_type) {
LOG(LERROR) << "AcmReceiver::RemoveCodec" << static_cast<int>(payload_type);
return -1;
}
- if (last_audio_decoder_ == &it->second)
+ if (last_audio_decoder_ == &it->second) {
last_audio_decoder_ = nullptr;
+ last_packet_sample_rate_hz_ = rtc::Optional<int>();
+ }
decoders_.erase(it);
return 0;
}
@@ -528,42 +427,16 @@ void AcmReceiver::set_id(int id) {
}
bool AcmReceiver::GetPlayoutTimestamp(uint32_t* timestamp) {
- if (av_sync_) {
- assert(initial_delay_manager_.get());
- if (initial_delay_manager_->buffering()) {
- return initial_delay_manager_->GetPlayoutTimestamp(timestamp);
- }
- }
return neteq_->GetPlayoutTimestamp(timestamp);
}
-int AcmReceiver::last_audio_codec_id() const {
- CriticalSectionScoped lock(crit_sect_.get());
- return last_audio_decoder_ ? last_audio_decoder_->acm_codec_id : -1;
-}
-
-int AcmReceiver::RedPayloadType() const {
- const auto red_index =
- RentACodec::CodecIndexFromId(RentACodec::CodecId::kRED);
- if (red_index) {
- CriticalSectionScoped lock(crit_sect_.get());
- for (const auto& decoder_pair : decoders_) {
- const Decoder& decoder = decoder_pair.second;
- if (decoder.acm_codec_id == *red_index)
- return decoder.payload_type;
- }
- }
- LOG(WARNING) << "RED is not registered.";
- return -1;
-}
-
int AcmReceiver::LastAudioCodec(CodecInst* codec) const {
CriticalSectionScoped lock(crit_sect_.get());
if (!last_audio_decoder_) {
return -1;
}
- memcpy(codec, &ACMCodecDB::database_[last_audio_decoder_->acm_codec_id],
- sizeof(CodecInst));
+ *codec = *RentACodec::CodecInstById(
+ *RentACodec::CodecIdFromIndex(last_audio_decoder_->acm_codec_id));
codec->pltype = last_audio_decoder_->payload_type;
codec->channels = last_audio_decoder_->channels;
codec->plfreq = last_audio_decoder_->sample_rate_hz;
@@ -603,8 +476,8 @@ int AcmReceiver::DecoderByPayloadType(uint8_t payload_type,
return -1;
}
const Decoder& decoder = it->second;
- memcpy(codec, &ACMCodecDB::database_[decoder.acm_codec_id],
- sizeof(CodecInst));
+ *codec = *RentACodec::CodecInstById(
+ *RentACodec::CodecIdFromIndex(decoder.acm_codec_id));
codec->pltype = decoder.payload_type;
codec->channels = decoder.channels;
codec->plfreq = decoder.sample_rate_hz;
@@ -626,74 +499,20 @@ std::vector<uint16_t> AcmReceiver::GetNackList(
}
void AcmReceiver::ResetInitialDelay() {
- {
- CriticalSectionScoped lock(crit_sect_.get());
- av_sync_ = false;
- initial_delay_manager_.reset(NULL);
- missing_packets_sync_stream_.reset(NULL);
- late_packets_sync_stream_.reset(NULL);
- }
neteq_->SetMinimumDelay(0);
// TODO(turajs): Should NetEq Buffer be flushed?
}
-// This function is called within critical section, no need to acquire a lock.
-bool AcmReceiver::GetSilence(int desired_sample_rate_hz, AudioFrame* frame) {
- assert(av_sync_);
- assert(initial_delay_manager_.get());
- if (!initial_delay_manager_->buffering()) {
- return false;
- }
-
- // We stop accumulating packets, if the number of packets or the total size
- // exceeds a threshold.
- int num_packets;
- int max_num_packets;
- const float kBufferingThresholdScale = 0.9f;
- neteq_->PacketBufferStatistics(&num_packets, &max_num_packets);
- if (num_packets > max_num_packets * kBufferingThresholdScale) {
- initial_delay_manager_->DisableBuffering();
- return false;
- }
-
- // Update statistics.
- call_stats_.DecodedBySilenceGenerator();
-
- // Set the values if already got a packet, otherwise set to default values.
- if (last_audio_decoder_) {
- current_sample_rate_hz_ =
- ACMCodecDB::database_[last_audio_decoder_->acm_codec_id].plfreq;
- frame->num_channels_ = last_audio_decoder_->channels;
- } else {
- frame->num_channels_ = 1;
- }
-
- // Set the audio frame's sampling frequency.
- if (desired_sample_rate_hz > 0) {
- frame->sample_rate_hz_ = desired_sample_rate_hz;
- } else {
- frame->sample_rate_hz_ = current_sample_rate_hz_;
- }
-
- frame->samples_per_channel_ =
- static_cast<size_t>(frame->sample_rate_hz_ / 100); // Always 10 ms.
- frame->speech_type_ = AudioFrame::kCNG;
- frame->vad_activity_ = AudioFrame::kVadPassive;
- size_t samples = frame->samples_per_channel_ * frame->num_channels_;
- memset(frame->data_, 0, samples * sizeof(int16_t));
- return true;
-}
-
const AcmReceiver::Decoder* AcmReceiver::RtpHeaderToDecoder(
const RTPHeader& rtp_header,
- const uint8_t* payload) const {
+ uint8_t payload_type) const {
auto it = decoders_.find(rtp_header.payloadType);
const auto red_index =
RentACodec::CodecIndexFromId(RentACodec::CodecId::kRED);
if (red_index && // This ensures that RED is defined in WebRTC.
it != decoders_.end() && it->second.acm_codec_id == *red_index) {
// This is a RED packet, get the payload of the audio codec.
- it = decoders_.find(payload[0] & 0x7F);
+ it = decoders_.find(payload_type & 0x7F);
}
// Check if the payload is registered.
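
RtpHeaderToDecoder now receives only the first payload octet, since that is all the RED path needs: per RFC 2198, the first octet of a RED block header carries an F bit (a further header follows) in the top bit and the encapsulated payload type in the low seven bits, which the 0x7F mask recovers. A sketch of that layout, using hypothetical helper names:

    #include <cstdint>

    // RFC 2198 RED block header, first octet: F | PT(7 bits).
    inline bool RedHasFollowOnHeader(uint8_t first_octet) {
      return (first_octet & 0x80) != 0;  // F bit: another block header follows
    }
    inline uint8_t RedPrimaryPayloadType(uint8_t first_octet) {
      return first_octet & 0x7F;         // payload type of the encapsulated codec
    }
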
@@ -711,23 +530,6 @@ uint32_t AcmReceiver::NowInTimestamp(int decoder_sampling_rate) const {
(decoder_sampling_rate / 1000) * now_in_ms);
}
-// This function only interacts with |neteq_|, therefore, it does not have to
-// be within critical section of AcmReceiver. It is inserting packets
-// into NetEq, so we call it when |decode_lock_| is acquired. However, this is
-// not essential as sync-packets do not interact with codecs (especially BWE).
-void AcmReceiver::InsertStreamOfSyncPackets(
- InitialDelayManager::SyncStream* sync_stream) {
- assert(sync_stream);
- assert(av_sync_);
- for (int n = 0; n < sync_stream->num_sync_packets; ++n) {
- neteq_->InsertSyncPacket(sync_stream->rtp_info,
- sync_stream->receive_timestamp);
- ++sync_stream->rtp_info.header.sequenceNumber;
- sync_stream->rtp_info.header.timestamp += sync_stream->timestamp_step;
- sync_stream->receive_timestamp += sync_stream->timestamp_step;
- }
-}
-
void AcmReceiver::GetDecodingCallStatistics(
AudioDecodingCallStats* stats) const {
CriticalSectionScoped lock(crit_sect_.get());
diff --git a/webrtc/modules/audio_coding/main/acm2/acm_receiver.h b/webrtc/modules/audio_coding/acm2/acm_receiver.h
index 4775b8c6d9..b150612f69 100644
--- a/webrtc/modules/audio_coding/main/acm2/acm_receiver.h
+++ b/webrtc/modules/audio_coding/acm2/acm_receiver.h
@@ -8,23 +8,25 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_RECEIVER_H_
-#define WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_RECEIVER_H_
+#ifndef WEBRTC_MODULES_AUDIO_CODING_ACM2_ACM_RECEIVER_H_
+#define WEBRTC_MODULES_AUDIO_CODING_ACM2_ACM_RECEIVER_H_
#include <map>
+#include <string>
#include <vector>
+#include "webrtc/base/array_view.h"
+#include "webrtc/base/optional.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/base/thread_annotations.h"
#include "webrtc/common_audio/vad/include/webrtc_vad.h"
#include "webrtc/engine_configurations.h"
-#include "webrtc/modules/audio_coding/main/include/audio_coding_module.h"
-#include "webrtc/modules/audio_coding/main/acm2/acm_codec_database.h"
-#include "webrtc/modules/audio_coding/main/acm2/acm_resampler.h"
-#include "webrtc/modules/audio_coding/main/acm2/call_statistics.h"
-#include "webrtc/modules/audio_coding/main/acm2/initial_delay_manager.h"
+#include "webrtc/modules/audio_coding/include/audio_coding_module.h"
+#include "webrtc/modules/audio_coding/acm2/acm_resampler.h"
+#include "webrtc/modules/audio_coding/acm2/call_statistics.h"
+#include "webrtc/modules/audio_coding/acm2/initial_delay_manager.h"
#include "webrtc/modules/audio_coding/neteq/include/neteq.h"
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/typedefs.h"
namespace webrtc {
@@ -42,7 +44,7 @@ class AcmReceiver {
uint8_t payload_type;
// This field is meaningful for codecs where both mono and
// stereo versions are registered under the same ID.
- int channels;
+ size_t channels;
int sample_rate_hz;
};
@@ -66,8 +68,7 @@ class AcmReceiver {
// <0 if NetEq returned an error.
//
int InsertPacket(const WebRtcRTPHeader& rtp_header,
- const uint8_t* incoming_payload,
- size_t length_payload);
+ rtc::ArrayView<const uint8_t> incoming_payload);
//
// Asks NetEq for 10 milliseconds of decoded audio.
@@ -115,9 +116,10 @@ class AcmReceiver {
//
int AddCodec(int acm_codec_id,
uint8_t payload_type,
- int channels,
+ size_t channels,
int sample_rate_hz,
- AudioDecoder* audio_decoder);
+ AudioDecoder* audio_decoder,
+ const std::string& name);
//
// Sets a minimum delay for packet buffer. The given delay is maintained,
@@ -151,29 +153,18 @@ class AcmReceiver {
int LeastRequiredDelayMs() const;
//
- // Sets an initial delay of |delay_ms| milliseconds. This introduces a playout
- // delay. Silence (zero signal) is played out until equivalent of |delay_ms|
- // millisecond of audio is buffered. Then, NetEq maintains the delay.
- //
- // Input:
- // - delay_ms : initial delay in milliseconds.
- //
- // Return value : 0 if OK.
- // <0 if NetEq returned an error.
- //
- int SetInitialDelay(int delay_ms);
-
- //
// Resets the initial delay to zero.
//
void ResetInitialDelay();
- //
- // Get the current sampling frequency in Hz.
- //
- // Return value : Sampling frequency in Hz.
- //
- int current_sample_rate_hz() const;
+ // Returns the sample rate of the decoder associated with the last incoming
+ // packet. If no packet of a registered non-CNG codec has been received, the
+ // return value is empty. Also, if the decoder was unregistered since the last
+ // packet was inserted, the return value is empty.
+ rtc::Optional<int> last_packet_sample_rate_hz() const;
+
+ // Returns last_output_sample_rate_hz from the NetEq instance.
+ int last_output_sample_rate_hz() const;
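
A caller of this new accessor pair would test the Optional and fall back to the NetEq output rate; that is exactly how ReceiveFrequency() is reworked further down in this change. A hypothetical helper sketching the pattern:

    // Sketch: prefer the rate of the last decodable packet, else NetEq's
    // last output rate. EffectiveSampleRateHz is not part of the change.
    int EffectiveSampleRateHz(const webrtc::acm2::AcmReceiver& receiver) {
      const rtc::Optional<int> rate = receiver.last_packet_sample_rate_hz();
      return rate ? *rate : receiver.last_output_sample_rate_hz();
    }
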
//
// Get the current network statistics from NetEq.
@@ -231,13 +222,6 @@ class AcmReceiver {
bool GetPlayoutTimestamp(uint32_t* timestamp);
//
- // Return the index of the codec associated with the last non-CNG/non-DTMF
- // received payload. If no non-CNG/non-DTMF payload is received -1 is
- // returned.
- //
- int last_audio_codec_id() const; // TODO(turajs): can be inline.
-
- //
// Get the audio codec associated with the last non-CNG/non-DTMF received
// payload. If no non-CNG/non-DTMF packet is received -1 is returned,
// otherwise return 0.
@@ -245,11 +229,6 @@ class AcmReceiver {
int LastAudioCodec(CodecInst* codec) const;
//
- // Return payload type of RED if it is registered, otherwise return -1;
- //
- int RedPayloadType() const;
-
- //
// Get a decoder given its registered payload-type.
//
// Input:
@@ -296,24 +275,16 @@ class AcmReceiver {
void GetDecodingCallStatistics(AudioDecodingCallStats* stats) const;
private:
- bool GetSilence(int desired_sample_rate_hz, AudioFrame* frame)
- EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
-
- int GetNumSyncPacketToInsert(uint16_t received_squence_number);
-
const Decoder* RtpHeaderToDecoder(const RTPHeader& rtp_header,
- const uint8_t* payload) const
+ uint8_t payload_type) const
EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
uint32_t NowInTimestamp(int decoder_sampling_rate) const;
- void InsertStreamOfSyncPackets(InitialDelayManager::SyncStream* sync_stream);
-
rtc::scoped_ptr<CriticalSectionWrapper> crit_sect_;
int id_; // TODO(henrik.lundin) Make const.
const Decoder* last_audio_decoder_ GUARDED_BY(crit_sect_);
AudioFrame::VADActivity previous_audio_activity_ GUARDED_BY(crit_sect_);
- int current_sample_rate_hz_ GUARDED_BY(crit_sect_);
ACMResampler resampler_ GUARDED_BY(crit_sect_);
// Used in GetAudio, declared as member to avoid allocating every 10ms.
// TODO(henrik.lundin) Stack-allocate in GetAudio instead?
@@ -326,23 +297,11 @@ class AcmReceiver {
bool vad_enabled_;
Clock* clock_; // TODO(henrik.lundin) Make const if possible.
bool resampled_last_output_frame_ GUARDED_BY(crit_sect_);
-
- // Indicates if a non-zero initial delay is set, and the receiver is in
- // AV-sync mode.
- bool av_sync_;
- rtc::scoped_ptr<InitialDelayManager> initial_delay_manager_;
-
- // The following are defined as members to avoid creating them in every
- // iteration. |missing_packets_sync_stream_| is *ONLY* used in InsertPacket().
- // |late_packets_sync_stream_| is only used in GetAudio(). Both of these
- // member variables are allocated only when we AV-sync is enabled, i.e.
- // initial delay is set.
- rtc::scoped_ptr<InitialDelayManager::SyncStream> missing_packets_sync_stream_;
- rtc::scoped_ptr<InitialDelayManager::SyncStream> late_packets_sync_stream_;
+ rtc::Optional<int> last_packet_sample_rate_hz_ GUARDED_BY(crit_sect_);
};
} // namespace acm2
} // namespace webrtc
-#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_RECEIVER_H_
+#endif // WEBRTC_MODULES_AUDIO_CODING_ACM2_ACM_RECEIVER_H_
diff --git a/webrtc/modules/audio_coding/main/acm2/acm_receiver_unittest_oldapi.cc b/webrtc/modules/audio_coding/acm2/acm_receiver_unittest_oldapi.cc
index f0caacce10..24ecc694ff 100644
--- a/webrtc/modules/audio_coding/main/acm2/acm_receiver_unittest_oldapi.cc
+++ b/webrtc/modules/audio_coding/acm2/acm_receiver_unittest_oldapi.cc
@@ -8,20 +8,18 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/audio_coding/main/acm2/acm_receiver.h"
+#include "webrtc/modules/audio_coding/acm2/acm_receiver.h"
#include <algorithm> // std::min
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/modules/audio_coding/main/include/audio_coding_module.h"
-#include "webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.h"
-#include "webrtc/modules/audio_coding/main/acm2/acm_codec_database.h"
+#include "webrtc/modules/audio_coding/include/audio_coding_module.h"
+#include "webrtc/modules/audio_coding/acm2/audio_coding_module_impl.h"
#include "webrtc/modules/audio_coding/neteq/tools/rtp_generator.h"
#include "webrtc/system_wrappers/include/clock.h"
#include "webrtc/test/test_suite.h"
#include "webrtc/test/testsupport/fileutils.h"
-#include "webrtc/test/testsupport/gtest_disable.h"
namespace webrtc {
@@ -88,14 +86,14 @@ class AcmReceiverTestOldApi : public AudioPacketizationCallback,
void TearDown() override {}
void InsertOnePacketOfSilence(int codec_id) {
- CodecInst codec;
- ACMCodecDB::Codec(codec_id, &codec);
+ CodecInst codec =
+ *RentACodec::CodecInstById(*RentACodec::CodecIdFromIndex(codec_id));
if (timestamp_ == 0) { // This is the first time inserting audio.
ASSERT_EQ(0, acm_->RegisterSendCodec(codec));
} else {
- CodecInst current_codec;
- ASSERT_EQ(0, acm_->SendCodec(&current_codec));
- if (!CodecsEqual(codec, current_codec))
+ auto current_codec = acm_->SendCodec();
+ ASSERT_TRUE(current_codec);
+ if (!CodecsEqual(codec, *current_codec))
ASSERT_EQ(0, acm_->RegisterSendCodec(codec));
}
AudioFrame frame;
@@ -121,7 +119,7 @@ class AcmReceiverTestOldApi : public AudioPacketizationCallback,
ASSERT_TRUE(i);
ASSERT_EQ(
0, receiver_->AddCodec(*i, codecs_[*i].pltype, codecs_[*i].channels,
- codecs_[*i].plfreq, nullptr));
+ codecs_[*i].plfreq, nullptr, ""));
}
}
@@ -142,8 +140,9 @@ class AcmReceiverTestOldApi : public AudioPacketizationCallback,
rtp_header_.type.Audio.isCNG = true;
rtp_header_.header.timestamp = timestamp;
- int ret_val = receiver_->InsertPacket(rtp_header_, payload_data,
- payload_len_bytes);
+ int ret_val = receiver_->InsertPacket(
+ rtp_header_,
+ rtc::ArrayView<const uint8_t>(payload_data, payload_len_bytes));
if (ret_val < 0) {
assert(false);
return -1;
@@ -164,13 +163,18 @@ class AcmReceiverTestOldApi : public AudioPacketizationCallback,
FrameType last_frame_type_;
};
-TEST_F(AcmReceiverTestOldApi, DISABLED_ON_ANDROID(AddCodecGetCodec)) {
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_AddCodecGetCodec DISABLED_AddCodecGetCodec
+#else
+#define MAYBE_AddCodecGetCodec AddCodecGetCodec
+#endif
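
This is the stock gtest idiom that replaces the removed DISABLED_ON_ANDROID macro: gtest skips any test whose name begins with DISABLED_, so the MAYBE_ alias expands to the disabled name only on Android. The idiom in isolation, with a hypothetical fixture and test name:

    #if defined(WEBRTC_ANDROID)
    #define MAYBE_MyTest DISABLED_MyTest  // gtest skips tests named DISABLED_*
    #else
    #define MAYBE_MyTest MyTest
    #endif
    TEST_F(MyFixture, MAYBE_MyTest) {
      // Hypothetical body; runs everywhere except Android.
    }
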
+TEST_F(AcmReceiverTestOldApi, MAYBE_AddCodecGetCodec) {
// Add codec.
for (size_t n = 0; n < codecs_.size(); ++n) {
if (n & 0x1) // Just add codecs with odd index.
EXPECT_EQ(0,
receiver_->AddCodec(n, codecs_[n].pltype, codecs_[n].channels,
- codecs_[n].plfreq, NULL));
+ codecs_[n].plfreq, NULL, ""));
}
// Get codec and compare.
for (size_t n = 0; n < codecs_.size(); ++n) {
@@ -188,7 +192,12 @@ TEST_F(AcmReceiverTestOldApi, DISABLED_ON_ANDROID(AddCodecGetCodec)) {
}
}
-TEST_F(AcmReceiverTestOldApi, DISABLED_ON_ANDROID(AddCodecChangePayloadType)) {
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_AddCodecChangePayloadType DISABLED_AddCodecChangePayloadType
+#else
+#define MAYBE_AddCodecChangePayloadType AddCodecChangePayloadType
+#endif
+TEST_F(AcmReceiverTestOldApi, MAYBE_AddCodecChangePayloadType) {
const CodecIdInst codec1(RentACodec::CodecId::kPCMA);
CodecInst codec2 = codec1.inst;
++codec2.pltype;
@@ -197,9 +206,9 @@ TEST_F(AcmReceiverTestOldApi, DISABLED_ON_ANDROID(AddCodecChangePayloadType)) {
// Register the same codec with different payloads.
EXPECT_EQ(0, receiver_->AddCodec(codec1.id, codec1.inst.pltype,
codec1.inst.channels, codec1.inst.plfreq,
- nullptr));
+ nullptr, ""));
EXPECT_EQ(0, receiver_->AddCodec(codec1.id, codec2.pltype, codec2.channels,
- codec2.plfreq, NULL));
+ codec2.plfreq, NULL, ""));
// Both payload types should exist.
EXPECT_EQ(0,
@@ -209,7 +218,12 @@ TEST_F(AcmReceiverTestOldApi, DISABLED_ON_ANDROID(AddCodecChangePayloadType)) {
EXPECT_EQ(true, CodecsEqual(codec2, test_codec));
}
-TEST_F(AcmReceiverTestOldApi, DISABLED_ON_ANDROID(AddCodecChangeCodecId)) {
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_AddCodecChangeCodecId DISABLED_AddCodecChangeCodecId
+#else
+#define MAYBE_AddCodecChangeCodecId AddCodecChangeCodecId
+#endif
+TEST_F(AcmReceiverTestOldApi, MAYBE_AddCodecChangeCodecId) {
const CodecIdInst codec1(RentACodec::CodecId::kPCMU);
CodecIdInst codec2(RentACodec::CodecId::kPCMA);
codec2.inst.pltype = codec1.inst.pltype;
@@ -218,10 +232,10 @@ TEST_F(AcmReceiverTestOldApi, DISABLED_ON_ANDROID(AddCodecChangeCodecId)) {
// Register the same payload type with different codec ID.
EXPECT_EQ(0, receiver_->AddCodec(codec1.id, codec1.inst.pltype,
codec1.inst.channels, codec1.inst.plfreq,
- nullptr));
+ nullptr, ""));
EXPECT_EQ(0, receiver_->AddCodec(codec2.id, codec2.inst.pltype,
codec2.inst.channels, codec2.inst.plfreq,
- nullptr));
+ nullptr, ""));
// Make sure that the last codec is used.
EXPECT_EQ(0,
@@ -229,12 +243,17 @@ TEST_F(AcmReceiverTestOldApi, DISABLED_ON_ANDROID(AddCodecChangeCodecId)) {
EXPECT_EQ(true, CodecsEqual(codec2.inst, test_codec));
}
-TEST_F(AcmReceiverTestOldApi, DISABLED_ON_ANDROID(AddCodecRemoveCodec)) {
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_AddCodecRemoveCodec DISABLED_AddCodecRemoveCodec
+#else
+#define MAYBE_AddCodecRemoveCodec AddCodecRemoveCodec
+#endif
+TEST_F(AcmReceiverTestOldApi, MAYBE_AddCodecRemoveCodec) {
const CodecIdInst codec(RentACodec::CodecId::kPCMA);
const int payload_type = codec.inst.pltype;
EXPECT_EQ(
0, receiver_->AddCodec(codec.id, codec.inst.pltype, codec.inst.channels,
- codec.inst.plfreq, nullptr));
+ codec.inst.plfreq, nullptr, ""));
// Removing a non-existing codec should not fail. ACM1 legacy.
EXPECT_EQ(0, receiver_->RemoveCodec(payload_type + 1));
@@ -247,7 +266,12 @@ TEST_F(AcmReceiverTestOldApi, DISABLED_ON_ANDROID(AddCodecRemoveCodec)) {
EXPECT_EQ(-1, receiver_->DecoderByPayloadType(payload_type, &ci));
}
-TEST_F(AcmReceiverTestOldApi, DISABLED_ON_ANDROID(SampleRate)) {
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_SampleRate DISABLED_SampleRate
+#else
+#define MAYBE_SampleRate SampleRate
+#endif
+TEST_F(AcmReceiverTestOldApi, MAYBE_SampleRate) {
const RentACodec::CodecId kCodecId[] = {RentACodec::CodecId::kISAC,
RentACodec::CodecId::kISACSWB};
AddSetOfCodecs(kCodecId);
@@ -261,18 +285,22 @@ TEST_F(AcmReceiverTestOldApi, DISABLED_ON_ANDROID(SampleRate)) {
for (int k = 0; k < num_10ms_frames; ++k) {
EXPECT_EQ(0, receiver_->GetAudio(kOutSampleRateHz, &frame));
}
- EXPECT_EQ(std::min(32000, codec.inst.plfreq),
- receiver_->current_sample_rate_hz());
+ EXPECT_EQ(codec.inst.plfreq, receiver_->last_output_sample_rate_hz());
}
}
-TEST_F(AcmReceiverTestOldApi, DISABLED_ON_ANDROID(PostdecodingVad)) {
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_PostdecodingVad DISABLED_PostdecodingVad
+#else
+#define MAYBE_PostdecodingVad PostdecodingVad
+#endif
+TEST_F(AcmReceiverTestOldApi, MAYBE_PostdecodingVad) {
receiver_->EnableVad();
EXPECT_TRUE(receiver_->vad_enabled());
const CodecIdInst codec(RentACodec::CodecId::kPCM16Bwb);
ASSERT_EQ(
0, receiver_->AddCodec(codec.id, codec.inst.pltype, codec.inst.channels,
- codec.inst.plfreq, nullptr));
+ codec.inst.plfreq, nullptr, ""));
const int kNumPackets = 5;
const int num_10ms_frames = codec.inst.pacsize / (codec.inst.plfreq / 100);
AudioFrame frame;
@@ -294,14 +322,13 @@ TEST_F(AcmReceiverTestOldApi, DISABLED_ON_ANDROID(PostdecodingVad)) {
EXPECT_EQ(AudioFrame::kVadUnknown, frame.vad_activity_);
}
-#ifdef WEBRTC_CODEC_ISAC
-#define IF_ISAC_FLOAT(x) x
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_LastAudioCodec DISABLED_LastAudioCodec
#else
-#define IF_ISAC_FLOAT(x) DISABLED_##x
+#define MAYBE_LastAudioCodec LastAudioCodec
#endif
-
-TEST_F(AcmReceiverTestOldApi,
- DISABLED_ON_ANDROID(IF_ISAC_FLOAT(LastAudioCodec))) {
+#if defined(WEBRTC_CODEC_ISAC)
+TEST_F(AcmReceiverTestOldApi, MAYBE_LastAudioCodec) {
const RentACodec::CodecId kCodecId[] = {
RentACodec::CodecId::kISAC, RentACodec::CodecId::kPCMA,
RentACodec::CodecId::kISACSWB, RentACodec::CodecId::kPCM16Bswb32kHz};
@@ -331,7 +358,7 @@ TEST_F(AcmReceiverTestOldApi,
// Has received only DTX. Last audio codec is undefined.
EXPECT_EQ(-1, receiver_->LastAudioCodec(&codec));
- EXPECT_EQ(-1, receiver_->last_audio_codec_id());
+ EXPECT_FALSE(receiver_->last_packet_sample_rate_hz());
for (auto id : kCodecId) {
const CodecIdInst c(id);
@@ -345,7 +372,8 @@ TEST_F(AcmReceiverTestOldApi,
// of type "speech."
ASSERT_TRUE(packet_sent_);
ASSERT_EQ(kAudioFrameSpeech, last_frame_type_);
- EXPECT_EQ(c.id, receiver_->last_audio_codec_id());
+ EXPECT_EQ(rtc::Optional<int>(c.inst.plfreq),
+ receiver_->last_packet_sample_rate_hz());
// Set VAD on to send DTX. Then check if the "Last Audio codec" returns
// the expected codec.
@@ -357,11 +385,13 @@ TEST_F(AcmReceiverTestOldApi,
InsertOnePacketOfSilence(c.id);
ASSERT_TRUE(packet_sent_);
}
- EXPECT_EQ(c.id, receiver_->last_audio_codec_id());
+ EXPECT_EQ(rtc::Optional<int>(c.inst.plfreq),
+ receiver_->last_packet_sample_rate_hz());
EXPECT_EQ(0, receiver_->LastAudioCodec(&codec));
EXPECT_TRUE(CodecsEqual(c.inst, codec));
}
}
+#endif
} // namespace acm2
diff --git a/webrtc/modules/audio_coding/main/acm2/acm_resampler.cc b/webrtc/modules/audio_coding/acm2/acm_resampler.cc
index cbcad85f5b..dfc3ef7e27 100644
--- a/webrtc/modules/audio_coding/main/acm2/acm_resampler.cc
+++ b/webrtc/modules/audio_coding/acm2/acm_resampler.cc
@@ -8,7 +8,7 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/audio_coding/main/acm2/acm_resampler.h"
+#include "webrtc/modules/audio_coding/acm2/acm_resampler.h"
#include <assert.h>
#include <string.h>
@@ -28,11 +28,10 @@ ACMResampler::~ACMResampler() {
int ACMResampler::Resample10Msec(const int16_t* in_audio,
int in_freq_hz,
int out_freq_hz,
- int num_audio_channels,
+ size_t num_audio_channels,
size_t out_capacity_samples,
int16_t* out_audio) {
- size_t in_length = static_cast<size_t>(in_freq_hz * num_audio_channels / 100);
- int out_length = out_freq_hz * num_audio_channels / 100;
+ size_t in_length = in_freq_hz * num_audio_channels / 100;
if (in_freq_hz == out_freq_hz) {
if (out_capacity_samples < in_length) {
assert(false);
@@ -44,24 +43,20 @@ int ACMResampler::Resample10Msec(const int16_t* in_audio,
if (resampler_.InitializeIfNeeded(in_freq_hz, out_freq_hz,
num_audio_channels) != 0) {
- LOG_FERR3(LS_ERROR, InitializeIfNeeded, in_freq_hz, out_freq_hz,
- num_audio_channels);
+ LOG(LS_ERROR) << "InitializeIfNeeded(" << in_freq_hz << ", " << out_freq_hz
+ << ", " << num_audio_channels << ") failed.";
return -1;
}
- out_length =
+ int out_length =
resampler_.Resample(in_audio, in_length, out_audio, out_capacity_samples);
if (out_length == -1) {
- LOG_FERR4(LS_ERROR,
- Resample,
- in_audio,
- in_length,
- out_audio,
- out_capacity_samples);
+ LOG(LS_ERROR) << "Resample(" << in_audio << ", " << in_length << ", "
+ << out_audio << ", " << out_capacity_samples << ") failed.";
return -1;
}
- return out_length / num_audio_channels;
+ return static_cast<int>(out_length / num_audio_channels);
}
} // namespace acm2
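
Resample10Msec always processes one 10 ms block, so the input length is in_freq_hz / 100 frames per channel and the return value is frames per channel after conversion, or -1 on error. A usage sketch with hypothetical buffers:

    #include "webrtc/modules/audio_coding/acm2/acm_resampler.h"

    int ResampleOneBlock() {
      int16_t in[(48000 / 100) * 2] = {0};  // 480 frames x 2 channels, interleaved
      int16_t out[(32000 / 100) * 2];       // room for 320 frames x 2 channels
      webrtc::acm2::ACMResampler resampler;
      // One 10 ms stereo block, 48 kHz down to 32 kHz.
      return resampler.Resample10Msec(in, 48000, 32000, 2,
                                      sizeof(out) / sizeof(out[0]), out);
    }
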
diff --git a/webrtc/modules/audio_coding/main/acm2/acm_resampler.h b/webrtc/modules/audio_coding/acm2/acm_resampler.h
index a19b0c4569..268db8b752 100644
--- a/webrtc/modules/audio_coding/main/acm2/acm_resampler.h
+++ b/webrtc/modules/audio_coding/acm2/acm_resampler.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_RESAMPLER_H_
-#define WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_RESAMPLER_H_
+#ifndef WEBRTC_MODULES_AUDIO_CODING_ACM2_ACM_RESAMPLER_H_
+#define WEBRTC_MODULES_AUDIO_CODING_ACM2_ACM_RESAMPLER_H_
#include "webrtc/common_audio/resampler/include/push_resampler.h"
#include "webrtc/typedefs.h"
@@ -25,7 +25,7 @@ class ACMResampler {
int Resample10Msec(const int16_t* in_audio,
int in_freq_hz,
int out_freq_hz,
- int num_audio_channels,
+ size_t num_audio_channels,
size_t out_capacity_samples,
int16_t* out_audio);
@@ -36,4 +36,4 @@ class ACMResampler {
} // namespace acm2
} // namespace webrtc
-#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_RESAMPLER_H_
+#endif // WEBRTC_MODULES_AUDIO_CODING_ACM2_ACM_RESAMPLER_H_
diff --git a/webrtc/modules/audio_coding/main/acm2/acm_send_test_oldapi.cc b/webrtc/modules/audio_coding/acm2/acm_send_test_oldapi.cc
index ac38dc011d..3a89a77487 100644
--- a/webrtc/modules/audio_coding/main/acm2/acm_send_test_oldapi.cc
+++ b/webrtc/modules/audio_coding/acm2/acm_send_test_oldapi.cc
@@ -8,7 +8,7 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/audio_coding/main/acm2/acm_send_test_oldapi.h"
+#include "webrtc/modules/audio_coding/acm2/acm_send_test_oldapi.h"
#include <assert.h>
#include <stdio.h>
@@ -17,7 +17,7 @@
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/base/checks.h"
#include "webrtc/modules/audio_coding/codecs/audio_encoder.h"
-#include "webrtc/modules/audio_coding/main/include/audio_coding_module.h"
+#include "webrtc/modules/audio_coding/include/audio_coding_module.h"
#include "webrtc/modules/audio_coding/neteq/tools/input_audio_file.h"
#include "webrtc/modules/audio_coding/neteq/tools/packet.h"
diff --git a/webrtc/modules/audio_coding/main/acm2/acm_send_test_oldapi.h b/webrtc/modules/audio_coding/acm2/acm_send_test_oldapi.h
index 3e65ec6c2d..ce68196a3f 100644
--- a/webrtc/modules/audio_coding/main/acm2/acm_send_test_oldapi.h
+++ b/webrtc/modules/audio_coding/acm2/acm_send_test_oldapi.h
@@ -8,14 +8,14 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_SEND_TEST_H_
-#define WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_SEND_TEST_H_
+#ifndef WEBRTC_MODULES_AUDIO_CODING_ACM2_ACM_SEND_TEST_OLDAPI_H_
+#define WEBRTC_MODULES_AUDIO_CODING_ACM2_ACM_SEND_TEST_OLDAPI_H_
#include <vector>
#include "webrtc/base/constructormagic.h"
#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/modules/audio_coding/main/include/audio_coding_module.h"
+#include "webrtc/modules/audio_coding/include/audio_coding_module.h"
#include "webrtc/modules/audio_coding/neteq/tools/packet_source.h"
#include "webrtc/system_wrappers/include/clock.h"
@@ -88,4 +88,4 @@ class AcmSendTestOldApi : public AudioPacketizationCallback,
} // namespace test
} // namespace webrtc
-#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_SEND_TEST_H_
+#endif // WEBRTC_MODULES_AUDIO_CODING_ACM2_ACM_SEND_TEST_OLDAPI_H_
diff --git a/webrtc/modules/audio_coding/main/acm2/audio_coding_module.cc b/webrtc/modules/audio_coding/acm2/audio_coding_module.cc
index 77ee0f2789..c4dd349cc4 100644
--- a/webrtc/modules/audio_coding/main/acm2/audio_coding_module.cc
+++ b/webrtc/modules/audio_coding/acm2/audio_coding_module.cc
@@ -8,12 +8,12 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/audio_coding/main/include/audio_coding_module.h"
+#include "webrtc/modules/audio_coding/include/audio_coding_module.h"
#include "webrtc/base/checks.h"
#include "webrtc/common_types.h"
-#include "webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.h"
-#include "webrtc/modules/audio_coding/main/acm2/rent_a_codec.h"
+#include "webrtc/modules/audio_coding/acm2/audio_coding_module_impl.h"
+#include "webrtc/modules/audio_coding/acm2/rent_a_codec.h"
#include "webrtc/system_wrappers/include/clock.h"
#include "webrtc/system_wrappers/include/trace.h"
@@ -56,8 +56,8 @@ int AudioCodingModule::Codec(int list_id, CodecInst* codec) {
int AudioCodingModule::Codec(const char* payload_name,
CodecInst* codec,
int sampling_freq_hz,
- int channels) {
- rtc::Maybe<CodecInst> ci = acm2::RentACodec::CodecInstByParams(
+ size_t channels) {
+ rtc::Optional<CodecInst> ci = acm2::RentACodec::CodecInstByParams(
payload_name, sampling_freq_hz, channels);
if (ci) {
*codec = *ci;
@@ -76,12 +76,13 @@ int AudioCodingModule::Codec(const char* payload_name,
int AudioCodingModule::Codec(const char* payload_name,
int sampling_freq_hz,
- int channels) {
- rtc::Maybe<acm2::RentACodec::CodecId> ci = acm2::RentACodec::CodecIdByParams(
- payload_name, sampling_freq_hz, channels);
+ size_t channels) {
+ rtc::Optional<acm2::RentACodec::CodecId> ci =
+ acm2::RentACodec::CodecIdByParams(payload_name, sampling_freq_hz,
+ channels);
if (!ci)
return -1;
- rtc::Maybe<int> i = acm2::RentACodec::CodecIndexFromId(*ci);
+ rtc::Optional<int> i = acm2::RentACodec::CodecIndexFromId(*ci);
return i ? *i : -1;
}
diff --git a/webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.cc b/webrtc/modules/audio_coding/acm2/audio_coding_module_impl.cc
index b36c064800..ac302f0fe3 100644
--- a/webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.cc
+++ b/webrtc/modules/audio_coding/acm2/audio_coding_module_impl.cc
@@ -8,7 +8,7 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.h"
+#include "webrtc/modules/audio_coding/acm2/audio_coding_module_impl.h"
#include <assert.h>
#include <stdlib.h>
@@ -17,10 +17,10 @@
#include "webrtc/base/checks.h"
#include "webrtc/base/safe_conversions.h"
#include "webrtc/engine_configurations.h"
-#include "webrtc/modules/audio_coding/main/include/audio_coding_module_typedefs.h"
-#include "webrtc/modules/audio_coding/main/acm2/acm_common_defs.h"
-#include "webrtc/modules/audio_coding/main/acm2/acm_resampler.h"
-#include "webrtc/modules/audio_coding/main/acm2/call_statistics.h"
+#include "webrtc/modules/audio_coding/include/audio_coding_module_typedefs.h"
+#include "webrtc/modules/audio_coding/acm2/acm_common_defs.h"
+#include "webrtc/modules/audio_coding/acm2/acm_resampler.h"
+#include "webrtc/modules/audio_coding/acm2/call_statistics.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
#include "webrtc/system_wrappers/include/logging.h"
#include "webrtc/system_wrappers/include/metrics.h"
@@ -97,7 +97,7 @@ void AudioCodingModuleImpl::ChangeLogger::MaybeLog(int value) {
if (value != last_value_ || first_time_) {
first_time_ = false;
last_value_ = value;
- RTC_HISTOGRAM_COUNTS_100(histogram_name_, value);
+ RTC_HISTOGRAM_COUNTS_SPARSE_100(histogram_name_, value);
}
}
@@ -133,7 +133,7 @@ int32_t AudioCodingModuleImpl::Encode(const InputData& input_data) {
if (!HaveValidEncoder("Process"))
return -1;
- AudioEncoder* audio_encoder = codec_manager_.CurrentEncoder();
+ AudioEncoder* audio_encoder = rent_a_codec_.GetEncoderStack();
// Scale the timestamp to the codec's RTP timestamp rate.
uint32_t rtp_timestamp =
first_frame_ ? input_data.input_timestamp
@@ -149,7 +149,9 @@ int32_t AudioCodingModuleImpl::Encode(const InputData& input_data) {
encode_buffer_.SetSize(audio_encoder->MaxEncodedBytes());
encoded_info = audio_encoder->Encode(
- rtp_timestamp, input_data.audio, input_data.length_per_channel,
+ rtp_timestamp, rtc::ArrayView<const int16_t>(
+ input_data.audio, input_data.audio_channel *
+ input_data.length_per_channel),
encode_buffer_.size(), encode_buffer_.data());
encode_buffer_.SetSize(encoded_info.encoded_bytes);
bitrate_logger_.MaybeLog(audio_encoder->GetTargetBitrate() / 1000);
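
The encoder input is now one ArrayView whose length is the interleaved total, channels times samples per channel, replacing the separate length argument. A sketch of the size arithmetic with hypothetical values:

    void BuildEncoderInput() {
      const size_t kLengthPerChannel = 480;  // 10 ms at 48 kHz
      const size_t kChannels = 2;
      int16_t audio[2 * 480] = {0};          // interleaved stereo block
      rtc::ArrayView<const int16_t> input(audio, kChannels * kLengthPerChannel);
      // input.size() == 960, the interleaved total handed to Encode().
    }
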
@@ -196,19 +198,43 @@ int32_t AudioCodingModuleImpl::Encode(const InputData& input_data) {
// Can be called multiple times for Codec, CNG, RED.
int AudioCodingModuleImpl::RegisterSendCodec(const CodecInst& send_codec) {
CriticalSectionScoped lock(acm_crit_sect_.get());
- return codec_manager_.RegisterEncoder(send_codec);
+ if (!codec_manager_.RegisterEncoder(send_codec)) {
+ return -1;
+ }
+ auto* sp = codec_manager_.GetStackParams();
+ if (!sp->speech_encoder && codec_manager_.GetCodecInst()) {
+ // We have no speech encoder, but we have a specification for making one.
+ AudioEncoder* enc =
+ rent_a_codec_.RentEncoder(*codec_manager_.GetCodecInst());
+ if (!enc)
+ return -1;
+ sp->speech_encoder = enc;
+ }
+ if (sp->speech_encoder)
+ rent_a_codec_.RentEncoderStack(sp);
+ return 0;
}
void AudioCodingModuleImpl::RegisterExternalSendCodec(
AudioEncoder* external_speech_encoder) {
CriticalSectionScoped lock(acm_crit_sect_.get());
- codec_manager_.RegisterEncoder(external_speech_encoder);
+ auto* sp = codec_manager_.GetStackParams();
+ sp->speech_encoder = external_speech_encoder;
+ rent_a_codec_.RentEncoderStack(sp);
}
// Get current send codec.
-int AudioCodingModuleImpl::SendCodec(CodecInst* current_codec) const {
+rtc::Optional<CodecInst> AudioCodingModuleImpl::SendCodec() const {
CriticalSectionScoped lock(acm_crit_sect_.get());
- return codec_manager_.GetCodecInst(current_codec);
+ auto* ci = codec_manager_.GetCodecInst();
+ if (ci) {
+ return rtc::Optional<CodecInst>(*ci);
+ }
+ auto* enc = codec_manager_.GetStackParams()->speech_encoder;
+ if (enc) {
+ return rtc::Optional<CodecInst>(CodecManager::ForgeCodecInst(enc));
+ }
+ return rtc::Optional<CodecInst>();
}
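
Callers migrate from the out-parameter form to checking the returned Optional, as the updated unit test earlier in this change does. A sketch with a hypothetical |acm| pointer:

    #include <cstdio>

    void PrintSendCodec(webrtc::AudioCodingModule* acm) {
      const rtc::Optional<webrtc::CodecInst> ci = acm->SendCodec();
      if (ci)
        std::printf("send codec: %s @ %d Hz\n", (*ci).plname, (*ci).plfreq);
      else
        std::printf("no send codec registered\n");
    }
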
// Get current send frequency.
@@ -217,19 +243,21 @@ int AudioCodingModuleImpl::SendFrequency() const {
"SendFrequency()");
CriticalSectionScoped lock(acm_crit_sect_.get());
- if (!codec_manager_.CurrentEncoder()) {
+ const auto* enc = rent_a_codec_.GetEncoderStack();
+ if (!enc) {
WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceAudioCoding, id_,
"SendFrequency Failed, no codec is registered");
return -1;
}
- return codec_manager_.CurrentEncoder()->SampleRateHz();
+ return enc->SampleRateHz();
}
void AudioCodingModuleImpl::SetBitRate(int bitrate_bps) {
CriticalSectionScoped lock(acm_crit_sect_.get());
- if (codec_manager_.CurrentEncoder()) {
- codec_manager_.CurrentEncoder()->SetTargetBitrate(bitrate_bps);
+ auto* enc = rent_a_codec_.GetEncoderStack();
+ if (enc) {
+ enc->SetTargetBitrate(bitrate_bps);
}
}
@@ -296,10 +324,12 @@ int AudioCodingModuleImpl::Add10MsDataInternal(const AudioFrame& audio_frame,
}
// Check whether we need an up-mix or a down-mix.
- bool remix = ptr_frame->num_channels_ !=
- codec_manager_.CurrentEncoder()->NumChannels();
+ const size_t current_num_channels =
+ rent_a_codec_.GetEncoderStack()->NumChannels();
+ const bool same_num_channels =
+ ptr_frame->num_channels_ == current_num_channels;
- if (remix) {
+ if (!same_num_channels) {
if (ptr_frame->num_channels_ == 1) {
if (UpMix(*ptr_frame, WEBRTC_10MS_PCM_AUDIO, input_data->buffer) < 0)
return -1;
@@ -314,14 +344,13 @@ int AudioCodingModuleImpl::Add10MsDataInternal(const AudioFrame& audio_frame,
const int16_t* ptr_audio = ptr_frame->data_;
// For pushing data to primary, point |ptr_audio| to the correct buffer.
- if (codec_manager_.CurrentEncoder()->NumChannels() !=
- ptr_frame->num_channels_)
+ if (!same_num_channels)
ptr_audio = input_data->buffer;
input_data->input_timestamp = ptr_frame->timestamp_;
input_data->audio = ptr_audio;
input_data->length_per_channel = ptr_frame->samples_per_channel_;
- input_data->audio_channel = codec_manager_.CurrentEncoder()->NumChannels();
+ input_data->audio_channel = current_num_channels;
return 0;
}
@@ -333,13 +362,14 @@ int AudioCodingModuleImpl::Add10MsDataInternal(const AudioFrame& audio_frame,
// is required, |*ptr_out| points to |in_frame|.
int AudioCodingModuleImpl::PreprocessToAddData(const AudioFrame& in_frame,
const AudioFrame** ptr_out) {
- bool resample = (in_frame.sample_rate_hz_ !=
- codec_manager_.CurrentEncoder()->SampleRateHz());
+ const auto* enc = rent_a_codec_.GetEncoderStack();
+ const bool resample = in_frame.sample_rate_hz_ != enc->SampleRateHz();
// This variable is true if the primary codec and the secondary codec (if
// one exists) are both mono and the input is stereo.
- bool down_mix = (in_frame.num_channels_ == 2) &&
- (codec_manager_.CurrentEncoder()->NumChannels() == 1);
+ // TODO(henrik.lundin): This condition should probably be
+ // in_frame.num_channels_ > enc->NumChannels()
+ const bool down_mix = in_frame.num_channels_ == 2 && enc->NumChannels() == 1;
if (!first_10ms_data_) {
expected_in_ts_ = in_frame.timestamp_;
@@ -349,10 +379,8 @@ int AudioCodingModuleImpl::PreprocessToAddData(const AudioFrame& in_frame,
// TODO(turajs): Do we need a warning here?
expected_codec_ts_ +=
(in_frame.timestamp_ - expected_in_ts_) *
- static_cast<uint32_t>(
- (static_cast<double>(
- codec_manager_.CurrentEncoder()->SampleRateHz()) /
- static_cast<double>(in_frame.sample_rate_hz_)));
+ static_cast<uint32_t>(static_cast<double>(enc->SampleRateHz()) /
+ static_cast<double>(in_frame.sample_rate_hz_));
expected_in_ts_ = in_frame.timestamp_;
}
@@ -391,8 +419,7 @@ int AudioCodingModuleImpl::PreprocessToAddData(const AudioFrame& in_frame,
dest_ptr_audio = preprocess_frame_.data_;
int samples_per_channel = resampler_.Resample10Msec(
- src_ptr_audio, in_frame.sample_rate_hz_,
- codec_manager_.CurrentEncoder()->SampleRateHz(),
+ src_ptr_audio, in_frame.sample_rate_hz_, enc->SampleRateHz(),
preprocess_frame_.num_channels_, AudioFrame::kMaxDataSizeSamples,
dest_ptr_audio);
@@ -403,8 +430,7 @@ int AudioCodingModuleImpl::PreprocessToAddData(const AudioFrame& in_frame,
}
preprocess_frame_.samples_per_channel_ =
static_cast<size_t>(samples_per_channel);
- preprocess_frame_.sample_rate_hz_ =
- codec_manager_.CurrentEncoder()->SampleRateHz();
+ preprocess_frame_.sample_rate_hz_ = enc->SampleRateHz();
}
expected_codec_ts_ +=
@@ -420,17 +446,21 @@ int AudioCodingModuleImpl::PreprocessToAddData(const AudioFrame& in_frame,
bool AudioCodingModuleImpl::REDStatus() const {
CriticalSectionScoped lock(acm_crit_sect_.get());
- return codec_manager_.red_enabled();
+ return codec_manager_.GetStackParams()->use_red;
}
// Configure RED status i.e on/off.
-int AudioCodingModuleImpl::SetREDStatus(
+int AudioCodingModuleImpl::SetREDStatus(bool enable_red) {
#ifdef WEBRTC_CODEC_RED
- bool enable_red) {
CriticalSectionScoped lock(acm_crit_sect_.get());
- return codec_manager_.SetCopyRed(enable_red) ? 0 : -1;
+ if (!codec_manager_.SetCopyRed(enable_red)) {
+ return -1;
+ }
+ auto* sp = codec_manager_.GetStackParams();
+ if (sp->speech_encoder)
+ rent_a_codec_.RentEncoderStack(sp);
+ return 0;
#else
- bool /* enable_red */) {
WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioCoding, id_,
" WEBRTC_CODEC_RED is undefined");
return -1;
@@ -443,18 +473,29 @@ int AudioCodingModuleImpl::SetREDStatus(
bool AudioCodingModuleImpl::CodecFEC() const {
CriticalSectionScoped lock(acm_crit_sect_.get());
- return codec_manager_.codec_fec_enabled();
+ return codec_manager_.GetStackParams()->use_codec_fec;
}
int AudioCodingModuleImpl::SetCodecFEC(bool enable_codec_fec) {
CriticalSectionScoped lock(acm_crit_sect_.get());
- return codec_manager_.SetCodecFEC(enable_codec_fec);
+ if (!codec_manager_.SetCodecFEC(enable_codec_fec)) {
+ return -1;
+ }
+ auto* sp = codec_manager_.GetStackParams();
+ if (sp->speech_encoder)
+ rent_a_codec_.RentEncoderStack(sp);
+ if (enable_codec_fec) {
+ return sp->use_codec_fec ? 0 : -1;
+ } else {
+ RTC_DCHECK(!sp->use_codec_fec);
+ return 0;
+ }
}
int AudioCodingModuleImpl::SetPacketLossRate(int loss_rate) {
CriticalSectionScoped lock(acm_crit_sect_.get());
if (HaveValidEncoder("SetPacketLossRate")) {
- codec_manager_.CurrentEncoder()->SetProjectedPacketLossRate(loss_rate /
+ rent_a_codec_.GetEncoderStack()->SetProjectedPacketLossRate(loss_rate /
100.0);
}
return 0;
@@ -469,14 +510,22 @@ int AudioCodingModuleImpl::SetVAD(bool enable_dtx,
// Note: |enable_vad| is not used; VAD is enabled based on the DTX setting.
RTC_DCHECK_EQ(enable_dtx, enable_vad);
CriticalSectionScoped lock(acm_crit_sect_.get());
- return codec_manager_.SetVAD(enable_dtx, mode);
+ if (!codec_manager_.SetVAD(enable_dtx, mode)) {
+ return -1;
+ }
+ auto* sp = codec_manager_.GetStackParams();
+ if (sp->speech_encoder)
+ rent_a_codec_.RentEncoderStack(sp);
+ return 0;
}
// Get VAD/DTX settings.
int AudioCodingModuleImpl::VAD(bool* dtx_enabled, bool* vad_enabled,
ACMVADMode* mode) const {
CriticalSectionScoped lock(acm_crit_sect_.get());
- codec_manager_.VAD(dtx_enabled, vad_enabled, mode);
+ const auto* sp = codec_manager_.GetStackParams();
+ *dtx_enabled = *vad_enabled = sp->use_cng;
+ *mode = sp->vad_mode;
return 0;
}
@@ -510,7 +559,7 @@ int AudioCodingModuleImpl::InitializeReceiverSafe() {
if (IsCodecRED(db[i]) || IsCodecCN(db[i])) {
if (receiver_.AddCodec(static_cast<int>(i),
static_cast<uint8_t>(db[i].pltype), 1,
- db[i].plfreq, nullptr) < 0) {
+ db[i].plfreq, nullptr, db[i].plname) < 0) {
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
"Cannot register master codec.");
return -1;
@@ -523,25 +572,16 @@ int AudioCodingModuleImpl::InitializeReceiverSafe() {
// Get current receive frequency.
int AudioCodingModuleImpl::ReceiveFrequency() const {
- WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceAudioCoding, id_,
- "ReceiveFrequency()");
-
- CriticalSectionScoped lock(acm_crit_sect_.get());
-
- int codec_id = receiver_.last_audio_codec_id();
-
- return codec_id < 0 ? receiver_.current_sample_rate_hz() :
- ACMCodecDB::database_[codec_id].plfreq;
+ const auto last_packet_sample_rate = receiver_.last_packet_sample_rate_hz();
+ return last_packet_sample_rate ? *last_packet_sample_rate
+ : receiver_.last_output_sample_rate_hz();
}
// Get current playout frequency.
int AudioCodingModuleImpl::PlayoutFrequency() const {
WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceAudioCoding, id_,
"PlayoutFrequency()");
-
- CriticalSectionScoped lock(acm_crit_sect_.get());
-
- return receiver_.current_sample_rate_hz();
+ return receiver_.last_output_sample_rate_hz();
}
// Register possible receive codecs, can be called multiple times,
@@ -549,7 +589,7 @@ int AudioCodingModuleImpl::PlayoutFrequency() const {
int AudioCodingModuleImpl::RegisterReceiveCodec(const CodecInst& codec) {
CriticalSectionScoped lock(acm_crit_sect_.get());
RTC_DCHECK(receiver_initialized_);
- if (codec.channels > 2 || codec.channels < 0) {
+ if (codec.channels > 2) {
LOG_F(LS_ERROR) << "Unsupported number of channels: " << codec.channels;
return -1;
}
@@ -564,7 +604,7 @@ int AudioCodingModuleImpl::RegisterReceiveCodec(const CodecInst& codec) {
RTC_CHECK(codec_index) << "Invalid codec ID: " << static_cast<int>(*codec_id);
// Check if the payload-type is valid.
- if (!ACMCodecDB::ValidPayloadType(codec.pltype)) {
+ if (!RentACodec::IsPayloadTypeValid(codec.pltype)) {
LOG_F(LS_ERROR) << "Invalid payload type " << codec.pltype << " for "
<< codec.plname;
return -1;
@@ -572,16 +612,19 @@ int AudioCodingModuleImpl::RegisterReceiveCodec(const CodecInst& codec) {
// Get |decoder| associated with |codec|. |decoder| is NULL if |codec| does
// not own its decoder.
- return receiver_.AddCodec(*codec_index, codec.pltype, codec.channels,
- codec.plfreq,
- codec_manager_.GetAudioDecoder(codec));
+ return receiver_.AddCodec(
+ *codec_index, codec.pltype, codec.channels, codec.plfreq,
+ STR_CASE_CMP(codec.plname, "isac") == 0 ? rent_a_codec_.RentIsacDecoder()
+ : nullptr,
+ codec.plname);
}
int AudioCodingModuleImpl::RegisterExternalReceiveCodec(
int rtp_payload_type,
AudioDecoder* external_decoder,
int sample_rate_hz,
- int num_channels) {
+ int num_channels,
+ const std::string& name) {
CriticalSectionScoped lock(acm_crit_sect_.get());
RTC_DCHECK(receiver_initialized_);
if (num_channels > 2 || num_channels < 0) {
@@ -590,14 +633,14 @@ int AudioCodingModuleImpl::RegisterExternalReceiveCodec(
}
// Check if the payload-type is valid.
- if (!ACMCodecDB::ValidPayloadType(rtp_payload_type)) {
+ if (!RentACodec::IsPayloadTypeValid(rtp_payload_type)) {
LOG_F(LS_ERROR) << "Invalid payload-type " << rtp_payload_type
<< " for external decoder.";
return -1;
}
return receiver_.AddCodec(-1 /* external */, rtp_payload_type, num_channels,
- sample_rate_hz, external_decoder);
+ sample_rate_hz, external_decoder, name);
}
// Get current received codec.
@@ -610,7 +653,9 @@ int AudioCodingModuleImpl::ReceiveCodec(CodecInst* current_codec) const {
int AudioCodingModuleImpl::IncomingPacket(const uint8_t* incoming_payload,
const size_t payload_length,
const WebRtcRTPHeader& rtp_header) {
- return receiver_.InsertPacket(rtp_header, incoming_payload, payload_length);
+ return receiver_.InsertPacket(
+ rtp_header,
+ rtc::ArrayView<const uint8_t>(incoming_payload, payload_length));
}
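
rtc::ArrayView bundles the pointer and length that were previously passed as two arguments; constructing one is a cheap, non-owning view over the caller's buffer. A sketch with a hypothetical payload:

    #include "webrtc/base/array_view.h"

    void MakeView() {
      const uint8_t payload[] = {0x01, 0x02, 0x03};  // hypothetical RTP payload
      rtc::ArrayView<const uint8_t> view(payload, sizeof(payload));
      // view.data() == payload and view.size() == 3; nothing is copied.
    }
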
// Minimum playout delay (used for lip-sync).
@@ -699,8 +744,6 @@ int AudioCodingModuleImpl::SetOpusApplication(OpusApplicationMode application) {
if (!HaveValidEncoder("SetOpusApplication")) {
return -1;
}
- if (!codec_manager_.CurrentEncoderIsOpus())
- return -1;
AudioEncoder::Application app;
switch (application) {
case kVoip:
@@ -713,7 +756,7 @@ int AudioCodingModuleImpl::SetOpusApplication(OpusApplicationMode application) {
FATAL();
return 0;
}
- return codec_manager_.CurrentEncoder()->SetApplication(app) ? 0 : -1;
+ return rent_a_codec_.GetEncoderStack()->SetApplication(app) ? 0 : -1;
}
// Informs Opus encoder of the maximum playback rate the receiver will render.
@@ -722,9 +765,7 @@ int AudioCodingModuleImpl::SetOpusMaxPlaybackRate(int frequency_hz) {
if (!HaveValidEncoder("SetOpusMaxPlaybackRate")) {
return -1;
}
- if (!codec_manager_.CurrentEncoderIsOpus())
- return -1;
- codec_manager_.CurrentEncoder()->SetMaxPlaybackRate(frequency_hz);
+ rent_a_codec_.GetEncoderStack()->SetMaxPlaybackRate(frequency_hz);
return 0;
}
@@ -733,9 +774,7 @@ int AudioCodingModuleImpl::EnableOpusDtx() {
if (!HaveValidEncoder("EnableOpusDtx")) {
return -1;
}
- if (!codec_manager_.CurrentEncoderIsOpus())
- return -1;
- return codec_manager_.CurrentEncoder()->SetDtx(true) ? 0 : -1;
+ return rent_a_codec_.GetEncoderStack()->SetDtx(true) ? 0 : -1;
}
int AudioCodingModuleImpl::DisableOpusDtx() {
@@ -743,9 +782,7 @@ int AudioCodingModuleImpl::DisableOpusDtx() {
if (!HaveValidEncoder("DisableOpusDtx")) {
return -1;
}
- if (!codec_manager_.CurrentEncoderIsOpus())
- return -1;
- return codec_manager_.CurrentEncoder()->SetDtx(false) ? 0 : -1;
+ return rent_a_codec_.GetEncoderStack()->SetDtx(false) ? 0 : -1;
}
int AudioCodingModuleImpl::PlayoutTimestamp(uint32_t* timestamp) {
@@ -753,7 +790,7 @@ int AudioCodingModuleImpl::PlayoutTimestamp(uint32_t* timestamp) {
}
bool AudioCodingModuleImpl::HaveValidEncoder(const char* caller_name) const {
- if (!codec_manager_.CurrentEncoder()) {
+ if (!rent_a_codec_.GetEncoderStack()) {
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
"%s failed: No send codec is registered.", caller_name);
return false;
@@ -765,17 +802,6 @@ int AudioCodingModuleImpl::UnregisterReceiveCodec(uint8_t payload_type) {
return receiver_.RemoveCodec(payload_type);
}
-int AudioCodingModuleImpl::SetInitialPlayoutDelay(int delay_ms) {
- {
- CriticalSectionScoped lock(acm_crit_sect_.get());
- // Initialize receiver, if it is not initialized. Otherwise, initial delay
- // is reset upon initialization of the receiver.
- if (!receiver_initialized_)
- InitializeReceiverSafe();
- }
- return receiver_.SetInitialDelay(delay_ms);
-}
-
int AudioCodingModuleImpl::EnableNack(size_t max_nack_list_size) {
return receiver_.EnableNack(max_nack_list_size);
}
diff --git a/webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.h b/webrtc/modules/audio_coding/acm2/audio_coding_module_impl.h
index f20861398b..926671f199 100644
--- a/webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.h
+++ b/webrtc/modules/audio_coding/acm2/audio_coding_module_impl.h
@@ -8,9 +8,10 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_AUDIO_CODING_MODULE_IMPL_H_
-#define WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_AUDIO_CODING_MODULE_IMPL_H_
+#ifndef WEBRTC_MODULES_AUDIO_CODING_ACM2_AUDIO_CODING_MODULE_IMPL_H_
+#define WEBRTC_MODULES_AUDIO_CODING_ACM2_AUDIO_CODING_MODULE_IMPL_H_
+#include <string>
#include <vector>
#include "webrtc/base/buffer.h"
@@ -18,10 +19,9 @@
#include "webrtc/base/thread_annotations.h"
#include "webrtc/common_types.h"
#include "webrtc/engine_configurations.h"
-#include "webrtc/modules/audio_coding/main/acm2/acm_codec_database.h"
-#include "webrtc/modules/audio_coding/main/acm2/acm_receiver.h"
-#include "webrtc/modules/audio_coding/main/acm2/acm_resampler.h"
-#include "webrtc/modules/audio_coding/main/acm2/codec_manager.h"
+#include "webrtc/modules/audio_coding/acm2/acm_receiver.h"
+#include "webrtc/modules/audio_coding/acm2/acm_resampler.h"
+#include "webrtc/modules/audio_coding/acm2/codec_manager.h"
namespace webrtc {
@@ -48,7 +48,7 @@ class AudioCodingModuleImpl final : public AudioCodingModule {
AudioEncoder* external_speech_encoder) override;
// Get current send codec.
- int SendCodec(CodecInst* current_codec) const override;
+ rtc::Optional<CodecInst> SendCodec() const override;
// Get current send frequency.
int SendFrequency() const override;
@@ -124,7 +124,8 @@ class AudioCodingModuleImpl final : public AudioCodingModule {
int RegisterExternalReceiveCodec(int rtp_payload_type,
AudioDecoder* external_decoder,
int sample_rate_hz,
- int num_channels) override;
+ int num_channels,
+ const std::string& name) override;
// Get current received codec.
int ReceiveCodec(CodecInst* current_codec) const override;
@@ -150,10 +151,6 @@ class AudioCodingModuleImpl final : public AudioCodingModule {
// Smallest latency NetEq will maintain.
int LeastRequiredDelayMs() const override;
- // Impose an initial delay on playout. ACM plays silence until |delay_ms|
- // audio is accumulated in NetEq buffer, then starts decoding payloads.
- int SetInitialPlayoutDelay(int delay_ms) override;
-
// Get playout timestamp.
int PlayoutTimestamp(uint32_t* timestamp) override;
@@ -192,7 +189,7 @@ class AudioCodingModuleImpl final : public AudioCodingModule {
uint32_t input_timestamp;
const int16_t* audio;
size_t length_per_channel;
- uint8_t audio_channel;
+ size_t audio_channel;
// If a re-mix is required (up or down), this buffer will store a re-mixed
// version of the input.
int16_t buffer[WEBRTC_10MS_PCM_AUDIO];
@@ -252,6 +249,7 @@ class AudioCodingModuleImpl final : public AudioCodingModule {
AcmReceiver receiver_; // AcmReceiver has its own internal lock.
ChangeLogger bitrate_logger_ GUARDED_BY(acm_crit_sect_);
CodecManager codec_manager_ GUARDED_BY(acm_crit_sect_);
+ RentACodec rent_a_codec_ GUARDED_BY(acm_crit_sect_);
// This is to keep track of CN instances where we can send DTMFs.
uint8_t previous_pltype_ GUARDED_BY(acm_crit_sect_);
@@ -282,4 +280,4 @@ class AudioCodingModuleImpl final : public AudioCodingModule {
} // namespace acm2
} // namespace webrtc
-#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_AUDIO_CODING_MODULE_IMPL_H_
+#endif // WEBRTC_MODULES_AUDIO_CODING_ACM2_AUDIO_CODING_MODULE_IMPL_H_
diff --git a/webrtc/modules/audio_coding/main/acm2/audio_coding_module_unittest_oldapi.cc b/webrtc/modules/audio_coding/acm2/audio_coding_module_unittest_oldapi.cc
index 879fb839fe..6f82a96ee5 100644
--- a/webrtc/modules/audio_coding/main/acm2/audio_coding_module_unittest_oldapi.cc
+++ b/webrtc/modules/audio_coding/acm2/audio_coding_module_unittest_oldapi.cc
@@ -8,22 +8,24 @@
* be found in the AUTHORS file in the root of the source tree.
*/
+#include <stdio.h>
#include <string.h>
#include <vector>
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/base/md5digest.h"
+#include "webrtc/base/platform_thread.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/base/thread_annotations.h"
#include "webrtc/modules/audio_coding/codecs/audio_encoder.h"
-#include "webrtc/modules/audio_coding/codecs/g711/include/audio_decoder_pcm.h"
-#include "webrtc/modules/audio_coding/codecs/g711/include/audio_encoder_pcm.h"
+#include "webrtc/modules/audio_coding/codecs/g711/audio_decoder_pcm.h"
+#include "webrtc/modules/audio_coding/codecs/g711/audio_encoder_pcm.h"
#include "webrtc/modules/audio_coding/codecs/isac/main/include/audio_encoder_isac.h"
#include "webrtc/modules/audio_coding/codecs/mock/mock_audio_encoder.h"
-#include "webrtc/modules/audio_coding/main/acm2/acm_receive_test_oldapi.h"
-#include "webrtc/modules/audio_coding/main/acm2/acm_send_test_oldapi.h"
-#include "webrtc/modules/audio_coding/main/include/audio_coding_module.h"
-#include "webrtc/modules/audio_coding/main/include/audio_coding_module_typedefs.h"
+#include "webrtc/modules/audio_coding/acm2/acm_receive_test_oldapi.h"
+#include "webrtc/modules/audio_coding/acm2/acm_send_test_oldapi.h"
+#include "webrtc/modules/audio_coding/include/audio_coding_module.h"
+#include "webrtc/modules/audio_coding/include/audio_coding_module_typedefs.h"
#include "webrtc/modules/audio_coding/neteq/audio_decoder_impl.h"
#include "webrtc/modules/audio_coding/neteq/mock/mock_audio_decoder.h"
#include "webrtc/modules/audio_coding/neteq/tools/audio_checksum.h"
@@ -33,14 +35,12 @@
#include "webrtc/modules/audio_coding/neteq/tools/output_audio_file.h"
#include "webrtc/modules/audio_coding/neteq/tools/packet.h"
#include "webrtc/modules/audio_coding/neteq/tools/rtp_file_source.h"
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/system_wrappers/include/clock.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
#include "webrtc/system_wrappers/include/event_wrapper.h"
#include "webrtc/system_wrappers/include/sleep.h"
-#include "webrtc/system_wrappers/include/thread_wrapper.h"
#include "webrtc/test/testsupport/fileutils.h"
-#include "webrtc/test/testsupport/gtest_disable.h"
using ::testing::AtLeast;
using ::testing::Invoke;
@@ -237,7 +237,12 @@ class AudioCodingModuleTestOldApi : public ::testing::Test {
// Check if the statistics are initialized correctly. Before any call to ACM
// all fields have to be zero.
-TEST_F(AudioCodingModuleTestOldApi, DISABLED_ON_ANDROID(InitializedToZero)) {
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_InitializedToZero DISABLED_InitializedToZero
+#else
+#define MAYBE_InitializedToZero InitializedToZero
+#endif
+TEST_F(AudioCodingModuleTestOldApi, MAYBE_InitializedToZero) {
RegisterCodec();
AudioDecodingCallStats stats;
acm_->GetDecodingCallStatistics(&stats);
@@ -249,34 +254,15 @@ TEST_F(AudioCodingModuleTestOldApi, DISABLED_ON_ANDROID(InitializedToZero)) {
EXPECT_EQ(0, stats.decoded_plc_cng);
}
-// Apply an initial playout delay. Calls to AudioCodingModule::PlayoutData10ms()
-// should result in generating silence, check the associated field.
-TEST_F(AudioCodingModuleTestOldApi,
- DISABLED_ON_ANDROID(SilenceGeneratorCalled)) {
- RegisterCodec();
- AudioDecodingCallStats stats;
- const int kInitialDelay = 100;
-
- acm_->SetInitialPlayoutDelay(kInitialDelay);
-
- int num_calls = 0;
- for (int time_ms = 0; time_ms < kInitialDelay;
- time_ms += kFrameSizeMs, ++num_calls) {
- InsertPacketAndPullAudio();
- }
- acm_->GetDecodingCallStatistics(&stats);
- EXPECT_EQ(0, stats.calls_to_neteq);
- EXPECT_EQ(num_calls, stats.calls_to_silence_generator);
- EXPECT_EQ(0, stats.decoded_normal);
- EXPECT_EQ(0, stats.decoded_cng);
- EXPECT_EQ(0, stats.decoded_plc);
- EXPECT_EQ(0, stats.decoded_plc_cng);
-}
-
// Insert some packets and pull audio. Check statistics are valid. Then,
// simulate packet loss and check if PLC and PLC-to-CNG statistics are
// correctly updated.
-TEST_F(AudioCodingModuleTestOldApi, DISABLED_ON_ANDROID(NetEqCalls)) {
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_NetEqCalls DISABLED_NetEqCalls
+#else
+#define MAYBE_NetEqCalls NetEqCalls
+#endif
+TEST_F(AudioCodingModuleTestOldApi, MAYBE_NetEqCalls) {
RegisterCodec();
AudioDecodingCallStats stats;
const int kNumNormalCalls = 10;
@@ -314,7 +300,7 @@ TEST_F(AudioCodingModuleTestOldApi, VerifyOutputFrame) {
EXPECT_EQ(0, acm_->PlayoutData10Ms(kSampleRateHz, &audio_frame));
EXPECT_EQ(id_, audio_frame.id_);
EXPECT_EQ(0u, audio_frame.timestamp_);
- EXPECT_GT(audio_frame.num_channels_, 0);
+ EXPECT_GT(audio_frame.num_channels_, 0u);
EXPECT_EQ(static_cast<size_t>(kSampleRateHz / 100),
audio_frame.samples_per_channel_);
EXPECT_EQ(kSampleRateHz, audio_frame.sample_rate_hz_);
@@ -343,15 +329,9 @@ TEST_F(AudioCodingModuleTestOldApi, TransportCallbackIsInvokedForEachPacket) {
}
#if defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)
-#define IF_ISAC(x) x
-#else
-#define IF_ISAC(x) DISABLED_##x
-#endif
-
// Verifies that the RTP timestamp series is not reset when the codec is
// changed.
-TEST_F(AudioCodingModuleTestOldApi,
- IF_ISAC(TimestampSeriesContinuesWhenCodecChanges)) {
+TEST_F(AudioCodingModuleTestOldApi, TimestampSeriesContinuesWhenCodecChanges) {
RegisterCodec(); // This registers the default codec.
uint32_t expected_ts = input_frame_.timestamp_;
int blocks_per_packet = codec_.pacsize / (kSampleRateHz / 100);
@@ -383,6 +363,7 @@ TEST_F(AudioCodingModuleTestOldApi,
expected_ts += codec_.pacsize;
}
}
+#endif
// Introduce this class to set different expectations on the number of encoded
// bytes. This class expects all encoded packets to be 9 bytes (matching one
@@ -481,11 +462,9 @@ class AudioCodingModuleMtTestOldApi : public AudioCodingModuleTestOldApi {
AudioCodingModuleMtTestOldApi()
: AudioCodingModuleTestOldApi(),
- send_thread_(ThreadWrapper::CreateThread(CbSendThread, this, "send")),
- insert_packet_thread_(ThreadWrapper::CreateThread(
- CbInsertPacketThread, this, "insert_packet")),
- pull_audio_thread_(ThreadWrapper::CreateThread(
- CbPullAudioThread, this, "pull_audio")),
+ send_thread_(CbSendThread, this, "send"),
+ insert_packet_thread_(CbInsertPacketThread, this, "insert_packet"),
+ pull_audio_thread_(CbPullAudioThread, this, "pull_audio"),
test_complete_(EventWrapper::Create()),
send_count_(0),
insert_packet_count_(0),
@@ -503,19 +482,19 @@ class AudioCodingModuleMtTestOldApi : public AudioCodingModuleTestOldApi {
}
void StartThreads() {
- ASSERT_TRUE(send_thread_->Start());
- send_thread_->SetPriority(kRealtimePriority);
- ASSERT_TRUE(insert_packet_thread_->Start());
- insert_packet_thread_->SetPriority(kRealtimePriority);
- ASSERT_TRUE(pull_audio_thread_->Start());
- pull_audio_thread_->SetPriority(kRealtimePriority);
+ send_thread_.Start();
+ send_thread_.SetPriority(rtc::kRealtimePriority);
+ insert_packet_thread_.Start();
+ insert_packet_thread_.SetPriority(rtc::kRealtimePriority);
+ pull_audio_thread_.Start();
+ pull_audio_thread_.SetPriority(rtc::kRealtimePriority);
}
void TearDown() {
AudioCodingModuleTestOldApi::TearDown();
- pull_audio_thread_->Stop();
- send_thread_->Stop();
- insert_packet_thread_->Stop();
+ pull_audio_thread_.Stop();
+ send_thread_.Stop();
+ insert_packet_thread_.Stop();
}
EventTypeWrapper RunTest() {
@@ -595,9 +574,9 @@ class AudioCodingModuleMtTestOldApi : public AudioCodingModuleTestOldApi {
return true;
}
- rtc::scoped_ptr<ThreadWrapper> send_thread_;
- rtc::scoped_ptr<ThreadWrapper> insert_packet_thread_;
- rtc::scoped_ptr<ThreadWrapper> pull_audio_thread_;
+ rtc::PlatformThread send_thread_;
+ rtc::PlatformThread insert_packet_thread_;
+ rtc::PlatformThread pull_audio_thread_;
const rtc::scoped_ptr<EventWrapper> test_complete_;
int send_count_;
int insert_packet_count_;
@@ -607,7 +586,12 @@ class AudioCodingModuleMtTestOldApi : public AudioCodingModuleTestOldApi {
rtc::scoped_ptr<SimulatedClock> fake_clock_;
};
-TEST_F(AudioCodingModuleMtTestOldApi, DISABLED_ON_IOS(DoTest)) {
+#if defined(WEBRTC_IOS)
+#define MAYBE_DoTest DISABLED_DoTest
+#else
+#define MAYBE_DoTest DoTest
+#endif
+TEST_F(AudioCodingModuleMtTestOldApi, MAYBE_DoTest) {
EXPECT_EQ(kEventSignaled, RunTest());
}
@@ -680,7 +664,11 @@ class AcmIsacMtTestOldApi : public AudioCodingModuleMtTestOldApi {
}
void InsertAudio() {
- memcpy(input_frame_.data_, audio_loop_.GetNextBlock(), kNumSamples10ms);
+    // TODO(kwiberg): Use std::copy here. There might be complications,
+    // because AFAICS this call confuses the number of samples with the number
+    // of bytes, and ends up copying only half of what it should.
+ memcpy(input_frame_.data_, audio_loop_.GetNextBlock().data(),
+ kNumSamples10ms);
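+    // A corrected version would presumably copy kNumSamples10ms samples
+    // (i.e. kNumSamples10ms * sizeof(int16_t) bytes), e.g.:
+    //   const auto block = audio_loop_.GetNextBlock();
+    //   std::copy(block.begin(), block.begin() + kNumSamples10ms,
+    //             input_frame_.data_);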
AudioCodingModuleTestOldApi::InsertAudio();
}
@@ -707,9 +695,16 @@ class AcmIsacMtTestOldApi : public AudioCodingModuleMtTestOldApi {
test::AudioLoop audio_loop_;
};
-TEST_F(AcmIsacMtTestOldApi, DISABLED_ON_IOS(IF_ISAC(DoTest))) {
+#if defined(WEBRTC_IOS)
+#define MAYBE_DoTest DISABLED_DoTest
+#else
+#define MAYBE_DoTest DoTest
+#endif
+#if defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)
+TEST_F(AcmIsacMtTestOldApi, MAYBE_DoTest) {
EXPECT_EQ(kEventSignaled, RunTest());
}
+#endif
class AcmReRegisterIsacMtTestOldApi : public AudioCodingModuleTestOldApi {
protected:
@@ -720,12 +715,10 @@ class AcmReRegisterIsacMtTestOldApi : public AudioCodingModuleTestOldApi {
AcmReRegisterIsacMtTestOldApi()
: AudioCodingModuleTestOldApi(),
- receive_thread_(
- ThreadWrapper::CreateThread(CbReceiveThread, this, "receive")),
- codec_registration_thread_(
- ThreadWrapper::CreateThread(CbCodecRegistrationThread,
- this,
- "codec_registration")),
+ receive_thread_(CbReceiveThread, this, "receive"),
+ codec_registration_thread_(CbCodecRegistrationThread,
+ this,
+ "codec_registration"),
test_complete_(EventWrapper::Create()),
crit_sect_(CriticalSectionWrapper::CreateCriticalSection()),
codec_registered_(false),
@@ -761,16 +754,16 @@ class AcmReRegisterIsacMtTestOldApi : public AudioCodingModuleTestOldApi {
}
void StartThreads() {
- ASSERT_TRUE(receive_thread_->Start());
- receive_thread_->SetPriority(kRealtimePriority);
- ASSERT_TRUE(codec_registration_thread_->Start());
- codec_registration_thread_->SetPriority(kRealtimePriority);
+ receive_thread_.Start();
+ receive_thread_.SetPriority(rtc::kRealtimePriority);
+ codec_registration_thread_.Start();
+ codec_registration_thread_.SetPriority(rtc::kRealtimePriority);
}
void TearDown() {
AudioCodingModuleTestOldApi::TearDown();
- receive_thread_->Stop();
- codec_registration_thread_->Stop();
+ receive_thread_.Stop();
+ codec_registration_thread_.Stop();
}
EventTypeWrapper RunTest() {
@@ -798,9 +791,9 @@ class AcmReRegisterIsacMtTestOldApi : public AudioCodingModuleTestOldApi {
// Encode new frame.
uint32_t input_timestamp = rtp_header_.header.timestamp;
while (info.encoded_bytes == 0) {
- info = isac_encoder_->Encode(
- input_timestamp, audio_loop_.GetNextBlock(), kNumSamples10ms,
- max_encoded_bytes, encoded.get());
+ info =
+ isac_encoder_->Encode(input_timestamp, audio_loop_.GetNextBlock(),
+ max_encoded_bytes, encoded.get());
input_timestamp += 160; // 10 ms at 16 kHz.
}
EXPECT_EQ(rtp_header_.header.timestamp + kPacketSizeSamples,
@@ -849,8 +842,8 @@ class AcmReRegisterIsacMtTestOldApi : public AudioCodingModuleTestOldApi {
return true;
}
- rtc::scoped_ptr<ThreadWrapper> receive_thread_;
- rtc::scoped_ptr<ThreadWrapper> codec_registration_thread_;
+ rtc::PlatformThread receive_thread_;
+ rtc::PlatformThread codec_registration_thread_;
const rtc::scoped_ptr<EventWrapper> test_complete_;
const rtc::scoped_ptr<CriticalSectionWrapper> crit_sect_;
bool codec_registered_ GUARDED_BY(crit_sect_);
@@ -861,9 +854,16 @@ class AcmReRegisterIsacMtTestOldApi : public AudioCodingModuleTestOldApi {
test::AudioLoop audio_loop_;
};
-TEST_F(AcmReRegisterIsacMtTestOldApi, DISABLED_ON_IOS(IF_ISAC(DoTest))) {
+#if defined(WEBRTC_IOS)
+#define MAYBE_DoTest DISABLED_DoTest
+#else
+#define MAYBE_DoTest DoTest
+#endif
+#if defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)
+TEST_F(AcmReRegisterIsacMtTestOldApi, MAYBE_DoTest) {
EXPECT_EQ(kEventSignaled, RunTest());
}
+#endif
// Disabling all of these tests on iOS until file support has been added.
// See https://code.google.com/p/webrtc/issues/detail?id=4752 for details.
@@ -871,13 +871,16 @@ TEST_F(AcmReRegisterIsacMtTestOldApi, DISABLED_ON_IOS(IF_ISAC(DoTest))) {
class AcmReceiverBitExactnessOldApi : public ::testing::Test {
public:
- static std::string PlatformChecksum(std::string win64,
- std::string android,
- std::string others) {
+ static std::string PlatformChecksum(std::string others,
+ std::string win64,
+ std::string android_arm32,
+ std::string android_arm64) {
#if defined(_WIN32) && defined(WEBRTC_ARCH_64_BITS)
return win64;
-#elif defined(WEBRTC_ANDROID)
- return android;
+#elif defined(WEBRTC_ANDROID) && defined(WEBRTC_ARCH_ARM)
+ return android_arm32;
+#elif defined(WEBRTC_ANDROID) && defined(WEBRTC_ARCH_ARM64)
+ return android_arm64;
#else
return others;
#endif
@@ -889,6 +892,7 @@ class AcmReceiverBitExactnessOldApi : public ::testing::Test {
AudioDecoder* external_decoder;
int sample_rate_hz;
int num_channels;
+ std::string name;
};
void Run(int output_freq_hz,
@@ -924,83 +928,76 @@ class AcmReceiverBitExactnessOldApi : public ::testing::Test {
for (const auto& ed : external_decoders) {
ASSERT_EQ(0, test.RegisterExternalReceiveCodec(
ed.rtp_payload_type, ed.external_decoder,
- ed.sample_rate_hz, ed.num_channels));
+ ed.sample_rate_hz, ed.num_channels, ed.name));
}
test.Run();
std::string checksum_string = checksum.Finish();
EXPECT_EQ(checksum_ref, checksum_string);
+
+ // Delete the output file.
+ remove(output_file_name.c_str());
}
};
-#if (defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISAC)) && \
+#if (defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)) && \
defined(WEBRTC_CODEC_ILBC) && defined(WEBRTC_CODEC_G722)
-#define IF_ALL_CODECS(x) x
-#else
-#define IF_ALL_CODECS(x) DISABLED_##x
-#endif
-
-// Fails Android ARM64. https://code.google.com/p/webrtc/issues/detail?id=4199
-#if defined(WEBRTC_ANDROID) && defined(WEBRTC_ARCH_ARM64)
-#define MAYBE_8kHzOutput DISABLED_8kHzOutput
-#else
-#define MAYBE_8kHzOutput 8kHzOutput
-#endif
-TEST_F(AcmReceiverBitExactnessOldApi, IF_ALL_CODECS(MAYBE_8kHzOutput)) {
- Run(8000, PlatformChecksum("dcee98c623b147ebe1b40dd30efa896e",
+TEST_F(AcmReceiverBitExactnessOldApi, 8kHzOutput) {
+ Run(8000, PlatformChecksum("908002dc01fc4eb1d2be24eb1d3f354b",
+ "dcee98c623b147ebe1b40dd30efa896e",
"adc92e173f908f93b96ba5844209815a",
- "908002dc01fc4eb1d2be24eb1d3f354b"),
+ "ba16137d3a5a1e637252289c57522bfe"),
std::vector<ExternalDecoder>());
}
-// Fails Android ARM64. https://code.google.com/p/webrtc/issues/detail?id=4199
-#if defined(WEBRTC_ANDROID) && defined(WEBRTC_ARCH_ARM64)
-#define MAYBE_16kHzOutput DISABLED_16kHzOutput
-#else
-#define MAYBE_16kHzOutput 16kHzOutput
-#endif
-TEST_F(AcmReceiverBitExactnessOldApi, IF_ALL_CODECS(MAYBE_16kHzOutput)) {
- Run(16000, PlatformChecksum("f790e7a8cce4e2c8b7bb5e0e4c5dac0d",
+TEST_F(AcmReceiverBitExactnessOldApi, 16kHzOutput) {
+ Run(16000, PlatformChecksum("a909560b5ca49fa472b17b7b277195e9",
+ "f790e7a8cce4e2c8b7bb5e0e4c5dac0d",
"8cffa6abcb3e18e33b9d857666dff66a",
- "a909560b5ca49fa472b17b7b277195e9"),
+ "66ee001e23534d4dcf5d0f81f916c93b"),
std::vector<ExternalDecoder>());
}
-// Fails Android ARM64. https://code.google.com/p/webrtc/issues/detail?id=4199
-#if defined(WEBRTC_ANDROID) && defined(WEBRTC_ARCH_ARM64)
-#define MAYBE_32kHzOutput DISABLED_32kHzOutput
-#else
-#define MAYBE_32kHzOutput 32kHzOutput
-#endif
-TEST_F(AcmReceiverBitExactnessOldApi, IF_ALL_CODECS(MAYBE_32kHzOutput)) {
- Run(32000, PlatformChecksum("306e0d990ee6e92de3fbecc0123ece37",
+TEST_F(AcmReceiverBitExactnessOldApi, 32kHzOutput) {
+ Run(32000, PlatformChecksum("441aab4b347fb3db4e9244337aca8d8e",
+ "306e0d990ee6e92de3fbecc0123ece37",
"3e126fe894720c3f85edadcc91964ba5",
- "441aab4b347fb3db4e9244337aca8d8e"),
+ "9c6ff204b14152c48fe41d5ab757943b"),
std::vector<ExternalDecoder>());
}
-// Fails Android ARM64. https://code.google.com/p/webrtc/issues/detail?id=4199
-#if defined(WEBRTC_ANDROID) && defined(WEBRTC_ARCH_ARM64)
-#define MAYBE_48kHzOutput DISABLED_48kHzOutput
-#else
-#define MAYBE_48kHzOutput 48kHzOutput
-#endif
-TEST_F(AcmReceiverBitExactnessOldApi, IF_ALL_CODECS(MAYBE_48kHzOutput)) {
- Run(48000, PlatformChecksum("aa7c232f63a67b2a72703593bdd172e0",
+TEST_F(AcmReceiverBitExactnessOldApi, 48kHzOutput) {
+ Run(48000, PlatformChecksum("4ee2730fa1daae755e8a8fd3abd779ec",
+ "aa7c232f63a67b2a72703593bdd172e0",
"0155665e93067c4e89256b944dd11999",
- "4ee2730fa1daae755e8a8fd3abd779ec"),
+ "fc4f0da8844cd808d822bbddf3b9c285"),
std::vector<ExternalDecoder>());
}
-// Fails Android ARM64. https://code.google.com/p/webrtc/issues/detail?id=4199
-#if defined(WEBRTC_ANDROID) && defined(__aarch64__)
-#define MAYBE_48kHzOutputExternalDecoder DISABLED_48kHzOutputExternalDecoder
-#else
-#define MAYBE_48kHzOutputExternalDecoder 48kHzOutputExternalDecoder
-#endif
-TEST_F(AcmReceiverBitExactnessOldApi,
- IF_ALL_CODECS(MAYBE_48kHzOutputExternalDecoder)) {
+TEST_F(AcmReceiverBitExactnessOldApi, 48kHzOutputExternalDecoder) {
+  // Class intended to forward a call from a mock DecodeInternal to the real
+  // decoder's Decode, since DecodeInternal isn't public on the real decoder.
+ class DecodeForwarder {
+ public:
+ DecodeForwarder(AudioDecoder* decoder) : decoder_(decoder) {}
+ int Decode(const uint8_t* encoded,
+ size_t encoded_len,
+ int sample_rate_hz,
+ int16_t* decoded,
+ AudioDecoder::SpeechType* speech_type) {
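+      // The max output size passed to Decode below is derived from the
+      // packet: duration (samples per channel) times channels times bytes
+      // per sample.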
+ return decoder_->Decode(encoded, encoded_len, sample_rate_hz,
+ decoder_->PacketDuration(encoded, encoded_len) *
+ decoder_->Channels() * sizeof(int16_t),
+ decoded, speech_type);
+ }
+
+ private:
+ AudioDecoder* const decoder_;
+ };
+
AudioDecoderPcmU decoder(1);
+ DecodeForwarder decode_forwarder(&decoder);
MockAudioDecoder mock_decoder;
// Set expectations on the mock decoder and also delegate the calls to the
// real decoder.
@@ -1010,9 +1007,9 @@ TEST_F(AcmReceiverBitExactnessOldApi,
EXPECT_CALL(mock_decoder, Channels())
.Times(AtLeast(1))
.WillRepeatedly(Invoke(&decoder, &AudioDecoderPcmU::Channels));
- EXPECT_CALL(mock_decoder, Decode(_, _, _, _, _, _))
+ EXPECT_CALL(mock_decoder, DecodeInternal(_, _, _, _, _))
.Times(AtLeast(1))
- .WillRepeatedly(Invoke(&decoder, &AudioDecoderPcmU::Decode));
+ .WillRepeatedly(Invoke(&decode_forwarder, &DecodeForwarder::Decode));
EXPECT_CALL(mock_decoder, HasDecodePlc())
.Times(AtLeast(1))
.WillRepeatedly(Invoke(&decoder, &AudioDecoderPcmU::HasDecodePlc));
@@ -1024,16 +1021,19 @@ TEST_F(AcmReceiverBitExactnessOldApi,
ed.external_decoder = &mock_decoder;
ed.sample_rate_hz = 8000;
ed.num_channels = 1;
+ ed.name = "MockPCMU";
std::vector<ExternalDecoder> external_decoders;
external_decoders.push_back(ed);
- Run(48000, PlatformChecksum("aa7c232f63a67b2a72703593bdd172e0",
+ Run(48000, PlatformChecksum("4ee2730fa1daae755e8a8fd3abd779ec",
+ "aa7c232f63a67b2a72703593bdd172e0",
"0155665e93067c4e89256b944dd11999",
- "4ee2730fa1daae755e8a8fd3abd779ec"),
+ "fc4f0da8844cd808d822bbddf3b9c285"),
external_decoders);
EXPECT_CALL(mock_decoder, Die());
}
+#endif
// This test verifies bit exactness for the send-side of ACM. The test setup is
// a chain of three different test classes:
@@ -1138,6 +1138,9 @@ class AcmSenderBitExactnessOldApi : public ::testing::Test,
// Verify number of packets produced.
EXPECT_EQ(expected_packets, packet_count_);
+
+ // Delete the output file.
+ remove(output_file_name.c_str());
}
// Returns a pointer to the next packet. Returns NULL if the source is
@@ -1209,65 +1212,57 @@ class AcmSenderBitExactnessOldApi : public ::testing::Test,
rtc::Md5Digest payload_checksum_;
};
-// Fails Android ARM64. https://code.google.com/p/webrtc/issues/detail?id=4199
-#if defined(WEBRTC_ANDROID) && defined(WEBRTC_ARCH_ARM64)
-#define MAYBE_IsacWb30ms DISABLED_IsacWb30ms
-#else
-#define MAYBE_IsacWb30ms IsacWb30ms
-#endif
-TEST_F(AcmSenderBitExactnessOldApi, IF_ISAC(MAYBE_IsacWb30ms)) {
+#if defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)
+TEST_F(AcmSenderBitExactnessOldApi, IsacWb30ms) {
ASSERT_NO_FATAL_FAILURE(SetUpTest("ISAC", 16000, 1, 103, 480, 480));
Run(AcmReceiverBitExactnessOldApi::PlatformChecksum(
+ "0b58f9eeee43d5891f5f6c75e77984a3",
"c7e5bdadfa2871df95639fcc297cf23d",
"0499ca260390769b3172136faad925b9",
- "0b58f9eeee43d5891f5f6c75e77984a3"),
+ "866abf524acd2807efbe65e133c23f95"),
AcmReceiverBitExactnessOldApi::PlatformChecksum(
+ "3c79f16f34218271f3dca4e2b1dfe1bb",
"d42cb5195463da26c8129bbfe73a22e6",
"83de248aea9c3c2bd680b6952401b4ca",
"3c79f16f34218271f3dca4e2b1dfe1bb"),
- 33,
- test::AcmReceiveTestOldApi::kMonoOutput);
+ 33, test::AcmReceiveTestOldApi::kMonoOutput);
}
-// Fails Android ARM64. https://code.google.com/p/webrtc/issues/detail?id=4199
-#if defined(WEBRTC_ANDROID) && defined(WEBRTC_ARCH_ARM64)
-#define MAYBE_IsacWb60ms DISABLED_IsacWb60ms
-#else
-#define MAYBE_IsacWb60ms IsacWb60ms
-#endif
-TEST_F(AcmSenderBitExactnessOldApi, IF_ISAC(MAYBE_IsacWb60ms)) {
+TEST_F(AcmSenderBitExactnessOldApi, IsacWb60ms) {
ASSERT_NO_FATAL_FAILURE(SetUpTest("ISAC", 16000, 1, 103, 960, 960));
Run(AcmReceiverBitExactnessOldApi::PlatformChecksum(
+ "1ad29139a04782a33daad8c2b9b35875",
"14d63c5f08127d280e722e3191b73bdd",
"8da003e16c5371af2dc2be79a50f9076",
- "1ad29139a04782a33daad8c2b9b35875"),
+ "ef75e900e6f375e3061163c53fd09a63"),
AcmReceiverBitExactnessOldApi::PlatformChecksum(
+ "9e0a0ab743ad987b55b8e14802769c56",
"ebe04a819d3a9d83a83a17f271e1139a",
"97aeef98553b5a4b5a68f8b716e8eaf0",
"9e0a0ab743ad987b55b8e14802769c56"),
- 16,
- test::AcmReceiveTestOldApi::kMonoOutput);
+ 16, test::AcmReceiveTestOldApi::kMonoOutput);
}
+#endif
-#ifdef WEBRTC_CODEC_ISAC
-#define IF_ISAC_FLOAT(x) x
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_IsacSwb30ms DISABLED_IsacSwb30ms
#else
-#define IF_ISAC_FLOAT(x) DISABLED_##x
+#define MAYBE_IsacSwb30ms IsacSwb30ms
#endif
-
-TEST_F(AcmSenderBitExactnessOldApi,
- DISABLED_ON_ANDROID(IF_ISAC_FLOAT(IsacSwb30ms))) {
+#if defined(WEBRTC_CODEC_ISAC)
+TEST_F(AcmSenderBitExactnessOldApi, MAYBE_IsacSwb30ms) {
ASSERT_NO_FATAL_FAILURE(SetUpTest("ISAC", 32000, 1, 104, 960, 960));
Run(AcmReceiverBitExactnessOldApi::PlatformChecksum(
- "2b3c387d06f00b7b7aad4c9be56fb83d",
- "",
- "5683b58da0fbf2063c7adc2e6bfb3fb8"),
+ "5683b58da0fbf2063c7adc2e6bfb3fb8",
+ "2b3c387d06f00b7b7aad4c9be56fb83d", "android_arm32_audio",
+ "android_arm64_audio"),
AcmReceiverBitExactnessOldApi::PlatformChecksum(
- "bcc2041e7744c7ebd9f701866856849c",
- "",
- "ce86106a93419aefb063097108ec94ab"),
+ "ce86106a93419aefb063097108ec94ab",
+ "bcc2041e7744c7ebd9f701866856849c", "android_arm32_payload",
+ "android_arm64_payload"),
33, test::AcmReceiveTestOldApi::kMonoOutput);
}
+#endif
TEST_F(AcmSenderBitExactnessOldApi, Pcm16_8000khz_10ms) {
ASSERT_NO_FATAL_FAILURE(SetUpTest("L16", 8000, 1, 107, 80, 80));
@@ -1349,101 +1344,96 @@ TEST_F(AcmSenderBitExactnessOldApi, Pcma_stereo_20ms) {
test::AcmReceiveTestOldApi::kStereoOutput);
}
-#ifdef WEBRTC_CODEC_ILBC
-#define IF_ILBC(x) x
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_Ilbc_30ms DISABLED_Ilbc_30ms
#else
-#define IF_ILBC(x) DISABLED_##x
+#define MAYBE_Ilbc_30ms Ilbc_30ms
#endif
-
-TEST_F(AcmSenderBitExactnessOldApi, DISABLED_ON_ANDROID(IF_ILBC(Ilbc_30ms))) {
+#if defined(WEBRTC_CODEC_ILBC)
+TEST_F(AcmSenderBitExactnessOldApi, MAYBE_Ilbc_30ms) {
ASSERT_NO_FATAL_FAILURE(SetUpTest("ILBC", 8000, 1, 102, 240, 240));
Run(AcmReceiverBitExactnessOldApi::PlatformChecksum(
"7b6ec10910debd9af08011d3ed5249f7",
- "android_audio",
- "7b6ec10910debd9af08011d3ed5249f7"),
+ "7b6ec10910debd9af08011d3ed5249f7", "android_arm32_audio",
+ "android_arm64_audio"),
AcmReceiverBitExactnessOldApi::PlatformChecksum(
"cfae2e9f6aba96e145f2bcdd5050ce78",
- "android_payload",
- "cfae2e9f6aba96e145f2bcdd5050ce78"),
- 33,
- test::AcmReceiveTestOldApi::kMonoOutput);
+ "cfae2e9f6aba96e145f2bcdd5050ce78", "android_arm32_payload",
+ "android_arm64_payload"),
+ 33, test::AcmReceiveTestOldApi::kMonoOutput);
}
+#endif
-#ifdef WEBRTC_CODEC_G722
-#define IF_G722(x) x
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_G722_20ms DISABLED_G722_20ms
#else
-#define IF_G722(x) DISABLED_##x
+#define MAYBE_G722_20ms G722_20ms
#endif
-
-TEST_F(AcmSenderBitExactnessOldApi, DISABLED_ON_ANDROID(IF_G722(G722_20ms))) {
+#if defined(WEBRTC_CODEC_G722)
+TEST_F(AcmSenderBitExactnessOldApi, MAYBE_G722_20ms) {
ASSERT_NO_FATAL_FAILURE(SetUpTest("G722", 16000, 1, 9, 320, 160));
Run(AcmReceiverBitExactnessOldApi::PlatformChecksum(
"7d759436f2533582950d148b5161a36c",
- "android_audio",
- "7d759436f2533582950d148b5161a36c"),
+ "7d759436f2533582950d148b5161a36c", "android_arm32_audio",
+ "android_arm64_audio"),
AcmReceiverBitExactnessOldApi::PlatformChecksum(
"fc68a87e1380614e658087cb35d5ca10",
- "android_payload",
- "fc68a87e1380614e658087cb35d5ca10"),
- 50,
- test::AcmReceiveTestOldApi::kMonoOutput);
+ "fc68a87e1380614e658087cb35d5ca10", "android_arm32_payload",
+ "android_arm64_payload"),
+ 50, test::AcmReceiveTestOldApi::kMonoOutput);
}
+#endif
-TEST_F(AcmSenderBitExactnessOldApi,
- DISABLED_ON_ANDROID(IF_G722(G722_stereo_20ms))) {
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_G722_stereo_20ms DISABLED_G722_stereo_20ms
+#else
+#define MAYBE_G722_stereo_20ms G722_stereo_20ms
+#endif
+#if defined(WEBRTC_CODEC_G722)
+TEST_F(AcmSenderBitExactnessOldApi, MAYBE_G722_stereo_20ms) {
ASSERT_NO_FATAL_FAILURE(SetUpTest("G722", 16000, 2, 119, 320, 160));
Run(AcmReceiverBitExactnessOldApi::PlatformChecksum(
"7190ee718ab3d80eca181e5f7140c210",
- "android_audio",
- "7190ee718ab3d80eca181e5f7140c210"),
+ "7190ee718ab3d80eca181e5f7140c210", "android_arm32_audio",
+ "android_arm64_audio"),
AcmReceiverBitExactnessOldApi::PlatformChecksum(
"66516152eeaa1e650ad94ff85f668dac",
- "android_payload",
- "66516152eeaa1e650ad94ff85f668dac"),
- 50,
- test::AcmReceiveTestOldApi::kStereoOutput);
+ "66516152eeaa1e650ad94ff85f668dac", "android_arm32_payload",
+ "android_arm64_payload"),
+ 50, test::AcmReceiveTestOldApi::kStereoOutput);
}
-
-// Fails Android ARM64. https://code.google.com/p/webrtc/issues/detail?id=4199
-#if defined(WEBRTC_ANDROID) && defined(WEBRTC_ARCH_ARM64)
-#define MAYBE_Opus_stereo_20ms DISABLED_Opus_stereo_20ms
-#else
-#define MAYBE_Opus_stereo_20ms Opus_stereo_20ms
#endif
-TEST_F(AcmSenderBitExactnessOldApi, MAYBE_Opus_stereo_20ms) {
+
+TEST_F(AcmSenderBitExactnessOldApi, Opus_stereo_20ms) {
ASSERT_NO_FATAL_FAILURE(SetUpTest("opus", 48000, 2, 120, 960, 960));
Run(AcmReceiverBitExactnessOldApi::PlatformChecksum(
"855041f2490b887302bce9d544731849",
+ "855041f2490b887302bce9d544731849",
"1e1a0fce893fef2d66886a7f09e2ebce",
- "855041f2490b887302bce9d544731849"),
+ "7417a66c28be42d5d9b2d64e0c191585"),
AcmReceiverBitExactnessOldApi::PlatformChecksum(
"d781cce1ab986b618d0da87226cdde30",
+ "d781cce1ab986b618d0da87226cdde30",
"1a1fe04dd12e755949987c8d729fb3e0",
- "d781cce1ab986b618d0da87226cdde30"),
- 50,
- test::AcmReceiveTestOldApi::kStereoOutput);
+ "47b0b04f1d03076b857c86c72c2c298b"),
+ 50, test::AcmReceiveTestOldApi::kStereoOutput);
}
-// Fails Android ARM64. https://code.google.com/p/webrtc/issues/detail?id=4199
-#if defined(WEBRTC_ANDROID) && defined(WEBRTC_ARCH_ARM64)
-#define MAYBE_Opus_stereo_20ms_voip DISABLED_Opus_stereo_20ms_voip
-#else
-#define MAYBE_Opus_stereo_20ms_voip Opus_stereo_20ms_voip
-#endif
-TEST_F(AcmSenderBitExactnessOldApi, MAYBE_Opus_stereo_20ms_voip) {
+TEST_F(AcmSenderBitExactnessOldApi, Opus_stereo_20ms_voip) {
ASSERT_NO_FATAL_FAILURE(SetUpTest("opus", 48000, 2, 120, 960, 960));
// If not set, default will be kAudio in case of stereo.
EXPECT_EQ(0, send_test_->acm()->SetOpusApplication(kVoip));
Run(AcmReceiverBitExactnessOldApi::PlatformChecksum(
"9b9e12bc3cc793740966e11cbfa8b35b",
+ "9b9e12bc3cc793740966e11cbfa8b35b",
"57412a4b5771d19ff03ec35deffe7067",
- "9b9e12bc3cc793740966e11cbfa8b35b"),
+ "7ad0bbefcaa87e23187bf4a56d2f3513"),
AcmReceiverBitExactnessOldApi::PlatformChecksum(
"c7340b1189652ab6b5e80dade7390cb4",
+ "c7340b1189652ab6b5e80dade7390cb4",
"cdfe85939c411d12b61701c566e22d26",
- "c7340b1189652ab6b5e80dade7390cb4"),
- 50,
- test::AcmReceiveTestOldApi::kStereoOutput);
+ "7a678fbe46df5bf0c67e88264a2d9275"),
+ 50, test::AcmReceiveTestOldApi::kStereoOutput);
}
// This test is for verifying the SetBitRate function. The bitrate is changed at
@@ -1528,7 +1518,12 @@ TEST_F(AcmSetBitRateOldApi, Opus_48khz_20ms_50kbps) {
// The result on the Android platforms is inconsistent for this test case.
// On android_rel the result differs from android and android_arm64_rel.
-TEST_F(AcmSetBitRateOldApi, DISABLED_ON_ANDROID(Opus_48khz_20ms_100kbps)) {
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_Opus_48khz_20ms_100kbps DISABLED_Opus_48khz_20ms_100kbps
+#else
+#define MAYBE_Opus_48khz_20ms_100kbps Opus_48khz_20ms_100kbps
+#endif
+TEST_F(AcmSetBitRateOldApi, MAYBE_Opus_48khz_20ms_100kbps) {
ASSERT_NO_FATAL_FAILURE(SetUpTest("opus", 48000, 1, 107, 960, 960));
Run(100000, 100888);
}
@@ -1657,16 +1652,15 @@ TEST_F(AcmSenderBitExactnessOldApi, External_Pcmu_20ms) {
.Times(AtLeast(1))
.WillRepeatedly(
Invoke(&encoder, &AudioEncoderPcmU::Num10MsFramesInNextPacket));
- EXPECT_CALL(mock_encoder, Max10MsFramesInAPacket())
- .Times(AtLeast(1))
- .WillRepeatedly(
- Invoke(&encoder, &AudioEncoderPcmU::Max10MsFramesInAPacket));
EXPECT_CALL(mock_encoder, GetTargetBitrate())
.Times(AtLeast(1))
.WillRepeatedly(Invoke(&encoder, &AudioEncoderPcmU::GetTargetBitrate));
EXPECT_CALL(mock_encoder, EncodeInternal(_, _, _, _))
.Times(AtLeast(1))
.WillRepeatedly(Invoke(&encoder, &AudioEncoderPcmU::EncodeInternal));
+ EXPECT_CALL(mock_encoder, SetFec(_))
+ .Times(AtLeast(1))
+ .WillRepeatedly(Invoke(&encoder, &AudioEncoderPcmU::SetFec));
ASSERT_NO_FATAL_FAILURE(
SetUpTestExternalEncoder(&mock_encoder, codec_inst.pltype));
Run("81a9d4c0bb72e9becc43aef124c981e9", "8f9b8750bd80fe26b6cbf6659b89f0f9",
@@ -1724,6 +1718,9 @@ class AcmSwitchingOutputFrequencyOldApi : public ::testing::Test,
// This is where the actual test is executed.
receive_test.Run();
+
+ // Delete output file.
+ remove(output_file_name.c_str());
}
// Inherited from test::PacketSource.
diff --git a/webrtc/modules/audio_coding/main/acm2/call_statistics.cc b/webrtc/modules/audio_coding/acm2/call_statistics.cc
index 4c3e9fc393..4441932c8c 100644
--- a/webrtc/modules/audio_coding/main/acm2/call_statistics.cc
+++ b/webrtc/modules/audio_coding/acm2/call_statistics.cc
@@ -8,7 +8,7 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/audio_coding/main/acm2/call_statistics.h"
+#include "webrtc/modules/audio_coding/acm2/call_statistics.h"
#include <assert.h>
diff --git a/webrtc/modules/audio_coding/main/acm2/call_statistics.h b/webrtc/modules/audio_coding/acm2/call_statistics.h
index 2aece0ff40..888afea0a7 100644
--- a/webrtc/modules/audio_coding/main/acm2/call_statistics.h
+++ b/webrtc/modules/audio_coding/acm2/call_statistics.h
@@ -8,11 +8,11 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_CALL_STATISTICS_H_
-#define WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_CALL_STATISTICS_H_
+#ifndef WEBRTC_MODULES_AUDIO_CODING_ACM2_CALL_STATISTICS_H_
+#define WEBRTC_MODULES_AUDIO_CODING_ACM2_CALL_STATISTICS_H_
#include "webrtc/common_types.h"
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
//
// This class is for bookkeeping of calls to ACM. It is not useful to log API
@@ -60,4 +60,4 @@ class CallStatistics {
} // namespace webrtc
-#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_CALL_STATISTICS_H_
+#endif // WEBRTC_MODULES_AUDIO_CODING_ACM2_CALL_STATISTICS_H_
diff --git a/webrtc/modules/audio_coding/main/acm2/call_statistics_unittest.cc b/webrtc/modules/audio_coding/acm2/call_statistics_unittest.cc
index 2bee96465d..9ba0774ce1 100644
--- a/webrtc/modules/audio_coding/main/acm2/call_statistics_unittest.cc
+++ b/webrtc/modules/audio_coding/acm2/call_statistics_unittest.cc
@@ -9,7 +9,7 @@
*/
#include "testing/gtest/include/gtest/gtest.h"
-#include "webrtc/modules/audio_coding/main/acm2/call_statistics.h"
+#include "webrtc/modules/audio_coding/acm2/call_statistics.h"
namespace webrtc {
diff --git a/webrtc/modules/audio_coding/acm2/codec_manager.cc b/webrtc/modules/audio_coding/acm2/codec_manager.cc
new file mode 100644
index 0000000000..ad67377d42
--- /dev/null
+++ b/webrtc/modules/audio_coding/acm2/codec_manager.cc
@@ -0,0 +1,194 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_coding/acm2/codec_manager.h"
+
+#include "webrtc/base/checks.h"
+#include "webrtc/base/format_macros.h"
+#include "webrtc/engine_configurations.h"
+#include "webrtc/modules/audio_coding/acm2/rent_a_codec.h"
+#include "webrtc/system_wrappers/include/trace.h"
+
+namespace webrtc {
+namespace acm2 {
+
+namespace {
+
+// Check if the given codec is valid to be registered as a send codec.
+int IsValidSendCodec(const CodecInst& send_codec) {
+ int dummy_id = 0;
+ if ((send_codec.channels != 1) && (send_codec.channels != 2)) {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, dummy_id,
+ "Wrong number of channels (%" PRIuS ", only mono and stereo "
+ "are supported)",
+ send_codec.channels);
+ return -1;
+ }
+
+ auto maybe_codec_id = RentACodec::CodecIdByInst(send_codec);
+ if (!maybe_codec_id) {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, dummy_id,
+ "Invalid codec setting for the send codec.");
+ return -1;
+ }
+
+ // Telephone-event cannot be a send codec.
+ if (!STR_CASE_CMP(send_codec.plname, "telephone-event")) {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, dummy_id,
+ "telephone-event cannot be a send codec");
+ return -1;
+ }
+
+ if (!RentACodec::IsSupportedNumChannels(*maybe_codec_id, send_codec.channels)
+ .value_or(false)) {
+    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, dummy_id,
+                 "%" PRIuS " number of channels not supported for %s.",
+ send_codec.channels, send_codec.plname);
+ return -1;
+ }
+ return RentACodec::CodecIndexFromId(*maybe_codec_id).value_or(-1);
+}
+
+bool IsOpus(const CodecInst& codec) {
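+  // Without WEBRTC_CODEC_OPUS, the expression below reduces to plain `false`,
+  // so the function compiles (and returns false) either way.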
+ return
+#ifdef WEBRTC_CODEC_OPUS
+ !STR_CASE_CMP(codec.plname, "opus") ||
+#endif
+ false;
+}
+
+} // namespace
+
+CodecManager::CodecManager() {
+ thread_checker_.DetachFromThread();
+}
+
+CodecManager::~CodecManager() = default;
+
+bool CodecManager::RegisterEncoder(const CodecInst& send_codec) {
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ int codec_id = IsValidSendCodec(send_codec);
+
+ // Check for reported errors from function IsValidSendCodec().
+ if (codec_id < 0) {
+ return false;
+ }
+
+ int dummy_id = 0;
+ switch (RentACodec::RegisterRedPayloadType(
+ &codec_stack_params_.red_payload_types, send_codec)) {
+ case RentACodec::RegistrationResult::kOk:
+ return true;
+ case RentACodec::RegistrationResult::kBadFreq:
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, dummy_id,
+ "RegisterSendCodec() failed, invalid frequency for RED"
+ " registration");
+ return false;
+ case RentACodec::RegistrationResult::kSkip:
+ break;
+ }
+ switch (RentACodec::RegisterCngPayloadType(
+ &codec_stack_params_.cng_payload_types, send_codec)) {
+ case RentACodec::RegistrationResult::kOk:
+ return true;
+ case RentACodec::RegistrationResult::kBadFreq:
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, dummy_id,
+ "RegisterSendCodec() failed, invalid frequency for CNG"
+ " registration");
+ return false;
+ case RentACodec::RegistrationResult::kSkip:
+ break;
+ }
+
+ if (IsOpus(send_codec)) {
+ // VAD/DTX not supported.
+ codec_stack_params_.use_cng = false;
+ }
+
+ send_codec_inst_ = rtc::Optional<CodecInst>(send_codec);
+ codec_stack_params_.speech_encoder = nullptr; // Caller must recreate it.
+ return true;
+}
+
+CodecInst CodecManager::ForgeCodecInst(
+ const AudioEncoder* external_speech_encoder) {
+ CodecInst ci;
+ ci.channels = external_speech_encoder->NumChannels();
+ ci.plfreq = external_speech_encoder->SampleRateHz();
+ ci.pacsize = rtc::CheckedDivExact(
+ static_cast<int>(external_speech_encoder->Max10MsFramesInAPacket() *
+ ci.plfreq),
+ 100);
+ ci.pltype = -1; // Not valid.
+ ci.rate = -1; // Not valid.
+ static const char kName[] = "external";
+ memcpy(ci.plname, kName, sizeof(kName));
+ return ci;
+}
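+// Example: an external encoder reporting SampleRateHz() == 16000 and
+// Max10MsFramesInAPacket() == 2 gets pacsize == 2 * 16000 / 100 == 320
+// samples, i.e. 20 ms per packet.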
+
+bool CodecManager::SetCopyRed(bool enable) {
+ if (enable && codec_stack_params_.use_codec_fec) {
+ WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioCoding, 0,
+ "Codec internal FEC and RED cannot be co-enabled.");
+ return false;
+ }
+ if (enable && send_codec_inst_ &&
+ codec_stack_params_.red_payload_types.count(send_codec_inst_->plfreq) <
+ 1) {
+ WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioCoding, 0,
+ "Cannot enable RED at %i Hz.", send_codec_inst_->plfreq);
+ return false;
+ }
+ codec_stack_params_.use_red = enable;
+ return true;
+}
+
+bool CodecManager::SetVAD(bool enable, ACMVADMode mode) {
+ // Sanity check of the mode.
+ RTC_DCHECK(mode == VADNormal || mode == VADLowBitrate || mode == VADAggr ||
+ mode == VADVeryAggr);
+
+ // Check that the send codec is mono. We don't support VAD/DTX for stereo
+ // sending.
+ const bool stereo_send =
+ codec_stack_params_.speech_encoder
+ ? (codec_stack_params_.speech_encoder->NumChannels() != 1)
+ : false;
+ if (enable && stereo_send) {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, 0,
+ "VAD/DTX not supported for stereo sending");
+ return false;
+ }
+
+ // TODO(kwiberg): This doesn't protect Opus when injected as an external
+ // encoder.
+ if (send_codec_inst_ && IsOpus(*send_codec_inst_)) {
+ // VAD/DTX not supported, but don't fail.
+ enable = false;
+ }
+
+ codec_stack_params_.use_cng = enable;
+ codec_stack_params_.vad_mode = mode;
+ return true;
+}
+
+bool CodecManager::SetCodecFEC(bool enable_codec_fec) {
+ if (enable_codec_fec && codec_stack_params_.use_red) {
+ WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioCoding, 0,
+ "Codec internal FEC and RED cannot be co-enabled.");
+ return false;
+ }
+
+ codec_stack_params_.use_codec_fec = enable_codec_fec;
+ return true;
+}
+
+} // namespace acm2
+} // namespace webrtc
diff --git a/webrtc/modules/audio_coding/acm2/codec_manager.h b/webrtc/modules/audio_coding/acm2/codec_manager.h
new file mode 100644
index 0000000000..9227e13f09
--- /dev/null
+++ b/webrtc/modules/audio_coding/acm2/codec_manager.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_CODING_ACM2_CODEC_MANAGER_H_
+#define WEBRTC_MODULES_AUDIO_CODING_ACM2_CODEC_MANAGER_H_
+
+#include <map>
+
+#include "webrtc/base/constructormagic.h"
+#include "webrtc/base/optional.h"
+#include "webrtc/base/scoped_ptr.h"
+#include "webrtc/base/thread_checker.h"
+#include "webrtc/modules/audio_coding/acm2/rent_a_codec.h"
+#include "webrtc/modules/audio_coding/include/audio_coding_module_typedefs.h"
+#include "webrtc/common_types.h"
+
+namespace webrtc {
+
+class AudioDecoder;
+class AudioEncoder;
+
+namespace acm2 {
+
+class CodecManager final {
+ public:
+ CodecManager();
+ ~CodecManager();
+
+ // Parses the given specification. On success, returns true and updates the
+ // stored CodecInst and stack parameters; on error, returns false.
+ bool RegisterEncoder(const CodecInst& send_codec);
+
+ static CodecInst ForgeCodecInst(const AudioEncoder* external_speech_encoder);
+
+ const CodecInst* GetCodecInst() const {
+ return send_codec_inst_ ? &*send_codec_inst_ : nullptr;
+ }
+ const RentACodec::StackParameters* GetStackParams() const {
+ return &codec_stack_params_;
+ }
+ RentACodec::StackParameters* GetStackParams() { return &codec_stack_params_; }
+
+ bool SetCopyRed(bool enable);
+
+ bool SetVAD(bool enable, ACMVADMode mode);
+
+ bool SetCodecFEC(bool enable_codec_fec);
+
+ private:
+ rtc::ThreadChecker thread_checker_;
+ rtc::Optional<CodecInst> send_codec_inst_;
+ RentACodec::StackParameters codec_stack_params_;
+
+ RTC_DISALLOW_COPY_AND_ASSIGN(CodecManager);
+};
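+
+// Usage sketch (the CodecInst values are illustrative; see
+// codec_manager_unittest.cc for a complete example):
+//   acm2::CodecManager cm;
+//   acm2::RentACodec rac;
+//   CodecInst ci = {107, "L16", 8000, 80, 1, 128000};
+//   if (cm.RegisterEncoder(ci)) {
+//     cm.GetStackParams()->speech_encoder = rac.RentEncoder(ci);
+//     AudioEncoder* stack = rac.RentEncoderStack(cm.GetStackParams());
+//   }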
+
+} // namespace acm2
+} // namespace webrtc
+#endif // WEBRTC_MODULES_AUDIO_CODING_ACM2_CODEC_MANAGER_H_
diff --git a/webrtc/modules/audio_coding/acm2/codec_manager_unittest.cc b/webrtc/modules/audio_coding/acm2/codec_manager_unittest.cc
new file mode 100644
index 0000000000..dce8f38842
--- /dev/null
+++ b/webrtc/modules/audio_coding/acm2/codec_manager_unittest.cc
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/modules/audio_coding/codecs/mock/mock_audio_encoder.h"
+#include "webrtc/modules/audio_coding/acm2/codec_manager.h"
+#include "webrtc/modules/audio_coding/acm2/rent_a_codec.h"
+
+namespace webrtc {
+namespace acm2 {
+
+using ::testing::Return;
+
+namespace {
+
+// Create a MockAudioEncoder with some reasonable default behavior.
+rtc::scoped_ptr<MockAudioEncoder> CreateMockEncoder() {
+ auto enc = rtc_make_scoped_ptr(new MockAudioEncoder);
+ EXPECT_CALL(*enc, SampleRateHz()).WillRepeatedly(Return(8000));
+ EXPECT_CALL(*enc, NumChannels()).WillRepeatedly(Return(1));
+ EXPECT_CALL(*enc, Max10MsFramesInAPacket()).WillRepeatedly(Return(1));
+ EXPECT_CALL(*enc, Die());
+ return enc;
+}
+
+} // namespace
+
+TEST(CodecManagerTest, ExternalEncoderFec) {
+ auto enc0 = CreateMockEncoder();
+ auto enc1 = CreateMockEncoder();
+ {
+ ::testing::InSequence s;
+ EXPECT_CALL(*enc0, SetFec(false)).WillOnce(Return(true));
+ EXPECT_CALL(*enc0, Mark("A"));
+ EXPECT_CALL(*enc0, SetFec(true)).WillOnce(Return(true));
+ EXPECT_CALL(*enc1, SetFec(true)).WillOnce(Return(true));
+ EXPECT_CALL(*enc1, SetFec(false)).WillOnce(Return(true));
+ EXPECT_CALL(*enc0, Mark("B"));
+ EXPECT_CALL(*enc0, SetFec(false)).WillOnce(Return(true));
+ }
+
+ CodecManager cm;
+ RentACodec rac;
+ EXPECT_FALSE(cm.GetStackParams()->use_codec_fec);
+ cm.GetStackParams()->speech_encoder = enc0.get();
+ EXPECT_TRUE(rac.RentEncoderStack(cm.GetStackParams()));
+ EXPECT_FALSE(cm.GetStackParams()->use_codec_fec);
+ enc0->Mark("A");
+ EXPECT_EQ(true, cm.SetCodecFEC(true));
+ EXPECT_TRUE(rac.RentEncoderStack(cm.GetStackParams()));
+ EXPECT_TRUE(cm.GetStackParams()->use_codec_fec);
+ cm.GetStackParams()->speech_encoder = enc1.get();
+ EXPECT_TRUE(rac.RentEncoderStack(cm.GetStackParams()));
+ EXPECT_TRUE(cm.GetStackParams()->use_codec_fec);
+
+ EXPECT_EQ(true, cm.SetCodecFEC(false));
+ EXPECT_TRUE(rac.RentEncoderStack(cm.GetStackParams()));
+ enc0->Mark("B");
+ EXPECT_FALSE(cm.GetStackParams()->use_codec_fec);
+ cm.GetStackParams()->speech_encoder = enc0.get();
+ EXPECT_TRUE(rac.RentEncoderStack(cm.GetStackParams()));
+ EXPECT_FALSE(cm.GetStackParams()->use_codec_fec);
+}
+
+} // namespace acm2
+} // namespace webrtc
diff --git a/webrtc/modules/audio_coding/main/acm2/initial_delay_manager.cc b/webrtc/modules/audio_coding/acm2/initial_delay_manager.cc
index 786fb2e527..0c31b83eb3 100644
--- a/webrtc/modules/audio_coding/main/acm2/initial_delay_manager.cc
+++ b/webrtc/modules/audio_coding/acm2/initial_delay_manager.cc
@@ -8,7 +8,7 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/audio_coding/main/acm2/initial_delay_manager.h"
+#include "webrtc/modules/audio_coding/acm2/initial_delay_manager.h"
namespace webrtc {
diff --git a/webrtc/modules/audio_coding/main/acm2/initial_delay_manager.h b/webrtc/modules/audio_coding/acm2/initial_delay_manager.h
index c6942ec285..32dd1260f1 100644
--- a/webrtc/modules/audio_coding/main/acm2/initial_delay_manager.h
+++ b/webrtc/modules/audio_coding/acm2/initial_delay_manager.h
@@ -8,11 +8,11 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_INITIAL_DELAY_MANAGER_H_
-#define WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_INITIAL_DELAY_MANAGER_H_
+#ifndef WEBRTC_MODULES_AUDIO_CODING_ACM2_INITIAL_DELAY_MANAGER_H_
+#define WEBRTC_MODULES_AUDIO_CODING_ACM2_INITIAL_DELAY_MANAGER_H_
#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
namespace webrtc {
@@ -117,4 +117,4 @@ class InitialDelayManager {
} // namespace webrtc
-#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_INITIAL_DELAY_MANAGER_H_
+#endif // WEBRTC_MODULES_AUDIO_CODING_ACM2_INITIAL_DELAY_MANAGER_H_
diff --git a/webrtc/modules/audio_coding/main/acm2/initial_delay_manager_unittest.cc b/webrtc/modules/audio_coding/acm2/initial_delay_manager_unittest.cc
index e973593eb4..d86d221851 100644
--- a/webrtc/modules/audio_coding/main/acm2/initial_delay_manager_unittest.cc
+++ b/webrtc/modules/audio_coding/acm2/initial_delay_manager_unittest.cc
@@ -11,7 +11,7 @@
#include <string.h>
#include "testing/gtest/include/gtest/gtest.h"
-#include "webrtc/modules/audio_coding/main/acm2/initial_delay_manager.h"
+#include "webrtc/modules/audio_coding/acm2/initial_delay_manager.h"
namespace webrtc {
diff --git a/webrtc/modules/audio_coding/acm2/rent_a_codec.cc b/webrtc/modules/audio_coding/acm2/rent_a_codec.cc
new file mode 100644
index 0000000000..5695fd6e08
--- /dev/null
+++ b/webrtc/modules/audio_coding/acm2/rent_a_codec.cc
@@ -0,0 +1,307 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_coding/acm2/rent_a_codec.h"
+
+#include <utility>
+
+#include "webrtc/base/logging.h"
+#include "webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng.h"
+#include "webrtc/modules/audio_coding/codecs/g711/audio_encoder_pcm.h"
+#ifdef WEBRTC_CODEC_G722
+#include "webrtc/modules/audio_coding/codecs/g722/audio_encoder_g722.h"
+#endif
+#ifdef WEBRTC_CODEC_ILBC
+#include "webrtc/modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.h"
+#endif
+#ifdef WEBRTC_CODEC_ISACFX
+#include "webrtc/modules/audio_coding/codecs/isac/fix/include/audio_decoder_isacfix.h"
+#include "webrtc/modules/audio_coding/codecs/isac/fix/include/audio_encoder_isacfix.h"
+#endif
+#ifdef WEBRTC_CODEC_ISAC
+#include "webrtc/modules/audio_coding/codecs/isac/main/include/audio_decoder_isac.h"
+#include "webrtc/modules/audio_coding/codecs/isac/main/include/audio_encoder_isac.h"
+#endif
+#ifdef WEBRTC_CODEC_OPUS
+#include "webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.h"
+#endif
+#include "webrtc/modules/audio_coding/codecs/pcm16b/audio_encoder_pcm16b.h"
+#ifdef WEBRTC_CODEC_RED
+#include "webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.h"
+#endif
+#include "webrtc/modules/audio_coding/acm2/acm_codec_database.h"
+#include "webrtc/modules/audio_coding/acm2/acm_common_defs.h"
+
+namespace webrtc {
+namespace acm2 {
+
+rtc::Optional<RentACodec::CodecId> RentACodec::CodecIdByParams(
+ const char* payload_name,
+ int sampling_freq_hz,
+ size_t channels) {
+ return CodecIdFromIndex(
+ ACMCodecDB::CodecId(payload_name, sampling_freq_hz, channels));
+}
+
+rtc::Optional<CodecInst> RentACodec::CodecInstById(CodecId codec_id) {
+ rtc::Optional<int> mi = CodecIndexFromId(codec_id);
+ return mi ? rtc::Optional<CodecInst>(Database()[*mi])
+ : rtc::Optional<CodecInst>();
+}
+
+rtc::Optional<RentACodec::CodecId> RentACodec::CodecIdByInst(
+ const CodecInst& codec_inst) {
+ return CodecIdFromIndex(ACMCodecDB::CodecNumber(codec_inst));
+}
+
+rtc::Optional<CodecInst> RentACodec::CodecInstByParams(const char* payload_name,
+ int sampling_freq_hz,
+ size_t channels) {
+ rtc::Optional<CodecId> codec_id =
+ CodecIdByParams(payload_name, sampling_freq_hz, channels);
+ if (!codec_id)
+ return rtc::Optional<CodecInst>();
+ rtc::Optional<CodecInst> ci = CodecInstById(*codec_id);
+ RTC_DCHECK(ci);
+
+ // Keep the number of channels from the function call. For most codecs it
+ // will be the same value as in default codec settings, but not for all.
+ ci->channels = channels;
+
+ return ci;
+}
+
+bool RentACodec::IsCodecValid(const CodecInst& codec_inst) {
+ return ACMCodecDB::CodecNumber(codec_inst) >= 0;
+}
+
+rtc::Optional<bool> RentACodec::IsSupportedNumChannels(CodecId codec_id,
+ size_t num_channels) {
+ auto i = CodecIndexFromId(codec_id);
+ return i ? rtc::Optional<bool>(
+ ACMCodecDB::codec_settings_[*i].channel_support >=
+ num_channels)
+ : rtc::Optional<bool>();
+}
+
+rtc::ArrayView<const CodecInst> RentACodec::Database() {
+ return rtc::ArrayView<const CodecInst>(ACMCodecDB::database_,
+ NumberOfCodecs());
+}
+
+rtc::Optional<NetEqDecoder> RentACodec::NetEqDecoderFromCodecId(
+ CodecId codec_id,
+ size_t num_channels) {
+ rtc::Optional<int> i = CodecIndexFromId(codec_id);
+ if (!i)
+ return rtc::Optional<NetEqDecoder>();
+ const NetEqDecoder ned = ACMCodecDB::neteq_decoders_[*i];
+ return rtc::Optional<NetEqDecoder>(
+ (ned == NetEqDecoder::kDecoderOpus && num_channels == 2)
+ ? NetEqDecoder::kDecoderOpus_2ch
+ : ned);
+}
+
+RentACodec::RegistrationResult RentACodec::RegisterCngPayloadType(
+ std::map<int, int>* pt_map,
+ const CodecInst& codec_inst) {
+ if (STR_CASE_CMP(codec_inst.plname, "CN") != 0)
+ return RegistrationResult::kSkip;
+ switch (codec_inst.plfreq) {
+ case 8000:
+ case 16000:
+ case 32000:
+ case 48000:
+ (*pt_map)[codec_inst.plfreq] = codec_inst.pltype;
+ return RegistrationResult::kOk;
+ default:
+ return RegistrationResult::kBadFreq;
+ }
+}
+
+RentACodec::RegistrationResult RentACodec::RegisterRedPayloadType(
+ std::map<int, int>* pt_map,
+ const CodecInst& codec_inst) {
+ if (STR_CASE_CMP(codec_inst.plname, "RED") != 0)
+ return RegistrationResult::kSkip;
+ switch (codec_inst.plfreq) {
+ case 8000:
+ (*pt_map)[codec_inst.plfreq] = codec_inst.pltype;
+ return RegistrationResult::kOk;
+ default:
+ return RegistrationResult::kBadFreq;
+ }
+}
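+// Example: a CodecInst with plname "CN", plfreq 16000, and pltype 98 adds
+// the mapping {16000 -> 98} to the CNG map and returns kOk; RED registration
+// accepts only plfreq 8000.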
+
+namespace {
+
+// Returns a new speech encoder, or null on error.
+// TODO(kwiberg): Don't handle errors here (bug 5033)
+rtc::scoped_ptr<AudioEncoder> CreateEncoder(
+ const CodecInst& speech_inst,
+ LockedIsacBandwidthInfo* bwinfo) {
+#if defined(WEBRTC_CODEC_ISACFX)
+ if (STR_CASE_CMP(speech_inst.plname, "isac") == 0)
+ return rtc_make_scoped_ptr(new AudioEncoderIsacFix(speech_inst, bwinfo));
+#endif
+#if defined(WEBRTC_CODEC_ISAC)
+ if (STR_CASE_CMP(speech_inst.plname, "isac") == 0)
+ return rtc_make_scoped_ptr(new AudioEncoderIsac(speech_inst, bwinfo));
+#endif
+#ifdef WEBRTC_CODEC_OPUS
+ if (STR_CASE_CMP(speech_inst.plname, "opus") == 0)
+ return rtc_make_scoped_ptr(new AudioEncoderOpus(speech_inst));
+#endif
+ if (STR_CASE_CMP(speech_inst.plname, "pcmu") == 0)
+ return rtc_make_scoped_ptr(new AudioEncoderPcmU(speech_inst));
+ if (STR_CASE_CMP(speech_inst.plname, "pcma") == 0)
+ return rtc_make_scoped_ptr(new AudioEncoderPcmA(speech_inst));
+ if (STR_CASE_CMP(speech_inst.plname, "l16") == 0)
+ return rtc_make_scoped_ptr(new AudioEncoderPcm16B(speech_inst));
+#ifdef WEBRTC_CODEC_ILBC
+ if (STR_CASE_CMP(speech_inst.plname, "ilbc") == 0)
+ return rtc_make_scoped_ptr(new AudioEncoderIlbc(speech_inst));
+#endif
+#ifdef WEBRTC_CODEC_G722
+ if (STR_CASE_CMP(speech_inst.plname, "g722") == 0)
+ return rtc_make_scoped_ptr(new AudioEncoderG722(speech_inst));
+#endif
+ LOG_F(LS_ERROR) << "Could not create encoder of type " << speech_inst.plname;
+ return rtc::scoped_ptr<AudioEncoder>();
+}
+
+rtc::scoped_ptr<AudioEncoder> CreateRedEncoder(AudioEncoder* encoder,
+ int red_payload_type) {
+#ifdef WEBRTC_CODEC_RED
+ AudioEncoderCopyRed::Config config;
+ config.payload_type = red_payload_type;
+ config.speech_encoder = encoder;
+ return rtc::scoped_ptr<AudioEncoder>(new AudioEncoderCopyRed(config));
+#else
+ return rtc::scoped_ptr<AudioEncoder>();
+#endif
+}
+
+rtc::scoped_ptr<AudioEncoder> CreateCngEncoder(AudioEncoder* encoder,
+ int payload_type,
+ ACMVADMode vad_mode) {
+ AudioEncoderCng::Config config;
+ config.num_channels = encoder->NumChannels();
+ config.payload_type = payload_type;
+ config.speech_encoder = encoder;
+ switch (vad_mode) {
+ case VADNormal:
+ config.vad_mode = Vad::kVadNormal;
+ break;
+ case VADLowBitrate:
+ config.vad_mode = Vad::kVadLowBitrate;
+ break;
+ case VADAggr:
+ config.vad_mode = Vad::kVadAggressive;
+ break;
+ case VADVeryAggr:
+ config.vad_mode = Vad::kVadVeryAggressive;
+ break;
+ default:
+ FATAL();
+ }
+ return rtc::scoped_ptr<AudioEncoder>(new AudioEncoderCng(config));
+}
+
+rtc::scoped_ptr<AudioDecoder> CreateIsacDecoder(
+ LockedIsacBandwidthInfo* bwinfo) {
+#if defined(WEBRTC_CODEC_ISACFX)
+ return rtc_make_scoped_ptr(new AudioDecoderIsacFix(bwinfo));
+#elif defined(WEBRTC_CODEC_ISAC)
+ return rtc_make_scoped_ptr(new AudioDecoderIsac(bwinfo));
+#else
+ FATAL() << "iSAC is not supported.";
+ return rtc::scoped_ptr<AudioDecoder>();
+#endif
+}
+
+} // namespace
+
+RentACodec::RentACodec() = default;
+RentACodec::~RentACodec() = default;
+
+AudioEncoder* RentACodec::RentEncoder(const CodecInst& codec_inst) {
+ rtc::scoped_ptr<AudioEncoder> enc =
+ CreateEncoder(codec_inst, &isac_bandwidth_info_);
+ if (!enc)
+ return nullptr;
+ speech_encoder_ = std::move(enc);
+ return speech_encoder_.get();
+}
+
+RentACodec::StackParameters::StackParameters() {
+ // Register the default payload types for RED and CNG.
+ for (const CodecInst& ci : RentACodec::Database()) {
+ RentACodec::RegisterCngPayloadType(&cng_payload_types, ci);
+ RentACodec::RegisterRedPayloadType(&red_payload_types, ci);
+ }
+}
+
+RentACodec::StackParameters::~StackParameters() = default;
+
+AudioEncoder* RentACodec::RentEncoderStack(StackParameters* param) {
+ RTC_DCHECK(param->speech_encoder);
+
+ if (param->use_codec_fec) {
+ // Switch FEC on. On failure, remember that FEC is off.
+ if (!param->speech_encoder->SetFec(true))
+ param->use_codec_fec = false;
+ } else {
+ // Switch FEC off. This shouldn't fail.
+ const bool success = param->speech_encoder->SetFec(false);
+ RTC_DCHECK(success);
+ }
+
+ auto pt = [&param](const std::map<int, int>& m) {
+ auto it = m.find(param->speech_encoder->SampleRateHz());
+ return it == m.end() ? rtc::Optional<int>()
+ : rtc::Optional<int>(it->second);
+ };
+ auto cng_pt = pt(param->cng_payload_types);
+ param->use_cng =
+ param->use_cng && cng_pt && param->speech_encoder->NumChannels() == 1;
+ auto red_pt = pt(param->red_payload_types);
+ param->use_red = param->use_red && red_pt;
+
+ if (param->use_cng || param->use_red) {
+ // The RED and CNG encoders need to be in sync with the speech encoder, so
+ // reset the latter to ensure its buffer is empty.
+ param->speech_encoder->Reset();
+ }
+ encoder_stack_ = param->speech_encoder;
+ if (param->use_red) {
+ red_encoder_ = CreateRedEncoder(encoder_stack_, *red_pt);
+ if (red_encoder_)
+ encoder_stack_ = red_encoder_.get();
+ } else {
+ red_encoder_.reset();
+ }
+ if (param->use_cng) {
+ cng_encoder_ = CreateCngEncoder(encoder_stack_, *cng_pt, param->vad_mode);
+ encoder_stack_ = cng_encoder_.get();
+ } else {
+ cng_encoder_.reset();
+ }
+ return encoder_stack_;
+}
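+// Note on stacking order: RED wraps the speech encoder first and CNG wraps
+// the result, so with both enabled the rented stack is CNG(RED(speech)).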
+
+AudioDecoder* RentACodec::RentIsacDecoder() {
+ if (!isac_decoder_)
+ isac_decoder_ = CreateIsacDecoder(&isac_bandwidth_info_);
+ return isac_decoder_.get();
+}
+
+} // namespace acm2
+} // namespace webrtc
diff --git a/webrtc/modules/audio_coding/acm2/rent_a_codec.h b/webrtc/modules/audio_coding/acm2/rent_a_codec.h
new file mode 100644
index 0000000000..b1dcc9196c
--- /dev/null
+++ b/webrtc/modules/audio_coding/acm2/rent_a_codec.h
@@ -0,0 +1,249 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_CODING_ACM2_RENT_A_CODEC_H_
+#define WEBRTC_MODULES_AUDIO_CODING_ACM2_RENT_A_CODEC_H_
+
+#include <stddef.h>
+#include <map>
+
+#include "webrtc/base/array_view.h"
+#include "webrtc/base/constructormagic.h"
+#include "webrtc/base/optional.h"
+#include "webrtc/base/scoped_ptr.h"
+#include "webrtc/modules/audio_coding/codecs/audio_decoder.h"
+#include "webrtc/modules/audio_coding/codecs/audio_encoder.h"
+#include "webrtc/modules/audio_coding/include/audio_coding_module_typedefs.h"
+#include "webrtc/typedefs.h"
+
+#if defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)
+#include "webrtc/modules/audio_coding/codecs/isac/locked_bandwidth_info.h"
+#else
+// Dummy implementation, for when we don't have iSAC.
+namespace webrtc {
+class LockedIsacBandwidthInfo {};
+}
+#endif
+
+namespace webrtc {
+
+struct CodecInst;
+
+namespace acm2 {
+
+class RentACodec {
+ public:
+ enum class CodecId {
+#if defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)
+ kISAC,
+#endif
+#ifdef WEBRTC_CODEC_ISAC
+ kISACSWB,
+#endif
+ // Mono
+ kPCM16B,
+ kPCM16Bwb,
+ kPCM16Bswb32kHz,
+ // Stereo
+ kPCM16B_2ch,
+ kPCM16Bwb_2ch,
+ kPCM16Bswb32kHz_2ch,
+ // Mono
+ kPCMU,
+ kPCMA,
+ // Stereo
+ kPCMU_2ch,
+ kPCMA_2ch,
+#ifdef WEBRTC_CODEC_ILBC
+ kILBC,
+#endif
+#ifdef WEBRTC_CODEC_G722
+ kG722, // Mono
+ kG722_2ch, // Stereo
+#endif
+#ifdef WEBRTC_CODEC_OPUS
+ kOpus, // Mono and stereo
+#endif
+ kCNNB,
+ kCNWB,
+ kCNSWB,
+#ifdef ENABLE_48000_HZ
+ kCNFB,
+#endif
+ kAVT,
+#ifdef WEBRTC_CODEC_RED
+ kRED,
+#endif
+ kNumCodecs, // Implementation detail. Don't use.
+
+// Set unsupported codecs to -1.
+#if !defined(WEBRTC_CODEC_ISAC) && !defined(WEBRTC_CODEC_ISACFX)
+ kISAC = -1,
+#endif
+#ifndef WEBRTC_CODEC_ISAC
+ kISACSWB = -1,
+#endif
+ // 48 kHz not supported, always set to -1.
+ kPCM16Bswb48kHz = -1,
+#ifndef WEBRTC_CODEC_ILBC
+ kILBC = -1,
+#endif
+#ifndef WEBRTC_CODEC_G722
+ kG722 = -1, // Mono
+ kG722_2ch = -1, // Stereo
+#endif
+#ifndef WEBRTC_CODEC_OPUS
+ kOpus = -1, // Mono and stereo
+#endif
+#ifndef WEBRTC_CODEC_RED
+ kRED = -1,
+#endif
+#ifndef ENABLE_48000_HZ
+ kCNFB = -1,
+#endif
+
+ kNone = -1
+ };
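+  // (The -1 entries above fall outside [0, kNumCodecs), so
+  // CodecIndexFromId() returns an empty Optional for them.)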
+
+ enum class NetEqDecoder {
+ kDecoderPCMu,
+ kDecoderPCMa,
+ kDecoderPCMu_2ch,
+ kDecoderPCMa_2ch,
+ kDecoderILBC,
+ kDecoderISAC,
+ kDecoderISACswb,
+ kDecoderPCM16B,
+ kDecoderPCM16Bwb,
+ kDecoderPCM16Bswb32kHz,
+ kDecoderPCM16Bswb48kHz,
+ kDecoderPCM16B_2ch,
+ kDecoderPCM16Bwb_2ch,
+ kDecoderPCM16Bswb32kHz_2ch,
+ kDecoderPCM16Bswb48kHz_2ch,
+ kDecoderPCM16B_5ch,
+ kDecoderG722,
+ kDecoderG722_2ch,
+ kDecoderRED,
+ kDecoderAVT,
+ kDecoderCNGnb,
+ kDecoderCNGwb,
+ kDecoderCNGswb32kHz,
+ kDecoderCNGswb48kHz,
+ kDecoderArbitrary,
+ kDecoderOpus,
+ kDecoderOpus_2ch,
+ };
+
+ static inline size_t NumberOfCodecs() {
+ return static_cast<size_t>(CodecId::kNumCodecs);
+ }
+
+ static inline rtc::Optional<int> CodecIndexFromId(CodecId codec_id) {
+ const int i = static_cast<int>(codec_id);
+ return i >= 0 && i < static_cast<int>(NumberOfCodecs())
+ ? rtc::Optional<int>(i)
+ : rtc::Optional<int>();
+ }
+
+ static inline rtc::Optional<CodecId> CodecIdFromIndex(int codec_index) {
+ return static_cast<size_t>(codec_index) < NumberOfCodecs()
+ ? rtc::Optional<RentACodec::CodecId>(
+ static_cast<RentACodec::CodecId>(codec_index))
+ : rtc::Optional<RentACodec::CodecId>();
+ }
+
+ static rtc::Optional<CodecId> CodecIdByParams(const char* payload_name,
+ int sampling_freq_hz,
+ size_t channels);
+ static rtc::Optional<CodecInst> CodecInstById(CodecId codec_id);
+ static rtc::Optional<CodecId> CodecIdByInst(const CodecInst& codec_inst);
+ static rtc::Optional<CodecInst> CodecInstByParams(const char* payload_name,
+ int sampling_freq_hz,
+ size_t channels);
+ static bool IsCodecValid(const CodecInst& codec_inst);
+
+ static inline bool IsPayloadTypeValid(int payload_type) {
+ return payload_type >= 0 && payload_type <= 127;
+ }
+
+ static rtc::ArrayView<const CodecInst> Database();
+
+ static rtc::Optional<bool> IsSupportedNumChannels(CodecId codec_id,
+ size_t num_channels);
+
+ static rtc::Optional<NetEqDecoder> NetEqDecoderFromCodecId(
+ CodecId codec_id,
+ size_t num_channels);
+
+ // Parse codec_inst and extract payload types. If the given CodecInst was for
+ // the wrong sort of codec, return kSkip; otherwise, if the rate was illegal,
+ // return kBadFreq; otherwise, update the given RTP timestamp rate (Hz) ->
+ // payload type map and return kOk.
+ enum class RegistrationResult { kOk, kSkip, kBadFreq };
+ static RegistrationResult RegisterCngPayloadType(std::map<int, int>* pt_map,
+ const CodecInst& codec_inst);
+ static RegistrationResult RegisterRedPayloadType(std::map<int, int>* pt_map,
+ const CodecInst& codec_inst);
+
+ RentACodec();
+ ~RentACodec();
+
+ // Creates and returns an audio encoder built to the given specification.
+ // Returns null in case of error. The returned encoder is live until the next
+ // successful call to this function, or until the Rent-A-Codec is destroyed.
+ AudioEncoder* RentEncoder(const CodecInst& codec_inst);
+
+ struct StackParameters {
+ StackParameters();
+ ~StackParameters();
+
+ AudioEncoder* speech_encoder = nullptr;
+ bool use_codec_fec = false;
+ bool use_red = false;
+ bool use_cng = false;
+ ACMVADMode vad_mode = VADNormal;
+
+ // Maps from RTP timestamp rate (in Hz) to payload type.
+ std::map<int, int> cng_payload_types;
+ std::map<int, int> red_payload_types;
+ };
+
+ // Creates and returns an audio encoder stack constructed to the given
+ // specification. If the specification isn't compatible with the encoder, it
+ // will be changed to match (things will be switched off). The returned
+ // encoder is live until the next successful call to this function, or until
+ // the Rent-A-Codec is destroyed.
+ AudioEncoder* RentEncoderStack(StackParameters* param);
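+  //
+  // Sketch, assuming a codec_inst prepared elsewhere:
+  //   RentACodec rac;
+  //   RentACodec::StackParameters p;
+  //   p.speech_encoder = rac.RentEncoder(codec_inst);
+  //   p.use_cng = true;  // Cleared again if no CNG payload type matches.
+  //   AudioEncoder* stack = rac.RentEncoderStack(&p);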
+
+ // The last return value of RentEncoderStack, or null if it hasn't been
+ // called.
+ AudioEncoder* GetEncoderStack() const { return encoder_stack_; }
+
+ // Creates and returns an iSAC decoder, which will remain live until the
+ // Rent-A-Codec is destroyed. Subsequent calls will simply return the same
+ // object.
+ AudioDecoder* RentIsacDecoder();
+
+ private:
+ rtc::scoped_ptr<AudioEncoder> speech_encoder_;
+ rtc::scoped_ptr<AudioEncoder> cng_encoder_;
+ rtc::scoped_ptr<AudioEncoder> red_encoder_;
+ rtc::scoped_ptr<AudioDecoder> isac_decoder_;
+ AudioEncoder* encoder_stack_ = nullptr;
+ LockedIsacBandwidthInfo isac_bandwidth_info_;
+
+ RTC_DISALLOW_COPY_AND_ASSIGN(RentACodec);
+};
+
+} // namespace acm2
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_AUDIO_CODING_ACM2_RENT_A_CODEC_H_
diff --git a/webrtc/modules/audio_coding/main/acm2/codec_owner_unittest.cc b/webrtc/modules/audio_coding/acm2/rent_a_codec_unittest.cc
index 6c232615a7..e838488e53 100644
--- a/webrtc/modules/audio_coding/main/acm2/codec_owner_unittest.cc
+++ b/webrtc/modules/audio_coding/acm2/rent_a_codec_unittest.cc
@@ -8,36 +8,34 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include <cstring>
-
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/base/arraysize.h"
-#include "webrtc/base/safe_conversions.h"
#include "webrtc/modules/audio_coding/codecs/mock/mock_audio_encoder.h"
-#include "webrtc/modules/audio_coding/main/acm2/codec_owner.h"
+#include "webrtc/modules/audio_coding/acm2/rent_a_codec.h"
namespace webrtc {
namespace acm2 {
using ::testing::Return;
-using ::testing::InSequence;
namespace {
const int kDataLengthSamples = 80;
const int kPacketSizeSamples = 2 * kDataLengthSamples;
const int16_t kZeroData[kDataLengthSamples] = {0};
-const CodecInst kDefaultCodecInst =
- {0, "pcmu", 8000, kPacketSizeSamples, 1, 64000};
+const CodecInst kDefaultCodecInst = {0, "pcmu", 8000, kPacketSizeSamples,
+ 1, 64000};
const int kCngPt = 13;
} // namespace
-class CodecOwnerTest : public ::testing::Test {
+class RentACodecTestF : public ::testing::Test {
protected:
- CodecOwnerTest() : timestamp_(0) {}
-
void CreateCodec() {
- ASSERT_TRUE(
- codec_owner_.SetEncoders(kDefaultCodecInst, kCngPt, VADNormal, -1));
+ speech_encoder_ = rent_a_codec_.RentEncoder(kDefaultCodecInst);
+ ASSERT_TRUE(speech_encoder_);
+ RentACodec::StackParameters param;
+ param.use_cng = true;
+ param.speech_encoder = speech_encoder_;
+ encoder_ = rent_a_codec_.RentEncoderStack(&param);
}
void EncodeAndVerify(size_t expected_out_length,
@@ -46,8 +44,8 @@ class CodecOwnerTest : public ::testing::Test {
int expected_send_even_if_empty) {
uint8_t out[kPacketSizeSamples];
AudioEncoder::EncodedInfo encoded_info;
- encoded_info = codec_owner_.Encoder()->Encode(
- timestamp_, kZeroData, kDataLengthSamples, kPacketSizeSamples, out);
+ encoded_info =
+ encoder_->Encode(timestamp_, kZeroData, kPacketSizeSamples, out);
timestamp_ += kDataLengthSamples;
EXPECT_TRUE(encoded_info.redundant.empty());
EXPECT_EQ(expected_out_length, encoded_info.encoded_bytes);
@@ -59,43 +57,10 @@ class CodecOwnerTest : public ::testing::Test {
encoded_info.send_even_if_empty);
}
- // Verify that the speech encoder's Reset method is called when CNG or RED
- // (or both) are switched on, but not when they're switched off.
- void TestCngAndRedResetSpeechEncoder(bool use_cng, bool use_red) {
- MockAudioEncoder speech_encoder;
- EXPECT_CALL(speech_encoder, NumChannels())
- .WillRepeatedly(Return(1));
- EXPECT_CALL(speech_encoder, Max10MsFramesInAPacket())
- .WillRepeatedly(Return(2));
- EXPECT_CALL(speech_encoder, SampleRateHz())
- .WillRepeatedly(Return(8000));
- {
- InSequence s;
- EXPECT_CALL(speech_encoder, Mark("start off"));
- EXPECT_CALL(speech_encoder, Mark("switch on"));
- if (use_cng || use_red)
- EXPECT_CALL(speech_encoder, Reset());
- EXPECT_CALL(speech_encoder, Mark("start on"));
- if (use_cng || use_red)
- EXPECT_CALL(speech_encoder, Reset());
- EXPECT_CALL(speech_encoder, Mark("switch off"));
- EXPECT_CALL(speech_encoder, Die());
- }
-
- int cng_pt = use_cng ? 17 : -1;
- int red_pt = use_red ? 19 : -1;
- speech_encoder.Mark("start off");
- codec_owner_.SetEncoders(&speech_encoder, -1, VADNormal, -1);
- speech_encoder.Mark("switch on");
- codec_owner_.ChangeCngAndRed(cng_pt, VADNormal, red_pt);
- speech_encoder.Mark("start on");
- codec_owner_.SetEncoders(&speech_encoder, cng_pt, VADNormal, red_pt);
- speech_encoder.Mark("switch off");
- codec_owner_.ChangeCngAndRed(-1, VADNormal, -1);
- }
-
- CodecOwner codec_owner_;
- uint32_t timestamp_;
+ RentACodec rent_a_codec_;
+ AudioEncoder* speech_encoder_ = nullptr;
+ AudioEncoder* encoder_ = nullptr;
+ uint32_t timestamp_ = 0;
};
// This test verifies that CNG frames are delivered as expected. Since the frame
@@ -107,7 +72,7 @@ class CodecOwnerTest : public ::testing::Test {
// AudioEncoder::EncodedInfo::send_even_if_empty set to true. (The reason to
// produce an empty frame is to drive sending of DTMF packets in the RTP/RTCP
// module.)
-TEST_F(CodecOwnerTest, VerifyCngFrames) {
+TEST_F(RentACodecTestF, VerifyCngFrames) {
CreateCodec();
uint32_t expected_timestamp = timestamp_;
// Verify no frame.
@@ -136,75 +101,122 @@ TEST_F(CodecOwnerTest, VerifyCngFrames) {
}
}
-TEST_F(CodecOwnerTest, ExternalEncoder) {
- MockAudioEncoder external_encoder;
- codec_owner_.SetEncoders(&external_encoder, -1, VADNormal, -1);
+TEST(RentACodecTest, ExternalEncoder) {
const int kSampleRateHz = 8000;
+ MockAudioEncoder external_encoder;
+ EXPECT_CALL(external_encoder, SampleRateHz())
+ .WillRepeatedly(Return(kSampleRateHz));
+ EXPECT_CALL(external_encoder, NumChannels()).WillRepeatedly(Return(1));
+ EXPECT_CALL(external_encoder, SetFec(false)).WillRepeatedly(Return(true));
+
+ RentACodec rac;
+ RentACodec::StackParameters param;
+ param.speech_encoder = &external_encoder;
+ EXPECT_EQ(&external_encoder, rac.RentEncoderStack(&param));
const int kPacketSizeSamples = kSampleRateHz / 100;
int16_t audio[kPacketSizeSamples] = {0};
uint8_t encoded[kPacketSizeSamples];
AudioEncoder::EncodedInfo info;
- EXPECT_CALL(external_encoder, SampleRateHz())
- .WillRepeatedly(Return(kSampleRateHz));
{
- InSequence s;
+ ::testing::InSequence s;
info.encoded_timestamp = 0;
EXPECT_CALL(external_encoder,
- EncodeInternal(0, audio, arraysize(encoded), encoded))
+ EncodeInternal(0, rtc::ArrayView<const int16_t>(audio),
+ arraysize(encoded), encoded))
.WillOnce(Return(info));
EXPECT_CALL(external_encoder, Mark("A"));
EXPECT_CALL(external_encoder, Mark("B"));
info.encoded_timestamp = 2;
EXPECT_CALL(external_encoder,
- EncodeInternal(2, audio, arraysize(encoded), encoded))
+ EncodeInternal(2, rtc::ArrayView<const int16_t>(audio),
+ arraysize(encoded), encoded))
.WillOnce(Return(info));
EXPECT_CALL(external_encoder, Die());
}
- info = codec_owner_.Encoder()->Encode(0, audio, arraysize(audio),
- arraysize(encoded), encoded);
+ info = rac.GetEncoderStack()->Encode(0, audio, arraysize(encoded), encoded);
EXPECT_EQ(0u, info.encoded_timestamp);
external_encoder.Mark("A");
// Change to internal encoder.
CodecInst codec_inst = kDefaultCodecInst;
codec_inst.pacsize = kPacketSizeSamples;
- ASSERT_TRUE(codec_owner_.SetEncoders(codec_inst, -1, VADNormal, -1));
+ param.speech_encoder = rac.RentEncoder(codec_inst);
+ ASSERT_TRUE(param.speech_encoder);
+ EXPECT_EQ(param.speech_encoder, rac.RentEncoderStack(&param));
+
// Don't expect any more calls to the external encoder.
- info = codec_owner_.Encoder()->Encode(1, audio, arraysize(audio),
- arraysize(encoded), encoded);
+ info = rac.GetEncoderStack()->Encode(1, audio, arraysize(encoded), encoded);
external_encoder.Mark("B");
// Change back to external encoder again.
- codec_owner_.SetEncoders(&external_encoder, -1, VADNormal, -1);
- info = codec_owner_.Encoder()->Encode(2, audio, arraysize(audio),
- arraysize(encoded), encoded);
+ param.speech_encoder = &external_encoder;
+ EXPECT_EQ(&external_encoder, rac.RentEncoderStack(&param));
+ info = rac.GetEncoderStack()->Encode(2, audio, arraysize(encoded), encoded);
EXPECT_EQ(2u, info.encoded_timestamp);
}
-TEST_F(CodecOwnerTest, CngResetsSpeechEncoder) {
+// Verify that the speech encoder's Reset method is called when CNG or RED
+// (or both) are switched on, but not when they're switched off.
+void TestCngAndRedResetSpeechEncoder(bool use_cng, bool use_red) {
+ MockAudioEncoder speech_encoder;
+ EXPECT_CALL(speech_encoder, NumChannels()).WillRepeatedly(Return(1));
+ EXPECT_CALL(speech_encoder, Max10MsFramesInAPacket())
+ .WillRepeatedly(Return(2));
+ EXPECT_CALL(speech_encoder, SampleRateHz()).WillRepeatedly(Return(8000));
+ EXPECT_CALL(speech_encoder, SetFec(false)).WillRepeatedly(Return(true));
+ {
+ ::testing::InSequence s;
+ EXPECT_CALL(speech_encoder, Mark("disabled"));
+ EXPECT_CALL(speech_encoder, Mark("enabled"));
+ if (use_cng || use_red)
+ EXPECT_CALL(speech_encoder, Reset());
+ EXPECT_CALL(speech_encoder, Die());
+ }
+
+ RentACodec::StackParameters param1, param2;
+ param1.speech_encoder = &speech_encoder;
+ param2.speech_encoder = &speech_encoder;
+ param2.use_cng = use_cng;
+ param2.use_red = use_red;
+ speech_encoder.Mark("disabled");
+ RentACodec rac;
+ rac.RentEncoderStack(&param1);
+ speech_encoder.Mark("enabled");
+ rac.RentEncoderStack(&param2);
+}
+
+TEST(RentACodecTest, CngResetsSpeechEncoder) {
TestCngAndRedResetSpeechEncoder(true, false);
}
-TEST_F(CodecOwnerTest, RedResetsSpeechEncoder) {
+TEST(RentACodecTest, RedResetsSpeechEncoder) {
TestCngAndRedResetSpeechEncoder(false, true);
}
-TEST_F(CodecOwnerTest, CngAndRedResetsSpeechEncoder) {
+TEST(RentACodecTest, CngAndRedResetsSpeechEncoder) {
TestCngAndRedResetSpeechEncoder(true, true);
}
-TEST_F(CodecOwnerTest, NoCngAndRedNoSpeechEncoderReset) {
+TEST(RentACodecTest, NoCngAndRedNoSpeechEncoderReset) {
TestCngAndRedResetSpeechEncoder(false, false);
}
-TEST_F(CodecOwnerTest, SetEncodersError) {
- CodecInst codec_inst = kDefaultCodecInst;
- static const char bad_name[] = "Robert'); DROP TABLE Students;";
- std::memcpy(codec_inst.plname, bad_name, sizeof bad_name);
- EXPECT_FALSE(codec_owner_.SetEncoders(codec_inst, -1, VADNormal, -1));
+TEST(RentACodecTest, RentEncoderError) {
+ const CodecInst codec_inst = {
+ 0, "Robert'); DROP TABLE Students;", 8000, 160, 1, 64000};
+ RentACodec rent_a_codec;
+ EXPECT_FALSE(rent_a_codec.RentEncoder(codec_inst));
+}
+
+#if GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
+TEST(RentACodecTest, RentEncoderStackWithoutSpeechEncoder) {
+ RentACodec::StackParameters sp;
+ EXPECT_EQ(nullptr, sp.speech_encoder);
+ EXPECT_DEATH(RentACodec().RentEncoderStack(&sp), "");
}
+#endif
} // namespace acm2
} // namespace webrtc
diff --git a/webrtc/modules/audio_coding/audio_coding.gypi b/webrtc/modules/audio_coding/audio_coding.gypi
index bc3c48d075..abdb1915c3 100644
--- a/webrtc/modules/audio_coding/audio_coding.gypi
+++ b/webrtc/modules/audio_coding/audio_coding.gypi
@@ -19,12 +19,195 @@
'codecs/isac/isacfix.gypi',
'codecs/pcm16b/pcm16b.gypi',
'codecs/red/red.gypi',
- 'main/audio_coding_module.gypi',
'neteq/neteq.gypi',
],
+ 'variables': {
+ 'audio_coding_dependencies': [
+ 'cng',
+ 'g711',
+ 'pcm16b',
+ '<(webrtc_root)/common.gyp:webrtc_common',
+ '<(webrtc_root)/common_audio/common_audio.gyp:common_audio',
+ '<(webrtc_root)/system_wrappers/system_wrappers.gyp:system_wrappers',
+ ],
+ 'audio_coding_defines': [],
+ 'conditions': [
+ ['include_opus==1', {
+ 'audio_coding_dependencies': ['webrtc_opus',],
+ 'audio_coding_defines': ['WEBRTC_CODEC_OPUS',],
+ }],
+ ['build_with_mozilla==0', {
+ 'conditions': [
+ ['target_arch=="arm"', {
+ 'audio_coding_dependencies': ['isac_fix',],
+ 'audio_coding_defines': ['WEBRTC_CODEC_ISACFX',],
+ }, {
+ 'audio_coding_dependencies': ['isac',],
+ 'audio_coding_defines': ['WEBRTC_CODEC_ISAC',],
+ }],
+ ],
+ 'audio_coding_dependencies': ['g722',],
+ 'audio_coding_defines': ['WEBRTC_CODEC_G722',],
+ }],
+ ['build_with_mozilla==0 and build_with_chromium==0', {
+ 'audio_coding_dependencies': ['ilbc', 'red',],
+ 'audio_coding_defines': ['WEBRTC_CODEC_ILBC', 'WEBRTC_CODEC_RED',],
+ }],
+ ],
+ },
+ 'targets': [
+ {
+ 'target_name': 'rent_a_codec',
+ 'type': 'static_library',
+ 'defines': [
+ '<@(audio_coding_defines)',
+ ],
+ 'dependencies': [
+ '<(webrtc_root)/common.gyp:webrtc_common',
+ ],
+ 'include_dirs': [
+ '<(webrtc_root)',
+ ],
+ 'direct_dependent_settings': {
+ 'include_dirs': [
+ '<(webrtc_root)',
+ ],
+ },
+ 'sources': [
+ 'acm2/acm_codec_database.cc',
+ 'acm2/acm_codec_database.h',
+ 'acm2/rent_a_codec.cc',
+ 'acm2/rent_a_codec.h',
+ ],
+ },
+ {
+ 'target_name': 'audio_coding_module',
+ 'type': 'static_library',
+ 'defines': [
+ '<@(audio_coding_defines)',
+ ],
+ 'dependencies': [
+ '<@(audio_coding_dependencies)',
+ '<(webrtc_root)/common.gyp:webrtc_common',
+ '<(webrtc_root)/webrtc.gyp:rtc_event_log',
+ 'neteq',
+ 'rent_a_codec',
+ ],
+ 'include_dirs': [
+ 'include',
+ '../include',
+ '<(webrtc_root)',
+ ],
+ 'direct_dependent_settings': {
+ 'include_dirs': [
+ 'include',
+ '../include',
+ '<(webrtc_root)',
+ ],
+ },
+ 'conditions': [
+ ['include_opus==1', {
+ 'export_dependent_settings': ['webrtc_opus'],
+ }],
+ ],
+ 'sources': [
+ 'acm2/acm_common_defs.h',
+ 'acm2/acm_receiver.cc',
+ 'acm2/acm_receiver.h',
+ 'acm2/acm_resampler.cc',
+ 'acm2/acm_resampler.h',
+ 'acm2/audio_coding_module.cc',
+ 'acm2/audio_coding_module_impl.cc',
+ 'acm2/audio_coding_module_impl.h',
+ 'acm2/call_statistics.cc',
+ 'acm2/call_statistics.h',
+ 'acm2/codec_manager.cc',
+ 'acm2/codec_manager.h',
+ 'acm2/initial_delay_manager.cc',
+ 'acm2/initial_delay_manager.h',
+ 'include/audio_coding_module.h',
+ 'include/audio_coding_module_typedefs.h',
+ ],
+ },
+ ],
'conditions': [
['include_opus==1', {
'includes': ['codecs/opus/opus.gypi',],
}],
+ ['include_tests==1', {
+ 'targets': [
+ {
+ 'target_name': 'acm_receive_test',
+ 'type': 'static_library',
+ 'defines': [
+ '<@(audio_coding_defines)',
+ ],
+ 'dependencies': [
+ '<@(audio_coding_dependencies)',
+ 'audio_coding_module',
+ 'neteq_unittest_tools',
+ '<(DEPTH)/testing/gtest.gyp:gtest',
+ ],
+ 'sources': [
+ 'acm2/acm_receive_test_oldapi.cc',
+ 'acm2/acm_receive_test_oldapi.h',
+ ],
+ }, # acm_receive_test
+ {
+ 'target_name': 'acm_send_test',
+ 'type': 'static_library',
+ 'defines': [
+ '<@(audio_coding_defines)',
+ ],
+ 'dependencies': [
+ '<@(audio_coding_dependencies)',
+ 'audio_coding_module',
+ 'neteq_unittest_tools',
+ '<(DEPTH)/testing/gtest.gyp:gtest',
+ ],
+ 'sources': [
+ 'acm2/acm_send_test_oldapi.cc',
+ 'acm2/acm_send_test_oldapi.h',
+ ],
+ }, # acm_send_test
+ {
+ 'target_name': 'delay_test',
+ 'type': 'executable',
+ 'dependencies': [
+ 'audio_coding_module',
+ '<(DEPTH)/testing/gtest.gyp:gtest',
+ '<(webrtc_root)/common.gyp:webrtc_common',
+ '<(webrtc_root)/test/test.gyp:test_support',
+ '<(webrtc_root)/system_wrappers/system_wrappers.gyp:system_wrappers',
+ '<(webrtc_root)/system_wrappers/system_wrappers.gyp:system_wrappers_default',
+ '<(DEPTH)/third_party/gflags/gflags.gyp:gflags',
+ ],
+ 'sources': [
+ 'test/delay_test.cc',
+ 'test/Channel.cc',
+ 'test/PCMFile.cc',
+ 'test/utility.cc',
+ ],
+ }, # delay_test
+ {
+ 'target_name': 'insert_packet_with_timing',
+ 'type': 'executable',
+ 'dependencies': [
+ 'audio_coding_module',
+ '<(DEPTH)/testing/gtest.gyp:gtest',
+ '<(webrtc_root)/common.gyp:webrtc_common',
+ '<(webrtc_root)/test/test.gyp:test_support',
+ '<(webrtc_root)/system_wrappers/system_wrappers.gyp:system_wrappers',
+ '<(webrtc_root)/system_wrappers/system_wrappers.gyp:system_wrappers_default',
+ '<(DEPTH)/third_party/gflags/gflags.gyp:gflags',
+ ],
+ 'sources': [
+ 'test/insert_packet_with_timing.cc',
+ 'test/Channel.cc',
+ 'test/PCMFile.cc',
+ ],
+      }, # insert_packet_with_timing
+ ],
+ }],
],
}
diff --git a/webrtc/modules/audio_coding/codecs/audio_decoder.cc b/webrtc/modules/audio_coding/codecs/audio_decoder.cc
index 08d101c5ae..d2984b97b0 100644
--- a/webrtc/modules/audio_coding/codecs/audio_decoder.cc
+++ b/webrtc/modules/audio_coding/codecs/audio_decoder.cc
@@ -13,12 +13,14 @@
#include <assert.h>
#include "webrtc/base/checks.h"
+#include "webrtc/base/trace_event.h"
namespace webrtc {
int AudioDecoder::Decode(const uint8_t* encoded, size_t encoded_len,
int sample_rate_hz, size_t max_decoded_bytes,
int16_t* decoded, SpeechType* speech_type) {
+ TRACE_EVENT0("webrtc", "AudioDecoder::Decode");
int duration = PacketDuration(encoded, encoded_len);
if (duration >= 0 &&
duration * Channels() * sizeof(int16_t) > max_decoded_bytes) {
@@ -31,6 +33,7 @@ int AudioDecoder::Decode(const uint8_t* encoded, size_t encoded_len,
int AudioDecoder::DecodeRedundant(const uint8_t* encoded, size_t encoded_len,
int sample_rate_hz, size_t max_decoded_bytes,
int16_t* decoded, SpeechType* speech_type) {
+ TRACE_EVENT0("webrtc", "AudioDecoder::DecodeRedundant");
int duration = PacketDurationRedundant(encoded, encoded_len);
if (duration >= 0 &&
duration * Channels() * sizeof(int16_t) > max_decoded_bytes) {
@@ -40,12 +43,6 @@ int AudioDecoder::DecodeRedundant(const uint8_t* encoded, size_t encoded_len,
speech_type);
}
-int AudioDecoder::DecodeInternal(const uint8_t* encoded, size_t encoded_len,
- int sample_rate_hz, int16_t* decoded,
- SpeechType* speech_type) {
- return kNotImplemented;
-}
-
int AudioDecoder::DecodeRedundantInternal(const uint8_t* encoded,
size_t encoded_len,
int sample_rate_hz, int16_t* decoded,
diff --git a/webrtc/modules/audio_coding/codecs/audio_decoder.h b/webrtc/modules/audio_coding/codecs/audio_decoder.h
index 6189be098d..81ac873183 100644
--- a/webrtc/modules/audio_coding/codecs/audio_decoder.h
+++ b/webrtc/modules/audio_coding/codecs/audio_decoder.h
@@ -14,7 +14,7 @@
#include <stdlib.h> // NULL
#include "webrtc/base/constructormagic.h"
-#include "webrtc/modules/audio_coding/codecs/cng/include/webrtc_cng.h"
+#include "webrtc/modules/audio_coding/codecs/cng/webrtc_cng.h"
#include "webrtc/typedefs.h"
namespace webrtc {
@@ -41,21 +41,21 @@ class AudioDecoder {
// is set to kComfortNoise, otherwise it is kSpeech. The desired output
// sample rate is provided in |sample_rate_hz|, which must be valid for the
// codec at hand.
- virtual int Decode(const uint8_t* encoded,
- size_t encoded_len,
- int sample_rate_hz,
- size_t max_decoded_bytes,
- int16_t* decoded,
- SpeechType* speech_type);
+ int Decode(const uint8_t* encoded,
+ size_t encoded_len,
+ int sample_rate_hz,
+ size_t max_decoded_bytes,
+ int16_t* decoded,
+ SpeechType* speech_type);
// Same as Decode(), but interfaces to the decoder's redundant decode function.
// The default implementation simply calls the regular Decode() method.
- virtual int DecodeRedundant(const uint8_t* encoded,
- size_t encoded_len,
- int sample_rate_hz,
- size_t max_decoded_bytes,
- int16_t* decoded,
- SpeechType* speech_type);
+ int DecodeRedundant(const uint8_t* encoded,
+ size_t encoded_len,
+ int sample_rate_hz,
+ size_t max_decoded_bytes,
+ int16_t* decoded,
+ SpeechType* speech_type);
// Indicates if the decoder implements the DecodePlc method.
virtual bool HasDecodePlc() const;
@@ -107,7 +107,7 @@ class AudioDecoder {
size_t encoded_len,
int sample_rate_hz,
int16_t* decoded,
- SpeechType* speech_type);
+ SpeechType* speech_type) = 0;
virtual int DecodeRedundantInternal(const uint8_t* encoded,
size_t encoded_len,
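With DecodeInternal() now pure virtual, every concrete decoder must implement it. A minimal conforming subclass might look as follows; the decode body is a placeholder, and the trivial Reset() and Channels() overrides are an assumption about the remaining pure-virtual members of this interface:

  class FakeDecoder : public webrtc::AudioDecoder {
   public:
    void Reset() override {}
    size_t Channels() const override { return 1; }

   protected:
    int DecodeInternal(const uint8_t* encoded,
                       size_t encoded_len,
                       int sample_rate_hz,
                       int16_t* decoded,
                       SpeechType* speech_type) override {
      *speech_type = kSpeech;
      // Placeholder: expand each encoded byte into one 16-bit sample.
      for (size_t i = 0; i < encoded_len; ++i)
        decoded[i] = static_cast<int16_t>(encoded[i] << 8);
      return static_cast<int>(encoded_len);
    }
  };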
diff --git a/webrtc/modules/audio_coding/codecs/audio_encoder.cc b/webrtc/modules/audio_coding/codecs/audio_encoder.cc
index 6d763005ac..e99fc30995 100644
--- a/webrtc/modules/audio_coding/codecs/audio_encoder.cc
+++ b/webrtc/modules/audio_coding/codecs/audio_encoder.cc
@@ -9,7 +9,9 @@
*/
#include "webrtc/modules/audio_coding/codecs/audio_encoder.h"
+
#include "webrtc/base/checks.h"
+#include "webrtc/base/trace_event.h"
namespace webrtc {
@@ -21,13 +23,14 @@ int AudioEncoder::RtpTimestampRateHz() const {
return SampleRateHz();
}
-AudioEncoder::EncodedInfo AudioEncoder::Encode(uint32_t rtp_timestamp,
- const int16_t* audio,
- size_t num_samples_per_channel,
- size_t max_encoded_bytes,
- uint8_t* encoded) {
- RTC_CHECK_EQ(num_samples_per_channel,
- static_cast<size_t>(SampleRateHz() / 100));
+AudioEncoder::EncodedInfo AudioEncoder::Encode(
+ uint32_t rtp_timestamp,
+ rtc::ArrayView<const int16_t> audio,
+ size_t max_encoded_bytes,
+ uint8_t* encoded) {
+ TRACE_EVENT0("webrtc", "AudioEncoder::Encode");
+ RTC_CHECK_EQ(audio.size(),
+ static_cast<size_t>(NumChannels() * SampleRateHz() / 100));
EncodedInfo info =
EncodeInternal(rtp_timestamp, audio, max_encoded_bytes, encoded);
RTC_CHECK_LE(info.encoded_bytes, max_encoded_bytes);
diff --git a/webrtc/modules/audio_coding/codecs/audio_encoder.h b/webrtc/modules/audio_coding/codecs/audio_encoder.h
index cda9d86f2e..a46b0e86a7 100644
--- a/webrtc/modules/audio_coding/codecs/audio_encoder.h
+++ b/webrtc/modules/audio_coding/codecs/audio_encoder.h
@@ -14,6 +14,7 @@
#include <algorithm>
#include <vector>
+#include "webrtc/base/array_view.h"
#include "webrtc/typedefs.h"
namespace webrtc {
@@ -60,7 +61,7 @@ class AudioEncoder {
// Returns the input sample rate in Hz and the number of input channels.
// These are constants set at instantiation time.
virtual int SampleRateHz() const = 0;
- virtual int NumChannels() const = 0;
+ virtual size_t NumChannels() const = 0;
// Returns the rate at which the RTP timestamps are updated. The default
// implementation returns SampleRateHz().
@@ -91,13 +92,12 @@ class AudioEncoder {
// Encode() checks some preconditions, calls EncodeInternal() which does the
// actual work, and then checks some postconditions.
EncodedInfo Encode(uint32_t rtp_timestamp,
- const int16_t* audio,
- size_t num_samples_per_channel,
+ rtc::ArrayView<const int16_t> audio,
size_t max_encoded_bytes,
uint8_t* encoded);
virtual EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
- const int16_t* audio,
+ rtc::ArrayView<const int16_t> audio,
size_t max_encoded_bytes,
uint8_t* encoded) = 0;
@@ -125,7 +125,7 @@ class AudioEncoder {
// Tells the encoder about the highest sample rate the decoder is expected to
// use when decoding the bitstream. The encoder would typically use this
// information to adjust the quality of the encoding. The default
- // implementation just returns true.
+ // implementation does nothing.
virtual void SetMaxPlaybackRate(int frequency_hz);
// Tells the encoder what the projected packet loss rate is. The rate is in
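From the caller's side, the new Encode() overload takes exactly one 10 ms frame, matching the RTC_CHECK added in the .cc above. A sketch (fragment; <vector> assumed), where |encoder| points at some concrete AudioEncoder and |rtp_timestamp| is the running RTP timestamp:

  const size_t samples_per_10ms =
      encoder->NumChannels() * encoder->SampleRateHz() / 100;
  std::vector<int16_t> frame(samples_per_10ms);  // One 10 ms frame of input.
  std::vector<uint8_t> out(encoder->MaxEncodedBytes());
  webrtc::AudioEncoder::EncodedInfo info = encoder->Encode(
      rtp_timestamp,
      rtc::ArrayView<const int16_t>(frame.data(), frame.size()),
      out.size(), out.data());
  // Advance by per-channel samples; codecs with a distinct
  // RtpTimestampRateHz() advance by that rate instead.
  rtp_timestamp += static_cast<uint32_t>(encoder->SampleRateHz() / 100);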
diff --git a/webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng.cc b/webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng.cc
index 121524633c..180166c40c 100644
--- a/webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng.cc
+++ b/webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng.cc
@@ -8,7 +8,7 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/audio_coding/codecs/cng/include/audio_encoder_cng.h"
+#include "webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng.h"
#include <algorithm>
#include <limits>
@@ -75,7 +75,7 @@ int AudioEncoderCng::SampleRateHz() const {
return speech_encoder_->SampleRateHz();
}
-int AudioEncoderCng::NumChannels() const {
+size_t AudioEncoderCng::NumChannels() const {
return 1;
}
@@ -97,7 +97,7 @@ int AudioEncoderCng::GetTargetBitrate() const {
AudioEncoder::EncodedInfo AudioEncoderCng::EncodeInternal(
uint32_t rtp_timestamp,
- const int16_t* audio,
+ rtc::ArrayView<const int16_t> audio,
size_t max_encoded_bytes,
uint8_t* encoded) {
RTC_CHECK_GE(max_encoded_bytes,
@@ -106,9 +106,8 @@ AudioEncoder::EncodedInfo AudioEncoderCng::EncodeInternal(
RTC_CHECK_EQ(speech_buffer_.size(),
rtp_timestamps_.size() * samples_per_10ms_frame);
rtp_timestamps_.push_back(rtp_timestamp);
- for (size_t i = 0; i < samples_per_10ms_frame; ++i) {
- speech_buffer_.push_back(audio[i]);
- }
+ RTC_DCHECK_EQ(samples_per_10ms_frame, audio.size());
+ speech_buffer_.insert(speech_buffer_.end(), audio.cbegin(), audio.cend());
const size_t frames_to_encode = speech_encoder_->Num10MsFramesInNextPacket();
if (rtp_timestamps_.size() < frames_to_encode) {
return EncodedInfo();
@@ -242,9 +241,12 @@ AudioEncoder::EncodedInfo AudioEncoderCng::EncodeActive(
const size_t samples_per_10ms_frame = SamplesPer10msFrame();
AudioEncoder::EncodedInfo info;
for (size_t i = 0; i < frames_to_encode; ++i) {
- info = speech_encoder_->Encode(
- rtp_timestamps_.front(), &speech_buffer_[i * samples_per_10ms_frame],
- samples_per_10ms_frame, max_encoded_bytes, encoded);
+ info =
+ speech_encoder_->Encode(rtp_timestamps_.front(),
+ rtc::ArrayView<const int16_t>(
+ &speech_buffer_[i * samples_per_10ms_frame],
+ samples_per_10ms_frame),
+ max_encoded_bytes, encoded);
if (i + 1 == frames_to_encode) {
RTC_CHECK_GT(info.encoded_bytes, 0u) << "Encoder didn't deliver data.";
} else {
diff --git a/webrtc/modules/audio_coding/codecs/cng/include/audio_encoder_cng.h b/webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng.h
index 3ca9eb60f3..87383e2ac5 100644
--- a/webrtc/modules/audio_coding/codecs/cng/include/audio_encoder_cng.h
+++ b/webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng.h
@@ -8,15 +8,15 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_CNG_INCLUDE_AUDIO_ENCODER_CNG_H_
-#define WEBRTC_MODULES_AUDIO_CODING_CODECS_CNG_INCLUDE_AUDIO_ENCODER_CNG_H_
+#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_CNG_AUDIO_ENCODER_CNG_H_
+#define WEBRTC_MODULES_AUDIO_CODING_CODECS_CNG_AUDIO_ENCODER_CNG_H_
#include <vector>
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/common_audio/vad/include/vad.h"
#include "webrtc/modules/audio_coding/codecs/audio_encoder.h"
-#include "webrtc/modules/audio_coding/codecs/cng/include/webrtc_cng.h"
+#include "webrtc/modules/audio_coding/codecs/cng/webrtc_cng.h"
namespace webrtc {
@@ -32,7 +32,7 @@ class AudioEncoderCng final : public AudioEncoder {
struct Config {
bool IsOk() const;
- int num_channels = 1;
+ size_t num_channels = 1;
int payload_type = 13;
// Caller keeps ownership of the AudioEncoder object.
AudioEncoder* speech_encoder = nullptr;
@@ -51,13 +51,13 @@ class AudioEncoderCng final : public AudioEncoder {
size_t MaxEncodedBytes() const override;
int SampleRateHz() const override;
- int NumChannels() const override;
+ size_t NumChannels() const override;
int RtpTimestampRateHz() const override;
size_t Num10MsFramesInNextPacket() const override;
size_t Max10MsFramesInAPacket() const override;
int GetTargetBitrate() const override;
EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
- const int16_t* audio,
+ rtc::ArrayView<const int16_t> audio,
size_t max_encoded_bytes,
uint8_t* encoded) override;
void Reset() override;
@@ -92,4 +92,4 @@ class AudioEncoderCng final : public AudioEncoder {
} // namespace webrtc
-#endif // WEBRTC_MODULES_AUDIO_CODING_CODECS_CNG_INCLUDE_AUDIO_ENCODER_CNG_H_
+#endif // WEBRTC_MODULES_AUDIO_CODING_CODECS_CNG_AUDIO_ENCODER_CNG_H_
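Construction through the Config struct above, using only the fields visible in this hunk (everything else keeps its default); |speech_encoder| is some caller-owned AudioEncoder, and the AudioEncoderCng(const Config&) constructor is assumed from context:

  webrtc::AudioEncoderCng::Config config;
  config.num_channels = 1;                 // CNG output is mono.
  config.payload_type = 13;                // Conventional RTP payload type for CN.
  config.speech_encoder = speech_encoder;  // Caller keeps ownership.
  RTC_DCHECK(config.IsOk());
  webrtc::AudioEncoderCng cng_encoder(config);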
diff --git a/webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng_unittest.cc b/webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng_unittest.cc
index 0b837a0f12..feb3ed1f0a 100644
--- a/webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng_unittest.cc
+++ b/webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng_unittest.cc
@@ -13,7 +13,7 @@
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/common_audio/vad/mock/mock_vad.h"
-#include "webrtc/modules/audio_coding/codecs/cng/include/audio_encoder_cng.h"
+#include "webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng.h"
#include "webrtc/modules/audio_coding/codecs/mock/mock_audio_encoder.h"
using ::testing::Return;
@@ -75,8 +75,10 @@ class AudioEncoderCngTest : public ::testing::Test {
void Encode() {
ASSERT_TRUE(cng_) << "Must call CreateCng() first.";
- encoded_info_ = cng_->Encode(timestamp_, audio_, num_audio_samples_10ms_,
- encoded_.size(), &encoded_[0]);
+ encoded_info_ = cng_->Encode(
+ timestamp_,
+ rtc::ArrayView<const int16_t>(audio_, num_audio_samples_10ms_),
+ encoded_.size(), &encoded_[0]);
timestamp_ += static_cast<uint32_t>(num_audio_samples_10ms_);
}
diff --git a/webrtc/modules/audio_coding/codecs/cng/cng.gypi b/webrtc/modules/audio_coding/codecs/cng/cng.gypi
index 78dc41a94f..c020f4740d 100644
--- a/webrtc/modules/audio_coding/codecs/cng/cng.gypi
+++ b/webrtc/modules/audio_coding/codecs/cng/cng.gypi
@@ -15,23 +15,13 @@
'<(webrtc_root)/common_audio/common_audio.gyp:common_audio',
'audio_encoder_interface',
],
- 'include_dirs': [
- 'include',
- '<(webrtc_root)',
- ],
- 'direct_dependent_settings': {
- 'include_dirs': [
- 'include',
- '<(webrtc_root)',
- ],
- },
'sources': [
- 'include/audio_encoder_cng.h',
- 'include/webrtc_cng.h',
'audio_encoder_cng.cc',
- 'webrtc_cng.c',
+ 'audio_encoder_cng.h',
'cng_helpfuns.c',
'cng_helpfuns.h',
+ 'webrtc_cng.c',
+ 'webrtc_cng.h',
],
},
], # targets
diff --git a/webrtc/modules/audio_coding/codecs/cng/include/webrtc_cng.h b/webrtc/modules/audio_coding/codecs/cng/webrtc_cng.h
index 35660c4c3c..64bea1e26f 100644
--- a/webrtc/modules/audio_coding/codecs/cng/include/webrtc_cng.h
+++ b/webrtc/modules/audio_coding/codecs/cng/webrtc_cng.h
@@ -9,8 +9,8 @@
*/
-#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_CNG_MAIN_INCLUDE_WEBRTC_CNG_H_
-#define WEBRTC_MODULES_AUDIO_CODING_CODECS_CNG_MAIN_INCLUDE_WEBRTC_CNG_H_
+#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_CNG_WEBRTC_CNG_H_
+#define WEBRTC_MODULES_AUDIO_CODING_CODECS_CNG_WEBRTC_CNG_H_
#include <stddef.h>
#include "webrtc/typedefs.h"
@@ -144,7 +144,7 @@ int16_t WebRtcCng_Generate(CNG_dec_inst* cng_inst, int16_t* outData,
* WebRtcCng_GetErrorCodeEnc/Dec(...)
*
* These functions can be used to check the error code of a CNG instance. When
- * a function returns -1 a error code will be set for that instance. The
+ * a function returns -1, an error code will be set for that instance. The
* functions below extract the code of the last error that occurred in the
* specified instance.
*
@@ -160,4 +160,4 @@ int16_t WebRtcCng_GetErrorCodeDec(CNG_dec_inst* cng_inst);
}
#endif
-#endif // WEBRTC_MODULES_AUDIO_CODING_CODECS_CNG_MAIN_INCLUDE_WEBRTC_CNG_H_
+#endif // WEBRTC_MODULES_AUDIO_CODING_CODECS_CNG_WEBRTC_CNG_H_
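Putting the error-code comment above to use; a sketch in which the two trailing WebRtcCng_Generate() parameters (sample count and new-period flag) are assumptions, since only the first two are visible in this hunk:

  int16_t cng_pcm[80];  // One 10 ms frame at 8 kHz.
  if (WebRtcCng_Generate(cng_inst, cng_pcm, 80, 1) == -1) {
    int16_t err = WebRtcCng_GetErrorCodeDec(cng_inst);
    // Map |err| to a diagnostic before tearing the instance down.
  }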
diff --git a/webrtc/modules/audio_coding/codecs/g711/audio_decoder_pcm.cc b/webrtc/modules/audio_coding/codecs/g711/audio_decoder_pcm.cc
index 12306d9167..9757b4a010 100644
--- a/webrtc/modules/audio_coding/codecs/g711/audio_decoder_pcm.cc
+++ b/webrtc/modules/audio_coding/codecs/g711/audio_decoder_pcm.cc
@@ -8,9 +8,9 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/audio_coding/codecs/g711/include/audio_decoder_pcm.h"
+#include "webrtc/modules/audio_coding/codecs/g711/audio_decoder_pcm.h"
-#include "webrtc/modules/audio_coding/codecs/g711/include/g711_interface.h"
+#include "webrtc/modules/audio_coding/codecs/g711/g711_interface.h"
namespace webrtc {
diff --git a/webrtc/modules/audio_coding/codecs/g711/include/audio_decoder_pcm.h b/webrtc/modules/audio_coding/codecs/g711/audio_decoder_pcm.h
index 7bc37d3b7a..9dc3a6fd7a 100644
--- a/webrtc/modules/audio_coding/codecs/g711/include/audio_decoder_pcm.h
+++ b/webrtc/modules/audio_coding/codecs/g711/audio_decoder_pcm.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_G711_INCLUDE_AUDIO_DECODER_PCM_H_
-#define WEBRTC_MODULES_AUDIO_CODING_CODECS_G711_INCLUDE_AUDIO_DECODER_PCM_H_
+#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_G711_AUDIO_DECODER_PCM_H_
+#define WEBRTC_MODULES_AUDIO_CODING_CODECS_G711_AUDIO_DECODER_PCM_H_
#include "webrtc/base/checks.h"
#include "webrtc/modules/audio_coding/codecs/audio_decoder.h"
@@ -60,4 +60,4 @@ class AudioDecoderPcmA final : public AudioDecoder {
} // namespace webrtc
-#endif // WEBRTC_MODULES_AUDIO_CODING_CODECS_G711_INCLUDE_AUDIO_DECODER_PCM_H_
+#endif // WEBRTC_MODULES_AUDIO_CODING_CODECS_G711_AUDIO_DECODER_PCM_H_
diff --git a/webrtc/modules/audio_coding/codecs/g711/audio_encoder_pcm.cc b/webrtc/modules/audio_coding/codecs/g711/audio_encoder_pcm.cc
index dde3cc6799..ff61db8e8d 100644
--- a/webrtc/modules/audio_coding/codecs/g711/audio_encoder_pcm.cc
+++ b/webrtc/modules/audio_coding/codecs/g711/audio_encoder_pcm.cc
@@ -8,27 +8,18 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/audio_coding/codecs/g711/include/audio_encoder_pcm.h"
+#include "webrtc/modules/audio_coding/codecs/g711/audio_encoder_pcm.h"
#include <limits>
#include "webrtc/base/checks.h"
#include "webrtc/common_types.h"
-#include "webrtc/modules/audio_coding/codecs/g711/include/g711_interface.h"
+#include "webrtc/modules/audio_coding/codecs/g711/g711_interface.h"
namespace webrtc {
namespace {
-int16_t NumSamplesPerFrame(int num_channels,
- int frame_size_ms,
- int sample_rate_hz) {
- int samples_per_frame = num_channels * frame_size_ms * sample_rate_hz / 1000;
- RTC_CHECK_LE(samples_per_frame, std::numeric_limits<int16_t>::max())
- << "Frame size too large.";
- return static_cast<int16_t>(samples_per_frame);
-}
-
template <typename T>
typename T::Config CreateConfig(const CodecInst& codec_inst) {
typename T::Config config;
@@ -50,9 +41,8 @@ AudioEncoderPcm::AudioEncoderPcm(const Config& config, int sample_rate_hz)
payload_type_(config.payload_type),
num_10ms_frames_per_packet_(
static_cast<size_t>(config.frame_size_ms / 10)),
- full_frame_samples_(NumSamplesPerFrame(config.num_channels,
- config.frame_size_ms,
- sample_rate_hz_)),
+ full_frame_samples_(
+ config.num_channels * config.frame_size_ms * sample_rate_hz / 1000),
first_timestamp_in_buffer_(0) {
RTC_CHECK_GT(sample_rate_hz, 0) << "Sample rate must be larger than 0 Hz";
RTC_CHECK_EQ(config.frame_size_ms % 10, 0)
@@ -70,7 +60,7 @@ int AudioEncoderPcm::SampleRateHz() const {
return sample_rate_hz_;
}
-int AudioEncoderPcm::NumChannels() const {
+size_t AudioEncoderPcm::NumChannels() const {
return num_channels_;
}
@@ -83,21 +73,19 @@ size_t AudioEncoderPcm::Max10MsFramesInAPacket() const {
}
int AudioEncoderPcm::GetTargetBitrate() const {
- return 8 * BytesPerSample() * SampleRateHz() * NumChannels();
+ return static_cast<int>(
+ 8 * BytesPerSample() * SampleRateHz() * NumChannels());
}
AudioEncoder::EncodedInfo AudioEncoderPcm::EncodeInternal(
uint32_t rtp_timestamp,
- const int16_t* audio,
+ rtc::ArrayView<const int16_t> audio,
size_t max_encoded_bytes,
uint8_t* encoded) {
- const int num_samples = SampleRateHz() / 100 * NumChannels();
if (speech_buffer_.empty()) {
first_timestamp_in_buffer_ = rtp_timestamp;
}
- for (int i = 0; i < num_samples; ++i) {
- speech_buffer_.push_back(audio[i]);
- }
+ speech_buffer_.insert(speech_buffer_.end(), audio.begin(), audio.end());
if (speech_buffer_.size() < full_frame_samples_) {
return EncodedInfo();
}
@@ -125,7 +113,7 @@ size_t AudioEncoderPcmA::EncodeCall(const int16_t* audio,
return WebRtcG711_EncodeA(audio, input_len, encoded);
}
-int AudioEncoderPcmA::BytesPerSample() const {
+size_t AudioEncoderPcmA::BytesPerSample() const {
return 1;
}
@@ -138,7 +126,7 @@ size_t AudioEncoderPcmU::EncodeCall(const int16_t* audio,
return WebRtcG711_EncodeU(audio, input_len, encoded);
}
-int AudioEncoderPcmU::BytesPerSample() const {
+size_t AudioEncoderPcmU::BytesPerSample() const {
return 1;
}
diff --git a/webrtc/modules/audio_coding/codecs/g711/include/audio_encoder_pcm.h b/webrtc/modules/audio_coding/codecs/g711/audio_encoder_pcm.h
index e532f9b1bc..b839488628 100644
--- a/webrtc/modules/audio_coding/codecs/g711/include/audio_encoder_pcm.h
+++ b/webrtc/modules/audio_coding/codecs/g711/audio_encoder_pcm.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_G711_INCLUDE_AUDIO_ENCODER_PCM_H_
-#define WEBRTC_MODULES_AUDIO_CODING_CODECS_G711_INCLUDE_AUDIO_ENCODER_PCM_H_
+#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_G711_AUDIO_ENCODER_PCM_H_
+#define WEBRTC_MODULES_AUDIO_CODING_CODECS_G711_AUDIO_ENCODER_PCM_H_
#include <vector>
@@ -25,7 +25,7 @@ class AudioEncoderPcm : public AudioEncoder {
bool IsOk() const;
int frame_size_ms;
- int num_channels;
+ size_t num_channels;
int payload_type;
protected:
@@ -37,12 +37,12 @@ class AudioEncoderPcm : public AudioEncoder {
size_t MaxEncodedBytes() const override;
int SampleRateHz() const override;
- int NumChannels() const override;
+ size_t NumChannels() const override;
size_t Num10MsFramesInNextPacket() const override;
size_t Max10MsFramesInAPacket() const override;
int GetTargetBitrate() const override;
EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
- const int16_t* audio,
+ rtc::ArrayView<const int16_t> audio,
size_t max_encoded_bytes,
uint8_t* encoded) override;
void Reset() override;
@@ -54,11 +54,11 @@ class AudioEncoderPcm : public AudioEncoder {
size_t input_len,
uint8_t* encoded) = 0;
- virtual int BytesPerSample() const = 0;
+ virtual size_t BytesPerSample() const = 0;
private:
const int sample_rate_hz_;
- const int num_channels_;
+ const size_t num_channels_;
const int payload_type_;
const size_t num_10ms_frames_per_packet_;
const size_t full_frame_samples_;
@@ -83,7 +83,7 @@ class AudioEncoderPcmA final : public AudioEncoderPcm {
size_t input_len,
uint8_t* encoded) override;
- int BytesPerSample() const override;
+ size_t BytesPerSample() const override;
private:
static const int kSampleRateHz = 8000;
@@ -105,7 +105,7 @@ class AudioEncoderPcmU final : public AudioEncoderPcm {
size_t input_len,
uint8_t* encoded) override;
- int BytesPerSample() const override;
+ size_t BytesPerSample() const override;
private:
static const int kSampleRateHz = 8000;
@@ -114,4 +114,4 @@ class AudioEncoderPcmU final : public AudioEncoderPcm {
} // namespace webrtc
-#endif // WEBRTC_MODULES_AUDIO_CODING_CODECS_G711_INCLUDE_AUDIO_ENCODER_PCM_H_
+#endif // WEBRTC_MODULES_AUDIO_CODING_CODECS_G711_AUDIO_ENCODER_PCM_H_
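The GetTargetBitrate() arithmetic above, worked through for mono G.711 with the values visible in this diff (BytesPerSample() == 1, 8000 Hz, one channel):

  static_assert(8 /* bits/byte */ * 1 /* byte/sample */ *
                        8000 /* samples/s */ * 1 /* channel */ ==
                    64000,
                "mono G.711 targets the canonical 64 kbit/s");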
diff --git a/webrtc/modules/audio_coding/codecs/g711/g711.gypi b/webrtc/modules/audio_coding/codecs/g711/g711.gypi
index d35d7874e7..4b902809ea 100644
--- a/webrtc/modules/audio_coding/codecs/g711/g711.gypi
+++ b/webrtc/modules/audio_coding/codecs/g711/g711.gypi
@@ -14,25 +14,15 @@
'dependencies': [
'audio_encoder_interface',
],
- 'include_dirs': [
- 'include',
- '<(webrtc_root)',
- ],
- 'direct_dependent_settings': {
- 'include_dirs': [
- 'include',
- '<(webrtc_root)',
- ],
- },
'sources': [
- 'include/g711_interface.h',
- 'include/audio_decoder_pcm.h',
- 'include/audio_encoder_pcm.h',
+ 'audio_decoder_pcm.cc',
+ 'audio_decoder_pcm.h',
+ 'audio_encoder_pcm.cc',
+ 'audio_encoder_pcm.h',
'g711_interface.c',
+ 'g711_interface.h',
'g711.c',
'g711.h',
- 'audio_decoder_pcm.cc',
- 'audio_encoder_pcm.cc',
],
},
], # targets
diff --git a/webrtc/modules/audio_coding/codecs/g711/include/g711_interface.h b/webrtc/modules/audio_coding/codecs/g711/g711_interface.h
index f9867f4504..00854bbb2c 100644
--- a/webrtc/modules/audio_coding/codecs/g711/include/g711_interface.h
+++ b/webrtc/modules/audio_coding/codecs/g711/g711_interface.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef MODULES_AUDIO_CODING_CODECS_G711_MAIN_INCLUDE_G711_INTERFACE_H_
-#define MODULES_AUDIO_CODING_CODECS_G711_MAIN_INCLUDE_G711_INTERFACE_H_
+#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_G711_G711_INTERFACE_H_
+#define WEBRTC_MODULES_AUDIO_CODING_CODECS_G711_G711_INTERFACE_H_
#include "webrtc/typedefs.h"
@@ -132,4 +132,4 @@ int16_t WebRtcG711_Version(char* version, int16_t lenBytes);
}
#endif
-#endif /* MODULES_AUDIO_CODING_CODECS_G711_MAIN_INCLUDE_G711_INCLUDE_H_ */
+#endif // WEBRTC_MODULES_AUDIO_CODING_CODECS_G711_G711_INTERFACE_H_
diff --git a/webrtc/modules/audio_coding/codecs/g711/test/testG711.cc b/webrtc/modules/audio_coding/codecs/g711/test/testG711.cc
index 94248f7a66..5675b1f8b0 100644
--- a/webrtc/modules/audio_coding/codecs/g711/test/testG711.cc
+++ b/webrtc/modules/audio_coding/codecs/g711/test/testG711.cc
@@ -17,7 +17,7 @@
#include <string.h>
/* include API */
-#include "g711_interface.h"
+#include "webrtc/modules/audio_coding/codecs/g711/g711_interface.h"
/* Runtime statistics */
#include <time.h>
diff --git a/webrtc/modules/audio_coding/codecs/g722/audio_decoder_g722.cc b/webrtc/modules/audio_coding/codecs/g722/audio_decoder_g722.cc
index 55ebe7a315..7676e90d9e 100644
--- a/webrtc/modules/audio_coding/codecs/g722/audio_decoder_g722.cc
+++ b/webrtc/modules/audio_coding/codecs/g722/audio_decoder_g722.cc
@@ -8,12 +8,12 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/audio_coding/codecs/g722/include/audio_decoder_g722.h"
+#include "webrtc/modules/audio_coding/codecs/g722/audio_decoder_g722.h"
#include <string.h>
#include "webrtc/base/checks.h"
-#include "webrtc/modules/audio_coding/codecs/g722/include/g722_interface.h"
+#include "webrtc/modules/audio_coding/codecs/g722/g722_interface.h"
namespace webrtc {
diff --git a/webrtc/modules/audio_coding/codecs/g722/include/audio_decoder_g722.h b/webrtc/modules/audio_coding/codecs/g722/audio_decoder_g722.h
index b9fa68fc48..7cc2ea9877 100644
--- a/webrtc/modules/audio_coding/codecs/g722/include/audio_decoder_g722.h
+++ b/webrtc/modules/audio_coding/codecs/g722/audio_decoder_g722.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_G722_INCLUDE_AUDIO_DECODER_G722_H_
-#define WEBRTC_MODULES_AUDIO_CODING_CODECS_G722_INCLUDE_AUDIO_DECODER_G722_H_
+#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_G722_AUDIO_DECODER_G722_H_
+#define WEBRTC_MODULES_AUDIO_CODING_CODECS_G722_AUDIO_DECODER_G722_H_
#include "webrtc/modules/audio_coding/codecs/audio_decoder.h"
@@ -69,4 +69,4 @@ class AudioDecoderG722Stereo final : public AudioDecoder {
} // namespace webrtc
-#endif // WEBRTC_MODULES_AUDIO_CODING_CODECS_G722_INCLUDE_AUDIO_DECODER_G722_H_
+#endif // WEBRTC_MODULES_AUDIO_CODING_CODECS_G722_AUDIO_DECODER_G722_H_
diff --git a/webrtc/modules/audio_coding/codecs/g722/audio_encoder_g722.cc b/webrtc/modules/audio_coding/codecs/g722/audio_encoder_g722.cc
index 43b097fa0e..d7203b9da3 100644
--- a/webrtc/modules/audio_coding/codecs/g722/audio_encoder_g722.cc
+++ b/webrtc/modules/audio_coding/codecs/g722/audio_encoder_g722.cc
@@ -8,12 +8,12 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/audio_coding/codecs/g722/include/audio_encoder_g722.h"
+#include "webrtc/modules/audio_coding/codecs/g722/audio_encoder_g722.h"
#include <limits>
#include "webrtc/base/checks.h"
#include "webrtc/common_types.h"
-#include "webrtc/modules/audio_coding/codecs/g722/include/g722_interface.h"
+#include "webrtc/modules/audio_coding/codecs/g722/g722_interface.h"
namespace webrtc {
@@ -48,7 +48,7 @@ AudioEncoderG722::AudioEncoderG722(const Config& config)
RTC_CHECK(config.IsOk());
const size_t samples_per_channel =
kSampleRateHz / 100 * num_10ms_frames_per_packet_;
- for (int i = 0; i < num_channels_; ++i) {
+ for (size_t i = 0; i < num_channels_; ++i) {
encoders_[i].speech_buffer.reset(new int16_t[samples_per_channel]);
encoders_[i].encoded_buffer.SetSize(samples_per_channel / 2);
}
@@ -68,7 +68,7 @@ int AudioEncoderG722::SampleRateHz() const {
return kSampleRateHz;
}
-int AudioEncoderG722::NumChannels() const {
+size_t AudioEncoderG722::NumChannels() const {
return num_channels_;
}
@@ -88,12 +88,12 @@ size_t AudioEncoderG722::Max10MsFramesInAPacket() const {
int AudioEncoderG722::GetTargetBitrate() const {
// 4 bits/sample, 16000 samples/s/channel.
- return 64000 * NumChannels();
+ return static_cast<int>(64000 * NumChannels());
}
AudioEncoder::EncodedInfo AudioEncoderG722::EncodeInternal(
uint32_t rtp_timestamp,
- const int16_t* audio,
+ rtc::ArrayView<const int16_t> audio,
size_t max_encoded_bytes,
uint8_t* encoded) {
RTC_CHECK_GE(max_encoded_bytes, MaxEncodedBytes());
@@ -104,7 +104,7 @@ AudioEncoder::EncodedInfo AudioEncoderG722::EncodeInternal(
// Deinterleave samples and save them in each channel's buffer.
const size_t start = kSampleRateHz / 100 * num_10ms_frames_buffered_;
for (size_t i = 0; i < kSampleRateHz / 100; ++i)
- for (int j = 0; j < num_channels_; ++j)
+ for (size_t j = 0; j < num_channels_; ++j)
encoders_[j].speech_buffer[start + i] = audio[i * num_channels_ + j];
// If we don't yet have enough samples for a packet, we're done for now.
@@ -116,7 +116,7 @@ AudioEncoder::EncodedInfo AudioEncoderG722::EncodeInternal(
RTC_CHECK_EQ(num_10ms_frames_buffered_, num_10ms_frames_per_packet_);
num_10ms_frames_buffered_ = 0;
const size_t samples_per_channel = SamplesPerChannel();
- for (int i = 0; i < num_channels_; ++i) {
+ for (size_t i = 0; i < num_channels_; ++i) {
const size_t encoded = WebRtcG722_Encode(
encoders_[i].encoder, encoders_[i].speech_buffer.get(),
samples_per_channel, encoders_[i].encoded_buffer.data());
@@ -127,12 +127,12 @@ AudioEncoder::EncodedInfo AudioEncoderG722::EncodeInternal(
// channel and the interleaved stream encodes two samples per byte, most
// significant half first.
for (size_t i = 0; i < samples_per_channel / 2; ++i) {
- for (int j = 0; j < num_channels_; ++j) {
+ for (size_t j = 0; j < num_channels_; ++j) {
uint8_t two_samples = encoders_[j].encoded_buffer.data()[i];
interleave_buffer_.data()[j] = two_samples >> 4;
interleave_buffer_.data()[num_channels_ + j] = two_samples & 0xf;
}
- for (int j = 0; j < num_channels_; ++j)
+ for (size_t j = 0; j < num_channels_; ++j)
encoded[i * num_channels_ + j] = interleave_buffer_.data()[2 * j] << 4 |
interleave_buffer_.data()[2 * j + 1];
}
@@ -145,7 +145,7 @@ AudioEncoder::EncodedInfo AudioEncoderG722::EncodeInternal(
void AudioEncoderG722::Reset() {
num_10ms_frames_buffered_ = 0;
- for (int i = 0; i < num_channels_; ++i)
+ for (size_t i = 0; i < num_channels_; ++i)
RTC_CHECK_EQ(0, WebRtcG722_EncoderInit(encoders_[i].encoder));
}
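The nibble interleaving in EncodeInternal() above is easiest to check with concrete bytes. For stereo, a left-channel byte 0xAB and a right-channel byte 0xCD at the same index are repacked as 0xAC and 0xBD: nibbles alternate left/right while each output byte keeps the most significant half first:

  constexpr uint8_t kLeft = 0xAB, kRight = 0xCD;
  static_assert((((kLeft >> 4) << 4) | (kRight >> 4)) == 0xAC,
                "high halves of L and R share the first byte");
  static_assert((((kLeft & 0xf) << 4) | (kRight & 0xf)) == 0xBD,
                "low halves of L and R share the second byte");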
diff --git a/webrtc/modules/audio_coding/codecs/g722/include/audio_encoder_g722.h b/webrtc/modules/audio_coding/codecs/g722/audio_encoder_g722.h
index 12495c5f48..07d767e778 100644
--- a/webrtc/modules/audio_coding/codecs/g722/include/audio_encoder_g722.h
+++ b/webrtc/modules/audio_coding/codecs/g722/audio_encoder_g722.h
@@ -8,13 +8,13 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_G722_INCLUDE_AUDIO_ENCODER_G722_H_
-#define WEBRTC_MODULES_AUDIO_CODING_CODECS_G722_INCLUDE_AUDIO_ENCODER_G722_H_
+#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_G722_AUDIO_ENCODER_G722_H_
+#define WEBRTC_MODULES_AUDIO_CODING_CODECS_G722_AUDIO_ENCODER_G722_H_
#include "webrtc/base/buffer.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/modules/audio_coding/codecs/audio_encoder.h"
-#include "webrtc/modules/audio_coding/codecs/g722/include/g722_interface.h"
+#include "webrtc/modules/audio_coding/codecs/g722/g722_interface.h"
namespace webrtc {
@@ -27,7 +27,7 @@ class AudioEncoderG722 final : public AudioEncoder {
int payload_type = 9;
int frame_size_ms = 20;
- int num_channels = 1;
+ size_t num_channels = 1;
};
explicit AudioEncoderG722(const Config& config);
@@ -36,13 +36,13 @@ class AudioEncoderG722 final : public AudioEncoder {
size_t MaxEncodedBytes() const override;
int SampleRateHz() const override;
- int NumChannels() const override;
+ size_t NumChannels() const override;
int RtpTimestampRateHz() const override;
size_t Num10MsFramesInNextPacket() const override;
size_t Max10MsFramesInAPacket() const override;
int GetTargetBitrate() const override;
EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
- const int16_t* audio,
+ rtc::ArrayView<const int16_t> audio,
size_t max_encoded_bytes,
uint8_t* encoded) override;
void Reset() override;
@@ -59,7 +59,7 @@ class AudioEncoderG722 final : public AudioEncoder {
size_t SamplesPerChannel() const;
- const int num_channels_;
+ const size_t num_channels_;
const int payload_type_;
const size_t num_10ms_frames_per_packet_;
size_t num_10ms_frames_buffered_;
@@ -70,4 +70,4 @@ class AudioEncoderG722 final : public AudioEncoder {
};
} // namespace webrtc
-#endif // WEBRTC_MODULES_AUDIO_CODING_CODECS_G722_INCLUDE_AUDIO_ENCODER_G722_H_
+#endif // WEBRTC_MODULES_AUDIO_CODING_CODECS_G722_AUDIO_ENCODER_G722_H_
diff --git a/webrtc/modules/audio_coding/codecs/g722/g722.gypi b/webrtc/modules/audio_coding/codecs/g722/g722.gypi
index aad11e3685..756fabe345 100644
--- a/webrtc/modules/audio_coding/codecs/g722/g722.gypi
+++ b/webrtc/modules/audio_coding/codecs/g722/g722.gypi
@@ -13,26 +13,16 @@
'dependencies': [
'audio_encoder_interface',
],
- 'include_dirs': [
- 'include',
- '<(webrtc_root)',
- ],
- 'direct_dependent_settings': {
- 'include_dirs': [
- 'include',
- '<(webrtc_root)',
- ],
- },
'sources': [
'audio_decoder_g722.cc',
+ 'audio_decoder_g722.h',
'audio_encoder_g722.cc',
- 'include/audio_decoder_g722.h',
- 'include/audio_encoder_g722.h',
- 'include/g722_interface.h',
+ 'audio_encoder_g722.h',
'g722_interface.c',
- 'g722_encode.c',
+ 'g722_interface.h',
'g722_decode.c',
'g722_enc_dec.h',
+ 'g722_encode.c',
],
},
], # targets
diff --git a/webrtc/modules/audio_coding/codecs/g722/include/g722_interface.h b/webrtc/modules/audio_coding/codecs/g722/g722_interface.h
index 5a46ef2ad5..b411ef0e8e 100644
--- a/webrtc/modules/audio_coding/codecs/g722/include/g722_interface.h
+++ b/webrtc/modules/audio_coding/codecs/g722/g722_interface.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef MODULES_AUDIO_CODING_CODECS_G722_MAIN_INCLUDE_G722_INTERFACE_H_
-#define MODULES_AUDIO_CODING_CODECS_G722_MAIN_INCLUDE_G722_INTERFACE_H_
+#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_G722_G722_INTERFACE_H_
+#define WEBRTC_MODULES_AUDIO_CODING_CODECS_G722_G722_INTERFACE_H_
#include "webrtc/typedefs.h"
@@ -179,4 +179,4 @@ int16_t WebRtcG722_Version(char *versionStr, short len);
#endif
-#endif /* MODULES_AUDIO_CODING_CODECS_G722_MAIN_INCLUDE_G722_INCLUDE_H_ */
+#endif /* WEBRTC_MODULES_AUDIO_CODING_CODECS_G722_G722_INTERFACE_H_ */
diff --git a/webrtc/modules/audio_coding/codecs/g722/test/testG722.cc b/webrtc/modules/audio_coding/codecs/g722/test/testG722.cc
index b473c138c6..c55a2eb357 100644
--- a/webrtc/modules/audio_coding/codecs/g722/test/testG722.cc
+++ b/webrtc/modules/audio_coding/codecs/g722/test/testG722.cc
@@ -18,7 +18,7 @@
#include "webrtc/typedefs.h"
/* include API */
-#include "g722_interface.h"
+#include "webrtc/modules/audio_coding/codecs/g722/g722_interface.h"
/* Runtime statistics */
#include <time.h>
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/audio_decoder_ilbc.cc b/webrtc/modules/audio_coding/codecs/ilbc/audio_decoder_ilbc.cc
index ba6284f33d..9ae0e1a95e 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/audio_decoder_ilbc.cc
+++ b/webrtc/modules/audio_coding/codecs/ilbc/audio_decoder_ilbc.cc
@@ -8,10 +8,10 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/audio_coding/codecs/ilbc/include/audio_decoder_ilbc.h"
+#include "webrtc/modules/audio_coding/codecs/ilbc/audio_decoder_ilbc.h"
#include "webrtc/base/checks.h"
-#include "webrtc/modules/audio_coding/codecs/ilbc/include/ilbc.h"
+#include "webrtc/modules/audio_coding/codecs/ilbc/ilbc.h"
namespace webrtc {
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/include/audio_decoder_ilbc.h b/webrtc/modules/audio_coding/codecs/ilbc/audio_decoder_ilbc.h
index fd52da7986..e890635da0 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/include/audio_decoder_ilbc.h
+++ b/webrtc/modules/audio_coding/codecs/ilbc/audio_decoder_ilbc.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_INCLUDE_AUDIO_DECODER_ILBC_H_
-#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_INCLUDE_AUDIO_DECODER_ILBC_H_
+#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_AUDIO_DECODER_ILBC_H_
+#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_AUDIO_DECODER_ILBC_H_
#include "webrtc/modules/audio_coding/codecs/audio_decoder.h"
@@ -39,4 +39,4 @@ class AudioDecoderIlbc final : public AudioDecoder {
};
} // namespace webrtc
-#endif // WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_INCLUDE_AUDIO_DECODER_ILBC_H_
+#endif // WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_AUDIO_DECODER_ILBC_H_
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.cc b/webrtc/modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.cc
index 065dc06817..ddd6dde31c 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.cc
+++ b/webrtc/modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.cc
@@ -8,13 +8,13 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/audio_coding/codecs/ilbc/include/audio_encoder_ilbc.h"
+#include "webrtc/modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.h"
-#include <cstring>
+#include <algorithm>
#include <limits>
#include "webrtc/base/checks.h"
#include "webrtc/common_types.h"
-#include "webrtc/modules/audio_coding/codecs/ilbc/include/ilbc.h"
+#include "webrtc/modules/audio_coding/codecs/ilbc/ilbc.h"
namespace webrtc {
@@ -64,7 +64,7 @@ int AudioEncoderIlbc::SampleRateHz() const {
return kSampleRateHz;
}
-int AudioEncoderIlbc::NumChannels() const {
+size_t AudioEncoderIlbc::NumChannels() const {
return 1;
}
@@ -91,7 +91,7 @@ int AudioEncoderIlbc::GetTargetBitrate() const {
AudioEncoder::EncodedInfo AudioEncoderIlbc::EncodeInternal(
uint32_t rtp_timestamp,
- const int16_t* audio,
+ rtc::ArrayView<const int16_t> audio,
size_t max_encoded_bytes,
uint8_t* encoded) {
RTC_DCHECK_GE(max_encoded_bytes, RequiredOutputSizeBytes());
@@ -101,9 +101,9 @@ AudioEncoder::EncodedInfo AudioEncoderIlbc::EncodeInternal(
first_timestamp_in_buffer_ = rtp_timestamp;
// Buffer input.
- std::memcpy(input_buffer_ + kSampleRateHz / 100 * num_10ms_frames_buffered_,
- audio,
- kSampleRateHz / 100 * sizeof(audio[0]));
+ RTC_DCHECK_EQ(static_cast<size_t>(kSampleRateHz / 100), audio.size());
+ std::copy(audio.cbegin(), audio.cend(),
+ input_buffer_ + kSampleRateHz / 100 * num_10ms_frames_buffered_);
// If we don't yet have enough buffered input for a whole packet, we're done
// for now.
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/include/audio_encoder_ilbc.h b/webrtc/modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.h
index 2bb3101fd4..102a274642 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/include/audio_encoder_ilbc.h
+++ b/webrtc/modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.h
@@ -8,12 +8,12 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_INCLUDE_AUDIO_ENCODER_ILBC_H_
-#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_INCLUDE_AUDIO_ENCODER_ILBC_H_
+#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_AUDIO_ENCODER_ILBC_H_
+#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_AUDIO_ENCODER_ILBC_H_
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/modules/audio_coding/codecs/audio_encoder.h"
-#include "webrtc/modules/audio_coding/codecs/ilbc/include/ilbc.h"
+#include "webrtc/modules/audio_coding/codecs/ilbc/ilbc.h"
namespace webrtc {
@@ -36,12 +36,12 @@ class AudioEncoderIlbc final : public AudioEncoder {
size_t MaxEncodedBytes() const override;
int SampleRateHz() const override;
- int NumChannels() const override;
+ size_t NumChannels() const override;
size_t Num10MsFramesInNextPacket() const override;
size_t Max10MsFramesInAPacket() const override;
int GetTargetBitrate() const override;
EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
- const int16_t* audio,
+ rtc::ArrayView<const int16_t> audio,
size_t max_encoded_bytes,
uint8_t* encoded) override;
void Reset() override;
@@ -60,4 +60,4 @@ class AudioEncoderIlbc final : public AudioEncoder {
};
} // namespace webrtc
-#endif // WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_INCLUDE_AUDIO_ENCODER_ILBC_H_
+#endif // WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_AUDIO_ENCODER_ILBC_H_
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/ilbc.gypi b/webrtc/modules/audio_coding/codecs/ilbc/ilbc.gypi
index ac9f2e7b39..ffb0574588 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/ilbc.gypi
+++ b/webrtc/modules/audio_coding/codecs/ilbc/ilbc.gypi
@@ -15,24 +15,13 @@
'<(webrtc_root)/common_audio/common_audio.gyp:common_audio',
'audio_encoder_interface',
],
- 'include_dirs': [
- 'include',
- '<(webrtc_root)',
- ],
- 'direct_dependent_settings': {
- 'include_dirs': [
- 'include',
- '<(webrtc_root)',
- ],
- },
'sources': [
- 'include/audio_decoder_ilbc.h',
- 'include/audio_encoder_ilbc.h',
- 'include/ilbc.h',
'abs_quant.c',
'abs_quant_loop.c',
'audio_decoder_ilbc.cc',
+ 'audio_decoder_ilbc.h',
'audio_encoder_ilbc.cc',
+ 'audio_encoder_ilbc.h',
'augmented_cb_corr.c',
'bw_expand.c',
'cb_construct.c',
@@ -65,6 +54,7 @@
'hp_input.c',
'hp_output.c',
'ilbc.c',
+ 'ilbc.h',
'index_conv_dec.c',
'index_conv_enc.c',
'init_decode.c',
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/include/ilbc.h b/webrtc/modules/audio_coding/codecs/ilbc/ilbc.h
index 3be9142c8c..c021f5be52 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/include/ilbc.h
+++ b/webrtc/modules/audio_coding/codecs/ilbc/ilbc.h
@@ -15,8 +15,8 @@
*
*/
-#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_INCLUDE_ILBC_H_
-#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_INCLUDE_ILBC_H_
+#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_ILBC_H_
+#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_ILBC_H_
#include <stddef.h>
@@ -53,10 +53,10 @@ extern "C" {
* memory location
*
* Input:
- * - XXX_xxxinst : Pointer to created instance that should be
- * assigned
- * - ILBCXXX_inst_Addr : Pointer to the desired memory space
- * - size : The size that this structure occupies (in Word16)
+ * - XXX_xxxinst : Pointer to the created instance that should be
+ * assigned
+ * - ILBCXXX_inst_Addr : Pointer to the desired memory space
+ * - size : The size that this structure occupies (in Word16)
*
* Return value : 0 - Ok
* -1 - Error
@@ -76,10 +76,10 @@ extern "C" {
* These functions create an instance of the specified structure
*
* Input:
- * - XXX_inst : Pointer to created instance that should be created
+ * - XXX_inst : Pointer to the instance that should be created
*
- * Return value : 0 - Ok
- * -1 - Error
+ * Return value : 0 - Ok
+ * -1 - Error
*/
int16_t WebRtcIlbcfix_EncoderCreate(IlbcEncoderInstance **iLBC_encinst);
@@ -255,4 +255,4 @@ extern "C" {
}
#endif
-#endif
+#endif // WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_ILBC_H_
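Pairing the create call declared above with its documented return convention (0 for ok, -1 for error):

  IlbcEncoderInstance* encoder = NULL;
  if (WebRtcIlbcfix_EncoderCreate(&encoder) != 0) {
    /* Creation failed; |encoder| must not be used. */
  }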
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/test/iLBC_test.c b/webrtc/modules/audio_coding/codecs/ilbc/test/iLBC_test.c
index 1199c816d8..b440c7a45f 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/test/iLBC_test.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/test/iLBC_test.c
@@ -19,7 +19,7 @@
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
-#include "ilbc.h"
+#include "webrtc/modules/audio_coding/codecs/ilbc/ilbc.h"
/*---------------------------------------------------------------*
* Main program to test iLBC encoding and decoding
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/test/iLBC_testLib.c b/webrtc/modules/audio_coding/codecs/ilbc/test/iLBC_testLib.c
index f14192c2ae..7ffa4a7d0e 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/test/iLBC_testLib.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/test/iLBC_testLib.c
@@ -21,7 +21,7 @@ iLBC_test.c
#include <stdio.h>
#include <string.h>
#include <time.h>
-#include "ilbc.h"
+#include "webrtc/modules/audio_coding/codecs/ilbc/ilbc.h"
//#define JUNK_DATA
#ifdef JUNK_DATA
diff --git a/webrtc/modules/audio_coding/codecs/ilbc/test/iLBC_testprogram.c b/webrtc/modules/audio_coding/codecs/ilbc/test/iLBC_testprogram.c
index 303ede3e63..5454948287 100644
--- a/webrtc/modules/audio_coding/codecs/ilbc/test/iLBC_testprogram.c
+++ b/webrtc/modules/audio_coding/codecs/ilbc/test/iLBC_testprogram.c
@@ -21,13 +21,13 @@
#include <stdio.h>
#include <string.h>
-#include "defines.h"
-#include "nit_encode.h"
-#include "encode.h"
-#include "init_decode.h"
-#include "decode.h"
-#include "constants.h"
-#include "ilbc.h"
+#include "webrtc/modules/audio_coding/codecs/ilbc/defines.h"
+#include "webrtc/modules/audio_coding/codecs/ilbc/nit_encode.h"
+#include "webrtc/modules/audio_coding/codecs/ilbc/encode.h"
+#include "webrtc/modules/audio_coding/codecs/ilbc/init_decode.h"
+#include "webrtc/modules/audio_coding/codecs/ilbc/decode.h"
+#include "webrtc/modules/audio_coding/codecs/ilbc/constants.h"
+#include "webrtc/modules/audio_coding/codecs/ilbc/ilbc.h"
#define ILBCNOOFWORDS_MAX (NO_OF_BYTES_30MS)/2
diff --git a/webrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t.h b/webrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t.h
index b15ad942df..321dac3567 100644
--- a/webrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t.h
+++ b/webrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t.h
@@ -56,12 +56,12 @@ class AudioEncoderIsacT final : public AudioEncoder {
size_t MaxEncodedBytes() const override;
int SampleRateHz() const override;
- int NumChannels() const override;
+ size_t NumChannels() const override;
size_t Num10MsFramesInNextPacket() const override;
size_t Max10MsFramesInAPacket() const override;
int GetTargetBitrate() const override;
EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
- const int16_t* audio,
+ rtc::ArrayView<const int16_t> audio,
size_t max_encoded_bytes,
uint8_t* encoded) override;
void Reset() override;
diff --git a/webrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t_impl.h b/webrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t_impl.h
index 279f80d6fc..d4438cc775 100644
--- a/webrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t_impl.h
+++ b/webrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t_impl.h
@@ -88,7 +88,7 @@ int AudioEncoderIsacT<T>::SampleRateHz() const {
}
template <typename T>
-int AudioEncoderIsacT<T>::NumChannels() const {
+size_t AudioEncoderIsacT<T>::NumChannels() const {
return 1;
}
@@ -115,7 +115,7 @@ int AudioEncoderIsacT<T>::GetTargetBitrate() const {
template <typename T>
AudioEncoder::EncodedInfo AudioEncoderIsacT<T>::EncodeInternal(
uint32_t rtp_timestamp,
- const int16_t* audio,
+ rtc::ArrayView<const int16_t> audio,
size_t max_encoded_bytes,
uint8_t* encoded) {
if (!packet_in_progress_) {
@@ -127,7 +127,7 @@ AudioEncoder::EncodedInfo AudioEncoderIsacT<T>::EncodeInternal(
IsacBandwidthInfo bwinfo = bwinfo_->Get();
T::SetBandwidthInfo(isac_state_, &bwinfo);
}
- int r = T::Encode(isac_state_, audio, encoded);
+ int r = T::Encode(isac_state_, audio.data(), encoded);
RTC_CHECK_GE(r, 0) << "Encode failed (error code "
<< T::GetErrorCode(isac_state_) << ")";
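Note how the view is unwrapped only at the FFI boundary: the C codec entry
point still takes a raw pointer, so T::Encode receives audio.data(), while the
size travels with the view everywhere above that call.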
diff --git a/webrtc/modules/audio_coding/codecs/isac/fix/test/isac_speed_test.cc b/webrtc/modules/audio_coding/codecs/isac/fix/test/isac_speed_test.cc
index 632a4fe825..32f36c5261 100644
--- a/webrtc/modules/audio_coding/codecs/isac/fix/test/isac_speed_test.cc
+++ b/webrtc/modules/audio_coding/codecs/isac/fix/test/isac_speed_test.cc
@@ -92,7 +92,7 @@ float IsacSpeedTest::DecodeABlock(const uint8_t* bit_stream,
value = WebRtcIsacfix_Decode(ISACFIX_main_inst_, bit_stream, encoded_bytes,
out_data, &audio_type);
clocks = clock() - clocks;
- EXPECT_EQ(output_length_sample_, value);
+ EXPECT_EQ(output_length_sample_, static_cast<size_t>(value));
return 1000.0 * clocks / CLOCKS_PER_SEC;
}
diff --git a/webrtc/modules/audio_coding/codecs/isac/fix/test/test_iSACfixfloat.c b/webrtc/modules/audio_coding/codecs/isac/fix/test/test_iSACfixfloat.c
index b82af1c059..ac0fa350c9 100644
--- a/webrtc/modules/audio_coding/codecs/isac/fix/test/test_iSACfixfloat.c
+++ b/webrtc/modules/audio_coding/codecs/isac/fix/test/test_iSACfixfloat.c
@@ -112,12 +112,12 @@ int main(int argc, char* argv[]) {
char version_number[20];
int mode = -1, tmp, nbTest = 0; /*,sss;*/
-#ifdef _DEBUG
+#if !defined(NDEBUG)
FILE* fy;
double kbps;
size_t totalbits = 0;
int totalsmpls = 0;
-#endif /* _DEBUG */
+#endif
/* only one structure used for ISAC encoder */
ISAC_MainStruct* ISAC_main_inst;
@@ -126,12 +126,12 @@ int main(int argc, char* argv[]) {
BottleNeckModel BN_data;
f_bn = NULL;
-#ifdef _DEBUG
+#if !defined(NDEBUG)
fy = fopen("bit_rate.dat", "w");
fclose(fy);
fy = fopen("bytes_frames.dat", "w");
fclose(fy);
-#endif /* _DEBUG */
+#endif
// histfile = fopen("histo.dat", "ab");
// ratefile = fopen("rates.dat", "ab");
@@ -589,7 +589,7 @@ int main(int argc, char* argv[]) {
fprintf(stderr, " \rframe = %d", framecnt);
framecnt++;
-#ifdef _DEBUG
+#if !defined(NDEBUG)
totalsmpls += declen;
totalbits += 8 * stream_len;
@@ -598,15 +598,15 @@ int main(int argc, char* argv[]) {
fprintf(fy, "Frame %i = %0.14f\n", framecnt, kbps);
fclose(fy);
-#endif /* _DEBUG */
+#endif
}
-#ifdef _DEBUG
+#if !defined(NDEBUG)
printf("\n\ntotal bits = %" PRIuS " bits", totalbits);
printf("\nmeasured average bitrate = %0.3f kbits/s",
(double)totalbits * (FS / 1000) / totalsmpls);
printf("\n");
-#endif /* _DEBUG */
+#endif
/* Runtime statistics */
runtime = (double)(clock() / (double)CLOCKS_PER_SEC - starttime);
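The #ifdef _DEBUG blocks in this test are rewritten as #if !defined(NDEBUG)
because _DEBUG is an MSVC convention, whereas NDEBUG is the standard macro
that also governs assert(), so the debug-only bookkeeping now compiles
consistently on every toolchain. A minimal sketch of the portable pattern:

    #include <assert.h>

    #if !defined(NDEBUG)
    /* Debug-only statistics: compiled out whenever NDEBUG is defined,
     * matching the conditions under which assert() is active. */
    static long total_bits = 0;
    #endif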
diff --git a/webrtc/modules/audio_coding/codecs/isac/isac_test.gypi b/webrtc/modules/audio_coding/codecs/isac/isac_test.gypi
index 47944b7f42..54cedb4e18 100644
--- a/webrtc/modules/audio_coding/codecs/isac/isac_test.gypi
+++ b/webrtc/modules/audio_coding/codecs/isac/isac_test.gypi
@@ -25,6 +25,19 @@
'./main/test/simpleKenny.c',
'./main/util/utility.c',
],
+ 'conditions': [
+ ['OS=="win" and clang==1', {
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'AdditionalOptions': [
+ # Disable warnings failing when compiling with Clang on Windows.
+ # https://bugs.chromium.org/p/webrtc/issues/detail?id=5366
+ '-Wno-format',
+ ],
+ },
+ },
+ }],
+ ], # conditions.
},
# ReleaseTest-API
{
@@ -63,6 +76,5 @@
'./main/util/utility.c',
],
},
-
],
}
diff --git a/webrtc/modules/audio_coding/codecs/isac/isacfix.gypi b/webrtc/modules/audio_coding/codecs/isac/isacfix.gypi
index f10de56c5a..7730d16dc9 100644
--- a/webrtc/modules/audio_coding/codecs/isac/isacfix.gypi
+++ b/webrtc/modules/audio_coding/codecs/isac/isacfix.gypi
@@ -77,11 +77,6 @@
'fix/source/structs.h',
],
'conditions': [
- ['OS!="win"', {
- 'defines': [
- 'WEBRTC_LINUX',
- ],
- }],
['target_arch=="arm" and arm_version>=7', {
'sources': [
'fix/source/lattice_armv7.S',
diff --git a/webrtc/modules/audio_coding/codecs/isac/main/test/ReleaseTest-API/ReleaseTest-API.cc b/webrtc/modules/audio_coding/codecs/isac/main/test/ReleaseTest-API/ReleaseTest-API.cc
index 2e5badd82c..4cef8f7b3b 100644
--- a/webrtc/modules/audio_coding/codecs/isac/main/test/ReleaseTest-API/ReleaseTest-API.cc
+++ b/webrtc/modules/audio_coding/codecs/isac/main/test/ReleaseTest-API/ReleaseTest-API.cc
@@ -73,10 +73,10 @@ int main(int argc, char* argv[]) {
FILE* plFile;
int32_t sendBN;
-#ifdef _DEBUG
+#if !defined(NDEBUG)
FILE* fy;
double kbps;
-#endif /* _DEBUG */
+#endif
size_t totalbits = 0;
int totalsmpls = 0;
@@ -103,12 +103,12 @@ int main(int argc, char* argv[]) {
BottleNeckModel BN_data;
-#ifdef _DEBUG
+#if !defined(NDEBUG)
fy = fopen("bit_rate.dat", "w");
fclose(fy);
fy = fopen("bytes_frames.dat", "w");
fclose(fy);
-#endif /* _DEBUG */
+#endif
/* Handling wrong input arguments in the command line */
if ((argc < 3) || (argc > 17)) {
@@ -885,14 +885,14 @@ int main(int argc, char* argv[]) {
totalsmpls += declen;
totalbits += 8 * stream_len;
-#ifdef _DEBUG
+#if !defined(NDEBUG)
kbps = ((double)sampFreqKHz * 1000.) / ((double)cur_framesmpls) * 8.0 *
stream_len / 1000.0; // kbits/s
fy = fopen("bit_rate.dat", "a");
fprintf(fy, "Frame %i = %0.14f\n", framecnt, kbps);
fclose(fy);
-#endif /* _DEBUG */
+#endif
}
printf("\n");
printf("total bits = %" PRIuS " bits\n", totalbits);
diff --git a/webrtc/modules/audio_coding/codecs/mock/mock_audio_encoder.h b/webrtc/modules/audio_coding/codecs/mock/mock_audio_encoder.h
index 95426d89e1..66adde4be1 100644
--- a/webrtc/modules/audio_coding/codecs/mock/mock_audio_encoder.h
+++ b/webrtc/modules/audio_coding/codecs/mock/mock_audio_encoder.h
@@ -24,7 +24,7 @@ class MockAudioEncoder final : public AudioEncoder {
MOCK_METHOD1(Mark, void(std::string desc));
MOCK_CONST_METHOD0(MaxEncodedBytes, size_t());
MOCK_CONST_METHOD0(SampleRateHz, int());
- MOCK_CONST_METHOD0(NumChannels, int());
+ MOCK_CONST_METHOD0(NumChannels, size_t());
MOCK_CONST_METHOD0(RtpTimestampRateHz, int());
MOCK_CONST_METHOD0(Num10MsFramesInNextPacket, size_t());
MOCK_CONST_METHOD0(Max10MsFramesInAPacket, size_t());
@@ -32,7 +32,7 @@ class MockAudioEncoder final : public AudioEncoder {
  // Note: we explicitly chose not to create a mock for the Encode method.
MOCK_METHOD4(EncodeInternal,
EncodedInfo(uint32_t timestamp,
- const int16_t* audio,
+ rtc::ArrayView<const int16_t> audio,
size_t max_encoded_bytes,
uint8_t* encoded));
MOCK_METHOD0(Reset, void());
diff --git a/webrtc/modules/audio_coding/codecs/opus/audio_decoder_opus.cc b/webrtc/modules/audio_coding/codecs/opus/audio_decoder_opus.cc
index d1390e2ca4..f64e811afe 100644
--- a/webrtc/modules/audio_coding/codecs/opus/audio_decoder_opus.cc
+++ b/webrtc/modules/audio_coding/codecs/opus/audio_decoder_opus.cc
@@ -8,7 +8,7 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/audio_coding/codecs/opus/include/audio_decoder_opus.h"
+#include "webrtc/modules/audio_coding/codecs/opus/audio_decoder_opus.h"
#include "webrtc/base/checks.h"
@@ -17,7 +17,7 @@ namespace webrtc {
AudioDecoderOpus::AudioDecoderOpus(size_t num_channels)
: channels_(num_channels) {
RTC_DCHECK(num_channels == 1 || num_channels == 2);
- WebRtcOpus_DecoderCreate(&dec_state_, static_cast<int>(channels_));
+ WebRtcOpus_DecoderCreate(&dec_state_, channels_);
WebRtcOpus_DecoderInit(dec_state_);
}
diff --git a/webrtc/modules/audio_coding/codecs/opus/include/audio_decoder_opus.h b/webrtc/modules/audio_coding/codecs/opus/audio_decoder_opus.h
index 6b0a88ae97..af32a84512 100644
--- a/webrtc/modules/audio_coding/codecs/opus/include/audio_decoder_opus.h
+++ b/webrtc/modules/audio_coding/codecs/opus/audio_decoder_opus.h
@@ -8,11 +8,11 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_OPUS_INCLUDE_AUDIO_DECODER_OPUS_H
-#define WEBRTC_MODULES_AUDIO_CODING_CODECS_OPUS_INCLUDE_AUDIO_DECODER_OPUS_H
+#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_OPUS_AUDIO_DECODER_OPUS_H_
+#define WEBRTC_MODULES_AUDIO_CODING_CODECS_OPUS_AUDIO_DECODER_OPUS_H_
#include "webrtc/modules/audio_coding/codecs/audio_decoder.h"
-#include "webrtc/modules/audio_coding/codecs/opus/include/opus_interface.h"
+#include "webrtc/modules/audio_coding/codecs/opus/opus_interface.h"
namespace webrtc {
@@ -48,4 +48,4 @@ class AudioDecoderOpus final : public AudioDecoder {
} // namespace webrtc
-#endif // WEBRTC_MODULES_AUDIO_CODING_CODECS_OPUS_INCLUDE_AUDIO_DECODER_OPUS_H
+#endif // WEBRTC_MODULES_AUDIO_CODING_CODECS_OPUS_AUDIO_DECODER_OPUS_H_
diff --git a/webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.cc b/webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.cc
index eac7412178..707d6c2488 100644
--- a/webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.cc
+++ b/webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.cc
@@ -8,12 +8,12 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/audio_coding/codecs/opus/include/audio_encoder_opus.h"
+#include "webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.h"
#include "webrtc/base/checks.h"
#include "webrtc/base/safe_conversions.h"
#include "webrtc/common_types.h"
-#include "webrtc/modules/audio_coding/codecs/opus/include/opus_interface.h"
+#include "webrtc/modules/audio_coding/codecs/opus/opus_interface.h"
namespace webrtc {
@@ -114,7 +114,7 @@ int AudioEncoderOpus::SampleRateHz() const {
return kSampleRateHz;
}
-int AudioEncoderOpus::NumChannels() const {
+size_t AudioEncoderOpus::NumChannels() const {
return config_.num_channels;
}
@@ -132,24 +132,22 @@ int AudioEncoderOpus::GetTargetBitrate() const {
AudioEncoder::EncodedInfo AudioEncoderOpus::EncodeInternal(
uint32_t rtp_timestamp,
- const int16_t* audio,
+ rtc::ArrayView<const int16_t> audio,
size_t max_encoded_bytes,
uint8_t* encoded) {
if (input_buffer_.empty())
first_timestamp_in_buffer_ = rtp_timestamp;
- input_buffer_.insert(input_buffer_.end(), audio,
- audio + SamplesPer10msFrame());
+ RTC_DCHECK_EQ(SamplesPer10msFrame(), audio.size());
+ input_buffer_.insert(input_buffer_.end(), audio.cbegin(), audio.cend());
if (input_buffer_.size() <
- (static_cast<size_t>(Num10msFramesPerPacket()) * SamplesPer10msFrame())) {
+ (Num10msFramesPerPacket() * SamplesPer10msFrame())) {
return EncodedInfo();
}
- RTC_CHECK_EQ(
- input_buffer_.size(),
- static_cast<size_t>(Num10msFramesPerPacket()) * SamplesPer10msFrame());
+ RTC_CHECK_EQ(input_buffer_.size(),
+ Num10msFramesPerPacket() * SamplesPer10msFrame());
int status = WebRtcOpus_Encode(
inst_, &input_buffer_[0],
- rtc::CheckedDivExact(input_buffer_.size(),
- static_cast<size_t>(config_.num_channels)),
+ rtc::CheckedDivExact(input_buffer_.size(), config_.num_channels),
rtc::saturated_cast<int16_t>(max_encoded_bytes), encoded);
RTC_CHECK_GE(status, 0); // Fails only if fed invalid data.
input_buffer_.clear();
@@ -214,11 +212,11 @@ void AudioEncoderOpus::SetTargetBitrate(int bits_per_second) {
RTC_CHECK_EQ(0, WebRtcOpus_SetBitRate(inst_, config_.bitrate_bps));
}
-int AudioEncoderOpus::Num10msFramesPerPacket() const {
- return rtc::CheckedDivExact(config_.frame_size_ms, 10);
+size_t AudioEncoderOpus::Num10msFramesPerPacket() const {
+ return static_cast<size_t>(rtc::CheckedDivExact(config_.frame_size_ms, 10));
}
-int AudioEncoderOpus::SamplesPer10msFrame() const {
+size_t AudioEncoderOpus::SamplesPer10msFrame() const {
return rtc::CheckedDivExact(kSampleRateHz, 100) * config_.num_channels;
}
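The reworked EncodeInternal makes the buffering contract explicit: each call
must deliver exactly one 10 ms frame (enforced by the new RTC_DCHECK_EQ), and
a packet is produced only once Num10msFramesPerPacket() *
SamplesPer10msFrame() samples have accumulated. Assuming kSampleRateHz is
48000, as Opus uses elsewhere in this patch, and the Config defaults from the
header below (frame_size_ms = 20, num_channels = 1), the threshold works out
to:

    SamplesPer10msFrame()    = 48000 / 100 * 1 = 480 samples
    Num10msFramesPerPacket() = 20 / 10         = 2 frames
    packet threshold         = 2 * 480         = 960 samples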
diff --git a/webrtc/modules/audio_coding/codecs/opus/include/audio_encoder_opus.h b/webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.h
index 7f2b563fd9..59c8f796ee 100644
--- a/webrtc/modules/audio_coding/codecs/opus/include/audio_encoder_opus.h
+++ b/webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.h
@@ -8,13 +8,13 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_OPUS_INCLUDE_AUDIO_ENCODER_OPUS_H_
-#define WEBRTC_MODULES_AUDIO_CODING_CODECS_OPUS_INCLUDE_AUDIO_ENCODER_OPUS_H_
+#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_OPUS_AUDIO_ENCODER_OPUS_H_
+#define WEBRTC_MODULES_AUDIO_CODING_CODECS_OPUS_AUDIO_ENCODER_OPUS_H_
#include <vector>
#include "webrtc/base/constructormagic.h"
-#include "webrtc/modules/audio_coding/codecs/opus/include/opus_interface.h"
+#include "webrtc/modules/audio_coding/codecs/opus/opus_interface.h"
#include "webrtc/modules/audio_coding/codecs/audio_encoder.h"
namespace webrtc {
@@ -31,7 +31,7 @@ class AudioEncoderOpus final : public AudioEncoder {
struct Config {
bool IsOk() const;
int frame_size_ms = 20;
- int num_channels = 1;
+ size_t num_channels = 1;
int payload_type = 120;
ApplicationMode application = kVoip;
int bitrate_bps = 64000;
@@ -56,13 +56,13 @@ class AudioEncoderOpus final : public AudioEncoder {
size_t MaxEncodedBytes() const override;
int SampleRateHz() const override;
- int NumChannels() const override;
+ size_t NumChannels() const override;
size_t Num10MsFramesInNextPacket() const override;
size_t Max10MsFramesInAPacket() const override;
int GetTargetBitrate() const override;
EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
- const int16_t* audio,
+ rtc::ArrayView<const int16_t> audio,
size_t max_encoded_bytes,
uint8_t* encoded) override;
@@ -85,8 +85,8 @@ class AudioEncoderOpus final : public AudioEncoder {
bool dtx_enabled() const { return config_.dtx_enabled; }
private:
- int Num10msFramesPerPacket() const;
- int SamplesPer10msFrame() const;
+ size_t Num10msFramesPerPacket() const;
+ size_t SamplesPer10msFrame() const;
bool RecreateEncoderInstance(const Config& config);
Config config_;
@@ -99,4 +99,4 @@ class AudioEncoderOpus final : public AudioEncoder {
} // namespace webrtc
-#endif // WEBRTC_MODULES_AUDIO_CODING_CODECS_OPUS_INCLUDE_AUDIO_ENCODER_OPUS_H_
+#endif // WEBRTC_MODULES_AUDIO_CODING_CODECS_OPUS_AUDIO_ENCODER_OPUS_H_
diff --git a/webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus_unittest.cc b/webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus_unittest.cc
index e69f259554..441e807b4f 100644
--- a/webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus_unittest.cc
+++ b/webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus_unittest.cc
@@ -12,7 +12,7 @@
#include "webrtc/base/checks.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/common_types.h"
-#include "webrtc/modules/audio_coding/codecs/opus/include/audio_encoder_opus.h"
+#include "webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.h"
namespace webrtc {
diff --git a/webrtc/modules/audio_coding/codecs/opus/opus.gypi b/webrtc/modules/audio_coding/codecs/opus/opus.gypi
index 05da3e5e47..d7454d632d 100644
--- a/webrtc/modules/audio_coding/codecs/opus/opus.gypi
+++ b/webrtc/modules/audio_coding/codecs/opus/opus.gypi
@@ -39,17 +39,14 @@
'dependencies': [
'audio_encoder_interface',
],
- 'include_dirs': [
- '<(webrtc_root)',
- ],
'sources': [
'audio_decoder_opus.cc',
+ 'audio_decoder_opus.h',
'audio_encoder_opus.cc',
- 'include/audio_decoder_opus.h',
- 'include/audio_encoder_opus.h',
- 'include/opus_interface.h',
+ 'audio_encoder_opus.h',
'opus_inst.h',
'opus_interface.c',
+ 'opus_interface.h',
],
},
],
@@ -65,9 +62,6 @@
'<(webrtc_root)/test/test.gyp:test_support_main',
'<(DEPTH)/testing/gtest.gyp:gtest',
],
- 'include_dirs': [
- '<(webrtc_root)',
- ],
'sources': [
'opus_fec_test.cc',
],
diff --git a/webrtc/modules/audio_coding/codecs/opus/opus_fec_test.cc b/webrtc/modules/audio_coding/codecs/opus/opus_fec_test.cc
index f257210431..4f9f7ff7bb 100644
--- a/webrtc/modules/audio_coding/codecs/opus/opus_fec_test.cc
+++ b/webrtc/modules/audio_coding/codecs/opus/opus_fec_test.cc
@@ -9,8 +9,9 @@
*/
#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/base/format_macros.h"
#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/modules/audio_coding/codecs/opus/include/opus_interface.h"
+#include "webrtc/modules/audio_coding/codecs/opus/opus_interface.h"
#include "webrtc/test/testsupport/fileutils.h"
using ::std::string;
@@ -21,7 +22,7 @@ using ::testing::TestWithParam;
namespace webrtc {
// Define coding parameter as <channels, bit_rate, filename, extension>.
-typedef tuple<int, int, string, string> coding_param;
+typedef tuple<size_t, int, string, string> coding_param;
typedef struct mode mode;
struct mode {
@@ -47,7 +48,7 @@ class OpusFecTest : public TestWithParam<coding_param> {
int sampling_khz_;
size_t block_length_sample_;
- int channels_;
+ size_t channels_;
int bit_rate_;
size_t data_pointer_;
@@ -68,7 +69,7 @@ class OpusFecTest : public TestWithParam<coding_param> {
void OpusFecTest::SetUp() {
channels_ = get<0>(GetParam());
bit_rate_ = get<1>(GetParam());
- printf("Coding %d channel signal at %d bps.\n", channels_, bit_rate_);
+ printf("Coding %" PRIuS " channel signal at %d bps.\n", channels_, bit_rate_);
in_filename_ = test::ResourcePath(get<2>(GetParam()), get<3>(GetParam()));
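Since channels_ is now a size_t, the old %d conversion no longer matches its
argument; PRIuS is WebRTC's printf format macro for size_t, defined in
webrtc/base/format_macros.h (newly included above). Usage sketch:

    #include "webrtc/base/format_macros.h"

    size_t channels = 2;
    printf("Coding %" PRIuS " channel signal.\n", channels);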
diff --git a/webrtc/modules/audio_coding/codecs/opus/opus_inst.h b/webrtc/modules/audio_coding/codecs/opus/opus_inst.h
index 373db392a6..8d032baf35 100644
--- a/webrtc/modules/audio_coding/codecs/opus/opus_inst.h
+++ b/webrtc/modules/audio_coding/codecs/opus/opus_inst.h
@@ -11,17 +11,26 @@
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_OPUS_OPUS_INST_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_OPUS_OPUS_INST_H_
+#include <stddef.h>
+
#include "opus.h"
struct WebRtcOpusEncInst {
OpusEncoder* encoder;
+ size_t channels;
int in_dtx_mode;
+  // When Opus is in DTX mode, |zero_counts| counts consecutive zeros, one
+  // counter per channel, so that long all-zero segments can be broken up and
+  // DTX kept from failing. After each encoding call, |zero_counts| holds the
+  // number of trailing zeros from the last frame.
+  // TODO(minyue): remove this when Opus gets an internal fix to DTX.
+ size_t* zero_counts;
};
struct WebRtcOpusDecInst {
OpusDecoder* decoder;
int prev_decoded_samples;
- int channels;
+ size_t channels;
int in_dtx_mode;
};
diff --git a/webrtc/modules/audio_coding/codecs/opus/opus_interface.c b/webrtc/modules/audio_coding/codecs/opus/opus_interface.c
index 1a632422c5..9dc7ef95fe 100644
--- a/webrtc/modules/audio_coding/codecs/opus/opus_interface.c
+++ b/webrtc/modules/audio_coding/codecs/opus/opus_interface.c
@@ -8,9 +8,10 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/audio_coding/codecs/opus/include/opus_interface.h"
+#include "webrtc/modules/audio_coding/codecs/opus/opus_interface.h"
#include "webrtc/modules/audio_coding/codecs/opus/opus_inst.h"
+#include <assert.h>
#include <stdlib.h>
#include <string.h>
@@ -29,48 +30,61 @@ enum {
/* Default frame size, 20 ms @ 48 kHz, in samples (for one channel). */
kWebRtcOpusDefaultFrameSize = 960,
+
+  // Maximum number of consecutive zeros at or beyond which DTX can fail.
+ kZeroBreakCount = 157,
+
+#if defined(OPUS_FIXED_POINT)
+ kZeroBreakValue = 10,
+#else
+ kZeroBreakValue = 1,
+#endif
};
int16_t WebRtcOpus_EncoderCreate(OpusEncInst** inst,
- int32_t channels,
+ size_t channels,
int32_t application) {
- OpusEncInst* state;
- if (inst != NULL) {
- state = (OpusEncInst*) calloc(1, sizeof(OpusEncInst));
- if (state) {
- int opus_app;
- switch (application) {
- case 0: {
- opus_app = OPUS_APPLICATION_VOIP;
- break;
- }
- case 1: {
- opus_app = OPUS_APPLICATION_AUDIO;
- break;
- }
- default: {
- free(state);
- return -1;
- }
- }
+ int opus_app;
+ if (!inst)
+ return -1;
- int error;
- state->encoder = opus_encoder_create(48000, channels, opus_app,
- &error);
- state->in_dtx_mode = 0;
- if (error == OPUS_OK && state->encoder != NULL) {
- *inst = state;
- return 0;
- }
- free(state);
- }
+ switch (application) {
+ case 0:
+ opus_app = OPUS_APPLICATION_VOIP;
+ break;
+ case 1:
+ opus_app = OPUS_APPLICATION_AUDIO;
+ break;
+ default:
+ return -1;
}
- return -1;
+
+ OpusEncInst* state = calloc(1, sizeof(OpusEncInst));
+ assert(state);
+
+ // Allocate zero counters.
+ state->zero_counts = calloc(channels, sizeof(size_t));
+ assert(state->zero_counts);
+
+ int error;
+ state->encoder = opus_encoder_create(48000, (int)channels, opus_app,
+ &error);
+ if (error != OPUS_OK || !state->encoder) {
+ WebRtcOpus_EncoderFree(state);
+ return -1;
+ }
+
+ state->in_dtx_mode = 0;
+ state->channels = channels;
+
+ *inst = state;
+ return 0;
}
int16_t WebRtcOpus_EncoderFree(OpusEncInst* inst) {
if (inst) {
opus_encoder_destroy(inst->encoder);
+ free(inst->zero_counts);
free(inst);
return 0;
} else {
@@ -84,13 +98,42 @@ int WebRtcOpus_Encode(OpusEncInst* inst,
size_t length_encoded_buffer,
uint8_t* encoded) {
int res;
+ size_t i;
+ size_t c;
+
+ int16_t buffer[2 * 48 * kWebRtcOpusMaxEncodeFrameSizeMs];
if (samples > 48 * kWebRtcOpusMaxEncodeFrameSizeMs) {
return -1;
}
+ const size_t channels = inst->channels;
+ int use_buffer = 0;
+
+  // Break long runs of zeros by forcing a |kZeroBreakValue| sample after
+  // every |kZeroBreakCount| consecutive zeros in a channel.
+ if (inst->in_dtx_mode) {
+ for (i = 0; i < samples; ++i) {
+ for (c = 0; c < channels; ++c) {
+ if (audio_in[i * channels + c] == 0) {
+ ++inst->zero_counts[c];
+ if (inst->zero_counts[c] == kZeroBreakCount) {
+ if (!use_buffer) {
+ memcpy(buffer, audio_in, samples * channels * sizeof(int16_t));
+ use_buffer = 1;
+ }
+ buffer[i * channels + c] = kZeroBreakValue;
+ inst->zero_counts[c] = 0;
+ }
+ } else {
+ inst->zero_counts[c] = 0;
+ }
+ }
+ }
+ }
+
res = opus_encode(inst->encoder,
- (const opus_int16*)audio_in,
+ use_buffer ? buffer : audio_in,
(int)samples,
encoded,
(opus_int32)length_encoded_buffer);
@@ -205,7 +248,7 @@ int16_t WebRtcOpus_SetComplexity(OpusEncInst* inst, int32_t complexity) {
}
}
-int16_t WebRtcOpus_DecoderCreate(OpusDecInst** inst, int channels) {
+int16_t WebRtcOpus_DecoderCreate(OpusDecInst** inst, size_t channels) {
int error;
OpusDecInst* state;
@@ -217,7 +260,7 @@ int16_t WebRtcOpus_DecoderCreate(OpusDecInst** inst, int channels) {
}
/* Create new memory, always at 48000 Hz. */
- state->decoder = opus_decoder_create(48000, channels, &error);
+ state->decoder = opus_decoder_create(48000, (int)channels, &error);
if (error == OPUS_OK && state->decoder != NULL) {
/* Creation of memory all ok. */
state->channels = channels;
@@ -246,7 +289,7 @@ int16_t WebRtcOpus_DecoderFree(OpusDecInst* inst) {
}
}
-int WebRtcOpus_DecoderChannels(OpusDecInst* inst) {
+size_t WebRtcOpus_DecoderChannels(OpusDecInst* inst) {
return inst->channels;
}
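The zero-breaking loop in WebRtcOpus_Encode keeps one counter per channel
across calls and walks the interleaved input as audio_in[i * channels + c];
the frame is copied into the stack buffer lazily, only once a counter first
reaches kZeroBreakCount. As a rough sanity check of the constants (this
arithmetic is not stated in the patch): at the 48 kHz rate used here,

    157 zero samples / 48 samples per ms ≈ 3.3 ms

so a digitally silent channel receives one small nonzero sample (1, or 10 in
fixed-point builds) every few milliseconds, enough to keep DTX from
misbehaving while remaining inaudible.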
diff --git a/webrtc/modules/audio_coding/codecs/opus/include/opus_interface.h b/webrtc/modules/audio_coding/codecs/opus/opus_interface.h
index 50b2338ab5..754b49c808 100644
--- a/webrtc/modules/audio_coding/codecs/opus/include/opus_interface.h
+++ b/webrtc/modules/audio_coding/codecs/opus/opus_interface.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_OPUS_INCLUDE_OPUS_INTERFACE_H_
-#define WEBRTC_MODULES_AUDIO_CODING_CODECS_OPUS_INCLUDE_OPUS_INTERFACE_H_
+#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_OPUS_OPUS_INTERFACE_H_
+#define WEBRTC_MODULES_AUDIO_CODING_CODECS_OPUS_OPUS_INTERFACE_H_
#include <stddef.h>
@@ -43,7 +43,7 @@ typedef struct WebRtcOpusDecInst OpusDecInst;
* -1 - Error
*/
int16_t WebRtcOpus_EncoderCreate(OpusEncInst** inst,
- int32_t channels,
+ size_t channels,
int32_t application);
int16_t WebRtcOpus_EncoderFree(OpusEncInst* inst);
@@ -195,7 +195,7 @@ int16_t WebRtcOpus_DisableDtx(OpusEncInst* inst);
*/
int16_t WebRtcOpus_SetComplexity(OpusEncInst* inst, int32_t complexity);
-int16_t WebRtcOpus_DecoderCreate(OpusDecInst** inst, int channels);
+int16_t WebRtcOpus_DecoderCreate(OpusDecInst** inst, size_t channels);
int16_t WebRtcOpus_DecoderFree(OpusDecInst* inst);
/****************************************************************************
@@ -203,7 +203,7 @@ int16_t WebRtcOpus_DecoderFree(OpusDecInst* inst);
*
* This function returns the number of channels created for Opus decoder.
*/
-int WebRtcOpus_DecoderChannels(OpusDecInst* inst);
+size_t WebRtcOpus_DecoderChannels(OpusDecInst* inst);
/****************************************************************************
* WebRtcOpus_DecoderInit(...)
@@ -346,4 +346,4 @@ int WebRtcOpus_PacketHasFec(const uint8_t* payload,
} // extern "C"
#endif
-#endif // WEBRTC_MODULES_AUDIO_CODING_CODECS_OPUS_INCLUDE_OPUS_INCLUDE_H_
+#endif // WEBRTC_MODULES_AUDIO_CODING_CODECS_OPUS_OPUS_INTERFACE_H_
diff --git a/webrtc/modules/audio_coding/codecs/opus/opus_speed_test.cc b/webrtc/modules/audio_coding/codecs/opus/opus_speed_test.cc
index 29def14bf8..4d1aa42c89 100644
--- a/webrtc/modules/audio_coding/codecs/opus/opus_speed_test.cc
+++ b/webrtc/modules/audio_coding/codecs/opus/opus_speed_test.cc
@@ -8,7 +8,7 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/audio_coding/codecs/opus/include/opus_interface.h"
+#include "webrtc/modules/audio_coding/codecs/opus/opus_interface.h"
#include "webrtc/modules/audio_coding/codecs/tools/audio_codec_speed_test.h"
using ::std::string;
@@ -77,7 +77,7 @@ float OpusSpeedTest::DecodeABlock(const uint8_t* bit_stream,
value = WebRtcOpus_Decode(opus_decoder_, bit_stream, encoded_bytes, out_data,
&audio_type);
clocks = clock() - clocks;
- EXPECT_EQ(output_length_sample_, value);
+ EXPECT_EQ(output_length_sample_, static_cast<size_t>(value));
return 1000.0 * clocks / CLOCKS_PER_SEC;
}
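The static_cast<size_t> keeps both EXPECT_EQ operands the same type now that
output_length_sample_ is a size_t. It stays safe even though WebRtcOpus_Decode
returns a signed error code: a negative return converts to an enormous
unsigned value (for example, -1 becomes SIZE_MAX), which can never equal a
frame length, so a failed decode still fails the expectation loudly.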
diff --git a/webrtc/modules/audio_coding/codecs/opus/opus_unittest.cc b/webrtc/modules/audio_coding/codecs/opus/opus_unittest.cc
index 4630e44807..c82b184b38 100644
--- a/webrtc/modules/audio_coding/codecs/opus/opus_unittest.cc
+++ b/webrtc/modules/audio_coding/codecs/opus/opus_unittest.cc
@@ -10,7 +10,8 @@
#include <string>
#include "testing/gtest/include/gtest/gtest.h"
-#include "webrtc/modules/audio_coding/codecs/opus/include/opus_interface.h"
+#include "webrtc/base/checks.h"
+#include "webrtc/modules/audio_coding/codecs/opus/opus_interface.h"
#include "webrtc/modules/audio_coding/codecs/opus/opus_inst.h"
#include "webrtc/modules/audio_coding/neteq/tools/audio_loop.h"
#include "webrtc/test/testsupport/fileutils.h"
@@ -35,17 +36,18 @@ class OpusTest : public TestWithParam<::testing::tuple<int, int>> {
protected:
OpusTest();
- void TestDtxEffect(bool dtx);
+ void TestDtxEffect(bool dtx, int block_length_ms);
// Prepare |speech_data_| for encoding, read from a hard-coded file.
// After preparation, |speech_data_.GetNextBlock()| returns a pointer to a
// block of |block_length_ms| milliseconds. The data is looped every
// |loop_length_ms| milliseconds.
- void PrepareSpeechData(int channel, int block_length_ms, int loop_length_ms);
+ void PrepareSpeechData(size_t channel,
+ int block_length_ms,
+ int loop_length_ms);
int EncodeDecode(WebRtcOpusEncInst* encoder,
- const int16_t* input_audio,
- size_t input_samples,
+ rtc::ArrayView<const int16_t> input_audio,
WebRtcOpusDecInst* decoder,
int16_t* output_audio,
int16_t* audio_type);
@@ -53,13 +55,16 @@ class OpusTest : public TestWithParam<::testing::tuple<int, int>> {
void SetMaxPlaybackRate(WebRtcOpusEncInst* encoder,
opus_int32 expect, int32_t set);
+ void CheckAudioBounded(const int16_t* audio, size_t samples, size_t channels,
+ uint16_t bound) const;
+
WebRtcOpusEncInst* opus_encoder_;
WebRtcOpusDecInst* opus_decoder_;
AudioLoop speech_data_;
uint8_t bitstream_[kMaxBytes];
size_t encoded_bytes_;
- int channels_;
+ size_t channels_;
int application_;
};
@@ -67,11 +72,11 @@ OpusTest::OpusTest()
: opus_encoder_(NULL),
opus_decoder_(NULL),
encoded_bytes_(0),
- channels_(::testing::get<0>(GetParam())),
+ channels_(static_cast<size_t>(::testing::get<0>(GetParam()))),
application_(::testing::get<1>(GetParam())) {
}
-void OpusTest::PrepareSpeechData(int channel, int block_length_ms,
+void OpusTest::PrepareSpeechData(size_t channel, int block_length_ms,
int loop_length_ms) {
const std::string file_name =
webrtc::test::ResourcePath((channel == 1) ?
@@ -95,14 +100,25 @@ void OpusTest::SetMaxPlaybackRate(WebRtcOpusEncInst* encoder,
EXPECT_EQ(expect, bandwidth);
}
+void OpusTest::CheckAudioBounded(const int16_t* audio, size_t samples,
+ size_t channels, uint16_t bound) const {
+ for (size_t i = 0; i < samples; ++i) {
+ for (size_t c = 0; c < channels; ++c) {
+ ASSERT_GE(audio[i * channels + c], -bound);
+ ASSERT_LE(audio[i * channels + c], bound);
+ }
+ }
+}
+
int OpusTest::EncodeDecode(WebRtcOpusEncInst* encoder,
- const int16_t* input_audio,
- size_t input_samples,
+ rtc::ArrayView<const int16_t> input_audio,
WebRtcOpusDecInst* decoder,
int16_t* output_audio,
int16_t* audio_type) {
- int encoded_bytes_int = WebRtcOpus_Encode(encoder, input_audio, input_samples,
- kMaxBytes, bitstream_);
+ int encoded_bytes_int = WebRtcOpus_Encode(
+ encoder, input_audio.data(),
+ rtc::CheckedDivExact(input_audio.size(), channels_),
+ kMaxBytes, bitstream_);
EXPECT_GE(encoded_bytes_int, 0);
encoded_bytes_ = static_cast<size_t>(encoded_bytes_int);
int est_len = WebRtcOpus_DurationEst(decoder, bitstream_, encoded_bytes_);
@@ -115,8 +131,9 @@ int OpusTest::EncodeDecode(WebRtcOpusEncInst* encoder,
// Test if encoder/decoder can enter DTX mode properly and do not enter DTX when
// they should not. This test is signal dependent.
-void OpusTest::TestDtxEffect(bool dtx) {
- PrepareSpeechData(channels_, 20, 2000);
+void OpusTest::TestDtxEffect(bool dtx, int block_length_ms) {
+ PrepareSpeechData(channels_, block_length_ms, 2000);
+ const size_t samples = kOpusRateKhz * block_length_ms;
// Create encoder memory.
EXPECT_EQ(0, WebRtcOpus_EncoderCreate(&opus_encoder_,
@@ -129,22 +146,20 @@ void OpusTest::TestDtxEffect(bool dtx) {
channels_ == 1 ? 32000 : 64000));
// Set input audio as silence.
- int16_t* silence = new int16_t[kOpus20msFrameSamples * channels_];
- memset(silence, 0, sizeof(int16_t) * kOpus20msFrameSamples * channels_);
+ std::vector<int16_t> silence(samples * channels_, 0);
// Setting DTX.
EXPECT_EQ(0, dtx ? WebRtcOpus_EnableDtx(opus_encoder_) :
WebRtcOpus_DisableDtx(opus_encoder_));
int16_t audio_type;
- int16_t* output_data_decode = new int16_t[kOpus20msFrameSamples * channels_];
+ int16_t* output_data_decode = new int16_t[samples * channels_];
for (int i = 0; i < 100; ++i) {
- EXPECT_EQ(kOpus20msFrameSamples,
+ EXPECT_EQ(samples,
static_cast<size_t>(EncodeDecode(
- opus_encoder_, speech_data_.GetNextBlock(),
- kOpus20msFrameSamples, opus_decoder_, output_data_decode,
- &audio_type)));
+ opus_encoder_, speech_data_.GetNextBlock(), opus_decoder_,
+ output_data_decode, &audio_type)));
    // If not DTX, it should never enter DTX mode. If DTX, we do not check,
    // since whether it enters DTX depends on the signal type.
if (!dtx) {
@@ -158,10 +173,10 @@ void OpusTest::TestDtxEffect(bool dtx) {
// We input some silent segments. In DTX mode, the encoder will stop sending.
  // However, DTX may kick in only after a while.
for (int i = 0; i < 30; ++i) {
- EXPECT_EQ(kOpus20msFrameSamples,
+ EXPECT_EQ(samples,
static_cast<size_t>(EncodeDecode(
- opus_encoder_, silence, kOpus20msFrameSamples, opus_decoder_,
- output_data_decode, &audio_type)));
+ opus_encoder_, silence, opus_decoder_, output_data_decode,
+ &audio_type)));
if (!dtx) {
EXPECT_GT(encoded_bytes_, 1U);
EXPECT_EQ(0, opus_encoder_->in_dtx_mode);
@@ -177,21 +192,47 @@ void OpusTest::TestDtxEffect(bool dtx) {
  // When Opus is in DTX, it wakes up on a regular basis. It sends two packets,
// one with an arbitrary size and the other of 1-byte, then stops sending for
- // 19 frames.
- const int cycles = 5;
- for (int j = 0; j < cycles; ++j) {
- // DTX mode is maintained 19 frames.
- for (int i = 0; i < 19; ++i) {
- EXPECT_EQ(kOpus20msFrameSamples,
+ // a certain number of frames.
+
+ // |max_dtx_frames| is the maximum number of frames Opus can stay in DTX.
+ const int max_dtx_frames = 400 / block_length_ms + 1;
+
+ // We run |kRunTimeMs| milliseconds of pure silence.
+ const int kRunTimeMs = 2000;
+
+  // We check that, after |kCheckTimeMs| milliseconds (given that the CNG in
+  // Opus needs time to adapt), the absolute values of the DTX-decoded signal
+  // are bounded by |kOutputValueBound|.
+ const int kCheckTimeMs = 1500;
+
+#if defined(OPUS_FIXED_POINT)
+ const uint16_t kOutputValueBound = 20;
+#else
+ const uint16_t kOutputValueBound = 2;
+#endif
+
+ int time = 0;
+ while (time < kRunTimeMs) {
+    // DTX mode is maintained for at most |max_dtx_frames| frames.
+ int i = 0;
+ for (; i < max_dtx_frames; ++i) {
+ time += block_length_ms;
+ EXPECT_EQ(samples,
static_cast<size_t>(EncodeDecode(
- opus_encoder_, silence, kOpus20msFrameSamples,
- opus_decoder_, output_data_decode, &audio_type)));
+ opus_encoder_, silence, opus_decoder_, output_data_decode,
+ &audio_type)));
if (dtx) {
+ if (encoded_bytes_ > 1)
+ break;
EXPECT_EQ(0U, encoded_bytes_) // Send 0 byte.
<< "Opus should have entered DTX mode.";
EXPECT_EQ(1, opus_encoder_->in_dtx_mode);
EXPECT_EQ(1, opus_decoder_->in_dtx_mode);
EXPECT_EQ(2, audio_type); // Comfort noise.
+ if (time >= kCheckTimeMs) {
+ CheckAudioBounded(output_data_decode, samples, channels_,
+ kOutputValueBound);
+ }
} else {
EXPECT_GT(encoded_bytes_, 1U);
EXPECT_EQ(0, opus_encoder_->in_dtx_mode);
@@ -200,27 +241,31 @@ void OpusTest::TestDtxEffect(bool dtx) {
}
}
- // Quit DTX after 19 frames.
- EXPECT_EQ(kOpus20msFrameSamples,
- static_cast<size_t>(EncodeDecode(
- opus_encoder_, silence, kOpus20msFrameSamples, opus_decoder_,
- output_data_decode, &audio_type)));
+ if (dtx) {
+ // With DTX, Opus must stop transmission for some time.
+ EXPECT_GT(i, 1);
+ }
- EXPECT_GT(encoded_bytes_, 1U);
+ // We expect a normal payload.
EXPECT_EQ(0, opus_encoder_->in_dtx_mode);
EXPECT_EQ(0, opus_decoder_->in_dtx_mode);
EXPECT_EQ(0, audio_type); // Speech.
// Enters DTX again immediately.
- EXPECT_EQ(kOpus20msFrameSamples,
+ time += block_length_ms;
+ EXPECT_EQ(samples,
static_cast<size_t>(EncodeDecode(
- opus_encoder_, silence, kOpus20msFrameSamples, opus_decoder_,
- output_data_decode, &audio_type)));
+ opus_encoder_, silence, opus_decoder_, output_data_decode,
+ &audio_type)));
if (dtx) {
EXPECT_EQ(1U, encoded_bytes_); // Send 1 byte.
EXPECT_EQ(1, opus_encoder_->in_dtx_mode);
EXPECT_EQ(1, opus_decoder_->in_dtx_mode);
EXPECT_EQ(2, audio_type); // Comfort noise.
+ if (time >= kCheckTimeMs) {
+ CheckAudioBounded(output_data_decode, samples, channels_,
+ kOutputValueBound);
+ }
} else {
EXPECT_GT(encoded_bytes_, 1U);
EXPECT_EQ(0, opus_encoder_->in_dtx_mode);
@@ -232,10 +277,10 @@ void OpusTest::TestDtxEffect(bool dtx) {
silence[0] = 10000;
if (dtx) {
// Verify that encoder/decoder can jump out from DTX mode.
- EXPECT_EQ(kOpus20msFrameSamples,
+ EXPECT_EQ(samples,
static_cast<size_t>(EncodeDecode(
- opus_encoder_, silence, kOpus20msFrameSamples, opus_decoder_,
- output_data_decode, &audio_type)));
+ opus_encoder_, silence, opus_decoder_, output_data_decode,
+ &audio_type)));
EXPECT_GT(encoded_bytes_, 1U);
EXPECT_EQ(0, opus_encoder_->in_dtx_mode);
EXPECT_EQ(0, opus_decoder_->in_dtx_mode);
@@ -244,7 +289,6 @@ void OpusTest::TestDtxEffect(bool dtx) {
// Free memory.
delete[] output_data_decode;
- delete[] silence;
EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_encoder_));
EXPECT_EQ(0, WebRtcOpus_DecoderFree(opus_decoder_));
}
@@ -314,10 +358,9 @@ TEST_P(OpusTest, OpusEncodeDecode) {
int16_t audio_type;
int16_t* output_data_decode = new int16_t[kOpus20msFrameSamples * channels_];
EXPECT_EQ(kOpus20msFrameSamples,
- static_cast<size_t>(EncodeDecode(
- opus_encoder_, speech_data_.GetNextBlock(),
- kOpus20msFrameSamples, opus_decoder_, output_data_decode,
- &audio_type)));
+ static_cast<size_t>(
+ EncodeDecode(opus_encoder_, speech_data_.GetNextBlock(),
+ opus_decoder_, output_data_decode, &audio_type)));
// Free memory.
delete[] output_data_decode;
@@ -374,10 +417,9 @@ TEST_P(OpusTest, OpusDecodeInit) {
int16_t audio_type;
int16_t* output_data_decode = new int16_t[kOpus20msFrameSamples * channels_];
EXPECT_EQ(kOpus20msFrameSamples,
- static_cast<size_t>(EncodeDecode(
- opus_encoder_, speech_data_.GetNextBlock(),
- kOpus20msFrameSamples, opus_decoder_, output_data_decode,
- &audio_type)));
+ static_cast<size_t>(
+ EncodeDecode(opus_encoder_, speech_data_.GetNextBlock(),
+ opus_decoder_, output_data_decode, &audio_type)));
WebRtcOpus_DecoderInit(opus_decoder_);
@@ -444,11 +486,15 @@ TEST_P(OpusTest, OpusEnableDisableDtx) {
}
TEST_P(OpusTest, OpusDtxOff) {
- TestDtxEffect(false);
+ TestDtxEffect(false, 10);
+ TestDtxEffect(false, 20);
+ TestDtxEffect(false, 40);
}
TEST_P(OpusTest, OpusDtxOn) {
- TestDtxEffect(true);
+ TestDtxEffect(true, 10);
+ TestDtxEffect(true, 20);
+ TestDtxEffect(true, 40);
}
TEST_P(OpusTest, OpusSetPacketLossRate) {
@@ -513,10 +559,9 @@ TEST_P(OpusTest, OpusDecodePlc) {
int16_t audio_type;
int16_t* output_data_decode = new int16_t[kOpus20msFrameSamples * channels_];
EXPECT_EQ(kOpus20msFrameSamples,
- static_cast<size_t>(EncodeDecode(
- opus_encoder_, speech_data_.GetNextBlock(),
- kOpus20msFrameSamples, opus_decoder_, output_data_decode,
- &audio_type)));
+ static_cast<size_t>(
+ EncodeDecode(opus_encoder_, speech_data_.GetNextBlock(),
+ opus_decoder_, output_data_decode, &audio_type)));
// Call decoder PLC.
int16_t* plc_buffer = new int16_t[kOpus20msFrameSamples * channels_];
@@ -542,10 +587,11 @@ TEST_P(OpusTest, OpusDurationEstimation) {
EXPECT_EQ(0, WebRtcOpus_DecoderCreate(&opus_decoder_, channels_));
// 10 ms. We use only first 10 ms of a 20 ms block.
- int encoded_bytes_int = WebRtcOpus_Encode(opus_encoder_,
- speech_data_.GetNextBlock(),
- kOpus10msFrameSamples,
- kMaxBytes, bitstream_);
+ auto speech_block = speech_data_.GetNextBlock();
+ int encoded_bytes_int = WebRtcOpus_Encode(
+ opus_encoder_, speech_block.data(),
+ rtc::CheckedDivExact(speech_block.size(), 2 * channels_),
+ kMaxBytes, bitstream_);
EXPECT_GE(encoded_bytes_int, 0);
EXPECT_EQ(kOpus10msFrameSamples,
static_cast<size_t>(WebRtcOpus_DurationEst(
@@ -553,10 +599,11 @@ TEST_P(OpusTest, OpusDurationEstimation) {
static_cast<size_t>(encoded_bytes_int))));
// 20 ms
- encoded_bytes_int = WebRtcOpus_Encode(opus_encoder_,
- speech_data_.GetNextBlock(),
- kOpus20msFrameSamples,
- kMaxBytes, bitstream_);
+ speech_block = speech_data_.GetNextBlock();
+ encoded_bytes_int = WebRtcOpus_Encode(
+ opus_encoder_, speech_block.data(),
+ rtc::CheckedDivExact(speech_block.size(), channels_),
+ kMaxBytes, bitstream_);
EXPECT_GE(encoded_bytes_int, 0);
EXPECT_EQ(kOpus20msFrameSamples,
static_cast<size_t>(WebRtcOpus_DurationEst(
@@ -594,10 +641,11 @@ TEST_P(OpusTest, OpusDecodeRepacketized) {
OpusRepacketizer* rp = opus_repacketizer_create();
for (int idx = 0; idx < kPackets; idx++) {
- encoded_bytes_ = WebRtcOpus_Encode(opus_encoder_,
- speech_data_.GetNextBlock(),
- kOpus20msFrameSamples, kMaxBytes,
- bitstream_);
+ auto speech_block = speech_data_.GetNextBlock();
+ encoded_bytes_ =
+ WebRtcOpus_Encode(opus_encoder_, speech_block.data(),
+ rtc::CheckedDivExact(speech_block.size(), channels_),
+ kMaxBytes, bitstream_);
EXPECT_EQ(OPUS_OK, opus_repacketizer_cat(rp, bitstream_, encoded_bytes_));
}
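For reference, the max_dtx_frames bound used by TestDtxEffect, 400 /
block_length_ms + 1, lets Opus stay in DTX for just over 400 ms before the
test demands a refresh packet. For the three block lengths now exercised:

    10 ms blocks: 400/10 + 1 = 41 frames (410 ms)
    20 ms blocks: 400/20 + 1 = 21 frames (420 ms)
    40 ms blocks: 400/40 + 1 = 11 frames (440 ms)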
diff --git a/webrtc/modules/audio_coding/codecs/pcm16b/audio_decoder_pcm16b.cc b/webrtc/modules/audio_coding/codecs/pcm16b/audio_decoder_pcm16b.cc
index 7d07b23a3c..834c070073 100644
--- a/webrtc/modules/audio_coding/codecs/pcm16b/audio_decoder_pcm16b.cc
+++ b/webrtc/modules/audio_coding/codecs/pcm16b/audio_decoder_pcm16b.cc
@@ -8,10 +8,10 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/audio_coding/codecs/pcm16b/include/audio_decoder_pcm16b.h"
+#include "webrtc/modules/audio_coding/codecs/pcm16b/audio_decoder_pcm16b.h"
#include "webrtc/base/checks.h"
-#include "webrtc/modules/audio_coding/codecs/pcm16b/include/pcm16b.h"
+#include "webrtc/modules/audio_coding/codecs/pcm16b/pcm16b.h"
namespace webrtc {
diff --git a/webrtc/modules/audio_coding/codecs/pcm16b/include/audio_decoder_pcm16b.h b/webrtc/modules/audio_coding/codecs/pcm16b/audio_decoder_pcm16b.h
index 96131c4d21..692cb94282 100644
--- a/webrtc/modules/audio_coding/codecs/pcm16b/include/audio_decoder_pcm16b.h
+++ b/webrtc/modules/audio_coding/codecs/pcm16b/audio_decoder_pcm16b.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_PCM16B_INCLUDE_AUDIO_DECODER_PCM16B_H_
-#define WEBRTC_MODULES_AUDIO_CODING_CODECS_PCM16B_INCLUDE_AUDIO_DECODER_PCM16B_H_
+#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_PCM16B_AUDIO_DECODER_PCM16B_H_
+#define WEBRTC_MODULES_AUDIO_CODING_CODECS_PCM16B_AUDIO_DECODER_PCM16B_H_
#include "webrtc/base/constructormagic.h"
#include "webrtc/modules/audio_coding/codecs/audio_decoder.h"
@@ -37,4 +37,4 @@ class AudioDecoderPcm16B final : public AudioDecoder {
} // namespace webrtc
-#endif // WEBRTC_MODULES_AUDIO_CODING_CODECS_PCM16B_INCLUDE_AUDIO_DECODER_PCM16B_H_
+#endif // WEBRTC_MODULES_AUDIO_CODING_CODECS_PCM16B_AUDIO_DECODER_PCM16B_H_
diff --git a/webrtc/modules/audio_coding/codecs/pcm16b/audio_encoder_pcm16b.cc b/webrtc/modules/audio_coding/codecs/pcm16b/audio_encoder_pcm16b.cc
index 6c30c7ff62..f4d4022302 100644
--- a/webrtc/modules/audio_coding/codecs/pcm16b/audio_encoder_pcm16b.cc
+++ b/webrtc/modules/audio_coding/codecs/pcm16b/audio_encoder_pcm16b.cc
@@ -8,11 +8,11 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/audio_coding/codecs/pcm16b/include/audio_encoder_pcm16b.h"
+#include "webrtc/modules/audio_coding/codecs/pcm16b/audio_encoder_pcm16b.h"
#include "webrtc/base/checks.h"
#include "webrtc/common_types.h"
-#include "webrtc/modules/audio_coding/codecs/pcm16b/include/pcm16b.h"
+#include "webrtc/modules/audio_coding/codecs/pcm16b/pcm16b.h"
namespace webrtc {
@@ -22,7 +22,7 @@ size_t AudioEncoderPcm16B::EncodeCall(const int16_t* audio,
return WebRtcPcm16b_Encode(audio, input_len, encoded);
}
-int AudioEncoderPcm16B::BytesPerSample() const {
+size_t AudioEncoderPcm16B::BytesPerSample() const {
return 2;
}
diff --git a/webrtc/modules/audio_coding/codecs/pcm16b/include/audio_encoder_pcm16b.h b/webrtc/modules/audio_coding/codecs/pcm16b/audio_encoder_pcm16b.h
index e03da213df..68ca2da77e 100644
--- a/webrtc/modules/audio_coding/codecs/pcm16b/include/audio_encoder_pcm16b.h
+++ b/webrtc/modules/audio_coding/codecs/pcm16b/audio_encoder_pcm16b.h
@@ -8,11 +8,11 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_PCM16B_INCLUDE_AUDIO_ENCODER_PCM16B_H_
-#define WEBRTC_MODULES_AUDIO_CODING_CODECS_PCM16B_INCLUDE_AUDIO_ENCODER_PCM16B_H_
+#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_PCM16B_AUDIO_ENCODER_PCM16B_H_
+#define WEBRTC_MODULES_AUDIO_CODING_CODECS_PCM16B_AUDIO_ENCODER_PCM16B_H_
#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/modules/audio_coding/codecs/g711/include/audio_encoder_pcm.h"
+#include "webrtc/modules/audio_coding/codecs/g711/audio_encoder_pcm.h"
namespace webrtc {
@@ -37,12 +37,12 @@ class AudioEncoderPcm16B final : public AudioEncoderPcm {
size_t input_len,
uint8_t* encoded) override;
- int BytesPerSample() const override;
+ size_t BytesPerSample() const override;
-private:
+ private:
RTC_DISALLOW_COPY_AND_ASSIGN(AudioEncoderPcm16B);
};
} // namespace webrtc
-#endif // WEBRTC_MODULES_AUDIO_CODING_CODECS_PCM16B_INCLUDE_AUDIO_ENCODER_PCM16B_H_
+#endif // WEBRTC_MODULES_AUDIO_CODING_CODECS_PCM16B_AUDIO_ENCODER_PCM16B_H_
diff --git a/webrtc/modules/audio_coding/codecs/pcm16b/pcm16b.gypi b/webrtc/modules/audio_coding/codecs/pcm16b/pcm16b.gypi
index 3dc2f772c1..d0dd21bb60 100644
--- a/webrtc/modules/audio_coding/codecs/pcm16b/pcm16b.gypi
+++ b/webrtc/modules/audio_coding/codecs/pcm16b/pcm16b.gypi
@@ -15,23 +15,13 @@
'audio_encoder_interface',
'g711',
],
- 'include_dirs': [
- 'include',
- '<(webrtc_root)',
- ],
- 'direct_dependent_settings': {
- 'include_dirs': [
- 'include',
- '<(webrtc_root)',
- ],
- },
'sources': [
- 'include/audio_decoder_pcm16b.h',
- 'include/audio_encoder_pcm16b.h',
- 'include/pcm16b.h',
'audio_decoder_pcm16b.cc',
+ 'audio_decoder_pcm16b.h',
'audio_encoder_pcm16b.cc',
+ 'audio_encoder_pcm16b.h',
'pcm16b.c',
+ 'pcm16b.h',
],
},
], # targets
diff --git a/webrtc/modules/audio_coding/codecs/pcm16b/include/pcm16b.h b/webrtc/modules/audio_coding/codecs/pcm16b/pcm16b.h
index d86a65db49..f96e741c46 100644
--- a/webrtc/modules/audio_coding/codecs/pcm16b/include/pcm16b.h
+++ b/webrtc/modules/audio_coding/codecs/pcm16b/pcm16b.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_PCM16B_MAIN_INCLUDE_PCM16B_H_
-#define WEBRTC_MODULES_AUDIO_CODING_CODECS_PCM16B_MAIN_INCLUDE_PCM16B_H_
+#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_PCM16B_PCM16B_H_
+#define WEBRTC_MODULES_AUDIO_CODING_CODECS_PCM16B_PCM16B_H_
/*
* Define the fixpoint numeric formats
*/
@@ -65,4 +65,4 @@ size_t WebRtcPcm16b_Decode(const uint8_t* encoded,
}
#endif
-#endif /* PCM16B */
+#endif /* WEBRTC_MODULES_AUDIO_CODING_CODECS_PCM16B_PCM16B_H_ */
diff --git a/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.cc b/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.cc
index a19d194e59..7ef1ce096b 100644
--- a/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.cc
+++ b/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.cc
@@ -32,7 +32,7 @@ int AudioEncoderCopyRed::SampleRateHz() const {
return speech_encoder_->SampleRateHz();
}
-int AudioEncoderCopyRed::NumChannels() const {
+size_t AudioEncoderCopyRed::NumChannels() const {
return speech_encoder_->NumChannels();
}
@@ -54,12 +54,11 @@ int AudioEncoderCopyRed::GetTargetBitrate() const {
AudioEncoder::EncodedInfo AudioEncoderCopyRed::EncodeInternal(
uint32_t rtp_timestamp,
- const int16_t* audio,
+ rtc::ArrayView<const int16_t> audio,
size_t max_encoded_bytes,
uint8_t* encoded) {
- EncodedInfo info = speech_encoder_->Encode(
- rtp_timestamp, audio, static_cast<size_t>(SampleRateHz() / 100),
- max_encoded_bytes, encoded);
+ EncodedInfo info =
+ speech_encoder_->Encode(rtp_timestamp, audio, max_encoded_bytes, encoded);
RTC_CHECK_GE(max_encoded_bytes,
info.encoded_bytes + secondary_info_.encoded_bytes);
RTC_CHECK(info.redundant.empty()) << "Cannot use nested redundant encoders.";
diff --git a/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.h b/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.h
index 7837010605..2f53765389 100644
--- a/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.h
+++ b/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.h
@@ -38,13 +38,13 @@ class AudioEncoderCopyRed final : public AudioEncoder {
size_t MaxEncodedBytes() const override;
int SampleRateHz() const override;
- int NumChannels() const override;
+ size_t NumChannels() const override;
int RtpTimestampRateHz() const override;
size_t Num10MsFramesInNextPacket() const override;
size_t Max10MsFramesInAPacket() const override;
int GetTargetBitrate() const override;
EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
- const int16_t* audio,
+ rtc::ArrayView<const int16_t> audio,
size_t max_encoded_bytes,
uint8_t* encoded) override;
void Reset() override;
diff --git a/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red_unittest.cc b/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red_unittest.cc
index cb50652183..22601b6597 100644
--- a/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red_unittest.cc
+++ b/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red_unittest.cc
@@ -42,7 +42,7 @@ class AudioEncoderCopyRedTest : public ::testing::Test {
config.speech_encoder = &mock_encoder_;
red_.reset(new AudioEncoderCopyRed(config));
memset(audio_, 0, sizeof(audio_));
- EXPECT_CALL(mock_encoder_, NumChannels()).WillRepeatedly(Return(1));
+ EXPECT_CALL(mock_encoder_, NumChannels()).WillRepeatedly(Return(1U));
EXPECT_CALL(mock_encoder_, SampleRateHz())
.WillRepeatedly(Return(sample_rate_hz_));
EXPECT_CALL(mock_encoder_, MaxEncodedBytes())
@@ -60,8 +60,10 @@ class AudioEncoderCopyRedTest : public ::testing::Test {
void Encode() {
ASSERT_TRUE(red_.get() != NULL);
- encoded_info_ = red_->Encode(timestamp_, audio_, num_audio_samples_10ms,
- encoded_.size(), &encoded_[0]);
+ encoded_info_ = red_->Encode(
+ timestamp_,
+ rtc::ArrayView<const int16_t>(audio_, num_audio_samples_10ms),
+ encoded_.size(), &encoded_[0]);
timestamp_ += num_audio_samples_10ms;
}
@@ -83,7 +85,7 @@ class MockEncodeHelper {
}
AudioEncoder::EncodedInfo Encode(uint32_t timestamp,
- const int16_t* audio,
+ rtc::ArrayView<const int16_t> audio,
size_t max_encoded_bytes,
uint8_t* encoded) {
if (write_payload_) {
@@ -108,8 +110,8 @@ TEST_F(AudioEncoderCopyRedTest, CheckSampleRatePropagation) {
}
TEST_F(AudioEncoderCopyRedTest, CheckNumChannelsPropagation) {
- EXPECT_CALL(mock_encoder_, NumChannels()).WillOnce(Return(17));
- EXPECT_EQ(17, red_->NumChannels());
+ EXPECT_CALL(mock_encoder_, NumChannels()).WillOnce(Return(17U));
+ EXPECT_EQ(17U, red_->NumChannels());
}
TEST_F(AudioEncoderCopyRedTest, CheckFrameSizePropagation) {
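The unsigned literals in the Return() actions and expectations above follow
from NumChannels() now returning size_t: Return(17U) matches the mocked
return type exactly, and EXPECT_EQ(17U, ...) compares unsigned against
unsigned, avoiding the sign-compare warnings that a plain 17 could trigger on
some toolchains.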
diff --git a/webrtc/modules/audio_coding/codecs/tools/audio_codec_speed_test.cc b/webrtc/modules/audio_coding/codecs/tools/audio_codec_speed_test.cc
index 3395721f8b..3dc665482a 100644
--- a/webrtc/modules/audio_coding/codecs/tools/audio_codec_speed_test.cc
+++ b/webrtc/modules/audio_coding/codecs/tools/audio_codec_speed_test.cc
@@ -11,6 +11,7 @@
#include "webrtc/modules/audio_coding/codecs/tools/audio_codec_speed_test.h"
#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/base/format_macros.h"
#include "webrtc/test/testsupport/fileutils.h"
using ::std::tr1::get;
@@ -23,8 +24,10 @@ AudioCodecSpeedTest::AudioCodecSpeedTest(int block_duration_ms,
: block_duration_ms_(block_duration_ms),
input_sampling_khz_(input_sampling_khz),
output_sampling_khz_(output_sampling_khz),
- input_length_sample_(block_duration_ms_ * input_sampling_khz_),
- output_length_sample_(block_duration_ms_ * output_sampling_khz_),
+ input_length_sample_(
+ static_cast<size_t>(block_duration_ms_ * input_sampling_khz_)),
+ output_length_sample_(
+ static_cast<size_t>(block_duration_ms_ * output_sampling_khz_)),
data_pointer_(0),
loop_length_samples_(0),
max_bytes_(0),
@@ -65,8 +68,7 @@ void AudioCodecSpeedTest::SetUp() {
memcpy(&in_data_[loop_length_samples_], &in_data_[0],
input_length_sample_ * channels_ * sizeof(int16_t));
- max_bytes_ =
- static_cast<size_t>(input_length_sample_ * channels_ * sizeof(int16_t));
+ max_bytes_ = input_length_sample_ * channels_ * sizeof(int16_t);
out_data_.reset(new int16_t[output_length_sample_ * channels_]);
bit_stream_.reset(new uint8_t[max_bytes_]);
@@ -98,7 +100,7 @@ void AudioCodecSpeedTest::EncodeDecode(size_t audio_duration_sec) {
size_t time_now_ms = 0;
float time_ms;
- printf("Coding %d kHz-sampled %d-channel audio at %d bps ...\n",
+ printf("Coding %d kHz-sampled %" PRIuS "-channel audio at %d bps ...\n",
input_sampling_khz_, channels_, bit_rate_);
while (time_now_ms < audio_duration_sec * 1000) {
diff --git a/webrtc/modules/audio_coding/codecs/tools/audio_codec_speed_test.h b/webrtc/modules/audio_coding/codecs/tools/audio_codec_speed_test.h
index 2736c2912e..fb7b3e5b1e 100644
--- a/webrtc/modules/audio_coding/codecs/tools/audio_codec_speed_test.h
+++ b/webrtc/modules/audio_coding/codecs/tools/audio_codec_speed_test.h
@@ -20,7 +20,8 @@ namespace webrtc {
// Define coding parameter as
// <channels, bit_rate, file_name, extension, if_save_output>.
-typedef std::tr1::tuple<int, int, std::string, std::string, bool> coding_param;
+typedef std::tr1::tuple<size_t, int, std::string, std::string, bool>
+ coding_param;
class AudioCodecSpeedTest : public testing::TestWithParam<coding_param> {
protected:
@@ -55,10 +56,10 @@ class AudioCodecSpeedTest : public testing::TestWithParam<coding_param> {
int output_sampling_khz_;
// Number of samples-per-channel in a frame.
- int input_length_sample_;
+ size_t input_length_sample_;
// Expected output number of samples-per-channel in a frame.
- int output_length_sample_;
+ size_t output_length_sample_;
rtc::scoped_ptr<int16_t[]> in_data_;
rtc::scoped_ptr<int16_t[]> out_data_;
@@ -74,7 +75,7 @@ class AudioCodecSpeedTest : public testing::TestWithParam<coding_param> {
float decoding_time_ms_;
FILE* out_file_;
- int channels_;
+ size_t channels_;
// Bit rate is in bit-per-second.
int bit_rate_;
diff --git a/webrtc/modules/audio_coding/main/include/audio_coding_module.h b/webrtc/modules/audio_coding/include/audio_coding_module.h
index b145cf423e..9e7991f22f 100644
--- a/webrtc/modules/audio_coding/main/include/audio_coding_module.h
+++ b/webrtc/modules/audio_coding/include/audio_coding_module.h
@@ -8,16 +8,17 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_INCLUDE_AUDIO_CODING_MODULE_H_
-#define WEBRTC_MODULES_AUDIO_CODING_MAIN_INCLUDE_AUDIO_CODING_MODULE_H_
+#ifndef WEBRTC_MODULES_AUDIO_CODING_INCLUDE_AUDIO_CODING_MODULE_H_
+#define WEBRTC_MODULES_AUDIO_CODING_INCLUDE_AUDIO_CODING_MODULE_H_
+#include <string>
#include <vector>
+#include "webrtc/base/optional.h"
#include "webrtc/common_types.h"
-#include "webrtc/modules/audio_coding/main/acm2/acm_codec_database.h"
-#include "webrtc/modules/audio_coding/main/include/audio_coding_module_typedefs.h"
+#include "webrtc/modules/audio_coding/include/audio_coding_module_typedefs.h"
#include "webrtc/modules/audio_coding/neteq/include/neteq.h"
-#include "webrtc/modules/interface/module.h"
+#include "webrtc/modules/include/module.h"
#include "webrtc/system_wrappers/include/clock.h"
#include "webrtc/typedefs.h"
@@ -60,7 +61,11 @@ class AudioCodingModule {
public:
struct Config {
- Config() : id(0), neteq_config(), clock(Clock::GetRealTimeClock()) {}
+ Config() : id(0), neteq_config(), clock(Clock::GetRealTimeClock()) {
+ // Post-decode VAD is disabled by default in NetEq; however, the Audio
+ // Conference Mixer relies on VAD decisions and fails without them.
+ neteq_config.enable_post_decode_vad = true;
+ }
int id;
NetEq::Config neteq_config;
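This constructor change means every ACM built from a default Config gets post-decode VAD on, reversing NetEq's own default. Clients that want the old NetEq behavior can still override the flag before creating the module; a minimal sketch, assuming the Create(const Config&) factory declared elsewhere in this header:

    // Sketch: opt back out of post-decode VAD after the new default.
    AudioCodingModule::Config config;
    config.neteq_config.enable_post_decode_vad = false;
    rtc::scoped_ptr<AudioCodingModule> acm(AudioCodingModule::Create(config));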
@@ -129,7 +134,7 @@ class AudioCodingModule {
// 0 if succeeded.
//
static int Codec(const char* payload_name, CodecInst* codec,
- int sampling_freq_hz, int channels);
+ int sampling_freq_hz, size_t channels);
///////////////////////////////////////////////////////////////////////////
// int32_t Codec()
@@ -148,7 +153,7 @@ class AudioCodingModule {
// -1 if the codec is not found.
//
static int Codec(const char* payload_name, int sampling_freq_hz,
- int channels);
+ size_t channels);
///////////////////////////////////////////////////////////////////////////
// bool IsCodecValid()
@@ -206,14 +211,10 @@ class AudioCodingModule {
// int32_t SendCodec()
// Get parameters for the codec currently registered as send codec.
//
- // Output:
- // -current_send_codec : parameters of the send codec.
- //
// Return value:
- // -1 if failed to get send codec,
- // 0 if succeeded.
+ //   The send codec, or nothing if we don't have one.
//
- virtual int32_t SendCodec(CodecInst* current_send_codec) const = 0;
+ virtual rtc::Optional<CodecInst> SendCodec() const = 0;
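Replacing the out-parameter with rtc::Optional<CodecInst> folds the error code into the return value, so callers branch on presence instead of checking for -1. A sketch of the new call pattern:

    // Old: CodecInst ci; if (acm->SendCodec(&ci) == 0) { ... }
    // New (sketch):
    rtc::Optional<CodecInst> codec = acm->SendCodec();
    if (codec) {
      printf("Sending %s at %d Hz\n", codec->plname, codec->plfreq);
    }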
///////////////////////////////////////////////////////////////////////////
// int32_t SendFrequency()
@@ -471,10 +472,14 @@ class AudioCodingModule {
//
virtual int RegisterReceiveCodec(const CodecInst& receive_codec) = 0;
+ // Registers an external decoder. The name is only used to provide information
+ // back to the caller about the decoder. Hence, the name is arbitrary, and may
+ // be empty.
virtual int RegisterExternalReceiveCodec(int rtp_payload_type,
AudioDecoder* external_decoder,
int sample_rate_hz,
- int num_channels) = 0;
+ int num_channels,
+ const std::string& name) = 0;
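Existing RegisterExternalReceiveCodec call sites gain one trailing argument; because the name is informational only, an empty string is acceptable. A sketch, where the decoder object, payload type, and name are placeholders:

    // Sketch: my_decoder is some AudioDecoder implementation.
    acm->RegisterExternalReceiveCodec(102,           // RTP payload type
                                      &my_decoder,   // external decoder
                                      48000,         // sample rate in Hz
                                      2,             // number of channels
                                      "my-codec");   // arbitrary, may be ""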
///////////////////////////////////////////////////////////////////////////
// int32_t UnregisterReceiveCodec()
@@ -705,23 +710,6 @@ class AudioCodingModule {
NetworkStatistics* network_statistics) = 0;
//
- // Set an initial delay for playout.
- // An initial delay yields ACM playout silence until the equivalent of
- // |delay_ms| of audio payload has accumulated in the NetEq jitter buffer.
- // Thereafter, ACM pulls audio from NetEq in its regular fashion, and the
- // given delay is maintained throughout the call, unless channel conditions
- // yield a higher jitter buffer delay.
- //
- // Input:
- // -delay_ms : delay in milliseconds.
- //
- // Return values:
- // -1 if failed to set the delay.
- // 0 if delay is set successfully.
- //
- virtual int SetInitialPlayoutDelay(int delay_ms) = 0;
-
- //
// Enable NACK and set the maximum size of the NACK list. If NACK is already
// enabled, the maximum NACK list size is modified accordingly.
//
@@ -755,4 +743,4 @@ class AudioCodingModule {
} // namespace webrtc
-#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_INCLUDE_AUDIO_CODING_MODULE_H_
+#endif // WEBRTC_MODULES_AUDIO_CODING_INCLUDE_AUDIO_CODING_MODULE_H_
diff --git a/webrtc/modules/audio_coding/main/include/audio_coding_module_typedefs.h b/webrtc/modules/audio_coding/include/audio_coding_module_typedefs.h
index 489df406f4..280d6bffa2 100644
--- a/webrtc/modules/audio_coding/main/include/audio_coding_module_typedefs.h
+++ b/webrtc/modules/audio_coding/include/audio_coding_module_typedefs.h
@@ -8,12 +8,12 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_INCLUDE_AUDIO_CODING_MODULE_TYPEDEFS_H_
-#define WEBRTC_MODULES_AUDIO_CODING_MAIN_INCLUDE_AUDIO_CODING_MODULE_TYPEDEFS_H_
+#ifndef WEBRTC_MODULES_AUDIO_CODING_INCLUDE_AUDIO_CODING_MODULE_TYPEDEFS_H_
+#define WEBRTC_MODULES_AUDIO_CODING_INCLUDE_AUDIO_CODING_MODULE_TYPEDEFS_H_
#include <map>
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/typedefs.h"
namespace webrtc {
@@ -48,4 +48,4 @@ enum OpusApplicationMode {
} // namespace webrtc
-#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_INCLUDE_AUDIO_CODING_MODULE_TYPEDEFS_H_
+#endif // WEBRTC_MODULES_AUDIO_CODING_INCLUDE_AUDIO_CODING_MODULE_TYPEDEFS_H_
diff --git a/webrtc/modules/audio_coding/main/acm2/OWNERS b/webrtc/modules/audio_coding/main/acm2/OWNERS
deleted file mode 100644
index 3ee6b4bf5f..0000000000
--- a/webrtc/modules/audio_coding/main/acm2/OWNERS
+++ /dev/null
@@ -1,5 +0,0 @@
-
-# These are for the common case of adding or renaming files. If you're doing
-# structural changes, please get a review from a reviewer in this file.
-per-file *.gyp=*
-per-file *.gypi=*
diff --git a/webrtc/modules/audio_coding/main/acm2/codec_manager.cc b/webrtc/modules/audio_coding/main/acm2/codec_manager.cc
deleted file mode 100644
index f9b77e8985..0000000000
--- a/webrtc/modules/audio_coding/main/acm2/codec_manager.cc
+++ /dev/null
@@ -1,465 +0,0 @@
-/*
- * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "webrtc/modules/audio_coding/main/acm2/codec_manager.h"
-
-#include "webrtc/base/checks.h"
-#include "webrtc/engine_configurations.h"
-#include "webrtc/modules/audio_coding/main/acm2/acm_codec_database.h"
-#include "webrtc/system_wrappers/include/trace.h"
-
-namespace webrtc {
-namespace acm2 {
-
-namespace {
-bool IsCodecRED(const CodecInst& codec) {
- return (STR_CASE_CMP(codec.plname, "RED") == 0);
-}
-
-bool IsCodecCN(const CodecInst& codec) {
- return (STR_CASE_CMP(codec.plname, "CN") == 0);
-}
-
-// Check if the given codec is valid to be registered as a send codec.
-int IsValidSendCodec(const CodecInst& send_codec, bool is_primary_encoder) {
- int dummy_id = 0;
- if ((send_codec.channels != 1) && (send_codec.channels != 2)) {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, dummy_id,
- "Wrong number of channels (%d, only mono and stereo are "
- "supported) for %s encoder",
- send_codec.channels,
- is_primary_encoder ? "primary" : "secondary");
- return -1;
- }
-
- int codec_id = ACMCodecDB::CodecNumber(send_codec);
- if (codec_id < 0) {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, dummy_id,
- "Invalid codec setting for the send codec.");
- return -1;
- }
-
- // TODO(tlegrand): Remove this check. Already taken care of in
- // ACMCodecDB::CodecNumber().
- // Check if the payload-type is valid
- if (!ACMCodecDB::ValidPayloadType(send_codec.pltype)) {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, dummy_id,
- "Invalid payload-type %d for %s.", send_codec.pltype,
- send_codec.plname);
- return -1;
- }
-
- // Telephone-event cannot be a send codec.
- if (!STR_CASE_CMP(send_codec.plname, "telephone-event")) {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, dummy_id,
- "telephone-event cannot be a send codec");
- return -1;
- }
-
- if (ACMCodecDB::codec_settings_[codec_id].channel_support <
- send_codec.channels) {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, dummy_id,
- "%d number of channels not supportedn for %s.",
- send_codec.channels, send_codec.plname);
- return -1;
- }
-
- if (!is_primary_encoder) {
- // If registering the secondary encoder, then RED and CN are not valid
- // choices as encoders.
- if (IsCodecRED(send_codec)) {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, dummy_id,
- "RED cannot be secondary codec");
- return -1;
- }
-
- if (IsCodecCN(send_codec)) {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, dummy_id,
- "DTX cannot be secondary codec");
- return -1;
- }
- }
- return codec_id;
-}
-
-bool IsIsac(const CodecInst& codec) {
- return
-#if (defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX))
- !STR_CASE_CMP(codec.plname, "isac") ||
-#endif
- false;
-}
-
-bool IsOpus(const CodecInst& codec) {
- return
-#ifdef WEBRTC_CODEC_OPUS
- !STR_CASE_CMP(codec.plname, "opus") ||
-#endif
- false;
-}
-
-bool IsPcmU(const CodecInst& codec) {
- return !STR_CASE_CMP(codec.plname, "pcmu");
-}
-
-bool IsPcmA(const CodecInst& codec) {
- return !STR_CASE_CMP(codec.plname, "pcma");
-}
-
-bool IsPcm16B(const CodecInst& codec) {
- return !STR_CASE_CMP(codec.plname, "l16");
-}
-
-bool IsIlbc(const CodecInst& codec) {
- return
-#ifdef WEBRTC_CODEC_ILBC
- !STR_CASE_CMP(codec.plname, "ilbc") ||
-#endif
- false;
-}
-
-bool IsG722(const CodecInst& codec) {
- return
-#ifdef WEBRTC_CODEC_G722
- !STR_CASE_CMP(codec.plname, "g722") ||
-#endif
- false;
-}
-
-bool CodecSupported(const CodecInst& codec) {
- return IsOpus(codec) || IsPcmU(codec) || IsPcmA(codec) || IsPcm16B(codec) ||
- IsIlbc(codec) || IsG722(codec) || IsIsac(codec);
-}
-
-const CodecInst kEmptyCodecInst = {-1, "noCodecRegistered", 0, 0, 0, 0};
-} // namespace
-
-CodecManager::CodecManager()
- : cng_nb_pltype_(255),
- cng_wb_pltype_(255),
- cng_swb_pltype_(255),
- cng_fb_pltype_(255),
- red_nb_pltype_(255),
- stereo_send_(false),
- dtx_enabled_(false),
- vad_mode_(VADNormal),
- send_codec_inst_(kEmptyCodecInst),
- red_enabled_(false),
- codec_fec_enabled_(false),
- encoder_is_opus_(false) {
- // Register the default payload type for RED and for CNG at sampling rates of
- // 8, 16, 32 and 48 kHz.
- for (const CodecInst& ci : RentACodec::Database()) {
- if (IsCodecRED(ci) && ci.plfreq == 8000) {
- red_nb_pltype_ = static_cast<uint8_t>(ci.pltype);
- } else if (IsCodecCN(ci)) {
- if (ci.plfreq == 8000) {
- cng_nb_pltype_ = static_cast<uint8_t>(ci.pltype);
- } else if (ci.plfreq == 16000) {
- cng_wb_pltype_ = static_cast<uint8_t>(ci.pltype);
- } else if (ci.plfreq == 32000) {
- cng_swb_pltype_ = static_cast<uint8_t>(ci.pltype);
- } else if (ci.plfreq == 48000) {
- cng_fb_pltype_ = static_cast<uint8_t>(ci.pltype);
- }
- }
- }
- thread_checker_.DetachFromThread();
-}
-
-CodecManager::~CodecManager() = default;
-
-int CodecManager::RegisterEncoder(const CodecInst& send_codec) {
- RTC_DCHECK(thread_checker_.CalledOnValidThread());
- int codec_id = IsValidSendCodec(send_codec, true);
-
- // Check for reported errors from function IsValidSendCodec().
- if (codec_id < 0) {
- return -1;
- }
-
- int dummy_id = 0;
- // RED can be registered with another payload type. If not registered, a
- // default payload type is used.
- if (IsCodecRED(send_codec)) {
- // TODO(tlegrand): Remove this check. Already taken care of in
- // ACMCodecDB::CodecNumber().
- // Check if the payload-type is valid
- if (!ACMCodecDB::ValidPayloadType(send_codec.pltype)) {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, dummy_id,
- "Invalid payload-type %d for %s.", send_codec.pltype,
- send_codec.plname);
- return -1;
- }
- // Set RED payload type.
- if (send_codec.plfreq == 8000) {
- red_nb_pltype_ = static_cast<uint8_t>(send_codec.pltype);
- } else {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, dummy_id,
- "RegisterSendCodec() failed, invalid frequency for RED "
- "registration");
- return -1;
- }
- return 0;
- }
-
- // CNG can be registered with another payload type. If not registered, the
- // default payload types from the codec database will be used.
- if (IsCodecCN(send_codec)) {
- // CNG is registered.
- switch (send_codec.plfreq) {
- case 8000: {
- cng_nb_pltype_ = static_cast<uint8_t>(send_codec.pltype);
- return 0;
- }
- case 16000: {
- cng_wb_pltype_ = static_cast<uint8_t>(send_codec.pltype);
- return 0;
- }
- case 32000: {
- cng_swb_pltype_ = static_cast<uint8_t>(send_codec.pltype);
- return 0;
- }
- case 48000: {
- cng_fb_pltype_ = static_cast<uint8_t>(send_codec.pltype);
- return 0;
- }
- default: {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, dummy_id,
- "RegisterSendCodec() failed, invalid frequency for CNG "
- "registration");
- return -1;
- }
- }
- }
-
- // Set stereo, and make sure VAD and DTX are turned off.
- if (send_codec.channels == 2) {
- stereo_send_ = true;
- if (dtx_enabled_) {
- WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioCoding, dummy_id,
- "VAD/DTX is turned off, not supported when sending stereo.");
- }
- dtx_enabled_ = false;
- } else {
- stereo_send_ = false;
- }
-
- // Check if the codec is already registered as send codec.
- bool new_codec = true;
- if (codec_owner_.Encoder()) {
- int new_codec_id = ACMCodecDB::CodecNumber(send_codec_inst_);
- RTC_DCHECK_GE(new_codec_id, 0);
- new_codec = new_codec_id != codec_id;
- }
-
- if (RedPayloadType(send_codec.plfreq) == -1) {
- red_enabled_ = false;
- }
-
- encoder_is_opus_ = IsOpus(send_codec);
-
- if (new_codec) {
- // This is a new codec. Register it and return.
- RTC_DCHECK(CodecSupported(send_codec));
- if (IsOpus(send_codec)) {
- // VAD/DTX not supported.
- dtx_enabled_ = false;
- }
- if (!codec_owner_.SetEncoders(
- send_codec, dtx_enabled_ ? CngPayloadType(send_codec.plfreq) : -1,
- vad_mode_, red_enabled_ ? RedPayloadType(send_codec.plfreq) : -1))
- return -1;
- RTC_DCHECK(codec_owner_.Encoder());
-
- codec_fec_enabled_ = codec_fec_enabled_ &&
- codec_owner_.Encoder()->SetFec(codec_fec_enabled_);
-
- send_codec_inst_ = send_codec;
- return 0;
- }
-
- // This is an existing codec; re-create it if any parameters have changed.
- if (send_codec_inst_.plfreq != send_codec.plfreq ||
- send_codec_inst_.pacsize != send_codec.pacsize ||
- send_codec_inst_.channels != send_codec.channels) {
- if (!codec_owner_.SetEncoders(
- send_codec, dtx_enabled_ ? CngPayloadType(send_codec.plfreq) : -1,
- vad_mode_, red_enabled_ ? RedPayloadType(send_codec.plfreq) : -1))
- return -1;
- RTC_DCHECK(codec_owner_.Encoder());
- }
- send_codec_inst_.plfreq = send_codec.plfreq;
- send_codec_inst_.pacsize = send_codec.pacsize;
- send_codec_inst_.channels = send_codec.channels;
- send_codec_inst_.pltype = send_codec.pltype;
-
- // Check if a change in Rate is required.
- if (send_codec.rate != send_codec_inst_.rate) {
- codec_owner_.Encoder()->SetTargetBitrate(send_codec.rate);
- send_codec_inst_.rate = send_codec.rate;
- }
-
- codec_fec_enabled_ =
- codec_fec_enabled_ && codec_owner_.Encoder()->SetFec(codec_fec_enabled_);
-
- return 0;
-}
-
-void CodecManager::RegisterEncoder(AudioEncoder* external_speech_encoder) {
- // Make up a CodecInst.
- send_codec_inst_.channels = external_speech_encoder->NumChannels();
- send_codec_inst_.plfreq = external_speech_encoder->SampleRateHz();
- send_codec_inst_.pacsize = rtc::CheckedDivExact(
- static_cast<int>(external_speech_encoder->Max10MsFramesInAPacket() *
- send_codec_inst_.plfreq),
- 100);
- send_codec_inst_.pltype = -1; // Not valid.
- send_codec_inst_.rate = -1; // Not valid.
- static const char kName[] = "external";
- memcpy(send_codec_inst_.plname, kName, sizeof(kName));
-
- if (stereo_send_)
- dtx_enabled_ = false;
- codec_fec_enabled_ =
- codec_fec_enabled_ && codec_owner_.Encoder()->SetFec(codec_fec_enabled_);
- int cng_pt = dtx_enabled_
- ? CngPayloadType(external_speech_encoder->SampleRateHz())
- : -1;
- int red_pt = red_enabled_ ? RedPayloadType(send_codec_inst_.plfreq) : -1;
- codec_owner_.SetEncoders(external_speech_encoder, cng_pt, vad_mode_, red_pt);
-}
-
-int CodecManager::GetCodecInst(CodecInst* current_codec) const {
- int dummy_id = 0;
- WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceAudioCoding, dummy_id,
- "SendCodec()");
-
- if (!codec_owner_.Encoder()) {
- WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceAudioCoding, dummy_id,
- "SendCodec Failed, no codec is registered");
- return -1;
- }
- *current_codec = send_codec_inst_;
- return 0;
-}
-
-bool CodecManager::SetCopyRed(bool enable) {
- if (enable && codec_fec_enabled_) {
- WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioCoding, 0,
- "Codec internal FEC and RED cannot be co-enabled.");
- return false;
- }
- if (enable && RedPayloadType(send_codec_inst_.plfreq) == -1) {
- WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioCoding, 0,
- "Cannot enable RED at %i Hz.", send_codec_inst_.plfreq);
- return false;
- }
- if (red_enabled_ != enable) {
- red_enabled_ = enable;
- if (codec_owner_.Encoder()) {
- int cng_pt = dtx_enabled_ ? CngPayloadType(send_codec_inst_.plfreq) : -1;
- int red_pt = red_enabled_ ? RedPayloadType(send_codec_inst_.plfreq) : -1;
- codec_owner_.ChangeCngAndRed(cng_pt, vad_mode_, red_pt);
- }
- }
- return true;
-}
-
-int CodecManager::SetVAD(bool enable, ACMVADMode mode) {
- // Sanity check of the mode.
- RTC_DCHECK(mode == VADNormal || mode == VADLowBitrate || mode == VADAggr ||
- mode == VADVeryAggr);
-
- // Check that the send codec is mono. We don't support VAD/DTX for stereo
- // sending.
- if (enable && stereo_send_) {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, 0,
- "VAD/DTX not supported for stereo sending");
- dtx_enabled_ = false;
- return -1;
- }
-
- // If a send codec is registered, set VAD/DTX for the codec.
- if (IsOpus(send_codec_inst_)) {
- // VAD/DTX not supported.
- dtx_enabled_ = false;
- return 0;
- }
-
- if (dtx_enabled_ != enable || vad_mode_ != mode) {
- dtx_enabled_ = enable;
- vad_mode_ = mode;
- if (codec_owner_.Encoder()) {
- int cng_pt = dtx_enabled_ ? CngPayloadType(send_codec_inst_.plfreq) : -1;
- int red_pt = red_enabled_ ? RedPayloadType(send_codec_inst_.plfreq) : -1;
- codec_owner_.ChangeCngAndRed(cng_pt, vad_mode_, red_pt);
- }
- }
- return 0;
-}
-
-void CodecManager::VAD(bool* dtx_enabled,
- bool* vad_enabled,
- ACMVADMode* mode) const {
- *dtx_enabled = dtx_enabled_;
- *vad_enabled = dtx_enabled_;
- *mode = vad_mode_;
-}
-
-int CodecManager::SetCodecFEC(bool enable_codec_fec) {
- if (enable_codec_fec == true && red_enabled_ == true) {
- WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioCoding, 0,
- "Codec internal FEC and RED cannot be co-enabled.");
- return -1;
- }
-
- RTC_CHECK(codec_owner_.Encoder());
- codec_fec_enabled_ =
- codec_owner_.Encoder()->SetFec(enable_codec_fec) && enable_codec_fec;
- return codec_fec_enabled_ == enable_codec_fec ? 0 : -1;
-}
-
-AudioDecoder* CodecManager::GetAudioDecoder(const CodecInst& codec) {
- return IsIsac(codec) ? codec_owner_.GetIsacDecoder() : nullptr;
-}
-
-int CodecManager::CngPayloadType(int sample_rate_hz) const {
- switch (sample_rate_hz) {
- case 8000:
- return cng_nb_pltype_;
- case 16000:
- return cng_wb_pltype_;
- case 32000:
- return cng_swb_pltype_;
- case 48000:
- return cng_fb_pltype_;
- default:
- FATAL() << sample_rate_hz << " Hz is not supported";
- return -1;
- }
-}
-
-int CodecManager::RedPayloadType(int sample_rate_hz) const {
- switch (sample_rate_hz) {
- case 8000:
- return red_nb_pltype_;
- case 16000:
- case 32000:
- case 48000:
- return -1;
- default:
- FATAL() << sample_rate_hz << " Hz is not supported";
- return -1;
- }
-}
-
-} // namespace acm2
-} // namespace webrtc
diff --git a/webrtc/modules/audio_coding/main/acm2/codec_manager.h b/webrtc/modules/audio_coding/main/acm2/codec_manager.h
deleted file mode 100644
index c6c262ea26..0000000000
--- a/webrtc/modules/audio_coding/main/acm2/codec_manager.h
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_CODEC_MANAGER_H_
-#define WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_CODEC_MANAGER_H_
-
-#include "webrtc/base/constructormagic.h"
-#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/base/thread_checker.h"
-#include "webrtc/modules/audio_coding/main/acm2/codec_owner.h"
-#include "webrtc/modules/audio_coding/main/include/audio_coding_module_typedefs.h"
-#include "webrtc/common_types.h"
-
-namespace webrtc {
-
-class AudioDecoder;
-class AudioEncoder;
-
-namespace acm2 {
-
-class CodecManager final {
- public:
- CodecManager();
- ~CodecManager();
-
- int RegisterEncoder(const CodecInst& send_codec);
-
- void RegisterEncoder(AudioEncoder* external_speech_encoder);
-
- int GetCodecInst(CodecInst* current_codec) const;
-
- bool SetCopyRed(bool enable);
-
- int SetVAD(bool enable, ACMVADMode mode);
-
- void VAD(bool* dtx_enabled, bool* vad_enabled, ACMVADMode* mode) const;
-
- int SetCodecFEC(bool enable_codec_fec);
-
- // Returns a pointer to AudioDecoder of the given codec. For iSAC, encoding
- // and decoding have to be performed on a shared codec instance. By calling
- // this method, we get the codec instance that ACM owns.
- // If |codec| does not share an instance between encoder and decoder, returns
- // null.
- AudioDecoder* GetAudioDecoder(const CodecInst& codec);
-
- bool stereo_send() const { return stereo_send_; }
-
- bool red_enabled() const { return red_enabled_; }
-
- bool codec_fec_enabled() const { return codec_fec_enabled_; }
-
- AudioEncoder* CurrentEncoder() { return codec_owner_.Encoder(); }
- const AudioEncoder* CurrentEncoder() const { return codec_owner_.Encoder(); }
-
- bool CurrentEncoderIsOpus() const { return encoder_is_opus_; }
-
- private:
- int CngPayloadType(int sample_rate_hz) const;
-
- int RedPayloadType(int sample_rate_hz) const;
-
- rtc::ThreadChecker thread_checker_;
- uint8_t cng_nb_pltype_;
- uint8_t cng_wb_pltype_;
- uint8_t cng_swb_pltype_;
- uint8_t cng_fb_pltype_;
- uint8_t red_nb_pltype_;
- bool stereo_send_;
- bool dtx_enabled_;
- ACMVADMode vad_mode_;
- CodecInst send_codec_inst_;
- bool red_enabled_;
- bool codec_fec_enabled_;
- CodecOwner codec_owner_;
- bool encoder_is_opus_;
-
- RTC_DISALLOW_COPY_AND_ASSIGN(CodecManager);
-};
-
-} // namespace acm2
-} // namespace webrtc
-#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_CODEC_MANAGER_H_
diff --git a/webrtc/modules/audio_coding/main/acm2/codec_owner.cc b/webrtc/modules/audio_coding/main/acm2/codec_owner.cc
deleted file mode 100644
index df9a992dac..0000000000
--- a/webrtc/modules/audio_coding/main/acm2/codec_owner.cc
+++ /dev/null
@@ -1,213 +0,0 @@
-/*
- * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "webrtc/modules/audio_coding/main/acm2/codec_owner.h"
-
-#include "webrtc/base/checks.h"
-#include "webrtc/base/logging.h"
-#include "webrtc/engine_configurations.h"
-#include "webrtc/modules/audio_coding/codecs/cng/include/audio_encoder_cng.h"
-#include "webrtc/modules/audio_coding/codecs/g711/include/audio_encoder_pcm.h"
-#ifdef WEBRTC_CODEC_G722
-#include "webrtc/modules/audio_coding/codecs/g722/include/audio_encoder_g722.h"
-#endif
-#ifdef WEBRTC_CODEC_ILBC
-#include "webrtc/modules/audio_coding/codecs/ilbc/include/audio_encoder_ilbc.h"
-#endif
-#ifdef WEBRTC_CODEC_ISACFX
-#include "webrtc/modules/audio_coding/codecs/isac/fix/include/audio_decoder_isacfix.h"
-#include "webrtc/modules/audio_coding/codecs/isac/fix/include/audio_encoder_isacfix.h"
-#endif
-#ifdef WEBRTC_CODEC_ISAC
-#include "webrtc/modules/audio_coding/codecs/isac/main/include/audio_decoder_isac.h"
-#include "webrtc/modules/audio_coding/codecs/isac/main/include/audio_encoder_isac.h"
-#endif
-#ifdef WEBRTC_CODEC_OPUS
-#include "webrtc/modules/audio_coding/codecs/opus/include/audio_encoder_opus.h"
-#endif
-#include "webrtc/modules/audio_coding/codecs/pcm16b/include/audio_encoder_pcm16b.h"
-#ifdef WEBRTC_CODEC_RED
-#include "webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.h"
-#endif
-
-namespace webrtc {
-namespace acm2 {
-
-CodecOwner::CodecOwner() : external_speech_encoder_(nullptr) {
-}
-
-CodecOwner::~CodecOwner() = default;
-
-namespace {
-
-rtc::scoped_ptr<AudioDecoder> CreateIsacDecoder(
- LockedIsacBandwidthInfo* bwinfo) {
-#if defined(WEBRTC_CODEC_ISACFX)
- return rtc_make_scoped_ptr(new AudioDecoderIsacFix(bwinfo));
-#elif defined(WEBRTC_CODEC_ISAC)
- return rtc_make_scoped_ptr(new AudioDecoderIsac(bwinfo));
-#else
- FATAL() << "iSAC is not supported.";
- return rtc::scoped_ptr<AudioDecoder>();
-#endif
-}
-
-// Returns a new speech encoder, or null on error.
-// TODO(kwiberg): Don't handle errors here (bug 5033)
-rtc::scoped_ptr<AudioEncoder> CreateSpeechEncoder(
- const CodecInst& speech_inst,
- LockedIsacBandwidthInfo* bwinfo) {
-#if defined(WEBRTC_CODEC_ISACFX)
- if (STR_CASE_CMP(speech_inst.plname, "isac") == 0)
- return rtc_make_scoped_ptr(new AudioEncoderIsacFix(speech_inst, bwinfo));
-#endif
-#if defined(WEBRTC_CODEC_ISAC)
- if (STR_CASE_CMP(speech_inst.plname, "isac") == 0)
- return rtc_make_scoped_ptr(new AudioEncoderIsac(speech_inst, bwinfo));
-#endif
-#ifdef WEBRTC_CODEC_OPUS
- if (STR_CASE_CMP(speech_inst.plname, "opus") == 0)
- return rtc_make_scoped_ptr(new AudioEncoderOpus(speech_inst));
-#endif
- if (STR_CASE_CMP(speech_inst.plname, "pcmu") == 0)
- return rtc_make_scoped_ptr(new AudioEncoderPcmU(speech_inst));
- if (STR_CASE_CMP(speech_inst.plname, "pcma") == 0)
- return rtc_make_scoped_ptr(new AudioEncoderPcmA(speech_inst));
- if (STR_CASE_CMP(speech_inst.plname, "l16") == 0)
- return rtc_make_scoped_ptr(new AudioEncoderPcm16B(speech_inst));
-#ifdef WEBRTC_CODEC_ILBC
- if (STR_CASE_CMP(speech_inst.plname, "ilbc") == 0)
- return rtc_make_scoped_ptr(new AudioEncoderIlbc(speech_inst));
-#endif
-#ifdef WEBRTC_CODEC_G722
- if (STR_CASE_CMP(speech_inst.plname, "g722") == 0)
- return rtc_make_scoped_ptr(new AudioEncoderG722(speech_inst));
-#endif
- LOG_F(LS_ERROR) << "Could not create encoder of type " << speech_inst.plname;
- return rtc::scoped_ptr<AudioEncoder>();
-}
-
-AudioEncoder* CreateRedEncoder(int red_payload_type,
- AudioEncoder* encoder,
- rtc::scoped_ptr<AudioEncoder>* red_encoder) {
-#ifdef WEBRTC_CODEC_RED
- if (red_payload_type != -1) {
- AudioEncoderCopyRed::Config config;
- config.payload_type = red_payload_type;
- config.speech_encoder = encoder;
- red_encoder->reset(new AudioEncoderCopyRed(config));
- return red_encoder->get();
- }
-#endif
-
- red_encoder->reset();
- return encoder;
-}
-
-void CreateCngEncoder(int cng_payload_type,
- ACMVADMode vad_mode,
- AudioEncoder* encoder,
- rtc::scoped_ptr<AudioEncoder>* cng_encoder) {
- if (cng_payload_type == -1) {
- cng_encoder->reset();
- return;
- }
- AudioEncoderCng::Config config;
- config.num_channels = encoder->NumChannels();
- config.payload_type = cng_payload_type;
- config.speech_encoder = encoder;
- switch (vad_mode) {
- case VADNormal:
- config.vad_mode = Vad::kVadNormal;
- break;
- case VADLowBitrate:
- config.vad_mode = Vad::kVadLowBitrate;
- break;
- case VADAggr:
- config.vad_mode = Vad::kVadAggressive;
- break;
- case VADVeryAggr:
- config.vad_mode = Vad::kVadVeryAggressive;
- break;
- default:
- FATAL();
- }
- cng_encoder->reset(new AudioEncoderCng(config));
-}
-} // namespace
-
-bool CodecOwner::SetEncoders(const CodecInst& speech_inst,
- int cng_payload_type,
- ACMVADMode vad_mode,
- int red_payload_type) {
- speech_encoder_ = CreateSpeechEncoder(speech_inst, &isac_bandwidth_info_);
- if (!speech_encoder_)
- return false;
- external_speech_encoder_ = nullptr;
- ChangeCngAndRed(cng_payload_type, vad_mode, red_payload_type);
- return true;
-}
-
-void CodecOwner::SetEncoders(AudioEncoder* external_speech_encoder,
- int cng_payload_type,
- ACMVADMode vad_mode,
- int red_payload_type) {
- external_speech_encoder_ = external_speech_encoder;
- speech_encoder_.reset();
- ChangeCngAndRed(cng_payload_type, vad_mode, red_payload_type);
-}
-
-void CodecOwner::ChangeCngAndRed(int cng_payload_type,
- ACMVADMode vad_mode,
- int red_payload_type) {
- AudioEncoder* speech_encoder = SpeechEncoder();
- if (cng_payload_type != -1 || red_payload_type != -1) {
- // The RED and CNG encoders need to be in sync with the speech encoder, so
- // reset the latter to ensure its buffer is empty.
- speech_encoder->Reset();
- }
- AudioEncoder* encoder =
- CreateRedEncoder(red_payload_type, speech_encoder, &red_encoder_);
- CreateCngEncoder(cng_payload_type, vad_mode, encoder, &cng_encoder_);
- RTC_DCHECK_EQ(!!speech_encoder_ + !!external_speech_encoder_, 1);
-}
-
-AudioDecoder* CodecOwner::GetIsacDecoder() {
- if (!isac_decoder_)
- isac_decoder_ = CreateIsacDecoder(&isac_bandwidth_info_);
- return isac_decoder_.get();
-}
-
-AudioEncoder* CodecOwner::Encoder() {
- const auto& const_this = *this;
- return const_cast<AudioEncoder*>(const_this.Encoder());
-}
-
-const AudioEncoder* CodecOwner::Encoder() const {
- if (cng_encoder_)
- return cng_encoder_.get();
- if (red_encoder_)
- return red_encoder_.get();
- return SpeechEncoder();
-}
-
-AudioEncoder* CodecOwner::SpeechEncoder() {
- const auto* const_this = this;
- return const_cast<AudioEncoder*>(const_this->SpeechEncoder());
-}
-
-const AudioEncoder* CodecOwner::SpeechEncoder() const {
- RTC_DCHECK(!speech_encoder_ || !external_speech_encoder_);
- return external_speech_encoder_ ? external_speech_encoder_
- : speech_encoder_.get();
-}
-
-} // namespace acm2
-} // namespace webrtc
diff --git a/webrtc/modules/audio_coding/main/acm2/codec_owner.h b/webrtc/modules/audio_coding/main/acm2/codec_owner.h
deleted file mode 100644
index d0fb4f760e..0000000000
--- a/webrtc/modules/audio_coding/main/acm2/codec_owner.h
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_CODEC_OWNER_H_
-#define WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_CODEC_OWNER_H_
-
-#include "webrtc/base/constructormagic.h"
-#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/common_types.h"
-#include "webrtc/modules/audio_coding/codecs/audio_encoder.h"
-#include "webrtc/modules/audio_coding/codecs/audio_decoder.h"
-#include "webrtc/modules/audio_coding/main/include/audio_coding_module_typedefs.h"
-
-#if defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)
-#include "webrtc/modules/audio_coding/codecs/isac/locked_bandwidth_info.h"
-#else
-// Dummy implementation, for when we don't have iSAC.
-namespace webrtc {
-class LockedIsacBandwidthInfo {};
-}
-#endif
-
-namespace webrtc {
-namespace acm2 {
-
-class CodecOwner {
- public:
- CodecOwner();
- ~CodecOwner();
-
- // Start using the specified encoder. Returns false on error.
- // TODO(kwiberg): Don't handle errors here (bug 5033)
- bool SetEncoders(const CodecInst& speech_inst,
- int cng_payload_type,
- ACMVADMode vad_mode,
- int red_payload_type) WARN_UNUSED_RESULT;
-
- void SetEncoders(AudioEncoder* external_speech_encoder,
- int cng_payload_type,
- ACMVADMode vad_mode,
- int red_payload_type);
-
- void ChangeCngAndRed(int cng_payload_type,
- ACMVADMode vad_mode,
- int red_payload_type);
-
- // Returns a pointer to an iSAC decoder owned by the CodecOwner. The decoder
- // will live as long as the CodecOwner exists.
- AudioDecoder* GetIsacDecoder();
-
- AudioEncoder* Encoder();
- const AudioEncoder* Encoder() const;
-
- private:
- AudioEncoder* SpeechEncoder();
- const AudioEncoder* SpeechEncoder() const;
-
- // At most one of these is non-null:
- rtc::scoped_ptr<AudioEncoder> speech_encoder_;
- AudioEncoder* external_speech_encoder_;
-
- // If we've created an iSAC decoder because someone called GetIsacDecoder,
- // store it here.
- rtc::scoped_ptr<AudioDecoder> isac_decoder_;
-
- // iSAC bandwidth estimation info, for use with iSAC encoders and decoders.
- LockedIsacBandwidthInfo isac_bandwidth_info_;
-
- // |cng_encoder_| and |red_encoder_| are valid iff CNG or RED, respectively,
- // are active.
- rtc::scoped_ptr<AudioEncoder> cng_encoder_;
- rtc::scoped_ptr<AudioEncoder> red_encoder_;
-
- RTC_DISALLOW_COPY_AND_ASSIGN(CodecOwner);
-};
-
-} // namespace acm2
-} // namespace webrtc
-#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_CODEC_OWNER_H_
diff --git a/webrtc/modules/audio_coding/main/acm2/rent_a_codec.cc b/webrtc/modules/audio_coding/main/acm2/rent_a_codec.cc
deleted file mode 100644
index 42f0a4c7db..0000000000
--- a/webrtc/modules/audio_coding/main/acm2/rent_a_codec.cc
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "webrtc/modules/audio_coding/main/acm2/rent_a_codec.h"
-
-#include "webrtc/modules/audio_coding/main/acm2/acm_codec_database.h"
-#include "webrtc/modules/audio_coding/main/acm2/acm_common_defs.h"
-
-namespace webrtc {
-namespace acm2 {
-
-rtc::Maybe<RentACodec::CodecId> RentACodec::CodecIdByParams(
- const char* payload_name,
- int sampling_freq_hz,
- int channels) {
- return CodecIdFromIndex(
- ACMCodecDB::CodecId(payload_name, sampling_freq_hz, channels));
-}
-
-rtc::Maybe<CodecInst> RentACodec::CodecInstById(CodecId codec_id) {
- rtc::Maybe<int> mi = CodecIndexFromId(codec_id);
- return mi ? rtc::Maybe<CodecInst>(Database()[*mi]) : rtc::Maybe<CodecInst>();
-}
-
-rtc::Maybe<CodecInst> RentACodec::CodecInstByParams(const char* payload_name,
- int sampling_freq_hz,
- int channels) {
- rtc::Maybe<CodecId> codec_id =
- CodecIdByParams(payload_name, sampling_freq_hz, channels);
- if (!codec_id)
- return rtc::Maybe<CodecInst>();
- rtc::Maybe<CodecInst> ci = CodecInstById(*codec_id);
- RTC_DCHECK(ci);
-
- // Keep the number of channels from the function call. For most codecs it
- // will be the same value as in default codec settings, but not for all.
- ci->channels = channels;
-
- return ci;
-}
-
-bool RentACodec::IsCodecValid(const CodecInst& codec_inst) {
- return ACMCodecDB::CodecNumber(codec_inst) >= 0;
-}
-
-rtc::ArrayView<const CodecInst> RentACodec::Database() {
- return rtc::ArrayView<const CodecInst>(ACMCodecDB::database_,
- NumberOfCodecs());
-}
-
-rtc::Maybe<NetEqDecoder> RentACodec::NetEqDecoderFromCodecId(CodecId codec_id,
- int num_channels) {
- rtc::Maybe<int> i = CodecIndexFromId(codec_id);
- if (!i)
- return rtc::Maybe<NetEqDecoder>();
- const NetEqDecoder ned = ACMCodecDB::neteq_decoders_[*i];
- return (ned == NetEqDecoder::kDecoderOpus && num_channels == 2)
- ? NetEqDecoder::kDecoderOpus_2ch
- : ned;
-}
-
-} // namespace acm2
-} // namespace webrtc
diff --git a/webrtc/modules/audio_coding/main/acm2/rent_a_codec.h b/webrtc/modules/audio_coding/main/acm2/rent_a_codec.h
deleted file mode 100644
index 55a5d0361a..0000000000
--- a/webrtc/modules/audio_coding/main/acm2/rent_a_codec.h
+++ /dev/null
@@ -1,162 +0,0 @@
-/*
- * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_RENT_A_CODEC_H_
-#define WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_RENT_A_CODEC_H_
-
-#include <stddef.h>
-
-#include "webrtc/base/array_view.h"
-#include "webrtc/base/maybe.h"
-#include "webrtc/typedefs.h"
-
-namespace webrtc {
-
-struct CodecInst;
-
-namespace acm2 {
-
-class RentACodec {
- public:
- enum class CodecId {
-#if defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)
- kISAC,
-#endif
-#ifdef WEBRTC_CODEC_ISAC
- kISACSWB,
-#endif
- // Mono
- kPCM16B,
- kPCM16Bwb,
- kPCM16Bswb32kHz,
- // Stereo
- kPCM16B_2ch,
- kPCM16Bwb_2ch,
- kPCM16Bswb32kHz_2ch,
- // Mono
- kPCMU,
- kPCMA,
- // Stereo
- kPCMU_2ch,
- kPCMA_2ch,
-#ifdef WEBRTC_CODEC_ILBC
- kILBC,
-#endif
-#ifdef WEBRTC_CODEC_G722
- kG722, // Mono
- kG722_2ch, // Stereo
-#endif
-#ifdef WEBRTC_CODEC_OPUS
- kOpus, // Mono and stereo
-#endif
- kCNNB,
- kCNWB,
- kCNSWB,
-#ifdef ENABLE_48000_HZ
- kCNFB,
-#endif
- kAVT,
-#ifdef WEBRTC_CODEC_RED
- kRED,
-#endif
- kNumCodecs, // Implementation detail. Don't use.
-
-// Set unsupported codecs to -1.
-#if !defined(WEBRTC_CODEC_ISAC) && !defined(WEBRTC_CODEC_ISACFX)
- kISAC = -1,
-#endif
-#ifndef WEBRTC_CODEC_ISAC
- kISACSWB = -1,
-#endif
- // 48 kHz not supported, always set to -1.
- kPCM16Bswb48kHz = -1,
-#ifndef WEBRTC_CODEC_ILBC
- kILBC = -1,
-#endif
-#ifndef WEBRTC_CODEC_G722
- kG722 = -1, // Mono
- kG722_2ch = -1, // Stereo
-#endif
-#ifndef WEBRTC_CODEC_OPUS
- kOpus = -1, // Mono and stereo
-#endif
-#ifndef WEBRTC_CODEC_RED
- kRED = -1,
-#endif
-#ifndef ENABLE_48000_HZ
- kCNFB = -1,
-#endif
-
- kNone = -1
- };
-
- enum class NetEqDecoder {
- kDecoderPCMu,
- kDecoderPCMa,
- kDecoderPCMu_2ch,
- kDecoderPCMa_2ch,
- kDecoderILBC,
- kDecoderISAC,
- kDecoderISACswb,
- kDecoderPCM16B,
- kDecoderPCM16Bwb,
- kDecoderPCM16Bswb32kHz,
- kDecoderPCM16Bswb48kHz,
- kDecoderPCM16B_2ch,
- kDecoderPCM16Bwb_2ch,
- kDecoderPCM16Bswb32kHz_2ch,
- kDecoderPCM16Bswb48kHz_2ch,
- kDecoderPCM16B_5ch,
- kDecoderG722,
- kDecoderG722_2ch,
- kDecoderRED,
- kDecoderAVT,
- kDecoderCNGnb,
- kDecoderCNGwb,
- kDecoderCNGswb32kHz,
- kDecoderCNGswb48kHz,
- kDecoderArbitrary,
- kDecoderOpus,
- kDecoderOpus_2ch,
- };
-
- static inline size_t NumberOfCodecs() {
- return static_cast<size_t>(CodecId::kNumCodecs);
- }
-
- static inline rtc::Maybe<int> CodecIndexFromId(CodecId codec_id) {
- const int i = static_cast<int>(codec_id);
- return i < static_cast<int>(NumberOfCodecs()) ? i : rtc::Maybe<int>();
- }
-
- static inline rtc::Maybe<CodecId> CodecIdFromIndex(int codec_index) {
- return static_cast<size_t>(codec_index) < NumberOfCodecs()
- ? static_cast<RentACodec::CodecId>(codec_index)
- : rtc::Maybe<RentACodec::CodecId>();
- }
-
- static rtc::Maybe<CodecId> CodecIdByParams(const char* payload_name,
- int sampling_freq_hz,
- int channels);
- static rtc::Maybe<CodecInst> CodecInstById(CodecId codec_id);
- static rtc::Maybe<CodecInst> CodecInstByParams(const char* payload_name,
- int sampling_freq_hz,
- int channels);
- static bool IsCodecValid(const CodecInst& codec_inst);
- static rtc::ArrayView<const CodecInst> Database();
-
- static rtc::Maybe<NetEqDecoder> NetEqDecoderFromCodecId(CodecId codec_id,
- int num_channels);
-};
-
-} // namespace acm2
-} // namespace webrtc
-
-#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_RENT_A_CODEC_H_
diff --git a/webrtc/modules/audio_coding/main/audio_coding_module.gypi b/webrtc/modules/audio_coding/main/audio_coding_module.gypi
deleted file mode 100644
index 6fb37d25fa..0000000000
--- a/webrtc/modules/audio_coding/main/audio_coding_module.gypi
+++ /dev/null
@@ -1,193 +0,0 @@
-# Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
-#
-# Use of this source code is governed by a BSD-style license
-# that can be found in the LICENSE file in the root of the source
-# tree. An additional intellectual property rights grant can be found
-# in the file PATENTS. All contributing project authors may
-# be found in the AUTHORS file in the root of the source tree.
-
-{
- 'variables': {
- 'audio_coding_dependencies': [
- 'cng',
- 'g711',
- 'pcm16b',
- '<(webrtc_root)/common.gyp:webrtc_common',
- '<(webrtc_root)/common_audio/common_audio.gyp:common_audio',
- '<(webrtc_root)/system_wrappers/system_wrappers.gyp:system_wrappers',
- ],
- 'audio_coding_defines': [],
- 'conditions': [
- ['include_opus==1', {
- 'audio_coding_dependencies': ['webrtc_opus',],
- 'audio_coding_defines': ['WEBRTC_CODEC_OPUS',],
- }],
- ['build_with_mozilla==0', {
- 'conditions': [
- ['target_arch=="arm"', {
- 'audio_coding_dependencies': ['isac_fix',],
- 'audio_coding_defines': ['WEBRTC_CODEC_ISACFX',],
- }, {
- 'audio_coding_dependencies': ['isac',],
- 'audio_coding_defines': ['WEBRTC_CODEC_ISAC',],
- }],
- ],
- 'audio_coding_dependencies': ['g722',],
- 'audio_coding_defines': ['WEBRTC_CODEC_G722',],
- }],
- ['build_with_mozilla==0 and build_with_chromium==0', {
- 'audio_coding_dependencies': ['ilbc', 'red',],
- 'audio_coding_defines': ['WEBRTC_CODEC_ILBC', 'WEBRTC_CODEC_RED',],
- }],
- ],
- },
- 'targets': [
- {
- 'target_name': 'rent_a_codec',
- 'type': 'static_library',
- 'defines': [
- '<@(audio_coding_defines)',
- ],
- 'dependencies': [
- '<(webrtc_root)/common.gyp:webrtc_common',
- ],
- 'include_dirs': [
- '<(webrtc_root)',
- ],
- 'direct_dependent_settings': {
- 'include_dirs': [
- '<(webrtc_root)',
- ],
- },
- 'sources': [
- 'acm2/acm_codec_database.cc',
- 'acm2/acm_codec_database.h',
- 'acm2/rent_a_codec.cc',
- 'acm2/rent_a_codec.h',
- ],
- },
- {
- 'target_name': 'audio_coding_module',
- 'type': 'static_library',
- 'defines': [
- '<@(audio_coding_defines)',
- ],
- 'dependencies': [
- '<@(audio_coding_dependencies)',
- '<(webrtc_root)/common.gyp:webrtc_common',
- '<(webrtc_root)/webrtc.gyp:rtc_event_log',
- 'neteq',
- 'rent_a_codec',
- ],
- 'include_dirs': [
- 'include',
- '../../interface',
- '<(webrtc_root)',
- ],
- 'direct_dependent_settings': {
- 'include_dirs': [
- 'include',
- '../../interface',
- '<(webrtc_root)',
- ],
- },
- 'sources': [
- 'acm2/acm_common_defs.h',
- 'acm2/acm_receiver.cc',
- 'acm2/acm_receiver.h',
- 'acm2/acm_resampler.cc',
- 'acm2/acm_resampler.h',
- 'acm2/audio_coding_module.cc',
- 'acm2/audio_coding_module_impl.cc',
- 'acm2/audio_coding_module_impl.h',
- 'acm2/call_statistics.cc',
- 'acm2/call_statistics.h',
- 'acm2/codec_manager.cc',
- 'acm2/codec_manager.h',
- 'acm2/codec_owner.cc',
- 'acm2/codec_owner.h',
- 'acm2/initial_delay_manager.cc',
- 'acm2/initial_delay_manager.h',
- 'include/audio_coding_module.h',
- 'include/audio_coding_module_typedefs.h',
- ],
- },
- ],
- 'conditions': [
- ['include_tests==1', {
- 'targets': [
- {
- 'target_name': 'acm_receive_test',
- 'type': 'static_library',
- 'defines': [
- '<@(audio_coding_defines)',
- ],
- 'dependencies': [
- '<@(audio_coding_dependencies)',
- 'audio_coding_module',
- 'neteq_unittest_tools',
- '<(DEPTH)/testing/gtest.gyp:gtest',
- ],
- 'sources': [
- 'acm2/acm_receive_test_oldapi.cc',
- 'acm2/acm_receive_test_oldapi.h',
- ],
- }, # acm_receive_test
- {
- 'target_name': 'acm_send_test',
- 'type': 'static_library',
- 'defines': [
- '<@(audio_coding_defines)',
- ],
- 'dependencies': [
- '<@(audio_coding_dependencies)',
- 'audio_coding_module',
- 'neteq_unittest_tools',
- '<(DEPTH)/testing/gtest.gyp:gtest',
- ],
- 'sources': [
- 'acm2/acm_send_test_oldapi.cc',
- 'acm2/acm_send_test_oldapi.h',
- ],
- }, # acm_send_test
- {
- 'target_name': 'delay_test',
- 'type': 'executable',
- 'dependencies': [
- 'audio_coding_module',
- '<(DEPTH)/testing/gtest.gyp:gtest',
- '<(webrtc_root)/common.gyp:webrtc_common',
- '<(webrtc_root)/test/test.gyp:test_support',
- '<(webrtc_root)/system_wrappers/system_wrappers.gyp:system_wrappers',
- '<(webrtc_root)/system_wrappers/system_wrappers.gyp:system_wrappers_default',
- '<(DEPTH)/third_party/gflags/gflags.gyp:gflags',
- ],
- 'sources': [
- 'test/delay_test.cc',
- 'test/Channel.cc',
- 'test/PCMFile.cc',
- 'test/utility.cc',
- ],
- }, # delay_test
- {
- 'target_name': 'insert_packet_with_timing',
- 'type': 'executable',
- 'dependencies': [
- 'audio_coding_module',
- '<(DEPTH)/testing/gtest.gyp:gtest',
- '<(webrtc_root)/common.gyp:webrtc_common',
- '<(webrtc_root)/test/test.gyp:test_support',
- '<(webrtc_root)/system_wrappers/system_wrappers.gyp:system_wrappers',
- '<(webrtc_root)/system_wrappers/system_wrappers.gyp:system_wrappers_default',
- '<(DEPTH)/third_party/gflags/gflags.gyp:gflags',
- ],
- 'sources': [
- 'test/insert_packet_with_timing.cc',
- 'test/Channel.cc',
- 'test/PCMFile.cc',
- ],
}, # insert_packet_with_timing
- ],
- }],
- ],
-}
diff --git a/webrtc/modules/audio_coding/main/test/initial_delay_unittest.cc b/webrtc/modules/audio_coding/main/test/initial_delay_unittest.cc
deleted file mode 100644
index 8495e0e596..0000000000
--- a/webrtc/modules/audio_coding/main/test/initial_delay_unittest.cc
+++ /dev/null
@@ -1,175 +0,0 @@
-/*
- * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "webrtc/modules/audio_coding/main/include/audio_coding_module.h"
-
-#include <assert.h>
-#include <math.h>
-
-#include <iostream>
-
-#include "testing/gtest/include/gtest/gtest.h"
-#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/common_types.h"
-#include "webrtc/engine_configurations.h"
-#include "webrtc/modules/audio_coding/main/include/audio_coding_module_typedefs.h"
-#include "webrtc/modules/audio_coding/main/test/Channel.h"
-#include "webrtc/modules/audio_coding/main/test/PCMFile.h"
-#include "webrtc/modules/audio_coding/main/test/utility.h"
-#include "webrtc/system_wrappers/include/event_wrapper.h"
-#include "webrtc/test/testsupport/fileutils.h"
-#include "webrtc/test/testsupport/gtest_disable.h"
-
-namespace webrtc {
-
-namespace {
-
-double FrameRms(AudioFrame& frame) {
- size_t samples = frame.num_channels_ * frame.samples_per_channel_;
- double rms = 0;
- for (size_t n = 0; n < samples; ++n)
- rms += frame.data_[n] * frame.data_[n];
- rms /= samples;
- rms = sqrt(rms);
- return rms;
-}
-
-}
-
-class InitialPlayoutDelayTest : public ::testing::Test {
- protected:
- InitialPlayoutDelayTest()
- : acm_a_(AudioCodingModule::Create(0)),
- acm_b_(AudioCodingModule::Create(1)),
- channel_a2b_(NULL) {}
-
- ~InitialPlayoutDelayTest() {
- if (channel_a2b_ != NULL) {
- delete channel_a2b_;
- channel_a2b_ = NULL;
- }
- }
-
- void SetUp() {
- ASSERT_TRUE(acm_a_.get() != NULL);
- ASSERT_TRUE(acm_b_.get() != NULL);
-
- EXPECT_EQ(0, acm_b_->InitializeReceiver());
- EXPECT_EQ(0, acm_a_->InitializeReceiver());
-
- // Register all L16 codecs in receiver.
- CodecInst codec;
- const int kFsHz[3] = { 8000, 16000, 32000 };
- const int kChannels[2] = { 1, 2 };
- for (int n = 0; n < 3; ++n) {
- for (int k = 0; k < 2; ++k) {
- AudioCodingModule::Codec("L16", &codec, kFsHz[n], kChannels[k]);
- acm_b_->RegisterReceiveCodec(codec);
- }
- }
-
- // Create and connect the channel
- channel_a2b_ = new Channel;
- acm_a_->RegisterTransportCallback(channel_a2b_);
- channel_a2b_->RegisterReceiverACM(acm_b_.get());
- }
-
- void NbMono() {
- CodecInst codec;
- AudioCodingModule::Codec("L16", &codec, 8000, 1);
- codec.pacsize = codec.plfreq * 30 / 1000; // 30 ms packets.
- Run(codec, 1000);
- }
-
- void WbMono() {
- CodecInst codec;
- AudioCodingModule::Codec("L16", &codec, 16000, 1);
- codec.pacsize = codec.plfreq * 30 / 1000; // 30 ms packets.
- Run(codec, 1000);
- }
-
- void SwbMono() {
- CodecInst codec;
- AudioCodingModule::Codec("L16", &codec, 32000, 1);
- codec.pacsize = codec.plfreq * 10 / 1000; // 10 ms packets.
- Run(codec, 400); // Memory constraints limit the buffer to less than 500 ms.
- }
-
- void NbStereo() {
- CodecInst codec;
- AudioCodingModule::Codec("L16", &codec, 8000, 2);
- codec.pacsize = codec.plfreq * 30 / 1000; // 30 ms packets.
- Run(codec, 1000);
- }
-
- void WbStereo() {
- CodecInst codec;
- AudioCodingModule::Codec("L16", &codec, 16000, 2);
- codec.pacsize = codec.plfreq * 30 / 1000; // 30 ms packets.
- Run(codec, 1000);
- }
-
- void SwbStereo() {
- CodecInst codec;
- AudioCodingModule::Codec("L16", &codec, 32000, 2);
- codec.pacsize = codec.plfreq * 10 / 1000; // 10 ms packets.
- Run(codec, 400); // Memory constraints limit the buffer to less than 500 ms.
- }
-
- private:
- void Run(CodecInst codec, int initial_delay_ms) {
- AudioFrame in_audio_frame;
- AudioFrame out_audio_frame;
- int num_frames = 0;
- const int kAmp = 10000;
- in_audio_frame.sample_rate_hz_ = codec.plfreq;
- in_audio_frame.num_channels_ = codec.channels;
- in_audio_frame.samples_per_channel_ = codec.plfreq / 100; // 10 ms.
- size_t samples = in_audio_frame.num_channels_ *
- in_audio_frame.samples_per_channel_;
- for (size_t n = 0; n < samples; ++n) {
- in_audio_frame.data_[n] = kAmp;
- }
-
- uint32_t timestamp = 0;
- double rms = 0;
- ASSERT_EQ(0, acm_a_->RegisterSendCodec(codec));
- acm_b_->SetInitialPlayoutDelay(initial_delay_ms);
- while (rms < kAmp / 2) {
- in_audio_frame.timestamp_ = timestamp;
- timestamp += static_cast<uint32_t>(in_audio_frame.samples_per_channel_);
- ASSERT_GE(acm_a_->Add10MsData(in_audio_frame), 0);
- ASSERT_EQ(0, acm_b_->PlayoutData10Ms(codec.plfreq, &out_audio_frame));
- rms = FrameRms(out_audio_frame);
- ++num_frames;
- }
-
- ASSERT_GE(num_frames * 10, initial_delay_ms);
- ASSERT_LE(num_frames * 10, initial_delay_ms + 100);
- }
-
- rtc::scoped_ptr<AudioCodingModule> acm_a_;
- rtc::scoped_ptr<AudioCodingModule> acm_b_;
- Channel* channel_a2b_;
-};
-
-TEST_F(InitialPlayoutDelayTest, NbMono) { NbMono(); }
-
-TEST_F(InitialPlayoutDelayTest, WbMono) { WbMono(); }
-
-TEST_F(InitialPlayoutDelayTest, SwbMono) { SwbMono(); }
-
-TEST_F(InitialPlayoutDelayTest, NbStereo) { NbStereo(); }
-
-TEST_F(InitialPlayoutDelayTest, WbStereo) { WbStereo(); }
-
-TEST_F(InitialPlayoutDelayTest, SwbStereo) { SwbStereo(); }
-
-} // namespace webrtc
diff --git a/webrtc/modules/audio_coding/neteq/audio_decoder_impl.cc b/webrtc/modules/audio_coding/neteq/audio_decoder_impl.cc
index a9ea44d6a6..d800cc7dbe 100644
--- a/webrtc/modules/audio_coding/neteq/audio_decoder_impl.cc
+++ b/webrtc/modules/audio_coding/neteq/audio_decoder_impl.cc
@@ -13,13 +13,13 @@
#include <assert.h>
#include "webrtc/base/checks.h"
-#include "webrtc/modules/audio_coding/codecs/cng/include/webrtc_cng.h"
-#include "webrtc/modules/audio_coding/codecs/g711/include/audio_decoder_pcm.h"
+#include "webrtc/modules/audio_coding/codecs/cng/webrtc_cng.h"
+#include "webrtc/modules/audio_coding/codecs/g711/audio_decoder_pcm.h"
#ifdef WEBRTC_CODEC_G722
-#include "webrtc/modules/audio_coding/codecs/g722/include/audio_decoder_g722.h"
+#include "webrtc/modules/audio_coding/codecs/g722/audio_decoder_g722.h"
#endif
#ifdef WEBRTC_CODEC_ILBC
-#include "webrtc/modules/audio_coding/codecs/ilbc/include/audio_decoder_ilbc.h"
+#include "webrtc/modules/audio_coding/codecs/ilbc/audio_decoder_ilbc.h"
#endif
#ifdef WEBRTC_CODEC_ISACFX
#include "webrtc/modules/audio_coding/codecs/isac/fix/include/audio_decoder_isacfix.h"
@@ -30,9 +30,9 @@
#include "webrtc/modules/audio_coding/codecs/isac/main/include/audio_encoder_isac.h"
#endif
#ifdef WEBRTC_CODEC_OPUS
-#include "webrtc/modules/audio_coding/codecs/opus/include/audio_decoder_opus.h"
+#include "webrtc/modules/audio_coding/codecs/opus/audio_decoder_opus.h"
#endif
-#include "webrtc/modules/audio_coding/codecs/pcm16b/include/audio_decoder_pcm16b.h"
+#include "webrtc/modules/audio_coding/codecs/pcm16b/audio_decoder_pcm16b.h"
namespace webrtc {
diff --git a/webrtc/modules/audio_coding/neteq/audio_decoder_impl.h b/webrtc/modules/audio_coding/neteq/audio_decoder_impl.h
index 3229033d92..bc8bdd9626 100644
--- a/webrtc/modules/audio_coding/neteq/audio_decoder_impl.h
+++ b/webrtc/modules/audio_coding/neteq/audio_decoder_impl.h
@@ -16,11 +16,11 @@
#include "webrtc/engine_configurations.h"
#include "webrtc/base/constructormagic.h"
#include "webrtc/modules/audio_coding/codecs/audio_decoder.h"
-#include "webrtc/modules/audio_coding/codecs/cng/include/webrtc_cng.h"
+#include "webrtc/modules/audio_coding/codecs/cng/webrtc_cng.h"
#ifdef WEBRTC_CODEC_G722
-#include "webrtc/modules/audio_coding/codecs/g722/include/g722_interface.h"
+#include "webrtc/modules/audio_coding/codecs/g722/g722_interface.h"
#endif
-#include "webrtc/modules/audio_coding/main/acm2/rent_a_codec.h"
+#include "webrtc/modules/audio_coding/acm2/rent_a_codec.h"
#include "webrtc/typedefs.h"
namespace webrtc {
diff --git a/webrtc/modules/audio_coding/neteq/audio_decoder_unittest.cc b/webrtc/modules/audio_coding/neteq/audio_decoder_unittest.cc
index 8f82fb11a4..599929e78d 100644
--- a/webrtc/modules/audio_coding/neteq/audio_decoder_unittest.cc
+++ b/webrtc/modules/audio_coding/neteq/audio_decoder_unittest.cc
@@ -18,20 +18,20 @@
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/modules/audio_coding/codecs/g711/include/audio_decoder_pcm.h"
-#include "webrtc/modules/audio_coding/codecs/g711/include/audio_encoder_pcm.h"
-#include "webrtc/modules/audio_coding/codecs/g722/include/audio_decoder_g722.h"
-#include "webrtc/modules/audio_coding/codecs/g722/include/audio_encoder_g722.h"
-#include "webrtc/modules/audio_coding/codecs/ilbc/include/audio_decoder_ilbc.h"
-#include "webrtc/modules/audio_coding/codecs/ilbc/include/audio_encoder_ilbc.h"
+#include "webrtc/modules/audio_coding/codecs/g711/audio_decoder_pcm.h"
+#include "webrtc/modules/audio_coding/codecs/g711/audio_encoder_pcm.h"
+#include "webrtc/modules/audio_coding/codecs/g722/audio_decoder_g722.h"
+#include "webrtc/modules/audio_coding/codecs/g722/audio_encoder_g722.h"
+#include "webrtc/modules/audio_coding/codecs/ilbc/audio_decoder_ilbc.h"
+#include "webrtc/modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.h"
#include "webrtc/modules/audio_coding/codecs/isac/fix/include/audio_decoder_isacfix.h"
#include "webrtc/modules/audio_coding/codecs/isac/fix/include/audio_encoder_isacfix.h"
#include "webrtc/modules/audio_coding/codecs/isac/main/include/audio_decoder_isac.h"
#include "webrtc/modules/audio_coding/codecs/isac/main/include/audio_encoder_isac.h"
-#include "webrtc/modules/audio_coding/codecs/opus/include/audio_decoder_opus.h"
-#include "webrtc/modules/audio_coding/codecs/opus/include/audio_encoder_opus.h"
-#include "webrtc/modules/audio_coding/codecs/pcm16b/include/audio_decoder_pcm16b.h"
-#include "webrtc/modules/audio_coding/codecs/pcm16b/include/audio_encoder_pcm16b.h"
+#include "webrtc/modules/audio_coding/codecs/opus/audio_decoder_opus.h"
+#include "webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.h"
+#include "webrtc/modules/audio_coding/codecs/pcm16b/audio_decoder_pcm16b.h"
+#include "webrtc/modules/audio_coding/codecs/pcm16b/audio_encoder_pcm16b.h"
#include "webrtc/modules/audio_coding/neteq/tools/resample_input_audio_file.h"
#include "webrtc/system_wrappers/include/data_log.h"
#include "webrtc/test/testsupport/fileutils.h"
@@ -158,7 +158,10 @@ class AudioDecoderTest : public ::testing::Test {
interleaved_input.get());
encoded_info_ = audio_encoder_->Encode(
- 0, interleaved_input.get(), audio_encoder_->SampleRateHz() / 100,
+ 0, rtc::ArrayView<const int16_t>(interleaved_input.get(),
+ audio_encoder_->NumChannels() *
+ audio_encoder_->SampleRateHz() /
+ 100),
data_length_ * 2, output);
}
EXPECT_EQ(payload_type_, encoded_info_.payload_type);
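Encode() now takes the input audio as an rtc::ArrayView<const int16_t>, which carries the pointer and the element count together, so the frame length is no longer a separate argument. A sketch of wrapping an interleaved 10 ms buffer, with the buffer and encoder names assumed:

    // Sketch: one 10 ms interleaved frame covering all channels.
    const size_t samples_per_frame =
        encoder->NumChannels() * encoder->SampleRateHz() / 100;
    rtc::ArrayView<const int16_t> input(interleaved_buffer, samples_per_frame);
    encoded_info = encoder->Encode(rtp_timestamp, input, max_bytes, output);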
@@ -563,18 +566,14 @@ TEST_F(AudioDecoderIsacSwbTest, SetTargetBitrate) {
TestSetAndGetTargetBitratesWithFixedCodec(audio_encoder_.get(), 32000);
}
-// Fails Android ARM64. https://code.google.com/p/webrtc/issues/detail?id=4198
-#if defined(WEBRTC_ANDROID) && defined(WEBRTC_ARCH_ARM64)
-#define MAYBE_EncodeDecode DISABLED_EncodeDecode
-#else
-#define MAYBE_EncodeDecode EncodeDecode
-#endif
-TEST_F(AudioDecoderIsacFixTest, MAYBE_EncodeDecode) {
+TEST_F(AudioDecoderIsacFixTest, EncodeDecode) {
int tolerance = 11034;
double mse = 3.46e6;
int delay = 54; // Delay from input to output.
-#ifdef WEBRTC_ANDROID
+#if defined(WEBRTC_ANDROID) && defined(WEBRTC_ARCH_ARM)
static const int kEncodedBytes = 685;
+#elif defined(WEBRTC_ANDROID) && defined(WEBRTC_ARCH_ARM64)
+ static const int kEncodedBytes = 673;
#else
static const int kEncodedBytes = 671;
#endif
diff --git a/webrtc/modules/audio_coding/neteq/comfort_noise.cc b/webrtc/modules/audio_coding/neteq/comfort_noise.cc
index 3fe6607778..a5b08469be 100644
--- a/webrtc/modules/audio_coding/neteq/comfort_noise.cc
+++ b/webrtc/modules/audio_coding/neteq/comfort_noise.cc
@@ -14,7 +14,7 @@
#include "webrtc/base/logging.h"
#include "webrtc/modules/audio_coding/codecs/audio_decoder.h"
-#include "webrtc/modules/audio_coding/codecs/cng/include/webrtc_cng.h"
+#include "webrtc/modules/audio_coding/codecs/cng/webrtc_cng.h"
#include "webrtc/modules/audio_coding/neteq/decoder_database.h"
#include "webrtc/modules/audio_coding/neteq/dsp_helper.h"
#include "webrtc/modules/audio_coding/neteq/sync_buffer.h"
diff --git a/webrtc/modules/audio_coding/neteq/decision_logic.cc b/webrtc/modules/audio_coding/neteq/decision_logic.cc
index 14e0426d7d..39bb4662c7 100644
--- a/webrtc/modules/audio_coding/neteq/decision_logic.cc
+++ b/webrtc/modules/audio_coding/neteq/decision_logic.cc
@@ -128,9 +128,6 @@ Operations DecisionLogic::GetDecision(const SyncBuffer& sync_buffer,
const size_t cur_size_samples =
samples_left + packet_buffer_.NumSamplesInBuffer(decoder_database_,
decoder_frame_length);
- LOG(LS_VERBOSE) << "Buffers: " << packet_buffer_.NumPacketsInBuffer() <<
- " packets * " << decoder_frame_length << " samples/packet + " <<
- samples_left << " samples in sync buffer = " << cur_size_samples;
prev_time_scale_ = prev_time_scale_ &&
(prev_mode == kModeAccelerateSuccess ||
diff --git a/webrtc/modules/audio_coding/neteq/decision_logic_normal.cc b/webrtc/modules/audio_coding/neteq/decision_logic_normal.cc
index d3f6fa6dd4..0252d1cdfa 100644
--- a/webrtc/modules/audio_coding/neteq/decision_logic_normal.cc
+++ b/webrtc/modules/audio_coding/neteq/decision_logic_normal.cc
@@ -20,7 +20,7 @@
#include "webrtc/modules/audio_coding/neteq/expand.h"
#include "webrtc/modules/audio_coding/neteq/packet_buffer.h"
#include "webrtc/modules/audio_coding/neteq/sync_buffer.h"
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
namespace webrtc {
diff --git a/webrtc/modules/audio_coding/neteq/decoder_database.cc b/webrtc/modules/audio_coding/neteq/decoder_database.cc
index 41803f754a..92d4bab1e4 100644
--- a/webrtc/modules/audio_coding/neteq/decoder_database.cc
+++ b/webrtc/modules/audio_coding/neteq/decoder_database.cc
@@ -13,6 +13,7 @@
#include <assert.h>
#include <utility> // pair
+#include "webrtc/base/checks.h"
#include "webrtc/base/logging.h"
#include "webrtc/modules/audio_coding/codecs/audio_decoder.h"
@@ -38,17 +39,17 @@ void DecoderDatabase::Reset() {
}
int DecoderDatabase::RegisterPayload(uint8_t rtp_payload_type,
- NetEqDecoder codec_type) {
+ NetEqDecoder codec_type,
+ const std::string& name) {
if (rtp_payload_type > 0x7F) {
return kInvalidRtpPayloadType;
}
if (!CodecSupported(codec_type)) {
return kCodecNotSupported;
}
- int fs_hz = CodecSampleRateHz(codec_type);
- std::pair<DecoderMap::iterator, bool> ret;
- DecoderInfo info(codec_type, fs_hz, NULL, false);
- ret = decoders_.insert(std::make_pair(rtp_payload_type, info));
+ const int fs_hz = CodecSampleRateHz(codec_type);
+ DecoderInfo info(codec_type, name, fs_hz, NULL, false);
+ auto ret = decoders_.insert(std::make_pair(rtp_payload_type, info));
if (ret.second == false) {
// Database already contains a decoder with type |rtp_payload_type|.
return kDecoderExists;
@@ -58,6 +59,7 @@ int DecoderDatabase::RegisterPayload(uint8_t rtp_payload_type,
int DecoderDatabase::InsertExternal(uint8_t rtp_payload_type,
NetEqDecoder codec_type,
+ const std::string& codec_name,
int fs_hz,
AudioDecoder* decoder) {
if (rtp_payload_type > 0x7F) {
@@ -73,7 +75,7 @@ int DecoderDatabase::InsertExternal(uint8_t rtp_payload_type,
return kInvalidPointer;
}
std::pair<DecoderMap::iterator, bool> ret;
- DecoderInfo info(codec_type, fs_hz, decoder, true);
+ DecoderInfo info(codec_type, codec_name, fs_hz, decoder, true);
ret = decoders_.insert(std::make_pair(rtp_payload_type, info));
if (ret.second == false) {
// Database already contains a decoder with type |rtp_payload_type|.
diff --git a/webrtc/modules/audio_coding/neteq/decoder_database.h b/webrtc/modules/audio_coding/neteq/decoder_database.h
index ea70997c14..f34904fda8 100644
--- a/webrtc/modules/audio_coding/neteq/decoder_database.h
+++ b/webrtc/modules/audio_coding/neteq/decoder_database.h
@@ -12,8 +12,10 @@
#define WEBRTC_MODULES_AUDIO_CODING_NETEQ_DECODER_DATABASE_H_
#include <map>
+#include <string>
#include "webrtc/base/constructormagic.h"
+#include "webrtc/base/scoped_ptr.h"
#include "webrtc/common_types.h" // NULL
#include "webrtc/modules/audio_coding/neteq/audio_decoder_impl.h"
#include "webrtc/modules/audio_coding/neteq/packet.h"
@@ -35,25 +37,28 @@ class DecoderDatabase {
// Struct used to store decoder info in the database.
struct DecoderInfo {
- // Constructors.
- DecoderInfo()
- : codec_type(NetEqDecoder::kDecoderArbitrary),
- fs_hz(8000),
- decoder(NULL),
- external(false) {}
+ DecoderInfo() = default;
DecoderInfo(NetEqDecoder ct, int fs, AudioDecoder* dec, bool ext)
+ : DecoderInfo(ct, "", fs, dec, ext) {}
+ DecoderInfo(NetEqDecoder ct,
+ const std::string& nm,
+ int fs,
+ AudioDecoder* dec,
+ bool ext)
: codec_type(ct),
+ name(nm),
fs_hz(fs),
+ rtp_sample_rate_hz(fs),
decoder(dec),
- external(ext) {
- }
- // Destructor. (Defined in decoder_database.cc.)
+ external(ext) {}
~DecoderInfo();
- NetEqDecoder codec_type;
- int fs_hz;
- AudioDecoder* decoder;
- bool external;
+ NetEqDecoder codec_type = NetEqDecoder::kDecoderArbitrary;
+ std::string name;
+ int fs_hz = 8000;
+ int rtp_sample_rate_hz = 8000;
+ AudioDecoder* decoder = nullptr;
+ bool external = false;
};
// Maximum value for 8 bits, and an invalid RTP payload type (since it is
@@ -75,16 +80,21 @@ class DecoderDatabase {
// using InsertExternal().
virtual void Reset();
- // Registers |rtp_payload_type| as a decoder of type |codec_type|. Returns
- // kOK on success; otherwise an error code.
+ // Registers |rtp_payload_type| as a decoder of type |codec_type|. The |name|
+ // is only used to populate the name field in the DecoderInfo struct in the
+ // database, and can be arbitrary (including empty). Returns kOK on success;
+ // otherwise an error code.
virtual int RegisterPayload(uint8_t rtp_payload_type,
- NetEqDecoder codec_type);
+ NetEqDecoder codec_type,
+ const std::string& name);
// Registers an externally created AudioDecoder object, and associates it
// as a decoder of type |codec_type| with |rtp_payload_type|.
virtual int InsertExternal(uint8_t rtp_payload_type,
NetEqDecoder codec_type,
- int fs_hz, AudioDecoder* decoder);
+ const std::string& codec_name,
+ int fs_hz,
+ AudioDecoder* decoder);
// Removes the entry for |rtp_payload_type| from the database.
// Returns kDecoderNotFound or kOK depending on the outcome of the operation.
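
Since the stored name is uninterpreted, registration and lookup compose as in the sketch below (payload type and name are illustrative; the unit tests that follow exercise this same path and check the same fields):

  DecoderDatabase db;
  if (db.RegisterPayload(0, NetEqDecoder::kDecoderPCMu, "pcmu") ==
      DecoderDatabase::kOK) {
    const DecoderDatabase::DecoderInfo* info = db.GetDecoderInfo(0);
    // For a built-in PCMu entry: info->name == "pcmu", info->fs_hz == 8000,
    // info->decoder == nullptr, info->external == false.
  }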
diff --git a/webrtc/modules/audio_coding/neteq/decoder_database_unittest.cc b/webrtc/modules/audio_coding/neteq/decoder_database_unittest.cc
index e85d8d32fb..85aaef1143 100644
--- a/webrtc/modules/audio_coding/neteq/decoder_database_unittest.cc
+++ b/webrtc/modules/audio_coding/neteq/decoder_database_unittest.cc
@@ -19,7 +19,6 @@
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/modules/audio_coding/neteq/mock/mock_audio_decoder.h"
-#include "webrtc/test/testsupport/gtest_disable.h"
namespace webrtc {
@@ -32,8 +31,10 @@ TEST(DecoderDatabase, CreateAndDestroy) {
TEST(DecoderDatabase, InsertAndRemove) {
DecoderDatabase db;
const uint8_t kPayloadType = 0;
- EXPECT_EQ(DecoderDatabase::kOK,
- db.RegisterPayload(kPayloadType, NetEqDecoder::kDecoderPCMu));
+ const std::string kCodecName = "Robert\'); DROP TABLE Students;";
+ EXPECT_EQ(
+ DecoderDatabase::kOK,
+ db.RegisterPayload(kPayloadType, NetEqDecoder::kDecoderPCMu, kCodecName));
EXPECT_EQ(1, db.Size());
EXPECT_FALSE(db.Empty());
EXPECT_EQ(DecoderDatabase::kOK, db.Remove(kPayloadType));
@@ -44,14 +45,17 @@ TEST(DecoderDatabase, InsertAndRemove) {
TEST(DecoderDatabase, GetDecoderInfo) {
DecoderDatabase db;
const uint8_t kPayloadType = 0;
- EXPECT_EQ(DecoderDatabase::kOK,
- db.RegisterPayload(kPayloadType, NetEqDecoder::kDecoderPCMu));
+ const std::string kCodecName = "Robert\'); DROP TABLE Students;";
+ EXPECT_EQ(
+ DecoderDatabase::kOK,
+ db.RegisterPayload(kPayloadType, NetEqDecoder::kDecoderPCMu, kCodecName));
const DecoderDatabase::DecoderInfo* info;
info = db.GetDecoderInfo(kPayloadType);
ASSERT_TRUE(info != NULL);
EXPECT_EQ(NetEqDecoder::kDecoderPCMu, info->codec_type);
EXPECT_EQ(NULL, info->decoder);
EXPECT_EQ(8000, info->fs_hz);
+ EXPECT_EQ(kCodecName, info->name);
EXPECT_FALSE(info->external);
info = db.GetDecoderInfo(kPayloadType + 1); // Other payload type.
EXPECT_TRUE(info == NULL); // Should not be found.
@@ -60,8 +64,10 @@ TEST(DecoderDatabase, GetDecoderInfo) {
TEST(DecoderDatabase, GetRtpPayloadType) {
DecoderDatabase db;
const uint8_t kPayloadType = 0;
- EXPECT_EQ(DecoderDatabase::kOK,
- db.RegisterPayload(kPayloadType, NetEqDecoder::kDecoderPCMu));
+ const std::string kCodecName = "Robert\'); DROP TABLE Students;";
+ EXPECT_EQ(
+ DecoderDatabase::kOK,
+ db.RegisterPayload(kPayloadType, NetEqDecoder::kDecoderPCMu, kCodecName));
EXPECT_EQ(kPayloadType, db.GetRtpPayloadType(NetEqDecoder::kDecoderPCMu));
const uint8_t expected_value = DecoderDatabase::kRtpPayloadTypeError;
EXPECT_EQ(expected_value,
@@ -72,8 +78,10 @@ TEST(DecoderDatabase, GetRtpPayloadType) {
TEST(DecoderDatabase, GetDecoder) {
DecoderDatabase db;
const uint8_t kPayloadType = 0;
+ const std::string kCodecName = "Robert\'); DROP TABLE Students;";
EXPECT_EQ(DecoderDatabase::kOK,
- db.RegisterPayload(kPayloadType, NetEqDecoder::kDecoderPCM16B));
+ db.RegisterPayload(kPayloadType, NetEqDecoder::kDecoderPCM16B,
+ kCodecName));
AudioDecoder* dec = db.GetDecoder(kPayloadType);
ASSERT_TRUE(dec != NULL);
}
@@ -86,14 +94,18 @@ TEST(DecoderDatabase, TypeTests) {
const uint8_t kPayloadTypeRed = 101;
const uint8_t kPayloadNotUsed = 102;
// Load into database.
+ EXPECT_EQ(
+ DecoderDatabase::kOK,
+ db.RegisterPayload(kPayloadTypePcmU, NetEqDecoder::kDecoderPCMu, "pcmu"));
EXPECT_EQ(DecoderDatabase::kOK,
- db.RegisterPayload(kPayloadTypePcmU, NetEqDecoder::kDecoderPCMu));
- EXPECT_EQ(DecoderDatabase::kOK,
- db.RegisterPayload(kPayloadTypeCng, NetEqDecoder::kDecoderCNGnb));
- EXPECT_EQ(DecoderDatabase::kOK,
- db.RegisterPayload(kPayloadTypeDtmf, NetEqDecoder::kDecoderAVT));
- EXPECT_EQ(DecoderDatabase::kOK,
- db.RegisterPayload(kPayloadTypeRed, NetEqDecoder::kDecoderRED));
+ db.RegisterPayload(kPayloadTypeCng, NetEqDecoder::kDecoderCNGnb,
+ "cng-nb"));
+ EXPECT_EQ(
+ DecoderDatabase::kOK,
+ db.RegisterPayload(kPayloadTypeDtmf, NetEqDecoder::kDecoderAVT, "avt"));
+ EXPECT_EQ(
+ DecoderDatabase::kOK,
+ db.RegisterPayload(kPayloadTypeRed, NetEqDecoder::kDecoderRED, "red"));
EXPECT_EQ(4, db.Size());
// Test.
EXPECT_FALSE(db.IsComfortNoise(kPayloadNotUsed));
@@ -112,11 +124,12 @@ TEST(DecoderDatabase, TypeTests) {
TEST(DecoderDatabase, ExternalDecoder) {
DecoderDatabase db;
const uint8_t kPayloadType = 0;
+ const std::string kCodecName = "Robert\'); DROP TABLE Students;";
MockAudioDecoder decoder;
// Load into database.
EXPECT_EQ(DecoderDatabase::kOK,
- db.InsertExternal(kPayloadType, NetEqDecoder::kDecoderPCMu, 8000,
- &decoder));
+ db.InsertExternal(kPayloadType, NetEqDecoder::kDecoderPCMu,
+ kCodecName, 8000, &decoder));
EXPECT_EQ(1, db.Size());
// Get decoder and make sure we get the external one.
EXPECT_EQ(&decoder, db.GetDecoder(kPayloadType));
@@ -125,6 +138,7 @@ TEST(DecoderDatabase, ExternalDecoder) {
info = db.GetDecoderInfo(kPayloadType);
ASSERT_TRUE(info != NULL);
EXPECT_EQ(NetEqDecoder::kDecoderPCMu, info->codec_type);
+ EXPECT_EQ(kCodecName, info->name);
EXPECT_EQ(&decoder, info->decoder);
EXPECT_EQ(8000, info->fs_hz);
EXPECT_TRUE(info->external);
@@ -146,7 +160,7 @@ TEST(DecoderDatabase, CheckPayloadTypes) {
for (uint8_t payload_type = 0; payload_type < kNumPayloads; ++payload_type) {
EXPECT_EQ(
DecoderDatabase::kOK,
- db.RegisterPayload(payload_type, NetEqDecoder::kDecoderArbitrary));
+ db.RegisterPayload(payload_type, NetEqDecoder::kDecoderArbitrary, ""));
}
PacketList packet_list;
for (int i = 0; i < kNumPayloads + 1; ++i) {
@@ -185,11 +199,11 @@ TEST(DecoderDatabase, IF_ISAC(ActiveDecoders)) {
DecoderDatabase db;
// Load payload types.
ASSERT_EQ(DecoderDatabase::kOK,
- db.RegisterPayload(0, NetEqDecoder::kDecoderPCMu));
+ db.RegisterPayload(0, NetEqDecoder::kDecoderPCMu, "pcmu"));
ASSERT_EQ(DecoderDatabase::kOK,
- db.RegisterPayload(103, NetEqDecoder::kDecoderISAC));
+ db.RegisterPayload(103, NetEqDecoder::kDecoderISAC, "isac"));
ASSERT_EQ(DecoderDatabase::kOK,
- db.RegisterPayload(13, NetEqDecoder::kDecoderCNGnb));
+ db.RegisterPayload(13, NetEqDecoder::kDecoderCNGnb, "cng-nb"));
// Verify that no decoders are active from the start.
EXPECT_EQ(NULL, db.GetActiveDecoder());
EXPECT_EQ(NULL, db.GetActiveCngDecoder());
diff --git a/webrtc/modules/audio_coding/neteq/delay_manager.cc b/webrtc/modules/audio_coding/neteq/delay_manager.cc
index 5140c0620f..806d02b8de 100644
--- a/webrtc/modules/audio_coding/neteq/delay_manager.cc
+++ b/webrtc/modules/audio_coding/neteq/delay_manager.cc
@@ -17,7 +17,7 @@
#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
#include "webrtc/modules/audio_coding/neteq/delay_peak_detector.h"
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/system_wrappers/include/logging.h"
namespace webrtc {
diff --git a/webrtc/modules/audio_coding/neteq/expand.cc b/webrtc/modules/audio_coding/neteq/expand.cc
index 2aa9fb0a8d..ef7af46597 100644
--- a/webrtc/modules/audio_coding/neteq/expand.cc
+++ b/webrtc/modules/audio_coding/neteq/expand.cc
@@ -519,7 +519,7 @@ void Expand::AnalyzeSignal(int16_t* random_vector) {
energy2 = WebRtcSpl_DotProductWithScale(vector2, vector2, expansion_length,
correlation_scale);
// Confirm that amplitude ratio sqrt(energy1 / energy2) is within 0.5 - 2.0,
- // i.e., energy1 / energy1 is within 0.25 - 4.
+ // i.e., energy1 / energy2 is within 0.25 - 4.
int16_t amplitude_ratio;
if ((energy1 / 4 < energy2) && (energy1 > energy2 / 4)) {
// Energy constraint fulfilled. Use both vectors and scale them
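
The two bounds agree because the amplitude ratio is the square root of the energy ratio: 0.5 <= sqrt(energy1 / energy2) <= 2.0 squares to 0.25 <= energy1 / energy2 <= 4, which is exactly what the guard (energy1 / 4 < energy2) && (energy1 > energy2 / 4) encodes using only divisions by the constant 4.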
diff --git a/webrtc/modules/audio_coding/neteq/include/neteq.h b/webrtc/modules/audio_coding/neteq/include/neteq.h
index 205a0dfe80..1322223970 100644
--- a/webrtc/modules/audio_coding/neteq/include/neteq.h
+++ b/webrtc/modules/audio_coding/neteq/include/neteq.h
@@ -81,6 +81,7 @@ class NetEq {
Config()
: sample_rate_hz(16000),
enable_audio_classifier(false),
+ enable_post_decode_vad(false),
max_packets_in_buffer(50),
// |max_delay_ms| has the same effect as calling SetMaximumDelay().
max_delay_ms(2000),
@@ -92,6 +93,7 @@ class NetEq {
int sample_rate_hz; // Initial value. Will change with input data.
bool enable_audio_classifier;
+ bool enable_post_decode_vad;
size_t max_packets_in_buffer;
int max_delay_ms;
BackgroundNoiseMode background_noise_mode;
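
With the new flag in place, enabling the post-decode VAD is a one-line addition when building the config (a sketch with otherwise default values; per the constructor above, the flag defaults to false):

  NetEq::Config config;
  config.sample_rate_hz = 16000;         // initial value; tracks input later
  config.enable_post_decode_vad = true;
  NetEq* neteq = NetEq::Create(config);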
@@ -145,8 +147,7 @@ class NetEq {
// the same tick rate as the RTP timestamp of the current payload.
// Returns 0 on success, -1 on failure.
virtual int InsertPacket(const WebRtcRTPHeader& rtp_header,
- const uint8_t* payload,
- size_t length_bytes,
+ rtc::ArrayView<const uint8_t> payload,
uint32_t receive_timestamp) = 0;
// Inserts a sync-packet into packet queue. Sync-packets are decoded to
@@ -170,20 +171,27 @@ class NetEq {
// The speech type is written to |type|, if |type| is not NULL.
// Returns kOK on success, or kFail in case of an error.
virtual int GetAudio(size_t max_length, int16_t* output_audio,
- size_t* samples_per_channel, int* num_channels,
+ size_t* samples_per_channel, size_t* num_channels,
NetEqOutputType* type) = 0;
- // Associates |rtp_payload_type| with |codec| and stores the information in
- // the codec database. Returns 0 on success, -1 on failure.
+ // Associates |rtp_payload_type| with |codec| and |codec_name|, and stores the
+ // information in the codec database. Returns 0 on success, -1 on failure.
+ // The name is only used to provide information back to the caller about the
+ // decoders. Hence, the name is arbitrary, and may be empty.
virtual int RegisterPayloadType(NetEqDecoder codec,
+ const std::string& codec_name,
uint8_t rtp_payload_type) = 0;
// Provides an externally created decoder object |decoder| to insert in the
// decoder database. The decoder implements a decoder of type |codec| and
- // associates it with |rtp_payload_type|. The decoder will produce samples
- // at the rate |sample_rate_hz|. Returns kOK on success, kFail on failure.
+ // associates it with |rtp_payload_type| and |codec_name|. The decoder will
+ // produce samples at the rate |sample_rate_hz|. Returns kOK on success, kFail
+ // on failure.
+ // The name is only used to provide information back to the caller about the
+ // decoders. Hence, the name is arbitrary, and may be empty.
virtual int RegisterExternalDecoder(AudioDecoder* decoder,
NetEqDecoder codec,
+ const std::string& codec_name,
uint8_t rtp_payload_type,
int sample_rate_hz) = 0;
@@ -250,6 +258,11 @@ class NetEq {
// Returns true if the RTP timestamp is valid, otherwise false.
virtual bool GetPlayoutTimestamp(uint32_t* timestamp) = 0;
+ // Returns the sample rate in Hz of the audio produced in the last GetAudio
+ // call. If GetAudio has not been called yet, the configured sample rate
+ // (Config::sample_rate_hz) is returned.
+ virtual int last_output_sample_rate_hz() const = 0;
+
// Not implemented.
virtual int SetTargetNumberOfChannels() = 0;
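
Taken together, the signature changes above read as follows from a caller's side (a sketch with illustrative payload type, buffer names, and sizes; error handling elided):

  neteq->RegisterPayloadType(NetEqDecoder::kDecoderPCM16B, "pcm16", 96);
  neteq->InsertPacket(
      rtp_header, rtc::ArrayView<const uint8_t>(payload, payload_len),
      receive_timestamp);
  int16_t output[kMaxBlockSize];
  size_t samples_per_channel;
  size_t num_channels;  // now size_t rather than int
  NetEqOutputType type;
  neteq->GetAudio(kMaxBlockSize, output, &samples_per_channel, &num_channels,
                  &type);
  int rate_hz = neteq->last_output_sample_rate_hz();  // rate of |output|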
diff --git a/webrtc/modules/audio_coding/neteq/mock/mock_audio_decoder.h b/webrtc/modules/audio_coding/neteq/mock/mock_audio_decoder.h
index 8debcbbb1e..c1cc09cb5e 100644
--- a/webrtc/modules/audio_coding/neteq/mock/mock_audio_decoder.h
+++ b/webrtc/modules/audio_coding/neteq/mock/mock_audio_decoder.h
@@ -22,9 +22,8 @@ class MockAudioDecoder : public AudioDecoder {
MockAudioDecoder() {}
virtual ~MockAudioDecoder() { Die(); }
MOCK_METHOD0(Die, void());
- MOCK_METHOD6(
- Decode,
- int(const uint8_t*, size_t, int, size_t, int16_t*, SpeechType*));
+ MOCK_METHOD5(DecodeInternal,
+ int(const uint8_t*, size_t, int, int16_t*, SpeechType*));
MOCK_CONST_METHOD0(HasDecodePlc, bool());
MOCK_METHOD2(DecodePlc, size_t(size_t, int16_t*));
MOCK_METHOD0(Reset, void());
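
Note that because DecodeInternal() drops the max_decoded_bytes parameter that Decode() carried, gmock expectations downstream shift their argument indices by one: actions written against the old six-argument Decode(), such as SetArrayArgument<4> and SetArgPointee<5>, become SetArrayArgument<3> and SetArgPointee<4> against the five-argument DecodeInternal(), as the neteq_impl_unittest.cc hunks later in this diff show.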
diff --git a/webrtc/modules/audio_coding/neteq/mock/mock_decoder_database.h b/webrtc/modules/audio_coding/neteq/mock/mock_decoder_database.h
index d127c5d810..1b4a3c9da5 100644
--- a/webrtc/modules/audio_coding/neteq/mock/mock_decoder_database.h
+++ b/webrtc/modules/audio_coding/neteq/mock/mock_decoder_database.h
@@ -11,6 +11,8 @@
#ifndef WEBRTC_MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_DECODER_DATABASE_H_
#define WEBRTC_MODULES_AUDIO_CODING_NETEQ_MOCK_MOCK_DECODER_DATABASE_H_
+#include <string>
+
#include "webrtc/modules/audio_coding/neteq/decoder_database.h"
#include "testing/gmock/include/gmock/gmock.h"
@@ -27,10 +29,12 @@ class MockDecoderDatabase : public DecoderDatabase {
int());
MOCK_METHOD0(Reset,
void());
- MOCK_METHOD2(RegisterPayload,
- int(uint8_t rtp_payload_type, NetEqDecoder codec_type));
- MOCK_METHOD4(InsertExternal,
- int(uint8_t rtp_payload_type, NetEqDecoder codec_type, int fs_hz,
+ MOCK_METHOD3(RegisterPayload,
+ int(uint8_t rtp_payload_type, NetEqDecoder codec_type,
+ const std::string& name));
+ MOCK_METHOD5(InsertExternal,
+ int(uint8_t rtp_payload_type, NetEqDecoder codec_type,
+ const std::string& codec_name, int fs_hz,
AudioDecoder* decoder));
MOCK_METHOD1(Remove,
int(uint8_t rtp_payload_type));
diff --git a/webrtc/modules/audio_coding/neteq/mock/mock_external_decoder_pcm16b.h b/webrtc/modules/audio_coding/neteq/mock/mock_external_decoder_pcm16b.h
index 8cf89c083d..42c17ae054 100644
--- a/webrtc/modules/audio_coding/neteq/mock/mock_external_decoder_pcm16b.h
+++ b/webrtc/modules/audio_coding/neteq/mock/mock_external_decoder_pcm16b.h
@@ -15,7 +15,7 @@
#include "testing/gmock/include/gmock/gmock.h"
#include "webrtc/base/constructormagic.h"
-#include "webrtc/modules/audio_coding/codecs/pcm16b/include/pcm16b.h"
+#include "webrtc/modules/audio_coding/codecs/pcm16b/pcm16b.h"
#include "webrtc/typedefs.h"
namespace webrtc {
@@ -30,7 +30,6 @@ class ExternalPcm16B : public AudioDecoder {
ExternalPcm16B() {}
void Reset() override {}
- protected:
int DecodeInternal(const uint8_t* encoded,
size_t encoded_len,
int sample_rate_hz,
@@ -52,8 +51,8 @@ class MockExternalPcm16B : public ExternalPcm16B {
public:
MockExternalPcm16B() {
// By default, all calls are delegated to the real object.
- ON_CALL(*this, Decode(_, _, _, _, _, _))
- .WillByDefault(Invoke(&real_, &ExternalPcm16B::Decode));
+ ON_CALL(*this, DecodeInternal(_, _, _, _, _))
+ .WillByDefault(Invoke(&real_, &ExternalPcm16B::DecodeInternal));
ON_CALL(*this, HasDecodePlc())
.WillByDefault(Invoke(&real_, &ExternalPcm16B::HasDecodePlc));
ON_CALL(*this, DecodePlc(_, _))
@@ -68,11 +67,10 @@ class MockExternalPcm16B : public ExternalPcm16B {
virtual ~MockExternalPcm16B() { Die(); }
MOCK_METHOD0(Die, void());
- MOCK_METHOD6(Decode,
+ MOCK_METHOD5(DecodeInternal,
int(const uint8_t* encoded,
size_t encoded_len,
int sample_rate_hz,
- size_t max_decoded_bytes,
int16_t* decoded,
SpeechType* speech_type));
MOCK_CONST_METHOD0(HasDecodePlc,
diff --git a/webrtc/modules/audio_coding/neteq/nack.cc b/webrtc/modules/audio_coding/neteq/nack.cc
index fd3d762605..011914b3d9 100644
--- a/webrtc/modules/audio_coding/neteq/nack.cc
+++ b/webrtc/modules/audio_coding/neteq/nack.cc
@@ -15,7 +15,7 @@
#include <algorithm> // For std::max.
#include "webrtc/base/checks.h"
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/system_wrappers/include/logging.h"
namespace webrtc {
diff --git a/webrtc/modules/audio_coding/neteq/nack.h b/webrtc/modules/audio_coding/neteq/nack.h
index 116b7e2192..17fef46464 100644
--- a/webrtc/modules/audio_coding/neteq/nack.h
+++ b/webrtc/modules/audio_coding/neteq/nack.h
@@ -15,7 +15,7 @@
#include <map>
#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/modules/audio_coding/main/include/audio_coding_module_typedefs.h"
+#include "webrtc/modules/audio_coding/include/audio_coding_module_typedefs.h"
#include "webrtc/test/testsupport/gtest_prod_util.h"
//
diff --git a/webrtc/modules/audio_coding/neteq/nack_unittest.cc b/webrtc/modules/audio_coding/neteq/nack_unittest.cc
index 853af94ede..53b19dc50f 100644
--- a/webrtc/modules/audio_coding/neteq/nack_unittest.cc
+++ b/webrtc/modules/audio_coding/neteq/nack_unittest.cc
@@ -17,7 +17,7 @@
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/typedefs.h"
-#include "webrtc/modules/audio_coding/main/include/audio_coding_module_typedefs.h"
+#include "webrtc/modules/audio_coding/include/audio_coding_module_typedefs.h"
namespace webrtc {
namespace {
diff --git a/webrtc/modules/audio_coding/neteq/neteq.cc b/webrtc/modules/audio_coding/neteq/neteq.cc
index ca51c9602d..c31dbdc1a3 100644
--- a/webrtc/modules/audio_coding/neteq/neteq.cc
+++ b/webrtc/modules/audio_coding/neteq/neteq.cc
@@ -32,6 +32,8 @@ std::string NetEq::Config::ToString() const {
std::stringstream ss;
ss << "sample_rate_hz=" << sample_rate_hz << ", enable_audio_classifier="
<< (enable_audio_classifier ? "true" : "false")
+ << ", enable_post_decode_vad="
+ << (enable_post_decode_vad ? "true" : "false")
<< ", max_packets_in_buffer=" << max_packets_in_buffer
<< ", background_noise_mode=" << background_noise_mode
<< ", playout_mode=" << playout_mode
diff --git a/webrtc/modules/audio_coding/neteq/neteq_external_decoder_unittest.cc b/webrtc/modules/audio_coding/neteq/neteq_external_decoder_unittest.cc
index 09eb5614fe..c03fbb7347 100644
--- a/webrtc/modules/audio_coding/neteq/neteq_external_decoder_unittest.cc
+++ b/webrtc/modules/audio_coding/neteq/neteq_external_decoder_unittest.cc
@@ -98,14 +98,16 @@ class NetEqExternalDecoderUnitTest : public test::NetEqExternalDecoderTest {
next_arrival_time = GetArrivalTime(next_send_time);
} while (Lost()); // If lost, immediately read the next packet.
- EXPECT_CALL(*external_decoder_,
- Decode(_, payload_size_bytes_, 1000 * samples_per_ms_, _, _, _))
+ EXPECT_CALL(
+ *external_decoder_,
+ DecodeInternal(_, payload_size_bytes_, 1000 * samples_per_ms_, _, _))
.Times(NumExpectedDecodeCalls(num_loops));
uint32_t time_now = 0;
for (int k = 0; k < num_loops; ++k) {
while (time_now >= next_arrival_time) {
- InsertPacket(rtp_header_, encoded_, payload_size_bytes_,
+ InsertPacket(rtp_header_, rtc::ArrayView<const uint8_t>(
+ encoded_, payload_size_bytes_),
next_arrival_time);
// Get next input packet.
do {
@@ -124,17 +126,14 @@ class NetEqExternalDecoderUnitTest : public test::NetEqExternalDecoderTest {
}
}
- void InsertPacket(WebRtcRTPHeader rtp_header, const uint8_t* payload,
- size_t payload_size_bytes,
+ void InsertPacket(WebRtcRTPHeader rtp_header,
+ rtc::ArrayView<const uint8_t> payload,
uint32_t receive_timestamp) override {
- EXPECT_CALL(*external_decoder_,
- IncomingPacket(_,
- payload_size_bytes,
- rtp_header.header.sequenceNumber,
- rtp_header.header.timestamp,
- receive_timestamp));
+ EXPECT_CALL(
+ *external_decoder_,
+ IncomingPacket(_, payload.size(), rtp_header.header.sequenceNumber,
+ rtp_header.header.timestamp, receive_timestamp));
NetEqExternalDecoderTest::InsertPacket(rtp_header, payload,
- payload_size_bytes,
receive_timestamp);
}
@@ -181,15 +180,15 @@ class NetEqExternalVsInternalDecoderTest : public NetEqExternalDecoderUnitTest,
}
void SetUp() override {
- ASSERT_EQ(NetEq::kOK,
- neteq_internal_->RegisterPayloadType(
- NetEqDecoder::kDecoderPCM16Bswb32kHz, kPayloadType));
+ ASSERT_EQ(NetEq::kOK, neteq_internal_->RegisterPayloadType(
+ NetEqDecoder::kDecoderPCM16Bswb32kHz,
+ "pcm16-swb32", kPayloadType));
}
void GetAndVerifyOutput() override {
NetEqOutputType output_type;
size_t samples_per_channel;
- int num_channels;
+ size_t num_channels;
// Get audio from internal decoder instance.
EXPECT_EQ(NetEq::kOK,
neteq_internal_->GetAudio(kMaxBlockSize,
@@ -197,7 +196,7 @@ class NetEqExternalVsInternalDecoderTest : public NetEqExternalDecoderUnitTest,
&samples_per_channel,
&num_channels,
&output_type));
- EXPECT_EQ(1, num_channels);
+ EXPECT_EQ(1u, num_channels);
EXPECT_EQ(static_cast<size_t>(kOutputLengthMs * sample_rate_hz_ / 1000),
samples_per_channel);
@@ -210,18 +209,15 @@ class NetEqExternalVsInternalDecoderTest : public NetEqExternalDecoderUnitTest,
}
}
- void InsertPacket(WebRtcRTPHeader rtp_header, const uint8_t* payload,
- size_t payload_size_bytes,
+ void InsertPacket(WebRtcRTPHeader rtp_header,
+ rtc::ArrayView<const uint8_t> payload,
uint32_t receive_timestamp) override {
// Insert packet in internal decoder.
- ASSERT_EQ(
- NetEq::kOK,
- neteq_internal_->InsertPacket(
- rtp_header, payload, payload_size_bytes, receive_timestamp));
+ ASSERT_EQ(NetEq::kOK, neteq_internal_->InsertPacket(rtp_header, payload,
+ receive_timestamp));
// Insert packet in external decoder instance.
NetEqExternalDecoderUnitTest::InsertPacket(rtp_header, payload,
- payload_size_bytes,
receive_timestamp);
}
diff --git a/webrtc/modules/audio_coding/neteq/neteq_impl.cc b/webrtc/modules/audio_coding/neteq/neteq_impl.cc
index 92ce41e2ea..6c07da46f0 100644
--- a/webrtc/modules/audio_coding/neteq/neteq_impl.cc
+++ b/webrtc/modules/audio_coding/neteq/neteq_impl.cc
@@ -18,6 +18,7 @@
#include "webrtc/base/checks.h"
#include "webrtc/base/logging.h"
#include "webrtc/base/safe_conversions.h"
+#include "webrtc/base/trace_event.h"
#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
#include "webrtc/modules/audio_coding/codecs/audio_decoder.h"
#include "webrtc/modules/audio_coding/neteq/accelerate.h"
@@ -42,7 +43,7 @@
#include "webrtc/modules/audio_coding/neteq/preemptive_expand.h"
#include "webrtc/modules/audio_coding/neteq/sync_buffer.h"
#include "webrtc/modules/audio_coding/neteq/timestamp_scaler.h"
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
// Modify the code to obtain backwards bit-exactness. Once bit-exactness is no
@@ -106,28 +107,28 @@ NetEqImpl::NetEqImpl(const NetEq::Config& config,
}
fs_hz_ = fs;
fs_mult_ = fs / 8000;
+ last_output_sample_rate_hz_ = fs;
output_size_samples_ = static_cast<size_t>(kOutputSizeMs * 8 * fs_mult_);
decoder_frame_length_ = 3 * output_size_samples_;
WebRtcSpl_Init();
if (create_components) {
SetSampleRateAndChannels(fs, 1); // Default is 1 channel.
}
+ RTC_DCHECK(!vad_->enabled());
+ if (config.enable_post_decode_vad) {
+ vad_->Enable();
+ }
}
NetEqImpl::~NetEqImpl() = default;
int NetEqImpl::InsertPacket(const WebRtcRTPHeader& rtp_header,
- const uint8_t* payload,
- size_t length_bytes,
+ rtc::ArrayView<const uint8_t> payload,
uint32_t receive_timestamp) {
+ TRACE_EVENT0("webrtc", "NetEqImpl::InsertPacket");
CriticalSectionScoped lock(crit_sect_.get());
- LOG(LS_VERBOSE) << "InsertPacket: ts=" << rtp_header.header.timestamp <<
- ", sn=" << rtp_header.header.sequenceNumber <<
- ", pt=" << static_cast<int>(rtp_header.header.payloadType) <<
- ", ssrc=" << rtp_header.header.ssrc <<
- ", len=" << length_bytes;
- int error = InsertPacketInternal(rtp_header, payload, length_bytes,
- receive_timestamp, false);
+ int error =
+ InsertPacketInternal(rtp_header, payload, receive_timestamp, false);
if (error != 0) {
error_code_ = error;
return kFail;
@@ -138,15 +139,9 @@ int NetEqImpl::InsertPacket(const WebRtcRTPHeader& rtp_header,
int NetEqImpl::InsertSyncPacket(const WebRtcRTPHeader& rtp_header,
uint32_t receive_timestamp) {
CriticalSectionScoped lock(crit_sect_.get());
- LOG(LS_VERBOSE) << "InsertPacket-Sync: ts="
- << rtp_header.header.timestamp <<
- ", sn=" << rtp_header.header.sequenceNumber <<
- ", pt=" << static_cast<int>(rtp_header.header.payloadType) <<
- ", ssrc=" << rtp_header.header.ssrc;
-
const uint8_t kSyncPayload[] = { 's', 'y', 'n', 'c' };
- int error = InsertPacketInternal(
- rtp_header, kSyncPayload, sizeof(kSyncPayload), receive_timestamp, true);
+ int error =
+ InsertPacketInternal(rtp_header, kSyncPayload, receive_timestamp, true);
if (error != 0) {
error_code_ = error;
@@ -156,14 +151,12 @@ int NetEqImpl::InsertSyncPacket(const WebRtcRTPHeader& rtp_header,
}
int NetEqImpl::GetAudio(size_t max_length, int16_t* output_audio,
- size_t* samples_per_channel, int* num_channels,
+ size_t* samples_per_channel, size_t* num_channels,
NetEqOutputType* type) {
+ TRACE_EVENT0("webrtc", "NetEqImpl::GetAudio");
CriticalSectionScoped lock(crit_sect_.get());
- LOG(LS_VERBOSE) << "GetAudio";
int error = GetAudioInternal(max_length, output_audio, samples_per_channel,
num_channels);
- LOG(LS_VERBOSE) << "Produced " << *samples_per_channel <<
- " samples/channel for " << *num_channels << " channel(s)";
if (error != 0) {
error_code_ = error;
return kFail;
@@ -171,16 +164,24 @@ int NetEqImpl::GetAudio(size_t max_length, int16_t* output_audio,
if (type) {
*type = LastOutputType();
}
+ last_output_sample_rate_hz_ =
+ rtc::checked_cast<int>(*samples_per_channel * 100);
+ RTC_DCHECK(last_output_sample_rate_hz_ == 8000 ||
+ last_output_sample_rate_hz_ == 16000 ||
+ last_output_sample_rate_hz_ == 32000 ||
+ last_output_sample_rate_hz_ == 48000)
+ << "Unexpected sample rate " << last_output_sample_rate_hz_;
return kOK;
}
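
The factor of 100 works because GetAudio always returns one 10 ms frame, so the per-channel sample count times 100 frames per second recovers the rate; 480 samples per channel, for instance, implies 48000 Hz, and the RTC_DCHECK above pins the result to the four supported rates.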
int NetEqImpl::RegisterPayloadType(NetEqDecoder codec,
+ const std::string& name,
uint8_t rtp_payload_type) {
CriticalSectionScoped lock(crit_sect_.get());
LOG(LS_VERBOSE) << "RegisterPayloadType "
<< static_cast<int>(rtp_payload_type) << " "
<< static_cast<int>(codec);
- int ret = decoder_database_->RegisterPayload(rtp_payload_type, codec);
+ int ret = decoder_database_->RegisterPayload(rtp_payload_type, codec, name);
if (ret != DecoderDatabase::kOK) {
switch (ret) {
case DecoderDatabase::kInvalidRtpPayloadType:
@@ -202,6 +203,7 @@ int NetEqImpl::RegisterPayloadType(NetEqDecoder codec,
int NetEqImpl::RegisterExternalDecoder(AudioDecoder* decoder,
NetEqDecoder codec,
+ const std::string& codec_name,
uint8_t rtp_payload_type,
int sample_rate_hz) {
CriticalSectionScoped lock(crit_sect_.get());
@@ -213,8 +215,8 @@ int NetEqImpl::RegisterExternalDecoder(AudioDecoder* decoder,
assert(false);
return kFail;
}
- int ret = decoder_database_->InsertExternal(rtp_payload_type, codec,
- sample_rate_hz, decoder);
+ int ret = decoder_database_->InsertExternal(
+ rtp_payload_type, codec, codec_name, sample_rate_hz, decoder);
if (ret != DecoderDatabase::kOK) {
switch (ret) {
case DecoderDatabase::kInvalidRtpPayloadType:
@@ -370,6 +372,11 @@ bool NetEqImpl::GetPlayoutTimestamp(uint32_t* timestamp) {
return true;
}
+int NetEqImpl::last_output_sample_rate_hz() const {
+ CriticalSectionScoped lock(crit_sect_.get());
+ return last_output_sample_rate_hz_;
+}
+
int NetEqImpl::SetTargetNumberOfChannels() {
return kNotImplemented;
}
@@ -441,12 +448,11 @@ const SyncBuffer* NetEqImpl::sync_buffer_for_test() const {
// Methods below this line are private.
int NetEqImpl::InsertPacketInternal(const WebRtcRTPHeader& rtp_header,
- const uint8_t* payload,
- size_t length_bytes,
+ rtc::ArrayView<const uint8_t> payload,
uint32_t receive_timestamp,
bool is_sync_packet) {
- if (!payload) {
- LOG_F(LS_ERROR) << "payload == NULL";
+ if (payload.empty()) {
+ LOG_F(LS_ERROR) << "payload is empty";
return kInvalidPointer;
}
// Sanity checks for sync-packets.
@@ -482,7 +488,7 @@ int NetEqImpl::InsertPacketInternal(const WebRtcRTPHeader& rtp_header,
packet->header.timestamp = rtp_header.header.timestamp;
packet->header.ssrc = rtp_header.header.ssrc;
packet->header.numCSRCs = 0;
- packet->payload_length = length_bytes;
+ packet->payload_length = payload.size();
packet->primary = true;
packet->waiting_time = 0;
packet->payload = new uint8_t[packet->payload_length];
@@ -490,8 +496,8 @@ int NetEqImpl::InsertPacketInternal(const WebRtcRTPHeader& rtp_header,
if (!packet->payload) {
LOG_F(LS_ERROR) << "Payload pointer is NULL.";
}
- assert(payload); // Already checked above.
- memcpy(packet->payload, payload, packet->payload_length);
+ assert(!payload.empty()); // Already checked above.
+ memcpy(packet->payload, payload.data(), packet->payload_length);
// Insert packet in a packet list.
packet_list.push_back(packet);
// Save main payloads header for later.
@@ -738,7 +744,7 @@ int NetEqImpl::InsertPacketInternal(const WebRtcRTPHeader& rtp_header,
int NetEqImpl::GetAudioInternal(size_t max_length,
int16_t* output,
size_t* samples_per_channel,
- int* num_channels) {
+ size_t* num_channels) {
PacketList packet_list;
DtmfEvent dtmf_event;
Operations operation;
@@ -749,8 +755,6 @@ int NetEqImpl::GetAudioInternal(size_t max_length,
last_mode_ = kModeError;
return return_value;
}
- LOG(LS_VERBOSE) << "GetDecision returned operation=" << operation <<
- " and " << packet_list.size() << " packet(s)";
AudioDecoder::SpeechType speech_type;
int length = 0;
@@ -864,10 +868,7 @@ int NetEqImpl::GetAudioInternal(size_t max_length,
const size_t samples_from_sync =
sync_buffer_->GetNextAudioInterleaved(num_output_samples_per_channel,
output);
- *num_channels = static_cast<int>(sync_buffer_->Channels());
- LOG(LS_VERBOSE) << "Sync buffer (" << *num_channels << " channel(s)):" <<
- " insert " << algorithm_buffer_->Size() << " samples, extract " <<
- samples_from_sync << " samples";
+ *num_channels = sync_buffer_->Channels();
if (sync_buffer_->FutureLength() < expand_->overlap_length()) {
// The sync buffer should always contain |overlap_length| samples, but now
// too many samples have been extracted. Reinstall the |overlap_length|
@@ -1325,7 +1326,6 @@ int NetEqImpl::DecodeCng(AudioDecoder* decoder, int* decoded_length,
&decoded_buffer_[*decoded_length], speech_type);
if (length > 0) {
*decoded_length += length;
- LOG(LS_VERBOSE) << "Decoded " << length << " CNG samples";
} else {
// Error.
LOG(LS_WARNING) << "Failed to decode CNG";
@@ -1365,34 +1365,17 @@ int NetEqImpl::DecodeLoop(PacketList* packet_list, const Operations& operation,
int decode_length;
if (packet->sync_packet) {
// Decode to silence with the same frame size as the last decode.
- LOG(LS_VERBOSE) << "Decoding sync-packet: " <<
- " ts=" << packet->header.timestamp <<
- ", sn=" << packet->header.sequenceNumber <<
- ", pt=" << static_cast<int>(packet->header.payloadType) <<
- ", ssrc=" << packet->header.ssrc <<
- ", len=" << packet->payload_length;
memset(&decoded_buffer_[*decoded_length], 0,
decoder_frame_length_ * decoder->Channels() *
sizeof(decoded_buffer_[0]));
decode_length = rtc::checked_cast<int>(decoder_frame_length_);
} else if (!packet->primary) {
// This is a redundant payload; call the special decoder method.
- LOG(LS_VERBOSE) << "Decoding packet (redundant):" <<
- " ts=" << packet->header.timestamp <<
- ", sn=" << packet->header.sequenceNumber <<
- ", pt=" << static_cast<int>(packet->header.payloadType) <<
- ", ssrc=" << packet->header.ssrc <<
- ", len=" << packet->payload_length;
decode_length = decoder->DecodeRedundant(
packet->payload, packet->payload_length, fs_hz_,
(decoded_buffer_length_ - *decoded_length) * sizeof(int16_t),
&decoded_buffer_[*decoded_length], speech_type);
} else {
- LOG(LS_VERBOSE) << "Decoding packet: ts=" << packet->header.timestamp <<
- ", sn=" << packet->header.sequenceNumber <<
- ", pt=" << static_cast<int>(packet->header.payloadType) <<
- ", ssrc=" << packet->header.ssrc <<
- ", len=" << packet->payload_length;
decode_length =
decoder->Decode(
packet->payload, packet->payload_length, fs_hz_,
@@ -1408,9 +1391,6 @@ int NetEqImpl::DecodeLoop(PacketList* packet_list, const Operations& operation,
// Update |decoder_frame_length_| with number of samples per channel.
decoder_frame_length_ =
static_cast<size_t>(decode_length) / decoder->Channels();
- LOG(LS_VERBOSE) << "Decoded " << decode_length << " samples ("
- << decoder->Channels() << " channel(s) -> "
- << decoder_frame_length_ << " samples per channel)";
} else if (decode_length < 0) {
// Error.
LOG(LS_WARNING) << "Decode " << decode_length << " " << payload_length;
diff --git a/webrtc/modules/audio_coding/neteq/neteq_impl.h b/webrtc/modules/audio_coding/neteq/neteq_impl.h
index c001e53b81..940deadd2f 100644
--- a/webrtc/modules/audio_coding/neteq/neteq_impl.h
+++ b/webrtc/modules/audio_coding/neteq/neteq_impl.h
@@ -11,6 +11,8 @@
#ifndef WEBRTC_MODULES_AUDIO_CODING_NETEQ_NETEQ_IMPL_H_
#define WEBRTC_MODULES_AUDIO_CODING_NETEQ_NETEQ_IMPL_H_
+#include <string>
+
#include "webrtc/base/constructormagic.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/base/thread_annotations.h"
@@ -79,8 +81,7 @@ class NetEqImpl : public webrtc::NetEq {
// the same tick rate as the RTP timestamp of the current payload.
// Returns 0 on success, -1 on failure.
int InsertPacket(const WebRtcRTPHeader& rtp_header,
- const uint8_t* payload,
- size_t length_bytes,
+ rtc::ArrayView<const uint8_t> payload,
uint32_t receive_timestamp) override;
// Inserts a sync-packet into packet queue. Sync-packets are decoded to
@@ -106,20 +107,16 @@ class NetEqImpl : public webrtc::NetEq {
int GetAudio(size_t max_length,
int16_t* output_audio,
size_t* samples_per_channel,
- int* num_channels,
+ size_t* num_channels,
NetEqOutputType* type) override;
- // Associates |rtp_payload_type| with |codec| and stores the information in
- // the codec database. Returns kOK on success, kFail on failure.
int RegisterPayloadType(NetEqDecoder codec,
+ const std::string& codec_name,
uint8_t rtp_payload_type) override;
- // Provides an externally created decoder object |decoder| to insert in the
- // decoder database. The decoder implements a decoder of type |codec| and
- // associates it with |rtp_payload_type|. The decoder will produce samples
- // at the rate |sample_rate_hz|. Returns kOK on success, kFail on failure.
int RegisterExternalDecoder(AudioDecoder* decoder,
NetEqDecoder codec,
+ const std::string& codec_name,
uint8_t rtp_payload_type,
int sample_rate_hz) override;
@@ -169,6 +166,8 @@ class NetEqImpl : public webrtc::NetEq {
bool GetPlayoutTimestamp(uint32_t* timestamp) override;
+ int last_output_sample_rate_hz() const override;
+
int SetTargetNumberOfChannels() override;
int SetTargetSampleRate() override;
@@ -207,8 +206,7 @@ class NetEqImpl : public webrtc::NetEq {
// above. Returns 0 on success, otherwise an error code.
// TODO(hlundin): Merge this with InsertPacket above?
int InsertPacketInternal(const WebRtcRTPHeader& rtp_header,
- const uint8_t* payload,
- size_t length_bytes,
+ rtc::ArrayView<const uint8_t> payload,
uint32_t receive_timestamp,
bool is_sync_packet)
EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
@@ -222,7 +220,8 @@ class NetEqImpl : public webrtc::NetEq {
int GetAudioInternal(size_t max_length,
int16_t* output,
size_t* samples_per_channel,
- int* num_channels) EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+ size_t* num_channels)
+ EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
// Provides a decision to the GetAudioInternal method. The decision what to
// do is written to |operation|. Packets to decode are written to
@@ -377,6 +376,7 @@ class NetEqImpl : public webrtc::NetEq {
StatisticsCalculator stats_ GUARDED_BY(crit_sect_);
int fs_hz_ GUARDED_BY(crit_sect_);
int fs_mult_ GUARDED_BY(crit_sect_);
+ int last_output_sample_rate_hz_ GUARDED_BY(crit_sect_);
size_t output_size_samples_ GUARDED_BY(crit_sect_);
size_t decoder_frame_length_ GUARDED_BY(crit_sect_);
Modes last_mode_ GUARDED_BY(crit_sect_);
diff --git a/webrtc/modules/audio_coding/neteq/neteq_impl_unittest.cc b/webrtc/modules/audio_coding/neteq/neteq_impl_unittest.cc
index 90640ca1d2..f734883635 100644
--- a/webrtc/modules/audio_coding/neteq/neteq_impl_unittest.cc
+++ b/webrtc/modules/audio_coding/neteq/neteq_impl_unittest.cc
@@ -240,9 +240,10 @@ TEST_F(NetEqImplTest, RegisterPayloadType) {
CreateInstance();
uint8_t rtp_payload_type = 0;
NetEqDecoder codec_type = NetEqDecoder::kDecoderPCMu;
+ const std::string kCodecName = "Robert\'); DROP TABLE Students;";
EXPECT_CALL(*mock_decoder_database_,
- RegisterPayload(rtp_payload_type, codec_type));
- neteq_->RegisterPayloadType(codec_type, rtp_payload_type);
+ RegisterPayload(rtp_payload_type, codec_type, kCodecName));
+ neteq_->RegisterPayloadType(codec_type, kCodecName, rtp_payload_type);
}
TEST_F(NetEqImplTest, RemovePayloadType) {
@@ -359,13 +360,12 @@ TEST_F(NetEqImplTest, InsertPacket) {
.WillRepeatedly(Return(PayloadSplitter::kOK));
// Insert first packet.
- neteq_->InsertPacket(rtp_header, payload, kPayloadLength, kFirstReceiveTime);
+ neteq_->InsertPacket(rtp_header, payload, kFirstReceiveTime);
// Insert second packet.
rtp_header.header.timestamp += 160;
rtp_header.header.sequenceNumber += 1;
- neteq_->InsertPacket(rtp_header, payload, kPayloadLength,
- kFirstReceiveTime + 155);
+ neteq_->InsertPacket(rtp_header, payload, kFirstReceiveTime + 155);
}
TEST_F(NetEqImplTest, InsertPacketsUntilBufferIsFull) {
@@ -384,13 +384,12 @@ TEST_F(NetEqImplTest, InsertPacketsUntilBufferIsFull) {
rtp_header.header.ssrc = 0x87654321;
EXPECT_EQ(NetEq::kOK, neteq_->RegisterPayloadType(
- NetEqDecoder::kDecoderPCM16B, kPayloadType));
+ NetEqDecoder::kDecoderPCM16B, "", kPayloadType));
// Insert packets. The buffer should not flush.
for (size_t i = 1; i <= config_.max_packets_in_buffer; ++i) {
EXPECT_EQ(NetEq::kOK,
- neteq_->InsertPacket(
- rtp_header, payload, kPayloadLengthBytes, kReceiveTime));
+ neteq_->InsertPacket(rtp_header, payload, kReceiveTime));
rtp_header.header.timestamp += kPayloadLengthSamples;
rtp_header.header.sequenceNumber += 1;
EXPECT_EQ(i, packet_buffer_->NumPacketsInBuffer());
@@ -399,8 +398,7 @@ TEST_F(NetEqImplTest, InsertPacketsUntilBufferIsFull) {
// Insert one more packet and make sure the buffer got flushed. That is, it
// should hold only a single packet.
EXPECT_EQ(NetEq::kOK,
- neteq_->InsertPacket(
- rtp_header, payload, kPayloadLengthBytes, kReceiveTime));
+ neteq_->InsertPacket(rtp_header, payload, kReceiveTime));
EXPECT_EQ(1u, packet_buffer_->NumPacketsInBuffer());
const RTPHeader* test_header = packet_buffer_->NextRtpHeader();
EXPECT_EQ(rtp_header.header.timestamp, test_header->timestamp);
@@ -434,12 +432,11 @@ TEST_F(NetEqImplTest, VerifyTimestampPropagation) {
CountingSamplesDecoder() : next_value_(1) {}
// Produce as many samples as input bytes (|encoded_len|).
- int Decode(const uint8_t* encoded,
- size_t encoded_len,
- int /* sample_rate_hz */,
- size_t /* max_decoded_bytes */,
- int16_t* decoded,
- SpeechType* speech_type) override {
+ int DecodeInternal(const uint8_t* encoded,
+ size_t encoded_len,
+ int /* sample_rate_hz */,
+ int16_t* decoded,
+ SpeechType* speech_type) override {
for (size_t i = 0; i < encoded_len; ++i) {
decoded[i] = next_value_++;
}
@@ -459,25 +456,24 @@ TEST_F(NetEqImplTest, VerifyTimestampPropagation) {
EXPECT_EQ(NetEq::kOK, neteq_->RegisterExternalDecoder(
&decoder_, NetEqDecoder::kDecoderPCM16B,
- kPayloadType, kSampleRateHz));
+ "dummy name", kPayloadType, kSampleRateHz));
// Insert one packet.
EXPECT_EQ(NetEq::kOK,
- neteq_->InsertPacket(
- rtp_header, payload, kPayloadLengthBytes, kReceiveTime));
+ neteq_->InsertPacket(rtp_header, payload, kReceiveTime));
// Pull audio once.
const size_t kMaxOutputSize = static_cast<size_t>(10 * kSampleRateHz / 1000);
int16_t output[kMaxOutputSize];
size_t samples_per_channel;
- int num_channels;
+ size_t num_channels;
NetEqOutputType type;
EXPECT_EQ(
NetEq::kOK,
neteq_->GetAudio(
kMaxOutputSize, output, &samples_per_channel, &num_channels, &type));
ASSERT_EQ(kMaxOutputSize, samples_per_channel);
- EXPECT_EQ(1, num_channels);
+ EXPECT_EQ(1u, num_channels);
EXPECT_EQ(kOutputNormal, type);
// Start with a simple check that the fake decoder is behaving as expected.
@@ -531,33 +527,32 @@ TEST_F(NetEqImplTest, ReorderedPacket) {
int16_t dummy_output[kPayloadLengthSamples] = {0};
// The below expectation will make the mock decoder write
// |kPayloadLengthSamples| zeros to the output array, and mark it as speech.
- EXPECT_CALL(mock_decoder,
- Decode(Pointee(0), kPayloadLengthBytes, kSampleRateHz, _, _, _))
- .WillOnce(DoAll(SetArrayArgument<4>(dummy_output,
+ EXPECT_CALL(mock_decoder, DecodeInternal(Pointee(0), kPayloadLengthBytes,
+ kSampleRateHz, _, _))
+ .WillOnce(DoAll(SetArrayArgument<3>(dummy_output,
dummy_output + kPayloadLengthSamples),
- SetArgPointee<5>(AudioDecoder::kSpeech),
+ SetArgPointee<4>(AudioDecoder::kSpeech),
Return(kPayloadLengthSamples)));
EXPECT_EQ(NetEq::kOK, neteq_->RegisterExternalDecoder(
&mock_decoder, NetEqDecoder::kDecoderPCM16B,
- kPayloadType, kSampleRateHz));
+ "dummy name", kPayloadType, kSampleRateHz));
// Insert one packet.
EXPECT_EQ(NetEq::kOK,
- neteq_->InsertPacket(
- rtp_header, payload, kPayloadLengthBytes, kReceiveTime));
+ neteq_->InsertPacket(rtp_header, payload, kReceiveTime));
// Pull audio once.
const size_t kMaxOutputSize = static_cast<size_t>(10 * kSampleRateHz / 1000);
int16_t output[kMaxOutputSize];
size_t samples_per_channel;
- int num_channels;
+ size_t num_channels;
NetEqOutputType type;
EXPECT_EQ(
NetEq::kOK,
neteq_->GetAudio(
kMaxOutputSize, output, &samples_per_channel, &num_channels, &type));
ASSERT_EQ(kMaxOutputSize, samples_per_channel);
- EXPECT_EQ(1, num_channels);
+ EXPECT_EQ(1u, num_channels);
EXPECT_EQ(kOutputNormal, type);
// Insert two more packets. The first one is out of order, and is already too
@@ -566,22 +561,20 @@ TEST_F(NetEqImplTest, ReorderedPacket) {
rtp_header.header.timestamp -= kPayloadLengthSamples;
payload[0] = 1;
EXPECT_EQ(NetEq::kOK,
- neteq_->InsertPacket(
- rtp_header, payload, kPayloadLengthBytes, kReceiveTime));
+ neteq_->InsertPacket(rtp_header, payload, kReceiveTime));
rtp_header.header.sequenceNumber += 2;
rtp_header.header.timestamp += 2 * kPayloadLengthSamples;
payload[0] = 2;
EXPECT_EQ(NetEq::kOK,
- neteq_->InsertPacket(
- rtp_header, payload, kPayloadLengthBytes, kReceiveTime));
+ neteq_->InsertPacket(rtp_header, payload, kReceiveTime));
// Expect only the second packet to be decoded (the one with "2" as the first
// payload byte).
- EXPECT_CALL(mock_decoder,
- Decode(Pointee(2), kPayloadLengthBytes, kSampleRateHz, _, _, _))
- .WillOnce(DoAll(SetArrayArgument<4>(dummy_output,
+ EXPECT_CALL(mock_decoder, DecodeInternal(Pointee(2), kPayloadLengthBytes,
+ kSampleRateHz, _, _))
+ .WillOnce(DoAll(SetArrayArgument<3>(dummy_output,
dummy_output + kPayloadLengthSamples),
- SetArgPointee<5>(AudioDecoder::kSpeech),
+ SetArgPointee<4>(AudioDecoder::kSpeech),
Return(kPayloadLengthSamples)));
// Pull audio once.
@@ -590,7 +583,7 @@ TEST_F(NetEqImplTest, ReorderedPacket) {
neteq_->GetAudio(
kMaxOutputSize, output, &samples_per_channel, &num_channels, &type));
ASSERT_EQ(kMaxOutputSize, samples_per_channel);
- EXPECT_EQ(1, num_channels);
+ EXPECT_EQ(1u, num_channels);
EXPECT_EQ(kOutputNormal, type);
// Now check the packet buffer, and make sure it is empty, since the
@@ -622,35 +615,33 @@ TEST_F(NetEqImplTest, FirstPacketUnknown) {
// Insert one packet. Note that we have not registered any payload type, so
// this packet will be rejected.
EXPECT_EQ(NetEq::kFail,
- neteq_->InsertPacket(rtp_header, payload, kPayloadLengthBytes,
- kReceiveTime));
+ neteq_->InsertPacket(rtp_header, payload, kReceiveTime));
EXPECT_EQ(NetEq::kUnknownRtpPayloadType, neteq_->LastError());
// Pull audio once.
const size_t kMaxOutputSize = static_cast<size_t>(10 * kSampleRateHz / 1000);
int16_t output[kMaxOutputSize];
size_t samples_per_channel;
- int num_channels;
+ size_t num_channels;
NetEqOutputType type;
EXPECT_EQ(NetEq::kOK,
neteq_->GetAudio(kMaxOutputSize, output, &samples_per_channel,
&num_channels, &type));
ASSERT_LE(samples_per_channel, kMaxOutputSize);
EXPECT_EQ(kMaxOutputSize, samples_per_channel);
- EXPECT_EQ(1, num_channels);
+ EXPECT_EQ(1u, num_channels);
EXPECT_EQ(kOutputPLC, type);
// Register the payload type.
EXPECT_EQ(NetEq::kOK, neteq_->RegisterPayloadType(
- NetEqDecoder::kDecoderPCM16B, kPayloadType));
+ NetEqDecoder::kDecoderPCM16B, "", kPayloadType));
// Insert 10 packets.
for (size_t i = 0; i < 10; ++i) {
rtp_header.header.sequenceNumber++;
rtp_header.header.timestamp += kPayloadLengthSamples;
EXPECT_EQ(NetEq::kOK,
- neteq_->InsertPacket(rtp_header, payload, kPayloadLengthBytes,
- kReceiveTime));
+ neteq_->InsertPacket(rtp_header, payload, kReceiveTime));
EXPECT_EQ(i + 1, packet_buffer_->NumPacketsInBuffer());
}
@@ -661,7 +652,7 @@ TEST_F(NetEqImplTest, FirstPacketUnknown) {
&num_channels, &type));
ASSERT_LE(samples_per_channel, kMaxOutputSize);
EXPECT_EQ(kMaxOutputSize, samples_per_channel);
- EXPECT_EQ(1, num_channels);
+ EXPECT_EQ(1u, num_channels);
EXPECT_EQ(kOutputNormal, type)
<< "NetEq did not decode the packets as expected.";
}
@@ -697,54 +688,53 @@ TEST_F(NetEqImplTest, CodecInternalCng) {
// Pointee(x) verifies that the first byte of the payload equals x; this makes
// it possible to verify that the correct payload is fed to Decode().
- EXPECT_CALL(mock_decoder, Decode(Pointee(0), kPayloadLengthBytes,
- kSampleRateKhz * 1000, _, _, _))
- .WillOnce(DoAll(SetArrayArgument<4>(dummy_output,
+ EXPECT_CALL(mock_decoder, DecodeInternal(Pointee(0), kPayloadLengthBytes,
+ kSampleRateKhz * 1000, _, _))
+ .WillOnce(DoAll(SetArrayArgument<3>(dummy_output,
dummy_output + kPayloadLengthSamples),
- SetArgPointee<5>(AudioDecoder::kSpeech),
+ SetArgPointee<4>(AudioDecoder::kSpeech),
Return(kPayloadLengthSamples)));
- EXPECT_CALL(mock_decoder, Decode(Pointee(1), kPayloadLengthBytes,
- kSampleRateKhz * 1000, _, _, _))
- .WillOnce(DoAll(SetArrayArgument<4>(dummy_output,
+ EXPECT_CALL(mock_decoder, DecodeInternal(Pointee(1), kPayloadLengthBytes,
+ kSampleRateKhz * 1000, _, _))
+ .WillOnce(DoAll(SetArrayArgument<3>(dummy_output,
dummy_output + kPayloadLengthSamples),
- SetArgPointee<5>(AudioDecoder::kComfortNoise),
+ SetArgPointee<4>(AudioDecoder::kComfortNoise),
Return(kPayloadLengthSamples)));
- EXPECT_CALL(mock_decoder, Decode(IsNull(), 0, kSampleRateKhz * 1000, _, _, _))
- .WillOnce(DoAll(SetArrayArgument<4>(dummy_output,
+ EXPECT_CALL(mock_decoder,
+ DecodeInternal(IsNull(), 0, kSampleRateKhz * 1000, _, _))
+ .WillOnce(DoAll(SetArrayArgument<3>(dummy_output,
dummy_output + kPayloadLengthSamples),
- SetArgPointee<5>(AudioDecoder::kComfortNoise),
+ SetArgPointee<4>(AudioDecoder::kComfortNoise),
Return(kPayloadLengthSamples)));
- EXPECT_CALL(mock_decoder, Decode(Pointee(2), kPayloadLengthBytes,
- kSampleRateKhz * 1000, _, _, _))
- .WillOnce(DoAll(SetArrayArgument<4>(dummy_output,
+ EXPECT_CALL(mock_decoder, DecodeInternal(Pointee(2), kPayloadLengthBytes,
+ kSampleRateKhz * 1000, _, _))
+ .WillOnce(DoAll(SetArrayArgument<3>(dummy_output,
dummy_output + kPayloadLengthSamples),
- SetArgPointee<5>(AudioDecoder::kSpeech),
+ SetArgPointee<4>(AudioDecoder::kSpeech),
Return(kPayloadLengthSamples)));
EXPECT_EQ(NetEq::kOK, neteq_->RegisterExternalDecoder(
&mock_decoder, NetEqDecoder::kDecoderOpus,
- kPayloadType, kSampleRateKhz * 1000));
+ "dummy name", kPayloadType, kSampleRateKhz * 1000));
// Insert one packet (decoder will return speech).
EXPECT_EQ(NetEq::kOK,
- neteq_->InsertPacket(
- rtp_header, payload, kPayloadLengthBytes, kReceiveTime));
+ neteq_->InsertPacket(rtp_header, payload, kReceiveTime));
// Insert second packet (decoder will return CNG).
payload[0] = 1;
rtp_header.header.sequenceNumber++;
rtp_header.header.timestamp += kPayloadLengthSamples;
EXPECT_EQ(NetEq::kOK,
- neteq_->InsertPacket(
- rtp_header, payload, kPayloadLengthBytes, kReceiveTime));
+ neteq_->InsertPacket(rtp_header, payload, kReceiveTime));
const size_t kMaxOutputSize = static_cast<size_t>(10 * kSampleRateKhz);
int16_t output[kMaxOutputSize];
size_t samples_per_channel;
- int num_channels;
+ size_t num_channels;
uint32_t timestamp;
uint32_t last_timestamp;
NetEqOutputType type;
@@ -769,7 +759,7 @@ TEST_F(NetEqImplTest, CodecInternalCng) {
for (size_t i = 1; i < 6; ++i) {
ASSERT_EQ(kMaxOutputSize, samples_per_channel);
- EXPECT_EQ(1, num_channels);
+ EXPECT_EQ(1u, num_channels);
EXPECT_EQ(expected_type[i - 1], type);
EXPECT_TRUE(neteq_->GetPlayoutTimestamp(&timestamp));
EXPECT_EQ(NetEq::kOK,
@@ -785,12 +775,11 @@ TEST_F(NetEqImplTest, CodecInternalCng) {
rtp_header.header.sequenceNumber += 2;
rtp_header.header.timestamp += 2 * kPayloadLengthSamples;
EXPECT_EQ(NetEq::kOK,
- neteq_->InsertPacket(
- rtp_header, payload, kPayloadLengthBytes, kReceiveTime));
+ neteq_->InsertPacket(rtp_header, payload, kReceiveTime));
for (size_t i = 6; i < 8; ++i) {
ASSERT_EQ(kMaxOutputSize, samples_per_channel);
- EXPECT_EQ(1, num_channels);
+ EXPECT_EQ(1u, num_channels);
EXPECT_EQ(expected_type[i - 1], type);
EXPECT_EQ(NetEq::kOK,
neteq_->GetAudio(kMaxOutputSize, output, &samples_per_channel,
@@ -810,7 +799,7 @@ TEST_F(NetEqImplTest, UnsupportedDecoder) {
UseNoMocks();
CreateInstance();
static const size_t kNetEqMaxFrameSize = 2880; // 60 ms @ 48 kHz.
- static const int kChannels = 2;
+ static const size_t kChannels = 2;
const uint8_t kPayloadType = 17; // Just an arbitrary number.
const uint32_t kReceiveTime = 17; // Value doesn't matter for this test.
@@ -866,13 +855,12 @@ TEST_F(NetEqImplTest, UnsupportedDecoder) {
EXPECT_EQ(NetEq::kOK, neteq_->RegisterExternalDecoder(
&decoder_, NetEqDecoder::kDecoderPCM16B,
- kPayloadType, kSampleRateHz));
+ "dummy name", kPayloadType, kSampleRateHz));
// Insert one packet.
payload[0] = kFirstPayloadValue; // This will make Decode() fail.
EXPECT_EQ(NetEq::kOK,
- neteq_->InsertPacket(
- rtp_header, payload, kPayloadLengthBytes, kReceiveTime));
+ neteq_->InsertPacket(rtp_header, payload, kReceiveTime));
// Insert another packet.
payload[0] = kSecondPayloadValue; // This will make Decode() successful.
@@ -881,14 +869,12 @@ TEST_F(NetEqImplTest, UnsupportedDecoder) {
// the second packet get decoded.
rtp_header.header.timestamp += 3 * kPayloadLengthSamples;
EXPECT_EQ(NetEq::kOK,
- neteq_->InsertPacket(
- rtp_header, payload, kPayloadLengthBytes, kReceiveTime));
+ neteq_->InsertPacket(rtp_header, payload, kReceiveTime));
- const size_t kMaxOutputSize =
- static_cast<size_t>(10 * kSampleRateHz / 1000 * kChannels);
+ const size_t kMaxOutputSize = 10 * kSampleRateHz / 1000 * kChannels;
int16_t output[kMaxOutputSize];
size_t samples_per_channel;
- int num_channels;
+ size_t num_channels;
NetEqOutputType type;
EXPECT_EQ(NetEq::kFail, neteq_->GetAudio(kMaxOutputSize, output,
@@ -926,14 +912,13 @@ TEST_F(NetEqImplTest, FloodBufferAndGetNetworkStats) {
rtp_header.header.ssrc = 0x87654321;
EXPECT_EQ(NetEq::kOK, neteq_->RegisterPayloadType(
- NetEqDecoder::kDecoderPCM16B, kPayloadType));
+ NetEqDecoder::kDecoderPCM16B, "", kPayloadType));
// Insert packets until the buffer flushes.
for (size_t i = 0; i <= config_.max_packets_in_buffer; ++i) {
EXPECT_EQ(i, packet_buffer_->NumPacketsInBuffer());
EXPECT_EQ(NetEq::kOK,
- neteq_->InsertPacket(rtp_header, payload, kPayloadLengthBytes,
- kReceiveTime));
+ neteq_->InsertPacket(rtp_header, payload, kReceiveTime));
rtp_header.header.timestamp +=
rtc::checked_cast<uint32_t>(kPayloadLengthSamples);
++rtp_header.header.sequenceNumber;
@@ -975,20 +960,19 @@ TEST_F(NetEqImplTest, DecodedPayloadTooShort) {
// |kPayloadLengthSamples| - 5 zeros to the output array, and mark it as
// speech. That is, the decoded length is 5 samples shorter than expected.
EXPECT_CALL(mock_decoder,
- Decode(_, kPayloadLengthBytes, kSampleRateHz, _, _, _))
+ DecodeInternal(_, kPayloadLengthBytes, kSampleRateHz, _, _))
.WillOnce(
- DoAll(SetArrayArgument<4>(dummy_output,
+ DoAll(SetArrayArgument<3>(dummy_output,
dummy_output + kPayloadLengthSamples - 5),
- SetArgPointee<5>(AudioDecoder::kSpeech),
+ SetArgPointee<4>(AudioDecoder::kSpeech),
Return(kPayloadLengthSamples - 5)));
EXPECT_EQ(NetEq::kOK, neteq_->RegisterExternalDecoder(
&mock_decoder, NetEqDecoder::kDecoderPCM16B,
- kPayloadType, kSampleRateHz));
+ "dummy name", kPayloadType, kSampleRateHz));
// Insert one packet.
EXPECT_EQ(NetEq::kOK,
- neteq_->InsertPacket(rtp_header, payload, kPayloadLengthBytes,
- kReceiveTime));
+ neteq_->InsertPacket(rtp_header, payload, kReceiveTime));
EXPECT_EQ(5u, neteq_->sync_buffer_for_test()->FutureLength());
@@ -996,13 +980,13 @@ TEST_F(NetEqImplTest, DecodedPayloadTooShort) {
const size_t kMaxOutputSize = static_cast<size_t>(10 * kSampleRateHz / 1000);
int16_t output[kMaxOutputSize];
size_t samples_per_channel;
- int num_channels;
+ size_t num_channels;
NetEqOutputType type;
EXPECT_EQ(NetEq::kOK,
neteq_->GetAudio(kMaxOutputSize, output, &samples_per_channel,
&num_channels, &type));
ASSERT_EQ(kMaxOutputSize, samples_per_channel);
- EXPECT_EQ(1, num_channels);
+ EXPECT_EQ(1u, num_channels);
EXPECT_EQ(kOutputNormal, type);
EXPECT_CALL(mock_decoder, Die());
@@ -1050,57 +1034,56 @@ TEST_F(NetEqImplTest, DecodingError) {
InSequence sequence; // Dummy variable.
// Mock decoder works normally the first time.
EXPECT_CALL(mock_decoder,
- Decode(_, kPayloadLengthBytes, kSampleRateHz, _, _, _))
+ DecodeInternal(_, kPayloadLengthBytes, kSampleRateHz, _, _))
.Times(3)
.WillRepeatedly(
- DoAll(SetArrayArgument<4>(dummy_output,
+ DoAll(SetArrayArgument<3>(dummy_output,
dummy_output + kFrameLengthSamples),
- SetArgPointee<5>(AudioDecoder::kSpeech),
+ SetArgPointee<4>(AudioDecoder::kSpeech),
Return(kFrameLengthSamples)))
.RetiresOnSaturation();
// Then the mock decoder fails. A common reason for failure is that the buffer
// is too short.
EXPECT_CALL(mock_decoder,
- Decode(_, kPayloadLengthBytes, kSampleRateHz, _, _, _))
+ DecodeInternal(_, kPayloadLengthBytes, kSampleRateHz, _, _))
.WillOnce(Return(-1))
.RetiresOnSaturation();
// Mock decoder finally returns to normal.
EXPECT_CALL(mock_decoder,
- Decode(_, kPayloadLengthBytes, kSampleRateHz, _, _, _))
+ DecodeInternal(_, kPayloadLengthBytes, kSampleRateHz, _, _))
.Times(2)
.WillRepeatedly(
- DoAll(SetArrayArgument<4>(dummy_output,
- dummy_output + kFrameLengthSamples),
- SetArgPointee<5>(AudioDecoder::kSpeech),
+ DoAll(SetArrayArgument<3>(dummy_output,
+ dummy_output + kFrameLengthSamples),
+ SetArgPointee<4>(AudioDecoder::kSpeech),
Return(kFrameLengthSamples)));
}
EXPECT_EQ(NetEq::kOK, neteq_->RegisterExternalDecoder(
&mock_decoder, NetEqDecoder::kDecoderPCM16B,
- kPayloadType, kSampleRateHz));
+ "dummy name", kPayloadType, kSampleRateHz));
// Insert packets.
for (int i = 0; i < 6; ++i) {
rtp_header.header.sequenceNumber += 1;
rtp_header.header.timestamp += kFrameLengthSamples;
EXPECT_EQ(NetEq::kOK,
- neteq_->InsertPacket(rtp_header, payload, kPayloadLengthBytes,
- kReceiveTime));
+ neteq_->InsertPacket(rtp_header, payload, kReceiveTime));
}
// Pull audio.
const size_t kMaxOutputSize = static_cast<size_t>(10 * kSampleRateHz / 1000);
int16_t output[kMaxOutputSize];
size_t samples_per_channel;
- int num_channels;
+ size_t num_channels;
NetEqOutputType type;
EXPECT_EQ(NetEq::kOK,
neteq_->GetAudio(kMaxOutputSize, output, &samples_per_channel,
&num_channels, &type));
EXPECT_EQ(kMaxOutputSize, samples_per_channel);
- EXPECT_EQ(1, num_channels);
+ EXPECT_EQ(1u, num_channels);
EXPECT_EQ(kOutputNormal, type);
// Pull audio again. Decoder fails.
@@ -1110,7 +1093,7 @@ TEST_F(NetEqImplTest, DecodingError) {
EXPECT_EQ(NetEq::kDecoderErrorCode, neteq_->LastError());
EXPECT_EQ(kDecoderErrorCode, neteq_->LastDecoderError());
EXPECT_EQ(kMaxOutputSize, samples_per_channel);
- EXPECT_EQ(1, num_channels);
+ EXPECT_EQ(1u, num_channels);
// TODO(minyue): NetEq should perhaps give kOutputPLC, since this is actually
// an expansion.
EXPECT_EQ(kOutputNormal, type);
@@ -1120,7 +1103,7 @@ TEST_F(NetEqImplTest, DecodingError) {
neteq_->GetAudio(kMaxOutputSize, output, &samples_per_channel,
&num_channels, &type));
EXPECT_EQ(kMaxOutputSize, samples_per_channel);
- EXPECT_EQ(1, num_channels);
+ EXPECT_EQ(1u, num_channels);
EXPECT_EQ(kOutputPLC, type);
// Pull audio again, should behave normal.
@@ -1128,7 +1111,7 @@ TEST_F(NetEqImplTest, DecodingError) {
neteq_->GetAudio(kMaxOutputSize, output, &samples_per_channel,
&num_channels, &type));
EXPECT_EQ(kMaxOutputSize, samples_per_channel);
- EXPECT_EQ(1, num_channels);
+ EXPECT_EQ(1u, num_channels);
EXPECT_EQ(kOutputNormal, type);
EXPECT_CALL(mock_decoder, Die());
@@ -1174,55 +1157,54 @@ TEST_F(NetEqImplTest, DecodingErrorDuringInternalCng) {
InSequence sequence; // Dummy variable.
// Mock decoder works normally the first 2 times.
EXPECT_CALL(mock_decoder,
- Decode(_, kPayloadLengthBytes, kSampleRateHz, _, _, _))
+ DecodeInternal(_, kPayloadLengthBytes, kSampleRateHz, _, _))
.Times(2)
.WillRepeatedly(
- DoAll(SetArrayArgument<4>(dummy_output,
+ DoAll(SetArrayArgument<3>(dummy_output,
dummy_output + kFrameLengthSamples),
- SetArgPointee<5>(AudioDecoder::kComfortNoise),
+ SetArgPointee<4>(AudioDecoder::kComfortNoise),
Return(kFrameLengthSamples)))
.RetiresOnSaturation();
// Then the mock decoder fails. A common reason for failure is that the buffer
// is too short.
- EXPECT_CALL(mock_decoder, Decode(nullptr, 0, kSampleRateHz, _, _, _))
+ EXPECT_CALL(mock_decoder, DecodeInternal(nullptr, 0, kSampleRateHz, _, _))
.WillOnce(Return(-1))
.RetiresOnSaturation();
// Mock decoder finally returns to normal.
- EXPECT_CALL(mock_decoder, Decode(nullptr, 0, kSampleRateHz, _, _, _))
+ EXPECT_CALL(mock_decoder, DecodeInternal(nullptr, 0, kSampleRateHz, _, _))
.Times(2)
.WillRepeatedly(
- DoAll(SetArrayArgument<4>(dummy_output,
- dummy_output + kFrameLengthSamples),
- SetArgPointee<5>(AudioDecoder::kComfortNoise),
+ DoAll(SetArrayArgument<3>(dummy_output,
+ dummy_output + kFrameLengthSamples),
+ SetArgPointee<4>(AudioDecoder::kComfortNoise),
Return(kFrameLengthSamples)));
}
EXPECT_EQ(NetEq::kOK, neteq_->RegisterExternalDecoder(
&mock_decoder, NetEqDecoder::kDecoderPCM16B,
- kPayloadType, kSampleRateHz));
+ "dummy name", kPayloadType, kSampleRateHz));
// Insert 2 packets. This will put NetEq into codec-internal CNG mode.
for (int i = 0; i < 2; ++i) {
rtp_header.header.sequenceNumber += 1;
rtp_header.header.timestamp += kFrameLengthSamples;
EXPECT_EQ(NetEq::kOK,
- neteq_->InsertPacket(rtp_header, payload, kPayloadLengthBytes,
- kReceiveTime));
+ neteq_->InsertPacket(rtp_header, payload, kReceiveTime));
}
// Pull audio.
const size_t kMaxOutputSize = static_cast<size_t>(10 * kSampleRateHz / 1000);
int16_t output[kMaxOutputSize];
size_t samples_per_channel;
- int num_channels;
+ size_t num_channels;
NetEqOutputType type;
EXPECT_EQ(NetEq::kOK,
neteq_->GetAudio(kMaxOutputSize, output, &samples_per_channel,
&num_channels, &type));
EXPECT_EQ(kMaxOutputSize, samples_per_channel);
- EXPECT_EQ(1, num_channels);
+ EXPECT_EQ(1u, num_channels);
EXPECT_EQ(kOutputCNG, type);
// Pull audio again. Decoder fails.
@@ -1232,7 +1214,7 @@ TEST_F(NetEqImplTest, DecodingErrorDuringInternalCng) {
EXPECT_EQ(NetEq::kDecoderErrorCode, neteq_->LastError());
EXPECT_EQ(kDecoderErrorCode, neteq_->LastDecoderError());
EXPECT_EQ(kMaxOutputSize, samples_per_channel);
- EXPECT_EQ(1, num_channels);
+ EXPECT_EQ(1u, num_channels);
// TODO(minyue): NetEq should perhaps give kOutputPLC, since this is actually
// an expansion.
EXPECT_EQ(kOutputCNG, type);
@@ -1242,10 +1224,19 @@ TEST_F(NetEqImplTest, DecodingErrorDuringInternalCng) {
neteq_->GetAudio(kMaxOutputSize, output, &samples_per_channel,
&num_channels, &type));
EXPECT_EQ(kMaxOutputSize, samples_per_channel);
- EXPECT_EQ(1, num_channels);
+ EXPECT_EQ(1u, num_channels);
EXPECT_EQ(kOutputCNG, type);
EXPECT_CALL(mock_decoder, Die());
}
+// Tests that the return value from last_output_sample_rate_hz() is equal to the
+// configured initial sample rate.
+TEST_F(NetEqImplTest, InitialLastOutputSampleRate) {
+ UseNoMocks();
+ config_.sample_rate_hz = 48000;
+ CreateInstance();
+ EXPECT_EQ(48000, neteq_->last_output_sample_rate_hz());
+}
+
}  // namespace webrtc
diff --git a/webrtc/modules/audio_coding/neteq/neteq_network_stats_unittest.cc b/webrtc/modules/audio_coding/neteq/neteq_network_stats_unittest.cc
index 16fa04c234..34ca9ea856 100644
--- a/webrtc/modules/audio_coding/neteq/neteq_network_stats_unittest.cc
+++ b/webrtc/modules/audio_coding/neteq/neteq_network_stats_unittest.cc
@@ -191,8 +191,7 @@ struct NetEqNetworkStatsCheck {
frame_size_samples_,
&rtp_header_);
if (!Lost(next_send_time)) {
- InsertPacket(rtp_header_, payload_, kPayloadSizeByte,
- next_send_time);
+ InsertPacket(rtp_header_, payload_, next_send_time);
}
}
GetOutputAudio(kMaxOutputSize, output_, &output_type);
diff --git a/webrtc/modules/audio_coding/neteq/neteq_stereo_unittest.cc b/webrtc/modules/audio_coding/neteq/neteq_stereo_unittest.cc
index 66874b8a50..d3f59ec668 100644
--- a/webrtc/modules/audio_coding/neteq/neteq_stereo_unittest.cc
+++ b/webrtc/modules/audio_coding/neteq/neteq_stereo_unittest.cc
@@ -16,19 +16,18 @@
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/modules/audio_coding/codecs/pcm16b/include/pcm16b.h"
+#include "webrtc/modules/audio_coding/codecs/pcm16b/pcm16b.h"
#include "webrtc/modules/audio_coding/neteq/include/neteq.h"
#include "webrtc/modules/audio_coding/neteq/tools/input_audio_file.h"
#include "webrtc/modules/audio_coding/neteq/tools/rtp_generator.h"
#include "webrtc/test/testsupport/fileutils.h"
-#include "webrtc/test/testsupport/gtest_disable.h"
namespace webrtc {
struct TestParameters {
int frame_size;
int sample_rate;
- int num_channels;
+ size_t num_channels;
};
// This is a parameterized test. The test parameters are supplied through a
@@ -127,11 +126,10 @@ class NetEqStereoTest : public ::testing::TestWithParam<TestParameters> {
default:
FAIL() << "We shouldn't get here.";
}
+ ASSERT_EQ(NetEq::kOK, neteq_mono_->RegisterPayloadType(mono_decoder, "mono",
+ kPayloadTypeMono));
ASSERT_EQ(NetEq::kOK,
- neteq_mono_->RegisterPayloadType(mono_decoder,
- kPayloadTypeMono));
- ASSERT_EQ(NetEq::kOK,
- neteq_->RegisterPayloadType(multi_decoder,
+ neteq_->RegisterPayloadType(multi_decoder, "multi-channel",
kPayloadTypeMulti));
}
@@ -165,7 +163,7 @@ class NetEqStereoTest : public ::testing::TestWithParam<TestParameters> {
void VerifyOutput(size_t num_samples) {
for (size_t i = 0; i < num_samples; ++i) {
- for (int j = 0; j < num_channels_; ++j) {
+ for (size_t j = 0; j < num_channels_; ++j) {
ASSERT_EQ(output_[i], output_multi_channel_[i * num_channels_ + j]) <<
"Diff in sample " << i << ", channel " << j << ".";
}
@@ -196,14 +194,16 @@ class NetEqStereoTest : public ::testing::TestWithParam<TestParameters> {
while (time_now >= next_arrival_time) {
// Insert packet in mono instance.
ASSERT_EQ(NetEq::kOK,
- neteq_mono_->InsertPacket(rtp_header_mono_, encoded_,
- payload_size_bytes_,
+ neteq_mono_->InsertPacket(rtp_header_mono_,
+ rtc::ArrayView<const uint8_t>(
+ encoded_, payload_size_bytes_),
next_arrival_time));
// Insert packet in multi-channel instance.
- ASSERT_EQ(NetEq::kOK,
- neteq_->InsertPacket(rtp_header_, encoded_multi_channel_,
- multi_payload_size_bytes_,
- next_arrival_time));
+ ASSERT_EQ(NetEq::kOK, neteq_->InsertPacket(
+ rtp_header_, rtc::ArrayView<const uint8_t>(
+ encoded_multi_channel_,
+ multi_payload_size_bytes_),
+ next_arrival_time));
// Get next input packets (mono and multi-channel).
do {
next_send_time = GetNewPackets();
@@ -214,12 +214,12 @@ class NetEqStereoTest : public ::testing::TestWithParam<TestParameters> {
NetEqOutputType output_type;
// Get audio from mono instance.
size_t samples_per_channel;
- int num_channels;
+ size_t num_channels;
EXPECT_EQ(NetEq::kOK,
neteq_mono_->GetAudio(kMaxBlockSize, output_,
&samples_per_channel, &num_channels,
&output_type));
- EXPECT_EQ(1, num_channels);
+ EXPECT_EQ(1u, num_channels);
EXPECT_EQ(output_size_samples_, samples_per_channel);
// Get audio from multi-channel instance.
ASSERT_EQ(NetEq::kOK,
@@ -239,7 +239,7 @@ class NetEqStereoTest : public ::testing::TestWithParam<TestParameters> {
}
}
- const int num_channels_;
+ const size_t num_channels_;
const int sample_rate_hz_;
const int samples_per_ms_;
const int frame_size_ms_;
@@ -275,7 +275,12 @@ class NetEqStereoTestNoJitter : public NetEqStereoTest {
}
};
-TEST_P(NetEqStereoTestNoJitter, DISABLED_ON_ANDROID(RunTest)) {
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_RunTest DISABLED_RunTest
+#else
+#define MAYBE_RunTest RunTest
+#endif
+TEST_P(NetEqStereoTestNoJitter, MAYBE_RunTest) {
RunTest(8);
}
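MAYBE_RunTest works because gtest disables, at registration time, any test whose name starts with DISABLED_ (such tests can still be forced with --gtest_also_run_disabled_tests); the #if above merely selects the name. The same pattern, sketched with a hypothetical test:

  #if defined(WEBRTC_ANDROID)
  #define MAYBE_MyCase DISABLED_MyCase
  #else
  #define MAYBE_MyCase MyCase
  #endif
  TEST(MySuite, MAYBE_MyCase) {
    // Compiled on every platform; registered as disabled on Android.
  }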
@@ -300,7 +305,7 @@ class NetEqStereoTestPositiveDrift : public NetEqStereoTest {
double drift_factor;
};
-TEST_P(NetEqStereoTestPositiveDrift, DISABLED_ON_ANDROID(RunTest)) {
+TEST_P(NetEqStereoTestPositiveDrift, MAYBE_RunTest) {
RunTest(100);
}
@@ -313,7 +318,7 @@ class NetEqStereoTestNegativeDrift : public NetEqStereoTestPositiveDrift {
}
};
-TEST_P(NetEqStereoTestNegativeDrift, DISABLED_ON_ANDROID(RunTest)) {
+TEST_P(NetEqStereoTestNegativeDrift, MAYBE_RunTest) {
RunTest(100);
}
@@ -341,7 +346,7 @@ class NetEqStereoTestDelays : public NetEqStereoTest {
int frame_index_;
};
-TEST_P(NetEqStereoTestDelays, DISABLED_ON_ANDROID(RunTest)) {
+TEST_P(NetEqStereoTestDelays, MAYBE_RunTest) {
RunTest(1000);
}
@@ -360,7 +365,10 @@ class NetEqStereoTestLosses : public NetEqStereoTest {
int frame_index_;
};
-TEST_P(NetEqStereoTestLosses, DISABLED_ON_ANDROID(RunTest)) {
+// TODO(pbos): Enable on non-Android. This test started failing while it was
+// accidentally disabled on all platforms, not just Android.
+// https://bugs.chromium.org/p/webrtc/issues/detail?id=5387
+TEST_P(NetEqStereoTestLosses, DISABLED_RunTest) {
RunTest(100);
}
diff --git a/webrtc/modules/audio_coding/neteq/neteq_tests.gypi b/webrtc/modules/audio_coding/neteq/neteq_tests.gypi
index ee9583ab85..f02d3deee9 100644
--- a/webrtc/modules/audio_coding/neteq/neteq_tests.gypi
+++ b/webrtc/modules/audio_coding/neteq/neteq_tests.gypi
@@ -39,6 +39,21 @@
'defines': [
],
}, # neteq_rtpplay
+ {
+ 'target_name': 'neteq_unittest_proto',
+ 'type': 'static_library',
+ 'sources': [
+ 'neteq_unittest.proto',
+ ],
+ 'variables': {
+ 'proto_in_dir': '.',
+ # Workaround to protect against gyp's pathname relativization when
+ # this file is included by modules.gyp.
+ 'proto_out_protected': 'webrtc/audio_coding/neteq',
+ 'proto_out_dir': '<(proto_out_protected)',
+ },
+ 'includes': ['../../../build/protoc.gypi',],
+ },
],
}],
],
@@ -56,6 +71,7 @@
'isac',
'neteq_test_tools', # Test helpers
'pcm16b',
+ 'webrtc_opus',
],
'defines': [
'CODEC_ILBC',
@@ -72,6 +88,7 @@
'CODEC_CNGCODEC32',
'CODEC_ATEVENT_DECODE',
'CODEC_RED',
+ 'CODEC_OPUS',
],
'include_dirs': [
'include',
diff --git a/webrtc/modules/audio_coding/neteq/neteq_unittest.cc b/webrtc/modules/audio_coding/neteq/neteq_unittest.cc
index 4340f54975..8d52c615da 100644
--- a/webrtc/modules/audio_coding/neteq/neteq_unittest.cc
+++ b/webrtc/modules/audio_coding/neteq/neteq_unittest.cc
@@ -28,29 +28,91 @@
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/modules/audio_coding/neteq/tools/audio_loop.h"
#include "webrtc/modules/audio_coding/neteq/tools/rtp_file_source.h"
-#include "webrtc/modules/audio_coding/codecs/pcm16b/include/pcm16b.h"
+#include "webrtc/modules/audio_coding/codecs/pcm16b/pcm16b.h"
#include "webrtc/test/testsupport/fileutils.h"
-#include "webrtc/test/testsupport/gtest_disable.h"
#include "webrtc/typedefs.h"
+#ifdef WEBRTC_NETEQ_UNITTEST_BITEXACT
+#ifdef WEBRTC_ANDROID_PLATFORM_BUILD
+#include "external/webrtc/webrtc/modules/audio_coding/neteq/neteq_unittest.pb.h"
+#else
+#include "webrtc/audio_coding/neteq/neteq_unittest.pb.h"
+#endif
+#endif
+
DEFINE_bool(gen_ref, false, "Generate reference files.");
-namespace webrtc {
+namespace {
-static bool IsAllZero(const int16_t* buf, size_t buf_length) {
+bool IsAllZero(const int16_t* buf, size_t buf_length) {
bool all_zero = true;
for (size_t n = 0; n < buf_length && all_zero; ++n)
all_zero = buf[n] == 0;
return all_zero;
}
-static bool IsAllNonZero(const int16_t* buf, size_t buf_length) {
+bool IsAllNonZero(const int16_t* buf, size_t buf_length) {
bool all_non_zero = true;
for (size_t n = 0; n < buf_length && all_non_zero; ++n)
all_non_zero = buf[n] != 0;
return all_non_zero;
}
+#ifdef WEBRTC_NETEQ_UNITTEST_BITEXACT
+void Convert(const webrtc::NetEqNetworkStatistics& stats_raw,
+ webrtc::neteq_unittest::NetEqNetworkStatistics* stats) {
+ stats->set_current_buffer_size_ms(stats_raw.current_buffer_size_ms);
+ stats->set_preferred_buffer_size_ms(stats_raw.preferred_buffer_size_ms);
+ stats->set_jitter_peaks_found(stats_raw.jitter_peaks_found);
+ stats->set_packet_loss_rate(stats_raw.packet_loss_rate);
+ stats->set_packet_discard_rate(stats_raw.packet_discard_rate);
+ stats->set_expand_rate(stats_raw.expand_rate);
+ stats->set_speech_expand_rate(stats_raw.speech_expand_rate);
+ stats->set_preemptive_rate(stats_raw.preemptive_rate);
+ stats->set_accelerate_rate(stats_raw.accelerate_rate);
+ stats->set_secondary_decoded_rate(stats_raw.secondary_decoded_rate);
+ stats->set_clockdrift_ppm(stats_raw.clockdrift_ppm);
+ stats->set_added_zero_samples(stats_raw.added_zero_samples);
+ stats->set_mean_waiting_time_ms(stats_raw.mean_waiting_time_ms);
+ stats->set_median_waiting_time_ms(stats_raw.median_waiting_time_ms);
+ stats->set_min_waiting_time_ms(stats_raw.min_waiting_time_ms);
+ stats->set_max_waiting_time_ms(stats_raw.max_waiting_time_ms);
+}
+
+void Convert(const webrtc::RtcpStatistics& stats_raw,
+ webrtc::neteq_unittest::RtcpStatistics* stats) {
+ stats->set_fraction_lost(stats_raw.fraction_lost);
+ stats->set_cumulative_lost(stats_raw.cumulative_lost);
+ stats->set_extended_max_sequence_number(
+ stats_raw.extended_max_sequence_number);
+ stats->set_jitter(stats_raw.jitter);
+}
+
+void WriteMessage(FILE* file, const std::string& message) {
+ int32_t size = message.length();
+ ASSERT_EQ(1u, fwrite(&size, sizeof(size), 1, file));
+ if (size <= 0)
+ return;
+ ASSERT_EQ(static_cast<size_t>(size),
+ fwrite(message.data(), sizeof(char), size, file));
+}
+
+void ReadMessage(FILE* file, std::string* message) {
+ int32_t size;
+ ASSERT_EQ(1u, fread(&size, sizeof(size), 1, file));
+ if (size <= 0)
+ return;
+ rtc::scoped_ptr<char[]> buffer(new char[size]);
+ ASSERT_EQ(static_cast<size_t>(size),
+ fread(buffer.get(), sizeof(char), size, file));
+ message->assign(buffer.get(), size);
+}
+#endif // WEBRTC_NETEQ_UNITTEST_BITEXACT
+
+} // namespace
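WriteMessage() and ReadMessage() above frame each serialized proto with a raw host-endian int32_t byte count, so a reference file is simply a sequence of <size, payload> records. A minimal sketch of draining such a file under that assumption (ReadAllMessages is a hypothetical helper, not part of the test):

  #include <cstdint>
  #include <cstdio>
  #include <string>
  #include <utility>
  #include <vector>

  // Reads every length-prefixed record; stops at EOF or a truncated record.
  void ReadAllMessages(FILE* file, std::vector<std::string>* messages) {
    int32_t size;
    while (fread(&size, sizeof(size), 1, file) == 1 && size > 0) {
      std::string buf(static_cast<size_t>(size), '\0');
      if (fread(&buf[0], 1, buf.size(), file) != buf.size())
        break;  // Truncated record.
      messages->push_back(std::move(buf));
    }
  }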
+
+namespace webrtc {
+
class RefFiles {
public:
RefFiles(const std::string& input_file, const std::string& output_file);
@@ -128,92 +190,84 @@ void RefFiles::ReadFromFileAndCompare(const T (&test_results)[n],
}
}
-void RefFiles::WriteToFile(const NetEqNetworkStatistics& stats) {
- if (output_fp_) {
- ASSERT_EQ(1u, fwrite(&stats, sizeof(NetEqNetworkStatistics), 1,
- output_fp_));
- }
+void RefFiles::WriteToFile(const NetEqNetworkStatistics& stats_raw) {
+#ifdef WEBRTC_NETEQ_UNITTEST_BITEXACT
+ if (!output_fp_)
+ return;
+ neteq_unittest::NetEqNetworkStatistics stats;
+ Convert(stats_raw, &stats);
+
+ std::string stats_string;
+ ASSERT_TRUE(stats.SerializeToString(&stats_string));
+ WriteMessage(output_fp_, stats_string);
+#else
+ FAIL() << "Writing to reference file requires Proto Buffer.";
+#endif // WEBRTC_NETEQ_UNITTEST_BITEXACT
}
void RefFiles::ReadFromFileAndCompare(
const NetEqNetworkStatistics& stats) {
- // TODO(minyue): Update resource/audio_coding/neteq_network_stats.dat and
- // resource/audio_coding/neteq_network_stats_win32.dat.
- struct NetEqNetworkStatisticsOld {
- uint16_t current_buffer_size_ms; // Current jitter buffer size in ms.
- uint16_t preferred_buffer_size_ms; // Target buffer size in ms.
- uint16_t jitter_peaks_found; // 1 if adding extra delay due to peaky
- // jitter; 0 otherwise.
- uint16_t packet_loss_rate; // Loss rate (network + late) in Q14.
- uint16_t packet_discard_rate; // Late loss rate in Q14.
- uint16_t expand_rate; // Fraction (of original stream) of synthesized
- // audio inserted through expansion (in Q14).
- uint16_t preemptive_rate; // Fraction of data inserted through pre-emptive
- // expansion (in Q14).
- uint16_t accelerate_rate; // Fraction of data removed through acceleration
- // (in Q14).
- int32_t clockdrift_ppm; // Average clock-drift in parts-per-million
- // (positive or negative).
- int added_zero_samples; // Number of zero samples added in "off" mode.
- };
- if (input_fp_) {
- // Read from ref file.
- size_t stat_size = sizeof(NetEqNetworkStatisticsOld);
- NetEqNetworkStatisticsOld ref_stats;
- ASSERT_EQ(1u, fread(&ref_stats, stat_size, 1, input_fp_));
- // Compare
- ASSERT_EQ(stats.current_buffer_size_ms, ref_stats.current_buffer_size_ms);
- ASSERT_EQ(stats.preferred_buffer_size_ms,
- ref_stats.preferred_buffer_size_ms);
- ASSERT_EQ(stats.jitter_peaks_found, ref_stats.jitter_peaks_found);
- ASSERT_EQ(stats.packet_loss_rate, ref_stats.packet_loss_rate);
- ASSERT_EQ(stats.packet_discard_rate, ref_stats.packet_discard_rate);
- ASSERT_EQ(stats.expand_rate, ref_stats.expand_rate);
- ASSERT_EQ(stats.preemptive_rate, ref_stats.preemptive_rate);
- ASSERT_EQ(stats.accelerate_rate, ref_stats.accelerate_rate);
- ASSERT_EQ(stats.clockdrift_ppm, ref_stats.clockdrift_ppm);
- ASSERT_EQ(stats.added_zero_samples,
- static_cast<size_t>(ref_stats.added_zero_samples));
- ASSERT_EQ(stats.secondary_decoded_rate, 0);
- ASSERT_LE(stats.speech_expand_rate, ref_stats.expand_rate);
- }
+#ifdef WEBRTC_NETEQ_UNITTEST_BITEXACT
+ if (!input_fp_)
+ return;
+
+ std::string stats_string;
+ ReadMessage(input_fp_, &stats_string);
+ neteq_unittest::NetEqNetworkStatistics ref_stats;
+ ASSERT_TRUE(ref_stats.ParseFromString(stats_string));
+
+ // Compare
+ ASSERT_EQ(stats.current_buffer_size_ms, ref_stats.current_buffer_size_ms());
+ ASSERT_EQ(stats.preferred_buffer_size_ms,
+ ref_stats.preferred_buffer_size_ms());
+ ASSERT_EQ(stats.jitter_peaks_found, ref_stats.jitter_peaks_found());
+ ASSERT_EQ(stats.packet_loss_rate, ref_stats.packet_loss_rate());
+ ASSERT_EQ(stats.packet_discard_rate, ref_stats.packet_discard_rate());
+ ASSERT_EQ(stats.expand_rate, ref_stats.expand_rate());
+ ASSERT_EQ(stats.preemptive_rate, ref_stats.preemptive_rate());
+ ASSERT_EQ(stats.accelerate_rate, ref_stats.accelerate_rate());
+ ASSERT_EQ(stats.clockdrift_ppm, ref_stats.clockdrift_ppm());
+ ASSERT_EQ(stats.added_zero_samples, ref_stats.added_zero_samples());
+ ASSERT_EQ(stats.secondary_decoded_rate, ref_stats.secondary_decoded_rate());
+ ASSERT_LE(stats.speech_expand_rate, ref_stats.expand_rate());
+#else
+ FAIL() << "Reading from reference file requires Proto Buffer.";
+#endif // WEBRTC_NETEQ_UNITTEST_BITEXACT
}
-void RefFiles::WriteToFile(const RtcpStatistics& stats) {
- if (output_fp_) {
- ASSERT_EQ(1u, fwrite(&(stats.fraction_lost), sizeof(stats.fraction_lost), 1,
- output_fp_));
- ASSERT_EQ(1u, fwrite(&(stats.cumulative_lost),
- sizeof(stats.cumulative_lost), 1, output_fp_));
- ASSERT_EQ(1u, fwrite(&(stats.extended_max_sequence_number),
- sizeof(stats.extended_max_sequence_number), 1,
- output_fp_));
- ASSERT_EQ(1u, fwrite(&(stats.jitter), sizeof(stats.jitter), 1,
- output_fp_));
- }
+void RefFiles::WriteToFile(const RtcpStatistics& stats_raw) {
+#ifdef WEBRTC_NETEQ_UNITTEST_BITEXACT
+ if (!output_fp_)
+ return;
+ neteq_unittest::RtcpStatistics stats;
+ Convert(stats_raw, &stats);
+
+ std::string stats_string;
+ ASSERT_TRUE(stats.SerializeToString(&stats_string));
+ WriteMessage(output_fp_, stats_string);
+#else
+ FAIL() << "Writing to reference file requires Proto Buffer.";
+#endif // WEBRTC_NETEQ_UNITTEST_BITEXACT
}
-void RefFiles::ReadFromFileAndCompare(
- const RtcpStatistics& stats) {
- if (input_fp_) {
- // Read from ref file.
- RtcpStatistics ref_stats;
- ASSERT_EQ(1u, fread(&(ref_stats.fraction_lost),
- sizeof(ref_stats.fraction_lost), 1, input_fp_));
- ASSERT_EQ(1u, fread(&(ref_stats.cumulative_lost),
- sizeof(ref_stats.cumulative_lost), 1, input_fp_));
- ASSERT_EQ(1u, fread(&(ref_stats.extended_max_sequence_number),
- sizeof(ref_stats.extended_max_sequence_number), 1,
- input_fp_));
- ASSERT_EQ(1u, fread(&(ref_stats.jitter), sizeof(ref_stats.jitter), 1,
- input_fp_));
- // Compare
- ASSERT_EQ(ref_stats.fraction_lost, stats.fraction_lost);
- ASSERT_EQ(ref_stats.cumulative_lost, stats.cumulative_lost);
- ASSERT_EQ(ref_stats.extended_max_sequence_number,
- stats.extended_max_sequence_number);
- ASSERT_EQ(ref_stats.jitter, stats.jitter);
- }
+void RefFiles::ReadFromFileAndCompare(const RtcpStatistics& stats) {
+#ifdef WEBRTC_NETEQ_UNITTEST_BITEXACT
+ if (!input_fp_)
+ return;
+ std::string stats_string;
+ ReadMessage(input_fp_, &stats_string);
+ neteq_unittest::RtcpStatistics ref_stats;
+ ASSERT_TRUE(ref_stats.ParseFromString(stats_string));
+
+ // Compare
+ ASSERT_EQ(stats.fraction_lost, ref_stats.fraction_lost());
+ ASSERT_EQ(stats.cumulative_lost, ref_stats.cumulative_lost());
+ ASSERT_EQ(stats.extended_max_sequence_number,
+ ref_stats.extended_max_sequence_number());
+ ASSERT_EQ(stats.jitter, ref_stats.jitter());
+#else
+ FAIL() << "Reading from reference file requires Proto Buffer.";
+#endif // WEBRTC_NETEQ_UNITTEST_BITEXACT
}
class NetEqDecodingTest : public ::testing::Test {
@@ -224,7 +278,8 @@ class NetEqDecodingTest : public ::testing::Test {
static const size_t kBlockSize8kHz = kTimeStepMs * 8;
static const size_t kBlockSize16kHz = kTimeStepMs * 16;
static const size_t kBlockSize32kHz = kTimeStepMs * 32;
- static const size_t kMaxBlockSize = kBlockSize32kHz;
+ static const size_t kBlockSize48kHz = kTimeStepMs * 48;
+ static const size_t kMaxBlockSize = kBlockSize48kHz;
static const int kInitSampleRateHz = 8000;
NetEqDecodingTest();
@@ -234,10 +289,12 @@ class NetEqDecodingTest : public ::testing::Test {
void LoadDecoders();
void OpenInputFile(const std::string &rtp_file);
void Process(size_t* out_len);
+
void DecodeAndCompare(const std::string& rtp_file,
const std::string& ref_file,
const std::string& stat_ref_file,
const std::string& rtcp_ref_file);
+
static void PopulateRtpInfo(int frame_index,
int timestamp,
WebRtcRTPHeader* rtp_info);
@@ -304,32 +361,45 @@ void NetEqDecodingTest::TearDown() {
void NetEqDecodingTest::LoadDecoders() {
// Load PCMu.
- ASSERT_EQ(0, neteq_->RegisterPayloadType(NetEqDecoder::kDecoderPCMu, 0));
+ ASSERT_EQ(0,
+ neteq_->RegisterPayloadType(NetEqDecoder::kDecoderPCMu, "pcmu", 0));
// Load PCMa.
- ASSERT_EQ(0, neteq_->RegisterPayloadType(NetEqDecoder::kDecoderPCMa, 8));
+ ASSERT_EQ(0,
+ neteq_->RegisterPayloadType(NetEqDecoder::kDecoderPCMa, "pcma", 8));
#ifdef WEBRTC_CODEC_ILBC
// Load iLBC.
- ASSERT_EQ(0, neteq_->RegisterPayloadType(NetEqDecoder::kDecoderILBC, 102));
+ ASSERT_EQ(
+ 0, neteq_->RegisterPayloadType(NetEqDecoder::kDecoderILBC, "ilbc", 102));
#endif
#if defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)
// Load iSAC.
- ASSERT_EQ(0, neteq_->RegisterPayloadType(NetEqDecoder::kDecoderISAC, 103));
+ ASSERT_EQ(
+ 0, neteq_->RegisterPayloadType(NetEqDecoder::kDecoderISAC, "isac", 103));
#endif
#ifdef WEBRTC_CODEC_ISAC
// Load iSAC SWB.
- ASSERT_EQ(0, neteq_->RegisterPayloadType(NetEqDecoder::kDecoderISACswb, 104));
+ ASSERT_EQ(0, neteq_->RegisterPayloadType(NetEqDecoder::kDecoderISACswb,
+ "isac-swb", 104));
+#endif
+#ifdef WEBRTC_CODEC_OPUS
+ ASSERT_EQ(0, neteq_->RegisterPayloadType(NetEqDecoder::kDecoderOpus,
+ "opus", 111));
#endif
// Load PCM16B nb.
- ASSERT_EQ(0, neteq_->RegisterPayloadType(NetEqDecoder::kDecoderPCM16B, 93));
+ ASSERT_EQ(0, neteq_->RegisterPayloadType(NetEqDecoder::kDecoderPCM16B,
+ "pcm16-nb", 93));
// Load PCM16B wb.
- ASSERT_EQ(0, neteq_->RegisterPayloadType(NetEqDecoder::kDecoderPCM16Bwb, 94));
+ ASSERT_EQ(0, neteq_->RegisterPayloadType(NetEqDecoder::kDecoderPCM16Bwb,
+ "pcm16-wb", 94));
// Load PCM16B swb32.
- ASSERT_EQ(
- 0, neteq_->RegisterPayloadType(NetEqDecoder::kDecoderPCM16Bswb32kHz, 95));
+ ASSERT_EQ(0, neteq_->RegisterPayloadType(NetEqDecoder::kDecoderPCM16Bswb32kHz,
+ "pcm16-swb32", 95));
// Load CNG 8 kHz.
- ASSERT_EQ(0, neteq_->RegisterPayloadType(NetEqDecoder::kDecoderCNGnb, 13));
+ ASSERT_EQ(0, neteq_->RegisterPayloadType(NetEqDecoder::kDecoderCNGnb,
+ "cng-nb", 13));
// Load CNG 16 kHz.
- ASSERT_EQ(0, neteq_->RegisterPayloadType(NetEqDecoder::kDecoderCNGwb, 98));
+ ASSERT_EQ(0, neteq_->RegisterPayloadType(NetEqDecoder::kDecoderCNGwb,
+ "cng-wb", 98));
}
void NetEqDecodingTest::OpenInputFile(const std::string &rtp_file) {
@@ -343,10 +413,11 @@ void NetEqDecodingTest::Process(size_t* out_len) {
WebRtcRTPHeader rtp_header;
packet_->ConvertHeader(&rtp_header);
ASSERT_EQ(0, neteq_->InsertPacket(
- rtp_header, packet_->payload(),
- packet_->payload_length_bytes(),
- static_cast<uint32_t>(
- packet_->time_ms() * (output_sample_rate_ / 1000))));
+ rtp_header,
+ rtc::ArrayView<const uint8_t>(
+ packet_->payload(), packet_->payload_length_bytes()),
+ static_cast<uint32_t>(packet_->time_ms() *
+ (output_sample_rate_ / 1000))));
}
// Get next packet.
packet_.reset(rtp_source_->NextPacket());
@@ -354,13 +425,15 @@ void NetEqDecodingTest::Process(size_t* out_len) {
// Get audio from NetEq.
NetEqOutputType type;
- int num_channels;
+ size_t num_channels;
ASSERT_EQ(0, neteq_->GetAudio(kMaxBlockSize, out_data_, out_len,
&num_channels, &type));
ASSERT_TRUE((*out_len == kBlockSize8kHz) ||
(*out_len == kBlockSize16kHz) ||
- (*out_len == kBlockSize32kHz));
+ (*out_len == kBlockSize32kHz) ||
+ (*out_len == kBlockSize48kHz));
output_sample_rate_ = static_cast<int>(*out_len / 10 * 1000);
+ EXPECT_EQ(output_sample_rate_, neteq_->last_output_sample_rate_hz());
// Increase time.
sim_clock_ += kTimeStepMs;
@@ -442,17 +515,17 @@ void NetEqDecodingTest::PopulateCng(int frame_index,
*payload_len = 1; // Only noise level, no spectral parameters.
}
-#if (defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISAC)) && \
+#if !defined(WEBRTC_IOS) && !defined(WEBRTC_ANDROID) && \
+ defined(WEBRTC_NETEQ_UNITTEST_BITEXACT) && \
+ (defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)) && \
defined(WEBRTC_CODEC_ILBC) && defined(WEBRTC_CODEC_G722)
-#define IF_ALL_CODECS(x) x
+#define MAYBE_TestBitExactness TestBitExactness
#else
-#define IF_ALL_CODECS(x) DISABLED_##x
+#define MAYBE_TestBitExactness DISABLED_TestBitExactness
#endif
-
-TEST_F(NetEqDecodingTest,
- DISABLED_ON_IOS(DISABLED_ON_ANDROID(IF_ALL_CODECS(TestBitExactness)))) {
- const std::string input_rtp_file = webrtc::test::ProjectRootPath() +
- "resources/audio_coding/neteq_universal_new.rtp";
+TEST_F(NetEqDecodingTest, MAYBE_TestBitExactness) {
+ const std::string input_rtp_file =
+ webrtc::test::ResourcePath("audio_coding/neteq_universal_new", "rtp");
// Note that neteq4_universal_ref.pcm and neteq4_universal_ref_win_32.pcm
// are identical. The latter could have been removed, but if clients still
// have a copy of the file, the test will fail.
@@ -480,6 +553,34 @@ TEST_F(NetEqDecodingTest,
}
}
+#if !defined(WEBRTC_IOS) && !defined(WEBRTC_ANDROID) && \
+ defined(WEBRTC_NETEQ_UNITTEST_BITEXACT) && \
+ defined(WEBRTC_CODEC_OPUS)
+#define MAYBE_TestOpusBitExactness TestOpusBitExactness
+#else
+#define MAYBE_TestOpusBitExactness DISABLED_TestOpusBitExactness
+#endif
+TEST_F(NetEqDecodingTest, MAYBE_TestOpusBitExactness) {
+ const std::string input_rtp_file =
+ webrtc::test::ResourcePath("audio_coding/neteq_opus", "rtp");
+ const std::string input_ref_file =
+ webrtc::test::ResourcePath("audio_coding/neteq4_opus_ref", "pcm");
+ const std::string network_stat_ref_file =
+ webrtc::test::ResourcePath("audio_coding/neteq4_opus_network_stats",
+ "dat");
+ const std::string rtcp_stat_ref_file =
+ webrtc::test::ResourcePath("audio_coding/neteq4_opus_rtcp_stats", "dat");
+
+ if (FLAGS_gen_ref) {
+ DecodeAndCompare(input_rtp_file, "", "", "");
+ } else {
+ DecodeAndCompare(input_rtp_file,
+ input_ref_file,
+ network_stat_ref_file,
+ rtcp_stat_ref_file);
+ }
+}
+
// Use fax mode to avoid time-scaling. This is to simplify the testing of
// packet waiting times in the packet buffer.
class NetEqDecodingTestFaxMode : public NetEqDecodingTest {
@@ -495,22 +596,19 @@ TEST_F(NetEqDecodingTestFaxMode, TestFrameWaitingTimeStatistics) {
const size_t kSamples = 10 * 16;
const size_t kPayloadBytes = kSamples * 2;
for (size_t i = 0; i < num_frames; ++i) {
- uint16_t payload[kSamples] = {0};
+ const uint8_t payload[kPayloadBytes] = {0};
WebRtcRTPHeader rtp_info;
rtp_info.header.sequenceNumber = i;
rtp_info.header.timestamp = i * kSamples;
rtp_info.header.ssrc = 0x1234; // Just an arbitrary SSRC.
rtp_info.header.payloadType = 94; // PCM16b WB codec.
rtp_info.header.markerBit = 0;
- ASSERT_EQ(0, neteq_->InsertPacket(
- rtp_info,
- reinterpret_cast<uint8_t*>(payload),
- kPayloadBytes, 0));
+ ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
}
// Pull out all data.
for (size_t i = 0; i < num_frames; ++i) {
size_t out_len;
- int num_channels;
+ size_t num_channels;
NetEqOutputType type;
ASSERT_EQ(0, neteq_->GetAudio(kMaxBlockSize, out_data_, &out_len,
&num_channels, &type));
@@ -549,13 +647,13 @@ TEST_F(NetEqDecodingTest, TestAverageInterArrivalTimeNegative) {
uint8_t payload[kPayloadBytes] = {0};
WebRtcRTPHeader rtp_info;
PopulateRtpInfo(frame_index, frame_index * kSamples, &rtp_info);
- ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, kPayloadBytes, 0));
+ ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
++frame_index;
}
// Pull out data once.
size_t out_len;
- int num_channels;
+ size_t num_channels;
NetEqOutputType type;
ASSERT_EQ(0, neteq_->GetAudio(kMaxBlockSize, out_data_, &out_len,
&num_channels, &type));
@@ -580,13 +678,13 @@ TEST_F(NetEqDecodingTest, TestAverageInterArrivalTimePositive) {
uint8_t payload[kPayloadBytes] = {0};
WebRtcRTPHeader rtp_info;
PopulateRtpInfo(frame_index, frame_index * kSamples, &rtp_info);
- ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, kPayloadBytes, 0));
+ ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
++frame_index;
}
// Pull out data once.
size_t out_len;
- int num_channels;
+ size_t num_channels;
NetEqOutputType type;
ASSERT_EQ(0, neteq_->GetAudio(kMaxBlockSize, out_data_, &out_len,
&num_channels, &type));
@@ -611,7 +709,7 @@ void NetEqDecodingTest::LongCngWithClockDrift(double drift_factor,
double next_input_time_ms = 0.0;
double t_ms;
size_t out_len;
- int num_channels;
+ size_t num_channels;
NetEqOutputType type;
// Insert speech for 5 seconds.
@@ -623,7 +721,7 @@ void NetEqDecodingTest::LongCngWithClockDrift(double drift_factor,
uint8_t payload[kPayloadBytes] = {0};
WebRtcRTPHeader rtp_info;
PopulateRtpInfo(seq_no, timestamp, &rtp_info);
- ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, kPayloadBytes, 0));
+ ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
++seq_no;
timestamp += kSamples;
next_input_time_ms += static_cast<double>(kFrameSizeMs) * drift_factor;
@@ -649,7 +747,9 @@ void NetEqDecodingTest::LongCngWithClockDrift(double drift_factor,
size_t payload_len;
WebRtcRTPHeader rtp_info;
PopulateCng(seq_no, timestamp, &rtp_info, payload, &payload_len);
- ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, payload_len, 0));
+ ASSERT_EQ(0, neteq_->InsertPacket(
+ rtp_info,
+ rtc::ArrayView<const uint8_t>(payload, payload_len), 0));
++seq_no;
timestamp += kCngPeriodSamples;
next_input_time_ms += static_cast<double>(kCngPeriodMs) * drift_factor;
@@ -696,7 +796,9 @@ void NetEqDecodingTest::LongCngWithClockDrift(double drift_factor,
size_t payload_len;
WebRtcRTPHeader rtp_info;
PopulateCng(seq_no, timestamp, &rtp_info, payload, &payload_len);
- ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, payload_len, 0));
+ ASSERT_EQ(0, neteq_->InsertPacket(
+ rtp_info,
+ rtc::ArrayView<const uint8_t>(payload, payload_len), 0));
++seq_no;
timestamp += kCngPeriodSamples;
next_input_time_ms += kCngPeriodMs * drift_factor;
@@ -712,7 +814,7 @@ void NetEqDecodingTest::LongCngWithClockDrift(double drift_factor,
uint8_t payload[kPayloadBytes] = {0};
WebRtcRTPHeader rtp_info;
PopulateRtpInfo(seq_no, timestamp, &rtp_info);
- ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, kPayloadBytes, 0));
+ ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
++seq_no;
timestamp += kSamples;
next_input_time_ms += kFrameSizeMs * drift_factor;
@@ -823,31 +925,30 @@ TEST_F(NetEqDecodingTest, UnknownPayloadType) {
WebRtcRTPHeader rtp_info;
PopulateRtpInfo(0, 0, &rtp_info);
rtp_info.header.payloadType = 1; // Not registered as a decoder.
- EXPECT_EQ(NetEq::kFail,
- neteq_->InsertPacket(rtp_info, payload, kPayloadBytes, 0));
+ EXPECT_EQ(NetEq::kFail, neteq_->InsertPacket(rtp_info, payload, 0));
EXPECT_EQ(NetEq::kUnknownRtpPayloadType, neteq_->LastError());
}
-#if defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)
-#define IF_ISAC(x) x
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_DecoderError DISABLED_DecoderError
#else
-#define IF_ISAC(x) DISABLED_##x
+#define MAYBE_DecoderError DecoderError
#endif
-
-TEST_F(NetEqDecodingTest, DISABLED_ON_ANDROID(IF_ISAC(DecoderError))) {
+#if defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)
+TEST_F(NetEqDecodingTest, MAYBE_DecoderError) {
const size_t kPayloadBytes = 100;
uint8_t payload[kPayloadBytes] = {0};
WebRtcRTPHeader rtp_info;
PopulateRtpInfo(0, 0, &rtp_info);
rtp_info.header.payloadType = 103; // iSAC, but the payload is invalid.
- EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload, kPayloadBytes, 0));
+ EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
NetEqOutputType type;
// Set all of |out_data_| to 1, and verify that it was set to 0 by the call
// to GetAudio.
for (size_t i = 0; i < kMaxBlockSize; ++i) {
out_data_[i] = 1;
}
- int num_channels;
+ size_t num_channels;
size_t samples_per_channel;
EXPECT_EQ(NetEq::kFail,
neteq_->GetAudio(kMaxBlockSize, out_data_,
@@ -872,6 +973,7 @@ TEST_F(NetEqDecodingTest, DISABLED_ON_ANDROID(IF_ISAC(DecoderError))) {
EXPECT_EQ(1, out_data_[i]);
}
}
+#endif
TEST_F(NetEqDecodingTest, GetAudioBeforeInsertPacket) {
NetEqOutputType type;
@@ -880,7 +982,7 @@ TEST_F(NetEqDecodingTest, GetAudioBeforeInsertPacket) {
for (size_t i = 0; i < kMaxBlockSize; ++i) {
out_data_[i] = 1;
}
- int num_channels;
+ size_t num_channels;
size_t samples_per_channel;
EXPECT_EQ(0, neteq_->GetAudio(kMaxBlockSize, out_data_,
&samples_per_channel,
@@ -894,6 +996,8 @@ TEST_F(NetEqDecodingTest, GetAudioBeforeInsertPacket) {
SCOPED_TRACE(ss.str()); // Print out the parameter values on failure.
EXPECT_EQ(0, out_data_[i]);
}
+ // Verify that the sample rate did not change from the initial configuration.
+ EXPECT_EQ(config_.sample_rate_hz, neteq_->last_output_sample_rate_hz());
}
class NetEqBgnTest : public NetEqDecodingTest {
@@ -934,27 +1038,29 @@ class NetEqBgnTest : public NetEqDecodingTest {
PopulateRtpInfo(0, 0, &rtp_info);
rtp_info.header.payloadType = payload_type;
- int number_channels = 0;
+ size_t number_channels = 0;
size_t samples_per_channel = 0;
uint32_t receive_timestamp = 0;
for (int n = 0; n < 10; ++n) { // Insert a few packets and get audio.
- size_t enc_len_bytes = WebRtcPcm16b_Encode(
- input.GetNextBlock(), expected_samples_per_channel, payload);
+ auto block = input.GetNextBlock();
+ ASSERT_EQ(expected_samples_per_channel, block.size());
+ size_t enc_len_bytes =
+ WebRtcPcm16b_Encode(block.data(), block.size(), payload);
ASSERT_EQ(enc_len_bytes, expected_samples_per_channel * 2);
number_channels = 0;
samples_per_channel = 0;
- ASSERT_EQ(0,
- neteq_->InsertPacket(rtp_info, payload, enc_len_bytes,
- receive_timestamp));
+ ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, rtc::ArrayView<const uint8_t>(
+ payload, enc_len_bytes),
+ receive_timestamp));
ASSERT_EQ(0,
neteq_->GetAudio(kBlockSize32kHz,
output,
&samples_per_channel,
&number_channels,
&type));
- ASSERT_EQ(1, number_channels);
+ ASSERT_EQ(1u, number_channels);
ASSERT_EQ(expected_samples_per_channel, samples_per_channel);
ASSERT_EQ(kOutputNormal, type);
@@ -976,7 +1082,7 @@ class NetEqBgnTest : public NetEqDecodingTest {
&samples_per_channel,
&number_channels,
&type));
- ASSERT_EQ(1, number_channels);
+ ASSERT_EQ(1u, number_channels);
ASSERT_EQ(expected_samples_per_channel, samples_per_channel);
// To be able to test the fading of background noise we need at least to
@@ -997,7 +1103,7 @@ class NetEqBgnTest : public NetEqDecodingTest {
&samples_per_channel,
&number_channels,
&type));
- ASSERT_EQ(1, number_channels);
+ ASSERT_EQ(1u, number_channels);
ASSERT_EQ(expected_samples_per_channel, samples_per_channel);
if (type == kOutputPLCtoCNG) {
plc_to_cng = true;
@@ -1065,7 +1171,8 @@ TEST_F(NetEqBgnTestFade, RunTest) {
CheckBgn(32000);
}
-TEST_F(NetEqDecodingTest, IF_ISAC(SyncPacketInsert)) {
+#if defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)
+TEST_F(NetEqDecodingTest, SyncPacketInsert) {
WebRtcRTPHeader rtp_info;
uint32_t receive_timestamp = 0;
// For readability, use the following payloads instead of the defaults of
@@ -1081,20 +1188,20 @@ TEST_F(NetEqDecodingTest, IF_ISAC(SyncPacketInsert)) {
// Register decoders.
ASSERT_EQ(0, neteq_->RegisterPayloadType(NetEqDecoder::kDecoderPCM16Bwb,
- kPcm16WbPayloadType));
+ "pcm16-wb", kPcm16WbPayloadType));
ASSERT_EQ(0, neteq_->RegisterPayloadType(NetEqDecoder::kDecoderCNGnb,
- kCngNbPayloadType));
+ "cng-nb", kCngNbPayloadType));
ASSERT_EQ(0, neteq_->RegisterPayloadType(NetEqDecoder::kDecoderCNGwb,
- kCngWbPayloadType));
+ "cng-wb", kCngWbPayloadType));
ASSERT_EQ(0, neteq_->RegisterPayloadType(NetEqDecoder::kDecoderCNGswb32kHz,
- kCngSwb32PayloadType));
+ "cng-swb32", kCngSwb32PayloadType));
ASSERT_EQ(0, neteq_->RegisterPayloadType(NetEqDecoder::kDecoderCNGswb48kHz,
- kCngSwb48PayloadType));
- ASSERT_EQ(0, neteq_->RegisterPayloadType(NetEqDecoder::kDecoderAVT,
+ "cng-swb48", kCngSwb48PayloadType));
+ ASSERT_EQ(0, neteq_->RegisterPayloadType(NetEqDecoder::kDecoderAVT, "avt",
kAvtPayloadType));
- ASSERT_EQ(0, neteq_->RegisterPayloadType(NetEqDecoder::kDecoderRED,
+ ASSERT_EQ(0, neteq_->RegisterPayloadType(NetEqDecoder::kDecoderRED, "red",
kRedPayloadType));
- ASSERT_EQ(0, neteq_->RegisterPayloadType(NetEqDecoder::kDecoderISAC,
+ ASSERT_EQ(0, neteq_->RegisterPayloadType(NetEqDecoder::kDecoderISAC, "isac",
kIsacPayloadType));
PopulateRtpInfo(0, 0, &rtp_info);
@@ -1106,8 +1213,7 @@ TEST_F(NetEqDecodingTest, IF_ISAC(SyncPacketInsert)) {
// Payload length of 10 ms PCM16 16 kHz.
const size_t kPayloadBytes = kBlockSize16kHz * sizeof(int16_t);
uint8_t payload[kPayloadBytes] = {0};
- ASSERT_EQ(0, neteq_->InsertPacket(
- rtp_info, payload, kPayloadBytes, receive_timestamp));
+ ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, receive_timestamp));
// Next packet. Last packet contained 10 ms audio.
rtp_info.header.sequenceNumber++;
@@ -1145,6 +1251,7 @@ TEST_F(NetEqDecodingTest, IF_ISAC(SyncPacketInsert)) {
--rtp_info.header.ssrc;
EXPECT_EQ(0, neteq_->InsertSyncPacket(rtp_info, receive_timestamp));
}
+#endif
// First insert several noise-like packets, then sync packets. Decoding all
// packets should not produce errors, and statistics should not show any packet loss
@@ -1165,17 +1272,16 @@ TEST_F(NetEqDecodingTest, SyncPacketDecode) {
// Insert some packets which decode to noise. We are not interested in
// actual decoded values.
NetEqOutputType output_type;
- int num_channels;
+ size_t num_channels;
size_t samples_per_channel;
uint32_t receive_timestamp = 0;
for (int n = 0; n < 100; ++n) {
- ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, kPayloadBytes,
- receive_timestamp));
+ ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, receive_timestamp));
ASSERT_EQ(0, neteq_->GetAudio(kBlockSize16kHz, decoded,
&samples_per_channel, &num_channels,
&output_type));
ASSERT_EQ(kBlockSize16kHz, samples_per_channel);
- ASSERT_EQ(1, num_channels);
+ ASSERT_EQ(1u, num_channels);
rtp_info.header.sequenceNumber++;
rtp_info.header.timestamp += kBlockSize16kHz;
@@ -1193,7 +1299,7 @@ TEST_F(NetEqDecodingTest, SyncPacketDecode) {
&samples_per_channel, &num_channels,
&output_type));
ASSERT_EQ(kBlockSize16kHz, samples_per_channel);
- ASSERT_EQ(1, num_channels);
+ ASSERT_EQ(1u, num_channels);
if (n > algorithmic_frame_delay) {
EXPECT_TRUE(IsAllZero(decoded, samples_per_channel * num_channels));
}
@@ -1205,8 +1311,7 @@ TEST_F(NetEqDecodingTest, SyncPacketDecode) {
// We insert regular packets; if sync packets are not correctly buffered, then
// network statistics would show some packet loss.
for (int n = 0; n <= algorithmic_frame_delay + 10; ++n) {
- ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, kPayloadBytes,
- receive_timestamp));
+ ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, receive_timestamp));
ASSERT_EQ(0, neteq_->GetAudio(kBlockSize16kHz, decoded,
&samples_per_channel, &num_channels,
&output_type));
@@ -1243,18 +1348,17 @@ TEST_F(NetEqDecodingTest, SyncPacketBufferSizeAndOverridenByNetworkPackets) {
// Insert some packets which decode to noise. We are not interested in
// actual decoded values.
NetEqOutputType output_type;
- int num_channels;
+ size_t num_channels;
size_t samples_per_channel;
uint32_t receive_timestamp = 0;
int algorithmic_frame_delay = algorithmic_delay_ms_ / 10 + 1;
for (int n = 0; n < algorithmic_frame_delay; ++n) {
- ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, kPayloadBytes,
- receive_timestamp));
+ ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, receive_timestamp));
ASSERT_EQ(0, neteq_->GetAudio(kBlockSize16kHz, decoded,
&samples_per_channel, &num_channels,
&output_type));
ASSERT_EQ(kBlockSize16kHz, samples_per_channel);
- ASSERT_EQ(1, num_channels);
+ ASSERT_EQ(1u, num_channels);
rtp_info.header.sequenceNumber++;
rtp_info.header.timestamp += kBlockSize16kHz;
receive_timestamp += kBlockSize16kHz;
@@ -1281,8 +1385,7 @@ TEST_F(NetEqDecodingTest, SyncPacketBufferSizeAndOverridenByNetworkPackets) {
// Insert.
for (int n = 0; n < kNumSyncPackets; ++n) {
- ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, kPayloadBytes,
- receive_timestamp));
+ ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, receive_timestamp));
rtp_info.header.sequenceNumber++;
rtp_info.header.timestamp += kBlockSize16kHz;
receive_timestamp += kBlockSize16kHz;
@@ -1294,7 +1397,7 @@ TEST_F(NetEqDecodingTest, SyncPacketBufferSizeAndOverridenByNetworkPackets) {
&samples_per_channel, &num_channels,
&output_type));
ASSERT_EQ(kBlockSize16kHz, samples_per_channel);
- ASSERT_EQ(1, num_channels);
+ ASSERT_EQ(1u, num_channels);
EXPECT_TRUE(IsAllNonZero(decoded, samples_per_channel * num_channels));
}
}
@@ -1312,7 +1415,7 @@ void NetEqDecodingTest::WrapTest(uint16_t start_seq_no,
const size_t kPayloadBytes = kSamples * sizeof(int16_t);
double next_input_time_ms = 0.0;
int16_t decoded[kBlockSize16kHz];
- int num_channels;
+ size_t num_channels;
size_t samples_per_channel;
NetEqOutputType output_type;
uint32_t receive_timestamp = 0;
@@ -1334,8 +1437,7 @@ void NetEqDecodingTest::WrapTest(uint16_t start_seq_no,
if (drop_seq_numbers.find(seq_no) == drop_seq_numbers.end()) {
// This sequence number was not in the set to drop. Insert it.
ASSERT_EQ(0,
- neteq_->InsertPacket(rtp_info, payload, kPayloadBytes,
- receive_timestamp));
+ neteq_->InsertPacket(rtp_info, payload, receive_timestamp));
++packets_inserted;
}
NetEqNetworkStatistics network_stats;
@@ -1366,7 +1468,7 @@ void NetEqDecodingTest::WrapTest(uint16_t start_seq_no,
&samples_per_channel, &num_channels,
&output_type));
ASSERT_EQ(kBlockSize16kHz, samples_per_channel);
- ASSERT_EQ(1, num_channels);
+ ASSERT_EQ(1u, num_channels);
// Expect delay (in samples) to be less than 2 packets.
EXPECT_LE(timestamp - PlayoutTimestamp(),
@@ -1417,13 +1519,13 @@ void NetEqDecodingTest::DuplicateCng() {
// Insert three speech packets. Three are needed to get the frame length
// correct.
size_t out_len;
- int num_channels;
+ size_t num_channels;
NetEqOutputType type;
uint8_t payload[kPayloadBytes] = {0};
WebRtcRTPHeader rtp_info;
for (int i = 0; i < 3; ++i) {
PopulateRtpInfo(seq_no, timestamp, &rtp_info);
- ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, kPayloadBytes, 0));
+ ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
++seq_no;
timestamp += kSamples;
@@ -1442,7 +1544,9 @@ void NetEqDecodingTest::DuplicateCng() {
size_t payload_len;
PopulateCng(seq_no, timestamp, &rtp_info, payload, &payload_len);
// This is the first time this CNG packet is inserted.
- ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, payload_len, 0));
+ ASSERT_EQ(
+ 0, neteq_->InsertPacket(
+ rtp_info, rtc::ArrayView<const uint8_t>(payload, payload_len), 0));
// Pull audio once and make sure CNG is played.
ASSERT_EQ(0,
@@ -1454,7 +1558,9 @@ void NetEqDecodingTest::DuplicateCng() {
// Insert the same CNG packet again. Note that at this point it is old, since
// we have already decoded the first copy of it.
- ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, payload_len, 0));
+ ASSERT_EQ(
+ 0, neteq_->InsertPacket(
+ rtp_info, rtc::ArrayView<const uint8_t>(payload, payload_len), 0));
// Pull audio until we have played |kCngPeriodMs| of CNG. Start at 10 ms since
// we have already pulled out CNG once.
@@ -1472,7 +1578,7 @@ void NetEqDecodingTest::DuplicateCng() {
++seq_no;
timestamp += kCngPeriodSamples;
PopulateRtpInfo(seq_no, timestamp, &rtp_info);
- ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, kPayloadBytes, 0));
+ ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
// Pull audio once and verify that the output is speech again.
ASSERT_EQ(0,
@@ -1507,14 +1613,16 @@ TEST_F(NetEqDecodingTest, CngFirst) {
WebRtcRTPHeader rtp_info;
PopulateCng(seq_no, timestamp, &rtp_info, payload, &payload_len);
- ASSERT_EQ(NetEq::kOK,
- neteq_->InsertPacket(rtp_info, payload, payload_len, 0));
+ ASSERT_EQ(
+ NetEq::kOK,
+ neteq_->InsertPacket(
+ rtp_info, rtc::ArrayView<const uint8_t>(payload, payload_len), 0));
++seq_no;
timestamp += kCngPeriodSamples;
// Pull audio once and make sure CNG is played.
size_t out_len;
- int num_channels;
+ size_t num_channels;
NetEqOutputType type;
ASSERT_EQ(0, neteq_->GetAudio(kMaxBlockSize, out_data_, &out_len,
&num_channels, &type));
@@ -1524,7 +1632,7 @@ TEST_F(NetEqDecodingTest, CngFirst) {
// Insert some speech packets.
for (int i = 0; i < 3; ++i) {
PopulateRtpInfo(seq_no, timestamp, &rtp_info);
- ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, kPayloadBytes, 0));
+ ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
++seq_no;
timestamp += kSamples;
diff --git a/webrtc/modules/audio_coding/neteq/neteq_unittest.proto b/webrtc/modules/audio_coding/neteq/neteq_unittest.proto
new file mode 100644
index 0000000000..4b59848eb2
--- /dev/null
+++ b/webrtc/modules/audio_coding/neteq/neteq_unittest.proto
@@ -0,0 +1,29 @@
+syntax = "proto2";
+option optimize_for = LITE_RUNTIME;
+package webrtc.neteq_unittest;
+
+message NetEqNetworkStatistics {
+ optional uint32 current_buffer_size_ms = 1;
+ optional uint32 preferred_buffer_size_ms = 2;
+ optional uint32 jitter_peaks_found = 3;
+ optional uint32 packet_loss_rate = 4;
+ optional uint32 packet_discard_rate = 5;
+ optional uint32 expand_rate = 6;
+ optional uint32 speech_expand_rate = 7;
+ optional uint32 preemptive_rate = 8;
+ optional uint32 accelerate_rate = 9;
+ optional uint32 secondary_decoded_rate = 10;
+ optional int32 clockdrift_ppm = 11;
+ optional uint64 added_zero_samples = 12;
+ optional int32 mean_waiting_time_ms = 13;
+ optional int32 median_waiting_time_ms = 14;
+ optional int32 min_waiting_time_ms = 15;
+ optional int32 max_waiting_time_ms = 16;
+}
+
+message RtcpStatistics {
+ optional uint32 fraction_lost = 1;
+ optional uint32 cumulative_lost = 2;
+ optional uint32 extended_max_sequence_number = 3;
+ optional uint32 jitter = 4;
+}
\ No newline at end of file
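Since the file sets optimize_for = LITE_RUNTIME, the generated classes derive from MessageLite, which still provides the SerializeToString()/ParseFromString() pair used by the test. A minimal round-trip sketch with arbitrary field values:

  #include <string>
  #include "webrtc/audio_coding/neteq/neteq_unittest.pb.h"

  bool RoundTrip() {
    webrtc::neteq_unittest::RtcpStatistics stats;
    stats.set_fraction_lost(3);
    stats.set_jitter(42);

    std::string wire;
    if (!stats.SerializeToString(&wire))  // Supported by the lite runtime.
      return false;

    webrtc::neteq_unittest::RtcpStatistics parsed;
    return parsed.ParseFromString(wire) &&
           parsed.fraction_lost() == stats.fraction_lost() &&
           parsed.jitter() == stats.jitter();
  }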
diff --git a/webrtc/modules/audio_coding/neteq/normal.cc b/webrtc/modules/audio_coding/neteq/normal.cc
index ebecbf94bd..1b888f70d1 100644
--- a/webrtc/modules/audio_coding/neteq/normal.cc
+++ b/webrtc/modules/audio_coding/neteq/normal.cc
@@ -16,7 +16,7 @@
#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
#include "webrtc/modules/audio_coding/codecs/audio_decoder.h"
-#include "webrtc/modules/audio_coding/codecs/cng/include/webrtc_cng.h"
+#include "webrtc/modules/audio_coding/codecs/cng/webrtc_cng.h"
#include "webrtc/modules/audio_coding/neteq/audio_multi_vector.h"
#include "webrtc/modules/audio_coding/neteq/background_noise.h"
#include "webrtc/modules/audio_coding/neteq/decoder_database.h"
diff --git a/webrtc/modules/audio_coding/neteq/packet.h b/webrtc/modules/audio_coding/neteq/packet.h
index 723ed8b0a3..64b325e027 100644
--- a/webrtc/modules/audio_coding/neteq/packet.h
+++ b/webrtc/modules/audio_coding/neteq/packet.h
@@ -13,7 +13,7 @@
#include <list>
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/typedefs.h"
namespace webrtc {
diff --git a/webrtc/modules/audio_coding/neteq/payload_splitter_unittest.cc b/webrtc/modules/audio_coding/neteq/payload_splitter_unittest.cc
index bf26a8f517..07c4bac0b6 100644
--- a/webrtc/modules/audio_coding/neteq/payload_splitter_unittest.cc
+++ b/webrtc/modules/audio_coding/neteq/payload_splitter_unittest.cc
@@ -310,10 +310,10 @@ TEST(RedPayloadSplitter, CheckRedPayloads) {
// easier to just register the payload types and let the actual implementation
// do its job.
DecoderDatabase decoder_database;
- decoder_database.RegisterPayload(0, NetEqDecoder::kDecoderCNGnb);
- decoder_database.RegisterPayload(1, NetEqDecoder::kDecoderPCMu);
- decoder_database.RegisterPayload(2, NetEqDecoder::kDecoderAVT);
- decoder_database.RegisterPayload(3, NetEqDecoder::kDecoderILBC);
+ decoder_database.RegisterPayload(0, NetEqDecoder::kDecoderCNGnb, "cng-nb");
+ decoder_database.RegisterPayload(1, NetEqDecoder::kDecoderPCMu, "pcmu");
+ decoder_database.RegisterPayload(2, NetEqDecoder::kDecoderAVT, "avt");
+ decoder_database.RegisterPayload(3, NetEqDecoder::kDecoderILBC, "ilbc");
PayloadSplitter splitter;
splitter.CheckRedPayloads(&packet_list, decoder_database);
@@ -745,8 +745,8 @@ TEST(FecPayloadSplitter, MixedPayload) {
PacketList packet_list;
DecoderDatabase decoder_database;
- decoder_database.RegisterPayload(0, NetEqDecoder::kDecoderOpus);
- decoder_database.RegisterPayload(1, NetEqDecoder::kDecoderPCMu);
+ decoder_database.RegisterPayload(0, NetEqDecoder::kDecoderOpus, "opus");
+ decoder_database.RegisterPayload(1, NetEqDecoder::kDecoderPCMu, "pcmu");
Packet* packet = CreatePacket(0, 10, 0xFF, true);
packet_list.push_back(packet);
@@ -802,7 +802,7 @@ TEST(FecPayloadSplitter, EmbedFecInRed) {
const int kTimestampOffset = 20 * 48; // 20 ms * 48 kHz.
uint8_t payload_types[] = {0, 0};
- decoder_database.RegisterPayload(0, NetEqDecoder::kDecoderOpus);
+ decoder_database.RegisterPayload(0, NetEqDecoder::kDecoderOpus, "opus");
Packet* packet = CreateRedPayload(2, payload_types, kTimestampOffset, true);
packet_list.push_back(packet);
diff --git a/webrtc/modules/audio_coding/neteq/rtcp.cc b/webrtc/modules/audio_coding/neteq/rtcp.cc
index cf8e0280bb..7ef40bc814 100644
--- a/webrtc/modules/audio_coding/neteq/rtcp.cc
+++ b/webrtc/modules/audio_coding/neteq/rtcp.cc
@@ -15,7 +15,7 @@
#include <algorithm>
#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
namespace webrtc {
diff --git a/webrtc/modules/audio_coding/neteq/statistics_calculator.cc b/webrtc/modules/audio_coding/neteq/statistics_calculator.cc
index e6a6fbf705..8f873762c5 100644
--- a/webrtc/modules/audio_coding/neteq/statistics_calculator.cc
+++ b/webrtc/modules/audio_coding/neteq/statistics_calculator.cc
@@ -50,7 +50,7 @@ void StatisticsCalculator::PeriodicUmaLogger::AdvanceClock(int step_ms) {
}
void StatisticsCalculator::PeriodicUmaLogger::LogToUma(int value) const {
- RTC_HISTOGRAM_COUNTS(uma_name_, value, 1, max_value_, 50);
+ RTC_HISTOGRAM_COUNTS_SPARSE(uma_name_, value, 1, max_value_, 50);
}
StatisticsCalculator::PeriodicUmaCount::PeriodicUmaCount(
@@ -187,9 +187,9 @@ void StatisticsCalculator::SecondaryDecodedSamples(int num_samples) {
}
void StatisticsCalculator::LogDelayedPacketOutageEvent(int outage_duration_ms) {
- RTC_HISTOGRAM_COUNTS("WebRTC.Audio.DelayedPacketOutageEventMs",
- outage_duration_ms, 1 /* min */, 2000 /* max */,
- 100 /* bucket count */);
+ RTC_HISTOGRAM_COUNTS_SPARSE("WebRTC.Audio.DelayedPacketOutageEventMs",
+ outage_duration_ms, 1 /* min */, 2000 /* max */,
+ 100 /* bucket count */);
delayed_packet_outage_counter_.RegisterSample();
}
diff --git a/webrtc/modules/audio_coding/neteq/test/NETEQTEST_RTPpacket.h b/webrtc/modules/audio_coding/neteq/test/NETEQTEST_RTPpacket.h
index 3fbce8be5c..56ed72fcee 100644
--- a/webrtc/modules/audio_coding/neteq/test/NETEQTEST_RTPpacket.h
+++ b/webrtc/modules/audio_coding/neteq/test/NETEQTEST_RTPpacket.h
@@ -14,7 +14,7 @@
#include <map>
#include <stdio.h>
#include "webrtc/typedefs.h"
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
enum stereoModes {
stereoModeMono,
diff --git a/webrtc/modules/audio_coding/neteq/test/PayloadTypes.h b/webrtc/modules/audio_coding/neteq/test/PayloadTypes.h
index c46a3daece..aba525b162 100644
--- a/webrtc/modules/audio_coding/neteq/test/PayloadTypes.h
+++ b/webrtc/modules/audio_coding/neteq/test/PayloadTypes.h
@@ -39,7 +39,7 @@
#define NETEQ_CODEC_G722_1_16_PT 108
#define NETEQ_CODEC_G722_1_24_PT 109
#define NETEQ_CODEC_G722_1_32_PT 110
-#define NETEQ_CODEC_SC3_PT 111
+#define NETEQ_CODEC_OPUS_PT 111
#define NETEQ_CODEC_AMR_PT 112
#define NETEQ_CODEC_GSMEFR_PT 113
//#define NETEQ_CODEC_ILBCRCU_PT 114
diff --git a/webrtc/modules/audio_coding/neteq/test/RTPencode.cc b/webrtc/modules/audio_coding/neteq/test/RTPencode.cc
index cbb7436152..45586ee111 100644
--- a/webrtc/modules/audio_coding/neteq/test/RTPencode.cc
+++ b/webrtc/modules/audio_coding/neteq/test/RTPencode.cc
@@ -25,7 +25,9 @@
#include <algorithm>
+#include "webrtc/base/checks.h"
#include "webrtc/typedefs.h"
+
// needed for NetEqDecoder
#include "webrtc/modules/audio_coding/neteq/audio_decoder_impl.h"
#include "webrtc/modules/audio_coding/neteq/include/neteq.h"
@@ -36,6 +38,10 @@
#include "PayloadTypes.h"
+namespace {
+const size_t kRtpDataSize = 8000;
+}
+
/*********************/
/* Misc. definitions */
/*********************/
@@ -126,10 +132,10 @@ void stereoInterleave(unsigned char* data, size_t dataLen, size_t stride);
#include "webrtc_vad.h"
#if ((defined CODEC_PCM16B) || (defined NETEQ_ARBITRARY_CODEC))
-#include "pcm16b.h"
+#include "webrtc/modules/audio_coding/codecs/pcm16b/pcm16b.h"
#endif
#ifdef CODEC_G711
-#include "g711_interface.h"
+#include "webrtc/modules/audio_coding/codecs/g711/g711_interface.h"
#endif
#ifdef CODEC_G729
#include "G729Interface.h"
@@ -146,19 +152,19 @@ void stereoInterleave(unsigned char* data, size_t dataLen, size_t stride);
#include "AMRWBCreation.h"
#endif
#ifdef CODEC_ILBC
-#include "ilbc.h"
+#include "webrtc/modules/audio_coding/codecs/ilbc/ilbc.h"
#endif
#if (defined CODEC_ISAC || defined CODEC_ISAC_SWB)
-#include "isac.h"
+#include "webrtc/modules/audio_coding/codecs/isac/main/include/isac.h"
#endif
#ifdef NETEQ_ISACFIX_CODEC
-#include "isacfix.h"
+#include "webrtc/modules/audio_coding/codecs/isac/fix/include/isacfix.h"
#ifdef CODEC_ISAC
#error Cannot have both ISAC and ISACfix defined. Please de-select one.
#endif
#endif
#ifdef CODEC_G722
-#include "g722_interface.h"
+#include "webrtc/modules/audio_coding/codecs/g722/g722_interface.h"
#endif
#ifdef CODEC_G722_1_24
#include "G722_1Interface.h"
@@ -188,11 +194,14 @@ void stereoInterleave(unsigned char* data, size_t dataLen, size_t stride);
#endif
#if (defined(CODEC_CNGCODEC8) || defined(CODEC_CNGCODEC16) || \
defined(CODEC_CNGCODEC32) || defined(CODEC_CNGCODEC48))
-#include "webrtc_cng.h"
+#include "webrtc/modules/audio_coding/codecs/cng/webrtc_cng.h"
#endif
#if ((defined CODEC_SPEEX_8) || (defined CODEC_SPEEX_16))
#include "SpeexInterface.h"
#endif
+#ifdef CODEC_OPUS
+#include "webrtc/modules/audio_coding/codecs/opus/opus_interface.h"
+#endif
/***********************************/
/* Global codec instance variables */
@@ -264,6 +273,9 @@ SPEEX_encinst_t* SPEEX8enc_inst[2];
#ifdef CODEC_SPEEX_16
SPEEX_encinst_t* SPEEX16enc_inst[2];
#endif
+#ifdef CODEC_OPUS
+OpusEncInst* opus_inst[2];
+#endif
int main(int argc, char* argv[]) {
size_t packet_size;
@@ -275,7 +287,7 @@ int main(int argc, char* argv[]) {
int useRed = 0;
size_t len, enc_len;
int16_t org_data[4000];
- unsigned char rtp_data[8000];
+ unsigned char rtp_data[kRtpDataSize];
int16_t seqNo = 0xFFF;
uint32_t ssrc = 1235412312;
uint32_t timestamp = 0xAC1245;
@@ -286,12 +298,12 @@ int main(int argc, char* argv[]) {
uint32_t red_TS[2] = {0};
uint16_t red_len[2] = {0};
size_t RTPheaderLen = 12;
- uint8_t red_data[8000];
+ uint8_t red_data[kRtpDataSize];
#ifdef INSERT_OLD_PACKETS
uint16_t old_length, old_plen;
size_t old_enc_len;
int first_old_packet = 1;
- unsigned char old_rtp_data[8000];
+ unsigned char old_rtp_data[kRtpDataSize];
size_t packet_age = 0;
#endif
#ifdef INSERT_DTMF_PACKETS
@@ -429,6 +441,10 @@ int main(int argc, char* argv[]) {
printf(" : red_isac Redundancy RTP packet with 2*iSAC "
"frames\n");
#endif
+#endif // CODEC_RED
+#ifdef CODEC_OPUS
+ printf(" : opus Opus codec with FEC (48kHz, 32kbps, FEC"
+ " on and tuned for 5%% packet losses)\n");
#endif
printf("\n");
@@ -880,6 +896,10 @@ void NetEQTest_GetCodec_and_PT(char* name,
*PT = NETEQ_CODEC_ISAC_PT; /* this will be the PT for the sub-headers */
*fs = 16000;
*useRed = 1;
+ } else if (!strcmp(name, "opus")) {
+ *codec = webrtc::NetEqDecoder::kDecoderOpus;
+ *PT = NETEQ_CODEC_OPUS_PT; /* this will be the PT for the sub-headers */
+ *fs = 48000;
} else {
printf("Error: Not a supported codec (%s)\n", name);
exit(0);
@@ -1411,12 +1431,23 @@ int NetEQTest_init_coders(webrtc::NetEqDecoder coder,
}
break;
#endif
+#ifdef CODEC_OPUS
+ case webrtc::NetEqDecoder::kDecoderOpus:
+ ok = WebRtcOpus_EncoderCreate(&opus_inst[k], 1, 0);
+ if (ok != 0) {
+ printf("Error: Couldn't allocate memory for Opus encoding "
+ "instance\n");
+ exit(0);
+ }
+ WebRtcOpus_EnableFec(opus_inst[k]);
+ WebRtcOpus_SetPacketLossRate(opus_inst[k], 5);
+ break;
+#endif
default:
printf("Error: unknown codec in call to NetEQTest_init_coders.\n");
exit(0);
break;
}
-
if (ok != 0) {
return (ok);
}
@@ -1543,6 +1574,11 @@ int NetEQTest_free_coders(webrtc::NetEqDecoder coder, size_t numChannels) {
WebRtcGSMFR_FreeEnc(GSMFRenc_inst[k]);
break;
#endif
+#ifdef CODEC_OPUS
+ case webrtc::NetEqDecoder::kDecoderOpus:
+ WebRtcOpus_EncoderFree(opus_inst[k]);
+ break;
+#endif
default:
printf("Error: unknown codec in call to NetEQTest_init_coders.\n");
exit(0);
@@ -1687,6 +1723,11 @@ size_t NetEQTest_encode(webrtc::NetEqDecoder coder,
cdlen = static_cast<size_t>(res);
}
#endif
+#ifdef CODEC_OPUS
+ cdlen = WebRtcOpus_Encode(opus_inst[k], indata, frameLen, kRtpDataSize - 12,
+ encoded);
+ RTC_CHECK_GT(cdlen, 0u);
+#endif
indata += frameLen;
encoded += cdlen;
totalLen += cdlen;
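Taken together, the Opus hunks above form one encoder lifecycle. A sketch using only calls that appear in this diff; the RTC_CHECK wrappers and the names pcm, frame_len and encoded are illustrative:
OpusEncInst* inst = NULL;
// Mono encoder, application mode 0 (VoIP).
RTC_CHECK_EQ(0, WebRtcOpus_EncoderCreate(&inst, 1, 0));
WebRtcOpus_EnableFec(inst);             // FEC on...
WebRtcOpus_SetPacketLossRate(inst, 5);  // ...tuned for 5% expected loss.
int len = WebRtcOpus_Encode(inst, pcm, frame_len, kRtpDataSize - 12, encoded);
RTC_CHECK_GT(len, 0);  // Negative return values signal encoder errors.
WebRtcOpus_EncoderFree(inst);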
diff --git a/webrtc/modules/audio_coding/neteq/test/neteq_ilbc_quality_test.cc b/webrtc/modules/audio_coding/neteq/test/neteq_ilbc_quality_test.cc
index 2042e0d2b8..0c09e92b4d 100644
--- a/webrtc/modules/audio_coding/neteq/test/neteq_ilbc_quality_test.cc
+++ b/webrtc/modules/audio_coding/neteq/test/neteq_ilbc_quality_test.cc
@@ -11,7 +11,7 @@
#include "webrtc/base/checks.h"
#include "webrtc/base/safe_conversions.h"
#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/modules/audio_coding/codecs/ilbc/include/audio_encoder_ilbc.h"
+#include "webrtc/modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.h"
#include "webrtc/modules/audio_coding/neteq/tools/neteq_quality_test.h"
#include "webrtc/test/testsupport/fileutils.h"
@@ -50,7 +50,7 @@ class NetEqIlbcQualityTest : public NetEqQualityTest {
NetEqDecoder::kDecoderILBC) {}
void SetUp() override {
- ASSERT_EQ(1, channels_) << "iLBC supports only mono audio.";
+ ASSERT_EQ(1u, channels_) << "iLBC supports only mono audio.";
AudioEncoderIlbc::Config config;
config.frame_size_ms = FLAGS_frame_size_ms;
encoder_.reset(new AudioEncoderIlbc(config));
@@ -66,8 +66,10 @@ class NetEqIlbcQualityTest : public NetEqQualityTest {
uint32_t dummy_timestamp = 0;
AudioEncoder::EncodedInfo info;
do {
- info = encoder_->Encode(dummy_timestamp, &in_data[encoded_samples],
- kFrameSizeSamples, max_bytes, payload);
+ info = encoder_->Encode(dummy_timestamp,
+ rtc::ArrayView<const int16_t>(
+ in_data + encoded_samples, kFrameSizeSamples),
+ max_bytes, payload);
encoded_samples += kFrameSizeSamples;
} while (info.encoded_bytes == 0);
return rtc::checked_cast<int>(info.encoded_bytes);
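The same Encode() reshaping appears in both quality tests in this diff: the sample pointer and sample count merge into a single rtc::ArrayView<const int16_t>. In isolation (encoder_, in_data, offset, max_bytes and payload stand in for the test members above):
AudioEncoder::EncodedInfo info = encoder_->Encode(
    dummy_timestamp,
    rtc::ArrayView<const int16_t>(in_data + offset, kFrameSizeSamples),
    max_bytes, payload);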
diff --git a/webrtc/modules/audio_coding/neteq/test/neteq_isac_quality_test.cc b/webrtc/modules/audio_coding/neteq/test/neteq_isac_quality_test.cc
index 66b0903f66..4ccebb3e66 100644
--- a/webrtc/modules/audio_coding/neteq/test/neteq_isac_quality_test.cc
+++ b/webrtc/modules/audio_coding/neteq/test/neteq_isac_quality_test.cc
@@ -59,7 +59,7 @@ NetEqIsacQualityTest::NetEqIsacQualityTest()
bit_rate_kbps_(FLAGS_bit_rate_kbps) {}
void NetEqIsacQualityTest::SetUp() {
- ASSERT_EQ(1, channels_) << "iSAC supports only mono audio.";
+ ASSERT_EQ(1u, channels_) << "iSAC supports only mono audio.";
// Create encoder memory.
WebRtcIsacfix_Create(&isac_encoder_);
ASSERT_TRUE(isac_encoder_ != NULL);
diff --git a/webrtc/modules/audio_coding/neteq/test/neteq_opus_quality_test.cc b/webrtc/modules/audio_coding/neteq/test/neteq_opus_quality_test.cc
index 5e8b2297d4..5ab55ba9e8 100644
--- a/webrtc/modules/audio_coding/neteq/test/neteq_opus_quality_test.cc
+++ b/webrtc/modules/audio_coding/neteq/test/neteq_opus_quality_test.cc
@@ -8,7 +8,7 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/audio_coding/codecs/opus/include/opus_interface.h"
+#include "webrtc/modules/audio_coding/codecs/opus/opus_interface.h"
#include "webrtc/modules/audio_coding/codecs/opus/opus_inst.h"
#include "webrtc/modules/audio_coding/neteq/tools/neteq_quality_test.h"
diff --git a/webrtc/modules/audio_coding/neteq/test/neteq_pcmu_quality_test.cc b/webrtc/modules/audio_coding/neteq/test/neteq_pcmu_quality_test.cc
index 422a9fa6eb..ac478ab5ac 100644
--- a/webrtc/modules/audio_coding/neteq/test/neteq_pcmu_quality_test.cc
+++ b/webrtc/modules/audio_coding/neteq/test/neteq_pcmu_quality_test.cc
@@ -11,7 +11,7 @@
#include "webrtc/base/checks.h"
#include "webrtc/base/safe_conversions.h"
#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/modules/audio_coding/codecs/g711/include/audio_encoder_pcm.h"
+#include "webrtc/modules/audio_coding/codecs/g711/audio_encoder_pcm.h"
#include "webrtc/modules/audio_coding/neteq/tools/neteq_quality_test.h"
#include "webrtc/test/testsupport/fileutils.h"
@@ -50,7 +50,7 @@ class NetEqPcmuQualityTest : public NetEqQualityTest {
NetEqDecoder::kDecoderPCMu) {}
void SetUp() override {
- ASSERT_EQ(1, channels_) << "PCMu supports only mono audio.";
+ ASSERT_EQ(1u, channels_) << "PCMu supports only mono audio.";
AudioEncoderPcmU::Config config;
config.frame_size_ms = FLAGS_frame_size_ms;
encoder_.reset(new AudioEncoderPcmU(config));
@@ -66,8 +66,10 @@ class NetEqPcmuQualityTest : public NetEqQualityTest {
uint32_t dummy_timestamp = 0;
AudioEncoder::EncodedInfo info;
do {
- info = encoder_->Encode(dummy_timestamp, &in_data[encoded_samples],
- kFrameSizeSamples, max_bytes, payload);
+ info = encoder_->Encode(dummy_timestamp,
+ rtc::ArrayView<const int16_t>(
+ in_data + encoded_samples, kFrameSizeSamples),
+ max_bytes, payload);
encoded_samples += kFrameSizeSamples;
} while (info.encoded_bytes == 0);
return rtc::checked_cast<int>(info.encoded_bytes);
diff --git a/webrtc/modules/audio_coding/neteq/timestamp_scaler.cc b/webrtc/modules/audio_coding/neteq/timestamp_scaler.cc
index eb69ac7889..c1abdc30f5 100644
--- a/webrtc/modules/audio_coding/neteq/timestamp_scaler.cc
+++ b/webrtc/modules/audio_coding/neteq/timestamp_scaler.cc
@@ -52,19 +52,11 @@ uint32_t TimestampScaler::ToInternal(uint32_t external_timestamp,
denominator_ = 1;
break;
}
- case NetEqDecoder::kDecoderCNGswb48kHz: {
- // Use timestamp scaling with factor 2/3 (32 kHz sample rate, but RTP
- // timestamps run on 48 kHz).
- // TODO(tlegrand): Remove scaling for kDecoderCNGswb48kHz once ACM has
- // full 48 kHz support.
- numerator_ = 2;
- denominator_ = 3;
- break;
- }
case NetEqDecoder::kDecoderAVT:
case NetEqDecoder::kDecoderCNGnb:
case NetEqDecoder::kDecoderCNGwb:
- case NetEqDecoder::kDecoderCNGswb32kHz: {
+ case NetEqDecoder::kDecoderCNGswb32kHz:
+ case NetEqDecoder::kDecoderCNGswb48kHz: {
// Do not change the timestamp scaling settings for DTMF or CNG.
break;
}
@@ -87,8 +79,6 @@ uint32_t TimestampScaler::ToInternal(uint32_t external_timestamp,
assert(denominator_ > 0); // Should not be possible.
external_ref_ = external_timestamp;
internal_ref_ += (external_diff * numerator_) / denominator_;
- LOG(LS_VERBOSE) << "Converting timestamp: " << external_timestamp <<
- " -> " << internal_ref_;
return internal_ref_;
} else {
// No scaling.
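The deleted kDecoderCNGswb48kHz case was the last CNG codec with a non-unity scale factor; the conversion it fed into is still visible at the end of the hunk. As a sketch of that rule:
// ToInternal() maps an external RTP timestamp onto the internal clock:
uint32_t external_diff = external_timestamp - external_ref_;
internal_ref_ += (external_diff * numerator_) / denominator_;
// The removed case set numerator_/denominator_ = 2/3, squeezing 48 kHz RTP
// timestamps onto a 32 kHz internal rate; CNG payloads now pass through
// unscaled, like DTMF.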
diff --git a/webrtc/modules/audio_coding/neteq/tools/audio_loop.cc b/webrtc/modules/audio_coding/neteq/tools/audio_loop.cc
index 2d2a7e3dd4..eed95753f0 100644
--- a/webrtc/modules/audio_coding/neteq/tools/audio_loop.cc
+++ b/webrtc/modules/audio_coding/neteq/tools/audio_loop.cc
@@ -43,13 +43,14 @@ bool AudioLoop::Init(const std::string file_name,
return true;
}
-const int16_t* AudioLoop::GetNextBlock() {
+rtc::ArrayView<const int16_t> AudioLoop::GetNextBlock() {
// Check that the AudioLoop is initialized.
- if (block_length_samples_ == 0) return NULL;
+ if (block_length_samples_ == 0)
+ return rtc::ArrayView<const int16_t>();
const int16_t* output_ptr = &audio_array_[next_index_];
next_index_ = (next_index_ + block_length_samples_) % loop_length_samples_;
- return output_ptr;
+ return rtc::ArrayView<const int16_t>(output_ptr, block_length_samples_);
}
diff --git a/webrtc/modules/audio_coding/neteq/tools/audio_loop.h b/webrtc/modules/audio_coding/neteq/tools/audio_loop.h
index a897ee5aef..14e20f68ac 100644
--- a/webrtc/modules/audio_coding/neteq/tools/audio_loop.h
+++ b/webrtc/modules/audio_coding/neteq/tools/audio_loop.h
@@ -13,6 +13,7 @@
#include <string>
+#include "webrtc/base/array_view.h"
#include "webrtc/base/constructormagic.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/typedefs.h"
@@ -40,10 +41,9 @@ class AudioLoop {
bool Init(const std::string file_name, size_t max_loop_length_samples,
size_t block_length_samples);
- // Returns a pointer to the next block of audio. The number given as
- // |block_length_samples| to the Init() function determines how many samples
- // that can be safely read from the pointer.
- const int16_t* GetNextBlock();
+ // Returns a (pointer,size) pair for the next block of audio. The size is
+ // equal to the |block_length_samples| Init() argument.
+ rtc::ArrayView<const int16_t> GetNextBlock();
private:
size_t next_index_;
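A usage sketch for the new return type, matching how neteq_performance_test.cc consumes it later in this diff (audio_loop and input_payload are illustrative names):
auto block = audio_loop.GetNextBlock();
if (block.empty())
  return -1;  // Uninitialized loop; replaces the old NULL check.
// data()/size() replace the bare pointer plus implicitly-known length.
size_t payload_len =
    WebRtcPcm16b_Encode(block.data(), block.size(), input_payload);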
diff --git a/webrtc/modules/audio_coding/neteq/tools/audio_sink.h b/webrtc/modules/audio_coding/neteq/tools/audio_sink.h
index 3bd2df5ca8..489a8b2ad8 100644
--- a/webrtc/modules/audio_coding/neteq/tools/audio_sink.h
+++ b/webrtc/modules/audio_coding/neteq/tools/audio_sink.h
@@ -12,7 +12,7 @@
#define WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_AUDIO_SINK_H_
#include "webrtc/base/constructormagic.h"
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/typedefs.h"
namespace webrtc {
diff --git a/webrtc/modules/audio_coding/neteq/tools/constant_pcm_packet_source.cc b/webrtc/modules/audio_coding/neteq/tools/constant_pcm_packet_source.cc
index dc07030dd6..5a9f79f877 100644
--- a/webrtc/modules/audio_coding/neteq/tools/constant_pcm_packet_source.cc
+++ b/webrtc/modules/audio_coding/neteq/tools/constant_pcm_packet_source.cc
@@ -13,7 +13,7 @@
#include <algorithm>
#include "webrtc/base/checks.h"
-#include "webrtc/modules/audio_coding/codecs/pcm16b/include/pcm16b.h"
+#include "webrtc/modules/audio_coding/codecs/pcm16b/pcm16b.h"
#include "webrtc/modules/audio_coding/neteq/tools/packet.h"
namespace webrtc {
diff --git a/webrtc/modules/audio_coding/neteq/tools/neteq_external_decoder_test.cc b/webrtc/modules/audio_coding/neteq/tools/neteq_external_decoder_test.cc
index 49750c26c8..694b9ed153 100644
--- a/webrtc/modules/audio_coding/neteq/tools/neteq_external_decoder_test.cc
+++ b/webrtc/modules/audio_coding/neteq/tools/neteq_external_decoder_test.cc
@@ -12,6 +12,7 @@
#include "webrtc/modules/audio_coding/neteq/tools/neteq_external_decoder_test.h"
#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/base/format_macros.h"
namespace webrtc {
namespace test {
@@ -21,26 +22,25 @@ NetEqExternalDecoderTest::NetEqExternalDecoderTest(NetEqDecoder codec,
: codec_(codec),
decoder_(decoder),
sample_rate_hz_(CodecSampleRateHz(codec_)),
- channels_(static_cast<int>(decoder_->Channels())) {
+ channels_(decoder_->Channels()) {
NetEq::Config config;
config.sample_rate_hz = sample_rate_hz_;
neteq_.reset(NetEq::Create(config));
- printf("%d\n", channels_);
+ printf("%" PRIuS "\n", channels_);
}
void NetEqExternalDecoderTest::Init() {
- ASSERT_EQ(NetEq::kOK, neteq_->RegisterExternalDecoder(
- decoder_, codec_, kPayloadType, sample_rate_hz_));
+ ASSERT_EQ(NetEq::kOK,
+ neteq_->RegisterExternalDecoder(decoder_, codec_, name_,
+ kPayloadType, sample_rate_hz_));
}
-void NetEqExternalDecoderTest::InsertPacket(WebRtcRTPHeader rtp_header,
- const uint8_t* payload,
- size_t payload_size_bytes,
- uint32_t receive_timestamp) {
- ASSERT_EQ(
- NetEq::kOK,
- neteq_->InsertPacket(
- rtp_header, payload, payload_size_bytes, receive_timestamp));
+void NetEqExternalDecoderTest::InsertPacket(
+ WebRtcRTPHeader rtp_header,
+ rtc::ArrayView<const uint8_t> payload,
+ uint32_t receive_timestamp) {
+ ASSERT_EQ(NetEq::kOK,
+ neteq_->InsertPacket(rtp_header, payload, receive_timestamp));
}
size_t NetEqExternalDecoderTest::GetOutputAudio(size_t max_length,
@@ -48,7 +48,7 @@ size_t NetEqExternalDecoderTest::GetOutputAudio(size_t max_length,
NetEqOutputType* output_type) {
// Get audio from regular instance.
size_t samples_per_channel;
- int num_channels;
+ size_t num_channels;
EXPECT_EQ(NetEq::kOK,
neteq_->GetAudio(max_length,
output,
@@ -58,6 +58,7 @@ size_t NetEqExternalDecoderTest::GetOutputAudio(size_t max_length,
EXPECT_EQ(channels_, num_channels);
EXPECT_EQ(static_cast<size_t>(kOutputLengthMs * sample_rate_hz_ / 1000),
samples_per_channel);
+ EXPECT_EQ(sample_rate_hz_, neteq_->last_output_sample_rate_hz());
return samples_per_channel;
}
diff --git a/webrtc/modules/audio_coding/neteq/tools/neteq_external_decoder_test.h b/webrtc/modules/audio_coding/neteq/tools/neteq_external_decoder_test.h
index 0a41c6ec20..d7b01fe33a 100644
--- a/webrtc/modules/audio_coding/neteq/tools/neteq_external_decoder_test.h
+++ b/webrtc/modules/audio_coding/neteq/tools/neteq_external_decoder_test.h
@@ -11,10 +11,12 @@
#ifndef WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_EXTERNAL_DECODER_TEST_H_
#define WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_EXTERNAL_DECODER_TEST_H_
+#include <string>
+
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/modules/audio_coding/codecs/audio_decoder.h"
#include "webrtc/modules/audio_coding/neteq/include/neteq.h"
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
namespace webrtc {
namespace test {
@@ -36,8 +38,8 @@ class NetEqExternalDecoderTest {
// |payload_size_bytes| bytes. The |receive_timestamp| is an indication
// of the time when the packet was received, and should be measured with
// the same tick rate as the RTP timestamp of the current payload.
- virtual void InsertPacket(WebRtcRTPHeader rtp_header, const uint8_t* payload,
- size_t payload_size_bytes,
+ virtual void InsertPacket(WebRtcRTPHeader rtp_header,
+ rtc::ArrayView<const uint8_t> payload,
uint32_t receive_timestamp);
// Get 10 ms of audio data. The data is written to |output|, which can hold
@@ -49,9 +51,10 @@ class NetEqExternalDecoderTest {
private:
NetEqDecoder codec_;
+ std::string name_ = "dummy name";
AudioDecoder* decoder_;
int sample_rate_hz_;
- int channels_;
+ size_t channels_;
rtc::scoped_ptr<NetEq> neteq_;
};
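Call-site sketch for the new InsertPacket(): the pointer/length pair collapses into one rtc::ArrayView argument (test, payload and payload_size_bytes are illustrative names):
test.InsertPacket(rtp_header,
                  rtc::ArrayView<const uint8_t>(payload, payload_size_bytes),
                  receive_timestamp);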
diff --git a/webrtc/modules/audio_coding/neteq/tools/neteq_performance_test.cc b/webrtc/modules/audio_coding/neteq/tools/neteq_performance_test.cc
index 9fe4dffa91..7d1f9f9798 100644
--- a/webrtc/modules/audio_coding/neteq/tools/neteq_performance_test.cc
+++ b/webrtc/modules/audio_coding/neteq/tools/neteq_performance_test.cc
@@ -10,7 +10,7 @@
#include "webrtc/modules/audio_coding/neteq/tools/neteq_performance_test.h"
-#include "webrtc/modules/audio_coding/codecs/pcm16b/include/pcm16b.h"
+#include "webrtc/modules/audio_coding/codecs/pcm16b/pcm16b.h"
#include "webrtc/modules/audio_coding/neteq/include/neteq.h"
#include "webrtc/modules/audio_coding/neteq/tools/audio_loop.h"
#include "webrtc/modules/audio_coding/neteq/tools/rtp_generator.h"
@@ -34,6 +34,7 @@ int64_t NetEqPerformanceTest::Run(int runtime_ms,
const int kSampRateHz = 32000;
const webrtc::NetEqDecoder kDecoderType =
webrtc::NetEqDecoder::kDecoderPCM16Bswb32kHz;
+ const std::string kDecoderName = "pcm16-swb32";
const int kPayloadType = 95;
// Initialize NetEq instance.
@@ -41,7 +42,7 @@ int64_t NetEqPerformanceTest::Run(int runtime_ms,
config.sample_rate_hz = kSampRateHz;
NetEq* neteq = NetEq::Create(config);
// Register decoder in |neteq|.
- if (neteq->RegisterPayloadType(kDecoderType, kPayloadType) != 0)
+ if (neteq->RegisterPayloadType(kDecoderType, kDecoderName, kPayloadType) != 0)
return -1;
// Set up AudioLoop object.
@@ -62,12 +63,13 @@ int64_t NetEqPerformanceTest::Run(int runtime_ms,
bool drift_flipped = false;
int32_t packet_input_time_ms =
rtp_gen.GetRtpHeader(kPayloadType, kInputBlockSizeSamples, &rtp_header);
- const int16_t* input_samples = audio_loop.GetNextBlock();
- if (!input_samples) exit(1);
+ auto input_samples = audio_loop.GetNextBlock();
+ if (input_samples.empty())
+ exit(1);
uint8_t input_payload[kInputBlockSizeSamples * sizeof(int16_t)];
- size_t payload_len =
- WebRtcPcm16b_Encode(input_samples, kInputBlockSizeSamples, input_payload);
- assert(payload_len == kInputBlockSizeSamples * sizeof(int16_t));
+ size_t payload_len = WebRtcPcm16b_Encode(input_samples.data(),
+ input_samples.size(), input_payload);
+ RTC_CHECK_EQ(sizeof(input_payload), payload_len);
// Main loop.
webrtc::Clock* clock = webrtc::Clock::GetRealTimeClock();
@@ -81,9 +83,9 @@ int64_t NetEqPerformanceTest::Run(int runtime_ms,
}
if (!lost) {
// Insert packet.
- int error = neteq->InsertPacket(
- rtp_header, input_payload, payload_len,
- packet_input_time_ms * kSampRateHz / 1000);
+ int error =
+ neteq->InsertPacket(rtp_header, input_payload,
+ packet_input_time_ms * kSampRateHz / 1000);
if (error != NetEq::kOK)
return -1;
}
@@ -93,10 +95,10 @@ int64_t NetEqPerformanceTest::Run(int runtime_ms,
kInputBlockSizeSamples,
&rtp_header);
input_samples = audio_loop.GetNextBlock();
- if (!input_samples) return -1;
- payload_len = WebRtcPcm16b_Encode(const_cast<int16_t*>(input_samples),
- kInputBlockSizeSamples,
- input_payload);
+ if (input_samples.empty())
+ return -1;
+ payload_len = WebRtcPcm16b_Encode(input_samples.data(),
+ input_samples.size(), input_payload);
assert(payload_len == kInputBlockSizeSamples * sizeof(int16_t));
}
@@ -107,7 +109,7 @@ int64_t NetEqPerformanceTest::Run(int runtime_ms,
static const size_t kOutDataLen =
kOutputBlockSizeMs * kMaxSamplesPerMs * kMaxChannels;
int16_t out_data[kOutDataLen];
- int num_channels;
+ size_t num_channels;
size_t samples_per_channel;
int error = neteq->GetAudio(kOutDataLen, out_data, &samples_per_channel,
&num_channels, NULL);
diff --git a/webrtc/modules/audio_coding/neteq/tools/neteq_quality_test.cc b/webrtc/modules/audio_coding/neteq/tools/neteq_quality_test.cc
index 6826d1be74..9c64e0fb48 100644
--- a/webrtc/modules/audio_coding/neteq/tools/neteq_quality_test.cc
+++ b/webrtc/modules/audio_coding/neteq/tools/neteq_quality_test.cc
@@ -210,7 +210,7 @@ NetEqQualityTest::NetEqQualityTest(int block_duration_ms,
int out_sampling_khz,
NetEqDecoder decoder_type)
: decoder_type_(decoder_type),
- channels_(FLAGS_channels),
+ channels_(static_cast<size_t>(FLAGS_channels)),
decoded_time_ms_(0),
decodable_time_ms_(0),
drift_factor_(FLAGS_drift_factor),
@@ -292,7 +292,8 @@ bool GilbertElliotLoss::Lost() {
}
void NetEqQualityTest::SetUp() {
- ASSERT_EQ(0, neteq_->RegisterPayloadType(decoder_type_, kPayloadType));
+ ASSERT_EQ(0,
+ neteq_->RegisterPayloadType(decoder_type_, "noname", kPayloadType));
rtp_generator_->set_drift_factor(drift_factor_);
int units = block_duration_ms_ / kPacketLossTimeUnitMs;
@@ -377,9 +378,10 @@ int NetEqQualityTest::Transmit() {
<< " ms ";
if (payload_size_bytes_ > 0) {
if (!PacketLost()) {
- int ret = neteq_->InsertPacket(rtp_header_, &payload_[0],
- payload_size_bytes_,
- packet_input_time_ms * in_sampling_khz_);
+ int ret = neteq_->InsertPacket(
+ rtp_header_,
+ rtc::ArrayView<const uint8_t>(payload_.get(), payload_size_bytes_),
+ packet_input_time_ms * in_sampling_khz_);
if (ret != NetEq::kOK)
return -1;
Log() << "was sent.";
@@ -392,7 +394,7 @@ int NetEqQualityTest::Transmit() {
}
int NetEqQualityTest::DecodeBlock() {
- int channels;
+ size_t channels;
size_t samples;
int ret = neteq_->GetAudio(out_size_samples_ * channels_, &out_data_[0],
&samples, &channels, NULL);
diff --git a/webrtc/modules/audio_coding/neteq/tools/neteq_quality_test.h b/webrtc/modules/audio_coding/neteq/tools/neteq_quality_test.h
index e20be5796b..c2b2effee2 100644
--- a/webrtc/modules/audio_coding/neteq/tools/neteq_quality_test.h
+++ b/webrtc/modules/audio_coding/neteq/tools/neteq_quality_test.h
@@ -99,7 +99,7 @@ class NetEqQualityTest : public ::testing::Test {
std::ofstream& Log();
NetEqDecoder decoder_type_;
- const int channels_;
+ const size_t channels_;
private:
int decoded_time_ms_;
diff --git a/webrtc/modules/audio_coding/neteq/tools/neteq_rtpplay.cc b/webrtc/modules/audio_coding/neteq/tools/neteq_rtpplay.cc
index 0aaf8c71fd..3d79e5b5a2 100644
--- a/webrtc/modules/audio_coding/neteq/tools/neteq_rtpplay.cc
+++ b/webrtc/modules/audio_coding/neteq/tools/neteq_rtpplay.cc
@@ -26,7 +26,7 @@
#include "webrtc/base/checks.h"
#include "webrtc/base/safe_conversions.h"
#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/modules/audio_coding/codecs/pcm16b/include/pcm16b.h"
+#include "webrtc/modules/audio_coding/codecs/pcm16b/pcm16b.h"
#include "webrtc/modules/audio_coding/neteq/include/neteq.h"
#include "webrtc/modules/audio_coding/neteq/tools/input_audio_file.h"
#include "webrtc/modules/audio_coding/neteq/tools/output_audio_file.h"
@@ -34,7 +34,7 @@
#include "webrtc/modules/audio_coding/neteq/tools/packet.h"
#include "webrtc/modules/audio_coding/neteq/tools/rtc_event_log_source.h"
#include "webrtc/modules/audio_coding/neteq/tools/rtp_file_source.h"
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/system_wrappers/include/trace.h"
#include "webrtc/test/rtp_file_reader.h"
#include "webrtc/test/testsupport/fileutils.h"
@@ -189,8 +189,9 @@ std::string CodecName(webrtc::NetEqDecoder codec) {
void RegisterPayloadType(NetEq* neteq,
webrtc::NetEqDecoder codec,
+ const std::string& name,
google::int32 flag) {
- if (neteq->RegisterPayloadType(codec, static_cast<uint8_t>(flag))) {
+ if (neteq->RegisterPayloadType(codec, name, static_cast<uint8_t>(flag))) {
std::cerr << "Cannot register payload type " << flag << " as "
<< CodecName(codec) << std::endl;
exit(1);
@@ -200,30 +201,40 @@ void RegisterPayloadType(NetEq* neteq,
// Registers all decoders in |neteq|.
void RegisterPayloadTypes(NetEq* neteq) {
assert(neteq);
- RegisterPayloadType(neteq, webrtc::NetEqDecoder::kDecoderPCMu, FLAGS_pcmu);
- RegisterPayloadType(neteq, webrtc::NetEqDecoder::kDecoderPCMa, FLAGS_pcma);
- RegisterPayloadType(neteq, webrtc::NetEqDecoder::kDecoderILBC, FLAGS_ilbc);
- RegisterPayloadType(neteq, webrtc::NetEqDecoder::kDecoderISAC, FLAGS_isac);
- RegisterPayloadType(neteq, webrtc::NetEqDecoder::kDecoderISACswb,
+ RegisterPayloadType(neteq, webrtc::NetEqDecoder::kDecoderPCMu, "pcmu",
+ FLAGS_pcmu);
+ RegisterPayloadType(neteq, webrtc::NetEqDecoder::kDecoderPCMa, "pcma",
+ FLAGS_pcma);
+ RegisterPayloadType(neteq, webrtc::NetEqDecoder::kDecoderILBC, "ilbc",
+ FLAGS_ilbc);
+ RegisterPayloadType(neteq, webrtc::NetEqDecoder::kDecoderISAC, "isac",
+ FLAGS_isac);
+ RegisterPayloadType(neteq, webrtc::NetEqDecoder::kDecoderISACswb, "isac-swb",
FLAGS_isac_swb);
- RegisterPayloadType(neteq, webrtc::NetEqDecoder::kDecoderOpus, FLAGS_opus);
- RegisterPayloadType(neteq, webrtc::NetEqDecoder::kDecoderPCM16B,
+ RegisterPayloadType(neteq, webrtc::NetEqDecoder::kDecoderOpus, "opus",
+ FLAGS_opus);
+ RegisterPayloadType(neteq, webrtc::NetEqDecoder::kDecoderPCM16B, "pcm16-nb",
FLAGS_pcm16b);
- RegisterPayloadType(neteq, webrtc::NetEqDecoder::kDecoderPCM16Bwb,
+ RegisterPayloadType(neteq, webrtc::NetEqDecoder::kDecoderPCM16Bwb, "pcm16-wb",
FLAGS_pcm16b_wb);
RegisterPayloadType(neteq, webrtc::NetEqDecoder::kDecoderPCM16Bswb32kHz,
- FLAGS_pcm16b_swb32);
+ "pcm16-swb32", FLAGS_pcm16b_swb32);
RegisterPayloadType(neteq, webrtc::NetEqDecoder::kDecoderPCM16Bswb48kHz,
- FLAGS_pcm16b_swb48);
- RegisterPayloadType(neteq, webrtc::NetEqDecoder::kDecoderG722, FLAGS_g722);
- RegisterPayloadType(neteq, webrtc::NetEqDecoder::kDecoderAVT, FLAGS_avt);
- RegisterPayloadType(neteq, webrtc::NetEqDecoder::kDecoderRED, FLAGS_red);
- RegisterPayloadType(neteq, webrtc::NetEqDecoder::kDecoderCNGnb, FLAGS_cn_nb);
- RegisterPayloadType(neteq, webrtc::NetEqDecoder::kDecoderCNGwb, FLAGS_cn_wb);
+ "pcm16-swb48", FLAGS_pcm16b_swb48);
+ RegisterPayloadType(neteq, webrtc::NetEqDecoder::kDecoderG722, "g722",
+ FLAGS_g722);
+ RegisterPayloadType(neteq, webrtc::NetEqDecoder::kDecoderAVT, "avt",
+ FLAGS_avt);
+ RegisterPayloadType(neteq, webrtc::NetEqDecoder::kDecoderRED, "red",
+ FLAGS_red);
+ RegisterPayloadType(neteq, webrtc::NetEqDecoder::kDecoderCNGnb, "cng-nb",
+ FLAGS_cn_nb);
+ RegisterPayloadType(neteq, webrtc::NetEqDecoder::kDecoderCNGwb, "cng-wb",
+ FLAGS_cn_wb);
RegisterPayloadType(neteq, webrtc::NetEqDecoder::kDecoderCNGswb32kHz,
- FLAGS_cn_swb32);
+ "cng-swb32", FLAGS_cn_swb32);
RegisterPayloadType(neteq, webrtc::NetEqDecoder::kDecoderCNGswb48kHz,
- FLAGS_cn_swb48);
+ "cng-swb48", FLAGS_cn_swb48);
}
void PrintCodecMappingEntry(webrtc::NetEqDecoder codec, google::int32 flag) {
@@ -399,23 +410,12 @@ int main(int argc, char* argv[]) {
printf("Input file: %s\n", argv[1]);
- // TODO(ivoc): Modify the RtpFileSource::Create and RtcEventLogSource::Create
- // functions to return a nullptr on failure instead of crashing
- // the program.
-
- // This temporary solution uses a RtpFileReader directly to check if the file
- // is a valid RtpDump file.
bool is_rtp_dump = false;
- {
- rtc::scoped_ptr<webrtc::test::RtpFileReader> rtp_reader(
- webrtc::test::RtpFileReader::Create(
- webrtc::test::RtpFileReader::kRtpDump, argv[1]));
- if (rtp_reader)
- is_rtp_dump = true;
- }
rtc::scoped_ptr<webrtc::test::PacketSource> file_source;
webrtc::test::RtcEventLogSource* event_log_source = nullptr;
- if (is_rtp_dump) {
+ if (webrtc::test::RtpFileSource::ValidRtpDump(argv[1]) ||
+ webrtc::test::RtpFileSource::ValidPcap(argv[1])) {
+ is_rtp_dump = true;
file_source.reset(webrtc::test::RtpFileSource::Create(argv[1]));
} else {
event_log_source = webrtc::test::RtcEventLogSource::Create(argv[1]);
@@ -558,7 +558,7 @@ int main(int argc, char* argv[]) {
payload_ptr = payload.get();
}
int error = neteq->InsertPacket(
- rtp_header, payload_ptr, payload_len,
+ rtp_header, rtc::ArrayView<const uint8_t>(payload_ptr, payload_len),
static_cast<uint32_t>(packet->time_ms() * sample_rate_hz / 1000));
if (error != NetEq::kOK) {
if (neteq->LastError() == NetEq::kUnknownRtpPayloadType) {
@@ -609,7 +609,7 @@ int main(int argc, char* argv[]) {
static const size_t kOutDataLen =
kOutputBlockSizeMs * kMaxSamplesPerMs * kMaxChannels;
int16_t out_data[kOutDataLen];
- int num_channels;
+ size_t num_channels;
size_t samples_per_channel;
int error = neteq->GetAudio(kOutDataLen, out_data, &samples_per_channel,
&num_channels, NULL);
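Every registration above now threads a codec name string through to NetEq. The new call shape in isolation, with error handling condensed from the RegisterPayloadType helper in this file:
if (neteq->RegisterPayloadType(webrtc::NetEqDecoder::kDecoderOpus, "opus",
                               static_cast<uint8_t>(FLAGS_opus)) != 0) {
  std::cerr << "Cannot register payload type" << std::endl;
  exit(1);
}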
diff --git a/webrtc/modules/audio_coding/neteq/tools/packet.cc b/webrtc/modules/audio_coding/neteq/tools/packet.cc
index b8b27afdec..2b2fcc286e 100644
--- a/webrtc/modules/audio_coding/neteq/tools/packet.cc
+++ b/webrtc/modules/audio_coding/neteq/tools/packet.cc
@@ -12,8 +12,8 @@
#include <string.h>
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_header_parser.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_header_parser.h"
namespace webrtc {
namespace test {
diff --git a/webrtc/modules/audio_coding/neteq/tools/rtc_event_log_source.cc b/webrtc/modules/audio_coding/neteq/tools/rtc_event_log_source.cc
index 9b17ba8f64..dad72eaecd 100644
--- a/webrtc/modules/audio_coding/neteq/tools/rtc_event_log_source.cc
+++ b/webrtc/modules/audio_coding/neteq/tools/rtc_event_log_source.cc
@@ -18,7 +18,7 @@
#include "webrtc/base/checks.h"
#include "webrtc/call/rtc_event_log.h"
#include "webrtc/modules/audio_coding/neteq/tools/packet.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_header_parser.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_header_parser.h"
// Files generated at build-time by the protobuf compiler.
#ifdef WEBRTC_ANDROID_PLATFORM_BUILD
diff --git a/webrtc/modules/audio_coding/neteq/tools/rtc_event_log_source.h b/webrtc/modules/audio_coding/neteq/tools/rtc_event_log_source.h
index 7150bcfe89..90d5931224 100644
--- a/webrtc/modules/audio_coding/neteq/tools/rtc_event_log_source.h
+++ b/webrtc/modules/audio_coding/neteq/tools/rtc_event_log_source.h
@@ -16,7 +16,7 @@
#include "webrtc/base/constructormagic.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/modules/audio_coding/neteq/tools/packet_source.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
namespace webrtc {
diff --git a/webrtc/modules/audio_coding/neteq/tools/rtp_file_source.cc b/webrtc/modules/audio_coding/neteq/tools/rtp_file_source.cc
index 9681ad17ea..b7a3109c01 100644
--- a/webrtc/modules/audio_coding/neteq/tools/rtp_file_source.cc
+++ b/webrtc/modules/audio_coding/neteq/tools/rtp_file_source.cc
@@ -20,7 +20,7 @@
#include "webrtc/base/checks.h"
#include "webrtc/modules/audio_coding/neteq/tools/packet.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_header_parser.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_header_parser.h"
#include "webrtc/test/rtp_file_reader.h"
namespace webrtc {
@@ -32,6 +32,18 @@ RtpFileSource* RtpFileSource::Create(const std::string& file_name) {
return source;
}
+bool RtpFileSource::ValidRtpDump(const std::string& file_name) {
+ rtc::scoped_ptr<RtpFileReader> temp_file(
+ RtpFileReader::Create(RtpFileReader::kRtpDump, file_name));
+ return !!temp_file;
+}
+
+bool RtpFileSource::ValidPcap(const std::string& file_name) {
+ rtc::scoped_ptr<RtpFileReader> temp_file(
+ RtpFileReader::Create(RtpFileReader::kPcap, file_name));
+ return !!temp_file;
+}
+
RtpFileSource::~RtpFileSource() {
}
diff --git a/webrtc/modules/audio_coding/neteq/tools/rtp_file_source.h b/webrtc/modules/audio_coding/neteq/tools/rtp_file_source.h
index d0856a819c..2febf68b91 100644
--- a/webrtc/modules/audio_coding/neteq/tools/rtp_file_source.h
+++ b/webrtc/modules/audio_coding/neteq/tools/rtp_file_source.h
@@ -18,7 +18,7 @@
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/common_types.h"
#include "webrtc/modules/audio_coding/neteq/tools/packet_source.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
namespace webrtc {
@@ -34,6 +34,10 @@ class RtpFileSource : public PacketSource {
// opened, or has the wrong format, NULL will be returned.
static RtpFileSource* Create(const std::string& file_name);
+ // Checks whether a file is a valid RTP dump or PCAP (Wireshark) file.
+ static bool ValidRtpDump(const std::string& file_name);
+ static bool ValidPcap(const std::string& file_name);
+
virtual ~RtpFileSource();
// Registers an RTP header extension and binds it to |id|.
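Sketch of how the two new probes replace the inline RtpFileReader check removed from neteq_rtpplay.cc earlier in this diff (file_name and file_source as in that file):
bool is_rtp_dump = webrtc::test::RtpFileSource::ValidRtpDump(file_name) ||
                   webrtc::test::RtpFileSource::ValidPcap(file_name);
if (is_rtp_dump)
  file_source.reset(webrtc::test::RtpFileSource::Create(file_name));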
diff --git a/webrtc/modules/audio_coding/neteq/tools/rtp_generator.h b/webrtc/modules/audio_coding/neteq/tools/rtp_generator.h
index 6c16192daa..53371be8f6 100644
--- a/webrtc/modules/audio_coding/neteq/tools/rtp_generator.h
+++ b/webrtc/modules/audio_coding/neteq/tools/rtp_generator.h
@@ -12,7 +12,7 @@
#define WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_RTP_GENERATOR_H_
#include "webrtc/base/constructormagic.h"
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/typedefs.h"
namespace webrtc {
diff --git a/webrtc/modules/audio_coding/main/test/ACMTest.h b/webrtc/modules/audio_coding/test/ACMTest.h
index f73961f5e5..d7e87d34ba 100644
--- a/webrtc/modules/audio_coding/main/test/ACMTest.h
+++ b/webrtc/modules/audio_coding/test/ACMTest.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_ACMTEST_H_
-#define WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_ACMTEST_H_
+#ifndef WEBRTC_MODULES_AUDIO_CODING_TEST_ACMTEST_H_
+#define WEBRTC_MODULES_AUDIO_CODING_TEST_ACMTEST_H_
class ACMTest {
public:
@@ -18,4 +18,4 @@ class ACMTest {
virtual void Perform() = 0;
};
-#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_ACMTEST_H_
+#endif // WEBRTC_MODULES_AUDIO_CODING_TEST_ACMTEST_H_
diff --git a/webrtc/modules/audio_coding/main/test/APITest.cc b/webrtc/modules/audio_coding/test/APITest.cc
index 1313f35332..bf04d7c825 100644
--- a/webrtc/modules/audio_coding/main/test/APITest.cc
+++ b/webrtc/modules/audio_coding/test/APITest.cc
@@ -8,7 +8,7 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/audio_coding/main/test/APITest.h"
+#include "webrtc/modules/audio_coding/test/APITest.h"
#include <ctype.h>
#include <stdio.h>
@@ -20,13 +20,13 @@
#include <string>
#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/base/platform_thread.h"
#include "webrtc/common.h"
#include "webrtc/common_types.h"
#include "webrtc/engine_configurations.h"
-#include "webrtc/modules/audio_coding/main/acm2/acm_common_defs.h"
-#include "webrtc/modules/audio_coding/main/test/utility.h"
+#include "webrtc/modules/audio_coding/acm2/acm_common_defs.h"
+#include "webrtc/modules/audio_coding/test/utility.h"
#include "webrtc/system_wrappers/include/event_wrapper.h"
-#include "webrtc/system_wrappers/include/thread_wrapper.h"
#include "webrtc/system_wrappers/include/tick_util.h"
#include "webrtc/system_wrappers/include/trace.h"
#include "webrtc/test/testsupport/fileutils.h"
@@ -36,12 +36,6 @@ namespace webrtc {
#define TEST_DURATION_SEC 600
#define NUMBER_OF_SENDER_TESTS 6
#define MAX_FILE_NAME_LENGTH_BYTE 500
-#define CHECK_THREAD_NULLITY(myThread, S) \
- if(myThread != NULL) { \
- (myThread)->Start(); \
- } else { \
- ADD_FAILURE() << S; \
- }
void APITest::Wait(uint32_t waitLengthMs) {
if (_randomTest) {
@@ -522,38 +516,34 @@ void APITest::Perform() {
//--- THREADS
// A
// PUSH
- rtc::scoped_ptr<ThreadWrapper> myPushAudioThreadA =
- ThreadWrapper::CreateThread(PushAudioThreadA, this, "PushAudioThreadA");
- CHECK_THREAD_NULLITY(myPushAudioThreadA, "Unable to start A::PUSH thread");
+ rtc::PlatformThread myPushAudioThreadA(PushAudioThreadA, this,
+ "PushAudioThreadA");
+ myPushAudioThreadA.Start();
// PULL
- rtc::scoped_ptr<ThreadWrapper> myPullAudioThreadA =
- ThreadWrapper::CreateThread(PullAudioThreadA, this, "PullAudioThreadA");
- CHECK_THREAD_NULLITY(myPullAudioThreadA, "Unable to start A::PULL thread");
+ rtc::PlatformThread myPullAudioThreadA(PullAudioThreadA, this,
+ "PullAudioThreadA");
+ myPullAudioThreadA.Start();
// Process
- rtc::scoped_ptr<ThreadWrapper> myProcessThreadA = ThreadWrapper::CreateThread(
- ProcessThreadA, this, "ProcessThreadA");
- CHECK_THREAD_NULLITY(myProcessThreadA, "Unable to start A::Process thread");
+ rtc::PlatformThread myProcessThreadA(ProcessThreadA, this, "ProcessThreadA");
+ myProcessThreadA.Start();
// API
- rtc::scoped_ptr<ThreadWrapper> myAPIThreadA = ThreadWrapper::CreateThread(
- APIThreadA, this, "APIThreadA");
- CHECK_THREAD_NULLITY(myAPIThreadA, "Unable to start A::API thread");
+ rtc::PlatformThread myAPIThreadA(APIThreadA, this, "APIThreadA");
+ myAPIThreadA.Start();
// B
// PUSH
- rtc::scoped_ptr<ThreadWrapper> myPushAudioThreadB =
- ThreadWrapper::CreateThread(PushAudioThreadB, this, "PushAudioThreadB");
- CHECK_THREAD_NULLITY(myPushAudioThreadB, "Unable to start B::PUSH thread");
+ rtc::PlatformThread myPushAudioThreadB(PushAudioThreadB, this,
+ "PushAudioThreadB");
+ myPushAudioThreadB.Start();
// PULL
- rtc::scoped_ptr<ThreadWrapper> myPullAudioThreadB =
- ThreadWrapper::CreateThread(PullAudioThreadB, this, "PullAudioThreadB");
- CHECK_THREAD_NULLITY(myPullAudioThreadB, "Unable to start B::PULL thread");
+ rtc::PlatformThread myPullAudioThreadB(PullAudioThreadB, this,
+ "PullAudioThreadB");
+ myPullAudioThreadB.Start();
// Process
- rtc::scoped_ptr<ThreadWrapper> myProcessThreadB = ThreadWrapper::CreateThread(
- ProcessThreadB, this, "ProcessThreadB");
- CHECK_THREAD_NULLITY(myProcessThreadB, "Unable to start B::Process thread");
+ rtc::PlatformThread myProcessThreadB(ProcessThreadB, this, "ProcessThreadB");
+ myProcessThreadB.Start();
// API
- rtc::scoped_ptr<ThreadWrapper> myAPIThreadB = ThreadWrapper::CreateThread(
- APIThreadB, this, "APIThreadB");
- CHECK_THREAD_NULLITY(myAPIThreadB, "Unable to start B::API thread");
+ rtc::PlatformThread myAPIThreadB(APIThreadB, this, "APIThreadB");
+ myAPIThreadB.Start();
//_apiEventA->StartTimer(true, 5000);
//_apiEventB->StartTimer(true, 5000);
@@ -587,15 +577,15 @@ void APITest::Perform() {
//(unsigned long)((unsigned long)TEST_DURATION_SEC * (unsigned long)1000));
delete completeEvent;
- myPushAudioThreadA->Stop();
- myPullAudioThreadA->Stop();
- myProcessThreadA->Stop();
- myAPIThreadA->Stop();
+ myPushAudioThreadA.Stop();
+ myPullAudioThreadA.Stop();
+ myProcessThreadA.Stop();
+ myAPIThreadA.Stop();
- myPushAudioThreadB->Stop();
- myPullAudioThreadB->Stop();
- myProcessThreadB->Stop();
- myAPIThreadB->Stop();
+ myPushAudioThreadB.Stop();
+ myPullAudioThreadB.Stop();
+ myProcessThreadB.Stop();
+ myAPIThreadB.Stop();
}
void APITest::CheckVADStatus(char side) {
@@ -823,9 +813,11 @@ void APITest::TestRegisteration(char sendSide) {
exit(-1);
}
- CodecInst myCodec;
- if (sendACM->SendCodec(&myCodec) < 0) {
- AudioCodingModule::Codec(_codecCntrA, &myCodec);
+ auto myCodec = sendACM->SendCodec();
+ if (!myCodec) {
+ CodecInst ci;
+ AudioCodingModule::Codec(_codecCntrA, &ci);
+ myCodec = rtc::Optional<CodecInst>(ci);
}
if (!_randomTest) {
@@ -837,12 +829,12 @@ void APITest::TestRegisteration(char sendSide) {
*thereIsDecoder = false;
}
//myEvent->Wait(20);
- CHECK_ERROR_MT(receiveACM->UnregisterReceiveCodec(myCodec.pltype));
+ CHECK_ERROR_MT(receiveACM->UnregisterReceiveCodec(myCodec->pltype));
Wait(1000);
- int currentPayload = myCodec.pltype;
+ int currentPayload = myCodec->pltype;
- if (!FixedPayloadTypeCodec(myCodec.plname)) {
+ if (!FixedPayloadTypeCodec(myCodec->plname)) {
int32_t i;
for (i = 0; i < 32; i++) {
if (!_payloadUsed[i]) {
@@ -850,9 +842,9 @@ void APITest::TestRegisteration(char sendSide) {
fprintf(stdout,
"Register receive codec with new Payload, AUDIO BACK.\n");
}
- //myCodec.pltype = i + 96;
- //CHECK_ERROR_MT(receiveACM->RegisterReceiveCodec(myCodec));
- //CHECK_ERROR_MT(sendACM->RegisterSendCodec(myCodec));
+ //myCodec->pltype = i + 96;
+ //CHECK_ERROR_MT(receiveACM->RegisterReceiveCodec(*myCodec));
+ //CHECK_ERROR_MT(sendACM->RegisterSendCodec(*myCodec));
//myEvent->Wait(20);
//{
// WriteLockScoped wl(_apiTestRWLock);
@@ -868,17 +860,17 @@ void APITest::TestRegisteration(char sendSide) {
// *thereIsDecoder = false;
//}
//myEvent->Wait(20);
- //CHECK_ERROR_MT(receiveACM->UnregisterReceiveCodec(myCodec.pltype));
+ //CHECK_ERROR_MT(receiveACM->UnregisterReceiveCodec(myCodec->pltype));
Wait(1000);
- myCodec.pltype = currentPayload;
+ myCodec->pltype = currentPayload;
if (!_randomTest) {
fprintf(stdout,
"Register receive codec with default Payload, AUDIO BACK.\n");
fflush (stdout);
}
- CHECK_ERROR_MT(receiveACM->RegisterReceiveCodec(myCodec));
- //CHECK_ERROR_MT(sendACM->RegisterSendCodec(myCodec));
+ CHECK_ERROR_MT(receiveACM->RegisterReceiveCodec(*myCodec));
+ //CHECK_ERROR_MT(sendACM->RegisterSendCodec(*myCodec));
myEvent->Wait(20);
{
WriteLockScoped wl(_apiTestRWLock);
@@ -890,7 +882,7 @@ void APITest::TestRegisteration(char sendSide) {
}
}
if (i == 32) {
- CHECK_ERROR_MT(receiveACM->RegisterReceiveCodec(myCodec));
+ CHECK_ERROR_MT(receiveACM->RegisterReceiveCodec(*myCodec));
{
WriteLockScoped wl(_apiTestRWLock);
*thereIsDecoder = true;
@@ -902,9 +894,9 @@ void APITest::TestRegisteration(char sendSide) {
"Register receive codec with fixed Payload, AUDIO BACK.\n");
fflush (stdout);
}
- CHECK_ERROR_MT(receiveACM->RegisterReceiveCodec(myCodec));
- //CHECK_ERROR_MT(receiveACM->UnregisterReceiveCodec(myCodec.pltype));
- //CHECK_ERROR_MT(receiveACM->RegisterReceiveCodec(myCodec));
+ CHECK_ERROR_MT(receiveACM->RegisterReceiveCodec(*myCodec));
+ //CHECK_ERROR_MT(receiveACM->UnregisterReceiveCodec(myCodec->pltype));
+ //CHECK_ERROR_MT(receiveACM->RegisterReceiveCodec(*myCodec));
myEvent->Wait(20);
{
WriteLockScoped wl(_apiTestRWLock);
@@ -1001,22 +993,17 @@ void APITest::TestSendVAD(char side) {
}
void APITest::CurrentCodec(char side) {
- CodecInst myCodec;
- if (side == 'A') {
- _acmA->SendCodec(&myCodec);
- } else {
- _acmB->SendCodec(&myCodec);
- }
+ auto myCodec = (side == 'A' ? _acmA : _acmB)->SendCodec();
if (!_randomTest) {
fprintf(stdout, "\n\n");
fprintf(stdout, "Send codec in Side A\n");
fprintf(stdout, "----------------------------\n");
- fprintf(stdout, "Name................. %s\n", myCodec.plname);
- fprintf(stdout, "Sampling Frequency... %d\n", myCodec.plfreq);
- fprintf(stdout, "Rate................. %d\n", myCodec.rate);
- fprintf(stdout, "Payload-type......... %d\n", myCodec.pltype);
- fprintf(stdout, "Packet-size.......... %d\n", myCodec.pacsize);
+ fprintf(stdout, "Name................. %s\n", myCodec->plname);
+ fprintf(stdout, "Sampling Frequency... %d\n", myCodec->plfreq);
+ fprintf(stdout, "Rate................. %d\n", myCodec->rate);
+ fprintf(stdout, "Payload-type......... %d\n", myCodec->pltype);
+ fprintf(stdout, "Packet-size.......... %d\n", myCodec->pacsize);
}
Wait(100);
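The thread migration above follows a single pattern: a heap-allocated ThreadWrapper plus NULL-check macro becomes a stack-allocated rtc::PlatformThread. In isolation (ThreadFunc and the thread name are illustrative):
// PlatformThread is a plain value; construction cannot fail, so the old
// CHECK_THREAD_NULLITY macro has nothing left to check.
rtc::PlatformThread worker(ThreadFunc, this, "WorkerThread");
worker.Start();
// ... run test ...
worker.Stop();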
diff --git a/webrtc/modules/audio_coding/main/test/APITest.h b/webrtc/modules/audio_coding/test/APITest.h
index d4c5b1ecdd..a1937c2b00 100644
--- a/webrtc/modules/audio_coding/main/test/APITest.h
+++ b/webrtc/modules/audio_coding/test/APITest.h
@@ -8,15 +8,15 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_APITEST_H_
-#define WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_APITEST_H_
+#ifndef WEBRTC_MODULES_AUDIO_CODING_TEST_APITEST_H_
+#define WEBRTC_MODULES_AUDIO_CODING_TEST_APITEST_H_
#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/modules/audio_coding/main/include/audio_coding_module.h"
-#include "webrtc/modules/audio_coding/main/test/ACMTest.h"
-#include "webrtc/modules/audio_coding/main/test/Channel.h"
-#include "webrtc/modules/audio_coding/main/test/PCMFile.h"
-#include "webrtc/modules/audio_coding/main/test/utility.h"
+#include "webrtc/modules/audio_coding/include/audio_coding_module.h"
+#include "webrtc/modules/audio_coding/test/ACMTest.h"
+#include "webrtc/modules/audio_coding/test/Channel.h"
+#include "webrtc/modules/audio_coding/test/PCMFile.h"
+#include "webrtc/modules/audio_coding/test/utility.h"
#include "webrtc/system_wrappers/include/event_wrapper.h"
#include "webrtc/system_wrappers/include/rw_lock_wrapper.h"
@@ -160,4 +160,4 @@ class APITest : public ACMTest {
} // namespace webrtc
-#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_APITEST_H_
+#endif // WEBRTC_MODULES_AUDIO_CODING_TEST_APITEST_H_
diff --git a/webrtc/modules/audio_coding/main/test/Channel.cc b/webrtc/modules/audio_coding/test/Channel.cc
index 02bd783a38..31521fe1e3 100644
--- a/webrtc/modules/audio_coding/main/test/Channel.cc
+++ b/webrtc/modules/audio_coding/test/Channel.cc
@@ -8,7 +8,7 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/audio_coding/main/test/Channel.h"
+#include "webrtc/modules/audio_coding/test/Channel.h"
#include <assert.h>
#include <iostream>
diff --git a/webrtc/modules/audio_coding/main/test/Channel.h b/webrtc/modules/audio_coding/test/Channel.h
index 39d4dabd98..b047aa9909 100644
--- a/webrtc/modules/audio_coding/main/test/Channel.h
+++ b/webrtc/modules/audio_coding/test/Channel.h
@@ -8,13 +8,13 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_CHANNEL_H_
-#define WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_CHANNEL_H_
+#ifndef WEBRTC_MODULES_AUDIO_CODING_TEST_CHANNEL_H_
+#define WEBRTC_MODULES_AUDIO_CODING_TEST_CHANNEL_H_
#include <stdio.h>
-#include "webrtc/modules/audio_coding/main/include/audio_coding_module.h"
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/audio_coding/include/audio_coding_module.h"
+#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/typedefs.h"
namespace webrtc {
@@ -127,4 +127,4 @@ class Channel : public AudioPacketizationCallback {
} // namespace webrtc
-#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_CHANNEL_H_
+#endif // WEBRTC_MODULES_AUDIO_CODING_TEST_CHANNEL_H_
diff --git a/webrtc/modules/audio_coding/main/test/EncodeDecodeTest.cc b/webrtc/modules/audio_coding/test/EncodeDecodeTest.cc
index d062af0fb9..ba3c8d9ad2 100644
--- a/webrtc/modules/audio_coding/main/test/EncodeDecodeTest.cc
+++ b/webrtc/modules/audio_coding/test/EncodeDecodeTest.cc
@@ -8,7 +8,7 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/audio_coding/main/test/EncodeDecodeTest.h"
+#include "webrtc/modules/audio_coding/test/EncodeDecodeTest.h"
#include <sstream>
#include <stdio.h>
@@ -17,9 +17,9 @@
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/common_types.h"
-#include "webrtc/modules/audio_coding/main/include/audio_coding_module.h"
-#include "webrtc/modules/audio_coding/main/acm2/acm_common_defs.h"
-#include "webrtc/modules/audio_coding/main/test/utility.h"
+#include "webrtc/modules/audio_coding/include/audio_coding_module.h"
+#include "webrtc/modules/audio_coding/acm2/acm_common_defs.h"
+#include "webrtc/modules/audio_coding/test/utility.h"
#include "webrtc/system_wrappers/include/trace.h"
#include "webrtc/test/testsupport/fileutils.h"
@@ -52,7 +52,7 @@ Sender::Sender()
}
void Sender::Setup(AudioCodingModule *acm, RTPStream *rtpStream,
- std::string in_file_name, int sample_rate, int channels) {
+ std::string in_file_name, int sample_rate, size_t channels) {
struct CodecInst sendCodec;
int noOfCodecs = acm->NumberOfCodecs();
int codecNo;
@@ -63,6 +63,10 @@ void Sender::Setup(AudioCodingModule *acm, RTPStream *rtpStream,
if (channels == 2) {
_pcmFile.ReadStereo(true);
}
+ // Set test length to 500 ms (50 blocks of 10 ms each).
+ _pcmFile.SetNum10MsBlocksToRead(50);
+ // Fast-forward 1 second (100 blocks) since the file starts with silence.
+ _pcmFile.FastForward(100);
// Set the codec for the current test.
if ((testMode == 0) || (testMode == 1)) {
@@ -119,7 +123,7 @@ Receiver::Receiver()
}
void Receiver::Setup(AudioCodingModule *acm, RTPStream *rtpStream,
- std::string out_file_name, int channels) {
+ std::string out_file_name, size_t channels) {
struct CodecInst recvCodec = CodecInst();
int noOfCodecs;
EXPECT_EQ(0, acm->InitializeReceiver());
@@ -339,8 +343,7 @@ std::string EncodeDecodeTest::EncodeToFile(int fileType,
_sender.codeId = codeId;
_sender.Setup(acm.get(), &rtpFile, "audio_coding/testfile32kHz", 32000, 1);
- struct CodecInst sendCodecInst;
- if (acm->SendCodec(&sendCodecInst) >= 0) {
+ if (acm->SendCodec()) {
_sender.Run();
}
_sender.Teardown();
diff --git a/webrtc/modules/audio_coding/main/test/EncodeDecodeTest.h b/webrtc/modules/audio_coding/test/EncodeDecodeTest.h
index 4ad92cec15..f9a9a5bb52 100644
--- a/webrtc/modules/audio_coding/main/test/EncodeDecodeTest.h
+++ b/webrtc/modules/audio_coding/test/EncodeDecodeTest.h
@@ -8,16 +8,16 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_ENCODEDECODETEST_H_
-#define WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_ENCODEDECODETEST_H_
+#ifndef WEBRTC_MODULES_AUDIO_CODING_TEST_ENCODEDECODETEST_H_
+#define WEBRTC_MODULES_AUDIO_CODING_TEST_ENCODEDECODETEST_H_
#include <stdio.h>
#include <string.h>
-#include "webrtc/modules/audio_coding/main/include/audio_coding_module.h"
-#include "webrtc/modules/audio_coding/main/test/ACMTest.h"
-#include "webrtc/modules/audio_coding/main/test/PCMFile.h"
-#include "webrtc/modules/audio_coding/main/test/RTPFile.h"
+#include "webrtc/modules/audio_coding/include/audio_coding_module.h"
+#include "webrtc/modules/audio_coding/test/ACMTest.h"
+#include "webrtc/modules/audio_coding/test/PCMFile.h"
+#include "webrtc/modules/audio_coding/test/RTPFile.h"
#include "webrtc/typedefs.h"
namespace webrtc {
@@ -48,7 +48,7 @@ class Sender {
public:
Sender();
void Setup(AudioCodingModule *acm, RTPStream *rtpStream,
- std::string in_file_name, int sample_rate, int channels);
+ std::string in_file_name, int sample_rate, size_t channels);
void Teardown();
void Run();
bool Add10MsData();
@@ -71,7 +71,7 @@ class Receiver {
Receiver();
virtual ~Receiver() {};
void Setup(AudioCodingModule *acm, RTPStream *rtpStream,
- std::string out_file_name, int channels);
+ std::string out_file_name, size_t channels);
void Teardown();
void Run();
virtual bool IncomingPacket();
@@ -120,4 +120,4 @@ class EncodeDecodeTest : public ACMTest {
} // namespace webrtc
-#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_ENCODEDECODETEST_H_
+#endif // WEBRTC_MODULES_AUDIO_CODING_TEST_ENCODEDECODETEST_H_
diff --git a/webrtc/modules/audio_coding/main/test/PCMFile.cc b/webrtc/modules/audio_coding/test/PCMFile.cc
index d0ae7830de..9289d73baa 100644
--- a/webrtc/modules/audio_coding/main/test/PCMFile.cc
+++ b/webrtc/modules/audio_coding/test/PCMFile.cc
@@ -8,14 +8,14 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "PCMFile.h"
+#include "webrtc/modules/audio_coding/test/PCMFile.h"
#include <ctype.h>
#include <stdio.h>
#include <string.h>
#include "testing/gtest/include/gtest/gtest.h"
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
namespace webrtc {
@@ -137,6 +137,9 @@ int32_t PCMFile::Read10MsData(AudioFrame& audio_frame) {
audio_frame.num_channels_ = channels;
audio_frame.timestamp_ = timestamp_;
timestamp_ += samples_10ms_;
+ ++blocks_read_;
+ if (num_10ms_blocks_to_read_ && blocks_read_ >= *num_10ms_blocks_to_read_)
+ end_of_file_ = true;
return samples_10ms_;
}
@@ -182,11 +185,21 @@ void PCMFile::Write10MsData(int16_t* playout_buffer, size_t length_smpls) {
void PCMFile::Close() {
fclose(pcm_file_);
pcm_file_ = NULL;
+ blocks_read_ = 0;
+}
+
+void PCMFile::FastForward(int num_10ms_blocks) {
+ const int channels = read_stereo_ ? 2 : 1;
+ long num_bytes_to_move =
+ num_10ms_blocks * sizeof(int16_t) * samples_10ms_ * channels;
+ int error = fseek(pcm_file_, num_bytes_to_move, SEEK_CUR);
+ RTC_DCHECK_EQ(error, 0);
}
void PCMFile::Rewind() {
rewind(pcm_file_);
end_of_file_ = false;
+ blocks_read_ = 0;
}
bool PCMFile::Rewinded() {
@@ -201,4 +214,8 @@ void PCMFile::ReadStereo(bool is_stereo) {
read_stereo_ = is_stereo;
}
+void PCMFile::SetNum10MsBlocksToRead(int value) {
+ num_10ms_blocks_to_read_ = rtc::Optional<int>(value);
+}
+
} // namespace webrtc
diff --git a/webrtc/modules/audio_coding/main/test/PCMFile.h b/webrtc/modules/audio_coding/test/PCMFile.h
index 8353898f03..840933a1bd 100644
--- a/webrtc/modules/audio_coding/main/test/PCMFile.h
+++ b/webrtc/modules/audio_coding/test/PCMFile.h
@@ -8,15 +8,16 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_PCMFILE_H_
-#define WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_PCMFILE_H_
+#ifndef WEBRTC_MODULES_AUDIO_CODING_TEST_PCMFILE_H_
+#define WEBRTC_MODULES_AUDIO_CODING_TEST_PCMFILE_H_
#include <stdio.h>
#include <stdlib.h>
#include <string>
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/base/optional.h"
+#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/typedefs.h"
namespace webrtc {
@@ -45,12 +46,21 @@ class PCMFile {
bool EndOfFile() const {
return end_of_file_;
}
+ // Moves forward the specified number of 10 ms blocks. If a limit has been set
+ // with SetNum10MsBlocksToRead, fast-forwarding does not count towards this
+ // limit.
+ void FastForward(int num_10ms_blocks);
void Rewind();
static int16_t ChooseFile(std::string* file_name, int16_t max_len,
uint16_t* frequency_hz);
bool Rewinded();
void SaveStereo(bool is_stereo = true);
void ReadStereo(bool is_stereo = true);
+ // If set, the reading will stop after the specified number of blocks have
+ // been read. When that has happened, EndOfFile() will return true. Calling
+ // Rewind() will reset the counter and start over.
+ void SetNum10MsBlocksToRead(int value);
+
private:
FILE* pcm_file_;
uint16_t samples_10ms_;
@@ -61,8 +71,10 @@ class PCMFile {
uint32_t timestamp_;
bool read_stereo_;
bool save_stereo_;
+ rtc::Optional<int> num_10ms_blocks_to_read_;
+ int blocks_read_ = 0;
};
} // namespace webrtc
-#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_PCMFILE_H_
+#endif // WEBRTC_MODULES_AUDIO_CODING_TEST_PCMFILE_H_
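Usage sketch for the two new PCMFile knobs, matching the Sender::Setup change earlier in this diff:
_pcmFile.SetNum10MsBlocksToRead(50);  // EndOfFile() turns true after 500 ms.
_pcmFile.FastForward(100);            // Skip 1 s of leading silence; this
                                      // does not count toward the limit.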
diff --git a/webrtc/modules/audio_coding/main/test/PacketLossTest.cc b/webrtc/modules/audio_coding/test/PacketLossTest.cc
index f19d491d2d..ad3e83403e 100644
--- a/webrtc/modules/audio_coding/main/test/PacketLossTest.cc
+++ b/webrtc/modules/audio_coding/test/PacketLossTest.cc
@@ -8,7 +8,7 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/audio_coding/main/test/PacketLossTest.h"
+#include "webrtc/modules/audio_coding/test/PacketLossTest.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/common.h"
@@ -143,8 +143,7 @@ void PacketLossTest::Perform() {
sender_->Setup(acm.get(), &rtpFile, in_file_name_, sample_rate_hz_, channels_,
expected_loss_rate_);
- struct CodecInst sendCodecInst;
- if (acm->SendCodec(&sendCodecInst) >= 0) {
+ if (acm->SendCodec()) {
sender_->Run();
}
sender_->Teardown();
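The SendCodec() change that runs through this patch replaces the out-parameter form, int32_t SendCodec(CodecInst*), with a value-returning form that can signal "no send codec registered"; judging by the call sites it returns an rtc::Optional<CodecInst>, tested in boolean context and dereferenced with * or ->. A sketch of the call-site idiom, assuming that return type:

    // Old: CodecInst codec; if (acm->SendCodec(&codec) >= 0) { ... }
    // New: the returned rtc::Optional<CodecInst> is empty when no send
    // codec has been registered.
    auto send_codec = acm->SendCodec();
    if (send_codec) {
      printf("send codec: %s\n", send_codec->plname);
    }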
diff --git a/webrtc/modules/audio_coding/main/test/PacketLossTest.h b/webrtc/modules/audio_coding/test/PacketLossTest.h
index d25dea264f..f3570ae1ca 100644
--- a/webrtc/modules/audio_coding/main/test/PacketLossTest.h
+++ b/webrtc/modules/audio_coding/test/PacketLossTest.h
@@ -8,12 +8,12 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_PACKETLOSSTEST_H_
-#define WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_PACKETLOSSTEST_H_
+#ifndef WEBRTC_MODULES_AUDIO_CODING_TEST_PACKETLOSSTEST_H_
+#define WEBRTC_MODULES_AUDIO_CODING_TEST_PACKETLOSSTEST_H_
#include <string>
#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/modules/audio_coding/main/test/EncodeDecodeTest.h"
+#include "webrtc/modules/audio_coding/test/EncodeDecodeTest.h"
namespace webrtc {
@@ -64,4 +64,4 @@ class PacketLossTest : public ACMTest {
} // namespace webrtc
-#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_PACKETLOSSTEST_H_
+#endif // WEBRTC_MODULES_AUDIO_CODING_TEST_PACKETLOSSTEST_H_
diff --git a/webrtc/modules/audio_coding/main/test/RTPFile.cc b/webrtc/modules/audio_coding/test/RTPFile.cc
index 60777178c6..60777178c6 100644
--- a/webrtc/modules/audio_coding/main/test/RTPFile.cc
+++ b/webrtc/modules/audio_coding/test/RTPFile.cc
diff --git a/webrtc/modules/audio_coding/main/test/RTPFile.h b/webrtc/modules/audio_coding/test/RTPFile.h
index c79b63e164..696d41ebd2 100644
--- a/webrtc/modules/audio_coding/main/test/RTPFile.h
+++ b/webrtc/modules/audio_coding/test/RTPFile.h
@@ -8,14 +8,14 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_RTPFILE_H_
-#define WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_RTPFILE_H_
+#ifndef WEBRTC_MODULES_AUDIO_CODING_TEST_RTPFILE_H_
+#define WEBRTC_MODULES_AUDIO_CODING_TEST_RTPFILE_H_
#include <stdio.h>
#include <queue>
-#include "webrtc/modules/audio_coding/main/include/audio_coding_module.h"
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/audio_coding/include/audio_coding_module.h"
+#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/system_wrappers/include/rw_lock_wrapper.h"
#include "webrtc/typedefs.h"
@@ -123,4 +123,4 @@ class RTPFile : public RTPStream {
} // namespace webrtc
-#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_RTPFILE_H_
+#endif // WEBRTC_MODULES_AUDIO_CODING_TEST_RTPFILE_H_
diff --git a/webrtc/modules/audio_coding/main/test/SpatialAudio.cc b/webrtc/modules/audio_coding/test/SpatialAudio.cc
index 17d4fc88b2..c9f8080826 100644
--- a/webrtc/modules/audio_coding/main/test/SpatialAudio.cc
+++ b/webrtc/modules/audio_coding/test/SpatialAudio.cc
@@ -14,7 +14,7 @@
#include <math.h>
#include "webrtc/common_types.h"
-#include "webrtc/modules/audio_coding/main/test/SpatialAudio.h"
+#include "webrtc/modules/audio_coding/test/SpatialAudio.h"
#include "webrtc/system_wrappers/include/trace.h"
#include "webrtc/system_wrappers/include/trace.h"
#include "webrtc/test/testsupport/fileutils.h"
diff --git a/webrtc/modules/audio_coding/main/test/SpatialAudio.h b/webrtc/modules/audio_coding/test/SpatialAudio.h
index fc258977f3..3548cc98eb 100644
--- a/webrtc/modules/audio_coding/main/test/SpatialAudio.h
+++ b/webrtc/modules/audio_coding/test/SpatialAudio.h
@@ -8,15 +8,15 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_SPATIALAUDIO_H_
-#define WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_SPATIALAUDIO_H_
+#ifndef WEBRTC_MODULES_AUDIO_CODING_TEST_SPATIALAUDIO_H_
+#define WEBRTC_MODULES_AUDIO_CODING_TEST_SPATIALAUDIO_H_
#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/modules/audio_coding/main/include/audio_coding_module.h"
-#include "webrtc/modules/audio_coding/main/test/ACMTest.h"
-#include "webrtc/modules/audio_coding/main/test/Channel.h"
-#include "webrtc/modules/audio_coding/main/test/PCMFile.h"
-#include "webrtc/modules/audio_coding/main/test/utility.h"
+#include "webrtc/modules/audio_coding/include/audio_coding_module.h"
+#include "webrtc/modules/audio_coding/test/ACMTest.h"
+#include "webrtc/modules/audio_coding/test/Channel.h"
+#include "webrtc/modules/audio_coding/test/PCMFile.h"
+#include "webrtc/modules/audio_coding/test/utility.h"
#define MAX_FILE_NAME_LENGTH_BYTE 500
@@ -44,4 +44,4 @@ class SpatialAudio : public ACMTest {
} // namespace webrtc
-#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_SPATIALAUDIO_H_
+#endif // WEBRTC_MODULES_AUDIO_CODING_TEST_SPATIALAUDIO_H_
diff --git a/webrtc/modules/audio_coding/main/test/TestAllCodecs.cc b/webrtc/modules/audio_coding/test/TestAllCodecs.cc
index 19189b6b8f..bacfd37188 100644
--- a/webrtc/modules/audio_coding/main/test/TestAllCodecs.cc
+++ b/webrtc/modules/audio_coding/test/TestAllCodecs.cc
@@ -8,7 +8,7 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/audio_coding/main/test/TestAllCodecs.h"
+#include "webrtc/modules/audio_coding/test/TestAllCodecs.h"
#include <cstdio>
#include <limits>
@@ -18,9 +18,9 @@
#include "webrtc/common_types.h"
#include "webrtc/engine_configurations.h"
-#include "webrtc/modules/audio_coding/main/include/audio_coding_module.h"
-#include "webrtc/modules/audio_coding/main/include/audio_coding_module_typedefs.h"
-#include "webrtc/modules/audio_coding/main/test/utility.h"
+#include "webrtc/modules/audio_coding/include/audio_coding_module.h"
+#include "webrtc/modules/audio_coding/include/audio_coding_module_typedefs.h"
+#include "webrtc/modules/audio_coding/test/utility.h"
#include "webrtc/system_wrappers/include/trace.h"
#include "webrtc/test/testsupport/fileutils.h"
#include "webrtc/typedefs.h"
@@ -422,8 +422,12 @@ void TestAllCodecs::Run(TestPack* channel) {
uint32_t timestamp_diff;
channel->reset_payload_size();
int error_count = 0;
-
int counter = 0;
+ // Set test length to 500 ms (50 blocks of 10 ms each).
+ infile_a_.SetNum10MsBlocksToRead(50);
+ // Fast-forward 1 second (100 blocks) since the file starts with silence.
+ infile_a_.FastForward(100);
+
while (!infile_a_.EndOfFile()) {
// Add 10 msec to ACM.
infile_a_.Read10MsData(audio_frame);
@@ -477,8 +481,7 @@ void TestAllCodecs::OpenOutFile(int test_number) {
void TestAllCodecs::DisplaySendReceiveCodec() {
CodecInst my_codec_param;
- acm_a_->SendCodec(&my_codec_param);
- printf("%s -> ", my_codec_param.plname);
+ printf("%s -> ", acm_a_->SendCodec()->plname);
acm_b_->ReceiveCodec(&my_codec_param);
printf("%s\n", my_codec_param.plname);
}
diff --git a/webrtc/modules/audio_coding/main/test/TestAllCodecs.h b/webrtc/modules/audio_coding/test/TestAllCodecs.h
index 1cdc0cba98..e79bd69faa 100644
--- a/webrtc/modules/audio_coding/main/test/TestAllCodecs.h
+++ b/webrtc/modules/audio_coding/test/TestAllCodecs.h
@@ -8,13 +8,13 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_TESTALLCODECS_H_
-#define WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_TESTALLCODECS_H_
+#ifndef WEBRTC_MODULES_AUDIO_CODING_TEST_TESTALLCODECS_H_
+#define WEBRTC_MODULES_AUDIO_CODING_TEST_TESTALLCODECS_H_
#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/modules/audio_coding/main/test/ACMTest.h"
-#include "webrtc/modules/audio_coding/main/test/Channel.h"
-#include "webrtc/modules/audio_coding/main/test/PCMFile.h"
+#include "webrtc/modules/audio_coding/test/ACMTest.h"
+#include "webrtc/modules/audio_coding/test/Channel.h"
+#include "webrtc/modules/audio_coding/test/PCMFile.h"
#include "webrtc/typedefs.h"
namespace webrtc {
@@ -81,4 +81,4 @@ class TestAllCodecs : public ACMTest {
} // namespace webrtc
-#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_TESTALLCODECS_H_
+#endif // WEBRTC_MODULES_AUDIO_CODING_TEST_TESTALLCODECS_H_
diff --git a/webrtc/modules/audio_coding/main/test/TestRedFec.cc b/webrtc/modules/audio_coding/test/TestRedFec.cc
index 0627ae2d74..a1bdc04e53 100644
--- a/webrtc/modules/audio_coding/main/test/TestRedFec.cc
+++ b/webrtc/modules/audio_coding/test/TestRedFec.cc
@@ -8,15 +8,15 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/audio_coding/main/test/TestRedFec.h"
+#include "webrtc/modules/audio_coding/test/TestRedFec.h"
#include <assert.h>
#include "webrtc/common.h"
#include "webrtc/common_types.h"
#include "webrtc/engine_configurations.h"
-#include "webrtc/modules/audio_coding/main/include/audio_coding_module_typedefs.h"
-#include "webrtc/modules/audio_coding/main/test/utility.h"
+#include "webrtc/modules/audio_coding/include/audio_coding_module_typedefs.h"
+#include "webrtc/modules/audio_coding/test/utility.h"
#include "webrtc/system_wrappers/include/trace.h"
#include "webrtc/test/testsupport/fileutils.h"
@@ -453,6 +453,10 @@ int16_t TestRedFec::RegisterSendCodec(char side, const char* codecName,
void TestRedFec::Run() {
AudioFrame audioFrame;
int32_t outFreqHzB = _outFileB.SamplingFrequency();
+ // Set test length to 500 ms (50 blocks of 10 ms each).
+ _inFileA.SetNum10MsBlocksToRead(50);
+ // Fast-forward 1 second (100 blocks) since the file starts with silence.
+ _inFileA.FastForward(100);
while (!_inFileA.EndOfFile()) {
EXPECT_GT(_inFileA.Read10MsData(audioFrame), 0);
diff --git a/webrtc/modules/audio_coding/main/test/TestRedFec.h b/webrtc/modules/audio_coding/test/TestRedFec.h
index ac0b6cdfc7..6343d8e374 100644
--- a/webrtc/modules/audio_coding/main/test/TestRedFec.h
+++ b/webrtc/modules/audio_coding/test/TestRedFec.h
@@ -8,14 +8,14 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_TESTREDFEC_H_
-#define WEBRTC_MODULES_AUDIO_CODING_MAIN_TESTREDFEC_H_
+#ifndef WEBRTC_MODULES_AUDIO_CODING_TEST_TESTREDFEC_H_
+#define WEBRTC_MODULES_AUDIO_CODING_TEST_TESTREDFEC_H_
#include <string>
#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/modules/audio_coding/main/test/ACMTest.h"
-#include "webrtc/modules/audio_coding/main/test/Channel.h"
-#include "webrtc/modules/audio_coding/main/test/PCMFile.h"
+#include "webrtc/modules/audio_coding/test/ACMTest.h"
+#include "webrtc/modules/audio_coding/test/Channel.h"
+#include "webrtc/modules/audio_coding/test/PCMFile.h"
namespace webrtc {
@@ -48,4 +48,4 @@ class TestRedFec : public ACMTest {
} // namespace webrtc
-#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_TESTREDFEC_H_
+#endif // WEBRTC_MODULES_AUDIO_CODING_TEST_TESTREDFEC_H_
diff --git a/webrtc/modules/audio_coding/main/test/TestStereo.cc b/webrtc/modules/audio_coding/test/TestStereo.cc
index 69cc3272bb..9bf560d323 100644
--- a/webrtc/modules/audio_coding/main/test/TestStereo.cc
+++ b/webrtc/modules/audio_coding/test/TestStereo.cc
@@ -8,7 +8,7 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/audio_coding/main/test/TestStereo.h"
+#include "webrtc/modules/audio_coding/test/TestStereo.h"
#include <assert.h>
@@ -17,8 +17,8 @@
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/common_types.h"
#include "webrtc/engine_configurations.h"
-#include "webrtc/modules/audio_coding/main/include/audio_coding_module_typedefs.h"
-#include "webrtc/modules/audio_coding/main/test/utility.h"
+#include "webrtc/modules/audio_coding/include/audio_coding_module_typedefs.h"
+#include "webrtc/modules/audio_coding/test/utility.h"
#include "webrtc/system_wrappers/include/trace.h"
#include "webrtc/test/testsupport/fileutils.h"
@@ -735,6 +735,12 @@ void TestStereo::Run(TestPackStereo* channel, int in_channels, int out_channels,
int error_count = 0;
int variable_bytes = 0;
int variable_packets = 0;
+ // Set test length to 500 ms (50 blocks of 10 ms each).
+ in_file_mono_->SetNum10MsBlocksToRead(50);
+ in_file_stereo_->SetNum10MsBlocksToRead(50);
+ // Fast-forward 1 second (100 blocks) since the files start with silence.
+ in_file_stereo_->FastForward(100);
+ in_file_mono_->FastForward(100);
while (1) {
// Simulate packet loss by setting |packet_loss_| to "true" in
@@ -800,7 +806,7 @@ void TestStereo::Run(TestPackStereo* channel, int in_channels, int out_channels,
// such as Opus.
if (variable_packets > 0) {
variable_bytes /= variable_packets;
- EXPECT_NEAR(variable_bytes, pack_size_bytes_, 3);
+ EXPECT_NEAR(variable_bytes, pack_size_bytes_, 18);
}
if (in_file_mono_->EndOfFile()) {
@@ -823,14 +829,15 @@ void TestStereo::OpenOutFile(int16_t test_number) {
}
void TestStereo::DisplaySendReceiveCodec() {
- CodecInst my_codec_param;
- acm_a_->SendCodec(&my_codec_param);
+ auto send_codec = acm_a_->SendCodec();
if (test_mode_ != 0) {
- printf("%s -> ", my_codec_param.plname);
+ ASSERT_TRUE(send_codec);
+ printf("%s -> ", send_codec->plname);
}
- acm_b_->ReceiveCodec(&my_codec_param);
+ CodecInst receive_codec;
+ acm_b_->ReceiveCodec(&receive_codec);
if (test_mode_ != 0) {
- printf("%s\n", my_codec_param.plname);
+ printf("%s\n", receive_codec.plname);
}
}
diff --git a/webrtc/modules/audio_coding/main/test/TestStereo.h b/webrtc/modules/audio_coding/test/TestStereo.h
index b56e995272..4526be6960 100644
--- a/webrtc/modules/audio_coding/main/test/TestStereo.h
+++ b/webrtc/modules/audio_coding/test/TestStereo.h
@@ -8,15 +8,15 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_TESTSTEREO_H_
-#define WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_TESTSTEREO_H_
+#ifndef WEBRTC_MODULES_AUDIO_CODING_TEST_TESTSTEREO_H_
+#define WEBRTC_MODULES_AUDIO_CODING_TEST_TESTSTEREO_H_
#include <math.h>
#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/modules/audio_coding/main/test/ACMTest.h"
-#include "webrtc/modules/audio_coding/main/test/Channel.h"
-#include "webrtc/modules/audio_coding/main/test/PCMFile.h"
+#include "webrtc/modules/audio_coding/test/ACMTest.h"
+#include "webrtc/modules/audio_coding/test/Channel.h"
+#include "webrtc/modules/audio_coding/test/PCMFile.h"
#define PCMA_AND_PCMU
@@ -114,4 +114,4 @@ class TestStereo : public ACMTest {
} // namespace webrtc
-#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_TESTSTEREO_H_
+#endif // WEBRTC_MODULES_AUDIO_CODING_TEST_TESTSTEREO_H_
diff --git a/webrtc/modules/audio_coding/main/test/TestVADDTX.cc b/webrtc/modules/audio_coding/test/TestVADDTX.cc
index bd0335a5f3..229dc2d474 100644
--- a/webrtc/modules/audio_coding/main/test/TestVADDTX.cc
+++ b/webrtc/modules/audio_coding/test/TestVADDTX.cc
@@ -8,13 +8,13 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/audio_coding/main/test/TestVADDTX.h"
+#include "webrtc/modules/audio_coding/test/TestVADDTX.h"
#include <string>
#include "webrtc/engine_configurations.h"
-#include "webrtc/modules/audio_coding/main/test/PCMFile.h"
-#include "webrtc/modules/audio_coding/main/test/utility.h"
+#include "webrtc/modules/audio_coding/test/PCMFile.h"
+#include "webrtc/modules/audio_coding/test/utility.h"
#include "webrtc/test/testsupport/fileutils.h"
namespace webrtc {
@@ -87,6 +87,11 @@ void TestVadDtx::Run(std::string in_filename, int frequency, int channels,
PCMFile in_file;
in_file.Open(in_filename, frequency, "rb");
in_file.ReadStereo(channels > 1);
+ // Set test length to 1000 ms (100 blocks of 10 ms each).
+ in_file.SetNum10MsBlocksToRead(100);
+  // Fast-forward the file 500 ms (50 blocks). The first second of the file is
+  // silence, but we want to keep half of it to test silence periods.
+ in_file.FastForward(50);
PCMFile out_file;
if (append) {
@@ -209,9 +214,9 @@ void TestWebRtcVadDtx::SetVAD(bool enable_dtx, bool enable_vad,
EXPECT_EQ(0, acm_send_->SetVAD(enable_dtx, enable_vad, vad_mode));
EXPECT_EQ(0, acm_send_->VAD(&dtx_enabled_, &vad_enabled_, &mode));
- CodecInst codec_param;
- acm_send_->SendCodec(&codec_param);
- if (STR_CASE_CMP(codec_param.plname, "opus") == 0) {
+ auto codec_param = acm_send_->SendCodec();
+ ASSERT_TRUE(codec_param);
+ if (STR_CASE_CMP(codec_param->plname, "opus") == 0) {
// If send codec is Opus, WebRTC VAD/DTX cannot be used.
enable_dtx = enable_vad = false;
}
@@ -229,10 +234,10 @@ void TestWebRtcVadDtx::SetVAD(bool enable_dtx, bool enable_vad,
// Following is the implementation of TestOpusDtx.
void TestOpusDtx::Perform() {
#ifdef WEBRTC_CODEC_ISAC
- // If we set other codec than Opus, DTX cannot be toggled.
+  // If we set a codec other than Opus, DTX cannot be switched on.
RegisterCodec(kIsacWb);
EXPECT_EQ(-1, acm_send_->EnableOpusDtx());
- EXPECT_EQ(-1, acm_send_->DisableOpusDtx());
+ EXPECT_EQ(0, acm_send_->DisableOpusDtx());
#endif
#ifdef WEBRTC_CODEC_OPUS
diff --git a/webrtc/modules/audio_coding/main/test/TestVADDTX.h b/webrtc/modules/audio_coding/test/TestVADDTX.h
index 07596e2e86..1e7f0ef4d7 100644
--- a/webrtc/modules/audio_coding/main/test/TestVADDTX.h
+++ b/webrtc/modules/audio_coding/test/TestVADDTX.h
@@ -8,16 +8,16 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_TESTVADDTX_H_
-#define WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_TESTVADDTX_H_
+#ifndef WEBRTC_MODULES_AUDIO_CODING_TEST_TESTVADDTX_H_
+#define WEBRTC_MODULES_AUDIO_CODING_TEST_TESTVADDTX_H_
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/common_types.h"
-#include "webrtc/modules/audio_coding/main/include/audio_coding_module.h"
-#include "webrtc/modules/audio_coding/main/include/audio_coding_module_typedefs.h"
-#include "webrtc/modules/audio_coding/main/test/ACMTest.h"
-#include "webrtc/modules/audio_coding/main/test/Channel.h"
+#include "webrtc/modules/audio_coding/include/audio_coding_module.h"
+#include "webrtc/modules/audio_coding/include/audio_coding_module_typedefs.h"
+#include "webrtc/modules/audio_coding/test/ACMTest.h"
+#include "webrtc/modules/audio_coding/test/Channel.h"
namespace webrtc {
@@ -99,4 +99,4 @@ class TestOpusDtx final : public TestVadDtx {
} // namespace webrtc
-#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_TESTVADDTX_H_
+#endif // WEBRTC_MODULES_AUDIO_CODING_TEST_TESTVADDTX_H_
diff --git a/webrtc/modules/audio_coding/main/test/Tester.cc b/webrtc/modules/audio_coding/test/Tester.cc
index 7302e5dcbe..a27f0bc58b 100644
--- a/webrtc/modules/audio_coding/main/test/Tester.cc
+++ b/webrtc/modules/audio_coding/test/Tester.cc
@@ -13,20 +13,19 @@
#include <vector>
#include "testing/gtest/include/gtest/gtest.h"
-#include "webrtc/modules/audio_coding/main/include/audio_coding_module.h"
-#include "webrtc/modules/audio_coding/main/test/APITest.h"
-#include "webrtc/modules/audio_coding/main/test/EncodeDecodeTest.h"
-#include "webrtc/modules/audio_coding/main/test/iSACTest.h"
-#include "webrtc/modules/audio_coding/main/test/opus_test.h"
-#include "webrtc/modules/audio_coding/main/test/PacketLossTest.h"
-#include "webrtc/modules/audio_coding/main/test/TestAllCodecs.h"
-#include "webrtc/modules/audio_coding/main/test/TestRedFec.h"
-#include "webrtc/modules/audio_coding/main/test/TestStereo.h"
-#include "webrtc/modules/audio_coding/main/test/TestVADDTX.h"
-#include "webrtc/modules/audio_coding/main/test/TwoWayCommunication.h"
+#include "webrtc/modules/audio_coding/include/audio_coding_module.h"
+#include "webrtc/modules/audio_coding/test/APITest.h"
+#include "webrtc/modules/audio_coding/test/EncodeDecodeTest.h"
+#include "webrtc/modules/audio_coding/test/iSACTest.h"
+#include "webrtc/modules/audio_coding/test/opus_test.h"
+#include "webrtc/modules/audio_coding/test/PacketLossTest.h"
+#include "webrtc/modules/audio_coding/test/TestAllCodecs.h"
+#include "webrtc/modules/audio_coding/test/TestRedFec.h"
+#include "webrtc/modules/audio_coding/test/TestStereo.h"
+#include "webrtc/modules/audio_coding/test/TestVADDTX.h"
+#include "webrtc/modules/audio_coding/test/TwoWayCommunication.h"
#include "webrtc/system_wrappers/include/trace.h"
#include "webrtc/test/testsupport/fileutils.h"
-#include "webrtc/test/testsupport/gtest_disable.h"
using webrtc::Trace;
@@ -42,7 +41,11 @@ TEST(AudioCodingModuleTest, TestAllCodecs) {
Trace::ReturnTrace();
}
-TEST(AudioCodingModuleTest, DISABLED_ON_ANDROID(TestEncodeDecode)) {
+#if defined(WEBRTC_ANDROID)
+TEST(AudioCodingModuleTest, DISABLED_TestEncodeDecode) {
+#else
+TEST(AudioCodingModuleTest, TestEncodeDecode) {
+#endif
Trace::CreateTrace();
Trace::SetTraceFile((webrtc::test::OutputPath() +
"acm_encodedecode_trace.txt").c_str());
@@ -50,51 +53,54 @@ TEST(AudioCodingModuleTest, DISABLED_ON_ANDROID(TestEncodeDecode)) {
Trace::ReturnTrace();
}
-#ifdef WEBRTC_CODEC_RED
-#define IF_RED(x) x
+#if defined(WEBRTC_CODEC_RED)
+#if defined(WEBRTC_ANDROID)
+TEST(AudioCodingModuleTest, DISABLED_TestRedFec) {
#else
-#define IF_RED(x) DISABLED_##x
+TEST(AudioCodingModuleTest, TestRedFec) {
#endif
-
-TEST(AudioCodingModuleTest, DISABLED_ON_ANDROID(IF_RED(TestRedFec))) {
Trace::CreateTrace();
Trace::SetTraceFile((webrtc::test::OutputPath() +
"acm_fec_trace.txt").c_str());
webrtc::TestRedFec().Perform();
Trace::ReturnTrace();
}
+#endif
#if defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)
-#define IF_ISAC(x) x
+#if defined(WEBRTC_ANDROID)
+TEST(AudioCodingModuleTest, DISABLED_TestIsac) {
#else
-#define IF_ISAC(x) DISABLED_##x
+TEST(AudioCodingModuleTest, TestIsac) {
#endif
-
-TEST(AudioCodingModuleTest, DISABLED_ON_ANDROID(IF_ISAC(TestIsac))) {
Trace::CreateTrace();
Trace::SetTraceFile((webrtc::test::OutputPath() +
"acm_isac_trace.txt").c_str());
webrtc::ISACTest(ACM_TEST_MODE).Perform();
Trace::ReturnTrace();
}
+#endif
#if (defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)) && \
defined(WEBRTC_CODEC_ILBC) && defined(WEBRTC_CODEC_G722)
-#define IF_ALL_CODECS(x) x
+#if defined(WEBRTC_ANDROID)
+TEST(AudioCodingModuleTest, DISABLED_TwoWayCommunication) {
#else
-#define IF_ALL_CODECS(x) DISABLED_##x
+TEST(AudioCodingModuleTest, TwoWayCommunication) {
#endif
-
-TEST(AudioCodingModuleTest,
- DISABLED_ON_ANDROID(IF_ALL_CODECS(TwoWayCommunication))) {
Trace::CreateTrace();
Trace::SetTraceFile((webrtc::test::OutputPath() +
"acm_twowaycom_trace.txt").c_str());
webrtc::TwoWayCommunication(ACM_TEST_MODE).Perform();
Trace::ReturnTrace();
}
+#endif
-TEST(AudioCodingModuleTest, DISABLED_ON_ANDROID(TestStereo)) {
+#if defined(WEBRTC_ANDROID)
+TEST(AudioCodingModuleTest, DISABLED_TestStereo) {
+#else
+TEST(AudioCodingModuleTest, TestStereo) {
+#endif
Trace::CreateTrace();
Trace::SetTraceFile((webrtc::test::OutputPath() +
"acm_stereo_trace.txt").c_str());
@@ -102,7 +108,11 @@ TEST(AudioCodingModuleTest, DISABLED_ON_ANDROID(TestStereo)) {
Trace::ReturnTrace();
}
-TEST(AudioCodingModuleTest, DISABLED_ON_ANDROID(TestWebRtcVadDtx)) {
+#if defined(WEBRTC_ANDROID)
+TEST(AudioCodingModuleTest, DISABLED_TestWebRtcVadDtx) {
+#else
+TEST(AudioCodingModuleTest, TestWebRtcVadDtx) {
+#endif
Trace::CreateTrace();
Trace::SetTraceFile((webrtc::test::OutputPath() +
"acm_vaddtx_trace.txt").c_str());
diff --git a/webrtc/modules/audio_coding/main/test/TimedTrace.cc b/webrtc/modules/audio_coding/test/TimedTrace.cc
index ff9b5eeb76..ff9b5eeb76 100644
--- a/webrtc/modules/audio_coding/main/test/TimedTrace.cc
+++ b/webrtc/modules/audio_coding/test/TimedTrace.cc
diff --git a/webrtc/modules/audio_coding/main/test/TimedTrace.h b/webrtc/modules/audio_coding/test/TimedTrace.h
index ef9609a267..0793eb0c0c 100644
--- a/webrtc/modules/audio_coding/main/test/TimedTrace.h
+++ b/webrtc/modules/audio_coding/test/TimedTrace.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef TIMED_TRACE_H
-#define TIMED_TRACE_H
+#ifndef WEBRTC_MODULES_AUDIO_CODING_TEST_TIMEDTRACE_H_
+#define WEBRTC_MODULES_AUDIO_CODING_TEST_TIMEDTRACE_H_
#include "webrtc/typedefs.h"
@@ -33,4 +33,4 @@ class TimedTrace {
};
-#endif
+#endif // WEBRTC_MODULES_AUDIO_CODING_TEST_TIMEDTRACE_H_
diff --git a/webrtc/modules/audio_coding/main/test/TwoWayCommunication.cc b/webrtc/modules/audio_coding/test/TwoWayCommunication.cc
index 2ff2a85afe..56e136bd34 100644
--- a/webrtc/modules/audio_coding/main/test/TwoWayCommunication.cc
+++ b/webrtc/modules/audio_coding/test/TwoWayCommunication.cc
@@ -21,8 +21,8 @@
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/engine_configurations.h"
#include "webrtc/common_types.h"
-#include "webrtc/modules/audio_coding/main/test/PCMFile.h"
-#include "webrtc/modules/audio_coding/main/test/utility.h"
+#include "webrtc/modules/audio_coding/test/PCMFile.h"
+#include "webrtc/modules/audio_coding/test/utility.h"
#include "webrtc/system_wrappers/include/trace.h"
#include "webrtc/test/testsupport/fileutils.h"
@@ -250,10 +250,8 @@ void TwoWayCommunication::Perform() {
AudioFrame audioFrame;
- CodecInst codecInst_B;
- CodecInst dummy;
-
- EXPECT_EQ(0, _acmB->SendCodec(&codecInst_B));
+ auto codecInst_B = _acmB->SendCodec();
+ ASSERT_TRUE(codecInst_B);
// In the following loop we test that the code can handle misuse of the APIs.
// In the middle of a session with data flowing between two sides, called A
@@ -285,15 +283,15 @@ void TwoWayCommunication::Perform() {
}
// Re-register send codec on side B.
if (((secPassed % 5) == 4) && (msecPassed >= 990)) {
- EXPECT_EQ(0, _acmB->RegisterSendCodec(codecInst_B));
- EXPECT_EQ(0, _acmB->SendCodec(&dummy));
+ EXPECT_EQ(0, _acmB->RegisterSendCodec(*codecInst_B));
+ EXPECT_TRUE(_acmB->SendCodec());
}
// Initialize receiver on side A.
if (((secPassed % 7) == 6) && (msecPassed == 0))
EXPECT_EQ(0, _acmA->InitializeReceiver());
// Re-register codec on side A.
if (((secPassed % 7) == 6) && (msecPassed >= 990)) {
- EXPECT_EQ(0, _acmA->RegisterReceiveCodec(codecInst_B));
+ EXPECT_EQ(0, _acmA->RegisterReceiveCodec(*codecInst_B));
}
}
}
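Here the optional is held for reuse across the loop, so the hunk guards it once with ASSERT_TRUE and then passes the contained CodecInst by value through *codecInst_B. The same three-step idiom in isolation, with illustrative names:

    auto codec = acm_b->SendCodec();                  // rtc::Optional<CodecInst>
    ASSERT_TRUE(codec);                               // abort the test if unset
    EXPECT_EQ(0, acm_b->RegisterSendCodec(*codec));   // dereference to pass by value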
diff --git a/webrtc/modules/audio_coding/main/test/TwoWayCommunication.h b/webrtc/modules/audio_coding/test/TwoWayCommunication.h
index bf969fe683..77639935da 100644
--- a/webrtc/modules/audio_coding/main/test/TwoWayCommunication.h
+++ b/webrtc/modules/audio_coding/test/TwoWayCommunication.h
@@ -8,15 +8,15 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_TWOWAYCOMMUNICATION_H_
-#define WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_TWOWAYCOMMUNICATION_H_
+#ifndef WEBRTC_MODULES_AUDIO_CODING_TEST_TWOWAYCOMMUNICATION_H_
+#define WEBRTC_MODULES_AUDIO_CODING_TEST_TWOWAYCOMMUNICATION_H_
#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/modules/audio_coding/main/include/audio_coding_module.h"
-#include "webrtc/modules/audio_coding/main/test/ACMTest.h"
-#include "webrtc/modules/audio_coding/main/test/Channel.h"
-#include "webrtc/modules/audio_coding/main/test/PCMFile.h"
-#include "webrtc/modules/audio_coding/main/test/utility.h"
+#include "webrtc/modules/audio_coding/include/audio_coding_module.h"
+#include "webrtc/modules/audio_coding/test/ACMTest.h"
+#include "webrtc/modules/audio_coding/test/Channel.h"
+#include "webrtc/modules/audio_coding/test/PCMFile.h"
+#include "webrtc/modules/audio_coding/test/utility.h"
namespace webrtc {
@@ -57,4 +57,4 @@ class TwoWayCommunication : public ACMTest {
} // namespace webrtc
-#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_TWOWAYCOMMUNICATION_H_
+#endif // WEBRTC_MODULES_AUDIO_CODING_TEST_TWOWAYCOMMUNICATION_H_
diff --git a/webrtc/modules/audio_coding/main/test/delay_test.cc b/webrtc/modules/audio_coding/test/delay_test.cc
index 6186d67fc9..a8c137f501 100644
--- a/webrtc/modules/audio_coding/main/test/delay_test.cc
+++ b/webrtc/modules/audio_coding/test/delay_test.cc
@@ -19,12 +19,12 @@
#include "webrtc/common.h"
#include "webrtc/common_types.h"
#include "webrtc/engine_configurations.h"
-#include "webrtc/modules/audio_coding/main/include/audio_coding_module.h"
-#include "webrtc/modules/audio_coding/main/include/audio_coding_module_typedefs.h"
-#include "webrtc/modules/audio_coding/main/acm2/acm_common_defs.h"
-#include "webrtc/modules/audio_coding/main/test/Channel.h"
-#include "webrtc/modules/audio_coding/main/test/PCMFile.h"
-#include "webrtc/modules/audio_coding/main/test/utility.h"
+#include "webrtc/modules/audio_coding/include/audio_coding_module.h"
+#include "webrtc/modules/audio_coding/include/audio_coding_module_typedefs.h"
+#include "webrtc/modules/audio_coding/acm2/acm_common_defs.h"
+#include "webrtc/modules/audio_coding/test/Channel.h"
+#include "webrtc/modules/audio_coding/test/PCMFile.h"
+#include "webrtc/modules/audio_coding/test/utility.h"
#include "webrtc/system_wrappers/include/event_wrapper.h"
#include "webrtc/test/testsupport/fileutils.h"
@@ -33,7 +33,6 @@ DEFINE_int32(sample_rate_hz, 16000, "Sampling rate in Hertz.");
DEFINE_int32(num_channels, 1, "Number of Channels.");
DEFINE_string(input_file, "", "Input file, PCM16 32 kHz, optional.");
DEFINE_int32(delay, 0, "Delay in milliseconds.");
-DEFINE_int32(init_delay, 0, "Initial delay in millisecond.");
DEFINE_bool(dtx, false, "Enable DTX at the sender side.");
DEFINE_bool(packet_loss, false, "Apply packet loss, c.f. Channel{.cc, .h}.");
DEFINE_bool(fec, false, "Use Forward Error Correction (FEC).");
@@ -89,10 +88,6 @@ class DelayTest {
"Couldn't initialize receiver.\n";
ASSERT_EQ(0, acm_b_->InitializeReceiver()) <<
"Couldn't initialize receiver.\n";
- if (FLAGS_init_delay > 0) {
- ASSERT_EQ(0, acm_b_->SetInitialPlayoutDelay(FLAGS_init_delay)) <<
- "Failed to set initial delay.\n";
- }
if (FLAGS_delay > 0) {
ASSERT_EQ(0, acm_b_->SetMinimumPlayoutDelay(FLAGS_delay)) <<
@@ -172,7 +167,7 @@ class DelayTest {
void OpenOutFile(const char* output_id) {
std::stringstream file_stream;
file_stream << "delay_test_" << FLAGS_codec << "_" << FLAGS_sample_rate_hz
- << "Hz" << "_" << FLAGS_init_delay << "ms_" << FLAGS_delay << "ms.pcm";
+ << "Hz" << "_" << FLAGS_delay << "ms.pcm";
std::cout << "Output file: " << file_stream.str() << std::endl << std::endl;
std::string file_name = webrtc::test::OutputPath() + file_stream.str();
out_file_b_.Open(file_name.c_str(), 32000, "wb");
diff --git a/webrtc/modules/audio_coding/main/test/iSACTest.cc b/webrtc/modules/audio_coding/test/iSACTest.cc
index 35c34d5947..9f223fb81f 100644
--- a/webrtc/modules/audio_coding/main/test/iSACTest.cc
+++ b/webrtc/modules/audio_coding/test/iSACTest.cc
@@ -8,7 +8,7 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/audio_coding/main/test/iSACTest.h"
+#include "webrtc/modules/audio_coding/test/iSACTest.h"
#include <ctype.h>
#include <stdio.h>
@@ -23,8 +23,8 @@
#include <time.h>
#endif
-#include "webrtc/modules/audio_coding/main/acm2/acm_common_defs.h"
-#include "webrtc/modules/audio_coding/main/test/utility.h"
+#include "webrtc/modules/audio_coding/acm2/acm_common_defs.h"
+#include "webrtc/modules/audio_coding/test/utility.h"
#include "webrtc/system_wrappers/include/event_wrapper.h"
#include "webrtc/system_wrappers/include/tick_util.h"
#include "webrtc/system_wrappers/include/trace.h"
@@ -47,21 +47,21 @@ int16_t SetISAConfig(ACMTestISACConfig& isacConfig, AudioCodingModule* acm,
if ((isacConfig.currentRateBitPerSec != 0)
|| (isacConfig.currentFrameSizeMsec != 0)) {
- CodecInst sendCodec;
- EXPECT_EQ(0, acm->SendCodec(&sendCodec));
+ auto sendCodec = acm->SendCodec();
+ EXPECT_TRUE(sendCodec);
if (isacConfig.currentRateBitPerSec < 0) {
// Register iSAC in adaptive (channel-dependent) mode.
- sendCodec.rate = -1;
- EXPECT_EQ(0, acm->RegisterSendCodec(sendCodec));
+ sendCodec->rate = -1;
+ EXPECT_EQ(0, acm->RegisterSendCodec(*sendCodec));
} else {
if (isacConfig.currentRateBitPerSec != 0) {
- sendCodec.rate = isacConfig.currentRateBitPerSec;
+ sendCodec->rate = isacConfig.currentRateBitPerSec;
}
if (isacConfig.currentFrameSizeMsec != 0) {
- sendCodec.pacsize = isacConfig.currentFrameSizeMsec
- * (sendCodec.plfreq / 1000);
+ sendCodec->pacsize = isacConfig.currentFrameSizeMsec
+ * (sendCodec->plfreq / 1000);
}
- EXPECT_EQ(0, acm->RegisterSendCodec(sendCodec));
+ EXPECT_EQ(0, acm->RegisterSendCodec(*sendCodec));
}
}
@@ -117,6 +117,10 @@ void ISACTest::Setup() {
EXPECT_EQ(0, _acmA->RegisterSendCodec(_paramISAC32kHz));
_inFileA.Open(file_name_swb_, 32000, "rb");
+ // Set test length to 500 ms (50 blocks of 10 ms each).
+ _inFileA.SetNum10MsBlocksToRead(50);
+  // Fast-forward 1 second (100 blocks) since the file starts with silence.
+ _inFileA.FastForward(100);
std::string fileNameA = webrtc::test::OutputPath() + "testisac_a.pcm";
std::string fileNameB = webrtc::test::OutputPath() + "testisac_b.pcm";
_outFileA.Open(fileNameA, 32000, "wb");
@@ -238,7 +242,6 @@ void ISACTest::EncodeDecode(int testNr, ACMTestISACConfig& wbISACConfig,
_channel_B2A->ResetStats();
char currentTime[500];
- CodecInst sendCodec;
EventTimerWrapper* myEvent = EventTimerWrapper::Create();
EXPECT_TRUE(myEvent->StartTimer(true, 10));
while (!(_inFileA.EndOfFile() || _inFileA.Rewinded())) {
@@ -248,8 +251,8 @@ void ISACTest::EncodeDecode(int testNr, ACMTestISACConfig& wbISACConfig,
if ((adaptiveMode) && (_testMode != 0)) {
myEvent->Wait(5000);
- EXPECT_EQ(0, _acmA->SendCodec(&sendCodec));
- EXPECT_EQ(0, _acmB->SendCodec(&sendCodec));
+ EXPECT_TRUE(_acmA->SendCodec());
+ EXPECT_TRUE(_acmB->SendCodec());
}
}
diff --git a/webrtc/modules/audio_coding/main/test/iSACTest.h b/webrtc/modules/audio_coding/test/iSACTest.h
index 0693d935e1..c5bb515437 100644
--- a/webrtc/modules/audio_coding/main/test/iSACTest.h
+++ b/webrtc/modules/audio_coding/test/iSACTest.h
@@ -8,18 +8,18 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_ISACTEST_H_
-#define WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_ISACTEST_H_
+#ifndef WEBRTC_MODULES_AUDIO_CODING_TEST_ISACTEST_H_
+#define WEBRTC_MODULES_AUDIO_CODING_TEST_ISACTEST_H_
#include <string.h>
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/common_types.h"
-#include "webrtc/modules/audio_coding/main/include/audio_coding_module.h"
-#include "webrtc/modules/audio_coding/main/test/ACMTest.h"
-#include "webrtc/modules/audio_coding/main/test/Channel.h"
-#include "webrtc/modules/audio_coding/main/test/PCMFile.h"
-#include "webrtc/modules/audio_coding/main/test/utility.h"
+#include "webrtc/modules/audio_coding/include/audio_coding_module.h"
+#include "webrtc/modules/audio_coding/test/ACMTest.h"
+#include "webrtc/modules/audio_coding/test/Channel.h"
+#include "webrtc/modules/audio_coding/test/PCMFile.h"
+#include "webrtc/modules/audio_coding/test/utility.h"
#define MAX_FILE_NAME_LENGTH_BYTE 500
#define NO_OF_CLIENTS 15
@@ -76,4 +76,4 @@ class ISACTest : public ACMTest {
} // namespace webrtc
-#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_ISACTEST_H_
+#endif // WEBRTC_MODULES_AUDIO_CODING_TEST_ISACTEST_H_
diff --git a/webrtc/modules/audio_coding/main/test/insert_packet_with_timing.cc b/webrtc/modules/audio_coding/test/insert_packet_with_timing.cc
index ea7266567e..481df55ffd 100644
--- a/webrtc/modules/audio_coding/main/test/insert_packet_with_timing.cc
+++ b/webrtc/modules/audio_coding/test/insert_packet_with_timing.cc
@@ -14,10 +14,10 @@
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/common_types.h"
-#include "webrtc/modules/audio_coding/main/include/audio_coding_module.h"
-#include "webrtc/modules/audio_coding/main/test/Channel.h"
-#include "webrtc/modules/audio_coding/main/test/PCMFile.h"
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/audio_coding/include/audio_coding_module.h"
+#include "webrtc/modules/audio_coding/test/Channel.h"
+#include "webrtc/modules/audio_coding/test/PCMFile.h"
+#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/system_wrappers/include/clock.h"
#include "webrtc/test/testsupport/fileutils.h"
@@ -42,7 +42,6 @@ DEFINE_string(receive_ts, "last_rec_timestamp", "Receive timestamp file");
DEFINE_string(delay, "", "Log for delay.");
// Other setups
-DEFINE_int32(init_delay, 0, "Initial delay.");
DEFINE_bool(verbose, false, "Verbosity.");
DEFINE_double(loss_rate, 0, "Rate of packet loss < 1");
@@ -122,9 +121,6 @@ class InsertPacketWithTiming {
<< " Hz." << std::endl;
// Other setups
- if (FLAGS_init_delay > 0)
- EXPECT_EQ(0, receive_acm_->SetInitialPlayoutDelay(FLAGS_init_delay));
-
if (FLAGS_loss_rate > 0)
loss_threshold_ = RAND_MAX * FLAGS_loss_rate;
else
diff --git a/webrtc/modules/audio_coding/main/test/opus_test.cc b/webrtc/modules/audio_coding/test/opus_test.cc
index 00c66cb3aa..104b5e587b 100644
--- a/webrtc/modules/audio_coding/main/test/opus_test.cc
+++ b/webrtc/modules/audio_coding/test/opus_test.cc
@@ -8,7 +8,7 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/audio_coding/main/test/opus_test.h"
+#include "webrtc/modules/audio_coding/test/opus_test.h"
#include <assert.h>
@@ -17,11 +17,10 @@
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/common_types.h"
#include "webrtc/engine_configurations.h"
-#include "webrtc/modules/audio_coding/codecs/opus/include/opus_interface.h"
-#include "webrtc/modules/audio_coding/main/include/audio_coding_module_typedefs.h"
-#include "webrtc/modules/audio_coding/main/acm2/acm_codec_database.h"
-#include "webrtc/modules/audio_coding/main/test/TestStereo.h"
-#include "webrtc/modules/audio_coding/main/test/utility.h"
+#include "webrtc/modules/audio_coding/codecs/opus/opus_interface.h"
+#include "webrtc/modules/audio_coding/include/audio_coding_module_typedefs.h"
+#include "webrtc/modules/audio_coding/test/TestStereo.h"
+#include "webrtc/modules/audio_coding/test/utility.h"
#include "webrtc/system_wrappers/include/trace.h"
#include "webrtc/test/testsupport/fileutils.h"
@@ -63,7 +62,7 @@ void OpusTest::Perform() {
return;
#else
uint16_t frequency_hz;
- int audio_channels;
+ size_t audio_channels;
int16_t test_cntr = 0;
// Open both mono and stereo test files in 32 kHz.
@@ -206,17 +205,17 @@ void OpusTest::Perform() {
#endif
}
-void OpusTest::Run(TestPackStereo* channel, int channels, int bitrate,
- int frame_length, int percent_loss) {
+void OpusTest::Run(TestPackStereo* channel, size_t channels, int bitrate,
+ size_t frame_length, int percent_loss) {
AudioFrame audio_frame;
int32_t out_freq_hz_b = out_file_.SamplingFrequency();
- const int kBufferSizeSamples = 480 * 12 * 2; // Can hold 120 ms stereo audio.
+ const size_t kBufferSizeSamples = 480 * 12 * 2; // 120 ms stereo audio.
int16_t audio[kBufferSizeSamples];
int16_t out_audio[kBufferSizeSamples];
int16_t audio_type;
- int written_samples = 0;
- int read_samples = 0;
- int decoded_samples = 0;
+ size_t written_samples = 0;
+ size_t read_samples = 0;
+ size_t decoded_samples = 0;
bool first_packet = true;
uint32_t start_time_stamp = 0;
@@ -236,8 +235,12 @@ void OpusTest::Run(TestPackStereo* channel, int channels, int bitrate,
kOpusComplexity5));
#endif
- // Make sure the runtime is less than 60 seconds to pass Android test.
- for (size_t audio_length = 0; audio_length < 10000; audio_length += 10) {
+ // Fast-forward 1 second (100 blocks) since the files start with silence.
+ in_file_stereo_.FastForward(100);
+ in_file_mono_.FastForward(100);
+
+ // Limit the runtime to 1000 blocks of 10 ms each.
+ for (size_t audio_length = 0; audio_length < 1000; audio_length += 10) {
bool lost_packet = false;
// Get 10 msec of audio.
@@ -265,14 +268,14 @@ void OpusTest::Run(TestPackStereo* channel, int channels, int bitrate,
// Sometimes we need to loop over the audio vector to produce the right
// number of packets.
- int loop_encode = (written_samples - read_samples) /
+ size_t loop_encode = (written_samples - read_samples) /
(channels * frame_length);
if (loop_encode > 0) {
- const int kMaxBytes = 1000; // Maximum number of bytes for one packet.
+ const size_t kMaxBytes = 1000; // Maximum number of bytes for one packet.
size_t bitstream_len_byte;
uint8_t bitstream[kMaxBytes];
- for (int i = 0; i < loop_encode; i++) {
+ for (size_t i = 0; i < loop_encode; i++) {
int bitstream_len_byte_int = WebRtcOpus_Encode(
(channels == 1) ? opus_mono_encoder_ : opus_stereo_encoder_,
&audio[read_samples], frame_length, kMaxBytes, bitstream);
@@ -323,7 +326,7 @@ void OpusTest::Run(TestPackStereo* channel, int channels, int bitrate,
first_packet = false;
start_time_stamp = rtp_timestamp_;
}
- rtp_timestamp_ += frame_length;
+ rtp_timestamp_ += static_cast<uint32_t>(frame_length);
read_samples += frame_length * channels;
}
if (read_samples == written_samples) {
@@ -341,8 +344,7 @@ void OpusTest::Run(TestPackStereo* channel, int channels, int bitrate,
audio_frame.samples_per_channel_ * audio_frame.num_channels_);
// Write stand-alone speech to file.
- out_file_standalone_.Write10MsData(
- out_audio, static_cast<size_t>(decoded_samples) * channels);
+ out_file_standalone_.Write10MsData(out_audio, decoded_samples * channels);
if (audio_frame.timestamp_ > start_time_stamp) {
// Number of channels should be the same for both stand-alone and
diff --git a/webrtc/modules/audio_coding/main/test/opus_test.h b/webrtc/modules/audio_coding/test/opus_test.h
index 379bb86d5d..93c9ffb263 100644
--- a/webrtc/modules/audio_coding/main/test/opus_test.h
+++ b/webrtc/modules/audio_coding/test/opus_test.h
@@ -8,18 +8,18 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_OPUS_TEST_H_
-#define WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_OPUS_TEST_H_
+#ifndef WEBRTC_MODULES_AUDIO_CODING_TEST_OPUS_TEST_H_
+#define WEBRTC_MODULES_AUDIO_CODING_TEST_OPUS_TEST_H_
#include <math.h>
#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/modules/audio_coding/codecs/opus/include/opus_interface.h"
-#include "webrtc/modules/audio_coding/main/acm2/acm_resampler.h"
-#include "webrtc/modules/audio_coding/main/test/ACMTest.h"
-#include "webrtc/modules/audio_coding/main/test/Channel.h"
-#include "webrtc/modules/audio_coding/main/test/PCMFile.h"
-#include "webrtc/modules/audio_coding/main/test/TestStereo.h"
+#include "webrtc/modules/audio_coding/codecs/opus/opus_interface.h"
+#include "webrtc/modules/audio_coding/acm2/acm_resampler.h"
+#include "webrtc/modules/audio_coding/test/ACMTest.h"
+#include "webrtc/modules/audio_coding/test/Channel.h"
+#include "webrtc/modules/audio_coding/test/PCMFile.h"
+#include "webrtc/modules/audio_coding/test/TestStereo.h"
namespace webrtc {
@@ -31,7 +31,10 @@ class OpusTest : public ACMTest {
void Perform();
private:
- void Run(TestPackStereo* channel, int channels, int bitrate, int frame_length,
+ void Run(TestPackStereo* channel,
+ size_t channels,
+ int bitrate,
+ size_t frame_length,
int percent_loss = 0);
void OpenOutFile(int test_number);
@@ -44,7 +47,7 @@ class OpusTest : public ACMTest {
PCMFile out_file_standalone_;
int counter_;
uint8_t payload_type_;
- int rtp_timestamp_;
+ uint32_t rtp_timestamp_;
acm2::ACMResampler resampler_;
WebRtcOpusEncInst* opus_mono_encoder_;
WebRtcOpusEncInst* opus_stereo_encoder_;
@@ -54,4 +57,4 @@ class OpusTest : public ACMTest {
} // namespace webrtc
-#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_OPUS_TEST_H_
+#endif // WEBRTC_MODULES_AUDIO_CODING_TEST_OPUS_TEST_H_
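The type changes in opus_test follow one rule: sample and frame counts become size_t (matching, judging by the call sites, what WebRtcOpus_Encode and Write10MsData take), while the RTP timestamp becomes uint32_t, the width RTP actually uses, with an explicit cast at the single point where a size_t frame length is added to it. Sketched in isolation, with an illustrative frame length:

    #include <cstddef>
    #include <cstdint>

    size_t frame_length = 960;   // samples per channel for one frame (example)
    uint32_t rtp_timestamp = 0;  // RTP timestamps are 32 bits wide

    // The cast makes the size_t -> uint32_t narrowing explicit; RTP
    // timestamps are expected to wrap modulo 2^32.
    rtp_timestamp += static_cast<uint32_t>(frame_length);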
diff --git a/webrtc/modules/audio_coding/main/test/target_delay_unittest.cc b/webrtc/modules/audio_coding/test/target_delay_unittest.cc
index 20b10a376e..195e9d8145 100644
--- a/webrtc/modules/audio_coding/main/test/target_delay_unittest.cc
+++ b/webrtc/modules/audio_coding/test/target_delay_unittest.cc
@@ -11,13 +11,12 @@
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/common_types.h"
-#include "webrtc/modules/audio_coding/codecs/pcm16b/include/pcm16b.h"
-#include "webrtc/modules/audio_coding/main/include/audio_coding_module.h"
-#include "webrtc/modules/audio_coding/main/test/utility.h"
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/audio_coding/codecs/pcm16b/pcm16b.h"
+#include "webrtc/modules/audio_coding/include/audio_coding_module.h"
+#include "webrtc/modules/audio_coding/test/utility.h"
+#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/system_wrappers/include/sleep.h"
#include "webrtc/test/testsupport/fileutils.h"
-#include "webrtc/test/testsupport/gtest_disable.h"
namespace webrtc {
@@ -154,7 +153,7 @@ class TargetDelayTest : public ::testing::Test {
ASSERT_EQ(0, acm_->PlayoutData10Ms(-1, &frame));
// Had to use ASSERT_TRUE; ASSERT_EQ generated an error.
ASSERT_TRUE(kSampleRateHz == frame.sample_rate_hz_);
- ASSERT_EQ(1, frame.num_channels_);
+ ASSERT_EQ(1u, frame.num_channels_);
ASSERT_TRUE(kSampleRateHz / 100 == frame.samples_per_channel_);
}
}
@@ -199,23 +198,50 @@ class TargetDelayTest : public ::testing::Test {
uint8_t payload_[kPayloadLenBytes];
};
-TEST_F(TargetDelayTest, DISABLED_ON_ANDROID(OutOfRangeInput)) {
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_OutOfRangeInput DISABLED_OutOfRangeInput
+#else
+#define MAYBE_OutOfRangeInput OutOfRangeInput
+#endif
+TEST_F(TargetDelayTest, MAYBE_OutOfRangeInput) {
OutOfRangeInput();
}
-TEST_F(TargetDelayTest, DISABLED_ON_ANDROID(NoTargetDelayBufferSizeChanges)) {
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_NoTargetDelayBufferSizeChanges \
+ DISABLED_NoTargetDelayBufferSizeChanges
+#else
+#define MAYBE_NoTargetDelayBufferSizeChanges NoTargetDelayBufferSizeChanges
+#endif
+TEST_F(TargetDelayTest, MAYBE_NoTargetDelayBufferSizeChanges) {
NoTargetDelayBufferSizeChanges();
}
-TEST_F(TargetDelayTest, DISABLED_ON_ANDROID(WithTargetDelayBufferNotChanging)) {
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_WithTargetDelayBufferNotChanging \
+ DISABLED_WithTargetDelayBufferNotChanging
+#else
+#define MAYBE_WithTargetDelayBufferNotChanging WithTargetDelayBufferNotChanging
+#endif
+TEST_F(TargetDelayTest, MAYBE_WithTargetDelayBufferNotChanging) {
WithTargetDelayBufferNotChanging();
}
-TEST_F(TargetDelayTest, DISABLED_ON_ANDROID(RequiredDelayAtCorrectRange)) {
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_RequiredDelayAtCorrectRange DISABLED_RequiredDelayAtCorrectRange
+#else
+#define MAYBE_RequiredDelayAtCorrectRange RequiredDelayAtCorrectRange
+#endif
+TEST_F(TargetDelayTest, MAYBE_RequiredDelayAtCorrectRange) {
RequiredDelayAtCorrectRange();
}
-TEST_F(TargetDelayTest, DISABLED_ON_ANDROID(TargetDelayBufferMinMax)) {
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_TargetDelayBufferMinMax DISABLED_TargetDelayBufferMinMax
+#else
+#define MAYBE_TargetDelayBufferMinMax TargetDelayBufferMinMax
+#endif
+TEST_F(TargetDelayTest, MAYBE_TargetDelayBufferMinMax) {
TargetDelayBufferMinMax();
}
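This file uses the other common replacement for DISABLED_ON_ANDROID: instead of duplicating the TEST_F line inside an #if, a MAYBE_ macro aliases the test name, so the body is written once. The idiom, sketched with placeholder names:

    #if defined(WEBRTC_ANDROID)
    #define MAYBE_MyTest DISABLED_MyTest
    #else
    #define MAYBE_MyTest MyTest
    #endif
    TEST_F(MyFixture, MAYBE_MyTest) {
      // single shared body; the name resolves per platform at preprocessing
    }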
diff --git a/webrtc/modules/audio_coding/main/test/utility.cc b/webrtc/modules/audio_coding/test/utility.cc
index 34af5e703f..89368bce51 100644
--- a/webrtc/modules/audio_coding/main/test/utility.cc
+++ b/webrtc/modules/audio_coding/test/utility.cc
@@ -18,8 +18,8 @@
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/common.h"
#include "webrtc/common_types.h"
-#include "webrtc/modules/audio_coding/main/include/audio_coding_module.h"
-#include "webrtc/modules/audio_coding/main/acm2/acm_common_defs.h"
+#include "webrtc/modules/audio_coding/include/audio_coding_module.h"
+#include "webrtc/modules/audio_coding/acm2/acm_common_defs.h"
#define NUM_CODECS_WITH_FIXED_PAYLOAD_TYPE 13
diff --git a/webrtc/modules/audio_coding/main/test/utility.h b/webrtc/modules/audio_coding/test/utility.h
index e936ec1cdd..23869be7ed 100644
--- a/webrtc/modules/audio_coding/main/test/utility.h
+++ b/webrtc/modules/audio_coding/test/utility.h
@@ -8,11 +8,11 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_UTILITY_H_
-#define WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_UTILITY_H_
+#ifndef WEBRTC_MODULES_AUDIO_CODING_TEST_UTILITY_H_
+#define WEBRTC_MODULES_AUDIO_CODING_TEST_UTILITY_H_
#include "testing/gtest/include/gtest/gtest.h"
-#include "webrtc/modules/audio_coding/main/include/audio_coding_module.h"
+#include "webrtc/modules/audio_coding/include/audio_coding_module.h"
namespace webrtc {
@@ -136,4 +136,4 @@ void UseNewAcm(webrtc::Config* config);
} // namespace webrtc
-#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_UTILITY_H_
+#endif // WEBRTC_MODULES_AUDIO_CODING_TEST_UTILITY_H_
diff --git a/webrtc/modules/audio_conference_mixer/BUILD.gn b/webrtc/modules/audio_conference_mixer/BUILD.gn
index 3b9e2769ac..36391c7abc 100644
--- a/webrtc/modules/audio_conference_mixer/BUILD.gn
+++ b/webrtc/modules/audio_conference_mixer/BUILD.gn
@@ -9,15 +9,15 @@
config("audio_conference_mixer_config") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
include_dirs = [
- "interface",
- "../interface",
+ "include",
+ "../include",
]
}
source_set("audio_conference_mixer") {
sources = [
- "interface/audio_conference_mixer.h",
- "interface/audio_conference_mixer_defines.h",
+ "include/audio_conference_mixer.h",
+ "include/audio_conference_mixer_defines.h",
"source/audio_conference_mixer_impl.cc",
"source/audio_conference_mixer_impl.h",
"source/audio_frame_manipulator.cc",
diff --git a/webrtc/modules/audio_conference_mixer/OWNERS b/webrtc/modules/audio_conference_mixer/OWNERS
index 34bc7389ba..ea2062a9a0 100644
--- a/webrtc/modules/audio_conference_mixer/OWNERS
+++ b/webrtc/modules/audio_conference_mixer/OWNERS
@@ -1,3 +1,8 @@
-andrew@webrtc.org
+minyue@webrtc.org
+
+# These are for the common case of adding or renaming files. If you're making
+# structural changes, please get a review from a reviewer in this file.
+per-file *.gyp=*
+per-file *.gypi=*
per-file BUILD.gn=kjellander@webrtc.org
diff --git a/webrtc/modules/audio_conference_mixer/audio_conference_mixer.gypi b/webrtc/modules/audio_conference_mixer/audio_conference_mixer.gypi
index 5aa3cc449b..9d7179504c 100644
--- a/webrtc/modules/audio_conference_mixer/audio_conference_mixer.gypi
+++ b/webrtc/modules/audio_conference_mixer/audio_conference_mixer.gypi
@@ -17,8 +17,8 @@
'<(webrtc_root)/system_wrappers/system_wrappers.gyp:system_wrappers',
],
'sources': [
- 'interface/audio_conference_mixer.h',
- 'interface/audio_conference_mixer_defines.h',
+ 'include/audio_conference_mixer.h',
+ 'include/audio_conference_mixer_defines.h',
'source/audio_frame_manipulator.cc',
'source/audio_frame_manipulator.h',
'source/memory_pool.h',
diff --git a/webrtc/modules/audio_conference_mixer/interface/audio_conference_mixer.h b/webrtc/modules/audio_conference_mixer/include/audio_conference_mixer.h
index 7ff39579ee..7370442704 100644
--- a/webrtc/modules/audio_conference_mixer/interface/audio_conference_mixer.h
+++ b/webrtc/modules/audio_conference_mixer/include/audio_conference_mixer.h
@@ -8,12 +8,12 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_AUDIO_CONFERENCE_MIXER_INTERFACE_AUDIO_CONFERENCE_MIXER_H_
-#define WEBRTC_MODULES_AUDIO_CONFERENCE_MIXER_INTERFACE_AUDIO_CONFERENCE_MIXER_H_
+#ifndef WEBRTC_MODULES_AUDIO_CONFERENCE_MIXER_INCLUDE_AUDIO_CONFERENCE_MIXER_H_
+#define WEBRTC_MODULES_AUDIO_CONFERENCE_MIXER_INCLUDE_AUDIO_CONFERENCE_MIXER_H_
-#include "webrtc/modules/audio_conference_mixer/interface/audio_conference_mixer_defines.h"
-#include "webrtc/modules/interface/module.h"
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/audio_conference_mixer/include/audio_conference_mixer_defines.h"
+#include "webrtc/modules/include/module.h"
+#include "webrtc/modules/include/module_common_types.h"
namespace webrtc {
class AudioMixerOutputReceiver;
@@ -74,4 +74,4 @@ protected:
};
} // namespace webrtc
-#endif // WEBRTC_MODULES_AUDIO_CONFERENCE_MIXER_INTERFACE_AUDIO_CONFERENCE_MIXER_H_
+#endif // WEBRTC_MODULES_AUDIO_CONFERENCE_MIXER_INCLUDE_AUDIO_CONFERENCE_MIXER_H_
diff --git a/webrtc/modules/audio_conference_mixer/interface/audio_conference_mixer_defines.h b/webrtc/modules/audio_conference_mixer/include/audio_conference_mixer_defines.h
index d15b7fca02..5d58f42435 100644
--- a/webrtc/modules/audio_conference_mixer/interface/audio_conference_mixer_defines.h
+++ b/webrtc/modules/audio_conference_mixer/include/audio_conference_mixer_defines.h
@@ -8,10 +8,10 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_AUDIO_CONFERENCE_MIXER_INTERFACE_AUDIO_CONFERENCE_MIXER_DEFINES_H_
-#define WEBRTC_MODULES_AUDIO_CONFERENCE_MIXER_INTERFACE_AUDIO_CONFERENCE_MIXER_DEFINES_H_
+#ifndef WEBRTC_MODULES_AUDIO_CONFERENCE_MIXER_INCLUDE_AUDIO_CONFERENCE_MIXER_DEFINES_H_
+#define WEBRTC_MODULES_AUDIO_CONFERENCE_MIXER_INCLUDE_AUDIO_CONFERENCE_MIXER_DEFINES_H_
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/typedefs.h"
namespace webrtc {
@@ -57,4 +57,4 @@ protected:
};
} // namespace webrtc
-#endif // WEBRTC_MODULES_AUDIO_CONFERENCE_MIXER_INTERFACE_AUDIO_CONFERENCE_MIXER_DEFINES_H_
+#endif // WEBRTC_MODULES_AUDIO_CONFERENCE_MIXER_INCLUDE_AUDIO_CONFERENCE_MIXER_DEFINES_H_
diff --git a/webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.cc b/webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.cc
index 2d2cf9dbb8..afb060f46d 100644
--- a/webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.cc
+++ b/webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.cc
@@ -8,11 +8,11 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/audio_conference_mixer/interface/audio_conference_mixer_defines.h"
+#include "webrtc/modules/audio_conference_mixer/include/audio_conference_mixer_defines.h"
#include "webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.h"
#include "webrtc/modules/audio_conference_mixer/source/audio_frame_manipulator.h"
#include "webrtc/modules/audio_processing/include/audio_processing.h"
-#include "webrtc/modules/utility/interface/audio_frame_operations.h"
+#include "webrtc/modules/utility/include/audio_frame_operations.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
#include "webrtc/system_wrappers/include/trace.h"
@@ -50,8 +50,8 @@ void MixFrames(AudioFrame* mixed_frame, AudioFrame* frame, bool use_limiter) {
}
// Return the max number of channels from a |list| composed of AudioFrames.
-int MaxNumChannels(const AudioFrameList* list) {
- int max_num_channels = 1;
+size_t MaxNumChannels(const AudioFrameList* list) {
+ size_t max_num_channels = 1;
for (AudioFrameList::const_iterator iter = list->begin();
iter != list->end();
++iter) {
@@ -278,7 +278,7 @@ int32_t AudioConferenceMixerImpl::Process() {
// with an API instead of dynamically.
// Find the max channels over all mixing lists.
- const int num_mixed_channels = std::max(MaxNumChannels(&mixList),
+ const size_t num_mixed_channels = std::max(MaxNumChannels(&mixList),
std::max(MaxNumChannels(&additionalFramesList),
MaxNumChannels(&rampOutList)));
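MaxNumChannels() switches to size_t, apparently because AudioFrame::num_channels_ is now unsigned elsewhere in this patch (the 1u comparison in target_delay_unittest points the same way), and std::max deduces a single template parameter, so both arguments of each nested call must agree on size_t or the call fails to compile. In isolation:

    #include <algorithm>
    #include <cstddef>

    size_t MaxOfThree(size_t a, size_t b, size_t c) {
      // std::max(T, T): mixing int and size_t here would not compile
      // without an explicit std::max<size_t>(...) or a cast.
      return std::max(a, std::max(b, c));
    }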
diff --git a/webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.h b/webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.h
index bc9a27e9f0..2466112769 100644
--- a/webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.h
+++ b/webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.h
@@ -16,10 +16,10 @@
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/engine_configurations.h"
-#include "webrtc/modules/audio_conference_mixer/interface/audio_conference_mixer.h"
+#include "webrtc/modules/audio_conference_mixer/include/audio_conference_mixer.h"
#include "webrtc/modules/audio_conference_mixer/source/memory_pool.h"
#include "webrtc/modules/audio_conference_mixer/source/time_scheduler.h"
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
namespace webrtc {
class AudioProcessing;
diff --git a/webrtc/modules/audio_conference_mixer/source/audio_frame_manipulator.cc b/webrtc/modules/audio_conference_mixer/source/audio_frame_manipulator.cc
index 636698e9c1..9c5d3b939d 100644
--- a/webrtc/modules/audio_conference_mixer/source/audio_frame_manipulator.cc
+++ b/webrtc/modules/audio_conference_mixer/source/audio_frame_manipulator.cc
@@ -9,7 +9,7 @@
*/
#include "webrtc/modules/audio_conference_mixer/source/audio_frame_manipulator.h"
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/typedefs.h"
namespace {
diff --git a/webrtc/modules/audio_conference_mixer/test/audio_conference_mixer_unittest.cc b/webrtc/modules/audio_conference_mixer/test/audio_conference_mixer_unittest.cc
index d4fbd205f1..293bfa0db9 100644
--- a/webrtc/modules/audio_conference_mixer/test/audio_conference_mixer_unittest.cc
+++ b/webrtc/modules/audio_conference_mixer/test/audio_conference_mixer_unittest.cc
@@ -10,8 +10,8 @@
#include "testing/gmock/include/gmock/gmock.h"
#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/modules/audio_conference_mixer/interface/audio_conference_mixer.h"
-#include "webrtc/modules/audio_conference_mixer/interface/audio_conference_mixer_defines.h"
+#include "webrtc/modules/audio_conference_mixer/include/audio_conference_mixer.h"
+#include "webrtc/modules/audio_conference_mixer/include/audio_conference_mixer_defines.h"
namespace webrtc {
diff --git a/webrtc/modules/audio_device/BUILD.gn b/webrtc/modules/audio_device/BUILD.gn
index 8875178c15..5897176845 100644
--- a/webrtc/modules/audio_device/BUILD.gn
+++ b/webrtc/modules/audio_device/BUILD.gn
@@ -10,7 +10,7 @@ import("../../build/webrtc.gni")
config("audio_device_config") {
include_dirs = [
- "../interface",
+ "../include",
"include",
"dummy", # Contains dummy audio device implementations.
]
@@ -50,54 +50,50 @@ source_set("audio_device") {
include_dirs += [ "android" ]
}
defines = []
+ cflags = []
if (rtc_include_internal_audio_device) {
defines += [ "WEBRTC_INCLUDE_INTERNAL_AUDIO_DEVICE" ]
sources += [
- "android/audio_device_template.h",
- "android/audio_manager.cc",
- "android/audio_manager.h",
- "android/audio_record_jni.cc",
- "android/audio_record_jni.h",
- "android/audio_track_jni.cc",
- "android/audio_track_jni.h",
- "android/opensles_common.cc",
- "android/opensles_common.h",
- "android/opensles_player.cc",
- "android/opensles_player.h",
"audio_device_impl.cc",
"audio_device_impl.h",
- "ios/audio_device_ios.h",
- "ios/audio_device_ios.mm",
- "linux/alsasymboltable_linux.cc",
- "linux/alsasymboltable_linux.h",
- "linux/audio_device_alsa_linux.cc",
- "linux/audio_device_alsa_linux.h",
- "linux/audio_mixer_manager_alsa_linux.cc",
- "linux/audio_mixer_manager_alsa_linux.h",
- "linux/latebindingsymboltable_linux.cc",
- "linux/latebindingsymboltable_linux.h",
- "mac/audio_device_mac.cc",
- "mac/audio_device_mac.h",
- "mac/audio_mixer_manager_mac.cc",
- "mac/audio_mixer_manager_mac.h",
- "mac/portaudio/pa_memorybarrier.h",
- "mac/portaudio/pa_ringbuffer.c",
- "mac/portaudio/pa_ringbuffer.h",
- "win/audio_device_core_win.cc",
- "win/audio_device_core_win.h",
- "win/audio_device_wave_win.cc",
- "win/audio_device_wave_win.h",
- "win/audio_mixer_manager_win.cc",
- "win/audio_mixer_manager_win.h",
]
+ if (is_android) {
+ sources += [
+ "android/audio_device_template.h",
+ "android/audio_manager.cc",
+ "android/audio_manager.h",
+ "android/audio_record_jni.cc",
+ "android/audio_record_jni.h",
+ "android/audio_track_jni.cc",
+ "android/audio_track_jni.h",
+ "android/build_info.cc",
+ "android/build_info.h",
+ "android/opensles_common.cc",
+ "android/opensles_common.h",
+ "android/opensles_player.cc",
+ "android/opensles_player.h",
+ ]
+ libs = [
+ "log",
+ "OpenSLES",
+ ]
+ }
if (is_linux) {
+ sources += [
+ "linux/alsasymboltable_linux.cc",
+ "linux/alsasymboltable_linux.h",
+ "linux/audio_device_alsa_linux.cc",
+ "linux/audio_device_alsa_linux.h",
+ "linux/audio_mixer_manager_alsa_linux.cc",
+ "linux/audio_mixer_manager_alsa_linux.h",
+ "linux/latebindingsymboltable_linux.cc",
+ "linux/latebindingsymboltable_linux.h",
+ ]
defines += [ "LINUX_ALSA" ]
-
libs = [
"dl",
"X11",
]
-
if (rtc_include_pulse_audio) {
sources += [
"linux/audio_device_pulse_linux.cc",
@@ -107,26 +103,47 @@ source_set("audio_device") {
"linux/pulseaudiosymboltable_linux.cc",
"linux/pulseaudiosymboltable_linux.h",
]
-
defines += [ "LINUX_PULSE" ]
}
}
if (is_mac) {
+ sources += [
+ "mac/audio_device_mac.cc",
+ "mac/audio_device_mac.h",
+ "mac/audio_mixer_manager_mac.cc",
+ "mac/audio_mixer_manager_mac.h",
+ "mac/portaudio/pa_memorybarrier.h",
+ "mac/portaudio/pa_ringbuffer.c",
+ "mac/portaudio/pa_ringbuffer.h",
+ ]
libs = [
"AudioToolbox.framework",
"CoreAudio.framework",
]
}
if (is_ios) {
+ sources += [
+ "ios/audio_device_ios.h",
+ "ios/audio_device_ios.mm",
+ "ios/audio_device_not_implemented_ios.mm",
+ ]
cflags += [ "-fobjc-arc" ] # CLANG_ENABLE_OBJC_ARC = YES.
-
libs = [
"AudioToolbox.framework",
"AVFoundation.framework",
"Foundation.framework",
+ "UIKit.framework",
]
}
if (is_win) {
+ sources += [
+ "win/audio_device_core_win.cc",
+ "win/audio_device_core_win.h",
+ "win/audio_device_wave_win.cc",
+ "win/audio_device_wave_win.h",
+ "win/audio_mixer_manager_win.cc",
+ "win/audio_mixer_manager_win.h",
+ ]
libs = [
# Required for the built-in WASAPI AEC.
"dmoguids.lib",
diff --git a/webrtc/modules/audio_device/OWNERS b/webrtc/modules/audio_device/OWNERS
index bb11a4ec0e..12d67c035b 100644
--- a/webrtc/modules/audio_device/OWNERS
+++ b/webrtc/modules/audio_device/OWNERS
@@ -4,8 +4,6 @@ niklas.enbom@webrtc.org
tkchin@webrtc.org
xians@webrtc.org
-per-file *.isolate=kjellander@webrtc.org
-
# These are for the common case of adding or renaming files. If you're doing
# structural changes, please get a review from a reviewer in this file.
per-file *.gyp=*
diff --git a/webrtc/modules/audio_device/android/audio_device_unittest.cc b/webrtc/modules/audio_device/android/audio_device_unittest.cc
index 7b2d6354c4..768047df51 100644
--- a/webrtc/modules/audio_device/android/audio_device_unittest.cc
+++ b/webrtc/modules/audio_device/android/audio_device_unittest.cc
@@ -383,7 +383,7 @@ class MockAudioTransport : public AudioTransport {
int32_t(const void* audioSamples,
const size_t nSamples,
const size_t nBytesPerSample,
- const uint8_t nChannels,
+ const size_t nChannels,
const uint32_t samplesPerSec,
const uint32_t totalDelayMS,
const int32_t clockDrift,
@@ -393,7 +393,7 @@ class MockAudioTransport : public AudioTransport {
MOCK_METHOD8(NeedMorePlayData,
int32_t(const size_t nSamples,
const size_t nBytesPerSample,
- const uint8_t nChannels,
+ const size_t nChannels,
const uint32_t samplesPerSec,
void* audioSamples,
size_t& nSamplesOut,
@@ -423,7 +423,7 @@ class MockAudioTransport : public AudioTransport {
int32_t RealRecordedDataIsAvailable(const void* audioSamples,
const size_t nSamples,
const size_t nBytesPerSample,
- const uint8_t nChannels,
+ const size_t nChannels,
const uint32_t samplesPerSec,
const uint32_t totalDelayMS,
const int32_t clockDrift,
@@ -445,7 +445,7 @@ class MockAudioTransport : public AudioTransport {
int32_t RealNeedMorePlayData(const size_t nSamples,
const size_t nBytesPerSample,
- const uint8_t nChannels,
+ const size_t nChannels,
const uint32_t samplesPerSec,
void* audioSamples,
size_t& nSamplesOut,
@@ -521,10 +521,10 @@ class AudioDeviceTest : public ::testing::Test {
int record_sample_rate() const {
return record_parameters_.sample_rate();
}
- int playout_channels() const {
+ size_t playout_channels() const {
return playout_parameters_.channels();
}
- int record_channels() const {
+ size_t record_channels() const {
return record_parameters_.channels();
}
size_t playout_frames_per_10ms_buffer() const {
@@ -931,7 +931,7 @@ TEST_F(AudioDeviceTest, StartPlayoutAndRecordingVerifyCallbacks) {
// not contain any explicit verification that the audio quality is perfect.
TEST_F(AudioDeviceTest, RunPlayoutWithFileAsSource) {
// TODO(henrika): extend test when mono output is supported.
- EXPECT_EQ(1, playout_channels());
+ EXPECT_EQ(1u, playout_channels());
NiceMock<MockAudioTransport> mock(kPlayout);
const int num_callbacks = kFilePlayTimeInSec * kNumCallbacksPerSecond;
std::string file_name = GetFileName(playout_sample_rate());
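The uint8_t-to-size_t widening above runs through the whole AudioTransport surface. A minimal standalone sketch of a sink in the new shape (FakeAudioSink and its members are illustrative, not the real interface):

    #include <cstddef>
    #include <cstdint>

    class FakeAudioSink {
     public:
      // Mirrors the post-change parameter types: sample, byte and channel
      // counts are all size_t, so the byte math below never narrows.
      int32_t RecordedDataIsAvailable(const void* audio_samples,
                                      size_t num_samples,
                                      size_t bytes_per_sample,
                                      size_t num_channels) {
        bytes_received_ += num_samples * bytes_per_sample * num_channels;
        return 0;
      }

     private:
      size_t bytes_received_ = 0;
    };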
diff --git a/webrtc/modules/audio_device/android/audio_manager.cc b/webrtc/modules/audio_device/android/audio_manager.cc
index 169a1929ce..1d08a6adc0 100644
--- a/webrtc/modules/audio_device/android/audio_manager.cc
+++ b/webrtc/modules/audio_device/android/audio_manager.cc
@@ -10,13 +10,15 @@
#include "webrtc/modules/audio_device/android/audio_manager.h"
+#include <utility>
+
#include <android/log.h>
#include "webrtc/base/arraysize.h"
#include "webrtc/base/checks.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/modules/audio_device/android/audio_common.h"
-#include "webrtc/modules/utility/interface/helpers_android.h"
+#include "webrtc/modules/utility/include/helpers_android.h"
#define TAG "AudioManager"
#define ALOGV(...) __android_log_print(ANDROID_LOG_VERBOSE, TAG, __VA_ARGS__)
@@ -29,15 +31,16 @@ namespace webrtc {
// AudioManager::JavaAudioManager implementation
AudioManager::JavaAudioManager::JavaAudioManager(
- NativeRegistration* native_reg, rtc::scoped_ptr<GlobalRef> audio_manager)
- : audio_manager_(audio_manager.Pass()),
+ NativeRegistration* native_reg,
+ rtc::scoped_ptr<GlobalRef> audio_manager)
+ : audio_manager_(std::move(audio_manager)),
init_(native_reg->GetMethodId("init", "()Z")),
dispose_(native_reg->GetMethodId("dispose", "()V")),
is_communication_mode_enabled_(
native_reg->GetMethodId("isCommunicationModeEnabled", "()Z")),
is_device_blacklisted_for_open_sles_usage_(
- native_reg->GetMethodId(
- "isDeviceBlacklistedForOpenSLESUsage", "()Z")) {
+ native_reg->GetMethodId("isDeviceBlacklistedForOpenSLESUsage",
+ "()Z")) {
ALOGD("JavaAudioManager::ctor%s", GetThreadInfo().c_str());
}
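The Pass()-to-std::move() edits in this file all follow one idiom, shown below in minimal form with std::unique_ptr standing in for rtc::scoped_ptr (whose Pass() was the pre-C++11 spelling of a move):

    #include <memory>
    #include <utility>

    struct GlobalRef {};  // stand-in for the JNI global-reference wrapper

    class JavaAudioManager {
     public:
      explicit JavaAudioManager(std::unique_ptr<GlobalRef> audio_manager)
          : audio_manager_(std::move(audio_manager)) {}  // was .Pass()

     private:
      std::unique_ptr<GlobalRef> audio_manager_;
    };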
@@ -71,13 +74,12 @@ AudioManager::AudioManager()
hardware_agc_(false),
hardware_ns_(false),
low_latency_playout_(false),
- delay_estimate_in_milliseconds_(0),
- output_stream_type_(0) {
+ delay_estimate_in_milliseconds_(0) {
ALOGD("ctor%s", GetThreadInfo().c_str());
RTC_CHECK(j_environment_);
JNINativeMethod native_methods[] = {
{"nativeCacheAudioParameters",
- "(IIZZZZIIIJ)V",
+ "(IIZZZZIIJ)V",
reinterpret_cast<void*>(&webrtc::AudioManager::CacheAudioParameters)}};
j_native_registration_ = j_environment_->RegisterNatives(
"org/webrtc/voiceengine/WebRtcAudioManager",
@@ -180,14 +182,12 @@ void JNICALL AudioManager::CacheAudioParameters(JNIEnv* env,
jboolean low_latency_output,
jint output_buffer_size,
jint input_buffer_size,
- jint output_stream_type,
jlong native_audio_manager) {
webrtc::AudioManager* this_object =
reinterpret_cast<webrtc::AudioManager*>(native_audio_manager);
this_object->OnCacheAudioParameters(
env, sample_rate, channels, hardware_aec, hardware_agc, hardware_ns,
- low_latency_output, output_buffer_size, input_buffer_size,
- output_stream_type);
+ low_latency_output, output_buffer_size, input_buffer_size);
}
void AudioManager::OnCacheAudioParameters(JNIEnv* env,
@@ -198,8 +198,7 @@ void AudioManager::OnCacheAudioParameters(JNIEnv* env,
jboolean hardware_ns,
jboolean low_latency_output,
jint output_buffer_size,
- jint input_buffer_size,
- jint output_stream_type) {
+ jint input_buffer_size) {
ALOGD("OnCacheAudioParameters%s", GetThreadInfo().c_str());
ALOGD("hardware_aec: %d", hardware_aec);
ALOGD("hardware_agc: %d", hardware_agc);
@@ -209,17 +208,15 @@ void AudioManager::OnCacheAudioParameters(JNIEnv* env,
ALOGD("channels: %d", channels);
ALOGD("output_buffer_size: %d", output_buffer_size);
ALOGD("input_buffer_size: %d", input_buffer_size);
- ALOGD("output_stream_type: %d", output_stream_type);
RTC_DCHECK(thread_checker_.CalledOnValidThread());
hardware_aec_ = hardware_aec;
hardware_agc_ = hardware_agc;
hardware_ns_ = hardware_ns;
low_latency_playout_ = low_latency_output;
- output_stream_type_ = output_stream_type;
// TODO(henrika): add support for stereo output.
- playout_parameters_.reset(sample_rate, channels,
+ playout_parameters_.reset(sample_rate, static_cast<size_t>(channels),
static_cast<size_t>(output_buffer_size));
- record_parameters_.reset(sample_rate, channels,
+ record_parameters_.reset(sample_rate, static_cast<size_t>(channels),
static_cast<size_t>(input_buffer_size));
}
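The "(IIZZZZIIIJ)V" to "(IIZZZZIIJ)V" edit drops exactly one 'I', matching the removed jint output_stream_type parameter (I = jint, Z = jboolean, J = jlong, V = void return). A standalone sketch of such a registration table; the function body and names are illustrative, not the exact code:

    #include <jni.h>

    static void JNICALL CacheAudioParameters(
        JNIEnv* env, jobject obj,
        jint sample_rate, jint channels,
        jboolean hardware_aec, jboolean hardware_agc, jboolean hardware_ns,
        jboolean low_latency_output,
        jint output_buffer_size, jint input_buffer_size,
        jlong native_audio_manager) {
      // The real code forwards to the C++ AudioManager stored in the jlong.
    }

    // Two ints, four booleans, two ints, one long, returning void.
    static const JNINativeMethod kNativeMethods[] = {
        {"nativeCacheAudioParameters", "(IIZZZZIIJ)V",
         reinterpret_cast<void*>(&CacheAudioParameters)}};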
diff --git a/webrtc/modules/audio_device/android/audio_manager.h b/webrtc/modules/audio_device/android/audio_manager.h
index 5f23147b8a..26caf61afe 100644
--- a/webrtc/modules/audio_device/android/audio_manager.h
+++ b/webrtc/modules/audio_device/android/audio_manager.h
@@ -19,8 +19,8 @@
#include "webrtc/modules/audio_device/audio_device_config.h"
#include "webrtc/modules/audio_device/include/audio_device_defines.h"
#include "webrtc/modules/audio_device/audio_device_generic.h"
-#include "webrtc/modules/utility/interface/helpers_android.h"
-#include "webrtc/modules/utility/interface/jvm_android.h"
+#include "webrtc/modules/utility/include/helpers_android.h"
+#include "webrtc/modules/utility/include/jvm_android.h"
namespace webrtc {
@@ -93,8 +93,6 @@ class AudioManager {
// webrtc::kHighLatencyModeDelayEstimateInMilliseconds.
int GetDelayEstimateInMilliseconds() const;
- int OutputStreamType() const { return output_stream_type_; }
-
private:
// Called from Java side so we can cache the native audio parameters.
// This method will be called by the WebRtcAudioManager constructor, i.e.
@@ -109,7 +107,6 @@ class AudioManager {
jboolean low_latency_output,
jint output_buffer_size,
jint input_buffer_size,
- jint output_stream_type,
jlong native_audio_manager);
void OnCacheAudioParameters(JNIEnv* env,
jint sample_rate,
@@ -119,8 +116,7 @@ class AudioManager {
jboolean hardware_ns,
jboolean low_latency_output,
jint output_buffer_size,
- jint input_buffer_size,
- jint output_stream_type);
+ jint input_buffer_size);
// Stores thread ID in the constructor.
// We can then use ThreadChecker::CalledOnValidThread() to ensure that
@@ -159,13 +155,6 @@ class AudioManager {
// device supports low-latency output or not.
int delay_estimate_in_milliseconds_;
- // Contains the output stream type provided to this class at construction by
- // the AudioManager in Java land. Possible values are:
- // - AudioManager.STREAM_VOICE_CALL = 0
- // - AudioManager.STREAM_RING = 2
- // - AudioManager.STREAM_MUSIC = 3
- int output_stream_type_;
-
// Contains native parameters (e.g. sample rate, channel configuration).
// Set at construction in OnCacheAudioParameters() which is called from
// Java on the same thread as this object is created on.
diff --git a/webrtc/modules/audio_device/android/audio_manager_unittest.cc b/webrtc/modules/audio_device/android/audio_manager_unittest.cc
index a5bc840dff..ddae73067a 100644
--- a/webrtc/modules/audio_device/android/audio_manager_unittest.cc
+++ b/webrtc/modules/audio_device/android/audio_manager_unittest.cc
@@ -82,14 +82,14 @@ TEST_F(AudioManagerTest, ShowAudioParameterInfo) {
PRINT("%saudio layer: %s\n", kTag,
low_latency_out ? "Low latency OpenSL" : "Java/JNI based AudioTrack");
PRINT("%ssample rate: %d Hz\n", kTag, playout_parameters_.sample_rate());
- PRINT("%schannels: %d\n", kTag, playout_parameters_.channels());
+ PRINT("%schannels: %" PRIuS "\n", kTag, playout_parameters_.channels());
PRINT("%sframes per buffer: %" PRIuS " <=> %.2f ms\n", kTag,
playout_parameters_.frames_per_buffer(),
playout_parameters_.GetBufferSizeInMilliseconds());
PRINT("RECORD: \n");
PRINT("%saudio layer: %s\n", kTag, "Java/JNI based AudioRecord");
PRINT("%ssample rate: %d Hz\n", kTag, record_parameters_.sample_rate());
- PRINT("%schannels: %d\n", kTag, record_parameters_.channels());
+ PRINT("%schannels: %" PRIuS "\n", kTag, record_parameters_.channels());
PRINT("%sframes per buffer: %" PRIuS " <=> %.2f ms\n", kTag,
record_parameters_.frames_per_buffer(),
record_parameters_.GetBufferSizeInMilliseconds());
@@ -119,7 +119,7 @@ TEST_F(AudioManagerTest, AudioParametersWithDefaultConstruction) {
AudioParameters params;
EXPECT_FALSE(params.is_valid());
EXPECT_EQ(0, params.sample_rate());
- EXPECT_EQ(0, params.channels());
+ EXPECT_EQ(0U, params.channels());
EXPECT_EQ(0U, params.frames_per_buffer());
EXPECT_EQ(0U, params.frames_per_10ms_buffer());
EXPECT_EQ(0U, params.GetBytesPerFrame());
@@ -131,7 +131,7 @@ TEST_F(AudioManagerTest, AudioParametersWithDefaultConstruction) {
// Basic test of the AudioParameters class using non default construction.
TEST_F(AudioManagerTest, AudioParametersWithNonDefaultConstruction) {
const int kSampleRate = 48000;
- const int kChannels = 1;
+ const size_t kChannels = 1;
const size_t kFramesPerBuffer = 480;
const size_t kFramesPer10msBuffer = 480;
const size_t kBytesPerFrame = 2;
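The 0U/1u edits in these tests exist because channels() now returns size_t; comparing it against a plain int literal would risk a signed/unsigned comparison warning under EXPECT_EQ. A tiny sketch of the issue (AudioParameters here is a one-field stand-in):

    #include <cassert>
    #include <cstddef>

    struct AudioParameters {
      size_t channels = 0;  // was int before this patch
    };

    int main() {
      AudioParameters params;
      // Compare unsigned against unsigned; 0U matches the size_t type.
      assert(params.channels == 0U);
      return 0;
    }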
diff --git a/webrtc/modules/audio_device/android/audio_record_jni.cc b/webrtc/modules/audio_device/android/audio_record_jni.cc
index ba3212afc3..5dda7249ac 100644
--- a/webrtc/modules/audio_device/android/audio_record_jni.cc
+++ b/webrtc/modules/audio_device/android/audio_record_jni.cc
@@ -10,6 +10,8 @@
#include "webrtc/modules/audio_device/android/audio_record_jni.h"
+#include <utility>
+
#include <android/log.h>
#include "webrtc/base/arraysize.h"
@@ -28,23 +30,20 @@ namespace webrtc {
// AudioRecordJni::JavaAudioRecord implementation.
AudioRecordJni::JavaAudioRecord::JavaAudioRecord(
- NativeRegistration* native_reg, rtc::scoped_ptr<GlobalRef> audio_record)
- : audio_record_(audio_record.Pass()),
+ NativeRegistration* native_reg,
+ rtc::scoped_ptr<GlobalRef> audio_record)
+ : audio_record_(std::move(audio_record)),
init_recording_(native_reg->GetMethodId("initRecording", "(II)I")),
start_recording_(native_reg->GetMethodId("startRecording", "()Z")),
stop_recording_(native_reg->GetMethodId("stopRecording", "()Z")),
- enable_built_in_aec_(native_reg->GetMethodId(
- "enableBuiltInAEC", "(Z)Z")),
- enable_built_in_agc_(native_reg->GetMethodId(
- "enableBuiltInAGC", "(Z)Z")),
- enable_built_in_ns_(native_reg->GetMethodId(
- "enableBuiltInNS", "(Z)Z")) {
-}
+ enable_built_in_aec_(native_reg->GetMethodId("enableBuiltInAEC", "(Z)Z")),
+ enable_built_in_agc_(native_reg->GetMethodId("enableBuiltInAGC", "(Z)Z")),
+ enable_built_in_ns_(native_reg->GetMethodId("enableBuiltInNS", "(Z)Z")) {}
AudioRecordJni::JavaAudioRecord::~JavaAudioRecord() {}
int AudioRecordJni::JavaAudioRecord::InitRecording(
- int sample_rate, int channels) {
+ int sample_rate, size_t channels) {
return audio_record_->CallIntMethod(init_recording_,
static_cast<jint>(sample_rate),
static_cast<jint>(channels));
@@ -186,8 +185,8 @@ void AudioRecordJni::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
const int sample_rate_hz = audio_parameters_.sample_rate();
ALOGD("SetRecordingSampleRate(%d)", sample_rate_hz);
audio_device_buffer_->SetRecordingSampleRate(sample_rate_hz);
- const int channels = audio_parameters_.channels();
- ALOGD("SetRecordingChannels(%d)", channels);
+ const size_t channels = audio_parameters_.channels();
+ ALOGD("SetRecordingChannels(%" PRIuS ")", channels);
audio_device_buffer_->SetRecordingChannels(channels);
total_delay_in_milliseconds_ =
audio_manager_->GetDelayEstimateInMilliseconds();
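The %d-to-%" PRIuS " changes follow from the channel count becoming size_t; PRIuS is WebRTC's portable size_t printf specifier from webrtc/base/format_macros.h. A standalone approximation of how such a macro works, assuming its usual zu/Iu split:

    #include <cstddef>
    #include <cstdio>

    // Approximation of the PRIuS macro: %zu is C99/POSIX, while older MSVC
    // runtimes needed the non-standard %Iu, hence one macro for both.
    #if defined(_MSC_VER)
    #define PRIuS "Iu"
    #else
    #define PRIuS "zu"
    #endif

    int main() {
      const size_t channels = 1;
      printf("SetRecordingChannels(%" PRIuS ")\n", channels);
      return 0;
    }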
diff --git a/webrtc/modules/audio_device/android/audio_record_jni.h b/webrtc/modules/audio_device/android/audio_record_jni.h
index efd516425a..766316a83a 100644
--- a/webrtc/modules/audio_device/android/audio_record_jni.h
+++ b/webrtc/modules/audio_device/android/audio_record_jni.h
@@ -17,8 +17,8 @@
#include "webrtc/modules/audio_device/android/audio_manager.h"
#include "webrtc/modules/audio_device/include/audio_device_defines.h"
#include "webrtc/modules/audio_device/audio_device_generic.h"
-#include "webrtc/modules/utility/interface/helpers_android.h"
-#include "webrtc/modules/utility/interface/jvm_android.h"
+#include "webrtc/modules/utility/include/helpers_android.h"
+#include "webrtc/modules/utility/include/jvm_android.h"
namespace webrtc {
@@ -49,7 +49,7 @@ class AudioRecordJni {
rtc::scoped_ptr<GlobalRef> audio_track);
~JavaAudioRecord();
- int InitRecording(int sample_rate, int channels);
+ int InitRecording(int sample_rate, size_t channels);
bool StartRecording();
bool StopRecording();
bool EnableBuiltInAEC(bool enable);
diff --git a/webrtc/modules/audio_device/android/audio_track_jni.cc b/webrtc/modules/audio_device/android/audio_track_jni.cc
index 29b21ae998..057e016405 100644
--- a/webrtc/modules/audio_device/android/audio_track_jni.cc
+++ b/webrtc/modules/audio_device/android/audio_track_jni.cc
@@ -11,6 +11,8 @@
#include "webrtc/modules/audio_device/android/audio_manager.h"
#include "webrtc/modules/audio_device/android/audio_track_jni.h"
+#include <utility>
+
#include <android/log.h>
#include "webrtc/base/arraysize.h"
@@ -28,16 +30,16 @@ namespace webrtc {
// AudioTrackJni::JavaAudioTrack implementation.
AudioTrackJni::JavaAudioTrack::JavaAudioTrack(
- NativeRegistration* native_reg, rtc::scoped_ptr<GlobalRef> audio_track)
- : audio_track_(audio_track.Pass()),
+ NativeRegistration* native_reg,
+ rtc::scoped_ptr<GlobalRef> audio_track)
+ : audio_track_(std::move(audio_track)),
init_playout_(native_reg->GetMethodId("initPlayout", "(II)V")),
start_playout_(native_reg->GetMethodId("startPlayout", "()Z")),
stop_playout_(native_reg->GetMethodId("stopPlayout", "()Z")),
set_stream_volume_(native_reg->GetMethodId("setStreamVolume", "(I)Z")),
- get_stream_max_volume_(native_reg->GetMethodId(
- "getStreamMaxVolume", "()I")),
- get_stream_volume_(native_reg->GetMethodId("getStreamVolume", "()I")) {
-}
+ get_stream_max_volume_(
+ native_reg->GetMethodId("getStreamMaxVolume", "()I")),
+ get_stream_volume_(native_reg->GetMethodId("getStreamVolume", "()I")) {}
AudioTrackJni::JavaAudioTrack::~JavaAudioTrack() {}
@@ -200,8 +202,8 @@ void AudioTrackJni::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
const int sample_rate_hz = audio_parameters_.sample_rate();
ALOGD("SetPlayoutSampleRate(%d)", sample_rate_hz);
audio_device_buffer_->SetPlayoutSampleRate(sample_rate_hz);
- const int channels = audio_parameters_.channels();
- ALOGD("SetPlayoutChannels(%d)", channels);
+ const size_t channels = audio_parameters_.channels();
+ ALOGD("SetPlayoutChannels(%" PRIuS ")", channels);
audio_device_buffer_->SetPlayoutChannels(channels);
}
diff --git a/webrtc/modules/audio_device/android/audio_track_jni.h b/webrtc/modules/audio_device/android/audio_track_jni.h
index 43bfcad657..067dc6c651 100644
--- a/webrtc/modules/audio_device/android/audio_track_jni.h
+++ b/webrtc/modules/audio_device/android/audio_track_jni.h
@@ -18,8 +18,8 @@
#include "webrtc/modules/audio_device/android/audio_manager.h"
#include "webrtc/modules/audio_device/include/audio_device_defines.h"
#include "webrtc/modules/audio_device/audio_device_generic.h"
-#include "webrtc/modules/utility/interface/helpers_android.h"
-#include "webrtc/modules/utility/interface/jvm_android.h"
+#include "webrtc/modules/utility/include/helpers_android.h"
+#include "webrtc/modules/utility/include/jvm_android.h"
namespace webrtc {
diff --git a/webrtc/modules/audio_device/android/build_info.cc b/webrtc/modules/audio_device/android/build_info.cc
index cb5dc293d7..6289697073 100644
--- a/webrtc/modules/audio_device/android/build_info.cc
+++ b/webrtc/modules/audio_device/android/build_info.cc
@@ -10,7 +10,7 @@
#include "webrtc/modules/audio_device/android/build_info.h"
-#include "webrtc/modules/utility/interface/helpers_android.h"
+#include "webrtc/modules/utility/include/helpers_android.h"
namespace webrtc {
diff --git a/webrtc/modules/audio_device/android/build_info.h b/webrtc/modules/audio_device/android/build_info.h
index d9b2871841..1490fa0772 100644
--- a/webrtc/modules/audio_device/android/build_info.h
+++ b/webrtc/modules/audio_device/android/build_info.h
@@ -14,7 +14,7 @@
#include <jni.h>
#include <string>
-#include "webrtc/modules/utility/interface/jvm_android.h"
+#include "webrtc/modules/utility/include/jvm_android.h"
namespace webrtc {
diff --git a/webrtc/modules/audio_device/android/ensure_initialized.cc b/webrtc/modules/audio_device/android/ensure_initialized.cc
index e8197b7ca0..b63aec1f27 100644
--- a/webrtc/modules/audio_device/android/ensure_initialized.cc
+++ b/webrtc/modules/audio_device/android/ensure_initialized.cc
@@ -14,11 +14,12 @@
// Note: this dependency is dangerous since it reaches into Chromium's base.
// There's a risk of e.g. macro clashes. This file may only be used in tests.
+#include "base/android/context_utils.h"
#include "base/android/jni_android.h"
#include "webrtc/base/checks.h"
#include "webrtc/modules/audio_device/android/audio_record_jni.h"
#include "webrtc/modules/audio_device/android/audio_track_jni.h"
-#include "webrtc/modules/utility/interface/jvm_android.h"
+#include "webrtc/modules/utility/include/jvm_android.h"
namespace webrtc {
namespace audiodevicemodule {
diff --git a/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioEffects.java b/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioEffects.java
index 9b90f4ab54..c3ab043868 100644
--- a/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioEffects.java
+++ b/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioEffects.java
@@ -10,6 +10,7 @@
package org.webrtc.voiceengine;
+import android.annotation.TargetApi;
import android.media.audiofx.AcousticEchoCanceler;
import android.media.audiofx.AudioEffect;
import android.media.audiofx.AudioEffect.Descriptor;
@@ -119,6 +120,7 @@ class WebRtcAudioEffects {
// Returns true if the platform AEC should be excluded based on its UUID.
// AudioEffect.queryEffects() can throw IllegalStateException.
+ @TargetApi(18)
private static boolean isAcousticEchoCancelerExcludedByUUID() {
for (Descriptor d : AudioEffect.queryEffects()) {
if (d.type.equals(AudioEffect.EFFECT_TYPE_AEC) &&
@@ -131,6 +133,7 @@ class WebRtcAudioEffects {
// Returns true if the platform AGC should be excluded based on its UUID.
// AudioEffect.queryEffects() can throw IllegalStateException.
+ @TargetApi(18)
private static boolean isAutomaticGainControlExcludedByUUID() {
for (Descriptor d : AudioEffect.queryEffects()) {
if (d.type.equals(AudioEffect.EFFECT_TYPE_AGC) &&
@@ -143,6 +146,7 @@ class WebRtcAudioEffects {
// Returns true if the platform NS should be excluded based on its UUID.
// AudioEffect.queryEffects() can throw IllegalStateException.
+ @TargetApi(18)
private static boolean isNoiseSuppressorExcludedByUUID() {
for (Descriptor d : AudioEffect.queryEffects()) {
if (d.type.equals(AudioEffect.EFFECT_TYPE_NS) &&
@@ -208,15 +212,6 @@ class WebRtcAudioEffects {
private WebRtcAudioEffects() {
Logging.d(TAG, "ctor" + WebRtcAudioUtils.getThreadInfo());
- for (Descriptor d : AudioEffect.queryEffects()) {
- if (effectTypeIsVoIP(d.type) || DEBUG) {
- // Only log information for VoIP effects (AEC, AEC and NS).
- Logging.d(TAG, "name: " + d.name + ", "
- + "mode: " + d.connectMode + ", "
- + "implementor: " + d.implementor + ", "
- + "UUID: " + d.uuid);
- }
- }
}
// Call this method to enable or disable the platform AEC. It modifies
@@ -282,6 +277,17 @@ class WebRtcAudioEffects {
assertTrue(agc == null);
assertTrue(ns == null);
+ // Add logging of supported effects, but only for the "VoIP effects",
+ // i.e., AEC, AGC and NS (everything is logged when DEBUG is set).
+ for (Descriptor d : AudioEffect.queryEffects()) {
+ if (effectTypeIsVoIP(d.type) || DEBUG) {
+ Logging.d(TAG, "name: " + d.name + ", "
+ + "mode: " + d.connectMode + ", "
+ + "implementor: " + d.implementor + ", "
+ + "UUID: " + d.uuid);
+ }
+ }
+
if (isAcousticEchoCancelerSupported()) {
// Create an AcousticEchoCanceler and attach it to the AudioRecord on
// the specified audio session.
@@ -366,7 +372,11 @@ class WebRtcAudioEffects {
// AudioEffect.Descriptor array that are actually not available on the device.
// As an example: Samsung Galaxy S6 includes an AGC in the descriptor but
// AutomaticGainControl.isAvailable() returns false.
+ @TargetApi(18)
private boolean effectTypeIsVoIP(UUID type) {
+ if (!WebRtcAudioUtils.runningOnJellyBeanMR2OrHigher())
+ return false;
+
return (AudioEffect.EFFECT_TYPE_AEC.equals(type)
&& isAcousticEchoCancelerSupported())
|| (AudioEffect.EFFECT_TYPE_AGC.equals(type)
diff --git a/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioManager.java b/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioManager.java
index cf2f03a2f1..1213f333d9 100644
--- a/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioManager.java
+++ b/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioManager.java
@@ -10,6 +10,7 @@
package org.webrtc.voiceengine;
+import android.annotation.TargetApi;
import android.content.Context;
import android.content.pm.PackageManager;
import android.media.AudioFormat;
@@ -33,11 +34,24 @@ import java.lang.Math;
// recommended to always use AudioManager.MODE_IN_COMMUNICATION.
// This class also adds support for output volume control of the
// STREAM_VOICE_CALL-type stream.
-class WebRtcAudioManager {
+public class WebRtcAudioManager {
private static final boolean DEBUG = false;
private static final String TAG = "WebRtcAudioManager";
+ private static boolean blacklistDeviceForOpenSLESUsage = false;
+ private static boolean blacklistDeviceForOpenSLESUsageIsOverridden = false;
+
+ // Call this method to override the default list of blacklisted devices
+ // specified in WebRtcAudioUtils.BLACKLISTED_OPEN_SL_ES_MODELS.
+ // It allows an app to take control over which devices to exclude from
+ // using the OpenSL ES audio output path.
+ public static synchronized void setBlacklistDeviceForOpenSLESUsage(
+ boolean enable) {
+ blacklistDeviceForOpenSLESUsageIsOverridden = true;
+ blacklistDeviceForOpenSLESUsage = enable;
+ }
+
// Default audio data format is PCM 16 bit per sample.
// Guaranteed to be supported by all devices.
private static final int BITS_PER_SAMPLE = 16;
@@ -71,7 +85,6 @@ class WebRtcAudioManager {
private int channels;
private int outputBufferSize;
private int inputBufferSize;
- private int outputStreamType;
WebRtcAudioManager(Context context, long nativeAudioManager) {
Logging.d(TAG, "ctor" + WebRtcAudioUtils.getThreadInfo());
@@ -85,7 +98,7 @@ class WebRtcAudioManager {
storeAudioParameters();
nativeCacheAudioParameters(
sampleRate, channels, hardwareAEC, hardwareAGC, hardwareNS,
- lowLatencyOutput, outputBufferSize, inputBufferSize, outputStreamType,
+ lowLatencyOutput, outputBufferSize, inputBufferSize,
nativeAudioManager);
}
@@ -110,8 +123,9 @@ class WebRtcAudioManager {
return (audioManager.getMode() == AudioManager.MODE_IN_COMMUNICATION);
}
- private boolean isDeviceBlacklistedForOpenSLESUsage() {
- boolean blacklisted =
+ private boolean isDeviceBlacklistedForOpenSLESUsage() {
+ boolean blacklisted = blacklistDeviceForOpenSLESUsageIsOverridden ?
+ blacklistDeviceForOpenSLESUsage :
WebRtcAudioUtils.deviceIsBlacklistedForOpenSLESUsage();
if (blacklisted) {
Logging.e(TAG, Build.MODEL + " is blacklisted for OpenSL ES usage!");
@@ -133,8 +147,6 @@ class WebRtcAudioManager {
getMinOutputFrameSize(sampleRate, channels);
// TODO(henrika): add support for low-latency input.
inputBufferSize = getMinInputFrameSize(sampleRate, channels);
- outputStreamType = WebRtcAudioUtils.getOutputStreamTypeFromAudioMode(
- audioManager.getMode());
}
// Gets the current earpiece state.
@@ -178,20 +190,26 @@ class WebRtcAudioManager {
// No overrides available. Deliver best possible estimate based on default
// Android AudioManager APIs.
final int sampleRateHz;
- if (!WebRtcAudioUtils.runningOnJellyBeanMR1OrHigher()) {
- sampleRateHz = WebRtcAudioUtils.getDefaultSampleRateHz();
+ if (WebRtcAudioUtils.runningOnJellyBeanMR1OrHigher()) {
+ sampleRateHz = getSampleRateOnJellyBeanMR10OrHigher();
} else {
- String sampleRateString = audioManager.getProperty(
- AudioManager.PROPERTY_OUTPUT_SAMPLE_RATE);
- sampleRateHz = (sampleRateString == null)
- ? WebRtcAudioUtils.getDefaultSampleRateHz()
- : Integer.parseInt(sampleRateString);
+ sampleRateHz = WebRtcAudioUtils.getDefaultSampleRateHz();
}
Logging.d(TAG, "Sample rate is set to " + sampleRateHz + " Hz");
return sampleRateHz;
}
+ @TargetApi(17)
+ private int getSampleRateOnJellyBeanMR10OrHigher() {
+ String sampleRateString = audioManager.getProperty(
+ AudioManager.PROPERTY_OUTPUT_SAMPLE_RATE);
+ return (sampleRateString == null)
+ ? WebRtcAudioUtils.getDefaultSampleRateHz()
+ : Integer.parseInt(sampleRateString);
+ }
+
// Returns the native output buffer size for low-latency output streams.
+ @TargetApi(17)
private int getLowLatencyOutputFramesPerBuffer() {
assertTrue(isLowLatencyOutputSupported());
if (!WebRtcAudioUtils.runningOnJellyBeanMR1OrHigher()) {
@@ -270,5 +288,5 @@ class WebRtcAudioManager {
private native void nativeCacheAudioParameters(
int sampleRate, int channels, boolean hardwareAEC, boolean hardwareAGC,
boolean hardwareNS, boolean lowLatencyOutput, int outputBufferSize,
- int inputBufferSize, int outputStreamType, long nativeAudioManager);
+ int inputBufferSize, long nativeAudioManager);
}
diff --git a/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioRecord.java b/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioRecord.java
index 7b31e08eed..ff77635843 100644
--- a/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioRecord.java
+++ b/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioRecord.java
@@ -192,10 +192,6 @@ class WebRtcAudioRecord {
Math.max(BUFFER_SIZE_FACTOR * minBufferSize, byteBuffer.capacity());
Logging.d(TAG, "bufferSizeInBytes: " + bufferSizeInBytes);
try {
- // TODO(henrika): the only supported audio source for input is currently
- // AudioSource.VOICE_COMMUNICATION. Is there any reason why we should
- // support other types, e.g. DEFAULT or MIC? Only reason I can think of
- // is if the device does not support VOICE_COMMUNICATION.
audioRecord = new AudioRecord(AudioSource.VOICE_COMMUNICATION,
sampleRate,
AudioFormat.CHANNEL_IN_MONO,
diff --git a/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioTrack.java b/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioTrack.java
index ec0e109169..11eb51383d 100644
--- a/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioTrack.java
+++ b/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioTrack.java
@@ -13,6 +13,7 @@ package org.webrtc.voiceengine;
import java.lang.Thread;
import java.nio.ByteBuffer;
+import android.annotation.TargetApi;
import android.content.Context;
import android.media.AudioFormat;
import android.media.AudioManager;
@@ -39,7 +40,6 @@ class WebRtcAudioTrack {
private final Context context;
private final long nativeAudioTrack;
private final AudioManager audioManager;
- private final int streamType;
private ByteBuffer byteBuffer;
@@ -91,13 +91,9 @@ class WebRtcAudioTrack {
assertTrue(sizeInBytes <= byteBuffer.remaining());
int bytesWritten = 0;
if (WebRtcAudioUtils.runningOnLollipopOrHigher()) {
- bytesWritten = audioTrack.write(byteBuffer,
- sizeInBytes,
- AudioTrack.WRITE_BLOCKING);
+ bytesWritten = writeOnLollipop(audioTrack, byteBuffer, sizeInBytes);
} else {
- bytesWritten = audioTrack.write(byteBuffer.array(),
- byteBuffer.arrayOffset(),
- sizeInBytes);
+ bytesWritten = writePreLollipop(audioTrack, byteBuffer, sizeInBytes);
}
if (bytesWritten != sizeInBytes) {
Logging.e(TAG, "AudioTrack.write failed: " + bytesWritten);
@@ -124,6 +120,15 @@ class WebRtcAudioTrack {
audioTrack.flush();
}
+ @TargetApi(21)
+ private int writeOnLollipop(AudioTrack audioTrack, ByteBuffer byteBuffer, int sizeInBytes) {
+ return audioTrack.write(byteBuffer, sizeInBytes, AudioTrack.WRITE_BLOCKING);
+ }
+
+ private int writePreLollipop(AudioTrack audioTrack, ByteBuffer byteBuffer, int sizeInBytes) {
+ return audioTrack.write(byteBuffer.array(), byteBuffer.arrayOffset(), sizeInBytes);
+ }
+
public void joinThread() {
keepAlive = false;
while (isAlive()) {
@@ -142,9 +147,6 @@ class WebRtcAudioTrack {
this.nativeAudioTrack = nativeAudioTrack;
audioManager = (AudioManager) context.getSystemService(
Context.AUDIO_SERVICE);
- this.streamType =
- WebRtcAudioUtils.getOutputStreamTypeFromAudioMode(
- audioManager.getMode());
if (DEBUG) {
WebRtcAudioUtils.logDeviceInfo(TAG);
}
@@ -181,7 +183,7 @@ class WebRtcAudioTrack {
// Create an AudioTrack object and initialize its associated audio buffer.
// The size of this buffer determines how long an AudioTrack can play
// before running out of data.
- audioTrack = new AudioTrack(streamType,
+ audioTrack = new AudioTrack(AudioManager.STREAM_VOICE_CALL,
sampleRate,
AudioFormat.CHANNEL_OUT_MONO,
AudioFormat.ENCODING_PCM_16BIT,
@@ -193,7 +195,7 @@ class WebRtcAudioTrack {
}
assertTrue(audioTrack.getState() == AudioTrack.STATE_INITIALIZED);
assertTrue(audioTrack.getPlayState() == AudioTrack.PLAYSTATE_STOPPED);
- assertTrue(audioTrack.getStreamType() == streamType);
+ assertTrue(audioTrack.getStreamType() == AudioManager.STREAM_VOICE_CALL);
}
private boolean startPlayout() {
@@ -217,32 +219,37 @@ class WebRtcAudioTrack {
return true;
}
- /** Get max possible volume index given type of audio stream. */
+ /** Get max possible volume index for a phone call audio stream. */
private int getStreamMaxVolume() {
Logging.d(TAG, "getStreamMaxVolume");
assertTrue(audioManager != null);
- return audioManager.getStreamMaxVolume(streamType);
+ return audioManager.getStreamMaxVolume(AudioManager.STREAM_VOICE_CALL);
}
- /** Set current volume level given type of audio stream. */
+ /** Set current volume level for a phone call audio stream. */
private boolean setStreamVolume(int volume) {
Logging.d(TAG, "setStreamVolume(" + volume + ")");
assertTrue(audioManager != null);
- if (WebRtcAudioUtils.runningOnLollipopOrHigher()) {
- if (audioManager.isVolumeFixed()) {
- Logging.e(TAG, "The device implements a fixed volume policy.");
- return false;
- }
+ if (isVolumeFixed()) {
+ Logging.e(TAG, "The device implements a fixed volume policy.");
+ return false;
}
- audioManager.setStreamVolume(streamType, volume, 0);
+ audioManager.setStreamVolume(AudioManager.STREAM_VOICE_CALL, volume, 0);
return true;
}
- /** Get current volume level given type of audio stream. */
+ @TargetApi(21)
+ private boolean isVolumeFixed() {
+ if (!WebRtcAudioUtils.runningOnLollipopOrHigher())
+ return false;
+ return audioManager.isVolumeFixed();
+ }
+
+ /** Get current volume level for a phone call audio stream. */
private int getStreamVolume() {
Logging.d(TAG, "getStreamVolume");
assertTrue(audioManager != null);
- return audioManager.getStreamVolume(streamType);
+ return audioManager.getStreamVolume(AudioManager.STREAM_VOICE_CALL);
}
/** Helper method which throws an exception when an assertion has failed. */
diff --git a/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioUtils.java b/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioUtils.java
index f08e11dad8..45f564a4dd 100644
--- a/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioUtils.java
+++ b/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioUtils.java
@@ -144,6 +144,11 @@ public final class WebRtcAudioUtils {
return Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN_MR1;
}
+ public static boolean runningOnJellyBeanMR2OrHigher() {
+ // July 24, 2013: Android 4.3. API Level 18.
+ return Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN_MR2;
+ }
+
public static boolean runningOnLollipopOrHigher() {
// API Level 21.
return Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP;
@@ -193,37 +198,5 @@ public final class WebRtcAudioUtils {
permission,
Process.myPid(),
Process.myUid()) == PackageManager.PERMISSION_GRANTED;
- }
-
- // Convert the provided audio |mode| into most suitable audio output stream
- // type. The stream type is used for creating audio streams and for volume
- // changes. It is essential that the mode and type are in-line to ensure
- // correct behavior. If for example a STREAM_MUSIC type of stream is created
- // in a MODE_IN_COMMUNICATION mode, audio will be played out and the volume
- // icon will look OK but the actual volume will not be changed when the user
- // changes the volume slider.
- // TODO(henrika): there is currently no mapping to STREAM_ALARM, STREAM_DTMF,
- // or STREAM_NOTIFICATION types since I am unable to see a reason for using
- // them. There are only four different modes.
- public static int getOutputStreamTypeFromAudioMode(int mode) {
- Logging.d(TAG, "getOutputStreamTypeFromAudioMode(mode=" + mode + ")");
- switch (mode) {
- case AudioManager.MODE_NORMAL:
- // The audio stream for music playback.
- Logging.d(TAG, "AudioManager.STREAM_MUSIC");
- return AudioManager.STREAM_MUSIC;
- case AudioManager.MODE_RINGTONE:
- // Audio stream for the phone ring.
- Logging.d(TAG, "AudioManager.STREAM_RING");
- return AudioManager.STREAM_RING;
- case AudioManager.MODE_IN_CALL:
- case AudioManager.MODE_IN_COMMUNICATION:
- // Audio stream for phone calls.
- Logging.d(TAG, "AudioManager.STREAM_VOICE_CALL");
- return AudioManager.STREAM_VOICE_CALL;
- default:
- Logging.d(TAG, "AudioManager.USE_DEFAULT_STREAM_TYPE");
- return AudioManager.USE_DEFAULT_STREAM_TYPE;
}
- }
}
diff --git a/webrtc/modules/audio_device/android/opensles_player.cc b/webrtc/modules/audio_device/android/opensles_player.cc
index 40967c5fb9..d2bff4905e 100644
--- a/webrtc/modules/audio_device/android/opensles_player.cc
+++ b/webrtc/modules/audio_device/android/opensles_player.cc
@@ -15,6 +15,7 @@
#include "webrtc/base/arraysize.h"
#include "webrtc/base/checks.h"
#include "webrtc/base/format_macros.h"
+#include "webrtc/base/timeutils.h"
#include "webrtc/modules/audio_device/android/audio_manager.h"
#include "webrtc/modules/audio_device/fine_audio_buffer.h"
@@ -38,7 +39,6 @@ namespace webrtc {
OpenSLESPlayer::OpenSLESPlayer(AudioManager* audio_manager)
: audio_parameters_(audio_manager->GetPlayoutAudioParameters()),
- stream_type_(audio_manager->OutputStreamType()),
audio_device_buffer_(NULL),
initialized_(false),
playing_(false),
@@ -47,11 +47,9 @@ OpenSLESPlayer::OpenSLESPlayer(AudioManager* audio_manager)
engine_(nullptr),
player_(nullptr),
simple_buffer_queue_(nullptr),
- volume_(nullptr) {
+ volume_(nullptr),
+ last_play_time_(0) {
ALOGD("ctor%s", GetThreadInfo().c_str());
- RTC_DCHECK(stream_type_ == SL_ANDROID_STREAM_VOICE ||
- stream_type_ == SL_ANDROID_STREAM_RING ||
- stream_type_ == SL_ANDROID_STREAM_MEDIA) << stream_type_;
// Use native audio output parameters provided by the audio manager and
// define the PCM format structure.
pcm_format_ = CreatePCMConfiguration(audio_parameters_.channels(),
@@ -99,6 +97,7 @@ int OpenSLESPlayer::InitPlayout() {
CreateMix();
initialized_ = true;
buffer_index_ = 0;
+ last_play_time_ = rtc::Time();
return 0;
}
@@ -180,15 +179,15 @@ void OpenSLESPlayer::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
const int sample_rate_hz = audio_parameters_.sample_rate();
ALOGD("SetPlayoutSampleRate(%d)", sample_rate_hz);
audio_device_buffer_->SetPlayoutSampleRate(sample_rate_hz);
- const int channels = audio_parameters_.channels();
- ALOGD("SetPlayoutChannels(%d)", channels);
+ const size_t channels = audio_parameters_.channels();
+ ALOGD("SetPlayoutChannels(%" PRIuS ")", channels);
audio_device_buffer_->SetPlayoutChannels(channels);
RTC_CHECK(audio_device_buffer_);
AllocateDataBuffers();
}
SLDataFormat_PCM OpenSLESPlayer::CreatePCMConfiguration(
- int channels,
+ size_t channels,
int sample_rate,
size_t bits_per_sample) {
ALOGD("CreatePCMConfiguration");
@@ -237,7 +236,16 @@ void OpenSLESPlayer::AllocateDataBuffers() {
RTC_DCHECK(thread_checker_.CalledOnValidThread());
RTC_DCHECK(!simple_buffer_queue_);
RTC_CHECK(audio_device_buffer_);
- bytes_per_buffer_ = audio_parameters_.GetBytesPerBuffer();
+ // Don't use the lowest possible size as the native buffer size. Instead,
+ // use 10ms to better match the frame size that WebRTC uses. This reduces
+ // the risk of audio glitches and also gives a cleaner sequence of
+ // callbacks from the OpenSL ES thread into WebRTC when asking for audio
+ // to render.
+ ALOGD("lowest possible buffer size: %" PRIuS,
+ audio_parameters_.GetBytesPerBuffer());
+ bytes_per_buffer_ = audio_parameters_.GetBytesPerFrame() *
+ audio_parameters_.frames_per_10ms_buffer();
+ RTC_DCHECK_GE(bytes_per_buffer_, audio_parameters_.GetBytesPerBuffer());
ALOGD("native buffer size: %" PRIuS, bytes_per_buffer_);
// Create a modified audio buffer class which allows us to ask for any number
// of samples (and not only multiple of 10ms) to match the native OpenSL ES
@@ -351,7 +359,7 @@ bool OpenSLESPlayer::CreateAudioPlayer() {
false);
// Set audio player configuration to SL_ANDROID_STREAM_VOICE which
// corresponds to android.media.AudioManager.STREAM_VOICE_CALL.
- SLint32 stream_type = stream_type_;
+ SLint32 stream_type = SL_ANDROID_STREAM_VOICE;
RETURN_ON_ERROR(
(*player_config)
->SetConfiguration(player_config, SL_ANDROID_KEY_STREAM_TYPE,
@@ -422,6 +430,15 @@ void OpenSLESPlayer::FillBufferQueue() {
}
void OpenSLESPlayer::EnqueuePlayoutData() {
+ // Check the delta time between two successive callbacks and log a warning
+ // if it becomes very large.
+ // TODO(henrika): 100ms is used as the upper limit, but this value is
+ // rather arbitrary.
+ const uint32_t current_time = rtc::Time();
+ const uint32_t diff = current_time - last_play_time_;
+ if (diff > 100) {
+ ALOGW("Bad OpenSL ES playout timing, dT=%u [ms]", diff);
+ }
+ last_play_time_ = current_time;
// Read audio data from the WebRTC source using the FineAudioBuffer object
// to adjust for differences in buffer size between WebRTC (10ms) and native
// OpenSL ES.
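Two behavioral changes land in this file: native buffers are sized to a full 10 ms WebRTC frame instead of the device minimum, and callback spacing is checked against a 100 ms ceiling. A standalone sketch of both computations with illustrative numbers (48 kHz is an assumption here, not a value from the patch):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    int main() {
      // Buffer sizing: one 10 ms frame at the native rate.
      const int sample_rate_hz = 48000;   // assumed device rate
      const size_t channels = 1;          // playout is mono in this patch
      const size_t bytes_per_sample = 2;  // PCM 16-bit
      const size_t frames_per_10ms = static_cast<size_t>(sample_rate_hz / 100);
      const size_t bytes_per_buffer =
          bytes_per_sample * channels * frames_per_10ms;
      printf("native buffer size: %zu bytes\n", bytes_per_buffer);  // 960

      // Timing check: warn when two enqueue callbacks are >100 ms apart.
      uint32_t last_play_time = 0;  // rtc::Time() in the real code
      uint32_t current_time = 150;  // pretend 150 ms have passed
      uint32_t diff = current_time - last_play_time;
      if (diff > 100)
        printf("Bad OpenSL ES playout timing, dT=%u [ms]\n", diff);
      return 0;
    }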
diff --git a/webrtc/modules/audio_device/android/opensles_player.h b/webrtc/modules/audio_device/android/opensles_player.h
index 96b1d49ac5..fa9e931218 100644
--- a/webrtc/modules/audio_device/android/opensles_player.h
+++ b/webrtc/modules/audio_device/android/opensles_player.h
@@ -22,7 +22,7 @@
#include "webrtc/modules/audio_device/android/opensles_common.h"
#include "webrtc/modules/audio_device/include/audio_device_defines.h"
#include "webrtc/modules/audio_device/audio_device_generic.h"
-#include "webrtc/modules/utility/interface/helpers_android.h"
+#include "webrtc/modules/utility/include/helpers_android.h"
namespace webrtc {
@@ -52,7 +52,7 @@ class OpenSLESPlayer {
// buffer count of 2 or more, and a buffer size and sample rate that are
// compatible with the device's native output configuration provided via the
// audio manager at construction.
- static const int kNumOfOpenSLESBuffers = 2;
+ static const int kNumOfOpenSLESBuffers = 4;
// There is no need for this class to use JNI.
static int32_t SetAndroidAudioDeviceObjects(void* javaVM, void* context) {
@@ -94,7 +94,7 @@ class OpenSLESPlayer {
void EnqueuePlayoutData();
// Configures the SL_DATAFORMAT_PCM structure.
- SLDataFormat_PCM CreatePCMConfiguration(int channels,
+ SLDataFormat_PCM CreatePCMConfiguration(size_t channels,
int sample_rate,
size_t bits_per_sample);
@@ -130,20 +130,6 @@ class OpenSLESPlayer {
// AudioManager.
const AudioParameters audio_parameters_;
- // Contains the stream type provided to this class at construction by the
- // AudioManager. Possible input values are:
- // - AudioManager.STREAM_VOICE_CALL = 0
- // - AudioManager.STREAM_RING = 2
- // - AudioManager.STREAM_MUSIC = 3
- // These value are mapped to the corresponding audio playback stream type
- // values in the "OpenSL ES domain":
- // - SL_ANDROID_STREAM_VOICE <=> STREAM_VOICE_CALL (0)
- // - SL_ANDROID_STREAM_RING <=> STREAM_RING (2)
- // - SL_ANDROID_STREAM_MEDIA <=> STREAM_MUSIC (3)
- // when creating the audio player. See SLES/OpenSLES_AndroidConfiguration.h
- // for details.
- const int stream_type_;
-
// Raw pointer handle provided to us in AttachAudioBuffer(). Owned by the
// AudioDeviceModuleImpl class and called by AudioDeviceModuleImpl::Create().
AudioDeviceBuffer* audio_device_buffer_;
@@ -209,6 +195,9 @@ class OpenSLESPlayer {
// This interface exposes controls for manipulating the object’s audio volume
// properties. This interface is supported on the Audio Player object.
SLVolumeItf volume_;
+
+ // Last time the OpenSL ES layer asked for audio data to play out.
+ uint32_t last_play_time_;
};
} // namespace webrtc
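On the kNumOfOpenSLESBuffers bump from 2 to 4 above: combined with the 10 ms buffers now used in opensles_player.cc, the queue can hold up to 4 x 10 = 40 ms of audio, twice the previous headroom against scheduling jitter. The arithmetic, for the record:

    #include <cstdio>

    int main() {
      const int kNumOfOpenSLESBuffers = 4;  // was 2
      const int kBufferMs = 10;             // one WebRTC frame per buffer
      printf("max queued audio: %d ms\n", kNumOfOpenSLESBuffers * kBufferMs);
      return 0;
    }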
diff --git a/webrtc/modules/audio_device/audio_device.gypi b/webrtc/modules/audio_device/audio_device.gypi
index 0678d33802..2db9f1e17b 100644
--- a/webrtc/modules/audio_device/audio_device.gypi
+++ b/webrtc/modules/audio_device/audio_device.gypi
@@ -20,13 +20,13 @@
],
'include_dirs': [
'.',
- '../interface',
+ '../include',
'include',
'dummy', # Contains dummy audio device implementations.
],
'direct_dependent_settings': {
'include_dirs': [
- '../interface',
+ '../include',
'include',
],
},
@@ -86,48 +86,26 @@
}],
['include_internal_audio_device==1', {
'sources': [
- 'android/audio_device_template.h',
- 'android/audio_manager.cc',
- 'android/audio_manager.h',
- 'android/audio_record_jni.cc',
- 'android/audio_record_jni.h',
- 'android/audio_track_jni.cc',
- 'android/audio_track_jni.h',
- 'android/build_info.cc',
- 'android/build_info.h',
- 'android/opensles_common.cc',
- 'android/opensles_common.h',
- 'android/opensles_player.cc',
- 'android/opensles_player.h',
'audio_device_impl.cc',
'audio_device_impl.h',
- 'ios/audio_device_ios.h',
- 'ios/audio_device_ios.mm',
- 'ios/audio_device_not_implemented_ios.mm',
- 'linux/alsasymboltable_linux.cc',
- 'linux/alsasymboltable_linux.h',
- 'linux/audio_device_alsa_linux.cc',
- 'linux/audio_device_alsa_linux.h',
- 'linux/audio_mixer_manager_alsa_linux.cc',
- 'linux/audio_mixer_manager_alsa_linux.h',
- 'linux/latebindingsymboltable_linux.cc',
- 'linux/latebindingsymboltable_linux.h',
- 'mac/audio_device_mac.cc',
- 'mac/audio_device_mac.h',
- 'mac/audio_mixer_manager_mac.cc',
- 'mac/audio_mixer_manager_mac.h',
- 'mac/portaudio/pa_memorybarrier.h',
- 'mac/portaudio/pa_ringbuffer.c',
- 'mac/portaudio/pa_ringbuffer.h',
- 'win/audio_device_core_win.cc',
- 'win/audio_device_core_win.h',
- 'win/audio_device_wave_win.cc',
- 'win/audio_device_wave_win.h',
- 'win/audio_mixer_manager_win.cc',
- 'win/audio_mixer_manager_win.h',
],
'conditions': [
['OS=="android"', {
+ 'sources': [
+ 'android/audio_device_template.h',
+ 'android/audio_manager.cc',
+ 'android/audio_manager.h',
+ 'android/audio_record_jni.cc',
+ 'android/audio_record_jni.h',
+ 'android/audio_track_jni.cc',
+ 'android/audio_track_jni.h',
+ 'android/build_info.cc',
+ 'android/build_info.h',
+ 'android/opensles_common.cc',
+ 'android/opensles_common.h',
+ 'android/opensles_player.cc',
+ 'android/opensles_player.h',
+ ],
'link_settings': {
'libraries': [
'-llog',
@@ -136,6 +114,16 @@
},
}],
['OS=="linux"', {
+ 'sources': [
+ 'linux/alsasymboltable_linux.cc',
+ 'linux/alsasymboltable_linux.h',
+ 'linux/audio_device_alsa_linux.cc',
+ 'linux/audio_device_alsa_linux.h',
+ 'linux/audio_mixer_manager_alsa_linux.cc',
+ 'linux/audio_mixer_manager_alsa_linux.h',
+ 'linux/latebindingsymboltable_linux.cc',
+ 'linux/latebindingsymboltable_linux.h',
+ ],
'defines': [
'LINUX_ALSA',
],
@@ -161,6 +149,15 @@
],
}],
['OS=="mac"', {
+ 'sources': [
+ 'mac/audio_device_mac.cc',
+ 'mac/audio_device_mac.h',
+ 'mac/audio_mixer_manager_mac.cc',
+ 'mac/audio_mixer_manager_mac.h',
+ 'mac/portaudio/pa_memorybarrier.h',
+ 'mac/portaudio/pa_ringbuffer.c',
+ 'mac/portaudio/pa_ringbuffer.h',
+ ],
'link_settings': {
'libraries': [
'$(SDKROOT)/System/Library/Frameworks/AudioToolbox.framework',
@@ -169,6 +166,11 @@
},
}],
['OS=="ios"', {
+ 'sources': [
+ 'ios/audio_device_ios.h',
+ 'ios/audio_device_ios.mm',
+ 'ios/audio_device_not_implemented_ios.mm',
+ ],
'xcode_settings': {
'CLANG_ENABLE_OBJC_ARC': 'YES',
},
@@ -184,6 +186,14 @@
},
}],
['OS=="win"', {
+ 'sources': [
+ 'win/audio_device_core_win.cc',
+ 'win/audio_device_core_win.h',
+ 'win/audio_device_wave_win.cc',
+ 'win/audio_device_wave_win.h',
+ 'win/audio_mixer_manager_win.cc',
+ 'win/audio_mixer_manager_win.h',
+ ],
'link_settings': {
'libraries': [
# Required for the built-in WASAPI AEC.
@@ -194,18 +204,40 @@
],
},
}],
+ ['OS=="win" and clang==1', {
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'AdditionalOptions': [
+ # Disable warnings failing when compiling with Clang on Windows.
+ # https://bugs.chromium.org/p/webrtc/issues/detail?id=5366
+ '-Wno-bool-conversion',
+ '-Wno-delete-non-virtual-dtor',
+ '-Wno-logical-op-parentheses',
+ '-Wno-microsoft-extra-qualification',
+ '-Wno-microsoft-goto',
+ '-Wno-missing-braces',
+ '-Wno-parentheses-equality',
+ '-Wno-reorder',
+ '-Wno-shift-overflow',
+ '-Wno-tautological-compare',
+ '-Wno-unused-private-field',
+ ],
+ },
+ },
+ }],
], # conditions
}], # include_internal_audio_device==1
], # conditions
},
],
'conditions': [
- ['include_tests==1', {
+ # Does not compile on iOS: webrtc:4755.
+ ['include_tests==1 and OS!="ios"', {
'targets': [
{
'target_name': 'audio_device_tests',
- 'type': 'executable',
- 'dependencies': [
+ 'type': 'executable',
+ 'dependencies': [
'audio_device',
'webrtc_utility',
'<(webrtc_root)/test/test.gyp:test_support_main',
@@ -236,7 +268,7 @@
],
},
], # targets
- }], # include_tests
+ }], # include_tests==1 and OS!=ios
],
}
diff --git a/webrtc/modules/audio_device/audio_device_buffer.cc b/webrtc/modules/audio_device/audio_device_buffer.cc
index e7b487d687..48ae88ee90 100644
--- a/webrtc/modules/audio_device/audio_device_buffer.cc
+++ b/webrtc/modules/audio_device/audio_device_buffer.cc
@@ -169,7 +169,7 @@ int32_t AudioDeviceBuffer::PlayoutSampleRate() const
// SetRecordingChannels
// ----------------------------------------------------------------------------
-int32_t AudioDeviceBuffer::SetRecordingChannels(uint8_t channels)
+int32_t AudioDeviceBuffer::SetRecordingChannels(size_t channels)
{
CriticalSectionScoped lock(&_critSect);
_recChannels = channels;
@@ -181,7 +181,7 @@ int32_t AudioDeviceBuffer::SetRecordingChannels(uint8_t channels)
// SetPlayoutChannels
// ----------------------------------------------------------------------------
-int32_t AudioDeviceBuffer::SetPlayoutChannels(uint8_t channels)
+int32_t AudioDeviceBuffer::SetPlayoutChannels(size_t channels)
{
CriticalSectionScoped lock(&_critSect);
_playChannels = channels;
@@ -239,7 +239,7 @@ int32_t AudioDeviceBuffer::RecordingChannel(AudioDeviceModule::ChannelType& chan
// RecordingChannels
// ----------------------------------------------------------------------------
-uint8_t AudioDeviceBuffer::RecordingChannels() const
+size_t AudioDeviceBuffer::RecordingChannels() const
{
return _recChannels;
}
@@ -248,7 +248,7 @@ uint8_t AudioDeviceBuffer::RecordingChannels() const
// PlayoutChannels
// ----------------------------------------------------------------------------
-uint8_t AudioDeviceBuffer::PlayoutChannels() const
+size_t AudioDeviceBuffer::PlayoutChannels() const
{
return _playChannels;
}
@@ -487,7 +487,7 @@ int32_t AudioDeviceBuffer::RequestPlayoutData(size_t nSamples)
{
uint32_t playSampleRate = 0;
size_t playBytesPerSample = 0;
- uint8_t playChannels = 0;
+ size_t playChannels = 0;
{
CriticalSectionScoped lock(&_critSect);
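The uint8_t-to-size_t change for _recChannels/_playChannels matches how the counts are consumed: they multiply straight into byte sizes. A minimal sketch of that kind of computation (the helper name is illustrative):

    #include <cstddef>

    // size_t channel counts keep the byte math in one unsigned domain,
    // with no narrowing or implicit sign conversions.
    size_t BufferSizeBytes(size_t samples_per_channel,
                           size_t bytes_per_sample,
                           size_t channels) {
      return samples_per_channel * bytes_per_sample * channels;
    }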
diff --git a/webrtc/modules/audio_device/audio_device_buffer.h b/webrtc/modules/audio_device/audio_device_buffer.h
index 2ab7ff5547..1095971040 100644
--- a/webrtc/modules/audio_device/audio_device_buffer.h
+++ b/webrtc/modules/audio_device/audio_device_buffer.h
@@ -40,10 +40,10 @@ public:
int32_t RecordingSampleRate() const;
int32_t PlayoutSampleRate() const;
- virtual int32_t SetRecordingChannels(uint8_t channels);
- virtual int32_t SetPlayoutChannels(uint8_t channels);
- uint8_t RecordingChannels() const;
- uint8_t PlayoutChannels() const;
+ virtual int32_t SetRecordingChannels(size_t channels);
+ virtual int32_t SetPlayoutChannels(size_t channels);
+ size_t RecordingChannels() const;
+ size_t PlayoutChannels() const;
int32_t SetRecordingChannel(
const AudioDeviceModule::ChannelType channel);
int32_t RecordingChannel(
@@ -80,8 +80,8 @@ private:
uint32_t _recSampleRate;
uint32_t _playSampleRate;
- uint8_t _recChannels;
- uint8_t _playChannels;
+ size_t _recChannels;
+ size_t _playChannels;
// selected recording channel (left/right/both)
AudioDeviceModule::ChannelType _recChannel;
diff --git a/webrtc/modules/audio_device/dummy/file_audio_device.cc b/webrtc/modules/audio_device/dummy/file_audio_device.cc
index 9c7bf069d8..aac0962a50 100644
--- a/webrtc/modules/audio_device/dummy/file_audio_device.cc
+++ b/webrtc/modules/audio_device/dummy/file_audio_device.cc
@@ -7,21 +7,20 @@
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
-#include <iostream>
+#include "webrtc/base/platform_thread.h"
#include "webrtc/modules/audio_device/dummy/file_audio_device.h"
#include "webrtc/system_wrappers/include/sleep.h"
-#include "webrtc/system_wrappers/include/thread_wrapper.h"
namespace webrtc {
-int kRecordingFixedSampleRate = 48000;
-int kRecordingNumChannels = 2;
-int kPlayoutFixedSampleRate = 48000;
-int kPlayoutNumChannels = 2;
-int kPlayoutBufferSize = kPlayoutFixedSampleRate / 100
- * kPlayoutNumChannels * 2;
-int kRecordingBufferSize = kRecordingFixedSampleRate / 100
- * kRecordingNumChannels * 2;
+const int kRecordingFixedSampleRate = 48000;
+const size_t kRecordingNumChannels = 2;
+const int kPlayoutFixedSampleRate = 48000;
+const size_t kPlayoutNumChannels = 2;
+const size_t kPlayoutBufferSize =
+ kPlayoutFixedSampleRate / 100 * kPlayoutNumChannels * 2;
+const size_t kRecordingBufferSize =
+ kRecordingFixedSampleRate / 100 * kRecordingNumChannels * 2;
FileAudioDevice::FileAudioDevice(const int32_t id,
const char* inputFilename,
@@ -195,9 +194,7 @@ int32_t FileAudioDevice::StartPlayout() {
_playoutFramesLeft = 0;
if (!_playoutBuffer) {
- _playoutBuffer = new int8_t[2 *
- kPlayoutNumChannels *
- kPlayoutFixedSampleRate/100];
+ _playoutBuffer = new int8_t[kPlayoutBufferSize];
}
if (!_playoutBuffer) {
_playing = false;
@@ -214,17 +211,10 @@ int32_t FileAudioDevice::StartPlayout() {
return -1;
}
- const char* threadName = "webrtc_audio_module_play_thread";
- _ptrThreadPlay = ThreadWrapper::CreateThread(PlayThreadFunc, this,
- threadName);
- if (!_ptrThreadPlay->Start()) {
- _ptrThreadPlay.reset();
- _playing = false;
- delete [] _playoutBuffer;
- _playoutBuffer = NULL;
- return -1;
- }
- _ptrThreadPlay->SetPriority(kRealtimePriority);
+ _ptrThreadPlay.reset(new rtc::PlatformThread(
+ PlayThreadFunc, this, "webrtc_audio_module_play_thread"));
+ _ptrThreadPlay->Start();
+ _ptrThreadPlay->SetPriority(rtc::kRealtimePriority);
return 0;
}
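The hunk above swaps ThreadWrapper, whose Start() could fail and forced a rollback path, for rtc::PlatformThread, whose Start() reports no failure; that is why the cleanup branch disappears. A minimal sketch of the new spawning pattern, assuming only the constructor, Start(), and SetPriority() signatures visible in this patch:

  #include "webrtc/base/platform_thread.h"
  #include "webrtc/base/scoped_ptr.h"

  namespace {
  // Thread body: return true to be scheduled again, false to stop.
  bool NoopThreadFunc(void* /* context */) { return false; }
  }  // namespace

  void SpawnRealtimeAudioThread(
      rtc::scoped_ptr<rtc::PlatformThread>* thread) {
    thread->reset(new rtc::PlatformThread(
        NoopThreadFunc, nullptr, "webrtc_audio_module_play_thread"));
    (*thread)->Start();  // No failure path, unlike ThreadWrapper::Start().
    (*thread)->SetPriority(rtc::kRealtimePriority);
  }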
@@ -277,17 +267,11 @@ int32_t FileAudioDevice::StartRecording() {
return -1;
}
- const char* threadName = "webrtc_audio_module_capture_thread";
- _ptrThreadRec = ThreadWrapper::CreateThread(RecThreadFunc, this, threadName);
+ _ptrThreadRec.reset(new rtc::PlatformThread(
+ RecThreadFunc, this, "webrtc_audio_module_capture_thread"));
- if (!_ptrThreadRec->Start()) {
- _ptrThreadRec.reset();
- _recording = false;
- delete [] _recordingBuffer;
- _recordingBuffer = NULL;
- return -1;
- }
- _ptrThreadRec->SetPriority(kRealtimePriority);
+ _ptrThreadRec->Start();
+ _ptrThreadRec->SetPriority(rtc::kRealtimePriority);
return 0;
}
@@ -514,7 +498,12 @@ bool FileAudioDevice::PlayThreadProcess()
}
_playoutFramesLeft = 0;
_critSect.Leave();
- SleepMs(10 - (_clock->CurrentNtpInMilliseconds() - currentTime));
+
+ uint64_t deltaTimeMillis = _clock->CurrentNtpInMilliseconds() - currentTime;
+  if (deltaTimeMillis < 10) {
+ SleepMs(10 - deltaTimeMillis);
+ }
+
return true;
}
@@ -544,7 +533,12 @@ bool FileAudioDevice::RecThreadProcess()
}
_critSect.Leave();
- SleepMs(10 - (_clock->CurrentNtpInMilliseconds() - currentTime));
+
+ uint64_t deltaTimeMillis = _clock->CurrentNtpInMilliseconds() - currentTime;
+  if (deltaTimeMillis < 10) {
+ SleepMs(10 - deltaTimeMillis);
+ }
+
return true;
}
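Both thread loops above previously computed SleepMs(10 - elapsed) directly; when an iteration took longer than 10 ms, that expression handed SleepMs() a negative value (or, in unsigned arithmetic, an enormous one). A self-contained sketch of the guarded version, using <chrono>/<thread> in place of webrtc's Clock and SleepMs():

  #include <chrono>
  #include <cstdint>
  #include <thread>

  // Sleep out whatever remains of a 10 ms tick, but never sleep when the
  // work ran over budget: 10 - elapsed_ms would underflow in unsigned math.
  void SleepRemainderOf10MsTick(uint64_t tick_start_ms, uint64_t now_ms) {
    const uint64_t elapsed_ms = now_ms - tick_start_ms;
    if (elapsed_ms < 10) {
      std::this_thread::sleep_for(std::chrono::milliseconds(10 - elapsed_ms));
    }
  }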
diff --git a/webrtc/modules/audio_device/dummy/file_audio_device.h b/webrtc/modules/audio_device/dummy/file_audio_device.h
index 0e1665ea72..77179409ea 100644
--- a/webrtc/modules/audio_device/dummy/file_audio_device.h
+++ b/webrtc/modules/audio_device/dummy/file_audio_device.h
@@ -20,9 +20,12 @@
#include "webrtc/system_wrappers/include/file_wrapper.h"
#include "webrtc/system_wrappers/include/clock.h"
+namespace rtc {
+class PlatformThread;
+} // namespace rtc
+
namespace webrtc {
class EventWrapper;
-class ThreadWrapper;
// This is a fake audio device which plays audio from a file as its microphone
// and plays out into a file.
@@ -178,8 +181,9 @@ class FileAudioDevice : public AudioDeviceGeneric {
size_t _recordingFramesIn10MS;
size_t _playoutFramesIn10MS;
- rtc::scoped_ptr<ThreadWrapper> _ptrThreadRec;
- rtc::scoped_ptr<ThreadWrapper> _ptrThreadPlay;
+ // TODO(pbos): Make plain members instead of pointers and stop resetting them.
+ rtc::scoped_ptr<rtc::PlatformThread> _ptrThreadRec;
+ rtc::scoped_ptr<rtc::PlatformThread> _ptrThreadPlay;
bool _playing;
bool _recording;
diff --git a/webrtc/modules/audio_device/include/audio_device.h b/webrtc/modules/audio_device/include/audio_device.h
index c2c2b88103..15e08730c7 100644
--- a/webrtc/modules/audio_device/include/audio_device.h
+++ b/webrtc/modules/audio_device/include/audio_device.h
@@ -12,7 +12,7 @@
#define MODULES_AUDIO_DEVICE_INCLUDE_AUDIO_DEVICE_H_
#include "webrtc/modules/audio_device/include/audio_device_defines.h"
-#include "webrtc/modules/interface/module.h"
+#include "webrtc/modules/include/module.h"
namespace webrtc {
diff --git a/webrtc/modules/audio_device/include/audio_device_defines.h b/webrtc/modules/audio_device/include/audio_device_defines.h
index 3ebbd23cc5..b847729f05 100644
--- a/webrtc/modules/audio_device/include/audio_device_defines.h
+++ b/webrtc/modules/audio_device/include/audio_device_defines.h
@@ -49,7 +49,7 @@ class AudioTransport {
virtual int32_t RecordedDataIsAvailable(const void* audioSamples,
const size_t nSamples,
const size_t nBytesPerSample,
- const uint8_t nChannels,
+ const size_t nChannels,
const uint32_t samplesPerSec,
const uint32_t totalDelayMS,
const int32_t clockDrift,
@@ -59,7 +59,7 @@ class AudioTransport {
virtual int32_t NeedMorePlayData(const size_t nSamples,
const size_t nBytesPerSample,
- const uint8_t nChannels,
+ const size_t nChannels,
const uint32_t samplesPerSec,
void* audioSamples,
size_t& nSamplesOut,
@@ -82,10 +82,10 @@ class AudioTransport {
// TODO(xians): Remove this interface after Chrome and Libjingle switch
// to OnData().
virtual int OnDataAvailable(const int voe_channels[],
- int number_of_voe_channels,
+ size_t number_of_voe_channels,
const int16_t* audio_data,
int sample_rate,
- int number_of_channels,
+ size_t number_of_channels,
size_t number_of_frames,
int audio_delay_milliseconds,
int current_volume,
@@ -103,7 +103,7 @@ class AudioTransport {
const void* audio_data,
int bits_per_sample,
int sample_rate,
- int number_of_channels,
+ size_t number_of_channels,
size_t number_of_frames) {}
// Method to push the captured audio data to the specific VoE channel.
@@ -116,7 +116,7 @@ class AudioTransport {
const void* audio_data,
int bits_per_sample,
int sample_rate,
- int number_of_channels,
+ size_t number_of_channels,
size_t number_of_frames) {}
// Method to pull mixed render audio data from all active VoE channels.
@@ -125,7 +125,7 @@ class AudioTransport {
// channel.
virtual void PullRenderData(int bits_per_sample,
int sample_rate,
- int number_of_channels,
+ size_t number_of_channels,
size_t number_of_frames,
void* audio_data,
int64_t* elapsed_time_ms,
@@ -149,27 +149,27 @@ class AudioParameters {
channels_(0),
frames_per_buffer_(0),
frames_per_10ms_buffer_(0) {}
- AudioParameters(int sample_rate, int channels, size_t frames_per_buffer)
+ AudioParameters(int sample_rate, size_t channels, size_t frames_per_buffer)
: sample_rate_(sample_rate),
channels_(channels),
frames_per_buffer_(frames_per_buffer),
frames_per_10ms_buffer_(static_cast<size_t>(sample_rate / 100)) {}
- void reset(int sample_rate, int channels, size_t frames_per_buffer) {
+ void reset(int sample_rate, size_t channels, size_t frames_per_buffer) {
sample_rate_ = sample_rate;
channels_ = channels;
frames_per_buffer_ = frames_per_buffer;
frames_per_10ms_buffer_ = static_cast<size_t>(sample_rate / 100);
}
size_t bits_per_sample() const { return kBitsPerSample; }
- void reset(int sample_rate, int channels, double ms_per_buffer) {
+ void reset(int sample_rate, size_t channels, double ms_per_buffer) {
reset(sample_rate, channels,
static_cast<size_t>(sample_rate * ms_per_buffer + 0.5));
}
- void reset(int sample_rate, int channels) {
+ void reset(int sample_rate, size_t channels) {
reset(sample_rate, channels, static_cast<size_t>(0));
}
int sample_rate() const { return sample_rate_; }
- int channels() const { return channels_; }
+ size_t channels() const { return channels_; }
size_t frames_per_buffer() const { return frames_per_buffer_; }
size_t frames_per_10ms_buffer() const { return frames_per_10ms_buffer_; }
size_t GetBytesPerFrame() const { return channels_ * kBitsPerSample / 8; }
@@ -200,7 +200,7 @@ class AudioParameters {
private:
int sample_rate_;
- int channels_;
+ size_t channels_;
size_t frames_per_buffer_;
size_t frames_per_10ms_buffer_;
};
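With channels_ widened from int to size_t, byte-count expressions such as GetBytesPerFrame() above stay in unsigned arithmetic throughout. A small worked example of the same math, using illustrative local values (and assuming kBitsPerSample is 16, its value elsewhere in this header):

  #include <cstddef>

  int main() {
    const int sample_rate = 48000;
    const size_t channels = 2;                         // now size_t, not int
    const size_t kBitsPerSample = 16;                  // as in the real class
    const size_t frames_per_10ms = sample_rate / 100;  // 480
    const size_t bytes_per_frame = channels * kBitsPerSample / 8;    // 4
    const size_t bytes_per_10ms = bytes_per_frame * frames_per_10ms; // 1920
    return bytes_per_10ms == 1920 ? 0 : 1;
  }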
diff --git a/webrtc/modules/audio_device/ios/audio_device_ios.h b/webrtc/modules/audio_device/ios/audio_device_ios.h
index 8f8ba0a9c5..c4eb0d6f64 100644
--- a/webrtc/modules/audio_device/ios/audio_device_ios.h
+++ b/webrtc/modules/audio_device/ios/audio_device_ios.h
@@ -182,7 +182,10 @@ class AudioDeviceIOS : public AudioDeviceGeneric {
bool InitPlayOrRecord();
// Closes and deletes the voice-processing I/O unit.
- bool ShutdownPlayOrRecord();
+ void ShutdownPlayOrRecord();
+
+ // Helper method for destroying the existing audio unit.
+ void DisposeAudioUnit();
// Callback function called on a real-time priority I/O thread from the audio
// unit. This method is used to signal that recorded audio is available.
diff --git a/webrtc/modules/audio_device/ios/audio_device_ios.mm b/webrtc/modules/audio_device/ios/audio_device_ios.mm
index f26e9f1cc7..f6dee5b3cf 100644
--- a/webrtc/modules/audio_device/ios/audio_device_ios.mm
+++ b/webrtc/modules/audio_device/ios/audio_device_ios.mm
@@ -19,12 +19,24 @@
#include "webrtc/base/atomicops.h"
#include "webrtc/base/checks.h"
+#include "webrtc/base/criticalsection.h"
#include "webrtc/base/logging.h"
+#include "webrtc/base/thread_annotations.h"
#include "webrtc/modules/audio_device/fine_audio_buffer.h"
-#include "webrtc/modules/utility/interface/helpers_ios.h"
+#include "webrtc/modules/utility/include/helpers_ios.h"
namespace webrtc {
+// Protects |g_audio_session_users|.
+static rtc::GlobalLockPod g_lock;
+
+// Counts the number of users (i.e. instances of this object) that need an
+// active audio session. This variable ensures that we only activate the audio
+// session for the first user and deactivate it for the last.
+// The counter is file-scope static so that it is shared by all instances
+// rather than kept per instance.
+static int g_audio_session_users GUARDED_BY(g_lock) = 0;
+
#define LOGI() LOG(LS_INFO) << "AudioDeviceIOS::"
#define LOG_AND_RETURN_IF_ERROR(error, message) \
@@ -74,25 +86,62 @@ const UInt32 kBytesPerSample = 2;
// Can most likely be removed.
const UInt16 kFixedPlayoutDelayEstimate = 30;
const UInt16 kFixedRecordDelayEstimate = 30;
+// Calls to AudioUnitInitialize() can fail if called back-to-back on different
+// ADM instances. A fall-back solution is to allow multiple sequential calls
+// with a short delay between each. This constant sets the max number of
+// initialization attempts.
+const int kMaxNumberOfAudioUnitInitializeAttempts = 5;
+
using ios::CheckAndLogError;
+// Verifies that the current audio session supports input audio and that the
+// required category and mode are enabled.
+static bool VerifyAudioSession(AVAudioSession* session) {
+ LOG(LS_INFO) << "VerifyAudioSession";
+ // Ensure that the device currently supports audio input.
+ if (!session.isInputAvailable) {
+ LOG(LS_ERROR) << "No audio input path is available!";
+ return false;
+ }
+
+ // Ensure that the required category and mode are actually activated.
+ if (![session.category isEqualToString:AVAudioSessionCategoryPlayAndRecord]) {
+ LOG(LS_ERROR)
+ << "Failed to set category to AVAudioSessionCategoryPlayAndRecord";
+ return false;
+ }
+ if (![session.mode isEqualToString:AVAudioSessionModeVoiceChat]) {
+ LOG(LS_ERROR) << "Failed to set mode to AVAudioSessionModeVoiceChat";
+ return false;
+ }
+ return true;
+}
+
// Activates an audio session suitable for full duplex VoIP sessions when
// |activate| is true. Also sets the preferred sample rate and IO buffer
// duration. Deactivates an active audio session if |activate| is set to false.
-static void ActivateAudioSession(AVAudioSession* session, bool activate) {
+static bool ActivateAudioSession(AVAudioSession* session, bool activate)
+ EXCLUSIVE_LOCKS_REQUIRED(g_lock) {
LOG(LS_INFO) << "ActivateAudioSession(" << activate << ")";
@autoreleasepool {
NSError* error = nil;
BOOL success = NO;
- // Deactivate the audio session and return if |activate| is false.
if (!activate) {
- success = [session setActive:NO error:&error];
- RTC_DCHECK(CheckAndLogError(success, error));
- return;
+ // Deactivate the audio session using an extra option and then return.
+ // AVAudioSessionSetActiveOptionNotifyOthersOnDeactivation is used to
+ // ensure that other audio sessions that were interrupted by our session
+ // can return to their active state. It is recommended for VoIP apps to
+ // use this option.
+ success = [session
+ setActive:NO
+ withOptions:AVAudioSessionSetActiveOptionNotifyOthersOnDeactivation
+ error:&error];
+ return CheckAndLogError(success, error);
}
+    // Go ahead and activate our own audio session since |activate| is true.
// Use a category which supports simultaneous recording and playback.
// By default, using this category implies that our app’s audio is
// nonmixable, hence activating the session will interrupt any other
@@ -121,7 +170,6 @@ static void ActivateAudioSession(AVAudioSession* session, bool activate) {
RTC_DCHECK(CheckAndLogError(success, error));
// Set the preferred audio I/O buffer duration, in seconds.
- // TODO(henrika): add more comments here.
error = nil;
success = [session setPreferredIOBufferDuration:kPreferredIOBufferDuration
error:&error];
@@ -131,13 +179,15 @@ static void ActivateAudioSession(AVAudioSession* session, bool activate) {
// session (e.g. phone call) has higher priority than ours.
error = nil;
success = [session setActive:YES error:&error];
- RTC_DCHECK(CheckAndLogError(success, error));
- RTC_CHECK(session.isInputAvailable) << "No input path is available!";
+ if (!CheckAndLogError(success, error)) {
+ return false;
+ }
- // Ensure that category and mode are actually activated.
- RTC_DCHECK(
- [session.category isEqualToString:AVAudioSessionCategoryPlayAndRecord]);
- RTC_DCHECK([session.mode isEqualToString:AVAudioSessionModeVoiceChat]);
+ // Ensure that the active audio session has the correct category and mode.
+ if (!VerifyAudioSession(session)) {
+ LOG(LS_ERROR) << "Failed to verify audio session category and mode";
+ return false;
+ }
// Try to set the preferred number of hardware audio channels. These calls
// must be done after setting the audio session’s category and mode and
@@ -156,7 +206,52 @@ static void ActivateAudioSession(AVAudioSession* session, bool activate) {
[session setPreferredOutputNumberOfChannels:kPreferredNumberOfChannels
error:&error];
RTC_DCHECK(CheckAndLogError(success, error));
+ return true;
+ }
+}
+
+// An application can create more than one ADM and start audio streaming
+// for all of them. It is essential that we only activate the app's audio
+// session once (for the first one) and deactivate it once (for the last).
+static bool ActivateAudioSession() {
+ LOGI() << "ActivateAudioSession";
+ rtc::GlobalLockScope ls(&g_lock);
+ if (g_audio_session_users == 0) {
+ // The system provides an audio session object upon launch of an
+ // application. However, we must initialize the session in order to
+ // handle interruptions. Implicit initialization occurs when obtaining
+ // a reference to the AVAudioSession object.
+ AVAudioSession* session = [AVAudioSession sharedInstance];
+ // Try to activate the audio session and ask for a set of preferred audio
+ // parameters.
+ if (!ActivateAudioSession(session, true)) {
+ LOG(LS_ERROR) << "Failed to activate the audio session";
+ return false;
+ }
+ LOG(LS_INFO) << "The audio session is now activated";
+ }
+ ++g_audio_session_users;
+ LOG(LS_INFO) << "Number of audio session users: " << g_audio_session_users;
+ return true;
+}
+
+// If more than one object is using the audio session, ensure that only the
+// last object deactivates. Apple recommends: "activate your audio session
+// only as needed and deactivate it when you are not using audio".
+static bool DeactivateAudioSession() {
+ LOGI() << "DeactivateAudioSession";
+ rtc::GlobalLockScope ls(&g_lock);
+ if (g_audio_session_users == 1) {
+ AVAudioSession* session = [AVAudioSession sharedInstance];
+ if (!ActivateAudioSession(session, false)) {
+ LOG(LS_ERROR) << "Failed to deactivate the audio session";
+ return false;
+ }
+ LOG(LS_INFO) << "Our audio session is now deactivated";
}
+ --g_audio_session_users;
+ LOG(LS_INFO) << "Number of audio session users: " << g_audio_session_users;
+ return true;
}
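The two functions above implement a textbook reference count behind a global lock: the first user activates the shared audio session and the last one out deactivates it. A minimal sketch of the same idiom in portable C++, with std::mutex standing in for rtc::GlobalLockPod and trivial placeholders for the AVAudioSession calls:

  #include <mutex>

  namespace {
  std::mutex g_lock;  // stands in for rtc::GlobalLockPod
  int g_users = 0;    // number of active users; guarded by g_lock

  bool PlatformActivate() { return true; }  // placeholder for session setup
  void PlatformDeactivate() {}              // placeholder for session teardown
  }  // namespace

  // Only the first user pays for activation; later users just count up.
  bool AddSessionUser() {
    std::lock_guard<std::mutex> lock(g_lock);
    if (g_users == 0 && !PlatformActivate())
      return false;
    ++g_users;
    return true;
  }

  // Only the last user deactivates, mirroring DeactivateAudioSession() above.
  void RemoveSessionUser() {
    std::lock_guard<std::mutex> lock(g_lock);
    if (g_users == 1)
      PlatformDeactivate();
    --g_users;
  }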
#if !defined(NDEBUG)
@@ -198,12 +293,13 @@ AudioDeviceIOS::AudioDeviceIOS()
initialized_(false),
rec_is_initialized_(false),
play_is_initialized_(false),
- audio_interruption_observer_(nullptr) {
+ audio_interruption_observer_(nullptr),
+ route_change_observer_(nullptr) {
LOGI() << "ctor" << ios::GetCurrentThreadDescription();
}
AudioDeviceIOS::~AudioDeviceIOS() {
- LOGI() << "~dtor";
+ LOGI() << "~dtor" << ios::GetCurrentThreadDescription();
RTC_DCHECK(thread_checker_.CalledOnValidThread());
Terminate();
}
@@ -245,8 +341,16 @@ int32_t AudioDeviceIOS::Terminate() {
if (!initialized_) {
return 0;
}
- ShutdownPlayOrRecord();
+ StopPlayout();
+ StopRecording();
initialized_ = false;
+ {
+ rtc::GlobalLockScope ls(&g_lock);
+ if (g_audio_session_users != 0) {
+ LOG(LS_WARNING) << "Object is destructed with an active audio session";
+ }
+ RTC_DCHECK_GE(g_audio_session_users, 0);
+ }
return 0;
}
@@ -258,7 +362,7 @@ int32_t AudioDeviceIOS::InitPlayout() {
RTC_DCHECK(!playing_);
if (!rec_is_initialized_) {
if (!InitPlayOrRecord()) {
- LOG_F(LS_ERROR) << "InitPlayOrRecord failed!";
+ LOG_F(LS_ERROR) << "InitPlayOrRecord failed for InitPlayout!";
return -1;
}
}
@@ -274,7 +378,7 @@ int32_t AudioDeviceIOS::InitRecording() {
RTC_DCHECK(!recording_);
if (!play_is_initialized_) {
if (!InitPlayOrRecord()) {
- LOG_F(LS_ERROR) << "InitPlayOrRecord failed!";
+ LOG_F(LS_ERROR) << "InitPlayOrRecord failed for InitRecording!";
return -1;
}
}
@@ -291,9 +395,11 @@ int32_t AudioDeviceIOS::StartPlayout() {
if (!recording_) {
OSStatus result = AudioOutputUnitStart(vpio_unit_);
if (result != noErr) {
- LOG_F(LS_ERROR) << "AudioOutputUnitStart failed: " << result;
+ LOG_F(LS_ERROR) << "AudioOutputUnitStart failed for StartPlayout: "
+ << result;
return -1;
}
+ LOG(LS_INFO) << "Voice-Processing I/O audio unit is now started";
}
rtc::AtomicOps::ReleaseStore(&playing_, 1);
return 0;
@@ -322,9 +428,11 @@ int32_t AudioDeviceIOS::StartRecording() {
if (!playing_) {
OSStatus result = AudioOutputUnitStart(vpio_unit_);
if (result != noErr) {
- LOG_F(LS_ERROR) << "AudioOutputUnitStart failed: " << result;
+ LOG_F(LS_ERROR) << "AudioOutputUnitStart failed for StartRecording: "
+ << result;
return -1;
}
+ LOG(LS_INFO) << "Voice-Processing I/O audio unit is now started";
}
rtc::AtomicOps::ReleaseStore(&recording_, 1);
return 0;
@@ -474,11 +582,12 @@ void AudioDeviceIOS::RegisterNotificationObservers() {
LOG(LS_INFO) << " OldDeviceUnavailable";
break;
case AVAudioSessionRouteChangeReasonCategoryChange:
+      // It turns out that we see this notification (at least in iOS 9.2)
+      // when switching from a BT device to e.g. the Speaker using the
+      // iOS Control Center. We must therefore check whether the sample
+      // rate has changed and, if so, restart the audio unit.
LOG(LS_INFO) << " CategoryChange";
LOG(LS_INFO) << " New category: " << ios::GetAudioSessionCategory();
- // Don't see this as route change since it can be triggered in
- // combination with session interruptions as well.
- valid_route_change = false;
break;
case AVAudioSessionRouteChangeReasonOverride:
LOG(LS_INFO) << " Override";
@@ -490,9 +599,11 @@ void AudioDeviceIOS::RegisterNotificationObservers() {
LOG(LS_INFO) << " NoSuitableRouteForCategory";
break;
case AVAudioSessionRouteChangeReasonRouteConfigurationChange:
- // Ignore this type of route change since we are focusing
+ // The set of input and output ports has not changed, but their
+ // configuration has, e.g., a port’s selected data source has
+ // changed. Ignore this type of route change since we are focusing
// on detecting headset changes.
- LOG(LS_INFO) << " RouteConfigurationChange";
+ LOG(LS_INFO) << " RouteConfigurationChange (ignored)";
valid_route_change = false;
break;
}
@@ -630,7 +741,7 @@ void AudioDeviceIOS::SetupAudioBuffersForActiveAudioSession() {
bool AudioDeviceIOS::SetupAndInitializeVoiceProcessingAudioUnit() {
LOGI() << "SetupAndInitializeVoiceProcessingAudioUnit";
- RTC_DCHECK(!vpio_unit_);
+ RTC_DCHECK(!vpio_unit_) << "VoiceProcessingIO audio unit already exists";
// Create an audio component description to identify the Voice-Processing
// I/O audio unit.
AudioComponentDescription vpio_unit_description;
@@ -639,34 +750,48 @@ bool AudioDeviceIOS::SetupAndInitializeVoiceProcessingAudioUnit() {
vpio_unit_description.componentManufacturer = kAudioUnitManufacturer_Apple;
vpio_unit_description.componentFlags = 0;
vpio_unit_description.componentFlagsMask = 0;
+
// Obtain an audio unit instance given the description.
AudioComponent found_vpio_unit_ref =
AudioComponentFindNext(nullptr, &vpio_unit_description);
// Create a Voice-Processing IO audio unit.
- LOG_AND_RETURN_IF_ERROR(
- AudioComponentInstanceNew(found_vpio_unit_ref, &vpio_unit_),
- "Failed to create a VoiceProcessingIO audio unit");
+ OSStatus result = noErr;
+ result = AudioComponentInstanceNew(found_vpio_unit_ref, &vpio_unit_);
+ if (result != noErr) {
+ vpio_unit_ = nullptr;
+ LOG(LS_ERROR) << "AudioComponentInstanceNew failed: " << result;
+ return false;
+ }
// A VP I/O unit's bus 1 connects to input hardware (microphone). Enable
// input on the input scope of the input element.
AudioUnitElement input_bus = 1;
UInt32 enable_input = 1;
- LOG_AND_RETURN_IF_ERROR(
- AudioUnitSetProperty(vpio_unit_, kAudioOutputUnitProperty_EnableIO,
- kAudioUnitScope_Input, input_bus, &enable_input,
- sizeof(enable_input)),
- "Failed to enable input on input scope of input element");
+ result = AudioUnitSetProperty(vpio_unit_, kAudioOutputUnitProperty_EnableIO,
+ kAudioUnitScope_Input, input_bus, &enable_input,
+ sizeof(enable_input));
+ if (result != noErr) {
+ DisposeAudioUnit();
+ LOG(LS_ERROR) << "Failed to enable input on input scope of input element: "
+ << result;
+ return false;
+ }
// A VP I/O unit's bus 0 connects to output hardware (speaker). Enable
// output on the output scope of the output element.
AudioUnitElement output_bus = 0;
UInt32 enable_output = 1;
- LOG_AND_RETURN_IF_ERROR(
- AudioUnitSetProperty(vpio_unit_, kAudioOutputUnitProperty_EnableIO,
- kAudioUnitScope_Output, output_bus, &enable_output,
- sizeof(enable_output)),
- "Failed to enable output on output scope of output element");
+ result = AudioUnitSetProperty(vpio_unit_, kAudioOutputUnitProperty_EnableIO,
+ kAudioUnitScope_Output, output_bus,
+ &enable_output, sizeof(enable_output));
+ if (result != noErr) {
+ DisposeAudioUnit();
+ LOG(LS_ERROR)
+ << "Failed to enable output on output scope of output element: "
+ << result;
+ return false;
+ }
// Set the application formats for input and output:
// - use same format in both directions
@@ -694,38 +819,55 @@ bool AudioDeviceIOS::SetupAndInitializeVoiceProcessingAudioUnit() {
#endif
// Set the application format on the output scope of the input element/bus.
- LOG_AND_RETURN_IF_ERROR(
- AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_StreamFormat,
- kAudioUnitScope_Output, input_bus,
- &application_format, size),
- "Failed to set application format on output scope of input element");
+ result = AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_StreamFormat,
+ kAudioUnitScope_Output, input_bus,
+ &application_format, size);
+ if (result != noErr) {
+ DisposeAudioUnit();
+ LOG(LS_ERROR)
+ << "Failed to set application format on output scope of input bus: "
+ << result;
+ return false;
+ }
// Set the application format on the input scope of the output element/bus.
- LOG_AND_RETURN_IF_ERROR(
- AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_StreamFormat,
- kAudioUnitScope_Input, output_bus,
- &application_format, size),
- "Failed to set application format on input scope of output element");
+ result = AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_StreamFormat,
+ kAudioUnitScope_Input, output_bus,
+ &application_format, size);
+ if (result != noErr) {
+ DisposeAudioUnit();
+ LOG(LS_ERROR)
+ << "Failed to set application format on input scope of output bus: "
+ << result;
+ return false;
+ }
// Specify the callback function that provides audio samples to the audio
// unit.
AURenderCallbackStruct render_callback;
render_callback.inputProc = GetPlayoutData;
render_callback.inputProcRefCon = this;
- LOG_AND_RETURN_IF_ERROR(
- AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_SetRenderCallback,
- kAudioUnitScope_Input, output_bus, &render_callback,
- sizeof(render_callback)),
- "Failed to specify the render callback on the output element");
+ result = AudioUnitSetProperty(
+ vpio_unit_, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input,
+ output_bus, &render_callback, sizeof(render_callback));
+ if (result != noErr) {
+ DisposeAudioUnit();
+ LOG(LS_ERROR) << "Failed to specify the render callback on the output bus: "
+ << result;
+ return false;
+ }
// Disable AU buffer allocation for the recorder, we allocate our own.
// TODO(henrika): not sure that it actually saves resource to make this call.
UInt32 flag = 0;
- LOG_AND_RETURN_IF_ERROR(
- AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_ShouldAllocateBuffer,
- kAudioUnitScope_Output, input_bus, &flag,
- sizeof(flag)),
- "Failed to disable buffer allocation on the input element");
+ result = AudioUnitSetProperty(
+ vpio_unit_, kAudioUnitProperty_ShouldAllocateBuffer,
+ kAudioUnitScope_Output, input_bus, &flag, sizeof(flag));
+  if (result != noErr) {
+    DisposeAudioUnit();
+    LOG(LS_ERROR) << "Failed to disable buffer allocation on the input bus: "
+                  << result;
+    return false;
+  }
// Specify the callback to be called by the I/O thread to us when input audio
// is available. The recorded samples can then be obtained by calling the
@@ -733,16 +875,39 @@ bool AudioDeviceIOS::SetupAndInitializeVoiceProcessingAudioUnit() {
AURenderCallbackStruct input_callback;
input_callback.inputProc = RecordedDataIsAvailable;
input_callback.inputProcRefCon = this;
- LOG_AND_RETURN_IF_ERROR(
- AudioUnitSetProperty(vpio_unit_,
- kAudioOutputUnitProperty_SetInputCallback,
- kAudioUnitScope_Global, input_bus, &input_callback,
- sizeof(input_callback)),
- "Failed to specify the input callback on the input element");
+ result = AudioUnitSetProperty(vpio_unit_,
+ kAudioOutputUnitProperty_SetInputCallback,
+ kAudioUnitScope_Global, input_bus,
+ &input_callback, sizeof(input_callback));
+  if (result != noErr) {
+    DisposeAudioUnit();
+    LOG(LS_ERROR) << "Failed to specify the input callback on the input bus: "
+                  << result;
+    return false;
+  }
// Initialize the Voice-Processing I/O unit instance.
- LOG_AND_RETURN_IF_ERROR(AudioUnitInitialize(vpio_unit_),
- "Failed to initialize the Voice-Processing I/O unit");
+ // Calls to AudioUnitInitialize() can fail if called back-to-back on
+  // different ADM instances. The error code in this case is -66635, which is
+  // undocumented. Tests have shown that calling AudioUnitInitialize a second
+ // time, after a short sleep, avoids this issue.
+ // See webrtc:5166 for details.
+  int failed_initialize_attempts = 0;
+  result = AudioUnitInitialize(vpio_unit_);
+  while (result != noErr) {
+    LOG(LS_ERROR) << "Failed to initialize the Voice-Processing I/O unit: "
+                  << result;
+    ++failed_initialize_attempts;
+    if (failed_initialize_attempts ==
+        kMaxNumberOfAudioUnitInitializeAttempts) {
+ // Max number of initialization attempts exceeded, hence abort.
+ LOG(LS_WARNING) << "Too many initialization attempts";
+ DisposeAudioUnit();
+ return false;
+ }
+ LOG(LS_INFO) << "pause 100ms and try audio unit initialization again...";
+ [NSThread sleepForTimeInterval:0.1f];
+ result = AudioUnitInitialize(vpio_unit_);
+ }
+ LOG(LS_INFO) << "Voice-Processing I/O unit is now initialized";
return true;
}
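The retry loop above boils down to a bounded retry with a fixed pause, applied to any OSStatus-returning initializer. A generic sketch of the shape (names below are illustrative; the patch itself retries AudioUnitInitialize() and sleeps via NSThread):

  #include <chrono>
  #include <thread>

  using Status = int;              // stands in for OSStatus
  constexpr Status kOk = 0;        // stands in for noErr
  constexpr int kMaxAttempts = 5;  // kMaxNumberOfAudioUnitInitializeAttempts

  // Retry |init| until it succeeds or kMaxAttempts failures accumulate,
  // pausing 100 ms between attempts, as the loop above does.
  template <typename InitFn>
  bool InitWithRetry(InitFn init) {
    int failed_attempts = 0;
    Status result = init();
    while (result != kOk) {
      if (++failed_attempts == kMaxAttempts)
        return false;  // Give up; the caller disposes the audio unit.
      std::this_thread::sleep_for(std::chrono::milliseconds(100));
      result = init();
    }
    return true;
  }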
@@ -772,18 +937,29 @@ bool AudioDeviceIOS::RestartAudioUnitWithNewFormat(float sample_rate) {
// Prepare the audio unit to render audio again.
LOG_AND_RETURN_IF_ERROR(AudioUnitInitialize(vpio_unit_),
"Failed to initialize the Voice-Processing I/O unit");
+ LOG(LS_INFO) << "Voice-Processing I/O unit is now reinitialized";
// Start rendering audio using the new format.
LOG_AND_RETURN_IF_ERROR(AudioOutputUnitStart(vpio_unit_),
"Failed to start the Voice-Processing I/O unit");
+ LOG(LS_INFO) << "Voice-Processing I/O unit is now restarted";
return true;
}
bool AudioDeviceIOS::InitPlayOrRecord() {
LOGI() << "InitPlayOrRecord";
+ // Activate the audio session if not already activated.
+ if (!ActivateAudioSession()) {
+ return false;
+ }
+
+ // Ensure that the active audio session has the correct category and mode.
AVAudioSession* session = [AVAudioSession sharedInstance];
- // Activate the audio session and ask for a set of preferred audio parameters.
- ActivateAudioSession(session, true);
+ if (!VerifyAudioSession(session)) {
+ DeactivateAudioSession();
+ LOG(LS_ERROR) << "Failed to verify audio session category and mode";
+ return false;
+ }
// Start observing audio session interruptions and route changes.
RegisterNotificationObservers();
@@ -793,16 +969,16 @@ bool AudioDeviceIOS::InitPlayOrRecord() {
// Create, setup and initialize a new Voice-Processing I/O unit.
if (!SetupAndInitializeVoiceProcessingAudioUnit()) {
+ // Reduce usage count for the audio session and possibly deactivate it if
+ // this object is the only user.
+ DeactivateAudioSession();
return false;
}
return true;
}
-bool AudioDeviceIOS::ShutdownPlayOrRecord() {
+void AudioDeviceIOS::ShutdownPlayOrRecord() {
LOGI() << "ShutdownPlayOrRecord";
- // Remove audio session notification observers.
- UnregisterNotificationObservers();
-
// Close and delete the voice-processing I/O unit.
OSStatus result = -1;
if (nullptr != vpio_unit_) {
@@ -814,18 +990,25 @@ bool AudioDeviceIOS::ShutdownPlayOrRecord() {
if (result != noErr) {
LOG_F(LS_ERROR) << "AudioUnitUninitialize failed: " << result;
}
- result = AudioComponentInstanceDispose(vpio_unit_);
- if (result != noErr) {
- LOG_F(LS_ERROR) << "AudioComponentInstanceDispose failed: " << result;
- }
- vpio_unit_ = nullptr;
+ DisposeAudioUnit();
}
+ // Remove audio session notification observers.
+ UnregisterNotificationObservers();
+
// All I/O should be stopped or paused prior to deactivating the audio
// session, hence we deactivate as last action.
- AVAudioSession* session = [AVAudioSession sharedInstance];
- ActivateAudioSession(session, false);
- return true;
+ DeactivateAudioSession();
+}
+
+void AudioDeviceIOS::DisposeAudioUnit() {
+ if (nullptr == vpio_unit_)
+ return;
+ OSStatus result = AudioComponentInstanceDispose(vpio_unit_);
+ if (result != noErr) {
+    LOG(LS_ERROR) << "AudioComponentInstanceDispose failed: " << result;
+ }
+ vpio_unit_ = nullptr;
}
OSStatus AudioDeviceIOS::RecordedDataIsAvailable(
@@ -855,8 +1038,11 @@ OSStatus AudioDeviceIOS::OnRecordedDataIsAvailable(
if (in_number_frames != record_parameters_.frames_per_buffer()) {
// We have seen short bursts (1-2 frames) where |in_number_frames| changes.
// Add a log to keep track of longer sequences if that should ever happen.
+ // Also return since calling AudioUnitRender in this state will only result
+ // in kAudio_ParamError (-50) anyhow.
LOG(LS_WARNING) << "in_number_frames (" << in_number_frames
<< ") != " << record_parameters_.frames_per_buffer();
+ return noErr;
}
// Obtain the recorded audio samples by initiating a rendering cycle.
// Since it happens on the input bus, the |io_data| parameter is a reference
@@ -866,7 +1052,7 @@ OSStatus AudioDeviceIOS::OnRecordedDataIsAvailable(
result = AudioUnitRender(vpio_unit_, io_action_flags, in_time_stamp,
in_bus_number, in_number_frames, io_data);
if (result != noErr) {
- LOG_F(LS_ERROR) << "AudioOutputUnitStart failed: " << result;
+ LOG_F(LS_ERROR) << "AudioUnitRender failed: " << result;
return result;
}
// Get a pointer to the recorded audio and send it to the WebRTC ADB.
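The early return added above keeps the callback from handing AudioUnitRender a frame count it will reject as kAudio_ParamError (-50). The guard, reduced to plain types (expected_frames plays the role of record_parameters_.frames_per_buffer()):

  #include <cstdio>

  int OnInput(unsigned in_number_frames, unsigned expected_frames) {
    if (in_number_frames != expected_frames) {
      // Log and drop this callback; rendering would only fail with -50.
      std::fprintf(stderr, "in_number_frames (%u) != %u\n", in_number_frames,
                   expected_frames);
      return 0;  // Report success (noErr) rather than propagate an error.
    }
    // Normal path: render the input and hand it to the audio device buffer.
    return 0;
  }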
diff --git a/webrtc/modules/audio_device/ios/audio_device_unittest_ios.cc b/webrtc/modules/audio_device/ios/audio_device_unittest_ios.cc
index b892f28b1d..076a67430d 100644
--- a/webrtc/modules/audio_device/ios/audio_device_unittest_ios.cc
+++ b/webrtc/modules/audio_device/ios/audio_device_unittest_ios.cc
@@ -373,7 +373,7 @@ class MockAudioTransport : public AudioTransport {
int32_t(const void* audioSamples,
const size_t nSamples,
const size_t nBytesPerSample,
- const uint8_t nChannels,
+ const size_t nChannels,
const uint32_t samplesPerSec,
const uint32_t totalDelayMS,
const int32_t clockDrift,
@@ -383,7 +383,7 @@ class MockAudioTransport : public AudioTransport {
MOCK_METHOD8(NeedMorePlayData,
int32_t(const size_t nSamples,
const size_t nBytesPerSample,
- const uint8_t nChannels,
+ const size_t nChannels,
const uint32_t samplesPerSec,
void* audioSamples,
size_t& nSamplesOut,
@@ -413,7 +413,7 @@ class MockAudioTransport : public AudioTransport {
int32_t RealRecordedDataIsAvailable(const void* audioSamples,
const size_t nSamples,
const size_t nBytesPerSample,
- const uint8_t nChannels,
+ const size_t nChannels,
const uint32_t samplesPerSec,
const uint32_t totalDelayMS,
const int32_t clockDrift,
@@ -428,14 +428,16 @@ class MockAudioTransport : public AudioTransport {
audio_stream_->Write(audioSamples, nSamples);
}
if (ReceivedEnoughCallbacks()) {
- test_is_done_->Set();
+ if (test_is_done_) {
+ test_is_done_->Set();
+ }
}
return 0;
}
int32_t RealNeedMorePlayData(const size_t nSamples,
const size_t nBytesPerSample,
- const uint8_t nChannels,
+ const size_t nChannels,
const uint32_t samplesPerSec,
void* audioSamples,
size_t& nSamplesOut,
@@ -450,7 +452,9 @@ class MockAudioTransport : public AudioTransport {
audio_stream_->Read(audioSamples, nSamples);
}
if (ReceivedEnoughCallbacks()) {
- test_is_done_->Set();
+ if (test_is_done_) {
+ test_is_done_->Set();
+ }
}
return 0;
}
@@ -636,6 +640,62 @@ TEST_F(AudioDeviceTest, StopPlayoutRequiresInitToRestart) {
EXPECT_FALSE(audio_device()->PlayoutIsInitialized());
}
+// Verify that we can create two ADMs and start playing on the second ADM.
+// Only the first active instance shall activate an audio session and the
+// last active instance shall deactivate the audio session. The test does not
+// explicitly verify correct audio session calls but instead focuses on
+// ensuring that audio starts for both ADMs.
+TEST_F(AudioDeviceTest, StartPlayoutOnTwoInstances) {
+ // Create and initialize a second/extra ADM instance. The default ADM is
+ // created by the test harness.
+ rtc::scoped_refptr<AudioDeviceModule> second_audio_device =
+ CreateAudioDevice(AudioDeviceModule::kPlatformDefaultAudio);
+ EXPECT_NE(second_audio_device.get(), nullptr);
+ EXPECT_EQ(0, second_audio_device->Init());
+
+ // Start playout for the default ADM but don't wait here. Instead use the
+ // upcoming second stream for that. We set the same expectation on number
+ // of callbacks as for the second stream.
+ NiceMock<MockAudioTransport> mock(kPlayout);
+ mock.HandleCallbacks(nullptr, nullptr, 0);
+ EXPECT_CALL(
+ mock, NeedMorePlayData(playout_frames_per_10ms_buffer(), kBytesPerSample,
+ playout_channels(), playout_sample_rate(),
+ NotNull(), _, _, _))
+ .Times(AtLeast(kNumCallbacks));
+ EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
+ StartPlayout();
+
+ // Initialize playout for the second ADM. If all is OK, the second ADM shall
+ // reuse the audio session activated when the first ADM started playing.
+ // This call will also ensure that we avoid a problem related to initializing
+ // two different audio unit instances back to back (see webrtc:5166 for
+ // details).
+ EXPECT_EQ(0, second_audio_device->InitPlayout());
+ EXPECT_TRUE(second_audio_device->PlayoutIsInitialized());
+
+ // Start playout for the second ADM and verify that it starts as intended.
+ // Passing this test ensures that initialization of the second audio unit
+ // has been done successfully and that there is no conflict with the already
+ // playing first ADM.
+ MockAudioTransport mock2(kPlayout);
+ mock2.HandleCallbacks(test_is_done_.get(), nullptr, kNumCallbacks);
+ EXPECT_CALL(
+ mock2, NeedMorePlayData(playout_frames_per_10ms_buffer(), kBytesPerSample,
+ playout_channels(), playout_sample_rate(),
+ NotNull(), _, _, _))
+ .Times(AtLeast(kNumCallbacks));
+ EXPECT_EQ(0, second_audio_device->RegisterAudioCallback(&mock2));
+ EXPECT_EQ(0, second_audio_device->StartPlayout());
+ EXPECT_TRUE(second_audio_device->Playing());
+ test_is_done_->Wait(kTestTimeOutInMilliseconds);
+ EXPECT_EQ(0, second_audio_device->StopPlayout());
+ EXPECT_FALSE(second_audio_device->Playing());
+ EXPECT_FALSE(second_audio_device->PlayoutIsInitialized());
+
+ EXPECT_EQ(0, second_audio_device->Terminate());
+}
+
// Start playout and verify that the native audio layer starts asking for real
// audio samples to play out using the NeedMorePlayData callback.
TEST_F(AudioDeviceTest, StartPlayoutVerifyCallbacks) {
diff --git a/webrtc/modules/audio_device/linux/audio_device_alsa_linux.cc b/webrtc/modules/audio_device/linux/audio_device_alsa_linux.cc
index 8fa4fdf6f0..bdbccde050 100644
--- a/webrtc/modules/audio_device/linux/audio_device_alsa_linux.cc
+++ b/webrtc/modules/audio_device/linux/audio_device_alsa_linux.cc
@@ -207,7 +207,7 @@ int32_t AudioDeviceLinuxALSA::Terminate()
// RECORDING
if (_ptrThreadRec)
{
- ThreadWrapper* tmpThread = _ptrThreadRec.release();
+ rtc::PlatformThread* tmpThread = _ptrThreadRec.release();
_critSect.Leave();
tmpThread->Stop();
@@ -219,7 +219,7 @@ int32_t AudioDeviceLinuxALSA::Terminate()
// PLAYOUT
if (_ptrThreadPlay)
{
- ThreadWrapper* tmpThread = _ptrThreadPlay.release();
+ rtc::PlatformThread* tmpThread = _ptrThreadPlay.release();
_critSect.Leave();
tmpThread->Stop();
@@ -1363,21 +1363,11 @@ int32_t AudioDeviceLinuxALSA::StartRecording()
return -1;
}
// RECORDING
- const char* threadName = "webrtc_audio_module_capture_thread";
- _ptrThreadRec = ThreadWrapper::CreateThread(
- RecThreadFunc, this, threadName);
+ _ptrThreadRec.reset(new rtc::PlatformThread(
+ RecThreadFunc, this, "webrtc_audio_module_capture_thread"));
- if (!_ptrThreadRec->Start())
- {
- WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
- " failed to start the rec audio thread");
- _recording = false;
- _ptrThreadRec.reset();
- delete [] _recordingBuffer;
- _recordingBuffer = NULL;
- return -1;
- }
- _ptrThreadRec->SetPriority(kRealtimePriority);
+ _ptrThreadRec->Start();
+ _ptrThreadRec->SetPriority(rtc::kRealtimePriority);
errVal = LATE(snd_pcm_prepare)(_handleRecord);
if (errVal < 0)
@@ -1517,20 +1507,10 @@ int32_t AudioDeviceLinuxALSA::StartPlayout()
}
// PLAYOUT
- const char* threadName = "webrtc_audio_module_play_thread";
- _ptrThreadPlay = ThreadWrapper::CreateThread(PlayThreadFunc, this,
- threadName);
- if (!_ptrThreadPlay->Start())
- {
- WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
- " failed to start the play audio thread");
- _playing = false;
- _ptrThreadPlay.reset();
- delete [] _playoutBuffer;
- _playoutBuffer = NULL;
- return -1;
- }
- _ptrThreadPlay->SetPriority(kRealtimePriority);
+ _ptrThreadPlay.reset(new rtc::PlatformThread(
+ PlayThreadFunc, this, "webrtc_audio_module_play_thread"));
+ _ptrThreadPlay->Start();
+ _ptrThreadPlay->SetPriority(rtc::kRealtimePriority);
int errVal = LATE(snd_pcm_prepare)(_handlePlayout);
if (errVal < 0)
diff --git a/webrtc/modules/audio_device/linux/audio_device_alsa_linux.h b/webrtc/modules/audio_device/linux/audio_device_alsa_linux.h
index e2391a0456..4a1a5191be 100644
--- a/webrtc/modules/audio_device/linux/audio_device_alsa_linux.h
+++ b/webrtc/modules/audio_device/linux/audio_device_alsa_linux.h
@@ -11,10 +11,10 @@
#ifndef WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_ALSA_LINUX_H
#define WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_ALSA_LINUX_H
+#include "webrtc/base/platform_thread.h"
#include "webrtc/modules/audio_device/audio_device_generic.h"
#include "webrtc/modules/audio_device/linux/audio_mixer_manager_alsa_linux.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
-#include "webrtc/system_wrappers/include/thread_wrapper.h"
#if defined(USE_X11)
#include <X11/Xlib.h>
@@ -185,8 +185,10 @@ private:
CriticalSectionWrapper& _critSect;
- rtc::scoped_ptr<ThreadWrapper> _ptrThreadRec;
- rtc::scoped_ptr<ThreadWrapper> _ptrThreadPlay;
+ // TODO(pbos): Make plain members and start/stop instead of resetting these
+ // pointers. A thread can be reused.
+ rtc::scoped_ptr<rtc::PlatformThread> _ptrThreadRec;
+ rtc::scoped_ptr<rtc::PlatformThread> _ptrThreadPlay;
int32_t _id;
diff --git a/webrtc/modules/audio_device/linux/audio_device_pulse_linux.cc b/webrtc/modules/audio_device/linux/audio_device_pulse_linux.cc
index 929a758e40..42c3ea8295 100644
--- a/webrtc/modules/audio_device/linux/audio_device_pulse_linux.cc
+++ b/webrtc/modules/audio_device/linux/audio_device_pulse_linux.cc
@@ -200,33 +200,17 @@ int32_t AudioDeviceLinuxPulse::Init()
}
// RECORDING
- const char* threadName = "webrtc_audio_module_rec_thread";
- _ptrThreadRec = ThreadWrapper::CreateThread(RecThreadFunc, this,
- threadName);
- if (!_ptrThreadRec->Start())
- {
- WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
- " failed to start the rec audio thread");
-
- _ptrThreadRec.reset();
- return -1;
- }
+ _ptrThreadRec.reset(new rtc::PlatformThread(
+ RecThreadFunc, this, "webrtc_audio_module_rec_thread"));
- _ptrThreadRec->SetPriority(kRealtimePriority);
+ _ptrThreadRec->Start();
+ _ptrThreadRec->SetPriority(rtc::kRealtimePriority);
// PLAYOUT
- threadName = "webrtc_audio_module_play_thread";
- _ptrThreadPlay = ThreadWrapper::CreateThread(PlayThreadFunc, this,
- threadName);
- if (!_ptrThreadPlay->Start())
- {
- WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
- " failed to start the play audio thread");
-
- _ptrThreadPlay.reset();
- return -1;
- }
- _ptrThreadPlay->SetPriority(kRealtimePriority);
+ _ptrThreadPlay.reset(new rtc::PlatformThread(
+ PlayThreadFunc, this, "webrtc_audio_module_play_thread"));
+ _ptrThreadPlay->Start();
+ _ptrThreadPlay->SetPriority(rtc::kRealtimePriority);
_initialized = true;
@@ -246,7 +230,7 @@ int32_t AudioDeviceLinuxPulse::Terminate()
// RECORDING
if (_ptrThreadRec)
{
- ThreadWrapper* tmpThread = _ptrThreadRec.release();
+ rtc::PlatformThread* tmpThread = _ptrThreadRec.release();
_timeEventRec.Set();
tmpThread->Stop();
@@ -256,7 +240,7 @@ int32_t AudioDeviceLinuxPulse::Terminate()
// PLAYOUT
if (_ptrThreadPlay)
{
- ThreadWrapper* tmpThread = _ptrThreadPlay.release();
+ rtc::PlatformThread* tmpThread = _ptrThreadPlay.release();
_timeEventPlay.Set();
tmpThread->Stop();
diff --git a/webrtc/modules/audio_device/linux/audio_device_pulse_linux.h b/webrtc/modules/audio_device/linux/audio_device_pulse_linux.h
index 718331188d..de8df0be1b 100644
--- a/webrtc/modules/audio_device/linux/audio_device_pulse_linux.h
+++ b/webrtc/modules/audio_device/linux/audio_device_pulse_linux.h
@@ -11,11 +11,11 @@
#ifndef WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_PULSE_LINUX_H
#define WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_PULSE_LINUX_H
+#include "webrtc/base/platform_thread.h"
+#include "webrtc/base/thread_checker.h"
#include "webrtc/modules/audio_device/audio_device_generic.h"
#include "webrtc/modules/audio_device/linux/audio_mixer_manager_pulse_linux.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
-#include "webrtc/system_wrappers/include/thread_wrapper.h"
-#include "webrtc/base/thread_checker.h"
#include <X11/Xlib.h>
#include <pulse/pulseaudio.h>
@@ -284,8 +284,9 @@ private:
EventWrapper& _recStartEvent;
EventWrapper& _playStartEvent;
- rtc::scoped_ptr<ThreadWrapper> _ptrThreadPlay;
- rtc::scoped_ptr<ThreadWrapper> _ptrThreadRec;
+ // TODO(pbos): Remove scoped_ptr and use directly without resetting.
+ rtc::scoped_ptr<rtc::PlatformThread> _ptrThreadPlay;
+ rtc::scoped_ptr<rtc::PlatformThread> _ptrThreadRec;
int32_t _id;
AudioMixerManagerLinuxPulse _mixerManager;
diff --git a/webrtc/modules/audio_device/mac/audio_device_mac.cc b/webrtc/modules/audio_device/mac/audio_device_mac.cc
index db98675bf6..0f33d1124d 100644
--- a/webrtc/modules/audio_device/mac/audio_device_mac.cc
+++ b/webrtc/modules/audio_device/mac/audio_device_mac.cc
@@ -10,3227 +10,2758 @@
#include "webrtc/base/arraysize.h"
#include "webrtc/base/checks.h"
+#include "webrtc/base/platform_thread.h"
#include "webrtc/modules/audio_device/audio_device_config.h"
#include "webrtc/modules/audio_device/mac/audio_device_mac.h"
#include "webrtc/modules/audio_device/mac/portaudio/pa_ringbuffer.h"
#include "webrtc/system_wrappers/include/event_wrapper.h"
-#include "webrtc/system_wrappers/include/thread_wrapper.h"
#include "webrtc/system_wrappers/include/trace.h"
#include <ApplicationServices/ApplicationServices.h>
-#include <libkern/OSAtomic.h> // OSAtomicCompareAndSwap()
-#include <mach/mach.h> // mach_task_self()
-#include <sys/sysctl.h> // sysctlbyname()
-
-
-
-namespace webrtc
-{
-
-#define WEBRTC_CA_RETURN_ON_ERR(expr) \
- do { \
- err = expr; \
- if (err != noErr) { \
- logCAMsg(kTraceError, kTraceAudioDevice, _id, \
- "Error in " #expr, (const char *)&err); \
- return -1; \
- } \
- } while(0)
-
-#define WEBRTC_CA_LOG_ERR(expr) \
- do { \
- err = expr; \
- if (err != noErr) { \
- logCAMsg(kTraceError, kTraceAudioDevice, _id, \
- "Error in " #expr, (const char *)&err); \
- } \
- } while(0)
-
-#define WEBRTC_CA_LOG_WARN(expr) \
- do { \
- err = expr; \
- if (err != noErr) { \
- logCAMsg(kTraceWarning, kTraceAudioDevice, _id, \
- "Error in " #expr, (const char *)&err); \
- } \
- } while(0)
-
-enum
-{
- MaxNumberDevices = 64
-};
-
-void AudioDeviceMac::AtomicSet32(int32_t* theValue, int32_t newValue)
-{
- while (1)
- {
- int32_t oldValue = *theValue;
- if (OSAtomicCompareAndSwap32Barrier(oldValue, newValue, theValue)
- == true)
- {
- return;
- }
+#include <libkern/OSAtomic.h> // OSAtomicCompareAndSwap()
+#include <mach/mach.h> // mach_task_self()
+#include <sys/sysctl.h> // sysctlbyname()
+
+namespace webrtc {
+
+#define WEBRTC_CA_RETURN_ON_ERR(expr) \
+ do { \
+ err = expr; \
+ if (err != noErr) { \
+ logCAMsg(kTraceError, kTraceAudioDevice, _id, "Error in " #expr, \
+ (const char*) & err); \
+ return -1; \
+ } \
+ } while (0)
+
+#define WEBRTC_CA_LOG_ERR(expr) \
+ do { \
+ err = expr; \
+ if (err != noErr) { \
+ logCAMsg(kTraceError, kTraceAudioDevice, _id, "Error in " #expr, \
+ (const char*) & err); \
+ } \
+ } while (0)
+
+#define WEBRTC_CA_LOG_WARN(expr) \
+ do { \
+ err = expr; \
+ if (err != noErr) { \
+ logCAMsg(kTraceWarning, kTraceAudioDevice, _id, "Error in " #expr, \
+ (const char*) & err); \
+ } \
+ } while (0)
+
+enum { MaxNumberDevices = 64 };
+
+void AudioDeviceMac::AtomicSet32(int32_t* theValue, int32_t newValue) {
+ while (1) {
+ int32_t oldValue = *theValue;
+ if (OSAtomicCompareAndSwap32Barrier(oldValue, newValue, theValue) == true) {
+ return;
}
+ }
}
-int32_t AudioDeviceMac::AtomicGet32(int32_t* theValue)
-{
- while (1)
- {
- int32_t value = *theValue;
- if (OSAtomicCompareAndSwap32Barrier(value, value, theValue) == true)
- {
- return value;
- }
+int32_t AudioDeviceMac::AtomicGet32(int32_t* theValue) {
+ while (1) {
+ int32_t value = *theValue;
+ if (OSAtomicCompareAndSwap32Barrier(value, value, theValue) == true) {
+ return value;
}
+ }
}
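AtomicSet32()/AtomicGet32() above spin on OSAtomicCompareAndSwap32Barrier until the CAS succeeds, which amounts to a sequentially consistent store and load. On a modern toolchain the same effect is one line each with std::atomic; a sketch of that substitution (not what this patch does, just the equivalent):

  #include <atomic>
  #include <cstdint>

  std::atomic<int32_t> value{0};

  // Both default to std::memory_order_seq_cst, matching the barrier CAS loops.
  void AtomicSet32(int32_t new_value) { value.store(new_value); }
  int32_t AtomicGet32() { return value.load(); }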
// CoreAudio errors are best interpreted as four character strings.
void AudioDeviceMac::logCAMsg(const TraceLevel level,
const TraceModule module,
- const int32_t id, const char *msg,
- const char *err)
-{
+ const int32_t id,
+ const char* msg,
+ const char* err) {
RTC_DCHECK(msg != NULL);
RTC_DCHECK(err != NULL);
#ifdef WEBRTC_ARCH_BIG_ENDIAN
- WEBRTC_TRACE(level, module, id, "%s: %.4s", msg, err);
+ WEBRTC_TRACE(level, module, id, "%s: %.4s", msg, err);
#else
- // We need to flip the characters in this case.
- WEBRTC_TRACE(level, module, id, "%s: %.1s%.1s%.1s%.1s", msg, err + 3, err
- + 2, err + 1, err);
+ // We need to flip the characters in this case.
+ WEBRTC_TRACE(level, module, id, "%s: %.1s%.1s%.1s%.1s", msg, err + 3, err + 2,
+ err + 1, err);
#endif
}
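logCAMsg() above prints an OSStatus as its four-character code, reversing the bytes on little-endian hosts so the code reads in its documented order. A self-contained sketch of the same decoding:

  #include <cstdint>
  #include <cstdio>
  #include <cstring>
  #include <utility>

  // Print a 32-bit status as a FourCC string, host-endianness aware.
  void PrintFourCC(int32_t err) {
    char fourcc[4];
    std::memcpy(fourcc, &err, 4);
  #ifndef WEBRTC_ARCH_BIG_ENDIAN
    // Little-endian: memcpy yields the bytes reversed, so flip them back.
    std::swap(fourcc[0], fourcc[3]);
    std::swap(fourcc[1], fourcc[2]);
  #endif
    std::printf("%.4s\n", fourcc);
  }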
-AudioDeviceMac::AudioDeviceMac(const int32_t id) :
- _ptrAudioBuffer(NULL),
- _critSect(*CriticalSectionWrapper::CreateCriticalSection()),
- _stopEventRec(*EventWrapper::Create()),
- _stopEvent(*EventWrapper::Create()),
- _id(id),
- _mixerManager(id),
- _inputDeviceIndex(0),
- _outputDeviceIndex(0),
- _inputDeviceID(kAudioObjectUnknown),
- _outputDeviceID(kAudioObjectUnknown),
- _inputDeviceIsSpecified(false),
- _outputDeviceIsSpecified(false),
- _recChannels(N_REC_CHANNELS),
- _playChannels(N_PLAY_CHANNELS),
- _captureBufData(NULL),
- _renderBufData(NULL),
- _playBufType(AudioDeviceModule::kFixedBufferSize),
- _initialized(false),
- _isShutDown(false),
- _recording(false),
- _playing(false),
- _recIsInitialized(false),
- _playIsInitialized(false),
- _AGC(false),
- _renderDeviceIsAlive(1),
- _captureDeviceIsAlive(1),
- _twoDevices(true),
- _doStop(false),
- _doStopRec(false),
- _macBookPro(false),
- _macBookProPanRight(false),
- _captureLatencyUs(0),
- _renderLatencyUs(0),
- _captureDelayUs(0),
- _renderDelayUs(0),
- _renderDelayOffsetSamples(0),
- _playBufDelayFixed(20),
- _playWarning(0),
- _playError(0),
- _recWarning(0),
- _recError(0),
- _paCaptureBuffer(NULL),
- _paRenderBuffer(NULL),
- _captureBufSizeSamples(0),
- _renderBufSizeSamples(0),
- prev_key_state_()
-{
- WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, id,
- "%s created", __FUNCTION__);
-
- RTC_DCHECK(&_stopEvent != NULL);
- RTC_DCHECK(&_stopEventRec != NULL);
-
- memset(_renderConvertData, 0, sizeof(_renderConvertData));
- memset(&_outStreamFormat, 0, sizeof(AudioStreamBasicDescription));
- memset(&_outDesiredFormat, 0, sizeof(AudioStreamBasicDescription));
- memset(&_inStreamFormat, 0, sizeof(AudioStreamBasicDescription));
- memset(&_inDesiredFormat, 0, sizeof(AudioStreamBasicDescription));
-}
-
-
-AudioDeviceMac::~AudioDeviceMac()
-{
- WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id,
- "%s destroyed", __FUNCTION__);
-
- if (!_isShutDown)
- {
- Terminate();
- }
+AudioDeviceMac::AudioDeviceMac(const int32_t id)
+ : _ptrAudioBuffer(NULL),
+ _critSect(*CriticalSectionWrapper::CreateCriticalSection()),
+ _stopEventRec(*EventWrapper::Create()),
+ _stopEvent(*EventWrapper::Create()),
+ _id(id),
+ _mixerManager(id),
+ _inputDeviceIndex(0),
+ _outputDeviceIndex(0),
+ _inputDeviceID(kAudioObjectUnknown),
+ _outputDeviceID(kAudioObjectUnknown),
+ _inputDeviceIsSpecified(false),
+ _outputDeviceIsSpecified(false),
+ _recChannels(N_REC_CHANNELS),
+ _playChannels(N_PLAY_CHANNELS),
+ _captureBufData(NULL),
+ _renderBufData(NULL),
+ _playBufType(AudioDeviceModule::kFixedBufferSize),
+ _initialized(false),
+ _isShutDown(false),
+ _recording(false),
+ _playing(false),
+ _recIsInitialized(false),
+ _playIsInitialized(false),
+ _AGC(false),
+ _renderDeviceIsAlive(1),
+ _captureDeviceIsAlive(1),
+ _twoDevices(true),
+ _doStop(false),
+ _doStopRec(false),
+ _macBookPro(false),
+ _macBookProPanRight(false),
+ _captureLatencyUs(0),
+ _renderLatencyUs(0),
+ _captureDelayUs(0),
+ _renderDelayUs(0),
+ _renderDelayOffsetSamples(0),
+ _playBufDelayFixed(20),
+ _playWarning(0),
+ _playError(0),
+ _recWarning(0),
+ _recError(0),
+ _paCaptureBuffer(NULL),
+ _paRenderBuffer(NULL),
+ _captureBufSizeSamples(0),
+ _renderBufSizeSamples(0),
+ prev_key_state_(),
+ get_mic_volume_counter_ms_(0) {
+ WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, id, "%s created", __FUNCTION__);
+
+ RTC_DCHECK(&_stopEvent != NULL);
+ RTC_DCHECK(&_stopEventRec != NULL);
+
+ memset(_renderConvertData, 0, sizeof(_renderConvertData));
+ memset(&_outStreamFormat, 0, sizeof(AudioStreamBasicDescription));
+ memset(&_outDesiredFormat, 0, sizeof(AudioStreamBasicDescription));
+ memset(&_inStreamFormat, 0, sizeof(AudioStreamBasicDescription));
+ memset(&_inDesiredFormat, 0, sizeof(AudioStreamBasicDescription));
+}
+
+AudioDeviceMac::~AudioDeviceMac() {
+ WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "%s destroyed",
+ __FUNCTION__);
+
+ if (!_isShutDown) {
+ Terminate();
+ }
- RTC_DCHECK(!capture_worker_thread_.get());
- RTC_DCHECK(!render_worker_thread_.get());
+ RTC_DCHECK(!capture_worker_thread_.get());
+ RTC_DCHECK(!render_worker_thread_.get());
- if (_paRenderBuffer)
- {
- delete _paRenderBuffer;
- _paRenderBuffer = NULL;
- }
+ if (_paRenderBuffer) {
+ delete _paRenderBuffer;
+ _paRenderBuffer = NULL;
+ }
- if (_paCaptureBuffer)
- {
- delete _paCaptureBuffer;
- _paCaptureBuffer = NULL;
- }
+ if (_paCaptureBuffer) {
+ delete _paCaptureBuffer;
+ _paCaptureBuffer = NULL;
+ }
- if (_renderBufData)
- {
- delete[] _renderBufData;
- _renderBufData = NULL;
- }
+ if (_renderBufData) {
+ delete[] _renderBufData;
+ _renderBufData = NULL;
+ }
- if (_captureBufData)
- {
- delete[] _captureBufData;
- _captureBufData = NULL;
- }
+ if (_captureBufData) {
+ delete[] _captureBufData;
+ _captureBufData = NULL;
+ }
- kern_return_t kernErr = KERN_SUCCESS;
- kernErr = semaphore_destroy(mach_task_self(), _renderSemaphore);
- if (kernErr != KERN_SUCCESS)
- {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- " semaphore_destroy() error: %d", kernErr);
- }
+ kern_return_t kernErr = KERN_SUCCESS;
+ kernErr = semaphore_destroy(mach_task_self(), _renderSemaphore);
+ if (kernErr != KERN_SUCCESS) {
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+ " semaphore_destroy() error: %d", kernErr);
+ }
- kernErr = semaphore_destroy(mach_task_self(), _captureSemaphore);
- if (kernErr != KERN_SUCCESS)
- {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- " semaphore_destroy() error: %d", kernErr);
- }
+ kernErr = semaphore_destroy(mach_task_self(), _captureSemaphore);
+ if (kernErr != KERN_SUCCESS) {
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+ " semaphore_destroy() error: %d", kernErr);
+ }
- delete &_stopEvent;
- delete &_stopEventRec;
- delete &_critSect;
+ delete &_stopEvent;
+ delete &_stopEventRec;
+ delete &_critSect;
}
// ============================================================================
// API
// ============================================================================
-void AudioDeviceMac::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer)
-{
+void AudioDeviceMac::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
+ CriticalSectionScoped lock(&_critSect);
- CriticalSectionScoped lock(&_critSect);
+ _ptrAudioBuffer = audioBuffer;
- _ptrAudioBuffer = audioBuffer;
-
- // inform the AudioBuffer about default settings for this implementation
- _ptrAudioBuffer->SetRecordingSampleRate(N_REC_SAMPLES_PER_SEC);
- _ptrAudioBuffer->SetPlayoutSampleRate(N_PLAY_SAMPLES_PER_SEC);
- _ptrAudioBuffer->SetRecordingChannels(N_REC_CHANNELS);
- _ptrAudioBuffer->SetPlayoutChannels(N_PLAY_CHANNELS);
+ // inform the AudioBuffer about default settings for this implementation
+ _ptrAudioBuffer->SetRecordingSampleRate(N_REC_SAMPLES_PER_SEC);
+ _ptrAudioBuffer->SetPlayoutSampleRate(N_PLAY_SAMPLES_PER_SEC);
+ _ptrAudioBuffer->SetRecordingChannels(N_REC_CHANNELS);
+ _ptrAudioBuffer->SetPlayoutChannels(N_PLAY_CHANNELS);
}
int32_t AudioDeviceMac::ActiveAudioLayer(
- AudioDeviceModule::AudioLayer& audioLayer) const
-{
- audioLayer = AudioDeviceModule::kPlatformDefaultAudio;
- return 0;
+ AudioDeviceModule::AudioLayer& audioLayer) const {
+ audioLayer = AudioDeviceModule::kPlatformDefaultAudio;
+ return 0;
}
-int32_t AudioDeviceMac::Init()
-{
+int32_t AudioDeviceMac::Init() {
+ CriticalSectionScoped lock(&_critSect);
- CriticalSectionScoped lock(&_critSect);
+ if (_initialized) {
+ return 0;
+ }
- if (_initialized)
- {
- return 0;
- }
+ OSStatus err = noErr;
- OSStatus err = noErr;
+ _isShutDown = false;
- _isShutDown = false;
-
- // PortAudio ring buffers require an elementCount which is a power of two.
- if (_renderBufData == NULL)
- {
- UInt32 powerOfTwo = 1;
- while (powerOfTwo < PLAY_BUF_SIZE_IN_SAMPLES)
- {
- powerOfTwo <<= 1;
- }
- _renderBufSizeSamples = powerOfTwo;
- _renderBufData = new SInt16[_renderBufSizeSamples];
+ // PortAudio ring buffers require an elementCount which is a power of two.
+ if (_renderBufData == NULL) {
+ UInt32 powerOfTwo = 1;
+ while (powerOfTwo < PLAY_BUF_SIZE_IN_SAMPLES) {
+ powerOfTwo <<= 1;
}
+ _renderBufSizeSamples = powerOfTwo;
+ _renderBufData = new SInt16[_renderBufSizeSamples];
+ }
- if (_paRenderBuffer == NULL)
- {
- _paRenderBuffer = new PaUtilRingBuffer;
- PaRingBufferSize bufSize = -1;
- bufSize = PaUtil_InitializeRingBuffer(_paRenderBuffer, sizeof(SInt16),
- _renderBufSizeSamples,
- _renderBufData);
- if (bufSize == -1)
- {
- WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice,
- _id, " PaUtil_InitializeRingBuffer() error");
- return -1;
- }
+ if (_paRenderBuffer == NULL) {
+ _paRenderBuffer = new PaUtilRingBuffer;
+ PaRingBufferSize bufSize = -1;
+ bufSize = PaUtil_InitializeRingBuffer(
+ _paRenderBuffer, sizeof(SInt16), _renderBufSizeSamples, _renderBufData);
+ if (bufSize == -1) {
+ WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
+ " PaUtil_InitializeRingBuffer() error");
+ return -1;
}
+ }
- if (_captureBufData == NULL)
- {
- UInt32 powerOfTwo = 1;
- while (powerOfTwo < REC_BUF_SIZE_IN_SAMPLES)
- {
- powerOfTwo <<= 1;
- }
- _captureBufSizeSamples = powerOfTwo;
- _captureBufData = new Float32[_captureBufSizeSamples];
+ if (_captureBufData == NULL) {
+ UInt32 powerOfTwo = 1;
+ while (powerOfTwo < REC_BUF_SIZE_IN_SAMPLES) {
+ powerOfTwo <<= 1;
}
+ _captureBufSizeSamples = powerOfTwo;
+ _captureBufData = new Float32[_captureBufSizeSamples];
+ }
- if (_paCaptureBuffer == NULL)
- {
- _paCaptureBuffer = new PaUtilRingBuffer;
- PaRingBufferSize bufSize = -1;
- bufSize = PaUtil_InitializeRingBuffer(_paCaptureBuffer,
- sizeof(Float32),
- _captureBufSizeSamples,
- _captureBufData);
- if (bufSize == -1)
- {
- WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice,
- _id, " PaUtil_InitializeRingBuffer() error");
- return -1;
- }
+ if (_paCaptureBuffer == NULL) {
+ _paCaptureBuffer = new PaUtilRingBuffer;
+ PaRingBufferSize bufSize = -1;
+ bufSize =
+ PaUtil_InitializeRingBuffer(_paCaptureBuffer, sizeof(Float32),
+ _captureBufSizeSamples, _captureBufData);
+ if (bufSize == -1) {
+ WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
+ " PaUtil_InitializeRingBuffer() error");
+ return -1;
}
+ }
- kern_return_t kernErr = KERN_SUCCESS;
- kernErr = semaphore_create(mach_task_self(), &_renderSemaphore,
- SYNC_POLICY_FIFO, 0);
- if (kernErr != KERN_SUCCESS)
- {
- WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
- " semaphore_create() error: %d", kernErr);
- return -1;
- }
+ kern_return_t kernErr = KERN_SUCCESS;
+ kernErr = semaphore_create(mach_task_self(), &_renderSemaphore,
+ SYNC_POLICY_FIFO, 0);
+ if (kernErr != KERN_SUCCESS) {
+ WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
+ " semaphore_create() error: %d", kernErr);
+ return -1;
+ }
- kernErr = semaphore_create(mach_task_self(), &_captureSemaphore,
- SYNC_POLICY_FIFO, 0);
- if (kernErr != KERN_SUCCESS)
- {
- WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
- " semaphore_create() error: %d", kernErr);
- return -1;
- }
+ kernErr = semaphore_create(mach_task_self(), &_captureSemaphore,
+ SYNC_POLICY_FIFO, 0);
+ if (kernErr != KERN_SUCCESS) {
+ WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
+ " semaphore_create() error: %d", kernErr);
+ return -1;
+ }
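
For reference, a minimal sketch of the Mach semaphore API used here; elsewhere in this file the render and capture worker threads presumably block in semaphore_wait() until the IOProcs call semaphore_signal(). macOS only:

    #include <mach/mach.h>
    #include <cstdio>

    int main() {
      semaphore_t sem;
      kern_return_t kr =
          semaphore_create(mach_task_self(), &sem, SYNC_POLICY_FIFO, 0);
      if (kr != KERN_SUCCESS) {
        std::fprintf(stderr, "semaphore_create() error: %d\n", kr);
        return -1;
      }
      semaphore_signal(sem);  // count 0 -> 1
      semaphore_wait(sem);    // count 1 -> 0, returns immediately
      semaphore_destroy(mach_task_self(), sem);
      return 0;
    }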
- // Setting RunLoop to NULL here instructs HAL to manage its own thread for
- // notifications. This was the default behaviour on OS X 10.5 and earlier,
- // but now must be explicitly specified. HAL would otherwise try to use the
- // main thread to issue notifications.
- AudioObjectPropertyAddress propertyAddress = {
- kAudioHardwarePropertyRunLoop,
- kAudioObjectPropertyScopeGlobal,
- kAudioObjectPropertyElementMaster };
- CFRunLoopRef runLoop = NULL;
- UInt32 size = sizeof(CFRunLoopRef);
- WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(kAudioObjectSystemObject,
- &propertyAddress, 0, NULL, size, &runLoop));
-
- // Listen for any device changes.
- propertyAddress.mSelector = kAudioHardwarePropertyDevices;
- WEBRTC_CA_LOG_ERR(AudioObjectAddPropertyListener(kAudioObjectSystemObject,
- &propertyAddress, &objectListenerProc, this));
-
- // Determine if this is a MacBook Pro
- _macBookPro = false;
- _macBookProPanRight = false;
- char buf[128];
- size_t length = sizeof(buf);
- memset(buf, 0, length);
-
- int intErr = sysctlbyname("hw.model", buf, &length, NULL, 0);
- if (intErr != 0)
- {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- " Error in sysctlbyname(): %d", err);
- } else
- {
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- " Hardware model: %s", buf);
- if (strncmp(buf, "MacBookPro", 10) == 0)
- {
- _macBookPro = true;
- }
+ // Setting RunLoop to NULL here instructs HAL to manage its own thread for
+ // notifications. This was the default behaviour on OS X 10.5 and earlier,
+ // but now must be explicitly specified. HAL would otherwise try to use the
+ // main thread to issue notifications.
+ AudioObjectPropertyAddress propertyAddress = {
+ kAudioHardwarePropertyRunLoop, kAudioObjectPropertyScopeGlobal,
+ kAudioObjectPropertyElementMaster};
+ CFRunLoopRef runLoop = NULL;
+ UInt32 size = sizeof(CFRunLoopRef);
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(
+ kAudioObjectSystemObject, &propertyAddress, 0, NULL, size, &runLoop));
+
+ // Listen for any device changes.
+ propertyAddress.mSelector = kAudioHardwarePropertyDevices;
+ WEBRTC_CA_LOG_ERR(AudioObjectAddPropertyListener(
+ kAudioObjectSystemObject, &propertyAddress, &objectListenerProc, this));
+
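
A condensed sketch of the listener wiring set up here and torn down again in Terminate() below: the same AudioObjectPropertyAddress goes to both the add and remove calls, and the callback must have the AudioObjectPropertyListenerProc signature. MyListener is a placeholder name; compile with -framework CoreAudio:

    #include <CoreAudio/CoreAudio.h>

    static OSStatus MyListener(AudioObjectID objectId,
                               UInt32 numberAddresses,
                               const AudioObjectPropertyAddress addresses[],
                               void* clientData) {
      // Invoked on the HAL's notification thread when the device list changes.
      return noErr;
    }

    int main() {
      AudioObjectPropertyAddress addr = {kAudioHardwarePropertyDevices,
                                         kAudioObjectPropertyScopeGlobal,
                                         kAudioObjectPropertyElementMaster};
      AudioObjectAddPropertyListener(kAudioObjectSystemObject, &addr,
                                     &MyListener, NULL);
      // ... run audio ...
      AudioObjectRemovePropertyListener(kAudioObjectSystemObject, &addr,
                                        &MyListener, NULL);
      return 0;
    }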
+ // Determine if this is a MacBook Pro
+ _macBookPro = false;
+ _macBookProPanRight = false;
+ char buf[128];
+ size_t length = sizeof(buf);
+ memset(buf, 0, length);
+
+ int intErr = sysctlbyname("hw.model", buf, &length, NULL, 0);
+ if (intErr != 0) {
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+ " Error in sysctlbyname(): %d", err);
+ } else {
+ WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, " Hardware model: %s",
+ buf);
+ if (strncmp(buf, "MacBookPro", 10) == 0) {
+ _macBookPro = true;
}
+ }
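
The hardware-model probe above is plain sysctl(3). A standalone sketch using only the standard "hw.model" key:

    #include <sys/sysctl.h>
    #include <cstdio>
    #include <cstring>

    int main() {
      char buf[128];
      size_t length = sizeof(buf);
      std::memset(buf, 0, length);
      if (sysctlbyname("hw.model", buf, &length, NULL, 0) != 0) {
        std::perror("sysctlbyname");
        return -1;
      }
      // Typical value: "MacBookPro11,3"; the prefix check mirrors the code.
      std::printf("model: %s (MacBook Pro: %s)\n", buf,
                  std::strncmp(buf, "MacBookPro", 10) == 0 ? "yes" : "no");
      return 0;
    }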
- _playWarning = 0;
- _playError = 0;
- _recWarning = 0;
- _recError = 0;
+ _playWarning = 0;
+ _playError = 0;
+ _recWarning = 0;
+ _recError = 0;
- _initialized = true;
+ get_mic_volume_counter_ms_ = 0;
- return 0;
+ _initialized = true;
+
+ return 0;
}
-int32_t AudioDeviceMac::Terminate()
-{
+int32_t AudioDeviceMac::Terminate() {
+ if (!_initialized) {
+ return 0;
+ }
- if (!_initialized)
- {
- return 0;
- }
+ if (_recording) {
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+ " Recording must be stopped");
+ return -1;
+ }
- if (_recording)
- {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- " Recording must be stopped");
- return -1;
- }
+ if (_playing) {
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+ " Playback must be stopped");
+ return -1;
+ }
- if (_playing)
- {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- " Playback must be stopped");
- return -1;
- }
+ _critSect.Enter();
- _critSect.Enter();
+ _mixerManager.Close();
- _mixerManager.Close();
+ OSStatus err = noErr;
+ int retVal = 0;
- OSStatus err = noErr;
- int retVal = 0;
+ AudioObjectPropertyAddress propertyAddress = {
+ kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal,
+ kAudioObjectPropertyElementMaster};
+ WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(
+ kAudioObjectSystemObject, &propertyAddress, &objectListenerProc, this));
- AudioObjectPropertyAddress propertyAddress = {
- kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal,
- kAudioObjectPropertyElementMaster };
- WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(kAudioObjectSystemObject,
- &propertyAddress, &objectListenerProc, this));
-
- err = AudioHardwareUnload();
- if (err != noErr)
- {
- logCAMsg(kTraceError, kTraceAudioDevice, _id,
- "Error in AudioHardwareUnload()", (const char*) &err);
- retVal = -1;
- }
+ err = AudioHardwareUnload();
+ if (err != noErr) {
+ logCAMsg(kTraceError, kTraceAudioDevice, _id,
+ "Error in AudioHardwareUnload()", (const char*)&err);
+ retVal = -1;
+ }
- _isShutDown = true;
- _initialized = false;
- _outputDeviceIsSpecified = false;
- _inputDeviceIsSpecified = false;
+ _isShutDown = true;
+ _initialized = false;
+ _outputDeviceIsSpecified = false;
+ _inputDeviceIsSpecified = false;
- _critSect.Leave();
+ _critSect.Leave();
- return retVal;
+ return retVal;
}
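
Calls like logCAMsg(..., (const char*)&err) above work because most CoreAudio error codes are four-character codes. A hedged sketch of making such a code printable regardless of host byte order (logCAMsg() in this file presumably handles the big/little-endian distinction itself):

    #include <cstdint>
    #include <cstdio>

    // Extract the four characters of a FourCC such as 'ispk' in display order.
    static void FourCCToString(int32_t code, char out[5]) {
      out[0] = (char)((code >> 24) & 0xFF);
      out[1] = (char)((code >> 16) & 0xFF);
      out[2] = (char)((code >> 8) & 0xFF);
      out[3] = (char)(code & 0xFF);
      out[4] = '\0';
    }

    int main() {
      char s[5];
      FourCCToString(0x6973706B, s);  // 0x6973706B == 'ispk'
      std::printf("%s\n", s);         // prints "ispk"
      return 0;
    }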
-bool AudioDeviceMac::Initialized() const
-{
- return (_initialized);
+bool AudioDeviceMac::Initialized() const {
+ return (_initialized);
}
-int32_t AudioDeviceMac::SpeakerIsAvailable(bool& available)
-{
-
- bool wasInitialized = _mixerManager.SpeakerIsInitialized();
+int32_t AudioDeviceMac::SpeakerIsAvailable(bool& available) {
+ bool wasInitialized = _mixerManager.SpeakerIsInitialized();
- // Make an attempt to open up the
- // output mixer corresponding to the currently selected output device.
- //
- if (!wasInitialized && InitSpeaker() == -1)
- {
- available = false;
- return 0;
- }
+ // Make an attempt to open up the
+ // output mixer corresponding to the currently selected output device.
+ //
+ if (!wasInitialized && InitSpeaker() == -1) {
+ available = false;
+ return 0;
+ }
- // Given that InitSpeaker was successful, we know that a valid speaker
- // exists.
- available = true;
+ // Given that InitSpeaker was successful, we know that a valid speaker
+ // exists.
+ available = true;
- // Close the initialized output mixer
- //
- if (!wasInitialized)
- {
- _mixerManager.CloseSpeaker();
- }
+ // Close the initialized output mixer
+ //
+ if (!wasInitialized) {
+ _mixerManager.CloseSpeaker();
+ }
- return 0;
+ return 0;
}
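
SpeakerIsAvailable() and the other *IsAvailable() methods below all follow the same probe pattern: note whether the mixer was already open, open it if not, take the answer, and close it again only if this call opened it. A device-independent sketch of that pattern, with Mixer standing in for _mixerManager:

    // Probe pattern shared by the availability queries in this file.
    struct Mixer {
      bool initialized = false;
      bool Open() { initialized = true; return true; }  // stand-in
      void Close() { initialized = false; }             // stand-in
    };

    static bool SpeakerIsAvailable(Mixer& mixer) {
      const bool wasInitialized = mixer.initialized;
      if (!wasInitialized && !mixer.Open()) {
        return false;  // cannot open -> not available
      }
      // A successful open is itself the availability signal.
      if (!wasInitialized) {
        mixer.Close();  // leave the state exactly as we found it
      }
      return true;
    }

    int main() {
      Mixer m;
      return SpeakerIsAvailable(m) ? 0 : 1;
    }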
-int32_t AudioDeviceMac::InitSpeaker()
-{
+int32_t AudioDeviceMac::InitSpeaker() {
+ CriticalSectionScoped lock(&_critSect);
- CriticalSectionScoped lock(&_critSect);
-
- if (_playing)
- {
- return -1;
- }
+ if (_playing) {
+ return -1;
+ }
- if (InitDevice(_outputDeviceIndex, _outputDeviceID, false) == -1)
- {
- return -1;
- }
+ if (InitDevice(_outputDeviceIndex, _outputDeviceID, false) == -1) {
+ return -1;
+ }
- if (_inputDeviceID == _outputDeviceID)
- {
- _twoDevices = false;
- } else
- {
- _twoDevices = true;
- }
+ if (_inputDeviceID == _outputDeviceID) {
+ _twoDevices = false;
+ } else {
+ _twoDevices = true;
+ }
- if (_mixerManager.OpenSpeaker(_outputDeviceID) == -1)
- {
- return -1;
- }
+ if (_mixerManager.OpenSpeaker(_outputDeviceID) == -1) {
+ return -1;
+ }
- return 0;
+ return 0;
}
-int32_t AudioDeviceMac::MicrophoneIsAvailable(bool& available)
-{
+int32_t AudioDeviceMac::MicrophoneIsAvailable(bool& available) {
+ bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
- bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
-
- // Make an attempt to open up the
- // input mixer corresponding to the currently selected output device.
- //
- if (!wasInitialized && InitMicrophone() == -1)
- {
- available = false;
- return 0;
- }
+ // Make an attempt to open up the
+ // input mixer corresponding to the currently selected input device.
+ //
+ if (!wasInitialized && InitMicrophone() == -1) {
+ available = false;
+ return 0;
+ }
- // Given that InitMicrophone was successful, we know that a valid microphone
- // exists.
- available = true;
+ // Given that InitMicrophone was successful, we know that a valid microphone
+ // exists.
+ available = true;
- // Close the initialized input mixer
- //
- if (!wasInitialized)
- {
- _mixerManager.CloseMicrophone();
- }
+ // Close the initialized input mixer
+ //
+ if (!wasInitialized) {
+ _mixerManager.CloseMicrophone();
+ }
- return 0;
+ return 0;
}
-int32_t AudioDeviceMac::InitMicrophone()
-{
-
- CriticalSectionScoped lock(&_critSect);
+int32_t AudioDeviceMac::InitMicrophone() {
+ CriticalSectionScoped lock(&_critSect);
- if (_recording)
- {
- return -1;
- }
+ if (_recording) {
+ return -1;
+ }
- if (InitDevice(_inputDeviceIndex, _inputDeviceID, true) == -1)
- {
- return -1;
- }
+ if (InitDevice(_inputDeviceIndex, _inputDeviceID, true) == -1) {
+ return -1;
+ }
- if (_inputDeviceID == _outputDeviceID)
- {
- _twoDevices = false;
- } else
- {
- _twoDevices = true;
- }
+ if (_inputDeviceID == _outputDeviceID) {
+ _twoDevices = false;
+ } else {
+ _twoDevices = true;
+ }
- if (_mixerManager.OpenMicrophone(_inputDeviceID) == -1)
- {
- return -1;
- }
+ if (_mixerManager.OpenMicrophone(_inputDeviceID) == -1) {
+ return -1;
+ }
- return 0;
+ return 0;
}
-bool AudioDeviceMac::SpeakerIsInitialized() const
-{
- return (_mixerManager.SpeakerIsInitialized());
+bool AudioDeviceMac::SpeakerIsInitialized() const {
+ return (_mixerManager.SpeakerIsInitialized());
}
-bool AudioDeviceMac::MicrophoneIsInitialized() const
-{
- return (_mixerManager.MicrophoneIsInitialized());
+bool AudioDeviceMac::MicrophoneIsInitialized() const {
+ return (_mixerManager.MicrophoneIsInitialized());
}
-int32_t AudioDeviceMac::SpeakerVolumeIsAvailable(bool& available)
-{
+int32_t AudioDeviceMac::SpeakerVolumeIsAvailable(bool& available) {
+ bool wasInitialized = _mixerManager.SpeakerIsInitialized();
- bool wasInitialized = _mixerManager.SpeakerIsInitialized();
-
- // Make an attempt to open up the
- // output mixer corresponding to the currently selected output device.
- //
- if (!wasInitialized && InitSpeaker() == -1)
- {
- // If we end up here it means that the selected speaker has no volume
- // control.
- available = false;
- return 0;
- }
+ // Make an attempt to open up the
+ // output mixer corresponding to the currently selected output device.
+ //
+ if (!wasInitialized && InitSpeaker() == -1) {
+ // If we end up here it means that the selected speaker has no volume
+ // control.
+ available = false;
+ return 0;
+ }
- // Given that InitSpeaker was successful, we know that a volume control exists
- //
- available = true;
+ // Given that InitSpeaker was successful, we know that a volume control exists
+ //
+ available = true;
- // Close the initialized output mixer
- //
- if (!wasInitialized)
- {
- _mixerManager.CloseSpeaker();
- }
+ // Close the initialized output mixer
+ //
+ if (!wasInitialized) {
+ _mixerManager.CloseSpeaker();
+ }
- return 0;
+ return 0;
}
-int32_t AudioDeviceMac::SetSpeakerVolume(uint32_t volume)
-{
-
- return (_mixerManager.SetSpeakerVolume(volume));
+int32_t AudioDeviceMac::SetSpeakerVolume(uint32_t volume) {
+ return (_mixerManager.SetSpeakerVolume(volume));
}
-int32_t AudioDeviceMac::SpeakerVolume(uint32_t& volume) const
-{
+int32_t AudioDeviceMac::SpeakerVolume(uint32_t& volume) const {
+ uint32_t level(0);
- uint32_t level(0);
-
- if (_mixerManager.SpeakerVolume(level) == -1)
- {
- return -1;
- }
+ if (_mixerManager.SpeakerVolume(level) == -1) {
+ return -1;
+ }
- volume = level;
- return 0;
+ volume = level;
+ return 0;
}
int32_t AudioDeviceMac::SetWaveOutVolume(uint16_t volumeLeft,
- uint16_t volumeRight)
-{
-
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " API call not supported on this platform");
- return -1;
+ uint16_t volumeRight) {
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+ " API call not supported on this platform");
+ return -1;
}
-int32_t
-AudioDeviceMac::WaveOutVolume(uint16_t& /*volumeLeft*/,
- uint16_t& /*volumeRight*/) const
-{
-
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " API call not supported on this platform");
- return -1;
+int32_t AudioDeviceMac::WaveOutVolume(uint16_t& /*volumeLeft*/,
+ uint16_t& /*volumeRight*/) const {
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+ " API call not supported on this platform");
+ return -1;
}
-int32_t AudioDeviceMac::MaxSpeakerVolume(uint32_t& maxVolume) const
-{
+int32_t AudioDeviceMac::MaxSpeakerVolume(uint32_t& maxVolume) const {
+ uint32_t maxVol(0);
- uint32_t maxVol(0);
-
- if (_mixerManager.MaxSpeakerVolume(maxVol) == -1)
- {
- return -1;
- }
+ if (_mixerManager.MaxSpeakerVolume(maxVol) == -1) {
+ return -1;
+ }
- maxVolume = maxVol;
- return 0;
+ maxVolume = maxVol;
+ return 0;
}
-int32_t AudioDeviceMac::MinSpeakerVolume(uint32_t& minVolume) const
-{
-
- uint32_t minVol(0);
+int32_t AudioDeviceMac::MinSpeakerVolume(uint32_t& minVolume) const {
+ uint32_t minVol(0);
- if (_mixerManager.MinSpeakerVolume(minVol) == -1)
- {
- return -1;
- }
+ if (_mixerManager.MinSpeakerVolume(minVol) == -1) {
+ return -1;
+ }
- minVolume = minVol;
- return 0;
+ minVolume = minVol;
+ return 0;
}
-int32_t
-AudioDeviceMac::SpeakerVolumeStepSize(uint16_t& stepSize) const
-{
-
- uint16_t delta(0);
+int32_t AudioDeviceMac::SpeakerVolumeStepSize(uint16_t& stepSize) const {
+ uint16_t delta(0);
- if (_mixerManager.SpeakerVolumeStepSize(delta) == -1)
- {
- return -1;
- }
+ if (_mixerManager.SpeakerVolumeStepSize(delta) == -1) {
+ return -1;
+ }
- stepSize = delta;
- return 0;
+ stepSize = delta;
+ return 0;
}
-int32_t AudioDeviceMac::SpeakerMuteIsAvailable(bool& available)
-{
-
- bool isAvailable(false);
- bool wasInitialized = _mixerManager.SpeakerIsInitialized();
+int32_t AudioDeviceMac::SpeakerMuteIsAvailable(bool& available) {
+ bool isAvailable(false);
+ bool wasInitialized = _mixerManager.SpeakerIsInitialized();
- // Make an attempt to open up the
- // output mixer corresponding to the currently selected output device.
- //
- if (!wasInitialized && InitSpeaker() == -1)
- {
- // If we end up here it means that the selected speaker has no volume
- // control, hence it is safe to state that there is no mute control
- // already at this stage.
- available = false;
- return 0;
- }
+ // Make an attempt to open up the
+ // output mixer corresponding to the currently selected output device.
+ //
+ if (!wasInitialized && InitSpeaker() == -1) {
+ // If we end up here it means that the selected speaker has no volume
+ // control, hence it is safe to state that there is no mute control
+ // already at this stage.
+ available = false;
+ return 0;
+ }
- // Check if the selected speaker has a mute control
- //
- _mixerManager.SpeakerMuteIsAvailable(isAvailable);
+ // Check if the selected speaker has a mute control
+ //
+ _mixerManager.SpeakerMuteIsAvailable(isAvailable);
- available = isAvailable;
+ available = isAvailable;
- // Close the initialized output mixer
- //
- if (!wasInitialized)
- {
- _mixerManager.CloseSpeaker();
- }
+ // Close the initialized output mixer
+ //
+ if (!wasInitialized) {
+ _mixerManager.CloseSpeaker();
+ }
- return 0;
+ return 0;
}
-int32_t AudioDeviceMac::SetSpeakerMute(bool enable)
-{
- return (_mixerManager.SetSpeakerMute(enable));
+int32_t AudioDeviceMac::SetSpeakerMute(bool enable) {
+ return (_mixerManager.SetSpeakerMute(enable));
}
-int32_t AudioDeviceMac::SpeakerMute(bool& enabled) const
-{
+int32_t AudioDeviceMac::SpeakerMute(bool& enabled) const {
+ bool muted(0);
- bool muted(0);
-
- if (_mixerManager.SpeakerMute(muted) == -1)
- {
- return -1;
- }
+ if (_mixerManager.SpeakerMute(muted) == -1) {
+ return -1;
+ }
- enabled = muted;
- return 0;
+ enabled = muted;
+ return 0;
}
-int32_t AudioDeviceMac::MicrophoneMuteIsAvailable(bool& available)
-{
+int32_t AudioDeviceMac::MicrophoneMuteIsAvailable(bool& available) {
+ bool isAvailable(false);
+ bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
- bool isAvailable(false);
- bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
-
- // Make an attempt to open up the
- // input mixer corresponding to the currently selected input device.
- //
- if (!wasInitialized && InitMicrophone() == -1)
- {
- // If we end up here it means that the selected microphone has no volume
- // control, hence it is safe to state that there is no boost control
- // already at this stage.
- available = false;
- return 0;
- }
+ // Make an attempt to open up the
+ // input mixer corresponding to the currently selected input device.
+ //
+ if (!wasInitialized && InitMicrophone() == -1) {
+ // If we end up here it means that the selected microphone has no volume
+ // control, hence it is safe to state that there is no mute control
+ // already at this stage.
+ available = false;
+ return 0;
+ }
- // Check if the selected microphone has a mute control
- //
- _mixerManager.MicrophoneMuteIsAvailable(isAvailable);
- available = isAvailable;
+ // Check if the selected microphone has a mute control
+ //
+ _mixerManager.MicrophoneMuteIsAvailable(isAvailable);
+ available = isAvailable;
- // Close the initialized input mixer
- //
- if (!wasInitialized)
- {
- _mixerManager.CloseMicrophone();
- }
+ // Close the initialized input mixer
+ //
+ if (!wasInitialized) {
+ _mixerManager.CloseMicrophone();
+ }
- return 0;
+ return 0;
}
-int32_t AudioDeviceMac::SetMicrophoneMute(bool enable)
-{
- return (_mixerManager.SetMicrophoneMute(enable));
+int32_t AudioDeviceMac::SetMicrophoneMute(bool enable) {
+ return (_mixerManager.SetMicrophoneMute(enable));
}
-int32_t AudioDeviceMac::MicrophoneMute(bool& enabled) const
-{
+int32_t AudioDeviceMac::MicrophoneMute(bool& enabled) const {
+ bool muted(0);
- bool muted(0);
-
- if (_mixerManager.MicrophoneMute(muted) == -1)
- {
- return -1;
- }
+ if (_mixerManager.MicrophoneMute(muted) == -1) {
+ return -1;
+ }
- enabled = muted;
- return 0;
+ enabled = muted;
+ return 0;
}
-int32_t AudioDeviceMac::MicrophoneBoostIsAvailable(bool& available)
-{
-
- bool isAvailable(false);
- bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
+int32_t AudioDeviceMac::MicrophoneBoostIsAvailable(bool& available) {
+ bool isAvailable(false);
+ bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
- // Enumerate all avaliable microphone and make an attempt to open up the
- // input mixer corresponding to the currently selected input device.
- //
- if (!wasInitialized && InitMicrophone() == -1)
- {
- // If we end up here it means that the selected microphone has no volume
- // control, hence it is safe to state that there is no boost control
- // already at this stage.
- available = false;
- return 0;
- }
+ // Enumerate all available microphones and make an attempt to open up the
+ // input mixer corresponding to the currently selected input device.
+ //
+ if (!wasInitialized && InitMicrophone() == -1) {
+ // If we end up here it means that the selected microphone has no volume
+ // control, hence it is safe to state that there is no boost control
+ // already at this stage.
+ available = false;
+ return 0;
+ }
- // Check if the selected microphone has a boost control
- //
- _mixerManager.MicrophoneBoostIsAvailable(isAvailable);
- available = isAvailable;
+ // Check if the selected microphone has a boost control
+ //
+ _mixerManager.MicrophoneBoostIsAvailable(isAvailable);
+ available = isAvailable;
- // Close the initialized input mixer
- //
- if (!wasInitialized)
- {
- _mixerManager.CloseMicrophone();
- }
+ // Close the initialized input mixer
+ //
+ if (!wasInitialized) {
+ _mixerManager.CloseMicrophone();
+ }
- return 0;
+ return 0;
}
-int32_t AudioDeviceMac::SetMicrophoneBoost(bool enable)
-{
-
- return (_mixerManager.SetMicrophoneBoost(enable));
+int32_t AudioDeviceMac::SetMicrophoneBoost(bool enable) {
+ return (_mixerManager.SetMicrophoneBoost(enable));
}
-int32_t AudioDeviceMac::MicrophoneBoost(bool& enabled) const
-{
-
- bool onOff(0);
+int32_t AudioDeviceMac::MicrophoneBoost(bool& enabled) const {
+ bool onOff(0);
- if (_mixerManager.MicrophoneBoost(onOff) == -1)
- {
- return -1;
- }
+ if (_mixerManager.MicrophoneBoost(onOff) == -1) {
+ return -1;
+ }
- enabled = onOff;
- return 0;
+ enabled = onOff;
+ return 0;
}
-int32_t AudioDeviceMac::StereoRecordingIsAvailable(bool& available)
-{
-
- bool isAvailable(false);
- bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
-
- if (!wasInitialized && InitMicrophone() == -1)
- {
- // Cannot open the specified device
- available = false;
- return 0;
- }
-
- // Check if the selected microphone can record stereo
- //
- _mixerManager.StereoRecordingIsAvailable(isAvailable);
- available = isAvailable;
-
- // Close the initialized input mixer
- //
- if (!wasInitialized)
- {
- _mixerManager.CloseMicrophone();
- }
+int32_t AudioDeviceMac::StereoRecordingIsAvailable(bool& available) {
+ bool isAvailable(false);
+ bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
+ if (!wasInitialized && InitMicrophone() == -1) {
+ // Cannot open the specified device
+ available = false;
return 0;
-}
+ }
-int32_t AudioDeviceMac::SetStereoRecording(bool enable)
-{
+ // Check if the selected microphone can record stereo
+ //
+ _mixerManager.StereoRecordingIsAvailable(isAvailable);
+ available = isAvailable;
- if (enable)
- _recChannels = 2;
- else
- _recChannels = 1;
+ // Close the initialized input mixer
+ //
+ if (!wasInitialized) {
+ _mixerManager.CloseMicrophone();
+ }
- return 0;
+ return 0;
}
-int32_t AudioDeviceMac::StereoRecording(bool& enabled) const
-{
+int32_t AudioDeviceMac::SetStereoRecording(bool enable) {
+ if (enable)
+ _recChannels = 2;
+ else
+ _recChannels = 1;
- if (_recChannels == 2)
- enabled = true;
- else
- enabled = false;
-
- return 0;
+ return 0;
}
-int32_t AudioDeviceMac::StereoPlayoutIsAvailable(bool& available)
-{
-
- bool isAvailable(false);
- bool wasInitialized = _mixerManager.SpeakerIsInitialized();
+int32_t AudioDeviceMac::StereoRecording(bool& enabled) const {
+ if (_recChannels == 2)
+ enabled = true;
+ else
+ enabled = false;
- if (!wasInitialized && InitSpeaker() == -1)
- {
- // Cannot open the specified device
- available = false;
- return 0;
- }
-
- // Check if the selected microphone can record stereo
- //
- _mixerManager.StereoPlayoutIsAvailable(isAvailable);
- available = isAvailable;
+ return 0;
+}
- // Close the initialized input mixer
- //
- if (!wasInitialized)
- {
- _mixerManager.CloseSpeaker();
- }
+int32_t AudioDeviceMac::StereoPlayoutIsAvailable(bool& available) {
+ bool isAvailable(false);
+ bool wasInitialized = _mixerManager.SpeakerIsInitialized();
+ if (!wasInitialized && InitSpeaker() == -1) {
+ // Cannot open the specified device
+ available = false;
return 0;
-}
+ }
-int32_t AudioDeviceMac::SetStereoPlayout(bool enable)
-{
+ // Check if the selected speaker can play out stereo
+ //
+ _mixerManager.StereoPlayoutIsAvailable(isAvailable);
+ available = isAvailable;
- if (enable)
- _playChannels = 2;
- else
- _playChannels = 1;
+ // Close the initialized output mixer
+ //
+ if (!wasInitialized) {
+ _mixerManager.CloseSpeaker();
+ }
- return 0;
+ return 0;
}
-int32_t AudioDeviceMac::StereoPlayout(bool& enabled) const
-{
-
- if (_playChannels == 2)
- enabled = true;
- else
- enabled = false;
+int32_t AudioDeviceMac::SetStereoPlayout(bool enable) {
+ if (enable)
+ _playChannels = 2;
+ else
+ _playChannels = 1;
- return 0;
+ return 0;
}
-int32_t AudioDeviceMac::SetAGC(bool enable)
-{
-
- _AGC = enable;
+int32_t AudioDeviceMac::StereoPlayout(bool& enabled) const {
+ if (_playChannels == 2)
+ enabled = true;
+ else
+ enabled = false;
- return 0;
+ return 0;
}
-bool AudioDeviceMac::AGC() const
-{
+int32_t AudioDeviceMac::SetAGC(bool enable) {
+ _AGC = enable;
- return _AGC;
+ return 0;
}
-int32_t AudioDeviceMac::MicrophoneVolumeIsAvailable(bool& available)
-{
+bool AudioDeviceMac::AGC() const {
+ return _AGC;
+}
- bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
+int32_t AudioDeviceMac::MicrophoneVolumeIsAvailable(bool& available) {
+ bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
- // Make an attempt to open up the
- // input mixer corresponding to the currently selected output device.
- //
- if (!wasInitialized && InitMicrophone() == -1)
- {
- // If we end up here it means that the selected microphone has no volume
- // control.
- available = false;
- return 0;
- }
+ // Make an attempt to open up the
+ // input mixer corresponding to the currently selected input device.
+ //
+ if (!wasInitialized && InitMicrophone() == -1) {
+ // If we end up here it means that the selected microphone has no volume
+ // control.
+ available = false;
+ return 0;
+ }
- // Given that InitMicrophone was successful, we know that a volume control
- // exists
- //
- available = true;
+ // Given that InitMicrophone was successful, we know that a volume control
+ // exists
+ //
+ available = true;
- // Close the initialized input mixer
- //
- if (!wasInitialized)
- {
- _mixerManager.CloseMicrophone();
- }
+ // Close the initialized input mixer
+ //
+ if (!wasInitialized) {
+ _mixerManager.CloseMicrophone();
+ }
- return 0;
+ return 0;
}
-int32_t AudioDeviceMac::SetMicrophoneVolume(uint32_t volume)
-{
-
- return (_mixerManager.SetMicrophoneVolume(volume));
+int32_t AudioDeviceMac::SetMicrophoneVolume(uint32_t volume) {
+ return (_mixerManager.SetMicrophoneVolume(volume));
}
-int32_t AudioDeviceMac::MicrophoneVolume(uint32_t& volume) const
-{
+int32_t AudioDeviceMac::MicrophoneVolume(uint32_t& volume) const {
+ uint32_t level(0);
- uint32_t level(0);
-
- if (_mixerManager.MicrophoneVolume(level) == -1)
- {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " failed to retrive current microphone level");
- return -1;
- }
+ if (_mixerManager.MicrophoneVolume(level) == -1) {
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+ " failed to retrive current microphone level");
+ return -1;
+ }
- volume = level;
- return 0;
+ volume = level;
+ return 0;
}
-int32_t
-AudioDeviceMac::MaxMicrophoneVolume(uint32_t& maxVolume) const
-{
-
- uint32_t maxVol(0);
+int32_t AudioDeviceMac::MaxMicrophoneVolume(uint32_t& maxVolume) const {
+ uint32_t maxVol(0);
- if (_mixerManager.MaxMicrophoneVolume(maxVol) == -1)
- {
- return -1;
- }
+ if (_mixerManager.MaxMicrophoneVolume(maxVol) == -1) {
+ return -1;
+ }
- maxVolume = maxVol;
- return 0;
+ maxVolume = maxVol;
+ return 0;
}
-int32_t
-AudioDeviceMac::MinMicrophoneVolume(uint32_t& minVolume) const
-{
-
- uint32_t minVol(0);
+int32_t AudioDeviceMac::MinMicrophoneVolume(uint32_t& minVolume) const {
+ uint32_t minVol(0);
- if (_mixerManager.MinMicrophoneVolume(minVol) == -1)
- {
- return -1;
- }
+ if (_mixerManager.MinMicrophoneVolume(minVol) == -1) {
+ return -1;
+ }
- minVolume = minVol;
- return 0;
+ minVolume = minVol;
+ return 0;
}
-int32_t
-AudioDeviceMac::MicrophoneVolumeStepSize(uint16_t& stepSize) const
-{
-
- uint16_t delta(0);
+int32_t AudioDeviceMac::MicrophoneVolumeStepSize(uint16_t& stepSize) const {
+ uint16_t delta(0);
- if (_mixerManager.MicrophoneVolumeStepSize(delta) == -1)
- {
- return -1;
- }
+ if (_mixerManager.MicrophoneVolumeStepSize(delta) == -1) {
+ return -1;
+ }
- stepSize = delta;
- return 0;
+ stepSize = delta;
+ return 0;
}
-int16_t AudioDeviceMac::PlayoutDevices()
-{
-
- AudioDeviceID playDevices[MaxNumberDevices];
- return GetNumberDevices(kAudioDevicePropertyScopeOutput, playDevices,
- MaxNumberDevices);
+int16_t AudioDeviceMac::PlayoutDevices() {
+ AudioDeviceID playDevices[MaxNumberDevices];
+ return GetNumberDevices(kAudioDevicePropertyScopeOutput, playDevices,
+ MaxNumberDevices);
}
-int32_t AudioDeviceMac::SetPlayoutDevice(uint16_t index)
-{
- CriticalSectionScoped lock(&_critSect);
+int32_t AudioDeviceMac::SetPlayoutDevice(uint16_t index) {
+ CriticalSectionScoped lock(&_critSect);
- if (_playIsInitialized)
- {
- return -1;
- }
+ if (_playIsInitialized) {
+ return -1;
+ }
- AudioDeviceID playDevices[MaxNumberDevices];
- uint32_t nDevices = GetNumberDevices(kAudioDevicePropertyScopeOutput,
- playDevices, MaxNumberDevices);
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- " number of availiable waveform-audio output devices is %u",
- nDevices);
-
- if (index > (nDevices - 1))
- {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- " device index is out of range [0,%u]", (nDevices - 1));
- return -1;
- }
+ AudioDeviceID playDevices[MaxNumberDevices];
+ uint32_t nDevices = GetNumberDevices(kAudioDevicePropertyScopeOutput,
+ playDevices, MaxNumberDevices);
+ WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+ " number of availiable waveform-audio output devices is %u",
+ nDevices);
- _outputDeviceIndex = index;
- _outputDeviceIsSpecified = true;
+ if (index > (nDevices - 1)) {
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+ " device index is out of range [0,%u]", (nDevices - 1));
+ return -1;
+ }
- return 0;
+ _outputDeviceIndex = index;
+ _outputDeviceIsSpecified = true;
+
+ return 0;
}
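
One caveat about the range check above: nDevices is unsigned, so when GetNumberDevices() returns 0 the expression nDevices - 1 wraps around and index > (nDevices - 1) is false for every index. A sketch of an equivalent check without the wraparound hazard (hypothetical; not part of this change):

    #include <cstdint>

    // True iff index addresses one of nDevices devices.
    static bool IsValidDeviceIndex(uint16_t index, uint32_t nDevices) {
      return nDevices != 0 && index < nDevices;  // no subtraction, no wrap
    }

    int main() {
      return IsValidDeviceIndex(0, 0) ? 1 : 0;  // 0 devices -> invalid
    }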
int32_t AudioDeviceMac::SetPlayoutDevice(
- AudioDeviceModule::WindowsDeviceType /*device*/)
-{
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- "WindowsDeviceType not supported");
- return -1;
+ AudioDeviceModule::WindowsDeviceType /*device*/) {
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+ "WindowsDeviceType not supported");
+ return -1;
}
-int32_t AudioDeviceMac::PlayoutDeviceName(
- uint16_t index,
- char name[kAdmMaxDeviceNameSize],
- char guid[kAdmMaxGuidSize])
-{
+int32_t AudioDeviceMac::PlayoutDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) {
+ const uint16_t nDevices(PlayoutDevices());
- const uint16_t nDevices(PlayoutDevices());
-
- if ((index > (nDevices - 1)) || (name == NULL))
- {
- return -1;
- }
+ if ((index > (nDevices - 1)) || (name == NULL)) {
+ return -1;
+ }
- memset(name, 0, kAdmMaxDeviceNameSize);
+ memset(name, 0, kAdmMaxDeviceNameSize);
- if (guid != NULL)
- {
- memset(guid, 0, kAdmMaxGuidSize);
- }
+ if (guid != NULL) {
+ memset(guid, 0, kAdmMaxGuidSize);
+ }
- return GetDeviceName(kAudioDevicePropertyScopeOutput, index, name);
+ return GetDeviceName(kAudioDevicePropertyScopeOutput, index, name);
}
-int32_t AudioDeviceMac::RecordingDeviceName(
- uint16_t index,
- char name[kAdmMaxDeviceNameSize],
- char guid[kAdmMaxGuidSize])
-{
-
- const uint16_t nDevices(RecordingDevices());
+int32_t AudioDeviceMac::RecordingDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) {
+ const uint16_t nDevices(RecordingDevices());
- if ((index > (nDevices - 1)) || (name == NULL))
- {
- return -1;
- }
+ if ((index > (nDevices - 1)) || (name == NULL)) {
+ return -1;
+ }
- memset(name, 0, kAdmMaxDeviceNameSize);
+ memset(name, 0, kAdmMaxDeviceNameSize);
- if (guid != NULL)
- {
- memset(guid, 0, kAdmMaxGuidSize);
- }
+ if (guid != NULL) {
+ memset(guid, 0, kAdmMaxGuidSize);
+ }
- return GetDeviceName(kAudioDevicePropertyScopeInput, index, name);
+ return GetDeviceName(kAudioDevicePropertyScopeInput, index, name);
}
-int16_t AudioDeviceMac::RecordingDevices()
-{
-
- AudioDeviceID recDevices[MaxNumberDevices];
- return GetNumberDevices(kAudioDevicePropertyScopeInput, recDevices,
- MaxNumberDevices);
+int16_t AudioDeviceMac::RecordingDevices() {
+ AudioDeviceID recDevices[MaxNumberDevices];
+ return GetNumberDevices(kAudioDevicePropertyScopeInput, recDevices,
+ MaxNumberDevices);
}
-int32_t AudioDeviceMac::SetRecordingDevice(uint16_t index)
-{
+int32_t AudioDeviceMac::SetRecordingDevice(uint16_t index) {
+ if (_recIsInitialized) {
+ return -1;
+ }
- if (_recIsInitialized)
- {
- return -1;
- }
+ AudioDeviceID recDevices[MaxNumberDevices];
+ uint32_t nDevices = GetNumberDevices(kAudioDevicePropertyScopeInput,
+ recDevices, MaxNumberDevices);
+ WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+ " number of availiable waveform-audio input devices is %u",
+ nDevices);
- AudioDeviceID recDevices[MaxNumberDevices];
- uint32_t nDevices = GetNumberDevices(kAudioDevicePropertyScopeInput,
- recDevices, MaxNumberDevices);
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- " number of availiable waveform-audio input devices is %u",
- nDevices);
-
- if (index > (nDevices - 1))
- {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- " device index is out of range [0,%u]", (nDevices - 1));
- return -1;
- }
+ if (index > (nDevices - 1)) {
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+ " device index is out of range [0,%u]", (nDevices - 1));
+ return -1;
+ }
- _inputDeviceIndex = index;
- _inputDeviceIsSpecified = true;
+ _inputDeviceIndex = index;
+ _inputDeviceIsSpecified = true;
- return 0;
+ return 0;
}
-
-int32_t
-AudioDeviceMac::SetRecordingDevice(AudioDeviceModule::WindowsDeviceType /*device*/)
-{
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- "WindowsDeviceType not supported");
- return -1;
+int32_t AudioDeviceMac::SetRecordingDevice(
+ AudioDeviceModule::WindowsDeviceType /*device*/) {
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+ "WindowsDeviceType not supported");
+ return -1;
}
-int32_t AudioDeviceMac::PlayoutIsAvailable(bool& available)
-{
-
- available = true;
+int32_t AudioDeviceMac::PlayoutIsAvailable(bool& available) {
+ available = true;
- // Try to initialize the playout side
- if (InitPlayout() == -1)
- {
- available = false;
- }
+ // Try to initialize the playout side
+ if (InitPlayout() == -1) {
+ available = false;
+ }
- // We destroy the IOProc created by InitPlayout() in implDeviceIOProc().
- // We must actually start playout here in order to have the IOProc
- // deleted by calling StopPlayout().
- if (StartPlayout() == -1)
- {
- available = false;
- }
+ // We destroy the IOProc created by InitPlayout() in implDeviceIOProc().
+ // We must actually start playout here in order to have the IOProc
+ // deleted by calling StopPlayout().
+ if (StartPlayout() == -1) {
+ available = false;
+ }
- // Cancel effect of initialization
- if (StopPlayout() == -1)
- {
- available = false;
- }
+ // Cancel effect of initialization
+ if (StopPlayout() == -1) {
+ available = false;
+ }
- return 0;
+ return 0;
}
-int32_t AudioDeviceMac::RecordingIsAvailable(bool& available)
-{
-
- available = true;
+int32_t AudioDeviceMac::RecordingIsAvailable(bool& available) {
+ available = true;
- // Try to initialize the recording side
- if (InitRecording() == -1)
- {
- available = false;
- }
+ // Try to initialize the recording side
+ if (InitRecording() == -1) {
+ available = false;
+ }
- // We destroy the IOProc created by InitRecording() in implInDeviceIOProc().
- // We must actually start recording here in order to have the IOProc
- // deleted by calling StopRecording().
- if (StartRecording() == -1)
- {
- available = false;
- }
+ // We destroy the IOProc created by InitRecording() in implInDeviceIOProc().
+ // We must actually start recording here in order to have the IOProc
+ // deleted by calling StopRecording().
+ if (StartRecording() == -1) {
+ available = false;
+ }
- // Cancel effect of initialization
- if (StopRecording() == -1)
- {
- available = false;
- }
+ // Cancel effect of initialization
+ if (StopRecording() == -1) {
+ available = false;
+ }
- return 0;
+ return 0;
}
-int32_t AudioDeviceMac::InitPlayout()
-{
- CriticalSectionScoped lock(&_critSect);
+int32_t AudioDeviceMac::InitPlayout() {
+ CriticalSectionScoped lock(&_critSect);
- if (_playing)
- {
- return -1;
- }
+ if (_playing) {
+ return -1;
+ }
- if (!_outputDeviceIsSpecified)
- {
- return -1;
- }
+ if (!_outputDeviceIsSpecified) {
+ return -1;
+ }
- if (_playIsInitialized)
- {
- return 0;
- }
+ if (_playIsInitialized) {
+ return 0;
+ }
- // Initialize the speaker (devices might have been added or removed)
- if (InitSpeaker() == -1)
- {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " InitSpeaker() failed");
- }
+ // Initialize the speaker (devices might have been added or removed)
+ if (InitSpeaker() == -1) {
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+ " InitSpeaker() failed");
+ }
- if (!MicrophoneIsInitialized())
- {
- // Make this call to check if we are using
- // one or two devices (_twoDevices)
- bool available = false;
- if (MicrophoneIsAvailable(available) == -1)
- {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " MicrophoneIsAvailable() failed");
- }
+ if (!MicrophoneIsInitialized()) {
+ // Make this call to check if we are using
+ // one or two devices (_twoDevices)
+ bool available = false;
+ if (MicrophoneIsAvailable(available) == -1) {
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+ " MicrophoneIsAvailable() failed");
}
+ }
- PaUtil_FlushRingBuffer(_paRenderBuffer);
-
- OSStatus err = noErr;
- UInt32 size = 0;
- _renderDelayOffsetSamples = 0;
- _renderDelayUs = 0;
- _renderLatencyUs = 0;
- _renderDeviceIsAlive = 1;
- _doStop = false;
+ PaUtil_FlushRingBuffer(_paRenderBuffer);
+
+ OSStatus err = noErr;
+ UInt32 size = 0;
+ _renderDelayOffsetSamples = 0;
+ _renderDelayUs = 0;
+ _renderLatencyUs = 0;
+ _renderDeviceIsAlive = 1;
+ _doStop = false;
+
+ // The internal microphone of a MacBook Pro is located under the left speaker
+ // grille. When the internal speakers are in use, we want to pan the
+ // stereo output fully to the right.
+ AudioObjectPropertyAddress propertyAddress = {
+ kAudioDevicePropertyDataSource, kAudioDevicePropertyScopeOutput, 0};
+ if (_macBookPro) {
+ _macBookProPanRight = false;
+ Boolean hasProperty =
+ AudioObjectHasProperty(_outputDeviceID, &propertyAddress);
+ if (hasProperty) {
+ UInt32 dataSource = 0;
+ size = sizeof(dataSource);
+ WEBRTC_CA_LOG_WARN(AudioObjectGetPropertyData(
+ _outputDeviceID, &propertyAddress, 0, NULL, &size, &dataSource));
+
+ if (dataSource == 'ispk') {
+ _macBookProPanRight = true;
+ WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+ "MacBook Pro using internal speakers; stereo"
+ " panning right");
+ } else {
+ WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+ "MacBook Pro not using internal speakers");
+ }
- // The internal microphone of a MacBook Pro is located under the left speaker
- // grille. When the internal speakers are in use, we want to fully stereo
- // pan to the right.
- AudioObjectPropertyAddress
- propertyAddress = { kAudioDevicePropertyDataSource,
- kAudioDevicePropertyScopeOutput, 0 };
- if (_macBookPro)
- {
- _macBookProPanRight = false;
- Boolean hasProperty = AudioObjectHasProperty(_outputDeviceID,
- &propertyAddress);
- if (hasProperty)
- {
- UInt32 dataSource = 0;
- size = sizeof(dataSource);
- WEBRTC_CA_LOG_WARN(AudioObjectGetPropertyData(_outputDeviceID,
- &propertyAddress, 0, NULL, &size, &dataSource));
-
- if (dataSource == 'ispk')
- {
- _macBookProPanRight = true;
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice,
- _id,
- "MacBook Pro using internal speakers; stereo"
- " panning right");
- } else
- {
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice,
- _id, "MacBook Pro not using internal speakers");
- }
-
- // Add a listener to determine if the status changes.
- WEBRTC_CA_LOG_WARN(AudioObjectAddPropertyListener(_outputDeviceID,
- &propertyAddress, &objectListenerProc, this));
- }
+ // Add a listener to determine if the status changes.
+ WEBRTC_CA_LOG_WARN(AudioObjectAddPropertyListener(
+ _outputDeviceID, &propertyAddress, &objectListenerProc, this));
}
+ }
- // Get current stream description
- propertyAddress.mSelector = kAudioDevicePropertyStreamFormat;
- memset(&_outStreamFormat, 0, sizeof(_outStreamFormat));
- size = sizeof(_outStreamFormat);
- WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_outputDeviceID,
- &propertyAddress, 0, NULL, &size, &_outStreamFormat));
-
- if (_outStreamFormat.mFormatID != kAudioFormatLinearPCM)
- {
- logCAMsg(kTraceError, kTraceAudioDevice, _id,
- "Unacceptable output stream format -> mFormatID",
- (const char *) &_outStreamFormat.mFormatID);
- return -1;
- }
+ // Get current stream description
+ propertyAddress.mSelector = kAudioDevicePropertyStreamFormat;
+ memset(&_outStreamFormat, 0, sizeof(_outStreamFormat));
+ size = sizeof(_outStreamFormat);
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+ _outputDeviceID, &propertyAddress, 0, NULL, &size, &_outStreamFormat));
+
+ if (_outStreamFormat.mFormatID != kAudioFormatLinearPCM) {
+ logCAMsg(kTraceError, kTraceAudioDevice, _id,
+ "Unacceptable output stream format -> mFormatID",
+ (const char*)&_outStreamFormat.mFormatID);
+ return -1;
+ }
- if (_outStreamFormat.mChannelsPerFrame > N_DEVICE_CHANNELS)
- {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- "Too many channels on output device (mChannelsPerFrame = %d)",
- _outStreamFormat.mChannelsPerFrame);
- return -1;
- }
+ if (_outStreamFormat.mChannelsPerFrame > N_DEVICE_CHANNELS) {
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+ "Too many channels on output device (mChannelsPerFrame = %d)",
+ _outStreamFormat.mChannelsPerFrame);
+ return -1;
+ }
- if (_outStreamFormat.mFormatFlags & kAudioFormatFlagIsNonInterleaved)
- {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- "Non-interleaved audio data is not supported.",
- "AudioHardware streams should not have this format.");
- return -1;
- }
+ if (_outStreamFormat.mFormatFlags & kAudioFormatFlagIsNonInterleaved) {
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+ "Non-interleaved audio data is not supported.",
+ "AudioHardware streams should not have this format.");
+ return -1;
+ }
+ WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "Ouput stream format:");
+ WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+ "mSampleRate = %f, mChannelsPerFrame = %u",
+ _outStreamFormat.mSampleRate,
+ _outStreamFormat.mChannelsPerFrame);
+ WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+ "mBytesPerPacket = %u, mFramesPerPacket = %u",
+ _outStreamFormat.mBytesPerPacket,
+ _outStreamFormat.mFramesPerPacket);
+ WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+ "mBytesPerFrame = %u, mBitsPerChannel = %u",
+ _outStreamFormat.mBytesPerFrame,
+ _outStreamFormat.mBitsPerChannel);
+ WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "mFormatFlags = %u",
+ _outStreamFormat.mFormatFlags);
+ logCAMsg(kTraceInfo, kTraceAudioDevice, _id, "mFormatID",
+ (const char*)&_outStreamFormat.mFormatID);
+
+ // Our preferred format to work with.
+ if (_outStreamFormat.mChannelsPerFrame < 2) {
+ // Disable stereo playout when we only have one channel on the device.
+ _playChannels = 1;
WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- "Ouput stream format:");
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- "mSampleRate = %f, mChannelsPerFrame = %u",
- _outStreamFormat.mSampleRate,
- _outStreamFormat.mChannelsPerFrame);
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- "mBytesPerPacket = %u, mFramesPerPacket = %u",
- _outStreamFormat.mBytesPerPacket,
- _outStreamFormat.mFramesPerPacket);
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- "mBytesPerFrame = %u, mBitsPerChannel = %u",
- _outStreamFormat.mBytesPerFrame,
- _outStreamFormat.mBitsPerChannel);
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- "mFormatFlags = %u",
- _outStreamFormat.mFormatFlags);
- logCAMsg(kTraceInfo, kTraceAudioDevice, _id, "mFormatID",
- (const char *) &_outStreamFormat.mFormatID);
-
- // Our preferred format to work with.
- if (_outStreamFormat.mChannelsPerFrame < 2)
- {
- // Disable stereo playout when we only have one channel on the device.
- _playChannels = 1;
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- "Stereo playout unavailable on this device");
- }
- WEBRTC_CA_RETURN_ON_ERR(SetDesiredPlayoutFormat());
+ "Stereo playout unavailable on this device");
+ }
+ WEBRTC_CA_RETURN_ON_ERR(SetDesiredPlayoutFormat());
- // Listen for format changes.
- propertyAddress.mSelector = kAudioDevicePropertyStreamFormat;
- WEBRTC_CA_RETURN_ON_ERR(AudioObjectAddPropertyListener(_outputDeviceID,
- &propertyAddress,
- &objectListenerProc,
- this));
-
- // Listen for processor overloads.
- propertyAddress.mSelector = kAudioDeviceProcessorOverload;
- WEBRTC_CA_LOG_WARN(AudioObjectAddPropertyListener(_outputDeviceID,
- &propertyAddress,
- &objectListenerProc,
- this));
-
- if (_twoDevices || !_recIsInitialized)
- {
- WEBRTC_CA_RETURN_ON_ERR(AudioDeviceCreateIOProcID(_outputDeviceID,
- deviceIOProc,
- this,
- &_deviceIOProcID));
- }
+ // Listen for format changes.
+ propertyAddress.mSelector = kAudioDevicePropertyStreamFormat;
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectAddPropertyListener(
+ _outputDeviceID, &propertyAddress, &objectListenerProc, this));
- _playIsInitialized = true;
+ // Listen for processor overloads.
+ propertyAddress.mSelector = kAudioDeviceProcessorOverload;
+ WEBRTC_CA_LOG_WARN(AudioObjectAddPropertyListener(
+ _outputDeviceID, &propertyAddress, &objectListenerProc, this));
- return 0;
+ if (_twoDevices || !_recIsInitialized) {
+ WEBRTC_CA_RETURN_ON_ERR(AudioDeviceCreateIOProcID(
+ _outputDeviceID, deviceIOProc, this, &_deviceIOProcID));
+ }
+
+ _playIsInitialized = true;
+
+ return 0;
}
-int32_t AudioDeviceMac::InitRecording()
-{
+int32_t AudioDeviceMac::InitRecording() {
+ CriticalSectionScoped lock(&_critSect);
- CriticalSectionScoped lock(&_critSect);
+ if (_recording) {
+ return -1;
+ }
- if (_recording)
- {
- return -1;
- }
+ if (!_inputDeviceIsSpecified) {
+ return -1;
+ }
- if (!_inputDeviceIsSpecified)
- {
- return -1;
- }
+ if (_recIsInitialized) {
+ return 0;
+ }
- if (_recIsInitialized)
- {
- return 0;
- }
+ // Initialize the microphone (devices might have been added or removed)
+ if (InitMicrophone() == -1) {
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+ " InitMicrophone() failed");
+ }
- // Initialize the microphone (devices might have been added or removed)
- if (InitMicrophone() == -1)
- {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " InitMicrophone() failed");
+ if (!SpeakerIsInitialized()) {
+ // Make this call to check if we are using
+ // one or two devices (_twoDevices)
+ bool available = false;
+ if (SpeakerIsAvailable(available) == -1) {
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+ " SpeakerIsAvailable() failed");
}
+ }
- if (!SpeakerIsInitialized())
- {
- // Make this call to check if we are using
- // one or two devices (_twoDevices)
- bool available = false;
- if (SpeakerIsAvailable(available) == -1)
- {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " SpeakerIsAvailable() failed");
- }
- }
+ OSStatus err = noErr;
+ UInt32 size = 0;
- OSStatus err = noErr;
- UInt32 size = 0;
-
- PaUtil_FlushRingBuffer(_paCaptureBuffer);
-
- _captureDelayUs = 0;
- _captureLatencyUs = 0;
- _captureDeviceIsAlive = 1;
- _doStopRec = false;
-
- // Get current stream description
- AudioObjectPropertyAddress
- propertyAddress = { kAudioDevicePropertyStreamFormat,
- kAudioDevicePropertyScopeInput, 0 };
- memset(&_inStreamFormat, 0, sizeof(_inStreamFormat));
- size = sizeof(_inStreamFormat);
- WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_inputDeviceID,
- &propertyAddress, 0, NULL, &size, &_inStreamFormat));
-
- if (_inStreamFormat.mFormatID != kAudioFormatLinearPCM)
- {
- logCAMsg(kTraceError, kTraceAudioDevice, _id,
- "Unacceptable input stream format -> mFormatID",
- (const char *) &_inStreamFormat.mFormatID);
- return -1;
- }
+ PaUtil_FlushRingBuffer(_paCaptureBuffer);
- if (_inStreamFormat.mChannelsPerFrame > N_DEVICE_CHANNELS)
- {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- "Too many channels on input device (mChannelsPerFrame = %d)",
- _inStreamFormat.mChannelsPerFrame);
- return -1;
- }
+ _captureDelayUs = 0;
+ _captureLatencyUs = 0;
+ _captureDeviceIsAlive = 1;
+ _doStopRec = false;
- const int io_block_size_samples = _inStreamFormat.mChannelsPerFrame *
- _inStreamFormat.mSampleRate / 100 * N_BLOCKS_IO;
- if (io_block_size_samples > _captureBufSizeSamples)
- {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- "Input IO block size (%d) is larger than ring buffer (%u)",
- io_block_size_samples, _captureBufSizeSamples);
- return -1;
- }
+ // Get current stream description
+ AudioObjectPropertyAddress propertyAddress = {
+ kAudioDevicePropertyStreamFormat, kAudioDevicePropertyScopeInput, 0};
+ memset(&_inStreamFormat, 0, sizeof(_inStreamFormat));
+ size = sizeof(_inStreamFormat);
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+ _inputDeviceID, &propertyAddress, 0, NULL, &size, &_inStreamFormat));
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- " Input stream format:");
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- " mSampleRate = %f, mChannelsPerFrame = %u",
- _inStreamFormat.mSampleRate, _inStreamFormat.mChannelsPerFrame);
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- " mBytesPerPacket = %u, mFramesPerPacket = %u",
- _inStreamFormat.mBytesPerPacket,
- _inStreamFormat.mFramesPerPacket);
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- " mBytesPerFrame = %u, mBitsPerChannel = %u",
- _inStreamFormat.mBytesPerFrame,
- _inStreamFormat.mBitsPerChannel);
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- " mFormatFlags = %u",
- _inStreamFormat.mFormatFlags);
- logCAMsg(kTraceInfo, kTraceAudioDevice, _id, "mFormatID",
- (const char *) &_inStreamFormat.mFormatID);
+ if (_inStreamFormat.mFormatID != kAudioFormatLinearPCM) {
+ logCAMsg(kTraceError, kTraceAudioDevice, _id,
+ "Unacceptable input stream format -> mFormatID",
+ (const char*)&_inStreamFormat.mFormatID);
+ return -1;
+ }
- // Our preferred format to work with
- if (_inStreamFormat.mChannelsPerFrame >= 2 && (_recChannels == 2))
- {
- _inDesiredFormat.mChannelsPerFrame = 2;
- } else
- {
- // Disable stereo recording when we only have one channel on the device.
- _inDesiredFormat.mChannelsPerFrame = 1;
- _recChannels = 1;
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- "Stereo recording unavailable on this device");
- }
+ if (_inStreamFormat.mChannelsPerFrame > N_DEVICE_CHANNELS) {
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+ "Too many channels on input device (mChannelsPerFrame = %d)",
+ _inStreamFormat.mChannelsPerFrame);
+ return -1;
+ }
- if (_ptrAudioBuffer)
- {
- // Update audio buffer with the selected parameters
- _ptrAudioBuffer->SetRecordingSampleRate(N_REC_SAMPLES_PER_SEC);
- _ptrAudioBuffer->SetRecordingChannels((uint8_t) _recChannels);
- }
+ const int io_block_size_samples = _inStreamFormat.mChannelsPerFrame *
+ _inStreamFormat.mSampleRate / 100 *
+ N_BLOCKS_IO;
+ if (io_block_size_samples > _captureBufSizeSamples) {
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+ "Input IO block size (%d) is larger than ring buffer (%u)",
+ io_block_size_samples, _captureBufSizeSamples);
+ return -1;
+ }
+
+ WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, " Input stream format:");
+ WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+ " mSampleRate = %f, mChannelsPerFrame = %u",
+ _inStreamFormat.mSampleRate, _inStreamFormat.mChannelsPerFrame);
+ WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+ " mBytesPerPacket = %u, mFramesPerPacket = %u",
+ _inStreamFormat.mBytesPerPacket,
+ _inStreamFormat.mFramesPerPacket);
+ WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+ " mBytesPerFrame = %u, mBitsPerChannel = %u",
+ _inStreamFormat.mBytesPerFrame, _inStreamFormat.mBitsPerChannel);
+ WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, " mFormatFlags = %u",
+ _inStreamFormat.mFormatFlags);
+ logCAMsg(kTraceInfo, kTraceAudioDevice, _id, "mFormatID",
+ (const char*)&_inStreamFormat.mFormatID);
+
+ // Our preferred format to work with
+ if (_inStreamFormat.mChannelsPerFrame >= 2 && (_recChannels == 2)) {
+ _inDesiredFormat.mChannelsPerFrame = 2;
+ } else {
+ // Disable stereo recording when we only have one channel on the device.
+ _inDesiredFormat.mChannelsPerFrame = 1;
+ _recChannels = 1;
+ WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+ "Stereo recording unavailable on this device");
+ }
+
+ if (_ptrAudioBuffer) {
+ // Update audio buffer with the selected parameters
+ _ptrAudioBuffer->SetRecordingSampleRate(N_REC_SAMPLES_PER_SEC);
+ _ptrAudioBuffer->SetRecordingChannels((uint8_t)_recChannels);
+ }
- _inDesiredFormat.mSampleRate = N_REC_SAMPLES_PER_SEC;
- _inDesiredFormat.mBytesPerPacket = _inDesiredFormat.mChannelsPerFrame
- * sizeof(SInt16);
- _inDesiredFormat.mFramesPerPacket = 1;
- _inDesiredFormat.mBytesPerFrame = _inDesiredFormat.mChannelsPerFrame
- * sizeof(SInt16);
- _inDesiredFormat.mBitsPerChannel = sizeof(SInt16) * 8;
+ _inDesiredFormat.mSampleRate = N_REC_SAMPLES_PER_SEC;
+ _inDesiredFormat.mBytesPerPacket =
+ _inDesiredFormat.mChannelsPerFrame * sizeof(SInt16);
+ _inDesiredFormat.mFramesPerPacket = 1;
+ _inDesiredFormat.mBytesPerFrame =
+ _inDesiredFormat.mChannelsPerFrame * sizeof(SInt16);
+ _inDesiredFormat.mBitsPerChannel = sizeof(SInt16) * 8;
- _inDesiredFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger
- | kLinearPCMFormatFlagIsPacked;
+ _inDesiredFormat.mFormatFlags =
+ kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
#ifdef WEBRTC_ARCH_BIG_ENDIAN
- _inDesiredFormat.mFormatFlags |= kLinearPCMFormatFlagIsBigEndian;
+ _inDesiredFormat.mFormatFlags |= kLinearPCMFormatFlagIsBigEndian;
#endif
- _inDesiredFormat.mFormatID = kAudioFormatLinearPCM;
-
- WEBRTC_CA_RETURN_ON_ERR(AudioConverterNew(&_inStreamFormat, &_inDesiredFormat,
- &_captureConverter));
-
- // First try to set buffer size to desired value (10 ms * N_BLOCKS_IO)
- // TODO(xians): investigate this block.
- UInt32 bufByteCount = (UInt32)((_inStreamFormat.mSampleRate / 1000.0)
- * 10.0 * N_BLOCKS_IO * _inStreamFormat.mChannelsPerFrame
- * sizeof(Float32));
- if (_inStreamFormat.mFramesPerPacket != 0)
- {
- if (bufByteCount % _inStreamFormat.mFramesPerPacket != 0)
- {
- bufByteCount = ((UInt32)(bufByteCount
- / _inStreamFormat.mFramesPerPacket) + 1)
- * _inStreamFormat.mFramesPerPacket;
- }
+ _inDesiredFormat.mFormatID = kAudioFormatLinearPCM;
+
+ WEBRTC_CA_RETURN_ON_ERR(AudioConverterNew(&_inStreamFormat, &_inDesiredFormat,
+ &_captureConverter));
+
+ // First try to set buffer size to desired value (10 ms * N_BLOCKS_IO)
+ // TODO(xians): investigate this block.
+ UInt32 bufByteCount =
+ (UInt32)((_inStreamFormat.mSampleRate / 1000.0) * 10.0 * N_BLOCKS_IO *
+ _inStreamFormat.mChannelsPerFrame * sizeof(Float32));
+ if (_inStreamFormat.mFramesPerPacket != 0) {
+ if (bufByteCount % _inStreamFormat.mFramesPerPacket != 0) {
+ bufByteCount =
+ ((UInt32)(bufByteCount / _inStreamFormat.mFramesPerPacket) + 1) *
+ _inStreamFormat.mFramesPerPacket;
}
+ }
- // Ensure the buffer size is within the acceptable range provided by the device.
- propertyAddress.mSelector = kAudioDevicePropertyBufferSizeRange;
- AudioValueRange range;
- size = sizeof(range);
- WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_inputDeviceID,
- &propertyAddress, 0, NULL, &size, &range));
- if (range.mMinimum > bufByteCount)
- {
- bufByteCount = range.mMinimum;
- } else if (range.mMaximum < bufByteCount)
- {
- bufByteCount = range.mMaximum;
- }
+ // Ensure the buffer size is within the acceptable range provided by the
+ // device.
+ propertyAddress.mSelector = kAudioDevicePropertyBufferSizeRange;
+ AudioValueRange range;
+ size = sizeof(range);
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+ _inputDeviceID, &propertyAddress, 0, NULL, &size, &range));
+ if (range.mMinimum > bufByteCount) {
+ bufByteCount = range.mMinimum;
+ } else if (range.mMaximum < bufByteCount) {
+ bufByteCount = range.mMaximum;
+ }
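
The two branches above simply clamp the computed byte count into the device-reported kAudioDevicePropertyBufferSizeRange. The same arithmetic with hypothetical values:

    #include <algorithm>
    #include <cstdio>

    int main() {
      // Suppose the device reports [512, 4096] bytes and the 10 ms *
      // N_BLOCKS_IO Float32 computation above yielded 7680 bytes.
      const double rangeMin = 512.0;   // AudioValueRange.mMinimum
      const double rangeMax = 4096.0;  // AudioValueRange.mMaximum
      double bufByteCount = 7680.0;
      bufByteCount = std::min(std::max(bufByteCount, rangeMin), rangeMax);
      std::printf("%.0f\n", bufByteCount);  // clamped to 4096
      return 0;
    }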
- propertyAddress.mSelector = kAudioDevicePropertyBufferSize;
- size = sizeof(bufByteCount);
- WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(_inputDeviceID,
- &propertyAddress, 0, NULL, size, &bufByteCount));
-
- // Get capture device latency
- propertyAddress.mSelector = kAudioDevicePropertyLatency;
- UInt32 latency = 0;
- size = sizeof(UInt32);
- WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_inputDeviceID,
- &propertyAddress, 0, NULL, &size, &latency));
- _captureLatencyUs = (UInt32)((1.0e6 * latency)
- / _inStreamFormat.mSampleRate);
-
- // Get capture stream latency
- propertyAddress.mSelector = kAudioDevicePropertyStreams;
- AudioStreamID stream = 0;
- size = sizeof(AudioStreamID);
- WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_inputDeviceID,
- &propertyAddress, 0, NULL, &size, &stream));
- propertyAddress.mSelector = kAudioStreamPropertyLatency;
- size = sizeof(UInt32);
- latency = 0;
- WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_inputDeviceID,
- &propertyAddress, 0, NULL, &size, &latency));
- _captureLatencyUs += (UInt32)((1.0e6 * latency)
- / _inStreamFormat.mSampleRate);
-
- // Listen for format changes
- // TODO(xians): should we be using kAudioDevicePropertyDeviceHasChanged?
- propertyAddress.mSelector = kAudioDevicePropertyStreamFormat;
- WEBRTC_CA_RETURN_ON_ERR(AudioObjectAddPropertyListener(_inputDeviceID,
- &propertyAddress, &objectListenerProc, this));
-
- // Listen for processor overloads
- propertyAddress.mSelector = kAudioDeviceProcessorOverload;
- WEBRTC_CA_LOG_WARN(AudioObjectAddPropertyListener(_inputDeviceID,
- &propertyAddress, &objectListenerProc, this));
-
- if (_twoDevices)
- {
- WEBRTC_CA_RETURN_ON_ERR(AudioDeviceCreateIOProcID(_inputDeviceID,
- inDeviceIOProc, this, &_inDeviceIOProcID));
- } else if (!_playIsInitialized)
- {
- WEBRTC_CA_RETURN_ON_ERR(AudioDeviceCreateIOProcID(_inputDeviceID,
- deviceIOProc, this, &_deviceIOProcID));
- }
+ propertyAddress.mSelector = kAudioDevicePropertyBufferSize;
+ size = sizeof(bufByteCount);
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(
+ _inputDeviceID, &propertyAddress, 0, NULL, size, &bufByteCount));
+
+ // Get capture device latency
+ propertyAddress.mSelector = kAudioDevicePropertyLatency;
+ UInt32 latency = 0;
+ size = sizeof(UInt32);
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+ _inputDeviceID, &propertyAddress, 0, NULL, &size, &latency));
+ _captureLatencyUs = (UInt32)((1.0e6 * latency) / _inStreamFormat.mSampleRate);
+
+ // Get capture stream latency
+ propertyAddress.mSelector = kAudioDevicePropertyStreams;
+ AudioStreamID stream = 0;
+ size = sizeof(AudioStreamID);
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+ _inputDeviceID, &propertyAddress, 0, NULL, &size, &stream));
+ propertyAddress.mSelector = kAudioStreamPropertyLatency;
+ size = sizeof(UInt32);
+ latency = 0;
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+ _inputDeviceID, &propertyAddress, 0, NULL, &size, &latency));
+ _captureLatencyUs +=
+ (UInt32)((1.0e6 * latency) / _inStreamFormat.mSampleRate);
+
+ // Listen for format changes
+ // TODO(xians): should we be using kAudioDevicePropertyDeviceHasChanged?
+ propertyAddress.mSelector = kAudioDevicePropertyStreamFormat;
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectAddPropertyListener(
+ _inputDeviceID, &propertyAddress, &objectListenerProc, this));
+
+ // Listen for processor overloads
+ propertyAddress.mSelector = kAudioDeviceProcessorOverload;
+ WEBRTC_CA_LOG_WARN(AudioObjectAddPropertyListener(
+ _inputDeviceID, &propertyAddress, &objectListenerProc, this));
+
+ if (_twoDevices) {
+ WEBRTC_CA_RETURN_ON_ERR(AudioDeviceCreateIOProcID(
+ _inputDeviceID, inDeviceIOProc, this, &_inDeviceIOProcID));
+ } else if (!_playIsInitialized) {
+ WEBRTC_CA_RETURN_ON_ERR(AudioDeviceCreateIOProcID(
+ _inputDeviceID, deviceIOProc, this, &_deviceIOProcID));
+ }
- // Mark recording side as initialized
- _recIsInitialized = true;
+ // Mark recording side as initialized
+ _recIsInitialized = true;
- return 0;
+ return 0;
}
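// ---------------------------------------------------------------------------
// For reference, the shape of the AudioDeviceIOProc that
// AudioDeviceCreateIOProcID registers above (deviceIOProc/inDeviceIOProc in
// this file). This is the standard CoreAudio callback type; the body here is
// a do-nothing placeholder, not this module's implementation.
#include <CoreAudio/CoreAudio.h>

static OSStatus ExampleIOProc(AudioObjectID device,
                              const AudioTimeStamp* now,
                              const AudioBufferList* inputData,
                              const AudioTimeStamp* inputTime,
                              AudioBufferList* outputData,
                              const AudioTimeStamp* outputTime,
                              void* clientData) {
  // A real IOProc copies inputData into a ring buffer and/or fills
  // outputData with rendered audio; returning noErr keeps the callback alive.
  (void)device; (void)now; (void)inputData; (void)inputTime;
  (void)outputData; (void)outputTime; (void)clientData;
  return noErr;
}
// Registration mirrors the calls above:
//   AudioDeviceIOProcID procID;
//   AudioDeviceCreateIOProcID(deviceID, ExampleIOProc, context, &procID);
// ---------------------------------------------------------------------------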
-int32_t AudioDeviceMac::StartRecording()
-{
-
- CriticalSectionScoped lock(&_critSect);
+int32_t AudioDeviceMac::StartRecording() {
+ CriticalSectionScoped lock(&_critSect);
- if (!_recIsInitialized)
- {
- return -1;
- }
-
- if (_recording)
- {
- return 0;
- }
+ if (!_recIsInitialized) {
+ return -1;
+ }
- if (!_initialized)
- {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- " Recording worker thread has not been started");
- return -1;
- }
+ if (_recording) {
+ return 0;
+ }
- RTC_DCHECK(!capture_worker_thread_.get());
- capture_worker_thread_ =
- ThreadWrapper::CreateThread(RunCapture, this, "CaptureWorkerThread");
- RTC_DCHECK(capture_worker_thread_.get());
- capture_worker_thread_->Start();
- capture_worker_thread_->SetPriority(kRealtimePriority);
+ if (!_initialized) {
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+ " Recording worker thread has not been started");
+ return -1;
+ }
- OSStatus err = noErr;
- if (_twoDevices)
- {
- WEBRTC_CA_RETURN_ON_ERR(AudioDeviceStart(_inputDeviceID, _inDeviceIOProcID));
- } else if (!_playing)
- {
- WEBRTC_CA_RETURN_ON_ERR(AudioDeviceStart(_inputDeviceID, _deviceIOProcID));
- }
+ RTC_DCHECK(!capture_worker_thread_.get());
+ capture_worker_thread_.reset(
+ new rtc::PlatformThread(RunCapture, this, "CaptureWorkerThread"));
+ RTC_DCHECK(capture_worker_thread_.get());
+ capture_worker_thread_->Start();
+ capture_worker_thread_->SetPriority(rtc::kRealtimePriority);
+
+ OSStatus err = noErr;
+ if (_twoDevices) {
+ WEBRTC_CA_RETURN_ON_ERR(
+ AudioDeviceStart(_inputDeviceID, _inDeviceIOProcID));
+ } else if (!_playing) {
+ WEBRTC_CA_RETURN_ON_ERR(AudioDeviceStart(_inputDeviceID, _deviceIOProcID));
+ }
- _recording = true;
+ _recording = true;
- return 0;
+ return 0;
}
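// ---------------------------------------------------------------------------
// The capture worker started above is, in outline, a loop thread that drains
// what the IOProc produces. A generic stand-in using std::thread and
// std::atomic; the real code uses rtc::PlatformThread at realtime priority,
// and the loop body here is only a placeholder.
#include <atomic>
#include <thread>

struct CaptureWorker {
  std::atomic<bool> alive{true};
  std::thread thread;

  void Start() {
    thread = std::thread([this] {
      while (alive.load(std::memory_order_acquire)) {
        // Block until the IOProc signals new data, then deliver 10 ms
        // frames to the audio device buffer.
      }
    });
  }
  void Stop() {  // must be called before destruction
    alive.store(false, std::memory_order_release);
    if (thread.joinable())
      thread.join();
  }
};
// ---------------------------------------------------------------------------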
-int32_t AudioDeviceMac::StopRecording()
-{
-
- CriticalSectionScoped lock(&_critSect);
+int32_t AudioDeviceMac::StopRecording() {
+ CriticalSectionScoped lock(&_critSect);
- if (!_recIsInitialized)
- {
- return 0;
- }
-
- OSStatus err = noErr;
+ if (!_recIsInitialized) {
+ return 0;
+ }
- // Stop device
- int32_t captureDeviceIsAlive = AtomicGet32(&_captureDeviceIsAlive);
- if (_twoDevices)
- {
- if (_recording && captureDeviceIsAlive == 1)
- {
- _recording = false;
- _doStopRec = true; // Signal to io proc to stop audio device
- _critSect.Leave(); // Cannot be under lock, risk of deadlock
- if (kEventTimeout == _stopEventRec.Wait(2000))
- {
- CriticalSectionScoped critScoped(&_critSect);
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " Timed out stopping the capture IOProc. "
- "We may have failed to detect a device removal.");
-
- WEBRTC_CA_LOG_WARN(AudioDeviceStop(_inputDeviceID,
- _inDeviceIOProcID));
- WEBRTC_CA_LOG_WARN(
- AudioDeviceDestroyIOProcID(_inputDeviceID,
- _inDeviceIOProcID));
- }
- _critSect.Enter();
- _doStopRec = false;
- WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
- " Recording stopped");
- }
- }
- else
- {
- // We signal a stop for a shared device even when rendering has
- // not yet ended. This is to ensure the IOProc will return early as
- // intended (by checking |_recording|) before accessing
- // resources we free below (e.g. the capture converter).
- //
- // In the case of a shared devcie, the IOProc will verify
- // rendering has ended before stopping itself.
- if (_recording && captureDeviceIsAlive == 1)
- {
- _recording = false;
- _doStop = true; // Signal to io proc to stop audio device
- _critSect.Leave(); // Cannot be under lock, risk of deadlock
- if (kEventTimeout == _stopEvent.Wait(2000))
- {
- CriticalSectionScoped critScoped(&_critSect);
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " Timed out stopping the shared IOProc. "
- "We may have failed to detect a device removal.");
-
- // We assume rendering on a shared device has stopped as well if
- // the IOProc times out.
- WEBRTC_CA_LOG_WARN(AudioDeviceStop(_outputDeviceID,
- _deviceIOProcID));
- WEBRTC_CA_LOG_WARN(AudioDeviceDestroyIOProcID(_outputDeviceID,
- _deviceIOProcID));
- }
- _critSect.Enter();
- _doStop = false;
- WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
- " Recording stopped (shared)");
- }
+ OSStatus err = noErr;
+
+ // Stop device
+ int32_t captureDeviceIsAlive = AtomicGet32(&_captureDeviceIsAlive);
+ if (_twoDevices) {
+ if (_recording && captureDeviceIsAlive == 1) {
+ _recording = false;
+ _doStopRec = true; // Signal to io proc to stop audio device
+ _critSect.Leave(); // Cannot be under lock, risk of deadlock
+ if (kEventTimeout == _stopEventRec.Wait(2000)) {
+ CriticalSectionScoped critScoped(&_critSect);
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+ " Timed out stopping the capture IOProc. "
+ "We may have failed to detect a device removal.");
+
+ WEBRTC_CA_LOG_WARN(AudioDeviceStop(_inputDeviceID, _inDeviceIOProcID));
+ WEBRTC_CA_LOG_WARN(
+ AudioDeviceDestroyIOProcID(_inputDeviceID, _inDeviceIOProcID));
+ }
+ _critSect.Enter();
+ _doStopRec = false;
+ WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " Recording stopped");
+ }
+ } else {
+ // We signal a stop for a shared device even when rendering has
+ // not yet ended. This is to ensure the IOProc will return early as
+ // intended (by checking |_recording|) before accessing
+ // resources we free below (e.g. the capture converter).
+ //
+  // In the case of a shared device, the IOProc will verify
+ // rendering has ended before stopping itself.
+ if (_recording && captureDeviceIsAlive == 1) {
+ _recording = false;
+ _doStop = true; // Signal to io proc to stop audio device
+ _critSect.Leave(); // Cannot be under lock, risk of deadlock
+ if (kEventTimeout == _stopEvent.Wait(2000)) {
+ CriticalSectionScoped critScoped(&_critSect);
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+ " Timed out stopping the shared IOProc. "
+ "We may have failed to detect a device removal.");
+
+ // We assume rendering on a shared device has stopped as well if
+ // the IOProc times out.
+ WEBRTC_CA_LOG_WARN(AudioDeviceStop(_outputDeviceID, _deviceIOProcID));
+ WEBRTC_CA_LOG_WARN(
+ AudioDeviceDestroyIOProcID(_outputDeviceID, _deviceIOProcID));
+ }
+ _critSect.Enter();
+ _doStop = false;
+ WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
+ " Recording stopped (shared)");
}
+ }
- // Setting this signal will allow the worker thread to be stopped.
- AtomicSet32(&_captureDeviceIsAlive, 0);
+ // Setting this signal will allow the worker thread to be stopped.
+ AtomicSet32(&_captureDeviceIsAlive, 0);
- if (capture_worker_thread_.get()) {
- _critSect.Leave();
- capture_worker_thread_->Stop();
- capture_worker_thread_.reset();
- _critSect.Enter();
- }
+ if (capture_worker_thread_.get()) {
+ _critSect.Leave();
+ capture_worker_thread_->Stop();
+ capture_worker_thread_.reset();
+ _critSect.Enter();
+ }
- WEBRTC_CA_LOG_WARN(AudioConverterDispose(_captureConverter));
+ WEBRTC_CA_LOG_WARN(AudioConverterDispose(_captureConverter));
- // Remove listeners.
- AudioObjectPropertyAddress
- propertyAddress = { kAudioDevicePropertyStreamFormat,
- kAudioDevicePropertyScopeInput, 0 };
- WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(_inputDeviceID,
- &propertyAddress, &objectListenerProc, this));
+ // Remove listeners.
+ AudioObjectPropertyAddress propertyAddress = {
+ kAudioDevicePropertyStreamFormat, kAudioDevicePropertyScopeInput, 0};
+ WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(
+ _inputDeviceID, &propertyAddress, &objectListenerProc, this));
- propertyAddress.mSelector = kAudioDeviceProcessorOverload;
- WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(_inputDeviceID,
- &propertyAddress, &objectListenerProc, this));
+ propertyAddress.mSelector = kAudioDeviceProcessorOverload;
+ WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(
+ _inputDeviceID, &propertyAddress, &objectListenerProc, this));
- _recIsInitialized = false;
- _recording = false;
+ _recIsInitialized = false;
+ _recording = false;
- return 0;
+ return 0;
}
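// ---------------------------------------------------------------------------
// The stop sequence above is a signal-and-wait handshake: the control thread
// sets a flag under the lock, then waits outside the lock (to avoid deadlock)
// for the IOProc to acknowledge, with a 2 s timeout. A generic sketch with
// standard C++ primitives standing in for _doStopRec/_stopEventRec:
#include <chrono>
#include <condition_variable>
#include <mutex>

struct StopHandshake {
  std::mutex m;
  std::condition_variable cv;
  bool do_stop = false;   // set by the control thread
  bool stopped = false;   // acknowledged by the audio callback

  // Control thread: request a stop and wait up to 2 s, as the code above
  // waits on _stopEventRec.Wait(2000). Returns false on timeout.
  bool RequestStopAndWait() {
    std::unique_lock<std::mutex> lock(m);
    do_stop = true;
    return cv.wait_for(lock, std::chrono::seconds(2),
                       [this] { return stopped; });
  }

  // Audio callback: observe the request, stop the device, acknowledge.
  void AcknowledgeStop() {
    std::lock_guard<std::mutex> lock(m);
    stopped = true;
    cv.notify_all();
  }
};
// ---------------------------------------------------------------------------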
-bool AudioDeviceMac::RecordingIsInitialized() const
-{
- return (_recIsInitialized);
+bool AudioDeviceMac::RecordingIsInitialized() const {
+ return (_recIsInitialized);
}
-bool AudioDeviceMac::Recording() const
-{
- return (_recording);
+bool AudioDeviceMac::Recording() const {
+ return (_recording);
}
-bool AudioDeviceMac::PlayoutIsInitialized() const
-{
- return (_playIsInitialized);
+bool AudioDeviceMac::PlayoutIsInitialized() const {
+ return (_playIsInitialized);
}
-int32_t AudioDeviceMac::StartPlayout()
-{
+int32_t AudioDeviceMac::StartPlayout() {
+ CriticalSectionScoped lock(&_critSect);
- CriticalSectionScoped lock(&_critSect);
-
- if (!_playIsInitialized)
- {
- return -1;
- }
+ if (!_playIsInitialized) {
+ return -1;
+ }
- if (_playing)
- {
- return 0;
- }
+ if (_playing) {
+ return 0;
+ }
- RTC_DCHECK(!render_worker_thread_.get());
- render_worker_thread_ =
- ThreadWrapper::CreateThread(RunRender, this, "RenderWorkerThread");
- render_worker_thread_->Start();
- render_worker_thread_->SetPriority(kRealtimePriority);
+ RTC_DCHECK(!render_worker_thread_.get());
+ render_worker_thread_.reset(
+ new rtc::PlatformThread(RunRender, this, "RenderWorkerThread"));
+ render_worker_thread_->Start();
+ render_worker_thread_->SetPriority(rtc::kRealtimePriority);
- if (_twoDevices || !_recording)
- {
- OSStatus err = noErr;
- WEBRTC_CA_RETURN_ON_ERR(AudioDeviceStart(_outputDeviceID, _deviceIOProcID));
- }
- _playing = true;
+ if (_twoDevices || !_recording) {
+ OSStatus err = noErr;
+ WEBRTC_CA_RETURN_ON_ERR(AudioDeviceStart(_outputDeviceID, _deviceIOProcID));
+ }
+ _playing = true;
- return 0;
+ return 0;
}
-int32_t AudioDeviceMac::StopPlayout()
-{
+int32_t AudioDeviceMac::StopPlayout() {
+ CriticalSectionScoped lock(&_critSect);
- CriticalSectionScoped lock(&_critSect);
+ if (!_playIsInitialized) {
+ return 0;
+ }
- if (!_playIsInitialized)
- {
- return 0;
+ OSStatus err = noErr;
+
+ int32_t renderDeviceIsAlive = AtomicGet32(&_renderDeviceIsAlive);
+ if (_playing && renderDeviceIsAlive == 1) {
+ // We signal a stop for a shared device even when capturing has not
+ // yet ended. This is to ensure the IOProc will return early as
+ // intended (by checking |_playing|) before accessing resources we
+ // free below (e.g. the render converter).
+ //
+ // In the case of a shared device, the IOProc will verify capturing
+ // has ended before stopping itself.
+ _playing = false;
+ _doStop = true; // Signal to io proc to stop audio device
+ _critSect.Leave(); // Cannot be under lock, risk of deadlock
+ if (kEventTimeout == _stopEvent.Wait(2000)) {
+ CriticalSectionScoped critScoped(&_critSect);
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+ " Timed out stopping the render IOProc. "
+ "We may have failed to detect a device removal.");
+
+ // We assume capturing on a shared device has stopped as well if the
+ // IOProc times out.
+ WEBRTC_CA_LOG_WARN(AudioDeviceStop(_outputDeviceID, _deviceIOProcID));
+ WEBRTC_CA_LOG_WARN(
+ AudioDeviceDestroyIOProcID(_outputDeviceID, _deviceIOProcID));
}
+ _critSect.Enter();
+ _doStop = false;
+ WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, "Playout stopped");
+ }
- OSStatus err = noErr;
+ // Setting this signal will allow the worker thread to be stopped.
+ AtomicSet32(&_renderDeviceIsAlive, 0);
+ if (render_worker_thread_.get()) {
+ _critSect.Leave();
+ render_worker_thread_->Stop();
+ render_worker_thread_.reset();
+ _critSect.Enter();
+ }
- int32_t renderDeviceIsAlive = AtomicGet32(&_renderDeviceIsAlive);
- if (_playing && renderDeviceIsAlive == 1)
- {
- // We signal a stop for a shared device even when capturing has not
- // yet ended. This is to ensure the IOProc will return early as
- // intended (by checking |_playing|) before accessing resources we
- // free below (e.g. the render converter).
- //
- // In the case of a shared device, the IOProc will verify capturing
- // has ended before stopping itself.
- _playing = false;
- _doStop = true; // Signal to io proc to stop audio device
- _critSect.Leave(); // Cannot be under lock, risk of deadlock
- if (kEventTimeout == _stopEvent.Wait(2000))
- {
- CriticalSectionScoped critScoped(&_critSect);
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " Timed out stopping the render IOProc. "
- "We may have failed to detect a device removal.");
-
- // We assume capturing on a shared device has stopped as well if the
- // IOProc times out.
- WEBRTC_CA_LOG_WARN(AudioDeviceStop(_outputDeviceID,
- _deviceIOProcID));
- WEBRTC_CA_LOG_WARN(AudioDeviceDestroyIOProcID(_outputDeviceID,
- _deviceIOProcID));
- }
- _critSect.Enter();
- _doStop = false;
- WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
- "Playout stopped");
- }
+ WEBRTC_CA_LOG_WARN(AudioConverterDispose(_renderConverter));
- // Setting this signal will allow the worker thread to be stopped.
- AtomicSet32(&_renderDeviceIsAlive, 0);
- if (render_worker_thread_.get()) {
- _critSect.Leave();
- render_worker_thread_->Stop();
- render_worker_thread_.reset();
- _critSect.Enter();
- }
+ // Remove listeners.
+ AudioObjectPropertyAddress propertyAddress = {
+ kAudioDevicePropertyStreamFormat, kAudioDevicePropertyScopeOutput, 0};
+ WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(
+ _outputDeviceID, &propertyAddress, &objectListenerProc, this));
- WEBRTC_CA_LOG_WARN(AudioConverterDispose(_renderConverter));
+ propertyAddress.mSelector = kAudioDeviceProcessorOverload;
+ WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(
+ _outputDeviceID, &propertyAddress, &objectListenerProc, this));
- // Remove listeners.
- AudioObjectPropertyAddress propertyAddress = {
- kAudioDevicePropertyStreamFormat, kAudioDevicePropertyScopeOutput,
- 0 };
- WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(_outputDeviceID,
- &propertyAddress, &objectListenerProc, this));
-
- propertyAddress.mSelector = kAudioDeviceProcessorOverload;
- WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(_outputDeviceID,
- &propertyAddress, &objectListenerProc, this));
-
- if (_macBookPro)
- {
- Boolean hasProperty = AudioObjectHasProperty(_outputDeviceID,
- &propertyAddress);
- if (hasProperty)
- {
- propertyAddress.mSelector = kAudioDevicePropertyDataSource;
- WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(_outputDeviceID,
- &propertyAddress, &objectListenerProc, this));
- }
+ if (_macBookPro) {
+ Boolean hasProperty =
+ AudioObjectHasProperty(_outputDeviceID, &propertyAddress);
+ if (hasProperty) {
+ propertyAddress.mSelector = kAudioDevicePropertyDataSource;
+ WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(
+ _outputDeviceID, &propertyAddress, &objectListenerProc, this));
}
+ }
- _playIsInitialized = false;
- _playing = false;
+ _playIsInitialized = false;
+ _playing = false;
- return 0;
+ return 0;
}
-int32_t AudioDeviceMac::PlayoutDelay(uint16_t& delayMS) const
-{
- int32_t renderDelayUs = AtomicGet32(&_renderDelayUs);
- delayMS = static_cast<uint16_t> (1e-3 * (renderDelayUs + _renderLatencyUs) +
- 0.5);
- return 0;
+int32_t AudioDeviceMac::PlayoutDelay(uint16_t& delayMS) const {
+ int32_t renderDelayUs = AtomicGet32(&_renderDelayUs);
+ delayMS =
+ static_cast<uint16_t>(1e-3 * (renderDelayUs + _renderLatencyUs) + 0.5);
+ return 0;
}
-int32_t AudioDeviceMac::RecordingDelay(uint16_t& delayMS) const
-{
- int32_t captureDelayUs = AtomicGet32(&_captureDelayUs);
- delayMS = static_cast<uint16_t> (1e-3 * (captureDelayUs +
- _captureLatencyUs) + 0.5);
- return 0;
+int32_t AudioDeviceMac::RecordingDelay(uint16_t& delayMS) const {
+ int32_t captureDelayUs = AtomicGet32(&_captureDelayUs);
+ delayMS =
+ static_cast<uint16_t>(1e-3 * (captureDelayUs + _captureLatencyUs) + 0.5);
+ return 0;
}
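// ---------------------------------------------------------------------------
// Both delay getters above apply the same conversion: microseconds to
// milliseconds with round-half-up. Equivalently:
#include <cstdint>

static inline uint16_t UsToMsRounded(int32_t delay_us, uint32_t latency_us) {
  return static_cast<uint16_t>(1e-3 * (delay_us + latency_us) + 0.5);
}
// UsToMsRounded(12000, 3500) == 16 (15.5 ms rounds up).
// ---------------------------------------------------------------------------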
-bool AudioDeviceMac::Playing() const
-{
- return (_playing);
+bool AudioDeviceMac::Playing() const {
+ return (_playing);
}
int32_t AudioDeviceMac::SetPlayoutBuffer(
const AudioDeviceModule::BufferType type,
- uint16_t sizeMS)
-{
-
- if (type != AudioDeviceModule::kFixedBufferSize)
- {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- " Adaptive buffer size not supported on this platform");
- return -1;
- }
+ uint16_t sizeMS) {
+ if (type != AudioDeviceModule::kFixedBufferSize) {
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+ " Adaptive buffer size not supported on this platform");
+ return -1;
+ }
- _playBufType = type;
- _playBufDelayFixed = sizeMS;
- return 0;
+ _playBufType = type;
+ _playBufDelayFixed = sizeMS;
+ return 0;
}
-int32_t AudioDeviceMac::PlayoutBuffer(
- AudioDeviceModule::BufferType& type,
- uint16_t& sizeMS) const
-{
-
- type = _playBufType;
- sizeMS = _playBufDelayFixed;
+int32_t AudioDeviceMac::PlayoutBuffer(AudioDeviceModule::BufferType& type,
+ uint16_t& sizeMS) const {
+ type = _playBufType;
+ sizeMS = _playBufDelayFixed;
- return 0;
+ return 0;
}
// Not implemented for Mac.
-int32_t AudioDeviceMac::CPULoad(uint16_t& /*load*/) const
-{
-
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " API call not supported on this platform");
+int32_t AudioDeviceMac::CPULoad(uint16_t& /*load*/) const {
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+ " API call not supported on this platform");
- return -1;
+ return -1;
}
-bool AudioDeviceMac::PlayoutWarning() const
-{
- return (_playWarning > 0);
+bool AudioDeviceMac::PlayoutWarning() const {
+ return (_playWarning > 0);
}
-bool AudioDeviceMac::PlayoutError() const
-{
- return (_playError > 0);
+bool AudioDeviceMac::PlayoutError() const {
+ return (_playError > 0);
}
-bool AudioDeviceMac::RecordingWarning() const
-{
- return (_recWarning > 0);
+bool AudioDeviceMac::RecordingWarning() const {
+ return (_recWarning > 0);
}
-bool AudioDeviceMac::RecordingError() const
-{
- return (_recError > 0);
+bool AudioDeviceMac::RecordingError() const {
+ return (_recError > 0);
}
-void AudioDeviceMac::ClearPlayoutWarning()
-{
- _playWarning = 0;
+void AudioDeviceMac::ClearPlayoutWarning() {
+ _playWarning = 0;
}
-void AudioDeviceMac::ClearPlayoutError()
-{
- _playError = 0;
+void AudioDeviceMac::ClearPlayoutError() {
+ _playError = 0;
}
-void AudioDeviceMac::ClearRecordingWarning()
-{
- _recWarning = 0;
+void AudioDeviceMac::ClearRecordingWarning() {
+ _recWarning = 0;
}
-void AudioDeviceMac::ClearRecordingError()
-{
- _recError = 0;
+void AudioDeviceMac::ClearRecordingError() {
+ _recError = 0;
}
// ============================================================================
// Private Methods
// ============================================================================
-int32_t
-AudioDeviceMac::GetNumberDevices(const AudioObjectPropertyScope scope,
- AudioDeviceID scopedDeviceIds[],
- const uint32_t deviceListLength)
-{
- OSStatus err = noErr;
-
- AudioObjectPropertyAddress propertyAddress = {
- kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal,
- kAudioObjectPropertyElementMaster };
- UInt32 size = 0;
- WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyDataSize(kAudioObjectSystemObject,
- &propertyAddress, 0, NULL, &size));
- if (size == 0)
- {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- "No devices");
- return 0;
- }
-
- AudioDeviceID* deviceIds = (AudioDeviceID*) malloc(size);
- UInt32 numberDevices = size / sizeof(AudioDeviceID);
- AudioBufferList* bufferList = NULL;
- UInt32 numberScopedDevices = 0;
+int32_t AudioDeviceMac::GetNumberDevices(const AudioObjectPropertyScope scope,
+ AudioDeviceID scopedDeviceIds[],
+ const uint32_t deviceListLength) {
+ OSStatus err = noErr;
+
+ AudioObjectPropertyAddress propertyAddress = {
+ kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal,
+ kAudioObjectPropertyElementMaster};
+ UInt32 size = 0;
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyDataSize(
+ kAudioObjectSystemObject, &propertyAddress, 0, NULL, &size));
+ if (size == 0) {
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "No devices");
+ return 0;
+ }
- // First check if there is a default device and list it
- UInt32 hardwareProperty = 0;
- if (scope == kAudioDevicePropertyScopeOutput)
- {
- hardwareProperty = kAudioHardwarePropertyDefaultOutputDevice;
- } else
- {
- hardwareProperty = kAudioHardwarePropertyDefaultInputDevice;
- }
+ AudioDeviceID* deviceIds = (AudioDeviceID*)malloc(size);
+ UInt32 numberDevices = size / sizeof(AudioDeviceID);
+ AudioBufferList* bufferList = NULL;
+ UInt32 numberScopedDevices = 0;
+
+ // First check if there is a default device and list it
+ UInt32 hardwareProperty = 0;
+ if (scope == kAudioDevicePropertyScopeOutput) {
+ hardwareProperty = kAudioHardwarePropertyDefaultOutputDevice;
+ } else {
+ hardwareProperty = kAudioHardwarePropertyDefaultInputDevice;
+ }
- AudioObjectPropertyAddress
- propertyAddressDefault = { hardwareProperty,
- kAudioObjectPropertyScopeGlobal,
- kAudioObjectPropertyElementMaster };
-
- AudioDeviceID usedID;
- UInt32 uintSize = sizeof(UInt32);
- WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(kAudioObjectSystemObject,
- &propertyAddressDefault, 0, NULL, &uintSize, &usedID));
- if (usedID != kAudioDeviceUnknown)
- {
- scopedDeviceIds[numberScopedDevices] = usedID;
- numberScopedDevices++;
- } else
- {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- "GetNumberDevices(): Default device unknown");
- }
+ AudioObjectPropertyAddress propertyAddressDefault = {
+ hardwareProperty, kAudioObjectPropertyScopeGlobal,
+ kAudioObjectPropertyElementMaster};
+
+ AudioDeviceID usedID;
+ UInt32 uintSize = sizeof(UInt32);
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(kAudioObjectSystemObject,
+ &propertyAddressDefault, 0,
+ NULL, &uintSize, &usedID));
+ if (usedID != kAudioDeviceUnknown) {
+ scopedDeviceIds[numberScopedDevices] = usedID;
+ numberScopedDevices++;
+ } else {
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+ "GetNumberDevices(): Default device unknown");
+ }
- // Then list the rest of the devices
- bool listOK = true;
+ // Then list the rest of the devices
+ bool listOK = true;
- WEBRTC_CA_LOG_ERR(AudioObjectGetPropertyData(kAudioObjectSystemObject,
- &propertyAddress, 0, NULL, &size, deviceIds));
- if (err != noErr)
- {
+ WEBRTC_CA_LOG_ERR(AudioObjectGetPropertyData(
+ kAudioObjectSystemObject, &propertyAddress, 0, NULL, &size, deviceIds));
+ if (err != noErr) {
+ listOK = false;
+ } else {
+ propertyAddress.mSelector = kAudioDevicePropertyStreamConfiguration;
+ propertyAddress.mScope = scope;
+ propertyAddress.mElement = 0;
+ for (UInt32 i = 0; i < numberDevices; i++) {
+ // Check for input channels
+ WEBRTC_CA_LOG_ERR(AudioObjectGetPropertyDataSize(
+ deviceIds[i], &propertyAddress, 0, NULL, &size));
+ if (err == kAudioHardwareBadDeviceError) {
+ // This device doesn't actually exist; continue iterating.
+ continue;
+ } else if (err != noErr) {
listOK = false;
- } else
- {
- propertyAddress.mSelector = kAudioDevicePropertyStreamConfiguration;
- propertyAddress.mScope = scope;
- propertyAddress.mElement = 0;
- for (UInt32 i = 0; i < numberDevices; i++)
- {
- // Check for input channels
- WEBRTC_CA_LOG_ERR(AudioObjectGetPropertyDataSize(deviceIds[i],
- &propertyAddress, 0, NULL, &size));
- if (err == kAudioHardwareBadDeviceError)
- {
- // This device doesn't actually exist; continue iterating.
- continue;
- } else if (err != noErr)
- {
- listOK = false;
- break;
- }
-
- bufferList = (AudioBufferList*) malloc(size);
- WEBRTC_CA_LOG_ERR(AudioObjectGetPropertyData(deviceIds[i],
- &propertyAddress, 0, NULL, &size, bufferList));
- if (err != noErr)
- {
- listOK = false;
- break;
- }
-
- if (bufferList->mNumberBuffers > 0)
- {
- if (numberScopedDevices >= deviceListLength)
- {
- WEBRTC_TRACE(kTraceError,
- kTraceAudioDevice, _id,
- "Device list is not long enough");
- listOK = false;
- break;
- }
-
- scopedDeviceIds[numberScopedDevices] = deviceIds[i];
- numberScopedDevices++;
- }
-
- free(bufferList);
- bufferList = NULL;
- } // for
- }
+ break;
+ }
- if (!listOK)
- {
- if (deviceIds)
- {
- free(deviceIds);
- deviceIds = NULL;
+ bufferList = (AudioBufferList*)malloc(size);
+ WEBRTC_CA_LOG_ERR(AudioObjectGetPropertyData(
+ deviceIds[i], &propertyAddress, 0, NULL, &size, bufferList));
+ if (err != noErr) {
+ listOK = false;
+ break;
+ }
+
+ if (bufferList->mNumberBuffers > 0) {
+ if (numberScopedDevices >= deviceListLength) {
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+ "Device list is not long enough");
+ listOK = false;
+ break;
}
- if (bufferList)
- {
- free(bufferList);
- bufferList = NULL;
- }
+ scopedDeviceIds[numberScopedDevices] = deviceIds[i];
+ numberScopedDevices++;
+ }
- return -1;
+ free(bufferList);
+ bufferList = NULL;
+ } // for
+ }
+
+ if (!listOK) {
+ if (deviceIds) {
+ free(deviceIds);
+ deviceIds = NULL;
}
- // Happy ending
- if (deviceIds)
- {
- free(deviceIds);
- deviceIds = NULL;
+ if (bufferList) {
+ free(bufferList);
+ bufferList = NULL;
}
- return numberScopedDevices;
+ return -1;
+ }
+
+ // Happy ending
+ if (deviceIds) {
+ free(deviceIds);
+ deviceIds = NULL;
+ }
+
+ return numberScopedDevices;
}
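// ---------------------------------------------------------------------------
// The core enumeration performed above, reduced to a standalone sketch
// (without the default-device handling or the per-scope channel check): ask
// for the size of kAudioHardwarePropertyDevices, then fetch the ID array.
#include <CoreAudio/CoreAudio.h>
#include <cstdio>
#include <vector>

int main() {
  AudioObjectPropertyAddress addr = {kAudioHardwarePropertyDevices,
                                     kAudioObjectPropertyScopeGlobal,
                                     kAudioObjectPropertyElementMaster};
  UInt32 size = 0;
  if (AudioObjectGetPropertyDataSize(kAudioObjectSystemObject, &addr, 0, NULL,
                                     &size) != noErr ||
      size == 0) {
    return -1;
  }
  std::vector<AudioDeviceID> ids(size / sizeof(AudioDeviceID));
  if (AudioObjectGetPropertyData(kAudioObjectSystemObject, &addr, 0, NULL,
                                 &size, ids.data()) != noErr) {
    return -1;
  }
  printf("%zu audio devices\n", ids.size());
  return 0;
}
// ---------------------------------------------------------------------------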
-int32_t
-AudioDeviceMac::GetDeviceName(const AudioObjectPropertyScope scope,
- const uint16_t index,
- char* name)
-{
- OSStatus err = noErr;
- UInt32 len = kAdmMaxDeviceNameSize;
- AudioDeviceID deviceIds[MaxNumberDevices];
-
- int numberDevices = GetNumberDevices(scope, deviceIds, MaxNumberDevices);
- if (numberDevices < 0)
- {
- return -1;
- } else if (numberDevices == 0)
- {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- "No devices");
- return -1;
- }
+int32_t AudioDeviceMac::GetDeviceName(const AudioObjectPropertyScope scope,
+ const uint16_t index,
+ char* name) {
+ OSStatus err = noErr;
+ UInt32 len = kAdmMaxDeviceNameSize;
+ AudioDeviceID deviceIds[MaxNumberDevices];
- // If the number is below the number of devices, assume it's "WEBRTC ID"
- // otherwise assume it's a CoreAudio ID
- AudioDeviceID usedID;
-
- // Check if there is a default device
- bool isDefaultDevice = false;
- if (index == 0)
- {
- UInt32 hardwareProperty = 0;
- if (scope == kAudioDevicePropertyScopeOutput)
- {
- hardwareProperty = kAudioHardwarePropertyDefaultOutputDevice;
- } else
- {
- hardwareProperty = kAudioHardwarePropertyDefaultInputDevice;
- }
- AudioObjectPropertyAddress propertyAddress = { hardwareProperty,
- kAudioObjectPropertyScopeGlobal,
- kAudioObjectPropertyElementMaster };
- UInt32 size = sizeof(UInt32);
- WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(kAudioObjectSystemObject,
- &propertyAddress, 0, NULL, &size, &usedID));
- if (usedID == kAudioDeviceUnknown)
- {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- "GetDeviceName(): Default device unknown");
- } else
- {
- isDefaultDevice = true;
- }
- }
+ int numberDevices = GetNumberDevices(scope, deviceIds, MaxNumberDevices);
+ if (numberDevices < 0) {
+ return -1;
+ } else if (numberDevices == 0) {
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "No devices");
+ return -1;
+ }
+
+  // If the index is below the number of devices, assume it's a WebRTC ID;
+  // otherwise assume it's a CoreAudio ID.
+ AudioDeviceID usedID;
+ // Check if there is a default device
+ bool isDefaultDevice = false;
+ if (index == 0) {
+ UInt32 hardwareProperty = 0;
+ if (scope == kAudioDevicePropertyScopeOutput) {
+ hardwareProperty = kAudioHardwarePropertyDefaultOutputDevice;
+ } else {
+ hardwareProperty = kAudioHardwarePropertyDefaultInputDevice;
+ }
AudioObjectPropertyAddress propertyAddress = {
- kAudioDevicePropertyDeviceName, scope, 0 };
-
- if (isDefaultDevice)
- {
- char devName[len];
-
- WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(usedID,
- &propertyAddress, 0, NULL, &len, devName));
-
- sprintf(name, "default (%s)", devName);
- } else
- {
- if (index < numberDevices)
- {
- usedID = deviceIds[index];
- } else
- {
- usedID = index;
- }
+ hardwareProperty, kAudioObjectPropertyScopeGlobal,
+ kAudioObjectPropertyElementMaster};
+ UInt32 size = sizeof(UInt32);
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+ kAudioObjectSystemObject, &propertyAddress, 0, NULL, &size, &usedID));
+ if (usedID == kAudioDeviceUnknown) {
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+ "GetDeviceName(): Default device unknown");
+ } else {
+ isDefaultDevice = true;
+ }
+ }
- WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(usedID,
- &propertyAddress, 0, NULL, &len, name));
+ AudioObjectPropertyAddress propertyAddress = {kAudioDevicePropertyDeviceName,
+ scope, 0};
+
+ if (isDefaultDevice) {
+ char devName[len];
+
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(usedID, &propertyAddress,
+ 0, NULL, &len, devName));
+
+ sprintf(name, "default (%s)", devName);
+ } else {
+ if (index < numberDevices) {
+ usedID = deviceIds[index];
+ } else {
+ usedID = index;
}
- return 0;
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(usedID, &propertyAddress,
+ 0, NULL, &len, name));
+ }
+
+ return 0;
}
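// ---------------------------------------------------------------------------
// The default-device lookup used above, in isolation: read the system
// object's default input/output device property and fall back to
// kAudioDeviceUnknown on failure.
#include <CoreAudio/CoreAudio.h>

static AudioDeviceID DefaultDevice(bool input) {
  AudioObjectPropertyAddress addr = {
      input ? kAudioHardwarePropertyDefaultInputDevice
            : kAudioHardwarePropertyDefaultOutputDevice,
      kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster};
  AudioDeviceID device = kAudioDeviceUnknown;
  UInt32 size = sizeof(device);
  if (AudioObjectGetPropertyData(kAudioObjectSystemObject, &addr, 0, NULL,
                                 &size, &device) != noErr) {
    return kAudioDeviceUnknown;
  }
  return device;
}
// ---------------------------------------------------------------------------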
int32_t AudioDeviceMac::InitDevice(const uint16_t userDeviceIndex,
AudioDeviceID& deviceId,
- const bool isInput)
-{
- OSStatus err = noErr;
- UInt32 size = 0;
- AudioObjectPropertyScope deviceScope;
- AudioObjectPropertySelector defaultDeviceSelector;
- AudioDeviceID deviceIds[MaxNumberDevices];
-
- if (isInput)
- {
- deviceScope = kAudioDevicePropertyScopeInput;
- defaultDeviceSelector = kAudioHardwarePropertyDefaultInputDevice;
- } else
- {
- deviceScope = kAudioDevicePropertyScopeOutput;
- defaultDeviceSelector = kAudioHardwarePropertyDefaultOutputDevice;
- }
+ const bool isInput) {
+ OSStatus err = noErr;
+ UInt32 size = 0;
+ AudioObjectPropertyScope deviceScope;
+ AudioObjectPropertySelector defaultDeviceSelector;
+ AudioDeviceID deviceIds[MaxNumberDevices];
+
+ if (isInput) {
+ deviceScope = kAudioDevicePropertyScopeInput;
+ defaultDeviceSelector = kAudioHardwarePropertyDefaultInputDevice;
+ } else {
+ deviceScope = kAudioDevicePropertyScopeOutput;
+ defaultDeviceSelector = kAudioHardwarePropertyDefaultOutputDevice;
+ }
- AudioObjectPropertyAddress
- propertyAddress = { defaultDeviceSelector,
- kAudioObjectPropertyScopeGlobal,
- kAudioObjectPropertyElementMaster };
-
- // Get the actual device IDs
- int numberDevices = GetNumberDevices(deviceScope, deviceIds,
- MaxNumberDevices);
- if (numberDevices < 0)
- {
- return -1;
- } else if (numberDevices == 0)
- {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- "InitDevice(): No devices");
- return -1;
- }
+ AudioObjectPropertyAddress propertyAddress = {
+ defaultDeviceSelector, kAudioObjectPropertyScopeGlobal,
+ kAudioObjectPropertyElementMaster};
- bool isDefaultDevice = false;
- deviceId = kAudioDeviceUnknown;
- if (userDeviceIndex == 0)
- {
- // Try to use default system device
- size = sizeof(AudioDeviceID);
- WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(kAudioObjectSystemObject,
- &propertyAddress, 0, NULL, &size, &deviceId));
- if (deviceId == kAudioDeviceUnknown)
- {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " No default device exists");
- } else
- {
- isDefaultDevice = true;
- }
- }
+ // Get the actual device IDs
+ int numberDevices =
+ GetNumberDevices(deviceScope, deviceIds, MaxNumberDevices);
+ if (numberDevices < 0) {
+ return -1;
+ } else if (numberDevices == 0) {
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+ "InitDevice(): No devices");
+ return -1;
+ }
- if (!isDefaultDevice)
- {
- deviceId = deviceIds[userDeviceIndex];
+ bool isDefaultDevice = false;
+ deviceId = kAudioDeviceUnknown;
+ if (userDeviceIndex == 0) {
+ // Try to use default system device
+ size = sizeof(AudioDeviceID);
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+ kAudioObjectSystemObject, &propertyAddress, 0, NULL, &size, &deviceId));
+ if (deviceId == kAudioDeviceUnknown) {
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+ " No default device exists");
+ } else {
+ isDefaultDevice = true;
}
+ }
- // Obtain device name and manufacturer for logging.
- // Also use this as a test to ensure a user-set device ID is valid.
- char devName[128];
- char devManf[128];
- memset(devName, 0, sizeof(devName));
- memset(devManf, 0, sizeof(devManf));
-
- propertyAddress.mSelector = kAudioDevicePropertyDeviceName;
- propertyAddress.mScope = deviceScope;
- propertyAddress.mElement = 0;
- size = sizeof(devName);
- WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(deviceId,
- &propertyAddress, 0, NULL, &size, devName));
-
- propertyAddress.mSelector = kAudioDevicePropertyDeviceManufacturer;
- size = sizeof(devManf);
- WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(deviceId,
- &propertyAddress, 0, NULL, &size, devManf));
+ if (!isDefaultDevice) {
+ deviceId = deviceIds[userDeviceIndex];
+ }
- if (isInput)
- {
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- " Input device: %s %s", devManf, devName);
- } else
- {
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- " Output device: %s %s", devManf, devName);
- }
+ // Obtain device name and manufacturer for logging.
+ // Also use this as a test to ensure a user-set device ID is valid.
+ char devName[128];
+ char devManf[128];
+ memset(devName, 0, sizeof(devName));
+ memset(devManf, 0, sizeof(devManf));
+
+ propertyAddress.mSelector = kAudioDevicePropertyDeviceName;
+ propertyAddress.mScope = deviceScope;
+ propertyAddress.mElement = 0;
+ size = sizeof(devName);
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(deviceId, &propertyAddress,
+ 0, NULL, &size, devName));
+
+ propertyAddress.mSelector = kAudioDevicePropertyDeviceManufacturer;
+ size = sizeof(devManf);
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(deviceId, &propertyAddress,
+ 0, NULL, &size, devManf));
+
+ if (isInput) {
+ WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, " Input device: %s %s",
+ devManf, devName);
+ } else {
+ WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, " Output device: %s %s",
+ devManf, devName);
+ }
- return 0;
+ return 0;
}
-OSStatus AudioDeviceMac::SetDesiredPlayoutFormat()
-{
- // Our preferred format to work with.
- _outDesiredFormat.mSampleRate = N_PLAY_SAMPLES_PER_SEC;
- _outDesiredFormat.mChannelsPerFrame = _playChannels;
+OSStatus AudioDeviceMac::SetDesiredPlayoutFormat() {
+ // Our preferred format to work with.
+ _outDesiredFormat.mSampleRate = N_PLAY_SAMPLES_PER_SEC;
+ _outDesiredFormat.mChannelsPerFrame = _playChannels;
- if (_ptrAudioBuffer)
- {
- // Update audio buffer with the selected parameters.
- _ptrAudioBuffer->SetPlayoutSampleRate(N_PLAY_SAMPLES_PER_SEC);
- _ptrAudioBuffer->SetPlayoutChannels((uint8_t) _playChannels);
- }
+ if (_ptrAudioBuffer) {
+ // Update audio buffer with the selected parameters.
+ _ptrAudioBuffer->SetPlayoutSampleRate(N_PLAY_SAMPLES_PER_SEC);
+ _ptrAudioBuffer->SetPlayoutChannels((uint8_t)_playChannels);
+ }
- _renderDelayOffsetSamples = _renderBufSizeSamples - N_BUFFERS_OUT *
- ENGINE_PLAY_BUF_SIZE_IN_SAMPLES * _outDesiredFormat.mChannelsPerFrame;
+ _renderDelayOffsetSamples = _renderBufSizeSamples -
+ N_BUFFERS_OUT * ENGINE_PLAY_BUF_SIZE_IN_SAMPLES *
+ _outDesiredFormat.mChannelsPerFrame;
- _outDesiredFormat.mBytesPerPacket = _outDesiredFormat.mChannelsPerFrame *
- sizeof(SInt16);
- // In uncompressed audio, a packet is one frame.
- _outDesiredFormat.mFramesPerPacket = 1;
- _outDesiredFormat.mBytesPerFrame = _outDesiredFormat.mChannelsPerFrame *
- sizeof(SInt16);
- _outDesiredFormat.mBitsPerChannel = sizeof(SInt16) * 8;
+ _outDesiredFormat.mBytesPerPacket =
+ _outDesiredFormat.mChannelsPerFrame * sizeof(SInt16);
+ // In uncompressed audio, a packet is one frame.
+ _outDesiredFormat.mFramesPerPacket = 1;
+ _outDesiredFormat.mBytesPerFrame =
+ _outDesiredFormat.mChannelsPerFrame * sizeof(SInt16);
+ _outDesiredFormat.mBitsPerChannel = sizeof(SInt16) * 8;
- _outDesiredFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger |
- kLinearPCMFormatFlagIsPacked;
+ _outDesiredFormat.mFormatFlags =
+ kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
#ifdef WEBRTC_ARCH_BIG_ENDIAN
- _outDesiredFormat.mFormatFlags |= kLinearPCMFormatFlagIsBigEndian;
+ _outDesiredFormat.mFormatFlags |= kLinearPCMFormatFlagIsBigEndian;
#endif
- _outDesiredFormat.mFormatID = kAudioFormatLinearPCM;
-
- OSStatus err = noErr;
- WEBRTC_CA_RETURN_ON_ERR(AudioConverterNew(&_outDesiredFormat,
- &_outStreamFormat,
- &_renderConverter));
-
- // Try to set buffer size to desired value (_playBufDelayFixed).
- UInt32 bufByteCount = static_cast<UInt32> ((_outStreamFormat.mSampleRate /
- 1000.0) *
- _playBufDelayFixed *
- _outStreamFormat.mChannelsPerFrame *
- sizeof(Float32));
- if (_outStreamFormat.mFramesPerPacket != 0)
- {
- if (bufByteCount % _outStreamFormat.mFramesPerPacket != 0)
- {
- bufByteCount = (static_cast<UInt32> (bufByteCount /
- _outStreamFormat.mFramesPerPacket) + 1) *
- _outStreamFormat.mFramesPerPacket;
- }
- }
-
- // Ensure the buffer size is within the range provided by the device.
- AudioObjectPropertyAddress propertyAddress =
- {kAudioDevicePropertyDataSource,
- kAudioDevicePropertyScopeOutput,
- 0};
- propertyAddress.mSelector = kAudioDevicePropertyBufferSizeRange;
- AudioValueRange range;
- UInt32 size = sizeof(range);
- WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_outputDeviceID,
- &propertyAddress,
- 0,
- NULL,
- &size,
- &range));
- if (range.mMinimum > bufByteCount)
- {
- bufByteCount = range.mMinimum;
- } else if (range.mMaximum < bufByteCount)
- {
- bufByteCount = range.mMaximum;
+ _outDesiredFormat.mFormatID = kAudioFormatLinearPCM;
+
+ OSStatus err = noErr;
+ WEBRTC_CA_RETURN_ON_ERR(AudioConverterNew(
+ &_outDesiredFormat, &_outStreamFormat, &_renderConverter));
+
+ // Try to set buffer size to desired value (_playBufDelayFixed).
+ UInt32 bufByteCount = static_cast<UInt32>(
+ (_outStreamFormat.mSampleRate / 1000.0) * _playBufDelayFixed *
+ _outStreamFormat.mChannelsPerFrame * sizeof(Float32));
+ if (_outStreamFormat.mFramesPerPacket != 0) {
+ if (bufByteCount % _outStreamFormat.mFramesPerPacket != 0) {
+ bufByteCount = (static_cast<UInt32>(bufByteCount /
+ _outStreamFormat.mFramesPerPacket) +
+ 1) *
+ _outStreamFormat.mFramesPerPacket;
}
+ }
- propertyAddress.mSelector = kAudioDevicePropertyBufferSize;
- size = sizeof(bufByteCount);
- WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(_outputDeviceID,
- &propertyAddress,
- 0,
- NULL,
- size,
- &bufByteCount));
-
- // Get render device latency.
- propertyAddress.mSelector = kAudioDevicePropertyLatency;
- UInt32 latency = 0;
- size = sizeof(UInt32);
- WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_outputDeviceID,
- &propertyAddress,
- 0,
- NULL,
- &size,
- &latency));
- _renderLatencyUs = static_cast<uint32_t> ((1.0e6 * latency) /
- _outStreamFormat.mSampleRate);
-
- // Get render stream latency.
- propertyAddress.mSelector = kAudioDevicePropertyStreams;
- AudioStreamID stream = 0;
- size = sizeof(AudioStreamID);
- WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_outputDeviceID,
- &propertyAddress,
- 0,
- NULL,
- &size,
- &stream));
- propertyAddress.mSelector = kAudioStreamPropertyLatency;
- size = sizeof(UInt32);
- latency = 0;
- WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_outputDeviceID,
- &propertyAddress,
- 0,
- NULL,
- &size,
- &latency));
- _renderLatencyUs += static_cast<uint32_t> ((1.0e6 * latency) /
- _outStreamFormat.mSampleRate);
+ // Ensure the buffer size is within the range provided by the device.
+ AudioObjectPropertyAddress propertyAddress = {
+ kAudioDevicePropertyDataSource, kAudioDevicePropertyScopeOutput, 0};
+ propertyAddress.mSelector = kAudioDevicePropertyBufferSizeRange;
+ AudioValueRange range;
+ UInt32 size = sizeof(range);
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+ _outputDeviceID, &propertyAddress, 0, NULL, &size, &range));
+ if (range.mMinimum > bufByteCount) {
+ bufByteCount = range.mMinimum;
+ } else if (range.mMaximum < bufByteCount) {
+ bufByteCount = range.mMaximum;
+ }
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- " initial playout status: _renderDelayOffsetSamples=%d,"
- " _renderDelayUs=%d, _renderLatencyUs=%d",
- _renderDelayOffsetSamples, _renderDelayUs, _renderLatencyUs);
- return 0;
+ propertyAddress.mSelector = kAudioDevicePropertyBufferSize;
+ size = sizeof(bufByteCount);
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(
+ _outputDeviceID, &propertyAddress, 0, NULL, size, &bufByteCount));
+
+ // Get render device latency.
+ propertyAddress.mSelector = kAudioDevicePropertyLatency;
+ UInt32 latency = 0;
+ size = sizeof(UInt32);
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+ _outputDeviceID, &propertyAddress, 0, NULL, &size, &latency));
+ _renderLatencyUs =
+ static_cast<uint32_t>((1.0e6 * latency) / _outStreamFormat.mSampleRate);
+
+ // Get render stream latency.
+ propertyAddress.mSelector = kAudioDevicePropertyStreams;
+ AudioStreamID stream = 0;
+ size = sizeof(AudioStreamID);
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+ _outputDeviceID, &propertyAddress, 0, NULL, &size, &stream));
+ propertyAddress.mSelector = kAudioStreamPropertyLatency;
+ size = sizeof(UInt32);
+ latency = 0;
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+ _outputDeviceID, &propertyAddress, 0, NULL, &size, &latency));
+ _renderLatencyUs +=
+ static_cast<uint32_t>((1.0e6 * latency) / _outStreamFormat.mSampleRate);
+
+ WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+ " initial playout status: _renderDelayOffsetSamples=%d,"
+ " _renderDelayUs=%d, _renderLatencyUs=%d",
+ _renderDelayOffsetSamples, _renderDelayUs, _renderLatencyUs);
+ return 0;
}
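// ---------------------------------------------------------------------------
// The desired-format setup above amounts to describing interleaved 16-bit
// signed linear PCM. A compact sketch of the same AudioStreamBasicDescription
// (sample rate and channel count are hypothetical parameters):
#include <CoreAudio/CoreAudio.h>

static AudioStreamBasicDescription MakeInt16PcmFormat(Float64 sampleRate,
                                                      UInt32 channels) {
  AudioStreamBasicDescription fmt = {};
  fmt.mSampleRate = sampleRate;
  fmt.mChannelsPerFrame = channels;
  fmt.mBitsPerChannel = sizeof(SInt16) * 8;
  fmt.mBytesPerFrame = channels * sizeof(SInt16);
  fmt.mFramesPerPacket = 1;  // uncompressed audio: one frame per packet
  fmt.mBytesPerPacket = fmt.mBytesPerFrame;
  fmt.mFormatID = kAudioFormatLinearPCM;
  fmt.mFormatFlags =
      kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
#ifdef WEBRTC_ARCH_BIG_ENDIAN
  fmt.mFormatFlags |= kLinearPCMFormatFlagIsBigEndian;
#endif
  return fmt;
}
// ---------------------------------------------------------------------------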
OSStatus AudioDeviceMac::objectListenerProc(
AudioObjectID objectId,
UInt32 numberAddresses,
const AudioObjectPropertyAddress addresses[],
- void* clientData)
-{
- AudioDeviceMac *ptrThis = (AudioDeviceMac *) clientData;
- RTC_DCHECK(ptrThis != NULL);
+ void* clientData) {
+ AudioDeviceMac* ptrThis = (AudioDeviceMac*)clientData;
+ RTC_DCHECK(ptrThis != NULL);
- ptrThis->implObjectListenerProc(objectId, numberAddresses, addresses);
+ ptrThis->implObjectListenerProc(objectId, numberAddresses, addresses);
- // AudioObjectPropertyListenerProc functions are supposed to return 0
- return 0;
+ // AudioObjectPropertyListenerProc functions are supposed to return 0
+ return 0;
}
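// ---------------------------------------------------------------------------
// objectListenerProc is the usual C-callback trampoline: the |this| pointer
// travels through the void* clientData argument and the static function
// forwards to the instance. A minimal sketch of the pattern (Handler and the
// global scope here are illustrative choices, not this module's types):
#include <CoreAudio/CoreAudio.h>

struct Handler {
  static OSStatus Trampoline(AudioObjectID id, UInt32 n,
                             const AudioObjectPropertyAddress addrs[],
                             void* clientData) {
    // Recover the instance and forward; listener procs must return 0.
    static_cast<Handler*>(clientData)->OnChange(id, n, addrs);
    return 0;
  }
  void OnChange(AudioObjectID, UInt32, const AudioObjectPropertyAddress[]) {}
};

static OSStatus Listen(AudioObjectID device, Handler* h) {
  AudioObjectPropertyAddress addr = {kAudioDevicePropertyStreamFormat,
                                     kAudioObjectPropertyScopeGlobal, 0};
  return AudioObjectAddPropertyListener(device, &addr, &Handler::Trampoline,
                                        h);
}
// ---------------------------------------------------------------------------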
OSStatus AudioDeviceMac::implObjectListenerProc(
const AudioObjectID objectId,
const UInt32 numberAddresses,
- const AudioObjectPropertyAddress addresses[])
-{
- WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
- "AudioDeviceMac::implObjectListenerProc()");
-
- for (UInt32 i = 0; i < numberAddresses; i++)
- {
- if (addresses[i].mSelector == kAudioHardwarePropertyDevices)
- {
- HandleDeviceChange();
- } else if (addresses[i].mSelector == kAudioDevicePropertyStreamFormat)
- {
- HandleStreamFormatChange(objectId, addresses[i]);
- } else if (addresses[i].mSelector == kAudioDevicePropertyDataSource)
- {
- HandleDataSourceChange(objectId, addresses[i]);
- } else if (addresses[i].mSelector == kAudioDeviceProcessorOverload)
- {
- HandleProcessorOverload(addresses[i]);
- }
+ const AudioObjectPropertyAddress addresses[]) {
+ WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
+ "AudioDeviceMac::implObjectListenerProc()");
+
+ for (UInt32 i = 0; i < numberAddresses; i++) {
+ if (addresses[i].mSelector == kAudioHardwarePropertyDevices) {
+ HandleDeviceChange();
+ } else if (addresses[i].mSelector == kAudioDevicePropertyStreamFormat) {
+ HandleStreamFormatChange(objectId, addresses[i]);
+ } else if (addresses[i].mSelector == kAudioDevicePropertyDataSource) {
+ HandleDataSourceChange(objectId, addresses[i]);
+ } else if (addresses[i].mSelector == kAudioDeviceProcessorOverload) {
+ HandleProcessorOverload(addresses[i]);
}
+ }
- return 0;
+ return 0;
}
-int32_t AudioDeviceMac::HandleDeviceChange()
-{
- OSStatus err = noErr;
+int32_t AudioDeviceMac::HandleDeviceChange() {
+ OSStatus err = noErr;
- WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
- "kAudioHardwarePropertyDevices");
-
- // A device has changed. Check if our registered devices have been removed.
- // Ensure the devices have been initialized, meaning the IDs are valid.
- if (MicrophoneIsInitialized())
- {
- AudioObjectPropertyAddress propertyAddress = {
- kAudioDevicePropertyDeviceIsAlive,
- kAudioDevicePropertyScopeInput, 0 };
- UInt32 deviceIsAlive = 1;
- UInt32 size = sizeof(UInt32);
- err = AudioObjectGetPropertyData(_inputDeviceID, &propertyAddress, 0,
- NULL, &size, &deviceIsAlive);
-
- if (err == kAudioHardwareBadDeviceError || deviceIsAlive == 0)
- {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- "Capture device is not alive (probably removed)");
- AtomicSet32(&_captureDeviceIsAlive, 0);
- _mixerManager.CloseMicrophone();
- if (_recError == 1)
- {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice,
- _id, " pending recording error exists");
- }
- _recError = 1; // triggers callback from module process thread
- } else if (err != noErr)
- {
- logCAMsg(kTraceError, kTraceAudioDevice, _id,
- "Error in AudioDeviceGetProperty()", (const char*) &err);
- return -1;
- }
+ WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
+ "kAudioHardwarePropertyDevices");
+
+ // A device has changed. Check if our registered devices have been removed.
+ // Ensure the devices have been initialized, meaning the IDs are valid.
+ if (MicrophoneIsInitialized()) {
+ AudioObjectPropertyAddress propertyAddress = {
+ kAudioDevicePropertyDeviceIsAlive, kAudioDevicePropertyScopeInput, 0};
+ UInt32 deviceIsAlive = 1;
+ UInt32 size = sizeof(UInt32);
+ err = AudioObjectGetPropertyData(_inputDeviceID, &propertyAddress, 0, NULL,
+ &size, &deviceIsAlive);
+
+ if (err == kAudioHardwareBadDeviceError || deviceIsAlive == 0) {
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+ "Capture device is not alive (probably removed)");
+ AtomicSet32(&_captureDeviceIsAlive, 0);
+ _mixerManager.CloseMicrophone();
+ if (_recError == 1) {
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+ " pending recording error exists");
+ }
+ _recError = 1; // triggers callback from module process thread
+ } else if (err != noErr) {
+ logCAMsg(kTraceError, kTraceAudioDevice, _id,
+ "Error in AudioDeviceGetProperty()", (const char*)&err);
+ return -1;
}
+ }
- if (SpeakerIsInitialized())
- {
- AudioObjectPropertyAddress propertyAddress = {
- kAudioDevicePropertyDeviceIsAlive,
- kAudioDevicePropertyScopeOutput, 0 };
- UInt32 deviceIsAlive = 1;
- UInt32 size = sizeof(UInt32);
- err = AudioObjectGetPropertyData(_outputDeviceID, &propertyAddress, 0,
- NULL, &size, &deviceIsAlive);
-
- if (err == kAudioHardwareBadDeviceError || deviceIsAlive == 0)
- {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- "Render device is not alive (probably removed)");
- AtomicSet32(&_renderDeviceIsAlive, 0);
- _mixerManager.CloseSpeaker();
- if (_playError == 1)
- {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice,
- _id, " pending playout error exists");
- }
- _playError = 1; // triggers callback from module process thread
- } else if (err != noErr)
- {
- logCAMsg(kTraceError, kTraceAudioDevice, _id,
- "Error in AudioDeviceGetProperty()", (const char*) &err);
- return -1;
- }
+ if (SpeakerIsInitialized()) {
+ AudioObjectPropertyAddress propertyAddress = {
+ kAudioDevicePropertyDeviceIsAlive, kAudioDevicePropertyScopeOutput, 0};
+ UInt32 deviceIsAlive = 1;
+ UInt32 size = sizeof(UInt32);
+ err = AudioObjectGetPropertyData(_outputDeviceID, &propertyAddress, 0, NULL,
+ &size, &deviceIsAlive);
+
+ if (err == kAudioHardwareBadDeviceError || deviceIsAlive == 0) {
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+ "Render device is not alive (probably removed)");
+ AtomicSet32(&_renderDeviceIsAlive, 0);
+ _mixerManager.CloseSpeaker();
+ if (_playError == 1) {
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+ " pending playout error exists");
+ }
+ _playError = 1; // triggers callback from module process thread
+ } else if (err != noErr) {
+ logCAMsg(kTraceError, kTraceAudioDevice, _id,
+ "Error in AudioDeviceGetProperty()", (const char*)&err);
+ return -1;
}
+ }
- return 0;
+ return 0;
}
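// ---------------------------------------------------------------------------
// The liveness probe above, in isolation: query
// kAudioDevicePropertyDeviceIsAlive and treat kAudioHardwareBadDeviceError
// the same as "not alive" (the device was likely unplugged). Unlike the code
// above, this sketch folds other errors into a false result.
#include <CoreAudio/CoreAudio.h>

static bool DeviceIsAlive(AudioDeviceID device,
                          AudioObjectPropertyScope scope) {
  AudioObjectPropertyAddress addr = {kAudioDevicePropertyDeviceIsAlive, scope,
                                     0};
  UInt32 alive = 1;
  UInt32 size = sizeof(alive);
  OSStatus err =
      AudioObjectGetPropertyData(device, &addr, 0, NULL, &size, &alive);
  if (err == kAudioHardwareBadDeviceError || alive == 0)
    return false;
  return err == noErr;
}
// ---------------------------------------------------------------------------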
int32_t AudioDeviceMac::HandleStreamFormatChange(
const AudioObjectID objectId,
- const AudioObjectPropertyAddress propertyAddress)
-{
- OSStatus err = noErr;
+ const AudioObjectPropertyAddress propertyAddress) {
+ OSStatus err = noErr;
- WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
- "Stream format changed");
+ WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, "Stream format changed");
- if (objectId != _inputDeviceID && objectId != _outputDeviceID)
- {
- return 0;
- }
-
- // Get the new device format
- AudioStreamBasicDescription streamFormat;
- UInt32 size = sizeof(streamFormat);
- WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(objectId,
- &propertyAddress, 0, NULL, &size, &streamFormat));
-
- if (streamFormat.mFormatID != kAudioFormatLinearPCM)
- {
- logCAMsg(kTraceError, kTraceAudioDevice, _id,
- "Unacceptable input stream format -> mFormatID",
- (const char *) &streamFormat.mFormatID);
- return -1;
- }
-
- if (streamFormat.mChannelsPerFrame > N_DEVICE_CHANNELS)
- {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- "Too many channels on device (mChannelsPerFrame = %d)",
- streamFormat.mChannelsPerFrame);
- return -1;
- }
+ if (objectId != _inputDeviceID && objectId != _outputDeviceID) {
+ return 0;
+ }
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- "Stream format:");
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- "mSampleRate = %f, mChannelsPerFrame = %u",
- streamFormat.mSampleRate, streamFormat.mChannelsPerFrame);
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- "mBytesPerPacket = %u, mFramesPerPacket = %u",
- streamFormat.mBytesPerPacket, streamFormat.mFramesPerPacket);
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- "mBytesPerFrame = %u, mBitsPerChannel = %u",
- streamFormat.mBytesPerFrame, streamFormat.mBitsPerChannel);
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- "mFormatFlags = %u",
- streamFormat.mFormatFlags);
- logCAMsg(kTraceInfo, kTraceAudioDevice, _id, "mFormatID",
- (const char *) &streamFormat.mFormatID);
-
- if (propertyAddress.mScope == kAudioDevicePropertyScopeInput)
- {
- const int io_block_size_samples = streamFormat.mChannelsPerFrame *
- streamFormat.mSampleRate / 100 * N_BLOCKS_IO;
- if (io_block_size_samples > _captureBufSizeSamples)
- {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- "Input IO block size (%d) is larger than ring buffer (%u)",
- io_block_size_samples, _captureBufSizeSamples);
- return -1;
+ // Get the new device format
+ AudioStreamBasicDescription streamFormat;
+ UInt32 size = sizeof(streamFormat);
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+ objectId, &propertyAddress, 0, NULL, &size, &streamFormat));
- }
+ if (streamFormat.mFormatID != kAudioFormatLinearPCM) {
+ logCAMsg(kTraceError, kTraceAudioDevice, _id,
+ "Unacceptable input stream format -> mFormatID",
+ (const char*)&streamFormat.mFormatID);
+ return -1;
+ }
- memcpy(&_inStreamFormat, &streamFormat, sizeof(streamFormat));
-
- if (_inStreamFormat.mChannelsPerFrame >= 2 && (_recChannels == 2))
- {
- _inDesiredFormat.mChannelsPerFrame = 2;
- } else
- {
- // Disable stereo recording when we only have one channel on the device.
- _inDesiredFormat.mChannelsPerFrame = 1;
- _recChannels = 1;
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- "Stereo recording unavailable on this device");
- }
+ if (streamFormat.mChannelsPerFrame > N_DEVICE_CHANNELS) {
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+ "Too many channels on device (mChannelsPerFrame = %d)",
+ streamFormat.mChannelsPerFrame);
+ return -1;
+ }
- if (_ptrAudioBuffer)
- {
- // Update audio buffer with the selected parameters
- _ptrAudioBuffer->SetRecordingSampleRate(N_REC_SAMPLES_PER_SEC);
- _ptrAudioBuffer->SetRecordingChannels((uint8_t) _recChannels);
- }
+ WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "Stream format:");
+ WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+ "mSampleRate = %f, mChannelsPerFrame = %u",
+ streamFormat.mSampleRate, streamFormat.mChannelsPerFrame);
+ WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+ "mBytesPerPacket = %u, mFramesPerPacket = %u",
+ streamFormat.mBytesPerPacket, streamFormat.mFramesPerPacket);
+ WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+ "mBytesPerFrame = %u, mBitsPerChannel = %u",
+ streamFormat.mBytesPerFrame, streamFormat.mBitsPerChannel);
+ WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "mFormatFlags = %u",
+ streamFormat.mFormatFlags);
+ logCAMsg(kTraceInfo, kTraceAudioDevice, _id, "mFormatID",
+ (const char*)&streamFormat.mFormatID);
+
+ if (propertyAddress.mScope == kAudioDevicePropertyScopeInput) {
+ const int io_block_size_samples = streamFormat.mChannelsPerFrame *
+ streamFormat.mSampleRate / 100 *
+ N_BLOCKS_IO;
+ if (io_block_size_samples > _captureBufSizeSamples) {
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+ "Input IO block size (%d) is larger than ring buffer (%u)",
+ io_block_size_samples, _captureBufSizeSamples);
+ return -1;
+ }
+
+ memcpy(&_inStreamFormat, &streamFormat, sizeof(streamFormat));
+
+ if (_inStreamFormat.mChannelsPerFrame >= 2 && (_recChannels == 2)) {
+ _inDesiredFormat.mChannelsPerFrame = 2;
+ } else {
+ // Disable stereo recording when we only have one channel on the device.
+ _inDesiredFormat.mChannelsPerFrame = 1;
+ _recChannels = 1;
+ WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+ "Stereo recording unavailable on this device");
+ }
+
+ if (_ptrAudioBuffer) {
+ // Update audio buffer with the selected parameters
+ _ptrAudioBuffer->SetRecordingSampleRate(N_REC_SAMPLES_PER_SEC);
+ _ptrAudioBuffer->SetRecordingChannels((uint8_t)_recChannels);
+ }
+
+ // Recreate the converter with the new format
+ // TODO(xians): make this thread safe
+ WEBRTC_CA_RETURN_ON_ERR(AudioConverterDispose(_captureConverter));
+
+ WEBRTC_CA_RETURN_ON_ERR(AudioConverterNew(&streamFormat, &_inDesiredFormat,
+ &_captureConverter));
+ } else {
+ memcpy(&_outStreamFormat, &streamFormat, sizeof(streamFormat));
- // Recreate the converter with the new format
- // TODO(xians): make this thread safe
- WEBRTC_CA_RETURN_ON_ERR(AudioConverterDispose(_captureConverter));
-
- WEBRTC_CA_RETURN_ON_ERR(AudioConverterNew(&streamFormat, &_inDesiredFormat,
- &_captureConverter));
- } else
- {
- memcpy(&_outStreamFormat, &streamFormat, sizeof(streamFormat));
-
- // Our preferred format to work with
- if (_outStreamFormat.mChannelsPerFrame < 2)
- {
- _playChannels = 1;
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- "Stereo playout unavailable on this device");
- }
- WEBRTC_CA_RETURN_ON_ERR(SetDesiredPlayoutFormat());
+ // Our preferred format to work with
+ if (_outStreamFormat.mChannelsPerFrame < 2) {
+ _playChannels = 1;
+ WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+ "Stereo playout unavailable on this device");
}
- return 0;
+ WEBRTC_CA_RETURN_ON_ERR(SetDesiredPlayoutFormat());
+ }
+ return 0;
}
int32_t AudioDeviceMac::HandleDataSourceChange(
const AudioObjectID objectId,
- const AudioObjectPropertyAddress propertyAddress)
-{
- OSStatus err = noErr;
+ const AudioObjectPropertyAddress propertyAddress) {
+ OSStatus err = noErr;
- if (_macBookPro && propertyAddress.mScope
- == kAudioDevicePropertyScopeOutput)
- {
- WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
- "Data source changed");
-
- _macBookProPanRight = false;
- UInt32 dataSource = 0;
- UInt32 size = sizeof(UInt32);
- WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(objectId,
- &propertyAddress, 0, NULL, &size, &dataSource));
- if (dataSource == 'ispk')
- {
- _macBookProPanRight = true;
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- "MacBook Pro using internal speakers; stereo panning right");
- } else
- {
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- "MacBook Pro not using internal speakers");
- }
+ if (_macBookPro &&
+ propertyAddress.mScope == kAudioDevicePropertyScopeOutput) {
+ WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, "Data source changed");
+
+ _macBookProPanRight = false;
+ UInt32 dataSource = 0;
+ UInt32 size = sizeof(UInt32);
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+ objectId, &propertyAddress, 0, NULL, &size, &dataSource));
+ if (dataSource == 'ispk') {
+ _macBookProPanRight = true;
+ WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+ "MacBook Pro using internal speakers; stereo panning right");
+ } else {
+ WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+ "MacBook Pro not using internal speakers");
}
+ }
- return 0;
+ return 0;
}
int32_t AudioDeviceMac::HandleProcessorOverload(
- const AudioObjectPropertyAddress propertyAddress)
-{
- // TODO(xians): we probably want to notify the user in some way of the
- // overload. However, the Windows interpretations of these errors seem to
- // be more severe than what ProcessorOverload is thrown for.
- //
- // We don't log the notification, as it's sent from the HAL's IO thread. We
- // don't want to slow it down even further.
- if (propertyAddress.mScope == kAudioDevicePropertyScopeInput)
- {
- //WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "Capture processor
- // overload");
- //_callback->ProblemIsReported(
- // SndCardStreamObserver::ERecordingProblem);
- } else
- {
- //WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- // "Render processor overload");
- //_callback->ProblemIsReported(
- // SndCardStreamObserver::EPlaybackProblem);
- }
+ const AudioObjectPropertyAddress propertyAddress) {
+ // TODO(xians): we probably want to notify the user in some way of the
+ // overload. However, the Windows interpretations of these errors seem to
+ // be more severe than what ProcessorOverload is thrown for.
+ //
+ // We don't log the notification, as it's sent from the HAL's IO thread. We
+ // don't want to slow it down even further.
+ if (propertyAddress.mScope == kAudioDevicePropertyScopeInput) {
+ // WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "Capture processor
+ // overload");
+ //_callback->ProblemIsReported(
+ // SndCardStreamObserver::ERecordingProblem);
+ } else {
+ // WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+ // "Render processor overload");
+ //_callback->ProblemIsReported(
+ // SndCardStreamObserver::EPlaybackProblem);
+ }
- return 0;
+ return 0;
}
// ============================================================================
// Thread Methods
// ============================================================================
-OSStatus AudioDeviceMac::deviceIOProc(AudioDeviceID, const AudioTimeStamp*,
+OSStatus AudioDeviceMac::deviceIOProc(AudioDeviceID,
+ const AudioTimeStamp*,
const AudioBufferList* inputData,
const AudioTimeStamp* inputTime,
AudioBufferList* outputData,
const AudioTimeStamp* outputTime,
- void *clientData)
-{
- AudioDeviceMac *ptrThis = (AudioDeviceMac *) clientData;
- RTC_DCHECK(ptrThis != NULL);
+ void* clientData) {
+ AudioDeviceMac* ptrThis = (AudioDeviceMac*)clientData;
+ RTC_DCHECK(ptrThis != NULL);
- ptrThis->implDeviceIOProc(inputData, inputTime, outputData, outputTime);
+ ptrThis->implDeviceIOProc(inputData, inputTime, outputData, outputTime);
- // AudioDeviceIOProc functions are supposed to return 0
- return 0;
+ // AudioDeviceIOProc functions are supposed to return 0
+ return 0;
}
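// A minimal standalone sketch of the trampoline pattern shared by the IO
// procs here: a static, C-callable function recovers the object from the
// opaque clientData pointer and forwards to a member function. The Handler
// type and all names below are hypothetical, for illustration only.

struct Handler {
  void OnIo() { /* per-instance work */ }

  // Registered with the C API; clientData carries |this|.
  static int Trampoline(void* client_data) {
    static_cast<Handler*>(client_data)->OnIo();
    return 0;  // AudioDeviceIOProc-style callbacks are expected to return 0.
  }
};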
OSStatus AudioDeviceMac::outConverterProc(AudioConverterRef,
- UInt32 *numberDataPackets,
- AudioBufferList *data,
- AudioStreamPacketDescription **,
- void *userData)
-{
- AudioDeviceMac *ptrThis = (AudioDeviceMac *) userData;
- RTC_DCHECK(ptrThis != NULL);
+ UInt32* numberDataPackets,
+ AudioBufferList* data,
+ AudioStreamPacketDescription**,
+ void* userData) {
+ AudioDeviceMac* ptrThis = (AudioDeviceMac*)userData;
+ RTC_DCHECK(ptrThis != NULL);
- return ptrThis->implOutConverterProc(numberDataPackets, data);
+ return ptrThis->implOutConverterProc(numberDataPackets, data);
}
-OSStatus AudioDeviceMac::inDeviceIOProc(AudioDeviceID, const AudioTimeStamp*,
+OSStatus AudioDeviceMac::inDeviceIOProc(AudioDeviceID,
+ const AudioTimeStamp*,
const AudioBufferList* inputData,
const AudioTimeStamp* inputTime,
AudioBufferList*,
- const AudioTimeStamp*, void* clientData)
-{
- AudioDeviceMac *ptrThis = (AudioDeviceMac *) clientData;
- RTC_DCHECK(ptrThis != NULL);
+ const AudioTimeStamp*,
+ void* clientData) {
+ AudioDeviceMac* ptrThis = (AudioDeviceMac*)clientData;
+ RTC_DCHECK(ptrThis != NULL);
- ptrThis->implInDeviceIOProc(inputData, inputTime);
+ ptrThis->implInDeviceIOProc(inputData, inputTime);
- // AudioDeviceIOProc functions are supposed to return 0
- return 0;
+ // AudioDeviceIOProc functions are supposed to return 0
+ return 0;
}
OSStatus AudioDeviceMac::inConverterProc(
AudioConverterRef,
- UInt32 *numberDataPackets,
- AudioBufferList *data,
- AudioStreamPacketDescription ** /*dataPacketDescription*/,
- void *userData)
-{
- AudioDeviceMac *ptrThis = static_cast<AudioDeviceMac*> (userData);
- RTC_DCHECK(ptrThis != NULL);
-
- return ptrThis->implInConverterProc(numberDataPackets, data);
-}
-
-OSStatus AudioDeviceMac::implDeviceIOProc(const AudioBufferList *inputData,
- const AudioTimeStamp *inputTime,
- AudioBufferList *outputData,
- const AudioTimeStamp *outputTime)
-{
- OSStatus err = noErr;
- UInt64 outputTimeNs = AudioConvertHostTimeToNanos(outputTime->mHostTime);
- UInt64 nowNs = AudioConvertHostTimeToNanos(AudioGetCurrentHostTime());
-
- if (!_twoDevices && _recording)
- {
- implInDeviceIOProc(inputData, inputTime);
- }
+ UInt32* numberDataPackets,
+ AudioBufferList* data,
+ AudioStreamPacketDescription** /*dataPacketDescription*/,
+ void* userData) {
+ AudioDeviceMac* ptrThis = static_cast<AudioDeviceMac*>(userData);
+ RTC_DCHECK(ptrThis != NULL);
+
+ return ptrThis->implInConverterProc(numberDataPackets, data);
+}
+
+OSStatus AudioDeviceMac::implDeviceIOProc(const AudioBufferList* inputData,
+ const AudioTimeStamp* inputTime,
+ AudioBufferList* outputData,
+ const AudioTimeStamp* outputTime) {
+ OSStatus err = noErr;
+ UInt64 outputTimeNs = AudioConvertHostTimeToNanos(outputTime->mHostTime);
+ UInt64 nowNs = AudioConvertHostTimeToNanos(AudioGetCurrentHostTime());
+
+ if (!_twoDevices && _recording) {
+ implInDeviceIOProc(inputData, inputTime);
+ }
- // Check if we should close down audio device
- // Double-checked locking optimization to remove locking overhead
- if (_doStop)
- {
- _critSect.Enter();
- if (_doStop)
- {
- if (_twoDevices || (!_recording && !_playing))
- {
- // In the case of a shared device, the single driving ioProc
- // is stopped here
- WEBRTC_CA_LOG_ERR(AudioDeviceStop(_outputDeviceID,
- _deviceIOProcID));
- WEBRTC_CA_LOG_WARN(AudioDeviceDestroyIOProcID(_outputDeviceID,
- _deviceIOProcID));
- if (err == noErr)
- {
- WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice,
- _id, " Playout or shared device stopped");
- }
- }
-
- _doStop = false;
- _stopEvent.Set();
- _critSect.Leave();
- return 0;
+ // Check if we should close down audio device
+ // Double-checked locking optimization to remove locking overhead
+ if (_doStop) {
+ _critSect.Enter();
+ if (_doStop) {
+ if (_twoDevices || (!_recording && !_playing)) {
+ // In the case of a shared device, the single driving ioProc
+ // is stopped here
+ WEBRTC_CA_LOG_ERR(AudioDeviceStop(_outputDeviceID, _deviceIOProcID));
+ WEBRTC_CA_LOG_WARN(
+ AudioDeviceDestroyIOProcID(_outputDeviceID, _deviceIOProcID));
+ if (err == noErr) {
+ WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
+ " Playout or shared device stopped");
}
- _critSect.Leave();
- }
+ }
- if (!_playing)
- {
- // This can be the case when a shared device is capturing but not
- // rendering. We allow the checks above before returning to avoid a
- // timeout when capturing is stopped.
- return 0;
+ _doStop = false;
+ _stopEvent.Set();
+ _critSect.Leave();
+ return 0;
}
+ _critSect.Leave();
+ }
- RTC_DCHECK(_outStreamFormat.mBytesPerFrame != 0);
- UInt32 size = outputData->mBuffers->mDataByteSize
- / _outStreamFormat.mBytesPerFrame;
-
- // TODO(xians): signal an error somehow?
- err = AudioConverterFillComplexBuffer(_renderConverter, outConverterProc,
- this, &size, outputData, NULL);
- if (err != noErr)
- {
- if (err == 1)
- {
- // This is our own error.
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- " Error in AudioConverterFillComplexBuffer()");
- return 1;
- } else
- {
- logCAMsg(kTraceError, kTraceAudioDevice, _id,
- "Error in AudioConverterFillComplexBuffer()",
- (const char *) &err);
- return 1;
- }
+ if (!_playing) {
+ // This can be the case when a shared device is capturing but not
+ // rendering. We allow the checks above before returning to avoid a
+ // timeout when capturing is stopped.
+ return 0;
+ }
+
+ RTC_DCHECK(_outStreamFormat.mBytesPerFrame != 0);
+ UInt32 size =
+ outputData->mBuffers->mDataByteSize / _outStreamFormat.mBytesPerFrame;
+
+ // TODO(xians): signal an error somehow?
+ err = AudioConverterFillComplexBuffer(_renderConverter, outConverterProc,
+ this, &size, outputData, NULL);
+ if (err != noErr) {
+ if (err == 1) {
+ // This is our own error.
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+ " Error in AudioConverterFillComplexBuffer()");
+ return 1;
+ } else {
+ logCAMsg(kTraceError, kTraceAudioDevice, _id,
+ "Error in AudioConverterFillComplexBuffer()", (const char*)&err);
+ return 1;
}
+ }
- PaRingBufferSize bufSizeSamples =
- PaUtil_GetRingBufferReadAvailable(_paRenderBuffer);
+ PaRingBufferSize bufSizeSamples =
+ PaUtil_GetRingBufferReadAvailable(_paRenderBuffer);
- int32_t renderDelayUs = static_cast<int32_t> (1e-3 * (outputTimeNs - nowNs)
- + 0.5);
- renderDelayUs += static_cast<int32_t> ((1.0e6 * bufSizeSamples)
- / _outDesiredFormat.mChannelsPerFrame / _outDesiredFormat.mSampleRate
- + 0.5);
+ int32_t renderDelayUs =
+ static_cast<int32_t>(1e-3 * (outputTimeNs - nowNs) + 0.5);
+ renderDelayUs += static_cast<int32_t>(
+ (1.0e6 * bufSizeSamples) / _outDesiredFormat.mChannelsPerFrame /
+ _outDesiredFormat.mSampleRate +
+ 0.5);
- AtomicSet32(&_renderDelayUs, renderDelayUs);
+ AtomicSet32(&_renderDelayUs, renderDelayUs);
- return 0;
+ return 0;
}
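// The _doStop branch above is a double-checked flag: a cheap unlocked read
// on the real-time path, re-checked under the lock before the device is
// torn down. A minimal sketch of the same pattern using standard C++
// primitives instead of the WebRTC wrappers (hypothetical names):

#include <atomic>
#include <mutex>

class IoProcStopper {
 public:
  void RequestStop() {
    std::lock_guard<std::mutex> guard(lock_);
    stop_requested_ = true;
  }

  // Called from the real-time IO callback.
  bool CheckAndHandleStop() {
    if (!stop_requested_.load(std::memory_order_relaxed))
      return false;  // Fast path: no lock taken.
    std::lock_guard<std::mutex> guard(lock_);
    if (!stop_requested_)
      return false;  // Lost the race; the stop was already handled.
    // Stop the device here (cf. AudioDeviceStop() above).
    stop_requested_ = false;
    return true;
  }

 private:
  std::atomic<bool> stop_requested_{false};
  std::mutex lock_;
};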
-OSStatus AudioDeviceMac::implOutConverterProc(UInt32 *numberDataPackets,
- AudioBufferList *data)
-{
+OSStatus AudioDeviceMac::implOutConverterProc(UInt32* numberDataPackets,
+ AudioBufferList* data) {
RTC_DCHECK(data->mNumberBuffers == 1);
- PaRingBufferSize numSamples = *numberDataPackets
- * _outDesiredFormat.mChannelsPerFrame;
-
- data->mBuffers->mNumberChannels = _outDesiredFormat.mChannelsPerFrame;
- // Always give the converter as much as it wants, zero padding as required.
- data->mBuffers->mDataByteSize = *numberDataPackets
- * _outDesiredFormat.mBytesPerPacket;
- data->mBuffers->mData = _renderConvertData;
- memset(_renderConvertData, 0, sizeof(_renderConvertData));
-
- PaUtil_ReadRingBuffer(_paRenderBuffer, _renderConvertData, numSamples);
-
- kern_return_t kernErr = semaphore_signal_all(_renderSemaphore);
- if (kernErr != KERN_SUCCESS)
- {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- " semaphore_signal_all() error: %d", kernErr);
- return 1;
- }
+ PaRingBufferSize numSamples =
+ *numberDataPackets * _outDesiredFormat.mChannelsPerFrame;
- return 0;
+ data->mBuffers->mNumberChannels = _outDesiredFormat.mChannelsPerFrame;
+ // Always give the converter as much as it wants, zero padding as required.
+ data->mBuffers->mDataByteSize =
+ *numberDataPackets * _outDesiredFormat.mBytesPerPacket;
+ data->mBuffers->mData = _renderConvertData;
+ memset(_renderConvertData, 0, sizeof(_renderConvertData));
+
+ PaUtil_ReadRingBuffer(_paRenderBuffer, _renderConvertData, numSamples);
+
+ kern_return_t kernErr = semaphore_signal_all(_renderSemaphore);
+ if (kernErr != KERN_SUCCESS) {
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+ " semaphore_signal_all() error: %d", kernErr);
+ return 1;
+ }
+
+ return 0;
}
-OSStatus AudioDeviceMac::implInDeviceIOProc(const AudioBufferList *inputData,
- const AudioTimeStamp *inputTime)
-{
- OSStatus err = noErr;
- UInt64 inputTimeNs = AudioConvertHostTimeToNanos(inputTime->mHostTime);
- UInt64 nowNs = AudioConvertHostTimeToNanos(AudioGetCurrentHostTime());
-
- // Check if we should close down audio device
- // Double-checked locking optimization to remove locking overhead
- if (_doStopRec)
- {
- _critSect.Enter();
- if (_doStopRec)
- {
- // This will be signalled only when a shared device is not in use.
- WEBRTC_CA_LOG_ERR(AudioDeviceStop(_inputDeviceID, _inDeviceIOProcID));
- WEBRTC_CA_LOG_WARN(AudioDeviceDestroyIOProcID(_inputDeviceID,
- _inDeviceIOProcID));
- if (err == noErr)
- {
- WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice,
- _id, " Recording device stopped");
- }
-
- _doStopRec = false;
- _stopEventRec.Set();
- _critSect.Leave();
- return 0;
- }
- _critSect.Leave();
- }
+OSStatus AudioDeviceMac::implInDeviceIOProc(const AudioBufferList* inputData,
+ const AudioTimeStamp* inputTime) {
+ OSStatus err = noErr;
+ UInt64 inputTimeNs = AudioConvertHostTimeToNanos(inputTime->mHostTime);
+ UInt64 nowNs = AudioConvertHostTimeToNanos(AudioGetCurrentHostTime());
- if (!_recording)
- {
- // Allow above checks to avoid a timeout on stopping capture.
- return 0;
- }
+ // Check if we should close down audio device
+ // Double-checked locking optimization to remove locking overhead
+ if (_doStopRec) {
+ _critSect.Enter();
+ if (_doStopRec) {
+ // This will be signalled only when a shared device is not in use.
+ WEBRTC_CA_LOG_ERR(AudioDeviceStop(_inputDeviceID, _inDeviceIOProcID));
+ WEBRTC_CA_LOG_WARN(
+ AudioDeviceDestroyIOProcID(_inputDeviceID, _inDeviceIOProcID));
+ if (err == noErr) {
+ WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
+ " Recording device stopped");
+ }
- PaRingBufferSize bufSizeSamples =
- PaUtil_GetRingBufferReadAvailable(_paCaptureBuffer);
-
- int32_t captureDelayUs = static_cast<int32_t> (1e-3 * (nowNs - inputTimeNs)
- + 0.5);
- captureDelayUs
- += static_cast<int32_t> ((1.0e6 * bufSizeSamples)
- / _inStreamFormat.mChannelsPerFrame / _inStreamFormat.mSampleRate
- + 0.5);
-
- AtomicSet32(&_captureDelayUs, captureDelayUs);
-
- RTC_DCHECK(inputData->mNumberBuffers == 1);
- PaRingBufferSize numSamples = inputData->mBuffers->mDataByteSize
- * _inStreamFormat.mChannelsPerFrame / _inStreamFormat.mBytesPerPacket;
- PaUtil_WriteRingBuffer(_paCaptureBuffer, inputData->mBuffers->mData,
- numSamples);
-
- kern_return_t kernErr = semaphore_signal_all(_captureSemaphore);
- if (kernErr != KERN_SUCCESS)
- {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- " semaphore_signal_all() error: %d", kernErr);
+ _doStopRec = false;
+ _stopEventRec.Set();
+ _critSect.Leave();
+ return 0;
}
+ _critSect.Leave();
+ }
- return err;
-}
+ if (!_recording) {
+ // Allow above checks to avoid a timeout on stopping capture.
+ return 0;
+ }
-OSStatus AudioDeviceMac::implInConverterProc(UInt32 *numberDataPackets,
- AudioBufferList *data)
-{
- RTC_DCHECK(data->mNumberBuffers == 1);
- PaRingBufferSize numSamples = *numberDataPackets
- * _inStreamFormat.mChannelsPerFrame;
-
- while (PaUtil_GetRingBufferReadAvailable(_paCaptureBuffer) < numSamples)
- {
- mach_timespec_t timeout;
- timeout.tv_sec = 0;
- timeout.tv_nsec = TIMER_PERIOD_MS;
-
- kern_return_t kernErr = semaphore_timedwait(_captureSemaphore, timeout);
- if (kernErr == KERN_OPERATION_TIMED_OUT)
- {
- int32_t signal = AtomicGet32(&_captureDeviceIsAlive);
- if (signal == 0)
- {
- // The capture device is no longer alive; stop the worker thread.
- *numberDataPackets = 0;
- return 1;
- }
- } else if (kernErr != KERN_SUCCESS)
- {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- " semaphore_wait() error: %d", kernErr);
- }
- }
+ PaRingBufferSize bufSizeSamples =
+ PaUtil_GetRingBufferReadAvailable(_paCaptureBuffer);
- // Pass the read pointer directly to the converter to avoid a memcpy.
- void* dummyPtr;
- PaRingBufferSize dummySize;
- PaUtil_GetRingBufferReadRegions(_paCaptureBuffer, numSamples,
- &data->mBuffers->mData, &numSamples,
- &dummyPtr, &dummySize);
- PaUtil_AdvanceRingBufferReadIndex(_paCaptureBuffer, numSamples);
+ int32_t captureDelayUs =
+ static_cast<int32_t>(1e-3 * (nowNs - inputTimeNs) + 0.5);
+ captureDelayUs += static_cast<int32_t>((1.0e6 * bufSizeSamples) /
+ _inStreamFormat.mChannelsPerFrame /
+ _inStreamFormat.mSampleRate +
+ 0.5);
- data->mBuffers->mNumberChannels = _inStreamFormat.mChannelsPerFrame;
- *numberDataPackets = numSamples / _inStreamFormat.mChannelsPerFrame;
- data->mBuffers->mDataByteSize = *numberDataPackets
- * _inStreamFormat.mBytesPerPacket;
+ AtomicSet32(&_captureDelayUs, captureDelayUs);
- return 0;
+ RTC_DCHECK(inputData->mNumberBuffers == 1);
+ PaRingBufferSize numSamples = inputData->mBuffers->mDataByteSize *
+ _inStreamFormat.mChannelsPerFrame /
+ _inStreamFormat.mBytesPerPacket;
+ PaUtil_WriteRingBuffer(_paCaptureBuffer, inputData->mBuffers->mData,
+ numSamples);
+
+ kern_return_t kernErr = semaphore_signal_all(_captureSemaphore);
+ if (kernErr != KERN_SUCCESS) {
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+ " semaphore_signal_all() error: %d", kernErr);
+ }
+
+ return err;
}
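// Both IO procs estimate delay the same way: the host-clock age of the
// buffer timestamp plus the time needed to drain whatever is still queued
// in the PortAudio ring buffer. A worked standalone version (the function
// name is hypothetical; the arithmetic mirrors the code above):

#include <cstdint>
#include <cstdio>

int32_t EstimateDelayUs(uint64_t event_time_ns,
                        uint64_t now_ns,
                        int64_t buffered_samples,
                        double channels,
                        double sample_rate_hz) {
  // Nanoseconds of clock skew, rounded to microseconds.
  int32_t delay_us =
      static_cast<int32_t>(1e-3 * (now_ns - event_time_ns) + 0.5);
  // Queued samples expressed as capture/playout time.
  delay_us += static_cast<int32_t>(
      (1.0e6 * buffered_samples) / channels / sample_rate_hz + 0.5);
  return delay_us;
}

int main() {
  // 2 ms of clock skew plus 480 buffered stereo samples at 48 kHz (5 ms).
  std::printf("%d us\n", static_cast<int>(
                             EstimateDelayUs(0, 2000000, 480, 2.0, 48000.0)));
  // Prints: 7000 us
}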
-bool AudioDeviceMac::RunRender(void* ptrThis)
-{
- return static_cast<AudioDeviceMac*> (ptrThis)->RenderWorkerThread();
-}
-
-bool AudioDeviceMac::RenderWorkerThread()
-{
- PaRingBufferSize numSamples = ENGINE_PLAY_BUF_SIZE_IN_SAMPLES
- * _outDesiredFormat.mChannelsPerFrame;
- while (PaUtil_GetRingBufferWriteAvailable(_paRenderBuffer)
- - _renderDelayOffsetSamples < numSamples)
- {
- mach_timespec_t timeout;
- timeout.tv_sec = 0;
- timeout.tv_nsec = TIMER_PERIOD_MS;
-
- kern_return_t kernErr = semaphore_timedwait(_renderSemaphore, timeout);
- if (kernErr == KERN_OPERATION_TIMED_OUT)
- {
- int32_t signal = AtomicGet32(&_renderDeviceIsAlive);
- if (signal == 0)
- {
- // The render device is no longer alive; stop the worker thread.
- return false;
- }
- } else if (kernErr != KERN_SUCCESS)
- {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- " semaphore_timedwait() error: %d", kernErr);
- }
+OSStatus AudioDeviceMac::implInConverterProc(UInt32* numberDataPackets,
+ AudioBufferList* data) {
+ RTC_DCHECK(data->mNumberBuffers == 1);
+ PaRingBufferSize numSamples =
+ *numberDataPackets * _inStreamFormat.mChannelsPerFrame;
+
+ while (PaUtil_GetRingBufferReadAvailable(_paCaptureBuffer) < numSamples) {
+ mach_timespec_t timeout;
+ timeout.tv_sec = 0;
+ timeout.tv_nsec = TIMER_PERIOD_MS;
+
+ kern_return_t kernErr = semaphore_timedwait(_captureSemaphore, timeout);
+ if (kernErr == KERN_OPERATION_TIMED_OUT) {
+ int32_t signal = AtomicGet32(&_captureDeviceIsAlive);
+ if (signal == 0) {
+ // The capture device is no longer alive; stop the worker thread.
+ *numberDataPackets = 0;
+ return 1;
+ }
+ } else if (kernErr != KERN_SUCCESS) {
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+ " semaphore_wait() error: %d", kernErr);
}
+ }
- int8_t playBuffer[4 * ENGINE_PLAY_BUF_SIZE_IN_SAMPLES];
-
- if (!_ptrAudioBuffer)
- {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- " capture AudioBuffer is invalid");
+ // Pass the read pointer directly to the converter to avoid a memcpy.
+ void* dummyPtr;
+ PaRingBufferSize dummySize;
+ PaUtil_GetRingBufferReadRegions(_paCaptureBuffer, numSamples,
+ &data->mBuffers->mData, &numSamples,
+ &dummyPtr, &dummySize);
+ PaUtil_AdvanceRingBufferReadIndex(_paCaptureBuffer, numSamples);
+
+ data->mBuffers->mNumberChannels = _inStreamFormat.mChannelsPerFrame;
+ *numberDataPackets = numSamples / _inStreamFormat.mChannelsPerFrame;
+ data->mBuffers->mDataByteSize =
+ *numberDataPackets * _inStreamFormat.mBytesPerPacket;
+
+ return 0;
+}
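// implInConverterProc above blocks on a mach semaphore until the device
// callback has queued enough samples, and gives up if the capture device
// dies. A portable sketch of that wait loop, using std::condition_variable
// in place of semaphore_timedwait (names are illustrative; the producer is
// assumed to bump |available| and call notify_all()):

#include <atomic>
#include <chrono>
#include <condition_variable>
#include <mutex>

bool WaitForSamples(std::condition_variable& cv,
                    std::mutex& m,
                    std::atomic<long>& available,
                    long needed,
                    std::atomic<bool>& device_alive) {
  std::unique_lock<std::mutex> lock(m);
  while (available.load() < needed) {
    if (cv.wait_for(lock, std::chrono::milliseconds(40)) ==
        std::cv_status::timeout) {
      if (!device_alive.load())
        return false;  // Mirrors "*numberDataPackets = 0; return 1;" above.
    }
  }
  return true;
}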
+
+bool AudioDeviceMac::RunRender(void* ptrThis) {
+ return static_cast<AudioDeviceMac*>(ptrThis)->RenderWorkerThread();
+}
+
+bool AudioDeviceMac::RenderWorkerThread() {
+ PaRingBufferSize numSamples =
+ ENGINE_PLAY_BUF_SIZE_IN_SAMPLES * _outDesiredFormat.mChannelsPerFrame;
+ while (PaUtil_GetRingBufferWriteAvailable(_paRenderBuffer) -
+ _renderDelayOffsetSamples <
+ numSamples) {
+ mach_timespec_t timeout;
+ timeout.tv_sec = 0;
+ timeout.tv_nsec = TIMER_PERIOD_MS;
+
+ kern_return_t kernErr = semaphore_timedwait(_renderSemaphore, timeout);
+ if (kernErr == KERN_OPERATION_TIMED_OUT) {
+ int32_t signal = AtomicGet32(&_renderDeviceIsAlive);
+ if (signal == 0) {
+ // The render device is no longer alive; stop the worker thread.
return false;
+ }
+ } else if (kernErr != KERN_SUCCESS) {
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+ " semaphore_timedwait() error: %d", kernErr);
}
+ }
- // Ask for new PCM data to be played out using the AudioDeviceBuffer.
- uint32_t nSamples =
- _ptrAudioBuffer->RequestPlayoutData(ENGINE_PLAY_BUF_SIZE_IN_SAMPLES);
+ int8_t playBuffer[4 * ENGINE_PLAY_BUF_SIZE_IN_SAMPLES];
- nSamples = _ptrAudioBuffer->GetPlayoutData(playBuffer);
- if (nSamples != ENGINE_PLAY_BUF_SIZE_IN_SAMPLES)
- {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- " invalid number of output samples(%d)", nSamples);
- }
+ if (!_ptrAudioBuffer) {
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+ " capture AudioBuffer is invalid");
+ return false;
+ }
- uint32_t nOutSamples = nSamples * _outDesiredFormat.mChannelsPerFrame;
-
- SInt16 *pPlayBuffer = (SInt16 *) &playBuffer;
- if (_macBookProPanRight && (_playChannels == 2))
- {
- // Mix entirely into the right channel and zero the left channel.
- SInt32 sampleInt32 = 0;
- for (uint32_t sampleIdx = 0; sampleIdx < nOutSamples; sampleIdx
- += 2)
- {
- sampleInt32 = pPlayBuffer[sampleIdx];
- sampleInt32 += pPlayBuffer[sampleIdx + 1];
- sampleInt32 /= 2;
-
- if (sampleInt32 > 32767)
- {
- sampleInt32 = 32767;
- } else if (sampleInt32 < -32768)
- {
- sampleInt32 = -32768;
- }
-
- pPlayBuffer[sampleIdx] = 0;
- pPlayBuffer[sampleIdx + 1] = static_cast<SInt16> (sampleInt32);
- }
+ // Ask for new PCM data to be played out using the AudioDeviceBuffer.
+ uint32_t nSamples =
+ _ptrAudioBuffer->RequestPlayoutData(ENGINE_PLAY_BUF_SIZE_IN_SAMPLES);
+
+ nSamples = _ptrAudioBuffer->GetPlayoutData(playBuffer);
+ if (nSamples != ENGINE_PLAY_BUF_SIZE_IN_SAMPLES) {
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+ " invalid number of output samples(%d)", nSamples);
+ }
+
+ uint32_t nOutSamples = nSamples * _outDesiredFormat.mChannelsPerFrame;
+
+ SInt16* pPlayBuffer = (SInt16*)&playBuffer;
+ if (_macBookProPanRight && (_playChannels == 2)) {
+ // Mix entirely into the right channel and zero the left channel.
+ SInt32 sampleInt32 = 0;
+ for (uint32_t sampleIdx = 0; sampleIdx < nOutSamples; sampleIdx += 2) {
+ sampleInt32 = pPlayBuffer[sampleIdx];
+ sampleInt32 += pPlayBuffer[sampleIdx + 1];
+ sampleInt32 /= 2;
+
+ if (sampleInt32 > 32767) {
+ sampleInt32 = 32767;
+ } else if (sampleInt32 < -32768) {
+ sampleInt32 = -32768;
+ }
+
+ pPlayBuffer[sampleIdx] = 0;
+ pPlayBuffer[sampleIdx + 1] = static_cast<SInt16>(sampleInt32);
}
+ }
- PaUtil_WriteRingBuffer(_paRenderBuffer, pPlayBuffer, nOutSamples);
+ PaUtil_WriteRingBuffer(_paRenderBuffer, pPlayBuffer, nOutSamples);
- return true;
+ return true;
}
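// The MacBook Pro path above downmixes each interleaved L/R pair with
// 32-bit headroom, saturates to the int16 range, and writes the result to
// the right channel only, muting the left. The same logic as a standalone
// helper (hypothetical name):

#include <cstdint>

void PanRight(int16_t* interleaved_stereo, uint32_t total_samples) {
  for (uint32_t i = 0; i + 1 < total_samples; i += 2) {
    int32_t mixed = (static_cast<int32_t>(interleaved_stereo[i]) +
                     interleaved_stereo[i + 1]) /
                    2;
    if (mixed > 32767) {
      mixed = 32767;
    } else if (mixed < -32768) {
      mixed = -32768;
    }
    interleaved_stereo[i] = 0;                                // Left: muted.
    interleaved_stereo[i + 1] = static_cast<int16_t>(mixed);  // Right: mix.
  }
}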
-bool AudioDeviceMac::RunCapture(void* ptrThis)
-{
- return static_cast<AudioDeviceMac*> (ptrThis)->CaptureWorkerThread();
+bool AudioDeviceMac::RunCapture(void* ptrThis) {
+ return static_cast<AudioDeviceMac*>(ptrThis)->CaptureWorkerThread();
}
-bool AudioDeviceMac::CaptureWorkerThread()
-{
- OSStatus err = noErr;
- UInt32 noRecSamples = ENGINE_REC_BUF_SIZE_IN_SAMPLES
- * _inDesiredFormat.mChannelsPerFrame;
- SInt16 recordBuffer[noRecSamples];
- UInt32 size = ENGINE_REC_BUF_SIZE_IN_SAMPLES;
-
- AudioBufferList engineBuffer;
- engineBuffer.mNumberBuffers = 1; // Interleaved channels.
- engineBuffer.mBuffers->mNumberChannels = _inDesiredFormat.mChannelsPerFrame;
- engineBuffer.mBuffers->mDataByteSize = _inDesiredFormat.mBytesPerPacket
- * noRecSamples;
- engineBuffer.mBuffers->mData = recordBuffer;
-
- err = AudioConverterFillComplexBuffer(_captureConverter, inConverterProc,
- this, &size, &engineBuffer, NULL);
- if (err != noErr)
- {
- if (err == 1)
- {
- // This is our own error.
- return false;
- } else
- {
- logCAMsg(kTraceError, kTraceAudioDevice, _id,
- "Error in AudioConverterFillComplexBuffer()",
- (const char *) &err);
- return false;
- }
+bool AudioDeviceMac::CaptureWorkerThread() {
+ OSStatus err = noErr;
+ UInt32 noRecSamples =
+ ENGINE_REC_BUF_SIZE_IN_SAMPLES * _inDesiredFormat.mChannelsPerFrame;
+ SInt16 recordBuffer[noRecSamples];
+ UInt32 size = ENGINE_REC_BUF_SIZE_IN_SAMPLES;
+
+ AudioBufferList engineBuffer;
+ engineBuffer.mNumberBuffers = 1; // Interleaved channels.
+ engineBuffer.mBuffers->mNumberChannels = _inDesiredFormat.mChannelsPerFrame;
+ engineBuffer.mBuffers->mDataByteSize =
+ _inDesiredFormat.mBytesPerPacket * noRecSamples;
+ engineBuffer.mBuffers->mData = recordBuffer;
+
+ err = AudioConverterFillComplexBuffer(_captureConverter, inConverterProc,
+ this, &size, &engineBuffer, NULL);
+ if (err != noErr) {
+ if (err == 1) {
+ // This is our own error.
+ return false;
+ } else {
+ logCAMsg(kTraceError, kTraceAudioDevice, _id,
+ "Error in AudioConverterFillComplexBuffer()", (const char*)&err);
+ return false;
}
+ }
- // TODO(xians): what if the returned size is incorrect?
- if (size == ENGINE_REC_BUF_SIZE_IN_SAMPLES)
- {
- uint32_t currentMicLevel(0);
- uint32_t newMicLevel(0);
- int32_t msecOnPlaySide;
- int32_t msecOnRecordSide;
-
- int32_t captureDelayUs = AtomicGet32(&_captureDelayUs);
- int32_t renderDelayUs = AtomicGet32(&_renderDelayUs);
-
- msecOnPlaySide = static_cast<int32_t> (1e-3 * (renderDelayUs +
- _renderLatencyUs) + 0.5);
- msecOnRecordSide = static_cast<int32_t> (1e-3 * (captureDelayUs +
- _captureLatencyUs) +
- 0.5);
-
- if (!_ptrAudioBuffer)
- {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- " capture AudioBuffer is invalid");
- return false;
- }
+ // TODO(xians): what if the returned size is incorrect?
+ if (size == ENGINE_REC_BUF_SIZE_IN_SAMPLES) {
+ uint32_t currentMicLevel(0);
+ uint32_t newMicLevel(0);
+ int32_t msecOnPlaySide;
+ int32_t msecOnRecordSide;
- // store the recorded buffer (no action will be taken if the
- // #recorded samples is not a full buffer)
- _ptrAudioBuffer->SetRecordedBuffer((int8_t*) &recordBuffer,
- (uint32_t) size);
-
- if (AGC())
- {
- // store current mic level in the audio buffer if AGC is enabled
- if (MicrophoneVolume(currentMicLevel) == 0)
- {
- // this call does not affect the actual microphone volume
- _ptrAudioBuffer->SetCurrentMicLevel(currentMicLevel);
- }
- }
+ int32_t captureDelayUs = AtomicGet32(&_captureDelayUs);
+ int32_t renderDelayUs = AtomicGet32(&_renderDelayUs);
- _ptrAudioBuffer->SetVQEData(msecOnPlaySide, msecOnRecordSide, 0);
-
- _ptrAudioBuffer->SetTypingStatus(KeyPressed());
-
- // deliver recorded samples at specified sample rate, mic level etc.
- // to the observer using callback
- _ptrAudioBuffer->DeliverRecordedData();
-
- if (AGC())
- {
- newMicLevel = _ptrAudioBuffer->NewMicLevel();
- if (newMicLevel != 0)
- {
- // The VQE will only deliver non-zero microphone levels when
- // a change is needed.
- // Set this new mic level (received from the observer as return
- // value in the callback).
- WEBRTC_TRACE(kTraceStream, kTraceAudioDevice,
- _id, " AGC change of volume: old=%u => new=%u",
- currentMicLevel, newMicLevel);
- if (SetMicrophoneVolume(newMicLevel) == -1)
- {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " the required modification of the microphone "
- "volume failed");
- }
- }
+ msecOnPlaySide =
+ static_cast<int32_t>(1e-3 * (renderDelayUs + _renderLatencyUs) + 0.5);
+ msecOnRecordSide =
+ static_cast<int32_t>(1e-3 * (captureDelayUs + _captureLatencyUs) + 0.5);
+
+ if (!_ptrAudioBuffer) {
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+ " capture AudioBuffer is invalid");
+ return false;
+ }
+
+ // store the recorded buffer (no action will be taken if the
+ // #recorded samples is not a full buffer)
+ _ptrAudioBuffer->SetRecordedBuffer((int8_t*)&recordBuffer, (uint32_t)size);
+
+ if (AGC()) {
+ // Use mod to ensure we check the volume on the first pass.
+ if (get_mic_volume_counter_ms_ % kGetMicVolumeIntervalMs == 0) {
+ get_mic_volume_counter_ms_ = 0;
+ // store current mic level in the audio buffer if AGC is enabled
+ if (MicrophoneVolume(currentMicLevel) == 0) {
+ // this call does not affect the actual microphone volume
+ _ptrAudioBuffer->SetCurrentMicLevel(currentMicLevel);
}
+ }
+ get_mic_volume_counter_ms_ += kBufferSizeMs;
+ }
+
+ _ptrAudioBuffer->SetVQEData(msecOnPlaySide, msecOnRecordSide, 0);
+
+ _ptrAudioBuffer->SetTypingStatus(KeyPressed());
+
+ // deliver recorded samples at specified sample rate, mic level etc.
+ // to the observer using callback
+ _ptrAudioBuffer->DeliverRecordedData();
+
+ if (AGC()) {
+ newMicLevel = _ptrAudioBuffer->NewMicLevel();
+ if (newMicLevel != 0) {
+ // The VQE will only deliver non-zero microphone levels when
+ // a change is needed.
+ // Set this new mic level (received from the observer as return
+ // value in the callback).
+ WEBRTC_TRACE(kTraceStream, kTraceAudioDevice, _id,
+ " AGC change of volume: old=%u => new=%u",
+ currentMicLevel, newMicLevel);
+ if (SetMicrophoneVolume(newMicLevel) == -1) {
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+ " the required modification of the microphone "
+ "volume failed");
+ }
+ }
}
+ }
- return true;
+ return true;
}
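// The new counter in CaptureWorkerThread throttles MicrophoneVolume()
// queries: with 10 ms buffers and a 1000 ms interval, the HAL is asked for
// the volume on the first pass and then once every 100 passes. The counter
// logic in isolation (hypothetical MicVolumePoller; the constants shadow
// the values declared in the header below):

const int kBufferSizeMsSketch = 10;
const int kGetMicVolumeIntervalMsSketch = 1000;

struct MicVolumePoller {
  int counter_ms = 0;

  template <typename QueryFn>
  void MaybePoll(QueryFn query_volume) {
    // Modulo makes the very first pass (counter_ms == 0) poll immediately.
    if (counter_ms % kGetMicVolumeIntervalMsSketch == 0) {
      counter_ms = 0;  // Reset so the counter never overflows.
      query_volume();
    }
    counter_ms += kBufferSizeMsSketch;
  }
};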
bool AudioDeviceMac::KeyPressed() {
bool key_down = false;
// Loop through all Mac virtual key constant values.
- for (unsigned int key_index = 0;
- key_index < arraysize(prev_key_state_);
- ++key_index) {
- bool keyState = CGEventSourceKeyState(
- kCGEventSourceStateHIDSystemState,
- key_index);
+ for (unsigned int key_index = 0; key_index < arraysize(prev_key_state_);
+ ++key_index) {
+ bool keyState =
+ CGEventSourceKeyState(kCGEventSourceStateHIDSystemState, key_index);
// A false -> true change in keymap means a key is pressed.
key_down |= (keyState && !prev_key_state_[key_index]);
// Save current state.
diff --git a/webrtc/modules/audio_device/mac/audio_device_mac.h b/webrtc/modules/audio_device/mac/audio_device_mac.h
index f2b66b4d06..ca3a51997d 100644
--- a/webrtc/modules/audio_device/mac/audio_device_mac.h
+++ b/webrtc/modules/audio_device/mac/audio_device_mac.h
@@ -23,350 +23,359 @@
struct PaUtilRingBuffer;
-namespace webrtc
-{
+namespace rtc {
+class PlatformThread;
+} // namespace rtc
+
+namespace webrtc {
class EventWrapper;
-class ThreadWrapper;
const uint32_t N_REC_SAMPLES_PER_SEC = 48000;
const uint32_t N_PLAY_SAMPLES_PER_SEC = 48000;
-const uint32_t N_REC_CHANNELS = 1; // default is mono recording
-const uint32_t N_PLAY_CHANNELS = 2; // default is stereo playout
+const uint32_t N_REC_CHANNELS = 1; // default is mono recording
+const uint32_t N_PLAY_CHANNELS = 2; // default is stereo playout
const uint32_t N_DEVICE_CHANNELS = 64;
-const uint32_t ENGINE_REC_BUF_SIZE_IN_SAMPLES = (N_REC_SAMPLES_PER_SEC / 100);
-const uint32_t ENGINE_PLAY_BUF_SIZE_IN_SAMPLES = (N_PLAY_SAMPLES_PER_SEC / 100);
+const int kBufferSizeMs = 10;
+
+const uint32_t ENGINE_REC_BUF_SIZE_IN_SAMPLES =
+ N_REC_SAMPLES_PER_SEC * kBufferSizeMs / 1000;
+const uint32_t ENGINE_PLAY_BUF_SIZE_IN_SAMPLES =
+ N_PLAY_SAMPLES_PER_SEC * kBufferSizeMs / 1000;
const int N_BLOCKS_IO = 2;
-const int N_BUFFERS_IN = 2; // Must be at least N_BLOCKS_IO.
+const int N_BUFFERS_IN = 2; // Must be at least N_BLOCKS_IO.
const int N_BUFFERS_OUT = 3; // Must be at least N_BLOCKS_IO.
-const uint32_t TIMER_PERIOD_MS = (2 * 10 * N_BLOCKS_IO * 1000000);
+const uint32_t TIMER_PERIOD_MS = 2 * 10 * N_BLOCKS_IO * 1000000;
const uint32_t REC_BUF_SIZE_IN_SAMPLES =
ENGINE_REC_BUF_SIZE_IN_SAMPLES * N_DEVICE_CHANNELS * N_BUFFERS_IN;
const uint32_t PLAY_BUF_SIZE_IN_SAMPLES =
ENGINE_PLAY_BUF_SIZE_IN_SAMPLES * N_PLAY_CHANNELS * N_BUFFERS_OUT;
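// Worked values for the constants above, at the fixed 48 kHz engine rate:
//   ENGINE_REC_BUF_SIZE_IN_SAMPLES  = 48000 * 10 / 1000 = 480 (10 ms)
//   ENGINE_PLAY_BUF_SIZE_IN_SAMPLES = 48000 * 10 / 1000 = 480 (10 ms)
//   REC_BUF_SIZE_IN_SAMPLES         = 480 * 64 * 2      = 61440
//   PLAY_BUF_SIZE_IN_SAMPLES        = 480 * 2 * 3       = 2880
// Note that TIMER_PERIOD_MS (2 * 10 * 2 * 1000000 = 40000000) is assigned
// to mach_timespec_t::tv_nsec in the worker loops above, i.e. a 40 ms
// timeout despite the _MS suffix.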
-class AudioDeviceMac: public AudioDeviceGeneric
-{
-public:
- AudioDeviceMac(const int32_t id);
- ~AudioDeviceMac();
-
- // Retrieve the currently utilized audio layer
- virtual int32_t
- ActiveAudioLayer(AudioDeviceModule::AudioLayer& audioLayer) const;
-
- // Main initializaton and termination
- virtual int32_t Init();
- virtual int32_t Terminate();
- virtual bool Initialized() const;
-
- // Device enumeration
- virtual int16_t PlayoutDevices();
- virtual int16_t RecordingDevices();
- virtual int32_t PlayoutDeviceName(
- uint16_t index,
- char name[kAdmMaxDeviceNameSize],
- char guid[kAdmMaxGuidSize]);
- virtual int32_t RecordingDeviceName(
- uint16_t index,
- char name[kAdmMaxDeviceNameSize],
- char guid[kAdmMaxGuidSize]);
-
- // Device selection
- virtual int32_t SetPlayoutDevice(uint16_t index);
- virtual int32_t SetPlayoutDevice(
- AudioDeviceModule::WindowsDeviceType device);
- virtual int32_t SetRecordingDevice(uint16_t index);
- virtual int32_t SetRecordingDevice(
- AudioDeviceModule::WindowsDeviceType device);
-
- // Audio transport initialization
- virtual int32_t PlayoutIsAvailable(bool& available);
- virtual int32_t InitPlayout();
- virtual bool PlayoutIsInitialized() const;
- virtual int32_t RecordingIsAvailable(bool& available);
- virtual int32_t InitRecording();
- virtual bool RecordingIsInitialized() const;
-
- // Audio transport control
- virtual int32_t StartPlayout();
- virtual int32_t StopPlayout();
- virtual bool Playing() const;
- virtual int32_t StartRecording();
- virtual int32_t StopRecording();
- virtual bool Recording() const;
-
- // Microphone Automatic Gain Control (AGC)
- virtual int32_t SetAGC(bool enable);
- virtual bool AGC() const;
-
- // Volume control based on the Windows Wave API (Windows only)
- virtual int32_t SetWaveOutVolume(uint16_t volumeLeft, uint16_t volumeRight);
- virtual int32_t WaveOutVolume(uint16_t& volumeLeft,
- uint16_t& volumeRight) const;
-
- // Audio mixer initialization
- virtual int32_t InitSpeaker();
- virtual bool SpeakerIsInitialized() const;
- virtual int32_t InitMicrophone();
- virtual bool MicrophoneIsInitialized() const;
-
- // Speaker volume controls
- virtual int32_t SpeakerVolumeIsAvailable(bool& available);
- virtual int32_t SetSpeakerVolume(uint32_t volume);
- virtual int32_t SpeakerVolume(uint32_t& volume) const;
- virtual int32_t MaxSpeakerVolume(uint32_t& maxVolume) const;
- virtual int32_t MinSpeakerVolume(uint32_t& minVolume) const;
- virtual int32_t SpeakerVolumeStepSize(uint16_t& stepSize) const;
-
- // Microphone volume controls
- virtual int32_t MicrophoneVolumeIsAvailable(bool& available);
- virtual int32_t SetMicrophoneVolume(uint32_t volume);
- virtual int32_t MicrophoneVolume(uint32_t& volume) const;
- virtual int32_t MaxMicrophoneVolume(uint32_t& maxVolume) const;
- virtual int32_t MinMicrophoneVolume(uint32_t& minVolume) const;
- virtual int32_t
- MicrophoneVolumeStepSize(uint16_t& stepSize) const;
-
- // Microphone mute control
- virtual int32_t MicrophoneMuteIsAvailable(bool& available);
- virtual int32_t SetMicrophoneMute(bool enable);
- virtual int32_t MicrophoneMute(bool& enabled) const;
-
- // Speaker mute control
- virtual int32_t SpeakerMuteIsAvailable(bool& available);
- virtual int32_t SetSpeakerMute(bool enable);
- virtual int32_t SpeakerMute(bool& enabled) const;
-
- // Microphone boost control
- virtual int32_t MicrophoneBoostIsAvailable(bool& available);
- virtual int32_t SetMicrophoneBoost(bool enable);
- virtual int32_t MicrophoneBoost(bool& enabled) const;
-
- // Stereo support
- virtual int32_t StereoPlayoutIsAvailable(bool& available);
- virtual int32_t SetStereoPlayout(bool enable);
- virtual int32_t StereoPlayout(bool& enabled) const;
- virtual int32_t StereoRecordingIsAvailable(bool& available);
- virtual int32_t SetStereoRecording(bool enable);
- virtual int32_t StereoRecording(bool& enabled) const;
-
- // Delay information and control
- virtual int32_t
- SetPlayoutBuffer(const AudioDeviceModule::BufferType type,
- uint16_t sizeMS);
- virtual int32_t PlayoutBuffer(AudioDeviceModule::BufferType& type,
- uint16_t& sizeMS) const;
- virtual int32_t PlayoutDelay(uint16_t& delayMS) const;
- virtual int32_t RecordingDelay(uint16_t& delayMS) const;
-
- // CPU load
- virtual int32_t CPULoad(uint16_t& load) const;
-
- virtual bool PlayoutWarning() const;
- virtual bool PlayoutError() const;
- virtual bool RecordingWarning() const;
- virtual bool RecordingError() const;
- virtual void ClearPlayoutWarning();
- virtual void ClearPlayoutError();
- virtual void ClearRecordingWarning();
- virtual void ClearRecordingError();
-
- virtual void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer);
-
-private:
- virtual int32_t MicrophoneIsAvailable(bool& available);
- virtual int32_t SpeakerIsAvailable(bool& available);
-
- static void AtomicSet32(int32_t* theValue, int32_t newValue);
- static int32_t AtomicGet32(int32_t* theValue);
-
- static void logCAMsg(const TraceLevel level,
- const TraceModule module,
- const int32_t id, const char *msg,
- const char *err);
-
- int32_t GetNumberDevices(const AudioObjectPropertyScope scope,
- AudioDeviceID scopedDeviceIds[],
- const uint32_t deviceListLength);
-
- int32_t GetDeviceName(const AudioObjectPropertyScope scope,
- const uint16_t index, char* name);
-
- int32_t InitDevice(uint16_t userDeviceIndex,
- AudioDeviceID& deviceId, bool isInput);
-
- // Always work with our preferred playout format inside VoE.
- // Then convert the output to the OS setting using an AudioConverter.
- OSStatus SetDesiredPlayoutFormat();
-
- static OSStatus
- objectListenerProc(AudioObjectID objectId, UInt32 numberAddresses,
- const AudioObjectPropertyAddress addresses[],
- void* clientData);
-
- OSStatus
- implObjectListenerProc(AudioObjectID objectId, UInt32 numberAddresses,
- const AudioObjectPropertyAddress addresses[]);
-
- int32_t HandleDeviceChange();
-
- int32_t
- HandleStreamFormatChange(AudioObjectID objectId,
+const int kGetMicVolumeIntervalMs = 1000;
+
+class AudioDeviceMac : public AudioDeviceGeneric {
+ public:
+ AudioDeviceMac(const int32_t id);
+ ~AudioDeviceMac();
+
+ // Retrieve the currently utilized audio layer
+ virtual int32_t ActiveAudioLayer(
+ AudioDeviceModule::AudioLayer& audioLayer) const;
+
+ // Main initialization and termination
+ virtual int32_t Init();
+ virtual int32_t Terminate();
+ virtual bool Initialized() const;
+
+ // Device enumeration
+ virtual int16_t PlayoutDevices();
+ virtual int16_t RecordingDevices();
+ virtual int32_t PlayoutDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]);
+ virtual int32_t RecordingDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]);
+
+ // Device selection
+ virtual int32_t SetPlayoutDevice(uint16_t index);
+ virtual int32_t SetPlayoutDevice(AudioDeviceModule::WindowsDeviceType device);
+ virtual int32_t SetRecordingDevice(uint16_t index);
+ virtual int32_t SetRecordingDevice(
+ AudioDeviceModule::WindowsDeviceType device);
+
+ // Audio transport initialization
+ virtual int32_t PlayoutIsAvailable(bool& available);
+ virtual int32_t InitPlayout();
+ virtual bool PlayoutIsInitialized() const;
+ virtual int32_t RecordingIsAvailable(bool& available);
+ virtual int32_t InitRecording();
+ virtual bool RecordingIsInitialized() const;
+
+ // Audio transport control
+ virtual int32_t StartPlayout();
+ virtual int32_t StopPlayout();
+ virtual bool Playing() const;
+ virtual int32_t StartRecording();
+ virtual int32_t StopRecording();
+ virtual bool Recording() const;
+
+ // Microphone Automatic Gain Control (AGC)
+ virtual int32_t SetAGC(bool enable);
+ virtual bool AGC() const;
+
+ // Volume control based on the Windows Wave API (Windows only)
+ virtual int32_t SetWaveOutVolume(uint16_t volumeLeft, uint16_t volumeRight);
+ virtual int32_t WaveOutVolume(uint16_t& volumeLeft,
+ uint16_t& volumeRight) const;
+
+ // Audio mixer initialization
+ virtual int32_t InitSpeaker();
+ virtual bool SpeakerIsInitialized() const;
+ virtual int32_t InitMicrophone();
+ virtual bool MicrophoneIsInitialized() const;
+
+ // Speaker volume controls
+ virtual int32_t SpeakerVolumeIsAvailable(bool& available);
+ virtual int32_t SetSpeakerVolume(uint32_t volume);
+ virtual int32_t SpeakerVolume(uint32_t& volume) const;
+ virtual int32_t MaxSpeakerVolume(uint32_t& maxVolume) const;
+ virtual int32_t MinSpeakerVolume(uint32_t& minVolume) const;
+ virtual int32_t SpeakerVolumeStepSize(uint16_t& stepSize) const;
+
+ // Microphone volume controls
+ virtual int32_t MicrophoneVolumeIsAvailable(bool& available);
+ virtual int32_t SetMicrophoneVolume(uint32_t volume);
+ virtual int32_t MicrophoneVolume(uint32_t& volume) const;
+ virtual int32_t MaxMicrophoneVolume(uint32_t& maxVolume) const;
+ virtual int32_t MinMicrophoneVolume(uint32_t& minVolume) const;
+ virtual int32_t MicrophoneVolumeStepSize(uint16_t& stepSize) const;
+
+ // Microphone mute control
+ virtual int32_t MicrophoneMuteIsAvailable(bool& available);
+ virtual int32_t SetMicrophoneMute(bool enable);
+ virtual int32_t MicrophoneMute(bool& enabled) const;
+
+ // Speaker mute control
+ virtual int32_t SpeakerMuteIsAvailable(bool& available);
+ virtual int32_t SetSpeakerMute(bool enable);
+ virtual int32_t SpeakerMute(bool& enabled) const;
+
+ // Microphone boost control
+ virtual int32_t MicrophoneBoostIsAvailable(bool& available);
+ virtual int32_t SetMicrophoneBoost(bool enable);
+ virtual int32_t MicrophoneBoost(bool& enabled) const;
+
+ // Stereo support
+ virtual int32_t StereoPlayoutIsAvailable(bool& available);
+ virtual int32_t SetStereoPlayout(bool enable);
+ virtual int32_t StereoPlayout(bool& enabled) const;
+ virtual int32_t StereoRecordingIsAvailable(bool& available);
+ virtual int32_t SetStereoRecording(bool enable);
+ virtual int32_t StereoRecording(bool& enabled) const;
+
+ // Delay information and control
+ virtual int32_t SetPlayoutBuffer(const AudioDeviceModule::BufferType type,
+ uint16_t sizeMS);
+ virtual int32_t PlayoutBuffer(AudioDeviceModule::BufferType& type,
+ uint16_t& sizeMS) const;
+ virtual int32_t PlayoutDelay(uint16_t& delayMS) const;
+ virtual int32_t RecordingDelay(uint16_t& delayMS) const;
+
+ // CPU load
+ virtual int32_t CPULoad(uint16_t& load) const;
+
+ virtual bool PlayoutWarning() const;
+ virtual bool PlayoutError() const;
+ virtual bool RecordingWarning() const;
+ virtual bool RecordingError() const;
+ virtual void ClearPlayoutWarning();
+ virtual void ClearPlayoutError();
+ virtual void ClearRecordingWarning();
+ virtual void ClearRecordingError();
+
+ virtual void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer);
+
+ private:
+ virtual int32_t MicrophoneIsAvailable(bool& available);
+ virtual int32_t SpeakerIsAvailable(bool& available);
+
+ static void AtomicSet32(int32_t* theValue, int32_t newValue);
+ static int32_t AtomicGet32(int32_t* theValue);
+
+ static void logCAMsg(const TraceLevel level,
+ const TraceModule module,
+ const int32_t id,
+ const char* msg,
+ const char* err);
+
+ int32_t GetNumberDevices(const AudioObjectPropertyScope scope,
+ AudioDeviceID scopedDeviceIds[],
+ const uint32_t deviceListLength);
+
+ int32_t GetDeviceName(const AudioObjectPropertyScope scope,
+ const uint16_t index,
+ char* name);
+
+ int32_t InitDevice(uint16_t userDeviceIndex,
+ AudioDeviceID& deviceId,
+ bool isInput);
+
+ // Always work with our preferred playout format inside VoE.
+ // Then convert the output to the OS setting using an AudioConverter.
+ OSStatus SetDesiredPlayoutFormat();
+
+ static OSStatus objectListenerProc(
+ AudioObjectID objectId,
+ UInt32 numberAddresses,
+ const AudioObjectPropertyAddress addresses[],
+ void* clientData);
+
+ OSStatus implObjectListenerProc(AudioObjectID objectId,
+ UInt32 numberAddresses,
+ const AudioObjectPropertyAddress addresses[]);
+
+ int32_t HandleDeviceChange();
+
+ int32_t HandleStreamFormatChange(AudioObjectID objectId,
+ AudioObjectPropertyAddress propertyAddress);
+
+ int32_t HandleDataSourceChange(AudioObjectID objectId,
AudioObjectPropertyAddress propertyAddress);
- int32_t
- HandleDataSourceChange(AudioObjectID objectId,
- AudioObjectPropertyAddress propertyAddress);
-
- int32_t
- HandleProcessorOverload(AudioObjectPropertyAddress propertyAddress);
-
- static OSStatus deviceIOProc(AudioDeviceID device,
- const AudioTimeStamp *now,
- const AudioBufferList *inputData,
- const AudioTimeStamp *inputTime,
- AudioBufferList *outputData,
+ int32_t HandleProcessorOverload(AudioObjectPropertyAddress propertyAddress);
+
+ static OSStatus deviceIOProc(AudioDeviceID device,
+ const AudioTimeStamp* now,
+ const AudioBufferList* inputData,
+ const AudioTimeStamp* inputTime,
+ AudioBufferList* outputData,
+ const AudioTimeStamp* outputTime,
+ void* clientData);
+
+ static OSStatus outConverterProc(
+ AudioConverterRef audioConverter,
+ UInt32* numberDataPackets,
+ AudioBufferList* data,
+ AudioStreamPacketDescription** dataPacketDescription,
+ void* userData);
+
+ static OSStatus inDeviceIOProc(AudioDeviceID device,
+ const AudioTimeStamp* now,
+ const AudioBufferList* inputData,
+ const AudioTimeStamp* inputTime,
+ AudioBufferList* outputData,
const AudioTimeStamp* outputTime,
- void *clientData);
+ void* clientData);
- static OSStatus
- outConverterProc(AudioConverterRef audioConverter,
- UInt32 *numberDataPackets, AudioBufferList *data,
- AudioStreamPacketDescription **dataPacketDescription,
- void *userData);
+ static OSStatus inConverterProc(
+ AudioConverterRef audioConverter,
+ UInt32* numberDataPackets,
+ AudioBufferList* data,
+ AudioStreamPacketDescription** dataPacketDescription,
+ void* inUserData);
- static OSStatus inDeviceIOProc(AudioDeviceID device,
- const AudioTimeStamp *now,
- const AudioBufferList *inputData,
- const AudioTimeStamp *inputTime,
- AudioBufferList *outputData,
- const AudioTimeStamp *outputTime,
- void *clientData);
+ OSStatus implDeviceIOProc(const AudioBufferList* inputData,
+ const AudioTimeStamp* inputTime,
+ AudioBufferList* outputData,
+ const AudioTimeStamp* outputTime);
- static OSStatus
- inConverterProc(AudioConverterRef audioConverter,
- UInt32 *numberDataPackets, AudioBufferList *data,
- AudioStreamPacketDescription **dataPacketDescription,
- void *inUserData);
+ OSStatus implOutConverterProc(UInt32* numberDataPackets,
+ AudioBufferList* data);
- OSStatus implDeviceIOProc(const AudioBufferList *inputData,
- const AudioTimeStamp *inputTime,
- AudioBufferList *outputData,
- const AudioTimeStamp *outputTime);
+ OSStatus implInDeviceIOProc(const AudioBufferList* inputData,
+ const AudioTimeStamp* inputTime);
- OSStatus implOutConverterProc(UInt32 *numberDataPackets,
- AudioBufferList *data);
+ OSStatus implInConverterProc(UInt32* numberDataPackets,
+ AudioBufferList* data);
- OSStatus implInDeviceIOProc(const AudioBufferList *inputData,
- const AudioTimeStamp *inputTime);
+ static bool RunCapture(void*);
+ static bool RunRender(void*);
+ bool CaptureWorkerThread();
+ bool RenderWorkerThread();
- OSStatus implInConverterProc(UInt32 *numberDataPackets,
- AudioBufferList *data);
+ bool KeyPressed();
- static bool RunCapture(void*);
- static bool RunRender(void*);
- bool CaptureWorkerThread();
- bool RenderWorkerThread();
+ AudioDeviceBuffer* _ptrAudioBuffer;
- bool KeyPressed();
+ CriticalSectionWrapper& _critSect;
- AudioDeviceBuffer* _ptrAudioBuffer;
+ EventWrapper& _stopEventRec;
+ EventWrapper& _stopEvent;
- CriticalSectionWrapper& _critSect;
+ // TODO(pbos): Replace with direct members, just start/stop, no need to
+ // recreate the thread.
+ // Only valid/running between calls to StartRecording and StopRecording.
+ rtc::scoped_ptr<rtc::PlatformThread> capture_worker_thread_;
- EventWrapper& _stopEventRec;
- EventWrapper& _stopEvent;
+ // Only valid/running between calls to StartPlayout and StopPlayout.
+ rtc::scoped_ptr<rtc::PlatformThread> render_worker_thread_;
- // Only valid/running between calls to StartRecording and StopRecording.
- rtc::scoped_ptr<ThreadWrapper> capture_worker_thread_;
+ int32_t _id;
- // Only valid/running between calls to StartPlayout and StopPlayout.
- rtc::scoped_ptr<ThreadWrapper> render_worker_thread_;
+ AudioMixerManagerMac _mixerManager;
- int32_t _id;
-
- AudioMixerManagerMac _mixerManager;
-
- uint16_t _inputDeviceIndex;
- uint16_t _outputDeviceIndex;
- AudioDeviceID _inputDeviceID;
- AudioDeviceID _outputDeviceID;
+ uint16_t _inputDeviceIndex;
+ uint16_t _outputDeviceIndex;
+ AudioDeviceID _inputDeviceID;
+ AudioDeviceID _outputDeviceID;
#if __MAC_OS_X_VERSION_MAX_ALLOWED >= 1050
- AudioDeviceIOProcID _inDeviceIOProcID;
- AudioDeviceIOProcID _deviceIOProcID;
+ AudioDeviceIOProcID _inDeviceIOProcID;
+ AudioDeviceIOProcID _deviceIOProcID;
#endif
- bool _inputDeviceIsSpecified;
- bool _outputDeviceIsSpecified;
+ bool _inputDeviceIsSpecified;
+ bool _outputDeviceIsSpecified;
+
+ uint8_t _recChannels;
+ uint8_t _playChannels;
- uint8_t _recChannels;
- uint8_t _playChannels;
+ Float32* _captureBufData;
+ SInt16* _renderBufData;
- Float32* _captureBufData;
- SInt16* _renderBufData;
+ SInt16 _renderConvertData[PLAY_BUF_SIZE_IN_SAMPLES];
- SInt16 _renderConvertData[PLAY_BUF_SIZE_IN_SAMPLES];
+ AudioDeviceModule::BufferType _playBufType;
- AudioDeviceModule::BufferType _playBufType;
+ bool _initialized;
+ bool _isShutDown;
+ bool _recording;
+ bool _playing;
+ bool _recIsInitialized;
+ bool _playIsInitialized;
+ bool _AGC;
- bool _initialized;
- bool _isShutDown;
- bool _recording;
- bool _playing;
- bool _recIsInitialized;
- bool _playIsInitialized;
- bool _AGC;
+ // Atomically set variables
+ int32_t _renderDeviceIsAlive;
+ int32_t _captureDeviceIsAlive;
- // Atomically set varaibles
- int32_t _renderDeviceIsAlive;
- int32_t _captureDeviceIsAlive;
+ bool _twoDevices;
+ bool _doStop; // For play if not shared device or play+rec if shared device
+ bool _doStopRec; // For rec if not shared device
+ bool _macBookPro;
+ bool _macBookProPanRight;
- bool _twoDevices;
- bool _doStop; // For play if not shared device or play+rec if shared device
- bool _doStopRec; // For rec if not shared device
- bool _macBookPro;
- bool _macBookProPanRight;
+ AudioConverterRef _captureConverter;
+ AudioConverterRef _renderConverter;
- AudioConverterRef _captureConverter;
- AudioConverterRef _renderConverter;
+ AudioStreamBasicDescription _outStreamFormat;
+ AudioStreamBasicDescription _outDesiredFormat;
+ AudioStreamBasicDescription _inStreamFormat;
+ AudioStreamBasicDescription _inDesiredFormat;
- AudioStreamBasicDescription _outStreamFormat;
- AudioStreamBasicDescription _outDesiredFormat;
- AudioStreamBasicDescription _inStreamFormat;
- AudioStreamBasicDescription _inDesiredFormat;
+ uint32_t _captureLatencyUs;
+ uint32_t _renderLatencyUs;
- uint32_t _captureLatencyUs;
- uint32_t _renderLatencyUs;
+ // Atomically set variables
+ mutable int32_t _captureDelayUs;
+ mutable int32_t _renderDelayUs;
- // Atomically set variables
- mutable int32_t _captureDelayUs;
- mutable int32_t _renderDelayUs;
+ int32_t _renderDelayOffsetSamples;
- int32_t _renderDelayOffsetSamples;
+ uint16_t _playBufDelayFixed; // fixed playback delay
- uint16_t _playBufDelayFixed; // fixed playback delay
+ uint16_t _playWarning;
+ uint16_t _playError;
+ uint16_t _recWarning;
+ uint16_t _recError;
- uint16_t _playWarning;
- uint16_t _playError;
- uint16_t _recWarning;
- uint16_t _recError;
+ PaUtilRingBuffer* _paCaptureBuffer;
+ PaUtilRingBuffer* _paRenderBuffer;
- PaUtilRingBuffer* _paCaptureBuffer;
- PaUtilRingBuffer* _paRenderBuffer;
+ semaphore_t _renderSemaphore;
+ semaphore_t _captureSemaphore;
- semaphore_t _renderSemaphore;
- semaphore_t _captureSemaphore;
+ int _captureBufSizeSamples;
+ int _renderBufSizeSamples;
- int _captureBufSizeSamples;
- int _renderBufSizeSamples;
+ // Typing detection
+ // 0x5c is key "9"; after that come the function keys.
+ bool prev_key_state_[0x5d];
- // Typing detection
- // 0x5c is key "9", after that comes function keys.
- bool prev_key_state_[0x5d];
+ int get_mic_volume_counter_ms_;
};
} // namespace webrtc
diff --git a/webrtc/modules/audio_device/mac/audio_mixer_manager_mac.cc b/webrtc/modules/audio_device/mac/audio_mixer_manager_mac.cc
index 9016ffe508..e7e0754695 100644
--- a/webrtc/modules/audio_device/mac/audio_mixer_manager_mac.cc
+++ b/webrtc/modules/audio_device/mac/audio_mixer_manager_mac.cc
@@ -11,1134 +11,989 @@
#include "webrtc/modules/audio_device/mac/audio_mixer_manager_mac.h"
#include "webrtc/system_wrappers/include/trace.h"
-#include <unistd.h> // getpid()
+#include <unistd.h> // getpid()
namespace webrtc {
-
-#define WEBRTC_CA_RETURN_ON_ERR(expr) \
- do { \
- err = expr; \
- if (err != noErr) { \
- logCAMsg(kTraceError, kTraceAudioDevice, _id, \
- "Error in " #expr, (const char *)&err); \
- return -1; \
- } \
- } while(0)
-
-#define WEBRTC_CA_LOG_ERR(expr) \
- do { \
- err = expr; \
- if (err != noErr) { \
- logCAMsg(kTraceError, kTraceAudioDevice, _id, \
- "Error in " #expr, (const char *)&err); \
- } \
- } while(0)
-
-#define WEBRTC_CA_LOG_WARN(expr) \
- do { \
- err = expr; \
- if (err != noErr) { \
- logCAMsg(kTraceWarning, kTraceAudioDevice, _id, \
- "Error in " #expr, (const char *)&err); \
- } \
- } while(0)
-
-AudioMixerManagerMac::AudioMixerManagerMac(const int32_t id) :
- _critSect(*CriticalSectionWrapper::CreateCriticalSection()),
- _id(id),
- _inputDeviceID(kAudioObjectUnknown),
- _outputDeviceID(kAudioObjectUnknown),
- _noInputChannels(0),
- _noOutputChannels(0)
-{
- WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id,
- "%s constructed", __FUNCTION__);
-}
-
-AudioMixerManagerMac::~AudioMixerManagerMac()
-{
- WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id,
- "%s destructed", __FUNCTION__);
- Close();
-
- delete &_critSect;
+#define WEBRTC_CA_RETURN_ON_ERR(expr) \
+ do { \
+ err = expr; \
+ if (err != noErr) { \
+ logCAMsg(kTraceError, kTraceAudioDevice, _id, "Error in " #expr, \
+ (const char*) & err); \
+ return -1; \
+ } \
+ } while (0)
+
+#define WEBRTC_CA_LOG_ERR(expr) \
+ do { \
+ err = expr; \
+ if (err != noErr) { \
+ logCAMsg(kTraceError, kTraceAudioDevice, _id, "Error in " #expr, \
+ (const char*) & err); \
+ } \
+ } while (0)
+
+#define WEBRTC_CA_LOG_WARN(expr) \
+ do { \
+ err = expr; \
+ if (err != noErr) { \
+ logCAMsg(kTraceWarning, kTraceAudioDevice, _id, "Error in " #expr, \
+ (const char*) & err); \
+ } \
+ } while (0)
+
+AudioMixerManagerMac::AudioMixerManagerMac(const int32_t id)
+ : _critSect(*CriticalSectionWrapper::CreateCriticalSection()),
+ _id(id),
+ _inputDeviceID(kAudioObjectUnknown),
+ _outputDeviceID(kAudioObjectUnknown),
+ _noInputChannels(0),
+ _noOutputChannels(0) {
+ WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "%s constructed",
+ __FUNCTION__);
+}
+
+AudioMixerManagerMac::~AudioMixerManagerMac() {
+ WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "%s destructed",
+ __FUNCTION__);
+
+ Close();
+
+ delete &_critSect;
}
// ============================================================================
// PUBLIC METHODS
// ============================================================================
-int32_t AudioMixerManagerMac::Close()
-{
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s",
- __FUNCTION__);
+int32_t AudioMixerManagerMac::Close() {
+ WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s", __FUNCTION__);
- CriticalSectionScoped lock(&_critSect);
+ CriticalSectionScoped lock(&_critSect);
- CloseSpeaker();
- CloseMicrophone();
-
- return 0;
+ CloseSpeaker();
+ CloseMicrophone();
+ return 0;
}
-int32_t AudioMixerManagerMac::CloseSpeaker()
-{
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s",
- __FUNCTION__);
+int32_t AudioMixerManagerMac::CloseSpeaker() {
+ WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s", __FUNCTION__);
- CriticalSectionScoped lock(&_critSect);
+ CriticalSectionScoped lock(&_critSect);
- _outputDeviceID = kAudioObjectUnknown;
- _noOutputChannels = 0;
+ _outputDeviceID = kAudioObjectUnknown;
+ _noOutputChannels = 0;
- return 0;
+ return 0;
}
-int32_t AudioMixerManagerMac::CloseMicrophone()
-{
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s",
- __FUNCTION__);
+int32_t AudioMixerManagerMac::CloseMicrophone() {
+ WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s", __FUNCTION__);
- CriticalSectionScoped lock(&_critSect);
+ CriticalSectionScoped lock(&_critSect);
- _inputDeviceID = kAudioObjectUnknown;
- _noInputChannels = 0;
+ _inputDeviceID = kAudioObjectUnknown;
+ _noInputChannels = 0;
- return 0;
+ return 0;
}
-int32_t AudioMixerManagerMac::OpenSpeaker(AudioDeviceID deviceID)
-{
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- "AudioMixerManagerMac::OpenSpeaker(id=%d)", deviceID);
-
- CriticalSectionScoped lock(&_critSect);
-
- OSStatus err = noErr;
- UInt32 size = 0;
- pid_t hogPid = -1;
+int32_t AudioMixerManagerMac::OpenSpeaker(AudioDeviceID deviceID) {
+ WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+ "AudioMixerManagerMac::OpenSpeaker(id=%d)", deviceID);
- _outputDeviceID = deviceID;
+ CriticalSectionScoped lock(&_critSect);
- // Check which process, if any, has hogged the device.
- AudioObjectPropertyAddress propertyAddress = { kAudioDevicePropertyHogMode,
- kAudioDevicePropertyScopeOutput, 0 };
+ OSStatus err = noErr;
+ UInt32 size = 0;
+ pid_t hogPid = -1;
- size = sizeof(hogPid);
- WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_outputDeviceID,
- &propertyAddress, 0, NULL, &size, &hogPid));
-
- if (hogPid == -1)
- {
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- " No process has hogged the input device");
- }
- // getpid() is apparently "always successful"
- else if (hogPid == getpid())
- {
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- " Our process has hogged the input device");
- } else
- {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " Another process (pid = %d) has hogged the input device",
- static_cast<int> (hogPid));
-
- return -1;
- }
+ _outputDeviceID = deviceID;
- // get number of channels from stream format
- propertyAddress.mSelector = kAudioDevicePropertyStreamFormat;
+ // Check which process, if any, has hogged the device.
+ AudioObjectPropertyAddress propertyAddress = {
+ kAudioDevicePropertyHogMode, kAudioDevicePropertyScopeOutput, 0};
- // Get the stream format, to be able to read the number of channels.
- AudioStreamBasicDescription streamFormat;
- size = sizeof(AudioStreamBasicDescription);
- memset(&streamFormat, 0, size);
- WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_outputDeviceID,
- &propertyAddress, 0, NULL, &size, &streamFormat));
+ size = sizeof(hogPid);
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+ _outputDeviceID, &propertyAddress, 0, NULL, &size, &hogPid));
- _noOutputChannels = streamFormat.mChannelsPerFrame;
-
- return 0;
-}
-
-int32_t AudioMixerManagerMac::OpenMicrophone(AudioDeviceID deviceID)
-{
+ if (hogPid == -1) {
WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- "AudioMixerManagerMac::OpenMicrophone(id=%d)", deviceID);
-
- CriticalSectionScoped lock(&_critSect);
-
- OSStatus err = noErr;
- UInt32 size = 0;
- pid_t hogPid = -1;
-
- _inputDeviceID = deviceID;
-
- // Check which process, if any, has hogged the device.
- AudioObjectPropertyAddress propertyAddress = { kAudioDevicePropertyHogMode,
- kAudioDevicePropertyScopeInput, 0 };
- size = sizeof(hogPid);
- WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_inputDeviceID,
- &propertyAddress, 0, NULL, &size, &hogPid));
- if (hogPid == -1)
- {
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- " No process has hogged the input device");
- }
- // getpid() is apparently "always successful"
- else if (hogPid == getpid())
- {
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- " Our process has hogged the input device");
- } else
- {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " Another process (pid = %d) has hogged the input device",
- static_cast<int> (hogPid));
-
- return -1;
- }
-
- // get number of channels from stream format
- propertyAddress.mSelector = kAudioDevicePropertyStreamFormat;
-
- // Get the stream format, to be able to read the number of channels.
- AudioStreamBasicDescription streamFormat;
- size = sizeof(AudioStreamBasicDescription);
- memset(&streamFormat, 0, size);
- WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_inputDeviceID,
- &propertyAddress, 0, NULL, &size, &streamFormat));
-
- _noInputChannels = streamFormat.mChannelsPerFrame;
+                 " No process has hogged the output device");
+ }
+ // getpid() is apparently "always successful"
+ else if (hogPid == getpid()) {
+ WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+                 " Our process has hogged the output device");
+ } else {
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+                 " Another process (pid = %d) has hogged the output device",
+ static_cast<int>(hogPid));
- return 0;
-}
+ return -1;
+ }
-bool AudioMixerManagerMac::SpeakerIsInitialized() const
-{
- WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "%s",
- __FUNCTION__);
+ // get number of channels from stream format
+ propertyAddress.mSelector = kAudioDevicePropertyStreamFormat;
- return (_outputDeviceID != kAudioObjectUnknown);
-}
+ // Get the stream format, to be able to read the number of channels.
+ AudioStreamBasicDescription streamFormat;
+ size = sizeof(AudioStreamBasicDescription);
+ memset(&streamFormat, 0, size);
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+ _outputDeviceID, &propertyAddress, 0, NULL, &size, &streamFormat));
-bool AudioMixerManagerMac::MicrophoneIsInitialized() const
-{
- WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "%s",
- __FUNCTION__);
+ _noOutputChannels = streamFormat.mChannelsPerFrame;
- return (_inputDeviceID != kAudioObjectUnknown);
+ return 0;
}
-int32_t AudioMixerManagerMac::SetSpeakerVolume(uint32_t volume)
-{
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- "AudioMixerManagerMac::SetSpeakerVolume(volume=%u)", volume);
+int32_t AudioMixerManagerMac::OpenMicrophone(AudioDeviceID deviceID) {
+ WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+ "AudioMixerManagerMac::OpenMicrophone(id=%d)", deviceID);
- CriticalSectionScoped lock(&_critSect);
+ CriticalSectionScoped lock(&_critSect);
- if (_outputDeviceID == kAudioObjectUnknown)
- {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " device ID has not been set");
- return -1;
- }
+ OSStatus err = noErr;
+ UInt32 size = 0;
+ pid_t hogPid = -1;
- OSStatus err = noErr;
- UInt32 size = 0;
- bool success = false;
+ _inputDeviceID = deviceID;
- // volume range is 0.0 - 1.0, convert from 0 -255
- const Float32 vol = (Float32)(volume / 255.0);
+ // Check which process, if any, has hogged the device.
+ AudioObjectPropertyAddress propertyAddress = {
+ kAudioDevicePropertyHogMode, kAudioDevicePropertyScopeInput, 0};
+ size = sizeof(hogPid);
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+ _inputDeviceID, &propertyAddress, 0, NULL, &size, &hogPid));
+ if (hogPid == -1) {
+ WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+ " No process has hogged the input device");
+ }
+ // getpid() is apparently "always successful"
+ else if (hogPid == getpid()) {
+ WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+ " Our process has hogged the input device");
+ } else {
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+ " Another process (pid = %d) has hogged the input device",
+ static_cast<int>(hogPid));
- assert(vol <= 1.0 && vol >= 0.0);
+ return -1;
+ }
- // Does the capture device have a master volume control?
- // If so, use it exclusively.
- AudioObjectPropertyAddress propertyAddress = {
- kAudioDevicePropertyVolumeScalar, kAudioDevicePropertyScopeOutput,
- 0 };
- Boolean isSettable = false;
- err = AudioObjectIsPropertySettable(_outputDeviceID, &propertyAddress,
- &isSettable);
- if (err == noErr && isSettable)
- {
- size = sizeof(vol);
- WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(_outputDeviceID,
- &propertyAddress, 0, NULL, size, &vol));
+ // get number of channels from stream format
+ propertyAddress.mSelector = kAudioDevicePropertyStreamFormat;
- return 0;
- }
-
- // Otherwise try to set each channel.
- for (UInt32 i = 1; i <= _noOutputChannels; i++)
- {
- propertyAddress.mElement = i;
- isSettable = false;
- err = AudioObjectIsPropertySettable(_outputDeviceID, &propertyAddress,
- &isSettable);
- if (err == noErr && isSettable)
- {
- size = sizeof(vol);
- WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(_outputDeviceID,
- &propertyAddress, 0, NULL, size, &vol));
- }
- success = true;
- }
+ // Get the stream format, to be able to read the number of channels.
+ AudioStreamBasicDescription streamFormat;
+ size = sizeof(AudioStreamBasicDescription);
+ memset(&streamFormat, 0, size);
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+ _inputDeviceID, &propertyAddress, 0, NULL, &size, &streamFormat));
- if (!success)
- {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " Unable to set a volume on any output channel");
- return -1;
- }
+ _noInputChannels = streamFormat.mChannelsPerFrame;
- return 0;
+ return 0;
}
-int32_t AudioMixerManagerMac::SpeakerVolume(uint32_t& volume) const
-{
+bool AudioMixerManagerMac::SpeakerIsInitialized() const {
+ WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "%s", __FUNCTION__);
- if (_outputDeviceID == kAudioObjectUnknown)
- {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " device ID has not been set");
- return -1;
- }
-
- OSStatus err = noErr;
- UInt32 size = 0;
- unsigned int channels = 0;
- Float32 channelVol = 0;
- Float32 vol = 0;
-
- // Does the device have a master volume control?
- // If so, use it exclusively.
- AudioObjectPropertyAddress propertyAddress = {
- kAudioDevicePropertyVolumeScalar, kAudioDevicePropertyScopeOutput,
- 0 };
- Boolean hasProperty = AudioObjectHasProperty(_outputDeviceID,
- &propertyAddress);
- if (hasProperty)
- {
- size = sizeof(vol);
- WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_outputDeviceID,
- &propertyAddress, 0, NULL, &size, &vol));
-
- // vol 0.0 to 1.0 -> convert to 0 - 255
- volume = static_cast<uint32_t> (vol * 255 + 0.5);
- } else
- {
- // Otherwise get the average volume across channels.
- vol = 0;
- for (UInt32 i = 1; i <= _noOutputChannels; i++)
- {
- channelVol = 0;
- propertyAddress.mElement = i;
- hasProperty = AudioObjectHasProperty(_outputDeviceID,
- &propertyAddress);
- if (hasProperty)
- {
- size = sizeof(channelVol);
- WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_outputDeviceID,
- &propertyAddress, 0, NULL, &size, &channelVol));
-
- vol += channelVol;
- channels++;
- }
- }
-
- if (channels == 0)
- {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " Unable to get a volume on any channel");
- return -1;
- }
-
- assert(channels > 0);
- // vol 0.0 to 1.0 -> convert to 0 - 255
- volume = static_cast<uint32_t> (255 * vol / channels + 0.5);
- }
-
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- " AudioMixerManagerMac::SpeakerVolume() => vol=%i", vol);
-
- return 0;
+ return (_outputDeviceID != kAudioObjectUnknown);
}
-int32_t
-AudioMixerManagerMac::MaxSpeakerVolume(uint32_t& maxVolume) const
-{
+bool AudioMixerManagerMac::MicrophoneIsInitialized() const {
+ WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "%s", __FUNCTION__);
- if (_outputDeviceID == kAudioObjectUnknown)
- {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " device ID has not been set");
- return -1;
- }
-
- // volume range is 0.0 to 1.0
- // we convert that to 0 - 255
- maxVolume = 255;
-
- return 0;
+ return (_inputDeviceID != kAudioObjectUnknown);
}
-int32_t
-AudioMixerManagerMac::MinSpeakerVolume(uint32_t& minVolume) const
-{
+int32_t AudioMixerManagerMac::SetSpeakerVolume(uint32_t volume) {
+ WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+ "AudioMixerManagerMac::SetSpeakerVolume(volume=%u)", volume);
- if (_outputDeviceID == kAudioObjectUnknown)
- {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " device ID has not been set");
- return -1;
- }
+ CriticalSectionScoped lock(&_critSect);
- // volume range is 0.0 to 1.0
- // we convert that to 0 - 255
- minVolume = 0;
+ if (_outputDeviceID == kAudioObjectUnknown) {
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+ " device ID has not been set");
+ return -1;
+ }
- return 0;
-}
+ OSStatus err = noErr;
+ UInt32 size = 0;
+ bool success = false;
-int32_t
-AudioMixerManagerMac::SpeakerVolumeStepSize(uint16_t& stepSize) const
-{
+  // volume range is 0.0 - 1.0, convert from 0 - 255
+ const Float32 vol = (Float32)(volume / 255.0);
- if (_outputDeviceID == kAudioObjectUnknown)
- {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " device ID has not been set");
- return -1;
- }
+ assert(vol <= 1.0 && vol >= 0.0);
- // volume range is 0.0 to 1.0
- // we convert that to 0 - 255
- stepSize = 1;
+ // Does the capture device have a master volume control?
+ // If so, use it exclusively.
+ AudioObjectPropertyAddress propertyAddress = {
+ kAudioDevicePropertyVolumeScalar, kAudioDevicePropertyScopeOutput, 0};
+ Boolean isSettable = false;
+ err = AudioObjectIsPropertySettable(_outputDeviceID, &propertyAddress,
+ &isSettable);
+ if (err == noErr && isSettable) {
+ size = sizeof(vol);
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(
+ _outputDeviceID, &propertyAddress, 0, NULL, size, &vol));
return 0;
-}
+ }
-int32_t AudioMixerManagerMac::SpeakerVolumeIsAvailable(bool& available)
-{
- if (_outputDeviceID == kAudioObjectUnknown)
- {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " device ID has not been set");
- return -1;
- }
-
- OSStatus err = noErr;
-
- // Does the capture device have a master volume control?
- // If so, use it exclusively.
- AudioObjectPropertyAddress propertyAddress = {
- kAudioDevicePropertyVolumeScalar, kAudioDevicePropertyScopeOutput,
- 0 };
- Boolean isSettable = false;
+ // Otherwise try to set each channel.
+ for (UInt32 i = 1; i <= _noOutputChannels; i++) {
+ propertyAddress.mElement = i;
+ isSettable = false;
err = AudioObjectIsPropertySettable(_outputDeviceID, &propertyAddress,
&isSettable);
- if (err == noErr && isSettable)
- {
- available = true;
- return 0;
- }
-
- // Otherwise try to set each channel.
- for (UInt32 i = 1; i <= _noOutputChannels; i++)
- {
- propertyAddress.mElement = i;
- isSettable = false;
- err = AudioObjectIsPropertySettable(_outputDeviceID, &propertyAddress,
- &isSettable);
- if (err != noErr || !isSettable)
- {
- available = false;
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " Volume cannot be set for output channel %d, err=%d",
- i, err);
- return -1;
- }
- }
-
+ if (err == noErr && isSettable) {
+ size = sizeof(vol);
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(
+ _outputDeviceID, &propertyAddress, 0, NULL, size, &vol));
+ }
+ success = true;
+ }
+
+ if (!success) {
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+ " Unable to set a volume on any output channel");
+ return -1;
+ }
+
+ return 0;
+}
+
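Throughout this file the public API exchanges an integer volume in 0 - 255 while CoreAudio works with a Float32 scalar in 0.0 - 1.0. The round-trip used above and in the getters below, written out as hypothetical helpers (illustration only, not part of the change):

    static Float32 ToScalar(uint32_t level) {   // 0 - 255  ->  0.0 - 1.0
      return (Float32)(level / 255.0);
    }
    static uint32_t ToLevel(Float32 scalar) {   // 0.0 - 1.0  ->  0 - 255
      return static_cast<uint32_t>(scalar * 255 + 0.5);  // round to nearest
    }
    // For example, ToScalar(128) is roughly 0.502 and ToLevel(0.502f) == 128.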
+int32_t AudioMixerManagerMac::SpeakerVolume(uint32_t& volume) const {
+ if (_outputDeviceID == kAudioObjectUnknown) {
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+ " device ID has not been set");
+ return -1;
+ }
+
+ OSStatus err = noErr;
+ UInt32 size = 0;
+ unsigned int channels = 0;
+ Float32 channelVol = 0;
+ Float32 vol = 0;
+
+ // Does the device have a master volume control?
+ // If so, use it exclusively.
+ AudioObjectPropertyAddress propertyAddress = {
+ kAudioDevicePropertyVolumeScalar, kAudioDevicePropertyScopeOutput, 0};
+ Boolean hasProperty =
+ AudioObjectHasProperty(_outputDeviceID, &propertyAddress);
+ if (hasProperty) {
+ size = sizeof(vol);
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+ _outputDeviceID, &propertyAddress, 0, NULL, &size, &vol));
+
+ // vol 0.0 to 1.0 -> convert to 0 - 255
+ volume = static_cast<uint32_t>(vol * 255 + 0.5);
+ } else {
+ // Otherwise get the average volume across channels.
+ vol = 0;
+ for (UInt32 i = 1; i <= _noOutputChannels; i++) {
+ channelVol = 0;
+ propertyAddress.mElement = i;
+ hasProperty = AudioObjectHasProperty(_outputDeviceID, &propertyAddress);
+ if (hasProperty) {
+ size = sizeof(channelVol);
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+ _outputDeviceID, &propertyAddress, 0, NULL, &size, &channelVol));
+
+ vol += channelVol;
+ channels++;
+ }
+ }
+
+ if (channels == 0) {
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+ " Unable to get a volume on any channel");
+ return -1;
+ }
+
+ assert(channels > 0);
+ // vol 0.0 to 1.0 -> convert to 0 - 255
+ volume = static_cast<uint32_t>(255 * vol / channels + 0.5);
+ }
+
+ WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+               " AudioMixerManagerMac::SpeakerVolume() => vol=%u", volume);
+
+ return 0;
+}
+
+int32_t AudioMixerManagerMac::MaxSpeakerVolume(uint32_t& maxVolume) const {
+ if (_outputDeviceID == kAudioObjectUnknown) {
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+ " device ID has not been set");
+ return -1;
+ }
+
+ // volume range is 0.0 to 1.0
+ // we convert that to 0 - 255
+ maxVolume = 255;
+
+ return 0;
+}
+
+int32_t AudioMixerManagerMac::MinSpeakerVolume(uint32_t& minVolume) const {
+ if (_outputDeviceID == kAudioObjectUnknown) {
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+ " device ID has not been set");
+ return -1;
+ }
+
+ // volume range is 0.0 to 1.0
+ // we convert that to 0 - 255
+ minVolume = 0;
+
+ return 0;
+}
+
+int32_t AudioMixerManagerMac::SpeakerVolumeStepSize(uint16_t& stepSize) const {
+ if (_outputDeviceID == kAudioObjectUnknown) {
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+ " device ID has not been set");
+ return -1;
+ }
+
+ // volume range is 0.0 to 1.0
+ // we convert that to 0 - 255
+ stepSize = 1;
+
+ return 0;
+}
+
+int32_t AudioMixerManagerMac::SpeakerVolumeIsAvailable(bool& available) {
+ if (_outputDeviceID == kAudioObjectUnknown) {
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+ " device ID has not been set");
+ return -1;
+ }
+
+ OSStatus err = noErr;
+
+ // Does the capture device have a master volume control?
+ // If so, use it exclusively.
+ AudioObjectPropertyAddress propertyAddress = {
+ kAudioDevicePropertyVolumeScalar, kAudioDevicePropertyScopeOutput, 0};
+ Boolean isSettable = false;
+ err = AudioObjectIsPropertySettable(_outputDeviceID, &propertyAddress,
+ &isSettable);
+ if (err == noErr && isSettable) {
available = true;
return 0;
-}
-
-int32_t AudioMixerManagerMac::SpeakerMuteIsAvailable(bool& available)
-{
- if (_outputDeviceID == kAudioObjectUnknown)
- {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " device ID has not been set");
- return -1;
- }
+ }
- OSStatus err = noErr;
-
- // Does the capture device have a master mute control?
- // If so, use it exclusively.
- AudioObjectPropertyAddress propertyAddress = { kAudioDevicePropertyMute,
- kAudioDevicePropertyScopeOutput, 0 };
- Boolean isSettable = false;
+ // Otherwise try to set each channel.
+ for (UInt32 i = 1; i <= _noOutputChannels; i++) {
+ propertyAddress.mElement = i;
+ isSettable = false;
err = AudioObjectIsPropertySettable(_outputDeviceID, &propertyAddress,
&isSettable);
- if (err == noErr && isSettable)
- {
- available = true;
- return 0;
- }
-
- // Otherwise try to set each channel.
- for (UInt32 i = 1; i <= _noOutputChannels; i++)
- {
- propertyAddress.mElement = i;
- isSettable = false;
- err = AudioObjectIsPropertySettable(_outputDeviceID, &propertyAddress,
- &isSettable);
- if (err != noErr || !isSettable)
- {
- available = false;
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " Mute cannot be set for output channel %d, err=%d",
- i, err);
- return -1;
- }
- }
-
+ if (err != noErr || !isSettable) {
+ available = false;
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+ " Volume cannot be set for output channel %d, err=%d", i,
+ err);
+ return -1;
+ }
+ }
+
+ available = true;
+ return 0;
+}
+
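The availability checks in this file all follow one pattern: probe the master element (mElement == 0) and use it exclusively when settable, otherwise require every channel element 1..N to be settable. The probe condensed into a sketch (IsSettable is a hypothetical helper):

    static bool IsSettable(AudioObjectID device,
                           AudioObjectPropertyAddress address) {
      Boolean settable = false;
      OSStatus err = AudioObjectIsPropertySettable(device, &address, &settable);
      return err == noErr && settable;
    }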
+int32_t AudioMixerManagerMac::SpeakerMuteIsAvailable(bool& available) {
+ if (_outputDeviceID == kAudioObjectUnknown) {
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+ " device ID has not been set");
+ return -1;
+ }
+
+ OSStatus err = noErr;
+
+ // Does the capture device have a master mute control?
+ // If so, use it exclusively.
+ AudioObjectPropertyAddress propertyAddress = {
+ kAudioDevicePropertyMute, kAudioDevicePropertyScopeOutput, 0};
+ Boolean isSettable = false;
+ err = AudioObjectIsPropertySettable(_outputDeviceID, &propertyAddress,
+ &isSettable);
+ if (err == noErr && isSettable) {
available = true;
return 0;
-}
-
-int32_t AudioMixerManagerMac::SetSpeakerMute(bool enable)
-{
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- "AudioMixerManagerMac::SetSpeakerMute(enable=%u)", enable);
-
- CriticalSectionScoped lock(&_critSect);
-
- if (_outputDeviceID == kAudioObjectUnknown)
- {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " device ID has not been set");
- return -1;
- }
-
- OSStatus err = noErr;
- UInt32 size = 0;
- UInt32 mute = enable ? 1 : 0;
- bool success = false;
+ }
- // Does the render device have a master mute control?
- // If so, use it exclusively.
- AudioObjectPropertyAddress propertyAddress = { kAudioDevicePropertyMute,
- kAudioDevicePropertyScopeOutput, 0 };
- Boolean isSettable = false;
+ // Otherwise try to set each channel.
+ for (UInt32 i = 1; i <= _noOutputChannels; i++) {
+ propertyAddress.mElement = i;
+ isSettable = false;
err = AudioObjectIsPropertySettable(_outputDeviceID, &propertyAddress,
&isSettable);
- if (err == noErr && isSettable)
- {
- size = sizeof(mute);
- WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(_outputDeviceID,
- &propertyAddress, 0, NULL, size, &mute));
-
- return 0;
- }
-
- // Otherwise try to set each channel.
- for (UInt32 i = 1; i <= _noOutputChannels; i++)
- {
- propertyAddress.mElement = i;
- isSettable = false;
- err = AudioObjectIsPropertySettable(_outputDeviceID, &propertyAddress,
- &isSettable);
- if (err == noErr && isSettable)
- {
- size = sizeof(mute);
- WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(_outputDeviceID,
- &propertyAddress, 0, NULL, size, &mute));
- }
- success = true;
- }
-
- if (!success)
- {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " Unable to set mute on any input channel");
- return -1;
- }
+ if (err != noErr || !isSettable) {
+ available = false;
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+ " Mute cannot be set for output channel %d, err=%d", i, err);
+ return -1;
+ }
+ }
+
+ available = true;
+ return 0;
+}
+
+int32_t AudioMixerManagerMac::SetSpeakerMute(bool enable) {
+ WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+ "AudioMixerManagerMac::SetSpeakerMute(enable=%u)", enable);
+
+ CriticalSectionScoped lock(&_critSect);
+
+ if (_outputDeviceID == kAudioObjectUnknown) {
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+ " device ID has not been set");
+ return -1;
+ }
+
+ OSStatus err = noErr;
+ UInt32 size = 0;
+ UInt32 mute = enable ? 1 : 0;
+ bool success = false;
+
+ // Does the render device have a master mute control?
+ // If so, use it exclusively.
+ AudioObjectPropertyAddress propertyAddress = {
+ kAudioDevicePropertyMute, kAudioDevicePropertyScopeOutput, 0};
+ Boolean isSettable = false;
+ err = AudioObjectIsPropertySettable(_outputDeviceID, &propertyAddress,
+ &isSettable);
+ if (err == noErr && isSettable) {
+ size = sizeof(mute);
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(
+ _outputDeviceID, &propertyAddress, 0, NULL, size, &mute));
return 0;
-}
+ }
-int32_t AudioMixerManagerMac::SpeakerMute(bool& enabled) const
-{
-
- if (_outputDeviceID == kAudioObjectUnknown)
- {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " device ID has not been set");
- return -1;
- }
-
- OSStatus err = noErr;
- UInt32 size = 0;
- unsigned int channels = 0;
- UInt32 channelMuted = 0;
- UInt32 muted = 0;
-
- // Does the device have a master volume control?
- // If so, use it exclusively.
- AudioObjectPropertyAddress propertyAddress = { kAudioDevicePropertyMute,
- kAudioDevicePropertyScopeOutput, 0 };
- Boolean hasProperty = AudioObjectHasProperty(_outputDeviceID,
- &propertyAddress);
- if (hasProperty)
- {
- size = sizeof(muted);
- WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_outputDeviceID,
- &propertyAddress, 0, NULL, &size, &muted));
-
- // 1 means muted
- enabled = static_cast<bool> (muted);
- } else
- {
- // Otherwise check if all channels are muted.
- for (UInt32 i = 1; i <= _noOutputChannels; i++)
- {
- muted = 0;
- propertyAddress.mElement = i;
- hasProperty = AudioObjectHasProperty(_outputDeviceID,
- &propertyAddress);
- if (hasProperty)
- {
- size = sizeof(channelMuted);
- WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_outputDeviceID,
- &propertyAddress, 0, NULL, &size, &channelMuted));
-
- muted = (muted && channelMuted);
- channels++;
- }
- }
-
- if (channels == 0)
- {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " Unable to get mute for any channel");
- return -1;
- }
-
- assert(channels > 0);
- // 1 means muted
- enabled = static_cast<bool> (muted);
- }
-
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- " AudioMixerManagerMac::SpeakerMute() => enabled=%d, enabled");
-
- return 0;
-}
-
-int32_t AudioMixerManagerMac::StereoPlayoutIsAvailable(bool& available)
-{
- if (_outputDeviceID == kAudioObjectUnknown)
- {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " device ID has not been set");
- return -1;
- }
-
- available = (_noOutputChannels == 2);
- return 0;
-}
-
-int32_t AudioMixerManagerMac::StereoRecordingIsAvailable(bool& available)
-{
- if (_inputDeviceID == kAudioObjectUnknown)
- {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " device ID has not been set");
- return -1;
- }
-
- available = (_noInputChannels == 2);
- return 0;
-}
-
-int32_t AudioMixerManagerMac::MicrophoneMuteIsAvailable(bool& available)
-{
- if (_inputDeviceID == kAudioObjectUnknown)
- {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " device ID has not been set");
- return -1;
- }
-
- OSStatus err = noErr;
-
- // Does the capture device have a master mute control?
- // If so, use it exclusively.
- AudioObjectPropertyAddress propertyAddress = { kAudioDevicePropertyMute,
- kAudioDevicePropertyScopeInput, 0 };
- Boolean isSettable = false;
- err = AudioObjectIsPropertySettable(_inputDeviceID, &propertyAddress,
+ // Otherwise try to set each channel.
+ for (UInt32 i = 1; i <= _noOutputChannels; i++) {
+ propertyAddress.mElement = i;
+ isSettable = false;
+ err = AudioObjectIsPropertySettable(_outputDeviceID, &propertyAddress,
&isSettable);
- if (err == noErr && isSettable)
- {
- available = true;
- return 0;
- }
-
- // Otherwise try to set each channel.
- for (UInt32 i = 1; i <= _noInputChannels; i++)
- {
- propertyAddress.mElement = i;
- isSettable = false;
- err = AudioObjectIsPropertySettable(_inputDeviceID, &propertyAddress,
- &isSettable);
- if (err != noErr || !isSettable)
- {
- available = false;
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " Mute cannot be set for output channel %d, err=%d",
- i, err);
- return -1;
- }
- }
-
+ if (err == noErr && isSettable) {
+ size = sizeof(mute);
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(
+ _outputDeviceID, &propertyAddress, 0, NULL, size, &mute));
+ }
+ success = true;
+ }
+
+ if (!success) {
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+                 " Unable to set mute on any output channel");
+ return -1;
+ }
+
+ return 0;
+}
+
+int32_t AudioMixerManagerMac::SpeakerMute(bool& enabled) const {
+ if (_outputDeviceID == kAudioObjectUnknown) {
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+ " device ID has not been set");
+ return -1;
+ }
+
+ OSStatus err = noErr;
+ UInt32 size = 0;
+ unsigned int channels = 0;
+ UInt32 channelMuted = 0;
+ UInt32 muted = 0;
+
+ // Does the device have a master volume control?
+ // If so, use it exclusively.
+ AudioObjectPropertyAddress propertyAddress = {
+ kAudioDevicePropertyMute, kAudioDevicePropertyScopeOutput, 0};
+ Boolean hasProperty =
+ AudioObjectHasProperty(_outputDeviceID, &propertyAddress);
+ if (hasProperty) {
+ size = sizeof(muted);
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+ _outputDeviceID, &propertyAddress, 0, NULL, &size, &muted));
+
+ // 1 means muted
+ enabled = static_cast<bool>(muted);
+ } else {
+    // Otherwise check if all channels are muted. Seed the accumulator once;
+    // a single unmuted channel clears it.
+    muted = 1;
+    for (UInt32 i = 1; i <= _noOutputChannels; i++) {
+ propertyAddress.mElement = i;
+ hasProperty = AudioObjectHasProperty(_outputDeviceID, &propertyAddress);
+ if (hasProperty) {
+ size = sizeof(channelMuted);
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+ _outputDeviceID, &propertyAddress, 0, NULL, &size, &channelMuted));
+
+ muted = (muted && channelMuted);
+ channels++;
+ }
+ }
+
+ if (channels == 0) {
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+ " Unable to get mute for any channel");
+ return -1;
+ }
+
+ assert(channels > 0);
+ // 1 means muted
+ enabled = static_cast<bool>(muted);
+ }
+
+  WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+               " AudioMixerManagerMac::SpeakerMute() => enabled=%d", enabled);
+
+ return 0;
+}
+
+int32_t AudioMixerManagerMac::StereoPlayoutIsAvailable(bool& available) {
+ if (_outputDeviceID == kAudioObjectUnknown) {
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+ " device ID has not been set");
+ return -1;
+ }
+
+ available = (_noOutputChannels == 2);
+ return 0;
+}
+
+int32_t AudioMixerManagerMac::StereoRecordingIsAvailable(bool& available) {
+ if (_inputDeviceID == kAudioObjectUnknown) {
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+ " device ID has not been set");
+ return -1;
+ }
+
+ available = (_noInputChannels == 2);
+ return 0;
+}
+
+int32_t AudioMixerManagerMac::MicrophoneMuteIsAvailable(bool& available) {
+ if (_inputDeviceID == kAudioObjectUnknown) {
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+ " device ID has not been set");
+ return -1;
+ }
+
+ OSStatus err = noErr;
+
+ // Does the capture device have a master mute control?
+ // If so, use it exclusively.
+ AudioObjectPropertyAddress propertyAddress = {
+ kAudioDevicePropertyMute, kAudioDevicePropertyScopeInput, 0};
+ Boolean isSettable = false;
+ err = AudioObjectIsPropertySettable(_inputDeviceID, &propertyAddress,
+ &isSettable);
+ if (err == noErr && isSettable) {
available = true;
return 0;
-}
+ }
-int32_t AudioMixerManagerMac::SetMicrophoneMute(bool enable)
-{
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- "AudioMixerManagerMac::SetMicrophoneMute(enable=%u)", enable);
-
- CriticalSectionScoped lock(&_critSect);
-
- if (_inputDeviceID == kAudioObjectUnknown)
- {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " device ID has not been set");
- return -1;
- }
-
- OSStatus err = noErr;
- UInt32 size = 0;
- UInt32 mute = enable ? 1 : 0;
- bool success = false;
-
- // Does the capture device have a master mute control?
- // If so, use it exclusively.
- AudioObjectPropertyAddress propertyAddress = { kAudioDevicePropertyMute,
- kAudioDevicePropertyScopeInput, 0 };
- Boolean isSettable = false;
+ // Otherwise try to set each channel.
+ for (UInt32 i = 1; i <= _noInputChannels; i++) {
+ propertyAddress.mElement = i;
+ isSettable = false;
err = AudioObjectIsPropertySettable(_inputDeviceID, &propertyAddress,
&isSettable);
- if (err == noErr && isSettable)
- {
- size = sizeof(mute);
- WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(_inputDeviceID,
- &propertyAddress, 0, NULL, size, &mute));
-
- return 0;
- }
-
- // Otherwise try to set each channel.
- for (UInt32 i = 1; i <= _noInputChannels; i++)
- {
- propertyAddress.mElement = i;
- isSettable = false;
- err = AudioObjectIsPropertySettable(_inputDeviceID, &propertyAddress,
- &isSettable);
- if (err == noErr && isSettable)
- {
- size = sizeof(mute);
- WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(_inputDeviceID,
- &propertyAddress, 0, NULL, size, &mute));
- }
- success = true;
- }
-
- if (!success)
- {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " Unable to set mute on any input channel");
- return -1;
- }
-
- return 0;
-}
-
-int32_t AudioMixerManagerMac::MicrophoneMute(bool& enabled) const
-{
-
- if (_inputDeviceID == kAudioObjectUnknown)
- {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " device ID has not been set");
- return -1;
- }
-
- OSStatus err = noErr;
- UInt32 size = 0;
- unsigned int channels = 0;
- UInt32 channelMuted = 0;
- UInt32 muted = 0;
-
- // Does the device have a master volume control?
- // If so, use it exclusively.
- AudioObjectPropertyAddress propertyAddress = { kAudioDevicePropertyMute,
- kAudioDevicePropertyScopeInput, 0 };
- Boolean hasProperty = AudioObjectHasProperty(_inputDeviceID,
- &propertyAddress);
- if (hasProperty)
- {
- size = sizeof(muted);
- WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_inputDeviceID,
- &propertyAddress, 0, NULL, &size, &muted));
-
- // 1 means muted
- enabled = static_cast<bool> (muted);
- } else
- {
- // Otherwise check if all channels are muted.
- for (UInt32 i = 1; i <= _noInputChannels; i++)
- {
- muted = 0;
- propertyAddress.mElement = i;
- hasProperty = AudioObjectHasProperty(_inputDeviceID,
- &propertyAddress);
- if (hasProperty)
- {
- size = sizeof(channelMuted);
- WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_inputDeviceID,
- &propertyAddress, 0, NULL, &size, &channelMuted));
-
- muted = (muted && channelMuted);
- channels++;
- }
- }
-
- if (channels == 0)
- {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " Unable to get mute for any channel");
- return -1;
- }
-
- assert(channels > 0);
- // 1 means muted
- enabled = static_cast<bool> (muted);
- }
-
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- " AudioMixerManagerMac::MicrophoneMute() => enabled=%d",
- enabled);
+ if (err != noErr || !isSettable) {
+ available = false;
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+                   " Mute cannot be set for input channel %d, err=%d", i, err);
+ return -1;
+ }
+ }
+
+ available = true;
+ return 0;
+}
+
+int32_t AudioMixerManagerMac::SetMicrophoneMute(bool enable) {
+ WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+ "AudioMixerManagerMac::SetMicrophoneMute(enable=%u)", enable);
+
+ CriticalSectionScoped lock(&_critSect);
+
+ if (_inputDeviceID == kAudioObjectUnknown) {
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+ " device ID has not been set");
+ return -1;
+ }
+
+ OSStatus err = noErr;
+ UInt32 size = 0;
+ UInt32 mute = enable ? 1 : 0;
+ bool success = false;
+
+ // Does the capture device have a master mute control?
+ // If so, use it exclusively.
+ AudioObjectPropertyAddress propertyAddress = {
+ kAudioDevicePropertyMute, kAudioDevicePropertyScopeInput, 0};
+ Boolean isSettable = false;
+ err = AudioObjectIsPropertySettable(_inputDeviceID, &propertyAddress,
+ &isSettable);
+ if (err == noErr && isSettable) {
+ size = sizeof(mute);
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(
+ _inputDeviceID, &propertyAddress, 0, NULL, size, &mute));
return 0;
-}
-
-int32_t AudioMixerManagerMac::MicrophoneBoostIsAvailable(bool& available)
-{
- if (_inputDeviceID == kAudioObjectUnknown)
- {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " device ID has not been set");
- return -1;
- }
-
- available = false; // No AudioObjectPropertySelector value for Mic Boost
+ }
- return 0;
-}
-
-int32_t AudioMixerManagerMac::SetMicrophoneBoost(bool enable)
-{
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- "AudioMixerManagerMac::SetMicrophoneBoost(enable=%u)", enable);
-
- CriticalSectionScoped lock(&_critSect);
-
- if (_inputDeviceID == kAudioObjectUnknown)
- {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " device ID has not been set");
- return -1;
- }
-
- // Ensure that the selected microphone has a valid boost control.
- bool available(false);
- MicrophoneBoostIsAvailable(available);
- if (!available)
- {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " it is not possible to enable microphone boost");
- return -1;
- }
-
- // It is assumed that the call above fails!
- return 0;
-}
-
-int32_t AudioMixerManagerMac::MicrophoneBoost(bool& enabled) const
-{
-
- if (_inputDeviceID == kAudioObjectUnknown)
- {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " device ID has not been set");
- return -1;
- }
-
- // Microphone boost cannot be enabled on this platform!
- enabled = false;
-
- return 0;
-}
-
-int32_t AudioMixerManagerMac::MicrophoneVolumeIsAvailable(bool& available)
-{
- if (_inputDeviceID == kAudioObjectUnknown)
- {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " device ID has not been set");
- return -1;
- }
-
- OSStatus err = noErr;
-
- // Does the capture device have a master volume control?
- // If so, use it exclusively.
- AudioObjectPropertyAddress
- propertyAddress = { kAudioDevicePropertyVolumeScalar,
- kAudioDevicePropertyScopeInput, 0 };
- Boolean isSettable = false;
+ // Otherwise try to set each channel.
+ for (UInt32 i = 1; i <= _noInputChannels; i++) {
+ propertyAddress.mElement = i;
+ isSettable = false;
err = AudioObjectIsPropertySettable(_inputDeviceID, &propertyAddress,
&isSettable);
- if (err == noErr && isSettable)
- {
- available = true;
- return 0;
- }
-
- // Otherwise try to set each channel.
- for (UInt32 i = 1; i <= _noInputChannels; i++)
- {
- propertyAddress.mElement = i;
- isSettable = false;
- err = AudioObjectIsPropertySettable(_inputDeviceID, &propertyAddress,
- &isSettable);
- if (err != noErr || !isSettable)
- {
- available = false;
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " Volume cannot be set for input channel %d, err=%d",
- i, err);
- return -1;
- }
- }
-
+ if (err == noErr && isSettable) {
+ size = sizeof(mute);
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(
+ _inputDeviceID, &propertyAddress, 0, NULL, size, &mute));
+ }
+ success = true;
+ }
+
+ if (!success) {
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+ " Unable to set mute on any input channel");
+ return -1;
+ }
+
+ return 0;
+}
+
+int32_t AudioMixerManagerMac::MicrophoneMute(bool& enabled) const {
+ if (_inputDeviceID == kAudioObjectUnknown) {
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+ " device ID has not been set");
+ return -1;
+ }
+
+ OSStatus err = noErr;
+ UInt32 size = 0;
+ unsigned int channels = 0;
+ UInt32 channelMuted = 0;
+ UInt32 muted = 0;
+
+ // Does the device have a master volume control?
+ // If so, use it exclusively.
+ AudioObjectPropertyAddress propertyAddress = {
+ kAudioDevicePropertyMute, kAudioDevicePropertyScopeInput, 0};
+ Boolean hasProperty =
+ AudioObjectHasProperty(_inputDeviceID, &propertyAddress);
+ if (hasProperty) {
+ size = sizeof(muted);
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+ _inputDeviceID, &propertyAddress, 0, NULL, &size, &muted));
+
+ // 1 means muted
+ enabled = static_cast<bool>(muted);
+ } else {
+    // Otherwise check if all channels are muted. Seed the accumulator once;
+    // a single unmuted channel clears it.
+    muted = 1;
+    for (UInt32 i = 1; i <= _noInputChannels; i++) {
+ propertyAddress.mElement = i;
+ hasProperty = AudioObjectHasProperty(_inputDeviceID, &propertyAddress);
+ if (hasProperty) {
+ size = sizeof(channelMuted);
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+ _inputDeviceID, &propertyAddress, 0, NULL, &size, &channelMuted));
+
+ muted = (muted && channelMuted);
+ channels++;
+ }
+ }
+
+ if (channels == 0) {
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+ " Unable to get mute for any channel");
+ return -1;
+ }
+
+ assert(channels > 0);
+ // 1 means muted
+ enabled = static_cast<bool>(muted);
+ }
+
+ WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+ " AudioMixerManagerMac::MicrophoneMute() => enabled=%d",
+ enabled);
+
+ return 0;
+}
+
+int32_t AudioMixerManagerMac::MicrophoneBoostIsAvailable(bool& available) {
+ if (_inputDeviceID == kAudioObjectUnknown) {
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+ " device ID has not been set");
+ return -1;
+ }
+
+ available = false; // No AudioObjectPropertySelector value for Mic Boost
+
+ return 0;
+}
+
+int32_t AudioMixerManagerMac::SetMicrophoneBoost(bool enable) {
+ WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+ "AudioMixerManagerMac::SetMicrophoneBoost(enable=%u)", enable);
+
+ CriticalSectionScoped lock(&_critSect);
+
+ if (_inputDeviceID == kAudioObjectUnknown) {
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+ " device ID has not been set");
+ return -1;
+ }
+
+ // Ensure that the selected microphone has a valid boost control.
+ bool available(false);
+ MicrophoneBoostIsAvailable(available);
+ if (!available) {
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+ " it is not possible to enable microphone boost");
+ return -1;
+ }
+
+  // Unreachable in practice: the availability check above always fails on
+  // this platform.
+ return 0;
+}
+
+int32_t AudioMixerManagerMac::MicrophoneBoost(bool& enabled) const {
+ if (_inputDeviceID == kAudioObjectUnknown) {
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+ " device ID has not been set");
+ return -1;
+ }
+
+ // Microphone boost cannot be enabled on this platform!
+ enabled = false;
+
+ return 0;
+}
+
+int32_t AudioMixerManagerMac::MicrophoneVolumeIsAvailable(bool& available) {
+ if (_inputDeviceID == kAudioObjectUnknown) {
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+ " device ID has not been set");
+ return -1;
+ }
+
+ OSStatus err = noErr;
+
+ // Does the capture device have a master volume control?
+ // If so, use it exclusively.
+ AudioObjectPropertyAddress propertyAddress = {
+ kAudioDevicePropertyVolumeScalar, kAudioDevicePropertyScopeInput, 0};
+ Boolean isSettable = false;
+ err = AudioObjectIsPropertySettable(_inputDeviceID, &propertyAddress,
+ &isSettable);
+ if (err == noErr && isSettable) {
available = true;
return 0;
-}
-
-int32_t AudioMixerManagerMac::SetMicrophoneVolume(uint32_t volume)
-{
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- "AudioMixerManagerMac::SetMicrophoneVolume(volume=%u)", volume);
-
- CriticalSectionScoped lock(&_critSect);
+ }
- if (_inputDeviceID == kAudioObjectUnknown)
- {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " device ID has not been set");
- return -1;
- }
-
- OSStatus err = noErr;
- UInt32 size = 0;
- bool success = false;
-
- // volume range is 0.0 - 1.0, convert from 0 - 255
- const Float32 vol = (Float32)(volume / 255.0);
-
- assert(vol <= 1.0 && vol >= 0.0);
-
- // Does the capture device have a master volume control?
- // If so, use it exclusively.
- AudioObjectPropertyAddress
- propertyAddress = { kAudioDevicePropertyVolumeScalar,
- kAudioDevicePropertyScopeInput, 0 };
- Boolean isSettable = false;
+ // Otherwise try to set each channel.
+ for (UInt32 i = 1; i <= _noInputChannels; i++) {
+ propertyAddress.mElement = i;
+ isSettable = false;
err = AudioObjectIsPropertySettable(_inputDeviceID, &propertyAddress,
&isSettable);
- if (err == noErr && isSettable)
- {
- size = sizeof(vol);
- WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(_inputDeviceID,
- &propertyAddress, 0, NULL, size, &vol));
-
- return 0;
+ if (err != noErr || !isSettable) {
+ available = false;
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+ " Volume cannot be set for input channel %d, err=%d", i,
+ err);
+ return -1;
}
+ }
- // Otherwise try to set each channel.
- for (UInt32 i = 1; i <= _noInputChannels; i++)
- {
- propertyAddress.mElement = i;
- isSettable = false;
- err = AudioObjectIsPropertySettable(_inputDeviceID, &propertyAddress,
- &isSettable);
- if (err == noErr && isSettable)
- {
- size = sizeof(vol);
- WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(_inputDeviceID,
- &propertyAddress, 0, NULL, size, &vol));
- }
- success = true;
- }
-
- if (!success)
- {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " Unable to set a level on any input channel");
- return -1;
- }
-
- return 0;
+ available = true;
+ return 0;
}
-int32_t
-AudioMixerManagerMac::MicrophoneVolume(uint32_t& volume) const
-{
-
- if (_inputDeviceID == kAudioObjectUnknown)
- {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " device ID has not been set");
- return -1;
- }
+int32_t AudioMixerManagerMac::SetMicrophoneVolume(uint32_t volume) {
+ WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+ "AudioMixerManagerMac::SetMicrophoneVolume(volume=%u)", volume);
- OSStatus err = noErr;
- UInt32 size = 0;
- unsigned int channels = 0;
- Float32 channelVol = 0;
- Float32 volFloat32 = 0;
-
- // Does the device have a master volume control?
- // If so, use it exclusively.
- AudioObjectPropertyAddress
- propertyAddress = { kAudioDevicePropertyVolumeScalar,
- kAudioDevicePropertyScopeInput, 0 };
- Boolean hasProperty = AudioObjectHasProperty(_inputDeviceID,
- &propertyAddress);
- if (hasProperty)
- {
- size = sizeof(volFloat32);
- WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_inputDeviceID,
- &propertyAddress, 0, NULL, &size, &volFloat32));
-
- // vol 0.0 to 1.0 -> convert to 0 - 255
- volume = static_cast<uint32_t> (volFloat32 * 255 + 0.5);
- } else
- {
- // Otherwise get the average volume across channels.
- volFloat32 = 0;
- for (UInt32 i = 1; i <= _noInputChannels; i++)
- {
- channelVol = 0;
- propertyAddress.mElement = i;
- hasProperty = AudioObjectHasProperty(_inputDeviceID,
- &propertyAddress);
- if (hasProperty)
- {
- size = sizeof(channelVol);
- WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_inputDeviceID,
- &propertyAddress, 0, NULL, &size, &channelVol));
-
- volFloat32 += channelVol;
- channels++;
- }
- }
-
- if (channels == 0)
- {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " Unable to get a level on any channel");
- return -1;
- }
-
- assert(channels > 0);
- // vol 0.0 to 1.0 -> convert to 0 - 255
- volume = static_cast<uint32_t>
- (255 * volFloat32 / channels + 0.5);
- }
+ CriticalSectionScoped lock(&_critSect);
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- " AudioMixerManagerMac::MicrophoneVolume() => vol=%u",
- volume);
+ if (_inputDeviceID == kAudioObjectUnknown) {
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+ " device ID has not been set");
+ return -1;
+ }
- return 0;
-}
+ OSStatus err = noErr;
+ UInt32 size = 0;
+ bool success = false;
-int32_t
-AudioMixerManagerMac::MaxMicrophoneVolume(uint32_t& maxVolume) const
-{
+ // volume range is 0.0 - 1.0, convert from 0 - 255
+ const Float32 vol = (Float32)(volume / 255.0);
- if (_inputDeviceID == kAudioObjectUnknown)
- {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " device ID has not been set");
- return -1;
- }
+ assert(vol <= 1.0 && vol >= 0.0);
- // volume range is 0.0 to 1.0
- // we convert that to 0 - 255
- maxVolume = 255;
+ // Does the capture device have a master volume control?
+ // If so, use it exclusively.
+ AudioObjectPropertyAddress propertyAddress = {
+ kAudioDevicePropertyVolumeScalar, kAudioDevicePropertyScopeInput, 0};
+ Boolean isSettable = false;
+ err = AudioObjectIsPropertySettable(_inputDeviceID, &propertyAddress,
+ &isSettable);
+ if (err == noErr && isSettable) {
+ size = sizeof(vol);
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(
+ _inputDeviceID, &propertyAddress, 0, NULL, size, &vol));
return 0;
-}
-
-int32_t
-AudioMixerManagerMac::MinMicrophoneVolume(uint32_t& minVolume) const
-{
-
- if (_inputDeviceID == kAudioObjectUnknown)
- {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " device ID has not been set");
- return -1;
- }
+ }
- // volume range is 0.0 to 1.0
- // we convert that to 0 - 10
- minVolume = 0;
+ // Otherwise try to set each channel.
+ for (UInt32 i = 1; i <= _noInputChannels; i++) {
+ propertyAddress.mElement = i;
+ isSettable = false;
+ err = AudioObjectIsPropertySettable(_inputDeviceID, &propertyAddress,
+ &isSettable);
+ if (err == noErr && isSettable) {
+ size = sizeof(vol);
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(
+ _inputDeviceID, &propertyAddress, 0, NULL, size, &vol));
+ }
+ success = true;
+ }
+
+ if (!success) {
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+ " Unable to set a level on any input channel");
+ return -1;
+ }
+
+ return 0;
+}
+
+int32_t AudioMixerManagerMac::MicrophoneVolume(uint32_t& volume) const {
+ if (_inputDeviceID == kAudioObjectUnknown) {
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+ " device ID has not been set");
+ return -1;
+ }
+
+ OSStatus err = noErr;
+ UInt32 size = 0;
+ unsigned int channels = 0;
+ Float32 channelVol = 0;
+ Float32 volFloat32 = 0;
+
+ // Does the device have a master volume control?
+ // If so, use it exclusively.
+ AudioObjectPropertyAddress propertyAddress = {
+ kAudioDevicePropertyVolumeScalar, kAudioDevicePropertyScopeInput, 0};
+ Boolean hasProperty =
+ AudioObjectHasProperty(_inputDeviceID, &propertyAddress);
+ if (hasProperty) {
+ size = sizeof(volFloat32);
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+ _inputDeviceID, &propertyAddress, 0, NULL, &size, &volFloat32));
+
+ // vol 0.0 to 1.0 -> convert to 0 - 255
+ volume = static_cast<uint32_t>(volFloat32 * 255 + 0.5);
+ } else {
+ // Otherwise get the average volume across channels.
+ volFloat32 = 0;
+ for (UInt32 i = 1; i <= _noInputChannels; i++) {
+ channelVol = 0;
+ propertyAddress.mElement = i;
+ hasProperty = AudioObjectHasProperty(_inputDeviceID, &propertyAddress);
+ if (hasProperty) {
+ size = sizeof(channelVol);
+ WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
+ _inputDeviceID, &propertyAddress, 0, NULL, &size, &channelVol));
+
+ volFloat32 += channelVol;
+ channels++;
+ }
+ }
+
+ if (channels == 0) {
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+ " Unable to get a level on any channel");
+ return -1;
+ }
+
+ assert(channels > 0);
+ // vol 0.0 to 1.0 -> convert to 0 - 255
+ volume = static_cast<uint32_t>(255 * volFloat32 / channels + 0.5);
+ }
+
+ WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+ " AudioMixerManagerMac::MicrophoneVolume() => vol=%u",
+ volume);
+
+ return 0;
+}
+
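When no master volume element exists, the getter above averages the per-channel scalars before scaling. Worked example: two input channels reporting 0.25 and 0.75 average to 0.5, which is returned as static_cast<uint32_t>(255 * 0.5 + 0.5) == 128.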
+int32_t AudioMixerManagerMac::MaxMicrophoneVolume(uint32_t& maxVolume) const {
+ if (_inputDeviceID == kAudioObjectUnknown) {
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+ " device ID has not been set");
+ return -1;
+ }
+
+ // volume range is 0.0 to 1.0
+ // we convert that to 0 - 255
+ maxVolume = 255;
+
+ return 0;
+}
+
+int32_t AudioMixerManagerMac::MinMicrophoneVolume(uint32_t& minVolume) const {
+ if (_inputDeviceID == kAudioObjectUnknown) {
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+ " device ID has not been set");
+ return -1;
+ }
+
+ // volume range is 0.0 to 1.0
+  // we convert that to 0 - 255
+ minVolume = 0;
- return 0;
+ return 0;
}
-int32_t
-AudioMixerManagerMac::MicrophoneVolumeStepSize(uint16_t& stepSize) const
-{
-
- if (_inputDeviceID == kAudioObjectUnknown)
- {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " device ID has not been set");
- return -1;
- }
-
- // volume range is 0.0 to 1.0
- // we convert that to 0 - 10
- stepSize = 1;
-
- return 0;
+int32_t AudioMixerManagerMac::MicrophoneVolumeStepSize(
+ uint16_t& stepSize) const {
+ if (_inputDeviceID == kAudioObjectUnknown) {
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+ " device ID has not been set");
+ return -1;
+ }
+
+ // volume range is 0.0 to 1.0
+  // we convert that to 0 - 255
+ stepSize = 1;
+
+ return 0;
}
// ============================================================================
@@ -1148,18 +1003,18 @@ AudioMixerManagerMac::MicrophoneVolumeStepSize(uint16_t& stepSize) const
// CoreAudio errors are best interpreted as four character strings.
void AudioMixerManagerMac::logCAMsg(const TraceLevel level,
const TraceModule module,
- const int32_t id, const char *msg,
- const char *err)
-{
- assert(msg != NULL);
- assert(err != NULL);
+ const int32_t id,
+ const char* msg,
+ const char* err) {
+ assert(msg != NULL);
+ assert(err != NULL);
#ifdef WEBRTC_ARCH_BIG_ENDIAN
- WEBRTC_TRACE(level, module, id, "%s: %.4s", msg, err);
+ WEBRTC_TRACE(level, module, id, "%s: %.4s", msg, err);
#else
- // We need to flip the characters in this case.
- WEBRTC_TRACE(level, module, id, "%s: %.1s%.1s%.1s%.1s", msg, err + 3, err
- + 2, err + 1, err);
+ // We need to flip the characters in this case.
+ WEBRTC_TRACE(level, module, id, "%s: %.1s%.1s%.1s%.1s", msg, err + 3, err + 2,
+ err + 1, err);
#endif
}
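The byte flip above exists because CoreAudio OSStatus values are four-character codes packed into a 32-bit integer in big-endian order. Worked example (assuming a little-endian host): for err = 'what' (0x77686174) the bytes sit in memory as 't', 'a', 'h', 'w', so printing err + 3, err + 2, err + 1, err reconstructs the readable code "what".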
diff --git a/webrtc/modules/audio_device/mac/audio_mixer_manager_mac.h b/webrtc/modules/audio_device/mac/audio_mixer_manager_mac.h
index 94d6928921..9cbfe2deb4 100644
--- a/webrtc/modules/audio_device/mac/audio_mixer_manager_mac.h
+++ b/webrtc/modules/audio_device/mac/audio_mixer_manager_mac.h
@@ -18,63 +18,62 @@
#include <CoreAudio/CoreAudio.h>
namespace webrtc {
-
-class AudioMixerManagerMac
-{
-public:
- int32_t OpenSpeaker(AudioDeviceID deviceID);
- int32_t OpenMicrophone(AudioDeviceID deviceID);
- int32_t SetSpeakerVolume(uint32_t volume);
- int32_t SpeakerVolume(uint32_t& volume) const;
- int32_t MaxSpeakerVolume(uint32_t& maxVolume) const;
- int32_t MinSpeakerVolume(uint32_t& minVolume) const;
- int32_t SpeakerVolumeStepSize(uint16_t& stepSize) const;
- int32_t SpeakerVolumeIsAvailable(bool& available);
- int32_t SpeakerMuteIsAvailable(bool& available);
- int32_t SetSpeakerMute(bool enable);
- int32_t SpeakerMute(bool& enabled) const;
- int32_t StereoPlayoutIsAvailable(bool& available);
- int32_t StereoRecordingIsAvailable(bool& available);
- int32_t MicrophoneMuteIsAvailable(bool& available);
- int32_t SetMicrophoneMute(bool enable);
- int32_t MicrophoneMute(bool& enabled) const;
- int32_t MicrophoneBoostIsAvailable(bool& available);
- int32_t SetMicrophoneBoost(bool enable);
- int32_t MicrophoneBoost(bool& enabled) const;
- int32_t MicrophoneVolumeIsAvailable(bool& available);
- int32_t SetMicrophoneVolume(uint32_t volume);
- int32_t MicrophoneVolume(uint32_t& volume) const;
- int32_t MaxMicrophoneVolume(uint32_t& maxVolume) const;
- int32_t MinMicrophoneVolume(uint32_t& minVolume) const;
- int32_t MicrophoneVolumeStepSize(uint16_t& stepSize) const;
- int32_t Close();
- int32_t CloseSpeaker();
- int32_t CloseMicrophone();
- bool SpeakerIsInitialized() const;
- bool MicrophoneIsInitialized() const;
-public:
- AudioMixerManagerMac(const int32_t id);
- ~AudioMixerManagerMac();
+class AudioMixerManagerMac {
+ public:
+ int32_t OpenSpeaker(AudioDeviceID deviceID);
+ int32_t OpenMicrophone(AudioDeviceID deviceID);
+ int32_t SetSpeakerVolume(uint32_t volume);
+ int32_t SpeakerVolume(uint32_t& volume) const;
+ int32_t MaxSpeakerVolume(uint32_t& maxVolume) const;
+ int32_t MinSpeakerVolume(uint32_t& minVolume) const;
+ int32_t SpeakerVolumeStepSize(uint16_t& stepSize) const;
+ int32_t SpeakerVolumeIsAvailable(bool& available);
+ int32_t SpeakerMuteIsAvailable(bool& available);
+ int32_t SetSpeakerMute(bool enable);
+ int32_t SpeakerMute(bool& enabled) const;
+ int32_t StereoPlayoutIsAvailable(bool& available);
+ int32_t StereoRecordingIsAvailable(bool& available);
+ int32_t MicrophoneMuteIsAvailable(bool& available);
+ int32_t SetMicrophoneMute(bool enable);
+ int32_t MicrophoneMute(bool& enabled) const;
+ int32_t MicrophoneBoostIsAvailable(bool& available);
+ int32_t SetMicrophoneBoost(bool enable);
+ int32_t MicrophoneBoost(bool& enabled) const;
+ int32_t MicrophoneVolumeIsAvailable(bool& available);
+ int32_t SetMicrophoneVolume(uint32_t volume);
+ int32_t MicrophoneVolume(uint32_t& volume) const;
+ int32_t MaxMicrophoneVolume(uint32_t& maxVolume) const;
+ int32_t MinMicrophoneVolume(uint32_t& minVolume) const;
+ int32_t MicrophoneVolumeStepSize(uint16_t& stepSize) const;
+ int32_t Close();
+ int32_t CloseSpeaker();
+ int32_t CloseMicrophone();
+ bool SpeakerIsInitialized() const;
+ bool MicrophoneIsInitialized() const;
-private:
- static void logCAMsg(const TraceLevel level,
- const TraceModule module,
- const int32_t id, const char *msg,
- const char *err);
+ public:
+ AudioMixerManagerMac(const int32_t id);
+ ~AudioMixerManagerMac();
-private:
- CriticalSectionWrapper& _critSect;
- int32_t _id;
+ private:
+ static void logCAMsg(const TraceLevel level,
+ const TraceModule module,
+ const int32_t id,
+ const char* msg,
+ const char* err);
- AudioDeviceID _inputDeviceID;
- AudioDeviceID _outputDeviceID;
+ private:
+ CriticalSectionWrapper& _critSect;
+ int32_t _id;
- uint16_t _noInputChannels;
- uint16_t _noOutputChannels;
+ AudioDeviceID _inputDeviceID;
+ AudioDeviceID _outputDeviceID;
+ uint16_t _noInputChannels;
+ uint16_t _noOutputChannels;
};
-
+
} // namespace webrtc
#endif // AUDIO_MIXER_MAC_H
diff --git a/webrtc/modules/audio_device/main/interface/audio_device.h b/webrtc/modules/audio_device/main/interface/audio_device.h
deleted file mode 100644
index 71f16b685a..0000000000
--- a/webrtc/modules/audio_device/main/interface/audio_device.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef MODULES_AUDIO_DEVICE_MAIN_INTERFACE_AUDIO_DEVICE_H_
-#define MODULES_AUDIO_DEVICE_MAIN_INTERFACE_AUDIO_DEVICE_H_
-
-#include "webrtc/modules/audio_device/include/audio_device.h"
-
-#endif // MODULES_AUDIO_DEVICE_MAIN_INTERFACE_AUDIO_DEVICE_H_
diff --git a/webrtc/modules/audio_device/main/source/OWNERS b/webrtc/modules/audio_device/main/source/OWNERS
deleted file mode 100644
index 3ee6b4bf5f..0000000000
--- a/webrtc/modules/audio_device/main/source/OWNERS
+++ /dev/null
@@ -1,5 +0,0 @@
-
-# These are for the common case of adding or renaming files. If you're doing
-# structural changes, please get a review from a reviewer in this file.
-per-file *.gyp=*
-per-file *.gypi=*
diff --git a/webrtc/modules/audio_device/main/source/audio_device.gypi b/webrtc/modules/audio_device/main/source/audio_device.gypi
deleted file mode 100644
index 126eb98ac1..0000000000
--- a/webrtc/modules/audio_device/main/source/audio_device.gypi
+++ /dev/null
@@ -1,14 +0,0 @@
-# Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
-#
-# Use of this source code is governed by a BSD-style license
-# that can be found in the LICENSE file in the root of the source
-# tree. An additional intellectual property rights grant can be found
-# in the file PATENTS. All contributing project authors may
-# be found in the AUTHORS file in the root of the source tree.
-
-{
- 'includes': [
- '../../audio_device.gypi',
- ],
-}
-
diff --git a/webrtc/modules/audio_device/test/audio_device_test_api.cc b/webrtc/modules/audio_device/test/audio_device_test_api.cc
index f36a049ffc..26a2dcd8c4 100644
--- a/webrtc/modules/audio_device/test/audio_device_test_api.cc
+++ b/webrtc/modules/audio_device/test/audio_device_test_api.cc
@@ -85,7 +85,7 @@ class AudioTransportAPI: public AudioTransport {
int32_t RecordedDataIsAvailable(const void* audioSamples,
const size_t nSamples,
const size_t nBytesPerSample,
- const uint8_t nChannels,
+ const size_t nChannels,
const uint32_t sampleRate,
const uint32_t totalDelay,
const int32_t clockSkew,
@@ -110,7 +110,7 @@ class AudioTransportAPI: public AudioTransport {
int32_t NeedMorePlayData(const size_t nSamples,
const size_t nBytesPerSample,
- const uint8_t nChannels,
+ const size_t nChannels,
const uint32_t sampleRate,
void* audioSamples,
size_t& nSamplesOut,
@@ -128,29 +128,6 @@ class AudioTransportAPI: public AudioTransport {
return 0;
}
- int OnDataAvailable(const int voe_channels[],
- int number_of_voe_channels,
- const int16_t* audio_data,
- int sample_rate,
- int number_of_channels,
- size_t number_of_frames,
- int audio_delay_milliseconds,
- int current_volume,
- bool key_pressed,
- bool need_audio_processing) override {
- return 0;
- }
-
- void PushCaptureData(int voe_channel, const void* audio_data,
- int bits_per_sample, int sample_rate,
- int number_of_channels,
- size_t number_of_frames) override {}
-
- void PullRenderData(int bits_per_sample, int sample_rate,
- int number_of_channels, size_t number_of_frames,
- void* audio_data,
- int64_t* elapsed_time_ms,
- int64_t* ntp_time_ms) override {}
private:
uint32_t rec_count_;
uint32_t play_count_;
@@ -1040,9 +1017,15 @@ TEST_F(AudioDeviceAPITest, MicrophoneVolumeIsAvailable) {
// MicrophoneVolume
// MaxMicrophoneVolume
// MinMicrophoneVolume
-// NOTE: Disabled on mac due to issue 257.
-#ifndef WEBRTC_MAC
-TEST_F(AudioDeviceAPITest, MicrophoneVolumeTests) {
+
+// Disabled on Mac and Linux,
+// see https://bugs.chromium.org/p/webrtc/issues/detail?id=5414
+#if defined(WEBRTC_MAC) || defined(WEBRTC_LINUX)
+#define MAYBE_MicrophoneVolumeTests DISABLED_MicrophoneVolumeTests
+#else
+#define MAYBE_MicrophoneVolumeTests MicrophoneVolumeTests
+#endif
+TEST_F(AudioDeviceAPITest, MAYBE_MicrophoneVolumeTests) {
uint32_t vol(0);
uint32_t volume(0);
uint32_t maxVolume(0);
@@ -1135,7 +1118,6 @@ TEST_F(AudioDeviceAPITest, MicrophoneVolumeTests) {
EXPECT_EQ(0, audio_device_->SetMicrophoneVolume(maxVolume/10));
}
}
-#endif // !WEBRTC_MAC
TEST_F(AudioDeviceAPITest, SpeakerMuteIsAvailable) {
bool available;
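
The hunk above replaces a hard #ifndef WEBRTC_MAC compile-out with gtest's MAYBE_/DISABLED_ convention, so the test still builds on every platform and can be run explicitly with --gtest_also_run_disabled_tests. A generic sketch of the pattern (fixture and test names are illustrative):

#if defined(WEBRTC_MAC) || defined(WEBRTC_LINUX)
#define MAYBE_FlakyTest DISABLED_FlakyTest  // registered, but skipped by default
#else
#define MAYBE_FlakyTest FlakyTest           // runs normally
#endif
TEST_F(MyFixture, MAYBE_FlakyTest) {
  // Platform-sensitive assertions go here.
}
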
diff --git a/webrtc/modules/audio_device/test/audio_device_test_defines.h b/webrtc/modules/audio_device/test/audio_device_test_defines.h
index 5068646bdd..cc8e3e3aef 100644
--- a/webrtc/modules/audio_device/test/audio_device_test_defines.h
+++ b/webrtc/modules/audio_device/test/audio_device_test_defines.h
@@ -13,7 +13,7 @@
#include "webrtc/common_types.h"
#include "webrtc/modules/audio_device/include/audio_device.h"
-#include "webrtc/modules/utility/interface/process_thread.h"
+#include "webrtc/modules/utility/include/process_thread.h"
#include "webrtc/system_wrappers/include/trace.h"
#ifdef _WIN32
diff --git a/webrtc/modules/audio_device/test/func_test_manager.cc b/webrtc/modules/audio_device/test/func_test_manager.cc
index 0ebfc8395f..0a2963e9ae 100644
--- a/webrtc/modules/audio_device/test/func_test_manager.cc
+++ b/webrtc/modules/audio_device/test/func_test_manager.cc
@@ -194,7 +194,7 @@ int32_t AudioTransportImpl::RecordedDataIsAvailable(
const void* audioSamples,
const size_t nSamples,
const size_t nBytesPerSample,
- const uint8_t nChannels,
+ const size_t nChannels,
const uint32_t samplesPerSec,
const uint32_t totalDelayMS,
const int32_t clockDrift,
@@ -339,7 +339,7 @@ int32_t AudioTransportImpl::RecordedDataIsAvailable(
int32_t AudioTransportImpl::NeedMorePlayData(
const size_t nSamples,
const size_t nBytesPerSample,
- const uint8_t nChannels,
+ const size_t nChannels,
const uint32_t samplesPerSec,
void* audioSamples,
size_t& nSamplesOut,
@@ -365,7 +365,7 @@ int32_t AudioTransportImpl::NeedMorePlayData(
int16_t* ptr16Out = NULL;
const size_t nSamplesIn = packet->nSamples;
- const uint8_t nChannelsIn = packet->nChannels;
+ const size_t nChannelsIn = packet->nChannels;
const uint32_t samplesPerSecIn = packet->samplesPerSec;
const size_t nBytesPerSampleIn = packet->nBytesPerSample;
@@ -573,32 +573,6 @@ int32_t AudioTransportImpl::NeedMorePlayData(
return 0;
}
-int AudioTransportImpl::OnDataAvailable(const int voe_channels[],
- int number_of_voe_channels,
- const int16_t* audio_data,
- int sample_rate,
- int number_of_channels,
- size_t number_of_frames,
- int audio_delay_milliseconds,
- int current_volume,
- bool key_pressed,
- bool need_audio_processing) {
- return 0;
-}
-
-void AudioTransportImpl::PushCaptureData(int voe_channel,
- const void* audio_data,
- int bits_per_sample, int sample_rate,
- int number_of_channels,
- size_t number_of_frames) {}
-
-void AudioTransportImpl::PullRenderData(int bits_per_sample, int sample_rate,
- int number_of_channels,
- size_t number_of_frames,
- void* audio_data,
- int64_t* elapsed_time_ms,
- int64_t* ntp_time_ms) {}
-
FuncTestManager::FuncTestManager() :
_audioDevice(NULL),
_audioEventObserver(NULL),
@@ -686,7 +660,7 @@ int32_t FuncTestManager::Close()
_audioDevice = NULL;
}
- // return the ThreadWrapper (singleton)
+ // return the PlatformThread (singleton)
Trace::ReturnTrace();
// PRINT_TEST_RESULTS;
diff --git a/webrtc/modules/audio_device/test/func_test_manager.h b/webrtc/modules/audio_device/test/func_test_manager.h
index 6ef13490d8..b7cc81cc1a 100644
--- a/webrtc/modules/audio_device/test/func_test_manager.h
+++ b/webrtc/modules/audio_device/test/func_test_manager.h
@@ -49,7 +49,7 @@ struct AudioPacket
uint8_t dataBuffer[4 * 960];
size_t nSamples;
size_t nBytesPerSample;
- uint8_t nChannels;
+ size_t nChannels;
uint32_t samplesPerSec;
};
@@ -88,7 +88,7 @@ public:
int32_t RecordedDataIsAvailable(const void* audioSamples,
const size_t nSamples,
const size_t nBytesPerSample,
- const uint8_t nChannels,
+ const size_t nChannels,
const uint32_t samplesPerSec,
const uint32_t totalDelayMS,
const int32_t clockDrift,
@@ -98,35 +98,13 @@ public:
int32_t NeedMorePlayData(const size_t nSamples,
const size_t nBytesPerSample,
- const uint8_t nChannels,
+ const size_t nChannels,
const uint32_t samplesPerSec,
void* audioSamples,
size_t& nSamplesOut,
int64_t* elapsed_time_ms,
int64_t* ntp_time_ms) override;
- int OnDataAvailable(const int voe_channels[],
- int number_of_voe_channels,
- const int16_t* audio_data,
- int sample_rate,
- int number_of_channels,
- size_t number_of_frames,
- int audio_delay_milliseconds,
- int current_volume,
- bool key_pressed,
- bool need_audio_processing) override;
-
- void PushCaptureData(int voe_channel, const void* audio_data,
- int bits_per_sample, int sample_rate,
- int number_of_channels,
- size_t number_of_frames) override;
-
- void PullRenderData(int bits_per_sample, int sample_rate,
- int number_of_channels, size_t number_of_frames,
- void* audio_data,
- int64_t* elapsed_time_ms,
- int64_t* ntp_time_ms) override;
-
AudioTransportImpl(AudioDeviceModule* audioDevice);
~AudioTransportImpl();
diff --git a/webrtc/modules/audio_device/win/audio_device_wave_win.cc b/webrtc/modules/audio_device/win/audio_device_wave_win.cc
index 96bee7425a..6f4d7df397 100644
--- a/webrtc/modules/audio_device/win/audio_device_wave_win.cc
+++ b/webrtc/modules/audio_device/win/audio_device_wave_win.cc
@@ -228,15 +228,9 @@ int32_t AudioDeviceWindowsWave::Init()
}
const char* threadName = "webrtc_audio_module_thread";
- _ptrThread = ThreadWrapper::CreateThread(ThreadFunc, this, threadName);
- if (!_ptrThread->Start())
- {
- WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
- "failed to start the audio thread");
- _ptrThread.reset();
- return -1;
- }
- _ptrThread->SetPriority(kRealtimePriority);
+ _ptrThread.reset(new rtc::PlatformThread(ThreadFunc, this, threadName));
+ _ptrThread->Start();
+ _ptrThread->SetPriority(rtc::kRealtimePriority);
const bool periodic(true);
if (!_timeEvent.StartTimer(periodic, TIMER_PERIOD_MS))
@@ -250,12 +244,8 @@ int32_t AudioDeviceWindowsWave::Init()
WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
"periodic timer (dT=%d) is now active", TIMER_PERIOD_MS);
- _hGetCaptureVolumeThread = CreateThread(NULL,
- 0,
- GetCaptureVolumeThread,
- this,
- 0,
- NULL);
+ _hGetCaptureVolumeThread =
+ CreateThread(NULL, 0, GetCaptureVolumeThread, this, 0, NULL);
if (_hGetCaptureVolumeThread == NULL)
{
WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
@@ -265,12 +255,8 @@ int32_t AudioDeviceWindowsWave::Init()
SetThreadPriority(_hGetCaptureVolumeThread, THREAD_PRIORITY_NORMAL);
- _hSetCaptureVolumeThread = CreateThread(NULL,
- 0,
- SetCaptureVolumeThread,
- this,
- 0,
- NULL);
+ _hSetCaptureVolumeThread =
+ CreateThread(NULL, 0, SetCaptureVolumeThread, this, 0, NULL);
if (_hSetCaptureVolumeThread == NULL)
{
WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
@@ -303,7 +289,7 @@ int32_t AudioDeviceWindowsWave::Terminate()
if (_ptrThread)
{
- ThreadWrapper* tmpThread = _ptrThread.release();
+ rtc::PlatformThread* tmpThread = _ptrThread.release();
_critSect.Leave();
_timeEvent.Set();
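
Thread start-up in this file moves from ThreadWrapper, whose Start() could fail, to rtc::PlatformThread, whose Start() has no failure path in this revision. A sketch of the replacement, assuming the constructor signature used above (run callback, context pointer, thread name); the callback body is illustrative:

static bool ThreadFunc(void* context) {
  // Return true to be scheduled again, false to end the thread loop.
  return static_cast<AudioDeviceWindowsWave*>(context)->ThreadProcess();
}

_ptrThread.reset(new rtc::PlatformThread(ThreadFunc, this, threadName));
_ptrThread->Start();
_ptrThread->SetPriority(rtc::kRealtimePriority);
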
diff --git a/webrtc/modules/audio_device/win/audio_device_wave_win.h b/webrtc/modules/audio_device/win/audio_device_wave_win.h
index c99185c3ab..a1cfc6acbf 100644
--- a/webrtc/modules/audio_device/win/audio_device_wave_win.h
+++ b/webrtc/modules/audio_device/win/audio_device_wave_win.h
@@ -11,9 +11,9 @@
#ifndef WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_WAVE_WIN_H
#define WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_WAVE_WIN_H
+#include "webrtc/base/platform_thread.h"
#include "webrtc/modules/audio_device/audio_device_generic.h"
#include "webrtc/modules/audio_device/win/audio_mixer_manager_win.h"
-#include "webrtc/system_wrappers/include/thread_wrapper.h"
#pragma comment( lib, "winmm.lib" )
@@ -222,7 +222,8 @@ private:
HANDLE _hShutdownSetVolumeEvent;
HANDLE _hSetCaptureVolumeEvent;
- rtc::scoped_ptr<ThreadWrapper> _ptrThread;
+ // TODO(pbos): Remove scoped_ptr usage and use PlatformThread directly
+ rtc::scoped_ptr<rtc::PlatformThread> _ptrThread;
CriticalSectionWrapper& _critSectCb;
diff --git a/webrtc/modules/audio_processing/BUILD.gn b/webrtc/modules/audio_processing/BUILD.gn
index 2d0c602ef0..9d91911bc2 100644
--- a/webrtc/modules/audio_processing/BUILD.gn
+++ b/webrtc/modules/audio_processing/BUILD.gn
@@ -30,12 +30,12 @@ source_set("audio_processing") {
"aec/aec_resampler.c",
"aec/aec_resampler.h",
"aec/echo_cancellation.c",
+ "aec/echo_cancellation.h",
"aec/echo_cancellation_internal.h",
- "aec/include/echo_cancellation.h",
"aecm/aecm_core.c",
"aecm/aecm_core.h",
"aecm/echo_control_mobile.c",
- "aecm/include/echo_control_mobile.h",
+ "aecm/echo_control_mobile.h",
"agc/agc.cc",
"agc/agc.h",
"agc/agc_manager_direct.cc",
@@ -161,8 +161,8 @@ source_set("audio_processing") {
if (rtc_prefer_fixed_point) {
defines += [ "WEBRTC_NS_FIXED" ]
sources += [
- "ns/include/noise_suppression_x.h",
"ns/noise_suppression_x.c",
+ "ns/noise_suppression_x.h",
"ns/nsx_core.c",
"ns/nsx_core.h",
"ns/nsx_defines.h",
@@ -176,8 +176,8 @@ source_set("audio_processing") {
defines += [ "WEBRTC_NS_FLOAT" ]
sources += [
"ns/defines.h",
- "ns/include/noise_suppression.h",
"ns/noise_suppression.c",
+ "ns/noise_suppression.h",
"ns/ns_core.c",
"ns/ns_core.h",
"ns/windows_private.h",
diff --git a/webrtc/modules/audio_processing/OWNERS b/webrtc/modules/audio_processing/OWNERS
index 7b760682b0..d14f7f8614 100644
--- a/webrtc/modules/audio_processing/OWNERS
+++ b/webrtc/modules/audio_processing/OWNERS
@@ -1,5 +1,4 @@
aluebs@webrtc.org
-andrew@webrtc.org
henrik.lundin@webrtc.org
peah@webrtc.org
diff --git a/webrtc/modules/audio_processing/aec/aec_core.c b/webrtc/modules/audio_processing/aec/aec_core.c
index f8eed32372..901e0fde0b 100644
--- a/webrtc/modules/audio_processing/aec/aec_core.c
+++ b/webrtc/modules/audio_processing/aec/aec_core.c
@@ -44,7 +44,6 @@ static const int countLen = 50;
static const int kDelayMetricsAggregationWindow = 1250; // 5 seconds at 16 kHz.
// Quantities to control H band scaling for SWB input
-static const int flagHbandCn = 1; // flag for adding comfort noise in H band
static const float cnScaleHband =
(float)0.4; // scale for comfort noise in H band
// Initial bin for averaging nlp gain in low band
@@ -135,6 +134,9 @@ WebRtcAecFilterAdaptation WebRtcAec_FilterAdaptation;
WebRtcAecOverdriveAndSuppress WebRtcAec_OverdriveAndSuppress;
WebRtcAecComfortNoise WebRtcAec_ComfortNoise;
WebRtcAecSubBandCoherence WebRtcAec_SubbandCoherence;
+WebRtcAecStoreAsComplex WebRtcAec_StoreAsComplex;
+WebRtcAecPartitionDelay WebRtcAec_PartitionDelay;
+WebRtcAecWindowData WebRtcAec_WindowData;
__inline static float MulRe(float aRe, float aIm, float bRe, float bIm) {
return aRe * bRe - aIm * bIm;
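
The three new globals extend the AEC's runtime-dispatch scheme: each hot kernel is a function pointer initialized to the generic C routine and optionally overwritten with an architecture-specific variant. Schematically, following the initialization code that appears later in this patch:

WebRtcAec_FilterFar = FilterFar;    // generic C implementation first
WebRtcAec_WindowData = WindowData;
#if defined(WEBRTC_ARCH_X86_FAMILY)
if (WebRtc_GetCPUInfo(kSSE2)) {
  WebRtcAec_InitAec_SSE2();         // swaps in SSE2 kernels where available
}
#endif
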
@@ -151,40 +153,49 @@ static int CmpFloat(const void* a, const void* b) {
return (*da > *db) - (*da < *db);
}
-static void FilterFar(AecCore* aec, float yf[2][PART_LEN1]) {
+static void FilterFar(
+ int num_partitions,
+ int x_fft_buf_block_pos,
+ float x_fft_buf[2][kExtendedNumPartitions * PART_LEN1],
+ float h_fft_buf[2][kExtendedNumPartitions * PART_LEN1],
+ float y_fft[2][PART_LEN1]) {
int i;
- for (i = 0; i < aec->num_partitions; i++) {
+ for (i = 0; i < num_partitions; i++) {
int j;
- int xPos = (i + aec->xfBufBlockPos) * PART_LEN1;
+ int xPos = (i + x_fft_buf_block_pos) * PART_LEN1;
int pos = i * PART_LEN1;
// Check for wrap
- if (i + aec->xfBufBlockPos >= aec->num_partitions) {
- xPos -= aec->num_partitions * (PART_LEN1);
+ if (i + x_fft_buf_block_pos >= num_partitions) {
+ xPos -= num_partitions * (PART_LEN1);
}
for (j = 0; j < PART_LEN1; j++) {
- yf[0][j] += MulRe(aec->xfBuf[0][xPos + j],
- aec->xfBuf[1][xPos + j],
- aec->wfBuf[0][pos + j],
- aec->wfBuf[1][pos + j]);
- yf[1][j] += MulIm(aec->xfBuf[0][xPos + j],
- aec->xfBuf[1][xPos + j],
- aec->wfBuf[0][pos + j],
- aec->wfBuf[1][pos + j]);
+ y_fft[0][j] += MulRe(x_fft_buf[0][xPos + j],
+ x_fft_buf[1][xPos + j],
+ h_fft_buf[0][pos + j],
+ h_fft_buf[1][pos + j]);
+ y_fft[1][j] += MulIm(x_fft_buf[0][xPos + j],
+ x_fft_buf[1][xPos + j],
+ h_fft_buf[0][pos + j],
+ h_fft_buf[1][pos + j]);
}
}
}
-static void ScaleErrorSignal(AecCore* aec, float ef[2][PART_LEN1]) {
- const float mu = aec->extended_filter_enabled ? kExtendedMu : aec->normal_mu;
- const float error_threshold = aec->extended_filter_enabled
+static void ScaleErrorSignal(int extended_filter_enabled,
+ float normal_mu,
+ float normal_error_threshold,
+ float x_pow[PART_LEN1],
+ float ef[2][PART_LEN1]) {
+ const float mu = extended_filter_enabled ? kExtendedMu : normal_mu;
+ const float error_threshold = extended_filter_enabled
? kExtendedErrorThreshold
- : aec->normal_error_threshold;
+ : normal_error_threshold;
int i;
float abs_ef;
for (i = 0; i < (PART_LEN1); i++) {
- ef[0][i] /= (aec->xPow[i] + 1e-10f);
- ef[1][i] /= (aec->xPow[i] + 1e-10f);
+ ef[0][i] /= (x_pow[i] + 1e-10f);
+ ef[1][i] /= (x_pow[i] + 1e-10f);
abs_ef = sqrtf(ef[0][i] * ef[0][i] + ef[1][i] * ef[1][i]);
if (abs_ef > error_threshold) {
@@ -199,59 +210,40 @@ static void ScaleErrorSignal(AecCore* aec, float ef[2][PART_LEN1]) {
}
}
-// Time-unconstrined filter adaptation.
-// TODO(andrew): consider for a low-complexity mode.
-// static void FilterAdaptationUnconstrained(AecCore* aec, float *fft,
-// float ef[2][PART_LEN1]) {
-// int i, j;
-// for (i = 0; i < aec->num_partitions; i++) {
-// int xPos = (i + aec->xfBufBlockPos)*(PART_LEN1);
-// int pos;
-// // Check for wrap
-// if (i + aec->xfBufBlockPos >= aec->num_partitions) {
-// xPos -= aec->num_partitions * PART_LEN1;
-// }
-//
-// pos = i * PART_LEN1;
-//
-// for (j = 0; j < PART_LEN1; j++) {
-// aec->wfBuf[0][pos + j] += MulRe(aec->xfBuf[0][xPos + j],
-// -aec->xfBuf[1][xPos + j],
-// ef[0][j], ef[1][j]);
-// aec->wfBuf[1][pos + j] += MulIm(aec->xfBuf[0][xPos + j],
-// -aec->xfBuf[1][xPos + j],
-// ef[0][j], ef[1][j]);
-// }
-// }
-//}
-
-static void FilterAdaptation(AecCore* aec, float* fft, float ef[2][PART_LEN1]) {
+
+static void FilterAdaptation(
+ int num_partitions,
+ int x_fft_buf_block_pos,
+ float x_fft_buf[2][kExtendedNumPartitions * PART_LEN1],
+ float e_fft[2][PART_LEN1],
+ float h_fft_buf[2][kExtendedNumPartitions * PART_LEN1]) {
int i, j;
- for (i = 0; i < aec->num_partitions; i++) {
- int xPos = (i + aec->xfBufBlockPos) * (PART_LEN1);
+ float fft[PART_LEN2];
+ for (i = 0; i < num_partitions; i++) {
+ int xPos = (i + x_fft_buf_block_pos) * (PART_LEN1);
int pos;
// Check for wrap
- if (i + aec->xfBufBlockPos >= aec->num_partitions) {
- xPos -= aec->num_partitions * PART_LEN1;
+ if (i + x_fft_buf_block_pos >= num_partitions) {
+ xPos -= num_partitions * PART_LEN1;
}
pos = i * PART_LEN1;
for (j = 0; j < PART_LEN; j++) {
- fft[2 * j] = MulRe(aec->xfBuf[0][xPos + j],
- -aec->xfBuf[1][xPos + j],
- ef[0][j],
- ef[1][j]);
- fft[2 * j + 1] = MulIm(aec->xfBuf[0][xPos + j],
- -aec->xfBuf[1][xPos + j],
- ef[0][j],
- ef[1][j]);
+ fft[2 * j] = MulRe(x_fft_buf[0][xPos + j],
+ -x_fft_buf[1][xPos + j],
+ e_fft[0][j],
+ e_fft[1][j]);
+ fft[2 * j + 1] = MulIm(x_fft_buf[0][xPos + j],
+ -x_fft_buf[1][xPos + j],
+ e_fft[0][j],
+ e_fft[1][j]);
}
- fft[1] = MulRe(aec->xfBuf[0][xPos + PART_LEN],
- -aec->xfBuf[1][xPos + PART_LEN],
- ef[0][PART_LEN],
- ef[1][PART_LEN]);
+ fft[1] = MulRe(x_fft_buf[0][xPos + PART_LEN],
+ -x_fft_buf[1][xPos + PART_LEN],
+ e_fft[0][PART_LEN],
+ e_fft[1][PART_LEN]);
aec_rdft_inverse_128(fft);
memset(fft + PART_LEN, 0, sizeof(float) * PART_LEN);
@@ -265,12 +257,12 @@ static void FilterAdaptation(AecCore* aec, float* fft, float ef[2][PART_LEN1]) {
}
aec_rdft_forward_128(fft);
- aec->wfBuf[0][pos] += fft[0];
- aec->wfBuf[0][pos + PART_LEN] += fft[1];
+ h_fft_buf[0][pos] += fft[0];
+ h_fft_buf[0][pos + PART_LEN] += fft[1];
for (j = 1; j < PART_LEN; j++) {
- aec->wfBuf[0][pos + j] += fft[2 * j];
- aec->wfBuf[1][pos + j] += fft[2 * j + 1];
+ h_fft_buf[0][pos + j] += fft[2 * j];
+ h_fft_buf[1][pos + j] += fft[2 * j + 1];
}
}
}
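
For orientation, the now-stateless adaptation above is block frequency-domain NLMS with a time-domain constraint; per partition i it computes, schematically:

// e'  = conj(x_fft_buf_i) .* e_fft      (correlate far spectrum with error)
// g   = IFFT(e')                        (back to the time domain)
// g[PART_LEN..PART_LEN2) = 0            (circular-convolution guard)
// h_fft_buf_i += FFT(g)                 (constrained filter update)
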
@@ -334,12 +326,13 @@ const float WebRtcAec_kMinFarendPSD = 15;
// - sde : cross-PSD of near-end and residual echo
// - sxd : cross-PSD of near-end and far-end
//
-// In addition to updating the PSDs, also the filter diverge state is determined
-// upon actions are taken.
+// In addition to updating the PSDs, also the filter diverge state is
+// determined.
static void SmoothedPSD(AecCore* aec,
float efw[2][PART_LEN1],
float dfw[2][PART_LEN1],
- float xfw[2][PART_LEN1]) {
+ float xfw[2][PART_LEN1],
+ int* extreme_filter_divergence) {
// Power estimate smoothing coefficients.
const float* ptrGCoh = aec->extended_filter_enabled
? WebRtcAec_kExtendedSmoothingCoefficients[aec->mult - 1]
@@ -380,15 +373,12 @@ static void SmoothedPSD(AecCore* aec,
seSum += aec->se[i];
}
- // Divergent filter safeguard.
+ // Divergent filter safeguard update.
aec->divergeState = (aec->divergeState ? 1.05f : 1.0f) * seSum > sdSum;
- if (aec->divergeState)
- memcpy(efw, dfw, sizeof(efw[0][0]) * 2 * PART_LEN1);
-
- // Reset if error is significantly larger than nearend (13 dB).
- if (!aec->extended_filter_enabled && seSum > (19.95f * sdSum))
- memset(aec->wfBuf, 0, sizeof(aec->wfBuf));
+ // Signal extreme filter divergence if the error is significantly larger
+ // than the nearend (13 dB).
+ *extreme_filter_divergence = (seSum > (19.95f * sdSum));
}
// Window time domain data to be used by the fft.
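
The filter reset that used to happen here as a side effect is now only reported via |extreme_filter_divergence|; the consumer added further down in this patch (EchoSubtraction) owns the decision:

if (!aec->extended_filter_enabled && aec->extreme_filter_divergence) {
  memset(aec->wfBuf, 0, sizeof(aec->wfBuf));  // drop the diverged filter
  aec->extreme_filter_divergence = 0;
}
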
@@ -417,32 +407,15 @@ __inline static void StoreAsComplex(const float* data,
static void SubbandCoherence(AecCore* aec,
float efw[2][PART_LEN1],
+ float dfw[2][PART_LEN1],
float xfw[2][PART_LEN1],
float* fft,
float* cohde,
- float* cohxd) {
- float dfw[2][PART_LEN1];
+ float* cohxd,
+ int* extreme_filter_divergence) {
int i;
- if (aec->delayEstCtr == 0)
- aec->delayIdx = PartitionDelay(aec);
-
- // Use delayed far.
- memcpy(xfw,
- aec->xfwBuf + aec->delayIdx * PART_LEN1,
- sizeof(xfw[0][0]) * 2 * PART_LEN1);
-
- // Windowed near fft
- WindowData(fft, aec->dBuf);
- aec_rdft_forward_128(fft);
- StoreAsComplex(fft, dfw);
-
- // Windowed error fft
- WindowData(fft, aec->eBuf);
- aec_rdft_forward_128(fft);
- StoreAsComplex(fft, efw);
-
- SmoothedPSD(aec, efw, dfw, xfw);
+ SmoothedPSD(aec, efw, dfw, xfw, extreme_filter_divergence);
// Subband coherence
for (i = 0; i < PART_LEN1; i++) {
@@ -458,23 +431,23 @@ static void SubbandCoherence(AecCore* aec,
static void GetHighbandGain(const float* lambda, float* nlpGainHband) {
int i;
- nlpGainHband[0] = (float)0.0;
+ *nlpGainHband = (float)0.0;
for (i = freqAvgIc; i < PART_LEN1 - 1; i++) {
- nlpGainHband[0] += lambda[i];
+ *nlpGainHband += lambda[i];
}
- nlpGainHband[0] /= (float)(PART_LEN1 - 1 - freqAvgIc);
+ *nlpGainHband /= (float)(PART_LEN1 - 1 - freqAvgIc);
}
static void ComfortNoise(AecCore* aec,
float efw[2][PART_LEN1],
- complex_t* comfortNoiseHband,
+ float comfortNoiseHband[2][PART_LEN1],
const float* noisePow,
const float* lambda) {
int i, num;
float rand[PART_LEN];
float noise, noiseAvg, tmp, tmpAvg;
int16_t randW16[PART_LEN];
- complex_t u[PART_LEN1];
+ float u[2][PART_LEN1];
const float pi2 = 6.28318530717959f;
@@ -486,22 +459,22 @@ static void ComfortNoise(AecCore* aec,
// Reject LF noise
u[0][0] = 0;
- u[0][1] = 0;
+ u[1][0] = 0;
for (i = 1; i < PART_LEN1; i++) {
tmp = pi2 * rand[i - 1];
noise = sqrtf(noisePow[i]);
- u[i][0] = noise * cosf(tmp);
- u[i][1] = -noise * sinf(tmp);
+ u[0][i] = noise * cosf(tmp);
+ u[1][i] = -noise * sinf(tmp);
}
- u[PART_LEN][1] = 0;
+ u[1][PART_LEN] = 0;
for (i = 0; i < PART_LEN1; i++) {
// This is the proper weighting to match the background noise power
tmp = sqrtf(WEBRTC_SPL_MAX(1 - lambda[i] * lambda[i], 0));
// tmp = 1 - lambda[i];
- efw[0][i] += tmp * u[i][0];
- efw[1][i] += tmp * u[i][1];
+ efw[0][i] += tmp * u[0][i];
+ efw[1][i] += tmp * u[1][i];
}
// For H band comfort noise
@@ -509,7 +482,7 @@ static void ComfortNoise(AecCore* aec,
noiseAvg = 0.0;
tmpAvg = 0.0;
num = 0;
- if (aec->num_bands > 1 && flagHbandCn == 1) {
+ if (aec->num_bands > 1) {
// average noise scale
// average over second half of freq spectrum (i.e., 4->8khz)
@@ -534,21 +507,24 @@ static void ComfortNoise(AecCore* aec,
// TODO: we should probably have a new random vector here.
// Reject LF noise
u[0][0] = 0;
- u[0][1] = 0;
+ u[1][0] = 0;
for (i = 1; i < PART_LEN1; i++) {
tmp = pi2 * rand[i - 1];
// Use average noise for H band
- u[i][0] = noiseAvg * (float)cos(tmp);
- u[i][1] = -noiseAvg * (float)sin(tmp);
+ u[0][i] = noiseAvg * (float)cos(tmp);
+ u[1][i] = -noiseAvg * (float)sin(tmp);
}
- u[PART_LEN][1] = 0;
+ u[1][PART_LEN] = 0;
for (i = 0; i < PART_LEN1; i++) {
// Use average NLP weight for H band
- comfortNoiseHband[i][0] = tmpAvg * u[i][0];
- comfortNoiseHband[i][1] = tmpAvg * u[i][1];
+ comfortNoiseHband[0][i] = tmpAvg * u[0][i];
+ comfortNoiseHband[1][i] = tmpAvg * u[1][i];
}
+ } else {
+ memset(comfortNoiseHband, 0,
+ 2 * PART_LEN1 * sizeof(comfortNoiseHband[0][0]));
}
}
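
The comfort-noise buffers switch from an array-of-complex layout (complex_t u[PART_LEN1], interleaved as u[i][0]/u[i][1]) to split real/imaginary planes (u[0][i]/u[1][i]), matching the [2][PART_LEN1] convention used by the other spectra in this file and keeping SIMD loads contiguous. A before/after sketch:

complex_t u_old[PART_LEN1];  // interleaved: u_old[i][0] = re, u_old[i][1] = im
float u_new[2][PART_LEN1];   // planar:      u_new[0][i] = re, u_new[1][i] = im
u_new[0][i] = noise * cosf(tmp);
u_new[1][i] = -noise * sinf(tmp);
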
@@ -837,21 +813,29 @@ static void UpdateDelayMetrics(AecCore* self) {
return;
}
-static void TimeToFrequency(float time_data[PART_LEN2],
- float freq_data[2][PART_LEN1],
- int window) {
- int i = 0;
-
- // TODO(bjornv): Should we have a different function/wrapper for windowed FFT?
- if (window) {
- for (i = 0; i < PART_LEN; i++) {
- time_data[i] *= WebRtcAec_sqrtHanning[i];
- time_data[PART_LEN + i] *= WebRtcAec_sqrtHanning[PART_LEN - i];
- }
+static void ScaledInverseFft(float freq_data[2][PART_LEN1],
+ float time_data[PART_LEN2],
+ float scale,
+ int conjugate) {
+ int i;
+ const float normalization = scale / ((float)PART_LEN2);
+ const float sign = (conjugate ? -1 : 1);
+ time_data[0] = freq_data[0][0] * normalization;
+ time_data[1] = freq_data[0][PART_LEN] * normalization;
+ for (i = 1; i < PART_LEN; i++) {
+ time_data[2 * i] = freq_data[0][i] * normalization;
+ time_data[2 * i + 1] = sign * freq_data[1][i] * normalization;
}
+ aec_rdft_inverse_128(time_data);
+}
+
+static void Fft(float time_data[PART_LEN2],
+ float freq_data[2][PART_LEN1]) {
+ int i;
aec_rdft_forward_128(time_data);
- // Reorder.
+
+ // Reorder fft output data.
freq_data[1][0] = 0;
freq_data[1][PART_LEN] = 0;
freq_data[0][0] = time_data[0];
@@ -862,13 +846,6 @@ static void TimeToFrequency(float time_data[PART_LEN2],
}
}
-static int MoveFarReadPtrWithoutSystemDelayUpdate(AecCore* self, int elements) {
- WebRtc_MoveReadPtr(self->far_buf_windowed, elements);
-#ifdef WEBRTC_AEC_DEBUG_DUMP
- WebRtc_MoveReadPtr(self->far_time_buf, elements);
-#endif
- return WebRtc_MoveReadPtr(self->far_buf, elements);
-}
static int SignalBasedDelayCorrection(AecCore* self) {
int delay_correction = 0;
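
The new Fft()/ScaledInverseFft() pair centralizes the Ooura packing that was previously open-coded at each call site: spectra are stored as [re(0), re(N/2), re(1), im(1), ...], the inverse transform is scaled by scale/PART_LEN2, and the conjugate flag flips the imaginary signs so one helper serves both plain and conjugated inverses. Usage sketch:

float time_data[PART_LEN2];
ScaledInverseFft(freq_data, time_data, 2.0f, 0);  // plain inverse, 2/N scaled
ScaledInverseFft(freq_data, time_data, 2.0f, 1);  // conjugated inverse
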
@@ -909,7 +886,7 @@ static int SignalBasedDelayCorrection(AecCore* self) {
const int upper_bound = self->num_partitions * 3 / 4;
const int do_correction = delay <= lower_bound || delay > upper_bound;
if (do_correction == 1) {
- int available_read = (int)WebRtc_available_read(self->far_buf);
+ int available_read = (int)WebRtc_available_read(self->far_time_buf);
// With |shift_offset| we gradually rely on the delay estimates. For
// positive delays we reduce the correction by |shift_offset| to lower the
// risk of pushing the AEC into a non causal state. For negative delays
@@ -942,13 +919,94 @@ static int SignalBasedDelayCorrection(AecCore* self) {
return delay_correction;
}
-static void NonLinearProcessing(AecCore* aec,
- float* output,
- float* const* outputH) {
- float efw[2][PART_LEN1], xfw[2][PART_LEN1];
- complex_t comfortNoiseHband[PART_LEN1];
+static void EchoSubtraction(
+ AecCore* aec,
+ int num_partitions,
+ int x_fft_buf_block_pos,
+ int metrics_mode,
+ int extended_filter_enabled,
+ float normal_mu,
+ float normal_error_threshold,
+ float x_fft_buf[2][kExtendedNumPartitions * PART_LEN1],
+ float* const y,
+ float x_pow[PART_LEN1],
+ float h_fft_buf[2][kExtendedNumPartitions * PART_LEN1],
+ PowerLevel* linout_level,
+ float echo_subtractor_output[PART_LEN]) {
+ float s_fft[2][PART_LEN1];
+ float e_extended[PART_LEN2];
+ float s_extended[PART_LEN2];
+ float *s;
+ float e[PART_LEN];
+ float e_fft[2][PART_LEN1];
+ int i;
+ memset(s_fft, 0, sizeof(s_fft));
+
+ // Conditionally reset the echo subtraction filter if the filter has diverged
+ // significantly.
+ if (!aec->extended_filter_enabled &&
+ aec->extreme_filter_divergence) {
+ memset(aec->wfBuf, 0, sizeof(aec->wfBuf));
+ aec->extreme_filter_divergence = 0;
+ }
+
+ // Produce echo estimate s_fft.
+ WebRtcAec_FilterFar(num_partitions,
+ x_fft_buf_block_pos,
+ x_fft_buf,
+ h_fft_buf,
+ s_fft);
+
+ // Compute the time-domain echo estimate s.
+ ScaledInverseFft(s_fft, s_extended, 2.0f, 0);
+ s = &s_extended[PART_LEN];
+
+ // Compute the time-domain echo prediction error.
+ for (i = 0; i < PART_LEN; ++i) {
+ e[i] = y[i] - s[i];
+ }
+
+ // Compute the frequency domain echo prediction error.
+ memset(e_extended, 0, sizeof(float) * PART_LEN);
+ memcpy(e_extended + PART_LEN, e, sizeof(float) * PART_LEN);
+ Fft(e_extended, e_fft);
+
+ RTC_AEC_DEBUG_RAW_WRITE(aec->e_fft_file,
+ &e_fft[0][0],
+ sizeof(e_fft[0][0]) * PART_LEN1 * 2);
+
+ if (metrics_mode == 1) {
+ // Note that the first PART_LEN samples in fft (before transformation) are
+ // zero. Hence, the scaling by two in UpdateLevel() should not be
+ // performed. That scaling is taken care of in UpdateMetrics() instead.
+ UpdateLevel(linout_level, e_fft);
+ }
+
+ // Scale error signal inversely with far power.
+ WebRtcAec_ScaleErrorSignal(extended_filter_enabled,
+ normal_mu,
+ normal_error_threshold,
+ x_pow,
+ e_fft);
+ WebRtcAec_FilterAdaptation(num_partitions,
+ x_fft_buf_block_pos,
+ x_fft_buf,
+ e_fft,
+ h_fft_buf);
+ memcpy(echo_subtractor_output, e, sizeof(float) * PART_LEN);
+}
+
+
+static void EchoSuppression(AecCore* aec,
+ float farend[PART_LEN2],
+ float* echo_subtractor_output,
+ float* output,
+ float* const* outputH) {
+ float efw[2][PART_LEN1];
+ float xfw[2][PART_LEN1];
+ float dfw[2][PART_LEN1];
+ float comfortNoiseHband[2][PART_LEN1];
float fft[PART_LEN2];
- float scale, dtmp;
float nlpGainHband;
int i;
size_t j;
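
NonLinearProcessing() is split into two stages: EchoSubtraction() (the linear stage: filter the far end, form e = y - s, adapt) and EchoSuppression() (the NLP stage: coherence estimation, suppression gain, comfort noise). ProcessBlock(), later in this patch, chains them; schematically (argument list abridged):

EchoSubtraction(aec, /* ...state and buffers... */,
                nearend_ptr, aec->xPow, aec->wfBuf,
                &aec->linoutlevel, echo_subtractor_output);
EchoSuppression(aec, farend_ptr, echo_subtractor_output, output, outputH_ptr);
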
@@ -972,27 +1030,51 @@ static void NonLinearProcessing(AecCore* aec,
float* xfw_ptr = NULL;
- aec->delayEstCtr++;
- if (aec->delayEstCtr == delayEstInterval) {
- aec->delayEstCtr = 0;
- }
+ // Update eBuf with echo subtractor output.
+ memcpy(aec->eBuf + PART_LEN,
+ echo_subtractor_output,
+ sizeof(float) * PART_LEN);
- // initialize comfort noise for H band
- memset(comfortNoiseHband, 0, sizeof(comfortNoiseHband));
- nlpGainHband = (float)0.0;
- dtmp = (float)0.0;
+ // Analysis filter banks for the echo suppressor.
+ // Windowed near-end ffts.
+ WindowData(fft, aec->dBuf);
+ aec_rdft_forward_128(fft);
+ StoreAsComplex(fft, dfw);
+
+ // Windowed echo suppressor output ffts.
+ WindowData(fft, aec->eBuf);
+ aec_rdft_forward_128(fft);
+ StoreAsComplex(fft, efw);
- // We should always have at least one element stored in |far_buf|.
- assert(WebRtc_available_read(aec->far_buf_windowed) > 0);
// NLP
- WebRtc_ReadBuffer(aec->far_buf_windowed, (void**)&xfw_ptr, &xfw[0][0], 1);
- // TODO(bjornv): Investigate if we can reuse |far_buf_windowed| instead of
- // |xfwBuf|.
+ // Convert far-end partition to the frequency domain with windowing.
+ WindowData(fft, farend);
+ Fft(fft, xfw);
+ xfw_ptr = &xfw[0][0];
+
// Buffer far.
memcpy(aec->xfwBuf, xfw_ptr, sizeof(float) * 2 * PART_LEN1);
- WebRtcAec_SubbandCoherence(aec, efw, xfw, fft, cohde, cohxd);
+ aec->delayEstCtr++;
+ if (aec->delayEstCtr == delayEstInterval) {
+ aec->delayEstCtr = 0;
+ aec->delayIdx = WebRtcAec_PartitionDelay(aec);
+ }
+
+ // Use delayed far.
+ memcpy(xfw,
+ aec->xfwBuf + aec->delayIdx * PART_LEN1,
+ sizeof(xfw[0][0]) * 2 * PART_LEN1);
+
+ WebRtcAec_SubbandCoherence(aec, efw, dfw, xfw, fft, cohde, cohxd,
+ &aec->extreme_filter_divergence);
+
+ // Select the microphone signal as output if the filter is deemed to have
+ // diverged.
+ if (aec->divergeState) {
+ memcpy(efw, dfw, sizeof(efw[0][0]) * 2 * PART_LEN1);
+ }
hNlXdAvg = 0;
for (i = minPrefBand; i < prefBandSize + minPrefBand; i++) {
@@ -1098,67 +1180,51 @@ static void NonLinearProcessing(AecCore* aec,
// scaling only in UpdateMetrics().
UpdateLevel(&aec->nlpoutlevel, efw);
}
+
// Inverse error fft.
- fft[0] = efw[0][0];
- fft[1] = efw[0][PART_LEN];
- for (i = 1; i < PART_LEN; i++) {
- fft[2 * i] = efw[0][i];
- // Sign change required by Ooura fft.
- fft[2 * i + 1] = -efw[1][i];
- }
- aec_rdft_inverse_128(fft);
+ ScaledInverseFft(efw, fft, 2.0f, 1);
// Overlap and add to obtain output.
- scale = 2.0f / PART_LEN2;
for (i = 0; i < PART_LEN; i++) {
- fft[i] *= scale; // fft scaling
- fft[i] = fft[i] * WebRtcAec_sqrtHanning[i] + aec->outBuf[i];
-
- fft[PART_LEN + i] *= scale; // fft scaling
- aec->outBuf[i] = fft[PART_LEN + i] * WebRtcAec_sqrtHanning[PART_LEN - i];
+ output[i] = (fft[i] * WebRtcAec_sqrtHanning[i] +
+ aec->outBuf[i] * WebRtcAec_sqrtHanning[PART_LEN - i]);
// Saturate output to keep it in the allowed range.
output[i] = WEBRTC_SPL_SAT(
- WEBRTC_SPL_WORD16_MAX, fft[i], WEBRTC_SPL_WORD16_MIN);
+ WEBRTC_SPL_WORD16_MAX, output[i], WEBRTC_SPL_WORD16_MIN);
}
+ memcpy(aec->outBuf, &fft[PART_LEN], PART_LEN * sizeof(aec->outBuf[0]));
// For H band
if (aec->num_bands > 1) {
-
// H band gain
// average nlp over low band: average over second half of freq spectrum
// (4->8khz)
GetHighbandGain(hNl, &nlpGainHband);
// Inverse comfort_noise
- if (flagHbandCn == 1) {
- fft[0] = comfortNoiseHband[0][0];
- fft[1] = comfortNoiseHband[PART_LEN][0];
- for (i = 1; i < PART_LEN; i++) {
- fft[2 * i] = comfortNoiseHband[i][0];
- fft[2 * i + 1] = comfortNoiseHband[i][1];
- }
- aec_rdft_inverse_128(fft);
- scale = 2.0f / PART_LEN2;
- }
+ ScaledInverseFft(comfortNoiseHband, fft, 2.0f, 0);
// compute gain factor
for (j = 0; j < aec->num_bands - 1; ++j) {
for (i = 0; i < PART_LEN; i++) {
- dtmp = aec->dBufH[j][i];
- dtmp = dtmp * nlpGainHband; // for variable gain
+ outputH[j][i] = aec->dBufH[j][i] * nlpGainHband;
+ }
+ }
- // add some comfort noise where Hband is attenuated
- if (flagHbandCn == 1 && j == 0) {
- fft[i] *= scale; // fft scaling
- dtmp += cnScaleHband * fft[i];
- }
+ // Add some comfort noise where Hband is attenuated.
+ for (i = 0; i < PART_LEN; i++) {
+ outputH[0][i] += cnScaleHband * fft[i];
+ }
- // Saturate output to keep it in the allowed range.
+ // Saturate output to keep it in the allowed range.
+ for (j = 0; j < aec->num_bands - 1; ++j) {
+ for (i = 0; i < PART_LEN; i++) {
outputH[j][i] = WEBRTC_SPL_SAT(
- WEBRTC_SPL_WORD16_MAX, dtmp, WEBRTC_SPL_WORD16_MIN);
+ WEBRTC_SPL_WORD16_MAX, outputH[j][i], WEBRTC_SPL_WORD16_MIN);
}
}
+
}
// Copy the current block to the old position.
@@ -1177,11 +1243,9 @@ static void NonLinearProcessing(AecCore* aec,
static void ProcessBlock(AecCore* aec) {
size_t i;
- float y[PART_LEN], e[PART_LEN];
- float scale;
float fft[PART_LEN2];
- float xf[2][PART_LEN1], yf[2][PART_LEN1], ef[2][PART_LEN1];
+ float xf[2][PART_LEN1];
float df[2][PART_LEN1];
float far_spectrum = 0.0f;
float near_spectrum = 0.0f;
@@ -1198,15 +1262,18 @@ static void ProcessBlock(AecCore* aec) {
float nearend[PART_LEN];
float* nearend_ptr = NULL;
+ float farend[PART_LEN2];
+ float* farend_ptr = NULL;
+ float echo_subtractor_output[PART_LEN];
float output[PART_LEN];
float outputH[NUM_HIGH_BANDS_MAX][PART_LEN];
float* outputH_ptr[NUM_HIGH_BANDS_MAX];
+ float* xf_ptr = NULL;
+
for (i = 0; i < NUM_HIGH_BANDS_MAX; ++i) {
outputH_ptr[i] = outputH[i];
}
- float* xf_ptr = NULL;
-
// Concatenate old and new nearend blocks.
for (i = 0; i < aec->num_bands - 1; ++i) {
WebRtc_ReadBuffer(aec->nearFrBufH[i],
@@ -1218,25 +1285,28 @@ static void ProcessBlock(AecCore* aec) {
WebRtc_ReadBuffer(aec->nearFrBuf, (void**)&nearend_ptr, nearend, PART_LEN);
memcpy(aec->dBuf + PART_LEN, nearend_ptr, sizeof(nearend));
- // ---------- Ooura fft ----------
+ // We should always have at least one element stored in |far_buf|.
+ assert(WebRtc_available_read(aec->far_time_buf) > 0);
+ WebRtc_ReadBuffer(aec->far_time_buf, (void**)&farend_ptr, farend, 1);
#ifdef WEBRTC_AEC_DEBUG_DUMP
{
- float farend[PART_LEN];
- float* farend_ptr = NULL;
- WebRtc_ReadBuffer(aec->far_time_buf, (void**)&farend_ptr, farend, 1);
- RTC_AEC_DEBUG_WAV_WRITE(aec->farFile, farend_ptr, PART_LEN);
+ // TODO(minyue): |farend_ptr| starts from buffered samples. This will be
+ // modified when |aec->far_time_buf| is revised.
+ RTC_AEC_DEBUG_WAV_WRITE(aec->farFile, &farend_ptr[PART_LEN], PART_LEN);
+
RTC_AEC_DEBUG_WAV_WRITE(aec->nearFile, nearend_ptr, PART_LEN);
}
#endif
- // We should always have at least one element stored in |far_buf|.
- assert(WebRtc_available_read(aec->far_buf) > 0);
- WebRtc_ReadBuffer(aec->far_buf, (void**)&xf_ptr, &xf[0][0], 1);
+ // Convert far-end signal to the frequency domain.
+ memcpy(fft, farend_ptr, sizeof(float) * PART_LEN2);
+ Fft(fft, xf);
+ xf_ptr = &xf[0][0];
// Near fft
memcpy(fft, aec->dBuf, sizeof(float) * PART_LEN2);
- TimeToFrequency(fft, df, 0);
+ Fft(fft, df);
// Power smoothing
for (i = 0; i < PART_LEN1; i++) {
@@ -1314,60 +1384,25 @@ static void ProcessBlock(AecCore* aec) {
&xf_ptr[PART_LEN1],
sizeof(float) * PART_LEN1);
- memset(yf, 0, sizeof(yf));
-
- // Filter far
- WebRtcAec_FilterFar(aec, yf);
-
- // Inverse fft to obtain echo estimate and error.
- fft[0] = yf[0][0];
- fft[1] = yf[0][PART_LEN];
- for (i = 1; i < PART_LEN; i++) {
- fft[2 * i] = yf[0][i];
- fft[2 * i + 1] = yf[1][i];
- }
- aec_rdft_inverse_128(fft);
-
- scale = 2.0f / PART_LEN2;
- for (i = 0; i < PART_LEN; i++) {
- y[i] = fft[PART_LEN + i] * scale; // fft scaling
- }
-
- for (i = 0; i < PART_LEN; i++) {
- e[i] = nearend_ptr[i] - y[i];
- }
-
- // Error fft
- memcpy(aec->eBuf + PART_LEN, e, sizeof(float) * PART_LEN);
- memset(fft, 0, sizeof(float) * PART_LEN);
- memcpy(fft + PART_LEN, e, sizeof(float) * PART_LEN);
- // TODO(bjornv): Change to use TimeToFrequency().
- aec_rdft_forward_128(fft);
-
- ef[1][0] = 0;
- ef[1][PART_LEN] = 0;
- ef[0][0] = fft[0];
- ef[0][PART_LEN] = fft[1];
- for (i = 1; i < PART_LEN; i++) {
- ef[0][i] = fft[2 * i];
- ef[1][i] = fft[2 * i + 1];
- }
-
- RTC_AEC_DEBUG_RAW_WRITE(aec->e_fft_file,
- &ef[0][0],
- sizeof(ef[0][0]) * PART_LEN1 * 2);
-
- if (aec->metricsMode == 1) {
- // Note that the first PART_LEN samples in fft (before transformation) are
- // zero. Hence, the scaling by two in UpdateLevel() should not be
- // performed. That scaling is taken care of in UpdateMetrics() instead.
- UpdateLevel(&aec->linoutlevel, ef);
- }
-
- // Scale error signal inversely with far power.
- WebRtcAec_ScaleErrorSignal(aec, ef);
- WebRtcAec_FilterAdaptation(aec, fft, ef);
- NonLinearProcessing(aec, output, outputH_ptr);
+ // Perform echo subtraction.
+ EchoSubtraction(aec,
+ aec->num_partitions,
+ aec->xfBufBlockPos,
+ aec->metricsMode,
+ aec->extended_filter_enabled,
+ aec->normal_mu,
+ aec->normal_error_threshold,
+ aec->xfBuf,
+ nearend_ptr,
+ aec->xPow,
+ aec->wfBuf,
+ &aec->linoutlevel,
+ echo_subtractor_output);
+
+ RTC_AEC_DEBUG_WAV_WRITE(aec->outLinearFile, echo_subtractor_output, PART_LEN);
+
+ // Perform echo suppression.
+ EchoSuppression(aec, farend_ptr, echo_subtractor_output, output, outputH_ptr);
if (aec->metricsMode == 1) {
// Update power levels and echo metrics
@@ -1383,7 +1418,6 @@ static void ProcessBlock(AecCore* aec) {
WebRtc_WriteBuffer(aec->outFrBufH[i], outputH[i], PART_LEN);
}
- RTC_AEC_DEBUG_WAV_WRITE(aec->outLinearFile, e, PART_LEN);
RTC_AEC_DEBUG_WAV_WRITE(aec->outFile, output, PART_LEN);
}
@@ -1422,26 +1456,20 @@ AecCore* WebRtcAec_CreateAec() {
}
// Create far-end buffers.
- aec->far_buf =
- WebRtc_CreateBuffer(kBufSizePartitions, sizeof(float) * 2 * PART_LEN1);
- if (!aec->far_buf) {
- WebRtcAec_FreeAec(aec);
- return NULL;
- }
- aec->far_buf_windowed =
- WebRtc_CreateBuffer(kBufSizePartitions, sizeof(float) * 2 * PART_LEN1);
- if (!aec->far_buf_windowed) {
- WebRtcAec_FreeAec(aec);
- return NULL;
- }
-#ifdef WEBRTC_AEC_DEBUG_DUMP
- aec->instance_index = webrtc_aec_instance_count;
+ // For bit exactness with legacy code, each element in |far_time_buf| is
+ // supposed to contain |PART_LEN2| samples with an overlap of |PART_LEN|
+ // samples from the last frame.
+ // TODO(minyue): reduce |far_time_buf| to non-overlapped |PART_LEN| samples.
aec->far_time_buf =
- WebRtc_CreateBuffer(kBufSizePartitions, sizeof(float) * PART_LEN);
+ WebRtc_CreateBuffer(kBufSizePartitions, sizeof(float) * PART_LEN2);
if (!aec->far_time_buf) {
WebRtcAec_FreeAec(aec);
return NULL;
}
+
+#ifdef WEBRTC_AEC_DEBUG_DUMP
+ aec->instance_index = webrtc_aec_instance_count;
+
aec->farFile = aec->nearFile = aec->outFile = aec->outLinearFile = NULL;
aec->debug_dump_count = 0;
#endif
@@ -1477,6 +1505,10 @@ AecCore* WebRtcAec_CreateAec() {
WebRtcAec_OverdriveAndSuppress = OverdriveAndSuppress;
WebRtcAec_ComfortNoise = ComfortNoise;
WebRtcAec_SubbandCoherence = SubbandCoherence;
+ WebRtcAec_StoreAsComplex = StoreAsComplex;
+ WebRtcAec_PartitionDelay = PartitionDelay;
+ WebRtcAec_WindowData = WindowData;
+
#if defined(WEBRTC_ARCH_X86_FAMILY)
if (WebRtc_GetCPUInfo(kSSE2)) {
@@ -1515,11 +1547,8 @@ void WebRtcAec_FreeAec(AecCore* aec) {
WebRtc_FreeBuffer(aec->outFrBufH[i]);
}
- WebRtc_FreeBuffer(aec->far_buf);
- WebRtc_FreeBuffer(aec->far_buf_windowed);
-#ifdef WEBRTC_AEC_DEBUG_DUMP
WebRtc_FreeBuffer(aec->far_time_buf);
-#endif
+
RTC_AEC_DEBUG_WAV_CLOSE(aec->farFile);
RTC_AEC_DEBUG_WAV_CLOSE(aec->nearFile);
RTC_AEC_DEBUG_WAV_CLOSE(aec->outFile);
@@ -1555,10 +1584,9 @@ int WebRtcAec_InitAec(AecCore* aec, int sampFreq) {
}
// Initialize far-end buffers.
- WebRtc_InitBuffer(aec->far_buf);
- WebRtc_InitBuffer(aec->far_buf_windowed);
-#ifdef WEBRTC_AEC_DEBUG_DUMP
WebRtc_InitBuffer(aec->far_time_buf);
+
+#ifdef WEBRTC_AEC_DEBUG_DUMP
{
int process_rate = sampFreq > 16000 ? 16000 : sampFreq;
RTC_AEC_DEBUG_WAV_REOPEN("aec_far", aec->instance_index,
@@ -1693,6 +1721,8 @@ int WebRtcAec_InitAec(AecCore* aec, int sampFreq) {
aec->seed = 777;
aec->delayEstCtr = 0;
+ aec->extreme_filter_divergence = 0;
+
// Metrics disabled by default
aec->metricsMode = 0;
InitMetrics(aec);
@@ -1700,27 +1730,22 @@ int WebRtcAec_InitAec(AecCore* aec, int sampFreq) {
return 0;
}
-void WebRtcAec_BufferFarendPartition(AecCore* aec, const float* farend) {
- float fft[PART_LEN2];
- float xf[2][PART_LEN1];
+// For bit exactness with a legacy code, |farend| is supposed to contain
+// |PART_LEN2| samples with an overlap of |PART_LEN| samples from the last
+// frame.
+// TODO(minyue): reduce |farend| to non-overlapped |PART_LEN| samples.
+void WebRtcAec_BufferFarendPartition(AecCore* aec, const float* farend) {
// Check if the buffer is full, and in that case flush the oldest data.
- if (WebRtc_available_write(aec->far_buf) < 1) {
+ if (WebRtc_available_write(aec->far_time_buf) < 1) {
WebRtcAec_MoveFarReadPtr(aec, 1);
}
- // Convert far-end partition to the frequency domain without windowing.
- memcpy(fft, farend, sizeof(float) * PART_LEN2);
- TimeToFrequency(fft, xf, 0);
- WebRtc_WriteBuffer(aec->far_buf, &xf[0][0], 1);
- // Convert far-end partition to the frequency domain with windowing.
- memcpy(fft, farend, sizeof(float) * PART_LEN2);
- TimeToFrequency(fft, xf, 1);
- WebRtc_WriteBuffer(aec->far_buf_windowed, &xf[0][0], 1);
+ WebRtc_WriteBuffer(aec->far_time_buf, farend, 1);
}
int WebRtcAec_MoveFarReadPtr(AecCore* aec, int elements) {
- int elements_moved = MoveFarReadPtrWithoutSystemDelayUpdate(aec, elements);
+ int elements_moved = WebRtc_MoveReadPtr(aec->far_time_buf, elements);
aec->system_delay -= elements_moved * PART_LEN;
return elements_moved;
}
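
With this change the two frequency-domain far-end ring buffers (far_buf, far_buf_windowed) collapse into the single time-domain far_time_buf, and the FFTs move to read time: once plain for the subtractor and once windowed for the suppressor. Producer/consumer sketch drawn from this patch:

WebRtc_WriteBuffer(aec->far_time_buf, farend, 1);              // producer
WebRtc_ReadBuffer(aec->far_time_buf, (void**)&farend_ptr,
                  farend, 1);                                  // consumer
memcpy(fft, farend_ptr, sizeof(float) * PART_LEN2);
Fft(fft, xf);                   // plain FFT feeds the echo subtractor
WindowData(fft, farend);
Fft(fft, xfw);                  // windowed FFT feeds the echo suppressor
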
@@ -1794,14 +1819,14 @@ void WebRtcAec_ProcessFrames(AecCore* aec,
// rounding, like -16.
int move_elements = (aec->knownDelay - knownDelay - 32) / PART_LEN;
int moved_elements =
- MoveFarReadPtrWithoutSystemDelayUpdate(aec, move_elements);
+ WebRtc_MoveReadPtr(aec->far_time_buf, move_elements);
aec->knownDelay -= moved_elements * PART_LEN;
} else {
// 2 b) Apply signal based delay correction.
int move_elements = SignalBasedDelayCorrection(aec);
int moved_elements =
- MoveFarReadPtrWithoutSystemDelayUpdate(aec, move_elements);
- int far_near_buffer_diff = WebRtc_available_read(aec->far_buf) -
+ WebRtc_MoveReadPtr(aec->far_time_buf, move_elements);
+ int far_near_buffer_diff = WebRtc_available_read(aec->far_time_buf) -
WebRtc_available_read(aec->nearFrBuf) / PART_LEN;
WebRtc_SoftResetDelayEstimator(aec->delay_estimator, moved_elements);
WebRtc_SoftResetDelayEstimatorFarend(aec->delay_estimator_farend,
@@ -1880,10 +1905,6 @@ void WebRtcAec_GetEchoStats(AecCore* self,
*a_nlp = self->aNlp;
}
-#ifdef WEBRTC_AEC_DEBUG_DUMP
-void* WebRtcAec_far_time_buf(AecCore* self) { return self->far_time_buf; }
-#endif
-
void WebRtcAec_SetConfigCore(AecCore* self,
int nlp_mode,
int metrics_mode,
diff --git a/webrtc/modules/audio_processing/aec/aec_core_internal.h b/webrtc/modules/audio_processing/aec/aec_core_internal.h
index 2de028379b..3809c82567 100644
--- a/webrtc/modules/audio_processing/aec/aec_core_internal.h
+++ b/webrtc/modules/audio_processing/aec/aec_core_internal.h
@@ -95,8 +95,8 @@ struct AecCore {
int xfBufBlockPos;
- RingBuffer* far_buf;
- RingBuffer* far_buf_windowed;
+ RingBuffer* far_time_buf;
+
int system_delay; // Current system delay buffered in AEC.
int mult; // sampling frequency multiple
@@ -152,6 +152,10 @@ struct AecCore {
// Runtime selection of number of filter partitions.
int num_partitions;
+ // Flag that extreme filter divergence has been detected by the Echo
+ // Suppressor.
+ int extreme_filter_divergence;
+
#ifdef WEBRTC_AEC_DEBUG_DUMP
// Sequence number of this AEC instance, so that different instances can
// choose different dump file names.
@@ -161,7 +165,6 @@ struct AecCore {
// each time.
int debug_dump_count;
- RingBuffer* far_time_buf;
rtc_WavWriter* farFile;
rtc_WavWriter* nearFile;
rtc_WavWriter* outFile;
@@ -170,13 +173,25 @@ struct AecCore {
#endif
};
-typedef void (*WebRtcAecFilterFar)(AecCore* aec, float yf[2][PART_LEN1]);
+typedef void (*WebRtcAecFilterFar)(
+ int num_partitions,
+ int x_fft_buf_block_pos,
+ float x_fft_buf[2][kExtendedNumPartitions * PART_LEN1],
+ float h_fft_buf[2][kExtendedNumPartitions * PART_LEN1],
+ float y_fft[2][PART_LEN1]);
extern WebRtcAecFilterFar WebRtcAec_FilterFar;
-typedef void (*WebRtcAecScaleErrorSignal)(AecCore* aec, float ef[2][PART_LEN1]);
-extern WebRtcAecScaleErrorSignal WebRtcAec_ScaleErrorSignal;
-typedef void (*WebRtcAecFilterAdaptation)(AecCore* aec,
- float* fft,
+typedef void (*WebRtcAecScaleErrorSignal)(int extended_filter_enabled,
+ float normal_mu,
+ float normal_error_threshold,
+ float x_pow[PART_LEN1],
float ef[2][PART_LEN1]);
+extern WebRtcAecScaleErrorSignal WebRtcAec_ScaleErrorSignal;
+typedef void (*WebRtcAecFilterAdaptation)(
+ int num_partitions,
+ int x_fft_buf_block_pos,
+ float x_fft_buf[2][kExtendedNumPartitions * PART_LEN1],
+ float e_fft[2][PART_LEN1],
+ float h_fft_buf[2][kExtendedNumPartitions * PART_LEN1]);
extern WebRtcAecFilterAdaptation WebRtcAec_FilterAdaptation;
typedef void (*WebRtcAecOverdriveAndSuppress)(AecCore* aec,
float hNl[PART_LEN1],
@@ -186,17 +201,29 @@ extern WebRtcAecOverdriveAndSuppress WebRtcAec_OverdriveAndSuppress;
typedef void (*WebRtcAecComfortNoise)(AecCore* aec,
float efw[2][PART_LEN1],
- complex_t* comfortNoiseHband,
+ float comfortNoiseHband[2][PART_LEN1],
const float* noisePow,
const float* lambda);
extern WebRtcAecComfortNoise WebRtcAec_ComfortNoise;
typedef void (*WebRtcAecSubBandCoherence)(AecCore* aec,
float efw[2][PART_LEN1],
+ float dfw[2][PART_LEN1],
float xfw[2][PART_LEN1],
float* fft,
float* cohde,
- float* cohxd);
+ float* cohxd,
+ int* extreme_filter_divergence);
extern WebRtcAecSubBandCoherence WebRtcAec_SubbandCoherence;
+typedef int (*WebRtcAecPartitionDelay)(const AecCore* aec);
+extern WebRtcAecPartitionDelay WebRtcAec_PartitionDelay;
+
+typedef void (*WebRtcAecStoreAsComplex)(const float* data,
+ float data_complex[2][PART_LEN1]);
+extern WebRtcAecStoreAsComplex WebRtcAec_StoreAsComplex;
+
+typedef void (*WebRtcAecWindowData)(float* x_windowed, const float* x);
+extern WebRtcAecWindowData WebRtcAec_WindowData;
+
#endif // WEBRTC_MODULES_AUDIO_PROCESSING_AEC_AEC_CORE_INTERNAL_H_
diff --git a/webrtc/modules/audio_processing/aec/aec_core_mips.c b/webrtc/modules/audio_processing/aec/aec_core_mips.c
index bb33087aee..035a4b76af 100644
--- a/webrtc/modules/audio_processing/aec/aec_core_mips.c
+++ b/webrtc/modules/audio_processing/aec/aec_core_mips.c
@@ -20,13 +20,12 @@
#include "webrtc/modules/audio_processing/aec/aec_core_internal.h"
#include "webrtc/modules/audio_processing/aec/aec_rdft.h"
-static const int flagHbandCn = 1; // flag for adding comfort noise in H band
extern const float WebRtcAec_weightCurve[65];
extern const float WebRtcAec_overDriveCurve[65];
void WebRtcAec_ComfortNoise_mips(AecCore* aec,
float efw[2][PART_LEN1],
- complex_t* comfortNoiseHband,
+ float comfortNoiseHband[2][PART_LEN1],
const float* noisePow,
const float* lambda) {
int i, num;
@@ -274,7 +273,7 @@ void WebRtcAec_ComfortNoise_mips(AecCore* aec,
noiseAvg = 0.0;
tmpAvg = 0.0;
num = 0;
- if ((aec->sampFreq == 32000 || aec->sampFreq == 48000) && flagHbandCn == 1) {
+ if (aec->num_bands > 1) {
for (i = 0; i < PART_LEN; i++) {
rand[i] = ((float)randW16[i]) / 32768;
}
@@ -314,27 +313,35 @@ void WebRtcAec_ComfortNoise_mips(AecCore* aec,
for (i = 0; i < PART_LEN1; i++) {
// Use average NLP weight for H band
- comfortNoiseHband[i][0] = tmpAvg * u[i][0];
- comfortNoiseHband[i][1] = tmpAvg * u[i][1];
+ comfortNoiseHband[0][i] = tmpAvg * u[i][0];
+ comfortNoiseHband[1][i] = tmpAvg * u[i][1];
}
+ } else {
+ memset(comfortNoiseHband, 0,
+ 2 * PART_LEN1 * sizeof(comfortNoiseHband[0][0]));
}
}
-void WebRtcAec_FilterFar_mips(AecCore* aec, float yf[2][PART_LEN1]) {
+void WebRtcAec_FilterFar_mips(
+ int num_partitions,
+ int x_fft_buf_block_pos,
+ float x_fft_buf[2][kExtendedNumPartitions * PART_LEN1],
+ float h_fft_buf[2][kExtendedNumPartitions * PART_LEN1],
+ float y_fft[2][PART_LEN1]) {
int i;
- for (i = 0; i < aec->num_partitions; i++) {
- int xPos = (i + aec->xfBufBlockPos) * PART_LEN1;
+ for (i = 0; i < num_partitions; i++) {
+ int xPos = (i + x_fft_buf_block_pos) * PART_LEN1;
int pos = i * PART_LEN1;
// Check for wrap
- if (i + aec->xfBufBlockPos >= aec->num_partitions) {
- xPos -= aec->num_partitions * (PART_LEN1);
+ if (i + x_fft_buf_block_pos >= num_partitions) {
+ xPos -= num_partitions * (PART_LEN1);
}
- float* yf0 = yf[0];
- float* yf1 = yf[1];
- float* aRe = aec->xfBuf[0] + xPos;
- float* aIm = aec->xfBuf[1] + xPos;
- float* bRe = aec->wfBuf[0] + pos;
- float* bIm = aec->wfBuf[1] + pos;
+ float* yf0 = y_fft[0];
+ float* yf1 = y_fft[1];
+ float* aRe = x_fft_buf[0] + xPos;
+ float* aIm = x_fft_buf[1] + xPos;
+ float* bRe = h_fft_buf[0] + pos;
+ float* bIm = h_fft_buf[1] + pos;
float f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12, f13;
int len = PART_LEN1 >> 1;
@@ -432,23 +439,27 @@ void WebRtcAec_FilterFar_mips(AecCore* aec, float yf[2][PART_LEN1]) {
}
}
-void WebRtcAec_FilterAdaptation_mips(AecCore* aec,
- float* fft,
- float ef[2][PART_LEN1]) {
+void WebRtcAec_FilterAdaptation_mips(
+ int num_partitions,
+ int x_fft_buf_block_pos,
+ float x_fft_buf[2][kExtendedNumPartitions * PART_LEN1],
+ float e_fft[2][PART_LEN1],
+ float h_fft_buf[2][kExtendedNumPartitions * PART_LEN1]) {
+ float fft[PART_LEN2];
int i;
- for (i = 0; i < aec->num_partitions; i++) {
- int xPos = (i + aec->xfBufBlockPos)*(PART_LEN1);
+ for (i = 0; i < num_partitions; i++) {
+ int xPos = (i + x_fft_buf_block_pos)*(PART_LEN1);
int pos;
// Check for wrap
- if (i + aec->xfBufBlockPos >= aec->num_partitions) {
- xPos -= aec->num_partitions * PART_LEN1;
+ if (i + x_fft_buf_block_pos >= num_partitions) {
+ xPos -= num_partitions * PART_LEN1;
}
pos = i * PART_LEN1;
- float* aRe = aec->xfBuf[0] + xPos;
- float* aIm = aec->xfBuf[1] + xPos;
- float* bRe = ef[0];
- float* bIm = ef[1];
+ float* aRe = x_fft_buf[0] + xPos;
+ float* aIm = x_fft_buf[1] + xPos;
+ float* bRe = e_fft[0];
+ float* bIm = e_fft[1];
float* fft_tmp;
float f0, f1, f2, f3, f4, f5, f6 ,f7, f8, f9, f10, f11, f12;
@@ -573,8 +584,8 @@ void WebRtcAec_FilterAdaptation_mips(AecCore* aec,
);
}
aec_rdft_forward_128(fft);
- aRe = aec->wfBuf[0] + pos;
- aIm = aec->wfBuf[1] + pos;
+ aRe = h_fft_buf[0] + pos;
+ aIm = h_fft_buf[1] + pos;
__asm __volatile (
".set push \n\t"
".set noreorder \n\t"
@@ -699,15 +710,18 @@ void WebRtcAec_OverdriveAndSuppress_mips(AecCore* aec,
}
}
-void WebRtcAec_ScaleErrorSignal_mips(AecCore* aec, float ef[2][PART_LEN1]) {
- const float mu = aec->extended_filter_enabled ? kExtendedMu : aec->normal_mu;
- const float error_threshold = aec->extended_filter_enabled
+void WebRtcAec_ScaleErrorSignal_mips(int extended_filter_enabled,
+ float normal_mu,
+ float normal_error_threshold,
+ float x_pow[PART_LEN1],
+ float ef[2][PART_LEN1]) {
+ const float mu = extended_filter_enabled ? kExtendedMu : normal_mu;
+ const float error_threshold = extended_filter_enabled
? kExtendedErrorThreshold
- : aec->normal_error_threshold;
+ : normal_error_threshold;
int len = (PART_LEN1);
float* ef0 = ef[0];
float* ef1 = ef[1];
- float* xPow = aec->xPow;
float fac1 = 1e-10f;
float err_th2 = error_threshold * error_threshold;
float f0, f1, f2;
@@ -719,7 +733,7 @@ void WebRtcAec_ScaleErrorSignal_mips(AecCore* aec, float ef[2][PART_LEN1]) {
".set push \n\t"
".set noreorder \n\t"
"1: \n\t"
- "lwc1 %[f0], 0(%[xPow]) \n\t"
+ "lwc1 %[f0], 0(%[x_pow]) \n\t"
"lwc1 %[f1], 0(%[ef0]) \n\t"
"lwc1 %[f2], 0(%[ef1]) \n\t"
"add.s %[f0], %[f0], %[fac1] \n\t"
@@ -747,7 +761,7 @@ void WebRtcAec_ScaleErrorSignal_mips(AecCore* aec, float ef[2][PART_LEN1]) {
"swc1 %[f1], 0(%[ef0]) \n\t"
"swc1 %[f2], 0(%[ef1]) \n\t"
"addiu %[len], %[len], -1 \n\t"
- "addiu %[xPow], %[xPow], 4 \n\t"
+ "addiu %[x_pow], %[x_pow], 4 \n\t"
"addiu %[ef0], %[ef0], 4 \n\t"
"bgtz %[len], 1b \n\t"
" addiu %[ef1], %[ef1], 4 \n\t"
@@ -756,7 +770,7 @@ void WebRtcAec_ScaleErrorSignal_mips(AecCore* aec, float ef[2][PART_LEN1]) {
#if !defined(MIPS32_R2_LE)
[f3] "=&f" (f3),
#endif
- [xPow] "+r" (xPow), [ef0] "+r" (ef0), [ef1] "+r" (ef1),
+ [x_pow] "+r" (x_pow), [ef0] "+r" (ef0), [ef1] "+r" (ef1),
[len] "+r" (len)
: [fac1] "f" (fac1), [err_th2] "f" (err_th2), [mu] "f" (mu),
[err_th] "f" (error_threshold)
@@ -771,4 +785,3 @@ void WebRtcAec_InitAec_mips(void) {
WebRtcAec_ComfortNoise = WebRtcAec_ComfortNoise_mips;
WebRtcAec_OverdriveAndSuppress = WebRtcAec_OverdriveAndSuppress_mips;
}
-
diff --git a/webrtc/modules/audio_processing/aec/aec_core_neon.c b/webrtc/modules/audio_processing/aec/aec_core_neon.c
index 9a677aaa67..7898ab2543 100644
--- a/webrtc/modules/audio_processing/aec/aec_core_neon.c
+++ b/webrtc/modules/audio_processing/aec/aec_core_neon.c
@@ -34,45 +34,49 @@ __inline static float MulIm(float aRe, float aIm, float bRe, float bIm) {
return aRe * bIm + aIm * bRe;
}
-static void FilterFarNEON(AecCore* aec, float yf[2][PART_LEN1]) {
+static void FilterFarNEON(
+ int num_partitions,
+ int x_fft_buf_block_pos,
+ float x_fft_buf[2][kExtendedNumPartitions * PART_LEN1],
+ float h_fft_buf[2][kExtendedNumPartitions * PART_LEN1],
+ float y_fft[2][PART_LEN1]) {
int i;
- const int num_partitions = aec->num_partitions;
for (i = 0; i < num_partitions; i++) {
int j;
- int xPos = (i + aec->xfBufBlockPos) * PART_LEN1;
+ int xPos = (i + x_fft_buf_block_pos) * PART_LEN1;
int pos = i * PART_LEN1;
// Check for wrap
- if (i + aec->xfBufBlockPos >= num_partitions) {
+ if (i + x_fft_buf_block_pos >= num_partitions) {
xPos -= num_partitions * PART_LEN1;
}
// vectorized code (four at once)
for (j = 0; j + 3 < PART_LEN1; j += 4) {
- const float32x4_t xfBuf_re = vld1q_f32(&aec->xfBuf[0][xPos + j]);
- const float32x4_t xfBuf_im = vld1q_f32(&aec->xfBuf[1][xPos + j]);
- const float32x4_t wfBuf_re = vld1q_f32(&aec->wfBuf[0][pos + j]);
- const float32x4_t wfBuf_im = vld1q_f32(&aec->wfBuf[1][pos + j]);
- const float32x4_t yf_re = vld1q_f32(&yf[0][j]);
- const float32x4_t yf_im = vld1q_f32(&yf[1][j]);
- const float32x4_t a = vmulq_f32(xfBuf_re, wfBuf_re);
- const float32x4_t e = vmlsq_f32(a, xfBuf_im, wfBuf_im);
- const float32x4_t c = vmulq_f32(xfBuf_re, wfBuf_im);
- const float32x4_t f = vmlaq_f32(c, xfBuf_im, wfBuf_re);
- const float32x4_t g = vaddq_f32(yf_re, e);
- const float32x4_t h = vaddq_f32(yf_im, f);
- vst1q_f32(&yf[0][j], g);
- vst1q_f32(&yf[1][j], h);
+ const float32x4_t x_fft_buf_re = vld1q_f32(&x_fft_buf[0][xPos + j]);
+ const float32x4_t x_fft_buf_im = vld1q_f32(&x_fft_buf[1][xPos + j]);
+ const float32x4_t h_fft_buf_re = vld1q_f32(&h_fft_buf[0][pos + j]);
+ const float32x4_t h_fft_buf_im = vld1q_f32(&h_fft_buf[1][pos + j]);
+ const float32x4_t y_fft_re = vld1q_f32(&y_fft[0][j]);
+ const float32x4_t y_fft_im = vld1q_f32(&y_fft[1][j]);
+ const float32x4_t a = vmulq_f32(x_fft_buf_re, h_fft_buf_re);
+ const float32x4_t e = vmlsq_f32(a, x_fft_buf_im, h_fft_buf_im);
+ const float32x4_t c = vmulq_f32(x_fft_buf_re, h_fft_buf_im);
+ const float32x4_t f = vmlaq_f32(c, x_fft_buf_im, h_fft_buf_re);
+ const float32x4_t g = vaddq_f32(y_fft_re, e);
+ const float32x4_t h = vaddq_f32(y_fft_im, f);
+ vst1q_f32(&y_fft[0][j], g);
+ vst1q_f32(&y_fft[1][j], h);
}
// scalar code for the remaining items.
for (; j < PART_LEN1; j++) {
- yf[0][j] += MulRe(aec->xfBuf[0][xPos + j],
- aec->xfBuf[1][xPos + j],
- aec->wfBuf[0][pos + j],
- aec->wfBuf[1][pos + j]);
- yf[1][j] += MulIm(aec->xfBuf[0][xPos + j],
- aec->xfBuf[1][xPos + j],
- aec->wfBuf[0][pos + j],
- aec->wfBuf[1][pos + j]);
+ y_fft[0][j] += MulRe(x_fft_buf[0][xPos + j],
+ x_fft_buf[1][xPos + j],
+ h_fft_buf[0][pos + j],
+ h_fft_buf[1][pos + j]);
+ y_fft[1][j] += MulIm(x_fft_buf[0][xPos + j],
+ x_fft_buf[1][xPos + j],
+ h_fft_buf[0][pos + j],
+ h_fft_buf[1][pos + j]);
}
}
}
@@ -122,20 +126,24 @@ static float32x4_t vsqrtq_f32(float32x4_t s) {
}
#endif // WEBRTC_ARCH_ARM64
-static void ScaleErrorSignalNEON(AecCore* aec, float ef[2][PART_LEN1]) {
- const float mu = aec->extended_filter_enabled ? kExtendedMu : aec->normal_mu;
- const float error_threshold = aec->extended_filter_enabled ?
- kExtendedErrorThreshold : aec->normal_error_threshold;
+static void ScaleErrorSignalNEON(int extended_filter_enabled,
+ float normal_mu,
+ float normal_error_threshold,
+ float x_pow[PART_LEN1],
+ float ef[2][PART_LEN1]) {
+ const float mu = extended_filter_enabled ? kExtendedMu : normal_mu;
+ const float error_threshold = extended_filter_enabled ?
+ kExtendedErrorThreshold : normal_error_threshold;
const float32x4_t k1e_10f = vdupq_n_f32(1e-10f);
const float32x4_t kMu = vmovq_n_f32(mu);
const float32x4_t kThresh = vmovq_n_f32(error_threshold);
int i;
// vectorized code (four at once)
for (i = 0; i + 3 < PART_LEN1; i += 4) {
- const float32x4_t xPow = vld1q_f32(&aec->xPow[i]);
+ const float32x4_t x_pow_local = vld1q_f32(&x_pow[i]);
const float32x4_t ef_re_base = vld1q_f32(&ef[0][i]);
const float32x4_t ef_im_base = vld1q_f32(&ef[1][i]);
- const float32x4_t xPowPlus = vaddq_f32(xPow, k1e_10f);
+ const float32x4_t xPowPlus = vaddq_f32(x_pow_local, k1e_10f);
float32x4_t ef_re = vdivq_f32(ef_re_base, xPowPlus);
float32x4_t ef_im = vdivq_f32(ef_im_base, xPowPlus);
const float32x4_t ef_re2 = vmulq_f32(ef_re, ef_re);
@@ -162,8 +170,8 @@ static void ScaleErrorSignalNEON(AecCore* aec, float ef[2][PART_LEN1]) {
// scalar code for the remaining items.
for (; i < PART_LEN1; i++) {
float abs_ef;
- ef[0][i] /= (aec->xPow[i] + 1e-10f);
- ef[1][i] /= (aec->xPow[i] + 1e-10f);
+ ef[0][i] /= (x_pow[i] + 1e-10f);
+ ef[1][i] /= (x_pow[i] + 1e-10f);
abs_ef = sqrtf(ef[0][i] * ef[0][i] + ef[1][i] * ef[1][i]);
if (abs_ef > error_threshold) {
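
The vector and scalar paths implement the same per-bin operation. A scalar sketch of the whole step follows; the clamp body, cut off by the hunk above, is filled in from the plain-C reference and should be read as an assumption:

#include <math.h>

/* NLMS-style scaling of one error bin: normalize by far-end power,
 * clamp the magnitude at error_threshold, then apply the step size mu. */
static void ScaleErrorBin(float x_pow, float mu, float error_threshold,
                          float* ef_re, float* ef_im) {
  float abs_ef;
  *ef_re /= (x_pow + 1e-10f);
  *ef_im /= (x_pow + 1e-10f);
  abs_ef = sqrtf(*ef_re * *ef_re + *ef_im * *ef_im);
  if (abs_ef > error_threshold) {
    abs_ef = error_threshold / (abs_ef + 1e-10f);
    *ef_re *= abs_ef;
    *ef_im *= abs_ef;
  }
  *ef_re *= mu;
  *ef_im *= mu;
}
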
@@ -178,34 +186,37 @@ static void ScaleErrorSignalNEON(AecCore* aec, float ef[2][PART_LEN1]) {
}
}
-static void FilterAdaptationNEON(AecCore* aec,
- float* fft,
- float ef[2][PART_LEN1]) {
+static void FilterAdaptationNEON(
+ int num_partitions,
+ int x_fft_buf_block_pos,
+ float x_fft_buf[2][kExtendedNumPartitions * PART_LEN1],
+ float e_fft[2][PART_LEN1],
+ float h_fft_buf[2][kExtendedNumPartitions * PART_LEN1]) {
+ float fft[PART_LEN2];
int i;
- const int num_partitions = aec->num_partitions;
for (i = 0; i < num_partitions; i++) {
- int xPos = (i + aec->xfBufBlockPos) * PART_LEN1;
+ int xPos = (i + x_fft_buf_block_pos) * PART_LEN1;
int pos = i * PART_LEN1;
int j;
// Check for wrap
- if (i + aec->xfBufBlockPos >= num_partitions) {
+ if (i + x_fft_buf_block_pos >= num_partitions) {
xPos -= num_partitions * PART_LEN1;
}
// Process the whole array...
for (j = 0; j < PART_LEN; j += 4) {
- // Load xfBuf and ef.
- const float32x4_t xfBuf_re = vld1q_f32(&aec->xfBuf[0][xPos + j]);
- const float32x4_t xfBuf_im = vld1q_f32(&aec->xfBuf[1][xPos + j]);
- const float32x4_t ef_re = vld1q_f32(&ef[0][j]);
- const float32x4_t ef_im = vld1q_f32(&ef[1][j]);
- // Calculate the product of conjugate(xfBuf) by ef.
+ // Load x_fft_buf and e_fft.
+ const float32x4_t x_fft_buf_re = vld1q_f32(&x_fft_buf[0][xPos + j]);
+ const float32x4_t x_fft_buf_im = vld1q_f32(&x_fft_buf[1][xPos + j]);
+ const float32x4_t e_fft_re = vld1q_f32(&e_fft[0][j]);
+ const float32x4_t e_fft_im = vld1q_f32(&e_fft[1][j]);
+ // Calculate the product of conjugate(x_fft_buf) by e_fft.
// re(conjugate(a) * b) = aRe * bRe + aIm * bIm
// im(conjugate(a) * b) = aRe * bIm - aIm * bRe
- const float32x4_t a = vmulq_f32(xfBuf_re, ef_re);
- const float32x4_t e = vmlaq_f32(a, xfBuf_im, ef_im);
- const float32x4_t c = vmulq_f32(xfBuf_re, ef_im);
- const float32x4_t f = vmlsq_f32(c, xfBuf_im, ef_re);
+ const float32x4_t a = vmulq_f32(x_fft_buf_re, e_fft_re);
+ const float32x4_t e = vmlaq_f32(a, x_fft_buf_im, e_fft_im);
+ const float32x4_t c = vmulq_f32(x_fft_buf_re, e_fft_im);
+ const float32x4_t f = vmlsq_f32(c, x_fft_buf_im, e_fft_re);
// Interleave real and imaginary parts.
const float32x4x2_t g_n_h = vzipq_f32(e, f);
// Store
@@ -213,10 +224,10 @@ static void FilterAdaptationNEON(AecCore* aec,
vst1q_f32(&fft[2 * j + 4], g_n_h.val[1]);
}
// ... and fixup the first imaginary entry.
- fft[1] = MulRe(aec->xfBuf[0][xPos + PART_LEN],
- -aec->xfBuf[1][xPos + PART_LEN],
- ef[0][PART_LEN],
- ef[1][PART_LEN]);
+ fft[1] = MulRe(x_fft_buf[0][xPos + PART_LEN],
+ -x_fft_buf[1][xPos + PART_LEN],
+ e_fft[0][PART_LEN],
+ e_fft[1][PART_LEN]);
aec_rdft_inverse_128(fft);
memset(fft + PART_LEN, 0, sizeof(float) * PART_LEN);
@@ -234,21 +245,21 @@ static void FilterAdaptationNEON(AecCore* aec,
aec_rdft_forward_128(fft);
{
- const float wt1 = aec->wfBuf[1][pos];
- aec->wfBuf[0][pos + PART_LEN] += fft[1];
+ const float wt1 = h_fft_buf[1][pos];
+ h_fft_buf[0][pos + PART_LEN] += fft[1];
for (j = 0; j < PART_LEN; j += 4) {
- float32x4_t wtBuf_re = vld1q_f32(&aec->wfBuf[0][pos + j]);
- float32x4_t wtBuf_im = vld1q_f32(&aec->wfBuf[1][pos + j]);
+ float32x4_t wtBuf_re = vld1q_f32(&h_fft_buf[0][pos + j]);
+ float32x4_t wtBuf_im = vld1q_f32(&h_fft_buf[1][pos + j]);
const float32x4_t fft0 = vld1q_f32(&fft[2 * j + 0]);
const float32x4_t fft4 = vld1q_f32(&fft[2 * j + 4]);
const float32x4x2_t fft_re_im = vuzpq_f32(fft0, fft4);
wtBuf_re = vaddq_f32(wtBuf_re, fft_re_im.val[0]);
wtBuf_im = vaddq_f32(wtBuf_im, fft_re_im.val[1]);
- vst1q_f32(&aec->wfBuf[0][pos + j], wtBuf_re);
- vst1q_f32(&aec->wfBuf[1][pos + j], wtBuf_im);
+ vst1q_f32(&h_fft_buf[0][pos + j], wtBuf_re);
+ vst1q_f32(&h_fft_buf[1][pos + j], wtBuf_im);
}
- aec->wfBuf[1][pos] = wt1;
+ h_fft_buf[1][pos] = wt1;
}
}
}
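
The inverse FFT, memset, and forward FFT in the middle of this function implement the gradient constraint of the partitioned-block frequency-domain LMS filter: the time-domain gradient is truncated so each partition stays PART_LEN taps long. An isolated sketch, with the rdft calls stubbed out and PART_LEN assumed to be 64 as defined for this AEC:

#include <string.h>

#define PART_LEN 64
#define PART_LEN2 (PART_LEN * 2)

static void ConstrainGradient(float fft[PART_LEN2]) {
  int j;
  /* aec_rdft_inverse_128(fft);  -- to the time domain */
  memset(fft + PART_LEN, 0, sizeof(float) * PART_LEN);  /* constraint */
  for (j = 0; j < PART_LEN; j++) {
    fft[j] *= 2.0f / PART_LEN2;  /* fft scaling, as in the C reference */
  }
  /* aec_rdft_forward_128(fft);  -- back to the frequency domain,
   * ready to be accumulated into h_fft_buf */
}
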
@@ -442,7 +453,7 @@ static void OverdriveAndSuppressNEON(AecCore* aec,
}
}
-static int PartitionDelay(const AecCore* aec) {
+static int PartitionDelayNEON(const AecCore* aec) {
// Measures the energy in each filter partition and returns the partition with
// highest energy.
// TODO(bjornv): Spread computational cost by computing one partition per
@@ -499,7 +510,8 @@ static int PartitionDelay(const AecCore* aec) {
static void SmoothedPSD(AecCore* aec,
float efw[2][PART_LEN1],
float dfw[2][PART_LEN1],
- float xfw[2][PART_LEN1]) {
+ float xfw[2][PART_LEN1],
+ int* extreme_filter_divergence) {
// Power estimate smoothing coefficients.
const float* ptrGCoh = aec->extended_filter_enabled
? WebRtcAec_kExtendedSmoothingCoefficients[aec->mult - 1]
@@ -615,19 +627,16 @@ static void SmoothedPSD(AecCore* aec,
seSum += aec->se[i];
}
- // Divergent filter safeguard.
+ // Divergent filter safeguard update.
aec->divergeState = (aec->divergeState ? 1.05f : 1.0f) * seSum > sdSum;
- if (aec->divergeState)
- memcpy(efw, dfw, sizeof(efw[0][0]) * 2 * PART_LEN1);
-
- // Reset if error is significantly larger than nearend (13 dB).
- if (!aec->extended_filter_enabled && seSum > (19.95f * sdSum))
- memset(aec->wfBuf, 0, sizeof(aec->wfBuf));
+ // Signal extreme filter divergence if the error is significantly larger
+ // than the nearend (13 dB).
+ *extreme_filter_divergence = (seSum > (19.95f * sdSum));
}
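
A note on the magic constant: 19.95 is the linear power ratio corresponding to the 13 dB mentioned in the comment.

#include <math.h>

/* The safeguard flags extreme divergence when the error PSD sum exceeds
 * the nearend PSD sum by 13 dB, i.e. a power ratio of 10^(13/10). */
static float DivergencePowerRatio(void) {
  return powf(10.0f, 13.0f / 10.0f);  /* ~= 19.95 */
}
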
// Window time domain data to be used by the fft.
-__inline static void WindowData(float* x_windowed, const float* x) {
+static void WindowDataNEON(float* x_windowed, const float* x) {
int i;
for (i = 0; i < PART_LEN; i += 4) {
const float32x4_t vec_Buf1 = vld1q_f32(&x[i]);
@@ -648,8 +657,8 @@ __inline static void WindowData(float* x_windowed, const float* x) {
}
// Puts fft output data into a complex valued array.
-__inline static void StoreAsComplex(const float* data,
- float data_complex[2][PART_LEN1]) {
+static void StoreAsComplexNEON(const float* data,
+ float data_complex[2][PART_LEN1]) {
int i;
for (i = 0; i < PART_LEN; i += 4) {
const float32x4x2_t vec_data = vld2q_f32(&data[2 * i]);
@@ -665,32 +674,15 @@ __inline static void StoreAsComplex(const float* data,
static void SubbandCoherenceNEON(AecCore* aec,
float efw[2][PART_LEN1],
+ float dfw[2][PART_LEN1],
float xfw[2][PART_LEN1],
float* fft,
float* cohde,
- float* cohxd) {
- float dfw[2][PART_LEN1];
+ float* cohxd,
+ int* extreme_filter_divergence) {
int i;
- if (aec->delayEstCtr == 0)
- aec->delayIdx = PartitionDelay(aec);
-
- // Use delayed far.
- memcpy(xfw,
- aec->xfwBuf + aec->delayIdx * PART_LEN1,
- sizeof(xfw[0][0]) * 2 * PART_LEN1);
-
- // Windowed near fft
- WindowData(fft, aec->dBuf);
- aec_rdft_forward_128(fft);
- StoreAsComplex(fft, dfw);
-
- // Windowed error fft
- WindowData(fft, aec->eBuf);
- aec_rdft_forward_128(fft);
- StoreAsComplex(fft, efw);
-
- SmoothedPSD(aec, efw, dfw, xfw);
+ SmoothedPSD(aec, efw, dfw, xfw, extreme_filter_divergence);
{
const float32x4_t vec_1eminus10 = vdupq_n_f32(1e-10f);
@@ -732,5 +724,7 @@ void WebRtcAec_InitAec_neon(void) {
WebRtcAec_FilterAdaptation = FilterAdaptationNEON;
WebRtcAec_OverdriveAndSuppress = OverdriveAndSuppressNEON;
WebRtcAec_SubbandCoherence = SubbandCoherenceNEON;
+ WebRtcAec_StoreAsComplex = StoreAsComplexNEON;
+ WebRtcAec_PartitionDelay = PartitionDelayNEON;
+ WebRtcAec_WindowData = WindowDataNEON;
}
-
diff --git a/webrtc/modules/audio_processing/aec/aec_core_sse2.c b/webrtc/modules/audio_processing/aec/aec_core_sse2.c
index b1bffcbb9f..f897a4c0c7 100644
--- a/webrtc/modules/audio_processing/aec/aec_core_sse2.c
+++ b/webrtc/modules/audio_processing/aec/aec_core_sse2.c
@@ -29,67 +29,76 @@ __inline static float MulIm(float aRe, float aIm, float bRe, float bIm) {
return aRe * bIm + aIm * bRe;
}
-static void FilterFarSSE2(AecCore* aec, float yf[2][PART_LEN1]) {
+static void FilterFarSSE2(
+ int num_partitions,
+ int x_fft_buf_block_pos,
+ float x_fft_buf[2][kExtendedNumPartitions * PART_LEN1],
+ float h_fft_buf[2][kExtendedNumPartitions * PART_LEN1],
+ float y_fft[2][PART_LEN1]) {
+
int i;
- const int num_partitions = aec->num_partitions;
for (i = 0; i < num_partitions; i++) {
int j;
- int xPos = (i + aec->xfBufBlockPos) * PART_LEN1;
+ int xPos = (i + x_fft_buf_block_pos) * PART_LEN1;
int pos = i * PART_LEN1;
// Check for wrap
- if (i + aec->xfBufBlockPos >= num_partitions) {
+ if (i + x_fft_buf_block_pos >= num_partitions) {
xPos -= num_partitions * (PART_LEN1);
}
// vectorized code (four at once)
for (j = 0; j + 3 < PART_LEN1; j += 4) {
- const __m128 xfBuf_re = _mm_loadu_ps(&aec->xfBuf[0][xPos + j]);
- const __m128 xfBuf_im = _mm_loadu_ps(&aec->xfBuf[1][xPos + j]);
- const __m128 wfBuf_re = _mm_loadu_ps(&aec->wfBuf[0][pos + j]);
- const __m128 wfBuf_im = _mm_loadu_ps(&aec->wfBuf[1][pos + j]);
- const __m128 yf_re = _mm_loadu_ps(&yf[0][j]);
- const __m128 yf_im = _mm_loadu_ps(&yf[1][j]);
- const __m128 a = _mm_mul_ps(xfBuf_re, wfBuf_re);
- const __m128 b = _mm_mul_ps(xfBuf_im, wfBuf_im);
- const __m128 c = _mm_mul_ps(xfBuf_re, wfBuf_im);
- const __m128 d = _mm_mul_ps(xfBuf_im, wfBuf_re);
+ const __m128 x_fft_buf_re = _mm_loadu_ps(&x_fft_buf[0][xPos + j]);
+ const __m128 x_fft_buf_im = _mm_loadu_ps(&x_fft_buf[1][xPos + j]);
+ const __m128 h_fft_buf_re = _mm_loadu_ps(&h_fft_buf[0][pos + j]);
+ const __m128 h_fft_buf_im = _mm_loadu_ps(&h_fft_buf[1][pos + j]);
+ const __m128 y_fft_re = _mm_loadu_ps(&y_fft[0][j]);
+ const __m128 y_fft_im = _mm_loadu_ps(&y_fft[1][j]);
+ const __m128 a = _mm_mul_ps(x_fft_buf_re, h_fft_buf_re);
+ const __m128 b = _mm_mul_ps(x_fft_buf_im, h_fft_buf_im);
+ const __m128 c = _mm_mul_ps(x_fft_buf_re, h_fft_buf_im);
+ const __m128 d = _mm_mul_ps(x_fft_buf_im, h_fft_buf_re);
const __m128 e = _mm_sub_ps(a, b);
const __m128 f = _mm_add_ps(c, d);
- const __m128 g = _mm_add_ps(yf_re, e);
- const __m128 h = _mm_add_ps(yf_im, f);
- _mm_storeu_ps(&yf[0][j], g);
- _mm_storeu_ps(&yf[1][j], h);
+ const __m128 g = _mm_add_ps(y_fft_re, e);
+ const __m128 h = _mm_add_ps(y_fft_im, f);
+ _mm_storeu_ps(&y_fft[0][j], g);
+ _mm_storeu_ps(&y_fft[1][j], h);
}
// scalar code for the remaining items.
for (; j < PART_LEN1; j++) {
- yf[0][j] += MulRe(aec->xfBuf[0][xPos + j],
- aec->xfBuf[1][xPos + j],
- aec->wfBuf[0][pos + j],
- aec->wfBuf[1][pos + j]);
- yf[1][j] += MulIm(aec->xfBuf[0][xPos + j],
- aec->xfBuf[1][xPos + j],
- aec->wfBuf[0][pos + j],
- aec->wfBuf[1][pos + j]);
+ y_fft[0][j] += MulRe(x_fft_buf[0][xPos + j],
+ x_fft_buf[1][xPos + j],
+ h_fft_buf[0][pos + j],
+ h_fft_buf[1][pos + j]);
+ y_fft[1][j] += MulIm(x_fft_buf[0][xPos + j],
+ x_fft_buf[1][xPos + j],
+ h_fft_buf[0][pos + j],
+ h_fft_buf[1][pos + j]);
}
}
}
-static void ScaleErrorSignalSSE2(AecCore* aec, float ef[2][PART_LEN1]) {
+static void ScaleErrorSignalSSE2(int extended_filter_enabled,
+ float normal_mu,
+ float normal_error_threshold,
+ float x_pow[PART_LEN1],
+ float ef[2][PART_LEN1]) {
const __m128 k1e_10f = _mm_set1_ps(1e-10f);
- const __m128 kMu = aec->extended_filter_enabled ? _mm_set1_ps(kExtendedMu)
- : _mm_set1_ps(aec->normal_mu);
- const __m128 kThresh = aec->extended_filter_enabled
+ const __m128 kMu = extended_filter_enabled ? _mm_set1_ps(kExtendedMu)
+ : _mm_set1_ps(normal_mu);
+ const __m128 kThresh = extended_filter_enabled
? _mm_set1_ps(kExtendedErrorThreshold)
- : _mm_set1_ps(aec->normal_error_threshold);
+ : _mm_set1_ps(normal_error_threshold);
int i;
// vectorized code (four at once)
for (i = 0; i + 3 < PART_LEN1; i += 4) {
- const __m128 xPow = _mm_loadu_ps(&aec->xPow[i]);
+ const __m128 x_pow_local = _mm_loadu_ps(&x_pow[i]);
const __m128 ef_re_base = _mm_loadu_ps(&ef[0][i]);
const __m128 ef_im_base = _mm_loadu_ps(&ef[1][i]);
- const __m128 xPowPlus = _mm_add_ps(xPow, k1e_10f);
+ const __m128 xPowPlus = _mm_add_ps(x_pow_local, k1e_10f);
__m128 ef_re = _mm_div_ps(ef_re_base, xPowPlus);
__m128 ef_im = _mm_div_ps(ef_im_base, xPowPlus);
const __m128 ef_re2 = _mm_mul_ps(ef_re, ef_re);
@@ -116,14 +125,14 @@ static void ScaleErrorSignalSSE2(AecCore* aec, float ef[2][PART_LEN1]) {
// scalar code for the remaining items.
{
const float mu =
- aec->extended_filter_enabled ? kExtendedMu : aec->normal_mu;
- const float error_threshold = aec->extended_filter_enabled
+ extended_filter_enabled ? kExtendedMu : normal_mu;
+ const float error_threshold = extended_filter_enabled
? kExtendedErrorThreshold
- : aec->normal_error_threshold;
+ : normal_error_threshold;
for (; i < (PART_LEN1); i++) {
float abs_ef;
- ef[0][i] /= (aec->xPow[i] + 1e-10f);
- ef[1][i] /= (aec->xPow[i] + 1e-10f);
+ ef[0][i] /= (x_pow[i] + 1e-10f);
+ ef[1][i] /= (x_pow[i] + 1e-10f);
abs_ef = sqrtf(ef[0][i] * ef[0][i] + ef[1][i] * ef[1][i]);
if (abs_ef > error_threshold) {
@@ -139,33 +148,36 @@ static void ScaleErrorSignalSSE2(AecCore* aec, float ef[2][PART_LEN1]) {
}
}
-static void FilterAdaptationSSE2(AecCore* aec,
- float* fft,
- float ef[2][PART_LEN1]) {
+static void FilterAdaptationSSE2(
+ int num_partitions,
+ int x_fft_buf_block_pos,
+ float x_fft_buf[2][kExtendedNumPartitions * PART_LEN1],
+ float e_fft[2][PART_LEN1],
+ float h_fft_buf[2][kExtendedNumPartitions * PART_LEN1]) {
+ float fft[PART_LEN2];
int i, j;
- const int num_partitions = aec->num_partitions;
for (i = 0; i < num_partitions; i++) {
- int xPos = (i + aec->xfBufBlockPos) * (PART_LEN1);
+ int xPos = (i + x_fft_buf_block_pos) * (PART_LEN1);
int pos = i * PART_LEN1;
// Check for wrap
- if (i + aec->xfBufBlockPos >= num_partitions) {
+ if (i + x_fft_buf_block_pos >= num_partitions) {
xPos -= num_partitions * PART_LEN1;
}
// Process the whole array...
for (j = 0; j < PART_LEN; j += 4) {
- // Load xfBuf and ef.
- const __m128 xfBuf_re = _mm_loadu_ps(&aec->xfBuf[0][xPos + j]);
- const __m128 xfBuf_im = _mm_loadu_ps(&aec->xfBuf[1][xPos + j]);
- const __m128 ef_re = _mm_loadu_ps(&ef[0][j]);
- const __m128 ef_im = _mm_loadu_ps(&ef[1][j]);
- // Calculate the product of conjugate(xfBuf) by ef.
+ // Load x_fft_buf and e_fft.
+ const __m128 x_fft_buf_re = _mm_loadu_ps(&x_fft_buf[0][xPos + j]);
+ const __m128 x_fft_buf_im = _mm_loadu_ps(&x_fft_buf[1][xPos + j]);
+ const __m128 e_fft_re = _mm_loadu_ps(&e_fft[0][j]);
+ const __m128 e_fft_im = _mm_loadu_ps(&e_fft[1][j]);
+ // Calculate the product of conjugate(x_fft_buf) by e_fft.
// re(conjugate(a) * b) = aRe * bRe + aIm * bIm
// im(conjugate(a) * b) = aRe * bIm - aIm * bRe
- const __m128 a = _mm_mul_ps(xfBuf_re, ef_re);
- const __m128 b = _mm_mul_ps(xfBuf_im, ef_im);
- const __m128 c = _mm_mul_ps(xfBuf_re, ef_im);
- const __m128 d = _mm_mul_ps(xfBuf_im, ef_re);
+ const __m128 a = _mm_mul_ps(x_fft_buf_re, e_fft_re);
+ const __m128 b = _mm_mul_ps(x_fft_buf_im, e_fft_im);
+ const __m128 c = _mm_mul_ps(x_fft_buf_re, e_fft_im);
+ const __m128 d = _mm_mul_ps(x_fft_buf_im, e_fft_re);
const __m128 e = _mm_add_ps(a, b);
const __m128 f = _mm_sub_ps(c, d);
// Interleave real and imaginary parts.
@@ -176,10 +188,10 @@ static void FilterAdaptationSSE2(AecCore* aec,
_mm_storeu_ps(&fft[2 * j + 4], h);
}
// ... and fixup the first imaginary entry.
- fft[1] = MulRe(aec->xfBuf[0][xPos + PART_LEN],
- -aec->xfBuf[1][xPos + PART_LEN],
- ef[0][PART_LEN],
- ef[1][PART_LEN]);
+ fft[1] = MulRe(x_fft_buf[0][xPos + PART_LEN],
+ -x_fft_buf[1][xPos + PART_LEN],
+ e_fft[0][PART_LEN],
+ e_fft[1][PART_LEN]);
aec_rdft_inverse_128(fft);
memset(fft + PART_LEN, 0, sizeof(float) * PART_LEN);
@@ -197,11 +209,11 @@ static void FilterAdaptationSSE2(AecCore* aec,
aec_rdft_forward_128(fft);
{
- float wt1 = aec->wfBuf[1][pos];
- aec->wfBuf[0][pos + PART_LEN] += fft[1];
+ float wt1 = h_fft_buf[1][pos];
+ h_fft_buf[0][pos + PART_LEN] += fft[1];
for (j = 0; j < PART_LEN; j += 4) {
- __m128 wtBuf_re = _mm_loadu_ps(&aec->wfBuf[0][pos + j]);
- __m128 wtBuf_im = _mm_loadu_ps(&aec->wfBuf[1][pos + j]);
+ __m128 wtBuf_re = _mm_loadu_ps(&h_fft_buf[0][pos + j]);
+ __m128 wtBuf_im = _mm_loadu_ps(&h_fft_buf[1][pos + j]);
const __m128 fft0 = _mm_loadu_ps(&fft[2 * j + 0]);
const __m128 fft4 = _mm_loadu_ps(&fft[2 * j + 4]);
const __m128 fft_re =
@@ -210,10 +222,10 @@ static void FilterAdaptationSSE2(AecCore* aec,
_mm_shuffle_ps(fft0, fft4, _MM_SHUFFLE(3, 1, 3, 1));
wtBuf_re = _mm_add_ps(wtBuf_re, fft_re);
wtBuf_im = _mm_add_ps(wtBuf_im, fft_im);
- _mm_storeu_ps(&aec->wfBuf[0][pos + j], wtBuf_re);
- _mm_storeu_ps(&aec->wfBuf[1][pos + j], wtBuf_im);
+ _mm_storeu_ps(&h_fft_buf[0][pos + j], wtBuf_re);
+ _mm_storeu_ps(&h_fft_buf[1][pos + j], wtBuf_im);
}
- aec->wfBuf[1][pos] = wt1;
+ h_fft_buf[1][pos] = wt1;
}
}
}
@@ -427,7 +439,8 @@ __inline static void _mm_add_ps_4x1(__m128 sum, float *dst) {
sum = _mm_add_ps(sum, _mm_shuffle_ps(sum, sum, _MM_SHUFFLE(1, 1, 1, 1)));
_mm_store_ss(dst, sum);
}
-static int PartitionDelay(const AecCore* aec) {
+
+static int PartitionDelaySSE2(const AecCore* aec) {
// Measures the energy in each filter partition and returns the partition with
// highest energy.
// TODO(bjornv): Spread computational cost by computing one partition per
@@ -476,7 +489,8 @@ static int PartitionDelay(const AecCore* aec) {
static void SmoothedPSD(AecCore* aec,
float efw[2][PART_LEN1],
float dfw[2][PART_LEN1],
- float xfw[2][PART_LEN1]) {
+ float xfw[2][PART_LEN1],
+ int* extreme_filter_divergence) {
// Power estimate smoothing coefficients.
const float* ptrGCoh = aec->extended_filter_enabled
? WebRtcAec_kExtendedSmoothingCoefficients[aec->mult - 1]
@@ -595,19 +609,16 @@ static void SmoothedPSD(AecCore* aec,
seSum += aec->se[i];
}
- // Divergent filter safeguard.
+ // Divergent filter safeguard update.
aec->divergeState = (aec->divergeState ? 1.05f : 1.0f) * seSum > sdSum;
- if (aec->divergeState)
- memcpy(efw, dfw, sizeof(efw[0][0]) * 2 * PART_LEN1);
-
- // Reset if error is significantly larger than nearend (13 dB).
- if (!aec->extended_filter_enabled && seSum > (19.95f * sdSum))
- memset(aec->wfBuf, 0, sizeof(aec->wfBuf));
+ // Signal extreme filter divergence if the error is significantly larger
+ // than the nearend (13 dB).
+ *extreme_filter_divergence = (seSum > (19.95f * sdSum));
}
// Window time domain data to be used by the fft.
-__inline static void WindowData(float* x_windowed, const float* x) {
+static void WindowDataSSE2(float* x_windowed, const float* x) {
int i;
for (i = 0; i < PART_LEN; i += 4) {
const __m128 vec_Buf1 = _mm_loadu_ps(&x[i]);
@@ -627,8 +638,8 @@ __inline static void WindowData(float* x_windowed, const float* x) {
}
// Puts fft output data into a complex valued array.
-__inline static void StoreAsComplex(const float* data,
- float data_complex[2][PART_LEN1]) {
+static void StoreAsComplexSSE2(const float* data,
+ float data_complex[2][PART_LEN1]) {
int i;
for (i = 0; i < PART_LEN; i += 4) {
const __m128 vec_fft0 = _mm_loadu_ps(&data[2 * i]);
@@ -649,32 +660,15 @@ __inline static void StoreAsComplex(const float* data,
static void SubbandCoherenceSSE2(AecCore* aec,
float efw[2][PART_LEN1],
+ float dfw[2][PART_LEN1],
float xfw[2][PART_LEN1],
float* fft,
float* cohde,
- float* cohxd) {
- float dfw[2][PART_LEN1];
+ float* cohxd,
+ int* extreme_filter_divergence) {
int i;
- if (aec->delayEstCtr == 0)
- aec->delayIdx = PartitionDelay(aec);
-
- // Use delayed far.
- memcpy(xfw,
- aec->xfwBuf + aec->delayIdx * PART_LEN1,
- sizeof(xfw[0][0]) * 2 * PART_LEN1);
-
- // Windowed near fft
- WindowData(fft, aec->dBuf);
- aec_rdft_forward_128(fft);
- StoreAsComplex(fft, dfw);
-
- // Windowed error fft
- WindowData(fft, aec->eBuf);
- aec_rdft_forward_128(fft);
- StoreAsComplex(fft, efw);
-
- SmoothedPSD(aec, efw, dfw, xfw);
+ SmoothedPSD(aec, efw, dfw, xfw, extreme_filter_divergence);
{
const __m128 vec_1eminus10 = _mm_set1_ps(1e-10f);
@@ -728,4 +722,7 @@ void WebRtcAec_InitAec_SSE2(void) {
WebRtcAec_FilterAdaptation = FilterAdaptationSSE2;
WebRtcAec_OverdriveAndSuppress = OverdriveAndSuppressSSE2;
WebRtcAec_SubbandCoherence = SubbandCoherenceSSE2;
+ WebRtcAec_StoreAsComplex = StoreAsComplexSSE2;
+ WebRtcAec_PartitionDelay = PartitionDelaySSE2;
+ WebRtcAec_WindowData = WindowDataSSE2;
}
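
The SSE2 adaptation loop deinterleaves the FFT output with two shuffles where the NEON version uses vuzpq_f32. A standalone demo of that trick (function name is illustrative):

#include <emmintrin.h>

/* Given interleaved {re0, im0, re1, im1, re2, im2, re3, im3}, the
 * (2,0,2,0) and (3,1,3,1) shuffles split real and imaginary parts. */
static void Deinterleave4(const float in[8], float re[4], float im[4]) {
  const __m128 lo = _mm_loadu_ps(in);      /* re0 im0 re1 im1 */
  const __m128 hi = _mm_loadu_ps(in + 4);  /* re2 im2 re3 im3 */
  _mm_storeu_ps(re, _mm_shuffle_ps(lo, hi, _MM_SHUFFLE(2, 0, 2, 0)));
  _mm_storeu_ps(im, _mm_shuffle_ps(lo, hi, _MM_SHUFFLE(3, 1, 3, 1)));
}
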
diff --git a/webrtc/modules/audio_processing/aec/echo_cancellation.c b/webrtc/modules/audio_processing/aec/echo_cancellation.c
index 0f5cd31ddb..aab1718b24 100644
--- a/webrtc/modules/audio_processing/aec/echo_cancellation.c
+++ b/webrtc/modules/audio_processing/aec/echo_cancellation.c
@@ -11,7 +11,7 @@
/*
* Contains the API functions for the AEC.
*/
-#include "webrtc/modules/audio_processing/aec/include/echo_cancellation.h"
+#include "webrtc/modules/audio_processing/aec/echo_cancellation.h"
#include <math.h>
#ifdef WEBRTC_AEC_DEBUG_DUMP
@@ -146,7 +146,6 @@ void* WebRtcAec_Create() {
}
aecpc->initFlag = 0;
- aecpc->lastError = 0;
#ifdef WEBRTC_AEC_DEBUG_DUMP
{
@@ -192,26 +191,22 @@ int32_t WebRtcAec_Init(void* aecInst, int32_t sampFreq, int32_t scSampFreq) {
sampFreq != 16000 &&
sampFreq != 32000 &&
sampFreq != 48000) {
- aecpc->lastError = AEC_BAD_PARAMETER_ERROR;
- return -1;
+ return AEC_BAD_PARAMETER_ERROR;
}
aecpc->sampFreq = sampFreq;
if (scSampFreq < 1 || scSampFreq > 96000) {
- aecpc->lastError = AEC_BAD_PARAMETER_ERROR;
- return -1;
+ return AEC_BAD_PARAMETER_ERROR;
}
aecpc->scSampFreq = scSampFreq;
// Initialize echo canceller core
if (WebRtcAec_InitAec(aecpc->aec, aecpc->sampFreq) == -1) {
- aecpc->lastError = AEC_UNSPECIFIED_ERROR;
- return -1;
+ return AEC_UNSPECIFIED_ERROR;
}
if (WebRtcAec_InitResampler(aecpc->resampler, aecpc->scSampFreq) == -1) {
- aecpc->lastError = AEC_UNSPECIFIED_ERROR;
- return -1;
+ return AEC_UNSPECIFIED_ERROR;
}
WebRtc_InitBuffer(aecpc->far_pre_buf);
@@ -261,13 +256,32 @@ int32_t WebRtcAec_Init(void* aecInst, int32_t sampFreq, int32_t scSampFreq) {
aecConfig.delay_logging = kAecFalse;
if (WebRtcAec_set_config(aecpc, aecConfig) == -1) {
- aecpc->lastError = AEC_UNSPECIFIED_ERROR;
- return -1;
+ return AEC_UNSPECIFIED_ERROR;
}
return 0;
}
+// Returns any error that buffering the given far-end signal
+// would cause, without actually buffering it.
+int32_t WebRtcAec_GetBufferFarendError(void* aecInst,
+ const float* farend,
+ size_t nrOfSamples) {
+ Aec* aecpc = aecInst;
+
+ if (!farend)
+ return AEC_NULL_POINTER_ERROR;
+
+ if (aecpc->initFlag != initCheck)
+ return AEC_UNINITIALIZED_ERROR;
+
+ // number of samples == 160 for SWB input
+ if (nrOfSamples != 80 && nrOfSamples != 160)
+ return AEC_BAD_PARAMETER_ERROR;
+
+ return 0;
+}
+
// only buffer L band for farend
int32_t WebRtcAec_BufferFarend(void* aecInst,
const float* farend,
@@ -277,21 +291,13 @@ int32_t WebRtcAec_BufferFarend(void* aecInst,
float new_farend[MAX_RESAMP_LEN];
const float* farend_ptr = farend;
- if (farend == NULL) {
- aecpc->lastError = AEC_NULL_POINTER_ERROR;
- return -1;
- }
+ // Get any error caused by buffering the farend signal.
+ int32_t error_code = WebRtcAec_GetBufferFarendError(aecInst, farend,
+ nrOfSamples);
- if (aecpc->initFlag != initCheck) {
- aecpc->lastError = AEC_UNINITIALIZED_ERROR;
- return -1;
- }
+ if (error_code != 0)
+ return error_code;
- // number of samples == 160 for SWB input
- if (nrOfSamples != 80 && nrOfSamples != 160) {
- aecpc->lastError = AEC_BAD_PARAMETER_ERROR;
- return -1;
- }
if (aecpc->skewMode == kAecTrue && aecpc->resample == kAecTrue) {
// Resample and get a new number of samples
@@ -311,7 +317,8 @@ int32_t WebRtcAec_BufferFarend(void* aecInst,
// Write the time-domain data to |far_pre_buf|.
WebRtc_WriteBuffer(aecpc->far_pre_buf, farend_ptr, newNrOfSamples);
- // Transform to frequency domain if we have enough data.
+ // TODO(minyue): reduce to |PART_LEN| samples for each buffering, when
+ // WebRtcAec_BufferFarendPartition() is changed to take |PART_LEN| samples.
while (WebRtc_available_read(aecpc->far_pre_buf) >= PART_LEN2) {
// We have enough data to pass to the FFT, hence read PART_LEN2 samples.
{
@@ -319,10 +326,6 @@ int32_t WebRtcAec_BufferFarend(void* aecInst,
float tmp[PART_LEN2];
WebRtc_ReadBuffer(aecpc->far_pre_buf, (void**)&ptmp, tmp, PART_LEN2);
WebRtcAec_BufferFarendPartition(aecpc->aec, ptmp);
-#ifdef WEBRTC_AEC_DEBUG_DUMP
- WebRtc_WriteBuffer(
- WebRtcAec_far_time_buf(aecpc->aec), &ptmp[PART_LEN], 1);
-#endif
}
// Rewind |far_pre_buf| PART_LEN samples for overlap before continuing.
@@ -343,29 +346,24 @@ int32_t WebRtcAec_Process(void* aecInst,
int32_t retVal = 0;
if (out == NULL) {
- aecpc->lastError = AEC_NULL_POINTER_ERROR;
- return -1;
+ return AEC_NULL_POINTER_ERROR;
}
if (aecpc->initFlag != initCheck) {
- aecpc->lastError = AEC_UNINITIALIZED_ERROR;
- return -1;
+ return AEC_UNINITIALIZED_ERROR;
}
// number of samples == 160 for SWB input
if (nrOfSamples != 80 && nrOfSamples != 160) {
- aecpc->lastError = AEC_BAD_PARAMETER_ERROR;
- return -1;
+ return AEC_BAD_PARAMETER_ERROR;
}
if (msInSndCardBuf < 0) {
msInSndCardBuf = 0;
- aecpc->lastError = AEC_BAD_PARAMETER_WARNING;
- retVal = -1;
+ retVal = AEC_BAD_PARAMETER_WARNING;
} else if (msInSndCardBuf > kMaxTrustedDelayMs) {
// The clamping is now done in ProcessExtended/Normal().
- aecpc->lastError = AEC_BAD_PARAMETER_WARNING;
- retVal = -1;
+ retVal = AEC_BAD_PARAMETER_WARNING;
}
// This returns the value of aec->extended_filter_enabled.
@@ -378,15 +376,13 @@ int32_t WebRtcAec_Process(void* aecInst,
msInSndCardBuf,
skew);
} else {
- if (ProcessNormal(aecpc,
- nearend,
- num_bands,
- out,
- nrOfSamples,
- msInSndCardBuf,
- skew) != 0) {
- retVal = -1;
- }
+ retVal = ProcessNormal(aecpc,
+ nearend,
+ num_bands,
+ out,
+ nrOfSamples,
+ msInSndCardBuf,
+ skew);
}
#ifdef WEBRTC_AEC_DEBUG_DUMP
@@ -405,31 +401,26 @@ int32_t WebRtcAec_Process(void* aecInst,
int WebRtcAec_set_config(void* handle, AecConfig config) {
Aec* self = (Aec*)handle;
if (self->initFlag != initCheck) {
- self->lastError = AEC_UNINITIALIZED_ERROR;
- return -1;
+ return AEC_UNINITIALIZED_ERROR;
}
if (config.skewMode != kAecFalse && config.skewMode != kAecTrue) {
- self->lastError = AEC_BAD_PARAMETER_ERROR;
- return -1;
+ return AEC_BAD_PARAMETER_ERROR;
}
self->skewMode = config.skewMode;
if (config.nlpMode != kAecNlpConservative &&
config.nlpMode != kAecNlpModerate &&
config.nlpMode != kAecNlpAggressive) {
- self->lastError = AEC_BAD_PARAMETER_ERROR;
- return -1;
+ return AEC_BAD_PARAMETER_ERROR;
}
if (config.metricsMode != kAecFalse && config.metricsMode != kAecTrue) {
- self->lastError = AEC_BAD_PARAMETER_ERROR;
- return -1;
+ return AEC_BAD_PARAMETER_ERROR;
}
if (config.delay_logging != kAecFalse && config.delay_logging != kAecTrue) {
- self->lastError = AEC_BAD_PARAMETER_ERROR;
- return -1;
+ return AEC_BAD_PARAMETER_ERROR;
}
WebRtcAec_SetConfigCore(
@@ -440,12 +431,10 @@ int WebRtcAec_set_config(void* handle, AecConfig config) {
int WebRtcAec_get_echo_status(void* handle, int* status) {
Aec* self = (Aec*)handle;
if (status == NULL) {
- self->lastError = AEC_NULL_POINTER_ERROR;
- return -1;
+ return AEC_NULL_POINTER_ERROR;
}
if (self->initFlag != initCheck) {
- self->lastError = AEC_UNINITIALIZED_ERROR;
- return -1;
+ return AEC_UNINITIALIZED_ERROR;
}
*status = WebRtcAec_echo_state(self->aec);
@@ -466,12 +455,10 @@ int WebRtcAec_GetMetrics(void* handle, AecMetrics* metrics) {
return -1;
}
if (metrics == NULL) {
- self->lastError = AEC_NULL_POINTER_ERROR;
- return -1;
+ return AEC_NULL_POINTER_ERROR;
}
if (self->initFlag != initCheck) {
- self->lastError = AEC_UNINITIALIZED_ERROR;
- return -1;
+ return AEC_UNINITIALIZED_ERROR;
}
WebRtcAec_GetEchoStats(self->aec, &erl, &erle, &a_nlp);
@@ -556,32 +543,24 @@ int WebRtcAec_GetDelayMetrics(void* handle,
float* fraction_poor_delays) {
Aec* self = handle;
if (median == NULL) {
- self->lastError = AEC_NULL_POINTER_ERROR;
- return -1;
+ return AEC_NULL_POINTER_ERROR;
}
if (std == NULL) {
- self->lastError = AEC_NULL_POINTER_ERROR;
- return -1;
+ return AEC_NULL_POINTER_ERROR;
}
if (self->initFlag != initCheck) {
- self->lastError = AEC_UNINITIALIZED_ERROR;
- return -1;
+ return AEC_UNINITIALIZED_ERROR;
}
if (WebRtcAec_GetDelayMetricsCore(self->aec, median, std,
fraction_poor_delays) ==
-1) {
// Logging disabled.
- self->lastError = AEC_UNSUPPORTED_FUNCTION_ERROR;
- return -1;
+ return AEC_UNSUPPORTED_FUNCTION_ERROR;
}
return 0;
}
-int32_t WebRtcAec_get_error_code(void* aecInst) {
- Aec* aecpc = aecInst;
- return aecpc->lastError;
-}
AecCore* WebRtcAec_aec_core(void* handle) {
if (!handle) {
@@ -617,7 +596,7 @@ static int ProcessNormal(Aec* aecpc,
retVal = WebRtcAec_GetSkew(aecpc->resampler, skew, &aecpc->skew);
if (retVal == -1) {
aecpc->skew = 0;
- aecpc->lastError = AEC_BAD_PARAMETER_WARNING;
+ retVal = AEC_BAD_PARAMETER_WARNING;
}
aecpc->skew /= aecpc->sampFactor * nrOfSamples;
diff --git a/webrtc/modules/audio_processing/aec/include/echo_cancellation.h b/webrtc/modules/audio_processing/aec/echo_cancellation.h
index a340cf84d0..de84b2e6d1 100644
--- a/webrtc/modules/audio_processing/aec/include/echo_cancellation.h
+++ b/webrtc/modules/audio_processing/aec/echo_cancellation.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_AEC_INCLUDE_ECHO_CANCELLATION_H_
-#define WEBRTC_MODULES_AUDIO_PROCESSING_AEC_INCLUDE_ECHO_CANCELLATION_H_
+#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_AEC_ECHO_CANCELLATION_H_
+#define WEBRTC_MODULES_AUDIO_PROCESSING_AEC_ECHO_CANCELLATION_H_
#include <stddef.h>
@@ -109,13 +109,32 @@ int32_t WebRtcAec_Init(void* aecInst, int32_t sampFreq, int32_t scSampFreq);
* Outputs Description
* -------------------------------------------------------------------
* int32_t return 0: OK
- * -1: error
+ * 12000-12050: error code
*/
int32_t WebRtcAec_BufferFarend(void* aecInst,
const float* farend,
size_t nrOfSamples);
/*
+ * Reports any errors that would arise when buffering a farend buffer.
+ *
+ * Inputs Description
+ * -------------------------------------------------------------------
+ * void* aecInst Pointer to the AEC instance
+ * const float* farend In buffer containing one frame of
+ * farend signal for L band
+ * size_t nrOfSamples Number of samples in farend buffer
+ *
+ * Outputs Description
+ * -------------------------------------------------------------------
+ * int32_t return 0: OK
+ * 12000-12050: error code
+ */
+int32_t WebRtcAec_GetBufferFarendError(void* aecInst,
+ const float* farend,
+ size_t nrOfSamples);
+
+/*
* Runs the echo canceller on a block of 80 or 160 samples of data.
*
* Inputs Description
@@ -136,7 +155,7 @@ int32_t WebRtcAec_BufferFarend(void* aecInst,
* float* const* out Out buffer, one frame of processed nearend
* for each band
* int32_t return 0: OK
- * -1: error
+ * 12000-12050: error code
*/
int32_t WebRtcAec_Process(void* aecInst,
const float* const* nearend,
@@ -157,8 +176,8 @@ int32_t WebRtcAec_Process(void* aecInst,
*
* Outputs Description
* -------------------------------------------------------------------
- * int return 0: OK
- * -1: error
+ * int return 0: OK
+ * 12000-12050: error code
*/
int WebRtcAec_set_config(void* handle, AecConfig config);
@@ -173,8 +192,8 @@ int WebRtcAec_set_config(void* handle, AecConfig config);
* -------------------------------------------------------------------
* int* status 0: Almost certainly nearend single-talk
* 1: Might not be nearend single-talk
- * int return 0: OK
- * -1: error
+ * int return 0: OK
+ * 12000-12050: error code
*/
int WebRtcAec_get_echo_status(void* handle, int* status);
@@ -189,8 +208,8 @@ int WebRtcAec_get_echo_status(void* handle, int* status);
* -------------------------------------------------------------------
* AecMetrics* metrics Struct which will be filled out with the
* current echo metrics.
- * int return 0: OK
- * -1: error
+ * int return 0: OK
+ * 12000-12050: error code
*/
int WebRtcAec_GetMetrics(void* handle, AecMetrics* metrics);
@@ -208,27 +227,14 @@ int WebRtcAec_GetMetrics(void* handle, AecMetrics* metrics);
* float* fraction_poor_delays Fraction of the delay estimates that may
* cause the AEC to perform poorly.
*
- * int return 0: OK
- * -1: error
+ * int return 0: OK
+ * 12000-12050: error code
*/
int WebRtcAec_GetDelayMetrics(void* handle,
int* median,
int* std,
float* fraction_poor_delays);
-/*
- * Gets the last error code.
- *
- * Inputs Description
- * -------------------------------------------------------------------
- * void* aecInst Pointer to the AEC instance
- *
- * Outputs Description
- * -------------------------------------------------------------------
- * int32_t return 11000-11100: error code
- */
-int32_t WebRtcAec_get_error_code(void* aecInst);
-
// Returns a pointer to the low level AEC handle.
//
// Input:
@@ -242,4 +248,4 @@ struct AecCore* WebRtcAec_aec_core(void* handle);
#ifdef __cplusplus
}
#endif
-#endif // WEBRTC_MODULES_AUDIO_PROCESSING_AEC_INCLUDE_ECHO_CANCELLATION_H_
+#endif // WEBRTC_MODULES_AUDIO_PROCESSING_AEC_ECHO_CANCELLATION_H_
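
With lastError gone, callers now consume AEC error codes (12000-12050) from the return value directly instead of querying the removed WebRtcAec_get_error_code(). A usage sketch; the wrapper is illustrative, not part of the API:

#include <stddef.h>
#include <stdint.h>
#include "webrtc/modules/audio_processing/aec/echo_cancellation.h"

static int32_t BufferFarendChecked(void* aec, const float* farend,
                                   size_t samples) {
  const int32_t err = WebRtcAec_BufferFarend(aec, farend, samples);
  if (err != 0) {
    /* err is e.g. AEC_NULL_POINTER_ERROR or AEC_BAD_PARAMETER_ERROR. */
    return err;
  }
  return 0;
}
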
diff --git a/webrtc/modules/audio_processing/aec/echo_cancellation_internal.h b/webrtc/modules/audio_processing/aec/echo_cancellation_internal.h
index 95a6cf3324..e87219f33d 100644
--- a/webrtc/modules/audio_processing/aec/echo_cancellation_internal.h
+++ b/webrtc/modules/audio_processing/aec/echo_cancellation_internal.h
@@ -57,8 +57,6 @@ typedef struct {
RingBuffer* far_pre_buf; // Time domain far-end pre-buffer.
- int lastError;
-
int farend_started;
AecCore* aec;
diff --git a/webrtc/modules/audio_processing/aec/echo_cancellation_unittest.cc b/webrtc/modules/audio_processing/aec/echo_cancellation_unittest.cc
index 315ac3e9f9..42db082ff9 100644
--- a/webrtc/modules/audio_processing/aec/echo_cancellation_unittest.cc
+++ b/webrtc/modules/audio_processing/aec/echo_cancellation_unittest.cc
@@ -10,7 +10,7 @@
// TODO(bjornv): Make this a comprehensive test.
-#include "webrtc/modules/audio_processing/aec/include/echo_cancellation.h"
+#include "webrtc/modules/audio_processing/aec/echo_cancellation.h"
#include <stdlib.h>
#include <time.h>
diff --git a/webrtc/modules/audio_processing/aec/system_delay_unittest.cc b/webrtc/modules/audio_processing/aec/system_delay_unittest.cc
index 07e3cf8add..567118d828 100644
--- a/webrtc/modules/audio_processing/aec/system_delay_unittest.cc
+++ b/webrtc/modules/audio_processing/aec/system_delay_unittest.cc
@@ -13,8 +13,7 @@ extern "C" {
#include "webrtc/modules/audio_processing/aec/aec_core.h"
}
#include "webrtc/modules/audio_processing/aec/echo_cancellation_internal.h"
-#include "webrtc/modules/audio_processing/aec/include/echo_cancellation.h"
-#include "webrtc/test/testsupport/gtest_disable.h"
+#include "webrtc/modules/audio_processing/aec/echo_cancellation.h"
#include "webrtc/typedefs.h"
namespace {
diff --git a/webrtc/modules/audio_processing/aecm/aecm_core.c b/webrtc/modules/audio_processing/aecm/aecm_core.c
index f0d85d5328..6bf1cf7f3e 100644
--- a/webrtc/modules/audio_processing/aecm/aecm_core.c
+++ b/webrtc/modules/audio_processing/aecm/aecm_core.c
@@ -16,7 +16,7 @@
#include "webrtc/common_audio/ring_buffer.h"
#include "webrtc/common_audio/signal_processing/include/real_fft.h"
-#include "webrtc/modules/audio_processing/aecm/include/echo_control_mobile.h"
+#include "webrtc/modules/audio_processing/aecm/echo_control_mobile.h"
#include "webrtc/modules/audio_processing/utility/delay_estimator_wrapper.h"
#include "webrtc/system_wrappers/include/compile_assert_c.h"
#include "webrtc/system_wrappers/include/cpu_features_wrapper.h"
diff --git a/webrtc/modules/audio_processing/aecm/aecm_core_c.c b/webrtc/modules/audio_processing/aecm/aecm_core_c.c
index df95e8bedf..3a8fafa4ec 100644
--- a/webrtc/modules/audio_processing/aecm/aecm_core_c.c
+++ b/webrtc/modules/audio_processing/aecm/aecm_core_c.c
@@ -16,7 +16,7 @@
#include "webrtc/common_audio/ring_buffer.h"
#include "webrtc/common_audio/signal_processing/include/real_fft.h"
-#include "webrtc/modules/audio_processing/aecm/include/echo_control_mobile.h"
+#include "webrtc/modules/audio_processing/aecm/echo_control_mobile.h"
#include "webrtc/modules/audio_processing/utility/delay_estimator_wrapper.h"
#include "webrtc/system_wrappers/include/compile_assert_c.h"
#include "webrtc/system_wrappers/include/cpu_features_wrapper.h"
diff --git a/webrtc/modules/audio_processing/aecm/aecm_core_mips.c b/webrtc/modules/audio_processing/aecm/aecm_core_mips.c
index 3c2343a892..3ca9982ebf 100644
--- a/webrtc/modules/audio_processing/aecm/aecm_core_mips.c
+++ b/webrtc/modules/audio_processing/aecm/aecm_core_mips.c
@@ -12,7 +12,7 @@
#include <assert.h>
-#include "webrtc/modules/audio_processing/aecm/include/echo_control_mobile.h"
+#include "webrtc/modules/audio_processing/aecm/echo_control_mobile.h"
#include "webrtc/modules/audio_processing/utility/delay_estimator_wrapper.h"
static const ALIGN8_BEG int16_t WebRtcAecm_kSqrtHanning[] ALIGN8_END = {
diff --git a/webrtc/modules/audio_processing/aecm/echo_control_mobile.c b/webrtc/modules/audio_processing/aecm/echo_control_mobile.c
index 83781e97fe..91e6f0e80c 100644
--- a/webrtc/modules/audio_processing/aecm/echo_control_mobile.c
+++ b/webrtc/modules/audio_processing/aecm/echo_control_mobile.c
@@ -8,7 +8,7 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/audio_processing/aecm/include/echo_control_mobile.h"
+#include "webrtc/modules/audio_processing/aecm/echo_control_mobile.h"
#ifdef AEC_DEBUG
#include <stdio.h>
@@ -68,8 +68,6 @@ typedef struct
// Structures
RingBuffer *farendBuf;
- int lastError;
-
AecmCore* aecmCore;
} AecMobile;
@@ -100,7 +98,6 @@ void* WebRtcAecm_Create() {
}
aecm->initFlag = 0;
- aecm->lastError = 0;
#ifdef AEC_DEBUG
aecm->aecmCore->farFile = fopen("aecFar.pcm","wb");
@@ -151,16 +148,14 @@ int32_t WebRtcAecm_Init(void *aecmInst, int32_t sampFreq)
if (sampFreq != 8000 && sampFreq != 16000)
{
- aecm->lastError = AECM_BAD_PARAMETER_ERROR;
- return -1;
+ return AECM_BAD_PARAMETER_ERROR;
}
aecm->sampFreq = sampFreq;
// Initialize AECM core
if (WebRtcAecm_InitCore(aecm->aecmCore, aecm->sampFreq) == -1)
{
- aecm->lastError = AECM_UNSPECIFIED_ERROR;
- return -1;
+ return AECM_UNSPECIFIED_ERROR;
}
// Initialize farend buffer
@@ -191,51 +186,53 @@ int32_t WebRtcAecm_Init(void *aecmInst, int32_t sampFreq)
if (WebRtcAecm_set_config(aecm, aecConfig) == -1)
{
- aecm->lastError = AECM_UNSPECIFIED_ERROR;
- return -1;
+ return AECM_UNSPECIFIED_ERROR;
}
return 0;
}
-int32_t WebRtcAecm_BufferFarend(void *aecmInst, const int16_t *farend,
- size_t nrOfSamples)
-{
+// Returns any error that buffering the given farend signal
+// would cause, without actually buffering it.
+int32_t WebRtcAecm_GetBufferFarendError(void *aecmInst, const int16_t *farend,
+ size_t nrOfSamples) {
AecMobile* aecm = aecmInst;
- int32_t retVal = 0;
- if (aecm == NULL)
- {
- return -1;
- }
+ if (aecm == NULL)
+ return -1;
- if (farend == NULL)
- {
- aecm->lastError = AECM_NULL_POINTER_ERROR;
- return -1;
- }
+ if (farend == NULL)
+ return AECM_NULL_POINTER_ERROR;
- if (aecm->initFlag != kInitCheck)
- {
- aecm->lastError = AECM_UNINITIALIZED_ERROR;
- return -1;
- }
+ if (aecm->initFlag != kInitCheck)
+ return AECM_UNINITIALIZED_ERROR;
- if (nrOfSamples != 80 && nrOfSamples != 160)
- {
- aecm->lastError = AECM_BAD_PARAMETER_ERROR;
- return -1;
- }
+ if (nrOfSamples != 80 && nrOfSamples != 160)
+ return AECM_BAD_PARAMETER_ERROR;
- // TODO: Is this really a good idea?
- if (!aecm->ECstartup)
- {
- WebRtcAecm_DelayComp(aecm);
- }
+ return 0;
+}
- WebRtc_WriteBuffer(aecm->farendBuf, farend, nrOfSamples);
- return retVal;
+int32_t WebRtcAecm_BufferFarend(void *aecmInst, const int16_t *farend,
+ size_t nrOfSamples) {
+ AecMobile* aecm = aecmInst;
+
+ const int32_t err =
+ WebRtcAecm_GetBufferFarendError(aecmInst, farend, nrOfSamples);
+
+ if (err != 0)
+ return err;
+
+ // TODO(unknown): Is this really a good idea?
+ if (!aecm->ECstartup)
+ {
+ WebRtcAecm_DelayComp(aecm);
+ }
+
+ WebRtc_WriteBuffer(aecm->farendBuf, farend, nrOfSamples);
+
+ return 0;
}
int32_t WebRtcAecm_Process(void *aecmInst, const int16_t *nearendNoisy,
@@ -259,38 +256,32 @@ int32_t WebRtcAecm_Process(void *aecmInst, const int16_t *nearendNoisy,
if (nearendNoisy == NULL)
{
- aecm->lastError = AECM_NULL_POINTER_ERROR;
- return -1;
+ return AECM_NULL_POINTER_ERROR;
}
if (out == NULL)
{
- aecm->lastError = AECM_NULL_POINTER_ERROR;
- return -1;
+ return AECM_NULL_POINTER_ERROR;
}
if (aecm->initFlag != kInitCheck)
{
- aecm->lastError = AECM_UNINITIALIZED_ERROR;
- return -1;
+ return AECM_UNINITIALIZED_ERROR;
}
if (nrOfSamples != 80 && nrOfSamples != 160)
{
- aecm->lastError = AECM_BAD_PARAMETER_ERROR;
- return -1;
+ return AECM_BAD_PARAMETER_ERROR;
}
if (msInSndCardBuf < 0)
{
msInSndCardBuf = 0;
- aecm->lastError = AECM_BAD_PARAMETER_WARNING;
- retVal = -1;
+ retVal = AECM_BAD_PARAMETER_WARNING;
} else if (msInSndCardBuf > 500)
{
msInSndCardBuf = 500;
- aecm->lastError = AECM_BAD_PARAMETER_WARNING;
- retVal = -1;
+ retVal = AECM_BAD_PARAMETER_WARNING;
}
msInSndCardBuf += 10;
aecm->msInSndCardBuf = msInSndCardBuf;
@@ -453,21 +444,18 @@ int32_t WebRtcAecm_set_config(void *aecmInst, AecmConfig config)
if (aecm->initFlag != kInitCheck)
{
- aecm->lastError = AECM_UNINITIALIZED_ERROR;
- return -1;
+ return AECM_UNINITIALIZED_ERROR;
}
if (config.cngMode != AecmFalse && config.cngMode != AecmTrue)
{
- aecm->lastError = AECM_BAD_PARAMETER_ERROR;
- return -1;
+ return AECM_BAD_PARAMETER_ERROR;
}
aecm->aecmCore->cngMode = config.cngMode;
if (config.echoMode < 0 || config.echoMode > 4)
{
- aecm->lastError = AECM_BAD_PARAMETER_ERROR;
- return -1;
+ return AECM_BAD_PARAMETER_ERROR;
}
aecm->echoMode = config.echoMode;
@@ -524,33 +512,6 @@ int32_t WebRtcAecm_set_config(void *aecmInst, AecmConfig config)
return 0;
}
-int32_t WebRtcAecm_get_config(void *aecmInst, AecmConfig *config)
-{
- AecMobile* aecm = aecmInst;
-
- if (aecm == NULL)
- {
- return -1;
- }
-
- if (config == NULL)
- {
- aecm->lastError = AECM_NULL_POINTER_ERROR;
- return -1;
- }
-
- if (aecm->initFlag != kInitCheck)
- {
- aecm->lastError = AECM_UNINITIALIZED_ERROR;
- return -1;
- }
-
- config->cngMode = aecm->aecmCore->cngMode;
- config->echoMode = aecm->echoMode;
-
- return 0;
-}
-
int32_t WebRtcAecm_InitEchoPath(void* aecmInst,
const void* echo_path,
size_t size_bytes)
@@ -562,19 +523,16 @@ int32_t WebRtcAecm_InitEchoPath(void* aecmInst,
return -1;
}
if (echo_path == NULL) {
- aecm->lastError = AECM_NULL_POINTER_ERROR;
- return -1;
+ return AECM_NULL_POINTER_ERROR;
}
if (size_bytes != WebRtcAecm_echo_path_size_bytes())
{
// Input channel size does not match the size of AECM
- aecm->lastError = AECM_BAD_PARAMETER_ERROR;
- return -1;
+ return AECM_BAD_PARAMETER_ERROR;
}
if (aecm->initFlag != kInitCheck)
{
- aecm->lastError = AECM_UNINITIALIZED_ERROR;
- return -1;
+ return AECM_UNINITIALIZED_ERROR;
}
WebRtcAecm_InitEchoPathCore(aecm->aecmCore, echo_path_ptr);
@@ -593,19 +551,16 @@ int32_t WebRtcAecm_GetEchoPath(void* aecmInst,
return -1;
}
if (echo_path == NULL) {
- aecm->lastError = AECM_NULL_POINTER_ERROR;
- return -1;
+ return AECM_NULL_POINTER_ERROR;
}
if (size_bytes != WebRtcAecm_echo_path_size_bytes())
{
// Input channel size does not match the size of AECM
- aecm->lastError = AECM_BAD_PARAMETER_ERROR;
- return -1;
+ return AECM_BAD_PARAMETER_ERROR;
}
if (aecm->initFlag != kInitCheck)
{
- aecm->lastError = AECM_UNINITIALIZED_ERROR;
- return -1;
+ return AECM_UNINITIALIZED_ERROR;
}
memcpy(echo_path_ptr, aecm->aecmCore->channelStored, size_bytes);
@@ -617,17 +572,6 @@ size_t WebRtcAecm_echo_path_size_bytes()
return (PART_LEN1 * sizeof(int16_t));
}
-int32_t WebRtcAecm_get_error_code(void *aecmInst)
-{
- AecMobile* aecm = aecmInst;
-
- if (aecm == NULL)
- {
- return -1;
- }
-
- return aecm->lastError;
-}
static int WebRtcAecm_EstBufDelay(AecMobile* aecm, short msInSndCardBuf) {
short delayNew, nSampSndCard;
diff --git a/webrtc/modules/audio_processing/aecm/include/echo_control_mobile.h b/webrtc/modules/audio_processing/aecm/echo_control_mobile.h
index 7ae15c2a3d..b45ff59907 100644
--- a/webrtc/modules/audio_processing/aecm/include/echo_control_mobile.h
+++ b/webrtc/modules/audio_processing/aecm/echo_control_mobile.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_AECM_INCLUDE_ECHO_CONTROL_MOBILE_H_
-#define WEBRTC_MODULES_AUDIO_PROCESSING_AECM_INCLUDE_ECHO_CONTROL_MOBILE_H_
+#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_AECM_ECHO_CONTROL_MOBILE_H_
+#define WEBRTC_MODULES_AUDIO_PROCESSING_AECM_ECHO_CONTROL_MOBILE_H_
#include <stdlib.h>
@@ -66,7 +66,7 @@ void WebRtcAecm_Free(void* aecmInst);
* Outputs Description
* -------------------------------------------------------------------
* int32_t return 0: OK
- * -1: error
+ * 12000-12004,12100: error/warning
*/
int32_t WebRtcAecm_Init(void* aecmInst, int32_t sampFreq);
@@ -83,13 +83,32 @@ int32_t WebRtcAecm_Init(void* aecmInst, int32_t sampFreq);
* Outputs Description
* -------------------------------------------------------------------
* int32_t return 0: OK
- * -1: error
+ * 12000-12004,12100: error/warning
*/
int32_t WebRtcAecm_BufferFarend(void* aecmInst,
const int16_t* farend,
size_t nrOfSamples);
/*
+ * Reports any errors that would arise when buffering a farend buffer.
+ *
+ * Inputs Description
+ * -------------------------------------------------------------------
+ * void* aecmInst Pointer to the AECM instance
+ * int16_t* farend In buffer containing one frame of
+ * farend signal
+ * size_t nrOfSamples Number of samples in farend buffer
+ *
+ * Outputs Description
+ * -------------------------------------------------------------------
+ * int32_t return 0: OK
+ * 12000-12004,12100: error/warning
+ */
+int32_t WebRtcAecm_GetBufferFarendError(void* aecmInst,
+ const int16_t* farend,
+ size_t nrOfSamples);
+
+/*
* Runs the AECM on a block of 80 or 160 samples of data.
*
* Inputs Description
@@ -112,7 +131,7 @@ int32_t WebRtcAecm_BufferFarend(void* aecmInst,
* -------------------------------------------------------------------
* int16_t* out Out buffer, one frame of processed nearend
* int32_t return 0: OK
- * -1: error
+ * 12000-12004,12100: error/warning
*/
int32_t WebRtcAecm_Process(void* aecmInst,
const int16_t* nearendNoisy,
@@ -133,27 +152,11 @@ int32_t WebRtcAecm_Process(void* aecmInst,
* Outputs Description
* -------------------------------------------------------------------
* int32_t return 0: OK
- * -1: error
+ * 12000-12004,12100: error/warning
*/
int32_t WebRtcAecm_set_config(void* aecmInst, AecmConfig config);
/*
- * This function enables the user to set certain parameters on-the-fly
- *
- * Inputs Description
- * -------------------------------------------------------------------
- * void* aecmInst Pointer to the AECM instance
- *
- * Outputs Description
- * -------------------------------------------------------------------
- * AecmConfig* config Pointer to the config instance that
- * all properties will be written to
- * int32_t return 0: OK
- * -1: error
- */
-int32_t WebRtcAecm_get_config(void *aecmInst, AecmConfig *config);
-
-/*
* This function enables the user to set the echo path on-the-fly.
*
* Inputs Description
@@ -165,7 +168,7 @@ int32_t WebRtcAecm_get_config(void *aecmInst, AecmConfig *config);
* Outputs Description
* -------------------------------------------------------------------
* int32_t return 0: OK
- * -1: error
+ * 12000-12004,12100: error/warning
*/
int32_t WebRtcAecm_InitEchoPath(void* aecmInst,
const void* echo_path,
@@ -184,7 +187,7 @@ int32_t WebRtcAecm_InitEchoPath(void* aecmInst,
* Outputs Description
* -------------------------------------------------------------------
* int32_t return 0: OK
- * -1: error
+ * 12000-12004,12100: error/warning
*/
int32_t WebRtcAecm_GetEchoPath(void* aecmInst,
void* echo_path,
@@ -199,20 +202,8 @@ int32_t WebRtcAecm_GetEchoPath(void* aecmInst,
*/
size_t WebRtcAecm_echo_path_size_bytes();
-/*
- * Gets the last error code.
- *
- * Inputs Description
- * -------------------------------------------------------------------
- * void* aecmInst Pointer to the AECM instance
- *
- * Outputs Description
- * -------------------------------------------------------------------
- * int32_t return 11000-11100: error code
- */
-int32_t WebRtcAecm_get_error_code(void *aecmInst);
#ifdef __cplusplus
}
#endif
-#endif // WEBRTC_MODULES_AUDIO_PROCESSING_AECM_INCLUDE_ECHO_CONTROL_MOBILE_H_
+#endif // WEBRTC_MODULES_AUDIO_PROCESSING_AECM_ECHO_CONTROL_MOBILE_H_
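
The AECM gains the same pre-validation entry point as the AEC. A sketch of how a caller might use it; the pre-check is optional, since WebRtcAecm_BufferFarend() performs the same check internally:

#include <stddef.h>
#include <stdint.h>
#include "webrtc/modules/audio_processing/aecm/echo_control_mobile.h"

static int32_t TryBufferFarend(void* aecm, const int16_t* farend,
                               size_t samples) {
  const int32_t err = WebRtcAecm_GetBufferFarendError(aecm, farend, samples);
  if (err != 0)
    return err;  /* AECM_* error code; no state was modified. */
  return WebRtcAecm_BufferFarend(aecm, farend, samples);
}
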
diff --git a/webrtc/modules/audio_processing/agc/agc.cc b/webrtc/modules/audio_processing/agc/agc.cc
index 706b963aa1..fc78f07ebb 100644
--- a/webrtc/modules/audio_processing/agc/agc.cc
+++ b/webrtc/modules/audio_processing/agc/agc.cc
@@ -19,7 +19,7 @@
#include "webrtc/base/checks.h"
#include "webrtc/modules/audio_processing/agc/histogram.h"
#include "webrtc/modules/audio_processing/agc/utility.h"
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
namespace webrtc {
namespace {
diff --git a/webrtc/modules/audio_processing/agc/agc_manager_direct.cc b/webrtc/modules/audio_processing/agc/agc_manager_direct.cc
index 867022dcbf..e56984a1b1 100644
--- a/webrtc/modules/audio_processing/agc/agc_manager_direct.cc
+++ b/webrtc/modules/audio_processing/agc/agc_manager_direct.cc
@@ -19,7 +19,7 @@
#include "webrtc/modules/audio_processing/agc/gain_map_internal.h"
#include "webrtc/modules/audio_processing/gain_control_impl.h"
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/system_wrappers/include/logging.h"
namespace webrtc {
@@ -168,19 +168,19 @@ int AgcManagerDirect::Initialize() {
// example, what happens when we change devices.
if (gctrl_->set_mode(GainControl::kFixedDigital) != 0) {
- LOG_FERR1(LS_ERROR, set_mode, GainControl::kFixedDigital);
+ LOG(LS_ERROR) << "set_mode(GainControl::kFixedDigital) failed.";
return -1;
}
if (gctrl_->set_target_level_dbfs(2) != 0) {
- LOG_FERR1(LS_ERROR, set_target_level_dbfs, 2);
+ LOG(LS_ERROR) << "set_target_level_dbfs(2) failed.";
return -1;
}
if (gctrl_->set_compression_gain_db(kDefaultCompressionGain) != 0) {
- LOG_FERR1(LS_ERROR, set_compression_gain_db, kDefaultCompressionGain);
+ LOG(LS_ERROR) << "set_compression_gain_db(kDefaultCompressionGain) failed.";
return -1;
}
if (gctrl_->enable_limiter(true) != 0) {
- LOG_FERR1(LS_ERROR, enable_limiter, true);
+ LOG(LS_ERROR) << "enable_limiter(true) failed.";
return -1;
}
return 0;
@@ -244,7 +244,7 @@ void AgcManagerDirect::Process(const int16_t* audio,
}
if (agc_->Process(audio, length, sample_rate_hz) != 0) {
- LOG_FERR0(LS_ERROR, Agc::Process);
+ LOG(LS_ERROR) << "Agc::Process failed";
assert(false);
}
@@ -434,7 +434,8 @@ void AgcManagerDirect::UpdateCompressor() {
compression_ = new_compression;
compression_accumulator_ = new_compression;
if (gctrl_->set_compression_gain_db(compression_) != 0) {
- LOG_FERR1(LS_ERROR, set_compression_gain_db, compression_);
+ LOG(LS_ERROR) << "set_compression_gain_db(" << compression_
+ << ") failed.";
}
}
}
diff --git a/webrtc/modules/audio_processing/agc/agc_unittest.cc b/webrtc/modules/audio_processing/agc/agc_unittest.cc
index 66a8a2b1b3..25b99d8773 100644
--- a/webrtc/modules/audio_processing/agc/agc_unittest.cc
+++ b/webrtc/modules/audio_processing/agc/agc_unittest.cc
@@ -13,7 +13,7 @@
#include "gmock/gmock.h"
#include "gtest/gtest.h"
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/test/testsupport/fileutils.h"
#include "webrtc/tools/agc/test_utils.h"
diff --git a/webrtc/modules/audio_processing/agc/histogram.cc b/webrtc/modules/audio_processing/agc/histogram.cc
index 1d3035fe12..5c66727a9f 100644
--- a/webrtc/modules/audio_processing/agc/histogram.cc
+++ b/webrtc/modules/audio_processing/agc/histogram.cc
@@ -13,7 +13,7 @@
#include <cmath>
#include <cstring>
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
namespace webrtc {
diff --git a/webrtc/modules/audio_processing/agc/legacy/analog_agc.c b/webrtc/modules/audio_processing/agc/legacy/analog_agc.c
index be644d9701..3a1dc9d5ce 100644
--- a/webrtc/modules/audio_processing/agc/legacy/analog_agc.c
+++ b/webrtc/modules/audio_processing/agc/legacy/analog_agc.c
@@ -250,34 +250,35 @@ int WebRtcAgc_AddMic(void *state, int16_t* const* in_mic, size_t num_bands,
return 0;
}
-int WebRtcAgc_AddFarend(void *state, const int16_t *in_far, size_t samples)
-{
+int WebRtcAgc_AddFarend(void *state, const int16_t *in_far, size_t samples) {
+ LegacyAgc* stt = (LegacyAgc*)state;
+
+ int err = WebRtcAgc_GetAddFarendError(state, samples);
+
+ if (err != 0)
+ return err;
+
+ return WebRtcAgc_AddFarendToDigital(&stt->digitalAgc, in_far, samples);
+}
+
+int WebRtcAgc_GetAddFarendError(void *state, size_t samples) {
LegacyAgc* stt;
stt = (LegacyAgc*)state;
- if (stt == NULL)
- {
- return -1;
- }
+ if (stt == NULL)
+ return -1;
- if (stt->fs == 8000)
- {
- if (samples != 80)
- {
- return -1;
- }
- } else if (stt->fs == 16000 || stt->fs == 32000 || stt->fs == 48000)
- {
- if (samples != 160)
- {
- return -1;
- }
- } else
- {
- return -1;
- }
+ if (stt->fs == 8000) {
+ if (samples != 80)
+ return -1;
+ } else if (stt->fs == 16000 || stt->fs == 32000 || stt->fs == 48000) {
+ if (samples != 160)
+ return -1;
+ } else {
+ return -1;
+ }
- return WebRtcAgc_AddFarendToDigital(&stt->digitalAgc, in_far, samples);
+ return 0;
}
int WebRtcAgc_VirtualMic(void *agcInst, int16_t* const* in_near,
diff --git a/webrtc/modules/audio_processing/agc/legacy/gain_control.h b/webrtc/modules/audio_processing/agc/legacy/gain_control.h
index 08c1988f01..db942fe5ec 100644
--- a/webrtc/modules/audio_processing/agc/legacy/gain_control.h
+++ b/webrtc/modules/audio_processing/agc/legacy/gain_control.h
@@ -50,6 +50,20 @@ extern "C"
#endif
/*
+ * This function checks the number of samples passed for the
+ * far-end signal and returns any error code that would arise.
+ *
+ * Input:
+ * - state : AGC instance.
+ * - samples : Number of samples in input vector.
+ *
+ * Return value:
+ * : 0 - Normal operation.
+ * : -1 - Error.
+ */
+int WebRtcAgc_GetAddFarendError(void* state, size_t samples);
+
+/*
* This function processes a 10 ms frame of far-end speech to determine
* if there is active speech. The length of the input speech vector must be
* given in samples (80 when FS=8000, and 160 when FS=16000, FS=32000 or
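
A sketch of how the factored-out check above can be used by callers; the wrapper name is illustrative, and WebRtcAgc_AddFarend() now runs the same check internally, so the explicit call is only needed when validating ahead of time:

#include <stddef.h>
#include <stdint.h>
#include "webrtc/modules/audio_processing/agc/legacy/gain_control.h"

static int AddFarendIfValid(void* agc, const int16_t* far_frame,
                            size_t samples) {
  if (WebRtcAgc_GetAddFarendError(agc, samples) != 0)
    return -1;  /* wrong sample count for the configured rate */
  return WebRtcAgc_AddFarend(agc, far_frame, samples);
}
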
diff --git a/webrtc/modules/audio_processing/agc/mock_agc.h b/webrtc/modules/audio_processing/agc/mock_agc.h
index 13dbd2edd5..e362200d86 100644
--- a/webrtc/modules/audio_processing/agc/mock_agc.h
+++ b/webrtc/modules/audio_processing/agc/mock_agc.h
@@ -14,7 +14,7 @@
#include "webrtc/modules/audio_processing/agc/agc.h"
#include "gmock/gmock.h"
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
namespace webrtc {
diff --git a/webrtc/modules/audio_processing/audio_buffer.cc b/webrtc/modules/audio_processing/audio_buffer.cc
index 81790a159b..ff64267e8c 100644
--- a/webrtc/modules/audio_processing/audio_buffer.cc
+++ b/webrtc/modules/audio_processing/audio_buffer.cc
@@ -26,7 +26,7 @@ const size_t kSamplesPer48kHzChannel = 480;
int KeyboardChannelIndex(const StreamConfig& stream_config) {
if (!stream_config.has_keyboard()) {
assert(false);
- return -1;
+ return 0;
}
return stream_config.num_channels();
@@ -44,9 +44,9 @@ size_t NumBandsFromSamplesPerChannel(size_t num_frames) {
} // namespace
AudioBuffer::AudioBuffer(size_t input_num_frames,
- int num_input_channels,
+ size_t num_input_channels,
size_t process_num_frames,
- int num_process_channels,
+ size_t num_process_channels,
size_t output_num_frames)
: input_num_frames_(input_num_frames),
num_input_channels_(num_input_channels),
@@ -74,7 +74,7 @@ AudioBuffer::AudioBuffer(size_t input_num_frames,
num_proc_channels_));
if (input_num_frames_ != proc_num_frames_) {
- for (int i = 0; i < num_proc_channels_; ++i) {
+ for (size_t i = 0; i < num_proc_channels_; ++i) {
input_resamplers_.push_back(
new PushSincResampler(input_num_frames_,
proc_num_frames_));
@@ -82,7 +82,7 @@ AudioBuffer::AudioBuffer(size_t input_num_frames,
}
if (output_num_frames_ != proc_num_frames_) {
- for (int i = 0; i < num_proc_channels_; ++i) {
+ for (size_t i = 0; i < num_proc_channels_; ++i) {
output_resamplers_.push_back(
new PushSincResampler(proc_num_frames_,
output_num_frames_));
@@ -130,7 +130,7 @@ void AudioBuffer::CopyFrom(const float* const* data,
// Resample.
if (input_num_frames_ != proc_num_frames_) {
- for (int i = 0; i < num_proc_channels_; ++i) {
+ for (size_t i = 0; i < num_proc_channels_; ++i) {
input_resamplers_[i]->Resample(data_ptr[i],
input_num_frames_,
process_buffer_->channels()[i],
@@ -140,7 +140,7 @@ void AudioBuffer::CopyFrom(const float* const* data,
}
// Convert to the S16 range.
- for (int i = 0; i < num_proc_channels_; ++i) {
+ for (size_t i = 0; i < num_proc_channels_; ++i) {
FloatToFloatS16(data_ptr[i],
proc_num_frames_,
data_->fbuf()->channels()[i]);
@@ -150,7 +150,7 @@ void AudioBuffer::CopyFrom(const float* const* data,
void AudioBuffer::CopyTo(const StreamConfig& stream_config,
float* const* data) {
assert(stream_config.num_frames() == output_num_frames_);
- assert(stream_config.num_channels() == num_channels_);
+ assert(stream_config.num_channels() == num_channels_ || num_channels_ == 1);
// Convert to the float range.
float* const* data_ptr = data;
@@ -158,7 +158,7 @@ void AudioBuffer::CopyTo(const StreamConfig& stream_config,
// Convert to an intermediate buffer for subsequent resampling.
data_ptr = process_buffer_->channels();
}
- for (int i = 0; i < num_channels_; ++i) {
+ for (size_t i = 0; i < num_channels_; ++i) {
FloatS16ToFloat(data_->fbuf()->channels()[i],
proc_num_frames_,
data_ptr[i]);
@@ -166,13 +166,18 @@ void AudioBuffer::CopyTo(const StreamConfig& stream_config,
// Resample.
if (output_num_frames_ != proc_num_frames_) {
- for (int i = 0; i < num_channels_; ++i) {
+ for (size_t i = 0; i < num_channels_; ++i) {
output_resamplers_[i]->Resample(data_ptr[i],
proc_num_frames_,
data[i],
output_num_frames_);
}
}
+
+ // Upmix.
+ for (size_t i = num_channels_; i < stream_config.num_channels(); ++i) {
+ memcpy(data[i], data[0], output_num_frames_ * sizeof(**data));
+ }
}
void AudioBuffer::InitForNewData() {
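The CopyTo() change above relaxes the channel-count assert and duplicates
channel 0 into any extra output channels, so audio processed internally as
mono (e.g. with the beamformer enabled) can still fill a multi-channel
destination. A standalone sketch of that upmix step (illustrative only; this
is not the AudioBuffer code):

    #include <cstddef>
    #include <cstring>

    // Copies channel 0 into channels [num_proc, num_out) of a planar buffer.
    void UpmixFromMono(float* const* data, size_t num_proc_channels,
                       size_t num_out_channels, size_t num_frames) {
      for (size_t i = num_proc_channels; i < num_out_channels; ++i)
        std::memcpy(data[i], data[0], num_frames * sizeof(**data));
    }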
@@ -192,13 +197,13 @@ int16_t* const* AudioBuffer::channels() {
return data_->ibuf()->channels();
}
-const int16_t* const* AudioBuffer::split_bands_const(int channel) const {
+const int16_t* const* AudioBuffer::split_bands_const(size_t channel) const {
return split_data_.get() ?
split_data_->ibuf_const()->bands(channel) :
data_->ibuf_const()->bands(channel);
}
-int16_t* const* AudioBuffer::split_bands(int channel) {
+int16_t* const* AudioBuffer::split_bands(size_t channel) {
mixed_low_pass_valid_ = false;
return split_data_.get() ?
split_data_->ibuf()->bands(channel) :
@@ -249,13 +254,13 @@ float* const* AudioBuffer::channels_f() {
return data_->fbuf()->channels();
}
-const float* const* AudioBuffer::split_bands_const_f(int channel) const {
+const float* const* AudioBuffer::split_bands_const_f(size_t channel) const {
return split_data_.get() ?
split_data_->fbuf_const()->bands(channel) :
data_->fbuf_const()->bands(channel);
}
-float* const* AudioBuffer::split_bands_f(int channel) {
+float* const* AudioBuffer::split_bands_f(size_t channel) {
mixed_low_pass_valid_ = false;
return split_data_.get() ?
split_data_->fbuf()->bands(channel) :
@@ -336,11 +341,11 @@ AudioFrame::VADActivity AudioBuffer::activity() const {
return activity_;
}
-int AudioBuffer::num_channels() const {
+size_t AudioBuffer::num_channels() const {
return num_channels_;
}
-void AudioBuffer::set_num_channels(int num_channels) {
+void AudioBuffer::set_num_channels(size_t num_channels) {
num_channels_ = num_channels;
}
@@ -393,7 +398,7 @@ void AudioBuffer::DeinterleaveFrom(AudioFrame* frame) {
// Resample.
if (input_num_frames_ != proc_num_frames_) {
- for (int i = 0; i < num_proc_channels_; ++i) {
+ for (size_t i = 0; i < num_proc_channels_; ++i) {
input_resamplers_[i]->Resample(input_buffer_->fbuf_const()->channels()[i],
input_num_frames_,
data_->fbuf()->channels()[i],
@@ -418,7 +423,7 @@ void AudioBuffer::InterleaveTo(AudioFrame* frame, bool data_changed) {
output_buffer_.reset(
new IFChannelBuffer(output_num_frames_, num_channels_));
}
- for (int i = 0; i < num_channels_; ++i) {
+ for (size_t i = 0; i < num_channels_; ++i) {
output_resamplers_[i]->Resample(
data_->fbuf()->channels()[i], proc_num_frames_,
output_buffer_->fbuf()->channels()[i], output_num_frames_);
@@ -443,7 +448,7 @@ void AudioBuffer::CopyLowPassToReference() {
new ChannelBuffer<int16_t>(num_split_frames_,
num_proc_channels_));
}
- for (int i = 0; i < num_proc_channels_; i++) {
+ for (size_t i = 0; i < num_proc_channels_; i++) {
memcpy(low_pass_reference_channels_->channels()[i],
split_bands_const(i)[kBand0To8kHz],
low_pass_reference_channels_->num_frames_per_band() *
diff --git a/webrtc/modules/audio_processing/audio_buffer.h b/webrtc/modules/audio_processing/audio_buffer.h
index 864633f267..ff12ca2d95 100644
--- a/webrtc/modules/audio_processing/audio_buffer.h
+++ b/webrtc/modules/audio_processing/audio_buffer.h
@@ -15,7 +15,7 @@
#include "webrtc/common_audio/channel_buffer.h"
#include "webrtc/modules/audio_processing/include/audio_processing.h"
#include "webrtc/modules/audio_processing/splitting_filter.h"
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/system_wrappers/include/scoped_vector.h"
#include "webrtc/typedefs.h"
@@ -34,14 +34,14 @@ class AudioBuffer {
public:
// TODO(ajm): Switch to take ChannelLayouts.
AudioBuffer(size_t input_num_frames,
- int num_input_channels,
+ size_t num_input_channels,
size_t process_num_frames,
- int num_process_channels,
+ size_t num_process_channels,
size_t output_num_frames);
virtual ~AudioBuffer();
- int num_channels() const;
- void set_num_channels(int num_channels);
+ size_t num_channels() const;
+ void set_num_channels(size_t num_channels);
size_t num_frames() const;
size_t num_frames_per_band() const;
size_t num_keyboard_frames() const;
@@ -65,10 +65,10 @@ class AudioBuffer {
// 0 <= channel < |num_proc_channels_|
// 0 <= band < |num_bands_|
// 0 <= sample < |num_split_frames_|
- int16_t* const* split_bands(int channel);
- const int16_t* const* split_bands_const(int channel) const;
- float* const* split_bands_f(int channel);
- const float* const* split_bands_const_f(int channel) const;
+ int16_t* const* split_bands(size_t channel);
+ const int16_t* const* split_bands_const(size_t channel) const;
+ float* const* split_bands_f(size_t channel);
+ const float* const* split_bands_const_f(size_t channel) const;
// Returns a pointer array to the channels for a specific band.
// Usage:
@@ -128,16 +128,16 @@ class AudioBuffer {
// The audio is passed into DeinterleaveFrom() or CopyFrom() with input
// format (samples per channel and number of channels).
const size_t input_num_frames_;
- const int num_input_channels_;
+ const size_t num_input_channels_;
// The audio is stored by DeinterleaveFrom() or CopyFrom() with processing
// format.
const size_t proc_num_frames_;
- const int num_proc_channels_;
+ const size_t num_proc_channels_;
// The audio is returned by InterleaveTo() and CopyTo() with output samples
// per channel and the current number of channels. The latter can be
// changed at any time using set_num_channels().
const size_t output_num_frames_;
- int num_channels_;
+ size_t num_channels_;
size_t num_bands_;
size_t num_split_frames_;
diff --git a/webrtc/modules/audio_processing/audio_processing.gypi b/webrtc/modules/audio_processing/audio_processing.gypi
index 8f1fbdf0be..7ddd4f5a15 100644
--- a/webrtc/modules/audio_processing/audio_processing.gypi
+++ b/webrtc/modules/audio_processing/audio_processing.gypi
@@ -41,11 +41,11 @@
'aec/aec_resampler.h',
'aec/echo_cancellation.c',
'aec/echo_cancellation_internal.h',
- 'aec/include/echo_cancellation.h',
+ 'aec/echo_cancellation.h',
'aecm/aecm_core.c',
'aecm/aecm_core.h',
'aecm/echo_control_mobile.c',
- 'aecm/include/echo_control_mobile.h',
+ 'aecm/echo_control_mobile.h',
'agc/agc.cc',
'agc/agc.h',
'agc/agc_manager_direct.cc',
@@ -162,7 +162,7 @@
['prefer_fixed_point==1', {
'defines': ['WEBRTC_NS_FIXED'],
'sources': [
- 'ns/include/noise_suppression_x.h',
+ 'ns/noise_suppression_x.h',
'ns/noise_suppression_x.c',
'ns/nsx_core.c',
'ns/nsx_core.h',
@@ -183,7 +183,7 @@
'defines': ['WEBRTC_NS_FLOAT'],
'sources': [
'ns/defines.h',
- 'ns/include/noise_suppression.h',
+ 'ns/noise_suppression.h',
'ns/noise_suppression.c',
'ns/ns_core.c',
'ns/ns_core.h',
diff --git a/webrtc/modules/audio_processing/audio_processing_impl.cc b/webrtc/modules/audio_processing/audio_processing_impl.cc
index c6574151d0..744309c774 100644
--- a/webrtc/modules/audio_processing/audio_processing_impl.cc
+++ b/webrtc/modules/audio_processing/audio_processing_impl.cc
@@ -15,6 +15,7 @@
#include "webrtc/base/checks.h"
#include "webrtc/base/platform_file.h"
+#include "webrtc/base/trace_event.h"
#include "webrtc/common_audio/audio_converter.h"
#include "webrtc/common_audio/channel_buffer.h"
#include "webrtc/common_audio/include/audio_util.h"
@@ -36,8 +37,7 @@ extern "C" {
#include "webrtc/modules/audio_processing/processing_component.h"
#include "webrtc/modules/audio_processing/transient/transient_suppressor.h"
#include "webrtc/modules/audio_processing/voice_detection_impl.h"
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
+#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/system_wrappers/include/file_wrapper.h"
#include "webrtc/system_wrappers/include/logging.h"
#include "webrtc/system_wrappers/include/metrics.h"
@@ -75,7 +75,6 @@ static bool LayoutHasKeyboard(AudioProcessing::ChannelLayout layout) {
assert(false);
return false;
}
-
} // namespace
// Throughout webrtc, it's assumed that success is represented by zero.
@@ -147,6 +146,35 @@ class GainControlForNewAgc : public GainControl, public VolumeCallbacks {
int volume_;
};
+struct AudioProcessingImpl::ApmPublicSubmodules {
+ ApmPublicSubmodules()
+ : echo_cancellation(nullptr),
+ echo_control_mobile(nullptr),
+ gain_control(nullptr) {}
+ // Accessed from outside APM without any lock acquired.
+ EchoCancellationImpl* echo_cancellation;
+ EchoControlMobileImpl* echo_control_mobile;
+ GainControlImpl* gain_control;
+ rtc::scoped_ptr<HighPassFilterImpl> high_pass_filter;
+ rtc::scoped_ptr<LevelEstimatorImpl> level_estimator;
+ rtc::scoped_ptr<NoiseSuppressionImpl> noise_suppression;
+ rtc::scoped_ptr<VoiceDetectionImpl> voice_detection;
+ rtc::scoped_ptr<GainControlForNewAgc> gain_control_for_new_agc;
+
+ // Accessed internally from both render and capture.
+ rtc::scoped_ptr<TransientSuppressor> transient_suppressor;
+ rtc::scoped_ptr<IntelligibilityEnhancer> intelligibility_enhancer;
+};
+
+struct AudioProcessingImpl::ApmPrivateSubmodules {
+ explicit ApmPrivateSubmodules(Beamformer<float>* beamformer)
+ : beamformer(beamformer) {}
+ // Accessed internally from capture or during initialization.
+ std::list<ProcessingComponent*> component_list;
+ rtc::scoped_ptr<Beamformer<float>> beamformer;
+ rtc::scoped_ptr<AgcManagerDirect> agc_manager;
+};
+
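These structs encode the new concurrency scheme: public submodules may be
reached from outside APM, private ones only from the capture side or during
initialization, and a render lock plus a capture lock replace the old single
critical section. A condensed sketch of the acquisition pattern used
throughout this file (assumption-level pseudocode, not the actual class):

    #include "webrtc/base/criticalsection.h"

    class LockingSketch {
     public:
      void RenderSideCall() { rtc::CritScope cs(&crit_render_); /* render work */ }
      void CaptureSideCall() { rtc::CritScope cs(&crit_capture_); /* capture work */ }
      // Initialization and option changes run single-threaded: take both
      // locks, render first and capture second, to keep a fixed lock order.
      void SingleThreadedCall() {
        rtc::CritScope cs_render(&crit_render_);
        rtc::CritScope cs_capture(&crit_capture_);
      }
     private:
      rtc::CriticalSection crit_render_;
      rtc::CriticalSection crit_capture_;
    };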
const int AudioProcessing::kNativeSampleRatesHz[] = {
AudioProcessing::kSampleRate8kHz,
AudioProcessing::kSampleRate16kHz,
@@ -172,7 +200,7 @@ AudioProcessing* AudioProcessing::Create(const Config& config,
AudioProcessingImpl* apm = new AudioProcessingImpl(config, beamformer);
if (apm->Initialize() != kNoError) {
delete apm;
- apm = NULL;
+ apm = nullptr;
}
return apm;
@@ -183,102 +211,82 @@ AudioProcessingImpl::AudioProcessingImpl(const Config& config)
AudioProcessingImpl::AudioProcessingImpl(const Config& config,
Beamformer<float>* beamformer)
- : echo_cancellation_(NULL),
- echo_control_mobile_(NULL),
- gain_control_(NULL),
- high_pass_filter_(NULL),
- level_estimator_(NULL),
- noise_suppression_(NULL),
- voice_detection_(NULL),
- crit_(CriticalSectionWrapper::CreateCriticalSection()),
-#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
- debug_file_(FileWrapper::Create()),
- event_msg_(new audioproc::Event()),
-#endif
- api_format_({{{kSampleRate16kHz, 1, false},
- {kSampleRate16kHz, 1, false},
- {kSampleRate16kHz, 1, false},
- {kSampleRate16kHz, 1, false}}}),
- fwd_proc_format_(kSampleRate16kHz),
- rev_proc_format_(kSampleRate16kHz, 1),
- split_rate_(kSampleRate16kHz),
- stream_delay_ms_(0),
- delay_offset_ms_(0),
- was_stream_delay_set_(false),
- last_stream_delay_ms_(0),
- last_aec_system_delay_ms_(0),
- stream_delay_jumps_(-1),
- aec_system_delay_jumps_(-1),
- output_will_be_muted_(false),
- key_pressed_(false),
+ : public_submodules_(new ApmPublicSubmodules()),
+ private_submodules_(new ApmPrivateSubmodules(beamformer)),
+ constants_(config.Get<ExperimentalAgc>().startup_min_volume,
#if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS)
- use_new_agc_(false),
+ false,
#else
- use_new_agc_(config.Get<ExperimentalAgc>().enabled),
+ config.Get<ExperimentalAgc>().enabled,
#endif
- agc_startup_min_volume_(config.Get<ExperimentalAgc>().startup_min_volume),
+ config.Get<Intelligibility>().enabled),
+
#if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS)
- transient_suppressor_enabled_(false),
+ capture_(false,
#else
- transient_suppressor_enabled_(config.Get<ExperimentalNs>().enabled),
+ capture_(config.Get<ExperimentalNs>().enabled,
#endif
- beamformer_enabled_(config.Get<Beamforming>().enabled),
- beamformer_(beamformer),
- array_geometry_(config.Get<Beamforming>().array_geometry),
- target_direction_(config.Get<Beamforming>().target_direction),
- intelligibility_enabled_(config.Get<Intelligibility>().enabled) {
- echo_cancellation_ = new EchoCancellationImpl(this, crit_);
- component_list_.push_back(echo_cancellation_);
-
- echo_control_mobile_ = new EchoControlMobileImpl(this, crit_);
- component_list_.push_back(echo_control_mobile_);
-
- gain_control_ = new GainControlImpl(this, crit_);
- component_list_.push_back(gain_control_);
-
- high_pass_filter_ = new HighPassFilterImpl(this, crit_);
- component_list_.push_back(high_pass_filter_);
-
- level_estimator_ = new LevelEstimatorImpl(this, crit_);
- component_list_.push_back(level_estimator_);
-
- noise_suppression_ = new NoiseSuppressionImpl(this, crit_);
- component_list_.push_back(noise_suppression_);
-
- voice_detection_ = new VoiceDetectionImpl(this, crit_);
- component_list_.push_back(voice_detection_);
-
- gain_control_for_new_agc_.reset(new GainControlForNewAgc(gain_control_));
+ config.Get<Beamforming>().array_geometry,
+ config.Get<Beamforming>().target_direction),
+ capture_nonlocked_(config.Get<Beamforming>().enabled) {
+ {
+ rtc::CritScope cs_render(&crit_render_);
+ rtc::CritScope cs_capture(&crit_capture_);
+
+ public_submodules_->echo_cancellation =
+ new EchoCancellationImpl(this, &crit_render_, &crit_capture_);
+ public_submodules_->echo_control_mobile =
+ new EchoControlMobileImpl(this, &crit_render_, &crit_capture_);
+ public_submodules_->gain_control =
+ new GainControlImpl(this, &crit_capture_, &crit_capture_);
+ public_submodules_->high_pass_filter.reset(
+ new HighPassFilterImpl(&crit_capture_));
+ public_submodules_->level_estimator.reset(
+ new LevelEstimatorImpl(&crit_capture_));
+ public_submodules_->noise_suppression.reset(
+ new NoiseSuppressionImpl(&crit_capture_));
+ public_submodules_->voice_detection.reset(
+ new VoiceDetectionImpl(&crit_capture_));
+ public_submodules_->gain_control_for_new_agc.reset(
+ new GainControlForNewAgc(public_submodules_->gain_control));
+
+ private_submodules_->component_list.push_back(
+ public_submodules_->echo_cancellation);
+ private_submodules_->component_list.push_back(
+ public_submodules_->echo_control_mobile);
+ private_submodules_->component_list.push_back(
+ public_submodules_->gain_control);
+ }
SetExtraOptions(config);
}
AudioProcessingImpl::~AudioProcessingImpl() {
- {
- CriticalSectionScoped crit_scoped(crit_);
- // Depends on gain_control_ and gain_control_for_new_agc_.
- agc_manager_.reset();
- // Depends on gain_control_.
- gain_control_for_new_agc_.reset();
- while (!component_list_.empty()) {
- ProcessingComponent* component = component_list_.front();
- component->Destroy();
- delete component;
- component_list_.pop_front();
- }
+ // Depends on public_submodules_->gain_control and
+ // public_submodules_->gain_control_for_new_agc.
+ private_submodules_->agc_manager.reset();
+ // Depends on public_submodules_->gain_control.
+ public_submodules_->gain_control_for_new_agc.reset();
+ while (!private_submodules_->component_list.empty()) {
+ ProcessingComponent* component =
+ private_submodules_->component_list.front();
+ component->Destroy();
+ delete component;
+ private_submodules_->component_list.pop_front();
+ }
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
- if (debug_file_->Open()) {
- debug_file_->CloseFile();
- }
-#endif
+ if (debug_dump_.debug_file->Open()) {
+ debug_dump_.debug_file->CloseFile();
}
- delete crit_;
- crit_ = NULL;
+#endif
}
int AudioProcessingImpl::Initialize() {
- CriticalSectionScoped crit_scoped(crit_);
+ // Run in a single-threaded manner during initialization.
+ rtc::CritScope cs_render(&crit_render_);
+ rtc::CritScope cs_capture(&crit_capture_);
return InitializeLocked();
}
@@ -306,44 +314,73 @@ int AudioProcessingImpl::Initialize(int input_sample_rate_hz,
}
int AudioProcessingImpl::Initialize(const ProcessingConfig& processing_config) {
- CriticalSectionScoped crit_scoped(crit_);
+ // Run in a single-threaded manner during initialization.
+ rtc::CritScope cs_render(&crit_render_);
+ rtc::CritScope cs_capture(&crit_capture_);
+ return InitializeLocked(processing_config);
+}
+
+int AudioProcessingImpl::MaybeInitializeRender(
+ const ProcessingConfig& processing_config) {
+ return MaybeInitialize(processing_config);
+}
+
+int AudioProcessingImpl::MaybeInitializeCapture(
+ const ProcessingConfig& processing_config) {
+ return MaybeInitialize(processing_config);
+}
+
+// Calls InitializeLocked() if any of the audio parameters have changed from
+// their current values (must be called while holding the crit_render_ lock).
+int AudioProcessingImpl::MaybeInitialize(
+ const ProcessingConfig& processing_config) {
+ // Called from both threads, so a thread check is not possible.
+ if (processing_config == formats_.api_format) {
+ return kNoError;
+ }
+
+ rtc::CritScope cs_capture(&crit_capture_);
return InitializeLocked(processing_config);
}
int AudioProcessingImpl::InitializeLocked() {
const int fwd_audio_buffer_channels =
- beamformer_enabled_ ? api_format_.input_stream().num_channels()
- : api_format_.output_stream().num_channels();
+ capture_nonlocked_.beamformer_enabled
+ ? formats_.api_format.input_stream().num_channels()
+ : formats_.api_format.output_stream().num_channels();
const int rev_audio_buffer_out_num_frames =
- api_format_.reverse_output_stream().num_frames() == 0
- ? rev_proc_format_.num_frames()
- : api_format_.reverse_output_stream().num_frames();
- if (api_format_.reverse_input_stream().num_channels() > 0) {
- render_audio_.reset(new AudioBuffer(
- api_format_.reverse_input_stream().num_frames(),
- api_format_.reverse_input_stream().num_channels(),
- rev_proc_format_.num_frames(), rev_proc_format_.num_channels(),
+ formats_.api_format.reverse_output_stream().num_frames() == 0
+ ? formats_.rev_proc_format.num_frames()
+ : formats_.api_format.reverse_output_stream().num_frames();
+ if (formats_.api_format.reverse_input_stream().num_channels() > 0) {
+ render_.render_audio.reset(new AudioBuffer(
+ formats_.api_format.reverse_input_stream().num_frames(),
+ formats_.api_format.reverse_input_stream().num_channels(),
+ formats_.rev_proc_format.num_frames(),
+ formats_.rev_proc_format.num_channels(),
rev_audio_buffer_out_num_frames));
if (rev_conversion_needed()) {
- render_converter_ = AudioConverter::Create(
- api_format_.reverse_input_stream().num_channels(),
- api_format_.reverse_input_stream().num_frames(),
- api_format_.reverse_output_stream().num_channels(),
- api_format_.reverse_output_stream().num_frames());
+ render_.render_converter = AudioConverter::Create(
+ formats_.api_format.reverse_input_stream().num_channels(),
+ formats_.api_format.reverse_input_stream().num_frames(),
+ formats_.api_format.reverse_output_stream().num_channels(),
+ formats_.api_format.reverse_output_stream().num_frames());
} else {
- render_converter_.reset(nullptr);
+ render_.render_converter.reset(nullptr);
}
} else {
- render_audio_.reset(nullptr);
- render_converter_.reset(nullptr);
+ render_.render_audio.reset(nullptr);
+ render_.render_converter.reset(nullptr);
}
- capture_audio_.reset(new AudioBuffer(
- api_format_.input_stream().num_frames(),
- api_format_.input_stream().num_channels(), fwd_proc_format_.num_frames(),
- fwd_audio_buffer_channels, api_format_.output_stream().num_frames()));
+ capture_.capture_audio.reset(
+ new AudioBuffer(formats_.api_format.input_stream().num_frames(),
+ formats_.api_format.input_stream().num_channels(),
+ capture_nonlocked_.fwd_proc_format.num_frames(),
+ fwd_audio_buffer_channels,
+ formats_.api_format.output_stream().num_frames()));
// Initialize all components.
- for (auto item : component_list_) {
+ for (auto item : private_submodules_->component_list) {
int err = item->Initialize();
if (err != kNoError) {
return err;
@@ -351,15 +388,16 @@ int AudioProcessingImpl::InitializeLocked() {
}
InitializeExperimentalAgc();
-
InitializeTransient();
-
InitializeBeamformer();
-
InitializeIntelligibility();
+ InitializeHighPassFilter();
+ InitializeNoiseSuppression();
+ InitializeLevelEstimator();
+ InitializeVoiceDetection();
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
- if (debug_file_->Open()) {
+ if (debug_dump_.debug_file->Open()) {
int err = WriteInitMessage();
if (err != kNoError) {
return err;
@@ -372,16 +410,13 @@ int AudioProcessingImpl::InitializeLocked() {
int AudioProcessingImpl::InitializeLocked(const ProcessingConfig& config) {
for (const auto& stream : config.streams) {
- if (stream.num_channels() < 0) {
- return kBadNumberChannelsError;
- }
if (stream.num_channels() > 0 && stream.sample_rate_hz() <= 0) {
return kBadSampleRateError;
}
}
- const int num_in_channels = config.input_stream().num_channels();
- const int num_out_channels = config.output_stream().num_channels();
+ const size_t num_in_channels = config.input_stream().num_channels();
+ const size_t num_out_channels = config.output_stream().num_channels();
// Need at least one input channel.
// Need either one output channel or as many outputs as there are inputs.
@@ -390,18 +425,17 @@ int AudioProcessingImpl::InitializeLocked(const ProcessingConfig& config) {
return kBadNumberChannelsError;
}
- if (beamformer_enabled_ &&
- (static_cast<size_t>(num_in_channels) != array_geometry_.size() ||
- num_out_channels > 1)) {
+ if (capture_nonlocked_.beamformer_enabled &&
+ num_in_channels != capture_.array_geometry.size()) {
return kBadNumberChannelsError;
}
- api_format_ = config;
+ formats_.api_format = config;
// We process at the closest native rate >= min(input rate, output rate)...
const int min_proc_rate =
- std::min(api_format_.input_stream().sample_rate_hz(),
- api_format_.output_stream().sample_rate_hz());
+ std::min(formats_.api_format.input_stream().sample_rate_hz(),
+ formats_.api_format.output_stream().sample_rate_hz());
int fwd_proc_rate;
for (size_t i = 0; i < kNumNativeSampleRates; ++i) {
fwd_proc_rate = kNativeSampleRatesHz[i];
@@ -410,20 +444,20 @@ int AudioProcessingImpl::InitializeLocked(const ProcessingConfig& config) {
}
}
// ...with one exception.
- if (echo_control_mobile_->is_enabled() &&
+ if (public_submodules_->echo_control_mobile->is_enabled() &&
min_proc_rate > kMaxAECMSampleRateHz) {
fwd_proc_rate = kMaxAECMSampleRateHz;
}
- fwd_proc_format_ = StreamConfig(fwd_proc_rate);
+ capture_nonlocked_.fwd_proc_format = StreamConfig(fwd_proc_rate);
// We normally process the reverse stream at 16 kHz. Unless...
int rev_proc_rate = kSampleRate16kHz;
- if (fwd_proc_format_.sample_rate_hz() == kSampleRate8kHz) {
+ if (capture_nonlocked_.fwd_proc_format.sample_rate_hz() == kSampleRate8kHz) {
// ...the forward stream is at 8 kHz.
rev_proc_rate = kSampleRate8kHz;
} else {
- if (api_format_.reverse_input_stream().sample_rate_hz() ==
+ if (formats_.api_format.reverse_input_stream().sample_rate_hz() ==
kSampleRate32kHz) {
// ...or the input is at 32 kHz, in which case we use the splitting
// filter rather than the resampler.
@@ -433,66 +467,89 @@ int AudioProcessingImpl::InitializeLocked(const ProcessingConfig& config) {
// Always downmix the reverse stream to mono for analysis. This has been
// demonstrated to work well for AEC in most practical scenarios.
- rev_proc_format_ = StreamConfig(rev_proc_rate, 1);
+ formats_.rev_proc_format = StreamConfig(rev_proc_rate, 1);
- if (fwd_proc_format_.sample_rate_hz() == kSampleRate32kHz ||
- fwd_proc_format_.sample_rate_hz() == kSampleRate48kHz) {
- split_rate_ = kSampleRate16kHz;
+ if (capture_nonlocked_.fwd_proc_format.sample_rate_hz() == kSampleRate32kHz ||
+ capture_nonlocked_.fwd_proc_format.sample_rate_hz() == kSampleRate48kHz) {
+ capture_nonlocked_.split_rate = kSampleRate16kHz;
} else {
- split_rate_ = fwd_proc_format_.sample_rate_hz();
+ capture_nonlocked_.split_rate =
+ capture_nonlocked_.fwd_proc_format.sample_rate_hz();
}
return InitializeLocked();
}
-// Calls InitializeLocked() if any of the audio parameters have changed from
-// their current values.
-int AudioProcessingImpl::MaybeInitializeLocked(
- const ProcessingConfig& processing_config) {
- if (processing_config == api_format_) {
- return kNoError;
- }
- return InitializeLocked(processing_config);
-}
-
void AudioProcessingImpl::SetExtraOptions(const Config& config) {
- CriticalSectionScoped crit_scoped(crit_);
- for (auto item : component_list_) {
+ // Run in a single-threaded manner when setting the extra options.
+ rtc::CritScope cs_render(&crit_render_);
+ rtc::CritScope cs_capture(&crit_capture_);
+ for (auto item : private_submodules_->component_list) {
item->SetExtraOptions(config);
}
- if (transient_suppressor_enabled_ != config.Get<ExperimentalNs>().enabled) {
- transient_suppressor_enabled_ = config.Get<ExperimentalNs>().enabled;
+ if (capture_.transient_suppressor_enabled !=
+ config.Get<ExperimentalNs>().enabled) {
+ capture_.transient_suppressor_enabled =
+ config.Get<ExperimentalNs>().enabled;
InitializeTransient();
}
+
+#ifdef WEBRTC_ANDROID_PLATFORM_BUILD
+ if (capture_nonlocked_.beamformer_enabled !=
+ config.Get<Beamforming>().enabled) {
+ capture_nonlocked_.beamformer_enabled = config.Get<Beamforming>().enabled;
+ if (config.Get<Beamforming>().array_geometry.size() > 1) {
+ capture_.array_geometry = config.Get<Beamforming>().array_geometry;
+ }
+ capture_.target_direction = config.Get<Beamforming>().target_direction;
+ InitializeBeamformer();
+ }
+#endif // WEBRTC_ANDROID_PLATFORM_BUILD
}
+int AudioProcessingImpl::input_sample_rate_hz() const {
+ // Accessed from outside APM, hence a lock is needed.
+ rtc::CritScope cs(&crit_capture_);
+ return formats_.api_format.input_stream().sample_rate_hz();
+}
int AudioProcessingImpl::proc_sample_rate_hz() const {
- return fwd_proc_format_.sample_rate_hz();
+ // Used as callback from submodules, hence locking is not allowed.
+ return capture_nonlocked_.fwd_proc_format.sample_rate_hz();
}
int AudioProcessingImpl::proc_split_sample_rate_hz() const {
- return split_rate_;
+ // Used as callback from submodules, hence locking is not allowed.
+ return capture_nonlocked_.split_rate;
+}
+
+size_t AudioProcessingImpl::num_reverse_channels() const {
+ // Used as callback from submodules, hence locking is not allowed.
+ return formats_.rev_proc_format.num_channels();
}
-int AudioProcessingImpl::num_reverse_channels() const {
- return rev_proc_format_.num_channels();
+size_t AudioProcessingImpl::num_input_channels() const {
+ // Used as callback from submodules, hence locking is not allowed.
+ return formats_.api_format.input_stream().num_channels();
}
-int AudioProcessingImpl::num_input_channels() const {
- return api_format_.input_stream().num_channels();
+size_t AudioProcessingImpl::num_proc_channels() const {
+ // Used as callback from submodules, hence locking is not allowed.
+ return capture_nonlocked_.beamformer_enabled ? 1 : num_output_channels();
}
-int AudioProcessingImpl::num_output_channels() const {
- return api_format_.output_stream().num_channels();
+size_t AudioProcessingImpl::num_output_channels() const {
+ // Used as callback from submodules, hence locking is not allowed.
+ return formats_.api_format.output_stream().num_channels();
}
void AudioProcessingImpl::set_output_will_be_muted(bool muted) {
- CriticalSectionScoped lock(crit_);
- output_will_be_muted_ = muted;
- if (agc_manager_.get()) {
- agc_manager_->SetCaptureMuted(output_will_be_muted_);
+ rtc::CritScope cs(&crit_capture_);
+ capture_.output_will_be_muted = muted;
+ if (private_submodules_->agc_manager.get()) {
+ private_submodules_->agc_manager->SetCaptureMuted(
+ capture_.output_will_be_muted);
}
}
@@ -504,13 +561,21 @@ int AudioProcessingImpl::ProcessStream(const float* const* src,
int output_sample_rate_hz,
ChannelLayout output_layout,
float* const* dest) {
- CriticalSectionScoped crit_scoped(crit_);
- StreamConfig input_stream = api_format_.input_stream();
+ TRACE_EVENT0("webrtc", "AudioProcessing::ProcessStream_ChannelLayout");
+ StreamConfig input_stream;
+ StreamConfig output_stream;
+ {
+ // Read formats_.api_format under the capture lock. The lock must then
+ // be released, as it is re-acquired in the ProcessStream() call below.
+ rtc::CritScope cs(&crit_capture_);
+ input_stream = formats_.api_format.input_stream();
+ output_stream = formats_.api_format.output_stream();
+ }
+
input_stream.set_sample_rate_hz(input_sample_rate_hz);
input_stream.set_num_channels(ChannelsFromLayout(input_layout));
input_stream.set_has_keyboard(LayoutHasKeyboard(input_layout));
-
- StreamConfig output_stream = api_format_.output_stream();
output_stream.set_sample_rate_hz(output_sample_rate_hz);
output_stream.set_num_channels(ChannelsFromLayout(output_layout));
output_stream.set_has_keyboard(LayoutHasKeyboard(output_layout));
@@ -525,44 +590,64 @@ int AudioProcessingImpl::ProcessStream(const float* const* src,
const StreamConfig& input_config,
const StreamConfig& output_config,
float* const* dest) {
- CriticalSectionScoped crit_scoped(crit_);
- if (!src || !dest) {
- return kNullPointerError;
+ TRACE_EVENT0("webrtc", "AudioProcessing::ProcessStream_StreamConfig");
+ ProcessingConfig processing_config;
+ {
+ // Acquire the capture lock in order to safely call the function
+ // that retrieves the render side data. This function accesses APM
+ // getters that need the capture lock held when called.
+ rtc::CritScope cs_capture(&crit_capture_);
+ public_submodules_->echo_cancellation->ReadQueuedRenderData();
+ public_submodules_->echo_control_mobile->ReadQueuedRenderData();
+ public_submodules_->gain_control->ReadQueuedRenderData();
+
+ if (!src || !dest) {
+ return kNullPointerError;
+ }
+
+ processing_config = formats_.api_format;
}
- ProcessingConfig processing_config = api_format_;
processing_config.input_stream() = input_config;
processing_config.output_stream() = output_config;
- RETURN_ON_ERR(MaybeInitializeLocked(processing_config));
+ {
+ // Do conditional reinitialization.
+ rtc::CritScope cs_render(&crit_render_);
+ RETURN_ON_ERR(MaybeInitializeCapture(processing_config));
+ }
+ rtc::CritScope cs_capture(&crit_capture_);
assert(processing_config.input_stream().num_frames() ==
- api_format_.input_stream().num_frames());
+ formats_.api_format.input_stream().num_frames());
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
- if (debug_file_->Open()) {
+ if (debug_dump_.debug_file->Open()) {
RETURN_ON_ERR(WriteConfigMessage(false));
- event_msg_->set_type(audioproc::Event::STREAM);
- audioproc::Stream* msg = event_msg_->mutable_stream();
+ debug_dump_.capture.event_msg->set_type(audioproc::Event::STREAM);
+ audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream();
const size_t channel_size =
- sizeof(float) * api_format_.input_stream().num_frames();
- for (int i = 0; i < api_format_.input_stream().num_channels(); ++i)
+ sizeof(float) * formats_.api_format.input_stream().num_frames();
+ for (size_t i = 0; i < formats_.api_format.input_stream().num_channels();
+ ++i)
msg->add_input_channel(src[i], channel_size);
}
#endif
- capture_audio_->CopyFrom(src, api_format_.input_stream());
+ capture_.capture_audio->CopyFrom(src, formats_.api_format.input_stream());
RETURN_ON_ERR(ProcessStreamLocked());
- capture_audio_->CopyTo(api_format_.output_stream(), dest);
+ capture_.capture_audio->CopyTo(formats_.api_format.output_stream(), dest);
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
- if (debug_file_->Open()) {
- audioproc::Stream* msg = event_msg_->mutable_stream();
+ if (debug_dump_.debug_file->Open()) {
+ audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream();
const size_t channel_size =
- sizeof(float) * api_format_.output_stream().num_frames();
- for (int i = 0; i < api_format_.output_stream().num_channels(); ++i)
+ sizeof(float) * formats_.api_format.output_stream().num_frames();
+ for (size_t i = 0; i < formats_.api_format.output_stream().num_channels();
+ ++i)
msg->add_output_channel(dest[i], channel_size);
- RETURN_ON_ERR(WriteMessageToDebugFile());
+ RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(),
+ &crit_debug_, &debug_dump_.capture));
}
#endif
@@ -570,7 +655,20 @@ int AudioProcessingImpl::ProcessStream(const float* const* src,
}
int AudioProcessingImpl::ProcessStream(AudioFrame* frame) {
- CriticalSectionScoped crit_scoped(crit_);
+ TRACE_EVENT0("webrtc", "AudioProcessing::ProcessStream_AudioFrame");
+ {
+ // Acquire the capture lock in order to safely call the function
+ // that retrieves the render side data. This function accesses APM
+ // getters that need the capture lock held when called.
+ // The lock then needs to be released, since
+ // public_submodules_->echo_control_mobile->is_enabled() acquires this
+ // lock as well.
+ rtc::CritScope cs_capture(&crit_capture_);
+ public_submodules_->echo_cancellation->ReadQueuedRenderData();
+ public_submodules_->echo_control_mobile->ReadQueuedRenderData();
+ public_submodules_->gain_control->ReadQueuedRenderData();
+ }
+
if (!frame) {
return kNullPointerError;
}
@@ -581,46 +679,62 @@ int AudioProcessingImpl::ProcessStream(AudioFrame* frame) {
frame->sample_rate_hz_ != kSampleRate48kHz) {
return kBadSampleRateError;
}
- if (echo_control_mobile_->is_enabled() &&
+
+ if (public_submodules_->echo_control_mobile->is_enabled() &&
frame->sample_rate_hz_ > kMaxAECMSampleRateHz) {
LOG(LS_ERROR) << "AECM only supports 16 or 8 kHz sample rates";
return kUnsupportedComponentError;
}
- // TODO(ajm): The input and output rates and channels are currently
- // constrained to be identical in the int16 interface.
- ProcessingConfig processing_config = api_format_;
+ ProcessingConfig processing_config;
+ {
+ // Acquire the lock to access api_format.
+ // The lock is released immediately afterwards, since the conditional
+ // reinitialization below takes the locks itself.
+ rtc::CritScope cs_capture(&crit_capture_);
+ // TODO(ajm): The input and output rates and channels are currently
+ // constrained to be identical in the int16 interface.
+ processing_config = formats_.api_format;
+ }
processing_config.input_stream().set_sample_rate_hz(frame->sample_rate_hz_);
processing_config.input_stream().set_num_channels(frame->num_channels_);
processing_config.output_stream().set_sample_rate_hz(frame->sample_rate_hz_);
processing_config.output_stream().set_num_channels(frame->num_channels_);
- RETURN_ON_ERR(MaybeInitializeLocked(processing_config));
- if (frame->samples_per_channel_ != api_format_.input_stream().num_frames()) {
+ {
+ // Do conditional reinitialization.
+ rtc::CritScope cs_render(&crit_render_);
+ RETURN_ON_ERR(MaybeInitializeCapture(processing_config));
+ }
+ rtc::CritScope cs_capture(&crit_capture_);
+ if (frame->samples_per_channel_ !=
+ formats_.api_format.input_stream().num_frames()) {
return kBadDataLengthError;
}
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
- if (debug_file_->Open()) {
- event_msg_->set_type(audioproc::Event::STREAM);
- audioproc::Stream* msg = event_msg_->mutable_stream();
+ if (debug_dump_.debug_file->Open()) {
+ debug_dump_.capture.event_msg->set_type(audioproc::Event::STREAM);
+ audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream();
const size_t data_size =
sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_;
msg->set_input_data(frame->data_, data_size);
}
#endif
- capture_audio_->DeinterleaveFrom(frame);
+ capture_.capture_audio->DeinterleaveFrom(frame);
RETURN_ON_ERR(ProcessStreamLocked());
- capture_audio_->InterleaveTo(frame, output_copy_needed(is_data_processed()));
+ capture_.capture_audio->InterleaveTo(frame,
+ output_copy_needed(is_data_processed()));
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
- if (debug_file_->Open()) {
- audioproc::Stream* msg = event_msg_->mutable_stream();
+ if (debug_dump_.debug_file->Open()) {
+ audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream();
const size_t data_size =
sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_;
msg->set_output_data(frame->data_, data_size);
- RETURN_ON_ERR(WriteMessageToDebugFile());
+ RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(),
+ &crit_debug_, &debug_dump_.capture));
}
#endif
@@ -629,22 +743,25 @@ int AudioProcessingImpl::ProcessStream(AudioFrame* frame) {
int AudioProcessingImpl::ProcessStreamLocked() {
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
- if (debug_file_->Open()) {
- audioproc::Stream* msg = event_msg_->mutable_stream();
- msg->set_delay(stream_delay_ms_);
- msg->set_drift(echo_cancellation_->stream_drift_samples());
+ if (debug_dump_.debug_file->Open()) {
+ audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream();
+ msg->set_delay(capture_nonlocked_.stream_delay_ms);
+ msg->set_drift(
+ public_submodules_->echo_cancellation->stream_drift_samples());
msg->set_level(gain_control()->stream_analog_level());
- msg->set_keypress(key_pressed_);
+ msg->set_keypress(capture_.key_pressed);
}
#endif
MaybeUpdateHistograms();
- AudioBuffer* ca = capture_audio_.get(); // For brevity.
+ AudioBuffer* ca = capture_.capture_audio.get(); // For brevity.
- if (use_new_agc_ && gain_control_->is_enabled()) {
- agc_manager_->AnalyzePreProcess(ca->channels()[0], ca->num_channels(),
- fwd_proc_format_.num_frames());
+ if (constants_.use_new_agc &&
+ public_submodules_->gain_control->is_enabled()) {
+ private_submodules_->agc_manager->AnalyzePreProcess(
+ ca->channels()[0], ca->num_channels(),
+ capture_nonlocked_.fwd_proc_format.num_frames());
}
bool data_processed = is_data_processed();
@@ -652,34 +769,41 @@ int AudioProcessingImpl::ProcessStreamLocked() {
ca->SplitIntoFrequencyBands();
}
- if (intelligibility_enabled_) {
- intelligibility_enhancer_->AnalyzeCaptureAudio(
- ca->split_channels_f(kBand0To8kHz), split_rate_, ca->num_channels());
+ if (constants_.intelligibility_enabled) {
+ public_submodules_->intelligibility_enhancer->AnalyzeCaptureAudio(
+ ca->split_channels_f(kBand0To8kHz), capture_nonlocked_.split_rate,
+ ca->num_channels());
}
- if (beamformer_enabled_) {
- beamformer_->ProcessChunk(*ca->split_data_f(), ca->split_data_f());
+ if (capture_nonlocked_.beamformer_enabled) {
+ private_submodules_->beamformer->ProcessChunk(*ca->split_data_f(),
+ ca->split_data_f());
ca->set_num_channels(1);
}
- RETURN_ON_ERR(high_pass_filter_->ProcessCaptureAudio(ca));
- RETURN_ON_ERR(gain_control_->AnalyzeCaptureAudio(ca));
- RETURN_ON_ERR(noise_suppression_->AnalyzeCaptureAudio(ca));
- RETURN_ON_ERR(echo_cancellation_->ProcessCaptureAudio(ca));
+ public_submodules_->high_pass_filter->ProcessCaptureAudio(ca);
+ RETURN_ON_ERR(public_submodules_->gain_control->AnalyzeCaptureAudio(ca));
+ public_submodules_->noise_suppression->AnalyzeCaptureAudio(ca);
+ RETURN_ON_ERR(public_submodules_->echo_cancellation->ProcessCaptureAudio(ca));
- if (echo_control_mobile_->is_enabled() && noise_suppression_->is_enabled()) {
+ if (public_submodules_->echo_control_mobile->is_enabled() &&
+ public_submodules_->noise_suppression->is_enabled()) {
ca->CopyLowPassToReference();
}
- RETURN_ON_ERR(noise_suppression_->ProcessCaptureAudio(ca));
- RETURN_ON_ERR(echo_control_mobile_->ProcessCaptureAudio(ca));
- RETURN_ON_ERR(voice_detection_->ProcessCaptureAudio(ca));
+ public_submodules_->noise_suppression->ProcessCaptureAudio(ca);
+ RETURN_ON_ERR(
+ public_submodules_->echo_control_mobile->ProcessCaptureAudio(ca));
+ public_submodules_->voice_detection->ProcessCaptureAudio(ca);
- if (use_new_agc_ && gain_control_->is_enabled() &&
- (!beamformer_enabled_ || beamformer_->is_target_present())) {
- agc_manager_->Process(ca->split_bands_const(0)[kBand0To8kHz],
- ca->num_frames_per_band(), split_rate_);
+ if (constants_.use_new_agc &&
+ public_submodules_->gain_control->is_enabled() &&
+ (!capture_nonlocked_.beamformer_enabled ||
+ private_submodules_->beamformer->is_target_present())) {
+ private_submodules_->agc_manager->Process(
+ ca->split_bands_const(0)[kBand0To8kHz], ca->num_frames_per_band(),
+ capture_nonlocked_.split_rate);
}
- RETURN_ON_ERR(gain_control_->ProcessCaptureAudio(ca));
+ RETURN_ON_ERR(public_submodules_->gain_control->ProcessCaptureAudio(ca));
if (synthesis_needed(data_processed)) {
ca->MergeFrequencyBands();
@@ -687,21 +811,23 @@ int AudioProcessingImpl::ProcessStreamLocked() {
// TODO(aluebs): Investigate if the transient suppression placement should be
// before or after the AGC.
- if (transient_suppressor_enabled_) {
+ if (capture_.transient_suppressor_enabled) {
float voice_probability =
- agc_manager_.get() ? agc_manager_->voice_probability() : 1.f;
+ private_submodules_->agc_manager.get()
+ ? private_submodules_->agc_manager->voice_probability()
+ : 1.f;
- transient_suppressor_->Suppress(
+ public_submodules_->transient_suppressor->Suppress(
ca->channels_f()[0], ca->num_frames(), ca->num_channels(),
ca->split_bands_const_f(0)[kBand0To8kHz], ca->num_frames_per_band(),
ca->keyboard_data(), ca->num_keyboard_frames(), voice_probability,
- key_pressed_);
+ capture_.key_pressed);
}
// The level estimator operates on the recombined data.
- RETURN_ON_ERR(level_estimator_->ProcessStream(ca));
+ public_submodules_->level_estimator->ProcessStream(ca);
- was_stream_delay_set_ = false;
+ capture_.was_stream_delay_set = false;
return kNoError;
}
@@ -709,13 +835,15 @@ int AudioProcessingImpl::AnalyzeReverseStream(const float* const* data,
size_t samples_per_channel,
int rev_sample_rate_hz,
ChannelLayout layout) {
+ TRACE_EVENT0("webrtc", "AudioProcessing::AnalyzeReverseStream_ChannelLayout");
+ rtc::CritScope cs(&crit_render_);
const StreamConfig reverse_config = {
rev_sample_rate_hz, ChannelsFromLayout(layout), LayoutHasKeyboard(layout),
};
if (samples_per_channel != reverse_config.num_frames()) {
return kBadDataLengthError;
}
- return AnalyzeReverseStream(data, reverse_config, reverse_config);
+ return AnalyzeReverseStreamLocked(data, reverse_config, reverse_config);
}
int AudioProcessingImpl::ProcessReverseStream(
@@ -723,13 +851,17 @@ int AudioProcessingImpl::ProcessReverseStream(
const StreamConfig& reverse_input_config,
const StreamConfig& reverse_output_config,
float* const* dest) {
- RETURN_ON_ERR(
- AnalyzeReverseStream(src, reverse_input_config, reverse_output_config));
+ TRACE_EVENT0("webrtc", "AudioProcessing::ProcessReverseStream_StreamConfig");
+ rtc::CritScope cs(&crit_render_);
+ RETURN_ON_ERR(AnalyzeReverseStreamLocked(src, reverse_input_config,
+ reverse_output_config));
if (is_rev_processed()) {
- render_audio_->CopyTo(api_format_.reverse_output_stream(), dest);
- } else if (rev_conversion_needed()) {
- render_converter_->Convert(src, reverse_input_config.num_samples(), dest,
- reverse_output_config.num_samples());
+ render_.render_audio->CopyTo(formats_.api_format.reverse_output_stream(),
+ dest);
+ } else if (render_check_rev_conversion_needed()) {
+ render_.render_converter->Convert(src, reverse_input_config.num_samples(),
+ dest,
+ reverse_output_config.num_samples());
} else {
CopyAudioIfNeeded(src, reverse_input_config.num_frames(),
reverse_input_config.num_channels(), dest);
@@ -738,55 +870,61 @@ int AudioProcessingImpl::ProcessReverseStream(
return kNoError;
}
-int AudioProcessingImpl::AnalyzeReverseStream(
+int AudioProcessingImpl::AnalyzeReverseStreamLocked(
const float* const* src,
const StreamConfig& reverse_input_config,
const StreamConfig& reverse_output_config) {
- CriticalSectionScoped crit_scoped(crit_);
- if (src == NULL) {
+ if (src == nullptr) {
return kNullPointerError;
}
- if (reverse_input_config.num_channels() <= 0) {
+ if (reverse_input_config.num_channels() == 0) {
return kBadNumberChannelsError;
}
- ProcessingConfig processing_config = api_format_;
+ ProcessingConfig processing_config = formats_.api_format;
processing_config.reverse_input_stream() = reverse_input_config;
processing_config.reverse_output_stream() = reverse_output_config;
- RETURN_ON_ERR(MaybeInitializeLocked(processing_config));
+ RETURN_ON_ERR(MaybeInitializeRender(processing_config));
assert(reverse_input_config.num_frames() ==
- api_format_.reverse_input_stream().num_frames());
+ formats_.api_format.reverse_input_stream().num_frames());
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
- if (debug_file_->Open()) {
- event_msg_->set_type(audioproc::Event::REVERSE_STREAM);
- audioproc::ReverseStream* msg = event_msg_->mutable_reverse_stream();
+ if (debug_dump_.debug_file->Open()) {
+ debug_dump_.render.event_msg->set_type(audioproc::Event::REVERSE_STREAM);
+ audioproc::ReverseStream* msg =
+ debug_dump_.render.event_msg->mutable_reverse_stream();
const size_t channel_size =
- sizeof(float) * api_format_.reverse_input_stream().num_frames();
- for (int i = 0; i < api_format_.reverse_input_stream().num_channels(); ++i)
+ sizeof(float) * formats_.api_format.reverse_input_stream().num_frames();
+ for (size_t i = 0;
+ i < formats_.api_format.reverse_input_stream().num_channels(); ++i)
msg->add_channel(src[i], channel_size);
- RETURN_ON_ERR(WriteMessageToDebugFile());
+ RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(),
+ &crit_debug_, &debug_dump_.render));
}
#endif
- render_audio_->CopyFrom(src, api_format_.reverse_input_stream());
+ render_.render_audio->CopyFrom(src,
+ formats_.api_format.reverse_input_stream());
return ProcessReverseStreamLocked();
}
int AudioProcessingImpl::ProcessReverseStream(AudioFrame* frame) {
+ TRACE_EVENT0("webrtc", "AudioProcessing::ProcessReverseStream_AudioFrame");
RETURN_ON_ERR(AnalyzeReverseStream(frame));
+ rtc::CritScope cs(&crit_render_);
if (is_rev_processed()) {
- render_audio_->InterleaveTo(frame, true);
+ render_.render_audio->InterleaveTo(frame, true);
}
return kNoError;
}
int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) {
- CriticalSectionScoped crit_scoped(crit_);
- if (frame == NULL) {
+ TRACE_EVENT0("webrtc", "AudioProcessing::AnalyzeReverseStream_AudioFrame");
+ rtc::CritScope cs(&crit_render_);
+ if (frame == nullptr) {
return kNullPointerError;
}
// Must be a native rate.
@@ -797,7 +935,8 @@ int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) {
return kBadSampleRateError;
}
// This interface does not tolerate different forward and reverse rates.
- if (frame->sample_rate_hz_ != api_format_.input_stream().sample_rate_hz()) {
+ if (frame->sample_rate_hz_ !=
+ formats_.api_format.input_stream().sample_rate_hz()) {
return kBadSampleRateError;
}
@@ -805,7 +944,7 @@ int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) {
return kBadNumberChannelsError;
}
- ProcessingConfig processing_config = api_format_;
+ ProcessingConfig processing_config = formats_.api_format;
processing_config.reverse_input_stream().set_sample_rate_hz(
frame->sample_rate_hz_);
processing_config.reverse_input_stream().set_num_channels(
@@ -815,44 +954,52 @@ int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) {
processing_config.reverse_output_stream().set_num_channels(
frame->num_channels_);
- RETURN_ON_ERR(MaybeInitializeLocked(processing_config));
+ RETURN_ON_ERR(MaybeInitializeRender(processing_config));
if (frame->samples_per_channel_ !=
- api_format_.reverse_input_stream().num_frames()) {
+ formats_.api_format.reverse_input_stream().num_frames()) {
return kBadDataLengthError;
}
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
- if (debug_file_->Open()) {
- event_msg_->set_type(audioproc::Event::REVERSE_STREAM);
- audioproc::ReverseStream* msg = event_msg_->mutable_reverse_stream();
+ if (debug_dump_.debug_file->Open()) {
+ debug_dump_.render.event_msg->set_type(audioproc::Event::REVERSE_STREAM);
+ audioproc::ReverseStream* msg =
+ debug_dump_.render.event_msg->mutable_reverse_stream();
const size_t data_size =
sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_;
msg->set_data(frame->data_, data_size);
- RETURN_ON_ERR(WriteMessageToDebugFile());
+ RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(),
+ &crit_debug_, &debug_dump_.render));
}
#endif
- render_audio_->DeinterleaveFrom(frame);
+ render_.render_audio->DeinterleaveFrom(frame);
return ProcessReverseStreamLocked();
}
int AudioProcessingImpl::ProcessReverseStreamLocked() {
- AudioBuffer* ra = render_audio_.get(); // For brevity.
- if (rev_proc_format_.sample_rate_hz() == kSampleRate32kHz) {
+ AudioBuffer* ra = render_.render_audio.get(); // For brevity.
+ if (formats_.rev_proc_format.sample_rate_hz() == kSampleRate32kHz) {
ra->SplitIntoFrequencyBands();
}
- if (intelligibility_enabled_) {
- intelligibility_enhancer_->ProcessRenderAudio(
- ra->split_channels_f(kBand0To8kHz), split_rate_, ra->num_channels());
+ if (constants_.intelligibility_enabled) {
+ // Currently runs in single-threaded mode when the intelligibility
+ // enhancer is activated.
+ // TODO(peah): Fix to be properly multi-threaded.
+ rtc::CritScope cs(&crit_capture_);
+ public_submodules_->intelligibility_enhancer->ProcessRenderAudio(
+ ra->split_channels_f(kBand0To8kHz), capture_nonlocked_.split_rate,
+ ra->num_channels());
}
- RETURN_ON_ERR(echo_cancellation_->ProcessRenderAudio(ra));
- RETURN_ON_ERR(echo_control_mobile_->ProcessRenderAudio(ra));
- if (!use_new_agc_) {
- RETURN_ON_ERR(gain_control_->ProcessRenderAudio(ra));
+ RETURN_ON_ERR(public_submodules_->echo_cancellation->ProcessRenderAudio(ra));
+ RETURN_ON_ERR(
+ public_submodules_->echo_control_mobile->ProcessRenderAudio(ra));
+ if (!constants_.use_new_agc) {
+ RETURN_ON_ERR(public_submodules_->gain_control->ProcessRenderAudio(ra));
}
- if (rev_proc_format_.sample_rate_hz() == kSampleRate32kHz &&
+ if (formats_.rev_proc_format.sample_rate_hz() == kSampleRate32kHz &&
is_rev_processed()) {
ra->MergeFrequencyBands();
}
@@ -861,9 +1008,10 @@ int AudioProcessingImpl::ProcessReverseStreamLocked() {
}
int AudioProcessingImpl::set_stream_delay_ms(int delay) {
+ rtc::CritScope cs(&crit_capture_);
Error retval = kNoError;
- was_stream_delay_set_ = true;
- delay += delay_offset_ms_;
+ capture_.was_stream_delay_set = true;
+ delay += capture_.delay_offset_ms;
if (delay < 0) {
delay = 0;
@@ -876,50 +1024,56 @@ int AudioProcessingImpl::set_stream_delay_ms(int delay) {
retval = kBadStreamParameterWarning;
}
- stream_delay_ms_ = delay;
+ capture_nonlocked_.stream_delay_ms = delay;
return retval;
}
int AudioProcessingImpl::stream_delay_ms() const {
- return stream_delay_ms_;
+ // Used as callback from submodules, hence locking is not allowed.
+ return capture_nonlocked_.stream_delay_ms;
}
bool AudioProcessingImpl::was_stream_delay_set() const {
- return was_stream_delay_set_;
+ // Used as callback from submodules, hence locking is not allowed.
+ return capture_.was_stream_delay_set;
}
void AudioProcessingImpl::set_stream_key_pressed(bool key_pressed) {
- key_pressed_ = key_pressed;
+ rtc::CritScope cs(&crit_capture_);
+ capture_.key_pressed = key_pressed;
}
void AudioProcessingImpl::set_delay_offset_ms(int offset) {
- CriticalSectionScoped crit_scoped(crit_);
- delay_offset_ms_ = offset;
+ rtc::CritScope cs(&crit_capture_);
+ capture_.delay_offset_ms = offset;
}
int AudioProcessingImpl::delay_offset_ms() const {
- return delay_offset_ms_;
+ rtc::CritScope cs(&crit_capture_);
+ return capture_.delay_offset_ms;
}
int AudioProcessingImpl::StartDebugRecording(
const char filename[AudioProcessing::kMaxFilenameSize]) {
- CriticalSectionScoped crit_scoped(crit_);
+ // Run in a single-threaded manner.
+ rtc::CritScope cs_render(&crit_render_);
+ rtc::CritScope cs_capture(&crit_capture_);
static_assert(kMaxFilenameSize == FileWrapper::kMaxFileNameSize, "");
- if (filename == NULL) {
+ if (filename == nullptr) {
return kNullPointerError;
}
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
// Stop any ongoing recording.
- if (debug_file_->Open()) {
- if (debug_file_->CloseFile() == -1) {
+ if (debug_dump_.debug_file->Open()) {
+ if (debug_dump_.debug_file->CloseFile() == -1) {
return kFileError;
}
}
- if (debug_file_->OpenFile(filename, false) == -1) {
- debug_file_->CloseFile();
+ if (debug_dump_.debug_file->OpenFile(filename, false) == -1) {
+ debug_dump_.debug_file->CloseFile();
return kFileError;
}
@@ -932,21 +1086,23 @@ int AudioProcessingImpl::StartDebugRecording(
}
int AudioProcessingImpl::StartDebugRecording(FILE* handle) {
- CriticalSectionScoped crit_scoped(crit_);
+ // Run in a single-threaded manner.
+ rtc::CritScope cs_render(&crit_render_);
+ rtc::CritScope cs_capture(&crit_capture_);
- if (handle == NULL) {
+ if (handle == nullptr) {
return kNullPointerError;
}
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
// Stop any ongoing recording.
- if (debug_file_->Open()) {
- if (debug_file_->CloseFile() == -1) {
+ if (debug_dump_.debug_file->Open()) {
+ if (debug_dump_.debug_file->CloseFile() == -1) {
return kFileError;
}
}
- if (debug_file_->OpenFromFileHandle(handle, true, false) == -1) {
+ if (debug_dump_.debug_file->OpenFromFileHandle(handle, true, false) == -1) {
return kFileError;
}
@@ -960,17 +1116,22 @@ int AudioProcessingImpl::StartDebugRecording(FILE* handle) {
int AudioProcessingImpl::StartDebugRecordingForPlatformFile(
rtc::PlatformFile handle) {
+ // Run in a single-threaded manner.
+ rtc::CritScope cs_render(&crit_render_);
+ rtc::CritScope cs_capture(&crit_capture_);
FILE* stream = rtc::FdopenPlatformFileForWriting(handle);
return StartDebugRecording(stream);
}
int AudioProcessingImpl::StopDebugRecording() {
- CriticalSectionScoped crit_scoped(crit_);
+ // Run in a single-threaded manner.
+ rtc::CritScope cs_render(&crit_render_);
+ rtc::CritScope cs_capture(&crit_capture_);
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
// We just return if recording hasn't started.
- if (debug_file_->Open()) {
- if (debug_file_->CloseFile() == -1) {
+ if (debug_dump_.debug_file->Open()) {
+ if (debug_dump_.debug_file->CloseFile() == -1) {
return kFileError;
}
}
@@ -981,58 +1142,87 @@ int AudioProcessingImpl::StopDebugRecording() {
}
EchoCancellation* AudioProcessingImpl::echo_cancellation() const {
- return echo_cancellation_;
+ // Adding a lock here would have no effect, since the returned pointer
+ // still permits unsynchronized access to the submodule.
+ return public_submodules_->echo_cancellation;
}
EchoControlMobile* AudioProcessingImpl::echo_control_mobile() const {
- return echo_control_mobile_;
+ // Adding a lock here would have no effect, since the returned pointer
+ // still permits unsynchronized access to the submodule.
+ return public_submodules_->echo_control_mobile;
}
GainControl* AudioProcessingImpl::gain_control() const {
- if (use_new_agc_) {
- return gain_control_for_new_agc_.get();
+ // Adding a lock here would have no effect, since the returned pointer
+ // still permits unsynchronized access to the submodule.
+ if (constants_.use_new_agc) {
+ return public_submodules_->gain_control_for_new_agc.get();
}
- return gain_control_;
+ return public_submodules_->gain_control;
}
HighPassFilter* AudioProcessingImpl::high_pass_filter() const {
- return high_pass_filter_;
+ // Adding a lock here would have no effect, since the returned pointer
+ // still permits unsynchronized access to the submodule.
+ return public_submodules_->high_pass_filter.get();
}
LevelEstimator* AudioProcessingImpl::level_estimator() const {
- return level_estimator_;
+ // Adding a lock here would have no effect, since the returned pointer
+ // still permits unsynchronized access to the submodule.
+ return public_submodules_->level_estimator.get();
}
NoiseSuppression* AudioProcessingImpl::noise_suppression() const {
- return noise_suppression_;
+ // Adding a lock here would have no effect, since the returned pointer
+ // still permits unsynchronized access to the submodule.
+ return public_submodules_->noise_suppression.get();
}
VoiceDetection* AudioProcessingImpl::voice_detection() const {
- return voice_detection_;
+ // Adding a lock here would have no effect, since the returned pointer
+ // grants unrestricted access to the submodule anyway.
+ return public_submodules_->voice_detection.get();
}
bool AudioProcessingImpl::is_data_processed() const {
- if (beamformer_enabled_) {
+ if (capture_nonlocked_.beamformer_enabled) {
return true;
}
int enabled_count = 0;
- for (auto item : component_list_) {
+ for (auto item : private_submodules_->component_list) {
if (item->is_component_enabled()) {
enabled_count++;
}
}
+ if (public_submodules_->high_pass_filter->is_enabled()) {
+ enabled_count++;
+ }
+ if (public_submodules_->noise_suppression->is_enabled()) {
+ enabled_count++;
+ }
+ if (public_submodules_->level_estimator->is_enabled()) {
+ enabled_count++;
+ }
+ if (public_submodules_->voice_detection->is_enabled()) {
+ enabled_count++;
+ }
- // Data is unchanged if no components are enabled, or if only level_estimator_
- // or voice_detection_ is enabled.
+ // Data is unchanged if no components are enabled, or if only
+ // public_submodules_->level_estimator or
+ // public_submodules_->voice_detection is enabled.
if (enabled_count == 0) {
return false;
} else if (enabled_count == 1) {
- if (level_estimator_->is_enabled() || voice_detection_->is_enabled()) {
+ if (public_submodules_->level_estimator->is_enabled() ||
+ public_submodules_->voice_detection->is_enabled()) {
return false;
}
} else if (enabled_count == 2) {
- if (level_estimator_->is_enabled() && voice_detection_->is_enabled()) {
+ if (public_submodules_->level_estimator->is_enabled() &&
+ public_submodules_->voice_detection->is_enabled()) {
return false;
}
}
@@ -1041,149 +1231,194 @@ bool AudioProcessingImpl::is_data_processed() const {
bool AudioProcessingImpl::output_copy_needed(bool is_data_processed) const {
// Check if we've upmixed or downmixed the audio.
- return ((api_format_.output_stream().num_channels() !=
- api_format_.input_stream().num_channels()) ||
- is_data_processed || transient_suppressor_enabled_);
+ return ((formats_.api_format.output_stream().num_channels() !=
+ formats_.api_format.input_stream().num_channels()) ||
+ is_data_processed || capture_.transient_suppressor_enabled);
}
bool AudioProcessingImpl::synthesis_needed(bool is_data_processed) const {
return (is_data_processed &&
- (fwd_proc_format_.sample_rate_hz() == kSampleRate32kHz ||
- fwd_proc_format_.sample_rate_hz() == kSampleRate48kHz));
+ (capture_nonlocked_.fwd_proc_format.sample_rate_hz() ==
+ kSampleRate32kHz ||
+ capture_nonlocked_.fwd_proc_format.sample_rate_hz() ==
+ kSampleRate48kHz));
}
bool AudioProcessingImpl::analysis_needed(bool is_data_processed) const {
- if (!is_data_processed && !voice_detection_->is_enabled() &&
- !transient_suppressor_enabled_) {
- // Only level_estimator_ is enabled.
+ if (!is_data_processed &&
+ !public_submodules_->voice_detection->is_enabled() &&
+ !capture_.transient_suppressor_enabled) {
+ // Only public_submodules_->level_estimator is enabled.
return false;
- } else if (fwd_proc_format_.sample_rate_hz() == kSampleRate32kHz ||
- fwd_proc_format_.sample_rate_hz() == kSampleRate48kHz) {
- // Something besides level_estimator_ is enabled, and we have super-wb.
+ } else if (capture_nonlocked_.fwd_proc_format.sample_rate_hz() ==
+ kSampleRate32kHz ||
+ capture_nonlocked_.fwd_proc_format.sample_rate_hz() ==
+ kSampleRate48kHz) {
+ // Something besides public_submodules_->level_estimator is enabled, and we
+ // have super-wb.
return true;
}
return false;
}
bool AudioProcessingImpl::is_rev_processed() const {
- return intelligibility_enabled_ && intelligibility_enhancer_->active();
+ return constants_.intelligibility_enabled &&
+ public_submodules_->intelligibility_enhancer->active();
+}
+
+bool AudioProcessingImpl::render_check_rev_conversion_needed() const {
+ return rev_conversion_needed();
}
bool AudioProcessingImpl::rev_conversion_needed() const {
- return (api_format_.reverse_input_stream() !=
- api_format_.reverse_output_stream());
+ return (formats_.api_format.reverse_input_stream() !=
+ formats_.api_format.reverse_output_stream());
}
void AudioProcessingImpl::InitializeExperimentalAgc() {
- if (use_new_agc_) {
- if (!agc_manager_.get()) {
- agc_manager_.reset(new AgcManagerDirect(gain_control_,
- gain_control_for_new_agc_.get(),
- agc_startup_min_volume_));
+ if (constants_.use_new_agc) {
+ if (!private_submodules_->agc_manager.get()) {
+ private_submodules_->agc_manager.reset(new AgcManagerDirect(
+ public_submodules_->gain_control,
+ public_submodules_->gain_control_for_new_agc.get(),
+ constants_.agc_startup_min_volume));
}
- agc_manager_->Initialize();
- agc_manager_->SetCaptureMuted(output_will_be_muted_);
+ private_submodules_->agc_manager->Initialize();
+ private_submodules_->agc_manager->SetCaptureMuted(
+ capture_.output_will_be_muted);
}
}
void AudioProcessingImpl::InitializeTransient() {
- if (transient_suppressor_enabled_) {
- if (!transient_suppressor_.get()) {
- transient_suppressor_.reset(new TransientSuppressor());
+ if (capture_.transient_suppressor_enabled) {
+ if (!public_submodules_->transient_suppressor.get()) {
+ public_submodules_->transient_suppressor.reset(new TransientSuppressor());
}
- transient_suppressor_->Initialize(
- fwd_proc_format_.sample_rate_hz(), split_rate_,
- api_format_.output_stream().num_channels());
+ public_submodules_->transient_suppressor->Initialize(
+ capture_nonlocked_.fwd_proc_format.sample_rate_hz(),
+ capture_nonlocked_.split_rate,
+ num_proc_channels());
}
}
void AudioProcessingImpl::InitializeBeamformer() {
- if (beamformer_enabled_) {
- if (!beamformer_) {
- beamformer_.reset(
- new NonlinearBeamformer(array_geometry_, target_direction_));
+ if (capture_nonlocked_.beamformer_enabled) {
+ if (!private_submodules_->beamformer) {
+ private_submodules_->beamformer.reset(new NonlinearBeamformer(
+ capture_.array_geometry, capture_.target_direction));
}
- beamformer_->Initialize(kChunkSizeMs, split_rate_);
+ private_submodules_->beamformer->Initialize(kChunkSizeMs,
+ capture_nonlocked_.split_rate);
}
}
void AudioProcessingImpl::InitializeIntelligibility() {
- if (intelligibility_enabled_) {
+ if (constants_.intelligibility_enabled) {
IntelligibilityEnhancer::Config config;
- config.sample_rate_hz = split_rate_;
- config.num_capture_channels = capture_audio_->num_channels();
- config.num_render_channels = render_audio_->num_channels();
- intelligibility_enhancer_.reset(new IntelligibilityEnhancer(config));
+ config.sample_rate_hz = capture_nonlocked_.split_rate;
+ config.num_capture_channels = capture_.capture_audio->num_channels();
+ config.num_render_channels = render_.render_audio->num_channels();
+ public_submodules_->intelligibility_enhancer.reset(
+ new IntelligibilityEnhancer(config));
}
}
+void AudioProcessingImpl::InitializeHighPassFilter() {
+ public_submodules_->high_pass_filter->Initialize(num_proc_channels(),
+ proc_sample_rate_hz());
+}
+
+void AudioProcessingImpl::InitializeNoiseSuppression() {
+ public_submodules_->noise_suppression->Initialize(num_proc_channels(),
+ proc_sample_rate_hz());
+}
+
+void AudioProcessingImpl::InitializeLevelEstimator() {
+ public_submodules_->level_estimator->Initialize();
+}
+
+void AudioProcessingImpl::InitializeVoiceDetection() {
+ public_submodules_->voice_detection->Initialize(proc_split_sample_rate_hz());
+}
+
void AudioProcessingImpl::MaybeUpdateHistograms() {
static const int kMinDiffDelayMs = 60;
if (echo_cancellation()->is_enabled()) {
// Activate delay_jumps_ counters if we know echo_cancellation is running.
// If a stream has echo, we know that echo_cancellation is active.
- if (stream_delay_jumps_ == -1 && echo_cancellation()->stream_has_echo()) {
- stream_delay_jumps_ = 0;
+ if (capture_.stream_delay_jumps == -1 &&
+ echo_cancellation()->stream_has_echo()) {
+ capture_.stream_delay_jumps = 0;
}
- if (aec_system_delay_jumps_ == -1 &&
+ if (capture_.aec_system_delay_jumps == -1 &&
echo_cancellation()->stream_has_echo()) {
- aec_system_delay_jumps_ = 0;
+ capture_.aec_system_delay_jumps = 0;
}
// Detect a jump in platform reported system delay and log the difference.
- const int diff_stream_delay_ms = stream_delay_ms_ - last_stream_delay_ms_;
- if (diff_stream_delay_ms > kMinDiffDelayMs && last_stream_delay_ms_ != 0) {
- RTC_HISTOGRAM_COUNTS("WebRTC.Audio.PlatformReportedStreamDelayJump",
- diff_stream_delay_ms, kMinDiffDelayMs, 1000, 100);
- if (stream_delay_jumps_ == -1) {
- stream_delay_jumps_ = 0; // Activate counter if needed.
+ const int diff_stream_delay_ms =
+ capture_nonlocked_.stream_delay_ms - capture_.last_stream_delay_ms;
+ if (diff_stream_delay_ms > kMinDiffDelayMs &&
+ capture_.last_stream_delay_ms != 0) {
+ RTC_HISTOGRAM_COUNTS_SPARSE(
+ "WebRTC.Audio.PlatformReportedStreamDelayJump", diff_stream_delay_ms,
+ kMinDiffDelayMs, 1000, 100);
+ if (capture_.stream_delay_jumps == -1) {
+ capture_.stream_delay_jumps = 0; // Activate counter if needed.
}
- stream_delay_jumps_++;
+ capture_.stream_delay_jumps++;
}
- last_stream_delay_ms_ = stream_delay_ms_;
+ capture_.last_stream_delay_ms = capture_nonlocked_.stream_delay_ms;
// Detect a jump in AEC system delay and log the difference.
- const int frames_per_ms = rtc::CheckedDivExact(split_rate_, 1000);
+ const int frames_per_ms =
+ rtc::CheckedDivExact(capture_nonlocked_.split_rate, 1000);
const int aec_system_delay_ms =
WebRtcAec_system_delay(echo_cancellation()->aec_core()) / frames_per_ms;
const int diff_aec_system_delay_ms =
- aec_system_delay_ms - last_aec_system_delay_ms_;
+ aec_system_delay_ms - capture_.last_aec_system_delay_ms;
if (diff_aec_system_delay_ms > kMinDiffDelayMs &&
- last_aec_system_delay_ms_ != 0) {
- RTC_HISTOGRAM_COUNTS("WebRTC.Audio.AecSystemDelayJump",
- diff_aec_system_delay_ms, kMinDiffDelayMs, 1000,
- 100);
- if (aec_system_delay_jumps_ == -1) {
- aec_system_delay_jumps_ = 0; // Activate counter if needed.
+ capture_.last_aec_system_delay_ms != 0) {
+ RTC_HISTOGRAM_COUNTS_SPARSE("WebRTC.Audio.AecSystemDelayJump",
+ diff_aec_system_delay_ms, kMinDiffDelayMs,
+ 1000, 100);
+ if (capture_.aec_system_delay_jumps == -1) {
+ capture_.aec_system_delay_jumps = 0; // Activate counter if needed.
}
- aec_system_delay_jumps_++;
+ capture_.aec_system_delay_jumps++;
}
- last_aec_system_delay_ms_ = aec_system_delay_ms;
+ capture_.last_aec_system_delay_ms = aec_system_delay_ms;
}
}
void AudioProcessingImpl::UpdateHistogramsOnCallEnd() {
- CriticalSectionScoped crit_scoped(crit_);
- if (stream_delay_jumps_ > -1) {
- RTC_HISTOGRAM_ENUMERATION(
+ // Run in a single-threaded manner.
+ rtc::CritScope cs_render(&crit_render_);
+ rtc::CritScope cs_capture(&crit_capture_);
+
+ if (capture_.stream_delay_jumps > -1) {
+ RTC_HISTOGRAM_ENUMERATION_SPARSE(
"WebRTC.Audio.NumOfPlatformReportedStreamDelayJumps",
- stream_delay_jumps_, 51);
+ capture_.stream_delay_jumps, 51);
}
- stream_delay_jumps_ = -1;
- last_stream_delay_ms_ = 0;
+ capture_.stream_delay_jumps = -1;
+ capture_.last_stream_delay_ms = 0;
- if (aec_system_delay_jumps_ > -1) {
- RTC_HISTOGRAM_ENUMERATION("WebRTC.Audio.NumOfAecSystemDelayJumps",
- aec_system_delay_jumps_, 51);
+ if (capture_.aec_system_delay_jumps > -1) {
+ RTC_HISTOGRAM_ENUMERATION_SPARSE("WebRTC.Audio.NumOfAecSystemDelayJumps",
+ capture_.aec_system_delay_jumps, 51);
}
- aec_system_delay_jumps_ = -1;
- last_aec_system_delay_ms_ = 0;
+ capture_.aec_system_delay_jumps = -1;
+ capture_.last_aec_system_delay_ms = 0;
}
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
-int AudioProcessingImpl::WriteMessageToDebugFile() {
- int32_t size = event_msg_->ByteSize();
+int AudioProcessingImpl::WriteMessageToDebugFile(
+ FileWrapper* debug_file,
+ rtc::CriticalSection* crit_debug,
+ ApmDebugDumpThreadState* debug_state) {
+ int32_t size = debug_state->event_msg->ByteSize();
if (size <= 0) {
return kUnspecifiedError;
}
@@ -1192,82 +1427,100 @@ int AudioProcessingImpl::WriteMessageToDebugFile() {
// pretty safe in assuming little-endian.
#endif
- if (!event_msg_->SerializeToString(&event_str_)) {
+ if (!debug_state->event_msg->SerializeToString(&debug_state->event_str)) {
return kUnspecifiedError;
}
- // Write message preceded by its size.
- if (!debug_file_->Write(&size, sizeof(int32_t))) {
- return kFileError;
- }
- if (!debug_file_->Write(event_str_.data(), event_str_.length())) {
- return kFileError;
+ {
+ // Ensure atomic writes of the message.
+ rtc::CritScope cs_capture(crit_debug);
+ // Write message preceded by its size.
+ if (!debug_file->Write(&size, sizeof(int32_t))) {
+ return kFileError;
+ }
+ if (!debug_file->Write(debug_state->event_str.data(),
+ debug_state->event_str.length())) {
+ return kFileError;
+ }
}
- event_msg_->Clear();
+ debug_state->event_msg->Clear();
return kNoError;
}
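
The records written above are framed as an int32 byte count followed by the serialized protobuf (the surrounding code notes that it assumes little-endian). A minimal reader sketch for such a stream, as a hypothetical helper under that same endianness assumption:

    #include <cstdint>
    #include <cstdio>
    #include <string>

    // Reads one size-prefixed record into |payload|; returns false on EOF or
    // a malformed size field.
    bool ReadNextEvent(FILE* file, std::string* payload) {
      int32_t size = 0;
      if (fread(&size, sizeof(size), 1, file) != 1 || size <= 0)
        return false;
      payload->resize(size);
      return fread(&(*payload)[0], 1, size, file) ==
             static_cast<size_t>(size);
    }
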
int AudioProcessingImpl::WriteInitMessage() {
- event_msg_->set_type(audioproc::Event::INIT);
- audioproc::Init* msg = event_msg_->mutable_init();
- msg->set_sample_rate(api_format_.input_stream().sample_rate_hz());
- msg->set_num_input_channels(api_format_.input_stream().num_channels());
- msg->set_num_output_channels(api_format_.output_stream().num_channels());
- msg->set_num_reverse_channels(
- api_format_.reverse_input_stream().num_channels());
+ debug_dump_.capture.event_msg->set_type(audioproc::Event::INIT);
+ audioproc::Init* msg = debug_dump_.capture.event_msg->mutable_init();
+ msg->set_sample_rate(formats_.api_format.input_stream().sample_rate_hz());
+
+ msg->set_num_input_channels(static_cast<google::protobuf::int32>(
+ formats_.api_format.input_stream().num_channels()));
+ msg->set_num_output_channels(static_cast<google::protobuf::int32>(
+ formats_.api_format.output_stream().num_channels()));
+ msg->set_num_reverse_channels(static_cast<google::protobuf::int32>(
+ formats_.api_format.reverse_input_stream().num_channels()));
msg->set_reverse_sample_rate(
- api_format_.reverse_input_stream().sample_rate_hz());
- msg->set_output_sample_rate(api_format_.output_stream().sample_rate_hz());
- // TODO(ekmeyerson): Add reverse output fields to event_msg_.
-
- RETURN_ON_ERR(WriteMessageToDebugFile());
+ formats_.api_format.reverse_input_stream().sample_rate_hz());
+ msg->set_output_sample_rate(
+ formats_.api_format.output_stream().sample_rate_hz());
+ // TODO(ekmeyerson): Add reverse output fields to
+ // debug_dump_.capture.event_msg.
+
+ RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(),
+ &crit_debug_, &debug_dump_.capture));
return kNoError;
}
int AudioProcessingImpl::WriteConfigMessage(bool forced) {
audioproc::Config config;
- config.set_aec_enabled(echo_cancellation_->is_enabled());
+ config.set_aec_enabled(public_submodules_->echo_cancellation->is_enabled());
config.set_aec_delay_agnostic_enabled(
- echo_cancellation_->is_delay_agnostic_enabled());
+ public_submodules_->echo_cancellation->is_delay_agnostic_enabled());
config.set_aec_drift_compensation_enabled(
- echo_cancellation_->is_drift_compensation_enabled());
+ public_submodules_->echo_cancellation->is_drift_compensation_enabled());
config.set_aec_extended_filter_enabled(
- echo_cancellation_->is_extended_filter_enabled());
- config.set_aec_suppression_level(
- static_cast<int>(echo_cancellation_->suppression_level()));
+ public_submodules_->echo_cancellation->is_extended_filter_enabled());
+ config.set_aec_suppression_level(static_cast<int>(
+ public_submodules_->echo_cancellation->suppression_level()));
- config.set_aecm_enabled(echo_control_mobile_->is_enabled());
+ config.set_aecm_enabled(
+ public_submodules_->echo_control_mobile->is_enabled());
config.set_aecm_comfort_noise_enabled(
- echo_control_mobile_->is_comfort_noise_enabled());
- config.set_aecm_routing_mode(
- static_cast<int>(echo_control_mobile_->routing_mode()));
+ public_submodules_->echo_control_mobile->is_comfort_noise_enabled());
+ config.set_aecm_routing_mode(static_cast<int>(
+ public_submodules_->echo_control_mobile->routing_mode()));
- config.set_agc_enabled(gain_control_->is_enabled());
- config.set_agc_mode(static_cast<int>(gain_control_->mode()));
- config.set_agc_limiter_enabled(gain_control_->is_limiter_enabled());
- config.set_noise_robust_agc_enabled(use_new_agc_);
+ config.set_agc_enabled(public_submodules_->gain_control->is_enabled());
+ config.set_agc_mode(
+ static_cast<int>(public_submodules_->gain_control->mode()));
+ config.set_agc_limiter_enabled(
+ public_submodules_->gain_control->is_limiter_enabled());
+ config.set_noise_robust_agc_enabled(constants_.use_new_agc);
- config.set_hpf_enabled(high_pass_filter_->is_enabled());
+ config.set_hpf_enabled(public_submodules_->high_pass_filter->is_enabled());
- config.set_ns_enabled(noise_suppression_->is_enabled());
- config.set_ns_level(static_cast<int>(noise_suppression_->level()));
+ config.set_ns_enabled(public_submodules_->noise_suppression->is_enabled());
+ config.set_ns_level(
+ static_cast<int>(public_submodules_->noise_suppression->level()));
- config.set_transient_suppression_enabled(transient_suppressor_enabled_);
+ config.set_transient_suppression_enabled(
+ capture_.transient_suppressor_enabled);
std::string serialized_config = config.SerializeAsString();
- if (!forced && last_serialized_config_ == serialized_config) {
+ if (!forced &&
+ debug_dump_.capture.last_serialized_config == serialized_config) {
return kNoError;
}
- last_serialized_config_ = serialized_config;
+ debug_dump_.capture.last_serialized_config = serialized_config;
- event_msg_->set_type(audioproc::Event::CONFIG);
- event_msg_->mutable_config()->CopyFrom(config);
+ debug_dump_.capture.event_msg->set_type(audioproc::Event::CONFIG);
+ debug_dump_.capture.event_msg->mutable_config()->CopyFrom(config);
- RETURN_ON_ERR(WriteMessageToDebugFile());
+ RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(),
+ &crit_debug_, &debug_dump_.capture));
return kNoError;
}
#endif // WEBRTC_AUDIOPROC_DEBUG_DUMP
diff --git a/webrtc/modules/audio_processing/audio_processing_impl.h b/webrtc/modules/audio_processing/audio_processing_impl.h
index 542886ee10..b310896903 100644
--- a/webrtc/modules/audio_processing/audio_processing_impl.h
+++ b/webrtc/modules/audio_processing/audio_processing_impl.h
@@ -15,50 +15,38 @@
#include <string>
#include <vector>
+#include "webrtc/base/criticalsection.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/base/thread_annotations.h"
+#include "webrtc/modules/audio_processing/audio_buffer.h"
#include "webrtc/modules/audio_processing/include/audio_processing.h"
+#include "webrtc/system_wrappers/include/file_wrapper.h"
+
+#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
+// Files generated at build-time by the protobuf compiler.
+#ifdef WEBRTC_ANDROID_PLATFORM_BUILD
+#include "external/webrtc/webrtc/modules/audio_processing/debug.pb.h"
+#else
+#include "webrtc/audio_processing/debug.pb.h"
+#endif
+#endif // WEBRTC_AUDIOPROC_DEBUG_DUMP
namespace webrtc {
class AgcManagerDirect;
-class AudioBuffer;
class AudioConverter;
template<typename T>
class Beamformer;
-class CriticalSectionWrapper;
-class EchoCancellationImpl;
-class EchoControlMobileImpl;
-class FileWrapper;
-class GainControlImpl;
-class GainControlForNewAgc;
-class HighPassFilterImpl;
-class LevelEstimatorImpl;
-class NoiseSuppressionImpl;
-class ProcessingComponent;
-class TransientSuppressor;
-class VoiceDetectionImpl;
-class IntelligibilityEnhancer;
-
-#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
-namespace audioproc {
-
-class Event;
-
-} // namespace audioproc
-#endif
-
class AudioProcessingImpl : public AudioProcessing {
public:
+ // Methods forcing APM to run in a single-threaded manner.
+ // Acquire both the render and capture locks.
explicit AudioProcessingImpl(const Config& config);
-
// AudioProcessingImpl takes ownership of beamformer.
AudioProcessingImpl(const Config& config, Beamformer<float>* beamformer);
virtual ~AudioProcessingImpl();
-
- // AudioProcessing methods.
int Initialize() override;
int Initialize(int input_sample_rate_hz,
int output_sample_rate_hz,
@@ -68,12 +56,14 @@ class AudioProcessingImpl : public AudioProcessing {
ChannelLayout reverse_layout) override;
int Initialize(const ProcessingConfig& processing_config) override;
void SetExtraOptions(const Config& config) override;
- int proc_sample_rate_hz() const override;
- int proc_split_sample_rate_hz() const override;
- int num_input_channels() const override;
- int num_output_channels() const override;
- int num_reverse_channels() const override;
- void set_output_will_be_muted(bool muted) override;
+ void UpdateHistogramsOnCallEnd() override;
+ int StartDebugRecording(const char filename[kMaxFilenameSize]) override;
+ int StartDebugRecording(FILE* handle) override;
+ int StartDebugRecordingForPlatformFile(rtc::PlatformFile handle) override;
+ int StopDebugRecording() override;
+
+ // Capture-side exclusive methods possibly running APM in a
+ // multi-threaded manner. Acquire the capture lock.
int ProcessStream(AudioFrame* frame) override;
int ProcessStream(const float* const* src,
size_t samples_per_channel,
@@ -86,6 +76,15 @@ class AudioProcessingImpl : public AudioProcessing {
const StreamConfig& input_config,
const StreamConfig& output_config,
float* const* dest) override;
+ void set_output_will_be_muted(bool muted) override;
+ int set_stream_delay_ms(int delay) override;
+ void set_delay_offset_ms(int offset) override;
+ int delay_offset_ms() const override;
+ void set_stream_key_pressed(bool key_pressed) override;
+ int input_sample_rate_hz() const override;
+
+ // Render-side exclusive methods possibly running APM in a
+ // multi-threaded manner. Acquire the render lock.
int AnalyzeReverseStream(AudioFrame* frame) override;
int ProcessReverseStream(AudioFrame* frame) override;
int AnalyzeReverseStream(const float* const* data,
@@ -96,17 +95,25 @@ class AudioProcessingImpl : public AudioProcessing {
const StreamConfig& reverse_input_config,
const StreamConfig& reverse_output_config,
float* const* dest) override;
- int set_stream_delay_ms(int delay) override;
+
+ // Methods only accessed from APM submodules or
+ // from AudioProcessing tests in a single-threaded manner.
+ // Hence there is no need for locks in these.
+ int proc_sample_rate_hz() const override;
+ int proc_split_sample_rate_hz() const override;
+ size_t num_input_channels() const override;
+ size_t num_proc_channels() const override;
+ size_t num_output_channels() const override;
+ size_t num_reverse_channels() const override;
int stream_delay_ms() const override;
- bool was_stream_delay_set() const override;
- void set_delay_offset_ms(int offset) override;
- int delay_offset_ms() const override;
- void set_stream_key_pressed(bool key_pressed) override;
- int StartDebugRecording(const char filename[kMaxFilenameSize]) override;
- int StartDebugRecording(FILE* handle) override;
- int StartDebugRecordingForPlatformFile(rtc::PlatformFile handle) override;
- int StopDebugRecording() override;
- void UpdateHistogramsOnCallEnd() override;
+ bool was_stream_delay_set() const override
+ EXCLUSIVE_LOCKS_REQUIRED(crit_capture_);
+
+ // Methods returning pointers to APM submodules.
+ // No locks are acquired in these, as the locks
+ // would offer no protection (the submodules are
+ // created only once, in a single-threaded manner,
+ // during APM creation).
EchoCancellation* echo_cancellation() const override;
EchoControlMobile* echo_control_mobile() const override;
GainControl* gain_control() const override;
@@ -117,101 +124,216 @@ class AudioProcessingImpl : public AudioProcessing {
protected:
// Overridden in a mock.
- virtual int InitializeLocked() EXCLUSIVE_LOCKS_REQUIRED(crit_);
+ virtual int InitializeLocked()
+ EXCLUSIVE_LOCKS_REQUIRED(crit_render_, crit_capture_);
private:
+ struct ApmPublicSubmodules;
+ struct ApmPrivateSubmodules;
+
+#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
+ // State for the debug dump.
+ struct ApmDebugDumpThreadState {
+ ApmDebugDumpThreadState() : event_msg(new audioproc::Event()) {}
+ rtc::scoped_ptr<audioproc::Event> event_msg; // Protobuf message.
+ std::string event_str; // Memory for protobuf serialization.
+
+ // Serialized string of last saved APM configuration.
+ std::string last_serialized_config;
+ };
+
+ struct ApmDebugDumpState {
+ ApmDebugDumpState() : debug_file(FileWrapper::Create()) {}
+ rtc::scoped_ptr<FileWrapper> debug_file;
+ ApmDebugDumpThreadState render;
+ ApmDebugDumpThreadState capture;
+ };
+#endif
+
+ // Methods for modifying the formats struct that are called from both
+ // the render and capture threads. The check for whether modifications
+ // are needed is done while holding only the render lock, so the capture
+ // thread does not block the render thread.
+ // The struct itself is modified in a single-threaded manner, with both
+ // the render and capture locks held.
+ int MaybeInitialize(const ProcessingConfig& config)
+ EXCLUSIVE_LOCKS_REQUIRED(crit_render_);
+
+ int MaybeInitializeRender(const ProcessingConfig& processing_config)
+ EXCLUSIVE_LOCKS_REQUIRED(crit_render_);
+
+ int MaybeInitializeCapture(const ProcessingConfig& processing_config)
+ EXCLUSIVE_LOCKS_REQUIRED(crit_render_);
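
A conceptual sketch of the pattern these declarations and the comment above describe (not the body from this patch; the equality check stands in for whatever change detection the implementation performs):

    int AudioProcessingImpl::MaybeInitialize(const ProcessingConfig& config) {
      // Called with crit_render_ already held: the change check is cheap and
      // never blocks the capture thread.
      if (config == formats_.api_format)
        return kNoError;
      // A change is needed; also take the capture lock so that the
      // reinitialization runs in a single-threaded manner.
      rtc::CritScope cs_capture(&crit_capture_);
      return InitializeLocked(config);
    }
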
+
+ // Methods for checking whether format conversion is needed. These only
+ // read the formats structs, but the render-lock requirement was added
+ // because they are currently always called with that lock held anyway.
+ bool rev_conversion_needed() const EXCLUSIVE_LOCKS_REQUIRED(crit_render_);
+ bool render_check_rev_conversion_needed() const
+ EXCLUSIVE_LOCKS_REQUIRED(crit_render_);
+
+ // Methods requiring APM running in a single-threaded manner.
+ // Are called with both the render and capture locks already
+ // acquired.
+ void InitializeExperimentalAgc()
+ EXCLUSIVE_LOCKS_REQUIRED(crit_render_, crit_capture_);
+ void InitializeTransient()
+ EXCLUSIVE_LOCKS_REQUIRED(crit_render_, crit_capture_);
+ void InitializeBeamformer()
+ EXCLUSIVE_LOCKS_REQUIRED(crit_render_, crit_capture_);
+ void InitializeIntelligibility()
+ EXCLUSIVE_LOCKS_REQUIRED(crit_render_, crit_capture_);
+ void InitializeHighPassFilter()
+ EXCLUSIVE_LOCKS_REQUIRED(crit_capture_);
+ void InitializeNoiseSuppression()
+ EXCLUSIVE_LOCKS_REQUIRED(crit_capture_);
+ void InitializeLevelEstimator()
+ EXCLUSIVE_LOCKS_REQUIRED(crit_capture_);
+ void InitializeVoiceDetection()
+ EXCLUSIVE_LOCKS_REQUIRED(crit_capture_);
int InitializeLocked(const ProcessingConfig& config)
- EXCLUSIVE_LOCKS_REQUIRED(crit_);
- int MaybeInitializeLocked(const ProcessingConfig& config)
- EXCLUSIVE_LOCKS_REQUIRED(crit_);
+ EXCLUSIVE_LOCKS_REQUIRED(crit_render_, crit_capture_);
+
+ // Capture-side exclusive methods possibly running APM in a multi-threaded
+ // manner that are called with the capture lock already acquired.
+ int ProcessStreamLocked() EXCLUSIVE_LOCKS_REQUIRED(crit_capture_);
+ bool output_copy_needed(bool is_data_processed) const
+ EXCLUSIVE_LOCKS_REQUIRED(crit_capture_);
+ bool is_data_processed() const EXCLUSIVE_LOCKS_REQUIRED(crit_capture_);
+ bool synthesis_needed(bool is_data_processed) const
+ EXCLUSIVE_LOCKS_REQUIRED(crit_capture_);
+ bool analysis_needed(bool is_data_processed) const
+ EXCLUSIVE_LOCKS_REQUIRED(crit_capture_);
+ void MaybeUpdateHistograms() EXCLUSIVE_LOCKS_REQUIRED(crit_capture_);
+
+ // Render-side exclusive methods possibly running APM in a multi-threaded
+ // manner that are called with the render lock already acquired.
// TODO(ekm): Remove once all clients updated to new interface.
- int AnalyzeReverseStream(const float* const* src,
- const StreamConfig& input_config,
- const StreamConfig& output_config);
- int ProcessStreamLocked() EXCLUSIVE_LOCKS_REQUIRED(crit_);
- int ProcessReverseStreamLocked() EXCLUSIVE_LOCKS_REQUIRED(crit_);
-
- bool is_data_processed() const;
- bool output_copy_needed(bool is_data_processed) const;
- bool synthesis_needed(bool is_data_processed) const;
- bool analysis_needed(bool is_data_processed) const;
- bool is_rev_processed() const;
- bool rev_conversion_needed() const;
- void InitializeExperimentalAgc() EXCLUSIVE_LOCKS_REQUIRED(crit_);
- void InitializeTransient() EXCLUSIVE_LOCKS_REQUIRED(crit_);
- void InitializeBeamformer() EXCLUSIVE_LOCKS_REQUIRED(crit_);
- void InitializeIntelligibility() EXCLUSIVE_LOCKS_REQUIRED(crit_);
- void MaybeUpdateHistograms() EXCLUSIVE_LOCKS_REQUIRED(crit_);
-
- EchoCancellationImpl* echo_cancellation_;
- EchoControlMobileImpl* echo_control_mobile_;
- GainControlImpl* gain_control_;
- HighPassFilterImpl* high_pass_filter_;
- LevelEstimatorImpl* level_estimator_;
- NoiseSuppressionImpl* noise_suppression_;
- VoiceDetectionImpl* voice_detection_;
- rtc::scoped_ptr<GainControlForNewAgc> gain_control_for_new_agc_;
-
- std::list<ProcessingComponent*> component_list_;
- CriticalSectionWrapper* crit_;
- rtc::scoped_ptr<AudioBuffer> render_audio_;
- rtc::scoped_ptr<AudioBuffer> capture_audio_;
- rtc::scoped_ptr<AudioConverter> render_converter_;
+ int AnalyzeReverseStreamLocked(const float* const* src,
+ const StreamConfig& input_config,
+ const StreamConfig& output_config)
+ EXCLUSIVE_LOCKS_REQUIRED(crit_render_);
+ bool is_rev_processed() const EXCLUSIVE_LOCKS_REQUIRED(crit_render_);
+ int ProcessReverseStreamLocked() EXCLUSIVE_LOCKS_REQUIRED(crit_render_);
+
+// Debug dump methods that are internal and called without locks.
+// TODO(peah): Make thread safe.
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
// TODO(andrew): make this more graceful. Ideally we would split this stuff
// out into a separate class with an "enabled" and "disabled" implementation.
- int WriteMessageToDebugFile();
- int WriteInitMessage();
+ static int WriteMessageToDebugFile(FileWrapper* debug_file,
+ rtc::CriticalSection* crit_debug,
+ ApmDebugDumpThreadState* debug_state);
+ int WriteInitMessage() EXCLUSIVE_LOCKS_REQUIRED(crit_render_, crit_capture_);
// Writes Config message. If not |forced|, only writes the current config if
// it is different from the last saved one; if |forced|, writes the config
// regardless of the last saved.
- int WriteConfigMessage(bool forced);
+ int WriteConfigMessage(bool forced) EXCLUSIVE_LOCKS_REQUIRED(crit_capture_);
- rtc::scoped_ptr<FileWrapper> debug_file_;
- rtc::scoped_ptr<audioproc::Event> event_msg_; // Protobuf message.
- std::string event_str_; // Memory for protobuf serialization.
+ // Critical section.
+ mutable rtc::CriticalSection crit_debug_;
- // Serialized string of last saved APM configuration.
- std::string last_serialized_config_;
+ // Debug dump state.
+ ApmDebugDumpState debug_dump_;
#endif
- // Format of processing streams at input/output call sites.
- ProcessingConfig api_format_;
-
- // Only the rate and samples fields of fwd_proc_format_ are used because the
- // forward processing number of channels is mutable and is tracked by the
- // capture_audio_.
- StreamConfig fwd_proc_format_;
- StreamConfig rev_proc_format_;
- int split_rate_;
-
- int stream_delay_ms_;
- int delay_offset_ms_;
- bool was_stream_delay_set_;
- int last_stream_delay_ms_;
- int last_aec_system_delay_ms_;
- int stream_delay_jumps_;
- int aec_system_delay_jumps_;
-
- bool output_will_be_muted_ GUARDED_BY(crit_);
-
- bool key_pressed_;
-
- // Only set through the constructor's Config parameter.
- const bool use_new_agc_;
- rtc::scoped_ptr<AgcManagerDirect> agc_manager_ GUARDED_BY(crit_);
- int agc_startup_min_volume_;
-
- bool transient_suppressor_enabled_;
- rtc::scoped_ptr<TransientSuppressor> transient_suppressor_;
- const bool beamformer_enabled_;
- rtc::scoped_ptr<Beamformer<float>> beamformer_;
- const std::vector<Point> array_geometry_;
- const SphericalPointf target_direction_;
-
- bool intelligibility_enabled_;
- rtc::scoped_ptr<IntelligibilityEnhancer> intelligibility_enhancer_;
+ // Critical sections.
+ mutable rtc::CriticalSection crit_render_ ACQUIRED_BEFORE(crit_capture_);
+ mutable rtc::CriticalSection crit_capture_;
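
The ACQUIRED_BEFORE annotation encodes the ordering contract followed throughout this patch: whenever both locks are needed, the render lock is taken first. An illustrative sketch (the function name is hypothetical):

    void AudioProcessingImpl::RunSingleThreadedSection() {
      rtc::CritScope cs_render(&crit_render_);    // Render lock first,
      rtc::CritScope cs_capture(&crit_capture_);  // capture lock second.
      // Acquiring them in the opposite order anywhere would risk deadlock.
    }
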
+
+ // Structs containing the pointers to the submodules.
+ rtc::scoped_ptr<ApmPublicSubmodules> public_submodules_;
+ rtc::scoped_ptr<ApmPrivateSubmodules> private_submodules_
+ GUARDED_BY(crit_capture_);
+
+ // State that is written to while holding both the render and capture locks
+ // but can be read without any lock being held.
+ // As this is only accessed internally of APM, and all internal methods in APM
+ // either are holding the render or capture locks, this construct is safe as
+ // it is not possible to read the variables while writing them.
+ struct ApmFormatState {
+ ApmFormatState()
+ : // Format of processing streams at input/output call sites.
+ api_format({{{kSampleRate16kHz, 1, false},
+ {kSampleRate16kHz, 1, false},
+ {kSampleRate16kHz, 1, false},
+ {kSampleRate16kHz, 1, false}}}),
+ rev_proc_format(kSampleRate16kHz, 1) {}
+ ProcessingConfig api_format;
+ StreamConfig rev_proc_format;
+ } formats_;
+
+ // APM constants.
+ const struct ApmConstants {
+ ApmConstants(int agc_startup_min_volume,
+ bool use_new_agc,
+ bool intelligibility_enabled)
+ : agc_startup_min_volume(agc_startup_min_volume),
+ use_new_agc(use_new_agc),
+ intelligibility_enabled(intelligibility_enabled) {}
+ int agc_startup_min_volume;
+ bool use_new_agc;
+ bool intelligibility_enabled;
+ } constants_;
+
+ struct ApmCaptureState {
+ ApmCaptureState(bool transient_suppressor_enabled,
+ const std::vector<Point>& array_geometry,
+ SphericalPointf target_direction)
+ : aec_system_delay_jumps(-1),
+ delay_offset_ms(0),
+ was_stream_delay_set(false),
+ last_stream_delay_ms(0),
+ last_aec_system_delay_ms(0),
+ stream_delay_jumps(-1),
+ output_will_be_muted(false),
+ key_pressed(false),
+ transient_suppressor_enabled(transient_suppressor_enabled),
+ array_geometry(array_geometry),
+ target_direction(target_direction),
+ fwd_proc_format(kSampleRate16kHz),
+ split_rate(kSampleRate16kHz) {}
+ int aec_system_delay_jumps;
+ int delay_offset_ms;
+ bool was_stream_delay_set;
+ int last_stream_delay_ms;
+ int last_aec_system_delay_ms;
+ int stream_delay_jumps;
+ bool output_will_be_muted;
+ bool key_pressed;
+ bool transient_suppressor_enabled;
+ std::vector<Point> array_geometry;
+ SphericalPointf target_direction;
+ rtc::scoped_ptr<AudioBuffer> capture_audio;
+ // Only the rate and samples fields of fwd_proc_format are used because
+ // the forward processing number of channels is mutable and is tracked by
+ // capture_audio.
+ StreamConfig fwd_proc_format;
+ int split_rate;
+ } capture_ GUARDED_BY(crit_capture_);
+
+ struct ApmCaptureNonLockedState {
+ ApmCaptureNonLockedState(bool beamformer_enabled)
+ : fwd_proc_format(kSampleRate16kHz),
+ split_rate(kSampleRate16kHz),
+ stream_delay_ms(0),
+ beamformer_enabled(beamformer_enabled) {}
+ // Only the rate and samples fields of fwd_proc_format are used because
+ // the forward processing number of channels is mutable and is tracked by
+ // capture_.capture_audio.
+ StreamConfig fwd_proc_format;
+ int split_rate;
+ int stream_delay_ms;
+ bool beamformer_enabled;
+ } capture_nonlocked_;
+
+ struct ApmRenderState {
+ rtc::scoped_ptr<AudioConverter> render_converter;
+ rtc::scoped_ptr<AudioBuffer> render_audio;
+ } render_ GUARDED_BY(crit_render_);
};
} // namespace webrtc
diff --git a/webrtc/modules/audio_processing/audio_processing_impl_locking_unittest.cc b/webrtc/modules/audio_processing/audio_processing_impl_locking_unittest.cc
new file mode 100644
index 0000000000..e1e6a310a5
--- /dev/null
+++ b/webrtc/modules/audio_processing/audio_processing_impl_locking_unittest.cc
@@ -0,0 +1,1133 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_processing/audio_processing_impl.h"
+
+#include <algorithm>
+#include <vector>
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/base/array_view.h"
+#include "webrtc/base/criticalsection.h"
+#include "webrtc/base/event.h"
+#include "webrtc/base/platform_thread.h"
+#include "webrtc/base/random.h"
+#include "webrtc/config.h"
+#include "webrtc/modules/audio_processing/test/test_utils.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/system_wrappers/include/sleep.h"
+
+namespace webrtc {
+
+namespace {
+
+class AudioProcessingImplLockTest;
+
+// Type of the render thread APM API call to use in the test.
+enum class RenderApiImpl {
+ ProcessReverseStreamImpl1,
+ ProcessReverseStreamImpl2,
+ AnalyzeReverseStreamImpl1,
+ AnalyzeReverseStreamImpl2
+};
+
+// Type of the capture thread APM API call to use in the test.
+enum class CaptureApiImpl {
+ ProcessStreamImpl1,
+ ProcessStreamImpl2,
+ ProcessStreamImpl3
+};
+
+// The runtime parameter setting scheme to use in the test.
+enum class RuntimeParameterSettingScheme {
+ SparseStreamMetadataChangeScheme,
+ ExtremeStreamMetadataChangeScheme,
+ FixedMonoStreamMetadataScheme,
+ FixedStereoStreamMetadataScheme
+};
+
+// Variant of echo canceller settings to use in the test.
+enum class AecType {
+ BasicWebRtcAecSettings,
+ AecTurnedOff,
+ BasicWebRtcAecSettingsWithExtendedFilter,
+ BasicWebRtcAecSettingsWithDelayAgnosticAec,
+ BasicWebRtcAecSettingsWithAecMobile
+};
+
+// Thread-safe random number generator wrapper.
+class RandomGenerator {
+ public:
+ RandomGenerator() : rand_gen_(42U) {}
+
+ int RandInt(int min, int max) {
+ rtc::CritScope cs(&crit_);
+ return rand_gen_.Rand(min, max);
+ }
+
+ int RandInt(int max) {
+ rtc::CritScope cs(&crit_);
+ return rand_gen_.Rand(max);
+ }
+
+ float RandFloat() {
+ rtc::CritScope cs(&crit_);
+ return rand_gen_.Rand<float>();
+ }
+
+ private:
+ rtc::CriticalSection crit_;
+ Random rand_gen_ GUARDED_BY(crit_);
+};
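
Usage sketch (hypothetical): since every accessor takes the internal lock, a single instance can be shared by the render, capture, and stats threads:

    RandomGenerator rand_gen;
    int sleep_ms = rand_gen.RandInt(0, 100);  // Safe from any thread.
    float sample = rand_gen.RandFloat();
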
+
+// Variables related to the audio data and formats.
+struct AudioFrameData {
+ explicit AudioFrameData(int max_frame_size) {
+ // Set up the two-dimensional arrays needed for the APM API calls.
+ input_frame_channels.resize(2 * max_frame_size);
+ input_frame.resize(2);
+ input_frame[0] = &input_frame_channels[0];
+ input_frame[1] = &input_frame_channels[max_frame_size];
+
+ output_frame_channels.resize(2 * max_frame_size);
+ output_frame.resize(2);
+ output_frame[0] = &output_frame_channels[0];
+ output_frame[1] = &output_frame_channels[max_frame_size];
+ }
+
+ AudioFrame frame;
+ std::vector<float*> output_frame;
+ std::vector<float> output_frame_channels;
+ AudioProcessing::ChannelLayout output_channel_layout =
+ AudioProcessing::ChannelLayout::kMono;
+ int input_sample_rate_hz = 16000;
+ int input_number_of_channels = -1;
+ std::vector<float*> input_frame;
+ std::vector<float> input_frame_channels;
+ AudioProcessing::ChannelLayout input_channel_layout =
+ AudioProcessing::ChannelLayout::kMono;
+ int output_sample_rate_hz = 16000;
+ int output_number_of_channels = -1;
+ StreamConfig input_stream_config;
+ StreamConfig output_stream_config;
+ int input_samples_per_channel = -1;
+ int output_samples_per_channel = -1;
+};
+
+// The configuration for the test.
+struct TestConfig {
+ // Test case generator for the test configurations to use in the brief tests.
+ static std::vector<TestConfig> GenerateBriefTestConfigs() {
+ std::vector<TestConfig> test_configs;
+ AecType aec_types[] = {AecType::BasicWebRtcAecSettingsWithDelayAgnosticAec,
+ AecType::BasicWebRtcAecSettingsWithAecMobile};
+ for (auto aec_type : aec_types) {
+ TestConfig test_config;
+ test_config.aec_type = aec_type;
+
+ test_config.min_number_of_calls = 300;
+
+ // Perform tests only with the extreme runtime parameter setting scheme.
+ test_config.runtime_parameter_setting_scheme =
+ RuntimeParameterSettingScheme::ExtremeStreamMetadataChangeScheme;
+
+ // Only test 16 kHz for this test suite.
+ test_config.initial_sample_rate_hz = 16000;
+
+ // Create test config for the second processing API function set.
+ test_config.render_api_function =
+ RenderApiImpl::ProcessReverseStreamImpl2;
+ test_config.capture_api_function = CaptureApiImpl::ProcessStreamImpl2;
+
+ // Create test config for the first processing API function set.
+ test_configs.push_back(test_config);
+ test_config.render_api_function =
+ RenderApiImpl::AnalyzeReverseStreamImpl2;
+ test_config.capture_api_function = CaptureApiImpl::ProcessStreamImpl3;
+ test_configs.push_back(test_config);
+ }
+
+ // Return the created test configurations.
+ return test_configs;
+ }
+
+ // Test case generator for the test configurations to use in the extensive
+ // tests.
+ static std::vector<TestConfig> GenerateExtensiveTestConfigs() {
+ // Lambda functions for the test config generation.
+ auto add_processing_apis = [](TestConfig test_config) {
+ struct AllowedApiCallCombinations {
+ RenderApiImpl render_api;
+ CaptureApiImpl capture_api;
+ };
+
+ const AllowedApiCallCombinations api_calls[] = {
+ {RenderApiImpl::ProcessReverseStreamImpl1,
+ CaptureApiImpl::ProcessStreamImpl1},
+ {RenderApiImpl::AnalyzeReverseStreamImpl1,
+ CaptureApiImpl::ProcessStreamImpl1},
+ {RenderApiImpl::ProcessReverseStreamImpl2,
+ CaptureApiImpl::ProcessStreamImpl2},
+ {RenderApiImpl::ProcessReverseStreamImpl2,
+ CaptureApiImpl::ProcessStreamImpl3},
+ {RenderApiImpl::AnalyzeReverseStreamImpl2,
+ CaptureApiImpl::ProcessStreamImpl2},
+ {RenderApiImpl::AnalyzeReverseStreamImpl2,
+ CaptureApiImpl::ProcessStreamImpl3}};
+ std::vector<TestConfig> out;
+ for (auto api_call : api_calls) {
+ test_config.render_api_function = api_call.render_api;
+ test_config.capture_api_function = api_call.capture_api;
+ out.push_back(test_config);
+ }
+ return out;
+ };
+
+ auto add_aec_settings = [](const std::vector<TestConfig>& in) {
+ std::vector<TestConfig> out;
+ AecType aec_types[] = {
+ AecType::BasicWebRtcAecSettings, AecType::AecTurnedOff,
+ AecType::BasicWebRtcAecSettingsWithExtendedFilter,
+ AecType::BasicWebRtcAecSettingsWithDelayAgnosticAec,
+ AecType::BasicWebRtcAecSettingsWithAecMobile};
+ for (auto test_config : in) {
+ for (auto aec_type : aec_types) {
+ test_config.aec_type = aec_type;
+ out.push_back(test_config);
+ }
+ }
+ return out;
+ };
+
+ auto add_settings_scheme = [](const std::vector<TestConfig>& in) {
+ std::vector<TestConfig> out;
+ RuntimeParameterSettingScheme schemes[] = {
+ RuntimeParameterSettingScheme::SparseStreamMetadataChangeScheme,
+ RuntimeParameterSettingScheme::ExtremeStreamMetadataChangeScheme,
+ RuntimeParameterSettingScheme::FixedMonoStreamMetadataScheme,
+ RuntimeParameterSettingScheme::FixedStereoStreamMetadataScheme};
+
+ for (auto test_config : in) {
+ for (auto scheme : schemes) {
+ test_config.runtime_parameter_setting_scheme = scheme;
+ out.push_back(test_config);
+ }
+ }
+ return out;
+ };
+
+ auto add_sample_rates = [](const std::vector<TestConfig>& in) {
+ const int sample_rates[] = {8000, 16000, 32000, 48000};
+
+ std::vector<TestConfig> out;
+ for (auto test_config : in) {
+ auto available_rates =
+ (test_config.aec_type ==
+ AecType::BasicWebRtcAecSettingsWithAecMobile
+ ? rtc::ArrayView<const int>(sample_rates, 2)
+ : rtc::ArrayView<const int>(sample_rates));
+
+ for (auto rate : available_rates) {
+ test_config.initial_sample_rate_hz = rate;
+ out.push_back(test_config);
+ }
+ }
+ return out;
+ };
+
+ // Generate test configurations for the relevant combinations of the
+ // parameters to test.
+ TestConfig test_config;
+ test_config.min_number_of_calls = 10000;
+ return add_sample_rates(add_settings_scheme(
+ add_aec_settings(add_processing_apis(test_config))));
+ }
+
+ RenderApiImpl render_api_function = RenderApiImpl::ProcessReverseStreamImpl2;
+ CaptureApiImpl capture_api_function = CaptureApiImpl::ProcessStreamImpl2;
+ RuntimeParameterSettingScheme runtime_parameter_setting_scheme =
+ RuntimeParameterSettingScheme::ExtremeStreamMetadataChangeScheme;
+ int initial_sample_rate_hz = 16000;
+ AecType aec_type = AecType::BasicWebRtcAecSettingsWithDelayAgnosticAec;
+ int min_number_of_calls = 300;
+};
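
These generators are presumably fed to gtest's parameterization machinery; a sketch of the wiring, with the instantiation name hypothetical (the actual INSTANTIATE_TEST_CASE_P calls fall outside this excerpt):

    INSTANTIATE_TEST_CASE_P(
        AudioProcessingImplLockBrief,
        AudioProcessingImplLockTest,
        ::testing::ValuesIn(TestConfig::GenerateBriefTestConfigs()));
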
+
+// Handler for the frame counters.
+class FrameCounters {
+ public:
+ void IncreaseRenderCounter() {
+ rtc::CritScope cs(&crit_);
+ render_count++;
+ }
+
+ void IncreaseCaptureCounter() {
+ rtc::CritScope cs(&crit_);
+ capture_count++;
+ }
+
+ int GetCaptureCounter() const {
+ rtc::CritScope cs(&crit_);
+ return capture_count;
+ }
+
+ int GetRenderCounter() const {
+ rtc::CritScope cs(&crit_);
+ return render_count;
+ }
+
+ int CaptureMinusRenderCounters() const {
+ rtc::CritScope cs(&crit_);
+ return capture_count - render_count;
+ }
+
+ int RenderMinusCaptureCounters() const {
+ return -CaptureMinusRenderCounters();
+ }
+
+ bool BothCountersExceedThreshold(int threshold) {
+ rtc::CritScope cs(&crit_);
+ return (render_count > threshold && capture_count > threshold);
+ }
+
+ private:
+ mutable rtc::CriticalSection crit_;
+ int render_count GUARDED_BY(crit_) = 0;
+ int capture_count GUARDED_BY(crit_) = 0;
+};
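
These counters pair with the two rtc::Event objects to keep the threads loosely in step; the scheme, as used verbatim in CaptureProcessor::Process() further down, is:

    // Block the capture thread until the render thread catches up.
    if (frame_counters_->CaptureMinusRenderCounters() > kMaxCallDifference) {
      render_call_event_->Wait(rtc::Event::kForever);
    }
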
+
+// Class for handling the capture side processing.
+class CaptureProcessor {
+ public:
+ CaptureProcessor(int max_frame_size,
+ RandomGenerator* rand_gen,
+ rtc::Event* render_call_event,
+ rtc::Event* capture_call_event,
+ FrameCounters* shared_counters_state,
+ AudioProcessingImplLockTest* test_framework,
+ TestConfig* test_config,
+ AudioProcessing* apm);
+ bool Process();
+
+ private:
+ static const int kMaxCallDifference = 10;
+ static const float kCaptureInputFloatLevel;
+ static const int kCaptureInputFixLevel = 1024;
+
+ void PrepareFrame();
+ void CallApmCaptureSide();
+ void ApplyRuntimeSettingScheme();
+
+ RandomGenerator* const rand_gen_ = nullptr;
+ rtc::Event* const render_call_event_ = nullptr;
+ rtc::Event* const capture_call_event_ = nullptr;
+ FrameCounters* const frame_counters_ = nullptr;
+ AudioProcessingImplLockTest* const test_ = nullptr;
+ const TestConfig* const test_config_ = nullptr;
+ AudioProcessing* const apm_ = nullptr;
+ AudioFrameData frame_data_;
+};
+
+// Class for handling the stats processing.
+class StatsProcessor {
+ public:
+ StatsProcessor(RandomGenerator* rand_gen,
+ TestConfig* test_config,
+ AudioProcessing* apm);
+ bool Process();
+
+ private:
+ RandomGenerator* rand_gen_ = nullptr;
+ TestConfig* test_config_ = nullptr;
+ AudioProcessing* apm_ = nullptr;
+};
+
+// Class for handling the render side processing.
+class RenderProcessor {
+ public:
+ RenderProcessor(int max_frame_size,
+ RandomGenerator* rand_gen,
+ rtc::Event* render_call_event,
+ rtc::Event* capture_call_event,
+ FrameCounters* shared_counters_state,
+ AudioProcessingImplLockTest* test_framework,
+ TestConfig* test_config,
+ AudioProcessing* apm);
+ bool Process();
+
+ private:
+ static const int kMaxCallDifference = 10;
+ static const int kRenderInputFixLevel = 16384;
+ static const float kRenderInputFloatLevel;
+
+ void PrepareFrame();
+ void CallApmRenderSide();
+ void ApplyRuntimeSettingScheme();
+
+ RandomGenerator* const rand_gen_ = nullptr;
+ rtc::Event* const render_call_event_ = nullptr;
+ rtc::Event* const capture_call_event_ = nullptr;
+ FrameCounters* const frame_counters_ = nullptr;
+ AudioProcessingImplLockTest* const test_ = nullptr;
+ const TestConfig* const test_config_ = nullptr;
+ AudioProcessing* const apm_ = nullptr;
+ AudioFrameData frame_data_;
+ bool first_render_call_ = true;
+};
+
+class AudioProcessingImplLockTest
+ : public ::testing::TestWithParam<TestConfig> {
+ public:
+ AudioProcessingImplLockTest();
+ bool RunTest();
+ bool MaybeEndTest();
+
+ private:
+ static const int kTestTimeOutLimit = 10 * 60 * 1000;
+ static const int kMaxFrameSize = 480;
+
+ // ::testing::TestWithParam<> implementation
+ void SetUp() override;
+ void TearDown() override;
+
+ // Thread callback for the render thread
+ static bool RenderProcessorThreadFunc(void* context) {
+ return reinterpret_cast<AudioProcessingImplLockTest*>(context)
+ ->render_thread_state_.Process();
+ }
+
+ // Thread callback for the capture thread
+ static bool CaptureProcessorThreadFunc(void* context) {
+ return reinterpret_cast<AudioProcessingImplLockTest*>(context)
+ ->capture_thread_state_.Process();
+ }
+
+ // Thread callback for the stats thread
+ static bool StatsProcessorThreadFunc(void* context) {
+ return reinterpret_cast<AudioProcessingImplLockTest*>(context)
+ ->stats_thread_state_.Process();
+ }
+
+ // Tests whether all the required render and capture side calls have been
+ // done.
+ bool TestDone() {
+ return frame_counters_.BothCountersExceedThreshold(
+ test_config_.min_number_of_calls);
+ }
+
+ // Start the threads used in the test.
+ void StartThreads() {
+ render_thread_.Start();
+ render_thread_.SetPriority(rtc::kRealtimePriority);
+ capture_thread_.Start();
+ capture_thread_.SetPriority(rtc::kRealtimePriority);
+ stats_thread_.Start();
+ stats_thread_.SetPriority(rtc::kNormalPriority);
+ }
+
+ // Event handlers for the test.
+ rtc::Event test_complete_;
+ rtc::Event render_call_event_;
+ rtc::Event capture_call_event_;
+
+ // Thread related variables.
+ rtc::PlatformThread render_thread_;
+ rtc::PlatformThread capture_thread_;
+ rtc::PlatformThread stats_thread_;
+ mutable RandomGenerator rand_gen_;
+
+ rtc::scoped_ptr<AudioProcessing> apm_;
+ TestConfig test_config_;
+ FrameCounters frame_counters_;
+ RenderProcessor render_thread_state_;
+ CaptureProcessor capture_thread_state_;
+ StatsProcessor stats_thread_state_;
+};
+
+// Sleeps a random time between 0 and max_sleep milliseconds.
+void SleepRandomMs(int max_sleep, RandomGenerator* rand_gen) {
+ int sleeptime = rand_gen->RandInt(0, max_sleep);
+ SleepMs(sleeptime);
+}
+
+// Populates a float audio frame with random data.
+void PopulateAudioFrame(float** frame,
+ float amplitude,
+ size_t num_channels,
+ size_t samples_per_channel,
+ RandomGenerator* rand_gen) {
+ for (size_t ch = 0; ch < num_channels; ch++) {
+ for (size_t k = 0; k < samples_per_channel; k++) {
+ // Store random 16 bit quantized float number between +-amplitude.
+ frame[ch][k] = amplitude * (2 * rand_gen->RandFloat() - 1);
+ }
+ }
+}
+
+// Populates an AudioFrame with random data.
+void PopulateAudioFrame(AudioFrame* frame,
+ int16_t amplitude,
+ RandomGenerator* rand_gen) {
+ ASSERT_GT(amplitude, 0);
+ ASSERT_LE(amplitude, 32767);
+ for (size_t ch = 0; ch < frame->num_channels_; ch++) {
+ for (size_t k = 0; k < frame->samples_per_channel_; k++) {
+ // Store random 16 bit number between -(amplitude+1) and
+ // amplitude.
+ frame->data_[k * frame->num_channels_ + ch] =
+ rand_gen->RandInt(2 * amplitude + 1) - amplitude - 1;
+ }
+ }
+}
+
+AudioProcessingImplLockTest::AudioProcessingImplLockTest()
+ : test_complete_(false, false),
+ render_call_event_(false, false),
+ capture_call_event_(false, false),
+ render_thread_(RenderProcessorThreadFunc, this, "render"),
+ capture_thread_(CaptureProcessorThreadFunc, this, "capture"),
+ stats_thread_(StatsProcessorThreadFunc, this, "stats"),
+ apm_(AudioProcessingImpl::Create()),
+ render_thread_state_(kMaxFrameSize,
+ &rand_gen_,
+ &render_call_event_,
+ &capture_call_event_,
+ &frame_counters_,
+ this,
+ &test_config_,
+ apm_.get()),
+ capture_thread_state_(kMaxFrameSize,
+ &rand_gen_,
+ &render_call_event_,
+ &capture_call_event_,
+ &frame_counters_,
+ this,
+ &test_config_,
+ apm_.get()),
+ stats_thread_state_(&rand_gen_, &test_config_, apm_.get()) {}
+
+// Run the test with a timeout.
+bool AudioProcessingImplLockTest::RunTest() {
+ StartThreads();
+ return test_complete_.Wait(kTestTimeOutLimit);
+}
+
+bool AudioProcessingImplLockTest::MaybeEndTest() {
+ if (HasFatalFailure() || TestDone()) {
+ test_complete_.Set();
+ return true;
+ }
+ return false;
+}
+
+// Setup of test and APM.
+void AudioProcessingImplLockTest::SetUp() {
+ test_config_ = static_cast<TestConfig>(GetParam());
+
+ ASSERT_EQ(apm_->kNoError, apm_->level_estimator()->Enable(true));
+ ASSERT_EQ(apm_->kNoError, apm_->gain_control()->Enable(true));
+
+ ASSERT_EQ(apm_->kNoError,
+ apm_->gain_control()->set_mode(GainControl::kAdaptiveDigital));
+ ASSERT_EQ(apm_->kNoError, apm_->gain_control()->Enable(true));
+
+ ASSERT_EQ(apm_->kNoError, apm_->noise_suppression()->Enable(true));
+ ASSERT_EQ(apm_->kNoError, apm_->voice_detection()->Enable(true));
+
+ Config config;
+ if (test_config_.aec_type == AecType::AecTurnedOff) {
+ ASSERT_EQ(apm_->kNoError, apm_->echo_control_mobile()->Enable(false));
+ ASSERT_EQ(apm_->kNoError, apm_->echo_cancellation()->Enable(false));
+ } else if (test_config_.aec_type ==
+ AecType::BasicWebRtcAecSettingsWithAecMobile) {
+ ASSERT_EQ(apm_->kNoError, apm_->echo_control_mobile()->Enable(true));
+ ASSERT_EQ(apm_->kNoError, apm_->echo_cancellation()->Enable(false));
+ } else {
+ ASSERT_EQ(apm_->kNoError, apm_->echo_control_mobile()->Enable(false));
+ ASSERT_EQ(apm_->kNoError, apm_->echo_cancellation()->Enable(true));
+ ASSERT_EQ(apm_->kNoError, apm_->echo_cancellation()->enable_metrics(true));
+ ASSERT_EQ(apm_->kNoError,
+ apm_->echo_cancellation()->enable_delay_logging(true));
+
+ config.Set<ExtendedFilter>(
+ new ExtendedFilter(test_config_.aec_type ==
+ AecType::BasicWebRtcAecSettingsWithExtendedFilter));
+
+ config.Set<DelayAgnostic>(
+ new DelayAgnostic(test_config_.aec_type ==
+ AecType::BasicWebRtcAecSettingsWithDelayAgnosticAec));
+
+ apm_->SetExtraOptions(config);
+ }
+}
+
+void AudioProcessingImplLockTest::TearDown() {
+ render_call_event_.Set();
+ capture_call_event_.Set();
+ render_thread_.Stop();
+ capture_thread_.Stop();
+ stats_thread_.Stop();
+}
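
The TEST_P body falls outside this excerpt; presumably it reduces to something like the following hypothetical sketch, which fails if the timeout in RunTest() expires before the required number of calls completes:

    TEST_P(AudioProcessingImplLockTest, DeadlockAndRaceCheck) {
      EXPECT_TRUE(RunTest());
    }
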
+
+StatsProcessor::StatsProcessor(RandomGenerator* rand_gen,
+ TestConfig* test_config,
+ AudioProcessing* apm)
+ : rand_gen_(rand_gen), test_config_(test_config), apm_(apm) {}
+
+// Implements the callback functionality for the statistics
+// collection thread.
+bool StatsProcessor::Process() {
+ SleepRandomMs(100, rand_gen_);
+
+ EXPECT_EQ(apm_->echo_cancellation()->is_enabled(),
+ ((test_config_->aec_type != AecType::AecTurnedOff) &&
+ (test_config_->aec_type !=
+ AecType::BasicWebRtcAecSettingsWithAecMobile)));
+ apm_->echo_cancellation()->stream_drift_samples();
+ EXPECT_EQ(apm_->echo_control_mobile()->is_enabled(),
+ (test_config_->aec_type != AecType::AecTurnedOff) &&
+ (test_config_->aec_type ==
+ AecType::BasicWebRtcAecSettingsWithAecMobile));
+ EXPECT_TRUE(apm_->gain_control()->is_enabled());
+ apm_->gain_control()->stream_analog_level();
+ EXPECT_TRUE(apm_->noise_suppression()->is_enabled());
+
+ // The return values below are not testable.
+ apm_->noise_suppression()->speech_probability();
+ apm_->voice_detection()->is_enabled();
+
+ return true;
+}
+
+const float CaptureProcessor::kCaptureInputFloatLevel = 0.03125f;
+
+CaptureProcessor::CaptureProcessor(int max_frame_size,
+ RandomGenerator* rand_gen,
+ rtc::Event* render_call_event,
+ rtc::Event* capture_call_event,
+ FrameCounters* shared_counters_state,
+ AudioProcessingImplLockTest* test_framework,
+ TestConfig* test_config,
+ AudioProcessing* apm)
+ : rand_gen_(rand_gen),
+ render_call_event_(render_call_event),
+ capture_call_event_(capture_call_event),
+ frame_counters_(shared_counters_state),
+ test_(test_framework),
+ test_config_(test_config),
+ apm_(apm),
+ frame_data_(max_frame_size) {}
+
+// Implements the callback functionality for the capture thread.
+bool CaptureProcessor::Process() {
+ // Sleep a random time to simulate thread jitter.
+ SleepRandomMs(3, rand_gen_);
+
+ // Check whether the test is done.
+ if (test_->MaybeEndTest()) {
+ return false;
+ }
+
+ // Ensure that the numbers of render and capture calls do not
+ // differ too much.
+ if (frame_counters_->CaptureMinusRenderCounters() > kMaxCallDifference) {
+ render_call_event_->Wait(rtc::Event::kForever);
+ }
+
+ // Apply any specified capture side APM non-processing runtime calls.
+ ApplyRuntimeSettingScheme();
+
+ // Apply the capture side processing call.
+ CallApmCaptureSide();
+
+ // Increase the number of capture-side calls.
+ frame_counters_->IncreaseCaptureCounter();
+
+ // Flag to the render thread that another capture API call has occurred
+ // by triggering this thread's call event.
+ capture_call_event_->Set();
+
+ return true;
+}
+
+// Prepares a frame with relevant audio data and metadata.
+void CaptureProcessor::PrepareFrame() {
+ // Restrict to a common fixed sample rate if the AudioFrame
+ // interface is used.
+ if (test_config_->capture_api_function ==
+ CaptureApiImpl::ProcessStreamImpl1) {
+ frame_data_.input_sample_rate_hz = test_config_->initial_sample_rate_hz;
+ frame_data_.output_sample_rate_hz = test_config_->initial_sample_rate_hz;
+ }
+
+ // Prepare the AudioFrame data and metadata.
+ frame_data_.input_samples_per_channel =
+ frame_data_.input_sample_rate_hz * AudioProcessing::kChunkSizeMs / 1000;
+ frame_data_.frame.sample_rate_hz_ = frame_data_.input_sample_rate_hz;
+ frame_data_.frame.num_channels_ = frame_data_.input_number_of_channels;
+ frame_data_.frame.samples_per_channel_ =
+ frame_data_.input_samples_per_channel;
+ PopulateAudioFrame(&frame_data_.frame, kCaptureInputFixLevel, rand_gen_);
+
+ // Prepare the float audio input data and metadata.
+ frame_data_.input_stream_config.set_sample_rate_hz(
+ frame_data_.input_sample_rate_hz);
+ frame_data_.input_stream_config.set_num_channels(
+ frame_data_.input_number_of_channels);
+ frame_data_.input_stream_config.set_has_keyboard(false);
+ PopulateAudioFrame(&frame_data_.input_frame[0], kCaptureInputFloatLevel,
+ frame_data_.input_number_of_channels,
+ frame_data_.input_samples_per_channel, rand_gen_);
+ frame_data_.input_channel_layout =
+ (frame_data_.input_number_of_channels == 1
+ ? AudioProcessing::ChannelLayout::kMono
+ : AudioProcessing::ChannelLayout::kStereo);
+
+ // Prepare the float audio output data and metadata.
+ frame_data_.output_samples_per_channel =
+ frame_data_.output_sample_rate_hz * AudioProcessing::kChunkSizeMs / 1000;
+ frame_data_.output_stream_config.set_sample_rate_hz(
+ frame_data_.output_sample_rate_hz);
+ frame_data_.output_stream_config.set_num_channels(
+ frame_data_.output_number_of_channels);
+ frame_data_.output_stream_config.set_has_keyboard(false);
+ frame_data_.output_channel_layout =
+ (frame_data_.output_number_of_channels == 1
+ ? AudioProcessing::ChannelLayout::kMono
+ : AudioProcessing::ChannelLayout::kStereo);
+}
+
+// Applies the capture side processing API call.
+void CaptureProcessor::CallApmCaptureSide() {
+ // Prepare a proper capture side processing API call input.
+ PrepareFrame();
+
+  // Set the stream delay.
+ apm_->set_stream_delay_ms(30);
+
+ // Call the specified capture side API processing method.
+ int result = AudioProcessing::kNoError;
+ switch (test_config_->capture_api_function) {
+ case CaptureApiImpl::ProcessStreamImpl1:
+ result = apm_->ProcessStream(&frame_data_.frame);
+ break;
+ case CaptureApiImpl::ProcessStreamImpl2:
+ result = apm_->ProcessStream(
+ &frame_data_.input_frame[0], frame_data_.input_samples_per_channel,
+ frame_data_.input_sample_rate_hz, frame_data_.input_channel_layout,
+ frame_data_.output_sample_rate_hz, frame_data_.output_channel_layout,
+ &frame_data_.output_frame[0]);
+ break;
+ case CaptureApiImpl::ProcessStreamImpl3:
+ result = apm_->ProcessStream(
+ &frame_data_.input_frame[0], frame_data_.input_stream_config,
+ frame_data_.output_stream_config, &frame_data_.output_frame[0]);
+ break;
+ default:
+ FAIL();
+ }
+
+ // Check the return code for error.
+ ASSERT_EQ(AudioProcessing::kNoError, result);
+}
+
+// Applies any capture side APM API runtime calls and audio stream
+// characteristics specified by the scheme for the test.
+void CaptureProcessor::ApplyRuntimeSettingScheme() {
+ const int capture_count_local = frame_counters_->GetCaptureCounter();
+
+  // Update the number of channels and the sample rates for the input and
+  // output. Note that the counter frequencies that determine when the
+  // parameters are changed are chosen as prime numbers in order to ensure
+  // that the permutation scheme of the parameter settings keeps varying.
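+  // E.g., the 32 kHz input rate update below fires at multiples of 11 and
+  // the 48 kHz update at multiples of 73; these coincide only at multiples
+  // of 11 * 73 = 803 calls, so the applied combinations keep changing.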
+ switch (test_config_->runtime_parameter_setting_scheme) {
+ case RuntimeParameterSettingScheme::SparseStreamMetadataChangeScheme:
+ if (capture_count_local == 0)
+ frame_data_.input_sample_rate_hz = 16000;
+ else if (capture_count_local % 11 == 0)
+ frame_data_.input_sample_rate_hz = 32000;
+ else if (capture_count_local % 73 == 0)
+ frame_data_.input_sample_rate_hz = 48000;
+ else if (capture_count_local % 89 == 0)
+ frame_data_.input_sample_rate_hz = 16000;
+ else if (capture_count_local % 97 == 0)
+ frame_data_.input_sample_rate_hz = 8000;
+
+ if (capture_count_local == 0)
+ frame_data_.input_number_of_channels = 1;
+ else if (capture_count_local % 4 == 0)
+ frame_data_.input_number_of_channels =
+ (frame_data_.input_number_of_channels == 1 ? 2 : 1);
+
+ if (capture_count_local == 0)
+ frame_data_.output_sample_rate_hz = 16000;
+ else if (capture_count_local % 5 == 0)
+ frame_data_.output_sample_rate_hz = 32000;
+ else if (capture_count_local % 47 == 0)
+ frame_data_.output_sample_rate_hz = 48000;
+ else if (capture_count_local % 53 == 0)
+ frame_data_.output_sample_rate_hz = 16000;
+ else if (capture_count_local % 71 == 0)
+ frame_data_.output_sample_rate_hz = 8000;
+
+ if (capture_count_local == 0)
+ frame_data_.output_number_of_channels = 1;
+ else if (capture_count_local % 8 == 0)
+ frame_data_.output_number_of_channels =
+ (frame_data_.output_number_of_channels == 1 ? 2 : 1);
+ break;
+ case RuntimeParameterSettingScheme::ExtremeStreamMetadataChangeScheme:
+ if (capture_count_local % 2 == 0) {
+ frame_data_.input_number_of_channels = 1;
+ frame_data_.input_sample_rate_hz = 16000;
+ frame_data_.output_number_of_channels = 1;
+ frame_data_.output_sample_rate_hz = 16000;
+ } else {
+ frame_data_.input_number_of_channels =
+ (frame_data_.input_number_of_channels == 1 ? 2 : 1);
+ if (frame_data_.input_sample_rate_hz == 8000)
+ frame_data_.input_sample_rate_hz = 16000;
+ else if (frame_data_.input_sample_rate_hz == 16000)
+ frame_data_.input_sample_rate_hz = 32000;
+ else if (frame_data_.input_sample_rate_hz == 32000)
+ frame_data_.input_sample_rate_hz = 48000;
+ else if (frame_data_.input_sample_rate_hz == 48000)
+ frame_data_.input_sample_rate_hz = 8000;
+
+ frame_data_.output_number_of_channels =
+ (frame_data_.output_number_of_channels == 1 ? 2 : 1);
+ if (frame_data_.output_sample_rate_hz == 8000)
+ frame_data_.output_sample_rate_hz = 16000;
+ else if (frame_data_.output_sample_rate_hz == 16000)
+ frame_data_.output_sample_rate_hz = 32000;
+ else if (frame_data_.output_sample_rate_hz == 32000)
+ frame_data_.output_sample_rate_hz = 48000;
+ else if (frame_data_.output_sample_rate_hz == 48000)
+ frame_data_.output_sample_rate_hz = 8000;
+ }
+ break;
+ case RuntimeParameterSettingScheme::FixedMonoStreamMetadataScheme:
+ if (capture_count_local == 0) {
+ frame_data_.input_sample_rate_hz = 16000;
+ frame_data_.input_number_of_channels = 1;
+ frame_data_.output_sample_rate_hz = 16000;
+ frame_data_.output_number_of_channels = 1;
+ }
+ break;
+ case RuntimeParameterSettingScheme::FixedStereoStreamMetadataScheme:
+ if (capture_count_local == 0) {
+ frame_data_.input_sample_rate_hz = 16000;
+ frame_data_.input_number_of_channels = 2;
+ frame_data_.output_sample_rate_hz = 16000;
+ frame_data_.output_number_of_channels = 2;
+ }
+ break;
+ default:
+ FAIL();
+ }
+
+  // Make any specified runtime APM setter and getter calls.
+ switch (test_config_->runtime_parameter_setting_scheme) {
+ case RuntimeParameterSettingScheme::SparseStreamMetadataChangeScheme:
+ case RuntimeParameterSettingScheme::FixedMonoStreamMetadataScheme:
+ break;
+ case RuntimeParameterSettingScheme::ExtremeStreamMetadataChangeScheme:
+ case RuntimeParameterSettingScheme::FixedStereoStreamMetadataScheme:
+ if (capture_count_local % 2 == 0) {
+ ASSERT_EQ(AudioProcessing::Error::kNoError,
+ apm_->set_stream_delay_ms(30));
+ apm_->set_stream_key_pressed(true);
+ apm_->set_delay_offset_ms(15);
+ EXPECT_EQ(apm_->delay_offset_ms(), 15);
+ } else {
+ ASSERT_EQ(AudioProcessing::Error::kNoError,
+ apm_->set_stream_delay_ms(50));
+ apm_->set_stream_key_pressed(false);
+ apm_->set_delay_offset_ms(20);
+ EXPECT_EQ(apm_->delay_offset_ms(), 20);
+ apm_->delay_offset_ms();
+ }
+ break;
+ default:
+ FAIL();
+ }
+
+  // Restrict the number of output channels so that it does not exceed
+  // the number of input channels.
+ frame_data_.output_number_of_channels =
+ std::min(frame_data_.output_number_of_channels,
+ frame_data_.input_number_of_channels);
+}
+
+const float RenderProcessor::kRenderInputFloatLevel = 0.5f;
+
+RenderProcessor::RenderProcessor(int max_frame_size,
+ RandomGenerator* rand_gen,
+ rtc::Event* render_call_event,
+ rtc::Event* capture_call_event,
+ FrameCounters* shared_counters_state,
+ AudioProcessingImplLockTest* test_framework,
+ TestConfig* test_config,
+ AudioProcessing* apm)
+ : rand_gen_(rand_gen),
+ render_call_event_(render_call_event),
+ capture_call_event_(capture_call_event),
+ frame_counters_(shared_counters_state),
+ test_(test_framework),
+ test_config_(test_config),
+ apm_(apm),
+ frame_data_(max_frame_size) {}
+
+// Implements the callback functionality for the render thread.
+bool RenderProcessor::Process() {
+ // Conditional wait to ensure that a capture call has been done
+ // before the first render call is performed (implicitly
+ // required by the APM API).
+ if (first_render_call_) {
+ capture_call_event_->Wait(rtc::Event::kForever);
+ first_render_call_ = false;
+ }
+
+ // Sleep a random time to simulate thread jitter.
+ SleepRandomMs(3, rand_gen_);
+
+ // Check whether the test is done.
+ if (test_->MaybeEndTest()) {
+ return false;
+ }
+
+  // Ensure that the numbers of render and capture calls do not
+  // differ too much.
+ if (frame_counters_->RenderMinusCaptureCounters() > kMaxCallDifference) {
+ capture_call_event_->Wait(rtc::Event::kForever);
+ }
+
+ // Apply any specified render side APM non-processing runtime calls.
+ ApplyRuntimeSettingScheme();
+
+ // Apply the render side processing call.
+ CallApmRenderSide();
+
+ // Increase the number of render-side calls.
+ frame_counters_->IncreaseRenderCounter();
+
+  // Flag to the capture thread that another render API call has occurred
+  // by triggering this thread's call event.
+ render_call_event_->Set();
+ return true;
+}
+
+// Prepares the render side frame and the accompanying metadata
+// with the appropriate information.
+void RenderProcessor::PrepareFrame() {
+  // Restrict to a common fixed sample rate if the AudioFrame interface is
+  // used, or if the mobile AEC is not being tested.
+ if ((test_config_->render_api_function ==
+ RenderApiImpl::AnalyzeReverseStreamImpl1) ||
+ (test_config_->render_api_function ==
+ RenderApiImpl::ProcessReverseStreamImpl1) ||
+ (test_config_->aec_type !=
+ AecType::BasicWebRtcAecSettingsWithAecMobile)) {
+ frame_data_.input_sample_rate_hz = test_config_->initial_sample_rate_hz;
+ frame_data_.output_sample_rate_hz = test_config_->initial_sample_rate_hz;
+ }
+
+  // Prepare the AudioFrame data and metadata.
+ frame_data_.input_samples_per_channel =
+ frame_data_.input_sample_rate_hz * AudioProcessing::kChunkSizeMs / 1000;
+ frame_data_.frame.sample_rate_hz_ = frame_data_.input_sample_rate_hz;
+ frame_data_.frame.num_channels_ = frame_data_.input_number_of_channels;
+ frame_data_.frame.samples_per_channel_ =
+ frame_data_.input_samples_per_channel;
+ PopulateAudioFrame(&frame_data_.frame, kRenderInputFixLevel, rand_gen_);
+
+ // Prepare the float audio input data and metadata.
+ frame_data_.input_stream_config.set_sample_rate_hz(
+ frame_data_.input_sample_rate_hz);
+ frame_data_.input_stream_config.set_num_channels(
+ frame_data_.input_number_of_channels);
+ frame_data_.input_stream_config.set_has_keyboard(false);
+ PopulateAudioFrame(&frame_data_.input_frame[0], kRenderInputFloatLevel,
+ frame_data_.input_number_of_channels,
+ frame_data_.input_samples_per_channel, rand_gen_);
+ frame_data_.input_channel_layout =
+ (frame_data_.input_number_of_channels == 1
+ ? AudioProcessing::ChannelLayout::kMono
+ : AudioProcessing::ChannelLayout::kStereo);
+
+ // Prepare the float audio output data and metadata.
+ frame_data_.output_samples_per_channel =
+ frame_data_.output_sample_rate_hz * AudioProcessing::kChunkSizeMs / 1000;
+ frame_data_.output_stream_config.set_sample_rate_hz(
+ frame_data_.output_sample_rate_hz);
+ frame_data_.output_stream_config.set_num_channels(
+ frame_data_.output_number_of_channels);
+ frame_data_.output_stream_config.set_has_keyboard(false);
+ frame_data_.output_channel_layout =
+ (frame_data_.output_number_of_channels == 1
+ ? AudioProcessing::ChannelLayout::kMono
+ : AudioProcessing::ChannelLayout::kStereo);
+}
+
+// Makes the render side processing API call.
+void RenderProcessor::CallApmRenderSide() {
+ // Prepare a proper render side processing API call input.
+ PrepareFrame();
+
+ // Call the specified render side API processing method.
+ int result = AudioProcessing::kNoError;
+ switch (test_config_->render_api_function) {
+ case RenderApiImpl::ProcessReverseStreamImpl1:
+ result = apm_->ProcessReverseStream(&frame_data_.frame);
+ break;
+ case RenderApiImpl::ProcessReverseStreamImpl2:
+ result = apm_->ProcessReverseStream(
+ &frame_data_.input_frame[0], frame_data_.input_stream_config,
+ frame_data_.output_stream_config, &frame_data_.output_frame[0]);
+ break;
+ case RenderApiImpl::AnalyzeReverseStreamImpl1:
+ result = apm_->AnalyzeReverseStream(&frame_data_.frame);
+ break;
+ case RenderApiImpl::AnalyzeReverseStreamImpl2:
+ result = apm_->AnalyzeReverseStream(
+ &frame_data_.input_frame[0], frame_data_.input_samples_per_channel,
+ frame_data_.input_sample_rate_hz, frame_data_.input_channel_layout);
+ break;
+ default:
+ FAIL();
+ }
+
+ // Check the return code for error.
+ ASSERT_EQ(AudioProcessing::kNoError, result);
+}
+
+// Applies any render side APM API runtime calls and audio stream
+// characteristics specified by the scheme for the test.
+void RenderProcessor::ApplyRuntimeSettingScheme() {
+ const int render_count_local = frame_counters_->GetRenderCounter();
+
+  // Update the number of channels and the sample rates for the input and
+  // output. Note that the counter frequencies that determine when the
+  // parameters are changed are chosen as prime numbers in order to ensure
+  // that the permutation scheme of the parameter settings keeps varying.
+ switch (test_config_->runtime_parameter_setting_scheme) {
+ case RuntimeParameterSettingScheme::SparseStreamMetadataChangeScheme:
+ if (render_count_local == 0)
+ frame_data_.input_sample_rate_hz = 16000;
+ else if (render_count_local % 47 == 0)
+ frame_data_.input_sample_rate_hz = 32000;
+ else if (render_count_local % 71 == 0)
+ frame_data_.input_sample_rate_hz = 48000;
+ else if (render_count_local % 79 == 0)
+ frame_data_.input_sample_rate_hz = 16000;
+ else if (render_count_local % 83 == 0)
+ frame_data_.input_sample_rate_hz = 8000;
+
+ if (render_count_local == 0)
+ frame_data_.input_number_of_channels = 1;
+ else if (render_count_local % 4 == 0)
+ frame_data_.input_number_of_channels =
+ (frame_data_.input_number_of_channels == 1 ? 2 : 1);
+
+ if (render_count_local == 0)
+ frame_data_.output_sample_rate_hz = 16000;
+ else if (render_count_local % 17 == 0)
+ frame_data_.output_sample_rate_hz = 32000;
+ else if (render_count_local % 19 == 0)
+ frame_data_.output_sample_rate_hz = 48000;
+ else if (render_count_local % 29 == 0)
+ frame_data_.output_sample_rate_hz = 16000;
+ else if (render_count_local % 61 == 0)
+ frame_data_.output_sample_rate_hz = 8000;
+
+ if (render_count_local == 0)
+ frame_data_.output_number_of_channels = 1;
+ else if (render_count_local % 8 == 0)
+ frame_data_.output_number_of_channels =
+ (frame_data_.output_number_of_channels == 1 ? 2 : 1);
+ break;
+ case RuntimeParameterSettingScheme::ExtremeStreamMetadataChangeScheme:
+ if (render_count_local == 0) {
+ frame_data_.input_number_of_channels = 1;
+ frame_data_.input_sample_rate_hz = 16000;
+ frame_data_.output_number_of_channels = 1;
+ frame_data_.output_sample_rate_hz = 16000;
+ } else {
+ frame_data_.input_number_of_channels =
+ (frame_data_.input_number_of_channels == 1 ? 2 : 1);
+ if (frame_data_.input_sample_rate_hz == 8000)
+ frame_data_.input_sample_rate_hz = 16000;
+ else if (frame_data_.input_sample_rate_hz == 16000)
+ frame_data_.input_sample_rate_hz = 32000;
+ else if (frame_data_.input_sample_rate_hz == 32000)
+ frame_data_.input_sample_rate_hz = 48000;
+ else if (frame_data_.input_sample_rate_hz == 48000)
+ frame_data_.input_sample_rate_hz = 8000;
+
+ frame_data_.output_number_of_channels =
+ (frame_data_.output_number_of_channels == 1 ? 2 : 1);
+ if (frame_data_.output_sample_rate_hz == 8000)
+ frame_data_.output_sample_rate_hz = 16000;
+ else if (frame_data_.output_sample_rate_hz == 16000)
+ frame_data_.output_sample_rate_hz = 32000;
+ else if (frame_data_.output_sample_rate_hz == 32000)
+ frame_data_.output_sample_rate_hz = 48000;
+ else if (frame_data_.output_sample_rate_hz == 48000)
+ frame_data_.output_sample_rate_hz = 8000;
+ }
+ break;
+ case RuntimeParameterSettingScheme::FixedMonoStreamMetadataScheme:
+ if (render_count_local == 0) {
+ frame_data_.input_sample_rate_hz = 16000;
+ frame_data_.input_number_of_channels = 1;
+ frame_data_.output_sample_rate_hz = 16000;
+ frame_data_.output_number_of_channels = 1;
+ }
+ break;
+ case RuntimeParameterSettingScheme::FixedStereoStreamMetadataScheme:
+ if (render_count_local == 0) {
+ frame_data_.input_sample_rate_hz = 16000;
+ frame_data_.input_number_of_channels = 2;
+ frame_data_.output_sample_rate_hz = 16000;
+ frame_data_.output_number_of_channels = 2;
+ }
+ break;
+ default:
+ FAIL();
+ }
+
+  // Restrict the number of output channels so that it does not exceed
+  // the number of input channels.
+ frame_data_.output_number_of_channels =
+ std::min(frame_data_.output_number_of_channels,
+ frame_data_.input_number_of_channels);
+}
+
+} // anonymous namespace
+
+TEST_P(AudioProcessingImplLockTest, LockTest) {
+ // Run test and verify that it did not time out.
+ ASSERT_TRUE(RunTest());
+}
+
+// Instantiate tests from the extreme test configuration set.
+INSTANTIATE_TEST_CASE_P(
+ DISABLED_AudioProcessingImplLockExtensive,
+ AudioProcessingImplLockTest,
+ ::testing::ValuesIn(TestConfig::GenerateExtensiveTestConfigs()));
+
+INSTANTIATE_TEST_CASE_P(
+ AudioProcessingImplLockBrief,
+ AudioProcessingImplLockTest,
+ ::testing::ValuesIn(TestConfig::GenerateBriefTestConfigs()));
+
+} // namespace webrtc
diff --git a/webrtc/modules/audio_processing/audio_processing_impl_unittest.cc b/webrtc/modules/audio_processing/audio_processing_impl_unittest.cc
index f4c36d0009..ed20daaa61 100644
--- a/webrtc/modules/audio_processing/audio_processing_impl_unittest.cc
+++ b/webrtc/modules/audio_processing/audio_processing_impl_unittest.cc
@@ -14,7 +14,7 @@
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/config.h"
#include "webrtc/modules/audio_processing/test/test_utils.h"
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
using ::testing::Invoke;
using ::testing::Return;
diff --git a/webrtc/modules/audio_processing/audio_processing_performance_unittest.cc b/webrtc/modules/audio_processing/audio_processing_performance_unittest.cc
new file mode 100644
index 0000000000..0c8c060ea3
--- /dev/null
+++ b/webrtc/modules/audio_processing/audio_processing_performance_unittest.cc
@@ -0,0 +1,724 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "webrtc/modules/audio_processing/audio_processing_impl.h"
+
+#include <math.h>
+
+#include <algorithm>
+#include <vector>
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/base/array_view.h"
+#include "webrtc/base/criticalsection.h"
+#include "webrtc/base/platform_thread.h"
+#include "webrtc/base/random.h"
+#include "webrtc/base/safe_conversions.h"
+#include "webrtc/config.h"
+#include "webrtc/modules/audio_processing/test/test_utils.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/system_wrappers/include/clock.h"
+#include "webrtc/system_wrappers/include/event_wrapper.h"
+#include "webrtc/system_wrappers/include/sleep.h"
+#include "webrtc/test/testsupport/perf_test.h"
+
+namespace webrtc {
+
+namespace {
+
+static const bool kPrintAllDurations = false;
+
+class CallSimulator;
+
+// Type of processor (render or capture side APM API calls) used in the test.
+enum class ProcessorType { kRender, kCapture };
+
+// Variant of APM processing settings to use in the test.
+enum class SettingsType {
+ kDefaultApmDesktop,
+ kDefaultApmMobile,
+ kDefaultApmDesktopAndBeamformer,
+ kDefaultApmDesktopAndIntelligibilityEnhancer,
+ kAllSubmodulesTurnedOff,
+ kDefaultDesktopApmWithoutDelayAgnostic,
+ kDefaultDesktopApmWithoutExtendedFilter
+};
+
+// Variables related to the audio data and formats.
+struct AudioFrameData {
+ explicit AudioFrameData(size_t max_frame_size) {
+ // Set up the two-dimensional arrays needed for the APM API calls.
+    input_frame_channels.resize(2 * max_frame_size);
+    input_frame.resize(2);
+    input_frame[0] = &input_frame_channels[0];
+    input_frame[1] = &input_frame_channels[max_frame_size];
+
+ output_frame_channels.resize(2 * max_frame_size);
+ output_frame.resize(2);
+ output_frame[0] = &output_frame_channels[0];
+ output_frame[1] = &output_frame_channels[max_frame_size];
+ }
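+  // The buffers above use a planar (non-interleaved) layout: one contiguous
+  // allocation of 2 * max_frame_size floats per direction, with
+  // input_frame[ch] and output_frame[ch] pointing at the start of each
+  // channel, as expected by the float ProcessStream() and
+  // ProcessReverseStream() overloads.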
+
+ std::vector<float> output_frame_channels;
+ std::vector<float*> output_frame;
+  std::vector<float> input_frame_channels;
+ std::vector<float*> input_frame;
+ StreamConfig input_stream_config;
+ StreamConfig output_stream_config;
+};
+
+// The configuration for the test.
+struct SimulationConfig {
+ SimulationConfig(int sample_rate_hz, SettingsType simulation_settings)
+ : sample_rate_hz(sample_rate_hz),
+ simulation_settings(simulation_settings) {}
+
+ static std::vector<SimulationConfig> GenerateSimulationConfigs() {
+ std::vector<SimulationConfig> simulation_configs;
+#ifndef WEBRTC_ANDROID
+ const SettingsType desktop_settings[] = {
+ SettingsType::kDefaultApmDesktop, SettingsType::kAllSubmodulesTurnedOff,
+ SettingsType::kDefaultDesktopApmWithoutDelayAgnostic,
+ SettingsType::kDefaultDesktopApmWithoutExtendedFilter};
+
+ const int desktop_sample_rates[] = {8000, 16000, 32000, 48000};
+
+ for (auto sample_rate : desktop_sample_rates) {
+ for (auto settings : desktop_settings) {
+ simulation_configs.push_back(SimulationConfig(sample_rate, settings));
+ }
+ }
+
+ const SettingsType intelligibility_enhancer_settings[] = {
+ SettingsType::kDefaultApmDesktopAndIntelligibilityEnhancer};
+
+ const int intelligibility_enhancer_sample_rates[] = {8000, 16000, 32000,
+ 48000};
+
+ for (auto sample_rate : intelligibility_enhancer_sample_rates) {
+ for (auto settings : intelligibility_enhancer_settings) {
+ simulation_configs.push_back(SimulationConfig(sample_rate, settings));
+ }
+ }
+
+ const SettingsType beamformer_settings[] = {
+ SettingsType::kDefaultApmDesktopAndBeamformer};
+
+ const int beamformer_sample_rates[] = {8000, 16000, 32000, 48000};
+
+ for (auto sample_rate : beamformer_sample_rates) {
+ for (auto settings : beamformer_settings) {
+ simulation_configs.push_back(SimulationConfig(sample_rate, settings));
+ }
+ }
+#endif
+
+ const SettingsType mobile_settings[] = {SettingsType::kDefaultApmMobile};
+
+ const int mobile_sample_rates[] = {8000, 16000};
+
+ for (auto sample_rate : mobile_sample_rates) {
+ for (auto settings : mobile_settings) {
+ simulation_configs.push_back(SimulationConfig(sample_rate, settings));
+ }
+ }
+
+ return simulation_configs;
+ }
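+  // On desktop the generation above yields 4 * 4 + 4 + 4 + 2 = 26 parameter
+  // combinations; on Android only the two mobile combinations are produced.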
+
+ std::string SettingsDescription() const {
+ std::string description;
+ switch (simulation_settings) {
+ case SettingsType::kDefaultApmMobile:
+ description = "DefaultApmMobile";
+ break;
+ case SettingsType::kDefaultApmDesktop:
+ description = "DefaultApmDesktop";
+ break;
+ case SettingsType::kDefaultApmDesktopAndBeamformer:
+ description = "DefaultApmDesktopAndBeamformer";
+ break;
+ case SettingsType::kDefaultApmDesktopAndIntelligibilityEnhancer:
+ description = "DefaultApmDesktopAndIntelligibilityEnhancer";
+ break;
+ case SettingsType::kAllSubmodulesTurnedOff:
+ description = "AllSubmodulesOff";
+ break;
+ case SettingsType::kDefaultDesktopApmWithoutDelayAgnostic:
+ description = "DefaultDesktopApmWithoutDelayAgnostic";
+ break;
+ case SettingsType::kDefaultDesktopApmWithoutExtendedFilter:
+ description = "DefaultDesktopApmWithoutExtendedFilter";
+ break;
+ }
+ return description;
+ }
+
+ int sample_rate_hz = 16000;
+ SettingsType simulation_settings = SettingsType::kDefaultApmDesktop;
+};
+
+// Handler for the frame counters.
+class FrameCounters {
+ public:
+ void IncreaseRenderCounter() {
+ rtc::CritScope cs(&crit_);
+ render_count_++;
+ }
+
+ void IncreaseCaptureCounter() {
+ rtc::CritScope cs(&crit_);
+ capture_count_++;
+ }
+
+ int GetCaptureCounter() const {
+ rtc::CritScope cs(&crit_);
+ return capture_count_;
+ }
+
+ int GetRenderCounter() const {
+ rtc::CritScope cs(&crit_);
+ return render_count_;
+ }
+
+ int CaptureMinusRenderCounters() const {
+ rtc::CritScope cs(&crit_);
+ return capture_count_ - render_count_;
+ }
+
+ int RenderMinusCaptureCounters() const {
+ return -CaptureMinusRenderCounters();
+ }
+
+  bool BothCountersExceedThreshold(int threshold) const {
+ rtc::CritScope cs(&crit_);
+ return (render_count_ > threshold && capture_count_ > threshold);
+ }
+
+ private:
+ mutable rtc::CriticalSection crit_;
+ int render_count_ GUARDED_BY(crit_) = 0;
+ int capture_count_ GUARDED_BY(crit_) = 0;
+};
+
+// Class that protects a flag using a lock.
+class LockedFlag {
+ public:
+ bool get_flag() const {
+ rtc::CritScope cs(&crit_);
+ return flag_;
+ }
+
+ void set_flag() {
+ rtc::CritScope cs(&crit_);
+ flag_ = true;
+ }
+
+ private:
+ mutable rtc::CriticalSection crit_;
+ bool flag_ GUARDED_BY(crit_) = false;
+};
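+// A std::atomic<bool> with acquire/release semantics could serve the same
+// purpose; the lock-based flag simply keeps the synchronization style
+// consistent with FrameCounters above.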
+
+// Parent class for the thread processors.
+class TimedThreadApiProcessor {
+ public:
+ TimedThreadApiProcessor(ProcessorType processor_type,
+ Random* rand_gen,
+ FrameCounters* shared_counters_state,
+ LockedFlag* capture_call_checker,
+ CallSimulator* test_framework,
+ const SimulationConfig* simulation_config,
+ AudioProcessing* apm,
+ int num_durations_to_store,
+ float input_level,
+ int num_channels)
+ : rand_gen_(rand_gen),
+ frame_counters_(shared_counters_state),
+ capture_call_checker_(capture_call_checker),
+ test_(test_framework),
+ simulation_config_(simulation_config),
+ apm_(apm),
+ frame_data_(kMaxFrameSize),
+ clock_(webrtc::Clock::GetRealTimeClock()),
+ num_durations_to_store_(num_durations_to_store),
+ input_level_(input_level),
+ processor_type_(processor_type),
+ num_channels_(num_channels) {
+ api_call_durations_.reserve(num_durations_to_store_);
+ }
+
+ // Implements the callback functionality for the threads.
+ bool Process();
+
+ // Method for printing out the simulation statistics.
+ void print_processor_statistics(std::string processor_name) const {
+ const std::string modifier = "_api_call_duration";
+
+ // Lambda function for creating a test printout string.
+ auto create_mean_and_std_string = [](int64_t average,
+ int64_t standard_dev) {
+ std::string s = std::to_string(average);
+ s += ", ";
+ s += std::to_string(standard_dev);
+ return s;
+ };
+
+ const std::string sample_rate_name =
+ "_" + std::to_string(simulation_config_->sample_rate_hz) + "Hz";
+
+ webrtc::test::PrintResultMeanAndError(
+ "apm_timing", sample_rate_name, processor_name,
+ create_mean_and_std_string(GetDurationAverage(),
+ GetDurationStandardDeviation()),
+ "us", false);
+
+ if (kPrintAllDurations) {
+ std::string value_string = "";
+ for (int64_t duration : api_call_durations_) {
+ value_string += std::to_string(duration) + ",";
+ }
+ webrtc::test::PrintResultList("apm_call_durations", sample_rate_name,
+ processor_name, value_string, "us", false);
+ }
+ }
+
+ void AddDuration(int64_t duration) {
+ if (api_call_durations_.size() < num_durations_to_store_) {
+ api_call_durations_.push_back(duration);
+ }
+ }
+
+ private:
+ static const int kMaxCallDifference = 10;
+ static const int kMaxFrameSize = 480;
+ static const int kNumInitializationFrames = 5;
+
+ int64_t GetDurationStandardDeviation() const {
+ double variance = 0;
+ const int64_t average_duration = GetDurationAverage();
+ for (size_t k = kNumInitializationFrames; k < api_call_durations_.size();
+ k++) {
+ int64_t tmp = api_call_durations_[k] - average_duration;
+ variance += static_cast<double>(tmp * tmp);
+ }
+ const int denominator = rtc::checked_cast<int>(api_call_durations_.size()) -
+ kNumInitializationFrames;
+ return (denominator > 0
+ ? rtc::checked_cast<int64_t>(sqrt(variance / denominator))
+ : -1);
+ }
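+  // The above computes the population standard deviation,
+  // sqrt(sum((d_k - mean)^2) / N), over the N durations recorded after the
+  // first kNumInitializationFrames calls, which are excluded to avoid
+  // startup transients.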
+
+ int64_t GetDurationAverage() const {
+ int64_t average_duration = 0;
+ for (size_t k = kNumInitializationFrames; k < api_call_durations_.size();
+ k++) {
+ average_duration += api_call_durations_[k];
+ }
+ const int denominator = rtc::checked_cast<int>(api_call_durations_.size()) -
+ kNumInitializationFrames;
+ return (denominator > 0 ? average_duration / denominator : -1);
+ }
+
+ int ProcessCapture() {
+ // Set the stream delay.
+ apm_->set_stream_delay_ms(30);
+
+ // Call and time the specified capture side API processing method.
+ const int64_t start_time = clock_->TimeInMicroseconds();
+ const int result = apm_->ProcessStream(
+ &frame_data_.input_frame[0], frame_data_.input_stream_config,
+ frame_data_.output_stream_config, &frame_data_.output_frame[0]);
+ const int64_t end_time = clock_->TimeInMicroseconds();
+
+ frame_counters_->IncreaseCaptureCounter();
+
+ AddDuration(end_time - start_time);
+
+ if (first_process_call_) {
+      // Flag that the capture side has been called at least once. This is
+      // needed to ensure that a capture call has been made before the
+      // first render call is performed (implicitly required by the APM
+      // API).
+ capture_call_checker_->set_flag();
+ first_process_call_ = false;
+ }
+ return result;
+ }
+
+ bool ReadyToProcessCapture() {
+ return (frame_counters_->CaptureMinusRenderCounters() <=
+ kMaxCallDifference);
+ }
+
+ int ProcessRender() {
+ // Call and time the specified render side API processing method.
+ const int64_t start_time = clock_->TimeInMicroseconds();
+ const int result = apm_->ProcessReverseStream(
+ &frame_data_.input_frame[0], frame_data_.input_stream_config,
+ frame_data_.output_stream_config, &frame_data_.output_frame[0]);
+ const int64_t end_time = clock_->TimeInMicroseconds();
+ frame_counters_->IncreaseRenderCounter();
+
+ AddDuration(end_time - start_time);
+
+ return result;
+ }
+
+ bool ReadyToProcessRender() {
+    // Do not process until at least one capture call has been made
+    // (this is implicitly required by the APM API).
+ if (first_process_call_ && !capture_call_checker_->get_flag()) {
+ return false;
+ }
+
+    // Ensure that the numbers of render and capture calls do not differ
+    // too much.
+ if (frame_counters_->RenderMinusCaptureCounters() > kMaxCallDifference) {
+ return false;
+ }
+
+ first_process_call_ = false;
+ return true;
+ }
+
+ void PrepareFrame() {
+ // Lambda function for populating a float multichannel audio frame
+ // with random data.
+ auto populate_audio_frame = [](float amplitude, size_t num_channels,
+ size_t samples_per_channel, Random* rand_gen,
+ float** frame) {
+ for (size_t ch = 0; ch < num_channels; ch++) {
+ for (size_t k = 0; k < samples_per_channel; k++) {
+          // Store a random float with a value between -amplitude and
+          // +amplitude.
+ frame[ch][k] = amplitude * (2 * rand_gen->Rand<float>() - 1);
+ }
+ }
+ };
+
+ // Prepare the audio input data and metadata.
+ frame_data_.input_stream_config.set_sample_rate_hz(
+ simulation_config_->sample_rate_hz);
+ frame_data_.input_stream_config.set_num_channels(num_channels_);
+ frame_data_.input_stream_config.set_has_keyboard(false);
+ populate_audio_frame(input_level_, num_channels_,
+ (simulation_config_->sample_rate_hz *
+ AudioProcessing::kChunkSizeMs / 1000),
+ rand_gen_, &frame_data_.input_frame[0]);
+
+ // Prepare the float audio output data and metadata.
+ frame_data_.output_stream_config.set_sample_rate_hz(
+ simulation_config_->sample_rate_hz);
+ frame_data_.output_stream_config.set_num_channels(1);
+ frame_data_.output_stream_config.set_has_keyboard(false);
+ }
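+  // Note that PrepareFrame() always configures a mono output, so in the
+  // beamformer configuration the two capture input channels are processed
+  // down to a single output channel.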
+
+ bool ReadyToProcess() {
+ switch (processor_type_) {
+ case ProcessorType::kRender:
+ return ReadyToProcessRender();
+ case ProcessorType::kCapture:
+ return ReadyToProcessCapture();
+ }
+
+ // Should not be reached, but the return statement is needed for the code to
+ // build successfully on Android.
+ RTC_NOTREACHED();
+ return false;
+ }
+
+ Random* rand_gen_ = nullptr;
+ FrameCounters* frame_counters_ = nullptr;
+ LockedFlag* capture_call_checker_ = nullptr;
+ CallSimulator* test_ = nullptr;
+ const SimulationConfig* const simulation_config_ = nullptr;
+ AudioProcessing* apm_ = nullptr;
+ AudioFrameData frame_data_;
+ webrtc::Clock* clock_;
+ const size_t num_durations_to_store_;
+ std::vector<int64_t> api_call_durations_;
+ const float input_level_;
+ bool first_process_call_ = true;
+ const ProcessorType processor_type_;
+ const int num_channels_ = 1;
+};
+
+// Class for managing the test simulation.
+class CallSimulator : public ::testing::TestWithParam<SimulationConfig> {
+ public:
+ CallSimulator()
+ : test_complete_(EventWrapper::Create()),
+ render_thread_(
+ new rtc::PlatformThread(RenderProcessorThreadFunc, this, "render")),
+ capture_thread_(new rtc::PlatformThread(CaptureProcessorThreadFunc,
+ this,
+ "capture")),
+ rand_gen_(42U),
+        simulation_config_(GetParam()) {}
+
+ // Run the call simulation with a timeout.
+ EventTypeWrapper Run() {
+ StartThreads();
+
+ EventTypeWrapper result = test_complete_->Wait(kTestTimeout);
+
+ StopThreads();
+
+ render_thread_state_->print_processor_statistics(
+ simulation_config_.SettingsDescription() + "_render");
+ capture_thread_state_->print_processor_statistics(
+ simulation_config_.SettingsDescription() + "_capture");
+
+ return result;
+ }
+
+ // Tests whether all the required render and capture side calls have been
+ // done.
+ bool MaybeEndTest() {
+    if (frame_counters_.BothCountersExceedThreshold(kMinNumFramesToProcess)) {
+ test_complete_->Set();
+ return true;
+ }
+ return false;
+ }
+
+ private:
+ static const float kCaptureInputFloatLevel;
+ static const float kRenderInputFloatLevel;
+ static const int kMinNumFramesToProcess = 150;
+ static const int32_t kTestTimeout = 3 * 10 * kMinNumFramesToProcess;
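+  // I.e. nominally 3 x 150 frames x 10 ms = 4500, which (assuming the
+  // Wait() timeout is in milliseconds) is three times the minimum amount
+  // of audio to process.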
+
+ // ::testing::TestWithParam<> implementation.
+ void TearDown() override { StopThreads(); }
+
+ // Stop all running threads.
+ void StopThreads() {
+ render_thread_->Stop();
+ capture_thread_->Stop();
+ }
+
+ // Simulator and APM setup.
+ void SetUp() override {
+ // Lambda function for setting the default APM runtime settings for desktop.
+ auto set_default_desktop_apm_runtime_settings = [](AudioProcessing* apm) {
+ ASSERT_EQ(apm->kNoError, apm->level_estimator()->Enable(true));
+ ASSERT_EQ(apm->kNoError, apm->gain_control()->Enable(true));
+ ASSERT_EQ(apm->kNoError,
+ apm->gain_control()->set_mode(GainControl::kAdaptiveDigital));
+ ASSERT_EQ(apm->kNoError, apm->gain_control()->Enable(true));
+ ASSERT_EQ(apm->kNoError, apm->noise_suppression()->Enable(true));
+ ASSERT_EQ(apm->kNoError, apm->voice_detection()->Enable(true));
+ ASSERT_EQ(apm->kNoError, apm->echo_control_mobile()->Enable(false));
+ ASSERT_EQ(apm->kNoError, apm->echo_cancellation()->Enable(true));
+ ASSERT_EQ(apm->kNoError, apm->echo_cancellation()->enable_metrics(true));
+ ASSERT_EQ(apm->kNoError,
+ apm->echo_cancellation()->enable_delay_logging(true));
+ };
+
+ // Lambda function for setting the default APM runtime settings for mobile.
+ auto set_default_mobile_apm_runtime_settings = [](AudioProcessing* apm) {
+ ASSERT_EQ(apm->kNoError, apm->level_estimator()->Enable(true));
+ ASSERT_EQ(apm->kNoError, apm->gain_control()->Enable(true));
+ ASSERT_EQ(apm->kNoError,
+ apm->gain_control()->set_mode(GainControl::kAdaptiveDigital));
+ ASSERT_EQ(apm->kNoError, apm->gain_control()->Enable(true));
+ ASSERT_EQ(apm->kNoError, apm->noise_suppression()->Enable(true));
+ ASSERT_EQ(apm->kNoError, apm->voice_detection()->Enable(true));
+ ASSERT_EQ(apm->kNoError, apm->echo_control_mobile()->Enable(true));
+ ASSERT_EQ(apm->kNoError, apm->echo_cancellation()->Enable(false));
+ };
+
+ // Lambda function for turning off all of the APM runtime settings
+ // submodules.
+ auto turn_off_default_apm_runtime_settings = [](AudioProcessing* apm) {
+ ASSERT_EQ(apm->kNoError, apm->level_estimator()->Enable(false));
+ ASSERT_EQ(apm->kNoError, apm->gain_control()->Enable(false));
+ ASSERT_EQ(apm->kNoError,
+ apm->gain_control()->set_mode(GainControl::kAdaptiveDigital));
+ ASSERT_EQ(apm->kNoError, apm->gain_control()->Enable(false));
+ ASSERT_EQ(apm->kNoError, apm->noise_suppression()->Enable(false));
+ ASSERT_EQ(apm->kNoError, apm->voice_detection()->Enable(false));
+ ASSERT_EQ(apm->kNoError, apm->echo_control_mobile()->Enable(false));
+ ASSERT_EQ(apm->kNoError, apm->echo_cancellation()->Enable(false));
+ ASSERT_EQ(apm->kNoError, apm->echo_cancellation()->enable_metrics(false));
+ ASSERT_EQ(apm->kNoError,
+ apm->echo_cancellation()->enable_delay_logging(false));
+ };
+
+ // Lambda function for adding default desktop APM settings to a config.
+ auto add_default_desktop_config = [](Config* config) {
+ config->Set<ExtendedFilter>(new ExtendedFilter(true));
+ config->Set<DelayAgnostic>(new DelayAgnostic(true));
+ };
+
+ // Lambda function for adding beamformer settings to a config.
+ auto add_beamformer_config = [](Config* config) {
+ const size_t num_mics = 2;
+ const std::vector<Point> array_geometry =
+ ParseArrayGeometry("0 0 0 0.05 0 0", num_mics);
+ RTC_CHECK_EQ(array_geometry.size(), num_mics);
+
+ config->Set<Beamforming>(
+ new Beamforming(true, array_geometry,
+ SphericalPointf(DegreesToRadians(90), 0.f, 1.f)));
+ };
+
+ int num_capture_channels = 1;
+ switch (simulation_config_.simulation_settings) {
+ case SettingsType::kDefaultApmMobile: {
+ apm_.reset(AudioProcessingImpl::Create());
+ ASSERT_TRUE(!!apm_);
+ set_default_mobile_apm_runtime_settings(apm_.get());
+ break;
+ }
+ case SettingsType::kDefaultApmDesktop: {
+ Config config;
+ add_default_desktop_config(&config);
+ apm_.reset(AudioProcessingImpl::Create(config));
+ ASSERT_TRUE(!!apm_);
+ set_default_desktop_apm_runtime_settings(apm_.get());
+ apm_->SetExtraOptions(config);
+ break;
+ }
+ case SettingsType::kDefaultApmDesktopAndBeamformer: {
+ Config config;
+ add_beamformer_config(&config);
+ add_default_desktop_config(&config);
+ apm_.reset(AudioProcessingImpl::Create(config));
+ ASSERT_TRUE(!!apm_);
+ set_default_desktop_apm_runtime_settings(apm_.get());
+ apm_->SetExtraOptions(config);
+ num_capture_channels = 2;
+ break;
+ }
+ case SettingsType::kDefaultApmDesktopAndIntelligibilityEnhancer: {
+ Config config;
+ config.Set<Intelligibility>(new Intelligibility(true));
+ add_default_desktop_config(&config);
+ apm_.reset(AudioProcessingImpl::Create(config));
+ ASSERT_TRUE(!!apm_);
+ set_default_desktop_apm_runtime_settings(apm_.get());
+ apm_->SetExtraOptions(config);
+ break;
+ }
+ case SettingsType::kAllSubmodulesTurnedOff: {
+ apm_.reset(AudioProcessingImpl::Create());
+ ASSERT_TRUE(!!apm_);
+ turn_off_default_apm_runtime_settings(apm_.get());
+ break;
+ }
+ case SettingsType::kDefaultDesktopApmWithoutDelayAgnostic: {
+ Config config;
+ config.Set<ExtendedFilter>(new ExtendedFilter(true));
+ config.Set<DelayAgnostic>(new DelayAgnostic(false));
+ apm_.reset(AudioProcessingImpl::Create(config));
+ ASSERT_TRUE(!!apm_);
+ set_default_desktop_apm_runtime_settings(apm_.get());
+ apm_->SetExtraOptions(config);
+ break;
+ }
+ case SettingsType::kDefaultDesktopApmWithoutExtendedFilter: {
+ Config config;
+ config.Set<ExtendedFilter>(new ExtendedFilter(false));
+ config.Set<DelayAgnostic>(new DelayAgnostic(true));
+ apm_.reset(AudioProcessingImpl::Create(config));
+ ASSERT_TRUE(!!apm_);
+ set_default_desktop_apm_runtime_settings(apm_.get());
+ apm_->SetExtraOptions(config);
+ break;
+ }
+ }
+
+ render_thread_state_.reset(new TimedThreadApiProcessor(
+ ProcessorType::kRender, &rand_gen_, &frame_counters_,
+ &capture_call_checker_, this, &simulation_config_, apm_.get(),
+ kMinNumFramesToProcess, kRenderInputFloatLevel, 1));
+ capture_thread_state_.reset(new TimedThreadApiProcessor(
+ ProcessorType::kCapture, &rand_gen_, &frame_counters_,
+ &capture_call_checker_, this, &simulation_config_, apm_.get(),
+ kMinNumFramesToProcess, kCaptureInputFloatLevel, num_capture_channels));
+ }
+
+ // Thread callback for the render thread.
+ static bool RenderProcessorThreadFunc(void* context) {
+ return reinterpret_cast<CallSimulator*>(context)
+ ->render_thread_state_->Process();
+ }
+
+ // Thread callback for the capture thread.
+ static bool CaptureProcessorThreadFunc(void* context) {
+ return reinterpret_cast<CallSimulator*>(context)
+ ->capture_thread_state_->Process();
+ }
+
+ // Start the threads used in the test.
+ void StartThreads() {
+ ASSERT_NO_FATAL_FAILURE(render_thread_->Start());
+ render_thread_->SetPriority(rtc::kRealtimePriority);
+ ASSERT_NO_FATAL_FAILURE(capture_thread_->Start());
+ capture_thread_->SetPriority(rtc::kRealtimePriority);
+ }
+
+ // Event handler for the test.
+ const rtc::scoped_ptr<EventWrapper> test_complete_;
+
+ // Thread related variables.
+ rtc::scoped_ptr<rtc::PlatformThread> render_thread_;
+ rtc::scoped_ptr<rtc::PlatformThread> capture_thread_;
+ Random rand_gen_;
+
+ rtc::scoped_ptr<AudioProcessing> apm_;
+ const SimulationConfig simulation_config_;
+ FrameCounters frame_counters_;
+ LockedFlag capture_call_checker_;
+ rtc::scoped_ptr<TimedThreadApiProcessor> render_thread_state_;
+ rtc::scoped_ptr<TimedThreadApiProcessor> capture_thread_state_;
+};
+
+// Implements the callback functionality for the threads.
+bool TimedThreadApiProcessor::Process() {
+ PrepareFrame();
+
+  // Busy-wait (spin) until it is OK to start processing. Note that SleepMs
+  // is not applicable since it only allows sleeping at millisecond
+  // granularity, which is too coarse here.
+ while (!ReadyToProcess()) {
+ }
+
+ int result = AudioProcessing::kNoError;
+ switch (processor_type_) {
+ case ProcessorType::kRender:
+ result = ProcessRender();
+ break;
+ case ProcessorType::kCapture:
+ result = ProcessCapture();
+ break;
+ }
+
+ EXPECT_EQ(result, AudioProcessing::kNoError);
+
+ return !test_->MaybeEndTest();
+}
+
+const float CallSimulator::kRenderInputFloatLevel = 0.5f;
+const float CallSimulator::kCaptureInputFloatLevel = 0.03125f;
+} // anonymous namespace
+
+TEST_P(CallSimulator, ApiCallDurationTest) {
+ // Run test and verify that it did not time out.
+ EXPECT_EQ(kEventSignaled, Run());
+}
+
+INSTANTIATE_TEST_CASE_P(
+ AudioProcessingPerformanceTest,
+ CallSimulator,
+ ::testing::ValuesIn(SimulationConfig::GenerateSimulationConfigs()));
+
+} // namespace webrtc
diff --git a/webrtc/modules/audio_processing/audio_processing_tests.gypi b/webrtc/modules/audio_processing/audio_processing_tests.gypi
index 0314c69b04..523602baba 100644
--- a/webrtc/modules/audio_processing/audio_processing_tests.gypi
+++ b/webrtc/modules/audio_processing/audio_processing_tests.gypi
@@ -128,7 +128,11 @@
'<(webrtc_root)/test/test.gyp:test_support',
'<(DEPTH)/third_party/gflags/gflags.gyp:gflags',
],
- 'sources': [ 'test/audioproc_float.cc', ],
+ 'sources': [
+ 'test/audio_file_processor.cc',
+ 'test/audio_file_processor.h',
+ 'test/audioproc_float.cc',
+ ],
},
{
'target_name': 'unpack_aecdump',
diff --git a/webrtc/modules/audio_processing/beamformer/array_util.cc b/webrtc/modules/audio_processing/beamformer/array_util.cc
index 8aaeee9f59..6b1c474269 100644
--- a/webrtc/modules/audio_processing/beamformer/array_util.cc
+++ b/webrtc/modules/audio_processing/beamformer/array_util.cc
@@ -56,7 +56,7 @@ bool ArePerpendicular(const Point& a, const Point& b) {
return std::abs(DotProduct(a, b)) < kMaxDotProduct;
}
-rtc::Maybe<Point> GetDirectionIfLinear(
+rtc::Optional<Point> GetDirectionIfLinear(
const std::vector<Point>& array_geometry) {
RTC_DCHECK_GT(array_geometry.size(), 1u);
const Point first_pair_direction =
@@ -65,13 +65,14 @@ rtc::Maybe<Point> GetDirectionIfLinear(
const Point pair_direction =
PairDirection(array_geometry[i - 1], array_geometry[i]);
if (!AreParallel(first_pair_direction, pair_direction)) {
- return rtc::Maybe<Point>();
+ return rtc::Optional<Point>();
}
}
- return first_pair_direction;
+ return rtc::Optional<Point>(first_pair_direction);
}
-rtc::Maybe<Point> GetNormalIfPlanar(const std::vector<Point>& array_geometry) {
+rtc::Optional<Point> GetNormalIfPlanar(
+ const std::vector<Point>& array_geometry) {
RTC_DCHECK_GT(array_geometry.size(), 1u);
const Point first_pair_direction =
PairDirection(array_geometry[0], array_geometry[1]);
@@ -85,30 +86,30 @@ rtc::Maybe<Point> GetNormalIfPlanar(const std::vector<Point>& array_geometry) {
}
}
if (is_linear) {
- return rtc::Maybe<Point>();
+ return rtc::Optional<Point>();
}
const Point normal_direction =
CrossProduct(first_pair_direction, pair_direction);
for (; i < array_geometry.size(); ++i) {
pair_direction = PairDirection(array_geometry[i - 1], array_geometry[i]);
if (!ArePerpendicular(normal_direction, pair_direction)) {
- return rtc::Maybe<Point>();
+ return rtc::Optional<Point>();
}
}
- return normal_direction;
+ return rtc::Optional<Point>(normal_direction);
}
-rtc::Maybe<Point> GetArrayNormalIfExists(
+rtc::Optional<Point> GetArrayNormalIfExists(
const std::vector<Point>& array_geometry) {
- const rtc::Maybe<Point> direction = GetDirectionIfLinear(array_geometry);
+ const rtc::Optional<Point> direction = GetDirectionIfLinear(array_geometry);
if (direction) {
- return Point(direction->y(), -direction->x(), 0.f);
+ return rtc::Optional<Point>(Point(direction->y(), -direction->x(), 0.f));
}
- const rtc::Maybe<Point> normal = GetNormalIfPlanar(array_geometry);
+ const rtc::Optional<Point> normal = GetNormalIfPlanar(array_geometry);
if (normal && normal->z() < kMaxDotProduct) {
return normal;
}
- return rtc::Maybe<Point>();
+ return rtc::Optional<Point>();
}
Point AzimuthToPoint(float azimuth) {
diff --git a/webrtc/modules/audio_processing/beamformer/array_util.h b/webrtc/modules/audio_processing/beamformer/array_util.h
index 7fff9735a1..f86ad5dee6 100644
--- a/webrtc/modules/audio_processing/beamformer/array_util.h
+++ b/webrtc/modules/audio_processing/beamformer/array_util.h
@@ -14,7 +14,7 @@
#include <cmath>
#include <vector>
-#include "webrtc/base/maybe.h"
+#include "webrtc/base/optional.h"
namespace webrtc {
@@ -59,15 +59,16 @@ float GetMinimumSpacing(const std::vector<Point>& array_geometry);
// If the given array geometry is linear it returns the direction without
// normalizing.
-rtc::Maybe<Point> GetDirectionIfLinear(
+rtc::Optional<Point> GetDirectionIfLinear(
const std::vector<Point>& array_geometry);
// If the given array geometry is planar it returns the normal without
// normalizing.
-rtc::Maybe<Point> GetNormalIfPlanar(const std::vector<Point>& array_geometry);
+rtc::Optional<Point> GetNormalIfPlanar(
+ const std::vector<Point>& array_geometry);
// Returns the normal of an array if it has one and it is in the xy-plane.
-rtc::Maybe<Point> GetArrayNormalIfExists(
+rtc::Optional<Point> GetArrayNormalIfExists(
const std::vector<Point>& array_geometry);
// The resulting Point will be in the xy-plane.
diff --git a/webrtc/modules/audio_processing/beamformer/complex_matrix.h b/webrtc/modules/audio_processing/beamformer/complex_matrix.h
index bfa3563b89..707c51564b 100644
--- a/webrtc/modules/audio_processing/beamformer/complex_matrix.h
+++ b/webrtc/modules/audio_processing/beamformer/complex_matrix.h
@@ -27,10 +27,10 @@ class ComplexMatrix : public Matrix<complex<T> > {
public:
ComplexMatrix() : Matrix<complex<T> >() {}
- ComplexMatrix(int num_rows, int num_columns)
+ ComplexMatrix(size_t num_rows, size_t num_columns)
: Matrix<complex<T> >(num_rows, num_columns) {}
- ComplexMatrix(const complex<T>* data, int num_rows, int num_columns)
+ ComplexMatrix(const complex<T>* data, size_t num_rows, size_t num_columns)
: Matrix<complex<T> >(data, num_rows, num_columns) {}
// Complex Matrix operations.
@@ -51,7 +51,7 @@ class ComplexMatrix : public Matrix<complex<T> > {
ComplexMatrix& ConjugateTranspose() {
this->CopyDataToScratch();
- int num_rows = this->num_rows();
+ size_t num_rows = this->num_rows();
this->SetNumRows(this->num_columns());
this->SetNumColumns(num_rows);
this->Resize();
@@ -82,8 +82,8 @@ class ComplexMatrix : public Matrix<complex<T> > {
private:
ComplexMatrix& ConjugateTranspose(const complex<T>* const* src) {
complex<T>* const* elements = this->elements();
- for (int i = 0; i < this->num_rows(); ++i) {
- for (int j = 0; j < this->num_columns(); ++j) {
+ for (size_t i = 0; i < this->num_rows(); ++i) {
+ for (size_t j = 0; j < this->num_columns(); ++j) {
elements[i][j] = conj(src[j][i]);
}
}
diff --git a/webrtc/modules/audio_processing/beamformer/covariance_matrix_generator.cc b/webrtc/modules/audio_processing/beamformer/covariance_matrix_generator.cc
index d0728325fc..78f4df5ca9 100644
--- a/webrtc/modules/audio_processing/beamformer/covariance_matrix_generator.cc
+++ b/webrtc/modules/audio_processing/beamformer/covariance_matrix_generator.cc
@@ -27,7 +27,7 @@ float BesselJ0(float x) {
// Calculates the Euclidean norm for a row vector.
float Norm(const ComplexMatrix<float>& x) {
- RTC_CHECK_EQ(1, x.num_rows());
+ RTC_CHECK_EQ(1u, x.num_rows());
const size_t length = x.num_columns();
const complex<float>* elems = x.elements()[0];
float result = 0.f;
@@ -43,8 +43,8 @@ void CovarianceMatrixGenerator::UniformCovarianceMatrix(
float wave_number,
const std::vector<Point>& geometry,
ComplexMatrix<float>* mat) {
- RTC_CHECK_EQ(static_cast<int>(geometry.size()), mat->num_rows());
- RTC_CHECK_EQ(static_cast<int>(geometry.size()), mat->num_columns());
+ RTC_CHECK_EQ(geometry.size(), mat->num_rows());
+ RTC_CHECK_EQ(geometry.size(), mat->num_columns());
complex<float>* const* mat_els = mat->elements();
for (size_t i = 0; i < geometry.size(); ++i) {
@@ -68,8 +68,8 @@ void CovarianceMatrixGenerator::AngledCovarianceMatrix(
int sample_rate,
const std::vector<Point>& geometry,
ComplexMatrix<float>* mat) {
- RTC_CHECK_EQ(static_cast<int>(geometry.size()), mat->num_rows());
- RTC_CHECK_EQ(static_cast<int>(geometry.size()), mat->num_columns());
+ RTC_CHECK_EQ(geometry.size(), mat->num_rows());
+ RTC_CHECK_EQ(geometry.size(), mat->num_columns());
ComplexMatrix<float> interf_cov_vector(1, geometry.size());
ComplexMatrix<float> interf_cov_vector_transposed(geometry.size(), 1);
@@ -94,8 +94,8 @@ void CovarianceMatrixGenerator::PhaseAlignmentMasks(
const std::vector<Point>& geometry,
float angle,
ComplexMatrix<float>* mat) {
- RTC_CHECK_EQ(1, mat->num_rows());
- RTC_CHECK_EQ(static_cast<int>(geometry.size()), mat->num_columns());
+ RTC_CHECK_EQ(1u, mat->num_rows());
+ RTC_CHECK_EQ(geometry.size(), mat->num_columns());
float freq_in_hertz =
(static_cast<float>(frequency_bin) / fft_size) * sample_rate;
diff --git a/webrtc/modules/audio_processing/beamformer/matrix.h b/webrtc/modules/audio_processing/beamformer/matrix.h
index 162aef1dac..51c1cece97 100644
--- a/webrtc/modules/audio_processing/beamformer/matrix.h
+++ b/webrtc/modules/audio_processing/beamformer/matrix.h
@@ -67,7 +67,7 @@ class Matrix {
Matrix() : num_rows_(0), num_columns_(0) {}
// Allocates space for the elements and initializes all values to zero.
- Matrix(int num_rows, int num_columns)
+ Matrix(size_t num_rows, size_t num_columns)
: num_rows_(num_rows), num_columns_(num_columns) {
Resize();
scratch_data_.resize(num_rows_ * num_columns_);
@@ -75,7 +75,7 @@ class Matrix {
}
// Copies |data| into the new Matrix.
- Matrix(const T* data, int num_rows, int num_columns)
+ Matrix(const T* data, size_t num_rows, size_t num_columns)
: num_rows_(0), num_columns_(0) {
CopyFrom(data, num_rows, num_columns);
scratch_data_.resize(num_rows_ * num_columns_);
@@ -90,23 +90,23 @@ class Matrix {
}
// Copy |data| into the Matrix. The current data is lost.
- void CopyFrom(const T* const data, int num_rows, int num_columns) {
+ void CopyFrom(const T* const data, size_t num_rows, size_t num_columns) {
Resize(num_rows, num_columns);
memcpy(&data_[0], data, num_rows_ * num_columns_ * sizeof(data_[0]));
}
Matrix& CopyFromColumn(const T* const* src,
size_t column_index,
- int num_rows) {
+ size_t num_rows) {
Resize(1, num_rows);
- for (int i = 0; i < num_columns_; ++i) {
+ for (size_t i = 0; i < num_columns_; ++i) {
data_[i] = src[i][column_index];
}
return *this;
}
- void Resize(int num_rows, int num_columns) {
+ void Resize(size_t num_rows, size_t num_columns) {
if (num_rows != num_rows_ || num_columns != num_columns_) {
num_rows_ = num_rows;
num_columns_ = num_columns;
@@ -115,8 +115,8 @@ class Matrix {
}
// Accessors and mutators.
- int num_rows() const { return num_rows_; }
- int num_columns() const { return num_columns_; }
+ size_t num_rows() const { return num_rows_; }
+ size_t num_columns() const { return num_columns_; }
T* const* elements() { return &elements_[0]; }
const T* const* elements() const { return &elements_[0]; }
@@ -124,7 +124,7 @@ class Matrix {
RTC_CHECK_EQ(num_rows_, num_columns_);
T trace = 0;
- for (int i = 0; i < num_rows_; ++i) {
+ for (size_t i = 0; i < num_rows_; ++i) {
trace += elements_[i][i];
}
return trace;
@@ -282,8 +282,8 @@ class Matrix {
std::ostringstream ss;
ss << std::endl << "Matrix" << std::endl;
- for (int i = 0; i < num_rows_; ++i) {
- for (int j = 0; j < num_columns_; ++j) {
+ for (size_t i = 0; i < num_rows_; ++i) {
+ for (size_t j = 0; j < num_columns_; ++j) {
ss << elements_[i][j] << " ";
}
ss << std::endl;
@@ -294,8 +294,8 @@ class Matrix {
}
protected:
- void SetNumRows(const int num_rows) { num_rows_ = num_rows; }
- void SetNumColumns(const int num_columns) { num_columns_ = num_columns; }
+ void SetNumRows(const size_t num_rows) { num_rows_ = num_rows; }
+ void SetNumColumns(const size_t num_columns) { num_columns_ = num_columns; }
T* data() { return &data_[0]; }
const T* data() const { return &data_[0]; }
const T* const* scratch_elements() const { return &scratch_elements_[0]; }
@@ -307,7 +307,7 @@ class Matrix {
data_.resize(size);
elements_.resize(num_rows_);
- for (int i = 0; i < num_rows_; ++i) {
+ for (size_t i = 0; i < num_rows_; ++i) {
elements_[i] = &data_[i * num_columns_];
}
}
@@ -317,14 +317,14 @@ class Matrix {
scratch_data_ = data_;
scratch_elements_.resize(num_rows_);
- for (int i = 0; i < num_rows_; ++i) {
+ for (size_t i = 0; i < num_rows_; ++i) {
scratch_elements_[i] = &scratch_data_[i * num_columns_];
}
}
private:
- int num_rows_;
- int num_columns_;
+ size_t num_rows_;
+ size_t num_columns_;
std::vector<T> data_;
std::vector<T*> elements_;
@@ -336,8 +336,8 @@ class Matrix {
// Helpers for Transpose and Multiply operations that unify in-place and
// out-of-place solutions.
Matrix& Transpose(const T* const* src) {
- for (int i = 0; i < num_rows_; ++i) {
- for (int j = 0; j < num_columns_; ++j) {
+ for (size_t i = 0; i < num_rows_; ++i) {
+ for (size_t j = 0; j < num_columns_; ++j) {
elements_[i][j] = src[j][i];
}
}
@@ -345,11 +345,13 @@ class Matrix {
return *this;
}
- Matrix& Multiply(const T* const* lhs, int num_rows_rhs, const T* const* rhs) {
- for (int row = 0; row < num_rows_; ++row) {
- for (int col = 0; col < num_columns_; ++col) {
+ Matrix& Multiply(const T* const* lhs,
+ size_t num_rows_rhs,
+ const T* const* rhs) {
+ for (size_t row = 0; row < num_rows_; ++row) {
+ for (size_t col = 0; col < num_columns_; ++col) {
T cur_element = 0;
- for (int i = 0; i < num_rows_rhs; ++i) {
+ for (size_t i = 0; i < num_rows_rhs; ++i) {
cur_element += lhs[row][i] * rhs[i][col];
}
diff --git a/webrtc/modules/audio_processing/beamformer/matrix_test_helpers.h b/webrtc/modules/audio_processing/beamformer/matrix_test_helpers.h
index 7c58670068..9891a8220c 100644
--- a/webrtc/modules/audio_processing/beamformer/matrix_test_helpers.h
+++ b/webrtc/modules/audio_processing/beamformer/matrix_test_helpers.h
@@ -34,8 +34,8 @@ class MatrixTestHelpers {
const T* const* expected_elements = expected.elements();
const T* const* actual_elements = actual.elements();
- for (int i = 0; i < expected.num_rows(); ++i) {
- for (int j = 0; j < expected.num_columns(); ++j) {
+ for (size_t i = 0; i < expected.num_rows(); ++i) {
+ for (size_t j = 0; j < expected.num_columns(); ++j) {
EXPECT_EQ(expected_elements[i][j], actual_elements[i][j]);
}
}
@@ -48,8 +48,8 @@ class MatrixTestHelpers {
const float* const* expected_elements = expected.elements();
const float* const* actual_elements = actual.elements();
- for (int i = 0; i < expected.num_rows(); ++i) {
- for (int j = 0; j < expected.num_columns(); ++j) {
+ for (size_t i = 0; i < expected.num_rows(); ++i) {
+ for (size_t j = 0; j < expected.num_columns(); ++j) {
EXPECT_NEAR(expected_elements[i][j], actual_elements[i][j], kTolerance);
}
}
@@ -63,8 +63,8 @@ class MatrixTestHelpers {
const complex<float>* const* expected_elements = expected.elements();
const complex<float>* const* actual_elements = actual.elements();
- for (int i = 0; i < expected.num_rows(); ++i) {
- for (int j = 0; j < expected.num_columns(); ++j) {
+ for (size_t i = 0; i < expected.num_rows(); ++i) {
+ for (size_t j = 0; j < expected.num_columns(); ++j) {
EXPECT_NEAR(expected_elements[i][j].real(),
actual_elements[i][j].real(),
kTolerance);
@@ -84,8 +84,8 @@ class MatrixTestHelpers {
const complex<float>* const* expected_elements = expected.elements();
const complex<float>* const* actual_elements = actual.elements();
- for (int i = 0; i < expected.num_rows(); ++i) {
- for (int j = 0; j < expected.num_columns(); ++j) {
+ for (size_t i = 0; i < expected.num_rows(); ++i) {
+ for (size_t j = 0; j < expected.num_columns(); ++j) {
EXPECT_NEAR(expected_elements[i][j].real(),
actual_elements[i][j].real(),
tolerance);
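The test-helper hunks above compare matrices element by element within a tolerance, checking the real and imaginary parts of complex entries separately. A gtest-free sketch of that comparison (the function name is illustrative):

#include <cmath>
#include <complex>
#include <cstddef>

// True when every element of two row-major complex matrices matches within
// |tolerance|, with real and imaginary parts compared separately, as the
// EXPECT_NEAR pairs above do.
bool NearEqual(const std::complex<float>* const* expected,
               const std::complex<float>* const* actual,
               size_t num_rows,
               size_t num_columns,
               float tolerance) {
  for (size_t i = 0; i < num_rows; ++i) {
    for (size_t j = 0; j < num_columns; ++j) {
      if (std::fabs(expected[i][j].real() - actual[i][j].real()) > tolerance ||
          std::fabs(expected[i][j].imag() - actual[i][j].imag()) > tolerance) {
        return false;
      }
    }
  }
  return true;
}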
diff --git a/webrtc/modules/audio_processing/beamformer/nonlinear_beamformer.cc b/webrtc/modules/audio_processing/beamformer/nonlinear_beamformer.cc
index 029fa089fc..6ea7234f6f 100644
--- a/webrtc/modules/audio_processing/beamformer/nonlinear_beamformer.cc
+++ b/webrtc/modules/audio_processing/beamformer/nonlinear_beamformer.cc
@@ -79,7 +79,7 @@ const float kCompensationGain = 2.f;
// The returned norm is clamped to be non-negative.
float Norm(const ComplexMatrix<float>& mat,
const ComplexMatrix<float>& norm_mat) {
- RTC_CHECK_EQ(norm_mat.num_rows(), 1);
+ RTC_CHECK_EQ(1u, norm_mat.num_rows());
RTC_CHECK_EQ(norm_mat.num_columns(), mat.num_rows());
RTC_CHECK_EQ(norm_mat.num_columns(), mat.num_columns());
@@ -89,8 +89,8 @@ float Norm(const ComplexMatrix<float>& mat,
const complex<float>* const* mat_els = mat.elements();
const complex<float>* const* norm_mat_els = norm_mat.elements();
- for (int i = 0; i < norm_mat.num_columns(); ++i) {
- for (int j = 0; j < norm_mat.num_columns(); ++j) {
+ for (size_t i = 0; i < norm_mat.num_columns(); ++i) {
+ for (size_t j = 0; j < norm_mat.num_columns(); ++j) {
first_product += conj(norm_mat_els[0][j]) * mat_els[j][i];
}
second_product += first_product * norm_mat_els[0][i];
@@ -102,15 +102,15 @@ float Norm(const ComplexMatrix<float>& mat,
// Does conjugate(|lhs|) * |rhs| for row vectors |lhs| and |rhs|.
complex<float> ConjugateDotProduct(const ComplexMatrix<float>& lhs,
const ComplexMatrix<float>& rhs) {
- RTC_CHECK_EQ(lhs.num_rows(), 1);
- RTC_CHECK_EQ(rhs.num_rows(), 1);
+ RTC_CHECK_EQ(1u, lhs.num_rows());
+ RTC_CHECK_EQ(1u, rhs.num_rows());
RTC_CHECK_EQ(lhs.num_columns(), rhs.num_columns());
const complex<float>* const* lhs_elements = lhs.elements();
const complex<float>* const* rhs_elements = rhs.elements();
complex<float> result = complex<float>(0.f, 0.f);
- for (int i = 0; i < lhs.num_columns(); ++i) {
+ for (size_t i = 0; i < lhs.num_columns(); ++i) {
result += conj(lhs_elements[0][i]) * rhs_elements[0][i];
}
@@ -126,8 +126,8 @@ size_t Round(float x) {
float SumAbs(const ComplexMatrix<float>& mat) {
float sum_abs = 0.f;
const complex<float>* const* mat_els = mat.elements();
- for (int i = 0; i < mat.num_rows(); ++i) {
- for (int j = 0; j < mat.num_columns(); ++j) {
+ for (size_t i = 0; i < mat.num_rows(); ++i) {
+ for (size_t j = 0; j < mat.num_columns(); ++j) {
sum_abs += std::abs(mat_els[i][j]);
}
}
@@ -138,8 +138,8 @@ float SumAbs(const ComplexMatrix<float>& mat) {
float SumSquares(const ComplexMatrix<float>& mat) {
float sum_squares = 0.f;
const complex<float>* const* mat_els = mat.elements();
- for (int i = 0; i < mat.num_rows(); ++i) {
- for (int j = 0; j < mat.num_columns(); ++j) {
+ for (size_t i = 0; i < mat.num_rows(); ++i) {
+ for (size_t j = 0; j < mat.num_columns(); ++j) {
float abs_value = std::abs(mat_els[i][j]);
sum_squares += abs_value * abs_value;
}
@@ -150,20 +150,20 @@ float SumSquares(const ComplexMatrix<float>& mat) {
// Does |out| = |in|.' * conj(|in|) for row vector |in|.
void TransposedConjugatedProduct(const ComplexMatrix<float>& in,
ComplexMatrix<float>* out) {
- RTC_CHECK_EQ(in.num_rows(), 1);
+ RTC_CHECK_EQ(1u, in.num_rows());
RTC_CHECK_EQ(out->num_rows(), in.num_columns());
RTC_CHECK_EQ(out->num_columns(), in.num_columns());
const complex<float>* in_elements = in.elements()[0];
complex<float>* const* out_elements = out->elements();
- for (int i = 0; i < out->num_rows(); ++i) {
- for (int j = 0; j < out->num_columns(); ++j) {
+ for (size_t i = 0; i < out->num_rows(); ++i) {
+ for (size_t j = 0; j < out->num_columns(); ++j) {
out_elements[i][j] = in_elements[i] * conj(in_elements[j]);
}
}
}
std::vector<Point> GetCenteredArray(std::vector<Point> array_geometry) {
- for (int dim = 0; dim < 3; ++dim) {
+ for (size_t dim = 0; dim < 3; ++dim) {
float center = 0.f;
for (size_t i = 0; i < array_geometry.size(); ++i) {
center += array_geometry[i].c[dim];
@@ -379,7 +379,7 @@ void NonlinearBeamformer::ProcessChunk(const ChannelBuffer<float>& input,
(high_pass_postfilter_mask_ - old_high_pass_mask) /
input.num_frames_per_band();
// Apply the smoothed high-pass mask to the first channel of each band.
- // This can be done because the effct of the linear beamformer is negligible
+ // This can be done because the effect of the linear beamformer is negligible
// compared to the post-filter.
for (size_t i = 1; i < input.num_bands(); ++i) {
float smoothed_mask = old_high_pass_mask;
@@ -408,13 +408,13 @@ bool NonlinearBeamformer::IsInBeam(const SphericalPointf& spherical_point) {
}
void NonlinearBeamformer::ProcessAudioBlock(const complex_f* const* input,
- int num_input_channels,
+ size_t num_input_channels,
size_t num_freq_bins,
- int num_output_channels,
+ size_t num_output_channels,
complex_f* const* output) {
- RTC_CHECK_EQ(num_freq_bins, kNumFreqBins);
- RTC_CHECK_EQ(num_input_channels, num_input_channels_);
- RTC_CHECK_EQ(num_output_channels, 1);
+ RTC_CHECK_EQ(kNumFreqBins, num_freq_bins);
+ RTC_CHECK_EQ(num_input_channels_, num_input_channels);
+ RTC_CHECK_EQ(1u, num_output_channels);
// Calculating the post-filter masks. Note that we need two for each
// frequency bin to account for the positive and negative interferer
@@ -483,7 +483,7 @@ void NonlinearBeamformer::ApplyMasks(const complex_f* const* input,
const complex_f* delay_sum_mask_els =
normalized_delay_sum_masks_[f_ix].elements()[0];
- for (int c_ix = 0; c_ix < num_input_channels_; ++c_ix) {
+ for (size_t c_ix = 0; c_ix < num_input_channels_; ++c_ix) {
output_channel[f_ix] += input[c_ix][f_ix] * delay_sum_mask_els[c_ix];
}
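The beamformer helpers above (Norm, ConjugateDotProduct, SumAbs, SumSquares) are element-wise reductions over complex matrices, and the hunks also flip RTC_CHECK_EQ so the expected constant comes first as an unsigned literal (1u), matching the new size_t getters. A standalone sketch of the conjugate dot product using std::complex directly, without the ComplexMatrix wrapper:

#include <cassert>
#include <complex>
#include <cstddef>
#include <vector>

// conj(lhs) * rhs accumulated over two row vectors of equal length.
std::complex<float> ConjugateDot(const std::vector<std::complex<float>>& lhs,
                                 const std::vector<std::complex<float>>& rhs) {
  assert(lhs.size() == rhs.size());
  std::complex<float> result(0.f, 0.f);
  for (size_t i = 0; i < lhs.size(); ++i) {
    result += std::conj(lhs[i]) * rhs[i];
  }
  return result;
}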
diff --git a/webrtc/modules/audio_processing/beamformer/nonlinear_beamformer.h b/webrtc/modules/audio_processing/beamformer/nonlinear_beamformer.h
index 565c1f349f..29c416ca91 100644
--- a/webrtc/modules/audio_processing/beamformer/nonlinear_beamformer.h
+++ b/webrtc/modules/audio_processing/beamformer/nonlinear_beamformer.h
@@ -67,9 +67,9 @@ class NonlinearBeamformer
// Process one frequency-domain block of audio. This is where the fun
// happens. Implements LappedTransform::Callback.
void ProcessAudioBlock(const complex<float>* const* input,
- int num_input_channels,
+ size_t num_input_channels,
size_t num_freq_bins,
- int num_output_channels,
+ size_t num_output_channels,
complex<float>* const* output) override;
private:
@@ -129,12 +129,12 @@ class NonlinearBeamformer
float window_[kFftSize];
// Parameters exposed to the user.
- const int num_input_channels_;
+ const size_t num_input_channels_;
int sample_rate_hz_;
const std::vector<Point> array_geometry_;
// The normal direction of the array if it has one and it is in the xy-plane.
- const rtc::Maybe<Point> array_normal_;
+ const rtc::Optional<Point> array_normal_;
// Minimum spacing between microphone pairs.
const float min_mic_spacing_;
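The header hunk above renames rtc::Maybe<Point> to rtc::Optional<Point>, a value-or-nothing holder with the semantics std::optional later standardized. An illustrative sketch with std::optional, assuming rtc::Optional behaves equivalently for this use:

#include <optional>

struct Point3 { float c[3]; };  // Stand-in for webrtc::Point.

// The array normal exists only when the geometry defines one in the
// xy-plane; otherwise the member simply holds no value.
std::optional<Point3> ArrayNormal(bool has_normal) {
  if (!has_normal) {
    return std::nullopt;
  }
  return Point3{{0.f, 1.f, 0.f}};  // e.g. an array facing along +y.
}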
diff --git a/webrtc/modules/audio_processing/beamformer/nonlinear_beamformer_test.cc b/webrtc/modules/audio_processing/beamformer/nonlinear_beamformer_test.cc
index cc752485e9..d187552692 100644
--- a/webrtc/modules/audio_processing/beamformer/nonlinear_beamformer_test.cc
+++ b/webrtc/modules/audio_processing/beamformer/nonlinear_beamformer_test.cc
@@ -12,6 +12,7 @@
#include "gflags/gflags.h"
#include "webrtc/base/checks.h"
+#include "webrtc/base/format_macros.h"
#include "webrtc/common_audio/channel_buffer.h"
#include "webrtc/common_audio/wav_file.h"
#include "webrtc/modules/audio_processing/beamformer/nonlinear_beamformer.h"
@@ -52,9 +53,9 @@ int main(int argc, char* argv[]) {
NonlinearBeamformer bf(array_geometry);
bf.Initialize(kChunkSizeMs, in_file.sample_rate());
- printf("Input file: %s\nChannels: %d, Sample rate: %d Hz\n\n",
+ printf("Input file: %s\nChannels: %" PRIuS ", Sample rate: %d Hz\n\n",
FLAGS_i.c_str(), in_file.num_channels(), in_file.sample_rate());
- printf("Output file: %s\nChannels: %d, Sample rate: %d Hz\n\n",
+ printf("Output file: %s\nChannels: %" PRIuS ", Sample rate: %d Hz\n\n",
FLAGS_o.c_str(), out_file.num_channels(), out_file.sample_rate());
ChannelBuffer<float> in_buf(
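num_channels() now returns size_t, which has no printf length modifier that is portable across MSVC and POSIX toolchains, hence the PRIuS macro from webrtc/base/format_macros.h. A sketch of the mechanism, assuming PRIuS expands to "zu" on POSIX builds (MSVC builds would use "Iu"):

#include <cstddef>
#include <cstdio>

#ifndef PRIuS
#define PRIuS "zu"  // size_t as unsigned decimal on POSIX toolchains.
#endif

int main() {
  size_t num_channels = 2;
  int sample_rate_hz = 48000;
  printf("Channels: %" PRIuS ", Sample rate: %d Hz\n", num_channels,
         sample_rate_hz);
  return 0;
}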
diff --git a/webrtc/modules/audio_processing/common.h b/webrtc/modules/audio_processing/common.h
index ed8a0544c3..d4ddb92b50 100644
--- a/webrtc/modules/audio_processing/common.h
+++ b/webrtc/modules/audio_processing/common.h
@@ -17,7 +17,7 @@
namespace webrtc {
-static inline int ChannelsFromLayout(AudioProcessing::ChannelLayout layout) {
+static inline size_t ChannelsFromLayout(AudioProcessing::ChannelLayout layout) {
switch (layout) {
case AudioProcessing::kMono:
case AudioProcessing::kMonoAndKeyboard:
@@ -27,7 +27,7 @@ static inline int ChannelsFromLayout(AudioProcessing::ChannelLayout layout) {
return 2;
}
assert(false);
- return -1;
+ return 0;
}
} // namespace webrtc
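With ChannelsFromLayout returning size_t, the old unreachable sentinel -1 would wrap to SIZE_MAX, so the fallback becomes 0. The same sentinel choice as a self-contained sketch (enum and names illustrative):

#include <cassert>
#include <cstddef>

enum class Layout { kMono, kStereo };

size_t ChannelsFrom(Layout layout) {
  switch (layout) {
    case Layout::kMono:
      return 1;
    case Layout::kStereo:
      return 2;
  }
  assert(false);
  return 0;  // Unreachable; 0 is a safer sentinel than size_t(-1).
}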
diff --git a/webrtc/modules/audio_processing/echo_cancellation_impl.cc b/webrtc/modules/audio_processing/echo_cancellation_impl.cc
index 56ee9e0fff..debc597c54 100644
--- a/webrtc/modules/audio_processing/echo_cancellation_impl.cc
+++ b/webrtc/modules/audio_processing/echo_cancellation_impl.cc
@@ -16,9 +16,8 @@
extern "C" {
#include "webrtc/modules/audio_processing/aec/aec_core.h"
}
-#include "webrtc/modules/audio_processing/aec/include/echo_cancellation.h"
+#include "webrtc/modules/audio_processing/aec/echo_cancellation.h"
#include "webrtc/modules/audio_processing/audio_buffer.h"
-#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
namespace webrtc {
@@ -53,13 +52,22 @@ AudioProcessing::Error MapError(int err) {
return AudioProcessing::kUnspecifiedError;
}
}
+
+// Maximum length that a frame of samples can have.
+static const size_t kMaxAllowedValuesOfSamplesPerFrame = 160;
+// Maximum number of frames to buffer in the render queue.
+// TODO(peah): Decrease this once we properly handle hugely unbalanced
+// reverse and forward call numbers.
+static const size_t kMaxNumFramesToBuffer = 100;
} // namespace
EchoCancellationImpl::EchoCancellationImpl(const AudioProcessing* apm,
- CriticalSectionWrapper* crit)
+ rtc::CriticalSection* crit_render,
+ rtc::CriticalSection* crit_capture)
: ProcessingComponent(),
apm_(apm),
- crit_(crit),
+ crit_render_(crit_render),
+ crit_capture_(crit_capture),
drift_compensation_enabled_(false),
metrics_enabled_(false),
suppression_level_(kModerateSuppression),
@@ -68,87 +76,131 @@ EchoCancellationImpl::EchoCancellationImpl(const AudioProcessing* apm,
stream_has_echo_(false),
delay_logging_enabled_(false),
extended_filter_enabled_(false),
- delay_agnostic_enabled_(false) {
+ delay_agnostic_enabled_(false),
+ render_queue_element_max_size_(0) {
+ RTC_DCHECK(apm);
+ RTC_DCHECK(crit_render);
+ RTC_DCHECK(crit_capture);
}
EchoCancellationImpl::~EchoCancellationImpl() {}
int EchoCancellationImpl::ProcessRenderAudio(const AudioBuffer* audio) {
+ rtc::CritScope cs_render(crit_render_);
if (!is_component_enabled()) {
- return apm_->kNoError;
+ return AudioProcessing::kNoError;
}
assert(audio->num_frames_per_band() <= 160);
assert(audio->num_channels() == apm_->num_reverse_channels());
- int err = apm_->kNoError;
+ int err = AudioProcessing::kNoError;
// The ordering convention must be followed to pass to the correct AEC.
size_t handle_index = 0;
- for (int i = 0; i < apm_->num_output_channels(); i++) {
- for (int j = 0; j < audio->num_channels(); j++) {
+ render_queue_buffer_.clear();
+ for (size_t i = 0; i < apm_->num_output_channels(); i++) {
+ for (size_t j = 0; j < audio->num_channels(); j++) {
Handle* my_handle = static_cast<Handle*>(handle(handle_index));
- err = WebRtcAec_BufferFarend(
- my_handle,
- audio->split_bands_const_f(j)[kBand0To8kHz],
+ // Retrieve any error code produced by the buffering of the farend
+ // signal
+ err = WebRtcAec_GetBufferFarendError(
+ my_handle, audio->split_bands_const_f(j)[kBand0To8kHz],
audio->num_frames_per_band());
- if (err != apm_->kNoError) {
- return GetHandleError(my_handle); // TODO(ajm): warning possible?
+ if (err != AudioProcessing::kNoError) {
+ return MapError(err); // TODO(ajm): warning possible?
}
- handle_index++;
+ // Buffer the samples in the render queue.
+ render_queue_buffer_.insert(render_queue_buffer_.end(),
+ audio->split_bands_const_f(j)[kBand0To8kHz],
+ (audio->split_bands_const_f(j)[kBand0To8kHz] +
+ audio->num_frames_per_band()));
}
}
- return apm_->kNoError;
+ // Insert the samples into the queue.
+ if (!render_signal_queue_->Insert(&render_queue_buffer_)) {
+ // The data queue is full and needs to be emptied.
+ ReadQueuedRenderData();
+
+ // Retry the insert (should always work).
+ RTC_DCHECK_EQ(render_signal_queue_->Insert(&render_queue_buffer_), true);
+ }
+
+ return AudioProcessing::kNoError;
+}
+
+// Read chunks of data that were received and queued on the render side from
+// a queue. All the data chunks are buffered into the farend signal of the AEC.
+void EchoCancellationImpl::ReadQueuedRenderData() {
+ rtc::CritScope cs_capture(crit_capture_);
+ if (!is_component_enabled()) {
+ return;
+ }
+
+ while (render_signal_queue_->Remove(&capture_queue_buffer_)) {
+ size_t handle_index = 0;
+ size_t buffer_index = 0;
+ const size_t num_frames_per_band =
+ capture_queue_buffer_.size() /
+ (apm_->num_output_channels() * apm_->num_reverse_channels());
+ for (size_t i = 0; i < apm_->num_output_channels(); i++) {
+ for (size_t j = 0; j < apm_->num_reverse_channels(); j++) {
+ Handle* my_handle = static_cast<Handle*>(handle(handle_index));
+ WebRtcAec_BufferFarend(my_handle, &capture_queue_buffer_[buffer_index],
+ num_frames_per_band);
+
+ buffer_index += num_frames_per_band;
+ handle_index++;
+ }
+ }
+ }
}
int EchoCancellationImpl::ProcessCaptureAudio(AudioBuffer* audio) {
+ rtc::CritScope cs_capture(crit_capture_);
if (!is_component_enabled()) {
- return apm_->kNoError;
+ return AudioProcessing::kNoError;
}
if (!apm_->was_stream_delay_set()) {
- return apm_->kStreamParameterNotSetError;
+ return AudioProcessing::kStreamParameterNotSetError;
}
if (drift_compensation_enabled_ && !was_stream_drift_set_) {
- return apm_->kStreamParameterNotSetError;
+ return AudioProcessing::kStreamParameterNotSetError;
}
assert(audio->num_frames_per_band() <= 160);
- assert(audio->num_channels() == apm_->num_output_channels());
+ assert(audio->num_channels() == apm_->num_proc_channels());
- int err = apm_->kNoError;
+ int err = AudioProcessing::kNoError;
// The ordering convention must be followed to pass to the correct AEC.
size_t handle_index = 0;
stream_has_echo_ = false;
- for (int i = 0; i < audio->num_channels(); i++) {
- for (int j = 0; j < apm_->num_reverse_channels(); j++) {
+ for (size_t i = 0; i < audio->num_channels(); i++) {
+ for (size_t j = 0; j < apm_->num_reverse_channels(); j++) {
Handle* my_handle = handle(handle_index);
- err = WebRtcAec_Process(
- my_handle,
- audio->split_bands_const_f(i),
- audio->num_bands(),
- audio->split_bands_f(i),
- audio->num_frames_per_band(),
- apm_->stream_delay_ms(),
- stream_drift_samples_);
-
- if (err != apm_->kNoError) {
- err = GetHandleError(my_handle);
+ err = WebRtcAec_Process(my_handle, audio->split_bands_const_f(i),
+ audio->num_bands(), audio->split_bands_f(i),
+ audio->num_frames_per_band(),
+ apm_->stream_delay_ms(), stream_drift_samples_);
+
+ if (err != AudioProcessing::kNoError) {
+ err = MapError(err);
// TODO(ajm): Figure out how to return warnings properly.
- if (err != apm_->kBadStreamParameterWarning) {
+ if (err != AudioProcessing::kBadStreamParameterWarning) {
return err;
}
}
int status = 0;
err = WebRtcAec_get_echo_status(my_handle, &status);
- if (err != apm_->kNoError) {
- return GetHandleError(my_handle);
+ if (err != AudioProcessing::kNoError) {
+ return MapError(err);
}
if (status == 1) {
@@ -160,77 +212,92 @@ int EchoCancellationImpl::ProcessCaptureAudio(AudioBuffer* audio) {
}
was_stream_drift_set_ = false;
- return apm_->kNoError;
+ return AudioProcessing::kNoError;
}
int EchoCancellationImpl::Enable(bool enable) {
- CriticalSectionScoped crit_scoped(crit_);
+ // Run in a single-threaded manner.
+ rtc::CritScope cs_render(crit_render_);
+ rtc::CritScope cs_capture(crit_capture_);
// Ensure AEC and AECM are not both enabled.
+ // The is_enabled call is safe from a deadlock perspective
+ // as both locks are already held in the correct order.
if (enable && apm_->echo_control_mobile()->is_enabled()) {
- return apm_->kBadParameterError;
+ return AudioProcessing::kBadParameterError;
}
return EnableComponent(enable);
}
bool EchoCancellationImpl::is_enabled() const {
+ rtc::CritScope cs(crit_capture_);
return is_component_enabled();
}
int EchoCancellationImpl::set_suppression_level(SuppressionLevel level) {
- CriticalSectionScoped crit_scoped(crit_);
- if (MapSetting(level) == -1) {
- return apm_->kBadParameterError;
+ {
+ if (MapSetting(level) == -1) {
+ return AudioProcessing::kBadParameterError;
+ }
+ rtc::CritScope cs(crit_capture_);
+ suppression_level_ = level;
}
-
- suppression_level_ = level;
return Configure();
}
EchoCancellation::SuppressionLevel EchoCancellationImpl::suppression_level()
const {
+ rtc::CritScope cs(crit_capture_);
return suppression_level_;
}
int EchoCancellationImpl::enable_drift_compensation(bool enable) {
- CriticalSectionScoped crit_scoped(crit_);
- drift_compensation_enabled_ = enable;
+ {
+ rtc::CritScope cs(crit_capture_);
+ drift_compensation_enabled_ = enable;
+ }
return Configure();
}
bool EchoCancellationImpl::is_drift_compensation_enabled() const {
+ rtc::CritScope cs(crit_capture_);
return drift_compensation_enabled_;
}
void EchoCancellationImpl::set_stream_drift_samples(int drift) {
+ rtc::CritScope cs(crit_capture_);
was_stream_drift_set_ = true;
stream_drift_samples_ = drift;
}
int EchoCancellationImpl::stream_drift_samples() const {
+ rtc::CritScope cs(crit_capture_);
return stream_drift_samples_;
}
int EchoCancellationImpl::enable_metrics(bool enable) {
- CriticalSectionScoped crit_scoped(crit_);
- metrics_enabled_ = enable;
+ {
+ rtc::CritScope cs(crit_capture_);
+ metrics_enabled_ = enable;
+ }
return Configure();
}
bool EchoCancellationImpl::are_metrics_enabled() const {
+ rtc::CritScope cs(crit_capture_);
return metrics_enabled_;
}
// TODO(ajm): we currently just use the metrics from the first AEC. Think more
// about the best way to extend this to multi-channel.
int EchoCancellationImpl::GetMetrics(Metrics* metrics) {
- CriticalSectionScoped crit_scoped(crit_);
+ rtc::CritScope cs(crit_capture_);
if (metrics == NULL) {
- return apm_->kNullPointerError;
+ return AudioProcessing::kNullPointerError;
}
if (!is_component_enabled() || !metrics_enabled_) {
- return apm_->kNotEnabledError;
+ return AudioProcessing::kNotEnabledError;
}
AecMetrics my_metrics;
@@ -239,8 +306,8 @@ int EchoCancellationImpl::GetMetrics(Metrics* metrics) {
Handle* my_handle = static_cast<Handle*>(handle(0));
int err = WebRtcAec_GetMetrics(my_handle, &my_metrics);
- if (err != apm_->kNoError) {
- return GetHandleError(my_handle);
+ if (err != AudioProcessing::kNoError) {
+ return MapError(err);
}
metrics->residual_echo_return_loss.instant = my_metrics.rerl.instant;
@@ -263,62 +330,70 @@ int EchoCancellationImpl::GetMetrics(Metrics* metrics) {
metrics->a_nlp.maximum = my_metrics.aNlp.max;
metrics->a_nlp.minimum = my_metrics.aNlp.min;
- return apm_->kNoError;
+ return AudioProcessing::kNoError;
}
bool EchoCancellationImpl::stream_has_echo() const {
+ rtc::CritScope cs(crit_capture_);
return stream_has_echo_;
}
int EchoCancellationImpl::enable_delay_logging(bool enable) {
- CriticalSectionScoped crit_scoped(crit_);
- delay_logging_enabled_ = enable;
+ {
+ rtc::CritScope cs(crit_capture_);
+ delay_logging_enabled_ = enable;
+ }
return Configure();
}
bool EchoCancellationImpl::is_delay_logging_enabled() const {
+ rtc::CritScope cs(crit_capture_);
return delay_logging_enabled_;
}
bool EchoCancellationImpl::is_delay_agnostic_enabled() const {
+ rtc::CritScope cs(crit_capture_);
return delay_agnostic_enabled_;
}
bool EchoCancellationImpl::is_extended_filter_enabled() const {
+ rtc::CritScope cs(crit_capture_);
return extended_filter_enabled_;
}
// TODO(bjornv): How should we handle the multi-channel case?
int EchoCancellationImpl::GetDelayMetrics(int* median, int* std) {
+ rtc::CritScope cs(crit_capture_);
float fraction_poor_delays = 0;
return GetDelayMetrics(median, std, &fraction_poor_delays);
}
int EchoCancellationImpl::GetDelayMetrics(int* median, int* std,
float* fraction_poor_delays) {
- CriticalSectionScoped crit_scoped(crit_);
+ rtc::CritScope cs(crit_capture_);
if (median == NULL) {
- return apm_->kNullPointerError;
+ return AudioProcessing::kNullPointerError;
}
if (std == NULL) {
- return apm_->kNullPointerError;
+ return AudioProcessing::kNullPointerError;
}
if (!is_component_enabled() || !delay_logging_enabled_) {
- return apm_->kNotEnabledError;
+ return AudioProcessing::kNotEnabledError;
}
Handle* my_handle = static_cast<Handle*>(handle(0));
- if (WebRtcAec_GetDelayMetrics(my_handle, median, std, fraction_poor_delays) !=
- apm_->kNoError) {
- return GetHandleError(my_handle);
+ const int err =
+ WebRtcAec_GetDelayMetrics(my_handle, median, std, fraction_poor_delays);
+ if (err != AudioProcessing::kNoError) {
+ return MapError(err);
}
- return apm_->kNoError;
+ return AudioProcessing::kNoError;
}
struct AecCore* EchoCancellationImpl::aec_core() const {
- CriticalSectionScoped crit_scoped(crit_);
+ rtc::CritScope cs(crit_capture_);
if (!is_component_enabled()) {
return NULL;
}
@@ -328,16 +403,51 @@ struct AecCore* EchoCancellationImpl::aec_core() const {
int EchoCancellationImpl::Initialize() {
int err = ProcessingComponent::Initialize();
- if (err != apm_->kNoError || !is_component_enabled()) {
- return err;
+ {
+ rtc::CritScope cs(crit_capture_);
+ if (err != AudioProcessing::kNoError || !is_component_enabled()) {
+ return err;
+ }
}
- return apm_->kNoError;
+ AllocateRenderQueue();
+
+ return AudioProcessing::kNoError;
+}
+
+void EchoCancellationImpl::AllocateRenderQueue() {
+ const size_t new_render_queue_element_max_size = std::max<size_t>(
+ static_cast<size_t>(1),
+ kMaxAllowedValuesOfSamplesPerFrame * num_handles_required());
+
+ rtc::CritScope cs_render(crit_render_);
+ rtc::CritScope cs_capture(crit_capture_);
+
+ // Reallocate the queue if the queue item size is too small to fit the
+ // data to put in the queue.
+ if (render_queue_element_max_size_ < new_render_queue_element_max_size) {
+ render_queue_element_max_size_ = new_render_queue_element_max_size;
+
+ std::vector<float> template_queue_element(render_queue_element_max_size_);
+
+ render_signal_queue_.reset(
+ new SwapQueue<std::vector<float>, RenderQueueItemVerifier<float>>(
+ kMaxNumFramesToBuffer, template_queue_element,
+ RenderQueueItemVerifier<float>(render_queue_element_max_size_)));
+
+ render_queue_buffer_.resize(render_queue_element_max_size_);
+ capture_queue_buffer_.resize(render_queue_element_max_size_);
+ } else {
+ render_signal_queue_->Clear();
+ }
}
void EchoCancellationImpl::SetExtraOptions(const Config& config) {
- extended_filter_enabled_ = config.Get<ExtendedFilter>().enabled;
- delay_agnostic_enabled_ = config.Get<DelayAgnostic>().enabled;
+ {
+ rtc::CritScope cs(crit_capture_);
+ extended_filter_enabled_ = config.Get<ExtendedFilter>().enabled;
+ delay_agnostic_enabled_ = config.Get<DelayAgnostic>().enabled;
+ }
Configure();
}
@@ -351,23 +461,25 @@ void EchoCancellationImpl::DestroyHandle(void* handle) const {
}
int EchoCancellationImpl::InitializeHandle(void* handle) const {
+ // Not locked as it only relies on APM public API which is threadsafe.
+
assert(handle != NULL);
// TODO(ajm): Drift compensation is disabled in practice. If restored, it
// should be managed internally and not depend on the hardware sample rate.
// For now, just hardcode a 48 kHz value.
return WebRtcAec_Init(static_cast<Handle*>(handle),
- apm_->proc_sample_rate_hz(),
- 48000);
+ apm_->proc_sample_rate_hz(), 48000);
}
int EchoCancellationImpl::ConfigureHandle(void* handle) const {
+ rtc::CritScope cs_render(crit_render_);
+ rtc::CritScope cs_capture(crit_capture_);
assert(handle != NULL);
AecConfig config;
config.metricsMode = metrics_enabled_;
config.nlpMode = MapSetting(suppression_level_);
config.skewMode = drift_compensation_enabled_;
config.delay_logging = delay_logging_enabled_;
-
WebRtcAec_enable_extended_filter(
WebRtcAec_aec_core(static_cast<Handle*>(handle)),
extended_filter_enabled_ ? 1 : 0);
@@ -377,13 +489,14 @@ int EchoCancellationImpl::ConfigureHandle(void* handle) const {
return WebRtcAec_set_config(static_cast<Handle*>(handle), config);
}
-int EchoCancellationImpl::num_handles_required() const {
- return apm_->num_output_channels() *
- apm_->num_reverse_channels();
+size_t EchoCancellationImpl::num_handles_required() const {
+ // Not locked as it only relies on APM public API which is threadsafe.
+ return apm_->num_output_channels() * apm_->num_reverse_channels();
}
int EchoCancellationImpl::GetHandleError(void* handle) const {
+ // Not locked as it does not rely on anything in the state.
assert(handle != NULL);
- return MapError(WebRtcAec_get_error_code(static_cast<Handle*>(handle)));
+ return AudioProcessing::kUnspecifiedError;
}
} // namespace webrtc
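This file carries the core of the change: ProcessRenderAudio no longer feeds WebRtcAec_BufferFarend directly under one global lock. The render thread copies far-end samples into a preallocated queue, and the capture thread drains them in ReadQueuedRenderData. A sketch of that handoff with std::mutex and std::deque standing in for rtc::CriticalSection and SwapQueue; the real SwapQueue swaps preallocated vectors to avoid heap allocation on the audio threads, which this sketch does not reproduce.

#include <cstddef>
#include <deque>
#include <mutex>
#include <utility>
#include <vector>

class RenderQueue {
 public:
  // Render thread: enqueue one chunk; returns false when full, in which
  // case the caller drains and retries, as ProcessRenderAudio does above.
  bool Insert(std::vector<float>* chunk) {
    std::lock_guard<std::mutex> lock(mutex_);
    if (queue_.size() >= kMaxNumFramesToBuffer) {
      return false;
    }
    queue_.push_back(std::move(*chunk));
    return true;
  }

  // Capture thread: pop everything queued since the last capture call.
  bool Remove(std::vector<float>* chunk) {
    std::lock_guard<std::mutex> lock(mutex_);
    if (queue_.empty()) {
      return false;
    }
    *chunk = std::move(queue_.front());
    queue_.pop_front();
    return true;
  }

 private:
  static constexpr size_t kMaxNumFramesToBuffer = 100;
  std::mutex mutex_;
  std::deque<std::vector<float>> queue_;
};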
diff --git a/webrtc/modules/audio_processing/echo_cancellation_impl.h b/webrtc/modules/audio_processing/echo_cancellation_impl.h
index 070dcabc5d..a40a267e32 100644
--- a/webrtc/modules/audio_processing/echo_cancellation_impl.h
+++ b/webrtc/modules/audio_processing/echo_cancellation_impl.h
@@ -11,19 +11,22 @@
#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_ECHO_CANCELLATION_IMPL_H_
#define WEBRTC_MODULES_AUDIO_PROCESSING_ECHO_CANCELLATION_IMPL_H_
+#include "webrtc/base/criticalsection.h"
+#include "webrtc/base/scoped_ptr.h"
+#include "webrtc/common_audio/swap_queue.h"
#include "webrtc/modules/audio_processing/include/audio_processing.h"
#include "webrtc/modules/audio_processing/processing_component.h"
namespace webrtc {
class AudioBuffer;
-class CriticalSectionWrapper;
class EchoCancellationImpl : public EchoCancellation,
public ProcessingComponent {
public:
EchoCancellationImpl(const AudioProcessing* apm,
- CriticalSectionWrapper* crit);
+ rtc::CriticalSection* crit_render,
+ rtc::CriticalSection* crit_capture);
virtual ~EchoCancellationImpl();
int ProcessRenderAudio(const AudioBuffer* audio);
@@ -38,10 +41,13 @@ class EchoCancellationImpl : public EchoCancellation,
// ProcessingComponent implementation.
int Initialize() override;
void SetExtraOptions(const Config& config) override;
-
bool is_delay_agnostic_enabled() const;
bool is_extended_filter_enabled() const;
+ // Reads render side data that has been queued on the render call.
+ // Called holding the capture lock.
+ void ReadQueuedRenderData();
+
private:
// EchoCancellation implementation.
int Enable(bool enable) override;
@@ -58,6 +64,7 @@ class EchoCancellationImpl : public EchoCancellation,
int GetDelayMetrics(int* median,
int* std,
float* fraction_poor_delays) override;
+
struct AecCore* aec_core() const override;
// ProcessingComponent implementation.
@@ -65,20 +72,35 @@ class EchoCancellationImpl : public EchoCancellation,
int InitializeHandle(void* handle) const override;
int ConfigureHandle(void* handle) const override;
void DestroyHandle(void* handle) const override;
- int num_handles_required() const override;
+ size_t num_handles_required() const override;
int GetHandleError(void* handle) const override;
+ void AllocateRenderQueue();
+
+ // Not guarded as its public API is thread safe.
const AudioProcessing* apm_;
- CriticalSectionWrapper* crit_;
- bool drift_compensation_enabled_;
- bool metrics_enabled_;
- SuppressionLevel suppression_level_;
- int stream_drift_samples_;
- bool was_stream_drift_set_;
- bool stream_has_echo_;
- bool delay_logging_enabled_;
- bool extended_filter_enabled_;
- bool delay_agnostic_enabled_;
+
+ rtc::CriticalSection* const crit_render_ ACQUIRED_BEFORE(crit_capture_);
+ rtc::CriticalSection* const crit_capture_;
+
+ bool drift_compensation_enabled_ GUARDED_BY(crit_capture_);
+ bool metrics_enabled_ GUARDED_BY(crit_capture_);
+ SuppressionLevel suppression_level_ GUARDED_BY(crit_capture_);
+ int stream_drift_samples_ GUARDED_BY(crit_capture_);
+ bool was_stream_drift_set_ GUARDED_BY(crit_capture_);
+ bool stream_has_echo_ GUARDED_BY(crit_capture_);
+ bool delay_logging_enabled_ GUARDED_BY(crit_capture_);
+ bool extended_filter_enabled_ GUARDED_BY(crit_capture_);
+ bool delay_agnostic_enabled_ GUARDED_BY(crit_capture_);
+
+ size_t render_queue_element_max_size_ GUARDED_BY(crit_render_)
+ GUARDED_BY(crit_capture_);
+ std::vector<float> render_queue_buffer_ GUARDED_BY(crit_render_);
+ std::vector<float> capture_queue_buffer_ GUARDED_BY(crit_capture_);
+
+ // Lock protection not needed.
+ rtc::scoped_ptr<SwapQueue<std::vector<float>, RenderQueueItemVerifier<float>>>
+ render_signal_queue_;
};
} // namespace webrtc
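GUARDED_BY and ACQUIRED_BEFORE above come from webrtc/base/thread_annotations.h and map to Clang's thread-safety attributes, letting -Wthread-safety warn when a guarded member is touched without its lock. A minimal sketch of the placement; note that full analysis needs a mutex type annotated as a capability, which plain std::mutex is not, so this only illustrates where the attribute goes:

#if defined(__clang__)
#define GUARDED_BY(x) __attribute__((guarded_by(x)))
#else
#define GUARDED_BY(x)  // No-op elsewhere.
#endif

#include <mutex>

class Annotated {
 public:
  void set_enabled(bool enabled) {
    std::lock_guard<std::mutex> lock(capture_mutex_);
    enabled_ = enabled;  // Written only with the guarding lock held.
  }

 private:
  std::mutex capture_mutex_;
  bool enabled_ GUARDED_BY(capture_mutex_) = false;
};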
diff --git a/webrtc/modules/audio_processing/echo_cancellation_impl_unittest.cc b/webrtc/modules/audio_processing/echo_cancellation_impl_unittest.cc
index b2e11981fa..7f152bf942 100644
--- a/webrtc/modules/audio_processing/echo_cancellation_impl_unittest.cc
+++ b/webrtc/modules/audio_processing/echo_cancellation_impl_unittest.cc
@@ -14,7 +14,6 @@ extern "C" {
#include "webrtc/modules/audio_processing/aec/aec_core.h"
}
#include "webrtc/modules/audio_processing/include/audio_processing.h"
-#include "webrtc/test/testsupport/gtest_disable.h"
namespace webrtc {
diff --git a/webrtc/modules/audio_processing/echo_control_mobile_impl.cc b/webrtc/modules/audio_processing/echo_control_mobile_impl.cc
index 954aac7d4a..f2df5f7984 100644
--- a/webrtc/modules/audio_processing/echo_control_mobile_impl.cc
+++ b/webrtc/modules/audio_processing/echo_control_mobile_impl.cc
@@ -13,9 +13,8 @@
#include <assert.h>
#include <string.h>
-#include "webrtc/modules/audio_processing/aecm/include/echo_control_mobile.h"
+#include "webrtc/modules/audio_processing/aecm/echo_control_mobile.h"
#include "webrtc/modules/audio_processing/audio_buffer.h"
-#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
#include "webrtc/system_wrappers/include/logging.h"
namespace webrtc {
@@ -56,6 +55,12 @@ AudioProcessing::Error MapError(int err) {
return AudioProcessing::kUnspecifiedError;
}
}
+// Maximum length that a frame of samples can have.
+static const size_t kMaxAllowedValuesOfSamplesPerFrame = 160;
+// Maximum number of frames to buffer in the render queue.
+// TODO(peah): Decrease this once we properly handle hugely unbalanced
+// reverse and forward call numbers.
+static const size_t kMaxNumFramesToBuffer = 100;
} // namespace
size_t EchoControlMobile::echo_path_size_bytes() {
@@ -63,13 +68,20 @@ size_t EchoControlMobile::echo_path_size_bytes() {
}
EchoControlMobileImpl::EchoControlMobileImpl(const AudioProcessing* apm,
- CriticalSectionWrapper* crit)
- : ProcessingComponent(),
- apm_(apm),
- crit_(crit),
- routing_mode_(kSpeakerphone),
- comfort_noise_enabled_(true),
- external_echo_path_(NULL) {}
+ rtc::CriticalSection* crit_render,
+ rtc::CriticalSection* crit_capture)
+ : ProcessingComponent(),
+ apm_(apm),
+ crit_render_(crit_render),
+ crit_capture_(crit_capture),
+ routing_mode_(kSpeakerphone),
+ comfort_noise_enabled_(true),
+ external_echo_path_(NULL),
+ render_queue_element_max_size_(0) {
+ RTC_DCHECK(apm);
+ RTC_DCHECK(crit_render);
+ RTC_DCHECK(crit_capture);
+}
EchoControlMobileImpl::~EchoControlMobileImpl() {
if (external_echo_path_ != NULL) {
@@ -79,53 +91,98 @@ EchoControlMobileImpl::~EchoControlMobileImpl() {
}
int EchoControlMobileImpl::ProcessRenderAudio(const AudioBuffer* audio) {
+ rtc::CritScope cs_render(crit_render_);
+
if (!is_component_enabled()) {
- return apm_->kNoError;
+ return AudioProcessing::kNoError;
}
assert(audio->num_frames_per_band() <= 160);
assert(audio->num_channels() == apm_->num_reverse_channels());
- int err = apm_->kNoError;
-
+ int err = AudioProcessing::kNoError;
// The ordering convention must be followed to pass to the correct AECM.
size_t handle_index = 0;
- for (int i = 0; i < apm_->num_output_channels(); i++) {
- for (int j = 0; j < audio->num_channels(); j++) {
+ render_queue_buffer_.clear();
+ for (size_t i = 0; i < apm_->num_output_channels(); i++) {
+ for (size_t j = 0; j < audio->num_channels(); j++) {
Handle* my_handle = static_cast<Handle*>(handle(handle_index));
- err = WebRtcAecm_BufferFarend(
- my_handle,
- audio->split_bands_const(j)[kBand0To8kHz],
+ err = WebRtcAecm_GetBufferFarendError(
+ my_handle, audio->split_bands_const(j)[kBand0To8kHz],
audio->num_frames_per_band());
- if (err != apm_->kNoError) {
- return GetHandleError(my_handle); // TODO(ajm): warning possible?
- }
+ if (err != AudioProcessing::kNoError)
+ return MapError(err); // TODO(ajm): warning possible?
+
+ // Buffer the samples in the render queue.
+ render_queue_buffer_.insert(render_queue_buffer_.end(),
+ audio->split_bands_const(j)[kBand0To8kHz],
+ (audio->split_bands_const(j)[kBand0To8kHz] +
+ audio->num_frames_per_band()));
handle_index++;
}
}
- return apm_->kNoError;
+ // Insert the samples into the queue.
+ if (!render_signal_queue_->Insert(&render_queue_buffer_)) {
+ // The data queue is full and needs to be emptied.
+ ReadQueuedRenderData();
+
+ // Retry the insert (should always work).
+ RTC_DCHECK_EQ(render_signal_queue_->Insert(&render_queue_buffer_), true);
+ }
+
+ return AudioProcessing::kNoError;
+}
+
+// Read chunks of data that were received and queued on the render side from
+// a queue. All the data chunks are buffered into the farend signal of the AECM.
+void EchoControlMobileImpl::ReadQueuedRenderData() {
+ rtc::CritScope cs_capture(crit_capture_);
+
+ if (!is_component_enabled()) {
+ return;
+ }
+
+ while (render_signal_queue_->Remove(&capture_queue_buffer_)) {
+ size_t handle_index = 0;
+ size_t buffer_index = 0;
+ const size_t num_frames_per_band =
+ capture_queue_buffer_.size() /
+ (apm_->num_output_channels() * apm_->num_reverse_channels());
+ for (size_t i = 0; i < apm_->num_output_channels(); i++) {
+ for (size_t j = 0; j < apm_->num_reverse_channels(); j++) {
+ Handle* my_handle = static_cast<Handle*>(handle(handle_index));
+ WebRtcAecm_BufferFarend(my_handle, &capture_queue_buffer_[buffer_index],
+ num_frames_per_band);
+
+ buffer_index += num_frames_per_band;
+ handle_index++;
+ }
+ }
+ }
}
int EchoControlMobileImpl::ProcessCaptureAudio(AudioBuffer* audio) {
+ rtc::CritScope cs_capture(crit_capture_);
+
if (!is_component_enabled()) {
- return apm_->kNoError;
+ return AudioProcessing::kNoError;
}
if (!apm_->was_stream_delay_set()) {
- return apm_->kStreamParameterNotSetError;
+ return AudioProcessing::kStreamParameterNotSetError;
}
assert(audio->num_frames_per_band() <= 160);
assert(audio->num_channels() == apm_->num_output_channels());
- int err = apm_->kNoError;
+ int err = AudioProcessing::kNoError;
// The ordering convention must be followed to pass to the correct AECM.
size_t handle_index = 0;
- for (int i = 0; i < audio->num_channels(); i++) {
+ for (size_t i = 0; i < audio->num_channels(); i++) {
// TODO(ajm): improve how this works, possibly inside AECM.
// This is kind of hacked up.
const int16_t* noisy = audio->low_pass_reference(i);
@@ -134,7 +191,7 @@ int EchoControlMobileImpl::ProcessCaptureAudio(AudioBuffer* audio) {
noisy = clean;
clean = NULL;
}
- for (int j = 0; j < apm_->num_reverse_channels(); j++) {
+ for (size_t j = 0; j < apm_->num_reverse_channels(); j++) {
Handle* my_handle = static_cast<Handle*>(handle(handle_index));
err = WebRtcAecm_Process(
my_handle,
@@ -144,109 +201,158 @@ int EchoControlMobileImpl::ProcessCaptureAudio(AudioBuffer* audio) {
audio->num_frames_per_band(),
apm_->stream_delay_ms());
- if (err != apm_->kNoError) {
- return GetHandleError(my_handle); // TODO(ajm): warning possible?
- }
+ if (err != AudioProcessing::kNoError)
+ return MapError(err);
handle_index++;
}
}
- return apm_->kNoError;
+ return AudioProcessing::kNoError;
}
int EchoControlMobileImpl::Enable(bool enable) {
- CriticalSectionScoped crit_scoped(crit_);
// Ensure AEC and AECM are not both enabled.
+ rtc::CritScope cs_render(crit_render_);
+ rtc::CritScope cs_capture(crit_capture_);
+ // The is_enabled call is safe from a deadlock perspective
+ // as both locks are already held in the correct order.
if (enable && apm_->echo_cancellation()->is_enabled()) {
- return apm_->kBadParameterError;
+ return AudioProcessing::kBadParameterError;
}
return EnableComponent(enable);
}
bool EchoControlMobileImpl::is_enabled() const {
+ rtc::CritScope cs(crit_capture_);
return is_component_enabled();
}
int EchoControlMobileImpl::set_routing_mode(RoutingMode mode) {
- CriticalSectionScoped crit_scoped(crit_);
if (MapSetting(mode) == -1) {
- return apm_->kBadParameterError;
+ return AudioProcessing::kBadParameterError;
}
- routing_mode_ = mode;
+ {
+ rtc::CritScope cs(crit_capture_);
+ routing_mode_ = mode;
+ }
return Configure();
}
EchoControlMobile::RoutingMode EchoControlMobileImpl::routing_mode()
const {
+ rtc::CritScope cs(crit_capture_);
return routing_mode_;
}
int EchoControlMobileImpl::enable_comfort_noise(bool enable) {
- CriticalSectionScoped crit_scoped(crit_);
- comfort_noise_enabled_ = enable;
+ {
+ rtc::CritScope cs(crit_capture_);
+ comfort_noise_enabled_ = enable;
+ }
return Configure();
}
bool EchoControlMobileImpl::is_comfort_noise_enabled() const {
+ rtc::CritScope cs(crit_capture_);
return comfort_noise_enabled_;
}
int EchoControlMobileImpl::SetEchoPath(const void* echo_path,
size_t size_bytes) {
- CriticalSectionScoped crit_scoped(crit_);
- if (echo_path == NULL) {
- return apm_->kNullPointerError;
- }
- if (size_bytes != echo_path_size_bytes()) {
- // Size mismatch
- return apm_->kBadParameterError;
- }
+ {
+ rtc::CritScope cs_render(crit_render_);
+ rtc::CritScope cs_capture(crit_capture_);
+ if (echo_path == NULL) {
+ return AudioProcessing::kNullPointerError;
+ }
+ if (size_bytes != echo_path_size_bytes()) {
+ // Size mismatch
+ return AudioProcessing::kBadParameterError;
+ }
- if (external_echo_path_ == NULL) {
- external_echo_path_ = new unsigned char[size_bytes];
+ if (external_echo_path_ == NULL) {
+ external_echo_path_ = new unsigned char[size_bytes];
+ }
+ memcpy(external_echo_path_, echo_path, size_bytes);
}
- memcpy(external_echo_path_, echo_path, size_bytes);
return Initialize();
}
int EchoControlMobileImpl::GetEchoPath(void* echo_path,
size_t size_bytes) const {
- CriticalSectionScoped crit_scoped(crit_);
+ rtc::CritScope cs(crit_capture_);
if (echo_path == NULL) {
- return apm_->kNullPointerError;
+ return AudioProcessing::kNullPointerError;
}
if (size_bytes != echo_path_size_bytes()) {
// Size mismatch
- return apm_->kBadParameterError;
+ return AudioProcessing::kBadParameterError;
}
if (!is_component_enabled()) {
- return apm_->kNotEnabledError;
+ return AudioProcessing::kNotEnabledError;
}
// Get the echo path from the first channel
Handle* my_handle = static_cast<Handle*>(handle(0));
- if (WebRtcAecm_GetEchoPath(my_handle, echo_path, size_bytes) != 0) {
- return GetHandleError(my_handle);
- }
+ int32_t err = WebRtcAecm_GetEchoPath(my_handle, echo_path, size_bytes);
+ if (err != 0)
+ return MapError(err);
- return apm_->kNoError;
+ return AudioProcessing::kNoError;
}
int EchoControlMobileImpl::Initialize() {
- if (!is_component_enabled()) {
- return apm_->kNoError;
+ {
+ rtc::CritScope cs_capture(crit_capture_);
+ if (!is_component_enabled()) {
+ return AudioProcessing::kNoError;
+ }
}
- if (apm_->proc_sample_rate_hz() > apm_->kSampleRate16kHz) {
+ if (apm_->proc_sample_rate_hz() > AudioProcessing::kSampleRate16kHz) {
LOG(LS_ERROR) << "AECM only supports 16 kHz or lower sample rates";
- return apm_->kBadSampleRateError;
+ return AudioProcessing::kBadSampleRateError;
}
- return ProcessingComponent::Initialize();
+ int err = ProcessingComponent::Initialize();
+ if (err != AudioProcessing::kNoError) {
+ return err;
+ }
+
+ AllocateRenderQueue();
+
+ return AudioProcessing::kNoError;
+}
+
+void EchoControlMobileImpl::AllocateRenderQueue() {
+ const size_t new_render_queue_element_max_size = std::max<size_t>(
+ static_cast<size_t>(1),
+ kMaxAllowedValuesOfSamplesPerFrame * num_handles_required());
+
+ rtc::CritScope cs_render(crit_render_);
+ rtc::CritScope cs_capture(crit_capture_);
+
+ // Reallocate the queue if the queue item size is too small to fit the
+ // data to put in the queue.
+ if (render_queue_element_max_size_ < new_render_queue_element_max_size) {
+ render_queue_element_max_size_ = new_render_queue_element_max_size;
+
+ std::vector<int16_t> template_queue_element(render_queue_element_max_size_);
+
+ render_signal_queue_.reset(
+ new SwapQueue<std::vector<int16_t>, RenderQueueItemVerifier<int16_t>>(
+ kMaxNumFramesToBuffer, template_queue_element,
+ RenderQueueItemVerifier<int16_t>(render_queue_element_max_size_)));
+
+ render_queue_buffer_.resize(render_queue_element_max_size_);
+ capture_queue_buffer_.resize(render_queue_element_max_size_);
+ } else {
+ render_signal_queue_->Clear();
+ }
}
void* EchoControlMobileImpl::CreateHandle() const {
@@ -254,10 +360,14 @@ void* EchoControlMobileImpl::CreateHandle() const {
}
void EchoControlMobileImpl::DestroyHandle(void* handle) const {
+ // This method is only called in a non-concurrent manner during APM
+ // destruction.
WebRtcAecm_Free(static_cast<Handle*>(handle));
}
int EchoControlMobileImpl::InitializeHandle(void* handle) const {
+ rtc::CritScope cs_render(crit_render_);
+ rtc::CritScope cs_capture(crit_capture_);
assert(handle != NULL);
Handle* my_handle = static_cast<Handle*>(handle);
if (WebRtcAecm_Init(my_handle, apm_->proc_sample_rate_hz()) != 0) {
@@ -271,10 +381,12 @@ int EchoControlMobileImpl::InitializeHandle(void* handle) const {
}
}
- return apm_->kNoError;
+ return AudioProcessing::kNoError;
}
int EchoControlMobileImpl::ConfigureHandle(void* handle) const {
+ rtc::CritScope cs_render(crit_render_);
+ rtc::CritScope cs_capture(crit_capture_);
AecmConfig config;
config.cngMode = comfort_noise_enabled_;
config.echoMode = MapSetting(routing_mode_);
@@ -282,13 +394,14 @@ int EchoControlMobileImpl::ConfigureHandle(void* handle) const {
return WebRtcAecm_set_config(static_cast<Handle*>(handle), config);
}
-int EchoControlMobileImpl::num_handles_required() const {
- return apm_->num_output_channels() *
- apm_->num_reverse_channels();
+size_t EchoControlMobileImpl::num_handles_required() const {
+ // Not locked as it only relies on APM public API which is threadsafe.
+ return apm_->num_output_channels() * apm_->num_reverse_channels();
}
int EchoControlMobileImpl::GetHandleError(void* handle) const {
+ // Not locked as it does not rely on anything in the state.
assert(handle != NULL);
- return MapError(WebRtcAecm_get_error_code(static_cast<Handle*>(handle)));
+ return AudioProcessing::kUnspecifiedError;
}
} // namespace webrtc
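AECM mirrors the AEC change, including the shared overflow policy: if the render queue is full, the render thread drains it on the capture side's behalf and retries, and the retry cannot fail because the queue was just emptied. That pattern as a free-standing sketch (Queue and DrainFn are illustrative stand-ins, not WebRTC types):

#include <cstdint>
#include <vector>

// |queue->Insert()| returns false when full; |drain()| empties the queue
// into the far-end buffer, as ReadQueuedRenderData() does above.
template <typename Queue, typename DrainFn>
void InsertOrDrainAndRetry(Queue* queue,
                           DrainFn drain,
                           std::vector<int16_t>* chunk) {
  if (!queue->Insert(chunk)) {
    drain();                               // Queue full: empty it first.
    const bool ok = queue->Insert(chunk);  // Cannot fail after a drain.
    (void)ok;                              // RTC_DCHECK_EQ(ok, true) upstream.
  }
}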
diff --git a/webrtc/modules/audio_processing/echo_control_mobile_impl.h b/webrtc/modules/audio_processing/echo_control_mobile_impl.h
index da7022545f..4d6529d3ac 100644
--- a/webrtc/modules/audio_processing/echo_control_mobile_impl.h
+++ b/webrtc/modules/audio_processing/echo_control_mobile_impl.h
@@ -11,19 +11,23 @@
#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_ECHO_CONTROL_MOBILE_IMPL_H_
#define WEBRTC_MODULES_AUDIO_PROCESSING_ECHO_CONTROL_MOBILE_IMPL_H_
+#include "webrtc/base/criticalsection.h"
+#include "webrtc/base/scoped_ptr.h"
+#include "webrtc/common_audio/swap_queue.h"
#include "webrtc/modules/audio_processing/include/audio_processing.h"
#include "webrtc/modules/audio_processing/processing_component.h"
namespace webrtc {
class AudioBuffer;
-class CriticalSectionWrapper;
class EchoControlMobileImpl : public EchoControlMobile,
public ProcessingComponent {
public:
EchoControlMobileImpl(const AudioProcessing* apm,
- CriticalSectionWrapper* crit);
+ rtc::CriticalSection* crit_render,
+ rtc::CriticalSection* crit_capture);
+
virtual ~EchoControlMobileImpl();
int ProcessRenderAudio(const AudioBuffer* audio);
@@ -37,6 +41,9 @@ class EchoControlMobileImpl : public EchoControlMobile,
// ProcessingComponent implementation.
int Initialize() override;
+ // Reads render side data that has been queued on the render call.
+ void ReadQueuedRenderData();
+
private:
// EchoControlMobile implementation.
int Enable(bool enable) override;
@@ -46,18 +53,37 @@ class EchoControlMobileImpl : public EchoControlMobile,
int GetEchoPath(void* echo_path, size_t size_bytes) const override;
// ProcessingComponent implementation.
+ // Called holding both the render and capture locks.
void* CreateHandle() const override;
int InitializeHandle(void* handle) const override;
int ConfigureHandle(void* handle) const override;
void DestroyHandle(void* handle) const override;
- int num_handles_required() const override;
+ size_t num_handles_required() const override;
int GetHandleError(void* handle) const override;
+ void AllocateRenderQueue();
+
+ // Not guarded as its public API is thread safe.
const AudioProcessing* apm_;
- CriticalSectionWrapper* crit_;
- RoutingMode routing_mode_;
- bool comfort_noise_enabled_;
- unsigned char* external_echo_path_;
+
+ rtc::CriticalSection* const crit_render_ ACQUIRED_BEFORE(crit_capture_);
+ rtc::CriticalSection* const crit_capture_;
+
+ RoutingMode routing_mode_ GUARDED_BY(crit_capture_);
+ bool comfort_noise_enabled_ GUARDED_BY(crit_capture_);
+ unsigned char* external_echo_path_ GUARDED_BY(crit_render_)
+ GUARDED_BY(crit_capture_);
+
+ size_t render_queue_element_max_size_ GUARDED_BY(crit_render_)
+ GUARDED_BY(crit_capture_);
+
+ std::vector<int16_t> render_queue_buffer_ GUARDED_BY(crit_render_);
+ std::vector<int16_t> capture_queue_buffer_ GUARDED_BY(crit_capture_);
+
+ // Lock protection not needed.
+ rtc::scoped_ptr<
+ SwapQueue<std::vector<int16_t>, RenderQueueItemVerifier<int16_t>>>
+ render_signal_queue_;
};
} // namespace webrtc
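RenderQueueItemVerifier<int16_t> appears here only as a SwapQueue template argument, constructed with render_queue_element_max_size_; from that usage it is evidently a predicate the queue applies to candidate items. A sketch consistent with that usage; this is an assumption about the real class in webrtc/common_audio/swap_queue.h, not a copy of it:

#include <cstddef>
#include <vector>

template <typename T>
class QueueItemVerifier {  // Illustrative stand-in.
 public:
  explicit QueueItemVerifier(size_t minimum_capacity)
      : minimum_capacity_(minimum_capacity) {}

  // Accept an item only if it can hold a full queue element without
  // reallocating, keeping the swap-based queue allocation-free.
  bool operator()(const std::vector<T>& v) const {
    return v.capacity() >= minimum_capacity_;
  }

 private:
  size_t minimum_capacity_;
};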
diff --git a/webrtc/modules/audio_processing/gain_control_impl.cc b/webrtc/modules/audio_processing/gain_control_impl.cc
index 3b1537e796..04a6c7ba29 100644
--- a/webrtc/modules/audio_processing/gain_control_impl.cc
+++ b/webrtc/modules/audio_processing/gain_control_impl.cc
@@ -14,7 +14,6 @@
#include "webrtc/modules/audio_processing/audio_buffer.h"
#include "webrtc/modules/audio_processing/agc/legacy/gain_control.h"
-#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
namespace webrtc {
@@ -33,60 +32,113 @@ int16_t MapSetting(GainControl::Mode mode) {
assert(false);
return -1;
}
+
+// Maximum length that a frame of samples can have.
+static const size_t kMaxAllowedValuesOfSamplesPerFrame = 160;
+// Maximum number of frames to buffer in the render queue.
+// TODO(peah): Decrease this once we properly handle hugely unbalanced
+// reverse and forward call numbers.
+static const size_t kMaxNumFramesToBuffer = 100;
+
} // namespace
GainControlImpl::GainControlImpl(const AudioProcessing* apm,
- CriticalSectionWrapper* crit)
- : ProcessingComponent(),
- apm_(apm),
- crit_(crit),
- mode_(kAdaptiveAnalog),
- minimum_capture_level_(0),
- maximum_capture_level_(255),
- limiter_enabled_(true),
- target_level_dbfs_(3),
- compression_gain_db_(9),
- analog_capture_level_(0),
- was_analog_level_set_(false),
- stream_is_saturated_(false) {}
+ rtc::CriticalSection* crit_render,
+ rtc::CriticalSection* crit_capture)
+ : ProcessingComponent(),
+ apm_(apm),
+ crit_render_(crit_render),
+ crit_capture_(crit_capture),
+ mode_(kAdaptiveAnalog),
+ minimum_capture_level_(0),
+ maximum_capture_level_(255),
+ limiter_enabled_(true),
+ target_level_dbfs_(3),
+ compression_gain_db_(9),
+ analog_capture_level_(0),
+ was_analog_level_set_(false),
+ stream_is_saturated_(false),
+ render_queue_element_max_size_(0) {
+ RTC_DCHECK(apm);
+ RTC_DCHECK(crit_render);
+ RTC_DCHECK(crit_capture);
+}
GainControlImpl::~GainControlImpl() {}
int GainControlImpl::ProcessRenderAudio(AudioBuffer* audio) {
+ rtc::CritScope cs(crit_render_);
if (!is_component_enabled()) {
- return apm_->kNoError;
+ return AudioProcessing::kNoError;
}
assert(audio->num_frames_per_band() <= 160);
- for (int i = 0; i < num_handles(); i++) {
+ render_queue_buffer_.resize(0);
+ for (size_t i = 0; i < num_handles(); i++) {
Handle* my_handle = static_cast<Handle*>(handle(i));
- int err = WebRtcAgc_AddFarend(
- my_handle,
- audio->mixed_low_pass_data(),
- audio->num_frames_per_band());
+ int err =
+ WebRtcAgc_GetAddFarendError(my_handle, audio->num_frames_per_band());
- if (err != apm_->kNoError) {
+ if (err != AudioProcessing::kNoError)
return GetHandleError(my_handle);
- }
+
+ // Buffer the samples in the render queue.
+ render_queue_buffer_.insert(
+ render_queue_buffer_.end(), audio->mixed_low_pass_data(),
+ (audio->mixed_low_pass_data() + audio->num_frames_per_band()));
+ }
+
+ // Insert the samples into the queue.
+ if (!render_signal_queue_->Insert(&render_queue_buffer_)) {
+ // The data queue is full and needs to be emptied.
+ ReadQueuedRenderData();
+
+ // Retry the insert (should always work).
+ RTC_DCHECK_EQ(render_signal_queue_->Insert(&render_queue_buffer_), true);
+ }
+
+ return AudioProcessing::kNoError;
+}
+
+// Read chunks of data that were received and queued on the render side from
+// a queue. All the data chunks are buffered into the farend signal of the AGC.
+void GainControlImpl::ReadQueuedRenderData() {
+ rtc::CritScope cs(crit_capture_);
+
+ if (!is_component_enabled()) {
+ return;
}
- return apm_->kNoError;
+ while (render_signal_queue_->Remove(&capture_queue_buffer_)) {
+ size_t buffer_index = 0;
+ const size_t num_frames_per_band =
+ capture_queue_buffer_.size() / num_handles();
+ for (size_t i = 0; i < num_handles(); i++) {
+ Handle* my_handle = static_cast<Handle*>(handle(i));
+ WebRtcAgc_AddFarend(my_handle, &capture_queue_buffer_[buffer_index],
+ num_frames_per_band);
+
+ buffer_index += num_frames_per_band;
+ }
+ }
}
int GainControlImpl::AnalyzeCaptureAudio(AudioBuffer* audio) {
+ rtc::CritScope cs(crit_capture_);
+
if (!is_component_enabled()) {
- return apm_->kNoError;
+ return AudioProcessing::kNoError;
}
assert(audio->num_frames_per_band() <= 160);
assert(audio->num_channels() == num_handles());
- int err = apm_->kNoError;
+ int err = AudioProcessing::kNoError;
if (mode_ == kAdaptiveAnalog) {
capture_levels_.assign(num_handles(), analog_capture_level_);
- for (int i = 0; i < num_handles(); i++) {
+ for (size_t i = 0; i < num_handles(); i++) {
Handle* my_handle = static_cast<Handle*>(handle(i));
err = WebRtcAgc_AddMic(
my_handle,
@@ -94,13 +146,13 @@ int GainControlImpl::AnalyzeCaptureAudio(AudioBuffer* audio) {
audio->num_bands(),
audio->num_frames_per_band());
- if (err != apm_->kNoError) {
+ if (err != AudioProcessing::kNoError) {
return GetHandleError(my_handle);
}
}
} else if (mode_ == kAdaptiveDigital) {
- for (int i = 0; i < num_handles(); i++) {
+ for (size_t i = 0; i < num_handles(); i++) {
Handle* my_handle = static_cast<Handle*>(handle(i));
int32_t capture_level_out = 0;
@@ -114,34 +166,38 @@ int GainControlImpl::AnalyzeCaptureAudio(AudioBuffer* audio) {
capture_levels_[i] = capture_level_out;
- if (err != apm_->kNoError) {
+ if (err != AudioProcessing::kNoError) {
return GetHandleError(my_handle);
}
}
}
- return apm_->kNoError;
+ return AudioProcessing::kNoError;
}
int GainControlImpl::ProcessCaptureAudio(AudioBuffer* audio) {
+ rtc::CritScope cs(crit_capture_);
+
if (!is_component_enabled()) {
- return apm_->kNoError;
+ return AudioProcessing::kNoError;
}
if (mode_ == kAdaptiveAnalog && !was_analog_level_set_) {
- return apm_->kStreamParameterNotSetError;
+ return AudioProcessing::kStreamParameterNotSetError;
}
assert(audio->num_frames_per_band() <= 160);
assert(audio->num_channels() == num_handles());
stream_is_saturated_ = false;
- for (int i = 0; i < num_handles(); i++) {
+ for (size_t i = 0; i < num_handles(); i++) {
Handle* my_handle = static_cast<Handle*>(handle(i));
int32_t capture_level_out = 0;
uint8_t saturation_warning = 0;
+ // The call to stream_has_echo() is ok from a deadlock perspective
+ // as the capture lock is already held.
int err = WebRtcAgc_Process(
my_handle,
audio->split_bands_const(i),
@@ -153,7 +209,7 @@ int GainControlImpl::ProcessCaptureAudio(AudioBuffer* audio) {
apm_->echo_cancellation()->stream_has_echo(),
&saturation_warning);
- if (err != apm_->kNoError) {
+ if (err != AudioProcessing::kNoError) {
return GetHandleError(my_handle);
}
@@ -166,7 +222,7 @@ int GainControlImpl::ProcessCaptureAudio(AudioBuffer* audio) {
if (mode_ == kAdaptiveAnalog) {
// Take the analog level to be the average across the handles.
analog_capture_level_ = 0;
- for (int i = 0; i < num_handles(); i++) {
+ for (size_t i = 0; i < num_handles(); i++) {
analog_capture_level_ += capture_levels_[i];
}
@@ -174,22 +230,24 @@ int GainControlImpl::ProcessCaptureAudio(AudioBuffer* audio) {
}
was_analog_level_set_ = false;
- return apm_->kNoError;
+ return AudioProcessing::kNoError;
}
// TODO(ajm): ensure this is called under kAdaptiveAnalog.
int GainControlImpl::set_stream_analog_level(int level) {
- CriticalSectionScoped crit_scoped(crit_);
+ rtc::CritScope cs(crit_capture_);
+
was_analog_level_set_ = true;
if (level < minimum_capture_level_ || level > maximum_capture_level_) {
- return apm_->kBadParameterError;
+ return AudioProcessing::kBadParameterError;
}
analog_capture_level_ = level;
- return apm_->kNoError;
+ return AudioProcessing::kNoError;
}
int GainControlImpl::stream_analog_level() {
+ rtc::CritScope cs(crit_capture_);
// TODO(ajm): enable this assertion?
//assert(mode_ == kAdaptiveAnalog);
@@ -197,18 +255,21 @@ int GainControlImpl::stream_analog_level() {
}
int GainControlImpl::Enable(bool enable) {
- CriticalSectionScoped crit_scoped(crit_);
+ rtc::CritScope cs_render(crit_render_);
+ rtc::CritScope cs_capture(crit_capture_);
return EnableComponent(enable);
}
bool GainControlImpl::is_enabled() const {
+ rtc::CritScope cs(crit_capture_);
return is_component_enabled();
}
int GainControlImpl::set_mode(Mode mode) {
- CriticalSectionScoped crit_scoped(crit_);
+ rtc::CritScope cs_render(crit_render_);
+ rtc::CritScope cs_capture(crit_capture_);
if (MapSetting(mode) == -1) {
- return apm_->kBadParameterError;
+ return AudioProcessing::kBadParameterError;
}
mode_ = mode;
@@ -216,22 +277,23 @@ int GainControlImpl::set_mode(Mode mode) {
}
GainControl::Mode GainControlImpl::mode() const {
+ rtc::CritScope cs(crit_capture_);
return mode_;
}
int GainControlImpl::set_analog_level_limits(int minimum,
int maximum) {
- CriticalSectionScoped crit_scoped(crit_);
+ rtc::CritScope cs(crit_capture_);
if (minimum < 0) {
- return apm_->kBadParameterError;
+ return AudioProcessing::kBadParameterError;
}
if (maximum > 65535) {
- return apm_->kBadParameterError;
+ return AudioProcessing::kBadParameterError;
}
if (maximum < minimum) {
- return apm_->kBadParameterError;
+ return AudioProcessing::kBadParameterError;
}
minimum_capture_level_ = minimum;
@@ -241,21 +303,24 @@ int GainControlImpl::set_analog_level_limits(int minimum,
}
int GainControlImpl::analog_level_minimum() const {
+ rtc::CritScope cs(crit_capture_);
return minimum_capture_level_;
}
int GainControlImpl::analog_level_maximum() const {
+ rtc::CritScope cs(crit_capture_);
return maximum_capture_level_;
}
bool GainControlImpl::stream_is_saturated() const {
+ rtc::CritScope cs(crit_capture_);
return stream_is_saturated_;
}
int GainControlImpl::set_target_level_dbfs(int level) {
- CriticalSectionScoped crit_scoped(crit_);
+ rtc::CritScope cs(crit_capture_);
if (level > 31 || level < 0) {
- return apm_->kBadParameterError;
+ return AudioProcessing::kBadParameterError;
}
target_level_dbfs_ = level;
@@ -263,13 +328,14 @@ int GainControlImpl::set_target_level_dbfs(int level) {
}
int GainControlImpl::target_level_dbfs() const {
+ rtc::CritScope cs(crit_capture_);
return target_level_dbfs_;
}
int GainControlImpl::set_compression_gain_db(int gain) {
- CriticalSectionScoped crit_scoped(crit_);
+ rtc::CritScope cs(crit_capture_);
if (gain < 0 || gain > 90) {
- return apm_->kBadParameterError;
+ return AudioProcessing::kBadParameterError;
}
compression_gain_db_ = gain;
@@ -277,27 +343,59 @@ int GainControlImpl::set_compression_gain_db(int gain) {
}
int GainControlImpl::compression_gain_db() const {
+ rtc::CritScope cs(crit_capture_);
return compression_gain_db_;
}
int GainControlImpl::enable_limiter(bool enable) {
- CriticalSectionScoped crit_scoped(crit_);
+ rtc::CritScope cs(crit_capture_);
limiter_enabled_ = enable;
return Configure();
}
bool GainControlImpl::is_limiter_enabled() const {
+ rtc::CritScope cs(crit_capture_);
return limiter_enabled_;
}
int GainControlImpl::Initialize() {
int err = ProcessingComponent::Initialize();
- if (err != apm_->kNoError || !is_component_enabled()) {
+ if (err != AudioProcessing::kNoError || !is_component_enabled()) {
return err;
}
- capture_levels_.assign(num_handles(), analog_capture_level_);
- return apm_->kNoError;
+ AllocateRenderQueue();
+
+ rtc::CritScope cs_capture(crit_capture_);
+ const int n = num_handles();
+ RTC_CHECK_GE(n, 0) << "Bad number of handles: " << n;
+
+ capture_levels_.assign(n, analog_capture_level_);
+ return AudioProcessing::kNoError;
+}
+
+void GainControlImpl::AllocateRenderQueue() {
+ const size_t new_render_queue_element_max_size =
+ std::max<size_t>(static_cast<size_t>(1),
+ kMaxAllowedValuesOfSamplesPerFrame * num_handles());
+
+ rtc::CritScope cs_render(crit_render_);
+ rtc::CritScope cs_capture(crit_capture_);
+
+ if (render_queue_element_max_size_ < new_render_queue_element_max_size) {
+ render_queue_element_max_size_ = new_render_queue_element_max_size;
+ std::vector<int16_t> template_queue_element(render_queue_element_max_size_);
+
+ render_signal_queue_.reset(
+ new SwapQueue<std::vector<int16_t>, RenderQueueItemVerifier<int16_t>>(
+ kMaxNumFramesToBuffer, template_queue_element,
+ RenderQueueItemVerifier<int16_t>(render_queue_element_max_size_)));
+
+ render_queue_buffer_.resize(render_queue_element_max_size_);
+ capture_queue_buffer_.resize(render_queue_element_max_size_);
+ } else {
+ render_signal_queue_->Clear();
+ }
}
void* GainControlImpl::CreateHandle() const {
@@ -309,6 +407,9 @@ void GainControlImpl::DestroyHandle(void* handle) const {
}
int GainControlImpl::InitializeHandle(void* handle) const {
+ rtc::CritScope cs_render(crit_render_);
+ rtc::CritScope cs_capture(crit_capture_);
+
return WebRtcAgc_Init(static_cast<Handle*>(handle),
minimum_capture_level_,
maximum_capture_level_,
@@ -317,6 +418,8 @@ int GainControlImpl::InitializeHandle(void* handle) const {
}
int GainControlImpl::ConfigureHandle(void* handle) const {
+ rtc::CritScope cs_render(crit_render_);
+ rtc::CritScope cs_capture(crit_capture_);
WebRtcAgcConfig config;
// TODO(ajm): Flip the sign here (since AGC expects a positive value) if we
// change the interface.
@@ -330,14 +433,15 @@ int GainControlImpl::ConfigureHandle(void* handle) const {
return WebRtcAgc_set_config(static_cast<Handle*>(handle), config);
}
-int GainControlImpl::num_handles_required() const {
- return apm_->num_output_channels();
+size_t GainControlImpl::num_handles_required() const {
+ // Not locked as it only relies on the APM public API, which is thread safe.
+ return apm_->num_proc_channels();
}
int GainControlImpl::GetHandleError(void* handle) const {
// The AGC has no get_error() function.
// (Despite listing errors in its interface...)
assert(handle != NULL);
- return apm_->kUnspecifiedError;
+ return AudioProcessing::kUnspecifiedError;
}
} // namespace webrtc
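The AllocateRenderQueue()/ReadQueuedRenderData() pair above implements a lock-light handoff of render-side data to the capture thread: the render call packs samples into render_queue_buffer_ and swaps them into render_signal_queue_, and the capture call drains the queue before processing. A minimal sketch of that producer/consumer pattern, assuming only the SwapQueue Insert()/Remove() interface used above (the function names and element size are illustrative):

    // Sketch only: the swap-based handoff avoids allocation and long-held
    // locks on the real-time audio threads.
    #include <cstdint>
    #include <vector>
    #include "webrtc/common_audio/swap_queue.h"
    #include "webrtc/modules/audio_processing/processing_component.h"

    using RenderQueue =
        webrtc::SwapQueue<std::vector<int16_t>,
                          webrtc::RenderQueueItemVerifier<int16_t>>;

    void OnRenderAudio(RenderQueue* queue, std::vector<int16_t>* scratch) {
      // |scratch| holds freshly packed render data. Insert() swaps the
      // vector with a queue slot instead of copying it.
      if (!queue->Insert(scratch)) {
        // Queue full: the capture side fell behind; this block is dropped.
      }
    }

    void OnCaptureAudio(RenderQueue* queue, std::vector<int16_t>* scratch) {
      // Drain everything queued since the previous capture callback.
      while (queue->Remove(scratch)) {
        // Feed |*scratch| to the AGC far-end analysis here.
      }
    }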
diff --git a/webrtc/modules/audio_processing/gain_control_impl.h b/webrtc/modules/audio_processing/gain_control_impl.h
index f24d200cf2..72789ba5e1 100644
--- a/webrtc/modules/audio_processing/gain_control_impl.h
+++ b/webrtc/modules/audio_processing/gain_control_impl.h
@@ -13,19 +13,23 @@
#include <vector>
+#include "webrtc/base/criticalsection.h"
+#include "webrtc/base/scoped_ptr.h"
+#include "webrtc/base/thread_annotations.h"
+#include "webrtc/common_audio/swap_queue.h"
#include "webrtc/modules/audio_processing/include/audio_processing.h"
#include "webrtc/modules/audio_processing/processing_component.h"
namespace webrtc {
class AudioBuffer;
-class CriticalSectionWrapper;
class GainControlImpl : public GainControl,
public ProcessingComponent {
public:
GainControlImpl(const AudioProcessing* apm,
- CriticalSectionWrapper* crit);
+ rtc::CriticalSection* crit_render,
+ rtc::CriticalSection* crit_capture);
virtual ~GainControlImpl();
int ProcessRenderAudio(AudioBuffer* audio);
@@ -41,6 +45,9 @@ class GainControlImpl : public GainControl,
bool is_limiter_enabled() const override;
Mode mode() const override;
+ // Reads render-side data that has been queued during the render call.
+ void ReadQueuedRenderData();
+
private:
// GainControl implementation.
int Enable(bool enable) override;
@@ -61,21 +68,37 @@ class GainControlImpl : public GainControl,
int InitializeHandle(void* handle) const override;
int ConfigureHandle(void* handle) const override;
void DestroyHandle(void* handle) const override;
- int num_handles_required() const override;
+ size_t num_handles_required() const override;
int GetHandleError(void* handle) const override;
+ void AllocateRenderQueue();
+
+ // Not guarded as its public API is thread safe.
const AudioProcessing* apm_;
- CriticalSectionWrapper* crit_;
- Mode mode_;
- int minimum_capture_level_;
- int maximum_capture_level_;
- bool limiter_enabled_;
- int target_level_dbfs_;
- int compression_gain_db_;
- std::vector<int> capture_levels_;
- int analog_capture_level_;
- bool was_analog_level_set_;
- bool stream_is_saturated_;
+
+ rtc::CriticalSection* const crit_render_ ACQUIRED_BEFORE(crit_capture_);
+ rtc::CriticalSection* const crit_capture_;
+
+ Mode mode_ GUARDED_BY(crit_capture_);
+ int minimum_capture_level_ GUARDED_BY(crit_capture_);
+ int maximum_capture_level_ GUARDED_BY(crit_capture_);
+ bool limiter_enabled_ GUARDED_BY(crit_capture_);
+ int target_level_dbfs_ GUARDED_BY(crit_capture_);
+ int compression_gain_db_ GUARDED_BY(crit_capture_);
+ std::vector<int> capture_levels_ GUARDED_BY(crit_capture_);
+ int analog_capture_level_ GUARDED_BY(crit_capture_);
+ bool was_analog_level_set_ GUARDED_BY(crit_capture_);
+ bool stream_is_saturated_ GUARDED_BY(crit_capture_);
+
+ size_t render_queue_element_max_size_ GUARDED_BY(crit_render_)
+ GUARDED_BY(crit_capture_);
+ std::vector<int16_t> render_queue_buffer_ GUARDED_BY(crit_render_);
+ std::vector<int16_t> capture_queue_buffer_ GUARDED_BY(crit_capture_);
+
+ // Lock protection not needed.
+ rtc::scoped_ptr<
+ SwapQueue<std::vector<int16_t>, RenderQueueItemVerifier<int16_t>>>
+ render_signal_queue_;
};
} // namespace webrtc
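These declarations lean on Clang's thread-safety analysis: GUARDED_BY ties each member to the capture lock, and ACQUIRED_BEFORE encodes the render-before-capture acquisition order so lock-order inversions are flagged at compile time (with -Wthread-safety). A minimal self-contained sketch of the idiom; the Gauge class is illustrative, not part of WebRTC:

    #include "webrtc/base/criticalsection.h"
    #include "webrtc/base/thread_annotations.h"

    class Gauge {
     public:
      void Update(int v) {
        rtc::CritScope cs_render(&render_lock_);    // Must be taken first...
        rtc::CritScope cs_capture(&capture_lock_);  // ...capture second.
        value_ = v;
      }

     private:
      // The annotations are checked statically; reversing the two
      // CritScopes above would produce a -Wthread-safety warning.
      rtc::CriticalSection render_lock_ ACQUIRED_BEFORE(capture_lock_);
      rtc::CriticalSection capture_lock_;
      int value_ GUARDED_BY(capture_lock_) = 0;
    };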
diff --git a/webrtc/modules/audio_processing/high_pass_filter_impl.cc b/webrtc/modules/audio_processing/high_pass_filter_impl.cc
index 29e482078e..375d58febb 100644
--- a/webrtc/modules/audio_processing/high_pass_filter_impl.cc
+++ b/webrtc/modules/audio_processing/high_pass_filter_impl.cc
@@ -10,159 +10,125 @@
#include "webrtc/modules/audio_processing/high_pass_filter_impl.h"
-#include <assert.h>
-
#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
#include "webrtc/modules/audio_processing/audio_buffer.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
-#include "webrtc/typedefs.h"
-
namespace webrtc {
namespace {
-const int16_t kFilterCoefficients8kHz[5] =
- {3798, -7596, 3798, 7807, -3733};
-
-const int16_t kFilterCoefficients[5] =
- {4012, -8024, 4012, 8002, -3913};
-
-struct FilterState {
- int16_t y[4];
- int16_t x[2];
- const int16_t* ba;
-};
-
-int InitializeFilter(FilterState* hpf, int sample_rate_hz) {
- assert(hpf != NULL);
+const int16_t kFilterCoefficients8kHz[5] = {3798, -7596, 3798, 7807, -3733};
+const int16_t kFilterCoefficients[5] = {4012, -8024, 4012, 8002, -3913};
+} // namespace
- if (sample_rate_hz == AudioProcessing::kSampleRate8kHz) {
- hpf->ba = kFilterCoefficients8kHz;
- } else {
- hpf->ba = kFilterCoefficients;
+class HighPassFilterImpl::BiquadFilter {
+ public:
+ explicit BiquadFilter(int sample_rate_hz)
+ : ba_(sample_rate_hz == AudioProcessing::kSampleRate8kHz
+ ? kFilterCoefficients8kHz
+ : kFilterCoefficients) {
+ Reset();
}
- WebRtcSpl_MemSetW16(hpf->x, 0, 2);
- WebRtcSpl_MemSetW16(hpf->y, 0, 4);
-
- return AudioProcessing::kNoError;
-}
-
-int Filter(FilterState* hpf, int16_t* data, size_t length) {
- assert(hpf != NULL);
-
- int32_t tmp_int32 = 0;
- int16_t* y = hpf->y;
- int16_t* x = hpf->x;
- const int16_t* ba = hpf->ba;
-
- for (size_t i = 0; i < length; i++) {
- // y[i] = b[0] * x[i] + b[1] * x[i-1] + b[2] * x[i-2]
- // + -a[1] * y[i-1] + -a[2] * y[i-2];
-
- tmp_int32 = y[1] * ba[3]; // -a[1] * y[i-1] (low part)
- tmp_int32 += y[3] * ba[4]; // -a[2] * y[i-2] (low part)
- tmp_int32 = (tmp_int32 >> 15);
- tmp_int32 += y[0] * ba[3]; // -a[1] * y[i-1] (high part)
- tmp_int32 += y[2] * ba[4]; // -a[2] * y[i-2] (high part)
- tmp_int32 = (tmp_int32 << 1);
-
- tmp_int32 += data[i] * ba[0]; // b[0]*x[0]
- tmp_int32 += x[0] * ba[1]; // b[1]*x[i-1]
- tmp_int32 += x[1] * ba[2]; // b[2]*x[i-2]
-
- // Update state (input part)
- x[1] = x[0];
- x[0] = data[i];
-
- // Update state (filtered part)
- y[2] = y[0];
- y[3] = y[1];
- y[0] = static_cast<int16_t>(tmp_int32 >> 13);
- y[1] = static_cast<int16_t>(
- (tmp_int32 - (static_cast<int32_t>(y[0]) << 13)) << 2);
-
- // Rounding in Q12, i.e. add 2^11
- tmp_int32 += 2048;
-
- // Saturate (to 2^27) so that the HP filtered signal does not overflow
- tmp_int32 = WEBRTC_SPL_SAT(static_cast<int32_t>(134217727),
- tmp_int32,
- static_cast<int32_t>(-134217728));
-
- // Convert back to Q0 and use rounding.
- data[i] = (int16_t)(tmp_int32 >> 12);
+ void Reset() {
+ std::memset(x_, 0, sizeof(x_));
+ std::memset(y_, 0, sizeof(y_));
}
- return AudioProcessing::kNoError;
-}
-} // namespace
+ void Process(int16_t* data, size_t length) {
+ const int16_t* const ba = ba_;
+ int16_t* x = x_;
+ int16_t* y = y_;
+ int32_t tmp_int32 = 0;
+
+ for (size_t i = 0; i < length; i++) {
+ // y[i] = b[0] * x[i] + b[1] * x[i-1] + b[2] * x[i-2]
+ // + -a[1] * y[i-1] + -a[2] * y[i-2];
+
+ tmp_int32 = y[1] * ba[3]; // -a[1] * y[i-1] (low part)
+ tmp_int32 += y[3] * ba[4]; // -a[2] * y[i-2] (low part)
+ tmp_int32 = (tmp_int32 >> 15);
+ tmp_int32 += y[0] * ba[3]; // -a[1] * y[i-1] (high part)
+ tmp_int32 += y[2] * ba[4]; // -a[2] * y[i-2] (high part)
+ tmp_int32 = (tmp_int32 << 1);
+
+ tmp_int32 += data[i] * ba[0]; // b[0] * x[0]
+ tmp_int32 += x[0] * ba[1]; // b[1] * x[i-1]
+ tmp_int32 += x[1] * ba[2]; // b[2] * x[i-2]
+
+ // Update state (input part).
+ x[1] = x[0];
+ x[0] = data[i];
+
+ // Update state (filtered part).
+ y[2] = y[0];
+ y[3] = y[1];
+ y[0] = static_cast<int16_t>(tmp_int32 >> 13);
+ y[1] = static_cast<int16_t>(
+ (tmp_int32 - (static_cast<int32_t>(y[0]) << 13)) << 2);
+
+ // Rounding in Q12, i.e. add 2^11.
+ tmp_int32 += 2048;
+
+ // Saturate (to 2^27) so that the HP filtered signal does not overflow.
+ tmp_int32 = WEBRTC_SPL_SAT(static_cast<int32_t>(134217727),
+ tmp_int32,
+ static_cast<int32_t>(-134217728));
+
+ // Convert back to Q0 and use rounding.
+ data[i] = static_cast<int16_t>(tmp_int32 >> 12);
+ }
+ }
-typedef FilterState Handle;
+ private:
+ const int16_t* const ba_ = nullptr;
+ int16_t x_[2];
+ int16_t y_[4];
+};
-HighPassFilterImpl::HighPassFilterImpl(const AudioProcessing* apm,
- CriticalSectionWrapper* crit)
- : ProcessingComponent(),
- apm_(apm),
- crit_(crit) {}
+HighPassFilterImpl::HighPassFilterImpl(rtc::CriticalSection* crit)
+ : crit_(crit) {
+ RTC_DCHECK(crit_);
+}
HighPassFilterImpl::~HighPassFilterImpl() {}
-int HighPassFilterImpl::ProcessCaptureAudio(AudioBuffer* audio) {
- int err = apm_->kNoError;
-
- if (!is_component_enabled()) {
- return apm_->kNoError;
+void HighPassFilterImpl::Initialize(size_t channels, int sample_rate_hz) {
+ std::vector<rtc::scoped_ptr<BiquadFilter>> new_filters(channels);
+ for (size_t i = 0; i < channels; i++) {
+ new_filters[i].reset(new BiquadFilter(sample_rate_hz));
}
+ rtc::CritScope cs(crit_);
+ filters_.swap(new_filters);
+}
- assert(audio->num_frames_per_band() <= 160);
-
- for (int i = 0; i < num_handles(); i++) {
- Handle* my_handle = static_cast<Handle*>(handle(i));
- err = Filter(my_handle,
- audio->split_bands(i)[kBand0To8kHz],
- audio->num_frames_per_band());
-
- if (err != apm_->kNoError) {
- return GetHandleError(my_handle);
- }
+void HighPassFilterImpl::ProcessCaptureAudio(AudioBuffer* audio) {
+ RTC_DCHECK(audio);
+ rtc::CritScope cs(crit_);
+ if (!enabled_) {
+ return;
}
- return apm_->kNoError;
+ RTC_DCHECK_GE(160u, audio->num_frames_per_band());
+ RTC_DCHECK_EQ(filters_.size(), audio->num_channels());
+ for (size_t i = 0; i < filters_.size(); i++) {
+ filters_[i]->Process(audio->split_bands(i)[kBand0To8kHz],
+ audio->num_frames_per_band());
+ }
}
int HighPassFilterImpl::Enable(bool enable) {
- CriticalSectionScoped crit_scoped(crit_);
- return EnableComponent(enable);
+ rtc::CritScope cs(crit_);
+ if (!enabled_ && enable) {
+ for (auto& filter : filters_) {
+ filter->Reset();
+ }
+ }
+ enabled_ = enable;
+ return AudioProcessing::kNoError;
}
bool HighPassFilterImpl::is_enabled() const {
- return is_component_enabled();
-}
-
-void* HighPassFilterImpl::CreateHandle() const {
- return new FilterState;
-}
-
-void HighPassFilterImpl::DestroyHandle(void* handle) const {
- delete static_cast<Handle*>(handle);
-}
-
-int HighPassFilterImpl::InitializeHandle(void* handle) const {
- return InitializeFilter(static_cast<Handle*>(handle),
- apm_->proc_sample_rate_hz());
-}
-
-int HighPassFilterImpl::ConfigureHandle(void* /*handle*/) const {
- return apm_->kNoError; // Not configurable.
-}
-
-int HighPassFilterImpl::num_handles_required() const {
- return apm_->num_output_channels();
-}
-
-int HighPassFilterImpl::GetHandleError(void* handle) const {
- // The component has no detailed errors.
- assert(handle != NULL);
- return apm_->kUnspecifiedError;
+ rtc::CritScope cs(crit_);
+ return enabled_;
}
} // namespace webrtc
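A note for readers decoding the fixed-point arithmetic in BiquadFilter::Process(): the coefficients are Q12, the recursive state y is kept as high/low halves so the feedback path retains extra fractional precision, adding 2048 rounds in Q12, and the final >> 12 returns to Q0 after saturation. A floating-point reference of the same direct-form I biquad, offered as a sketch only (the class name is illustrative):

    #include <cstddef>

    class FloatBiquad {
     public:
      // ba[0..2] are the b coefficients; ba[3..4] hold -a1 and -a2,
      // mirroring the layout of kFilterCoefficients above (scaled from Q12).
      explicit FloatBiquad(const float* ba) : ba_(ba) {}

      void Process(float* data, size_t length) {
        for (size_t i = 0; i < length; ++i) {
          // y[i] = b0*x[i] + b1*x[i-1] + b2*x[i-2] - a1*y[i-1] - a2*y[i-2]
          const float y = ba_[0] * data[i] + ba_[1] * x1_ + ba_[2] * x2_ +
                          ba_[3] * y1_ + ba_[4] * y2_;
          x2_ = x1_;
          x1_ = data[i];
          y2_ = y1_;
          y1_ = y;
          data[i] = y;
        }
      }

     private:
      const float* ba_;
      float x1_ = 0.f, x2_ = 0.f, y1_ = 0.f, y2_ = 0.f;
    };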
diff --git a/webrtc/modules/audio_processing/high_pass_filter_impl.h b/webrtc/modules/audio_processing/high_pass_filter_impl.h
index 90b393e903..0e985bac7a 100644
--- a/webrtc/modules/audio_processing/high_pass_filter_impl.h
+++ b/webrtc/modules/audio_processing/high_pass_filter_impl.h
@@ -11,39 +11,34 @@
#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_HIGH_PASS_FILTER_IMPL_H_
#define WEBRTC_MODULES_AUDIO_PROCESSING_HIGH_PASS_FILTER_IMPL_H_
+#include "webrtc/base/constructormagic.h"
+#include "webrtc/base/criticalsection.h"
+#include "webrtc/base/scoped_ptr.h"
#include "webrtc/modules/audio_processing/include/audio_processing.h"
-#include "webrtc/modules/audio_processing/processing_component.h"
namespace webrtc {
class AudioBuffer;
-class CriticalSectionWrapper;
-class HighPassFilterImpl : public HighPassFilter,
- public ProcessingComponent {
+class HighPassFilterImpl : public HighPassFilter {
public:
- HighPassFilterImpl(const AudioProcessing* apm, CriticalSectionWrapper* crit);
- virtual ~HighPassFilterImpl();
+ explicit HighPassFilterImpl(rtc::CriticalSection* crit);
+ ~HighPassFilterImpl() override;
- int ProcessCaptureAudio(AudioBuffer* audio);
+ // TODO(peah): Fold into ctor, once public API is removed.
+ void Initialize(size_t channels, int sample_rate_hz);
+ void ProcessCaptureAudio(AudioBuffer* audio);
// HighPassFilter implementation.
+ int Enable(bool enable) override;
bool is_enabled() const override;
private:
- // HighPassFilter implementation.
- int Enable(bool enable) override;
-
- // ProcessingComponent implementation.
- void* CreateHandle() const override;
- int InitializeHandle(void* handle) const override;
- int ConfigureHandle(void* handle) const override;
- void DestroyHandle(void* handle) const override;
- int num_handles_required() const override;
- int GetHandleError(void* handle) const override;
-
- const AudioProcessing* apm_;
- CriticalSectionWrapper* crit_;
+ class BiquadFilter;
+ rtc::CriticalSection* const crit_ = nullptr;
+ bool enabled_ GUARDED_BY(crit_) = false;
+ std::vector<rtc::scoped_ptr<BiquadFilter>> filters_ GUARDED_BY(crit_);
+ RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(HighPassFilterImpl);
};
} // namespace webrtc
diff --git a/webrtc/modules/audio_processing/include/audio_processing.h b/webrtc/modules/audio_processing/include/audio_processing.h
index c8ddc6a483..9a3a4b32d5 100644
--- a/webrtc/modules/audio_processing/include/audio_processing.h
+++ b/webrtc/modules/audio_processing/include/audio_processing.h
@@ -65,6 +65,7 @@ class VoiceDetection;
struct ExtendedFilter {
ExtendedFilter() : enabled(false) {}
explicit ExtendedFilter(bool enabled) : enabled(enabled) {}
+ static const ConfigOptionID identifier = ConfigOptionID::kExtendedFilter;
bool enabled;
};
@@ -76,6 +77,7 @@ struct ExtendedFilter {
struct DelayAgnostic {
DelayAgnostic() : enabled(false) {}
explicit DelayAgnostic(bool enabled) : enabled(enabled) {}
+ static const ConfigOptionID identifier = ConfigOptionID::kDelayAgnostic;
bool enabled;
};
@@ -96,6 +98,7 @@ struct ExperimentalAgc {
: enabled(enabled), startup_min_volume(kAgcStartupMinVolume) {}
ExperimentalAgc(bool enabled, int startup_min_volume)
: enabled(enabled), startup_min_volume(startup_min_volume) {}
+ static const ConfigOptionID identifier = ConfigOptionID::kExperimentalAgc;
bool enabled;
int startup_min_volume;
};
@@ -105,6 +108,7 @@ struct ExperimentalAgc {
struct ExperimentalNs {
ExperimentalNs() : enabled(false) {}
explicit ExperimentalNs(bool enabled) : enabled(enabled) {}
+ static const ConfigOptionID identifier = ConfigOptionID::kExperimentalNs;
bool enabled;
};
@@ -127,6 +131,7 @@ struct Beamforming {
: enabled(enabled),
array_geometry(array_geometry),
target_direction(target_direction) {}
+ static const ConfigOptionID identifier = ConfigOptionID::kBeamforming;
const bool enabled;
const std::vector<Point> array_geometry;
const SphericalPointf target_direction;
@@ -141,6 +146,7 @@ struct Beamforming {
struct Intelligibility {
Intelligibility() : enabled(false) {}
explicit Intelligibility(bool enabled) : enabled(enabled) {}
+ static const ConfigOptionID identifier = ConfigOptionID::kIntelligibility;
bool enabled;
};
@@ -279,13 +285,18 @@ class AudioProcessing {
// ensures the options are applied immediately.
virtual void SetExtraOptions(const Config& config) = 0;
+ // TODO(peah): Remove after voice engine no longer requires it to resample
+ // the reverse stream to the forward rate.
+ virtual int input_sample_rate_hz() const = 0;
+
// TODO(ajm): Only intended for internal use. Make private and friend the
// necessary classes?
virtual int proc_sample_rate_hz() const = 0;
virtual int proc_split_sample_rate_hz() const = 0;
- virtual int num_input_channels() const = 0;
- virtual int num_output_channels() const = 0;
- virtual int num_reverse_channels() const = 0;
+ virtual size_t num_input_channels() const = 0;
+ virtual size_t num_proc_channels() const = 0;
+ virtual size_t num_output_channels() const = 0;
+ virtual size_t num_reverse_channels() const = 0;
// Set to true when the output of AudioProcessing will be muted or in some
// other way not used. Ideally, the captured audio would still be processed,
@@ -497,7 +508,7 @@ class StreamConfig {
// is true, the last channel in any corresponding list of
// channels is the keyboard channel.
StreamConfig(int sample_rate_hz = 0,
- int num_channels = 0,
+ size_t num_channels = 0,
bool has_keyboard = false)
: sample_rate_hz_(sample_rate_hz),
num_channels_(num_channels),
@@ -508,14 +519,14 @@ class StreamConfig {
sample_rate_hz_ = value;
num_frames_ = calculate_frames(value);
}
- void set_num_channels(int value) { num_channels_ = value; }
+ void set_num_channels(size_t value) { num_channels_ = value; }
void set_has_keyboard(bool value) { has_keyboard_ = value; }
int sample_rate_hz() const { return sample_rate_hz_; }
// The number of channels in the stream, not including the keyboard channel if
// present.
- int num_channels() const { return num_channels_; }
+ size_t num_channels() const { return num_channels_; }
bool has_keyboard() const { return has_keyboard_; }
size_t num_frames() const { return num_frames_; }
@@ -536,7 +547,7 @@ class StreamConfig {
}
int sample_rate_hz_;
- int num_channels_;
+ size_t num_channels_;
bool has_keyboard_;
size_t num_frames_;
};
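With the channel counts migrated to size_t, a StreamConfig is built and queried as below; num_frames() is derived from the rate as one 10 ms chunk. A short usage sketch (the rates and counts are illustrative):

    #include "webrtc/modules/audio_processing/include/audio_processing.h"

    void ConfigureCapture() {
      // 48 kHz stereo stream without a keyboard channel.
      webrtc::StreamConfig capture(48000, 2u, false);
      const size_t frames = capture.num_frames();  // 480 = 48000 / 100.
      capture.set_num_channels(1u);  // Now takes size_t rather than int.
      (void)frames;
    }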
diff --git a/webrtc/modules/audio_processing/include/mock_audio_processing.h b/webrtc/modules/audio_processing/include/mock_audio_processing.h
index 4ff52baf1c..9e1f2d5861 100644
--- a/webrtc/modules/audio_processing/include/mock_audio_processing.h
+++ b/webrtc/modules/audio_processing/include/mock_audio_processing.h
@@ -201,11 +201,11 @@ class MockAudioProcessing : public AudioProcessing {
MOCK_CONST_METHOD0(proc_split_sample_rate_hz,
int());
MOCK_CONST_METHOD0(num_input_channels,
- int());
+ size_t());
MOCK_CONST_METHOD0(num_output_channels,
- int());
+ size_t());
MOCK_CONST_METHOD0(num_reverse_channels,
- int());
+ size_t());
MOCK_METHOD1(set_output_will_be_muted,
void(bool muted));
MOCK_CONST_METHOD0(output_will_be_muted,
diff --git a/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.cc b/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.cc
index d014ce060c..fe964aba8c 100644
--- a/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.cc
+++ b/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.cc
@@ -54,12 +54,12 @@ IntelligibilityEnhancer::TransformCallback::TransformCallback(
void IntelligibilityEnhancer::TransformCallback::ProcessAudioBlock(
const complex<float>* const* in_block,
- int in_channels,
+ size_t in_channels,
size_t frames,
- int /* out_channels */,
+ size_t /* out_channels */,
complex<float>* const* out_block) {
RTC_DCHECK_EQ(parent_->freqs_, frames);
- for (int i = 0; i < in_channels; ++i) {
+ for (size_t i = 0; i < in_channels; ++i) {
parent_->DispatchAudio(source_, in_block[i], out_block[i]);
}
}
@@ -129,7 +129,7 @@ IntelligibilityEnhancer::IntelligibilityEnhancer(const Config& config)
void IntelligibilityEnhancer::ProcessRenderAudio(float* const* audio,
int sample_rate_hz,
- int num_channels) {
+ size_t num_channels) {
RTC_CHECK_EQ(sample_rate_hz_, sample_rate_hz);
RTC_CHECK_EQ(num_render_channels_, num_channels);
@@ -138,7 +138,7 @@ void IntelligibilityEnhancer::ProcessRenderAudio(float* const* audio,
}
if (active_) {
- for (int i = 0; i < num_render_channels_; ++i) {
+ for (size_t i = 0; i < num_render_channels_; ++i) {
memcpy(audio[i], temp_render_out_buffer_.channels()[i],
chunk_length_ * sizeof(**audio));
}
@@ -147,7 +147,7 @@ void IntelligibilityEnhancer::ProcessRenderAudio(float* const* audio,
void IntelligibilityEnhancer::AnalyzeCaptureAudio(float* const* audio,
int sample_rate_hz,
- int num_channels) {
+ size_t num_channels) {
RTC_CHECK_EQ(sample_rate_hz_, sample_rate_hz);
RTC_CHECK_EQ(num_capture_channels_, num_channels);
diff --git a/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.h b/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.h
index 1e9e35ac2a..1eb22342ad 100644
--- a/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.h
+++ b/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.h
@@ -47,8 +47,8 @@ class IntelligibilityEnhancer {
gain_change_limit(0.1f),
rho(0.02f) {}
int sample_rate_hz;
- int num_capture_channels;
- int num_render_channels;
+ size_t num_capture_channels;
+ size_t num_render_channels;
intelligibility::VarianceArray::StepType var_type;
float var_decay_rate;
size_t var_window_size;
@@ -63,12 +63,12 @@ class IntelligibilityEnhancer {
// Reads and processes chunk of noise stream in time domain.
void AnalyzeCaptureAudio(float* const* audio,
int sample_rate_hz,
- int num_channels);
+ size_t num_channels);
// Reads chunk of speech in time domain and updates with modified signal.
void ProcessRenderAudio(float* const* audio,
int sample_rate_hz,
- int num_channels);
+ size_t num_channels);
bool active() const;
private:
@@ -85,9 +85,9 @@ class IntelligibilityEnhancer {
// All in frequency domain, receives input |in_block|, applies
// intelligibility enhancement, and writes result to |out_block|.
void ProcessAudioBlock(const std::complex<float>* const* in_block,
- int in_channels,
+ size_t in_channels,
size_t frames,
- int out_channels,
+ size_t out_channels,
std::complex<float>* const* out_block) override;
private:
@@ -144,8 +144,8 @@ class IntelligibilityEnhancer {
const size_t bank_size_; // Num ERB filters.
const int sample_rate_hz_;
const int erb_resolution_;
- const int num_capture_channels_;
- const int num_render_channels_;
+ const size_t num_capture_channels_;
+ const size_t num_render_channels_;
const int analysis_rate_; // Num blocks before gains recalculated.
const bool active_; // Whether render gains are being updated.
diff --git a/webrtc/modules/audio_processing/intelligibility/test/intelligibility_proc.cc b/webrtc/modules/audio_processing/intelligibility/test/intelligibility_proc.cc
index 27d0ab48bb..4d2f5f4c5d 100644
--- a/webrtc/modules/audio_processing/intelligibility/test/intelligibility_proc.cc
+++ b/webrtc/modules/audio_processing/intelligibility/test/intelligibility_proc.cc
@@ -68,7 +68,7 @@ DEFINE_string(out_file,
"Enhanced output. Use '-' to "
"play through aplay immediately.");
-const int kNumChannels = 1;
+const size_t kNumChannels = 1;
// void function for gtest
void void_main(int argc, char* argv[]) {
diff --git a/webrtc/modules/audio_processing/level_estimator_impl.cc b/webrtc/modules/audio_processing/level_estimator_impl.cc
index 35fe697c2d..187873e33e 100644
--- a/webrtc/modules/audio_processing/level_estimator_impl.cc
+++ b/webrtc/modules/audio_processing/level_estimator_impl.cc
@@ -11,76 +11,55 @@
#include "webrtc/modules/audio_processing/level_estimator_impl.h"
#include "webrtc/modules/audio_processing/audio_buffer.h"
-#include "webrtc/modules/audio_processing/include/audio_processing.h"
#include "webrtc/modules/audio_processing/rms_level.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
namespace webrtc {
-LevelEstimatorImpl::LevelEstimatorImpl(const AudioProcessing* apm,
- CriticalSectionWrapper* crit)
- : ProcessingComponent(),
- crit_(crit) {}
+LevelEstimatorImpl::LevelEstimatorImpl(rtc::CriticalSection* crit)
+ : crit_(crit), rms_(new RMSLevel()) {
+ RTC_DCHECK(crit);
+}
LevelEstimatorImpl::~LevelEstimatorImpl() {}
-int LevelEstimatorImpl::ProcessStream(AudioBuffer* audio) {
- if (!is_component_enabled()) {
- return AudioProcessing::kNoError;
- }
+void LevelEstimatorImpl::Initialize() {
+ rtc::CritScope cs(crit_);
+ rms_->Reset();
+}
- RMSLevel* rms_level = static_cast<RMSLevel*>(handle(0));
- for (int i = 0; i < audio->num_channels(); ++i) {
- rms_level->Process(audio->channels_const()[i],
- audio->num_frames());
+void LevelEstimatorImpl::ProcessStream(AudioBuffer* audio) {
+ RTC_DCHECK(audio);
+ rtc::CritScope cs(crit_);
+ if (!enabled_) {
+ return;
}
- return AudioProcessing::kNoError;
+ for (size_t i = 0; i < audio->num_channels(); i++) {
+ rms_->Process(audio->channels_const()[i], audio->num_frames());
+ }
}
int LevelEstimatorImpl::Enable(bool enable) {
- CriticalSectionScoped crit_scoped(crit_);
- return EnableComponent(enable);
+ rtc::CritScope cs(crit_);
+ if (enable && !enabled_) {
+ rms_->Reset();
+ }
+ enabled_ = enable;
+ return AudioProcessing::kNoError;
}
bool LevelEstimatorImpl::is_enabled() const {
- return is_component_enabled();
+ rtc::CritScope cs(crit_);
+ return enabled_;
}
int LevelEstimatorImpl::RMS() {
- if (!is_component_enabled()) {
+ rtc::CritScope cs(crit_);
+ if (!enabled_) {
return AudioProcessing::kNotEnabledError;
}
- RMSLevel* rms_level = static_cast<RMSLevel*>(handle(0));
- return rms_level->RMS();
-}
-
-// The ProcessingComponent implementation is pretty weird in this class since
-// we have only a single instance of the trivial underlying component.
-void* LevelEstimatorImpl::CreateHandle() const {
- return new RMSLevel;
-}
-
-void LevelEstimatorImpl::DestroyHandle(void* handle) const {
- delete static_cast<RMSLevel*>(handle);
+ return rms_->RMS();
}
-
-int LevelEstimatorImpl::InitializeHandle(void* handle) const {
- static_cast<RMSLevel*>(handle)->Reset();
- return AudioProcessing::kNoError;
-}
-
-int LevelEstimatorImpl::ConfigureHandle(void* /*handle*/) const {
- return AudioProcessing::kNoError;
-}
-
-int LevelEstimatorImpl::num_handles_required() const {
- return 1;
-}
-
-int LevelEstimatorImpl::GetHandleError(void* /*handle*/) const {
- return AudioProcessing::kUnspecifiedError;
-}
-
} // namespace webrtc
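The slimmed-down LevelEstimatorImpl keeps a single RMSLevel behind the lock instead of routing through ProcessingComponent handles. A usage sketch of the new surface; per the audio_processing.h contract, RMS() reports the RFC 6465 level in the range [0, 127] (that range comes from the public header, not from the diff above):

    #include "webrtc/base/criticalsection.h"
    #include "webrtc/modules/audio_processing/level_estimator_impl.h"

    void MeasureLevel(webrtc::AudioBuffer* capture) {
      rtc::CriticalSection crit;
      webrtc::LevelEstimatorImpl estimator(&crit);
      estimator.Initialize();  // Resets the accumulated RMS state.
      estimator.Enable(true);
      estimator.ProcessStream(capture);
      const int rms = estimator.RMS();  // kNotEnabledError when disabled.
      (void)rms;
    }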
diff --git a/webrtc/modules/audio_processing/level_estimator_impl.h b/webrtc/modules/audio_processing/level_estimator_impl.h
index 0d0050c7e7..4401da37e4 100644
--- a/webrtc/modules/audio_processing/level_estimator_impl.h
+++ b/webrtc/modules/audio_processing/level_estimator_impl.h
@@ -11,43 +11,36 @@
#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_LEVEL_ESTIMATOR_IMPL_H_
#define WEBRTC_MODULES_AUDIO_PROCESSING_LEVEL_ESTIMATOR_IMPL_H_
+#include "webrtc/base/constructormagic.h"
+#include "webrtc/base/criticalsection.h"
+#include "webrtc/base/scoped_ptr.h"
#include "webrtc/modules/audio_processing/include/audio_processing.h"
-#include "webrtc/modules/audio_processing/processing_component.h"
-#include "webrtc/modules/audio_processing/rms_level.h"
namespace webrtc {
class AudioBuffer;
-class CriticalSectionWrapper;
+class RMSLevel;
-class LevelEstimatorImpl : public LevelEstimator,
- public ProcessingComponent {
+class LevelEstimatorImpl : public LevelEstimator {
public:
- LevelEstimatorImpl(const AudioProcessing* apm,
- CriticalSectionWrapper* crit);
- virtual ~LevelEstimatorImpl();
+ explicit LevelEstimatorImpl(rtc::CriticalSection* crit);
+ ~LevelEstimatorImpl() override;
- int ProcessStream(AudioBuffer* audio);
+ // TODO(peah): Fold into ctor, once public API is removed.
+ void Initialize();
+ void ProcessStream(AudioBuffer* audio);
// LevelEstimator implementation.
- bool is_enabled() const override;
-
- private:
- // LevelEstimator implementation.
int Enable(bool enable) override;
+ bool is_enabled() const override;
int RMS() override;
- // ProcessingComponent implementation.
- void* CreateHandle() const override;
- int InitializeHandle(void* handle) const override;
- int ConfigureHandle(void* handle) const override;
- void DestroyHandle(void* handle) const override;
- int num_handles_required() const override;
- int GetHandleError(void* handle) const override;
-
- CriticalSectionWrapper* crit_;
+ private:
+ rtc::CriticalSection* const crit_ = nullptr;
+ bool enabled_ GUARDED_BY(crit_) = false;
+ rtc::scoped_ptr<RMSLevel> rms_ GUARDED_BY(crit_);
+ RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(LevelEstimatorImpl);
};
-
} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_PROCESSING_LEVEL_ESTIMATOR_IMPL_H_
diff --git a/webrtc/modules/audio_processing/logging/aec_logging.h b/webrtc/modules/audio_processing/logging/aec_logging.h
index 3cf9ff89ed..b062913be2 100644
--- a/webrtc/modules/audio_processing/logging/aec_logging.h
+++ b/webrtc/modules/audio_processing/logging/aec_logging.h
@@ -43,6 +43,20 @@
(void) fwrite(data, data_size, 1, file); \
} while (0)
+// Dumps a raw scalar int32 to file.
+#define RTC_AEC_DEBUG_RAW_WRITE_SCALAR_INT32(file, data) \
+ do { \
+ int32_t value_to_store = data; \
+ (void) fwrite(&value_to_store, sizeof(value_to_store), 1, file); \
+ } while (0)
+
+// Dumps a raw scalar double to file.
+#define RTC_AEC_DEBUG_RAW_WRITE_SCALAR_DOUBLE(file, data) \
+ do { \
+ double value_to_store = data; \
+ (void) fwrite(&value_to_store, sizeof(value_to_store), 1, file); \
+ } while (0)
+
// Opens a raw data file for writing using the specified sample rate.
#define RTC_AEC_DEBUG_RAW_OPEN(name, instance_counter, file) \
do { \
@@ -73,6 +87,14 @@
do { \
} while (0)
+#define RTC_AEC_DEBUG_RAW_WRITE_SCALAR_INT32(file, data) \
+ do { \
+ } while (0)
+
+#define RTC_AEC_DEBUG_RAW_WRITE_SCALAR_DOUBLE(file, data) \
+ do { \
+ } while (0)
+
#define RTC_AEC_DEBUG_RAW_OPEN(name, instance_counter, file) \
do { \
} while (0)
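Both the real and the stubbed-out variants of these macros are wrapped in do { ... } while (0) so every invocation parses as exactly one statement. A compact illustration of why that matters (the function and variables are illustrative):

    #include <cstdint>
    #include <cstdio>
    #include "webrtc/modules/audio_processing/logging/aec_logging.h"

    void MaybeDump(bool dump_enabled, FILE* file, int32_t frame_delay,
                   int* skipped_frames) {
      // Without the do/while (0) wrapper, a multi-statement macro body
      // would break this unbraced if/else; with it, the macro behaves
      // like a single statement in both debug and no-op builds.
      if (dump_enabled)
        RTC_AEC_DEBUG_RAW_WRITE_SCALAR_INT32(file, frame_delay);
      else
        ++*skipped_frames;
    }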
diff --git a/webrtc/modules/audio_processing/noise_suppression_impl.cc b/webrtc/modules/audio_processing/noise_suppression_impl.cc
index 65ec3c445e..de7e856676 100644
--- a/webrtc/modules/audio_processing/noise_suppression_impl.cc
+++ b/webrtc/modules/audio_processing/noise_suppression_impl.cc
@@ -10,172 +10,166 @@
#include "webrtc/modules/audio_processing/noise_suppression_impl.h"
-#include <assert.h>
-
#include "webrtc/modules/audio_processing/audio_buffer.h"
#if defined(WEBRTC_NS_FLOAT)
-#include "webrtc/modules/audio_processing/ns/include/noise_suppression.h"
+#include "webrtc/modules/audio_processing/ns/noise_suppression.h"
+#define NS_CREATE WebRtcNs_Create
+#define NS_FREE WebRtcNs_Free
+#define NS_INIT WebRtcNs_Init
+#define NS_SET_POLICY WebRtcNs_set_policy
+typedef NsHandle NsState;
#elif defined(WEBRTC_NS_FIXED)
-#include "webrtc/modules/audio_processing/ns/include/noise_suppression_x.h"
+#include "webrtc/modules/audio_processing/ns/noise_suppression_x.h"
+#define NS_CREATE WebRtcNsx_Create
+#define NS_FREE WebRtcNsx_Free
+#define NS_INIT WebRtcNsx_Init
+#define NS_SET_POLICY WebRtcNsx_set_policy
+typedef NsxHandle NsState;
#endif
-#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
-
namespace webrtc {
-
-#if defined(WEBRTC_NS_FLOAT)
-typedef NsHandle Handle;
-#elif defined(WEBRTC_NS_FIXED)
-typedef NsxHandle Handle;
-#endif
-
-namespace {
-int MapSetting(NoiseSuppression::Level level) {
- switch (level) {
- case NoiseSuppression::kLow:
- return 0;
- case NoiseSuppression::kModerate:
- return 1;
- case NoiseSuppression::kHigh:
- return 2;
- case NoiseSuppression::kVeryHigh:
- return 3;
+class NoiseSuppressionImpl::Suppressor {
+ public:
+ explicit Suppressor(int sample_rate_hz) {
+ state_ = NS_CREATE();
+ RTC_CHECK(state_);
+ int error = NS_INIT(state_, sample_rate_hz);
+ RTC_DCHECK_EQ(0, error);
+ }
+ ~Suppressor() {
+ NS_FREE(state_);
}
- assert(false);
- return -1;
+ NsState* state() { return state_; }
+ private:
+ NsState* state_ = nullptr;
+ RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(Suppressor);
+};
+
+NoiseSuppressionImpl::NoiseSuppressionImpl(rtc::CriticalSection* crit)
+ : crit_(crit) {
+ RTC_DCHECK(crit);
}
-} // namespace
-
-NoiseSuppressionImpl::NoiseSuppressionImpl(const AudioProcessing* apm,
- CriticalSectionWrapper* crit)
- : ProcessingComponent(),
- apm_(apm),
- crit_(crit),
- level_(kModerate) {}
NoiseSuppressionImpl::~NoiseSuppressionImpl() {}
-int NoiseSuppressionImpl::AnalyzeCaptureAudio(AudioBuffer* audio) {
-#if defined(WEBRTC_NS_FLOAT)
- if (!is_component_enabled()) {
- return apm_->kNoError;
+void NoiseSuppressionImpl::Initialize(size_t channels, int sample_rate_hz) {
+ rtc::CritScope cs(crit_);
+ channels_ = channels;
+ sample_rate_hz_ = sample_rate_hz;
+ std::vector<rtc::scoped_ptr<Suppressor>> new_suppressors;
+ if (enabled_) {
+ new_suppressors.resize(channels);
+ for (size_t i = 0; i < channels; i++) {
+ new_suppressors[i].reset(new Suppressor(sample_rate_hz));
+ }
}
- assert(audio->num_frames_per_band() <= 160);
- assert(audio->num_channels() == num_handles());
+ suppressors_.swap(new_suppressors);
+ set_level(level_);
+}
- for (int i = 0; i < num_handles(); ++i) {
- Handle* my_handle = static_cast<Handle*>(handle(i));
+void NoiseSuppressionImpl::AnalyzeCaptureAudio(AudioBuffer* audio) {
+ RTC_DCHECK(audio);
+#if defined(WEBRTC_NS_FLOAT)
+ rtc::CritScope cs(crit_);
+ if (!enabled_) {
+ return;
+ }
- WebRtcNs_Analyze(my_handle, audio->split_bands_const_f(i)[kBand0To8kHz]);
+ RTC_DCHECK_GE(160u, audio->num_frames_per_band());
+ RTC_DCHECK_EQ(suppressors_.size(), audio->num_channels());
+ for (size_t i = 0; i < suppressors_.size(); i++) {
+ WebRtcNs_Analyze(suppressors_[i]->state(),
+ audio->split_bands_const_f(i)[kBand0To8kHz]);
}
#endif
- return apm_->kNoError;
}
-int NoiseSuppressionImpl::ProcessCaptureAudio(AudioBuffer* audio) {
- if (!is_component_enabled()) {
- return apm_->kNoError;
+void NoiseSuppressionImpl::ProcessCaptureAudio(AudioBuffer* audio) {
+ RTC_DCHECK(audio);
+ rtc::CritScope cs(crit_);
+ if (!enabled_) {
+ return;
}
- assert(audio->num_frames_per_band() <= 160);
- assert(audio->num_channels() == num_handles());
- for (int i = 0; i < num_handles(); ++i) {
- Handle* my_handle = static_cast<Handle*>(handle(i));
+ RTC_DCHECK_GE(160u, audio->num_frames_per_band());
+ RTC_DCHECK_EQ(suppressors_.size(), audio->num_channels());
+ for (size_t i = 0; i < suppressors_.size(); i++) {
#if defined(WEBRTC_NS_FLOAT)
- WebRtcNs_Process(my_handle,
+ WebRtcNs_Process(suppressors_[i]->state(),
audio->split_bands_const_f(i),
audio->num_bands(),
audio->split_bands_f(i));
#elif defined(WEBRTC_NS_FIXED)
- WebRtcNsx_Process(my_handle,
+ WebRtcNsx_Process(suppressors_[i]->state(),
audio->split_bands_const(i),
audio->num_bands(),
audio->split_bands(i));
#endif
}
- return apm_->kNoError;
}
int NoiseSuppressionImpl::Enable(bool enable) {
- CriticalSectionScoped crit_scoped(crit_);
- return EnableComponent(enable);
+ rtc::CritScope cs(crit_);
+ if (enabled_ != enable) {
+ enabled_ = enable;
+ Initialize(channels_, sample_rate_hz_);
+ }
+ return AudioProcessing::kNoError;
}
bool NoiseSuppressionImpl::is_enabled() const {
- return is_component_enabled();
+ rtc::CritScope cs(crit_);
+ return enabled_;
}
int NoiseSuppressionImpl::set_level(Level level) {
- CriticalSectionScoped crit_scoped(crit_);
- if (MapSetting(level) == -1) {
- return apm_->kBadParameterError;
+ int policy = 1;
+ switch (level) {
+ case NoiseSuppression::kLow:
+ policy = 0;
+ break;
+ case NoiseSuppression::kModerate:
+ policy = 1;
+ break;
+ case NoiseSuppression::kHigh:
+ policy = 2;
+ break;
+ case NoiseSuppression::kVeryHigh:
+ policy = 3;
+ break;
+ default:
+ RTC_NOTREACHED();
}
-
+ rtc::CritScope cs(crit_);
level_ = level;
- return Configure();
+ for (auto& suppressor : suppressors_) {
+ int error = NS_SET_POLICY(suppressor->state(), policy);
+ RTC_DCHECK_EQ(0, error);
+ }
+ return AudioProcessing::kNoError;
}
NoiseSuppression::Level NoiseSuppressionImpl::level() const {
+ rtc::CritScope cs(crit_);
return level_;
}
float NoiseSuppressionImpl::speech_probability() const {
+ rtc::CritScope cs(crit_);
#if defined(WEBRTC_NS_FLOAT)
float probability_average = 0.0f;
- for (int i = 0; i < num_handles(); i++) {
- Handle* my_handle = static_cast<Handle*>(handle(i));
- probability_average += WebRtcNs_prior_speech_probability(my_handle);
+ for (auto& suppressor : suppressors_) {
+ probability_average +=
+ WebRtcNs_prior_speech_probability(suppressor->state());
}
- return probability_average / num_handles();
+ if (!suppressors_.empty()) {
+ probability_average /= suppressors_.size();
+ }
+ return probability_average;
#elif defined(WEBRTC_NS_FIXED)
+ // TODO(peah): Returning error code as a float! Remove this.
// Currently not available for the fixed point implementation.
- return apm_->kUnsupportedFunctionError;
+ return AudioProcessing::kUnsupportedFunctionError;
#endif
}
-
-void* NoiseSuppressionImpl::CreateHandle() const {
-#if defined(WEBRTC_NS_FLOAT)
- return WebRtcNs_Create();
-#elif defined(WEBRTC_NS_FIXED)
- return WebRtcNsx_Create();
-#endif
-}
-
-void NoiseSuppressionImpl::DestroyHandle(void* handle) const {
-#if defined(WEBRTC_NS_FLOAT)
- WebRtcNs_Free(static_cast<Handle*>(handle));
-#elif defined(WEBRTC_NS_FIXED)
- WebRtcNsx_Free(static_cast<Handle*>(handle));
-#endif
-}
-
-int NoiseSuppressionImpl::InitializeHandle(void* handle) const {
-#if defined(WEBRTC_NS_FLOAT)
- return WebRtcNs_Init(static_cast<Handle*>(handle),
- apm_->proc_sample_rate_hz());
-#elif defined(WEBRTC_NS_FIXED)
- return WebRtcNsx_Init(static_cast<Handle*>(handle),
- apm_->proc_sample_rate_hz());
-#endif
-}
-
-int NoiseSuppressionImpl::ConfigureHandle(void* handle) const {
-#if defined(WEBRTC_NS_FLOAT)
- return WebRtcNs_set_policy(static_cast<Handle*>(handle),
- MapSetting(level_));
-#elif defined(WEBRTC_NS_FIXED)
- return WebRtcNsx_set_policy(static_cast<Handle*>(handle),
- MapSetting(level_));
-#endif
-}
-
-int NoiseSuppressionImpl::num_handles_required() const {
- return apm_->num_output_channels();
-}
-
-int NoiseSuppressionImpl::GetHandleError(void* handle) const {
- // The NS has no get_error() function.
- assert(handle != NULL);
- return apm_->kUnspecifiedError;
-}
} // namespace webrtc
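The NS_CREATE/NS_INIT/NS_SET_POLICY aliases bind Suppressor to either the float (WebRtcNs_*) or fixed-point (WebRtcNsx_*) backend at compile time, so no runtime dispatch is needed. A minimal sketch of driving the rewritten class, assuming an externally owned lock as the constructor now requires (the setup values are illustrative):

    #include "webrtc/base/criticalsection.h"
    #include "webrtc/modules/audio_processing/noise_suppression_impl.h"

    void RunNs(webrtc::AudioBuffer* capture) {
      rtc::CriticalSection crit;
      webrtc::NoiseSuppressionImpl ns(&crit);
      ns.Initialize(2 /* channels */, 16000 /* sample_rate_hz */);
      ns.Enable(true);                                // Rebuilds suppressors.
      ns.set_level(webrtc::NoiseSuppression::kHigh);  // Maps to policy 2.
      ns.AnalyzeCaptureAudio(capture);                // No-op in fixed builds.
      ns.ProcessCaptureAudio(capture);
    }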
diff --git a/webrtc/modules/audio_processing/noise_suppression_impl.h b/webrtc/modules/audio_processing/noise_suppression_impl.h
index 76a39b8e09..debbc61bc9 100644
--- a/webrtc/modules/audio_processing/noise_suppression_impl.h
+++ b/webrtc/modules/audio_processing/noise_suppression_impl.h
@@ -11,47 +11,42 @@
#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_NOISE_SUPPRESSION_IMPL_H_
#define WEBRTC_MODULES_AUDIO_PROCESSING_NOISE_SUPPRESSION_IMPL_H_
+#include "webrtc/base/constructormagic.h"
+#include "webrtc/base/criticalsection.h"
+#include "webrtc/base/scoped_ptr.h"
#include "webrtc/modules/audio_processing/include/audio_processing.h"
-#include "webrtc/modules/audio_processing/processing_component.h"
namespace webrtc {
class AudioBuffer;
-class CriticalSectionWrapper;
-class NoiseSuppressionImpl : public NoiseSuppression,
- public ProcessingComponent {
+class NoiseSuppressionImpl : public NoiseSuppression {
public:
- NoiseSuppressionImpl(const AudioProcessing* apm,
- CriticalSectionWrapper* crit);
- virtual ~NoiseSuppressionImpl();
+ explicit NoiseSuppressionImpl(rtc::CriticalSection* crit);
+ ~NoiseSuppressionImpl() override;
- int AnalyzeCaptureAudio(AudioBuffer* audio);
- int ProcessCaptureAudio(AudioBuffer* audio);
+ // TODO(peah): Fold into ctor, once public API is removed.
+ void Initialize(size_t channels, int sample_rate_hz);
+ void AnalyzeCaptureAudio(AudioBuffer* audio);
+ void ProcessCaptureAudio(AudioBuffer* audio);
// NoiseSuppression implementation.
+ int Enable(bool enable) override;
bool is_enabled() const override;
- float speech_probability() const override;
+ int set_level(Level level) override;
Level level() const override;
+ float speech_probability() const override;
private:
- // NoiseSuppression implementation.
- int Enable(bool enable) override;
- int set_level(Level level) override;
-
- // ProcessingComponent implementation.
- void* CreateHandle() const override;
- int InitializeHandle(void* handle) const override;
- int ConfigureHandle(void* handle) const override;
- void DestroyHandle(void* handle) const override;
- int num_handles_required() const override;
- int GetHandleError(void* handle) const override;
-
- const AudioProcessing* apm_;
- CriticalSectionWrapper* crit_;
- Level level_;
+ class Suppressor;
+ rtc::CriticalSection* const crit_;
+ bool enabled_ GUARDED_BY(crit_) = false;
+ Level level_ GUARDED_BY(crit_) = kModerate;
+ size_t channels_ GUARDED_BY(crit_) = 0;
+ int sample_rate_hz_ GUARDED_BY(crit_) = 0;
+ std::vector<rtc::scoped_ptr<Suppressor>> suppressors_ GUARDED_BY(crit_);
+ RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(NoiseSuppressionImpl);
};
-
} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_PROCESSING_NOISE_SUPPRESSION_IMPL_H_
diff --git a/webrtc/modules/audio_processing/ns/noise_suppression.c b/webrtc/modules/audio_processing/ns/noise_suppression.c
index 13f1b2d6dc..dd05e0ab3d 100644
--- a/webrtc/modules/audio_processing/ns/noise_suppression.c
+++ b/webrtc/modules/audio_processing/ns/noise_suppression.c
@@ -8,7 +8,7 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/audio_processing/ns/include/noise_suppression.h"
+#include "webrtc/modules/audio_processing/ns/noise_suppression.h"
#include <stdlib.h>
#include <string.h>
diff --git a/webrtc/modules/audio_processing/ns/include/noise_suppression.h b/webrtc/modules/audio_processing/ns/noise_suppression.h
index 9dac56bdee..8018118b60 100644
--- a/webrtc/modules/audio_processing/ns/include/noise_suppression.h
+++ b/webrtc/modules/audio_processing/ns/noise_suppression.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_NS_INCLUDE_NOISE_SUPPRESSION_H_
-#define WEBRTC_MODULES_AUDIO_PROCESSING_NS_INCLUDE_NOISE_SUPPRESSION_H_
+#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_NS_NOISE_SUPPRESSION_H_
+#define WEBRTC_MODULES_AUDIO_PROCESSING_NS_NOISE_SUPPRESSION_H_
#include <stddef.h>
@@ -113,4 +113,4 @@ float WebRtcNs_prior_speech_probability(NsHandle* handle);
}
#endif
-#endif // WEBRTC_MODULES_AUDIO_PROCESSING_NS_INCLUDE_NOISE_SUPPRESSION_H_
+#endif // WEBRTC_MODULES_AUDIO_PROCESSING_NS_NOISE_SUPPRESSION_H_
diff --git a/webrtc/modules/audio_processing/ns/noise_suppression_x.c b/webrtc/modules/audio_processing/ns/noise_suppression_x.c
index 150fe608dd..0a5ba13300 100644
--- a/webrtc/modules/audio_processing/ns/noise_suppression_x.c
+++ b/webrtc/modules/audio_processing/ns/noise_suppression_x.c
@@ -8,7 +8,7 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/audio_processing/ns/include/noise_suppression_x.h"
+#include "webrtc/modules/audio_processing/ns/noise_suppression_x.h"
#include <stdlib.h>
diff --git a/webrtc/modules/audio_processing/ns/include/noise_suppression_x.h b/webrtc/modules/audio_processing/ns/noise_suppression_x.h
index 88fe4cd635..02b44cc091 100644
--- a/webrtc/modules/audio_processing/ns/include/noise_suppression_x.h
+++ b/webrtc/modules/audio_processing/ns/noise_suppression_x.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_NS_INCLUDE_NOISE_SUPPRESSION_X_H_
-#define WEBRTC_MODULES_AUDIO_PROCESSING_NS_INCLUDE_NOISE_SUPPRESSION_X_H_
+#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_NS_NOISE_SUPPRESSION_X_H_
+#define WEBRTC_MODULES_AUDIO_PROCESSING_NS_NOISE_SUPPRESSION_X_H_
#include "webrtc/typedefs.h"
@@ -85,4 +85,4 @@ void WebRtcNsx_Process(NsxHandle* nsxInst,
}
#endif
-#endif // WEBRTC_MODULES_AUDIO_PROCESSING_NS_INCLUDE_NOISE_SUPPRESSION_X_H_
+#endif // WEBRTC_MODULES_AUDIO_PROCESSING_NS_NOISE_SUPPRESSION_X_H_
diff --git a/webrtc/modules/audio_processing/ns/ns_core.c b/webrtc/modules/audio_processing/ns/ns_core.c
index 1d6091400e..5ce64cee29 100644
--- a/webrtc/modules/audio_processing/ns/ns_core.c
+++ b/webrtc/modules/audio_processing/ns/ns_core.c
@@ -15,7 +15,7 @@
#include "webrtc/common_audio/fft4g.h"
#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
-#include "webrtc/modules/audio_processing/ns/include/noise_suppression.h"
+#include "webrtc/modules/audio_processing/ns/noise_suppression.h"
#include "webrtc/modules/audio_processing/ns/ns_core.h"
#include "webrtc/modules/audio_processing/ns/windows_private.h"
diff --git a/webrtc/modules/audio_processing/ns/nsx_core.c b/webrtc/modules/audio_processing/ns/nsx_core.c
index 71445792f5..25f16d26ab 100644
--- a/webrtc/modules/audio_processing/ns/nsx_core.c
+++ b/webrtc/modules/audio_processing/ns/nsx_core.c
@@ -8,7 +8,7 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/audio_processing/ns/include/noise_suppression_x.h"
+#include "webrtc/modules/audio_processing/ns/noise_suppression_x.h"
#include <assert.h>
#include <math.h>
diff --git a/webrtc/modules/audio_processing/ns/nsx_core_c.c b/webrtc/modules/audio_processing/ns/nsx_core_c.c
index 14322d38cc..da7aa3d5db 100644
--- a/webrtc/modules/audio_processing/ns/nsx_core_c.c
+++ b/webrtc/modules/audio_processing/ns/nsx_core_c.c
@@ -10,7 +10,7 @@
#include <assert.h>
-#include "webrtc/modules/audio_processing/ns/include/noise_suppression_x.h"
+#include "webrtc/modules/audio_processing/ns/noise_suppression_x.h"
#include "webrtc/modules/audio_processing/ns/nsx_core.h"
#include "webrtc/modules/audio_processing/ns/nsx_defines.h"
diff --git a/webrtc/modules/audio_processing/ns/nsx_core_mips.c b/webrtc/modules/audio_processing/ns/nsx_core_mips.c
index d99be8720b..7688d82d78 100644
--- a/webrtc/modules/audio_processing/ns/nsx_core_mips.c
+++ b/webrtc/modules/audio_processing/ns/nsx_core_mips.c
@@ -11,7 +11,7 @@
#include <assert.h>
#include <string.h>
-#include "webrtc/modules/audio_processing/ns/include/noise_suppression_x.h"
+#include "webrtc/modules/audio_processing/ns/noise_suppression_x.h"
#include "webrtc/modules/audio_processing/ns/nsx_core.h"
static const int16_t kIndicatorTable[17] = {
diff --git a/webrtc/modules/audio_processing/processing_component.cc b/webrtc/modules/audio_processing/processing_component.cc
index 9e16d7c4ee..7abd8e2100 100644
--- a/webrtc/modules/audio_processing/processing_component.cc
+++ b/webrtc/modules/audio_processing/processing_component.cc
@@ -55,12 +55,12 @@ bool ProcessingComponent::is_component_enabled() const {
return enabled_;
}
-void* ProcessingComponent::handle(int index) const {
+void* ProcessingComponent::handle(size_t index) const {
assert(index < num_handles_);
return handles_[index];
}
-int ProcessingComponent::num_handles() const {
+size_t ProcessingComponent::num_handles() const {
return num_handles_;
}
@@ -70,12 +70,12 @@ int ProcessingComponent::Initialize() {
}
num_handles_ = num_handles_required();
- if (num_handles_ > static_cast<int>(handles_.size())) {
+ if (num_handles_ > handles_.size()) {
handles_.resize(num_handles_, NULL);
}
- assert(static_cast<int>(handles_.size()) >= num_handles_);
- for (int i = 0; i < num_handles_; i++) {
+ assert(handles_.size() >= num_handles_);
+ for (size_t i = 0; i < num_handles_; i++) {
if (handles_[i] == NULL) {
handles_[i] = CreateHandle();
if (handles_[i] == NULL) {
@@ -98,8 +98,8 @@ int ProcessingComponent::Configure() {
return AudioProcessing::kNoError;
}
- assert(static_cast<int>(handles_.size()) >= num_handles_);
- for (int i = 0; i < num_handles_; i++) {
+ assert(handles_.size() >= num_handles_);
+ for (size_t i = 0; i < num_handles_; i++) {
int err = ConfigureHandle(handles_[i]);
if (err != AudioProcessing::kNoError) {
return GetHandleError(handles_[i]);
diff --git a/webrtc/modules/audio_processing/processing_component.h b/webrtc/modules/audio_processing/processing_component.h
index 8ee3ac6c7d..577f1570ad 100644
--- a/webrtc/modules/audio_processing/processing_component.h
+++ b/webrtc/modules/audio_processing/processing_component.h
@@ -17,6 +17,22 @@
namespace webrtc {
+// Functor to use when supplying a verifier function for the queue item
+// verification.
+template <typename T>
+class RenderQueueItemVerifier {
+ public:
+ explicit RenderQueueItemVerifier(size_t minimum_capacity)
+ : minimum_capacity_(minimum_capacity) {}
+
+ bool operator()(const std::vector<T>& v) const {
+ return v.capacity() >= minimum_capacity_;
+ }
+
+ private:
+ size_t minimum_capacity_;
+};
+
class ProcessingComponent {
public:
ProcessingComponent();
@@ -31,21 +47,21 @@ class ProcessingComponent {
protected:
virtual int Configure();
int EnableComponent(bool enable);
- void* handle(int index) const;
- int num_handles() const;
+ void* handle(size_t index) const;
+ size_t num_handles() const;
private:
virtual void* CreateHandle() const = 0;
virtual int InitializeHandle(void* handle) const = 0;
virtual int ConfigureHandle(void* handle) const = 0;
virtual void DestroyHandle(void* handle) const = 0;
- virtual int num_handles_required() const = 0;
+ virtual size_t num_handles_required() const = 0;
virtual int GetHandleError(void* handle) const = 0;
std::vector<void*> handles_;
bool initialized_;
bool enabled_;
- int num_handles_;
+ size_t num_handles_;
};
} // namespace webrtc
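RenderQueueItemVerifier is the predicate SwapQueue applies to each element; here it accepts a vector only if its capacity meets a minimum, which guarantees later swap-based Insert()/Remove() calls never need to reallocate. A small construction sketch matching the usage in gain_control_impl.cc (the sizes are illustrative):

    #include <cstdint>
    #include <vector>
    #include "webrtc/common_audio/swap_queue.h"
    #include "webrtc/modules/audio_processing/processing_component.h"

    void MakeRenderQueue() {
      const size_t kMaxElementSize = 160;  // One 10 ms band at 16 kHz.
      std::vector<int16_t> prototype(kMaxElementSize);

      // Every queue slot is copy-initialized from |prototype| and must pass
      // the capacity check enforced by the verifier.
      webrtc::SwapQueue<std::vector<int16_t>,
                        webrtc::RenderQueueItemVerifier<int16_t>>
          queue(100 /* max queued elements */, prototype,
                webrtc::RenderQueueItemVerifier<int16_t>(kMaxElementSize));

      std::vector<int16_t> item(kMaxElementSize);
      queue.Insert(&item);  // Swaps |item| with a pre-verified slot.
    }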
diff --git a/webrtc/modules/audio_processing/splitting_filter.cc b/webrtc/modules/audio_processing/splitting_filter.cc
index 60427e2db6..46cc9352c2 100644
--- a/webrtc/modules/audio_processing/splitting_filter.cc
+++ b/webrtc/modules/audio_processing/splitting_filter.cc
@@ -16,7 +16,7 @@
namespace webrtc {
-SplittingFilter::SplittingFilter(int num_channels,
+SplittingFilter::SplittingFilter(size_t num_channels,
size_t num_bands,
size_t num_frames)
: num_bands_(num_bands) {
@@ -24,7 +24,7 @@ SplittingFilter::SplittingFilter(int num_channels,
if (num_bands_ == 2) {
two_bands_states_.resize(num_channels);
} else if (num_bands_ == 3) {
- for (int i = 0; i < num_channels; ++i) {
+ for (size_t i = 0; i < num_channels; ++i) {
three_band_filter_banks_.push_back(new ThreeBandFilterBank(num_frames));
}
}
@@ -58,8 +58,7 @@ void SplittingFilter::Synthesis(const IFChannelBuffer* bands,
void SplittingFilter::TwoBandsAnalysis(const IFChannelBuffer* data,
IFChannelBuffer* bands) {
- RTC_DCHECK_EQ(static_cast<int>(two_bands_states_.size()),
- data->num_channels());
+ RTC_DCHECK_EQ(two_bands_states_.size(), data->num_channels());
for (size_t i = 0; i < two_bands_states_.size(); ++i) {
WebRtcSpl_AnalysisQMF(data->ibuf_const()->channels()[i],
data->num_frames(),
@@ -72,8 +71,7 @@ void SplittingFilter::TwoBandsAnalysis(const IFChannelBuffer* data,
void SplittingFilter::TwoBandsSynthesis(const IFChannelBuffer* bands,
IFChannelBuffer* data) {
- RTC_DCHECK_EQ(static_cast<int>(two_bands_states_.size()),
- data->num_channels());
+ RTC_DCHECK_EQ(two_bands_states_.size(), data->num_channels());
for (size_t i = 0; i < two_bands_states_.size(); ++i) {
WebRtcSpl_SynthesisQMF(bands->ibuf_const()->channels(0)[i],
bands->ibuf_const()->channels(1)[i],
@@ -86,8 +84,7 @@ void SplittingFilter::TwoBandsSynthesis(const IFChannelBuffer* bands,
void SplittingFilter::ThreeBandsAnalysis(const IFChannelBuffer* data,
IFChannelBuffer* bands) {
- RTC_DCHECK_EQ(static_cast<int>(three_band_filter_banks_.size()),
- data->num_channels());
+ RTC_DCHECK_EQ(three_band_filter_banks_.size(), data->num_channels());
for (size_t i = 0; i < three_band_filter_banks_.size(); ++i) {
three_band_filter_banks_[i]->Analysis(data->fbuf_const()->channels()[i],
data->num_frames(),
@@ -97,8 +94,7 @@ void SplittingFilter::ThreeBandsAnalysis(const IFChannelBuffer* data,
void SplittingFilter::ThreeBandsSynthesis(const IFChannelBuffer* bands,
IFChannelBuffer* data) {
- RTC_DCHECK_EQ(static_cast<int>(three_band_filter_banks_.size()),
- data->num_channels());
+ RTC_DCHECK_EQ(three_band_filter_banks_.size(), data->num_channels());
for (size_t i = 0; i < three_band_filter_banks_.size(); ++i) {
three_band_filter_banks_[i]->Synthesis(bands->fbuf_const()->bands(i),
bands->num_frames_per_band(),
diff --git a/webrtc/modules/audio_processing/splitting_filter.h b/webrtc/modules/audio_processing/splitting_filter.h
index 4698d3fe2b..6b81c2fb05 100644
--- a/webrtc/modules/audio_processing/splitting_filter.h
+++ b/webrtc/modules/audio_processing/splitting_filter.h
@@ -45,7 +45,7 @@ struct TwoBandsStates {
// used.
class SplittingFilter {
public:
- SplittingFilter(int num_channels, size_t num_bands, size_t num_frames);
+ SplittingFilter(size_t num_channels, size_t num_bands, size_t num_frames);
void Analysis(const IFChannelBuffer* data, IFChannelBuffer* bands);
void Synthesis(const IFChannelBuffer* bands, IFChannelBuffer* data);
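The SplittingFilter signature change is type-only; usage is unchanged: Analysis() splits a full-band IFChannelBuffer into two or three bands and Synthesis() recombines them. A sketch under the assumption that IFChannelBuffer is constructed as (num_frames, num_channels, num_bands); treat the constants as illustrative:

    #include "webrtc/common_audio/channel_buffer.h"
    #include "webrtc/modules/audio_processing/splitting_filter.h"

    void SplitProcessMerge() {
      const size_t kChannels = 2, kBands = 3, kFrames = 480;  // 10 ms @ 48 kHz.
      webrtc::SplittingFilter filter(kChannels, kBands, kFrames);

      webrtc::IFChannelBuffer full_band(kFrames, kChannels);
      webrtc::IFChannelBuffer bands(kFrames, kChannels, kBands);

      filter.Analysis(&full_band, &bands);   // Full band -> three bands.
      // ... per-band processing goes here ...
      filter.Synthesis(&bands, &full_band);  // Bands -> full band.
    }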
diff --git a/webrtc/modules/audio_processing/test/audio_file_processor.cc b/webrtc/modules/audio_processing/test/audio_file_processor.cc
new file mode 100644
index 0000000000..56e9b4b96f
--- /dev/null
+++ b/webrtc/modules/audio_processing/test/audio_file_processor.cc
@@ -0,0 +1,180 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_processing/test/audio_file_processor.h"
+
+#include <algorithm>
+#include <utility>
+
+#include "webrtc/base/checks.h"
+#include "webrtc/modules/audio_processing/test/protobuf_utils.h"
+
+using rtc::scoped_ptr;
+using rtc::CheckedDivExact;
+using std::vector;
+using webrtc::audioproc::Event;
+using webrtc::audioproc::Init;
+using webrtc::audioproc::ReverseStream;
+using webrtc::audioproc::Stream;
+
+namespace webrtc {
+namespace {
+
+// Returns a StreamConfig corresponding to file.
+StreamConfig GetStreamConfig(const WavFile& file) {
+ return StreamConfig(file.sample_rate(), file.num_channels());
+}
+
+// Returns a ChannelBuffer corresponding to file.
+ChannelBuffer<float> GetChannelBuffer(const WavFile& file) {
+ return ChannelBuffer<float>(
+ CheckedDivExact(file.sample_rate(), AudioFileProcessor::kChunksPerSecond),
+ file.num_channels());
+}
+
+} // namespace
+
+WavFileProcessor::WavFileProcessor(scoped_ptr<AudioProcessing> ap,
+ scoped_ptr<WavReader> in_file,
+ scoped_ptr<WavWriter> out_file)
+ : ap_(std::move(ap)),
+ in_buf_(GetChannelBuffer(*in_file)),
+ out_buf_(GetChannelBuffer(*out_file)),
+ input_config_(GetStreamConfig(*in_file)),
+ output_config_(GetStreamConfig(*out_file)),
+ buffer_reader_(std::move(in_file)),
+ buffer_writer_(std::move(out_file)) {}
+
+bool WavFileProcessor::ProcessChunk() {
+ if (!buffer_reader_.Read(&in_buf_)) {
+ return false;
+ }
+ {
+ const auto st = ScopedTimer(mutable_proc_time());
+ RTC_CHECK_EQ(kNoErr,
+ ap_->ProcessStream(in_buf_.channels(), input_config_,
+ output_config_, out_buf_.channels()));
+ }
+ buffer_writer_.Write(out_buf_);
+ return true;
+}
+
+AecDumpFileProcessor::AecDumpFileProcessor(scoped_ptr<AudioProcessing> ap,
+ FILE* dump_file,
+ scoped_ptr<WavWriter> out_file)
+ : ap_(std::move(ap)),
+ dump_file_(dump_file),
+ out_buf_(GetChannelBuffer(*out_file)),
+ output_config_(GetStreamConfig(*out_file)),
+ buffer_writer_(std::move(out_file)) {
+ RTC_CHECK(dump_file_) << "Could not open dump file for reading.";
+}
+
+AecDumpFileProcessor::~AecDumpFileProcessor() {
+ fclose(dump_file_);
+}
+
+bool AecDumpFileProcessor::ProcessChunk() {
+ Event event_msg;
+
+ // Continue until we process our first Stream message.
+ do {
+ if (!ReadMessageFromFile(dump_file_, &event_msg)) {
+ return false;
+ }
+
+ if (event_msg.type() == Event::INIT) {
+ RTC_CHECK(event_msg.has_init());
+ HandleMessage(event_msg.init());
+
+ } else if (event_msg.type() == Event::STREAM) {
+ RTC_CHECK(event_msg.has_stream());
+ HandleMessage(event_msg.stream());
+
+ } else if (event_msg.type() == Event::REVERSE_STREAM) {
+ RTC_CHECK(event_msg.has_reverse_stream());
+ HandleMessage(event_msg.reverse_stream());
+ }
+ } while (event_msg.type() != Event::STREAM);
+
+ return true;
+}
+
+void AecDumpFileProcessor::HandleMessage(const Init& msg) {
+ RTC_CHECK(msg.has_sample_rate());
+ RTC_CHECK(msg.has_num_input_channels());
+ RTC_CHECK(msg.has_num_reverse_channels());
+
+ in_buf_.reset(new ChannelBuffer<float>(
+ CheckedDivExact(msg.sample_rate(), kChunksPerSecond),
+ msg.num_input_channels()));
+ const int reverse_sample_rate = msg.has_reverse_sample_rate()
+ ? msg.reverse_sample_rate()
+ : msg.sample_rate();
+ reverse_buf_.reset(new ChannelBuffer<float>(
+ CheckedDivExact(reverse_sample_rate, kChunksPerSecond),
+ msg.num_reverse_channels()));
+ input_config_ = StreamConfig(msg.sample_rate(), msg.num_input_channels());
+ reverse_config_ =
+ StreamConfig(reverse_sample_rate, msg.num_reverse_channels());
+
+ const ProcessingConfig config = {
+ {input_config_, output_config_, reverse_config_, reverse_config_}};
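+  // The four entries are, in order, the input, output, reverse input, and
+  // reverse output streams; reverse_config_ is reused for both reverse
+  // directions since the reverse stream is not resampled here.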
+ RTC_CHECK_EQ(kNoErr, ap_->Initialize(config));
+}
+
+void AecDumpFileProcessor::HandleMessage(const Stream& msg) {
+ RTC_CHECK(!msg.has_input_data());
+ RTC_CHECK_EQ(in_buf_->num_channels(),
+ static_cast<size_t>(msg.input_channel_size()));
+
+ for (int i = 0; i < msg.input_channel_size(); ++i) {
+ RTC_CHECK_EQ(in_buf_->num_frames() * sizeof(*in_buf_->channels()[i]),
+ msg.input_channel(i).size());
+ std::memcpy(in_buf_->channels()[i], msg.input_channel(i).data(),
+ msg.input_channel(i).size());
+ }
+ {
+ const auto st = ScopedTimer(mutable_proc_time());
+ RTC_CHECK_EQ(kNoErr, ap_->set_stream_delay_ms(msg.delay()));
+ ap_->echo_cancellation()->set_stream_drift_samples(msg.drift());
+ if (msg.has_keypress()) {
+ ap_->set_stream_key_pressed(msg.keypress());
+ }
+ RTC_CHECK_EQ(kNoErr,
+ ap_->ProcessStream(in_buf_->channels(), input_config_,
+ output_config_, out_buf_.channels()));
+ }
+
+ buffer_writer_.Write(out_buf_);
+}
+
+void AecDumpFileProcessor::HandleMessage(const ReverseStream& msg) {
+ RTC_CHECK(!msg.has_data());
+ RTC_CHECK_EQ(reverse_buf_->num_channels(),
+ static_cast<size_t>(msg.channel_size()));
+
+ for (int i = 0; i < msg.channel_size(); ++i) {
+    RTC_CHECK_EQ(
+        reverse_buf_->num_frames() * sizeof(*reverse_buf_->channels()[i]),
+        msg.channel(i).size());
+ std::memcpy(reverse_buf_->channels()[i], msg.channel(i).data(),
+ msg.channel(i).size());
+ }
+ {
+ const auto st = ScopedTimer(mutable_proc_time());
+ // TODO(ajm): This currently discards the processed output, which is needed
+ // for e.g. intelligibility enhancement.
+ RTC_CHECK_EQ(kNoErr, ap_->ProcessReverseStream(
+ reverse_buf_->channels(), reverse_config_,
+ reverse_config_, reverse_buf_->channels()));
+ }
+}
+
+} // namespace webrtc
diff --git a/webrtc/modules/audio_processing/test/audio_file_processor.h b/webrtc/modules/audio_processing/test/audio_file_processor.h
new file mode 100644
index 0000000000..a3153b2244
--- /dev/null
+++ b/webrtc/modules/audio_processing/test/audio_file_processor.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_TEST_AUDIO_FILE_PROCESSOR_H_
+#define WEBRTC_MODULES_AUDIO_PROCESSING_TEST_AUDIO_FILE_PROCESSOR_H_
+
+#include <algorithm>
+#include <limits>
+#include <vector>
+
+#include "webrtc/base/scoped_ptr.h"
+#include "webrtc/common_audio/channel_buffer.h"
+#include "webrtc/common_audio/wav_file.h"
+#include "webrtc/modules/audio_processing/include/audio_processing.h"
+#include "webrtc/modules/audio_processing/test/test_utils.h"
+#include "webrtc/system_wrappers/include/tick_util.h"
+
+#ifdef WEBRTC_ANDROID_PLATFORM_BUILD
+#include "external/webrtc/webrtc/modules/audio_processing/debug.pb.h"
+#else
+#include "webrtc/audio_processing/debug.pb.h"
+#endif
+
+namespace webrtc {
+
+// Holds a few statistics about a series of TickIntervals.
+struct TickIntervalStats {
+ TickIntervalStats() : min(std::numeric_limits<int64_t>::max()) {}
+ TickInterval sum;
+ TickInterval max;
+ TickInterval min;
+};
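+
+// The mean interval is not stored; callers derive it from sum, e.g. as done
+// when reporting per-chunk timings in audioproc_float.cc:
+//   proc_time.sum.Microseconds() * 1.f / num_chunks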
+
+// Interface for processing an input file with an AudioProcessing instance and
+// dumping the results to an output file.
+class AudioFileProcessor {
+ public:
+ static const int kChunksPerSecond = 1000 / AudioProcessing::kChunkSizeMs;
+
+ virtual ~AudioFileProcessor() {}
+
+  // Processes AudioProcessing::kChunkSizeMs worth of data from the input file
+  // and writes it to the output file.
+ virtual bool ProcessChunk() = 0;
+
+ // Returns the execution time of all AudioProcessing calls.
+ const TickIntervalStats& proc_time() const { return proc_time_; }
+
+ protected:
+ // RAII class for execution time measurement. Updates the provided
+ // TickIntervalStats based on the time between ScopedTimer creation and
+ // leaving the enclosing scope.
+ class ScopedTimer {
+ public:
+ explicit ScopedTimer(TickIntervalStats* proc_time)
+ : proc_time_(proc_time), start_time_(TickTime::Now()) {}
+
+ ~ScopedTimer() {
+ TickInterval interval = TickTime::Now() - start_time_;
+ proc_time_->sum += interval;
+ proc_time_->max = std::max(proc_time_->max, interval);
+ proc_time_->min = std::min(proc_time_->min, interval);
+ }
+
+ private:
+ TickIntervalStats* const proc_time_;
+ TickTime start_time_;
+ };
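+
+  // Typical use, as in the subclasses' ProcessChunk() implementations (the
+  // braces bound the measured region):
+  //   {
+  //     const auto st = ScopedTimer(mutable_proc_time());
+  //     RTC_CHECK_EQ(kNoErr, ap_->ProcessStream(...));
+  //   }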
+
+ TickIntervalStats* mutable_proc_time() { return &proc_time_; }
+
+ private:
+ TickIntervalStats proc_time_;
+};
+
+// Used to read from and write to WavFile objects.
+class WavFileProcessor final : public AudioFileProcessor {
+ public:
+ // Takes ownership of all parameters.
+ WavFileProcessor(rtc::scoped_ptr<AudioProcessing> ap,
+ rtc::scoped_ptr<WavReader> in_file,
+ rtc::scoped_ptr<WavWriter> out_file);
+ virtual ~WavFileProcessor() {}
+
+ // Processes one chunk from the WAV input and writes to the WAV output.
+ bool ProcessChunk() override;
+
+ private:
+ rtc::scoped_ptr<AudioProcessing> ap_;
+
+ ChannelBuffer<float> in_buf_;
+ ChannelBuffer<float> out_buf_;
+ const StreamConfig input_config_;
+ const StreamConfig output_config_;
+ ChannelBufferWavReader buffer_reader_;
+ ChannelBufferWavWriter buffer_writer_;
+};
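+
+// A minimal driving loop (a sketch; file names and formats are placeholders,
+// see audioproc_float.cc for the real call site):
+//   WavFileProcessor processor(
+//       rtc::scoped_ptr<AudioProcessing>(AudioProcessing::Create()),
+//       rtc_make_scoped_ptr(new WavReader("in.wav")),
+//       rtc_make_scoped_ptr(new WavWriter("out.wav", 48000, 1)));
+//   while (processor.ProcessChunk()) {}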
+
+// Used to read from an aecdump file and write to a WavWriter.
+class AecDumpFileProcessor final : public AudioFileProcessor {
+ public:
+ // Takes ownership of all parameters.
+ AecDumpFileProcessor(rtc::scoped_ptr<AudioProcessing> ap,
+ FILE* dump_file,
+ rtc::scoped_ptr<WavWriter> out_file);
+
+ virtual ~AecDumpFileProcessor();
+
+  // Processes messages from the aecdump file until the first Stream message
+  // has been handled, passing any intervening Init and ReverseStream data to
+  // the APM as appropriate.
+ bool ProcessChunk() override;
+
+ private:
+ void HandleMessage(const webrtc::audioproc::Init& msg);
+ void HandleMessage(const webrtc::audioproc::Stream& msg);
+ void HandleMessage(const webrtc::audioproc::ReverseStream& msg);
+
+ rtc::scoped_ptr<AudioProcessing> ap_;
+ FILE* dump_file_;
+
+ rtc::scoped_ptr<ChannelBuffer<float>> in_buf_;
+ rtc::scoped_ptr<ChannelBuffer<float>> reverse_buf_;
+ ChannelBuffer<float> out_buf_;
+ StreamConfig input_config_;
+ StreamConfig reverse_config_;
+ const StreamConfig output_config_;
+ ChannelBufferWavWriter buffer_writer_;
+};
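+
+// Constructed like WavFileProcessor, except from a FILE* that the caller
+// opens and that the destructor fclose()s (a sketch; "debug.aecdump" is a
+// placeholder):
+//   AecDumpFileProcessor processor(std::move(ap),
+//                                  fopen("debug.aecdump", "rb"),
+//                                  std::move(out_file));
+//   while (processor.ProcessChunk()) {}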
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_AUDIO_PROCESSING_TEST_AUDIO_FILE_PROCESSOR_H_
diff --git a/webrtc/modules/audio_processing/test/audio_processing_unittest.cc b/webrtc/modules/audio_processing/test/audio_processing_unittest.cc
index c013a369fe..324cb7bec6 100644
--- a/webrtc/modules/audio_processing/test/audio_processing_unittest.cc
+++ b/webrtc/modules/audio_processing/test/audio_processing_unittest.cc
@@ -14,6 +14,7 @@
#include <limits>
#include <queue>
+#include "webrtc/base/arraysize.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/common_audio/include/audio_util.h"
#include "webrtc/common_audio/resampler/include/push_resampler.h"
@@ -23,11 +24,10 @@
#include "webrtc/modules/audio_processing/include/audio_processing.h"
#include "webrtc/modules/audio_processing/test/protobuf_utils.h"
#include "webrtc/modules/audio_processing/test/test_utils.h"
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/system_wrappers/include/event_wrapper.h"
#include "webrtc/system_wrappers/include/trace.h"
#include "webrtc/test/testsupport/fileutils.h"
-#include "webrtc/test/testsupport/gtest_disable.h"
#ifdef WEBRTC_ANDROID_PLATFORM_BUILD
#include "gtest/gtest.h"
#include "external/webrtc/webrtc/modules/audio_processing/test/unittest.pb.h"
@@ -53,11 +53,8 @@ namespace {
// file. This is the typical case. When the file should be updated, it can
// be set to true with the command-line switch --write_ref_data.
bool write_ref_data = false;
-const int kChannels[] = {1, 2};
-const size_t kChannelsSize = sizeof(kChannels) / sizeof(*kChannels);
-
+const google::protobuf::int32 kChannels[] = {1, 2};
const int kSampleRates[] = {8000, 16000, 32000, 48000};
-const size_t kSampleRatesSize = sizeof(kSampleRates) / sizeof(*kSampleRates);
#if defined(WEBRTC_AUDIOPROC_FIXED_PROFILE)
// AECM doesn't support super-wb.
@@ -65,8 +62,6 @@ const int kProcessSampleRates[] = {8000, 16000};
#elif defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
const int kProcessSampleRates[] = {8000, 16000, 32000, 48000};
#endif
-const size_t kProcessSampleRatesSize = sizeof(kProcessSampleRates) /
- sizeof(*kProcessSampleRates);
enum StreamDirection { kForward = 0, kReverse };
@@ -77,7 +72,7 @@ void ConvertToFloat(const int16_t* int_data, ChannelBuffer<float>* cb) {
cb->num_frames(),
cb->num_channels(),
cb_int.channels());
- for (int i = 0; i < cb->num_channels(); ++i) {
+ for (size_t i = 0; i < cb->num_channels(); ++i) {
S16ToFloat(cb_int.channels()[i],
cb->num_frames(),
cb->channels()[i]);
@@ -89,7 +84,7 @@ void ConvertToFloat(const AudioFrame& frame, ChannelBuffer<float>* cb) {
}
// Number of channels including the keyboard channel.
-int TotalChannelsFromLayout(AudioProcessing::ChannelLayout layout) {
+size_t TotalChannelsFromLayout(AudioProcessing::ChannelLayout layout) {
switch (layout) {
case AudioProcessing::kMono:
return 1;
@@ -100,7 +95,7 @@ int TotalChannelsFromLayout(AudioProcessing::ChannelLayout layout) {
return 3;
}
assert(false);
- return -1;
+ return 0;
}
int TruncateToMultipleOf10(int value) {
@@ -108,25 +103,25 @@ int TruncateToMultipleOf10(int value) {
}
void MixStereoToMono(const float* stereo, float* mono,
- int samples_per_channel) {
- for (int i = 0; i < samples_per_channel; ++i)
+ size_t samples_per_channel) {
+ for (size_t i = 0; i < samples_per_channel; ++i)
mono[i] = (stereo[i * 2] + stereo[i * 2 + 1]) / 2;
}
void MixStereoToMono(const int16_t* stereo, int16_t* mono,
- int samples_per_channel) {
- for (int i = 0; i < samples_per_channel; ++i)
+ size_t samples_per_channel) {
+ for (size_t i = 0; i < samples_per_channel; ++i)
mono[i] = (stereo[i * 2] + stereo[i * 2 + 1]) >> 1;
}
-void CopyLeftToRightChannel(int16_t* stereo, int samples_per_channel) {
- for (int i = 0; i < samples_per_channel; i++) {
+void CopyLeftToRightChannel(int16_t* stereo, size_t samples_per_channel) {
+ for (size_t i = 0; i < samples_per_channel; i++) {
stereo[i * 2 + 1] = stereo[i * 2];
}
}
-void VerifyChannelsAreEqual(int16_t* stereo, int samples_per_channel) {
- for (int i = 0; i < samples_per_channel; i++) {
+void VerifyChannelsAreEqual(int16_t* stereo, size_t samples_per_channel) {
+ for (size_t i = 0; i < samples_per_channel; i++) {
EXPECT_EQ(stereo[i * 2 + 1], stereo[i * 2]);
}
}
@@ -139,7 +134,7 @@ void SetFrameTo(AudioFrame* frame, int16_t value) {
}
void SetFrameTo(AudioFrame* frame, int16_t left, int16_t right) {
- ASSERT_EQ(2, frame->num_channels_);
+ ASSERT_EQ(2u, frame->num_channels_);
for (size_t i = 0; i < frame->samples_per_channel_ * 2; i += 2) {
frame->data_[i] = left;
frame->data_[i + 1] = right;
@@ -199,9 +194,9 @@ T AbsValue(T a) {
}
int16_t MaxAudioFrame(const AudioFrame& frame) {
- const int length = frame.samples_per_channel_ * frame.num_channels_;
+ const size_t length = frame.samples_per_channel_ * frame.num_channels_;
int16_t max_data = AbsValue(frame.data_[0]);
- for (int i = 1; i < length; i++) {
+ for (size_t i = 1; i < length; i++) {
max_data = std::max(max_data, AbsValue(frame.data_[i]));
}
@@ -264,10 +259,10 @@ std::string OutputFilePath(std::string name,
int output_rate,
int reverse_input_rate,
int reverse_output_rate,
- int num_input_channels,
- int num_output_channels,
- int num_reverse_input_channels,
- int num_reverse_output_channels,
+ size_t num_input_channels,
+ size_t num_output_channels,
+ size_t num_reverse_input_channels,
+ size_t num_reverse_output_channels,
StreamDirection file_direction) {
std::ostringstream ss;
ss << name << "_i" << num_input_channels << "_" << input_rate / 1000 << "_ir"
@@ -362,9 +357,9 @@ class ApmTest : public ::testing::Test {
void Init(int sample_rate_hz,
int output_sample_rate_hz,
int reverse_sample_rate_hz,
- int num_input_channels,
- int num_output_channels,
- int num_reverse_channels,
+ size_t num_input_channels,
+ size_t num_output_channels,
+ size_t num_reverse_channels,
bool open_output_file);
void Init(AudioProcessing* ap);
void EnableAllComponents();
@@ -377,12 +372,12 @@ class ApmTest : public ::testing::Test {
void ProcessDelayVerificationTest(int delay_ms, int system_delay_ms,
int delay_min, int delay_max);
void TestChangingChannelsInt16Interface(
- int num_channels,
+ size_t num_channels,
AudioProcessing::Error expected_return);
- void TestChangingForwardChannels(int num_in_channels,
- int num_out_channels,
+ void TestChangingForwardChannels(size_t num_in_channels,
+ size_t num_out_channels,
AudioProcessing::Error expected_return);
- void TestChangingReverseChannels(int num_rev_channels,
+ void TestChangingReverseChannels(size_t num_rev_channels,
AudioProcessing::Error expected_return);
void RunQuantizedVolumeDoesNotGetStuckTest(int sample_rate);
void RunManualVolumeChangeIsPossibleTest(int sample_rate);
@@ -403,7 +398,7 @@ class ApmTest : public ::testing::Test {
rtc::scoped_ptr<ChannelBuffer<float> > float_cb_;
rtc::scoped_ptr<ChannelBuffer<float> > revfloat_cb_;
int output_sample_rate_hz_;
- int num_output_channels_;
+ size_t num_output_channels_;
FILE* far_file_;
FILE* near_file_;
FILE* out_file_;
@@ -487,9 +482,9 @@ void ApmTest::Init(AudioProcessing* ap) {
void ApmTest::Init(int sample_rate_hz,
int output_sample_rate_hz,
int reverse_sample_rate_hz,
- int num_input_channels,
- int num_output_channels,
- int num_reverse_channels,
+ size_t num_input_channels,
+ size_t num_output_channels,
+ size_t num_reverse_channels,
bool open_output_file) {
SetContainerFormat(sample_rate_hz, num_input_channels, frame_, &float_cb_);
output_sample_rate_hz_ = output_sample_rate_hz;
@@ -821,7 +816,7 @@ TEST_F(ApmTest, DelayOffsetWithLimitsIsSetProperly) {
}
void ApmTest::TestChangingChannelsInt16Interface(
- int num_channels,
+ size_t num_channels,
AudioProcessing::Error expected_return) {
frame_->num_channels_ = num_channels;
EXPECT_EQ(expected_return, apm_->ProcessStream(frame_));
@@ -829,8 +824,8 @@ void ApmTest::TestChangingChannelsInt16Interface(
}
void ApmTest::TestChangingForwardChannels(
- int num_in_channels,
- int num_out_channels,
+ size_t num_in_channels,
+ size_t num_out_channels,
AudioProcessing::Error expected_return) {
const StreamConfig input_stream = {frame_->sample_rate_hz_, num_in_channels};
const StreamConfig output_stream = {output_sample_rate_hz_, num_out_channels};
@@ -841,7 +836,7 @@ void ApmTest::TestChangingForwardChannels(
}
void ApmTest::TestChangingReverseChannels(
- int num_rev_channels,
+ size_t num_rev_channels,
AudioProcessing::Error expected_return) {
const ProcessingConfig processing_config = {
{{frame_->sample_rate_hz_, apm_->num_input_channels()},
@@ -862,11 +857,11 @@ TEST_F(ApmTest, ChannelsInt16Interface) {
TestChangingChannelsInt16Interface(0, apm_->kBadNumberChannelsError);
- for (int i = 1; i < 4; i++) {
+ for (size_t i = 1; i < 4; i++) {
TestChangingChannelsInt16Interface(i, kNoErr);
EXPECT_EQ(i, apm_->num_input_channels());
// We always force the number of reverse channels used for processing to 1.
- EXPECT_EQ(1, apm_->num_reverse_channels());
+ EXPECT_EQ(1u, apm_->num_reverse_channels());
}
}
@@ -877,8 +872,8 @@ TEST_F(ApmTest, Channels) {
TestChangingForwardChannels(0, 1, apm_->kBadNumberChannelsError);
TestChangingReverseChannels(0, apm_->kBadNumberChannelsError);
- for (int i = 1; i < 4; ++i) {
- for (int j = 0; j < 1; ++j) {
+ for (size_t i = 1; i < 4; ++i) {
+ for (size_t j = 0; j < 1; ++j) {
      // Output channels must be one or match input channels.
if (j == 1 || i == j) {
TestChangingForwardChannels(i, j, kNoErr);
@@ -887,7 +882,7 @@ TEST_F(ApmTest, Channels) {
EXPECT_EQ(i, apm_->num_input_channels());
EXPECT_EQ(j, apm_->num_output_channels());
        // The number of reverse channels used for processing is always 1.
- EXPECT_EQ(1, apm_->num_reverse_channels());
+ EXPECT_EQ(1u, apm_->num_reverse_channels());
} else {
TestChangingForwardChannels(i, j,
AudioProcessing::kBadNumberChannelsError);
@@ -902,7 +897,7 @@ TEST_F(ApmTest, SampleRatesInt) {
EXPECT_EQ(apm_->kBadSampleRateError, ProcessStreamChooser(kIntFormat));
// Testing valid sample rates
int fs[] = {8000, 16000, 32000, 48000};
- for (size_t i = 0; i < sizeof(fs) / sizeof(*fs); i++) {
+ for (size_t i = 0; i < arraysize(fs); i++) {
SetContainerFormat(fs[i], 2, frame_, &float_cb_);
EXPECT_NOERR(ProcessStreamChooser(kIntFormat));
}
@@ -921,7 +916,7 @@ TEST_F(ApmTest, EchoCancellation) {
EchoCancellation::kModerateSuppression,
EchoCancellation::kHighSuppression,
};
- for (size_t i = 0; i < sizeof(level)/sizeof(*level); i++) {
+ for (size_t i = 0; i < arraysize(level); i++) {
EXPECT_EQ(apm_->kNoError,
apm_->echo_cancellation()->set_suppression_level(level[i]));
EXPECT_EQ(level[i],
@@ -998,7 +993,7 @@ TEST_F(ApmTest, DISABLED_EchoCancellationReportsCorrectDelays) {
// Test a couple of corner cases and verify that the estimated delay is
// within a valid region (set to +-1.5 blocks). Note that these cases are
// sampling frequency dependent.
- for (size_t i = 0; i < kProcessSampleRatesSize; i++) {
+ for (size_t i = 0; i < arraysize(kProcessSampleRates); i++) {
Init(kProcessSampleRates[i],
kProcessSampleRates[i],
kProcessSampleRates[i],
@@ -1070,7 +1065,7 @@ TEST_F(ApmTest, EchoControlMobile) {
EchoControlMobile::kSpeakerphone,
EchoControlMobile::kLoudSpeakerphone,
};
- for (size_t i = 0; i < sizeof(mode)/sizeof(*mode); i++) {
+ for (size_t i = 0; i < arraysize(mode); i++) {
EXPECT_EQ(apm_->kNoError,
apm_->echo_control_mobile()->set_routing_mode(mode[i]));
EXPECT_EQ(mode[i],
@@ -1135,7 +1130,7 @@ TEST_F(ApmTest, GainControl) {
GainControl::kAdaptiveDigital,
GainControl::kFixedDigital
};
- for (size_t i = 0; i < sizeof(mode)/sizeof(*mode); i++) {
+ for (size_t i = 0; i < arraysize(mode); i++) {
EXPECT_EQ(apm_->kNoError,
apm_->gain_control()->set_mode(mode[i]));
EXPECT_EQ(mode[i], apm_->gain_control()->mode());
@@ -1151,7 +1146,7 @@ TEST_F(ApmTest, GainControl) {
apm_->gain_control()->target_level_dbfs()));
int level_dbfs[] = {0, 6, 31};
- for (size_t i = 0; i < sizeof(level_dbfs)/sizeof(*level_dbfs); i++) {
+ for (size_t i = 0; i < arraysize(level_dbfs); i++) {
EXPECT_EQ(apm_->kNoError,
apm_->gain_control()->set_target_level_dbfs(level_dbfs[i]));
EXPECT_EQ(level_dbfs[i], apm_->gain_control()->target_level_dbfs());
@@ -1169,7 +1164,7 @@ TEST_F(ApmTest, GainControl) {
apm_->gain_control()->compression_gain_db()));
int gain_db[] = {0, 10, 90};
- for (size_t i = 0; i < sizeof(gain_db)/sizeof(*gain_db); i++) {
+ for (size_t i = 0; i < arraysize(gain_db); i++) {
EXPECT_EQ(apm_->kNoError,
apm_->gain_control()->set_compression_gain_db(gain_db[i]));
EXPECT_EQ(gain_db[i], apm_->gain_control()->compression_gain_db());
@@ -1200,14 +1195,14 @@ TEST_F(ApmTest, GainControl) {
apm_->gain_control()->analog_level_maximum()));
int min_level[] = {0, 255, 1024};
- for (size_t i = 0; i < sizeof(min_level)/sizeof(*min_level); i++) {
+ for (size_t i = 0; i < arraysize(min_level); i++) {
EXPECT_EQ(apm_->kNoError,
apm_->gain_control()->set_analog_level_limits(min_level[i], 1024));
EXPECT_EQ(min_level[i], apm_->gain_control()->analog_level_minimum());
}
int max_level[] = {0, 1024, 65535};
- for (size_t i = 0; i < sizeof(min_level)/sizeof(*min_level); i++) {
+ for (size_t i = 0; i < arraysize(min_level); i++) {
EXPECT_EQ(apm_->kNoError,
apm_->gain_control()->set_analog_level_limits(0, max_level[i]));
EXPECT_EQ(max_level[i], apm_->gain_control()->analog_level_maximum());
@@ -1246,7 +1241,7 @@ void ApmTest::RunQuantizedVolumeDoesNotGetStuckTest(int sample_rate) {
// Verifies that despite volume slider quantization, the AGC can continue to
// increase its volume.
TEST_F(ApmTest, QuantizedVolumeDoesNotGetStuck) {
- for (size_t i = 0; i < kSampleRatesSize; ++i) {
+ for (size_t i = 0; i < arraysize(kSampleRates); ++i) {
RunQuantizedVolumeDoesNotGetStuckTest(kSampleRates[i]);
}
}
@@ -1291,7 +1286,7 @@ void ApmTest::RunManualVolumeChangeIsPossibleTest(int sample_rate) {
}
TEST_F(ApmTest, ManualVolumeChangeIsPossible) {
- for (size_t i = 0; i < kSampleRatesSize; ++i) {
+ for (size_t i = 0; i < arraysize(kSampleRates); ++i) {
RunManualVolumeChangeIsPossibleTest(kSampleRates[i]);
}
}
@@ -1299,11 +1294,11 @@ TEST_F(ApmTest, ManualVolumeChangeIsPossible) {
#if !defined(WEBRTC_ANDROID) && !defined(WEBRTC_IOS)
TEST_F(ApmTest, AgcOnlyAdaptsWhenTargetSignalIsPresent) {
const int kSampleRateHz = 16000;
- const int kSamplesPerChannel =
- AudioProcessing::kChunkSizeMs * kSampleRateHz / 1000;
- const int kNumInputChannels = 2;
- const int kNumOutputChannels = 1;
- const int kNumChunks = 700;
+ const size_t kSamplesPerChannel =
+ static_cast<size_t>(AudioProcessing::kChunkSizeMs * kSampleRateHz / 1000);
+ const size_t kNumInputChannels = 2;
+ const size_t kNumOutputChannels = 1;
+ const size_t kNumChunks = 700;
const float kScaleFactor = 0.25f;
Config config;
std::vector<webrtc::Point> geometry;
@@ -1317,8 +1312,8 @@ TEST_F(ApmTest, AgcOnlyAdaptsWhenTargetSignalIsPresent) {
EXPECT_EQ(kNoErr, apm->gain_control()->Enable(true));
ChannelBuffer<float> src_buf(kSamplesPerChannel, kNumInputChannels);
ChannelBuffer<float> dest_buf(kSamplesPerChannel, kNumOutputChannels);
- const int max_length = kSamplesPerChannel * std::max(kNumInputChannels,
- kNumOutputChannels);
+ const size_t max_length = kSamplesPerChannel * std::max(kNumInputChannels,
+ kNumOutputChannels);
rtc::scoped_ptr<int16_t[]> int_data(new int16_t[max_length]);
rtc::scoped_ptr<float[]> float_data(new float[max_length]);
std::string filename = ResourceFilePath("far", kSampleRateHz);
@@ -1330,13 +1325,13 @@ TEST_F(ApmTest, AgcOnlyAdaptsWhenTargetSignalIsPresent) {
bool is_target = false;
EXPECT_CALL(*beamformer, is_target_present())
.WillRepeatedly(testing::ReturnPointee(&is_target));
- for (int i = 0; i < kNumChunks; ++i) {
+ for (size_t i = 0; i < kNumChunks; ++i) {
ASSERT_TRUE(ReadChunk(far_file,
int_data.get(),
float_data.get(),
&src_buf));
- for (int j = 0; j < kNumInputChannels; ++j) {
- for (int k = 0; k < kSamplesPerChannel; ++k) {
+ for (size_t j = 0; j < kNumInputChannels; ++j) {
+ for (size_t k = 0; k < kSamplesPerChannel; ++k) {
src_buf.channels()[j][k] *= kScaleFactor;
}
}
@@ -1355,13 +1350,13 @@ TEST_F(ApmTest, AgcOnlyAdaptsWhenTargetSignalIsPresent) {
apm->gain_control()->compression_gain_db());
rewind(far_file);
is_target = true;
- for (int i = 0; i < kNumChunks; ++i) {
+ for (size_t i = 0; i < kNumChunks; ++i) {
ASSERT_TRUE(ReadChunk(far_file,
int_data.get(),
float_data.get(),
&src_buf));
- for (int j = 0; j < kNumInputChannels; ++j) {
- for (int k = 0; k < kSamplesPerChannel; ++k) {
+ for (size_t j = 0; j < kNumInputChannels; ++j) {
+ for (size_t k = 0; k < kSamplesPerChannel; ++k) {
src_buf.channels()[j][k] *= kScaleFactor;
}
}
@@ -1390,7 +1385,7 @@ TEST_F(ApmTest, NoiseSuppression) {
NoiseSuppression::kHigh,
NoiseSuppression::kVeryHigh
};
- for (size_t i = 0; i < sizeof(level)/sizeof(*level); i++) {
+ for (size_t i = 0; i < arraysize(level); i++) {
EXPECT_EQ(apm_->kNoError,
apm_->noise_suppression()->set_level(level[i]));
EXPECT_EQ(level[i], apm_->noise_suppression()->level());
@@ -1492,7 +1487,7 @@ TEST_F(ApmTest, VoiceDetection) {
VoiceDetection::kModerateLikelihood,
VoiceDetection::kHighLikelihood
};
- for (size_t i = 0; i < sizeof(likelihood)/sizeof(*likelihood); i++) {
+ for (size_t i = 0; i < arraysize(likelihood); i++) {
EXPECT_EQ(apm_->kNoError,
apm_->voice_detection()->set_likelihood(likelihood[i]));
EXPECT_EQ(likelihood[i], apm_->voice_detection()->likelihood());
@@ -1524,7 +1519,7 @@ TEST_F(ApmTest, VoiceDetection) {
AudioFrame::kVadPassive,
AudioFrame::kVadUnknown
};
- for (size_t i = 0; i < sizeof(activity)/sizeof(*activity); i++) {
+ for (size_t i = 0; i < arraysize(activity); i++) {
frame_->vad_activity_ = activity[i];
EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
EXPECT_EQ(activity[i], frame_->vad_activity_);
@@ -1550,7 +1545,7 @@ TEST_F(ApmTest, AllProcessingDisabledByDefault) {
}
TEST_F(ApmTest, NoProcessingWhenAllComponentsDisabled) {
- for (size_t i = 0; i < kSampleRatesSize; i++) {
+ for (size_t i = 0; i < arraysize(kSampleRates); i++) {
Init(kSampleRates[i], kSampleRates[i], kSampleRates[i], 2, 2, 2, false);
SetFrameTo(frame_, 1000, 2000);
AudioFrame frame_copy;
@@ -1602,7 +1597,7 @@ TEST_F(ApmTest, NoProcessingWhenAllComponentsDisabledFloat) {
TEST_F(ApmTest, IdenticalInputChannelsResultInIdenticalOutputChannels) {
EnableAllComponents();
- for (size_t i = 0; i < kProcessSampleRatesSize; i++) {
+ for (size_t i = 0; i < arraysize(kProcessSampleRates); i++) {
Init(kProcessSampleRates[i],
kProcessSampleRates[i],
kProcessSampleRates[i],
@@ -1751,7 +1746,8 @@ void ApmTest::ProcessDebugDump(const std::string& in_filename,
const audioproc::ReverseStream msg = event_msg.reverse_stream();
if (msg.channel_size() > 0) {
- ASSERT_EQ(revframe_->num_channels_, msg.channel_size());
+ ASSERT_EQ(revframe_->num_channels_,
+ static_cast<size_t>(msg.channel_size()));
for (int i = 0; i < msg.channel_size(); ++i) {
memcpy(revfloat_cb_->channels()[i],
msg.channel(i).data(),
@@ -1781,7 +1777,8 @@ void ApmTest::ProcessDebugDump(const std::string& in_filename,
}
if (msg.input_channel_size() > 0) {
- ASSERT_EQ(frame_->num_channels_, msg.input_channel_size());
+ ASSERT_EQ(frame_->num_channels_,
+ static_cast<size_t>(msg.input_channel_size()));
for (int i = 0; i < msg.input_channel_size(); ++i) {
memcpy(float_cb_->channels()[i],
msg.input_channel(i).data(),
@@ -1939,11 +1936,14 @@ TEST_F(ApmTest, FloatAndIntInterfacesGiveSimilarResults) {
if (test->num_input_channels() != test->num_output_channels())
continue;
- const int num_render_channels = test->num_reverse_channels();
- const int num_input_channels = test->num_input_channels();
- const int num_output_channels = test->num_output_channels();
- const int samples_per_channel = test->sample_rate() *
- AudioProcessing::kChunkSizeMs / 1000;
+ const size_t num_render_channels =
+ static_cast<size_t>(test->num_reverse_channels());
+ const size_t num_input_channels =
+ static_cast<size_t>(test->num_input_channels());
+ const size_t num_output_channels =
+ static_cast<size_t>(test->num_output_channels());
+ const size_t samples_per_channel = static_cast<size_t>(
+ test->sample_rate() * AudioProcessing::kChunkSizeMs / 1000);
Init(test->sample_rate(), test->sample_rate(), test->sample_rate(),
num_input_channels, num_output_channels, num_render_channels, true);
@@ -1984,7 +1984,7 @@ TEST_F(ApmTest, FloatAndIntInterfacesGiveSimilarResults) {
test->sample_rate(),
LayoutFromChannels(num_output_channels),
float_cb_->channels()));
- for (int j = 0; j < num_output_channels; ++j) {
+ for (size_t j = 0; j < num_output_channels; ++j) {
FloatToS16(float_cb_->channels()[j],
samples_per_channel,
output_cb.channels()[j]);
@@ -2017,7 +2017,7 @@ TEST_F(ApmTest, FloatAndIntInterfacesGiveSimilarResults) {
0.01);
// Reset in case of downmixing.
- frame_->num_channels_ = test->num_input_channels();
+ frame_->num_channels_ = static_cast<size_t>(test->num_input_channels());
}
rewind(far_file_);
rewind(near_file_);
@@ -2035,9 +2035,9 @@ TEST_F(ApmTest, Process) {
OpenFileAndReadMessage(ref_filename_, &ref_data);
} else {
// Write the desired tests to the protobuf reference file.
- for (size_t i = 0; i < kChannelsSize; i++) {
- for (size_t j = 0; j < kChannelsSize; j++) {
- for (size_t l = 0; l < kProcessSampleRatesSize; l++) {
+ for (size_t i = 0; i < arraysize(kChannels); i++) {
+ for (size_t j = 0; j < arraysize(kChannels); j++) {
+ for (size_t l = 0; l < arraysize(kProcessSampleRates); l++) {
audioproc::Test* test = ref_data.add_test();
test->set_num_reverse_channels(kChannels[i]);
test->set_num_input_channels(kChannels[j]);
@@ -2078,9 +2078,9 @@ TEST_F(ApmTest, Process) {
Init(test->sample_rate(),
test->sample_rate(),
test->sample_rate(),
- test->num_input_channels(),
- test->num_output_channels(),
- test->num_reverse_channels(),
+ static_cast<size_t>(test->num_input_channels()),
+ static_cast<size_t>(test->num_output_channels()),
+ static_cast<size_t>(test->num_reverse_channels()),
true);
int frame_count = 0;
@@ -2105,7 +2105,8 @@ TEST_F(ApmTest, Process) {
EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
// Ensure the frame was downmixed properly.
- EXPECT_EQ(test->num_output_channels(), frame_->num_channels_);
+ EXPECT_EQ(static_cast<size_t>(test->num_output_channels()),
+ frame_->num_channels_);
max_output_average += MaxAudioFrame(*frame_);
@@ -2135,7 +2136,7 @@ TEST_F(ApmTest, Process) {
ASSERT_EQ(frame_size, write_count);
// Reset in case of downmixing.
- frame_->num_channels_ = test->num_input_channels();
+ frame_->num_channels_ = static_cast<size_t>(test->num_input_channels());
frame_count++;
}
max_output_average /= frame_count;
@@ -2264,12 +2265,11 @@ TEST_F(ApmTest, NoErrorsWithKeyboardChannel) {
{AudioProcessing::kStereoAndKeyboard, AudioProcessing::kMono},
{AudioProcessing::kStereoAndKeyboard, AudioProcessing::kStereo},
};
- size_t channel_format_size = sizeof(cf) / sizeof(*cf);
rtc::scoped_ptr<AudioProcessing> ap(AudioProcessing::Create());
// Enable one component just to ensure some processing takes place.
ap->noise_suppression()->Enable(true);
- for (size_t i = 0; i < channel_format_size; ++i) {
+ for (size_t i = 0; i < arraysize(cf); ++i) {
const int in_rate = 44100;
const int out_rate = 48000;
ChannelBuffer<float> in_cb(SamplesFromRate(in_rate),
@@ -2296,7 +2296,7 @@ TEST_F(ApmTest, NoErrorsWithKeyboardChannel) {
// error results to the supplied accumulators.
void UpdateBestSNR(const float* ref,
const float* test,
- int length,
+ size_t length,
int expected_delay,
double* variance_acc,
double* sq_error_acc) {
@@ -2308,7 +2308,7 @@ void UpdateBestSNR(const float* ref,
++delay) {
double sq_error = 0;
double variance = 0;
- for (int i = 0; i < length - delay; ++i) {
+ for (size_t i = 0; i < length - delay; ++i) {
double error = test[i + delay] - ref[i];
sq_error += error * error;
variance += ref[i] * ref[i];
@@ -2360,14 +2360,10 @@ class AudioProcessingTest
static void SetUpTestCase() {
// Create all needed output reference files.
const int kNativeRates[] = {8000, 16000, 32000, 48000};
- const size_t kNativeRatesSize =
- sizeof(kNativeRates) / sizeof(*kNativeRates);
- const int kNumChannels[] = {1, 2};
- const size_t kNumChannelsSize =
- sizeof(kNumChannels) / sizeof(*kNumChannels);
- for (size_t i = 0; i < kNativeRatesSize; ++i) {
- for (size_t j = 0; j < kNumChannelsSize; ++j) {
- for (size_t k = 0; k < kNumChannelsSize; ++k) {
+ const size_t kNumChannels[] = {1, 2};
+ for (size_t i = 0; i < arraysize(kNativeRates); ++i) {
+ for (size_t j = 0; j < arraysize(kNumChannels); ++j) {
+ for (size_t k = 0; k < arraysize(kNumChannels); ++k) {
// The reference files always have matching input and output channels.
ProcessFormat(kNativeRates[i], kNativeRates[i], kNativeRates[i],
kNativeRates[i], kNumChannels[j], kNumChannels[j],
@@ -2388,10 +2384,10 @@ class AudioProcessingTest
int output_rate,
int reverse_input_rate,
int reverse_output_rate,
- int num_input_channels,
- int num_output_channels,
- int num_reverse_input_channels,
- int num_reverse_output_channels,
+ size_t num_input_channels,
+ size_t num_output_channels,
+ size_t num_reverse_input_channels,
+ size_t num_reverse_output_channels,
std::string output_file_prefix) {
Config config;
config.Set<ExperimentalAgc>(new ExperimentalAgc(false));
@@ -2466,18 +2462,19 @@ class AudioProcessingTest
// Dump forward output to file.
Interleave(out_cb.channels(), out_cb.num_frames(), out_cb.num_channels(),
float_data.get());
- int out_length = out_cb.num_channels() * out_cb.num_frames();
+ size_t out_length = out_cb.num_channels() * out_cb.num_frames();
- ASSERT_EQ(static_cast<size_t>(out_length),
+ ASSERT_EQ(out_length,
fwrite(float_data.get(), sizeof(float_data[0]),
out_length, out_file));
// Dump reverse output to file.
Interleave(rev_out_cb.channels(), rev_out_cb.num_frames(),
rev_out_cb.num_channels(), float_data.get());
- int rev_out_length = rev_out_cb.num_channels() * rev_out_cb.num_frames();
+ size_t rev_out_length =
+ rev_out_cb.num_channels() * rev_out_cb.num_frames();
- ASSERT_EQ(static_cast<size_t>(rev_out_length),
+ ASSERT_EQ(rev_out_length,
fwrite(float_data.get(), sizeof(float_data[0]), rev_out_length,
rev_out_file));
@@ -2513,9 +2510,8 @@ TEST_P(AudioProcessingTest, Formats) {
{2, 2, 1, 1},
{2, 2, 2, 2},
};
- size_t channel_format_size = sizeof(cf) / sizeof(*cf);
- for (size_t i = 0; i < channel_format_size; ++i) {
+ for (size_t i = 0; i < arraysize(cf); ++i) {
ProcessFormat(input_rate_, output_rate_, reverse_input_rate_,
reverse_output_rate_, cf[i].num_input, cf[i].num_output,
cf[i].num_reverse_input, cf[i].num_reverse_output, "out");
@@ -2565,8 +2561,8 @@ TEST_P(AudioProcessingTest, Formats) {
ASSERT_TRUE(out_file != NULL);
ASSERT_TRUE(ref_file != NULL);
- const int ref_length = SamplesFromRate(ref_rate) * out_num;
- const int out_length = SamplesFromRate(out_rate) * out_num;
+ const size_t ref_length = SamplesFromRate(ref_rate) * out_num;
+ const size_t out_length = SamplesFromRate(out_rate) * out_num;
// Data from the reference file.
rtc::scoped_ptr<float[]> ref_data(new float[ref_length]);
// Data from the output file.
@@ -2606,8 +2602,9 @@ TEST_P(AudioProcessingTest, Formats) {
if (out_rate != ref_rate) {
// Resample the output back to its internal processing rate if
        // necessary.
- ASSERT_EQ(ref_length, resampler.Resample(out_ptr, out_length,
- cmp_data.get(), ref_length));
+ ASSERT_EQ(ref_length,
+ static_cast<size_t>(resampler.Resample(
+ out_ptr, out_length, cmp_data.get(), ref_length)));
out_ptr = cmp_data.get();
}
@@ -2752,9 +2749,5 @@ INSTANTIATE_TEST_CASE_P(
std::tr1::make_tuple(16000, 16000, 16000, 16000, 0, 0)));
#endif
-// TODO(henrike): re-implement functionality lost when removing the old main
-// function. See
-// https://code.google.com/p/webrtc/issues/detail?id=1981
-
} // namespace
} // namespace webrtc
diff --git a/webrtc/modules/audio_processing/test/audioproc_float.cc b/webrtc/modules/audio_processing/test/audioproc_float.cc
index 811e9070fa..a489d255c8 100644
--- a/webrtc/modules/audio_processing/test/audioproc_float.cc
+++ b/webrtc/modules/audio_processing/test/audioproc_float.cc
@@ -9,35 +9,50 @@
*/
#include <stdio.h>
+#include <iostream>
#include <sstream>
#include <string>
+#include <utility>
#include "gflags/gflags.h"
#include "webrtc/base/checks.h"
+#include "webrtc/base/format_macros.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/common_audio/channel_buffer.h"
#include "webrtc/common_audio/wav_file.h"
#include "webrtc/modules/audio_processing/include/audio_processing.h"
+#include "webrtc/modules/audio_processing/test/audio_file_processor.h"
#include "webrtc/modules/audio_processing/test/protobuf_utils.h"
#include "webrtc/modules/audio_processing/test/test_utils.h"
#include "webrtc/system_wrappers/include/tick_util.h"
#include "webrtc/test/testsupport/trace_to_stderr.h"
-DEFINE_string(dump, "", "The name of the debug dump file to read from.");
-DEFINE_string(i, "", "The name of the input file to read from.");
-DEFINE_string(i_rev, "", "The name of the reverse input file to read from.");
-DEFINE_string(o, "out.wav", "Name of the output file to write to.");
-DEFINE_string(o_rev,
- "out_rev.wav",
- "Name of the reverse output file to write to.");
-DEFINE_int32(out_channels, 0, "Number of output channels. Defaults to input.");
-DEFINE_int32(out_sample_rate, 0,
- "Output sample rate in Hz. Defaults to input.");
+namespace {
+
+bool ValidateOutChannels(const char* flagname, int32_t value) {
+ return value >= 0;
+}
+
+} // namespace
+
+DEFINE_string(dump, "", "Name of the aecdump debug file to read from.");
+DEFINE_string(i, "", "Name of the capture input stream file to read from.");
+DEFINE_string(
+ o,
+ "out.wav",
+ "Name of the output file to write the processed capture stream to.");
+DEFINE_int32(out_channels, 1, "Number of output channels.");
+const bool out_channels_dummy =
+ google::RegisterFlagValidator(&FLAGS_out_channels, &ValidateOutChannels);
+DEFINE_int32(out_sample_rate, 48000, "Output sample rate in Hz.");
DEFINE_string(mic_positions, "",
"Space delimited cartesian coordinates of microphones in meters. "
"The coordinates of each point are contiguous. "
"For a two element array: \"x1 y1 z1 x2 y2 z2\"");
-DEFINE_double(target_angle_degrees, 90, "The azimuth of the target in radians");
+DEFINE_double(
+ target_angle_degrees,
+ 90,
+ "The azimuth of the target in degrees. Only applies to beamforming.");
DEFINE_bool(aec, false, "Enable echo cancellation.");
DEFINE_bool(agc, false, "Enable automatic gain control.");
@@ -64,15 +79,6 @@ const char kUsage[] =
"All components are disabled by default. If any bi-directional components\n"
"are enabled, only debug dump files are permitted.";
-// Returns a StreamConfig corresponding to wav_file if it's non-nullptr.
-// Otherwise returns a default initialized StreamConfig.
-StreamConfig MakeStreamConfig(const WavFile* wav_file) {
- if (wav_file) {
- return {wav_file->sample_rate(), wav_file->num_channels()};
- }
- return {};
-}
-
} // namespace
int main(int argc, char* argv[]) {
@@ -84,158 +90,75 @@ int main(int argc, char* argv[]) {
"An input file must be specified with either -i or -dump.\n");
return 1;
}
- if (!FLAGS_dump.empty()) {
- fprintf(stderr, "FIXME: the -dump option is not yet implemented.\n");
+ if (FLAGS_dump.empty() && (FLAGS_aec || FLAGS_ie)) {
+ fprintf(stderr, "-aec and -ie require a -dump file.\n");
+ return 1;
+ }
+ if (FLAGS_ie) {
+ fprintf(stderr,
+ "FIXME(ajm): The intelligibility enhancer output is not dumped.\n");
return 1;
}
test::TraceToStderr trace_to_stderr(true);
- WavReader in_file(FLAGS_i);
- // If the output format is uninitialized, use the input format.
- const int out_channels =
- FLAGS_out_channels ? FLAGS_out_channels : in_file.num_channels();
- const int out_sample_rate =
- FLAGS_out_sample_rate ? FLAGS_out_sample_rate : in_file.sample_rate();
- WavWriter out_file(FLAGS_o, out_sample_rate, out_channels);
-
Config config;
- config.Set<ExperimentalNs>(new ExperimentalNs(FLAGS_ts || FLAGS_all));
- config.Set<Intelligibility>(new Intelligibility(FLAGS_ie || FLAGS_all));
-
if (FLAGS_bf || FLAGS_all) {
- const size_t num_mics = in_file.num_channels();
- const std::vector<Point> array_geometry =
- ParseArrayGeometry(FLAGS_mic_positions, num_mics);
- RTC_CHECK_EQ(array_geometry.size(), num_mics);
-
+ if (FLAGS_mic_positions.empty()) {
+ fprintf(stderr, "-mic_positions must be specified when -bf is used.\n");
+ return 1;
+ }
config.Set<Beamforming>(new Beamforming(
- true, array_geometry,
+ true, ParseArrayGeometry(FLAGS_mic_positions),
SphericalPointf(DegreesToRadians(FLAGS_target_angle_degrees), 0.f,
1.f)));
}
+ config.Set<ExperimentalNs>(new ExperimentalNs(FLAGS_ts || FLAGS_all));
+ config.Set<Intelligibility>(new Intelligibility(FLAGS_ie || FLAGS_all));
rtc::scoped_ptr<AudioProcessing> ap(AudioProcessing::Create(config));
- if (!FLAGS_dump.empty()) {
- RTC_CHECK_EQ(kNoErr,
- ap->echo_cancellation()->Enable(FLAGS_aec || FLAGS_all));
- } else if (FLAGS_aec) {
- fprintf(stderr, "-aec requires a -dump file.\n");
- return -1;
- }
- bool process_reverse = !FLAGS_i_rev.empty();
+ RTC_CHECK_EQ(kNoErr, ap->echo_cancellation()->Enable(FLAGS_aec || FLAGS_all));
RTC_CHECK_EQ(kNoErr, ap->gain_control()->Enable(FLAGS_agc || FLAGS_all));
- RTC_CHECK_EQ(kNoErr,
- ap->gain_control()->set_mode(GainControl::kFixedDigital));
RTC_CHECK_EQ(kNoErr, ap->high_pass_filter()->Enable(FLAGS_hpf || FLAGS_all));
RTC_CHECK_EQ(kNoErr, ap->noise_suppression()->Enable(FLAGS_ns || FLAGS_all));
- if (FLAGS_ns_level != -1)
+ if (FLAGS_ns_level != -1) {
RTC_CHECK_EQ(kNoErr,
ap->noise_suppression()->set_level(
static_cast<NoiseSuppression::Level>(FLAGS_ns_level)));
-
- printf("Input file: %s\nChannels: %d, Sample rate: %d Hz\n\n",
- FLAGS_i.c_str(), in_file.num_channels(), in_file.sample_rate());
- printf("Output file: %s\nChannels: %d, Sample rate: %d Hz\n\n",
- FLAGS_o.c_str(), out_file.num_channels(), out_file.sample_rate());
-
- ChannelBuffer<float> in_buf(
- rtc::CheckedDivExact(in_file.sample_rate(), kChunksPerSecond),
- in_file.num_channels());
- ChannelBuffer<float> out_buf(
- rtc::CheckedDivExact(out_file.sample_rate(), kChunksPerSecond),
- out_file.num_channels());
-
- std::vector<float> in_interleaved(in_buf.size());
- std::vector<float> out_interleaved(out_buf.size());
-
- rtc::scoped_ptr<WavReader> in_rev_file;
- rtc::scoped_ptr<WavWriter> out_rev_file;
- rtc::scoped_ptr<ChannelBuffer<float>> in_rev_buf;
- rtc::scoped_ptr<ChannelBuffer<float>> out_rev_buf;
- std::vector<float> in_rev_interleaved;
- std::vector<float> out_rev_interleaved;
- if (process_reverse) {
- in_rev_file.reset(new WavReader(FLAGS_i_rev));
- out_rev_file.reset(new WavWriter(FLAGS_o_rev, in_rev_file->sample_rate(),
- in_rev_file->num_channels()));
- printf("In rev file: %s\nChannels: %d, Sample rate: %d Hz\n\n",
- FLAGS_i_rev.c_str(), in_rev_file->num_channels(),
- in_rev_file->sample_rate());
- printf("Out rev file: %s\nChannels: %d, Sample rate: %d Hz\n\n",
- FLAGS_o_rev.c_str(), out_rev_file->num_channels(),
- out_rev_file->sample_rate());
- in_rev_buf.reset(new ChannelBuffer<float>(
- rtc::CheckedDivExact(in_rev_file->sample_rate(), kChunksPerSecond),
- in_rev_file->num_channels()));
- in_rev_interleaved.resize(in_rev_buf->size());
- out_rev_buf.reset(new ChannelBuffer<float>(
- rtc::CheckedDivExact(out_rev_file->sample_rate(), kChunksPerSecond),
- out_rev_file->num_channels()));
- out_rev_interleaved.resize(out_rev_buf->size());
+ }
+ ap->set_stream_key_pressed(FLAGS_ts);
+
+ rtc::scoped_ptr<AudioFileProcessor> processor;
+ auto out_file = rtc_make_scoped_ptr(new WavWriter(
+ FLAGS_o, FLAGS_out_sample_rate, static_cast<size_t>(FLAGS_out_channels)));
+ std::cout << FLAGS_o << ": " << out_file->FormatAsString() << std::endl;
+ if (FLAGS_dump.empty()) {
+ auto in_file = rtc_make_scoped_ptr(new WavReader(FLAGS_i));
+ std::cout << FLAGS_i << ": " << in_file->FormatAsString() << std::endl;
+ processor.reset(new WavFileProcessor(std::move(ap), std::move(in_file),
+ std::move(out_file)));
+
+ } else {
+ processor.reset(new AecDumpFileProcessor(
+ std::move(ap), fopen(FLAGS_dump.c_str(), "rb"), std::move(out_file)));
}
- TickTime processing_start_time;
- TickInterval accumulated_time;
int num_chunks = 0;
-
- const auto input_config = MakeStreamConfig(&in_file);
- const auto output_config = MakeStreamConfig(&out_file);
- const auto reverse_input_config = MakeStreamConfig(in_rev_file.get());
- const auto reverse_output_config = MakeStreamConfig(out_rev_file.get());
-
- while (in_file.ReadSamples(in_interleaved.size(),
- &in_interleaved[0]) == in_interleaved.size()) {
- // Have logs display the file time rather than wallclock time.
+ while (processor->ProcessChunk()) {
trace_to_stderr.SetTimeSeconds(num_chunks * 1.f / kChunksPerSecond);
- FloatS16ToFloat(&in_interleaved[0], in_interleaved.size(),
- &in_interleaved[0]);
- Deinterleave(&in_interleaved[0], in_buf.num_frames(),
- in_buf.num_channels(), in_buf.channels());
- if (process_reverse) {
- in_rev_file->ReadSamples(in_rev_interleaved.size(),
- in_rev_interleaved.data());
- FloatS16ToFloat(in_rev_interleaved.data(), in_rev_interleaved.size(),
- in_rev_interleaved.data());
- Deinterleave(in_rev_interleaved.data(), in_rev_buf->num_frames(),
- in_rev_buf->num_channels(), in_rev_buf->channels());
- }
-
- if (FLAGS_perf) {
- processing_start_time = TickTime::Now();
- }
- RTC_CHECK_EQ(kNoErr, ap->ProcessStream(in_buf.channels(), input_config,
- output_config, out_buf.channels()));
- if (process_reverse) {
- RTC_CHECK_EQ(kNoErr, ap->ProcessReverseStream(
- in_rev_buf->channels(), reverse_input_config,
- reverse_output_config, out_rev_buf->channels()));
- }
- if (FLAGS_perf) {
- accumulated_time += TickTime::Now() - processing_start_time;
- }
-
- Interleave(out_buf.channels(), out_buf.num_frames(),
- out_buf.num_channels(), &out_interleaved[0]);
- FloatToFloatS16(&out_interleaved[0], out_interleaved.size(),
- &out_interleaved[0]);
- out_file.WriteSamples(&out_interleaved[0], out_interleaved.size());
- if (process_reverse) {
- Interleave(out_rev_buf->channels(), out_rev_buf->num_frames(),
- out_rev_buf->num_channels(), out_rev_interleaved.data());
- FloatToFloatS16(out_rev_interleaved.data(), out_rev_interleaved.size(),
- out_rev_interleaved.data());
- out_rev_file->WriteSamples(out_rev_interleaved.data(),
- out_rev_interleaved.size());
- }
- num_chunks++;
+ ++num_chunks;
}
+
if (FLAGS_perf) {
- int64_t execution_time_ms = accumulated_time.Milliseconds();
- printf("\nExecution time: %.3f s\nFile time: %.2f s\n"
- "Time per chunk: %.3f ms\n",
- execution_time_ms * 0.001f, num_chunks * 1.f / kChunksPerSecond,
- execution_time_ms * 1.f / num_chunks);
+ const auto& proc_time = processor->proc_time();
+ int64_t exec_time_us = proc_time.sum.Microseconds();
+ printf(
+ "\nExecution time: %.3f s, File time: %.2f s\n"
+ "Time per chunk (mean, max, min):\n%.0f us, %.0f us, %.0f us\n",
+ exec_time_us * 1e-6, num_chunks * 1.f / kChunksPerSecond,
+ exec_time_us * 1.f / num_chunks, 1.f * proc_time.max.Microseconds(),
+ 1.f * proc_time.min.Microseconds());
}
+
return 0;
}
diff --git a/webrtc/modules/audio_processing/test/debug_dump_test.cc b/webrtc/modules/audio_processing/test/debug_dump_test.cc
new file mode 100644
index 0000000000..005faa0f44
--- /dev/null
+++ b/webrtc/modules/audio_processing/test/debug_dump_test.cc
@@ -0,0 +1,612 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stddef.h> // size_t
+#include <string>
+#include <vector>
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/audio_processing/debug.pb.h"
+#include "webrtc/base/checks.h"
+#include "webrtc/base/scoped_ptr.h"
+#include "webrtc/common_audio/channel_buffer.h"
+#include "webrtc/modules/audio_coding/neteq/tools/resample_input_audio_file.h"
+#include "webrtc/modules/audio_processing/include/audio_processing.h"
+#include "webrtc/modules/audio_processing/test/protobuf_utils.h"
+#include "webrtc/modules/audio_processing/test/test_utils.h"
+#include "webrtc/test/testsupport/fileutils.h"
+
+namespace webrtc {
+namespace test {
+
+namespace {
+
+void MaybeResetBuffer(rtc::scoped_ptr<ChannelBuffer<float>>* buffer,
+ const StreamConfig& config) {
+ auto& buffer_ref = *buffer;
+ if (!buffer_ref.get() || buffer_ref->num_frames() != config.num_frames() ||
+ buffer_ref->num_channels() != config.num_channels()) {
+ buffer_ref.reset(new ChannelBuffer<float>(config.num_frames(),
+ config.num_channels()));
+ }
+}
+
+class DebugDumpGenerator {
+ public:
+ DebugDumpGenerator(const std::string& input_file_name,
+ int input_file_rate_hz,
+ int input_channels,
+ const std::string& reverse_file_name,
+ int reverse_file_rate_hz,
+ int reverse_channels,
+ const Config& config,
+ const std::string& dump_file_name);
+
+ // Constructor that uses default input files.
+ explicit DebugDumpGenerator(const Config& config);
+
+ ~DebugDumpGenerator();
+
+ // Changes the sample rate of the input audio to the APM.
+ void SetInputRate(int rate_hz);
+
+  // Sets whether to convert the stereo input signal to mono by discarding the
+  // other channels.
+ void ForceInputMono(bool mono);
+
+ // Changes the sample rate of the reverse audio to the APM.
+ void SetReverseRate(int rate_hz);
+
+  // Sets whether to convert the stereo reverse signal to mono by discarding
+  // the other channels.
+ void ForceReverseMono(bool mono);
+
+ // Sets the required sample rate of the APM output.
+ void SetOutputRate(int rate_hz);
+
+ // Sets the required channels of the APM output.
+ void SetOutputChannels(int channels);
+
+ std::string dump_file_name() const { return dump_file_name_; }
+
+ void StartRecording();
+ void Process(size_t num_blocks);
+ void StopRecording();
+ AudioProcessing* apm() const { return apm_.get(); }
+
+ private:
+ static void ReadAndDeinterleave(ResampleInputAudioFile* audio, int channels,
+ const StreamConfig& config,
+ float* const* buffer);
+
+ // APM input/output settings.
+ StreamConfig input_config_;
+ StreamConfig reverse_config_;
+ StreamConfig output_config_;
+
+ // Input file format.
+ const std::string input_file_name_;
+ ResampleInputAudioFile input_audio_;
+ const int input_file_channels_;
+
+ // Reverse file format.
+ const std::string reverse_file_name_;
+ ResampleInputAudioFile reverse_audio_;
+ const int reverse_file_channels_;
+
+ // Buffer for APM input/output.
+ rtc::scoped_ptr<ChannelBuffer<float>> input_;
+ rtc::scoped_ptr<ChannelBuffer<float>> reverse_;
+ rtc::scoped_ptr<ChannelBuffer<float>> output_;
+
+ rtc::scoped_ptr<AudioProcessing> apm_;
+
+ const std::string dump_file_name_;
+};
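+
+// A plausible flow for the tests that follow (a sketch):
+//   DebugDumpGenerator generator(config);
+//   generator.StartRecording();
+//   generator.Process(100);  // 100 blocks of audio.
+//   generator.StopRecording();
+//   VerifyDebugDump(generator.dump_file_name());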
+
+DebugDumpGenerator::DebugDumpGenerator(const std::string& input_file_name,
+ int input_rate_hz,
+ int input_channels,
+ const std::string& reverse_file_name,
+ int reverse_rate_hz,
+ int reverse_channels,
+ const Config& config,
+ const std::string& dump_file_name)
+ : input_config_(input_rate_hz, input_channels),
+ reverse_config_(reverse_rate_hz, reverse_channels),
+ output_config_(input_rate_hz, input_channels),
+ input_audio_(input_file_name, input_rate_hz, input_rate_hz),
+ input_file_channels_(input_channels),
+ reverse_audio_(reverse_file_name, reverse_rate_hz, reverse_rate_hz),
+ reverse_file_channels_(reverse_channels),
+ input_(new ChannelBuffer<float>(input_config_.num_frames(),
+ input_config_.num_channels())),
+ reverse_(new ChannelBuffer<float>(reverse_config_.num_frames(),
+ reverse_config_.num_channels())),
+ output_(new ChannelBuffer<float>(output_config_.num_frames(),
+ output_config_.num_channels())),
+ apm_(AudioProcessing::Create(config)),
+ dump_file_name_(dump_file_name) {
+}
+
+DebugDumpGenerator::DebugDumpGenerator(const Config& config)
+ : DebugDumpGenerator(ResourcePath("near32_stereo", "pcm"), 32000, 2,
+ ResourcePath("far32_stereo", "pcm"), 32000, 2,
+ config,
+ TempFilename(OutputPath(), "debug_aec")) {
+}
+
+DebugDumpGenerator::~DebugDumpGenerator() {
+ remove(dump_file_name_.c_str());
+}
+
+void DebugDumpGenerator::SetInputRate(int rate_hz) {
+ input_audio_.set_output_rate_hz(rate_hz);
+ input_config_.set_sample_rate_hz(rate_hz);
+ MaybeResetBuffer(&input_, input_config_);
+}
+
+void DebugDumpGenerator::ForceInputMono(bool mono) {
+ const int channels = mono ? 1 : input_file_channels_;
+ input_config_.set_num_channels(channels);
+ MaybeResetBuffer(&input_, input_config_);
+}
+
+void DebugDumpGenerator::SetReverseRate(int rate_hz) {
+ reverse_audio_.set_output_rate_hz(rate_hz);
+ reverse_config_.set_sample_rate_hz(rate_hz);
+ MaybeResetBuffer(&reverse_, reverse_config_);
+}
+
+void DebugDumpGenerator::ForceReverseMono(bool mono) {
+ const int channels = mono ? 1 : reverse_file_channels_;
+ reverse_config_.set_num_channels(channels);
+ MaybeResetBuffer(&reverse_, reverse_config_);
+}
+
+void DebugDumpGenerator::SetOutputRate(int rate_hz) {
+ output_config_.set_sample_rate_hz(rate_hz);
+ MaybeResetBuffer(&output_, output_config_);
+}
+
+void DebugDumpGenerator::SetOutputChannels(int channels) {
+ output_config_.set_num_channels(channels);
+ MaybeResetBuffer(&output_, output_config_);
+}
+
+void DebugDumpGenerator::StartRecording() {
+ apm_->StartDebugRecording(dump_file_name_.c_str());
+}
+
+void DebugDumpGenerator::Process(size_t num_blocks) {
+ for (size_t i = 0; i < num_blocks; ++i) {
+ ReadAndDeinterleave(&reverse_audio_, reverse_file_channels_,
+ reverse_config_, reverse_->channels());
+ ReadAndDeinterleave(&input_audio_, input_file_channels_, input_config_,
+ input_->channels());
+ RTC_CHECK_EQ(AudioProcessing::kNoError, apm_->set_stream_delay_ms(100));
+ apm_->set_stream_key_pressed(i % 10 == 9);
+ RTC_CHECK_EQ(AudioProcessing::kNoError,
+ apm_->ProcessStream(input_->channels(), input_config_,
+ output_config_, output_->channels()));
+
+ RTC_CHECK_EQ(AudioProcessing::kNoError,
+ apm_->ProcessReverseStream(reverse_->channels(),
+ reverse_config_,
+ reverse_config_,
+ reverse_->channels()));
+ }
+}
+
+void DebugDumpGenerator::StopRecording() {
+ apm_->StopDebugRecording();
+}
+
+void DebugDumpGenerator::ReadAndDeinterleave(ResampleInputAudioFile* audio,
+ int channels,
+ const StreamConfig& config,
+ float* const* buffer) {
+ const size_t num_frames = config.num_frames();
+ const int out_channels = config.num_channels();
+
+ std::vector<int16_t> signal(channels * num_frames);
+
+ audio->Read(num_frames * channels, &signal[0]);
+
+  // We only allow reducing the number of channels by discarding some of them.
+ RTC_CHECK_LE(out_channels, channels);
+ for (int channel = 0; channel < out_channels; ++channel) {
+ for (size_t i = 0; i < num_frames; ++i) {
+ buffer[channel][i] = S16ToFloat(signal[i * channels + channel]);
+ }
+ }
+}
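+
+// (ReadAndDeinterleave above assumes interleaved input: sample i of file
+// channel c sits at signal[i * channels + c], so with two file channels
+// frame 0 is {signal[0], signal[1]}.)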
+
+} // namespace
+
+class DebugDumpTest : public ::testing::Test {
+ public:
+ DebugDumpTest();
+
+  // VerifyDebugDump replays a debug dump through an APM instance and verifies
+  // that the result is bit-exact with the output channel in the dump. This is
+  // only guaranteed if the debug dump is started on the first frame.
+ void VerifyDebugDump(const std::string& dump_file_name);
+
+ private:
+  // The following functions are facilities for replaying debug dumps.
+ void OnInitEvent(const audioproc::Init& msg);
+ void OnStreamEvent(const audioproc::Stream& msg);
+ void OnReverseStreamEvent(const audioproc::ReverseStream& msg);
+ void OnConfigEvent(const audioproc::Config& msg);
+
+ void MaybeRecreateApm(const audioproc::Config& msg);
+ void ConfigureApm(const audioproc::Config& msg);
+
+ // Buffer for APM input/output.
+ rtc::scoped_ptr<ChannelBuffer<float>> input_;
+ rtc::scoped_ptr<ChannelBuffer<float>> reverse_;
+ rtc::scoped_ptr<ChannelBuffer<float>> output_;
+
+ rtc::scoped_ptr<AudioProcessing> apm_;
+
+ StreamConfig input_config_;
+ StreamConfig reverse_config_;
+ StreamConfig output_config_;
+};
+
+DebugDumpTest::DebugDumpTest()
+ : input_(nullptr), // will be created upon usage.
+ reverse_(nullptr),
+ output_(nullptr),
+ apm_(nullptr) {
+}
+
+void DebugDumpTest::VerifyDebugDump(const std::string& in_filename) {
+ FILE* in_file = fopen(in_filename.c_str(), "rb");
+ ASSERT_TRUE(in_file);
+ audioproc::Event event_msg;
+
+ while (ReadMessageFromFile(in_file, &event_msg)) {
+ switch (event_msg.type()) {
+ case audioproc::Event::INIT:
+ OnInitEvent(event_msg.init());
+ break;
+ case audioproc::Event::STREAM:
+ OnStreamEvent(event_msg.stream());
+ break;
+ case audioproc::Event::REVERSE_STREAM:
+ OnReverseStreamEvent(event_msg.reverse_stream());
+ break;
+ case audioproc::Event::CONFIG:
+ OnConfigEvent(event_msg.config());
+ break;
+ case audioproc::Event::UNKNOWN_EVENT:
+        // We do not currently expect to receive an UNKNOWN event.
+ FAIL();
+ }
+ }
+ fclose(in_file);
+}
+
+// OnInitEvent resets the input/output/reverse channel formats.
+void DebugDumpTest::OnInitEvent(const audioproc::Init& msg) {
+ ASSERT_TRUE(msg.has_num_input_channels());
+ ASSERT_TRUE(msg.has_output_sample_rate());
+ ASSERT_TRUE(msg.has_num_output_channels());
+ ASSERT_TRUE(msg.has_reverse_sample_rate());
+ ASSERT_TRUE(msg.has_num_reverse_channels());
+
+ input_config_ = StreamConfig(msg.sample_rate(), msg.num_input_channels());
+ output_config_ =
+ StreamConfig(msg.output_sample_rate(), msg.num_output_channels());
+ reverse_config_ =
+ StreamConfig(msg.reverse_sample_rate(), msg.num_reverse_channels());
+
+ MaybeResetBuffer(&input_, input_config_);
+ MaybeResetBuffer(&output_, output_config_);
+ MaybeResetBuffer(&reverse_, reverse_config_);
+}
+
+// OnStreamEvent replays an input signal and verifies the output.
+void DebugDumpTest::OnStreamEvent(const audioproc::Stream& msg) {
+ // APM should have been created.
+ ASSERT_TRUE(apm_.get());
+
+ EXPECT_NOERR(apm_->gain_control()->set_stream_analog_level(msg.level()));
+ EXPECT_NOERR(apm_->set_stream_delay_ms(msg.delay()));
+ apm_->echo_cancellation()->set_stream_drift_samples(msg.drift());
+ if (msg.has_keypress())
+ apm_->set_stream_key_pressed(msg.keypress());
+ else
+ apm_->set_stream_key_pressed(true);
+
+ ASSERT_EQ(input_config_.num_channels(),
+ static_cast<size_t>(msg.input_channel_size()));
+ ASSERT_EQ(input_config_.num_frames() * sizeof(float),
+ msg.input_channel(0).size());
+
+ for (int i = 0; i < msg.input_channel_size(); ++i) {
+ memcpy(input_->channels()[i], msg.input_channel(i).data(),
+ msg.input_channel(i).size());
+ }
+
+ ASSERT_EQ(AudioProcessing::kNoError,
+ apm_->ProcessStream(input_->channels(), input_config_,
+ output_config_, output_->channels()));
+
+ // Check that output of APM is bit-exact to the output in the dump.
+ ASSERT_EQ(output_config_.num_channels(),
+ static_cast<size_t>(msg.output_channel_size()));
+ ASSERT_EQ(output_config_.num_frames() * sizeof(float),
+ msg.output_channel(0).size());
+ for (int i = 0; i < msg.output_channel_size(); ++i) {
+ ASSERT_EQ(0, memcmp(output_->channels()[i], msg.output_channel(i).data(),
+ msg.output_channel(i).size()));
+ }
+}
+
+void DebugDumpTest::OnReverseStreamEvent(const audioproc::ReverseStream& msg) {
+ // APM should have been created.
+ ASSERT_TRUE(apm_.get());
+
+ ASSERT_GT(msg.channel_size(), 0);
+ ASSERT_EQ(reverse_config_.num_channels(),
+ static_cast<size_t>(msg.channel_size()));
+ ASSERT_EQ(reverse_config_.num_frames() * sizeof(float),
+ msg.channel(0).size());
+
+ for (int i = 0; i < msg.channel_size(); ++i) {
+ memcpy(reverse_->channels()[i], msg.channel(i).data(),
+ msg.channel(i).size());
+ }
+
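+ // The reverse stream is processed in place; the dump stores no separate
+ // reverse output to compare against.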
+ ASSERT_EQ(AudioProcessing::kNoError,
+ apm_->ProcessReverseStream(reverse_->channels(),
+ reverse_config_,
+ reverse_config_,
+ reverse_->channels()));
+}
+
+void DebugDumpTest::OnConfigEvent(const audioproc::Config& msg) {
+ MaybeRecreateApm(msg);
+ ConfigureApm(msg);
+}
+
+void DebugDumpTest::MaybeRecreateApm(const audioproc::Config& msg) {
+ // These configurations cannot be changed on the fly.
+ Config config;
+ ASSERT_TRUE(msg.has_aec_delay_agnostic_enabled());
+ config.Set<DelayAgnostic>(
+ new DelayAgnostic(msg.aec_delay_agnostic_enabled()));
+
+ ASSERT_TRUE(msg.has_noise_robust_agc_enabled());
+ config.Set<ExperimentalAgc>(
+ new ExperimentalAgc(msg.noise_robust_agc_enabled()));
+
+ ASSERT_TRUE(msg.has_transient_suppression_enabled());
+ config.Set<ExperimentalNs>(
+ new ExperimentalNs(msg.transient_suppression_enabled()));
+
+ ASSERT_TRUE(msg.has_aec_extended_filter_enabled());
+ config.Set<ExtendedFilter>(new ExtendedFilter(
+ msg.aec_extended_filter_enabled()));
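+
+ // Note: Config::Set() is assumed to take ownership of the heap-allocated
+ // settings objects above, so the raw |new| calls do not leak.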
+
+ // We create APM only once, since these fields are not expected to change
+ // in the current implementation.
+ if (!apm_.get()) {
+ apm_.reset(AudioProcessing::Create(config));
+ }
+}
+
+void DebugDumpTest::ConfigureApm(const audioproc::Config& msg) {
+ // AEC configs.
+ ASSERT_TRUE(msg.has_aec_enabled());
+ EXPECT_EQ(AudioProcessing::kNoError,
+ apm_->echo_cancellation()->Enable(msg.aec_enabled()));
+
+ ASSERT_TRUE(msg.has_aec_drift_compensation_enabled());
+ EXPECT_EQ(AudioProcessing::kNoError,
+ apm_->echo_cancellation()->enable_drift_compensation(
+ msg.aec_drift_compensation_enabled()));
+
+ ASSERT_TRUE(msg.has_aec_suppression_level());
+ EXPECT_EQ(AudioProcessing::kNoError,
+ apm_->echo_cancellation()->set_suppression_level(
+ static_cast<EchoCancellation::SuppressionLevel>(
+ msg.aec_suppression_level())));
+
+ // AECM configs.
+ ASSERT_TRUE(msg.has_aecm_enabled());
+ EXPECT_EQ(AudioProcessing::kNoError,
+ apm_->echo_control_mobile()->Enable(msg.aecm_enabled()));
+
+ ASSERT_TRUE(msg.has_aecm_comfort_noise_enabled());
+ EXPECT_EQ(AudioProcessing::kNoError,
+ apm_->echo_control_mobile()->enable_comfort_noise(
+ msg.aecm_comfort_noise_enabled()));
+
+ ASSERT_TRUE(msg.has_aecm_routing_mode());
+ EXPECT_EQ(AudioProcessing::kNoError,
+ apm_->echo_control_mobile()->set_routing_mode(
+ static_cast<EchoControlMobile::RoutingMode>(
+ msg.aecm_routing_mode())));
+
+ // AGC configs.
+ ASSERT_TRUE(msg.has_agc_enabled());
+ EXPECT_EQ(AudioProcessing::kNoError,
+ apm_->gain_control()->Enable(msg.agc_enabled()));
+
+ ASSERT_TRUE(msg.has_agc_mode());
+ EXPECT_EQ(AudioProcessing::kNoError,
+ apm_->gain_control()->set_mode(
+ static_cast<GainControl::Mode>(msg.agc_mode())));
+
+ ASSERT_TRUE(msg.has_agc_limiter_enabled());
+ EXPECT_EQ(AudioProcessing::kNoError,
+ apm_->gain_control()->enable_limiter(msg.agc_limiter_enabled()));
+
+ // HPF configs.
+ ASSERT_TRUE(msg.has_hpf_enabled());
+ EXPECT_EQ(AudioProcessing::kNoError,
+ apm_->high_pass_filter()->Enable(msg.hpf_enabled()));
+
+ // NS configs.
+ ASSERT_TRUE(msg.has_ns_enabled());
+ EXPECT_EQ(AudioProcessing::kNoError,
+ apm_->noise_suppression()->Enable(msg.ns_enabled()));
+
+ ASSERT_TRUE(msg.has_ns_level());
+ EXPECT_EQ(AudioProcessing::kNoError,
+ apm_->noise_suppression()->set_level(
+ static_cast<NoiseSuppression::Level>(msg.ns_level())));
+}
+
+TEST_F(DebugDumpTest, SimpleCase) {
+ Config config;
+ DebugDumpGenerator generator(config);
+ generator.StartRecording();
+ generator.Process(100);
+ generator.StopRecording();
+ VerifyDebugDump(generator.dump_file_name());
+}
+
+TEST_F(DebugDumpTest, ChangeInputFormat) {
+ Config config;
+ DebugDumpGenerator generator(config);
+ generator.StartRecording();
+ generator.Process(100);
+ generator.SetInputRate(48000);
+
+ generator.ForceInputMono(true);
+ // The number of output channels must not be larger than the number of
+ // input channels; APM will fail otherwise.
+ generator.SetOutputChannels(1);
+
+ generator.Process(100);
+ generator.StopRecording();
+ VerifyDebugDump(generator.dump_file_name());
+}
+
+TEST_F(DebugDumpTest, ChangeReverseFormat) {
+ Config config;
+ DebugDumpGenerator generator(config);
+ generator.StartRecording();
+ generator.Process(100);
+ generator.SetReverseRate(48000);
+ generator.ForceReverseMono(true);
+ generator.Process(100);
+ generator.StopRecording();
+ VerifyDebugDump(generator.dump_file_name());
+}
+
+TEST_F(DebugDumpTest, ChangeOutputFormat) {
+ Config config;
+ DebugDumpGenerator generator(config);
+ generator.StartRecording();
+ generator.Process(100);
+ generator.SetOutputRate(48000);
+ generator.SetOutputChannels(1);
+ generator.Process(100);
+ generator.StopRecording();
+ VerifyDebugDump(generator.dump_file_name());
+}
+
+TEST_F(DebugDumpTest, ToggleAec) {
+ Config config;
+ DebugDumpGenerator generator(config);
+ generator.StartRecording();
+ generator.Process(100);
+
+ EchoCancellation* aec = generator.apm()->echo_cancellation();
+ EXPECT_EQ(AudioProcessing::kNoError, aec->Enable(!aec->is_enabled()));
+
+ generator.Process(100);
+ generator.StopRecording();
+ VerifyDebugDump(generator.dump_file_name());
+}
+
+TEST_F(DebugDumpTest, ToggleDelayAgnosticAec) {
+ Config config;
+ config.Set<DelayAgnostic>(new DelayAgnostic(true));
+ DebugDumpGenerator generator(config);
+ generator.StartRecording();
+ generator.Process(100);
+
+ EchoCancellation* aec = generator.apm()->echo_cancellation();
+ EXPECT_EQ(AudioProcessing::kNoError, aec->Enable(!aec->is_enabled()));
+
+ generator.Process(100);
+ generator.StopRecording();
+ VerifyDebugDump(generator.dump_file_name());
+}
+
+TEST_F(DebugDumpTest, ToggleAecLevel) {
+ Config config;
+ DebugDumpGenerator generator(config);
+ EchoCancellation* aec = generator.apm()->echo_cancellation();
+ EXPECT_EQ(AudioProcessing::kNoError, aec->Enable(true));
+ EXPECT_EQ(AudioProcessing::kNoError,
+ aec->set_suppression_level(EchoCancellation::kLowSuppression));
+ generator.StartRecording();
+ generator.Process(100);
+
+ EXPECT_EQ(AudioProcessing::kNoError,
+ aec->set_suppression_level(EchoCancellation::kHighSuppression));
+ generator.Process(100);
+ generator.StopRecording();
+ VerifyDebugDump(generator.dump_file_name());
+}
+
+#if defined(WEBRTC_ANDROID)
+// AGC may not be supported on Android.
+#define MAYBE_ToggleAgc DISABLED_ToggleAgc
+#else
+#define MAYBE_ToggleAgc ToggleAgc
+#endif
+TEST_F(DebugDumpTest, MAYBE_ToggleAgc) {
+ Config config;
+ DebugDumpGenerator generator(config);
+ generator.StartRecording();
+ generator.Process(100);
+
+ GainControl* agc = generator.apm()->gain_control();
+ EXPECT_EQ(AudioProcessing::kNoError, agc->Enable(!agc->is_enabled()));
+
+ generator.Process(100);
+ generator.StopRecording();
+ VerifyDebugDump(generator.dump_file_name());
+}
+
+TEST_F(DebugDumpTest, ToggleNs) {
+ Config config;
+ DebugDumpGenerator generator(config);
+ generator.StartRecording();
+ generator.Process(100);
+
+ NoiseSuppression* ns = generator.apm()->noise_suppression();
+ EXPECT_EQ(AudioProcessing::kNoError, ns->Enable(!ns->is_enabled()));
+
+ generator.Process(100);
+ generator.StopRecording();
+ VerifyDebugDump(generator.dump_file_name());
+}
+
+TEST_F(DebugDumpTest, TransientSuppressionOn) {
+ Config config;
+ config.Set<ExperimentalNs>(new ExperimentalNs(true));
+ DebugDumpGenerator generator(config);
+ generator.StartRecording();
+ generator.Process(100);
+ generator.StopRecording();
+ VerifyDebugDump(generator.dump_file_name());
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/webrtc/modules/audio_processing/test/process_test.cc b/webrtc/modules/audio_processing/test/process_test.cc
index 43165404c8..6e20a787e7 100644
--- a/webrtc/modules/audio_processing/test/process_test.cc
+++ b/webrtc/modules/audio_processing/test/process_test.cc
@@ -17,12 +17,13 @@
#include <algorithm>
+#include "webrtc/base/format_macros.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/common.h"
#include "webrtc/modules/audio_processing/include/audio_processing.h"
#include "webrtc/modules/audio_processing/test/protobuf_utils.h"
#include "webrtc/modules/audio_processing/test/test_utils.h"
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/system_wrappers/include/cpu_features_wrapper.h"
#include "webrtc/system_wrappers/include/tick_util.h"
#include "webrtc/test/testsupport/fileutils.h"
@@ -159,9 +160,9 @@ void void_main(int argc, char* argv[]) {
int32_t sample_rate_hz = 16000;
- int num_capture_input_channels = 1;
- int num_capture_output_channels = 1;
- int num_render_channels = 1;
+ size_t num_capture_input_channels = 1;
+ size_t num_capture_output_channels = 1;
+ size_t num_render_channels = 1;
int samples_per_channel = sample_rate_hz / 100;
@@ -207,14 +208,14 @@ void void_main(int argc, char* argv[]) {
} else if (strcmp(argv[i], "-ch") == 0) {
i++;
ASSERT_LT(i + 1, argc) << "Specify number of channels after -ch";
- ASSERT_EQ(1, sscanf(argv[i], "%d", &num_capture_input_channels));
+ ASSERT_EQ(1, sscanf(argv[i], "%" PRIuS, &num_capture_input_channels));
i++;
- ASSERT_EQ(1, sscanf(argv[i], "%d", &num_capture_output_channels));
+ ASSERT_EQ(1, sscanf(argv[i], "%" PRIuS, &num_capture_output_channels));
} else if (strcmp(argv[i], "-rch") == 0) {
i++;
ASSERT_LT(i, argc) << "Specify number of channels after -rch";
- ASSERT_EQ(1, sscanf(argv[i], "%d", &num_render_channels));
+ ASSERT_EQ(1, sscanf(argv[i], "%" PRIuS, &num_render_channels));
} else if (strcmp(argv[i], "-aec") == 0) {
ASSERT_EQ(apm->kNoError, apm->echo_cancellation()->Enable(true));
@@ -447,10 +448,10 @@ void void_main(int argc, char* argv[]) {
if (verbose) {
printf("Sample rate: %d Hz\n", sample_rate_hz);
- printf("Primary channels: %d (in), %d (out)\n",
+ printf("Primary channels: %" PRIuS " (in), %" PRIuS " (out)\n",
num_capture_input_channels,
num_capture_output_channels);
- printf("Reverse channels: %d \n", num_render_channels);
+ printf("Reverse channels: %" PRIuS "\n", num_render_channels);
}
const std::string out_path = webrtc::test::OutputPath();
@@ -601,14 +602,18 @@ void void_main(int argc, char* argv[]) {
if (msg.has_output_sample_rate()) {
output_sample_rate = msg.output_sample_rate();
}
- output_layout = LayoutFromChannels(msg.num_output_channels());
- ASSERT_EQ(kNoErr, apm->Initialize(
- msg.sample_rate(),
- output_sample_rate,
- reverse_sample_rate,
- LayoutFromChannels(msg.num_input_channels()),
- output_layout,
- LayoutFromChannels(msg.num_reverse_channels())));
+ output_layout =
+ LayoutFromChannels(static_cast<size_t>(msg.num_output_channels()));
+ ASSERT_EQ(kNoErr,
+ apm->Initialize(
+ msg.sample_rate(),
+ output_sample_rate,
+ reverse_sample_rate,
+ LayoutFromChannels(
+ static_cast<size_t>(msg.num_input_channels())),
+ output_layout,
+ LayoutFromChannels(
+ static_cast<size_t>(msg.num_reverse_channels()))));
samples_per_channel = msg.sample_rate() / 100;
far_frame.sample_rate_hz_ = reverse_sample_rate;
@@ -636,11 +641,11 @@ void void_main(int argc, char* argv[]) {
}
if (!raw_output) {
- // The WAV file needs to be reset every time, because it cant change
- // it's sample rate or number of channels.
- output_wav_file.reset(new WavWriter(out_filename + ".wav",
- output_sample_rate,
- msg.num_output_channels()));
+ // The WAV file needs to be reset every time, because it can't change
+ // its sample rate or number of channels.
+ output_wav_file.reset(new WavWriter(
+ out_filename + ".wav", output_sample_rate,
+ static_cast<size_t>(msg.num_output_channels())));
}
} else if (event_msg.type() == Event::REVERSE_STREAM) {
@@ -1049,7 +1054,9 @@ void void_main(int argc, char* argv[]) {
}
}
}
- printf("100%% complete\r");
+ if (progress) {
+ printf("100%% complete\r");
+ }
if (aecm_echo_path_out_file != NULL) {
const size_t path_size =
diff --git a/webrtc/modules/audio_processing/test/test_utils.cc b/webrtc/modules/audio_processing/test/test_utils.cc
index 1b9ac3ce4c..0bd70126ae 100644
--- a/webrtc/modules/audio_processing/test/test_utils.cc
+++ b/webrtc/modules/audio_processing/test/test_utils.cc
@@ -8,6 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
+#include <utility>
+
#include "webrtc/base/checks.h"
#include "webrtc/modules/audio_processing/test/test_utils.h"
@@ -31,6 +33,35 @@ void RawFile::WriteSamples(const float* samples, size_t num_samples) {
fwrite(samples, sizeof(*samples), num_samples, file_handle_);
}
+ChannelBufferWavReader::ChannelBufferWavReader(rtc::scoped_ptr<WavReader> file)
+ : file_(std::move(file)) {}
+
+bool ChannelBufferWavReader::Read(ChannelBuffer<float>* buffer) {
+ RTC_CHECK_EQ(file_->num_channels(), buffer->num_channels());
+ interleaved_.resize(buffer->size());
+ if (file_->ReadSamples(interleaved_.size(), &interleaved_[0]) !=
+ interleaved_.size()) {
+ return false;
+ }
+
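+ // The WAV data is read in the float S16 range [-32768, 32767]; convert it
+ // to the [-1, 1] float range before deinterleaving.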
+ FloatS16ToFloat(&interleaved_[0], interleaved_.size(), &interleaved_[0]);
+ Deinterleave(&interleaved_[0], buffer->num_frames(), buffer->num_channels(),
+ buffer->channels());
+ return true;
+}
+
+ChannelBufferWavWriter::ChannelBufferWavWriter(rtc::scoped_ptr<WavWriter> file)
+ : file_(std::move(file)) {}
+
+void ChannelBufferWavWriter::Write(const ChannelBuffer<float>& buffer) {
+ RTC_CHECK_EQ(file_->num_channels(), buffer.num_channels());
+ interleaved_.resize(buffer.size());
+ Interleave(buffer.channels(), buffer.num_frames(), buffer.num_channels(),
+ &interleaved_[0]);
+ FloatToFloatS16(&interleaved_[0], interleaved_.size(), &interleaved_[0]);
+ file_->WriteSamples(&interleaved_[0], interleaved_.size());
+}
+
void WriteIntData(const int16_t* data,
size_t length,
WavWriter* wav_file,
@@ -44,8 +75,8 @@ void WriteIntData(const int16_t* data,
}
void WriteFloatData(const float* const* data,
- int samples_per_channel,
- int num_channels,
+ size_t samples_per_channel,
+ size_t num_channels,
WavWriter* wav_file,
RawFile* raw_file) {
size_t length = num_channels * samples_per_channel;
@@ -74,8 +105,8 @@ FILE* OpenFile(const std::string& filename, const char* mode) {
return file;
}
-int SamplesFromRate(int rate) {
- return AudioProcessing::kChunkSizeMs * rate / 1000;
+size_t SamplesFromRate(int rate) {
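+ // E.g. 16000 Hz yields 160 samples per chunk, assuming kChunkSizeMs == 10.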
+ return static_cast<size_t>(AudioProcessing::kChunkSizeMs * rate / 1000);
}
void SetFrameSampleRate(AudioFrame* frame,
@@ -85,35 +116,39 @@ void SetFrameSampleRate(AudioFrame* frame,
sample_rate_hz / 1000;
}
-AudioProcessing::ChannelLayout LayoutFromChannels(int num_channels) {
+AudioProcessing::ChannelLayout LayoutFromChannels(size_t num_channels) {
switch (num_channels) {
case 1:
return AudioProcessing::kMono;
case 2:
return AudioProcessing::kStereo;
default:
- assert(false);
+ RTC_CHECK(false);
return AudioProcessing::kMono;
}
}
-std::vector<Point> ParseArrayGeometry(const std::string& mic_positions,
- size_t num_mics) {
+std::vector<Point> ParseArrayGeometry(const std::string& mic_positions) {
const std::vector<float> values = ParseList<float>(mic_positions);
- RTC_CHECK_EQ(values.size(), 3 * num_mics)
- << "Could not parse mic_positions or incorrect number of points.";
+ const size_t num_mics =
+ rtc::CheckedDivExact(values.size(), static_cast<size_t>(3));
+ RTC_CHECK_GT(num_mics, 0u) << "mic_positions is not large enough.";
std::vector<Point> result;
result.reserve(num_mics);
for (size_t i = 0; i < values.size(); i += 3) {
- double x = values[i + 0];
- double y = values[i + 1];
- double z = values[i + 2];
- result.push_back(Point(x, y, z));
+ result.push_back(Point(values[i + 0], values[i + 1], values[i + 2]));
}
return result;
}
+std::vector<Point> ParseArrayGeometry(const std::string& mic_positions,
+ size_t num_mics) {
+ std::vector<Point> result = ParseArrayGeometry(mic_positions);
+ RTC_CHECK_EQ(result.size(), num_mics)
+ << "Could not parse mic_positions or incorrect number of points.";
+ return result;
+}
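+
+// Example (sketch): ParseArrayGeometry("0 0 0 0.05 0 0") yields two Points,
+// i.e. two mics placed 5 cm apart along the x-axis; the two-argument
+// overload would additionally check that exactly two points were parsed.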
} // namespace webrtc
diff --git a/webrtc/modules/audio_processing/test/test_utils.h b/webrtc/modules/audio_processing/test/test_utils.h
index 8dd380b15d..e23beb66f4 100644
--- a/webrtc/modules/audio_processing/test/test_utils.h
+++ b/webrtc/modules/audio_processing/test/test_utils.h
@@ -22,7 +22,7 @@
#include "webrtc/common_audio/channel_buffer.h"
#include "webrtc/common_audio/wav_file.h"
#include "webrtc/modules/audio_processing/include/audio_processing.h"
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
namespace webrtc {
@@ -43,28 +43,57 @@ class RawFile final {
RTC_DISALLOW_COPY_AND_ASSIGN(RawFile);
};
+// Reads ChannelBuffers from a provided WavReader.
+class ChannelBufferWavReader final {
+ public:
+ explicit ChannelBufferWavReader(rtc::scoped_ptr<WavReader> file);
+
+ // Reads data from the file according to the |buffer| format. Returns false if
+ // a full buffer can't be read from the file.
+ bool Read(ChannelBuffer<float>* buffer);
+
+ private:
+ rtc::scoped_ptr<WavReader> file_;
+ std::vector<float> interleaved_;
+
+ RTC_DISALLOW_COPY_AND_ASSIGN(ChannelBufferWavReader);
+};
+
+// Writes ChannelBuffers to a provided WavWriter.
+class ChannelBufferWavWriter final {
+ public:
+ explicit ChannelBufferWavWriter(rtc::scoped_ptr<WavWriter> file);
+ void Write(const ChannelBuffer<float>& buffer);
+
+ private:
+ rtc::scoped_ptr<WavWriter> file_;
+ std::vector<float> interleaved_;
+
+ RTC_DISALLOW_COPY_AND_ASSIGN(ChannelBufferWavWriter);
+};
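+
+// Example use (sketch, assuming a 48 kHz stereo ChannelBuffer<float> named
+// |buffer| whose size matches the file format):
+//   ChannelBufferWavWriter writer(rtc::scoped_ptr<WavWriter>(
+//       new WavWriter("out.wav", 48000, 2)));
+//   writer.Write(buffer);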
+
void WriteIntData(const int16_t* data,
size_t length,
WavWriter* wav_file,
RawFile* raw_file);
void WriteFloatData(const float* const* data,
- int samples_per_channel,
- int num_channels,
+ size_t samples_per_channel,
+ size_t num_channels,
WavWriter* wav_file,
RawFile* raw_file);
// Exits on failure; do not use in unit tests.
FILE* OpenFile(const std::string& filename, const char* mode);
-int SamplesFromRate(int rate);
+size_t SamplesFromRate(int rate);
void SetFrameSampleRate(AudioFrame* frame,
int sample_rate_hz);
template <typename T>
void SetContainerFormat(int sample_rate_hz,
- int num_channels,
+ size_t num_channels,
AudioFrame* frame,
rtc::scoped_ptr<ChannelBuffer<T> >* cb) {
SetFrameSampleRate(frame, sample_rate_hz);
@@ -72,14 +101,14 @@ void SetContainerFormat(int sample_rate_hz,
cb->reset(new ChannelBuffer<T>(frame->samples_per_channel_, num_channels));
}
-AudioProcessing::ChannelLayout LayoutFromChannels(int num_channels);
+AudioProcessing::ChannelLayout LayoutFromChannels(size_t num_channels);
template <typename T>
-float ComputeSNR(const T* ref, const T* test, int length, float* variance) {
+float ComputeSNR(const T* ref, const T* test, size_t length, float* variance) {
float mse = 0;
float mean = 0;
*variance = 0;
- for (int i = 0; i < length; ++i) {
+ for (size_t i = 0; i < length; ++i) {
T error = ref[i] - test[i];
mse += error * error;
*variance += ref[i] * ref[i];
@@ -118,6 +147,9 @@ std::vector<T> ParseList(const std::string& to_parse) {
std::vector<Point> ParseArrayGeometry(const std::string& mic_positions,
size_t num_mics);
+// Same as above, but without the num_mics check, for use when the expected
+// number of mics isn't known.
+std::vector<Point> ParseArrayGeometry(const std::string& mic_positions);
+
} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_PROCESSING_TEST_TEST_UTILS_H_
diff --git a/webrtc/modules/audio_processing/test/unpack.cc b/webrtc/modules/audio_processing/test/unpack.cc
index 24578e240c..8b2b082f97 100644
--- a/webrtc/modules/audio_processing/test/unpack.cc
+++ b/webrtc/modules/audio_processing/test/unpack.cc
@@ -17,6 +17,7 @@
#include "gflags/gflags.h"
#include "webrtc/audio_processing/debug.pb.h"
+#include "webrtc/base/format_macros.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/modules/audio_processing/test/protobuf_utils.h"
#include "webrtc/modules/audio_processing/test/test_utils.h"
@@ -76,12 +77,12 @@ int do_main(int argc, char* argv[]) {
Event event_msg;
int frame_count = 0;
- int reverse_samples_per_channel = 0;
- int input_samples_per_channel = 0;
- int output_samples_per_channel = 0;
- int num_reverse_channels = 0;
- int num_input_channels = 0;
- int num_output_channels = 0;
+ size_t reverse_samples_per_channel = 0;
+ size_t input_samples_per_channel = 0;
+ size_t output_samples_per_channel = 0;
+ size_t num_reverse_channels = 0;
+ size_t num_input_channels = 0;
+ size_t num_output_channels = 0;
rtc::scoped_ptr<WavWriter> reverse_wav_file;
rtc::scoped_ptr<WavWriter> input_wav_file;
rtc::scoped_ptr<WavWriter> output_wav_file;
@@ -117,7 +118,7 @@ int do_main(int argc, char* argv[]) {
}
rtc::scoped_ptr<const float* []> data(
new const float* [num_reverse_channels]);
- for (int i = 0; i < num_reverse_channels; ++i) {
+ for (size_t i = 0; i < num_reverse_channels; ++i) {
data[i] = reinterpret_cast<const float*>(msg.channel(i).data());
}
WriteFloatData(data.get(),
@@ -148,7 +149,7 @@ int do_main(int argc, char* argv[]) {
}
rtc::scoped_ptr<const float* []> data(
new const float* [num_input_channels]);
- for (int i = 0; i < num_input_channels; ++i) {
+ for (size_t i = 0; i < num_input_channels; ++i) {
data[i] = reinterpret_cast<const float*>(msg.input_channel(i).data());
}
WriteFloatData(data.get(),
@@ -172,7 +173,7 @@ int do_main(int argc, char* argv[]) {
}
rtc::scoped_ptr<const float* []> data(
new const float* [num_output_channels]);
- for (int i = 0; i < num_output_channels; ++i) {
+ for (size_t i = 0; i < num_output_channels; ++i) {
data[i] =
reinterpret_cast<const float*>(msg.output_channel(i).data());
}
@@ -268,11 +269,14 @@ int do_main(int argc, char* argv[]) {
" Reverse sample rate: %d\n",
reverse_sample_rate);
num_input_channels = msg.num_input_channels();
- fprintf(settings_file, " Input channels: %d\n", num_input_channels);
+ fprintf(settings_file, " Input channels: %" PRIuS "\n",
+ num_input_channels);
num_output_channels = msg.num_output_channels();
- fprintf(settings_file, " Output channels: %d\n", num_output_channels);
+ fprintf(settings_file, " Output channels: %" PRIuS "\n",
+ num_output_channels);
num_reverse_channels = msg.num_reverse_channels();
- fprintf(settings_file, " Reverse channels: %d\n", num_reverse_channels);
+ fprintf(settings_file, " Reverse channels: %" PRIuS "\n",
+ num_reverse_channels);
fprintf(settings_file, "\n");
@@ -283,9 +287,12 @@ int do_main(int argc, char* argv[]) {
output_sample_rate = input_sample_rate;
}
- reverse_samples_per_channel = reverse_sample_rate / 100;
- input_samples_per_channel = input_sample_rate / 100;
- output_samples_per_channel = output_sample_rate / 100;
+ reverse_samples_per_channel =
+ static_cast<size_t>(reverse_sample_rate / 100);
+ input_samples_per_channel =
+ static_cast<size_t>(input_sample_rate / 100);
+ output_samples_per_channel =
+ static_cast<size_t>(output_sample_rate / 100);
if (!FLAGS_raw) {
// The WAV files need to be reset every time, because they can't change
diff --git a/webrtc/modules/audio_processing/transient/file_utils_unittest.cc b/webrtc/modules/audio_processing/transient/file_utils_unittest.cc
index 7a035d2b41..7fb7d2d6a9 100644
--- a/webrtc/modules/audio_processing/transient/file_utils_unittest.cc
+++ b/webrtc/modules/audio_processing/transient/file_utils_unittest.cc
@@ -17,7 +17,6 @@
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/system_wrappers/include/file_wrapper.h"
#include "webrtc/test/testsupport/fileutils.h"
-#include "webrtc/test/testsupport/gtest_disable.h"
#include "webrtc/typedefs.h"
namespace webrtc {
@@ -59,7 +58,12 @@ class TransientFileUtilsTest: public ::testing::Test {
const std::string kTestFileNamef;
};
-TEST_F(TransientFileUtilsTest, DISABLED_ON_IOS(ConvertByteArrayToFloat)) {
+#if defined(WEBRTC_IOS)
+#define MAYBE_ConvertByteArrayToFloat DISABLED_ConvertByteArrayToFloat
+#else
+#define MAYBE_ConvertByteArrayToFloat ConvertByteArrayToFloat
+#endif
+TEST_F(TransientFileUtilsTest, MAYBE_ConvertByteArrayToFloat) {
float value = 0.0;
EXPECT_EQ(0, ConvertByteArrayToFloat(kPiBytesf, &value));
@@ -72,7 +76,12 @@ TEST_F(TransientFileUtilsTest, DISABLED_ON_IOS(ConvertByteArrayToFloat)) {
EXPECT_FLOAT_EQ(kAvogadro, value);
}
-TEST_F(TransientFileUtilsTest, DISABLED_ON_IOS(ConvertByteArrayToDouble)) {
+#if defined(WEBRTC_IOS)
+#define MAYBE_ConvertByteArrayToDouble DISABLED_ConvertByteArrayToDouble
+#else
+#define MAYBE_ConvertByteArrayToDouble ConvertByteArrayToDouble
+#endif
+TEST_F(TransientFileUtilsTest, MAYBE_ConvertByteArrayToDouble) {
double value = 0.0;
EXPECT_EQ(0, ConvertByteArrayToDouble(kPiBytes, &value));
@@ -85,7 +94,12 @@ TEST_F(TransientFileUtilsTest, DISABLED_ON_IOS(ConvertByteArrayToDouble)) {
EXPECT_DOUBLE_EQ(kAvogadro, value);
}
-TEST_F(TransientFileUtilsTest, DISABLED_ON_IOS(ConvertFloatToByteArray)) {
+#if defined(WEBRTC_IOS)
+#define MAYBE_ConvertFloatToByteArray DISABLED_ConvertFloatToByteArray
+#else
+#define MAYBE_ConvertFloatToByteArray ConvertFloatToByteArray
+#endif
+TEST_F(TransientFileUtilsTest, MAYBE_ConvertFloatToByteArray) {
rtc::scoped_ptr<uint8_t[]> bytes(new uint8_t[4]);
EXPECT_EQ(0, ConvertFloatToByteArray(kPi, bytes.get()));
@@ -98,7 +112,12 @@ TEST_F(TransientFileUtilsTest, DISABLED_ON_IOS(ConvertFloatToByteArray)) {
EXPECT_EQ(0, memcmp(bytes.get(), kAvogadroBytesf, 4));
}
-TEST_F(TransientFileUtilsTest, DISABLED_ON_IOS(ConvertDoubleToByteArray)) {
+#if defined(WEBRTC_IOS)
+#define MAYBE_ConvertDoubleToByteArray DISABLED_ConvertDoubleToByteArray
+#else
+#define MAYBE_ConvertDoubleToByteArray ConvertDoubleToByteArray
+#endif
+TEST_F(TransientFileUtilsTest, MAYBE_ConvertDoubleToByteArray) {
rtc::scoped_ptr<uint8_t[]> bytes(new uint8_t[8]);
EXPECT_EQ(0, ConvertDoubleToByteArray(kPi, bytes.get()));
@@ -111,7 +130,12 @@ TEST_F(TransientFileUtilsTest, DISABLED_ON_IOS(ConvertDoubleToByteArray)) {
EXPECT_EQ(0, memcmp(bytes.get(), kAvogadroBytes, 8));
}
-TEST_F(TransientFileUtilsTest, DISABLED_ON_IOS(ReadInt16BufferFromFile)) {
+#if defined(WEBRTC_IOS)
+#define MAYBE_ReadInt16BufferFromFile DISABLED_ReadInt16BufferFromFile
+#else
+#define MAYBE_ReadInt16BufferFromFile ReadInt16BufferFromFile
+#endif
+TEST_F(TransientFileUtilsTest, MAYBE_ReadInt16BufferFromFile) {
std::string test_filename = kTestFileName;
rtc::scoped_ptr<FileWrapper> file(FileWrapper::Create());
@@ -149,8 +173,13 @@ TEST_F(TransientFileUtilsTest, DISABLED_ON_IOS(ReadInt16BufferFromFile)) {
EXPECT_EQ(17631, buffer[kBufferLength - 1]);
}
-TEST_F(TransientFileUtilsTest,
- DISABLED_ON_IOS(ReadInt16FromFileToFloatBuffer)) {
+#if defined(WEBRTC_IOS)
+#define MAYBE_ReadInt16FromFileToFloatBuffer \
+ DISABLED_ReadInt16FromFileToFloatBuffer
+#else
+#define MAYBE_ReadInt16FromFileToFloatBuffer ReadInt16FromFileToFloatBuffer
+#endif
+TEST_F(TransientFileUtilsTest, MAYBE_ReadInt16FromFileToFloatBuffer) {
std::string test_filename = kTestFileName;
rtc::scoped_ptr<FileWrapper> file(FileWrapper::Create());
@@ -191,8 +220,13 @@ TEST_F(TransientFileUtilsTest,
EXPECT_DOUBLE_EQ(17631, buffer[kBufferLength - 1]);
}
-TEST_F(TransientFileUtilsTest,
- DISABLED_ON_IOS(ReadInt16FromFileToDoubleBuffer)) {
+#if defined(WEBRTC_IOS)
+#define MAYBE_ReadInt16FromFileToDoubleBuffer \
+ DISABLED_ReadInt16FromFileToDoubleBuffer
+#else
+#define MAYBE_ReadInt16FromFileToDoubleBuffer ReadInt16FromFileToDoubleBuffer
+#endif
+TEST_F(TransientFileUtilsTest, MAYBE_ReadInt16FromFileToDoubleBuffer) {
std::string test_filename = kTestFileName;
rtc::scoped_ptr<FileWrapper> file(FileWrapper::Create());
@@ -232,7 +266,12 @@ TEST_F(TransientFileUtilsTest,
EXPECT_DOUBLE_EQ(17631, buffer[kBufferLength - 1]);
}
-TEST_F(TransientFileUtilsTest, DISABLED_ON_IOS(ReadFloatBufferFromFile)) {
+#if defined(WEBRTC_IOS)
+#define MAYBE_ReadFloatBufferFromFile DISABLED_ReadFloatBufferFromFile
+#else
+#define MAYBE_ReadFloatBufferFromFile ReadFloatBufferFromFile
+#endif
+TEST_F(TransientFileUtilsTest, MAYBE_ReadFloatBufferFromFile) {
std::string test_filename = kTestFileNamef;
rtc::scoped_ptr<FileWrapper> file(FileWrapper::Create());
@@ -269,7 +308,12 @@ TEST_F(TransientFileUtilsTest, DISABLED_ON_IOS(ReadFloatBufferFromFile)) {
EXPECT_FLOAT_EQ(kAvogadro, buffer[2]);
}
-TEST_F(TransientFileUtilsTest, DISABLED_ON_IOS(ReadDoubleBufferFromFile)) {
+#if defined(WEBRTC_IOS)
+#define MAYBE_ReadDoubleBufferFromFile DISABLED_ReadDoubleBufferFromFile
+#else
+#define MAYBE_ReadDoubleBufferFromFile ReadDoubleBufferFromFile
+#endif
+TEST_F(TransientFileUtilsTest, MAYBE_ReadDoubleBufferFromFile) {
std::string test_filename = kTestFileName;
rtc::scoped_ptr<FileWrapper> file(FileWrapper::Create());
@@ -306,7 +350,12 @@ TEST_F(TransientFileUtilsTest, DISABLED_ON_IOS(ReadDoubleBufferFromFile)) {
EXPECT_DOUBLE_EQ(kAvogadro, buffer[2]);
}
-TEST_F(TransientFileUtilsTest, DISABLED_ON_IOS(WriteInt16BufferToFile)) {
+#if defined(WEBRTC_IOS)
+#define MAYBE_WriteInt16BufferToFile DISABLED_WriteInt16BufferToFile
+#else
+#define MAYBE_WriteInt16BufferToFile WriteInt16BufferToFile
+#endif
+TEST_F(TransientFileUtilsTest, MAYBE_WriteInt16BufferToFile) {
rtc::scoped_ptr<FileWrapper> file(FileWrapper::Create());
std::string kOutFileName = test::TempFilename(test::OutputPath(),
@@ -348,7 +397,12 @@ TEST_F(TransientFileUtilsTest, DISABLED_ON_IOS(WriteInt16BufferToFile)) {
kBufferLength * sizeof(written_buffer[0])));
}
-TEST_F(TransientFileUtilsTest, DISABLED_ON_IOS(WriteFloatBufferToFile)) {
+#if defined(WEBRTC_IOS)
+#define MAYBE_WriteFloatBufferToFile DISABLED_WriteFloatBufferToFile
+#else
+#define MAYBE_WriteFloatBufferToFile WriteFloatBufferToFile
+#endif
+TEST_F(TransientFileUtilsTest, MAYBE_WriteFloatBufferToFile) {
rtc::scoped_ptr<FileWrapper> file(FileWrapper::Create());
std::string kOutFileName = test::TempFilename(test::OutputPath(),
@@ -390,7 +444,12 @@ TEST_F(TransientFileUtilsTest, DISABLED_ON_IOS(WriteFloatBufferToFile)) {
kBufferLength * sizeof(written_buffer[0])));
}
-TEST_F(TransientFileUtilsTest, DISABLED_ON_IOS(WriteDoubleBufferToFile)) {
+#if defined(WEBRTC_IOS)
+#define MAYBE_WriteDoubleBufferToFile DISABLED_WriteDoubleBufferToFile
+#else
+#define MAYBE_WriteDoubleBufferToFile WriteDoubleBufferToFile
+#endif
+TEST_F(TransientFileUtilsTest, MAYBE_WriteDoubleBufferToFile) {
rtc::scoped_ptr<FileWrapper> file(FileWrapper::Create());
std::string kOutFileName = test::TempFilename(test::OutputPath(),
@@ -432,7 +491,12 @@ TEST_F(TransientFileUtilsTest, DISABLED_ON_IOS(WriteDoubleBufferToFile)) {
kBufferLength * sizeof(written_buffer[0])));
}
-TEST_F(TransientFileUtilsTest, DISABLED_ON_IOS(ExpectedErrorReturnValues)) {
+#if defined(WEBRTC_IOS)
+#define MAYBE_ExpectedErrorReturnValues DISABLED_ExpectedErrorReturnValues
+#else
+#define MAYBE_ExpectedErrorReturnValues ExpectedErrorReturnValues
+#endif
+TEST_F(TransientFileUtilsTest, MAYBE_ExpectedErrorReturnValues) {
std::string test_filename = kTestFileName;
double value;
diff --git a/webrtc/modules/audio_processing/transient/transient_detector_unittest.cc b/webrtc/modules/audio_processing/transient/transient_detector_unittest.cc
index 6a70a3f92c..b60077510b 100644
--- a/webrtc/modules/audio_processing/transient/transient_detector_unittest.cc
+++ b/webrtc/modules/audio_processing/transient/transient_detector_unittest.cc
@@ -19,8 +19,7 @@
#include "webrtc/modules/audio_processing/transient/file_utils.h"
#include "webrtc/system_wrappers/include/file_wrapper.h"
#include "webrtc/test/testsupport/fileutils.h"
-#include "webrtc/test/testsupport/gtest_disable.h"
- #include "webrtc/typedefs.h"
+#include "webrtc/typedefs.h"
namespace webrtc {
@@ -37,7 +36,11 @@ static const size_t kNumberOfSampleRates =
// The files contain all the results in double precision (Little endian).
// The audio files used with different sample rates are stored in the same
// directory.
-TEST(TransientDetectorTest, DISABLED_ON_IOS(CorrectnessBasedOnFiles)) {
+#if defined(WEBRTC_IOS)
+TEST(TransientDetectorTest, DISABLED_CorrectnessBasedOnFiles) {
+#else
+TEST(TransientDetectorTest, CorrectnessBasedOnFiles) {
+#endif
for (size_t i = 0; i < kNumberOfSampleRates; ++i) {
int sample_rate_hz = kSampleRatesHz[i];
diff --git a/webrtc/modules/audio_processing/transient/transient_suppression_test.cc b/webrtc/modules/audio_processing/transient/transient_suppression_test.cc
index 506abaf203..b7b7595abf 100644
--- a/webrtc/modules/audio_processing/transient/transient_suppression_test.cc
+++ b/webrtc/modules/audio_processing/transient/transient_suppression_test.cc
@@ -19,7 +19,7 @@
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/common_audio/include/audio_util.h"
#include "webrtc/modules/audio_processing/agc/agc.h"
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/test/testsupport/fileutils.h"
#include "webrtc/typedefs.h"
diff --git a/webrtc/modules/audio_processing/transient/wpd_tree_unittest.cc b/webrtc/modules/audio_processing/transient/wpd_tree_unittest.cc
index 7c99f4f161..e4e9048f88 100644
--- a/webrtc/modules/audio_processing/transient/wpd_tree_unittest.cc
+++ b/webrtc/modules/audio_processing/transient/wpd_tree_unittest.cc
@@ -19,7 +19,6 @@
#include "webrtc/modules/audio_processing/transient/file_utils.h"
#include "webrtc/system_wrappers/include/file_wrapper.h"
#include "webrtc/test/testsupport/fileutils.h"
-#include "webrtc/test/testsupport/gtest_disable.h"
namespace webrtc {
@@ -69,7 +68,11 @@ TEST(WPDTreeTest, Construction) {
// It also writes the results in its own set of files in the out directory.
// Matlab and output files contain all the results in double precision (Little
// endian) appended.
-TEST(WPDTreeTest, DISABLED_ON_IOS(CorrectnessBasedOnMatlabFiles)) {
+#if defined(WEBRTC_IOS)
+TEST(WPDTreeTest, DISABLED_CorrectnessBasedOnMatlabFiles) {
+#else
+TEST(WPDTreeTest, CorrectnessBasedOnMatlabFiles) {
+#endif
// 10 ms at 16000 Hz.
const size_t kTestBufferSize = 160;
const int kLevels = 3;
diff --git a/webrtc/modules/audio_processing/typing_detection.h b/webrtc/modules/audio_processing/typing_detection.h
index 5fa6456e9e..40608f885d 100644
--- a/webrtc/modules/audio_processing/typing_detection.h
+++ b/webrtc/modules/audio_processing/typing_detection.h
@@ -11,7 +11,7 @@
#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_TYPING_DETECTION_H_
#define WEBRTC_MODULES_AUDIO_PROCESSING_TYPING_DETECTION_H_
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/typedefs.h"
namespace webrtc {
diff --git a/webrtc/modules/audio_processing/vad/pitch_based_vad.cc b/webrtc/modules/audio_processing/vad/pitch_based_vad.cc
index 39ec37e6ec..fce144de6b 100644
--- a/webrtc/modules/audio_processing/vad/pitch_based_vad.cc
+++ b/webrtc/modules/audio_processing/vad/pitch_based_vad.cc
@@ -18,7 +18,7 @@
#include "webrtc/modules/audio_processing/vad/common.h"
#include "webrtc/modules/audio_processing/vad/noise_gmm_tables.h"
#include "webrtc/modules/audio_processing/vad/voice_gmm_tables.h"
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
namespace webrtc {
diff --git a/webrtc/modules/audio_processing/vad/standalone_vad.cc b/webrtc/modules/audio_processing/vad/standalone_vad.cc
index 468b8ff3f0..1209526a92 100644
--- a/webrtc/modules/audio_processing/vad/standalone_vad.cc
+++ b/webrtc/modules/audio_processing/vad/standalone_vad.cc
@@ -12,8 +12,8 @@
#include <assert.h>
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/modules/utility/interface/audio_frame_operations.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/utility/include/audio_frame_operations.h"
#include "webrtc/typedefs.h"
namespace webrtc {
diff --git a/webrtc/modules/audio_processing/vad/standalone_vad_unittest.cc b/webrtc/modules/audio_processing/vad/standalone_vad_unittest.cc
index 942008e733..1d1dcc7066 100644
--- a/webrtc/modules/audio_processing/vad/standalone_vad_unittest.cc
+++ b/webrtc/modules/audio_processing/vad/standalone_vad_unittest.cc
@@ -14,9 +14,8 @@
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/test/testsupport/fileutils.h"
-#include "webrtc/test/testsupport/gtest_disable.h"
namespace webrtc {
@@ -55,7 +54,11 @@ TEST(StandaloneVadTest, Api) {
EXPECT_EQ(kMode, vad->mode());
}
-TEST(StandaloneVadTest, DISABLED_ON_IOS(ActivityDetection)) {
+#if defined(WEBRTC_IOS)
+TEST(StandaloneVadTest, DISABLED_ActivityDetection) {
+#else
+TEST(StandaloneVadTest, ActivityDetection) {
+#endif
rtc::scoped_ptr<StandaloneVad> vad(StandaloneVad::Create());
const size_t kDataLength = kLength10Ms;
int16_t data[kDataLength] = {0};
diff --git a/webrtc/modules/audio_processing/vad/vad_audio_proc.cc b/webrtc/modules/audio_processing/vad/vad_audio_proc.cc
index 8535d1ff57..1a595597b6 100644
--- a/webrtc/modules/audio_processing/vad/vad_audio_proc.cc
+++ b/webrtc/modules/audio_processing/vad/vad_audio_proc.cc
@@ -23,7 +23,7 @@ extern "C" {
#include "webrtc/modules/audio_coding/codecs/isac/main/source/pitch_estimator.h"
#include "webrtc/modules/audio_coding/codecs/isac/main/source/structs.h"
}
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
namespace webrtc {
diff --git a/webrtc/modules/audio_processing/vad/vad_audio_proc_unittest.cc b/webrtc/modules/audio_processing/vad/vad_audio_proc_unittest.cc
index f509af476f..a8a4ead2e3 100644
--- a/webrtc/modules/audio_processing/vad/vad_audio_proc_unittest.cc
+++ b/webrtc/modules/audio_processing/vad/vad_audio_proc_unittest.cc
@@ -21,7 +21,7 @@
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/modules/audio_processing/vad/common.h"
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/test/testsupport/fileutils.h"
namespace webrtc {
diff --git a/webrtc/modules/audio_processing/vad/voice_activity_detector.cc b/webrtc/modules/audio_processing/vad/voice_activity_detector.cc
index ef56a3574c..fc9d103918 100644
--- a/webrtc/modules/audio_processing/vad/voice_activity_detector.cc
+++ b/webrtc/modules/audio_processing/vad/voice_activity_detector.cc
@@ -18,7 +18,7 @@ namespace webrtc {
namespace {
const size_t kMaxLength = 320;
-const int kNumChannels = 1;
+const size_t kNumChannels = 1;
const double kDefaultVoiceValue = 1.0;
const double kNeutralProbability = 0.5;
diff --git a/webrtc/modules/audio_processing/voice_detection_impl.cc b/webrtc/modules/audio_processing/voice_detection_impl.cc
index 374189e709..22d218c371 100644
--- a/webrtc/modules/audio_processing/voice_detection_impl.cc
+++ b/webrtc/modules/audio_processing/voice_detection_impl.cc
@@ -10,61 +10,61 @@
#include "webrtc/modules/audio_processing/voice_detection_impl.h"
-#include <assert.h>
-
#include "webrtc/common_audio/vad/include/webrtc_vad.h"
#include "webrtc/modules/audio_processing/audio_buffer.h"
-#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
namespace webrtc {
-
-typedef VadInst Handle;
-
-namespace {
-int MapSetting(VoiceDetection::Likelihood likelihood) {
- switch (likelihood) {
- case VoiceDetection::kVeryLowLikelihood:
- return 3;
- case VoiceDetection::kLowLikelihood:
- return 2;
- case VoiceDetection::kModerateLikelihood:
- return 1;
- case VoiceDetection::kHighLikelihood:
- return 0;
+class VoiceDetectionImpl::Vad {
+ public:
+ Vad() {
+ state_ = WebRtcVad_Create();
+ RTC_CHECK(state_);
+ int error = WebRtcVad_Init(state_);
+ RTC_DCHECK_EQ(0, error);
+ }
+ ~Vad() {
+ WebRtcVad_Free(state_);
}
- assert(false);
- return -1;
+ VadInst* state() { return state_; }
+ private:
+ VadInst* state_ = nullptr;
+ RTC_DISALLOW_COPY_AND_ASSIGN(Vad);
+};
+
+VoiceDetectionImpl::VoiceDetectionImpl(rtc::CriticalSection* crit)
+ : crit_(crit) {
+ RTC_DCHECK(crit);
}
-} // namespace
-
-VoiceDetectionImpl::VoiceDetectionImpl(const AudioProcessing* apm,
- CriticalSectionWrapper* crit)
- : ProcessingComponent(),
- apm_(apm),
- crit_(crit),
- stream_has_voice_(false),
- using_external_vad_(false),
- likelihood_(kLowLikelihood),
- frame_size_ms_(10),
- frame_size_samples_(0) {}
VoiceDetectionImpl::~VoiceDetectionImpl() {}
-int VoiceDetectionImpl::ProcessCaptureAudio(AudioBuffer* audio) {
- if (!is_component_enabled()) {
- return apm_->kNoError;
+void VoiceDetectionImpl::Initialize(int sample_rate_hz) {
+ rtc::CritScope cs(crit_);
+ sample_rate_hz_ = sample_rate_hz;
+ rtc::scoped_ptr<Vad> new_vad;
+ if (enabled_) {
+ new_vad.reset(new Vad());
}
+ vad_.swap(new_vad);
+ using_external_vad_ = false;
+ frame_size_samples_ =
+ static_cast<size_t>(frame_size_ms_ * sample_rate_hz_) / 1000;
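+ // E.g. 10 ms frames at 16000 Hz give frame_size_samples_ == 160.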
+ set_likelihood(likelihood_);
+}
+void VoiceDetectionImpl::ProcessCaptureAudio(AudioBuffer* audio) {
+ rtc::CritScope cs(crit_);
+ if (!enabled_) {
+ return;
+ }
if (using_external_vad_) {
using_external_vad_ = false;
- return apm_->kNoError;
+ return;
}
- assert(audio->num_frames_per_band() <= 160);
+ RTC_DCHECK_GE(160u, audio->num_frames_per_band());
// TODO(ajm): concatenate data in frame buffer here.
-
- int vad_ret = WebRtcVad_Process(static_cast<Handle*>(handle(0)),
- apm_->proc_split_sample_rate_hz(),
+ int vad_ret = WebRtcVad_Process(vad_->state(), sample_rate_hz_,
audio->mixed_low_pass_data(),
frame_size_samples_);
if (vad_ret == 0) {
@@ -74,103 +74,81 @@ int VoiceDetectionImpl::ProcessCaptureAudio(AudioBuffer* audio) {
stream_has_voice_ = true;
audio->set_activity(AudioFrame::kVadActive);
} else {
- return apm_->kUnspecifiedError;
+ RTC_NOTREACHED();
}
-
- return apm_->kNoError;
}
int VoiceDetectionImpl::Enable(bool enable) {
- CriticalSectionScoped crit_scoped(crit_);
- return EnableComponent(enable);
+ rtc::CritScope cs(crit_);
+ if (enabled_ != enable) {
+ enabled_ = enable;
+ Initialize(sample_rate_hz_);
+ }
+ return AudioProcessing::kNoError;
}
bool VoiceDetectionImpl::is_enabled() const {
- return is_component_enabled();
+ rtc::CritScope cs(crit_);
+ return enabled_;
}
int VoiceDetectionImpl::set_stream_has_voice(bool has_voice) {
+ rtc::CritScope cs(crit_);
using_external_vad_ = true;
stream_has_voice_ = has_voice;
- return apm_->kNoError;
+ return AudioProcessing::kNoError;
}
bool VoiceDetectionImpl::stream_has_voice() const {
+ rtc::CritScope cs(crit_);
// TODO(ajm): enable this assertion?
//assert(using_external_vad_ || is_component_enabled());
return stream_has_voice_;
}
int VoiceDetectionImpl::set_likelihood(VoiceDetection::Likelihood likelihood) {
- CriticalSectionScoped crit_scoped(crit_);
- if (MapSetting(likelihood) == -1) {
- return apm_->kBadParameterError;
- }
-
+ rtc::CritScope cs(crit_);
likelihood_ = likelihood;
- return Configure();
+ if (enabled_) {
+ int mode = 2;
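+ // WebRtcVad_set_mode() takes an aggressiveness mode from 0 to 3; the
+ // switch below maps the public Likelihood setting onto it.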
+ switch (likelihood) {
+ case VoiceDetection::kVeryLowLikelihood:
+ mode = 3;
+ break;
+ case VoiceDetection::kLowLikelihood:
+ mode = 2;
+ break;
+ case VoiceDetection::kModerateLikelihood:
+ mode = 1;
+ break;
+ case VoiceDetection::kHighLikelihood:
+ mode = 0;
+ break;
+ default:
+ RTC_NOTREACHED();
+ break;
+ }
+ int error = WebRtcVad_set_mode(vad_->state(), mode);
+ RTC_DCHECK_EQ(0, error);
+ }
+ return AudioProcessing::kNoError;
}
VoiceDetection::Likelihood VoiceDetectionImpl::likelihood() const {
+ rtc::CritScope cs(crit_);
return likelihood_;
}
int VoiceDetectionImpl::set_frame_size_ms(int size) {
- CriticalSectionScoped crit_scoped(crit_);
- assert(size == 10); // TODO(ajm): remove when supported.
- if (size != 10 &&
- size != 20 &&
- size != 30) {
- return apm_->kBadParameterError;
- }
-
+ rtc::CritScope cs(crit_);
+ RTC_DCHECK_EQ(10, size); // TODO(ajm): remove when supported.
frame_size_ms_ = size;
-
- return Initialize();
+ Initialize(sample_rate_hz_);
+ return AudioProcessing::kNoError;
}
int VoiceDetectionImpl::frame_size_ms() const {
+ rtc::CritScope cs(crit_);
return frame_size_ms_;
}
-
-int VoiceDetectionImpl::Initialize() {
- int err = ProcessingComponent::Initialize();
- if (err != apm_->kNoError || !is_component_enabled()) {
- return err;
- }
-
- using_external_vad_ = false;
- frame_size_samples_ = static_cast<size_t>(
- frame_size_ms_ * apm_->proc_split_sample_rate_hz() / 1000);
- // TODO(ajm): intialize frame buffer here.
-
- return apm_->kNoError;
-}
-
-void* VoiceDetectionImpl::CreateHandle() const {
- return WebRtcVad_Create();
-}
-
-void VoiceDetectionImpl::DestroyHandle(void* handle) const {
- WebRtcVad_Free(static_cast<Handle*>(handle));
-}
-
-int VoiceDetectionImpl::InitializeHandle(void* handle) const {
- return WebRtcVad_Init(static_cast<Handle*>(handle));
-}
-
-int VoiceDetectionImpl::ConfigureHandle(void* handle) const {
- return WebRtcVad_set_mode(static_cast<Handle*>(handle),
- MapSetting(likelihood_));
-}
-
-int VoiceDetectionImpl::num_handles_required() const {
- return 1;
-}
-
-int VoiceDetectionImpl::GetHandleError(void* handle) const {
- // The VAD has no get_error() function.
- assert(handle != NULL);
- return apm_->kUnspecifiedError;
-}
} // namespace webrtc
diff --git a/webrtc/modules/audio_processing/voice_detection_impl.h b/webrtc/modules/audio_processing/voice_detection_impl.h
index b18808316e..0d6d8cf14a 100644
--- a/webrtc/modules/audio_processing/voice_detection_impl.h
+++ b/webrtc/modules/audio_processing/voice_detection_impl.h
@@ -11,31 +11,27 @@
#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_VOICE_DETECTION_IMPL_H_
#define WEBRTC_MODULES_AUDIO_PROCESSING_VOICE_DETECTION_IMPL_H_
+#include "webrtc/base/constructormagic.h"
+#include "webrtc/base/criticalsection.h"
+#include "webrtc/base/scoped_ptr.h"
#include "webrtc/modules/audio_processing/include/audio_processing.h"
-#include "webrtc/modules/audio_processing/processing_component.h"
namespace webrtc {
class AudioBuffer;
-class CriticalSectionWrapper;
-class VoiceDetectionImpl : public VoiceDetection,
- public ProcessingComponent {
+class VoiceDetectionImpl : public VoiceDetection {
public:
- VoiceDetectionImpl(const AudioProcessing* apm, CriticalSectionWrapper* crit);
- virtual ~VoiceDetectionImpl();
+ explicit VoiceDetectionImpl(rtc::CriticalSection* crit);
+ ~VoiceDetectionImpl() override;
- int ProcessCaptureAudio(AudioBuffer* audio);
+ // TODO(peah): Fold into ctor, once public API is removed.
+ void Initialize(int sample_rate_hz);
+ void ProcessCaptureAudio(AudioBuffer* audio);
// VoiceDetection implementation.
- bool is_enabled() const override;
-
- // ProcessingComponent implementation.
- int Initialize() override;
-
- private:
- // VoiceDetection implementation.
int Enable(bool enable) override;
+ bool is_enabled() const override;
int set_stream_has_voice(bool has_voice) override;
bool stream_has_voice() const override;
int set_likelihood(Likelihood likelihood) override;
@@ -43,21 +39,18 @@ class VoiceDetectionImpl : public VoiceDetection,
int set_frame_size_ms(int size) override;
int frame_size_ms() const override;
- // ProcessingComponent implementation.
- void* CreateHandle() const override;
- int InitializeHandle(void* handle) const override;
- int ConfigureHandle(void* handle) const override;
- void DestroyHandle(void* handle) const override;
- int num_handles_required() const override;
- int GetHandleError(void* handle) const override;
-
- const AudioProcessing* apm_;
- CriticalSectionWrapper* crit_;
- bool stream_has_voice_;
- bool using_external_vad_;
- Likelihood likelihood_;
- int frame_size_ms_;
- size_t frame_size_samples_;
+ private:
+ class Vad;
+ rtc::CriticalSection* const crit_;
+ bool enabled_ GUARDED_BY(crit_) = false;
+ bool stream_has_voice_ GUARDED_BY(crit_) = false;
+ bool using_external_vad_ GUARDED_BY(crit_) = false;
+ Likelihood likelihood_ GUARDED_BY(crit_) = kLowLikelihood;
+ int frame_size_ms_ GUARDED_BY(crit_) = 10;
+ size_t frame_size_samples_ GUARDED_BY(crit_) = 0;
+ int sample_rate_hz_ GUARDED_BY(crit_) = 0;
+ rtc::scoped_ptr<Vad> vad_ GUARDED_BY(crit_);
+ RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(VoiceDetectionImpl);
};
} // namespace webrtc
diff --git a/webrtc/modules/bitrate_controller/BUILD.gn b/webrtc/modules/bitrate_controller/BUILD.gn
index 4ef536b572..5e3741ba93 100644
--- a/webrtc/modules/bitrate_controller/BUILD.gn
+++ b/webrtc/modules/bitrate_controller/BUILD.gn
@@ -10,7 +10,6 @@ import("../../build/webrtc.gni")
source_set("bitrate_controller") {
sources = [
- "bitrate_allocator.cc",
"bitrate_controller_impl.cc",
"bitrate_controller_impl.h",
"include/bitrate_allocator.h",
diff --git a/webrtc/modules/bitrate_controller/bitrate_allocator.cc b/webrtc/modules/bitrate_controller/bitrate_allocator.cc
deleted file mode 100644
index 0aec528cde..0000000000
--- a/webrtc/modules/bitrate_controller/bitrate_allocator.cc
+++ /dev/null
@@ -1,190 +0,0 @@
-/*
- * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- *
- */
-
-#include "webrtc/modules/bitrate_controller/include/bitrate_allocator.h"
-
-#include <algorithm>
-#include <utility>
-
-#include "webrtc/modules/bitrate_controller/include/bitrate_controller.h"
-
-namespace webrtc {
-
-// Allow packets to be transmitted in up to 2 times max video bitrate if the
-// bandwidth estimate allows it.
-const int kTransmissionMaxBitrateMultiplier = 2;
-const int kDefaultBitrateBps = 300000;
-
-BitrateAllocator::BitrateAllocator()
- : crit_sect_(CriticalSectionWrapper::CreateCriticalSection()),
- bitrate_observers_(),
- enforce_min_bitrate_(true),
- last_bitrate_bps_(kDefaultBitrateBps),
- last_fraction_loss_(0),
- last_rtt_(0) {
-}
-
-
-void BitrateAllocator::OnNetworkChanged(uint32_t bitrate,
- uint8_t fraction_loss,
- int64_t rtt) {
- CriticalSectionScoped lock(crit_sect_.get());
- last_bitrate_bps_ = bitrate;
- last_fraction_loss_ = fraction_loss;
- last_rtt_ = rtt;
- ObserverBitrateMap allocation = AllocateBitrates();
- for (const auto& kv : allocation)
- kv.first->OnNetworkChanged(kv.second, last_fraction_loss_, last_rtt_);
-}
-
-BitrateAllocator::ObserverBitrateMap BitrateAllocator::AllocateBitrates() {
- if (bitrate_observers_.empty())
- return ObserverBitrateMap();
-
- uint32_t sum_min_bitrates = 0;
- for (const auto& observer : bitrate_observers_)
- sum_min_bitrates += observer.second.min_bitrate;
- if (last_bitrate_bps_ <= sum_min_bitrates)
- return LowRateAllocation(last_bitrate_bps_);
- else
- return NormalRateAllocation(last_bitrate_bps_, sum_min_bitrates);
-}
-
-int BitrateAllocator::AddBitrateObserver(BitrateObserver* observer,
- uint32_t min_bitrate_bps,
- uint32_t max_bitrate_bps) {
- CriticalSectionScoped lock(crit_sect_.get());
-
- BitrateObserverConfList::iterator it =
- FindObserverConfigurationPair(observer);
-
- // Allow the max bitrate to be exceeded for FEC and retransmissions.
- // TODO(holmer): We have to get rid of this hack as it makes it difficult to
- // properly allocate bitrate. The allocator should instead distribute any
- // extra bitrate after all streams have maxed out.
- max_bitrate_bps *= kTransmissionMaxBitrateMultiplier;
- if (it != bitrate_observers_.end()) {
- // Update current configuration.
- it->second.min_bitrate = min_bitrate_bps;
- it->second.max_bitrate = max_bitrate_bps;
- } else {
- // Add new settings.
- bitrate_observers_.push_back(BitrateObserverConfiguration(
- observer, BitrateConfiguration(min_bitrate_bps, max_bitrate_bps)));
- bitrate_observers_modified_ = true;
- }
-
- ObserverBitrateMap allocation = AllocateBitrates();
- int new_observer_bitrate_bps = 0;
- for (auto& kv : allocation) {
- kv.first->OnNetworkChanged(kv.second, last_fraction_loss_, last_rtt_);
- if (kv.first == observer)
- new_observer_bitrate_bps = kv.second;
- }
- return new_observer_bitrate_bps;
-}
-
-void BitrateAllocator::RemoveBitrateObserver(BitrateObserver* observer) {
- CriticalSectionScoped lock(crit_sect_.get());
- BitrateObserverConfList::iterator it =
- FindObserverConfigurationPair(observer);
- if (it != bitrate_observers_.end()) {
- bitrate_observers_.erase(it);
- bitrate_observers_modified_ = true;
- }
-}
-
-void BitrateAllocator::GetMinMaxBitrateSumBps(int* min_bitrate_sum_bps,
- int* max_bitrate_sum_bps) const {
- *min_bitrate_sum_bps = 0;
- *max_bitrate_sum_bps = 0;
-
- CriticalSectionScoped lock(crit_sect_.get());
- for (const auto& observer : bitrate_observers_) {
- *min_bitrate_sum_bps += observer.second.min_bitrate;
- *max_bitrate_sum_bps += observer.second.max_bitrate;
- }
-}
-
-BitrateAllocator::BitrateObserverConfList::iterator
-BitrateAllocator::FindObserverConfigurationPair(
- const BitrateObserver* observer) {
- for (auto it = bitrate_observers_.begin(); it != bitrate_observers_.end();
- ++it) {
- if (it->first == observer)
- return it;
- }
- return bitrate_observers_.end();
-}
-
-void BitrateAllocator::EnforceMinBitrate(bool enforce_min_bitrate) {
- CriticalSectionScoped lock(crit_sect_.get());
- enforce_min_bitrate_ = enforce_min_bitrate;
-}
-
-BitrateAllocator::ObserverBitrateMap BitrateAllocator::NormalRateAllocation(
- uint32_t bitrate,
- uint32_t sum_min_bitrates) {
- uint32_t number_of_observers = bitrate_observers_.size();
- uint32_t bitrate_per_observer =
- (bitrate - sum_min_bitrates) / number_of_observers;
- // Use map to sort list based on max bitrate.
- ObserverSortingMap list_max_bitrates;
- for (const auto& observer : bitrate_observers_) {
- list_max_bitrates.insert(std::pair<uint32_t, ObserverConfiguration>(
- observer.second.max_bitrate,
- ObserverConfiguration(observer.first, observer.second.min_bitrate)));
- }
- ObserverBitrateMap allocation;
- ObserverSortingMap::iterator max_it = list_max_bitrates.begin();
- while (max_it != list_max_bitrates.end()) {
- number_of_observers--;
- uint32_t observer_allowance =
- max_it->second.min_bitrate + bitrate_per_observer;
- if (max_it->first < observer_allowance) {
- // We have more than enough for this observer.
- // Carry the remainder forward.
- uint32_t remainder = observer_allowance - max_it->first;
- if (number_of_observers != 0) {
- bitrate_per_observer += remainder / number_of_observers;
- }
- allocation[max_it->second.observer] = max_it->first;
- } else {
- allocation[max_it->second.observer] = observer_allowance;
- }
- list_max_bitrates.erase(max_it);
- // Prepare next iteration.
- max_it = list_max_bitrates.begin();
- }
- return allocation;
-}
-
-BitrateAllocator::ObserverBitrateMap BitrateAllocator::LowRateAllocation(
- uint32_t bitrate) {
- ObserverBitrateMap allocation;
- if (enforce_min_bitrate_) {
- // Min bitrate to all observers.
- for (const auto& observer : bitrate_observers_)
- allocation[observer.first] = observer.second.min_bitrate;
- } else {
- // Allocate up to |min_bitrate| to one observer at a time, until
- // |bitrate| is depleted.
- uint32_t remainder = bitrate;
- for (const auto& observer : bitrate_observers_) {
- uint32_t allocated_bitrate =
- std::min(remainder, observer.second.min_bitrate);
- allocation[observer.first] = allocated_bitrate;
- remainder -= allocated_bitrate;
- }
- }
- return allocation;
-}
-} // namespace webrtc
diff --git a/webrtc/modules/bitrate_controller/bitrate_allocator_unittest.cc b/webrtc/modules/bitrate_controller/bitrate_allocator_unittest.cc
deleted file mode 100644
index 4fc7e83b5b..0000000000
--- a/webrtc/modules/bitrate_controller/bitrate_allocator_unittest.cc
+++ /dev/null
@@ -1,212 +0,0 @@
-/*
- * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <algorithm>
-#include <vector>
-
-#include "testing/gtest/include/gtest/gtest.h"
-#include "webrtc/modules/bitrate_controller/include/bitrate_allocator.h"
-#include "webrtc/modules/bitrate_controller/include/bitrate_controller.h"
-
-namespace webrtc {
-
-class TestBitrateObserver : public BitrateObserver {
- public:
- TestBitrateObserver()
- : last_bitrate_(0), last_fraction_loss_(0), last_rtt_(0) {}
-
- virtual void OnNetworkChanged(uint32_t bitrate,
- uint8_t fraction_loss,
- int64_t rtt) {
- last_bitrate_ = bitrate;
- last_fraction_loss_ = fraction_loss;
- last_rtt_ = rtt;
- }
- uint32_t last_bitrate_;
- uint8_t last_fraction_loss_;
- int64_t last_rtt_;
-};
-
-class BitrateAllocatorTest : public ::testing::Test {
- protected:
- BitrateAllocatorTest() : allocator_(new BitrateAllocator()) {
- allocator_->OnNetworkChanged(300000u, 0, 0);
- }
- ~BitrateAllocatorTest() {}
-
- rtc::scoped_ptr<BitrateAllocator> allocator_;
-};
-
-TEST_F(BitrateAllocatorTest, UpdatingBitrateObserver) {
- TestBitrateObserver bitrate_observer;
- int start_bitrate =
- allocator_->AddBitrateObserver(&bitrate_observer, 100000, 1500000);
- EXPECT_EQ(300000, start_bitrate);
- allocator_->OnNetworkChanged(200000, 0, 0);
- EXPECT_EQ(200000u, bitrate_observer.last_bitrate_);
-
- // TODO(pbos): Expect capping to 1.5M instead of 3M when not boosting the max
- // bitrate for FEC/retransmissions (see TODO in BitrateAllocator).
- allocator_->OnNetworkChanged(4000000, 0, 0);
- EXPECT_EQ(3000000u, bitrate_observer.last_bitrate_);
- start_bitrate =
- allocator_->AddBitrateObserver(&bitrate_observer, 100000, 4000000);
- EXPECT_EQ(4000000, start_bitrate);
-
- start_bitrate =
- allocator_->AddBitrateObserver(&bitrate_observer, 100000, 1500000);
- EXPECT_EQ(3000000, start_bitrate);
- EXPECT_EQ(3000000u, bitrate_observer.last_bitrate_);
- allocator_->OnNetworkChanged(1500000, 0, 0);
- EXPECT_EQ(1500000u, bitrate_observer.last_bitrate_);
-}
-
-TEST_F(BitrateAllocatorTest, TwoBitrateObserversOneRtcpObserver) {
- TestBitrateObserver bitrate_observer_1;
- TestBitrateObserver bitrate_observer_2;
- int start_bitrate =
- allocator_->AddBitrateObserver(&bitrate_observer_1, 100000, 300000);
- EXPECT_EQ(300000, start_bitrate);
- start_bitrate =
- allocator_->AddBitrateObserver(&bitrate_observer_2, 200000, 300000);
- EXPECT_EQ(200000, start_bitrate);
-
- // Test a start bitrate that is lower than the sum of min bitrates. The min
- // bitrate will then be allocated to every observer.
- allocator_->OnNetworkChanged(200000, 0, 50);
- EXPECT_EQ(100000u, bitrate_observer_1.last_bitrate_);
- EXPECT_EQ(0, bitrate_observer_1.last_fraction_loss_);
- EXPECT_EQ(50, bitrate_observer_1.last_rtt_);
- EXPECT_EQ(200000u, bitrate_observer_2.last_bitrate_);
- EXPECT_EQ(0, bitrate_observer_2.last_fraction_loss_);
- EXPECT_EQ(50, bitrate_observer_2.last_rtt_);
-
- // Test a bitrate which should be distributed equally.
- allocator_->OnNetworkChanged(500000, 0, 50);
- const uint32_t kBitrateToShare = 500000 - 200000 - 100000;
- EXPECT_EQ(100000u + kBitrateToShare / 2, bitrate_observer_1.last_bitrate_);
- EXPECT_EQ(200000u + kBitrateToShare / 2, bitrate_observer_2.last_bitrate_);
-
- // Limited by 2x max bitrates since we leave room for FEC and retransmissions.
- allocator_->OnNetworkChanged(1500000, 0, 50);
- EXPECT_EQ(600000u, bitrate_observer_1.last_bitrate_);
- EXPECT_EQ(600000u, bitrate_observer_2.last_bitrate_);
-}
-
-class BitrateAllocatorTestNoEnforceMin : public ::testing::Test {
- protected:
- BitrateAllocatorTestNoEnforceMin() : allocator_(new BitrateAllocator()) {
- allocator_->EnforceMinBitrate(false);
- allocator_->OnNetworkChanged(300000u, 0, 0);
- }
- ~BitrateAllocatorTestNoEnforceMin() {}
-
- rtc::scoped_ptr<BitrateAllocator> allocator_;
-};
-
-// The following three tests verify that the EnforceMinBitrate() method works
-// as intended.
-TEST_F(BitrateAllocatorTestNoEnforceMin, OneBitrateObserver) {
- TestBitrateObserver bitrate_observer_1;
- int start_bitrate =
- allocator_->AddBitrateObserver(&bitrate_observer_1, 100000, 400000);
- EXPECT_EQ(300000, start_bitrate);
-
- // High REMB.
- allocator_->OnNetworkChanged(150000, 0, 0);
- EXPECT_EQ(150000u, bitrate_observer_1.last_bitrate_);
-
- // Low REMB.
- allocator_->OnNetworkChanged(10000, 0, 0);
- EXPECT_EQ(10000u, bitrate_observer_1.last_bitrate_);
-
- allocator_->RemoveBitrateObserver(&bitrate_observer_1);
-}
-
-TEST_F(BitrateAllocatorTestNoEnforceMin, ThreeBitrateObservers) {
- TestBitrateObserver bitrate_observer_1;
- TestBitrateObserver bitrate_observer_2;
- TestBitrateObserver bitrate_observer_3;
- // Set up the observers with min bitrates at 100000, 200000, and 300000.
- int start_bitrate =
- allocator_->AddBitrateObserver(&bitrate_observer_1, 100000, 400000);
- EXPECT_EQ(300000, start_bitrate);
-
- start_bitrate =
- allocator_->AddBitrateObserver(&bitrate_observer_2, 200000, 400000);
- EXPECT_EQ(200000, start_bitrate);
- EXPECT_EQ(100000u, bitrate_observer_1.last_bitrate_);
-
- start_bitrate =
- allocator_->AddBitrateObserver(&bitrate_observer_3, 300000, 400000);
- EXPECT_EQ(0, start_bitrate);
- EXPECT_EQ(100000u, bitrate_observer_1.last_bitrate_);
- EXPECT_EQ(200000u, bitrate_observer_2.last_bitrate_);
-
- // High REMB. Make sure the controllers get a fair share of the surplus
- // (i.e., what is left after each controller gets its min rate).
- allocator_->OnNetworkChanged(690000, 0, 0);
- // Verify that each observer gets its min rate (sum of min rates is 600000),
- // and that the remaining 90000 is divided equally among the three.
- uint32_t bitrate_to_share = 690000u - 100000u - 200000u - 300000u;
- EXPECT_EQ(100000u + bitrate_to_share / 3, bitrate_observer_1.last_bitrate_);
- EXPECT_EQ(200000u + bitrate_to_share / 3, bitrate_observer_2.last_bitrate_);
- EXPECT_EQ(300000u + bitrate_to_share / 3, bitrate_observer_3.last_bitrate_);
-
- // High REMB, but below the sum of min bitrates.
- allocator_->OnNetworkChanged(500000, 0, 0);
- // Verify that the first and second observers get their min bitrates, and the
- // third gets the remainder.
- EXPECT_EQ(100000u, bitrate_observer_1.last_bitrate_); // Min bitrate.
- EXPECT_EQ(200000u, bitrate_observer_2.last_bitrate_); // Min bitrate.
- EXPECT_EQ(200000u, bitrate_observer_3.last_bitrate_); // Remainder.
-
- // Low REMB.
- allocator_->OnNetworkChanged(10000, 0, 0);
- // Verify that the first observer gets all the rate, and the rest get zero.
- EXPECT_EQ(10000u, bitrate_observer_1.last_bitrate_);
- EXPECT_EQ(0u, bitrate_observer_2.last_bitrate_);
- EXPECT_EQ(0u, bitrate_observer_3.last_bitrate_);
-
- allocator_->RemoveBitrateObserver(&bitrate_observer_1);
- allocator_->RemoveBitrateObserver(&bitrate_observer_2);
- allocator_->RemoveBitrateObserver(&bitrate_observer_3);
-}
-
-TEST_F(BitrateAllocatorTest, ThreeBitrateObserversLowRembEnforceMin) {
- TestBitrateObserver bitrate_observer_1;
- TestBitrateObserver bitrate_observer_2;
- TestBitrateObserver bitrate_observer_3;
- int start_bitrate =
- allocator_->AddBitrateObserver(&bitrate_observer_1, 100000, 400000);
- EXPECT_EQ(300000, start_bitrate);
-
- start_bitrate =
- allocator_->AddBitrateObserver(&bitrate_observer_2, 200000, 400000);
- EXPECT_EQ(200000, start_bitrate);
- EXPECT_EQ(100000u, bitrate_observer_1.last_bitrate_);
-
- start_bitrate =
- allocator_->AddBitrateObserver(&bitrate_observer_3, 300000, 400000);
- EXPECT_EQ(300000, start_bitrate);
- EXPECT_EQ(100000, static_cast<int>(bitrate_observer_1.last_bitrate_));
- EXPECT_EQ(200000, static_cast<int>(bitrate_observer_2.last_bitrate_));
-
- // Low REMB. Verify that all observers still get their respective min bitrate.
- allocator_->OnNetworkChanged(1000, 0, 0);
- EXPECT_EQ(100000u, bitrate_observer_1.last_bitrate_); // Min cap.
- EXPECT_EQ(200000u, bitrate_observer_2.last_bitrate_); // Min cap.
- EXPECT_EQ(300000u, bitrate_observer_3.last_bitrate_); // Min cap.
-
- allocator_->RemoveBitrateObserver(&bitrate_observer_1);
- allocator_->RemoveBitrateObserver(&bitrate_observer_2);
- allocator_->RemoveBitrateObserver(&bitrate_observer_3);
-}
-} // namespace webrtc
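
For reference, the fair-share arithmetic exercised by ThreeBitrateObservers above works out as follows (all values taken from the deleted test):

    REMB                 690000
    sum of min bitrates  100000 + 200000 + 300000 = 600000
    surplus              90000, split equally -> 30000 per observer
    final allocations    130000 / 230000 / 330000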
diff --git a/webrtc/modules/bitrate_controller/bitrate_controller.gypi b/webrtc/modules/bitrate_controller/bitrate_controller.gypi
index 44c1b89ef2..3d86f2e32a 100644
--- a/webrtc/modules/bitrate_controller/bitrate_controller.gypi
+++ b/webrtc/modules/bitrate_controller/bitrate_controller.gypi
@@ -15,11 +15,9 @@
'<(webrtc_root)/system_wrappers/system_wrappers.gyp:system_wrappers',
],
'sources': [
- 'bitrate_allocator.cc',
'bitrate_controller_impl.cc',
'bitrate_controller_impl.h',
'include/bitrate_controller.h',
- 'include/bitrate_allocator.h',
'send_side_bandwidth_estimation.cc',
'send_side_bandwidth_estimation.h',
],
diff --git a/webrtc/modules/bitrate_controller/bitrate_controller_impl.cc b/webrtc/modules/bitrate_controller/bitrate_controller_impl.cc
index 8857ee4b4a..f8fd2bb987 100644
--- a/webrtc/modules/bitrate_controller/bitrate_controller_impl.cc
+++ b/webrtc/modules/bitrate_controller/bitrate_controller_impl.cc
@@ -14,7 +14,7 @@
#include <algorithm>
#include <utility>
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
namespace webrtc {
@@ -129,6 +129,11 @@ void BitrateControllerImpl::SetReservedBitrate(uint32_t reserved_bitrate_bps) {
MaybeTriggerOnNetworkChanged();
}
+void BitrateControllerImpl::SetEventLog(RtcEventLog* event_log) {
+ rtc::CritScope cs(&critsect_);
+ bandwidth_estimation_.SetEventLog(event_log);
+}
+
void BitrateControllerImpl::OnReceivedEstimatedBitrate(uint32_t bitrate) {
{
rtc::CritScope cs(&critsect_);
diff --git a/webrtc/modules/bitrate_controller/bitrate_controller_impl.h b/webrtc/modules/bitrate_controller/bitrate_controller_impl.h
index a33a0e6f04..b601899631 100644
--- a/webrtc/modules/bitrate_controller/bitrate_controller_impl.h
+++ b/webrtc/modules/bitrate_controller/bitrate_controller_impl.h
@@ -41,6 +41,8 @@ class BitrateControllerImpl : public BitrateController {
void SetReservedBitrate(uint32_t reserved_bitrate_bps) override;
+ void SetEventLog(RtcEventLog* event_log) override;
+
int64_t TimeUntilNextProcess() override;
int32_t Process() override;
diff --git a/webrtc/modules/bitrate_controller/bitrate_controller_unittest.cc b/webrtc/modules/bitrate_controller/bitrate_controller_unittest.cc
index 72831c78d6..2b9e589fbd 100644
--- a/webrtc/modules/bitrate_controller/bitrate_controller_unittest.cc
+++ b/webrtc/modules/bitrate_controller/bitrate_controller_unittest.cc
@@ -14,7 +14,7 @@
#include <vector>
#include "webrtc/modules/bitrate_controller/include/bitrate_controller.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
using webrtc::RtcpBandwidthObserver;
using webrtc::BitrateObserver;
diff --git a/webrtc/modules/bitrate_controller/include/bitrate_allocator.h b/webrtc/modules/bitrate_controller/include/bitrate_allocator.h
deleted file mode 100644
index 34b9ed5328..0000000000
--- a/webrtc/modules/bitrate_controller/include/bitrate_allocator.h
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- *
- * Usage: this class registers multiple RtcpBitrateObservers, one at each
- * RTCP module. It aggregates the results, runs one bandwidth estimation, and
- * pushes the result to the encoders via BitrateObserver(s).
- */
-
-#ifndef WEBRTC_MODULES_BITRATE_CONTROLLER_INCLUDE_BITRATE_ALLOCATOR_H_
-#define WEBRTC_MODULES_BITRATE_CONTROLLER_INCLUDE_BITRATE_ALLOCATOR_H_
-
-#include <list>
-#include <map>
-#include <utility>
-
-#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/base/thread_annotations.h"
-#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
-
-namespace webrtc {
-
-class BitrateObserver;
-
-class BitrateAllocator {
- public:
- BitrateAllocator();
-
- void OnNetworkChanged(uint32_t target_bitrate,
- uint8_t fraction_loss,
- int64_t rtt);
-
- // Sets the min and max send bitrate used by the bandwidth management.
- //
- // If |observer| is already registered, its bitrate configuration is updated.
- // |min_bitrate_bps| = 0 means no min bitrate.
- // |max_bitrate_bps| = 0 means no max bitrate.
- // Returns bitrate allocated for the bitrate observer.
- int AddBitrateObserver(BitrateObserver* observer,
- uint32_t min_bitrate_bps,
- uint32_t max_bitrate_bps);
-
- void RemoveBitrateObserver(BitrateObserver* observer);
-
- void GetMinMaxBitrateSumBps(int* min_bitrate_sum_bps,
- int* max_bitrate_sum_bps) const;
-
- // This method controls the behavior when the available bitrate is lower than
- // the minimum bitrate, or the sum of minimum bitrates.
- // When true, the bitrate will never be set lower than the minimum bitrate(s).
- // When false, observers are allocated up to their respective minimum
- // bitrate, one observer at a time, until the available bitrate is depleted.
- void EnforceMinBitrate(bool enforce_min_bitrate);
-
- private:
- struct BitrateConfiguration {
- BitrateConfiguration(uint32_t min_bitrate, uint32_t max_bitrate)
- : min_bitrate(min_bitrate), max_bitrate(max_bitrate) {}
- uint32_t min_bitrate;
- uint32_t max_bitrate;
- };
- struct ObserverConfiguration {
- ObserverConfiguration(BitrateObserver* observer, uint32_t bitrate)
- : observer(observer), min_bitrate(bitrate) {}
- BitrateObserver* const observer;
- uint32_t min_bitrate;
- };
- typedef std::pair<BitrateObserver*, BitrateConfiguration>
- BitrateObserverConfiguration;
- typedef std::list<BitrateObserverConfiguration> BitrateObserverConfList;
- typedef std::multimap<uint32_t, ObserverConfiguration> ObserverSortingMap;
- typedef std::map<BitrateObserver*, int> ObserverBitrateMap;
-
- BitrateObserverConfList::iterator FindObserverConfigurationPair(
- const BitrateObserver* observer) EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
- ObserverBitrateMap AllocateBitrates() EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
- ObserverBitrateMap NormalRateAllocation(uint32_t bitrate,
- uint32_t sum_min_bitrates)
- EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
-
- ObserverBitrateMap LowRateAllocation(uint32_t bitrate)
- EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
-
- rtc::scoped_ptr<CriticalSectionWrapper> crit_sect_;
- // Stored in a list to keep track of the insertion order.
- BitrateObserverConfList bitrate_observers_ GUARDED_BY(crit_sect_);
- bool bitrate_observers_modified_ GUARDED_BY(crit_sect_);
- bool enforce_min_bitrate_ GUARDED_BY(crit_sect_);
- uint32_t last_bitrate_bps_ GUARDED_BY(crit_sect_);
- uint8_t last_fraction_loss_ GUARDED_BY(crit_sect_);
- int64_t last_rtt_ GUARDED_BY(crit_sect_);
-};
-} // namespace webrtc
-#endif // WEBRTC_MODULES_BITRATE_CONTROLLER_INCLUDE_BITRATE_ALLOCATOR_H_
diff --git a/webrtc/modules/bitrate_controller/include/bitrate_controller.h b/webrtc/modules/bitrate_controller/include/bitrate_controller.h
index bb532886c7..d1eca8e0fe 100644
--- a/webrtc/modules/bitrate_controller/include/bitrate_controller.h
+++ b/webrtc/modules/bitrate_controller/include/bitrate_controller.h
@@ -17,12 +17,13 @@
#include <map>
-#include "webrtc/modules/interface/module.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
+#include "webrtc/modules/include/module.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
namespace webrtc {
class CriticalSectionWrapper;
+class RtcEventLog;
struct PacketInfo;
class BitrateObserver {
@@ -56,6 +57,8 @@ class BitrateController : public Module {
virtual void SetStartBitrate(int start_bitrate_bps) = 0;
virtual void SetMinMaxBitrate(int min_bitrate_bps, int max_bitrate_bps) = 0;
+ virtual void SetEventLog(RtcEventLog* event_log) = 0;
+
// Gets the available payload bandwidth in bits per second. Note that
// this bandwidth excludes packet headers.
virtual bool AvailableBandwidth(uint32_t* bandwidth) const = 0;
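
Since this diff only declares the interface, here is a hedged sketch of what a concrete BitrateObserver might look like; the OnNetworkChanged() signature matches the one used in the deleted tests above, and EncoderRateSink is a hypothetical consumer of the estimate:

    #include <cstdint>

    #include "webrtc/modules/bitrate_controller/include/bitrate_controller.h"

    class EncoderRateSink {  // Hypothetical target for the estimate.
     public:
      void SetRates(uint32_t bitrate_bps, uint8_t loss, int64_t rtt_ms) {}
    };

    class ForwardingBitrateObserver : public webrtc::BitrateObserver {
     public:
      explicit ForwardingBitrateObserver(EncoderRateSink* sink) : sink_(sink) {}
      void OnNetworkChanged(uint32_t bitrate_bps,
                            uint8_t fraction_loss,  // 0-255; 255 means 100%.
                            int64_t rtt_ms) override {
        sink_->SetRates(bitrate_bps, fraction_loss, rtt_ms);
      }

     private:
      EncoderRateSink* const sink_;
    };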
diff --git a/webrtc/modules/bitrate_controller/include/mock/mock_bitrate_controller.h b/webrtc/modules/bitrate_controller/include/mock/mock_bitrate_controller.h
new file mode 100644
index 0000000000..7a7d2e406b
--- /dev/null
+++ b/webrtc/modules/bitrate_controller/include/mock/mock_bitrate_controller.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_BITRATE_CONTROLLER_INCLUDE_MOCK_MOCK_BITRATE_CONTROLLER_H_
+#define WEBRTC_MODULES_BITRATE_CONTROLLER_INCLUDE_MOCK_MOCK_BITRATE_CONTROLLER_H_
+
+#include "testing/gmock/include/gmock/gmock.h"
+#include "webrtc/modules/bitrate_controller/include/bitrate_controller.h"
+
+namespace webrtc {
+namespace test {
+
+class MockBitrateObserver : public BitrateObserver {
+ public:
+ MOCK_METHOD3(OnNetworkChanged,
+ void(uint32_t bitrate_bps,
+ uint8_t fraction_loss,
+ int64_t rtt_ms));
+};
+} // namespace test
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_BITRATE_CONTROLLER_INCLUDE_MOCK_MOCK_BITRATE_CONTROLLER_H_
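
A possible usage sketch for the new mock, assuming the usual gmock test setup; the test name and expected values are illustrative only:

    #include "testing/gmock/include/gmock/gmock.h"
    #include "testing/gtest/include/gtest/gtest.h"
    #include "webrtc/modules/bitrate_controller/include/mock/mock_bitrate_controller.h"

    TEST(MockBitrateObserverTest, ReportsRateUpdate) {
      webrtc::test::MockBitrateObserver observer;
      // Expect exactly one update at 300 kbps, zero loss, 50 ms RTT.
      EXPECT_CALL(observer, OnNetworkChanged(300000u, 0, 50)).Times(1);
      observer.OnNetworkChanged(300000u, 0, 50);
    }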
diff --git a/webrtc/modules/bitrate_controller/send_side_bandwidth_estimation.cc b/webrtc/modules/bitrate_controller/send_side_bandwidth_estimation.cc
index 9fa7b0dfc0..258c4d94de 100644
--- a/webrtc/modules/bitrate_controller/send_side_bandwidth_estimation.cc
+++ b/webrtc/modules/bitrate_controller/send_side_bandwidth_estimation.cc
@@ -13,9 +13,10 @@
#include <cmath>
#include "webrtc/base/checks.h"
+#include "webrtc/base/logging.h"
#include "webrtc/system_wrappers/include/field_trial.h"
-#include "webrtc/system_wrappers/include/logging.h"
#include "webrtc/system_wrappers/include/metrics.h"
+#include "webrtc/call/rtc_event_log.h"
namespace webrtc {
namespace {
@@ -59,7 +60,8 @@ SendSideBandwidthEstimation::SendSideBandwidthEstimation()
initially_lost_packets_(0),
bitrate_at_2_seconds_kbps_(0),
uma_update_state_(kNoUpdate),
- rampup_uma_stats_updated_(kNumUmaRampupMetrics, false) {}
+ rampup_uma_stats_updated_(kNumUmaRampupMetrics, false),
+ event_log_(nullptr) {}
SendSideBandwidthEstimation::~SendSideBandwidthEstimation() {}
@@ -144,8 +146,8 @@ void SendSideBandwidthEstimation::UpdateUmaStats(int64_t now_ms,
for (size_t i = 0; i < kNumUmaRampupMetrics; ++i) {
if (!rampup_uma_stats_updated_[i] &&
bitrate_kbps >= kUmaRampupMetrics[i].bitrate_kbps) {
- RTC_HISTOGRAM_COUNTS_100000(kUmaRampupMetrics[i].metric_name,
- now_ms - first_report_time_ms_);
+ RTC_HISTOGRAM_COUNTS_SPARSE_100000(kUmaRampupMetrics[i].metric_name,
+ now_ms - first_report_time_ms_);
rampup_uma_stats_updated_[i] = true;
}
}
@@ -154,22 +156,19 @@ void SendSideBandwidthEstimation::UpdateUmaStats(int64_t now_ms,
} else if (uma_update_state_ == kNoUpdate) {
uma_update_state_ = kFirstDone;
bitrate_at_2_seconds_kbps_ = bitrate_kbps;
- RTC_HISTOGRAM_COUNTS(
- "WebRTC.BWE.InitiallyLostPackets", initially_lost_packets_, 0, 100, 50);
- RTC_HISTOGRAM_COUNTS(
- "WebRTC.BWE.InitialRtt", static_cast<int>(rtt), 0, 2000, 50);
- RTC_HISTOGRAM_COUNTS("WebRTC.BWE.InitialBandwidthEstimate",
- bitrate_at_2_seconds_kbps_,
- 0,
- 2000,
- 50);
+ RTC_HISTOGRAM_COUNTS_SPARSE("WebRTC.BWE.InitiallyLostPackets",
+ initially_lost_packets_, 0, 100, 50);
+ RTC_HISTOGRAM_COUNTS_SPARSE("WebRTC.BWE.InitialRtt", static_cast<int>(rtt),
+ 0, 2000, 50);
+ RTC_HISTOGRAM_COUNTS_SPARSE("WebRTC.BWE.InitialBandwidthEstimate",
+ bitrate_at_2_seconds_kbps_, 0, 2000, 50);
} else if (uma_update_state_ == kFirstDone &&
now_ms - first_report_time_ms_ >= kBweConverganceTimeMs) {
uma_update_state_ = kDone;
int bitrate_diff_kbps =
std::max(bitrate_at_2_seconds_kbps_ - bitrate_kbps, 0);
- RTC_HISTOGRAM_COUNTS(
- "WebRTC.BWE.InitialVsConvergedDiff", bitrate_diff_kbps, 0, 2000, 50);
+ RTC_HISTOGRAM_COUNTS_SPARSE("WebRTC.BWE.InitialVsConvergedDiff",
+ bitrate_diff_kbps, 0, 2000, 50);
}
}
@@ -206,6 +205,11 @@ void SendSideBandwidthEstimation::UpdateEstimate(int64_t now_ms) {
// rates).
bitrate_ += 1000;
+ if (event_log_) {
+ event_log_->LogBwePacketLossEvent(
+ bitrate_, last_fraction_loss_,
+ expected_packets_since_last_loss_update_);
+ }
} else if (last_fraction_loss_ <= 26) {
// Loss between 2% - 10%: Do nothing.
} else {
@@ -224,6 +228,11 @@ void SendSideBandwidthEstimation::UpdateEstimate(int64_t now_ms) {
512.0);
has_decreased_since_last_fraction_loss_ = true;
}
+ if (event_log_) {
+ event_log_->LogBwePacketLossEvent(
+ bitrate_, last_fraction_loss_,
+ expected_packets_since_last_loss_update_);
+ }
}
}
bitrate_ = CapBitrateToThresholds(now_ms, bitrate_);
@@ -274,4 +283,9 @@ uint32_t SendSideBandwidthEstimation::CapBitrateToThresholds(
}
return bitrate;
}
+
+void SendSideBandwidthEstimation::SetEventLog(RtcEventLog* event_log) {
+ event_log_ = event_log;
+}
+
} // namespace webrtc
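
The SetEventLog() plumbing added above terminates in SendSideBandwidthEstimation, which then records each loss-based estimate update via LogBwePacketLossEvent(). How the controller and event log are constructed is outside this diff, so the sketch below assumes both already exist; only the SetEventLog() call itself is introduced by this change:

    void AttachEventLog(webrtc::BitrateController* controller,
                        webrtc::RtcEventLog* event_log) {
      // Forwards through BitrateControllerImpl::SetEventLog() to
      // SendSideBandwidthEstimation::SetEventLog().
      controller->SetEventLog(event_log);
    }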
diff --git a/webrtc/modules/bitrate_controller/send_side_bandwidth_estimation.h b/webrtc/modules/bitrate_controller/send_side_bandwidth_estimation.h
index 40061d3ee7..7ffb42cb54 100644
--- a/webrtc/modules/bitrate_controller/send_side_bandwidth_estimation.h
+++ b/webrtc/modules/bitrate_controller/send_side_bandwidth_estimation.h
@@ -15,10 +15,13 @@
#include <deque>
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
namespace webrtc {
+
+class RtcEventLog;
+
class SendSideBandwidthEstimation {
public:
SendSideBandwidthEstimation();
@@ -42,6 +45,8 @@ class SendSideBandwidthEstimation {
void SetMinMaxBitrate(int min_bitrate, int max_bitrate);
int GetMinBitrate() const;
+ void SetEventLog(RtcEventLog* event_log);
+
private:
enum UmaState { kNoUpdate, kFirstDone, kDone };
@@ -81,6 +86,7 @@ class SendSideBandwidthEstimation {
int bitrate_at_2_seconds_kbps_;
UmaState uma_update_state_;
std::vector<bool> rampup_uma_stats_updated_;
+ RtcEventLog* event_log_;
};
} // namespace webrtc
#endif // WEBRTC_MODULES_BITRATE_CONTROLLER_SEND_SIDE_BANDWIDTH_ESTIMATION_H_
diff --git a/webrtc/modules/desktop_capture/differ_block.cc b/webrtc/modules/desktop_capture/differ_block.cc
index 0dcb377411..d4cbda3601 100644
--- a/webrtc/modules/desktop_capture/differ_block.cc
+++ b/webrtc/modules/desktop_capture/differ_block.cc
@@ -12,7 +12,7 @@
#include <string.h>
-#include "build/build_config.h"
+#include "webrtc/typedefs.h"
#include "webrtc/modules/desktop_capture/differ_block_sse2.h"
#include "webrtc/system_wrappers/include/cpu_features_wrapper.h"
@@ -38,7 +38,7 @@ bool BlockDifference(const uint8_t* image1,
static bool (*diff_proc)(const uint8_t*, const uint8_t*, int) = NULL;
if (!diff_proc) {
-#if defined(ARCH_CPU_ARM_FAMILY) || defined(ARCH_CPU_MIPS_FAMILY)
+#if defined(WEBRTC_ARCH_ARM_FAMILY) || defined(WEBRTC_ARCH_MIPS_FAMILY)
// For ARM and MIPS processors, always use C version.
// TODO(hclam): Implement a NEON version.
diff_proc = &BlockDifference_C;
diff --git a/webrtc/modules/desktop_capture/screen_capturer_win.cc b/webrtc/modules/desktop_capture/screen_capturer_win.cc
index 1f33155656..18be4eb30b 100644
--- a/webrtc/modules/desktop_capture/screen_capturer_win.cc
+++ b/webrtc/modules/desktop_capture/screen_capturer_win.cc
@@ -10,6 +10,8 @@
#include "webrtc/modules/desktop_capture/screen_capturer.h"
+#include <utility>
+
#include "webrtc/modules/desktop_capture/desktop_capture_options.h"
#include "webrtc/modules/desktop_capture/win/screen_capturer_win_gdi.h"
#include "webrtc/modules/desktop_capture/win/screen_capturer_win_magnifier.h"
@@ -22,7 +24,7 @@ ScreenCapturer* ScreenCapturer::Create(const DesktopCaptureOptions& options) {
new ScreenCapturerWinGdi(options));
if (options.allow_use_magnification_api())
- return new ScreenCapturerWinMagnifier(gdi_capturer.Pass());
+ return new ScreenCapturerWinMagnifier(std::move(gdi_capturer));
return gdi_capturer.release();
}
diff --git a/webrtc/modules/desktop_capture/win/screen_capturer_win_magnifier.cc b/webrtc/modules/desktop_capture/win/screen_capturer_win_magnifier.cc
index db40478023..066943d294 100644
--- a/webrtc/modules/desktop_capture/win/screen_capturer_win_magnifier.cc
+++ b/webrtc/modules/desktop_capture/win/screen_capturer_win_magnifier.cc
@@ -12,6 +12,8 @@
#include <assert.h>
+#include <utility>
+
#include "webrtc/modules/desktop_capture/desktop_capture_options.h"
#include "webrtc/modules/desktop_capture/desktop_frame.h"
#include "webrtc/modules/desktop_capture/desktop_frame_win.h"
@@ -37,7 +39,7 @@ Atomic32 ScreenCapturerWinMagnifier::tls_index_(TLS_OUT_OF_INDEXES);
ScreenCapturerWinMagnifier::ScreenCapturerWinMagnifier(
rtc::scoped_ptr<ScreenCapturer> fallback_capturer)
- : fallback_capturer_(fallback_capturer.Pass()),
+ : fallback_capturer_(std::move(fallback_capturer)),
fallback_capturer_started_(false),
callback_(NULL),
current_screen_id_(kFullDesktopScreenId),
@@ -53,8 +55,7 @@ ScreenCapturerWinMagnifier::ScreenCapturerWinMagnifier(
host_window_(NULL),
magnifier_window_(NULL),
magnifier_initialized_(false),
- magnifier_capture_succeeded_(true) {
-}
+ magnifier_capture_succeeded_(true) {}
ScreenCapturerWinMagnifier::~ScreenCapturerWinMagnifier() {
// DestroyWindow must be called before MagUninitialize. magnifier_window_ is
@@ -236,7 +237,7 @@ BOOL ScreenCapturerWinMagnifier::OnMagImageScalingCallback(
RECT unclipped,
RECT clipped,
HRGN dirty) {
- assert(tls_index_.Value() != TLS_OUT_OF_INDEXES);
+ assert(tls_index_.Value() != static_cast<int32_t>(TLS_OUT_OF_INDEXES));
ScreenCapturerWinMagnifier* owner =
reinterpret_cast<ScreenCapturerWinMagnifier*>(
@@ -369,7 +370,7 @@ bool ScreenCapturerWinMagnifier::InitializeMagnifier() {
}
}
- if (tls_index_.Value() == TLS_OUT_OF_INDEXES) {
+ if (tls_index_.Value() == static_cast<int32_t>(TLS_OUT_OF_INDEXES)) {
// More than one thread may get here at the same time, but only one will
// write to tls_index_ using CompareExchange.
DWORD new_tls_index = TlsAlloc();
@@ -377,7 +378,7 @@ bool ScreenCapturerWinMagnifier::InitializeMagnifier() {
TlsFree(new_tls_index);
}
- assert(tls_index_.Value() != TLS_OUT_OF_INDEXES);
+ assert(tls_index_.Value() != static_cast<int32_t>(TLS_OUT_OF_INDEXES));
TlsSetValue(tls_index_.Value(), this);
magnifier_initialized_ = true;
diff --git a/webrtc/modules/desktop_capture/window_capturer_win.cc b/webrtc/modules/desktop_capture/window_capturer_win.cc
index c0d71167a5..54b2768aa8 100644
--- a/webrtc/modules/desktop_capture/window_capturer_win.cc
+++ b/webrtc/modules/desktop_capture/window_capturer_win.cc
@@ -156,15 +156,16 @@ void WindowCapturerWin::Capture(const DesktopRegion& region) {
return;
}
- // Stop capturing if the window has been closed or hidden.
- if (!IsWindow(window_) || !IsWindowVisible(window_)) {
+ // Stop capturing if the window has been closed.
+ if (!IsWindow(window_)) {
callback_->OnCaptureCompleted(NULL);
return;
}
- // Return a 1x1 black frame if the window is minimized, to match the behavior
- // on Mac.
- if (IsIconic(window_)) {
+ // Return a 1x1 black frame if the window is minimized or invisible, to match
+ // behavior on Mac. The window can be temporarily invisible while full-screen
+ // mode is being toggled on or off.
+ if (IsIconic(window_) || !IsWindowVisible(window_)) {
BasicDesktopFrame* frame = new BasicDesktopFrame(DesktopSize(1, 1));
memset(frame->data(), 0, frame->stride() * frame->size().height());
diff --git a/webrtc/modules/interface/module.h b/webrtc/modules/include/module.h
index ffd3065a5c..d02aa95dc8 100644
--- a/webrtc/modules/interface/module.h
+++ b/webrtc/modules/include/module.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef MODULES_INTERFACE_MODULE_H_
-#define MODULES_INTERFACE_MODULE_H_
+#ifndef WEBRTC_MODULES_INCLUDE_MODULE_H_
+#define WEBRTC_MODULES_INCLUDE_MODULE_H_
#include "webrtc/typedefs.h"
@@ -78,4 +78,4 @@ class RefCountedModule : public Module {
} // namespace webrtc
-#endif // MODULES_INTERFACE_MODULE_H_
+#endif // WEBRTC_MODULES_INCLUDE_MODULE_H_
diff --git a/webrtc/modules/interface/module_common_types.h b/webrtc/modules/include/module_common_types.h
index 45e93d8fad..89c5f1b49b 100644
--- a/webrtc/modules/interface/module_common_types.h
+++ b/webrtc/modules/include/module_common_types.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef MODULE_COMMON_TYPES_H
-#define MODULE_COMMON_TYPES_H
+#ifndef WEBRTC_MODULES_INCLUDE_MODULE_COMMON_TYPES_H_
+#define WEBRTC_MODULES_INCLUDE_MODULE_COMMON_TYPES_H_
#include <assert.h>
#include <string.h> // memcpy
@@ -28,7 +28,7 @@ struct RTPAudioHeader {
uint8_t numEnergy; // number of valid entries in arrOfEnergy
uint8_t arrOfEnergy[kRtpCsrcSize]; // one energy byte (0-9) per channel
bool isCNG; // is this CNG
- uint8_t channel; // number of channels 2 = stereo
+ size_t channel; // number of channels 2 = stereo
};
const int16_t kNoPictureId = -1;
@@ -38,6 +38,7 @@ const int16_t kNoTl0PicIdx = -1;
const uint8_t kNoTemporalIdx = 0xFF;
const uint8_t kNoSpatialIdx = 0xFF;
const uint8_t kNoGofIdx = 0xFF;
+const uint8_t kNumVp9Buffers = 8;
const size_t kMaxVp9RefPics = 3;
const size_t kMaxVp9FramesInGof = 0xFF; // 8 bits
const size_t kMaxVp9NumberOfSpatialLayers = 8;
@@ -131,7 +132,7 @@ struct GofInfoVP9 {
temporal_idx[i] = src.temporal_idx[i];
temporal_up_switch[i] = src.temporal_up_switch[i];
num_ref_pics[i] = src.num_ref_pics[i];
- for (size_t r = 0; r < num_ref_pics[i]; ++r) {
+ for (uint8_t r = 0; r < num_ref_pics[i]; ++r) {
pid_diff[i][r] = src.pid_diff[i][r];
}
}
@@ -140,8 +141,8 @@ struct GofInfoVP9 {
size_t num_frames_in_gof;
uint8_t temporal_idx[kMaxVp9FramesInGof];
bool temporal_up_switch[kMaxVp9FramesInGof];
- size_t num_ref_pics[kMaxVp9FramesInGof];
- int16_t pid_diff[kMaxVp9FramesInGof][kMaxVp9RefPics];
+ uint8_t num_ref_pics[kMaxVp9FramesInGof];
+ uint8_t pid_diff[kMaxVp9FramesInGof][kMaxVp9RefPics];
};
struct RTPVideoHeaderVP9 {
@@ -185,9 +186,9 @@ struct RTPVideoHeaderVP9 {
uint8_t gof_idx; // Index to predefined temporal frame info in SS data.
- size_t num_ref_pics; // Number of reference pictures used by this layer
- // frame.
- int16_t pid_diff[kMaxVp9RefPics]; // P_DIFF signaled to derive the PictureID
+ uint8_t num_ref_pics; // Number of reference pictures used by this layer
+ // frame.
+ uint8_t pid_diff[kMaxVp9RefPics]; // P_DIFF signaled to derive the PictureID
// of the reference pictures.
int16_t ref_picture_id[kMaxVp9RefPics]; // PictureID of reference pictures.
@@ -507,7 +508,7 @@ class AudioFrame {
void UpdateFrame(int id, uint32_t timestamp, const int16_t* data,
size_t samples_per_channel, int sample_rate_hz,
SpeechType speech_type, VADActivity vad_activity,
- int num_channels = 1, uint32_t energy = -1);
+ size_t num_channels = 1, uint32_t energy = -1);
AudioFrame& Append(const AudioFrame& rhs);
@@ -531,7 +532,7 @@ class AudioFrame {
int16_t data_[kMaxDataSizeSamples];
size_t samples_per_channel_;
int sample_rate_hz_;
- int num_channels_;
+ size_t num_channels_;
SpeechType speech_type_;
VADActivity vad_activity_;
// Note that there is no guarantee that |energy_| is correct. Any user of this
@@ -573,7 +574,7 @@ inline void AudioFrame::UpdateFrame(int id,
int sample_rate_hz,
SpeechType speech_type,
VADActivity vad_activity,
- int num_channels,
+ size_t num_channels,
uint32_t energy) {
id_ = id;
timestamp_ = timestamp;
@@ -584,7 +585,6 @@ inline void AudioFrame::UpdateFrame(int id,
num_channels_ = num_channels;
energy_ = energy;
- assert(num_channels >= 0);
const size_t length = samples_per_channel * num_channels;
assert(length <= kMaxDataSizeSamples);
if (data != NULL) {
@@ -609,7 +609,6 @@ inline void AudioFrame::CopyFrom(const AudioFrame& src) {
energy_ = src.energy_;
interleaved_ = src.interleaved_;
- assert(num_channels_ >= 0);
const size_t length = samples_per_channel_ * num_channels_;
assert(length <= kMaxDataSizeSamples);
memcpy(data_, src.data_, sizeof(int16_t) * length);
@@ -807,4 +806,4 @@ class SequenceNumberUnwrapper {
} // namespace webrtc
-#endif // MODULE_COMMON_TYPES_H
+#endif // WEBRTC_MODULES_INCLUDE_MODULE_COMMON_TYPES_H_
diff --git a/webrtc/modules/media_file/BUILD.gn b/webrtc/modules/media_file/BUILD.gn
index 05cfb4e555..2a4be728f3 100644
--- a/webrtc/modules/media_file/BUILD.gn
+++ b/webrtc/modules/media_file/BUILD.gn
@@ -14,12 +14,12 @@ config("media_file_config") {
source_set("media_file") {
sources = [
- "interface/media_file.h",
- "interface/media_file_defines.h",
- "source/media_file_impl.cc",
- "source/media_file_impl.h",
- "source/media_file_utility.cc",
- "source/media_file_utility.h",
+ "media_file.h",
+ "media_file_defines.h",
+ "media_file_impl.cc",
+ "media_file_impl.h",
+ "media_file_utility.cc",
+ "media_file_utility.h",
]
if (is_win) {
diff --git a/webrtc/modules/media_file/OWNERS b/webrtc/modules/media_file/OWNERS
index beb9729e04..f6467a4161 100644
--- a/webrtc/modules/media_file/OWNERS
+++ b/webrtc/modules/media_file/OWNERS
@@ -1,5 +1,10 @@
-mflodman@webrtc.org
-perkj@webrtc.org
-niklas.enbom@webrtc.org
-
-per-file BUILD.gn=kjellander@webrtc.org
+mflodman@webrtc.org
+perkj@webrtc.org
+niklas.enbom@webrtc.org
+
+# These are for the common case of adding or renaming files. If you're making
+# structural changes, please get a review from a reviewer in this file.
+per-file *.gyp=*
+per-file *.gypi=*
+
+per-file BUILD.gn=kjellander@webrtc.org
diff --git a/webrtc/modules/media_file/media_file.gypi b/webrtc/modules/media_file/media_file.gypi
index 4ec80c3c52..94a99a22f1 100644
--- a/webrtc/modules/media_file/media_file.gypi
+++ b/webrtc/modules/media_file/media_file.gypi
@@ -17,12 +17,12 @@
'<(webrtc_root)/common_audio/common_audio.gyp:common_audio',
],
'sources': [
- 'interface/media_file.h',
- 'interface/media_file_defines.h',
- 'source/media_file_impl.cc',
- 'source/media_file_impl.h',
- 'source/media_file_utility.cc',
- 'source/media_file_utility.h',
+ 'media_file.h',
+ 'media_file_defines.h',
+ 'media_file_impl.cc',
+ 'media_file_impl.h',
+ 'media_file_utility.cc',
+ 'media_file_utility.h',
], # source
# TODO(jschuh): Bug 1348: fix size_t to int truncations.
'msvs_disabled_warnings': [ 4267, ],
diff --git a/webrtc/modules/media_file/interface/media_file.h b/webrtc/modules/media_file/media_file.h
index 5b09ad4383..f6924d6bb0 100644
--- a/webrtc/modules/media_file/interface/media_file.h
+++ b/webrtc/modules/media_file/media_file.h
@@ -8,13 +8,13 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_MEDIA_FILE_INTERFACE_MEDIA_FILE_H_
-#define WEBRTC_MODULES_MEDIA_FILE_INTERFACE_MEDIA_FILE_H_
+#ifndef WEBRTC_MODULES_MEDIA_FILE_MEDIA_FILE_H_
+#define WEBRTC_MODULES_MEDIA_FILE_MEDIA_FILE_H_
#include "webrtc/common_types.h"
-#include "webrtc/modules/interface/module.h"
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/modules/media_file/interface/media_file_defines.h"
+#include "webrtc/modules/include/module.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/media_file/media_file_defines.h"
#include "webrtc/typedefs.h"
namespace webrtc {
@@ -177,4 +177,4 @@ protected:
virtual ~MediaFile() {}
};
} // namespace webrtc
-#endif // WEBRTC_MODULES_MEDIA_FILE_INTERFACE_MEDIA_FILE_H_
+#endif // WEBRTC_MODULES_MEDIA_FILE_MEDIA_FILE_H_
diff --git a/webrtc/modules/media_file/interface/media_file_defines.h b/webrtc/modules/media_file/media_file_defines.h
index ded71a8ca7..a021a148a5 100644
--- a/webrtc/modules/media_file/interface/media_file_defines.h
+++ b/webrtc/modules/media_file/media_file_defines.h
@@ -8,11 +8,11 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_MEDIA_FILE_INTERFACE_MEDIA_FILE_DEFINES_H_
-#define WEBRTC_MODULES_MEDIA_FILE_INTERFACE_MEDIA_FILE_DEFINES_H_
+#ifndef WEBRTC_MODULES_MEDIA_FILE_MEDIA_FILE_DEFINES_H_
+#define WEBRTC_MODULES_MEDIA_FILE_MEDIA_FILE_DEFINES_H_
#include "webrtc/engine_configurations.h"
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/typedefs.h"
namespace webrtc {
@@ -48,4 +48,4 @@ protected:
FileCallback() {}
};
} // namespace webrtc
-#endif // WEBRTC_MODULES_MEDIA_FILE_INTERFACE_MEDIA_FILE_DEFINES_H_
+#endif // WEBRTC_MODULES_MEDIA_FILE_MEDIA_FILE_DEFINES_H_
diff --git a/webrtc/modules/media_file/source/media_file_impl.cc b/webrtc/modules/media_file/media_file_impl.cc
index 50175b86d5..abc7b9d9e0 100644
--- a/webrtc/modules/media_file/source/media_file_impl.cc
+++ b/webrtc/modules/media_file/media_file_impl.cc
@@ -11,7 +11,7 @@
#include <assert.h>
#include "webrtc/base/format_macros.h"
-#include "webrtc/modules/media_file/source/media_file_impl.h"
+#include "webrtc/modules/media_file/media_file_impl.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
#include "webrtc/system_wrappers/include/file_wrapper.h"
#include "webrtc/system_wrappers/include/tick_util.h"
diff --git a/webrtc/modules/media_file/source/media_file_impl.h b/webrtc/modules/media_file/media_file_impl.h
index cdb54d880d..c23f514c75 100644
--- a/webrtc/modules/media_file/source/media_file_impl.h
+++ b/webrtc/modules/media_file/media_file_impl.h
@@ -8,14 +8,14 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_MEDIA_FILE_SOURCE_MEDIA_FILE_IMPL_H_
-#define WEBRTC_MODULES_MEDIA_FILE_SOURCE_MEDIA_FILE_IMPL_H_
+#ifndef WEBRTC_MODULES_MEDIA_FILE_MEDIA_FILE_IMPL_H_
+#define WEBRTC_MODULES_MEDIA_FILE_MEDIA_FILE_IMPL_H_
#include "webrtc/common_types.h"
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/modules/media_file/interface/media_file.h"
-#include "webrtc/modules/media_file/interface/media_file_defines.h"
-#include "webrtc/modules/media_file/source/media_file_utility.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/media_file/media_file.h"
+#include "webrtc/modules/media_file/media_file_defines.h"
+#include "webrtc/modules/media_file/media_file_utility.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
namespace webrtc {
@@ -145,4 +145,4 @@ private:
};
} // namespace webrtc
-#endif // WEBRTC_MODULES_MEDIA_FILE_SOURCE_MEDIA_FILE_IMPL_H_
+#endif // WEBRTC_MODULES_MEDIA_FILE_MEDIA_FILE_IMPL_H_
diff --git a/webrtc/modules/media_file/source/media_file_unittest.cc b/webrtc/modules/media_file/media_file_unittest.cc
index 370d13228a..6541a8fb7c 100644
--- a/webrtc/modules/media_file/source/media_file_unittest.cc
+++ b/webrtc/modules/media_file/media_file_unittest.cc
@@ -9,10 +9,9 @@
*/
#include "testing/gtest/include/gtest/gtest.h"
-#include "webrtc/modules/media_file/interface/media_file.h"
+#include "webrtc/modules/media_file/media_file.h"
#include "webrtc/system_wrappers/include/sleep.h"
#include "webrtc/test/testsupport/fileutils.h"
-#include "webrtc/test/testsupport/gtest_disable.h"
class MediaFileTest : public testing::Test {
protected:
@@ -28,8 +27,14 @@ class MediaFileTest : public testing::Test {
webrtc::MediaFile* media_file_;
};
-TEST_F(MediaFileTest, DISABLED_ON_IOS(
- DISABLED_ON_ANDROID(StartPlayingAudioFileWithoutError))) {
+#if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS)
+#define MAYBE_StartPlayingAudioFileWithoutError \
+ DISABLED_StartPlayingAudioFileWithoutError
+#else
+#define MAYBE_StartPlayingAudioFileWithoutError \
+ StartPlayingAudioFileWithoutError
+#endif
+TEST_F(MediaFileTest, MAYBE_StartPlayingAudioFileWithoutError) {
// TODO(leozwang): A hard-coded filename is used here; we want to
// loop through all audio files in the future.
const std::string audio_file = webrtc::test::ProjectRootPath() +
@@ -47,7 +52,12 @@ TEST_F(MediaFileTest, DISABLED_ON_IOS(
ASSERT_EQ(0, media_file_->StopPlaying());
}
-TEST_F(MediaFileTest, DISABLED_ON_IOS(WriteWavFile)) {
+#if defined(WEBRTC_IOS)
+#define MAYBE_WriteWavFile DISABLED_WriteWavFile
+#else
+#define MAYBE_WriteWavFile WriteWavFile
+#endif
+TEST_F(MediaFileTest, MAYBE_WriteWavFile) {
// Write file.
static const size_t kHeaderSize = 44;
static const size_t kPayloadSize = 320;
diff --git a/webrtc/modules/media_file/source/media_file_utility.cc b/webrtc/modules/media_file/media_file_utility.cc
index 61ae442d0e..8a815cc25d 100644
--- a/webrtc/modules/media_file/source/media_file_utility.cc
+++ b/webrtc/modules/media_file/media_file_utility.cc
@@ -8,7 +8,7 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/media_file/source/media_file_utility.h"
+#include "webrtc/modules/media_file/media_file_utility.h"
#include <assert.h>
#include <sys/stat.h>
@@ -19,7 +19,7 @@
#include "webrtc/common_audio/wav_header.h"
#include "webrtc/common_types.h"
#include "webrtc/engine_configurations.h"
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/system_wrappers/include/file_wrapper.h"
#include "webrtc/system_wrappers/include/trace.h"
@@ -38,8 +38,8 @@ struct WAVE_RIFF_header
// the chunk size (16, 18 or 40 byte)
struct WAVE_CHUNK_header
{
- int8_t fmt_ckID[4];
- int32_t fmt_ckSize;
+ int8_t fmt_ckID[4];
+ uint32_t fmt_ckSize;
};
} // unnamed namespace
@@ -79,15 +79,15 @@ int32_t ModuleFileUtility::ReadWavHeader(InStream& wav)
// TODO (hellner): tmpStr and tmpStr2 seem unnecessary here.
char tmpStr[6] = "FOUR";
unsigned char tmpStr2[4];
- int32_t i, len;
+ size_t i;
bool dataFound = false;
bool fmtFound = false;
int8_t dummyRead;
_dataSize = 0;
- len = wav.Read(&RIFFheaderObj, sizeof(WAVE_RIFF_header));
- if(len != sizeof(WAVE_RIFF_header))
+ int len = wav.Read(&RIFFheaderObj, sizeof(WAVE_RIFF_header));
+ if (len != static_cast<int>(sizeof(WAVE_RIFF_header)))
{
WEBRTC_TRACE(kTraceError, kTraceFile, _id,
"Not a wave file (too short)");
@@ -123,14 +123,13 @@ int32_t ModuleFileUtility::ReadWavHeader(InStream& wav)
// in a subroutine.
memcpy(tmpStr2, &CHUNKheaderObj.fmt_ckSize, 4);
CHUNKheaderObj.fmt_ckSize =
- (int32_t) ((uint32_t) tmpStr2[0] +
- (((uint32_t)tmpStr2[1])<<8) +
- (((uint32_t)tmpStr2[2])<<16) +
- (((uint32_t)tmpStr2[3])<<24));
+ (uint32_t)tmpStr2[0] + (((uint32_t)tmpStr2[1]) << 8) +
+ (((uint32_t)tmpStr2[2]) << 16) + (((uint32_t)tmpStr2[3]) << 24);
memcpy(tmpStr, CHUNKheaderObj.fmt_ckID, 4);
- while ((len == sizeof(WAVE_CHUNK_header)) && (!fmtFound || !dataFound))
+ while ((len == static_cast<int>(sizeof(WAVE_CHUNK_header))) &&
+ (!fmtFound || !dataFound))
{
if(strcmp(tmpStr, "fmt ") == 0)
{
@@ -164,9 +163,14 @@ int32_t ModuleFileUtility::ReadWavHeader(InStream& wav)
(int16_t) ((uint32_t)tmpStr2[0] +
(((uint32_t)tmpStr2[1])<<8));
+ if (CHUNKheaderObj.fmt_ckSize < sizeof(WAVE_FMTINFO_header))
+ {
+ WEBRTC_TRACE(kTraceError, kTraceFile, _id,
+ "Chunk size is too small");
+ return -1;
+ }
for (i = 0;
- i < (CHUNKheaderObj.fmt_ckSize -
- (int32_t)sizeof(WAVE_FMTINFO_header));
+ i < CHUNKheaderObj.fmt_ckSize - sizeof(WAVE_FMTINFO_header);
i++)
{
len = wav.Read(&dummyRead, 1);
@@ -187,7 +191,7 @@ int32_t ModuleFileUtility::ReadWavHeader(InStream& wav)
}
else
{
- for (i = 0; i < (CHUNKheaderObj.fmt_ckSize); i++)
+ for (i = 0; i < CHUNKheaderObj.fmt_ckSize; i++)
{
len = wav.Read(&dummyRead, 1);
if(len != 1)
@@ -203,10 +207,8 @@ int32_t ModuleFileUtility::ReadWavHeader(InStream& wav)
memcpy(tmpStr2, &CHUNKheaderObj.fmt_ckSize, 4);
CHUNKheaderObj.fmt_ckSize =
- (int32_t) ((uint32_t)tmpStr2[0] +
- (((uint32_t)tmpStr2[1])<<8) +
- (((uint32_t)tmpStr2[2])<<16) +
- (((uint32_t)tmpStr2[3])<<24));
+ (uint32_t)tmpStr2[0] + (((uint32_t)tmpStr2[1]) << 8) +
+ (((uint32_t)tmpStr2[2]) << 16) + (((uint32_t)tmpStr2[3]) << 24);
memcpy(tmpStr, CHUNKheaderObj.fmt_ckID, 4);
}
@@ -241,35 +243,17 @@ int32_t ModuleFileUtility::ReadWavHeader(InStream& wav)
}
// Calculate the number of bytes that 10 ms of audio data correspond to.
- if(_wavFormatObj.formatTag == kWavFormatPcm)
- {
- // TODO (hellner): integer division for 22050 and 11025 would yield
- // the same result as the else statement. Remove those
- // special cases?
- if(_wavFormatObj.nSamplesPerSec == 44100)
- {
- _readSizeBytes = 440 * _wavFormatObj.nChannels *
- (_wavFormatObj.nBitsPerSample / 8);
- } else if(_wavFormatObj.nSamplesPerSec == 22050) {
- _readSizeBytes = 220 * _wavFormatObj.nChannels *
- (_wavFormatObj.nBitsPerSample / 8);
- } else if(_wavFormatObj.nSamplesPerSec == 11025) {
- _readSizeBytes = 110 * _wavFormatObj.nChannels *
- (_wavFormatObj.nBitsPerSample / 8);
- } else {
- _readSizeBytes = (_wavFormatObj.nSamplesPerSec/100) *
- _wavFormatObj.nChannels * (_wavFormatObj.nBitsPerSample / 8);
- }
-
- } else {
- _readSizeBytes = (_wavFormatObj.nSamplesPerSec/100) *
- _wavFormatObj.nChannels * (_wavFormatObj.nBitsPerSample / 8);
- }
+ size_t samples_per_10ms =
+ ((_wavFormatObj.formatTag == kWavFormatPcm) &&
+ (_wavFormatObj.nSamplesPerSec == 44100)) ?
+ 440 : static_cast<size_t>(_wavFormatObj.nSamplesPerSec / 100);
+ _readSizeBytes = samples_per_10ms * _wavFormatObj.nChannels *
+ (_wavFormatObj.nBitsPerSample / 8);
return 0;
}
int32_t ModuleFileUtility::InitWavCodec(uint32_t samplesPerSec,
- uint32_t channels,
+ size_t channels,
uint32_t bitsPerSample,
uint32_t formatTag)
{
@@ -376,15 +360,15 @@ int32_t ModuleFileUtility::InitWavReading(InStream& wav,
if(start > 0)
{
uint8_t dummy[WAV_MAX_BUFFER_SIZE];
- int32_t readLength;
+ int readLength;
if(_readSizeBytes <= WAV_MAX_BUFFER_SIZE)
{
while (_playoutPositionMs < start)
{
readLength = wav.Read(dummy, _readSizeBytes);
- if(readLength == _readSizeBytes)
+ if(readLength == static_cast<int>(_readSizeBytes))
{
- _readPos += readLength;
+ _readPos += _readSizeBytes;
_playoutPositionMs += 10;
}
else // Must have reached EOF before start position!
@@ -406,7 +390,7 @@ int32_t ModuleFileUtility::InitWavReading(InStream& wav,
{
return -1;
}
- _bytesPerSample = _wavFormatObj.nBitsPerSample / 8;
+ _bytesPerSample = static_cast<size_t>(_wavFormatObj.nBitsPerSample / 8);
_startPointInMs = start;
@@ -431,9 +415,9 @@ int32_t ModuleFileUtility::ReadWavDataAsMono(
bufferSize);
// The number of bytes that should be read from file.
- const uint32_t totalBytesNeeded = _readSizeBytes;
+ const size_t totalBytesNeeded = _readSizeBytes;
// The number of bytes that will be written to outData.
- const uint32_t bytesRequested = (codec_info_.channels == 2) ?
+ const size_t bytesRequested = (codec_info_.channels == 2) ?
totalBytesNeeded >> 1 : totalBytesNeeded;
if(bufferSize < bytesRequested)
{
@@ -472,7 +456,7 @@ int32_t ModuleFileUtility::ReadWavDataAsMono(
// Output data should be mono.
if(codec_info_.channels == 2)
{
- for (uint32_t i = 0; i < bytesRequested / _bytesPerSample; i++)
+ for (size_t i = 0; i < bytesRequested / _bytesPerSample; i++)
{
// Sample value is the average of left and right buffer rounded to
// closest integer value. Note samples can be either 1 or 2 byte.
@@ -490,7 +474,7 @@ int32_t ModuleFileUtility::ReadWavDataAsMono(
}
memcpy(outData, _tempData, bytesRequested);
}
- return bytesRequested;
+ return static_cast<int32_t>(bytesRequested);
}
int32_t ModuleFileUtility::ReadWavDataAsStereo(
@@ -534,10 +518,10 @@ int32_t ModuleFileUtility::ReadWavDataAsStereo(
}
// The number of bytes that should be read from file.
- const uint32_t totalBytesNeeded = _readSizeBytes;
+ const size_t totalBytesNeeded = _readSizeBytes;
// The number of bytes that will be written to the left and the right
// buffers.
- const uint32_t bytesRequested = totalBytesNeeded >> 1;
+ const size_t bytesRequested = totalBytesNeeded >> 1;
if(bufferSize < bytesRequested)
{
WEBRTC_TRACE(kTraceError, kTraceFile, _id,
@@ -558,7 +542,7 @@ int32_t ModuleFileUtility::ReadWavDataAsStereo(
// either 1 or 2 bytes
if(_bytesPerSample == 1)
{
- for (uint32_t i = 0; i < bytesRequested; i++)
+ for (size_t i = 0; i < bytesRequested; i++)
{
outDataLeft[i] = _tempData[2 * i];
outDataRight[i] = _tempData[(2 * i) + 1];
@@ -572,35 +556,29 @@ int32_t ModuleFileUtility::ReadWavDataAsStereo(
outDataRight);
// Bytes requested to samples requested.
- uint32_t sampleCount = bytesRequested >> 1;
- for (uint32_t i = 0; i < sampleCount; i++)
+ size_t sampleCount = bytesRequested >> 1;
+ for (size_t i = 0; i < sampleCount; i++)
{
outLeft[i] = sampleData[2 * i];
outRight[i] = sampleData[(2 * i) + 1];
}
} else {
WEBRTC_TRACE(kTraceError, kTraceFile, _id,
- "ReadWavStereoData: unsupported sample size %d!",
+ "ReadWavStereoData: unsupported sample size %" PRIuS "!",
_bytesPerSample);
assert(false);
return -1;
}
- return bytesRequested;
+ return static_cast<int32_t>(bytesRequested);
}
-int32_t ModuleFileUtility::ReadWavData(
- InStream& wav,
- uint8_t* buffer,
- const uint32_t dataLengthInBytes)
+int32_t ModuleFileUtility::ReadWavData(InStream& wav,
+ uint8_t* buffer,
+ size_t dataLengthInBytes)
{
- WEBRTC_TRACE(
- kTraceStream,
- kTraceFile,
- _id,
- "ModuleFileUtility::ReadWavData(wav= 0x%x, buffer= 0x%x, dataLen= %ld)",
- &wav,
- buffer,
- dataLengthInBytes);
+ WEBRTC_TRACE(kTraceStream, kTraceFile, _id,
+ "ModuleFileUtility::ReadWavData(wav= 0x%x, buffer= 0x%x, "
+ "dataLen= %" PRIuS ")", &wav, buffer, dataLengthInBytes);
if(buffer == NULL)
@@ -613,7 +591,7 @@ int32_t ModuleFileUtility::ReadWavData(
// Make sure that a read won't return too few samples.
// TODO (hellner): why not read the remaining bytes needed from the start
// of the file?
- if((_dataSize - _readPos) < (int32_t)dataLengthInBytes)
+ if(_dataSize < (_readPos + dataLengthInBytes))
{
// Rewind() returning -1 may be because the file is not supposed to be looped.
if(wav.Rewind() == -1)
@@ -685,8 +663,7 @@ int32_t ModuleFileUtility::InitWavWriting(OutStream& wav,
return -1;
}
_writing = false;
- uint32_t channels = (codecInst.channels == 0) ?
- 1 : codecInst.channels;
+ size_t channels = (codecInst.channels == 0) ? 1 : codecInst.channels;
if(STR_CASE_CMP(codecInst.plname, "PCMU") == 0)
{
@@ -696,7 +673,8 @@ int32_t ModuleFileUtility::InitWavWriting(OutStream& wav,
{
return -1;
}
- }else if(STR_CASE_CMP(codecInst.plname, "PCMA") == 0)
+ }
+ else if(STR_CASE_CMP(codecInst.plname, "PCMA") == 0)
{
_bytesPerSample = 1;
if(WriteWavHeader(wav, 8000, _bytesPerSample, channels, kWavFormatALaw,
@@ -729,15 +707,9 @@ int32_t ModuleFileUtility::WriteWavData(OutStream& out,
const int8_t* buffer,
const size_t dataLength)
{
- WEBRTC_TRACE(
- kTraceStream,
- kTraceFile,
- _id,
- "ModuleFileUtility::WriteWavData(out= 0x%x, buf= 0x%x, dataLen= %" PRIuS
- ")",
- &out,
- buffer,
- dataLength);
+ WEBRTC_TRACE(kTraceStream, kTraceFile, _id,
+ "ModuleFileUtility::WriteWavData(out= 0x%x, buf= 0x%x, "
+ "dataLen= %" PRIuS ")", &out, buffer, dataLength);
if(buffer == NULL)
{
@@ -757,19 +729,19 @@ int32_t ModuleFileUtility::WriteWavData(OutStream& out,
int32_t ModuleFileUtility::WriteWavHeader(
OutStream& wav,
- const uint32_t freq,
- const uint32_t bytesPerSample,
- const uint32_t channels,
- const uint32_t format,
- const uint32_t lengthInBytes)
+ uint32_t freq,
+ size_t bytesPerSample,
+ size_t channels,
+ uint32_t format,
+ size_t lengthInBytes)
{
// Frame size in bytes for 10 ms of audio.
// TODO (hellner): 44.1 kHz has a 440-sample frame size, which doesn't seem
// to be taken into consideration here!
- const int32_t frameSize = (freq / 100) * channels;
+ const size_t frameSize = (freq / 100) * channels;
// Calculate the number of full frames that the wave file contain.
- const int32_t dataLengthInBytes = frameSize * (lengthInBytes / frameSize);
+ const size_t dataLengthInBytes = frameSize * (lengthInBytes / frameSize);
uint8_t buf[kWavHeaderSize];
webrtc::WriteWavHeader(buf, channels, freq, static_cast<WavFormat>(format),
@@ -785,8 +757,7 @@ int32_t ModuleFileUtility::UpdateWavHeader(OutStream& wav)
{
return -1;
}
- uint32_t channels = (codec_info_.channels == 0) ?
- 1 : codec_info_.channels;
+ size_t channels = (codec_info_.channels == 0) ? 1 : codec_info_.channels;
if(STR_CASE_CMP(codec_info_.plname, "L16") == 0)
{
@@ -839,22 +810,17 @@ int32_t ModuleFileUtility::ReadPreEncodedData(
int8_t* outData,
const size_t bufferSize)
{
- WEBRTC_TRACE(
- kTraceStream,
- kTraceFile,
- _id,
- "ModuleFileUtility::ReadPreEncodedData(in= 0x%x, outData= 0x%x, "
- "bufferSize= %" PRIuS ")",
- &in,
- outData,
- bufferSize);
+ WEBRTC_TRACE(kTraceStream, kTraceFile, _id,
+ "ModuleFileUtility::ReadPreEncodedData(in= 0x%x, "
+ "outData= 0x%x, bufferSize= %" PRIuS ")", &in, outData,
+ bufferSize);
if(outData == NULL)
{
WEBRTC_TRACE(kTraceError, kTraceFile, _id, "output buffer NULL");
}
- uint32_t frameLen;
+ size_t frameLen;
uint8_t buf[64];
// Each frame has a two-byte header containing the frame length.
int32_t res = in.Read(buf, 2);
@@ -874,12 +840,9 @@ int32_t ModuleFileUtility::ReadPreEncodedData(
frameLen = buf[0] + buf[1] * 256;
if(bufferSize < frameLen)
{
- WEBRTC_TRACE(
- kTraceError,
- kTraceFile,
- _id,
- "buffer not large enough to read %d bytes of pre-encoded data!",
- frameLen);
+ WEBRTC_TRACE(kTraceError, kTraceFile, _id,
+ "buffer not large enough to read %" PRIuS " bytes of "
+ "pre-encoded data!", frameLen);
return -1;
}
return in.Read(outData, frameLen);
@@ -897,24 +860,19 @@ int32_t ModuleFileUtility::InitPreEncodedWriting(
}
_writing = true;
_bytesWritten = 1;
- out.Write(&_codecId, 1);
- return 0;
+ out.Write(&_codecId, 1);
+ return 0;
}
int32_t ModuleFileUtility::WritePreEncodedData(
OutStream& out,
- const int8_t* buffer,
+ const int8_t* buffer,
const size_t dataLength)
{
- WEBRTC_TRACE(
- kTraceStream,
- kTraceFile,
- _id,
- "ModuleFileUtility::WritePreEncodedData(out= 0x%x, inData= 0x%x, "
- "dataLen= %" PRIuS ")",
- &out,
- buffer,
- dataLength);
+ WEBRTC_TRACE(kTraceStream, kTraceFile, _id,
+ "ModuleFileUtility::WritePreEncodedData(out= 0x%x, "
+ "inData= 0x%x, dataLen= %" PRIuS ")", &out, buffer,
+ dataLength);
if(buffer == NULL)
{
@@ -945,15 +903,9 @@ int32_t ModuleFileUtility::InitCompressedReading(
const uint32_t start,
const uint32_t stop)
{
- WEBRTC_TRACE(
- kTraceDebug,
- kTraceFile,
- _id,
- "ModuleFileUtility::InitCompressedReading(in= 0x%x, start= %d,\
- stop= %d)",
- &in,
- start,
- stop);
+ WEBRTC_TRACE(kTraceDebug, kTraceFile, _id,
+ "ModuleFileUtility::InitCompressedReading(in= 0x%x, "
+ "start= %d, stop= %d)", &in, start, stop);
#if defined(WEBRTC_CODEC_ILBC)
int16_t read_len = 0;
@@ -976,9 +928,8 @@ int32_t ModuleFileUtility::InitCompressedReading(
if(cnt==64)
{
return -1;
- } else {
- buf[cnt]=0;
}
+ buf[cnt]=0;
#ifdef WEBRTC_CODEC_ILBC
if(!strcmp("#!iLBC20\n", buf))
@@ -996,14 +947,11 @@ int32_t ModuleFileUtility::InitCompressedReading(
while (_playoutPositionMs <= _startPointInMs)
{
read_len = in.Read(buf, 38);
- if(read_len == 38)
- {
- _playoutPositionMs += 20;
- }
- else
+ if(read_len != 38)
{
return -1;
}
+ _playoutPositionMs += 20;
}
}
}
@@ -1023,14 +971,11 @@ int32_t ModuleFileUtility::InitCompressedReading(
while (_playoutPositionMs <= _startPointInMs)
{
read_len = in.Read(buf, 50);
- if(read_len == 50)
- {
- _playoutPositionMs += 20;
- }
- else
+ if(read_len != 50)
{
return -1;
}
+ _playoutPositionMs += 20;
}
}
}
@@ -1047,17 +992,11 @@ int32_t ModuleFileUtility::ReadCompressedData(InStream& in,
int8_t* outData,
size_t bufferSize)
{
- WEBRTC_TRACE(
- kTraceStream,
- kTraceFile,
- _id,
- "ModuleFileUtility::ReadCompressedData(in=0x%x, outData=0x%x, bytes=%"
- PRIuS ")",
- &in,
- outData,
- bufferSize);
+ WEBRTC_TRACE(kTraceStream, kTraceFile, _id,
+ "ModuleFileUtility::ReadCompressedData(in=0x%x, outData=0x%x, "
+ "bytes=%" PRIuS ")", &in, outData, bufferSize);
- uint32_t bytesRead = 0;
+ int bytesRead = 0;
if(! _reading)
{
@@ -1069,8 +1008,8 @@ int32_t ModuleFileUtility::ReadCompressedData(InStream& in,
if((_codecId == kCodecIlbc20Ms) ||
(_codecId == kCodecIlbc30Ms))
{
- uint32_t byteSize = 0;
- if(_codecId == kCodecIlbc30Ms)
+ size_t byteSize = 0;
+ if(_codecId == kCodecIlbc30Ms)
{
byteSize = 50;
}
@@ -1081,20 +1020,20 @@ int32_t ModuleFileUtility::ReadCompressedData(InStream& in,
if(bufferSize < byteSize)
{
WEBRTC_TRACE(kTraceError, kTraceFile, _id,
- "output buffer is too short to read ILBC compressed\
- data.");
+ "output buffer is too short to read ILBC compressed "
+ "data.");
assert(false);
return -1;
}
bytesRead = in.Read(outData, byteSize);
- if(bytesRead != byteSize)
+ if(bytesRead != static_cast<int>(byteSize))
{
if(!in.Rewind())
{
InitCompressedReading(in, _startPointInMs, _stopPointInMs);
bytesRead = in.Read(outData, byteSize);
- if(bytesRead != byteSize)
+ if(bytesRead != static_cast<int>(byteSize))
{
_reading = false;
return -1;
@@ -1136,9 +1075,8 @@ int32_t ModuleFileUtility::InitCompressedWriting(
const CodecInst& codecInst)
{
WEBRTC_TRACE(kTraceDebug, kTraceFile, _id,
- "ModuleFileUtility::InitCompressedWriting(out= 0x%x,\
- codecName= %s)",
- &out, codecInst.plname);
+ "ModuleFileUtility::InitCompressedWriting(out= 0x%x, "
+ "codecName= %s)", &out, codecInst.plname);
_writing = false;
@@ -1177,15 +1115,9 @@ int32_t ModuleFileUtility::WriteCompressedData(
const int8_t* buffer,
const size_t dataLength)
{
- WEBRTC_TRACE(
- kTraceStream,
- kTraceFile,
- _id,
- "ModuleFileUtility::WriteCompressedData(out= 0x%x, buf= 0x%x, "
- "dataLen= %" PRIuS ")",
- &out,
- buffer,
- dataLength);
+ WEBRTC_TRACE(kTraceStream, kTraceFile, _id,
+ "ModuleFileUtility::WriteCompressedData(out= 0x%x, buf= 0x%x, "
+ "dataLen= %" PRIuS ")", &out, buffer, dataLength);
if(buffer == NULL)
{
@@ -1204,19 +1136,12 @@ int32_t ModuleFileUtility::InitPCMReading(InStream& pcm,
const uint32_t stop,
uint32_t freq)
{
- WEBRTC_TRACE(
- kTraceInfo,
- kTraceFile,
- _id,
- "ModuleFileUtility::InitPCMReading(pcm= 0x%x, start=%d, stop=%d,\
- freq=%d)",
- &pcm,
- start,
- stop,
- freq);
+ WEBRTC_TRACE(kTraceInfo, kTraceFile, _id,
+ "ModuleFileUtility::InitPCMReading(pcm= 0x%x, start=%d, "
+ "stop=%d, freq=%d)", &pcm, start, stop, freq);
int8_t dummy[320];
- int32_t read_len;
+ int read_len;
_playoutPositionMs = 0;
_startPointInMs = start;
@@ -1261,14 +1186,11 @@ int32_t ModuleFileUtility::InitPCMReading(InStream& pcm,
while (_playoutPositionMs < _startPointInMs)
{
read_len = pcm.Read(dummy, _readSizeBytes);
- if(read_len == _readSizeBytes)
+ if(read_len != static_cast<int>(_readSizeBytes))
{
- _playoutPositionMs += 10;
- }
- else // Must have reached EOF before start position!
- {
- return -1;
+ return -1; // Must have reached EOF before start position!
}
+ _playoutPositionMs += 10;
}
}
_reading = true;
@@ -1279,23 +1201,17 @@ int32_t ModuleFileUtility::ReadPCMData(InStream& pcm,
int8_t* outData,
size_t bufferSize)
{
- WEBRTC_TRACE(
- kTraceStream,
- kTraceFile,
- _id,
- "ModuleFileUtility::ReadPCMData(pcm= 0x%x, outData= 0x%x, bufSize= %"
- PRIuS ")",
- &pcm,
- outData,
- bufferSize);
+ WEBRTC_TRACE(kTraceStream, kTraceFile, _id,
+ "ModuleFileUtility::ReadPCMData(pcm= 0x%x, outData= 0x%x, "
+ "bufSize= %" PRIuS ")", &pcm, outData, bufferSize);
if(outData == NULL)
{
- WEBRTC_TRACE(kTraceError, kTraceFile, _id,"buffer NULL");
+ WEBRTC_TRACE(kTraceError, kTraceFile, _id, "buffer NULL");
}
// Readsize for 10ms of audio data (2 bytes per sample).
- uint32_t bytesRequested = 2 * codec_info_.plfreq / 100;
+ size_t bytesRequested = static_cast<size_t>(2 * codec_info_.plfreq / 100);
if(bufferSize < bytesRequested)
{
WEBRTC_TRACE(kTraceError, kTraceFile, _id,
@@ -1304,8 +1220,8 @@ int32_t ModuleFileUtility::ReadPCMData(InStream& pcm,
return -1;
}
- uint32_t bytesRead = pcm.Read(outData, bytesRequested);
- if(bytesRead < bytesRequested)
+ int bytesRead = pcm.Read(outData, bytesRequested);
+ if(bytesRead < static_cast<int>(bytesRequested))
{
if(pcm.Rewind() == -1)
{
@@ -1320,9 +1236,9 @@ int32_t ModuleFileUtility::ReadPCMData(InStream& pcm,
}
else
{
- int32_t rest = bytesRequested - bytesRead;
- int32_t len = pcm.Read(&(outData[bytesRead]), rest);
- if(len == rest)
+ size_t rest = bytesRequested - bytesRead;
+ int len = pcm.Read(&(outData[bytesRead]), rest);
+ if(len == static_cast<int>(rest))
{
bytesRead += len;
}
@@ -1334,7 +1250,7 @@ int32_t ModuleFileUtility::ReadPCMData(InStream& pcm,
if(bytesRead <= 0)
{
WEBRTC_TRACE(kTraceError, kTraceFile, _id,
- "ReadPCMData: Failed to rewind audio file.");
+ "ReadPCMData: Failed to rewind audio file.");
return -1;
}
}
@@ -1343,7 +1259,7 @@ int32_t ModuleFileUtility::ReadPCMData(InStream& pcm,
if(bytesRead <= 0)
{
WEBRTC_TRACE(kTraceStream, kTraceFile, _id,
- "ReadPCMData: end of file");
+ "ReadPCMData: end of file");
return -1;
}
_playoutPositionMs += 10;
@@ -1414,15 +1330,9 @@ int32_t ModuleFileUtility::WritePCMData(OutStream& out,
const int8_t* buffer,
const size_t dataLength)
{
- WEBRTC_TRACE(
- kTraceStream,
- kTraceFile,
- _id,
- "ModuleFileUtility::WritePCMData(out= 0x%x, buf= 0x%x, dataLen= %" PRIuS
- ")",
- &out,
- buffer,
- dataLength);
+ WEBRTC_TRACE(kTraceStream, kTraceFile, _id,
+ "ModuleFileUtility::WritePCMData(out= 0x%x, buf= 0x%x, "
+ "dataLen= %" PRIuS ")", &out, buffer, dataLength);
if(buffer == NULL)
{
@@ -1585,7 +1495,7 @@ int32_t ModuleFileUtility::FileDurationMs(const char* fileName,
case kFileFormatCompressedFile:
{
int32_t cnt = 0;
- int32_t read_len = 0;
+ int read_len = 0;
char buf[64];
do
{
@@ -1642,15 +1552,8 @@ int32_t ModuleFileUtility::FileDurationMs(const char* fileName,
uint32_t ModuleFileUtility::PlayoutPositionMs()
{
WEBRTC_TRACE(kTraceStream, kTraceFile, _id,
- "ModuleFileUtility::PlayoutPosition()");
+ "ModuleFileUtility::PlayoutPosition()");
- if(_reading)
- {
- return _playoutPositionMs;
- }
- else
- {
- return 0;
- }
+ return _reading ? _playoutPositionMs : 0;
}
} // namespace webrtc
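A minimal standalone sketch of the 10 ms read-size arithmetic that the ReadPCMData() hunk above relies on, assuming 16-bit samples; BytesPer10Ms is a hypothetical helper, and the standard %zu specifier stands in for WebRTC's PRIuS macro:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // 2 bytes per 16-bit sample, plfreq_hz / 100 samples per 10 ms window.
    size_t BytesPer10Ms(uint32_t plfreq_hz) {
      return static_cast<size_t>(2 * plfreq_hz / 100);
    }

    int main() {
      size_t bytes = BytesPer10Ms(16000);  // 320 bytes for 16 kHz mono PCM
      printf("bufSize= %zu\n", bytes);     // %zu plays the role of PRIuS here
      return 0;
    }

This is the same computation as the bytesRequested line in the hunk above, with the cast made explicit.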
diff --git a/webrtc/modules/media_file/source/media_file_utility.h b/webrtc/modules/media_file/media_file_utility.h
index 2823ceca8a..bc2fa5a2f0 100644
--- a/webrtc/modules/media_file/source/media_file_utility.h
+++ b/webrtc/modules/media_file/media_file_utility.h
@@ -9,13 +9,13 @@
*/
// Note: the class cannot be used for reading and writing at the same time.
-#ifndef WEBRTC_MODULES_MEDIA_FILE_SOURCE_MEDIA_FILE_UTILITY_H_
-#define WEBRTC_MODULES_MEDIA_FILE_SOURCE_MEDIA_FILE_UTILITY_H_
+#ifndef WEBRTC_MODULES_MEDIA_FILE_MEDIA_FILE_UTILITY_H_
+#define WEBRTC_MODULES_MEDIA_FILE_MEDIA_FILE_UTILITY_H_
#include <stdio.h>
#include "webrtc/common_types.h"
-#include "webrtc/modules/media_file/interface/media_file_defines.h"
+#include "webrtc/modules/media_file/media_file_defines.h"
namespace webrtc {
class InStream;
@@ -176,11 +176,11 @@ public:
private:
// Biggest WAV frame supported is 10 ms at 48kHz of 2 channel, 16 bit audio.
- enum{WAV_MAX_BUFFER_SIZE = 480*2*2};
+ static const size_t WAV_MAX_BUFFER_SIZE = 480 * 2 * 2;
int32_t InitWavCodec(uint32_t samplesPerSec,
- uint32_t channels,
+ size_t channels,
uint32_t bitsPerSample,
uint32_t formatTag);
@@ -194,16 +194,16 @@ private:
// stereo. format is the encode format (e.g. PCMU, PCMA, PCM etc).
// lengthInBytes is the number of bytes the audio samples are using up.
int32_t WriteWavHeader(OutStream& stream,
- const uint32_t freqInHz,
- const uint32_t bytesPerSample,
- const uint32_t channels,
- const uint32_t format,
- const uint32_t lengthInBytes);
+ uint32_t freqInHz,
+ size_t bytesPerSample,
+ size_t channels,
+ uint32_t format,
+ size_t lengthInBytes);
// Put dataLengthInBytes of audio data from stream into the audioBuffer.
// The return value is the number of bytes written to audioBuffer.
int32_t ReadWavData(InStream& stream, uint8_t* audioBuffer,
- const uint32_t dataLengthInBytes);
+ size_t dataLengthInBytes);
// Update the current audio codec being used for reading or writing
// according to codecInst.
@@ -254,10 +254,10 @@ private:
// TODO (hellner): why store multiple formats. Just store either codec_info_
// or _wavFormatObj and supply conversion functions.
WAVE_FMTINFO_header _wavFormatObj;
- int32_t _dataSize; // Chunk size if reading a WAV file
+ size_t _dataSize; // Chunk size if reading a WAV file
// Number of bytes to read. I.e. frame size in bytes. May be multiple
// chunks if reading WAV.
- int32_t _readSizeBytes;
+ size_t _readSizeBytes;
int32_t _id;
@@ -270,8 +270,8 @@ private:
MediaFileUtility_CodecType _codecId;
// The amount of bytes, on average, used for one audio sample.
- int32_t _bytesPerSample;
- int32_t _readPos;
+ size_t _bytesPerSample;
+ size_t _readPos;
// Only reading or writing can be enabled, not both.
bool _reading;
@@ -281,4 +281,4 @@ private:
uint8_t _tempData[WAV_MAX_BUFFER_SIZE];
};
} // namespace webrtc
-#endif // WEBRTC_MODULES_MEDIA_FILE_SOURCE_MEDIA_FILE_UTILITY_H_
+#endif // WEBRTC_MODULES_MEDIA_FILE_MEDIA_FILE_UTILITY_H_
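A hedged aside on the WAV_MAX_BUFFER_SIZE change above: an enum constant promotes to int in comparisons, so checking it against size_t lengths mixes signedness, while a static const size_t does not. A small illustrative sketch (the struct names are invented for contrast):

    #include <cstddef>

    struct BeforePatch { enum { kWavMax = 480 * 2 * 2 }; };             // int
    struct AfterPatch  { static const size_t kWavMax = 480 * 2 * 2; };  // size_t

    bool FitsInWavBuffer(size_t length_bytes) {
      // Sign-safe with the size_t constant; the old enum constant would
      // promote to int and could trigger sign-compare warnings.
      return length_bytes <= AfterPatch::kWavMax;
    }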
diff --git a/webrtc/modules/media_file/source/OWNERS b/webrtc/modules/media_file/source/OWNERS
deleted file mode 100644
index 3ee6b4bf5f..0000000000
--- a/webrtc/modules/media_file/source/OWNERS
+++ /dev/null
@@ -1,5 +0,0 @@
-
-# These are for the common case of adding or renaming files. If you're doing
-# structural changes, please get a review from a reviewer in this file.
-per-file *.gyp=*
-per-file *.gypi=*
diff --git a/webrtc/modules/module_common_types_unittest.cc b/webrtc/modules/module_common_types_unittest.cc
index bc0b7a1a5b..acd58476a1 100644
--- a/webrtc/modules/module_common_types_unittest.cc
+++ b/webrtc/modules/module_common_types_unittest.cc
@@ -8,7 +8,7 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
#include "testing/gtest/include/gtest/gtest.h"
diff --git a/webrtc/modules/modules.gyp b/webrtc/modules/modules.gyp
index f3ac454c19..a7dc0ffe53 100644
--- a/webrtc/modules/modules.gyp
+++ b/webrtc/modules/modules.gyp
@@ -49,352 +49,6 @@
},
'targets': [
{
- 'target_name': 'modules_unittests',
- 'type': '<(gtest_target_type)',
- 'defines': [
- '<@(audio_coding_defines)',
- ],
- 'dependencies': [
- 'acm_receive_test',
- 'acm_send_test',
- 'audio_coding_module',
- 'audio_conference_mixer',
- 'audio_device' ,
- 'audio_processing',
- 'audioproc_test_utils',
- 'bitrate_controller',
- 'bwe_simulator',
- 'cng',
- 'desktop_capture',
- 'isac_fix',
- 'media_file',
- 'neteq',
- 'neteq_test_support',
- 'neteq_unittest_tools',
- 'paced_sender',
- 'pcm16b', # Needed by NetEq tests.
- 'red',
- 'remote_bitrate_estimator',
- 'rtp_rtcp',
- 'video_codecs_test_framework',
- 'video_processing',
- 'webrtc_utility',
- 'webrtc_video_coding',
- '<@(neteq_dependencies)',
- '<(DEPTH)/testing/gmock.gyp:gmock',
- '<(DEPTH)/testing/gtest.gyp:gtest',
- '<(DEPTH)/third_party/gflags/gflags.gyp:gflags',
- '<(webrtc_root)/common.gyp:webrtc_common',
- '<(webrtc_root)/common_audio/common_audio.gyp:common_audio',
- '<(webrtc_root)/modules/modules.gyp:video_capture',
- '<(webrtc_root)/modules/video_coding/codecs/vp8/vp8.gyp:webrtc_vp8',
- '<(webrtc_root)/modules/video_coding/codecs/vp9/vp9.gyp:webrtc_vp9',
- '<(webrtc_root)/system_wrappers/system_wrappers.gyp:system_wrappers',
- '<(webrtc_root)/test/test.gyp:frame_generator',
- '<(webrtc_root)/test/test.gyp:rtp_test_utils',
- '<(webrtc_root)/test/test.gyp:test_support_main',
- '<(webrtc_root)/test/webrtc_test_common.gyp:webrtc_test_common',
- '<(webrtc_root)/tools/tools.gyp:agc_test_utils',
- ],
- 'sources': [
- 'audio_coding/codecs/cng/audio_encoder_cng_unittest.cc',
- 'audio_coding/main/acm2/acm_receiver_unittest_oldapi.cc',
- 'audio_coding/main/acm2/audio_coding_module_unittest_oldapi.cc',
- 'audio_coding/main/acm2/call_statistics_unittest.cc',
- 'audio_coding/main/acm2/codec_owner_unittest.cc',
- 'audio_coding/main/acm2/initial_delay_manager_unittest.cc',
- 'audio_coding/codecs/cng/cng_unittest.cc',
- 'audio_coding/codecs/isac/fix/source/filters_unittest.cc',
- 'audio_coding/codecs/isac/fix/source/filterbanks_unittest.cc',
- 'audio_coding/codecs/isac/fix/source/lpc_masking_model_unittest.cc',
- 'audio_coding/codecs/isac/fix/source/transform_unittest.cc',
- 'audio_coding/codecs/isac/main/source/audio_encoder_isac_unittest.cc',
- 'audio_coding/codecs/isac/main/source/isac_unittest.cc',
- 'audio_coding/codecs/isac/unittest.cc',
- 'audio_coding/codecs/opus/audio_encoder_opus_unittest.cc',
- 'audio_coding/codecs/opus/opus_unittest.cc',
- 'audio_coding/codecs/red/audio_encoder_copy_red_unittest.cc',
- 'audio_coding/neteq/audio_classifier_unittest.cc',
- 'audio_coding/neteq/audio_multi_vector_unittest.cc',
- 'audio_coding/neteq/audio_vector_unittest.cc',
- 'audio_coding/neteq/background_noise_unittest.cc',
- 'audio_coding/neteq/buffer_level_filter_unittest.cc',
- 'audio_coding/neteq/comfort_noise_unittest.cc',
- 'audio_coding/neteq/decision_logic_unittest.cc',
- 'audio_coding/neteq/decoder_database_unittest.cc',
- 'audio_coding/neteq/delay_manager_unittest.cc',
- 'audio_coding/neteq/delay_peak_detector_unittest.cc',
- 'audio_coding/neteq/dsp_helper_unittest.cc',
- 'audio_coding/neteq/dtmf_buffer_unittest.cc',
- 'audio_coding/neteq/dtmf_tone_generator_unittest.cc',
- 'audio_coding/neteq/expand_unittest.cc',
- 'audio_coding/neteq/merge_unittest.cc',
- 'audio_coding/neteq/nack_unittest.cc',
- 'audio_coding/neteq/neteq_external_decoder_unittest.cc',
- 'audio_coding/neteq/neteq_impl_unittest.cc',
- 'audio_coding/neteq/neteq_network_stats_unittest.cc',
- 'audio_coding/neteq/neteq_stereo_unittest.cc',
- 'audio_coding/neteq/neteq_unittest.cc',
- 'audio_coding/neteq/normal_unittest.cc',
- 'audio_coding/neteq/packet_buffer_unittest.cc',
- 'audio_coding/neteq/payload_splitter_unittest.cc',
- 'audio_coding/neteq/post_decode_vad_unittest.cc',
- 'audio_coding/neteq/random_vector_unittest.cc',
- 'audio_coding/neteq/sync_buffer_unittest.cc',
- 'audio_coding/neteq/timestamp_scaler_unittest.cc',
- 'audio_coding/neteq/time_stretch_unittest.cc',
- 'audio_coding/neteq/mock/mock_audio_decoder.h',
- 'audio_coding/neteq/mock/mock_audio_vector.h',
- 'audio_coding/neteq/mock/mock_buffer_level_filter.h',
- 'audio_coding/neteq/mock/mock_decoder_database.h',
- 'audio_coding/neteq/mock/mock_delay_manager.h',
- 'audio_coding/neteq/mock/mock_delay_peak_detector.h',
- 'audio_coding/neteq/mock/mock_dtmf_buffer.h',
- 'audio_coding/neteq/mock/mock_dtmf_tone_generator.h',
- 'audio_coding/neteq/mock/mock_expand.h',
- 'audio_coding/neteq/mock/mock_external_decoder_pcm16b.h',
- 'audio_coding/neteq/mock/mock_packet_buffer.h',
- 'audio_coding/neteq/mock/mock_payload_splitter.h',
- 'audio_coding/neteq/tools/input_audio_file_unittest.cc',
- 'audio_coding/neteq/tools/packet_unittest.cc',
- 'audio_conference_mixer/test/audio_conference_mixer_unittest.cc',
- 'audio_device/fine_audio_buffer_unittest.cc',
- 'audio_processing/aec/echo_cancellation_unittest.cc',
- 'audio_processing/aec/system_delay_unittest.cc',
- 'audio_processing/agc/agc_manager_direct_unittest.cc',
- # TODO(ajm): Fix to match new interface.
- # 'audio_processing/agc/agc_unittest.cc',
- 'audio_processing/agc/histogram_unittest.cc',
- 'audio_processing/agc/mock_agc.h',
- 'audio_processing/beamformer/array_util_unittest.cc',
- 'audio_processing/beamformer/complex_matrix_unittest.cc',
- 'audio_processing/beamformer/covariance_matrix_generator_unittest.cc',
- 'audio_processing/beamformer/matrix_unittest.cc',
- 'audio_processing/beamformer/mock_nonlinear_beamformer.h',
- 'audio_processing/beamformer/nonlinear_beamformer_unittest.cc',
- 'audio_processing/echo_cancellation_impl_unittest.cc',
- 'audio_processing/intelligibility/intelligibility_enhancer_unittest.cc',
- 'audio_processing/intelligibility/intelligibility_utils_unittest.cc',
- 'audio_processing/splitting_filter_unittest.cc',
- 'audio_processing/transient/dyadic_decimator_unittest.cc',
- 'audio_processing/transient/file_utils.cc',
- 'audio_processing/transient/file_utils.h',
- 'audio_processing/transient/file_utils_unittest.cc',
- 'audio_processing/transient/moving_moments_unittest.cc',
- 'audio_processing/transient/transient_detector_unittest.cc',
- 'audio_processing/transient/transient_suppressor_unittest.cc',
- 'audio_processing/transient/wpd_node_unittest.cc',
- 'audio_processing/transient/wpd_tree_unittest.cc',
- 'audio_processing/utility/delay_estimator_unittest.cc',
- 'audio_processing/vad/gmm_unittest.cc',
- 'audio_processing/vad/pitch_based_vad_unittest.cc',
- 'audio_processing/vad/pitch_internal_unittest.cc',
- 'audio_processing/vad/pole_zero_filter_unittest.cc',
- 'audio_processing/vad/standalone_vad_unittest.cc',
- 'audio_processing/vad/vad_audio_proc_unittest.cc',
- 'audio_processing/vad/vad_circular_buffer_unittest.cc',
- 'audio_processing/vad/voice_activity_detector_unittest.cc',
- 'bitrate_controller/bitrate_allocator_unittest.cc',
- 'bitrate_controller/bitrate_controller_unittest.cc',
- 'bitrate_controller/send_side_bandwidth_estimation_unittest.cc',
- 'desktop_capture/desktop_and_cursor_composer_unittest.cc',
- 'desktop_capture/desktop_region_unittest.cc',
- 'desktop_capture/differ_block_unittest.cc',
- 'desktop_capture/differ_unittest.cc',
- 'desktop_capture/mouse_cursor_monitor_unittest.cc',
- 'desktop_capture/screen_capturer_helper_unittest.cc',
- 'desktop_capture/screen_capturer_mac_unittest.cc',
- 'desktop_capture/screen_capturer_mock_objects.h',
- 'desktop_capture/screen_capturer_unittest.cc',
- 'desktop_capture/window_capturer_unittest.cc',
- 'desktop_capture/win/cursor_unittest.cc',
- 'desktop_capture/win/cursor_unittest_resources.h',
- 'desktop_capture/win/cursor_unittest_resources.rc',
- 'media_file/source/media_file_unittest.cc',
- 'module_common_types_unittest.cc',
- 'pacing/bitrate_prober_unittest.cc',
- 'pacing/paced_sender_unittest.cc',
- 'pacing/packet_router_unittest.cc',
- 'remote_bitrate_estimator/bwe_simulations.cc',
- 'remote_bitrate_estimator/include/mock/mock_remote_bitrate_observer.h',
- 'remote_bitrate_estimator/include/mock/mock_remote_bitrate_estimator.h',
- 'remote_bitrate_estimator/inter_arrival_unittest.cc',
- 'remote_bitrate_estimator/overuse_detector_unittest.cc',
- 'remote_bitrate_estimator/rate_statistics_unittest.cc',
- 'remote_bitrate_estimator/remote_bitrate_estimator_abs_send_time_unittest.cc',
- 'remote_bitrate_estimator/remote_bitrate_estimator_single_stream_unittest.cc',
- 'remote_bitrate_estimator/remote_bitrate_estimator_unittest_helper.cc',
- 'remote_bitrate_estimator/remote_bitrate_estimator_unittest_helper.h',
- 'remote_bitrate_estimator/remote_estimator_proxy_unittest.cc',
- 'remote_bitrate_estimator/send_time_history_unittest.cc',
- 'remote_bitrate_estimator/test/bwe_test_framework_unittest.cc',
- 'remote_bitrate_estimator/test/bwe_unittest.cc',
- 'remote_bitrate_estimator/test/metric_recorder_unittest.cc',
- 'remote_bitrate_estimator/test/estimators/nada_unittest.cc',
- 'remote_bitrate_estimator/transport_feedback_adapter_unittest.cc',
- 'rtp_rtcp/source/mock/mock_rtp_payload_strategy.h',
- 'rtp_rtcp/source/byte_io_unittest.cc',
- 'rtp_rtcp/source/fec_receiver_unittest.cc',
- 'rtp_rtcp/source/fec_test_helper.cc',
- 'rtp_rtcp/source/fec_test_helper.h',
- 'rtp_rtcp/source/h264_sps_parser_unittest.cc',
- 'rtp_rtcp/source/h264_bitstream_parser_unittest.cc',
- 'rtp_rtcp/source/nack_rtx_unittest.cc',
- 'rtp_rtcp/source/packet_loss_stats_unittest.cc',
- 'rtp_rtcp/source/producer_fec_unittest.cc',
- 'rtp_rtcp/source/receive_statistics_unittest.cc',
- 'rtp_rtcp/source/remote_ntp_time_estimator_unittest.cc',
- 'rtp_rtcp/source/rtcp_format_remb_unittest.cc',
- 'rtp_rtcp/source/rtcp_packet_unittest.cc',
- 'rtp_rtcp/source/rtcp_packet/transport_feedback_unittest.cc',
- 'rtp_rtcp/source/rtcp_receiver_unittest.cc',
- 'rtp_rtcp/source/rtcp_sender_unittest.cc',
- 'rtp_rtcp/source/rtcp_utility_unittest.cc',
- 'rtp_rtcp/source/rtp_fec_unittest.cc',
- 'rtp_rtcp/source/rtp_format_h264_unittest.cc',
- 'rtp_rtcp/source/rtp_format_vp8_test_helper.cc',
- 'rtp_rtcp/source/rtp_format_vp8_test_helper.h',
- 'rtp_rtcp/source/rtp_format_vp8_unittest.cc',
- 'rtp_rtcp/source/rtp_format_vp9_unittest.cc',
- 'rtp_rtcp/source/rtp_packet_history_unittest.cc',
- 'rtp_rtcp/source/rtp_payload_registry_unittest.cc',
- 'rtp_rtcp/source/rtp_rtcp_impl_unittest.cc',
- 'rtp_rtcp/source/rtp_header_extension_unittest.cc',
- 'rtp_rtcp/source/rtp_sender_unittest.cc',
- 'rtp_rtcp/source/vp8_partition_aggregator_unittest.cc',
- 'rtp_rtcp/test/testAPI/test_api.cc',
- 'rtp_rtcp/test/testAPI/test_api.h',
- 'rtp_rtcp/test/testAPI/test_api_audio.cc',
- 'rtp_rtcp/test/testAPI/test_api_rtcp.cc',
- 'rtp_rtcp/test/testAPI/test_api_video.cc',
- 'utility/source/audio_frame_operations_unittest.cc',
- 'utility/source/file_player_unittests.cc',
- 'utility/source/process_thread_impl_unittest.cc',
- 'video_coding/codecs/test/packet_manipulator_unittest.cc',
- 'video_coding/codecs/test/stats_unittest.cc',
- 'video_coding/codecs/test/videoprocessor_unittest.cc',
- 'video_coding/codecs/vp8/default_temporal_layers_unittest.cc',
- 'video_coding/codecs/vp8/reference_picture_selection_unittest.cc',
- 'video_coding/codecs/vp8/screenshare_layers_unittest.cc',
- 'video_coding/codecs/vp8/simulcast_encoder_adapter_unittest.cc',
- 'video_coding/codecs/vp8/simulcast_unittest.cc',
- 'video_coding/codecs/vp8/simulcast_unittest.h',
- 'video_coding/main/interface/mock/mock_vcm_callbacks.h',
- 'video_coding/main/source/decoding_state_unittest.cc',
- 'video_coding/main/source/jitter_buffer_unittest.cc',
- 'video_coding/main/source/jitter_estimator_tests.cc',
- 'video_coding/main/source/media_optimization_unittest.cc',
- 'video_coding/main/source/receiver_unittest.cc',
- 'video_coding/main/source/session_info_unittest.cc',
- 'video_coding/main/source/timing_unittest.cc',
- 'video_coding/main/source/video_coding_robustness_unittest.cc',
- 'video_coding/main/source/video_receiver_unittest.cc',
- 'video_coding/main/source/video_sender_unittest.cc',
- 'video_coding/main/source/qm_select_unittest.cc',
- 'video_coding/main/source/test/stream_generator.cc',
- 'video_coding/main/source/test/stream_generator.h',
- 'video_coding/utility/quality_scaler_unittest.cc',
- 'video_processing/main/test/unit_test/brightness_detection_test.cc',
- 'video_processing/main/test/unit_test/content_metrics_test.cc',
- 'video_processing/main/test/unit_test/deflickering_test.cc',
- 'video_processing/main/test/unit_test/video_processing_unittest.cc',
- 'video_processing/main/test/unit_test/video_processing_unittest.h',
- ],
- 'conditions': [
- ['enable_bwe_test_logging==1', {
- 'defines': [ 'BWE_TEST_LOGGING_COMPILE_TIME_ENABLE=1' ],
- }, {
- 'defines': [ 'BWE_TEST_LOGGING_COMPILE_TIME_ENABLE=0' ],
- 'sources!': [
- 'remote_bitrate_estimator/test/bwe_test_logging.cc'
- ],
- }],
- # Run screen/window capturer tests only on platforms where they are
- # supported.
- ['desktop_capture_supported==0', {
- 'sources!': [
- 'desktop_capture/desktop_and_cursor_composer_unittest.cc',
- 'desktop_capture/mouse_cursor_monitor_unittest.cc',
- 'desktop_capture/screen_capturer_helper_unittest.cc',
- 'desktop_capture/screen_capturer_mac_unittest.cc',
- 'desktop_capture/screen_capturer_mock_objects.h',
- 'desktop_capture/screen_capturer_unittest.cc',
- 'desktop_capture/window_capturer_unittest.cc',
- ],
- }],
- ['prefer_fixed_point==1', {
- 'defines': [ 'WEBRTC_AUDIOPROC_FIXED_PROFILE' ],
- }, {
- 'defines': [ 'WEBRTC_AUDIOPROC_FLOAT_PROFILE' ],
- }],
- ['enable_protobuf==1', {
- 'defines': [
- 'WEBRTC_AUDIOPROC_DEBUG_DUMP',
- ],
- 'dependencies': [
- 'audioproc_protobuf_utils',
- 'audioproc_unittest_proto',
- ],
- 'sources': [
- 'audio_processing/audio_processing_impl_unittest.cc',
- 'audio_processing/test/audio_processing_unittest.cc',
- 'audio_processing/test/test_utils.h',
- ],
- }],
- ['build_libvpx==1', {
- 'dependencies': [
- '<(libvpx_dir)/libvpx.gyp:libvpx_new',
- ],
- }],
- ['OS=="android"', {
- 'dependencies': [
- '<(DEPTH)/testing/android/native_test.gyp:native_test_native_code',
- ],
- # Need to disable error due to the line in
- # base/android/jni_android.h triggering it:
- # const BASE_EXPORT jobject GetApplicationContext()
- # error: type qualifiers ignored on function return type
- 'cflags': [
- '-Wno-ignored-qualifiers',
- ],
- 'sources': [
- 'audio_device/android/audio_device_unittest.cc',
- 'audio_device/android/audio_manager_unittest.cc',
- 'audio_device/android/ensure_initialized.cc',
- 'audio_device/android/ensure_initialized.h',
- ],
- }],
- ['OS=="ios"', {
- 'sources': [
- 'video_coding/codecs/h264/h264_video_toolbox_nalu_unittest.cc',
- 'audio_device/ios/audio_device_unittest_ios.cc',
- ],
- 'mac_bundle_resources': [
- '<(DEPTH)/resources/audio_coding/speech_mono_16kHz.pcm',
- '<(DEPTH)/resources/audio_coding/testfile32kHz.pcm',
- '<(DEPTH)/resources/audio_coding/teststereo32kHz.pcm',
- '<(DEPTH)/resources/audio_device/audio_short16.pcm',
- '<(DEPTH)/resources/audio_device/audio_short44.pcm',
- '<(DEPTH)/resources/audio_device/audio_short48.pcm',
- '<(DEPTH)/resources/audio_processing/agc/agc_no_circular_buffer.dat',
- '<(DEPTH)/resources/audio_processing/agc/agc_pitch_gain.dat',
- '<(DEPTH)/resources/audio_processing/agc/agc_pitch_lag.dat',
- '<(DEPTH)/resources/audio_processing/agc/agc_spectral_peak.dat',
- '<(DEPTH)/resources/audio_processing/agc/agc_voicing_prob.dat',
- '<(DEPTH)/resources/audio_processing/agc/agc_with_circular_buffer.dat',
- '<(DEPTH)/resources/short_mixed_mono_48.dat',
- '<(DEPTH)/resources/short_mixed_mono_48.pcm',
- '<(DEPTH)/resources/short_mixed_stereo_48.dat',
- '<(DEPTH)/resources/short_mixed_stereo_48.pcm',
- ],
- }],
- ],
- # Disable warnings to enable Win64 build, issue 1323.
- 'msvs_disabled_warnings': [
- 4267, # size_t to int truncation.
- ],
- },
- {
'target_name': 'modules_tests',
'type': '<(gtest_target_type)',
'dependencies': [
@@ -417,25 +71,24 @@
'<@(audio_coding_defines)',
],
'sources': [
- 'audio_coding/main/test/APITest.cc',
- 'audio_coding/main/test/Channel.cc',
- 'audio_coding/main/test/EncodeDecodeTest.cc',
- 'audio_coding/main/test/PCMFile.cc',
- 'audio_coding/main/test/PacketLossTest.cc',
- 'audio_coding/main/test/RTPFile.cc',
- 'audio_coding/main/test/SpatialAudio.cc',
- 'audio_coding/main/test/TestAllCodecs.cc',
- 'audio_coding/main/test/TestRedFec.cc',
- 'audio_coding/main/test/TestStereo.cc',
- 'audio_coding/main/test/TestVADDTX.cc',
- 'audio_coding/main/test/Tester.cc',
- 'audio_coding/main/test/TimedTrace.cc',
- 'audio_coding/main/test/TwoWayCommunication.cc',
- 'audio_coding/main/test/iSACTest.cc',
- 'audio_coding/main/test/initial_delay_unittest.cc',
- 'audio_coding/main/test/opus_test.cc',
- 'audio_coding/main/test/target_delay_unittest.cc',
- 'audio_coding/main/test/utility.cc',
+ 'audio_coding/test/APITest.cc',
+ 'audio_coding/test/Channel.cc',
+ 'audio_coding/test/EncodeDecodeTest.cc',
+ 'audio_coding/test/PCMFile.cc',
+ 'audio_coding/test/PacketLossTest.cc',
+ 'audio_coding/test/RTPFile.cc',
+ 'audio_coding/test/SpatialAudio.cc',
+ 'audio_coding/test/TestAllCodecs.cc',
+ 'audio_coding/test/TestRedFec.cc',
+ 'audio_coding/test/TestStereo.cc',
+ 'audio_coding/test/TestVADDTX.cc',
+ 'audio_coding/test/Tester.cc',
+ 'audio_coding/test/TimedTrace.cc',
+ 'audio_coding/test/TwoWayCommunication.cc',
+ 'audio_coding/test/iSACTest.cc',
+ 'audio_coding/test/opus_test.cc',
+ 'audio_coding/test/target_delay_unittest.cc',
+ 'audio_coding/test/utility.cc',
'rtp_rtcp/test/testFec/test_fec.cc',
'video_coding/codecs/test/videoprocessor_integrationtest.cc',
'video_coding/codecs/vp8/test/vp8_impl_unittest.cc',
@@ -450,6 +103,379 @@
},
],
'conditions': [
+ # Does not compile on iOS for ia32 or x64: webrtc:4755.
+ ['OS!="ios" or target_arch=="arm" or target_arch=="arm64"', {
+ 'targets': [
+ {
+ 'target_name': 'modules_unittests',
+ 'type': '<(gtest_target_type)',
+ 'defines': [
+ '<@(audio_coding_defines)',
+ ],
+ 'dependencies': [
+ 'acm_receive_test',
+ 'acm_send_test',
+ 'audio_coding_module',
+ 'audio_conference_mixer',
+ 'audio_device' ,
+ 'audio_processing',
+ 'audioproc_test_utils',
+ 'bitrate_controller',
+ 'bwe_simulator',
+ 'cng',
+ 'desktop_capture',
+ 'isac_fix',
+ 'media_file',
+ 'neteq',
+ 'neteq_test_support',
+ 'neteq_unittest_tools',
+ 'paced_sender',
+ 'pcm16b', # Needed by NetEq tests.
+ 'red',
+ 'remote_bitrate_estimator',
+ 'rtp_rtcp',
+ 'video_codecs_test_framework',
+ 'video_processing',
+ 'webrtc_utility',
+ 'webrtc_video_coding',
+ '<@(neteq_dependencies)',
+ '<(DEPTH)/testing/gmock.gyp:gmock',
+ '<(DEPTH)/testing/gtest.gyp:gtest',
+ '<(DEPTH)/third_party/gflags/gflags.gyp:gflags',
+ '<(webrtc_root)/common.gyp:webrtc_common',
+ '<(webrtc_root)/common_audio/common_audio.gyp:common_audio',
+ '<(webrtc_root)/common_video/common_video.gyp:common_video',
+ '<(webrtc_root)/modules/modules.gyp:video_capture',
+ '<(webrtc_root)/modules/video_coding/codecs/vp8/vp8.gyp:webrtc_vp8',
+ '<(webrtc_root)/modules/video_coding/codecs/vp9/vp9.gyp:webrtc_vp9',
+ '<(webrtc_root)/system_wrappers/system_wrappers.gyp:system_wrappers',
+ '<(webrtc_root)/test/test.gyp:fake_video_frames',
+ '<(webrtc_root)/test/test.gyp:rtp_test_utils',
+ '<(webrtc_root)/test/test.gyp:test_support_main',
+ '<(webrtc_root)/test/webrtc_test_common.gyp:webrtc_test_common',
+ '<(webrtc_root)/tools/tools.gyp:agc_test_utils',
+ ],
+ 'sources': [
+ 'audio_coding/codecs/cng/audio_encoder_cng_unittest.cc',
+ 'audio_coding/acm2/acm_receiver_unittest_oldapi.cc',
+ 'audio_coding/acm2/audio_coding_module_unittest_oldapi.cc',
+ 'audio_coding/acm2/call_statistics_unittest.cc',
+ 'audio_coding/acm2/codec_manager_unittest.cc',
+ 'audio_coding/acm2/initial_delay_manager_unittest.cc',
+ 'audio_coding/acm2/rent_a_codec_unittest.cc',
+ 'audio_coding/codecs/cng/cng_unittest.cc',
+ 'audio_coding/codecs/isac/fix/source/filters_unittest.cc',
+ 'audio_coding/codecs/isac/fix/source/filterbanks_unittest.cc',
+ 'audio_coding/codecs/isac/fix/source/lpc_masking_model_unittest.cc',
+ 'audio_coding/codecs/isac/fix/source/transform_unittest.cc',
+ 'audio_coding/codecs/isac/main/source/audio_encoder_isac_unittest.cc',
+ 'audio_coding/codecs/isac/main/source/isac_unittest.cc',
+ 'audio_coding/codecs/isac/unittest.cc',
+ 'audio_coding/codecs/opus/audio_encoder_opus_unittest.cc',
+ 'audio_coding/codecs/opus/opus_unittest.cc',
+ 'audio_coding/codecs/red/audio_encoder_copy_red_unittest.cc',
+ 'audio_coding/neteq/audio_classifier_unittest.cc',
+ 'audio_coding/neteq/audio_multi_vector_unittest.cc',
+ 'audio_coding/neteq/audio_vector_unittest.cc',
+ 'audio_coding/neteq/background_noise_unittest.cc',
+ 'audio_coding/neteq/buffer_level_filter_unittest.cc',
+ 'audio_coding/neteq/comfort_noise_unittest.cc',
+ 'audio_coding/neteq/decision_logic_unittest.cc',
+ 'audio_coding/neteq/decoder_database_unittest.cc',
+ 'audio_coding/neteq/delay_manager_unittest.cc',
+ 'audio_coding/neteq/delay_peak_detector_unittest.cc',
+ 'audio_coding/neteq/dsp_helper_unittest.cc',
+ 'audio_coding/neteq/dtmf_buffer_unittest.cc',
+ 'audio_coding/neteq/dtmf_tone_generator_unittest.cc',
+ 'audio_coding/neteq/expand_unittest.cc',
+ 'audio_coding/neteq/merge_unittest.cc',
+ 'audio_coding/neteq/nack_unittest.cc',
+ 'audio_coding/neteq/neteq_external_decoder_unittest.cc',
+ 'audio_coding/neteq/neteq_impl_unittest.cc',
+ 'audio_coding/neteq/neteq_network_stats_unittest.cc',
+ 'audio_coding/neteq/neteq_stereo_unittest.cc',
+ 'audio_coding/neteq/neteq_unittest.cc',
+ 'audio_coding/neteq/normal_unittest.cc',
+ 'audio_coding/neteq/packet_buffer_unittest.cc',
+ 'audio_coding/neteq/payload_splitter_unittest.cc',
+ 'audio_coding/neteq/post_decode_vad_unittest.cc',
+ 'audio_coding/neteq/random_vector_unittest.cc',
+ 'audio_coding/neteq/sync_buffer_unittest.cc',
+ 'audio_coding/neteq/timestamp_scaler_unittest.cc',
+ 'audio_coding/neteq/time_stretch_unittest.cc',
+ 'audio_coding/neteq/mock/mock_audio_decoder.h',
+ 'audio_coding/neteq/mock/mock_audio_vector.h',
+ 'audio_coding/neteq/mock/mock_buffer_level_filter.h',
+ 'audio_coding/neteq/mock/mock_decoder_database.h',
+ 'audio_coding/neteq/mock/mock_delay_manager.h',
+ 'audio_coding/neteq/mock/mock_delay_peak_detector.h',
+ 'audio_coding/neteq/mock/mock_dtmf_buffer.h',
+ 'audio_coding/neteq/mock/mock_dtmf_tone_generator.h',
+ 'audio_coding/neteq/mock/mock_expand.h',
+ 'audio_coding/neteq/mock/mock_external_decoder_pcm16b.h',
+ 'audio_coding/neteq/mock/mock_packet_buffer.h',
+ 'audio_coding/neteq/mock/mock_payload_splitter.h',
+ 'audio_coding/neteq/tools/input_audio_file_unittest.cc',
+ 'audio_coding/neteq/tools/packet_unittest.cc',
+ 'audio_conference_mixer/test/audio_conference_mixer_unittest.cc',
+ 'audio_device/fine_audio_buffer_unittest.cc',
+ 'audio_processing/aec/echo_cancellation_unittest.cc',
+ 'audio_processing/aec/system_delay_unittest.cc',
+ 'audio_processing/agc/agc_manager_direct_unittest.cc',
+ # TODO(ajm): Fix to match new interface.
+ # 'audio_processing/agc/agc_unittest.cc',
+ 'audio_processing/agc/histogram_unittest.cc',
+ 'audio_processing/agc/mock_agc.h',
+ 'audio_processing/beamformer/array_util_unittest.cc',
+ 'audio_processing/beamformer/complex_matrix_unittest.cc',
+ 'audio_processing/beamformer/covariance_matrix_generator_unittest.cc',
+ 'audio_processing/beamformer/matrix_unittest.cc',
+ 'audio_processing/beamformer/mock_nonlinear_beamformer.h',
+ 'audio_processing/beamformer/nonlinear_beamformer_unittest.cc',
+ 'audio_processing/echo_cancellation_impl_unittest.cc',
+ 'audio_processing/intelligibility/intelligibility_enhancer_unittest.cc',
+ 'audio_processing/intelligibility/intelligibility_utils_unittest.cc',
+ 'audio_processing/splitting_filter_unittest.cc',
+ 'audio_processing/transient/dyadic_decimator_unittest.cc',
+ 'audio_processing/transient/file_utils.cc',
+ 'audio_processing/transient/file_utils.h',
+ 'audio_processing/transient/file_utils_unittest.cc',
+ 'audio_processing/transient/moving_moments_unittest.cc',
+ 'audio_processing/transient/transient_detector_unittest.cc',
+ 'audio_processing/transient/transient_suppressor_unittest.cc',
+ 'audio_processing/transient/wpd_node_unittest.cc',
+ 'audio_processing/transient/wpd_tree_unittest.cc',
+ 'audio_processing/utility/delay_estimator_unittest.cc',
+ 'audio_processing/vad/gmm_unittest.cc',
+ 'audio_processing/vad/pitch_based_vad_unittest.cc',
+ 'audio_processing/vad/pitch_internal_unittest.cc',
+ 'audio_processing/vad/pole_zero_filter_unittest.cc',
+ 'audio_processing/vad/standalone_vad_unittest.cc',
+ 'audio_processing/vad/vad_audio_proc_unittest.cc',
+ 'audio_processing/vad/vad_circular_buffer_unittest.cc',
+ 'audio_processing/vad/voice_activity_detector_unittest.cc',
+ 'bitrate_controller/bitrate_controller_unittest.cc',
+ 'bitrate_controller/send_side_bandwidth_estimation_unittest.cc',
+ 'desktop_capture/desktop_and_cursor_composer_unittest.cc',
+ 'desktop_capture/desktop_region_unittest.cc',
+ 'desktop_capture/differ_block_unittest.cc',
+ 'desktop_capture/differ_unittest.cc',
+ 'desktop_capture/mouse_cursor_monitor_unittest.cc',
+ 'desktop_capture/screen_capturer_helper_unittest.cc',
+ 'desktop_capture/screen_capturer_mac_unittest.cc',
+ 'desktop_capture/screen_capturer_mock_objects.h',
+ 'desktop_capture/screen_capturer_unittest.cc',
+ 'desktop_capture/window_capturer_unittest.cc',
+ 'desktop_capture/win/cursor_unittest.cc',
+ 'desktop_capture/win/cursor_unittest_resources.h',
+ 'desktop_capture/win/cursor_unittest_resources.rc',
+ 'media_file/media_file_unittest.cc',
+ 'module_common_types_unittest.cc',
+ 'pacing/bitrate_prober_unittest.cc',
+ 'pacing/paced_sender_unittest.cc',
+ 'pacing/packet_router_unittest.cc',
+ 'remote_bitrate_estimator/bwe_simulations.cc',
+ 'remote_bitrate_estimator/include/mock/mock_remote_bitrate_observer.h',
+ 'remote_bitrate_estimator/include/mock/mock_remote_bitrate_estimator.h',
+ 'remote_bitrate_estimator/inter_arrival_unittest.cc',
+ 'remote_bitrate_estimator/overuse_detector_unittest.cc',
+ 'remote_bitrate_estimator/rate_statistics_unittest.cc',
+ 'remote_bitrate_estimator/remote_bitrate_estimator_abs_send_time_unittest.cc',
+ 'remote_bitrate_estimator/remote_bitrate_estimator_single_stream_unittest.cc',
+ 'remote_bitrate_estimator/remote_bitrate_estimator_unittest_helper.cc',
+ 'remote_bitrate_estimator/remote_bitrate_estimator_unittest_helper.h',
+ 'remote_bitrate_estimator/remote_estimator_proxy_unittest.cc',
+ 'remote_bitrate_estimator/send_time_history_unittest.cc',
+ 'remote_bitrate_estimator/test/bwe_test_framework_unittest.cc',
+ 'remote_bitrate_estimator/test/bwe_unittest.cc',
+ 'remote_bitrate_estimator/test/metric_recorder_unittest.cc',
+ 'remote_bitrate_estimator/test/estimators/nada_unittest.cc',
+ 'remote_bitrate_estimator/transport_feedback_adapter_unittest.cc',
+ 'rtp_rtcp/source/mock/mock_rtp_payload_strategy.h',
+ 'rtp_rtcp/source/byte_io_unittest.cc',
+ 'rtp_rtcp/source/fec_receiver_unittest.cc',
+ 'rtp_rtcp/source/fec_test_helper.cc',
+ 'rtp_rtcp/source/fec_test_helper.h',
+ 'rtp_rtcp/source/h264_sps_parser_unittest.cc',
+ 'rtp_rtcp/source/h264_bitstream_parser_unittest.cc',
+ 'rtp_rtcp/source/nack_rtx_unittest.cc',
+ 'rtp_rtcp/source/packet_loss_stats_unittest.cc',
+ 'rtp_rtcp/source/producer_fec_unittest.cc',
+ 'rtp_rtcp/source/receive_statistics_unittest.cc',
+ 'rtp_rtcp/source/remote_ntp_time_estimator_unittest.cc',
+ 'rtp_rtcp/source/rtcp_format_remb_unittest.cc',
+ 'rtp_rtcp/source/rtcp_packet_unittest.cc',
+ 'rtp_rtcp/source/rtcp_packet/app_unittest.cc',
+ 'rtp_rtcp/source/rtcp_packet/bye_unittest.cc',
+ 'rtp_rtcp/source/rtcp_packet/compound_packet_unittest.cc',
+ 'rtp_rtcp/source/rtcp_packet/dlrr_unittest.cc',
+ 'rtp_rtcp/source/rtcp_packet/extended_jitter_report_unittest.cc',
+ 'rtp_rtcp/source/rtcp_packet/nack_unittest.cc',
+ 'rtp_rtcp/source/rtcp_packet/pli_unittest.cc',
+ 'rtp_rtcp/source/rtcp_packet/receiver_report_unittest.cc',
+ 'rtp_rtcp/source/rtcp_packet/report_block_unittest.cc',
+ 'rtp_rtcp/source/rtcp_packet/rrtr_unittest.cc',
+ 'rtp_rtcp/source/rtcp_packet/sli_unittest.cc',
+ 'rtp_rtcp/source/rtcp_packet/tmmbn_unittest.cc',
+ 'rtp_rtcp/source/rtcp_packet/tmmbr_unittest.cc',
+ 'rtp_rtcp/source/rtcp_packet/transport_feedback_unittest.cc',
+ 'rtp_rtcp/source/rtcp_packet/voip_metric_unittest.cc',
+ 'rtp_rtcp/source/rtcp_receiver_unittest.cc',
+ 'rtp_rtcp/source/rtcp_sender_unittest.cc',
+ 'rtp_rtcp/source/rtcp_utility_unittest.cc',
+ 'rtp_rtcp/source/rtp_fec_unittest.cc',
+ 'rtp_rtcp/source/rtp_format_h264_unittest.cc',
+ 'rtp_rtcp/source/rtp_format_vp8_test_helper.cc',
+ 'rtp_rtcp/source/rtp_format_vp8_test_helper.h',
+ 'rtp_rtcp/source/rtp_format_vp8_unittest.cc',
+ 'rtp_rtcp/source/rtp_format_vp9_unittest.cc',
+ 'rtp_rtcp/source/rtp_packet_history_unittest.cc',
+ 'rtp_rtcp/source/rtp_payload_registry_unittest.cc',
+ 'rtp_rtcp/source/rtp_rtcp_impl_unittest.cc',
+ 'rtp_rtcp/source/rtp_header_extension_unittest.cc',
+ 'rtp_rtcp/source/rtp_sender_unittest.cc',
+ 'rtp_rtcp/source/time_util_unittest.cc',
+ 'rtp_rtcp/source/vp8_partition_aggregator_unittest.cc',
+ 'rtp_rtcp/test/testAPI/test_api.cc',
+ 'rtp_rtcp/test/testAPI/test_api.h',
+ 'rtp_rtcp/test/testAPI/test_api_audio.cc',
+ 'rtp_rtcp/test/testAPI/test_api_rtcp.cc',
+ 'rtp_rtcp/test/testAPI/test_api_video.cc',
+ 'utility/source/audio_frame_operations_unittest.cc',
+ 'utility/source/file_player_unittests.cc',
+ 'utility/source/process_thread_impl_unittest.cc',
+ 'video_coding/codecs/test/packet_manipulator_unittest.cc',
+ 'video_coding/codecs/test/stats_unittest.cc',
+ 'video_coding/codecs/test/videoprocessor_unittest.cc',
+ 'video_coding/codecs/vp8/default_temporal_layers_unittest.cc',
+ 'video_coding/codecs/vp8/reference_picture_selection_unittest.cc',
+ 'video_coding/codecs/vp8/screenshare_layers_unittest.cc',
+ 'video_coding/codecs/vp8/simulcast_encoder_adapter_unittest.cc',
+ 'video_coding/codecs/vp8/simulcast_unittest.cc',
+ 'video_coding/codecs/vp8/simulcast_unittest.h',
+ 'video_coding/codecs/vp9/screenshare_layers_unittest.cc',
+ 'video_coding/include/mock/mock_vcm_callbacks.h',
+ 'video_coding/decoding_state_unittest.cc',
+ 'video_coding/jitter_buffer_unittest.cc',
+ 'video_coding/jitter_estimator_tests.cc',
+ 'video_coding/media_optimization_unittest.cc',
+ 'video_coding/receiver_unittest.cc',
+ 'video_coding/session_info_unittest.cc',
+ 'video_coding/timing_unittest.cc',
+ 'video_coding/video_coding_robustness_unittest.cc',
+ 'video_coding/video_receiver_unittest.cc',
+ 'video_coding/video_sender_unittest.cc',
+ 'video_coding/qm_select_unittest.cc',
+ 'video_coding/test/stream_generator.cc',
+ 'video_coding/test/stream_generator.h',
+ 'video_coding/utility/quality_scaler_unittest.cc',
+ 'video_processing/test/brightness_detection_test.cc',
+ 'video_processing/test/content_metrics_test.cc',
+ 'video_processing/test/deflickering_test.cc',
+ 'video_processing/test/denoiser_test.cc',
+ 'video_processing/test/video_processing_unittest.cc',
+ 'video_processing/test/video_processing_unittest.h',
+ ],
+ 'conditions': [
+ ['enable_bwe_test_logging==1', {
+ 'defines': [ 'BWE_TEST_LOGGING_COMPILE_TIME_ENABLE=1' ],
+ }, {
+ 'defines': [ 'BWE_TEST_LOGGING_COMPILE_TIME_ENABLE=0' ],
+ 'sources!': [
+ 'remote_bitrate_estimator/test/bwe_test_logging.cc'
+ ],
+ }],
+ # Run screen/window capturer tests only on platforms where they are
+ # supported.
+ ['desktop_capture_supported==0', {
+ 'sources!': [
+ 'desktop_capture/desktop_and_cursor_composer_unittest.cc',
+ 'desktop_capture/mouse_cursor_monitor_unittest.cc',
+ 'desktop_capture/screen_capturer_helper_unittest.cc',
+ 'desktop_capture/screen_capturer_mac_unittest.cc',
+ 'desktop_capture/screen_capturer_mock_objects.h',
+ 'desktop_capture/screen_capturer_unittest.cc',
+ 'desktop_capture/window_capturer_unittest.cc',
+ ],
+ }],
+ ['prefer_fixed_point==1', {
+ 'defines': [ 'WEBRTC_AUDIOPROC_FIXED_PROFILE' ],
+ }, {
+ 'defines': [ 'WEBRTC_AUDIOPROC_FLOAT_PROFILE' ],
+ }],
+ ['enable_protobuf==1', {
+ 'defines': [
+ 'WEBRTC_AUDIOPROC_DEBUG_DUMP',
+ 'WEBRTC_NETEQ_UNITTEST_BITEXACT',
+ ],
+ 'dependencies': [
+ 'audioproc_protobuf_utils',
+ 'audioproc_unittest_proto',
+ 'neteq_unittest_proto',
+ ],
+ 'sources': [
+ 'audio_processing/audio_processing_impl_locking_unittest.cc',
+ 'audio_processing/audio_processing_impl_unittest.cc',
+ 'audio_processing/test/audio_processing_unittest.cc',
+ 'audio_processing/test/debug_dump_test.cc',
+ 'audio_processing/test/test_utils.h',
+ ],
+ }],
+ ['build_libvpx==1', {
+ 'dependencies': [
+ '<(libvpx_dir)/libvpx.gyp:libvpx_new',
+ ],
+ }],
+ ['OS=="android"', {
+ 'dependencies': [
+ '<(DEPTH)/testing/android/native_test.gyp:native_test_native_code',
+ ],
+ # Need to disable error due to the line in
+ # base/android/jni_android.h triggering it:
+ # const BASE_EXPORT jobject GetApplicationContext()
+ # error: type qualifiers ignored on function return type
+ 'cflags': [
+ '-Wno-ignored-qualifiers',
+ ],
+ 'sources': [
+ 'audio_device/android/audio_device_unittest.cc',
+ 'audio_device/android/audio_manager_unittest.cc',
+ 'audio_device/android/ensure_initialized.cc',
+ 'audio_device/android/ensure_initialized.h',
+ ],
+ }],
+ ['OS=="ios"', {
+ 'sources': [
+ 'video_coding/codecs/h264/h264_video_toolbox_nalu_unittest.cc',
+ 'audio_device/ios/audio_device_unittest_ios.cc',
+ ],
+ 'mac_bundle_resources': [
+ '<(DEPTH)/resources/audio_coding/speech_mono_16kHz.pcm',
+ '<(DEPTH)/resources/audio_coding/testfile32kHz.pcm',
+ '<(DEPTH)/resources/audio_coding/teststereo32kHz.pcm',
+ '<(DEPTH)/resources/audio_device/audio_short16.pcm',
+ '<(DEPTH)/resources/audio_device/audio_short44.pcm',
+ '<(DEPTH)/resources/audio_device/audio_short48.pcm',
+ '<(DEPTH)/resources/audio_processing/agc/agc_no_circular_buffer.dat',
+ '<(DEPTH)/resources/audio_processing/agc/agc_pitch_gain.dat',
+ '<(DEPTH)/resources/audio_processing/agc/agc_pitch_lag.dat',
+ '<(DEPTH)/resources/audio_processing/agc/agc_spectral_peak.dat',
+ '<(DEPTH)/resources/audio_processing/agc/agc_voicing_prob.dat',
+ '<(DEPTH)/resources/audio_processing/agc/agc_with_circular_buffer.dat',
+ '<(DEPTH)/resources/short_mixed_mono_48.dat',
+ '<(DEPTH)/resources/short_mixed_mono_48.pcm',
+ '<(DEPTH)/resources/short_mixed_stereo_48.dat',
+ '<(DEPTH)/resources/short_mixed_stereo_48.pcm',
+ ],
+ }],
+ ],
+ # Disable warnings to enable Win64 build, issue 1323.
+ 'msvs_disabled_warnings': [
+ 4267, # size_t to int truncation.
+ ],
+ },
+ ],
+ }],
['OS=="android"', {
'targets': [
{
diff --git a/webrtc/modules/modules_unittests.isolate b/webrtc/modules/modules_unittests.isolate
index ba444df8c1..d988821af0 100644
--- a/webrtc/modules/modules_unittests.isolate
+++ b/webrtc/modules/modules_unittests.isolate
@@ -27,10 +27,7 @@
'<(DEPTH)/resources/audio_coding/neteq4_universal_ref.pcm',
'<(DEPTH)/resources/audio_coding/neteq4_universal_ref_win_32.pcm',
'<(DEPTH)/resources/audio_coding/neteq4_universal_ref_win_64.pcm',
- '<(DEPTH)/resources/audio_coding/neteq_network_stats.dat',
- '<(DEPTH)/resources/audio_coding/neteq_rtcp_stats.dat',
'<(DEPTH)/resources/audio_coding/neteq_universal_new.rtp',
- '<(DEPTH)/resources/audio_coding/neteq_universal_ref.pcm',
'<(DEPTH)/resources/audio_coding/speech_mono_16kHz.pcm',
'<(DEPTH)/resources/audio_coding/speech_mono_32_48kHz.pcm',
'<(DEPTH)/resources/audio_coding/testfile32kHz.pcm',
diff --git a/webrtc/modules/pacing/BUILD.gn b/webrtc/modules/pacing/BUILD.gn
index 3e478c1e76..0354c64fcb 100644
--- a/webrtc/modules/pacing/BUILD.gn
+++ b/webrtc/modules/pacing/BUILD.gn
@@ -10,10 +10,10 @@ source_set("pacing") {
sources = [
"bitrate_prober.cc",
"bitrate_prober.h",
- "include/paced_sender.h",
- "include/packet_router.h",
"paced_sender.cc",
+ "paced_sender.h",
"packet_router.cc",
+ "packet_router.h",
]
configs += [ "../..:common_config" ]
diff --git a/webrtc/modules/pacing/bitrate_prober.cc b/webrtc/modules/pacing/bitrate_prober.cc
index bbbe54f54e..41ad5fa11a 100644
--- a/webrtc/modules/pacing/bitrate_prober.cc
+++ b/webrtc/modules/pacing/bitrate_prober.cc
@@ -15,8 +15,8 @@
#include <limits>
#include <sstream>
-#include "webrtc/modules/pacing/include/paced_sender.h"
-#include "webrtc/system_wrappers/include/logging.h"
+#include "webrtc/base/logging.h"
+#include "webrtc/modules/pacing/paced_sender.h"
namespace webrtc {
diff --git a/webrtc/modules/pacing/include/mock/mock_paced_sender.h b/webrtc/modules/pacing/mock/mock_paced_sender.h
index b2cefdff8b..01d5f6a6e9 100644
--- a/webrtc/modules/pacing/include/mock/mock_paced_sender.h
+++ b/webrtc/modules/pacing/mock/mock_paced_sender.h
@@ -8,14 +8,14 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_PACING_INCLUDE_MOCK_MOCK_PACED_SENDER_H_
-#define WEBRTC_MODULES_PACING_INCLUDE_MOCK_MOCK_PACED_SENDER_H_
+#ifndef WEBRTC_MODULES_PACING_MOCK_MOCK_PACED_SENDER_H_
+#define WEBRTC_MODULES_PACING_MOCK_MOCK_PACED_SENDER_H_
#include "testing/gmock/include/gmock/gmock.h"
#include <vector>
-#include "webrtc/modules/pacing/include/paced_sender.h"
+#include "webrtc/modules/pacing/paced_sender.h"
#include "webrtc/system_wrappers/include/clock.h"
namespace webrtc {
@@ -35,4 +35,4 @@ class MockPacedSender : public PacedSender {
} // namespace webrtc
-#endif // WEBRTC_MODULES_PACING_INCLUDE_MOCK_MOCK_PACED_SENDER_H_
+#endif // WEBRTC_MODULES_PACING_MOCK_MOCK_PACED_SENDER_H_
diff --git a/webrtc/modules/pacing/paced_sender.cc b/webrtc/modules/pacing/paced_sender.cc
index 5d7ae17b23..121f860c7d 100644
--- a/webrtc/modules/pacing/paced_sender.cc
+++ b/webrtc/modules/pacing/paced_sender.cc
@@ -8,20 +8,19 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/pacing/include/paced_sender.h"
-
-#include <assert.h>
+#include "webrtc/modules/pacing/paced_sender.h"
#include <map>
#include <queue>
#include <set>
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/base/checks.h"
+#include "webrtc/base/logging.h"
+#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/modules/pacing/bitrate_prober.h"
#include "webrtc/system_wrappers/include/clock.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
#include "webrtc/system_wrappers/include/field_trial.h"
-#include "webrtc/system_wrappers/include/logging.h"
namespace {
// Time limit in milliseconds between packet bursts.
@@ -33,6 +32,9 @@ const int64_t kMaxIntervalTimeMs = 30;
} // namespace
+// TODO(sprang): Move at least PacketQueue and MediaBudget out to separate
+// files, so that we can more easily test them.
+
namespace webrtc {
namespace paced_sender {
struct Packet {
@@ -86,13 +88,19 @@ struct Comparator {
// Class encapsulating a priority queue with some extensions.
class PacketQueue {
public:
- PacketQueue() : bytes_(0) {}
+ explicit PacketQueue(Clock* clock)
+ : bytes_(0),
+ clock_(clock),
+ queue_time_sum_(0),
+ time_last_updated_(clock_->TimeInMilliseconds()) {}
virtual ~PacketQueue() {}
void Push(const Packet& packet) {
- if (!AddToDupeSet(packet)) {
+ if (!AddToDupeSet(packet))
return;
- }
+
+ UpdateQueueTime(packet.enqueue_time_ms);
+
// Store packet in list, use pointers in priority queue for cheaper moves.
// Packets have a handle to its own iterator in the list, for easy removal
// when popping from queue.
@@ -114,7 +122,11 @@ class PacketQueue {
void FinalizePop(const Packet& packet) {
RemoveFromDupeSet(packet);
bytes_ -= packet.bytes;
+ queue_time_sum_ -= (time_last_updated_ - packet.enqueue_time_ms);
packet_list_.erase(packet.this_it);
+ RTC_DCHECK_EQ(packet_list_.size(), prio_queue_.size());
+ if (packet_list_.empty())
+ RTC_DCHECK_EQ(0u, queue_time_sum_);
}
bool Empty() const { return prio_queue_.empty(); }
@@ -123,13 +135,29 @@ class PacketQueue {
uint64_t SizeInBytes() const { return bytes_; }
- int64_t OldestEnqueueTime() const {
- std::list<Packet>::const_reverse_iterator it = packet_list_.rbegin();
+ int64_t OldestEnqueueTimeMs() const {
+ auto it = packet_list_.rbegin();
if (it == packet_list_.rend())
return 0;
return it->enqueue_time_ms;
}
+ void UpdateQueueTime(int64_t timestamp_ms) {
+ RTC_DCHECK_GE(timestamp_ms, time_last_updated_);
+ int64_t delta = timestamp_ms - time_last_updated_;
+    // Use packet_list_.size(), not prio_queue_.size(), here, as there
+ // might be an outstanding element popped from prio_queue_ currently in the
+ // SendPacket() call, while packet_list_ will always be correct.
+ queue_time_sum_ += delta * packet_list_.size();
+ time_last_updated_ = timestamp_ms;
+ }
+
+ int64_t AverageQueueTimeMs() const {
+ if (prio_queue_.empty())
+ return 0;
+ return queue_time_sum_ / packet_list_.size();
+ }
+
private:
// Try to add a packet to the set of ssrc/seqno identifiers currently in the
// queue. Return true if inserted, false if this is a duplicate.
@@ -147,7 +175,7 @@ class PacketQueue {
void RemoveFromDupeSet(const Packet& packet) {
SsrcSeqNoMap::iterator it = dupe_map_.find(packet.ssrc);
- assert(it != dupe_map_.end());
+ RTC_DCHECK(it != dupe_map_.end());
it->second.erase(packet.sequence_number);
if (it->second.empty()) {
dupe_map_.erase(it);
@@ -165,6 +193,9 @@ class PacketQueue {
// Map<ssrc, set<seq_no> >, for checking duplicates.
typedef std::map<uint32_t, std::set<uint16_t> > SsrcSeqNoMap;
SsrcSeqNoMap dupe_map_;
+ Clock* const clock_;
+ int64_t queue_time_sum_;
+ int64_t time_last_updated_;
};
class IntervalBudget {
@@ -209,6 +240,7 @@ class IntervalBudget {
};
} // namespace paced_sender
+const int64_t PacedSender::kMaxQueueLengthMs = 2000;
const float PacedSender::kDefaultPaceMultiplier = 2.5f;
PacedSender::PacedSender(Clock* clock,
@@ -225,8 +257,9 @@ PacedSender::PacedSender(Clock* clock,
padding_budget_(new paced_sender::IntervalBudget(min_bitrate_kbps)),
prober_(new BitrateProber()),
bitrate_bps_(1000 * bitrate_kbps),
+ max_bitrate_kbps_(max_bitrate_kbps),
time_last_update_us_(clock->TimeInMicroseconds()),
- packets_(new paced_sender::PacketQueue()),
+ packets_(new paced_sender::PacketQueue(clock)),
packet_counter_(0) {
UpdateBytesPerInterval(kMinPacketLimitMs);
}
@@ -244,7 +277,7 @@ void PacedSender::Resume() {
}
void PacedSender::SetProbingEnabled(bool enabled) {
- assert(packet_counter_ == 0);
+ RTC_CHECK_EQ(0u, packet_counter_);
probing_enabled_ = enabled;
}
@@ -252,9 +285,12 @@ void PacedSender::UpdateBitrate(int bitrate_kbps,
int max_bitrate_kbps,
int min_bitrate_kbps) {
CriticalSectionScoped cs(critsect_.get());
- media_budget_->set_target_rate_kbps(max_bitrate_kbps);
+  // Don't set the media bitrate here, as it may be boosted in order to meet
+  // the max queue time constraint. Just update max_bitrate_kbps_ and let
+  // media_budget_ be updated in Process().
padding_budget_->set_target_rate_kbps(min_bitrate_kbps);
bitrate_bps_ = 1000 * bitrate_kbps;
+ max_bitrate_kbps_ = max_bitrate_kbps;
}
void PacedSender::InsertPacket(RtpPacketSender::Priority priority,
@@ -265,25 +301,23 @@ void PacedSender::InsertPacket(RtpPacketSender::Priority priority,
bool retransmission) {
CriticalSectionScoped cs(critsect_.get());
- if (probing_enabled_ && !prober_->IsProbing()) {
+ if (probing_enabled_ && !prober_->IsProbing())
prober_->SetEnabled(true);
- }
prober_->MaybeInitializeProbe(bitrate_bps_);
- if (capture_time_ms < 0) {
- capture_time_ms = clock_->TimeInMilliseconds();
- }
+ int64_t now_ms = clock_->TimeInMilliseconds();
+ if (capture_time_ms < 0)
+ capture_time_ms = now_ms;
- packets_->Push(paced_sender::Packet(
- priority, ssrc, sequence_number, capture_time_ms,
- clock_->TimeInMilliseconds(), bytes, retransmission, packet_counter_++));
+ packets_->Push(paced_sender::Packet(priority, ssrc, sequence_number,
+ capture_time_ms, now_ms, bytes,
+ retransmission, packet_counter_++));
}
int64_t PacedSender::ExpectedQueueTimeMs() const {
CriticalSectionScoped cs(critsect_.get());
- int target_rate = media_budget_->target_rate_kbps();
- assert(target_rate > 0);
- return static_cast<int64_t>(packets_->SizeInBytes() * 8 / target_rate);
+ RTC_DCHECK_GT(max_bitrate_kbps_, 0);
+ return static_cast<int64_t>(packets_->SizeInBytes() * 8 / max_bitrate_kbps_);
}
size_t PacedSender::QueueSizePackets() const {
@@ -294,20 +328,25 @@ size_t PacedSender::QueueSizePackets() const {
int64_t PacedSender::QueueInMs() const {
CriticalSectionScoped cs(critsect_.get());
- int64_t oldest_packet = packets_->OldestEnqueueTime();
+ int64_t oldest_packet = packets_->OldestEnqueueTimeMs();
if (oldest_packet == 0)
return 0;
return clock_->TimeInMilliseconds() - oldest_packet;
}
+int64_t PacedSender::AverageQueueTimeMs() {
+ CriticalSectionScoped cs(critsect_.get());
+ packets_->UpdateQueueTime(clock_->TimeInMilliseconds());
+ return packets_->AverageQueueTimeMs();
+}
+
int64_t PacedSender::TimeUntilNextProcess() {
CriticalSectionScoped cs(critsect_.get());
if (prober_->IsProbing()) {
int64_t ret = prober_->TimeUntilNextProbe(clock_->TimeInMilliseconds());
- if (ret >= 0) {
+ if (ret >= 0)
return ret;
- }
}
int64_t elapsed_time_us = clock_->TimeInMicroseconds() - time_last_update_us_;
int64_t elapsed_time_ms = (elapsed_time_us + 500) / 1000;
@@ -319,27 +358,42 @@ int32_t PacedSender::Process() {
CriticalSectionScoped cs(critsect_.get());
int64_t elapsed_time_ms = (now_us - time_last_update_us_ + 500) / 1000;
time_last_update_us_ = now_us;
- if (paused_)
- return 0;
- if (elapsed_time_ms > 0) {
+ int target_bitrate_kbps = max_bitrate_kbps_;
+ // TODO(holmer): Remove the !paused_ check when issue 5307 has been fixed.
+ if (!paused_ && elapsed_time_ms > 0) {
+ size_t queue_size_bytes = packets_->SizeInBytes();
+ if (queue_size_bytes > 0) {
+      // Assuming equal-sized packets and matched input/output rates, the
+      // average packet has avg_time_left_ms left to get queue_size_bytes out
+      // of the queue, if the time constraint is to be met. Determine the
+      // bitrate needed for that.
+ packets_->UpdateQueueTime(clock_->TimeInMilliseconds());
+ int64_t avg_time_left_ms = std::max<int64_t>(
+ 1, kMaxQueueLengthMs - packets_->AverageQueueTimeMs());
+ int min_bitrate_needed_kbps =
+ static_cast<int>(queue_size_bytes * 8 / avg_time_left_ms);
+ if (min_bitrate_needed_kbps > target_bitrate_kbps)
+ target_bitrate_kbps = min_bitrate_needed_kbps;
+ }
+
+ media_budget_->set_target_rate_kbps(target_bitrate_kbps);
+
int64_t delta_time_ms = std::min(kMaxIntervalTimeMs, elapsed_time_ms);
UpdateBytesPerInterval(delta_time_ms);
}
while (!packets_->Empty()) {
- if (media_budget_->bytes_remaining() == 0 && !prober_->IsProbing()) {
+ if (media_budget_->bytes_remaining() == 0 && !prober_->IsProbing())
return 0;
- }
// Since we need to release the lock in order to send, we first pop the
// element from the priority queue but keep it in storage, so that we can
// reinsert it if send fails.
const paced_sender::Packet& packet = packets_->BeginPop();
+
if (SendPacket(packet)) {
// Send succeeded, remove it from the queue.
packets_->FinalizePop(packet);
- if (prober_->IsProbing()) {
+ if (prober_->IsProbing())
return 0;
- }
} else {
// Send failed, put it back into the queue.
packets_->CancelPop(packet);
@@ -347,14 +401,16 @@ int32_t PacedSender::Process() {
}
}
- if (!packets_->Empty())
+ // TODO(holmer): Remove the paused_ check when issue 5307 has been fixed.
+ if (paused_ || !packets_->Empty())
return 0;
size_t padding_needed;
- if (prober_->IsProbing())
+ if (prober_->IsProbing()) {
padding_needed = prober_->RecommendedPacketSize();
- else
+ } else {
padding_needed = padding_budget_->bytes_remaining();
+ }
if (padding_needed > 0)
SendPadding(static_cast<size_t>(padding_needed));
@@ -362,6 +418,11 @@ int32_t PacedSender::Process() {
}
bool PacedSender::SendPacket(const paced_sender::Packet& packet) {
+  // TODO(holmer): Because of bug 5307 we have to send audio
+ // packets even when the pacer is paused. Here we assume audio packets are
+ // always high priority and that they are the only high priority packets.
+ if (paused_ && packet.priority != kHighPriority)
+ return false;
critsect_->Leave();
const bool success = callback_->TimeToSendPacket(packet.ssrc,
packet.sequence_number,
@@ -369,7 +430,9 @@ bool PacedSender::SendPacket(const paced_sender::Packet& packet) {
packet.retransmission);
critsect_->Enter();
- if (success) {
+ // TODO(holmer): High priority packets should only be accounted for if we are
+ // allocating bandwidth for audio.
+ if (success && packet.priority != kHighPriority) {
// Update media bytes sent.
prober_->PacketSent(clock_->TimeInMilliseconds(), packet.bytes);
media_budget_->UseBudget(packet.bytes);
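Taken together, the two TODO-guarded checks give SendPacket a small decision table; a sketch of the behavior under the issue-5307 workaround (paraphrasing the code above, not adding to it):

// paused_  priority         attempted?  counted against media budget?
// false    kHighPriority    yes         no (assumed to be audio)
// false    other            yes         yes, on success
// true     kHighPriority    yes         no
// true     other            no          -- (packet is re-queued)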
diff --git a/webrtc/modules/pacing/include/paced_sender.h b/webrtc/modules/pacing/paced_sender.h
index f142f55173..62e794fdbc 100644
--- a/webrtc/modules/pacing/include/paced_sender.h
+++ b/webrtc/modules/pacing/paced_sender.h
@@ -8,16 +8,16 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_PACING_INCLUDE_PACED_SENDER_H_
-#define WEBRTC_MODULES_PACING_INCLUDE_PACED_SENDER_H_
+#ifndef WEBRTC_MODULES_PACING_PACED_SENDER_H_
+#define WEBRTC_MODULES_PACING_PACED_SENDER_H_
#include <list>
#include <set>
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/base/thread_annotations.h"
-#include "webrtc/modules/interface/module.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
+#include "webrtc/modules/include/module.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "webrtc/typedefs.h"
namespace webrtc {
@@ -51,7 +51,11 @@ class PacedSender : public Module, public RtpPacketSender {
virtual ~Callback() {}
};
- static const int64_t kDefaultMaxQueueLengthMs = 2000;
+ // Expected max pacer delay in ms. If ExpectedQueueTimeMs() is higher than
+ // this value, the packet producers should wait (e.g., drop frames rather
+ // than encode them). The bitrate sent may temporarily exceed the target
+ // set by UpdateBitrate() so that this limit can be upheld.
+ static const int64_t kMaxQueueLengthMs;
// Pace in kbits/s until we receive first estimate.
static const int kDefaultInitialPaceKbps = 2000;
// Pacing-rate relative to our target send rate.
@@ -109,6 +113,10 @@ class PacedSender : public Module, public RtpPacketSender {
// packets in the queue, given the current size and bitrate, ignoring prio.
virtual int64_t ExpectedQueueTimeMs() const;
+ // Returns the average time since being enqueued, in milliseconds, for all
+ // packets currently in the pacer queue, or 0 if the queue is empty.
+ virtual int64_t AverageQueueTimeMs();
+
// Returns the number of milliseconds until the module want a worker thread
// to call Process.
int64_t TimeUntilNextProcess() override;
@@ -142,7 +150,10 @@ class PacedSender : public Module, public RtpPacketSender {
GUARDED_BY(critsect_);
rtc::scoped_ptr<BitrateProber> prober_ GUARDED_BY(critsect_);
+ // Actual configured bitrates (media_budget_ may temporarily be higher in
+ // order to meet the pacing time constraint).
int bitrate_bps_ GUARDED_BY(critsect_);
+ int max_bitrate_kbps_ GUARDED_BY(critsect_);
int64_t time_last_update_us_ GUARDED_BY(critsect_);
@@ -150,4 +161,4 @@ class PacedSender : public Module, public RtpPacketSender {
uint64_t packet_counter_;
};
} // namespace webrtc
-#endif // WEBRTC_MODULES_PACING_INCLUDE_PACED_SENDER_H_
+#endif // WEBRTC_MODULES_PACING_PACED_SENDER_H_
diff --git a/webrtc/modules/pacing/paced_sender_unittest.cc b/webrtc/modules/pacing/paced_sender_unittest.cc
index c27444c5ac..588bf3b669 100644
--- a/webrtc/modules/pacing/paced_sender_unittest.cc
+++ b/webrtc/modules/pacing/paced_sender_unittest.cc
@@ -12,7 +12,7 @@
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
-#include "webrtc/modules/pacing/include/paced_sender.h"
+#include "webrtc/modules/pacing/paced_sender.h"
#include "webrtc/system_wrappers/include/clock.h"
using testing::_;
@@ -471,13 +471,15 @@ TEST_F(PacedSenderTest, Priority) {
sequence_number++, capture_time_ms, 250, false);
send_bucket_->InsertPacket(PacedSender::kNormalPriority, ssrc,
sequence_number++, capture_time_ms, 250, false);
+ send_bucket_->InsertPacket(PacedSender::kNormalPriority, ssrc,
+ sequence_number++, capture_time_ms, 250, false);
send_bucket_->InsertPacket(PacedSender::kHighPriority, ssrc,
sequence_number++, capture_time_ms, 250, false);
// Expect all high and normal priority to be sent out first.
EXPECT_CALL(callback_, TimeToSendPadding(_)).Times(0);
EXPECT_CALL(callback_, TimeToSendPacket(ssrc, _, capture_time_ms, false))
- .Times(3)
+ .Times(4)
.WillRepeatedly(Return(true));
EXPECT_EQ(5, send_bucket_->TimeUntilNextProcess());
@@ -497,6 +499,37 @@ TEST_F(PacedSenderTest, Priority) {
EXPECT_EQ(0, send_bucket_->Process());
}
+TEST_F(PacedSenderTest, HighPrioDoesntAffectBudget) {
+ uint32_t ssrc = 12346;
+ uint16_t sequence_number = 1234;
+ int64_t capture_time_ms = 56789;
+
+ // Since high-priority packets don't affect the budget, we should be able
+ // to send a large number of them at once.
+ for (int i = 0; i < 25; ++i) {
+ SendAndExpectPacket(PacedSender::kHighPriority, ssrc, sequence_number++,
+ capture_time_ms, 250, false);
+ }
+ send_bucket_->Process();
+ // Low-priority packets do affect the budget, so we should only be able to
+ // send 3 at once; the 4th should be queued.
+ for (int i = 0; i < 3; ++i) {
+ SendAndExpectPacket(PacedSender::kLowPriority, ssrc, sequence_number++,
+ capture_time_ms, 250, false);
+ }
+ send_bucket_->InsertPacket(PacedSender::kLowPriority, ssrc, sequence_number,
+ capture_time_ms, 250, false);
+ EXPECT_EQ(5, send_bucket_->TimeUntilNextProcess());
+ clock_.AdvanceTimeMilliseconds(5);
+ send_bucket_->Process();
+ EXPECT_CALL(callback_,
+ TimeToSendPacket(ssrc, sequence_number++, capture_time_ms, false))
+ .Times(1);
+ EXPECT_EQ(5, send_bucket_->TimeUntilNextProcess());
+ clock_.AdvanceTimeMilliseconds(5);
+ send_bucket_->Process();
+}
+
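The "3 at once" expectation above follows from interval budget arithmetic; a sketch under an assumed target rate (the fixture's actual rate is not shown in this hunk):

// With a target rate in kbits/s, the media budget grows by roughly
//   target_kbps * interval_ms / 8  bytes per interval
// (kbits/s == bits/ms, and /8 converts bits to bytes). E.g., an assumed
// 1500 kbps target over a 5 ms tick allows 1500 * 5 / 8 = 937 bytes:
// three 250-byte packets, but not a fourth.
int budget_bytes = 1500 * 5 / 8;  // 937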
TEST_F(PacedSenderTest, Pause) {
uint32_t ssrc_low_priority = 12345;
uint32_t ssrc = 12346;
@@ -560,20 +593,16 @@ TEST_F(PacedSenderTest, Pause) {
EXPECT_CALL(callback_, TimeToSendPacket(_, _, capture_time_ms, false))
.Times(3)
.WillRepeatedly(Return(true));
- send_bucket_->Resume();
-
- EXPECT_EQ(5, send_bucket_->TimeUntilNextProcess());
- clock_.AdvanceTimeMilliseconds(5);
- EXPECT_EQ(0, send_bucket_->TimeUntilNextProcess());
- EXPECT_EQ(0, send_bucket_->Process());
-
EXPECT_CALL(callback_, TimeToSendPacket(_, _, second_capture_time_ms, false))
.Times(1)
.WillRepeatedly(Return(true));
+ send_bucket_->Resume();
+
EXPECT_EQ(5, send_bucket_->TimeUntilNextProcess());
clock_.AdvanceTimeMilliseconds(5);
EXPECT_EQ(0, send_bucket_->TimeUntilNextProcess());
EXPECT_EQ(0, send_bucket_->Process());
+
EXPECT_EQ(0, send_bucket_->QueueInMs());
}
@@ -664,10 +693,9 @@ TEST_F(PacedSenderTest, ExpectedQueueTimeMs) {
EXPECT_EQ(0, send_bucket_->ExpectedQueueTimeMs());
- // Allow for aliasing, duration should be in [expected(n - 1), expected(n)].
- EXPECT_LE(duration, queue_in_ms);
- EXPECT_GE(duration,
- queue_in_ms - static_cast<int64_t>(kPacketSize * 8 / kMaxBitrate));
+ // Allow for aliasing; the duration should be within one packet's send
+ // time of the max time limit.
+ EXPECT_NEAR(duration, PacedSender::kMaxQueueLengthMs,
+ static_cast<int64_t>(kPacketSize * 8 / kMaxBitrate));
}
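The EXPECT_NEAR tolerance above is one packet's transmit time. Reusing the test's constants, and since kbits/s equals bits/ms, a sketch of the unit arithmetic:

// kPacketSize bytes occupy kPacketSize * 8 bits; at kMaxBitrate kbits/s
// (== bits/ms) a packet therefore takes kPacketSize * 8 / kMaxBitrate ms
// on the wire, which bounds the sampling aliasing of the measurement.
int64_t packet_send_time_ms =
    static_cast<int64_t>(kPacketSize * 8 / kMaxBitrate);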
TEST_F(PacedSenderTest, QueueTimeGrowsOverTime) {
@@ -830,5 +858,51 @@ TEST_F(PacedSenderTest, PaddingOveruse) {
send_bucket_->Process();
}
+TEST_F(PacedSenderTest, AverageQueueTime) {
+ uint32_t ssrc = 12346;
+ uint16_t sequence_number = 1234;
+ const size_t kPacketSize = 1200;
+ const int kBitrateBps = 10 * kPacketSize * 8; // 10 packets per second.
+ const int kBitrateKbps = (kBitrateBps + 500) / 1000;
+
+ send_bucket_->UpdateBitrate(kBitrateKbps, kBitrateKbps, kBitrateKbps);
+
+ EXPECT_EQ(0, send_bucket_->AverageQueueTimeMs());
+
+ int64_t first_capture_time = clock_.TimeInMilliseconds();
+ send_bucket_->InsertPacket(PacedSender::kNormalPriority, ssrc,
+ sequence_number, first_capture_time, kPacketSize,
+ false);
+ clock_.AdvanceTimeMilliseconds(10);
+ send_bucket_->InsertPacket(PacedSender::kNormalPriority, ssrc,
+ sequence_number + 1, clock_.TimeInMilliseconds(),
+ kPacketSize, false);
+ clock_.AdvanceTimeMilliseconds(10);
+
+ EXPECT_EQ((20 + 10) / 2, send_bucket_->AverageQueueTimeMs());
+
+ // Only the first packet (queued for 20 ms) should be removed, leaving the
+ // second packet (queued for 10 ms) alone in the queue.
+ EXPECT_CALL(callback_, TimeToSendPacket(ssrc, sequence_number,
+ first_capture_time, false))
+ .Times(1)
+ .WillRepeatedly(Return(true));
+ send_bucket_->Process();
+
+ EXPECT_EQ(10, send_bucket_->AverageQueueTimeMs());
+
+ clock_.AdvanceTimeMilliseconds(10);
+ EXPECT_CALL(callback_, TimeToSendPacket(ssrc, sequence_number + 1,
+ first_capture_time + 10, false))
+ .Times(1)
+ .WillRepeatedly(Return(true));
+ for (int i = 0; i < 3; ++i) {
+ clock_.AdvanceTimeMilliseconds(30); // Max delta.
+ send_bucket_->Process();
+ }
+
+ EXPECT_EQ(0, send_bucket_->AverageQueueTimeMs());
+}
+
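One way to keep AverageQueueTimeMs() O(1), consistent with the numbers this test expects, is a running sum advanced by elapsed time times queue size. This is only a sketch of a plausible bookkeeping scheme with hypothetical members, not the implementation in this patch:

// Sketch: every queued packet ages by the same delta on each update.
void UpdateQueueTime(int64_t now_ms) {
  queue_time_sum_ms_ += (now_ms - last_update_ms_) * queue_size_packets_;
  last_update_ms_ = now_ms;
  // On pop, the popped packet's accumulated queue time would be subtracted.
}
int64_t AverageQueueTimeMs() const {
  return queue_size_packets_ == 0 ? 0
                                  : queue_time_sum_ms_ / queue_size_packets_;
}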
} // namespace test
} // namespace webrtc
diff --git a/webrtc/modules/pacing/pacing.gypi b/webrtc/modules/pacing/pacing.gypi
index faa97841c1..90f663c1b0 100644
--- a/webrtc/modules/pacing/pacing.gypi
+++ b/webrtc/modules/pacing/pacing.gypi
@@ -17,12 +17,12 @@
'<(webrtc_root)/modules/modules.gyp:rtp_rtcp',
],
'sources': [
- 'include/paced_sender.h',
- 'include/packet_router.h',
'bitrate_prober.cc',
'bitrate_prober.h',
'paced_sender.cc',
+ 'paced_sender.h',
'packet_router.cc',
+ 'packet_router.h',
],
},
], # targets
diff --git a/webrtc/modules/pacing/packet_router.cc b/webrtc/modules/pacing/packet_router.cc
index 563773b41f..5fd350834a 100644
--- a/webrtc/modules/pacing/packet_router.cc
+++ b/webrtc/modules/pacing/packet_router.cc
@@ -8,12 +8,12 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/pacing/include/packet_router.h"
+#include "webrtc/modules/pacing/packet_router.h"
#include "webrtc/base/atomicops.h"
#include "webrtc/base/checks.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h"
namespace webrtc {
diff --git a/webrtc/modules/pacing/include/packet_router.h b/webrtc/modules/pacing/packet_router.h
index 9d461d13a9..edef1aa9b3 100644
--- a/webrtc/modules/pacing/include/packet_router.h
+++ b/webrtc/modules/pacing/packet_router.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_PACING_INCLUDE_PACKET_ROUTER_H_
-#define WEBRTC_MODULES_PACING_INCLUDE_PACKET_ROUTER_H_
+#ifndef WEBRTC_MODULES_PACING_PACKET_ROUTER_H_
+#define WEBRTC_MODULES_PACING_PACKET_ROUTER_H_
#include <list>
@@ -18,8 +18,8 @@
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/base/thread_annotations.h"
#include "webrtc/common_types.h"
-#include "webrtc/modules/pacing/include/paced_sender.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
+#include "webrtc/modules/pacing/paced_sender.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
namespace webrtc {
@@ -63,4 +63,4 @@ class PacketRouter : public PacedSender::Callback,
RTC_DISALLOW_COPY_AND_ASSIGN(PacketRouter);
};
} // namespace webrtc
-#endif // WEBRTC_MODULES_PACING_INCLUDE_PACKET_ROUTER_H_
+#endif // WEBRTC_MODULES_PACING_PACKET_ROUTER_H_
diff --git a/webrtc/modules/pacing/packet_router_unittest.cc b/webrtc/modules/pacing/packet_router_unittest.cc
index eecb13757c..31acf44b9b 100644
--- a/webrtc/modules/pacing/packet_router_unittest.cc
+++ b/webrtc/modules/pacing/packet_router_unittest.cc
@@ -13,8 +13,8 @@
#include "webrtc/base/checks.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
-#include "webrtc/modules/pacing/include/packet_router.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp.h"
+#include "webrtc/modules/pacing/packet_router.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp.h"
#include "webrtc/modules/rtp_rtcp/mocks/mock_rtp_rtcp.h"
#include "webrtc/base/scoped_ptr.h"
diff --git a/webrtc/modules/remote_bitrate_estimator/aimd_rate_control.cc b/webrtc/modules/remote_bitrate_estimator/aimd_rate_control.cc
index 2d5573228d..4820e6295f 100644
--- a/webrtc/modules/remote_bitrate_estimator/aimd_rate_control.cc
+++ b/webrtc/modules/remote_bitrate_estimator/aimd_rate_control.cc
@@ -88,8 +88,7 @@ uint32_t AimdRateControl::LatestEstimate() const {
uint32_t AimdRateControl::UpdateBandwidthEstimate(int64_t now_ms) {
current_bitrate_bps_ = ChangeBitrate(current_bitrate_bps_,
- current_input_._incomingBitRate,
- now_ms);
+ current_input_.incoming_bitrate, now_ms);
if (now_ms - time_of_last_log_ > kLogIntervalMs) {
time_of_last_log_ = now_ms;
}
@@ -109,21 +108,21 @@ void AimdRateControl::Update(const RateControlInput* input, int64_t now_ms) {
const int64_t kInitializationTimeMs = 5000;
RTC_DCHECK_LE(kBitrateWindowMs, kInitializationTimeMs);
if (time_first_incoming_estimate_ < 0) {
- if (input->_incomingBitRate > 0) {
+ if (input->incoming_bitrate > 0) {
time_first_incoming_estimate_ = now_ms;
}
} else if (now_ms - time_first_incoming_estimate_ > kInitializationTimeMs &&
- input->_incomingBitRate > 0) {
- current_bitrate_bps_ = input->_incomingBitRate;
+ input->incoming_bitrate > 0) {
+ current_bitrate_bps_ = input->incoming_bitrate;
bitrate_is_initialized_ = true;
}
}
- if (updated_ && current_input_._bwState == kBwOverusing) {
+ if (updated_ && current_input_.bw_state == kBwOverusing) {
// Only update delay factor and incoming bit rate. We always want to react
// on an over-use.
- current_input_._noiseVar = input->_noiseVar;
- current_input_._incomingBitRate = input->_incomingBitRate;
+ current_input_.noise_var = input->noise_var;
+ current_input_.incoming_bitrate = input->incoming_bitrate;
} else {
updated_ = true;
current_input_ = *input;
@@ -145,7 +144,7 @@ uint32_t AimdRateControl::ChangeBitrate(uint32_t current_bitrate_bps,
// An over-use should always trigger us to reduce the bitrate, even though
// we have not yet established our first estimate. By acting on the over-use,
// we will end up with a valid estimate.
- if (!bitrate_is_initialized_ && current_input_._bwState != kBwOverusing)
+ if (!bitrate_is_initialized_ && current_input_.bw_state != kBwOverusing)
return current_bitrate_bps_;
updated_ = false;
ChangeState(current_input_, now_ms);
@@ -284,7 +283,7 @@ void AimdRateControl::UpdateMaxBitRateEstimate(float incoming_bitrate_kbps) {
void AimdRateControl::ChangeState(const RateControlInput& input,
int64_t now_ms) {
- switch (current_input_._bwState) {
+ switch (current_input_.bw_state) {
case kBwNormal:
if (rate_control_state_ == kRcHold) {
time_last_bitrate_change_ = now_ms;
diff --git a/webrtc/modules/remote_bitrate_estimator/aimd_rate_control.h b/webrtc/modules/remote_bitrate_estimator/aimd_rate_control.h
index bc5ca41dff..93ae2190d6 100644
--- a/webrtc/modules/remote_bitrate_estimator/aimd_rate_control.h
+++ b/webrtc/modules/remote_bitrate_estimator/aimd_rate_control.h
@@ -84,4 +84,4 @@ class AimdRateControl {
};
} // namespace webrtc
-#endif // WEBRTC_MODULES_REMOTE_BITRATE_ESTIMATOR_AIMD_RATE_CONTROL_H_
+#endif // WEBRTC_MODULES_REMOTE_BITRATE_ESTIMATOR_AIMD_RATE_CONTROL_H_
diff --git a/webrtc/modules/remote_bitrate_estimator/bwe_simulations.cc b/webrtc/modules/remote_bitrate_estimator/bwe_simulations.cc
index cb8d0db5c7..11fd64f84e 100644
--- a/webrtc/modules/remote_bitrate_estimator/bwe_simulations.cc
+++ b/webrtc/modules/remote_bitrate_estimator/bwe_simulations.cc
@@ -36,7 +36,7 @@ class BweSimulation : public BweTest,
VerboseLogging(true);
}
- test::Random random_;
+ Random random_;
private:
RTC_DISALLOW_COPY_AND_ASSIGN(BweSimulation);
@@ -74,7 +74,6 @@ TEST_P(BweSimulation, Verizon4gDownlinkTest) {
}
TEST_P(BweSimulation, Choke1000kbps500kbps1000kbpsBiDirectional) {
-
const int kFlowIds[] = {0, 1};
const size_t kNumFlows = sizeof(kFlowIds) / sizeof(kFlowIds[0]);
@@ -106,7 +105,6 @@ TEST_P(BweSimulation, Choke1000kbps500kbps1000kbpsBiDirectional) {
}
TEST_P(BweSimulation, Choke1000kbps500kbps1000kbps) {
-
AdaptiveVideoSource source(0, 30, 300, 0, 0);
VideoSender sender(&uplink_, &source, GetParam());
ChokeFilter choke(&uplink_, 0);
@@ -243,7 +241,7 @@ TEST_P(BweSimulation, PacerGoogleWifiTrace3Mbps) {
}
TEST_P(BweSimulation, SelfFairnessTest) {
- srand(Clock::GetRealTimeClock()->TimeInMicroseconds());
+ Random prng(Clock::GetRealTimeClock()->TimeInMicroseconds());
const int kAllFlowIds[] = {0, 1, 2, 3};
const size_t kNumFlows = sizeof(kAllFlowIds) / sizeof(kAllFlowIds[0]);
rtc::scoped_ptr<VideoSource> sources[kNumFlows];
@@ -252,7 +250,7 @@ TEST_P(BweSimulation, SelfFairnessTest) {
// Streams started 20 seconds apart to give them different advantage when
// competing for the bandwidth.
sources[i].reset(new AdaptiveVideoSource(kAllFlowIds[i], 30, 300, 0,
- i * (rand() % 40000)));
+ i * prng.Rand(39999)));
senders[i].reset(new VideoSender(&uplink_, sources[i].get(), GetParam()));
}
@@ -283,9 +281,9 @@ TEST_P(BweSimulation, PacedSelfFairness50msTest) {
const int64_t kAverageOffsetMs = 20 * 1000;
const int kNumRmcatFlows = 4;
int64_t offsets_ms[kNumRmcatFlows];
- offsets_ms[0] = random_.Rand(0, 2 * kAverageOffsetMs);
+ offsets_ms[0] = random_.Rand(2 * kAverageOffsetMs);
for (int i = 1; i < kNumRmcatFlows; ++i) {
- offsets_ms[i] = offsets_ms[i - 1] + random_.Rand(0, 2 * kAverageOffsetMs);
+ offsets_ms[i] = offsets_ms[i - 1] + random_.Rand(2 * kAverageOffsetMs);
}
RunFairnessTest(GetParam(), kNumRmcatFlows, 0, 1000, 3000, 50, 50, 0,
offsets_ms);
@@ -295,9 +293,9 @@ TEST_P(BweSimulation, PacedSelfFairness500msTest) {
const int64_t kAverageOffsetMs = 20 * 1000;
const int kNumRmcatFlows = 4;
int64_t offsets_ms[kNumRmcatFlows];
- offsets_ms[0] = random_.Rand(0, 2 * kAverageOffsetMs);
+ offsets_ms[0] = random_.Rand(2 * kAverageOffsetMs);
for (int i = 1; i < kNumRmcatFlows; ++i) {
- offsets_ms[i] = offsets_ms[i - 1] + random_.Rand(0, 2 * kAverageOffsetMs);
+ offsets_ms[i] = offsets_ms[i - 1] + random_.Rand(2 * kAverageOffsetMs);
}
RunFairnessTest(GetParam(), kNumRmcatFlows, 0, 1000, 3000, 500, 50, 0,
offsets_ms);
@@ -307,28 +305,28 @@ TEST_P(BweSimulation, PacedSelfFairness1000msTest) {
const int64_t kAverageOffsetMs = 20 * 1000;
const int kNumRmcatFlows = 4;
int64_t offsets_ms[kNumRmcatFlows];
- offsets_ms[0] = random_.Rand(0, 2 * kAverageOffsetMs);
+ offsets_ms[0] = random_.Rand(2 * kAverageOffsetMs);
for (int i = 1; i < kNumRmcatFlows; ++i) {
- offsets_ms[i] = offsets_ms[i - 1] + random_.Rand(0, 2 * kAverageOffsetMs);
+ offsets_ms[i] = offsets_ms[i - 1] + random_.Rand(2 * kAverageOffsetMs);
}
RunFairnessTest(GetParam(), 4, 0, 1000, 3000, 1000, 50, 0, offsets_ms);
}
TEST_P(BweSimulation, TcpFairness50msTest) {
const int64_t kAverageOffsetMs = 20 * 1000;
- int64_t offset_ms[] = {random_.Rand(0, 2 * kAverageOffsetMs), 0};
+ int64_t offset_ms[] = {random_.Rand(2 * kAverageOffsetMs), 0};
RunFairnessTest(GetParam(), 1, 1, 1000, 2000, 50, 50, 0, offset_ms);
}
TEST_P(BweSimulation, TcpFairness500msTest) {
const int64_t kAverageOffsetMs = 20 * 1000;
- int64_t offset_ms[] = {random_.Rand(0, 2 * kAverageOffsetMs), 0};
+ int64_t offset_ms[] = {random_.Rand(2 * kAverageOffsetMs), 0};
RunFairnessTest(GetParam(), 1, 1, 1000, 2000, 500, 50, 0, offset_ms);
}
TEST_P(BweSimulation, TcpFairness1000msTest) {
const int kAverageOffsetMs = 20 * 1000;
- int64_t offset_ms[] = {random_.Rand(0, 2 * kAverageOffsetMs), 0};
+ int64_t offset_ms[] = {random_.Rand(2 * kAverageOffsetMs), 0};
RunFairnessTest(GetParam(), 1, 1, 1000, 2000, 1000, 50, 0, offset_ms);
}
diff --git a/webrtc/modules/remote_bitrate_estimator/include/bwe_defines.h b/webrtc/modules/remote_bitrate_estimator/include/bwe_defines.h
index 844fde5b71..3fb7e29e5b 100644
--- a/webrtc/modules/remote_bitrate_estimator/include/bwe_defines.h
+++ b/webrtc/modules/remote_bitrate_estimator/include/bwe_defines.h
@@ -8,53 +8,40 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_RTP_RTCP_SOURCE_BWE_DEFINES_H_
-#define WEBRTC_MODULES_RTP_RTCP_SOURCE_BWE_DEFINES_H_
+#ifndef WEBRTC_MODULES_REMOTE_BITRATE_ESTIMATOR_INCLUDE_BWE_DEFINES_H_
+#define WEBRTC_MODULES_REMOTE_BITRATE_ESTIMATOR_INCLUDE_BWE_DEFINES_H_
#include "webrtc/typedefs.h"
-#define BWE_MAX(a,b) ((a)>(b)?(a):(b))
-#define BWE_MIN(a,b) ((a)<(b)?(a):(b))
+#define BWE_MAX(a, b) ((a) > (b) ? (a) : (b))
+#define BWE_MIN(a, b) ((a) < (b) ? (a) : (b))
namespace webrtc {
static const int64_t kBitrateWindowMs = 1000;
-enum BandwidthUsage
-{
- kBwNormal = 0,
- kBwUnderusing = 1,
- kBwOverusing = 2,
+enum BandwidthUsage {
+ kBwNormal = 0,
+ kBwUnderusing = 1,
+ kBwOverusing = 2,
};
-enum RateControlState
-{
- kRcHold,
- kRcIncrease,
- kRcDecrease
-};
+enum RateControlState { kRcHold, kRcIncrease, kRcDecrease };
-enum RateControlRegion
-{
- kRcNearMax,
- kRcAboveMax,
- kRcMaxUnknown
-};
+enum RateControlRegion { kRcNearMax, kRcAboveMax, kRcMaxUnknown };
+
+struct RateControlInput {
+ RateControlInput(BandwidthUsage bw_state,
+ uint32_t incoming_bitrate,
+ double noise_var)
+ : bw_state(bw_state),
+ incoming_bitrate(incoming_bitrate),
+ noise_var(noise_var) {}
-class RateControlInput
-{
-public:
- RateControlInput(BandwidthUsage bwState,
- uint32_t incomingBitRate,
- double noiseVar)
- : _bwState(bwState),
- _incomingBitRate(incomingBitRate),
- _noiseVar(noiseVar) {}
-
- BandwidthUsage _bwState;
- uint32_t _incomingBitRate;
- double _noiseVar;
+ BandwidthUsage bw_state;
+ uint32_t incoming_bitrate;
+ double noise_var;
};
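With the rename to Google-style members, the constructor argument order is bw_state, incoming_bitrate, noise_var; a minimal usage sketch:

RateControlInput input(kBwOverusing, /*incoming_bitrate=*/300000,
                       /*noise_var=*/0.0);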
} // namespace webrtc
-#endif // WEBRTC_MODULES_RTP_RTCP_SOURCE_BWE_DEFINES_H_
+#endif // WEBRTC_MODULES_REMOTE_BITRATE_ESTIMATOR_INCLUDE_BWE_DEFINES_H_
diff --git a/webrtc/modules/remote_bitrate_estimator/include/mock/mock_remote_bitrate_observer.h b/webrtc/modules/remote_bitrate_estimator/include/mock/mock_remote_bitrate_observer.h
index edfac977a2..ae05912b5f 100644
--- a/webrtc/modules/remote_bitrate_estimator/include/mock/mock_remote_bitrate_observer.h
+++ b/webrtc/modules/remote_bitrate_estimator/include/mock/mock_remote_bitrate_observer.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_REMOTE_BITRATE_ESTIMATOR_INCLUDE_MOCK_MOCK_REMOTE_BITRATE_ESTIMATOR_H_
-#define WEBRTC_MODULES_REMOTE_BITRATE_ESTIMATOR_INCLUDE_MOCK_MOCK_REMOTE_BITRATE_ESTIMATOR_H_
+#ifndef WEBRTC_MODULES_REMOTE_BITRATE_ESTIMATOR_INCLUDE_MOCK_MOCK_REMOTE_BITRATE_OBSERVER_H_
+#define WEBRTC_MODULES_REMOTE_BITRATE_ESTIMATOR_INCLUDE_MOCK_MOCK_REMOTE_BITRATE_OBSERVER_H_
#include <vector>
@@ -26,4 +26,4 @@ class MockRemoteBitrateObserver : public RemoteBitrateObserver {
} // namespace webrtc
-#endif // WEBRTC_MODULES_REMOTE_BITRATE_ESTIMATOR_INCLUDE_MOCK_MOCK_REMOTE_BITRATE_ESTIMATOR_H_
+#endif // WEBRTC_MODULES_REMOTE_BITRATE_ESTIMATOR_INCLUDE_MOCK_MOCK_REMOTE_BITRATE_OBSERVER_H_
diff --git a/webrtc/modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h b/webrtc/modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h
index 4bd9d8c7bc..0734cbf255 100644
--- a/webrtc/modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h
+++ b/webrtc/modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h
@@ -17,9 +17,9 @@
#include <vector>
#include "webrtc/common_types.h"
-#include "webrtc/modules/interface/module.h"
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
+#include "webrtc/modules/include/module.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "webrtc/typedefs.h"
namespace webrtc {
diff --git a/webrtc/modules/remote_bitrate_estimator/include/send_time_history.h b/webrtc/modules/remote_bitrate_estimator/include/send_time_history.h
index 92d2e28132..a643c1f103 100644
--- a/webrtc/modules/remote_bitrate_estimator/include/send_time_history.h
+++ b/webrtc/modules/remote_bitrate_estimator/include/send_time_history.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_BITRATE_CONTROLLER_SEND_TIME_HISTORY_H_
-#define WEBRTC_MODULES_BITRATE_CONTROLLER_SEND_TIME_HISTORY_H_
+#ifndef WEBRTC_MODULES_REMOTE_BITRATE_ESTIMATOR_INCLUDE_SEND_TIME_HISTORY_H_
+#define WEBRTC_MODULES_REMOTE_BITRATE_ESTIMATOR_INCLUDE_SEND_TIME_HISTORY_H_
#include <map>
@@ -45,4 +45,4 @@ class SendTimeHistory {
};
} // namespace webrtc
-#endif // WEBRTC_MODULES_BITRATE_CONTROLLER_SEND_TIME_HISTORY_H_
+#endif // WEBRTC_MODULES_REMOTE_BITRATE_ESTIMATOR_INCLUDE_SEND_TIME_HISTORY_H_
diff --git a/webrtc/modules/remote_bitrate_estimator/inter_arrival.cc b/webrtc/modules/remote_bitrate_estimator/inter_arrival.cc
index 3dee305bad..f75bc2b03e 100644
--- a/webrtc/modules/remote_bitrate_estimator/inter_arrival.cc
+++ b/webrtc/modules/remote_bitrate_estimator/inter_arrival.cc
@@ -14,7 +14,7 @@
#include <cassert>
#include "webrtc/base/logging.h"
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
namespace webrtc {
@@ -71,8 +71,7 @@ bool InterArrival::ComputeDeltas(uint32_t timestamp,
current_timestamp_group_.first_timestamp = timestamp;
current_timestamp_group_.timestamp = timestamp;
current_timestamp_group_.size = 0;
- }
- else {
+ } else {
current_timestamp_group_.timestamp = LatestTimestamp(
current_timestamp_group_.timestamp, timestamp);
}
diff --git a/webrtc/modules/remote_bitrate_estimator/overuse_detector.cc b/webrtc/modules/remote_bitrate_estimator/overuse_detector.cc
index c9340892f2..0acd7c29c5 100644
--- a/webrtc/modules/remote_bitrate_estimator/overuse_detector.cc
+++ b/webrtc/modules/remote_bitrate_estimator/overuse_detector.cc
@@ -10,11 +10,13 @@
#include "webrtc/modules/remote_bitrate_estimator/overuse_detector.h"
-#include <algorithm>
-#include <sstream>
#include <math.h>
#include <stdlib.h>
+#include <algorithm>
+#include <sstream>
+#include <string>
+
#include "webrtc/base/checks.h"
#include "webrtc/base/common.h"
#include "webrtc/modules/remote_bitrate_estimator/include/bwe_defines.h"
diff --git a/webrtc/modules/remote_bitrate_estimator/overuse_detector.h b/webrtc/modules/remote_bitrate_estimator/overuse_detector.h
index bb69a8a0a1..56e9c14206 100644
--- a/webrtc/modules/remote_bitrate_estimator/overuse_detector.h
+++ b/webrtc/modules/remote_bitrate_estimator/overuse_detector.h
@@ -13,7 +13,7 @@
#include <list>
#include "webrtc/base/constructormagic.h"
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/modules/remote_bitrate_estimator/include/bwe_defines.h"
#include "webrtc/typedefs.h"
diff --git a/webrtc/modules/remote_bitrate_estimator/overuse_detector_unittest.cc b/webrtc/modules/remote_bitrate_estimator/overuse_detector_unittest.cc
index dcad04b5f6..50909ebd01 100644
--- a/webrtc/modules/remote_bitrate_estimator/overuse_detector_unittest.cc
+++ b/webrtc/modules/remote_bitrate_estimator/overuse_detector_unittest.cc
@@ -9,11 +9,14 @@
*/
#include <math.h>
+
+#include <algorithm>
#include <cmath>
#include <cstdlib>
#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/base/random.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/common_types.h"
#include "webrtc/modules/remote_bitrate_estimator/inter_arrival.h"
@@ -21,8 +24,6 @@
#include "webrtc/modules/remote_bitrate_estimator/overuse_estimator.h"
#include "webrtc/modules/remote_bitrate_estimator/rate_statistics.h"
#include "webrtc/test/field_trial.h"
-#include "webrtc/test/random.h"
-#include "webrtc/test/testsupport/gtest_disable.h"
namespace webrtc {
namespace testing {
@@ -38,7 +39,7 @@ class OveruseDetectorTest : public ::testing::Test {
overuse_detector_(),
overuse_estimator_(new OveruseEstimator(options_)),
inter_arrival_(new InterArrival(5 * 90, kRtpTimestampToMs, true)),
- random_(1234) {}
+ random_(123456789) {}
protected:
void SetUp() override {
@@ -55,9 +56,10 @@ class OveruseDetectorTest : public ::testing::Test {
}
rtp_timestamp_ += mean_ms * 90;
now_ms_ += mean_ms;
- receive_time_ms_ =
- std::max(receive_time_ms_,
- now_ms_ + random_.Gaussian(0, standard_deviation_ms));
+ receive_time_ms_ = std::max<int64_t>(
+ receive_time_ms_,
+ now_ms_ + static_cast<int64_t>(
+ random_.Gaussian(0, standard_deviation_ms) + 0.5));
if (kBwOverusing == overuse_detector_->State()) {
if (last_overuse + 1 != i) {
unique_overuse++;
@@ -77,9 +79,10 @@ class OveruseDetectorTest : public ::testing::Test {
}
rtp_timestamp_ += mean_ms * 90;
now_ms_ += mean_ms + drift_per_frame_ms;
- receive_time_ms_ =
- std::max(receive_time_ms_,
- now_ms_ + random_.Gaussian(0, standard_deviation_ms));
+ receive_time_ms_ = std::max<int64_t>(
+ receive_time_ms_,
+ now_ms_ + static_cast<int64_t>(
+ random_.Gaussian(0, standard_deviation_ms) + 0.5));
if (kBwOverusing == overuse_detector_->State()) {
return i + 1;
}
@@ -114,7 +117,7 @@ class OveruseDetectorTest : public ::testing::Test {
rtc::scoped_ptr<OveruseDetector> overuse_detector_;
rtc::scoped_ptr<OveruseEstimator> overuse_estimator_;
rtc::scoped_ptr<InterArrival> inter_arrival_;
- test::Random random_;
+ Random random_;
};
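A small sketch of the rounding idiom introduced in the fixture above: static_cast truncates toward zero, so adding 0.5 rounds to nearest only for values >= -0.5, which is fine here since the rounded quantity is a large positive timestamp.

static_cast<int64_t>(1.6 + 0.5);   // 2: rounds to nearest for positives.
static_cast<int64_t>(-1.6 + 0.5);  // -1: truncation toward zero, not -2.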
TEST_F(OveruseDetectorTest, GaussianRandom) {
@@ -222,7 +225,7 @@ TEST_F(OveruseDetectorTest, DISABLED_OveruseWithHighVariance100Kbit10fps) {
UpdateDetector(rtp_timestamp, now_ms_, packet_size);
rtp_timestamp += frame_duration_ms * 90;
if (i % 2) {
- offset = rand() % 50;
+ offset = random_.Rand(0, 49);
now_ms_ += frame_duration_ms - offset;
} else {
now_ms_ += frame_duration_ms + offset;
@@ -254,7 +257,7 @@ TEST_F(OveruseDetectorTest, DISABLED_OveruseWithLowVariance100Kbit10fps) {
UpdateDetector(rtp_timestamp, now_ms_, packet_size);
rtp_timestamp += frame_duration_ms * 90;
if (i % 2) {
- offset = rand() % 2;
+ offset = random_.Rand(0, 1);
now_ms_ += frame_duration_ms - offset;
} else {
now_ms_ += frame_duration_ms + offset;
@@ -290,7 +293,7 @@ TEST_F(OveruseDetectorTest, OveruseWithLowVariance2000Kbit30fps) {
UpdateDetector(rtp_timestamp, now_ms_, packet_size);
rtp_timestamp += frame_duration_ms * 90;
if (i % 2) {
- offset = rand() % 2;
+ offset = random_.Rand(0, 1);
now_ms_ += frame_duration_ms - offset;
} else {
now_ms_ += frame_duration_ms + offset;
@@ -314,8 +317,13 @@ TEST_F(OveruseDetectorTest, OveruseWithLowVariance2000Kbit30fps) {
EXPECT_EQ(kBwOverusing, overuse_detector_->State());
}
-TEST_F(OveruseDetectorTest,
- DISABLED_ON_ANDROID(LowGaussianVariance30Kbit3fps)) {
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_LowGaussianVariance30Kbit3fps \
+ DISABLED_LowGaussianVariance30Kbit3fps
+#else
+#define MAYBE_LowGaussianVariance30Kbit3fps LowGaussianVariance30Kbit3fps
+#endif
+TEST_F(OveruseDetectorTest, MAYBE_LowGaussianVariance30Kbit3fps) {
size_t packet_size = 1200;
int packets_per_frame = 1;
int frame_duration_ms = 333;
@@ -323,10 +331,10 @@ TEST_F(OveruseDetectorTest,
int sigma_ms = 3;
int unique_overuse = Run100000Samples(packets_per_frame, packet_size,
frame_duration_ms, sigma_ms);
- EXPECT_EQ(13, unique_overuse);
+ EXPECT_EQ(1, unique_overuse);
int frames_until_overuse = RunUntilOveruse(packets_per_frame, packet_size,
frame_duration_ms, sigma_ms, drift_per_frame_ms);
- EXPECT_EQ(14, frames_until_overuse);
+ EXPECT_EQ(13, frames_until_overuse);
}
TEST_F(OveruseDetectorTest, LowGaussianVarianceFastDrift30Kbit3fps) {
@@ -337,7 +345,7 @@ TEST_F(OveruseDetectorTest, LowGaussianVarianceFastDrift30Kbit3fps) {
int sigma_ms = 3;
int unique_overuse = Run100000Samples(packets_per_frame, packet_size,
frame_duration_ms, sigma_ms);
- EXPECT_EQ(13, unique_overuse);
+ EXPECT_EQ(1, unique_overuse);
int frames_until_overuse = RunUntilOveruse(packets_per_frame, packet_size,
frame_duration_ms, sigma_ms, drift_per_frame_ms);
EXPECT_EQ(4, frames_until_overuse);
@@ -351,10 +359,10 @@ TEST_F(OveruseDetectorTest, HighGaussianVariance30Kbit3fps) {
int sigma_ms = 10;
int unique_overuse = Run100000Samples(packets_per_frame, packet_size,
frame_duration_ms, sigma_ms);
- EXPECT_EQ(46, unique_overuse);
+ EXPECT_EQ(1, unique_overuse);
int frames_until_overuse = RunUntilOveruse(packets_per_frame, packet_size,
frame_duration_ms, sigma_ms, drift_per_frame_ms);
- EXPECT_EQ(42, frames_until_overuse);
+ EXPECT_EQ(32, frames_until_overuse);
}
TEST_F(OveruseDetectorTest, HighGaussianVarianceFastDrift30Kbit3fps) {
@@ -365,14 +373,19 @@ TEST_F(OveruseDetectorTest, HighGaussianVarianceFastDrift30Kbit3fps) {
int sigma_ms = 10;
int unique_overuse = Run100000Samples(packets_per_frame, packet_size,
frame_duration_ms, sigma_ms);
- EXPECT_EQ(46, unique_overuse);
+ EXPECT_EQ(1, unique_overuse);
int frames_until_overuse = RunUntilOveruse(packets_per_frame, packet_size,
frame_duration_ms, sigma_ms, drift_per_frame_ms);
EXPECT_EQ(4, frames_until_overuse);
}
-TEST_F(OveruseDetectorTest,
- DISABLED_ON_ANDROID(LowGaussianVariance100Kbit5fps)) {
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_LowGaussianVariance100Kbit5fps \
+ DISABLED_LowGaussianVariance100Kbit5fps
+#else
+#define MAYBE_LowGaussianVariance100Kbit5fps LowGaussianVariance100Kbit5fps
+#endif
+TEST_F(OveruseDetectorTest, MAYBE_LowGaussianVariance100Kbit5fps) {
size_t packet_size = 1200;
int packets_per_frame = 2;
int frame_duration_ms = 200;
@@ -380,14 +393,19 @@ TEST_F(OveruseDetectorTest,
int sigma_ms = 3;
int unique_overuse = Run100000Samples(packets_per_frame, packet_size,
frame_duration_ms, sigma_ms);
- EXPECT_EQ(12, unique_overuse);
+ EXPECT_EQ(0, unique_overuse);
int frames_until_overuse = RunUntilOveruse(packets_per_frame, packet_size,
frame_duration_ms, sigma_ms, drift_per_frame_ms);
- EXPECT_EQ(12, frames_until_overuse);
+ EXPECT_EQ(13, frames_until_overuse);
}
-TEST_F(OveruseDetectorTest,
- DISABLED_ON_ANDROID(HighGaussianVariance100Kbit5fps)) {
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_HighGaussianVariance100Kbit5fps \
+ DISABLED_HighGaussianVariance100Kbit5fps
+#else
+#define MAYBE_HighGaussianVariance100Kbit5fps HighGaussianVariance100Kbit5fps
+#endif
+TEST_F(OveruseDetectorTest, MAYBE_HighGaussianVariance100Kbit5fps) {
size_t packet_size = 1200;
int packets_per_frame = 2;
int frame_duration_ms = 200;
@@ -395,14 +413,19 @@ TEST_F(OveruseDetectorTest,
int sigma_ms = 10;
int unique_overuse = Run100000Samples(packets_per_frame, packet_size,
frame_duration_ms, sigma_ms);
- EXPECT_EQ(16, unique_overuse);
+ EXPECT_EQ(1, unique_overuse);
int frames_until_overuse = RunUntilOveruse(packets_per_frame, packet_size,
frame_duration_ms, sigma_ms, drift_per_frame_ms);
- EXPECT_EQ(37, frames_until_overuse);
+ EXPECT_EQ(32, frames_until_overuse);
}
-TEST_F(OveruseDetectorTest,
- DISABLED_ON_ANDROID(LowGaussianVariance100Kbit10fps)) {
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_LowGaussianVariance100Kbit10fps \
+ DISABLED_LowGaussianVariance100Kbit10fps
+#else
+#define MAYBE_LowGaussianVariance100Kbit10fps LowGaussianVariance100Kbit10fps
+#endif
+TEST_F(OveruseDetectorTest, MAYBE_LowGaussianVariance100Kbit10fps) {
size_t packet_size = 1200;
int packets_per_frame = 1;
int frame_duration_ms = 100;
@@ -410,14 +433,19 @@ TEST_F(OveruseDetectorTest,
int sigma_ms = 3;
int unique_overuse = Run100000Samples(packets_per_frame, packet_size,
frame_duration_ms, sigma_ms);
- EXPECT_EQ(12, unique_overuse);
+ EXPECT_EQ(1, unique_overuse);
int frames_until_overuse = RunUntilOveruse(packets_per_frame, packet_size,
frame_duration_ms, sigma_ms, drift_per_frame_ms);
- EXPECT_EQ(12, frames_until_overuse);
+ EXPECT_EQ(13, frames_until_overuse);
}
-TEST_F(OveruseDetectorTest,
- DISABLED_ON_ANDROID(HighGaussianVariance100Kbit10fps)) {
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_HighGaussianVariance100Kbit10fps \
+ DISABLED_HighGaussianVariance100Kbit10fps
+#else
+#define MAYBE_HighGaussianVariance100Kbit10fps HighGaussianVariance100Kbit10fps
+#endif
+TEST_F(OveruseDetectorTest, MAYBE_HighGaussianVariance100Kbit10fps) {
size_t packet_size = 1200;
int packets_per_frame = 1;
int frame_duration_ms = 100;
@@ -425,14 +453,19 @@ TEST_F(OveruseDetectorTest,
int sigma_ms = 10;
int unique_overuse = Run100000Samples(packets_per_frame, packet_size,
frame_duration_ms, sigma_ms);
- EXPECT_EQ(12, unique_overuse);
+ EXPECT_EQ(0, unique_overuse);
int frames_until_overuse = RunUntilOveruse(packets_per_frame, packet_size,
frame_duration_ms, sigma_ms, drift_per_frame_ms);
- EXPECT_EQ(37, frames_until_overuse);
+ EXPECT_EQ(32, frames_until_overuse);
}
-TEST_F(OveruseDetectorTest,
- DISABLED_ON_ANDROID(LowGaussianVariance300Kbit30fps)) {
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_LowGaussianVariance300Kbit30fps \
+ DISABLED_LowGaussianVariance300Kbit30fps
+#else
+#define MAYBE_LowGaussianVariance300Kbit30fps LowGaussianVariance300Kbit30fps
+#endif
+TEST_F(OveruseDetectorTest, MAYBE_LowGaussianVariance300Kbit30fps) {
size_t packet_size = 1200;
int packets_per_frame = 1;
int frame_duration_ms = 33;
@@ -443,7 +476,7 @@ TEST_F(OveruseDetectorTest,
EXPECT_EQ(0, unique_overuse);
int frames_until_overuse = RunUntilOveruse(packets_per_frame, packet_size,
frame_duration_ms, sigma_ms, drift_per_frame_ms);
- EXPECT_EQ(14, frames_until_overuse);
+ EXPECT_EQ(15, frames_until_overuse);
}
TEST_F(OveruseDetectorTest, LowGaussianVarianceFastDrift300Kbit30fps) {
@@ -471,7 +504,7 @@ TEST_F(OveruseDetectorTest, HighGaussianVariance300Kbit30fps) {
EXPECT_EQ(0, unique_overuse);
int frames_until_overuse = RunUntilOveruse(packets_per_frame, packet_size,
frame_duration_ms, sigma_ms, drift_per_frame_ms);
- EXPECT_EQ(49, frames_until_overuse);
+ EXPECT_EQ(41, frames_until_overuse);
}
TEST_F(OveruseDetectorTest, HighGaussianVarianceFastDrift300Kbit30fps) {
@@ -485,11 +518,16 @@ TEST_F(OveruseDetectorTest, HighGaussianVarianceFastDrift300Kbit30fps) {
EXPECT_EQ(0, unique_overuse);
int frames_until_overuse = RunUntilOveruse(packets_per_frame, packet_size,
frame_duration_ms, sigma_ms, drift_per_frame_ms);
- EXPECT_EQ(8, frames_until_overuse);
+ EXPECT_EQ(10, frames_until_overuse);
}
-TEST_F(OveruseDetectorTest,
- DISABLED_ON_ANDROID(LowGaussianVariance1000Kbit30fps)) {
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_LowGaussianVariance1000Kbit30fps \
+ DISABLED_LowGaussianVariance1000Kbit30fps
+#else
+#define MAYBE_LowGaussianVariance1000Kbit30fps LowGaussianVariance1000Kbit30fps
+#endif
+TEST_F(OveruseDetectorTest, MAYBE_LowGaussianVariance1000Kbit30fps) {
size_t packet_size = 1200;
int packets_per_frame = 3;
int frame_duration_ms = 33;
@@ -500,7 +538,7 @@ TEST_F(OveruseDetectorTest,
EXPECT_EQ(0, unique_overuse);
int frames_until_overuse = RunUntilOveruse(packets_per_frame, packet_size,
frame_duration_ms, sigma_ms, drift_per_frame_ms);
- EXPECT_EQ(14, frames_until_overuse);
+ EXPECT_EQ(15, frames_until_overuse);
}
TEST_F(OveruseDetectorTest, LowGaussianVarianceFastDrift1000Kbit30fps) {
@@ -528,7 +566,7 @@ TEST_F(OveruseDetectorTest, HighGaussianVariance1000Kbit30fps) {
EXPECT_EQ(0, unique_overuse);
int frames_until_overuse = RunUntilOveruse(packets_per_frame, packet_size,
frame_duration_ms, sigma_ms, drift_per_frame_ms);
- EXPECT_EQ(49, frames_until_overuse);
+ EXPECT_EQ(41, frames_until_overuse);
}
TEST_F(OveruseDetectorTest, HighGaussianVarianceFastDrift1000Kbit30fps) {
@@ -542,11 +580,16 @@ TEST_F(OveruseDetectorTest, HighGaussianVarianceFastDrift1000Kbit30fps) {
EXPECT_EQ(0, unique_overuse);
int frames_until_overuse = RunUntilOveruse(packets_per_frame, packet_size,
frame_duration_ms, sigma_ms, drift_per_frame_ms);
- EXPECT_EQ(8, frames_until_overuse);
+ EXPECT_EQ(10, frames_until_overuse);
}
-TEST_F(OveruseDetectorTest,
- DISABLED_ON_ANDROID(LowGaussianVariance2000Kbit30fps)) {
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_LowGaussianVariance2000Kbit30fps \
+ DISABLED_LowGaussianVariance2000Kbit30fps
+#else
+#define MAYBE_LowGaussianVariance2000Kbit30fps LowGaussianVariance2000Kbit30fps
+#endif
+TEST_F(OveruseDetectorTest, MAYBE_LowGaussianVariance2000Kbit30fps) {
size_t packet_size = 1200;
int packets_per_frame = 6;
int frame_duration_ms = 33;
@@ -557,7 +600,7 @@ TEST_F(OveruseDetectorTest,
EXPECT_EQ(0, unique_overuse);
int frames_until_overuse = RunUntilOveruse(packets_per_frame, packet_size,
frame_duration_ms, sigma_ms, drift_per_frame_ms);
- EXPECT_EQ(14, frames_until_overuse);
+ EXPECT_EQ(15, frames_until_overuse);
}
TEST_F(OveruseDetectorTest, LowGaussianVarianceFastDrift2000Kbit30fps) {
@@ -585,7 +628,7 @@ TEST_F(OveruseDetectorTest, HighGaussianVariance2000Kbit30fps) {
EXPECT_EQ(0, unique_overuse);
int frames_until_overuse = RunUntilOveruse(packets_per_frame, packet_size,
frame_duration_ms, sigma_ms, drift_per_frame_ms);
- EXPECT_EQ(49, frames_until_overuse);
+ EXPECT_EQ(41, frames_until_overuse);
}
TEST_F(OveruseDetectorTest, HighGaussianVarianceFastDrift2000Kbit30fps) {
@@ -599,7 +642,7 @@ TEST_F(OveruseDetectorTest, HighGaussianVarianceFastDrift2000Kbit30fps) {
EXPECT_EQ(0, unique_overuse);
int frames_until_overuse = RunUntilOveruse(packets_per_frame, packet_size,
frame_duration_ms, sigma_ms, drift_per_frame_ms);
- EXPECT_EQ(8, frames_until_overuse);
+ EXPECT_EQ(10, frames_until_overuse);
}
class OveruseDetectorExperimentTest : public OveruseDetectorTest {
diff --git a/webrtc/modules/remote_bitrate_estimator/overuse_estimator.cc b/webrtc/modules/remote_bitrate_estimator/overuse_estimator.cc
index 4be7b7493b..83917912e8 100644
--- a/webrtc/modules/remote_bitrate_estimator/overuse_estimator.cc
+++ b/webrtc/modules/remote_bitrate_estimator/overuse_estimator.cc
@@ -10,14 +10,15 @@
#include "webrtc/modules/remote_bitrate_estimator/overuse_estimator.h"
-#include <algorithm>
#include <assert.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
+#include <algorithm>
+
+#include "webrtc/base/logging.h"
#include "webrtc/modules/remote_bitrate_estimator/include/bwe_defines.h"
-#include "webrtc/system_wrappers/include/logging.h"
namespace webrtc {
diff --git a/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_abs_send_time.cc b/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_abs_send_time.cc
index 2dc32a7ee7..97e5cd32e5 100644
--- a/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_abs_send_time.cc
+++ b/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_abs_send_time.cc
@@ -12,14 +12,16 @@
#include <math.h>
+#include <algorithm>
+
#include "webrtc/base/constructormagic.h"
+#include "webrtc/base/logging.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/base/thread_annotations.h"
+#include "webrtc/modules/pacing/paced_sender.h"
#include "webrtc/modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h"
-#include "webrtc/modules/pacing/include/paced_sender.h"
#include "webrtc/system_wrappers/include/clock.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
-#include "webrtc/system_wrappers/include/logging.h"
#include "webrtc/typedefs.h"
namespace webrtc {
@@ -241,6 +243,7 @@ void RemoteBitrateEstimatorAbsSendTime::IncomingPacket(int64_t arrival_time_ms,
if (!header.extension.hasAbsoluteSendTime) {
LOG(LS_WARNING) << "RemoteBitrateEstimatorAbsSendTimeImpl: Incoming packet "
"is missing absolute send time extension!";
+ return;
}
IncomingPacketInfo(arrival_time_ms, header.extension.absoluteSendTime,
payload_size, header.ssrc, was_paced);
diff --git a/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_abs_send_time_unittest.cc b/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_abs_send_time_unittest.cc
index 195c95aacb..908daf6c31 100644
--- a/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_abs_send_time_unittest.cc
+++ b/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_abs_send_time_unittest.cc
@@ -17,7 +17,6 @@ namespace webrtc {
class RemoteBitrateEstimatorAbsSendTimeTest :
public RemoteBitrateEstimatorTest {
public:
-
RemoteBitrateEstimatorAbsSendTimeTest() {}
virtual void SetUp() {
bitrate_estimator_.reset(new RemoteBitrateEstimatorAbsSendTime(
diff --git a/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream.cc b/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream.cc
index f1a1cb6602..4b7732c80f 100644
--- a/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream.cc
+++ b/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream.cc
@@ -10,16 +10,18 @@
#include "webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream.h"
+#include <utility>
+
#include "webrtc/base/constructormagic.h"
+#include "webrtc/base/logging.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/base/thread_annotations.h"
+#include "webrtc/modules/remote_bitrate_estimator/aimd_rate_control.h"
#include "webrtc/modules/remote_bitrate_estimator/inter_arrival.h"
#include "webrtc/modules/remote_bitrate_estimator/overuse_detector.h"
#include "webrtc/modules/remote_bitrate_estimator/overuse_estimator.h"
-#include "webrtc/modules/remote_bitrate_estimator/aimd_rate_control.h"
#include "webrtc/system_wrappers/include/clock.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
-#include "webrtc/system_wrappers/include/logging.h"
#include "webrtc/typedefs.h"
namespace webrtc {
@@ -28,19 +30,20 @@ enum { kTimestampGroupLengthMs = 5 };
static const double kTimestampToMs = 1.0 / 90.0;
struct RemoteBitrateEstimatorSingleStream::Detector {
- explicit Detector(int64_t last_packet_time_ms,
- const OverUseDetectorOptions& options,
- bool enable_burst_grouping)
- : last_packet_time_ms(last_packet_time_ms),
- inter_arrival(90 * kTimestampGroupLengthMs, kTimestampToMs,
- enable_burst_grouping),
- estimator(options),
- detector(options) {}
- int64_t last_packet_time_ms;
- InterArrival inter_arrival;
- OveruseEstimator estimator;
- OveruseDetector detector;
- };
+ explicit Detector(int64_t last_packet_time_ms,
+ const OverUseDetectorOptions& options,
+ bool enable_burst_grouping)
+ : last_packet_time_ms(last_packet_time_ms),
+ inter_arrival(90 * kTimestampGroupLengthMs,
+ kTimestampToMs,
+ enable_burst_grouping),
+ estimator(options),
+ detector(options) {}
+ int64_t last_packet_time_ms;
+ InterArrival inter_arrival;
+ OveruseEstimator estimator;
+ OveruseDetector detector;
+};
RemoteBitrateEstimatorSingleStream::RemoteBitrateEstimatorSingleStream(
RemoteBitrateObserver* observer,
diff --git a/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream_unittest.cc b/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream_unittest.cc
index a6c182a7bc..7a26a7e63b 100644
--- a/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream_unittest.cc
+++ b/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream_unittest.cc
@@ -17,7 +17,6 @@ namespace webrtc {
class RemoteBitrateEstimatorSingleTest :
public RemoteBitrateEstimatorTest {
public:
-
RemoteBitrateEstimatorSingleTest() {}
virtual void SetUp() {
bitrate_estimator_.reset(new RemoteBitrateEstimatorSingleStream(
diff --git a/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_unittest_helper.cc b/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_unittest_helper.cc
index 315f5422d9..8b9c0b9a1d 100644
--- a/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_unittest_helper.cc
+++ b/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_unittest_helper.cc
@@ -10,6 +10,7 @@
#include "webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_unittest_helper.h"
#include <algorithm>
+#include <limits>
#include <utility>
namespace webrtc {
@@ -383,11 +384,11 @@ void RemoteBitrateEstimatorTest::RateIncreaseReorderingTestHelper(
2 * kFrameIntervalAbsSendTime);
IncomingPacket(kDefaultSsrc, 1000, clock_.TimeInMilliseconds(), timestamp,
absolute_send_time, true);
- IncomingPacket(
- kDefaultSsrc, 1000, clock_.TimeInMilliseconds(),
- timestamp - 90 * kFrameIntervalMs,
- AddAbsSendTime(absolute_send_time, -int(kFrameIntervalAbsSendTime)),
- true);
+ IncomingPacket(kDefaultSsrc, 1000, clock_.TimeInMilliseconds(),
+ timestamp - 90 * kFrameIntervalMs,
+ AddAbsSendTime(absolute_send_time,
+ -static_cast<int>(kFrameIntervalAbsSendTime)),
+ true);
}
bitrate_estimator_->Process();
EXPECT_TRUE(bitrate_observer_->updated());
@@ -520,8 +521,8 @@ void RemoteBitrateEstimatorTest::TestTimestampGroupingTestHelper() {
uint32_t timestamp = 0;
// Initialize absolute_send_time (24 bits) so that it will definitely wrap
// during the test.
- uint32_t absolute_send_time =
- AddAbsSendTime((1 << 24), -int(50 * kFrameIntervalAbsSendTime));
+ uint32_t absolute_send_time = AddAbsSendTime(
+ (1 << 24), -static_cast<int>(50 * kFrameIntervalAbsSendTime));
// Initial set of frames to increase the bitrate. 6 seconds to have enough
// time for the first estimate to be generated and for Process() to be called.
for (int i = 0; i <= 6 * kFramerate; ++i) {
@@ -556,8 +557,10 @@ void RemoteBitrateEstimatorTest::TestTimestampGroupingTestHelper() {
// Increase time until next batch to simulate over-use.
clock_.AdvanceTimeMilliseconds(10);
timestamp += 90 * kFrameIntervalMs - kTimestampGroupLength;
- absolute_send_time = AddAbsSendTime(absolute_send_time, AddAbsSendTime(
- kFrameIntervalAbsSendTime, -int(kTimestampGroupLengthAbsSendTime)));
+ absolute_send_time = AddAbsSendTime(
+ absolute_send_time,
+ AddAbsSendTime(kFrameIntervalAbsSendTime,
+ -static_cast<int>(kTimestampGroupLengthAbsSendTime)));
bitrate_estimator_->Process();
}
EXPECT_TRUE(bitrate_observer_->updated());
diff --git a/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_unittest_helper.h b/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_unittest_helper.h
index 606bb6c4e6..8343d7d57b 100644
--- a/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_unittest_helper.h
+++ b/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_unittest_helper.h
@@ -14,6 +14,7 @@
#include <list>
#include <map>
#include <utility>
+#include <vector>
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/base/constructormagic.h"
diff --git a/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimators_test.cc b/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimators_test.cc
index d6f049f6ac..2ce144129b 100644
--- a/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimators_test.cc
+++ b/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimators_test.cc
@@ -13,8 +13,10 @@
#include <unistd.h>
#endif
+#include <algorithm>
#include <sstream>
+#include "webrtc/base/random.h"
#include "webrtc/modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h"
#include "webrtc/modules/remote_bitrate_estimator/test/bwe_test.h"
#include "webrtc/modules/remote_bitrate_estimator/test/packet_receiver.h"
@@ -242,18 +244,20 @@ class BweFeedbackTest
: public BweTest,
public ::testing::TestWithParam<BandwidthEstimatorType> {
public:
- BweFeedbackTest() : BweTest() {}
+#ifdef WEBRTC_WIN
+ BweFeedbackTest()
+ : BweTest(), random_(Clock::GetRealTimeClock()->TimeInMicroseconds()) {}
+#else
+ BweFeedbackTest()
+ : BweTest(),
+ // Multiply the time by a random-ish odd number derived from the PID
+ // (odd, so the multiplication never discards low-order seed bits).
+ random_((getpid() | 1) *
+ Clock::GetRealTimeClock()->TimeInMicroseconds()) {}
+#endif
virtual ~BweFeedbackTest() {}
protected:
- void SetUp() override {
- unsigned int seed = Clock::GetRealTimeClock()->TimeInMicroseconds();
-#ifndef WEBRTC_WIN
- seed *= getpid();
-#endif
- srand(seed);
- BweTest::SetUp();
- }
+ Random random_;
private:
RTC_DISALLOW_COPY_AND_ASSIGN(BweFeedbackTest);
@@ -356,7 +360,7 @@ TEST_P(BweFeedbackTest, PacedSelfFairness50msTest) {
const int kNumRmcatFlows = 4;
int64_t offset_ms[kNumRmcatFlows];
for (int i = 0; i < kNumRmcatFlows; ++i) {
- offset_ms[i] = std::max(0, 5000 * i + rand() % 2001 - 1000);
+ offset_ms[i] = std::max(0, 5000 * i + random_.Rand(-1000, 1000));
}
RunFairnessTest(GetParam(), kNumRmcatFlows, 0, 300, 3000, 50, kRttMs,
@@ -370,7 +374,7 @@ TEST_P(BweFeedbackTest, PacedSelfFairness500msTest) {
const int kNumRmcatFlows = 4;
int64_t offset_ms[kNumRmcatFlows];
for (int i = 0; i < kNumRmcatFlows; ++i) {
- offset_ms[i] = std::max(0, 5000 * i + rand() % 2001 - 1000);
+ offset_ms[i] = std::max(0, 5000 * i + random_.Rand(-1000, 1000));
}
RunFairnessTest(GetParam(), kNumRmcatFlows, 0, 300, 3000, 500, kRttMs,
@@ -384,7 +388,7 @@ TEST_P(BweFeedbackTest, PacedSelfFairness1000msTest) {
const int kNumRmcatFlows = 4;
int64_t offset_ms[kNumRmcatFlows];
for (int i = 0; i < kNumRmcatFlows; ++i) {
- offset_ms[i] = std::max(0, 5000 * i + rand() % 2001 - 1000);
+ offset_ms[i] = std::max(0, 5000 * i + random_.Rand(-1000, 1000));
}
RunFairnessTest(GetParam(), kNumRmcatFlows, 0, 300, 3000, 1000, kRttMs,
@@ -397,7 +401,7 @@ TEST_P(BweFeedbackTest, TcpFairness50msTest) {
int64_t offset_ms[2]; // One TCP, one RMCAT flow.
for (int i = 0; i < 2; ++i) {
- offset_ms[i] = std::max(0, 5000 * i + rand() % 2001 - 1000);
+ offset_ms[i] = std::max(0, 5000 * i + random_.Rand(-1000, 1000));
}
RunFairnessTest(GetParam(), 1, 1, 300, 2000, 50, kRttMs, kMaxJitterMs,
@@ -410,7 +414,7 @@ TEST_P(BweFeedbackTest, TcpFairness500msTest) {
int64_t offset_ms[2]; // One TCP, one RMCAT flow.
for (int i = 0; i < 2; ++i) {
- offset_ms[i] = std::max(0, 5000 * i + rand() % 2001 - 1000);
+ offset_ms[i] = std::max(0, 5000 * i + random_.Rand(-1000, 1000));
}
RunFairnessTest(GetParam(), 1, 1, 300, 2000, 500, kRttMs, kMaxJitterMs,
@@ -423,7 +427,7 @@ TEST_P(BweFeedbackTest, TcpFairness1000msTest) {
int64_t offset_ms[2]; // One TCP, one RMCAT flow.
for (int i = 0; i < 2; ++i) {
- offset_ms[i] = std::max(0, 5000 * i + rand() % 2001 - 1000);
+ offset_ms[i] = std::max(0, 5000 * i + random_.Rand(-1000, 1000));
}
RunFairnessTest(GetParam(), 1, 1, 300, 2000, 1000, kRttMs, kMaxJitterMs,
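The replacements above preserve the offset distribution, assuming the two-argument Rand(low, high) draws from the closed interval [low, high] as the patch uses it throughout; a sketch of the equivalence:

int old_style = std::rand() % 2001 - 1000;  // 0..2000 shifted to -1000..1000
                                            // (with slight modulo bias).
int new_style = random_.Rand(-1000, 1000);  // same interval, seeded PRNG.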
diff --git a/webrtc/modules/remote_bitrate_estimator/remote_estimator_proxy.cc b/webrtc/modules/remote_bitrate_estimator/remote_estimator_proxy.cc
index b7f9f65dbc..15ca42dda9 100644
--- a/webrtc/modules/remote_bitrate_estimator/remote_estimator_proxy.cc
+++ b/webrtc/modules/remote_bitrate_estimator/remote_estimator_proxy.cc
@@ -13,14 +13,14 @@
#include "webrtc/base/checks.h"
#include "webrtc/base/logging.h"
#include "webrtc/system_wrappers/include/clock.h"
-#include "webrtc/modules/pacing/include/packet_router.h"
+#include "webrtc/modules/pacing/packet_router.h"
#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp.h"
namespace webrtc {
// TODO(sprang): Tune these!
-const int RemoteEstimatorProxy::kDefaultProcessIntervalMs = 200;
+const int RemoteEstimatorProxy::kDefaultProcessIntervalMs = 50;
const int RemoteEstimatorProxy::kBackWindowMs = 500;
RemoteEstimatorProxy::RemoteEstimatorProxy(Clock* clock,
diff --git a/webrtc/modules/remote_bitrate_estimator/remote_estimator_proxy.h b/webrtc/modules/remote_bitrate_estimator/remote_estimator_proxy.h
index e867ff77a4..98a68b3dcf 100644
--- a/webrtc/modules/remote_bitrate_estimator/remote_estimator_proxy.h
+++ b/webrtc/modules/remote_bitrate_estimator/remote_estimator_proxy.h
@@ -15,7 +15,7 @@
#include <vector>
#include "webrtc/base/criticalsection.h"
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h"
namespace webrtc {
diff --git a/webrtc/modules/remote_bitrate_estimator/remote_estimator_proxy_unittest.cc b/webrtc/modules/remote_bitrate_estimator/remote_estimator_proxy_unittest.cc
index 826a724e33..7ddd31467b 100644
--- a/webrtc/modules/remote_bitrate_estimator/remote_estimator_proxy_unittest.cc
+++ b/webrtc/modules/remote_bitrate_estimator/remote_estimator_proxy_unittest.cc
@@ -11,7 +11,7 @@
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
-#include "webrtc/modules/pacing/include/packet_router.h"
+#include "webrtc/modules/pacing/packet_router.h"
#include "webrtc/modules/remote_bitrate_estimator/remote_estimator_proxy.h"
#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h"
#include "webrtc/system_wrappers/include/clock.h"
diff --git a/webrtc/modules/remote_bitrate_estimator/test/bwe.h b/webrtc/modules/remote_bitrate_estimator/test/bwe.h
index ef9b3149d7..8d29de2619 100644
--- a/webrtc/modules/remote_bitrate_estimator/test/bwe.h
+++ b/webrtc/modules/remote_bitrate_estimator/test/bwe.h
@@ -11,7 +11,10 @@
#ifndef WEBRTC_MODULES_REMOTE_BITRATE_ESTIMATOR_TEST_BWE_H_
#define WEBRTC_MODULES_REMOTE_BITRATE_ESTIMATOR_TEST_BWE_H_
+#include <list>
+#include <map>
#include <sstream>
+#include <string>
#include "webrtc/test/testsupport/gtest_prod_util.h"
#include "webrtc/modules/remote_bitrate_estimator/test/packet.h"
diff --git a/webrtc/modules/remote_bitrate_estimator/test/bwe_test.cc b/webrtc/modules/remote_bitrate_estimator/test/bwe_test.cc
index f837638474..9da21c1aaa 100644
--- a/webrtc/modules/remote_bitrate_estimator/test/bwe_test.cc
+++ b/webrtc/modules/remote_bitrate_estimator/test/bwe_test.cc
@@ -12,9 +12,10 @@
#include <sstream>
+#include "webrtc/base/arraysize.h"
#include "webrtc/base/common.h"
#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/modules/remote_bitrate_estimator/test/bwe_test_framework.h"
#include "webrtc/modules/remote_bitrate_estimator/test/metric_recorder.h"
#include "webrtc/modules/remote_bitrate_estimator/test/packet_receiver.h"
@@ -662,7 +663,7 @@ void BweTest::RunSelfFairness(BandwidthEstimatorType bwe_type) {
void BweTest::RunRoundTripTimeFairness(BandwidthEstimatorType bwe_type) {
const int kAllFlowIds[] = {0, 1, 2, 3, 4}; // Five RMCAT flows.
const int64_t kAllOneWayDelayMs[] = {10, 25, 50, 100, 150};
- const size_t kNumFlows = ARRAY_SIZE(kAllFlowIds);
+ const size_t kNumFlows = arraysize(kAllFlowIds);
rtc::scoped_ptr<AdaptiveVideoSource> sources[kNumFlows];
rtc::scoped_ptr<VideoSender> senders[kNumFlows];
rtc::scoped_ptr<MetricRecorder> metric_recorders[kNumFlows];
@@ -774,10 +775,10 @@ void BweTest::RunMultipleShortTcpFairness(
const int kAllTcpFlowIds[] = {2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
assert(tcp_starting_times_ms.size() == tcp_file_sizes_bytes.size() &&
- tcp_starting_times_ms.size() == ARRAY_SIZE(kAllTcpFlowIds));
+ tcp_starting_times_ms.size() == arraysize(kAllTcpFlowIds));
- const size_t kNumRmcatFlows = ARRAY_SIZE(kAllRmcatFlowIds);
- const size_t kNumTotalFlows = kNumRmcatFlows + ARRAY_SIZE(kAllTcpFlowIds);
+ const size_t kNumRmcatFlows = arraysize(kAllRmcatFlowIds);
+ const size_t kNumTotalFlows = kNumRmcatFlows + arraysize(kAllTcpFlowIds);
rtc::scoped_ptr<AdaptiveVideoSource> sources[kNumRmcatFlows];
rtc::scoped_ptr<PacketSender> senders[kNumTotalFlows];
@@ -869,7 +870,7 @@ void BweTest::RunMultipleShortTcpFairness(
// During the test, one of the flows is paused and later resumed.
void BweTest::RunPauseResumeFlows(BandwidthEstimatorType bwe_type) {
const int kAllFlowIds[] = {0, 1, 2}; // Three RMCAT flows.
- const size_t kNumFlows = ARRAY_SIZE(kAllFlowIds);
+ const size_t kNumFlows = arraysize(kAllFlowIds);
rtc::scoped_ptr<AdaptiveVideoSource> sources[kNumFlows];
rtc::scoped_ptr<VideoSender> senders[kNumFlows];
@@ -947,7 +948,7 @@ std::vector<int> BweTest::GetFileSizesBytes(int num_files) {
const int kMinKbytes = 100;
const int kMaxKbytes = 1000;
- test::Random random(0x12345678);
+ Random random(0x12345678);
std::vector<int> tcp_file_sizes_bytes;
while (num_files-- > 0) {
@@ -960,7 +961,7 @@ std::vector<int> BweTest::GetFileSizesBytes(int num_files) {
std::vector<int64_t> BweTest::GetStartingTimesMs(int num_files) {
// The OFF state is modeled as an exponential distribution with mean = 10 seconds.
const float kMeanMs = 10000.0f;
- test::Random random(0x12345678);
+ Random random(0x12345678);
std::vector<int64_t> tcp_starting_times_ms;
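The comment above models the TCP OFF state as exponentially distributed with a 10-second mean, and the new seeded Random replaces test::Random so runs stay reproducible. A self-contained sketch of drawing such start times with the standard library only, as an illustration of the distribution rather than the framework's actual Random API:

#include <cstdint>
#include <random>
#include <vector>

// Draw num_files exponentially distributed starting times (mean 10 s).
// A fixed seed keeps the sequence reproducible across runs, mirroring the
// test's use of a constant seed (0x12345678).
std::vector<int64_t> ExponentialStartTimesMs(int num_files) {
  const double kMeanMs = 10000.0;
  std::mt19937 rng(0x12345678);
  std::exponential_distribution<double> dist(1.0 / kMeanMs);  // rate = 1/mean
  std::vector<int64_t> times_ms;
  for (int i = 0; i < num_files; ++i)
    times_ms.push_back(static_cast<int64_t>(dist(rng)));
  return times_ms;
}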
diff --git a/webrtc/modules/remote_bitrate_estimator/test/bwe_test_baselinefile.h b/webrtc/modules/remote_bitrate_estimator/test/bwe_test_baselinefile.h
index 64dfa85535..b3df7124e3 100644
--- a/webrtc/modules/remote_bitrate_estimator/test/bwe_test_baselinefile.h
+++ b/webrtc/modules/remote_bitrate_estimator/test/bwe_test_baselinefile.h
@@ -12,7 +12,7 @@
#define WEBRTC_MODULES_REMOTE_BITRATE_ESTIMATOR_TEST_BWE_TEST_BASELINEFILE_H_
#include <string>
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
namespace webrtc {
namespace testing {
diff --git a/webrtc/modules/remote_bitrate_estimator/test/bwe_test_fileutils.h b/webrtc/modules/remote_bitrate_estimator/test/bwe_test_fileutils.h
index 2881eba424..d470324ac3 100644
--- a/webrtc/modules/remote_bitrate_estimator/test/bwe_test_fileutils.h
+++ b/webrtc/modules/remote_bitrate_estimator/test/bwe_test_fileutils.h
@@ -16,7 +16,7 @@
#include <string>
#include "webrtc/base/constructormagic.h"
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
namespace webrtc {
namespace testing {
diff --git a/webrtc/modules/remote_bitrate_estimator/test/bwe_test_framework.cc b/webrtc/modules/remote_bitrate_estimator/test/bwe_test_framework.cc
index 4574d3d8a1..41bf836c9e 100644
--- a/webrtc/modules/remote_bitrate_estimator/test/bwe_test_framework.cc
+++ b/webrtc/modules/remote_bitrate_estimator/test/bwe_test_framework.cc
@@ -323,7 +323,7 @@ void LossFilter::SetLoss(float loss_percent) {
void LossFilter::RunFor(int64_t /*time_ms*/, Packets* in_out) {
assert(in_out);
for (PacketsIt it = in_out->begin(); it != in_out->end(); ) {
- if (random_.Rand() < loss_fraction_) {
+ if (random_.Rand<float>() < loss_fraction_) {
delete *it;
it = in_out->erase(it);
} else {
@@ -391,7 +391,7 @@ void JitterFilter::SetMaxJitter(int64_t max_jitter_ms) {
}
namespace {
-inline int64_t TruncatedNSigmaGaussian(test::Random* const random,
+inline int64_t TruncatedNSigmaGaussian(Random* const random,
int64_t mean,
int64_t std_dev) {
int64_t gaussian_random = random->Gaussian(mean, std_dev);
@@ -459,7 +459,7 @@ void ReorderFilter::RunFor(int64_t /*time_ms*/, Packets* in_out) {
PacketsIt last_it = in_out->begin();
PacketsIt it = last_it;
while (++it != in_out->end()) {
- if (random_.Rand() < reorder_fraction_) {
+ if (random_.Rand<float>() < reorder_fraction_) {
int64_t t1 = (*last_it)->send_time_us();
int64_t t2 = (*it)->send_time_us();
std::swap(*last_it, *it);
@@ -586,7 +586,7 @@ bool TraceBasedDeliveryFilter::Init(const std::string& filename) {
return false;
}
int64_t first_timestamp = -1;
- while(!feof(trace_file)) {
+ while (!feof(trace_file)) {
const size_t kMaxLineLength = 100;
char line[kMaxLineLength];
if (fgets(line, kMaxLineLength, trace_file)) {
@@ -680,6 +680,7 @@ VideoSource::VideoSource(int flow_id,
frame_period_ms_(1000.0 / fps),
bits_per_second_(1000 * kbps),
frame_size_bytes_(bits_per_second_ / 8 / fps),
+ random_(0x12345678),
flow_id_(flow_id),
next_frame_ms_(first_frame_offset_ms),
next_frame_rand_ms_(0),
@@ -713,9 +714,7 @@ void VideoSource::RunFor(int64_t time_ms, Packets* in_out) {
const int64_t kRandAmplitude = 2;
// A variance picked uniformly from {-1, 0, 1} ms is added to the frame
// timestamp.
- next_frame_rand_ms_ =
- kRandAmplitude * static_cast<float>(rand()) / RAND_MAX -
- kRandAmplitude / 2;
+ next_frame_rand_ms_ = kRandAmplitude * (random_.Rand<float>() - 0.5);
// Ensure frame will not have a negative timestamp.
int64_t next_frame_ms =
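The rewritten jitter line replaces the process-global rand()/RAND_MAX with the source's own seeded member generator, so two runs of the same simulation produce identical frame timing. A sketch of the equivalent computation with explicit types, assuming (as the change implies) that Rand<float>() returns a uniform value in [0, 1), with std::mt19937 standing in for the framework's Random class:

#include <cstdint>
#include <random>

// Uniform jitter in roughly [-1, 1) ms from a seeded engine, so the
// generated packet trace is deterministic across test runs.
int64_t NextFrameJitterMs(std::mt19937* rng) {
  const float kRandAmplitude = 2.0f;
  std::uniform_real_distribution<float> uniform01(0.0f, 1.0f);
  return static_cast<int64_t>(kRandAmplitude * (uniform01(*rng) - 0.5f));
}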
diff --git a/webrtc/modules/remote_bitrate_estimator/test/bwe_test_framework.h b/webrtc/modules/remote_bitrate_estimator/test/bwe_test_framework.h
index 6b24cf30a6..3bb9b95f4b 100644
--- a/webrtc/modules/remote_bitrate_estimator/test/bwe_test_framework.h
+++ b/webrtc/modules/remote_bitrate_estimator/test/bwe_test_framework.h
@@ -17,21 +17,23 @@
#include <algorithm>
#include <list>
#include <numeric>
+#include <set>
#include <sstream>
#include <string>
+#include <utility>
#include <vector>
#include "webrtc/base/common.h"
+#include "webrtc/base/random.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/modules/bitrate_controller/include/bitrate_controller.h"
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/modules/pacing/include/paced_sender.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/pacing/paced_sender.h"
#include "webrtc/modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h"
#include "webrtc/modules/remote_bitrate_estimator/test/bwe_test_logging.h"
#include "webrtc/modules/remote_bitrate_estimator/test/packet.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "webrtc/system_wrappers/include/clock.h"
-#include "webrtc/test/random.h"
namespace webrtc {
@@ -44,7 +46,7 @@ class DelayCapHelper;
class RateCounter {
public:
- RateCounter(int64_t window_size_ms)
+ explicit RateCounter(int64_t window_size_ms)
: window_size_us_(1000 * window_size_ms),
recently_received_packets_(0),
recently_received_bytes_(0),
@@ -265,7 +267,7 @@ class LossFilter : public PacketProcessor {
virtual void RunFor(int64_t time_ms, Packets* in_out);
private:
- test::Random random_;
+ Random random_;
float loss_fraction_;
RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(LossFilter);
@@ -299,7 +301,7 @@ class JitterFilter : public PacketProcessor {
int64_t MeanUs();
private:
- test::Random random_;
+ Random random_;
int64_t stddev_jitter_us_;
int64_t last_send_time_us_;
bool reordering_; // False by default.
@@ -318,7 +320,7 @@ class ReorderFilter : public PacketProcessor {
virtual void RunFor(int64_t time_ms, Packets* in_out);
private:
- test::Random random_;
+ Random random_;
float reorder_fraction_;
RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(ReorderFilter);
@@ -415,6 +417,7 @@ class VideoSource {
uint32_t frame_size_bytes_;
private:
+ Random random_;
const int flow_id_;
int64_t next_frame_ms_;
int64_t next_frame_rand_ms_;
diff --git a/webrtc/modules/remote_bitrate_estimator/test/bwe_test_framework_unittest.cc b/webrtc/modules/remote_bitrate_estimator/test/bwe_test_framework_unittest.cc
index 627260678b..6bdfa847df 100644
--- a/webrtc/modules/remote_bitrate_estimator/test/bwe_test_framework_unittest.cc
+++ b/webrtc/modules/remote_bitrate_estimator/test/bwe_test_framework_unittest.cc
@@ -22,39 +22,6 @@ namespace webrtc {
namespace testing {
namespace bwe {
-TEST(BweTestFramework_RandomTest, Gaussian) {
- enum {
- kN = 100000,
- kBuckets = 100,
- kMean = 49,
- kStddev = 10
- };
-
- test::Random random(0x12345678);
-
- int buckets[kBuckets] = {0};
- for (int i = 0; i < kN; ++i) {
- int index = random.Gaussian(kMean, kStddev);
- if (index >= 0 && index < kBuckets) {
- buckets[index]++;
- }
- }
-
- const double kPi = 3.14159265358979323846;
- const double kScale = kN / (kStddev * sqrt(2.0 * kPi));
- const double kDiv = -2.0 * kStddev * kStddev;
- double self_corr = 0.0;
- double bucket_corr = 0.0;
- for (int n = 0; n < kBuckets; ++n) {
- double normal_dist = kScale * exp((n - kMean) * (n - kMean) / kDiv);
- self_corr += normal_dist * normal_dist;
- bucket_corr += normal_dist * buckets[n];
- }
- printf("Correlation: %f (random sample), %f (self), %f (quotient)\n",
- bucket_corr, self_corr, bucket_corr / self_corr);
- EXPECT_NEAR(1.0, bucket_corr / self_corr, 0.0004);
-}
-
static bool IsSequenceNumberSorted(const Packets& packets) {
PacketsConstIt last_it = packets.begin();
for (PacketsConstIt it = last_it; it != packets.end(); ++it) {
@@ -533,7 +500,7 @@ TEST(BweTestFramework_JitterFilterTest, Jitter1031) {
TestJitterFilter(1031);
}
-static void TestReorderFilter(uint16_t reorder_percent, uint16_t near_value) {
+static void TestReorderFilter(uint16_t reorder_percent) {
const uint16_t kPacketCount = 10000;
// Generate packets with 10 ms interval.
@@ -559,16 +526,23 @@ static void TestReorderFilter(uint16_t reorder_percent, uint16_t near_value) {
for (auto* packet : packets) {
const MediaPacket* media_packet = static_cast<const MediaPacket*>(packet);
uint16_t sequence_number = media_packet->header().sequenceNumber;
+ // The packet with sequence number s is expected at position s - 1.
if (sequence_number < last_sequence_number) {
distance += last_sequence_number - sequence_number;
}
last_sequence_number = sequence_number;
}
- // Because reordering is random, we allow a threshold when comparing. The
- // maximum distance a packet can be moved is PacketCount - 1.
- EXPECT_NEAR(
- ((kPacketCount - 1) * reorder_percent) / 100, distance, near_value);
+ // The probability that two adjacent elements are swapped is p = reorder_percent / 100.
+ double p = static_cast<double>(reorder_percent) / 100;
+ // The expected number of swaps we perform is p * (PacketCount - 1),
+ // and each swap increases the distance by one.
+ double mean = p * (kPacketCount - 1);
+ // If pair i is chosen to be swapped with probability p, the variance for that
+ // pair is p * (1 - p). Since there are (kPacketCount - 1) independent pairs,
+ // the variance for the number of swaps is (kPacketCount - 1) * p * (1 - p).
+ double std_deviation = sqrt((kPacketCount - 1) * p * (1 - p));
+ EXPECT_NEAR(mean, distance, 3 * std_deviation);
for (auto* packet : packets)
delete packet;
@@ -576,23 +550,23 @@ static void TestReorderFilter(uint16_t reorder_percent, uint16_t near_value) {
TEST(BweTestFramework_ReorderFilterTest, Reorder0) {
// For 0% reordering, no packets should have been moved, so result is exact.
- TestReorderFilter(0, 0);
+ TestReorderFilter(0);
}
TEST(BweTestFramework_ReorderFilterTest, Reorder10) {
- TestReorderFilter(10, 30);
+ TestReorderFilter(10);
}
TEST(BweTestFramework_ReorderFilterTest, Reorder20) {
- TestReorderFilter(20, 20);
+ TestReorderFilter(20);
}
TEST(BweTestFramework_ReorderFilterTest, Reorder50) {
- TestReorderFilter(50, 20);
+ TestReorderFilter(50);
}
TEST(BweTestFramework_ReorderFilterTest, Reorder70) {
- TestReorderFilter(70, 20);
+ TestReorderFilter(70);
}
TEST(BweTestFramework_ReorderFilterTest, Reorder100) {
@@ -600,7 +574,7 @@ TEST(BweTestFramework_ReorderFilterTest, Reorder100) {
// adjacent packets, when the likelihood of a swap is 1.0, a swap will always
// occur, so the stream will be in order except for the first packet, which
// has been moved to the end. Therefore we expect the result to be exact here.
- TestReorderFilter(100.0, 0);
+ TestReorderFilter(100.0);
}
class BweTestFramework_ChokeFilterTest : public ::testing::Test {
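The new tolerance is statistical rather than hand-tuned: each of the kPacketCount - 1 adjacent pairs is swapped independently with probability p, so the swap count is binomial with mean p(N-1) and variance p(1-p)(N-1), and a 3-sigma window rejects a correct implementation only about 0.3% of the time. A small standalone check of the numbers for the 10% case:

#include <cmath>
#include <cstdio>

// Reproduce the bound for reorder_percent = 10, kPacketCount = 10000.
int main() {
  const int kPacketCount = 10000;
  const double p = 0.10;
  const double mean = p * (kPacketCount - 1);  // ~999.9 expected swaps.
  const double std_dev =
      std::sqrt((kPacketCount - 1) * p * (1 - p));  // ~30.0
  std::printf("expect %.1f +/- %.1f\n", mean, 3 * std_dev);  // 999.9 +/- 90.0
  return 0;
}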
diff --git a/webrtc/modules/remote_bitrate_estimator/test/bwe_test_logging.cc b/webrtc/modules/remote_bitrate_estimator/test/bwe_test_logging.cc
index dcc08d8dde..3a84e81a0b 100644
--- a/webrtc/modules/remote_bitrate_estimator/test/bwe_test_logging.cc
+++ b/webrtc/modules/remote_bitrate_estimator/test/bwe_test_logging.cc
@@ -18,8 +18,8 @@
#include <algorithm>
#include <sstream>
+#include "webrtc/base/platform_thread.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
-#include "webrtc/system_wrappers/include/thread_wrapper.h"
namespace webrtc {
namespace testing {
@@ -57,27 +57,27 @@ Logging* Logging::GetInstance() {
void Logging::SetGlobalContext(uint32_t name) {
CriticalSectionScoped cs(crit_sect_.get());
- thread_map_[ThreadWrapper::GetThreadId()].global_state.tag = ToString(name);
+ thread_map_[rtc::CurrentThreadId()].global_state.tag = ToString(name);
}
void Logging::SetGlobalContext(const std::string& name) {
CriticalSectionScoped cs(crit_sect_.get());
- thread_map_[ThreadWrapper::GetThreadId()].global_state.tag = name;
+ thread_map_[rtc::CurrentThreadId()].global_state.tag = name;
}
void Logging::SetGlobalContext(const char* name) {
CriticalSectionScoped cs(crit_sect_.get());
- thread_map_[ThreadWrapper::GetThreadId()].global_state.tag = name;
+ thread_map_[rtc::CurrentThreadId()].global_state.tag = name;
}
void Logging::SetGlobalEnable(bool enabled) {
CriticalSectionScoped cs(crit_sect_.get());
- thread_map_[ThreadWrapper::GetThreadId()].global_state.enabled = enabled;
+ thread_map_[rtc::CurrentThreadId()].global_state.enabled = enabled;
}
void Logging::Log(const char format[], ...) {
CriticalSectionScoped cs(crit_sect_.get());
- ThreadMap::iterator it = thread_map_.find(ThreadWrapper::GetThreadId());
+ ThreadMap::iterator it = thread_map_.find(rtc::CurrentThreadId());
assert(it != thread_map_.end());
const State& state = it->second.stack.top();
if (state.enabled) {
@@ -96,7 +96,7 @@ void Logging::Plot(int figure, double value) {
void Logging::Plot(int figure, double value, const std::string& alg_name) {
CriticalSectionScoped cs(crit_sect_.get());
- ThreadMap::iterator it = thread_map_.find(ThreadWrapper::GetThreadId());
+ ThreadMap::iterator it = thread_map_.find(rtc::CurrentThreadId());
assert(it != thread_map_.end());
const State& state = it->second.stack.top();
std::string label = state.tag + '@' + alg_name;
@@ -119,7 +119,7 @@ void Logging::PlotBar(int figure,
double value,
int flow_id) {
CriticalSectionScoped cs(crit_sect_.get());
- ThreadMap::iterator it = thread_map_.find(ThreadWrapper::GetThreadId());
+ ThreadMap::iterator it = thread_map_.find(rtc::CurrentThreadId());
assert(it != thread_map_.end());
const State& state = it->second.stack.top();
if (state.enabled) {
@@ -132,7 +132,7 @@ void Logging::PlotBaselineBar(int figure,
double value,
int flow_id) {
CriticalSectionScoped cs(crit_sect_.get());
- ThreadMap::iterator it = thread_map_.find(ThreadWrapper::GetThreadId());
+ ThreadMap::iterator it = thread_map_.find(rtc::CurrentThreadId());
assert(it != thread_map_.end());
const State& state = it->second.stack.top();
if (state.enabled) {
@@ -148,7 +148,7 @@ void Logging::PlotErrorBar(int figure,
const std::string& error_title,
int flow_id) {
CriticalSectionScoped cs(crit_sect_.get());
- ThreadMap::iterator it = thread_map_.find(ThreadWrapper::GetThreadId());
+ ThreadMap::iterator it = thread_map_.find(rtc::CurrentThreadId());
assert(it != thread_map_.end());
const State& state = it->second.stack.top();
if (state.enabled) {
@@ -167,7 +167,7 @@ void Logging::PlotLimitErrorBar(int figure,
const std::string& limit_title,
int flow_id) {
CriticalSectionScoped cs(crit_sect_.get());
- ThreadMap::iterator it = thread_map_.find(ThreadWrapper::GetThreadId());
+ ThreadMap::iterator it = thread_map_.find(rtc::CurrentThreadId());
assert(it != thread_map_.end());
const State& state = it->second.stack.top();
if (state.enabled) {
@@ -182,7 +182,7 @@ void Logging::PlotLabel(int figure,
const std::string& y_label,
int num_flows) {
CriticalSectionScoped cs(crit_sect_.get());
- ThreadMap::iterator it = thread_map_.find(ThreadWrapper::GetThreadId());
+ ThreadMap::iterator it = thread_map_.find(rtc::CurrentThreadId());
assert(it != thread_map_.end());
const State& state = it->second.stack.top();
if (state.enabled) {
@@ -219,7 +219,7 @@ void Logging::PushState(const std::string& append_to_tag, int64_t timestamp_ms,
bool enabled) {
CriticalSectionScoped cs(crit_sect_.get());
State new_state(append_to_tag, timestamp_ms, enabled);
- ThreadState* thread_state = &thread_map_[ThreadWrapper::GetThreadId()];
+ ThreadState* thread_state = &thread_map_[rtc::CurrentThreadId()];
std::stack<State>* stack = &thread_state->stack;
if (stack->empty()) {
new_state.MergePrevious(thread_state->global_state);
@@ -231,7 +231,7 @@ void Logging::PushState(const std::string& append_to_tag, int64_t timestamp_ms,
void Logging::PopState() {
CriticalSectionScoped cs(crit_sect_.get());
- ThreadMap::iterator it = thread_map_.find(ThreadWrapper::GetThreadId());
+ ThreadMap::iterator it = thread_map_.find(rtc::CurrentThreadId());
assert(it != thread_map_.end());
std::stack<State>* stack = &it->second.stack;
int64_t newest_timestamp_ms = stack->top().timestamp_ms;
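Every call site in this file swaps ThreadWrapper::GetThreadId() for rtc::CurrentThreadId(), but the structure is unchanged: logging state lives in a map keyed by the calling thread, guarded by a lock. A stripped-down sketch of that pattern using only the standard library (illustrative; the real class uses a CriticalSectionScoped and richer per-thread state):

#include <map>
#include <mutex>
#include <stack>
#include <string>
#include <thread>

struct ThreadLogState {
  std::string tag;
  std::stack<std::string> context;  // Nested contexts pushed/popped per scope.
};

class ScopedLogging {
 public:
  void SetTag(const std::string& tag) {
    std::lock_guard<std::mutex> lock(mutex_);
    states_[std::this_thread::get_id()].tag = tag;  // Per-thread state.
  }

 private:
  std::mutex mutex_;
  std::map<std::thread::id, ThreadLogState> states_;
};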
diff --git a/webrtc/modules/remote_bitrate_estimator/test/bwe_test_logging.h b/webrtc/modules/remote_bitrate_estimator/test/bwe_test_logging.h
index 4115d30c2a..cc7807ba8a 100644
--- a/webrtc/modules/remote_bitrate_estimator/test/bwe_test_logging.h
+++ b/webrtc/modules/remote_bitrate_estimator/test/bwe_test_logging.h
@@ -130,12 +130,12 @@
#define BWE_TEST_LOGGING_GLOBAL_CONTEXT(name) \
do { \
webrtc::testing::bwe::Logging::GetInstance()->SetGlobalContext(name); \
- } while (0);
+ } while (0)
#define BWE_TEST_LOGGING_GLOBAL_ENABLE(enabled) \
do { \
webrtc::testing::bwe::Logging::GetInstance()->SetGlobalEnable(enabled); \
- } while (0);
+ } while (0)
#define __BWE_TEST_LOGGING_CONTEXT_NAME(ctx, line) ctx ## line
#define __BWE_TEST_LOGGING_CONTEXT_DECLARE(ctx, line, name, time, enabled) \
@@ -155,36 +155,36 @@
do { \
BWE_TEST_LOGGING_CONTEXT(name); \
webrtc::testing::bwe::Logging::GetInstance()->Log(format, _1); \
- } while (0);
+ } while (0)
#define BWE_TEST_LOGGING_LOG2(name, format, _1, _2) \
do { \
BWE_TEST_LOGGING_CONTEXT(name); \
webrtc::testing::bwe::Logging::GetInstance()->Log(format, _1, _2); \
- } while (0);
+ } while (0)
#define BWE_TEST_LOGGING_LOG3(name, format, _1, _2, _3) \
do { \
BWE_TEST_LOGGING_CONTEXT(name); \
webrtc::testing::bwe::Logging::GetInstance()->Log(format, _1, _2, _3); \
- } while (0);
+ } while (0)
#define BWE_TEST_LOGGING_LOG4(name, format, _1, _2, _3, _4) \
do { \
BWE_TEST_LOGGING_CONTEXT(name); \
webrtc::testing::bwe::Logging::GetInstance()->Log(format, _1, _2, _3, \
_4); \
- } while (0);
+ } while (0)
#define BWE_TEST_LOGGING_LOG5(name, format, _1, _2, _3, _4, _5) \
do {\
BWE_TEST_LOGGING_CONTEXT(name); \
webrtc::testing::bwe::Logging::GetInstance()->Log(format, _1, _2, _3, \
_4, _5); \
- } while (0);
+ } while (0)
#define BWE_TEST_LOGGING_PLOT(figure, name, time, value) \
do { \
__BWE_TEST_LOGGING_CONTEXT_DECLARE(__bwe_log_, __PLOT__, name, \
static_cast<int64_t>(time), true); \
webrtc::testing::bwe::Logging::GetInstance()->Plot(figure, value); \
- } while (0);
+ } while (0)
#define BWE_TEST_LOGGING_PLOT_WITH_NAME(figure, name, time, value, alg_name) \
do { \
@@ -192,21 +192,21 @@
static_cast<int64_t>(time), true); \
webrtc::testing::bwe::Logging::GetInstance()->Plot(figure, value, \
alg_name); \
- } while (0);
+ } while (0)
#define BWE_TEST_LOGGING_BAR(figure, name, value, flow_id) \
do { \
BWE_TEST_LOGGING_CONTEXT(name); \
webrtc::testing::bwe::Logging::GetInstance()->PlotBar(figure, name, value, \
flow_id); \
- } while (0);
+ } while (0)
#define BWE_TEST_LOGGING_BASELINEBAR(figure, name, value, flow_id) \
do { \
BWE_TEST_LOGGING_CONTEXT(name); \
webrtc::testing::bwe::Logging::GetInstance()->PlotBaselineBar( \
figure, name, value, flow_id); \
- } while (0);
+ } while (0)
#define BWE_TEST_LOGGING_ERRORBAR(figure, name, value, ylow, yhigh, title, \
flow_id) \
@@ -214,7 +214,7 @@
BWE_TEST_LOGGING_CONTEXT(name); \
webrtc::testing::bwe::Logging::GetInstance()->PlotErrorBar( \
figure, name, value, ylow, yhigh, title, flow_id); \
- } while (0);
+ } while (0)
#define BWE_TEST_LOGGING_LIMITERRORBAR( \
figure, name, value, ylow, yhigh, error_title, ymax, limit_title, flow_id) \
@@ -223,14 +223,14 @@
webrtc::testing::bwe::Logging::GetInstance()->PlotLimitErrorBar( \
figure, name, value, ylow, yhigh, error_title, ymax, limit_title, \
flow_id); \
- } while (0);
+ } while (0)
#define BWE_TEST_LOGGING_LABEL(figure, title, y_label, num_flows) \
do { \
BWE_TEST_LOGGING_CONTEXT(title); \
webrtc::testing::bwe::Logging::GetInstance()->PlotLabel( \
figure, title, y_label, num_flows); \
- } while (0);
+ } while (0)
namespace webrtc {
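Dropping the trailing semicolon from these do { ... } while (0) wrappers is the textbook form of the idiom: the caller supplies the semicolon, so each macro expands to exactly one statement. With the semicolon baked in, the expansion becomes two statements and a braceless if/else breaks, as this minimal sketch (hypothetical LOG_* macros) shows:

#include <cstdio>

inline void Log(const char* msg) { std::puts(msg); }

// Correct form: the caller's ';' completes the statement.
#define LOG_OK(msg) do { Log(msg); } while (0)

// Broken form (what the old macros did): the built-in ';' makes the
// expansion two statements; using it below instead of LOG_OK would
// orphan the 'else' and fail to compile.
#define LOG_BROKEN(msg) do { Log(msg); } while (0);

void Example(bool verbose) {
  if (verbose)
    LOG_OK("verbose");  // Expands to exactly one statement.
  else
    LOG_OK("quiet");
}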
diff --git a/webrtc/modules/remote_bitrate_estimator/test/bwe_unittest.cc b/webrtc/modules/remote_bitrate_estimator/test/bwe_unittest.cc
index 6b3ce4847c..6245ccd25d 100644
--- a/webrtc/modules/remote_bitrate_estimator/test/bwe_unittest.cc
+++ b/webrtc/modules/remote_bitrate_estimator/test/bwe_unittest.cc
@@ -13,6 +13,7 @@
#include <vector>
#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/base/arraysize.h"
namespace webrtc {
namespace testing {
@@ -241,7 +242,7 @@ TEST_F(BweReceiverTest, PacketLossDuplicatedPackets) {
// Missing the element 5.
const uint16_t kSequenceNumbers[] = {1, 2, 3, 4, 6, 7, 8};
- const int kNumPackets = ARRAY_SIZE(kSequenceNumbers);
+ const int kNumPackets = arraysize(kSequenceNumbers);
// Insert each sequence number twice.
for (int i = 0; i < 2; ++i) {
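The repeated ARRAY_SIZE-to-arraysize replacements in this commit are more than a rename: arraysize is a function-template trick that fails to compile when handed a pointer, whereas the old macro silently divides sizeof(pointer) by sizeof(element). A minimal sketch of the usual shape of such a helper (webrtc/base/arraysize.h follows the same idea):

#include <cstddef>

// Declares (never defines) a function returning a reference to a char
// array of N elements; it is only ever used inside sizeof, so no code
// is generated. Passing a pointer fails template deduction.
template <typename T, size_t N>
char (&ArraySizeHelper(T (&array)[N]))[N];

#define arraysize(array) (sizeof(ArraySizeHelper(array)))

// Usage: compiles for real arrays, rejects pointers at compile time.
const int kSequenceNumbers[] = {1, 2, 3, 4, 6, 7, 8};
const int kNumPackets = arraysize(kSequenceNumbers);  // 7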
diff --git a/webrtc/modules/remote_bitrate_estimator/test/estimators/nada.cc b/webrtc/modules/remote_bitrate_estimator/test/estimators/nada.cc
index d77447f1ea..6166ff8c2d 100644
--- a/webrtc/modules/remote_bitrate_estimator/test/estimators/nada.cc
+++ b/webrtc/modules/remote_bitrate_estimator/test/estimators/nada.cc
@@ -18,10 +18,11 @@
#include <algorithm>
#include <vector>
+#include "webrtc/base/arraysize.h"
#include "webrtc/base/common.h"
#include "webrtc/modules/remote_bitrate_estimator/test/estimators/nada.h"
#include "webrtc/modules/remote_bitrate_estimator/test/bwe_test_logging.h"
-#include "webrtc/modules/rtp_rtcp/interface/receive_statistics.h"
+#include "webrtc/modules/rtp_rtcp/include/receive_statistics.h"
namespace webrtc {
namespace testing {
@@ -63,7 +64,7 @@ void NadaBweReceiver::ReceivePacket(int64_t arrival_time_ms,
}
delay_signal_ms_ = delay_ms - baseline_delay_ms_; // Referred to as d_n.
- const int kMedian = ARRAY_SIZE(last_delays_ms_);
+ const int kMedian = arraysize(last_delays_ms_);
last_delays_ms_[(last_delays_index_++) % kMedian] = delay_signal_ms_;
int size = std::min(last_delays_index_, kMedian);
diff --git a/webrtc/modules/remote_bitrate_estimator/test/estimators/nada.h b/webrtc/modules/remote_bitrate_estimator/test/estimators/nada.h
index eee90cf463..bf23d09884 100644
--- a/webrtc/modules/remote_bitrate_estimator/test/estimators/nada.h
+++ b/webrtc/modules/remote_bitrate_estimator/test/estimators/nada.h
@@ -20,7 +20,7 @@
#include <list>
#include <map>
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/modules/remote_bitrate_estimator/test/bwe.h"
#include "webrtc/voice_engine/channel.h"
diff --git a/webrtc/modules/remote_bitrate_estimator/test/estimators/nada_unittest.cc b/webrtc/modules/remote_bitrate_estimator/test/estimators/nada_unittest.cc
index a0f56b73b7..51afae1df4 100644
--- a/webrtc/modules/remote_bitrate_estimator/test/estimators/nada_unittest.cc
+++ b/webrtc/modules/remote_bitrate_estimator/test/estimators/nada_unittest.cc
@@ -13,6 +13,7 @@
#include <algorithm>
#include <numeric>
+#include "webrtc/base/arraysize.h"
#include "webrtc/base/common.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/modules/remote_bitrate_estimator/test/bwe_test_framework.h"
@@ -357,7 +358,7 @@ TEST_F(NadaReceiverSideTest, FeedbackIncreasingDelay) {
// Baseline delay will be 50 ms.
// Delay signals should be: [0 10 20 30 40 50 60 70] ms.
const int64_t kMedianFilteredDelaysMs[] = {0, 5, 10, 15, 20, 30, 40, 50};
- const int kNumPackets = ARRAY_SIZE(kMedianFilteredDelaysMs);
+ const int kNumPackets = arraysize(kMedianFilteredDelaysMs);
const float kAlpha = 0.1f; // Used for exponential smoothing.
int64_t exp_smoothed_delays_ms[kNumPackets];
@@ -426,7 +427,7 @@ TEST_F(NadaReceiverSideTest, FeedbackWarpedDelay) {
// Delay signals should be: [0 200 400 600 800 1000 1200 1400] ms.
const int64_t kMedianFilteredDelaysMs[] = {
0, 100, 200, 300, 400, 600, 800, 1000};
- const int kNumPackets = ARRAY_SIZE(kMedianFilteredDelaysMs);
+ const int kNumPackets = arraysize(kMedianFilteredDelaysMs);
const float kAlpha = 0.1f; // Used for exponential smoothing.
int64_t exp_smoothed_delays_ms[kNumPackets];
@@ -480,7 +481,7 @@ TEST_F(FilterTest, ExponentialSmoothingConstantArray) {
TEST_F(FilterTest, ExponentialSmoothingInitialPertubation) {
const int64_t kSignal[] = {90000, 0, 0, 0, 0, 0};
- const int kNumElements = ARRAY_SIZE(kSignal);
+ const int kNumElements = arraysize(kSignal);
int64_t exp_smoothed[kNumElements];
ExponentialSmoothingFilter(kSignal, kNumElements, exp_smoothed);
for (int i = 1; i < kNumElements; ++i) {
diff --git a/webrtc/modules/remote_bitrate_estimator/test/estimators/remb.cc b/webrtc/modules/remote_bitrate_estimator/test/estimators/remb.cc
index b18b9f06b9..9599b01933 100644
--- a/webrtc/modules/remote_bitrate_estimator/test/estimators/remb.cc
+++ b/webrtc/modules/remote_bitrate_estimator/test/estimators/remb.cc
@@ -17,7 +17,7 @@
#include "webrtc/modules/bitrate_controller/include/bitrate_controller.h"
#include "webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_abs_send_time.h"
#include "webrtc/modules/remote_bitrate_estimator/test/bwe_test_logging.h"
-#include "webrtc/modules/rtp_rtcp/interface/receive_statistics.h"
+#include "webrtc/modules/rtp_rtcp/include/receive_statistics.h"
namespace webrtc {
namespace testing {
diff --git a/webrtc/modules/remote_bitrate_estimator/test/estimators/tcp.cc b/webrtc/modules/remote_bitrate_estimator/test/estimators/tcp.cc
index a02abc6ab8..b7e4f971fa 100644
--- a/webrtc/modules/remote_bitrate_estimator/test/estimators/tcp.cc
+++ b/webrtc/modules/remote_bitrate_estimator/test/estimators/tcp.cc
@@ -16,7 +16,7 @@
#include "webrtc/base/common.h"
#include "webrtc/modules/bitrate_controller/include/bitrate_controller.h"
#include "webrtc/modules/remote_bitrate_estimator/test/bwe_test_logging.h"
-#include "webrtc/modules/rtp_rtcp/interface/receive_statistics.h"
+#include "webrtc/modules/rtp_rtcp/include/receive_statistics.h"
namespace webrtc {
namespace testing {
diff --git a/webrtc/modules/remote_bitrate_estimator/test/metric_recorder.cc b/webrtc/modules/remote_bitrate_estimator/test/metric_recorder.cc
index 6202b4a6a3..559757c0eb 100644
--- a/webrtc/modules/remote_bitrate_estimator/test/metric_recorder.cc
+++ b/webrtc/modules/remote_bitrate_estimator/test/metric_recorder.cc
@@ -10,10 +10,10 @@
#include "webrtc/modules/remote_bitrate_estimator/test/metric_recorder.h"
-#include "webrtc/modules/remote_bitrate_estimator/test/packet_sender.h"
-
#include <algorithm>
+#include "webrtc/modules/remote_bitrate_estimator/test/packet_sender.h"
+
namespace webrtc {
namespace testing {
namespace bwe {
diff --git a/webrtc/modules/remote_bitrate_estimator/test/packet.h b/webrtc/modules/remote_bitrate_estimator/test/packet.h
index 11885a4544..4a361c4dc2 100644
--- a/webrtc/modules/remote_bitrate_estimator/test/packet.h
+++ b/webrtc/modules/remote_bitrate_estimator/test/packet.h
@@ -16,7 +16,7 @@
#include <vector>
#include "webrtc/common_types.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "webrtc/modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h"
namespace webrtc {
diff --git a/webrtc/modules/remote_bitrate_estimator/test/packet_receiver.cc b/webrtc/modules/remote_bitrate_estimator/test/packet_receiver.cc
index f70c212af7..793e06421f 100644
--- a/webrtc/modules/remote_bitrate_estimator/test/packet_receiver.cc
+++ b/webrtc/modules/remote_bitrate_estimator/test/packet_receiver.cc
@@ -14,10 +14,10 @@
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/base/common.h"
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/modules/remote_bitrate_estimator/test/bwe.h"
#include "webrtc/modules/remote_bitrate_estimator/test/bwe_test_framework.h"
-#include "webrtc/modules/rtp_rtcp/interface/receive_statistics.h"
+#include "webrtc/modules/rtp_rtcp/include/receive_statistics.h"
#include "webrtc/system_wrappers/include/clock.h"
namespace webrtc {
diff --git a/webrtc/modules/remote_bitrate_estimator/test/packet_sender.cc b/webrtc/modules/remote_bitrate_estimator/test/packet_sender.cc
index f1faa49d7e..3bcbc0a071 100644
--- a/webrtc/modules/remote_bitrate_estimator/test/packet_sender.cc
+++ b/webrtc/modules/remote_bitrate_estimator/test/packet_sender.cc
@@ -15,7 +15,7 @@
#include <sstream>
#include "webrtc/base/checks.h"
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/modules/remote_bitrate_estimator/test/bwe.h"
#include "webrtc/modules/remote_bitrate_estimator/test/metric_recorder.h"
diff --git a/webrtc/modules/remote_bitrate_estimator/test/packet_sender.h b/webrtc/modules/remote_bitrate_estimator/test/packet_sender.h
index c42647e2d3..f48ed62f57 100644
--- a/webrtc/modules/remote_bitrate_estimator/test/packet_sender.h
+++ b/webrtc/modules/remote_bitrate_estimator/test/packet_sender.h
@@ -13,11 +13,12 @@
#include <list>
#include <limits>
+#include <set>
#include <string>
#include "webrtc/base/constructormagic.h"
#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/modules/interface/module.h"
+#include "webrtc/modules/include/module.h"
#include "webrtc/modules/remote_bitrate_estimator/test/bwe.h"
#include "webrtc/modules/remote_bitrate_estimator/test/bwe_test_framework.h"
@@ -149,7 +150,7 @@ class TcpSender : public PacketSender {
private:
struct InFlight {
public:
- InFlight(const MediaPacket& packet)
+ explicit InFlight(const MediaPacket& packet)
: sequence_number(packet.header().sequenceNumber),
time_ms(packet.send_time_ms()) {}
diff --git a/webrtc/modules/remote_bitrate_estimator/tools/bwe_rtp.cc b/webrtc/modules/remote_bitrate_estimator/tools/bwe_rtp.cc
index 9493805a1c..f138035de5 100644
--- a/webrtc/modules/remote_bitrate_estimator/tools/bwe_rtp.cc
+++ b/webrtc/modules/remote_bitrate_estimator/tools/bwe_rtp.cc
@@ -10,15 +10,17 @@
#include "webrtc/modules/remote_bitrate_estimator/tools/bwe_rtp.h"
-#include <sstream>
#include <stdio.h>
+
+#include <set>
+#include <sstream>
#include <string>
#include "gflags/gflags.h"
#include "webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_abs_send_time.h"
#include "webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_header_parser.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_payload_registry.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_header_parser.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_payload_registry.h"
#include "webrtc/test/rtp_file_reader.h"
namespace flags {
diff --git a/webrtc/modules/remote_bitrate_estimator/tools/bwe_rtp_play.cc b/webrtc/modules/remote_bitrate_estimator/tools/bwe_rtp_play.cc
index 19e4a07b4d..4574faf8b7 100644
--- a/webrtc/modules/remote_bitrate_estimator/tools/bwe_rtp_play.cc
+++ b/webrtc/modules/remote_bitrate_estimator/tools/bwe_rtp_play.cc
@@ -14,8 +14,8 @@
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h"
#include "webrtc/modules/remote_bitrate_estimator/tools/bwe_rtp.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_header_parser.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_payload_registry.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_header_parser.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_payload_registry.h"
#include "webrtc/test/rtp_file_reader.h"
class Observer : public webrtc::RemoteBitrateObserver {
diff --git a/webrtc/modules/remote_bitrate_estimator/tools/rtp_to_text.cc b/webrtc/modules/remote_bitrate_estimator/tools/rtp_to_text.cc
index e277481886..bf698728e8 100644
--- a/webrtc/modules/remote_bitrate_estimator/tools/rtp_to_text.cc
+++ b/webrtc/modules/remote_bitrate_estimator/tools/rtp_to_text.cc
@@ -14,8 +14,8 @@
#include "webrtc/base/format_macros.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/modules/remote_bitrate_estimator/tools/bwe_rtp.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_header_parser.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_payload_registry.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_header_parser.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_payload_registry.h"
#include "webrtc/test/rtp_file_reader.h"
int main(int argc, char** argv) {
diff --git a/webrtc/modules/remote_bitrate_estimator/transport_feedback_adapter.cc b/webrtc/modules/remote_bitrate_estimator/transport_feedback_adapter.cc
index f2e073aa53..5904594ac8 100644
--- a/webrtc/modules/remote_bitrate_estimator/transport_feedback_adapter.cc
+++ b/webrtc/modules/remote_bitrate_estimator/transport_feedback_adapter.cc
@@ -8,14 +8,15 @@
* be found in the AUTHORS file in the root of the source tree.
*/
+#include "webrtc/modules/remote_bitrate_estimator/transport_feedback_adapter.h"
+
#include <limits>
#include "webrtc/base/checks.h"
#include "webrtc/base/logging.h"
#include "webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_abs_send_time.h"
-#include "webrtc/modules/remote_bitrate_estimator/transport_feedback_adapter.h"
#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h"
-#include "webrtc/modules/utility/interface/process_thread.h"
+#include "webrtc/modules/utility/include/process_thread.h"
namespace webrtc {
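Hoisting the file's own header to the top follows the common style rule that a .cc file includes its corresponding header first: if that header is not self-contained, the build breaks here rather than in some unrelated translation unit. The resulting ordering, taken directly from this hunk:

// transport_feedback_adapter.cc include order after this change:
// 1. The file's own header (verifies the header compiles stand-alone).
// 2. C++ standard library headers.
// 3. Other project headers.
#include "webrtc/modules/remote_bitrate_estimator/transport_feedback_adapter.h"

#include <limits>

#include "webrtc/base/checks.h"
#include "webrtc/base/logging.h"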
diff --git a/webrtc/modules/remote_bitrate_estimator/transport_feedback_adapter.h b/webrtc/modules/remote_bitrate_estimator/transport_feedback_adapter.h
index 58829b072b..93f30e6cee 100644
--- a/webrtc/modules/remote_bitrate_estimator/transport_feedback_adapter.h
+++ b/webrtc/modules/remote_bitrate_estimator/transport_feedback_adapter.h
@@ -15,8 +15,8 @@
#include "webrtc/base/criticalsection.h"
#include "webrtc/base/thread_annotations.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h"
#include "webrtc/modules/remote_bitrate_estimator/include/send_time_history.h"
diff --git a/webrtc/modules/remote_bitrate_estimator/transport_feedback_adapter_unittest.cc b/webrtc/modules/remote_bitrate_estimator/transport_feedback_adapter_unittest.cc
index b2bc646e2d..64d0e55397 100644
--- a/webrtc/modules/remote_bitrate_estimator/transport_feedback_adapter_unittest.cc
+++ b/webrtc/modules/remote_bitrate_estimator/transport_feedback_adapter_unittest.cc
@@ -18,9 +18,9 @@
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/modules/remote_bitrate_estimator/include/mock/mock_remote_bitrate_estimator.h"
#include "webrtc/modules/remote_bitrate_estimator/transport_feedback_adapter.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h"
-#include "webrtc/modules/utility/interface/mock/mock_process_thread.h"
+#include "webrtc/modules/utility/include/mock/mock_process_thread.h"
#include "webrtc/system_wrappers/include/clock.h"
using ::testing::_;
diff --git a/webrtc/modules/rtp_rtcp/BUILD.gn b/webrtc/modules/rtp_rtcp/BUILD.gn
index c651424611..a3d3403172 100644
--- a/webrtc/modules/rtp_rtcp/BUILD.gn
+++ b/webrtc/modules/rtp_rtcp/BUILD.gn
@@ -10,14 +10,14 @@ import("../../build/webrtc.gni")
source_set("rtp_rtcp") {
sources = [
- "interface/fec_receiver.h",
- "interface/receive_statistics.h",
- "interface/remote_ntp_time_estimator.h",
- "interface/rtp_header_parser.h",
- "interface/rtp_payload_registry.h",
- "interface/rtp_receiver.h",
- "interface/rtp_rtcp.h",
- "interface/rtp_rtcp_defines.h",
+ "include/fec_receiver.h",
+ "include/receive_statistics.h",
+ "include/remote_ntp_time_estimator.h",
+ "include/rtp_header_parser.h",
+ "include/rtp_payload_registry.h",
+ "include/rtp_receiver.h",
+ "include/rtp_rtcp.h",
+ "include/rtp_rtcp_defines.h",
"mocks/mock_rtp_rtcp.h",
"source/bitrate.cc",
"source/bitrate.h",
@@ -46,8 +46,40 @@ source_set("rtp_rtcp") {
"source/remote_ntp_time_estimator.cc",
"source/rtcp_packet.cc",
"source/rtcp_packet.h",
+ "source/rtcp_packet/app.cc",
+ "source/rtcp_packet/app.h",
+ "source/rtcp_packet/bye.cc",
+ "source/rtcp_packet/bye.h",
+ "source/rtcp_packet/compound_packet.cc",
+ "source/rtcp_packet/compound_packet.h",
+ "source/rtcp_packet/dlrr.cc",
+ "source/rtcp_packet/dlrr.h",
+ "source/rtcp_packet/extended_jitter_report.cc",
+ "source/rtcp_packet/extended_jitter_report.h",
+ "source/rtcp_packet/nack.cc",
+ "source/rtcp_packet/nack.h",
+ "source/rtcp_packet/pli.cc",
+ "source/rtcp_packet/pli.h",
+ "source/rtcp_packet/psfb.cc",
+ "source/rtcp_packet/psfb.h",
+ "source/rtcp_packet/receiver_report.cc",
+ "source/rtcp_packet/receiver_report.h",
+ "source/rtcp_packet/report_block.cc",
+ "source/rtcp_packet/report_block.h",
+ "source/rtcp_packet/rrtr.cc",
+ "source/rtcp_packet/rrtr.h",
+ "source/rtcp_packet/rtpfb.cc",
+ "source/rtcp_packet/rtpfb.h",
+ "source/rtcp_packet/sli.cc",
+ "source/rtcp_packet/sli.h",
+ "source/rtcp_packet/tmmbn.cc",
+ "source/rtcp_packet/tmmbn.h",
+ "source/rtcp_packet/tmmbr.cc",
+ "source/rtcp_packet/tmmbr.h",
"source/rtcp_packet/transport_feedback.cc",
"source/rtcp_packet/transport_feedback.h",
+ "source/rtcp_packet/voip_metric.cc",
+ "source/rtcp_packet/voip_metric.h",
"source/rtcp_receiver.cc",
"source/rtcp_receiver.h",
"source/rtcp_receiver_help.cc",
diff --git a/webrtc/modules/rtp_rtcp/OWNERS b/webrtc/modules/rtp_rtcp/OWNERS
index 2b08b6b2dc..fd12dcea0c 100644
--- a/webrtc/modules/rtp_rtcp/OWNERS
+++ b/webrtc/modules/rtp_rtcp/OWNERS
@@ -1,6 +1,11 @@
-stefan@webrtc.org
-henrik.lundin@webrtc.org
-mflodman@webrtc.org
-asapersson@webrtc.org
-
-per-file BUILD.gn=kjellander@webrtc.org
+stefan@webrtc.org
+henrik.lundin@webrtc.org
+mflodman@webrtc.org
+asapersson@webrtc.org
+
+# These are for the common case of adding or renaming files. If you're doing
+# structural changes, please get a review from a reviewer in this file.
+per-file *.gyp=*
+per-file *.gypi=*
+
+per-file BUILD.gn=kjellander@webrtc.org
diff --git a/webrtc/modules/rtp_rtcp/interface/fec_receiver.h b/webrtc/modules/rtp_rtcp/include/fec_receiver.h
index 3608165dab..65e85ad7a5 100644
--- a/webrtc/modules/rtp_rtcp/interface/fec_receiver.h
+++ b/webrtc/modules/rtp_rtcp/include/fec_receiver.h
@@ -8,10 +8,10 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_RTP_RTCP_INTERFACE_FEC_RECEIVER_H_
-#define WEBRTC_MODULES_RTP_RTCP_INTERFACE_FEC_RECEIVER_H_
+#ifndef WEBRTC_MODULES_RTP_RTCP_INCLUDE_FEC_RECEIVER_H_
+#define WEBRTC_MODULES_RTP_RTCP_INCLUDE_FEC_RECEIVER_H_
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "webrtc/typedefs.h"
namespace webrtc {
@@ -43,4 +43,4 @@ class FecReceiver {
virtual FecPacketCounter GetPacketCounter() const = 0;
};
} // namespace webrtc
-#endif // WEBRTC_MODULES_RTP_RTCP_INTERFACE_FEC_RECEIVER_H_
+#endif // WEBRTC_MODULES_RTP_RTCP_INCLUDE_FEC_RECEIVER_H_
diff --git a/webrtc/modules/rtp_rtcp/interface/receive_statistics.h b/webrtc/modules/rtp_rtcp/include/receive_statistics.h
index 6bd5cd846e..b4a7cd0de2 100644
--- a/webrtc/modules/rtp_rtcp/interface/receive_statistics.h
+++ b/webrtc/modules/rtp_rtcp/include/receive_statistics.h
@@ -8,13 +8,13 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_RTP_RTCP_INTERFACE_RECEIVE_STATISTICS_H_
-#define WEBRTC_MODULES_RTP_RTCP_INTERFACE_RECEIVE_STATISTICS_H_
+#ifndef WEBRTC_MODULES_RTP_RTCP_INCLUDE_RECEIVE_STATISTICS_H_
+#define WEBRTC_MODULES_RTP_RTCP_INCLUDE_RECEIVE_STATISTICS_H_
#include <map>
-#include "webrtc/modules/interface/module.h"
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module.h"
+#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/typedefs.h"
namespace webrtc {
@@ -99,4 +99,4 @@ class NullReceiveStatistics : public ReceiveStatistics {
};
} // namespace webrtc
-#endif // WEBRTC_MODULES_RTP_RTCP_INTERFACE_RECEIVE_STATISTICS_H_
+#endif // WEBRTC_MODULES_RTP_RTCP_INCLUDE_RECEIVE_STATISTICS_H_
diff --git a/webrtc/modules/rtp_rtcp/interface/remote_ntp_time_estimator.h b/webrtc/modules/rtp_rtcp/include/remote_ntp_time_estimator.h
index 0ffba212a6..56c6e48691 100644
--- a/webrtc/modules/rtp_rtcp/interface/remote_ntp_time_estimator.h
+++ b/webrtc/modules/rtp_rtcp/include/remote_ntp_time_estimator.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_RTP_RTCP_INTERFACE_REMOTE_NTP_TIME_ESTIMATOR_H_
-#define WEBRTC_MODULES_RTP_RTCP_INTERFACE_REMOTE_NTP_TIME_ESTIMATOR_H_
+#ifndef WEBRTC_MODULES_RTP_RTCP_INCLUDE_REMOTE_NTP_TIME_ESTIMATOR_H_
+#define WEBRTC_MODULES_RTP_RTCP_INCLUDE_REMOTE_NTP_TIME_ESTIMATOR_H_
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/system_wrappers/include/rtp_to_ntp.h"
@@ -48,4 +48,4 @@ class RemoteNtpTimeEstimator {
} // namespace webrtc
-#endif // WEBRTC_MODULES_RTP_RTCP_INTERFACE_REMOTE_NTP_TIME_ESTIMATOR_H_
+#endif // WEBRTC_MODULES_RTP_RTCP_INCLUDE_REMOTE_NTP_TIME_ESTIMATOR_H_
diff --git a/webrtc/modules/rtp_rtcp/interface/rtp_cvo.h b/webrtc/modules/rtp_rtcp/include/rtp_cvo.h
index c7a0268ef0..2e30d898ec 100644
--- a/webrtc/modules/rtp_rtcp/interface/rtp_cvo.h
+++ b/webrtc/modules/rtp_rtcp/include/rtp_cvo.h
@@ -7,8 +7,8 @@
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_RTP_RTCP_INTERFACE_RTP_CVO__H_
-#define WEBRTC_MODULES_RTP_RTCP_INTERFACE_RTP_CVO__H_
+#ifndef WEBRTC_MODULES_RTP_RTCP_INCLUDE_RTP_CVO_H_
+#define WEBRTC_MODULES_RTP_RTCP_INCLUDE_RTP_CVO_H_
#include "webrtc/common_video/rotation.h"
@@ -51,4 +51,4 @@ inline VideoRotation ConvertCVOByteToVideoRotation(uint8_t rotation) {
}
} // namespace webrtc
-#endif // WEBRTC_MODULES_RTP_RTCP_INTERFACE_RTP_CVO__H_
+#endif // WEBRTC_MODULES_RTP_RTCP_INCLUDE_RTP_CVO_H_
diff --git a/webrtc/modules/rtp_rtcp/interface/rtp_header_parser.h b/webrtc/modules/rtp_rtcp/include/rtp_header_parser.h
index 2809996b25..329de32611 100644
--- a/webrtc/modules/rtp_rtcp/interface/rtp_header_parser.h
+++ b/webrtc/modules/rtp_rtcp/include/rtp_header_parser.h
@@ -7,10 +7,10 @@
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_RTP_RTCP_INTERFACE_RTP_HEADER_PARSER_H_
-#define WEBRTC_MODULES_RTP_RTCP_INTERFACE_RTP_HEADER_PARSER_H_
+#ifndef WEBRTC_MODULES_RTP_RTCP_INCLUDE_RTP_HEADER_PARSER_H_
+#define WEBRTC_MODULES_RTP_RTCP_INCLUDE_RTP_HEADER_PARSER_H_
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "webrtc/typedefs.h"
namespace webrtc {
@@ -41,4 +41,4 @@ class RtpHeaderParser {
virtual bool DeregisterRtpHeaderExtension(RTPExtensionType type) = 0;
};
} // namespace webrtc
-#endif // WEBRTC_MODULES_RTP_RTCP_INTERFACE_RTP_HEADER_PARSER_H_
+#endif // WEBRTC_MODULES_RTP_RTCP_INCLUDE_RTP_HEADER_PARSER_H_
diff --git a/webrtc/modules/rtp_rtcp/interface/rtp_payload_registry.h b/webrtc/modules/rtp_rtcp/include/rtp_payload_registry.h
index 313bef1112..fae864107f 100644
--- a/webrtc/modules/rtp_rtcp/interface/rtp_payload_registry.h
+++ b/webrtc/modules/rtp_rtcp/include/rtp_payload_registry.h
@@ -8,8 +8,10 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_RTP_RTCP_INTERFACE_RTP_PAYLOAD_REGISTRY_H_
-#define WEBRTC_MODULES_RTP_RTCP_INTERFACE_RTP_PAYLOAD_REGISTRY_H_
+#ifndef WEBRTC_MODULES_RTP_RTCP_INCLUDE_RTP_PAYLOAD_REGISTRY_H_
+#define WEBRTC_MODULES_RTP_RTCP_INCLUDE_RTP_PAYLOAD_REGISTRY_H_
+
+#include <map>
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_receiver_strategy.h"
@@ -27,7 +29,7 @@ class RTPPayloadStrategy {
virtual bool PayloadIsCompatible(const RtpUtility::Payload& payload,
const uint32_t frequency,
- const uint8_t channels,
+ const size_t channels,
const uint32_t rate) const = 0;
virtual void UpdatePayloadRate(RtpUtility::Payload* payload,
@@ -37,7 +39,7 @@ class RTPPayloadStrategy {
const char payloadName[RTP_PAYLOAD_NAME_SIZE],
const int8_t payloadType,
const uint32_t frequency,
- const uint8_t channels,
+ const size_t channels,
const uint32_t rate) const = 0;
virtual int GetPayloadTypeFrequency(
@@ -52,14 +54,14 @@ class RTPPayloadStrategy {
class RTPPayloadRegistry {
public:
// The registry takes ownership of the strategy.
- RTPPayloadRegistry(RTPPayloadStrategy* rtp_payload_strategy);
+ explicit RTPPayloadRegistry(RTPPayloadStrategy* rtp_payload_strategy);
~RTPPayloadRegistry();
int32_t RegisterReceivePayload(
const char payload_name[RTP_PAYLOAD_NAME_SIZE],
const int8_t payload_type,
const uint32_t frequency,
- const uint8_t channels,
+ const size_t channels,
const uint32_t rate,
bool* created_new_payload_type);
@@ -69,7 +71,7 @@ class RTPPayloadRegistry {
int32_t ReceivePayloadType(
const char payload_name[RTP_PAYLOAD_NAME_SIZE],
const uint32_t frequency,
- const uint8_t channels,
+ const size_t channels,
const uint32_t rate,
int8_t* payload_type) const;
@@ -108,8 +110,16 @@ class RTPPayloadRegistry {
int GetPayloadTypeFrequency(uint8_t payload_type) const;
+ // DEPRECATED. Use PayloadTypeToPayload below that returns const Payload*
+ // instead of taking output parameter.
+ // TODO(danilchap): Remove this when all callers have been updated.
bool PayloadTypeToPayload(const uint8_t payload_type,
- RtpUtility::Payload*& payload) const;
+ RtpUtility::Payload*& payload) const { // NOLINT
+ payload =
+ const_cast<RtpUtility::Payload*>(PayloadTypeToPayload(payload_type));
+ return payload != nullptr;
+ }
+ const RtpUtility::Payload* PayloadTypeToPayload(uint8_t payload_type) const;
void ResetLastReceivedPayloadTypes() {
CriticalSectionScoped cs(crit_sect_.get());
@@ -145,7 +155,7 @@ class RTPPayloadRegistry {
int8_t last_received_media_payload_type() const {
CriticalSectionScoped cs(crit_sect_.get());
return last_received_media_payload_type_;
- };
+ }
bool use_rtx_payload_mapping_on_restore() const {
CriticalSectionScoped cs(crit_sect_.get());
@@ -163,7 +173,7 @@ class RTPPayloadRegistry {
const char payload_name[RTP_PAYLOAD_NAME_SIZE],
const size_t payload_name_length,
const uint32_t frequency,
- const uint8_t channels,
+ const size_t channels,
const uint32_t rate);
bool IsRtxInternal(const RTPHeader& header) const;
@@ -190,4 +200,4 @@ class RTPPayloadRegistry {
} // namespace webrtc
-#endif // WEBRTC_MODULES_RTP_RTCP_INTERFACE_RTP_PAYLOAD_REGISTRY_H_
+#endif // WEBRTC_MODULES_RTP_RTCP_INCLUDE_RTP_PAYLOAD_REGISTRY_H_
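The PayloadTypeToPayload change above shows the migration pattern this commit applies repeatedly: the old reference-out-parameter signature survives as a thin inline shim delegating to the new pointer-returning form, so callers can be updated one at a time and the shim deleted once none remain. A distilled, self-contained sketch of the pattern:

#include <cstdint>

struct Payload { int dummy; };

class Registry {
 public:
  // New form: look up by type, return nullptr when unknown.
  const Payload* PayloadTypeToPayload(uint8_t payload_type) const {
    (void)payload_type;
    return nullptr;  // Real lookup elided in this sketch.
  }

  // DEPRECATED shim: the old out-parameter signature delegates to the
  // new accessor. Remove once all callers have migrated.
  bool PayloadTypeToPayload(uint8_t payload_type,
                            Payload*& payload) const {  // NOLINT
    payload = const_cast<Payload*>(PayloadTypeToPayload(payload_type));
    return payload != nullptr;
  }
};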
diff --git a/webrtc/modules/rtp_rtcp/interface/rtp_receiver.h b/webrtc/modules/rtp_rtcp/include/rtp_receiver.h
index 2fb8ac5d61..0640d5cc19 100644
--- a/webrtc/modules/rtp_rtcp/interface/rtp_receiver.h
+++ b/webrtc/modules/rtp_rtcp/include/rtp_receiver.h
@@ -8,10 +8,10 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_RTP_RTCP_INTERFACE_RTP_RECEIVER_H_
-#define WEBRTC_MODULES_RTP_RTCP_INTERFACE_RTP_RECEIVER_H_
+#ifndef WEBRTC_MODULES_RTP_RTCP_INCLUDE_RTP_RECEIVER_H_
+#define WEBRTC_MODULES_RTP_RTCP_INCLUDE_RTP_RECEIVER_H_
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "webrtc/typedefs.h"
namespace webrtc {
@@ -61,7 +61,7 @@ class RtpReceiver {
const char payload_name[RTP_PAYLOAD_NAME_SIZE],
const int8_t payload_type,
const uint32_t frequency,
- const uint8_t channels,
+ const size_t channels,
const uint32_t rate) = 0;
// De-registers |payload_type| from the payload registry.
@@ -100,4 +100,4 @@ class RtpReceiver {
};
} // namespace webrtc
-#endif // WEBRTC_MODULES_RTP_RTCP_INTERFACE_RTP_RECEIVER_H_
+#endif // WEBRTC_MODULES_RTP_RTCP_INCLUDE_RTP_RECEIVER_H_
diff --git a/webrtc/modules/rtp_rtcp/interface/rtp_rtcp.h b/webrtc/modules/rtp_rtcp/include/rtp_rtcp.h
index f907408573..6a7022a94c 100644
--- a/webrtc/modules/rtp_rtcp/interface/rtp_rtcp.h
+++ b/webrtc/modules/rtp_rtcp/include/rtp_rtcp.h
@@ -8,14 +8,15 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_RTP_RTCP_INTERFACE_RTP_RTCP_H_
-#define WEBRTC_MODULES_RTP_RTCP_INTERFACE_RTP_RTCP_H_
+#ifndef WEBRTC_MODULES_RTP_RTCP_INCLUDE_RTP_RTCP_H_
+#define WEBRTC_MODULES_RTP_RTCP_INCLUDE_RTP_RTCP_H_
#include <set>
+#include <utility>
#include <vector>
-#include "webrtc/modules/interface/module.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
+#include "webrtc/modules/include/module.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
namespace webrtc {
// Forward declarations.
@@ -580,9 +581,13 @@ class RtpRtcp : public Module {
*
* return -1 on failure else 0
*/
- virtual int32_t SendREDPayloadType(
- int8_t& payloadType) const = 0;
-
+ // DEPRECATED. Use SendREDPayloadType below that takes output parameter
+ // by pointer instead of by reference.
+ // TODO(danilchap): Remove this when all callers have been updated.
+ int32_t SendREDPayloadType(int8_t& payloadType) const { // NOLINT
+ return SendREDPayloadType(&payloadType);
+ }
+ virtual int32_t SendREDPayloadType(int8_t* payload_type) const = 0;
/*
* Store the audio level in dBov for header-extension-for-audio-level-
* indication.
@@ -614,10 +619,17 @@ class RtpRtcp : public Module {
/*
* Get generic FEC setting
*/
- virtual void GenericFECStatus(bool& enable,
- uint8_t& payloadTypeRED,
- uint8_t& payloadTypeFEC) = 0;
-
+ // DEPRECATED. Use GenericFECStatus below that takes output parameters
+ // by pointers instead of by references.
+ // TODO(danilchap): Remove this when all callers have been updated.
+ void GenericFECStatus(bool& enable, // NOLINT
+ uint8_t& payloadTypeRED, // NOLINT
+ uint8_t& payloadTypeFEC) { // NOLINT
+ GenericFECStatus(&enable, &payloadTypeRED, &payloadTypeFEC);
+ }
+ virtual void GenericFECStatus(bool* enable,
+ uint8_t* payload_type_red,
+ uint8_t* payload_type_fec) = 0;
virtual int32_t SetFecParameters(
const FecProtectionParams* delta_params,
@@ -638,4 +650,4 @@ class RtpRtcp : public Module {
virtual int32_t RequestKeyFrame() = 0;
};
} // namespace webrtc
-#endif // WEBRTC_MODULES_RTP_RTCP_INTERFACE_RTP_RTCP_H_
+#endif // WEBRTC_MODULES_RTP_RTCP_INCLUDE_RTP_RTCP_H_
diff --git a/webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h b/webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h
index 6936352aca..fad97f19cc 100644
--- a/webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h
+++ b/webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h
@@ -8,13 +8,13 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_RTP_RTCP_INTERFACE_RTP_RTCP_DEFINES_H_
-#define WEBRTC_MODULES_RTP_RTCP_INTERFACE_RTP_RTCP_DEFINES_H_
+#ifndef WEBRTC_MODULES_RTP_RTCP_INCLUDE_RTP_RTCP_DEFINES_H_
+#define WEBRTC_MODULES_RTP_RTCP_INCLUDE_RTP_RTCP_DEFINES_H_
#include <stddef.h>
#include <list>
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/system_wrappers/include/clock.h"
#include "webrtc/typedefs.h"
@@ -33,31 +33,23 @@ const int kVideoPayloadTypeFrequency = 90000;
// Minimum RTP header size in bytes.
const uint8_t kRtpHeaderSize = 12;
-struct AudioPayload
-{
+struct AudioPayload {
uint32_t frequency;
- uint8_t channels;
+ size_t channels;
uint32_t rate;
};
-struct VideoPayload
-{
+struct VideoPayload {
RtpVideoCodecTypes videoCodecType;
uint32_t maxRate;
};
-union PayloadUnion
-{
+union PayloadUnion {
AudioPayload Audio;
VideoPayload Video;
};
-enum RTPAliveType
-{
- kRtpDead = 0,
- kRtpNoRtp = 1,
- kRtpAlive = 2
-};
+enum RTPAliveType { kRtpDead = 0, kRtpNoRtp = 1, kRtpAlive = 2 };
enum ProtectionType {
kUnprotectedPacket,
@@ -78,10 +70,7 @@ enum RTPExtensionType {
kRtpExtensionTransportSequenceNumber,
};
-enum RTCPAppSubTypes
-{
- kAppSubtypeBwe = 0x00
-};
+enum RTCPAppSubTypes { kAppSubtypeBwe = 0x00 };
// TODO(sprang): Make this an enum class once rtcp_receiver has been cleaned up.
enum RTCPPacketType : uint32_t {
@@ -109,17 +98,9 @@ enum RTCPPacketType : uint32_t {
enum KeyFrameRequestMethod { kKeyFrameReqPliRtcp, kKeyFrameReqFirRtcp };
-enum RtpRtcpPacketType
-{
- kPacketRtp = 0,
- kPacketKeepAlive = 1
-};
+enum RtpRtcpPacketType { kPacketRtp = 0, kPacketKeepAlive = 1 };
-enum NACKMethod
-{
- kNackOff = 0,
- kNackRtcp = 2
-};
+enum NACKMethod { kNackOff = 0, kNackRtcp = 2 };
enum RetransmissionMode : uint8_t {
kRetransmitOff = 0x0,
@@ -138,8 +119,7 @@ enum RtxMode {
const size_t kRtxHeaderSize = 2;
-struct RTCPSenderInfo
-{
+struct RTCPSenderInfo {
uint32_t NTPseconds;
uint32_t NTPfraction;
uint32_t RTPtimeStamp;
@@ -206,40 +186,36 @@ struct RtpState {
bool media_has_been_sent;
};
-class RtpData
-{
-public:
- virtual ~RtpData() {}
+class RtpData {
+ public:
+ virtual ~RtpData() {}
- virtual int32_t OnReceivedPayloadData(
- const uint8_t* payloadData,
- const size_t payloadSize,
- const WebRtcRTPHeader* rtpHeader) = 0;
+ virtual int32_t OnReceivedPayloadData(const uint8_t* payloadData,
+ const size_t payloadSize,
+ const WebRtcRTPHeader* rtpHeader) = 0;
- virtual bool OnRecoveredPacket(const uint8_t* packet,
- size_t packet_length) = 0;
+ virtual bool OnRecoveredPacket(const uint8_t* packet,
+ size_t packet_length) = 0;
};
-class RtpFeedback
-{
-public:
- virtual ~RtpFeedback() {}
-
- // Receiving payload change or SSRC change. (return success!)
- /*
- * channels - number of channels in codec (1 = mono, 2 = stereo)
- */
- virtual int32_t OnInitializeDecoder(
- const int8_t payloadType,
- const char payloadName[RTP_PAYLOAD_NAME_SIZE],
- const int frequency,
- const uint8_t channels,
- const uint32_t rate) = 0;
-
- virtual void OnIncomingSSRCChanged(const uint32_t ssrc) = 0;
-
- virtual void OnIncomingCSRCChanged(const uint32_t CSRC,
- const bool added) = 0;
+class RtpFeedback {
+ public:
+ virtual ~RtpFeedback() {}
+
+ // Receiving payload change or SSRC change. (return success!)
+ /*
+ * channels - number of channels in codec (1 = mono, 2 = stereo)
+ */
+ virtual int32_t OnInitializeDecoder(
+ const int8_t payloadType,
+ const char payloadName[RTP_PAYLOAD_NAME_SIZE],
+ const int frequency,
+ const size_t channels,
+ const uint32_t rate) = 0;
+
+ virtual void OnIncomingSSRCChanged(const uint32_t ssrc) = 0;
+
+ virtual void OnIncomingCSRCChanged(const uint32_t CSRC, const bool added) = 0;
};
class RtpAudioFeedback {
@@ -346,7 +322,7 @@ class RtcpRttStats {
virtual int64_t LastProcessedRtt() const = 0;
- virtual ~RtcpRttStats() {};
+ virtual ~RtcpRttStats() {}
};
// Null object version of RtpFeedback.
@@ -357,7 +333,7 @@ class NullRtpFeedback : public RtpFeedback {
int32_t OnInitializeDecoder(const int8_t payloadType,
const char payloadName[RTP_PAYLOAD_NAME_SIZE],
const int frequency,
- const uint8_t channels,
+ const size_t channels,
const uint32_t rate) override {
return 0;
}
@@ -437,4 +413,4 @@ class TransportSequenceNumberAllocator {
};
} // namespace webrtc
-#endif // WEBRTC_MODULES_RTP_RTCP_INTERFACE_RTP_RTCP_DEFINES_H_
+#endif // WEBRTC_MODULES_RTP_RTCP_INCLUDE_RTP_RTCP_DEFINES_H_
diff --git a/webrtc/modules/rtp_rtcp/mocks/mock_rtp_rtcp.h b/webrtc/modules/rtp_rtcp/mocks/mock_rtp_rtcp.h
index bc4aec8967..796be1304c 100644
--- a/webrtc/modules/rtp_rtcp/mocks/mock_rtp_rtcp.h
+++ b/webrtc/modules/rtp_rtcp/mocks/mock_rtp_rtcp.h
@@ -11,11 +11,15 @@
#ifndef WEBRTC_MODULES_RTP_RTCP_MOCKS_MOCK_RTP_RTCP_H_
#define WEBRTC_MODULES_RTP_RTCP_MOCKS_MOCK_RTP_RTCP_H_
+#include <set>
+#include <utility>
+#include <vector>
+
#include "testing/gmock/include/gmock/gmock.h"
-#include "webrtc/modules/interface/module.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
+#include "webrtc/modules/include/module.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h"
namespace webrtc {
@@ -108,7 +112,10 @@ class MockRtpRtcp : public RtpRtcp {
MOCK_CONST_METHOD0(SendingMedia,
bool());
MOCK_CONST_METHOD4(BitrateSent,
- void(uint32_t* totalRate, uint32_t* videoRate, uint32_t* fecRate, uint32_t* nackRate));
+ void(uint32_t* totalRate,
+ uint32_t* videoRate,
+ uint32_t* fecRate,
+ uint32_t* nackRate));
MOCK_METHOD1(RegisterVideoBitrateObserver, void(BitrateStatisticsObserver*));
MOCK_CONST_METHOD0(GetVideoBitrateObserver, BitrateStatisticsObserver*(void));
MOCK_CONST_METHOD1(EstimatedReceiveBandwidth,
@@ -172,7 +179,10 @@ class MockRtpRtcp : public RtpRtcp {
MOCK_CONST_METHOD1(RemoteRTCPStat,
int32_t(std::vector<RTCPReportBlock>* receiveBlocks));
MOCK_METHOD4(SetRTCPApplicationSpecificData,
- int32_t(const uint8_t subType, const uint32_t name, const uint8_t* data, const uint16_t length));
+ int32_t(const uint8_t subType,
+ const uint32_t name,
+ const uint8_t* data,
+ const uint16_t length));
MOCK_METHOD1(SetRTCPVoIPMetrics,
int32_t(const RTCPVoIPMetric* VoIPMetric));
MOCK_METHOD1(SetRtcpXrRrtrStatus,
@@ -214,12 +224,9 @@ class MockRtpRtcp : public RtpRtcp {
int32_t(const uint8_t key, const uint16_t time_ms, const uint8_t level));
MOCK_METHOD1(SetSendREDPayloadType,
int32_t(const int8_t payloadType));
- MOCK_CONST_METHOD1(SendREDPayloadType,
- int32_t(int8_t& payloadType));
+ MOCK_CONST_METHOD1(SendREDPayloadType, int32_t(int8_t* payloadType));
MOCK_METHOD2(SetRTPAudioLevelIndicationStatus,
int32_t(const bool enable, const uint8_t ID));
- MOCK_CONST_METHOD2(GetRTPAudioLevelIndicationStatus,
- int32_t(bool& enable, uint8_t& ID));
MOCK_METHOD1(SetAudioLevel,
int32_t(const uint8_t level_dBov));
MOCK_METHOD1(SetTargetSendBitrate,
@@ -229,9 +236,9 @@ class MockRtpRtcp : public RtpRtcp {
const uint8_t payload_type_red,
const uint8_t payload_type_fec));
MOCK_METHOD3(GenericFECStatus,
- void(bool& enable,
- uint8_t& payloadTypeRED,
- uint8_t& payloadTypeFEC));
+ void(bool* enable,
+ uint8_t* payloadTypeRED,
+ uint8_t* payloadTypeFEC));
MOCK_METHOD2(SetFecParameters,
int32_t(const FecProtectionParams* delta_params,
const FecProtectionParams* key_params));
@@ -239,8 +246,6 @@ class MockRtpRtcp : public RtpRtcp {
int32_t(const KeyFrameRequestMethod method));
MOCK_METHOD0(RequestKeyFrame,
int32_t());
- MOCK_CONST_METHOD3(Version,
- int32_t(char* version, uint32_t& remaining_buffer_in_bytes, uint32_t& position));
MOCK_METHOD0(TimeUntilNextProcess,
int64_t());
MOCK_METHOD0(Process,
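
Switching the mock out-parameters from references to pointers (per the style guide) also makes them straightforward to stub with gmock's standard actions. A hedged sketch; the test name and payload-type values are illustrative:

using ::testing::_;
using ::testing::DoAll;
using ::testing::SetArgPointee;

TEST(MockRtpRtcpUsage, StubsPointerOutParams) {  // Hypothetical test.
  MockRtpRtcp mock;
  // Make GenericFECStatus report "FEC enabled, RED=96, FEC=97".
  EXPECT_CALL(mock, GenericFECStatus(_, _, _))
      .WillRepeatedly(DoAll(SetArgPointee<0>(true),  // *enable
                            SetArgPointee<1>(96),    // *payloadTypeRED
                            SetArgPointee<2>(97)));  // *payloadTypeFEC
}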
diff --git a/webrtc/modules/rtp_rtcp/rtp_rtcp.gypi b/webrtc/modules/rtp_rtcp/rtp_rtcp.gypi
index e35a75cd0f..d340f746be 100644
--- a/webrtc/modules/rtp_rtcp/rtp_rtcp.gypi
+++ b/webrtc/modules/rtp_rtcp/rtp_rtcp.gypi
@@ -17,14 +17,14 @@
],
'sources': [
# Common
- 'interface/fec_receiver.h',
- 'interface/receive_statistics.h',
- 'interface/remote_ntp_time_estimator.h',
- 'interface/rtp_header_parser.h',
- 'interface/rtp_payload_registry.h',
- 'interface/rtp_receiver.h',
- 'interface/rtp_rtcp.h',
- 'interface/rtp_rtcp_defines.h',
+ 'include/fec_receiver.h',
+ 'include/receive_statistics.h',
+ 'include/remote_ntp_time_estimator.h',
+ 'include/rtp_header_parser.h',
+ 'include/rtp_payload_registry.h',
+ 'include/rtp_receiver.h',
+ 'include/rtp_rtcp.h',
+ 'include/rtp_rtcp_defines.h',
'source/bitrate.cc',
'source/bitrate.h',
'source/byte_io.h',
@@ -41,8 +41,40 @@
'source/rtp_rtcp_impl.h',
'source/rtcp_packet.cc',
'source/rtcp_packet.h',
+ 'source/rtcp_packet/app.cc',
+ 'source/rtcp_packet/app.h',
+ 'source/rtcp_packet/bye.cc',
+ 'source/rtcp_packet/bye.h',
+ 'source/rtcp_packet/compound_packet.cc',
+ 'source/rtcp_packet/compound_packet.h',
+ 'source/rtcp_packet/dlrr.cc',
+ 'source/rtcp_packet/dlrr.h',
+ 'source/rtcp_packet/extended_jitter_report.cc',
+ 'source/rtcp_packet/extended_jitter_report.h',
+ 'source/rtcp_packet/nack.cc',
+ 'source/rtcp_packet/nack.h',
+ 'source/rtcp_packet/pli.cc',
+ 'source/rtcp_packet/pli.h',
+ 'source/rtcp_packet/psfb.cc',
+ 'source/rtcp_packet/psfb.h',
+ 'source/rtcp_packet/receiver_report.cc',
+ 'source/rtcp_packet/receiver_report.h',
+ 'source/rtcp_packet/report_block.cc',
+ 'source/rtcp_packet/report_block.h',
+ 'source/rtcp_packet/rrtr.cc',
+ 'source/rtcp_packet/rrtr.h',
+ 'source/rtcp_packet/rtpfb.cc',
+ 'source/rtcp_packet/rtpfb.h',
+ 'source/rtcp_packet/sli.cc',
+ 'source/rtcp_packet/sli.h',
+ 'source/rtcp_packet/tmmbn.cc',
+ 'source/rtcp_packet/tmmbn.h',
+ 'source/rtcp_packet/tmmbr.cc',
+ 'source/rtcp_packet/tmmbr.h',
'source/rtcp_packet/transport_feedback.cc',
'source/rtcp_packet/transport_feedback.h',
+ 'source/rtcp_packet/voip_metric.cc',
+ 'source/rtcp_packet/voip_metric.h',
'source/rtcp_receiver.cc',
'source/rtcp_receiver.h',
'source/rtcp_receiver_help.cc',
diff --git a/webrtc/modules/rtp_rtcp/source/CPPLINT.cfg b/webrtc/modules/rtp_rtcp/source/CPPLINT.cfg
new file mode 100644
index 0000000000..c318452482
--- /dev/null
+++ b/webrtc/modules/rtp_rtcp/source/CPPLINT.cfg
@@ -0,0 +1,6 @@
+# tmmbr_help is refactored in CL#1474693002.
+exclude_files=tmmbr_help.*
+# rtcp_utility is planned to be removed when webrtc:5260 is finished.
+exclude_files=rtcp_utility.*
+# rtcp_receiver/rtcp_receiver_help will be refactored more deeply as part of webrtc:5260.
+exclude_files=rtcp_receiver.*
diff --git a/webrtc/modules/rtp_rtcp/source/byte_io.h b/webrtc/modules/rtp_rtcp/source/byte_io.h
index d8903b8483..c69c178078 100644
--- a/webrtc/modules/rtp_rtcp/source/byte_io.h
+++ b/webrtc/modules/rtp_rtcp/source/byte_io.h
@@ -51,17 +51,14 @@ namespace webrtc {
// platform that doesn't use two's complement, implement conversion to/from
// wire format.
-namespace {
-inline void AssertTwosComplement() {
- // Assume the if any one signed integer type is two's complement, then all
- // other will be too.
- static_assert(
- (-1 & 0x03) == 0x03,
- "Only two's complement representation of signed integers supported.");
-}
+// Assume that if any one signed integer type is two's complement, then all
+// others will be too.
+static_assert(
+ (-1 & 0x03) == 0x03,
+ "Only two's complement representation of signed integers supported.");
+
// Plain const char* won't work for static_assert, use #define instead.
#define kSizeErrorMsg "Byte size must be less than or equal to data type size."
-}
// Utility class for getting the unsigned equivalent of a signed type.
template <typename T>
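
The relocated static_assert encodes this file's core assumption: signed values can round-trip through their unsigned equivalents. A minimal sketch of the read pattern this enables, assuming two's complement; ReadBigEndianS16 is a hypothetical stand-in for the ByteReader templates, not code from byte_io.h:

#include <cstdint>

int16_t ReadBigEndianS16(const uint8_t* data) {
  // Assemble the unsigned bit pattern from the wire...
  uint16_t unsigned_val =
      (static_cast<uint16_t>(data[0]) << 8) | static_cast<uint16_t>(data[1]);
  // ...then reinterpret it as signed, which is only a bit-for-bit no-op on
  // two's complement platforms -- hence the static_assert above.
  return static_cast<int16_t>(unsigned_val);
}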
diff --git a/webrtc/modules/rtp_rtcp/source/dtmf_queue.cc b/webrtc/modules/rtp_rtcp/source/dtmf_queue.cc
index becea912ab..ab21b8704a 100644
--- a/webrtc/modules/rtp_rtcp/source/dtmf_queue.cc
+++ b/webrtc/modules/rtp_rtcp/source/dtmf_queue.cc
@@ -10,7 +10,7 @@
#include "webrtc/modules/rtp_rtcp/source/dtmf_queue.h"
-#include <string.h> //memset
+#include <string.h>
namespace webrtc {
DTMFqueue::DTMFqueue()
@@ -21,7 +21,9 @@ DTMFqueue::DTMFqueue()
memset(dtmf_level_, 0, sizeof(dtmf_level_));
}
-DTMFqueue::~DTMFqueue() { delete dtmf_critsect_; }
+DTMFqueue::~DTMFqueue() {
+ delete dtmf_critsect_;
+}
int32_t DTMFqueue::AddDTMF(uint8_t key, uint16_t len, uint8_t level) {
CriticalSectionScoped lock(dtmf_critsect_);
diff --git a/webrtc/modules/rtp_rtcp/source/fec_private_tables_bursty.h b/webrtc/modules/rtp_rtcp/source/fec_private_tables_bursty.h
index 6105ae1d24..0b39908bb1 100644
--- a/webrtc/modules/rtp_rtcp/source/fec_private_tables_bursty.h
+++ b/webrtc/modules/rtp_rtcp/source/fec_private_tables_bursty.h
@@ -27,7 +27,8 @@
#include "webrtc/typedefs.h"
-namespace {
+namespace webrtc {
+namespace fec_private_tables {
const uint8_t kMaskBursty1_1[2] = {
0x80, 0x00
@@ -756,5 +757,6 @@ const uint8_t** kPacketMaskBurstyTbl[12] = {
kPacketMaskBursty12
};
-} // namespace
+} // namespace fec_private_tables
+} // namespace webrtc
#endif // WEBRTC_MODULES_RTP_RTCP_SOURCE_FEC_PRIVATE_TABLES_BURSTY_H_
diff --git a/webrtc/modules/rtp_rtcp/source/fec_private_tables_random.h b/webrtc/modules/rtp_rtcp/source/fec_private_tables_random.h
index ff6de43b76..295d749873 100644
--- a/webrtc/modules/rtp_rtcp/source/fec_private_tables_random.h
+++ b/webrtc/modules/rtp_rtcp/source/fec_private_tables_random.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_RTP_RTCP_SOURCE_FEC_PRIVATE_TABLES_H_
-#define WEBRTC_MODULES_RTP_RTCP_SOURCE_FEC_PRIVATE_TABLES_H_
+#ifndef WEBRTC_MODULES_RTP_RTCP_SOURCE_FEC_PRIVATE_TABLES_RANDOM_H_
+#define WEBRTC_MODULES_RTP_RTCP_SOURCE_FEC_PRIVATE_TABLES_RANDOM_H_
// This file contains a set of packets masks for the FEC code. The masks in
// this table are specifically designed to favor recovery to random loss.
@@ -17,7 +17,8 @@
#include "webrtc/typedefs.h"
-namespace {
+namespace webrtc {
+namespace fec_private_tables {
const uint8_t kMaskRandom10_1[2] = {
0xff, 0xc0
@@ -24518,5 +24519,6 @@ const uint8_t** kPacketMaskRandomTbl[48] = {
kPacketMaskRandom48
};
-} // namespace
-#endif // WEBRTC_MODULES_RTP_RTCP_SOURCE_FEC_PRIVATE_TABLES_H_
+} // namespace fec_private_tables
+} // namespace webrtc
+#endif // WEBRTC_MODULES_RTP_RTCP_SOURCE_FEC_PRIVATE_TABLES_RANDOM_H_
diff --git a/webrtc/modules/rtp_rtcp/source/fec_receiver_impl.h b/webrtc/modules/rtp_rtcp/source/fec_receiver_impl.h
index 24db39b902..6a63813f40 100644
--- a/webrtc/modules/rtp_rtcp/source/fec_receiver_impl.h
+++ b/webrtc/modules/rtp_rtcp/source/fec_receiver_impl.h
@@ -14,8 +14,8 @@
// This header is included to get the nested declaration of Packet structure.
#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/modules/rtp_rtcp/interface/fec_receiver.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
+#include "webrtc/modules/rtp_rtcp/include/fec_receiver.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "webrtc/modules/rtp_rtcp/source/forward_error_correction.h"
#include "webrtc/typedefs.h"
@@ -25,7 +25,7 @@ class CriticalSectionWrapper;
class FecReceiverImpl : public FecReceiver {
public:
- FecReceiverImpl(RtpData* callback);
+ explicit FecReceiverImpl(RtpData* callback);
virtual ~FecReceiverImpl();
int32_t AddReceivedRedPacket(const RTPHeader& rtp_header,
diff --git a/webrtc/modules/rtp_rtcp/source/fec_receiver_unittest.cc b/webrtc/modules/rtp_rtcp/source/fec_receiver_unittest.cc
index f64b537a52..bb22e1d580 100644
--- a/webrtc/modules/rtp_rtcp/source/fec_receiver_unittest.cc
+++ b/webrtc/modules/rtp_rtcp/source/fec_receiver_unittest.cc
@@ -15,8 +15,8 @@
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/modules/rtp_rtcp/interface/fec_receiver.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_header_parser.h"
+#include "webrtc/modules/rtp_rtcp/include/fec_receiver.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_header_parser.h"
#include "webrtc/modules/rtp_rtcp/mocks/mock_rtp_rtcp.h"
#include "webrtc/modules/rtp_rtcp/source/byte_io.h"
#include "webrtc/modules/rtp_rtcp/source/fec_test_helper.h"
diff --git a/webrtc/modules/rtp_rtcp/source/fec_test_helper.h b/webrtc/modules/rtp_rtcp/source/fec_test_helper.h
index e1791adba3..aacc2d1ecc 100644
--- a/webrtc/modules/rtp_rtcp/source/fec_test_helper.h
+++ b/webrtc/modules/rtp_rtcp/source/fec_test_helper.h
@@ -11,7 +11,7 @@
#ifndef WEBRTC_MODULES_RTP_RTCP_SOURCE_FEC_TEST_HELPER_H_
#define WEBRTC_MODULES_RTP_RTCP_SOURCE_FEC_TEST_HELPER_H_
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/modules/rtp_rtcp/source/forward_error_correction.h"
namespace webrtc {
@@ -54,6 +54,6 @@ class FrameGenerator {
uint16_t seq_num_;
uint32_t timestamp_;
};
-}
+} // namespace webrtc
#endif // WEBRTC_MODULES_RTP_RTCP_SOURCE_FEC_TEST_HELPER_H_
diff --git a/webrtc/modules/rtp_rtcp/source/forward_error_correction.cc b/webrtc/modules/rtp_rtcp/source/forward_error_correction.cc
index aad418f015..b85d813790 100644
--- a/webrtc/modules/rtp_rtcp/source/forward_error_correction.cc
+++ b/webrtc/modules/rtp_rtcp/source/forward_error_correction.cc
@@ -10,15 +10,15 @@
#include "webrtc/modules/rtp_rtcp/source/forward_error_correction.h"
-#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <algorithm>
#include <iterator>
+#include "webrtc/base/checks.h"
#include "webrtc/base/logging.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "webrtc/modules/rtp_rtcp/source/byte_io.h"
#include "webrtc/modules/rtp_rtcp/source/forward_error_correction_internal.h"
@@ -36,11 +36,11 @@ const uint8_t kUlpHeaderSizeLBitClear = (2 + kMaskSizeLBitClear);
// Transport header size in bytes. Assume UDP/IPv4 as a reasonable minimum.
const uint8_t kTransportOverhead = 28;
-enum {
- kMaxFecPackets = ForwardErrorCorrection::kMaxMediaPackets
-};
+enum { kMaxFecPackets = ForwardErrorCorrection::kMaxMediaPackets };
-int32_t ForwardErrorCorrection::Packet::AddRef() { return ++ref_count_; }
+int32_t ForwardErrorCorrection::Packet::AddRef() {
+ return ++ref_count_;
+}
int32_t ForwardErrorCorrection::Packet::Release() {
int32_t ref_count;
@@ -72,7 +72,8 @@ class FecPacket : public ForwardErrorCorrection::SortablePacket {
};
bool ForwardErrorCorrection::SortablePacket::LessThan(
- const SortablePacket* first, const SortablePacket* second) {
+ const SortablePacket* first,
+ const SortablePacket* second) {
return IsNewerSequenceNumber(second->seq_num, first->seq_num);
}
@@ -83,8 +84,7 @@ ForwardErrorCorrection::RecoveredPacket::RecoveredPacket() {}
ForwardErrorCorrection::RecoveredPacket::~RecoveredPacket() {}
ForwardErrorCorrection::ForwardErrorCorrection()
- : generated_fec_packets_(kMaxMediaPackets),
- fec_packet_received_(false) {}
+ : generated_fec_packets_(kMaxMediaPackets), fec_packet_received_(false) {}
ForwardErrorCorrection::~ForwardErrorCorrection() {}
@@ -112,7 +112,6 @@ int32_t ForwardErrorCorrection::GenerateFEC(const PacketList& media_packet_list,
FecMaskType fec_mask_type,
PacketList* fec_packet_list) {
const uint16_t num_media_packets = media_packet_list.size();
-
// Sanity check arguments.
assert(num_media_packets > 0);
assert(num_important_packets >= 0 &&
@@ -126,12 +125,10 @@ int32_t ForwardErrorCorrection::GenerateFEC(const PacketList& media_packet_list,
}
bool l_bit = (num_media_packets > 8 * kMaskSizeLBitClear);
- int num_maskBytes = l_bit ? kMaskSizeLBitSet : kMaskSizeLBitClear;
+ int num_mask_bytes = l_bit ? kMaskSizeLBitSet : kMaskSizeLBitClear;
// Do some error checking on the media packets.
- PacketList::const_iterator media_list_it = media_packet_list.begin();
- while (media_list_it != media_packet_list.end()) {
- Packet* media_packet = *media_list_it;
+ for (Packet* media_packet : media_packet_list) {
assert(media_packet);
if (media_packet->length < kRtpHeaderSize) {
@@ -146,7 +143,6 @@ int32_t ForwardErrorCorrection::GenerateFEC(const PacketList& media_packet_list,
LOG(LS_WARNING) << "Media packet " << media_packet->length << " bytes "
<< "with overhead is larger than " << IP_PACKET_SIZE;
}
- media_list_it++;
}
int num_fec_packets =
@@ -167,29 +163,29 @@ int32_t ForwardErrorCorrection::GenerateFEC(const PacketList& media_packet_list,
// -- Generate packet masks --
// Always allocate space for a large mask.
- uint8_t* packet_mask = new uint8_t[num_fec_packets * kMaskSizeLBitSet];
- memset(packet_mask, 0, num_fec_packets * num_maskBytes);
+ rtc::scoped_ptr<uint8_t[]> packet_mask(
+ new uint8_t[num_fec_packets * kMaskSizeLBitSet]);
+ memset(packet_mask.get(), 0, num_fec_packets * num_mask_bytes);
internal::GeneratePacketMasks(num_media_packets, num_fec_packets,
num_important_packets, use_unequal_protection,
- mask_table, packet_mask);
-
- int num_maskBits = InsertZerosInBitMasks(media_packet_list, packet_mask,
- num_maskBytes, num_fec_packets);
+ mask_table, packet_mask.get());
- l_bit = (num_maskBits > 8 * kMaskSizeLBitClear);
+ int num_mask_bits = InsertZerosInBitMasks(
+ media_packet_list, packet_mask.get(), num_mask_bytes, num_fec_packets);
- if (num_maskBits < 0) {
- delete[] packet_mask;
+ if (num_mask_bits < 0) {
return -1;
}
+ l_bit = (num_mask_bits > 8 * kMaskSizeLBitClear);
if (l_bit) {
- num_maskBytes = kMaskSizeLBitSet;
+ num_mask_bytes = kMaskSizeLBitSet;
}
- GenerateFecBitStrings(media_packet_list, packet_mask, num_fec_packets, l_bit);
- GenerateFecUlpHeaders(media_packet_list, packet_mask, l_bit, num_fec_packets);
+ GenerateFecBitStrings(media_packet_list, packet_mask.get(), num_fec_packets,
+ l_bit);
+ GenerateFecUlpHeaders(media_packet_list, packet_mask.get(), l_bit,
+ num_fec_packets);
- delete[] packet_mask;
return 0;
}
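
The scoped_ptr change above is the usual RAII swap: the mask buffer now frees itself on every return path, which is why the explicit delete[] before the error return and at function exit could be dropped. A reduced sketch of the pattern; Generate is a hypothetical function:

#include <cstring>
#include "webrtc/base/scoped_ptr.h"

int Generate(size_t num_fec_packets, size_t mask_size) {
  rtc::scoped_ptr<uint8_t[]> packet_mask(
      new uint8_t[num_fec_packets * mask_size]);
  memset(packet_mask.get(), 0, num_fec_packets * mask_size);
  if (num_fec_packets == 0)
    return -1;  // Early return: the array is released automatically.
  // ... fill and consume packet_mask.get() ...
  return 0;     // Normal return: released here too.
}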
@@ -206,26 +202,30 @@ int ForwardErrorCorrection::GetNumberOfFecPackets(int num_media_packets,
}
void ForwardErrorCorrection::GenerateFecBitStrings(
- const PacketList& media_packet_list, uint8_t* packet_mask,
- int num_fec_packets, bool l_bit) {
+ const PacketList& media_packet_list,
+ uint8_t* packet_mask,
+ int num_fec_packets,
+ bool l_bit) {
if (media_packet_list.empty()) {
return;
}
uint8_t media_payload_length[2];
- const int num_maskBytes = l_bit ? kMaskSizeLBitSet : kMaskSizeLBitClear;
+ const int num_mask_bytes = l_bit ? kMaskSizeLBitSet : kMaskSizeLBitClear;
const uint16_t ulp_header_size =
l_bit ? kUlpHeaderSizeLBitSet : kUlpHeaderSizeLBitClear;
const uint16_t fec_rtp_offset =
kFecHeaderSize + ulp_header_size - kRtpHeaderSize;
for (int i = 0; i < num_fec_packets; ++i) {
+ Packet* const fec_packet = &generated_fec_packets_[i];
PacketList::const_iterator media_list_it = media_packet_list.begin();
- uint32_t pkt_mask_idx = i * num_maskBytes;
+ uint32_t pkt_mask_idx = i * num_mask_bytes;
uint32_t media_pkt_idx = 0;
uint16_t fec_packet_length = 0;
uint16_t prev_seq_num = ParseSequenceNumber((*media_list_it)->data);
while (media_list_it != media_packet_list.end()) {
- // Each FEC packet has a multiple byte mask.
+      // Each FEC packet has a multi-byte mask. Determine if this media
+      // packet should be included in FEC packet i.
if (packet_mask[pkt_mask_idx] & (1 << (7 - media_pkt_idx))) {
Packet* media_packet = *media_list_it;
@@ -235,42 +235,40 @@ void ForwardErrorCorrection::GenerateFecBitStrings(
fec_packet_length = media_packet->length + fec_rtp_offset;
// On the first protected packet, we don't need to XOR.
- if (generated_fec_packets_[i].length == 0) {
+ if (fec_packet->length == 0) {
// Copy the first 2 bytes of the RTP header.
- memcpy(generated_fec_packets_[i].data, media_packet->data, 2);
+ memcpy(fec_packet->data, media_packet->data, 2);
// Copy the 5th to 8th bytes of the RTP header.
- memcpy(&generated_fec_packets_[i].data[4], &media_packet->data[4], 4);
+ memcpy(&fec_packet->data[4], &media_packet->data[4], 4);
// Copy network-ordered payload size.
- memcpy(&generated_fec_packets_[i].data[8], media_payload_length, 2);
+ memcpy(&fec_packet->data[8], media_payload_length, 2);
// Copy RTP payload, leaving room for the ULP header.
- memcpy(
- &generated_fec_packets_[i].data[kFecHeaderSize + ulp_header_size],
- &media_packet->data[kRtpHeaderSize],
- media_packet->length - kRtpHeaderSize);
+ memcpy(&fec_packet->data[kFecHeaderSize + ulp_header_size],
+ &media_packet->data[kRtpHeaderSize],
+ media_packet->length - kRtpHeaderSize);
} else {
// XOR with the first 2 bytes of the RTP header.
- generated_fec_packets_[i].data[0] ^= media_packet->data[0];
- generated_fec_packets_[i].data[1] ^= media_packet->data[1];
+ fec_packet->data[0] ^= media_packet->data[0];
+ fec_packet->data[1] ^= media_packet->data[1];
// XOR with the 5th to 8th bytes of the RTP header.
for (uint32_t j = 4; j < 8; ++j) {
- generated_fec_packets_[i].data[j] ^= media_packet->data[j];
+ fec_packet->data[j] ^= media_packet->data[j];
}
// XOR with the network-ordered payload size.
- generated_fec_packets_[i].data[8] ^= media_payload_length[0];
- generated_fec_packets_[i].data[9] ^= media_payload_length[1];
+ fec_packet->data[8] ^= media_payload_length[0];
+ fec_packet->data[9] ^= media_payload_length[1];
// XOR with RTP payload, leaving room for the ULP header.
for (int32_t j = kFecHeaderSize + ulp_header_size;
j < fec_packet_length; j++) {
- generated_fec_packets_[i].data[j] ^=
- media_packet->data[j - fec_rtp_offset];
+ fec_packet->data[j] ^= media_packet->data[j - fec_rtp_offset];
}
}
- if (fec_packet_length > generated_fec_packets_[i].length) {
- generated_fec_packets_[i].length = fec_packet_length;
+ if (fec_packet_length > fec_packet->length) {
+ fec_packet->length = fec_packet_length;
}
}
media_list_it++;
@@ -279,19 +277,18 @@ void ForwardErrorCorrection::GenerateFecBitStrings(
media_pkt_idx += static_cast<uint16_t>(seq_num - prev_seq_num);
prev_seq_num = seq_num;
}
- if (media_pkt_idx == 8) {
- // Switch to the next mask byte.
- media_pkt_idx = 0;
- pkt_mask_idx++;
- }
+ pkt_mask_idx += media_pkt_idx / 8;
+ media_pkt_idx %= 8;
}
- assert(generated_fec_packets_[i].length);
- //Note: This shouldn't happen: means packet mask is wrong or poorly designed
+ RTC_DCHECK_GT(fec_packet->length, 0u)
+ << "Packet mask is wrong or poorly designed.";
}
}
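
The index update above is the substantive fix in this function. The old code advanced to the next mask byte only when media_pkt_idx landed exactly on 8, but a sequence-number gap can push it past 8 in one step; the div/mod form handles jumps of any size. A worked example:

// Starting at bit 6 of mask byte 0, a gap of 3 sequence numbers moves the
// logical bit index to 9 -- past the byte boundary in a single step.
uint32_t pkt_mask_idx = 0;          // current mask byte
uint32_t media_pkt_idx = 6 + 3;     // bit index after the gap: 9
pkt_mask_idx += media_pkt_idx / 8;  // -> byte 1
media_pkt_idx %= 8;                 // -> bit 1
// The old `if (media_pkt_idx == 8)` test never fired for 9, so reads kept
// hitting byte 0; this is the bug exercised by NoEmptyFecWithSeqNumGaps
// in producer_fec_unittest.cc below.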
int ForwardErrorCorrection::InsertZerosInBitMasks(
- const PacketList& media_packets, uint8_t* packet_mask, int num_mask_bytes,
+ const PacketList& media_packets,
+ uint8_t* packet_mask,
+ int num_mask_bytes,
int num_fec_packets) {
uint8_t* new_mask = NULL;
if (media_packets.size() <= 1) {
@@ -307,6 +304,9 @@ int ForwardErrorCorrection::InsertZerosInBitMasks(
// required.
return media_packets.size();
}
+ // We can only protect 8 * kMaskSizeLBitSet packets.
+ if (total_missing_seq_nums + media_packets.size() > 8 * kMaskSizeLBitSet)
+ return -1;
// Allocate the new mask.
int new_mask_bytes = kMaskSizeLBitClear;
if (media_packets.size() + total_missing_seq_nums > 8 * kMaskSizeLBitClear) {
@@ -357,7 +357,8 @@ int ForwardErrorCorrection::InsertZerosInBitMasks(
return new_bit_index;
}
-void ForwardErrorCorrection::InsertZeroColumns(int num_zeros, uint8_t* new_mask,
+void ForwardErrorCorrection::InsertZeroColumns(int num_zeros,
+ uint8_t* new_mask,
int new_mask_bytes,
int num_fec_packets,
int new_bit_index) {
@@ -368,9 +369,12 @@ void ForwardErrorCorrection::InsertZeroColumns(int num_zeros, uint8_t* new_mask,
}
}
-void ForwardErrorCorrection::CopyColumn(uint8_t* new_mask, int new_mask_bytes,
- uint8_t* old_mask, int old_mask_bytes,
- int num_fec_packets, int new_bit_index,
+void ForwardErrorCorrection::CopyColumn(uint8_t* new_mask,
+ int new_mask_bytes,
+ uint8_t* old_mask,
+ int old_mask_bytes,
+ int num_fec_packets,
+ int new_bit_index,
int old_bit_index) {
// Copy column from the old mask to the beginning of the new mask and shift it
// out from the old mask.
@@ -386,7 +390,9 @@ void ForwardErrorCorrection::CopyColumn(uint8_t* new_mask, int new_mask_bytes,
}
void ForwardErrorCorrection::GenerateFecUlpHeaders(
- const PacketList& media_packet_list, uint8_t* packet_mask, bool l_bit,
+ const PacketList& media_packet_list,
+ uint8_t* packet_mask,
+ bool l_bit,
int num_fec_packets) {
// -- Generate FEC and ULP headers --
//
@@ -412,33 +418,34 @@ void ForwardErrorCorrection::GenerateFecUlpHeaders(
PacketList::const_iterator media_list_it = media_packet_list.begin();
Packet* media_packet = *media_list_it;
assert(media_packet != NULL);
- int num_maskBytes = l_bit ? kMaskSizeLBitSet : kMaskSizeLBitClear;
+ int num_mask_bytes = l_bit ? kMaskSizeLBitSet : kMaskSizeLBitClear;
const uint16_t ulp_header_size =
l_bit ? kUlpHeaderSizeLBitSet : kUlpHeaderSizeLBitClear;
for (int i = 0; i < num_fec_packets; ++i) {
+ Packet* const fec_packet = &generated_fec_packets_[i];
// -- FEC header --
- generated_fec_packets_[i].data[0] &= 0x7f; // Set E to zero.
+ fec_packet->data[0] &= 0x7f; // Set E to zero.
if (l_bit == 0) {
- generated_fec_packets_[i].data[0] &= 0xbf; // Clear the L bit.
+ fec_packet->data[0] &= 0xbf; // Clear the L bit.
} else {
- generated_fec_packets_[i].data[0] |= 0x40; // Set the L bit.
+ fec_packet->data[0] |= 0x40; // Set the L bit.
}
// Two byte sequence number from first RTP packet to SN base.
// We use the same sequence number base for every FEC packet,
// but that's not required in general.
- memcpy(&generated_fec_packets_[i].data[2], &media_packet->data[2], 2);
+ memcpy(&fec_packet->data[2], &media_packet->data[2], 2);
// -- ULP header --
// Copy the payload size to the protection length field.
// (We protect the entire packet.)
ByteWriter<uint16_t>::WriteBigEndian(
- &generated_fec_packets_[i].data[10],
- generated_fec_packets_[i].length - kFecHeaderSize - ulp_header_size);
+ &fec_packet->data[10],
+ fec_packet->length - kFecHeaderSize - ulp_header_size);
// Copy the packet mask.
- memcpy(&generated_fec_packets_[i].data[12], &packet_mask[i * num_maskBytes],
- num_maskBytes);
+ memcpy(&fec_packet->data[12], &packet_mask[i * num_mask_bytes],
+ num_mask_bytes);
}
}
@@ -460,7 +467,7 @@ void ForwardErrorCorrection::ResetState(
ProtectedPacketList::iterator protected_packet_list_it;
protected_packet_list_it = fec_packet->protected_pkt_list.begin();
while (protected_packet_list_it != fec_packet->protected_pkt_list.end()) {
- delete* protected_packet_list_it;
+ delete *protected_packet_list_it;
protected_packet_list_it =
fec_packet->protected_pkt_list.erase(protected_packet_list_it);
}
@@ -472,7 +479,8 @@ void ForwardErrorCorrection::ResetState(
}
void ForwardErrorCorrection::InsertMediaPacket(
- ReceivedPacket* rx_packet, RecoveredPacketList* recovered_packet_list) {
+ ReceivedPacket* rx_packet,
+ RecoveredPacketList* recovered_packet_list) {
RecoveredPacketList::iterator recovered_packet_list_it =
recovered_packet_list->begin();
@@ -538,9 +546,9 @@ void ForwardErrorCorrection::InsertFECPacket(
const uint16_t seq_num_base =
ByteReader<uint16_t>::ReadBigEndian(&fec_packet->pkt->data[2]);
- const uint16_t maskSizeBytes =
- (fec_packet->pkt->data[0] & 0x40) ? kMaskSizeLBitSet
- : kMaskSizeLBitClear; // L bit set?
+ const uint16_t maskSizeBytes = (fec_packet->pkt->data[0] & 0x40)
+ ? kMaskSizeLBitSet
+ : kMaskSizeLBitClear; // L bit set?
for (uint16_t byte_idx = 0; byte_idx < maskSizeBytes; ++byte_idx) {
uint8_t packet_mask = fec_packet->pkt->data[12 + byte_idx];
@@ -574,7 +582,8 @@ void ForwardErrorCorrection::InsertFECPacket(
}
void ForwardErrorCorrection::AssignRecoveredPackets(
- FecPacket* fec_packet, const RecoveredPacketList* recovered_packets) {
+ FecPacket* fec_packet,
+ const RecoveredPacketList* recovered_packets) {
// Search for missing packets which have arrived or have been recovered by
// another FEC packet.
ProtectedPacketList* not_recovered = &fec_packet->protected_pkt_list;
@@ -599,7 +608,6 @@ void ForwardErrorCorrection::AssignRecoveredPackets(
void ForwardErrorCorrection::InsertPackets(
ReceivedPacketList* received_packet_list,
RecoveredPacketList* recovered_packet_list) {
-
while (!received_packet_list->empty()) {
ReceivedPacket* rx_packet = received_packet_list->front();
@@ -611,9 +619,9 @@ void ForwardErrorCorrection::InsertPackets(
// old FEC packets based on timestamp information or better sequence number
// thresholding (e.g., to distinguish between wrap-around and reordering).
if (!fec_packet_list_.empty()) {
- uint16_t seq_num_diff = abs(
- static_cast<int>(rx_packet->seq_num) -
- static_cast<int>(fec_packet_list_.front()->seq_num));
+ uint16_t seq_num_diff =
+ abs(static_cast<int>(rx_packet->seq_num) -
+ static_cast<int>(fec_packet_list_.front()->seq_num));
if (seq_num_diff > 0x3fff) {
DiscardFECPacket(fec_packet_list_.front());
fec_packet_list_.pop_front();
@@ -637,9 +645,9 @@ void ForwardErrorCorrection::InsertPackets(
bool ForwardErrorCorrection::InitRecovery(const FecPacket* fec_packet,
RecoveredPacket* recovered) {
// This is the first packet which we try to recover with.
- const uint16_t ulp_header_size =
- fec_packet->pkt->data[0] & 0x40 ? kUlpHeaderSizeLBitSet
- : kUlpHeaderSizeLBitClear; // L bit set?
+ const uint16_t ulp_header_size = fec_packet->pkt->data[0] & 0x40
+ ? kUlpHeaderSizeLBitSet
+ : kUlpHeaderSizeLBitClear; // L bit set?
if (fec_packet->pkt->length <
static_cast<size_t>(kFecHeaderSize + ulp_header_size)) {
LOG(LS_WARNING)
diff --git a/webrtc/modules/rtp_rtcp/source/forward_error_correction.h b/webrtc/modules/rtp_rtcp/source/forward_error_correction.h
index f92f014db3..9ba6ce0438 100644
--- a/webrtc/modules/rtp_rtcp/source/forward_error_correction.h
+++ b/webrtc/modules/rtp_rtcp/source/forward_error_correction.h
@@ -15,7 +15,7 @@
#include <vector>
#include "webrtc/base/scoped_ref_ptr.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "webrtc/system_wrappers/include/ref_count.h"
#include "webrtc/typedefs.h"
diff --git a/webrtc/modules/rtp_rtcp/source/forward_error_correction_internal.cc b/webrtc/modules/rtp_rtcp/source/forward_error_correction_internal.cc
index 6d9be90de1..fae59078b1 100644
--- a/webrtc/modules/rtp_rtcp/source/forward_error_correction_internal.cc
+++ b/webrtc/modules/rtp_rtcp/source/forward_error_correction_internal.cc
@@ -17,6 +17,8 @@
#include "webrtc/modules/rtp_rtcp/source/fec_private_tables_random.h"
namespace {
+using webrtc::fec_private_tables::kPacketMaskBurstyTbl;
+using webrtc::fec_private_tables::kPacketMaskRandomTbl;
// Allow for different modes of protection for packets in UEP case.
enum ProtectionMode {
@@ -37,8 +39,11 @@ enum ProtectionMode {
// [0, num_rows * num_sub_mask_bytes]
// \param[out] packet_mask A pointer to hold the output mask, of size
// [0, x * num_mask_bytes], where x >= num_rows.
-void FitSubMask(int num_mask_bytes, int num_sub_mask_bytes, int num_rows,
- const uint8_t* sub_mask, uint8_t* packet_mask) {
+void FitSubMask(int num_mask_bytes,
+ int num_sub_mask_bytes,
+ int num_rows,
+ const uint8_t* sub_mask,
+ uint8_t* packet_mask) {
if (num_mask_bytes == num_sub_mask_bytes) {
memcpy(packet_mask, sub_mask, num_rows * num_sub_mask_bytes);
} else {
@@ -70,13 +75,15 @@ void FitSubMask(int num_mask_bytes, int num_sub_mask_bytes, int num_rows,
// \param[out] packet_mask A pointer to hold the output mask, of size
// [0, x * num_mask_bytes],
// where x >= end_row_fec.
-// TODO (marpan): This function is doing three things at the same time:
+// TODO(marpan): This function is doing three things at the same time:
// shift within a byte, byte shift and resizing.
// Split up into subroutines.
-void ShiftFitSubMask(int num_mask_bytes, int res_mask_bytes,
- int num_column_shift, int end_row, const uint8_t* sub_mask,
+void ShiftFitSubMask(int num_mask_bytes,
+ int res_mask_bytes,
+ int num_column_shift,
+ int end_row,
+ const uint8_t* sub_mask,
uint8_t* packet_mask) {
-
// Number of bit shifts within a byte
const int num_bit_shifts = (num_column_shift % 8);
const int num_byte_shifts = num_column_shift >> 3;
@@ -128,7 +135,6 @@ void ShiftFitSubMask(int num_mask_bytes, int res_mask_bytes,
// For the first byte in the row (j=0 case).
shift_right_curr_byte = sub_mask[pkt_mask_idx2] >> num_bit_shifts;
packet_mask[pkt_mask_idx] = shift_right_curr_byte;
-
}
}
} // namespace
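
As the TODO notes, ShiftFitSubMask combines a whole-byte shift, a residual bit shift, and a resize. For a single mask row, the bit-carrying part reduces to the following sketch; ShiftRowRight is a hypothetical helper, not the function above:

#include <stdint.h>

void ShiftRowRight(const uint8_t* sub, uint8_t* out, int num_bytes,
                   int shift) {
  const int byte_shift = shift >> 3;  // whole bytes to move right
  const int bit_shift = shift & 7;    // remaining bits within a byte
  for (int j = num_bytes - 1; j >= byte_shift; --j) {
    // Take the source byte, shifted right, and pull the low bits of its
    // left neighbor in as the new high bits.
    uint8_t v = sub[j - byte_shift] >> bit_shift;
    if (bit_shift > 0 && j - byte_shift - 1 >= 0)
      v |= static_cast<uint8_t>(sub[j - byte_shift - 1] << (8 - bit_shift));
    out[j] = v;
  }
  for (int j = 0; j < byte_shift; ++j)
    out[j] = 0;  // vacated leading bytes
}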
@@ -151,7 +157,9 @@ FecMaskType PacketMaskTable::InitMaskType(FecMaskType fec_mask_type,
assert(num_media_packets <= static_cast<int>(sizeof(kPacketMaskRandomTbl) /
sizeof(*kPacketMaskRandomTbl)));
switch (fec_mask_type) {
- case kFecMaskRandom: { return kFecMaskRandom; }
+ case kFecMaskRandom: {
+ return kFecMaskRandom;
+ }
case kFecMaskBursty: {
int max_media_packets = static_cast<int>(sizeof(kPacketMaskBurstyTbl) /
sizeof(*kPacketMaskBurstyTbl));
@@ -170,17 +178,24 @@ FecMaskType PacketMaskTable::InitMaskType(FecMaskType fec_mask_type,
// |fec_mask_type|.
const uint8_t*** PacketMaskTable::InitMaskTable(FecMaskType fec_mask_type) {
switch (fec_mask_type) {
- case kFecMaskRandom: { return kPacketMaskRandomTbl; }
- case kFecMaskBursty: { return kPacketMaskBurstyTbl; }
+ case kFecMaskRandom: {
+ return kPacketMaskRandomTbl;
+ }
+ case kFecMaskBursty: {
+ return kPacketMaskBurstyTbl;
+ }
}
assert(false);
return kPacketMaskRandomTbl;
}
// Remaining protection after important (first partition) packet protection
-void RemainingPacketProtection(int num_media_packets, int num_fec_remaining,
- int num_fec_for_imp_packets, int num_mask_bytes,
- ProtectionMode mode, uint8_t* packet_mask,
+void RemainingPacketProtection(int num_media_packets,
+ int num_fec_remaining,
+ int num_fec_for_imp_packets,
+ int num_mask_bytes,
+ ProtectionMode mode,
+ uint8_t* packet_mask,
const PacketMaskTable& mask_table) {
if (mode == kModeNoOverlap) {
// sub_mask21
@@ -191,8 +206,10 @@ void RemainingPacketProtection(int num_media_packets, int num_fec_remaining,
const int res_mask_bytes =
(l_bit == 1) ? kMaskSizeLBitSet : kMaskSizeLBitClear;
- const uint8_t* packet_mask_sub_21 = mask_table.fec_packet_mask_table()[
- num_media_packets - num_fec_for_imp_packets - 1][num_fec_remaining - 1];
+ const uint8_t* packet_mask_sub_21 =
+ mask_table.fec_packet_mask_table()[num_media_packets -
+ num_fec_for_imp_packets -
+ 1][num_fec_remaining - 1];
ShiftFitSubMask(num_mask_bytes, res_mask_bytes, num_fec_for_imp_packets,
(num_fec_for_imp_packets + num_fec_remaining),
@@ -201,8 +218,9 @@ void RemainingPacketProtection(int num_media_packets, int num_fec_remaining,
} else if (mode == kModeOverlap || mode == kModeBiasFirstPacket) {
// sub_mask22
- const uint8_t* packet_mask_sub_22 = mask_table
- .fec_packet_mask_table()[num_media_packets - 1][num_fec_remaining - 1];
+ const uint8_t* packet_mask_sub_22 =
+ mask_table.fec_packet_mask_table()[num_media_packets -
+ 1][num_fec_remaining - 1];
FitSubMask(num_mask_bytes, num_mask_bytes, num_fec_remaining,
packet_mask_sub_22,
@@ -217,41 +235,42 @@ void RemainingPacketProtection(int num_media_packets, int num_fec_remaining,
} else {
assert(false);
}
-
}
// Protection for important (first partition) packets
-void ImportantPacketProtection(int num_fec_for_imp_packets, int num_imp_packets,
- int num_mask_bytes, uint8_t* packet_mask,
+void ImportantPacketProtection(int num_fec_for_imp_packets,
+ int num_imp_packets,
+ int num_mask_bytes,
+ uint8_t* packet_mask,
const PacketMaskTable& mask_table) {
const int l_bit = num_imp_packets > 16 ? 1 : 0;
const int num_imp_mask_bytes =
(l_bit == 1) ? kMaskSizeLBitSet : kMaskSizeLBitClear;
// Get sub_mask1 from table
- const uint8_t* packet_mask_sub_1 = mask_table.fec_packet_mask_table()[
- num_imp_packets - 1][num_fec_for_imp_packets - 1];
+ const uint8_t* packet_mask_sub_1 =
+ mask_table.fec_packet_mask_table()[num_imp_packets -
+ 1][num_fec_for_imp_packets - 1];
FitSubMask(num_mask_bytes, num_imp_mask_bytes, num_fec_for_imp_packets,
packet_mask_sub_1, packet_mask);
-
}
// This function sets the protection allocation: i.e., how many FEC packets
// to use for num_imp (1st partition) packets, given the: number of media
// packets, number of FEC packets, and number of 1st partition packets.
-int SetProtectionAllocation(int num_media_packets, int num_fec_packets,
+int SetProtectionAllocation(int num_media_packets,
+ int num_fec_packets,
int num_imp_packets) {
-
- // TODO (marpan): test different cases for protection allocation:
+ // TODO(marpan): test different cases for protection allocation:
// Use at most (alloc_par * num_fec_packets) for important packets.
float alloc_par = 0.5;
int max_num_fec_for_imp = alloc_par * num_fec_packets;
- int num_fec_for_imp_packets =
- (num_imp_packets < max_num_fec_for_imp) ? num_imp_packets
- : max_num_fec_for_imp;
+ int num_fec_for_imp_packets = (num_imp_packets < max_num_fec_for_imp)
+ ? num_imp_packets
+ : max_num_fec_for_imp;
// Fall back to equal protection in this case
if (num_fec_packets == 1 && (num_media_packets > 2 * num_imp_packets)) {
@@ -268,7 +287,7 @@ int SetProtectionAllocation(int num_media_packets, int num_fec_packets,
// Current version has 3 modes (options) to build UEP mask from existing ones.
// Various other combinations may be added in future versions.
// Longer-term, we may add another set of tables specifically for UEP cases.
-// TODO (marpan): also consider modification of masks for bursty loss cases.
+// TODO(marpan): also consider modification of masks for bursty loss cases.
// Mask is characterized as (#packets_to_protect, #fec_for_protection).
// Protection factor defined as: (#fec_for_protection / #packets_to_protect).
@@ -306,13 +325,14 @@ int SetProtectionAllocation(int num_media_packets, int num_fec_packets,
// Protection Mode 2 may be extended for a sort of sliding protection
// (i.e., vary the number/density of "1s" across columns) across packets.
-void UnequalProtectionMask(int num_media_packets, int num_fec_packets,
- int num_imp_packets, int num_mask_bytes,
+void UnequalProtectionMask(int num_media_packets,
+ int num_fec_packets,
+ int num_imp_packets,
+ int num_mask_bytes,
uint8_t* packet_mask,
const PacketMaskTable& mask_table) {
-
// Set Protection type and allocation
- // TODO (marpan): test/update for best mode and some combinations thereof.
+ // TODO(marpan): test/update for best mode and some combinations thereof.
ProtectionMode mode = kModeOverlap;
int num_fec_for_imp_packets = 0;
@@ -341,11 +361,12 @@ void UnequalProtectionMask(int num_media_packets, int num_fec_packets,
num_fec_for_imp_packets, num_mask_bytes, mode,
packet_mask, mask_table);
}
-
}
-void GeneratePacketMasks(int num_media_packets, int num_fec_packets,
- int num_imp_packets, bool use_unequal_protection,
+void GeneratePacketMasks(int num_media_packets,
+ int num_fec_packets,
+ int num_imp_packets,
+ bool use_unequal_protection,
const PacketMaskTable& mask_table,
uint8_t* packet_mask) {
assert(num_media_packets > 0);
@@ -361,16 +382,15 @@ void GeneratePacketMasks(int num_media_packets, int num_fec_packets,
    // Retrieve the corresponding mask table directly, for the equal-protection case.
// Mask = (k,n-k), with protection factor = (n-k)/k,
// where k = num_media_packets, n=total#packets, (n-k)=num_fec_packets.
- memcpy(packet_mask, mask_table.fec_packet_mask_table()[
- num_media_packets - 1][num_fec_packets - 1],
+ memcpy(packet_mask,
+ mask_table.fec_packet_mask_table()[num_media_packets -
+ 1][num_fec_packets - 1],
num_fec_packets * num_mask_bytes);
- } else //UEP case
- {
+ } else { // UEP case
UnequalProtectionMask(num_media_packets, num_fec_packets, num_imp_packets,
num_mask_bytes, packet_mask, mask_table);
-
} // End of UEP modification
-} //End of GetPacketMasks
+} // End of GetPacketMasks
} // namespace internal
} // namespace webrtc
diff --git a/webrtc/modules/rtp_rtcp/source/h264_bitstream_parser.h b/webrtc/modules/rtp_rtcp/source/h264_bitstream_parser.h
index 53ef2a61f4..28276afb72 100644
--- a/webrtc/modules/rtp_rtcp/source/h264_bitstream_parser.h
+++ b/webrtc/modules/rtp_rtcp/source/h264_bitstream_parser.h
@@ -11,8 +11,8 @@
#ifndef WEBRTC_MODULES_RTP_RTCP_SOURCE_H264_BITSTREAM_PARSER_H_
#define WEBRTC_MODULES_RTP_RTCP_SOURCE_H264_BITSTREAM_PARSER_H_
-#include <stdint.h>
#include <stddef.h>
+#include <stdint.h>
namespace rtc {
class BitBuffer;
diff --git a/webrtc/modules/rtp_rtcp/source/h264_sps_parser_unittest.cc b/webrtc/modules/rtp_rtcp/source/h264_sps_parser_unittest.cc
index ceadf4cb6b..7a7e3ed293 100644
--- a/webrtc/modules/rtp_rtcp/source/h264_sps_parser_unittest.cc
+++ b/webrtc/modules/rtp_rtcp/source/h264_sps_parser_unittest.cc
@@ -12,6 +12,7 @@
#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/base/arraysize.h"
#include "webrtc/base/bitbuffer.h"
namespace webrtc {
@@ -121,7 +122,7 @@ TEST(H264SpsParserTest, TestSampleSPSHdLandscape) {
const uint8_t buffer[] = {0x7A, 0x00, 0x1F, 0xBC, 0xD9, 0x40, 0x50, 0x05,
0xBA, 0x10, 0x00, 0x00, 0x03, 0x00, 0xC0, 0x00,
0x00, 0x2A, 0xE0, 0xF1, 0x83, 0x19, 0x60};
- H264SpsParser parser = H264SpsParser(buffer, ARRAY_SIZE(buffer));
+ H264SpsParser parser = H264SpsParser(buffer, arraysize(buffer));
EXPECT_TRUE(parser.Parse());
EXPECT_EQ(1280u, parser.width());
EXPECT_EQ(720u, parser.height());
@@ -133,7 +134,7 @@ TEST(H264SpsParserTest, TestSampleSPSVgaLandscape) {
const uint8_t buffer[] = {0x7A, 0x00, 0x1E, 0xBC, 0xD9, 0x40, 0xA0, 0x2F,
0xF8, 0x98, 0x40, 0x00, 0x00, 0x03, 0x01, 0x80,
0x00, 0x00, 0x56, 0x83, 0xC5, 0x8B, 0x65, 0x80};
- H264SpsParser parser = H264SpsParser(buffer, ARRAY_SIZE(buffer));
+ H264SpsParser parser = H264SpsParser(buffer, arraysize(buffer));
EXPECT_TRUE(parser.Parse());
EXPECT_EQ(640u, parser.width());
EXPECT_EQ(360u, parser.height());
@@ -145,7 +146,7 @@ TEST(H264SpsParserTest, TestSampleSPSWeirdResolution) {
const uint8_t buffer[] = {0x7A, 0x00, 0x0D, 0xBC, 0xD9, 0x43, 0x43, 0x3E,
0x5E, 0x10, 0x00, 0x00, 0x03, 0x00, 0x60, 0x00,
0x00, 0x15, 0xA0, 0xF1, 0x42, 0x99, 0x60};
- H264SpsParser parser = H264SpsParser(buffer, ARRAY_SIZE(buffer));
+ H264SpsParser parser = H264SpsParser(buffer, arraysize(buffer));
EXPECT_TRUE(parser.Parse());
EXPECT_EQ(200u, parser.width());
EXPECT_EQ(400u, parser.height());
@@ -154,7 +155,7 @@ TEST(H264SpsParserTest, TestSampleSPSWeirdResolution) {
TEST(H264SpsParserTest, TestSyntheticSPSQvgaLandscape) {
uint8_t buffer[kSpsBufferMaxSize] = {0};
GenerateFakeSps(320u, 180u, buffer);
- H264SpsParser parser = H264SpsParser(buffer, ARRAY_SIZE(buffer));
+ H264SpsParser parser = H264SpsParser(buffer, arraysize(buffer));
EXPECT_TRUE(parser.Parse());
EXPECT_EQ(320u, parser.width());
EXPECT_EQ(180u, parser.height());
@@ -163,7 +164,7 @@ TEST(H264SpsParserTest, TestSyntheticSPSQvgaLandscape) {
TEST(H264SpsParserTest, TestSyntheticSPSWeirdResolution) {
uint8_t buffer[kSpsBufferMaxSize] = {0};
GenerateFakeSps(156u, 122u, buffer);
- H264SpsParser parser = H264SpsParser(buffer, ARRAY_SIZE(buffer));
+ H264SpsParser parser = H264SpsParser(buffer, arraysize(buffer));
EXPECT_TRUE(parser.Parse());
EXPECT_EQ(156u, parser.width());
EXPECT_EQ(122u, parser.height());
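
ARRAY_SIZE here gives way to arraysize() from webrtc/base/arraysize.h. The idiom, sketched here from the Chromium original rather than quoted from this CL, counts elements via a function template whose return type carries the bound, so passing a pointer fails to compile instead of silently dividing sizeofs:

#include <cstddef>

// Declared only; never defined. The return type is a reference to a char
// array with one element per element of the argument array, so
// sizeof(ArraySizeHelper(x)) yields the element count at compile time.
template <typename T, size_t N>
char (&ArraySizeHelper(T (&array)[N]))[N];

#define arraysize(array) (sizeof(ArraySizeHelper(array)))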
diff --git a/webrtc/modules/rtp_rtcp/source/mock/mock_rtp_payload_strategy.h b/webrtc/modules/rtp_rtcp/source/mock/mock_rtp_payload_strategy.h
index f577cbaad1..011829cc6c 100644
--- a/webrtc/modules/rtp_rtcp/source/mock/mock_rtp_payload_strategy.h
+++ b/webrtc/modules/rtp_rtcp/source/mock/mock_rtp_payload_strategy.h
@@ -8,11 +8,11 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_RTP_RTCP_SOURCE_MOCK_MOCK_RTP_PAYLOAD_REGISTRY_H_
-#define WEBRTC_MODULES_RTP_RTCP_SOURCE_MOCK_MOCK_RTP_PAYLOAD_REGISTRY_H_
+#ifndef WEBRTC_MODULES_RTP_RTCP_SOURCE_MOCK_MOCK_RTP_PAYLOAD_STRATEGY_H_
+#define WEBRTC_MODULES_RTP_RTCP_SOURCE_MOCK_MOCK_RTP_PAYLOAD_STRATEGY_H_
#include "testing/gmock/include/gmock/gmock.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_payload_registry.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_payload_registry.h"
namespace webrtc {
@@ -23,7 +23,7 @@ class MockRTPPayloadStrategy : public RTPPayloadStrategy {
MOCK_CONST_METHOD4(PayloadIsCompatible,
bool(const RtpUtility::Payload& payload,
const uint32_t frequency,
- const uint8_t channels,
+ const size_t channels,
const uint32_t rate));
MOCK_CONST_METHOD2(UpdatePayloadRate,
void(RtpUtility::Payload* payload, const uint32_t rate));
@@ -34,10 +34,10 @@ class MockRTPPayloadStrategy : public RTPPayloadStrategy {
RtpUtility::Payload*(const char payloadName[RTP_PAYLOAD_NAME_SIZE],
const int8_t payloadType,
const uint32_t frequency,
- const uint8_t channels,
+ const size_t channels,
const uint32_t rate));
};
} // namespace webrtc
-#endif // WEBRTC_MODULES_RTP_RTCP_SOURCE_MOCK_MOCK_RTP_PAYLOAD_REGISTRY_H_
+#endif // WEBRTC_MODULES_RTP_RTCP_SOURCE_MOCK_MOCK_RTP_PAYLOAD_STRATEGY_H_
diff --git a/webrtc/modules/rtp_rtcp/source/nack_rtx_unittest.cc b/webrtc/modules/rtp_rtcp/source/nack_rtx_unittest.cc
index 07a3693507..e19c31bfec 100644
--- a/webrtc/modules/rtp_rtcp/source/nack_rtx_unittest.cc
+++ b/webrtc/modules/rtp_rtcp/source/nack_rtx_unittest.cc
@@ -16,15 +16,15 @@
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/common_types.h"
-#include "webrtc/modules/rtp_rtcp/interface/receive_statistics.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_header_parser.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_payload_registry.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_receiver.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
+#include "webrtc/modules/rtp_rtcp/include/receive_statistics.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_header_parser.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_payload_registry.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_receiver.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "webrtc/transport.h"
-using namespace webrtc;
+namespace webrtc {
const int kVideoNackListSize = 30;
const uint32_t kTestSsrc = 3456;
@@ -35,8 +35,7 @@ const int kNumFrames = 30;
const int kPayloadType = 123;
const int kRtxPayloadType = 98;
-class VerifyingRtxReceiver : public NullRtpData
-{
+class VerifyingRtxReceiver : public NullRtpData {
public:
VerifyingRtxReceiver() {}
@@ -54,7 +53,7 @@ class VerifyingRtxReceiver : public NullRtpData
class TestRtpFeedback : public NullRtpFeedback {
public:
- TestRtpFeedback(RtpRtcp* rtp_rtcp) : rtp_rtcp_(rtp_rtcp) {}
+ explicit TestRtpFeedback(RtpRtcp* rtp_rtcp) : rtp_rtcp_(rtp_rtcp) {}
virtual ~TestRtpFeedback() {}
void OnIncomingSSRCChanged(const uint32_t ssrc) override {
@@ -86,9 +85,7 @@ class RtxLoopBackTransport : public webrtc::Transport {
rtp_receiver_ = receiver;
}
- void DropEveryNthPacket(int n) {
- packet_loss_ = n;
- }
+ void DropEveryNthPacket(int n) { packet_loss_ = n; }
void DropConsecutivePackets(int start, int total) {
consecutive_drop_start_ = start;
@@ -100,15 +97,13 @@ class RtxLoopBackTransport : public webrtc::Transport {
size_t len,
const PacketOptions& options) override {
count_++;
- const unsigned char* ptr = static_cast<const unsigned char*>(data);
+ const unsigned char* ptr = static_cast<const unsigned char*>(data);
uint32_t ssrc = (ptr[8] << 24) + (ptr[9] << 16) + (ptr[10] << 8) + ptr[11];
- if (ssrc == rtx_ssrc_) count_rtx_ssrc_++;
+ if (ssrc == rtx_ssrc_)
+ count_rtx_ssrc_++;
uint16_t sequence_number = (ptr[2] << 8) + ptr[3];
size_t packet_length = len;
- // TODO(pbos): Figure out why this needs to be initialized. Likely this
- // is hiding a bug either in test setup or other code.
- // https://code.google.com/p/webrtc/issues/detail?id=3183
- uint8_t restored_packet[1500] = {0};
+ uint8_t restored_packet[1500];
RTPHeader header;
rtc::scoped_ptr<RtpHeaderParser> parser(RtpHeaderParser::Create());
if (!parser->Parse(ptr, len, &header)) {
@@ -136,21 +131,19 @@ class RtxLoopBackTransport : public webrtc::Transport {
if (!parser->Parse(restored_packet, packet_length, &header)) {
return false;
}
+ ptr = restored_packet;
} else {
rtp_payload_registry_->SetIncomingPayloadType(header);
}
- const uint8_t* restored_packet_payload =
- restored_packet + header.headerLength;
- packet_length -= header.headerLength;
PayloadUnion payload_specific;
if (!rtp_payload_registry_->GetPayloadSpecifics(header.payloadType,
&payload_specific)) {
return false;
}
- if (!rtp_receiver_->IncomingRtpPacket(header, restored_packet_payload,
- packet_length, payload_specific,
- true)) {
+ if (!rtp_receiver_->IncomingRtpPacket(header, ptr + header.headerLength,
+ packet_length - header.headerLength,
+ payload_specific, true)) {
return false;
}
return true;
@@ -194,8 +187,7 @@ class RtpRtcpRtxNackTest : public ::testing::Test {
rtp_feedback_.reset(new TestRtpFeedback(rtp_rtcp_module_));
rtp_receiver_.reset(RtpReceiver::CreateVideoReceiver(
- &fake_clock, &receiver_, rtp_feedback_.get(),
- &rtp_payload_registry_));
+ &fake_clock, &receiver_, rtp_feedback_.get(), &rtp_payload_registry_));
rtp_rtcp_module_->SetSSRC(kTestSsrc);
rtp_rtcp_module_->SetRTCPStatus(RtcpMode::kCompound);
@@ -215,11 +207,9 @@ class RtpRtcpRtxNackTest : public ::testing::Test {
EXPECT_EQ(0, rtp_rtcp_module_->RegisterSendPayload(video_codec));
rtp_rtcp_module_->SetRtxSendPayloadType(kRtxPayloadType, kPayloadType);
- EXPECT_EQ(0, rtp_receiver_->RegisterReceivePayload(video_codec.plName,
- video_codec.plType,
- 90000,
- 0,
- video_codec.maxBitrate));
+ EXPECT_EQ(0, rtp_receiver_->RegisterReceivePayload(
+ video_codec.plName, video_codec.plType, 90000, 0,
+ video_codec.maxBitrate));
rtp_payload_registry_.SetRtxPayloadType(kRtxPayloadType, kPayloadType);
for (size_t n = 0; n < payload_data_length; n++) {
@@ -230,8 +220,7 @@ class RtpRtcpRtxNackTest : public ::testing::Test {
int BuildNackList(uint16_t* nack_list) {
receiver_.sequence_numbers_.sort();
std::list<uint16_t> missing_sequence_numbers;
- std::list<uint16_t>::iterator it =
- receiver_.sequence_numbers_.begin();
+ std::list<uint16_t>::iterator it = receiver_.sequence_numbers_.begin();
while (it != receiver_.sequence_numbers_.end()) {
uint16_t sequence_number_1 = *it;
@@ -239,15 +228,14 @@ class RtpRtcpRtxNackTest : public ::testing::Test {
if (it != receiver_.sequence_numbers_.end()) {
uint16_t sequence_number_2 = *it;
// Add all missing sequence numbers to list
- for (uint16_t i = sequence_number_1 + 1; i < sequence_number_2;
- ++i) {
+ for (uint16_t i = sequence_number_1 + 1; i < sequence_number_2; ++i) {
missing_sequence_numbers.push_back(i);
}
}
}
int n = 0;
for (it = missing_sequence_numbers.begin();
- it != missing_sequence_numbers.end(); ++it) {
+ it != missing_sequence_numbers.end(); ++it) {
nack_list[n++] = (*it);
}
return n;
@@ -298,7 +286,7 @@ class RtpRtcpRtxNackTest : public ::testing::Test {
rtc::scoped_ptr<TestRtpFeedback> rtp_feedback_;
RtxLoopBackTransport transport_;
VerifyingRtxReceiver receiver_;
- uint8_t payload_data[65000];
+ uint8_t payload_data[65000];
size_t payload_data_length;
SimulatedClock fake_clock;
};
@@ -345,8 +333,10 @@ TEST_F(RtpRtcpRtxNackTest, RtxNack) {
RunRtxTest(kRtxRetransmitted, 10);
EXPECT_EQ(kTestSequenceNumber, *(receiver_.sequence_numbers_.begin()));
EXPECT_EQ(kTestSequenceNumber + kTestNumberOfPackets - 1,
- *(receiver_.sequence_numbers_.rbegin()));
+ *(receiver_.sequence_numbers_.rbegin()));
EXPECT_EQ(kTestNumberOfPackets, receiver_.sequence_numbers_.size());
EXPECT_EQ(kTestNumberOfRtxPackets, transport_.count_rtx_ssrc_);
EXPECT_TRUE(ExpectedPacketsReceived());
}
+
+} // namespace webrtc
diff --git a/webrtc/modules/rtp_rtcp/source/producer_fec_unittest.cc b/webrtc/modules/rtp_rtcp/source/producer_fec_unittest.cc
index 683b951f1e..be4b453454 100644
--- a/webrtc/modules/rtp_rtcp/source/producer_fec_unittest.cc
+++ b/webrtc/modules/rtp_rtcp/source/producer_fec_unittest.cc
@@ -9,8 +9,10 @@
*/
#include <list>
+#include <vector>
#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/modules/rtp_rtcp/source/byte_io.h"
#include "webrtc/modules/rtp_rtcp/source/fec_test_helper.h"
#include "webrtc/modules/rtp_rtcp/source/forward_error_correction.h"
#include "webrtc/modules/rtp_rtcp/source/producer_fec.h"
@@ -54,6 +56,53 @@ class ProducerFecTest : public ::testing::Test {
FrameGenerator* generator_;
};
+// Verifies bug found via fuzzing, where a gap in the packet sequence caused us
+// to move past the end of the current FEC packet mask byte without moving to
+// the next byte. That likely caused us to repeatedly read from the same byte,
+// and if that byte didn't protect packets we would generate empty FEC.
+TEST_F(ProducerFecTest, NoEmptyFecWithSeqNumGaps) {
+ struct Packet {
+ size_t header_size;
+ size_t payload_size;
+ uint16_t seq_num;
+ bool marker_bit;
+ };
+ std::vector<Packet> protected_packets;
+ protected_packets.push_back({15, 3, 41, 0});
+ protected_packets.push_back({14, 1, 43, 0});
+ protected_packets.push_back({19, 0, 48, 0});
+ protected_packets.push_back({19, 0, 50, 0});
+ protected_packets.push_back({14, 3, 51, 0});
+ protected_packets.push_back({13, 8, 52, 0});
+ protected_packets.push_back({19, 2, 53, 0});
+ protected_packets.push_back({12, 3, 54, 0});
+ protected_packets.push_back({21, 0, 55, 0});
+ protected_packets.push_back({13, 3, 57, 1});
+ FecProtectionParams params = {117, 0, 3, kFecMaskBursty};
+ producer_->SetFecParameters(&params, 0);
+ uint8_t packet[28] = {0};
+ for (Packet p : protected_packets) {
+ if (p.marker_bit) {
+ packet[1] |= 0x80;
+ } else {
+ packet[1] &= ~0x80;
+ }
+ ByteWriter<uint16_t>::WriteBigEndian(&packet[2], p.seq_num);
+ producer_->AddRtpPacketAndGenerateFec(packet, p.payload_size,
+ p.header_size);
+ uint16_t num_fec_packets = producer_->NumAvailableFecPackets();
+ std::vector<RedPacket*> fec_packets;
+ if (num_fec_packets > 0) {
+ fec_packets =
+ producer_->GetFecPackets(kRedPayloadType, 99, 100, p.header_size);
+ EXPECT_EQ(num_fec_packets, fec_packets.size());
+ }
+ for (RedPacket* fec_packet : fec_packets) {
+ delete fec_packet;
+ }
+ }
+}
+
TEST_F(ProducerFecTest, OneFrameFec) {
// The number of media packets (|kNumPackets|), number of frames (one for
// this test), and the protection factor (|params->fec_rate|) are set to make
diff --git a/webrtc/modules/rtp_rtcp/source/receive_statistics_impl.cc b/webrtc/modules/rtp_rtcp/source/receive_statistics_impl.cc
index 8ac7e0a383..24f1e2c96e 100644
--- a/webrtc/modules/rtp_rtcp/source/receive_statistics_impl.cc
+++ b/webrtc/modules/rtp_rtcp/source/receive_statistics_impl.cc
@@ -14,7 +14,7 @@
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/modules/rtp_rtcp/source/bitrate.h"
-#include "webrtc/modules/rtp_rtcp/source/rtp_utility.h"
+#include "webrtc/modules/rtp_rtcp/source/time_util.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
namespace webrtc {
@@ -37,8 +37,6 @@ StreamStatisticianImpl::StreamStatisticianImpl(
cumulative_loss_(0),
jitter_q4_transmission_time_offset_(0),
last_receive_time_ms_(0),
- last_receive_time_secs_(0),
- last_receive_time_frac_(0),
last_received_timestamp_(0),
last_received_transmission_time_offset_(0),
received_seq_first_(0),
@@ -79,9 +77,7 @@ void StreamStatisticianImpl::UpdateCounters(const RTPHeader& header,
// are received, 4 will be ignored.
if (in_order) {
// Current time in samples.
- uint32_t receive_time_secs;
- uint32_t receive_time_frac;
- clock_->CurrentNtp(receive_time_secs, receive_time_frac);
+ NtpTime receive_time(*clock_);
// Wrong if we use RetransmitOfOldPacket.
if (receive_counters_.transmitted.packets > 1 &&
@@ -97,11 +93,10 @@ void StreamStatisticianImpl::UpdateCounters(const RTPHeader& header,
if (header.timestamp != last_received_timestamp_ &&
(receive_counters_.transmitted.packets -
receive_counters_.retransmitted.packets) > 1) {
- UpdateJitter(header, receive_time_secs, receive_time_frac);
+ UpdateJitter(header, receive_time);
}
last_received_timestamp_ = header.timestamp;
- last_receive_time_secs_ = receive_time_secs;
- last_receive_time_frac_ = receive_time_frac;
+ last_receive_time_ntp_ = receive_time;
last_receive_time_ms_ = clock_->TimeInMilliseconds();
}
@@ -113,14 +108,11 @@ void StreamStatisticianImpl::UpdateCounters(const RTPHeader& header,
}
void StreamStatisticianImpl::UpdateJitter(const RTPHeader& header,
- uint32_t receive_time_secs,
- uint32_t receive_time_frac) {
- uint32_t receive_time_rtp = RtpUtility::ConvertNTPTimeToRTP(
- receive_time_secs, receive_time_frac, header.payload_type_frequency);
+ NtpTime receive_time) {
+ uint32_t receive_time_rtp =
+ NtpToRtp(receive_time, header.payload_type_frequency);
uint32_t last_receive_time_rtp =
- RtpUtility::ConvertNTPTimeToRTP(last_receive_time_secs_,
- last_receive_time_frac_,
- header.payload_type_frequency);
+ NtpToRtp(last_receive_time_ntp_, header.payload_type_frequency);
int32_t time_diff_samples = (receive_time_rtp - last_receive_time_rtp) -
(header.timestamp - last_received_timestamp_);
@@ -267,6 +259,7 @@ RtcpStatistics StreamStatisticianImpl::CalculateRtcpStatistics() {
stats.fraction_lost = local_fraction_lost;
// We need a counter for cumulative loss too.
+ // TODO(danilchap): Ensure cumulative loss is below maximum value of 2^24.
cumulative_loss_ += missing;
stats.cumulative_lost = cumulative_loss_;
stats.extended_max_sequence_number =
@@ -319,8 +312,8 @@ void StreamStatisticianImpl::ProcessBitrate() {
void StreamStatisticianImpl::LastReceiveTimeNtp(uint32_t* secs,
uint32_t* frac) const {
CriticalSectionScoped cs(stream_lock_.get());
- *secs = last_receive_time_secs_;
- *frac = last_receive_time_frac_;
+ *secs = last_receive_time_ntp_.seconds();
+ *frac = last_receive_time_ntp_.fractions();
}
bool StreamStatisticianImpl::IsRetransmitOfOldPacket(
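The hunks above collapse the parallel last_receive_time_secs_/last_receive_time_frac_ fields into a single NtpTime value and route the jitter update through NtpToRtp(). A rough, self-contained sketch of the conversion those helpers perform (names are illustrative; the real API lives in system_wrappers and time_util.h):

  #include <cstdint>

  // NTP timestamps carry whole seconds plus fractions of 1/2^32 second.
  struct NtpTimeSketch {
    uint32_t seconds;
    uint32_t fractions;
  };

  // Convert an NTP timestamp to an RTP timestamp at |frequency| Hz,
  // roughly what the removed RtpUtility::ConvertNTPTimeToRTP computed.
  uint32_t NtpToRtpSketch(NtpTimeSketch ntp, uint32_t frequency) {
    uint32_t frac_part = static_cast<uint32_t>(
        (static_cast<uint64_t>(ntp.fractions) * frequency) >> 32);
    return ntp.seconds * frequency + frac_part;
  }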
diff --git a/webrtc/modules/rtp_rtcp/source/receive_statistics_impl.h b/webrtc/modules/rtp_rtcp/source/receive_statistics_impl.h
index fe42990fe9..025dcd42c7 100644
--- a/webrtc/modules/rtp_rtcp/source/receive_statistics_impl.h
+++ b/webrtc/modules/rtp_rtcp/source/receive_statistics_impl.h
@@ -11,13 +11,15 @@
#ifndef WEBRTC_MODULES_RTP_RTCP_SOURCE_RECEIVE_STATISTICS_IMPL_H_
#define WEBRTC_MODULES_RTP_RTCP_SOURCE_RECEIVE_STATISTICS_IMPL_H_
-#include "webrtc/modules/rtp_rtcp/interface/receive_statistics.h"
+#include "webrtc/modules/rtp_rtcp/include/receive_statistics.h"
#include <algorithm>
+#include <map>
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/modules/rtp_rtcp/source/bitrate.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
+#include "webrtc/system_wrappers/include/ntp_time.h"
namespace webrtc {
@@ -51,9 +53,7 @@ class StreamStatisticianImpl : public StreamStatistician {
private:
bool InOrderPacketInternal(uint16_t sequence_number) const;
RtcpStatistics CalculateRtcpStatistics();
- void UpdateJitter(const RTPHeader& header,
- uint32_t receive_time_secs,
- uint32_t receive_time_frac);
+ void UpdateJitter(const RTPHeader& header, NtpTime receive_time);
void UpdateCounters(const RTPHeader& rtp_header,
size_t packet_length,
bool retransmitted);
@@ -72,8 +72,7 @@ class StreamStatisticianImpl : public StreamStatistician {
uint32_t jitter_q4_transmission_time_offset_;
int64_t last_receive_time_ms_;
- uint32_t last_receive_time_secs_;
- uint32_t last_receive_time_frac_;
+ NtpTime last_receive_time_ntp_;
uint32_t last_received_timestamp_;
int32_t last_received_transmission_time_offset_;
uint16_t received_seq_first_;
diff --git a/webrtc/modules/rtp_rtcp/source/receive_statistics_unittest.cc b/webrtc/modules/rtp_rtcp/source/receive_statistics_unittest.cc
index 5b522281df..c265c17c04 100644
--- a/webrtc/modules/rtp_rtcp/source/receive_statistics_unittest.cc
+++ b/webrtc/modules/rtp_rtcp/source/receive_statistics_unittest.cc
@@ -11,7 +11,7 @@
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/modules/rtp_rtcp/interface/receive_statistics.h"
+#include "webrtc/modules/rtp_rtcp/include/receive_statistics.h"
#include "webrtc/system_wrappers/include/clock.h"
namespace webrtc {
diff --git a/webrtc/modules/rtp_rtcp/source/remote_ntp_time_estimator.cc b/webrtc/modules/rtp_rtcp/source/remote_ntp_time_estimator.cc
index 74fc9cdc56..ccc15ec417 100644
--- a/webrtc/modules/rtp_rtcp/source/remote_ntp_time_estimator.cc
+++ b/webrtc/modules/rtp_rtcp/source/remote_ntp_time_estimator.cc
@@ -8,7 +8,7 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/rtp_rtcp/interface/remote_ntp_time_estimator.h"
+#include "webrtc/modules/rtp_rtcp/include/remote_ntp_time_estimator.h"
#include "webrtc/base/logging.h"
#include "webrtc/system_wrappers/include/clock.h"
diff --git a/webrtc/modules/rtp_rtcp/source/remote_ntp_time_estimator_unittest.cc b/webrtc/modules/rtp_rtcp/source/remote_ntp_time_estimator_unittest.cc
index bc9cf2ee39..797c7883a9 100644
--- a/webrtc/modules/rtp_rtcp/source/remote_ntp_time_estimator_unittest.cc
+++ b/webrtc/modules/rtp_rtcp/source/remote_ntp_time_estimator_unittest.cc
@@ -11,7 +11,7 @@
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/common_types.h"
-#include "webrtc/modules/rtp_rtcp/interface/remote_ntp_time_estimator.h"
+#include "webrtc/modules/rtp_rtcp/include/remote_ntp_time_estimator.h"
#include "webrtc/system_wrappers/include/clock.h"
using ::testing::_;
diff --git a/webrtc/modules/rtp_rtcp/source/rtcp_format_remb_unittest.cc b/webrtc/modules/rtp_rtcp/source/rtcp_format_remb_unittest.cc
index 7a7645fd1b..87c0259b3e 100644
--- a/webrtc/modules/rtp_rtcp/source/rtcp_format_remb_unittest.cc
+++ b/webrtc/modules/rtp_rtcp/source/rtcp_format_remb_unittest.cc
@@ -21,16 +21,13 @@
#include "webrtc/test/null_transport.h"
#include "webrtc/typedefs.h"
+namespace webrtc {
namespace {
-using namespace webrtc;
-
-
class TestTransport : public Transport {
public:
- TestTransport(RTCPReceiver* rtcp_receiver) :
- rtcp_receiver_(rtcp_receiver) {
- }
+ explicit TestTransport(RTCPReceiver* rtcp_receiver)
+ : rtcp_receiver_(rtcp_receiver) {}
bool SendRtp(const uint8_t* /*data*/,
size_t /*len*/,
@@ -38,9 +35,8 @@ class TestTransport : public Transport {
return false;
}
bool SendRtcp(const uint8_t* packet, size_t packetLength) override {
- RTCPUtility::RTCPParserV2 rtcpParser((uint8_t*)packet,
- packetLength,
- true); // Allow non-compound RTCP
+ RTCPUtility::RTCPParserV2 rtcpParser(packet, packetLength,
+ true); // Allow non-compound RTCP
EXPECT_TRUE(rtcpParser.IsValid());
RTCPHelp::RTCPPacketInformation rtcpPacketInformation;
@@ -53,11 +49,11 @@ class TestTransport : public Transport {
rtcpPacketInformation.receiverEstimatedMaxBitrate);
return true;
}
+
private:
RTCPReceiver* rtcp_receiver_;
};
-
class RtcpFormatRembTest : public ::testing::Test {
protected:
RtcpFormatRembTest()
@@ -134,3 +130,4 @@ TEST_F(RtcpFormatRembTest, TestCompund) {
EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state, kRtcpRemb));
}
} // namespace
+} // namespace webrtc
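One effect of the `explicit` keyword added to TestTransport's constructor: a one-argument constructor otherwise doubles as an implicit conversion from the pointer type. A minimal illustration with stand-in types (not the actual test classes):

  struct Receiver {};
  struct Transport {
    explicit Transport(Receiver* r) : r_(r) {}
    Receiver* r_;
  };

  void Send(Transport) {}

  void Demo(Receiver* r) {
    Send(Transport(r));  // OK: explicit construction.
    // Send(r);          // Would compile without 'explicit'; now an error.
  }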
diff --git a/webrtc/modules/rtp_rtcp/source/rtcp_packet.cc b/webrtc/modules/rtp_rtcp/source/rtcp_packet.cc
index 792caa7b8b..eef2978371 100644
--- a/webrtc/modules/rtp_rtcp/source/rtcp_packet.cc
+++ b/webrtc/modules/rtp_rtcp/source/rtcp_packet.cc
@@ -10,6 +10,8 @@
#include "webrtc/modules/rtp_rtcp/source/rtcp_packet.h"
+#include <algorithm>
+
#include "webrtc/base/checks.h"
#include "webrtc/base/logging.h"
#include "webrtc/modules/rtp_rtcp/source/byte_io.h"
@@ -19,38 +21,25 @@ using webrtc::RTCPUtility::kBtReceiverReferenceTime;
using webrtc::RTCPUtility::kBtVoipMetric;
using webrtc::RTCPUtility::PT_APP;
-using webrtc::RTCPUtility::PT_BYE;
using webrtc::RTCPUtility::PT_IJ;
using webrtc::RTCPUtility::PT_PSFB;
-using webrtc::RTCPUtility::PT_RR;
using webrtc::RTCPUtility::PT_RTPFB;
using webrtc::RTCPUtility::PT_SDES;
using webrtc::RTCPUtility::PT_SR;
using webrtc::RTCPUtility::PT_XR;
using webrtc::RTCPUtility::RTCPPacketAPP;
-using webrtc::RTCPUtility::RTCPPacketBYE;
using webrtc::RTCPUtility::RTCPPacketPSFBAPP;
using webrtc::RTCPUtility::RTCPPacketPSFBFIR;
using webrtc::RTCPUtility::RTCPPacketPSFBFIRItem;
-using webrtc::RTCPUtility::RTCPPacketPSFBPLI;
using webrtc::RTCPUtility::RTCPPacketPSFBREMBItem;
using webrtc::RTCPUtility::RTCPPacketPSFBRPSI;
-using webrtc::RTCPUtility::RTCPPacketPSFBSLI;
-using webrtc::RTCPUtility::RTCPPacketPSFBSLIItem;
using webrtc::RTCPUtility::RTCPPacketReportBlockItem;
-using webrtc::RTCPUtility::RTCPPacketRR;
using webrtc::RTCPUtility::RTCPPacketRTPFBNACK;
using webrtc::RTCPUtility::RTCPPacketRTPFBNACKItem;
-using webrtc::RTCPUtility::RTCPPacketRTPFBTMMBN;
-using webrtc::RTCPUtility::RTCPPacketRTPFBTMMBNItem;
-using webrtc::RTCPUtility::RTCPPacketRTPFBTMMBR;
-using webrtc::RTCPUtility::RTCPPacketRTPFBTMMBRItem;
using webrtc::RTCPUtility::RTCPPacketSR;
using webrtc::RTCPUtility::RTCPPacketXRDLRRReportBlockItem;
-using webrtc::RTCPUtility::RTCPPacketXRReceiverReferenceTimeItem;
using webrtc::RTCPUtility::RTCPPacketXR;
-using webrtc::RTCPUtility::RTCPPacketXRVOIPMetricItem;
namespace webrtc {
namespace rtcp {
@@ -122,21 +111,6 @@ void CreateSenderReport(const RTCPPacketSR& sr,
AssignUWord32(buffer, pos, sr.SenderOctetCount);
}
-// Receiver report (RR), header (RFC 3550).
-//
-// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// |V=2|P| RC | PT=RR=201 | length |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | SSRC of packet sender |
-// +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
-
-void CreateReceiverReport(const RTCPPacketRR& rr,
- uint8_t* buffer,
- size_t* pos) {
- AssignUWord32(buffer, pos, rr.SenderSSRC);
-}
-
// Report block (RFC 3550).
//
// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
@@ -154,43 +128,15 @@ void CreateReceiverReport(const RTCPPacketRR& rr,
// | delay since last SR (DLSR) |
// +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
-void CreateReportBlocks(const std::vector<RTCPPacketReportBlockItem>& blocks,
+void CreateReportBlocks(const std::vector<ReportBlock>& blocks,
uint8_t* buffer,
size_t* pos) {
- for (std::vector<RTCPPacketReportBlockItem>::const_iterator
- it = blocks.begin(); it != blocks.end(); ++it) {
- AssignUWord32(buffer, pos, (*it).SSRC);
- AssignUWord8(buffer, pos, (*it).FractionLost);
- AssignUWord24(buffer, pos, (*it).CumulativeNumOfPacketsLost);
- AssignUWord32(buffer, pos, (*it).ExtendedHighestSequenceNumber);
- AssignUWord32(buffer, pos, (*it).Jitter);
- AssignUWord32(buffer, pos, (*it).LastSR);
- AssignUWord32(buffer, pos, (*it).DelayLastSR);
+ for (const ReportBlock& block : blocks) {
+ block.Create(buffer + *pos);
+ *pos += ReportBlock::kLength;
}
}
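The rewritten loop above delegates serialization to ReportBlock::Create, which emits the fixed 24-byte layout drawn in the comment. An illustrative serializer for that layout (not the real ReportBlock implementation, which uses the ByteWriter helpers):

  #include <cstdint>

  void WriteBigEndian32(uint8_t* p, uint32_t v) {
    p[0] = static_cast<uint8_t>(v >> 24);
    p[1] = static_cast<uint8_t>(v >> 16);
    p[2] = static_cast<uint8_t>(v >> 8);
    p[3] = static_cast<uint8_t>(v);
  }

  // 24 bytes, matching the RFC 3550 report block diagram above.
  void CreateReportBlockSketch(uint8_t buffer[24],
                               uint32_t ssrc,
                               uint8_t fraction_lost,
                               uint32_t cumulative_lost,  // 24-bit field
                               uint32_t ext_high_seq,
                               uint32_t jitter,
                               uint32_t last_sr,
                               uint32_t delay_since_last_sr) {
    WriteBigEndian32(&buffer[0], ssrc);
    buffer[4] = fraction_lost;
    buffer[5] = static_cast<uint8_t>(cumulative_lost >> 16);
    buffer[6] = static_cast<uint8_t>(cumulative_lost >> 8);
    buffer[7] = static_cast<uint8_t>(cumulative_lost);
    WriteBigEndian32(&buffer[8], ext_high_seq);
    WriteBigEndian32(&buffer[12], jitter);
    WriteBigEndian32(&buffer[16], last_sr);
    WriteBigEndian32(&buffer[20], delay_since_last_sr);
  }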
-// Transmission Time Offsets in RTP Streams (RFC 5450).
-//
-// 0 1 2 3
-// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// hdr |V=2|P| RC | PT=IJ=195 | length |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | inter-arrival jitter |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// . .
-// . .
-// . .
-// | inter-arrival jitter |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-
-void CreateIj(const std::vector<uint32_t>& ij_items,
- uint8_t* buffer,
- size_t* pos) {
- for (uint32_t item : ij_items)
- AssignUWord32(buffer, pos, item);
-}
-
// Source Description (SDES) (RFC 3550).
//
// 0 1 2 3
@@ -233,129 +179,6 @@ void CreateSdes(const std::vector<Sdes::Chunk>& chunks,
}
}
-// Bye packet (BYE) (RFC 3550).
-//
-// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// |V=2|P| SC | PT=BYE=203 | length |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | SSRC/CSRC |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// : ... :
-// +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
-// (opt) | length | reason for leaving ...
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-
-void CreateBye(const RTCPPacketBYE& bye,
- const std::vector<uint32_t>& csrcs,
- uint8_t* buffer,
- size_t* pos) {
- AssignUWord32(buffer, pos, bye.SenderSSRC);
- for (uint32_t csrc : csrcs)
- AssignUWord32(buffer, pos, csrc);
-}
-
-// Application-Defined packet (APP) (RFC 3550).
-//
-// 0 1 2 3
-// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// |V=2|P| subtype | PT=APP=204 | length |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | SSRC/CSRC |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | name (ASCII) |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | application-dependent data ...
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-
-void CreateApp(const RTCPPacketAPP& app,
- uint32_t ssrc,
- uint8_t* buffer,
- size_t* pos) {
- AssignUWord32(buffer, pos, ssrc);
- AssignUWord32(buffer, pos, app.Name);
- memcpy(buffer + *pos, app.Data, app.Size);
- *pos += app.Size;
-}
-
-// RFC 4585: Feedback format.
-//
-// Common packet format:
-//
-// 0 1 2 3
-// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// |V=2|P| FMT | PT | length |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | SSRC of packet sender |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | SSRC of media source |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// : Feedback Control Information (FCI) :
-// :
-//
-
-// Picture loss indication (PLI) (RFC 4585).
-//
-// FCI: no feedback control information.
-
-void CreatePli(const RTCPPacketPSFBPLI& pli,
- uint8_t* buffer,
- size_t* pos) {
- AssignUWord32(buffer, pos, pli.SenderSSRC);
- AssignUWord32(buffer, pos, pli.MediaSSRC);
-}
-
-// Slice loss indication (SLI) (RFC 4585).
-//
-// FCI:
-//
-// 0 1 2 3
-// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | First | Number | PictureID |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-
-void CreateSli(const RTCPPacketPSFBSLI& sli,
- const RTCPPacketPSFBSLIItem& sli_item,
- uint8_t* buffer,
- size_t* pos) {
- AssignUWord32(buffer, pos, sli.SenderSSRC);
- AssignUWord32(buffer, pos, sli.MediaSSRC);
-
- AssignUWord8(buffer, pos, sli_item.FirstMB >> 5);
- AssignUWord8(buffer, pos, (sli_item.FirstMB << 3) +
- ((sli_item.NumberOfMB >> 10) & 0x07));
- AssignUWord8(buffer, pos, sli_item.NumberOfMB >> 2);
- AssignUWord8(buffer, pos, (sli_item.NumberOfMB << 6) + sli_item.PictureId);
-}
-
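The removed CreateSli spreads three fields across four bytes: First (13 bits), Number (13 bits), and PictureID (6 bits) share one 32-bit FCI word. The same packing expressed as a single shift-and-or, for clarity (illustrative only):

  #include <cstdint>

  // First(13) | Number(13) | PictureID(6) == 32 bits, as in RFC 4585.
  uint32_t PackSliFciSketch(uint16_t first_mb,
                            uint16_t number_mb,
                            uint8_t picture_id) {
    return (static_cast<uint32_t>(first_mb & 0x1fff) << 19) |
           (static_cast<uint32_t>(number_mb & 0x1fff) << 6) |
           (picture_id & 0x3f);
  }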
-// Generic NACK (RFC 4585).
-//
-// FCI:
-//
-// 0 1 2 3
-// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | PID | BLP |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-
-void CreateNack(const RTCPPacketRTPFBNACK& nack,
- const std::vector<RTCPPacketRTPFBNACKItem>& nack_fields,
- size_t start_index,
- size_t end_index,
- uint8_t* buffer,
- size_t* pos) {
- AssignUWord32(buffer, pos, nack.SenderSSRC);
- AssignUWord32(buffer, pos, nack.MediaSSRC);
- for (size_t i = start_index; i < end_index; ++i) {
- const RTCPPacketRTPFBNACKItem& nack_item = nack_fields[i];
- AssignUWord16(buffer, pos, nack_item.PacketID);
- AssignUWord16(buffer, pos, nack_item.BitMask);
- }
-}
-
// Reference picture selection indication (RPSI) (RFC 4585).
//
// FCI:
@@ -407,66 +230,6 @@ void CreateFir(const RTCPPacketPSFBFIR& fir,
AssignUWord24(buffer, pos, 0);
}
-void CreateTmmbrItem(const RTCPPacketRTPFBTMMBRItem& tmmbr_item,
- uint8_t* buffer,
- size_t* pos) {
- uint32_t bitrate_bps = tmmbr_item.MaxTotalMediaBitRate * 1000;
- uint32_t mantissa = 0;
- uint8_t exp = 0;
- ComputeMantissaAnd6bitBase2Exponent(bitrate_bps, 17, &mantissa, &exp);
-
- AssignUWord32(buffer, pos, tmmbr_item.SSRC);
- AssignUWord8(buffer, pos, (exp << 2) + ((mantissa >> 15) & 0x03));
- AssignUWord8(buffer, pos, mantissa >> 7);
- AssignUWord8(buffer, pos, (mantissa << 1) +
- ((tmmbr_item.MeasuredOverhead >> 8) & 0x01));
- AssignUWord8(buffer, pos, tmmbr_item.MeasuredOverhead);
-}
-
-// Temporary Maximum Media Stream Bit Rate Request (TMMBR) (RFC 5104).
-//
-// FCI:
-//
-// 0 1 2 3
-// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | SSRC |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | MxTBR Exp | MxTBR Mantissa |Measured Overhead|
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-
-void CreateTmmbr(const RTCPPacketRTPFBTMMBR& tmmbr,
- const RTCPPacketRTPFBTMMBRItem& tmmbr_item,
- uint8_t* buffer,
- size_t* pos) {
- AssignUWord32(buffer, pos, tmmbr.SenderSSRC);
- AssignUWord32(buffer, pos, kUnusedMediaSourceSsrc0);
- CreateTmmbrItem(tmmbr_item, buffer, pos);
-}
-
-// Temporary Maximum Media Stream Bit Rate Notification (TMMBN) (RFC 5104).
-//
-// FCI:
-//
-// 0 1 2 3
-// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | SSRC |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | MxTBR Exp | MxTBR Mantissa |Measured Overhead|
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-
-void CreateTmmbn(const RTCPPacketRTPFBTMMBN& tmmbn,
- const std::vector<RTCPPacketRTPFBTMMBRItem>& tmmbn_items,
- uint8_t* buffer,
- size_t* pos) {
- AssignUWord32(buffer, pos, tmmbn.SenderSSRC);
- AssignUWord32(buffer, pos, kUnusedMediaSourceSsrc0);
- for (uint8_t i = 0; i < tmmbn_items.size(); ++i) {
- CreateTmmbrItem(tmmbn_items[i], buffer, pos);
- }
-}
-
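The removed TMMBR/TMMBN writers rely on ComputeMantissaAnd6bitBase2Exponent to squeeze a bitrate into a 6-bit exponent plus a 17-bit mantissa. A plausible sketch of that split (the real helper's exact behavior is not shown in this diff):

  #include <cstdint>

  // Find the smallest exponent so that value >> exp fits in mantissa_bits.
  void SplitMantissaExponentSketch(uint32_t value,
                                   uint32_t mantissa_bits,
                                   uint32_t* mantissa,
                                   uint8_t* exponent) {
    const uint32_t mantissa_max = (1u << mantissa_bits) - 1;
    uint8_t exp = 0;
    while ((value >> exp) > mantissa_max && exp < 31)
      ++exp;
    *mantissa = value >> exp;
    *exponent = exp;
  }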
// Receiver Estimated Max Bitrate (REMB) (draft-alvestrand-rmcat-remb).
//
// 0 1 2 3
@@ -529,130 +292,6 @@ void CreateXrHeader(const RTCPPacketXR& header,
AssignUWord32(buffer, pos, header.OriginatorSSRC);
}
-void CreateXrBlockHeader(uint8_t block_type,
- uint16_t block_length,
- uint8_t* buffer,
- size_t* pos) {
- AssignUWord8(buffer, pos, block_type);
- AssignUWord8(buffer, pos, 0);
- AssignUWord16(buffer, pos, block_length);
-}
-
-// Receiver Reference Time Report Block (RFC 3611).
-//
-// 0 1 2 3
-// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | BT=4 | reserved | block length = 2 |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | NTP timestamp, most significant word |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | NTP timestamp, least significant word |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-
-void CreateRrtr(const std::vector<RTCPPacketXRReceiverReferenceTimeItem>& rrtrs,
- uint8_t* buffer,
- size_t* pos) {
- const uint16_t kBlockLength = 2;
- for (std::vector<RTCPPacketXRReceiverReferenceTimeItem>::const_iterator it =
- rrtrs.begin(); it != rrtrs.end(); ++it) {
- CreateXrBlockHeader(kBtReceiverReferenceTime, kBlockLength, buffer, pos);
- AssignUWord32(buffer, pos, (*it).NTPMostSignificant);
- AssignUWord32(buffer, pos, (*it).NTPLeastSignificant);
- }
-}
-
-// DLRR Report Block (RFC 3611).
-//
-// 0 1 2 3
-// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | BT=5 | reserved | block length |
-// +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
-// | SSRC_1 (SSRC of first receiver) | sub-
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ block
-// | last RR (LRR) | 1
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | delay since last RR (DLRR) |
-// +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
-// | SSRC_2 (SSRC of second receiver) | sub-
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ block
-// : ... : 2
-
-void CreateDlrr(const std::vector<Xr::DlrrBlock>& dlrrs,
- uint8_t* buffer,
- size_t* pos) {
- for (std::vector<Xr::DlrrBlock>::const_iterator it = dlrrs.begin();
- it != dlrrs.end(); ++it) {
- if ((*it).empty()) {
- continue;
- }
- uint16_t block_length = 3 * (*it).size();
- CreateXrBlockHeader(kBtDlrr, block_length, buffer, pos);
- for (Xr::DlrrBlock::const_iterator it_block = (*it).begin();
- it_block != (*it).end(); ++it_block) {
- AssignUWord32(buffer, pos, (*it_block).SSRC);
- AssignUWord32(buffer, pos, (*it_block).LastRR);
- AssignUWord32(buffer, pos, (*it_block).DelayLastRR);
- }
- }
-}
-
-// VoIP Metrics Report Block (RFC 3611).
-//
-// 0 1 2 3
-// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | BT=7 | reserved | block length = 8 |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | SSRC of source |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | loss rate | discard rate | burst density | gap density |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | burst duration | gap duration |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | round trip delay | end system delay |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | signal level | noise level | RERL | Gmin |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | R factor | ext. R factor | MOS-LQ | MOS-CQ |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | RX config | reserved | JB nominal |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | JB maximum | JB abs max |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-
-void CreateVoipMetric(const std::vector<RTCPPacketXRVOIPMetricItem>& metrics,
- uint8_t* buffer,
- size_t* pos) {
- const uint16_t kBlockLength = 8;
- for (std::vector<RTCPPacketXRVOIPMetricItem>::const_iterator it =
- metrics.begin(); it != metrics.end(); ++it) {
- CreateXrBlockHeader(kBtVoipMetric, kBlockLength, buffer, pos);
- AssignUWord32(buffer, pos, (*it).SSRC);
- AssignUWord8(buffer, pos, (*it).lossRate);
- AssignUWord8(buffer, pos, (*it).discardRate);
- AssignUWord8(buffer, pos, (*it).burstDensity);
- AssignUWord8(buffer, pos, (*it).gapDensity);
- AssignUWord16(buffer, pos, (*it).burstDuration);
- AssignUWord16(buffer, pos, (*it).gapDuration);
- AssignUWord16(buffer, pos, (*it).roundTripDelay);
- AssignUWord16(buffer, pos, (*it).endSystemDelay);
- AssignUWord8(buffer, pos, (*it).signalLevel);
- AssignUWord8(buffer, pos, (*it).noiseLevel);
- AssignUWord8(buffer, pos, (*it).RERL);
- AssignUWord8(buffer, pos, (*it).Gmin);
- AssignUWord8(buffer, pos, (*it).Rfactor);
- AssignUWord8(buffer, pos, (*it).extRfactor);
- AssignUWord8(buffer, pos, (*it).MOSLQ);
- AssignUWord8(buffer, pos, (*it).MOSCQ);
- AssignUWord8(buffer, pos, (*it).RXconfig);
- AssignUWord8(buffer, pos, 0);
- AssignUWord16(buffer, pos, (*it).JBnominal);
- AssignUWord16(buffer, pos, (*it).JBmax);
- AssignUWord16(buffer, pos, (*it).JBabsMax);
- }
-}
} // namespace
void RtcpPacket::Append(RtcpPacket* packet) {
@@ -751,17 +390,6 @@ void RtcpPacket::CreateHeader(
AssignUWord16(buffer, pos, length);
}
-bool Empty::Create(uint8_t* packet,
- size_t* index,
- size_t max_length,
- RtcpPacket::PacketReadyCallback* callback) const {
- return true;
-}
-
-size_t Empty::BlockLength() const {
- return 0;
-}
-
bool SenderReport::Create(uint8_t* packet,
size_t* index,
size_t max_length,
@@ -781,58 +409,11 @@ bool SenderReport::WithReportBlock(const ReportBlock& block) {
LOG(LS_WARNING) << "Max report blocks reached.";
return false;
}
- report_blocks_.push_back(block.report_block_);
+ report_blocks_.push_back(block);
sr_.NumberOfReportBlocks = report_blocks_.size();
return true;
}
-bool ReceiverReport::Create(uint8_t* packet,
- size_t* index,
- size_t max_length,
- RtcpPacket::PacketReadyCallback* callback) const {
- while (*index + BlockLength() > max_length) {
- if (!OnBufferFull(packet, index, callback))
- return false;
- }
- CreateHeader(rr_.NumberOfReportBlocks, PT_RR, HeaderLength(), packet, index);
- CreateReceiverReport(rr_, packet, index);
- CreateReportBlocks(report_blocks_, packet, index);
- return true;
-}
-
-bool ReceiverReport::WithReportBlock(const ReportBlock& block) {
- if (report_blocks_.size() >= kMaxNumberOfReportBlocks) {
- LOG(LS_WARNING) << "Max report blocks reached.";
- return false;
- }
- report_blocks_.push_back(block.report_block_);
- rr_.NumberOfReportBlocks = report_blocks_.size();
- return true;
-}
-
-bool Ij::Create(uint8_t* packet,
- size_t* index,
- size_t max_length,
- RtcpPacket::PacketReadyCallback* callback) const {
- while (*index + BlockLength() > max_length) {
- if (!OnBufferFull(packet, index, callback))
- return false;
- }
- size_t length = ij_items_.size();
- CreateHeader(length, PT_IJ, length, packet, index);
- CreateIj(ij_items_, packet, index);
- return true;
-}
-
-bool Ij::WithJitterItem(uint32_t jitter) {
- if (ij_items_.size() >= kMaxNumberOfIjItems) {
- LOG(LS_WARNING) << "Max inter-arrival jitter items reached.";
- return false;
- }
- ij_items_.push_back(jitter);
- return true;
-}
-
bool Sdes::Create(uint8_t* packet,
size_t* index,
size_t max_length,
@@ -876,129 +457,6 @@ size_t Sdes::BlockLength() const {
return length;
}
-bool Bye::Create(uint8_t* packet,
- size_t* index,
- size_t max_length,
- RtcpPacket::PacketReadyCallback* callback) const {
- while (*index + BlockLength() > max_length) {
- if (!OnBufferFull(packet, index, callback))
- return false;
- }
- size_t length = HeaderLength();
- CreateHeader(length, PT_BYE, length, packet, index);
- CreateBye(bye_, csrcs_, packet, index);
- return true;
-}
-
-bool Bye::WithCsrc(uint32_t csrc) {
- if (csrcs_.size() >= kMaxNumberOfCsrcs) {
- LOG(LS_WARNING) << "Max CSRC size reached.";
- return false;
- }
- csrcs_.push_back(csrc);
- return true;
-}
-
-bool App::Create(uint8_t* packet,
- size_t* index,
- size_t max_length,
- RtcpPacket::PacketReadyCallback* callback) const {
- while (*index + BlockLength() > max_length) {
- if (!OnBufferFull(packet, index, callback))
- return false;
- }
- CreateHeader(app_.SubType, PT_APP, HeaderLength(), packet, index);
- CreateApp(app_, ssrc_, packet, index);
- return true;
-}
-
-bool Pli::Create(uint8_t* packet,
- size_t* index,
- size_t max_length,
- RtcpPacket::PacketReadyCallback* callback) const {
- while (*index + BlockLength() > max_length) {
- if (!OnBufferFull(packet, index, callback))
- return false;
- }
- const uint8_t kFmt = 1;
- CreateHeader(kFmt, PT_PSFB, HeaderLength(), packet, index);
- CreatePli(pli_, packet, index);
- return true;
-}
-
-bool Sli::Create(uint8_t* packet,
- size_t* index,
- size_t max_length,
- RtcpPacket::PacketReadyCallback* callback) const {
- while (*index + BlockLength() > max_length) {
- if (!OnBufferFull(packet, index, callback))
- return false;
- }
- const uint8_t kFmt = 2;
- CreateHeader(kFmt, PT_PSFB, HeaderLength(), packet, index);
- CreateSli(sli_, sli_item_, packet, index);
- return true;
-}
-
-bool Nack::Create(uint8_t* packet,
- size_t* index,
- size_t max_length,
- RtcpPacket::PacketReadyCallback* callback) const {
- assert(!nack_fields_.empty());
- // If nack list can't fit in packet, try to fragment.
- size_t nack_index = 0;
- do {
- size_t bytes_left_in_buffer = max_length - *index;
- if (bytes_left_in_buffer < kCommonFbFmtLength + 4) {
- if (!OnBufferFull(packet, index, callback))
- return false;
- continue;
- }
- int64_t num_nack_fields =
- std::min((bytes_left_in_buffer - kCommonFbFmtLength) / 4,
- nack_fields_.size() - nack_index);
-
- const uint8_t kFmt = 1;
- size_t size_bytes = (num_nack_fields * 4) + kCommonFbFmtLength;
- size_t header_length = ((size_bytes + 3) / 4) - 1; // As 32bit words - 1
- CreateHeader(kFmt, PT_RTPFB, header_length, packet, index);
- CreateNack(nack_, nack_fields_, nack_index, nack_index + num_nack_fields,
- packet, index);
-
- nack_index += num_nack_fields;
- } while (nack_index < nack_fields_.size());
-
- return true;
-}
-
-size_t Nack::BlockLength() const {
- return (nack_fields_.size() * 4) + kCommonFbFmtLength;
-}
-
-void Nack::WithList(const uint16_t* nack_list, int length) {
- assert(nack_list);
- assert(nack_fields_.empty());
- int i = 0;
- while (i < length) {
- uint16_t pid = nack_list[i++];
- // Bitmask specifies losses in any of the 16 packets following the pid.
- uint16_t bitmask = 0;
- while (i < length) {
- int shift = static_cast<uint16_t>(nack_list[i] - pid) - 1;
- if (shift >= 0 && shift <= 15) {
- bitmask |= (1 << shift);
- ++i;
- } else {
- break;
- }
- }
- RTCPUtility::RTCPPacketRTPFBNACKItem item;
- item.PacketID = pid;
- item.BitMask = bitmask;
- nack_fields_.push_back(item);
- }
-}
-
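The removed Nack::WithList shows the generic-NACK compression scheme: each FCI item stores a base sequence number (PID) and a 16-bit mask (BLP) flagging losses among the 16 packets that follow. For example, the list {100, 101, 105, 120} packs into {PID=100, BLP=0b10001} and {PID=120, BLP=0}. A standalone version of the same packing:

  #include <cstdint>
  #include <vector>

  struct NackItemSketch {
    uint16_t pid;
    uint16_t blp;
  };

  std::vector<NackItemSketch> PackNackSketch(
      const std::vector<uint16_t>& nack_list) {
    std::vector<NackItemSketch> items;
    size_t i = 0;
    while (i < nack_list.size()) {
      NackItemSketch item = {nack_list[i++], 0};
      while (i < nack_list.size()) {
        // Distance from the base PID, minus one, selects the BLP bit.
        int shift = static_cast<uint16_t>(nack_list[i] - item.pid) - 1;
        if (shift < 0 || shift > 15)
          break;
        item.blp |= static_cast<uint16_t>(1 << shift);
        ++i;
      }
      items.push_back(item);
    }
    return items;
  }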
bool Rpsi::Create(uint8_t* packet,
size_t* index,
size_t max_length,
@@ -1077,48 +535,6 @@ void Remb::AppliesTo(uint32_t ssrc) {
remb_item_.SSRCs[remb_item_.NumberOfSSRCs++] = ssrc;
}
-bool Tmmbr::Create(uint8_t* packet,
- size_t* index,
- size_t max_length,
- RtcpPacket::PacketReadyCallback* callback) const {
- while (*index + BlockLength() > max_length) {
- if (!OnBufferFull(packet, index, callback))
- return false;
- }
- const uint8_t kFmt = 3;
- CreateHeader(kFmt, PT_RTPFB, HeaderLength(), packet, index);
- CreateTmmbr(tmmbr_, tmmbr_item_, packet, index);
- return true;
-}
-
-bool Tmmbn::WithTmmbr(uint32_t ssrc, uint32_t bitrate_kbps, uint16_t overhead) {
- assert(overhead <= 0x1ff);
- if (tmmbn_items_.size() >= kMaxNumberOfTmmbrs) {
- LOG(LS_WARNING) << "Max TMMBN size reached.";
- return false;
- }
- RTCPPacketRTPFBTMMBRItem tmmbn_item;
- tmmbn_item.SSRC = ssrc;
- tmmbn_item.MaxTotalMediaBitRate = bitrate_kbps;
- tmmbn_item.MeasuredOverhead = overhead;
- tmmbn_items_.push_back(tmmbn_item);
- return true;
-}
-
-bool Tmmbn::Create(uint8_t* packet,
- size_t* index,
- size_t max_length,
- RtcpPacket::PacketReadyCallback* callback) const {
- while (*index + BlockLength() > max_length) {
- if (!OnBufferFull(packet, index, callback))
- return false;
- }
- const uint8_t kFmt = 4;
- CreateHeader(kFmt, PT_RTPFB, HeaderLength(), packet, index);
- CreateTmmbn(tmmbn_, tmmbn_items_, packet, index);
- return true;
-}
-
bool Xr::Create(uint8_t* packet,
size_t* index,
size_t max_length,
@@ -1129,29 +545,38 @@ bool Xr::Create(uint8_t* packet,
}
CreateHeader(0U, PT_XR, HeaderLength(), packet, index);
CreateXrHeader(xr_header_, packet, index);
- CreateRrtr(rrtr_blocks_, packet, index);
- CreateDlrr(dlrr_blocks_, packet, index);
- CreateVoipMetric(voip_metric_blocks_, packet, index);
+ for (const Rrtr& block : rrtr_blocks_) {
+ block.Create(packet + *index);
+ *index += Rrtr::kLength;
+ }
+ for (const Dlrr& block : dlrr_blocks_) {
+ block.Create(packet + *index);
+ *index += block.BlockLength();
+ }
+ for (const VoipMetric& block : voip_metric_blocks_) {
+ block.Create(packet + *index);
+ *index += VoipMetric::kLength;
+ }
return true;
}
bool Xr::WithRrtr(Rrtr* rrtr) {
- assert(rrtr);
+ RTC_DCHECK(rrtr);
if (rrtr_blocks_.size() >= kMaxNumberOfRrtrBlocks) {
LOG(LS_WARNING) << "Max RRTR blocks reached.";
return false;
}
- rrtr_blocks_.push_back(rrtr->rrtr_block_);
+ rrtr_blocks_.push_back(*rrtr);
return true;
}
bool Xr::WithDlrr(Dlrr* dlrr) {
- assert(dlrr);
+ RTC_DCHECK(dlrr);
if (dlrr_blocks_.size() >= kMaxNumberOfDlrrBlocks) {
LOG(LS_WARNING) << "Max DLRR blocks reached.";
return false;
}
- dlrr_blocks_.push_back(dlrr->dlrr_block_);
+ dlrr_blocks_.push_back(*dlrr);
return true;
}
@@ -1161,38 +586,18 @@ bool Xr::WithVoipMetric(VoipMetric* voip_metric) {
LOG(LS_WARNING) << "Max Voip Metric blocks reached.";
return false;
}
- voip_metric_blocks_.push_back(voip_metric->metric_);
+ voip_metric_blocks_.push_back(*voip_metric);
return true;
}
size_t Xr::DlrrLength() const {
- const size_t kBlockHeaderLen = 4;
- const size_t kSubBlockLen = 12;
size_t length = 0;
- for (std::vector<DlrrBlock>::const_iterator it = dlrr_blocks_.begin();
- it != dlrr_blocks_.end(); ++it) {
- if (!(*it).empty()) {
- length += kBlockHeaderLen + kSubBlockLen * (*it).size();
- }
+ for (const Dlrr& block : dlrr_blocks_) {
+ length += block.BlockLength();
}
return length;
}
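Xr::DlrrLength now just sums per-block lengths, with each Dlrr knowing its own size. An illustrative model of that computation, consistent with the removed kBlockHeaderLen/kSubBlockLen math (the real Dlrr class lives in rtcp_packet/dlrr.h, not shown here):

  #include <cstddef>
  #include <cstdint>
  #include <vector>

  struct DlrrSubBlockSketch {
    uint32_t ssrc;
    uint32_t last_rr;
    uint32_t delay_last_rr;
  };

  struct DlrrSketch {
    std::vector<DlrrSubBlockSketch> sub_blocks;
    // A 4-byte XR block header plus 12 bytes per sub-block;
    // an empty block contributes nothing, as before.
    size_t BlockLength() const {
      return sub_blocks.empty() ? 0 : 4 + 12 * sub_blocks.size();
    }
  };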
-bool Dlrr::WithDlrrItem(uint32_t ssrc,
- uint32_t last_rr,
- uint32_t delay_last_rr) {
- if (dlrr_block_.size() >= kMaxNumberOfDlrrItems) {
- LOG(LS_WARNING) << "Max DLRR items reached.";
- return false;
- }
- RTCPPacketXRDLRRReportBlockItem dlrr;
- dlrr.SSRC = ssrc;
- dlrr.LastRR = last_rr;
- dlrr.DelayLastRR = delay_last_rr;
- dlrr_block_.push_back(dlrr);
- return true;
-}
-
RawPacket::RawPacket(size_t buffer_length)
: buffer_length_(buffer_length), length_(0) {
buffer_.reset(new uint8_t[buffer_length]);
diff --git a/webrtc/modules/rtp_rtcp/source/rtcp_packet.h b/webrtc/modules/rtp_rtcp/source/rtcp_packet.h
index 3c34957c36..01c97c38ba 100644
--- a/webrtc/modules/rtp_rtcp/source/rtcp_packet.h
+++ b/webrtc/modules/rtp_rtcp/source/rtcp_packet.h
@@ -9,16 +9,20 @@
*
*/
-#ifndef WEBRTC_MODULES_RTP_RTCP_RTCP_PACKET_H_
-#define WEBRTC_MODULES_RTP_RTCP_RTCP_PACKET_H_
+#ifndef WEBRTC_MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_H_
+#define WEBRTC_MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_H_
#include <map>
#include <string>
#include <vector>
#include "webrtc/base/scoped_ptr.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/dlrr.h"
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/report_block.h"
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/rrtr.h"
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/voip_metric.h"
#include "webrtc/modules/rtp_rtcp/source/rtcp_utility.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
#include "webrtc/typedefs.h"
namespace webrtc {
@@ -27,10 +31,7 @@ namespace rtcp {
static const int kCommonFbFmtLength = 12;
static const int kReportBlockLength = 24;
-class Dlrr;
class RawPacket;
-class Rrtr;
-class VoipMetric;
// Class for building RTCP packets.
//
@@ -115,93 +116,17 @@ class RtcpPacket {
size_t HeaderLength() const;
static const size_t kHeaderLength = 4;
+ std::vector<RtcpPacket*> appended_packets_;
private:
bool CreateAndAddAppended(uint8_t* packet,
size_t* index,
size_t max_length,
PacketReadyCallback* callback) const;
-
- std::vector<RtcpPacket*> appended_packets_;
};
// TODO(sprang): Move RtcpPacket subclasses out to separate files.
-class Empty : public RtcpPacket {
- public:
- Empty() : RtcpPacket() {}
-
- virtual ~Empty() {}
-
- protected:
- bool Create(uint8_t* packet,
- size_t* index,
- size_t max_length,
- RtcpPacket::PacketReadyCallback* callback) const override;
-
- size_t BlockLength() const override;
-
- private:
- RTC_DISALLOW_COPY_AND_ASSIGN(Empty);
-};
-
-// From RFC 3550, RTP: A Transport Protocol for Real-Time Applications.
-//
-// RTCP report block (RFC 3550).
-//
-// 0 1 2 3
-// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
-// +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
-// | SSRC_1 (SSRC of first source) |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | fraction lost | cumulative number of packets lost |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | extended highest sequence number received |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | interarrival jitter |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | last SR (LSR) |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | delay since last SR (DLSR) |
-// +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
-
-class ReportBlock {
- public:
- ReportBlock() {
- // TODO(asapersson): Consider adding a constructor to struct.
- memset(&report_block_, 0, sizeof(report_block_));
- }
-
- ~ReportBlock() {}
-
- void To(uint32_t ssrc) {
- report_block_.SSRC = ssrc;
- }
- void WithFractionLost(uint8_t fraction_lost) {
- report_block_.FractionLost = fraction_lost;
- }
- void WithCumulativeLost(uint32_t cumulative_lost) {
- report_block_.CumulativeNumOfPacketsLost = cumulative_lost;
- }
- void WithExtHighestSeqNum(uint32_t ext_highest_seq_num) {
- report_block_.ExtendedHighestSequenceNumber = ext_highest_seq_num;
- }
- void WithJitter(uint32_t jitter) {
- report_block_.Jitter = jitter;
- }
- void WithLastSr(uint32_t last_sr) {
- report_block_.LastSR = last_sr;
- }
- void WithDelayLastSr(uint32_t delay_last_sr) {
- report_block_.DelayLastSR = delay_last_sr;
- }
-
- private:
- friend class SenderReport;
- friend class ReceiverReport;
- RTCPUtility::RTCPPacketReportBlockItem report_block_;
-};
-
// RTCP sender report (RFC 3550).
//
// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
@@ -268,102 +193,11 @@ class SenderReport : public RtcpPacket {
}
RTCPUtility::RTCPPacketSR sr_;
- std::vector<RTCPUtility::RTCPPacketReportBlockItem> report_blocks_;
+ std::vector<ReportBlock> report_blocks_;
RTC_DISALLOW_COPY_AND_ASSIGN(SenderReport);
};
-//
-// RTCP receiver report (RFC 3550).
-//
-// 0 1 2 3
-// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// |V=2|P| RC | PT=RR=201 | length |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | SSRC of packet sender |
-// +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
-// | report block(s) |
-// | .... |
-
-class ReceiverReport : public RtcpPacket {
- public:
- ReceiverReport() : RtcpPacket() {
- memset(&rr_, 0, sizeof(rr_));
- }
-
- virtual ~ReceiverReport() {}
-
- void From(uint32_t ssrc) {
- rr_.SenderSSRC = ssrc;
- }
- bool WithReportBlock(const ReportBlock& block);
-
- protected:
- bool Create(uint8_t* packet,
- size_t* index,
- size_t max_length,
- RtcpPacket::PacketReadyCallback* callback) const override;
-
- private:
- static const int kMaxNumberOfReportBlocks = 0x1F;
-
- size_t BlockLength() const {
- const size_t kRrHeaderLength = 8;
- return kRrHeaderLength + report_blocks_.size() * kReportBlockLength;
- }
-
- RTCPUtility::RTCPPacketRR rr_;
- std::vector<RTCPUtility::RTCPPacketReportBlockItem> report_blocks_;
-
- RTC_DISALLOW_COPY_AND_ASSIGN(ReceiverReport);
-};
-
-// Transmission Time Offsets in RTP Streams (RFC 5450).
-//
-// 0 1 2 3
-// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// hdr |V=2|P| RC | PT=IJ=195 | length |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | inter-arrival jitter |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// . .
-// . .
-// . .
-// | inter-arrival jitter |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-//
-// If present, this RTCP packet must be placed after a receiver report
-// (inside a compound RTCP packet), and MUST have the same value for RC
-// (reception report count) as the receiver report.
-
-class Ij : public RtcpPacket {
- public:
- Ij() : RtcpPacket() {}
-
- virtual ~Ij() {}
-
- bool WithJitterItem(uint32_t jitter);
-
- protected:
- bool Create(uint8_t* packet,
- size_t* index,
- size_t max_length,
- RtcpPacket::PacketReadyCallback* callback) const override;
-
- private:
- static const int kMaxNumberOfIjItems = 0x1f;
-
- size_t BlockLength() const {
- return kHeaderLength + 4 * ij_items_.size();
- }
-
- std::vector<uint32_t> ij_items_;
-
- RTC_DISALLOW_COPY_AND_ASSIGN(Ij);
-};
-
// Source Description (SDES) (RFC 3550).
//
// 0 1 2 3
@@ -420,262 +254,6 @@ class Sdes : public RtcpPacket {
RTC_DISALLOW_COPY_AND_ASSIGN(Sdes);
};
-//
-// Bye packet (BYE) (RFC 3550).
-//
-// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// |V=2|P| SC | PT=BYE=203 | length |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | SSRC/CSRC |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// : ... :
-// +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
-// (opt) | length | reason for leaving ...
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-
-class Bye : public RtcpPacket {
- public:
- Bye() : RtcpPacket() {
- memset(&bye_, 0, sizeof(bye_));
- }
-
- virtual ~Bye() {}
-
- void From(uint32_t ssrc) {
- bye_.SenderSSRC = ssrc;
- }
-
- bool WithCsrc(uint32_t csrc);
-
- // TODO(sprang): Add support for reason field?
-
- protected:
- bool Create(uint8_t* packet,
- size_t* index,
- size_t max_length,
- RtcpPacket::PacketReadyCallback* callback) const override;
-
- private:
- static const int kMaxNumberOfCsrcs = 0x1f - 1; // First item is sender SSRC.
-
- size_t BlockLength() const {
- size_t source_count = 1 + csrcs_.size();
- return kHeaderLength + 4 * source_count;
- }
-
- RTCPUtility::RTCPPacketBYE bye_;
- std::vector<uint32_t> csrcs_;
-
- RTC_DISALLOW_COPY_AND_ASSIGN(Bye);
-};
-
-// Application-Defined packet (APP) (RFC 3550).
-//
-// 0 1 2 3
-// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// |V=2|P| subtype | PT=APP=204 | length |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | SSRC/CSRC |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | name (ASCII) |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | application-dependent data ...
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-
-class App : public RtcpPacket {
- public:
- App()
- : RtcpPacket(),
- ssrc_(0) {
- memset(&app_, 0, sizeof(app_));
- }
-
- virtual ~App() {}
-
- void From(uint32_t ssrc) {
- ssrc_ = ssrc;
- }
- void WithSubType(uint8_t subtype) {
- assert(subtype <= 0x1f);
- app_.SubType = subtype;
- }
- void WithName(uint32_t name) {
- app_.Name = name;
- }
- void WithData(const uint8_t* data, uint16_t data_length) {
- assert(data);
- assert(data_length <= kRtcpAppCode_DATA_SIZE);
- assert(data_length % 4 == 0);
- memcpy(app_.Data, data, data_length);
- app_.Size = data_length;
- }
-
- protected:
- bool Create(uint8_t* packet,
- size_t* index,
- size_t max_length,
- RtcpPacket::PacketReadyCallback* callback) const override;
-
- private:
- size_t BlockLength() const {
- return 12 + app_.Size;
- }
-
- uint32_t ssrc_;
- RTCPUtility::RTCPPacketAPP app_;
-
- RTC_DISALLOW_COPY_AND_ASSIGN(App);
-};
-
-// RFC 4585: Feedback format.
-//
-// Common packet format:
-//
-// 0 1 2 3
-// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// |V=2|P| FMT | PT | length |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | SSRC of packet sender |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | SSRC of media source |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// : Feedback Control Information (FCI) :
-// :
-
-// Picture loss indication (PLI) (RFC 4585).
-//
-// FCI: no feedback control information.
-
-class Pli : public RtcpPacket {
- public:
- Pli() : RtcpPacket() {
- memset(&pli_, 0, sizeof(pli_));
- }
-
- virtual ~Pli() {}
-
- void From(uint32_t ssrc) {
- pli_.SenderSSRC = ssrc;
- }
- void To(uint32_t ssrc) {
- pli_.MediaSSRC = ssrc;
- }
-
- protected:
- bool Create(uint8_t* packet,
- size_t* index,
- size_t max_length,
- RtcpPacket::PacketReadyCallback* callback) const override;
-
- private:
- size_t BlockLength() const {
- return kCommonFbFmtLength;
- }
-
- RTCPUtility::RTCPPacketPSFBPLI pli_;
-
- RTC_DISALLOW_COPY_AND_ASSIGN(Pli);
-};
-
-// Slice loss indication (SLI) (RFC 4585).
-//
-// FCI:
-// 0 1 2 3
-// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | First | Number | PictureID |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-
-class Sli : public RtcpPacket {
- public:
- Sli() : RtcpPacket() {
- memset(&sli_, 0, sizeof(sli_));
- memset(&sli_item_, 0, sizeof(sli_item_));
- }
-
- virtual ~Sli() {}
-
- void From(uint32_t ssrc) {
- sli_.SenderSSRC = ssrc;
- }
- void To(uint32_t ssrc) {
- sli_.MediaSSRC = ssrc;
- }
- void WithFirstMb(uint16_t first_mb) {
- assert(first_mb <= 0x1fff);
- sli_item_.FirstMB = first_mb;
- }
- void WithNumberOfMb(uint16_t number_mb) {
- assert(number_mb <= 0x1fff);
- sli_item_.NumberOfMB = number_mb;
- }
- void WithPictureId(uint8_t picture_id) {
- assert(picture_id <= 0x3f);
- sli_item_.PictureId = picture_id;
- }
-
- protected:
- bool Create(uint8_t* packet,
- size_t* index,
- size_t max_length,
- RtcpPacket::PacketReadyCallback* callback) const override;
-
- private:
- size_t BlockLength() const {
- const size_t kFciLength = 4;
- return kCommonFbFmtLength + kFciLength;
- }
-
- RTCPUtility::RTCPPacketPSFBSLI sli_;
- RTCPUtility::RTCPPacketPSFBSLIItem sli_item_;
-
- RTC_DISALLOW_COPY_AND_ASSIGN(Sli);
-};
-
-// Generic NACK (RFC 4585).
-//
-// FCI:
-// 0 1 2 3
-// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | PID | BLP |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-
-class Nack : public RtcpPacket {
- public:
- Nack() : RtcpPacket() {
- memset(&nack_, 0, sizeof(nack_));
- }
-
- virtual ~Nack() {}
-
- void From(uint32_t ssrc) {
- nack_.SenderSSRC = ssrc;
- }
- void To(uint32_t ssrc) {
- nack_.MediaSSRC = ssrc;
- }
- void WithList(const uint16_t* nack_list, int length);
-
- protected:
- bool Create(uint8_t* packet,
- size_t* index,
- size_t max_length,
- RtcpPacket::PacketReadyCallback* callback) const override;
-
- size_t BlockLength() const override;
-
- private:
-
- RTCPUtility::RTCPPacketRTPFBNACK nack_;
- std::vector<RTCPUtility::RTCPPacketRTPFBNACKItem> nack_fields_;
-
- RTC_DISALLOW_COPY_AND_ASSIGN(Nack);
-};
-
// Reference picture selection indication (RPSI) (RFC 4585).
//
// FCI:
@@ -775,105 +353,6 @@ class Fir : public RtcpPacket {
RTCPUtility::RTCPPacketPSFBFIRItem fir_item_;
};
-// Temporary Maximum Media Stream Bit Rate Request (TMMBR) (RFC 5104).
-//
-// FCI:
-//
-// 0 1 2 3
-// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | SSRC |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | MxTBR Exp | MxTBR Mantissa |Measured Overhead|
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-
-class Tmmbr : public RtcpPacket {
- public:
- Tmmbr() : RtcpPacket() {
- memset(&tmmbr_, 0, sizeof(tmmbr_));
- memset(&tmmbr_item_, 0, sizeof(tmmbr_item_));
- }
-
- virtual ~Tmmbr() {}
-
- void From(uint32_t ssrc) {
- tmmbr_.SenderSSRC = ssrc;
- }
- void To(uint32_t ssrc) {
- tmmbr_item_.SSRC = ssrc;
- }
- void WithBitrateKbps(uint32_t bitrate_kbps) {
- tmmbr_item_.MaxTotalMediaBitRate = bitrate_kbps;
- }
- void WithOverhead(uint16_t overhead) {
- assert(overhead <= 0x1ff);
- tmmbr_item_.MeasuredOverhead = overhead;
- }
-
- protected:
- bool Create(uint8_t* packet,
- size_t* index,
- size_t max_length,
- RtcpPacket::PacketReadyCallback* callback) const override;
-
- private:
- size_t BlockLength() const {
- const size_t kFciLen = 8;
- return kCommonFbFmtLength + kFciLen;
- }
-
- RTCPUtility::RTCPPacketRTPFBTMMBR tmmbr_;
- RTCPUtility::RTCPPacketRTPFBTMMBRItem tmmbr_item_;
-
- RTC_DISALLOW_COPY_AND_ASSIGN(Tmmbr);
-};
-
-// Temporary Maximum Media Stream Bit Rate Notification (TMMBN) (RFC 5104).
-//
-// FCI:
-//
-// 0 1 2 3
-// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | SSRC |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | MxTBR Exp | MxTBR Mantissa |Measured Overhead|
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-
-class Tmmbn : public RtcpPacket {
- public:
- Tmmbn() : RtcpPacket() {
- memset(&tmmbn_, 0, sizeof(tmmbn_));
- }
-
- virtual ~Tmmbn() {}
-
- void From(uint32_t ssrc) {
- tmmbn_.SenderSSRC = ssrc;
- }
- // Max 50 TMMBR can be added per TMMBN.
- bool WithTmmbr(uint32_t ssrc, uint32_t bitrate_kbps, uint16_t overhead);
-
- protected:
- bool Create(uint8_t* packet,
- size_t* index,
- size_t max_length,
- RtcpPacket::PacketReadyCallback* callback) const override;
-
- private:
- static const int kMaxNumberOfTmmbrs = 50;
-
- size_t BlockLength() const {
- const size_t kFciLen = 8;
- return kCommonFbFmtLength + kFciLen * tmmbn_items_.size();
- }
-
- RTCPUtility::RTCPPacketRTPFBTMMBN tmmbn_;
- std::vector<RTCPUtility::RTCPPacketRTPFBTMMBRItem> tmmbn_items_;
-
- RTC_DISALLOW_COPY_AND_ASSIGN(Tmmbn);
-};
-
// Receiver Estimated Max Bitrate (REMB) (draft-alvestrand-rmcat-remb).
//
// 0 1 2 3
@@ -978,163 +457,22 @@ class Xr : public RtcpPacket {
return kXrHeaderLength + RrtrLength() + DlrrLength() + VoipMetricLength();
}
- size_t RrtrLength() const {
- const size_t kRrtrBlockLength = 12;
- return kRrtrBlockLength * rrtr_blocks_.size();
- }
+ size_t RrtrLength() const { return Rrtr::kLength * rrtr_blocks_.size(); }
size_t DlrrLength() const;
size_t VoipMetricLength() const {
- const size_t kVoipMetricBlockLength = 36;
- return kVoipMetricBlockLength * voip_metric_blocks_.size();
+ return VoipMetric::kLength * voip_metric_blocks_.size();
}
RTCPUtility::RTCPPacketXR xr_header_;
- std::vector<RTCPUtility::RTCPPacketXRReceiverReferenceTimeItem> rrtr_blocks_;
- std::vector<DlrrBlock> dlrr_blocks_;
- std::vector<RTCPUtility::RTCPPacketXRVOIPMetricItem> voip_metric_blocks_;
+ std::vector<Rrtr> rrtr_blocks_;
+ std::vector<Dlrr> dlrr_blocks_;
+ std::vector<VoipMetric> voip_metric_blocks_;
RTC_DISALLOW_COPY_AND_ASSIGN(Xr);
};
-// Receiver Reference Time Report Block (RFC 3611).
-//
-// 0 1 2 3
-// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | BT=4 | reserved | block length = 2 |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | NTP timestamp, most significant word |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | NTP timestamp, least significant word |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-
-class Rrtr {
- public:
- Rrtr() {
- memset(&rrtr_block_, 0, sizeof(rrtr_block_));
- }
- ~Rrtr() {}
-
- void WithNtpSec(uint32_t sec) {
- rrtr_block_.NTPMostSignificant = sec;
- }
- void WithNtpFrac(uint32_t frac) {
- rrtr_block_.NTPLeastSignificant = frac;
- }
-
- private:
- friend class Xr;
- RTCPUtility::RTCPPacketXRReceiverReferenceTimeItem rrtr_block_;
-
- RTC_DISALLOW_COPY_AND_ASSIGN(Rrtr);
-};
-
-// DLRR Report Block (RFC 3611).
-//
-// 0 1 2 3
-// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | BT=5 | reserved | block length |
-// +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
-// | SSRC_1 (SSRC of first receiver) | sub-
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ block
-// | last RR (LRR) | 1
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | delay since last RR (DLRR) |
-// +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
-// | SSRC_2 (SSRC of second receiver) | sub-
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ block
-// : ... : 2
-
-class Dlrr {
- public:
- Dlrr() {}
- ~Dlrr() {}
-
- // Max 100 DLRR Items can be added per DLRR report block.
- bool WithDlrrItem(uint32_t ssrc, uint32_t last_rr, uint32_t delay_last_rr);
-
- private:
- friend class Xr;
- static const int kMaxNumberOfDlrrItems = 100;
-
- std::vector<RTCPUtility::RTCPPacketXRDLRRReportBlockItem> dlrr_block_;
-
- RTC_DISALLOW_COPY_AND_ASSIGN(Dlrr);
-};
-
-// VoIP Metrics Report Block (RFC 3611).
-//
-// 0 1 2 3
-// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | BT=7 | reserved | block length = 8 |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | SSRC of source |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | loss rate | discard rate | burst density | gap density |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | burst duration | gap duration |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | round trip delay | end system delay |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | signal level | noise level | RERL | Gmin |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | R factor | ext. R factor | MOS-LQ | MOS-CQ |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | RX config | reserved | JB nominal |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | JB maximum | JB abs max |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-
-class VoipMetric {
- public:
- VoipMetric() {
- memset(&metric_, 0, sizeof(metric_));
- }
- ~VoipMetric() {}
-
- void To(uint32_t ssrc) { metric_.SSRC = ssrc; }
- void LossRate(uint8_t loss_rate) { metric_.lossRate = loss_rate; }
- void DiscardRate(uint8_t discard_rate) { metric_.discardRate = discard_rate; }
- void BurstDensity(uint8_t burst_density) {
- metric_.burstDensity = burst_density;
- }
- void GapDensity(uint8_t gap_density) { metric_.gapDensity = gap_density; }
- void BurstDuration(uint16_t burst_duration) {
- metric_.burstDuration = burst_duration;
- }
- void GapDuration(uint16_t gap_duration) {
- metric_.gapDuration = gap_duration;
- }
- void RoundTripDelay(uint16_t round_trip_delay) {
- metric_.roundTripDelay = round_trip_delay;
- }
- void EndSystemDelay(uint16_t end_system_delay) {
- metric_.endSystemDelay = end_system_delay;
- }
- void SignalLevel(uint8_t signal_level) { metric_.signalLevel = signal_level; }
- void NoiseLevel(uint8_t noise_level) { metric_.noiseLevel = noise_level; }
- void Rerl(uint8_t rerl) { metric_.RERL = rerl; }
- void Gmin(uint8_t gmin) { metric_.Gmin = gmin; }
- void Rfactor(uint8_t rfactor) { metric_.Rfactor = rfactor; }
- void ExtRfactor(uint8_t extrfactor) { metric_.extRfactor = extrfactor; }
- void MosLq(uint8_t moslq) { metric_.MOSLQ = moslq; }
- void MosCq(uint8_t moscq) { metric_.MOSCQ = moscq; }
- void RxConfig(uint8_t rxconfig) { metric_.RXconfig = rxconfig; }
- void JbNominal(uint16_t jbnominal) { metric_.JBnominal = jbnominal; }
- void JbMax(uint16_t jbmax) { metric_.JBmax = jbmax; }
- void JbAbsMax(uint16_t jbabsmax) { metric_.JBabsMax = jbabsmax; }
-
- private:
- friend class Xr;
- RTCPUtility::RTCPPacketXRVOIPMetricItem metric_;
-
- RTC_DISALLOW_COPY_AND_ASSIGN(VoipMetric);
-};
-
// Class holding a RTCP packet.
//
// Takes a built rtcp packet.
@@ -1163,4 +501,4 @@ class RawPacket {
} // namespace rtcp
} // namespace webrtc
-#endif // WEBRTC_MODULES_RTP_RTCP_RTCP_PACKET_H_
+#endif // WEBRTC_MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_H_
diff --git a/webrtc/modules/rtp_rtcp/source/rtcp_packet/app.cc b/webrtc/modules/rtp_rtcp/source/rtcp_packet/app.cc
new file mode 100644
index 0000000000..a1ad8d6427
--- /dev/null
+++ b/webrtc/modules/rtp_rtcp/source/rtcp_packet/app.cc
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/app.h"
+
+#include "webrtc/base/checks.h"
+#include "webrtc/base/logging.h"
+#include "webrtc/modules/rtp_rtcp/source/byte_io.h"
+
+using webrtc::RTCPUtility::RtcpCommonHeader;
+
+namespace webrtc {
+namespace rtcp {
+
+// Application-Defined packet (APP) (RFC 3550).
+//
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |V=2|P| subtype | PT=APP=204 | length |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// 0 | SSRC/CSRC |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// 4 | name (ASCII) |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// 8 | application-dependent data ...
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+bool App::Parse(const RtcpCommonHeader& header, const uint8_t* payload) {
+ RTC_DCHECK(header.packet_type == kPacketType);
+
+ sub_type_ = header.count_or_format;
+ ssrc_ = ByteReader<uint32_t>::ReadBigEndian(&payload[0]);
+ name_ = ByteReader<uint32_t>::ReadBigEndian(&payload[4]);
+ data_.SetData(&payload[8], header.payload_size_bytes - 8);
+ return true;
+}
+
+void App::WithSubType(uint8_t subtype) {
+ RTC_DCHECK_LE(subtype, 0x1f);
+ sub_type_ = subtype;
+}
+
+void App::WithData(const uint8_t* data, size_t data_length) {
+ RTC_DCHECK(data);
+ RTC_DCHECK_EQ(0u, data_length % 4) << "Data must be 32-bit aligned.";
+ RTC_DCHECK(data_length <= kMaxDataSize) << "App data size " << data_length
+ << " exceeds maximum of "
+ << kMaxDataSize << " bytes.";
+ data_.SetData(data, data_length);
+}
+
+bool App::Create(uint8_t* packet,
+ size_t* index,
+ size_t max_length,
+ RtcpPacket::PacketReadyCallback* callback) const {
+ while (*index + BlockLength() > max_length) {
+ if (!OnBufferFull(packet, index, callback))
+ return false;
+ }
+ const size_t index_end = *index + BlockLength();
+ CreateHeader(sub_type_, kPacketType, HeaderLength(), packet, index);
+
+ ByteWriter<uint32_t>::WriteBigEndian(&packet[*index + 0], ssrc_);
+ ByteWriter<uint32_t>::WriteBigEndian(&packet[*index + 4], name_);
+ memcpy(&packet[*index + 8], data_.data(), data_.size());
+ *index += (8 + data_.size());
+ RTC_DCHECK_EQ(index_end, *index);
+ return true;
+}
+
+} // namespace rtcp
+} // namespace webrtc
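
For reference, App::Create() relies on the usual RTCP length convention:
HeaderLength(), inherited from RtcpPacket (not shown in this diff), should be
the packet size in 32-bit words minus one. With BlockLength() = 12 +
data_.size() and data restricted to multiples of four bytes, an APP packet
carrying 8 bytes of data is 20 bytes long and gets a length field of 4.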
diff --git a/webrtc/modules/rtp_rtcp/source/rtcp_packet/app.h b/webrtc/modules/rtp_rtcp/source/rtcp_packet/app.h
new file mode 100644
index 0000000000..16bd3fc2a2
--- /dev/null
+++ b/webrtc/modules/rtp_rtcp/source/rtcp_packet/app.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_APP_H_
+#define WEBRTC_MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_APP_H_
+
+#include "webrtc/base/buffer.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet.h"
+#include "webrtc/modules/rtp_rtcp/source/rtcp_utility.h"
+
+namespace webrtc {
+namespace rtcp {
+
+class App : public RtcpPacket {
+ public:
+ static const uint8_t kPacketType = 204;
+ // 28 bytes for UDP header
+ // 12 bytes for RTCP app header
+ static const size_t kMaxDataSize = IP_PACKET_SIZE - 12 - 28;
+ App() : sub_type_(0), ssrc_(0), name_(0) {}
+
+ virtual ~App() {}
+
+ // Parse assumes header is already parsed and validated.
+ bool Parse(const RTCPUtility::RtcpCommonHeader& header,
+ const uint8_t* payload); // Size of the payload is in the header.
+
+ void From(uint32_t ssrc) { ssrc_ = ssrc; }
+ void WithSubType(uint8_t subtype);
+ void WithName(uint32_t name) { name_ = name; }
+ void WithData(const uint8_t* data, size_t data_length);
+
+ uint8_t sub_type() const { return sub_type_; }
+ uint32_t ssrc() const { return ssrc_; }
+ uint32_t name() const { return name_; }
+ size_t data_size() const { return data_.size(); }
+ const uint8_t* data() const { return data_.data(); }
+
+ protected:
+ bool Create(uint8_t* packet,
+ size_t* index,
+ size_t max_length,
+ RtcpPacket::PacketReadyCallback* callback) const override;
+
+ private:
+ size_t BlockLength() const override { return 12 + data_.size(); }
+
+ uint8_t sub_type_;
+ uint32_t ssrc_;
+ uint32_t name_;
+ rtc::Buffer data_;
+
+ RTC_DISALLOW_COPY_AND_ASSIGN(App);
+};
+
+} // namespace rtcp
+} // namespace webrtc
+#endif // WEBRTC_MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_APP_H_
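
A minimal usage sketch of the builder interface declared above; the SSRC,
subtype and FourCC values are illustrative, and Build() comes from the
RtcpPacket base (the unit test below exercises the same flow):

    webrtc::rtcp::App app;
    app.From(0x12345678);                // Sender SSRC.
    app.WithSubType(30);                 // 5-bit subtype, at most 0x1f.
    app.WithName(0x6e616d65);            // FourCC "name", written big-endian.
    const uint8_t kData[] = {'t', 'e', 's', 't'};
    app.WithData(kData, sizeof(kData));  // Size must be a multiple of 4.
    rtc::scoped_ptr<webrtc::rtcp::RawPacket> packet = app.Build();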
diff --git a/webrtc/modules/rtp_rtcp/source/rtcp_packet/app_unittest.cc b/webrtc/modules/rtp_rtcp/source/rtcp_packet/app_unittest.cc
new file mode 100644
index 0000000000..4451fe8fb5
--- /dev/null
+++ b/webrtc/modules/rtp_rtcp/source/rtcp_packet/app_unittest.cc
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/app.h"
+
+#include <limits>
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/modules/rtp_rtcp/source/rtcp_utility.h"
+
+using webrtc::rtcp::App;
+using webrtc::rtcp::RawPacket;
+using webrtc::RTCPUtility::RtcpCommonHeader;
+using webrtc::RTCPUtility::RtcpParseCommonHeader;
+
+namespace webrtc {
+namespace {
+
+const uint32_t kName = ((uint32_t)'n' << 24) | ((uint32_t)'a' << 16) |
+ ((uint32_t)'m' << 8) | (uint32_t)'e';
+const uint32_t kSenderSsrc = 0x12345678;
+
+class RtcpPacketAppTest : public ::testing::Test {
+ protected:
+ void BuildPacket() { packet = app.Build(); }
+ void ParsePacket() {
+ RtcpCommonHeader header;
+ EXPECT_TRUE(
+ RtcpParseCommonHeader(packet->Buffer(), packet->Length(), &header));
+    // Check that there is exactly one RTCP packet in the buffer.
+ EXPECT_EQ(header.BlockSize(), packet->Length());
+ EXPECT_TRUE(parsed_.Parse(
+ header, packet->Buffer() + RtcpCommonHeader::kHeaderSizeBytes));
+ }
+
+ App app;
+ rtc::scoped_ptr<RawPacket> packet;
+ const App& parsed() { return parsed_; }
+
+ private:
+ App parsed_;
+};
+
+TEST_F(RtcpPacketAppTest, WithNoData) {
+ app.WithSubType(30);
+ app.WithName(kName);
+
+ BuildPacket();
+ ParsePacket();
+
+ EXPECT_EQ(30U, parsed().sub_type());
+ EXPECT_EQ(kName, parsed().name());
+ EXPECT_EQ(0u, parsed().data_size());
+}
+
+TEST_F(RtcpPacketAppTest, WithData) {
+ app.From(kSenderSsrc);
+ app.WithSubType(30);
+ app.WithName(kName);
+ const uint8_t kData[] = {'t', 'e', 's', 't', 'd', 'a', 't', 'a'};
+ const size_t kDataLength = sizeof(kData) / sizeof(kData[0]);
+ app.WithData(kData, kDataLength);
+
+ BuildPacket();
+ ParsePacket();
+
+ EXPECT_EQ(30U, parsed().sub_type());
+ EXPECT_EQ(kName, parsed().name());
+ EXPECT_EQ(kDataLength, parsed().data_size());
+ EXPECT_EQ(0, memcmp(kData, parsed().data(), kDataLength));
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/webrtc/modules/rtp_rtcp/source/rtcp_packet/bye.cc b/webrtc/modules/rtp_rtcp/source/rtcp_packet/bye.cc
new file mode 100644
index 0000000000..4cfc921ce5
--- /dev/null
+++ b/webrtc/modules/rtp_rtcp/source/rtcp_packet/bye.cc
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/bye.h"
+
+#include "webrtc/base/checks.h"
+#include "webrtc/base/logging.h"
+#include "webrtc/modules/rtp_rtcp/source/byte_io.h"
+
+using webrtc::RTCPUtility::RtcpCommonHeader;
+
+namespace webrtc {
+namespace rtcp {
+
+// Bye packet (BYE) (RFC 3550).
+//
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |V=2|P| SC | PT=BYE=203 | length |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | SSRC/CSRC |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// : ... :
+// +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
+// (opt) | length | reason for leaving ...
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+Bye::Bye() : sender_ssrc_(0) {}
+
+bool Bye::Parse(const RtcpCommonHeader& header, const uint8_t* payload) {
+ RTC_DCHECK(header.packet_type == kPacketType);
+
+ const uint8_t src_count = header.count_or_format;
+ // Validate packet.
+ if (header.payload_size_bytes < 4u * src_count) {
+ LOG(LS_WARNING)
+ << "Packet is too small to contain CSRCs it promise to have.";
+ return false;
+ }
+ bool has_reason = (header.payload_size_bytes > 4u * src_count);
+ uint8_t reason_length = 0;
+ if (has_reason) {
+ reason_length = payload[4u * src_count];
+ if (header.payload_size_bytes - 4u * src_count < 1u + reason_length) {
+ LOG(LS_WARNING) << "Invalid reason length: " << reason_length;
+ return false;
+ }
+ }
+  // Once the packet is known to be valid, copy the values.
+ if (src_count == 0) { // A count value of zero is valid, but useless.
+ sender_ssrc_ = 0;
+ csrcs_.clear();
+ } else {
+ sender_ssrc_ = ByteReader<uint32_t>::ReadBigEndian(payload);
+ csrcs_.resize(src_count - 1);
+ for (size_t i = 1; i < src_count; ++i)
+ csrcs_[i - 1] = ByteReader<uint32_t>::ReadBigEndian(&payload[4 * i]);
+ }
+
+ if (has_reason) {
+ reason_.assign(reinterpret_cast<const char*>(&payload[4u * src_count + 1]),
+ reason_length);
+ } else {
+ reason_.clear();
+ }
+
+ return true;
+}
+
+bool Bye::Create(uint8_t* packet,
+ size_t* index,
+ size_t max_length,
+ RtcpPacket::PacketReadyCallback* callback) const {
+ while (*index + BlockLength() > max_length) {
+ if (!OnBufferFull(packet, index, callback))
+ return false;
+ }
+ const size_t index_end = *index + BlockLength();
+
+ CreateHeader(1 + csrcs_.size(), kPacketType, HeaderLength(), packet, index);
+  // Store the SSRCs of the leaving clients.
+ ByteWriter<uint32_t>::WriteBigEndian(&packet[*index], sender_ssrc_);
+ *index += sizeof(uint32_t);
+ for (uint32_t csrc : csrcs_) {
+ ByteWriter<uint32_t>::WriteBigEndian(&packet[*index], csrc);
+ *index += sizeof(uint32_t);
+ }
+  // Store the reason for leaving.
+ if (!reason_.empty()) {
+    uint8_t reason_length = static_cast<uint8_t>(reason_.size());
+ packet[(*index)++] = reason_length;
+ memcpy(&packet[*index], reason_.data(), reason_length);
+ *index += reason_length;
+ // Add padding bytes if needed.
+ size_t bytes_to_pad = index_end - *index;
+ RTC_DCHECK_LE(bytes_to_pad, 3u);
+ if (bytes_to_pad > 0) {
+ memset(&packet[*index], 0, bytes_to_pad);
+ *index += bytes_to_pad;
+ }
+ }
+ RTC_DCHECK_EQ(index_end, *index);
+ return true;
+}
+
+bool Bye::WithCsrc(uint32_t csrc) {
+ if (csrcs_.size() >= kMaxNumberOfCsrcs) {
+ LOG(LS_WARNING) << "Max CSRC size reached.";
+ return false;
+ }
+ csrcs_.push_back(csrc);
+ return true;
+}
+
+void Bye::WithReason(const std::string& reason) {
+ RTC_DCHECK_LE(reason.size(), 0xffu);
+ reason_ = reason;
+}
+
+size_t Bye::BlockLength() const {
+ size_t src_count = (1 + csrcs_.size());
+ size_t reason_size_in_32bits = reason_.empty() ? 0 : (reason_.size() / 4 + 1);
+ return kHeaderLength + 4 * (src_count + reason_size_in_32bits);
+}
+
+} // namespace rtcp
+} // namespace webrtc
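
A worked check of Bye::BlockLength() against the unit test below: with a
sender SSRC, two CSRCs and the 11-character reason "Some Reason", src_count
is 3 and the reason occupies 11/4 + 1 = 3 words (one length octet plus eleven
characters fills 12 bytes exactly, so no padding), giving 4 + 4 * (3 + 3) =
28 bytes, the value asserted by EXPECT_EQ(28u, packet->Length()).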
diff --git a/webrtc/modules/rtp_rtcp/source/rtcp_packet/bye.h b/webrtc/modules/rtp_rtcp/source/rtcp_packet/bye.h
new file mode 100644
index 0000000000..6b4a181330
--- /dev/null
+++ b/webrtc/modules/rtp_rtcp/source/rtcp_packet/bye.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#ifndef WEBRTC_MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_BYE_H_
+#define WEBRTC_MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_BYE_H_
+
+#include <string>
+#include <vector>
+
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet.h"
+#include "webrtc/modules/rtp_rtcp/source/rtcp_utility.h"
+
+namespace webrtc {
+namespace rtcp {
+
+class Bye : public RtcpPacket {
+ public:
+ static const uint8_t kPacketType = 203;
+
+ Bye();
+ virtual ~Bye() {}
+
+ // Parse assumes header is already parsed and validated.
+ bool Parse(const RTCPUtility::RtcpCommonHeader& header,
+ const uint8_t* payload); // Size of the payload is in the header.
+
+ void From(uint32_t ssrc) { sender_ssrc_ = ssrc; }
+ bool WithCsrc(uint32_t csrc);
+ void WithReason(const std::string& reason);
+
+ uint32_t sender_ssrc() const { return sender_ssrc_; }
+ const std::vector<uint32_t>& csrcs() const { return csrcs_; }
+ const std::string& reason() const { return reason_; }
+
+ protected:
+ bool Create(uint8_t* packet,
+ size_t* index,
+ size_t max_length,
+ RtcpPacket::PacketReadyCallback* callback) const override;
+
+ private:
+ static const int kMaxNumberOfCsrcs = 0x1f - 1; // First item is sender SSRC.
+
+ size_t BlockLength() const override;
+
+ uint32_t sender_ssrc_;
+ std::vector<uint32_t> csrcs_;
+ std::string reason_;
+
+ RTC_DISALLOW_COPY_AND_ASSIGN(Bye);
+};
+
+} // namespace rtcp
+} // namespace webrtc
+#endif // WEBRTC_MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_BYE_H_
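
A minimal usage sketch of the interface above (values illustrative; the
limits follow the declarations: at most 30 CSRCs and a 255-character reason):

    webrtc::rtcp::Bye bye;
    bye.From(0x12345678);             // SSRC of the leaving sender.
    bye.WithCsrc(0x22232425);         // Returns false after 30 CSRCs.
    bye.WithReason("shutting down");  // At most 255 characters.
    rtc::scoped_ptr<webrtc::rtcp::RawPacket> packet = bye.Build();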
diff --git a/webrtc/modules/rtp_rtcp/source/rtcp_packet/bye_unittest.cc b/webrtc/modules/rtp_rtcp/source/rtcp_packet/bye_unittest.cc
new file mode 100644
index 0000000000..d2ae8ed782
--- /dev/null
+++ b/webrtc/modules/rtp_rtcp/source/rtcp_packet/bye_unittest.cc
@@ -0,0 +1,173 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/bye.h"
+
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/modules/rtp_rtcp/source/rtcp_utility.h"
+
+using ::testing::ElementsAre;
+
+using webrtc::rtcp::Bye;
+using webrtc::rtcp::RawPacket;
+using webrtc::RTCPUtility::RtcpCommonHeader;
+using webrtc::RTCPUtility::RtcpParseCommonHeader;
+
+namespace webrtc {
+namespace {
+
+const uint32_t kSenderSsrc = 0x12345678;
+const uint32_t kCsrc1 = 0x22232425;
+const uint32_t kCsrc2 = 0x33343536;
+
+class RtcpPacketByeTest : public ::testing::Test {
+ protected:
+ void BuildPacket() { packet = bye.Build(); }
+ void ParsePacket() {
+ RtcpCommonHeader header;
+ EXPECT_TRUE(
+ RtcpParseCommonHeader(packet->Buffer(), packet->Length(), &header));
+ // Check that there is exactly one RTCP packet in the buffer.
+ EXPECT_EQ(header.BlockSize(), packet->Length());
+ EXPECT_TRUE(parsed_bye.Parse(
+ header, packet->Buffer() + RtcpCommonHeader::kHeaderSizeBytes));
+ }
+
+ Bye bye;
+ rtc::scoped_ptr<RawPacket> packet;
+ Bye parsed_bye;
+};
+
+TEST_F(RtcpPacketByeTest, Bye) {
+ bye.From(kSenderSsrc);
+
+ BuildPacket();
+ ParsePacket();
+
+ EXPECT_EQ(kSenderSsrc, parsed_bye.sender_ssrc());
+ EXPECT_TRUE(parsed_bye.csrcs().empty());
+ EXPECT_TRUE(parsed_bye.reason().empty());
+}
+
+TEST_F(RtcpPacketByeTest, WithCsrcs) {
+ bye.From(kSenderSsrc);
+ EXPECT_TRUE(bye.WithCsrc(kCsrc1));
+ EXPECT_TRUE(bye.WithCsrc(kCsrc2));
+ EXPECT_TRUE(bye.reason().empty());
+
+ BuildPacket();
+ EXPECT_EQ(16u, packet->Length()); // Header: 4, 3xSRCs: 12, Reason: 0.
+
+ ParsePacket();
+
+ EXPECT_EQ(kSenderSsrc, parsed_bye.sender_ssrc());
+ EXPECT_THAT(parsed_bye.csrcs(), ElementsAre(kCsrc1, kCsrc2));
+ EXPECT_TRUE(parsed_bye.reason().empty());
+}
+
+TEST_F(RtcpPacketByeTest, WithCsrcsAndReason) {
+ const std::string kReason = "Some Reason";
+
+ bye.From(kSenderSsrc);
+ EXPECT_TRUE(bye.WithCsrc(kCsrc1));
+ EXPECT_TRUE(bye.WithCsrc(kCsrc2));
+ bye.WithReason(kReason);
+
+ BuildPacket();
+ EXPECT_EQ(28u, packet->Length()); // Header: 4, 3xSRCs: 12, Reason: 12.
+
+ ParsePacket();
+
+ EXPECT_EQ(kSenderSsrc, parsed_bye.sender_ssrc());
+ EXPECT_THAT(parsed_bye.csrcs(), ElementsAre(kCsrc1, kCsrc2));
+ EXPECT_EQ(kReason, parsed_bye.reason());
+}
+
+TEST_F(RtcpPacketByeTest, WithTooManyCsrcs) {
+ bye.From(kSenderSsrc);
+ const int kMaxCsrcs = (1 << 5) - 2; // 5 bit len, first item is sender SSRC.
+ for (int i = 0; i < kMaxCsrcs; ++i) {
+ EXPECT_TRUE(bye.WithCsrc(i));
+ }
+ EXPECT_FALSE(bye.WithCsrc(kMaxCsrcs));
+}
+
+TEST_F(RtcpPacketByeTest, WithAReason) {
+ const std::string kReason = "Some Random Reason";
+
+ bye.From(kSenderSsrc);
+ bye.WithReason(kReason);
+
+ BuildPacket();
+ ParsePacket();
+
+ EXPECT_EQ(kSenderSsrc, parsed_bye.sender_ssrc());
+ EXPECT_TRUE(parsed_bye.csrcs().empty());
+ EXPECT_EQ(kReason, parsed_bye.reason());
+}
+
+TEST_F(RtcpPacketByeTest, WithReasons) {
+  // Test that packet creation/parsing behaves with reasons of different
+  // lengths, both when padding is required and when it is not.
+  for (size_t remainder = 0; remainder < 4; ++remainder) {
+    const std::string kReason(4 + remainder, 'a' + remainder);
+ bye.From(kSenderSsrc);
+ bye.WithReason(kReason);
+
+ BuildPacket();
+ ParsePacket();
+
+ EXPECT_EQ(kReason, parsed_bye.reason());
+ }
+}
+
+TEST_F(RtcpPacketByeTest, ParseEmptyPacket) {
+ RtcpCommonHeader header;
+ header.packet_type = Bye::kPacketType;
+ header.count_or_format = 0;
+ header.payload_size_bytes = 0;
+ uint8_t empty_payload[1];
+
+ EXPECT_TRUE(parsed_bye.Parse(header, empty_payload + 1));
+ EXPECT_EQ(0u, parsed_bye.sender_ssrc());
+ EXPECT_TRUE(parsed_bye.csrcs().empty());
+ EXPECT_TRUE(parsed_bye.reason().empty());
+}
+
+TEST_F(RtcpPacketByeTest, ParseFailOnInvalidSrcCount) {
+ bye.From(kSenderSsrc);
+
+ BuildPacket();
+
+ RtcpCommonHeader header;
+ RtcpParseCommonHeader(packet->Buffer(), packet->Length(), &header);
+  header.count_or_format = 2;  // Lie that there are 2 SSRCs, not one.
+
+ EXPECT_FALSE(parsed_bye.Parse(
+ header, packet->Buffer() + RtcpCommonHeader::kHeaderSizeBytes));
+}
+
+TEST_F(RtcpPacketByeTest, ParseFailOnInvalidReasonLength) {
+ bye.From(kSenderSsrc);
+ bye.WithReason("18 characters long");
+
+ BuildPacket();
+
+ RtcpCommonHeader header;
+ RtcpParseCommonHeader(packet->Buffer(), packet->Length(), &header);
+  header.payload_size_bytes -= 4;  // Payload is 32-bit aligned; drop a word.
+
+ EXPECT_FALSE(parsed_bye.Parse(
+ header, packet->Buffer() + RtcpCommonHeader::kHeaderSizeBytes));
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/webrtc/modules/rtp_rtcp/source/rtcp_packet/compound_packet.cc b/webrtc/modules/rtp_rtcp/source/rtcp_packet/compound_packet.cc
new file mode 100644
index 0000000000..8f5afd5dd1
--- /dev/null
+++ b/webrtc/modules/rtp_rtcp/source/rtcp_packet/compound_packet.cc
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/compound_packet.h"
+
+namespace webrtc {
+namespace rtcp {
+
+bool CompoundPacket::Create(uint8_t* packet,
+ size_t* index,
+ size_t max_length,
+ RtcpPacket::PacketReadyCallback* callback) const {
+ return true;
+}
+
+size_t CompoundPacket::BlockLength() const {
+ return 0;
+}
+
+} // namespace rtcp
+} // namespace webrtc
diff --git a/webrtc/modules/rtp_rtcp/source/rtcp_packet/compound_packet.h b/webrtc/modules/rtp_rtcp/source/rtcp_packet/compound_packet.h
new file mode 100644
index 0000000000..f2f49a8ffb
--- /dev/null
+++ b/webrtc/modules/rtp_rtcp/source/rtcp_packet/compound_packet.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#ifndef WEBRTC_MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_COMPOUND_PACKET_H_
+#define WEBRTC_MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_COMPOUND_PACKET_H_
+
+#include "webrtc/base/basictypes.h"
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet.h"
+
+namespace webrtc {
+namespace rtcp {
+
+class CompoundPacket : public RtcpPacket {
+ public:
+ CompoundPacket() : RtcpPacket() {}
+
+ virtual ~CompoundPacket() {}
+
+ protected:
+ bool Create(uint8_t* packet,
+ size_t* index,
+ size_t max_length,
+ RtcpPacket::PacketReadyCallback* callback) const override;
+
+ size_t BlockLength() const override;
+
+ private:
+ RTC_DISALLOW_COPY_AND_ASSIGN(CompoundPacket);
+};
+
+} // namespace rtcp
+} // namespace webrtc
+#endif // WEBRTC_MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_COMPOUND_PACKET_H_
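
CompoundPacket serializes nothing itself (Create() above is a no-op and
BlockLength() is zero); it only acts as a root for packets attached with the
Append() helper inherited from RtcpPacket. A minimal sketch, assuming the
appended packet outlives Build() since Append() takes a raw pointer:

    webrtc::rtcp::CompoundPacket compound;
    webrtc::rtcp::ReceiverReport rr;
    rr.From(0x12345678);
    compound.Append(&rr);  // Stores a pointer, not a copy.
    rtc::scoped_ptr<webrtc::rtcp::RawPacket> packet = compound.Build();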
diff --git a/webrtc/modules/rtp_rtcp/source/rtcp_packet/compound_packet_unittest.cc b/webrtc/modules/rtp_rtcp/source/rtcp_packet/compound_packet_unittest.cc
new file mode 100644
index 0000000000..83dc5f6ed3
--- /dev/null
+++ b/webrtc/modules/rtp_rtcp/source/rtcp_packet/compound_packet_unittest.cc
@@ -0,0 +1,157 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/compound_packet.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet.h"
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/bye.h"
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/receiver_report.h"
+#include "webrtc/test/rtcp_packet_parser.h"
+
+using webrtc::rtcp::Bye;
+using webrtc::rtcp::CompoundPacket;
+using webrtc::rtcp::Fir;
+using webrtc::rtcp::RawPacket;
+using webrtc::rtcp::ReceiverReport;
+using webrtc::rtcp::ReportBlock;
+using webrtc::rtcp::SenderReport;
+using webrtc::test::RtcpPacketParser;
+
+namespace webrtc {
+
+const uint32_t kSenderSsrc = 0x12345678;
+
+TEST(RtcpCompoundPacketTest, AppendPacket) {
+ Fir fir;
+ ReportBlock rb;
+ ReceiverReport rr;
+ rr.From(kSenderSsrc);
+ EXPECT_TRUE(rr.WithReportBlock(rb));
+ rr.Append(&fir);
+
+ rtc::scoped_ptr<RawPacket> packet(rr.Build());
+ RtcpPacketParser parser;
+ parser.Parse(packet->Buffer(), packet->Length());
+ EXPECT_EQ(1, parser.receiver_report()->num_packets());
+ EXPECT_EQ(kSenderSsrc, parser.receiver_report()->Ssrc());
+ EXPECT_EQ(1, parser.report_block()->num_packets());
+ EXPECT_EQ(1, parser.fir()->num_packets());
+}
+
+TEST(RtcpCompoundPacketTest, AppendPacketOnEmpty) {
+ CompoundPacket empty;
+ ReceiverReport rr;
+ rr.From(kSenderSsrc);
+ empty.Append(&rr);
+
+ rtc::scoped_ptr<RawPacket> packet(empty.Build());
+ RtcpPacketParser parser;
+ parser.Parse(packet->Buffer(), packet->Length());
+ EXPECT_EQ(1, parser.receiver_report()->num_packets());
+ EXPECT_EQ(0, parser.report_block()->num_packets());
+}
+
+TEST(RtcpCompoundPacketTest, AppendPacketWithOwnAppendedPacket) {
+ Fir fir;
+ Bye bye;
+ ReportBlock rb;
+
+ ReceiverReport rr;
+ EXPECT_TRUE(rr.WithReportBlock(rb));
+ rr.Append(&fir);
+
+ SenderReport sr;
+ sr.Append(&bye);
+ sr.Append(&rr);
+
+ rtc::scoped_ptr<RawPacket> packet(sr.Build());
+ RtcpPacketParser parser;
+ parser.Parse(packet->Buffer(), packet->Length());
+ EXPECT_EQ(1, parser.sender_report()->num_packets());
+ EXPECT_EQ(1, parser.receiver_report()->num_packets());
+ EXPECT_EQ(1, parser.report_block()->num_packets());
+ EXPECT_EQ(1, parser.bye()->num_packets());
+ EXPECT_EQ(1, parser.fir()->num_packets());
+}
+
+TEST(RtcpCompoundPacketTest, BuildWithInputBuffer) {
+ Fir fir;
+ ReportBlock rb;
+ ReceiverReport rr;
+ rr.From(kSenderSsrc);
+ EXPECT_TRUE(rr.WithReportBlock(rb));
+ rr.Append(&fir);
+
+ const size_t kRrLength = 8;
+ const size_t kReportBlockLength = 24;
+ const size_t kFirLength = 20;
+
+ class Verifier : public rtcp::RtcpPacket::PacketReadyCallback {
+ public:
+ void OnPacketReady(uint8_t* data, size_t length) override {
+ RtcpPacketParser parser;
+ parser.Parse(data, length);
+ EXPECT_EQ(1, parser.receiver_report()->num_packets());
+ EXPECT_EQ(1, parser.report_block()->num_packets());
+ EXPECT_EQ(1, parser.fir()->num_packets());
+ ++packets_created_;
+ }
+
+ int packets_created_ = 0;
+ } verifier;
+ const size_t kBufferSize = kRrLength + kReportBlockLength + kFirLength;
+ uint8_t buffer[kBufferSize];
+ EXPECT_TRUE(rr.BuildExternalBuffer(buffer, kBufferSize, &verifier));
+ EXPECT_EQ(1, verifier.packets_created_);
+}
+
+TEST(RtcpCompoundPacketTest, BuildWithTooSmallBuffer_FragmentedSend) {
+ Fir fir;
+ ReportBlock rb;
+ ReceiverReport rr;
+ rr.From(kSenderSsrc);
+ EXPECT_TRUE(rr.WithReportBlock(rb));
+ rr.Append(&fir);
+
+ const size_t kRrLength = 8;
+ const size_t kReportBlockLength = 24;
+
+ class Verifier : public rtcp::RtcpPacket::PacketReadyCallback {
+ public:
+ void OnPacketReady(uint8_t* data, size_t length) override {
+ RtcpPacketParser parser;
+ parser.Parse(data, length);
+ switch (packets_created_++) {
+ case 0:
+ EXPECT_EQ(1, parser.receiver_report()->num_packets());
+ EXPECT_EQ(1, parser.report_block()->num_packets());
+ EXPECT_EQ(0, parser.fir()->num_packets());
+ break;
+ case 1:
+ EXPECT_EQ(0, parser.receiver_report()->num_packets());
+ EXPECT_EQ(0, parser.report_block()->num_packets());
+ EXPECT_EQ(1, parser.fir()->num_packets());
+ break;
+ default:
+ ADD_FAILURE() << "OnPacketReady not expected to be called "
+ << packets_created_ << " times.";
+ }
+ }
+
+ int packets_created_ = 0;
+ } verifier;
+ const size_t kBufferSize = kRrLength + kReportBlockLength;
+ uint8_t buffer[kBufferSize];
+ EXPECT_TRUE(rr.BuildExternalBuffer(buffer, kBufferSize, &verifier));
+ EXPECT_EQ(2, verifier.packets_created_);
+}
+
+} // namespace webrtc
diff --git a/webrtc/modules/rtp_rtcp/source/rtcp_packet/dlrr.cc b/webrtc/modules/rtp_rtcp/source/rtcp_packet/dlrr.cc
new file mode 100644
index 0000000000..6d6c48fada
--- /dev/null
+++ b/webrtc/modules/rtp_rtcp/source/rtcp_packet/dlrr.cc
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/dlrr.h"
+
+#include "webrtc/base/checks.h"
+#include "webrtc/base/logging.h"
+#include "webrtc/modules/rtp_rtcp/source/byte_io.h"
+
+namespace webrtc {
+namespace rtcp {
+// DLRR Report Block (RFC 3611).
+//
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | BT=5 | reserved | block length |
+// +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
+// | SSRC_1 (SSRC of first receiver) | sub-
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ block
+// | last RR (LRR) | 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | delay since last RR (DLRR) |
+// +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
+// | SSRC_2 (SSRC of second receiver) | sub-
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ block
+// : ... : 2
+bool Dlrr::Parse(const uint8_t* buffer, uint16_t block_length_32bits) {
+ RTC_DCHECK(buffer[0] == kBlockType);
+ // kReserved = buffer[1];
+ RTC_DCHECK_EQ(block_length_32bits,
+ ByteReader<uint16_t>::ReadBigEndian(&buffer[2]));
+ if (block_length_32bits % 3 != 0) {
+ LOG(LS_WARNING) << "Invalid size for dlrr block.";
+ return false;
+ }
+
+ size_t blocks_count = block_length_32bits / 3;
+ const uint8_t* read_at = buffer + kBlockHeaderLength;
+ sub_blocks_.resize(blocks_count);
+ for (SubBlock& sub_block : sub_blocks_) {
+ sub_block.ssrc = ByteReader<uint32_t>::ReadBigEndian(&read_at[0]);
+ sub_block.last_rr = ByteReader<uint32_t>::ReadBigEndian(&read_at[4]);
+ sub_block.delay_since_last_rr =
+ ByteReader<uint32_t>::ReadBigEndian(&read_at[8]);
+ read_at += kSubBlockLength;
+ }
+ return true;
+}
+
+size_t Dlrr::BlockLength() const {
+ if (sub_blocks_.empty())
+ return 0;
+ return kBlockHeaderLength + kSubBlockLength * sub_blocks_.size();
+}
+
+void Dlrr::Create(uint8_t* buffer) const {
+ if (sub_blocks_.empty()) // No subblocks, no need to write header either.
+ return;
+ // Create block header.
+ const uint8_t kReserved = 0;
+ buffer[0] = kBlockType;
+ buffer[1] = kReserved;
+ ByteWriter<uint16_t>::WriteBigEndian(&buffer[2], 3 * sub_blocks_.size());
+ // Create sub blocks.
+ uint8_t* write_at = buffer + kBlockHeaderLength;
+ for (const SubBlock& sub_block : sub_blocks_) {
+ ByteWriter<uint32_t>::WriteBigEndian(&write_at[0], sub_block.ssrc);
+ ByteWriter<uint32_t>::WriteBigEndian(&write_at[4], sub_block.last_rr);
+ ByteWriter<uint32_t>::WriteBigEndian(&write_at[8],
+ sub_block.delay_since_last_rr);
+ write_at += kSubBlockLength;
+ }
+ RTC_DCHECK_EQ(buffer + BlockLength(), write_at);
+}
+
+bool Dlrr::WithDlrrItem(uint32_t ssrc,
+ uint32_t last_rr,
+ uint32_t delay_last_rr) {
+ if (sub_blocks_.size() >= kMaxNumberOfDlrrItems) {
+ LOG(LS_WARNING) << "Max DLRR items reached.";
+ return false;
+ }
+ SubBlock block;
+ block.ssrc = ssrc;
+ block.last_rr = last_rr;
+ block.delay_since_last_rr = delay_last_rr;
+ sub_blocks_.push_back(block);
+ return true;
+}
+
+} // namespace rtcp
+} // namespace webrtc
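
A worked check of the DLRR length field: Create() writes 3 * sub_blocks_.size()
into the 16-bit length, i.e. the block size in 32-bit words excluding the
one-word block header. A single sub-block therefore yields a field value of 3
and a total of (3 + 1) * 4 = 16 bytes, exactly the 16-byte kBlock fixture in
dlrr_unittest.cc below.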
diff --git a/webrtc/modules/rtp_rtcp/source/rtcp_packet/dlrr.h b/webrtc/modules/rtp_rtcp/source/rtcp_packet/dlrr.h
new file mode 100644
index 0000000000..9af2dedf3f
--- /dev/null
+++ b/webrtc/modules/rtp_rtcp/source/rtcp_packet/dlrr.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#ifndef WEBRTC_MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_DLRR_H_
+#define WEBRTC_MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_DLRR_H_
+
+#include <vector>
+
+#include "webrtc/base/basictypes.h"
+
+namespace webrtc {
+namespace rtcp {
+
+// DLRR Report Block: Delay since the Last Receiver Report (RFC 3611).
+class Dlrr {
+ public:
+ struct SubBlock {
+ // RFC 3611 4.5
+ uint32_t ssrc;
+ uint32_t last_rr;
+ uint32_t delay_since_last_rr;
+ };
+
+ static const uint8_t kBlockType = 5;
+ static const size_t kMaxNumberOfDlrrItems = 100;
+
+ Dlrr() {}
+ Dlrr(const Dlrr& other) = default;
+ ~Dlrr() {}
+
+ Dlrr& operator=(const Dlrr& other) = default;
+
+  // Second parameter is the value read from the block header, i.e. the size
+  // of the block in 32-bit words, excluding the block header itself.
+ bool Parse(const uint8_t* buffer, uint16_t block_length_32bits);
+
+ size_t BlockLength() const;
+ // Fills buffer with the Dlrr.
+ // Consumes BlockLength() bytes.
+ void Create(uint8_t* buffer) const;
+
+  // At most 100 DLRR items can be added per DLRR report block.
+ bool WithDlrrItem(uint32_t ssrc, uint32_t last_rr, uint32_t delay_last_rr);
+
+ const std::vector<SubBlock>& sub_blocks() const { return sub_blocks_; }
+
+ private:
+ static const size_t kBlockHeaderLength = 4;
+ static const size_t kSubBlockLength = 12;
+
+ std::vector<SubBlock> sub_blocks_;
+};
+} // namespace rtcp
+} // namespace webrtc
+#endif // WEBRTC_MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_DLRR_H_
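
Unlike the RtcpPacket subclasses, Dlrr is a report block serialized into a
caller-provided buffer. A minimal sketch using the fixture values from the
unit test below:

    webrtc::rtcp::Dlrr dlrr;
    dlrr.WithDlrrItem(0x12345678,   // ssrc
                      0x23344556,   // last_rr
                      0x33343536);  // delay_since_last_rr
    std::vector<uint8_t> buffer(dlrr.BlockLength());  // 16 bytes here.
    dlrr.Create(buffer.data());  // Writes exactly BlockLength() bytes.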
diff --git a/webrtc/modules/rtp_rtcp/source/rtcp_packet/dlrr_unittest.cc b/webrtc/modules/rtp_rtcp/source/rtcp_packet/dlrr_unittest.cc
new file mode 100644
index 0000000000..c7c139c560
--- /dev/null
+++ b/webrtc/modules/rtp_rtcp/source/rtcp_packet/dlrr_unittest.cc
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/dlrr.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+#include "webrtc/modules/rtp_rtcp/source/byte_io.h"
+
+using webrtc::rtcp::Dlrr;
+
+namespace webrtc {
+namespace {
+
+const uint32_t kSsrc = 0x12345678;
+const uint32_t kLastRR = 0x23344556;
+const uint32_t kDelay = 0x33343536;
+const uint8_t kBlock[] = {0x05, 0x00, 0x00, 0x03, 0x12, 0x34, 0x56, 0x78,
+ 0x23, 0x34, 0x45, 0x56, 0x33, 0x34, 0x35, 0x36};
+const size_t kBlockSizeBytes = sizeof(kBlock);
+
+TEST(RtcpPacketDlrrTest, Empty) {
+ Dlrr dlrr;
+
+ EXPECT_EQ(0u, dlrr.BlockLength());
+}
+
+TEST(RtcpPacketDlrrTest, Create) {
+ Dlrr dlrr;
+ EXPECT_TRUE(dlrr.WithDlrrItem(kSsrc, kLastRR, kDelay));
+
+ ASSERT_EQ(kBlockSizeBytes, dlrr.BlockLength());
+ uint8_t buffer[kBlockSizeBytes];
+
+ dlrr.Create(buffer);
+ EXPECT_EQ(0, memcmp(buffer, kBlock, kBlockSizeBytes));
+}
+
+TEST(RtcpPacketDlrrTest, Parse) {
+ Dlrr dlrr;
+ uint16_t block_length = ByteReader<uint16_t>::ReadBigEndian(&kBlock[2]);
+ EXPECT_TRUE(dlrr.Parse(kBlock, block_length));
+
+ EXPECT_EQ(1u, dlrr.sub_blocks().size());
+ const Dlrr::SubBlock& block = dlrr.sub_blocks().front();
+ EXPECT_EQ(kSsrc, block.ssrc);
+ EXPECT_EQ(kLastRR, block.last_rr);
+ EXPECT_EQ(kDelay, block.delay_since_last_rr);
+}
+
+TEST(RtcpPacketDlrrTest, ParseFailsOnBadSize) {
+ const size_t kBigBufferSize = 0x100; // More than enough.
+ uint8_t buffer[kBigBufferSize];
+ buffer[0] = Dlrr::kBlockType;
+ buffer[1] = 0; // Reserved.
+ buffer[2] = 0; // Most significant size byte.
+ for (uint8_t size = 3; size < 6; ++size) {
+ buffer[3] = size;
+ Dlrr dlrr;
+ // Parse should be successful only when size is multiple of 3.
+ EXPECT_EQ(size % 3 == 0, dlrr.Parse(buffer, static_cast<uint16_t>(size)));
+ }
+}
+
+TEST(RtcpPacketDlrrTest, FailsOnTooManySubBlocks) {
+ Dlrr dlrr;
+ for (size_t i = 1; i <= Dlrr::kMaxNumberOfDlrrItems; ++i) {
+ EXPECT_TRUE(dlrr.WithDlrrItem(kSsrc + i, kLastRR + i, kDelay + i));
+ }
+ EXPECT_FALSE(dlrr.WithDlrrItem(kSsrc, kLastRR, kDelay));
+}
+
+TEST(RtcpPacketDlrrTest, CreateAndParseMaxSubBlocks) {
+ const size_t kBufferSize = 0x1000; // More than enough.
+ uint8_t buffer[kBufferSize];
+
+ // Create.
+ Dlrr dlrr;
+ for (size_t i = 1; i <= Dlrr::kMaxNumberOfDlrrItems; ++i) {
+ EXPECT_TRUE(dlrr.WithDlrrItem(kSsrc + i, kLastRR + i, kDelay + i));
+ }
+ size_t used_buffer_size = dlrr.BlockLength();
+ ASSERT_LE(used_buffer_size, kBufferSize);
+ dlrr.Create(buffer);
+
+ // Parse.
+ Dlrr parsed;
+ uint16_t block_length = ByteReader<uint16_t>::ReadBigEndian(&buffer[2]);
+ EXPECT_EQ(used_buffer_size, (block_length + 1) * 4u);
+ EXPECT_TRUE(parsed.Parse(buffer, block_length));
+ EXPECT_TRUE(parsed.sub_blocks().size() == Dlrr::kMaxNumberOfDlrrItems);
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/webrtc/modules/rtp_rtcp/source/rtcp_packet/extended_jitter_report.cc b/webrtc/modules/rtp_rtcp/source/rtcp_packet/extended_jitter_report.cc
new file mode 100644
index 0000000000..030f9f81fa
--- /dev/null
+++ b/webrtc/modules/rtp_rtcp/source/rtcp_packet/extended_jitter_report.cc
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/extended_jitter_report.h"
+
+#include "webrtc/base/checks.h"
+#include "webrtc/base/logging.h"
+#include "webrtc/modules/rtp_rtcp/source/byte_io.h"
+#include "webrtc/modules/rtp_rtcp/source/rtp_utility.h"
+
+using webrtc::RTCPUtility::RtcpCommonHeader;
+
+namespace webrtc {
+namespace rtcp {
+
+// Transmission Time Offsets in RTP Streams (RFC 5450).
+//
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// hdr |V=2|P| RC | PT=IJ=195 | length |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | inter-arrival jitter |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// . .
+// . .
+// . .
+// | inter-arrival jitter |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//
+// If present, this RTCP packet must be placed after a receiver report
+// (inside a compound RTCP packet), and MUST have the same value for RC
+// (reception report count) as the receiver report.
+
+bool ExtendedJitterReport::Parse(const RtcpCommonHeader& header,
+ const uint8_t* payload) {
+ RTC_DCHECK(header.packet_type == kPacketType);
+
+ const uint8_t jitters_count = header.count_or_format;
+ const size_t kJitterSizeBytes = 4u;
+
+ if (header.payload_size_bytes < jitters_count * kJitterSizeBytes) {
+ LOG(LS_WARNING) << "Packet is too small to contain all the jitter.";
+ return false;
+ }
+
+ inter_arrival_jitters_.resize(jitters_count);
+ for (size_t index = 0; index < jitters_count; ++index) {
+ inter_arrival_jitters_[index] =
+ ByteReader<uint32_t>::ReadBigEndian(&payload[index * kJitterSizeBytes]);
+ }
+
+ return true;
+}
+
+bool ExtendedJitterReport::WithJitter(uint32_t jitter) {
+ if (inter_arrival_jitters_.size() >= kMaxNumberOfJitters) {
+ LOG(LS_WARNING) << "Max inter-arrival jitter items reached.";
+ return false;
+ }
+ inter_arrival_jitters_.push_back(jitter);
+ return true;
+}
+
+bool ExtendedJitterReport::Create(
+ uint8_t* packet,
+ size_t* index,
+ size_t max_length,
+ RtcpPacket::PacketReadyCallback* callback) const {
+ while (*index + BlockLength() > max_length) {
+ if (!OnBufferFull(packet, index, callback))
+ return false;
+ }
+ const size_t index_end = *index + BlockLength();
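+  // Note: the count field and the length field both equal the number of
+  // jitter values: the packet is (1 + count) 32-bit words, length = words - 1.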
+ size_t length = inter_arrival_jitters_.size();
+ CreateHeader(length, kPacketType, length, packet, index);
+
+ for (uint32_t jitter : inter_arrival_jitters_) {
+ ByteWriter<uint32_t>::WriteBigEndian(packet + *index, jitter);
+ *index += sizeof(uint32_t);
+ }
+ // Sanity check.
+ RTC_DCHECK_EQ(index_end, *index);
+ return true;
+}
+
+} // namespace rtcp
+} // namespace webrtc
diff --git a/webrtc/modules/rtp_rtcp/source/rtcp_packet/extended_jitter_report.h b/webrtc/modules/rtp_rtcp/source/rtcp_packet/extended_jitter_report.h
new file mode 100644
index 0000000000..49de7be1a8
--- /dev/null
+++ b/webrtc/modules/rtp_rtcp/source/rtcp_packet/extended_jitter_report.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_EXTENDED_JITTER_REPORT_H_
+#define WEBRTC_MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_EXTENDED_JITTER_REPORT_H_
+
+#include <vector>
+
+#include "webrtc/base/checks.h"
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet.h"
+#include "webrtc/modules/rtp_rtcp/source/rtcp_utility.h"
+
+namespace webrtc {
+namespace rtcp {
+
+class ExtendedJitterReport : public RtcpPacket {
+ public:
+ static const uint8_t kPacketType = 195;
+
+ ExtendedJitterReport() : RtcpPacket() {}
+
+ virtual ~ExtendedJitterReport() {}
+
+ // Parse assumes header is already parsed and validated.
+ bool Parse(const RTCPUtility::RtcpCommonHeader& header,
+ const uint8_t* payload); // Size of the payload is in the header.
+
+ bool WithJitter(uint32_t jitter);
+
+ size_t jitters_count() const { return inter_arrival_jitters_.size(); }
+ uint32_t jitter(size_t index) const {
+ RTC_DCHECK_LT(index, jitters_count());
+ return inter_arrival_jitters_[index];
+ }
+
+ protected:
+ bool Create(uint8_t* packet,
+ size_t* index,
+ size_t max_length,
+ RtcpPacket::PacketReadyCallback* callback) const override;
+
+ private:
+ static const int kMaxNumberOfJitters = 0x1f;
+
+ size_t BlockLength() const override {
+ return kHeaderLength + 4 * inter_arrival_jitters_.size();
+ }
+
+ std::vector<uint32_t> inter_arrival_jitters_;
+
+ RTC_DISALLOW_COPY_AND_ASSIGN(ExtendedJitterReport);
+};
+
+} // namespace rtcp
+} // namespace webrtc
+#endif // WEBRTC_MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_EXTENDED_JITTER_REPORT_H_
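
A minimal usage sketch; the 5-bit count field limits the packet to
kMaxNumberOfJitters = 0x1f = 31 values:

    webrtc::rtcp::ExtendedJitterReport ij;
    ij.WithJitter(0x11121314);  // One inter-arrival jitter per source.
    rtc::scoped_ptr<webrtc::rtcp::RawPacket> packet = ij.Build();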
diff --git a/webrtc/modules/rtp_rtcp/source/rtcp_packet/extended_jitter_report_unittest.cc b/webrtc/modules/rtp_rtcp/source/rtcp_packet/extended_jitter_report_unittest.cc
new file mode 100644
index 0000000000..09d7b6305f
--- /dev/null
+++ b/webrtc/modules/rtp_rtcp/source/rtcp_packet/extended_jitter_report_unittest.cc
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/extended_jitter_report.h"
+
+#include <limits>
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/modules/rtp_rtcp/source/rtcp_utility.h"
+
+using webrtc::rtcp::RawPacket;
+using webrtc::rtcp::ExtendedJitterReport;
+using webrtc::RTCPUtility::RtcpCommonHeader;
+using webrtc::RTCPUtility::RtcpParseCommonHeader;
+
+namespace webrtc {
+namespace {
+
+class RtcpPacketExtendedJitterReportTest : public ::testing::Test {
+ protected:
+ void BuildPacket() { packet = ij.Build(); }
+ void ParsePacket() {
+ RtcpCommonHeader header;
+ EXPECT_TRUE(
+ RtcpParseCommonHeader(packet->Buffer(), packet->Length(), &header));
+ EXPECT_EQ(header.BlockSize(), packet->Length());
+ EXPECT_TRUE(parsed_.Parse(
+ header, packet->Buffer() + RtcpCommonHeader::kHeaderSizeBytes));
+ }
+
+ ExtendedJitterReport ij;
+ rtc::scoped_ptr<RawPacket> packet;
+ const ExtendedJitterReport& parsed() { return parsed_; }
+
+ private:
+ ExtendedJitterReport parsed_;
+};
+
+TEST_F(RtcpPacketExtendedJitterReportTest, NoItem) {
+ // No initialization because packet is empty.
+ BuildPacket();
+ ParsePacket();
+
+ EXPECT_EQ(0u, parsed().jitters_count());
+}
+
+TEST_F(RtcpPacketExtendedJitterReportTest, OneItem) {
+ EXPECT_TRUE(ij.WithJitter(0x11121314));
+
+ BuildPacket();
+ ParsePacket();
+
+ EXPECT_EQ(1u, parsed().jitters_count());
+ EXPECT_EQ(0x11121314U, parsed().jitter(0));
+}
+
+TEST_F(RtcpPacketExtendedJitterReportTest, TwoItems) {
+ EXPECT_TRUE(ij.WithJitter(0x11121418));
+ EXPECT_TRUE(ij.WithJitter(0x22242628));
+
+ BuildPacket();
+ ParsePacket();
+
+ EXPECT_EQ(2u, parsed().jitters_count());
+ EXPECT_EQ(0x11121418U, parsed().jitter(0));
+ EXPECT_EQ(0x22242628U, parsed().jitter(1));
+}
+
+TEST_F(RtcpPacketExtendedJitterReportTest, TooManyItems) {
+ const int kMaxIjItems = (1 << 5) - 1;
+ for (int i = 0; i < kMaxIjItems; ++i) {
+ EXPECT_TRUE(ij.WithJitter(i));
+ }
+ EXPECT_FALSE(ij.WithJitter(kMaxIjItems));
+}
+
+TEST_F(RtcpPacketExtendedJitterReportTest, ParseFailWithTooManyItems) {
+ ij.WithJitter(0x11121418);
+ BuildPacket();
+ RtcpCommonHeader header;
+ RtcpParseCommonHeader(packet->Buffer(), packet->Length(), &header);
+  header.count_or_format++;  // Damage the packet.
+
+ ExtendedJitterReport parsed;
+
+ EXPECT_FALSE(parsed.Parse(
+ header, packet->Buffer() + RtcpCommonHeader::kHeaderSizeBytes));
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/webrtc/modules/rtp_rtcp/source/rtcp_packet/nack.cc b/webrtc/modules/rtp_rtcp/source/rtcp_packet/nack.cc
new file mode 100644
index 0000000000..8b9b354a06
--- /dev/null
+++ b/webrtc/modules/rtp_rtcp/source/rtcp_packet/nack.cc
@@ -0,0 +1,163 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/nack.h"
+
+#include <algorithm>
+
+#include "webrtc/base/checks.h"
+#include "webrtc/base/logging.h"
+#include "webrtc/modules/rtp_rtcp/source/byte_io.h"
+
+using webrtc::RTCPUtility::RtcpCommonHeader;
+
+namespace webrtc {
+namespace rtcp {
+
+// RFC 4585: Feedback format.
+//
+// Common packet format:
+//
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |V=2|P| FMT | PT | length |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// 0 | SSRC of packet sender |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// 4 | SSRC of media source |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// : Feedback Control Information (FCI) :
+// : :
+//
+// Generic NACK (RFC 4585).
+//
+// FCI:
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | PID | BLP |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+bool Nack::Parse(const RtcpCommonHeader& header, const uint8_t* payload) {
+ RTC_DCHECK(header.packet_type == kPacketType);
+ RTC_DCHECK(header.count_or_format == kFeedbackMessageType);
+
+ if (header.payload_size_bytes < kCommonFeedbackLength + kNackItemLength) {
+ LOG(LS_WARNING) << "Payload length " << header.payload_size_bytes
+ << " is too small for a Nack.";
+ return false;
+ }
+ size_t nack_items =
+ (header.payload_size_bytes - kCommonFeedbackLength) / kNackItemLength;
+
+ ParseCommonFeedback(payload);
+ const uint8_t* next_nack = payload + kCommonFeedbackLength;
+
+ packet_ids_.clear();
+ packed_.resize(nack_items);
+ for (size_t index = 0; index < nack_items; ++index) {
+ packed_[index].first_pid = ByteReader<uint16_t>::ReadBigEndian(next_nack);
+ packed_[index].bitmask = ByteReader<uint16_t>::ReadBigEndian(next_nack + 2);
+ next_nack += kNackItemLength;
+ }
+ Unpack();
+
+ return true;
+}
+
+bool Nack::Create(uint8_t* packet,
+ size_t* index,
+ size_t max_length,
+ RtcpPacket::PacketReadyCallback* callback) const {
+ RTC_DCHECK(!packed_.empty());
+ // If nack list can't fit in packet, try to fragment.
+ size_t nack_index = 0;
+ const size_t kCommonFbFmtLength = kHeaderLength + kCommonFeedbackLength;
+ do {
+ size_t bytes_left_in_buffer = max_length - *index;
+ if (bytes_left_in_buffer < kCommonFbFmtLength + kNackItemLength) {
+ if (!OnBufferFull(packet, index, callback))
+ return false;
+ continue;
+ }
+ size_t num_nack_fields =
+ std::min((bytes_left_in_buffer - kCommonFbFmtLength) / kNackItemLength,
+ packed_.size() - nack_index);
+
+ size_t size_bytes =
+ (num_nack_fields * kNackItemLength) + kCommonFbFmtLength;
+    // Header length field: size in 32-bit words minus one.
+    size_t header_length = ((size_bytes + 3) / 4) - 1;
+ CreateHeader(kFeedbackMessageType, kPacketType, header_length, packet,
+ index);
+ CreateCommonFeedback(packet + *index);
+ *index += kCommonFeedbackLength;
+ size_t end_index = nack_index + num_nack_fields;
+ for (; nack_index < end_index; ++nack_index) {
+ const auto& item = packed_[nack_index];
+ ByteWriter<uint16_t>::WriteBigEndian(packet + *index + 0, item.first_pid);
+ ByteWriter<uint16_t>::WriteBigEndian(packet + *index + 2, item.bitmask);
+ *index += kNackItemLength;
+ }
+ RTC_DCHECK_LE(*index, max_length);
+ } while (nack_index < packed_.size());
+
+ return true;
+}
+
+size_t Nack::BlockLength() const {
+ return (packed_.size() * kNackItemLength) + kCommonFeedbackLength +
+ kHeaderLength;
+}
+
+void Nack::WithList(const uint16_t* nack_list, size_t length) {
+ RTC_DCHECK(nack_list);
+ RTC_DCHECK(packet_ids_.empty());
+ RTC_DCHECK(packed_.empty());
+ packet_ids_.assign(nack_list, nack_list + length);
+ Pack();
+}
+
+void Nack::Pack() {
+ RTC_DCHECK(!packet_ids_.empty());
+ RTC_DCHECK(packed_.empty());
+ auto it = packet_ids_.begin();
+ const auto end = packet_ids_.end();
+ while (it != end) {
+ PackedNack item;
+ item.first_pid = *it++;
+ // Bitmask specifies losses in any of the 16 packets following the pid.
+ item.bitmask = 0;
+ while (it != end) {
+ uint16_t shift = static_cast<uint16_t>(*it - item.first_pid - 1);
+ if (shift <= 15) {
+ item.bitmask |= (1 << shift);
+ ++it;
+ } else {
+ break;
+ }
+ }
+ packed_.push_back(item);
+ }
+}
+
+void Nack::Unpack() {
+ RTC_DCHECK(packet_ids_.empty());
+ RTC_DCHECK(!packed_.empty());
+ for (const PackedNack& item : packed_) {
+ packet_ids_.push_back(item.first_pid);
+ uint16_t pid = item.first_pid + 1;
+ for (uint16_t bitmask = item.bitmask; bitmask != 0; bitmask >>= 1, ++pid)
+ if (bitmask & 1)
+ packet_ids_.push_back(pid);
+ }
+}
+
+} // namespace rtcp
+} // namespace webrtc
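
A worked example of Pack() on the kList fixture {0, 1, 3, 8, 16} from
nack_unittest.cc: first_pid is 0, the remaining ids give shifts 0, 2, 7 and
15, so the bitmask becomes 0x8085 and the whole list fits in a single PID/BLP
item, matching the trailing bytes 0x00 0x00 0x80 0x85 of the expected kPacket.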
diff --git a/webrtc/modules/rtp_rtcp/source/rtcp_packet/nack.h b/webrtc/modules/rtp_rtcp/source/rtcp_packet/nack.h
new file mode 100644
index 0000000000..fb2be113a2
--- /dev/null
+++ b/webrtc/modules/rtp_rtcp/source/rtcp_packet/nack.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_NACK_H_
+#define WEBRTC_MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_NACK_H_
+
+#include <vector>
+
+#include "webrtc/base/basictypes.h"
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/rtpfb.h"
+#include "webrtc/modules/rtp_rtcp/source/rtcp_utility.h"
+
+namespace webrtc {
+namespace rtcp {
+
+class Nack : public Rtpfb {
+ public:
+ const uint8_t kFeedbackMessageType = 1;
+ Nack() {}
+
+ virtual ~Nack() {}
+
+ // Parse assumes header is already parsed and validated.
+ bool Parse(const RTCPUtility::RtcpCommonHeader& header,
+ const uint8_t* payload); // Size of the payload is in the header.
+
+ void WithList(const uint16_t* nack_list, size_t length);
+ const std::vector<uint16_t>& packet_ids() const { return packet_ids_; }
+
+ protected:
+ bool Create(uint8_t* packet,
+ size_t* index,
+ size_t max_length,
+ RtcpPacket::PacketReadyCallback* callback) const override;
+
+ size_t BlockLength() const override;
+
+ private:
+ const size_t kNackItemLength = 4;
+ struct PackedNack {
+ uint16_t first_pid;
+ uint16_t bitmask;
+ };
+
+  void Pack();    // Fills packed_ using packet_ids_ (used in WithList).
+  void Unpack();  // Fills packet_ids_ using packed_ (used in Parse).
+
+ std::vector<PackedNack> packed_;
+ std::vector<uint16_t> packet_ids_;
+
+ RTC_DISALLOW_COPY_AND_ASSIGN(Nack);
+};
+
+} // namespace rtcp
+} // namespace webrtc
+#endif // WEBRTC_MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_NACK_H_
diff --git a/webrtc/modules/rtp_rtcp/source/rtcp_packet/nack_unittest.cc b/webrtc/modules/rtp_rtcp/source/rtcp_packet/nack_unittest.cc
new file mode 100644
index 0000000000..01e30f5644
--- /dev/null
+++ b/webrtc/modules/rtp_rtcp/source/rtcp_packet/nack_unittest.cc
@@ -0,0 +1,190 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/nack.h"
+
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using ::testing::_;
+using ::testing::ElementsAreArray;
+using ::testing::Invoke;
+using ::testing::UnorderedElementsAreArray;
+
+using webrtc::rtcp::Nack;
+using webrtc::rtcp::RawPacket;
+using webrtc::RTCPUtility::RtcpCommonHeader;
+using webrtc::RTCPUtility::RtcpParseCommonHeader;
+
+namespace webrtc {
+namespace {
+
+const uint32_t kSenderSsrc = 0x12345678;
+const uint32_t kRemoteSsrc = 0x23456789;
+
+const uint16_t kList[] = {0, 1, 3, 8, 16};
+const size_t kListLength = sizeof(kList) / sizeof(kList[0]);
+const uint8_t kPacket[] = {0x81, 205, 0x00, 0x03, 0x12, 0x34, 0x56, 0x78,
+ 0x23, 0x45, 0x67, 0x89, 0x00, 0x00, 0x80, 0x85};
+const size_t kPacketLength = sizeof(kPacket);
+
+const uint16_t kWrapList[] = {0xffdc, 0xffec, 0xfffe, 0xffff, 0x0000,
+ 0x0001, 0x0003, 0x0014, 0x0064};
+const size_t kWrapListLength = sizeof(kWrapList) / sizeof(kWrapList[0]);
+const uint8_t kWrapPacket[] = {0x81, 205, 0x00, 0x06, 0x12, 0x34, 0x56, 0x78,
+ 0x23, 0x45, 0x67, 0x89, 0xff, 0xdc, 0x80, 0x00,
+ 0xff, 0xfe, 0x00, 0x17, 0x00, 0x14, 0x00, 0x00,
+ 0x00, 0x64, 0x00, 0x00};
+const size_t kWrapPacketLength = sizeof(kWrapPacket);
+
+TEST(RtcpPacketNackTest, Create) {
+ Nack nack;
+ nack.From(kSenderSsrc);
+ nack.To(kRemoteSsrc);
+ nack.WithList(kList, kListLength);
+
+ rtc::scoped_ptr<RawPacket> packet = nack.Build();
+
+ EXPECT_EQ(kPacketLength, packet->Length());
+ EXPECT_EQ(0, memcmp(kPacket, packet->Buffer(), kPacketLength));
+}
+
+TEST(RtcpPacketNackTest, Parse) {
+ RtcpCommonHeader header;
+ EXPECT_TRUE(RtcpParseCommonHeader(kPacket, kPacketLength, &header));
+ EXPECT_EQ(kPacketLength, header.BlockSize());
+ Nack parsed;
+
+ EXPECT_TRUE(
+ parsed.Parse(header, kPacket + RtcpCommonHeader::kHeaderSizeBytes));
+ const Nack& const_parsed = parsed;
+
+ EXPECT_EQ(kSenderSsrc, const_parsed.sender_ssrc());
+ EXPECT_EQ(kRemoteSsrc, const_parsed.media_ssrc());
+ EXPECT_THAT(const_parsed.packet_ids(), ElementsAreArray(kList));
+}
+
+TEST(RtcpPacketNackTest, CreateWrap) {
+ Nack nack;
+ nack.From(kSenderSsrc);
+ nack.To(kRemoteSsrc);
+ nack.WithList(kWrapList, kWrapListLength);
+
+ rtc::scoped_ptr<RawPacket> packet = nack.Build();
+
+ EXPECT_EQ(kWrapPacketLength, packet->Length());
+ EXPECT_EQ(0, memcmp(kWrapPacket, packet->Buffer(), kWrapPacketLength));
+}
+
+TEST(RtcpPacketNackTest, ParseWrap) {
+ RtcpCommonHeader header;
+ EXPECT_TRUE(RtcpParseCommonHeader(kWrapPacket, kWrapPacketLength, &header));
+ EXPECT_EQ(kWrapPacketLength, header.BlockSize());
+
+ Nack parsed;
+ EXPECT_TRUE(
+ parsed.Parse(header, kWrapPacket + RtcpCommonHeader::kHeaderSizeBytes));
+
+ EXPECT_EQ(kSenderSsrc, parsed.sender_ssrc());
+ EXPECT_EQ(kRemoteSsrc, parsed.media_ssrc());
+ EXPECT_THAT(parsed.packet_ids(), ElementsAreArray(kWrapList));
+}
+
+TEST(RtcpPacketNackTest, BadOrder) {
+ // Does not guarantee optimal packing, but should guarantee correctness.
+ const uint16_t kUnorderedList[] = {1, 25, 13, 12, 9, 27, 29};
+ const size_t kUnorderedListLength =
+ sizeof(kUnorderedList) / sizeof(kUnorderedList[0]);
+ Nack nack;
+ nack.From(kSenderSsrc);
+ nack.To(kRemoteSsrc);
+ nack.WithList(kUnorderedList, kUnorderedListLength);
+
+ rtc::scoped_ptr<RawPacket> packet = nack.Build();
+
+ Nack parsed;
+ RtcpCommonHeader header;
+ EXPECT_TRUE(
+ RtcpParseCommonHeader(packet->Buffer(), packet->Length(), &header));
+ EXPECT_TRUE(parsed.Parse(
+ header, packet->Buffer() + RtcpCommonHeader::kHeaderSizeBytes));
+
+ EXPECT_EQ(kSenderSsrc, parsed.sender_ssrc());
+ EXPECT_EQ(kRemoteSsrc, parsed.media_ssrc());
+ EXPECT_THAT(parsed.packet_ids(), UnorderedElementsAreArray(kUnorderedList));
+}
+
+TEST(RtcpPacketNackTest, CreateFragmented) {
+ Nack nack;
+ const uint16_t kList[] = {1, 100, 200, 300, 400};
+ const uint16_t kListLength = sizeof(kList) / sizeof(kList[0]);
+ nack.From(kSenderSsrc);
+ nack.To(kRemoteSsrc);
+ nack.WithList(kList, kListLength);
+
+ class MockPacketReadyCallback : public rtcp::RtcpPacket::PacketReadyCallback {
+ public:
+ MOCK_METHOD2(OnPacketReady, void(uint8_t*, size_t));
+ } verifier;
+
+ class NackVerifier {
+ public:
+ explicit NackVerifier(std::vector<uint16_t> ids) : ids_(ids) {}
+ void operator()(uint8_t* data, size_t length) {
+ RtcpCommonHeader header;
+ EXPECT_TRUE(RtcpParseCommonHeader(data, length, &header));
+ EXPECT_EQ(length, header.BlockSize());
+ Nack nack;
+ EXPECT_TRUE(
+ nack.Parse(header, data + RtcpCommonHeader::kHeaderSizeBytes));
+ EXPECT_EQ(kSenderSsrc, nack.sender_ssrc());
+ EXPECT_EQ(kRemoteSsrc, nack.media_ssrc());
+ EXPECT_THAT(nack.packet_ids(), ElementsAreArray(ids_));
+ }
+ std::vector<uint16_t> ids_;
+ } packet1({1, 100, 200}), packet2({300, 400});
+
+ EXPECT_CALL(verifier, OnPacketReady(_, _))
+ .WillOnce(Invoke(packet1))
+ .WillOnce(Invoke(packet2));
+  const size_t kBufferSize = 12 + (3 * 4);  // Fits common header and 3 items.
+ uint8_t buffer[kBufferSize];
+ EXPECT_TRUE(nack.BuildExternalBuffer(buffer, kBufferSize, &verifier));
+}
+
+TEST(RtcpPacketNackTest, CreateFailsWithTooSmallBuffer) {
+ const uint16_t kList[] = {1};
+ const size_t kMinNackBlockSize = 16;
+ Nack nack;
+ nack.From(kSenderSsrc);
+ nack.To(kRemoteSsrc);
+ nack.WithList(kList, 1);
+ class Verifier : public rtcp::RtcpPacket::PacketReadyCallback {
+ public:
+ void OnPacketReady(uint8_t* data, size_t length) override {
+ ADD_FAILURE() << "Buffer should be too small.";
+ }
+ } verifier;
+ uint8_t buffer[kMinNackBlockSize - 1];
+ EXPECT_FALSE(
+ nack.BuildExternalBuffer(buffer, kMinNackBlockSize - 1, &verifier));
+}
+
+TEST(RtcpPacketNackTest, ParseFailsWithTooSmallBuffer) {
+ RtcpCommonHeader header;
+ EXPECT_TRUE(RtcpParseCommonHeader(kPacket, kPacketLength, &header));
+  header.payload_size_bytes--;  // Damage the packet.
+ Nack parsed;
+ EXPECT_FALSE(
+ parsed.Parse(header, kPacket + RtcpCommonHeader::kHeaderSizeBytes));
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/webrtc/modules/rtp_rtcp/source/rtcp_packet/pli.cc b/webrtc/modules/rtp_rtcp/source/rtcp_packet/pli.cc
new file mode 100644
index 0000000000..3673491058
--- /dev/null
+++ b/webrtc/modules/rtp_rtcp/source/rtcp_packet/pli.cc
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/pli.h"
+
+#include "webrtc/base/checks.h"
+#include "webrtc/base/logging.h"
+
+using webrtc::RTCPUtility::RtcpCommonHeader;
+
+namespace webrtc {
+namespace rtcp {
+
+// RFC 4585: Feedback format.
+//
+// Common packet format:
+//
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |V=2|P| FMT | PT | length |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | SSRC of packet sender |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | SSRC of media source |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// : Feedback Control Information (FCI) :
+// : :
+//
+// Picture loss indication (PLI) (RFC 4585).
+// FCI: no feedback control information.
+bool Pli::Parse(const RtcpCommonHeader& header, const uint8_t* payload) {
+ RTC_DCHECK(header.packet_type == kPacketType);
+ RTC_DCHECK(header.count_or_format == kFeedbackMessageType);
+
+ if (header.payload_size_bytes < kCommonFeedbackLength) {
+ LOG(LS_WARNING) << "Packet is too small to be a valid PLI packet";
+ return false;
+ }
+
+ ParseCommonFeedback(payload);
+ return true;
+}
+
+bool Pli::Create(uint8_t* packet,
+ size_t* index,
+ size_t max_length,
+ RtcpPacket::PacketReadyCallback* callback) const {
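+  // While the packet does not fit, hand the buffered data to the callback
+  // through OnBufferFull, which resets *index to the start of the buffer.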
+ while (*index + BlockLength() > max_length) {
+ if (!OnBufferFull(packet, index, callback))
+ return false;
+ }
+
+ CreateHeader(kFeedbackMessageType, kPacketType, HeaderLength(), packet,
+ index);
+ CreateCommonFeedback(packet + *index);
+ *index += kCommonFeedbackLength;
+ return true;
+}
+
+} // namespace rtcp
+} // namespace webrtc
diff --git a/webrtc/modules/rtp_rtcp/source/rtcp_packet/pli.h b/webrtc/modules/rtp_rtcp/source/rtcp_packet/pli.h
new file mode 100644
index 0000000000..5567825830
--- /dev/null
+++ b/webrtc/modules/rtp_rtcp/source/rtcp_packet/pli.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#ifndef WEBRTC_MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_PLI_H_
+#define WEBRTC_MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_PLI_H_
+
+#include "webrtc/base/basictypes.h"
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/psfb.h"
+
+namespace webrtc {
+namespace rtcp {
+
+// Picture loss indication (PLI) (RFC 4585).
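+//
+// Usage sketch (the ssrc values are placeholders):
+//   Pli pli;
+//   pli.From(kLocalSsrc);  // SSRC of the sender of this feedback message.
+//   pli.To(kMediaSsrc);    // SSRC of the media source the PLI refers to.
+//   rtc::scoped_ptr<RawPacket> packet = pli.Build();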
+class Pli : public Psfb {
+ public:
+ static const uint8_t kFeedbackMessageType = 1;
+
+ Pli() {}
+ virtual ~Pli() {}
+
+ // Parse assumes header is already parsed and validated.
+ bool Parse(const RTCPUtility::RtcpCommonHeader& header,
+ const uint8_t* payload); // Size of the payload is in the header.
+
+ protected:
+ bool Create(uint8_t* packet,
+ size_t* index,
+ size_t max_length,
+ RtcpPacket::PacketReadyCallback* callback) const override;
+
+ private:
+ size_t BlockLength() const override {
+ return kHeaderLength + kCommonFeedbackLength;
+ }
+
+ RTC_DISALLOW_COPY_AND_ASSIGN(Pli);
+};
+
+} // namespace rtcp
+} // namespace webrtc
+#endif // WEBRTC_MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_PLI_H_
diff --git a/webrtc/modules/rtp_rtcp/source/rtcp_packet/pli_unittest.cc b/webrtc/modules/rtp_rtcp/source/rtcp_packet/pli_unittest.cc
new file mode 100644
index 0000000000..1c47c3ffb1
--- /dev/null
+++ b/webrtc/modules/rtp_rtcp/source/rtcp_packet/pli_unittest.cc
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/pli.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/modules/rtp_rtcp/source/rtcp_utility.h"
+
+using webrtc::rtcp::Pli;
+using webrtc::rtcp::RawPacket;
+using webrtc::RTCPUtility::RtcpCommonHeader;
+using webrtc::RTCPUtility::RtcpParseCommonHeader;
+
+namespace webrtc {
+namespace {
+
+const uint32_t kSenderSsrc = 0x12345678;
+const uint32_t kRemoteSsrc = 0x23456789;
+// Manually created Pli packet matching constants above.
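+// Header: V=2, P=0, FMT=1 (PLI) -> 0x81; PT=206 (PSFB); length field = 2,
+// i.e. two 32-bit words follow the first header word.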
+const uint8_t kPacket[] = {0x81, 206, 0x00, 0x02,
+ 0x12, 0x34, 0x56, 0x78,
+ 0x23, 0x45, 0x67, 0x89};
+const size_t kPacketLength = sizeof(kPacket);
+
+TEST(RtcpPacketPliTest, Parse) {
+ RtcpCommonHeader header;
+ EXPECT_TRUE(RtcpParseCommonHeader(kPacket, kPacketLength, &header));
+ Pli mutable_parsed;
+ EXPECT_TRUE(mutable_parsed.Parse(
+ header, kPacket + RtcpCommonHeader::kHeaderSizeBytes));
+ const Pli& parsed = mutable_parsed; // Read values from constant object.
+
+ EXPECT_EQ(kSenderSsrc, parsed.sender_ssrc());
+ EXPECT_EQ(kRemoteSsrc, parsed.media_ssrc());
+}
+
+TEST(RtcpPacketPliTest, Create) {
+ Pli pli;
+ pli.From(kSenderSsrc);
+ pli.To(kRemoteSsrc);
+
+ rtc::scoped_ptr<RawPacket> packet(pli.Build());
+
+ ASSERT_EQ(kPacketLength, packet->Length());
+ EXPECT_EQ(0, memcmp(kPacket, packet->Buffer(), kPacketLength));
+}
+
+TEST(RtcpPacketPliTest, ParseFailsOnTooSmallPacket) {
+ RtcpCommonHeader header;
+ EXPECT_TRUE(RtcpParseCommonHeader(kPacket, kPacketLength, &header));
+ header.payload_size_bytes--;
+
+ Pli parsed;
+ EXPECT_FALSE(
+ parsed.Parse(header, kPacket + RtcpCommonHeader::kHeaderSizeBytes));
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/webrtc/modules/rtp_rtcp/source/rtcp_packet/psfb.cc b/webrtc/modules/rtp_rtcp/source/rtcp_packet/psfb.cc
new file mode 100644
index 0000000000..d1ee401dab
--- /dev/null
+++ b/webrtc/modules/rtp_rtcp/source/rtcp_packet/psfb.cc
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/psfb.h"
+
+#include "webrtc/modules/rtp_rtcp/source/byte_io.h"
+
+namespace webrtc {
+namespace rtcp {
+
+// RFC 4585: Feedback format.
+//
+// Common packet format:
+//
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |V=2|P| FMT | PT | length |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// 0 | SSRC of packet sender |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// 4 | SSRC of media source |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// : Feedback Control Information (FCI) :
+// : :
+
+void Psfb::ParseCommonFeedback(const uint8_t* payload) {
+ sender_ssrc_ = ByteReader<uint32_t>::ReadBigEndian(&payload[0]);
+ media_ssrc_ = ByteReader<uint32_t>::ReadBigEndian(&payload[4]);
+}
+
+void Psfb::CreateCommonFeedback(uint8_t* payload) const {
+ ByteWriter<uint32_t>::WriteBigEndian(&payload[0], sender_ssrc_);
+ ByteWriter<uint32_t>::WriteBigEndian(&payload[4], media_ssrc_);
+}
+
+} // namespace rtcp
+} // namespace webrtc
diff --git a/webrtc/modules/rtp_rtcp/source/rtcp_packet/psfb.h b/webrtc/modules/rtp_rtcp/source/rtcp_packet/psfb.h
new file mode 100644
index 0000000000..dddcdecba6
--- /dev/null
+++ b/webrtc/modules/rtp_rtcp/source/rtcp_packet/psfb.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#ifndef WEBRTC_MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_PSFB_H_
+#define WEBRTC_MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_PSFB_H_
+
+#include "webrtc/base/basictypes.h"
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet.h"
+
+namespace webrtc {
+namespace rtcp {
+
+// PSFB: Payload-specific feedback message.
+// RFC 4585, Section 6.3.
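+//
+// Abstract base for payload-specific feedback messages (PT = 206); concrete
+// subclasses such as Pli (FMT = 1) and Sli (FMT = 2) provide the feedback
+// message type and the FCI payload.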
+class Psfb : public RtcpPacket {
+ public:
+ static const uint8_t kPacketType = 206;
+
+ Psfb() : sender_ssrc_(0), media_ssrc_(0) {}
+ virtual ~Psfb() {}
+
+ void From(uint32_t ssrc) { sender_ssrc_ = ssrc; }
+ void To(uint32_t ssrc) { media_ssrc_ = ssrc; }
+
+ uint32_t sender_ssrc() const { return sender_ssrc_; }
+ uint32_t media_ssrc() const { return media_ssrc_; }
+
+ protected:
+ static const size_t kCommonFeedbackLength = 8;
+ void ParseCommonFeedback(const uint8_t* payload);
+ void CreateCommonFeedback(uint8_t* payload) const;
+
+ private:
+ uint32_t sender_ssrc_;
+ uint32_t media_ssrc_;
+};
+
+} // namespace rtcp
+} // namespace webrtc
+#endif // WEBRTC_MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_PSFB_H_
diff --git a/webrtc/modules/rtp_rtcp/source/rtcp_packet/receiver_report.cc b/webrtc/modules/rtp_rtcp/source/rtcp_packet/receiver_report.cc
new file mode 100644
index 0000000000..ef64b4f51b
--- /dev/null
+++ b/webrtc/modules/rtp_rtcp/source/rtcp_packet/receiver_report.cc
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/receiver_report.h"
+
+#include "webrtc/base/checks.h"
+#include "webrtc/base/logging.h"
+#include "webrtc/modules/rtp_rtcp/source/byte_io.h"
+
+using webrtc::RTCPUtility::RtcpCommonHeader;
+
+namespace webrtc {
+namespace rtcp {
+
+//
+// RTCP receiver report (RFC 3550).
+//
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |V=2|P| RC | PT=RR=201 | length |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | SSRC of packet sender |
+// +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
+// | report block(s) |
+// | .... |
+bool ReceiverReport::Parse(const RTCPUtility::RtcpCommonHeader& header,
+ const uint8_t* payload) {
+ RTC_DCHECK(header.packet_type == kPacketType);
+
+ const uint8_t report_blocks_count = header.count_or_format;
+
+ if (header.payload_size_bytes <
+ kRrBaseLength + report_blocks_count * ReportBlock::kLength) {
+ LOG(LS_WARNING) << "Packet is too small to contain all the data.";
+ return false;
+ }
+
+ sender_ssrc_ = ByteReader<uint32_t>::ReadBigEndian(payload);
+
+ const uint8_t* next_report_block = payload + kRrBaseLength;
+
+ report_blocks_.resize(report_blocks_count);
+ for (ReportBlock& block : report_blocks_) {
+ block.Parse(next_report_block, ReportBlock::kLength);
+ next_report_block += ReportBlock::kLength;
+ }
+
+ RTC_DCHECK_LE(next_report_block, payload + header.payload_size_bytes);
+ return true;
+}
+
+bool ReceiverReport::Create(uint8_t* packet,
+ size_t* index,
+ size_t max_length,
+ RtcpPacket::PacketReadyCallback* callback) const {
+ while (*index + BlockLength() > max_length) {
+ if (!OnBufferFull(packet, index, callback))
+ return false;
+ }
+ CreateHeader(report_blocks_.size(), kPacketType, HeaderLength(), packet,
+ index);
+ ByteWriter<uint32_t>::WriteBigEndian(packet + *index, sender_ssrc_);
+ *index += kRrBaseLength;
+ for (const ReportBlock& block : report_blocks_) {
+ block.Create(packet + *index);
+ *index += ReportBlock::kLength;
+ }
+ return true;
+}
+
+bool ReceiverReport::WithReportBlock(const ReportBlock& block) {
+ if (report_blocks_.size() >= kMaxNumberOfReportBlocks) {
+ LOG(LS_WARNING) << "Max report blocks reached.";
+ return false;
+ }
+ report_blocks_.push_back(block);
+ return true;
+}
+
+} // namespace rtcp
+} // namespace webrtc
diff --git a/webrtc/modules/rtp_rtcp/source/rtcp_packet/receiver_report.h b/webrtc/modules/rtp_rtcp/source/rtcp_packet/receiver_report.h
new file mode 100644
index 0000000000..172a84ea2f
--- /dev/null
+++ b/webrtc/modules/rtp_rtcp/source/rtcp_packet/receiver_report.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_RECEIVER_REPORT_H_
+#define WEBRTC_MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_RECEIVER_REPORT_H_
+
+#include <vector>
+
+#include "webrtc/base/basictypes.h"
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet.h"
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/report_block.h"
+#include "webrtc/modules/rtp_rtcp/source/rtcp_utility.h"
+
+namespace webrtc {
+namespace rtcp {
+
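+// RTCP receiver report (RFC 3550, Section 6.4.2).
+//
+// Usage sketch (the ssrc values are placeholders):
+//   ReceiverReport rr;
+//   rr.From(kLocalSsrc);
+//   ReportBlock block;
+//   block.To(kMediaSsrc);
+//   bool added = rr.WithReportBlock(block);  // False once 31 blocks are
+//                                            // added: RC is a 5-bit field.
+//   rtc::scoped_ptr<RawPacket> packet = rr.Build();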
+class ReceiverReport : public RtcpPacket {
+ public:
+ static const uint8_t kPacketType = 201;
+ ReceiverReport() : sender_ssrc_(0) {}
+
+ virtual ~ReceiverReport() {}
+
+ // Parse assumes header is already parsed and validated.
+ bool Parse(const RTCPUtility::RtcpCommonHeader& header,
+ const uint8_t* payload); // Size of the payload is in the header.
+
+ void From(uint32_t ssrc) { sender_ssrc_ = ssrc; }
+ bool WithReportBlock(const ReportBlock& block);
+
+ uint32_t sender_ssrc() const { return sender_ssrc_; }
+ const std::vector<ReportBlock>& report_blocks() const {
+ return report_blocks_;
+ }
+
+ protected:
+ bool Create(uint8_t* packet,
+ size_t* index,
+ size_t max_length,
+ RtcpPacket::PacketReadyCallback* callback) const override;
+
+ private:
+ static const size_t kRrBaseLength = 4;
+ static const size_t kMaxNumberOfReportBlocks = 0x1F;
+
+ size_t BlockLength() const {
+ return kHeaderLength + kRrBaseLength +
+ report_blocks_.size() * ReportBlock::kLength;
+ }
+
+ uint32_t sender_ssrc_;
+ std::vector<ReportBlock> report_blocks_;
+
+ RTC_DISALLOW_COPY_AND_ASSIGN(ReceiverReport);
+};
+
+} // namespace rtcp
+} // namespace webrtc
+#endif // WEBRTC_MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_RECEIVER_REPORT_H_
diff --git a/webrtc/modules/rtp_rtcp/source/rtcp_packet/receiver_report_unittest.cc b/webrtc/modules/rtp_rtcp/source/rtcp_packet/receiver_report_unittest.cc
new file mode 100644
index 0000000000..ff3da600a5
--- /dev/null
+++ b/webrtc/modules/rtp_rtcp/source/rtcp_packet/receiver_report_unittest.cc
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/receiver_report.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+using webrtc::rtcp::RawPacket;
+using webrtc::rtcp::ReceiverReport;
+using webrtc::rtcp::ReportBlock;
+using webrtc::RTCPUtility::RtcpCommonHeader;
+using webrtc::RTCPUtility::RtcpParseCommonHeader;
+
+namespace webrtc {
+namespace {
+const uint32_t kSenderSsrc = 0x12345678;
+const uint32_t kRemoteSsrc = 0x23456789;
+const uint8_t kFractionLost = 55;
+const uint32_t kCumulativeLost = 0x111213;
+const uint32_t kExtHighestSeqNum = 0x22232425;
+const uint32_t kJitter = 0x33343536;
+const uint32_t kLastSr = 0x44454647;
+const uint32_t kDelayLastSr = 0x55565758;
+// Manually created ReceiverReport with one ReportBlock matching constants
+// above.
+// Having this block allows testing Create and Parse separately.
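+// Header: V=2, P=0, RC=1 -> 0x81; PT=201 (RR); length field = 7, i.e. the
+// packet is eight 32-bit words (32 bytes) in total.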
+const uint8_t kPacket[] = {0x81, 201, 0x00, 0x07, 0x12, 0x34, 0x56, 0x78,
+ 0x23, 0x45, 0x67, 0x89, 55, 0x11, 0x12, 0x13,
+ 0x22, 0x23, 0x24, 0x25, 0x33, 0x34, 0x35, 0x36,
+ 0x44, 0x45, 0x46, 0x47, 0x55, 0x56, 0x57, 0x58};
+const size_t kPacketLength = sizeof(kPacket);
+
+class RtcpPacketReceiverReportTest : public ::testing::Test {
+ protected:
+ void BuildPacket() { packet = rr.Build(); }
+ void ParsePacket() {
+ RtcpCommonHeader header;
+ EXPECT_TRUE(
+ RtcpParseCommonHeader(packet->Buffer(), packet->Length(), &header));
+ EXPECT_EQ(header.BlockSize(), packet->Length());
+ EXPECT_TRUE(parsed_.Parse(
+ header, packet->Buffer() + RtcpCommonHeader::kHeaderSizeBytes));
+ }
+
+ ReceiverReport rr;
+ rtc::scoped_ptr<RawPacket> packet;
+ const ReceiverReport& parsed() { return parsed_; }
+
+ private:
+ ReceiverReport parsed_;
+};
+
+TEST_F(RtcpPacketReceiverReportTest, Parse) {
+ RtcpCommonHeader header;
+  EXPECT_TRUE(RtcpParseCommonHeader(kPacket, kPacketLength, &header));
+ EXPECT_TRUE(rr.Parse(header, kPacket + RtcpCommonHeader::kHeaderSizeBytes));
+ const ReceiverReport& parsed = rr;
+
+ EXPECT_EQ(kSenderSsrc, parsed.sender_ssrc());
+ EXPECT_EQ(1u, parsed.report_blocks().size());
+ const ReportBlock& rb = parsed.report_blocks().front();
+ EXPECT_EQ(kRemoteSsrc, rb.source_ssrc());
+ EXPECT_EQ(kFractionLost, rb.fraction_lost());
+ EXPECT_EQ(kCumulativeLost, rb.cumulative_lost());
+ EXPECT_EQ(kExtHighestSeqNum, rb.extended_high_seq_num());
+ EXPECT_EQ(kJitter, rb.jitter());
+ EXPECT_EQ(kLastSr, rb.last_sr());
+ EXPECT_EQ(kDelayLastSr, rb.delay_since_last_sr());
+}
+
+TEST_F(RtcpPacketReceiverReportTest, ParseFailsOnIncorrectSize) {
+ RtcpCommonHeader header;
+  EXPECT_TRUE(RtcpParseCommonHeader(kPacket, kPacketLength, &header));
+ header.count_or_format++; // Damage the packet.
+ EXPECT_FALSE(rr.Parse(header, kPacket + RtcpCommonHeader::kHeaderSizeBytes));
+}
+
+TEST_F(RtcpPacketReceiverReportTest, Create) {
+ rr.From(kSenderSsrc);
+ ReportBlock rb;
+ rb.To(kRemoteSsrc);
+ rb.WithFractionLost(kFractionLost);
+ rb.WithCumulativeLost(kCumulativeLost);
+ rb.WithExtHighestSeqNum(kExtHighestSeqNum);
+ rb.WithJitter(kJitter);
+ rb.WithLastSr(kLastSr);
+ rb.WithDelayLastSr(kDelayLastSr);
+ rr.WithReportBlock(rb);
+
+ BuildPacket();
+
+ ASSERT_EQ(kPacketLength, packet->Length());
+ EXPECT_EQ(0, memcmp(kPacket, packet->Buffer(), kPacketLength));
+}
+
+TEST_F(RtcpPacketReceiverReportTest, WithoutReportBlocks) {
+ rr.From(kSenderSsrc);
+
+ BuildPacket();
+ ParsePacket();
+
+ EXPECT_EQ(kSenderSsrc, parsed().sender_ssrc());
+ EXPECT_EQ(0u, parsed().report_blocks().size());
+}
+
+TEST_F(RtcpPacketReceiverReportTest, WithTwoReportBlocks) {
+ ReportBlock rb1;
+ rb1.To(kRemoteSsrc);
+ ReportBlock rb2;
+ rb2.To(kRemoteSsrc + 1);
+
+ rr.From(kSenderSsrc);
+ EXPECT_TRUE(rr.WithReportBlock(rb1));
+ EXPECT_TRUE(rr.WithReportBlock(rb2));
+
+ BuildPacket();
+ ParsePacket();
+
+ EXPECT_EQ(kSenderSsrc, parsed().sender_ssrc());
+ EXPECT_EQ(2u, parsed().report_blocks().size());
+ EXPECT_EQ(kRemoteSsrc, parsed().report_blocks()[0].source_ssrc());
+ EXPECT_EQ(kRemoteSsrc + 1, parsed().report_blocks()[1].source_ssrc());
+}
+
+TEST_F(RtcpPacketReceiverReportTest, WithTooManyReportBlocks) {
+ rr.From(kSenderSsrc);
+ const size_t kMaxReportBlocks = (1 << 5) - 1;
+ ReportBlock rb;
+ for (size_t i = 0; i < kMaxReportBlocks; ++i) {
+ rb.To(kRemoteSsrc + i);
+ EXPECT_TRUE(rr.WithReportBlock(rb));
+ }
+ rb.To(kRemoteSsrc + kMaxReportBlocks);
+ EXPECT_FALSE(rr.WithReportBlock(rb));
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/webrtc/modules/rtp_rtcp/source/rtcp_packet/report_block.cc b/webrtc/modules/rtp_rtcp/source/rtcp_packet/report_block.cc
new file mode 100644
index 0000000000..4911dbf5b7
--- /dev/null
+++ b/webrtc/modules/rtp_rtcp/source/rtcp_packet/report_block.cc
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/report_block.h"
+
+#include "webrtc/base/checks.h"
+#include "webrtc/base/logging.h"
+#include "webrtc/modules/rtp_rtcp/source/byte_io.h"
+
+namespace webrtc {
+namespace rtcp {
+
+// From RFC 3550, RTP: A Transport Protocol for Real-Time Applications.
+//
+// RTCP report block (RFC 3550).
+//
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
+// 0 | SSRC_1 (SSRC of first source) |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// 4 | fraction lost | cumulative number of packets lost |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// 8 | extended highest sequence number received |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// 12 | interarrival jitter |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// 16 | last SR (LSR) |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// 20 | delay since last SR (DLSR) |
+// 24 +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
+ReportBlock::ReportBlock()
+ : source_ssrc_(0),
+ fraction_lost_(0),
+ cumulative_lost_(0),
+ extended_high_seq_num_(0),
+ jitter_(0),
+ last_sr_(0),
+ delay_since_last_sr_(0) {}
+
+bool ReportBlock::Parse(const uint8_t* buffer, size_t length) {
+ RTC_DCHECK(buffer != nullptr);
+ if (length < ReportBlock::kLength) {
+ LOG(LS_ERROR) << "Report Block should be 24 bytes long";
+ return false;
+ }
+
+ source_ssrc_ = ByteReader<uint32_t>::ReadBigEndian(&buffer[0]);
+ fraction_lost_ = buffer[4];
+ cumulative_lost_ = ByteReader<uint32_t, 3>::ReadBigEndian(&buffer[5]);
+ extended_high_seq_num_ = ByteReader<uint32_t>::ReadBigEndian(&buffer[8]);
+ jitter_ = ByteReader<uint32_t>::ReadBigEndian(&buffer[12]);
+ last_sr_ = ByteReader<uint32_t>::ReadBigEndian(&buffer[16]);
+ delay_since_last_sr_ = ByteReader<uint32_t>::ReadBigEndian(&buffer[20]);
+
+ return true;
+}
+
+void ReportBlock::Create(uint8_t* buffer) const {
+ // Runtime check should be done while setting cumulative_lost.
+ RTC_DCHECK_LT(cumulative_lost(), (1u << 24)); // Have only 3 bytes for it.
+
+ ByteWriter<uint32_t>::WriteBigEndian(&buffer[0], source_ssrc());
+ ByteWriter<uint8_t>::WriteBigEndian(&buffer[4], fraction_lost());
+ ByteWriter<uint32_t, 3>::WriteBigEndian(&buffer[5], cumulative_lost());
+ ByteWriter<uint32_t>::WriteBigEndian(&buffer[8], extended_high_seq_num());
+ ByteWriter<uint32_t>::WriteBigEndian(&buffer[12], jitter());
+ ByteWriter<uint32_t>::WriteBigEndian(&buffer[16], last_sr());
+ ByteWriter<uint32_t>::WriteBigEndian(&buffer[20], delay_since_last_sr());
+}
+
+bool ReportBlock::WithCumulativeLost(uint32_t cumulative_lost) {
+ if (cumulative_lost >= (1u << 24)) { // Have only 3 bytes to store it.
+ LOG(LS_WARNING) << "Cumulative lost is too big to fit into Report Block";
+ return false;
+ }
+ cumulative_lost_ = cumulative_lost;
+ return true;
+}
+
+} // namespace rtcp
+} // namespace webrtc
diff --git a/webrtc/modules/rtp_rtcp/source/rtcp_packet/report_block.h b/webrtc/modules/rtp_rtcp/source/rtcp_packet/report_block.h
new file mode 100644
index 0000000000..ef99e17297
--- /dev/null
+++ b/webrtc/modules/rtp_rtcp/source/rtcp_packet/report_block.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#ifndef WEBRTC_MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_REPORT_BLOCK_H_
+#define WEBRTC_MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_REPORT_BLOCK_H_
+
+#include "webrtc/base/basictypes.h"
+
+namespace webrtc {
+namespace rtcp {
+
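+// A single report block from an RTCP receiver or sender report (RFC 3550,
+// Section 6.4.1). Occupies exactly kLength (24) bytes on the wire.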
+class ReportBlock {
+ public:
+ static const size_t kLength = 24;
+
+ ReportBlock();
+ ~ReportBlock() {}
+
+ bool Parse(const uint8_t* buffer, size_t length);
+
+ // Fills buffer with the ReportBlock.
+ // Consumes ReportBlock::kLength bytes.
+ void Create(uint8_t* buffer) const;
+
+ void To(uint32_t ssrc) { source_ssrc_ = ssrc; }
+ void WithFractionLost(uint8_t fraction_lost) {
+ fraction_lost_ = fraction_lost;
+ }
+ bool WithCumulativeLost(uint32_t cumulative_lost);
+ void WithExtHighestSeqNum(uint32_t ext_highest_seq_num) {
+ extended_high_seq_num_ = ext_highest_seq_num;
+ }
+ void WithJitter(uint32_t jitter) { jitter_ = jitter; }
+ void WithLastSr(uint32_t last_sr) { last_sr_ = last_sr; }
+ void WithDelayLastSr(uint32_t delay_last_sr) {
+ delay_since_last_sr_ = delay_last_sr;
+ }
+
+ uint32_t source_ssrc() const { return source_ssrc_; }
+ uint8_t fraction_lost() const { return fraction_lost_; }
+ uint32_t cumulative_lost() const { return cumulative_lost_; }
+ uint32_t extended_high_seq_num() const { return extended_high_seq_num_; }
+ uint32_t jitter() const { return jitter_; }
+ uint32_t last_sr() const { return last_sr_; }
+ uint32_t delay_since_last_sr() const { return delay_since_last_sr_; }
+
+ private:
+ uint32_t source_ssrc_;
+ uint8_t fraction_lost_;
+ uint32_t cumulative_lost_;
+ uint32_t extended_high_seq_num_;
+ uint32_t jitter_;
+ uint32_t last_sr_;
+ uint32_t delay_since_last_sr_;
+};
+
+} // namespace rtcp
+} // namespace webrtc
+#endif // WEBRTC_MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_REPORT_BLOCK_H_
diff --git a/webrtc/modules/rtp_rtcp/source/rtcp_packet/report_block_unittest.cc b/webrtc/modules/rtp_rtcp/source/rtcp_packet/report_block_unittest.cc
new file mode 100644
index 0000000000..85bbb404a4
--- /dev/null
+++ b/webrtc/modules/rtp_rtcp/source/rtcp_packet/report_block_unittest.cc
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/report_block.h"
+
+#include <limits>
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/base/random.h"
+
+using webrtc::rtcp::ReportBlock;
+
+namespace webrtc {
+namespace {
+
+const uint32_t kRemoteSsrc = 0x23456789;
+const uint8_t kFractionLost = 55;
+// Use values that are streamed differently LE and BE.
+const uint32_t kCumulativeLost = 0x111213;
+const uint32_t kExtHighestSeqNum = 0x22232425;
+const uint32_t kJitter = 0x33343536;
+const uint32_t kLastSr = 0x44454647;
+const uint32_t kDelayLastSr = 0x55565758;
+const size_t kBufferLength = ReportBlock::kLength;
+
+TEST(RtcpPacketReportBlockTest, ParseChecksLength) {
+ uint8_t buffer[kBufferLength];
+ memset(buffer, 0, sizeof(buffer));
+
+ ReportBlock rb;
+ EXPECT_FALSE(rb.Parse(buffer, kBufferLength - 1));
+ EXPECT_TRUE(rb.Parse(buffer, kBufferLength));
+}
+
+TEST(RtcpPacketReportBlockTest, ParseAnyData) {
+ uint8_t buffer[kBufferLength];
+ // Fill buffer with semi-random data.
+ Random generator(0x256F8A285EC829ull);
+ for (size_t i = 0; i < kBufferLength; ++i)
+ buffer[i] = static_cast<uint8_t>(generator.Rand(0, 0xff));
+
+ ReportBlock rb;
+ EXPECT_TRUE(rb.Parse(buffer, kBufferLength));
+}
+
+TEST(RtcpPacketReportBlockTest, ParseMatchCreate) {
+ ReportBlock rb;
+ rb.To(kRemoteSsrc);
+ rb.WithFractionLost(kFractionLost);
+ rb.WithCumulativeLost(kCumulativeLost);
+ rb.WithExtHighestSeqNum(kExtHighestSeqNum);
+ rb.WithJitter(kJitter);
+ rb.WithLastSr(kLastSr);
+ rb.WithDelayLastSr(kDelayLastSr);
+
+ uint8_t buffer[kBufferLength];
+ rb.Create(buffer);
+
+ ReportBlock parsed;
+ EXPECT_TRUE(parsed.Parse(buffer, kBufferLength));
+
+ EXPECT_EQ(kRemoteSsrc, parsed.source_ssrc());
+ EXPECT_EQ(kFractionLost, parsed.fraction_lost());
+ EXPECT_EQ(kCumulativeLost, parsed.cumulative_lost());
+ EXPECT_EQ(kExtHighestSeqNum, parsed.extended_high_seq_num());
+ EXPECT_EQ(kJitter, parsed.jitter());
+ EXPECT_EQ(kLastSr, parsed.last_sr());
+ EXPECT_EQ(kDelayLastSr, parsed.delay_since_last_sr());
+}
+
+TEST(RtcpPacketReportBlockTest, ValidateCumulativeLost) {
+ const uint32_t kMaxCumulativeLost = 0xffffff;
+ ReportBlock rb;
+ EXPECT_FALSE(rb.WithCumulativeLost(kMaxCumulativeLost + 1));
+ EXPECT_TRUE(rb.WithCumulativeLost(kMaxCumulativeLost));
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/webrtc/modules/rtp_rtcp/source/rtcp_packet/rrtr.cc b/webrtc/modules/rtp_rtcp/source/rtcp_packet/rrtr.cc
new file mode 100644
index 0000000000..db4ae67326
--- /dev/null
+++ b/webrtc/modules/rtp_rtcp/source/rtcp_packet/rrtr.cc
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/rrtr.h"
+
+#include "webrtc/base/checks.h"
+#include "webrtc/modules/rtp_rtcp/source/byte_io.h"
+
+namespace webrtc {
+namespace rtcp {
+// Receiver Reference Time Report Block (RFC 3611).
+//
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | BT=4 | reserved | block length = 2 |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | NTP timestamp, most significant word |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | NTP timestamp, least significant word |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+void Rrtr::Parse(const uint8_t* buffer) {
+ RTC_DCHECK(buffer[0] == kBlockType);
+ // reserved = buffer[1];
+ RTC_DCHECK(ByteReader<uint16_t>::ReadBigEndian(&buffer[2]) == kBlockLength);
+ uint32_t seconds = ByteReader<uint32_t>::ReadBigEndian(&buffer[4]);
+ uint32_t fraction = ByteReader<uint32_t>::ReadBigEndian(&buffer[8]);
+ ntp_.Set(seconds, fraction);
+}
+
+void Rrtr::Create(uint8_t* buffer) const {
+ const uint8_t kReserved = 0;
+ buffer[0] = kBlockType;
+ buffer[1] = kReserved;
+ ByteWriter<uint16_t>::WriteBigEndian(&buffer[2], kBlockLength);
+ ByteWriter<uint32_t>::WriteBigEndian(&buffer[4], ntp_.seconds());
+ ByteWriter<uint32_t>::WriteBigEndian(&buffer[8], ntp_.fractions());
+}
+
+} // namespace rtcp
+} // namespace webrtc
diff --git a/webrtc/modules/rtp_rtcp/source/rtcp_packet/rrtr.h b/webrtc/modules/rtp_rtcp/source/rtcp_packet/rrtr.h
new file mode 100644
index 0000000000..3354f61df6
--- /dev/null
+++ b/webrtc/modules/rtp_rtcp/source/rtcp_packet/rrtr.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#ifndef WEBRTC_MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_RRTR_H_
+#define WEBRTC_MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_RRTR_H_
+
+#include "webrtc/base/basictypes.h"
+#include "webrtc/system_wrappers/include/ntp_time.h"
+
+namespace webrtc {
+namespace rtcp {
+
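+// Receiver Reference Time Report block (RFC 3611, Section 4.4). Carries the
+// receiver's NTP timestamp so that round-trip time can be estimated towards
+// endpoints that do not send media (and hence send no sender reports).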
+class Rrtr {
+ public:
+ static const uint8_t kBlockType = 4;
+ static const uint16_t kBlockLength = 2;
+ static const size_t kLength = 4 * (kBlockLength + 1); // 12
+
+ Rrtr() {}
+ Rrtr(const Rrtr&) = default;
+ ~Rrtr() {}
+
+ Rrtr& operator=(const Rrtr&) = default;
+
+ void Parse(const uint8_t* buffer);
+
+ // Fills buffer with the Rrtr.
+ // Consumes Rrtr::kLength bytes.
+ void Create(uint8_t* buffer) const;
+
+ void WithNtp(const NtpTime& ntp) { ntp_ = ntp; }
+
+ NtpTime ntp() const { return ntp_; }
+
+ private:
+ NtpTime ntp_;
+};
+
+} // namespace rtcp
+} // namespace webrtc
+#endif // WEBRTC_MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_RRTR_H_
diff --git a/webrtc/modules/rtp_rtcp/source/rtcp_packet/rrtr_unittest.cc b/webrtc/modules/rtp_rtcp/source/rtcp_packet/rrtr_unittest.cc
new file mode 100644
index 0000000000..6536e06186
--- /dev/null
+++ b/webrtc/modules/rtp_rtcp/source/rtcp_packet/rrtr_unittest.cc
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/rrtr.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+using webrtc::rtcp::Rrtr;
+
+namespace webrtc {
+namespace {
+
+const uint32_t kNtpSec = 0x12345678;
+const uint32_t kNtpFrac = 0x23456789;
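+// Manually created Rrtr block: BT=4, reserved byte, block length = 2 (in
+// 32-bit words, not counting the first word), then the 64-bit NTP timestamp.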
+const uint8_t kBlock[] = {0x04, 0x00, 0x00, 0x02,
+ 0x12, 0x34, 0x56, 0x78,
+ 0x23, 0x45, 0x67, 0x89};
+const size_t kBlockSizeBytes = sizeof(kBlock);
+static_assert(
+ kBlockSizeBytes == Rrtr::kLength,
+ "Size of manually created Rrtr block should match class constant");
+
+TEST(RtcpPacketRrtrTest, Create) {
+ uint8_t buffer[Rrtr::kLength];
+ Rrtr rrtr;
+ rrtr.WithNtp(NtpTime(kNtpSec, kNtpFrac));
+
+ rrtr.Create(buffer);
+ EXPECT_EQ(0, memcmp(buffer, kBlock, kBlockSizeBytes));
+}
+
+TEST(RtcpPacketRrtrTest, Parse) {
+ Rrtr read_rrtr;
+ read_rrtr.Parse(kBlock);
+
+ // Run checks on const object to ensure all accessors have const modifier.
+ const Rrtr& parsed = read_rrtr;
+
+ EXPECT_EQ(kNtpSec, parsed.ntp().seconds());
+ EXPECT_EQ(kNtpFrac, parsed.ntp().fractions());
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/webrtc/modules/rtp_rtcp/source/rtcp_packet/rtpfb.cc b/webrtc/modules/rtp_rtcp/source/rtcp_packet/rtpfb.cc
new file mode 100644
index 0000000000..b5571d45a3
--- /dev/null
+++ b/webrtc/modules/rtp_rtcp/source/rtcp_packet/rtpfb.cc
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/rtpfb.h"
+
+#include "webrtc/modules/rtp_rtcp/source/byte_io.h"
+
+namespace webrtc {
+namespace rtcp {
+
+// RFC 4585, Section 6.1: Feedback format.
+//
+// Common packet format:
+//
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |V=2|P| FMT | PT | length |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// 0 | SSRC of packet sender |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// 4 | SSRC of media source |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// : Feedback Control Information (FCI) :
+// : :
+
+void Rtpfb::ParseCommonFeedback(const uint8_t* payload) {
+ sender_ssrc_ = ByteReader<uint32_t>::ReadBigEndian(&payload[0]);
+ media_ssrc_ = ByteReader<uint32_t>::ReadBigEndian(&payload[4]);
+}
+
+void Rtpfb::CreateCommonFeedback(uint8_t* payload) const {
+ ByteWriter<uint32_t>::WriteBigEndian(&payload[0], sender_ssrc_);
+ ByteWriter<uint32_t>::WriteBigEndian(&payload[4], media_ssrc_);
+}
+
+} // namespace rtcp
+} // namespace webrtc
diff --git a/webrtc/modules/rtp_rtcp/source/rtcp_packet/rtpfb.h b/webrtc/modules/rtp_rtcp/source/rtcp_packet/rtpfb.h
new file mode 100644
index 0000000000..801aa085c4
--- /dev/null
+++ b/webrtc/modules/rtp_rtcp/source/rtcp_packet/rtpfb.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#ifndef WEBRTC_MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_RTPFB_H_
+#define WEBRTC_MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_RTPFB_H_
+
+#include "webrtc/base/basictypes.h"
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet.h"
+
+namespace webrtc {
+namespace rtcp {
+
+// RTPFB: Transport layer feedback message.
+// RFC 4585, Section 6.2.
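+//
+// Abstract base for transport layer feedback messages (PT = 205); concrete
+// subclasses provide the feedback message type (FMT) and the FCI payload.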
+class Rtpfb : public RtcpPacket {
+ public:
+ static const uint8_t kPacketType = 205;
+
+ Rtpfb() : sender_ssrc_(0), media_ssrc_(0) {}
+ virtual ~Rtpfb() {}
+
+ void From(uint32_t ssrc) { sender_ssrc_ = ssrc; }
+ void To(uint32_t ssrc) { media_ssrc_ = ssrc; }
+
+ uint32_t sender_ssrc() const { return sender_ssrc_; }
+ uint32_t media_ssrc() const { return media_ssrc_; }
+
+ protected:
+ static const size_t kCommonFeedbackLength = 8;
+ void ParseCommonFeedback(const uint8_t* payload);
+ void CreateCommonFeedback(uint8_t* payload) const;
+
+ private:
+ uint32_t sender_ssrc_;
+ uint32_t media_ssrc_;
+};
+
+} // namespace rtcp
+} // namespace webrtc
+#endif // WEBRTC_MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_RTPFB_H_
diff --git a/webrtc/modules/rtp_rtcp/source/rtcp_packet/sli.cc b/webrtc/modules/rtp_rtcp/source/rtcp_packet/sli.cc
new file mode 100644
index 0000000000..829f3a9db9
--- /dev/null
+++ b/webrtc/modules/rtp_rtcp/source/rtcp_packet/sli.cc
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/sli.h"
+
+#include "webrtc/base/checks.h"
+#include "webrtc/base/logging.h"
+#include "webrtc/modules/rtp_rtcp/source/byte_io.h"
+
+using webrtc::RTCPUtility::RtcpCommonHeader;
+
+namespace webrtc {
+namespace rtcp {
+// RFC 4585: Feedback format.
+//
+// Common packet format:
+//
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |V=2|P| FMT | PT | length |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | SSRC of packet sender |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | SSRC of media source |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// : Feedback Control Information (FCI) :
+// : :
+//
+// Slice loss indication (SLI) (RFC 4585).
+// FCI:
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | First | Number | PictureID |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+Sli::Macroblocks::Macroblocks(uint8_t picture_id,
+ uint16_t first,
+ uint16_t number) {
+ RTC_DCHECK_LE(first, 0x1fff);
+ RTC_DCHECK_LE(number, 0x1fff);
+ RTC_DCHECK_LE(picture_id, 0x3f);
+  // The cast keeps the 13-bit shift in unsigned arithmetic.
+  item_ = (static_cast<uint32_t>(first) << 19) | (number << 6) | picture_id;
+}
+
+void Sli::Macroblocks::Parse(const uint8_t* buffer) {
+ item_ = ByteReader<uint32_t>::ReadBigEndian(buffer);
+}
+
+void Sli::Macroblocks::Create(uint8_t* buffer) const {
+ ByteWriter<uint32_t>::WriteBigEndian(buffer, item_);
+}
+
+bool Sli::Parse(const RtcpCommonHeader& header, const uint8_t* payload) {
+ RTC_DCHECK(header.packet_type == kPacketType);
+ RTC_DCHECK(header.count_or_format == kFeedbackMessageType);
+
+ if (header.payload_size_bytes <
+ kCommonFeedbackLength + Macroblocks::kLength) {
+ LOG(LS_WARNING) << "Packet is too small to be a valid SLI packet";
+ return false;
+ }
+
+ size_t number_of_items =
+ (header.payload_size_bytes - kCommonFeedbackLength) /
+ Macroblocks::kLength;
+
+ ParseCommonFeedback(payload);
+ items_.resize(number_of_items);
+
+ const uint8_t* next_item = payload + kCommonFeedbackLength;
+ for (Macroblocks& item : items_) {
+ item.Parse(next_item);
+ next_item += Macroblocks::kLength;
+ }
+
+ return true;
+}
+
+bool Sli::Create(uint8_t* packet,
+ size_t* index,
+ size_t max_length,
+ RtcpPacket::PacketReadyCallback* callback) const {
+ RTC_DCHECK(!items_.empty());
+ while (*index + BlockLength() > max_length) {
+ if (!OnBufferFull(packet, index, callback))
+ return false;
+ }
+ CreateHeader(kFeedbackMessageType, kPacketType, HeaderLength(), packet,
+ index);
+ CreateCommonFeedback(packet + *index);
+ *index += kCommonFeedbackLength;
+ for (const Macroblocks& item : items_) {
+ item.Create(packet + *index);
+ *index += Macroblocks::kLength;
+ }
+ return true;
+}
+
+} // namespace rtcp
+} // namespace webrtc
diff --git a/webrtc/modules/rtp_rtcp/source/rtcp_packet/sli.h b/webrtc/modules/rtp_rtcp/source/rtcp_packet/sli.h
new file mode 100644
index 0000000000..5d9e6c93e9
--- /dev/null
+++ b/webrtc/modules/rtp_rtcp/source/rtcp_packet/sli.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#ifndef WEBRTC_MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_SLI_H_
+#define WEBRTC_MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_SLI_H_
+
+#include <vector>
+
+#include "webrtc/base/basictypes.h"
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/psfb.h"
+#include "webrtc/modules/rtp_rtcp/source/rtcp_utility.h"
+
+namespace webrtc {
+namespace rtcp {
+
+// Slice loss indication (SLI) (RFC 4585).
+class Sli : public Psfb {
+ public:
+ static const uint8_t kFeedbackMessageType = 2;
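+
+  // One SLI FCI entry: 13-bit index of the first lost macroblock, 13-bit
+  // number of lost macroblocks and a 6-bit picture id, packed into a single
+  // 32-bit word (RFC 4585, Section 6.3.2).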
+ class Macroblocks {
+ public:
+ static const size_t kLength = 4;
+ Macroblocks() : item_(0) {}
+ Macroblocks(uint8_t picture_id, uint16_t first, uint16_t number);
+ ~Macroblocks() {}
+
+ void Parse(const uint8_t* buffer);
+ void Create(uint8_t* buffer) const;
+
+ uint16_t first() const { return item_ >> 19; }
+ uint16_t number() const { return (item_ >> 6) & 0x1fff; }
+ uint8_t picture_id() const { return (item_ & 0x3f); }
+
+ private:
+ uint32_t item_;
+ };
+
+ Sli() {}
+ virtual ~Sli() {}
+
+ // Parse assumes header is already parsed and validated.
+ bool Parse(const RTCPUtility::RtcpCommonHeader& header,
+ const uint8_t* payload); // Size of the payload is in the header.
+
+ void WithPictureId(uint8_t picture_id,
+ uint16_t first_macroblock = 0,
+ uint16_t number_macroblocks = 0x1fff) {
+ items_.push_back(
+ Macroblocks(picture_id, first_macroblock, number_macroblocks));
+ }
+
+ const std::vector<Macroblocks>& macroblocks() const { return items_; }
+
+ protected:
+ bool Create(uint8_t* packet,
+ size_t* index,
+ size_t max_length,
+ RtcpPacket::PacketReadyCallback* callback) const override;
+
+ private:
+ size_t BlockLength() const override {
+ return RtcpPacket::kHeaderLength + Psfb::kCommonFeedbackLength +
+ items_.size() * Macroblocks::kLength;
+ }
+
+ std::vector<Macroblocks> items_;
+
+ RTC_DISALLOW_COPY_AND_ASSIGN(Sli);
+};
+
+} // namespace rtcp
+} // namespace webrtc
+#endif // WEBRTC_MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_SLI_H_
diff --git a/webrtc/modules/rtp_rtcp/source/rtcp_packet/sli_unittest.cc b/webrtc/modules/rtp_rtcp/source/rtcp_packet/sli_unittest.cc
new file mode 100644
index 0000000000..c2be16846b
--- /dev/null
+++ b/webrtc/modules/rtp_rtcp/source/rtcp_packet/sli_unittest.cc
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/sli.h"
+
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using testing::ElementsAreArray;
+using testing::make_tuple;
+using webrtc::rtcp::RawPacket;
+using webrtc::rtcp::Sli;
+using webrtc::RTCPUtility::RtcpCommonHeader;
+using webrtc::RTCPUtility::RtcpParseCommonHeader;
+
+namespace webrtc {
+namespace {
+
+const uint32_t kSenderSsrc = 0x12345678;
+const uint32_t kRemoteSsrc = 0x23456789;
+
+const uint8_t kPictureId = 0x3f;
+const uint16_t kFirstMb = 0x1e61;
+const uint16_t kNumberOfMb = 0x1a0a;
+const uint32_t kSliItem = (static_cast<uint32_t>(kFirstMb) << 19) |
+ (static_cast<uint32_t>(kNumberOfMb) << 6) |
+ static_cast<uint32_t>(kPictureId);
+
+// Manually created Sli packet matching constants above.
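+// Header: V=2, P=0, FMT=2 (SLI) -> 0x82; PT=206 (PSFB); length field = 3.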
+const uint8_t kPacket[] = {0x82, 206, 0x00, 0x03,
+ 0x12, 0x34, 0x56, 0x78,
+ 0x23, 0x45, 0x67, 0x89,
+ (kSliItem >> 24) & 0xff,
+ (kSliItem >> 16) & 0xff,
+ (kSliItem >> 8) & 0xff,
+ kSliItem & 0xff};
+const size_t kPacketLength = sizeof(kPacket);
+
+bool ParseSli(const uint8_t* buffer, size_t length, Sli* sli) {
+ RtcpCommonHeader header;
+ EXPECT_TRUE(RtcpParseCommonHeader(buffer, length, &header));
+ EXPECT_EQ(length, header.BlockSize());
+ return sli->Parse(header, buffer + RtcpCommonHeader::kHeaderSizeBytes);
+}
+
+TEST(RtcpPacketSliTest, Create) {
+ Sli sli;
+ sli.From(kSenderSsrc);
+ sli.To(kRemoteSsrc);
+ sli.WithPictureId(kPictureId, kFirstMb, kNumberOfMb);
+
+ rtc::scoped_ptr<RawPacket> packet(sli.Build());
+
+ EXPECT_THAT(make_tuple(packet->Buffer(), packet->Length()),
+ ElementsAreArray(kPacket));
+}
+
+TEST(RtcpPacketSliTest, Parse) {
+ Sli mutable_parsed;
+ EXPECT_TRUE(ParseSli(kPacket, kPacketLength, &mutable_parsed));
+ const Sli& parsed = mutable_parsed; // Read values from constant object.
+
+ EXPECT_EQ(kSenderSsrc, parsed.sender_ssrc());
+ EXPECT_EQ(kRemoteSsrc, parsed.media_ssrc());
+ EXPECT_EQ(1u, parsed.macroblocks().size());
+ EXPECT_EQ(kFirstMb, parsed.macroblocks()[0].first());
+ EXPECT_EQ(kNumberOfMb, parsed.macroblocks()[0].number());
+ EXPECT_EQ(kPictureId, parsed.macroblocks()[0].picture_id());
+}
+
+TEST(RtcpPacketSliTest, ParseFailsOnTooSmallPacket) {
+ Sli sli;
+ sli.From(kSenderSsrc);
+ sli.To(kRemoteSsrc);
+ sli.WithPictureId(kPictureId, kFirstMb, kNumberOfMb);
+
+ rtc::scoped_ptr<RawPacket> packet(sli.Build());
+ packet->MutableBuffer()[3]--; // Decrease size by 1 word (4 bytes).
+
+ EXPECT_FALSE(ParseSli(packet->Buffer(), packet->Length() - 4, &sli));
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/webrtc/modules/rtp_rtcp/source/rtcp_packet/tmmbn.cc b/webrtc/modules/rtp_rtcp/source/rtcp_packet/tmmbn.cc
new file mode 100644
index 0000000000..fd0219cf82
--- /dev/null
+++ b/webrtc/modules/rtp_rtcp/source/rtcp_packet/tmmbn.cc
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/tmmbn.h"
+
+#include "webrtc/base/logging.h"
+#include "webrtc/modules/rtp_rtcp/source/byte_io.h"
+
+using webrtc::RTCPUtility::PT_RTPFB;
+using webrtc::RTCPUtility::RTCPPacketRTPFBTMMBN;
+using webrtc::RTCPUtility::RTCPPacketRTPFBTMMBRItem;
+
+namespace webrtc {
+namespace rtcp {
+namespace {
+const uint32_t kUnusedMediaSourceSsrc0 = 0;
+void AssignUWord8(uint8_t* buffer, size_t* offset, uint8_t value) {
+ buffer[(*offset)++] = value;
+}
+void AssignUWord32(uint8_t* buffer, size_t* offset, uint32_t value) {
+ ByteWriter<uint32_t>::WriteBigEndian(buffer + *offset, value);
+ *offset += 4;
+}
+
+void ComputeMantissaAnd6bitBase2Exponent(uint32_t input_base10,
+ uint8_t bits_mantissa,
+ uint32_t* mantissa,
+ uint8_t* exp) {
+ // input_base10 = mantissa * 2^exp
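+  // e.g. with a 17-bit mantissa, 312000 -> mantissa = 78000, exp = 2
+  // (78000 * 2^2 = 312000).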
+  RTC_DCHECK_LE(bits_mantissa, 32);
+  // 64-bit arithmetic keeps the shifts below defined for exponents >= 32.
+  uint64_t mantissa_max = (static_cast<uint64_t>(1) << bits_mantissa) - 1;
+  uint8_t exponent = 0;
+  for (uint32_t i = 0; i < 64; ++i) {
+    if (input_base10 <= (mantissa_max << i)) {
+      exponent = static_cast<uint8_t>(i);
+ break;
+ }
+ }
+ *exp = exponent;
+ *mantissa = (input_base10 >> exponent);
+}
+
+void CreateTmmbrItem(const RTCPPacketRTPFBTMMBRItem& tmmbr_item,
+ uint8_t* buffer,
+ size_t* pos) {
+ uint32_t bitrate_bps = tmmbr_item.MaxTotalMediaBitRate * 1000;
+ uint32_t mantissa = 0;
+ uint8_t exp = 0;
+ ComputeMantissaAnd6bitBase2Exponent(bitrate_bps, 17, &mantissa, &exp);
+
+ AssignUWord32(buffer, pos, tmmbr_item.SSRC);
+ AssignUWord8(buffer, pos, (exp << 2) + ((mantissa >> 15) & 0x03));
+ AssignUWord8(buffer, pos, mantissa >> 7);
+ AssignUWord8(buffer, pos, (mantissa << 1) +
+ ((tmmbr_item.MeasuredOverhead >> 8) & 0x01));
+ AssignUWord8(buffer, pos, tmmbr_item.MeasuredOverhead);
+}
+
+// Temporary Maximum Media Stream Bit Rate Notification (TMMBN) (RFC 5104).
+//
+// FCI:
+//
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | SSRC |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | MxTBR Exp | MxTBR Mantissa |Measured Overhead|
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+void CreateTmmbn(const RTCPPacketRTPFBTMMBN& tmmbn,
+ const std::vector<RTCPPacketRTPFBTMMBRItem>& tmmbn_items,
+ uint8_t* buffer,
+ size_t* pos) {
+ AssignUWord32(buffer, pos, tmmbn.SenderSSRC);
+ AssignUWord32(buffer, pos, kUnusedMediaSourceSsrc0);
+  for (size_t i = 0; i < tmmbn_items.size(); ++i) {
+ CreateTmmbrItem(tmmbn_items[i], buffer, pos);
+ }
+}
+} // namespace
+
+bool Tmmbn::WithTmmbr(uint32_t ssrc, uint32_t bitrate_kbps, uint16_t overhead) {
+  RTC_DCHECK_LE(overhead, 0x1ff);
+ if (tmmbn_items_.size() >= kMaxNumberOfTmmbrs) {
+ LOG(LS_WARNING) << "Max TMMBN size reached.";
+ return false;
+ }
+ RTCPPacketRTPFBTMMBRItem tmmbn_item;
+ tmmbn_item.SSRC = ssrc;
+ tmmbn_item.MaxTotalMediaBitRate = bitrate_kbps;
+ tmmbn_item.MeasuredOverhead = overhead;
+ tmmbn_items_.push_back(tmmbn_item);
+ return true;
+}
+
+bool Tmmbn::Create(uint8_t* packet,
+ size_t* index,
+ size_t max_length,
+ RtcpPacket::PacketReadyCallback* callback) const {
+ while (*index + BlockLength() > max_length) {
+ if (!OnBufferFull(packet, index, callback))
+ return false;
+ }
+ const uint8_t kFmt = 4;
+ CreateHeader(kFmt, PT_RTPFB, HeaderLength(), packet, index);
+ CreateTmmbn(tmmbn_, tmmbn_items_, packet, index);
+ return true;
+}
+
+} // namespace rtcp
+} // namespace webrtc
diff --git a/webrtc/modules/rtp_rtcp/source/rtcp_packet/tmmbn.h b/webrtc/modules/rtp_rtcp/source/rtcp_packet/tmmbn.h
new file mode 100644
index 0000000000..82bf9dd9e9
--- /dev/null
+++ b/webrtc/modules/rtp_rtcp/source/rtcp_packet/tmmbn.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#ifndef WEBRTC_MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_TMMBN_H_
+#define WEBRTC_MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_TMMBN_H_
+
+#include <string.h>
+
+#include <vector>
+
+#include "webrtc/base/basictypes.h"
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet.h"
+#include "webrtc/modules/rtp_rtcp/source/rtcp_utility.h"
+
+namespace webrtc {
+namespace rtcp {
+
+// Temporary Maximum Media Stream Bit Rate Notification (TMMBN) (RFC 5104).
+class Tmmbn : public RtcpPacket {
+ public:
+ Tmmbn() : RtcpPacket() {
+ memset(&tmmbn_, 0, sizeof(tmmbn_));
+ }
+
+ virtual ~Tmmbn() {}
+
+ void From(uint32_t ssrc) {
+ tmmbn_.SenderSSRC = ssrc;
+ }
+  // At most kMaxNumberOfTmmbrs (50) TMMBR items can be added per TMMBN.
+ bool WithTmmbr(uint32_t ssrc, uint32_t bitrate_kbps, uint16_t overhead);
+
+ protected:
+ bool Create(uint8_t* packet,
+ size_t* index,
+ size_t max_length,
+ RtcpPacket::PacketReadyCallback* callback) const override;
+
+ private:
+ static const int kMaxNumberOfTmmbrs = 50;
+
+ size_t BlockLength() const {
+ const size_t kFciLen = 8;
+ return kCommonFbFmtLength + kFciLen * tmmbn_items_.size();
+ }
+
+ RTCPUtility::RTCPPacketRTPFBTMMBN tmmbn_;
+ std::vector<RTCPUtility::RTCPPacketRTPFBTMMBRItem> tmmbn_items_;
+
+ RTC_DISALLOW_COPY_AND_ASSIGN(Tmmbn);
+};
+
+} // namespace rtcp
+} // namespace webrtc
+#endif // WEBRTC_MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_TMMBN_H_
diff --git a/webrtc/modules/rtp_rtcp/source/rtcp_packet/tmmbn_unittest.cc b/webrtc/modules/rtp_rtcp/source/rtcp_packet/tmmbn_unittest.cc
new file mode 100644
index 0000000000..32d64a97b4
--- /dev/null
+++ b/webrtc/modules/rtp_rtcp/source/rtcp_packet/tmmbn_unittest.cc
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/tmmbn.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+#include "webrtc/modules/rtp_rtcp/source/byte_io.h"
+#include "webrtc/test/rtcp_packet_parser.h"
+
+using webrtc::rtcp::RawPacket;
+using webrtc::rtcp::Tmmbn;
+using webrtc::test::RtcpPacketParser;
+
+namespace webrtc {
+const uint32_t kSenderSsrc = 0x12345678;
+const uint32_t kRemoteSsrc = 0x23456789;
+
+TEST(RtcpPacketTest, TmmbnWithNoItem) {
+ Tmmbn tmmbn;
+ tmmbn.From(kSenderSsrc);
+
+ rtc::scoped_ptr<RawPacket> packet(tmmbn.Build());
+ RtcpPacketParser parser;
+ parser.Parse(packet->Buffer(), packet->Length());
+ EXPECT_EQ(1, parser.tmmbn()->num_packets());
+ EXPECT_EQ(kSenderSsrc, parser.tmmbn()->Ssrc());
+ EXPECT_EQ(0, parser.tmmbn_items()->num_packets());
+}
+
+TEST(RtcpPacketTest, TmmbnWithOneItem) {
+ Tmmbn tmmbn;
+ tmmbn.From(kSenderSsrc);
+ EXPECT_TRUE(tmmbn.WithTmmbr(kRemoteSsrc, 312, 60));
+
+ rtc::scoped_ptr<RawPacket> packet(tmmbn.Build());
+ RtcpPacketParser parser;
+ parser.Parse(packet->Buffer(), packet->Length());
+ EXPECT_EQ(1, parser.tmmbn()->num_packets());
+ EXPECT_EQ(kSenderSsrc, parser.tmmbn()->Ssrc());
+ EXPECT_EQ(1, parser.tmmbn_items()->num_packets());
+ EXPECT_EQ(kRemoteSsrc, parser.tmmbn_items()->Ssrc(0));
+ EXPECT_EQ(312U, parser.tmmbn_items()->BitrateKbps(0));
+ EXPECT_EQ(60U, parser.tmmbn_items()->Overhead(0));
+}
+
+TEST(RtcpPacketTest, TmmbnWithTwoItems) {
+ Tmmbn tmmbn;
+ tmmbn.From(kSenderSsrc);
+ EXPECT_TRUE(tmmbn.WithTmmbr(kRemoteSsrc, 312, 60));
+ EXPECT_TRUE(tmmbn.WithTmmbr(kRemoteSsrc + 1, 1288, 40));
+
+ rtc::scoped_ptr<RawPacket> packet(tmmbn.Build());
+ RtcpPacketParser parser;
+ parser.Parse(packet->Buffer(), packet->Length());
+ EXPECT_EQ(1, parser.tmmbn()->num_packets());
+ EXPECT_EQ(kSenderSsrc, parser.tmmbn()->Ssrc());
+ EXPECT_EQ(2, parser.tmmbn_items()->num_packets());
+ EXPECT_EQ(kRemoteSsrc, parser.tmmbn_items()->Ssrc(0));
+ EXPECT_EQ(312U, parser.tmmbn_items()->BitrateKbps(0));
+ EXPECT_EQ(60U, parser.tmmbn_items()->Overhead(0));
+ EXPECT_EQ(kRemoteSsrc + 1, parser.tmmbn_items()->Ssrc(1));
+ EXPECT_EQ(1288U, parser.tmmbn_items()->BitrateKbps(1));
+ EXPECT_EQ(40U, parser.tmmbn_items()->Overhead(1));
+}
+
+TEST(RtcpPacketTest, TmmbnWithTooManyItems) {
+ Tmmbn tmmbn;
+ tmmbn.From(kSenderSsrc);
+ const int kMaxTmmbrItems = 50;
+ for (int i = 0; i < kMaxTmmbrItems; ++i)
+ EXPECT_TRUE(tmmbn.WithTmmbr(kRemoteSsrc + i, 312, 60));
+
+ EXPECT_FALSE(tmmbn.WithTmmbr(kRemoteSsrc + kMaxTmmbrItems, 312, 60));
+}
+
+} // namespace webrtc
diff --git a/webrtc/modules/rtp_rtcp/source/rtcp_packet/tmmbr.cc b/webrtc/modules/rtp_rtcp/source/rtcp_packet/tmmbr.cc
new file mode 100644
index 0000000000..4df167de79
--- /dev/null
+++ b/webrtc/modules/rtp_rtcp/source/rtcp_packet/tmmbr.cc
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/tmmbr.h"
+
+#include "webrtc/base/logging.h"
+#include "webrtc/modules/rtp_rtcp/source/byte_io.h"
+
+using webrtc::RTCPUtility::PT_RTPFB;
+using webrtc::RTCPUtility::RTCPPacketRTPFBTMMBR;
+using webrtc::RTCPUtility::RTCPPacketRTPFBTMMBRItem;
+
+namespace webrtc {
+namespace rtcp {
+namespace {
+const uint32_t kUnusedMediaSourceSsrc0 = 0;
+
+void AssignUWord8(uint8_t* buffer, size_t* offset, uint8_t value) {
+ buffer[(*offset)++] = value;
+}
+
+void AssignUWord32(uint8_t* buffer, size_t* offset, uint32_t value) {
+ ByteWriter<uint32_t>::WriteBigEndian(buffer + *offset, value);
+ *offset += 4;
+}
+
+void ComputeMantissaAnd6bitBase2Exponent(uint32_t input_base10,
+ uint8_t bits_mantissa,
+ uint32_t* mantissa,
+ uint8_t* exp) {
+ // input_base10 = mantissa * 2^exp
+ assert(bits_mantissa <= 32);
+ uint32_t mantissa_max = (1 << bits_mantissa) - 1;
+ uint8_t exponent = 0;
+ for (uint32_t i = 0; i < 64; ++i) {
+ if (input_base10 <= (mantissa_max << i)) {
+ exponent = i;
+ break;
+ }
+ }
+ *exp = exponent;
+ *mantissa = (input_base10 >> exponent);
+}
+
+void CreateTmmbrItem(const RTCPPacketRTPFBTMMBRItem& tmmbr_item,
+ uint8_t* buffer,
+ size_t* pos) {
+ uint32_t bitrate_bps = tmmbr_item.MaxTotalMediaBitRate * 1000;
+ uint32_t mantissa = 0;
+ uint8_t exp = 0;
+ ComputeMantissaAnd6bitBase2Exponent(bitrate_bps, 17, &mantissa, &exp);
+
+ AssignUWord32(buffer, pos, tmmbr_item.SSRC);
+ AssignUWord8(buffer, pos, (exp << 2) + ((mantissa >> 15) & 0x03));
+ AssignUWord8(buffer, pos, mantissa >> 7);
+ AssignUWord8(buffer, pos, (mantissa << 1) +
+ ((tmmbr_item.MeasuredOverhead >> 8) & 0x01));
+ AssignUWord8(buffer, pos, tmmbr_item.MeasuredOverhead);
+}
+
+// Temporary Maximum Media Stream Bit Rate Request (TMMBR) (RFC 5104).
+//
+// FCI:
+//
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | SSRC |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | MxTBR Exp | MxTBR Mantissa |Measured Overhead|
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+void CreateTmmbr(const RTCPPacketRTPFBTMMBR& tmmbr,
+ const RTCPPacketRTPFBTMMBRItem& tmmbr_item,
+ uint8_t* buffer,
+ size_t* pos) {
+ AssignUWord32(buffer, pos, tmmbr.SenderSSRC);
+ AssignUWord32(buffer, pos, kUnusedMediaSourceSsrc0);
+ CreateTmmbrItem(tmmbr_item, buffer, pos);
+}
+} // namespace
+
+bool Tmmbr::Create(uint8_t* packet,
+ size_t* index,
+ size_t max_length,
+ RtcpPacket::PacketReadyCallback* callback) const {
+ while (*index + BlockLength() > max_length) {
+ if (!OnBufferFull(packet, index, callback))
+ return false;
+ }
+ const uint8_t kFmt = 3;
+ CreateHeader(kFmt, PT_RTPFB, HeaderLength(), packet, index);
+ CreateTmmbr(tmmbr_, tmmbr_item_, packet, index);
+ return true;
+}
+
+} // namespace rtcp
+} // namespace webrtc
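A worked example of the ComputeMantissaAnd6bitBase2Exponent() encoding above, using the 312 kbps value from the unit tests: the input is 312000 bps and the mantissa is 17 bits, so mantissa_max = 2^17 - 1 = 131071. Since 131071 << 1 = 262142 is still below 312000 while 131071 << 2 = 524284 is not, the loop selects exp = 2 and mantissa = 312000 >> 2 = 78000; a receiver reconstructs 78000 * 2^2 = 312000 bps exactly. In general the right shift truncates, so the encoded rate can round down by up to 2^exp - 1 bps.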
diff --git a/webrtc/modules/rtp_rtcp/source/rtcp_packet/tmmbr.h b/webrtc/modules/rtp_rtcp/source/rtcp_packet/tmmbr.h
new file mode 100644
index 0000000000..84a4180ad3
--- /dev/null
+++ b/webrtc/modules/rtp_rtcp/source/rtcp_packet/tmmbr.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#ifndef WEBRTC_MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_TMMBR_H_
+#define WEBRTC_MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_TMMBR_H_
+
+#include "webrtc/base/basictypes.h"
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet.h"
+#include "webrtc/modules/rtp_rtcp/source/rtcp_utility.h"
+
+namespace webrtc {
+namespace rtcp {
+// Temporary Maximum Media Stream Bit Rate Request (TMMBR) (RFC 5104).
+class Tmmbr : public RtcpPacket {
+ public:
+ Tmmbr() : RtcpPacket() {
+ memset(&tmmbr_, 0, sizeof(tmmbr_));
+ memset(&tmmbr_item_, 0, sizeof(tmmbr_item_));
+ }
+
+ virtual ~Tmmbr() {}
+
+ void From(uint32_t ssrc) {
+ tmmbr_.SenderSSRC = ssrc;
+ }
+ void To(uint32_t ssrc) {
+ tmmbr_item_.SSRC = ssrc;
+ }
+ void WithBitrateKbps(uint32_t bitrate_kbps) {
+ tmmbr_item_.MaxTotalMediaBitRate = bitrate_kbps;
+ }
+ void WithOverhead(uint16_t overhead) {
+ assert(overhead <= 0x1ff);
+ tmmbr_item_.MeasuredOverhead = overhead;
+ }
+
+ protected:
+ bool Create(uint8_t* packet,
+ size_t* index,
+ size_t max_length,
+ RtcpPacket::PacketReadyCallback* callback) const override;
+
+ private:
+ size_t BlockLength() const {
+ const size_t kFciLen = 8;
+ return kCommonFbFmtLength + kFciLen;
+ }
+
+ RTCPUtility::RTCPPacketRTPFBTMMBR tmmbr_;
+ RTCPUtility::RTCPPacketRTPFBTMMBRItem tmmbr_item_;
+
+ RTC_DISALLOW_COPY_AND_ASSIGN(Tmmbr);
+};
+} // namespace rtcp
+} // namespace webrtc
+#endif // WEBRTC_MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_TMMBR_H_
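Note that the 0x1ff bound asserted in WithOverhead() above matches the wire format: the measured overhead field of the TMMBR FCI is 9 bits wide (RFC 5104), so 511 is the largest representable value.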
diff --git a/webrtc/modules/rtp_rtcp/source/rtcp_packet/tmmbr_unittest.cc b/webrtc/modules/rtp_rtcp/source/rtcp_packet/tmmbr_unittest.cc
new file mode 100644
index 0000000000..6d71caa251
--- /dev/null
+++ b/webrtc/modules/rtp_rtcp/source/rtcp_packet/tmmbr_unittest.cc
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/tmmbr.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+#include "webrtc/modules/rtp_rtcp/source/byte_io.h"
+#include "webrtc/test/rtcp_packet_parser.h"
+
+using webrtc::rtcp::RawPacket;
+using webrtc::rtcp::Tmmbr;
+using webrtc::test::RtcpPacketParser;
+
+namespace webrtc {
+const uint32_t kSenderSsrc = 0x12345678;
+const uint32_t kRemoteSsrc = 0x23456789;
+
+TEST(RtcpPacketTest, Tmmbr) {
+ Tmmbr tmmbr;
+ tmmbr.From(kSenderSsrc);
+ tmmbr.To(kRemoteSsrc);
+ tmmbr.WithBitrateKbps(312);
+ tmmbr.WithOverhead(60);
+
+ rtc::scoped_ptr<RawPacket> packet(tmmbr.Build());
+ RtcpPacketParser parser;
+ parser.Parse(packet->Buffer(), packet->Length());
+ EXPECT_EQ(1, parser.tmmbr()->num_packets());
+ EXPECT_EQ(kSenderSsrc, parser.tmmbr()->Ssrc());
+ EXPECT_EQ(1, parser.tmmbr_item()->num_packets());
+ EXPECT_EQ(312U, parser.tmmbr_item()->BitrateKbps());
+ EXPECT_EQ(60U, parser.tmmbr_item()->Overhead());
+}
+
+} // namespace webrtc
diff --git a/webrtc/modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h b/webrtc/modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h
index 4cc1f38479..ad6fd166f2 100644
--- a/webrtc/modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h
+++ b/webrtc/modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h
@@ -15,7 +15,7 @@
#include <vector>
#include "webrtc/base/constructormagic.h"
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/modules/rtp_rtcp/source/rtcp_packet.h"
namespace webrtc {
diff --git a/webrtc/modules/rtp_rtcp/source/rtcp_packet/voip_metric.cc b/webrtc/modules/rtp_rtcp/source/rtcp_packet/voip_metric.cc
new file mode 100644
index 0000000000..a79d48e1ca
--- /dev/null
+++ b/webrtc/modules/rtp_rtcp/source/rtcp_packet/voip_metric.cc
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/voip_metric.h"
+
+#include "webrtc/base/checks.h"
+#include "webrtc/modules/rtp_rtcp/source/byte_io.h"
+
+namespace webrtc {
+namespace rtcp {
+// VoIP Metrics Report Block (RFC 3611).
+//
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// 0 | BT=7 | reserved | block length = 8 |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// 4 | SSRC of source |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// 8 | loss rate | discard rate | burst density | gap density |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// 12 | burst duration | gap duration |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// 16 | round trip delay | end system delay |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// 20 | signal level | noise level | RERL | Gmin |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// 24 | R factor | ext. R factor | MOS-LQ | MOS-CQ |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// 28 | RX config | reserved | JB nominal |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// 32 | JB maximum | JB abs max |
+// 36 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+VoipMetric::VoipMetric() : ssrc_(0) {
+ memset(&voip_metric_, 0, sizeof(voip_metric_));
+}
+
+void VoipMetric::Parse(const uint8_t* buffer) {
+ RTC_DCHECK(buffer[0] == kBlockType);
+ // reserved = buffer[1];
+ RTC_DCHECK(ByteReader<uint16_t>::ReadBigEndian(&buffer[2]) == kBlockLength);
+ ssrc_ = ByteReader<uint32_t>::ReadBigEndian(&buffer[4]);
+ voip_metric_.lossRate = buffer[8];
+ voip_metric_.discardRate = buffer[9];
+ voip_metric_.burstDensity = buffer[10];
+ voip_metric_.gapDensity = buffer[11];
+ voip_metric_.burstDuration = ByteReader<uint16_t>::ReadBigEndian(&buffer[12]);
+ voip_metric_.gapDuration = ByteReader<uint16_t>::ReadBigEndian(&buffer[14]);
+ voip_metric_.roundTripDelay =
+ ByteReader<uint16_t>::ReadBigEndian(&buffer[16]);
+ voip_metric_.endSystemDelay =
+ ByteReader<uint16_t>::ReadBigEndian(&buffer[18]);
+ voip_metric_.signalLevel = buffer[20];
+ voip_metric_.noiseLevel = buffer[21];
+ voip_metric_.RERL = buffer[22];
+ voip_metric_.Gmin = buffer[23];
+ voip_metric_.Rfactor = buffer[24];
+ voip_metric_.extRfactor = buffer[25];
+ voip_metric_.MOSLQ = buffer[26];
+ voip_metric_.MOSCQ = buffer[27];
+ voip_metric_.RXconfig = buffer[28];
+ // reserved = buffer[29];
+ voip_metric_.JBnominal = ByteReader<uint16_t>::ReadBigEndian(&buffer[30]);
+ voip_metric_.JBmax = ByteReader<uint16_t>::ReadBigEndian(&buffer[32]);
+ voip_metric_.JBabsMax = ByteReader<uint16_t>::ReadBigEndian(&buffer[34]);
+}
+
+void VoipMetric::Create(uint8_t* buffer) const {
+ const uint8_t kReserved = 0;
+ buffer[0] = kBlockType;
+ buffer[1] = kReserved;
+ ByteWriter<uint16_t>::WriteBigEndian(&buffer[2], kBlockLength);
+ ByteWriter<uint32_t>::WriteBigEndian(&buffer[4], ssrc_);
+ buffer[8] = voip_metric_.lossRate;
+ buffer[9] = voip_metric_.discardRate;
+ buffer[10] = voip_metric_.burstDensity;
+ buffer[11] = voip_metric_.gapDensity;
+ ByteWriter<uint16_t>::WriteBigEndian(&buffer[12], voip_metric_.burstDuration);
+ ByteWriter<uint16_t>::WriteBigEndian(&buffer[14], voip_metric_.gapDuration);
+ ByteWriter<uint16_t>::WriteBigEndian(&buffer[16],
+ voip_metric_.roundTripDelay);
+ ByteWriter<uint16_t>::WriteBigEndian(&buffer[18],
+ voip_metric_.endSystemDelay);
+ buffer[20] = voip_metric_.signalLevel;
+ buffer[21] = voip_metric_.noiseLevel;
+ buffer[22] = voip_metric_.RERL;
+ buffer[23] = voip_metric_.Gmin;
+ buffer[24] = voip_metric_.Rfactor;
+ buffer[25] = voip_metric_.extRfactor;
+ buffer[26] = voip_metric_.MOSLQ;
+ buffer[27] = voip_metric_.MOSCQ;
+ buffer[28] = voip_metric_.RXconfig;
+ buffer[29] = kReserved;
+ ByteWriter<uint16_t>::WriteBigEndian(&buffer[30], voip_metric_.JBnominal);
+ ByteWriter<uint16_t>::WriteBigEndian(&buffer[32], voip_metric_.JBmax);
+ ByteWriter<uint16_t>::WriteBigEndian(&buffer[34], voip_metric_.JBabsMax);
+}
+
+} // namespace rtcp
+} // namespace webrtc
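Note that Parse() above assumes a well-formed 36-byte block: the block type (7) and block length (8) are only verified with RTC_DCHECK, i.e. in debug builds, and malformed input is not rejected at runtime.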
diff --git a/webrtc/modules/rtp_rtcp/source/rtcp_packet/voip_metric.h b/webrtc/modules/rtp_rtcp/source/rtcp_packet/voip_metric.h
new file mode 100644
index 0000000000..9e3e41995a
--- /dev/null
+++ b/webrtc/modules/rtp_rtcp/source/rtcp_packet/voip_metric.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#ifndef WEBRTC_MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_VOIP_METRIC_H_
+#define WEBRTC_MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_VOIP_METRIC_H_
+
+#include "webrtc/base/basictypes.h"
+#include "webrtc/modules/include/module_common_types.h"
+
+namespace webrtc {
+namespace rtcp {
+
+class VoipMetric {
+ public:
+ static const uint8_t kBlockType = 7;
+ static const uint16_t kBlockLength = 8;
+ static const size_t kLength = 4 * (kBlockLength + 1); // 36
+ VoipMetric();
+ VoipMetric(const VoipMetric&) = default;
+ ~VoipMetric() {}
+
+ VoipMetric& operator=(const VoipMetric&) = default;
+
+ void Parse(const uint8_t* buffer);
+
+ // Fills buffer with the VoipMetric.
+ // Consumes VoipMetric::kLength bytes.
+ void Create(uint8_t* buffer) const;
+
+ void To(uint32_t ssrc) { ssrc_ = ssrc; }
+ void WithVoipMetric(const RTCPVoIPMetric& voip_metric) {
+ voip_metric_ = voip_metric;
+ }
+
+ uint32_t ssrc() const { return ssrc_; }
+ const RTCPVoIPMetric& voip_metric() const { return voip_metric_; }
+
+ private:
+ uint32_t ssrc_;
+ RTCPVoIPMetric voip_metric_;
+};
+
+} // namespace rtcp
+} // namespace webrtc
+#endif // WEBRTC_MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_VOIP_METRIC_H_
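The kLength constant above is 36 bytes because the RFC 3611 block length field counts 32-bit words minus one, making the full block kBlockLength + 1 = 9 words. A minimal round-trip sketch for the class (field values are arbitrary):

    uint8_t buffer[rtcp::VoipMetric::kLength];
    RTCPVoIPMetric metric;
    memset(&metric, 0, sizeof(metric));  // Zero everything, then set what is needed.
    metric.lossRate = 1;
    rtcp::VoipMetric block;
    block.To(0x23456789);                // SSRC of the media source being reported on.
    block.WithVoipMetric(metric);
    block.Create(buffer);                // Writes exactly kLength bytes.
    rtcp::VoipMetric parsed;
    parsed.Parse(buffer);                // parsed.ssrc() and the metric fields round-trip.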
diff --git a/webrtc/modules/rtp_rtcp/source/rtcp_packet/voip_metric_unittest.cc b/webrtc/modules/rtp_rtcp/source/rtcp_packet/voip_metric_unittest.cc
new file mode 100644
index 0000000000..44c82d67a9
--- /dev/null
+++ b/webrtc/modules/rtp_rtcp/source/rtcp_packet/voip_metric_unittest.cc
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/voip_metric.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace webrtc {
+namespace rtcp {
+namespace {
+
+const uint32_t kRemoteSsrc = 0x23456789;
+const uint8_t kBlock[] = {0x07, 0x00, 0x00, 0x08, 0x23, 0x45, 0x67, 0x89,
+ 0x01, 0x02, 0x03, 0x04, 0x11, 0x12, 0x22, 0x23,
+ 0x33, 0x34, 0x44, 0x45, 0x05, 0x06, 0x07, 0x08,
+ 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x00, 0x55, 0x56,
+ 0x66, 0x67, 0x77, 0x78};
+const size_t kBlockSizeBytes = sizeof(kBlock);
+static_assert(
+ kBlockSizeBytes == VoipMetric::kLength,
+ "Size of manually created Voip Metric block should match class constant");
+
+TEST(RtcpPacketVoipMetricTest, Create) {
+ uint8_t buffer[VoipMetric::kLength];
+ RTCPVoIPMetric metric;
+ metric.lossRate = 1;
+ metric.discardRate = 2;
+ metric.burstDensity = 3;
+ metric.gapDensity = 4;
+ metric.burstDuration = 0x1112;
+ metric.gapDuration = 0x2223;
+ metric.roundTripDelay = 0x3334;
+ metric.endSystemDelay = 0x4445;
+ metric.signalLevel = 5;
+ metric.noiseLevel = 6;
+ metric.RERL = 7;
+ metric.Gmin = 8;
+ metric.Rfactor = 9;
+ metric.extRfactor = 10;
+ metric.MOSLQ = 11;
+ metric.MOSCQ = 12;
+ metric.RXconfig = 13;
+ metric.JBnominal = 0x5556;
+ metric.JBmax = 0x6667;
+ metric.JBabsMax = 0x7778;
+ VoipMetric metric_block;
+ metric_block.To(kRemoteSsrc);
+ metric_block.WithVoipMetric(metric);
+
+ metric_block.Create(buffer);
+ EXPECT_EQ(0, memcmp(buffer, kBlock, kBlockSizeBytes));
+}
+
+TEST(RtcpPacketVoipMetricTest, Parse) {
+ VoipMetric read_metric;
+ read_metric.Parse(kBlock);
+
+ // Run checks on const object to ensure all accessors have const modifier.
+ const VoipMetric& parsed = read_metric;
+
+ EXPECT_EQ(kRemoteSsrc, parsed.ssrc());
+ EXPECT_EQ(1, parsed.voip_metric().lossRate);
+ EXPECT_EQ(2, parsed.voip_metric().discardRate);
+ EXPECT_EQ(3, parsed.voip_metric().burstDensity);
+ EXPECT_EQ(4, parsed.voip_metric().gapDensity);
+ EXPECT_EQ(0x1112, parsed.voip_metric().burstDuration);
+ EXPECT_EQ(0x2223, parsed.voip_metric().gapDuration);
+ EXPECT_EQ(0x3334, parsed.voip_metric().roundTripDelay);
+ EXPECT_EQ(0x4445, parsed.voip_metric().endSystemDelay);
+ EXPECT_EQ(5, parsed.voip_metric().signalLevel);
+ EXPECT_EQ(6, parsed.voip_metric().noiseLevel);
+ EXPECT_EQ(7, parsed.voip_metric().RERL);
+ EXPECT_EQ(8, parsed.voip_metric().Gmin);
+ EXPECT_EQ(9, parsed.voip_metric().Rfactor);
+ EXPECT_EQ(10, parsed.voip_metric().extRfactor);
+ EXPECT_EQ(11, parsed.voip_metric().MOSLQ);
+ EXPECT_EQ(12, parsed.voip_metric().MOSCQ);
+ EXPECT_EQ(13, parsed.voip_metric().RXconfig);
+ EXPECT_EQ(0x5556, parsed.voip_metric().JBnominal);
+ EXPECT_EQ(0x6667, parsed.voip_metric().JBmax);
+ EXPECT_EQ(0x7778, parsed.voip_metric().JBabsMax);
+}
+
+} // namespace
+} // namespace rtcp
+} // namespace webrtc
diff --git a/webrtc/modules/rtp_rtcp/source/rtcp_packet_unittest.cc b/webrtc/modules/rtp_rtcp/source/rtcp_packet_unittest.cc
index 77520b633b..22f61f5cab 100644
--- a/webrtc/modules/rtp_rtcp/source/rtcp_packet_unittest.cc
+++ b/webrtc/modules/rtp_rtcp/source/rtcp_packet_unittest.cc
@@ -14,6 +14,9 @@
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/modules/rtp_rtcp/source/rtcp_packet.h"
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/app.h"
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/bye.h"
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/receiver_report.h"
#include "webrtc/test/rtcp_packet_parser.h"
using ::testing::ElementsAre;
@@ -21,23 +24,15 @@ using ::testing::ElementsAre;
using webrtc::rtcp::App;
using webrtc::rtcp::Bye;
using webrtc::rtcp::Dlrr;
-using webrtc::rtcp::Empty;
using webrtc::rtcp::Fir;
-using webrtc::rtcp::Ij;
-using webrtc::rtcp::Nack;
-using webrtc::rtcp::Pli;
-using webrtc::rtcp::Sdes;
-using webrtc::rtcp::SenderReport;
-using webrtc::rtcp::Sli;
using webrtc::rtcp::RawPacket;
using webrtc::rtcp::ReceiverReport;
using webrtc::rtcp::Remb;
using webrtc::rtcp::ReportBlock;
using webrtc::rtcp::Rpsi;
using webrtc::rtcp::Rrtr;
+using webrtc::rtcp::Sdes;
using webrtc::rtcp::SenderReport;
-using webrtc::rtcp::Tmmbn;
-using webrtc::rtcp::Tmmbr;
using webrtc::rtcp::VoipMetric;
using webrtc::rtcp::Xr;
using webrtc::test::RtcpPacketParser;
@@ -47,81 +42,6 @@ namespace webrtc {
const uint32_t kSenderSsrc = 0x12345678;
const uint32_t kRemoteSsrc = 0x23456789;
-TEST(RtcpPacketTest, Rr) {
- ReceiverReport rr;
- rr.From(kSenderSsrc);
-
- rtc::scoped_ptr<RawPacket> packet(rr.Build());
- RtcpPacketParser parser;
- parser.Parse(packet->Buffer(), packet->Length());
- EXPECT_EQ(1, parser.receiver_report()->num_packets());
- EXPECT_EQ(kSenderSsrc, parser.receiver_report()->Ssrc());
- EXPECT_EQ(0, parser.report_block()->num_packets());
-}
-
-TEST(RtcpPacketTest, RrWithOneReportBlock) {
- ReportBlock rb;
- rb.To(kRemoteSsrc);
- rb.WithFractionLost(55);
- rb.WithCumulativeLost(0x111111);
- rb.WithExtHighestSeqNum(0x22222222);
- rb.WithJitter(0x33333333);
- rb.WithLastSr(0x44444444);
- rb.WithDelayLastSr(0x55555555);
-
- ReceiverReport rr;
- rr.From(kSenderSsrc);
- EXPECT_TRUE(rr.WithReportBlock(rb));
-
- rtc::scoped_ptr<RawPacket> packet(rr.Build());
- RtcpPacketParser parser;
- parser.Parse(packet->Buffer(), packet->Length());
- EXPECT_EQ(1, parser.receiver_report()->num_packets());
- EXPECT_EQ(kSenderSsrc, parser.receiver_report()->Ssrc());
- EXPECT_EQ(1, parser.report_block()->num_packets());
- EXPECT_EQ(kRemoteSsrc, parser.report_block()->Ssrc());
- EXPECT_EQ(55U, parser.report_block()->FractionLost());
- EXPECT_EQ(0x111111U, parser.report_block()->CumPacketLost());
- EXPECT_EQ(0x22222222U, parser.report_block()->ExtHighestSeqNum());
- EXPECT_EQ(0x33333333U, parser.report_block()->Jitter());
- EXPECT_EQ(0x44444444U, parser.report_block()->LastSr());
- EXPECT_EQ(0x55555555U, parser.report_block()->DelayLastSr());
-}
-
-TEST(RtcpPacketTest, RrWithTwoReportBlocks) {
- ReportBlock rb1;
- rb1.To(kRemoteSsrc);
- ReportBlock rb2;
- rb2.To(kRemoteSsrc + 1);
-
- ReceiverReport rr;
- rr.From(kSenderSsrc);
- EXPECT_TRUE(rr.WithReportBlock(rb1));
- EXPECT_TRUE(rr.WithReportBlock(rb2));
-
- rtc::scoped_ptr<RawPacket> packet(rr.Build());
- RtcpPacketParser parser;
- parser.Parse(packet->Buffer(), packet->Length());
- EXPECT_EQ(1, parser.receiver_report()->num_packets());
- EXPECT_EQ(kSenderSsrc, parser.receiver_report()->Ssrc());
- EXPECT_EQ(2, parser.report_block()->num_packets());
- EXPECT_EQ(1, parser.report_blocks_per_ssrc(kRemoteSsrc));
- EXPECT_EQ(1, parser.report_blocks_per_ssrc(kRemoteSsrc + 1));
-}
-
-TEST(RtcpPacketTest, RrWithTooManyReportBlocks) {
- ReceiverReport rr;
- rr.From(kSenderSsrc);
- const int kMaxReportBlocks = (1 << 5) - 1;
- ReportBlock rb;
- for (int i = 0; i < kMaxReportBlocks; ++i) {
- rb.To(kRemoteSsrc + i);
- EXPECT_TRUE(rr.WithReportBlock(rb));
- }
- rb.To(kRemoteSsrc + kMaxReportBlocks);
- EXPECT_FALSE(rr.WithReportBlock(rb));
-}
-
TEST(RtcpPacketTest, Sr) {
SenderReport sr;
sr.From(kSenderSsrc);
@@ -196,50 +116,6 @@ TEST(RtcpPacketTest, SrWithTooManyReportBlocks) {
EXPECT_FALSE(sr.WithReportBlock(rb));
}
-TEST(RtcpPacketTest, IjNoItem) {
- Ij ij;
-
- rtc::scoped_ptr<RawPacket> packet(ij.Build());
- RtcpPacketParser parser;
- parser.Parse(packet->Buffer(), packet->Length());
- EXPECT_EQ(1, parser.ij()->num_packets());
- EXPECT_EQ(0, parser.ij_item()->num_packets());
-}
-
-TEST(RtcpPacketTest, IjOneItem) {
- Ij ij;
- EXPECT_TRUE(ij.WithJitterItem(0x11111111));
-
- rtc::scoped_ptr<RawPacket> packet(ij.Build());
- RtcpPacketParser parser;
- parser.Parse(packet->Buffer(), packet->Length());
- EXPECT_EQ(1, parser.ij()->num_packets());
- EXPECT_EQ(1, parser.ij_item()->num_packets());
- EXPECT_EQ(0x11111111U, parser.ij_item()->Jitter());
-}
-
-TEST(RtcpPacketTest, IjTwoItems) {
- Ij ij;
- EXPECT_TRUE(ij.WithJitterItem(0x11111111));
- EXPECT_TRUE(ij.WithJitterItem(0x22222222));
-
- rtc::scoped_ptr<RawPacket> packet(ij.Build());
- RtcpPacketParser parser;
- parser.Parse(packet->Buffer(), packet->Length());
- EXPECT_EQ(1, parser.ij()->num_packets());
- EXPECT_EQ(2, parser.ij_item()->num_packets());
- EXPECT_EQ(0x22222222U, parser.ij_item()->Jitter());
-}
-
-TEST(RtcpPacketTest, IjTooManyItems) {
- Ij ij;
- const int kMaxIjItems = (1 << 5) - 1;
- for (int i = 0; i < kMaxIjItems; ++i) {
- EXPECT_TRUE(ij.WithJitterItem(i));
- }
- EXPECT_FALSE(ij.WithJitterItem(kMaxIjItems));
-}
-
TEST(RtcpPacketTest, AppWithNoData) {
App app;
app.WithSubType(30);
@@ -339,140 +215,6 @@ TEST(RtcpPacketTest, CnameItemWithEmptyString) {
EXPECT_EQ("", parser.sdes_chunk()->Cname());
}
-TEST(RtcpPacketTest, Pli) {
- Pli pli;
- pli.From(kSenderSsrc);
- pli.To(kRemoteSsrc);
-
- rtc::scoped_ptr<RawPacket> packet(pli.Build());
- RtcpPacketParser parser;
- parser.Parse(packet->Buffer(), packet->Length());
- EXPECT_EQ(1, parser.pli()->num_packets());
- EXPECT_EQ(kSenderSsrc, parser.pli()->Ssrc());
- EXPECT_EQ(kRemoteSsrc, parser.pli()->MediaSsrc());
-}
-
-TEST(RtcpPacketTest, Sli) {
- const uint16_t kFirstMb = 7777;
- const uint16_t kNumberOfMb = 6666;
- const uint8_t kPictureId = 60;
- Sli sli;
- sli.From(kSenderSsrc);
- sli.To(kRemoteSsrc);
- sli.WithFirstMb(kFirstMb);
- sli.WithNumberOfMb(kNumberOfMb);
- sli.WithPictureId(kPictureId);
-
- rtc::scoped_ptr<RawPacket> packet(sli.Build());
- RtcpPacketParser parser;
- parser.Parse(packet->Buffer(), packet->Length());
- EXPECT_EQ(1, parser.sli()->num_packets());
- EXPECT_EQ(kSenderSsrc, parser.sli()->Ssrc());
- EXPECT_EQ(kRemoteSsrc, parser.sli()->MediaSsrc());
- EXPECT_EQ(1, parser.sli_item()->num_packets());
- EXPECT_EQ(kFirstMb, parser.sli_item()->FirstMb());
- EXPECT_EQ(kNumberOfMb, parser.sli_item()->NumberOfMb());
- EXPECT_EQ(kPictureId, parser.sli_item()->PictureId());
-}
-
-TEST(RtcpPacketTest, Nack) {
- Nack nack;
- const uint16_t kList[] = {0, 1, 3, 8, 16};
- const uint16_t kListLength = sizeof(kList) / sizeof(kList[0]);
- nack.From(kSenderSsrc);
- nack.To(kRemoteSsrc);
- nack.WithList(kList, kListLength);
- rtc::scoped_ptr<RawPacket> packet(nack.Build());
- RtcpPacketParser parser;
- parser.Parse(packet->Buffer(), packet->Length());
- EXPECT_EQ(1, parser.nack()->num_packets());
- EXPECT_EQ(kSenderSsrc, parser.nack()->Ssrc());
- EXPECT_EQ(kRemoteSsrc, parser.nack()->MediaSsrc());
- EXPECT_EQ(1, parser.nack_item()->num_packets());
- std::vector<uint16_t> seqs = parser.nack_item()->last_nack_list();
- EXPECT_EQ(kListLength, seqs.size());
- for (size_t i = 0; i < kListLength; ++i) {
- EXPECT_EQ(kList[i], seqs[i]);
- }
-}
-
-TEST(RtcpPacketTest, NackWithWrap) {
- Nack nack;
- const uint16_t kList[] = {65500, 65516, 65534, 65535, 0, 1, 3, 20, 100};
- const uint16_t kListLength = sizeof(kList) / sizeof(kList[0]);
- nack.From(kSenderSsrc);
- nack.To(kRemoteSsrc);
- nack.WithList(kList, kListLength);
- rtc::scoped_ptr<RawPacket> packet(nack.Build());
- RtcpPacketParser parser;
- parser.Parse(packet->Buffer(), packet->Length());
- EXPECT_EQ(1, parser.nack()->num_packets());
- EXPECT_EQ(kSenderSsrc, parser.nack()->Ssrc());
- EXPECT_EQ(kRemoteSsrc, parser.nack()->MediaSsrc());
- EXPECT_EQ(4, parser.nack_item()->num_packets());
- std::vector<uint16_t> seqs = parser.nack_item()->last_nack_list();
- EXPECT_EQ(kListLength, seqs.size());
- for (size_t i = 0; i < kListLength; ++i) {
- EXPECT_EQ(kList[i], seqs[i]);
- }
-}
-
-TEST(RtcpPacketTest, NackFragmented) {
- Nack nack;
- const uint16_t kList[] = {1, 100, 200, 300, 400};
- const uint16_t kListLength = sizeof(kList) / sizeof(kList[0]);
- nack.From(kSenderSsrc);
- nack.To(kRemoteSsrc);
- nack.WithList(kList, kListLength);
-
- class Verifier : public rtcp::RtcpPacket::PacketReadyCallback {
- public:
- void OnPacketReady(uint8_t* data, size_t length) override {
- ++packets_created_;
- RtcpPacketParser parser;
- parser.Parse(data, length);
- EXPECT_EQ(1, parser.nack()->num_packets());
- EXPECT_EQ(kSenderSsrc, parser.nack()->Ssrc());
- EXPECT_EQ(kRemoteSsrc, parser.nack()->MediaSsrc());
- switch (packets_created_) {
- case 1:
- EXPECT_THAT(parser.nack_item()->last_nack_list(),
- ElementsAre(1, 100, 200));
- break;
- case 2:
- EXPECT_THAT(parser.nack_item()->last_nack_list(),
- ElementsAre(300, 400));
- break;
- default:
- ADD_FAILURE() << "Unexpected packet count: " << packets_created_;
- }
- }
- int packets_created_ = 0;
- } verifier;
- const size_t kBufferSize = 12 + (3 * 4); // Fits common header + 3 nack items
- uint8_t buffer[kBufferSize];
- EXPECT_TRUE(nack.BuildExternalBuffer(buffer, kBufferSize, &verifier));
- EXPECT_EQ(2, verifier.packets_created_);
-}
-
-TEST(RtcpPacketTest, NackWithTooSmallBuffer) {
- const uint16_t kList[] = {1};
- const size_t kMinNackBlockSize = 16;
- Nack nack;
- nack.From(kSenderSsrc);
- nack.To(kRemoteSsrc);
- nack.WithList(kList, 1);
- class Verifier : public rtcp::RtcpPacket::PacketReadyCallback {
- public:
- void OnPacketReady(uint8_t* data, size_t length) override {
- ADD_FAILURE() << "Buffer should be too small.";
- }
- } verifier;
- uint8_t buffer[kMinNackBlockSize - 1];
- EXPECT_FALSE(
- nack.BuildExternalBuffer(buffer, kMinNackBlockSize - 1, &verifier));
-}
-
TEST(RtcpPacketTest, Rpsi) {
Rpsi rpsi;
// 1000001 (7 bits = 1 byte in native string).
@@ -562,127 +304,6 @@ TEST(RtcpPacketTest, Fir) {
EXPECT_EQ(123U, parser.fir_item()->SeqNum());
}
-TEST(RtcpPacketTest, AppendPacket) {
- Fir fir;
- ReportBlock rb;
- ReceiverReport rr;
- rr.From(kSenderSsrc);
- EXPECT_TRUE(rr.WithReportBlock(rb));
- rr.Append(&fir);
-
- rtc::scoped_ptr<RawPacket> packet(rr.Build());
- RtcpPacketParser parser;
- parser.Parse(packet->Buffer(), packet->Length());
- EXPECT_EQ(1, parser.receiver_report()->num_packets());
- EXPECT_EQ(kSenderSsrc, parser.receiver_report()->Ssrc());
- EXPECT_EQ(1, parser.report_block()->num_packets());
- EXPECT_EQ(1, parser.fir()->num_packets());
-}
-
-TEST(RtcpPacketTest, AppendPacketOnEmpty) {
- Empty empty;
- ReceiverReport rr;
- rr.From(kSenderSsrc);
- empty.Append(&rr);
-
- rtc::scoped_ptr<RawPacket> packet(empty.Build());
- RtcpPacketParser parser;
- parser.Parse(packet->Buffer(), packet->Length());
- EXPECT_EQ(1, parser.receiver_report()->num_packets());
- EXPECT_EQ(0, parser.report_block()->num_packets());
-}
-
-TEST(RtcpPacketTest, AppendPacketWithOwnAppendedPacket) {
- Fir fir;
- Bye bye;
- ReportBlock rb;
-
- ReceiverReport rr;
- EXPECT_TRUE(rr.WithReportBlock(rb));
- rr.Append(&fir);
-
- SenderReport sr;
- sr.Append(&bye);
- sr.Append(&rr);
-
- rtc::scoped_ptr<RawPacket> packet(sr.Build());
- RtcpPacketParser parser;
- parser.Parse(packet->Buffer(), packet->Length());
- EXPECT_EQ(1, parser.sender_report()->num_packets());
- EXPECT_EQ(1, parser.receiver_report()->num_packets());
- EXPECT_EQ(1, parser.report_block()->num_packets());
- EXPECT_EQ(1, parser.bye()->num_packets());
- EXPECT_EQ(1, parser.fir()->num_packets());
-}
-
-TEST(RtcpPacketTest, Bye) {
- Bye bye;
- bye.From(kSenderSsrc);
-
- rtc::scoped_ptr<RawPacket> packet(bye.Build());
- RtcpPacketParser parser;
- parser.Parse(packet->Buffer(), packet->Length());
- EXPECT_EQ(1, parser.bye()->num_packets());
- EXPECT_EQ(kSenderSsrc, parser.bye()->Ssrc());
-}
-
-TEST(RtcpPacketTest, ByeWithCsrcs) {
- Fir fir;
- Bye bye;
- bye.From(kSenderSsrc);
- EXPECT_TRUE(bye.WithCsrc(0x22222222));
- EXPECT_TRUE(bye.WithCsrc(0x33333333));
- bye.Append(&fir);
-
- rtc::scoped_ptr<RawPacket> packet(bye.Build());
- RtcpPacketParser parser;
- parser.Parse(packet->Buffer(), packet->Length());
- EXPECT_EQ(1, parser.bye()->num_packets());
- EXPECT_EQ(kSenderSsrc, parser.bye()->Ssrc());
- EXPECT_EQ(1, parser.fir()->num_packets());
-}
-
-TEST(RtcpPacketTest, ByeWithTooManyCsrcs) {
- Bye bye;
- bye.From(kSenderSsrc);
- const int kMaxCsrcs = (1 << 5) - 2; // 5 bit len, first item is sender SSRC.
- for (int i = 0; i < kMaxCsrcs; ++i) {
- EXPECT_TRUE(bye.WithCsrc(i));
- }
- EXPECT_FALSE(bye.WithCsrc(kMaxCsrcs));
-}
-
-TEST(RtcpPacketTest, BuildWithInputBuffer) {
- Fir fir;
- ReportBlock rb;
- ReceiverReport rr;
- rr.From(kSenderSsrc);
- EXPECT_TRUE(rr.WithReportBlock(rb));
- rr.Append(&fir);
-
- const size_t kRrLength = 8;
- const size_t kReportBlockLength = 24;
- const size_t kFirLength = 20;
-
- class Verifier : public rtcp::RtcpPacket::PacketReadyCallback {
- public:
- void OnPacketReady(uint8_t* data, size_t length) override {
- RtcpPacketParser parser;
- parser.Parse(data, length);
- EXPECT_EQ(1, parser.receiver_report()->num_packets());
- EXPECT_EQ(1, parser.report_block()->num_packets());
- EXPECT_EQ(1, parser.fir()->num_packets());
- ++packets_created_;
- }
-
- int packets_created_ = 0;
- } verifier;
- const size_t kBufferSize = kRrLength + kReportBlockLength + kFirLength;
- uint8_t buffer[kBufferSize];
- EXPECT_TRUE(rr.BuildExternalBuffer(buffer, kBufferSize, &verifier));
- EXPECT_EQ(1, verifier.packets_created_);
-}
-
TEST(RtcpPacketTest, BuildWithTooSmallBuffer) {
ReportBlock rb;
ReceiverReport rr;
@@ -703,47 +324,6 @@ TEST(RtcpPacketTest, BuildWithTooSmallBuffer) {
EXPECT_FALSE(rr.BuildExternalBuffer(buffer, kBufferSize, &verifier));
}
-TEST(RtcpPacketTest, BuildWithTooSmallBuffer_FragmentedSend) {
- Fir fir;
- ReportBlock rb;
- ReceiverReport rr;
- rr.From(kSenderSsrc);
- EXPECT_TRUE(rr.WithReportBlock(rb));
- rr.Append(&fir);
-
- const size_t kRrLength = 8;
- const size_t kReportBlockLength = 24;
-
- class Verifier : public rtcp::RtcpPacket::PacketReadyCallback {
- public:
- void OnPacketReady(uint8_t* data, size_t length) override {
- RtcpPacketParser parser;
- parser.Parse(data, length);
- switch (packets_created_++) {
- case 0:
- EXPECT_EQ(1, parser.receiver_report()->num_packets());
- EXPECT_EQ(1, parser.report_block()->num_packets());
- EXPECT_EQ(0, parser.fir()->num_packets());
- break;
- case 1:
- EXPECT_EQ(0, parser.receiver_report()->num_packets());
- EXPECT_EQ(0, parser.report_block()->num_packets());
- EXPECT_EQ(1, parser.fir()->num_packets());
- break;
- default:
- ADD_FAILURE() << "OnPacketReady not expected to be called "
- << packets_created_ << " times.";
- }
- }
-
- int packets_created_ = 0;
- } verifier;
- const size_t kBufferSize = kRrLength + kReportBlockLength;
- uint8_t buffer[kBufferSize];
- EXPECT_TRUE(rr.BuildExternalBuffer(buffer, kBufferSize, &verifier));
- EXPECT_EQ(2, verifier.packets_created_);
-}
-
TEST(RtcpPacketTest, Remb) {
Remb remb;
remb.From(kSenderSsrc);
@@ -765,81 +345,6 @@ TEST(RtcpPacketTest, Remb) {
EXPECT_EQ(kRemoteSsrc + 2, ssrcs[2]);
}
-TEST(RtcpPacketTest, Tmmbr) {
- Tmmbr tmmbr;
- tmmbr.From(kSenderSsrc);
- tmmbr.To(kRemoteSsrc);
- tmmbr.WithBitrateKbps(312);
- tmmbr.WithOverhead(60);
-
- rtc::scoped_ptr<RawPacket> packet(tmmbr.Build());
- RtcpPacketParser parser;
- parser.Parse(packet->Buffer(), packet->Length());
- EXPECT_EQ(1, parser.tmmbr()->num_packets());
- EXPECT_EQ(kSenderSsrc, parser.tmmbr()->Ssrc());
- EXPECT_EQ(1, parser.tmmbr_item()->num_packets());
- EXPECT_EQ(312U, parser.tmmbr_item()->BitrateKbps());
- EXPECT_EQ(60U, parser.tmmbr_item()->Overhead());
-}
-
-TEST(RtcpPacketTest, TmmbnWithNoItem) {
- Tmmbn tmmbn;
- tmmbn.From(kSenderSsrc);
-
- rtc::scoped_ptr<RawPacket> packet(tmmbn.Build());
- RtcpPacketParser parser;
- parser.Parse(packet->Buffer(), packet->Length());
- EXPECT_EQ(1, parser.tmmbn()->num_packets());
- EXPECT_EQ(kSenderSsrc, parser.tmmbn()->Ssrc());
- EXPECT_EQ(0, parser.tmmbn_items()->num_packets());
-}
-
-TEST(RtcpPacketTest, TmmbnWithOneItem) {
- Tmmbn tmmbn;
- tmmbn.From(kSenderSsrc);
- EXPECT_TRUE(tmmbn.WithTmmbr(kRemoteSsrc, 312, 60));
-
- rtc::scoped_ptr<RawPacket> packet(tmmbn.Build());
- RtcpPacketParser parser;
- parser.Parse(packet->Buffer(), packet->Length());
- EXPECT_EQ(1, parser.tmmbn()->num_packets());
- EXPECT_EQ(kSenderSsrc, parser.tmmbn()->Ssrc());
- EXPECT_EQ(1, parser.tmmbn_items()->num_packets());
- EXPECT_EQ(kRemoteSsrc, parser.tmmbn_items()->Ssrc(0));
- EXPECT_EQ(312U, parser.tmmbn_items()->BitrateKbps(0));
- EXPECT_EQ(60U, parser.tmmbn_items()->Overhead(0));
-}
-
-TEST(RtcpPacketTest, TmmbnWithTwoItems) {
- Tmmbn tmmbn;
- tmmbn.From(kSenderSsrc);
- EXPECT_TRUE(tmmbn.WithTmmbr(kRemoteSsrc, 312, 60));
- EXPECT_TRUE(tmmbn.WithTmmbr(kRemoteSsrc + 1, 1288, 40));
-
- rtc::scoped_ptr<RawPacket> packet(tmmbn.Build());
- RtcpPacketParser parser;
- parser.Parse(packet->Buffer(), packet->Length());
- EXPECT_EQ(1, parser.tmmbn()->num_packets());
- EXPECT_EQ(kSenderSsrc, parser.tmmbn()->Ssrc());
- EXPECT_EQ(2, parser.tmmbn_items()->num_packets());
- EXPECT_EQ(kRemoteSsrc, parser.tmmbn_items()->Ssrc(0));
- EXPECT_EQ(312U, parser.tmmbn_items()->BitrateKbps(0));
- EXPECT_EQ(60U, parser.tmmbn_items()->Overhead(0));
- EXPECT_EQ(kRemoteSsrc + 1, parser.tmmbn_items()->Ssrc(1));
- EXPECT_EQ(1288U, parser.tmmbn_items()->BitrateKbps(1));
- EXPECT_EQ(40U, parser.tmmbn_items()->Overhead(1));
-}
-
-TEST(RtcpPacketTest, TmmbnWithTooManyItems) {
- Tmmbn tmmbn;
- tmmbn.From(kSenderSsrc);
- const int kMaxTmmbrItems = 50;
- for (int i = 0; i < kMaxTmmbrItems; ++i)
- EXPECT_TRUE(tmmbn.WithTmmbr(kRemoteSsrc + i, 312, 60));
-
- EXPECT_FALSE(tmmbn.WithTmmbr(kRemoteSsrc + kMaxTmmbrItems, 312, 60));
-}
-
TEST(RtcpPacketTest, XrWithNoReportBlocks) {
Xr xr;
xr.From(kSenderSsrc);
@@ -853,8 +358,7 @@ TEST(RtcpPacketTest, XrWithNoReportBlocks) {
TEST(RtcpPacketTest, XrWithRrtr) {
Rrtr rrtr;
- rrtr.WithNtpSec(0x11111111);
- rrtr.WithNtpFrac(0x22222222);
+ rrtr.WithNtp(NtpTime(0x11111111, 0x22222222));
Xr xr;
xr.From(kSenderSsrc);
EXPECT_TRUE(xr.WithRrtr(&rrtr));
@@ -871,11 +375,9 @@ TEST(RtcpPacketTest, XrWithRrtr) {
TEST(RtcpPacketTest, XrWithTwoRrtrBlocks) {
Rrtr rrtr1;
- rrtr1.WithNtpSec(0x11111111);
- rrtr1.WithNtpFrac(0x22222222);
+ rrtr1.WithNtp(NtpTime(0x11111111, 0x22222222));
Rrtr rrtr2;
- rrtr2.WithNtpSec(0x33333333);
- rrtr2.WithNtpFrac(0x44444444);
+ rrtr2.WithNtp(NtpTime(0x33333333, 0x44444444));
Xr xr;
xr.From(kSenderSsrc);
EXPECT_TRUE(xr.WithRrtr(&rrtr1));
@@ -967,32 +469,33 @@ TEST(RtcpPacketTest, XrWithTwoDlrrBlocks) {
}
TEST(RtcpPacketTest, XrWithVoipMetric) {
- VoipMetric metric;
- metric.To(kRemoteSsrc);
- metric.LossRate(1);
- metric.DiscardRate(2);
- metric.BurstDensity(3);
- metric.GapDensity(4);
- metric.BurstDuration(0x1111);
- metric.GapDuration(0x2222);
- metric.RoundTripDelay(0x3333);
- metric.EndSystemDelay(0x4444);
- metric.SignalLevel(5);
- metric.NoiseLevel(6);
- metric.Rerl(7);
- metric.Gmin(8);
- metric.Rfactor(9);
- metric.ExtRfactor(10);
- metric.MosLq(11);
- metric.MosCq(12);
- metric.RxConfig(13);
- metric.JbNominal(0x5555);
- metric.JbMax(0x6666);
- metric.JbAbsMax(0x7777);
-
+ RTCPVoIPMetric metric;
+ metric.lossRate = 1;
+ metric.discardRate = 2;
+ metric.burstDensity = 3;
+ metric.gapDensity = 4;
+ metric.burstDuration = 0x1111;
+ metric.gapDuration = 0x2222;
+ metric.roundTripDelay = 0x3333;
+ metric.endSystemDelay = 0x4444;
+ metric.signalLevel = 5;
+ metric.noiseLevel = 6;
+ metric.RERL = 7;
+ metric.Gmin = 8;
+ metric.Rfactor = 9;
+ metric.extRfactor = 10;
+ metric.MOSLQ = 11;
+ metric.MOSCQ = 12;
+ metric.RXconfig = 13;
+ metric.JBnominal = 0x5555;
+ metric.JBmax = 0x6666;
+ metric.JBabsMax = 0x7777;
+ VoipMetric metric_block;
+ metric_block.To(kRemoteSsrc);
+ metric_block.WithVoipMetric(metric);
Xr xr;
xr.From(kSenderSsrc);
- EXPECT_TRUE(xr.WithVoipMetric(&metric));
+ EXPECT_TRUE(xr.WithVoipMetric(&metric_block));
rtc::scoped_ptr<RawPacket> packet(xr.Build());
RtcpPacketParser parser;
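The large deletions in this file are a relocation rather than a removal: the Rr, Ij, Pli, Sli, Nack, Bye, append/compound and Tmmbr/Tmmbn tests move out of this monolithic file into per-packet unit tests (the Tmmbn and Tmmbr versions appear earlier in this patch; the others presumably live in their own *_unittest.cc files elsewhere in the change).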
diff --git a/webrtc/modules/rtp_rtcp/source/rtcp_receiver.cc b/webrtc/modules/rtp_rtcp/source/rtcp_receiver.cc
index b914838109..d65b04c8ab 100644
--- a/webrtc/modules/rtp_rtcp/source/rtcp_receiver.cc
+++ b/webrtc/modules/rtp_rtcp/source/rtcp_receiver.cc
@@ -23,8 +23,13 @@
#include "webrtc/modules/rtp_rtcp/source/rtp_rtcp_impl.h"
namespace webrtc {
-using namespace RTCPUtility;
-using namespace RTCPHelp;
+using RTCPHelp::RTCPPacketInformation;
+using RTCPHelp::RTCPReceiveInformation;
+using RTCPHelp::RTCPReportBlockInformation;
+using RTCPUtility::kBtVoipMetric;
+using RTCPUtility::RTCPCnameInformation;
+using RTCPUtility::RTCPPacketReportBlockItem;
+using RTCPUtility::RTCPPacketTypes;
// The number of RTCP time intervals needed to trigger a timeout.
const int kRrTimeoutIntervals = 3;
@@ -741,7 +746,7 @@ bool RTCPReceiver::UpdateRTCPReceiveInformationTimers() {
return updateBoundingSet;
}
-int32_t RTCPReceiver::BoundingSet(bool &tmmbrOwner, TMMBRSet* boundingSetRec) {
+int32_t RTCPReceiver::BoundingSet(bool* tmmbrOwner, TMMBRSet* boundingSetRec) {
CriticalSectionScoped lock(_criticalSectionRTCPReceiver);
std::map<uint32_t, RTCPReceiveInformation*>::iterator receiveInfoIt =
@@ -761,7 +766,7 @@ int32_t RTCPReceiver::BoundingSet(bool &tmmbrOwner, TMMBRSet* boundingSetRec) {
i++) {
if(receiveInfo->TmmbnBoundingSet.Ssrc(i) == main_ssrc_) {
// owner of bounding set
- tmmbrOwner = true;
+ *tmmbrOwner = true;
}
boundingSetRec->SetEntry(i,
receiveInfo->TmmbnBoundingSet.Tmmbr(i),
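BoundingSet()'s reference out-parameter becomes a pointer, which makes the mutation visible at call sites. A hypothetical updated call site:

    bool tmmbr_owner = false;
    TMMBRSet bounding_set;
    receiver.BoundingSet(&tmmbr_owner, &bounding_set);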
diff --git a/webrtc/modules/rtp_rtcp/source/rtcp_receiver.h b/webrtc/modules/rtp_rtcp/source/rtcp_receiver.h
index 272397675b..24861bd49e 100644
--- a/webrtc/modules/rtp_rtcp/source/rtcp_receiver.h
+++ b/webrtc/modules/rtp_rtcp/source/rtcp_receiver.h
@@ -12,11 +12,11 @@
#define WEBRTC_MODULES_RTP_RTCP_SOURCE_RTCP_RECEIVER_H_
#include <map>
-#include <vector>
#include <set>
+#include <vector>
#include "webrtc/base/thread_annotations.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "webrtc/modules/rtp_rtcp/source/rtcp_receiver_help.h"
#include "webrtc/modules/rtp_rtcp/source/rtcp_utility.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_utility.h"
@@ -103,7 +103,7 @@ public:
bool UpdateRTCPReceiveInformationTimers();
- int32_t BoundingSet(bool &tmmbrOwner, TMMBRSet* boundingSetRec);
+ int32_t BoundingSet(bool* tmmbrOwner, TMMBRSet* boundingSetRec);
int32_t UpdateTMMBR();
diff --git a/webrtc/modules/rtp_rtcp/source/rtcp_receiver_help.cc b/webrtc/modules/rtp_rtcp/source/rtcp_receiver_help.cc
index 718990d10b..a5c0e28282 100644
--- a/webrtc/modules/rtp_rtcp/source/rtcp_receiver_help.cc
+++ b/webrtc/modules/rtp_rtcp/source/rtcp_receiver_help.cc
@@ -17,7 +17,7 @@
#include "webrtc/modules/rtp_rtcp/source/rtp_utility.h"
namespace webrtc {
-using namespace RTCPHelp;
+namespace RTCPHelp {
RTCPPacketInformation::RTCPPacketInformation()
: rtcpPacketTypeFlags(0),
@@ -190,4 +190,5 @@ void RTCPReceiveInformation::VerifyAndAllocateBoundingSet(
const uint32_t minimumSize) {
TmmbnBoundingSet.VerifyAndAllocateSet(minimumSize);
}
+} // namespace RTCPHelp
} // namespace webrtc
diff --git a/webrtc/modules/rtp_rtcp/source/rtcp_receiver_help.h b/webrtc/modules/rtp_rtcp/source/rtcp_receiver_help.h
index 37b7b88370..a792841962 100644
--- a/webrtc/modules/rtp_rtcp/source/rtcp_receiver_help.h
+++ b/webrtc/modules/rtp_rtcp/source/rtcp_receiver_help.h
@@ -11,10 +11,12 @@
#ifndef WEBRTC_MODULES_RTP_RTCP_SOURCE_RTCP_RECEIVER_HELP_H_
#define WEBRTC_MODULES_RTP_RTCP_SOURCE_RTCP_RECEIVER_HELP_H_
+#include <list>
+#include <vector>
#include "webrtc/base/constructormagic.h"
#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h" // RTCPReportBlock
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h" // RTCPReportBlock
#include "webrtc/modules/rtp_rtcp/source/rtcp_utility.h"
#include "webrtc/modules/rtp_rtcp/source/tmmbr_help.h"
#include "webrtc/typedefs.h"
diff --git a/webrtc/modules/rtp_rtcp/source/rtcp_receiver_unittest.cc b/webrtc/modules/rtp_rtcp/source/rtcp_receiver_unittest.cc
index 1581845476..5d2fda347e 100644
--- a/webrtc/modules/rtp_rtcp/source/rtcp_receiver_unittest.cc
+++ b/webrtc/modules/rtp_rtcp/source/rtcp_receiver_unittest.cc
@@ -15,17 +15,23 @@
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
-// Note: This file has no directory. Lint warning must be ignored.
#include "webrtc/common_types.h"
#include "webrtc/modules/remote_bitrate_estimator/include/mock/mock_remote_bitrate_observer.h"
#include "webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream.h"
#include "webrtc/modules/rtp_rtcp/source/byte_io.h"
#include "webrtc/modules/rtp_rtcp/source/rtcp_packet.h"
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/app.h"
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/bye.h"
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/extended_jitter_report.h"
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/pli.h"
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/receiver_report.h"
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/sli.h"
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/tmmbr.h"
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h"
#include "webrtc/modules/rtp_rtcp/source/rtcp_receiver.h"
#include "webrtc/modules/rtp_rtcp/source/rtcp_sender.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_rtcp_impl.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_utility.h"
-#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h"
namespace webrtc {
@@ -384,20 +390,20 @@ TEST_F(RtcpReceiverTest, GetRtt) {
}
TEST_F(RtcpReceiverTest, InjectIjWithNoItem) {
- rtcp::Ij ij;
+ rtcp::ExtendedJitterReport ij;
rtc::scoped_ptr<rtcp::RawPacket> packet(ij.Build());
EXPECT_EQ(0, InjectRtcpPacket(packet->Buffer(), packet->Length()));
EXPECT_EQ(0U, rtcp_packet_info_.rtcpPacketTypeFlags);
}
TEST_F(RtcpReceiverTest, InjectIjWithOneItem) {
- rtcp::Ij ij;
- ij.WithJitterItem(0x11111111);
+ rtcp::ExtendedJitterReport ij;
+ ij.WithJitter(0x11213141);
rtc::scoped_ptr<rtcp::RawPacket> packet(ij.Build());
EXPECT_EQ(0, InjectRtcpPacket(packet->Buffer(), packet->Length()));
EXPECT_EQ(kRtcpTransmissionTimeOffset, rtcp_packet_info_.rtcpPacketTypeFlags);
- EXPECT_EQ(0x11111111U, rtcp_packet_info_.interArrivalJitter);
+ EXPECT_EQ(0x11213141U, rtcp_packet_info_.interArrivalJitter);
}
TEST_F(RtcpReceiverTest, InjectAppWithNoData) {
@@ -586,7 +592,9 @@ TEST_F(RtcpReceiverTest, InjectXrVoipPacket) {
const uint8_t kLossRate = 123;
rtcp::VoipMetric voip_metric;
voip_metric.To(kSourceSsrc);
- voip_metric.LossRate(kLossRate);
+ RTCPVoIPMetric metric;
+ metric.lossRate = kLossRate;
+ voip_metric.WithVoipMetric(metric);
rtcp::Xr xr;
xr.From(0x2345);
xr.WithVoipMetric(&voip_metric);
@@ -615,8 +623,7 @@ TEST_F(RtcpReceiverTest, XrVoipPacketNotToUsIgnored) {
TEST_F(RtcpReceiverTest, InjectXrReceiverReferenceTimePacket) {
rtcp::Rrtr rrtr;
- rrtr.WithNtpSec(0x10203);
- rrtr.WithNtpFrac(0x40506);
+ rrtr.WithNtp(NtpTime(0x10203, 0x40506));
rtcp::Xr xr;
xr.From(0x2345);
xr.WithRrtr(&rrtr);
@@ -751,13 +758,12 @@ TEST_F(RtcpReceiverTest, LastReceivedXrReferenceTimeInfoInitiallyFalse) {
TEST_F(RtcpReceiverTest, GetLastReceivedXrReferenceTimeInfo) {
const uint32_t kSenderSsrc = 0x123456;
- const uint32_t kNtpSec = 0x10203;
- const uint32_t kNtpFrac = 0x40506;
- const uint32_t kNtpMid = RTCPUtility::MidNtp(kNtpSec, kNtpFrac);
+ const NtpTime kNtp(0x10203, 0x40506);
+ const uint32_t kNtpMid =
+ RTCPUtility::MidNtp(kNtp.seconds(), kNtp.fractions());
rtcp::Rrtr rrtr;
- rrtr.WithNtpSec(kNtpSec);
- rrtr.WithNtpFrac(kNtpFrac);
+ rrtr.WithNtp(kNtp);
rtcp::Xr xr;
xr.From(kSenderSsrc);
xr.WithRrtr(&rrtr);
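Two API consolidations recur in the test updates above: rtcp::Ij becomes rtcp::ExtendedJitterReport with WithJitter() replacing WithJitterItem(), and rtcp::Rrtr takes a single NtpTime value instead of separate seconds/fraction setters:

    rtcp::Rrtr rrtr;
    rrtr.WithNtp(NtpTime(0x10203, 0x40506));  // NTP seconds, NTP fractions.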
diff --git a/webrtc/modules/rtp_rtcp/source/rtcp_sender.cc b/webrtc/modules/rtp_rtcp/source/rtcp_sender.cc
index 22b9477e05..848d73b2c4 100644
--- a/webrtc/modules/rtp_rtcp/source/rtcp_sender.cc
+++ b/webrtc/modules/rtp_rtcp/source/rtcp_sender.cc
@@ -11,17 +11,26 @@
#include "webrtc/modules/rtp_rtcp/source/rtcp_sender.h"
#include <assert.h> // assert
-#include <stdlib.h> // rand
#include <string.h> // memcpy
#include <algorithm> // min
#include <limits> // max
+#include <utility>
#include "webrtc/base/checks.h"
#include "webrtc/base/logging.h"
#include "webrtc/base/trace_event.h"
#include "webrtc/common_types.h"
#include "webrtc/modules/rtp_rtcp/source/byte_io.h"
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/app.h"
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/bye.h"
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/compound_packet.h"
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/nack.h"
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/pli.h"
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/receiver_report.h"
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/sli.h"
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/tmmbn.h"
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/tmmbr.h"
#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_rtcp_impl.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
@@ -31,13 +40,11 @@ namespace webrtc {
using RTCPUtility::RTCPCnameInformation;
NACKStringBuilder::NACKStringBuilder()
- : stream_(""), count_(0), prevNack_(0), consecutive_(false) {
-}
+ : stream_(""), count_(0), prevNack_(0), consecutive_(false) {}
NACKStringBuilder::~NACKStringBuilder() {}
-void NACKStringBuilder::PushNACK(uint16_t nack)
-{
+void NACKStringBuilder::PushNACK(uint16_t nack) {
if (count_ == 0) {
stream_ << nack;
} else if (nack == prevNack_ + 1) {
@@ -71,64 +78,63 @@ RTCPSender::FeedbackState::FeedbackState()
last_rr_ntp_frac(0),
remote_sr(0),
has_last_xr_rr(false),
- module(nullptr) {
-}
+ module(nullptr) {}
-struct RTCPSender::RtcpContext {
- RtcpContext(const FeedbackState& feedback_state,
- int32_t nack_size,
- const uint16_t* nack_list,
- bool repeat,
- uint64_t picture_id,
- uint8_t* buffer,
- uint32_t buffer_size)
- : feedback_state(feedback_state),
- nack_size(nack_size),
- nack_list(nack_list),
- repeat(repeat),
- picture_id(picture_id),
- buffer(buffer),
- buffer_size(buffer_size),
- ntp_sec(0),
- ntp_frac(0),
- position(0) {}
-
- uint8_t* AllocateData(uint32_t bytes) {
- RTC_DCHECK_LE(position + bytes, buffer_size);
- uint8_t* ptr = &buffer[position];
- position += bytes;
- return ptr;
+class PacketContainer : public rtcp::CompoundPacket,
+ public rtcp::RtcpPacket::PacketReadyCallback {
+ public:
+ explicit PacketContainer(Transport* transport)
+ : transport_(transport), bytes_sent_(0) {}
+ virtual ~PacketContainer() {
+ for (RtcpPacket* packet : appended_packets_)
+ delete packet;
}
- const FeedbackState& feedback_state;
- int32_t nack_size;
- const uint16_t* nack_list;
- bool repeat;
- uint64_t picture_id;
- uint8_t* buffer;
- uint32_t buffer_size;
- uint32_t ntp_sec;
- uint32_t ntp_frac;
- uint32_t position;
-};
-
-// TODO(sprang): Once all builders use RtcpPacket, call SendToNetwork() here.
-class RTCPSender::PacketBuiltCallback
- : public rtcp::RtcpPacket::PacketReadyCallback {
- public:
- PacketBuiltCallback(RtcpContext* context) : context_(context) {}
- virtual ~PacketBuiltCallback() {}
void OnPacketReady(uint8_t* data, size_t length) override {
- context_->position += length;
+ if (transport_->SendRtcp(data, length))
+ bytes_sent_ += length;
}
- bool BuildPacket(const rtcp::RtcpPacket& packet) {
- return packet.BuildExternalBuffer(
- &context_->buffer[context_->position],
- context_->buffer_size - context_->position, this);
+
+ size_t SendPackets() {
+ rtcp::CompoundPacket::Build(this);
+ return bytes_sent_;
}
private:
- RtcpContext* const context_;
+ Transport* transport_;
+ size_t bytes_sent_;
+};
+
+class RTCPSender::RtcpContext {
+ public:
+ RtcpContext(const FeedbackState& feedback_state,
+ int32_t nack_size,
+ const uint16_t* nack_list,
+ bool repeat,
+ uint64_t picture_id,
+ uint32_t ntp_sec,
+ uint32_t ntp_frac,
+ PacketContainer* container)
+ : feedback_state_(feedback_state),
+ nack_size_(nack_size),
+ nack_list_(nack_list),
+ repeat_(repeat),
+ picture_id_(picture_id),
+ ntp_sec_(ntp_sec),
+ ntp_frac_(ntp_frac),
+ container_(container) {}
+
+ virtual ~RtcpContext() {}
+
+ const FeedbackState& feedback_state_;
+ const int32_t nack_size_;
+ const uint16_t* nack_list_;
+ const bool repeat_;
+ const uint64_t picture_id_;
+ const uint32_t ntp_sec_;
+ const uint32_t ntp_frac_;
+
+ PacketContainer* const container_;
};
RTCPSender::RTCPSender(
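The hunk above replaces the caller-provided flat buffer with a streaming send path: PacketContainer derives from both rtcp::CompoundPacket and RtcpPacket::PacketReadyCallback, so building the compound packet hands each finished fragment straight to the injected Transport, while RtcpContext shrinks to an immutable bundle of per-send inputs. A minimal sketch of the flow, assuming the transport outlives the container:

    PacketContainer container(transport);
    container.Append(packet);                // Heap-allocated packet; deleted in ~PacketContainer().
    size_t sent = container.SendPackets();   // Build() -> OnPacketReady() -> transport->SendRtcp().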
@@ -139,6 +145,7 @@ RTCPSender::RTCPSender(
Transport* outgoing_transport)
: audio_(audio),
clock_(clock),
+ random_(clock_->TimeInMicroseconds()),
method_(RtcpMode::kOff),
transport_(outgoing_transport),
@@ -193,8 +200,7 @@ RTCPSender::RTCPSender(
builders_[kRtcpXrDlrrReportBlock] = &RTCPSender::BuildDlrr;
}
-RTCPSender::~RTCPSender() {
-}
+RTCPSender::~RTCPSender() {}
RtcpMode RTCPSender::Status() const {
CriticalSectionScoped lock(critical_section_rtcp_sender_.get());
@@ -340,63 +346,63 @@ int32_t RTCPSender::RemoveMixedCNAME(uint32_t SSRC) {
}
bool RTCPSender::TimeToSendRTCPReport(bool sendKeyframeBeforeRTP) const {
-/*
- For audio we use a fix 5 sec interval
+ /*
+    For audio we use a fixed 5 sec interval
- For video we use 1 sec interval fo a BW smaller than 360 kbit/s,
- technicaly we break the max 5% RTCP BW for video below 10 kbit/s but
- that should be extremely rare
+    For video we use a 1 sec interval for a BW smaller than 360 kbit/s;
+    technically we break the max 5% RTCP BW for video below 10 kbit/s, but
+    that should be extremely rare
-From RFC 3550
+ From RFC 3550
- MAX RTCP BW is 5% if the session BW
- A send report is approximately 65 bytes inc CNAME
- A receiver report is approximately 28 bytes
+    MAX RTCP BW is 5% of the session BW
+    A send report is approximately 65 bytes incl. CNAME
+ A receiver report is approximately 28 bytes
- The RECOMMENDED value for the reduced minimum in seconds is 360
- divided by the session bandwidth in kilobits/second. This minimum
- is smaller than 5 seconds for bandwidths greater than 72 kb/s.
+ The RECOMMENDED value for the reduced minimum in seconds is 360
+ divided by the session bandwidth in kilobits/second. This minimum
+ is smaller than 5 seconds for bandwidths greater than 72 kb/s.
- If the participant has not yet sent an RTCP packet (the variable
- initial is true), the constant Tmin is set to 2.5 seconds, else it
- is set to 5 seconds.
+ If the participant has not yet sent an RTCP packet (the variable
+ initial is true), the constant Tmin is set to 2.5 seconds, else it
+ is set to 5 seconds.
- The interval between RTCP packets is varied randomly over the
- range [0.5,1.5] times the calculated interval to avoid unintended
- synchronization of all participants
+ The interval between RTCP packets is varied randomly over the
+ range [0.5,1.5] times the calculated interval to avoid unintended
+ synchronization of all participants
- if we send
- If the participant is a sender (we_sent true), the constant C is
- set to the average RTCP packet size (avg_rtcp_size) divided by 25%
- of the RTCP bandwidth (rtcp_bw), and the constant n is set to the
- number of senders.
+ if we send
+ If the participant is a sender (we_sent true), the constant C is
+ set to the average RTCP packet size (avg_rtcp_size) divided by 25%
+ of the RTCP bandwidth (rtcp_bw), and the constant n is set to the
+ number of senders.
- if we receive only
- If we_sent is not true, the constant C is set
- to the average RTCP packet size divided by 75% of the RTCP
- bandwidth. The constant n is set to the number of receivers
- (members - senders). If the number of senders is greater than
- 25%, senders and receivers are treated together.
+ if we receive only
+ If we_sent is not true, the constant C is set
+ to the average RTCP packet size divided by 75% of the RTCP
+ bandwidth. The constant n is set to the number of receivers
+ (members - senders). If the number of senders is greater than
+ 25%, senders and receivers are treated together.
- reconsideration NOT required for peer-to-peer
- "timer reconsideration" is
- employed. This algorithm implements a simple back-off mechanism
- which causes users to hold back RTCP packet transmission if the
- group sizes are increasing.
+ reconsideration NOT required for peer-to-peer
+ "timer reconsideration" is
+ employed. This algorithm implements a simple back-off mechanism
+ which causes users to hold back RTCP packet transmission if the
+ group sizes are increasing.
- n = number of members
- C = avg_size/(rtcpBW/4)
+ n = number of members
+ C = avg_size/(rtcpBW/4)
- 3. The deterministic calculated interval Td is set to max(Tmin, n*C).
+ 3. The deterministic calculated interval Td is set to max(Tmin, n*C).
- 4. The calculated interval T is set to a number uniformly distributed
- between 0.5 and 1.5 times the deterministic calculated interval.
+ 4. The calculated interval T is set to a number uniformly distributed
+ between 0.5 and 1.5 times the deterministic calculated interval.
- 5. The resulting value of T is divided by e-3/2=1.21828 to compensate
- for the fact that the timer reconsideration algorithm converges to
- a value of the RTCP bandwidth below the intended average
-*/
+ 5. The resulting value of T is divided by e-3/2=1.21828 to compensate
+ for the fact that the timer reconsideration algorithm converges to
+ a value of the RTCP bandwidth below the intended average
+ */
int64_t now = clock_->TimeInMilliseconds();
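The timer rules quoted above reduce to a few lines. A minimal sketch with
hypothetical names (rand01 is a uniform draw in [0,1]; rtcp_bw is the session
RTCP bandwidth in bytes/s), not code from this patch:

    #include <algorithm>

    // fraction is 0.25 when we send and 0.75 when we only receive, per the
    // quoted text; n is the sender or receiver count accordingly.
    double RtcpIntervalSeconds(int n, double avg_rtcp_size, double rtcp_bw,
                               double fraction, bool initial, double rand01) {
      const double Tmin = initial ? 2.5 : 5.0;
      const double C = avg_rtcp_size / (rtcp_bw * fraction);
      const double Td = std::max(Tmin, n * C);
      // Randomize over [0.5, 1.5] * Td, then divide by e - 3/2 = 1.21828 to
      // compensate for timer reconsideration converging below the target.
      return Td * (0.5 + rand01) / 1.21828;
    }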
@@ -451,32 +457,15 @@ bool RTCPSender::SendTimeOfXrRrReport(uint32_t mid_ntp,
return true;
}
-int32_t RTCPSender::AddReportBlock(const RTCPReportBlock& report_block) {
- if (report_blocks_.size() >= RTCP_MAX_REPORT_BLOCKS) {
- LOG(LS_WARNING) << "Too many report blocks.";
- return -1;
- }
- rtcp::ReportBlock* block = &report_blocks_[report_block.remoteSSRC];
- block->To(report_block.remoteSSRC);
- block->WithFractionLost(report_block.fractionLost);
- block->WithCumulativeLost(report_block.cumulativeLost);
- block->WithExtHighestSeqNum(report_block.extendedHighSeqNum);
- block->WithJitter(report_block.jitter);
- block->WithLastSr(report_block.lastSR);
- block->WithDelayLastSr(report_block.delaySinceLastSR);
-
- return 0;
-}
-
-RTCPSender::BuildResult RTCPSender::BuildSR(RtcpContext* ctx) {
+rtc::scoped_ptr<rtcp::RtcpPacket> RTCPSender::BuildSR(const RtcpContext& ctx) {
for (int i = (RTCP_NUMBER_OF_SR - 2); i >= 0; i--) {
// shift old
last_send_report_[i + 1] = last_send_report_[i];
last_rtcp_time_[i + 1] = last_rtcp_time_[i];
}
- last_rtcp_time_[0] = Clock::NtpToMs(ctx->ntp_sec, ctx->ntp_frac);
- last_send_report_[0] = (ctx->ntp_sec << 16) + (ctx->ntp_frac >> 16);
+ last_rtcp_time_[0] = Clock::NtpToMs(ctx.ntp_sec_, ctx.ntp_frac_);
+ last_send_report_[0] = (ctx.ntp_sec_ << 16) + (ctx.ntp_frac_ >> 16);
// The timestamp of this RTCP packet should be estimated as the timestamp of
// the frame being captured at this moment. We are calculating that
@@ -485,67 +474,52 @@ RTCPSender::BuildResult RTCPSender::BuildSR(RtcpContext* ctx) {
uint32_t rtp_timestamp =
start_timestamp_ + last_rtp_timestamp_ +
(clock_->TimeInMilliseconds() - last_frame_capture_time_ms_) *
- (ctx->feedback_state.frequency_hz / 1000);
+ (ctx.feedback_state_.frequency_hz / 1000);
- rtcp::SenderReport report;
- report.From(ssrc_);
- report.WithNtpSec(ctx->ntp_sec);
- report.WithNtpFrac(ctx->ntp_frac);
- report.WithRtpTimestamp(rtp_timestamp);
- report.WithPacketCount(ctx->feedback_state.packets_sent);
- report.WithOctetCount(ctx->feedback_state.media_bytes_sent);
+ rtcp::SenderReport* report = new rtcp::SenderReport();
+ report->From(ssrc_);
+ report->WithNtpSec(ctx.ntp_sec_);
+ report->WithNtpFrac(ctx.ntp_frac_);
+ report->WithRtpTimestamp(rtp_timestamp);
+ report->WithPacketCount(ctx.feedback_state_.packets_sent);
+ report->WithOctetCount(ctx.feedback_state_.media_bytes_sent);
for (auto it : report_blocks_)
- report.WithReportBlock(it.second);
-
- PacketBuiltCallback callback(ctx);
- if (!callback.BuildPacket(report))
- return BuildResult::kTruncated;
+ report->WithReportBlock(it.second);
report_blocks_.clear();
- return BuildResult::kSuccess;
+
+ return rtc::scoped_ptr<rtcp::SenderReport>(report);
}
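The rtp_timestamp estimate above extrapolates from the last captured frame:
elapsed wall-clock milliseconds times ticks per millisecond. As a standalone
sketch of the same arithmetic (hypothetical helper; with a 90 kHz video clock,
20 ms elapsed adds 20 * 90 = 1800 ticks):

    #include <cstdint>

    uint32_t EstimateRtpTimestamp(uint32_t start_ts, uint32_t last_rtp_ts,
                                  int64_t now_ms, int64_t last_capture_ms,
                                  int frequency_hz) {
      // Offset + last RTP timestamp + elapsed ticks, as in BuildSR.
      return start_ts + last_rtp_ts +
             static_cast<uint32_t>((now_ms - last_capture_ms) *
                                   (frequency_hz / 1000));
    }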
-RTCPSender::BuildResult RTCPSender::BuildSDES(RtcpContext* ctx) {
+rtc::scoped_ptr<rtcp::RtcpPacket> RTCPSender::BuildSDES(
+ const RtcpContext& ctx) {
size_t length_cname = cname_.length();
RTC_CHECK_LT(length_cname, static_cast<size_t>(RTCP_CNAME_SIZE));
- rtcp::Sdes sdes;
- sdes.WithCName(ssrc_, cname_);
+ rtcp::Sdes* sdes = new rtcp::Sdes();
+ sdes->WithCName(ssrc_, cname_);
for (const auto it : csrc_cnames_)
- sdes.WithCName(it.first, it.second);
+ sdes->WithCName(it.first, it.second);
- PacketBuiltCallback callback(ctx);
- if (!callback.BuildPacket(sdes))
- return BuildResult::kTruncated;
-
- return BuildResult::kSuccess;
+ return rtc::scoped_ptr<rtcp::Sdes>(sdes);
}
-RTCPSender::BuildResult RTCPSender::BuildRR(RtcpContext* ctx) {
- rtcp::ReceiverReport report;
- report.From(ssrc_);
+rtc::scoped_ptr<rtcp::RtcpPacket> RTCPSender::BuildRR(const RtcpContext& ctx) {
+ rtcp::ReceiverReport* report = new rtcp::ReceiverReport();
+ report->From(ssrc_);
for (auto it : report_blocks_)
- report.WithReportBlock(it.second);
-
- PacketBuiltCallback callback(ctx);
- if (!callback.BuildPacket(report))
- return BuildResult::kTruncated;
+ report->WithReportBlock(it.second);
report_blocks_.clear();
-
- return BuildResult::kSuccess;
+ return rtc::scoped_ptr<rtcp::ReceiverReport>(report);
}
-RTCPSender::BuildResult RTCPSender::BuildPLI(RtcpContext* ctx) {
- rtcp::Pli pli;
- pli.From(ssrc_);
- pli.To(remote_ssrc_);
-
- PacketBuiltCallback callback(ctx);
- if (!callback.BuildPacket(pli))
- return BuildResult::kTruncated;
+rtc::scoped_ptr<rtcp::RtcpPacket> RTCPSender::BuildPLI(const RtcpContext& ctx) {
+ rtcp::Pli* pli = new rtcp::Pli();
+ pli->From(ssrc_);
+ pli->To(remote_ssrc_);
TRACE_EVENT_INSTANT0(TRACE_DISABLED_BY_DEFAULT("webrtc_rtp"),
"RTCPSender::PLI");
@@ -553,21 +527,17 @@ RTCPSender::BuildResult RTCPSender::BuildPLI(RtcpContext* ctx) {
TRACE_COUNTER_ID1(TRACE_DISABLED_BY_DEFAULT("webrtc_rtp"), "RTCP_PLICount",
ssrc_, packet_type_counter_.pli_packets);
- return BuildResult::kSuccess;
+ return rtc::scoped_ptr<rtcp::Pli>(pli);
}
-RTCPSender::BuildResult RTCPSender::BuildFIR(RtcpContext* ctx) {
- if (!ctx->repeat)
+rtc::scoped_ptr<rtcp::RtcpPacket> RTCPSender::BuildFIR(const RtcpContext& ctx) {
+ if (!ctx.repeat_)
++sequence_number_fir_; // Do not increase if repetition.
- rtcp::Fir fir;
- fir.From(ssrc_);
- fir.To(remote_ssrc_);
- fir.WithCommandSeqNum(sequence_number_fir_);
-
- PacketBuiltCallback callback(ctx);
- if (!callback.BuildPacket(fir))
- return BuildResult::kTruncated;
+ rtcp::Fir* fir = new rtcp::Fir();
+ fir->From(ssrc_);
+ fir->To(remote_ssrc_);
+ fir->WithCommandSeqNum(sequence_number_fir_);
TRACE_EVENT_INSTANT0(TRACE_DISABLED_BY_DEFAULT("webrtc_rtp"),
"RTCPSender::FIR");
@@ -575,7 +545,7 @@ RTCPSender::BuildResult RTCPSender::BuildFIR(RtcpContext* ctx) {
TRACE_COUNTER_ID1(TRACE_DISABLED_BY_DEFAULT("webrtc_rtp"), "RTCP_FIRCount",
ssrc_, packet_type_counter_.fir_packets);
- return BuildResult::kSuccess;
+ return rtc::scoped_ptr<rtcp::Fir>(fir);
}
/*
@@ -585,20 +555,14 @@ RTCPSender::BuildResult RTCPSender::BuildFIR(RtcpContext* ctx) {
| First | Number | PictureID |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*/
-RTCPSender::BuildResult RTCPSender::BuildSLI(RtcpContext* ctx) {
- rtcp::Sli sli;
- sli.From(ssrc_);
- sli.To(remote_ssrc_);
+rtc::scoped_ptr<rtcp::RtcpPacket> RTCPSender::BuildSLI(const RtcpContext& ctx) {
+ rtcp::Sli* sli = new rtcp::Sli();
+ sli->From(ssrc_);
+ sli->To(remote_ssrc_);
// Crop picture id to 6 least significant bits.
- sli.WithPictureId(ctx->picture_id & 0x3F);
- sli.WithFirstMb(0);
- sli.WithNumberOfMb(0x1FFF); // 13 bits, only ones for now.
-
- PacketBuiltCallback callback(ctx);
- if (!callback.BuildPacket(sli))
- return BuildResult::kTruncated;
+ sli->WithPictureId(ctx.picture_id_ & 0x3F);
- return BuildResult::kSuccess;
+ return rtc::scoped_ptr<rtcp::Sli>(sli);
}
/*
@@ -613,38 +577,32 @@ RTCPSender::BuildResult RTCPSender::BuildSLI(RtcpContext* ctx) {
/*
* Note: not generic made for VP8
*/
-RTCPSender::BuildResult RTCPSender::BuildRPSI(RtcpContext* ctx) {
- if (ctx->feedback_state.send_payload_type == 0xFF)
- return BuildResult::kError;
-
- rtcp::Rpsi rpsi;
- rpsi.From(ssrc_);
- rpsi.To(remote_ssrc_);
- rpsi.WithPayloadType(ctx->feedback_state.send_payload_type);
- rpsi.WithPictureId(ctx->picture_id);
-
- PacketBuiltCallback callback(ctx);
- if (!callback.BuildPacket(rpsi))
- return BuildResult::kTruncated;
-
- return BuildResult::kSuccess;
+rtc::scoped_ptr<rtcp::RtcpPacket> RTCPSender::BuildRPSI(
+ const RtcpContext& ctx) {
+ if (ctx.feedback_state_.send_payload_type == 0xFF)
+ return nullptr;
+
+ rtcp::Rpsi* rpsi = new rtcp::Rpsi();
+ rpsi->From(ssrc_);
+ rpsi->To(remote_ssrc_);
+ rpsi->WithPayloadType(ctx.feedback_state_.send_payload_type);
+ rpsi->WithPictureId(ctx.picture_id_);
+
+ return rtc::scoped_ptr<rtcp::Rpsi>(rpsi);
}
-RTCPSender::BuildResult RTCPSender::BuildREMB(RtcpContext* ctx) {
- rtcp::Remb remb;
- remb.From(ssrc_);
+rtc::scoped_ptr<rtcp::RtcpPacket> RTCPSender::BuildREMB(
+ const RtcpContext& ctx) {
+ rtcp::Remb* remb = new rtcp::Remb();
+ remb->From(ssrc_);
for (uint32_t ssrc : remb_ssrcs_)
- remb.AppliesTo(ssrc);
- remb.WithBitrateBps(remb_bitrate_);
-
- PacketBuiltCallback callback(ctx);
- if (!callback.BuildPacket(remb))
- return BuildResult::kTruncated;
+ remb->AppliesTo(ssrc);
+ remb->WithBitrateBps(remb_bitrate_);
TRACE_EVENT_INSTANT0(TRACE_DISABLED_BY_DEFAULT("webrtc_rtp"),
"RTCPSender::REMB");
- return BuildResult::kSuccess;
+ return rtc::scoped_ptr<rtcp::Remb>(remb);
}
void RTCPSender::SetTargetBitrate(unsigned int target_bitrate) {
@@ -652,9 +610,10 @@ void RTCPSender::SetTargetBitrate(unsigned int target_bitrate) {
tmmbr_send_ = target_bitrate / 1000;
}
-RTCPSender::BuildResult RTCPSender::BuildTMMBR(RtcpContext* ctx) {
- if (ctx->feedback_state.module == NULL)
- return BuildResult::kError;
+rtc::scoped_ptr<rtcp::RtcpPacket> RTCPSender::BuildTMMBR(
+ const RtcpContext& ctx) {
+ if (ctx.feedback_state_.module == nullptr)
+ return nullptr;
// Before sending the TMMBR check the received TMMBN, only an owner is
// allowed to raise the bitrate:
// * If the sender is an owner of the TMMBN -> send TMMBR
@@ -669,14 +628,14 @@ RTCPSender::BuildResult RTCPSender::BuildTMMBR(RtcpContext* ctx) {
  // will acquire criticalSectionRTCPReceiver_, which is a potential deadlock,
  // but since RTCPReceiver is not doing the reverse we should be fine
int32_t lengthOfBoundingSet =
- ctx->feedback_state.module->BoundingSet(tmmbrOwner, candidateSet);
+ ctx.feedback_state_.module->BoundingSet(&tmmbrOwner, candidateSet);
if (lengthOfBoundingSet > 0) {
for (int32_t i = 0; i < lengthOfBoundingSet; i++) {
if (candidateSet->Tmmbr(i) == tmmbr_send_ &&
candidateSet->PacketOH(i) == packet_oh_send_) {
- // do not send the same tuple
- return BuildResult::kAborted;
+ // Do not send the same tuple.
+ return nullptr;
}
}
if (!tmmbrOwner) {
@@ -687,124 +646,69 @@ RTCPSender::BuildResult RTCPSender::BuildTMMBR(RtcpContext* ctx) {
int numCandidates = lengthOfBoundingSet + 1;
// find bounding set
- TMMBRSet* boundingSet = NULL;
+ TMMBRSet* boundingSet = nullptr;
int numBoundingSet = tmmbr_help_.FindTMMBRBoundingSet(boundingSet);
if (numBoundingSet > 0 || numBoundingSet <= numCandidates)
tmmbrOwner = tmmbr_help_.IsOwner(ssrc_, numBoundingSet);
if (!tmmbrOwner) {
- // did not enter bounding set, no meaning to send this request
- return BuildResult::kAborted;
+        // Did not enter bounding set; no point in sending this request.
+ return nullptr;
}
}
}
- if (tmmbr_send_) {
- rtcp::Tmmbr tmmbr;
- tmmbr.From(ssrc_);
- tmmbr.To(remote_ssrc_);
- tmmbr.WithBitrateKbps(tmmbr_send_);
- tmmbr.WithOverhead(packet_oh_send_);
+ if (!tmmbr_send_)
+ return nullptr;
- PacketBuiltCallback callback(ctx);
- if (!callback.BuildPacket(tmmbr))
- return BuildResult::kTruncated;
- }
- return BuildResult::kSuccess;
+ rtcp::Tmmbr* tmmbr = new rtcp::Tmmbr();
+ tmmbr->From(ssrc_);
+ tmmbr->To(remote_ssrc_);
+ tmmbr->WithBitrateKbps(tmmbr_send_);
+ tmmbr->WithOverhead(packet_oh_send_);
+
+ return rtc::scoped_ptr<rtcp::Tmmbr>(tmmbr);
}
-RTCPSender::BuildResult RTCPSender::BuildTMMBN(RtcpContext* ctx) {
+rtc::scoped_ptr<rtcp::RtcpPacket> RTCPSender::BuildTMMBN(
+ const RtcpContext& ctx) {
TMMBRSet* boundingSet = tmmbr_help_.BoundingSetToSend();
- if (boundingSet == NULL)
- return BuildResult::kError;
+ if (boundingSet == nullptr)
+ return nullptr;
- rtcp::Tmmbn tmmbn;
- tmmbn.From(ssrc_);
+ rtcp::Tmmbn* tmmbn = new rtcp::Tmmbn();
+ tmmbn->From(ssrc_);
for (uint32_t i = 0; i < boundingSet->lengthOfSet(); i++) {
if (boundingSet->Tmmbr(i) > 0) {
- tmmbn.WithTmmbr(boundingSet->Ssrc(i), boundingSet->Tmmbr(i),
- boundingSet->PacketOH(i));
+ tmmbn->WithTmmbr(boundingSet->Ssrc(i), boundingSet->Tmmbr(i),
+ boundingSet->PacketOH(i));
}
}
- PacketBuiltCallback callback(ctx);
- if (!callback.BuildPacket(tmmbn))
- return BuildResult::kTruncated;
-
- return BuildResult::kSuccess;
+ return rtc::scoped_ptr<rtcp::Tmmbn>(tmmbn);
}
-RTCPSender::BuildResult RTCPSender::BuildAPP(RtcpContext* ctx) {
- rtcp::App app;
- app.From(ssrc_);
- app.WithSubType(app_sub_type_);
- app.WithName(app_name_);
- app.WithData(app_data_.get(), app_length_);
-
- PacketBuiltCallback callback(ctx);
- if (!callback.BuildPacket(app))
- return BuildResult::kTruncated;
+rtc::scoped_ptr<rtcp::RtcpPacket> RTCPSender::BuildAPP(const RtcpContext& ctx) {
+ rtcp::App* app = new rtcp::App();
+ app->From(ssrc_);
+ app->WithSubType(app_sub_type_);
+ app->WithName(app_name_);
+ app->WithData(app_data_.get(), app_length_);
- return BuildResult::kSuccess;
+ return rtc::scoped_ptr<rtcp::App>(app);
}
-RTCPSender::BuildResult RTCPSender::BuildNACK(RtcpContext* ctx) {
- // sanity
- if (ctx->position + 16 >= IP_PACKET_SIZE) {
- LOG(LS_WARNING) << "Failed to build NACK.";
- return BuildResult::kTruncated;
- }
-
- // int size, uint16_t* nack_list
- // add nack list
- uint8_t FMT = 1;
- *ctx->AllocateData(1) = 0x80 + FMT;
- *ctx->AllocateData(1) = 205;
-
- *ctx->AllocateData(1) = 0;
- int nack_size_pos_ = ctx->position;
- *ctx->AllocateData(1) = 3; // setting it to one kNACK signal as default
-
- // Add our own SSRC
- ByteWriter<uint32_t>::WriteBigEndian(ctx->AllocateData(4), ssrc_);
-
- // Add the remote SSRC
- ByteWriter<uint32_t>::WriteBigEndian(ctx->AllocateData(4), remote_ssrc_);
-
- // Build NACK bitmasks and write them to the RTCP message.
- // The nack list should be sorted and not contain duplicates if one
- // wants to build the smallest rtcp nack packet.
- int numOfNackFields = 0;
- int maxNackFields =
- std::min<int>(kRtcpMaxNackFields, (IP_PACKET_SIZE - ctx->position) / 4);
- int i = 0;
- while (i < ctx->nack_size && numOfNackFields < maxNackFields) {
- uint16_t nack = ctx->nack_list[i++];
- uint16_t bitmask = 0;
- while (i < ctx->nack_size) {
- int shift = static_cast<uint16_t>(ctx->nack_list[i] - nack) - 1;
- if (shift >= 0 && shift <= 15) {
- bitmask |= (1 << shift);
- ++i;
- } else {
- break;
- }
- }
- // Write the sequence number and the bitmask to the packet.
- assert(ctx->position + 4 < IP_PACKET_SIZE);
- ByteWriter<uint16_t>::WriteBigEndian(ctx->AllocateData(2), nack);
- ByteWriter<uint16_t>::WriteBigEndian(ctx->AllocateData(2), bitmask);
- numOfNackFields++;
- }
- ctx->buffer[nack_size_pos_] = static_cast<uint8_t>(2 + numOfNackFields);
-
- if (i != ctx->nack_size)
- LOG(LS_WARNING) << "Nack list too large for one packet.";
+rtc::scoped_ptr<rtcp::RtcpPacket> RTCPSender::BuildNACK(
+ const RtcpContext& ctx) {
+ rtcp::Nack* nack = new rtcp::Nack();
+ nack->From(ssrc_);
+ nack->To(remote_ssrc_);
+ nack->WithList(ctx.nack_list_, ctx.nack_size_);
// Report stats.
NACKStringBuilder stringBuilder;
- for (int idx = 0; idx < i; ++idx) {
- stringBuilder.PushNACK(ctx->nack_list[idx]);
- nack_stats_.ReportRequest(ctx->nack_list[idx]);
+ for (int idx = 0; idx < ctx.nack_size_; ++idx) {
+ stringBuilder.PushNACK(ctx.nack_list_[idx]);
+ nack_stats_.ReportRequest(ctx.nack_list_[idx]);
}
packet_type_counter_.nack_requests = nack_stats_.requests();
packet_type_counter_.unique_nack_requests = nack_stats_.unique_requests();
@@ -816,101 +720,66 @@ RTCPSender::BuildResult RTCPSender::BuildNACK(RtcpContext* ctx) {
TRACE_COUNTER_ID1(TRACE_DISABLED_BY_DEFAULT("webrtc_rtp"), "RTCP_NACKCount",
ssrc_, packet_type_counter_.nack_packets);
- return BuildResult::kSuccess;
+ return rtc::scoped_ptr<rtcp::Nack>(nack);
}
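The removed inline builder packed the sorted NACK list into FCI fields of one
16-bit packet id (PID) plus a 16-bit bitmask (BLP) covering the next 15
sequence numbers; rtcp::Nack::WithList() presumably performs the equivalent
packing internally. A standalone sketch of that algorithm, lifted from the
removed loop:

    #include <cstdint>
    #include <utility>
    #include <vector>

    // Input is assumed sorted and duplicate-free, as the old comment required.
    std::vector<std::pair<uint16_t, uint16_t>> PackNack(
        const std::vector<uint16_t>& nack_list) {
      std::vector<std::pair<uint16_t, uint16_t>> fields;
      size_t i = 0;
      while (i < nack_list.size()) {
        uint16_t pid = nack_list[i++];
        uint16_t blp = 0;
        while (i < nack_list.size()) {
          int shift = static_cast<uint16_t>(nack_list[i] - pid) - 1;
          if (shift < 0 || shift > 15)
            break;
          blp |= (1 << shift);  // Bit k set => pid + k + 1 is also missing.
          ++i;
        }
        fields.push_back(std::make_pair(pid, blp));
      }
      return fields;
    }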
-RTCPSender::BuildResult RTCPSender::BuildBYE(RtcpContext* ctx) {
- rtcp::Bye bye;
- bye.From(ssrc_);
+rtc::scoped_ptr<rtcp::RtcpPacket> RTCPSender::BuildBYE(const RtcpContext& ctx) {
+ rtcp::Bye* bye = new rtcp::Bye();
+ bye->From(ssrc_);
for (uint32_t csrc : csrcs_)
- bye.WithCsrc(csrc);
+ bye->WithCsrc(csrc);
- PacketBuiltCallback callback(ctx);
- if (!callback.BuildPacket(bye))
- return BuildResult::kTruncated;
-
- return BuildResult::kSuccess;
+ return rtc::scoped_ptr<rtcp::Bye>(bye);
}
-RTCPSender::BuildResult RTCPSender::BuildReceiverReferenceTime(
- RtcpContext* ctx) {
-
+rtc::scoped_ptr<rtcp::RtcpPacket> RTCPSender::BuildReceiverReferenceTime(
+ const RtcpContext& ctx) {
if (last_xr_rr_.size() >= RTCP_NUMBER_OF_SR)
last_xr_rr_.erase(last_xr_rr_.begin());
last_xr_rr_.insert(std::pair<uint32_t, int64_t>(
- RTCPUtility::MidNtp(ctx->ntp_sec, ctx->ntp_frac),
- Clock::NtpToMs(ctx->ntp_sec, ctx->ntp_frac)));
+ RTCPUtility::MidNtp(ctx.ntp_sec_, ctx.ntp_frac_),
+ Clock::NtpToMs(ctx.ntp_sec_, ctx.ntp_frac_)));
- rtcp::Xr xr;
- xr.From(ssrc_);
+ rtcp::Xr* xr = new rtcp::Xr();
+ xr->From(ssrc_);
rtcp::Rrtr rrtr;
- rrtr.WithNtpSec(ctx->ntp_sec);
- rrtr.WithNtpFrac(ctx->ntp_frac);
+ rrtr.WithNtp(NtpTime(ctx.ntp_sec_, ctx.ntp_frac_));
- xr.WithRrtr(&rrtr);
+ xr->WithRrtr(&rrtr);
// TODO(sprang): Merge XR report sending to contain all of RRTR, DLRR, VOIP?
- PacketBuiltCallback callback(ctx);
- if (!callback.BuildPacket(xr))
- return BuildResult::kTruncated;
-
- return BuildResult::kSuccess;
+ return rtc::scoped_ptr<rtcp::Xr>(xr);
}
-RTCPSender::BuildResult RTCPSender::BuildDlrr(RtcpContext* ctx) {
- rtcp::Xr xr;
- xr.From(ssrc_);
+rtc::scoped_ptr<rtcp::RtcpPacket> RTCPSender::BuildDlrr(
+ const RtcpContext& ctx) {
+ rtcp::Xr* xr = new rtcp::Xr();
+ xr->From(ssrc_);
rtcp::Dlrr dlrr;
- const RtcpReceiveTimeInfo& info = ctx->feedback_state.last_xr_rr;
+ const RtcpReceiveTimeInfo& info = ctx.feedback_state_.last_xr_rr;
dlrr.WithDlrrItem(info.sourceSSRC, info.lastRR, info.delaySinceLastRR);
- xr.WithDlrr(&dlrr);
+ xr->WithDlrr(&dlrr);
- PacketBuiltCallback callback(ctx);
- if (!callback.BuildPacket(xr))
- return BuildResult::kTruncated;
-
- return BuildResult::kSuccess;
+ return rtc::scoped_ptr<rtcp::Xr>(xr);
}
// TODO(sprang): Add a unit test for this, or remove if the code isn't used.
-RTCPSender::BuildResult RTCPSender::BuildVoIPMetric(RtcpContext* ctx) {
- rtcp::Xr xr;
- xr.From(ssrc_);
+rtc::scoped_ptr<rtcp::RtcpPacket> RTCPSender::BuildVoIPMetric(
+ const RtcpContext& context) {
+ rtcp::Xr* xr = new rtcp::Xr();
+ xr->From(ssrc_);
rtcp::VoipMetric voip;
voip.To(remote_ssrc_);
- voip.LossRate(xr_voip_metric_.lossRate);
- voip.DiscardRate(xr_voip_metric_.discardRate);
- voip.BurstDensity(xr_voip_metric_.burstDensity);
- voip.GapDensity(xr_voip_metric_.gapDensity);
- voip.BurstDuration(xr_voip_metric_.burstDuration);
- voip.GapDuration(xr_voip_metric_.gapDuration);
- voip.RoundTripDelay(xr_voip_metric_.roundTripDelay);
- voip.EndSystemDelay(xr_voip_metric_.endSystemDelay);
- voip.SignalLevel(xr_voip_metric_.signalLevel);
- voip.NoiseLevel(xr_voip_metric_.noiseLevel);
- voip.Rerl(xr_voip_metric_.RERL);
- voip.Gmin(xr_voip_metric_.Gmin);
- voip.Rfactor(xr_voip_metric_.Rfactor);
- voip.ExtRfactor(xr_voip_metric_.extRfactor);
- voip.MosLq(xr_voip_metric_.MOSLQ);
- voip.MosCq(xr_voip_metric_.MOSCQ);
- voip.RxConfig(xr_voip_metric_.RXconfig);
- voip.JbNominal(xr_voip_metric_.JBnominal);
- voip.JbMax(xr_voip_metric_.JBmax);
- voip.JbAbsMax(xr_voip_metric_.JBabsMax);
-
- xr.WithVoipMetric(&voip);
-
- PacketBuiltCallback callback(ctx);
- if (!callback.BuildPacket(xr))
- return BuildResult::kTruncated;
-
- return BuildResult::kSuccess;
+ voip.WithVoipMetric(xr_voip_metric_);
+
+ xr->WithVoipMetric(&voip);
+
+ return rtc::scoped_ptr<rtcp::Xr>(xr);
}
int32_t RTCPSender::SendRTCP(const FeedbackState& feedback_state,
@@ -926,43 +795,59 @@ int32_t RTCPSender::SendRTCP(const FeedbackState& feedback_state,
int32_t RTCPSender::SendCompoundRTCP(
const FeedbackState& feedback_state,
- const std::set<RTCPPacketType>& packetTypes,
+ const std::set<RTCPPacketType>& packet_types,
int32_t nack_size,
const uint16_t* nack_list,
bool repeat,
uint64_t pictureID) {
+ PacketContainer container(transport_);
{
CriticalSectionScoped lock(critical_section_rtcp_sender_.get());
if (method_ == RtcpMode::kOff) {
LOG(LS_WARNING) << "Can't send rtcp if it is disabled.";
return -1;
}
- }
- uint8_t rtcp_buffer[IP_PACKET_SIZE];
- int rtcp_length =
- PrepareRTCP(feedback_state, packetTypes, nack_size, nack_list, repeat,
- pictureID, rtcp_buffer, IP_PACKET_SIZE);
- // Sanity don't send empty packets.
- if (rtcp_length <= 0)
- return -1;
+ // We need to send our NTP even if we haven't received any reports.
+ uint32_t ntp_sec;
+ uint32_t ntp_frac;
+ clock_->CurrentNtp(ntp_sec, ntp_frac);
+ RtcpContext context(feedback_state, nack_size, nack_list, repeat, pictureID,
+ ntp_sec, ntp_frac, &container);
+
+ PrepareReport(packet_types, feedback_state);
+
+ auto it = report_flags_.begin();
+ while (it != report_flags_.end()) {
+ auto builder_it = builders_.find(it->type);
+ RTC_DCHECK(builder_it != builders_.end());
+ if (it->is_volatile) {
+ report_flags_.erase(it++);
+ } else {
+ ++it;
+ }
- return SendToNetwork(rtcp_buffer, static_cast<size_t>(rtcp_length));
-}
+ BuilderFunc func = builder_it->second;
+ rtc::scoped_ptr<rtcp::RtcpPacket> packet = (this->*func)(context);
+ if (packet.get() == nullptr)
+ return -1;
+ container.Append(packet.release());
+ }
-int RTCPSender::PrepareRTCP(const FeedbackState& feedback_state,
- const std::set<RTCPPacketType>& packetTypes,
- int32_t nack_size,
- const uint16_t* nack_list,
- bool repeat,
- uint64_t pictureID,
- uint8_t* rtcp_buffer,
- int buffer_size) {
- CriticalSectionScoped lock(critical_section_rtcp_sender_.get());
+ if (packet_type_counter_observer_ != nullptr) {
+ packet_type_counter_observer_->RtcpPacketTypesCounterUpdated(
+ remote_ssrc_, packet_type_counter_);
+ }
- RtcpContext context(feedback_state, nack_size, nack_list, repeat, pictureID,
- rtcp_buffer, buffer_size);
+ RTC_DCHECK(AllVolatileFlagsConsumed());
+ }
+
+ size_t bytes_sent = container.SendPackets();
+ return bytes_sent == 0 ? -1 : 0;
+}
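The loop above replaces the old BuildResult state machine: builders_ maps each
RTCPPacketType to a member-function pointer, and every builder returns an
owned packet that is appended to the compound container. A stripped-down
sketch of the dispatch pattern, with std::unique_ptr standing in for
rtc::scoped_ptr and all names illustrative:

    #include <map>
    #include <memory>

    struct Packet { virtual ~Packet() {} };

    class Sender {
     public:
      Sender() { builders_[1] = &Sender::BuildOne; }

      std::unique_ptr<Packet> Build(int type) {
        auto it = builders_.find(type);
        if (it == builders_.end())
          return nullptr;
        return (this->*(it->second))();  // Member-function-pointer dispatch.
      }

     private:
      typedef std::unique_ptr<Packet> (Sender::*BuilderFunc)();
      std::unique_ptr<Packet> BuildOne() {
        return std::unique_ptr<Packet>(new Packet());
      }
      std::map<int, BuilderFunc> builders_;
    };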
+void RTCPSender::PrepareReport(const std::set<RTCPPacketType>& packetTypes,
+ const FeedbackState& feedback_state) {
// Add all flags as volatile. Non volatile entries will not be overwritten
// and all new volatile flags added will be consumed by the end of this call.
SetFlags(packetTypes, true);
@@ -986,9 +871,6 @@ int RTCPSender::PrepareRTCP(const FeedbackState& feedback_state,
if (IsFlagPresent(kRtcpSr) || (IsFlagPresent(kRtcpRr) && !cname_.empty()))
SetFlag(kRtcpSdes, true);
- // We need to send our NTP even if we haven't received any reports.
- clock_->CurrentNtp(context.ntp_sec, context.ntp_frac);
-
if (generate_report) {
if (!sending_ && xr_send_receiver_reference_time_enabled_)
SetFlag(kRtcpXrReceiverReferenceTime, true);
@@ -996,15 +878,9 @@ int RTCPSender::PrepareRTCP(const FeedbackState& feedback_state,
SetFlag(kRtcpXrDlrrReportBlock, true);
// generate next time to send an RTCP report
- // seeded from RTP constructor
- int32_t random = rand() % 1000;
- int32_t timeToNext = RTCP_INTERVAL_AUDIO_MS;
-
- if (audio_) {
- timeToNext = (RTCP_INTERVAL_AUDIO_MS / 2) +
- (RTCP_INTERVAL_AUDIO_MS * random / 1000);
- } else {
- uint32_t minIntervalMs = RTCP_INTERVAL_AUDIO_MS;
+ uint32_t minIntervalMs = RTCP_INTERVAL_AUDIO_MS;
+
+ if (!audio_) {
if (sending_) {
// Calculate bandwidth for video; 360 / send bandwidth in kbit/s.
uint32_t send_bitrate_kbit = feedback_state.send_bitrate / 1000;
@@ -1013,74 +889,46 @@ int RTCPSender::PrepareRTCP(const FeedbackState& feedback_state,
}
if (minIntervalMs > RTCP_INTERVAL_VIDEO_MS)
minIntervalMs = RTCP_INTERVAL_VIDEO_MS;
- timeToNext = (minIntervalMs / 2) + (minIntervalMs * random / 1000);
}
+ // The interval between RTCP packets is varied randomly over the
+ // range [1/2,3/2] times the calculated interval.
+ uint32_t timeToNext =
+ random_.Rand(minIntervalMs * 1 / 2, minIntervalMs * 3 / 2);
next_time_to_send_rtcp_ = clock_->TimeInMilliseconds() + timeToNext;
StatisticianMap statisticians =
receive_statistics_->GetActiveStatisticians();
- if (!statisticians.empty()) {
- for (auto it = statisticians.begin(); it != statisticians.end(); ++it) {
- RTCPReportBlock report_block;
- if (PrepareReport(feedback_state, it->first, it->second,
- &report_block)) {
- AddReportBlock(report_block);
- }
- }
- }
- }
-
- auto it = report_flags_.begin();
- while (it != report_flags_.end()) {
- auto builder = builders_.find(it->type);
- RTC_DCHECK(builder != builders_.end());
- if (it->is_volatile) {
- report_flags_.erase(it++);
- } else {
- ++it;
- }
-
- uint32_t start_position = context.position;
- BuildResult result = (this->*(builder->second))(&context);
- switch (result) {
- case BuildResult::kError:
- return -1;
- case BuildResult::kTruncated:
- return context.position;
- case BuildResult::kAborted:
- context.position = start_position;
- FALLTHROUGH();
- case BuildResult::kSuccess:
- continue;
- default:
- abort();
+ RTC_DCHECK(report_blocks_.empty());
+ for (auto& it : statisticians) {
+ AddReportBlock(feedback_state, it.first, it.second);
}
}
-
- if (packet_type_counter_observer_ != NULL) {
- packet_type_counter_observer_->RtcpPacketTypesCounterUpdated(
- remote_ssrc_, packet_type_counter_);
- }
-
- RTC_DCHECK(AllVolatileFlagsConsumed());
-
- return context.position;
}
-bool RTCPSender::PrepareReport(const FeedbackState& feedback_state,
- uint32_t ssrc,
- StreamStatistician* statistician,
- RTCPReportBlock* report_block) {
+bool RTCPSender::AddReportBlock(const FeedbackState& feedback_state,
+ uint32_t ssrc,
+ StreamStatistician* statistician) {
// Do we have receive statistics to send?
RtcpStatistics stats;
if (!statistician->GetStatistics(&stats, true))
return false;
- report_block->fractionLost = stats.fraction_lost;
- report_block->cumulativeLost = stats.cumulative_lost;
- report_block->extendedHighSeqNum =
- stats.extended_max_sequence_number;
- report_block->jitter = stats.jitter;
- report_block->remoteSSRC = ssrc;
+
+ if (report_blocks_.size() >= RTCP_MAX_REPORT_BLOCKS) {
+ LOG(LS_WARNING) << "Too many report blocks.";
+ return false;
+ }
+ RTC_DCHECK(report_blocks_.find(ssrc) == report_blocks_.end());
+ rtcp::ReportBlock* block = &report_blocks_[ssrc];
+ block->To(ssrc);
+ block->WithFractionLost(stats.fraction_lost);
+ if (!block->WithCumulativeLost(stats.cumulative_lost)) {
+ report_blocks_.erase(ssrc);
+ LOG(LS_WARNING) << "Cumulative lost is oversized.";
+ return false;
+ }
+ block->WithExtHighestSeqNum(stats.extended_max_sequence_number);
+ block->WithJitter(stats.jitter);
+ block->WithLastSr(feedback_state.remote_sr);
// TODO(sprang): Do we really need separate time stamps for each report?
// Get our NTP as late as possible to avoid a race.
@@ -1089,7 +937,6 @@ bool RTCPSender::PrepareReport(const FeedbackState& feedback_state,
clock_->CurrentNtp(ntp_secs, ntp_frac);
// Delay since last received report.
- uint32_t delaySinceLastReceivedSR = 0;
if ((feedback_state.last_rr_ntp_secs != 0) ||
(feedback_state.last_rr_ntp_frac != 0)) {
// Get the 16 lowest bits of seconds and the 16 highest bits of fractions.
@@ -1101,19 +948,11 @@ bool RTCPSender::PrepareReport(const FeedbackState& feedback_state,
receiveTime <<= 16;
receiveTime += (feedback_state.last_rr_ntp_frac & 0xffff0000) >> 16;
- delaySinceLastReceivedSR = now-receiveTime;
+ block->WithDelayLastSr(now - receiveTime);
}
- report_block->delaySinceLastSR = delaySinceLastReceivedSR;
- report_block->lastSR = feedback_state.remote_sr;
return true;
}
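WithLastSr/WithDelayLastSr above operate in "compact NTP" units: the middle
32 bits of the 64-bit NTP timestamp (low 16 bits of seconds, high 16 bits of
the fraction), i.e. ticks of 1/65536 s. A sketch of the arithmetic, with the
helper names made up for illustration:

    #include <cstdint>

    // Same mid-bits extraction as RTCPUtility::MidNtp().
    uint32_t CompactNtp(uint32_t ntp_sec, uint32_t ntp_frac) {
      return (ntp_sec << 16) | (ntp_frac >> 16);
    }

    // DLSR: delay since the last SR arrived, in 1/65536-second units.
    // Unsigned subtraction wraps correctly across the seconds boundary.
    uint32_t DelaySinceLastSr(uint32_t now_compact, uint32_t last_sr_compact) {
      return now_compact - last_sr_compact;
    }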
-int32_t RTCPSender::SendToNetwork(const uint8_t* dataBuffer, size_t length) {
- if (transport_->SendRtcp(dataBuffer, length))
- return 0;
- return -1;
-}
-
void RTCPSender::SetCsrcs(const std::vector<uint32_t>& csrcs) {
assert(csrcs.size() <= kRtpCsrcSize);
CriticalSectionScoped lock(critical_section_rtcp_sender_.get());
@@ -1203,7 +1042,7 @@ bool RTCPSender::AllVolatileFlagsConsumed() const {
bool RTCPSender::SendFeedbackPacket(const rtcp::TransportFeedback& packet) {
class Sender : public rtcp::RtcpPacket::PacketReadyCallback {
public:
- Sender(Transport* transport)
+ explicit Sender(Transport* transport)
: transport_(transport), send_failure_(false) {}
void OnPacketReady(uint8_t* data, size_t length) override {
diff --git a/webrtc/modules/rtp_rtcp/source/rtcp_sender.h b/webrtc/modules/rtp_rtcp/source/rtcp_sender.h
index 9ec928363b..dd3aec4c9f 100644
--- a/webrtc/modules/rtp_rtcp/source/rtcp_sender.h
+++ b/webrtc/modules/rtp_rtcp/source/rtcp_sender.h
@@ -15,13 +15,15 @@
#include <set>
#include <sstream>
#include <string>
+#include <vector>
+#include "webrtc/base/random.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/base/thread_annotations.h"
#include "webrtc/modules/remote_bitrate_estimator/include/bwe_defines.h"
#include "webrtc/modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h"
-#include "webrtc/modules/rtp_rtcp/interface/receive_statistics.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
+#include "webrtc/modules/rtp_rtcp/include/receive_statistics.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "webrtc/modules/rtp_rtcp/source/rtcp_packet.h"
#include "webrtc/modules/rtp_rtcp/source/rtcp_utility.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_utility.h"
@@ -50,277 +52,251 @@ class NACKStringBuilder {
};
class RTCPSender {
-public:
- struct FeedbackState {
- FeedbackState();
+ public:
+ struct FeedbackState {
+ FeedbackState();
+
+ uint8_t send_payload_type;
+ uint32_t frequency_hz;
+ uint32_t packets_sent;
+ size_t media_bytes_sent;
+ uint32_t send_bitrate;
+
+ uint32_t last_rr_ntp_secs;
+ uint32_t last_rr_ntp_frac;
+ uint32_t remote_sr;
+
+ bool has_last_xr_rr;
+ RtcpReceiveTimeInfo last_xr_rr;
+
+ // Used when generating TMMBR.
+ ModuleRtpRtcpImpl* module;
+ };
+
+ RTCPSender(bool audio,
+ Clock* clock,
+ ReceiveStatistics* receive_statistics,
+ RtcpPacketTypeCounterObserver* packet_type_counter_observer,
+ Transport* outgoing_transport);
+ virtual ~RTCPSender();
+
+ RtcpMode Status() const;
+ void SetRTCPStatus(RtcpMode method);
+
+ bool Sending() const;
+ int32_t SetSendingStatus(const FeedbackState& feedback_state,
+ bool enabled); // combine the functions
+
+ int32_t SetNackStatus(bool enable);
- uint8_t send_payload_type;
- uint32_t frequency_hz;
- uint32_t packets_sent;
- size_t media_bytes_sent;
- uint32_t send_bitrate;
+ void SetStartTimestamp(uint32_t start_timestamp);
- uint32_t last_rr_ntp_secs;
- uint32_t last_rr_ntp_frac;
- uint32_t remote_sr;
+ void SetLastRtpTime(uint32_t rtp_timestamp, int64_t capture_time_ms);
- bool has_last_xr_rr;
- RtcpReceiveTimeInfo last_xr_rr;
+ void SetSSRC(uint32_t ssrc);
- // Used when generating TMMBR.
- ModuleRtpRtcpImpl* module;
- };
+ void SetRemoteSSRC(uint32_t ssrc);
- RTCPSender(bool audio,
- Clock* clock,
- ReceiveStatistics* receive_statistics,
- RtcpPacketTypeCounterObserver* packet_type_counter_observer,
- Transport* outgoing_transport);
- virtual ~RTCPSender();
+ int32_t SetCNAME(const char* cName);
- RtcpMode Status() const;
- void SetRTCPStatus(RtcpMode method);
+ int32_t AddMixedCNAME(uint32_t SSRC, const char* c_name);
- bool Sending() const;
- int32_t SetSendingStatus(const FeedbackState& feedback_state,
- bool enabled); // combine the functions
+ int32_t RemoveMixedCNAME(uint32_t SSRC);
- int32_t SetNackStatus(bool enable);
+ int64_t SendTimeOfSendReport(uint32_t sendReport);
- void SetStartTimestamp(uint32_t start_timestamp);
+ bool SendTimeOfXrRrReport(uint32_t mid_ntp, int64_t* time_ms) const;
- void SetLastRtpTime(uint32_t rtp_timestamp, int64_t capture_time_ms);
+ bool TimeToSendRTCPReport(bool sendKeyframeBeforeRTP = false) const;
- void SetSSRC(uint32_t ssrc);
+ int32_t SendRTCP(const FeedbackState& feedback_state,
+ RTCPPacketType packetType,
+ int32_t nackSize = 0,
+ const uint16_t* nackList = 0,
+ bool repeat = false,
+ uint64_t pictureID = 0);
- void SetRemoteSSRC(uint32_t ssrc);
+ int32_t SendCompoundRTCP(const FeedbackState& feedback_state,
+ const std::set<RTCPPacketType>& packetTypes,
+ int32_t nackSize = 0,
+ const uint16_t* nackList = 0,
+ bool repeat = false,
+ uint64_t pictureID = 0);
- int32_t SetCNAME(const char* cName);
+ bool REMB() const;
- int32_t AddMixedCNAME(uint32_t SSRC, const char* c_name);
+ void SetREMBStatus(bool enable);
- int32_t RemoveMixedCNAME(uint32_t SSRC);
+ void SetREMBData(uint32_t bitrate, const std::vector<uint32_t>& ssrcs);
- int64_t SendTimeOfSendReport(uint32_t sendReport);
+ bool TMMBR() const;
- bool SendTimeOfXrRrReport(uint32_t mid_ntp, int64_t* time_ms) const;
+ void SetTMMBRStatus(bool enable);
- bool TimeToSendRTCPReport(bool sendKeyframeBeforeRTP = false) const;
+ int32_t SetTMMBN(const TMMBRSet* boundingSet, uint32_t maxBitrateKbit);
- int32_t SendRTCP(const FeedbackState& feedback_state,
- RTCPPacketType packetType,
- int32_t nackSize = 0,
- const uint16_t* nackList = 0,
- bool repeat = false,
- uint64_t pictureID = 0);
-
- int32_t SendCompoundRTCP(const FeedbackState& feedback_state,
- const std::set<RTCPPacketType>& packetTypes,
- int32_t nackSize = 0,
- const uint16_t* nackList = 0,
- bool repeat = false,
- uint64_t pictureID = 0);
-
- bool REMB() const;
-
- void SetREMBStatus(bool enable);
-
- void SetREMBData(uint32_t bitrate, const std::vector<uint32_t>& ssrcs);
-
- bool TMMBR() const;
-
- void SetTMMBRStatus(bool enable);
-
- int32_t SetTMMBN(const TMMBRSet* boundingSet, uint32_t maxBitrateKbit);
-
- int32_t SetApplicationSpecificData(uint8_t subType,
- uint32_t name,
- const uint8_t* data,
- uint16_t length);
- int32_t SetRTCPVoIPMetrics(const RTCPVoIPMetric* VoIPMetric);
-
- void SendRtcpXrReceiverReferenceTime(bool enable);
-
- bool RtcpXrReceiverReferenceTime() const;
-
- void SetCsrcs(const std::vector<uint32_t>& csrcs);
-
- void SetTargetBitrate(unsigned int target_bitrate);
- bool SendFeedbackPacket(const rtcp::TransportFeedback& packet);
-
-private:
- struct RtcpContext;
-
- // The BuildResult indicates the outcome of a call to a builder method,
- // constructing a part of an RTCP packet:
- //
- // kError
- // Building RTCP packet failed, propagate error out to caller.
- // kAbort
- // The (partial) block being build should not be included. Reset current
- // buffer position to the state before the method call and proceed to the
- // next packet type.
- // kTruncated
- // There is not enough room in the buffer to fit the data being constructed.
- // (IP packet is full). Proceed to the next packet type, and call this
- // method again when a new buffer has been allocated.
- // TODO(sprang): Actually allocate multiple packets if needed.
- // kSuccess
- // Data has been successfully placed in the buffer.
-
- enum class BuildResult { kError, kAborted, kTruncated, kSuccess };
-
- int32_t SendToNetwork(const uint8_t* dataBuffer, size_t length);
-
- int32_t AddReportBlock(const RTCPReportBlock& report_block)
- EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_);
-
- bool PrepareReport(const FeedbackState& feedback_state,
- uint32_t ssrc,
- StreamStatistician* statistician,
- RTCPReportBlock* report_block);
-
- int PrepareRTCP(const FeedbackState& feedback_state,
- const std::set<RTCPPacketType>& packetTypes,
- int32_t nackSize,
- const uint16_t* nackList,
- bool repeat,
- uint64_t pictureID,
- uint8_t* rtcp_buffer,
- int buffer_size);
-
- BuildResult BuildSR(RtcpContext* context)
- EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_);
- BuildResult BuildRR(RtcpContext* context)
- EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_);
- BuildResult BuildSDES(RtcpContext* context)
- EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_);
- BuildResult BuildPLI(RtcpContext* context)
- EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_);
- BuildResult BuildREMB(RtcpContext* context)
- EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_);
- BuildResult BuildTMMBR(RtcpContext* context)
- EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_);
- BuildResult BuildTMMBN(RtcpContext* context)
- EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_);
- BuildResult BuildAPP(RtcpContext* context)
- EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_);
- BuildResult BuildVoIPMetric(RtcpContext* context)
- EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_);
- BuildResult BuildBYE(RtcpContext* context)
- EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_);
- BuildResult BuildFIR(RtcpContext* context)
- EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_);
- BuildResult BuildSLI(RtcpContext* context)
- EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_);
- BuildResult BuildRPSI(RtcpContext* context)
- EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_);
- BuildResult BuildNACK(RtcpContext* context)
- EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_);
- BuildResult BuildReceiverReferenceTime(RtcpContext* context)
- EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_);
- BuildResult BuildDlrr(RtcpContext* context)
- EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_);
-
-private:
- const bool audio_;
- Clock* const clock_;
- RtcpMode method_ GUARDED_BY(critical_section_rtcp_sender_);
-
- Transport* const transport_;
-
- rtc::scoped_ptr<CriticalSectionWrapper> critical_section_rtcp_sender_;
- bool using_nack_ GUARDED_BY(critical_section_rtcp_sender_);
- bool sending_ GUARDED_BY(critical_section_rtcp_sender_);
- bool remb_enabled_ GUARDED_BY(critical_section_rtcp_sender_);
-
- int64_t next_time_to_send_rtcp_ GUARDED_BY(critical_section_rtcp_sender_);
-
- uint32_t start_timestamp_ GUARDED_BY(critical_section_rtcp_sender_);
- uint32_t last_rtp_timestamp_ GUARDED_BY(critical_section_rtcp_sender_);
- int64_t last_frame_capture_time_ms_ GUARDED_BY(critical_section_rtcp_sender_);
- uint32_t ssrc_ GUARDED_BY(critical_section_rtcp_sender_);
- // SSRC that we receive on our RTP channel
- uint32_t remote_ssrc_ GUARDED_BY(critical_section_rtcp_sender_);
- std::string cname_ GUARDED_BY(critical_section_rtcp_sender_);
-
- ReceiveStatistics* receive_statistics_
- GUARDED_BY(critical_section_rtcp_sender_);
- std::map<uint32_t, rtcp::ReportBlock> report_blocks_
- GUARDED_BY(critical_section_rtcp_sender_);
- std::map<uint32_t, std::string> csrc_cnames_
- GUARDED_BY(critical_section_rtcp_sender_);
-
- // Sent
- uint32_t last_send_report_[RTCP_NUMBER_OF_SR] GUARDED_BY(
- critical_section_rtcp_sender_); // allow packet loss and RTT above 1 sec
- int64_t last_rtcp_time_[RTCP_NUMBER_OF_SR] GUARDED_BY(
- critical_section_rtcp_sender_);
-
- // Sent XR receiver reference time report.
- // <mid ntp (mid 32 bits of the 64 bits NTP timestamp), send time in ms>.
- std::map<uint32_t, int64_t> last_xr_rr_
- GUARDED_BY(critical_section_rtcp_sender_);
-
- // send CSRCs
- std::vector<uint32_t> csrcs_ GUARDED_BY(critical_section_rtcp_sender_);
-
- // Full intra request
- uint8_t sequence_number_fir_ GUARDED_BY(critical_section_rtcp_sender_);
-
- // REMB
- uint32_t remb_bitrate_ GUARDED_BY(critical_section_rtcp_sender_);
- std::vector<uint32_t> remb_ssrcs_ GUARDED_BY(critical_section_rtcp_sender_);
-
- TMMBRHelp tmmbr_help_ GUARDED_BY(critical_section_rtcp_sender_);
- uint32_t tmmbr_send_ GUARDED_BY(critical_section_rtcp_sender_);
- uint32_t packet_oh_send_ GUARDED_BY(critical_section_rtcp_sender_);
-
- // APP
- uint8_t app_sub_type_ GUARDED_BY(critical_section_rtcp_sender_);
- uint32_t app_name_ GUARDED_BY(critical_section_rtcp_sender_);
- rtc::scoped_ptr<uint8_t[]> app_data_ GUARDED_BY(critical_section_rtcp_sender_);
- uint16_t app_length_ GUARDED_BY(critical_section_rtcp_sender_);
-
- // True if sending of XR Receiver reference time report is enabled.
- bool xr_send_receiver_reference_time_enabled_
- GUARDED_BY(critical_section_rtcp_sender_);
-
- // XR VoIP metric
- RTCPVoIPMetric xr_voip_metric_ GUARDED_BY(critical_section_rtcp_sender_);
-
- RtcpPacketTypeCounterObserver* const packet_type_counter_observer_;
- RtcpPacketTypeCounter packet_type_counter_
- GUARDED_BY(critical_section_rtcp_sender_);
-
- RTCPUtility::NackStats nack_stats_ GUARDED_BY(critical_section_rtcp_sender_);
-
- void SetFlag(RTCPPacketType type, bool is_volatile)
- EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_);
- void SetFlags(const std::set<RTCPPacketType>& types, bool is_volatile)
- EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_);
- bool IsFlagPresent(RTCPPacketType type) const
- EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_);
- bool ConsumeFlag(RTCPPacketType type, bool forced = false)
- EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_);
- bool AllVolatileFlagsConsumed() const
- EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_);
- struct ReportFlag {
- ReportFlag(RTCPPacketType type, bool is_volatile)
- : type(type), is_volatile(is_volatile) {}
- bool operator<(const ReportFlag& flag) const { return type < flag.type; }
- bool operator==(const ReportFlag& flag) const { return type == flag.type; }
- const RTCPPacketType type;
- const bool is_volatile;
- };
-
- std::set<ReportFlag> report_flags_ GUARDED_BY(critical_section_rtcp_sender_);
-
- typedef BuildResult (RTCPSender::*Builder)(RtcpContext*);
- std::map<RTCPPacketType, Builder> builders_;
-
- class PacketBuiltCallback;
+ int32_t SetApplicationSpecificData(uint8_t subType,
+ uint32_t name,
+ const uint8_t* data,
+ uint16_t length);
+ int32_t SetRTCPVoIPMetrics(const RTCPVoIPMetric* VoIPMetric);
+
+ void SendRtcpXrReceiverReferenceTime(bool enable);
+
+ bool RtcpXrReceiverReferenceTime() const;
+
+ void SetCsrcs(const std::vector<uint32_t>& csrcs);
+
+ void SetTargetBitrate(unsigned int target_bitrate);
+ bool SendFeedbackPacket(const rtcp::TransportFeedback& packet);
+
+ private:
+ class RtcpContext;
+
+ // Determine which RTCP messages should be sent and setup flags.
+ void PrepareReport(const std::set<RTCPPacketType>& packetTypes,
+ const FeedbackState& feedback_state)
+ EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_);
+
+ bool AddReportBlock(const FeedbackState& feedback_state,
+ uint32_t ssrc,
+ StreamStatistician* statistician)
+ EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_);
+
+ rtc::scoped_ptr<rtcp::RtcpPacket> BuildSR(const RtcpContext& context)
+ EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_);
+ rtc::scoped_ptr<rtcp::RtcpPacket> BuildRR(const RtcpContext& context)
+ EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_);
+ rtc::scoped_ptr<rtcp::RtcpPacket> BuildSDES(const RtcpContext& context)
+ EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_);
+ rtc::scoped_ptr<rtcp::RtcpPacket> BuildPLI(const RtcpContext& context)
+ EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_);
+ rtc::scoped_ptr<rtcp::RtcpPacket> BuildREMB(const RtcpContext& context)
+ EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_);
+ rtc::scoped_ptr<rtcp::RtcpPacket> BuildTMMBR(const RtcpContext& context)
+ EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_);
+ rtc::scoped_ptr<rtcp::RtcpPacket> BuildTMMBN(const RtcpContext& context)
+ EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_);
+ rtc::scoped_ptr<rtcp::RtcpPacket> BuildAPP(const RtcpContext& context)
+ EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_);
+ rtc::scoped_ptr<rtcp::RtcpPacket> BuildVoIPMetric(const RtcpContext& context)
+ EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_);
+ rtc::scoped_ptr<rtcp::RtcpPacket> BuildBYE(const RtcpContext& context)
+ EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_);
+ rtc::scoped_ptr<rtcp::RtcpPacket> BuildFIR(const RtcpContext& context)
+ EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_);
+ rtc::scoped_ptr<rtcp::RtcpPacket> BuildSLI(const RtcpContext& context)
+ EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_);
+ rtc::scoped_ptr<rtcp::RtcpPacket> BuildRPSI(const RtcpContext& context)
+ EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_);
+ rtc::scoped_ptr<rtcp::RtcpPacket> BuildNACK(const RtcpContext& context)
+ EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_);
+ rtc::scoped_ptr<rtcp::RtcpPacket> BuildReceiverReferenceTime(
+ const RtcpContext& context)
+ EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_);
+ rtc::scoped_ptr<rtcp::RtcpPacket> BuildDlrr(const RtcpContext& context)
+ EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_);
+
+ private:
+ const bool audio_;
+ Clock* const clock_;
+ Random random_ GUARDED_BY(critical_section_rtcp_sender_);
+ RtcpMode method_ GUARDED_BY(critical_section_rtcp_sender_);
+
+ Transport* const transport_;
+
+ rtc::scoped_ptr<CriticalSectionWrapper> critical_section_rtcp_sender_;
+ bool using_nack_ GUARDED_BY(critical_section_rtcp_sender_);
+ bool sending_ GUARDED_BY(critical_section_rtcp_sender_);
+ bool remb_enabled_ GUARDED_BY(critical_section_rtcp_sender_);
+
+ int64_t next_time_to_send_rtcp_ GUARDED_BY(critical_section_rtcp_sender_);
+
+ uint32_t start_timestamp_ GUARDED_BY(critical_section_rtcp_sender_);
+ uint32_t last_rtp_timestamp_ GUARDED_BY(critical_section_rtcp_sender_);
+ int64_t last_frame_capture_time_ms_ GUARDED_BY(critical_section_rtcp_sender_);
+ uint32_t ssrc_ GUARDED_BY(critical_section_rtcp_sender_);
+ // SSRC that we receive on our RTP channel
+ uint32_t remote_ssrc_ GUARDED_BY(critical_section_rtcp_sender_);
+ std::string cname_ GUARDED_BY(critical_section_rtcp_sender_);
+
+ ReceiveStatistics* receive_statistics_
+ GUARDED_BY(critical_section_rtcp_sender_);
+ std::map<uint32_t, rtcp::ReportBlock> report_blocks_
+ GUARDED_BY(critical_section_rtcp_sender_);
+ std::map<uint32_t, std::string> csrc_cnames_
+ GUARDED_BY(critical_section_rtcp_sender_);
+
+ // Sent
+ uint32_t last_send_report_[RTCP_NUMBER_OF_SR] GUARDED_BY(
+ critical_section_rtcp_sender_); // allow packet loss and RTT above 1 sec
+ int64_t last_rtcp_time_[RTCP_NUMBER_OF_SR] GUARDED_BY(
+ critical_section_rtcp_sender_);
+
+ // Sent XR receiver reference time report.
+ // <mid ntp (mid 32 bits of the 64 bits NTP timestamp), send time in ms>.
+ std::map<uint32_t, int64_t> last_xr_rr_
+ GUARDED_BY(critical_section_rtcp_sender_);
+
+ // send CSRCs
+ std::vector<uint32_t> csrcs_ GUARDED_BY(critical_section_rtcp_sender_);
+
+ // Full intra request
+ uint8_t sequence_number_fir_ GUARDED_BY(critical_section_rtcp_sender_);
+
+ // REMB
+ uint32_t remb_bitrate_ GUARDED_BY(critical_section_rtcp_sender_);
+ std::vector<uint32_t> remb_ssrcs_ GUARDED_BY(critical_section_rtcp_sender_);
+
+ TMMBRHelp tmmbr_help_ GUARDED_BY(critical_section_rtcp_sender_);
+ uint32_t tmmbr_send_ GUARDED_BY(critical_section_rtcp_sender_);
+ uint32_t packet_oh_send_ GUARDED_BY(critical_section_rtcp_sender_);
+
+ // APP
+ uint8_t app_sub_type_ GUARDED_BY(critical_section_rtcp_sender_);
+ uint32_t app_name_ GUARDED_BY(critical_section_rtcp_sender_);
+ rtc::scoped_ptr<uint8_t[]> app_data_
+ GUARDED_BY(critical_section_rtcp_sender_);
+ uint16_t app_length_ GUARDED_BY(critical_section_rtcp_sender_);
+
+ // True if sending of XR Receiver reference time report is enabled.
+ bool xr_send_receiver_reference_time_enabled_
+ GUARDED_BY(critical_section_rtcp_sender_);
+
+ // XR VoIP metric
+ RTCPVoIPMetric xr_voip_metric_ GUARDED_BY(critical_section_rtcp_sender_);
+
+ RtcpPacketTypeCounterObserver* const packet_type_counter_observer_;
+ RtcpPacketTypeCounter packet_type_counter_
+ GUARDED_BY(critical_section_rtcp_sender_);
+
+ RTCPUtility::NackStats nack_stats_ GUARDED_BY(critical_section_rtcp_sender_);
+
+ void SetFlag(RTCPPacketType type, bool is_volatile)
+ EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_);
+ void SetFlags(const std::set<RTCPPacketType>& types, bool is_volatile)
+ EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_);
+ bool IsFlagPresent(RTCPPacketType type) const
+ EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_);
+ bool ConsumeFlag(RTCPPacketType type, bool forced = false)
+ EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_);
+ bool AllVolatileFlagsConsumed() const
+ EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_);
+ struct ReportFlag {
+ ReportFlag(RTCPPacketType type, bool is_volatile)
+ : type(type), is_volatile(is_volatile) {}
+ bool operator<(const ReportFlag& flag) const { return type < flag.type; }
+ bool operator==(const ReportFlag& flag) const { return type == flag.type; }
+ const RTCPPacketType type;
+ const bool is_volatile;
+ };
+
+ std::set<ReportFlag> report_flags_ GUARDED_BY(critical_section_rtcp_sender_);
+
+ typedef rtc::scoped_ptr<rtcp::RtcpPacket> (RTCPSender::*BuilderFunc)(
+ const RtcpContext&);
+ std::map<RTCPPacketType, BuilderFunc> builders_;
};
} // namespace webrtc
-#endif // WEBRTC_MODULES_RTP_RTCP_SOURCE_RTCP_SENDER_H_
+#endif // WEBRTC_MODULES_RTP_RTCP_SOURCE_RTCP_SENDER_H_
diff --git a/webrtc/modules/rtp_rtcp/source/rtcp_utility.cc b/webrtc/modules/rtp_rtcp/source/rtcp_utility.cc
index d2b80438cc..e19499612d 100644
--- a/webrtc/modules/rtp_rtcp/source/rtcp_utility.cc
+++ b/webrtc/modules/rtp_rtcp/source/rtcp_utility.cc
@@ -8,9 +8,7 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/base/checks.h"
#include "webrtc/modules/rtp_rtcp/source/rtcp_utility.h"
-#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h"
#include <assert.h>
#include <math.h> // ceil
@@ -19,6 +17,7 @@
#include "webrtc/base/checks.h"
#include "webrtc/base/logging.h"
#include "webrtc/modules/rtp_rtcp/source/byte_io.h"
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h"
namespace webrtc {
@@ -42,8 +41,8 @@ void NackStats::ReportRequest(uint16_t sequence_number) {
uint32_t MidNtp(uint32_t ntp_sec, uint32_t ntp_frac) {
return (ntp_sec << 16) + (ntp_frac >> 16);
-} // end RTCPUtility
}
+} // namespace RTCPUtility
// RTCPParserV2 : currently read only
RTCPUtility::RTCPParserV2::RTCPParserV2(const uint8_t* rtcpData,
diff --git a/webrtc/modules/rtp_rtcp/source/rtcp_utility.h b/webrtc/modules/rtp_rtcp/source/rtcp_utility.h
index f05d512919..0b03ceb56e 100644
--- a/webrtc/modules/rtp_rtcp/source/rtcp_utility.h
+++ b/webrtc/modules/rtp_rtcp/source/rtcp_utility.h
@@ -14,7 +14,7 @@
#include <stddef.h> // size_t, ptrdiff_t
#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_rtcp_config.h"
#include "webrtc/typedefs.h"
@@ -487,6 +487,6 @@ class RTCPPacketIterator {
RtcpCommonHeader _header;
};
-} // RTCPUtility
+} // namespace RTCPUtility
} // namespace webrtc
#endif // WEBRTC_MODULES_RTP_RTCP_SOURCE_RTCP_UTILITY_H_
diff --git a/webrtc/modules/rtp_rtcp/source/rtp_fec_unittest.cc b/webrtc/modules/rtp_rtcp/source/rtp_fec_unittest.cc
index 541f522f8d..80f961bd1e 100644
--- a/webrtc/modules/rtp_rtcp/source/rtp_fec_unittest.cc
+++ b/webrtc/modules/rtp_rtcp/source/rtp_fec_unittest.cc
@@ -11,6 +11,7 @@
#include <list>
#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/base/random.h"
#include "webrtc/modules/rtp_rtcp/source/byte_io.h"
#include "webrtc/modules/rtp_rtcp/source/forward_error_correction.h"
@@ -41,8 +42,12 @@ template <typename T> void ClearList(std::list<T*>* my_list) {
class RtpFecTest : public ::testing::Test {
protected:
RtpFecTest()
- : fec_(new ForwardErrorCorrection()), ssrc_(rand()), fec_seq_num_(0) {}
+ : random_(0xfec133700742),
+ fec_(new ForwardErrorCorrection()),
+ ssrc_(random_.Rand<uint32_t>()),
+ fec_seq_num_(0) {}
+ webrtc::Random random_;
ForwardErrorCorrection* fec_;
int ssrc_;
uint16_t fec_seq_num_;
@@ -891,22 +896,20 @@ int RtpFecTest::ConstructMediaPacketsSeqNum(int num_media_packets,
assert(num_media_packets > 0);
ForwardErrorCorrection::Packet* media_packet = NULL;
int sequence_number = start_seq_num;
- int time_stamp = rand();
+ int time_stamp = random_.Rand<int>();
for (int i = 0; i < num_media_packets; ++i) {
media_packet = new ForwardErrorCorrection::Packet;
media_packet_list_.push_back(media_packet);
- media_packet->length = static_cast<size_t>(
- (static_cast<float>(rand()) / RAND_MAX) *
- (IP_PACKET_SIZE - kRtpHeaderSize - kTransportOverhead -
- ForwardErrorCorrection::PacketOverhead()));
+ const uint32_t kMinPacketSize = kRtpHeaderSize;
+ const uint32_t kMaxPacketSize = IP_PACKET_SIZE - kRtpHeaderSize -
+ kTransportOverhead -
+ ForwardErrorCorrection::PacketOverhead();
+ media_packet->length = random_.Rand(kMinPacketSize, kMaxPacketSize);
- if (media_packet->length < kRtpHeaderSize) {
- media_packet->length = kRtpHeaderSize;
- }
// Generate random values for the first 2 bytes
- media_packet->data[0] = static_cast<uint8_t>(rand() % 256);
- media_packet->data[1] = static_cast<uint8_t>(rand() % 256);
+ media_packet->data[0] = random_.Rand<uint8_t>();
+ media_packet->data[1] = random_.Rand<uint8_t>();
// The first two bits are assumed to be 10 by the FEC encoder.
// In fact the FEC decoder will set the two first bits to 10 regardless of
@@ -929,7 +932,7 @@ int RtpFecTest::ConstructMediaPacketsSeqNum(int num_media_packets,
// Generate random values for payload.
for (size_t j = 12; j < media_packet->length; ++j) {
- media_packet->data[j] = static_cast<uint8_t>(rand() % 256);
+ media_packet->data[j] = random_.Rand<uint8_t>();
}
sequence_number++;
}
@@ -940,5 +943,5 @@ int RtpFecTest::ConstructMediaPacketsSeqNum(int num_media_packets,
}
int RtpFecTest::ConstructMediaPackets(int num_media_packets) {
- return ConstructMediaPacketsSeqNum(num_media_packets, rand());
+ return ConstructMediaPacketsSeqNum(num_media_packets, random_.Rand<int>());
}
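In isolation, the webrtc::Random usage this test now relies on; the fixed seed
is what makes each run reproducible, unlike the rand() calls it replaces:

    #include "webrtc/base/random.h"

    void Example() {
      webrtc::Random random(0xfec133700742);      // Fixed seed => deterministic.
      uint8_t byte = random.Rand<uint8_t>();      // Full range of the type.
      uint32_t sized = random.Rand(12u, 1400u);   // Uniform in [min, max].
    }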
diff --git a/webrtc/modules/rtp_rtcp/source/rtp_format.h b/webrtc/modules/rtp_rtcp/source/rtp_format.h
index 18225f9bb4..3519499248 100644
--- a/webrtc/modules/rtp_rtcp/source/rtp_format.h
+++ b/webrtc/modules/rtp_rtcp/source/rtp_format.h
@@ -14,8 +14,8 @@
#include <string>
#include "webrtc/base/constructormagic.h"
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
namespace webrtc {
diff --git a/webrtc/modules/rtp_rtcp/source/rtp_format_h264.cc b/webrtc/modules/rtp_rtcp/source/rtp_format_h264.cc
index aeef44364a..c422577c81 100644
--- a/webrtc/modules/rtp_rtcp/source/rtp_format_h264.cc
+++ b/webrtc/modules/rtp_rtcp/source/rtp_format_h264.cc
@@ -11,7 +11,7 @@
#include <string.h>
#include "webrtc/base/logging.h"
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/modules/rtp_rtcp/source/byte_io.h"
#include "webrtc/modules/rtp_rtcp/source/h264_sps_parser.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_format_h264.h"
diff --git a/webrtc/modules/rtp_rtcp/source/rtp_format_h264_unittest.cc b/webrtc/modules/rtp_rtcp/source/rtp_format_h264_unittest.cc
index 1a14b5554a..d29e3d4f21 100644
--- a/webrtc/modules/rtp_rtcp/source/rtp_format_h264_unittest.cc
+++ b/webrtc/modules/rtp_rtcp/source/rtp_format_h264_unittest.cc
@@ -13,7 +13,7 @@
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/modules/rtp_rtcp/mocks/mock_rtp_rtcp.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_format.h"
diff --git a/webrtc/modules/rtp_rtcp/source/rtp_format_video_generic.cc b/webrtc/modules/rtp_rtcp/source/rtp_format_video_generic.cc
index 39b64c6ffa..b47e9b9359 100644
--- a/webrtc/modules/rtp_rtcp/source/rtp_format_video_generic.cc
+++ b/webrtc/modules/rtp_rtcp/source/rtp_format_video_generic.cc
@@ -11,7 +11,7 @@
#include <string>
#include "webrtc/base/logging.h"
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_format_video_generic.h"
namespace webrtc {
diff --git a/webrtc/modules/rtp_rtcp/source/rtp_format_vp8.h b/webrtc/modules/rtp_rtcp/source/rtp_format_vp8.h
index 63db349c74..d62ecba85f 100644
--- a/webrtc/modules/rtp_rtcp/source/rtp_format_vp8.h
+++ b/webrtc/modules/rtp_rtcp/source/rtp_format_vp8.h
@@ -30,7 +30,7 @@
#include <vector>
#include "webrtc/base/constructormagic.h"
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_format.h"
#include "webrtc/typedefs.h"
diff --git a/webrtc/modules/rtp_rtcp/source/rtp_format_vp8_test_helper.h b/webrtc/modules/rtp_rtcp/source/rtp_format_vp8_test_helper.h
index 2fe963251f..668476833d 100644
--- a/webrtc/modules/rtp_rtcp/source/rtp_format_vp8_test_helper.h
+++ b/webrtc/modules/rtp_rtcp/source/rtp_format_vp8_test_helper.h
@@ -19,7 +19,7 @@
#define WEBRTC_MODULES_RTP_RTCP_SOURCE_RTP_FORMAT_VP8_TEST_HELPER_H_
#include "webrtc/base/constructormagic.h"
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_format_vp8.h"
#include "webrtc/typedefs.h"
diff --git a/webrtc/modules/rtp_rtcp/source/rtp_format_vp9.cc b/webrtc/modules/rtp_rtcp/source/rtp_format_vp9.cc
index 0e76a8eae8..d2f22d5044 100644
--- a/webrtc/modules/rtp_rtcp/source/rtp_format_vp9.cc
+++ b/webrtc/modules/rtp_rtcp/source/rtp_format_vp9.cc
@@ -47,10 +47,6 @@ int16_t Tl0PicIdxField(const RTPVideoHeaderVP9& hdr, uint8_t def) {
return (hdr.tl0_pic_idx == kNoTl0PicIdx) ? def : hdr.tl0_pic_idx;
}
-uint8_t GofIdxField(const RTPVideoHeaderVP9& hdr, uint8_t def) {
- return (hdr.gof_idx == kNoGofIdx) ? def : hdr.gof_idx;
-}
-
// Picture ID:
//
// +-+-+-+-+-+-+-+-+
@@ -74,19 +70,17 @@ bool PictureIdPresent(const RTPVideoHeaderVP9& hdr) {
// Flexible mode (F=1): Non-flexible mode (F=0):
//
// +-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+
-// L: | T |U| S |D| |GOF_IDX| S |D|
+// L: | T |U| S |D| | T |U| S |D|
// +-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+
// | TL0PICIDX |
// +-+-+-+-+-+-+-+-+
//
size_t LayerInfoLength(const RTPVideoHeaderVP9& hdr) {
- if (hdr.flexible_mode) {
- return (hdr.temporal_idx == kNoTemporalIdx &&
- hdr.spatial_idx == kNoSpatialIdx) ? 0 : 1;
- } else {
- return (hdr.gof_idx == kNoGofIdx &&
- hdr.spatial_idx == kNoSpatialIdx) ? 0 : 2;
+ if (hdr.temporal_idx == kNoTemporalIdx &&
+ hdr.spatial_idx == kNoSpatialIdx) {
+ return 0;
}
+ return hdr.flexible_mode ? 1 : 2;
}
bool LayerInfoPresent(const RTPVideoHeaderVP9& hdr) {
@@ -198,8 +192,8 @@ bool WritePictureId(const RTPVideoHeaderVP9& vp9,
// L: | T |U| S |D|
// +-+-+-+-+-+-+-+-+
//
-bool WriteLayerInfoFlexibleMode(const RTPVideoHeaderVP9& vp9,
- rtc::BitBufferWriter* writer) {
+bool WriteLayerInfoCommon(const RTPVideoHeaderVP9& vp9,
+ rtc::BitBufferWriter* writer) {
RETURN_FALSE_ON_ERROR(writer->WriteBits(TemporalIdxField(vp9, 0), 3));
RETURN_FALSE_ON_ERROR(writer->WriteBits(vp9.temporal_up_switch ? 1 : 0, 1));
RETURN_FALSE_ON_ERROR(writer->WriteBits(SpatialIdxField(vp9, 0), 3));
@@ -210,27 +204,26 @@ bool WriteLayerInfoFlexibleMode(const RTPVideoHeaderVP9& vp9,
// Non-flexible mode (F=0):
//
// +-+-+-+-+-+-+-+-+
-// L: |GOF_IDX| S |D|
+// L: | T |U| S |D|
// +-+-+-+-+-+-+-+-+
// | TL0PICIDX |
// +-+-+-+-+-+-+-+-+
//
bool WriteLayerInfoNonFlexibleMode(const RTPVideoHeaderVP9& vp9,
rtc::BitBufferWriter* writer) {
- RETURN_FALSE_ON_ERROR(writer->WriteBits(GofIdxField(vp9, 0), 4));
- RETURN_FALSE_ON_ERROR(writer->WriteBits(SpatialIdxField(vp9, 0), 3));
- RETURN_FALSE_ON_ERROR(writer->WriteBits(vp9.inter_layer_predicted ? 1: 0, 1));
RETURN_FALSE_ON_ERROR(writer->WriteUInt8(Tl0PicIdxField(vp9, 0)));
return true;
}
bool WriteLayerInfo(const RTPVideoHeaderVP9& vp9,
rtc::BitBufferWriter* writer) {
- if (vp9.flexible_mode) {
- return WriteLayerInfoFlexibleMode(vp9, writer);
- } else {
- return WriteLayerInfoNonFlexibleMode(vp9, writer);
- }
+ if (!WriteLayerInfoCommon(vp9, writer))
+ return false;
+
+ if (vp9.flexible_mode)
+ return true;
+
+ return WriteLayerInfoNonFlexibleMode(vp9, writer);
}
// Reference indices:
@@ -246,7 +239,7 @@ bool WriteRefIndices(const RTPVideoHeaderVP9& vp9,
vp9.num_ref_pics == 0 || vp9.num_ref_pics > kMaxVp9RefPics) {
return false;
}
- for (size_t i = 0; i < vp9.num_ref_pics; ++i) {
+ for (uint8_t i = 0; i < vp9.num_ref_pics; ++i) {
bool n_bit = !(i == vp9.num_ref_pics - 1);
RETURN_FALSE_ON_ERROR(writer->WriteBits(vp9.pid_diff[i], 7));
RETURN_FALSE_ON_ERROR(writer->WriteBits(n_bit ? 1 : 0, 1));
@@ -301,7 +294,7 @@ bool WriteSsData(const RTPVideoHeaderVP9& vp9, rtc::BitBufferWriter* writer) {
writer->WriteBits(vp9.gof.temporal_up_switch[i] ? 1 : 0, 1));
RETURN_FALSE_ON_ERROR(writer->WriteBits(vp9.gof.num_ref_pics[i], 2));
RETURN_FALSE_ON_ERROR(writer->WriteBits(kReservedBitValue0, 2));
- for (size_t r = 0; r < vp9.gof.num_ref_pics[i]; ++r) {
+ for (uint8_t r = 0; r < vp9.gof.num_ref_pics[i]; ++r) {
RETURN_FALSE_ON_ERROR(writer->WriteUInt8(vp9.gof.pid_diff[i][r]));
}
}
@@ -337,8 +330,7 @@ bool ParsePictureId(rtc::BitBuffer* parser, RTPVideoHeaderVP9* vp9) {
// L: | T |U| S |D|
// +-+-+-+-+-+-+-+-+
//
-bool ParseLayerInfoFlexibleMode(rtc::BitBuffer* parser,
- RTPVideoHeaderVP9* vp9) {
+bool ParseLayerInfoCommon(rtc::BitBuffer* parser, RTPVideoHeaderVP9* vp9) {
uint32_t t, u_bit, s, d_bit;
RETURN_FALSE_ON_ERROR(parser->ReadBits(&t, 3));
RETURN_FALSE_ON_ERROR(parser->ReadBits(&u_bit, 1));
@@ -354,32 +346,27 @@ bool ParseLayerInfoFlexibleMode(rtc::BitBuffer* parser,
// Layer indices (non-flexible mode):
//
// +-+-+-+-+-+-+-+-+
-// L: |GOF_IDX| S |D|
+// L: | T |U| S |D|
// +-+-+-+-+-+-+-+-+
// | TL0PICIDX |
// +-+-+-+-+-+-+-+-+
//
bool ParseLayerInfoNonFlexibleMode(rtc::BitBuffer* parser,
RTPVideoHeaderVP9* vp9) {
- uint32_t gof_idx, s, d_bit;
uint8_t tl0picidx;
- RETURN_FALSE_ON_ERROR(parser->ReadBits(&gof_idx, 4));
- RETURN_FALSE_ON_ERROR(parser->ReadBits(&s, 3));
- RETURN_FALSE_ON_ERROR(parser->ReadBits(&d_bit, 1));
RETURN_FALSE_ON_ERROR(parser->ReadUInt8(&tl0picidx));
- vp9->gof_idx = gof_idx;
- vp9->spatial_idx = s;
- vp9->inter_layer_predicted = d_bit ? true : false;
vp9->tl0_pic_idx = tl0picidx;
return true;
}
bool ParseLayerInfo(rtc::BitBuffer* parser, RTPVideoHeaderVP9* vp9) {
- if (vp9->flexible_mode) {
- return ParseLayerInfoFlexibleMode(parser, vp9);
- } else {
- return ParseLayerInfoNonFlexibleMode(parser, vp9);
- }
+ if (!ParseLayerInfoCommon(parser, vp9))
+ return false;
+
+ if (vp9->flexible_mode)
+ return true;
+
+ return ParseLayerInfoNonFlexibleMode(parser, vp9);
}
// Reference indices:
@@ -466,7 +453,7 @@ bool ParseSsData(rtc::BitBuffer* parser, RTPVideoHeaderVP9* vp9) {
vp9->gof.temporal_up_switch[i] = u_bit ? true : false;
vp9->gof.num_ref_pics[i] = r;
- for (size_t p = 0; p < vp9->gof.num_ref_pics[i]; ++p) {
+ for (uint8_t p = 0; p < vp9->gof.num_ref_pics[i]; ++p) {
uint8_t p_diff;
RETURN_FALSE_ON_ERROR(parser->ReadUInt8(&p_diff));
vp9->gof.pid_diff[i][p] = p_diff;
@@ -604,7 +591,7 @@ bool RtpPacketizerVp9::NextPacket(uint8_t* buffer,
// +-+-+-+-+-+-+-+-+
// M: | EXTENDED PID | (RECOMMENDED)
// +-+-+-+-+-+-+-+-+
-// L: |GOF_IDX| S |D| (CONDITIONALLY RECOMMENDED)
+// L: | T |U| S |D| (CONDITIONALLY RECOMMENDED)
// +-+-+-+-+-+-+-+-+
// | TL0PICIDX | (CONDITIONALLY REQUIRED)
// +-+-+-+-+-+-+-+-+
@@ -738,7 +725,8 @@ bool RtpDepacketizerVp9::Parse(ParsedPayload* parsed_payload,
parsed_payload->type.Video.height = vp9->height[0];
}
}
- parsed_payload->type.Video.isFirstPacket = b_bit && (vp9->spatial_idx == 0);
+ parsed_payload->type.Video.isFirstPacket =
+ b_bit && (!l_bit || !vp9->inter_layer_predicted);
uint64_t rem_bits = parser.RemainingBitCount();
assert(rem_bits % 8 == 0);
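Note on the layer-info refactor above: both VP9 modes now share the leading | T |U| S |D| byte, emitted by WriteLayerInfoCommon(); non-flexible mode merely appends the TL0PICIDX byte, which is why LayerInfoLength() returns 1 or 2. A minimal sketch of the resulting byte packing, using a hypothetical helper name (the patch itself writes the fields through rtc::BitBufferWriter, MSB first):

// Illustrative only: packs | T |U| S |D| exactly as the four
// WriteBits() calls in WriteLayerInfoCommon() do.
uint8_t PackVp9LayerByte(uint8_t t, bool u, uint8_t s, bool d) {
  return static_cast<uint8_t>(((t & 0x7) << 5) | ((u ? 1 : 0) << 4) |
                              ((s & 0x7) << 1) | (d ? 1 : 0));
}

For example, PackVp9LayerByte(2, true, 1, true) yields 0x53, which is the byte the updated unit test below constructs by hand.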
diff --git a/webrtc/modules/rtp_rtcp/source/rtp_format_vp9.h b/webrtc/modules/rtp_rtcp/source/rtp_format_vp9.h
index abce7e7791..3feca4392a 100644
--- a/webrtc/modules/rtp_rtcp/source/rtp_format_vp9.h
+++ b/webrtc/modules/rtp_rtcp/source/rtp_format_vp9.h
@@ -25,7 +25,7 @@
#include <string>
#include "webrtc/base/constructormagic.h"
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_format.h"
#include "webrtc/typedefs.h"
diff --git a/webrtc/modules/rtp_rtcp/source/rtp_format_vp9_unittest.cc b/webrtc/modules/rtp_rtcp/source/rtp_format_vp9_unittest.cc
index 66ab5cdb71..5bbafe459d 100644
--- a/webrtc/modules/rtp_rtcp/source/rtp_format_vp9_unittest.cc
+++ b/webrtc/modules/rtp_rtcp/source/rtp_format_vp9_unittest.cc
@@ -55,7 +55,7 @@ void VerifyHeader(const RTPVideoHeaderVP9& expected,
actual.gof.temporal_up_switch[i]);
EXPECT_EQ(expected.gof.temporal_idx[i], actual.gof.temporal_idx[i]);
EXPECT_EQ(expected.gof.num_ref_pics[i], actual.gof.num_ref_pics[i]);
- for (size_t j = 0; j < expected.gof.num_ref_pics[i]; j++) {
+ for (uint8_t j = 0; j < expected.gof.num_ref_pics[i]; j++) {
EXPECT_EQ(expected.gof.pid_diff[i][j], actual.gof.pid_diff[i][j]);
}
}
@@ -112,7 +112,7 @@ void ParseAndCheckPacket(const uint8_t* packet,
// +-+-+-+-+-+-+-+-+
// M: | EXTENDED PID | (RECOMMENDED)
// +-+-+-+-+-+-+-+-+
-// L: |GOF_IDX| S |D| (CONDITIONALLY RECOMMENDED)
+// L: | T |U| S |D| (CONDITIONALLY RECOMMENDED)
// +-+-+-+-+-+-+-+-+
// | TL0PICIDX | (CONDITIONALLY REQUIRED)
// +-+-+-+-+-+-+-+-+
@@ -255,7 +255,8 @@ TEST_F(RtpPacketizerVp9Test, TestLayerInfoWithNonFlexibleMode) {
const size_t kFrameSize = 30;
const size_t kPacketSize = 25;
- expected_.gof_idx = 3;
+ expected_.temporal_idx = 3;
+ expected_.temporal_up_switch = true; // U
expected_.num_spatial_layers = 3;
expected_.spatial_idx = 2;
expected_.inter_layer_predicted = true; // D
@@ -264,9 +265,9 @@ TEST_F(RtpPacketizerVp9Test, TestLayerInfoWithNonFlexibleMode) {
// Two packets:
// | I:0, P:0, L:1, F:0, B:1, E:0, V:0 | (3hdr + 15 payload)
- // L: | GOF_IDX:3, S:2, D:1 | TL0PICIDX:117 |
+ // L: | T:3, U:1, S:2, D:1 | TL0PICIDX:117 |
// | I:0, P:0, L:1, F:0, B:0, E:1, V:0 | (3hdr + 15 payload)
- // L: | GOF_IDX:3, S:2, D:1 | TL0PICIDX:117 |
+ // L: | T:3, U:1, S:2, D:1 | TL0PICIDX:117 |
const size_t kExpectedHdrSizes[] = {3, 3};
const size_t kExpectedSizes[] = {18, 18};
const size_t kExpectedNum = GTEST_ARRAY_SIZE_(kExpectedSizes);
@@ -505,16 +506,20 @@ TEST_F(RtpDepacketizerVp9Test, ParseTwoBytePictureId) {
TEST_F(RtpDepacketizerVp9Test, ParseLayerInfoWithNonFlexibleMode) {
const uint8_t kHeaderLength = 3;
- const uint8_t kGofIdx = 7;
+ const uint8_t kTemporalIdx = 2;
+ const uint8_t kUbit = 1;
const uint8_t kSpatialIdx = 1;
const uint8_t kDbit = 1;
const uint8_t kTl0PicIdx = 17;
uint8_t packet[13] = {0};
packet[0] = 0x20; // I:0 P:0 L:1 F:0 B:0 E:0 V:0 R:0
- packet[1] = (kGofIdx << 4) | (kSpatialIdx << 1) | kDbit; // GOF_IDX:7 S:1 D:1
- packet[2] = kTl0PicIdx; // TL0PICIDX:17
+ packet[1] = (kTemporalIdx << 5) | (kUbit << 4) | (kSpatialIdx << 1) | kDbit;
+ packet[2] = kTl0PicIdx;
- expected_.gof_idx = kGofIdx;
+ // T:2 U:1 S:1 D:1
+ // TL0PICIDX:17
+ expected_.temporal_idx = kTemporalIdx;
+ expected_.temporal_up_switch = kUbit ? true : false;
expected_.spatial_idx = kSpatialIdx;
expected_.inter_layer_predicted = kDbit ? true : false;
expected_.tl0_pic_idx = kTl0PicIdx;
@@ -545,9 +550,9 @@ TEST_F(RtpDepacketizerVp9Test, ParseLayerInfoWithFlexibleMode) {
TEST_F(RtpDepacketizerVp9Test, ParseRefIdx) {
const uint8_t kHeaderLength = 6;
const int16_t kPictureId = 17;
- const int16_t kPdiff1 = 17;
- const int16_t kPdiff2 = 18;
- const int16_t kPdiff3 = 127;
+ const uint8_t kPdiff1 = 17;
+ const uint8_t kPdiff2 = 18;
+ const uint8_t kPdiff3 = 127;
uint8_t packet[13] = {0};
packet[0] = 0xD8; // I:1 P:1 L:0 F:1 B:1 E:0 V:0 R:0
packet[1] = 0x80 | ((kPictureId >> 8) & 0x7F); // Two byte pictureID.
@@ -577,7 +582,7 @@ TEST_F(RtpDepacketizerVp9Test, ParseRefIdx) {
}
TEST_F(RtpDepacketizerVp9Test, ParseRefIdxFailsWithNoPictureId) {
- const int16_t kPdiff = 3;
+ const uint8_t kPdiff = 3;
uint8_t packet[13] = {0};
packet[0] = 0x58; // I:0 P:1 L:0 F:1 B:1 E:0 V:0 R:0
packet[1] = (kPdiff << 1); // P,F: P_DIFF:3 N:0
@@ -587,7 +592,7 @@ TEST_F(RtpDepacketizerVp9Test, ParseRefIdxFailsWithNoPictureId) {
}
TEST_F(RtpDepacketizerVp9Test, ParseRefIdxFailsWithTooManyRefPics) {
- const int16_t kPdiff = 3;
+ const uint8_t kPdiff = 3;
uint8_t packet[13] = {0};
packet[0] = 0xD8; // I:1 P:1 L:0 F:1 B:1 E:0 V:0 R:0
packet[1] = kMaxOneBytePictureId; // I: PICTURE ID:127
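Worked check for the ParseLayerInfoWithNonFlexibleMode test above: with T:2 U:1 S:1 D:1, packet[1] = (2 << 5) | (1 << 4) | (1 << 1) | 1 = 0x40 | 0x10 | 0x02 | 0x01 = 0x53, matching the T/U/S/D layout the parser now expects in both modes.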
diff --git a/webrtc/modules/rtp_rtcp/source/rtp_header_extension.h b/webrtc/modules/rtp_rtcp/source/rtp_header_extension.h
index 7be3c2e5c4..342e38a1f2 100644
--- a/webrtc/modules/rtp_rtcp/source/rtp_header_extension.h
+++ b/webrtc/modules/rtp_rtcp/source/rtp_header_extension.h
@@ -8,12 +8,12 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_RTP_RTCP_RTP_HEADER_EXTENSION_H_
-#define WEBRTC_MODULES_RTP_RTCP_RTP_HEADER_EXTENSION_H_
+#ifndef WEBRTC_MODULES_RTP_RTCP_SOURCE_RTP_HEADER_EXTENSION_H_
+#define WEBRTC_MODULES_RTP_RTCP_SOURCE_RTP_HEADER_EXTENSION_H_
#include <map>
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "webrtc/typedefs.h"
namespace webrtc {
@@ -28,7 +28,7 @@ const size_t kVideoRotationLength = 2;
const size_t kTransportSequenceNumberLength = 3;
struct HeaderExtension {
- HeaderExtension(RTPExtensionType extension_type)
+ explicit HeaderExtension(RTPExtensionType extension_type)
: type(extension_type), length(0), active(true) {
Init();
}
@@ -112,6 +112,7 @@ class RtpHeaderExtensionMap {
int32_t Register(const RTPExtensionType type, const uint8_t id, bool active);
std::map<uint8_t, HeaderExtension*> extensionMap_;
};
-}
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_RTP_RTCP_SOURCE_RTP_HEADER_EXTENSION_H_
-#endif // WEBRTC_MODULES_RTP_RTCP_RTP_HEADER_EXTENSION_H_
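Besides the header-guard fix, the constructor gains `explicit`, which stops an RTPExtensionType from silently converting into a HeaderExtension. A minimal sketch of what the keyword rules out (the function name is hypothetical):

// void Use(HeaderExtension ext);
// Use(kRtpExtensionAudioLevel);                   // compiles without explicit
// Use(HeaderExtension(kRtpExtensionAudioLevel));  // required with explicit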
diff --git a/webrtc/modules/rtp_rtcp/source/rtp_header_extension_unittest.cc b/webrtc/modules/rtp_rtcp/source/rtp_header_extension_unittest.cc
index 520cf7a962..ca37750621 100644
--- a/webrtc/modules/rtp_rtcp/source/rtp_header_extension_unittest.cc
+++ b/webrtc/modules/rtp_rtcp/source/rtp_header_extension_unittest.cc
@@ -15,7 +15,7 @@
#include "testing/gtest/include/gtest/gtest.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_header_extension.h"
#include "webrtc/typedefs.h"
diff --git a/webrtc/modules/rtp_rtcp/source/rtp_header_parser.cc b/webrtc/modules/rtp_rtcp/source/rtp_header_parser.cc
index 266bad8858..d4cbe544cc 100644
--- a/webrtc/modules/rtp_rtcp/source/rtp_header_parser.cc
+++ b/webrtc/modules/rtp_rtcp/source/rtp_header_parser.cc
@@ -7,7 +7,7 @@
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/rtp_rtcp/interface/rtp_header_parser.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_header_parser.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_header_extension.h"
@@ -58,7 +58,7 @@ bool RtpHeaderParserImpl::Parse(const uint8_t* packet,
rtp_header_extension_map_.GetCopy(&map);
}
- const bool valid_rtpheader = rtp_parser.Parse(*header, &map);
+ const bool valid_rtpheader = rtp_parser.Parse(header, &map);
if (!valid_rtpheader) {
return false;
}
diff --git a/webrtc/modules/rtp_rtcp/source/rtp_packet_history.cc b/webrtc/modules/rtp_rtcp/source/rtp_packet_history.cc
index aa941d63ff..49f9d8530a 100644
--- a/webrtc/modules/rtp_rtcp/source/rtp_packet_history.cc
+++ b/webrtc/modules/rtp_rtcp/source/rtp_packet_history.cc
@@ -13,6 +13,8 @@
#include <assert.h>
#include <stdlib.h>
#include <string.h> // memset
+
+#include <algorithm>
#include <limits>
#include <set>
diff --git a/webrtc/modules/rtp_rtcp/source/rtp_packet_history.h b/webrtc/modules/rtp_rtcp/source/rtp_packet_history.h
index e97d11eeaa..8e1a732b19 100644
--- a/webrtc/modules/rtp_rtcp/source/rtp_packet_history.h
+++ b/webrtc/modules/rtp_rtcp/source/rtp_packet_history.h
@@ -10,14 +10,14 @@
* Class for storing RTP packets.
*/
-#ifndef WEBRTC_MODULES_RTP_RTCP_RTP_PACKET_HISTORY_H_
-#define WEBRTC_MODULES_RTP_RTCP_RTP_PACKET_HISTORY_H_
+#ifndef WEBRTC_MODULES_RTP_RTCP_SOURCE_RTP_PACKET_HISTORY_H_
+#define WEBRTC_MODULES_RTP_RTCP_SOURCE_RTP_PACKET_HISTORY_H_
#include <vector>
#include "webrtc/base/thread_annotations.h"
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "webrtc/typedefs.h"
namespace webrtc {
@@ -29,7 +29,7 @@ static const size_t kMaxHistoryCapacity = 9600;
class RTPPacketHistory {
public:
- RTPPacketHistory(Clock* clock);
+ explicit RTPPacketHistory(Clock* clock);
~RTPPacketHistory();
void SetStorePacketsStatus(bool enable, uint16_t number_to_store);
@@ -101,4 +101,4 @@ class RTPPacketHistory {
std::vector<StoredPacket> stored_packets_ GUARDED_BY(critsect_);
};
} // namespace webrtc
-#endif // WEBRTC_MODULES_RTP_RTCP_RTP_PACKET_HISTORY_H_
+#endif // WEBRTC_MODULES_RTP_RTCP_SOURCE_RTP_PACKET_HISTORY_H_
diff --git a/webrtc/modules/rtp_rtcp/source/rtp_packet_history_unittest.cc b/webrtc/modules/rtp_rtcp/source/rtp_packet_history_unittest.cc
index 00a6ac7ed2..a406d8bc9b 100644
--- a/webrtc/modules/rtp_rtcp/source/rtp_packet_history_unittest.cc
+++ b/webrtc/modules/rtp_rtcp/source/rtp_packet_history_unittest.cc
@@ -12,10 +12,9 @@
#include "testing/gtest/include/gtest/gtest.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_packet_history.h"
#include "webrtc/system_wrappers/include/clock.h"
-#include "webrtc/video_engine/vie_defines.h"
#include "webrtc/typedefs.h"
namespace webrtc {
@@ -29,7 +28,7 @@ class RtpPacketHistoryTest : public ::testing::Test {
~RtpPacketHistoryTest() {
delete hist_;
}
-
+
SimulatedClock fake_clock_;
RTPPacketHistory* hist_;
enum {kPayload = 127};
@@ -54,7 +53,7 @@ class RtpPacketHistoryTest : public ::testing::Test {
array[(*cur_pos)++] = ssrc >> 16;
array[(*cur_pos)++] = ssrc >> 8;
array[(*cur_pos)++] = ssrc;
- }
+ }
};
TEST_F(RtpPacketHistoryTest, SetStoreStatus) {
@@ -268,6 +267,7 @@ TEST_F(RtpPacketHistoryTest, DynamicExpansion) {
}
TEST_F(RtpPacketHistoryTest, FullExpansion) {
+ static const int kSendSidePacketHistorySize = 600;
hist_->SetStorePacketsStatus(true, kSendSidePacketHistorySize);
size_t len;
int64_t capture_time_ms = fake_clock_.TimeInMilliseconds();
diff --git a/webrtc/modules/rtp_rtcp/source/rtp_payload_registry.cc b/webrtc/modules/rtp_rtcp/source/rtp_payload_registry.cc
index 38d1450c23..ce0bcd7fed 100644
--- a/webrtc/modules/rtp_rtcp/source/rtp_payload_registry.cc
+++ b/webrtc/modules/rtp_rtcp/source/rtp_payload_registry.cc
@@ -8,7 +8,7 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/rtp_rtcp/interface/rtp_payload_registry.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_payload_registry.h"
#include "webrtc/base/logging.h"
#include "webrtc/modules/rtp_rtcp/source/byte_io.h"
@@ -40,7 +40,7 @@ int32_t RTPPayloadRegistry::RegisterReceivePayload(
const char payload_name[RTP_PAYLOAD_NAME_SIZE],
const int8_t payload_type,
const uint32_t frequency,
- const uint8_t channels,
+ const size_t channels,
const uint32_t rate,
bool* created_new_payload) {
assert(payload_type >= 0);
@@ -139,7 +139,7 @@ void RTPPayloadRegistry::DeregisterAudioCodecOrRedTypeRegardlessOfPayloadType(
const char payload_name[RTP_PAYLOAD_NAME_SIZE],
const size_t payload_name_length,
const uint32_t frequency,
- const uint8_t channels,
+ const size_t channels,
const uint32_t rate) {
RtpUtility::PayloadTypeMap::iterator iterator = payload_type_map_.begin();
for (; iterator != payload_type_map_.end(); ++iterator) {
@@ -171,7 +171,7 @@ void RTPPayloadRegistry::DeregisterAudioCodecOrRedTypeRegardlessOfPayloadType(
int32_t RTPPayloadRegistry::ReceivePayloadType(
const char payload_name[RTP_PAYLOAD_NAME_SIZE],
const uint32_t frequency,
- const uint8_t channels,
+ const size_t channels,
const uint32_t rate,
int8_t* payload_type) const {
assert(payload_type);
@@ -343,17 +343,16 @@ bool RTPPayloadRegistry::GetPayloadSpecifics(uint8_t payload_type,
int RTPPayloadRegistry::GetPayloadTypeFrequency(
uint8_t payload_type) const {
- RtpUtility::Payload* payload;
- if (!PayloadTypeToPayload(payload_type, payload)) {
+ const RtpUtility::Payload* payload = PayloadTypeToPayload(payload_type);
+ if (!payload) {
return -1;
}
CriticalSectionScoped cs(crit_sect_.get());
return rtp_payload_strategy_->GetPayloadTypeFrequency(*payload);
}
-bool RTPPayloadRegistry::PayloadTypeToPayload(
- const uint8_t payload_type,
- RtpUtility::Payload*& payload) const {
+const RtpUtility::Payload* RTPPayloadRegistry::PayloadTypeToPayload(
+ uint8_t payload_type) const {
CriticalSectionScoped cs(crit_sect_.get());
RtpUtility::PayloadTypeMap::const_iterator it =
@@ -361,11 +360,10 @@ bool RTPPayloadRegistry::PayloadTypeToPayload(
// Check that this is a registered payload type.
if (it == payload_type_map_.end()) {
- return false;
+ return nullptr;
}
- payload = it->second;
- return true;
+ return it->second;
}
void RTPPayloadRegistry::SetIncomingPayloadType(const RTPHeader& header) {
@@ -390,7 +388,7 @@ class RTPPayloadAudioStrategy : public RTPPayloadStrategy {
bool PayloadIsCompatible(const RtpUtility::Payload& payload,
const uint32_t frequency,
- const uint8_t channels,
+ const size_t channels,
const uint32_t rate) const override {
return
payload.audio &&
@@ -409,7 +407,7 @@ class RTPPayloadAudioStrategy : public RTPPayloadStrategy {
const char payloadName[RTP_PAYLOAD_NAME_SIZE],
const int8_t payloadType,
const uint32_t frequency,
- const uint8_t channels,
+ const size_t channels,
const uint32_t rate) const override {
RtpUtility::Payload* payload = new RtpUtility::Payload;
payload->name[RTP_PAYLOAD_NAME_SIZE - 1] = 0;
@@ -433,7 +431,7 @@ class RTPPayloadVideoStrategy : public RTPPayloadStrategy {
bool PayloadIsCompatible(const RtpUtility::Payload& payload,
const uint32_t frequency,
- const uint8_t channels,
+ const size_t channels,
const uint32_t rate) const override {
return !payload.audio;
}
@@ -447,7 +445,7 @@ class RTPPayloadVideoStrategy : public RTPPayloadStrategy {
const char payloadName[RTP_PAYLOAD_NAME_SIZE],
const int8_t payloadType,
const uint32_t frequency,
- const uint8_t channels,
+ const size_t channels,
const uint32_t rate) const override {
RtpVideoCodecTypes videoType = kRtpVideoGeneric;
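The registry lookup above changes shape: PayloadTypeToPayload() now returns a const pointer, with nullptr meaning "not registered", instead of filling a RtpUtility::Payload*& out-parameter. A sketch of the call-site difference (variable names illustrative):

// Old: bool return plus reference out-parameter.
//   RtpUtility::Payload* payload;
//   if (!registry->PayloadTypeToPayload(payload_type, payload)) return -1;
// New: the pointer itself carries the "found" signal.
const RtpUtility::Payload* payload =
    registry->PayloadTypeToPayload(payload_type);
if (!payload)
  return -1;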
diff --git a/webrtc/modules/rtp_rtcp/source/rtp_payload_registry_unittest.cc b/webrtc/modules/rtp_rtcp/source/rtp_payload_registry_unittest.cc
index 0b9bf2751e..b73666d1af 100644
--- a/webrtc/modules/rtp_rtcp/source/rtp_payload_registry_unittest.cc
+++ b/webrtc/modules/rtp_rtcp/source/rtp_payload_registry_unittest.cc
@@ -8,12 +8,12 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/rtp_rtcp/interface/rtp_payload_registry.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_payload_registry.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_header_parser.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_header_parser.h"
#include "webrtc/modules/rtp_rtcp/source/byte_io.h"
#include "webrtc/modules/rtp_rtcp/source/mock/mock_rtp_payload_strategy.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_utility.h"
@@ -25,7 +25,7 @@ using ::testing::Return;
using ::testing::_;
static const char* kTypicalPayloadName = "name";
-static const uint8_t kTypicalChannels = 1;
+static const size_t kTypicalChannels = 1;
static const int kTypicalFrequency = 44000;
static const int kTypicalRate = 32 * 1024;
@@ -52,10 +52,9 @@ class RtpPayloadRegistryTest : public ::testing::Test {
RtpUtility::Payload* returned_payload_on_heap =
new RtpUtility::Payload(returned_payload);
EXPECT_CALL(*mock_payload_strategy_,
- CreatePayloadType(kTypicalPayloadName, payload_type,
- kTypicalFrequency,
- kTypicalChannels,
- rate)).WillOnce(Return(returned_payload_on_heap));
+ CreatePayloadType(kTypicalPayloadName, payload_type,
+ kTypicalFrequency, kTypicalChannels, rate))
+ .WillOnce(Return(returned_payload_on_heap));
return returned_payload_on_heap;
}
@@ -70,14 +69,14 @@ TEST_F(RtpPayloadRegistryTest, RegistersAndRemembersPayloadsUntilDeregistered) {
bool new_payload_created = false;
EXPECT_EQ(0, rtp_payload_registry_->RegisterReceivePayload(
- kTypicalPayloadName, payload_type, kTypicalFrequency, kTypicalChannels,
- kTypicalRate, &new_payload_created));
+ kTypicalPayloadName, payload_type, kTypicalFrequency,
+ kTypicalChannels, kTypicalRate, &new_payload_created));
EXPECT_TRUE(new_payload_created) << "A new payload WAS created.";
- RtpUtility::Payload* retrieved_payload = NULL;
- EXPECT_TRUE(rtp_payload_registry_->PayloadTypeToPayload(payload_type,
- retrieved_payload));
+ const RtpUtility::Payload* retrieved_payload =
+ rtp_payload_registry_->PayloadTypeToPayload(payload_type);
+ EXPECT_TRUE(retrieved_payload);
// We should get back the exact pointer to the payload returned by the
// payload strategy.
@@ -85,32 +84,30 @@ TEST_F(RtpPayloadRegistryTest, RegistersAndRemembersPayloadsUntilDeregistered) {
// Now forget about it and verify it's gone.
EXPECT_EQ(0, rtp_payload_registry_->DeRegisterReceivePayload(payload_type));
- EXPECT_FALSE(rtp_payload_registry_->PayloadTypeToPayload(
- payload_type, retrieved_payload));
+ EXPECT_FALSE(rtp_payload_registry_->PayloadTypeToPayload(payload_type));
}
TEST_F(RtpPayloadRegistryTest, AudioRedWorkProperly) {
const uint8_t kRedPayloadType = 127;
const int kRedSampleRate = 8000;
- const int kRedChannels = 1;
+ const size_t kRedChannels = 1;
const int kRedBitRate = 0;
// This creates an audio RTP payload strategy.
- rtp_payload_registry_.reset(new RTPPayloadRegistry(
- RTPPayloadStrategy::CreateStrategy(true)));
+ rtp_payload_registry_.reset(
+ new RTPPayloadRegistry(RTPPayloadStrategy::CreateStrategy(true)));
bool new_payload_created = false;
EXPECT_EQ(0, rtp_payload_registry_->RegisterReceivePayload(
- "red", kRedPayloadType, kRedSampleRate, kRedChannels, kRedBitRate,
- &new_payload_created));
+ "red", kRedPayloadType, kRedSampleRate, kRedChannels,
+ kRedBitRate, &new_payload_created));
EXPECT_TRUE(new_payload_created);
EXPECT_EQ(kRedPayloadType, rtp_payload_registry_->red_payload_type());
- RtpUtility::Payload* retrieved_payload = NULL;
- EXPECT_TRUE(rtp_payload_registry_->PayloadTypeToPayload(kRedPayloadType,
- retrieved_payload));
- ASSERT_TRUE(retrieved_payload);
+ const RtpUtility::Payload* retrieved_payload =
+ rtp_payload_registry_->PayloadTypeToPayload(kRedPayloadType);
+ EXPECT_TRUE(retrieved_payload);
EXPECT_TRUE(retrieved_payload->audio);
EXPECT_STRCASEEQ("red", retrieved_payload->name);
@@ -127,27 +124,29 @@ TEST_F(RtpPayloadRegistryTest,
RtpUtility::Payload* first_payload_on_heap =
ExpectReturnOfTypicalAudioPayload(payload_type, kTypicalRate);
EXPECT_EQ(0, rtp_payload_registry_->RegisterReceivePayload(
- kTypicalPayloadName, payload_type, kTypicalFrequency, kTypicalChannels,
- kTypicalRate, &ignored));
+ kTypicalPayloadName, payload_type, kTypicalFrequency,
+ kTypicalChannels, kTypicalRate, &ignored));
EXPECT_EQ(-1, rtp_payload_registry_->RegisterReceivePayload(
- kTypicalPayloadName, payload_type, kTypicalFrequency, kTypicalChannels,
- kTypicalRate, &ignored)) << "Adding same codec twice = bad.";
+ kTypicalPayloadName, payload_type, kTypicalFrequency,
+ kTypicalChannels, kTypicalRate, &ignored))
+ << "Adding same codec twice = bad.";
RtpUtility::Payload* second_payload_on_heap =
ExpectReturnOfTypicalAudioPayload(payload_type - 1, kTypicalRate);
EXPECT_EQ(0, rtp_payload_registry_->RegisterReceivePayload(
- kTypicalPayloadName, payload_type - 1, kTypicalFrequency,
- kTypicalChannels, kTypicalRate, &ignored)) <<
- "With a different payload type is fine though.";
+ kTypicalPayloadName, payload_type - 1, kTypicalFrequency,
+ kTypicalChannels, kTypicalRate, &ignored))
+ << "With a different payload type is fine though.";
// Ensure both payloads are preserved.
- RtpUtility::Payload* retrieved_payload = NULL;
- EXPECT_TRUE(rtp_payload_registry_->PayloadTypeToPayload(payload_type,
- retrieved_payload));
+ const RtpUtility::Payload* retrieved_payload =
+ rtp_payload_registry_->PayloadTypeToPayload(payload_type);
+ EXPECT_TRUE(retrieved_payload);
EXPECT_EQ(first_payload_on_heap, retrieved_payload);
- EXPECT_TRUE(rtp_payload_registry_->PayloadTypeToPayload(payload_type - 1,
- retrieved_payload));
+ retrieved_payload =
+ rtp_payload_registry_->PayloadTypeToPayload(payload_type - 1);
+ EXPECT_TRUE(retrieved_payload);
EXPECT_EQ(second_payload_on_heap, retrieved_payload);
// Ok, update the rate for one of the codecs. If either the incoming rate or
@@ -158,8 +157,8 @@ TEST_F(RtpPayloadRegistryTest,
EXPECT_CALL(*mock_payload_strategy_,
UpdatePayloadRate(first_payload_on_heap, kTypicalRate));
EXPECT_EQ(0, rtp_payload_registry_->RegisterReceivePayload(
- kTypicalPayloadName, payload_type, kTypicalFrequency, kTypicalChannels,
- kTypicalRate, &ignored));
+ kTypicalPayloadName, payload_type, kTypicalFrequency,
+ kTypicalChannels, kTypicalRate, &ignored));
}
TEST_F(RtpPayloadRegistryTest,
@@ -174,35 +173,31 @@ TEST_F(RtpPayloadRegistryTest,
bool ignored = false;
ExpectReturnOfTypicalAudioPayload(payload_type, kTypicalRate);
EXPECT_EQ(0, rtp_payload_registry_->RegisterReceivePayload(
- kTypicalPayloadName, payload_type, kTypicalFrequency, kTypicalChannels,
- kTypicalRate, &ignored));
+ kTypicalPayloadName, payload_type, kTypicalFrequency,
+ kTypicalChannels, kTypicalRate, &ignored));
ExpectReturnOfTypicalAudioPayload(payload_type - 1, kTypicalRate);
EXPECT_EQ(0, rtp_payload_registry_->RegisterReceivePayload(
- kTypicalPayloadName, payload_type - 1, kTypicalFrequency,
- kTypicalChannels, kTypicalRate, &ignored));
+ kTypicalPayloadName, payload_type - 1, kTypicalFrequency,
+ kTypicalChannels, kTypicalRate, &ignored));
- RtpUtility::Payload* retrieved_payload = NULL;
- EXPECT_FALSE(rtp_payload_registry_->PayloadTypeToPayload(
- payload_type, retrieved_payload)) << "The first payload should be "
- "deregistered because the only thing that differs is payload type.";
- EXPECT_TRUE(rtp_payload_registry_->PayloadTypeToPayload(
- payload_type - 1, retrieved_payload)) <<
- "The second payload should still be registered though.";
+ EXPECT_FALSE(rtp_payload_registry_->PayloadTypeToPayload(payload_type))
+ << "The first payload should be "
+ "deregistered because the only thing that differs is payload type.";
+ EXPECT_TRUE(rtp_payload_registry_->PayloadTypeToPayload(payload_type - 1))
+ << "The second payload should still be registered though.";
// Now ensure non-compatible codecs aren't removed.
ON_CALL(*mock_payload_strategy_, PayloadIsCompatible(_, _, _, _))
.WillByDefault(Return(false));
ExpectReturnOfTypicalAudioPayload(payload_type + 1, kTypicalRate);
EXPECT_EQ(0, rtp_payload_registry_->RegisterReceivePayload(
- kTypicalPayloadName, payload_type + 1, kTypicalFrequency,
- kTypicalChannels, kTypicalRate, &ignored));
-
- EXPECT_TRUE(rtp_payload_registry_->PayloadTypeToPayload(
- payload_type - 1, retrieved_payload)) <<
- "Not compatible; both payloads should be kept.";
- EXPECT_TRUE(rtp_payload_registry_->PayloadTypeToPayload(
- payload_type + 1, retrieved_payload)) <<
- "Not compatible; both payloads should be kept.";
+ kTypicalPayloadName, payload_type + 1, kTypicalFrequency,
+ kTypicalChannels, kTypicalRate, &ignored));
+
+ EXPECT_TRUE(rtp_payload_registry_->PayloadTypeToPayload(payload_type - 1))
+ << "Not compatible; both payloads should be kept.";
+ EXPECT_TRUE(rtp_payload_registry_->PayloadTypeToPayload(payload_type + 1))
+ << "Not compatible; both payloads should be kept.";
}
TEST_F(RtpPayloadRegistryTest,
@@ -218,18 +213,17 @@ TEST_F(RtpPayloadRegistryTest,
bool ignored;
ExpectReturnOfTypicalAudioPayload(34, kTypicalRate);
EXPECT_EQ(0, rtp_payload_registry_->RegisterReceivePayload(
- kTypicalPayloadName, 34, kTypicalFrequency, kTypicalChannels,
- kTypicalRate, &ignored));
+ kTypicalPayloadName, 34, kTypicalFrequency, kTypicalChannels,
+ kTypicalRate, &ignored));
EXPECT_EQ(-1, rtp_payload_registry_->last_received_payload_type());
media_type_unchanged = rtp_payload_registry_->ReportMediaPayloadType(18);
EXPECT_FALSE(media_type_unchanged);
}
-class ParameterizedRtpPayloadRegistryTest :
- public RtpPayloadRegistryTest,
- public ::testing::WithParamInterface<int> {
-};
+class ParameterizedRtpPayloadRegistryTest
+ : public RtpPayloadRegistryTest,
+ public ::testing::WithParamInterface<int> {};
TEST_P(ParameterizedRtpPayloadRegistryTest,
FailsToRegisterKnownPayloadsWeAreNotInterestedIn) {
@@ -237,26 +231,26 @@ TEST_P(ParameterizedRtpPayloadRegistryTest,
bool ignored;
EXPECT_EQ(-1, rtp_payload_registry_->RegisterReceivePayload(
- "whatever", static_cast<uint8_t>(payload_type), 19, 1, 17, &ignored));
+ "whatever", static_cast<uint8_t>(payload_type), 19, 1, 17,
+ &ignored));
}
INSTANTIATE_TEST_CASE_P(TestKnownBadPayloadTypes,
ParameterizedRtpPayloadRegistryTest,
testing::Values(64, 72, 73, 74, 75, 76, 77, 78, 79));
-class RtpPayloadRegistryGenericTest :
- public RtpPayloadRegistryTest,
- public ::testing::WithParamInterface<int> {
-};
+class RtpPayloadRegistryGenericTest
+ : public RtpPayloadRegistryTest,
+ public ::testing::WithParamInterface<int> {};
TEST_P(RtpPayloadRegistryGenericTest, RegisterGenericReceivePayloadType) {
int payload_type = GetParam();
bool ignored;
- EXPECT_EQ(0, rtp_payload_registry_->RegisterReceivePayload("generic-codec",
- static_cast<int8_t>(payload_type),
- 19, 1, 17, &ignored)); // dummy values, except for payload_type
+ EXPECT_EQ(0, rtp_payload_registry_->RegisterReceivePayload(
+ "generic-codec", static_cast<int8_t>(payload_type), 19, 1,
+ 17, &ignored)); // dummy values, except for payload_type
}
// Generates an RTX packet for the given length and original sequence number.
@@ -395,7 +389,8 @@ TEST_F(RtpPayloadRegistryTest, InvalidRtxConfiguration) {
TestRtxPacket(rtp_payload_registry_.get(), 106, 0, false);
}
-INSTANTIATE_TEST_CASE_P(TestDynamicRange, RtpPayloadRegistryGenericTest,
- testing::Range(96, 127+1));
+INSTANTIATE_TEST_CASE_P(TestDynamicRange,
+ RtpPayloadRegistryGenericTest,
+ testing::Range(96, 127 + 1));
} // namespace webrtc
diff --git a/webrtc/modules/rtp_rtcp/source/rtp_receiver_audio.h b/webrtc/modules/rtp_rtcp/source/rtp_receiver_audio.h
index 176852e01e..1dd07d1cc9 100644
--- a/webrtc/modules/rtp_rtcp/source/rtp_receiver_audio.h
+++ b/webrtc/modules/rtp_rtcp/source/rtp_receiver_audio.h
@@ -14,8 +14,8 @@
#include <set>
#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_receiver.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_receiver.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_receiver_strategy.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_utility.h"
#include "webrtc/typedefs.h"
@@ -42,9 +42,7 @@ class RTPReceiverAudio : public RTPReceiverStrategy,
// Is TelephoneEvent configured with payload type payload_type
bool TelephoneEventPayloadType(const int8_t payload_type) const;
- TelephoneEventHandler* GetTelephoneEventHandler() {
- return this;
- }
+ TelephoneEventHandler* GetTelephoneEventHandler() { return this; }
// Returns true if CNG is configured with payload type payload_type. If so,
// the frequency and cng_payload_type_has_changed are filled in.
@@ -96,13 +94,11 @@ class RTPReceiverAudio : public RTPReceiverStrategy,
int Energy(uint8_t array_of_energy[kRtpCsrcSize]) const override;
private:
-
- int32_t ParseAudioCodecSpecific(
- WebRtcRTPHeader* rtp_header,
- const uint8_t* payload_data,
- size_t payload_length,
- const AudioPayload& audio_specific,
- bool is_red);
+ int32_t ParseAudioCodecSpecific(WebRtcRTPHeader* rtp_header,
+ const uint8_t* payload_data,
+ size_t payload_length,
+ const AudioPayload& audio_specific,
+ bool is_red);
uint32_t last_received_frequency_;
diff --git a/webrtc/modules/rtp_rtcp/source/rtp_receiver_impl.cc b/webrtc/modules/rtp_rtcp/source/rtp_receiver_impl.cc
index e1ebd0c8bb..2e21f230d3 100644
--- a/webrtc/modules/rtp_rtcp/source/rtp_receiver_impl.cc
+++ b/webrtc/modules/rtp_rtcp/source/rtp_receiver_impl.cc
@@ -16,13 +16,12 @@
#include <string.h>
#include "webrtc/base/logging.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_payload_registry.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_payload_registry.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_receiver_strategy.h"
namespace webrtc {
-using RtpUtility::GetCurrentRTP;
using RtpUtility::Payload;
using RtpUtility::StringCompare;
@@ -97,7 +96,7 @@ int32_t RtpReceiverImpl::RegisterReceivePayload(
const char payload_name[RTP_PAYLOAD_NAME_SIZE],
const int8_t payload_type,
const uint32_t frequency,
- const uint8_t channels,
+ const size_t channels,
const uint32_t rate) {
CriticalSectionScoped lock(critical_section_rtp_receiver_.get());
@@ -170,7 +169,7 @@ bool RtpReceiverImpl::IncomingRtpPacket(
int8_t first_payload_byte = payload_length > 0 ? payload[0] : 0;
bool is_red = false;
- if (CheckPayloadChanged(rtp_header, first_payload_byte, is_red,
+ if (CheckPayloadChanged(rtp_header, first_payload_byte, &is_red,
&payload_specific) == -1) {
if (payload_length == 0) {
// OK, keep-alive packet.
@@ -253,7 +252,7 @@ void RtpReceiverImpl::CheckSSRCChanged(const RTPHeader& rtp_header) {
bool new_ssrc = false;
bool re_initialize_decoder = false;
char payload_name[RTP_PAYLOAD_NAME_SIZE];
- uint8_t channels = 1;
+ size_t channels = 1;
uint32_t rate = 0;
{
@@ -276,12 +275,11 @@ void RtpReceiverImpl::CheckSSRCChanged(const RTPHeader& rtp_header) {
if (rtp_header.payloadType == last_received_payload_type) {
re_initialize_decoder = true;
- Payload* payload;
- if (!rtp_payload_registry_->PayloadTypeToPayload(
- rtp_header.payloadType, payload)) {
+ const Payload* payload = rtp_payload_registry_->PayloadTypeToPayload(
+ rtp_header.payloadType);
+ if (!payload) {
return;
}
- assert(payload);
payload_name[RTP_PAYLOAD_NAME_SIZE - 1] = 0;
strncpy(payload_name, payload->name, RTP_PAYLOAD_NAME_SIZE - 1);
if (payload->audio) {
@@ -321,7 +319,7 @@ void RtpReceiverImpl::CheckSSRCChanged(const RTPHeader& rtp_header) {
// last known payload).
int32_t RtpReceiverImpl::CheckPayloadChanged(const RTPHeader& rtp_header,
const int8_t first_payload_byte,
- bool& is_red,
+ bool* is_red,
PayloadUnion* specific_payload) {
bool re_initialize_decoder = false;
@@ -339,7 +337,7 @@ int32_t RtpReceiverImpl::CheckPayloadChanged(const RTPHeader& rtp_header,
if (rtp_payload_registry_->red_payload_type() == payload_type) {
// Get the real codec payload type.
payload_type = first_payload_byte & 0x7f;
- is_red = true;
+ *is_red = true;
if (rtp_payload_registry_->red_payload_type() == payload_type) {
// Invalid payload type, traced by caller. If we proceeded here,
@@ -361,16 +359,16 @@ int32_t RtpReceiverImpl::CheckPayloadChanged(const RTPHeader& rtp_header,
&should_discard_changes);
if (should_discard_changes) {
- is_red = false;
+ *is_red = false;
return 0;
}
- Payload* payload;
- if (!rtp_payload_registry_->PayloadTypeToPayload(payload_type, payload)) {
+ const Payload* payload =
+ rtp_payload_registry_->PayloadTypeToPayload(payload_type);
+ if (!payload) {
// Not a registered payload type.
return -1;
}
- assert(payload);
payload_name[RTP_PAYLOAD_NAME_SIZE - 1] = 0;
strncpy(payload_name, payload->name, RTP_PAYLOAD_NAME_SIZE - 1);
@@ -391,7 +389,7 @@ int32_t RtpReceiverImpl::CheckPayloadChanged(const RTPHeader& rtp_header,
}
} else {
rtp_media_receiver_->GetLastMediaSpecificPayload(specific_payload);
- is_red = false;
+ *is_red = false;
}
} // End critsect.
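The is_red parameter flips from bool& to bool* in the same spirit: output parameters become pointers so the mutation is visible at the call site. A condensed sketch of the two call shapes:

// Reference out-param (old): the write to is_red is invisible here.
//   CheckPayloadChanged(rtp_header, first_payload_byte, is_red, &payload);
// Pointer out-param (new): the & makes the write explicit.
//   CheckPayloadChanged(rtp_header, first_payload_byte, &is_red, &payload);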
diff --git a/webrtc/modules/rtp_rtcp/source/rtp_receiver_impl.h b/webrtc/modules/rtp_rtcp/source/rtp_receiver_impl.h
index 46741d59b4..5cf94c2859 100644
--- a/webrtc/modules/rtp_rtcp/source/rtp_receiver_impl.h
+++ b/webrtc/modules/rtp_rtcp/source/rtp_receiver_impl.h
@@ -12,8 +12,8 @@
#define WEBRTC_MODULES_RTP_RTCP_SOURCE_RTP_RECEIVER_IMPL_H_
#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_receiver.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_receiver.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_receiver_strategy.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
#include "webrtc/typedefs.h"
@@ -36,7 +36,7 @@ class RtpReceiverImpl : public RtpReceiver {
int32_t RegisterReceivePayload(const char payload_name[RTP_PAYLOAD_NAME_SIZE],
const int8_t payload_type,
const uint32_t frequency,
- const uint8_t channels,
+ const size_t channels,
const uint32_t rate) override;
int32_t DeRegisterReceivePayload(const int8_t payload_type) override;
@@ -71,7 +71,7 @@ class RtpReceiverImpl : public RtpReceiver {
void CheckCSRC(const WebRtcRTPHeader& rtp_header);
int32_t CheckPayloadChanged(const RTPHeader& rtp_header,
const int8_t first_payload_byte,
- bool& is_red,
+ bool* is_red,
PayloadUnion* payload);
Clock* clock_;
diff --git a/webrtc/modules/rtp_rtcp/source/rtp_receiver_strategy.h b/webrtc/modules/rtp_rtcp/source/rtp_receiver_strategy.h
index 37c3e6e49a..0f7ad30e87 100644
--- a/webrtc/modules/rtp_rtcp/source/rtp_receiver_strategy.h
+++ b/webrtc/modules/rtp_rtcp/source/rtp_receiver_strategy.h
@@ -12,8 +12,8 @@
#define WEBRTC_MODULES_RTP_RTCP_SOURCE_RTP_RECEIVER_STRATEGY_H_
#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_utility.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
#include "webrtc/typedefs.h"
@@ -95,7 +95,7 @@ class RTPReceiverStrategy {
// Note: Implementations may call the callback for other reasons than calls
// to ParseRtpPacket, for instance if the implementation somehow recovers a
// packet.
- RTPReceiverStrategy(RtpData* data_callback);
+ explicit RTPReceiverStrategy(RtpData* data_callback);
rtc::scoped_ptr<CriticalSectionWrapper> crit_sect_;
PayloadUnion last_payload_;
diff --git a/webrtc/modules/rtp_rtcp/source/rtp_receiver_video.cc b/webrtc/modules/rtp_rtcp/source/rtp_receiver_video.cc
index 1af2d4857e..53051dd321 100644
--- a/webrtc/modules/rtp_rtcp/source/rtp_receiver_video.cc
+++ b/webrtc/modules/rtp_rtcp/source/rtp_receiver_video.cc
@@ -16,8 +16,8 @@
#include "webrtc/base/checks.h"
#include "webrtc/base/logging.h"
#include "webrtc/base/trace_event.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_cvo.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_payload_registry.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_cvo.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_payload_registry.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_format.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_format_video_generic.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_utility.h"
diff --git a/webrtc/modules/rtp_rtcp/source/rtp_receiver_video.h b/webrtc/modules/rtp_rtcp/source/rtp_receiver_video.h
index 23128df6e1..56f761a2e1 100644
--- a/webrtc/modules/rtp_rtcp/source/rtp_receiver_video.h
+++ b/webrtc/modules/rtp_rtcp/source/rtp_receiver_video.h
@@ -12,7 +12,7 @@
#define WEBRTC_MODULES_RTP_RTCP_SOURCE_RTP_RECEIVER_VIDEO_H_
#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "webrtc/modules/rtp_rtcp/source/bitrate.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_receiver_strategy.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_utility.h"
diff --git a/webrtc/modules/rtp_rtcp/source/rtp_rtcp_config.h b/webrtc/modules/rtp_rtcp/source/rtp_rtcp_config.h
index 7cfebd91a8..a2cd52736f 100644
--- a/webrtc/modules/rtp_rtcp/source/rtp_rtcp_config.h
+++ b/webrtc/modules/rtp_rtcp/source/rtp_rtcp_config.h
@@ -13,36 +13,37 @@
// Configuration file for RTP utilities (RTPSender, RTPReceiver ...)
namespace webrtc {
-enum { NACK_BYTECOUNT_SIZE = 60}; // size of our NACK history
+enum { NACK_BYTECOUNT_SIZE = 60 }; // size of our NACK history
// A sanity for the NACK list parsing at the send-side.
enum { kSendSideNackListSizeSanity = 20000 };
enum { kDefaultMaxReorderingThreshold = 50 }; // In sequence numbers.
enum { kRtcpMaxNackFields = 253 };
-enum { RTCP_INTERVAL_VIDEO_MS = 1000 };
-enum { RTCP_INTERVAL_AUDIO_MS = 5000 };
-enum { RTCP_SEND_BEFORE_KEY_FRAME_MS= 100 };
-enum { RTCP_MAX_REPORT_BLOCKS = 31}; // RFC 3550 page 37
-enum { RTCP_MIN_FRAME_LENGTH_MS = 17};
-enum { kRtcpAppCode_DATA_SIZE = 32*4}; // multiple of 4, this is not a limitation of the size
-enum { RTCP_RPSI_DATA_SIZE = 30};
-enum { RTCP_NUMBER_OF_SR = 60 };
-
-enum { MAX_NUMBER_OF_TEMPORAL_ID = 8 }; // RFC
-enum { MAX_NUMBER_OF_DEPENDENCY_QUALITY_ID = 128 };// RFC
+enum { RTCP_INTERVAL_VIDEO_MS = 1000 };
+enum { RTCP_INTERVAL_AUDIO_MS = 5000 };
+enum { RTCP_SEND_BEFORE_KEY_FRAME_MS = 100 };
+enum { RTCP_MAX_REPORT_BLOCKS = 31 }; // RFC 3550 page 37
+enum { RTCP_MIN_FRAME_LENGTH_MS = 17 };
+enum {
+ kRtcpAppCode_DATA_SIZE = 32 * 4
+}; // multiple of 4, this is not a limitation of the size
+enum { RTCP_RPSI_DATA_SIZE = 30 };
+enum { RTCP_NUMBER_OF_SR = 60 };
+
+enum { MAX_NUMBER_OF_TEMPORAL_ID = 8 }; // RFC
+enum { MAX_NUMBER_OF_DEPENDENCY_QUALITY_ID = 128 }; // RFC
enum { MAX_NUMBER_OF_REMB_FEEDBACK_SSRCS = 255 };
-enum { BW_HISTORY_SIZE = 35};
+enum { BW_HISTORY_SIZE = 35 };
-#define MIN_AUDIO_BW_MANAGEMENT_BITRATE 6
-#define MIN_VIDEO_BW_MANAGEMENT_BITRATE 30
+#define MIN_AUDIO_BW_MANAGEMENT_BITRATE 6
+#define MIN_VIDEO_BW_MANAGEMENT_BITRATE 30
-enum { DTMF_OUTBAND_MAX = 20};
+enum { DTMF_OUTBAND_MAX = 20 };
enum { RTP_MAX_BURST_SLEEP_TIME = 500 };
enum { RTP_AUDIO_LEVEL_UNIQUE_ID = 0xbede };
-enum { RTP_MAX_PACKETS_PER_FRAME= 512 }; // must be multiple of 32
+enum { RTP_MAX_PACKETS_PER_FRAME = 512 }; // must be multiple of 32
} // namespace webrtc
-
-#endif // WEBRTC_MODULES_RTP_RTCP_SOURCE_RTP_RTCP_CONFIG_H_
+#endif // WEBRTC_MODULES_RTP_RTCP_SOURCE_RTP_RTCP_CONFIG_H_
diff --git a/webrtc/modules/rtp_rtcp/source/rtp_rtcp_impl.cc b/webrtc/modules/rtp_rtcp/source/rtp_rtcp_impl.cc
index 451f8bfa93..450eed698e 100644
--- a/webrtc/modules/rtp_rtcp/source/rtp_rtcp_impl.cc
+++ b/webrtc/modules/rtp_rtcp/source/rtp_rtcp_impl.cc
@@ -183,8 +183,13 @@ int32_t ModuleRtpRtcpImpl::Process() {
set_rtt_ms(rtt_stats_->LastProcessedRtt());
}
- if (rtcp_sender_.TimeToSendRTCPReport())
- rtcp_sender_.SendRTCP(GetFeedbackState(), kRtcpReport);
+ // For sending streams, make sure not to send an SR before media has been sent.
+ if (rtcp_sender_.TimeToSendRTCPReport()) {
+ RTCPSender::FeedbackState state = GetFeedbackState();
+ // Prevent a sending stream from sending an SR before any media has been sent.
+ if (!rtcp_sender_.Sending() || state.packets_sent > 0)
+ rtcp_sender_.SendRTCP(state, kRtcpReport);
+ }
if (UpdateRTCPReceiveInformationTimers()) {
// A receiver has timed out
@@ -402,6 +407,7 @@ int32_t ModuleRtpRtcpImpl::SendOutgoingData(
const RTPFragmentationHeader* fragmentation,
const RTPVideoHeader* rtp_video_hdr) {
rtcp_sender_.SetLastRtpTime(time_stamp, capture_time_ms);
+ // Make sure an RTCP report isn't queued behind a key frame.
if (rtcp_sender_.TimeToSendRTCPReport(kVideoFrameKey == frame_type)) {
rtcp_sender_.SendRTCP(GetFeedbackState(), kRtcpReport);
}
@@ -794,9 +800,8 @@ int32_t ModuleRtpRtcpImpl::SetSendREDPayloadType(
}
// Get payload type for Redundant Audio Data RFC 2198.
-int32_t ModuleRtpRtcpImpl::SendREDPayloadType(
- int8_t& payload_type) const {
- return rtp_sender_.RED(&payload_type);
+int32_t ModuleRtpRtcpImpl::SendREDPayloadType(int8_t* payload_type) const {
+ return rtp_sender_.RED(payload_type);
}
void ModuleRtpRtcpImpl::SetTargetSendBitrate(uint32_t bitrate_bps) {
@@ -832,11 +837,10 @@ void ModuleRtpRtcpImpl::SetGenericFECStatus(
rtp_sender_.SetGenericFECStatus(enable, payload_type_red, payload_type_fec);
}
-void ModuleRtpRtcpImpl::GenericFECStatus(bool& enable,
- uint8_t& payload_type_red,
- uint8_t& payload_type_fec) {
- rtp_sender_.GenericFECStatus(&enable, &payload_type_red,
- &payload_type_fec);
+void ModuleRtpRtcpImpl::GenericFECStatus(bool* enable,
+ uint8_t* payload_type_red,
+ uint8_t* payload_type_fec) {
+ rtp_sender_.GenericFECStatus(enable, payload_type_red, payload_type_fec);
}
int32_t ModuleRtpRtcpImpl::SetFecParameters(
@@ -952,8 +956,8 @@ bool ModuleRtpRtcpImpl::UpdateRTCPReceiveInformationTimers() {
}
// Called from RTCPsender.
-int32_t ModuleRtpRtcpImpl::BoundingSet(bool& tmmbr_owner,
- TMMBRSet*& bounding_set) {
+int32_t ModuleRtpRtcpImpl::BoundingSet(bool* tmmbr_owner,
+ TMMBRSet* bounding_set) {
return rtcp_receiver_.BoundingSet(tmmbr_owner, bounding_set);
}
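On the Process() change above: a receiver-only module (Sending() == false) keeps sending periodic reports, while a sending module holds its report back until at least one media packet has gone out, since an SR's sender info would otherwise be empty. Condensed, the gate behaves as:

// Sending() | packets_sent > 0 | report sent?
//   false   |       any        |   yes (RR)
//   true    |       yes        |   yes (SR)
//   true    |       no         |   no  (wait for media)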
diff --git a/webrtc/modules/rtp_rtcp/source/rtp_rtcp_impl.h b/webrtc/modules/rtp_rtcp/source/rtp_rtcp_impl.h
index c9b6686c0a..04e09c1217 100644
--- a/webrtc/modules/rtp_rtcp/source/rtp_rtcp_impl.h
+++ b/webrtc/modules/rtp_rtcp/source/rtp_rtcp_impl.h
@@ -12,10 +12,12 @@
#define WEBRTC_MODULES_RTP_RTCP_SOURCE_RTP_RTCP_IMPL_H_
#include <list>
+#include <set>
+#include <utility>
#include <vector>
#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp.h"
#include "webrtc/modules/rtp_rtcp/source/packet_loss_stats.h"
#include "webrtc/modules/rtp_rtcp/source/rtcp_receiver.h"
#include "webrtc/modules/rtp_rtcp/source/rtcp_sender.h"
@@ -258,7 +260,7 @@ class ModuleRtpRtcpImpl : public RtpRtcp {
int32_t SetSendREDPayloadType(int8_t payload_type) override;
// Get payload type for Redundant Audio Data RFC 2198.
- int32_t SendREDPayloadType(int8_t& payload_type) const override;
+ int32_t SendREDPayloadType(int8_t* payload_type) const override;
// Store the audio level in d_bov for header-extension-for-audio-level-
// indication.
@@ -280,9 +282,9 @@ class ModuleRtpRtcpImpl : public RtpRtcp {
uint8_t payload_type_red,
uint8_t payload_type_fec) override;
- void GenericFECStatus(bool& enable,
- uint8_t& payload_type_red,
- uint8_t& payload_type_fec) override;
+ void GenericFECStatus(bool* enable,
+ uint8_t* payload_type_red,
+ uint8_t* payload_type_fec) override;
int32_t SetFecParameters(const FecProtectionParams* delta_params,
const FecProtectionParams* key_params) override;
@@ -293,7 +295,7 @@ class ModuleRtpRtcpImpl : public RtpRtcp {
bool LastReceivedXrReferenceTimeInfo(RtcpReceiveTimeInfo* info) const;
- virtual int32_t BoundingSet(bool& tmmbr_owner, TMMBRSet*& bounding_set_rec);
+ int32_t BoundingSet(bool* tmmbr_owner, TMMBRSet* bounding_set_rec);
void BitrateSent(uint32_t* total_rate,
uint32_t* video_rate,
diff --git a/webrtc/modules/rtp_rtcp/source/rtp_rtcp_impl_unittest.cc b/webrtc/modules/rtp_rtcp/source/rtp_rtcp_impl_unittest.cc
index 4c94764ee6..8329f603f9 100644
--- a/webrtc/modules/rtp_rtcp/source/rtp_rtcp_impl_unittest.cc
+++ b/webrtc/modules/rtp_rtcp/source/rtp_rtcp_impl_unittest.cc
@@ -8,13 +8,17 @@
* be found in the AUTHORS file in the root of the source tree.
*/
+#include <map>
+#include <set>
+
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/common_types.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_header_parser.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_header_parser.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "webrtc/modules/rtp_rtcp/source/rtcp_packet.h"
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/nack.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_rtcp_impl.h"
#include "webrtc/system_wrappers/include/scoped_vector.h"
#include "webrtc/test/rtcp_packet_parser.h"
@@ -94,7 +98,7 @@ class SendTransport : public Transport,
class RtpRtcpModule : public RtcpPacketTypeCounterObserver {
public:
- RtpRtcpModule(SimulatedClock* clock)
+ explicit RtpRtcpModule(SimulatedClock* clock)
: receive_statistics_(ReceiveStatistics::Create(clock)) {
RtpRtcp::Configuration config;
config.audio = false;
@@ -346,6 +350,27 @@ TEST_F(RtpRtcpImplTest, RttForReceiverOnly) {
EXPECT_EQ(2 * kOneWayNetworkDelayMs, receiver_.impl_->rtt_ms());
}
+TEST_F(RtpRtcpImplTest, NoSrBeforeMedia) {
+ // Ignore fake transport delays in this test.
+ sender_.transport_.SimulateNetworkDelay(0, &clock_);
+ receiver_.transport_.SimulateNetworkDelay(0, &clock_);
+
+ sender_.impl_->Process();
+ EXPECT_EQ(-1, sender_.RtcpSent().first_packet_time_ms);
+
+ // Verify no SR is sent before media has been sent, RR should still be sent
+ // from the receiving module though.
+ clock_.AdvanceTimeMilliseconds(2000);
+ int64_t current_time = clock_.TimeInMilliseconds();
+ sender_.impl_->Process();
+ receiver_.impl_->Process();
+ EXPECT_EQ(-1, sender_.RtcpSent().first_packet_time_ms);
+ EXPECT_EQ(receiver_.RtcpSent().first_packet_time_ms, current_time);
+
+ SendFrame(&sender_, kBaseLayerTid);
+ EXPECT_EQ(sender_.RtcpSent().first_packet_time_ms, current_time);
+}
+
TEST_F(RtpRtcpImplTest, RtcpPacketTypeCounter_Nack) {
EXPECT_EQ(-1, receiver_.RtcpSent().first_packet_time_ms);
EXPECT_EQ(-1, sender_.RtcpReceived().first_packet_time_ms);
@@ -522,5 +547,4 @@ TEST_F(RtpRtcpImplTest, UniqueNackRequests) {
EXPECT_EQ(6U, sender_.RtcpReceived().unique_nack_requests);
EXPECT_EQ(75, sender_.RtcpReceived().UniqueNackRequestsInPercent());
}
-
} // namespace webrtc
diff --git a/webrtc/modules/rtp_rtcp/source/rtp_sender.cc b/webrtc/modules/rtp_rtcp/source/rtp_sender.cc
index 50f476829d..f4933afdd9 100644
--- a/webrtc/modules/rtp_rtcp/source/rtp_sender.cc
+++ b/webrtc/modules/rtp_rtcp/source/rtp_sender.cc
@@ -11,15 +11,17 @@
#include "webrtc/modules/rtp_rtcp/source/rtp_sender.h"
#include <stdlib.h> // srand
+#include <algorithm>
#include <utility>
#include "webrtc/base/checks.h"
#include "webrtc/base/logging.h"
#include "webrtc/base/trace_event.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_cvo.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_cvo.h"
#include "webrtc/modules/rtp_rtcp/source/byte_io.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_sender_audio.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_sender_video.h"
+#include "webrtc/modules/rtp_rtcp/source/time_util.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
#include "webrtc/system_wrappers/include/tick_util.h"
@@ -33,6 +35,7 @@ static const uint32_t kAbsSendTimeFraction = 18;
namespace {
const size_t kRtpHeaderLength = 12;
+const uint16_t kMaxInitRtpSeqNumber = 32767; // 2^15 - 1.
const char* FrameTypeToString(FrameType frame_type) {
switch (frame_type) {
@@ -125,6 +128,7 @@ RTPSender::RTPSender(
// TickTime.
clock_delta_ms_(clock_->TimeInMilliseconds() -
TickTime::MillisecondTimestamp()),
+ random_(clock_->TimeInMicroseconds()),
bitrates_(new BitrateAggregator(bitrate_callback)),
total_bitrate_sent_(clock, bitrates_->total_bitrate_observer()),
audio_configured_(audio),
@@ -182,8 +186,8 @@ RTPSender::RTPSender(
ssrc_rtx_ = ssrc_db_.CreateSSRC(); // Can't be 0.
bitrates_->set_ssrc(ssrc_);
// Random start, 16 bits. Can't be 0.
- sequence_number_rtx_ = static_cast<uint16_t>(rand() + 1) & 0x7FFF;
- sequence_number_ = static_cast<uint16_t>(rand() + 1) & 0x7FFF;
+ sequence_number_rtx_ = random_.Rand(1, kMaxInitRtpSeqNumber);
+ sequence_number_ = random_.Rand(1, kMaxInitRtpSeqNumber);
}
RTPSender::~RTPSender() {
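The initialization above replaces rand() with a clock-seeded webrtc::Random member, drawing from [1, kMaxInitRtpSeqNumber] so the starting sequence number is nonzero and leaves half the 16-bit space before the counter wraps. The pattern, reduced to a sketch (Rand(low, high) used exactly as in the patch):

// Seed once from the wall clock, then draw bounded values.
webrtc::Random random(clock->TimeInMicroseconds());
uint16_t seq = static_cast<uint16_t>(random.Rand(1, kMaxInitRtpSeqNumber));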
@@ -292,7 +296,7 @@ int32_t RTPSender::RegisterPayload(
const char payload_name[RTP_PAYLOAD_NAME_SIZE],
int8_t payload_number,
uint32_t frequency,
- uint8_t channels,
+ size_t channels,
uint32_t rate) {
assert(payload_name);
CriticalSectionScoped cs(send_critsect_.get());
@@ -323,11 +327,11 @@ int32_t RTPSender::RegisterPayload(
return -1;
}
int32_t ret_val = 0;
- RtpUtility::Payload* payload = NULL;
+ RtpUtility::Payload* payload = nullptr;
if (audio_configured_) {
// TODO(mflodman): Change to CreateAudioPayload and make static.
ret_val = audio_->RegisterAudioPayload(payload_name, payload_number,
- frequency, channels, rate, payload);
+ frequency, channels, rate, &payload);
} else {
payload = video_->CreateVideoPayload(payload_name, payload_number, rate);
}
@@ -452,7 +456,7 @@ int32_t RTPSender::CheckPayloadType(int8_t payload_type,
}
if (audio_configured_) {
int8_t red_pl_type = -1;
- if (audio_->RED(red_pl_type) == 0) {
+ if (audio_->RED(&red_pl_type) == 0) {
// We have configured RED.
if (red_pl_type == payload_type) {
// And it's a match...
@@ -469,7 +473,8 @@ int32_t RTPSender::CheckPayloadType(int8_t payload_type,
std::map<int8_t, RtpUtility::Payload*>::iterator it =
payload_type_map_.find(payload_type);
if (it == payload_type_map_.end()) {
- LOG(LS_WARNING) << "Payload type " << payload_type << " not registered.";
+ LOG(LS_WARNING) << "Payload type " << static_cast<int>(payload_type)
+ << " not registered.";
return -1;
}
SetSendPayloadType(payload_type);
@@ -512,7 +517,8 @@ int32_t RTPSender::SendOutgoingData(FrameType frame_type,
}
RtpVideoCodecTypes video_type = kRtpVideoGeneric;
if (CheckPayloadType(payload_type, &video_type) != 0) {
- LOG(LS_ERROR) << "Don't send data with unknown payload type.";
+ LOG(LS_ERROR) << "Don't send data with unknown payload type: "
+ << static_cast<int>(payload_type) << ".";
return -1;
}
@@ -573,7 +579,7 @@ size_t RTPSender::TrySendRedundantPayloads(size_t bytes_to_send) {
break;
RtpUtility::RtpHeaderParser rtp_parser(buffer, length);
RTPHeader rtp_header;
- rtp_parser.Parse(rtp_header);
+ rtp_parser.Parse(&rtp_header);
bytes_left -= static_cast<int>(length - rtp_header.headerLength);
}
return bytes_to_send - bytes_left;
@@ -583,8 +589,7 @@ void RTPSender::BuildPaddingPacket(uint8_t* packet,
size_t header_length,
size_t padding_length) {
packet[0] |= 0x20; // Set padding bit.
- int32_t *data =
- reinterpret_cast<int32_t *>(&(packet[header_length]));
+ int32_t* data = reinterpret_cast<int32_t*>(&(packet[header_length]));
// Fill data buffer with random data.
for (size_t j = 0; j < (padding_length >> 2); ++j) {
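
The hunk shows BuildPaddingPacket setting the P bit and filling the payload with random 32-bit words; per RFC 3550 section 5.1 the last padding octet must carry the padding count, which is presumably handled just below the shown context. A self-contained sketch of the whole shape (rand() stands in for the real generator):

#include <cstddef>
#include <cstdint>
#include <cstdlib>

void FillPadding(uint8_t* packet, size_t header_length, size_t padding_length) {
  packet[0] |= 0x20;  // Set the RTP P (padding) bit.
  // Arbitrary filler; receivers ignore everything but the final octet.
  for (size_t i = 0; i + 1 < padding_length; ++i)
    packet[header_length + i] = static_cast<uint8_t>(rand());
  // RFC 3550 5.1: the last padding octet carries the padding count.
  packet[header_length + padding_length - 1] =
      static_cast<uint8_t>(padding_length);
}
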
@@ -665,7 +670,7 @@ size_t RTPSender::SendPadData(size_t bytes,
RtpUtility::RtpHeaderParser rtp_parser(padding_packet, length);
RTPHeader rtp_header;
- rtp_parser.Parse(rtp_header);
+ rtp_parser.Parse(&rtp_header);
if (capture_time_ms > 0) {
UpdateTransmissionTimeOffset(
@@ -717,7 +722,7 @@ int32_t RTPSender::ReSendPacket(uint16_t packet_id, int64_t min_resend_time) {
if (paced_sender_) {
RtpUtility::RtpHeaderParser rtp_parser(data_buffer, length);
RTPHeader header;
- if (!rtp_parser.Parse(header)) {
+ if (!rtp_parser.Parse(&header)) {
assert(false);
return -1;
}
@@ -725,7 +730,7 @@ int32_t RTPSender::ReSendPacket(uint16_t packet_id, int64_t min_resend_time) {
// TickTime.
int64_t corrected_capture_tims_ms = capture_time_ms + clock_delta_ms_;
paced_sender_->InsertPacket(
- RtpPacketSender::kHighPriority, header.ssrc, header.sequenceNumber,
+ RtpPacketSender::kNormalPriority, header.ssrc, header.sequenceNumber,
corrected_capture_tims_ms, length - header.headerLength, true);
return length;
@@ -903,11 +908,11 @@ bool RTPSender::PrepareAndSendPacket(uint8_t* buffer,
int64_t capture_time_ms,
bool send_over_rtx,
bool is_retransmit) {
- uint8_t *buffer_to_send_ptr = buffer;
+ uint8_t* buffer_to_send_ptr = buffer;
RtpUtility::RtpHeaderParser rtp_parser(buffer, length);
RTPHeader rtp_header;
- rtp_parser.Parse(rtp_header);
+ rtp_parser.Parse(&rtp_header);
if (!is_retransmit && rtp_header.markerBit) {
TRACE_EVENT_ASYNC_END0(TRACE_DISABLED_BY_DEFAULT("webrtc_rtp"), "PacedSend",
capture_time_ms);
@@ -996,14 +1001,14 @@ bool RTPSender::IsFecPacket(const uint8_t* buffer,
bool fec_enabled;
uint8_t pt_red;
uint8_t pt_fec;
- video_->GenericFECStatus(fec_enabled, pt_red, pt_fec);
+ video_->GenericFECStatus(&fec_enabled, &pt_red, &pt_fec);
return fec_enabled &&
header.payloadType == pt_red &&
buffer[header.headerLength] == pt_fec;
}
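
GenericFECStatus now returns its three results through pointers; IsFecPacket uses them to detect ULPFEC wrapped in RED (RFC 2198 carrying RFC 5109): the packet's payload type must be the RED PT, and the first payload octet, the RED block header's F|PT byte with F == 0 for a sole block, must name the FEC PT. A condensed sketch (parameter names hypothetical):

#include <cstddef>
#include <cstdint>

// The first payload octet is the RED block header's F|PT byte, and F == 0
// when the packet carries a sole block, so masking it off recovers the PT.
bool LooksLikeRedWrappedFec(const uint8_t* packet, size_t header_length,
                            uint8_t packet_payload_type, uint8_t red_pt,
                            uint8_t fec_pt) {
  return packet_payload_type == red_pt &&
         (packet[header_length] & 0x7f) == fec_pt;
}
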
size_t RTPSender::TimeToSendPadding(size_t bytes) {
- if (bytes == 0)
+ if (audio_configured_ || bytes == 0)
return 0;
{
CriticalSectionScoped cs(send_critsect_.get());
@@ -1026,7 +1031,7 @@ int32_t RTPSender::SendToNetwork(uint8_t* buffer,
RtpUtility::RtpHeaderParser rtp_parser(buffer,
payload_length + rtp_header_length);
RTPHeader rtp_header;
- rtp_parser.Parse(rtp_header);
+ rtp_parser.Parse(&rtp_header);
int64_t now_ms = clock_->TimeInMilliseconds();
@@ -1169,7 +1174,7 @@ size_t RTPSender::CreateRtpHeader(uint8_t* header,
int32_t rtp_header_length = kRtpHeaderLength;
if (csrcs.size() > 0) {
- uint8_t *ptr = &header[rtp_header_length];
+ uint8_t* ptr = &header[rtp_header_length];
for (size_t i = 0; i < csrcs.size(); ++i) {
ByteWriter<uint32_t>::WriteBigEndian(ptr, csrcs[i]);
ptr += 4;
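
This hunk only reformats the pointer declaration, but the surrounding code is the CSRC append path: after the fixed 12-byte header each CSRC goes out as a big-endian 32-bit word, and the CC field in the first header byte has to carry the count (so at most 15 fit). A standalone sketch of that layout:

#include <cstddef>
#include <cstdint>
#include <vector>

size_t AppendCsrcs(uint8_t* header, const std::vector<uint32_t>& csrcs) {
  size_t offset = 12;  // Fixed RTP header length.
  // CC field: low nibble of byte 0 holds the CSRC count.
  header[0] = static_cast<uint8_t>((header[0] & 0xf0) | (csrcs.size() & 0x0f));
  for (uint32_t csrc : csrcs) {
    header[offset++] = static_cast<uint8_t>(csrc >> 24);  // Big-endian word.
    header[offset++] = static_cast<uint8_t>(csrc >> 16);
    header[offset++] = static_cast<uint8_t>(csrc >> 8);
    header[offset++] = static_cast<uint8_t>(csrc);
  }
  return offset;  // New header length in bytes.
}
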
@@ -1638,7 +1643,7 @@ uint16_t RTPSender::UpdateTransportSequenceNumber(
void RTPSender::SetSendingStatus(bool enabled) {
if (enabled) {
uint32_t frequency_hz = SendPayloadFrequency();
- uint32_t RTPtime = RtpUtility::GetCurrentRTP(clock_, frequency_hz);
+ uint32_t RTPtime = CurrentRtp(*clock_, frequency_hz);
// Will be ignored if it's already configured via API.
SetStartTimestamp(RTPtime, false);
@@ -1653,8 +1658,7 @@ void RTPSender::SetSendingStatus(bool enabled) {
// Don't initialize seq number if SSRC passed externally.
if (!sequence_number_forced_ && !ssrc_forced_) {
// Generate a new sequence number.
- sequence_number_ =
- rand() / (RAND_MAX / MAX_INIT_RTP_SEQ_NUMBER); // NOLINT
+ sequence_number_ = random_.Rand(1, kMaxInitRtpSeqNumber);
}
}
}
@@ -1716,8 +1720,7 @@ void RTPSender::SetSSRC(uint32_t ssrc) {
ssrc_ = ssrc;
bitrates_->set_ssrc(ssrc_);
if (!sequence_number_forced_) {
- sequence_number_ =
- rand() / (RAND_MAX / MAX_INIT_RTP_SEQ_NUMBER); // NOLINT
+ sequence_number_ = random_.Rand(1, kMaxInitRtpSeqNumber);
}
}
@@ -1775,7 +1778,7 @@ int32_t RTPSender::RED(int8_t *payload_type) const {
if (!audio_configured_) {
return -1;
}
- return audio_->RED(*payload_type);
+ return audio_->RED(payload_type);
}
RtpVideoCodecTypes RTPSender::VideoCodecType() const {
@@ -1801,7 +1804,7 @@ void RTPSender::GenericFECStatus(bool* enable,
uint8_t* payload_type_red,
uint8_t* payload_type_fec) const {
RTC_DCHECK(!audio_configured_);
- video_->GenericFECStatus(*enable, *payload_type_red, *payload_type_fec);
+ video_->GenericFECStatus(enable, payload_type_red, payload_type_fec);
}
int32_t RTPSender::SetFecParameters(
@@ -1823,7 +1826,7 @@ void RTPSender::BuildRtxPacket(uint8_t* buffer, size_t* length,
reinterpret_cast<const uint8_t*>(buffer), *length);
RTPHeader rtp_header;
- rtp_parser.Parse(rtp_header);
+ rtp_parser.Parse(&rtp_header);
// Add original RTP header.
memcpy(data_buffer_rtx, buffer, rtp_header.headerLength);
@@ -1836,7 +1839,7 @@ void RTPSender::BuildRtxPacket(uint8_t* buffer, size_t* length,
}
// Replace sequence number.
- uint8_t *ptr = data_buffer_rtx + 2;
+ uint8_t* ptr = data_buffer_rtx + 2;
ByteWriter<uint16_t>::WriteBigEndian(ptr, sequence_number_rtx_++);
// Replace SSRC.
diff --git a/webrtc/modules/rtp_rtcp/source/rtp_sender.h b/webrtc/modules/rtp_rtcp/source/rtp_sender.h
index a134370c76..3c62336507 100644
--- a/webrtc/modules/rtp_rtcp/source/rtp_sender.h
+++ b/webrtc/modules/rtp_rtcp/source/rtp_sender.h
@@ -10,14 +10,16 @@
#ifndef WEBRTC_MODULES_RTP_RTCP_SOURCE_RTP_SENDER_H_
#define WEBRTC_MODULES_RTP_RTCP_SOURCE_RTP_SENDER_H_
-#include <assert.h>
-#include <math.h>
+#include <list>
#include <map>
+#include <utility>
+#include <vector>
+#include "webrtc/base/random.h"
#include "webrtc/base/thread_annotations.h"
#include "webrtc/common_types.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "webrtc/modules/rtp_rtcp/source/bitrate.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_header_extension.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_packet_history.h"
@@ -26,8 +28,6 @@
#include "webrtc/modules/rtp_rtcp/source/ssrc_database.h"
#include "webrtc/transport.h"
-#define MAX_INIT_RTP_SEQ_NUMBER 32767 // 2^15 -1.
-
namespace webrtc {
class BitrateAggregator;
@@ -116,7 +116,7 @@ class RTPSender : public RTPSenderInterface {
int32_t RegisterPayload(
const char payload_name[RTP_PAYLOAD_NAME_SIZE],
const int8_t payload_type, const uint32_t frequency,
- const uint8_t channels, const uint32_t rate);
+ const size_t channels, const uint32_t rate);
int32_t DeRegisterSendPayload(const int8_t payload_type);
@@ -163,7 +163,7 @@ class RTPSender : public RTPSenderInterface {
int32_t SetTransportSequenceNumber(uint16_t sequence_number);
int32_t RegisterRtpHeaderExtension(RTPExtensionType type, uint8_t id);
- virtual bool IsRtpHeaderExtensionRegistered(RTPExtensionType type) override;
+ bool IsRtpHeaderExtensionRegistered(RTPExtensionType type) override;
int32_t DeregisterRtpHeaderExtension(RTPExtensionType type);
size_t RtpHeaderExtensionTotalLength() const;
@@ -202,10 +202,10 @@ class RTPSender : public RTPSenderInterface {
bool is_voiced,
uint8_t dBov) const;
- virtual bool UpdateVideoRotation(uint8_t* rtp_packet,
- size_t rtp_packet_length,
- const RTPHeader& rtp_header,
- VideoRotation rotation) const override;
+ bool UpdateVideoRotation(uint8_t* rtp_packet,
+ size_t rtp_packet_length,
+ const RTPHeader& rtp_header,
+ VideoRotation rotation) const override;
bool TimeToSendPacket(uint16_t sequence_number, int64_t capture_time_ms,
bool retransmission);
@@ -386,6 +386,7 @@ class RTPSender : public RTPSenderInterface {
Clock* clock_;
int64_t clock_delta_ms_;
+ Random random_ GUARDED_BY(send_critsect_);
rtc::scoped_ptr<BitrateAggregator> bitrates_;
Bitrate total_bitrate_sent_;
diff --git a/webrtc/modules/rtp_rtcp/source/rtp_sender_audio.cc b/webrtc/modules/rtp_rtcp/source/rtp_sender_audio.cc
index 1fc9a89ce1..2aa4961cdc 100644
--- a/webrtc/modules/rtp_rtcp/source/rtp_sender_audio.cc
+++ b/webrtc/modules/rtp_rtcp/source/rtp_sender_audio.cc
@@ -10,12 +10,12 @@
#include "webrtc/modules/rtp_rtcp/source/rtp_sender_audio.h"
-#include <assert.h> //assert
-#include <string.h> //memcpy
+#include <string.h>
#include "webrtc/base/trace_event.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "webrtc/modules/rtp_rtcp/source/byte_io.h"
+#include "webrtc/system_wrappers/include/tick_util.h"
namespace webrtc {
@@ -47,8 +47,7 @@ RTPSenderAudio::RTPSenderAudio(Clock* clock,
_lastPayloadType(-1),
_audioLevel_dBov(0) {}
-RTPSenderAudio::~RTPSenderAudio() {
-}
+RTPSenderAudio::~RTPSenderAudio() {}
int RTPSenderAudio::AudioFrequency() const {
return kDtmfFrequencyHz;
@@ -56,22 +55,20 @@ int RTPSenderAudio::AudioFrequency() const {
// set audio packet size, used to determine when it's time to send a DTMF packet
// in silence (CNG)
-int32_t
-RTPSenderAudio::SetAudioPacketSize(const uint16_t packetSizeSamples)
-{
- CriticalSectionScoped cs(_sendAudioCritsect.get());
+int32_t RTPSenderAudio::SetAudioPacketSize(uint16_t packetSizeSamples) {
+ CriticalSectionScoped cs(_sendAudioCritsect.get());
- _packetSizeSamples = packetSizeSamples;
- return 0;
+ _packetSizeSamples = packetSizeSamples;
+ return 0;
}
int32_t RTPSenderAudio::RegisterAudioPayload(
const char payloadName[RTP_PAYLOAD_NAME_SIZE],
const int8_t payloadType,
const uint32_t frequency,
- const uint8_t channels,
+ const size_t channels,
const uint32_t rate,
- RtpUtility::Payload*& payload) {
+ RtpUtility::Payload** payload) {
if (RtpUtility::StringCompare(payloadName, "cn", 2)) {
CriticalSectionScoped cs(_sendAudioCritsect.get());
// we can have multiple CNG payload types
@@ -99,72 +96,65 @@ int32_t RTPSenderAudio::RegisterAudioPayload(
return 0;
// The default timestamp rate is 8000 Hz, but other rates may be defined.
}
- payload = new RtpUtility::Payload;
- payload->typeSpecific.Audio.frequency = frequency;
- payload->typeSpecific.Audio.channels = channels;
- payload->typeSpecific.Audio.rate = rate;
- payload->audio = true;
- payload->name[RTP_PAYLOAD_NAME_SIZE - 1] = '\0';
- strncpy(payload->name, payloadName, RTP_PAYLOAD_NAME_SIZE - 1);
+ *payload = new RtpUtility::Payload;
+ (*payload)->typeSpecific.Audio.frequency = frequency;
+ (*payload)->typeSpecific.Audio.channels = channels;
+ (*payload)->typeSpecific.Audio.rate = rate;
+ (*payload)->audio = true;
+ (*payload)->name[RTP_PAYLOAD_NAME_SIZE - 1] = '\0';
+ strncpy((*payload)->name, payloadName, RTP_PAYLOAD_NAME_SIZE - 1);
return 0;
}
-bool
-RTPSenderAudio::MarkerBit(const FrameType frameType,
- const int8_t payload_type)
-{
- CriticalSectionScoped cs(_sendAudioCritsect.get());
- // for audio true for first packet in a speech burst
- bool markerBit = false;
- if (_lastPayloadType != payload_type) {
- if (payload_type != -1 && (_cngNBPayloadType == payload_type ||
- _cngWBPayloadType == payload_type ||
- _cngSWBPayloadType == payload_type ||
- _cngFBPayloadType == payload_type)) {
- // Only set a marker bit when we change payload type to a non CNG
- return false;
- }
+bool RTPSenderAudio::MarkerBit(FrameType frameType, int8_t payload_type) {
+ CriticalSectionScoped cs(_sendAudioCritsect.get());
+  // For audio, true for the first packet in a speech burst
+ bool markerBit = false;
+ if (_lastPayloadType != payload_type) {
+ if (payload_type != -1 && (_cngNBPayloadType == payload_type ||
+ _cngWBPayloadType == payload_type ||
+ _cngSWBPayloadType == payload_type ||
+ _cngFBPayloadType == payload_type)) {
+      // Only set a marker bit when we change payload type to a non-CNG type.
+ return false;
+ }
- // payload_type differ
- if (_lastPayloadType == -1) {
- if (frameType != kAudioFrameCN) {
- // first packet and NOT CNG
- return true;
- } else {
- // first packet and CNG
- _inbandVADactive = true;
- return false;
- }
+    // payload types differ
+ if (_lastPayloadType == -1) {
+ if (frameType != kAudioFrameCN) {
+ // first packet and NOT CNG
+ return true;
+ } else {
+ // first packet and CNG
+ _inbandVADactive = true;
+ return false;
}
-
- // not first packet AND
- // not CNG AND
- // payload_type changed
-
- // set a marker bit when we change payload type
- markerBit = true;
}
- // For G.723 G.729, AMR etc we can have inband VAD
- if(frameType == kAudioFrameCN)
- {
- _inbandVADactive = true;
+ // not first packet AND
+ // not CNG AND
+ // payload_type changed
- } else if(_inbandVADactive)
- {
- _inbandVADactive = false;
- markerBit = true;
- }
- return markerBit;
+ // set a marker bit when we change payload type
+ markerBit = true;
+ }
+
+  // For G.723, G.729, AMR, etc. we can have inband VAD
+ if (frameType == kAudioFrameCN) {
+ _inbandVADactive = true;
+ } else if (_inbandVADactive) {
+ _inbandVADactive = false;
+ markerBit = true;
+ }
+ return markerBit;
}
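
The MarkerBit body above is a pure reindentation, so it is worth restating what it computes: per RTP audio conventions the marker bit flags the first packet of a talk spurt, and CNG payload types plus inband VAD complicate the bookkeeping. A condensed sketch of the same decision with the state passed explicitly (all names hypothetical):

#include <cstdint>

// State that RTPSenderAudio keeps under its lock; names hypothetical.
struct VadState {
  bool inband_vad_active = false;
  int8_t last_payload_type = -1;
};

bool SpeechBurstMarker(VadState* s, int8_t payload_type,
                       bool payload_type_is_cng, bool frame_is_cng) {
  bool marker = false;
  if (s->last_payload_type != payload_type) {
    if (payload_type != -1 && payload_type_is_cng)
      return false;  // Switching to a CNG payload type: never mark.
    if (s->last_payload_type == -1) {
      if (frame_is_cng) {
        s->inband_vad_active = true;  // First packet is comfort noise.
        return false;
      }
      return true;  // First packet and real speech.
    }
    marker = true;  // Codec changed mid-stream.
  }
  if (frame_is_cng) {
    s->inband_vad_active = true;  // Silence signalled via inband VAD.
  } else if (s->inband_vad_active) {
    s->inband_vad_active = false;
    marker = true;  // Speech resumes after a silent stretch.
  }
  return marker;
}
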
-int32_t RTPSenderAudio::SendAudio(
- const FrameType frameType,
- const int8_t payloadType,
- const uint32_t captureTimeStamp,
- const uint8_t* payloadData,
- const size_t dataSize,
- const RTPFragmentationHeader* fragmentation) {
+int32_t RTPSenderAudio::SendAudio(FrameType frameType,
+ int8_t payloadType,
+ uint32_t captureTimeStamp,
+ const uint8_t* payloadData,
+ size_t dataSize,
+ const RTPFragmentationHeader* fragmentation) {
// TODO(pwestin) Breakup function in smaller functions.
size_t payloadSize = dataSize;
size_t maxPayloadLength = _rtpSender->MaxPayloadLength();
@@ -185,8 +175,8 @@ int32_t RTPSenderAudio::SendAudio(
// Check if we have pending DTMFs to send
if (!_dtmfEventIsOn && PendingDTMF()) {
- int64_t delaySinceLastDTMF = _clock->TimeInMilliseconds() -
- _dtmfTimeLastSent;
+ int64_t delaySinceLastDTMF =
+ _clock->TimeInMilliseconds() - _dtmfTimeLastSent;
if (delaySinceLastDTMF > 100) {
// New tone to play
@@ -294,128 +284,120 @@ int32_t RTPSenderAudio::SendAudio(
// Too large payload buffer.
return -1;
}
- if (red_payload_type >= 0 && // Have we configured RED?
- fragmentation && fragmentation->fragmentationVectorSize > 1 &&
- !markerBit) {
- if (timestampOffset <= 0x3fff) {
- if (fragmentation->fragmentationVectorSize != 2) {
- // we only support 2 codecs when using RED
- return -1;
- }
- // only 0x80 if we have multiple blocks
- dataBuffer[rtpHeaderLength++] =
- 0x80 + fragmentation->fragmentationPlType[1];
- size_t blockLength = fragmentation->fragmentationLength[1];
-
- // sanity blockLength
- if (blockLength > 0x3ff) { // block length 10 bits 1023 bytes
- return -1;
- }
- uint32_t REDheader = (timestampOffset << 10) + blockLength;
- ByteWriter<uint32_t>::WriteBigEndian(dataBuffer + rtpHeaderLength,
- REDheader);
- rtpHeaderLength += 3;
-
- dataBuffer[rtpHeaderLength++] = fragmentation->fragmentationPlType[0];
- // copy the RED data
- memcpy(dataBuffer + rtpHeaderLength,
- payloadData + fragmentation->fragmentationOffset[1],
- fragmentation->fragmentationLength[1]);
-
- // copy the normal data
- memcpy(dataBuffer + rtpHeaderLength +
- fragmentation->fragmentationLength[1],
- payloadData + fragmentation->fragmentationOffset[0],
- fragmentation->fragmentationLength[0]);
-
- payloadSize = fragmentation->fragmentationLength[0] +
- fragmentation->fragmentationLength[1];
- } else {
- // silence for too long send only new data
- dataBuffer[rtpHeaderLength++] = fragmentation->fragmentationPlType[0];
- memcpy(dataBuffer + rtpHeaderLength,
- payloadData + fragmentation->fragmentationOffset[0],
- fragmentation->fragmentationLength[0]);
+ if (red_payload_type >= 0 && // Have we configured RED?
+ fragmentation && fragmentation->fragmentationVectorSize > 1 &&
+ !markerBit) {
+ if (timestampOffset <= 0x3fff) {
+ if (fragmentation->fragmentationVectorSize != 2) {
+ // we only support 2 codecs when using RED
+ return -1;
+ }
+ // only 0x80 if we have multiple blocks
+ dataBuffer[rtpHeaderLength++] =
+ 0x80 + fragmentation->fragmentationPlType[1];
+ size_t blockLength = fragmentation->fragmentationLength[1];
- payloadSize = fragmentation->fragmentationLength[0];
+      // sanity check on blockLength
+      if (blockLength > 0x3ff) {  // block length is 10 bits, max 1023 bytes
+ return -1;
}
+ uint32_t REDheader = (timestampOffset << 10) + blockLength;
+ ByteWriter<uint32_t>::WriteBigEndian(dataBuffer + rtpHeaderLength,
+ REDheader);
+ rtpHeaderLength += 3;
+
+ dataBuffer[rtpHeaderLength++] = fragmentation->fragmentationPlType[0];
+ // copy the RED data
+ memcpy(dataBuffer + rtpHeaderLength,
+ payloadData + fragmentation->fragmentationOffset[1],
+ fragmentation->fragmentationLength[1]);
+
+ // copy the normal data
+ memcpy(
+ dataBuffer + rtpHeaderLength + fragmentation->fragmentationLength[1],
+ payloadData + fragmentation->fragmentationOffset[0],
+ fragmentation->fragmentationLength[0]);
+
+ payloadSize = fragmentation->fragmentationLength[0] +
+ fragmentation->fragmentationLength[1];
} else {
- if (fragmentation && fragmentation->fragmentationVectorSize > 0) {
- // use the fragment info if we have one
- dataBuffer[rtpHeaderLength++] = fragmentation->fragmentationPlType[0];
- memcpy(dataBuffer + rtpHeaderLength,
- payloadData + fragmentation->fragmentationOffset[0],
- fragmentation->fragmentationLength[0]);
-
- payloadSize = fragmentation->fragmentationLength[0];
- } else {
- memcpy(dataBuffer + rtpHeaderLength, payloadData, payloadSize);
- }
+      // silence for too long; send only new data
+ dataBuffer[rtpHeaderLength++] = fragmentation->fragmentationPlType[0];
+ memcpy(dataBuffer + rtpHeaderLength,
+ payloadData + fragmentation->fragmentationOffset[0],
+ fragmentation->fragmentationLength[0]);
+
+ payloadSize = fragmentation->fragmentationLength[0];
}
- {
- CriticalSectionScoped cs(_sendAudioCritsect.get());
- _lastPayloadType = payloadType;
+ } else {
+ if (fragmentation && fragmentation->fragmentationVectorSize > 0) {
+ // use the fragment info if we have one
+ dataBuffer[rtpHeaderLength++] = fragmentation->fragmentationPlType[0];
+ memcpy(dataBuffer + rtpHeaderLength,
+ payloadData + fragmentation->fragmentationOffset[0],
+ fragmentation->fragmentationLength[0]);
+
+ payloadSize = fragmentation->fragmentationLength[0];
+ } else {
+ memcpy(dataBuffer + rtpHeaderLength, payloadData, payloadSize);
}
- // Update audio level extension, if included.
- size_t packetSize = payloadSize + rtpHeaderLength;
- RtpUtility::RtpHeaderParser rtp_parser(dataBuffer, packetSize);
- RTPHeader rtp_header;
- rtp_parser.Parse(rtp_header);
- _rtpSender->UpdateAudioLevel(dataBuffer, packetSize, rtp_header,
- (frameType == kAudioFrameSpeech),
- audio_level_dbov);
- TRACE_EVENT_ASYNC_END2("webrtc", "Audio", captureTimeStamp, "timestamp",
- _rtpSender->Timestamp(), "seqnum",
- _rtpSender->SequenceNumber());
- return _rtpSender->SendToNetwork(dataBuffer, payloadSize, rtpHeaderLength,
- -1, kAllowRetransmission,
- RtpPacketSender::kHighPriority);
}
-
- // Audio level magnitude and voice activity flag are set for each RTP packet
-int32_t
-RTPSenderAudio::SetAudioLevel(const uint8_t level_dBov)
-{
- if (level_dBov > 127)
- {
- return -1;
- }
+ {
CriticalSectionScoped cs(_sendAudioCritsect.get());
- _audioLevel_dBov = level_dBov;
- return 0;
+ _lastPayloadType = payloadType;
+ }
+ // Update audio level extension, if included.
+ size_t packetSize = payloadSize + rtpHeaderLength;
+ RtpUtility::RtpHeaderParser rtp_parser(dataBuffer, packetSize);
+ RTPHeader rtp_header;
+ rtp_parser.Parse(&rtp_header);
+ _rtpSender->UpdateAudioLevel(dataBuffer, packetSize, rtp_header,
+ (frameType == kAudioFrameSpeech),
+ audio_level_dbov);
+ TRACE_EVENT_ASYNC_END2("webrtc", "Audio", captureTimeStamp, "timestamp",
+ _rtpSender->Timestamp(), "seqnum",
+ _rtpSender->SequenceNumber());
+ return _rtpSender->SendToNetwork(dataBuffer, payloadSize, rtpHeaderLength,
+ TickTime::MillisecondTimestamp(),
+ kAllowRetransmission,
+ RtpPacketSender::kHighPriority);
}
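
The RED branch above hand-packs the RFC 2198 block header: one octet of F|PT (F = 1 when another block follows), then 14 bits of timestamp offset and 10 bits of block length, with the final block introduced by a bare F = 0 | PT octet. The code's range checks (offset <= 0x3fff, length <= 0x3ff) are exactly those field widths. A sketch of the packing:

#include <cstdint>

// RFC 2198 block header: F(1) | block PT(7) | ts offset(14) | length(10).
// Caller must ensure ts_offset <= 0x3fff and block_length <= 0x3ff,
// mirroring the range checks in SendAudio above.
void WriteRedBlockHeader(uint8_t* out, uint8_t block_pt,
                         uint16_t ts_offset, uint16_t block_length) {
  out[0] = 0x80 | block_pt;  // F = 1: another block follows.
  const uint32_t rest = (uint32_t{ts_offset} << 10) | block_length;
  out[1] = static_cast<uint8_t>(rest >> 16);
  out[2] = static_cast<uint8_t>(rest >> 8);
  out[3] = static_cast<uint8_t>(rest);
  // e.g. ts_offset = 960, block_length = 24 packs to 0x0f 0x00 0x18.
}
// The final (primary) block is introduced by a single octet: F = 0 | PT.
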
- // Set payload type for Redundant Audio Data RFC 2198
-int32_t
-RTPSenderAudio::SetRED(const int8_t payloadType)
-{
- if(payloadType < -1 )
- {
- return -1;
- }
- CriticalSectionScoped cs(_sendAudioCritsect.get());
- _REDPayloadType = payloadType;
- return 0;
+// Audio level magnitude and voice activity flag are set for each RTP packet
+int32_t RTPSenderAudio::SetAudioLevel(uint8_t level_dBov) {
+ if (level_dBov > 127) {
+ return -1;
+ }
+ CriticalSectionScoped cs(_sendAudioCritsect.get());
+ _audioLevel_dBov = level_dBov;
+ return 0;
}
- // Get payload type for Redundant Audio Data RFC 2198
-int32_t
-RTPSenderAudio::RED(int8_t& payloadType) const
-{
- CriticalSectionScoped cs(_sendAudioCritsect.get());
- if(_REDPayloadType == -1)
- {
- // not configured
- return -1;
- }
- payloadType = _REDPayloadType;
- return 0;
+// Set payload type for Redundant Audio Data RFC 2198
+int32_t RTPSenderAudio::SetRED(int8_t payloadType) {
+ if (payloadType < -1) {
+ return -1;
+ }
+ CriticalSectionScoped cs(_sendAudioCritsect.get());
+ _REDPayloadType = payloadType;
+ return 0;
+}
+
+// Get payload type for Redundant Audio Data RFC 2198
+int32_t RTPSenderAudio::RED(int8_t* payloadType) const {
+ CriticalSectionScoped cs(_sendAudioCritsect.get());
+ if (_REDPayloadType == -1) {
+ // not configured
+ return -1;
+ }
+ *payloadType = _REDPayloadType;
+ return 0;
}
// Send a TelephoneEvent tone using RFC 2833 (4733)
-int32_t RTPSenderAudio::SendTelephoneEvent(const uint8_t key,
- const uint16_t time_ms,
- const uint8_t level) {
+int32_t RTPSenderAudio::SendTelephoneEvent(uint8_t key,
+ uint16_t time_ms,
+ uint8_t level) {
{
CriticalSectionScoped lock(_sendAudioCritsect.get());
if (_dtmfPayloadType < 0) {
@@ -426,63 +408,57 @@ int32_t RTPSenderAudio::SendTelephoneEvent(const uint8_t key,
return AddDTMF(key, time_ms, level);
}
-int32_t
-RTPSenderAudio::SendTelephoneEventPacket(bool ended,
- int8_t dtmf_payload_type,
- uint32_t dtmfTimeStamp,
- uint16_t duration,
- bool markerBit)
-{
- uint8_t dtmfbuffer[IP_PACKET_SIZE];
- uint8_t sendCount = 1;
- int32_t retVal = 0;
-
- if(ended)
- {
- // resend last packet in an event 3 times
- sendCount = 3;
- }
- do
- {
- //Send DTMF data
- _rtpSender->BuildRTPheader(dtmfbuffer, dtmf_payload_type, markerBit,
- dtmfTimeStamp, _clock->TimeInMilliseconds());
-
- // reset CSRC and X bit
- dtmfbuffer[0] &= 0xe0;
-
- //Create DTMF data
- /* From RFC 2833:
-
- 0 1 2 3
- 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | event |E|R| volume | duration |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- */
- // R bit always cleared
- uint8_t R = 0x00;
- uint8_t volume = _dtmfLevel;
-
- // First packet un-ended
- uint8_t E = ended ? 0x80 : 0x00;
-
- // First byte is Event number, equals key number
- dtmfbuffer[12] = _dtmfKey;
- dtmfbuffer[13] = E|R|volume;
- ByteWriter<uint16_t>::WriteBigEndian(dtmfbuffer + 14, duration);
-
- TRACE_EVENT_INSTANT2(TRACE_DISABLED_BY_DEFAULT("webrtc_rtp"),
- "Audio::SendTelephoneEvent", "timestamp",
- dtmfTimeStamp, "seqnum",
- _rtpSender->SequenceNumber());
- retVal = _rtpSender->SendToNetwork(dtmfbuffer, 4, 12, -1,
- kAllowRetransmission,
- RtpPacketSender::kHighPriority);
- sendCount--;
-
- }while (sendCount > 0 && retVal == 0);
-
- return retVal;
+int32_t RTPSenderAudio::SendTelephoneEventPacket(bool ended,
+ int8_t dtmf_payload_type,
+ uint32_t dtmfTimeStamp,
+ uint16_t duration,
+ bool markerBit) {
+ uint8_t dtmfbuffer[IP_PACKET_SIZE];
+ uint8_t sendCount = 1;
+ int32_t retVal = 0;
+
+ if (ended) {
+ // resend last packet in an event 3 times
+ sendCount = 3;
+ }
+ do {
+ // Send DTMF data
+ _rtpSender->BuildRTPheader(dtmfbuffer, dtmf_payload_type, markerBit,
+ dtmfTimeStamp, _clock->TimeInMilliseconds());
+
+ // reset CSRC and X bit
+ dtmfbuffer[0] &= 0xe0;
+
+ // Create DTMF data
+ /* From RFC 2833:
+
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | event |E|R| volume | duration |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+ // R bit always cleared
+ uint8_t R = 0x00;
+ uint8_t volume = _dtmfLevel;
+
+ // First packet un-ended
+ uint8_t E = ended ? 0x80 : 0x00;
+
+ // First byte is Event number, equals key number
+ dtmfbuffer[12] = _dtmfKey;
+ dtmfbuffer[13] = E | R | volume;
+ ByteWriter<uint16_t>::WriteBigEndian(dtmfbuffer + 14, duration);
+
+ TRACE_EVENT_INSTANT2(TRACE_DISABLED_BY_DEFAULT("webrtc_rtp"),
+ "Audio::SendTelephoneEvent", "timestamp",
+ dtmfTimeStamp, "seqnum", _rtpSender->SequenceNumber());
+ retVal = _rtpSender->SendToNetwork(
+ dtmfbuffer, 4, 12, TickTime::MillisecondTimestamp(),
+ kAllowRetransmission, RtpPacketSender::kHighPriority);
+ sendCount--;
+ } while (sendCount > 0 && retVal == 0);
+
+ return retVal;
}
} // namespace webrtc
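
The reflowed SendTelephoneEventPacket fills in the 4-byte telephone-event payload from the RFC 2833 diagram embedded above: event number, an E|R|volume octet, and a 16-bit big-endian duration, starting right after the 12-byte RTP header. A standalone sketch of just that packing:

#include <cstdint>

void WriteDtmfPayload(uint8_t* rtp_packet, uint8_t event_key, bool ended,
                      uint8_t volume_dbov, uint16_t duration) {
  rtp_packet[12] = event_key;  // Event number equals the key.
  rtp_packet[13] = (ended ? 0x80 : 0x00)    // E bit set on the final packets.
                   | (volume_dbov & 0x3f);  // R bit cleared; 6-bit volume.
  rtp_packet[14] = static_cast<uint8_t>(duration >> 8);  // Big-endian
  rtp_packet[15] = static_cast<uint8_t>(duration);       // duration.
}
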
diff --git a/webrtc/modules/rtp_rtcp/source/rtp_sender_audio.h b/webrtc/modules/rtp_rtcp/source/rtp_sender_audio.h
index dd16fe51b4..1e96d17a67 100644
--- a/webrtc/modules/rtp_rtcp/source/rtp_sender_audio.h
+++ b/webrtc/modules/rtp_rtcp/source/rtp_sender_audio.h
@@ -19,92 +19,91 @@
#include "webrtc/typedefs.h"
namespace webrtc {
-class RTPSenderAudio: public DTMFqueue
-{
-public:
- RTPSenderAudio(Clock* clock,
- RTPSender* rtpSender,
- RtpAudioFeedback* audio_feedback);
- virtual ~RTPSenderAudio();
-
- int32_t RegisterAudioPayload(const char payloadName[RTP_PAYLOAD_NAME_SIZE],
- const int8_t payloadType,
- const uint32_t frequency,
- const uint8_t channels,
- const uint32_t rate,
- RtpUtility::Payload*& payload);
-
- int32_t SendAudio(const FrameType frameType,
- const int8_t payloadType,
- const uint32_t captureTimeStamp,
- const uint8_t* payloadData,
- const size_t payloadSize,
- const RTPFragmentationHeader* fragmentation);
-
- // set audio packet size, used to determine when it's time to send a DTMF packet in silence (CNG)
- int32_t SetAudioPacketSize(const uint16_t packetSizeSamples);
-
- // Store the audio level in dBov for header-extension-for-audio-level-indication.
- // Valid range is [0,100]. Actual value is negative.
- int32_t SetAudioLevel(const uint8_t level_dBov);
-
- // Send a DTMF tone using RFC 2833 (4733)
- int32_t SendTelephoneEvent(const uint8_t key,
- const uint16_t time_ms,
- const uint8_t level);
-
- int AudioFrequency() const;
-
- // Set payload type for Redundant Audio Data RFC 2198
- int32_t SetRED(const int8_t payloadType);
-
- // Get payload type for Redundant Audio Data RFC 2198
- int32_t RED(int8_t& payloadType) const;
-
-protected:
- int32_t SendTelephoneEventPacket(bool ended,
- int8_t dtmf_payload_type,
- uint32_t dtmfTimeStamp,
- uint16_t duration,
- bool markerBit); // set on first packet in talk burst
-
- bool MarkerBit(const FrameType frameType,
- const int8_t payloadType);
-
-private:
- Clock* const _clock;
- RTPSender* const _rtpSender;
- RtpAudioFeedback* const _audioFeedback;
-
- rtc::scoped_ptr<CriticalSectionWrapper> _sendAudioCritsect;
-
- uint16_t _packetSizeSamples GUARDED_BY(_sendAudioCritsect);
-
- // DTMF
- bool _dtmfEventIsOn;
- bool _dtmfEventFirstPacketSent;
- int8_t _dtmfPayloadType GUARDED_BY(_sendAudioCritsect);
- uint32_t _dtmfTimestamp;
- uint8_t _dtmfKey;
- uint32_t _dtmfLengthSamples;
- uint8_t _dtmfLevel;
- int64_t _dtmfTimeLastSent;
- uint32_t _dtmfTimestampLastSent;
-
- int8_t _REDPayloadType GUARDED_BY(_sendAudioCritsect);
-
- // VAD detection, used for markerbit
- bool _inbandVADactive GUARDED_BY(_sendAudioCritsect);
- int8_t _cngNBPayloadType GUARDED_BY(_sendAudioCritsect);
- int8_t _cngWBPayloadType GUARDED_BY(_sendAudioCritsect);
- int8_t _cngSWBPayloadType GUARDED_BY(_sendAudioCritsect);
- int8_t _cngFBPayloadType GUARDED_BY(_sendAudioCritsect);
- int8_t _lastPayloadType GUARDED_BY(_sendAudioCritsect);
-
- // Audio level indication
- // (https://datatracker.ietf.org/doc/draft-lennox-avt-rtp-audio-level-exthdr/)
- uint8_t _audioLevel_dBov GUARDED_BY(_sendAudioCritsect);
+class RTPSenderAudio : public DTMFqueue {
+ public:
+ RTPSenderAudio(Clock* clock,
+ RTPSender* rtpSender,
+ RtpAudioFeedback* audio_feedback);
+ virtual ~RTPSenderAudio();
+
+ int32_t RegisterAudioPayload(const char payloadName[RTP_PAYLOAD_NAME_SIZE],
+ int8_t payloadType,
+ uint32_t frequency,
+ size_t channels,
+ uint32_t rate,
+ RtpUtility::Payload** payload);
+
+ int32_t SendAudio(FrameType frameType,
+ int8_t payloadType,
+ uint32_t captureTimeStamp,
+ const uint8_t* payloadData,
+ size_t payloadSize,
+ const RTPFragmentationHeader* fragmentation);
+
+ // set audio packet size, used to determine when it's time to send a DTMF
+ // packet in silence (CNG)
+ int32_t SetAudioPacketSize(uint16_t packetSizeSamples);
+
+ // Store the audio level in dBov for
+ // header-extension-for-audio-level-indication.
+ // Valid range is [0,100]. Actual value is negative.
+ int32_t SetAudioLevel(uint8_t level_dBov);
+
+ // Send a DTMF tone using RFC 2833 (4733)
+ int32_t SendTelephoneEvent(uint8_t key, uint16_t time_ms, uint8_t level);
+
+ int AudioFrequency() const;
+
+ // Set payload type for Redundant Audio Data RFC 2198
+ int32_t SetRED(int8_t payloadType);
+
+ // Get payload type for Redundant Audio Data RFC 2198
+ int32_t RED(int8_t* payloadType) const;
+
+ protected:
+ int32_t SendTelephoneEventPacket(
+ bool ended,
+ int8_t dtmf_payload_type,
+ uint32_t dtmfTimeStamp,
+ uint16_t duration,
+ bool markerBit); // set on first packet in talk burst
+
+ bool MarkerBit(const FrameType frameType, const int8_t payloadType);
+
+ private:
+ Clock* const _clock;
+ RTPSender* const _rtpSender;
+ RtpAudioFeedback* const _audioFeedback;
+
+ rtc::scoped_ptr<CriticalSectionWrapper> _sendAudioCritsect;
+
+ uint16_t _packetSizeSamples GUARDED_BY(_sendAudioCritsect);
+
+ // DTMF
+ bool _dtmfEventIsOn;
+ bool _dtmfEventFirstPacketSent;
+ int8_t _dtmfPayloadType GUARDED_BY(_sendAudioCritsect);
+ uint32_t _dtmfTimestamp;
+ uint8_t _dtmfKey;
+ uint32_t _dtmfLengthSamples;
+ uint8_t _dtmfLevel;
+ int64_t _dtmfTimeLastSent;
+ uint32_t _dtmfTimestampLastSent;
+
+ int8_t _REDPayloadType GUARDED_BY(_sendAudioCritsect);
+
+  // VAD detection, used for the marker bit
+ bool _inbandVADactive GUARDED_BY(_sendAudioCritsect);
+ int8_t _cngNBPayloadType GUARDED_BY(_sendAudioCritsect);
+ int8_t _cngWBPayloadType GUARDED_BY(_sendAudioCritsect);
+ int8_t _cngSWBPayloadType GUARDED_BY(_sendAudioCritsect);
+ int8_t _cngFBPayloadType GUARDED_BY(_sendAudioCritsect);
+ int8_t _lastPayloadType GUARDED_BY(_sendAudioCritsect);
+
+ // Audio level indication
+ // (https://datatracker.ietf.org/doc/draft-lennox-avt-rtp-audio-level-exthdr/)
+ uint8_t _audioLevel_dBov GUARDED_BY(_sendAudioCritsect);
};
} // namespace webrtc
-#endif // WEBRTC_MODULES_RTP_RTCP_SOURCE_RTP_SENDER_AUDIO_H_
+#endif // WEBRTC_MODULES_RTP_RTCP_SOURCE_RTP_SENDER_AUDIO_H_
diff --git a/webrtc/modules/rtp_rtcp/source/rtp_sender_unittest.cc b/webrtc/modules/rtp_rtcp/source/rtp_sender_unittest.cc
index fde6d47ceb..6bc122201a 100644
--- a/webrtc/modules/rtp_rtcp/source/rtp_sender_unittest.cc
+++ b/webrtc/modules/rtp_rtcp/source/rtp_sender_unittest.cc
@@ -12,20 +12,22 @@
* This file includes unit tests for the RTPSender.
*/
+#include <list>
+#include <vector>
+
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
-
#include "webrtc/base/buffer.h"
#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_cvo.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_header_parser.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_cvo.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_header_parser.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_format_video_generic.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_header_extension.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_sender.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_sender_video.h"
-#include "webrtc/system_wrappers/include/stl_util.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_utility.h"
+#include "webrtc/system_wrappers/include/stl_util.h"
#include "webrtc/test/mock_transport.h"
#include "webrtc/typedefs.h"
@@ -90,9 +92,7 @@ class LoopbackTransportTest : public webrtc::Transport {
sent_packets_.push_back(buffer);
return true;
}
- bool SendRtcp(const uint8_t* data, size_t len) override {
- return false;
- }
+ bool SendRtcp(const uint8_t* data, size_t len) override { return false; }
int packets_sent_;
size_t last_sent_packet_len_;
size_t total_bytes_sent_;
@@ -163,11 +163,8 @@ class RtpSenderTest : public ::testing::Test {
void SendPacket(int64_t capture_time_ms, int payload_length) {
uint32_t timestamp = capture_time_ms * 90;
- int32_t rtp_length = rtp_sender_->BuildRTPheader(packet_,
- kPayload,
- kMarkerBit,
- timestamp,
- capture_time_ms);
+ int32_t rtp_length = rtp_sender_->BuildRTPheader(
+ packet_, kPayload, kMarkerBit, timestamp, capture_time_ms);
ASSERT_GE(rtp_length, 0);
// Packet should be stored in a send bucket.
@@ -186,7 +183,7 @@ class RtpSenderTestWithoutPacer : public RtpSenderTest {
class RtpSenderVideoTest : public RtpSenderTest {
protected:
- virtual void SetUp() override {
+ void SetUp() override {
// TODO(pbos): Set up to use pacer.
SetUpRtpSender(false);
rtp_sender_video_.reset(
@@ -211,7 +208,7 @@ class RtpSenderVideoTest : public RtpSenderTest {
} else {
ASSERT_EQ(kRtpHeaderSize, length);
}
- ASSERT_TRUE(rtp_parser.Parse(rtp_header, map));
+ ASSERT_TRUE(rtp_parser.Parse(&rtp_header, map));
ASSERT_FALSE(rtp_parser.RTCP());
EXPECT_EQ(payload_, rtp_header.payloadType);
EXPECT_EQ(seq_num, rtp_header.sequenceNumber);
@@ -228,53 +225,57 @@ TEST_F(RtpSenderTestWithoutPacer,
RegisterRtpTransmissionTimeOffsetHeaderExtension) {
EXPECT_EQ(0u, rtp_sender_->RtpHeaderExtensionTotalLength());
EXPECT_EQ(0, rtp_sender_->RegisterRtpHeaderExtension(
- kRtpExtensionTransmissionTimeOffset, kTransmissionTimeOffsetExtensionId));
+ kRtpExtensionTransmissionTimeOffset,
+ kTransmissionTimeOffsetExtensionId));
EXPECT_EQ(kRtpOneByteHeaderLength + kTransmissionTimeOffsetLength,
rtp_sender_->RtpHeaderExtensionTotalLength());
EXPECT_EQ(0, rtp_sender_->DeregisterRtpHeaderExtension(
- kRtpExtensionTransmissionTimeOffset));
+ kRtpExtensionTransmissionTimeOffset));
EXPECT_EQ(0u, rtp_sender_->RtpHeaderExtensionTotalLength());
}
TEST_F(RtpSenderTestWithoutPacer, RegisterRtpAbsoluteSendTimeHeaderExtension) {
EXPECT_EQ(0u, rtp_sender_->RtpHeaderExtensionTotalLength());
- EXPECT_EQ(0, rtp_sender_->RegisterRtpHeaderExtension(
- kRtpExtensionAbsoluteSendTime, kAbsoluteSendTimeExtensionId));
+ EXPECT_EQ(
+ 0, rtp_sender_->RegisterRtpHeaderExtension(kRtpExtensionAbsoluteSendTime,
+ kAbsoluteSendTimeExtensionId));
EXPECT_EQ(RtpUtility::Word32Align(kRtpOneByteHeaderLength +
kAbsoluteSendTimeLength),
rtp_sender_->RtpHeaderExtensionTotalLength());
EXPECT_EQ(0, rtp_sender_->DeregisterRtpHeaderExtension(
- kRtpExtensionAbsoluteSendTime));
+ kRtpExtensionAbsoluteSendTime));
EXPECT_EQ(0u, rtp_sender_->RtpHeaderExtensionTotalLength());
}
TEST_F(RtpSenderTestWithoutPacer, RegisterRtpAudioLevelHeaderExtension) {
EXPECT_EQ(0u, rtp_sender_->RtpHeaderExtensionTotalLength());
- EXPECT_EQ(0, rtp_sender_->RegisterRtpHeaderExtension(
- kRtpExtensionAudioLevel, kAudioLevelExtensionId));
+ EXPECT_EQ(0, rtp_sender_->RegisterRtpHeaderExtension(kRtpExtensionAudioLevel,
+ kAudioLevelExtensionId));
EXPECT_EQ(
RtpUtility::Word32Align(kRtpOneByteHeaderLength + kAudioLevelLength),
rtp_sender_->RtpHeaderExtensionTotalLength());
- EXPECT_EQ(0, rtp_sender_->DeregisterRtpHeaderExtension(
- kRtpExtensionAudioLevel));
+ EXPECT_EQ(0,
+ rtp_sender_->DeregisterRtpHeaderExtension(kRtpExtensionAudioLevel));
EXPECT_EQ(0u, rtp_sender_->RtpHeaderExtensionTotalLength());
}
TEST_F(RtpSenderTestWithoutPacer, RegisterRtpHeaderExtensions) {
EXPECT_EQ(0u, rtp_sender_->RtpHeaderExtensionTotalLength());
EXPECT_EQ(0, rtp_sender_->RegisterRtpHeaderExtension(
- kRtpExtensionTransmissionTimeOffset, kTransmissionTimeOffsetExtensionId));
+ kRtpExtensionTransmissionTimeOffset,
+ kTransmissionTimeOffsetExtensionId));
EXPECT_EQ(RtpUtility::Word32Align(kRtpOneByteHeaderLength +
kTransmissionTimeOffsetLength),
rtp_sender_->RtpHeaderExtensionTotalLength());
- EXPECT_EQ(0, rtp_sender_->RegisterRtpHeaderExtension(
- kRtpExtensionAbsoluteSendTime, kAbsoluteSendTimeExtensionId));
+ EXPECT_EQ(
+ 0, rtp_sender_->RegisterRtpHeaderExtension(kRtpExtensionAbsoluteSendTime,
+ kAbsoluteSendTimeExtensionId));
EXPECT_EQ(RtpUtility::Word32Align(kRtpOneByteHeaderLength +
kTransmissionTimeOffsetLength +
kAbsoluteSendTimeLength),
rtp_sender_->RtpHeaderExtensionTotalLength());
- EXPECT_EQ(0, rtp_sender_->RegisterRtpHeaderExtension(
- kRtpExtensionAudioLevel, kAudioLevelExtensionId));
+ EXPECT_EQ(0, rtp_sender_->RegisterRtpHeaderExtension(kRtpExtensionAudioLevel,
+ kAudioLevelExtensionId));
EXPECT_EQ(RtpUtility::Word32Align(
kRtpOneByteHeaderLength + kTransmissionTimeOffsetLength +
kAbsoluteSendTimeLength + kAudioLevelLength),
@@ -290,18 +291,18 @@ TEST_F(RtpSenderTestWithoutPacer, RegisterRtpHeaderExtensions) {
// Deregister starts.
EXPECT_EQ(0, rtp_sender_->DeregisterRtpHeaderExtension(
- kRtpExtensionTransmissionTimeOffset));
+ kRtpExtensionTransmissionTimeOffset));
EXPECT_EQ(RtpUtility::Word32Align(kRtpOneByteHeaderLength +
kAbsoluteSendTimeLength +
kAudioLevelLength + kVideoRotationLength),
rtp_sender_->RtpHeaderExtensionTotalLength());
EXPECT_EQ(0, rtp_sender_->DeregisterRtpHeaderExtension(
- kRtpExtensionAbsoluteSendTime));
+ kRtpExtensionAbsoluteSendTime));
EXPECT_EQ(RtpUtility::Word32Align(kRtpOneByteHeaderLength +
kAudioLevelLength + kVideoRotationLength),
rtp_sender_->RtpHeaderExtensionTotalLength());
- EXPECT_EQ(0, rtp_sender_->DeregisterRtpHeaderExtension(
- kRtpExtensionAudioLevel));
+ EXPECT_EQ(0,
+ rtp_sender_->DeregisterRtpHeaderExtension(kRtpExtensionAudioLevel));
EXPECT_EQ(
RtpUtility::Word32Align(kRtpOneByteHeaderLength + kVideoRotationLength),
rtp_sender_->RtpHeaderExtensionTotalLength());
@@ -334,7 +335,7 @@ TEST_F(RtpSenderTestWithoutPacer, BuildRTPPacket) {
webrtc::RtpUtility::RtpHeaderParser rtp_parser(packet_, length);
webrtc::RTPHeader rtp_header;
- const bool valid_rtp_header = rtp_parser.Parse(rtp_header, nullptr);
+ const bool valid_rtp_header = rtp_parser.Parse(&rtp_header, nullptr);
ASSERT_TRUE(valid_rtp_header);
ASSERT_FALSE(rtp_parser.RTCP());
@@ -354,7 +355,8 @@ TEST_F(RtpSenderTestWithoutPacer,
BuildRTPPacketWithTransmissionOffsetExtension) {
EXPECT_EQ(0, rtp_sender_->SetTransmissionTimeOffset(kTimeOffset));
EXPECT_EQ(0, rtp_sender_->RegisterRtpHeaderExtension(
- kRtpExtensionTransmissionTimeOffset, kTransmissionTimeOffsetExtensionId));
+ kRtpExtensionTransmissionTimeOffset,
+ kTransmissionTimeOffsetExtensionId));
size_t length = static_cast<size_t>(rtp_sender_->BuildRTPheader(
packet_, kPayload, kMarkerBit, kTimestamp, 0));
@@ -368,7 +370,7 @@ TEST_F(RtpSenderTestWithoutPacer,
RtpHeaderExtensionMap map;
map.Register(kRtpExtensionTransmissionTimeOffset,
kTransmissionTimeOffsetExtensionId);
- const bool valid_rtp_header = rtp_parser.Parse(rtp_header, &map);
+ const bool valid_rtp_header = rtp_parser.Parse(&rtp_header, &map);
ASSERT_TRUE(valid_rtp_header);
ASSERT_FALSE(rtp_parser.RTCP());
@@ -379,7 +381,7 @@ TEST_F(RtpSenderTestWithoutPacer,
// Parse without map extension
webrtc::RTPHeader rtp_header2;
- const bool valid_rtp_header2 = rtp_parser.Parse(rtp_header2, nullptr);
+ const bool valid_rtp_header2 = rtp_parser.Parse(&rtp_header2, nullptr);
ASSERT_TRUE(valid_rtp_header2);
VerifyRTPHeaderCommon(rtp_header2);
@@ -393,7 +395,8 @@ TEST_F(RtpSenderTestWithoutPacer,
const int kNegTimeOffset = -500;
EXPECT_EQ(0, rtp_sender_->SetTransmissionTimeOffset(kNegTimeOffset));
EXPECT_EQ(0, rtp_sender_->RegisterRtpHeaderExtension(
- kRtpExtensionTransmissionTimeOffset, kTransmissionTimeOffsetExtensionId));
+ kRtpExtensionTransmissionTimeOffset,
+ kTransmissionTimeOffsetExtensionId));
size_t length = static_cast<size_t>(rtp_sender_->BuildRTPheader(
packet_, kPayload, kMarkerBit, kTimestamp, 0));
@@ -407,7 +410,7 @@ TEST_F(RtpSenderTestWithoutPacer,
RtpHeaderExtensionMap map;
map.Register(kRtpExtensionTransmissionTimeOffset,
kTransmissionTimeOffsetExtensionId);
- const bool valid_rtp_header = rtp_parser.Parse(rtp_header, &map);
+ const bool valid_rtp_header = rtp_parser.Parse(&rtp_header, &map);
ASSERT_TRUE(valid_rtp_header);
ASSERT_FALSE(rtp_parser.RTCP());
@@ -419,8 +422,9 @@ TEST_F(RtpSenderTestWithoutPacer,
TEST_F(RtpSenderTestWithoutPacer, BuildRTPPacketWithAbsoluteSendTimeExtension) {
EXPECT_EQ(0, rtp_sender_->SetAbsoluteSendTime(kAbsoluteSendTime));
- EXPECT_EQ(0, rtp_sender_->RegisterRtpHeaderExtension(
- kRtpExtensionAbsoluteSendTime, kAbsoluteSendTimeExtensionId));
+ EXPECT_EQ(
+ 0, rtp_sender_->RegisterRtpHeaderExtension(kRtpExtensionAbsoluteSendTime,
+ kAbsoluteSendTimeExtensionId));
size_t length = static_cast<size_t>(rtp_sender_->BuildRTPheader(
packet_, kPayload, kMarkerBit, kTimestamp, 0));
@@ -433,7 +437,7 @@ TEST_F(RtpSenderTestWithoutPacer, BuildRTPPacketWithAbsoluteSendTimeExtension) {
RtpHeaderExtensionMap map;
map.Register(kRtpExtensionAbsoluteSendTime, kAbsoluteSendTimeExtensionId);
- const bool valid_rtp_header = rtp_parser.Parse(rtp_header, &map);
+ const bool valid_rtp_header = rtp_parser.Parse(&rtp_header, &map);
ASSERT_TRUE(valid_rtp_header);
ASSERT_FALSE(rtp_parser.RTCP());
@@ -444,7 +448,7 @@ TEST_F(RtpSenderTestWithoutPacer, BuildRTPPacketWithAbsoluteSendTimeExtension) {
// Parse without map extension
webrtc::RTPHeader rtp_header2;
- const bool valid_rtp_header2 = rtp_parser.Parse(rtp_header2, nullptr);
+ const bool valid_rtp_header2 = rtp_parser.Parse(&rtp_header2, nullptr);
ASSERT_TRUE(valid_rtp_header2);
VerifyRTPHeaderCommon(rtp_header2);
@@ -472,7 +476,7 @@ TEST_F(RtpSenderTestWithoutPacer, BuildRTPPacketWithVideoRotation_MarkerBit) {
webrtc::RtpUtility::RtpHeaderParser rtp_parser(packet_, length);
webrtc::RTPHeader rtp_header;
- ASSERT_TRUE(rtp_parser.Parse(rtp_header, &map));
+ ASSERT_TRUE(rtp_parser.Parse(&rtp_header, &map));
ASSERT_FALSE(rtp_parser.RTCP());
VerifyRTPHeaderCommon(rtp_header);
EXPECT_EQ(length, rtp_header.headerLength);
@@ -500,7 +504,7 @@ TEST_F(RtpSenderTestWithoutPacer,
webrtc::RtpUtility::RtpHeaderParser rtp_parser(packet_, length);
webrtc::RTPHeader rtp_header;
- ASSERT_TRUE(rtp_parser.Parse(rtp_header, &map));
+ ASSERT_TRUE(rtp_parser.Parse(&rtp_header, &map));
ASSERT_FALSE(rtp_parser.RTCP());
VerifyRTPHeaderCommon(rtp_header, false);
EXPECT_EQ(length, rtp_header.headerLength);
@@ -508,8 +512,8 @@ TEST_F(RtpSenderTestWithoutPacer,
}
TEST_F(RtpSenderTestWithoutPacer, BuildRTPPacketWithAudioLevelExtension) {
- EXPECT_EQ(0, rtp_sender_->RegisterRtpHeaderExtension(
- kRtpExtensionAudioLevel, kAudioLevelExtensionId));
+ EXPECT_EQ(0, rtp_sender_->RegisterRtpHeaderExtension(kRtpExtensionAudioLevel,
+ kAudioLevelExtensionId));
size_t length = static_cast<size_t>(rtp_sender_->BuildRTPheader(
packet_, kPayload, kMarkerBit, kTimestamp, 0));
@@ -521,12 +525,12 @@ TEST_F(RtpSenderTestWithoutPacer, BuildRTPPacketWithAudioLevelExtension) {
webrtc::RTPHeader rtp_header;
// Updating audio level is done in RTPSenderAudio, so simulate it here.
- rtp_parser.Parse(rtp_header);
+ rtp_parser.Parse(&rtp_header);
rtp_sender_->UpdateAudioLevel(packet_, length, rtp_header, true, kAudioLevel);
RtpHeaderExtensionMap map;
map.Register(kRtpExtensionAudioLevel, kAudioLevelExtensionId);
- const bool valid_rtp_header = rtp_parser.Parse(rtp_header, &map);
+ const bool valid_rtp_header = rtp_parser.Parse(&rtp_header, &map);
ASSERT_TRUE(valid_rtp_header);
ASSERT_FALSE(rtp_parser.RTCP());
@@ -538,7 +542,7 @@ TEST_F(RtpSenderTestWithoutPacer, BuildRTPPacketWithAudioLevelExtension) {
// Parse without map extension
webrtc::RTPHeader rtp_header2;
- const bool valid_rtp_header2 = rtp_parser.Parse(rtp_header2, nullptr);
+ const bool valid_rtp_header2 = rtp_parser.Parse(&rtp_header2, nullptr);
ASSERT_TRUE(valid_rtp_header2);
VerifyRTPHeaderCommon(rtp_header2);
@@ -554,11 +558,13 @@ TEST_F(RtpSenderTestWithoutPacer, BuildRTPPacketWithHeaderExtensions) {
EXPECT_EQ(0,
rtp_sender_->SetTransportSequenceNumber(kTransportSequenceNumber));
EXPECT_EQ(0, rtp_sender_->RegisterRtpHeaderExtension(
- kRtpExtensionTransmissionTimeOffset, kTransmissionTimeOffsetExtensionId));
- EXPECT_EQ(0, rtp_sender_->RegisterRtpHeaderExtension(
- kRtpExtensionAbsoluteSendTime, kAbsoluteSendTimeExtensionId));
- EXPECT_EQ(0, rtp_sender_->RegisterRtpHeaderExtension(
- kRtpExtensionAudioLevel, kAudioLevelExtensionId));
+ kRtpExtensionTransmissionTimeOffset,
+ kTransmissionTimeOffsetExtensionId));
+ EXPECT_EQ(
+ 0, rtp_sender_->RegisterRtpHeaderExtension(kRtpExtensionAbsoluteSendTime,
+ kAbsoluteSendTimeExtensionId));
+ EXPECT_EQ(0, rtp_sender_->RegisterRtpHeaderExtension(kRtpExtensionAudioLevel,
+ kAudioLevelExtensionId));
EXPECT_EQ(0, rtp_sender_->RegisterRtpHeaderExtension(
kRtpExtensionTransportSequenceNumber,
kTransportSequenceNumberExtensionId));
@@ -573,7 +579,7 @@ TEST_F(RtpSenderTestWithoutPacer, BuildRTPPacketWithHeaderExtensions) {
webrtc::RTPHeader rtp_header;
// Updating audio level is done in RTPSenderAudio, so simulate it here.
- rtp_parser.Parse(rtp_header);
+ rtp_parser.Parse(&rtp_header);
rtp_sender_->UpdateAudioLevel(packet_, length, rtp_header, true, kAudioLevel);
RtpHeaderExtensionMap map;
@@ -583,7 +589,7 @@ TEST_F(RtpSenderTestWithoutPacer, BuildRTPPacketWithHeaderExtensions) {
map.Register(kRtpExtensionAudioLevel, kAudioLevelExtensionId);
map.Register(kRtpExtensionTransportSequenceNumber,
kTransportSequenceNumberExtensionId);
- const bool valid_rtp_header = rtp_parser.Parse(rtp_header, &map);
+ const bool valid_rtp_header = rtp_parser.Parse(&rtp_header, &map);
ASSERT_TRUE(valid_rtp_header);
ASSERT_FALSE(rtp_parser.RTCP());
@@ -602,7 +608,7 @@ TEST_F(RtpSenderTestWithoutPacer, BuildRTPPacketWithHeaderExtensions) {
// Parse without map extension
webrtc::RTPHeader rtp_header2;
- const bool valid_rtp_header2 = rtp_parser.Parse(rtp_header2, nullptr);
+ const bool valid_rtp_header2 = rtp_parser.Parse(&rtp_header2, nullptr);
ASSERT_TRUE(valid_rtp_header2);
VerifyRTPHeaderCommon(rtp_header2);
@@ -626,9 +632,11 @@ TEST_F(RtpSenderTest, TrafficSmoothingWithExtensions) {
rtp_sender_->SetStorePacketsStatus(true, 10);
EXPECT_EQ(0, rtp_sender_->RegisterRtpHeaderExtension(
- kRtpExtensionTransmissionTimeOffset, kTransmissionTimeOffsetExtensionId));
- EXPECT_EQ(0, rtp_sender_->RegisterRtpHeaderExtension(
- kRtpExtensionAbsoluteSendTime, kAbsoluteSendTimeExtensionId));
+ kRtpExtensionTransmissionTimeOffset,
+ kTransmissionTimeOffsetExtensionId));
+ EXPECT_EQ(
+ 0, rtp_sender_->RegisterRtpHeaderExtension(kRtpExtensionAbsoluteSendTime,
+ kAbsoluteSendTimeExtensionId));
rtp_sender_->SetTargetBitrate(300000);
int64_t capture_time_ms = fake_clock_.TimeInMilliseconds();
int rtp_length_int = rtp_sender_->BuildRTPheader(
@@ -659,7 +667,7 @@ TEST_F(RtpSenderTest, TrafficSmoothingWithExtensions) {
map.Register(kRtpExtensionTransmissionTimeOffset,
kTransmissionTimeOffsetExtensionId);
map.Register(kRtpExtensionAbsoluteSendTime, kAbsoluteSendTimeExtensionId);
- const bool valid_rtp_header = rtp_parser.Parse(rtp_header, &map);
+ const bool valid_rtp_header = rtp_parser.Parse(&rtp_header, &map);
ASSERT_TRUE(valid_rtp_header);
// Verify transmission time offset.
@@ -676,9 +684,11 @@ TEST_F(RtpSenderTest, TrafficSmoothingRetransmits) {
rtp_sender_->SetStorePacketsStatus(true, 10);
EXPECT_EQ(0, rtp_sender_->RegisterRtpHeaderExtension(
- kRtpExtensionTransmissionTimeOffset, kTransmissionTimeOffsetExtensionId));
- EXPECT_EQ(0, rtp_sender_->RegisterRtpHeaderExtension(
- kRtpExtensionAbsoluteSendTime, kAbsoluteSendTimeExtensionId));
+ kRtpExtensionTransmissionTimeOffset,
+ kTransmissionTimeOffsetExtensionId));
+ EXPECT_EQ(
+ 0, rtp_sender_->RegisterRtpHeaderExtension(kRtpExtensionAbsoluteSendTime,
+ kAbsoluteSendTimeExtensionId));
rtp_sender_->SetTargetBitrate(300000);
int64_t capture_time_ms = fake_clock_.TimeInMilliseconds();
int rtp_length_int = rtp_sender_->BuildRTPheader(
@@ -717,7 +727,7 @@ TEST_F(RtpSenderTest, TrafficSmoothingRetransmits) {
map.Register(kRtpExtensionTransmissionTimeOffset,
kTransmissionTimeOffsetExtensionId);
map.Register(kRtpExtensionAbsoluteSendTime, kAbsoluteSendTimeExtensionId);
- const bool valid_rtp_header = rtp_parser.Parse(rtp_header, &map);
+ const bool valid_rtp_header = rtp_parser.Parse(&rtp_header, &map);
ASSERT_TRUE(valid_rtp_header);
// Verify transmission time offset.
@@ -740,10 +750,12 @@ TEST_F(RtpSenderTest, SendPadding) {
rtp_sender_->SetStorePacketsStatus(true, 10);
size_t rtp_header_len = kRtpHeaderSize;
EXPECT_EQ(0, rtp_sender_->RegisterRtpHeaderExtension(
- kRtpExtensionTransmissionTimeOffset, kTransmissionTimeOffsetExtensionId));
+ kRtpExtensionTransmissionTimeOffset,
+ kTransmissionTimeOffsetExtensionId));
rtp_header_len += 4; // 4 bytes extension.
- EXPECT_EQ(0, rtp_sender_->RegisterRtpHeaderExtension(
- kRtpExtensionAbsoluteSendTime, kAbsoluteSendTimeExtensionId));
+ EXPECT_EQ(
+ 0, rtp_sender_->RegisterRtpHeaderExtension(kRtpExtensionAbsoluteSendTime,
+ kAbsoluteSendTimeExtensionId));
rtp_header_len += 4; // 4 bytes extension.
rtp_header_len += 4; // 4 extra bytes common to all extension headers.
@@ -815,8 +827,8 @@ TEST_F(RtpSenderTest, SendPadding) {
// Send a regular video packet again.
capture_time_ms = fake_clock_.TimeInMilliseconds();
- rtp_length_int = rtp_sender_->BuildRTPheader(
- packet_, kPayload, kMarkerBit, timestamp, capture_time_ms);
+ rtp_length_int = rtp_sender_->BuildRTPheader(packet_, kPayload, kMarkerBit,
+ timestamp, capture_time_ms);
ASSERT_NE(-1, rtp_length_int);
rtp_length = static_cast<size_t>(rtp_length_int);
@@ -830,8 +842,8 @@ TEST_F(RtpSenderTest, SendPadding) {
EXPECT_EQ(++total_packets_sent, transport_.packets_sent_);
EXPECT_EQ(rtp_length, transport_.last_sent_packet_len_);
// Parse sent packet.
- ASSERT_TRUE(rtp_parser->Parse(transport_.last_sent_packet_, rtp_length,
- &rtp_header));
+ ASSERT_TRUE(
+ rtp_parser->Parse(transport_.last_sent_packet_, rtp_length, &rtp_header));
// Verify sequence number and timestamp.
EXPECT_EQ(seq_num, rtp_header.sequenceNumber);
@@ -858,8 +870,9 @@ TEST_F(RtpSenderTest, SendRedundantPayloads) {
uint16_t seq_num = kSeqNum;
rtp_sender_->SetStorePacketsStatus(true, 10);
int32_t rtp_header_len = kRtpHeaderSize;
- EXPECT_EQ(0, rtp_sender_->RegisterRtpHeaderExtension(
- kRtpExtensionAbsoluteSendTime, kAbsoluteSendTimeExtensionId));
+ EXPECT_EQ(
+ 0, rtp_sender_->RegisterRtpHeaderExtension(kRtpExtensionAbsoluteSendTime,
+ kAbsoluteSendTimeExtensionId));
rtp_header_len += 4; // 4 bytes extension.
rtp_header_len += 4; // 4 extra bytes common to all extension headers.
@@ -876,8 +889,8 @@ TEST_F(RtpSenderTest, SendRedundantPayloads) {
kAbsoluteSendTimeExtensionId);
rtp_sender_->SetTargetBitrate(300000);
const size_t kNumPayloadSizes = 10;
- const size_t kPayloadSizes[kNumPayloadSizes] = {500, 550, 600, 650, 700, 750,
- 800, 850, 900, 950};
+ const size_t kPayloadSizes[kNumPayloadSizes] = {500, 550, 600, 650, 700,
+ 750, 800, 850, 900, 950};
// Send 10 packets of increasing size.
for (size_t i = 0; i < kNumPayloadSizes; ++i) {
int64_t capture_time_ms = fake_clock_.TimeInMilliseconds();
@@ -921,10 +934,10 @@ TEST_F(RtpSenderTestWithoutPacer, SendGenericVideo) {
RtpUtility::RtpHeaderParser rtp_parser(transport_.last_sent_packet_,
transport_.last_sent_packet_len_);
webrtc::RTPHeader rtp_header;
- ASSERT_TRUE(rtp_parser.Parse(rtp_header));
+ ASSERT_TRUE(rtp_parser.Parse(&rtp_header));
- const uint8_t* payload_data = GetPayloadData(rtp_header,
- transport_.last_sent_packet_);
+ const uint8_t* payload_data =
+ GetPayloadData(rtp_header, transport_.last_sent_packet_);
uint8_t generic_header = *payload_data++;
ASSERT_EQ(sizeof(payload) + sizeof(generic_header),
@@ -946,7 +959,7 @@ TEST_F(RtpSenderTestWithoutPacer, SendGenericVideo) {
RtpUtility::RtpHeaderParser rtp_parser2(transport_.last_sent_packet_,
transport_.last_sent_packet_len_);
- ASSERT_TRUE(rtp_parser.Parse(rtp_header));
+ ASSERT_TRUE(rtp_parser.Parse(&rtp_header));
payload_data = GetPayloadData(rtp_header, transport_.last_sent_packet_);
generic_header = *payload_data++;
@@ -1043,9 +1056,8 @@ TEST_F(RtpSenderTest, BitrateCallbacks) {
char payload_name[RTP_PAYLOAD_NAME_SIZE] = "GENERIC";
const uint8_t payload_type = 127;
- ASSERT_EQ(
- 0,
- rtp_sender_->RegisterPayload(payload_name, payload_type, 90000, 0, 1500));
+ ASSERT_EQ(0, rtp_sender_->RegisterPayload(payload_name, payload_type, 90000,
+ 0, 1500));
uint8_t payload[] = {47, 11, 32, 93, 89};
rtp_sender_->SetStorePacketsStatus(true, 1);
uint32_t ssrc = rtp_sender_->SSRC();
@@ -1057,13 +1069,8 @@ TEST_F(RtpSenderTest, BitrateCallbacks) {
// Send a few frames.
for (uint32_t i = 0; i < kNumPackets; ++i) {
ASSERT_EQ(0,
- rtp_sender_->SendOutgoingData(kVideoFrameKey,
- payload_type,
- 1234,
- 4321,
- payload,
- sizeof(payload),
- 0));
+ rtp_sender_->SendOutgoingData(kVideoFrameKey, payload_type, 1234,
+ 4321, payload, sizeof(payload), 0));
fake_clock_.AdvanceTimeMilliseconds(kPacketInterval);
}
@@ -1100,8 +1107,7 @@ class RtpSenderAudioTest : public RtpSenderTest {
TEST_F(RtpSenderTestWithoutPacer, StreamDataCountersCallbacks) {
class TestCallback : public StreamDataCountersCallback {
public:
- TestCallback()
- : StreamDataCountersCallback(), ssrc_(0), counters_() {}
+ TestCallback() : StreamDataCountersCallback(), ssrc_(0), counters_() {}
virtual ~TestCallback() {}
void DataCountersUpdated(const StreamDataCounters& counters,
@@ -1127,7 +1133,6 @@ TEST_F(RtpSenderTestWithoutPacer, StreamDataCountersCallbacks) {
MatchPacketCounter(counters.retransmitted, counters_.retransmitted);
EXPECT_EQ(counters.fec.packets, counters_.fec.packets);
}
-
} callback;
const uint8_t kRedPayloadType = 96;
@@ -1212,10 +1217,10 @@ TEST_F(RtpSenderAudioTest, SendAudio) {
RtpUtility::RtpHeaderParser rtp_parser(transport_.last_sent_packet_,
transport_.last_sent_packet_len_);
webrtc::RTPHeader rtp_header;
- ASSERT_TRUE(rtp_parser.Parse(rtp_header));
+ ASSERT_TRUE(rtp_parser.Parse(&rtp_header));
- const uint8_t* payload_data = GetPayloadData(rtp_header,
- transport_.last_sent_packet_);
+ const uint8_t* payload_data =
+ GetPayloadData(rtp_header, transport_.last_sent_packet_);
ASSERT_EQ(sizeof(payload),
GetPayloadDataLength(rtp_header, transport_.last_sent_packet_len_));
@@ -1225,8 +1230,8 @@ TEST_F(RtpSenderAudioTest, SendAudio) {
TEST_F(RtpSenderAudioTest, SendAudioWithAudioLevelExtension) {
EXPECT_EQ(0, rtp_sender_->SetAudioLevel(kAudioLevel));
- EXPECT_EQ(0, rtp_sender_->RegisterRtpHeaderExtension(
- kRtpExtensionAudioLevel, kAudioLevelExtensionId));
+ EXPECT_EQ(0, rtp_sender_->RegisterRtpHeaderExtension(kRtpExtensionAudioLevel,
+ kAudioLevelExtensionId));
char payload_name[RTP_PAYLOAD_NAME_SIZE] = "PAYLOAD_NAME";
const uint8_t payload_type = 127;
@@ -1241,21 +1246,22 @@ TEST_F(RtpSenderAudioTest, SendAudioWithAudioLevelExtension) {
RtpUtility::RtpHeaderParser rtp_parser(transport_.last_sent_packet_,
transport_.last_sent_packet_len_);
webrtc::RTPHeader rtp_header;
- ASSERT_TRUE(rtp_parser.Parse(rtp_header));
+ ASSERT_TRUE(rtp_parser.Parse(&rtp_header));
- const uint8_t* payload_data = GetPayloadData(rtp_header,
- transport_.last_sent_packet_);
+ const uint8_t* payload_data =
+ GetPayloadData(rtp_header, transport_.last_sent_packet_);
ASSERT_EQ(sizeof(payload),
GetPayloadDataLength(rtp_header, transport_.last_sent_packet_len_));
EXPECT_EQ(0, memcmp(payload, payload_data, sizeof(payload)));
- uint8_t extension[] = { 0xbe, 0xde, 0x00, 0x01,
- (kAudioLevelExtensionId << 4) + 0, // ID + length.
- kAudioLevel, // Data.
- 0x00, 0x00 // Padding.
- };
+ uint8_t extension[] = {
+ 0xbe, 0xde, 0x00, 0x01,
+ (kAudioLevelExtensionId << 4) + 0, // ID + length.
+ kAudioLevel, // Data.
+ 0x00, 0x00 // Padding.
+ };
EXPECT_EQ(0, memcmp(extension, payload_data - sizeof(extension),
sizeof(extension)));
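
The expected bytes in this test follow the RFC 5285 one-byte-header layout: the 0xBEDE profile marker, a length expressed in 32-bit words, then elements of ID (high nibble) and length-minus-one (low nibble); audio level is a single data byte, so the length nibble is 0 and the block is padded out to a word boundary. A sketch that produces the same 8 bytes:

#include <cstdint>

// ext_id and level are parameters here; the test uses its own constants.
void WriteAudioLevelExtension(uint8_t* out, uint8_t ext_id, uint8_t level) {
  out[0] = 0xbe;  // One-byte-header profile
  out[1] = 0xde;  // marker (0xBEDE).
  out[2] = 0x00;
  out[3] = 0x01;  // Length: one 32-bit word of element data follows.
  out[4] = static_cast<uint8_t>((ext_id << 4) | 0x00);  // ID, len - 1 = 0.
  out[5] = level;  // V bit and 7-bit level share this data byte.
  out[6] = 0x00;
  out[7] = 0x00;  // Pad to the 32-bit boundary.
}
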
@@ -1270,14 +1276,14 @@ TEST_F(RtpSenderAudioTest, SendAudioWithAudioLevelExtension) {
TEST_F(RtpSenderAudioTest, CheckMarkerBitForTelephoneEvents) {
char payload_name[RTP_PAYLOAD_NAME_SIZE] = "telephone-event";
uint8_t payload_type = 126;
- ASSERT_EQ(0, rtp_sender_->RegisterPayload(payload_name, payload_type, 0,
- 0, 0));
+ ASSERT_EQ(0,
+ rtp_sender_->RegisterPayload(payload_name, payload_type, 0, 0, 0));
// For Telephone events, payload is not added to the registered payload list,
// it will register only the payload used for audio stream.
// Registering the payload again for audio stream with different payload name.
- strcpy(payload_name, "payload_name");
- ASSERT_EQ(0, rtp_sender_->RegisterPayload(payload_name, payload_type, 8000,
- 1, 0));
+ const char kPayloadName[] = "payload_name";
+ ASSERT_EQ(
+ 0, rtp_sender_->RegisterPayload(kPayloadName, payload_type, 8000, 1, 0));
int64_t capture_time_ms = fake_clock_.TimeInMilliseconds();
// DTMF event key=9, duration=500 and attenuationdB=10
rtp_sender_->SendTelephoneEvent(9, 500, 10);
@@ -1298,8 +1304,7 @@ TEST_F(RtpSenderAudioTest, CheckMarkerBitForTelephoneEvents) {
ASSERT_TRUE(rtp_parser.get() != nullptr);
webrtc::RTPHeader rtp_header;
ASSERT_TRUE(rtp_parser->Parse(transport_.last_sent_packet_,
- transport_.last_sent_packet_len_,
- &rtp_header));
+ transport_.last_sent_packet_len_, &rtp_header));
// Marker Bit should be set to 1 for first packet.
EXPECT_TRUE(rtp_header.markerBit);
@@ -1307,8 +1312,7 @@ TEST_F(RtpSenderAudioTest, CheckMarkerBitForTelephoneEvents) {
capture_time_ms + 4000, 0, nullptr,
0, nullptr));
ASSERT_TRUE(rtp_parser->Parse(transport_.last_sent_packet_,
- transport_.last_sent_packet_len_,
- &rtp_header));
+ transport_.last_sent_packet_len_, &rtp_header));
// The marker bit should be set to 0 for the rest of the packets.
EXPECT_FALSE(rtp_header.markerBit);
}
@@ -1321,19 +1325,13 @@ TEST_F(RtpSenderTestWithoutPacer, BytesReportedCorrectly) {
rtp_sender_->SetRtxPayloadType(kPayloadType - 1, kPayloadType);
rtp_sender_->SetRtxStatus(kRtxRetransmitted | kRtxRedundantPayloads);
- ASSERT_EQ(
- 0,
- rtp_sender_->RegisterPayload(kPayloadName, kPayloadType, 90000, 0, 1500));
+ ASSERT_EQ(0, rtp_sender_->RegisterPayload(kPayloadName, kPayloadType, 90000,
+ 0, 1500));
uint8_t payload[] = {47, 11, 32, 93, 89};
- ASSERT_EQ(0,
- rtp_sender_->SendOutgoingData(kVideoFrameKey,
- kPayloadType,
- 1234,
- 4321,
- payload,
- sizeof(payload),
- 0));
+ ASSERT_EQ(
+ 0, rtp_sender_->SendOutgoingData(kVideoFrameKey, kPayloadType, 1234, 4321,
+ payload, sizeof(payload), 0));
// Will send 2 full-size padding packets.
rtp_sender_->TimeToSendPadding(1);
@@ -1353,17 +1351,17 @@ TEST_F(RtpSenderTestWithoutPacer, BytesReportedCorrectly) {
EXPECT_EQ(rtx_stats.transmitted.padding_bytes, 2 * kMaxPaddingSize);
EXPECT_EQ(rtp_stats.transmitted.TotalBytes(),
- rtp_stats.transmitted.payload_bytes +
- rtp_stats.transmitted.header_bytes +
- rtp_stats.transmitted.padding_bytes);
+ rtp_stats.transmitted.payload_bytes +
+ rtp_stats.transmitted.header_bytes +
+ rtp_stats.transmitted.padding_bytes);
EXPECT_EQ(rtx_stats.transmitted.TotalBytes(),
- rtx_stats.transmitted.payload_bytes +
- rtx_stats.transmitted.header_bytes +
- rtx_stats.transmitted.padding_bytes);
+ rtx_stats.transmitted.payload_bytes +
+ rtx_stats.transmitted.header_bytes +
+ rtx_stats.transmitted.padding_bytes);
- EXPECT_EQ(transport_.total_bytes_sent_,
- rtp_stats.transmitted.TotalBytes() +
- rtx_stats.transmitted.TotalBytes());
+ EXPECT_EQ(
+ transport_.total_bytes_sent_,
+ rtp_stats.transmitted.TotalBytes() + rtx_stats.transmitted.TotalBytes());
}
TEST_F(RtpSenderTestWithoutPacer, RespectsNackBitrateLimit) {
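For reference, the extension[] bytes built in SendAudioWithAudioLevelExtension
above follow the RFC 5285 one-byte-header format. A minimal sketch of that
layout; kAudioLevelExtensionId and kAudioLevel stand in for the test's
constants, whose actual values are defined elsewhere in the file:

  const uint8_t kAudioLevelExtensionId = 9;  // Assumed id, must be in [1, 14].
  const uint8_t kAudioLevel = 0x5a;          // Assumed level byte.
  uint8_t extension[] = {
      0xbe, 0xde,  // Magic cookie for the one-byte-header form.
      0x00, 0x01,  // Extension length: one 32-bit word of elements follows.
      (kAudioLevelExtensionId << 4) + 0,  // High nibble: id. Low nibble: len-1.
      kAudioLevel,                        // One byte of data (V bit + level).
      0x00, 0x00                          // Padding up to a 32-bit boundary.
  };

This is why the test compares against memory immediately before the payload:
the extension words sit between the fixed RTP header and the payload data.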
diff --git a/webrtc/modules/rtp_rtcp/source/rtp_sender_video.cc b/webrtc/modules/rtp_rtcp/source/rtp_sender_video.cc
index 66062771de..5a565dfa99 100644
--- a/webrtc/modules/rtp_rtcp/source/rtp_sender_video.cc
+++ b/webrtc/modules/rtp_rtcp/source/rtp_sender_video.cc
@@ -18,7 +18,7 @@
#include "webrtc/base/checks.h"
#include "webrtc/base/logging.h"
#include "webrtc/base/trace_event.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "webrtc/modules/rtp_rtcp/source/byte_io.h"
#include "webrtc/modules/rtp_rtcp/source/producer_fec.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_format_video_generic.h"
@@ -42,13 +42,13 @@ RTPSenderVideo::RTPSenderVideo(Clock* clock, RTPSenderInterface* rtpSender)
_retransmissionSettings(kRetransmitBaseLayer),
// Generic FEC
- _fec(),
- _fecEnabled(false),
- _payloadTypeRED(-1),
- _payloadTypeFEC(-1),
+ fec_(),
+ fec_enabled_(false),
+ red_payload_type_(-1),
+ fec_payload_type_(-1),
delta_fec_params_(),
key_fec_params_(),
- producer_fec_(&_fec),
+ producer_fec_(&fec_),
_fecOverheadRate(clock, NULL),
_videoBitrate(clock, NULL) {
memset(&delta_fec_params_, 0, sizeof(delta_fec_params_));
@@ -104,7 +104,7 @@ void RTPSenderVideo::SendVideoPacket(uint8_t* data_buffer,
StorageType storage) {
if (_rtpSender.SendToNetwork(data_buffer, payload_length, rtp_header_length,
capture_time_ms, storage,
- RtpPacketSender::kNormalPriority) == 0) {
+ RtpPacketSender::kLowPriority) == 0) {
_videoBitrate.Update(payload_length + rtp_header_length);
TRACE_EVENT_INSTANT2(TRACE_DISABLED_BY_DEFAULT("webrtc_rtp"),
"Video::PacketNormal", "timestamp", capture_timestamp,
@@ -130,7 +130,7 @@ void RTPSenderVideo::SendVideoPacketAsRed(uint8_t* data_buffer,
// Only protect while creating RED and FEC packets, not when sending.
CriticalSectionScoped cs(crit_.get());
red_packet.reset(producer_fec_.BuildRedPacket(
- data_buffer, payload_length, rtp_header_length, _payloadTypeRED));
+ data_buffer, payload_length, rtp_header_length, red_payload_type_));
if (protect) {
producer_fec_.AddRtpPacketAndGenerateFec(data_buffer, payload_length,
rtp_header_length);
@@ -140,7 +140,7 @@ void RTPSenderVideo::SendVideoPacketAsRed(uint8_t* data_buffer,
next_fec_sequence_number =
_rtpSender.AllocateSequenceNumber(num_fec_packets);
fec_packets = producer_fec_.GetFecPackets(
- _payloadTypeRED, _payloadTypeFEC, next_fec_sequence_number,
+ red_payload_type_, fec_payload_type_, next_fec_sequence_number,
rtp_header_length);
RTC_DCHECK_EQ(num_fec_packets, fec_packets.size());
if (_retransmissionSettings & kRetransmitFECPackets)
@@ -150,7 +150,7 @@ void RTPSenderVideo::SendVideoPacketAsRed(uint8_t* data_buffer,
if (_rtpSender.SendToNetwork(
red_packet->data(), red_packet->length() - rtp_header_length,
rtp_header_length, capture_time_ms, media_packet_storage,
- RtpPacketSender::kNormalPriority) == 0) {
+ RtpPacketSender::kLowPriority) == 0) {
_videoBitrate.Update(red_packet->length());
TRACE_EVENT_INSTANT2(TRACE_DISABLED_BY_DEFAULT("webrtc_rtp"),
"Video::PacketRed", "timestamp", capture_timestamp,
@@ -162,7 +162,7 @@ void RTPSenderVideo::SendVideoPacketAsRed(uint8_t* data_buffer,
if (_rtpSender.SendToNetwork(
fec_packet->data(), fec_packet->length() - rtp_header_length,
rtp_header_length, capture_time_ms, fec_storage,
- RtpPacketSender::kNormalPriority) == 0) {
+ RtpPacketSender::kLowPriority) == 0) {
_fecOverheadRate.Update(fec_packet->length());
TRACE_EVENT_INSTANT2(TRACE_DISABLED_BY_DEFAULT("webrtc_rtp"),
"Video::PacketFec", "timestamp", capture_timestamp,
@@ -180,9 +180,9 @@ void RTPSenderVideo::SetGenericFECStatus(const bool enable,
const uint8_t payloadTypeRED,
const uint8_t payloadTypeFEC) {
CriticalSectionScoped cs(crit_.get());
- _fecEnabled = enable;
- _payloadTypeRED = payloadTypeRED;
- _payloadTypeFEC = payloadTypeFEC;
+ fec_enabled_ = enable;
+ red_payload_type_ = payloadTypeRED;
+ fec_payload_type_ = payloadTypeFEC;
memset(&delta_fec_params_, 0, sizeof(delta_fec_params_));
memset(&key_fec_params_, 0, sizeof(key_fec_params_));
delta_fec_params_.max_fec_frames = key_fec_params_.max_fec_frames = 1;
@@ -190,18 +190,18 @@ void RTPSenderVideo::SetGenericFECStatus(const bool enable,
kFecMaskRandom;
}
-void RTPSenderVideo::GenericFECStatus(bool& enable,
- uint8_t& payloadTypeRED,
- uint8_t& payloadTypeFEC) const {
+void RTPSenderVideo::GenericFECStatus(bool* enable,
+ uint8_t* payloadTypeRED,
+ uint8_t* payloadTypeFEC) const {
CriticalSectionScoped cs(crit_.get());
- enable = _fecEnabled;
- payloadTypeRED = _payloadTypeRED;
- payloadTypeFEC = _payloadTypeFEC;
+ *enable = fec_enabled_;
+ *payloadTypeRED = red_payload_type_;
+ *payloadTypeFEC = fec_payload_type_;
}
size_t RTPSenderVideo::FECPacketOverhead() const {
CriticalSectionScoped cs(crit_.get());
- if (_fecEnabled) {
+ if (fec_enabled_) {
// Overhead is FEC headers plus RED for FEC header plus anything in RTP
// header beyond the 12 bytes base header (CSRC list, extensions...)
// The reason the header extensions are included here is that
@@ -247,7 +247,7 @@ int32_t RTPSenderVideo::SendVideo(const RtpVideoCodecTypes videoType,
frameType == kVideoFrameKey ? &key_fec_params_ : &delta_fec_params_;
producer_fec_.SetFecParameters(fec_params, 0);
storage = packetizer->GetStorageType(_retransmissionSettings);
- fec_enabled = _fecEnabled;
+ fec_enabled = fec_enabled_;
}
// Register CVO rtp header extension at the first time when we receive a frame
@@ -304,7 +304,7 @@ int32_t RTPSenderVideo::SendVideo(const RtpVideoCodecTypes videoType,
size_t packetSize = payloadSize + rtp_header_length;
RtpUtility::RtpHeaderParser rtp_parser(dataBuffer, packetSize);
RTPHeader rtp_header;
- rtp_parser.Parse(rtp_header);
+ rtp_parser.Parse(&rtp_header);
_rtpSender.UpdateVideoRotation(dataBuffer, packetSize, rtp_header,
rtpHdr->rotation);
}
diff --git a/webrtc/modules/rtp_rtcp/source/rtp_sender_video.h b/webrtc/modules/rtp_rtcp/source/rtp_sender_video.h
index f412542d86..e59321ab93 100644
--- a/webrtc/modules/rtp_rtcp/source/rtp_sender_video.h
+++ b/webrtc/modules/rtp_rtcp/source/rtp_sender_video.h
@@ -16,7 +16,7 @@
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/base/thread_annotations.h"
#include "webrtc/common_types.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "webrtc/modules/rtp_rtcp/source/bitrate.h"
#include "webrtc/modules/rtp_rtcp/source/forward_error_correction.h"
#include "webrtc/modules/rtp_rtcp/source/producer_fec.h"
@@ -67,9 +67,9 @@ class RTPSenderVideo {
const uint8_t payloadTypeRED,
const uint8_t payloadTypeFEC);
- void GenericFECStatus(bool& enable,
- uint8_t& payloadTypeRED,
- uint8_t& payloadTypeFEC) const;
+ void GenericFECStatus(bool* enable,
+ uint8_t* payloadTypeRED,
+ uint8_t* payloadTypeFEC) const;
void SetFecParameters(const FecProtectionParams* delta_params,
const FecProtectionParams* key_params);
@@ -82,7 +82,7 @@ class RTPSenderVideo {
int SelectiveRetransmissions() const;
void SetSelectiveRetransmissions(uint8_t settings);
-private:
+ private:
void SendVideoPacket(uint8_t* dataBuffer,
const size_t payloadLength,
const size_t rtpHeaderLength,
@@ -110,10 +110,10 @@ private:
int32_t _retransmissionSettings GUARDED_BY(crit_);
// FEC
- ForwardErrorCorrection _fec;
- bool _fecEnabled GUARDED_BY(crit_);
- int8_t _payloadTypeRED GUARDED_BY(crit_);
- int8_t _payloadTypeFEC GUARDED_BY(crit_);
+ ForwardErrorCorrection fec_;
+ bool fec_enabled_ GUARDED_BY(crit_);
+ int8_t red_payload_type_ GUARDED_BY(crit_);
+ int8_t fec_payload_type_ GUARDED_BY(crit_);
FecProtectionParams delta_fec_params_ GUARDED_BY(crit_);
FecProtectionParams key_fec_params_ GUARDED_BY(crit_);
ProducerFec producer_fec_ GUARDED_BY(crit_);
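With the reference-to-pointer migration above, callers of GenericFECStatus()
now pass addresses for the out-parameters. A minimal usage sketch, assuming
sender_video is an existing RTPSenderVideo instance:

  bool fec_enabled = false;
  uint8_t red_payload_type = 0;
  uint8_t fec_payload_type = 0;
  sender_video.GenericFECStatus(&fec_enabled, &red_payload_type,
                                &fec_payload_type);

Pointer out-parameters make the mutation visible at the call site, which is
the motivation for this style change.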
diff --git a/webrtc/modules/rtp_rtcp/source/rtp_utility.cc b/webrtc/modules/rtp_rtcp/source/rtp_utility.cc
index bf0b30a064..0f0ad835b1 100644
--- a/webrtc/modules/rtp_rtcp/source/rtp_utility.cc
+++ b/webrtc/modules/rtp_rtcp/source/rtp_utility.cc
@@ -10,38 +10,10 @@
#include "webrtc/modules/rtp_rtcp/source/rtp_utility.h"
-#include <assert.h>
-#include <math.h> // ceil
-#include <string.h> // memcpy
-
-#if defined(_WIN32)
-// Order for these headers are important
-#include <winsock2.h> // timeval
-#include <windows.h> // FILETIME
-#include <MMSystem.h> // timeGetTime
-#elif ((defined WEBRTC_LINUX) || (defined WEBRTC_MAC))
-#include <sys/time.h> // gettimeofday
-#include <time.h>
-#endif
-#if (defined(_DEBUG) && defined(_WIN32) && (_MSC_VER >= 1400))
-#include <stdio.h>
-#endif
+#include <string.h>
#include "webrtc/base/logging.h"
#include "webrtc/modules/rtp_rtcp/source/byte_io.h"
-#include "webrtc/system_wrappers/include/tick_util.h"
-
-#if (defined(_DEBUG) && defined(_WIN32) && (_MSC_VER >= 1400))
-#define DEBUG_PRINT(...) \
- { \
- char msg[256]; \
- sprintf(msg, __VA_ARGS__); \
- OutputDebugString(msg); \
- }
-#else
-// special fix for visual 2003
-#define DEBUG_PRINT(exp) ((void)0)
-#endif // defined(_DEBUG) && defined(_WIN32)
namespace webrtc {
@@ -77,50 +49,18 @@ enum {
};
/*
- * Time routines.
- */
-
-uint32_t GetCurrentRTP(Clock* clock, uint32_t freq) {
- const bool use_global_clock = (clock == NULL);
- Clock* local_clock = clock;
- if (use_global_clock) {
- local_clock = Clock::GetRealTimeClock();
- }
- uint32_t secs = 0, frac = 0;
- local_clock->CurrentNtp(secs, frac);
- if (use_global_clock) {
- delete local_clock;
- }
- return ConvertNTPTimeToRTP(secs, frac, freq);
-}
-
-uint32_t ConvertNTPTimeToRTP(uint32_t NTPsec, uint32_t NTPfrac, uint32_t freq) {
- float ftemp = (float)NTPfrac / (float)NTP_FRAC;
- uint32_t tmp = (uint32_t)(ftemp * freq);
- return NTPsec * freq + tmp;
-}
-
-uint32_t ConvertNTPTimeToMS(uint32_t NTPsec, uint32_t NTPfrac) {
- int freq = 1000;
- float ftemp = (float)NTPfrac / (float)NTP_FRAC;
- uint32_t tmp = (uint32_t)(ftemp * freq);
- uint32_t MStime = NTPsec * freq + tmp;
- return MStime;
-}
-
-/*
* Misc utility routines
*/
#if defined(_WIN32)
bool StringCompare(const char* str1, const char* str2,
const uint32_t length) {
- return (_strnicmp(str1, str2, length) == 0) ? true : false;
+ return _strnicmp(str1, str2, length) == 0;
}
#elif defined(WEBRTC_LINUX) || defined(WEBRTC_MAC)
bool StringCompare(const char* str1, const char* str2,
const uint32_t length) {
- return (strncasecmp(str1, str2, length) == 0) ? true : false;
+ return strncasecmp(str1, str2, length) == 0;
}
#endif
@@ -131,10 +71,6 @@ size_t Word32Align(size_t size) {
return size;
}
-uint32_t pow2(uint8_t exp) {
- return 1 << exp;
-}
-
RtpHeaderParser::RtpHeaderParser(const uint8_t* rtpData,
const size_t rtpDataLength)
: _ptrRTPDataBegin(rtpData),
@@ -244,7 +180,7 @@ bool RtpHeaderParser::ParseRtcp(RTPHeader* header) const {
return true;
}
-bool RtpHeaderParser::Parse(RTPHeader& header,
+bool RtpHeaderParser::Parse(RTPHeader* header,
RtpHeaderExtensionMap* ptrExtensionMap) const {
const ptrdiff_t length = _ptrRTPDataEnd - _ptrRTPDataBegin;
if (length < kRtpMinParseLength) {
@@ -283,39 +219,39 @@ bool RtpHeaderParser::Parse(RTPHeader& header,
return false;
}
- header.markerBit = M;
- header.payloadType = PT;
- header.sequenceNumber = sequenceNumber;
- header.timestamp = RTPTimestamp;
- header.ssrc = SSRC;
- header.numCSRCs = CC;
- header.paddingLength = P ? *(_ptrRTPDataEnd - 1) : 0;
+ header->markerBit = M;
+ header->payloadType = PT;
+ header->sequenceNumber = sequenceNumber;
+ header->timestamp = RTPTimestamp;
+ header->ssrc = SSRC;
+ header->numCSRCs = CC;
+ header->paddingLength = P ? *(_ptrRTPDataEnd - 1) : 0;
for (uint8_t i = 0; i < CC; ++i) {
uint32_t CSRC = ByteReader<uint32_t>::ReadBigEndian(ptr);
ptr += 4;
- header.arrOfCSRCs[i] = CSRC;
+ header->arrOfCSRCs[i] = CSRC;
}
- header.headerLength = 12 + CSRCocts;
+ header->headerLength = 12 + CSRCocts;
// If in effect, MAY be omitted for those packets for which the offset
// is zero.
- header.extension.hasTransmissionTimeOffset = false;
- header.extension.transmissionTimeOffset = 0;
+ header->extension.hasTransmissionTimeOffset = false;
+ header->extension.transmissionTimeOffset = 0;
// May not be present in packet.
- header.extension.hasAbsoluteSendTime = false;
- header.extension.absoluteSendTime = 0;
+ header->extension.hasAbsoluteSendTime = false;
+ header->extension.absoluteSendTime = 0;
// May not be present in packet.
- header.extension.hasAudioLevel = false;
- header.extension.voiceActivity = false;
- header.extension.audioLevel = 0;
+ header->extension.hasAudioLevel = false;
+ header->extension.voiceActivity = false;
+ header->extension.audioLevel = 0;
// May not be present in packet.
- header.extension.hasVideoRotation = false;
- header.extension.videoRotation = 0;
+ header->extension.hasVideoRotation = false;
+ header->extension.videoRotation = 0;
if (X) {
/* RTP header extension, RFC 3550.
@@ -332,7 +268,7 @@ bool RtpHeaderParser::Parse(RTPHeader& header,
return false;
}
- header.headerLength += 4;
+ header->headerLength += 4;
uint16_t definedByProfile = ByteReader<uint16_t>::ReadBigEndian(ptr);
ptr += 2;
@@ -352,15 +288,16 @@ bool RtpHeaderParser::Parse(RTPHeader& header,
ptrRTPDataExtensionEnd,
ptr);
}
- header.headerLength += XLen;
+ header->headerLength += XLen;
}
- if (header.headerLength + header.paddingLength > static_cast<size_t>(length))
+ if (header->headerLength + header->paddingLength >
+ static_cast<size_t>(length))
return false;
return true;
}
void RtpHeaderParser::ParseOneByteExtensionHeader(
- RTPHeader& header,
+ RTPHeader* header,
const RtpHeaderExtensionMap* ptrExtensionMap,
const uint8_t* ptrRTPDataExtensionEnd,
const uint8_t* ptr) const {
@@ -377,8 +314,8 @@ void RtpHeaderParser::ParseOneByteExtensionHeader(
// Note that 'len' is the header extension element length, which is the
// number of bytes - 1.
- const uint8_t id = (*ptr & 0xf0) >> 4;
- const uint8_t len = (*ptr & 0x0f);
+ const int id = (*ptr & 0xf0) >> 4;
+ const int len = (*ptr & 0x0f);
ptr++;
if (id == 15) {
@@ -390,8 +327,7 @@ void RtpHeaderParser::ParseOneByteExtensionHeader(
RTPExtensionType type;
if (ptrExtensionMap->GetType(id, &type) != 0) {
// If we encounter an unknown extension, just skip over it.
- LOG(LS_WARNING) << "Failed to find extension id: "
- << static_cast<int>(id);
+ LOG(LS_WARNING) << "Failed to find extension id: " << id;
} else {
switch (type) {
case kRtpExtensionTransmissionTimeOffset: {
@@ -406,9 +342,9 @@ void RtpHeaderParser::ParseOneByteExtensionHeader(
// | ID | len=2 | transmission offset |
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- header.extension.transmissionTimeOffset =
+ header->extension.transmissionTimeOffset =
ByteReader<int32_t, 3>::ReadBigEndian(ptr);
- header.extension.hasTransmissionTimeOffset = true;
+ header->extension.hasTransmissionTimeOffset = true;
break;
}
case kRtpExtensionAudioLevel: {
@@ -422,9 +358,9 @@ void RtpHeaderParser::ParseOneByteExtensionHeader(
// | ID | len=0 |V| level |
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
//
- header.extension.audioLevel = ptr[0] & 0x7f;
- header.extension.voiceActivity = (ptr[0] & 0x80) != 0;
- header.extension.hasAudioLevel = true;
+ header->extension.audioLevel = ptr[0] & 0x7f;
+ header->extension.voiceActivity = (ptr[0] & 0x80) != 0;
+ header->extension.hasAudioLevel = true;
break;
}
case kRtpExtensionAbsoluteSendTime: {
@@ -438,9 +374,9 @@ void RtpHeaderParser::ParseOneByteExtensionHeader(
// | ID | len=2 | absolute send time |
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- header.extension.absoluteSendTime =
+ header->extension.absoluteSendTime =
ByteReader<uint32_t, 3>::ReadBigEndian(ptr);
- header.extension.hasAbsoluteSendTime = true;
+ header->extension.hasAbsoluteSendTime = true;
break;
}
case kRtpExtensionVideoRotation: {
@@ -454,14 +390,14 @@ void RtpHeaderParser::ParseOneByteExtensionHeader(
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
// | ID | len=0 |0 0 0 0 C F R R|
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- header.extension.hasVideoRotation = true;
- header.extension.videoRotation = ptr[0];
+ header->extension.hasVideoRotation = true;
+ header->extension.videoRotation = ptr[0];
break;
}
case kRtpExtensionTransportSequenceNumber: {
if (len != 1) {
- LOG(LS_WARNING)
- << "Incorrect peer connection sequence number len: " << len;
+ LOG(LS_WARNING) << "Incorrect transport sequence number len: "
+ << len;
return;
}
// 0 1 2
@@ -472,8 +408,8 @@ void RtpHeaderParser::ParseOneByteExtensionHeader(
uint16_t sequence_number = ptr[0] << 8;
sequence_number += ptr[1];
- header.extension.transportSequenceNumber = sequence_number;
- header.extension.hasTransportSequenceNumber = true;
+ header->extension.transportSequenceNumber = sequence_number;
+ header->extension.hasTransportSequenceNumber = true;
break;
}
default: {
@@ -502,5 +438,4 @@ uint8_t RtpHeaderParser::ParsePaddingBytes(
return num_zero_bytes;
}
} // namespace RtpUtility
-
} // namespace webrtc
diff --git a/webrtc/modules/rtp_rtcp/source/rtp_utility.h b/webrtc/modules/rtp_rtcp/source/rtp_utility.h
index af20f97e82..23c175356a 100644
--- a/webrtc/modules/rtp_rtcp/source/rtp_utility.h
+++ b/webrtc/modules/rtp_rtcp/source/rtp_utility.h
@@ -11,10 +11,11 @@
#ifndef WEBRTC_MODULES_RTP_RTCP_SOURCE_RTP_UTILITY_H_
#define WEBRTC_MODULES_RTP_RTCP_SOURCE_RTP_UTILITY_H_
-#include <stddef.h> // size_t, ptrdiff_t
+#include <map>
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
-#include "webrtc/modules/rtp_rtcp/interface/receive_statistics.h"
+#include "webrtc/base/deprecation.h"
+#include "webrtc/modules/rtp_rtcp/include/receive_statistics.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_header_extension.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_rtcp_config.h"
#include "webrtc/typedefs.h"
@@ -29,71 +30,48 @@ RtpAudioFeedback* NullObjectRtpAudioFeedback();
ReceiveStatistics* NullObjectReceiveStatistics();
namespace RtpUtility {
- // January 1970, in NTP seconds.
- const uint32_t NTP_JAN_1970 = 2208988800UL;
-
- // Magic NTP fractional unit.
- const double NTP_FRAC = 4.294967296E+9;
-
- struct Payload
- {
- char name[RTP_PAYLOAD_NAME_SIZE];
- bool audio;
- PayloadUnion typeSpecific;
- };
-
- typedef std::map<int8_t, Payload*> PayloadTypeMap;
-
- // Return the current RTP timestamp from the NTP timestamp
- // returned by the specified clock.
- uint32_t GetCurrentRTP(Clock* clock, uint32_t freq);
-
- // Return the current RTP absolute timestamp.
- uint32_t ConvertNTPTimeToRTP(uint32_t NTPsec,
- uint32_t NTPfrac,
- uint32_t freq);
-
- uint32_t pow2(uint8_t exp);
-
- // Returns true if |newTimestamp| is older than |existingTimestamp|.
- // |wrapped| will be set to true if there has been a wraparound between the
- // two timestamps.
- bool OldTimestamp(uint32_t newTimestamp,
- uint32_t existingTimestamp,
- bool* wrapped);
-
- bool StringCompare(const char* str1,
- const char* str2,
- const uint32_t length);
-
- // Round up to the nearest size that is a multiple of 4.
- size_t Word32Align(size_t size);
-
- class RtpHeaderParser {
- public:
- RtpHeaderParser(const uint8_t* rtpData, size_t rtpDataLength);
- ~RtpHeaderParser();
-
- bool RTCP() const;
- bool ParseRtcp(RTPHeader* header) const;
- bool Parse(RTPHeader& parsedPacket,
- RtpHeaderExtensionMap* ptrExtensionMap = NULL) const;
-
- private:
- void ParseOneByteExtensionHeader(
- RTPHeader& parsedPacket,
- const RtpHeaderExtensionMap* ptrExtensionMap,
- const uint8_t* ptrRTPDataExtensionEnd,
- const uint8_t* ptr) const;
-
- uint8_t ParsePaddingBytes(
- const uint8_t* ptrRTPDataExtensionEnd,
- const uint8_t* ptr) const;
-
- const uint8_t* const _ptrRTPDataBegin;
- const uint8_t* const _ptrRTPDataEnd;
- };
+
+struct Payload {
+ char name[RTP_PAYLOAD_NAME_SIZE];
+ bool audio;
+ PayloadUnion typeSpecific;
+};
+
+typedef std::map<int8_t, Payload*> PayloadTypeMap;
+
+bool StringCompare(const char* str1, const char* str2, const uint32_t length);
+
+// Round up to the nearest size that is a multiple of 4.
+size_t Word32Align(size_t size);
+
+class RtpHeaderParser {
+ public:
+ RtpHeaderParser(const uint8_t* rtpData, size_t rtpDataLength);
+ ~RtpHeaderParser();
+
+ bool RTCP() const;
+ bool ParseRtcp(RTPHeader* header) const;
+ bool Parse(RTPHeader* parsedPacket,
+ RtpHeaderExtensionMap* ptrExtensionMap = nullptr) const;
+ RTC_DEPRECATED bool Parse(
+ RTPHeader& parsedPacket, // NOLINT(runtime/references)
+ RtpHeaderExtensionMap* ptrExtensionMap = nullptr) const {
+ return Parse(&parsedPacket, ptrExtensionMap);
+ }
+
+ private:
+ void ParseOneByteExtensionHeader(RTPHeader* parsedPacket,
+ const RtpHeaderExtensionMap* ptrExtensionMap,
+ const uint8_t* ptrRTPDataExtensionEnd,
+ const uint8_t* ptr) const;
+
+ uint8_t ParsePaddingBytes(const uint8_t* ptrRTPDataExtensionEnd,
+ const uint8_t* ptr) const;
+
+ const uint8_t* const _ptrRTPDataBegin;
+ const uint8_t* const _ptrRTPDataEnd;
+};
} // namespace RtpUtility
} // namespace webrtc
-#endif // WEBRTC_MODULES_RTP_RTCP_SOURCE_RTP_UTILITY_H_
+#endif // WEBRTC_MODULES_RTP_RTCP_SOURCE_RTP_UTILITY_H_
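The Parse() migration in this header matches the call-site changes above: the
pointer overload is the primary API, and the reference overload merely
forwards to it while it is deprecated. A minimal sketch of the new call site,
where packet and packet_length are assumed inputs from the caller:

  RtpUtility::RtpHeaderParser parser(packet, packet_length);
  webrtc::RTPHeader header;
  if (parser.Parse(&header)) {
    // header.sequenceNumber, header.timestamp, etc. are now filled in.
  }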
diff --git a/webrtc/modules/rtp_rtcp/source/ssrc_database.cc b/webrtc/modules/rtp_rtcp/source/ssrc_database.cc
index 6fb7c4701a..fb02b7ef12 100644
--- a/webrtc/modules/rtp_rtcp/source/ssrc_database.cc
+++ b/webrtc/modules/rtp_rtcp/source/ssrc_database.cc
@@ -10,110 +10,51 @@
#include "webrtc/modules/rtp_rtcp/source/ssrc_database.h"
-#include <assert.h>
-#include <stdlib.h>
-
+#include "webrtc/base/checks.h"
+#include "webrtc/system_wrappers/include/clock.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
-#ifdef _WIN32
- #include <windows.h>
- #include <MMSystem.h> //timeGetTime
-
-// TODO(hellner): investigate if it is necessary to disable these warnings.
- #pragma warning(disable:4311)
- #pragma warning(disable:4312)
-#else
- #include <stdio.h>
- #include <string.h>
- #include <time.h>
- #include <sys/time.h>
-#endif
-
namespace webrtc {
-SSRCDatabase*
-SSRCDatabase::StaticInstance(CountOperation count_operation)
-{
- SSRCDatabase* impl =
- GetStaticInstance<SSRCDatabase>(count_operation);
- return impl;
+namespace {
+uint64_t Seed() {
+ return Clock::GetRealTimeClock()->TimeInMicroseconds();
}
+} // namespace
-SSRCDatabase*
-SSRCDatabase::GetSSRCDatabase()
-{
- return StaticInstance(kAddRef);
+SSRCDatabase* SSRCDatabase::GetSSRCDatabase() {
+ return GetStaticInstance<SSRCDatabase>(kAddRef);
}
-void
-SSRCDatabase::ReturnSSRCDatabase()
-{
- StaticInstance(kRelease);
+void SSRCDatabase::ReturnSSRCDatabase() {
+ GetStaticInstance<SSRCDatabase>(kRelease);
}
-uint32_t
-SSRCDatabase::CreateSSRC()
-{
- CriticalSectionScoped lock(_critSect);
+uint32_t SSRCDatabase::CreateSSRC() {
+ CriticalSectionScoped lock(crit_.get());
- uint32_t ssrc = GenerateRandom();
-
- while(_ssrcMap.find(ssrc) != _ssrcMap.end())
- {
- ssrc = GenerateRandom();
+ while (true) { // Retry until we find an unused SSRC.
+ // 0 and 0xffffffff are invalid values for SSRC.
+ uint32_t ssrc = random_.Rand(1u, 0xfffffffe);
+ if (ssrcs_.insert(ssrc).second) {
+ return ssrc;
}
- _ssrcMap[ssrc] = 0;
-
- return ssrc;
+ }
}
-int32_t
-SSRCDatabase::RegisterSSRC(const uint32_t ssrc)
-{
- CriticalSectionScoped lock(_critSect);
- _ssrcMap[ssrc] = 0;
- return 0;
+void SSRCDatabase::RegisterSSRC(uint32_t ssrc) {
+ CriticalSectionScoped lock(crit_.get());
+ ssrcs_.insert(ssrc);
}
-int32_t
-SSRCDatabase::ReturnSSRC(const uint32_t ssrc)
-{
- CriticalSectionScoped lock(_critSect);
- _ssrcMap.erase(ssrc);
- return 0;
+void SSRCDatabase::ReturnSSRC(uint32_t ssrc) {
+ CriticalSectionScoped lock(crit_.get());
+ ssrcs_.erase(ssrc);
}
SSRCDatabase::SSRCDatabase()
-{
- // we need to seed the random generator, otherwise we get 26500 each time, hardly a random value :)
-#ifdef _WIN32
- srand(timeGetTime());
-#else
- struct timeval tv;
- struct timezone tz;
- gettimeofday(&tv, &tz);
- srand(tv.tv_usec);
-#endif
+ : crit_(CriticalSectionWrapper::CreateCriticalSection()), random_(Seed()) {}
- _critSect = CriticalSectionWrapper::CreateCriticalSection();
+SSRCDatabase::~SSRCDatabase() {
}
-SSRCDatabase::~SSRCDatabase()
-{
- _ssrcMap.clear();
- delete _critSect;
-}
-
-uint32_t SSRCDatabase::GenerateRandom()
-{
- uint32_t ssrc = 0;
- do
- {
- ssrc = rand();
- ssrc = ssrc <<16;
- ssrc += rand();
-
- } while (ssrc == 0 || ssrc == 0xffffffff);
-
- return ssrc;
-}
} // namespace webrtc
diff --git a/webrtc/modules/rtp_rtcp/source/ssrc_database.h b/webrtc/modules/rtp_rtcp/source/ssrc_database.h
index 7129d0de76..7a3133638d 100644
--- a/webrtc/modules/rtp_rtcp/source/ssrc_database.h
+++ b/webrtc/modules/rtp_rtcp/source/ssrc_database.h
@@ -11,43 +11,41 @@
#ifndef WEBRTC_MODULES_RTP_RTCP_SOURCE_SSRC_DATABASE_H_
#define WEBRTC_MODULES_RTP_RTCP_SOURCE_SSRC_DATABASE_H_
-#include <map>
+#include <set>
+#include "webrtc/base/random.h"
+#include "webrtc/base/scoped_ptr.h"
#include "webrtc/system_wrappers/include/static_instance.h"
#include "webrtc/typedefs.h"
namespace webrtc {
class CriticalSectionWrapper;
-class SSRCDatabase
-{
-public:
- static SSRCDatabase* GetSSRCDatabase();
- static void ReturnSSRCDatabase();
+class SSRCDatabase {
+ public:
+ static SSRCDatabase* GetSSRCDatabase();
+ static void ReturnSSRCDatabase();
- uint32_t CreateSSRC();
- int32_t RegisterSSRC(const uint32_t ssrc);
- int32_t ReturnSSRC(const uint32_t ssrc);
+ uint32_t CreateSSRC();
+ void RegisterSSRC(uint32_t ssrc);
+ void ReturnSSRC(uint32_t ssrc);
-protected:
- SSRCDatabase();
- virtual ~SSRCDatabase();
+ protected:
+ SSRCDatabase();
+ virtual ~SSRCDatabase();
- static SSRCDatabase* CreateInstance() { return new SSRCDatabase(); }
+ static SSRCDatabase* CreateInstance() { return new SSRCDatabase(); }
-private:
- // Friend function to allow the SSRC destructor to be accessed from the
- // template class.
- friend SSRCDatabase* GetStaticInstance<SSRCDatabase>(
- CountOperation count_operation);
- static SSRCDatabase* StaticInstance(CountOperation count_operation);
+ private:
+ // Friend function to allow the SSRC destructor to be accessed from the
+ // template class.
+ friend SSRCDatabase* GetStaticInstance<SSRCDatabase>(
+ CountOperation count_operation);
- uint32_t GenerateRandom();
-
- std::map<uint32_t, uint32_t> _ssrcMap;
-
- CriticalSectionWrapper* _critSect;
+ rtc::scoped_ptr<CriticalSectionWrapper> crit_;
+ Random random_ GUARDED_BY(crit_);
+ std::set<uint32_t> ssrcs_ GUARDED_BY(crit_);
};
} // namespace webrtc
-#endif // WEBRTC_MODULES_RTP_RTCP_SOURCE_SSRC_DATABASE_H_
+#endif // WEBRTC_MODULES_RTP_RTCP_SOURCE_SSRC_DATABASE_H_
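Taken together, the two ssrc_database changes reduce the API to the sketch
below: the singleton hands out SSRCs that are unique within the process and
excludes the reserved values 0 and 0xffffffff (usage sketch only):

  SSRCDatabase* db = SSRCDatabase::GetSSRCDatabase();
  uint32_t ssrc = db->CreateSSRC();  // Unique, drawn from [1, 0xfffffffe].
  // ... use the SSRC for an RTP stream ...
  db->ReturnSSRC(ssrc);              // Release it for reuse.
  SSRCDatabase::ReturnSSRCDatabase();

Replacing the map of SSRCs with a std::set and rand() with webrtc::Random also
removes the platform-specific seeding code deleted above.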
diff --git a/webrtc/modules/rtp_rtcp/source/time_util.h b/webrtc/modules/rtp_rtcp/source/time_util.h
new file mode 100644
index 0000000000..5b544ddf9a
--- /dev/null
+++ b/webrtc/modules/rtp_rtcp/source/time_util.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_RTP_RTCP_SOURCE_TIME_UTIL_H_
+#define WEBRTC_MODULES_RTP_RTCP_SOURCE_TIME_UTIL_H_
+
+#include "webrtc/base/basictypes.h"
+#include "webrtc/system_wrappers/include/ntp_time.h"
+
+namespace webrtc {
+
+// Converts NTP timestamp to RTP timestamp.
+inline uint32_t NtpToRtp(NtpTime ntp, uint32_t freq) {
+ uint32_t tmp = (static_cast<uint64_t>(ntp.fractions()) * freq) >> 32;
+ return ntp.seconds() * freq + tmp;
+}
+// Return the current RTP timestamp from the NTP timestamp
+// returned by the specified clock.
+inline uint32_t CurrentRtp(const Clock& clock, uint32_t freq) {
+ return NtpToRtp(NtpTime(clock), freq);
+}
+
+// Helper function for compact ntp representation:
+// RFC 3550, Section 4. Time Format.
+// Wallclock time is represented using the timestamp format of
+// the Network Time Protocol (NTP).
+// ...
+// In some fields where a more compact representation is
+// appropriate, only the middle 32 bits are used; that is, the low 16
+// bits of the integer part and the high 16 bits of the fractional part.
+inline uint32_t CompactNtp(NtpTime ntp) {
+ return (ntp.seconds() << 16) | (ntp.fractions() >> 16);
+}
+// Converts interval between compact ntp timestamps to milliseconds.
+// This interval can be up to ~18.2 hours (2^16 seconds).
+inline uint32_t CompactNtpIntervalToMs(uint32_t compact_ntp_interval) {
+ return static_cast<uint64_t>(compact_ntp_interval) * 1000 / (1 << 16);
+}
+
+} // namespace webrtc
+#endif // WEBRTC_MODULES_RTP_RTCP_SOURCE_TIME_UTIL_H_
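As a quick sanity check of the compact NTP helpers above: the compact format
keeps the low 16 bits of the seconds and the high 16 bits of the fraction, so
one second equals 0x10000 compact units. A worked example:

  // 1.5 seconds in compact NTP: seconds part 1, fraction part 0x8000 (= 0.5).
  uint32_t interval = (1 << 16) | 0x8000;          // 0x00018000 = 98304 units.
  uint32_t ms = CompactNtpIntervalToMs(interval);  // 98304*1000/65536 = 1500.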
diff --git a/webrtc/modules/rtp_rtcp/source/time_util_unittest.cc b/webrtc/modules/rtp_rtcp/source/time_util_unittest.cc
new file mode 100644
index 0000000000..7efb83ccad
--- /dev/null
+++ b/webrtc/modules/rtp_rtcp/source/time_util_unittest.cc
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "webrtc/modules/rtp_rtcp/source/time_util.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace webrtc {
+
+TEST(TimeUtilTest, CompactNtp) {
+ const uint32_t kNtpSec = 0x12345678;
+ const uint32_t kNtpFrac = 0x23456789;
+ const NtpTime kNtp(kNtpSec, kNtpFrac);
+ const uint32_t kNtpMid = 0x56782345;
+ EXPECT_EQ(kNtpMid, CompactNtp(kNtp));
+}
+
+TEST(TimeUtilTest, CompactNtpToMs) {
+ const NtpTime ntp1(0x12345, 0x23456);
+ const NtpTime ntp2(0x12654, 0x64335);
+ uint32_t ms_diff = ntp2.ToMs() - ntp1.ToMs();
+ uint32_t ntp_diff = CompactNtp(ntp2) - CompactNtp(ntp1);
+
+ uint32_t ntp_to_ms_diff = CompactNtpIntervalToMs(ntp_diff);
+
+ EXPECT_NEAR(ms_diff, ntp_to_ms_diff, 1);
+}
+
+TEST(TimeUtilTest, CompactNtpToMsWithWrap) {
+ const NtpTime ntp1(0x1ffff, 0x23456);
+ const NtpTime ntp2(0x20000, 0x64335);
+ uint32_t ms_diff = ntp2.ToMs() - ntp1.ToMs();
+
+ // While ntp2 > ntp1, their compact NTP representations happen to be in the
+ // opposite order. That shouldn't be a problem as long as unsigned
+ // arithmetic is used.
+ ASSERT_GT(ntp2.ToMs(), ntp1.ToMs());
+ ASSERT_LT(CompactNtp(ntp2), CompactNtp(ntp1));
+
+ uint32_t ntp_diff = CompactNtp(ntp2) - CompactNtp(ntp1);
+ uint32_t ntp_to_ms_diff = CompactNtpIntervalToMs(ntp_diff);
+
+ EXPECT_NEAR(ms_diff, ntp_to_ms_diff, 1);
+}
+
+TEST(TimeUtilTest, CompactNtpToMsLarge) {
+ const NtpTime ntp1(0x10000, 0x23456);
+ const NtpTime ntp2(0x1ffff, 0x64335);
+ uint32_t ms_diff = ntp2.ToMs() - ntp1.ToMs();
+ // An NTP difference close to the ~18 hour maximum should convert correctly too.
+ ASSERT_GT(ms_diff, 18u * 3600 * 1000);
+ uint32_t ntp_diff = CompactNtp(ntp2) - CompactNtp(ntp1);
+ uint32_t ntp_to_ms_diff = CompactNtpIntervalToMs(ntp_diff);
+
+ EXPECT_NEAR(ms_diff, ntp_to_ms_diff, 1);
+}
+} // namespace webrtc
diff --git a/webrtc/modules/rtp_rtcp/source/tmmbr_help.cc b/webrtc/modules/rtp_rtcp/source/tmmbr_help.cc
index fb1ed625ed..f994ff7049 100644
--- a/webrtc/modules/rtp_rtcp/source/tmmbr_help.cc
+++ b/webrtc/modules/rtp_rtcp/source/tmmbr_help.cc
@@ -11,8 +11,10 @@
#include "webrtc/modules/rtp_rtcp/source/tmmbr_help.h"
#include <assert.h>
-#include <limits>
#include <string.h>
+
+#include <limits>
+
#include "webrtc/modules/rtp_rtcp/source/rtp_rtcp_config.h"
namespace webrtc {
diff --git a/webrtc/modules/rtp_rtcp/source/video_codec_information.h b/webrtc/modules/rtp_rtcp/source/video_codec_information.h
index 456b3bb934..7b819d060f 100644
--- a/webrtc/modules/rtp_rtcp/source/video_codec_information.h
+++ b/webrtc/modules/rtp_rtcp/source/video_codec_information.h
@@ -15,14 +15,13 @@
#include "webrtc/modules/rtp_rtcp/source/rtp_utility.h"
namespace webrtc {
-class VideoCodecInformation
-{
-public:
- virtual void Reset() = 0;
+class VideoCodecInformation {
+ public:
+ virtual void Reset() = 0;
- virtual RtpVideoCodecTypes Type() = 0;
- virtual ~VideoCodecInformation(){};
+ virtual RtpVideoCodecTypes Type() = 0;
+ virtual ~VideoCodecInformation() {}
};
} // namespace webrtc
-#endif // WEBRTC_MODULES_RTP_RTCP_SOURCE_VIDEO_CODEC_INFORMATION_H_
+#endif // WEBRTC_MODULES_RTP_RTCP_SOURCE_VIDEO_CODEC_INFORMATION_H_
diff --git a/webrtc/modules/rtp_rtcp/source/vp8_partition_aggregator.cc b/webrtc/modules/rtp_rtcp/source/vp8_partition_aggregator.cc
index feed784839..9721a7e9ac 100644
--- a/webrtc/modules/rtp_rtcp/source/vp8_partition_aggregator.cc
+++ b/webrtc/modules/rtp_rtcp/source/vp8_partition_aggregator.cc
@@ -37,9 +37,8 @@ PartitionTreeNode::PartitionTreeNode(PartitionTreeNode* parent,
PartitionTreeNode* PartitionTreeNode::CreateRootNode(const size_t* size_vector,
size_t num_partitions) {
- PartitionTreeNode* root_node =
- new PartitionTreeNode(NULL, &size_vector[1], num_partitions - 1,
- size_vector[0]);
+ PartitionTreeNode* root_node = new PartitionTreeNode(
+ NULL, &size_vector[1], num_partitions - 1, size_vector[0]);
root_node->set_packet_start(true);
return root_node;
}
@@ -54,7 +53,7 @@ int PartitionTreeNode::Cost(size_t penalty) {
if (num_partitions_ == 0) {
// This is a solution node.
cost = std::max(max_parent_size_, this_size_int()) -
- std::min(min_parent_size_, this_size_int());
+ std::min(min_parent_size_, this_size_int());
} else {
cost = std::max(max_parent_size_, this_size_int()) - min_parent_size_;
}
@@ -68,9 +67,7 @@ bool PartitionTreeNode::CreateChildren(size_t max_size) {
if (this_size_ + size_vector_[0] <= max_size) {
assert(!children_[kLeftChild]);
children_[kLeftChild] =
- new PartitionTreeNode(this,
- &size_vector_[1],
- num_partitions_ - 1,
+ new PartitionTreeNode(this, &size_vector_[1], num_partitions_ - 1,
this_size_ + size_vector_[0]);
children_[kLeftChild]->set_max_parent_size(max_parent_size_);
children_[kLeftChild]->set_min_parent_size(min_parent_size_);
@@ -80,10 +77,8 @@ bool PartitionTreeNode::CreateChildren(size_t max_size) {
}
if (this_size_ > 0) {
assert(!children_[kRightChild]);
- children_[kRightChild] = new PartitionTreeNode(this,
- &size_vector_[1],
- num_partitions_ - 1,
- size_vector_[0]);
+ children_[kRightChild] = new PartitionTreeNode(
+ this, &size_vector_[1], num_partitions_ - 1, size_vector_[0]);
children_[kRightChild]->set_max_parent_size(
std::max(max_parent_size_, this_size_int()));
children_[kRightChild]->set_min_parent_size(
@@ -148,7 +143,8 @@ PartitionTreeNode* PartitionTreeNode::GetOptimalNode(size_t max_size,
Vp8PartitionAggregator::Vp8PartitionAggregator(
const RTPFragmentationHeader& fragmentation,
- size_t first_partition_idx, size_t last_partition_idx)
+ size_t first_partition_idx,
+ size_t last_partition_idx)
: root_(NULL),
num_partitions_(last_partition_idx - first_partition_idx + 1),
size_vector_(new size_t[num_partitions_]),
@@ -158,14 +154,14 @@ Vp8PartitionAggregator::Vp8PartitionAggregator(
for (size_t i = 0; i < num_partitions_; ++i) {
size_vector_[i] =
fragmentation.fragmentationLength[i + first_partition_idx];
- largest_partition_size_ = std::max(largest_partition_size_,
- size_vector_[i]);
+ largest_partition_size_ =
+ std::max(largest_partition_size_, size_vector_[i]);
}
root_ = PartitionTreeNode::CreateRootNode(size_vector_, num_partitions_);
}
Vp8PartitionAggregator::~Vp8PartitionAggregator() {
- delete [] size_vector_;
+ delete[] size_vector_;
delete root_;
}
@@ -190,14 +186,16 @@ Vp8PartitionAggregator::FindOptimalConfiguration(size_t max_size,
assert(packet_index > 0);
assert(temp_node != NULL);
config_vector[i - 1] = packet_index - 1;
- if (temp_node->packet_start()) --packet_index;
+ if (temp_node->packet_start())
+ --packet_index;
temp_node = temp_node->parent();
}
return config_vector;
}
void Vp8PartitionAggregator::CalcMinMax(const ConfigVec& config,
- int* min_size, int* max_size) const {
+ int* min_size,
+ int* max_size) const {
if (*min_size < 0) {
*min_size = std::numeric_limits<int>::max();
}
@@ -263,8 +261,8 @@ size_t Vp8PartitionAggregator::CalcNumberOfFragments(
}
assert(num_fragments > 0);
// TODO(mflodman) Assert disabled since it's falsely triggered, see issue 293.
- //assert(large_partition_size / num_fragments + 1 <= max_payload_size);
+ // assert(large_partition_size / num_fragments + 1 <= max_payload_size);
return num_fragments;
}
-} // namespace
+} // namespace webrtc
diff --git a/webrtc/modules/rtp_rtcp/source/vp8_partition_aggregator.h b/webrtc/modules/rtp_rtcp/source/vp8_partition_aggregator.h
index 53b678f3b9..ccd22e5be2 100644
--- a/webrtc/modules/rtp_rtcp/source/vp8_partition_aggregator.h
+++ b/webrtc/modules/rtp_rtcp/source/vp8_partition_aggregator.h
@@ -14,7 +14,7 @@
#include <vector>
#include "webrtc/base/constructormagic.h"
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/typedefs.h"
namespace webrtc {
@@ -132,6 +132,6 @@ class Vp8PartitionAggregator {
RTC_DISALLOW_COPY_AND_ASSIGN(Vp8PartitionAggregator);
};
-} // namespace
+} // namespace webrtc
#endif // WEBRTC_MODULES_RTP_RTCP_SOURCE_VP8_PARTITION_AGGREGATOR_H_
diff --git a/webrtc/modules/rtp_rtcp/source/vp8_partition_aggregator_unittest.cc b/webrtc/modules/rtp_rtcp/source/vp8_partition_aggregator_unittest.cc
index 4650c94047..726d83ec50 100644
--- a/webrtc/modules/rtp_rtcp/source/vp8_partition_aggregator_unittest.cc
+++ b/webrtc/modules/rtp_rtcp/source/vp8_partition_aggregator_unittest.cc
@@ -209,4 +209,4 @@ TEST(Vp8PartitionAggregator, TestCalcNumberOfFragments) {
1600, kMTU, 1, 900, 1000));
}
-} // namespace
+} // namespace webrtc
diff --git a/webrtc/modules/rtp_rtcp/test/BWEStandAlone/BWEStandAlone.cc b/webrtc/modules/rtp_rtcp/test/BWEStandAlone/BWEStandAlone.cc
deleted file mode 100644
index 711be4a623..0000000000
--- a/webrtc/modules/rtp_rtcp/test/BWEStandAlone/BWEStandAlone.cc
+++ /dev/null
@@ -1,199 +0,0 @@
-/*
- * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-// BWEStandAlone.cpp : Defines the entry point for the console application.
-//
-
-#include <stdio.h>
-#include <string>
-
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp.h"
-#include "webrtc/system_wrappers/include/event_wrapper.h"
-#include "webrtc/system_wrappers/include/trace.h"
-#include "webrtc/test/channel_transport/udp_transport.h"
-
-#include "webrtc/modules/rtp_rtcp/test/BWEStandAlone/TestLoadGenerator.h"
-#include "webrtc/modules/rtp_rtcp/test/BWEStandAlone/TestSenderReceiver.h"
-
-#include "webrtc/modules/rtp_rtcp/test/BWEStandAlone/MatlabPlot.h"
-
-//#include "vld.h"
-
-class myTransportCB: public UdpTransportData
-{
-public:
- myTransportCB (RtpRtcp *rtpMod) : _rtpMod(rtpMod) {};
-protected:
- // Inherited from UdpTransportData
- void IncomingRTPPacket(const int8_t* incomingRtpPacket,
- const size_t rtpPacketLength,
- const int8_t* fromIP,
- const uint16_t fromPort) override;
-
- void IncomingRTCPPacket(const int8_t* incomingRtcpPacket,
- const size_t rtcpPacketLength,
- const int8_t* fromIP,
- const uint16_t fromPort) override;
-
-private:
- RtpRtcp *_rtpMod;
-};
-
-void myTransportCB::IncomingRTPPacket(const int8_t* incomingRtpPacket,
- const size_t rtpPacketLength,
- const int8_t* fromIP,
- const uint16_t fromPort)
-{
- printf("Receiving RTP from IP %s, port %u\n", fromIP, fromPort);
- _rtpMod->IncomingPacket((uint8_t *) incomingRtpPacket, rtpPacketLength);
-}
-
-void myTransportCB::IncomingRTCPPacket(const int8_t* incomingRtcpPacket,
- const size_t rtcpPacketLength,
- const int8_t* fromIP,
- const uint16_t fromPort)
-{
- printf("Receiving RTCP from IP %s, port %u\n", fromIP, fromPort);
- _rtpMod->IncomingPacket((uint8_t *) incomingRtcpPacket, rtcpPacketLength);
-}
-
-
-int main(int argc, char* argv[])
-{
- bool isSender = false;
- bool isReceiver = false;
- uint16_t port;
- std::string ip;
- TestSenderReceiver *sendrec = new TestSenderReceiver();
- TestLoadGenerator *gen;
-
- if (argc == 2)
- {
- // receiver only
- isReceiver = true;
-
- // read port
- port = atoi(argv[1]);
- }
- else if (argc == 3)
- {
- // sender and receiver
- isSender = true;
- isReceiver = true;
-
- // read IP
- ip = argv[1];
-
- // read port
- port = atoi(argv[2]);
- }
-
- Trace::CreateTrace();
- Trace::SetTraceFile("BWEStandAloneTrace.txt");
- Trace::set_level_filter(webrtc::kTraceAll);
-
- sendrec->InitReceiver(port);
-
- sendrec->Start();
-
- if (isSender)
- {
- const uint32_t startRateKbps = 1000;
- //gen = new CBRGenerator(sendrec, 1000, 500);
- gen = new CBRFixFRGenerator(sendrec, startRateKbps, 90000, 30, 0.2);
- //gen = new PeriodicKeyFixFRGenerator(sendrec, startRateKbps, 90000, 30, 0.2, 7, 300);
- //const uint16_t numFrameRates = 5;
- //const uint8_t frameRates[numFrameRates] = {30, 15, 20, 23, 25};
- //gen = new CBRVarFRGenerator(sendrec, 1000, frameRates, numFrameRates, 90000, 4.0, 0.1, 0.2);
- //gen = new CBRFrameDropGenerator(sendrec, startRateKbps, 90000, 0.2);
- sendrec->SetLoadGenerator(gen);
- sendrec->InitSender(startRateKbps, ip.c_str(), port);
- gen->Start();
- }
-
- while (1)
- {
- }
-
- if (isSender)
- {
- gen->Stop();
- delete gen;
- }
-
- delete sendrec;
-
- //uint8_t numberOfSocketThreads = 1;
- //UdpTransport* transport = UdpTransport::Create(0, numberOfSocketThreads);
-
- //RtpRtcp* rtp = RtpRtcp::CreateRtpRtcp(1, false);
- //if (rtp->InitSender() != 0)
- //{
- // exit(1);
- //}
- //if (rtp->RegisterSendTransport(transport) != 0)
- //{
- // exit(1);
- //}
-
-// transport->InitializeSendSockets("192.168.200.39", 8000);
- //transport->InitializeSendSockets("127.0.0.1", 10000);
- //transport->InitializeSourcePorts(8000);
-
-
- return(0);
- // myTransportCB *tp = new myTransportCB(rtp);
- // transport->InitializeReceiveSockets(tp, 10000, "0.0.0.0");
- // transport->StartReceiving(500);
-
- // int8_t data[100];
- // for (int i = 0; i < 100; data[i] = i++);
-
- // for (int i = 0; i < 100; i++)
- // {
- // transport->SendRaw(data, 100, false);
- // }
-
-
-
- // int32_t totTime = 0;
- // while (totTime < 10000)
- // {
- // transport->Process();
- // int32_t wTime = transport->TimeUntilNextProcess();
- // totTime += wTime;
- // Sleep(wTime);
- // }
-
-
- //if (transport)
- //{
- // // Destroy the Socket Transport module
- // transport->StopReceiving();
- // transport->InitializeReceiveSockets(NULL,0);// deregister callback
- // UdpTransport::Destroy(transport);
- // transport = NULL;
- // }
-
- // if (tp)
- // {
- // delete tp;
- // tp = NULL;
- // }
-
- // if (rtp)
- // {
- // RtpRtcp::DestroyRtpRtcp(rtp);
- // rtp = NULL;
- // }
-
-
- //return 0;
-}
diff --git a/webrtc/modules/rtp_rtcp/test/BWEStandAlone/MatlabPlot.cc b/webrtc/modules/rtp_rtcp/test/BWEStandAlone/MatlabPlot.cc
deleted file mode 100644
index fe54d67ee7..0000000000
--- a/webrtc/modules/rtp_rtcp/test/BWEStandAlone/MatlabPlot.cc
+++ /dev/null
@@ -1,1055 +0,0 @@
-/*
- * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "webrtc/modules/rtp_rtcp/test/BWEStandAlone/MatlabPlot.h"
-
-#include <math.h>
-#include <stdio.h>
-
-#include <algorithm>
-#include <sstream>
-
-#ifdef MATLAB
-#include "engine.h"
-#endif
-
-#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
-#include "webrtc/system_wrappers/include/event_wrapper.h"
-#include "webrtc/system_wrappers/include/tick_util.h"
-
-using namespace webrtc;
-
-#ifdef MATLAB
-MatlabEngine eng;
-
-MatlabLine::MatlabLine(int maxLen /*= -1*/, const char *plotAttrib /*= NULL*/, const char *name /*= NULL*/)
-:
-_xArray(NULL),
-_yArray(NULL),
-_maxLen(maxLen),
-_plotAttribute(),
-_name()
-{
- if (_maxLen > 0)
- {
- _xArray = mxCreateDoubleMatrix(1, _maxLen, mxREAL);
- _yArray = mxCreateDoubleMatrix(1, _maxLen, mxREAL);
- }
-
- if (plotAttrib)
- {
- _plotAttribute = plotAttrib;
- }
-
- if (name)
- {
- _name = name;
- }
-}
-
-MatlabLine::~MatlabLine()
-{
- if (_xArray != NULL)
- {
- mxDestroyArray(_xArray);
- }
- if (_yArray != NULL)
- {
- mxDestroyArray(_yArray);
- }
-}
-
-void MatlabLine::Append(double x, double y)
-{
- if (_maxLen > 0 && _xData.size() > static_cast<uint32_t>(_maxLen))
- {
- _xData.resize(_maxLen);
- _yData.resize(_maxLen);
- }
-
- _xData.push_front(x);
- _yData.push_front(y);
-}
-
-
-// append y-data with running integer index as x-data
-void MatlabLine::Append(double y)
-{
- if (_xData.empty())
- {
- // first element is index 0
- Append(0, y);
- }
- else
- {
- // take last x-value and increment
- double temp = _xData.back(); // last x-value
- Append(temp + 1, y);
- }
-}
-
-
-void MatlabLine::SetMaxLen(int maxLen)
-{
- if (maxLen <= 0)
- {
- // means no maxLen
- _maxLen = -1;
- }
- else
- {
- _maxLen = maxLen;
-
- if (_xArray != NULL)
- {
- mxDestroyArray(_xArray);
- mxDestroyArray(_yArray);
- }
- _xArray = mxCreateDoubleMatrix(1, _maxLen, mxREAL);
- _yArray = mxCreateDoubleMatrix(1, _maxLen, mxREAL);
-
- maxLen = ((unsigned int)maxLen <= _xData.size()) ? maxLen : (int)_xData.size();
- _xData.resize(maxLen);
- _yData.resize(maxLen);
-
- //// reserve the right amount of memory
- //_xData.reserve(_maxLen);
- //_yData.reserve(_maxLen);
- }
-}
-
-void MatlabLine::SetAttribute(char *plotAttrib)
-{
- _plotAttribute = plotAttrib;
-}
-
-void MatlabLine::SetName(char *name)
-{
- _name = name;
-}
-
-void MatlabLine::GetPlotData(mxArray** xData, mxArray** yData)
-{
- // Make sure we have enough Matlab allocated memory.
- // Assuming both arrays (x and y) are of the same size.
- if (_xData.empty())
- {
- return; // No data
- }
- unsigned int size = 0;
- if (_xArray != NULL)
- {
- size = (unsigned int)mxGetNumberOfElements(_xArray);
- }
- if (size < _xData.size())
- {
- if (_xArray != NULL)
- {
- mxDestroyArray(_xArray);
- mxDestroyArray(_yArray);
- }
- _xArray = mxCreateDoubleMatrix(1, _xData.size(), mxREAL);
- _yArray = mxCreateDoubleMatrix(1, _yData.size(), mxREAL);
- }
-
- if (!_xData.empty())
- {
- double* x = mxGetPr(_xArray);
-
- std::list<double>::iterator it = _xData.begin();
-
- for (int i = 0; it != _xData.end(); it++, i++)
- {
- x[i] = *it;
- }
- }
-
- if (!_yData.empty())
- {
- double* y = mxGetPr(_yArray);
-
- std::list<double>::iterator it = _yData.begin();
-
- for (int i = 0; it != _yData.end(); it++, i++)
- {
- y[i] = *it;
- }
- }
- *xData = _xArray;
- *yData = _yArray;
-}
-
-std::string MatlabLine::GetXName()
-{
- std::ostringstream xString;
- xString << "x_" << _name;
- return xString.str();
-}
-
-std::string MatlabLine::GetYName()
-{
- std::ostringstream yString;
- yString << "y_" << _name;
- return yString.str();
-}
-
-std::string MatlabLine::GetPlotString()
-{
-
- std::ostringstream s;
-
- if (_xData.size() == 0)
- {
- s << "[0 1], [0 1]"; // To get an empty plot
- }
- else
- {
- s << GetXName() << "(1:" << _xData.size() << "),";
- s << GetYName() << "(1:" << _yData.size() << ")";
- }
-
- s << ", '";
- s << _plotAttribute;
- s << "'";
-
- return s.str();
-}
-
-std::string MatlabLine::GetRefreshString()
-{
- std::ostringstream s;
-
- if (_xData.size() > 0)
- {
- s << "set(h,'xdata',"<< GetXName() <<"(1:" << _xData.size() << "),'ydata',"<< GetYName() << "(1:" << _yData.size() << "));";
- }
- else
- {
- s << "set(h,'xdata',[NaN],'ydata',[NaN]);";
- }
- return s.str();
-}
-
-std::string MatlabLine::GetLegendString()
-{
- return ("'" + _name + "'");
-}
-
-bool MatlabLine::hasLegend()
-{
- return (!_name.empty());
-}
-
-
-// remove data points, but keep attributes
-void MatlabLine::Reset()
-{
- _xData.clear();
- _yData.clear();
-}
-
-
-void MatlabLine::UpdateTrendLine(MatlabLine * sourceData, double slope, double offset)
-{
- Reset(); // reset data, not attributes and name
-
- double thexMin = sourceData->xMin();
- double thexMax = sourceData->xMax();
- Append(thexMin, thexMin * slope + offset);
- Append(thexMax, thexMax * slope + offset);
-}
-
-double MatlabLine::xMin()
-{
- if (!_xData.empty())
- {
- std::list<double>::iterator theStart = _xData.begin();
- std::list<double>::iterator theEnd = _xData.end();
- return(*min_element(theStart, theEnd));
- }
- return (0.0);
-}
-
-double MatlabLine::xMax()
-{
- if (!_xData.empty())
- {
- std::list<double>::iterator theStart = _xData.begin();
- std::list<double>::iterator theEnd = _xData.end();
- return(*max_element(theStart, theEnd));
- }
- return (0.0);
-}
-
-double MatlabLine::yMin()
-{
- if (!_yData.empty())
- {
- std::list<double>::iterator theStart = _yData.begin();
- std::list<double>::iterator theEnd = _yData.end();
- return(*min_element(theStart, theEnd));
- }
- return (0.0);
-}
-
-double MatlabLine::yMax()
-{
- if (!_yData.empty())
- {
- std::list<double>::iterator theStart = _yData.begin();
- std::list<double>::iterator theEnd = _yData.end();
- return(*max_element(theStart, theEnd));
- }
- return (0.0);
-}
-
-
-
-MatlabTimeLine::MatlabTimeLine(int horizonSeconds /*= -1*/, const char *plotAttrib /*= NULL*/,
- const char *name /*= NULL*/,
- int64_t refTimeMs /* = -1*/)
- :
-_timeHorizon(horizonSeconds),
-MatlabLine(-1, plotAttrib, name) // infinite number of elements
-{
- if (refTimeMs < 0)
- _refTimeMs = TickTime::MillisecondTimestamp();
- else
- _refTimeMs = refTimeMs;
-}
-
-void MatlabTimeLine::Append(double y)
-{
- MatlabLine::Append(static_cast<double>(TickTime::MillisecondTimestamp() - _refTimeMs) / 1000.0, y);
-
- PurgeOldData();
-}
-
-
-void MatlabTimeLine::PurgeOldData()
-{
- if (_timeHorizon > 0)
- {
- // remove old data
- double historyLimit = static_cast<double>(TickTime::MillisecondTimestamp() - _refTimeMs) / 1000.0
- - _timeHorizon; // remove data points older than this
-
- std::list<double>::reverse_iterator ritx = _xData.rbegin();
- uint32_t removeCount = 0;
- while (ritx != _xData.rend())
- {
- if (*ritx >= historyLimit)
- {
- break;
- }
- ritx++;
- removeCount++;
- }
- if (removeCount == 0)
- {
- return;
- }
-
- // remove the range [begin, it).
- //if (removeCount > 10)
- //{
- // printf("Removing %lu elements\n", removeCount);
- //}
- _xData.resize(_xData.size() - removeCount);
- _yData.resize(_yData.size() - removeCount);
- }
-}
-
-
-int64_t MatlabTimeLine::GetRefTime()
-{
- return(_refTimeMs);
-}
-
-
-
-
-MatlabPlot::MatlabPlot()
-:
-_figHandle(-1),
-_smartAxis(false),
-_critSect(CriticalSectionWrapper::CreateCriticalSection()),
-_timeToPlot(false),
-_plotting(false),
-_enabled(true),
-_firstPlot(true),
-_legendEnabled(true),
-_donePlottingEvent(EventWrapper::Create())
-{
- CriticalSectionScoped cs(_critSect);
-
- _xlim[0] = 0;
- _xlim[1] = 0;
- _ylim[0] = 0;
- _ylim[1] = 0;
-
-#ifdef PLOT_TESTING
- _plotStartTime = -1;
- _plotDelay = 0;
-#endif
-
-}
-
-
-MatlabPlot::~MatlabPlot()
-{
- _critSect->Enter();
-
- // delete all line objects
- while (!_line.empty())
- {
- delete *(_line.end() - 1);
- _line.pop_back();
- }
-
- delete _critSect;
- delete _donePlottingEvent;
-}
-
-
-int MatlabPlot::AddLine(int maxLen /*= -1*/, const char *plotAttrib /*= NULL*/, const char *name /*= NULL*/)
-{
- CriticalSectionScoped cs(_critSect);
- if (!_enabled)
- {
- return -1;
- }
-
- MatlabLine *newLine = new MatlabLine(maxLen, plotAttrib, name);
- _line.push_back(newLine);
-
- return (static_cast<int>(_line.size() - 1)); // index of newly inserted line
-}
-
-
-int MatlabPlot::AddTimeLine(int maxLen /*= -1*/, const char *plotAttrib /*= NULL*/, const char *name /*= NULL*/,
- int64_t refTimeMs /*= -1*/)
-{
- CriticalSectionScoped cs(_critSect);
-
- if (!_enabled)
- {
- return -1;
- }
-
- MatlabTimeLine *newLine = new MatlabTimeLine(maxLen, plotAttrib, name, refTimeMs);
- _line.push_back(newLine);
-
- return (static_cast<int>(_line.size() - 1)); // index of newly inserted line
-}
-
-
-int MatlabPlot::GetLineIx(const char *name)
-{
- CriticalSectionScoped cs(_critSect);
-
- if (!_enabled)
- {
- return -1;
- }
-
- // search the list for a matching line name
- std::vector<MatlabLine*>::iterator it = _line.begin();
- bool matchFound = false;
- int lineIx = 0;
-
- for (; it != _line.end(); it++, lineIx++)
- {
- if ((*it)->_name == name)
- {
- matchFound = true;
- break;
- }
- }
-
- if (matchFound)
- {
- return (lineIx);
- }
- else
- {
- return (-1);
- }
-}
-
-
-void MatlabPlot::Append(int lineIndex, double x, double y)
-{
- CriticalSectionScoped cs(_critSect);
-
- if (!_enabled)
- {
- return;
- }
-
- // sanity for index
- if (lineIndex < 0 || lineIndex >= static_cast<int>(_line.size()))
- {
- throw "Line index out of range";
- exit(1);
- }
-
- return (_line[lineIndex]->Append(x, y));
-}
-
-
-void MatlabPlot::Append(int lineIndex, double y)
-{
- CriticalSectionScoped cs(_critSect);
-
- if (!_enabled)
- {
- return;
- }
-
- // sanity for index
- if (lineIndex < 0 || lineIndex >= static_cast<int>(_line.size()))
- {
- throw "Line index out of range";
- exit(1);
- }
-
- return (_line[lineIndex]->Append(y));
-}
-
-
-int MatlabPlot::Append(const char *name, double x, double y)
-{
- CriticalSectionScoped cs(_critSect);
-
- if (!_enabled)
- {
- return -1;
- }
-
- // search the list for a matching line name
- int lineIx = GetLineIx(name);
-
- if (lineIx < 0) //(!matchFound)
- {
- // no match; append new line
- lineIx = AddLine(-1, NULL, name);
- }
-
- // append data to line
- Append(lineIx, x, y);
- return (lineIx);
-}
-
-int MatlabPlot::Append(const char *name, double y)
-{
- CriticalSectionScoped cs(_critSect);
-
- if (!_enabled)
- {
- return -1;
- }
-
- // search the list for a matching line name
- int lineIx = GetLineIx(name);
-
- if (lineIx < 0) //(!matchFound)
- {
- // no match; append new line
- lineIx = AddLine(-1, NULL, name);
- }
-
- // append data to line
- Append(lineIx, y);
- return (lineIx);
-}
-
-int MatlabPlot::Length(char *name)
-{
- CriticalSectionScoped cs(_critSect);
-
- if (!_enabled)
- {
- return -1;
- }
-
- int ix = GetLineIx(name);
- if (ix >= 0)
- {
- return (static_cast<int>(_line[ix]->_xData.size()));
- }
- else
- {
- return (-1);
- }
-}
-
-
-void MatlabPlot::SetPlotAttribute(char *name, char *plotAttrib)
-{
- CriticalSectionScoped cs(_critSect);
-
- if (!_enabled)
- {
- return;
- }
-
- int lineIx = GetLineIx(name);
-
- if (lineIx >= 0)
- {
- _line[lineIx]->SetAttribute(plotAttrib);
- }
-}
-
-// Must be called under critical section _critSect
-void MatlabPlot::UpdateData(Engine* ep)
-{
- if (!_enabled)
- {
- return;
- }
-
- for (std::vector<MatlabLine*>::iterator it = _line.begin(); it != _line.end(); it++)
- {
- mxArray* xData = NULL;
- mxArray* yData = NULL;
- (*it)->GetPlotData(&xData, &yData);
- if (xData != NULL)
- {
- std::string xName = (*it)->GetXName();
- std::string yName = (*it)->GetYName();
- _critSect->Leave();
-#ifdef MATLAB6
- mxSetName(xData, xName.c_str());
- mxSetName(yData, yName.c_str());
- engPutArray(ep, xData);
- engPutArray(ep, yData);
-#else
- int ret = engPutVariable(ep, xName.c_str(), xData);
- assert(ret == 0);
- ret = engPutVariable(ep, yName.c_str(), yData);
- assert(ret == 0);
-#endif
- _critSect->Enter();
- }
- }
-}
-
-bool MatlabPlot::GetPlotCmd(std::ostringstream & cmd, Engine* ep)
-{
- _critSect->Enter();
-
- if (!DataAvailable())
- {
- _critSect->Leave();
- return false;
- }
-
- if (_firstPlot)
- {
- GetPlotCmd(cmd);
- _firstPlot = false;
- }
- else
- {
- GetRefreshCmd(cmd);
- }
-
- UpdateData(ep);
-
- _critSect->Leave();
-
- return true;
-}
-
-// Call inside critsect
-void MatlabPlot::GetPlotCmd(std::ostringstream & cmd)
-{
- // we have something to plot
- // empty the stream
- cmd.str(""); // (this seems to be the only way)
-
- cmd << "figure; h" << _figHandle << "= plot(";
-
- // first line
- std::vector<MatlabLine*>::iterator it = _line.begin();
- cmd << (*it)->GetPlotString();
-
- it++;
-
- // remaining lines
- for (; it != _line.end(); it++)
- {
- cmd << ", ";
- cmd << (*it)->GetPlotString();
- }
-
- cmd << "); ";
-
- if (_legendEnabled)
- {
- GetLegendCmd(cmd);
- }
-
- if (_smartAxis)
- {
- double xMin = _xlim[0];
- double xMax = _xlim[1];
- double yMax = _ylim[1];
- for (std::vector<MatlabLine*>::iterator it = _line.begin(); it != _line.end(); it++)
- {
- xMax = std::max(xMax, (*it)->xMax());
- xMin = std::min(xMin, (*it)->xMin());
-
- yMax = std::max(yMax, (*it)->yMax());
- yMax = std::max(yMax, fabs((*it)->yMin()));
- }
- _xlim[0] = xMin;
- _xlim[1] = xMax;
- _ylim[0] = -yMax;
- _ylim[1] = yMax;
-
- cmd << "axis([" << _xlim[0] << ", " << _xlim[1] << ", " << _ylim[0] << ", " << _ylim[1] << "]);";
- }
-
- int i=1;
- for (it = _line.begin(); it != _line.end(); i++, it++)
- {
- cmd << "set(h" << _figHandle << "(" << i << "), 'Tag', " << (*it)->GetLegendString() << ");";
- }
-}
-
-// Call inside critsect
-void MatlabPlot::GetRefreshCmd(std::ostringstream & cmd)
-{
- cmd.str(""); // (this seems to be the only way)
- std::vector<MatlabLine*>::iterator it = _line.begin();
- for (it = _line.begin(); it != _line.end(); it++)
- {
- cmd << "h = findobj(0, 'Tag', " << (*it)->GetLegendString() << ");";
- cmd << (*it)->GetRefreshString();
- }
- //if (_legendEnabled)
- //{
- // GetLegendCmd(cmd);
- //}
-}
-
-void MatlabPlot::GetLegendCmd(std::ostringstream & cmd)
-{
- std::vector<MatlabLine*>::iterator it = _line.begin();
- bool anyLegend = false;
- for (; it != _line.end(); it++)
- {
- anyLegend = anyLegend || (*it)->hasLegend();
- }
- if (anyLegend)
- {
- // create the legend
-
- cmd << "legend(h" << _figHandle << ",{";
-
-
- // iterate lines
- int i = 0;
- for (std::vector<MatlabLine*>::iterator it = _line.begin(); it != _line.end(); it++)
- {
- if (i > 0)
- {
- cmd << ", ";
- }
- cmd << (*it)->GetLegendString();
- i++;
- }
-
- cmd << "}, 2); "; // place legend in upper-left corner
- }
-}
-
-// Call inside critsect
-bool MatlabPlot::DataAvailable()
-{
- if (!_enabled)
- {
- return false;
- }
-
- for (std::vector<MatlabLine*>::iterator it = _line.begin(); it != _line.end(); it++)
- {
- (*it)->PurgeOldData();
- }
-
- return true;
-}
-
-void MatlabPlot::Plot()
-{
- CriticalSectionScoped cs(_critSect);
-
- _timeToPlot = true;
-
-#ifdef PLOT_TESTING
- _plotStartTime = TickTime::MillisecondTimestamp();
-#endif
-}
-
-
-void MatlabPlot::Reset()
-{
- CriticalSectionScoped cs(_critSect);
-
- _enabled = true;
-
- for (std::vector<MatlabLine*>::iterator it = _line.begin(); it != _line.end(); it++)
- {
- (*it)->Reset();
- }
-
-}
-
-void MatlabPlot::SetFigHandle(int handle)
-{
- CriticalSectionScoped cs(_critSect);
-
- if (handle > 0)
- _figHandle = handle;
-}
-
-bool
-MatlabPlot::TimeToPlot()
-{
- CriticalSectionScoped cs(_critSect);
- return _enabled && _timeToPlot;
-}
-
-void
-MatlabPlot::Plotting()
-{
- CriticalSectionScoped cs(_critSect);
- _plotting = true;
-}
-
-void
-MatlabPlot::DonePlotting()
-{
- CriticalSectionScoped cs(_critSect);
- _timeToPlot = false;
- _plotting = false;
- _donePlottingEvent->Set();
-}
-
-void
-MatlabPlot::DisablePlot()
-{
- _critSect->Enter();
- while (_plotting)
- {
- _critSect->Leave();
- _donePlottingEvent->Wait(WEBRTC_EVENT_INFINITE);
- _critSect->Enter();
- }
- _enabled = false;
-}
-
-int MatlabPlot::MakeTrend(const char *sourceName, const char *trendName, double slope, double offset, const char *plotAttrib)
-{
- CriticalSectionScoped cs(_critSect);
-
- int sourceIx;
- int trendIx;
-
- sourceIx = GetLineIx(sourceName);
- if (sourceIx < 0)
- {
- // could not find source
- return (-1);
- }
-
- trendIx = GetLineIx(trendName);
- if (trendIx < 0)
- {
- // no trend found; add new line
- trendIx = AddLine(2 /*maxLen*/, plotAttrib, trendName);
- }
-
- _line[trendIx]->UpdateTrendLine(_line[sourceIx], slope, offset);
-
- return (trendIx);
-
-}
-
-
-MatlabEngine::MatlabEngine()
-:
-_critSect(CriticalSectionWrapper::CreateCriticalSection()),
-_eventPtr(NULL),
-_running(false),
-_numPlots(0)
-{
- _eventPtr = EventWrapper::Create();
-
- _plotThread = ThreadWrapper::CreateThread(MatlabEngine::PlotThread, this,
- kLowPriority, "MatlabPlot");
- _running = true;
- _plotThread->Start();
-}
-
-MatlabEngine::~MatlabEngine()
-{
- _critSect->Enter();
-
- if (_plotThread)
- {
- _running = false;
- _eventPtr->Set();
-
- _plotThread->Stop();
- }
-
- _plots.clear();
-
- delete _eventPtr;
- _eventPtr = NULL;
-
- _critSect->Leave();
- delete _critSect;
-
-}
-
-MatlabPlot * MatlabEngine::NewPlot(MatlabPlot *newPlot)
-{
- CriticalSectionScoped cs(_critSect);
-
- //MatlabPlot *newPlot = new MatlabPlot();
-
- if (newPlot)
- {
- newPlot->SetFigHandle(++_numPlots); // first plot is number 1
- _plots.push_back(newPlot);
- }
-
- return (newPlot);
-
-}
-
-
-void MatlabEngine::DeletePlot(MatlabPlot *plot)
-{
- CriticalSectionScoped cs(_critSect);
-
- if (plot == NULL)
- {
- return;
- }
-
- std::vector<MatlabPlot *>::iterator it;
- for (it = _plots.begin(); it < _plots.end(); it++)
- {
- if (plot == *it)
- {
- break;
- }
- }
-
- assert(it != _plots.end() && plot == *it);
-
- (*it)->DisablePlot();
-
- _plots.erase(it);
- --_numPlots;
-
- delete plot;
-}
-
-
-bool MatlabEngine::PlotThread(void *obj)
-{
- if (!obj)
- {
- return (false);
- }
-
- MatlabEngine *eng = (MatlabEngine *) obj;
-
- Engine *ep = engOpen(NULL);
- if (!ep)
- {
- throw "Cannot open Matlab engine";
- return (false);
- }
-
- engSetVisible(ep, true);
- engEvalString(ep, "close all;");
-
- while (eng->_running)
- {
- eng->_critSect->Enter();
-
- // iterate through all plots
- for (unsigned int ix = 0; ix < eng->_plots.size(); ix++)
- {
- MatlabPlot *plot = eng->_plots[ix];
- if (plot->TimeToPlot())
- {
- plot->Plotting();
- eng->_critSect->Leave();
- std::ostringstream cmd;
-
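- // Evaluate the (still empty) command first; a nonzero return from
- // engEvalString indicates the engine session is no longer running.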
- if (engEvalString(ep, cmd.str().c_str()))
- {
- // engine dead
- return (false);
- }
-
- // empty the stream
- cmd.str(""); // (this seems to be the only way)
- if (plot->GetPlotCmd(cmd, ep))
- {
- // things to plot, we have already accessed what we need in the plot
- plot->DonePlotting();
-
- int64_t start = TickTime::MillisecondTimestamp();
- // plot it
- int ret = engEvalString(ep, cmd.str().c_str());
- printf("time=%I64i\n", TickTime::MillisecondTimestamp() - start);
- if (ret)
- {
- // engine dead
- return (false);
- }
-
-#ifdef PLOT_TESTING
- if(plot->_plotStartTime >= 0)
- {
- plot->_plotDelay = TickTime::MillisecondTimestamp() - plot->_plotStartTime;
- plot->_plotStartTime = -1;
- }
-#endif
- }
- eng->_critSect->Enter();
- }
- }
-
- eng->_critSect->Leave();
- // wait a while
- eng->_eventPtr->Wait(66); // 66 ms, i.e. roughly 15 refresh checks per second
- }
-
- if (ep)
- {
- engClose(ep);
- ep = NULL;
- }
-
- return (true);
-
-}
-
-#endif // MATLAB
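
A note worth keeping as this file goes away: DisablePlot() must not return while the engine thread is mid-plot, so it releases the lock, blocks on _donePlottingEvent, and re-checks _plotting in a loop, while the engine thread brackets each redraw with Plotting()/DonePlotting(). A minimal sketch of the same handshake using standard C++ primitives; the class and names below are illustrative, not part of the deleted file:

    #include <condition_variable>
    #include <mutex>

    class PlotGate {
    public:
        void BeginPlot() {                    // engine thread: Plotting()
            std::lock_guard<std::mutex> lock(mutex_);
            plotting_ = true;
        }
        void EndPlot() {                      // engine thread: DonePlotting()
            std::lock_guard<std::mutex> lock(mutex_);
            plotting_ = false;
            done_.notify_all();               // wakes a blocked Disable()
        }
        void Disable() {                      // client thread: DisablePlot()
            std::unique_lock<std::mutex> lock(mutex_);
            done_.wait(lock, [this] { return !plotting_; });
            enabled_ = false;                 // no further plotting accepted
        }
    private:
        std::mutex mutex_;
        std::condition_variable done_;
        bool plotting_ = false;
        bool enabled_ = true;
    };
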
diff --git a/webrtc/modules/rtp_rtcp/test/BWEStandAlone/MatlabPlot.h b/webrtc/modules/rtp_rtcp/test/BWEStandAlone/MatlabPlot.h
deleted file mode 100644
index 3ed89f8f91..0000000000
--- a/webrtc/modules/rtp_rtcp/test/BWEStandAlone/MatlabPlot.h
+++ /dev/null
@@ -1,170 +0,0 @@
-/*
- * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef WEBRTC_MODULES_RTP_RTCP_TEST_BWESTANDALONE_MATLABPLOT_H_
-#define WEBRTC_MODULES_RTP_RTCP_TEST_BWESTANDALONE_MATLABPLOT_H_
-
-#include <list>
-#include <string>
-#include <vector>
-
-#include "webrtc/typedefs.h"
-#include "webrtc/system_wrappers/include/thread_wrapper.h"
-
-namespace webrtc {
-class CriticalSectionWrapper;
-class EventWrapper;
-}
-
-//#define PLOT_TESTING
-
-#ifdef MATLAB
-
-typedef struct engine Engine;
-typedef struct mxArray_tag mxArray;
-
-class MatlabLine
-{
- friend class MatlabPlot;
-
-public:
- MatlabLine(int maxLen = -1, const char *plotAttrib = NULL, const char *name = NULL);
- ~MatlabLine();
- virtual void Append(double x, double y);
- virtual void Append(double y);
- void SetMaxLen(int maxLen);
- void SetAttribute(char *plotAttrib);
- void SetName(char *name);
- void Reset();
- virtual void PurgeOldData() {};
-
- void UpdateTrendLine(MatlabLine * sourceData, double slope, double offset);
-
- double xMin();
- double xMax();
- double yMin();
- double yMax();
-
-protected:
- void GetPlotData(mxArray** xData, mxArray** yData);
- std::string GetXName();
- std::string GetYName();
- std::string GetPlotString();
- std::string GetRefreshString();
- std::string GetLegendString();
- bool hasLegend();
- std::list<double> _xData;
- std::list<double> _yData;
- mxArray* _xArray;
- mxArray* _yArray;
- int _maxLen;
- std::string _plotAttribute;
- std::string _name;
-};
-
-
-class MatlabTimeLine : public MatlabLine
-{
-public:
- MatlabTimeLine(int horizonSeconds = -1, const char *plotAttrib = NULL, const char *name = NULL,
- int64_t refTimeMs = -1);
- ~MatlabTimeLine() {};
- void Append(double y);
- void PurgeOldData();
- int64_t GetRefTime();
-
-private:
- int64_t _refTimeMs;
- int _timeHorizon;
-};
-
-
-class MatlabPlot
-{
- friend class MatlabEngine;
-
-public:
- MatlabPlot();
- ~MatlabPlot();
-
- int AddLine(int maxLen = -1, const char *plotAttrib = NULL, const char *name = NULL);
- int AddTimeLine(int maxLen = -1, const char *plotAttrib = NULL, const char *name = NULL,
- int64_t refTimeMs = -1);
- int GetLineIx(const char *name);
- void Append(int lineIndex, double x, double y);
- void Append(int lineIndex, double y);
- int Append(const char *name, double x, double y);
- int Append(const char *name, double y);
- int Length(char *name);
- void SetPlotAttribute(char *name, char *plotAttrib);
- void Plot();
- void Reset();
- void SmartAxis(bool status = true) { _smartAxis = status; };
- void SetFigHandle(int handle);
- void EnableLegend(bool enable) { _legendEnabled = enable; };
-
- bool TimeToPlot();
- void Plotting();
- void DonePlotting();
- void DisablePlot();
-
- int MakeTrend(const char *sourceName, const char *trendName, double slope, double offset, const char *plotAttrib = NULL);
-
-#ifdef PLOT_TESTING
- int64_t _plotStartTime;
- int64_t _plotDelay;
-#endif
-
-private:
- void UpdateData(Engine* ep);
- bool GetPlotCmd(std::ostringstream & cmd, Engine* ep);
- void GetPlotCmd(std::ostringstream & cmd); // call inside crit sect
- void GetRefreshCmd(std::ostringstream & cmd); // call inside crit sect
- void GetLegendCmd(std::ostringstream & cmd);
- bool DataAvailable();
-
- std::vector<MatlabLine *> _line;
- int _figHandle;
- bool _smartAxis;
- double _xlim[2];
- double _ylim[2];
- webrtc::CriticalSectionWrapper *_critSect;
- bool _timeToPlot;
- bool _plotting;
- bool _enabled;
- bool _firstPlot;
- bool _legendEnabled;
- webrtc::EventWrapper* _donePlottingEvent;
-};
-
-
-class MatlabEngine
-{
-public:
- MatlabEngine();
- ~MatlabEngine();
-
- MatlabPlot * NewPlot(MatlabPlot *newPlot);
- void DeletePlot(MatlabPlot *plot);
-
-private:
- static bool PlotThread(void *obj);
-
- std::vector<MatlabPlot *> _plots;
- webrtc::CriticalSectionWrapper *_critSect;
- webrtc::EventWrapper *_eventPtr;
- rtc::scoped_ptr<webrtc::ThreadWrapper> _plotThread;
- bool _running;
- int _numPlots;
-};
-
-#endif //MATLAB
-
-#endif // WEBRTC_MODULES_RTP_RTCP_TEST_BWESTANDALONE_MATLABPLOT_H_
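
For the record, the intended call sequence for the API declared above (values are hypothetical; the code only exists when built with the MATLAB define and a local engine installation):

    #ifdef MATLAB
    MatlabEngine eng;
    MatlabPlot* plot = eng.NewPlot(new MatlabPlot());
    plot->AddTimeLine(30, "b-", "bitrate");  // keep a 30 s horizon
    plot->Append("bitrate", 512.0);          // named Append creates the line on demand
    plot->Plot();                            // schedule a redraw on the engine thread
    eng.DeletePlot(plot);                    // blocks until any in-flight plot finishes
    #endif
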
diff --git a/webrtc/modules/rtp_rtcp/test/BWEStandAlone/TestLoadGenerator.cc b/webrtc/modules/rtp_rtcp/test/BWEStandAlone/TestLoadGenerator.cc
deleted file mode 100644
index 0e3e87976c..0000000000
--- a/webrtc/modules/rtp_rtcp/test/BWEStandAlone/TestLoadGenerator.cc
+++ /dev/null
@@ -1,432 +0,0 @@
-/*
- * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "webrtc/modules/rtp_rtcp/test/BWEStandAlone/TestLoadGenerator.h"
-
-#include <stdio.h>
-
-#include <algorithm>
-
-#include "webrtc/modules/rtp_rtcp/test/BWEStandAlone/TestSenderReceiver.h"
-#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
-#include "webrtc/system_wrappers/include/event_wrapper.h"
-#include "webrtc/system_wrappers/include/tick_util.h"
-
-
-bool SenderThreadFunction(void *obj)
-{
- if (obj == NULL)
- {
- return false;
- }
- TestLoadGenerator *_genObj = static_cast<TestLoadGenerator *>(obj);
-
- return _genObj->GeneratorLoop();
-}
-
-
-TestLoadGenerator::TestLoadGenerator(TestSenderReceiver *sender, int32_t rtpSampleRate)
-:
-_critSect(CriticalSectionWrapper::CreateCriticalSection()),
-_eventPtr(NULL),
-_bitrateKbps(0),
-_sender(sender),
-_running(false),
-_rtpSampleRate(rtpSampleRate)
-{
-}
-
-TestLoadGenerator::~TestLoadGenerator ()
-{
- if (_running)
- {
- Stop();
- }
-
- delete _critSect;
-}
-
-int32_t TestLoadGenerator::SetBitrate (int32_t newBitrateKbps)
-{
- CriticalSectionScoped cs(_critSect);
-
- if (newBitrateKbps < 0)
- {
- return -1;
- }
-
- _bitrateKbps = newBitrateKbps;
-
- printf("New bitrate = %i kbps\n", _bitrateKbps);
-
- return _bitrateKbps;
-}
-
-
-int32_t TestLoadGenerator::Start (const char *threadName)
-{
- CriticalSectionScoped cs(_critSect);
-
- _eventPtr = EventWrapper::Create();
-
- _genThread = ThreadWrapper::CreateThread(SenderThreadFunction, this,
- threadName);
- _running = true;
-
- _genThread->Start();
- _genThread->SetPriority(kRealtimePriority);
-
- return 0;
-}
-
-
-int32_t TestLoadGenerator::Stop ()
-{
- _critSect->Enter();
-
- if (_genThread)
- {
- _running = false;
- _eventPtr->Set();
-
- _genThread->Stop();
- _genThread.reset();
-
- delete _eventPtr;
- _eventPtr = NULL;
- }
-
- _critSect->Leave();
- return (0);
-}
-
-
-int TestLoadGenerator::generatePayload ()
-{
- return(generatePayload( static_cast<uint32_t>( TickTime::MillisecondTimestamp() * _rtpSampleRate / 1000 )));
-}
-
-
-int TestLoadGenerator::sendPayload (const uint32_t timeStamp,
- const uint8_t* payloadData,
- const size_t payloadSize,
- const webrtc::FrameType frameType /*= webrtc::kVideoFrameDelta*/)
-{
-
- return (_sender->SendOutgoingData(timeStamp, payloadData, payloadSize, frameType));
-}
-
-
-CBRGenerator::CBRGenerator (TestSenderReceiver *sender,
- size_t payloadSizeBytes,
- int32_t bitrateKbps,
- int32_t rtpSampleRate)
-:
-//_eventPtr(NULL),
-_payloadSizeBytes(payloadSizeBytes),
-_payload(new uint8_t[payloadSizeBytes]),
-TestLoadGenerator(sender, rtpSampleRate)
-{
- SetBitrate (bitrateKbps);
-}
-
-CBRGenerator::~CBRGenerator ()
-{
- if (_running)
- {
- Stop();
- }
-
- if (_payload)
- {
- delete [] _payload;
- }
-
-}
-
-bool CBRGenerator::GeneratorLoop ()
-{
- double periodMs;
- int64_t nextSendTime = TickTime::MillisecondTimestamp();
-
-
- // no critSect
- while (_running)
- {
- // send data (critSect inside)
- generatePayload( static_cast<uint32_t>(nextSendTime * _rtpSampleRate / 1000) );
-
- // calculate wait time
- periodMs = 8.0 * _payloadSizeBytes / ( _bitrateKbps );
-
- nextSendTime = static_cast<int64_t>(nextSendTime + periodMs);
-
- int32_t waitTime = static_cast<int32_t>(nextSendTime - TickTime::MillisecondTimestamp());
- if (waitTime < 0)
- {
- waitTime = 0;
- }
- // wait
- _eventPtr->Wait(static_cast<int32_t>(waitTime));
- }
-
- return true;
-}
-
-int CBRGenerator::generatePayload ( uint32_t timestamp )
-{
- CriticalSectionScoped cs(_critSect);
-
- //uint8_t *payload = new uint8_t[_payloadSizeBytes];
-
- int ret = sendPayload(timestamp, _payload, _payloadSizeBytes);
-
- //delete [] payload;
- return ret;
-}
-
-
-
-
-/////////////////////
-
-CBRFixFRGenerator::CBRFixFRGenerator (TestSenderReceiver *sender, int32_t bitrateKbps,
- int32_t rtpSampleRate, int32_t frameRateFps /*= 30*/,
- double spread /*= 0.0*/)
-:
-//_eventPtr(NULL),
-_payloadSizeBytes(0),
-_payload(NULL),
-_payloadAllocLen(0),
-_frameRateFps(frameRateFps),
-_spreadFactor(spread),
-TestLoadGenerator(sender, rtpSampleRate)
-{
- SetBitrate (bitrateKbps);
-}
-
-CBRFixFRGenerator::~CBRFixFRGenerator ()
-{
- if (_running)
- {
- Stop();
- }
-
- if (_payload)
- {
- delete [] _payload;
- _payloadAllocLen = 0;
- }
-
-}
-
-bool CBRFixFRGenerator::GeneratorLoop ()
-{
- double periodMs;
- int64_t nextSendTime = TickTime::MillisecondTimestamp();
-
- _critSect->Enter();
-
- if (_frameRateFps <= 0)
- {
- _critSect->Leave();
- return false;
- }
-
- _critSect->Leave();
-
- // no critSect
- while (_running)
- {
- _critSect->Enter();
-
- // calculate payload size
- _payloadSizeBytes = nextPayloadSize();
-
- if (_payloadSizeBytes > 0)
- {
-
- if (_payloadAllocLen < _payloadSizeBytes * (1 + _spreadFactor))
- {
- // re-allocate _payload
- if (_payload)
- {
- delete [] _payload;
- _payload = NULL;
- }
-
- _payloadAllocLen = static_cast<int32_t>((_payloadSizeBytes * (1 + _spreadFactor) * 3) / 2 + .5); // 50% extra to avoid frequent re-alloc
- _payload = new uint8_t[_payloadAllocLen];
- }
-
-
- // send data (critSect inside)
- generatePayload( static_cast<uint32_t>(nextSendTime * _rtpSampleRate / 1000) );
- }
-
- _critSect->Leave();
-
- // calculate wait time
- periodMs = 1000.0 / _frameRateFps;
- nextSendTime = static_cast<int64_t>(nextSendTime + periodMs + 0.5);
-
- int32_t waitTime = static_cast<int32_t>(nextSendTime - TickTime::MillisecondTimestamp());
- if (waitTime < 0)
- {
- waitTime = 0;
- }
- // wait
- _eventPtr->Wait(waitTime);
- }
-
- return true;
-}
-
-size_t CBRFixFRGenerator::nextPayloadSize()
-{
- const double periodMs = 1000.0 / _frameRateFps;
- return static_cast<size_t>(_bitrateKbps * periodMs / 8 + 0.5);
-}
-
-int CBRFixFRGenerator::generatePayload ( uint32_t timestamp )
-{
- CriticalSectionScoped cs(_critSect);
-
- double factor = ((double) rand() - RAND_MAX/2) / RAND_MAX; // [-0.5; 0.5]
- factor = 1 + 2 * _spreadFactor * factor; // [1 - _spreadFactor ; 1 + _spreadFactor]
-
- size_t thisPayloadBytes = static_cast<size_t>(_payloadSizeBytes * factor);
- // sanity
- if (thisPayloadBytes > _payloadAllocLen)
- {
- thisPayloadBytes = _payloadAllocLen;
- }
-
- int ret = sendPayload(timestamp, _payload, thisPayloadBytes);
- return ret;
-}
-
-
-/////////////////////
-
-PeriodicKeyFixFRGenerator::PeriodicKeyFixFRGenerator (TestSenderReceiver *sender, int32_t bitrateKbps,
- int32_t rtpSampleRate, int32_t frameRateFps /*= 30*/,
- double spread /*= 0.0*/, double keyFactor /*= 4.0*/, uint32_t keyPeriod /*= 300*/)
-:
-_keyFactor(keyFactor),
-_keyPeriod(keyPeriod),
-_frameCount(0),
-CBRFixFRGenerator(sender, bitrateKbps, rtpSampleRate, frameRateFps, spread)
-{
-}
-
-size_t PeriodicKeyFixFRGenerator::nextPayloadSize()
-{
- // calculate payload size for a delta frame
- size_t payloadSizeBytes = static_cast<size_t>(1000 * _bitrateKbps /
- (8.0 * _frameRateFps * (1.0 + (_keyFactor - 1.0) / _keyPeriod)) + 0.5);
-
- if (_frameCount % _keyPeriod == 0)
- {
- // this is a key frame, scale the payload size
- payloadSizeBytes =
- static_cast<size_t>(_keyFactor * _payloadSizeBytes + 0.5);
- }
- _frameCount++;
-
- return payloadSizeBytes;
-}
-
-////////////////////
-
-CBRVarFRGenerator::CBRVarFRGenerator(TestSenderReceiver *sender, int32_t bitrateKbps, const uint8_t* frameRates,
- uint16_t numFrameRates, int32_t rtpSampleRate, double avgFrPeriodMs,
- double frSpreadFactor, double spreadFactor)
-:
-_avgFrPeriodMs(avgFrPeriodMs),
-_frSpreadFactor(frSpreadFactor),
-_frameRates(NULL),
-_numFrameRates(numFrameRates),
-_frChangeTimeMs(TickTime::MillisecondTimestamp() + _avgFrPeriodMs),
-CBRFixFRGenerator(sender, bitrateKbps, rtpSampleRate, frameRates[0], spreadFactor)
-{
- _frameRates = new uint8_t[_numFrameRates];
- memcpy(_frameRates, frameRates, _numFrameRates);
-}
-
-CBRVarFRGenerator::~CBRVarFRGenerator()
-{
- delete [] _frameRates;
-}
-
-void CBRVarFRGenerator::ChangeFrameRate()
-{
- const int64_t nowMs = TickTime::MillisecondTimestamp();
- if (nowMs < _frChangeTimeMs)
- {
- return;
- }
- // Time to change frame rate
- uint16_t frIndex = static_cast<uint16_t>(static_cast<double>(rand()) / RAND_MAX
- * (_numFrameRates - 1) + 0.5) ;
- assert(frIndex < _numFrameRates);
- _frameRateFps = _frameRates[frIndex];
- // Update the next frame rate change time
- double factor = ((double) rand() - RAND_MAX/2) / RAND_MAX; // [-0.5; 0.5]
- factor = 1 + 2 * _frSpreadFactor * factor; // [1 - _frSpreadFactor ; 1 + _frSpreadFactor]
- _frChangeTimeMs = nowMs + static_cast<int64_t>(1000.0 * factor *
- _avgFrPeriodMs + 0.5);
-
- printf("New frame rate: %d\n", _frameRateFps);
-}
-
-size_t CBRVarFRGenerator::nextPayloadSize()
-{
- ChangeFrameRate();
- return CBRFixFRGenerator::nextPayloadSize();
-}
-
-////////////////////
-
-CBRFrameDropGenerator::CBRFrameDropGenerator(TestSenderReceiver *sender, int32_t bitrateKbps,
- int32_t rtpSampleRate, double spreadFactor)
-:
-_accBits(0),
-CBRFixFRGenerator(sender, bitrateKbps, rtpSampleRate, 30, spreadFactor)
-{
-}
-
-CBRFrameDropGenerator::~CBRFrameDropGenerator()
-{
-}
-
-size_t CBRFrameDropGenerator::nextPayloadSize()
-{
- _accBits -= 1000 * _bitrateKbps / _frameRateFps;
- if (_accBits < 0)
- {
- _accBits = 0;
- }
- if (_accBits > 0.3 * _bitrateKbps * 1000)
- {
- //printf("drop\n");
- return 0;
- }
- else
- {
- //printf("keep\n");
- const double periodMs = 1000.0 / _frameRateFps;
- size_t frameSize =
- static_cast<size_t>(_bitrateKbps * periodMs / 8 + 0.5);
- frameSize =
- std::max(frameSize, static_cast<size_t>(300 * periodMs / 8 + 0.5));
- _accBits += frameSize * 8;
- return frameSize;
- }
-}
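
The pacing arithmetic in CBRGenerator::GeneratorLoop() is compact enough to restate: a payload of B bytes at R kbit/s occupies 8 * B bits, so the inter-packet period is 8 * B / R milliseconds. A standalone check of the numbers (values are arbitrary):

    #include <cstdio>

    int main() {
        const double payloadBytes = 1000.0;                        // B
        const double bitrateKbps = 400.0;                          // R
        const double periodMs = 8.0 * payloadBytes / bitrateKbps;  // 8000 / 400 = 20 ms
        std::printf("one packet every %.1f ms\n", periodMs);
        return 0;
    }
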
diff --git a/webrtc/modules/rtp_rtcp/test/BWEStandAlone/TestLoadGenerator.h b/webrtc/modules/rtp_rtcp/test/BWEStandAlone/TestLoadGenerator.h
deleted file mode 100644
index bd83962fa3..0000000000
--- a/webrtc/modules/rtp_rtcp/test/BWEStandAlone/TestLoadGenerator.h
+++ /dev/null
@@ -1,149 +0,0 @@
-/*
- * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef WEBRTC_MODULES_RTP_RTCP_TEST_BWESTANDALONE_TESTLOADGENERATOR_H_
-#define WEBRTC_MODULES_RTP_RTCP_TEST_BWESTANDALONE_TESTLOADGENERATOR_H_
-
-#include <stdlib.h>
-
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/system_wrappers/include/thread_wrapper.h"
-#include "webrtc/typedefs.h"
-
-class TestSenderReceiver;
-namespace webrtc {
-class CriticalSectionWrapper;
-class EventWrapper;
-}
-
-class TestLoadGenerator
-{
-public:
- TestLoadGenerator (TestSenderReceiver *sender, int32_t rtpSampleRate = 90000);
- virtual ~TestLoadGenerator ();
-
- int32_t SetBitrate (int32_t newBitrateKbps);
- virtual int32_t Start (const char *threadName = NULL);
- virtual int32_t Stop ();
- virtual bool GeneratorLoop () = 0;
-
-protected:
- virtual int generatePayload ( uint32_t timestamp ) = 0;
- int generatePayload ();
- int sendPayload (const uint32_t timeStamp,
- const uint8_t* payloadData,
- const size_t payloadSize,
- const webrtc::FrameType frameType = webrtc::kVideoFrameDelta);
-
- webrtc::CriticalSectionWrapper* _critSect;
- webrtc::EventWrapper *_eventPtr;
- rtc::scoped_ptr<webrtc::ThreadWrapper> _genThread;
- int32_t _bitrateKbps;
- TestSenderReceiver *_sender;
- bool _running;
- int32_t _rtpSampleRate;
-};
-
-
-class CBRGenerator : public TestLoadGenerator
-{
-public:
- CBRGenerator (TestSenderReceiver *sender,
- size_t payloadSizeBytes,
- int32_t bitrateKbps,
- int32_t rtpSampleRate = 90000);
- virtual ~CBRGenerator ();
-
- virtual int32_t Start () {return (TestLoadGenerator::Start("CBRGenerator"));};
-
- virtual bool GeneratorLoop ();
-
-protected:
- virtual int generatePayload ( uint32_t timestamp );
-
- size_t _payloadSizeBytes;
- uint8_t *_payload;
-};
-
-
-class CBRFixFRGenerator : public TestLoadGenerator // constant bitrate and fixed frame rate
-{
-public:
- CBRFixFRGenerator (TestSenderReceiver *sender, int32_t bitrateKbps, int32_t rtpSampleRate = 90000,
- int32_t frameRateFps = 30, double spread = 0.0);
- virtual ~CBRFixFRGenerator ();
-
- virtual int32_t Start () {return (TestLoadGenerator::Start("CBRFixFRGenerator"));};
-
- virtual bool GeneratorLoop ();
-
-protected:
- virtual size_t nextPayloadSize ();
- virtual int generatePayload ( uint32_t timestamp );
-
- size_t _payloadSizeBytes;
- uint8_t *_payload;
- size_t _payloadAllocLen;
- int32_t _frameRateFps;
- double _spreadFactor;
-};
-
-class PeriodicKeyFixFRGenerator : public CBRFixFRGenerator // constant bitrate and fixed frame rate with periodically large frames
-{
-public:
- PeriodicKeyFixFRGenerator (TestSenderReceiver *sender, int32_t bitrateKbps, int32_t rtpSampleRate = 90000,
- int32_t frameRateFps = 30, double spread = 0.0, double keyFactor = 4.0, uint32_t keyPeriod = 300);
- virtual ~PeriodicKeyFixFRGenerator () {}
-
-protected:
- virtual size_t nextPayloadSize ();
-
- double _keyFactor;
- uint32_t _keyPeriod;
- uint32_t _frameCount;
-};
-
-// Probably better to inherit CBRFixFRGenerator from CBRVarFRGenerator, but since
-// the fix FR version already existed this was easier.
-class CBRVarFRGenerator : public CBRFixFRGenerator // constant bitrate and variable frame rate
-{
-public:
- CBRVarFRGenerator(TestSenderReceiver *sender, int32_t bitrateKbps, const uint8_t* frameRates,
- uint16_t numFrameRates, int32_t rtpSampleRate = 90000, double avgFrPeriodMs = 5.0,
- double frSpreadFactor = 0.05, double spreadFactor = 0.0);
-
- ~CBRVarFRGenerator();
-
-protected:
- virtual void ChangeFrameRate();
- virtual size_t nextPayloadSize ();
-
- double _avgFrPeriodMs;
- double _frSpreadFactor;
- uint8_t* _frameRates;
- uint16_t _numFrameRates;
- int64_t _frChangeTimeMs;
-};
-
-class CBRFrameDropGenerator : public CBRFixFRGenerator // constant bitrate and variable frame rate
-{
-public:
- CBRFrameDropGenerator(TestSenderReceiver *sender, int32_t bitrateKbps,
- int32_t rtpSampleRate = 90000, double spreadFactor = 0.0);
-
- ~CBRFrameDropGenerator();
-
-protected:
- virtual size_t nextPayloadSize();
-
- double _accBits;
-};
-
-#endif // WEBRTC_MODULES_RTP_RTCP_TEST_BWESTANDALONE_TESTLOADGENERATOR_H_
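
A hypothetical wiring of the hierarchy above, driving a sender at a constant 500 kbit/s, 25 fps, with a 10% frame-size spread (error handling omitted):

    TestSenderReceiver sender;             // declared in TestSenderReceiver.h
    CBRFixFRGenerator gen(&sender, 500 /*kbps*/, 90000 /*rtpSampleRate*/,
                          25 /*fps*/, 0.1 /*spread*/);
    sender.SetLoadGenerator(&gen);
    gen.Start();                           // spawns the generator thread
    // ... run the experiment ...
    gen.Stop();                            // signals and joins the thread
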
diff --git a/webrtc/modules/rtp_rtcp/test/BWEStandAlone/TestSenderReceiver.cc b/webrtc/modules/rtp_rtcp/test/BWEStandAlone/TestSenderReceiver.cc
deleted file mode 100644
index 47f2880638..0000000000
--- a/webrtc/modules/rtp_rtcp/test/BWEStandAlone/TestSenderReceiver.cc
+++ /dev/null
@@ -1,411 +0,0 @@
-/*
- * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "webrtc/modules/rtp_rtcp/test/BWEStandAlone/TestSenderReceiver.h"
-
-#include <stdio.h>
-#include <stdlib.h>
-
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp.h"
-#include "webrtc/modules/rtp_rtcp/test/BWEStandAlone/TestLoadGenerator.h"
-#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
-#include "webrtc/system_wrappers/include/event_wrapper.h"
-#include "webrtc/system_wrappers/include/tick_util.h"
-#include "webrtc/test/channel_transport/udp_transport.h"
-
-#define NR_OF_SOCKET_BUFFERS 500
-
-
-bool ProcThreadFunction(void *obj)
-{
- if (obj == NULL)
- {
- return false;
- }
- TestSenderReceiver *theObj = static_cast<TestSenderReceiver *>(obj);
-
- return theObj->ProcLoop();
-}
-
-
-TestSenderReceiver::TestSenderReceiver (void)
-:
-_critSect(CriticalSectionWrapper::CreateCriticalSection()),
-_eventPtr(NULL),
-_running(false),
-_payloadType(0),
-_loadGenerator(NULL),
-_isSender(false),
-_isReceiver(false),
-_sendRecCB(NULL),
-_lastBytesReceived(0),
-_lastTime(-1)
-{
- // RTP/RTCP module
- _rtp = RtpRtcp::CreateRtpRtcp(0, false);
- if (!_rtp)
- {
- throw "Could not create RTP/RTCP module";
- exit(1);
- }
-
- if (_rtp->InitReceiver() != 0)
- {
- throw "_rtp->InitReceiver()";
- exit(1);
- }
-
- if (_rtp->InitSender() != 0)
- {
- throw "_rtp->InitSender()";
- exit(1);
- }
-
- // SocketTransport module
- uint8_t numberOfThreads = 1;
- _transport = UdpTransport::Create(0, numberOfThreads);
- if (!_transport)
- {
- throw "Could not create transport module";
- exit(1);
- }
-}
-
-TestSenderReceiver::~TestSenderReceiver (void)
-{
-
- Stop(); // N.B. without critSect
-
- _critSect->Enter();
-
- if (_rtp)
- {
- RtpRtcp::DestroyRtpRtcp(_rtp);
- _rtp = NULL;
- }
-
- if (_transport)
- {
- UdpTransport::Destroy(_transport);
- _transport = NULL;
- }
-
- delete _critSect;
-
-}
-
-
-int32_t TestSenderReceiver::InitReceiver (const uint16_t rtpPort,
- const uint16_t rtcpPort,
- const int8_t payloadType /*= 127*/)
-{
- CriticalSectionScoped cs(_critSect);
-
- // init transport
- if (_transport->InitializeReceiveSockets(this, rtpPort/*, 0, NULL, 0, true*/) != 0)
- {
- throw "_transport->InitializeReceiveSockets";
- exit(1);
- }
-
- if (_rtp->RegisterIncomingRTPCallback(this) != 0)
- {
- throw "_rtp->RegisterIncomingRTPCallback";
- exit(1);
- }
-
- if (_rtp->RegisterIncomingDataCallback(this) != 0)
- {
- throw "_rtp->RegisterIncomingRTPCallback";
- exit(1);
- }
-
- if (_rtp->SetRTCPStatus(RtcpMode::kReducedSize) != 0) {
- throw "_rtp->SetRTCPStatus";
- exit(1);
- }
-
- if (_rtp->SetTMMBRStatus(true) != 0)
- {
- throw "_rtp->SetTMMBRStatus";
- exit(1);
- }
-
- if (_rtp->RegisterReceivePayload("I420", payloadType, 90000) != 0)
- {
- throw "_rtp->RegisterReceivePayload";
- exit(1);
- }
-
- _isReceiver = true;
-
- return (0);
-}
-
-
-int32_t TestSenderReceiver::Start()
-{
- CriticalSectionScoped cs(_critSect);
-
- _eventPtr = EventWrapper::Create();
-
- if (_rtp->SetSendingStatus(true) != 0)
- {
- throw "_rtp->SetSendingStatus";
- exit(1);
- }
-
- _procThread = ThreadWrapper::CreateThread(ProcThreadFunction, this,
- "TestSenderReceiver");
-
- _running = true;
-
- if (_isReceiver)
- {
- if (_transport->StartReceiving(NR_OF_SOCKET_BUFFERS) != 0)
- {
- throw "_transport->StartReceiving";
- exit(1);
- }
- }
-
- _procThread->Start();
- _procThread->SetPriority(kRealtimePriority);
-
- return 0;
-
-}
-
-
-int32_t TestSenderReceiver::Stop ()
-{
- CriticalSectionScoped cs(_critSect);
-
- _transport->StopReceiving();
-
- if (_procThread)
- {
- _running = false;
- _eventPtr->Set();
-
- _procThread->Stop();
- _procThread.reset();
-
- delete _eventPtr;
- }
-
- return (0);
-}
-
-
-bool TestSenderReceiver::ProcLoop(void)
-{
-
- // process RTP/RTCP module
- _rtp->Process();
-
- // process SocketTransport module
- _transport->Process();
-
- // no critSect
- while (_running)
- {
- // ask RTP/RTCP module for wait time
- int32_t rtpWait = _rtp->TimeUntilNextProcess();
-
- // ask SocketTransport module for wait time
- int32_t tpWait = _transport->TimeUntilNextProcess();
-
- int32_t minWait = (rtpWait < tpWait) ? rtpWait: tpWait;
- minWait = (minWait > 0) ? minWait : 0;
- // wait
- _eventPtr->Wait(minWait);
-
- // process RTP/RTCP module
- _rtp->Process();
-
- // process SocketTransport module
- _transport->Process();
-
- }
-
- return true;
-}
-
-
-int32_t TestSenderReceiver::ReceiveBitrateKbps ()
-{
- size_t bytesSent;
- uint32_t packetsSent;
- size_t bytesReceived;
- uint32_t packetsReceived;
-
- if (_rtp->DataCountersRTP(&bytesSent, &packetsSent, &bytesReceived, &packetsReceived) == 0)
- {
- int64_t now = TickTime::MillisecondTimestamp();
- int32_t kbps = 0;
- if (now > _lastTime)
- {
- if (_lastTime > 0)
- {
- // 8 * bytes / ms = kbps
- kbps = static_cast<int32_t>(
- (8 * (bytesReceived - _lastBytesReceived)) / (now - _lastTime));
- }
- _lastTime = now;
- _lastBytesReceived = bytesReceived;
- }
- return (kbps);
- }
-
- return (-1);
-}
-
-
-int32_t TestSenderReceiver::SetPacketTimeout(const uint32_t timeoutMS)
-{
- return (_rtp->SetPacketTimeout(timeoutMS, 0 /* RTCP timeout */));
-}
-
-
-int32_t TestSenderReceiver::OnReceivedPayloadData(const uint8_t* payloadData,
- const size_t payloadSize,
- const webrtc::WebRtcRTPHeader* rtpHeader)
-{
- //printf("OnReceivedPayloadData\n");
- return (0);
-}
-
-
-void TestSenderReceiver::IncomingRTPPacket(const int8_t* incomingRtpPacket,
- const size_t rtpPacketLength,
- const int8_t* fromIP,
- const uint16_t fromPort)
-{
- _rtp->IncomingPacket((uint8_t *) incomingRtpPacket, rtpPacketLength);
-}
-
-
-
-void TestSenderReceiver::IncomingRTCPPacket(const int8_t* incomingRtcpPacket,
- const size_t rtcpPacketLength,
- const int8_t* fromIP,
- const uint16_t fromPort)
-{
- _rtp->IncomingPacket((uint8_t *) incomingRtcpPacket, rtcpPacketLength);
-}
-
-
-
-
-
-///////////////////
-
-
-int32_t TestSenderReceiver::InitSender (const uint32_t startBitrateKbps,
- const int8_t* ipAddr,
- const uint16_t rtpPort,
- const uint16_t rtcpPort /*= 0*/,
- const int8_t payloadType /*= 127*/)
-{
- CriticalSectionScoped cs(_critSect);
-
- _payloadType = payloadType;
-
- // check load generator valid
- if (_loadGenerator)
- {
- _loadGenerator->SetBitrate(startBitrateKbps);
- }
-
- if (_rtp->RegisterSendTransport(_transport) != 0)
- {
- throw "_rtp->RegisterSendTransport";
- exit(1);
- }
- if (_rtp->RegisterSendPayload("I420", _payloadType, 90000) != 0)
- {
- throw "_rtp->RegisterSendPayload";
- exit(1);
- }
-
- if (_rtp->RegisterIncomingVideoCallback(this) != 0)
- {
- throw "_rtp->RegisterIncomingVideoCallback";
- exit(1);
- }
-
- if (_rtp->SetRTCPStatus(RtcpMode::kReducedSize) != 0) {
- throw "_rtp->SetRTCPStatus";
- exit(1);
- }
-
- if (_rtp->SetSendBitrate(startBitrateKbps*1000, 0, MAX_BITRATE_KBPS) != 0)
- {
- throw "_rtp->SetSendBitrate";
- exit(1);
- }
-
-
- // SocketTransport
- if (_transport->InitializeSendSockets(ipAddr, rtpPort, rtcpPort))
- {
- throw "_transport->InitializeSendSockets";
- exit(1);
- }
-
- _isSender = true;
-
- return (0);
-}
-
-
-
-int32_t
-TestSenderReceiver::SendOutgoingData(const uint32_t timeStamp,
- const uint8_t* payloadData,
- const size_t payloadSize,
- const webrtc::FrameType frameType /*= webrtc::kVideoFrameDelta*/)
-{
- return (_rtp->SendOutgoingData(frameType, _payloadType, timeStamp, payloadData, payloadSize));
-}
-
-
-int32_t TestSenderReceiver::SetLoadGenerator(TestLoadGenerator *generator)
-{
- CriticalSectionScoped cs(_critSect);
-
- _loadGenerator = generator;
- return(0);
-
-}
-
-void TestSenderReceiver::OnNetworkChanged(const int32_t id,
- const uint32_t minBitrateBps,
- const uint32_t maxBitrateBps,
- const uint8_t fractionLost,
- const uint16_t roundTripTimeMs,
- const uint16_t bwEstimateKbitMin,
- const uint16_t bwEstimateKbitMax)
-{
- if (_loadGenerator)
- {
- _loadGenerator->SetBitrate(maxBitrateBps/1000);
- }
-
- if (_sendRecCB)
- {
- _sendRecCB->OnOnNetworkChanged(maxBitrateBps,
- fractionLost,
- roundTripTimeMs,
- bwEstimateKbitMin,
- bwEstimateKbitMax);
- }
-}
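
ReceiveBitrateKbps() leans on a unit identity: bytes per millisecond times 8 is exactly kbit/s, so no factor of 1000 appears. For example, 125000 bytes received over 2000 ms gives 8 * 125000 / 2000 = 500 kbps. Distilled into a standalone helper (the function name is ours, not from the deleted file):

    #include <cstddef>
    #include <cstdint>

    std::int32_t BitrateKbps(std::size_t deltaBytes, std::int64_t deltaMs) {
        // bytes/ms * 8 = kbit/s; guard against a zero or negative interval.
        if (deltaMs <= 0) return 0;
        return static_cast<std::int32_t>(
            8 * deltaBytes / static_cast<std::size_t>(deltaMs));
    }
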
diff --git a/webrtc/modules/rtp_rtcp/test/BWEStandAlone/TestSenderReceiver.h b/webrtc/modules/rtp_rtcp/test/BWEStandAlone/TestSenderReceiver.h
deleted file mode 100644
index f394412a73..0000000000
--- a/webrtc/modules/rtp_rtcp/test/BWEStandAlone/TestSenderReceiver.h
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
- * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef WEBRTC_MODULES_RTP_RTCP_TEST_BWESTANDALONE_TESTSENDERRECEIVER_H_
-#define WEBRTC_MODULES_RTP_RTCP_TEST_BWESTANDALONE_TESTSENDERRECEIVER_H_
-
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
-#include "webrtc/system_wrappers/include/thread_wrapper.h"
-#include "webrtc/test/channel_transport/udp_transport.h"
-#include "webrtc/typedefs.h"
-
-class TestLoadGenerator;
-namespace webrtc {
-class CriticalSectionWrapper;
-class EventWrapper;
-}
-
-using namespace webrtc;
-
-#define MAX_BITRATE_KBPS 50000
-
-
-class SendRecCB
-{
-public:
- virtual void OnOnNetworkChanged(const uint32_t bitrateTarget,
- const uint8_t fractionLost,
- const uint16_t roundTripTimeMs,
- const uint16_t bwEstimateKbitMin,
- const uint16_t bwEstimateKbitMax) = 0;
-
- virtual ~SendRecCB() {};
-};
-
-
-class TestSenderReceiver : public RtpFeedback, public RtpData, public UdpTransportData, public RtpVideoFeedback
-{
-
-public:
- TestSenderReceiver (void);
-
- ~TestSenderReceiver (void);
-
- void SetCallback (SendRecCB *cb) { _sendRecCB = cb; };
-
- int32_t Start();
-
- int32_t Stop();
-
- bool ProcLoop();
-
- /////////////////////////////////////////////
- // Receiver methods
-
- int32_t InitReceiver (const uint16_t rtpPort,
- const uint16_t rtcpPort = 0,
- const int8_t payloadType = 127);
-
- int32_t ReceiveBitrateKbps ();
-
- int32_t SetPacketTimeout(const uint32_t timeoutMS);
-
- // Inherited from RtpFeedback
- int32_t OnInitializeDecoder(const int32_t id,
- const int8_t payloadType,
- const int8_t payloadName[RTP_PAYLOAD_NAME_SIZE],
- const uint32_t frequency,
- const uint8_t channels,
- const uint32_t rate) override {
- return 0;
- }
-
- void OnIncomingSSRCChanged(const int32_t id, const uint32_t SSRC) override {
- }
-
- void OnIncomingCSRCChanged(const int32_t id,
- const uint32_t CSRC,
- const bool added) override {}
-
- // Inherited from RtpData
- int32_t OnReceivedPayloadData(
- const uint8_t* payloadData,
- const size_t payloadSize,
- const webrtc::WebRtcRTPHeader* rtpHeader) override;
-
- // Inherited from UdpTransportData
- void IncomingRTPPacket(const int8_t* incomingRtpPacket,
- const size_t rtpPacketLength,
- const int8_t* fromIP,
- const uint16_t fromPort) override;
-
- void IncomingRTCPPacket(const int8_t* incomingRtcpPacket,
- const size_t rtcpPacketLength,
- const int8_t* fromIP,
- const uint16_t fromPort) override;
-
- /////////////////////////////////
- // Sender methods
-
- int32_t InitSender (const uint32_t startBitrateKbps,
- const int8_t* ipAddr,
- const uint16_t rtpPort,
- const uint16_t rtcpPort = 0,
- const int8_t payloadType = 127);
-
- int32_t SendOutgoingData(const uint32_t timeStamp,
- const uint8_t* payloadData,
- const size_t payloadSize,
- const webrtc::FrameType frameType = webrtc::kVideoFrameDelta);
-
- int32_t SetLoadGenerator(TestLoadGenerator *generator);
-
- uint32_t BitrateSent() { return (_rtp->BitrateSent()); };
-
-
- // Inherited from RtpVideoFeedback
- virtual void OnReceivedIntraFrameRequest(const int32_t id,
- const uint8_t message = 0) {};
-
- virtual void OnNetworkChanged(const int32_t id,
- const uint32_t minBitrateBps,
- const uint32_t maxBitrateBps,
- const uint8_t fractionLost,
- const uint16_t roundTripTimeMs,
- const uint16_t bwEstimateKbitMin,
- const uint16_t bwEstimateKbitMax);
-
-private:
- RtpRtcp* _rtp;
- UdpTransport* _transport;
- webrtc::CriticalSectionWrapper* _critSect;
- webrtc::EventWrapper *_eventPtr;
- rtc::scoped_ptr<webrtc::ThreadWrapper> _procThread;
- bool _running;
- int8_t _payloadType;
- TestLoadGenerator* _loadGenerator;
- bool _isSender;
- bool _isReceiver;
- SendRecCB * _sendRecCB;
- size_t _lastBytesReceived;
- int64_t _lastTime;
-
-};
-
-#endif // WEBRTC_MODULES_RTP_RTCP_TEST_BWESTANDALONE_TESTSENDERRECEIVER_H_
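
A hypothetical SendRecCB implementation (the class name is ours), just logging the estimate that OnNetworkChanged() forwards; it would be registered with sender.SetCallback(&cb) before Start():

    #include <cstdio>

    class LoggingSendRecCB : public SendRecCB {
    public:
        void OnOnNetworkChanged(const uint32_t bitrateTarget,
                                const uint8_t fractionLost,
                                const uint16_t roundTripTimeMs,
                                const uint16_t bwEstimateKbitMin,
                                const uint16_t bwEstimateKbitMax) override {
            std::printf("target=%u bps loss=%u/255 rtt=%u ms bwe=[%u, %u] kbit\n",
                        bitrateTarget,
                        static_cast<unsigned>(fractionLost),
                        static_cast<unsigned>(roundTripTimeMs),
                        static_cast<unsigned>(bwEstimateKbitMin),
                        static_cast<unsigned>(bwEstimateKbitMax));
        }
    };
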
diff --git a/webrtc/modules/rtp_rtcp/test/bwe_standalone.gypi b/webrtc/modules/rtp_rtcp/test/bwe_standalone.gypi
deleted file mode 100644
index e45daec77d..0000000000
--- a/webrtc/modules/rtp_rtcp/test/bwe_standalone.gypi
+++ /dev/null
@@ -1,85 +0,0 @@
-# Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
-#
-# Use of this source code is governed by a BSD-style license
-# that can be found in the LICENSE file in the root of the source
-# tree. An additional intellectual property rights grant can be found
-# in the file PATENTS. All contributing project authors may
-# be found in the AUTHORS file in the root of the source tree.
-
-{
- 'targets': [
- {
- 'target_name': 'bwe_standalone',
- 'type': 'executable',
- 'dependencies': [
- 'matlab_plotting',
- 'rtp_rtcp',
- 'udp_transport',
- '<(webrtc_root)/system_wrappers/system_wrappers.gyp:system_wrappers',
- ],
- 'sources': [
- 'BWEStandAlone/BWEStandAlone.cc',
- 'BWEStandAlone/TestLoadGenerator.cc',
- 'BWEStandAlone/TestLoadGenerator.h',
- 'BWEStandAlone/TestSenderReceiver.cc',
- 'BWEStandAlone/TestSenderReceiver.h',
- ], # source
- 'conditions': [
- ['OS=="linux"', {
- 'cflags': [
- '-fexceptions', # enable exceptions
- ],
- },
- ],
- ],
- },
-
- {
- 'target_name': 'matlab_plotting',
- 'type': 'static_library',
- 'dependencies': [
- 'matlab_plotting_include',
- '<(webrtc_root)/system_wrappers/system_wrappers.gyp:system_wrappers',
- ],
- 'include_dirs': [
- '/opt/matlab2010a/extern/include',
- ],
- 'export_dependent_settings': [
- 'matlab_plotting_include',
- ],
- 'sources': [
- 'BWEStandAlone/MatlabPlot.cc',
- 'BWEStandAlone/MatlabPlot.h',
- ],
- 'link_settings': {
- 'ldflags' : [
- '-L/opt/matlab2010a/bin/glnxa64',
- '-leng',
- '-lmx',
- '-Wl,-rpath,/opt/matlab2010a/bin/glnxa64',
- ],
- },
- 'defines': [
- 'MATLAB',
- ],
- 'conditions': [
- ['OS=="linux"', {
- 'cflags': [
- '-fexceptions', # enable exceptions
- ],
- },
- ],
- ],
- },
-
- {
- 'target_name': 'matlab_plotting_include',
- 'type': 'none',
- 'direct_dependent_settings': {
- 'include_dirs': [
- 'BWEStandAlone',
- ],
- },
- },
- ],
-}
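
Note the hard-coded /opt/matlab2010a paths and the 'MATLAB' define in the target above: the define is the single switch that activates the plotting code, since MatlabPlot.h wraps all of its declarations in the same guard. In any client file the shape is:

    #ifdef MATLAB
    MatlabEngine eng;   // real plotting through libeng/libmx
    #else
    // MatlabEngine is not even declared; callers must guard their uses too.
    #endif
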
diff --git a/webrtc/modules/rtp_rtcp/test/testAPI/test_api.cc b/webrtc/modules/rtp_rtcp/test/testAPI/test_api.cc
index 0270e55802..1d4d6d04a5 100644
--- a/webrtc/modules/rtp_rtcp/test/testAPI/test_api.cc
+++ b/webrtc/modules/rtp_rtcp/test/testAPI/test_api.cc
@@ -9,14 +9,14 @@
*/
#include "webrtc/modules/rtp_rtcp/test/testAPI/test_api.h"
-#include "webrtc/test/null_transport.h"
#include <algorithm>
#include <vector>
-using namespace webrtc;
+#include "webrtc/test/null_transport.h"
namespace webrtc {
+
void LoopBackTransport::SetSendModule(RtpRtcp* rtp_rtcp_module,
RTPPayloadRegistry* payload_registry,
RtpReceiver* receiver,
@@ -76,7 +76,6 @@ int32_t TestRtpReceiver::OnReceivedPayloadData(
payload_size_ = payload_size;
return 0;
}
-} // namespace webrtc
class RtpRtcpAPITest : public ::testing::Test {
protected:
@@ -187,3 +186,5 @@ TEST_F(RtpRtcpAPITest, RtxReceiver) {
rtx_header.payloadType = 0;
EXPECT_TRUE(rtp_payload_registry_->IsRtx(rtx_header));
}
+
+} // namespace webrtc
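
The test_api.cc hunks are mechanical but deliberate: the file-level using-directive goes away and the tests move inside the namespace, so test symbols stop leaking into the global namespace. The resulting shape, illustratively:

    namespace webrtc {

    TEST_F(RtpRtcpAPITest, Basic) {
        // webrtc:: names resolve unqualified here
    }

    }  // namespace webrtc
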
diff --git a/webrtc/modules/rtp_rtcp/test/testAPI/test_api.h b/webrtc/modules/rtp_rtcp/test/testAPI/test_api.h
index 73334a8b26..d8040f7902 100644
--- a/webrtc/modules/rtp_rtcp/test/testAPI/test_api.h
+++ b/webrtc/modules/rtp_rtcp/test/testAPI/test_api.h
@@ -7,16 +7,18 @@
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
+#ifndef WEBRTC_MODULES_RTP_RTCP_TEST_TESTAPI_TEST_API_H_
+#define WEBRTC_MODULES_RTP_RTCP_TEST_TESTAPI_TEST_API_H_
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/common_types.h"
-#include "webrtc/modules/rtp_rtcp/interface/receive_statistics.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_header_parser.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_payload_registry.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_receiver.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
+#include "webrtc/modules/rtp_rtcp/include/receive_statistics.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_header_parser.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_payload_registry.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_receiver.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "webrtc/transport.h"
namespace webrtc {
@@ -68,3 +70,4 @@ class TestRtpReceiver : public NullRtpData {
};
} // namespace webrtc
+#endif // WEBRTC_MODULES_RTP_RTCP_TEST_TESTAPI_TEST_API_H_
diff --git a/webrtc/modules/rtp_rtcp/test/testAPI/test_api_audio.cc b/webrtc/modules/rtp_rtcp/test/testAPI/test_api_audio.cc
index 745386d485..634969b311 100644
--- a/webrtc/modules/rtp_rtcp/test/testAPI/test_api_audio.cc
+++ b/webrtc/modules/rtp_rtcp/test/testAPI/test_api_audio.cc
@@ -15,12 +15,12 @@
#include "webrtc/modules/rtp_rtcp/test/testAPI/test_api.h"
#include "webrtc/common_types.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_receiver_audio.h"
-using namespace webrtc;
-
+namespace webrtc {
+namespace {
#define test_rate 64000u
class VerifyingAudioReceiver : public NullRtpData {
@@ -64,7 +64,7 @@ class RTPCallback : public NullRtpFeedback {
int32_t OnInitializeDecoder(const int8_t payloadType,
const char payloadName[RTP_PAYLOAD_NAME_SIZE],
const int frequency,
- const uint8_t channels,
+ const size_t channels,
const uint32_t rate) override {
if (payloadType == 96) {
EXPECT_EQ(test_rate, rate) <<
@@ -165,7 +165,7 @@ TEST_F(RtpRtcpAudioTest, Basic) {
module1->SetStartTimestamp(test_timestamp);
// Test detection at the end of a DTMF tone.
- //EXPECT_EQ(0, module2->SetTelephoneEventForwardToDecoder(true));
+ // EXPECT_EQ(0, module2->SetTelephoneEventForwardToDecoder(true));
EXPECT_EQ(0, module1->SetSendingStatus(true));
@@ -241,7 +241,7 @@ TEST_F(RtpRtcpAudioTest, RED) {
EXPECT_EQ(0, module1->SetSendREDPayloadType(voice_codec.pltype));
int8_t red = 0;
- EXPECT_EQ(0, module1->SendREDPayloadType(red));
+ EXPECT_EQ(0, module1->SendREDPayloadType(&red));
EXPECT_EQ(voice_codec.pltype, red);
EXPECT_EQ(0, rtp_receiver1_->RegisterReceivePayload(
voice_codec.plname,
@@ -278,7 +278,7 @@ TEST_F(RtpRtcpAudioTest, RED) {
&fragmentation));
EXPECT_EQ(0, module1->SetSendREDPayloadType(-1));
- EXPECT_EQ(-1, module1->SendREDPayloadType(red));
+ EXPECT_EQ(-1, module1->SendREDPayloadType(&red));
}
TEST_F(RtpRtcpAudioTest, DTMF) {
@@ -334,7 +334,7 @@ TEST_F(RtpRtcpAudioTest, DTMF) {
// Send RTP packets for 16 tones, 160 ms each, with a 100 ms
// pause between them = 2560 ms + 1600 ms = 4160 ms.
- for (;timeStamp <= 250 * 160; timeStamp += 160) {
+ for (; timeStamp <= 250 * 160; timeStamp += 160) {
EXPECT_EQ(0, module1->SendOutgoingData(webrtc::kAudioFrameSpeech, 96,
timeStamp, -1, test, 4));
fake_clock.AdvanceTimeMilliseconds(20);
@@ -342,10 +342,13 @@ TEST_F(RtpRtcpAudioTest, DTMF) {
}
EXPECT_EQ(0, module1->SendTelephoneEventOutband(32, 9000, 10));
- for (;timeStamp <= 740 * 160; timeStamp += 160) {
+ for (; timeStamp <= 740 * 160; timeStamp += 160) {
EXPECT_EQ(0, module1->SendOutgoingData(webrtc::kAudioFrameSpeech, 96,
timeStamp, -1, test, 4));
fake_clock.AdvanceTimeMilliseconds(20);
module1->Process();
}
}
+
+} // namespace
+} // namespace webrtc
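
Besides the namespace move, SendREDPayloadType now takes its result by pointer: output parameters passed as pointers force an `&` at the call site, making the mutation visible. The general shape, with a hypothetical helper:

    // The caller must write &red, which flags the argument as an out-parameter.
    int32_t GetRedPayloadType(int8_t* payload_type) {
        if (payload_type == nullptr) return -1;
        *payload_type = 127;  // hypothetical RED payload type
        return 0;
    }
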
diff --git a/webrtc/modules/rtp_rtcp/test/testAPI/test_api_rtcp.cc b/webrtc/modules/rtp_rtcp/test/testAPI/test_api_rtcp.cc
index e9d81122b1..6c60bf1f6d 100644
--- a/webrtc/modules/rtp_rtcp/test/testAPI/test_api_rtcp.cc
+++ b/webrtc/modules/rtp_rtcp/test/testAPI/test_api_rtcp.cc
@@ -14,13 +14,14 @@
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/common_types.h"
-#include "webrtc/modules/rtp_rtcp/interface/receive_statistics.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
+#include "webrtc/modules/rtp_rtcp/include/receive_statistics.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_receiver_audio.h"
#include "webrtc/modules/rtp_rtcp/test/testAPI/test_api.h"
-using namespace webrtc;
+namespace webrtc {
+namespace {
const uint64_t kTestPictureId = 12345678;
const uint8_t kSliPictureId = 156;
@@ -29,30 +30,29 @@ class RtcpCallback : public RtcpIntraFrameObserver {
public:
void SetModule(RtpRtcp* module) {
_rtpRtcpModule = module;
- };
+ }
virtual void OnRTCPPacketTimeout(const int32_t id) {
}
virtual void OnLipSyncUpdate(const int32_t id,
- const int32_t audioVideoOffset) {
- };
- virtual void OnReceivedIntraFrameRequest(uint32_t ssrc) {
- };
+ const int32_t audioVideoOffset) {}
+ virtual void OnReceivedIntraFrameRequest(uint32_t ssrc) {}
virtual void OnReceivedSLI(uint32_t ssrc,
uint8_t pictureId) {
EXPECT_EQ(kSliPictureId & 0x3f, pictureId);
- };
+ }
virtual void OnReceivedRPSI(uint32_t ssrc,
uint64_t pictureId) {
EXPECT_EQ(kTestPictureId, pictureId);
- };
- virtual void OnLocalSsrcChanged(uint32_t old_ssrc, uint32_t new_ssrc) {};
+ }
+ virtual void OnLocalSsrcChanged(uint32_t old_ssrc, uint32_t new_ssrc) {}
+
private:
RtpRtcp* _rtpRtcpModule;
};
class TestRtpFeedback : public NullRtpFeedback {
public:
- TestRtpFeedback(RtpRtcp* rtp_rtcp) : rtp_rtcp_(rtp_rtcp) {}
+ explicit TestRtpFeedback(RtpRtcp* rtp_rtcp) : rtp_rtcp_(rtp_rtcp) {}
virtual ~TestRtpFeedback() {}
void OnIncomingSSRCChanged(const uint32_t ssrc) override {
@@ -266,3 +266,6 @@ TEST_F(RtpRtcpRtcpTest, RemoteRTCPStatRemote) {
EXPECT_EQ(test_sequence_number, report_blocks[0].extendedHighSeqNum);
EXPECT_EQ(0u, report_blocks[0].fractionLost);
}
+
+} // namespace
+} // namespace webrtc
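
The `explicit` added to TestRtpFeedback's constructor closes off implicit conversions, which any single-argument constructor otherwise permits:

    struct Feedback {
        explicit Feedback(int id) : id_(id) {}
        int id_;
    };
    // Feedback f = 42;  // ill-formed with explicit; must be Feedback f(42);
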
diff --git a/webrtc/modules/rtp_rtcp/test/testAPI/test_api_video.cc b/webrtc/modules/rtp_rtcp/test/testAPI/test_api_video.cc
index 30a6a1c303..16ea540bd5 100644
--- a/webrtc/modules/rtp_rtcp/test/testAPI/test_api_video.cc
+++ b/webrtc/modules/rtp_rtcp/test/testAPI/test_api_video.cc
@@ -15,9 +15,9 @@
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/common_types.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_payload_registry.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_payload_registry.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "webrtc/modules/rtp_rtcp/source/byte_io.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_receiver_video.h"
#include "webrtc/modules/rtp_rtcp/test/testAPI/test_api.h"
diff --git a/webrtc/modules/rtp_rtcp/test/testFec/average_residual_loss_xor_codes.h b/webrtc/modules/rtp_rtcp/test/testFec/average_residual_loss_xor_codes.h
index 2e8d676e47..6c233bba17 100644
--- a/webrtc/modules/rtp_rtcp/test/testFec/average_residual_loss_xor_codes.h
+++ b/webrtc/modules/rtp_rtcp/test/testFec/average_residual_loss_xor_codes.h
@@ -7,8 +7,10 @@
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
+#ifndef WEBRTC_MODULES_RTP_RTCP_TEST_TESTFEC_AVERAGE_RESIDUAL_LOSS_XOR_CODES_H_
+#define WEBRTC_MODULES_RTP_RTCP_TEST_TESTFEC_AVERAGE_RESIDUAL_LOSS_XOR_CODES_H_
-namespace {
+namespace webrtc {
// Maximum number of media packets allowed in this test. The burst mask types
// are currently defined up to (kMaxMediaPacketsTest, kMaxMediaPacketsTest).
@@ -185,4 +187,5 @@ const float kMaxResidualLossBurstyMask[kNumberCodes] = {
0.009657f
};
-} // namespace
+} // namespace webrtc
+#endif // WEBRTC_MODULES_RTP_RTCP_TEST_TESTFEC_AVERAGE_RESIDUAL_LOSS_XOR_CODES_H_
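
Two changes land in this hunk: the include guard is derived from the full file path, and the anonymous namespace becomes namespace webrtc, since an anonymous namespace in a header gives every including translation unit its own internal-linkage copy of the tables. The guard skeleton:

    #ifndef WEBRTC_MODULES_RTP_RTCP_TEST_TESTFEC_AVERAGE_RESIDUAL_LOSS_XOR_CODES_H_
    #define WEBRTC_MODULES_RTP_RTCP_TEST_TESTFEC_AVERAGE_RESIDUAL_LOSS_XOR_CODES_H_
    // ... table definitions ...
    #endif  // WEBRTC_MODULES_RTP_RTCP_TEST_TESTFEC_AVERAGE_RESIDUAL_LOSS_XOR_CODES_H_
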
diff --git a/webrtc/modules/rtp_rtcp/test/testFec/test_fec.cc b/webrtc/modules/rtp_rtcp/test/testFec/test_fec.cc
index a8eafdd27e..b164b7e04c 100644
--- a/webrtc/modules/rtp_rtcp/test/testFec/test_fec.cc
+++ b/webrtc/modules/rtp_rtcp/test/testFec/test_fec.cc
@@ -22,43 +22,49 @@
#include <list>
#include "testing/gtest/include/gtest/gtest.h"
-#include "webrtc/modules/rtp_rtcp/source/fec_private_tables_bursty.h"
+#include "webrtc/base/random.h"
+#include "webrtc/modules/rtp_rtcp/source/byte_io.h"
#include "webrtc/modules/rtp_rtcp/source/forward_error_correction.h"
#include "webrtc/modules/rtp_rtcp/source/forward_error_correction_internal.h"
-
-#include "webrtc/modules/rtp_rtcp/source/byte_io.h"
#include "webrtc/test/testsupport/fileutils.h"
-//#define VERBOSE_OUTPUT
+// #define VERBOSE_OUTPUT
namespace webrtc {
+namespace fec_private_tables {
+extern const uint8_t** kPacketMaskBurstyTbl[12];
+}
namespace test {
+using fec_private_tables::kPacketMaskBurstyTbl;
void ReceivePackets(
ForwardErrorCorrection::ReceivedPacketList* toDecodeList,
ForwardErrorCorrection::ReceivedPacketList* receivedPacketList,
- uint32_t numPacketsToDecode, float reorderRate, float duplicateRate) {
+ size_t numPacketsToDecode,
+ float reorderRate,
+ float duplicateRate,
+ Random* random) {
assert(toDecodeList->empty());
assert(numPacketsToDecode <= receivedPacketList->size());
ForwardErrorCorrection::ReceivedPacketList::iterator it;
- for (uint32_t i = 0; i < numPacketsToDecode; i++) {
+ for (size_t i = 0; i < numPacketsToDecode; i++) {
it = receivedPacketList->begin();
// Reorder packets.
- float randomVariable = static_cast<float>(rand()) / RAND_MAX;
+ float randomVariable = random->Rand<float>();
while (randomVariable < reorderRate) {
++it;
if (it == receivedPacketList->end()) {
--it;
break;
}
- randomVariable = static_cast<float>(rand()) / RAND_MAX;
+ randomVariable = random->Rand<float>();
}
ForwardErrorCorrection::ReceivedPacket* receivedPacket = *it;
toDecodeList->push_back(receivedPacket);
// Duplicate packets.
- randomVariable = static_cast<float>(rand()) / RAND_MAX;
+ randomVariable = random->Rand<float>();
while (randomVariable < duplicateRate) {
ForwardErrorCorrection::ReceivedPacket* duplicatePacket =
new ForwardErrorCorrection::ReceivedPacket;
@@ -69,7 +75,7 @@ void ReceivePackets(
duplicatePacket->pkt->length = receivedPacket->pkt->length;
toDecodeList->push_back(duplicatePacket);
- randomVariable = static_cast<float>(rand()) / RAND_MAX;
+ randomVariable = random->Rand<float>();
}
receivedPacketList->erase(it);
}
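
The thread running through the test_fec.cc hunks: every global rand()/RAND_MAX draw becomes a call on one seeded webrtc::Random instance passed down explicitly, so the seed written to randomSeedLog.txt pins down the whole run. The calls this file uses, in isolation:

    Random random(1234u);                          // seed normally comes from time(NULL)
    float coin = random.Rand<float>();             // uniform in [0, 1)
    uint32_t ssrc = random.Rand(1u, 0xfffffffeu);  // uniform over a closed range
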
@@ -77,12 +83,8 @@ void ReceivePackets(
TEST(FecTest, FecTest) {
// TODO(marpan): Split this function into subroutines/helper functions.
- enum {
- kMaxNumberMediaPackets = 48
- };
- enum {
- kMaxNumberFecPackets = 48
- };
+ enum { kMaxNumberMediaPackets = 48 };
+ enum { kMaxNumberFecPackets = 48 };
const uint32_t kNumMaskBytesL0 = 2;
const uint32_t kNumMaskBytesL1 = 6;
@@ -91,15 +93,12 @@ TEST(FecTest, FecTest) {
const bool kUseUnequalProtection = true;
// FEC mask types.
- const FecMaskType kMaskTypes[] = { kFecMaskRandom, kFecMaskBursty };
+ const FecMaskType kMaskTypes[] = {kFecMaskRandom, kFecMaskBursty};
const int kNumFecMaskTypes = sizeof(kMaskTypes) / sizeof(*kMaskTypes);
- // TODO(pbos): Fix this. Hack to prevent a warning
- // ('-Wunneeded-internal-declaration') from clang.
- (void) kPacketMaskBurstyTbl;
-
// Maximum number of media packets allowed for the mask type.
- const uint16_t kMaxMediaPackets[] = {kMaxNumberMediaPackets,
+ const uint16_t kMaxMediaPackets[] = {
+ kMaxNumberMediaPackets,
sizeof(kPacketMaskBurstyTbl) / sizeof(*kPacketMaskBurstyTbl)};
ASSERT_EQ(12, kMaxMediaPackets[1]) << "Max media packets for bursty mode not "
@@ -115,7 +114,7 @@ TEST(FecTest, FecTest) {
ForwardErrorCorrection::Packet* mediaPacket = NULL;
// Running over only one loss rate to limit execution time.
- const float lossRate[] = { 0.5f };
+ const float lossRate[] = {0.5f};
const uint32_t lossRateSize = sizeof(lossRate) / sizeof(*lossRate);
const float reorderRate = 0.1f;
const float duplicateRate = 0.1f;
@@ -127,7 +126,7 @@ TEST(FecTest, FecTest) {
// Seed the random number generator, storing the seed to file in order to
// reproduce past results.
const unsigned int randomSeed = static_cast<unsigned int>(time(NULL));
- srand(randomSeed);
+ Random random(randomSeed);
std::string filename = webrtc::test::OutputPath() + "randomSeedLog.txt";
FILE* randomSeedFile = fopen(filename.c_str(), "a");
fprintf(randomSeedFile, "%u\n", randomSeed);
@@ -135,15 +134,13 @@ TEST(FecTest, FecTest) {
randomSeedFile = NULL;
uint16_t seqNum = 0;
- uint32_t timeStamp = static_cast<uint32_t>(rand());
- const uint32_t ssrc = static_cast<uint32_t>(rand());
+ uint32_t timeStamp = random.Rand<uint32_t>();
+ const uint32_t ssrc = random.Rand(1u, 0xfffffffe);
// Loop over the mask types: random and bursty.
for (int mask_type_idx = 0; mask_type_idx < kNumFecMaskTypes;
++mask_type_idx) {
-
for (uint32_t lossRateIdx = 0; lossRateIdx < lossRateSize; ++lossRateIdx) {
-
printf("Loss rate: %.2f, Mask type %d \n", lossRate[lossRateIdx],
mask_type_idx);
@@ -159,14 +156,12 @@ TEST(FecTest, FecTest) {
for (uint32_t numFecPackets = 1;
numFecPackets <= numMediaPackets && numFecPackets <= packetMaskMax;
numFecPackets++) {
-
// Loop over numImpPackets: usually <= (0.3*numMediaPackets).
// For this test we check up to ~ (numMediaPackets / 4).
uint32_t maxNumImpPackets = numMediaPackets / 4 + 1;
for (uint32_t numImpPackets = 0; numImpPackets <= maxNumImpPackets &&
- numImpPackets <= packetMaskMax;
+ numImpPackets <= packetMaskMax;
numImpPackets++) {
-
uint8_t protectionFactor =
static_cast<uint8_t>(numFecPackets * 255 / numMediaPackets);
@@ -181,10 +176,11 @@ TEST(FecTest, FecTest) {
mask_table, packetMask);
#ifdef VERBOSE_OUTPUT
- printf("%u media packets, %u FEC packets, %u numImpPackets, "
- "loss rate = %.2f \n",
- numMediaPackets, numFecPackets, numImpPackets,
- lossRate[lossRateIdx]);
+ printf(
+ "%u media packets, %u FEC packets, %u numImpPackets, "
+ "loss rate = %.2f \n",
+ numMediaPackets, numFecPackets, numImpPackets,
+ lossRate[lossRateIdx]);
printf("Packet mask matrix \n");
#endif
@@ -232,16 +228,15 @@ TEST(FecTest, FecTest) {
for (uint32_t i = 0; i < numMediaPackets; ++i) {
mediaPacket = new ForwardErrorCorrection::Packet;
mediaPacketList.push_back(mediaPacket);
- mediaPacket->length = static_cast<size_t>(
- (static_cast<float>(rand()) / RAND_MAX) *
- (IP_PACKET_SIZE - 12 - 28 -
- ForwardErrorCorrection::PacketOverhead()));
- if (mediaPacket->length < 12) {
- mediaPacket->length = 12;
- }
+ const uint32_t kMinPacketSize = 12;
+ const uint32_t kMaxPacketSize = static_cast<uint32_t>(
+ IP_PACKET_SIZE - 12 - 28 -
+ ForwardErrorCorrection::PacketOverhead());
+ mediaPacket->length = random.Rand(kMinPacketSize, kMaxPacketSize);
+
// Generate random values for the first 2 bytes.
- mediaPacket->data[0] = static_cast<uint8_t>(rand() % 256);
- mediaPacket->data[1] = static_cast<uint8_t>(rand() % 256);
+ mediaPacket->data[0] = random.Rand<uint8_t>();
+ mediaPacket->data[1] = random.Rand<uint8_t>();
// The first two bits are assumed to be 10 by the
// FEC encoder. In fact the FEC decoder will set the
@@ -266,7 +261,7 @@ TEST(FecTest, FecTest) {
ByteWriter<uint32_t>::WriteBigEndian(&mediaPacket->data[8], ssrc);
// Generate random values for payload
for (size_t j = 12; j < mediaPacket->length; ++j) {
- mediaPacket->data[j] = static_cast<uint8_t>(rand() % 256);
+ mediaPacket->data[j] = random.Rand<uint8_t>();
}
seqNum++;
}
@@ -289,8 +284,7 @@ TEST(FecTest, FecTest) {
while (mediaPacketListItem != mediaPacketList.end()) {
mediaPacket = *mediaPacketListItem;
// We want a value between 0 and 1.
- const float lossRandomVariable =
- (static_cast<float>(rand()) / (RAND_MAX));
+ const float lossRandomVariable = random.Rand<float>();
if (lossRandomVariable >= lossRate[lossRateIdx]) {
mediaLossMask[mediaPacketIdx] = 1;
@@ -315,8 +309,7 @@ TEST(FecTest, FecTest) {
uint32_t fecPacketIdx = 0;
while (fecPacketListItem != fecPacketList.end()) {
fecPacket = *fecPacketListItem;
- const float lossRandomVariable =
- (static_cast<float>(rand()) / (RAND_MAX));
+ const float lossRandomVariable = random.Rand<float>();
if (lossRandomVariable >= lossRate[lossRateIdx]) {
fecLossMask[fecPacketIdx] = 1;
receivedPacket = new ForwardErrorCorrection::ReceivedPacket;
@@ -387,18 +380,15 @@ TEST(FecTest, FecTest) {
// For error-checking frame completion.
bool fecPacketReceived = false;
while (!receivedPacketList.empty()) {
- uint32_t numPacketsToDecode = static_cast<uint32_t>(
- (static_cast<float>(rand()) / RAND_MAX) *
- receivedPacketList.size() + 0.5);
- if (numPacketsToDecode < 1) {
- numPacketsToDecode = 1;
- }
+ size_t numPacketsToDecode = random.Rand(
+ 1u, static_cast<uint32_t>(receivedPacketList.size()));
ReceivePackets(&toDecodeList, &receivedPacketList,
- numPacketsToDecode, reorderRate, duplicateRate);
+ numPacketsToDecode, reorderRate, duplicateRate,
+ &random);
if (fecPacketReceived == false) {
ForwardErrorCorrection::ReceivedPacketList::iterator
- toDecodeIt = toDecodeList.begin();
+ toDecodeIt = toDecodeList.begin();
while (toDecodeIt != toDecodeList.end()) {
receivedPacket = *toDecodeIt;
if (receivedPacket->is_fec) {
@@ -418,11 +408,11 @@ TEST(FecTest, FecTest) {
if (mediaLossMask[mediaPacketIdx] == 1) {
// Should have recovered this packet.
ForwardErrorCorrection::RecoveredPacketList::iterator
- recoveredPacketListItem = recoveredPacketList.begin();
+ recoveredPacketListItem = recoveredPacketList.begin();
- ASSERT_FALSE(
- recoveredPacketListItem == recoveredPacketList.end())
- << "Insufficient number of recovered packets.";
+ ASSERT_FALSE(recoveredPacketListItem ==
+ recoveredPacketList.end())
+ << "Insufficient number of recovered packets.";
mediaPacket = *mediaPacketListItem;
ForwardErrorCorrection::RecoveredPacket* recoveredPacket =
*recoveredPacketListItem;
@@ -462,7 +452,7 @@ TEST(FecTest, FecTest) {
// Delete received packets we didn't pass to DecodeFEC(), due to
// early frame completion.
ForwardErrorCorrection::ReceivedPacketList::iterator
- receivedPacketIt = receivedPacketList.begin();
+ receivedPacketIt = receivedPacketList.begin();
while (receivedPacketIt != receivedPacketList.end()) {
receivedPacket = *receivedPacketIt;
delete receivedPacket;
@@ -476,11 +466,11 @@ TEST(FecTest, FecTest) {
}
timeStamp += 90000 / 30;
} // loop over numImpPackets
- } // loop over FecPackets
- } // loop over numMediaPackets
+ } // loop over FecPackets
+ } // loop over numMediaPackets
delete[] packetMask;
} // loop over loss rates
- } // loop over mask types
+ } // loop over mask types
// Have DecodeFEC free allocated memory.
fec.ResetState(&recoveredPacketList);
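
For readers tracking the rand()/srand() conversion in the hunks above: every call site now draws from a single seeded webrtc::Random instance that is threaded through ReceivePackets(). A minimal standalone sketch of the pattern; the header path and exact distribution bounds are assumptions inferred from the calls visible in this diff, not verified against the tree:

    #include <stdint.h>
    #include "webrtc/base/random.h"  // assumed location of webrtc::Random

    void RandomUsageSketch() {
      webrtc::Random random(1234u);            // explicit nonzero seed => reproducible runs
      float p = random.Rand<float>();          // replaces static_cast<float>(rand()) / RAND_MAX
      uint8_t byte = random.Rand<uint8_t>();   // replaces rand() % 256
      uint32_t len = random.Rand(12u, 1200u);  // bounded range, replaces scale-then-clamp
      (void)p; (void)byte; (void)len;
    }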
diff --git a/webrtc/modules/rtp_rtcp/test/testFec/test_packet_masks_metrics.cc b/webrtc/modules/rtp_rtcp/test/testFec/test_packet_masks_metrics.cc
index 843a7f77f5..466214c740 100644
--- a/webrtc/modules/rtp_rtcp/test/testFec/test_packet_masks_metrics.cc
+++ b/webrtc/modules/rtp_rtcp/test/testFec/test_packet_masks_metrics.cc
@@ -59,13 +59,6 @@ enum { kMaxNumberMediaPackets = 48 };
// Maximum number of media packets allowed for each mask type.
const uint16_t kMaxMediaPackets[] = {kMaxNumberMediaPackets, 12};
-// Maximum number of media packets allowed in this test. The burst mask types
-// are currently defined up to (k=12,m=12).
-const int kMaxMediaPacketsTest = 12;
-
-// Maximum number of FEC codes considered in this test.
-const int kNumberCodes = kMaxMediaPacketsTest * (kMaxMediaPacketsTest + 1) / 2;
-
// Maximum gap size for characterizing the consecutiveness of the loss.
const int kMaxGapSize = 2 * kMaxMediaPacketsTest;
@@ -407,7 +400,7 @@ class FecPacketMaskMetricsTest : public ::testing::Test {
// Loop over all loss configurations for the symbol sequence of length
// |tot_num_packets|. In this version we process up to (k=12, m=12) codes,
// and get exact expressions for the residual loss.
- // TODO (marpan): For larger codes, loop over some random sample of loss
+ // TODO(marpan): For larger codes, loop over some random sample of loss
// configurations, sampling driven by the underlying statistical loss model
// (importance sampling).
@@ -427,7 +420,7 @@ class FecPacketMaskMetricsTest : public ::testing::Test {
// Map configuration number to a loss state.
for (int j = 0; j < tot_num_packets; j++) {
- state[j]=0; // Received state.
+ state[j] = 0; // Received state.
int bit_value = i >> (tot_num_packets - j - 1) & 1;
if (bit_value == 1) {
state[j] = 1; // Lost state.
@@ -860,9 +853,9 @@ TEST_F(FecPacketMaskMetricsTest, FecXorVsRS) {
EXPECT_GE(kMetricsXorBursty[code_index].average_residual_loss[k],
kMetricsReedSolomon[code_index].average_residual_loss[k]);
}
- // TODO (marpan): There are some cases (for high loss rates and/or
- // burst loss models) where XOR is better than RS. Is there some pattern
- // we can identify and enforce as a constraint?
+ // TODO(marpan): There are some cases (for high loss rates and/or
+ // burst loss models) where XOR is better than RS. Is there some pattern
+ // we can identify and enforce as a constraint?
}
}
}
@@ -874,7 +867,7 @@ TEST_F(FecPacketMaskMetricsTest, FecXorVsRS) {
TEST_F(FecPacketMaskMetricsTest, FecTrendXorVsRsLossRate) {
SetLossModels();
SetCodeParams();
- // TODO (marpan): Examine this further to see if the condition can be strictly
+ // TODO(marpan): Examine this further to see if the condition can be strictly
// satisfied (i.e., scale = 1.0) for all codes with different/better masks.
double scale = 0.90;
int num_loss_rates = sizeof(kAverageLossRate) /
@@ -898,7 +891,7 @@ TEST_F(FecPacketMaskMetricsTest, FecTrendXorVsRsLossRate) {
kMetricsXorRandom[code_index].average_residual_loss[k+1];
EXPECT_GE(diff_rs_xor_random_loss1, scale * diff_rs_xor_random_loss2);
}
- // TODO (marpan): Investigate the cases for the bursty mask where
+ // TODO(marpan): Investigate the cases for the bursty mask where
// this trend is not strictly satisfied.
}
}
@@ -937,7 +930,7 @@ TEST_F(FecPacketMaskMetricsTest, FecBehaviorViaProtectionLevelAndLength) {
EXPECT_LT(
kMetricsReedSolomon[code_index2].average_residual_loss[k],
kMetricsReedSolomon[code_index1].average_residual_loss[k]);
- // TODO (marpan): There are some corner cases where this is not
+ // TODO(marpan): There are some corner cases where this is not
// satisfied with the current packet masks. Look into updating
// these cases to see if this behavior should/can be satisfied,
// with overall lower residual loss for those XOR codes.
@@ -963,7 +956,7 @@ TEST_F(FecPacketMaskMetricsTest, FecVarianceBehaviorXorVsRs) {
SetCodeParams();
// The condition is not strictly satisfied with the current masks,
// i.e., for some codes, the variance of XOR may be slightly higher than RS.
- // TODO (marpan): Examine this further to see if the condition can be strictly
+ // TODO(marpan): Examine this further to see if the condition can be strictly
// satisfied (i.e., scale = 1.0) for all codes with different/better masks.
double scale = 0.95;
for (int code_index = 0; code_index < max_num_codes_; code_index++) {
@@ -998,7 +991,7 @@ TEST_F(FecPacketMaskMetricsTest, FecXorBurstyPerfectRecoveryConsecutiveLoss) {
// bursty mask type, for random loss models at low loss rates.
// The XOR codes with bursty mask types are generally better than the one with
// random mask type, for bursty loss models and/or high loss rates.
-// TODO (marpan): Enable this test when some of the packet masks are updated.
+// TODO(marpan): Enable this test when some of the packet masks are updated.
// Some isolated cases of the codes don't pass this currently.
/*
TEST_F(FecPacketMaskMetricsTest, FecXorRandomVsBursty) {
diff --git a/webrtc/modules/utility/BUILD.gn b/webrtc/modules/utility/BUILD.gn
index 163515c466..6704cd6d9a 100644
--- a/webrtc/modules/utility/BUILD.gn
+++ b/webrtc/modules/utility/BUILD.gn
@@ -10,12 +10,12 @@ import("../../build/webrtc.gni")
source_set("utility") {
sources = [
- "interface/audio_frame_operations.h",
- "interface/file_player.h",
- "interface/file_recorder.h",
- "interface/helpers_android.h",
- "interface/jvm_android.h",
- "interface/process_thread.h",
+ "include/audio_frame_operations.h",
+ "include/file_player.h",
+ "include/file_recorder.h",
+ "include/helpers_android.h",
+ "include/jvm_android.h",
+ "include/process_thread.h",
"source/audio_frame_operations.cc",
"source/coder.cc",
"source/coder.h",
diff --git a/webrtc/modules/utility/OWNERS b/webrtc/modules/utility/OWNERS
index 347d278614..65cb70c9b9 100644
--- a/webrtc/modules/utility/OWNERS
+++ b/webrtc/modules/utility/OWNERS
@@ -1,4 +1,9 @@
asapersson@webrtc.org
perkj@webrtc.org
+# These are for the common case of adding or renaming files. If you're doing
+# structural changes, please get a review from a reviewer in this file.
+per-file *.gyp=*
+per-file *.gypi=*
+
per-file BUILD.gn=kjellander@webrtc.org
diff --git a/webrtc/modules/utility/interface/audio_frame_operations.h b/webrtc/modules/utility/include/audio_frame_operations.h
index c2af68ab1b..1551d86894 100644
--- a/webrtc/modules/utility/interface/audio_frame_operations.h
+++ b/webrtc/modules/utility/include/audio_frame_operations.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_VOICE_ENGINE_AUDIO_FRAME_OPERATIONS_H_
-#define WEBRTC_VOICE_ENGINE_AUDIO_FRAME_OPERATIONS_H_
+#ifndef WEBRTC_MODULES_UTILITY_INCLUDE_AUDIO_FRAME_OPERATIONS_H_
+#define WEBRTC_MODULES_UTILITY_INCLUDE_AUDIO_FRAME_OPERATIONS_H_
#include "webrtc/typedefs.h"
@@ -55,4 +55,4 @@ class AudioFrameOperations {
} // namespace webrtc
-#endif // #ifndef WEBRTC_VOICE_ENGINE_AUDIO_FRAME_OPERATIONS_H_
+#endif // #ifndef WEBRTC_MODULES_UTILITY_INCLUDE_AUDIO_FRAME_OPERATIONS_H_
diff --git a/webrtc/modules/utility/interface/file_player.h b/webrtc/modules/utility/include/file_player.h
index 44f03e475a..4ca134a669 100644
--- a/webrtc/modules/utility/interface/file_player.h
+++ b/webrtc/modules/utility/include/file_player.h
@@ -8,12 +8,12 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_UTILITY_INTERFACE_FILE_PLAYER_H_
-#define WEBRTC_MODULES_UTILITY_INTERFACE_FILE_PLAYER_H_
+#ifndef WEBRTC_MODULES_UTILITY_INCLUDE_FILE_PLAYER_H_
+#define WEBRTC_MODULES_UTILITY_INCLUDE_FILE_PLAYER_H_
#include "webrtc/common_types.h"
#include "webrtc/engine_configurations.h"
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/typedefs.h"
#include "webrtc/video_frame.h"
@@ -108,4 +108,4 @@ protected:
};
} // namespace webrtc
-#endif // WEBRTC_MODULES_UTILITY_INTERFACE_FILE_PLAYER_H_
+#endif // WEBRTC_MODULES_UTILITY_INCLUDE_FILE_PLAYER_H_
diff --git a/webrtc/modules/utility/interface/file_recorder.h b/webrtc/modules/utility/include/file_recorder.h
index f2ce785368..09ed8ae350 100644
--- a/webrtc/modules/utility/interface/file_recorder.h
+++ b/webrtc/modules/utility/include/file_recorder.h
@@ -8,13 +8,13 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_UTILITY_INTERFACE_FILE_RECORDER_H_
-#define WEBRTC_MODULES_UTILITY_INTERFACE_FILE_RECORDER_H_
+#ifndef WEBRTC_MODULES_UTILITY_INCLUDE_FILE_RECORDER_H_
+#define WEBRTC_MODULES_UTILITY_INCLUDE_FILE_RECORDER_H_
#include "webrtc/common_types.h"
#include "webrtc/engine_configurations.h"
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/modules/media_file/interface/media_file_defines.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/media_file/media_file_defines.h"
#include "webrtc/system_wrappers/include/tick_util.h"
#include "webrtc/typedefs.h"
#include "webrtc/video_frame.h"
@@ -81,4 +81,4 @@ protected:
};
} // namespace webrtc
-#endif // WEBRTC_MODULES_UTILITY_INTERFACE_FILE_RECORDER_H_
+#endif // WEBRTC_MODULES_UTILITY_INCLUDE_FILE_RECORDER_H_
diff --git a/webrtc/modules/utility/interface/helpers_android.h b/webrtc/modules/utility/include/helpers_android.h
index 5c73fe4566..2840ca965e 100644
--- a/webrtc/modules/utility/interface/helpers_android.h
+++ b/webrtc/modules/utility/include/helpers_android.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_UTILITY_INTERFACE_HELPERS_ANDROID_H_
-#define WEBRTC_MODULES_UTILITY_INTERFACE_HELPERS_ANDROID_H_
+#ifndef WEBRTC_MODULES_UTILITY_INCLUDE_HELPERS_ANDROID_H_
+#define WEBRTC_MODULES_UTILITY_INCLUDE_HELPERS_ANDROID_H_
#include <jni.h>
#include <string>
@@ -84,4 +84,4 @@ class ScopedGlobalRef {
} // namespace webrtc
-#endif // WEBRTC_MODULES_UTILITY_INTERFACE_HELPERS_ANDROID_H_
+#endif // WEBRTC_MODULES_UTILITY_INCLUDE_HELPERS_ANDROID_H_
diff --git a/webrtc/modules/utility/interface/helpers_ios.h b/webrtc/modules/utility/include/helpers_ios.h
index a5edee0279..a5a07ace17 100644
--- a/webrtc/modules/utility/interface/helpers_ios.h
+++ b/webrtc/modules/utility/include/helpers_ios.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_UTILITY_INTERFACE_HELPERS_IOS_H_
-#define WEBRTC_MODULES_UTILITY_INTERFACE_HELPERS_IOS_H_
+#ifndef WEBRTC_MODULES_UTILITY_INCLUDE_HELPERS_IOS_H_
+#define WEBRTC_MODULES_UTILITY_INCLUDE_HELPERS_IOS_H_
#if defined(WEBRTC_IOS)
@@ -56,4 +56,4 @@ std::string GetDeviceName();
#endif // defined(WEBRTC_IOS)
-#endif // WEBRTC_MODULES_UTILITY_INTERFACE_HELPERS_IOS_H_
+#endif // WEBRTC_MODULES_UTILITY_INCLUDE_HELPERS_IOS_H_
diff --git a/webrtc/modules/utility/interface/jvm_android.h b/webrtc/modules/utility/include/jvm_android.h
index 0744fdbf12..f527dff632 100644
--- a/webrtc/modules/utility/interface/jvm_android.h
+++ b/webrtc/modules/utility/include/jvm_android.h
@@ -8,15 +8,15 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_UTILITY_SOURCE_JVM_H_
-#define WEBRTC_MODULES_UTILITY_SOURCE_JVM_H_
+#ifndef WEBRTC_MODULES_UTILITY_INCLUDE_JVM_ANDROID_H_
+#define WEBRTC_MODULES_UTILITY_INCLUDE_JVM_ANDROID_H_
#include <jni.h>
#include <string>
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/base/thread_checker.h"
-#include "webrtc/modules/utility/interface/helpers_android.h"
+#include "webrtc/modules/utility/include/helpers_android.h"
namespace webrtc {
@@ -182,4 +182,4 @@ class JVM {
} // namespace webrtc
-#endif // WEBRTC_MODULES_UTILITY_SOURCE_JVM_H_
+#endif // WEBRTC_MODULES_UTILITY_INCLUDE_JVM_ANDROID_H_
diff --git a/webrtc/modules/utility/interface/mock/mock_process_thread.h b/webrtc/modules/utility/include/mock/mock_process_thread.h
index fd108a8354..56d92f4527 100644
--- a/webrtc/modules/utility/interface/mock/mock_process_thread.h
+++ b/webrtc/modules/utility/include/mock/mock_process_thread.h
@@ -8,10 +8,10 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_UTILITY_INTERFACE_MOCK_PROCESS_THREAD_H_
-#define WEBRTC_MODULES_UTILITY_INTERFACE_MOCK_PROCESS_THREAD_H_
+#ifndef WEBRTC_MODULES_UTILITY_INCLUDE_MOCK_MOCK_PROCESS_THREAD_H_
+#define WEBRTC_MODULES_UTILITY_INCLUDE_MOCK_MOCK_PROCESS_THREAD_H_
-#include "webrtc/modules/utility/interface/process_thread.h"
+#include "webrtc/modules/utility/include/process_thread.h"
#include "testing/gmock/include/gmock/gmock.h"
@@ -35,4 +35,4 @@ class MockProcessThread : public ProcessThread {
};
} // namespace webrtc
-#endif // WEBRTC_MODULES_UTILITY_INTERFACE_MOCK_PROCESS_THREAD_H_
+#endif // WEBRTC_MODULES_UTILITY_INCLUDE_MOCK_MOCK_PROCESS_THREAD_H_
diff --git a/webrtc/modules/utility/interface/process_thread.h b/webrtc/modules/utility/include/process_thread.h
index 451a5a301b..285a5ea587 100644
--- a/webrtc/modules/utility/interface/process_thread.h
+++ b/webrtc/modules/utility/include/process_thread.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_UTILITY_INTERFACE_PROCESS_THREAD_H_
-#define WEBRTC_MODULES_UTILITY_INTERFACE_PROCESS_THREAD_H_
+#ifndef WEBRTC_MODULES_UTILITY_INCLUDE_PROCESS_THREAD_H_
+#define WEBRTC_MODULES_UTILITY_INCLUDE_PROCESS_THREAD_H_
#include "webrtc/typedefs.h"
#include "webrtc/base/scoped_ptr.h"
@@ -63,4 +63,4 @@ class ProcessThread {
} // namespace webrtc
-#endif // WEBRTC_MODULES_UTILITY_INTERFACE_PROCESS_THREAD_H_
+#endif // WEBRTC_MODULES_UTILITY_INCLUDE_PROCESS_THREAD_H_
diff --git a/webrtc/modules/utility/source/audio_frame_operations.cc b/webrtc/modules/utility/source/audio_frame_operations.cc
index c07ca1fdf6..fe09d7972f 100644
--- a/webrtc/modules/utility/source/audio_frame_operations.cc
+++ b/webrtc/modules/utility/source/audio_frame_operations.cc
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/modules/utility/interface/audio_frame_operations.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/utility/include/audio_frame_operations.h"
namespace webrtc {
diff --git a/webrtc/modules/utility/source/audio_frame_operations_unittest.cc b/webrtc/modules/utility/source/audio_frame_operations_unittest.cc
index c278cdddcd..fff8f4407b 100644
--- a/webrtc/modules/utility/source/audio_frame_operations_unittest.cc
+++ b/webrtc/modules/utility/source/audio_frame_operations_unittest.cc
@@ -10,8 +10,8 @@
#include "testing/gtest/include/gtest/gtest.h"
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/modules/utility/interface/audio_frame_operations.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/utility/include/audio_frame_operations.h"
namespace webrtc {
namespace {
diff --git a/webrtc/modules/utility/source/coder.cc b/webrtc/modules/utility/source/coder.cc
index 4ec5f9b4e2..18b690dc67 100644
--- a/webrtc/modules/utility/source/coder.cc
+++ b/webrtc/modules/utility/source/coder.cc
@@ -9,7 +9,7 @@
*/
#include "webrtc/common_types.h"
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/modules/utility/source/coder.h"
namespace webrtc {
diff --git a/webrtc/modules/utility/source/coder.h b/webrtc/modules/utility/source/coder.h
index 4270e9b380..abfa87efe1 100644
--- a/webrtc/modules/utility/source/coder.h
+++ b/webrtc/modules/utility/source/coder.h
@@ -13,7 +13,7 @@
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/common_types.h"
-#include "webrtc/modules/audio_coding/main/include/audio_coding_module.h"
+#include "webrtc/modules/audio_coding/include/audio_coding_module.h"
#include "webrtc/typedefs.h"
namespace webrtc {
diff --git a/webrtc/modules/utility/source/file_player_impl.h b/webrtc/modules/utility/source/file_player_impl.h
index f411db9151..beb6379ff0 100644
--- a/webrtc/modules/utility/source/file_player_impl.h
+++ b/webrtc/modules/utility/source/file_player_impl.h
@@ -14,9 +14,9 @@
#include "webrtc/common_audio/resampler/include/resampler.h"
#include "webrtc/common_types.h"
#include "webrtc/engine_configurations.h"
-#include "webrtc/modules/media_file/interface/media_file.h"
-#include "webrtc/modules/media_file/interface/media_file_defines.h"
-#include "webrtc/modules/utility/interface/file_player.h"
+#include "webrtc/modules/media_file/media_file.h"
+#include "webrtc/modules/media_file/media_file_defines.h"
+#include "webrtc/modules/utility/include/file_player.h"
#include "webrtc/modules/utility/source/coder.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
#include "webrtc/system_wrappers/include/tick_util.h"
diff --git a/webrtc/modules/utility/source/file_player_unittests.cc b/webrtc/modules/utility/source/file_player_unittests.cc
index 4b65acdeef..58471e5e8d 100644
--- a/webrtc/modules/utility/source/file_player_unittests.cc
+++ b/webrtc/modules/utility/source/file_player_unittests.cc
@@ -10,7 +10,7 @@
// Unit tests for FilePlayer.
-#include "webrtc/modules/utility/interface/file_player.h"
+#include "webrtc/modules/utility/include/file_player.h"
#include <stdio.h>
#include <string>
@@ -20,7 +20,6 @@
#include "webrtc/base/md5digest.h"
#include "webrtc/base/stringencode.h"
#include "webrtc/test/testsupport/fileutils.h"
-#include "webrtc/test/testsupport/gtest_disable.h"
DEFINE_bool(file_player_output, false, "Generate reference files.");
@@ -82,7 +81,12 @@ class FilePlayerTest : public ::testing::Test {
FILE* output_file_;
};
-TEST_F(FilePlayerTest, DISABLED_ON_IOS(PlayWavPcmuFile)) {
+#if defined(WEBRTC_IOS)
+#define MAYBE_PlayWavPcmuFile DISABLED_PlayWavPcmuFile
+#else
+#define MAYBE_PlayWavPcmuFile PlayWavPcmuFile
+#endif
+TEST_F(FilePlayerTest, MAYBE_PlayWavPcmuFile) {
const std::string kFileName =
test::ResourcePath("utility/encapsulated_pcmu_8khz", "wav");
// The file is longer than this, but keeping the output shorter limits the
@@ -93,7 +97,12 @@ TEST_F(FilePlayerTest, DISABLED_ON_IOS(PlayWavPcmuFile)) {
PlayFileAndCheck(kFileName, kRefChecksum, kOutputLengthMs);
}
-TEST_F(FilePlayerTest, DISABLED_ON_IOS(PlayWavPcm16File)) {
+#if defined(WEBRTC_IOS)
+#define MAYBE_PlayWavPcm16File DISABLED_PlayWavPcm16File
+#else
+#define MAYBE_PlayWavPcm16File PlayWavPcm16File
+#endif
+TEST_F(FilePlayerTest, MAYBE_PlayWavPcm16File) {
const std::string kFileName =
test::ResourcePath("utility/encapsulated_pcm16b_8khz", "wav");
// The file is longer than this, but keeping the output shorter limits the
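
Both hunks above replace the removed DISABLED_ON_IOS() helper from gtest_disable.h with the explicit MAYBE_ preprocessor pattern; the same rewrite covers DISABLED_ON_WIN and the Mac-specific cases in video_capture_unittest.cc further down. The generic shape, with placeholder fixture and test names:

    // SomeFixture / SomeTest are placeholders; only the shape matters.
    #if defined(WEBRTC_IOS)
    #define MAYBE_SomeTest DISABLED_SomeTest  // gtest compiles but skips DISABLED_* tests
    #else
    #define MAYBE_SomeTest SomeTest
    #endif
    TEST_F(SomeFixture, MAYBE_SomeTest) {
      // Runs everywhere except the platform singled out above.
    }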
diff --git a/webrtc/modules/utility/source/file_recorder_impl.cc b/webrtc/modules/utility/source/file_recorder_impl.cc
index 13926deb4a..88b20eeac2 100644
--- a/webrtc/modules/utility/source/file_recorder_impl.cc
+++ b/webrtc/modules/utility/source/file_recorder_impl.cc
@@ -10,7 +10,7 @@
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
#include "webrtc/engine_configurations.h"
-#include "webrtc/modules/media_file/interface/media_file.h"
+#include "webrtc/modules/media_file/media_file.h"
#include "webrtc/modules/utility/source/file_recorder_impl.h"
#include "webrtc/system_wrappers/include/logging.h"
diff --git a/webrtc/modules/utility/source/file_recorder_impl.h b/webrtc/modules/utility/source/file_recorder_impl.h
index 8ea96bdad4..697d759375 100644
--- a/webrtc/modules/utility/source/file_recorder_impl.h
+++ b/webrtc/modules/utility/source/file_recorder_impl.h
@@ -17,16 +17,16 @@
#include <list>
+#include "webrtc/base/platform_thread.h"
#include "webrtc/common_audio/resampler/include/resampler.h"
#include "webrtc/common_types.h"
#include "webrtc/engine_configurations.h"
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/modules/media_file/interface/media_file.h"
-#include "webrtc/modules/media_file/interface/media_file_defines.h"
-#include "webrtc/modules/utility/interface/file_recorder.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/media_file/media_file.h"
+#include "webrtc/modules/media_file/media_file_defines.h"
+#include "webrtc/modules/utility/include/file_recorder.h"
#include "webrtc/modules/utility/source/coder.h"
#include "webrtc/system_wrappers/include/event_wrapper.h"
-#include "webrtc/system_wrappers/include/thread_wrapper.h"
#include "webrtc/system_wrappers/include/tick_util.h"
#include "webrtc/typedefs.h"
diff --git a/webrtc/modules/utility/source/helpers_android.cc b/webrtc/modules/utility/source/helpers_android.cc
index 25652f237e..aea35f8d5a 100644
--- a/webrtc/modules/utility/source/helpers_android.cc
+++ b/webrtc/modules/utility/source/helpers_android.cc
@@ -9,7 +9,7 @@
*/
#include "webrtc/base/checks.h"
-#include "webrtc/modules/utility/interface/helpers_android.h"
+#include "webrtc/modules/utility/include/helpers_android.h"
#include <android/log.h>
#include <assert.h>
diff --git a/webrtc/modules/utility/source/helpers_ios.mm b/webrtc/modules/utility/source/helpers_ios.mm
index 90b7c8f605..2d0ac098c1 100644
--- a/webrtc/modules/utility/source/helpers_ios.mm
+++ b/webrtc/modules/utility/source/helpers_ios.mm
@@ -18,7 +18,7 @@
#include "webrtc/base/checks.h"
#include "webrtc/base/logging.h"
#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/modules/utility/interface/helpers_ios.h"
+#include "webrtc/modules/utility/include/helpers_ios.h"
namespace webrtc {
namespace ios {
diff --git a/webrtc/modules/utility/source/jvm_android.cc b/webrtc/modules/utility/source/jvm_android.cc
index 648c1685ea..eb37fda040 100644
--- a/webrtc/modules/utility/source/jvm_android.cc
+++ b/webrtc/modules/utility/source/jvm_android.cc
@@ -10,7 +10,7 @@
#include <android/log.h>
-#include "webrtc/modules/utility/interface/jvm_android.h"
+#include "webrtc/modules/utility/include/jvm_android.h"
#include "webrtc/base/checks.h"
diff --git a/webrtc/modules/utility/source/process_thread_impl.cc b/webrtc/modules/utility/source/process_thread_impl.cc
index 04fa88739f..8cdf01634c 100644
--- a/webrtc/modules/utility/source/process_thread_impl.cc
+++ b/webrtc/modules/utility/source/process_thread_impl.cc
@@ -11,7 +11,7 @@
#include "webrtc/modules/utility/source/process_thread_impl.h"
#include "webrtc/base/checks.h"
-#include "webrtc/modules/interface/module.h"
+#include "webrtc/modules/include/module.h"
#include "webrtc/system_wrappers/include/logging.h"
#include "webrtc/system_wrappers/include/tick_util.h"
@@ -38,8 +38,7 @@ ProcessThread::~ProcessThread() {}
// static
rtc::scoped_ptr<ProcessThread> ProcessThread::Create(
const char* thread_name) {
- return rtc::scoped_ptr<ProcessThread>(new ProcessThreadImpl(thread_name))
- .Pass();
+ return rtc::scoped_ptr<ProcessThread>(new ProcessThreadImpl(thread_name));
}
ProcessThreadImpl::ProcessThreadImpl(const char* thread_name)
@@ -76,9 +75,9 @@ void ProcessThreadImpl::Start() {
m.module->ProcessThreadAttached(this);
}
- thread_ = ThreadWrapper::CreateThread(&ProcessThreadImpl::Run, this,
- thread_name_);
- RTC_CHECK(thread_->Start());
+ thread_.reset(
+ new rtc::PlatformThread(&ProcessThreadImpl::Run, this, thread_name_));
+ thread_->Start();
}
void ProcessThreadImpl::Stop() {
@@ -93,7 +92,7 @@ void ProcessThreadImpl::Stop() {
wake_up_->Set();
- RTC_CHECK(thread_->Stop());
+ thread_->Stop();
stop_ = false;
// TODO(tommi): Since DeRegisterModule is currently being called from
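
The Start()/Stop() hunks above (and the matching change in video_capture_linux.cc further down) migrate from the system_wrappers ThreadWrapper factory to direct construction of rtc::PlatformThread. A hedged sketch, assuming the constructor signature used in this diff (entry function, context pointer, thread name) and void-returning Start()/Stop(), which is why the RTC_CHECK wrappers disappear:

    #include "webrtc/base/platform_thread.h"
    #include "webrtc/base/scoped_ptr.h"

    static bool Run(void* obj) { return false; }  // placeholder thread entry point

    void StartThreadSketch(rtc::scoped_ptr<rtc::PlatformThread>* thread, void* ctx) {
      // Before: *thread = ThreadWrapper::CreateThread(&Run, ctx, "worker");
      //         RTC_CHECK((*thread)->Start());
      thread->reset(new rtc::PlatformThread(&Run, ctx, "worker"));
      (*thread)->Start();                          // no bool to RTC_CHECK anymore
      (*thread)->SetPriority(rtc::kHighPriority);  // priority enum now lives in rtc::
    }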
diff --git a/webrtc/modules/utility/source/process_thread_impl.h b/webrtc/modules/utility/source/process_thread_impl.h
index 4e5861b41e..1c0a0cdfdd 100644
--- a/webrtc/modules/utility/source/process_thread_impl.h
+++ b/webrtc/modules/utility/source/process_thread_impl.h
@@ -15,10 +15,10 @@
#include <queue>
#include "webrtc/base/criticalsection.h"
+#include "webrtc/base/platform_thread.h"
#include "webrtc/base/thread_checker.h"
-#include "webrtc/modules/utility/interface/process_thread.h"
+#include "webrtc/modules/utility/include/process_thread.h"
#include "webrtc/system_wrappers/include/event_wrapper.h"
-#include "webrtc/system_wrappers/include/thread_wrapper.h"
#include "webrtc/typedefs.h"
namespace webrtc {
@@ -70,7 +70,8 @@ class ProcessThreadImpl : public ProcessThread {
rtc::ThreadChecker thread_checker_;
const rtc::scoped_ptr<EventWrapper> wake_up_;
- rtc::scoped_ptr<ThreadWrapper> thread_;
+ // TODO(pbos): Remove scoped_ptr and stop recreating the thread.
+ rtc::scoped_ptr<rtc::PlatformThread> thread_;
ModuleList modules_;
// TODO(tommi): Support delayed tasks.
diff --git a/webrtc/modules/utility/source/process_thread_impl_unittest.cc b/webrtc/modules/utility/source/process_thread_impl_unittest.cc
index e080545312..0b35fad7d2 100644
--- a/webrtc/modules/utility/source/process_thread_impl_unittest.cc
+++ b/webrtc/modules/utility/source/process_thread_impl_unittest.cc
@@ -8,9 +8,11 @@
* be found in the AUTHORS file in the root of the source tree.
*/
+#include <utility>
+
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
-#include "webrtc/modules/interface/module.h"
+#include "webrtc/modules/include/module.h"
#include "webrtc/modules/utility/source/process_thread_impl.h"
#include "webrtc/system_wrappers/include/tick_util.h"
@@ -251,8 +253,9 @@ TEST(ProcessThreadImpl, WakeUp) {
rtc::scoped_ptr<EventWrapper> called(EventWrapper::Create());
MockModule module;
- int64_t start_time = 0;
- int64_t called_time = 0;
+ int64_t start_time;
+ int64_t called_time;
+
// Ask for a callback after 1000ms.
// TimeUntilNextProcess will be called twice.
// The first time we use it to get the thread into a waiting state.
@@ -281,8 +284,6 @@ TEST(ProcessThreadImpl, WakeUp) {
EXPECT_CALL(module, ProcessThreadAttached(nullptr)).Times(1);
thread.Stop();
- ASSERT_GT(start_time, 0);
- ASSERT_GT(called_time, 0);
EXPECT_GE(called_time, start_time);
uint32_t diff = called_time - start_time;
// We should have been called back much quicker than 1sec.
@@ -296,7 +297,7 @@ TEST(ProcessThreadImpl, PostTask) {
rtc::scoped_ptr<EventWrapper> task_ran(EventWrapper::Create());
rtc::scoped_ptr<RaiseEventTask> task(new RaiseEventTask(task_ran.get()));
thread.Start();
- thread.PostTask(task.Pass());
+ thread.PostTask(std::move(task));
EXPECT_EQ(kEventSignaled, task_ran->Wait(100));
thread.Stop();
}
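
The PostTask hunk above, like the ProcessThread::Create() change earlier, drops rtc::scoped_ptr::Pass() in favor of std::move, which is why <utility> is newly included. A minimal sketch under those assumptions (RaiseEventTask is the test helper from this file):

    #include <utility>  // std::move

    void PostTaskSketch(webrtc::ProcessThreadImpl* thread, webrtc::EventWrapper* event) {
      rtc::scoped_ptr<RaiseEventTask> task(new RaiseEventTask(event));
      // Before: thread->PostTask(task.Pass());
      thread->PostTask(std::move(task));  // 'task' is null after the move
    }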
diff --git a/webrtc/modules/utility/utility.gypi b/webrtc/modules/utility/utility.gypi
index 38c9e3ebd9..e5b0a4d9c0 100644
--- a/webrtc/modules/utility/utility.gypi
+++ b/webrtc/modules/utility/utility.gypi
@@ -18,13 +18,13 @@
'<(webrtc_root)/system_wrappers/system_wrappers.gyp:system_wrappers',
],
'sources': [
- 'interface/audio_frame_operations.h',
- 'interface/file_player.h',
- 'interface/file_recorder.h',
- 'interface/helpers_android.h',
- 'interface/helpers_ios.h',
- 'interface/jvm_android.h',
- 'interface/process_thread.h',
+ 'include/audio_frame_operations.h',
+ 'include/file_player.h',
+ 'include/file_recorder.h',
+ 'include/helpers_android.h',
+ 'include/helpers_ios.h',
+ 'include/jvm_android.h',
+ 'include/process_thread.h',
'source/audio_frame_operations.cc',
'source/coder.cc',
'source/coder.h',
diff --git a/webrtc/modules/video_capture/BUILD.gn b/webrtc/modules/video_capture/BUILD.gn
index b0ed6f4e6c..78f5212950 100644
--- a/webrtc/modules/video_capture/BUILD.gn
+++ b/webrtc/modules/video_capture/BUILD.gn
@@ -16,11 +16,11 @@ source_set("video_capture_module") {
sources = [
"device_info_impl.cc",
"device_info_impl.h",
- "include/video_capture.h",
- "include/video_capture_defines.h",
- "include/video_capture_factory.h",
+ "video_capture.h",
"video_capture_config.h",
+ "video_capture_defines.h",
"video_capture_delay.h",
+ "video_capture_factory.h",
"video_capture_factory.cc",
"video_capture_impl.cc",
"video_capture_impl.h",
diff --git a/webrtc/modules/video_capture/device_info_impl.h b/webrtc/modules/video_capture/device_info_impl.h
index 420808bcee..44e7dd596b 100644
--- a/webrtc/modules/video_capture/device_info_impl.h
+++ b/webrtc/modules/video_capture/device_info_impl.h
@@ -13,7 +13,7 @@
#include <vector>
-#include "webrtc/modules/video_capture/include/video_capture.h"
+#include "webrtc/modules/video_capture/video_capture.h"
#include "webrtc/modules/video_capture/video_capture_delay.h"
#include "webrtc/system_wrappers/include/rw_lock_wrapper.h"
diff --git a/webrtc/modules/video_capture/ios/device_info_ios_objc.h b/webrtc/modules/video_capture/ios/device_info_ios_objc.h
index 65444bedcb..d67b559972 100644
--- a/webrtc/modules/video_capture/ios/device_info_ios_objc.h
+++ b/webrtc/modules/video_capture/ios/device_info_ios_objc.h
@@ -13,7 +13,7 @@
#import <AVFoundation/AVFoundation.h>
-#include "webrtc/modules/video_capture/include/video_capture_defines.h"
+#include "webrtc/modules/video_capture/video_capture_defines.h"
@interface DeviceInfoIosObjC : NSObject
+ (int)captureDeviceCount;
diff --git a/webrtc/modules/video_capture/linux/video_capture_linux.cc b/webrtc/modules/video_capture/linux/video_capture_linux.cc
index fe99c7136f..401a69d8c1 100644
--- a/webrtc/modules/video_capture/linux/video_capture_linux.cc
+++ b/webrtc/modules/video_capture/linux/video_capture_linux.cc
@@ -280,10 +280,10 @@ int32_t VideoCaptureModuleV4L2::StartCapture(
//start capture thread;
if (!_captureThread)
{
- _captureThread = ThreadWrapper::CreateThread(
- VideoCaptureModuleV4L2::CaptureThread, this, "CaptureThread");
+ _captureThread.reset(new rtc::PlatformThread(
+ VideoCaptureModuleV4L2::CaptureThread, this, "CaptureThread"));
_captureThread->Start();
- _captureThread->SetPriority(kHighPriority);
+ _captureThread->SetPriority(rtc::kHighPriority);
}
// Needed to start UVC camera - from the uvcview application
diff --git a/webrtc/modules/video_capture/linux/video_capture_linux.h b/webrtc/modules/video_capture/linux/video_capture_linux.h
index 996f8e10ca..8172eb8d2a 100644
--- a/webrtc/modules/video_capture/linux/video_capture_linux.h
+++ b/webrtc/modules/video_capture/linux/video_capture_linux.h
@@ -11,9 +11,9 @@
#ifndef WEBRTC_MODULES_VIDEO_CAPTURE_MAIN_SOURCE_LINUX_VIDEO_CAPTURE_LINUX_H_
#define WEBRTC_MODULES_VIDEO_CAPTURE_MAIN_SOURCE_LINUX_VIDEO_CAPTURE_LINUX_H_
+#include "webrtc/base/platform_thread.h"
#include "webrtc/common_types.h"
#include "webrtc/modules/video_capture/video_capture_impl.h"
-#include "webrtc/system_wrappers/include/thread_wrapper.h"
namespace webrtc
{
@@ -39,7 +39,8 @@ private:
bool AllocateVideoBuffers();
bool DeAllocateVideoBuffers();
- rtc::scoped_ptr<ThreadWrapper> _captureThread;
+ // TODO(pbos): Stop using scoped_ptr and resetting the thread.
+ rtc::scoped_ptr<rtc::PlatformThread> _captureThread;
CriticalSectionWrapper* _captureCritSect;
int32_t _deviceId;
diff --git a/webrtc/modules/video_capture/mac/qtkit/video_capture_qtkit_info.mm b/webrtc/modules/video_capture/mac/qtkit/video_capture_qtkit_info.mm
index 1251ecd830..0c0c6a1261 100644
--- a/webrtc/modules/video_capture/mac/qtkit/video_capture_qtkit_info.mm
+++ b/webrtc/modules/video_capture/mac/qtkit/video_capture_qtkit_info.mm
@@ -9,7 +9,7 @@
*/
#import "webrtc/modules/video_capture/mac/qtkit/video_capture_qtkit_info_objc.h"
-#include "webrtc/modules/video_capture/include/video_capture.h"
+#include "webrtc/modules/video_capture/video_capture.h"
#include "webrtc/modules/video_capture/video_capture_config.h"
#include "webrtc/system_wrappers/include/trace.h"
diff --git a/webrtc/modules/video_capture/test/video_capture_unittest.cc b/webrtc/modules/video_capture/test/video_capture_unittest.cc
index 2b8786b0fe..45d2d2f241 100644
--- a/webrtc/modules/video_capture/test/video_capture_unittest.cc
+++ b/webrtc/modules/video_capture/test/video_capture_unittest.cc
@@ -17,13 +17,12 @@
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/base/scoped_ref_ptr.h"
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
-#include "webrtc/modules/utility/interface/process_thread.h"
-#include "webrtc/modules/video_capture/include/video_capture.h"
-#include "webrtc/modules/video_capture/include/video_capture_factory.h"
+#include "webrtc/modules/utility/include/process_thread.h"
+#include "webrtc/modules/video_capture/video_capture.h"
+#include "webrtc/modules/video_capture/video_capture_factory.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
#include "webrtc/system_wrappers/include/sleep.h"
#include "webrtc/system_wrappers/include/tick_util.h"
-#include "webrtc/test/testsupport/gtest_disable.h"
#include "webrtc/video_frame.h"
using rtc::scoped_ptr;
@@ -47,14 +46,14 @@ using webrtc::VideoCaptureModule;
SleepMs(5); \
res = (ex); \
} \
- } while (0);\
+ } while (0)
#define EXPECT_TRUE_WAIT(ex, timeout) \
do { \
bool res; \
WAIT_(ex, timeout, res); \
if (!res) EXPECT_TRUE(ex); \
- } while (0);
+ } while (0)
static const int kTimeOut = 5000;
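
Why the trailing semicolon is dropped from the two do { ... } while (0) macros above: with the semicolon baked into the macro body, the caller's own ';' adds an extra empty statement, which detaches a following else and defeats the point of the do/while idiom. A small illustration (capturing and frames_received are placeholder names):

    // Expands to a single statement only when the macro ends in 'while (0)';
    // with 'while (0);' the extra empty statement orphans the 'else' below.
    if (capturing)
      EXPECT_TRUE_WAIT(frames_received > 0, kTimeOut);
    else
      ADD_FAILURE();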
@@ -275,7 +274,14 @@ class VideoCaptureTest : public testing::Test {
unsigned int number_of_devices_;
};
-TEST_F(VideoCaptureTest, CreateDelete) {
+#ifdef WEBRTC_MAC
+// Currently fails on Mac 64-bit, see
+// https://bugs.chromium.org/p/webrtc/issues/detail?id=5406
+#define MAYBE_CreateDelete DISABLED_CreateDelete
+#else
+#define MAYBE_CreateDelete CreateDelete
+#endif
+TEST_F(VideoCaptureTest, MAYBE_CreateDelete) {
for (int i = 0; i < 5; ++i) {
int64_t start_time = TickTime::MillisecondTimestamp();
TestVideoCaptureCallback capture_observer;
@@ -312,7 +318,14 @@ TEST_F(VideoCaptureTest, CreateDelete) {
}
}
-TEST_F(VideoCaptureTest, Capabilities) {
+#ifdef WEBRTC_MAC
+// Currently fails on Mac 64-bit, see
+// https://bugs.chromium.org/p/webrtc/issues/detail?id=5406
+#define MAYBE_Capabilities DISABLED_Capabilities
+#else
+#define MAYBE_Capabilities Capabilities
+#endif
+TEST_F(VideoCaptureTest, MAYBE_Capabilities) {
#ifdef WEBRTC_MAC
printf("Video capture capabilities are not supported on Mac.\n");
return;
@@ -479,7 +492,12 @@ TEST_F(VideoCaptureExternalTest, TestExternalCapture) {
// Test frame rate and no picture alarm.
// Flaky on Win32, see webrtc:3270.
-TEST_F(VideoCaptureExternalTest, DISABLED_ON_WIN(FrameRate)) {
+#if defined(WEBRTC_WIN)
+#define MAYBE_FrameRate DISABLED_FrameRate
+#else
+#define MAYBE_FrameRate FrameRate
+#endif
+TEST_F(VideoCaptureExternalTest, MAYBE_FrameRate) {
int64_t testTime = 3;
TickTime startTime = TickTime::Now();
diff --git a/webrtc/modules/video_capture/video_capture.gypi b/webrtc/modules/video_capture/video_capture.gypi
index f552df7758..c80f2bf5b5 100644
--- a/webrtc/modules/video_capture/video_capture.gypi
+++ b/webrtc/modules/video_capture/video_capture.gypi
@@ -23,11 +23,11 @@
'sources': [
'device_info_impl.cc',
'device_info_impl.h',
- 'include/video_capture.h',
- 'include/video_capture_defines.h',
- 'include/video_capture_factory.h',
+ 'video_capture.h',
'video_capture_config.h',
+ 'video_capture_defines.h',
'video_capture_delay.h',
+ 'video_capture_factory.h',
'video_capture_factory.cc',
'video_capture_impl.cc',
'video_capture_impl.h',
@@ -116,6 +116,23 @@
],
},
}], # win
+ ['OS=="win" and clang==1', {
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'AdditionalOptions': [
+ # Disable warnings failing when compiling with Clang on Windows.
+ # https://bugs.chromium.org/p/webrtc/issues/detail?id=5366
+ '-Wno-comment',
+ '-Wno-ignored-attributes',
+ '-Wno-microsoft-extra-qualification',
+ '-Wno-missing-braces',
+ '-Wno-overloaded-virtual',
+ '-Wno-reorder',
+ '-Wno-writable-strings',
+ ],
+ },
+ },
+ }],
['OS=="ios"', {
'sources': [
'ios/device_info_ios.h',
diff --git a/webrtc/modules/video_capture/include/video_capture.h b/webrtc/modules/video_capture/video_capture.h
index 09b4502115..08d02211c7 100644
--- a/webrtc/modules/video_capture/include/video_capture.h
+++ b/webrtc/modules/video_capture/video_capture.h
@@ -8,12 +8,12 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_VIDEO_CAPTURE_INCLUDE_VIDEO_CAPTURE_H_
-#define WEBRTC_MODULES_VIDEO_CAPTURE_INCLUDE_VIDEO_CAPTURE_H_
+#ifndef WEBRTC_MODULES_VIDEO_CAPTURE_VIDEO_CAPTURE_H_
+#define WEBRTC_MODULES_VIDEO_CAPTURE_VIDEO_CAPTURE_H_
#include "webrtc/common_video/rotation.h"
-#include "webrtc/modules/interface/module.h"
-#include "webrtc/modules/video_capture/include/video_capture_defines.h"
+#include "webrtc/modules/include/module.h"
+#include "webrtc/modules/video_capture/video_capture_defines.h"
namespace webrtc {
@@ -157,4 +157,4 @@ protected:
};
} // namespace webrtc
-#endif // WEBRTC_MODULES_VIDEO_CAPTURE_INCLUDE_VIDEO_CAPTURE_H_
+#endif // WEBRTC_MODULES_VIDEO_CAPTURE_VIDEO_CAPTURE_H_
diff --git a/webrtc/modules/video_capture/include/video_capture_defines.h b/webrtc/modules/video_capture/video_capture_defines.h
index 1dee4fa814..ef97ecab9d 100644
--- a/webrtc/modules/video_capture/include/video_capture_defines.h
+++ b/webrtc/modules/video_capture/video_capture_defines.h
@@ -8,10 +8,10 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_VIDEO_CAPTURE_INCLUDE_VIDEO_CAPTURE_DEFINES_H_
-#define WEBRTC_MODULES_VIDEO_CAPTURE_INCLUDE_VIDEO_CAPTURE_DEFINES_H_
+#ifndef WEBRTC_MODULES_VIDEO_CAPTURE_VIDEO_CAPTURE_DEFINES_H_
+#define WEBRTC_MODULES_VIDEO_CAPTURE_VIDEO_CAPTURE_DEFINES_H_
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/typedefs.h"
#include "webrtc/video_frame.h"
@@ -115,4 +115,4 @@ protected:
} // namespace webrtc
-#endif // WEBRTC_MODULES_VIDEO_CAPTURE_INCLUDE_VIDEO_CAPTURE_DEFINES_H_
+#endif // WEBRTC_MODULES_VIDEO_CAPTURE_VIDEO_CAPTURE_DEFINES_H_
diff --git a/webrtc/modules/video_capture/video_capture_factory.cc b/webrtc/modules/video_capture/video_capture_factory.cc
index f88f916ba4..618c08bac6 100644
--- a/webrtc/modules/video_capture/video_capture_factory.cc
+++ b/webrtc/modules/video_capture/video_capture_factory.cc
@@ -8,7 +8,7 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/video_capture/include/video_capture_factory.h"
+#include "webrtc/modules/video_capture/video_capture_factory.h"
#include "webrtc/modules/video_capture/video_capture_impl.h"
diff --git a/webrtc/modules/video_capture/include/video_capture_factory.h b/webrtc/modules/video_capture/video_capture_factory.h
index f78437d1a0..4765be1fde 100644
--- a/webrtc/modules/video_capture/include/video_capture_factory.h
+++ b/webrtc/modules/video_capture/video_capture_factory.h
@@ -11,10 +11,10 @@
// This file contains interfaces used for creating the VideoCaptureModule
// and DeviceInfo.
-#ifndef WEBRTC_MODULES_VIDEO_CAPTURE_INCLUDE_VIDEO_CAPTURE_FACTORY_H_
-#define WEBRTC_MODULES_VIDEO_CAPTURE_INCLUDE_VIDEO_CAPTURE_FACTORY_H_
+#ifndef WEBRTC_MODULES_VIDEO_CAPTURE_VIDEO_CAPTURE_FACTORY_H_
+#define WEBRTC_MODULES_VIDEO_CAPTURE_VIDEO_CAPTURE_FACTORY_H_
-#include "webrtc/modules/video_capture/include/video_capture.h"
+#include "webrtc/modules/video_capture/video_capture.h"
namespace webrtc {
@@ -42,4 +42,4 @@ class VideoCaptureFactory {
} // namespace webrtc
-#endif // WEBRTC_MODULES_VIDEO_CAPTURE_INCLUDE_VIDEO_CAPTURE_FACTORY_H_
+#endif // WEBRTC_MODULES_VIDEO_CAPTURE_VIDEO_CAPTURE_FACTORY_H_
diff --git a/webrtc/modules/video_capture/video_capture_impl.cc b/webrtc/modules/video_capture/video_capture_impl.cc
index 4046181505..90730cd984 100644
--- a/webrtc/modules/video_capture/video_capture_impl.cc
+++ b/webrtc/modules/video_capture/video_capture_impl.cc
@@ -14,7 +14,7 @@
#include "webrtc/base/trace_event.h"
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/modules/video_capture/video_capture_config.h"
#include "webrtc/system_wrappers/include/clock.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
diff --git a/webrtc/modules/video_capture/video_capture_impl.h b/webrtc/modules/video_capture/video_capture_impl.h
index deb989c251..164421776c 100644
--- a/webrtc/modules/video_capture/video_capture_impl.h
+++ b/webrtc/modules/video_capture/video_capture_impl.h
@@ -17,7 +17,7 @@
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
#include "webrtc/common_video/rotation.h"
-#include "webrtc/modules/video_capture/include/video_capture.h"
+#include "webrtc/modules/video_capture/video_capture.h"
#include "webrtc/modules/video_capture/video_capture_config.h"
#include "webrtc/system_wrappers/include/tick_util.h"
#include "webrtc/video_frame.h"
diff --git a/webrtc/modules/video_capture/windows/sink_filter_ds.h b/webrtc/modules/video_capture/windows/sink_filter_ds.h
index 064cd9d7d3..6be74f69f9 100644
--- a/webrtc/modules/video_capture/windows/sink_filter_ds.h
+++ b/webrtc/modules/video_capture/windows/sink_filter_ds.h
@@ -13,7 +13,7 @@
#include <Streams.h> // Include base DS filter header files
-#include "webrtc/modules/video_capture/include/video_capture_defines.h"
+#include "webrtc/modules/video_capture/video_capture_defines.h"
namespace webrtc
{
diff --git a/webrtc/modules/video_coding/BUILD.gn b/webrtc/modules/video_coding/BUILD.gn
index 9e8cd47e53..32ac627ed2 100644
--- a/webrtc/modules/video_coding/BUILD.gn
+++ b/webrtc/modules/video_coding/BUILD.gn
@@ -10,57 +10,57 @@ import("../../build/webrtc.gni")
source_set("video_coding") {
sources = [
- "main/interface/video_coding.h",
- "main/interface/video_coding_defines.h",
- "main/source/codec_database.cc",
- "main/source/codec_database.h",
- "main/source/codec_timer.cc",
- "main/source/codec_timer.h",
- "main/source/content_metrics_processing.cc",
- "main/source/content_metrics_processing.h",
- "main/source/decoding_state.cc",
- "main/source/decoding_state.h",
- "main/source/encoded_frame.cc",
- "main/source/encoded_frame.h",
- "main/source/fec_tables_xor.h",
- "main/source/frame_buffer.cc",
- "main/source/frame_buffer.h",
- "main/source/generic_decoder.cc",
- "main/source/generic_decoder.h",
- "main/source/generic_encoder.cc",
- "main/source/generic_encoder.h",
- "main/source/inter_frame_delay.cc",
- "main/source/inter_frame_delay.h",
- "main/source/internal_defines.h",
- "main/source/jitter_buffer.cc",
- "main/source/jitter_buffer.h",
- "main/source/jitter_buffer_common.h",
- "main/source/jitter_estimator.cc",
- "main/source/jitter_estimator.h",
- "main/source/media_opt_util.cc",
- "main/source/media_opt_util.h",
- "main/source/media_optimization.cc",
- "main/source/media_optimization.h",
- "main/source/nack_fec_tables.h",
- "main/source/packet.cc",
- "main/source/packet.h",
- "main/source/qm_select.cc",
- "main/source/qm_select.h",
- "main/source/qm_select_data.h",
- "main/source/receiver.cc",
- "main/source/receiver.h",
- "main/source/rtt_filter.cc",
- "main/source/rtt_filter.h",
- "main/source/session_info.cc",
- "main/source/session_info.h",
- "main/source/timestamp_map.cc",
- "main/source/timestamp_map.h",
- "main/source/timing.cc",
- "main/source/timing.h",
- "main/source/video_coding_impl.cc",
- "main/source/video_coding_impl.h",
- "main/source/video_receiver.cc",
- "main/source/video_sender.cc",
+ "codec_database.cc",
+ "codec_database.h",
+ "codec_timer.cc",
+ "codec_timer.h",
+ "content_metrics_processing.cc",
+ "content_metrics_processing.h",
+ "decoding_state.cc",
+ "decoding_state.h",
+ "encoded_frame.cc",
+ "encoded_frame.h",
+ "fec_tables_xor.h",
+ "frame_buffer.cc",
+ "frame_buffer.h",
+ "generic_decoder.cc",
+ "generic_decoder.h",
+ "generic_encoder.cc",
+ "generic_encoder.h",
+ "include/video_coding.h",
+ "include/video_coding_defines.h",
+ "inter_frame_delay.cc",
+ "inter_frame_delay.h",
+ "internal_defines.h",
+ "jitter_buffer.cc",
+ "jitter_buffer.h",
+ "jitter_buffer_common.h",
+ "jitter_estimator.cc",
+ "jitter_estimator.h",
+ "media_opt_util.cc",
+ "media_opt_util.h",
+ "media_optimization.cc",
+ "media_optimization.h",
+ "nack_fec_tables.h",
+ "packet.cc",
+ "packet.h",
+ "qm_select.cc",
+ "qm_select.h",
+ "qm_select_data.h",
+ "receiver.cc",
+ "receiver.h",
+ "rtt_filter.cc",
+ "rtt_filter.h",
+ "session_info.cc",
+ "session_info.h",
+ "timestamp_map.cc",
+ "timestamp_map.h",
+ "timing.cc",
+ "timing.h",
+ "video_coding_impl.cc",
+ "video_coding_impl.h",
+ "video_receiver.cc",
+ "video_sender.cc",
]
configs += [ "../..:common_config" ]
@@ -94,14 +94,14 @@ source_set("video_coding") {
source_set("video_coding_utility") {
sources = [
"utility/frame_dropper.cc",
- "utility/include/frame_dropper.h",
- "utility/include/moving_average.h",
- "utility/include/qp_parser.h",
- "utility/include/quality_scaler.h",
- "utility/include/vp8_header_parser.h",
+ "utility/frame_dropper.h",
+ "utility/moving_average.h",
"utility/qp_parser.cc",
+ "utility/qp_parser.h",
"utility/quality_scaler.cc",
+ "utility/quality_scaler.h",
"utility/vp8_header_parser.cc",
+ "utility/vp8_header_parser.h",
]
configs += [ "../..:common_config" ]
@@ -136,6 +136,18 @@ source_set("webrtc_h264") {
deps = [
"../../system_wrappers",
]
+
+ if (use_third_party_h264) {
+ # Dependency added so that variables use_openh264 and ffmpeg_branding are
+ # recognized build arguments (avoid "Build argument has no effect" error).
+ # The variables and dependencies will be used for real as soon as
+ # https://codereview.webrtc.org/1306813009/ lands. In the meantime, the
+ # build arguments are to be used by waterfall/trybots.
+ deps += [
+ "//third_party/ffmpeg:ffmpeg",
+ "//third_party/openh264:encoder",
+ ]
+ }
}
# TODO(tkchin): Source set for webrtc_h264_video_toolbox. Currently not
@@ -209,19 +221,15 @@ source_set("webrtc_vp8") {
}
source_set("webrtc_vp9") {
- if (rtc_build_vp9) {
- sources = [
- "codecs/vp9/include/vp9.h",
- "codecs/vp9/vp9_frame_buffer_pool.cc",
- "codecs/vp9/vp9_frame_buffer_pool.h",
- "codecs/vp9/vp9_impl.cc",
- "codecs/vp9/vp9_impl.h",
- ]
- } else {
- sources = [
- "codecs/vp9/vp9_dummy_impl.cc",
- ]
- }
+ sources = [
+ "codecs/vp9/include/vp9.h",
+ "codecs/vp9/screenshare_layers.cc",
+ "codecs/vp9/screenshare_layers.h",
+ "codecs/vp9/vp9_frame_buffer_pool.cc",
+ "codecs/vp9/vp9_frame_buffer_pool.h",
+ "codecs/vp9/vp9_impl.cc",
+ "codecs/vp9/vp9_impl.h",
+ ]
configs += [ "../..:common_config" ]
public_configs = [ "../..:common_inherited_config" ]
diff --git a/webrtc/modules/video_coding/OWNERS b/webrtc/modules/video_coding/OWNERS
index f452c9ed83..389d632dfd 100644
--- a/webrtc/modules/video_coding/OWNERS
+++ b/webrtc/modules/video_coding/OWNERS
@@ -1,4 +1,9 @@
stefan@webrtc.org
marpan@webrtc.org
+# These are for the common case of adding or renaming files. If you're doing
+# structural changes, please get a review from a reviewer in this file.
+per-file *.gyp=*
+per-file *.gypi=*
+
per-file BUILD.gn=kjellander@webrtc.org
diff --git a/webrtc/modules/video_coding/main/source/codec_database.cc b/webrtc/modules/video_coding/codec_database.cc
index bfdc609e3c..1fae435bab 100644
--- a/webrtc/modules/video_coding/main/source/codec_database.cc
+++ b/webrtc/modules/video_coding/codec_database.cc
@@ -8,26 +8,18 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/video_coding/main/source/codec_database.h"
+#include "webrtc/modules/video_coding/codec_database.h"
#include <assert.h>
#include "webrtc/base/checks.h"
+#include "webrtc/base/logging.h"
#include "webrtc/engine_configurations.h"
-#ifdef VIDEOCODEC_H264
#include "webrtc/modules/video_coding/codecs/h264/include/h264.h"
-#endif
-#ifdef VIDEOCODEC_I420
#include "webrtc/modules/video_coding/codecs/i420/include/i420.h"
-#endif
-#ifdef VIDEOCODEC_VP8
#include "webrtc/modules/video_coding/codecs/vp8/include/vp8.h"
-#endif
-#ifdef VIDEOCODEC_VP9
#include "webrtc/modules/video_coding/codecs/vp9/include/vp9.h"
-#endif
-#include "webrtc/modules/video_coding/main/source/internal_defines.h"
-#include "webrtc/system_wrappers/include/logging.h"
+#include "webrtc/modules/video_coding/internal_defines.h"
namespace {
const size_t kDefaultPayloadSize = 1440;
@@ -74,9 +66,9 @@ VideoCodecH264 VideoEncoder::GetDefaultH264Settings() {
h264_settings.profile = kProfileBase;
h264_settings.frameDroppingOn = true;
h264_settings.keyFrameInterval = 3000;
- h264_settings.spsData = NULL;
+ h264_settings.spsData = nullptr;
h264_settings.spsLen = 0;
- h264_settings.ppsData = NULL;
+ h264_settings.ppsData = nullptr;
h264_settings.ppsLen = 0;
return h264_settings;
@@ -93,12 +85,9 @@ VCMDecoderMapItem::VCMDecoderMapItem(VideoCodec* settings,
VCMExtDecoderMapItem::VCMExtDecoderMapItem(
VideoDecoder* external_decoder_instance,
- uint8_t payload_type,
- bool internal_render_timing)
+ uint8_t payload_type)
: payload_type(payload_type),
- external_decoder_instance(external_decoder_instance),
- internal_render_timing(internal_render_timing) {
-}
+ external_decoder_instance(external_decoder_instance) {}
VCMCodecDataBase::VCMCodecDataBase(
VideoEncoderRateObserver* encoder_rate_observer,
@@ -110,35 +99,27 @@ VCMCodecDataBase::VCMCodecDataBase(
send_codec_(),
receive_codec_(),
encoder_payload_type_(0),
- external_encoder_(NULL),
+ external_encoder_(nullptr),
internal_source_(false),
encoder_rate_observer_(encoder_rate_observer),
encoded_frame_callback_(encoded_frame_callback),
- ptr_decoder_(NULL),
+ ptr_decoder_(nullptr),
dec_map_(),
dec_external_map_() {}
VCMCodecDataBase::~VCMCodecDataBase() {
- ResetSender();
- ResetReceiver();
-}
-
-int VCMCodecDataBase::NumberOfCodecs() {
- return VCM_NUM_VIDEO_CODECS_AVAILABLE;
+ DeleteEncoder();
+ ReleaseDecoder(ptr_decoder_);
+ for (auto& kv : dec_map_)
+ delete kv.second;
+ for (auto& kv : dec_external_map_)
+ delete kv.second;
}
-bool VCMCodecDataBase::Codec(int list_id,
- VideoCodec* settings) {
- if (!settings) {
- return false;
- }
- if (list_id >= VCM_NUM_VIDEO_CODECS_AVAILABLE) {
- return false;
- }
+void VCMCodecDataBase::Codec(VideoCodecType codec_type, VideoCodec* settings) {
memset(settings, 0, sizeof(VideoCodec));
- switch (list_id) {
-#ifdef VIDEOCODEC_VP8
- case VCM_VP8_IDX: {
+ switch (codec_type) {
+ case kVideoCodecVP8:
strncpy(settings->plName, "VP8", 4);
settings->codecType = kVideoCodecVP8;
// 96 to 127 dynamic payload types for video codecs.
@@ -152,11 +133,8 @@ bool VCMCodecDataBase::Codec(int list_id,
settings->numberOfSimulcastStreams = 0;
settings->qpMax = 56;
settings->codecSpecific.VP8 = VideoEncoder::GetDefaultVp8Settings();
- return true;
- }
-#endif
-#ifdef VIDEOCODEC_VP9
- case VCM_VP9_IDX: {
+ return;
+ case kVideoCodecVP9:
strncpy(settings->plName, "VP9", 4);
settings->codecType = kVideoCodecVP9;
// 96 to 127 dynamic payload types for video codecs.
@@ -170,11 +148,8 @@ bool VCMCodecDataBase::Codec(int list_id,
settings->numberOfSimulcastStreams = 0;
settings->qpMax = 56;
settings->codecSpecific.VP9 = VideoEncoder::GetDefaultVp9Settings();
- return true;
- }
-#endif
-#ifdef VIDEOCODEC_H264
- case VCM_H264_IDX: {
+ return;
+ case kVideoCodecH264:
strncpy(settings->plName, "H264", 5);
settings->codecType = kVideoCodecH264;
// 96 to 127 dynamic payload types for video codecs.
@@ -188,11 +163,8 @@ bool VCMCodecDataBase::Codec(int list_id,
settings->numberOfSimulcastStreams = 0;
settings->qpMax = 56;
settings->codecSpecific.H264 = VideoEncoder::GetDefaultH264Settings();
- return true;
- }
-#endif
-#ifdef VIDEOCODEC_I420
- case VCM_I420_IDX: {
+ return;
+ case kVideoCodecI420:
strncpy(settings->plName, "I420", 5);
settings->codecType = kVideoCodecI420;
// 96 to 127 dynamic payload types for video codecs.
@@ -207,32 +179,14 @@ bool VCMCodecDataBase::Codec(int list_id,
settings->height = VCM_DEFAULT_CODEC_HEIGHT;
settings->minBitrate = VCM_MIN_BITRATE;
settings->numberOfSimulcastStreams = 0;
- return true;
- }
-#endif
- default: {
- return false;
- }
- }
-}
-
-bool VCMCodecDataBase::Codec(VideoCodecType codec_type,
- VideoCodec* settings) {
- for (int i = 0; i < VCMCodecDataBase::NumberOfCodecs(); i++) {
- const bool ret = VCMCodecDataBase::Codec(i, settings);
- if (!ret) {
- return false;
- }
- if (codec_type == settings->codecType) {
- return true;
- }
+ return;
+ case kVideoCodecRED:
+ case kVideoCodecULPFEC:
+ case kVideoCodecGeneric:
+ case kVideoCodecUnknown:
+ RTC_NOTREACHED();
+ return;
}
- return false;
-}
-
-void VCMCodecDataBase::ResetSender() {
- DeleteEncoder();
- periodic_key_frames_ = false;
}
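
A minimal sketch of the type-keyed lookup this hunk introduces (the call site below is hypothetical; note the new signature returns void and hits RTC_NOTREACHED() on unsupported types instead of returning false):

  webrtc::VideoCodec settings;
  webrtc::VCMCodecDataBase::Codec(webrtc::kVideoCodecVP8, &settings);
  // settings now holds the VP8 defaults filled in above, e.g.:
  //   settings.plName    == "VP8"
  //   settings.codecType == webrtc::kVideoCodecVP8
  //   settings.qpMax     == 56
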
// Assuming only one registered encoder; since only one is used, there is no need for more.
@@ -264,8 +218,9 @@ bool VCMCodecDataBase::SetSendCodec(const VideoCodec* send_codec,
if (new_send_codec.maxBitrate == 0) {
// max is one bit per pixel
new_send_codec.maxBitrate = (static_cast<int>(send_codec->height) *
- static_cast<int>(send_codec->width) *
- static_cast<int>(send_codec->maxFramerate)) / 1000;
+ static_cast<int>(send_codec->width) *
+ static_cast<int>(send_codec->maxFramerate)) /
+ 1000;
if (send_codec->startBitrate > new_send_codec.maxBitrate) {
// But if the user tries to set a higher start bit rate we will
// increase the max accordingly.
@@ -328,8 +283,8 @@ VideoCodecType VCMCodecDataBase::SendCodec() const {
return send_codec_.codecType;
}
-bool VCMCodecDataBase::DeregisterExternalEncoder(
- uint8_t payload_type, bool* was_send_codec) {
+bool VCMCodecDataBase::DeregisterExternalEncoder(uint8_t payload_type,
+ bool* was_send_codec) {
assert(was_send_codec);
*was_send_codec = false;
if (encoder_payload_type_ != payload_type) {
@@ -342,15 +297,14 @@ bool VCMCodecDataBase::DeregisterExternalEncoder(
*was_send_codec = true;
}
encoder_payload_type_ = 0;
- external_encoder_ = NULL;
+ external_encoder_ = nullptr;
internal_source_ = false;
return true;
}
-void VCMCodecDataBase::RegisterExternalEncoder(
- VideoEncoder* external_encoder,
- uint8_t payload_type,
- bool internal_source) {
+void VCMCodecDataBase::RegisterExternalEncoder(VideoEncoder* external_encoder,
+ uint8_t payload_type,
+ bool internal_source) {
// Since only one encoder can be used at a given time, only one external
// encoder can be registered/used.
external_encoder_ = external_encoder;
@@ -360,9 +314,8 @@ void VCMCodecDataBase::RegisterExternalEncoder(
}
bool VCMCodecDataBase::RequiresEncoderReset(const VideoCodec& new_send_codec) {
- if (ptr_encoder_ == NULL) {
+ if (!ptr_encoder_)
return true;
- }
// Does not check startBitrate or maxFramerate
if (new_send_codec.codecType != send_codec_.codecType ||
@@ -419,8 +372,7 @@ bool VCMCodecDataBase::RequiresEncoderReset(const VideoCodec& new_send_codec) {
++i) {
if (memcmp(&new_send_codec.simulcastStream[i],
&send_codec_.simulcastStream[i],
- sizeof(new_send_codec.simulcastStream[i])) !=
- 0) {
+ sizeof(new_send_codec.simulcastStream[i])) != 0) {
return true;
}
}
@@ -440,22 +392,6 @@ bool VCMCodecDataBase::SetPeriodicKeyFrames(bool enable) {
return true;
}
-void VCMCodecDataBase::ResetReceiver() {
- ReleaseDecoder(ptr_decoder_);
- ptr_decoder_ = NULL;
- memset(&receive_codec_, 0, sizeof(VideoCodec));
- while (!dec_map_.empty()) {
- DecoderMap::iterator it = dec_map_.begin();
- delete (*it).second;
- dec_map_.erase(it);
- }
- while (!dec_external_map_.empty()) {
- ExternalDecoderMap::iterator external_it = dec_external_map_.begin();
- delete (*external_it).second;
- dec_external_map_.erase(external_it);
- }
-}
-
bool VCMCodecDataBase::DeregisterExternalDecoder(uint8_t payload_type) {
ExternalDecoderMap::iterator it = dec_external_map_.find(payload_type);
if (it == dec_external_map_.end()) {
@@ -465,43 +401,36 @@ bool VCMCodecDataBase::DeregisterExternalDecoder(uint8_t payload_type) {
// We can't use payload_type to check if the decoder is currently in use,
// because payload type may be out of date (e.g. before we decode the first
// frame after RegisterReceiveCodec)
- if (ptr_decoder_ != NULL &&
- &ptr_decoder_->_decoder == (*it).second->external_decoder_instance) {
+ if (ptr_decoder_ != nullptr &&
+ ptr_decoder_->_decoder == (*it).second->external_decoder_instance) {
// Release it if it was registered and in use.
ReleaseDecoder(ptr_decoder_);
- ptr_decoder_ = NULL;
+ ptr_decoder_ = nullptr;
}
DeregisterReceiveCodec(payload_type);
- delete (*it).second;
+ delete it->second;
dec_external_map_.erase(it);
return true;
}
// Add the external decoder object to the list of external decoders.
// Won't be registered as a receive codec until RegisterReceiveCodec is called.
-bool VCMCodecDataBase::RegisterExternalDecoder(
- VideoDecoder* external_decoder,
- uint8_t payload_type,
- bool internal_render_timing) {
+void VCMCodecDataBase::RegisterExternalDecoder(VideoDecoder* external_decoder,
+ uint8_t payload_type) {
  // Check if the payload value already exists; if so, erase the old entry and insert the new one.
- VCMExtDecoderMapItem* ext_decoder = new VCMExtDecoderMapItem(
- external_decoder, payload_type, internal_render_timing);
- if (!ext_decoder) {
- return false;
- }
+ VCMExtDecoderMapItem* ext_decoder =
+ new VCMExtDecoderMapItem(external_decoder, payload_type);
DeregisterExternalDecoder(payload_type);
dec_external_map_[payload_type] = ext_decoder;
- return true;
}
bool VCMCodecDataBase::DecoderRegistered() const {
return !dec_map_.empty();
}
-bool VCMCodecDataBase::RegisterReceiveCodec(
- const VideoCodec* receive_codec,
- int number_of_cores,
- bool require_key_frame) {
+bool VCMCodecDataBase::RegisterReceiveCodec(const VideoCodec* receive_codec,
+ int number_of_cores,
+ bool require_key_frame) {
if (number_of_cores < 0) {
return false;
}
@@ -511,20 +440,17 @@ bool VCMCodecDataBase::RegisterReceiveCodec(
return false;
}
VideoCodec* new_receive_codec = new VideoCodec(*receive_codec);
- dec_map_[receive_codec->plType] = new VCMDecoderMapItem(new_receive_codec,
- number_of_cores,
- require_key_frame);
+ dec_map_[receive_codec->plType] = new VCMDecoderMapItem(
+ new_receive_codec, number_of_cores, require_key_frame);
return true;
}
-bool VCMCodecDataBase::DeregisterReceiveCodec(
- uint8_t payload_type) {
+bool VCMCodecDataBase::DeregisterReceiveCodec(uint8_t payload_type) {
DecoderMap::iterator it = dec_map_.find(payload_type);
if (it == dec_map_.end()) {
return false;
}
- VCMDecoderMapItem* dec_item = (*it).second;
- delete dec_item;
+ delete it->second;
dec_map_.erase(it);
if (receive_codec_.plType == payload_type) {
// This codec is currently in use.
@@ -550,49 +476,50 @@ VideoCodecType VCMCodecDataBase::ReceiveCodec() const {
}
VCMGenericDecoder* VCMCodecDataBase::GetDecoder(
- uint8_t payload_type, VCMDecodedFrameCallback* decoded_frame_callback) {
+ const VCMEncodedFrame& frame,
+ VCMDecodedFrameCallback* decoded_frame_callback) {
+ uint8_t payload_type = frame.PayloadType();
if (payload_type == receive_codec_.plType || payload_type == 0) {
return ptr_decoder_;
}
  // Check for an existing decoder; if one exists, delete it.
if (ptr_decoder_) {
ReleaseDecoder(ptr_decoder_);
- ptr_decoder_ = NULL;
+ ptr_decoder_ = nullptr;
memset(&receive_codec_, 0, sizeof(VideoCodec));
}
- ptr_decoder_ = CreateAndInitDecoder(payload_type, &receive_codec_);
+ ptr_decoder_ = CreateAndInitDecoder(frame, &receive_codec_);
if (!ptr_decoder_) {
- return NULL;
+ return nullptr;
}
VCMReceiveCallback* callback = decoded_frame_callback->UserReceiveCallback();
- if (callback) callback->OnIncomingPayloadType(receive_codec_.plType);
- if (ptr_decoder_->RegisterDecodeCompleteCallback(decoded_frame_callback)
- < 0) {
+ if (callback)
+ callback->OnIncomingPayloadType(receive_codec_.plType);
+ if (ptr_decoder_->RegisterDecodeCompleteCallback(decoded_frame_callback) <
+ 0) {
ReleaseDecoder(ptr_decoder_);
- ptr_decoder_ = NULL;
+ ptr_decoder_ = nullptr;
memset(&receive_codec_, 0, sizeof(VideoCodec));
- return NULL;
+ return nullptr;
}
return ptr_decoder_;
}
void VCMCodecDataBase::ReleaseDecoder(VCMGenericDecoder* decoder) const {
if (decoder) {
- assert(&decoder->_decoder);
+ assert(decoder->_decoder);
decoder->Release();
if (!decoder->External()) {
- delete &decoder->_decoder;
+ delete decoder->_decoder;
}
delete decoder;
}
}
-bool VCMCodecDataBase::SupportsRenderScheduling() const {
- const VCMExtDecoderMapItem* ext_item = FindExternalDecoderItem(
- receive_codec_.plType);
- if (ext_item == nullptr)
+bool VCMCodecDataBase::PrefersLateDecoding() const {
+ if (!ptr_decoder_)
return true;
- return ext_item->internal_render_timing;
+ return ptr_decoder_->PrefersLateDecoding();
}
bool VCMCodecDataBase::MatchesCurrentResolution(int width, int height) const {
@@ -600,33 +527,43 @@ bool VCMCodecDataBase::MatchesCurrentResolution(int width, int height) const {
}
VCMGenericDecoder* VCMCodecDataBase::CreateAndInitDecoder(
- uint8_t payload_type,
+ const VCMEncodedFrame& frame,
VideoCodec* new_codec) const {
+ uint8_t payload_type = frame.PayloadType();
assert(new_codec);
const VCMDecoderMapItem* decoder_item = FindDecoderItem(payload_type);
if (!decoder_item) {
LOG(LS_ERROR) << "Can't find a decoder associated with payload type: "
<< static_cast<int>(payload_type);
- return NULL;
+ return nullptr;
}
- VCMGenericDecoder* ptr_decoder = NULL;
+ VCMGenericDecoder* ptr_decoder = nullptr;
const VCMExtDecoderMapItem* external_dec_item =
FindExternalDecoderItem(payload_type);
if (external_dec_item) {
// External codec.
ptr_decoder = new VCMGenericDecoder(
- *external_dec_item->external_decoder_instance, true);
+ external_dec_item->external_decoder_instance, true);
} else {
// Create decoder.
ptr_decoder = CreateDecoder(decoder_item->settings->codecType);
}
if (!ptr_decoder)
- return NULL;
+ return nullptr;
+ // Copy over input resolutions to prevent codec reinitialization due to
+ // the first frame being of a different resolution than the database values.
+ // This is best effort, since there's no guarantee that width/height have been
+ // parsed yet (and may be zero).
+ if (frame.EncodedImage()._encodedWidth > 0 &&
+ frame.EncodedImage()._encodedHeight > 0) {
+ decoder_item->settings->width = frame.EncodedImage()._encodedWidth;
+ decoder_item->settings->height = frame.EncodedImage()._encodedHeight;
+ }
if (ptr_decoder->InitDecode(decoder_item->settings.get(),
decoder_item->number_of_cores) < 0) {
ReleaseDecoder(ptr_decoder);
- return NULL;
+ return nullptr;
}
memcpy(new_codec, decoder_item->settings.get(), sizeof(VideoCodec));
return ptr_decoder;
@@ -641,30 +578,22 @@ void VCMCodecDataBase::DeleteEncoder() {
VCMGenericDecoder* VCMCodecDataBase::CreateDecoder(VideoCodecType type) const {
switch (type) {
-#ifdef VIDEOCODEC_VP8
case kVideoCodecVP8:
- return new VCMGenericDecoder(*(VP8Decoder::Create()));
-#endif
-#ifdef VIDEOCODEC_VP9
+ return new VCMGenericDecoder(VP8Decoder::Create());
case kVideoCodecVP9:
- return new VCMGenericDecoder(*(VP9Decoder::Create()));
-#endif
-#ifdef VIDEOCODEC_I420
+ return new VCMGenericDecoder(VP9Decoder::Create());
case kVideoCodecI420:
- return new VCMGenericDecoder(*(new I420Decoder));
-#endif
-#ifdef VIDEOCODEC_H264
+ return new VCMGenericDecoder(new I420Decoder());
case kVideoCodecH264:
if (H264Decoder::IsSupported()) {
- return new VCMGenericDecoder(*(H264Decoder::Create()));
+ return new VCMGenericDecoder(H264Decoder::Create());
}
break;
-#endif
default:
break;
}
LOG(LS_WARNING) << "No internal decoder of this type exists.";
- return NULL;
+ return nullptr;
}
const VCMDecoderMapItem* VCMCodecDataBase::FindDecoderItem(
@@ -673,7 +602,7 @@ const VCMDecoderMapItem* VCMCodecDataBase::FindDecoderItem(
if (it != dec_map_.end()) {
return (*it).second;
}
- return NULL;
+ return nullptr;
}
const VCMExtDecoderMapItem* VCMCodecDataBase::FindExternalDecoderItem(
@@ -682,6 +611,6 @@ const VCMExtDecoderMapItem* VCMCodecDataBase::FindExternalDecoderItem(
if (it != dec_external_map_.end()) {
return (*it).second;
}
- return NULL;
+ return nullptr;
}
} // namespace webrtc
diff --git a/webrtc/modules/video_coding/main/source/codec_database.h b/webrtc/modules/video_coding/codec_database.h
index 93aa9c3ba8..62ec30a46e 100644
--- a/webrtc/modules/video_coding/main/source/codec_database.h
+++ b/webrtc/modules/video_coding/codec_database.h
@@ -8,16 +8,16 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_CODEC_DATABASE_H_
-#define WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_CODEC_DATABASE_H_
+#ifndef WEBRTC_MODULES_VIDEO_CODING_CODEC_DATABASE_H_
+#define WEBRTC_MODULES_VIDEO_CODING_CODEC_DATABASE_H_
#include <map>
#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
-#include "webrtc/modules/video_coding/main/interface/video_coding.h"
-#include "webrtc/modules/video_coding/main/source/generic_decoder.h"
-#include "webrtc/modules/video_coding/main/source/generic_encoder.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
+#include "webrtc/modules/video_coding/include/video_coding.h"
+#include "webrtc/modules/video_coding/generic_decoder.h"
+#include "webrtc/modules/video_coding/generic_encoder.h"
#include "webrtc/typedefs.h"
namespace webrtc {
@@ -36,12 +36,10 @@ struct VCMDecoderMapItem {
struct VCMExtDecoderMapItem {
public:
VCMExtDecoderMapItem(VideoDecoder* external_decoder_instance,
- uint8_t payload_type,
- bool internal_render_timing);
+ uint8_t payload_type);
uint8_t payload_type;
VideoDecoder* external_decoder_instance;
- bool internal_render_timing;
};
class VCMCodecDataBase {
@@ -51,16 +49,8 @@ class VCMCodecDataBase {
~VCMCodecDataBase();
// Sender Side
- // Returns the number of supported codecs (or -1 in case of error).
- static int NumberOfCodecs();
-
- // Returns the default settings for the codec with id |list_id|.
- static bool Codec(int list_id, VideoCodec* settings);
-
// Returns the default settings for the codec with type |codec_type|.
- static bool Codec(VideoCodecType codec_type, VideoCodec* settings);
-
- void ResetSender();
+ static void Codec(VideoCodecType codec_type, VideoCodec* settings);
// Sets the sender side codec and initiates the desired codec given the
// VideoCodec struct.
@@ -94,19 +84,12 @@ class VCMCodecDataBase {
bool SetPeriodicKeyFrames(bool enable);
- // Receiver Side
- void ResetReceiver();
-
// Deregisters an external decoder object specified by |payload_type|.
bool DeregisterExternalDecoder(uint8_t payload_type);
// Registers an external decoder object to the payload type |payload_type|.
- // |internal_render_timing| is set to true if the |external_decoder| has
- // built in rendering which is able to obey the render timestamps of the
- // encoded frames.
- bool RegisterExternalDecoder(VideoDecoder* external_decoder,
- uint8_t payload_type,
- bool internal_render_timing);
+ void RegisterExternalDecoder(VideoDecoder* external_decoder,
+ uint8_t payload_type);
bool DecoderRegistered() const;
@@ -128,16 +111,16 @@ class VCMCodecDataBase {
  // NULL is returned if no decoder with the specified payload type was found
// and the function failed to create one.
VCMGenericDecoder* GetDecoder(
- uint8_t payload_type, VCMDecodedFrameCallback* decoded_frame_callback);
+ const VCMEncodedFrame& frame,
+ VCMDecodedFrameCallback* decoded_frame_callback);
// Deletes the memory of the decoder instance |decoder|. Used to delete
// deep copies returned by CreateDecoderCopy().
void ReleaseDecoder(VCMGenericDecoder* decoder) const;
- // Returns true if the currently active decoder supports render scheduling,
- // that is, it is able to render frames according to the render timestamp of
- // the encoded frames.
- bool SupportsRenderScheduling() const;
+  // Returns true if the currently active decoder prefers to decode frames
+  // late, i.e. frames must be decoded close to the render timestamp.
+ bool PrefersLateDecoding() const;
bool MatchesCurrentResolution(int width, int height) const;
@@ -145,7 +128,7 @@ class VCMCodecDataBase {
typedef std::map<uint8_t, VCMDecoderMapItem*> DecoderMap;
typedef std::map<uint8_t, VCMExtDecoderMapItem*> ExternalDecoderMap;
- VCMGenericDecoder* CreateAndInitDecoder(uint8_t payload_type,
+ VCMGenericDecoder* CreateAndInitDecoder(const VCMEncodedFrame& frame,
VideoCodec* new_codec) const;
// Determines whether a new codec has to be created or not.
@@ -181,4 +164,4 @@ class VCMCodecDataBase {
} // namespace webrtc
-#endif // WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_CODEC_DATABASE_H_
+#endif // WEBRTC_MODULES_VIDEO_CODING_CODEC_DATABASE_H_
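
A sketch of the two-step decoder registration the header above describes, now without the dropped internal_render_timing flag (rate_observer, encoded_frame_callback, my_hw_decoder and the payload type 96 are all hypothetical):

  webrtc::VCMCodecDataBase db(rate_observer, encoded_frame_callback);
  db.RegisterExternalDecoder(&my_hw_decoder, 96);  // map payload type -> decoder
  webrtc::VideoCodec vp8;
  webrtc::VCMCodecDataBase::Codec(webrtc::kVideoCodecVP8, &vp8);
  vp8.plType = 96;
  db.RegisterReceiveCodec(&vp8, /*number_of_cores=*/1,
                          /*require_key_frame=*/false);
  // Only after both steps will GetDecoder() resolve payload type 96
  // to my_hw_decoder.
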
diff --git a/webrtc/modules/video_coding/codec_timer.cc b/webrtc/modules/video_coding/codec_timer.cc
new file mode 100644
index 0000000000..60add8fc4b
--- /dev/null
+++ b/webrtc/modules/video_coding/codec_timer.cc
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/video_coding/codec_timer.h"
+
+#include <assert.h>
+
+namespace webrtc {
+
+// The first kIgnoredSampleCount samples will be ignored.
+static const int32_t kIgnoredSampleCount = 5;
+
+VCMCodecTimer::VCMCodecTimer()
+ : _filteredMax(0), _ignoredSampleCount(0), _shortMax(0), _history() {
+ Reset();
+}
+
+void VCMCodecTimer::Reset() {
+ _filteredMax = 0;
+ _ignoredSampleCount = 0;
+ _shortMax = 0;
+ for (int i = 0; i < MAX_HISTORY_SIZE; i++) {
+ _history[i].shortMax = 0;
+ _history[i].timeMs = -1;
+ }
+}
+
+// Update the max-value filter
+void VCMCodecTimer::MaxFilter(int32_t decodeTime, int64_t nowMs) {
+ if (_ignoredSampleCount >= kIgnoredSampleCount) {
+ UpdateMaxHistory(decodeTime, nowMs);
+ ProcessHistory(nowMs);
+ } else {
+ _ignoredSampleCount++;
+ }
+}
+
+void VCMCodecTimer::UpdateMaxHistory(int32_t decodeTime, int64_t now) {
+ if (_history[0].timeMs >= 0 && now - _history[0].timeMs < SHORT_FILTER_MS) {
+ if (decodeTime > _shortMax) {
+ _shortMax = decodeTime;
+ }
+ } else {
+ // Only add a new value to the history once a second
+ if (_history[0].timeMs == -1) {
+ // First, no shift
+ _shortMax = decodeTime;
+ } else {
+ // Shift
+ for (int i = (MAX_HISTORY_SIZE - 2); i >= 0; i--) {
+ _history[i + 1].shortMax = _history[i].shortMax;
+ _history[i + 1].timeMs = _history[i].timeMs;
+ }
+ }
+ if (_shortMax == 0) {
+ _shortMax = decodeTime;
+ }
+
+ _history[0].shortMax = _shortMax;
+ _history[0].timeMs = now;
+ _shortMax = 0;
+ }
+}
+
+void VCMCodecTimer::ProcessHistory(int64_t nowMs) {
+ _filteredMax = _shortMax;
+ if (_history[0].timeMs == -1) {
+ return;
+ }
+ for (int i = 0; i < MAX_HISTORY_SIZE; i++) {
+ if (_history[i].timeMs == -1) {
+ break;
+ }
+ if (nowMs - _history[i].timeMs > MAX_HISTORY_SIZE * SHORT_FILTER_MS) {
+ // This sample (and all samples after this) is too old
+ break;
+ }
+ if (_history[i].shortMax > _filteredMax) {
+      // This sample is the largest one so far within the history window
+ _filteredMax = _history[i].shortMax;
+ }
+ }
+}
+
+// Get the maximum observed time within a time window
+int32_t VCMCodecTimer::RequiredDecodeTimeMs(FrameType /*frameType*/) const {
+ return _filteredMax;
+}
+} // namespace webrtc
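
A worked sketch of the filter defined above: the first kIgnoredSampleCount (5) samples are discarded, short-term maxima are bucketed once per SHORT_FILTER_MS (1000 ms), and RequiredDecodeTimeMs() reports the largest bucket within the MAX_HISTORY_SIZE * SHORT_FILTER_MS = 10 s window (timestamps and decode times below are illustrative):

  webrtc::VCMCodecTimer timer;
  int64_t now_ms = 0;
  for (int i = 0; i < 5; ++i)
    timer.MaxFilter(40, now_ms += 33);  // warm-up samples, ignored
  timer.MaxFilter(25, now_ms += 33);    // first sample that counts
  timer.MaxFilter(70, now_ms += 33);    // new short-term max, same 1 s bucket
  // timer.RequiredDecodeTimeMs(webrtc::kVideoFrameDelta) now returns 70;
  // the 70 ms bucket ages out of the window roughly 10 s later.
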
diff --git a/webrtc/modules/video_coding/codec_timer.h b/webrtc/modules/video_coding/codec_timer.h
new file mode 100644
index 0000000000..8ebd82ab9c
--- /dev/null
+++ b/webrtc/modules/video_coding/codec_timer.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_CODEC_TIMER_H_
+#define WEBRTC_MODULES_VIDEO_CODING_CODEC_TIMER_H_
+
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/typedefs.h"
+
+namespace webrtc {
+
+// MAX_HISTORY_SIZE * SHORT_FILTER_MS defines the window size in milliseconds
+#define MAX_HISTORY_SIZE 10
+#define SHORT_FILTER_MS 1000
+
+class VCMShortMaxSample {
+ public:
+ VCMShortMaxSample() : shortMax(0), timeMs(-1) {}
+
+ int32_t shortMax;
+ int64_t timeMs;
+};
+
+class VCMCodecTimer {
+ public:
+ VCMCodecTimer();
+
+ // Updates the max filtered decode time.
+ void MaxFilter(int32_t newDecodeTimeMs, int64_t nowMs);
+
+  // Empty the history of max samples.
+ void Reset();
+
+ // Get the required decode time in ms.
+ int32_t RequiredDecodeTimeMs(FrameType frameType) const;
+
+ private:
+ void UpdateMaxHistory(int32_t decodeTime, int64_t now);
+ void ProcessHistory(int64_t nowMs);
+
+ int32_t _filteredMax;
+ // The number of samples ignored so far.
+ int32_t _ignoredSampleCount;
+ int32_t _shortMax;
+ VCMShortMaxSample _history[MAX_HISTORY_SIZE];
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_CODING_CODEC_TIMER_H_
diff --git a/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_decoder.cc b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_decoder.cc
index 61ef80bbf1..6fee2e6f36 100644
--- a/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_decoder.cc
+++ b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_decoder.cc
@@ -16,7 +16,7 @@
#include "libyuv/convert.h"
#include "webrtc/base/checks.h"
#include "webrtc/base/logging.h"
-#include "webrtc/common_video/interface/video_frame_buffer.h"
+#include "webrtc/common_video/include/video_frame_buffer.h"
#include "webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu.h"
#include "webrtc/video_frame.h"
@@ -106,8 +106,7 @@ namespace webrtc {
H264VideoToolboxDecoder::H264VideoToolboxDecoder()
: callback_(nullptr),
video_format_(nullptr),
- decompression_session_(nullptr) {
-}
+ decompression_session_(nullptr) {}
H264VideoToolboxDecoder::~H264VideoToolboxDecoder() {
DestroyDecompressionSession();
@@ -129,8 +128,7 @@ int H264VideoToolboxDecoder::Decode(
CMSampleBufferRef sample_buffer = nullptr;
if (!H264AnnexBBufferToCMSampleBuffer(input_image._buffer,
- input_image._length,
- video_format_,
+ input_image._length, video_format_,
&sample_buffer)) {
return WEBRTC_VIDEO_CODEC_ERROR;
}
@@ -206,11 +204,8 @@ int H264VideoToolboxDecoder::ResetDecompressionSession() {
int64_t nv12type = kCVPixelFormatType_420YpCbCr8BiPlanarFullRange;
CFNumberRef pixel_format =
CFNumberCreate(nullptr, kCFNumberLongType, &nv12type);
- CFTypeRef values[attributes_size] = {
- kCFBooleanTrue,
- io_surface_value,
- pixel_format
- };
+ CFTypeRef values[attributes_size] = {kCFBooleanTrue, io_surface_value,
+ pixel_format};
CFDictionaryRef attributes =
internal::CreateCFDictionary(keys, values, attributes_size);
if (io_surface_value) {
@@ -266,6 +261,10 @@ void H264VideoToolboxDecoder::SetVideoFormat(
}
}
+const char* H264VideoToolboxDecoder::ImplementationName() const {
+ return "VideoToolbox";
+}
+
} // namespace webrtc
#endif // defined(WEBRTC_VIDEO_TOOLBOX_SUPPORTED)
diff --git a/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_decoder.h b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_decoder.h
index f54ddb9efd..6d64307a82 100644
--- a/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_decoder.h
+++ b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_decoder.h
@@ -45,6 +45,8 @@ class H264VideoToolboxDecoder : public H264Decoder {
int Reset() override;
+ const char* ImplementationName() const override;
+
private:
int ResetDecompressionSession();
void ConfigureDecompressionSession();
diff --git a/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.cc b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.cc
index d677f8b812..7df4ec74ba 100644
--- a/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.cc
+++ b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.cc
@@ -99,11 +99,7 @@ struct FrameEncodeParams {
int32_t h,
int64_t rtms,
uint32_t ts)
- : callback(cb),
- width(w),
- height(h),
- render_time_ms(rtms),
- timestamp(ts) {
+ : callback(cb), width(w), height(h), render_time_ms(rtms), timestamp(ts) {
if (csi) {
codec_specific_info = *csi;
} else {
@@ -146,9 +142,8 @@ bool CopyVideoFrameToPixelBuffer(const webrtc::VideoFrame& frame,
int ret = libyuv::I420ToNV12(
frame.buffer(webrtc::kYPlane), frame.stride(webrtc::kYPlane),
frame.buffer(webrtc::kUPlane), frame.stride(webrtc::kUPlane),
- frame.buffer(webrtc::kVPlane), frame.stride(webrtc::kVPlane),
- dst_y, dst_stride_y, dst_uv, dst_stride_uv,
- frame.width(), frame.height());
+ frame.buffer(webrtc::kVPlane), frame.stride(webrtc::kVPlane), dst_y,
+ dst_stride_y, dst_uv, dst_stride_uv, frame.width(), frame.height());
CVPixelBufferUnlockBaseAddress(pixel_buffer, 0);
if (ret) {
LOG(LS_ERROR) << "Error converting I420 VideoFrame to NV12 :" << ret;
@@ -188,10 +183,8 @@ void VTCompressionOutputCallback(void* encoder,
// TODO(tkchin): Allocate buffers through a pool.
rtc::scoped_ptr<rtc::Buffer> buffer(new rtc::Buffer());
rtc::scoped_ptr<webrtc::RTPFragmentationHeader> header;
- if (!H264CMSampleBufferToAnnexBBuffer(sample_buffer,
- is_keyframe,
- buffer.get(),
- header.accept())) {
+ if (!H264CMSampleBufferToAnnexBBuffer(sample_buffer, is_keyframe,
+ buffer.get(), header.accept())) {
return;
}
webrtc::EncodedImage frame(buffer->data(), buffer->size(), buffer->size());
@@ -215,8 +208,7 @@ void VTCompressionOutputCallback(void* encoder,
namespace webrtc {
H264VideoToolboxEncoder::H264VideoToolboxEncoder()
- : callback_(nullptr), compression_session_(nullptr) {
-}
+ : callback_(nullptr), compression_session_(nullptr) {}
H264VideoToolboxEncoder::~H264VideoToolboxEncoder() {
DestroyCompressionSession();
@@ -289,8 +281,8 @@ int H264VideoToolboxEncoder::Encode(
CMTimeMake(input_image.render_time_ms(), 1000);
CFDictionaryRef frame_properties = nullptr;
if (is_keyframe_required) {
- CFTypeRef keys[] = { kVTEncodeFrameOptionKey_ForceKeyFrame };
- CFTypeRef values[] = { kCFBooleanTrue };
+ CFTypeRef keys[] = {kVTEncodeFrameOptionKey_ForceKeyFrame};
+ CFTypeRef values[] = {kCFBooleanTrue};
frame_properties = internal::CreateCFDictionary(keys, values, 1);
}
rtc::scoped_ptr<internal::FrameEncodeParams> encode_params;
@@ -359,11 +351,8 @@ int H264VideoToolboxEncoder::ResetCompressionSession() {
int64_t nv12type = kCVPixelFormatType_420YpCbCr8BiPlanarFullRange;
CFNumberRef pixel_format =
CFNumberCreate(nullptr, kCFNumberLongType, &nv12type);
- CFTypeRef values[attributes_size] = {
- kCFBooleanTrue,
- io_surface_value,
- pixel_format
- };
+ CFTypeRef values[attributes_size] = {kCFBooleanTrue, io_surface_value,
+ pixel_format};
CFDictionaryRef source_attributes =
internal::CreateCFDictionary(keys, values, attributes_size);
if (io_surface_value) {
@@ -376,15 +365,11 @@ int H264VideoToolboxEncoder::ResetCompressionSession() {
}
OSStatus status = VTCompressionSessionCreate(
nullptr, // use default allocator
- width_,
- height_,
- kCMVideoCodecType_H264,
+ width_, height_, kCMVideoCodecType_H264,
nullptr, // use default encoder
source_attributes,
nullptr, // use default compressed data allocator
- internal::VTCompressionOutputCallback,
- this,
- &compression_session_);
+ internal::VTCompressionOutputCallback, this, &compression_session_);
if (source_attributes) {
CFRelease(source_attributes);
source_attributes = nullptr;
@@ -434,6 +419,10 @@ void H264VideoToolboxEncoder::DestroyCompressionSession() {
}
}
+const char* H264VideoToolboxEncoder::ImplementationName() const {
+ return "VideoToolbox";
+}
+
} // namespace webrtc
#endif // defined(WEBRTC_VIDEO_TOOLBOX_SUPPORTED)
diff --git a/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.h b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.h
index f4fb86fa04..269e0411b2 100644
--- a/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.h
+++ b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.h
@@ -48,6 +48,8 @@ class H264VideoToolboxEncoder : public H264Encoder {
int Release() override;
+ const char* ImplementationName() const override;
+
private:
int ResetCompressionSession();
void ConfigureCompressionSession();
diff --git a/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu.cc b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu.cc
index caca96d3d8..322c213f7b 100644
--- a/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu.cc
+++ b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu.cc
@@ -154,11 +154,10 @@ bool H264CMSampleBufferToAnnexBBuffer(
return true;
}
-bool H264AnnexBBufferToCMSampleBuffer(
- const uint8_t* annexb_buffer,
- size_t annexb_buffer_size,
- CMVideoFormatDescriptionRef video_format,
- CMSampleBufferRef* out_sample_buffer) {
+bool H264AnnexBBufferToCMSampleBuffer(const uint8_t* annexb_buffer,
+ size_t annexb_buffer_size,
+ CMVideoFormatDescriptionRef video_format,
+ CMSampleBufferRef* out_sample_buffer) {
RTC_DCHECK(annexb_buffer);
RTC_DCHECK(out_sample_buffer);
*out_sample_buffer = nullptr;
diff --git a/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu.h b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu.h
index 230dea94a0..31ef525816 100644
--- a/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu.h
+++ b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu.h
@@ -9,8 +9,8 @@
*
*/
-#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_H264_H264_VIDEO_TOOLBOX_NALU_H
-#define WEBRTC_MODULES_VIDEO_CODING_CODECS_H264_H264_VIDEO_TOOLBOX_NALU_H
+#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_H264_H264_VIDEO_TOOLBOX_NALU_H_
+#define WEBRTC_MODULES_VIDEO_CODING_CODECS_H264_H264_VIDEO_TOOLBOX_NALU_H_
#include "webrtc/modules/video_coding/codecs/h264/include/h264.h"
@@ -19,7 +19,7 @@
#include <CoreMedia/CoreMedia.h>
#include "webrtc/base/buffer.h"
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
namespace webrtc {
@@ -39,11 +39,10 @@ bool H264CMSampleBufferToAnnexBBuffer(
// If |is_keyframe| is true then |video_format| is ignored since the format will
// be read from the buffer. Otherwise |video_format| must be provided.
// Caller is responsible for releasing the created sample buffer.
-bool H264AnnexBBufferToCMSampleBuffer(
- const uint8_t* annexb_buffer,
- size_t annexb_buffer_size,
- CMVideoFormatDescriptionRef video_format,
- CMSampleBufferRef* out_sample_buffer);
+bool H264AnnexBBufferToCMSampleBuffer(const uint8_t* annexb_buffer,
+ size_t annexb_buffer_size,
+ CMVideoFormatDescriptionRef video_format,
+ CMSampleBufferRef* out_sample_buffer);
// Helper class for reading NALUs from an RTP Annex B buffer.
class AnnexBBufferReader final {
@@ -97,4 +96,4 @@ class AvccBufferWriter final {
} // namespace webrtc
#endif // defined(WEBRTC_VIDEO_TOOLBOX_SUPPORTED)
-#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_H264_H264_VIDEO_TOOLBOX_NALU_H
+#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_H264_H264_VIDEO_TOOLBOX_NALU_H_
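
For orientation, a byte-level sketch of what the converters declared above do: RTP carries H.264 in Annex B framing (start codes), while CoreMedia sample buffers expect AVCC framing (big-endian length prefixes). The payload bytes here are illustrative only:

  // One 3-byte NALU, Annex B framing (4-byte start code)...
  const uint8_t annexb[] = {0x00, 0x00, 0x00, 0x01, 0x65, 0x88, 0x84};
  // ...and the same NALU in AVCC framing (4-byte big-endian length).
  const uint8_t avcc[]   = {0x00, 0x00, 0x00, 0x03, 0x65, 0x88, 0x84};
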
diff --git a/webrtc/modules/video_coding/codecs/h264/include/h264.h b/webrtc/modules/video_coding/codecs/h264/include/h264.h
index 3f52839a6c..50ca57c1c9 100644
--- a/webrtc/modules/video_coding/codecs/h264/include/h264.h
+++ b/webrtc/modules/video_coding/codecs/h264/include/h264.h
@@ -23,7 +23,7 @@
#endif // defined(WEBRTC_IOS) || defined(WEBRTC_MAC)
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
namespace webrtc {
diff --git a/webrtc/modules/video_coding/codecs/i420/i420.cc b/webrtc/modules/video_coding/codecs/i420/i420.cc
index cf546a07a1..7f06b4cf7d 100644
--- a/webrtc/modules/video_coding/codecs/i420/i420.cc
+++ b/webrtc/modules/video_coding/codecs/i420/i420.cc
@@ -21,20 +21,19 @@ const size_t kI420HeaderSize = 4;
namespace webrtc {
-I420Encoder::I420Encoder() : _inited(false), _encodedImage(),
- _encodedCompleteCallback(NULL) {
-}
+I420Encoder::I420Encoder()
+ : _inited(false), _encodedImage(), _encodedCompleteCallback(NULL) {}
I420Encoder::~I420Encoder() {
_inited = false;
- delete [] _encodedImage._buffer;
+ delete[] _encodedImage._buffer;
}
int I420Encoder::Release() {
// Should allocate an encoded frame and then release it here, for that we
// actually need an init flag.
if (_encodedImage._buffer != NULL) {
- delete [] _encodedImage._buffer;
+ delete[] _encodedImage._buffer;
_encodedImage._buffer = NULL;
}
_inited = false;
@@ -53,7 +52,7 @@ int I420Encoder::InitEncode(const VideoCodec* codecSettings,
// Allocating encoded memory.
if (_encodedImage._buffer != NULL) {
- delete [] _encodedImage._buffer;
+ delete[] _encodedImage._buffer;
_encodedImage._buffer = NULL;
_encodedImage._size = 0;
}
@@ -101,18 +100,18 @@ int I420Encoder::Encode(const VideoFrame& inputImage,
kI420HeaderSize;
if (_encodedImage._size > req_length) {
// Reallocate buffer.
- delete [] _encodedImage._buffer;
+ delete[] _encodedImage._buffer;
_encodedImage._buffer = new uint8_t[req_length];
_encodedImage._size = req_length;
}
- uint8_t *buffer = _encodedImage._buffer;
+ uint8_t* buffer = _encodedImage._buffer;
buffer = InsertHeader(buffer, width, height);
- int ret_length = ExtractBuffer(inputImage, req_length - kI420HeaderSize,
- buffer);
+ int ret_length =
+ ExtractBuffer(inputImage, req_length - kI420HeaderSize, buffer);
if (ret_length < 0)
return WEBRTC_VIDEO_CODEC_MEMORY;
_encodedImage._length = ret_length + kI420HeaderSize;
@@ -121,7 +120,8 @@ int I420Encoder::Encode(const VideoFrame& inputImage,
return WEBRTC_VIDEO_CODEC_OK;
}
-uint8_t* I420Encoder::InsertHeader(uint8_t *buffer, uint16_t width,
+uint8_t* I420Encoder::InsertHeader(uint8_t* buffer,
+ uint16_t width,
uint16_t height) {
*buffer++ = static_cast<uint8_t>(width >> 8);
*buffer++ = static_cast<uint8_t>(width & 0xFF);
@@ -130,30 +130,29 @@ uint8_t* I420Encoder::InsertHeader(uint8_t *buffer, uint16_t width,
return buffer;
}
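
The header written above is four bytes, width then height, each big-endian. As a sketch, a 640x480 frame yields:

  uint8_t header[4];  // kI420HeaderSize in i420.cc
  uint8_t* p = header;
  *p++ = 640 >> 8;    // 0x02
  *p++ = 640 & 0xFF;  // 0x80
  *p++ = 480 >> 8;    // 0x01
  *p++ = 480 & 0xFF;  // 0xE0 -- ExtractHeader() reverses this on decode.
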
-int
-I420Encoder::RegisterEncodeCompleteCallback(EncodedImageCallback* callback) {
+int I420Encoder::RegisterEncodeCompleteCallback(
+ EncodedImageCallback* callback) {
_encodedCompleteCallback = callback;
return WEBRTC_VIDEO_CODEC_OK;
}
-
-I420Decoder::I420Decoder() : _decodedImage(), _width(0), _height(0),
- _inited(false), _decodeCompleteCallback(NULL) {
-}
+I420Decoder::I420Decoder()
+ : _decodedImage(),
+ _width(0),
+ _height(0),
+ _inited(false),
+ _decodeCompleteCallback(NULL) {}
I420Decoder::~I420Decoder() {
Release();
}
-int
-I420Decoder::Reset() {
+int I420Decoder::Reset() {
return WEBRTC_VIDEO_CODEC_OK;
}
-
-int
-I420Decoder::InitDecode(const VideoCodec* codecSettings,
- int /*numberOfCores */) {
+int I420Decoder::InitDecode(const VideoCodec* codecSettings,
+ int /*numberOfCores */) {
if (codecSettings == NULL) {
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
} else if (codecSettings->width < 1 || codecSettings->height < 1) {
@@ -165,7 +164,8 @@ I420Decoder::InitDecode(const VideoCodec* codecSettings,
return WEBRTC_VIDEO_CODEC_OK;
}
-int I420Decoder::Decode(const EncodedImage& inputImage, bool /*missingFrames*/,
+int I420Decoder::Decode(const EncodedImage& inputImage,
+ bool /*missingFrames*/,
const RTPFragmentationHeader* /*fragmentation*/,
const CodecSpecificInfo* /*codecSpecificInfo*/,
int64_t /*renderTimeMs*/) {
@@ -203,8 +203,8 @@ int I420Decoder::Decode(const EncodedImage& inputImage, bool /*missingFrames*/,
}
// Set decoded image parameters.
int half_width = (_width + 1) / 2;
- _decodedImage.CreateEmptyFrame(_width, _height,
- _width, half_width, half_width);
+ _decodedImage.CreateEmptyFrame(_width, _height, _width, half_width,
+ half_width);
// Converting from buffer to plane representation.
int ret = ConvertToI420(kI420, buffer, 0, 0, _width, _height, 0,
kVideoRotation_0, &_decodedImage);
@@ -218,7 +218,8 @@ int I420Decoder::Decode(const EncodedImage& inputImage, bool /*missingFrames*/,
}
const uint8_t* I420Decoder::ExtractHeader(const uint8_t* buffer,
- uint16_t* width, uint16_t* height) {
+ uint16_t* width,
+ uint16_t* height) {
*width = static_cast<uint16_t>(*buffer++) << 8;
*width |= *buffer++;
*height = static_cast<uint16_t>(*buffer++) << 8;
diff --git a/webrtc/modules/video_coding/codecs/i420/include/i420.h b/webrtc/modules/video_coding/codecs/i420/include/i420.h
index 8990ccf878..9f77845e96 100644
--- a/webrtc/modules/video_coding/codecs/i420/include/i420.h
+++ b/webrtc/modules/video_coding/codecs/i420/include/i420.h
@@ -8,12 +8,12 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_I420_MAIN_INTERFACE_I420_H_
-#define WEBRTC_MODULES_VIDEO_CODING_CODECS_I420_MAIN_INTERFACE_I420_H_
+#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_I420_INCLUDE_I420_H_
+#define WEBRTC_MODULES_VIDEO_CODING_CODECS_I420_INCLUDE_I420_H_
#include <vector>
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
#include "webrtc/typedefs.h"
namespace webrtc {
@@ -24,45 +24,45 @@ class I420Encoder : public VideoEncoder {
virtual ~I420Encoder();
-// Initialize the encoder with the information from the VideoCodec.
-//
-// Input:
-// - codecSettings : Codec settings.
-// - numberOfCores : Number of cores available for the encoder.
-// - maxPayloadSize : The maximum size each payload is allowed
-// to have. Usually MTU - overhead.
-//
-// Return value : WEBRTC_VIDEO_CODEC_OK if OK.
-// <0 - Error
+ // Initialize the encoder with the information from the VideoCodec.
+ //
+ // Input:
+ // - codecSettings : Codec settings.
+ // - numberOfCores : Number of cores available for the encoder.
+ // - maxPayloadSize : The maximum size each payload is allowed
+ // to have. Usually MTU - overhead.
+ //
+ // Return value : WEBRTC_VIDEO_CODEC_OK if OK.
+ // <0 - Error
int InitEncode(const VideoCodec* codecSettings,
int /*numberOfCores*/,
size_t /*maxPayloadSize*/) override;
-// "Encode" an I420 image (as a part of a video stream). The encoded image
-// will be returned to the user via the encode complete callback.
-//
-// Input:
-// - inputImage : Image to be encoded.
-// - codecSpecificInfo : Pointer to codec specific data.
-// - frameType : Frame type to be sent (Key /Delta).
-//
-// Return value : WEBRTC_VIDEO_CODEC_OK if OK.
-// <0 - Error
+ // "Encode" an I420 image (as a part of a video stream). The encoded image
+ // will be returned to the user via the encode complete callback.
+ //
+ // Input:
+ // - inputImage : Image to be encoded.
+ // - codecSpecificInfo : Pointer to codec specific data.
+  //          - frameType    : Frame type to be sent (Key/Delta).
+ //
+ // Return value : WEBRTC_VIDEO_CODEC_OK if OK.
+ // <0 - Error
int Encode(const VideoFrame& inputImage,
const CodecSpecificInfo* /*codecSpecificInfo*/,
const std::vector<FrameType>* /*frame_types*/) override;
-// Register an encode complete callback object.
-//
-// Input:
-// - callback : Callback object which handles encoded images.
-//
-// Return value : WEBRTC_VIDEO_CODEC_OK if OK, < 0 otherwise.
+ // Register an encode complete callback object.
+ //
+ // Input:
+ // - callback : Callback object which handles encoded images.
+ //
+ // Return value : WEBRTC_VIDEO_CODEC_OK if OK, < 0 otherwise.
int RegisterEncodeCompleteCallback(EncodedImageCallback* callback) override;
-// Free encoder memory.
-//
-// Return value : WEBRTC_VIDEO_CODEC_OK if OK, < 0 otherwise.
+ // Free encoder memory.
+ //
+ // Return value : WEBRTC_VIDEO_CODEC_OK if OK, < 0 otherwise.
int Release() override;
int SetRates(uint32_t /*newBitRate*/, uint32_t /*frameRate*/) override {
@@ -76,12 +76,13 @@ class I420Encoder : public VideoEncoder {
void OnDroppedFrame() override {}
private:
- static uint8_t* InsertHeader(uint8_t* buffer, uint16_t width,
+ static uint8_t* InsertHeader(uint8_t* buffer,
+ uint16_t width,
uint16_t height);
- bool _inited;
- EncodedImage _encodedImage;
- EncodedImageCallback* _encodedCompleteCallback;
+ bool _inited;
+ EncodedImage _encodedImage;
+ EncodedImageCallback* _encodedCompleteCallback;
}; // class I420Encoder
class I420Decoder : public VideoDecoder {
@@ -90,50 +91,50 @@ class I420Decoder : public VideoDecoder {
virtual ~I420Decoder();
-// Initialize the decoder.
-// The user must notify the codec of width and height values.
-//
-// Return value : WEBRTC_VIDEO_CODEC_OK.
-// <0 - Errors
+ // Initialize the decoder.
+ // The user must notify the codec of width and height values.
+ //
+ // Return value : WEBRTC_VIDEO_CODEC_OK.
+ // <0 - Errors
int InitDecode(const VideoCodec* codecSettings,
int /*numberOfCores*/) override;
-// Decode encoded image (as a part of a video stream). The decoded image
-// will be returned to the user through the decode complete callback.
-//
-// Input:
-// - inputImage : Encoded image to be decoded
-// - missingFrames : True if one or more frames have been lost
-// since the previous decode call.
-// - codecSpecificInfo : pointer to specific codec data
-// - renderTimeMs : Render time in Ms
-//
-// Return value : WEBRTC_VIDEO_CODEC_OK if OK
-// <0 - Error
+ // Decode encoded image (as a part of a video stream). The decoded image
+ // will be returned to the user through the decode complete callback.
+ //
+ // Input:
+ // - inputImage : Encoded image to be decoded
+ // - missingFrames : True if one or more frames have been lost
+ // since the previous decode call.
+ // - codecSpecificInfo : pointer to specific codec data
+  //          - renderTimeMs      : Render time in ms
+ //
+ // Return value : WEBRTC_VIDEO_CODEC_OK if OK
+ // <0 - Error
int Decode(const EncodedImage& inputImage,
bool missingFrames,
const RTPFragmentationHeader* /*fragmentation*/,
const CodecSpecificInfo* /*codecSpecificInfo*/,
int64_t /*renderTimeMs*/) override;
-// Register a decode complete callback object.
-//
-// Input:
-// - callback : Callback object which handles decoded images.
-//
-// Return value : WEBRTC_VIDEO_CODEC_OK if OK, < 0 otherwise.
+ // Register a decode complete callback object.
+ //
+ // Input:
+ // - callback : Callback object which handles decoded images.
+ //
+ // Return value : WEBRTC_VIDEO_CODEC_OK if OK, < 0 otherwise.
int RegisterDecodeCompleteCallback(DecodedImageCallback* callback) override;
-// Free decoder memory.
-//
-// Return value : WEBRTC_VIDEO_CODEC_OK if OK.
-// <0 - Error
+ // Free decoder memory.
+ //
+ // Return value : WEBRTC_VIDEO_CODEC_OK if OK.
+ // <0 - Error
int Release() override;
-// Reset decoder state and prepare for a new call.
-//
-// Return value : WEBRTC_VIDEO_CODEC_OK.
-// <0 - Error
+ // Reset decoder state and prepare for a new call.
+ //
+ // Return value : WEBRTC_VIDEO_CODEC_OK.
+ // <0 - Error
int Reset() override;
private:
@@ -142,12 +143,12 @@ class I420Decoder : public VideoDecoder {
uint16_t* height);
VideoFrame _decodedImage;
- int _width;
- int _height;
- bool _inited;
- DecodedImageCallback* _decodeCompleteCallback;
+ int _width;
+ int _height;
+ bool _inited;
+ DecodedImageCallback* _decodeCompleteCallback;
}; // class I420Decoder
} // namespace webrtc
-#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_I420_MAIN_INTERFACE_I420_H_
+#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_I420_INCLUDE_I420_H_
diff --git a/webrtc/modules/video_coding/codecs/interface/mock/mock_video_codec_interface.h b/webrtc/modules/video_coding/codecs/interface/mock/mock_video_codec_interface.h
index 6c926d4794..d727e896ad 100644
--- a/webrtc/modules/video_coding/codecs/interface/mock/mock_video_codec_interface.h
+++ b/webrtc/modules/video_coding/codecs/interface/mock/mock_video_codec_interface.h
@@ -11,27 +11,32 @@
#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_INTERFACE_MOCK_MOCK_VIDEO_CODEC_INTERFACE_H_
#define WEBRTC_MODULES_VIDEO_CODING_CODECS_INTERFACE_MOCK_MOCK_VIDEO_CODEC_INTERFACE_H_
+#pragma message("WARNING: video_coding/codecs/interface is DEPRECATED; "
+ "use video_coding/include")
#include <string>
+#include <vector>
#include "testing/gmock/include/gmock/gmock.h"
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
#include "webrtc/typedefs.h"
namespace webrtc {
class MockEncodedImageCallback : public EncodedImageCallback {
public:
- MOCK_METHOD3(Encoded, int32_t(const EncodedImage& encodedImage,
- const CodecSpecificInfo* codecSpecificInfo,
- const RTPFragmentationHeader* fragmentation));
+ MOCK_METHOD3(Encoded,
+ int32_t(const EncodedImage& encodedImage,
+ const CodecSpecificInfo* codecSpecificInfo,
+ const RTPFragmentationHeader* fragmentation));
};
class MockVideoEncoder : public VideoEncoder {
public:
- MOCK_CONST_METHOD2(Version, int32_t(int8_t *version, int32_t length));
- MOCK_METHOD3(InitEncode, int32_t(const VideoCodec* codecSettings,
- int32_t numberOfCores,
- size_t maxPayloadSize));
+ MOCK_CONST_METHOD2(Version, int32_t(int8_t* version, int32_t length));
+ MOCK_METHOD3(InitEncode,
+ int32_t(const VideoCodec* codecSettings,
+ int32_t numberOfCores,
+ size_t maxPayloadSize));
MOCK_METHOD3(Encode,
int32_t(const VideoFrame& inputImage,
const CodecSpecificInfo* codecSpecificInfo,
@@ -47,22 +52,24 @@ class MockVideoEncoder : public VideoEncoder {
class MockDecodedImageCallback : public DecodedImageCallback {
public:
- MOCK_METHOD1(Decoded, int32_t(VideoFrame& decodedImage));
+ MOCK_METHOD1(Decoded, int32_t(const VideoFrame& decodedImage));
+ MOCK_METHOD2(Decoded,
+ int32_t(const VideoFrame& decodedImage, int64_t decode_time_ms));
MOCK_METHOD1(ReceivedDecodedReferenceFrame,
int32_t(const uint64_t pictureId));
- MOCK_METHOD1(ReceivedDecodedFrame,
- int32_t(const uint64_t pictureId));
+ MOCK_METHOD1(ReceivedDecodedFrame, int32_t(const uint64_t pictureId));
};
class MockVideoDecoder : public VideoDecoder {
public:
- MOCK_METHOD2(InitDecode, int32_t(const VideoCodec* codecSettings,
- int32_t numberOfCores));
- MOCK_METHOD5(Decode, int32_t(const EncodedImage& inputImage,
- bool missingFrames,
- const RTPFragmentationHeader* fragmentation,
- const CodecSpecificInfo* codecSpecificInfo,
- int64_t renderTimeMs));
+ MOCK_METHOD2(InitDecode,
+ int32_t(const VideoCodec* codecSettings, int32_t numberOfCores));
+ MOCK_METHOD5(Decode,
+ int32_t(const EncodedImage& inputImage,
+ bool missingFrames,
+ const RTPFragmentationHeader* fragmentation,
+ const CodecSpecificInfo* codecSpecificInfo,
+ int64_t renderTimeMs));
MOCK_METHOD1(RegisterDecodeCompleteCallback,
int32_t(DecodedImageCallback* callback));
MOCK_METHOD0(Release, int32_t());
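
A sketch of exercising the reformatted mock in a test (standard gmock syntax; the expectation below is hypothetical and not part of this change):

  webrtc::MockVideoDecoder decoder;
  EXPECT_CALL(decoder, InitDecode(testing::_, /*numberOfCores=*/1))
      .WillOnce(testing::Return(WEBRTC_VIDEO_CODEC_OK));
  // decoder.InitDecode(&codec_settings, 1) now satisfies the expectation.
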
diff --git a/webrtc/modules/video_coding/codecs/interface/video_codec_interface.h b/webrtc/modules/video_coding/codecs/interface/video_codec_interface.h
index 6363ab7332..6bcfa909bd 100644
--- a/webrtc/modules/video_coding/codecs/interface/video_codec_interface.h
+++ b/webrtc/modules/video_coding/codecs/interface/video_codec_interface.h
@@ -8,23 +8,24 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_INTERFACE_VIDEO_CODEC_INTERFACE_H
-#define WEBRTC_MODULES_VIDEO_CODING_CODECS_INTERFACE_VIDEO_CODEC_INTERFACE_H
+#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_INTERFACE_VIDEO_CODEC_INTERFACE_H_
+#define WEBRTC_MODULES_VIDEO_CODING_CODECS_INTERFACE_VIDEO_CODEC_INTERFACE_H_
+#pragma message("WARNING: video_coding/codecs/interface is DEPRECATED; "
+ "use video_coding/include")
#include <vector>
#include "webrtc/common_types.h"
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/modules/video_coding/codecs/interface/video_error_codes.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/video_coding/include/video_error_codes.h"
#include "webrtc/typedefs.h"
#include "webrtc/video_decoder.h"
#include "webrtc/video_encoder.h"
#include "webrtc/video_frame.h"
-namespace webrtc
-{
+namespace webrtc {
-class RTPFragmentationHeader; // forward declaration
+class RTPFragmentationHeader; // forward declaration
// Note: if any pointers are added to this struct, it must be fitted
// with a copy-constructor. See below.
@@ -68,6 +69,10 @@ struct CodecSpecificInfoVP9 {
uint16_t width[kMaxVp9NumberOfSpatialLayers];
uint16_t height[kMaxVp9NumberOfSpatialLayers];
GofInfoVP9 gof;
+
+ // Frame reference data.
+ uint8_t num_ref_pics;
+ uint8_t p_diff[kMaxVp9RefPics];
};
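
A sketch of how the new reference fields might be populated for an inter frame that references the two most recent pictures (values illustrative; in practice they come from the VP9 payload descriptor):

  webrtc::CodecSpecificInfoVP9 vp9_info = {};
  vp9_info.num_ref_pics = 2;
  vp9_info.p_diff[0] = 1;  // picture-id delta to the previous picture
  vp9_info.p_diff[1] = 2;  // delta to the picture before that
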
struct CodecSpecificInfoGeneric {
@@ -86,12 +91,11 @@ union CodecSpecificInfoUnion {
// Note: if any pointers are added to this struct or its sub-structs, it
// must be fitted with a copy-constructor. This is because it is copied
// in the copy-constructor of VCMEncodedFrame.
-struct CodecSpecificInfo
-{
- VideoCodecType codecType;
- CodecSpecificInfoUnion codecSpecific;
+struct CodecSpecificInfo {
+ VideoCodecType codecType;
+ CodecSpecificInfoUnion codecSpecific;
};
} // namespace webrtc
-#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_INTERFACE_VIDEO_CODEC_INTERFACE_H
+#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_INTERFACE_VIDEO_CODEC_INTERFACE_H_
diff --git a/webrtc/modules/video_coding/codecs/interface/video_error_codes.h b/webrtc/modules/video_coding/codecs/interface/video_error_codes.h
index 28e5a32d43..ea8829df80 100644
--- a/webrtc/modules/video_coding/codecs/interface/video_error_codes.h
+++ b/webrtc/modules/video_coding/codecs/interface/video_error_codes.h
@@ -8,8 +8,11 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_INTERFACE_VIDEO_ERROR_CODES_H
-#define WEBRTC_MODULES_VIDEO_CODING_CODECS_INTERFACE_VIDEO_ERROR_CODES_H
+#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_INTERFACE_VIDEO_ERROR_CODES_H_
+#define WEBRTC_MODULES_VIDEO_CODING_CODECS_INTERFACE_VIDEO_ERROR_CODES_H_
+
+#pragma message("WARNING: video_coding/codecs/interface is DEPRECATED; "
+ "use video_coding/include")
// NOTE: in sync with video_coding_module_defines.h
@@ -29,4 +32,4 @@
#define WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE -13
#define WEBRTC_VIDEO_CODEC_TARGET_BITRATE_OVERSHOOT -14
-#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_INTERFACE_VIDEO_ERROR_CODES_H
+#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_INTERFACE_VIDEO_ERROR_CODES_H_
diff --git a/webrtc/modules/video_coding/codecs/test/packet_manipulator.cc b/webrtc/modules/video_coding/codecs/test/packet_manipulator.cc
index 36ba0e8272..b554b4e9ae 100644
--- a/webrtc/modules/video_coding/codecs/test/packet_manipulator.cc
+++ b/webrtc/modules/video_coding/codecs/test/packet_manipulator.cc
@@ -57,7 +57,7 @@ int PacketManipulatorImpl::ManipulatePackets(
active_burst_packets_--;
nbr_packets_dropped++;
} else if (RandomUniform() < config_.packet_loss_probability ||
- packet_loss_has_occurred) {
+ packet_loss_has_occurred) {
packet_loss_has_occurred = true;
nbr_packets_dropped++;
if (config_.packet_loss_mode == kBurst) {
@@ -91,9 +91,9 @@ inline double PacketManipulatorImpl::RandomUniform() {
// get the same behavior as long as we're using a fixed initial seed.
critsect_->Enter();
srand(random_seed_);
- random_seed_ = rand();
+ random_seed_ = rand(); // NOLINT (rand_r instead of rand)
critsect_->Leave();
- return (random_seed_ + 1.0)/(RAND_MAX + 1.0);
+ return (random_seed_ + 1.0) / (RAND_MAX + 1.0);
}
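
Why the reseeding above gives reproducible runs, as a self-contained sketch: feeding rand()'s output back in as the next seed makes the whole drop sequence a pure function of the initial seed (standalone example, not project code):

  #include <cstdio>
  #include <cstdlib>

  int main() {
    unsigned int seed = 42;  // fixed initial seed
    for (int i = 0; i < 3; ++i) {
      srand(seed);
      seed = rand();  // NOLINT (rand_r instead of rand)
      double u = (seed + 1.0) / (RAND_MAX + 1.0);  // uniform in (0, 1]
      printf("%f\n", u);  // identical on every run with the same seed
    }
    return 0;
  }
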
const char* PacketLossModeToStr(PacketLossMode e) {
@@ -109,4 +109,4 @@ const char* PacketLossModeToStr(PacketLossMode e) {
}
} // namespace test
-} // namespace webrtcc
+} // namespace webrtc
diff --git a/webrtc/modules/video_coding/codecs/test/packet_manipulator.h b/webrtc/modules/video_coding/codecs/test/packet_manipulator.h
index 16a9dc22ef..3334be072b 100644
--- a/webrtc/modules/video_coding/codecs/test/packet_manipulator.h
+++ b/webrtc/modules/video_coding/codecs/test/packet_manipulator.h
@@ -13,7 +13,7 @@
#include <stdlib.h>
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
#include "webrtc/test/testsupport/packet_reader.h"
@@ -36,10 +36,11 @@ const char* PacketLossModeToStr(PacketLossMode e);
// scenarios caused by network interference.
struct NetworkingConfig {
NetworkingConfig()
- : packet_size_in_bytes(1500), max_payload_size_in_bytes(1440),
- packet_loss_mode(kUniform), packet_loss_probability(0.0),
- packet_loss_burst_length(1) {
- }
+ : packet_size_in_bytes(1500),
+ max_payload_size_in_bytes(1440),
+ packet_loss_mode(kUniform),
+ packet_loss_probability(0.0),
+ packet_loss_burst_length(1) {}
// Packet size in bytes. Default: 1500 bytes.
size_t packet_size_in_bytes;
@@ -93,9 +94,11 @@ class PacketManipulatorImpl : public PacketManipulator {
virtual ~PacketManipulatorImpl();
int ManipulatePackets(webrtc::EncodedImage* encoded_image) override;
virtual void InitializeRandomSeed(unsigned int seed);
+
protected:
// Returns a uniformly distributed random value between 0.0 and 1.0
virtual double RandomUniform();
+
private:
PacketReader* packet_reader_;
const NetworkingConfig& config_;
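
The two loss modes configured above can be summarized with a simplified sketch (it ignores the detail that once a frame loses a packet, ManipulatePackets() also drops the frame's remaining packets; Uniform01 and SimulateDrops are hypothetical stand-ins):

  int SimulateDrops(int num_packets, double loss_prob, int burst_length,
                    double (*Uniform01)()) {
    int dropped = 0;
    int active_burst = 0;  // packets still owed to the current burst
    for (int i = 0; i < num_packets; ++i) {
      if (active_burst > 0) {
        --active_burst;
        ++dropped;
      } else if (Uniform01() < loss_prob) {
        ++dropped;
        // kUniform behaves like burst_length == 1.
        active_burst = burst_length - 1;
      }
    }
    return dropped;
  }
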
diff --git a/webrtc/modules/video_coding/codecs/test/packet_manipulator_unittest.cc b/webrtc/modules/video_coding/codecs/test/packet_manipulator_unittest.cc
index ace7bc0507..8c3d30dc0d 100644
--- a/webrtc/modules/video_coding/codecs/test/packet_manipulator_unittest.cc
+++ b/webrtc/modules/video_coding/codecs/test/packet_manipulator_unittest.cc
@@ -13,7 +13,7 @@
#include <queue>
#include "testing/gtest/include/gtest/gtest.h"
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
#include "webrtc/modules/video_coding/codecs/test/predictive_packet_manipulator.h"
#include "webrtc/test/testsupport/unittest_utils.h"
#include "webrtc/typedefs.h"
@@ -25,7 +25,7 @@ const double kNeverDropProbability = 0.0;
const double kAlwaysDropProbability = 1.0;
const int kBurstLength = 1;
-class PacketManipulatorTest: public PacketRelatedTest {
+class PacketManipulatorTest : public PacketRelatedTest {
protected:
PacketReader packet_reader_;
EncodedImage image_;
@@ -50,19 +50,15 @@ class PacketManipulatorTest: public PacketRelatedTest {
virtual ~PacketManipulatorTest() {}
- void SetUp() {
- PacketRelatedTest::SetUp();
- }
+ void SetUp() { PacketRelatedTest::SetUp(); }
- void TearDown() {
- PacketRelatedTest::TearDown();
- }
+ void TearDown() { PacketRelatedTest::TearDown(); }
void VerifyPacketLoss(int expected_nbr_packets_dropped,
int actual_nbr_packets_dropped,
size_t expected_packet_data_length,
uint8_t* expected_packet_data,
- EncodedImage& actual_image) {
+ const EncodedImage& actual_image) {
EXPECT_EQ(expected_nbr_packets_dropped, actual_nbr_packets_dropped);
EXPECT_EQ(expected_packet_data_length, image_._length);
EXPECT_EQ(0, memcmp(expected_packet_data, actual_image._buffer,
@@ -75,10 +71,10 @@ TEST_F(PacketManipulatorTest, Constructor) {
}
TEST_F(PacketManipulatorTest, DropNone) {
- PacketManipulatorImpl manipulator(&packet_reader_, no_drop_config_, false);
+ PacketManipulatorImpl manipulator(&packet_reader_, no_drop_config_, false);
int nbr_packets_dropped = manipulator.ManipulatePackets(&image_);
- VerifyPacketLoss(0, nbr_packets_dropped, kPacketDataLength,
- packet_data_, image_);
+ VerifyPacketLoss(0, nbr_packets_dropped, kPacketDataLength, packet_data_,
+ image_);
}
TEST_F(PacketManipulatorTest, UniformDropNoneSmallFrame) {
@@ -87,15 +83,14 @@ TEST_F(PacketManipulatorTest, UniformDropNoneSmallFrame) {
PacketManipulatorImpl manipulator(&packet_reader_, no_drop_config_, false);
int nbr_packets_dropped = manipulator.ManipulatePackets(&image_);
- VerifyPacketLoss(0, nbr_packets_dropped, data_length,
- packet_data_, image_);
+ VerifyPacketLoss(0, nbr_packets_dropped, data_length, packet_data_, image_);
}
TEST_F(PacketManipulatorTest, UniformDropAll) {
PacketManipulatorImpl manipulator(&packet_reader_, drop_config_, false);
int nbr_packets_dropped = manipulator.ManipulatePackets(&image_);
- VerifyPacketLoss(kPacketDataNumberOfPackets, nbr_packets_dropped,
- 0, packet_data_, image_);
+ VerifyPacketLoss(kPacketDataNumberOfPackets, nbr_packets_dropped, 0,
+ packet_data_, image_);
}
// Use our customized test class to make the second packet be lost
diff --git a/webrtc/modules/video_coding/codecs/test/predictive_packet_manipulator.cc b/webrtc/modules/video_coding/codecs/test/predictive_packet_manipulator.cc
index c92cfa48a7..9eba205a88 100644
--- a/webrtc/modules/video_coding/codecs/test/predictive_packet_manipulator.cc
+++ b/webrtc/modules/video_coding/codecs/test/predictive_packet_manipulator.cc
@@ -19,13 +19,11 @@ namespace webrtc {
namespace test {
PredictivePacketManipulator::PredictivePacketManipulator(
- PacketReader* packet_reader, const NetworkingConfig& config)
- : PacketManipulatorImpl(packet_reader, config, false) {
-}
-
-PredictivePacketManipulator::~PredictivePacketManipulator() {
-}
+ PacketReader* packet_reader,
+ const NetworkingConfig& config)
+ : PacketManipulatorImpl(packet_reader, config, false) {}
+PredictivePacketManipulator::~PredictivePacketManipulator() {}
void PredictivePacketManipulator::AddRandomResult(double result) {
assert(result >= 0.0 && result <= 1.0);
@@ -33,8 +31,9 @@ void PredictivePacketManipulator::AddRandomResult(double result) {
}
double PredictivePacketManipulator::RandomUniform() {
- if(random_results_.size() == 0u) {
- fprintf(stderr, "No more stored results, please make sure AddRandomResult()"
+ if (random_results_.size() == 0u) {
+ fprintf(stderr,
+ "No more stored results, please make sure AddRandomResult()"
"is called same amount of times you're going to invoke the "
"RandomUniform() function, i.e. once per packet.\n");
assert(false);
@@ -45,4 +44,4 @@ double PredictivePacketManipulator::RandomUniform() {
}
} // namespace test
-} // namespace webrtcc
+} // namespace webrtc
diff --git a/webrtc/modules/video_coding/codecs/test/predictive_packet_manipulator.h b/webrtc/modules/video_coding/codecs/test/predictive_packet_manipulator.h
index 082712d870..45c7848c67 100644
--- a/webrtc/modules/video_coding/codecs/test/predictive_packet_manipulator.h
+++ b/webrtc/modules/video_coding/codecs/test/predictive_packet_manipulator.h
@@ -31,6 +31,7 @@ class PredictivePacketManipulator : public PacketManipulatorImpl {
// FIFO queue so they will be returned in the same order they were added.
// Result parameter must be 0.0 to 1.0.
void AddRandomResult(double result);
+
protected:
// Returns a uniformly distributed random value between 0.0 and 1.0
double RandomUniform() override;
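Taken together with the FIFO note above, a hypothetical test snippet using this class could look like the following (fixture members such as packet_reader_, drop_config_ and image_ are borrowed from the PacketManipulatorTest fixture earlier in this patch):

// Hypothetical usage sketch: queue one deterministic value per packet so the
// "random" drop decisions are fully reproducible across runs.
PredictivePacketManipulator manipulator(&packet_reader_, drop_config_);
manipulator.AddRandomResult(0.3);  // Consumed by the first RandomUniform().
manipulator.AddRandomResult(0.9);  // Consumed by the second, in FIFO order.
int nbr_packets_dropped = manipulator.ManipulatePackets(&image_);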
diff --git a/webrtc/modules/video_coding/codecs/test/stats.cc b/webrtc/modules/video_coding/codecs/test/stats.cc
index f87407d223..478b2f4901 100644
--- a/webrtc/modules/video_coding/codecs/test/stats.cc
+++ b/webrtc/modules/video_coding/codecs/test/stats.cc
@@ -39,19 +39,19 @@ Stats::Stats() {}
Stats::~Stats() {}
bool LessForEncodeTime(const FrameStatistic& s1, const FrameStatistic& s2) {
- return s1.encode_time_in_us < s2.encode_time_in_us;
+ return s1.encode_time_in_us < s2.encode_time_in_us;
}
bool LessForDecodeTime(const FrameStatistic& s1, const FrameStatistic& s2) {
- return s1.decode_time_in_us < s2.decode_time_in_us;
+ return s1.decode_time_in_us < s2.decode_time_in_us;
}
bool LessForEncodedSize(const FrameStatistic& s1, const FrameStatistic& s2) {
- return s1.encoded_frame_length_in_bytes < s2.encoded_frame_length_in_bytes;
+ return s1.encoded_frame_length_in_bytes < s2.encoded_frame_length_in_bytes;
}
bool LessForBitRate(const FrameStatistic& s1, const FrameStatistic& s2) {
- return s1.bit_rate_in_kbps < s2.bit_rate_in_kbps;
+ return s1.bit_rate_in_kbps < s2.bit_rate_in_kbps;
}
FrameStatistic& Stats::NewFrame(int frame_number) {
@@ -78,8 +78,7 @@ void Stats::PrintSummary() {
size_t nbr_keyframes = 0;
size_t nbr_nonkeyframes = 0;
- for (FrameStatisticsIterator it = stats_.begin();
- it != stats_.end(); ++it) {
+ for (FrameStatisticsIterator it = stats_.begin(); it != stats_.end(); ++it) {
total_encoding_time_in_us += it->encode_time_in_us;
total_decoding_time_in_us += it->decode_time_in_us;
total_encoded_frames_lengths += it->encoded_frame_length_in_bytes;
@@ -96,15 +95,13 @@ void Stats::PrintSummary() {
// ENCODING
printf("Encoding time:\n");
- frame = std::min_element(stats_.begin(),
- stats_.end(), LessForEncodeTime);
- printf(" Min : %7d us (frame %d)\n",
- frame->encode_time_in_us, frame->frame_number);
+ frame = std::min_element(stats_.begin(), stats_.end(), LessForEncodeTime);
+ printf(" Min : %7d us (frame %d)\n", frame->encode_time_in_us,
+ frame->frame_number);
- frame = std::max_element(stats_.begin(),
- stats_.end(), LessForEncodeTime);
- printf(" Max : %7d us (frame %d)\n",
- frame->encode_time_in_us, frame->frame_number);
+ frame = std::max_element(stats_.begin(), stats_.end(), LessForEncodeTime);
+ printf(" Max : %7d us (frame %d)\n", frame->encode_time_in_us,
+ frame->frame_number);
printf(" Average : %7d us\n",
static_cast<int>(total_encoding_time_in_us / stats_.size()));
@@ -115,7 +112,7 @@ void Stats::PrintSummary() {
// failures)
std::vector<FrameStatistic> decoded_frames;
for (std::vector<FrameStatistic>::iterator it = stats_.begin();
- it != stats_.end(); ++it) {
+ it != stats_.end(); ++it) {
if (it->decoding_successful) {
decoded_frames.push_back(*it);
}
@@ -123,15 +120,15 @@ void Stats::PrintSummary() {
if (decoded_frames.size() == 0) {
printf("No successfully decoded frames exist in this statistics.\n");
} else {
- frame = std::min_element(decoded_frames.begin(),
- decoded_frames.end(), LessForDecodeTime);
- printf(" Min : %7d us (frame %d)\n",
- frame->decode_time_in_us, frame->frame_number);
+ frame = std::min_element(decoded_frames.begin(), decoded_frames.end(),
+ LessForDecodeTime);
+ printf(" Min : %7d us (frame %d)\n", frame->decode_time_in_us,
+ frame->frame_number);
- frame = std::max_element(decoded_frames.begin(),
- decoded_frames.end(), LessForDecodeTime);
- printf(" Max : %7d us (frame %d)\n",
- frame->decode_time_in_us, frame->frame_number);
+ frame = std::max_element(decoded_frames.begin(), decoded_frames.end(),
+ LessForDecodeTime);
+ printf(" Max : %7d us (frame %d)\n", frame->decode_time_in_us,
+ frame->frame_number);
printf(" Average : %7d us\n",
static_cast<int>(total_decoding_time_in_us / decoded_frames.size()));
@@ -141,13 +138,11 @@ void Stats::PrintSummary() {
// SIZE
printf("Frame sizes:\n");
- frame = std::min_element(stats_.begin(),
- stats_.end(), LessForEncodedSize);
+ frame = std::min_element(stats_.begin(), stats_.end(), LessForEncodedSize);
printf(" Min : %7" PRIuS " bytes (frame %d)\n",
frame->encoded_frame_length_in_bytes, frame->frame_number);
- frame = std::max_element(stats_.begin(),
- stats_.end(), LessForEncodedSize);
+ frame = std::max_element(stats_.begin(), stats_.end(), LessForEncodedSize);
printf(" Max : %7" PRIuS " bytes (frame %d)\n",
frame->encoded_frame_length_in_bytes, frame->frame_number);
@@ -167,21 +162,17 @@ void Stats::PrintSummary() {
// BIT RATE
printf("Bit rates:\n");
- frame = std::min_element(stats_.begin(),
- stats_.end(), LessForBitRate);
- printf(" Min bit rate: %7d kbps (frame %d)\n",
- frame->bit_rate_in_kbps, frame->frame_number);
+ frame = std::min_element(stats_.begin(), stats_.end(), LessForBitRate);
+ printf(" Min bit rate: %7d kbps (frame %d)\n", frame->bit_rate_in_kbps,
+ frame->frame_number);
- frame = std::max_element(stats_.begin(),
- stats_.end(), LessForBitRate);
- printf(" Max bit rate: %7d kbps (frame %d)\n",
- frame->bit_rate_in_kbps, frame->frame_number);
+ frame = std::max_element(stats_.begin(), stats_.end(), LessForBitRate);
+ printf(" Max bit rate: %7d kbps (frame %d)\n", frame->bit_rate_in_kbps,
+ frame->frame_number);
printf("\n");
- printf("Total encoding time : %7d ms.\n",
- total_encoding_time_in_us / 1000);
- printf("Total decoding time : %7d ms.\n",
- total_decoding_time_in_us / 1000);
+ printf("Total encoding time : %7d ms.\n", total_encoding_time_in_us / 1000);
+ printf("Total decoding time : %7d ms.\n", total_decoding_time_in_us / 1000);
printf("Total processing time: %7d ms.\n",
(total_encoding_time_in_us + total_decoding_time_in_us) / 1000);
}
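The comparators reformatted above all feed the same <algorithm> idiom: std::min_element/std::max_element with a strict-weak-ordering predicate. A self-contained sketch of that idiom with a simplified stand-in type (not the Stats class itself):

#include <algorithm>
#include <vector>

struct Sample {
  int encode_time_in_us;
};

bool LessForEncodeTime(const Sample& s1, const Sample& s2) {
  return s1.encode_time_in_us < s2.encode_time_in_us;
}

// min_element returns an iterator to the smallest element under the given
// ordering; the range must be non-empty for the dereference to be valid.
int MinEncodeTime(const std::vector<Sample>& samples) {
  return std::min_element(samples.begin(), samples.end(), LessForEncodeTime)
      ->encode_time_in_us;
}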
diff --git a/webrtc/modules/video_coding/codecs/test/stats.h b/webrtc/modules/video_coding/codecs/test/stats.h
index 83ba108bb7..9092631ca1 100644
--- a/webrtc/modules/video_coding/codecs/test/stats.h
+++ b/webrtc/modules/video_coding/codecs/test/stats.h
@@ -13,7 +13,7 @@
#include <vector>
-#include "webrtc/common_video/interface/video_image.h"
+#include "webrtc/common_video/include/video_image.h"
namespace webrtc {
namespace test {
diff --git a/webrtc/modules/video_coding/codecs/test/stats_unittest.cc b/webrtc/modules/video_coding/codecs/test/stats_unittest.cc
index a2d27e71d6..0403ccfdb3 100644
--- a/webrtc/modules/video_coding/codecs/test/stats_unittest.cc
+++ b/webrtc/modules/video_coding/codecs/test/stats_unittest.cc
@@ -16,21 +16,15 @@
namespace webrtc {
namespace test {
-class StatsTest: public testing::Test {
+class StatsTest : public testing::Test {
protected:
- StatsTest() {
- }
+ StatsTest() {}
- virtual ~StatsTest() {
- }
+ virtual ~StatsTest() {}
- void SetUp() {
- stats_ = new Stats();
- }
+ void SetUp() { stats_ = new Stats(); }
- void TearDown() {
- delete stats_;
- }
+ void TearDown() { delete stats_; }
Stats* stats_;
};
diff --git a/webrtc/modules/video_coding/codecs/test/videoprocessor.cc b/webrtc/modules/video_coding/codecs/test/videoprocessor.cc
index c814dfe0e7..7376000bd5 100644
--- a/webrtc/modules/video_coding/codecs/test/videoprocessor.cc
+++ b/webrtc/modules/video_coding/codecs/test/videoprocessor.cc
@@ -93,14 +93,18 @@ bool VideoProcessorImpl::Init() {
int32_t register_result =
encoder_->RegisterEncodeCompleteCallback(encode_callback_);
if (register_result != WEBRTC_VIDEO_CODEC_OK) {
- fprintf(stderr, "Failed to register encode complete callback, return code: "
- "%d\n", register_result);
+ fprintf(stderr,
+ "Failed to register encode complete callback, return code: "
+ "%d\n",
+ register_result);
return false;
}
register_result = decoder_->RegisterDecodeCompleteCallback(decode_callback_);
if (register_result != WEBRTC_VIDEO_CODEC_OK) {
- fprintf(stderr, "Failed to register decode complete callback, return code: "
- "%d\n", register_result);
+ fprintf(stderr,
+ "Failed to register decode complete callback, return code: "
+ "%d\n",
+ register_result);
return false;
}
// Init the encoder and decoder
@@ -146,13 +150,14 @@ VideoProcessorImpl::~VideoProcessorImpl() {
delete decode_callback_;
}
-
void VideoProcessorImpl::SetRates(int bit_rate, int frame_rate) {
int set_rates_result = encoder_->SetRates(bit_rate, frame_rate);
assert(set_rates_result >= 0);
if (set_rates_result < 0) {
- fprintf(stderr, "Failed to update encoder with new rate %d, "
- "return code: %d\n", bit_rate, set_rates_result);
+ fprintf(stderr,
+ "Failed to update encoder with new rate %d, "
+ "return code: %d\n",
+ bit_rate, set_rates_result);
}
num_dropped_frames_ = 0;
num_spatial_resizes_ = 0;
@@ -175,7 +180,7 @@ int VideoProcessorImpl::NumberSpatialResizes() {
}
bool VideoProcessorImpl::ProcessFrame(int frame_number) {
- assert(frame_number >=0);
+ assert(frame_number >= 0);
if (!initialized_) {
fprintf(stderr, "Attempting to use uninitialized VideoProcessor!\n");
return false;
@@ -186,10 +191,8 @@ bool VideoProcessorImpl::ProcessFrame(int frame_number) {
}
if (frame_reader_->ReadFrame(source_buffer_)) {
// Copy the source frame to the newly read frame data.
- source_frame_.CreateFrame(source_buffer_,
- config_.codec_settings->width,
- config_.codec_settings->height,
- kVideoRotation_0);
+ source_frame_.CreateFrame(source_buffer_, config_.codec_settings->width,
+ config_.codec_settings->height, kVideoRotation_0);
// Ensure we have a new statistics data object we can fill:
FrameStatistic& stat = stats_->NewFrame(frame_number);
@@ -224,10 +227,10 @@ bool VideoProcessorImpl::ProcessFrame(int frame_number) {
void VideoProcessorImpl::FrameEncoded(const EncodedImage& encoded_image) {
// Timestamp is frame number, so this gives us #dropped frames.
- int num_dropped_from_prev_encode = encoded_image._timeStamp -
- prev_time_stamp_ - 1;
- num_dropped_frames_ += num_dropped_from_prev_encode;
- prev_time_stamp_ = encoded_image._timeStamp;
+ int num_dropped_from_prev_encode =
+ encoded_image._timeStamp - prev_time_stamp_ - 1;
+ num_dropped_frames_ += num_dropped_from_prev_encode;
+ prev_time_stamp_ = encoded_image._timeStamp;
if (num_dropped_from_prev_encode > 0) {
// For dropped frames, we write out the last decoded frame to avoid getting
// out of sync for the computation of PSNR and SSIM.
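Because the timestamp doubles as the frame number here, the dropped-frame count is pure arithmetic on consecutive timestamps. A worked sketch of the same computation:

// If the previous encoded frame carried timestamp 3 and the current one
// carries timestamp 6, frames 4 and 5 never came out of the encoder:
//   6 - 3 - 1 == 2 dropped frames.
int DroppedSincePrevEncode(int current_time_stamp, int prev_time_stamp) {
  return current_time_stamp - prev_time_stamp - 1;
}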
@@ -244,15 +247,16 @@ void VideoProcessorImpl::FrameEncoded(const EncodedImage& encoded_image) {
TickTime encode_stop = TickTime::Now();
int frame_number = encoded_image._timeStamp;
FrameStatistic& stat = stats_->stats_[frame_number];
- stat.encode_time_in_us = GetElapsedTimeMicroseconds(encode_start_,
- encode_stop);
+ stat.encode_time_in_us =
+ GetElapsedTimeMicroseconds(encode_start_, encode_stop);
stat.encoding_successful = true;
stat.encoded_frame_length_in_bytes = encoded_image._length;
stat.frame_number = encoded_image._timeStamp;
stat.frame_type = encoded_image._frameType;
stat.bit_rate_in_kbps = encoded_image._length * bit_rate_factor_;
- stat.total_packets = encoded_image._length /
- config_.networking_config.packet_size_in_bytes + 1;
+ stat.total_packets =
+ encoded_image._length / config_.networking_config.packet_size_in_bytes +
+ 1;
// Perform packet loss if the criteria are fulfilled:
bool exclude_this_frame = false;
@@ -280,7 +284,7 @@ void VideoProcessorImpl::FrameEncoded(const EncodedImage& encoded_image) {
copied_image._buffer = copied_buffer.get();
if (!exclude_this_frame) {
stat.packets_dropped =
- packet_manipulator_->ManipulatePackets(&copied_image);
+ packet_manipulator_->ManipulatePackets(&copied_image);
}
// Keep track of whether frames are lost due to packet loss so we can tell
@@ -305,26 +309,25 @@ void VideoProcessorImpl::FrameDecoded(const VideoFrame& image) {
int frame_number = image.timestamp();
// Report stats
FrameStatistic& stat = stats_->stats_[frame_number];
- stat.decode_time_in_us = GetElapsedTimeMicroseconds(decode_start_,
- decode_stop);
+ stat.decode_time_in_us =
+ GetElapsedTimeMicroseconds(decode_start_, decode_stop);
stat.decoding_successful = true;
// Check for resize action (either down or up):
if (static_cast<int>(image.width()) != last_encoder_frame_width_ ||
- static_cast<int>(image.height()) != last_encoder_frame_height_ ) {
+ static_cast<int>(image.height()) != last_encoder_frame_height_) {
++num_spatial_resizes_;
last_encoder_frame_width_ = image.width();
last_encoder_frame_height_ = image.height();
}
// Check if codec size is different from native/original size, and if so,
// upsample back to original size: needed for PSNR and SSIM computations.
- if (image.width() != config_.codec_settings->width ||
+ if (image.width() != config_.codec_settings->width ||
image.height() != config_.codec_settings->height) {
VideoFrame up_image;
- int ret_val = scaler_.Set(image.width(), image.height(),
- config_.codec_settings->width,
- config_.codec_settings->height,
- kI420, kI420, kScaleBilinear);
+ int ret_val = scaler_.Set(
+ image.width(), image.height(), config_.codec_settings->width,
+ config_.codec_settings->height, kI420, kI420, kScaleBilinear);
assert(ret_val >= 0);
if (ret_val < 0) {
fprintf(stderr, "Failed to set scalar for frame: %d, return code: %d\n",
@@ -366,7 +369,8 @@ void VideoProcessorImpl::FrameDecoded(const VideoFrame& image) {
}
int VideoProcessorImpl::GetElapsedTimeMicroseconds(
- const webrtc::TickTime& start, const webrtc::TickTime& stop) {
+ const webrtc::TickTime& start,
+ const webrtc::TickTime& stop) {
uint64_t encode_time = (stop - start).Microseconds();
assert(encode_time <
static_cast<unsigned int>(std::numeric_limits<int>::max()));
@@ -404,8 +408,7 @@ const char* VideoCodecTypeToStr(webrtc::VideoCodecType e) {
}
// Callbacks
-int32_t
-VideoProcessorImpl::VideoProcessorEncodeCompleteCallback::Encoded(
+int32_t VideoProcessorImpl::VideoProcessorEncodeCompleteCallback::Encoded(
const EncodedImage& encoded_image,
const webrtc::CodecSpecificInfo* codec_specific_info,
const webrtc::RTPFragmentationHeader* fragmentation) {
diff --git a/webrtc/modules/video_coding/codecs/test/videoprocessor.h b/webrtc/modules/video_coding/codecs/test/videoprocessor.h
index 0b094ae73e..3ee08fd46a 100644
--- a/webrtc/modules/video_coding/codecs/test/videoprocessor.h
+++ b/webrtc/modules/video_coding/codecs/test/videoprocessor.h
@@ -13,9 +13,10 @@
#include <string>
+#include "webrtc/base/checks.h"
#include "webrtc/common_video/libyuv/include/scaler.h"
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
#include "webrtc/modules/video_coding/codecs/test/packet_manipulator.h"
#include "webrtc/modules/video_coding/codecs/test/stats.h"
#include "webrtc/system_wrappers/include/tick_util.h"
@@ -242,12 +243,16 @@ class VideoProcessorImpl : public VideoProcessor {
// Callback class that must be implemented according to the VideoDecoder API.
class VideoProcessorDecodeCompleteCallback
- : public webrtc::DecodedImageCallback {
+ : public webrtc::DecodedImageCallback {
public:
- explicit VideoProcessorDecodeCompleteCallback(VideoProcessorImpl* vp)
- : video_processor_(vp) {
+ explicit VideoProcessorDecodeCompleteCallback(VideoProcessorImpl* vp)
+ : video_processor_(vp) {}
+ int32_t Decoded(webrtc::VideoFrame& image) override;
+ int32_t Decoded(webrtc::VideoFrame& image,
+ int64_t decode_time_ms) override {
+ RTC_NOTREACHED();
+ return -1;
}
- int32_t Decoded(webrtc::VideoFrame& image) override;
private:
VideoProcessorImpl* video_processor_;
diff --git a/webrtc/modules/video_coding/codecs/test/videoprocessor_integrationtest.cc b/webrtc/modules/video_coding/codecs/test/videoprocessor_integrationtest.cc
index 3d6aedb22a..7b92616e1b 100644
--- a/webrtc/modules/video_coding/codecs/test/videoprocessor_integrationtest.cc
+++ b/webrtc/modules/video_coding/codecs/test/videoprocessor_integrationtest.cc
@@ -12,17 +12,16 @@
#include "testing/gtest/include/gtest/gtest.h"
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
#include "webrtc/modules/video_coding/codecs/test/packet_manipulator.h"
#include "webrtc/modules/video_coding/codecs/test/videoprocessor.h"
#include "webrtc/modules/video_coding/codecs/vp8/include/vp8.h"
#include "webrtc/modules/video_coding/codecs/vp9/include/vp9.h"
#include "webrtc/modules/video_coding/codecs/vp8/include/vp8_common_types.h"
-#include "webrtc/modules/video_coding/main/interface/video_coding.h"
+#include "webrtc/modules/video_coding/include/video_coding.h"
#include "webrtc/test/testsupport/fileutils.h"
#include "webrtc/test/testsupport/frame_reader.h"
#include "webrtc/test/testsupport/frame_writer.h"
-#include "webrtc/test/testsupport/gtest_disable.h"
#include "webrtc/test/testsupport/metrics/video_metrics.h"
#include "webrtc/test/testsupport/packet_reader.h"
#include "webrtc/typedefs.h"
@@ -81,7 +80,6 @@ struct RateControlMetrics {
int num_key_frames;
};
-
// Sequence used is foreman (CIF): may be better to use VGA for resize test.
const int kCIFWidth = 352;
const int kCIFHeight = 288;
@@ -101,7 +99,7 @@ const float kScaleKeyFrameSize = 0.5f;
// dropping/spatial resize, and temporal layers. The limits for the rate
// control metrics are set to be fairly conservative, so failure should only
// happen when some significant regression or breakdown occurs.
-class VideoProcessorIntegrationTest: public testing::Test {
+class VideoProcessorIntegrationTest : public testing::Test {
protected:
VideoEncoder* encoder_;
VideoDecoder* decoder_;
@@ -148,7 +146,6 @@ class VideoProcessorIntegrationTest: public testing::Test {
bool frame_dropper_on_;
bool spatial_resize_on_;
-
VideoProcessorIntegrationTest() {}
virtual ~VideoProcessorIntegrationTest() {}
@@ -165,14 +162,13 @@ class VideoProcessorIntegrationTest: public testing::Test {
// CIF is currently used for all tests below.
// Setup the TestConfig struct for processing of a clip in CIF resolution.
- config_.input_filename =
- webrtc::test::ResourcePath("foreman_cif", "yuv");
+ config_.input_filename = webrtc::test::ResourcePath("foreman_cif", "yuv");
// Generate an output filename in a safe way.
config_.output_filename = webrtc::test::TempFilename(
webrtc::test::OutputPath(), "videoprocessor_integrationtest");
- config_.frame_length_in_bytes = CalcBufferSize(kI420,
- kCIFWidth, kCIFHeight);
+ config_.frame_length_in_bytes =
+ CalcBufferSize(kI420, kCIFWidth, kCIFHeight);
config_.verbose = false;
// Only allow the encoder/decoder to use a single core, for predictability.
config_.use_single_core = true;
@@ -188,52 +184,46 @@ class VideoProcessorIntegrationTest: public testing::Test {
// These features may be set depending on the test.
switch (config_.codec_settings->codecType) {
- case kVideoCodecVP8:
- config_.codec_settings->codecSpecific.VP8.errorConcealmentOn =
- error_concealment_on_;
- config_.codec_settings->codecSpecific.VP8.denoisingOn =
- denoising_on_;
- config_.codec_settings->codecSpecific.VP8.numberOfTemporalLayers =
- num_temporal_layers_;
- config_.codec_settings->codecSpecific.VP8.frameDroppingOn =
- frame_dropper_on_;
- config_.codec_settings->codecSpecific.VP8.automaticResizeOn =
- spatial_resize_on_;
- config_.codec_settings->codecSpecific.VP8.keyFrameInterval =
- kBaseKeyFrameInterval;
- break;
- case kVideoCodecVP9:
- config_.codec_settings->codecSpecific.VP9.denoisingOn =
- denoising_on_;
- config_.codec_settings->codecSpecific.VP9.numberOfTemporalLayers =
- num_temporal_layers_;
- config_.codec_settings->codecSpecific.VP9.frameDroppingOn =
- frame_dropper_on_;
- config_.codec_settings->codecSpecific.VP9.automaticResizeOn =
- spatial_resize_on_;
- config_.codec_settings->codecSpecific.VP9.keyFrameInterval =
- kBaseKeyFrameInterval;
- break;
- default:
- assert(false);
- break;
- }
- frame_reader_ =
- new webrtc::test::FrameReaderImpl(config_.input_filename,
- config_.frame_length_in_bytes);
- frame_writer_ =
- new webrtc::test::FrameWriterImpl(config_.output_filename,
- config_.frame_length_in_bytes);
+ case kVideoCodecVP8:
+ config_.codec_settings->codecSpecific.VP8.errorConcealmentOn =
+ error_concealment_on_;
+ config_.codec_settings->codecSpecific.VP8.denoisingOn = denoising_on_;
+ config_.codec_settings->codecSpecific.VP8.numberOfTemporalLayers =
+ num_temporal_layers_;
+ config_.codec_settings->codecSpecific.VP8.frameDroppingOn =
+ frame_dropper_on_;
+ config_.codec_settings->codecSpecific.VP8.automaticResizeOn =
+ spatial_resize_on_;
+ config_.codec_settings->codecSpecific.VP8.keyFrameInterval =
+ kBaseKeyFrameInterval;
+ break;
+ case kVideoCodecVP9:
+ config_.codec_settings->codecSpecific.VP9.denoisingOn = denoising_on_;
+ config_.codec_settings->codecSpecific.VP9.numberOfTemporalLayers =
+ num_temporal_layers_;
+ config_.codec_settings->codecSpecific.VP9.frameDroppingOn =
+ frame_dropper_on_;
+ config_.codec_settings->codecSpecific.VP9.automaticResizeOn =
+ spatial_resize_on_;
+ config_.codec_settings->codecSpecific.VP9.keyFrameInterval =
+ kBaseKeyFrameInterval;
+ break;
+ default:
+ assert(false);
+ break;
+ }
+ frame_reader_ = new webrtc::test::FrameReaderImpl(
+ config_.input_filename, config_.frame_length_in_bytes);
+ frame_writer_ = new webrtc::test::FrameWriterImpl(
+ config_.output_filename, config_.frame_length_in_bytes);
ASSERT_TRUE(frame_reader_->Init());
ASSERT_TRUE(frame_writer_->Init());
packet_manipulator_ = new webrtc::test::PacketManipulatorImpl(
&packet_reader_, config_.networking_config, config_.verbose);
- processor_ = new webrtc::test::VideoProcessorImpl(encoder_, decoder_,
- frame_reader_,
- frame_writer_,
- packet_manipulator_,
- config_, &stats_);
+ processor_ = new webrtc::test::VideoProcessorImpl(
+ encoder_, decoder_, frame_reader_, frame_writer_, packet_manipulator_,
+ config_, &stats_);
ASSERT_TRUE(processor_->Init());
}
@@ -247,7 +237,7 @@ class VideoProcessorIntegrationTest: public testing::Test {
encoding_bitrate_[i] = 0.0f;
// Update layer per-frame-bandwidth.
per_frame_bandwidth_[i] = static_cast<float>(bit_rate_layer_[i]) /
- static_cast<float>(frame_rate_layer_[i]);
+ static_cast<float>(frame_rate_layer_[i]);
}
// Set maximum size of key frames, following setting in the VP8 wrapper.
float max_key_size = kScaleKeyFrameSize * kOptimalBufferSize * frame_rate_;
@@ -274,28 +264,28 @@ class VideoProcessorIntegrationTest: public testing::Test {
// Update rate mismatch relative to per-frame bandwidth for delta frames.
if (frame_type == kVideoFrameDelta) {
// TODO(marpan): Should we count dropped (zero size) frames in mismatch?
- sum_frame_size_mismatch_[layer_] += fabs(encoded_size_kbits -
- per_frame_bandwidth_[layer_]) /
- per_frame_bandwidth_[layer_];
+ sum_frame_size_mismatch_[layer_] +=
+ fabs(encoded_size_kbits - per_frame_bandwidth_[layer_]) /
+ per_frame_bandwidth_[layer_];
} else {
- float target_size = (frame_num == 1) ? target_size_key_frame_initial_ :
- target_size_key_frame_;
- sum_key_frame_size_mismatch_ += fabs(encoded_size_kbits - target_size) /
- target_size;
+ float target_size = (frame_num == 1) ? target_size_key_frame_initial_
+ : target_size_key_frame_;
+ sum_key_frame_size_mismatch_ +=
+ fabs(encoded_size_kbits - target_size) / target_size;
num_key_frames_ += 1;
}
sum_encoded_frame_size_[layer_] += encoded_size_kbits;
// Encoding bitrate per layer: from the start of the update/run to the
// current frame.
encoding_bitrate_[layer_] = sum_encoded_frame_size_[layer_] *
- frame_rate_layer_[layer_] /
- num_frames_per_update_[layer_];
+ frame_rate_layer_[layer_] /
+ num_frames_per_update_[layer_];
// Total encoding rate: from the start of the update/run to current frame.
sum_encoded_frame_size_total_ += encoded_size_kbits;
- encoding_bitrate_total_ = sum_encoded_frame_size_total_ * frame_rate_ /
- num_frames_total_;
- perc_encoding_rate_mismatch_ = 100 * fabs(encoding_bitrate_total_ -
- bit_rate_) / bit_rate_;
+ encoding_bitrate_total_ =
+ sum_encoded_frame_size_total_ * frame_rate_ / num_frames_total_;
+ perc_encoding_rate_mismatch_ =
+ 100 * fabs(encoding_bitrate_total_ - bit_rate_) / bit_rate_;
if (perc_encoding_rate_mismatch_ < kPercTargetvsActualMismatch &&
!encoding_rate_within_target_) {
num_frames_to_hit_target_ = num_frames_total_;
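The mismatch metric reflowed above is straightforward percentage arithmetic; a short sketch with assumed numbers makes the threshold comparison concrete:

#include <cmath>

// Sketch: percentage mismatch between achieved and target encoding rate.
// With encoded_kbps = 570 and target_kbps = 600 this returns 5.0, i.e. a 5%
// mismatch, which the tests compare against a per-update maximum.
float PercEncodingRateMismatch(float encoded_kbps, float target_kbps) {
  return 100.0f * std::fabs(encoded_kbps - target_kbps) / target_kbps;
}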
@@ -314,34 +304,38 @@ class VideoProcessorIntegrationTest: public testing::Test {
int num_key_frames) {
int num_dropped_frames = processor_->NumberDroppedFrames();
int num_resize_actions = processor_->NumberSpatialResizes();
- printf("For update #: %d,\n "
+ printf(
+ "For update #: %d,\n "
" Target Bitrate: %d,\n"
" Encoding bitrate: %f,\n"
" Frame rate: %d \n",
update_index, bit_rate_, encoding_bitrate_total_, frame_rate_);
- printf(" Number of frames to approach target rate = %d, \n"
- " Number of dropped frames = %d, \n"
- " Number of spatial resizes = %d, \n",
- num_frames_to_hit_target_, num_dropped_frames, num_resize_actions);
+ printf(
+ " Number of frames to approach target rate = %d, \n"
+ " Number of dropped frames = %d, \n"
+ " Number of spatial resizes = %d, \n",
+ num_frames_to_hit_target_, num_dropped_frames, num_resize_actions);
EXPECT_LE(perc_encoding_rate_mismatch_, max_encoding_rate_mismatch);
if (num_key_frames_ > 0) {
- int perc_key_frame_size_mismatch = 100 * sum_key_frame_size_mismatch_ /
- num_key_frames_;
- printf(" Number of Key frames: %d \n"
- " Key frame rate mismatch: %d \n",
- num_key_frames_, perc_key_frame_size_mismatch);
+ int perc_key_frame_size_mismatch =
+ 100 * sum_key_frame_size_mismatch_ / num_key_frames_;
+ printf(
+ " Number of Key frames: %d \n"
+ " Key frame rate mismatch: %d \n",
+ num_key_frames_, perc_key_frame_size_mismatch);
EXPECT_LE(perc_key_frame_size_mismatch, max_key_frame_size_mismatch);
}
printf("\n");
printf("Rates statistics for Layer data \n");
- for (int i = 0; i < num_temporal_layers_ ; i++) {
+ for (int i = 0; i < num_temporal_layers_; i++) {
printf("Layer #%d \n", i);
- int perc_frame_size_mismatch = 100 * sum_frame_size_mismatch_[i] /
- num_frames_per_update_[i];
- int perc_encoding_rate_mismatch = 100 * fabs(encoding_bitrate_[i] -
- bit_rate_layer_[i]) /
- bit_rate_layer_[i];
- printf(" Target Layer Bit rate: %f \n"
+ int perc_frame_size_mismatch =
+ 100 * sum_frame_size_mismatch_[i] / num_frames_per_update_[i];
+ int perc_encoding_rate_mismatch =
+ 100 * fabs(encoding_bitrate_[i] - bit_rate_layer_[i]) /
+ bit_rate_layer_[i];
+ printf(
+ " Target Layer Bit rate: %f \n"
" Layer frame rate: %f, \n"
" Layer per frame bandwidth: %f, \n"
" Layer Encoding bit rate: %f, \n"
@@ -366,13 +360,13 @@ class VideoProcessorIntegrationTest: public testing::Test {
if (num_temporal_layers_ == 1) {
layer_ = 0;
} else if (num_temporal_layers_ == 2) {
- // layer 0: 0 2 4 ...
- // layer 1: 1 3
- if (frame_number % 2 == 0) {
- layer_ = 0;
- } else {
- layer_ = 1;
- }
+ // layer 0: 0 2 4 ...
+ // layer 1: 1 3
+ if (frame_number % 2 == 0) {
+ layer_ = 0;
+ } else {
+ layer_ = 1;
+ }
} else if (num_temporal_layers_ == 3) {
// layer 0: 0 4 8 ...
// layer 1: 2 6
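The comments above encode a fixed frame-to-layer mapping. A standalone sketch of that mapping, assuming the usual three-layer pattern in which odd frames land in layer 2:

// Illustrative re-statement of the mapping in the comments above; the real
// code lives in LayerIndexForFrame().
int LayerIndexForFrameSketch(int frame_number, int num_temporal_layers) {
  if (num_temporal_layers <= 1)
    return 0;                  // Single layer: every frame is layer 0.
  if (num_temporal_layers == 2)
    return frame_number % 2;   // Even frames -> 0, odd frames -> 1.
  if (frame_number % 4 == 0)
    return 0;                  // 0, 4, 8, ...
  return (frame_number % 2 == 0) ? 1 : 2;  // 2, 6, ... -> 1; odd -> 2.
}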
@@ -391,20 +385,20 @@ class VideoProcessorIntegrationTest: public testing::Test {
// Set the bitrate and frame rate per layer, for up to 3 layers.
void SetLayerRates() {
- assert(num_temporal_layers_<= 3);
+ assert(num_temporal_layers_ <= 3);
for (int i = 0; i < num_temporal_layers_; i++) {
float bit_rate_ratio =
kVp8LayerRateAlloction[num_temporal_layers_ - 1][i];
if (i > 0) {
- float bit_rate_delta_ratio = kVp8LayerRateAlloction
- [num_temporal_layers_ - 1][i] -
+ float bit_rate_delta_ratio =
+ kVp8LayerRateAlloction[num_temporal_layers_ - 1][i] -
kVp8LayerRateAlloction[num_temporal_layers_ - 1][i - 1];
bit_rate_layer_[i] = bit_rate_ * bit_rate_delta_ratio;
} else {
bit_rate_layer_[i] = bit_rate_ * bit_rate_ratio;
}
- frame_rate_layer_[i] = frame_rate_ / static_cast<float>(
- 1 << (num_temporal_layers_ - 1));
+ frame_rate_layer_[i] =
+ frame_rate_ / static_cast<float>(1 << (num_temporal_layers_ - 1));
}
if (num_temporal_layers_ == 3) {
frame_rate_layer_[2] = frame_rate_ / 2.0f;
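A worked example of the allocation above, under the assumption that kVp8LayerRateAlloction stores cumulative per-layer fractions (the i > 0 subtraction implies this); the row values used here are hypothetical:

// Hypothetical two-layer cumulative row {0.6, 1.0} with bit_rate_ = 600 kbps:
float bit_rate_kbps = 600.0f;
float layer0_kbps = bit_rate_kbps * 0.6f;           // 360 kbps (i == 0 case).
float layer1_kbps = bit_rate_kbps * (1.0f - 0.6f);  // 240 kbps (delta case).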
@@ -437,12 +431,12 @@ class VideoProcessorIntegrationTest: public testing::Test {
spatial_resize_on_ = process.spatial_resize_on;
SetUpCodecConfig();
// Update the layers and the codec with the initial rates.
- bit_rate_ = rate_profile.target_bit_rate[0];
+ bit_rate_ = rate_profile.target_bit_rate[0];
frame_rate_ = rate_profile.input_frame_rate[0];
SetLayerRates();
// Set the initial target size for key frame.
- target_size_key_frame_initial_ = 0.5 * kInitialBufferSize *
- bit_rate_layer_[0];
+ target_size_key_frame_initial_ =
+ 0.5 * kInitialBufferSize * bit_rate_layer_[0];
processor_->SetRates(bit_rate_, frame_rate_);
// Process each frame, up to |num_frames|.
int num_frames = rate_profile.num_frames;
@@ -452,7 +446,7 @@ class VideoProcessorIntegrationTest: public testing::Test {
int frame_number = 0;
FrameType frame_type = kVideoFrameDelta;
while (processor_->ProcessFrame(frame_number) &&
- frame_number < num_frames) {
+ frame_number < num_frames) {
// Get the layer index for the frame |frame_number|.
LayerIndexForFrame(frame_number);
// Get the frame_type.
@@ -468,8 +462,7 @@ class VideoProcessorIntegrationTest: public testing::Test {
if (frame_number ==
rate_profile.frame_index_rate_update[update_index + 1]) {
VerifyRateControl(
- update_index,
- rc_metrics[update_index].max_key_frame_size_mismatch,
+ update_index, rc_metrics[update_index].max_key_frame_size_mismatch,
rc_metrics[update_index].max_delta_frame_size_mismatch,
rc_metrics[update_index].max_encoding_rate_mismatch,
rc_metrics[update_index].max_time_hit_target,
@@ -478,23 +471,22 @@ class VideoProcessorIntegrationTest: public testing::Test {
rc_metrics[update_index].num_key_frames);
// Update layer rates and the codec with new rates.
++update_index;
- bit_rate_ = rate_profile.target_bit_rate[update_index];
+ bit_rate_ = rate_profile.target_bit_rate[update_index];
frame_rate_ = rate_profile.input_frame_rate[update_index];
SetLayerRates();
- ResetRateControlMetrics(rate_profile.
- frame_index_rate_update[update_index + 1]);
+ ResetRateControlMetrics(
+ rate_profile.frame_index_rate_update[update_index + 1]);
processor_->SetRates(bit_rate_, frame_rate_);
}
}
- VerifyRateControl(
- update_index,
- rc_metrics[update_index].max_key_frame_size_mismatch,
- rc_metrics[update_index].max_delta_frame_size_mismatch,
- rc_metrics[update_index].max_encoding_rate_mismatch,
- rc_metrics[update_index].max_time_hit_target,
- rc_metrics[update_index].max_num_dropped_frames,
- rc_metrics[update_index].num_spatial_resizes,
- rc_metrics[update_index].num_key_frames);
+ VerifyRateControl(update_index,
+ rc_metrics[update_index].max_key_frame_size_mismatch,
+ rc_metrics[update_index].max_delta_frame_size_mismatch,
+ rc_metrics[update_index].max_encoding_rate_mismatch,
+ rc_metrics[update_index].max_time_hit_target,
+ rc_metrics[update_index].max_num_dropped_frames,
+ rc_metrics[update_index].num_spatial_resizes,
+ rc_metrics[update_index].num_key_frames);
EXPECT_EQ(num_frames, frame_number);
EXPECT_EQ(num_frames + 1, static_cast<int>(stats_.stats_.size()));
@@ -507,16 +499,14 @@ class VideoProcessorIntegrationTest: public testing::Test {
// TODO(marpan): should compute these quality metrics per SetRates update.
webrtc::test::QualityMetricsResult psnr_result, ssim_result;
- EXPECT_EQ(0, webrtc::test::I420MetricsFromFiles(
- config_.input_filename.c_str(),
- config_.output_filename.c_str(),
- config_.codec_settings->width,
- config_.codec_settings->height,
- &psnr_result,
- &ssim_result));
+ EXPECT_EQ(
+ 0, webrtc::test::I420MetricsFromFiles(
+ config_.input_filename.c_str(), config_.output_filename.c_str(),
+ config_.codec_settings->width, config_.codec_settings->height,
+ &psnr_result, &ssim_result));
printf("PSNR avg: %f, min: %f SSIM avg: %f, min: %f\n",
- psnr_result.average, psnr_result.min,
- ssim_result.average, ssim_result.min);
+ psnr_result.average, psnr_result.min, ssim_result.average,
+ ssim_result.min);
stats_.PrintSummary();
EXPECT_GT(psnr_result.average, quality_metrics.minimum_avg_psnr);
EXPECT_GT(psnr_result.min, quality_metrics.minimum_min_psnr);
@@ -549,7 +539,7 @@ void SetCodecParameters(CodecConfigPars* process_settings,
bool spatial_resize_on) {
process_settings->codec_type = codec_type;
process_settings->packet_loss = packet_loss;
- process_settings->key_frame_interval = key_frame_interval;
+ process_settings->key_frame_interval = key_frame_interval;
process_settings->num_temporal_layers = num_temporal_layers;
process_settings->error_concealment_on = error_concealment_on;
process_settings->denoising_on = denoising_on;
@@ -608,9 +598,7 @@ TEST_F(VideoProcessorIntegrationTest, Process0PercentPacketLossVP9) {
// Metrics for rate control.
RateControlMetrics rc_metrics[1];
SetRateControlMetrics(rc_metrics, 0, 0, 40, 20, 10, 20, 0, 1);
- ProcessFramesAndVerify(quality_metrics,
- rate_profile,
- process_settings,
+ ProcessFramesAndVerify(quality_metrics, rate_profile, process_settings,
rc_metrics);
}
@@ -632,13 +620,10 @@ TEST_F(VideoProcessorIntegrationTest, Process5PercentPacketLossVP9) {
// Metrics for rate control.
RateControlMetrics rc_metrics[1];
SetRateControlMetrics(rc_metrics, 0, 0, 40, 20, 10, 20, 0, 1);
- ProcessFramesAndVerify(quality_metrics,
- rate_profile,
- process_settings,
+ ProcessFramesAndVerify(quality_metrics, rate_profile, process_settings,
rc_metrics);
}
-
// VP9: Run with no packet loss, with varying bitrate (3 rate updates):
// low to high to medium. Check that quality and encoder response to the new
// target rate/per-frame bandwidth (for each rate update) is within limits.
@@ -657,15 +642,13 @@ TEST_F(VideoProcessorIntegrationTest, ProcessNoLossChangeBitRateVP9) {
false, true, false);
// Metrics for expected quality.
QualityMetrics quality_metrics;
- SetQualityMetrics(&quality_metrics, 35.9, 30.0, 0.90, 0.85);
+ SetQualityMetrics(&quality_metrics, 35.7, 30.0, 0.90, 0.85);
// Metrics for rate control.
RateControlMetrics rc_metrics[3];
SetRateControlMetrics(rc_metrics, 0, 0, 30, 20, 20, 30, 0, 1);
SetRateControlMetrics(rc_metrics, 1, 2, 0, 20, 20, 60, 0, 0);
SetRateControlMetrics(rc_metrics, 2, 0, 0, 25, 20, 40, 0, 0);
- ProcessFramesAndVerify(quality_metrics,
- rate_profile,
- process_settings,
+ ProcessFramesAndVerify(quality_metrics, rate_profile, process_settings,
rc_metrics);
}
@@ -695,12 +678,10 @@ TEST_F(VideoProcessorIntegrationTest,
SetQualityMetrics(&quality_metrics, 31.5, 18.0, 0.80, 0.44);
// Metrics for rate control.
RateControlMetrics rc_metrics[3];
- SetRateControlMetrics(rc_metrics, 0, 35, 50, 70, 15, 45, 0, 1);
+ SetRateControlMetrics(rc_metrics, 0, 38, 50, 75, 15, 45, 0, 1);
SetRateControlMetrics(rc_metrics, 1, 10, 0, 40, 10, 30, 0, 0);
SetRateControlMetrics(rc_metrics, 2, 5, 0, 30, 5, 20, 0, 0);
- ProcessFramesAndVerify(quality_metrics,
- rate_profile,
- process_settings,
+ ProcessFramesAndVerify(quality_metrics, rate_profile, process_settings,
rc_metrics);
}
@@ -721,19 +702,13 @@ TEST_F(VideoProcessorIntegrationTest, ProcessNoLossDenoiserOnVP9) {
// Metrics for rate control.
RateControlMetrics rc_metrics[1];
SetRateControlMetrics(rc_metrics, 0, 0, 40, 20, 10, 20, 0, 1);
- ProcessFramesAndVerify(quality_metrics,
- rate_profile,
- process_settings,
+ ProcessFramesAndVerify(quality_metrics, rate_profile, process_settings,
rc_metrics);
}
// Run with no packet loss, at low bitrate.
-// spatial_resize is on, and for this low bitrate expect two resizes during the
-// sequence; first resize is 3/4, second is 1/2 (from original).
+// spatial_resize is on; for this low bitrate, expect one resize during the
+// sequence.
// Resize happens on delta frame. Expect only one key frame (first frame).
-// Disable for msan, see
-// https://code.google.com/p/webrtc/issues/detail?id=5110 for details.
-#if !defined(MEMORY_SANITIZER)
TEST_F(VideoProcessorIntegrationTest, ProcessNoLossSpatialResizeFrameDropVP9) {
config_.networking_config.packet_loss_probability = 0;
// Bitrate and frame rate profile.
@@ -743,20 +718,17 @@ TEST_F(VideoProcessorIntegrationTest, ProcessNoLossSpatialResizeFrameDropVP9) {
rate_profile.num_frames = kNbrFramesLong;
// Codec/network settings.
CodecConfigPars process_settings;
- SetCodecParameters(&process_settings, kVideoCodecVP9, 0.0f, -1,
- 1, false, false, true, true);
+ SetCodecParameters(&process_settings, kVideoCodecVP9, 0.0f, -1, 1, false,
+ false, true, true);
// Metrics for expected quality.
QualityMetrics quality_metrics;
- SetQualityMetrics(&quality_metrics, 25.0, 13.0, 0.70, 0.40);
+ SetQualityMetrics(&quality_metrics, 24.0, 13.0, 0.65, 0.37);
// Metrics for rate control.
RateControlMetrics rc_metrics[1];
- SetRateControlMetrics(rc_metrics, 0, 180, 70, 130, 15, 80, 2, 1);
- ProcessFramesAndVerify(quality_metrics,
- rate_profile,
- process_settings,
+ SetRateControlMetrics(rc_metrics, 0, 228, 70, 160, 15, 80, 1, 1);
+ ProcessFramesAndVerify(quality_metrics, rate_profile, process_settings,
rc_metrics);
}
-#endif
// TODO(marpan): Add temporal layer test for VP9, once changes are in
// vp9 wrapper for this.
@@ -780,9 +752,7 @@ TEST_F(VideoProcessorIntegrationTest, ProcessZeroPacketLoss) {
// Metrics for rate control.
RateControlMetrics rc_metrics[1];
SetRateControlMetrics(rc_metrics, 0, 0, 40, 20, 10, 15, 0, 1);
- ProcessFramesAndVerify(quality_metrics,
- rate_profile,
- process_settings,
+ ProcessFramesAndVerify(quality_metrics, rate_profile, process_settings,
rc_metrics);
}
@@ -804,9 +774,7 @@ TEST_F(VideoProcessorIntegrationTest, Process5PercentPacketLoss) {
// Metrics for rate control.
RateControlMetrics rc_metrics[1];
SetRateControlMetrics(rc_metrics, 0, 0, 40, 20, 10, 15, 0, 1);
- ProcessFramesAndVerify(quality_metrics,
- rate_profile,
- process_settings,
+ ProcessFramesAndVerify(quality_metrics, rate_profile, process_settings,
rc_metrics);
}
@@ -828,9 +796,7 @@ TEST_F(VideoProcessorIntegrationTest, Process10PercentPacketLoss) {
// Metrics for rate control.
RateControlMetrics rc_metrics[1];
SetRateControlMetrics(rc_metrics, 0, 0, 40, 20, 10, 15, 0, 1);
- ProcessFramesAndVerify(quality_metrics,
- rate_profile,
- process_settings,
+ ProcessFramesAndVerify(quality_metrics, rate_profile, process_settings,
rc_metrics);
}
@@ -847,8 +813,13 @@ TEST_F(VideoProcessorIntegrationTest, Process10PercentPacketLoss) {
// low to high to medium. Check that quality and encoder response to the new
// target rate/per-frame bandwidth (for each rate update) is within limits.
// One key frame (first frame only) in sequence.
-TEST_F(VideoProcessorIntegrationTest,
- DISABLED_ON_ANDROID(ProcessNoLossChangeBitRateVP8)) {
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_ProcessNoLossChangeBitRateVP8 \
+ DISABLED_ProcessNoLossChangeBitRateVP8
+#else
+#define MAYBE_ProcessNoLossChangeBitRateVP8 ProcessNoLossChangeBitRateVP8
+#endif
+TEST_F(VideoProcessorIntegrationTest, MAYBE_ProcessNoLossChangeBitRateVP8) {
// Bitrate and frame rate profile.
RateProfile rate_profile;
SetRateProfilePars(&rate_profile, 0, 200, 30, 0);
@@ -868,9 +839,7 @@ TEST_F(VideoProcessorIntegrationTest,
SetRateControlMetrics(rc_metrics, 0, 0, 45, 20, 10, 15, 0, 1);
SetRateControlMetrics(rc_metrics, 1, 0, 0, 25, 20, 10, 0, 0);
SetRateControlMetrics(rc_metrics, 2, 0, 0, 25, 15, 10, 0, 0);
- ProcessFramesAndVerify(quality_metrics,
- rate_profile,
- process_settings,
+ ProcessFramesAndVerify(quality_metrics, rate_profile, process_settings,
rc_metrics);
}
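The MAYBE_ renaming above (and in the tests that follow) is the stock gtest idiom for platform-conditional disabling: any test whose generated name starts with DISABLED_ is compiled but skipped at run time. The generic shape of the pattern, with a hypothetical test name:

#include "testing/gtest/include/gtest/gtest.h"

#if defined(WEBRTC_ANDROID)
#define MAYBE_SomeTest DISABLED_SomeTest  // gtest skips DISABLED_* tests.
#else
#define MAYBE_SomeTest SomeTest
#endif

TEST(SomeSuite, MAYBE_SomeTest) {
  // Compiled on every platform; on Android it is reported as disabled
  // instead of being executed.
}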
@@ -881,8 +850,15 @@ TEST_F(VideoProcessorIntegrationTest,
// for the rate control metrics can be lower. One key frame (first frame only).
// Note: quality after update should be higher but we currently compute quality
// metrics averaged over whole sequence run.
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_ProcessNoLossChangeFrameRateFrameDropVP8 \
+ DISABLED_ProcessNoLossChangeFrameRateFrameDropVP8
+#else
+#define MAYBE_ProcessNoLossChangeFrameRateFrameDropVP8 \
+ ProcessNoLossChangeFrameRateFrameDropVP8
+#endif
TEST_F(VideoProcessorIntegrationTest,
- DISABLED_ON_ANDROID(ProcessNoLossChangeFrameRateFrameDropVP8)) {
+ MAYBE_ProcessNoLossChangeFrameRateFrameDropVP8) {
config_.networking_config.packet_loss_probability = 0;
// Bitrate and frame rate profile.
RateProfile rate_profile;
@@ -903,16 +879,21 @@ TEST_F(VideoProcessorIntegrationTest,
SetRateControlMetrics(rc_metrics, 0, 40, 20, 75, 15, 60, 0, 1);
SetRateControlMetrics(rc_metrics, 1, 10, 0, 25, 10, 35, 0, 0);
SetRateControlMetrics(rc_metrics, 2, 0, 0, 20, 10, 15, 0, 0);
- ProcessFramesAndVerify(quality_metrics,
- rate_profile,
- process_settings,
+ ProcessFramesAndVerify(quality_metrics, rate_profile, process_settings,
rc_metrics);
}
// Run with no packet loss, at low bitrate. During this time we should've
// resized once. Expect 2 key frames generated (first and one for resize).
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_ProcessNoLossSpatialResizeFrameDropVP8 \
+ DISABLED_ProcessNoLossSpatialResizeFrameDropVP8
+#else
+#define MAYBE_ProcessNoLossSpatialResizeFrameDropVP8 \
+ ProcessNoLossSpatialResizeFrameDropVP8
+#endif
TEST_F(VideoProcessorIntegrationTest,
- DISABLED_ON_ANDROID(ProcessNoLossSpatialResizeFrameDropVP8)) {
+ MAYBE_ProcessNoLossSpatialResizeFrameDropVP8) {
config_.networking_config.packet_loss_probability = 0;
// Bitrate and frame rate profile.
RateProfile rate_profile;
@@ -921,17 +902,15 @@ TEST_F(VideoProcessorIntegrationTest,
rate_profile.num_frames = kNbrFramesLong;
// Codec/network settings.
CodecConfigPars process_settings;
- SetCodecParameters(&process_settings, kVideoCodecVP8, 0.0f, -1,
- 1, false, true, true, true);
+ SetCodecParameters(&process_settings, kVideoCodecVP8, 0.0f, -1, 1, false,
+ true, true, true);
// Metrics for expected quality.
QualityMetrics quality_metrics;
SetQualityMetrics(&quality_metrics, 25.0, 15.0, 0.70, 0.40);
// Metrics for rate control.
RateControlMetrics rc_metrics[1];
SetRateControlMetrics(rc_metrics, 0, 160, 60, 120, 20, 70, 1, 2);
- ProcessFramesAndVerify(quality_metrics,
- rate_profile,
- process_settings,
+ ProcessFramesAndVerify(quality_metrics, rate_profile, process_settings,
rc_metrics);
}
@@ -940,8 +919,13 @@ TEST_F(VideoProcessorIntegrationTest,
// encoding rate mismatch are applied to each layer.
// No dropped frames in this test, and internal spatial resizer is off.
// One key frame (first frame only) in sequence, so no spatial resizing.
-TEST_F(VideoProcessorIntegrationTest,
- DISABLED_ON_ANDROID(ProcessNoLossTemporalLayersVP8)) {
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_ProcessNoLossTemporalLayersVP8 \
+ DISABLED_ProcessNoLossTemporalLayersVP8
+#else
+#define MAYBE_ProcessNoLossTemporalLayersVP8 ProcessNoLossTemporalLayersVP8
+#endif
+TEST_F(VideoProcessorIntegrationTest, MAYBE_ProcessNoLossTemporalLayersVP8) {
config_.networking_config.packet_loss_probability = 0;
// Bitrate and frame rate profile.
RateProfile rate_profile;
@@ -960,9 +944,7 @@ TEST_F(VideoProcessorIntegrationTest,
RateControlMetrics rc_metrics[2];
SetRateControlMetrics(rc_metrics, 0, 0, 20, 30, 10, 10, 0, 1);
SetRateControlMetrics(rc_metrics, 1, 0, 0, 30, 15, 10, 0, 0);
- ProcessFramesAndVerify(quality_metrics,
- rate_profile,
- process_settings,
+ ProcessFramesAndVerify(quality_metrics, rate_profile, process_settings,
rc_metrics);
}
} // namespace webrtc
diff --git a/webrtc/modules/video_coding/codecs/test/videoprocessor_unittest.cc b/webrtc/modules/video_coding/codecs/test/videoprocessor_unittest.cc
index 88b5467f1f..148d8dc74a 100644
--- a/webrtc/modules/video_coding/codecs/test/videoprocessor_unittest.cc
+++ b/webrtc/modules/video_coding/codecs/test/videoprocessor_unittest.cc
@@ -10,10 +10,10 @@
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
-#include "webrtc/modules/video_coding/codecs/interface/mock/mock_video_codec_interface.h"
+#include "webrtc/modules/video_coding/include/mock/mock_video_codec_interface.h"
#include "webrtc/modules/video_coding/codecs/test/mock/mock_packet_manipulator.h"
#include "webrtc/modules/video_coding/codecs/test/videoprocessor.h"
-#include "webrtc/modules/video_coding/main/interface/video_coding.h"
+#include "webrtc/modules/video_coding/include/video_coding.h"
#include "webrtc/test/testsupport/mock/mock_frame_reader.h"
#include "webrtc/test/testsupport/mock/mock_frame_writer.h"
#include "webrtc/test/testsupport/packet_reader.h"
@@ -29,7 +29,7 @@ namespace test {
// Very basic testing for VideoProcessor. It's mostly tested by running the
// video_quality_measurement program.
-class VideoProcessorTest: public testing::Test {
+class VideoProcessorTest : public testing::Test {
protected:
MockVideoEncoder encoder_mock_;
MockVideoDecoder decoder_mock_;
@@ -53,44 +53,34 @@ class VideoProcessorTest: public testing::Test {
void TearDown() {}
void ExpectInit() {
- EXPECT_CALL(encoder_mock_, InitEncode(_, _, _))
- .Times(1);
+ EXPECT_CALL(encoder_mock_, InitEncode(_, _, _)).Times(1);
EXPECT_CALL(encoder_mock_, RegisterEncodeCompleteCallback(_))
- .Times(AtLeast(1));
- EXPECT_CALL(decoder_mock_, InitDecode(_, _))
- .Times(1);
+ .Times(AtLeast(1));
+ EXPECT_CALL(decoder_mock_, InitDecode(_, _)).Times(1);
EXPECT_CALL(decoder_mock_, RegisterDecodeCompleteCallback(_))
- .Times(AtLeast(1));
- EXPECT_CALL(frame_reader_mock_, NumberOfFrames())
- .WillOnce(Return(1));
- EXPECT_CALL(frame_reader_mock_, FrameLength())
- .WillOnce(Return(152064));
+ .Times(AtLeast(1));
+ EXPECT_CALL(frame_reader_mock_, NumberOfFrames()).WillOnce(Return(1));
+ EXPECT_CALL(frame_reader_mock_, FrameLength()).WillOnce(Return(152064));
}
};
TEST_F(VideoProcessorTest, Init) {
ExpectInit();
- VideoProcessorImpl video_processor(&encoder_mock_, &decoder_mock_,
- &frame_reader_mock_,
- &frame_writer_mock_,
- &packet_manipulator_mock_, config_,
- &stats_);
+ VideoProcessorImpl video_processor(
+ &encoder_mock_, &decoder_mock_, &frame_reader_mock_, &frame_writer_mock_,
+ &packet_manipulator_mock_, config_, &stats_);
ASSERT_TRUE(video_processor.Init());
}
TEST_F(VideoProcessorTest, ProcessFrame) {
ExpectInit();
- EXPECT_CALL(encoder_mock_, Encode(_, _, _))
- .Times(1);
- EXPECT_CALL(frame_reader_mock_, ReadFrame(_))
- .WillOnce(Return(true));
+ EXPECT_CALL(encoder_mock_, Encode(_, _, _)).Times(1);
+ EXPECT_CALL(frame_reader_mock_, ReadFrame(_)).WillOnce(Return(true));
// Since we don't return any callback from the mock, the decoder will never
// get past initialization...
- VideoProcessorImpl video_processor(&encoder_mock_, &decoder_mock_,
- &frame_reader_mock_,
- &frame_writer_mock_,
- &packet_manipulator_mock_, config_,
- &stats_);
+ VideoProcessorImpl video_processor(
+ &encoder_mock_, &decoder_mock_, &frame_reader_mock_, &frame_writer_mock_,
+ &packet_manipulator_mock_, config_, &stats_);
ASSERT_TRUE(video_processor.Init());
video_processor.ProcessFrame(0);
}
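The reflowed expectations above use the usual gmock chaining: EXPECT_CALL fixes the expected call count and, where needed, the canned return value in one statement. A small sketch against the frame reader mock included by this test (illustrative helper, assuming the mock class is named MockFrameReader):

using ::testing::_;
using ::testing::Return;

// Expect exactly one ReadFrame() call and make it report success.
void ExpectSingleSuccessfulRead(MockFrameReader* frame_reader_mock) {
  EXPECT_CALL(*frame_reader_mock, ReadFrame(_)).WillOnce(Return(true));
}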
diff --git a/webrtc/modules/video_coding/codecs/tools/video_quality_measurement.cc b/webrtc/modules/video_coding/codecs/tools/video_quality_measurement.cc
index 22be5a83cc..37fad483f7 100644
--- a/webrtc/modules/video_coding/codecs/tools/video_quality_measurement.cc
+++ b/webrtc/modules/video_coding/codecs/tools/video_quality_measurement.cc
@@ -16,7 +16,7 @@
#include <sys/stat.h> // To check for directory existence.
#ifndef S_ISDIR // Not defined in stat.h on Windows.
-#define S_ISDIR(mode) (((mode) & S_IFMT) == S_IFDIR)
+#define S_ISDIR(mode) (((mode)&S_IFMT) == S_IFDIR)
#endif
#include "gflags/gflags.h"
@@ -26,7 +26,7 @@
#include "webrtc/modules/video_coding/codecs/test/stats.h"
#include "webrtc/modules/video_coding/codecs/test/videoprocessor.h"
#include "webrtc/modules/video_coding/codecs/vp8/include/vp8.h"
-#include "webrtc/modules/video_coding/main/interface/video_coding.h"
+#include "webrtc/modules/video_coding/include/video_coding.h"
#include "webrtc/system_wrappers/include/trace.h"
#include "webrtc/test/testsupport/frame_reader.h"
#include "webrtc/test/testsupport/frame_writer.h"
@@ -34,68 +34,102 @@
#include "webrtc/test/testsupport/packet_reader.h"
DEFINE_string(test_name, "Quality test", "The name of the test to run. ");
-DEFINE_string(test_description, "", "A more detailed description about what "
+DEFINE_string(test_description,
+ "",
+ "A more detailed description about what "
"the current test is about.");
-DEFINE_string(input_filename, "", "Input file. "
+DEFINE_string(input_filename,
+ "",
+ "Input file. "
"The source video file to be encoded and decoded. Must be in "
".yuv format");
DEFINE_int32(width, -1, "Width in pixels of the frames in the input file.");
DEFINE_int32(height, -1, "Height in pixels of the frames in the input file.");
-DEFINE_int32(framerate, 30, "Frame rate of the input file, in FPS "
+DEFINE_int32(framerate,
+ 30,
+ "Frame rate of the input file, in FPS "
"(frames-per-second). ");
-DEFINE_string(output_dir, ".", "Output directory. "
+DEFINE_string(output_dir,
+ ".",
+ "Output directory. "
"The directory where the output file will be put. Must already "
"exist.");
-DEFINE_bool(use_single_core, false, "Force using a single core. If set to "
+DEFINE_bool(use_single_core,
+ false,
+ "Force using a single core. If set to "
"true, only one core will be used for processing. Using a single "
"core is necessary to get a deterministic behavior for the"
"encoded frames - using multiple cores will produce different "
"encoded frames since multiple cores are competing to consume the "
"byte budget for each frame in parallel. If set to false, "
"the maximum detected number of cores will be used. ");
-DEFINE_bool(disable_fixed_random_seed , false, "Set this flag to disable the"
+DEFINE_bool(disable_fixed_random_seed,
+ false,
+ "Set this flag to disable the"
"usage of a fixed random seed for the random generator used "
"for packet loss. Disabling this will cause consecutive runs "
"loose packets at different locations, which is bad for "
"reproducibility.");
-DEFINE_string(output_filename, "", "Output file. "
+DEFINE_string(output_filename,
+ "",
+ "Output file. "
"The name of the output video file resulting of the processing "
"of the source file. By default this is the same name as the "
"input file with '_out' appended before the extension.");
DEFINE_int32(bitrate, 500, "Bit rate in kilobits/second.");
-DEFINE_int32(keyframe_interval, 0, "Forces a keyframe every Nth frame. "
+DEFINE_int32(keyframe_interval,
+ 0,
+ "Forces a keyframe every Nth frame. "
"0 means the encoder decides when to insert keyframes. Note that "
"the encoder may create a keyframe in other locations in addition "
"to the interval that is set using this parameter.");
-DEFINE_int32(temporal_layers, 0, "The number of temporal layers to use "
+DEFINE_int32(temporal_layers,
+ 0,
+ "The number of temporal layers to use "
"(VP8 specific codec setting). Must be 0-4.");
-DEFINE_int32(packet_size, 1500, "Simulated network packet size in bytes (MTU). "
+DEFINE_int32(packet_size,
+ 1500,
+ "Simulated network packet size in bytes (MTU). "
"Used for packet loss simulation.");
-DEFINE_int32(max_payload_size, 1440, "Max payload size in bytes for the "
+DEFINE_int32(max_payload_size,
+ 1440,
+ "Max payload size in bytes for the "
"encoder.");
-DEFINE_string(packet_loss_mode, "uniform", "Packet loss mode. Two different "
+DEFINE_string(packet_loss_mode,
+ "uniform",
+ "Packet loss mode. Two different "
"packet loss models are supported: uniform or burst. This "
"setting has no effect unless packet_loss_rate is >0. ");
-DEFINE_double(packet_loss_probability, 0.0, "Packet loss probability. A value "
+DEFINE_double(packet_loss_probability,
+ 0.0,
+ "Packet loss probability. A value "
"between 0.0 and 1.0 that defines the probability of a packet "
"being lost. 0.1 means 10% and so on.");
-DEFINE_int32(packet_loss_burst_length, 1, "Packet loss burst length. Defines "
+DEFINE_int32(packet_loss_burst_length,
+ 1,
+ "Packet loss burst length. Defines "
"how many packets will be lost in a burst when a packet has been "
"decided to be lost. Must be >=1.");
-DEFINE_bool(csv, false, "CSV output. Enabling this will output all frame "
+DEFINE_bool(csv,
+ false,
+ "CSV output. Enabling this will output all frame "
"statistics at the end of execution. Recommended to run combined "
"with --noverbose to avoid mixing output.");
-DEFINE_bool(python, false, "Python output. Enabling this will output all frame "
+DEFINE_bool(python,
+ false,
+ "Python output. Enabling this will output all frame "
"statistics as a Python script at the end of execution. "
"Recommended to run combine with --noverbose to avoid mixing "
"output.");
-DEFINE_bool(verbose, true, "Verbose mode. Prints a lot of debugging info. "
+DEFINE_bool(verbose,
+ true,
+ "Verbose mode. Prints a lot of debugging info. "
"Suitable for tracking progress but not for capturing output. "
"Disable with --noverbose flag.");
// Custom log method that only prints if the verbose flag is given.
// Supports all the standard printf parameters and formatting (just forwarded).
-int Log(const char *format, ...) {
+int Log(const char* format, ...) {
int result = 0;
if (FLAGS_verbose) {
va_list args;
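The remainder of Log()'s body falls outside this hunk; the conventional va_list forwarding such a wrapper performs looks like the following sketch, assuming output goes to stdout and is gated on the verbose flag as above:

#include <cstdarg>
#include <cstdio>

int LogSketch(const char* format, ...) {
  int result = 0;
  if (FLAGS_verbose) {  // Same gate as Log() above.
    va_list args;
    va_start(args, format);
    result = vprintf(format, args);  // Forward all arguments to vprintf.
    va_end(args);
  }
  return result;
}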
@@ -132,9 +166,9 @@ int HandleCommandLineFlags(webrtc::test::TestConfig* config) {
// Verify the output dir exists.
struct stat dir_info;
if (!(stat(FLAGS_output_dir.c_str(), &dir_info) == 0 &&
- S_ISDIR(dir_info.st_mode))) {
+ S_ISDIR(dir_info.st_mode))) {
fprintf(stderr, "Cannot find output directory: %s\n",
- FLAGS_output_dir.c_str());
+ FLAGS_output_dir.c_str());
return 3;
}
config->output_dir = FLAGS_output_dir;
@@ -148,16 +182,16 @@ int HandleCommandLineFlags(webrtc::test::TestConfig* config) {
startIndex = 0;
}
FLAGS_output_filename =
- FLAGS_input_filename.substr(startIndex,
- FLAGS_input_filename.find_last_of(".")
- - startIndex) + "_out.yuv";
+ FLAGS_input_filename.substr(
+ startIndex, FLAGS_input_filename.find_last_of(".") - startIndex) +
+ "_out.yuv";
}
// Verify output file can be written.
if (FLAGS_output_dir == ".") {
config->output_filename = FLAGS_output_filename;
} else {
- config->output_filename = FLAGS_output_dir + "/"+ FLAGS_output_filename;
+ config->output_filename = FLAGS_output_dir + "/" + FLAGS_output_filename;
}
test_file = fopen(config->output_filename.c_str(), "wb");
if (test_file == NULL) {
@@ -232,27 +266,32 @@ int HandleCommandLineFlags(webrtc::test::TestConfig* config) {
// Check packet loss settings
if (FLAGS_packet_loss_mode != "uniform" &&
FLAGS_packet_loss_mode != "burst") {
- fprintf(stderr, "Unsupported packet loss mode, must be 'uniform' or "
+ fprintf(stderr,
+ "Unsupported packet loss mode, must be 'uniform' or "
"'burst'\n.");
return 10;
}
config->networking_config.packet_loss_mode = webrtc::test::kUniform;
if (FLAGS_packet_loss_mode == "burst") {
- config->networking_config.packet_loss_mode = webrtc::test::kBurst;
+ config->networking_config.packet_loss_mode = webrtc::test::kBurst;
}
if (FLAGS_packet_loss_probability < 0.0 ||
FLAGS_packet_loss_probability > 1.0) {
- fprintf(stderr, "Invalid packet loss probability. Must be 0.0 - 1.0, "
- "was: %f\n", FLAGS_packet_loss_probability);
+ fprintf(stderr,
+ "Invalid packet loss probability. Must be 0.0 - 1.0, "
+ "was: %f\n",
+ FLAGS_packet_loss_probability);
return 11;
}
config->networking_config.packet_loss_probability =
FLAGS_packet_loss_probability;
if (FLAGS_packet_loss_burst_length < 1) {
- fprintf(stderr, "Invalid packet loss burst length, must be >=1, "
- "was: %d\n", FLAGS_packet_loss_burst_length);
+ fprintf(stderr,
+ "Invalid packet loss burst length, must be >=1, "
+ "was: %d\n",
+ FLAGS_packet_loss_burst_length);
return 12;
}
config->networking_config.packet_loss_burst_length =
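The three knobs validated above fully specify the loss model. As a reading aid, a minimal sketch of how they could drive a per-packet drop decision (illustrative only: the real logic lives in webrtc::test::PacketManipulator, and the NetworkingConfig type name is inferred from the field accesses above):

#include <cstdlib>

struct LossState {
  int burst_remaining = 0;  // packets still to drop in the current burst
};

// Returns true if the next packet should be dropped.
bool ShouldDropPacket(const webrtc::test::NetworkingConfig& net,
                      LossState* state) {
  if (state->burst_remaining > 0) {  // finish an ongoing burst first
    --state->burst_remaining;
    return true;
  }
  double r = static_cast<double>(rand()) / RAND_MAX;
  if (r < net.packet_loss_probability) {
    if (net.packet_loss_mode == webrtc::test::kBurst)
      state->burst_remaining = net.packet_loss_burst_length - 1;
    return true;  // in kUniform mode each packet is dropped independently
  }
  return false;
}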
@@ -264,10 +303,9 @@ int HandleCommandLineFlags(webrtc::test::TestConfig* config) {
void CalculateSsimVideoMetrics(webrtc::test::TestConfig* config,
webrtc::test::QualityMetricsResult* result) {
Log("Calculating SSIM...\n");
- I420SSIMFromFiles(config->input_filename.c_str(),
- config->output_filename.c_str(),
- config->codec_settings->width,
- config->codec_settings->height, result);
+ I420SSIMFromFiles(
+ config->input_filename.c_str(), config->output_filename.c_str(),
+ config->codec_settings->width, config->codec_settings->height, result);
Log(" Average: %3.2f\n", result->average);
Log(" Min : %3.2f (frame %d)\n", result->min, result->min_frame_number);
Log(" Max : %3.2f (frame %d)\n", result->max, result->max_frame_number);
@@ -276,10 +314,9 @@ void CalculateSsimVideoMetrics(webrtc::test::TestConfig* config,
void CalculatePsnrVideoMetrics(webrtc::test::TestConfig* config,
webrtc::test::QualityMetricsResult* result) {
Log("Calculating PSNR...\n");
- I420PSNRFromFiles(config->input_filename.c_str(),
- config->output_filename.c_str(),
- config->codec_settings->width,
- config->codec_settings->height, result);
+ I420PSNRFromFiles(
+ config->input_filename.c_str(), config->output_filename.c_str(),
+ config->codec_settings->width, config->codec_settings->height, result);
Log(" Average: %3.2f\n", result->average);
Log(" Min : %3.2f (frame %d)\n", result->min, result->min_frame_number);
Log(" Max : %3.2f (frame %d)\n", result->max, result->max_frame_number);
@@ -309,9 +346,11 @@ void PrintConfigurationSummary(const webrtc::test::TestConfig& config) {
void PrintCsvOutput(const webrtc::test::Stats& stats,
const webrtc::test::QualityMetricsResult& ssim_result,
const webrtc::test::QualityMetricsResult& psnr_result) {
- Log("\nCSV output (recommended to run with --noverbose to skip the "
- "above output)\n");
- printf("frame_number encoding_successful decoding_successful "
+ Log(
+ "\nCSV output (recommended to run with --noverbose to skip the "
+ "above output)\n");
+ printf(
+ "frame_number encoding_successful decoding_successful "
"encode_return_code decode_return_code "
"encode_time_in_us decode_time_in_us "
"bit_rate_in_kbps encoded_frame_length_in_bytes frame_type "
@@ -322,22 +361,13 @@ void PrintCsvOutput(const webrtc::test::Stats& stats,
const webrtc::test::FrameStatistic& f = stats.stats_[i];
const webrtc::test::FrameResult& ssim = ssim_result.frames[i];
const webrtc::test::FrameResult& psnr = psnr_result.frames[i];
- printf("%4d, %d, %d, %2d, %2d, %6d, %6d, %5d, %7" PRIuS ", %d, %2d, %2"
- PRIuS ", %5.3f, %5.2f\n",
- f.frame_number,
- f.encoding_successful,
- f.decoding_successful,
- f.encode_return_code,
- f.decode_return_code,
- f.encode_time_in_us,
- f.decode_time_in_us,
- f.bit_rate_in_kbps,
- f.encoded_frame_length_in_bytes,
- f.frame_type,
- f.packets_dropped,
- f.total_packets,
- ssim.value,
- psnr.value);
+ printf("%4d, %d, %d, %2d, %2d, %6d, %6d, %5d, %7" PRIuS
+ ", %d, %2d, %2" PRIuS ", %5.3f, %5.2f\n",
+ f.frame_number, f.encoding_successful, f.decoding_successful,
+ f.encode_return_code, f.decode_return_code, f.encode_time_in_us,
+ f.decode_time_in_us, f.bit_rate_in_kbps,
+ f.encoded_frame_length_in_bytes, f.frame_type, f.packets_dropped,
+ f.total_packets, ssim.value, psnr.value);
}
}
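The format strings above are broken around PRIuS because it is a macro, not part of the literal: it expands to the printf length specifier for size_t and is joined back in by adjacent-literal concatenation. A self-contained illustration (the stand-in definition below mirrors what WebRTC's format_macros.h provides on POSIX):

#include <cstddef>
#include <cstdio>

#define PRIuS "zu"  // stand-in; WebRTC defines this in its format_macros.h

int main() {
  size_t length = 42;
  printf("encoded_frame_length_in_bytes: %" PRIuS "\n", length);
  return 0;
}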
@@ -345,91 +375,85 @@ void PrintPythonOutput(const webrtc::test::TestConfig& config,
const webrtc::test::Stats& stats,
const webrtc::test::QualityMetricsResult& ssim_result,
const webrtc::test::QualityMetricsResult& psnr_result) {
- Log("\nPython output (recommended to run with --noverbose to skip the "
- "above output)\n");
- printf("test_configuration = ["
- "{'name': 'name', 'value': '%s'},\n"
- "{'name': 'description', 'value': '%s'},\n"
- "{'name': 'test_number', 'value': '%d'},\n"
- "{'name': 'input_filename', 'value': '%s'},\n"
- "{'name': 'output_filename', 'value': '%s'},\n"
- "{'name': 'output_dir', 'value': '%s'},\n"
- "{'name': 'packet_size_in_bytes', 'value': '%" PRIuS "'},\n"
- "{'name': 'max_payload_size_in_bytes', 'value': '%" PRIuS "'},\n"
- "{'name': 'packet_loss_mode', 'value': '%s'},\n"
- "{'name': 'packet_loss_probability', 'value': '%f'},\n"
- "{'name': 'packet_loss_burst_length', 'value': '%d'},\n"
- "{'name': 'exclude_frame_types', 'value': '%s'},\n"
- "{'name': 'frame_length_in_bytes', 'value': '%" PRIuS "'},\n"
- "{'name': 'use_single_core', 'value': '%s'},\n"
- "{'name': 'keyframe_interval;', 'value': '%d'},\n"
- "{'name': 'video_codec_type', 'value': '%s'},\n"
- "{'name': 'width', 'value': '%d'},\n"
- "{'name': 'height', 'value': '%d'},\n"
- "{'name': 'bit_rate_in_kbps', 'value': '%d'},\n"
- "]\n",
- config.name.c_str(),
- config.description.c_str(),
- config.test_number,
- config.input_filename.c_str(),
- config.output_filename.c_str(),
- config.output_dir.c_str(),
- config.networking_config.packet_size_in_bytes,
- config.networking_config.max_payload_size_in_bytes,
- PacketLossModeToStr(config.networking_config.packet_loss_mode),
- config.networking_config.packet_loss_probability,
- config.networking_config.packet_loss_burst_length,
- ExcludeFrameTypesToStr(config.exclude_frame_types),
- config.frame_length_in_bytes,
- config.use_single_core ? "True " : "False",
- config.keyframe_interval,
- webrtc::test::VideoCodecTypeToStr(config.codec_settings->codecType),
- config.codec_settings->width,
- config.codec_settings->height,
- config.codec_settings->startBitrate);
- printf("frame_data_types = {"
- "'frame_number': ('number', 'Frame number'),\n"
- "'encoding_successful': ('boolean', 'Encoding successful?'),\n"
- "'decoding_successful': ('boolean', 'Decoding successful?'),\n"
- "'encode_time': ('number', 'Encode time (us)'),\n"
- "'decode_time': ('number', 'Decode time (us)'),\n"
- "'encode_return_code': ('number', 'Encode return code'),\n"
- "'decode_return_code': ('number', 'Decode return code'),\n"
- "'bit_rate': ('number', 'Bit rate (kbps)'),\n"
- "'encoded_frame_length': "
- "('number', 'Encoded frame length (bytes)'),\n"
- "'frame_type': ('string', 'Frame type'),\n"
- "'packets_dropped': ('number', 'Packets dropped'),\n"
- "'total_packets': ('number', 'Total packets'),\n"
- "'ssim': ('number', 'SSIM'),\n"
- "'psnr': ('number', 'PSNR (dB)'),\n"
- "}\n");
+ Log(
+ "\nPython output (recommended to run with --noverbose to skip the "
+ "above output)\n");
+ printf(
+ "test_configuration = ["
+ "{'name': 'name', 'value': '%s'},\n"
+ "{'name': 'description', 'value': '%s'},\n"
+ "{'name': 'test_number', 'value': '%d'},\n"
+ "{'name': 'input_filename', 'value': '%s'},\n"
+ "{'name': 'output_filename', 'value': '%s'},\n"
+ "{'name': 'output_dir', 'value': '%s'},\n"
+ "{'name': 'packet_size_in_bytes', 'value': '%" PRIuS
+ "'},\n"
+ "{'name': 'max_payload_size_in_bytes', 'value': '%" PRIuS
+ "'},\n"
+ "{'name': 'packet_loss_mode', 'value': '%s'},\n"
+ "{'name': 'packet_loss_probability', 'value': '%f'},\n"
+ "{'name': 'packet_loss_burst_length', 'value': '%d'},\n"
+ "{'name': 'exclude_frame_types', 'value': '%s'},\n"
+ "{'name': 'frame_length_in_bytes', 'value': '%" PRIuS
+ "'},\n"
+ "{'name': 'use_single_core', 'value': '%s'},\n"
+      "{'name': 'keyframe_interval', 'value': '%d'},\n"
+ "{'name': 'video_codec_type', 'value': '%s'},\n"
+ "{'name': 'width', 'value': '%d'},\n"
+ "{'name': 'height', 'value': '%d'},\n"
+ "{'name': 'bit_rate_in_kbps', 'value': '%d'},\n"
+ "]\n",
+ config.name.c_str(), config.description.c_str(), config.test_number,
+ config.input_filename.c_str(), config.output_filename.c_str(),
+ config.output_dir.c_str(), config.networking_config.packet_size_in_bytes,
+ config.networking_config.max_payload_size_in_bytes,
+ PacketLossModeToStr(config.networking_config.packet_loss_mode),
+ config.networking_config.packet_loss_probability,
+ config.networking_config.packet_loss_burst_length,
+ ExcludeFrameTypesToStr(config.exclude_frame_types),
+ config.frame_length_in_bytes, config.use_single_core ? "True " : "False",
+ config.keyframe_interval,
+ webrtc::test::VideoCodecTypeToStr(config.codec_settings->codecType),
+ config.codec_settings->width, config.codec_settings->height,
+ config.codec_settings->startBitrate);
+ printf(
+ "frame_data_types = {"
+ "'frame_number': ('number', 'Frame number'),\n"
+ "'encoding_successful': ('boolean', 'Encoding successful?'),\n"
+ "'decoding_successful': ('boolean', 'Decoding successful?'),\n"
+ "'encode_time': ('number', 'Encode time (us)'),\n"
+ "'decode_time': ('number', 'Decode time (us)'),\n"
+ "'encode_return_code': ('number', 'Encode return code'),\n"
+ "'decode_return_code': ('number', 'Decode return code'),\n"
+ "'bit_rate': ('number', 'Bit rate (kbps)'),\n"
+ "'encoded_frame_length': "
+ "('number', 'Encoded frame length (bytes)'),\n"
+ "'frame_type': ('string', 'Frame type'),\n"
+ "'packets_dropped': ('number', 'Packets dropped'),\n"
+ "'total_packets': ('number', 'Total packets'),\n"
+ "'ssim': ('number', 'SSIM'),\n"
+ "'psnr': ('number', 'PSNR (dB)'),\n"
+ "}\n");
printf("frame_data = [");
for (unsigned int i = 0; i < stats.stats_.size(); ++i) {
const webrtc::test::FrameStatistic& f = stats.stats_[i];
const webrtc::test::FrameResult& ssim = ssim_result.frames[i];
const webrtc::test::FrameResult& psnr = psnr_result.frames[i];
- printf("{'frame_number': %d, "
- "'encoding_successful': %s, 'decoding_successful': %s, "
- "'encode_time': %d, 'decode_time': %d, "
- "'encode_return_code': %d, 'decode_return_code': %d, "
- "'bit_rate': %d, 'encoded_frame_length': %" PRIuS ", "
- "'frame_type': %s, 'packets_dropped': %d, "
- "'total_packets': %" PRIuS ", 'ssim': %f, 'psnr': %f},\n",
- f.frame_number,
- f.encoding_successful ? "True " : "False",
- f.decoding_successful ? "True " : "False",
- f.encode_time_in_us,
- f.decode_time_in_us,
- f.encode_return_code,
- f.decode_return_code,
- f.bit_rate_in_kbps,
- f.encoded_frame_length_in_bytes,
- f.frame_type == webrtc::kVideoFrameDelta ? "'Delta'" : "'Other'",
- f.packets_dropped,
- f.total_packets,
- ssim.value,
- psnr.value);
+ printf(
+ "{'frame_number': %d, "
+ "'encoding_successful': %s, 'decoding_successful': %s, "
+ "'encode_time': %d, 'decode_time': %d, "
+ "'encode_return_code': %d, 'decode_return_code': %d, "
+ "'bit_rate': %d, 'encoded_frame_length': %" PRIuS
+ ", "
+ "'frame_type': %s, 'packets_dropped': %d, "
+ "'total_packets': %" PRIuS ", 'ssim': %f, 'psnr': %f},\n",
+ f.frame_number, f.encoding_successful ? "True " : "False",
+ f.decoding_successful ? "True " : "False", f.encode_time_in_us,
+ f.decode_time_in_us, f.encode_return_code, f.decode_return_code,
+ f.bit_rate_in_kbps, f.encoded_frame_length_in_bytes,
+ f.frame_type == webrtc::kVideoFrameDelta ? "'Delta'" : "'Other'",
+ f.packets_dropped, f.total_packets, ssim.value, psnr.value);
}
printf("]\n");
}
@@ -438,10 +462,14 @@ void PrintPythonOutput(const webrtc::test::TestConfig& config,
// The input file must be in YUV format.
int main(int argc, char* argv[]) {
std::string program_name = argv[0];
- std::string usage = "Quality test application for video comparisons.\n"
- "Run " + program_name + " --helpshort for usage.\n"
- "Example usage:\n" + program_name +
- " --input_filename=filename.yuv --width=352 --height=288\n";
+ std::string usage =
+ "Quality test application for video comparisons.\n"
+ "Run " +
+ program_name +
+ " --helpshort for usage.\n"
+ "Example usage:\n" +
+ program_name +
+ " --input_filename=filename.yuv --width=352 --height=288\n";
google::SetUsageMessage(usage);
google::ParseCommandLineFlags(&argc, &argv, true);
@@ -478,10 +506,8 @@ int main(int argc, char* argv[]) {
packet_manipulator.InitializeRandomSeed(time(NULL));
}
webrtc::test::VideoProcessor* processor =
- new webrtc::test::VideoProcessorImpl(encoder, decoder,
- &frame_reader,
- &frame_writer,
- &packet_manipulator,
+ new webrtc::test::VideoProcessorImpl(encoder, decoder, &frame_reader,
+ &frame_writer, &packet_manipulator,
config, &stats);
processor->Init();
diff --git a/webrtc/modules/video_coding/codecs/vp8/default_temporal_layers.cc b/webrtc/modules/video_coding/codecs/vp8/default_temporal_layers.cc
index da6008ba3d..9226fa774c 100644
--- a/webrtc/modules/video_coding/codecs/vp8/default_temporal_layers.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/default_temporal_layers.cc
@@ -13,8 +13,8 @@
#include <stdlib.h>
#include <string.h>
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
#include "webrtc/modules/video_coding/codecs/vp8/include/vp8_common_types.h"
#include "vpx/vpx_encoder.h"
@@ -41,7 +41,7 @@ int DefaultTemporalLayers::CurrentLayerId() const {
int index = pattern_idx_ % temporal_ids_length_;
assert(index >= 0);
return temporal_ids_[index];
- }
+}
bool DefaultTemporalLayers::ConfigureBitrates(int bitrateKbit,
int max_bitrate_kbit,
@@ -56,8 +56,7 @@ bool DefaultTemporalLayers::ConfigureBitrates(int bitrateKbit,
cfg->ts_periodicity = temporal_ids_length_;
cfg->ts_target_bitrate[0] = bitrateKbit;
cfg->ts_rate_decimator[0] = 1;
- memcpy(cfg->ts_layer_id,
- temporal_ids_,
+ memcpy(cfg->ts_layer_id, temporal_ids_,
sizeof(unsigned int) * temporal_ids_length_);
temporal_pattern_length_ = 1;
temporal_pattern_[0] = kTemporalUpdateLastRefAll;
@@ -74,8 +73,7 @@ bool DefaultTemporalLayers::ConfigureBitrates(int bitrateKbit,
cfg->ts_target_bitrate[1] = bitrateKbit;
cfg->ts_rate_decimator[0] = 2;
cfg->ts_rate_decimator[1] = 1;
- memcpy(cfg->ts_layer_id,
- temporal_ids_,
+ memcpy(cfg->ts_layer_id, temporal_ids_,
sizeof(unsigned int) * temporal_ids_length_);
temporal_pattern_length_ = 8;
temporal_pattern_[0] = kTemporalUpdateLastAndGoldenRefAltRef;
@@ -103,8 +101,7 @@ bool DefaultTemporalLayers::ConfigureBitrates(int bitrateKbit,
cfg->ts_rate_decimator[0] = 4;
cfg->ts_rate_decimator[1] = 2;
cfg->ts_rate_decimator[2] = 1;
- memcpy(cfg->ts_layer_id,
- temporal_ids_,
+ memcpy(cfg->ts_layer_id, temporal_ids_,
sizeof(unsigned int) * temporal_ids_length_);
temporal_pattern_length_ = 8;
temporal_pattern_[0] = kTemporalUpdateLastAndGoldenRefAltRef;
@@ -138,8 +135,7 @@ bool DefaultTemporalLayers::ConfigureBitrates(int bitrateKbit,
cfg->ts_rate_decimator[1] = 4;
cfg->ts_rate_decimator[2] = 2;
cfg->ts_rate_decimator[3] = 1;
- memcpy(cfg->ts_layer_id,
- temporal_ids_,
+ memcpy(cfg->ts_layer_id, temporal_ids_,
sizeof(unsigned int) * temporal_ids_length_);
temporal_pattern_length_ = 16;
temporal_pattern_[0] = kTemporalUpdateLast;
@@ -243,7 +239,7 @@ int DefaultTemporalLayers::EncodeFlags(uint32_t timestamp) {
void DefaultTemporalLayers::PopulateCodecSpecific(
bool base_layer_sync,
- CodecSpecificInfoVP8 *vp8_info,
+ CodecSpecificInfoVP8* vp8_info,
uint32_t timestamp) {
assert(number_of_temporal_layers_ > 0);
assert(0 < temporal_ids_length_);
@@ -254,8 +250,8 @@ void DefaultTemporalLayers::PopulateCodecSpecific(
vp8_info->tl0PicIdx = kNoTl0PicIdx;
} else {
if (base_layer_sync) {
- vp8_info->temporalIdx = 0;
- vp8_info->layerSync = true;
+ vp8_info->temporalIdx = 0;
+ vp8_info->layerSync = true;
} else {
vp8_info->temporalIdx = CurrentLayerId();
TemporalReferences temporal_reference =
@@ -267,7 +263,7 @@ void DefaultTemporalLayers::PopulateCodecSpecific(
kTemporalUpdateGoldenWithoutDependencyRefAltRef ||
temporal_reference == kTemporalUpdateNoneNoRefGoldenRefAltRef ||
(temporal_reference == kTemporalUpdateNone &&
- number_of_temporal_layers_ == 4)) {
+ number_of_temporal_layers_ == 4)) {
vp8_info->layerSync = true;
} else {
vp8_info->layerSync = false;
diff --git a/webrtc/modules/video_coding/codecs/vp8/default_temporal_layers_unittest.cc b/webrtc/modules/video_coding/codecs/vp8/default_temporal_layers_unittest.cc
index 34121cbcf6..461ba69a72 100644
--- a/webrtc/modules/video_coding/codecs/vp8/default_temporal_layers_unittest.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/default_temporal_layers_unittest.cc
@@ -8,9 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-
#include "testing/gtest/include/gtest/gtest.h"
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
#include "webrtc/modules/video_coding/codecs/vp8/default_temporal_layers.h"
#include "vpx/vpx_encoder.h"
@@ -19,47 +18,36 @@
namespace webrtc {
enum {
- kTemporalUpdateLast = VP8_EFLAG_NO_UPD_GF |
- VP8_EFLAG_NO_UPD_ARF |
+ kTemporalUpdateLast = VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF |
VP8_EFLAG_NO_REF_GF |
VP8_EFLAG_NO_REF_ARF,
- kTemporalUpdateGoldenWithoutDependency = VP8_EFLAG_NO_REF_GF |
- VP8_EFLAG_NO_REF_ARF |
- VP8_EFLAG_NO_UPD_ARF |
- VP8_EFLAG_NO_UPD_LAST,
- kTemporalUpdateGolden = VP8_EFLAG_NO_REF_ARF |
- VP8_EFLAG_NO_UPD_ARF |
- VP8_EFLAG_NO_UPD_LAST,
- kTemporalUpdateAltrefWithoutDependency = VP8_EFLAG_NO_REF_ARF |
- VP8_EFLAG_NO_REF_GF |
- VP8_EFLAG_NO_UPD_GF |
- VP8_EFLAG_NO_UPD_LAST,
- kTemporalUpdateAltref = VP8_EFLAG_NO_UPD_GF |
- VP8_EFLAG_NO_UPD_LAST,
- kTemporalUpdateNone = VP8_EFLAG_NO_UPD_GF |
- VP8_EFLAG_NO_UPD_ARF |
+ kTemporalUpdateGoldenWithoutDependency =
+ VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_ARF |
+ VP8_EFLAG_NO_UPD_LAST,
+ kTemporalUpdateGolden =
+ VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST,
+ kTemporalUpdateAltrefWithoutDependency =
+ VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_GF |
+ VP8_EFLAG_NO_UPD_LAST,
+ kTemporalUpdateAltref = VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_LAST,
+ kTemporalUpdateNone = VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF |
VP8_EFLAG_NO_UPD_LAST |
VP8_EFLAG_NO_UPD_ENTROPY,
- kTemporalUpdateNoneNoRefAltRef = VP8_EFLAG_NO_REF_ARF |
- VP8_EFLAG_NO_UPD_GF |
+ kTemporalUpdateNoneNoRefAltRef = VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_GF |
VP8_EFLAG_NO_UPD_ARF |
VP8_EFLAG_NO_UPD_LAST |
VP8_EFLAG_NO_UPD_ENTROPY,
- kTemporalUpdateNoneNoRefGolden = VP8_EFLAG_NO_REF_GF |
- VP8_EFLAG_NO_UPD_GF |
+ kTemporalUpdateNoneNoRefGolden = VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_GF |
VP8_EFLAG_NO_UPD_ARF |
VP8_EFLAG_NO_UPD_LAST |
VP8_EFLAG_NO_UPD_ENTROPY,
- kTemporalUpdateGoldenWithoutDependencyRefAltRef = VP8_EFLAG_NO_REF_GF |
- VP8_EFLAG_NO_UPD_ARF |
- VP8_EFLAG_NO_UPD_LAST,
- kTemporalUpdateGoldenRefAltRef = VP8_EFLAG_NO_UPD_ARF |
- VP8_EFLAG_NO_UPD_LAST,
- kTemporalUpdateLastRefAltRef = VP8_EFLAG_NO_UPD_GF |
- VP8_EFLAG_NO_UPD_ARF |
- VP8_EFLAG_NO_REF_GF,
- kTemporalUpdateLastAndGoldenRefAltRef = VP8_EFLAG_NO_UPD_ARF |
- VP8_EFLAG_NO_REF_GF,
+ kTemporalUpdateGoldenWithoutDependencyRefAltRef =
+ VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST,
+ kTemporalUpdateGoldenRefAltRef = VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST,
+ kTemporalUpdateLastRefAltRef =
+ VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_REF_GF,
+ kTemporalUpdateLastAndGoldenRefAltRef =
+ VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_REF_GF,
};
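Every constant above is a bitmask over libvpx's VP8_EFLAG_* frame flags, so each one reads as "which buffers may this frame reference, and which does it update". Decoding one of them under that interpretation (a standalone reading aid, not part of the test):

#include <cassert>
#include "vpx/vp8cx.h"

int main() {
  // kTemporalUpdateGoldenWithoutDependencyRefAltRef, as defined above:
  const int flags =
      VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST;
  // The frame may still reference LAST and ALTREF (their NO_REF bits are
  // clear) but not GOLDEN, and GOLDEN is the only buffer it updates.
  assert((flags & VP8_EFLAG_NO_REF_LAST) == 0);
  assert((flags & VP8_EFLAG_NO_REF_ARF) == 0);
  assert((flags & VP8_EFLAG_NO_UPD_GF) == 0);
  return 0;
}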
TEST(TemporalLayersTest, 2Layers) {
@@ -68,29 +56,30 @@ TEST(TemporalLayersTest, 2Layers) {
CodecSpecificInfoVP8 vp8_info;
tl.ConfigureBitrates(500, 500, 30, &cfg);
- int expected_flags[16] = { kTemporalUpdateLastAndGoldenRefAltRef,
- kTemporalUpdateGoldenWithoutDependencyRefAltRef,
- kTemporalUpdateLastRefAltRef,
- kTemporalUpdateGoldenRefAltRef,
- kTemporalUpdateLastRefAltRef,
- kTemporalUpdateGoldenRefAltRef,
- kTemporalUpdateLastRefAltRef,
- kTemporalUpdateNone,
- kTemporalUpdateLastAndGoldenRefAltRef,
- kTemporalUpdateGoldenWithoutDependencyRefAltRef,
- kTemporalUpdateLastRefAltRef,
- kTemporalUpdateGoldenRefAltRef,
- kTemporalUpdateLastRefAltRef,
- kTemporalUpdateGoldenRefAltRef,
- kTemporalUpdateLastRefAltRef,
- kTemporalUpdateNone,
- };
- int expected_temporal_idx[16] =
- { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1 };
-
- bool expected_layer_sync[16] =
- { false, true, false, false, false, false, false, false,
- false, true, false, false, false, false, false, false };
+ int expected_flags[16] = {
+ kTemporalUpdateLastAndGoldenRefAltRef,
+ kTemporalUpdateGoldenWithoutDependencyRefAltRef,
+ kTemporalUpdateLastRefAltRef,
+ kTemporalUpdateGoldenRefAltRef,
+ kTemporalUpdateLastRefAltRef,
+ kTemporalUpdateGoldenRefAltRef,
+ kTemporalUpdateLastRefAltRef,
+ kTemporalUpdateNone,
+ kTemporalUpdateLastAndGoldenRefAltRef,
+ kTemporalUpdateGoldenWithoutDependencyRefAltRef,
+ kTemporalUpdateLastRefAltRef,
+ kTemporalUpdateGoldenRefAltRef,
+ kTemporalUpdateLastRefAltRef,
+ kTemporalUpdateGoldenRefAltRef,
+ kTemporalUpdateLastRefAltRef,
+ kTemporalUpdateNone,
+ };
+ int expected_temporal_idx[16] = {0, 1, 0, 1, 0, 1, 0, 1,
+ 0, 1, 0, 1, 0, 1, 0, 1};
+
+ bool expected_layer_sync[16] = {false, true, false, false, false, false,
+ false, false, false, true, false, false,
+ false, false, false, false};
uint32_t timestamp = 0;
for (int i = 0; i < 16; ++i) {
@@ -108,29 +97,30 @@ TEST(TemporalLayersTest, 3Layers) {
CodecSpecificInfoVP8 vp8_info;
tl.ConfigureBitrates(500, 500, 30, &cfg);
- int expected_flags[16] = { kTemporalUpdateLastAndGoldenRefAltRef,
- kTemporalUpdateNoneNoRefGolden,
- kTemporalUpdateGoldenWithoutDependencyRefAltRef,
- kTemporalUpdateNone,
- kTemporalUpdateLastRefAltRef,
- kTemporalUpdateNone,
- kTemporalUpdateGoldenRefAltRef,
- kTemporalUpdateNone,
- kTemporalUpdateLastAndGoldenRefAltRef,
- kTemporalUpdateNoneNoRefGolden,
- kTemporalUpdateGoldenWithoutDependencyRefAltRef,
- kTemporalUpdateNone,
- kTemporalUpdateLastRefAltRef,
- kTemporalUpdateNone,
- kTemporalUpdateGoldenRefAltRef,
- kTemporalUpdateNone,
+ int expected_flags[16] = {
+ kTemporalUpdateLastAndGoldenRefAltRef,
+ kTemporalUpdateNoneNoRefGolden,
+ kTemporalUpdateGoldenWithoutDependencyRefAltRef,
+ kTemporalUpdateNone,
+ kTemporalUpdateLastRefAltRef,
+ kTemporalUpdateNone,
+ kTemporalUpdateGoldenRefAltRef,
+ kTemporalUpdateNone,
+ kTemporalUpdateLastAndGoldenRefAltRef,
+ kTemporalUpdateNoneNoRefGolden,
+ kTemporalUpdateGoldenWithoutDependencyRefAltRef,
+ kTemporalUpdateNone,
+ kTemporalUpdateLastRefAltRef,
+ kTemporalUpdateNone,
+ kTemporalUpdateGoldenRefAltRef,
+ kTemporalUpdateNone,
};
- int expected_temporal_idx[16] =
- { 0, 2, 1, 2, 0, 2, 1, 2, 0, 2, 1, 2, 0, 2, 1, 2 };
+ int expected_temporal_idx[16] = {0, 2, 1, 2, 0, 2, 1, 2,
+ 0, 2, 1, 2, 0, 2, 1, 2};
- bool expected_layer_sync[16] =
- { false, true, true, false, false, false, false, false,
- false, true, true, false, false, false, false, false };
+ bool expected_layer_sync[16] = {false, true, true, false, false, false,
+ false, false, false, true, true, false,
+ false, false, false, false};
unsigned int timestamp = 0;
for (int i = 0; i < 16; ++i) {
@@ -165,12 +155,12 @@ TEST(TemporalLayersTest, 4Layers) {
kTemporalUpdateAltref,
kTemporalUpdateNone,
};
- int expected_temporal_idx[16] =
- { 0, 3, 2, 3, 1, 3, 2, 3, 0, 3, 2, 3, 1, 3, 2, 3 };
+ int expected_temporal_idx[16] = {0, 3, 2, 3, 1, 3, 2, 3,
+ 0, 3, 2, 3, 1, 3, 2, 3};
- bool expected_layer_sync[16] =
- { false, true, true, true, true, true, false, true,
- false, true, false, true, false, true, false, true };
+ bool expected_layer_sync[16] = {false, true, true, true, true, true,
+ false, true, false, true, false, true,
+ false, true, false, true};
uint32_t timestamp = 0;
for (int i = 0; i < 16; ++i) {
@@ -198,8 +188,7 @@ TEST(TemporalLayersTest, KeyFrame) {
kTemporalUpdateGoldenRefAltRef,
kTemporalUpdateNone,
};
- int expected_temporal_idx[8] =
- { 0, 0, 0, 0, 0, 0, 0, 2};
+ int expected_temporal_idx[8] = {0, 0, 0, 0, 0, 0, 0, 2};
uint32_t timestamp = 0;
for (int i = 0; i < 7; ++i) {
diff --git a/webrtc/modules/video_coding/codecs/vp8/include/vp8.h b/webrtc/modules/video_coding/codecs/vp8/include/vp8.h
index f5dae471d2..dd3514235d 100644
--- a/webrtc/modules/video_coding/codecs/vp8/include/vp8.h
+++ b/webrtc/modules/video_coding/codecs/vp8/include/vp8.h
@@ -13,7 +13,7 @@
#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_INCLUDE_VP8_H_
#define WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_INCLUDE_VP8_H_
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
namespace webrtc {
@@ -21,16 +21,15 @@ class VP8Encoder : public VideoEncoder {
public:
static VP8Encoder* Create();
- virtual ~VP8Encoder() {};
+ virtual ~VP8Encoder() {}
}; // end of VP8Encoder class
-
class VP8Decoder : public VideoDecoder {
public:
static VP8Decoder* Create();
- virtual ~VP8Decoder() {};
+ virtual ~VP8Decoder() {}
}; // end of VP8Decoder class
} // namespace webrtc
-#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_INCLUDE_VP8_H_
+#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_INCLUDE_VP8_H_
diff --git a/webrtc/modules/video_coding/codecs/vp8/include/vp8_common_types.h b/webrtc/modules/video_coding/codecs/vp8/include/vp8_common_types.h
index c2cefdd94e..7a27e4429a 100644
--- a/webrtc/modules/video_coding/codecs/vp8/include/vp8_common_types.h
+++ b/webrtc/modules/video_coding/codecs/vp8/include/vp8_common_types.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_COMMON_TYPES_H_
-#define WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_COMMON_TYPES_H_
+#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_INCLUDE_VP8_COMMON_TYPES_H_
+#define WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_INCLUDE_VP8_COMMON_TYPES_H_
#include "webrtc/common_types.h"
@@ -19,11 +19,11 @@ namespace webrtc {
// Values as required for the VP8 codec (accumulating).
static const float
kVp8LayerRateAlloction[kMaxTemporalStreams][kMaxTemporalStreams] = {
- {1.0f, 1.0f, 1.0f, 1.0f}, // 1 layer
- {0.6f, 1.0f, 1.0f, 1.0f}, // 2 layers {60%, 40%}
- {0.4f, 0.6f, 1.0f, 1.0f}, // 3 layers {40%, 20%, 40%}
- {0.25f, 0.4f, 0.6f, 1.0f} // 4 layers {25%, 15%, 20%, 40%}
+ {1.0f, 1.0f, 1.0f, 1.0f}, // 1 layer
+ {0.6f, 1.0f, 1.0f, 1.0f}, // 2 layers {60%, 40%}
+ {0.4f, 0.6f, 1.0f, 1.0f}, // 3 layers {40%, 20%, 40%}
+ {0.25f, 0.4f, 0.6f, 1.0f} // 4 layers {25%, 15%, 20%, 40%}
};
} // namespace webrtc
-#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_COMMON_TYPES_H_
+#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_INCLUDE_VP8_COMMON_TYPES_H_
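Because each row of kVp8LayerRateAlloction is cumulative, per-layer targets fall out by differencing. A worked sketch for three layers at 600 kbps, reproducing the {40%, 20%, 40%} split noted in the comments above:

#include <cstdio>

int main() {
  const float alloc[4] = {0.4f, 0.6f, 1.0f, 1.0f};  // 3-layer row of the table
  const int total_kbps = 600;
  int prev = 0;
  for (int i = 0; i < 3; ++i) {
    const int cumulative = static_cast<int>(total_kbps * alloc[i]);
    // cumulative: 240, 360, 600 -> each layer adds 240, 120, 240 kbps
    printf("layer %d: cumulative %d kbps, adds %d kbps\n", i, cumulative,
           cumulative - prev);
    prev = cumulative;
  }
  return 0;
}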
diff --git a/webrtc/modules/video_coding/codecs/vp8/realtime_temporal_layers.cc b/webrtc/modules/video_coding/codecs/vp8/realtime_temporal_layers.cc
index 15b5af9200..d22601358f 100644
--- a/webrtc/modules/video_coding/codecs/vp8/realtime_temporal_layers.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/realtime_temporal_layers.cc
@@ -12,7 +12,7 @@
#include "vpx/vpx_encoder.h"
#include "vpx/vp8cx.h"
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
#include "webrtc/modules/video_coding/codecs/vp8/include/vp8_common_types.h"
#include "webrtc/modules/video_coding/codecs/vp8/temporal_layers.h"
@@ -23,7 +23,8 @@ namespace webrtc {
namespace {
enum {
kTemporalUpdateLast = VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF |
- VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF,
+ VP8_EFLAG_NO_REF_GF |
+ VP8_EFLAG_NO_REF_ARF,
kTemporalUpdateGolden =
VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST,
@@ -37,13 +38,15 @@ enum {
kTemporalUpdateAltref | VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_REF_GF,
kTemporalUpdateNone = VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF |
- VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_ENTROPY,
+ VP8_EFLAG_NO_UPD_LAST |
+ VP8_EFLAG_NO_UPD_ENTROPY,
kTemporalUpdateNoneNoRefAltref = kTemporalUpdateNone | VP8_EFLAG_NO_REF_ARF,
kTemporalUpdateNoneNoRefGoldenRefAltRef =
VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF |
- VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_ENTROPY,
+ VP8_EFLAG_NO_UPD_LAST |
+ VP8_EFLAG_NO_UPD_ENTROPY,
kTemporalUpdateGoldenWithoutDependencyRefAltRef =
VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST,
@@ -133,12 +136,14 @@ class RealTimeTemporalLayers : public TemporalLayers {
layer_ids_length_ = sizeof(layer_ids) / sizeof(*layer_ids);
static const int encode_flags[] = {
- kTemporalUpdateLastAndGoldenRefAltRef,
- kTemporalUpdateGoldenWithoutDependencyRefAltRef,
- kTemporalUpdateLastRefAltRef, kTemporalUpdateGoldenRefAltRef,
- kTemporalUpdateLastRefAltRef, kTemporalUpdateGoldenRefAltRef,
- kTemporalUpdateLastRefAltRef, kTemporalUpdateNone
- };
+ kTemporalUpdateLastAndGoldenRefAltRef,
+ kTemporalUpdateGoldenWithoutDependencyRefAltRef,
+ kTemporalUpdateLastRefAltRef,
+ kTemporalUpdateGoldenRefAltRef,
+ kTemporalUpdateLastRefAltRef,
+ kTemporalUpdateGoldenRefAltRef,
+ kTemporalUpdateLastRefAltRef,
+ kTemporalUpdateNone};
      encode_flags_length_ = sizeof(encode_flags) / sizeof(*encode_flags);
encode_flags_ = encode_flags;
@@ -153,12 +158,14 @@ class RealTimeTemporalLayers : public TemporalLayers {
layer_ids_length_ = sizeof(layer_ids) / sizeof(*layer_ids);
static const int encode_flags[] = {
- kTemporalUpdateLastAndGoldenRefAltRef,
- kTemporalUpdateNoneNoRefGoldenRefAltRef,
- kTemporalUpdateGoldenWithoutDependencyRefAltRef, kTemporalUpdateNone,
- kTemporalUpdateLastRefAltRef, kTemporalUpdateNone,
- kTemporalUpdateGoldenRefAltRef, kTemporalUpdateNone
- };
+ kTemporalUpdateLastAndGoldenRefAltRef,
+ kTemporalUpdateNoneNoRefGoldenRefAltRef,
+ kTemporalUpdateGoldenWithoutDependencyRefAltRef,
+ kTemporalUpdateNone,
+ kTemporalUpdateLastRefAltRef,
+ kTemporalUpdateNone,
+ kTemporalUpdateGoldenRefAltRef,
+ kTemporalUpdateNone};
      encode_flags_length_ = sizeof(encode_flags) / sizeof(*encode_flags);
encode_flags_ = encode_flags;
@@ -172,8 +179,8 @@ class RealTimeTemporalLayers : public TemporalLayers {
assert(false);
return false;
}
- memcpy(
- cfg->ts_layer_id, layer_ids_, sizeof(unsigned int) * layer_ids_length_);
+ memcpy(cfg->ts_layer_id, layer_ids_,
+ sizeof(unsigned int) * layer_ids_length_);
return true;
}
diff --git a/webrtc/modules/video_coding/codecs/vp8/reference_picture_selection.cc b/webrtc/modules/video_coding/codecs/vp8/reference_picture_selection.cc
index a922e35712..1838e32eb7 100644
--- a/webrtc/modules/video_coding/codecs/vp8/reference_picture_selection.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/reference_picture_selection.cc
@@ -25,8 +25,7 @@ ReferencePictureSelection::ReferencePictureSelection()
last_sent_ref_update_time_(0),
established_ref_picture_id_(0),
last_refresh_time_(0),
- rtt_(0) {
-}
+ rtt_(0) {}
void ReferencePictureSelection::Init() {
update_golden_next_ = true;
@@ -62,7 +61,8 @@ bool ReferencePictureSelection::ReceivedSLI(uint32_t now_ts) {
return send_refresh;
}
-int ReferencePictureSelection::EncodeFlags(int picture_id, bool send_refresh,
+int ReferencePictureSelection::EncodeFlags(int picture_id,
+ bool send_refresh,
uint32_t now_ts) {
int flags = 0;
// We can't refresh the decoder until we have established the key frame.
@@ -87,12 +87,12 @@ int ReferencePictureSelection::EncodeFlags(int picture_id, bool send_refresh,
received_ack_) {
flags |= VP8_EFLAG_NO_REF_LAST; // Don't reference the last frame.
if (update_golden_next_) {
- flags |= VP8_EFLAG_FORCE_GF; // Update the golden reference.
+ flags |= VP8_EFLAG_FORCE_GF; // Update the golden reference.
flags |= VP8_EFLAG_NO_UPD_ARF; // Don't update alt-ref.
- flags |= VP8_EFLAG_NO_REF_GF; // Don't reference the golden frame.
+ flags |= VP8_EFLAG_NO_REF_GF; // Don't reference the golden frame.
} else {
- flags |= VP8_EFLAG_FORCE_ARF; // Update the alt-ref reference.
- flags |= VP8_EFLAG_NO_UPD_GF; // Don't update the golden frame.
+ flags |= VP8_EFLAG_FORCE_ARF; // Update the alt-ref reference.
+ flags |= VP8_EFLAG_NO_UPD_GF; // Don't update the golden frame.
flags |= VP8_EFLAG_NO_REF_ARF; // Don't reference the alt-ref frame.
}
last_sent_ref_picture_id_ = picture_id;
@@ -103,9 +103,9 @@ int ReferencePictureSelection::EncodeFlags(int picture_id, bool send_refresh,
if (established_golden_)
flags |= VP8_EFLAG_NO_REF_ARF; // Don't reference the alt-ref frame.
else
- flags |= VP8_EFLAG_NO_REF_GF; // Don't reference the golden frame.
- flags |= VP8_EFLAG_NO_UPD_GF; // Don't update the golden frame.
- flags |= VP8_EFLAG_NO_UPD_ARF; // Don't update the alt-ref frame.
+ flags |= VP8_EFLAG_NO_REF_GF; // Don't reference the golden frame.
+ flags |= VP8_EFLAG_NO_UPD_GF; // Don't update the golden frame.
+ flags |= VP8_EFLAG_NO_UPD_ARF; // Don't update the alt-ref frame.
}
return flags;
}
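Taken together, the intended call sequence is roughly the following (a sketch against the signatures visible in this diff, using the class declared in reference_picture_selection.h; timestamps are on the 90 kHz RTP clock, as in the unit test below):

  webrtc::ReferencePictureSelection rps;
  rps.Init();
  uint32_t now_ts = 90 * 1000;  // one second into the stream
  bool send_refresh = rps.ReceivedSLI(now_ts);  // loss report from the receiver
  int flags = rps.EncodeFlags(/*picture_id=*/5, send_refresh, now_ts);
  // |flags| now carries the VP8_EFLAG_* bits that steer golden/alt-ref
  // referencing and updating for the next encoded frame.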
diff --git a/webrtc/modules/video_coding/codecs/vp8/reference_picture_selection_unittest.cc b/webrtc/modules/video_coding/codecs/vp8/reference_picture_selection_unittest.cc
index c6474e5bd1..742bb96e91 100644
--- a/webrtc/modules/video_coding/codecs/vp8/reference_picture_selection_unittest.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/reference_picture_selection_unittest.cc
@@ -22,25 +22,19 @@ static const uint32_t kMinUpdateInterval = 10;
// Should match the values set in reference_picture_selection.h
static const int kRtt = 10;
-static const int kNoPropagationGolden = VP8_EFLAG_NO_REF_ARF |
- VP8_EFLAG_NO_UPD_GF |
- VP8_EFLAG_NO_UPD_ARF;
-static const int kNoPropagationAltRef = VP8_EFLAG_NO_REF_GF |
- VP8_EFLAG_NO_UPD_GF |
- VP8_EFLAG_NO_UPD_ARF;
-static const int kPropagateGolden = VP8_EFLAG_FORCE_GF |
- VP8_EFLAG_NO_UPD_ARF |
- VP8_EFLAG_NO_REF_GF |
- VP8_EFLAG_NO_REF_LAST;
-static const int kPropagateAltRef = VP8_EFLAG_FORCE_ARF |
- VP8_EFLAG_NO_UPD_GF |
- VP8_EFLAG_NO_REF_ARF |
- VP8_EFLAG_NO_REF_LAST;
-static const int kRefreshFromGolden = VP8_EFLAG_NO_REF_LAST |
- VP8_EFLAG_NO_REF_ARF;
-static const int kRefreshFromAltRef = VP8_EFLAG_NO_REF_LAST |
- VP8_EFLAG_NO_REF_GF;
-
+static const int kNoPropagationGolden =
+ VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF;
+static const int kNoPropagationAltRef =
+ VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF;
+static const int kPropagateGolden = VP8_EFLAG_FORCE_GF | VP8_EFLAG_NO_UPD_ARF |
+ VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_LAST;
+static const int kPropagateAltRef = VP8_EFLAG_FORCE_ARF | VP8_EFLAG_NO_UPD_GF |
+ VP8_EFLAG_NO_REF_ARF |
+ VP8_EFLAG_NO_REF_LAST;
+static const int kRefreshFromGolden =
+ VP8_EFLAG_NO_REF_LAST | VP8_EFLAG_NO_REF_ARF;
+static const int kRefreshFromAltRef =
+ VP8_EFLAG_NO_REF_LAST | VP8_EFLAG_NO_REF_GF;
class TestRPS : public ::testing::Test {
protected:
@@ -84,15 +78,15 @@ TEST_F(TestRPS, TestDecoderRefresh) {
EXPECT_EQ(rps_.ReceivedSLI(90 * time), true);
  // Enough time has elapsed since the previous reference propagation, so we
  // will get both a refresh from golden and a propagation of alt-ref.
- EXPECT_EQ(rps_.EncodeFlags(5, true, 90 * time), kRefreshFromGolden |
- kPropagateAltRef);
+ EXPECT_EQ(rps_.EncodeFlags(5, true, 90 * time),
+ kRefreshFromGolden | kPropagateAltRef);
rps_.ReceivedRPSI(5);
time += kRtt + 1;
// Enough time for a new refresh, but not enough time for a reference
// propagation.
EXPECT_EQ(rps_.ReceivedSLI(90 * time), true);
- EXPECT_EQ(rps_.EncodeFlags(6, true, 90 * time), kRefreshFromAltRef |
- kNoPropagationAltRef);
+ EXPECT_EQ(rps_.EncodeFlags(6, true, 90 * time),
+ kRefreshFromAltRef | kNoPropagationAltRef);
}
TEST_F(TestRPS, TestWrap) {
diff --git a/webrtc/modules/video_coding/codecs/vp8/screenshare_layers.cc b/webrtc/modules/video_coding/codecs/vp8/screenshare_layers.cc
index 0fbb2a6c40..536587a13e 100644
--- a/webrtc/modules/video_coding/codecs/vp8/screenshare_layers.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/screenshare_layers.cc
@@ -11,10 +11,12 @@
#include <stdlib.h>
+#include <algorithm>
+
#include "webrtc/base/checks.h"
#include "vpx/vpx_encoder.h"
#include "vpx/vp8cx.h"
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
namespace webrtc {
@@ -188,7 +190,7 @@ void ScreenshareLayers::FrameEncoded(unsigned int size,
}
void ScreenshareLayers::PopulateCodecSpecific(bool base_layer_sync,
- CodecSpecificInfoVP8 *vp8_info,
+ CodecSpecificInfoVP8* vp8_info,
uint32_t timestamp) {
int64_t unwrapped_timestamp = time_wrap_handler_.Unwrap(timestamp);
if (number_of_temporal_layers_ == 1) {
diff --git a/webrtc/modules/video_coding/codecs/vp8/screenshare_layers.h b/webrtc/modules/video_coding/codecs/vp8/screenshare_layers.h
index 90a8b1b883..7628758209 100644
--- a/webrtc/modules/video_coding/codecs/vp8/screenshare_layers.h
+++ b/webrtc/modules/video_coding/codecs/vp8/screenshare_layers.h
@@ -15,7 +15,7 @@
#include "webrtc/base/timeutils.h"
#include "webrtc/modules/video_coding/codecs/vp8/temporal_layers.h"
-#include "webrtc/modules/video_coding/utility/include/frame_dropper.h"
+#include "webrtc/modules/video_coding/utility/frame_dropper.h"
#include "webrtc/typedefs.h"
namespace webrtc {
diff --git a/webrtc/modules/video_coding/codecs/vp8/screenshare_layers_unittest.cc b/webrtc/modules/video_coding/codecs/vp8/screenshare_layers_unittest.cc
index 628e336568..f31ed5e4d8 100644
--- a/webrtc/modules/video_coding/codecs/vp8/screenshare_layers_unittest.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/screenshare_layers_unittest.cc
@@ -12,9 +12,9 @@
#include "vpx/vpx_encoder.h"
#include "vpx/vp8cx.h"
#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
#include "webrtc/modules/video_coding/codecs/vp8/screenshare_layers.h"
-#include "webrtc/modules/video_coding/utility/include/mock/mock_frame_dropper.h"
+#include "webrtc/modules/video_coding/utility/mock/mock_frame_dropper.h"
using ::testing::_;
using ::testing::NiceMock;
diff --git a/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.cc b/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.cc
index 5dc4ac78f1..40e438f7e4 100644
--- a/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.cc
@@ -215,9 +215,7 @@ int SimulcastEncoderAdapter::InitEncode(const VideoCodec* inst,
}
VideoEncoder* encoder = factory_->Create();
- ret = encoder->InitEncode(&stream_codec,
- number_of_cores,
- max_payload_size);
+ ret = encoder->InitEncode(&stream_codec, number_of_cores, max_payload_size);
if (ret < 0) {
Release();
return ret;
@@ -284,35 +282,25 @@ int SimulcastEncoderAdapter::Encode(
// scale it to match what the encoder expects (below).
if ((dst_width == src_width && dst_height == src_height) ||
input_image.IsZeroSize()) {
- streaminfos_[stream_idx].encoder->Encode(input_image,
- codec_specific_info,
+ streaminfos_[stream_idx].encoder->Encode(input_image, codec_specific_info,
&stream_frame_types);
} else {
VideoFrame dst_frame;
// Making sure that destination frame is of sufficient size.
// Aligning stride values based on width.
- dst_frame.CreateEmptyFrame(dst_width, dst_height,
- dst_width, (dst_width + 1) / 2,
- (dst_width + 1) / 2);
- libyuv::I420Scale(input_image.buffer(kYPlane),
- input_image.stride(kYPlane),
- input_image.buffer(kUPlane),
- input_image.stride(kUPlane),
- input_image.buffer(kVPlane),
- input_image.stride(kVPlane),
- src_width, src_height,
- dst_frame.buffer(kYPlane),
- dst_frame.stride(kYPlane),
- dst_frame.buffer(kUPlane),
- dst_frame.stride(kUPlane),
- dst_frame.buffer(kVPlane),
- dst_frame.stride(kVPlane),
- dst_width, dst_height,
- libyuv::kFilterBilinear);
+ dst_frame.CreateEmptyFrame(dst_width, dst_height, dst_width,
+ (dst_width + 1) / 2, (dst_width + 1) / 2);
+ libyuv::I420Scale(
+ input_image.buffer(kYPlane), input_image.stride(kYPlane),
+ input_image.buffer(kUPlane), input_image.stride(kUPlane),
+ input_image.buffer(kVPlane), input_image.stride(kVPlane), src_width,
+ src_height, dst_frame.buffer(kYPlane), dst_frame.stride(kYPlane),
+ dst_frame.buffer(kUPlane), dst_frame.stride(kUPlane),
+ dst_frame.buffer(kVPlane), dst_frame.stride(kVPlane), dst_width,
+ dst_height, libyuv::kFilterBilinear);
dst_frame.set_timestamp(input_image.timestamp());
dst_frame.set_render_time_ms(input_image.render_time_ms());
- streaminfos_[stream_idx].encoder->Encode(dst_frame,
- codec_specific_info,
+ streaminfos_[stream_idx].encoder->Encode(dst_frame, codec_specific_info,
&stream_frame_types);
}
}
@@ -426,16 +414,17 @@ uint32_t SimulcastEncoderAdapter::GetStreamBitrate(
// current stream's |targetBitrate|, otherwise it's capped by |maxBitrate|.
if (stream_idx < codec_.numberOfSimulcastStreams - 1) {
unsigned int max_rate = codec_.simulcastStream[stream_idx].maxBitrate;
- if (new_bitrate_kbit >= SumStreamTargetBitrate(stream_idx + 1, codec_) +
- codec_.simulcastStream[stream_idx + 1].minBitrate) {
+ if (new_bitrate_kbit >=
+ SumStreamTargetBitrate(stream_idx + 1, codec_) +
+ codec_.simulcastStream[stream_idx + 1].minBitrate) {
max_rate = codec_.simulcastStream[stream_idx].targetBitrate;
}
return std::min(new_bitrate_kbit - sum_target_lower_streams, max_rate);
} else {
- // For the highest stream (highest resolution), the |targetBitRate| and
- // |maxBitrate| are not used. Any excess bitrate (above the targets of
- // all lower streams) is given to this (highest resolution) stream.
- return new_bitrate_kbit - sum_target_lower_streams;
+ // For the highest stream (highest resolution), the |targetBitRate| and
+ // |maxBitrate| are not used. Any excess bitrate (above the targets of
+ // all lower streams) is given to this (highest resolution) stream.
+ return new_bitrate_kbit - sum_target_lower_streams;
}
} else {
// Not enough bitrate for this stream.
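A worked pass through this allocation, using the kTargetBitrates = {100, 450, 1000} and kMinBitrates = {50, 150, 600} defaults from the tests later in this diff (illustrative arithmetic only):

#include <cstdio>

int main() {
  const int target[3] = {100, 450, 1000};  // kTargetBitrates, in kbps
  const int min_rate[3] = {50, 150, 600};  // kMinBitrates, in kbps
  const int total = target[0] + target[1] + min_rate[2];  // 1150 kbps
  // The third stream's minimum is reachable, so streams 0 and 1 are capped
  // at their targets and the highest stream absorbs all remaining bitrate.
  const int s0 = target[0];        // 100
  const int s1 = target[1];        // 450
  const int s2 = total - s0 - s1;  // 600 == min_rate[2], so it is sent
  printf("allocated: %d %d %d kbps\n", s0, s1, s2);
  return 0;
}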
@@ -507,4 +496,11 @@ bool SimulcastEncoderAdapter::SupportsNativeHandle() const {
return streaminfos_[0].encoder->SupportsNativeHandle();
}
+const char* SimulcastEncoderAdapter::ImplementationName() const {
+ // We should not be calling this method before streaminfos_ are configured.
+ RTC_DCHECK(!streaminfos_.empty());
+ // TODO(pbos): Support multiple implementation names for different encoders.
+ return streaminfos_[0].encoder->ImplementationName();
+}
+
} // namespace webrtc
diff --git a/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.h b/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.h
index afec024abc..05a96c7336 100644
--- a/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.h
+++ b/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.h
@@ -59,6 +59,7 @@ class SimulcastEncoderAdapter : public VP8Encoder {
int GetTargetFramerate() override;
bool SupportsNativeHandle() const override;
+ const char* ImplementationName() const override;
private:
struct StreamInfo {
@@ -71,8 +72,8 @@ class SimulcastEncoderAdapter : public VP8Encoder {
send_stream(true) {}
StreamInfo(VideoEncoder* encoder,
EncodedImageCallback* callback,
- unsigned short width,
- unsigned short height,
+ uint16_t width,
+ uint16_t height,
bool send_stream)
: encoder(encoder),
callback(callback),
@@ -83,8 +84,8 @@ class SimulcastEncoderAdapter : public VP8Encoder {
// Deleted by SimulcastEncoderAdapter::Release().
VideoEncoder* encoder;
EncodedImageCallback* callback;
- unsigned short width;
- unsigned short height;
+ uint16_t width;
+ uint16_t height;
bool key_frame_request;
bool send_stream;
};
@@ -118,4 +119,3 @@ class SimulcastEncoderAdapter : public VP8Encoder {
} // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_SIMULCAST_ENCODER_ADAPTER_H_
-
diff --git a/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter_unittest.cc b/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter_unittest.cc
index 218b5e2d1a..86b8e0b345 100644
--- a/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter_unittest.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter_unittest.cc
@@ -11,7 +11,7 @@
#include <vector>
#include "testing/gmock/include/gmock/gmock.h"
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
#include "webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.h"
#include "webrtc/modules/video_coding/codecs/vp8/simulcast_unittest.h"
#include "webrtc/modules/video_coding/codecs/vp8/vp8_factory.h"
@@ -27,12 +27,10 @@ static VP8Encoder* CreateTestEncoderAdapter() {
class TestSimulcastEncoderAdapter : public TestVp8Simulcast {
public:
TestSimulcastEncoderAdapter()
- : TestVp8Simulcast(CreateTestEncoderAdapter(),
- VP8Decoder::Create()) {}
+ : TestVp8Simulcast(CreateTestEncoderAdapter(), VP8Decoder::Create()) {}
+
protected:
- virtual void SetUp() {
- TestVp8Simulcast::SetUp();
- }
+ virtual void SetUp() { TestVp8Simulcast::SetUp(); }
virtual void TearDown() {
TestVp8Simulcast::TearDown();
VP8EncoderFactoryConfig::set_use_simulcast_adapter(false);
@@ -97,8 +95,7 @@ TEST_F(TestSimulcastEncoderAdapter, TestSpatioTemporalLayers321PatternEncoder) {
// TODO(ronghuawu): Enable this test when SkipEncodingUnusedStreams option is
// implemented for SimulcastEncoderAdapter.
-TEST_F(TestSimulcastEncoderAdapter,
- DISABLED_TestSkipEncodingUnusedStreams) {
+TEST_F(TestSimulcastEncoderAdapter, DISABLED_TestSkipEncodingUnusedStreams) {
TestVp8Simulcast::TestSkipEncodingUnusedStreams();
}
@@ -127,23 +124,17 @@ class MockVideoEncoder : public VideoEncoder {
return 0;
}
- int32_t Release() override {
- return 0;
- }
+ int32_t Release() override { return 0; }
int32_t SetRates(uint32_t newBitRate, uint32_t frameRate) override {
return 0;
}
- MOCK_METHOD2(SetChannelParameters,
- int32_t(uint32_t packetLoss, int64_t rtt));
+ MOCK_METHOD2(SetChannelParameters, int32_t(uint32_t packetLoss, int64_t rtt));
- bool SupportsNativeHandle() const override {
- return supports_native_handle_;
- }
+ bool SupportsNativeHandle() const override { return supports_native_handle_; }
- virtual ~MockVideoEncoder() {
- }
+ virtual ~MockVideoEncoder() {}
const VideoCodec& codec() const { return codec_; }
@@ -200,7 +191,8 @@ class TestSimulcastEncoderAdapterFakeHelper {
EXPECT_TRUE(!factory_->encoders().empty());
for (size_t i = 0; i < factory_->encoders().size(); ++i) {
EXPECT_CALL(*factory_->encoders()[i],
- SetChannelParameters(packetLoss, rtt)).Times(1);
+ SetChannelParameters(packetLoss, rtt))
+ .Times(1);
}
}
@@ -249,8 +241,7 @@ class TestSimulcastEncoderAdapterFake : public ::testing::Test,
void SetupCodec() {
TestVp8Simulcast::DefaultSettings(
- &codec_,
- static_cast<const int*>(kTestTemporalLayerProfile));
+ &codec_, static_cast<const int*>(kTestTemporalLayerProfile));
EXPECT_EQ(0, adapter_->InitEncode(&codec_, 1, 1200));
adapter_->RegisterEncodeCompleteCallback(this);
}
diff --git a/webrtc/modules/video_coding/codecs/vp8/simulcast_unittest.cc b/webrtc/modules/video_coding/codecs/vp8/simulcast_unittest.cc
index 373a55237f..f23affee41 100644
--- a/webrtc/modules/video_coding/codecs/vp8/simulcast_unittest.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/simulcast_unittest.cc
@@ -13,18 +13,14 @@
namespace webrtc {
namespace testing {
-class TestVp8Impl
- : public TestVp8Simulcast {
+class TestVp8Impl : public TestVp8Simulcast {
public:
TestVp8Impl()
- : TestVp8Simulcast(VP8Encoder::Create(), VP8Decoder::Create()) {}
+ : TestVp8Simulcast(VP8Encoder::Create(), VP8Decoder::Create()) {}
+
protected:
- virtual void SetUp() {
- TestVp8Simulcast::SetUp();
- }
- virtual void TearDown() {
- TestVp8Simulcast::TearDown();
- }
+ virtual void SetUp() { TestVp8Simulcast::SetUp(); }
+ virtual void TearDown() { TestVp8Simulcast::TearDown(); }
};
TEST_F(TestVp8Impl, TestKeyFrameRequestsOnAllStreams) {
diff --git a/webrtc/modules/video_coding/codecs/vp8/simulcast_unittest.h b/webrtc/modules/video_coding/codecs/vp8/simulcast_unittest.h
index e4fc986545..7a7a2c253b 100644
--- a/webrtc/modules/video_coding/codecs/vp8/simulcast_unittest.h
+++ b/webrtc/modules/video_coding/codecs/vp8/simulcast_unittest.h
@@ -14,10 +14,11 @@
#include <algorithm>
#include <vector>
+#include "webrtc/base/checks.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/common.h"
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
-#include "webrtc/modules/video_coding/codecs/interface/mock/mock_video_codec_interface.h"
+#include "webrtc/modules/video_coding/include/mock/mock_video_codec_interface.h"
#include "webrtc/modules/video_coding/codecs/vp8/include/vp8.h"
#include "webrtc/modules/video_coding/codecs/vp8/temporal_layers.h"
#include "webrtc/video_frame.h"
@@ -43,10 +44,8 @@ const int kMinBitrates[kNumberOfSimulcastStreams] = {50, 150, 600};
const int kTargetBitrates[kNumberOfSimulcastStreams] = {100, 450, 1000};
const int kDefaultTemporalLayerProfile[3] = {3, 3, 3};
-template<typename T> void SetExpectedValues3(T value0,
- T value1,
- T value2,
- T* expected_values) {
+template <typename T>
+void SetExpectedValues3(T value0, T value1, T value2, T* expected_values) {
expected_values[0] = value0;
expected_values[1] = value1;
expected_values[2] = value2;
@@ -54,15 +53,14 @@ template<typename T> void SetExpectedValues3(T value0,
class Vp8TestEncodedImageCallback : public EncodedImageCallback {
public:
- Vp8TestEncodedImageCallback()
- : picture_id_(-1) {
+ Vp8TestEncodedImageCallback() : picture_id_(-1) {
memset(temporal_layer_, -1, sizeof(temporal_layer_));
memset(layer_sync_, false, sizeof(layer_sync_));
}
~Vp8TestEncodedImageCallback() {
- delete [] encoded_key_frame_._buffer;
- delete [] encoded_frame_._buffer;
+ delete[] encoded_key_frame_._buffer;
+ delete[] encoded_frame_._buffer;
}
virtual int32_t Encoded(const EncodedImage& encoded_image,
@@ -71,22 +69,20 @@ class Vp8TestEncodedImageCallback : public EncodedImageCallback {
// Only store the base layer.
if (codec_specific_info->codecSpecific.VP8.simulcastIdx == 0) {
if (encoded_image._frameType == kVideoFrameKey) {
- delete [] encoded_key_frame_._buffer;
+ delete[] encoded_key_frame_._buffer;
encoded_key_frame_._buffer = new uint8_t[encoded_image._size];
encoded_key_frame_._size = encoded_image._size;
encoded_key_frame_._length = encoded_image._length;
encoded_key_frame_._frameType = kVideoFrameKey;
encoded_key_frame_._completeFrame = encoded_image._completeFrame;
- memcpy(encoded_key_frame_._buffer,
- encoded_image._buffer,
+ memcpy(encoded_key_frame_._buffer, encoded_image._buffer,
encoded_image._length);
} else {
- delete [] encoded_frame_._buffer;
+ delete[] encoded_frame_._buffer;
encoded_frame_._buffer = new uint8_t[encoded_image._size];
encoded_frame_._size = encoded_image._size;
encoded_frame_._length = encoded_image._length;
- memcpy(encoded_frame_._buffer,
- encoded_image._buffer,
+ memcpy(encoded_frame_._buffer, encoded_image._buffer,
encoded_image._length);
}
}
@@ -97,8 +93,10 @@ class Vp8TestEncodedImageCallback : public EncodedImageCallback {
codec_specific_info->codecSpecific.VP8.temporalIdx;
return 0;
}
- void GetLastEncodedFrameInfo(int* picture_id, int* temporal_layer,
- bool* layer_sync, int stream) {
+ void GetLastEncodedFrameInfo(int* picture_id,
+ int* temporal_layer,
+ bool* layer_sync,
+ int stream) {
*picture_id = picture_id_;
*temporal_layer = temporal_layer_[stream];
*layer_sync = layer_sync_[stream];
@@ -120,10 +118,8 @@ class Vp8TestEncodedImageCallback : public EncodedImageCallback {
class Vp8TestDecodedImageCallback : public DecodedImageCallback {
public:
- Vp8TestDecodedImageCallback()
- : decoded_frames_(0) {
- }
- virtual int32_t Decoded(VideoFrame& decoded_image) {
+ Vp8TestDecodedImageCallback() : decoded_frames_(0) {}
+ int32_t Decoded(VideoFrame& decoded_image) override {
for (int i = 0; i < decoded_image.width(); ++i) {
EXPECT_NEAR(kColorY, decoded_image.buffer(kYPlane)[i], 1);
}
@@ -136,9 +132,11 @@ class Vp8TestDecodedImageCallback : public DecodedImageCallback {
decoded_frames_++;
return 0;
}
- int DecodedFrames() {
- return decoded_frames_;
+ int32_t Decoded(VideoFrame& decoded_image, int64_t decode_time_ms) override {
+ RTC_NOTREACHED();
+ return -1;
}
+ int DecodedFrames() { return decoded_frames_; }
private:
int decoded_frames_;
@@ -161,8 +159,7 @@ class SkipEncodingUnusedStreamsTest {
std::vector<unsigned int> configured_bitrates;
for (std::vector<TemporalLayers*>::const_iterator it =
spy_factory->spying_layers_.begin();
- it != spy_factory->spying_layers_.end();
- ++it) {
+ it != spy_factory->spying_layers_.end(); ++it) {
configured_bitrates.push_back(
static_cast<SpyingTemporalLayers*>(*it)->configured_bitrate_);
}
@@ -185,8 +182,8 @@ class SkipEncodingUnusedStreamsTest {
int framerate,
vpx_codec_enc_cfg_t* cfg) override {
configured_bitrate_ = bitrate_kbit;
- return layers_->ConfigureBitrates(
- bitrate_kbit, max_bitrate_kbit, framerate, cfg);
+ return layers_->ConfigureBitrates(bitrate_kbit, max_bitrate_kbit,
+ framerate, cfg);
}
void PopulateCodecSpecific(bool base_layer_sync,
@@ -228,16 +225,15 @@ class SkipEncodingUnusedStreamsTest {
class TestVp8Simulcast : public ::testing::Test {
public:
TestVp8Simulcast(VP8Encoder* encoder, VP8Decoder* decoder)
- : encoder_(encoder),
- decoder_(decoder) {}
+ : encoder_(encoder), decoder_(decoder) {}
  // Creates a VideoFrame from |plane_colors|.
static void CreateImage(VideoFrame* frame, int plane_colors[kNumOfPlanes]) {
for (int plane_num = 0; plane_num < kNumOfPlanes; ++plane_num) {
- int width = (plane_num != kYPlane ? (frame->width() + 1) / 2 :
- frame->width());
- int height = (plane_num != kYPlane ? (frame->height() + 1) / 2 :
- frame->height());
+ int width =
+ (plane_num != kYPlane ? (frame->width() + 1) / 2 : frame->width());
+ int height =
+ (plane_num != kYPlane ? (frame->height() + 1) / 2 : frame->height());
PlaneType plane_type = static_cast<PlaneType>(plane_num);
uint8_t* data = frame->buffer(plane_type);
// Setting allocated area to zero - setting only image size to
@@ -267,24 +263,15 @@ class TestVp8Simulcast : public ::testing::Test {
settings->height = kDefaultHeight;
settings->numberOfSimulcastStreams = kNumberOfSimulcastStreams;
ASSERT_EQ(3, kNumberOfSimulcastStreams);
- ConfigureStream(kDefaultWidth / 4, kDefaultHeight / 4,
- kMaxBitrates[0],
- kMinBitrates[0],
- kTargetBitrates[0],
- &settings->simulcastStream[0],
- temporal_layer_profile[0]);
- ConfigureStream(kDefaultWidth / 2, kDefaultHeight / 2,
- kMaxBitrates[1],
- kMinBitrates[1],
- kTargetBitrates[1],
- &settings->simulcastStream[1],
- temporal_layer_profile[1]);
- ConfigureStream(kDefaultWidth, kDefaultHeight,
- kMaxBitrates[2],
- kMinBitrates[2],
- kTargetBitrates[2],
- &settings->simulcastStream[2],
- temporal_layer_profile[2]);
+ ConfigureStream(kDefaultWidth / 4, kDefaultHeight / 4, kMaxBitrates[0],
+ kMinBitrates[0], kTargetBitrates[0],
+ &settings->simulcastStream[0], temporal_layer_profile[0]);
+ ConfigureStream(kDefaultWidth / 2, kDefaultHeight / 2, kMaxBitrates[1],
+ kMinBitrates[1], kTargetBitrates[1],
+ &settings->simulcastStream[1], temporal_layer_profile[1]);
+ ConfigureStream(kDefaultWidth, kDefaultHeight, kMaxBitrates[2],
+ kMinBitrates[2], kTargetBitrates[2],
+ &settings->simulcastStream[2], temporal_layer_profile[2]);
settings->codecSpecific.VP8.resilience = kResilientStream;
settings->codecSpecific.VP8.denoisingOn = true;
settings->codecSpecific.VP8.errorConcealmentOn = false;
@@ -312,9 +299,7 @@ class TestVp8Simulcast : public ::testing::Test {
}
protected:
- virtual void SetUp() {
- SetUpCodec(kDefaultTemporalLayerProfile);
- }
+ virtual void SetUp() { SetUpCodec(kDefaultTemporalLayerProfile); }
virtual void SetUpCodec(const int* temporal_layer_profile) {
encoder_->RegisterEncodeCompleteCallback(&encoder_callback_);
@@ -323,14 +308,14 @@ class TestVp8Simulcast : public ::testing::Test {
EXPECT_EQ(0, encoder_->InitEncode(&settings_, 1, 1200));
EXPECT_EQ(0, decoder_->InitDecode(&settings_, 1));
int half_width = (kDefaultWidth + 1) / 2;
- input_frame_.CreateEmptyFrame(kDefaultWidth, kDefaultHeight,
- kDefaultWidth, half_width, half_width);
+ input_frame_.CreateEmptyFrame(kDefaultWidth, kDefaultHeight, kDefaultWidth,
+ half_width, half_width);
memset(input_frame_.buffer(kYPlane), 0,
- input_frame_.allocated_size(kYPlane));
+ input_frame_.allocated_size(kYPlane));
memset(input_frame_.buffer(kUPlane), 0,
- input_frame_.allocated_size(kUPlane));
+ input_frame_.allocated_size(kUPlane));
memset(input_frame_.buffer(kVPlane), 0,
- input_frame_.allocated_size(kVPlane));
+ input_frame_.allocated_size(kVPlane));
}
virtual void TearDown() {
@@ -342,28 +327,34 @@ class TestVp8Simulcast : public ::testing::Test {
ASSERT_GE(expected_video_streams, 0);
ASSERT_LE(expected_video_streams, kNumberOfSimulcastStreams);
if (expected_video_streams >= 1) {
- EXPECT_CALL(encoder_callback_, Encoded(
- AllOf(Field(&EncodedImage::_frameType, frame_type),
- Field(&EncodedImage::_encodedWidth, kDefaultWidth / 4),
- Field(&EncodedImage::_encodedHeight, kDefaultHeight / 4)), _, _)
- )
+ EXPECT_CALL(
+ encoder_callback_,
+ Encoded(
+ AllOf(Field(&EncodedImage::_frameType, frame_type),
+ Field(&EncodedImage::_encodedWidth, kDefaultWidth / 4),
+ Field(&EncodedImage::_encodedHeight, kDefaultHeight / 4)),
+ _, _))
.Times(1)
.WillRepeatedly(Return(0));
}
if (expected_video_streams >= 2) {
- EXPECT_CALL(encoder_callback_, Encoded(
- AllOf(Field(&EncodedImage::_frameType, frame_type),
- Field(&EncodedImage::_encodedWidth, kDefaultWidth / 2),
- Field(&EncodedImage::_encodedHeight, kDefaultHeight / 2)), _, _)
- )
+ EXPECT_CALL(
+ encoder_callback_,
+ Encoded(
+ AllOf(Field(&EncodedImage::_frameType, frame_type),
+ Field(&EncodedImage::_encodedWidth, kDefaultWidth / 2),
+ Field(&EncodedImage::_encodedHeight, kDefaultHeight / 2)),
+ _, _))
.Times(1)
.WillRepeatedly(Return(0));
}
if (expected_video_streams >= 3) {
- EXPECT_CALL(encoder_callback_, Encoded(
- AllOf(Field(&EncodedImage::_frameType, frame_type),
- Field(&EncodedImage::_encodedWidth, kDefaultWidth),
- Field(&EncodedImage::_encodedHeight, kDefaultHeight)), _, _))
+ EXPECT_CALL(
+ encoder_callback_,
+ Encoded(AllOf(Field(&EncodedImage::_frameType, frame_type),
+ Field(&EncodedImage::_encodedWidth, kDefaultWidth),
+ Field(&EncodedImage::_encodedHeight, kDefaultHeight)),
+ _, _))
.Times(1)
.WillRepeatedly(Return(0));
}
@@ -477,8 +468,8 @@ class TestVp8Simulcast : public ::testing::Test {
void TestPaddingOneStreamTwoMaxedOut() {
// We are just below the limit for sending the third stream, so we should get
// first stream's rate maxed out at |targetBitrate|, second at |maxBitrate|.
- encoder_->SetRates(kTargetBitrates[0] + kTargetBitrates[1] +
- kMinBitrates[2] - 1, 30);
+ encoder_->SetRates(
+ kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] - 1, 30);
std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
kVideoFrameDelta);
ExpectStreams(kVideoFrameKey, 2);
@@ -491,8 +482,8 @@ class TestVp8Simulcast : public ::testing::Test {
void TestSendAllStreams() {
// We have just enough to send all streams.
- encoder_->SetRates(kTargetBitrates[0] + kTargetBitrates[1] +
- kMinBitrates[2], 30);
+ encoder_->SetRates(
+ kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2], 30);
std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
kVideoFrameDelta);
ExpectStreams(kVideoFrameKey, 3);
@@ -505,8 +496,7 @@ class TestVp8Simulcast : public ::testing::Test {
void TestDisablingStreams() {
// We should get three media streams.
- encoder_->SetRates(kMaxBitrates[0] + kMaxBitrates[1] +
- kMaxBitrates[2], 30);
+ encoder_->SetRates(kMaxBitrates[0] + kMaxBitrates[1] + kMaxBitrates[2], 30);
std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
kVideoFrameDelta);
ExpectStreams(kVideoFrameKey, 3);
@@ -517,8 +507,8 @@ class TestVp8Simulcast : public ::testing::Test {
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
// We should only get two streams and padding for one.
- encoder_->SetRates(kTargetBitrates[0] + kTargetBitrates[1] +
- kMinBitrates[2] / 2, 30);
+ encoder_->SetRates(
+ kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] / 2, 30);
ExpectStreams(kVideoFrameDelta, 2);
input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
@@ -537,16 +527,16 @@ class TestVp8Simulcast : public ::testing::Test {
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
// We should only get two streams and padding for one.
- encoder_->SetRates(kTargetBitrates[0] + kTargetBitrates[1] +
- kMinBitrates[2] / 2, 30);
+ encoder_->SetRates(
+ kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] / 2, 30);
// We get a key frame because a new stream is being enabled.
ExpectStreams(kVideoFrameKey, 2);
input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
// We should get all three streams.
- encoder_->SetRates(kTargetBitrates[0] + kTargetBitrates[1] +
- kTargetBitrates[2], 30);
+ encoder_->SetRates(
+ kTargetBitrates[0] + kTargetBitrates[1] + kTargetBitrates[2], 30);
// We get a key frame because a new stream is being enabled.
ExpectStreams(kVideoFrameKey, 3);
input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
@@ -571,20 +561,20 @@ class TestVp8Simulcast : public ::testing::Test {
input_frame_.CreateEmptyFrame(settings_.width, settings_.height,
settings_.width, half_width, half_width);
memset(input_frame_.buffer(kYPlane), 0,
- input_frame_.allocated_size(kYPlane));
+ input_frame_.allocated_size(kYPlane));
memset(input_frame_.buffer(kUPlane), 0,
- input_frame_.allocated_size(kUPlane));
+ input_frame_.allocated_size(kUPlane));
memset(input_frame_.buffer(kVPlane), 0,
- input_frame_.allocated_size(kVPlane));
+ input_frame_.allocated_size(kVPlane));
// The for loop above did not set the bitrate of the highest layer.
- settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1].
- maxBitrate = 0;
+ settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1]
+ .maxBitrate = 0;
// The highest layer has to correspond to the non-simulcast resolution.
- settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1].
- width = settings_.width;
- settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1].
- height = settings_.height;
+ settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1].width =
+ settings_.width;
+ settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1].height =
+ settings_.height;
EXPECT_EQ(0, encoder_->InitEncode(&settings_, 1, 1200));
// Encode one frame and verify.
@@ -612,21 +602,17 @@ class TestVp8Simulcast : public ::testing::Test {
input_frame_.CreateEmptyFrame(settings_.width, settings_.height,
settings_.width, half_width, half_width);
memset(input_frame_.buffer(kYPlane), 0,
- input_frame_.allocated_size(kYPlane));
+ input_frame_.allocated_size(kYPlane));
memset(input_frame_.buffer(kUPlane), 0,
- input_frame_.allocated_size(kUPlane));
+ input_frame_.allocated_size(kUPlane));
memset(input_frame_.buffer(kVPlane), 0,
- input_frame_.allocated_size(kVPlane));
+ input_frame_.allocated_size(kVPlane));
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
}
- void TestSwitchingToOneStream() {
- SwitchingToOneStream(1024, 768);
- }
+ void TestSwitchingToOneStream() { SwitchingToOneStream(1024, 768); }
- void TestSwitchingToOneOddStream() {
- SwitchingToOneStream(1023, 769);
- }
+ void TestSwitchingToOneOddStream() { SwitchingToOneStream(1023, 769); }
void TestRPSIEncoder() {
Vp8TestEncodedImageCallback encoder_callback;
@@ -777,67 +763,55 @@ class TestVp8Simulcast : public ::testing::Test {
encoder_->RegisterEncodeCompleteCallback(&encoder_callback);
encoder_->SetRates(kMaxBitrates[2], 30); // To get all three streams.
- int expected_temporal_idx[3] = { -1, -1, -1};
+ int expected_temporal_idx[3] = {-1, -1, -1};
bool expected_layer_sync[3] = {false, false, false};
// First frame: #0.
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
SetExpectedValues3<int>(0, 0, 0, expected_temporal_idx);
SetExpectedValues3<bool>(true, true, true, expected_layer_sync);
- VerifyTemporalIdxAndSyncForAllSpatialLayers(&encoder_callback,
- expected_temporal_idx,
- expected_layer_sync,
- 3);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
// Next frame: #1.
input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
SetExpectedValues3<int>(2, 2, 2, expected_temporal_idx);
SetExpectedValues3<bool>(true, true, true, expected_layer_sync);
- VerifyTemporalIdxAndSyncForAllSpatialLayers(&encoder_callback,
- expected_temporal_idx,
- expected_layer_sync,
- 3);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
// Next frame: #2.
input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
SetExpectedValues3<int>(1, 1, 1, expected_temporal_idx);
SetExpectedValues3<bool>(true, true, true, expected_layer_sync);
- VerifyTemporalIdxAndSyncForAllSpatialLayers(&encoder_callback,
- expected_temporal_idx,
- expected_layer_sync,
- 3);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
// Next frame: #3.
input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
SetExpectedValues3<int>(2, 2, 2, expected_temporal_idx);
SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
- VerifyTemporalIdxAndSyncForAllSpatialLayers(&encoder_callback,
- expected_temporal_idx,
- expected_layer_sync,
- 3);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
// Next frame: #4.
input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
SetExpectedValues3<int>(0, 0, 0, expected_temporal_idx);
SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
- VerifyTemporalIdxAndSyncForAllSpatialLayers(&encoder_callback,
- expected_temporal_idx,
- expected_layer_sync,
- 3);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
// Next frame: #5.
input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
SetExpectedValues3<int>(2, 2, 2, expected_temporal_idx);
SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
- VerifyTemporalIdxAndSyncForAllSpatialLayers(&encoder_callback,
- expected_temporal_idx,
- expected_layer_sync,
- 3);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
}
// Test the layer pattern and sync flag for various spatial-temporal patterns.
@@ -858,67 +832,55 @@ class TestVp8Simulcast : public ::testing::Test {
encoder_->RegisterEncodeCompleteCallback(&encoder_callback);
encoder_->SetRates(kMaxBitrates[2], 30); // To get all three streams.
- int expected_temporal_idx[3] = { -1, -1, -1};
+ int expected_temporal_idx[3] = {-1, -1, -1};
bool expected_layer_sync[3] = {false, false, false};
// First frame: #0.
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
SetExpectedValues3<int>(0, 0, 255, expected_temporal_idx);
SetExpectedValues3<bool>(true, true, false, expected_layer_sync);
- VerifyTemporalIdxAndSyncForAllSpatialLayers(&encoder_callback,
- expected_temporal_idx,
- expected_layer_sync,
- 3);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
// Next frame: #1.
input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
SetExpectedValues3<int>(2, 1, 255, expected_temporal_idx);
SetExpectedValues3<bool>(true, true, false, expected_layer_sync);
- VerifyTemporalIdxAndSyncForAllSpatialLayers(&encoder_callback,
- expected_temporal_idx,
- expected_layer_sync,
- 3);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
// Next frame: #2.
input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
SetExpectedValues3<int>(1, 0, 255, expected_temporal_idx);
SetExpectedValues3<bool>(true, false, false, expected_layer_sync);
- VerifyTemporalIdxAndSyncForAllSpatialLayers(&encoder_callback,
- expected_temporal_idx,
- expected_layer_sync,
- 3);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
// Next frame: #3.
input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
SetExpectedValues3<int>(2, 1, 255, expected_temporal_idx);
SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
- VerifyTemporalIdxAndSyncForAllSpatialLayers(&encoder_callback,
- expected_temporal_idx,
- expected_layer_sync,
- 3);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
// Next frame: #4.
input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
SetExpectedValues3<int>(0, 0, 255, expected_temporal_idx);
SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
- VerifyTemporalIdxAndSyncForAllSpatialLayers(&encoder_callback,
- expected_temporal_idx,
- expected_layer_sync,
- 3);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
// Next frame: #5.
input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
SetExpectedValues3<int>(2, 1, 255, expected_temporal_idx);
SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
- VerifyTemporalIdxAndSyncForAllSpatialLayers(&encoder_callback,
- expected_temporal_idx,
- expected_layer_sync,
- 3);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
}
void TestStrideEncodeDecode() {
@@ -932,8 +894,8 @@ class TestVp8Simulcast : public ::testing::Test {
// 1. stride > width 2. stride_y != stride_uv/2
int stride_y = kDefaultWidth + 20;
int stride_uv = ((kDefaultWidth + 1) / 2) + 5;
- input_frame_.CreateEmptyFrame(kDefaultWidth, kDefaultHeight,
- stride_y, stride_uv, stride_uv);
+ input_frame_.CreateEmptyFrame(kDefaultWidth, kDefaultHeight, stride_y,
+ stride_uv, stride_uv);
// Set color.
int plane_offset[kNumOfPlanes];
plane_offset[kYPlane] = kColorY;
@@ -963,10 +925,9 @@ class TestVp8Simulcast : public ::testing::Test {
void TestSkipEncodingUnusedStreams() {
SkipEncodingUnusedStreamsTest test;
std::vector<unsigned int> configured_bitrate =
- test.RunTest(encoder_.get(),
- &settings_,
- 1); // Target bit rate 1, to force all streams but the
- // base one to be exceeding bandwidth constraints.
+ test.RunTest(encoder_.get(), &settings_,
+ 1); // Target bit rate 1, to force all streams but the
+ // base one to exceed bandwidth constraints.
EXPECT_EQ(static_cast<size_t>(kNumberOfSimulcastStreams),
configured_bitrate.size());
@@ -975,8 +936,7 @@ class TestVp8Simulcast : public ::testing::Test {
int stream = 0;
for (std::vector<unsigned int>::const_iterator it =
configured_bitrate.begin();
- it != configured_bitrate.end();
- ++it) {
+ it != configured_bitrate.end(); ++it) {
if (stream == 0) {
EXPECT_EQ(min_bitrate, *it);
} else {
diff --git a/webrtc/modules/video_coding/codecs/vp8/temporal_layers.h b/webrtc/modules/video_coding/codecs/vp8/temporal_layers.h
index 7607210d5c..47112c64aa 100644
--- a/webrtc/modules/video_coding/codecs/vp8/temporal_layers.h
+++ b/webrtc/modules/video_coding/codecs/vp8/temporal_layers.h
@@ -14,7 +14,8 @@
#include "vpx/vpx_encoder.h"
-#include "webrtc/common_video/interface/video_image.h"
+#include "webrtc/common.h"
+#include "webrtc/common_video/include/video_image.h"
#include "webrtc/typedefs.h"
namespace webrtc {
@@ -30,6 +31,8 @@ class TemporalLayers {
virtual ~Factory() {}
virtual TemporalLayers* Create(int temporal_layers,
uint8_t initial_tl0_pic_idx) const;
+ static const ConfigOptionID identifier =
+ ConfigOptionID::kTemporalLayersFactory;
};
virtual ~TemporalLayers() {}
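Note: the new identifier member keys TemporalLayers::Factory into the webrtc::Config option map declared in the newly included webrtc/common.h. A minimal lookup sketch, assuming Config's templated Set()/Get() interface (Set() takes ownership of the pointer):

    webrtc::Config options;
    options.Set<webrtc::TemporalLayers::Factory>(
        new webrtc::TemporalLayers::Factory());
    const webrtc::TemporalLayers::Factory& factory =
        options.Get<webrtc::TemporalLayers::Factory>();
    webrtc::TemporalLayers* layers = factory.Create(3, 0);  // 3 layers, tl0 idx 0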
diff --git a/webrtc/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc b/webrtc/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc
index 5ec674f16a..c3d77da063 100644
--- a/webrtc/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc
@@ -11,12 +11,12 @@
#include <stdio.h>
#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/base/checks.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
#include "webrtc/modules/video_coding/codecs/vp8/include/vp8.h"
#include "webrtc/system_wrappers/include/tick_util.h"
#include "webrtc/test/testsupport/fileutils.h"
-#include "webrtc/test/testsupport/gtest_disable.h"
namespace webrtc {
@@ -78,7 +78,11 @@ class Vp8UnitTestDecodeCompleteCallback : public webrtc::DecodedImageCallback {
public:
explicit Vp8UnitTestDecodeCompleteCallback(VideoFrame* frame)
: decoded_frame_(frame), decode_complete(false) {}
- int Decoded(webrtc::VideoFrame& frame);
+ int32_t Decoded(VideoFrame& frame) override;
+ int32_t Decoded(VideoFrame& frame, int64_t decode_time_ms) override {
+ RTC_NOTREACHED();
+ return -1;
+ }
bool DecodeComplete();
private:
@@ -216,7 +220,12 @@ TEST_F(TestVp8Impl, EncoderParameterTest) {
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->InitDecode(&codec_inst_, 1));
}
-TEST_F(TestVp8Impl, DISABLED_ON_ANDROID(AlignedStrideEncodeDecode)) {
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_AlignedStrideEncodeDecode DISABLED_AlignedStrideEncodeDecode
+#else
+#define MAYBE_AlignedStrideEncodeDecode AlignedStrideEncodeDecode
+#endif
+TEST_F(TestVp8Impl, MAYBE_AlignedStrideEncodeDecode) {
SetUpEncodeDecode();
encoder_->Encode(input_frame_, NULL, NULL);
EXPECT_GT(WaitForEncodedFrame(), 0u);
@@ -232,7 +241,12 @@ TEST_F(TestVp8Impl, DISABLED_ON_ANDROID(AlignedStrideEncodeDecode)) {
EXPECT_EQ(kTestNtpTimeMs, decoded_frame_.ntp_time_ms());
}
-TEST_F(TestVp8Impl, DISABLED_ON_ANDROID(DecodeWithACompleteKeyFrame)) {
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_DecodeWithACompleteKeyFrame DISABLED_DecodeWithACompleteKeyFrame
+#else
+#define MAYBE_DecodeWithACompleteKeyFrame DecodeWithACompleteKeyFrame
+#endif
+TEST_F(TestVp8Impl, MAYBE_DecodeWithACompleteKeyFrame) {
SetUpEncodeDecode();
encoder_->Encode(input_frame_, NULL, NULL);
EXPECT_GT(WaitForEncodedFrame(), 0u);
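Note: two patterns recur in this test change. DISABLED_ON_ANDROID() from the removed gtest_disable.h is replaced by the stock gtest MAYBE_ macro idiom, and DecodedImageCallback implementations now override both Decoded() overloads, flagging the unused one with RTC_NOTREACHED() from the newly included webrtc/base/checks.h. A minimal callback sketch under those assumptions:

    class TestDecodeCallback : public webrtc::DecodedImageCallback {
     public:
      int32_t Decoded(webrtc::VideoFrame& frame) override { return 0; }
      int32_t Decoded(webrtc::VideoFrame& frame, int64_t decode_time_ms) override {
        RTC_NOTREACHED();  // This test never reports decode time.
        return -1;
      }
    };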
diff --git a/webrtc/modules/video_coding/codecs/vp8/vp8_factory.h b/webrtc/modules/video_coding/codecs/vp8/vp8_factory.h
index 84745ea5a1..52f8aa30b8 100644
--- a/webrtc/modules/video_coding/codecs/vp8/vp8_factory.h
+++ b/webrtc/modules/video_coding/codecs/vp8/vp8_factory.h
@@ -32,4 +32,3 @@ class VP8EncoderFactoryConfig {
} // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_VP8_FACTORY_H_
-
diff --git a/webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc b/webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc
index 029ccd1f27..5a04f6a43d 100644
--- a/webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc
@@ -16,7 +16,7 @@
#include <algorithm>
// NOTE(ajm): Path provided by gyp.
-#include "libyuv/scale.h" // NOLINT
+#include "libyuv/scale.h" // NOLINT
#include "libyuv/convert.h" // NOLINT
#include "webrtc/base/checks.h"
@@ -24,8 +24,8 @@
#include "webrtc/common.h"
#include "webrtc/common_types.h"
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
#include "webrtc/modules/video_coding/codecs/vp8/include/vp8_common_types.h"
#include "webrtc/modules/video_coding/codecs/vp8/screenshare_layers.h"
#include "webrtc/modules/video_coding/codecs/vp8/temporal_layers.h"
@@ -68,10 +68,9 @@ std::vector<int> GetStreamBitratesKbps(const VideoCodec& codec,
std::vector<int> bitrates_kbps(codec.numberOfSimulcastStreams);
// Allocate min -> target bitrates as long as we have bitrate to spend.
size_t last_active_stream = 0;
- for (size_t i = 0;
- i < static_cast<size_t>(codec.numberOfSimulcastStreams) &&
- bitrate_to_allocate_kbps >=
- static_cast<int>(codec.simulcastStream[i].minBitrate);
+ for (size_t i = 0; i < static_cast<size_t>(codec.numberOfSimulcastStreams) &&
+ bitrate_to_allocate_kbps >=
+ static_cast<int>(codec.simulcastStream[i].minBitrate);
++i) {
last_active_stream = i;
int allocated_bitrate_kbps =
@@ -132,7 +131,7 @@ bool ValidSimulcastResolutions(const VideoCodec& codec, int num_streams) {
return true;
}
-int NumStreamsDisabled(std::vector<bool>& streams) {
+int NumStreamsDisabled(const std::vector<bool>& streams) {
int num_disabled = 0;
for (bool stream : streams) {
if (!stream)
@@ -183,7 +182,7 @@ int VP8EncoderImpl::Release() {
while (!encoded_images_.empty()) {
EncodedImage& image = encoded_images_.back();
- delete [] image._buffer;
+ delete[] image._buffer;
encoded_images_.pop_back();
}
while (!encoders_.empty()) {
@@ -289,10 +288,8 @@ int VP8EncoderImpl::SetRates(uint32_t new_bitrate_kbit,
target_bitrate = tl0_bitrate;
}
configurations_[i].rc_target_bitrate = target_bitrate;
- temporal_layers_[stream_idx]->ConfigureBitrates(target_bitrate,
- max_bitrate,
- framerate,
- &configurations_[i]);
+ temporal_layers_[stream_idx]->ConfigureBitrates(
+ target_bitrate, max_bitrate, framerate, &configurations_[i]);
if (vpx_codec_enc_config_set(&encoders_[i], &configurations_[i])) {
return WEBRTC_VIDEO_CODEC_ERROR;
}
@@ -301,6 +298,10 @@ int VP8EncoderImpl::SetRates(uint32_t new_bitrate_kbit,
return WEBRTC_VIDEO_CODEC_OK;
}
+const char* VP8EncoderImpl::ImplementationName() const {
+ return "libvpx";
+}
+
void VP8EncoderImpl::SetStreamState(bool send_stream,
int stream_idx) {
if (send_stream && !send_stream_[stream_idx]) {
@@ -311,8 +312,8 @@ void VP8EncoderImpl::SetStreamState(bool send_stream,
}
void VP8EncoderImpl::SetupTemporalLayers(int num_streams,
- int num_temporal_layers,
- const VideoCodec& codec) {
+ int num_temporal_layers,
+ const VideoCodec& codec) {
const Config default_options;
const TemporalLayers::Factory& tl_factory =
(codec.extra_options ? codec.extra_options : &default_options)
@@ -330,15 +331,16 @@ void VP8EncoderImpl::SetupTemporalLayers(int num_streams,
for (int i = 0; i < num_streams; ++i) {
// TODO(andresp): crash if layers is invalid.
int layers = codec.simulcastStream[i].numberOfTemporalLayers;
- if (layers < 1) layers = 1;
+ if (layers < 1)
+ layers = 1;
temporal_layers_.push_back(tl_factory.Create(layers, rand()));
}
}
}
int VP8EncoderImpl::InitEncode(const VideoCodec* inst,
- int number_of_cores,
- size_t /*maxPayloadSize */) {
+ int number_of_cores,
+ size_t /*maxPayloadSize */) {
if (inst == NULL) {
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
}
@@ -375,12 +377,13 @@ int VP8EncoderImpl::InitEncode(const VideoCodec* inst,
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
}
- int num_temporal_layers = doing_simulcast ?
- inst->simulcastStream[0].numberOfTemporalLayers :
- inst->codecSpecific.VP8.numberOfTemporalLayers;
+ int num_temporal_layers =
+ doing_simulcast ? inst->simulcastStream[0].numberOfTemporalLayers
+ : inst->codecSpecific.VP8.numberOfTemporalLayers;
// TODO(andresp): crash if num temporal layers is bananas.
- if (num_temporal_layers < 1) num_temporal_layers = 1;
+ if (num_temporal_layers < 1)
+ num_temporal_layers = 1;
SetupTemporalLayers(number_of_streams, num_temporal_layers, *inst);
feedback_mode_ = inst->codecSpecific.VP8.feedbackModeOn;
@@ -410,7 +413,7 @@ int VP8EncoderImpl::InitEncode(const VideoCodec* inst,
int idx = number_of_streams - 1;
for (int i = 0; i < (number_of_streams - 1); ++i, --idx) {
int gcd = GCD(inst->simulcastStream[idx].width,
- inst->simulcastStream[idx-1].width);
+ inst->simulcastStream[idx - 1].width);
downsampling_factors_[i].num = inst->simulcastStream[idx].width / gcd;
downsampling_factors_[i].den = inst->simulcastStream[idx - 1].width / gcd;
send_stream_[i] = false;
@@ -422,20 +425,20 @@ int VP8EncoderImpl::InitEncode(const VideoCodec* inst,
}
for (int i = 0; i < number_of_streams; ++i) {
// Random start, 16 bits is enough.
- picture_id_[i] = static_cast<uint16_t>(rand()) & 0x7FFF;
+ picture_id_[i] = static_cast<uint16_t>(rand()) & 0x7FFF; // NOLINT
last_key_frame_picture_id_[i] = -1;
// allocate memory for encoded image
if (encoded_images_[i]._buffer != NULL) {
- delete [] encoded_images_[i]._buffer;
+ delete[] encoded_images_[i]._buffer;
}
- encoded_images_[i]._size = CalcBufferSize(kI420,
- codec_.width, codec_.height);
+ encoded_images_[i]._size =
+ CalcBufferSize(kI420, codec_.width, codec_.height);
encoded_images_[i]._buffer = new uint8_t[encoded_images_[i]._size];
encoded_images_[i]._completeFrame = true;
}
// populate encoder configuration with default values
- if (vpx_codec_enc_config_default(vpx_codec_vp8_cx(),
- &configurations_[0], 0)) {
+ if (vpx_codec_enc_config_default(vpx_codec_vp8_cx(), &configurations_[0],
+ 0)) {
return WEBRTC_VIDEO_CODEC_ERROR;
}
// setting the time base of the codec
@@ -459,8 +462,8 @@ int VP8EncoderImpl::InitEncode(const VideoCodec* inst,
break;
case kResilientFrames:
#ifdef INDEPENDENT_PARTITIONS
- configurations_[0]-g_error_resilient = VPX_ERROR_RESILIENT_DEFAULT |
- VPX_ERROR_RESILIENT_PARTITIONS;
+ configurations_[0].g_error_resilient =
+ VPX_ERROR_RESILIENT_DEFAULT | VPX_ERROR_RESILIENT_PARTITIONS;
break;
#else
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; // Not supported
@@ -536,20 +539,18 @@ int VP8EncoderImpl::InitEncode(const VideoCodec* inst,
// Determine number of threads based on the image size and #cores.
// TODO(fbarchard): Consider number of Simulcast layers.
- configurations_[0].g_threads = NumberOfThreads(configurations_[0].g_w,
- configurations_[0].g_h,
- number_of_cores);
+ configurations_[0].g_threads = NumberOfThreads(
+ configurations_[0].g_w, configurations_[0].g_h, number_of_cores);
// Creating a wrapper to the image - setting image data to NULL.
// Actual pointer will be set in encode. Setting align to 1, as it
// is meaningless (no memory allocation is done here).
- vpx_img_wrap(&raw_images_[0], VPX_IMG_FMT_I420, inst->width, inst->height,
- 1, NULL);
+ vpx_img_wrap(&raw_images_[0], VPX_IMG_FMT_I420, inst->width, inst->height, 1,
+ NULL);
if (encoders_.size() == 1) {
configurations_[0].rc_target_bitrate = inst->startBitrate;
- temporal_layers_[0]->ConfigureBitrates(inst->startBitrate,
- inst->maxBitrate,
+ temporal_layers_[0]->ConfigureBitrates(inst->startBitrate, inst->maxBitrate,
inst->maxFramerate,
&configurations_[0]);
} else {
@@ -641,20 +642,15 @@ int VP8EncoderImpl::InitAndSetControlSettings() {
flags |= VPX_CODEC_USE_OUTPUT_PARTITION;
if (encoders_.size() > 1) {
- int error = vpx_codec_enc_init_multi(&encoders_[0],
- vpx_codec_vp8_cx(),
- &configurations_[0],
- encoders_.size(),
- flags,
- &downsampling_factors_[0]);
+ int error = vpx_codec_enc_init_multi(&encoders_[0], vpx_codec_vp8_cx(),
+ &configurations_[0], encoders_.size(),
+ flags, &downsampling_factors_[0]);
if (error) {
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
}
} else {
- if (vpx_codec_enc_init(&encoders_[0],
- vpx_codec_vp8_cx(),
- &configurations_[0],
- flags)) {
+ if (vpx_codec_enc_init(&encoders_[0], vpx_codec_vp8_cx(),
+ &configurations_[0], flags)) {
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
}
}
@@ -671,13 +667,13 @@ int VP8EncoderImpl::InitAndSetControlSettings() {
#else
denoiser_state = kDenoiserOnAdaptive;
#endif
- vpx_codec_control(&encoders_[0], VP8E_SET_NOISE_SENSITIVITY,
- codec_.codecSpecific.VP8.denoisingOn ?
- denoiser_state : kDenoiserOff);
+ vpx_codec_control(
+ &encoders_[0], VP8E_SET_NOISE_SENSITIVITY,
+ codec_.codecSpecific.VP8.denoisingOn ? denoiser_state : kDenoiserOff);
if (encoders_.size() > 2) {
- vpx_codec_control(&encoders_[1], VP8E_SET_NOISE_SENSITIVITY,
- codec_.codecSpecific.VP8.denoisingOn ?
- denoiser_state : kDenoiserOff);
+ vpx_codec_control(
+ &encoders_[1], VP8E_SET_NOISE_SENSITIVITY,
+ codec_.codecSpecific.VP8.denoisingOn ? denoiser_state : kDenoiserOff);
}
for (size_t i = 0; i < encoders_.size(); ++i) {
// Allow more screen content to be detected as static.
@@ -710,14 +706,12 @@ uint32_t VP8EncoderImpl::MaxIntraTarget(uint32_t optimalBuffersize) {
// Don't go below 3 times the per frame bandwidth.
const uint32_t minIntraTh = 300;
- return (targetPct < minIntraTh) ? minIntraTh: targetPct;
+ return (targetPct < minIntraTh) ? minIntraTh : targetPct;
}
int VP8EncoderImpl::Encode(const VideoFrame& frame,
const CodecSpecificInfo* codec_specific_info,
const std::vector<FrameType>* frame_types) {
- TRACE_EVENT1("webrtc", "VP8::Encode", "timestamp", frame.timestamp());
-
if (!inited_)
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
if (frame.IsZeroSize())
@@ -731,7 +725,7 @@ int VP8EncoderImpl::Encode(const VideoFrame& frame,
quality_scaler_enabled_ ? quality_scaler_.GetScaledFrame(frame) : frame;
if (quality_scaler_enabled_ && (input_image.width() != codec_.width ||
- input_image.height() != codec_.height)) {
+ input_image.height() != codec_.height)) {
int ret = UpdateCodecFrameSize(input_image);
if (ret < 0)
return ret;
@@ -747,11 +741,11 @@ int VP8EncoderImpl::Encode(const VideoFrame& frame,
// Image in vpx_image_t format.
// Input image is const. VP8's raw image is not defined as const.
raw_images_[0].planes[VPX_PLANE_Y] =
- const_cast<uint8_t*>(input_image.buffer(kYPlane));
+ const_cast<uint8_t*>(input_image.buffer(kYPlane));
raw_images_[0].planes[VPX_PLANE_U] =
- const_cast<uint8_t*>(input_image.buffer(kUPlane));
+ const_cast<uint8_t*>(input_image.buffer(kUPlane));
raw_images_[0].planes[VPX_PLANE_V] =
- const_cast<uint8_t*>(input_image.buffer(kVPlane));
+ const_cast<uint8_t*>(input_image.buffer(kVPlane));
raw_images_[0].stride[VPX_PLANE_Y] = input_image.stride(kYPlane);
raw_images_[0].stride[VPX_PLANE_U] = input_image.stride(kUPlane);
@@ -760,17 +754,17 @@ int VP8EncoderImpl::Encode(const VideoFrame& frame,
for (size_t i = 1; i < encoders_.size(); ++i) {
// Scale the image down, stream by stream, by the downsampling factor.
libyuv::I420Scale(
- raw_images_[i-1].planes[VPX_PLANE_Y],
- raw_images_[i-1].stride[VPX_PLANE_Y],
- raw_images_[i-1].planes[VPX_PLANE_U],
- raw_images_[i-1].stride[VPX_PLANE_U],
- raw_images_[i-1].planes[VPX_PLANE_V],
- raw_images_[i-1].stride[VPX_PLANE_V],
- raw_images_[i-1].d_w, raw_images_[i-1].d_h,
- raw_images_[i].planes[VPX_PLANE_Y], raw_images_[i].stride[VPX_PLANE_Y],
- raw_images_[i].planes[VPX_PLANE_U], raw_images_[i].stride[VPX_PLANE_U],
- raw_images_[i].planes[VPX_PLANE_V], raw_images_[i].stride[VPX_PLANE_V],
- raw_images_[i].d_w, raw_images_[i].d_h, libyuv::kFilterBilinear);
+ raw_images_[i - 1].planes[VPX_PLANE_Y],
+ raw_images_[i - 1].stride[VPX_PLANE_Y],
+ raw_images_[i - 1].planes[VPX_PLANE_U],
+ raw_images_[i - 1].stride[VPX_PLANE_U],
+ raw_images_[i - 1].planes[VPX_PLANE_V],
+ raw_images_[i - 1].stride[VPX_PLANE_V], raw_images_[i - 1].d_w,
+ raw_images_[i - 1].d_h, raw_images_[i].planes[VPX_PLANE_Y],
+ raw_images_[i].stride[VPX_PLANE_Y], raw_images_[i].planes[VPX_PLANE_U],
+ raw_images_[i].stride[VPX_PLANE_U], raw_images_[i].planes[VPX_PLANE_V],
+ raw_images_[i].stride[VPX_PLANE_V], raw_images_[i].d_w,
+ raw_images_[i].d_h, libyuv::kFilterBilinear);
}
vpx_enc_frame_flags_t flags[kMaxSimulcastStreams];
for (size_t i = 0; i < encoders_.size(); ++i) {
@@ -805,8 +799,8 @@ int VP8EncoderImpl::Encode(const VideoFrame& frame,
if (send_key_frame) {
// Adapt the size of the key frame when in screenshare with 1 temporal
// layer.
- if (encoders_.size() == 1 && codec_.mode == kScreensharing
- && codec_.codecSpecific.VP8.numberOfTemporalLayers <= 1) {
+ if (encoders_.size() == 1 && codec_.mode == kScreensharing &&
+ codec_.codecSpecific.VP8.numberOfTemporalLayers <= 1) {
const uint32_t forceKeyFrameIntraTh = 100;
vpx_codec_control(&(encoders_[0]), VP8E_SET_MAX_INTRA_BITRATE_PCT,
forceKeyFrameIntraTh);
@@ -818,13 +812,12 @@ int VP8EncoderImpl::Encode(const VideoFrame& frame,
}
std::fill(key_frame_request_.begin(), key_frame_request_.end(), false);
} else if (codec_specific_info &&
- codec_specific_info->codecType == kVideoCodecVP8) {
+ codec_specific_info->codecType == kVideoCodecVP8) {
if (feedback_mode_) {
// Handle RPSI and SLI messages and set up the appropriate encode flags.
bool sendRefresh = false;
if (codec_specific_info->codecSpecific.VP8.hasReceivedRPSI) {
- rps_.ReceivedRPSI(
- codec_specific_info->codecSpecific.VP8.pictureIdRPSI);
+ rps_.ReceivedRPSI(codec_specific_info->codecSpecific.VP8.pictureIdRPSI);
}
if (codec_specific_info->codecSpecific.VP8.hasReceivedSLI) {
sendRefresh = rps_.ReceivedSLI(input_image.timestamp());
@@ -876,8 +869,7 @@ int VP8EncoderImpl::Encode(const VideoFrame& frame,
}
vpx_codec_control(&encoders_[i], VP8E_SET_FRAME_FLAGS, flags[stream_idx]);
- vpx_codec_control(&encoders_[i],
- VP8E_SET_TEMPORAL_LAYER_ID,
+ vpx_codec_control(&encoders_[i], VP8E_SET_TEMPORAL_LAYER_ID,
temporal_layers_[stream_idx]->CurrentLayerId());
}
// TODO(holmer): Ideally the duration should be the timestamp diff of this
@@ -895,7 +887,7 @@ int VP8EncoderImpl::Encode(const VideoFrame& frame,
// Reset specific intra frame thresholds, following the key frame.
if (send_key_frame) {
vpx_codec_control(&(encoders_[0]), VP8E_SET_MAX_INTRA_BITRATE_PCT,
- rc_max_intra_target_);
+ rc_max_intra_target_);
}
if (error)
return WEBRTC_VIDEO_CODEC_ERROR;
@@ -913,8 +905,7 @@ int VP8EncoderImpl::UpdateCodecFrameSize(const VideoFrame& input_image) {
codec_.simulcastStream[0].height = input_image.height();
}
// Update the cpu_speed setting for resolution change.
- vpx_codec_control(&(encoders_[0]),
- VP8E_SET_CPUUSED,
+ vpx_codec_control(&(encoders_[0]), VP8E_SET_CPUUSED,
SetCpuSpeed(codec_.width, codec_.height));
raw_images_[0].w = codec_.width;
raw_images_[0].h = codec_.height;
@@ -947,13 +938,12 @@ void VP8EncoderImpl::PopulateCodecSpecific(
}
vp8Info->simulcastIdx = stream_idx;
vp8Info->keyIdx = kNoKeyIdx; // TODO(hlundin) populate this
- vp8Info->nonReference = (pkt.data.frame.flags & VPX_FRAME_IS_DROPPABLE) ?
- true : false;
+ vp8Info->nonReference =
+ (pkt.data.frame.flags & VPX_FRAME_IS_DROPPABLE) ? true : false;
bool base_layer_sync_point = (pkt.data.frame.flags & VPX_FRAME_IS_KEY) ||
- only_predicting_from_key_frame;
+ only_predicting_from_key_frame;
temporal_layers_[stream_idx]->PopulateCodecSpecific(base_layer_sync_point,
- vp8Info,
- timestamp);
+ vp8Info, timestamp);
// Prepare next.
picture_id_[stream_idx] = (picture_id_[stream_idx] + 1) & 0x7FFF;
}
@@ -966,27 +956,26 @@ int VP8EncoderImpl::GetEncodedPartitions(const VideoFrame& input_image,
int stream_idx = static_cast<int>(encoders_.size()) - 1;
int result = WEBRTC_VIDEO_CODEC_OK;
for (size_t encoder_idx = 0; encoder_idx < encoders_.size();
- ++encoder_idx, --stream_idx) {
+ ++encoder_idx, --stream_idx) {
vpx_codec_iter_t iter = NULL;
int part_idx = 0;
encoded_images_[encoder_idx]._length = 0;
encoded_images_[encoder_idx]._frameType = kVideoFrameDelta;
RTPFragmentationHeader frag_info;
// token_partitions_ is number of bits used.
- frag_info.VerifyAndAllocateFragmentationHeader((1 << token_partitions_)
- + 1);
+ frag_info.VerifyAndAllocateFragmentationHeader((1 << token_partitions_) +
+ 1);
CodecSpecificInfo codec_specific;
- const vpx_codec_cx_pkt_t *pkt = NULL;
- while ((pkt = vpx_codec_get_cx_data(&encoders_[encoder_idx],
- &iter)) != NULL) {
+ const vpx_codec_cx_pkt_t* pkt = NULL;
+ while ((pkt = vpx_codec_get_cx_data(&encoders_[encoder_idx], &iter)) !=
+ NULL) {
switch (pkt->kind) {
case VPX_CODEC_CX_FRAME_PKT: {
uint32_t length = encoded_images_[encoder_idx]._length;
memcpy(&encoded_images_[encoder_idx]._buffer[length],
- pkt->data.frame.buf,
- pkt->data.frame.sz);
+ pkt->data.frame.buf, pkt->data.frame.sz);
frag_info.fragmentationOffset[part_idx] = length;
- frag_info.fragmentationLength[part_idx] = pkt->data.frame.sz;
+ frag_info.fragmentationLength[part_idx] = pkt->data.frame.sz;
frag_info.fragmentationPlType[part_idx] = 0; // not known here
frag_info.fragmentationTimeDiff[part_idx] = 0;
encoded_images_[encoder_idx]._length += pkt->data.frame.sz;
@@ -1063,7 +1052,6 @@ int VP8EncoderImpl::RegisterEncodeCompleteCallback(
return WEBRTC_VIDEO_CODEC_OK;
}
-
VP8DecoderImpl::VP8DecoderImpl()
: decode_complete_callback_(NULL),
inited_(false),
@@ -1075,8 +1063,7 @@ VP8DecoderImpl::VP8DecoderImpl()
propagation_cnt_(-1),
last_frame_width_(0),
last_frame_height_(0),
- key_frame_required_(true) {
-}
+ key_frame_required_(true) {}
VP8DecoderImpl::~VP8DecoderImpl() {
inited_ = true; // in order to do the actual release
@@ -1092,8 +1079,7 @@ int VP8DecoderImpl::Reset() {
return WEBRTC_VIDEO_CODEC_OK;
}
-int VP8DecoderImpl::InitDecode(const VideoCodec* inst,
- int number_of_cores) {
+int VP8DecoderImpl::InitDecode(const VideoCodec* inst, int number_of_cores) {
int ret_val = Release();
if (ret_val < 0) {
return ret_val;
@@ -1104,12 +1090,12 @@ int VP8DecoderImpl::InitDecode(const VideoCodec* inst,
if (inst && inst->codecType == kVideoCodecVP8) {
feedback_mode_ = inst->codecSpecific.VP8.feedbackModeOn;
}
- vpx_codec_dec_cfg_t cfg;
+ vpx_codec_dec_cfg_t cfg;
// Setting number of threads to a constant value (1)
cfg.threads = 1;
cfg.h = cfg.w = 0; // set after decode
-vpx_codec_flags_t flags = 0;
+ vpx_codec_flags_t flags = 0;
#if !defined(WEBRTC_ARCH_ARM) && !defined(WEBRTC_ARCH_ARM64)
flags = VPX_CODEC_USE_POSTPROC;
#ifdef INDEPENDENT_PARTITIONS
@@ -1134,10 +1120,10 @@ vpx_codec_flags_t flags = 0;
}
int VP8DecoderImpl::Decode(const EncodedImage& input_image,
- bool missing_frames,
- const RTPFragmentationHeader* fragmentation,
- const CodecSpecificInfo* codec_specific_info,
- int64_t /*render_time_ms*/) {
+ bool missing_frames,
+ const RTPFragmentationHeader* fragmentation,
+ const CodecSpecificInfo* codec_specific_info,
+ int64_t /*render_time_ms*/) {
if (!inited_) {
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
}
@@ -1188,9 +1174,9 @@ int VP8DecoderImpl::Decode(const EncodedImage& input_image,
if (input_image._frameType == kVideoFrameKey &&
input_image._completeFrame) {
propagation_cnt_ = -1;
- // Start count on first loss.
+ // Start count on first loss.
} else if ((!input_image._completeFrame || missing_frames) &&
- propagation_cnt_ == -1) {
+ propagation_cnt_ == -1) {
propagation_cnt_ = 0;
}
if (propagation_cnt_ >= 0) {
@@ -1242,15 +1228,15 @@ int VP8DecoderImpl::Decode(const EncodedImage& input_image,
if (input_image._frameType == kVideoFrameKey && input_image._buffer != NULL) {
const uint32_t bytes_to_copy = input_image._length;
if (last_keyframe_._size < bytes_to_copy) {
- delete [] last_keyframe_._buffer;
+ delete[] last_keyframe_._buffer;
last_keyframe_._buffer = NULL;
last_keyframe_._size = 0;
}
uint8_t* temp_buffer = last_keyframe_._buffer; // Save buffer ptr.
- uint32_t temp_size = last_keyframe_._size; // Save size.
- last_keyframe_ = input_image; // Shallow copy.
- last_keyframe_._buffer = temp_buffer; // Restore buffer ptr.
- last_keyframe_._size = temp_size; // Restore buffer size.
+ uint32_t temp_size = last_keyframe_._size; // Save size.
+ last_keyframe_ = input_image; // Shallow copy.
+ last_keyframe_._buffer = temp_buffer; // Restore buffer ptr.
+ last_keyframe_._size = temp_size; // Restore buffer size.
if (!last_keyframe_._buffer) {
// Allocate memory.
last_keyframe_._size = bytes_to_copy;
@@ -1300,7 +1286,8 @@ int VP8DecoderImpl::Decode(const EncodedImage& input_image,
}
if (picture_id > -1) {
if (((reference_updates & VP8_GOLD_FRAME) ||
- (reference_updates & VP8_ALTR_FRAME)) && !corrupted) {
+ (reference_updates & VP8_ALTR_FRAME)) &&
+ !corrupted) {
decode_complete_callback_->ReceivedDecodedReferenceFrame(picture_id);
}
decode_complete_callback_->ReceivedDecodedFrame(picture_id);
@@ -1323,14 +1310,10 @@ int VP8DecoderImpl::DecodePartitions(
const EncodedImage& input_image,
const RTPFragmentationHeader* fragmentation) {
for (int i = 0; i < fragmentation->fragmentationVectorSize; ++i) {
- const uint8_t* partition = input_image._buffer +
- fragmentation->fragmentationOffset[i];
- const uint32_t partition_length =
- fragmentation->fragmentationLength[i];
- if (vpx_codec_decode(decoder_,
- partition,
- partition_length,
- 0,
+ const uint8_t* partition =
+ input_image._buffer + fragmentation->fragmentationOffset[i];
+ const uint32_t partition_length = fragmentation->fragmentationLength[i];
+ if (vpx_codec_decode(decoder_, partition, partition_length, 0,
VPX_DL_REALTIME)) {
return WEBRTC_VIDEO_CODEC_ERROR;
}
@@ -1343,8 +1326,8 @@ int VP8DecoderImpl::DecodePartitions(
}
int VP8DecoderImpl::ReturnFrame(const vpx_image_t* img,
- uint32_t timestamp,
- int64_t ntp_time_ms) {
+ uint32_t timestamp,
+ int64_t ntp_time_ms) {
if (img == NULL) {
// Decoder OK and NULL image => No show frame
return WEBRTC_VIDEO_CODEC_NO_OUTPUT;
@@ -1354,14 +1337,13 @@ int VP8DecoderImpl::ReturnFrame(const vpx_image_t* img,
// Allocate memory for decoded image.
VideoFrame decoded_image(buffer_pool_.CreateBuffer(img->d_w, img->d_h),
timestamp, 0, kVideoRotation_0);
- libyuv::I420Copy(
- img->planes[VPX_PLANE_Y], img->stride[VPX_PLANE_Y],
- img->planes[VPX_PLANE_U], img->stride[VPX_PLANE_U],
- img->planes[VPX_PLANE_V], img->stride[VPX_PLANE_V],
- decoded_image.buffer(kYPlane), decoded_image.stride(kYPlane),
- decoded_image.buffer(kUPlane), decoded_image.stride(kUPlane),
- decoded_image.buffer(kVPlane), decoded_image.stride(kVPlane),
- img->d_w, img->d_h);
+ libyuv::I420Copy(img->planes[VPX_PLANE_Y], img->stride[VPX_PLANE_Y],
+ img->planes[VPX_PLANE_U], img->stride[VPX_PLANE_U],
+ img->planes[VPX_PLANE_V], img->stride[VPX_PLANE_V],
+ decoded_image.buffer(kYPlane), decoded_image.stride(kYPlane),
+ decoded_image.buffer(kUPlane), decoded_image.stride(kUPlane),
+ decoded_image.buffer(kVPlane), decoded_image.stride(kVPlane),
+ img->d_w, img->d_h);
decoded_image.set_ntp_time_ms(ntp_time_ms);
int ret = decode_complete_callback_->Decoded(decoded_image);
if (ret != 0)
@@ -1380,7 +1362,7 @@ int VP8DecoderImpl::RegisterDecodeCompleteCallback(
int VP8DecoderImpl::Release() {
if (last_keyframe_._buffer != NULL) {
- delete [] last_keyframe_._buffer;
+ delete[] last_keyframe_._buffer;
last_keyframe_._buffer = NULL;
}
if (decoder_ != NULL) {
@@ -1400,15 +1382,19 @@ int VP8DecoderImpl::Release() {
return WEBRTC_VIDEO_CODEC_OK;
}
+const char* VP8DecoderImpl::ImplementationName() const {
+ return "libvpx";
+}
+
int VP8DecoderImpl::CopyReference(VP8DecoderImpl* copy) {
// The type of frame to copy should be set in ref_frame_->frame_type
// before the call to this function.
- if (vpx_codec_control(decoder_, VP8_COPY_REFERENCE, ref_frame_)
- != VPX_CODEC_OK) {
+ if (vpx_codec_control(decoder_, VP8_COPY_REFERENCE, ref_frame_) !=
+ VPX_CODEC_OK) {
return -1;
}
- if (vpx_codec_control(copy->decoder_, VP8_SET_REFERENCE, ref_frame_)
- != VPX_CODEC_OK) {
+ if (vpx_codec_control(copy->decoder_, VP8_SET_REFERENCE, ref_frame_) !=
+ VPX_CODEC_OK) {
return -1;
}
return 0;
diff --git a/webrtc/modules/video_coding/codecs/vp8/vp8_impl.h b/webrtc/modules/video_coding/codecs/vp8/vp8_impl.h
index ba14ed5841..9d5fb713a4 100644
--- a/webrtc/modules/video_coding/codecs/vp8/vp8_impl.h
+++ b/webrtc/modules/video_coding/codecs/vp8/vp8_impl.h
@@ -22,12 +22,12 @@
#include "vpx/vp8cx.h"
#include "vpx/vp8dx.h"
-#include "webrtc/common_video/interface/i420_buffer_pool.h"
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "webrtc/common_video/include/i420_buffer_pool.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
#include "webrtc/modules/video_coding/codecs/vp8/include/vp8.h"
#include "webrtc/modules/video_coding/codecs/vp8/reference_picture_selection.h"
-#include "webrtc/modules/video_coding/utility/include/frame_dropper.h"
-#include "webrtc/modules/video_coding/utility/include/quality_scaler.h"
+#include "webrtc/modules/video_coding/utility/frame_dropper.h"
+#include "webrtc/modules/video_coding/utility/quality_scaler.h"
#include "webrtc/video_frame.h"
namespace webrtc {
@@ -58,8 +58,11 @@ class VP8EncoderImpl : public VP8Encoder {
void OnDroppedFrame() override {}
+ const char* ImplementationName() const override;
+
private:
- void SetupTemporalLayers(int num_streams, int num_temporal_layers,
+ void SetupTemporalLayers(int num_streams,
+ int num_temporal_layers,
const VideoCodec& codec);
// Set the cpu_speed setting for encoder based on resolution and/or platform.
@@ -126,15 +129,17 @@ class VP8DecoderImpl : public VP8Decoder {
int InitDecode(const VideoCodec* inst, int number_of_cores) override;
int Decode(const EncodedImage& input_image,
- bool missing_frames,
- const RTPFragmentationHeader* fragmentation,
- const CodecSpecificInfo* codec_specific_info,
- int64_t /*render_time_ms*/) override;
+ bool missing_frames,
+ const RTPFragmentationHeader* fragmentation,
+ const CodecSpecificInfo* codec_specific_info,
+ int64_t /*render_time_ms*/) override;
int RegisterDecodeCompleteCallback(DecodedImageCallback* callback) override;
int Release() override;
int Reset() override;
+ const char* ImplementationName() const override;
+
private:
// Copy the reference image from this decoder to the decoder in |copy|. Set
// which frame type to copy in ref_frame_->frame_type before the call to
@@ -165,4 +170,3 @@ class VP8DecoderImpl : public VP8Decoder {
} // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_VP8_IMPL_H_
-
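Note: both codec classes now report an ImplementationName() of "libvpx", which callers can surface in stats or logs. A hedged usage sketch, assuming the VP8Encoder::Create() factory from vp8.h:

    rtc::scoped_ptr<webrtc::VP8Encoder> encoder(webrtc::VP8Encoder::Create());
    printf("encoder implementation: %s\n", encoder->ImplementationName());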
diff --git a/webrtc/modules/video_coding/codecs/vp8/vp8_sequence_coder.cc b/webrtc/modules/video_coding/codecs/vp8/vp8_sequence_coder.cc
index 5843d83fa7..9e546653db 100644
--- a/webrtc/modules/video_coding/codecs/vp8/vp8_sequence_coder.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/vp8_sequence_coder.cc
@@ -1,4 +1,4 @@
- /*
+/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
@@ -9,8 +9,9 @@
*/
#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/base/checks.h"
#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/common_video/interface/video_image.h"
+#include "webrtc/common_video/include/video_image.h"
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
#include "webrtc/modules/video_coding/codecs/vp8/include/vp8.h"
#include "webrtc/system_wrappers/include/tick_util.h"
@@ -22,8 +23,7 @@
class Vp8SequenceCoderEncodeCallback : public webrtc::EncodedImageCallback {
public:
explicit Vp8SequenceCoderEncodeCallback(FILE* encoded_file)
- : encoded_file_(encoded_file),
- encoded_bytes_(0) {}
+ : encoded_file_(encoded_file), encoded_bytes_(0) {}
~Vp8SequenceCoderEncodeCallback();
int Encoded(const webrtc::EncodedImage& encoded_image,
const webrtc::CodecSpecificInfo* codecSpecificInfo,
@@ -31,6 +31,7 @@ class Vp8SequenceCoderEncodeCallback : public webrtc::EncodedImageCallback {
// Returns the encoded image.
webrtc::EncodedImage encoded_image() { return encoded_image_; }
size_t encoded_bytes() { return encoded_bytes_; }
+
private:
webrtc::EncodedImage encoded_image_;
FILE* encoded_file_;
@@ -38,7 +39,7 @@ class Vp8SequenceCoderEncodeCallback : public webrtc::EncodedImageCallback {
};
Vp8SequenceCoderEncodeCallback::~Vp8SequenceCoderEncodeCallback() {
- delete [] encoded_image_._buffer;
+ delete[] encoded_image_._buffer;
encoded_image_._buffer = NULL;
}
int Vp8SequenceCoderEncodeCallback::Encoded(
@@ -46,7 +47,7 @@ int Vp8SequenceCoderEncodeCallback::Encoded(
const webrtc::CodecSpecificInfo* codecSpecificInfo,
const webrtc::RTPFragmentationHeader* fragmentation) {
if (encoded_image_._size < encoded_image._size) {
- delete [] encoded_image_._buffer;
+ delete[] encoded_image_._buffer;
encoded_image_._buffer = NULL;
encoded_image_._buffer = new uint8_t[encoded_image._size];
encoded_image_._size = encoded_image._size;
@@ -68,7 +69,11 @@ class Vp8SequenceCoderDecodeCallback : public webrtc::DecodedImageCallback {
public:
explicit Vp8SequenceCoderDecodeCallback(FILE* decoded_file)
: decoded_file_(decoded_file) {}
- int Decoded(webrtc::VideoFrame& frame);
+ int32_t Decoded(webrtc::VideoFrame& frame) override;
+ int32_t Decoded(webrtc::VideoFrame& frame, int64_t decode_time_ms) override {
+ RTC_NOTREACHED();
+ return -1;
+ }
bool DecodeComplete();
private:
@@ -80,16 +85,16 @@ int Vp8SequenceCoderDecodeCallback::Decoded(webrtc::VideoFrame& image) {
return 0;
}
-int SequenceCoder(webrtc::test::CommandLineParser& parser) {
- int width = strtol((parser.GetFlag("w")).c_str(), NULL, 10);
- int height = strtol((parser.GetFlag("h")).c_str(), NULL, 10);
- int framerate = strtol((parser.GetFlag("f")).c_str(), NULL, 10);
+int SequenceCoder(webrtc::test::CommandLineParser* parser) {
+ int width = strtol((parser->GetFlag("w")).c_str(), NULL, 10);
+ int height = strtol((parser->GetFlag("h")).c_str(), NULL, 10);
+ int framerate = strtol((parser->GetFlag("f")).c_str(), NULL, 10);
if (width <= 0 || height <= 0 || framerate <= 0) {
fprintf(stderr, "Error: Resolution cannot be <= 0!\n");
return -1;
}
- int target_bitrate = strtol((parser.GetFlag("b")).c_str(), NULL, 10);
+ int target_bitrate = strtol((parser->GetFlag("b")).c_str(), NULL, 10);
if (target_bitrate <= 0) {
fprintf(stderr, "Error: Bit-rate cannot be <= 0!\n");
return -1;
@@ -97,20 +102,20 @@ int SequenceCoder(webrtc::test::CommandLineParser& parser) {
// SetUp
// Open input file.
- std::string encoded_file_name = parser.GetFlag("encoded_file");
+ std::string encoded_file_name = parser->GetFlag("encoded_file");
FILE* encoded_file = fopen(encoded_file_name.c_str(), "wb");
if (encoded_file == NULL) {
fprintf(stderr, "Error: Cannot open encoded file\n");
return -1;
}
- std::string input_file_name = parser.GetFlag("input_file");
+ std::string input_file_name = parser->GetFlag("input_file");
FILE* input_file = fopen(input_file_name.c_str(), "rb");
if (input_file == NULL) {
fprintf(stderr, "Error: Cannot open input file\n");
return -1;
}
// Open output file.
- std::string output_file_name = parser.GetFlag("output_file");
+ std::string output_file_name = parser->GetFlag("output_file");
FILE* output_file = fopen(output_file_name.c_str(), "wb");
if (output_file == NULL) {
fprintf(stderr, "Error: Cannot open output file\n");
@@ -118,8 +123,8 @@ int SequenceCoder(webrtc::test::CommandLineParser& parser) {
}
// Get the range of frames: will encode num_frames following start_frame.
- int start_frame = strtol((parser.GetFlag("start_frame")).c_str(), NULL, 10);
- int num_frames = strtol((parser.GetFlag("num_frames")).c_str(), NULL, 10);
+ int start_frame = strtol((parser->GetFlag("start_frame")).c_str(), NULL, 10);
+ int num_frames = strtol((parser->GetFlag("num_frames")).c_str(), NULL, 10);
// Codec SetUp.
webrtc::VideoCodec inst;
@@ -157,8 +162,8 @@ int SequenceCoder(webrtc::test::CommandLineParser& parser) {
int frames_processed = 0;
input_frame.CreateEmptyFrame(width, height, width, half_width, half_width);
while (!feof(input_file) &&
- (num_frames == -1 || frames_processed < num_frames)) {
- if (fread(frame_buffer.get(), 1, length, input_file) != length)
+ (num_frames == -1 || frames_processed < num_frames)) {
+ if (fread(frame_buffer.get(), 1, length, input_file) != length)
continue;
if (frame_cnt >= start_frame) {
webrtc::ConvertToI420(webrtc::kI420, frame_buffer.get(), 0, 0, width,
@@ -179,33 +184,35 @@ int SequenceCoder(webrtc::test::CommandLineParser& parser) {
printf("Actual bitrate: %f kbps\n", actual_bit_rate / 1000);
webrtc::test::QualityMetricsResult psnr_result, ssim_result;
EXPECT_EQ(0, webrtc::test::I420MetricsFromFiles(
- input_file_name.c_str(), output_file_name.c_str(),
- inst.width, inst.height,
- &psnr_result, &ssim_result));
+ input_file_name.c_str(), output_file_name.c_str(),
+ inst.width, inst.height, &psnr_result, &ssim_result));
printf("PSNR avg: %f[dB], min: %f[dB]\nSSIM avg: %f, min: %f\n",
- psnr_result.average, psnr_result.min,
- ssim_result.average, ssim_result.min);
+ psnr_result.average, psnr_result.min, ssim_result.average,
+ ssim_result.min);
return frame_cnt;
}
int main(int argc, char** argv) {
std::string program_name = argv[0];
- std::string usage = "Encode and decodes a video sequence, and writes"
- "results to a file.\n"
- "Example usage:\n" + program_name + " functionality"
- " --w=352 --h=288 --input_file=input.yuv --output_file=output.yuv "
- " Command line flags:\n"
- " - width(int): The width of the input file. Default: 352\n"
- " - height(int): The height of the input file. Default: 288\n"
- " - input_file(string): The YUV file to encode."
- " Default: foreman.yuv\n"
- " - encoded_file(string): The vp8 encoded file (encoder output)."
- " Default: vp8_encoded.vp8\n"
- " - output_file(string): The yuv decoded file (decoder output)."
- " Default: vp8_decoded.yuv\n."
- " - start_frame - frame number in which encoding will begin. Default: 0"
- " - num_frames - Number of frames to be processed. "
- " Default: -1 (entire sequence).";
+ std::string usage =
+ "Encode and decodes a video sequence, and writes"
+ "results to a file.\n"
+ "Example usage:\n" +
+ program_name +
+ " functionality"
+ " --w=352 --h=288 --input_file=input.yuv --output_file=output.yuv "
+ " Command line flags:\n"
+ " - width(int): The width of the input file. Default: 352\n"
+ " - height(int): The height of the input file. Default: 288\n"
+ " - input_file(string): The YUV file to encode."
+ " Default: foreman.yuv\n"
+ " - encoded_file(string): The vp8 encoded file (encoder output)."
+ " Default: vp8_encoded.vp8\n"
+ " - output_file(string): The yuv decoded file (decoder output)."
+ " Default: vp8_decoded.yuv\n."
+ " - start_frame - frame number in which encoding will begin. Default: 0"
+ " - num_frames - Number of frames to be processed. "
+ " Default: -1 (entire sequence).";
webrtc::test::CommandLineParser parser;
@@ -223,8 +230,8 @@ int main(int argc, char** argv) {
parser.SetFlag("output_file", webrtc::test::OutputPath() + "vp8_decoded.yuv");
parser.SetFlag("encoded_file",
webrtc::test::OutputPath() + "vp8_encoded.vp8");
- parser.SetFlag("input_file", webrtc::test::ResourcePath("foreman_cif",
- "yuv"));
+ parser.SetFlag("input_file",
+ webrtc::test::ResourcePath("foreman_cif", "yuv"));
parser.SetFlag("help", "false");
parser.ProcessFlags();
@@ -234,5 +241,5 @@ int main(int argc, char** argv) {
}
parser.PrintEnteredFlags();
- return SequenceCoder(parser);
+ return SequenceCoder(&parser);
}
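Note: SequenceCoder() now takes CommandLineParser* instead of a non-const reference, matching the style rule that mutable arguments are passed by pointer so mutation is visible at the call site. A hypothetical illustration of the convention (Settings is an invented type):

    void Tune(Settings* settings);         // may mutate; called as Tune(&s)
    void Print(const Settings& settings);  // read-only; const reference is fine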
diff --git a/webrtc/modules/video_coding/codecs/vp9/include/vp9.h b/webrtc/modules/video_coding/codecs/vp9/include/vp9.h
index cd77f72dcb..3bcbe46b3a 100644
--- a/webrtc/modules/video_coding/codecs/vp9/include/vp9.h
+++ b/webrtc/modules/video_coding/codecs/vp9/include/vp9.h
@@ -12,7 +12,7 @@
#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_VP9_INCLUDE_VP9_H_
#define WEBRTC_MODULES_VIDEO_CODING_CODECS_VP9_INCLUDE_VP9_H_
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
namespace webrtc {
@@ -23,7 +23,6 @@ class VP9Encoder : public VideoEncoder {
virtual ~VP9Encoder() {}
};
-
class VP9Decoder : public VideoDecoder {
public:
static VP9Decoder* Create();
diff --git a/webrtc/modules/video_coding/codecs/vp9/screenshare_layers.cc b/webrtc/modules/video_coding/codecs/vp9/screenshare_layers.cc
new file mode 100644
index 0000000000..c7ed78a192
--- /dev/null
+++ b/webrtc/modules/video_coding/codecs/vp9/screenshare_layers.cc
@@ -0,0 +1,93 @@
+/* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+*
+* Use of this source code is governed by a BSD-style license
+* that can be found in the LICENSE file in the root of the source
+* tree. An additional intellectual property rights grant can be found
+* in the file PATENTS. All contributing project authors may
+* be found in the AUTHORS file in the root of the source tree.
+*/
+
+#include <algorithm>
+#include "webrtc/modules/video_coding/codecs/vp9/screenshare_layers.h"
+#include "webrtc/base/checks.h"
+
+namespace webrtc {
+
+ScreenshareLayersVP9::ScreenshareLayersVP9(uint8_t num_layers)
+ : num_layers_(num_layers),
+ start_layer_(0),
+ last_timestamp_(0),
+ timestamp_initialized_(false) {
+ RTC_DCHECK_GT(num_layers, 0);
+ RTC_DCHECK_LE(num_layers, kMaxVp9NumberOfSpatialLayers);
+ memset(bits_used_, 0, sizeof(bits_used_));
+ memset(threshold_kbps_, 0, sizeof(threshold_kbps_));
+}
+
+uint8_t ScreenshareLayersVP9::GetStartLayer() const {
+ return start_layer_;
+}
+
+void ScreenshareLayersVP9::ConfigureBitrate(int threshold_kbps,
+ uint8_t layer_id) {
+ // The upper layer is always the layer we spill frames
+ // into when the bitrate becomes too high, so setting
+ // a max limit is not allowed. The top layer bitrate is
+ // never used either, so configuring it makes no difference.
+ RTC_DCHECK_LT(layer_id, num_layers_ - 1);
+ threshold_kbps_[layer_id] = threshold_kbps;
+}
+
+void ScreenshareLayersVP9::LayerFrameEncoded(unsigned int size_bytes,
+ uint8_t layer_id) {
+ RTC_DCHECK_LT(layer_id, num_layers_);
+ bits_used_[layer_id] += size_bytes * 8;
+}
+
+VP9EncoderImpl::SuperFrameRefSettings
+ScreenshareLayersVP9::GetSuperFrameSettings(uint32_t timestamp,
+ bool is_keyframe) {
+ VP9EncoderImpl::SuperFrameRefSettings settings;
+ if (!timestamp_initialized_) {
+ last_timestamp_ = timestamp;
+ timestamp_initialized_ = true;
+ }
+ float time_diff = (timestamp - last_timestamp_) / 90.f;
+ float total_bits_used = 0;
+ float total_threshold_kbps = 0;
+ start_layer_ = 0;
+
+ // Up to (num_layers - 1) because we only have
+ // (num_layers - 1) thresholds to check.
+ for (int layer_id = 0; layer_id < num_layers_ - 1; ++layer_id) {
+ bits_used_[layer_id] = std::max(
+ 0.f, bits_used_[layer_id] - time_diff * threshold_kbps_[layer_id]);
+ total_bits_used += bits_used_[layer_id];
+ total_threshold_kbps += threshold_kbps_[layer_id];
+
+ // If this is a keyframe then there should be no
+ // references to any previous frames.
+ if (!is_keyframe) {
+ settings.layer[layer_id].ref_buf1 = layer_id;
+ if (total_bits_used > total_threshold_kbps * 1000)
+ start_layer_ = layer_id + 1;
+ }
+
+ settings.layer[layer_id].upd_buf = layer_id;
+ }
+  // Since the above loop does not iterate over the last layer,
+  // the reference of the last layer has to be set after the loop;
+  // as above, if this is a keyframe there should be no references
+  // to any previous frames.
+ if (!is_keyframe)
+ settings.layer[num_layers_ - 1].ref_buf1 = num_layers_ - 1;
+
+ settings.layer[num_layers_ - 1].upd_buf = num_layers_ - 1;
+ settings.is_keyframe = is_keyframe;
+ settings.start_layer = start_layer_;
+ settings.stop_layer = num_layers_ - 1;
+ last_timestamp_ = timestamp;
+ return settings;
+}
+
+} // namespace webrtc
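
The loop above is effectively a per-layer leaky bucket: every call drains time_diff * threshold_kbps bits (kbps is bits per millisecond, so the units work out), and each encoded frame refills the bucket via LayerFrameEncoded(). A minimal standalone sketch of that arithmetic, with hypothetical numbers (not part of the patch):

#include <algorithm>
#include <cstdio>

int main() {
  // Two layers; layer 0 has a 100 kbps threshold, the top layer is the
  // spill layer and has none (mirrors ConfigureBitrate(100, 0)).
  const float threshold_kbps = 100.f;
  float bits_used = 0.f;  // bits_used_[0]

  // One frame every 200 ms, 40 kb each -> 200 kbps, i.e. over budget.
  for (int frame = 0; frame < 10; ++frame) {
    const float time_diff_ms = 200.f;
    // Drain the budget earned since the last frame (ms * kbps = bits).
    bits_used = std::max(0.f, bits_used - time_diff_ms * threshold_kbps);
    // Spill to layer 1 once more than one second of budget is owed.
    const int start_layer = bits_used > threshold_kbps * 1000.f ? 1 : 0;
    std::printf("frame %d: bits_used=%.0f start_layer=%d\n", frame,
                bits_used, start_layer);
    if (start_layer == 0)
      bits_used += 40 * 1000.f;  // LayerFrameEncoded(40 kb, 0)
  }
  return 0;
}
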
diff --git a/webrtc/modules/video_coding/codecs/vp9/screenshare_layers.h b/webrtc/modules/video_coding/codecs/vp9/screenshare_layers.h
new file mode 100644
index 0000000000..5a901ae359
--- /dev/null
+++ b/webrtc/modules/video_coding/codecs/vp9/screenshare_layers.h
@@ -0,0 +1,66 @@
+/* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+*
+* Use of this source code is governed by a BSD-style license
+* that can be found in the LICENSE file in the root of the source
+* tree. An additional intellectual property rights grant can be found
+* in the file PATENTS. All contributing project authors may
+* be found in the AUTHORS file in the root of the source tree.
+*/
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_VP9_SCREENSHARE_LAYERS_H_
+#define WEBRTC_MODULES_VIDEO_CODING_CODECS_VP9_SCREENSHARE_LAYERS_H_
+
+#include "webrtc/modules/video_coding/codecs/vp9/vp9_impl.h"
+
+namespace webrtc {
+
+class ScreenshareLayersVP9 {
+ public:
+ explicit ScreenshareLayersVP9(uint8_t num_layers);
+
+  // Sets the target bitrate threshold (kbps) for layer layer_id.
+ void ConfigureBitrate(int threshold_kbps, uint8_t layer_id);
+
+  // Returns the current start layer.
+ uint8_t GetStartLayer() const;
+
+  // Updates the layer with the size of the encoded layer frame.
+ void LayerFrameEncoded(unsigned int size_bytes, uint8_t layer_id);
+
+  // Get the layer settings for the next superframe.
+  //
+  // In short, each time GetSuperFrameSettings() is called the
+  // bitrate of every layer is recalculated. If the cumulative
+  // bitrate used up to and including a layer exceeds the
+  // cumulative threshold configured for those layers (set with
+  // ConfigureBitrate()), then the resulting encoding settings for
+  // the superframe will only encode the layers above that layer.
+ VP9EncoderImpl::SuperFrameRefSettings GetSuperFrameSettings(
+ uint32_t timestamp,
+ bool is_keyframe);
+
+ private:
+  // The number of layers that are used.
+ uint8_t num_layers_;
+
+ // The index of the first layer to encode.
+ uint8_t start_layer_;
+
+  // Cumulative target kbps for the different layers.
+ float threshold_kbps_[kMaxVp9NumberOfSpatialLayers - 1];
+
+  // How many bits have been used for a certain layer. Increased in
+  // LayerFrameEncoded() by the size of the encoded frame and decreased in
+  // GetSuperFrameSettings() depending on the time between frames.
+ float bits_used_[kMaxVp9NumberOfSpatialLayers];
+
+ // Timestamp of last frame.
+ uint32_t last_timestamp_;
+
+ // If the last_timestamp_ has been set.
+ bool timestamp_initialized_;
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_VP9_SCREENSHARE_LAYERS_H_
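
For orientation, this is roughly how an encoder is expected to drive the class declared above, once per frame; the helper and its values below are illustrative only, not part of the patch:

#include "webrtc/modules/video_coding/codecs/vp9/screenshare_layers.h"

// Hypothetical per-frame driver (timestamps are 90 kHz RTP ticks).
void EncodeOneSuperFrame(webrtc::ScreenshareLayersVP9* layers,
                         uint32_t rtp_timestamp,
                         bool is_keyframe) {
  webrtc::VP9EncoderImpl::SuperFrameRefSettings settings =
      layers->GetSuperFrameSettings(rtp_timestamp, is_keyframe);
  for (int sl = settings.start_layer; sl <= settings.stop_layer; ++sl) {
    // ... encode spatial layer |sl| using the buffer references in
    // settings.layer[sl], then charge its budget:
    const unsigned int encoded_bytes = 5000;  // Illustrative size.
    layers->LayerFrameEncoded(encoded_bytes, sl);
  }
}
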
diff --git a/webrtc/modules/video_coding/codecs/vp9/screenshare_layers_unittest.cc b/webrtc/modules/video_coding/codecs/vp9/screenshare_layers_unittest.cc
new file mode 100644
index 0000000000..5eb7b237ac
--- /dev/null
+++ b/webrtc/modules/video_coding/codecs/vp9/screenshare_layers_unittest.cc
@@ -0,0 +1,323 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <limits>
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "vpx/vp8cx.h"
+#include "webrtc/base/logging.h"
+#include "webrtc/modules/video_coding/codecs/vp9/screenshare_layers.h"
+#include "webrtc/modules/video_coding/codecs/vp9/vp9_impl.h"
+#include "webrtc/system_wrappers/include/clock.h"
+
+namespace webrtc {
+
+typedef VP9EncoderImpl::SuperFrameRefSettings Settings;
+
+const uint32_t kTickFrequency = 90000;
+
+class ScreenshareLayerTestVP9 : public ::testing::Test {
+ protected:
+ ScreenshareLayerTestVP9() : clock_(0) {}
+ virtual ~ScreenshareLayerTestVP9() {}
+
+ void InitScreenshareLayers(int layers) {
+ layers_.reset(new ScreenshareLayersVP9(layers));
+ }
+
+ void ConfigureBitrateForLayer(int kbps, uint8_t layer_id) {
+ layers_->ConfigureBitrate(kbps, layer_id);
+ }
+
+ void AdvanceTime(int64_t milliseconds) {
+ clock_.AdvanceTimeMilliseconds(milliseconds);
+ }
+
+ void AddKilobitsToLayer(int kilobits, uint8_t layer_id) {
+ layers_->LayerFrameEncoded(kilobits * 1000 / 8, layer_id);
+ }
+
+ void EqualRefsForLayer(const Settings& actual, uint8_t layer_id) {
+ EXPECT_EQ(expected_.layer[layer_id].upd_buf,
+ actual.layer[layer_id].upd_buf);
+ EXPECT_EQ(expected_.layer[layer_id].ref_buf1,
+ actual.layer[layer_id].ref_buf1);
+ EXPECT_EQ(expected_.layer[layer_id].ref_buf2,
+ actual.layer[layer_id].ref_buf2);
+ EXPECT_EQ(expected_.layer[layer_id].ref_buf3,
+ actual.layer[layer_id].ref_buf3);
+ }
+
+ void EqualRefs(const Settings& actual) {
+ for (unsigned int layer_id = 0; layer_id < kMaxVp9NumberOfSpatialLayers;
+ ++layer_id) {
+ EqualRefsForLayer(actual, layer_id);
+ }
+ }
+
+ void EqualStartStopKeyframe(const Settings& actual) {
+ EXPECT_EQ(expected_.start_layer, actual.start_layer);
+ EXPECT_EQ(expected_.stop_layer, actual.stop_layer);
+ EXPECT_EQ(expected_.is_keyframe, actual.is_keyframe);
+ }
+
+  // Check that the settings returned by GetSuperFrameSettings() are
+  // equal to the expected_ settings.
+ void EqualToExpected() {
+ uint32_t frame_timestamp_ =
+ clock_.TimeInMilliseconds() * (kTickFrequency / 1000);
+ Settings actual =
+ layers_->GetSuperFrameSettings(frame_timestamp_, expected_.is_keyframe);
+ EqualRefs(actual);
+ EqualStartStopKeyframe(actual);
+ }
+
+ Settings expected_;
+ SimulatedClock clock_;
+ rtc::scoped_ptr<ScreenshareLayersVP9> layers_;
+};
+
+TEST_F(ScreenshareLayerTestVP9, NoRefsOnKeyFrame) {
+ const int kNumLayers = kMaxVp9NumberOfSpatialLayers;
+ InitScreenshareLayers(kNumLayers);
+ expected_.start_layer = 0;
+ expected_.stop_layer = kNumLayers - 1;
+
+ for (int l = 0; l < kNumLayers; ++l) {
+ expected_.layer[l].upd_buf = l;
+ }
+ expected_.is_keyframe = true;
+ EqualToExpected();
+
+ for (int l = 0; l < kNumLayers; ++l) {
+ expected_.layer[l].ref_buf1 = l;
+ }
+ expected_.is_keyframe = false;
+ EqualToExpected();
+}
+
+// Test that it is not possible to send at a high bitrate (over the
+// threshold) after a longer period of low bitrate.
+TEST_F(ScreenshareLayerTestVP9, DontAccumulateAvailableBitsOverTime) {
+ InitScreenshareLayers(2);
+ ConfigureBitrateForLayer(100, 0);
+
+ expected_.layer[0].upd_buf = 0;
+ expected_.layer[0].ref_buf1 = 0;
+ expected_.layer[1].upd_buf = 1;
+ expected_.layer[1].ref_buf1 = 1;
+ expected_.start_layer = 0;
+ expected_.stop_layer = 1;
+
+ // Send 10 frames at a low bitrate (50 kbps)
+ for (int i = 0; i < 10; ++i) {
+ AdvanceTime(200);
+ EqualToExpected();
+ AddKilobitsToLayer(10, 0);
+ }
+
+ AdvanceTime(200);
+ EqualToExpected();
+ AddKilobitsToLayer(301, 0);
+
+ // Send 10 frames at a high bitrate (200 kbps)
+ expected_.start_layer = 1;
+ for (int i = 0; i < 10; ++i) {
+ AdvanceTime(200);
+ EqualToExpected();
+ AddKilobitsToLayer(40, 1);
+ }
+}
+
+// Test that used bits are accumulated over layers, as they should be.
+TEST_F(ScreenshareLayerTestVP9, AccumulateUsedBitsOverLayers) {
+ const int kNumLayers = kMaxVp9NumberOfSpatialLayers;
+ InitScreenshareLayers(kNumLayers);
+ for (int l = 0; l < kNumLayers - 1; ++l)
+ ConfigureBitrateForLayer(100, l);
+ for (int l = 0; l < kNumLayers; ++l) {
+ expected_.layer[l].upd_buf = l;
+ expected_.layer[l].ref_buf1 = l;
+ }
+
+ expected_.start_layer = 0;
+ expected_.stop_layer = kNumLayers - 1;
+ EqualToExpected();
+
+ for (int layer = 0; layer < kNumLayers - 1; ++layer) {
+ expected_.start_layer = layer;
+ EqualToExpected();
+ AddKilobitsToLayer(101, layer);
+ }
+}
+
+// General testing of the bitrate controller.
+TEST_F(ScreenshareLayerTestVP9, 2LayerBitrate) {
+ InitScreenshareLayers(2);
+ ConfigureBitrateForLayer(100, 0);
+
+ expected_.layer[0].upd_buf = 0;
+ expected_.layer[1].upd_buf = 1;
+ expected_.layer[0].ref_buf1 = -1;
+ expected_.layer[1].ref_buf1 = -1;
+ expected_.start_layer = 0;
+ expected_.stop_layer = 1;
+
+ expected_.is_keyframe = true;
+ EqualToExpected();
+ AddKilobitsToLayer(100, 0);
+
+ expected_.layer[0].ref_buf1 = 0;
+ expected_.layer[1].ref_buf1 = 1;
+ expected_.is_keyframe = false;
+ AdvanceTime(199);
+ EqualToExpected();
+ AddKilobitsToLayer(100, 0);
+
+ expected_.start_layer = 1;
+ for (int frame = 0; frame < 3; ++frame) {
+ AdvanceTime(200);
+ EqualToExpected();
+ AddKilobitsToLayer(100, 1);
+ }
+
+ // Just before enough bits become available for L0 @0.999 seconds.
+ AdvanceTime(199);
+ EqualToExpected();
+ AddKilobitsToLayer(100, 1);
+
+  // Just after enough bits become available for L0 @1.001 seconds.
+ expected_.start_layer = 0;
+ AdvanceTime(2);
+ EqualToExpected();
+ AddKilobitsToLayer(100, 0);
+
+ // Keyframes always encode all layers, even if it is over budget.
+ expected_.layer[0].ref_buf1 = -1;
+ expected_.layer[1].ref_buf1 = -1;
+ expected_.is_keyframe = true;
+ AdvanceTime(499);
+ EqualToExpected();
+ expected_.layer[0].ref_buf1 = 0;
+ expected_.layer[1].ref_buf1 = 1;
+ expected_.start_layer = 1;
+ expected_.is_keyframe = false;
+ EqualToExpected();
+ AddKilobitsToLayer(100, 0);
+
+  // 400 kb in L0 --> @3 second mark to fall below the threshold.
+  // Just before, @2.999 seconds.
+ expected_.is_keyframe = false;
+ AdvanceTime(1499);
+ EqualToExpected();
+ AddKilobitsToLayer(100, 1);
+
+  // Just after, @3.001 seconds.
+ expected_.start_layer = 0;
+ AdvanceTime(2);
+ EqualToExpected();
+ AddKilobitsToLayer(100, 0);
+}
+
+// General testing of the bitrate controller.
+TEST_F(ScreenshareLayerTestVP9, 3LayerBitrate) {
+ InitScreenshareLayers(3);
+ ConfigureBitrateForLayer(100, 0);
+ ConfigureBitrateForLayer(100, 1);
+
+ for (int l = 0; l < 3; ++l) {
+ expected_.layer[l].upd_buf = l;
+ expected_.layer[l].ref_buf1 = l;
+ }
+ expected_.start_layer = 0;
+ expected_.stop_layer = 2;
+
+ EqualToExpected();
+ AddKilobitsToLayer(105, 0);
+ AddKilobitsToLayer(30, 1);
+
+ AdvanceTime(199);
+ EqualToExpected();
+ AddKilobitsToLayer(105, 0);
+ AddKilobitsToLayer(30, 1);
+
+ expected_.start_layer = 1;
+ AdvanceTime(200);
+ EqualToExpected();
+ AddKilobitsToLayer(130, 1);
+
+ expected_.start_layer = 2;
+ AdvanceTime(200);
+ EqualToExpected();
+
+ // 400 kb in L1 --> @1.0 second mark to fall below threshold.
+ // 210 kb in L0 --> @1.1 second mark to fall below threshold.
+ // Just before L1 @0.999 seconds.
+ AdvanceTime(399);
+ EqualToExpected();
+
+ // Just after L1 @1.001 seconds.
+ expected_.start_layer = 1;
+ AdvanceTime(2);
+ EqualToExpected();
+
+ // Just before L0 @1.099 seconds.
+ AdvanceTime(99);
+ EqualToExpected();
+
+ // Just after L0 @1.101 seconds.
+ expected_.start_layer = 0;
+ AdvanceTime(2);
+ EqualToExpected();
+
+  // @1.2 seconds.
+ AdvanceTime(99);
+ EqualToExpected();
+ AddKilobitsToLayer(200, 1);
+
+ expected_.is_keyframe = true;
+ for (int l = 0; l < 3; ++l)
+ expected_.layer[l].ref_buf1 = -1;
+ AdvanceTime(200);
+ EqualToExpected();
+
+ expected_.is_keyframe = false;
+ expected_.start_layer = 2;
+ for (int l = 0; l < 3; ++l)
+ expected_.layer[l].ref_buf1 = l;
+ AdvanceTime(200);
+ EqualToExpected();
+}
+
+// Test that the bitrate calculations are
+// correct when the timestamp wraps.
+TEST_F(ScreenshareLayerTestVP9, TimestampWrap) {
+ InitScreenshareLayers(2);
+ ConfigureBitrateForLayer(100, 0);
+
+ expected_.layer[0].upd_buf = 0;
+ expected_.layer[0].ref_buf1 = 0;
+ expected_.layer[1].upd_buf = 1;
+ expected_.layer[1].ref_buf1 = 1;
+ expected_.start_layer = 0;
+ expected_.stop_layer = 1;
+
+ // Advance time to just before the timestamp wraps.
+ AdvanceTime(std::numeric_limits<uint32_t>::max() / (kTickFrequency / 1000));
+ EqualToExpected();
+ AddKilobitsToLayer(200, 0);
+
+ // Wrap
+ expected_.start_layer = 1;
+ AdvanceTime(1);
+ EqualToExpected();
+}
+
+} // namespace webrtc
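
The TimestampWrap test works because `timestamp - last_timestamp_` in GetSuperFrameSettings() is unsigned arithmetic, i.e. modulo 2^32, so the elapsed tick count stays correct across the wrap. A quick self-contained check of that property (illustrative, not part of the patch):

#include <cassert>
#include <cstdint>

int main() {
  // Last frame just before the 32-bit RTP timestamp wraps; the next
  // frame arrives 90 ticks (1 ms at 90 kHz) later, after the wrap.
  const uint32_t last_timestamp = 0xFFFFFFD5u;
  const uint32_t timestamp = last_timestamp + 90u;  // Wraps to 47.
  // Unsigned subtraction is modulo 2^32, so the difference is still
  // 90 ticks rather than a huge bogus interval.
  const float time_diff_ms = (timestamp - last_timestamp) / 90.f;
  assert(time_diff_ms == 1.f);
  return 0;
}
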
diff --git a/webrtc/modules/video_coding/codecs/vp9/vp9.gyp b/webrtc/modules/video_coding/codecs/vp9/vp9.gyp
index 752521c5cb..8993d79bd7 100644
--- a/webrtc/modules/video_coding/codecs/vp9/vp9.gyp
+++ b/webrtc/modules/video_coding/codecs/vp9/vp9.gyp
@@ -14,30 +14,26 @@
{
'target_name': 'webrtc_vp9',
'type': 'static_library',
- 'dependencies': [
- '<(webrtc_root)/common_video/common_video.gyp:common_video',
- '<(webrtc_root)/modules/video_coding/utility/video_coding_utility.gyp:video_coding_utility',
- '<(webrtc_root)/system_wrappers/system_wrappers.gyp:system_wrappers',
- ],
'conditions': [
['build_libvpx==1', {
'dependencies': [
'<(libvpx_dir)/libvpx.gyp:libvpx_new',
],
}],
- ['build_vp9==1', {
- 'sources': [
- 'include/vp9.h',
- 'vp9_frame_buffer_pool.cc',
- 'vp9_frame_buffer_pool.h',
- 'vp9_impl.cc',
- 'vp9_impl.h',
- ],
- }, {
- 'sources': [
- 'vp9_dummy_impl.cc',
- ],
- }],
+ ],
+ 'dependencies': [
+ '<(webrtc_root)/common_video/common_video.gyp:common_video',
+ '<(webrtc_root)/modules/video_coding/utility/video_coding_utility.gyp:video_coding_utility',
+ '<(webrtc_root)/system_wrappers/system_wrappers.gyp:system_wrappers',
+ ],
+ 'sources': [
+ 'include/vp9.h',
+ 'screenshare_layers.cc',
+ 'screenshare_layers.h',
+ 'vp9_frame_buffer_pool.cc',
+ 'vp9_frame_buffer_pool.h',
+ 'vp9_impl.cc',
+ 'vp9_impl.h',
],
},
],
diff --git a/webrtc/modules/video_coding/codecs/vp9/vp9_dummy_impl.cc b/webrtc/modules/video_coding/codecs/vp9/vp9_dummy_impl.cc
deleted file mode 100644
index 491ccbe79c..0000000000
--- a/webrtc/modules/video_coding/codecs/vp9/vp9_dummy_impl.cc
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- *
- */
-
-// This file contains an implementation of empty webrtc VP9 encoder/decoder
-// factories so it is possible to build webrtc without linking with vp9.
-#include "webrtc/modules/video_coding/codecs/vp9/vp9_impl.h"
-
-namespace webrtc {
-VP9Encoder* VP9Encoder::Create() { return nullptr; }
-VP9Decoder* VP9Decoder::Create() { return nullptr; }
-}
diff --git a/webrtc/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.cc b/webrtc/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.cc
index bedbe68ca8..62c05d34fa 100644
--- a/webrtc/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.cc
+++ b/webrtc/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.cc
@@ -16,7 +16,7 @@
#include "vpx/vpx_frame_buffer.h"
#include "webrtc/base/checks.h"
-#include "webrtc/system_wrappers/include/logging.h"
+#include "webrtc/base/logging.h"
namespace webrtc {
diff --git a/webrtc/modules/video_coding/codecs/vp9/vp9_impl.cc b/webrtc/modules/video_coding/codecs/vp9/vp9_impl.cc
index 0ca7eeabe9..e554795519 100644
--- a/webrtc/modules/video_coding/codecs/vp9/vp9_impl.cc
+++ b/webrtc/modules/video_coding/codecs/vp9/vp9_impl.cc
@@ -21,36 +21,31 @@
#include "vpx/vp8cx.h"
#include "vpx/vp8dx.h"
-#include "webrtc/base/bind.h"
#include "webrtc/base/checks.h"
+#include "webrtc/base/keep_ref_until_done.h"
+#include "webrtc/base/logging.h"
#include "webrtc/base/trace_event.h"
#include "webrtc/common.h"
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/system_wrappers/include/logging.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/video_coding/codecs/vp9/screenshare_layers.h"
#include "webrtc/system_wrappers/include/tick_util.h"
-namespace {
-
-// VP9DecoderImpl::ReturnFrame helper function used with WrappedI420Buffer.
-static void WrappedI420BufferNoLongerUsedCb(
- webrtc::Vp9FrameBufferPool::Vp9FrameBuffer* img_buffer) {
- img_buffer->Release();
-}
-
-} // anonymous namespace
-
namespace webrtc {
// Only positive speeds, range for real-time coding currently is: 5 - 8.
// Lower means slower/better quality, higher means fastest/lower quality.
int GetCpuSpeed(int width, int height) {
+#if defined(WEBRTC_ARCH_ARM) || defined(WEBRTC_ARCH_ARM64)
+ return 8;
+#else
// For smaller resolutions, use lower speed setting (get some coding gain at
// the cost of increased encoding complexity).
if (width * height <= 352 * 288)
return 5;
else
return 7;
+#endif
}
VP9Encoder* VP9Encoder::Create() {
@@ -59,7 +54,7 @@ VP9Encoder* VP9Encoder::Create() {
void VP9EncoderImpl::EncoderOutputCodedPacketCallback(vpx_codec_cx_pkt* pkt,
void* user_data) {
- VP9EncoderImpl* enc = (VP9EncoderImpl*)(user_data);
+ VP9EncoderImpl* enc = static_cast<VP9EncoderImpl*>(user_data);
enc->GetEncodedLayerFrame(pkt);
}
@@ -76,9 +71,12 @@ VP9EncoderImpl::VP9EncoderImpl()
raw_(NULL),
input_image_(NULL),
tl0_pic_idx_(0),
- gof_idx_(0),
+ frames_since_kf_(0),
num_temporal_layers_(0),
- num_spatial_layers_(0) {
+ num_spatial_layers_(0),
+ frames_encoded_(0),
+      // Use two spatial layers when screensharing with flexible mode.
+ spatial_layer_(new ScreenshareLayersVP9(2)) {
memset(&codec_, 0, sizeof(codec_));
uint32_t seed = static_cast<uint32_t>(TickTime::MillisecondTimestamp());
srand(seed);
@@ -90,7 +88,7 @@ VP9EncoderImpl::~VP9EncoderImpl() {
int VP9EncoderImpl::Release() {
if (encoded_image_._buffer != NULL) {
- delete [] encoded_image_._buffer;
+ delete[] encoded_image_._buffer;
encoded_image_._buffer = NULL;
}
if (encoder_ != NULL) {
@@ -112,42 +110,72 @@ int VP9EncoderImpl::Release() {
return WEBRTC_VIDEO_CODEC_OK;
}
+bool VP9EncoderImpl::ExplicitlyConfiguredSpatialLayers() const {
+ // We check target_bitrate_bps of the 0th layer to see if the spatial layers
+ // (i.e. bitrates) were explicitly configured.
+ return num_spatial_layers_ > 1 &&
+ codec_.spatialLayers[0].target_bitrate_bps > 0;
+}
+
bool VP9EncoderImpl::SetSvcRates() {
- float rate_ratio[VPX_MAX_LAYERS] = {0};
- float total = 0;
uint8_t i = 0;
- for (i = 0; i < num_spatial_layers_; ++i) {
- if (svc_internal_.svc_params.scaling_factor_num[i] <= 0 ||
- svc_internal_.svc_params.scaling_factor_den[i] <= 0) {
+ if (ExplicitlyConfiguredSpatialLayers()) {
+ if (num_temporal_layers_ > 1) {
+ LOG(LS_ERROR) << "Multiple temporal layers when manually specifying "
+ "spatial layers not implemented yet!";
return false;
}
- rate_ratio[i] = static_cast<float>(
- svc_internal_.svc_params.scaling_factor_num[i]) /
- svc_internal_.svc_params.scaling_factor_den[i];
- total += rate_ratio[i];
- }
-
- for (i = 0; i < num_spatial_layers_; ++i) {
- config_->ss_target_bitrate[i] = static_cast<unsigned int>(
- config_->rc_target_bitrate * rate_ratio[i] / total);
- if (num_temporal_layers_ == 1) {
- config_->layer_target_bitrate[i] = config_->ss_target_bitrate[i];
- } else if (num_temporal_layers_ == 2) {
- config_->layer_target_bitrate[i * num_temporal_layers_] =
- config_->ss_target_bitrate[i] * 2 / 3;
- config_->layer_target_bitrate[i * num_temporal_layers_ + 1] =
- config_->ss_target_bitrate[i];
- } else if (num_temporal_layers_ == 3) {
- config_->layer_target_bitrate[i * num_temporal_layers_] =
- config_->ss_target_bitrate[i] / 2;
- config_->layer_target_bitrate[i * num_temporal_layers_ + 1] =
- config_->layer_target_bitrate[i * num_temporal_layers_] +
- (config_->ss_target_bitrate[i] / 4);
- config_->layer_target_bitrate[i * num_temporal_layers_ + 2] =
- config_->ss_target_bitrate[i];
- } else {
- return false;
+ int total_bitrate_bps = 0;
+ for (i = 0; i < num_spatial_layers_; ++i)
+ total_bitrate_bps += codec_.spatialLayers[i].target_bitrate_bps;
+    // If the total bitrate now differs from what was specified at the
+    // beginning, rescale the per-layer bitrates in the same ratio as before.
+ for (i = 0; i < num_spatial_layers_; ++i) {
+ config_->ss_target_bitrate[i] = config_->layer_target_bitrate[i] =
+ static_cast<int>(static_cast<int64_t>(config_->rc_target_bitrate) *
+ codec_.spatialLayers[i].target_bitrate_bps /
+ total_bitrate_bps);
+ }
+ } else {
+ float rate_ratio[VPX_MAX_LAYERS] = {0};
+ float total = 0;
+
+ for (i = 0; i < num_spatial_layers_; ++i) {
+ if (svc_internal_.svc_params.scaling_factor_num[i] <= 0 ||
+ svc_internal_.svc_params.scaling_factor_den[i] <= 0) {
+ LOG(LS_ERROR) << "Scaling factors not specified!";
+ return false;
+ }
+ rate_ratio[i] =
+ static_cast<float>(svc_internal_.svc_params.scaling_factor_num[i]) /
+ svc_internal_.svc_params.scaling_factor_den[i];
+ total += rate_ratio[i];
+ }
+
+ for (i = 0; i < num_spatial_layers_; ++i) {
+ config_->ss_target_bitrate[i] = static_cast<unsigned int>(
+ config_->rc_target_bitrate * rate_ratio[i] / total);
+ if (num_temporal_layers_ == 1) {
+ config_->layer_target_bitrate[i] = config_->ss_target_bitrate[i];
+ } else if (num_temporal_layers_ == 2) {
+ config_->layer_target_bitrate[i * num_temporal_layers_] =
+ config_->ss_target_bitrate[i] * 2 / 3;
+ config_->layer_target_bitrate[i * num_temporal_layers_ + 1] =
+ config_->ss_target_bitrate[i];
+ } else if (num_temporal_layers_ == 3) {
+ config_->layer_target_bitrate[i * num_temporal_layers_] =
+ config_->ss_target_bitrate[i] / 2;
+ config_->layer_target_bitrate[i * num_temporal_layers_ + 1] =
+ config_->layer_target_bitrate[i * num_temporal_layers_] +
+ (config_->ss_target_bitrate[i] / 4);
+ config_->layer_target_bitrate[i * num_temporal_layers_ + 2] =
+ config_->ss_target_bitrate[i];
+ } else {
+ LOG(LS_ERROR) << "Unsupported number of temporal layers: "
+ << num_temporal_layers_;
+ return false;
+ }
}
}
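
As a worked example of the explicitly-configured branch above: if the layers were configured as 100 and 300 kbps (total 400) but the rate controller now grants 200 kbps, each layer is scaled by its original share, giving 50 and 150 kbps. The same arithmetic, runnable in isolation (hypothetical numbers):

#include <cstdint>
#include <cstdio>

int main() {
  const int configured[2] = {100, 300};  // Originally requested kbps.
  const int total = 400;
  const int rc_target_bitrate = 200;  // What the controller grants now.
  for (int i = 0; i < 2; ++i) {
    // Same expression as SetSvcRates(): keep the configured ratios.
    const int layer_rate = static_cast<int>(
        static_cast<int64_t>(rc_target_bitrate) * configured[i] / total);
    std::printf("layer %d -> %d kbps\n", i, layer_rate);  // 50, 150.
  }
  return 0;
}
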
@@ -178,6 +206,7 @@ int VP9EncoderImpl::SetRates(uint32_t new_bitrate_kbit,
}
config_->rc_target_bitrate = new_bitrate_kbit;
codec_.maxFramerate = new_framerate;
+ spatial_layer_->ConfigureBitrate(new_bitrate_kbit, 0);
if (!SetSvcRates()) {
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
@@ -216,6 +245,7 @@ int VP9EncoderImpl::InitEncode(const VideoCodec* inst,
if (inst->codecSpecific.VP9.numberOfSpatialLayers > 2) {
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
}
+
int retVal = Release();
if (retVal < 0) {
return retVal;
@@ -237,10 +267,10 @@ int VP9EncoderImpl::InitEncode(const VideoCodec* inst,
num_temporal_layers_ = 1;
// Random start 16 bits is enough.
- picture_id_ = static_cast<uint16_t>(rand()) & 0x7FFF;
+ picture_id_ = static_cast<uint16_t>(rand()) & 0x7FFF; // NOLINT
// Allocate memory for encoded image
if (encoded_image_._buffer != NULL) {
- delete [] encoded_image_._buffer;
+ delete[] encoded_image_._buffer;
}
encoded_image_._size = CalcBufferSize(kI420, codec_.width, codec_.height);
encoded_image_._buffer = new uint8_t[encoded_image_._size];
@@ -248,8 +278,8 @@ int VP9EncoderImpl::InitEncode(const VideoCodec* inst,
// Creating a wrapper to the image - setting image data to NULL. Actual
// pointer will be set in encode. Setting align to 1, as it is meaningless
// (actual memory is not allocated).
- raw_ = vpx_img_wrap(NULL, VPX_IMG_FMT_I420, codec_.width, codec_.height,
- 1, NULL);
+ raw_ = vpx_img_wrap(NULL, VPX_IMG_FMT_I420, codec_.width, codec_.height, 1,
+ NULL);
// Populate encoder configuration with default values.
if (vpx_codec_enc_config_default(vpx_codec_vp9_cx(), config_, 0)) {
return WEBRTC_VIDEO_CODEC_ERROR;
@@ -264,8 +294,8 @@ int VP9EncoderImpl::InitEncode(const VideoCodec* inst,
config_->g_lag_in_frames = 0; // 0- no frame lagging
config_->g_threads = 1;
// Rate control settings.
- config_->rc_dropframe_thresh = inst->codecSpecific.VP9.frameDroppingOn ?
- 30 : 0;
+ config_->rc_dropframe_thresh =
+ inst->codecSpecific.VP9.frameDroppingOn ? 30 : 0;
config_->rc_end_usage = VPX_CBR;
config_->g_pass = VPX_RC_ONE_PASS;
config_->rc_min_quantizer = 2;
@@ -277,24 +307,32 @@ int VP9EncoderImpl::InitEncode(const VideoCodec* inst,
config_->rc_buf_sz = 1000;
// Set the maximum target size of any key-frame.
rc_max_intra_target_ = MaxIntraTarget(config_->rc_buf_optimal_sz);
- if (inst->codecSpecific.VP9.keyFrameInterval > 0) {
+ if (inst->codecSpecific.VP9.keyFrameInterval > 0) {
config_->kf_mode = VPX_KF_AUTO;
config_->kf_max_dist = inst->codecSpecific.VP9.keyFrameInterval;
+ // Needs to be set (in svc mode) to get correct periodic key frame interval
+ // (will have no effect in non-svc).
+ config_->kf_min_dist = config_->kf_max_dist;
} else {
config_->kf_mode = VPX_KF_DISABLED;
}
- config_->rc_resize_allowed = inst->codecSpecific.VP9.automaticResizeOn ?
- 1 : 0;
+ config_->rc_resize_allowed =
+ inst->codecSpecific.VP9.automaticResizeOn ? 1 : 0;
// Determine number of threads based on the image size and #cores.
- config_->g_threads = NumberOfThreads(config_->g_w,
- config_->g_h,
- number_of_cores);
+ config_->g_threads =
+ NumberOfThreads(config_->g_w, config_->g_h, number_of_cores);
cpu_speed_ = GetCpuSpeed(config_->g_w, config_->g_h);
// TODO(asapersson): Check configuration of temporal switch up and increase
// pattern length.
- if (num_temporal_layers_ == 1) {
+ is_flexible_mode_ = inst->codecSpecific.VP9.flexibleMode;
+ if (is_flexible_mode_) {
+ config_->temporal_layering_mode = VP9E_TEMPORAL_LAYERING_MODE_BYPASS;
+ config_->ts_number_layers = num_temporal_layers_;
+ if (codec_.mode == kScreensharing)
+ spatial_layer_->ConfigureBitrate(inst->startBitrate, 0);
+ } else if (num_temporal_layers_ == 1) {
gof_.SetGofInfoVP9(kTemporalStructureMode1);
config_->temporal_layering_mode = VP9E_TEMPORAL_LAYERING_MODE_NOLAYERING;
config_->ts_number_layers = 1;
@@ -326,7 +364,7 @@ int VP9EncoderImpl::InitEncode(const VideoCodec* inst,
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
}
- tl0_pic_idx_ = static_cast<uint8_t>(rand());
+ tl0_pic_idx_ = static_cast<uint8_t>(rand()); // NOLINT
return InitAndSetControlSettings(inst);
}
@@ -347,16 +385,28 @@ int VP9EncoderImpl::NumberOfThreads(int width,
}
int VP9EncoderImpl::InitAndSetControlSettings(const VideoCodec* inst) {
- config_->ss_number_layers = num_spatial_layers_;
-
- int scaling_factor_num = 256;
- for (int i = num_spatial_layers_ - 1; i >= 0; --i) {
+ // Set QP-min/max per spatial and temporal layer.
+ int tot_num_layers = num_spatial_layers_ * num_temporal_layers_;
+ for (int i = 0; i < tot_num_layers; ++i) {
svc_internal_.svc_params.max_quantizers[i] = config_->rc_max_quantizer;
svc_internal_.svc_params.min_quantizers[i] = config_->rc_min_quantizer;
- // 1:2 scaling in each dimension.
- svc_internal_.svc_params.scaling_factor_num[i] = scaling_factor_num;
- svc_internal_.svc_params.scaling_factor_den[i] = 256;
- scaling_factor_num /= 2;
+ }
+ config_->ss_number_layers = num_spatial_layers_;
+ if (ExplicitlyConfiguredSpatialLayers()) {
+ for (int i = 0; i < num_spatial_layers_; ++i) {
+ const auto& layer = codec_.spatialLayers[i];
+ svc_internal_.svc_params.scaling_factor_num[i] = layer.scaling_factor_num;
+ svc_internal_.svc_params.scaling_factor_den[i] = layer.scaling_factor_den;
+ }
+ } else {
+ int scaling_factor_num = 256;
+ for (int i = num_spatial_layers_ - 1; i >= 0; --i) {
+ // 1:2 scaling in each dimension.
+ svc_internal_.svc_params.scaling_factor_num[i] = scaling_factor_num;
+ svc_internal_.svc_params.scaling_factor_den[i] = 256;
+ if (codec_.mode != kScreensharing)
+ scaling_factor_num /= 2;
+ }
}
if (!SetSvcRates()) {
@@ -381,8 +431,10 @@ int VP9EncoderImpl::InitAndSetControlSettings(const VideoCodec* inst) {
}
// Register callback for getting each spatial layer.
vpx_codec_priv_output_cx_pkt_cb_pair_t cbp = {
- VP9EncoderImpl::EncoderOutputCodedPacketCallback, (void*)(this)};
- vpx_codec_control(encoder_, VP9E_REGISTER_CX_CALLBACK, (void*)(&cbp));
+ VP9EncoderImpl::EncoderOutputCodedPacketCallback,
+ reinterpret_cast<void*>(this)};
+ vpx_codec_control(encoder_, VP9E_REGISTER_CX_CALLBACK,
+ reinterpret_cast<void*>(&cbp));
// Control function to set the number of column tiles in encoding a frame, in
// log2 unit: e.g., 0 = 1 tile column, 1 = 2 tile columns, 2 = 4 tile columns.
@@ -417,7 +469,7 @@ uint32_t VP9EncoderImpl::MaxIntraTarget(uint32_t optimal_buffer_size) {
optimal_buffer_size * scale_par * codec_.maxFramerate / 10;
// Don't go below 3 times the per frame bandwidth.
const uint32_t min_intra_size = 300;
- return (target_pct < min_intra_size) ? min_intra_size: target_pct;
+ return (target_pct < min_intra_size) ? min_intra_size : target_pct;
}
int VP9EncoderImpl::Encode(const VideoFrame& input_image,
@@ -455,12 +507,35 @@ int VP9EncoderImpl::Encode(const VideoFrame& input_image,
raw_->stride[VPX_PLANE_U] = input_image.stride(kUPlane);
raw_->stride[VPX_PLANE_V] = input_image.stride(kVPlane);
- int flags = 0;
+ vpx_enc_frame_flags_t flags = 0;
bool send_keyframe = (frame_type == kVideoFrameKey);
if (send_keyframe) {
// Key frame request from caller.
flags = VPX_EFLAG_FORCE_KF;
}
+
+ if (is_flexible_mode_) {
+ SuperFrameRefSettings settings;
+
+ // These structs are copied when calling vpx_codec_control,
+ // therefore it is ok for them to go out of scope.
+ vpx_svc_ref_frame_config enc_layer_conf;
+ vpx_svc_layer_id layer_id;
+
+ if (codec_.mode == kRealtimeVideo) {
+ // Real time video not yet implemented in flexible mode.
+ RTC_NOTREACHED();
+ } else {
+ settings = spatial_layer_->GetSuperFrameSettings(input_image.timestamp(),
+ send_keyframe);
+ }
+ enc_layer_conf = GenerateRefsAndFlags(settings);
+ layer_id.temporal_layer_id = 0;
+ layer_id.spatial_layer_id = settings.start_layer;
+ vpx_codec_control(encoder_, VP9E_SET_SVC_LAYER_ID, &layer_id);
+ vpx_codec_control(encoder_, VP9E_SET_SVC_REF_FRAME_CONFIG, &enc_layer_conf);
+ }
+
assert(codec_.maxFramerate > 0);
uint32_t duration = 90000 / codec_.maxFramerate;
if (vpx_codec_encode(encoder_, raw_, timestamp_, duration, flags,
@@ -473,12 +548,12 @@ int VP9EncoderImpl::Encode(const VideoFrame& input_image,
}
void VP9EncoderImpl::PopulateCodecSpecific(CodecSpecificInfo* codec_specific,
- const vpx_codec_cx_pkt& pkt,
- uint32_t timestamp) {
+ const vpx_codec_cx_pkt& pkt,
+ uint32_t timestamp) {
assert(codec_specific != NULL);
codec_specific->codecType = kVideoCodecVP9;
- CodecSpecificInfoVP9 *vp9_info = &(codec_specific->codecSpecific.VP9);
- // TODO(asapersson): Set correct values.
+ CodecSpecificInfoVP9* vp9_info = &(codec_specific->codecSpecific.VP9);
+ // TODO(asapersson): Set correct value.
vp9_info->inter_pic_predicted =
(pkt.data.frame.flags & VPX_FRAME_IS_KEY) ? false : true;
vp9_info->flexible_mode = codec_.codecSpecific.VP9.flexibleMode;
@@ -486,9 +561,6 @@ void VP9EncoderImpl::PopulateCodecSpecific(CodecSpecificInfo* codec_specific,
!codec_.codecSpecific.VP9.flexibleMode)
? true
: false;
- if (pkt.data.frame.flags & VPX_FRAME_IS_KEY) {
- gof_idx_ = 0;
- }
vpx_svc_layer_id_t layer_id = {0};
vpx_codec_control(encoder_, VP9E_GET_SVC_LAYER_ID, &layer_id);
@@ -511,25 +583,31 @@ void VP9EncoderImpl::PopulateCodecSpecific(CodecSpecificInfo* codec_specific,
vp9_info->ss_data_available = false;
}
- if (vp9_info->flexible_mode) {
- vp9_info->gof_idx = kNoGofIdx;
+ // TODO(asapersson): this info has to be obtained from the encoder.
+ vp9_info->temporal_up_switch = false;
+
+ bool is_first_frame = false;
+ if (is_flexible_mode_) {
+ is_first_frame =
+ layer_id.spatial_layer_id == spatial_layer_->GetStartLayer();
} else {
- vp9_info->gof_idx =
- static_cast<uint8_t>(gof_idx_++ % gof_.num_frames_in_gof);
+ is_first_frame = layer_id.spatial_layer_id == 0;
}
- // TODO(asapersson): this info has to be obtained from the encoder.
- vp9_info->temporal_up_switch = true;
-
- if (layer_id.spatial_layer_id == 0) {
+ if (is_first_frame) {
picture_id_ = (picture_id_ + 1) & 0x7FFF;
// TODO(asapersson): this info has to be obtained from the encoder.
vp9_info->inter_layer_predicted = false;
+ ++frames_since_kf_;
} else {
// TODO(asapersson): this info has to be obtained from the encoder.
vp9_info->inter_layer_predicted = true;
}
+ if (pkt.data.frame.flags & VPX_FRAME_IS_KEY) {
+ frames_since_kf_ = 0;
+ }
+
vp9_info->picture_id = picture_id_;
if (!vp9_info->flexible_mode) {
@@ -542,6 +620,20 @@ void VP9EncoderImpl::PopulateCodecSpecific(CodecSpecificInfo* codec_specific,
// Always populate this, so that the packetizer can properly set the marker
// bit.
vp9_info->num_spatial_layers = num_spatial_layers_;
+
+ vp9_info->num_ref_pics = 0;
+ if (vp9_info->flexible_mode) {
+ vp9_info->gof_idx = kNoGofIdx;
+ vp9_info->num_ref_pics = num_ref_pics_[layer_id.spatial_layer_id];
+ for (int i = 0; i < num_ref_pics_[layer_id.spatial_layer_id]; ++i) {
+ vp9_info->p_diff[i] = p_diff_[layer_id.spatial_layer_id][i];
+ }
+ } else {
+ vp9_info->gof_idx =
+ static_cast<uint8_t>(frames_since_kf_ % gof_.num_frames_in_gof);
+ vp9_info->temporal_up_switch = gof_.temporal_up_switch[vp9_info->gof_idx];
+ }
+
if (vp9_info->ss_data_available) {
vp9_info->spatial_layer_resolution_present = true;
for (size_t i = 0; i < vp9_info->num_spatial_layers; ++i) {
@@ -577,6 +669,14 @@ int VP9EncoderImpl::GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt) {
frag_info.fragmentationPlType[part_idx] = 0;
frag_info.fragmentationTimeDiff[part_idx] = 0;
encoded_image_._length += static_cast<uint32_t>(pkt->data.frame.sz);
+
+ vpx_svc_layer_id_t layer_id = {0};
+ vpx_codec_control(encoder_, VP9E_GET_SVC_LAYER_ID, &layer_id);
+ if (is_flexible_mode_ && codec_.mode == kScreensharing)
+ spatial_layer_->LayerFrameEncoded(
+ static_cast<unsigned int>(encoded_image_._length),
+ layer_id.spatial_layer_id);
+
assert(encoded_image_._length <= encoded_image_._size);
// End of frame.
@@ -598,6 +698,108 @@ int VP9EncoderImpl::GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt) {
return WEBRTC_VIDEO_CODEC_OK;
}
+vpx_svc_ref_frame_config VP9EncoderImpl::GenerateRefsAndFlags(
+ const SuperFrameRefSettings& settings) {
+ static const vpx_enc_frame_flags_t kAllFlags =
+ VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_LAST |
+ VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_GF;
+ vpx_svc_ref_frame_config sf_conf = {};
+ if (settings.is_keyframe) {
+ // Used later on to make sure we don't make any invalid references.
+ memset(buffer_updated_at_frame_, -1, sizeof(buffer_updated_at_frame_));
+ for (int layer = settings.start_layer; layer <= settings.stop_layer;
+ ++layer) {
+ num_ref_pics_[layer] = 0;
+ buffer_updated_at_frame_[settings.layer[layer].upd_buf] = frames_encoded_;
+ // When encoding a keyframe only the alt_fb_idx is used
+ // to specify which layer ends up in which buffer.
+ sf_conf.alt_fb_idx[layer] = settings.layer[layer].upd_buf;
+ }
+ } else {
+ for (int layer_idx = settings.start_layer; layer_idx <= settings.stop_layer;
+ ++layer_idx) {
+ vpx_enc_frame_flags_t layer_flags = kAllFlags;
+ num_ref_pics_[layer_idx] = 0;
+ int8_t refs[3] = {settings.layer[layer_idx].ref_buf1,
+ settings.layer[layer_idx].ref_buf2,
+ settings.layer[layer_idx].ref_buf3};
+
+ for (unsigned int ref_idx = 0; ref_idx < kMaxVp9RefPics; ++ref_idx) {
+ if (refs[ref_idx] == -1)
+ continue;
+
+ RTC_DCHECK_GE(refs[ref_idx], 0);
+ RTC_DCHECK_LE(refs[ref_idx], 7);
+          // It is easier to clear flags from the full set than to
+          // build the flags up from 0.
+ switch (num_ref_pics_[layer_idx]) {
+ case 0: {
+ sf_conf.lst_fb_idx[layer_idx] = refs[ref_idx];
+ layer_flags &= ~VP8_EFLAG_NO_REF_LAST;
+ break;
+ }
+ case 1: {
+ sf_conf.gld_fb_idx[layer_idx] = refs[ref_idx];
+ layer_flags &= ~VP8_EFLAG_NO_REF_GF;
+ break;
+ }
+ case 2: {
+ sf_conf.alt_fb_idx[layer_idx] = refs[ref_idx];
+ layer_flags &= ~VP8_EFLAG_NO_REF_ARF;
+ break;
+ }
+ }
+ // Make sure we don't reference a buffer that hasn't been
+ // used at all or hasn't been used since a keyframe.
+ RTC_DCHECK_NE(buffer_updated_at_frame_[refs[ref_idx]], -1);
+
+ p_diff_[layer_idx][num_ref_pics_[layer_idx]] =
+ frames_encoded_ - buffer_updated_at_frame_[refs[ref_idx]];
+ num_ref_pics_[layer_idx]++;
+ }
+
+ bool upd_buf_same_as_a_ref = false;
+ if (settings.layer[layer_idx].upd_buf != -1) {
+ for (unsigned int ref_idx = 0; ref_idx < kMaxVp9RefPics; ++ref_idx) {
+ if (settings.layer[layer_idx].upd_buf == refs[ref_idx]) {
+ switch (ref_idx) {
+ case 0: {
+ layer_flags &= ~VP8_EFLAG_NO_UPD_LAST;
+ break;
+ }
+ case 1: {
+ layer_flags &= ~VP8_EFLAG_NO_UPD_GF;
+ break;
+ }
+ case 2: {
+ layer_flags &= ~VP8_EFLAG_NO_UPD_ARF;
+ break;
+ }
+ }
+ upd_buf_same_as_a_ref = true;
+ break;
+ }
+ }
+ if (!upd_buf_same_as_a_ref) {
+ // If we have three references and a buffer is specified to be
+ // updated, then that buffer must be the same as one of the
+ // three references.
+ RTC_CHECK_LT(num_ref_pics_[layer_idx], kMaxVp9RefPics);
+
+ sf_conf.alt_fb_idx[layer_idx] = settings.layer[layer_idx].upd_buf;
+ layer_flags ^= VP8_EFLAG_NO_UPD_ARF;
+ }
+
+ int updated_buffer = settings.layer[layer_idx].upd_buf;
+ buffer_updated_at_frame_[updated_buffer] = frames_encoded_;
+ sf_conf.frame_flags[layer_idx] = layer_flags;
+ }
+ }
+ }
+ ++frames_encoded_;
+ return sf_conf;
+}
+
int VP9EncoderImpl::SetChannelParameters(uint32_t packet_loss, int64_t rtt) {
return WEBRTC_VIDEO_CODEC_OK;
}
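
The flag handling in GenerateRefsAndFlags() above starts from a mask with every NO_REF/NO_UPD bit set and clears bits as reference slots are assigned. For a single layer that both references and updates buffer 3 through the LAST slot, the bit manipulation reduces to the following sketch (uses the libvpx VP8_EFLAG_* constants, which the VP9 encoder shares; not part of the patch):

#include <cassert>
#include "vpx/vp8cx.h"

int main() {
  const vpx_enc_frame_flags_t kAllFlags =
      VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_LAST |
      VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_GF;
  vpx_enc_frame_flags_t layer_flags = kAllFlags;

  // The first reference slot maps to LAST: allow referencing it
  // (in the real code, sf_conf.lst_fb_idx[layer] = 3 at this point).
  layer_flags &= ~VP8_EFLAG_NO_REF_LAST;
  // The update buffer is that same buffer (ref_idx == 0), so also
  // allow updating LAST.
  layer_flags &= ~VP8_EFLAG_NO_UPD_LAST;

  // GF and ARF remain fully disabled for this layer.
  assert(layer_flags & VP8_EFLAG_NO_REF_GF);
  assert(layer_flags & VP8_EFLAG_NO_UPD_ARF);
  return 0;
}
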
@@ -608,6 +810,10 @@ int VP9EncoderImpl::RegisterEncodeCompleteCallback(
return WEBRTC_VIDEO_CODEC_OK;
}
+const char* VP9EncoderImpl::ImplementationName() const {
+ return "libvpx";
+}
+
VP9Decoder* VP9Decoder::Create() {
return new VP9DecoderImpl();
}
@@ -652,7 +858,7 @@ int VP9DecoderImpl::InitDecode(const VideoCodec* inst, int number_of_cores) {
if (decoder_ == NULL) {
decoder_ = new vpx_codec_ctx_t;
}
- vpx_codec_dec_cfg_t cfg;
+ vpx_codec_dec_cfg_t cfg;
// Setting number of threads to a constant value (1)
cfg.threads = 1;
cfg.h = cfg.w = 0; // set after decode
@@ -705,10 +911,8 @@ int VP9DecoderImpl::Decode(const EncodedImage& input_image,
}
// During decode libvpx may get and release buffers from |frame_buffer_pool_|.
// In practice libvpx keeps a few (~3-4) buffers alive at a time.
- if (vpx_codec_decode(decoder_,
- buffer,
- static_cast<unsigned int>(input_image._length),
- 0,
+ if (vpx_codec_decode(decoder_, buffer,
+ static_cast<unsigned int>(input_image._length), 0,
VPX_DL_REALTIME)) {
return WEBRTC_VIDEO_CODEC_ERROR;
}
@@ -730,24 +934,22 @@ int VP9DecoderImpl::ReturnFrame(const vpx_image_t* img, uint32_t timestamp) {
}
// This buffer contains all of |img|'s image data, a reference counted
- // Vp9FrameBuffer. Performing AddRef/Release ensures it is not released and
- // recycled during use (libvpx is done with the buffers after a few
+ // Vp9FrameBuffer. (libvpx is done with the buffers after a few
// vpx_codec_decode calls or vpx_codec_destroy).
Vp9FrameBufferPool::Vp9FrameBuffer* img_buffer =
static_cast<Vp9FrameBufferPool::Vp9FrameBuffer*>(img->fb_priv);
- img_buffer->AddRef();
// The buffer can be used directly by the VideoFrame (without copy) by
// using a WrappedI420Buffer.
rtc::scoped_refptr<WrappedI420Buffer> img_wrapped_buffer(
new rtc::RefCountedObject<webrtc::WrappedI420Buffer>(
- img->d_w, img->d_h,
- img->planes[VPX_PLANE_Y], img->stride[VPX_PLANE_Y],
- img->planes[VPX_PLANE_U], img->stride[VPX_PLANE_U],
- img->planes[VPX_PLANE_V], img->stride[VPX_PLANE_V],
+ img->d_w, img->d_h, img->planes[VPX_PLANE_Y],
+ img->stride[VPX_PLANE_Y], img->planes[VPX_PLANE_U],
+ img->stride[VPX_PLANE_U], img->planes[VPX_PLANE_V],
+ img->stride[VPX_PLANE_V],
// WrappedI420Buffer's mechanism for allowing the release of its frame
// buffer is through a callback function. This is where we should
// release |img_buffer|.
- rtc::Bind(&WrappedI420BufferNoLongerUsedCb, img_buffer)));
+ rtc::KeepRefUntilDone(img_buffer)));
VideoFrame decoded_image;
decoded_image.set_video_frame_buffer(img_wrapped_buffer);
@@ -781,4 +983,9 @@ int VP9DecoderImpl::Release() {
inited_ = false;
return WEBRTC_VIDEO_CODEC_OK;
}
+
+const char* VP9DecoderImpl::ImplementationName() const {
+ return "libvpx";
+}
+
} // namespace webrtc
diff --git a/webrtc/modules/video_coding/codecs/vp9/vp9_impl.h b/webrtc/modules/video_coding/codecs/vp9/vp9_impl.h
index f9c123079e..bfa4540304 100644
--- a/webrtc/modules/video_coding/codecs/vp9/vp9_impl.h
+++ b/webrtc/modules/video_coding/codecs/vp9/vp9_impl.h
@@ -9,8 +9,10 @@
*
*/
-#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_VP9_IMPL_H_
-#define WEBRTC_MODULES_VIDEO_CODING_CODECS_VP9_IMPL_H_
+#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_VP9_VP9_IMPL_H_
+#define WEBRTC_MODULES_VIDEO_CODING_CODECS_VP9_VP9_IMPL_H_
+
+#include <vector>
#include "webrtc/modules/video_coding/codecs/vp9/include/vp9.h"
#include "webrtc/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.h"
@@ -21,6 +23,8 @@
namespace webrtc {
+class ScreenshareLayersVP9;
+
class VP9EncoderImpl : public VP9Encoder {
public:
VP9EncoderImpl();
@@ -45,6 +49,22 @@ class VP9EncoderImpl : public VP9Encoder {
void OnDroppedFrame() override {}
+ const char* ImplementationName() const override;
+
+ struct LayerFrameRefSettings {
+ int8_t upd_buf = -1; // -1 - no update, 0..7 - update buffer 0..7
+ int8_t ref_buf1 = -1; // -1 - no reference, 0..7 - reference buffer 0..7
+ int8_t ref_buf2 = -1; // -1 - no reference, 0..7 - reference buffer 0..7
+ int8_t ref_buf3 = -1; // -1 - no reference, 0..7 - reference buffer 0..7
+ };
+
+ struct SuperFrameRefSettings {
+ LayerFrameRefSettings layer[kMaxVp9NumberOfSpatialLayers];
+ uint8_t start_layer = 0; // The first spatial layer to be encoded.
+ uint8_t stop_layer = 0; // The last spatial layer to be encoded.
+ bool is_keyframe = false;
+ };
+
private:
// Determine number of encoder threads to use.
int NumberOfThreads(int width, int height, int number_of_cores);
@@ -56,8 +76,18 @@ class VP9EncoderImpl : public VP9Encoder {
const vpx_codec_cx_pkt& pkt,
uint32_t timestamp);
+ bool ExplicitlyConfiguredSpatialLayers() const;
bool SetSvcRates();
+ // Used for flexible mode to set the flags and buffer references used
+ // by the encoder. Also calculates the references used by the RTP
+ // packetizer.
+ //
+ // Has to be called for every frame (keyframes included) to update the
+ // state used to calculate references.
+ vpx_svc_ref_frame_config GenerateRefsAndFlags(
+ const SuperFrameRefSettings& settings);
+
virtual int GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt);
// Callback function for outputting packets per spatial layer.
@@ -88,11 +118,18 @@ class VP9EncoderImpl : public VP9Encoder {
GofInfoVP9 gof_; // Contains each frame's temporal information for
// non-flexible mode.
uint8_t tl0_pic_idx_; // Only used in non-flexible mode.
- size_t gof_idx_; // Only used in non-flexible mode.
+ size_t frames_since_kf_;
uint8_t num_temporal_layers_;
uint8_t num_spatial_layers_;
-};
+ // Used for flexible mode.
+ bool is_flexible_mode_;
+ int64_t buffer_updated_at_frame_[kNumVp9Buffers];
+ int64_t frames_encoded_;
+ uint8_t num_ref_pics_[kMaxVp9NumberOfSpatialLayers];
+ uint8_t p_diff_[kMaxVp9NumberOfSpatialLayers][kMaxVp9RefPics];
+ rtc::scoped_ptr<ScreenshareLayersVP9> spatial_layer_;
+};
class VP9DecoderImpl : public VP9Decoder {
public:
@@ -114,6 +151,8 @@ class VP9DecoderImpl : public VP9Decoder {
int Reset() override;
+ const char* ImplementationName() const override;
+
private:
int ReturnFrame(const vpx_image_t* img, uint32_t timeStamp);
@@ -127,4 +166,4 @@ class VP9DecoderImpl : public VP9Decoder {
};
} // namespace webrtc
-#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_VP9_IMPL_H_
+#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_VP9_VP9_IMPL_H_
diff --git a/webrtc/modules/video_coding/main/source/content_metrics_processing.cc b/webrtc/modules/video_coding/content_metrics_processing.cc
index 757ffb0e46..0c3a6dbc6c 100644
--- a/webrtc/modules/video_coding/main/source/content_metrics_processing.cc
+++ b/webrtc/modules/video_coding/content_metrics_processing.cc
@@ -8,12 +8,12 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/video_coding/main/source/content_metrics_processing.h"
+#include "webrtc/modules/video_coding/content_metrics_processing.h"
#include <math.h>
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/modules/video_coding/main/interface/video_coding_defines.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/video_coding/include/video_coding_defines.h"
namespace webrtc {
//////////////////////////////////
@@ -38,15 +38,15 @@ int VCMContentMetricsProcessing::Reset() {
recursive_avg_->Reset();
uniform_avg_->Reset();
frame_cnt_uniform_avg_ = 0;
- avg_motion_level_ = 0.0f;
+ avg_motion_level_ = 0.0f;
avg_spatial_level_ = 0.0f;
return VCM_OK;
}
void VCMContentMetricsProcessing::UpdateFrameRate(uint32_t frameRate) {
// Update factor for recursive averaging.
- recursive_avg_factor_ = static_cast<float> (1000.0f) /
- static_cast<float>(frameRate * kQmMinIntervalMs);
+ recursive_avg_factor_ = static_cast<float>(1000.0f) /
+ static_cast<float>(frameRate * kQmMinIntervalMs);
}
VideoContentMetrics* VCMContentMetricsProcessing::LongTermAvgData() {
@@ -58,10 +58,10 @@ VideoContentMetrics* VCMContentMetricsProcessing::ShortTermAvgData() {
return NULL;
}
// Two metrics are used: motion and spatial level.
- uniform_avg_->motion_magnitude = avg_motion_level_ /
- static_cast<float>(frame_cnt_uniform_avg_);
- uniform_avg_->spatial_pred_err = avg_spatial_level_ /
- static_cast<float>(frame_cnt_uniform_avg_);
+ uniform_avg_->motion_magnitude =
+ avg_motion_level_ / static_cast<float>(frame_cnt_uniform_avg_);
+ uniform_avg_->spatial_pred_err =
+ avg_spatial_level_ / static_cast<float>(frame_cnt_uniform_avg_);
return uniform_avg_;
}
@@ -73,7 +73,7 @@ void VCMContentMetricsProcessing::ResetShortTermAvgData() {
}
int VCMContentMetricsProcessing::UpdateContentData(
- const VideoContentMetrics *contentMetrics) {
+ const VideoContentMetrics* contentMetrics) {
if (contentMetrics == NULL) {
return VCM_OK;
}
@@ -81,7 +81,7 @@ int VCMContentMetricsProcessing::UpdateContentData(
}
int VCMContentMetricsProcessing::ProcessContent(
- const VideoContentMetrics *contentMetrics) {
+ const VideoContentMetrics* contentMetrics) {
// Update the recursive averaged metrics: average is over longer window
// of time: over QmMinIntervalMs ms.
UpdateRecursiveAvg(contentMetrics);
@@ -92,34 +92,33 @@ int VCMContentMetricsProcessing::ProcessContent(
}
void VCMContentMetricsProcessing::UpdateUniformAvg(
- const VideoContentMetrics *contentMetrics) {
+ const VideoContentMetrics* contentMetrics) {
// Update frame counter.
frame_cnt_uniform_avg_ += 1;
// Update averaged metrics: motion and spatial level are used.
avg_motion_level_ += contentMetrics->motion_magnitude;
- avg_spatial_level_ += contentMetrics->spatial_pred_err;
+ avg_spatial_level_ += contentMetrics->spatial_pred_err;
return;
}
void VCMContentMetricsProcessing::UpdateRecursiveAvg(
- const VideoContentMetrics *contentMetrics) {
-
+ const VideoContentMetrics* contentMetrics) {
// Spatial metrics: 2x2, 1x2(H), 2x1(V).
- recursive_avg_->spatial_pred_err = (1 - recursive_avg_factor_) *
- recursive_avg_->spatial_pred_err +
+ recursive_avg_->spatial_pred_err =
+ (1 - recursive_avg_factor_) * recursive_avg_->spatial_pred_err +
recursive_avg_factor_ * contentMetrics->spatial_pred_err;
- recursive_avg_->spatial_pred_err_h = (1 - recursive_avg_factor_) *
- recursive_avg_->spatial_pred_err_h +
+ recursive_avg_->spatial_pred_err_h =
+ (1 - recursive_avg_factor_) * recursive_avg_->spatial_pred_err_h +
recursive_avg_factor_ * contentMetrics->spatial_pred_err_h;
- recursive_avg_->spatial_pred_err_v = (1 - recursive_avg_factor_) *
- recursive_avg_->spatial_pred_err_v +
+ recursive_avg_->spatial_pred_err_v =
+ (1 - recursive_avg_factor_) * recursive_avg_->spatial_pred_err_v +
recursive_avg_factor_ * contentMetrics->spatial_pred_err_v;
// Motion metric: Derived from NFD (normalized frame difference).
- recursive_avg_->motion_magnitude = (1 - recursive_avg_factor_) *
- recursive_avg_->motion_magnitude +
+ recursive_avg_->motion_magnitude =
+ (1 - recursive_avg_factor_) * recursive_avg_->motion_magnitude +
recursive_avg_factor_ * contentMetrics->motion_magnitude;
}
-} // namespace
+} // namespace webrtc
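
The recursive average reformatted above is a plain exponential moving average, new = (1 - f) * old + f * sample, with f = 1000 / (frameRate * kQmMinIntervalMs) from UpdateFrameRate(), so the effective window is about kQmMinIntervalMs. A compact illustration with assumed values (not part of the patch):

#include <cstdio>

int main() {
  // frameRate = 30 fps and kQmMinIntervalMs = 10000 ms, as in the header.
  const float factor = 1000.0f / (30 * 10000);  // ~0.0033 per frame.
  float avg = 0.0f;
  // Feed a constant motion magnitude of 1.0; the average converges
  // towards the sample with a time constant of ~10 s (300 frames).
  for (int frame = 0; frame < 300; ++frame)
    avg = (1 - factor) * avg + factor * 1.0f;
  std::printf("avg after 10 s: %.3f\n", avg);  // ~0.632 = 1 - 1/e.
  return 0;
}
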
diff --git a/webrtc/modules/video_coding/main/source/content_metrics_processing.h b/webrtc/modules/video_coding/content_metrics_processing.h
index 3517f757d4..3f67ec19c9 100644
--- a/webrtc/modules/video_coding/main/source/content_metrics_processing.h
+++ b/webrtc/modules/video_coding/content_metrics_processing.h
@@ -18,14 +18,10 @@ namespace webrtc {
struct VideoContentMetrics;
// QM interval time (in ms)
-enum {
- kQmMinIntervalMs = 10000
-};
+enum { kQmMinIntervalMs = 10000 };
// Flag for NFD metric vs motion metric
-enum {
- kNfdMetric = 1
-};
+enum { kNfdMetric = 1 };
/**********************************/
/* Content Metrics Processing */
@@ -36,7 +32,7 @@ class VCMContentMetricsProcessing {
~VCMContentMetricsProcessing();
// Update class with latest metrics.
- int UpdateContentData(const VideoContentMetrics *contentMetrics);
+ int UpdateContentData(const VideoContentMetrics* contentMetrics);
// Reset the short-term averaged content data.
void ResetShortTermAvgData();
@@ -57,13 +53,13 @@ class VCMContentMetricsProcessing {
private:
// Compute working average.
- int ProcessContent(const VideoContentMetrics *contentMetrics);
+ int ProcessContent(const VideoContentMetrics* contentMetrics);
// Update the recursive averaged metrics: longer time average (~5/10 secs).
- void UpdateRecursiveAvg(const VideoContentMetrics *contentMetrics);
+ void UpdateRecursiveAvg(const VideoContentMetrics* contentMetrics);
// Update the uniform averaged metrics: shorter time average (~RTCP report).
- void UpdateUniformAvg(const VideoContentMetrics *contentMetrics);
+ void UpdateUniformAvg(const VideoContentMetrics* contentMetrics);
VideoContentMetrics* recursive_avg_;
VideoContentMetrics* uniform_avg_;
diff --git a/webrtc/modules/video_coding/main/source/decoding_state.cc b/webrtc/modules/video_coding/decoding_state.cc
index cc92f1c83f..89be9b66c1 100644
--- a/webrtc/modules/video_coding/main/source/decoding_state.cc
+++ b/webrtc/modules/video_coding/decoding_state.cc
@@ -8,12 +8,12 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/video_coding/main/source/decoding_state.h"
+#include "webrtc/modules/video_coding/decoding_state.h"
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/modules/video_coding/main/source/frame_buffer.h"
-#include "webrtc/modules/video_coding/main/source/jitter_buffer_common.h"
-#include "webrtc/modules/video_coding/main/source/packet.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/video_coding/frame_buffer.h"
+#include "webrtc/modules/video_coding/jitter_buffer_common.h"
+#include "webrtc/modules/video_coding/packet.h"
namespace webrtc {
@@ -24,7 +24,9 @@ VCMDecodingState::VCMDecodingState()
temporal_id_(kNoTemporalIdx),
tl0_pic_id_(kNoTl0PicIdx),
full_sync_(true),
- in_initial_state_(true) {}
+ in_initial_state_(true) {
+ memset(frame_decoded_, 0, sizeof(frame_decoded_));
+}
VCMDecodingState::~VCMDecodingState() {}
@@ -37,6 +39,7 @@ void VCMDecodingState::Reset() {
tl0_pic_id_ = kNoTl0PicIdx;
full_sync_ = true;
in_initial_state_ = true;
+ memset(frame_decoded_, 0, sizeof(frame_decoded_));
}
uint32_t VCMDecodingState::time_stamp() const {
@@ -63,12 +66,33 @@ bool VCMDecodingState::IsOldPacket(const VCMPacket* packet) const {
void VCMDecodingState::SetState(const VCMFrameBuffer* frame) {
assert(frame != NULL && frame->GetHighSeqNum() >= 0);
- UpdateSyncState(frame);
+ if (!UsingFlexibleMode(frame))
+ UpdateSyncState(frame);
sequence_num_ = static_cast<uint16_t>(frame->GetHighSeqNum());
time_stamp_ = frame->TimeStamp();
picture_id_ = frame->PictureId();
temporal_id_ = frame->TemporalId();
tl0_pic_id_ = frame->Tl0PicId();
+
+ if (UsingFlexibleMode(frame)) {
+ uint16_t frame_index = picture_id_ % kFrameDecodedLength;
+ if (in_initial_state_) {
+ frame_decoded_cleared_to_ = frame_index;
+ } else if (frame->FrameType() == kVideoFrameKey) {
+ memset(frame_decoded_, 0, sizeof(frame_decoded_));
+ frame_decoded_cleared_to_ = frame_index;
+ } else {
+ if (AheadOfFramesDecodedClearedTo(frame_index)) {
+ while (frame_decoded_cleared_to_ != frame_index) {
+ frame_decoded_cleared_to_ =
+ (frame_decoded_cleared_to_ + 1) % kFrameDecodedLength;
+ frame_decoded_[frame_decoded_cleared_to_] = false;
+ }
+ }
+ }
+ frame_decoded_[frame_index] = true;
+ }
+
in_initial_state_ = false;
}
@@ -80,6 +104,8 @@ void VCMDecodingState::CopyFrom(const VCMDecodingState& state) {
tl0_pic_id_ = state.tl0_pic_id_;
full_sync_ = state.full_sync_;
in_initial_state_ = state.in_initial_state_;
+ frame_decoded_cleared_to_ = state.frame_decoded_cleared_to_;
+ memcpy(frame_decoded_, state.frame_decoded_, sizeof(frame_decoded_));
}
bool VCMDecodingState::UpdateEmptyFrame(const VCMFrameBuffer* frame) {
@@ -140,8 +166,8 @@ void VCMDecodingState::UpdateSyncState(const VCMFrameBuffer* frame) {
full_sync_ = ContinuousPictureId(frame->PictureId());
}
} else {
- full_sync_ = ContinuousSeqNum(static_cast<uint16_t>(
- frame->GetLowSeqNum()));
+ full_sync_ =
+ ContinuousSeqNum(static_cast<uint16_t>(frame->GetLowSeqNum()));
}
}
}
@@ -173,7 +199,11 @@ bool VCMDecodingState::ContinuousFrame(const VCMFrameBuffer* frame) const {
if (!full_sync_ && !frame->LayerSync())
return false;
if (UsingPictureId(frame)) {
- return ContinuousPictureId(frame->PictureId());
+ if (UsingFlexibleMode(frame)) {
+ return ContinuousFrameRefs(frame);
+ } else {
+ return ContinuousPictureId(frame->PictureId());
+ }
} else {
return ContinuousSeqNum(static_cast<uint16_t>(frame->GetLowSeqNum()));
}
@@ -199,8 +229,7 @@ bool VCMDecodingState::ContinuousSeqNum(uint16_t seq_num) const {
return seq_num == static_cast<uint16_t>(sequence_num_ + 1);
}
-bool VCMDecodingState::ContinuousLayer(int temporal_id,
- int tl0_pic_id) const {
+bool VCMDecodingState::ContinuousLayer(int temporal_id, int tl0_pic_id) const {
// First, check if applicable.
if (temporal_id == kNoTemporalIdx || tl0_pic_id == kNoTl0PicIdx)
return false;
@@ -216,8 +245,41 @@ bool VCMDecodingState::ContinuousLayer(int temporal_id,
return (static_cast<uint8_t>(tl0_pic_id_ + 1) == tl0_pic_id);
}
+bool VCMDecodingState::ContinuousFrameRefs(const VCMFrameBuffer* frame) const {
+ uint8_t num_refs = frame->CodecSpecific()->codecSpecific.VP9.num_ref_pics;
+ for (uint8_t r = 0; r < num_refs; ++r) {
+ uint16_t frame_ref = frame->PictureId() -
+ frame->CodecSpecific()->codecSpecific.VP9.p_diff[r];
+ uint16_t frame_index = frame_ref % kFrameDecodedLength;
+ if (AheadOfFramesDecodedClearedTo(frame_index) ||
+ !frame_decoded_[frame_index]) {
+ return false;
+ }
+ }
+ return true;
+}
+
bool VCMDecodingState::UsingPictureId(const VCMFrameBuffer* frame) const {
return (frame->PictureId() != kNoPictureId && picture_id_ != kNoPictureId);
}
+bool VCMDecodingState::UsingFlexibleMode(const VCMFrameBuffer* frame) const {
+ return frame->CodecSpecific()->codecType == kVideoCodecVP9 &&
+ frame->CodecSpecific()->codecSpecific.VP9.flexible_mode;
+}
+
+// TODO(philipel): change how this check works; it practically
+// limits the max p_diff to 64.
+bool VCMDecodingState::AheadOfFramesDecodedClearedTo(uint16_t index) const {
+ // No way of knowing for sure if we are actually ahead of
+ // frame_decoded_cleared_to_. We just make the assumption
+ // that we are not trying to reference back to a very old
+ // index, but instead are referencing a newer index.
+ uint16_t diff =
+ index > frame_decoded_cleared_to_
+ ? kFrameDecodedLength - (index - frame_decoded_cleared_to_)
+ : frame_decoded_cleared_to_ - index;
+ return diff > kFrameDecodedLength / 2;
+}
+
} // namespace webrtc
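
AheadOfFramesDecodedClearedTo() decides direction on a ring of kFrameDecodedLength = 128 slots by assuming the shorter way around is the true one, which is exactly what limits usable p_diff to 64, as the TODO above notes. A standalone check of the distance rule (illustrative only, not part of the patch):

#include <cassert>
#include <cstdint>

// Mirror of the patched helper, with kFrameDecodedLength = 1 << 7.
bool AheadOf(uint16_t index, uint16_t cleared_to) {
  const uint16_t kFrameDecodedLength = 128;
  const uint16_t diff = index > cleared_to
                            ? kFrameDecodedLength - (index - cleared_to)
                            : cleared_to - index;
  return diff > kFrameDecodedLength / 2;
}

int main() {
  // 2 slots newer, no wrap: ahead.
  assert(AheadOf(122, 120));
  // 10 slots newer via a wrap past 127: still ahead.
  assert(AheadOf(2, 120));
  // 20 slots older: not ahead.
  assert(!AheadOf(100, 120));
  // A reference 70 frames back is misclassified as "ahead" -- this is
  // the 64-frame limit the TODO refers to.
  assert(AheadOf(50, 120));
  return 0;
}
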
diff --git a/webrtc/modules/video_coding/main/source/decoding_state.h b/webrtc/modules/video_coding/decoding_state.h
index 99ee335195..f4ea8ae081 100644
--- a/webrtc/modules/video_coding/main/source/decoding_state.h
+++ b/webrtc/modules/video_coding/decoding_state.h
@@ -21,6 +21,11 @@ class VCMPacket;
class VCMDecodingState {
public:
+ // The max number of bits used to reference back
+ // to a previous frame when using flexible mode.
+ static const uint16_t kNumRefBits = 7;
+ static const uint16_t kFrameDecodedLength = 1 << kNumRefBits;
+
VCMDecodingState();
~VCMDecodingState();
// Check for old frame
@@ -52,17 +57,24 @@ class VCMDecodingState {
bool ContinuousPictureId(int picture_id) const;
bool ContinuousSeqNum(uint16_t seq_num) const;
bool ContinuousLayer(int temporal_id, int tl0_pic_id) const;
+ bool ContinuousFrameRefs(const VCMFrameBuffer* frame) const;
bool UsingPictureId(const VCMFrameBuffer* frame) const;
+ bool UsingFlexibleMode(const VCMFrameBuffer* frame) const;
+ bool AheadOfFramesDecodedClearedTo(uint16_t index) const;
// Keep state of last decoded frame.
// TODO(mikhal/stefan): create designated classes to handle these types.
- uint16_t sequence_num_;
- uint32_t time_stamp_;
- int picture_id_;
- int temporal_id_;
- int tl0_pic_id_;
- bool full_sync_; // Sync flag when temporal layers are used.
- bool in_initial_state_;
+ uint16_t sequence_num_;
+ uint32_t time_stamp_;
+ int picture_id_;
+ int temporal_id_;
+ int tl0_pic_id_;
+ bool full_sync_; // Sync flag when temporal layers are used.
+ bool in_initial_state_;
+
+ // Used to check references in flexible mode.
+ bool frame_decoded_[kFrameDecodedLength];
+ uint16_t frame_decoded_cleared_to_;
};
} // namespace webrtc
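
The two members added at the bottom of the class turn the decoding state into a 128-slot ring of decoded flags indexed by picture id modulo kFrameDecodedLength. A hedged sketch of the lookup side that ContinuousFrameRefs() exercises (DecodedRing is illustrative, not a type from the patch; the patch maintains frame_decoded_ and frame_decoded_cleared_to_ elsewhere in decoding_state.cc):

#include <cstdint>

const uint16_t kNumRefBits = 7;
const uint16_t kFrameDecodedLength = 1 << kNumRefBits;  // 128 slots

struct DecodedRing {
  bool frame_decoded[kFrameDecodedLength] = {false};

  void MarkDecoded(uint16_t picture_id) {
    frame_decoded[picture_id % kFrameDecodedLength] = true;
  }

  // True if the picture |p_diff| steps before |picture_id| was decoded.
  bool RefDecoded(uint16_t picture_id, uint8_t p_diff) const {
    // Route the subtraction through uint16_t so a reference across picture
    // id 0 wraps the same way the patch's frame_ref computation does.
    uint16_t ref = static_cast<uint16_t>(picture_id - p_diff);
    return frame_decoded[ref % kFrameDecodedLength];
  }
};
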
diff --git a/webrtc/modules/video_coding/main/source/decoding_state_unittest.cc b/webrtc/modules/video_coding/decoding_state_unittest.cc
index feae701a65..5f5d0d38b1 100644
--- a/webrtc/modules/video_coding/main/source/decoding_state_unittest.cc
+++ b/webrtc/modules/video_coding/decoding_state_unittest.cc
@@ -11,11 +11,11 @@
#include <string.h>
#include "testing/gtest/include/gtest/gtest.h"
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/modules/video_coding/main/source/decoding_state.h"
-#include "webrtc/modules/video_coding/main/source/frame_buffer.h"
-#include "webrtc/modules/video_coding/main/source/jitter_buffer_common.h"
-#include "webrtc/modules/video_coding/main/source/packet.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/video_coding/decoding_state.h"
+#include "webrtc/modules/video_coding/frame_buffer.h"
+#include "webrtc/modules/video_coding/jitter_buffer_common.h"
+#include "webrtc/modules/video_coding/packet.h"
namespace webrtc {
@@ -446,4 +446,254 @@ TEST(TestDecodingState, PictureIdRepeat) {
EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
}
+TEST(TestDecodingState, FrameContinuityFlexibleModeKeyFrame) {
+ VCMDecodingState dec_state;
+ VCMFrameBuffer frame;
+ VCMPacket packet;
+ packet.isFirstPacket = true;
+ packet.timestamp = 1;
+ packet.seqNum = 0xffff;
+ uint8_t data[] = "I need a data pointer for this test!";
+ packet.sizeBytes = sizeof(data);
+ packet.dataPtr = data;
+ packet.codecSpecificHeader.codec = kRtpVideoVp9;
+
+ RTPVideoHeaderVP9& vp9_hdr = packet.codecSpecificHeader.codecHeader.VP9;
+ vp9_hdr.picture_id = 10;
+ vp9_hdr.flexible_mode = true;
+
+ FrameData frame_data;
+ frame_data.rtt_ms = 0;
+ frame_data.rolling_average_packets_per_frame = -1;
+
+ // Key frame as first frame
+ packet.frameType = kVideoFrameKey;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+
+ // Key frame again
+ vp9_hdr.picture_id = 11;
+ frame.Reset();
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+
+ // Ref to 11, continuous
+ frame.Reset();
+ packet.frameType = kVideoFrameDelta;
+ vp9_hdr.picture_id = 12;
+ vp9_hdr.num_ref_pics = 1;
+ vp9_hdr.pid_diff[0] = 1;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+}
+
+TEST(TestDecodingState, FrameContinuityFlexibleModeOutOfOrderFrames) {
+ VCMDecodingState dec_state;
+ VCMFrameBuffer frame;
+ VCMPacket packet;
+ packet.isFirstPacket = true;
+ packet.timestamp = 1;
+ packet.seqNum = 0xffff;
+ uint8_t data[] = "I need a data pointer for this test!";
+ packet.sizeBytes = sizeof(data);
+ packet.dataPtr = data;
+ packet.codecSpecificHeader.codec = kRtpVideoVp9;
+
+ RTPVideoHeaderVP9& vp9_hdr = packet.codecSpecificHeader.codecHeader.VP9;
+ vp9_hdr.picture_id = 10;
+ vp9_hdr.flexible_mode = true;
+
+ FrameData frame_data;
+ frame_data.rtt_ms = 0;
+ frame_data.rolling_average_packets_per_frame = -1;
+
+ // Key frame as first frame
+ packet.frameType = kVideoFrameKey;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+
+ // Ref to 10, continuous
+ frame.Reset();
+ packet.frameType = kVideoFrameDelta;
+ vp9_hdr.picture_id = 15;
+ vp9_hdr.num_ref_pics = 1;
+ vp9_hdr.pid_diff[0] = 5;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+
+ // Out of order, last id 15, this id 12, ref to 10, continuous
+ frame.Reset();
+ vp9_hdr.picture_id = 12;
+ vp9_hdr.pid_diff[0] = 2;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+
+ // Ref 10, 12, 15, continuous
+ frame.Reset();
+ vp9_hdr.picture_id = 20;
+ vp9_hdr.num_ref_pics = 3;
+ vp9_hdr.pid_diff[0] = 10;
+ vp9_hdr.pid_diff[1] = 8;
+ vp9_hdr.pid_diff[2] = 5;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+}
+
+TEST(TestDecodingState, FrameContinuityFlexibleModeGeneral) {
+ VCMDecodingState dec_state;
+ VCMFrameBuffer frame;
+ VCMPacket packet;
+ packet.isFirstPacket = true;
+ packet.timestamp = 1;
+ packet.seqNum = 0xffff;
+ uint8_t data[] = "I need a data pointer for this test!";
+ packet.sizeBytes = sizeof(data);
+ packet.dataPtr = data;
+ packet.codecSpecificHeader.codec = kRtpVideoVp9;
+
+ RTPVideoHeaderVP9& vp9_hdr = packet.codecSpecificHeader.codecHeader.VP9;
+ vp9_hdr.picture_id = 10;
+ vp9_hdr.flexible_mode = true;
+
+ FrameData frame_data;
+ frame_data.rtt_ms = 0;
+ frame_data.rolling_average_packets_per_frame = -1;
+
+ // Key frame as first frame
+ packet.frameType = kVideoFrameKey;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+
+ // Delta frame as first frame
+ frame.Reset();
+ packet.frameType = kVideoFrameDelta;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
+
+ // Key frame then delta frame
+ frame.Reset();
+ packet.frameType = kVideoFrameKey;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ dec_state.SetState(&frame);
+ frame.Reset();
+ packet.frameType = kVideoFrameDelta;
+ vp9_hdr.num_ref_pics = 1;
+ vp9_hdr.picture_id = 15;
+ vp9_hdr.pid_diff[0] = 5;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+
+ // Ref to 11, not continuous
+ frame.Reset();
+ vp9_hdr.picture_id = 16;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
+
+ // Ref to 15, continuous
+ frame.Reset();
+ vp9_hdr.picture_id = 16;
+ vp9_hdr.pid_diff[0] = 1;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+
+ // Ref to 11 and 15, not continuous
+ frame.Reset();
+ vp9_hdr.picture_id = 20;
+ vp9_hdr.num_ref_pics = 2;
+ vp9_hdr.pid_diff[0] = 9;
+ vp9_hdr.pid_diff[1] = 5;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
+
+ // Ref to 10, 15 and 16, continuous
+ frame.Reset();
+ vp9_hdr.picture_id = 22;
+ vp9_hdr.num_ref_pics = 3;
+ vp9_hdr.pid_diff[0] = 12;
+ vp9_hdr.pid_diff[1] = 7;
+ vp9_hdr.pid_diff[2] = 6;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+
+ // Key Frame, continuous
+ frame.Reset();
+ packet.frameType = kVideoFrameKey;
+ vp9_hdr.picture_id = VCMDecodingState::kFrameDecodedLength - 2;
+ vp9_hdr.num_ref_pics = 0;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+
+ // Frame at last index, ref to KF, continuous
+ frame.Reset();
+ packet.frameType = kVideoFrameDelta;
+ vp9_hdr.picture_id = VCMDecodingState::kFrameDecodedLength - 1;
+ vp9_hdr.num_ref_pics = 1;
+ vp9_hdr.pid_diff[0] = 1;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+
+ // Frame after wrapping buffer length, ref to last index, continuous
+ frame.Reset();
+ vp9_hdr.picture_id = 0;
+ vp9_hdr.num_ref_pics = 1;
+ vp9_hdr.pid_diff[0] = 1;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+
+ // Frame after wrapping start frame, ref to 0, continuous
+ frame.Reset();
+ vp9_hdr.picture_id = 20;
+ vp9_hdr.num_ref_pics = 1;
+ vp9_hdr.pid_diff[0] = 20;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+
+ // Frame after wrapping start frame, ref to 10, not continuous
+ frame.Reset();
+ vp9_hdr.picture_id = 23;
+ vp9_hdr.num_ref_pics = 1;
+ vp9_hdr.pid_diff[0] = 13;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
+
+ // Key frame, continuous
+ frame.Reset();
+ packet.frameType = kVideoFrameKey;
+ vp9_hdr.picture_id = 25;
+ vp9_hdr.num_ref_pics = 0;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+
+ // Ref to KF, continuous
+ frame.Reset();
+ packet.frameType = kVideoFrameDelta;
+ vp9_hdr.picture_id = 26;
+ vp9_hdr.num_ref_pics = 1;
+ vp9_hdr.pid_diff[0] = 1;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+
+ // Ref to frame previous to KF, not continuous
+ frame.Reset();
+ vp9_hdr.picture_id = 30;
+ vp9_hdr.num_ref_pics = 1;
+ vp9_hdr.pid_diff[0] = 30;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
+}
+
} // namespace webrtc
diff --git a/webrtc/modules/video_coding/main/source/encoded_frame.cc b/webrtc/modules/video_coding/encoded_frame.cc
index d86704d632..261074ae73 100644
--- a/webrtc/modules/video_coding/main/source/encoded_frame.cc
+++ b/webrtc/modules/video_coding/encoded_frame.cc
@@ -8,10 +8,10 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/video_coding/main/interface/video_coding_defines.h"
-#include "webrtc/modules/video_coding/main/source/encoded_frame.h"
-#include "webrtc/modules/video_coding/main/source/generic_encoder.h"
-#include "webrtc/modules/video_coding/main/source/jitter_buffer_common.h"
+#include "webrtc/modules/video_coding/include/video_coding_defines.h"
+#include "webrtc/modules/video_coding/encoded_frame.h"
+#include "webrtc/modules/video_coding/generic_encoder.h"
+#include "webrtc/modules/video_coding/jitter_buffer_common.h"
namespace webrtc {
@@ -24,7 +24,7 @@ VCMEncodedFrame::VCMEncodedFrame()
_fragmentation(),
_rotation(kVideoRotation_0),
_rotation_set(false) {
- _codecSpecificInfo.codecType = kVideoCodecUnknown;
+ _codecSpecificInfo.codecType = kVideoCodecUnknown;
}
VCMEncodedFrame::VCMEncodedFrame(const webrtc::EncodedImage& rhs)
@@ -36,15 +36,14 @@ VCMEncodedFrame::VCMEncodedFrame(const webrtc::EncodedImage& rhs)
_fragmentation(),
_rotation(kVideoRotation_0),
_rotation_set(false) {
- _codecSpecificInfo.codecType = kVideoCodecUnknown;
- _buffer = NULL;
- _size = 0;
- _length = 0;
- if (rhs._buffer != NULL)
- {
- VerifyAndAllocate(rhs._length);
- memcpy(_buffer, rhs._buffer, rhs._length);
- }
+ _codecSpecificInfo.codecType = kVideoCodecUnknown;
+ _buffer = NULL;
+ _size = 0;
+ _length = 0;
+ if (rhs._buffer != NULL) {
+ VerifyAndAllocate(rhs._length);
+ memcpy(_buffer, rhs._buffer, rhs._length);
+ }
}
VCMEncodedFrame::VCMEncodedFrame(const VCMEncodedFrame& rhs)
@@ -60,49 +59,43 @@ VCMEncodedFrame::VCMEncodedFrame(const VCMEncodedFrame& rhs)
_buffer = NULL;
_size = 0;
_length = 0;
- if (rhs._buffer != NULL)
- {
- VerifyAndAllocate(rhs._length);
- memcpy(_buffer, rhs._buffer, rhs._length);
- _length = rhs._length;
+ if (rhs._buffer != NULL) {
+ VerifyAndAllocate(rhs._length);
+ memcpy(_buffer, rhs._buffer, rhs._length);
+ _length = rhs._length;
}
_fragmentation.CopyFrom(rhs._fragmentation);
}
-VCMEncodedFrame::~VCMEncodedFrame()
-{
- Free();
+VCMEncodedFrame::~VCMEncodedFrame() {
+ Free();
}
-void VCMEncodedFrame::Free()
-{
- Reset();
- if (_buffer != NULL)
- {
- delete [] _buffer;
- _buffer = NULL;
- }
+void VCMEncodedFrame::Free() {
+ Reset();
+ if (_buffer != NULL) {
+ delete[] _buffer;
+ _buffer = NULL;
+ }
}
-void VCMEncodedFrame::Reset()
-{
- _renderTimeMs = -1;
- _timeStamp = 0;
- _payloadType = 0;
- _frameType = kVideoFrameDelta;
- _encodedWidth = 0;
- _encodedHeight = 0;
- _completeFrame = false;
- _missingFrame = false;
- _length = 0;
- _codecSpecificInfo.codecType = kVideoCodecUnknown;
- _codec = kVideoCodecUnknown;
- _rotation = kVideoRotation_0;
- _rotation_set = false;
+void VCMEncodedFrame::Reset() {
+ _renderTimeMs = -1;
+ _timeStamp = 0;
+ _payloadType = 0;
+ _frameType = kVideoFrameDelta;
+ _encodedWidth = 0;
+ _encodedHeight = 0;
+ _completeFrame = false;
+ _missingFrame = false;
+ _length = 0;
+ _codecSpecificInfo.codecType = kVideoCodecUnknown;
+ _codec = kVideoCodecUnknown;
+ _rotation = kVideoRotation_0;
+ _rotation_set = false;
}
-void VCMEncodedFrame::CopyCodecSpecific(const RTPVideoHeader* header)
-{
+void VCMEncodedFrame::CopyCodecSpecific(const RTPVideoHeader* header) {
if (header) {
switch (header->codec) {
case kRtpVideoVp8: {
@@ -147,6 +140,12 @@ void VCMEncodedFrame::CopyCodecSpecific(const RTPVideoHeader* header)
header->codecHeader.VP9.inter_pic_predicted;
_codecSpecificInfo.codecSpecific.VP9.flexible_mode =
header->codecHeader.VP9.flexible_mode;
+ _codecSpecificInfo.codecSpecific.VP9.num_ref_pics =
+ header->codecHeader.VP9.num_ref_pics;
+ for (uint8_t r = 0; r < header->codecHeader.VP9.num_ref_pics; ++r) {
+ _codecSpecificInfo.codecSpecific.VP9.p_diff[r] =
+ header->codecHeader.VP9.pid_diff[r];
+ }
_codecSpecificInfo.codecSpecific.VP9.ss_data_available =
header->codecHeader.VP9.ss_data_available;
if (header->codecHeader.VP9.picture_id != kNoPictureId) {
@@ -209,21 +208,18 @@ const RTPFragmentationHeader* VCMEncodedFrame::FragmentationHeader() const {
return &_fragmentation;
}
-void VCMEncodedFrame::VerifyAndAllocate(size_t minimumSize)
-{
- if(minimumSize > _size)
- {
- // create buffer of sufficient size
- uint8_t* newBuffer = new uint8_t[minimumSize];
- if(_buffer)
- {
- // copy old data
- memcpy(newBuffer, _buffer, _size);
- delete [] _buffer;
- }
- _buffer = newBuffer;
- _size = minimumSize;
+void VCMEncodedFrame::VerifyAndAllocate(size_t minimumSize) {
+ if (minimumSize > _size) {
+ // create buffer of sufficient size
+ uint8_t* newBuffer = new uint8_t[minimumSize];
+ if (_buffer) {
+ // copy old data
+ memcpy(newBuffer, _buffer, _size);
+ delete[] _buffer;
}
+ _buffer = newBuffer;
+ _size = minimumSize;
+ }
}
} // namespace webrtc
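
The new hunk in CopyCodecSpecific() forwards num_ref_pics and the pid_diff array from the RTP header into CodecSpecificInfo, which is what ContinuousFrameRefs() later reads as p_diff. Resolving the diffs to absolute picture ids is a plain subtraction; a sketch using the values from the FrameContinuityFlexibleModeGeneral test above (Vp9Refs is a stand-in for the real header types):

#include <cstdint>
#include <cstdio>

// Stand-in for the relevant fields of RTPVideoHeaderVP9; the real
// struct carries much more.
struct Vp9Refs {
  uint8_t num_ref_pics;
  uint8_t p_diff[3];
};

int main() {
  // Picture id 22 referencing pictures 10, 15 and 16.
  const uint16_t picture_id = 22;
  const Vp9Refs refs = {3, {12, 7, 6}};
  for (unsigned r = 0; r < refs.num_ref_pics; ++r) {
    const unsigned ref_id = static_cast<uint16_t>(picture_id - refs.p_diff[r]);
    std::printf("ref %u -> picture id %u\n", r, ref_id);  // 10, 15, 16
  }
  return 0;
}
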
diff --git a/webrtc/modules/video_coding/encoded_frame.h b/webrtc/modules/video_coding/encoded_frame.h
new file mode 100644
index 0000000000..9034200980
--- /dev/null
+++ b/webrtc/modules/video_coding/encoded_frame.h
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_ENCODED_FRAME_H_
+#define WEBRTC_MODULES_VIDEO_CODING_ENCODED_FRAME_H_
+
+#include <vector>
+
+#include "webrtc/common_types.h"
+#include "webrtc/common_video/include/video_image.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
+#include "webrtc/modules/video_coding/include/video_coding_defines.h"
+
+namespace webrtc {
+
+class VCMEncodedFrame : protected EncodedImage {
+ public:
+ VCMEncodedFrame();
+ explicit VCMEncodedFrame(const webrtc::EncodedImage& rhs);
+ VCMEncodedFrame(const VCMEncodedFrame& rhs);
+
+ ~VCMEncodedFrame();
+ /**
+   * Deletes the frame buffer and resets members to zero.
+ */
+ void Free();
+ /**
+ * Set render time in milliseconds
+ */
+ void SetRenderTime(const int64_t renderTimeMs) {
+ _renderTimeMs = renderTimeMs;
+ }
+
+ /**
+ * Set the encoded frame size
+ */
+ void SetEncodedSize(uint32_t width, uint32_t height) {
+ _encodedWidth = width;
+ _encodedHeight = height;
+ }
+ /**
+ * Get the encoded image
+ */
+ const webrtc::EncodedImage& EncodedImage() const {
+ return static_cast<const webrtc::EncodedImage&>(*this);
+ }
+ /**
+ * Get pointer to frame buffer
+ */
+ const uint8_t* Buffer() const { return _buffer; }
+ /**
+ * Get frame length
+ */
+ size_t Length() const { return _length; }
+ /**
+ * Get frame timestamp (90kHz)
+ */
+ uint32_t TimeStamp() const { return _timeStamp; }
+ /**
+ * Get render time in milliseconds
+ */
+ int64_t RenderTimeMs() const { return _renderTimeMs; }
+ /**
+ * Get frame type
+ */
+ webrtc::FrameType FrameType() const { return _frameType; }
+ /**
+ * Get frame rotation
+ */
+ VideoRotation rotation() const { return _rotation; }
+ /**
+ * True if this frame is complete, false otherwise
+ */
+ bool Complete() const { return _completeFrame; }
+ /**
+ * True if there's a frame missing before this frame
+ */
+ bool MissingFrame() const { return _missingFrame; }
+ /**
+ * Payload type of the encoded payload
+ */
+ uint8_t PayloadType() const { return _payloadType; }
+ /**
+ * Get codec specific info.
+ * The returned pointer is only valid as long as the VCMEncodedFrame
+ * is valid. Also, VCMEncodedFrame owns the pointer and will delete
+ * the object.
+ */
+ const CodecSpecificInfo* CodecSpecific() const { return &_codecSpecificInfo; }
+
+ const RTPFragmentationHeader* FragmentationHeader() const;
+
+ protected:
+ /**
+   * Verifies that the currently allocated buffer size is larger than or
+   * equal to the input size. If the current buffer size is smaller, a new
+   * allocation is made and the old buffer data is copied to the new buffer.
+   * The buffer size is updated to minimumSize.
+ */
+ void VerifyAndAllocate(size_t minimumSize);
+
+ void Reset();
+
+ void CopyCodecSpecific(const RTPVideoHeader* header);
+
+ int64_t _renderTimeMs;
+ uint8_t _payloadType;
+ bool _missingFrame;
+ CodecSpecificInfo _codecSpecificInfo;
+ webrtc::VideoCodecType _codec;
+ RTPFragmentationHeader _fragmentation;
+ VideoRotation _rotation;
+
+ // Video rotation is only set along with the last packet for each frame
+ // (same as marker bit). This |_rotation_set| is only for debugging purpose
+ // to ensure we don't set it twice for a frame.
+ bool _rotation_set;
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_CODING_ENCODED_FRAME_H_
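
The contract documented for VerifyAndAllocate() is grow-only reallocation: the buffer never shrinks, and existing bytes survive every growth. A condensed standalone sketch of that contract (GrowableBuffer is illustrative; the real class also tracks the used _length separately from the capacity _size):

#include <cstddef>
#include <cstdint>
#include <cstring>

class GrowableBuffer {
 public:
  ~GrowableBuffer() { delete[] buffer_; }

  // Reallocate only when |minimum_size| exceeds the current capacity,
  // copying the old contents into the new allocation.
  void VerifyAndAllocate(size_t minimum_size) {
    if (minimum_size > size_) {
      uint8_t* new_buffer = new uint8_t[minimum_size];
      if (buffer_) {
        std::memcpy(new_buffer, buffer_, size_);
        delete[] buffer_;
      }
      buffer_ = new_buffer;
      size_ = minimum_size;
    }
  }

  size_t size() const { return size_; }

 private:
  uint8_t* buffer_ = nullptr;
  size_t size_ = 0;
};

A later call with a smaller size is a no-op, which is why callers can keep requesting sizes in fixed increments without extra bookkeeping.
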
diff --git a/webrtc/modules/video_coding/fec_tables_xor.h b/webrtc/modules/video_coding/fec_tables_xor.h
new file mode 100644
index 0000000000..fa5bd7bde4
--- /dev/null
+++ b/webrtc/modules/video_coding/fec_tables_xor.h
@@ -0,0 +1,459 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_FEC_TABLES_XOR_H_
+#define WEBRTC_MODULES_VIDEO_CODING_FEC_TABLES_XOR_H_
+
+// This is a private header for media_opt_util.cc.
+// It should not be included by other files.
+
+namespace webrtc {
+
+// Table for Protection factor (code rate) of delta frames, for the XOR FEC.
+// Input is the packet loss and an effective rate (bits/frame).
+// Output is the array entry kCodeRateXORTable[k], where k = rate_i * 129 +
+// loss_j; loss_j = 0, 1, ..., 128, and rate_i = 0, 1, ..., 49 (the table
+// holds 6450 / 129 = 50 rate rows).
+static const int kSizeCodeRateXORTable = 6450;
+static const unsigned char kCodeRateXORTable[kSizeCodeRateXORTable] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+ 11, 11, 11, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39,
+ 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39,
+ 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39,
+ 39, 39, 39, 39, 39, 39, 51, 51, 51, 51, 51, 51, 51, 51, 51,
+ 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51,
+ 51, 51, 51, 51, 51, 51, 51, 51, 51, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 30, 30, 30,
+ 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 56, 56, 56,
+ 56, 56, 56, 56, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65,
+ 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65,
+ 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65,
+ 87, 87, 87, 87, 87, 87, 87, 87, 87, 87, 87, 87, 87, 87, 87,
+ 87, 87, 87, 87, 87, 87, 87, 87, 87, 78, 78, 78, 78, 78, 78,
+ 78, 78, 78, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 6, 6, 6, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 44, 44, 44, 44, 44, 44, 50, 50, 50,
+ 50, 50, 50, 50, 50, 50, 68, 68, 68, 68, 68, 68, 68, 85, 85,
+ 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85,
+ 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85,
+ 85, 85, 85, 85, 85, 85, 85, 85, 85, 105, 105, 105, 105, 105, 105,
+ 105, 105, 105, 105, 105, 105, 105, 105, 105, 105, 105, 105, 105, 105, 105,
+ 105, 105, 105, 88, 88, 88, 88, 88, 88, 88, 88, 88, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 5, 5, 5, 5, 5, 5, 19, 19, 19,
+ 36, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41,
+ 55, 55, 55, 55, 55, 55, 69, 69, 69, 69, 69, 69, 69, 69, 69,
+ 75, 75, 80, 80, 80, 80, 80, 97, 97, 97, 97, 97, 97, 97, 97,
+ 97, 97, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102,
+ 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102,
+ 102, 102, 102, 116, 116, 116, 116, 116, 116, 116, 116, 116, 116, 116, 116,
+ 116, 116, 116, 116, 116, 116, 116, 116, 116, 116, 116, 116, 100, 100, 100,
+ 100, 100, 100, 100, 100, 100, 0, 0, 0, 0, 0, 0, 0, 0, 4,
+ 16, 16, 16, 16, 16, 16, 30, 35, 35, 47, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 63, 63, 63, 63, 63, 63,
+ 77, 77, 77, 77, 77, 77, 77, 82, 82, 82, 82, 94, 94, 94, 94,
+ 94, 105, 105, 105, 105, 110, 110, 110, 110, 110, 110, 122, 122, 122, 122,
+ 122, 122, 122, 122, 122, 122, 122, 122, 122, 122, 122, 122, 122, 122, 122,
+ 122, 122, 122, 122, 122, 122, 122, 122, 122, 122, 122, 122, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 115, 115, 115, 115, 115, 115, 115, 115, 115,
+ 0, 0, 0, 0, 0, 0, 0, 4, 14, 27, 27, 27, 27, 27, 31,
+ 41, 52, 52, 56, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69,
+ 69, 69, 69, 69, 69, 69, 69, 69, 69, 79, 79, 79, 79, 83, 83,
+ 83, 94, 94, 94, 94, 106, 106, 106, 106, 106, 115, 115, 115, 115, 125,
+ 125, 125, 125, 125, 125, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 0, 0, 0, 3, 3,
+ 3, 17, 28, 38, 38, 38, 38, 38, 47, 51, 63, 63, 63, 72, 72,
+ 72, 72, 72, 72, 72, 76, 76, 76, 76, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 84, 84, 84, 84, 93, 93, 93, 105, 105, 105, 105, 114,
+ 114, 114, 114, 114, 124, 124, 124, 124, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 0, 0, 0, 0, 12, 12, 12, 35, 43, 47, 47, 47,
+ 47, 47, 58, 58, 66, 66, 66, 70, 70, 70, 70, 70, 73, 73, 82,
+ 82, 82, 86, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94,
+ 94, 105, 105, 105, 114, 114, 114, 114, 117, 117, 117, 117, 117, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 0, 0,
+ 0, 24, 24, 24, 49, 53, 53, 53, 53, 53, 53, 61, 61, 64, 64,
+ 64, 64, 70, 70, 70, 70, 78, 78, 88, 88, 88, 96, 106, 106, 106,
+ 106, 106, 106, 106, 106, 106, 106, 112, 112, 112, 120, 120, 120, 124, 124,
+ 124, 124, 124, 124, 124, 124, 124, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 0, 0, 0, 5, 36, 36, 36, 55, 55,
+ 55, 55, 55, 55, 55, 58, 58, 58, 58, 58, 64, 78, 78, 78, 78,
+ 87, 87, 94, 94, 94, 103, 110, 110, 110, 110, 110, 110, 110, 110, 116,
+ 116, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 0, 0, 0, 18, 43, 43, 43, 53, 53, 53, 53, 53, 53, 53, 53,
+ 58, 58, 58, 58, 71, 87, 87, 87, 87, 94, 94, 97, 97, 97, 109,
+ 111, 111, 111, 111, 111, 111, 111, 111, 125, 125, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 0, 0, 31, 46, 46,
+ 46, 48, 48, 48, 48, 48, 48, 48, 48, 66, 66, 66, 66, 80, 93,
+ 93, 93, 93, 95, 95, 95, 95, 100, 115, 115, 115, 115, 115, 115, 115,
+ 115, 115, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 0, 0, 4, 40, 45, 45, 45, 45, 45, 45, 45, 45,
+ 49, 49, 49, 74, 74, 74, 74, 86, 90, 90, 90, 90, 95, 95, 95,
+ 95, 106, 120, 120, 120, 120, 120, 120, 120, 120, 120, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 0, 14,
+ 42, 42, 42, 42, 42, 42, 42, 42, 46, 56, 56, 56, 80, 80, 80,
+ 80, 84, 84, 84, 84, 88, 99, 99, 99, 99, 111, 122, 122, 122, 122,
+ 122, 122, 122, 122, 122, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 0, 0, 26, 40, 40, 40, 40, 40, 40,
+ 40, 40, 54, 66, 66, 66, 80, 80, 80, 80, 80, 80, 80, 84, 94,
+ 106, 106, 106, 106, 116, 120, 120, 120, 120, 120, 120, 120, 120, 124, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 0, 3, 34, 38, 38, 38, 38, 38, 42, 42, 42, 63, 72, 72, 76,
+ 80, 80, 80, 80, 80, 80, 80, 89, 101, 114, 114, 114, 114, 118, 118,
+ 118, 118, 118, 118, 118, 118, 118, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 12, 36, 36, 36, 36,
+ 36, 36, 49, 49, 49, 69, 73, 76, 86, 86, 86, 86, 86, 86, 86,
+ 86, 97, 109, 122, 122, 122, 122, 122, 122, 122, 122, 122, 122, 122, 122,
+ 122, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 0, 22, 34, 34, 34, 34, 38, 38, 57, 57, 57, 69,
+ 73, 82, 92, 92, 92, 92, 92, 92, 96, 96, 104, 117, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 29, 33,
+ 33, 33, 33, 44, 44, 62, 62, 62, 69, 77, 87, 95, 95, 95, 95,
+ 95, 95, 107, 107, 110, 120, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 0, 31, 31, 31, 31, 31, 51, 51, 62,
+ 65, 65, 73, 83, 91, 94, 94, 94, 94, 97, 97, 114, 114, 114, 122,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 0, 29, 29, 29, 29, 29, 56, 56, 59, 70, 70, 79, 86, 89, 89,
+ 89, 89, 89, 100, 100, 116, 116, 116, 122, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 28, 28, 28, 28, 28,
+ 57, 57, 57, 76, 76, 83, 86, 86, 86, 86, 86, 89, 104, 104, 114,
+ 114, 114, 124, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 0, 27, 27, 27, 27, 30, 55, 55, 55, 80, 80, 83,
+ 86, 86, 86, 86, 86, 93, 108, 108, 111, 111, 111, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 26, 26,
+ 26, 26, 36, 53, 53, 53, 80, 80, 80, 90, 90, 90, 90, 90, 98,
+ 107, 107, 107, 107, 107, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 0, 26, 26, 26, 28, 42, 52, 54, 54,
+ 78, 78, 78, 95, 95, 95, 97, 97, 104, 106, 106, 106, 106, 106, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 0, 24, 24, 24, 33, 47, 49, 58, 58, 74, 74, 74, 97, 97, 97,
+ 106, 106, 108, 108, 108, 108, 108, 108, 124, 124, 124, 124, 124, 124, 124,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 24, 24, 24, 39, 48,
+ 50, 63, 63, 72, 74, 74, 96, 96, 96, 109, 111, 111, 111, 111, 111,
+ 111, 111, 119, 119, 122, 122, 122, 122, 122, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 0, 23, 23, 23, 43, 46, 54, 66, 66, 69, 77, 77,
+ 92, 92, 92, 105, 113, 113, 113, 113, 113, 113, 113, 115, 117, 123, 123,
+ 123, 123, 123, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 22, 22,
+ 22, 44, 44, 59, 67, 67, 67, 81, 81, 89, 89, 89, 97, 112, 112,
+ 112, 112, 112, 112, 112, 112, 119, 126, 126, 126, 126, 126, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 0, 21, 21, 24, 43, 45, 63, 65, 65,
+ 67, 85, 85, 87, 87, 87, 91, 109, 109, 109, 111, 111, 111, 111, 111,
+ 123, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 0, 21, 21, 28, 42, 50, 63, 63, 66, 71, 85, 85, 85, 85, 87,
+ 92, 106, 106, 108, 114, 114, 114, 114, 114, 125, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 20, 20, 34, 41, 54,
+ 62, 62, 69, 75, 82, 82, 82, 82, 92, 98, 105, 105, 110, 117, 117,
+ 117, 117, 117, 124, 124, 126, 126, 126, 126, 126, 126, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 0, 20, 20, 38, 40, 58, 60, 60, 73, 78, 80, 80,
+ 80, 80, 100, 105, 107, 107, 113, 118, 118, 118, 118, 118, 120, 120, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 19, 21,
+ 38, 40, 58, 58, 60, 75, 77, 77, 77, 81, 81, 107, 109, 109, 109,
+ 114, 116, 116, 116, 116, 116, 116, 116, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 0, 18, 25, 37, 44, 56, 56, 63, 75,
+ 75, 75, 75, 88, 88, 111, 111, 111, 111, 112, 112, 112, 112, 112, 112,
+ 112, 114, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 0, 18, 30, 36, 48, 55, 55, 67, 73, 73, 73, 73, 97, 97, 110,
+ 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 116, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 18, 34, 36, 52, 55,
+ 55, 70, 72, 73, 73, 73, 102, 104, 108, 108, 108, 108, 109, 109, 109,
+ 109, 109, 109, 109, 119, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 0, 17, 35, 35, 52, 59, 59, 70, 70, 76, 76, 76,
+ 99, 105, 105, 105, 105, 105, 111, 111, 111, 111, 111, 111, 111, 121, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 17, 34,
+ 36, 51, 61, 62, 70, 70, 80, 80, 80, 93, 103, 103, 103, 103, 103,
+ 112, 112, 112, 112, 112, 116, 118, 124, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 0, 16, 33, 39, 50, 59, 65, 72, 72,
+ 82, 82, 82, 91, 100, 100, 100, 100, 100, 109, 109, 109, 109, 109, 121,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 0, 16, 32, 43, 48, 54, 66, 75, 75, 81, 83, 83, 92, 97, 97,
+ 97, 99, 99, 105, 105, 105, 105, 105, 123, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 15, 31, 46, 47, 49,
+ 69, 77, 77, 81, 85, 85, 93, 95, 95, 95, 100, 100, 102, 102, 102,
+ 102, 102, 120, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 0, 15, 30, 46, 48, 48, 70, 75, 79, 82, 87, 87,
+ 92, 94, 94, 94, 103, 103, 103, 103, 103, 104, 104, 115, 120, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 15, 30,
+ 45, 50, 50, 68, 70, 80, 85, 89, 89, 90, 95, 95, 95, 104, 104,
+ 104, 104, 104, 109, 109, 112, 114, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 0, 14, 29, 44, 54, 54, 64, 64, 83,
+ 87, 88, 88, 88, 98, 98, 98, 103, 103, 103, 103, 103, 113, 113, 113,
+ 113, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 0, 14, 29, 43, 56, 56, 61, 61, 84, 85, 88, 88, 88, 100, 100,
+ 100, 102, 102, 102, 102, 102, 113, 116, 116, 116, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 14, 28, 42, 57, 57,
+ 62, 62, 80, 80, 91, 91, 91, 100, 100, 100, 100, 100, 100, 100, 100,
+ 109, 119, 119, 119, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 0, 14, 28, 42, 56, 56, 65, 66, 76, 76, 92, 92,
+ 92, 97, 97, 97, 101, 101, 101, 101, 101, 106, 121, 121, 121, 126, 126,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 13, 27,
+ 41, 55, 55, 67, 72, 74, 74, 90, 90, 90, 91, 91, 91, 105, 105,
+ 105, 105, 105, 107, 122, 122, 122, 123, 123, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 0, 13, 27, 40, 54, 54, 67, 76, 76,
+ 76, 85, 85, 85, 85, 85, 85, 112, 112, 112, 112, 112, 112, 121, 121,
+ 121, 121, 121, 126, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_CODING_FEC_TABLES_XOR_H_
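
The table comment pins the lookup geometry down completely: 129 loss columns per rate row and 6450 / 129 = 50 rate rows. A small index computation (how rate_i is quantized from the effective bitrate is decided by the caller in media_opt_util.cc, not by this header):

#include <cstdio>

int main() {
  const int kLossCols = 129;         // loss_j = 0, 1, ..., 128
  const int kRateRows = 6450 / 129;  // = 50 rate rows
  const int rate_i = 3;
  const int loss_j = 13;
  // k = rate_i * 129 + loss_j = 3 * 129 + 13 = 400
  std::printf("rows=%d k=%d\n", kRateRows, rate_i * kLossCols + loss_j);
  return 0;
}
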
diff --git a/webrtc/modules/video_coding/frame_buffer.cc b/webrtc/modules/video_coding/frame_buffer.cc
new file mode 100644
index 0000000000..b6ddeda4e7
--- /dev/null
+++ b/webrtc/modules/video_coding/frame_buffer.cc
@@ -0,0 +1,270 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/video_coding/frame_buffer.h"
+
+#include <assert.h>
+#include <string.h>
+
+#include "webrtc/base/checks.h"
+#include "webrtc/base/logging.h"
+#include "webrtc/modules/video_coding/packet.h"
+
+namespace webrtc {
+
+VCMFrameBuffer::VCMFrameBuffer()
+ : _state(kStateEmpty), _nackCount(0), _latestPacketTimeMs(-1) {}
+
+VCMFrameBuffer::~VCMFrameBuffer() {}
+
+VCMFrameBuffer::VCMFrameBuffer(const VCMFrameBuffer& rhs)
+ : VCMEncodedFrame(rhs),
+ _state(rhs._state),
+ _sessionInfo(),
+ _nackCount(rhs._nackCount),
+ _latestPacketTimeMs(rhs._latestPacketTimeMs) {
+ _sessionInfo = rhs._sessionInfo;
+ _sessionInfo.UpdateDataPointers(rhs._buffer, _buffer);
+}
+
+webrtc::FrameType VCMFrameBuffer::FrameType() const {
+ return _sessionInfo.FrameType();
+}
+
+int32_t VCMFrameBuffer::GetLowSeqNum() const {
+ return _sessionInfo.LowSequenceNumber();
+}
+
+int32_t VCMFrameBuffer::GetHighSeqNum() const {
+ return _sessionInfo.HighSequenceNumber();
+}
+
+int VCMFrameBuffer::PictureId() const {
+ return _sessionInfo.PictureId();
+}
+
+int VCMFrameBuffer::TemporalId() const {
+ return _sessionInfo.TemporalId();
+}
+
+bool VCMFrameBuffer::LayerSync() const {
+ return _sessionInfo.LayerSync();
+}
+
+int VCMFrameBuffer::Tl0PicId() const {
+ return _sessionInfo.Tl0PicId();
+}
+
+bool VCMFrameBuffer::NonReference() const {
+ return _sessionInfo.NonReference();
+}
+
+void VCMFrameBuffer::SetGofInfo(const GofInfoVP9& gof_info, size_t idx) {
+ _sessionInfo.SetGofInfo(gof_info, idx);
+ // TODO(asapersson): Consider adding hdr->VP9.ref_picture_id for testing.
+ _codecSpecificInfo.codecSpecific.VP9.temporal_idx =
+ gof_info.temporal_idx[idx];
+ _codecSpecificInfo.codecSpecific.VP9.temporal_up_switch =
+ gof_info.temporal_up_switch[idx];
+}
+
+bool VCMFrameBuffer::IsSessionComplete() const {
+ return _sessionInfo.complete();
+}
+
+// Insert packet
+VCMFrameBufferEnum VCMFrameBuffer::InsertPacket(
+ const VCMPacket& packet,
+ int64_t timeInMs,
+ VCMDecodeErrorMode decode_error_mode,
+ const FrameData& frame_data) {
+ assert(!(NULL == packet.dataPtr && packet.sizeBytes > 0));
+ if (packet.dataPtr != NULL) {
+ _payloadType = packet.payloadType;
+ }
+
+ if (kStateEmpty == _state) {
+ // First packet (empty and/or media) inserted into this frame.
+    // Store some info and set some initial values.
+ _timeStamp = packet.timestamp;
+ // We only take the ntp timestamp of the first packet of a frame.
+ ntp_time_ms_ = packet.ntp_time_ms_;
+ _codec = packet.codec;
+ if (packet.frameType != kEmptyFrame) {
+      // First media packet.
+ SetState(kStateIncomplete);
+ }
+ }
+
+ uint32_t requiredSizeBytes =
+ Length() + packet.sizeBytes +
+ (packet.insertStartCode ? kH264StartCodeLengthBytes : 0);
+ if (requiredSizeBytes >= _size) {
+ const uint8_t* prevBuffer = _buffer;
+ const uint32_t increments =
+ requiredSizeBytes / kBufferIncStepSizeBytes +
+ (requiredSizeBytes % kBufferIncStepSizeBytes > 0);
+ const uint32_t newSize = _size + increments * kBufferIncStepSizeBytes;
+ if (newSize > kMaxJBFrameSizeBytes) {
+ LOG(LS_ERROR) << "Failed to insert packet due to frame being too "
+ "big.";
+ return kSizeError;
+ }
+ VerifyAndAllocate(newSize);
+ _sessionInfo.UpdateDataPointers(prevBuffer, _buffer);
+ }
+
+ if (packet.width > 0 && packet.height > 0) {
+ _encodedWidth = packet.width;
+ _encodedHeight = packet.height;
+ }
+
+  // Don't copy payload-specific data for empty packets (e.g., padding packets).
+ if (packet.sizeBytes > 0)
+ CopyCodecSpecific(&packet.codecSpecificHeader);
+
+ int retVal =
+ _sessionInfo.InsertPacket(packet, _buffer, decode_error_mode, frame_data);
+ if (retVal == -1) {
+ return kSizeError;
+ } else if (retVal == -2) {
+ return kDuplicatePacket;
+ } else if (retVal == -3) {
+ return kOutOfBoundsPacket;
+ }
+  // Update the frame length.
+ _length = Length() + static_cast<uint32_t>(retVal);
+
+ _latestPacketTimeMs = timeInMs;
+
+ // http://www.etsi.org/deliver/etsi_ts/126100_126199/126114/12.07.00_60/
+ // ts_126114v120700p.pdf Section 7.4.5.
+ // The MTSI client shall add the payload bytes as defined in this clause
+ // onto the last RTP packet in each group of packets which make up a key
+ // frame (I-frame or IDR frame in H.264 (AVC), or an IRAP picture in H.265
+ // (HEVC)).
+ if (packet.markerBit) {
+ RTC_DCHECK(!_rotation_set);
+ _rotation = packet.codecSpecificHeader.rotation;
+ _rotation_set = true;
+ }
+
+ if (_sessionInfo.complete()) {
+ SetState(kStateComplete);
+ return kCompleteSession;
+ } else if (_sessionInfo.decodable()) {
+ SetState(kStateDecodable);
+ return kDecodableSession;
+ }
+ return kIncomplete;
+}
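The negative session-info results map onto jitter-buffer-visible enums, and a caller is expected to branch on the returned VCMFrameBufferEnum. A sketch of that dispatch; ScheduleForDecode and RecycleFrame are hypothetical helpers, not functions from this tree:

    VCMFrameBufferEnum ret =
        frame->InsertPacket(packet, now_ms, decode_error_mode, frame_data);
    switch (ret) {
      case kCompleteSession:   // All packets of the frame received.
      case kDecodableSession:  // Incomplete but decodable with errors allowed.
        ScheduleForDecode(frame);  // Hypothetical helper.
        break;
      case kDuplicatePacket:   // Already-seen packet; nothing to do.
        break;
      case kSizeError:
      case kOutOfBoundsPacket:
        RecycleFrame(frame);   // Hypothetical helper; drop the frame.
        break;
      default:                 // kIncomplete: keep waiting for packets.
        break;
    }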
+
+int64_t VCMFrameBuffer::LatestPacketTimeMs() const {
+ return _latestPacketTimeMs;
+}
+
+void VCMFrameBuffer::IncrementNackCount() {
+ _nackCount++;
+}
+
+int16_t VCMFrameBuffer::GetNackCount() const {
+ return _nackCount;
+}
+
+bool VCMFrameBuffer::HaveFirstPacket() const {
+ return _sessionInfo.HaveFirstPacket();
+}
+
+bool VCMFrameBuffer::HaveLastPacket() const {
+ return _sessionInfo.HaveLastPacket();
+}
+
+int VCMFrameBuffer::NumPackets() const {
+ return _sessionInfo.NumPackets();
+}
+
+void VCMFrameBuffer::Reset() {
+ _length = 0;
+ _timeStamp = 0;
+ _sessionInfo.Reset();
+ _payloadType = 0;
+ _nackCount = 0;
+ _latestPacketTimeMs = -1;
+ _state = kStateEmpty;
+ VCMEncodedFrame::Reset();
+}
+
+// Set state of frame
+void VCMFrameBuffer::SetState(VCMFrameBufferStateEnum state) {
+ if (_state == state) {
+ return;
+ }
+ switch (state) {
+ case kStateIncomplete:
+      // We can only enter this state from kStateEmpty.
+      assert(_state == kStateEmpty);
+
+      // Do nothing; we received a packet.
+ break;
+
+ case kStateComplete:
+ assert(_state == kStateEmpty || _state == kStateIncomplete ||
+ _state == kStateDecodable);
+
+ break;
+
+ case kStateEmpty:
+ // Should only be set to empty through Reset().
+ assert(false);
+ break;
+
+ case kStateDecodable:
+ assert(_state == kStateEmpty || _state == kStateIncomplete);
+ break;
+ }
+ _state = state;
+}
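The asserts above define a small state machine. Restated as a standalone predicate (a summary sketch, not code from this tree):

    // Valid VCMFrameBuffer state transitions, per the asserts in SetState().
    bool IsValidTransition(VCMFrameBufferStateEnum from,
                           VCMFrameBufferStateEnum to) {
      switch (to) {
        case kStateIncomplete:
          return from == kStateEmpty;
        case kStateDecodable:
          return from == kStateEmpty || from == kStateIncomplete;
        case kStateComplete:
          return from == kStateEmpty || from == kStateIncomplete ||
                 from == kStateDecodable;
        case kStateEmpty:
          return false;  // Only reachable through Reset().
      }
      return false;
    }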
+
+// Get current state of frame
+VCMFrameBufferStateEnum VCMFrameBuffer::GetState() const {
+ return _state;
+}
+
+// Get current state of frame
+VCMFrameBufferStateEnum VCMFrameBuffer::GetState(uint32_t& timeStamp) const {
+ timeStamp = TimeStamp();
+ return GetState();
+}
+
+bool VCMFrameBuffer::IsRetransmitted() const {
+ return _sessionInfo.session_nack();
+}
+
+void VCMFrameBuffer::PrepareForDecode(bool continuous) {
+#ifdef INDEPENDENT_PARTITIONS
+ if (_codec == kVideoCodecVP8) {
+ _length = _sessionInfo.BuildVP8FragmentationHeader(_buffer, _length,
+ &_fragmentation);
+ } else {
+ size_t bytes_removed = _sessionInfo.MakeDecodable();
+ _length -= bytes_removed;
+ }
+#else
+ size_t bytes_removed = _sessionInfo.MakeDecodable();
+ _length -= bytes_removed;
+#endif
+ // Transfer frame information to EncodedFrame and create any codec
+ // specific information.
+ _frameType = _sessionInfo.FrameType();
+ _completeFrame = _sessionInfo.complete();
+ _missingFrame = !continuous;
+}
+
+} // namespace webrtc
diff --git a/webrtc/modules/video_coding/main/source/frame_buffer.h b/webrtc/modules/video_coding/frame_buffer.h
index ab4ff6574e..f5a707efe4 100644
--- a/webrtc/modules/video_coding/main/source/frame_buffer.h
+++ b/webrtc/modules/video_coding/frame_buffer.h
@@ -8,14 +8,14 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_FRAME_BUFFER_H_
-#define WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_FRAME_BUFFER_H_
-
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/modules/video_coding/main/interface/video_coding.h"
-#include "webrtc/modules/video_coding/main/source/encoded_frame.h"
-#include "webrtc/modules/video_coding/main/source/jitter_buffer_common.h"
-#include "webrtc/modules/video_coding/main/source/session_info.h"
+#ifndef WEBRTC_MODULES_VIDEO_CODING_FRAME_BUFFER_H_
+#define WEBRTC_MODULES_VIDEO_CODING_FRAME_BUFFER_H_
+
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/video_coding/include/video_coding.h"
+#include "webrtc/modules/video_coding/encoded_frame.h"
+#include "webrtc/modules/video_coding/jitter_buffer_common.h"
+#include "webrtc/modules/video_coding/session_info.h"
#include "webrtc/typedefs.h"
namespace webrtc {
@@ -89,4 +89,4 @@ class VCMFrameBuffer : public VCMEncodedFrame {
} // namespace webrtc
-#endif // WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_FRAME_BUFFER_H_
+#endif // WEBRTC_MODULES_VIDEO_CODING_FRAME_BUFFER_H_
diff --git a/webrtc/modules/video_coding/generic_decoder.cc b/webrtc/modules/video_coding/generic_decoder.cc
new file mode 100644
index 0000000000..5cbe0f5ba0
--- /dev/null
+++ b/webrtc/modules/video_coding/generic_decoder.cc
@@ -0,0 +1,192 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/base/logging.h"
+#include "webrtc/base/trace_event.h"
+#include "webrtc/modules/video_coding/include/video_coding.h"
+#include "webrtc/modules/video_coding/generic_decoder.h"
+#include "webrtc/modules/video_coding/internal_defines.h"
+#include "webrtc/system_wrappers/include/clock.h"
+
+namespace webrtc {
+
+VCMDecodedFrameCallback::VCMDecodedFrameCallback(VCMTiming* timing,
+ Clock* clock)
+ : _critSect(CriticalSectionWrapper::CreateCriticalSection()),
+ _clock(clock),
+ _receiveCallback(NULL),
+ _timing(timing),
+ _timestampMap(kDecoderFrameMemoryLength),
+ _lastReceivedPictureID(0) {}
+
+VCMDecodedFrameCallback::~VCMDecodedFrameCallback() {
+ delete _critSect;
+}
+
+void VCMDecodedFrameCallback::SetUserReceiveCallback(
+ VCMReceiveCallback* receiveCallback) {
+ CriticalSectionScoped cs(_critSect);
+ _receiveCallback = receiveCallback;
+}
+
+VCMReceiveCallback* VCMDecodedFrameCallback::UserReceiveCallback() {
+ CriticalSectionScoped cs(_critSect);
+ return _receiveCallback;
+}
+
+int32_t VCMDecodedFrameCallback::Decoded(VideoFrame& decodedImage) {
+ return Decoded(decodedImage, -1);
+}
+
+int32_t VCMDecodedFrameCallback::Decoded(VideoFrame& decodedImage,
+ int64_t decode_time_ms) {
+ TRACE_EVENT_INSTANT1("webrtc", "VCMDecodedFrameCallback::Decoded",
+ "timestamp", decodedImage.timestamp());
+ // TODO(holmer): We should improve this so that we can handle multiple
+ // callbacks from one call to Decode().
+ VCMFrameInformation* frameInfo;
+ VCMReceiveCallback* callback;
+ {
+ CriticalSectionScoped cs(_critSect);
+ frameInfo = _timestampMap.Pop(decodedImage.timestamp());
+ callback = _receiveCallback;
+ }
+
+ if (frameInfo == NULL) {
+ LOG(LS_WARNING) << "Too many frames backed up in the decoder, dropping "
+ "this one.";
+ return WEBRTC_VIDEO_CODEC_OK;
+ }
+
+ const int64_t now_ms = _clock->TimeInMilliseconds();
+ if (decode_time_ms < 0) {
+ decode_time_ms =
+ static_cast<int32_t>(now_ms - frameInfo->decodeStartTimeMs);
+ }
+ _timing->StopDecodeTimer(decodedImage.timestamp(), decode_time_ms, now_ms,
+ frameInfo->renderTimeMs);
+
+ if (callback != NULL) {
+ decodedImage.set_render_time_ms(frameInfo->renderTimeMs);
+ decodedImage.set_rotation(frameInfo->rotation);
+ callback->FrameToRender(decodedImage);
+ }
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int32_t VCMDecodedFrameCallback::ReceivedDecodedReferenceFrame(
+ const uint64_t pictureId) {
+ CriticalSectionScoped cs(_critSect);
+ if (_receiveCallback != NULL) {
+ return _receiveCallback->ReceivedDecodedReferenceFrame(pictureId);
+ }
+ return -1;
+}
+
+int32_t VCMDecodedFrameCallback::ReceivedDecodedFrame(
+ const uint64_t pictureId) {
+ _lastReceivedPictureID = pictureId;
+ return 0;
+}
+
+uint64_t VCMDecodedFrameCallback::LastReceivedPictureID() const {
+ return _lastReceivedPictureID;
+}
+
+void VCMDecodedFrameCallback::OnDecoderImplementationName(
+ const char* implementation_name) {
+ CriticalSectionScoped cs(_critSect);
+ if (_receiveCallback)
+ _receiveCallback->OnDecoderImplementationName(implementation_name);
+}
+
+void VCMDecodedFrameCallback::Map(uint32_t timestamp,
+ VCMFrameInformation* frameInfo) {
+ CriticalSectionScoped cs(_critSect);
+ _timestampMap.Add(timestamp, frameInfo);
+}
+
+int32_t VCMDecodedFrameCallback::Pop(uint32_t timestamp) {
+ CriticalSectionScoped cs(_critSect);
+ if (_timestampMap.Pop(timestamp) == NULL) {
+ return VCM_GENERAL_ERROR;
+ }
+ return VCM_OK;
+}
+
+VCMGenericDecoder::VCMGenericDecoder(VideoDecoder* decoder, bool isExternal)
+ : _callback(NULL),
+ _frameInfos(),
+ _nextFrameInfoIdx(0),
+ _decoder(decoder),
+ _codecType(kVideoCodecUnknown),
+ _isExternal(isExternal),
+ _keyFrameDecoded(false) {}
+
+VCMGenericDecoder::~VCMGenericDecoder() {}
+
+int32_t VCMGenericDecoder::InitDecode(const VideoCodec* settings,
+ int32_t numberOfCores) {
+ TRACE_EVENT0("webrtc", "VCMGenericDecoder::InitDecode");
+ _codecType = settings->codecType;
+
+ return _decoder->InitDecode(settings, numberOfCores);
+}
+
+int32_t VCMGenericDecoder::Decode(const VCMEncodedFrame& frame, int64_t nowMs) {
+ TRACE_EVENT1("webrtc", "VCMGenericDecoder::Decode", "timestamp",
+ frame.EncodedImage()._timeStamp);
+ _frameInfos[_nextFrameInfoIdx].decodeStartTimeMs = nowMs;
+ _frameInfos[_nextFrameInfoIdx].renderTimeMs = frame.RenderTimeMs();
+ _frameInfos[_nextFrameInfoIdx].rotation = frame.rotation();
+ _callback->Map(frame.TimeStamp(), &_frameInfos[_nextFrameInfoIdx]);
+
+ _nextFrameInfoIdx = (_nextFrameInfoIdx + 1) % kDecoderFrameMemoryLength;
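+  // Decode() below may fire the decode-complete callback synchronously; the
+  // entry mapped above is popped there, or popped explicitly below on error
+  // or no-output, so map entries are not left behind.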
+ int32_t ret = _decoder->Decode(frame.EncodedImage(), frame.MissingFrame(),
+ frame.FragmentationHeader(),
+ frame.CodecSpecific(), frame.RenderTimeMs());
+
+ _callback->OnDecoderImplementationName(_decoder->ImplementationName());
+ if (ret < WEBRTC_VIDEO_CODEC_OK) {
+ LOG(LS_WARNING) << "Failed to decode frame with timestamp "
+ << frame.TimeStamp() << ", error code: " << ret;
+ _callback->Pop(frame.TimeStamp());
+ return ret;
+ } else if (ret == WEBRTC_VIDEO_CODEC_NO_OUTPUT ||
+ ret == WEBRTC_VIDEO_CODEC_REQUEST_SLI) {
+ // No output
+ _callback->Pop(frame.TimeStamp());
+ }
+ return ret;
+}
+
+int32_t VCMGenericDecoder::Release() {
+ return _decoder->Release();
+}
+
+int32_t VCMGenericDecoder::Reset() {
+ return _decoder->Reset();
+}
+
+int32_t VCMGenericDecoder::RegisterDecodeCompleteCallback(
+ VCMDecodedFrameCallback* callback) {
+ _callback = callback;
+ return _decoder->RegisterDecodeCompleteCallback(callback);
+}
+
+bool VCMGenericDecoder::External() const {
+ return _isExternal;
+}
+
+bool VCMGenericDecoder::PrefersLateDecoding() const {
+ return _decoder->PrefersLateDecoding();
+}
+
+} // namespace webrtc
diff --git a/webrtc/modules/video_coding/generic_decoder.h b/webrtc/modules/video_coding/generic_decoder.h
new file mode 100644
index 0000000000..67ceabfc53
--- /dev/null
+++ b/webrtc/modules/video_coding/generic_decoder.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_GENERIC_DECODER_H_
+#define WEBRTC_MODULES_VIDEO_CODING_GENERIC_DECODER_H_
+
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
+#include "webrtc/modules/video_coding/encoded_frame.h"
+#include "webrtc/modules/video_coding/timestamp_map.h"
+#include "webrtc/modules/video_coding/timing.h"
+
+namespace webrtc {
+
+class VCMReceiveCallback;
+
+enum { kDecoderFrameMemoryLength = 10 };
+
+struct VCMFrameInformation {
+ int64_t renderTimeMs;
+ int64_t decodeStartTimeMs;
+ void* userData;
+ VideoRotation rotation;
+};
+
+class VCMDecodedFrameCallback : public DecodedImageCallback {
+ public:
+ VCMDecodedFrameCallback(VCMTiming* timing, Clock* clock);
+ virtual ~VCMDecodedFrameCallback();
+ void SetUserReceiveCallback(VCMReceiveCallback* receiveCallback);
+ VCMReceiveCallback* UserReceiveCallback();
+
+ virtual int32_t Decoded(VideoFrame& decodedImage); // NOLINT
+ virtual int32_t Decoded(VideoFrame& decodedImage, // NOLINT
+ int64_t decode_time_ms);
+ virtual int32_t ReceivedDecodedReferenceFrame(const uint64_t pictureId);
+ virtual int32_t ReceivedDecodedFrame(const uint64_t pictureId);
+
+ uint64_t LastReceivedPictureID() const;
+ void OnDecoderImplementationName(const char* implementation_name);
+
+ void Map(uint32_t timestamp, VCMFrameInformation* frameInfo);
+ int32_t Pop(uint32_t timestamp);
+
+ private:
+ // Protect |_receiveCallback| and |_timestampMap|.
+ CriticalSectionWrapper* _critSect;
+ Clock* _clock;
+ VCMReceiveCallback* _receiveCallback GUARDED_BY(_critSect);
+ VCMTiming* _timing;
+ VCMTimestampMap _timestampMap GUARDED_BY(_critSect);
+ uint64_t _lastReceivedPictureID;
+};
+
+class VCMGenericDecoder {
+ friend class VCMCodecDataBase;
+
+ public:
+ explicit VCMGenericDecoder(VideoDecoder* decoder, bool isExternal = false);
+ ~VCMGenericDecoder();
+
+ /**
+ * Initialize the decoder with the information from the VideoCodec
+ */
+ int32_t InitDecode(const VideoCodec* settings, int32_t numberOfCores);
+
+ /**
+  * Decode to a raw I420 frame.
+  *
+  * inputFrame : Reference to the encoded video frame.
+ */
+ int32_t Decode(const VCMEncodedFrame& inputFrame, int64_t nowMs);
+
+ /**
+ * Free the decoder memory
+ */
+ int32_t Release();
+
+ /**
+ * Reset the decoder state, prepare for a new call
+ */
+ int32_t Reset();
+
+ /**
+ * Set decode callback. Deregistering while decoding is illegal.
+ */
+ int32_t RegisterDecodeCompleteCallback(VCMDecodedFrameCallback* callback);
+
+ bool External() const;
+ bool PrefersLateDecoding() const;
+
+ private:
+ VCMDecodedFrameCallback* _callback;
+ VCMFrameInformation _frameInfos[kDecoderFrameMemoryLength];
+ uint32_t _nextFrameInfoIdx;
+ VideoDecoder* const _decoder;
+ VideoCodecType _codecType;
+ bool _isExternal;
+ bool _keyFrameDecoded;
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_CODING_GENERIC_DECODER_H_
diff --git a/webrtc/modules/video_coding/main/source/generic_encoder.cc b/webrtc/modules/video_coding/generic_encoder.cc
index de196040f0..c7444ce99f 100644
--- a/webrtc/modules/video_coding/main/source/generic_encoder.cc
+++ b/webrtc/modules/video_coding/generic_encoder.cc
@@ -8,13 +8,17 @@
* be found in the AUTHORS file in the root of the source tree.
*/
+#include "webrtc/modules/video_coding/generic_encoder.h"
+
+#include <vector>
+
#include "webrtc/base/checks.h"
+#include "webrtc/base/logging.h"
+#include "webrtc/base/trace_event.h"
#include "webrtc/engine_configurations.h"
-#include "webrtc/modules/video_coding/main/source/encoded_frame.h"
-#include "webrtc/modules/video_coding/main/source/generic_encoder.h"
-#include "webrtc/modules/video_coding/main/source/media_optimization.h"
+#include "webrtc/modules/video_coding/encoded_frame.h"
+#include "webrtc/modules/video_coding/media_optimization.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
-#include "webrtc/system_wrappers/include/logging.h"
namespace webrtc {
namespace {
@@ -27,8 +31,7 @@ void CopyCodecSpecific(const CodecSpecificInfo* info, RTPVideoHeader* rtp) {
rtp->codec = kRtpVideoVp8;
rtp->codecHeader.VP8.InitRTPVideoHeaderVP8();
rtp->codecHeader.VP8.pictureId = info->codecSpecific.VP8.pictureId;
- rtp->codecHeader.VP8.nonReference =
- info->codecSpecific.VP8.nonReference;
+ rtp->codecHeader.VP8.nonReference = info->codecSpecific.VP8.nonReference;
rtp->codecHeader.VP8.temporalIdx = info->codecSpecific.VP8.temporalIdx;
rtp->codecHeader.VP8.layerSync = info->codecSpecific.VP8.layerSync;
rtp->codecHeader.VP8.tl0PicIdx = info->codecSpecific.VP8.tl0PicIdx;
@@ -54,11 +57,9 @@ void CopyCodecSpecific(const CodecSpecificInfo* info, RTPVideoHeader* rtp) {
rtp->codecHeader.VP9.inter_layer_predicted =
info->codecSpecific.VP9.inter_layer_predicted;
rtp->codecHeader.VP9.gof_idx = info->codecSpecific.VP9.gof_idx;
-
- // Packetizer needs to know the number of spatial layers to correctly set
- // the marker bit, even when the number won't be written in the packet.
rtp->codecHeader.VP9.num_spatial_layers =
info->codecSpecific.VP9.num_spatial_layers;
+
if (info->codecSpecific.VP9.ss_data_available) {
rtp->codecHeader.VP9.spatial_layer_resolution_present =
info->codecSpecific.VP9.spatial_layer_resolution_present;
@@ -71,6 +72,10 @@ void CopyCodecSpecific(const CodecSpecificInfo* info, RTPVideoHeader* rtp) {
}
rtp->codecHeader.VP9.gof.CopyGofInfoVP9(info->codecSpecific.VP9.gof);
}
+
+ rtp->codecHeader.VP9.num_ref_pics = info->codecSpecific.VP9.num_ref_pics;
+ for (int i = 0; i < info->codecSpecific.VP9.num_ref_pics; ++i)
+ rtp->codecHeader.VP9.pid_diff[i] = info->codecSpecific.VP9.p_diff[i];
return;
}
case kVideoCodecH264:
@@ -86,7 +91,7 @@ void CopyCodecSpecific(const CodecSpecificInfo* info, RTPVideoHeader* rtp) {
}
} // namespace
-//#define DEBUG_ENCODER_BIT_STREAM
+// #define DEBUG_ENCODER_BIT_STREAM
VCMGenericEncoder::VCMGenericEncoder(
VideoEncoder* encoder,
@@ -110,6 +115,7 @@ int32_t VCMGenericEncoder::Release() {
int32_t VCMGenericEncoder::InitEncode(const VideoCodec* settings,
int32_t numberOfCores,
size_t maxPayloadSize) {
+ TRACE_EVENT0("webrtc", "VCMGenericEncoder::InitEncode");
{
rtc::CritScope lock(&params_lock_);
encoder_params_.target_bitrate = settings->startBitrate * 1000;
@@ -130,6 +136,9 @@ int32_t VCMGenericEncoder::InitEncode(const VideoCodec* settings,
int32_t VCMGenericEncoder::Encode(const VideoFrame& inputFrame,
const CodecSpecificInfo* codecSpecificInfo,
const std::vector<FrameType>& frameTypes) {
+ TRACE_EVENT1("webrtc", "VCMGenericEncoder::Encode", "timestamp",
+ inputFrame.timestamp());
+
for (FrameType frame_type : frameTypes)
RTC_DCHECK(frame_type == kVideoFrameKey || frame_type == kVideoFrameDelta);
@@ -143,6 +152,12 @@ int32_t VCMGenericEncoder::Encode(const VideoFrame& inputFrame,
vcm_encoded_frame_callback_->SetRotation(rotation_);
int32_t result = encoder_->Encode(inputFrame, codecSpecificInfo, &frameTypes);
+
+ if (vcm_encoded_frame_callback_) {
+ vcm_encoded_frame_callback_->SignalLastEncoderImplementationUsed(
+ encoder_->ImplementationName());
+ }
+
if (is_screenshare_ &&
result == WEBRTC_VIDEO_CODEC_TARGET_BITRATE_OVERSHOOT) {
// Target bitrate exceeded, encoder state has been reset - try again.
@@ -182,10 +197,8 @@ EncoderParameters VCMGenericEncoder::GetEncoderParameters() const {
return encoder_params_;
}
-int32_t
-VCMGenericEncoder::SetPeriodicKeyFrames(bool enable)
-{
- return encoder_->SetPeriodicKeyFrames(enable);
+int32_t VCMGenericEncoder::SetPeriodicKeyFrames(bool enable) {
+ return encoder_->SetPeriodicKeyFrames(enable);
}
int32_t VCMGenericEncoder::RequestFrame(
@@ -194,10 +207,8 @@ int32_t VCMGenericEncoder::RequestFrame(
return encoder_->Encode(image, NULL, &frame_types);
}
-bool
-VCMGenericEncoder::InternalSource() const
-{
- return internal_source_;
+bool VCMGenericEncoder::InternalSource() const {
+ return internal_source_;
}
void VCMGenericEncoder::OnDroppedFrame() {
@@ -212,12 +223,12 @@ int VCMGenericEncoder::GetTargetFramerate() {
return encoder_->GetTargetFramerate();
}
- /***************************
- * Callback Implementation
- ***************************/
+/***************************
+ * Callback Implementation
+ ***************************/
VCMEncodedFrameCallback::VCMEncodedFrameCallback(
EncodedImageCallback* post_encode_callback)
- : _sendCallback(),
+ : send_callback_(),
_mediaOpt(NULL),
_payloadType(0),
_internalSource(false),
@@ -229,39 +240,37 @@ VCMEncodedFrameCallback::VCMEncodedFrameCallback(
#endif
{
#ifdef DEBUG_ENCODER_BIT_STREAM
- _bitStreamAfterEncoder = fopen("encoderBitStream.bit", "wb");
+ _bitStreamAfterEncoder = fopen("encoderBitStream.bit", "wb");
#endif
}
-VCMEncodedFrameCallback::~VCMEncodedFrameCallback()
-{
+VCMEncodedFrameCallback::~VCMEncodedFrameCallback() {
#ifdef DEBUG_ENCODER_BIT_STREAM
- fclose(_bitStreamAfterEncoder);
+ fclose(_bitStreamAfterEncoder);
#endif
}
-int32_t
-VCMEncodedFrameCallback::SetTransportCallback(VCMPacketizationCallback* transport)
-{
- _sendCallback = transport;
- return VCM_OK;
+int32_t VCMEncodedFrameCallback::SetTransportCallback(
+ VCMPacketizationCallback* transport) {
+ send_callback_ = transport;
+ return VCM_OK;
}
int32_t VCMEncodedFrameCallback::Encoded(
- const EncodedImage& encodedImage,
+ const EncodedImage& encoded_image,
const CodecSpecificInfo* codecSpecificInfo,
const RTPFragmentationHeader* fragmentationHeader) {
- RTC_DCHECK(encodedImage._frameType == kVideoFrameKey ||
- encodedImage._frameType == kVideoFrameDelta);
- post_encode_callback_->Encoded(encodedImage, NULL, NULL);
+ TRACE_EVENT_INSTANT1("webrtc", "VCMEncodedFrameCallback::Encoded",
+ "timestamp", encoded_image._timeStamp);
+ post_encode_callback_->Encoded(encoded_image, NULL, NULL);
- if (_sendCallback == NULL) {
+ if (send_callback_ == NULL) {
return VCM_UNINITIALIZED;
}
#ifdef DEBUG_ENCODER_BIT_STREAM
if (_bitStreamAfterEncoder != NULL) {
- fwrite(encodedImage._buffer, 1, encodedImage._length,
+ fwrite(encoded_image._buffer, 1, encoded_image._length,
_bitStreamAfterEncoder);
}
#endif
@@ -274,25 +283,29 @@ int32_t VCMEncodedFrameCallback::Encoded(
}
rtpVideoHeader.rotation = _rotation;
- int32_t callbackReturn = _sendCallback->SendData(
- _payloadType, encodedImage, *fragmentationHeader, rtpVideoHeaderPtr);
+ int32_t callbackReturn = send_callback_->SendData(
+ _payloadType, encoded_image, *fragmentationHeader, rtpVideoHeaderPtr);
if (callbackReturn < 0) {
return callbackReturn;
}
if (_mediaOpt != NULL) {
- _mediaOpt->UpdateWithEncodedData(encodedImage);
+ _mediaOpt->UpdateWithEncodedData(encoded_image);
if (_internalSource)
return _mediaOpt->DropFrame(); // Signal to encoder to drop next frame.
}
return VCM_OK;
}
-void
-VCMEncodedFrameCallback::SetMediaOpt(
- media_optimization::MediaOptimization *mediaOpt)
-{
- _mediaOpt = mediaOpt;
+void VCMEncodedFrameCallback::SetMediaOpt(
+ media_optimization::MediaOptimization* mediaOpt) {
+ _mediaOpt = mediaOpt;
+}
+
+void VCMEncodedFrameCallback::SignalLastEncoderImplementationUsed(
+ const char* implementation_name) {
+ if (send_callback_)
+ send_callback_->OnEncoderImplementationName(implementation_name);
}
} // namespace webrtc
diff --git a/webrtc/modules/video_coding/generic_encoder.h b/webrtc/modules/video_coding/generic_encoder.h
new file mode 100644
index 0000000000..f739edb44f
--- /dev/null
+++ b/webrtc/modules/video_coding/generic_encoder.h
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_GENERIC_ENCODER_H_
+#define WEBRTC_MODULES_VIDEO_CODING_GENERIC_ENCODER_H_
+
+#include <stdio.h>
+#include <vector>
+
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
+#include "webrtc/modules/video_coding/include/video_coding_defines.h"
+
+#include "webrtc/base/criticalsection.h"
+#include "webrtc/base/scoped_ptr.h"
+
+namespace webrtc {
+class CriticalSectionWrapper;
+
+namespace media_optimization {
+class MediaOptimization;
+} // namespace media_optimization
+
+struct EncoderParameters {
+ uint32_t target_bitrate;
+ uint8_t loss_rate;
+ int64_t rtt;
+ uint32_t input_frame_rate;
+};
+
+/************************************/
+/*  VCMEncodedFrameCallback class   */
+/************************************/
+class VCMEncodedFrameCallback : public EncodedImageCallback {
+ public:
+ explicit VCMEncodedFrameCallback(
+ EncodedImageCallback* post_encode_callback);
+ virtual ~VCMEncodedFrameCallback();
+
+ /*
+ * Callback implementation - codec encode complete
+ */
+ int32_t Encoded(
+ const EncodedImage& encodedImage,
+ const CodecSpecificInfo* codecSpecificInfo = NULL,
+ const RTPFragmentationHeader* fragmentationHeader = NULL);
+  /*
+  * Set the transport callback that will receive the encoded data.
+  */
+ int32_t SetTransportCallback(VCMPacketizationCallback* transport);
+ /**
+  * Set media optimization.
+ */
+ void SetMediaOpt(media_optimization::MediaOptimization* mediaOpt);
+
+ void SetPayloadType(uint8_t payloadType) {
+ _payloadType = payloadType;
+ }
+
+ void SetInternalSource(bool internalSource) {
+ _internalSource = internalSource;
+ }
+
+ void SetRotation(VideoRotation rotation) { _rotation = rotation; }
+ void SignalLastEncoderImplementationUsed(
+ const char* encoder_implementation_name);
+
+ private:
+ VCMPacketizationCallback* send_callback_;
+ media_optimization::MediaOptimization* _mediaOpt;
+ uint8_t _payloadType;
+ bool _internalSource;
+ VideoRotation _rotation;
+
+ EncodedImageCallback* post_encode_callback_;
+
+#ifdef DEBUG_ENCODER_BIT_STREAM
+ FILE* _bitStreamAfterEncoder;
+#endif
+};  // end of VCMEncodedFrameCallback class
+
+/******************************/
+/* VCMGenericEncoder class */
+/******************************/
+class VCMGenericEncoder {
+ friend class VCMCodecDataBase;
+
+ public:
+ VCMGenericEncoder(VideoEncoder* encoder,
+ VideoEncoderRateObserver* rate_observer,
+ VCMEncodedFrameCallback* encoded_frame_callback,
+ bool internalSource);
+ ~VCMGenericEncoder();
+ /**
+ * Free encoder memory
+ */
+ int32_t Release();
+ /**
+ * Initialize the encoder with the information from the VideoCodec
+ */
+ int32_t InitEncode(const VideoCodec* settings,
+ int32_t numberOfCores,
+ size_t maxPayloadSize);
+ /**
+  * Encode a raw image.
+  * inputFrame : Frame containing the raw image.
+  * codecSpecificInfo : Codec-specific data.
+  * frameTypes : The requested frame types to encode.
+ */
+ int32_t Encode(const VideoFrame& inputFrame,
+ const CodecSpecificInfo* codecSpecificInfo,
+ const std::vector<FrameType>& frameTypes);
+
+ void SetEncoderParameters(const EncoderParameters& params);
+ EncoderParameters GetEncoderParameters() const;
+
+ int32_t SetPeriodicKeyFrames(bool enable);
+
+ int32_t RequestFrame(const std::vector<FrameType>& frame_types);
+
+ bool InternalSource() const;
+
+ void OnDroppedFrame();
+
+ bool SupportsNativeHandle() const;
+
+ int GetTargetFramerate();
+
+ private:
+ VideoEncoder* const encoder_;
+ VideoEncoderRateObserver* const rate_observer_;
+ VCMEncodedFrameCallback* const vcm_encoded_frame_callback_;
+ const bool internal_source_;
+ mutable rtc::CriticalSection params_lock_;
+ EncoderParameters encoder_params_ GUARDED_BY(params_lock_);
+ VideoRotation rotation_;
+ bool is_screenshare_;
+}; // end of VCMGenericEncoder class
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_CODING_GENERIC_ENCODER_H_
diff --git a/webrtc/modules/video_coding/main/interface/mock/mock_vcm_callbacks.h b/webrtc/modules/video_coding/include/mock/mock_vcm_callbacks.h
index 302d4a3a13..0185dae333 100644
--- a/webrtc/modules/video_coding/main/interface/mock/mock_vcm_callbacks.h
+++ b/webrtc/modules/video_coding/include/mock/mock_vcm_callbacks.h
@@ -8,11 +8,11 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_VIDEO_CODING_MAIN_INTERFACE_MOCK_MOCK_VCM_CALLBACKS_H_
-#define WEBRTC_MODULES_VIDEO_CODING_MAIN_INTERFACE_MOCK_MOCK_VCM_CALLBACKS_H_
+#ifndef WEBRTC_MODULES_VIDEO_CODING_INCLUDE_MOCK_MOCK_VCM_CALLBACKS_H_
+#define WEBRTC_MODULES_VIDEO_CODING_INCLUDE_MOCK_MOCK_VCM_CALLBACKS_H_
#include "testing/gmock/include/gmock/gmock.h"
-#include "webrtc/modules/video_coding/main/interface/video_coding_defines.h"
+#include "webrtc/modules/video_coding/include/video_coding_defines.h"
#include "webrtc/typedefs.h"
namespace webrtc {
@@ -20,16 +20,15 @@ namespace webrtc {
class MockVCMFrameTypeCallback : public VCMFrameTypeCallback {
public:
MOCK_METHOD0(RequestKeyFrame, int32_t());
- MOCK_METHOD1(SliceLossIndicationRequest,
- int32_t(const uint64_t pictureId));
+ MOCK_METHOD1(SliceLossIndicationRequest, int32_t(const uint64_t pictureId));
};
class MockPacketRequestCallback : public VCMPacketRequestCallback {
public:
- MOCK_METHOD2(ResendPackets, int32_t(const uint16_t* sequenceNumbers,
- uint16_t length));
+ MOCK_METHOD2(ResendPackets,
+ int32_t(const uint16_t* sequenceNumbers, uint16_t length));
};
} // namespace webrtc
-#endif // WEBRTC_MODULES_VIDEO_CODING_MAIN_INTERFACE_MOCK_MOCK_VCM_CALLBACKS_H_
+#endif // WEBRTC_MODULES_VIDEO_CODING_INCLUDE_MOCK_MOCK_VCM_CALLBACKS_H_
diff --git a/webrtc/modules/video_coding/include/mock/mock_video_codec_interface.h b/webrtc/modules/video_coding/include/mock/mock_video_codec_interface.h
new file mode 100644
index 0000000000..9cb4a83535
--- /dev/null
+++ b/webrtc/modules/video_coding/include/mock/mock_video_codec_interface.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_INCLUDE_MOCK_MOCK_VIDEO_CODEC_INTERFACE_H_
+#define WEBRTC_MODULES_VIDEO_CODING_INCLUDE_MOCK_MOCK_VIDEO_CODEC_INTERFACE_H_
+
+#include <string>
+#include <vector>
+
+#include "testing/gmock/include/gmock/gmock.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
+#include "webrtc/typedefs.h"
+
+namespace webrtc {
+
+class MockEncodedImageCallback : public EncodedImageCallback {
+ public:
+ MOCK_METHOD3(Encoded,
+ int32_t(const EncodedImage& encodedImage,
+ const CodecSpecificInfo* codecSpecificInfo,
+ const RTPFragmentationHeader* fragmentation));
+};
+
+class MockVideoEncoder : public VideoEncoder {
+ public:
+ MOCK_CONST_METHOD2(Version, int32_t(int8_t* version, int32_t length));
+ MOCK_METHOD3(InitEncode,
+ int32_t(const VideoCodec* codecSettings,
+ int32_t numberOfCores,
+ size_t maxPayloadSize));
+ MOCK_METHOD3(Encode,
+ int32_t(const VideoFrame& inputImage,
+ const CodecSpecificInfo* codecSpecificInfo,
+ const std::vector<FrameType>* frame_types));
+ MOCK_METHOD1(RegisterEncodeCompleteCallback,
+ int32_t(EncodedImageCallback* callback));
+ MOCK_METHOD0(Release, int32_t());
+ MOCK_METHOD0(Reset, int32_t());
+ MOCK_METHOD2(SetChannelParameters, int32_t(uint32_t packetLoss, int64_t rtt));
+ MOCK_METHOD2(SetRates, int32_t(uint32_t newBitRate, uint32_t frameRate));
+ MOCK_METHOD1(SetPeriodicKeyFrames, int32_t(bool enable));
+};
+
+class MockDecodedImageCallback : public DecodedImageCallback {
+ public:
+ MOCK_METHOD1(Decoded, int32_t(VideoFrame& decodedImage)); // NOLINT
+ MOCK_METHOD2(Decoded,
+ int32_t(VideoFrame& decodedImage, // NOLINT
+ int64_t decode_time_ms));
+ MOCK_METHOD1(ReceivedDecodedReferenceFrame,
+ int32_t(const uint64_t pictureId));
+ MOCK_METHOD1(ReceivedDecodedFrame, int32_t(const uint64_t pictureId));
+};
+
+class MockVideoDecoder : public VideoDecoder {
+ public:
+ MOCK_METHOD2(InitDecode,
+ int32_t(const VideoCodec* codecSettings, int32_t numberOfCores));
+ MOCK_METHOD5(Decode,
+ int32_t(const EncodedImage& inputImage,
+ bool missingFrames,
+ const RTPFragmentationHeader* fragmentation,
+ const CodecSpecificInfo* codecSpecificInfo,
+ int64_t renderTimeMs));
+ MOCK_METHOD1(RegisterDecodeCompleteCallback,
+ int32_t(DecodedImageCallback* callback));
+ MOCK_METHOD0(Release, int32_t());
+ MOCK_METHOD0(Reset, int32_t());
+ MOCK_METHOD0(Copy, VideoDecoder*());
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_CODING_INCLUDE_MOCK_MOCK_VIDEO_CODEC_INTERFACE_H_
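These mocks follow the standard gmock pattern. A minimal usage sketch, assuming the usual gtest/gmock test scaffolding and includes around it:

    // Expect exactly one InitDecode call on the mock and report success.
    webrtc::MockVideoDecoder decoder;
    EXPECT_CALL(decoder, InitDecode(testing::_, testing::_))
        .WillOnce(testing::Return(WEBRTC_VIDEO_CODEC_OK));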
diff --git a/webrtc/modules/video_coding/include/video_codec_interface.h b/webrtc/modules/video_coding/include/video_codec_interface.h
new file mode 100644
index 0000000000..19303c0d67
--- /dev/null
+++ b/webrtc/modules/video_coding/include/video_codec_interface.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_INCLUDE_VIDEO_CODEC_INTERFACE_H_
+#define WEBRTC_MODULES_VIDEO_CODING_INCLUDE_VIDEO_CODEC_INTERFACE_H_
+
+#include <vector>
+
+#include "webrtc/common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/video_coding/include/video_error_codes.h"
+#include "webrtc/typedefs.h"
+#include "webrtc/video_decoder.h"
+#include "webrtc/video_encoder.h"
+#include "webrtc/video_frame.h"
+
+namespace webrtc {
+
+class RTPFragmentationHeader; // forward declaration
+
+// Note: if any pointers are added to this struct, it must be fitted
+// with a copy-constructor. See below.
+struct CodecSpecificInfoVP8 {
+ bool hasReceivedSLI;
+ uint8_t pictureIdSLI;
+ bool hasReceivedRPSI;
+ uint64_t pictureIdRPSI;
+ int16_t pictureId; // Negative value to skip pictureId.
+ bool nonReference;
+ uint8_t simulcastIdx;
+ uint8_t temporalIdx;
+ bool layerSync;
+ int tl0PicIdx; // Negative value to skip tl0PicIdx.
+ int8_t keyIdx; // Negative value to skip keyIdx.
+};
+
+struct CodecSpecificInfoVP9 {
+ bool has_received_sli;
+ uint8_t picture_id_sli;
+ bool has_received_rpsi;
+ uint64_t picture_id_rpsi;
+ int16_t picture_id; // Negative value to skip pictureId.
+
+ bool inter_pic_predicted; // This layer frame is dependent on previously
+ // coded frame(s).
+ bool flexible_mode;
+ bool ss_data_available;
+
+ int tl0_pic_idx; // Negative value to skip tl0PicIdx.
+ uint8_t temporal_idx;
+ uint8_t spatial_idx;
+ bool temporal_up_switch;
+ bool inter_layer_predicted; // Frame is dependent on directly lower spatial
+ // layer frame.
+ uint8_t gof_idx;
+
+ // SS data.
+ size_t num_spatial_layers; // Always populated.
+ bool spatial_layer_resolution_present;
+ uint16_t width[kMaxVp9NumberOfSpatialLayers];
+ uint16_t height[kMaxVp9NumberOfSpatialLayers];
+ GofInfoVP9 gof;
+
+ // Frame reference data.
+ uint8_t num_ref_pics;
+ uint8_t p_diff[kMaxVp9RefPics];
+};
+
+struct CodecSpecificInfoGeneric {
+ uint8_t simulcast_idx;
+};
+
+struct CodecSpecificInfoH264 {};
+
+union CodecSpecificInfoUnion {
+ CodecSpecificInfoGeneric generic;
+ CodecSpecificInfoVP8 VP8;
+ CodecSpecificInfoVP9 VP9;
+ CodecSpecificInfoH264 H264;
+};
+
+// Note: if any pointers are added to this struct or its sub-structs, it
+// must be fitted with a copy-constructor. This is because it is copied
+// in the copy-constructor of VCMEncodedFrame.
+struct CodecSpecificInfo {
+ VideoCodecType codecType;
+ CodecSpecificInfoUnion codecSpecific;
+};
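Because CodecSpecificInfo is copied by value inside VCMEncodedFrame, keeping it pointer-free (per the notes above) is what makes the plain struct copy safe. A sketch of an encoder wrapper populating it for a VP8 base-layer delta frame; the values are illustrative, and memset assumes <cstring>:

    CodecSpecificInfo info;
    memset(&info, 0, sizeof(info));
    info.codecType = kVideoCodecVP8;
    info.codecSpecific.VP8.pictureId = -1;   // Negative: omit pictureId.
    info.codecSpecific.VP8.tl0PicIdx = -1;   // Negative: omit tl0PicIdx.
    info.codecSpecific.VP8.keyIdx = -1;      // Negative: omit keyIdx.
    info.codecSpecific.VP8.temporalIdx = 0;  // Base temporal layer.
    info.codecSpecific.VP8.layerSync = false;
    info.codecSpecific.VP8.nonReference = false;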
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_CODING_INCLUDE_VIDEO_CODEC_INTERFACE_H_
diff --git a/webrtc/modules/video_coding/include/video_coding.h b/webrtc/modules/video_coding/include/video_coding.h
new file mode 100644
index 0000000000..c46896c823
--- /dev/null
+++ b/webrtc/modules/video_coding/include/video_coding.h
@@ -0,0 +1,519 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_INCLUDE_VIDEO_CODING_H_
+#define WEBRTC_MODULES_VIDEO_CODING_INCLUDE_VIDEO_CODING_H_
+
+#if defined(WEBRTC_WIN)
+// This is a workaround on Windows: some Windows headers define CreateEvent
+// as a macro expanding to either CreateEventW or CreateEventA. Since we use
+// that name as well, declaring CreateEvent here while a Windows header is
+// included elsewhere would give the symbol conflicting definitions, and
+// implementing CreateEvent() would then fail to compile. For consistency,
+// we include the main Windows header here.
+#include <windows.h>
+#endif
+
+#include "webrtc/modules/include/module.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/video_coding/include/video_coding_defines.h"
+#include "webrtc/system_wrappers/include/event_wrapper.h"
+#include "webrtc/video_frame.h"
+
+namespace webrtc {
+
+class Clock;
+class EncodedImageCallback;
+class VideoEncoder;
+class VideoDecoder;
+struct CodecSpecificInfo;
+
+class EventFactory {
+ public:
+ virtual ~EventFactory() {}
+
+ virtual EventWrapper* CreateEvent() = 0;
+};
+
+class EventFactoryImpl : public EventFactory {
+ public:
+ virtual ~EventFactoryImpl() {}
+
+ virtual EventWrapper* CreateEvent() { return EventWrapper::Create(); }
+};
+
+// Used to indicate which decode with errors mode should be used.
+enum VCMDecodeErrorMode {
+ kNoErrors, // Never decode with errors. Video will freeze
+ // if nack is disabled.
+ kSelectiveErrors, // Frames that are determined decodable in
+ // VCMSessionInfo may be decoded with missing
+ // packets. As not all incomplete frames will be
+ // decodable, video will freeze if nack is disabled.
+ kWithErrors // Release frames as needed. Errors may be
+ // introduced as some encoded frames may not be
+ // complete.
+};
+
+class VideoCodingModule : public Module {
+ public:
+ enum SenderNackMode { kNackNone, kNackAll, kNackSelective };
+
+ enum ReceiverRobustness { kNone, kHardNack, kSoftNack, kReferenceSelection };
+
+ static VideoCodingModule* Create(
+ Clock* clock,
+ VideoEncoderRateObserver* encoder_rate_observer,
+ VCMQMSettingsCallback* qm_settings_callback);
+
+ static VideoCodingModule* Create(Clock* clock, EventFactory* event_factory);
+
+ static void Destroy(VideoCodingModule* module);
+
+ // Get supported codec settings using codec type
+ //
+ // Input:
+ // - codecType : The codec type to get settings for
+ // - codec : Memory where the codec settings will be stored
+ static void Codec(VideoCodecType codecType, VideoCodec* codec);
+
+ /*
+ * Sender
+ */
+
+ // Registers a codec to be used for encoding. Calling this
+ // API multiple times overwrites any previously registered codecs.
+ //
+ // NOTE: Must be called on the thread that constructed the VCM instance.
+ //
+ // Input:
+ // - sendCodec : Settings for the codec to be registered.
+ // - numberOfCores : The number of cores the codec is allowed
+ // to use.
+ // - maxPayloadSize : The maximum size each payload is allowed
+ // to have. Usually MTU - overhead.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t RegisterSendCodec(const VideoCodec* sendCodec,
+ uint32_t numberOfCores,
+ uint32_t maxPayloadSize) = 0;
+
+ // Register an external encoder object. This can not be used together with
+ // external decoder callbacks.
+ //
+ // Input:
+  //      - externalEncoder : Encoder object to be used for encoding frames
+  //                          inserted with the AddVideoFrame API.
+  //      - payloadType     : The payload type to which this encoder is bound.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ // TODO(pbos): Remove return type when unused elsewhere.
+ virtual int32_t RegisterExternalEncoder(VideoEncoder* externalEncoder,
+ uint8_t payloadType,
+ bool internalSource = false) = 0;
+
+ // API to get currently configured encoder target bitrate in bits/s.
+ //
+ // Return value : 0, on success.
+ // < 0, on error.
+ virtual int Bitrate(unsigned int* bitrate) const = 0;
+
+ // API to get currently configured encoder target frame rate.
+ //
+ // Return value : 0, on success.
+ // < 0, on error.
+ virtual int FrameRate(unsigned int* framerate) const = 0;
+
+  // Sets the parameters describing the send channel. These parameters are
+  // inputs to the media optimization inside the VCM and also specify the
+  // target bit rate for the encoder. Bit rate used by NACK should already be
+  // compensated for by the user.
+ //
+ // Input:
+ // - target_bitrate : The target bitrate for VCM in bits/s.
+  //      - lossRate        : Fraction of packets lost during the past second
+  //                          (loss rate in percent = 100 * packetLoss / 255).
+ // - rtt : Current round-trip time in ms.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t SetChannelParameters(uint32_t target_bitrate,
+ uint8_t lossRate,
+ int64_t rtt) = 0;
+
+  // Sets the parameters describing the receive channel. These parameters are
+  // inputs to the media optimization inside the VCM.
+  //
+  // Input:
+  //      - rtt : Current round-trip time in ms; in a conference scenario,
+  //              this should be the rtt to the peer with the most available
+  //              bandwidth.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t SetReceiveChannelParameters(int64_t rtt) = 0;
+
+  // Register a transport callback which will be called to deliver the encoded
+  // data and side information.
+ //
+ // Input:
+ // - transport : The callback object to register.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t RegisterTransportCallback(
+ VCMPacketizationCallback* transport) = 0;
+
+  // Register a video output information callback which will be called to
+  // deliver information about the video stream produced by the encoder, for
+  // instance the average frame rate and bit rate.
+ //
+ // Input:
+ // - outputInformation : The callback object to register.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t RegisterSendStatisticsCallback(
+ VCMSendStatisticsCallback* sendStats) = 0;
+
+ // Register a video protection callback which will be called to deliver
+ // the requested FEC rate and NACK status (on/off).
+ //
+ // Input:
+ // - protection : The callback object to register.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t RegisterProtectionCallback(
+ VCMProtectionCallback* protection) = 0;
+
+ // Enable or disable a video protection method.
+ //
+ // Input:
+ // - videoProtection : The method to enable or disable.
+ // - enable : True if the method should be enabled, false if
+ // it should be disabled.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t SetVideoProtection(VCMVideoProtection videoProtection,
+ bool enable) = 0;
+
+  // Add one raw video frame to the encoder. This function does all the
+  // necessary processing, then decides what frame type to encode, or if the
+  // frame should be dropped. If the frame should be encoded it passes the
+  // frame to the encoder before it returns.
+ //
+ // Input:
+ // - videoFrame : Video frame to encode.
+ // - codecSpecificInfo : Extra codec information, e.g., pre-parsed
+ // in-band signaling.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t AddVideoFrame(
+ const VideoFrame& videoFrame,
+ const VideoContentMetrics* contentMetrics = NULL,
+ const CodecSpecificInfo* codecSpecificInfo = NULL) = 0;
+
+ // Next frame encoded should be an intra frame (keyframe).
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t IntraFrameRequest(int stream_index) = 0;
+
+  // Enable or disable the frame dropper. Can be used to disable frame
+  // dropping when the encoder over-uses its bit rate. This API is designed
+  // for cases where the encoded frames are stored to an AVI file, or where
+  // the I420 codec is used and the target bit rate shouldn't affect the
+  // frame rate.
+ //
+ // Input:
+ // - enable : True to enable the setting, false to disable it.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t EnableFrameDropper(bool enable) = 0;
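Taken together, the sender-side APIs above are used in a fixed order. A condensed sketch, assuming null observer arguments are acceptable to Create(); transport_callback and captured_frame are placeholders:

    webrtc::VideoCodec codec;
    webrtc::VideoCodingModule::Codec(webrtc::kVideoCodecVP8, &codec);

    webrtc::VideoCodingModule* vcm = webrtc::VideoCodingModule::Create(
        webrtc::Clock::GetRealTimeClock(), nullptr, nullptr);
    vcm->RegisterSendCodec(&codec, /*numberOfCores=*/1,
                           /*maxPayloadSize=*/1200);
    vcm->RegisterTransportCallback(transport_callback);
    vcm->SetChannelParameters(codec.startBitrate * 1000, /*lossRate=*/0,
                              /*rtt=*/0);

    // Then, for every captured frame:
    vcm->AddVideoFrame(captured_frame);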
+
+ /*
+ * Receiver
+ */
+
+  // Register possible receive codecs; can be called multiple times for
+  // different codecs. The module will automatically switch between registered
+  // codecs depending on the payload type of incoming frames. The actual
+  // decoder will be created when needed.
+ //
+ // Input:
+ // - receiveCodec : Settings for the codec to be registered.
+ // - numberOfCores : Number of CPU cores that the decoder is allowed
+ // to use.
+  //      - requireKeyFrame : Set this to true if you don't want any delta
+  //                          frames to be decoded until the first key frame
+  //                          has been decoded.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t RegisterReceiveCodec(const VideoCodec* receiveCodec,
+ int32_t numberOfCores,
+ bool requireKeyFrame = false) = 0;
+
+  // Register an externally defined decoder/renderer object. Can be a decoder
+  // only or a decoder coupled with a renderer. Note that RegisterReceiveCodec
+  // must also be called for the decoder to be used on incoming streams.
+ //
+ // Input:
+ // - externalDecoder : The external decoder/renderer object.
+  //      - payloadType     : The payload type which this decoder should be
+  //                          registered to.
+ //
+ virtual void RegisterExternalDecoder(VideoDecoder* externalDecoder,
+ uint8_t payloadType) = 0;
+
+  // Register a receive callback. Will be called whenever there is a new frame
+  // ready for rendering.
+ //
+ // Input:
+  //      - receiveCallback        : The callback object to be used by the
+  //                                 module when a frame is ready for
+  //                                 rendering. De-register with a NULL
+  //                                 pointer.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t RegisterReceiveCallback(
+ VCMReceiveCallback* receiveCallback) = 0;
+
+  // Register a receive statistics callback which will be called to deliver
+  // information about the video stream received by the receiving side of the
+  // VCM, for instance the average frame rate and bit rate.
+ //
+ // Input:
+ // - receiveStats : The callback object to register.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t RegisterReceiveStatisticsCallback(
+ VCMReceiveStatisticsCallback* receiveStats) = 0;
+
+ // Register a decoder timing callback which will be called to deliver
+ // information about the timing of the decoder in the receiving side of the
+ // VCM, for instance the current and maximum frame decode latency.
+ //
+ // Input:
+ // - decoderTiming : The callback object to register.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t RegisterDecoderTimingCallback(
+ VCMDecoderTimingCallback* decoderTiming) = 0;
+
+  // Register a frame type request callback. This callback will be called when
+  // the module needs to request specific frame types from the send side.
+ //
+ // Input:
+  //      - frameTypeCallback      : The callback object to be used by the
+  //                                 module when requesting a specific type of
+  //                                 frame from the send side. De-register with
+  //                                 a NULL pointer.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t RegisterFrameTypeCallback(
+ VCMFrameTypeCallback* frameTypeCallback) = 0;
+
+ // Registers a callback which is called whenever the receive side of the VCM
+ // encounters holes in the packet sequence and needs packets to be
+ // retransmitted.
+ //
+ // Input:
+ // - callback : The callback to be registered in the VCM.
+ //
+ // Return value : VCM_OK, on success.
+ // <0, on error.
+ virtual int32_t RegisterPacketRequestCallback(
+ VCMPacketRequestCallback* callback) = 0;
+
+ // Waits for the next frame in the jitter buffer to become complete
+ // (waits no longer than maxWaitTimeMs), then passes it to the decoder for
+ // decoding.
+ // Should be called as often as possible to get the most out of the decoder.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t Decode(uint16_t maxWaitTimeMs = 200) = 0;
+
+ // Registers a callback which conveys the size of the render buffer.
+ virtual int RegisterRenderBufferSizeCallback(
+ VCMRenderBufferSizeCallback* callback) = 0;
+
+ // Reset the decoder state to the initial state.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t ResetDecoder() = 0;
+
+ // API to get the codec which is currently used for decoding by the module.
+ //
+ // Input:
+ // - currentReceiveCodec : Settings for the codec to be registered.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t ReceiveCodec(VideoCodec* currentReceiveCodec) const = 0;
+
+ // API to get the codec type currently used for decoding by the module.
+ //
+  // Return value      : codec type, on success.
+  //                     kVideoCodecUnknown, on error or if no receive codec
+  //                     is registered.
+ virtual VideoCodecType ReceiveCodec() const = 0;
+
+  // Insert a parsed packet into the receiver side of the module. Will be
+  // placed in the jitter buffer waiting for the frame to become complete.
+  // Returns as soon as the packet has been placed in the jitter buffer.
+ //
+ // Input:
+ // - incomingPayload : Payload of the packet.
+ // - payloadLength : Length of the payload.
+ // - rtpInfo : The parsed header.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t IncomingPacket(const uint8_t* incomingPayload,
+ size_t payloadLength,
+ const WebRtcRTPHeader& rtpInfo) = 0;
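On the receive side the module splits work between packet insertion and a decode loop, typically on different threads. A sketch where vcm, payload, payload_length, rtp_header and running are placeholders:

    // Network thread: hand each parsed RTP payload to the jitter buffer.
    vcm->IncomingPacket(payload, payload_length, rtp_header);

    // Decode thread: wait up to 50 ms for a decodable frame, then decode it.
    while (running) {
      vcm->Decode(/*maxWaitTimeMs=*/50);
    }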
+
+  // Minimum playout delay (used for lip-sync). This is the minimum delay
+  // required to sync with audio. Not included in VideoCodingModule::Delay().
+  // Defaults to 0 ms.
+ //
+ // Input:
+ // - minPlayoutDelayMs : Additional delay in ms.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t SetMinimumPlayoutDelay(uint32_t minPlayoutDelayMs) = 0;
+
+ // Set the time required by the renderer to render a frame.
+ //
+ // Input:
+ // - timeMS : The time in ms required by the renderer to render a
+ // frame.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t SetRenderDelay(uint32_t timeMS) = 0;
+
+ // The total delay desired by the VCM. Can be less than the minimum
+ // delay set with SetMinimumPlayoutDelay.
+ //
+ // Return value : Total delay in ms, on success.
+ // < 0, on error.
+ virtual int32_t Delay() const = 0;
+
+ // Returns the number of packets discarded by the jitter buffer due to being
+ // too late. This can include duplicated packets which arrived after the
+ // frame was sent to the decoder. Therefore packets which were prematurely
+ // NACKed will be counted.
+ virtual uint32_t DiscardedPackets() const = 0;
+
+ // Robustness APIs
+
+ // Set the receiver robustness mode. The mode decides how the receiver
+ // responds to losses in the stream. The type of counter-measure (soft or
+ // hard NACK, dual decoder, RPS, etc.) is selected through the
+ // robustnessMode parameter. The errorMode parameter decides if it is
+ // allowed to display frames corrupted by losses. Note that not all
+ // combinations of the two parameters are feasible. An error will be
+ // returned for invalid combinations.
+ // Input:
+ // - robustnessMode : selected robustness mode.
+ // - errorMode : selected error mode.
+ //
+ // Return value : VCM_OK, on success;
+ // < 0, on error.
+ virtual int SetReceiverRobustnessMode(ReceiverRobustness robustnessMode,
+ VCMDecodeErrorMode errorMode) = 0;
+
+ // Set the decode error mode. The mode decides which errors (if any) are
+ // allowed in decodable frames. Note that setting decode_error_mode to
+ // anything other than kWithErrors without enabling nack will cause
+ // long-term freezes (resulting from frequent key frame requests) if
+ // packet loss occurs.
+ virtual void SetDecodeErrorMode(VCMDecodeErrorMode decode_error_mode) = 0;
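As the note above says, the error mode only behaves well when matched with the NACK configuration. A small sketch of that pairing:

    // Pick a decode error mode consistent with whether NACK is enabled.
    void ConfigureLossHandling(webrtc::VideoCodingModule* vcm,
                               bool nack_enabled) {
      // Without NACK, decoding with errors avoids long key-frame freezes.
      vcm->SetDecodeErrorMode(nack_enabled ? webrtc::kNoErrors
                                           : webrtc::kWithErrors);
    }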
+
+ // Sets the maximum number of sequence numbers that we are allowed to NACK
+ // and the oldest sequence number that we will consider to NACK. If a
+ // sequence number older than |max_packet_age_to_nack| is missing
+ // a key frame will be requested. A key frame will also be requested if the
+ // time of incomplete or non-continuous frames in the jitter buffer is above
+ // |max_incomplete_time_ms|.
+ virtual void SetNackSettings(size_t max_nack_list_size,
+ int max_packet_age_to_nack,
+ int max_incomplete_time_ms) = 0;
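A worked example of the trade-offs these three settings express; the numbers are illustrative, not recommended defaults:

    // NACK at most 250 packets, give up on packets more than 450 sequence
    // numbers old, and request a key frame after 1000 ms of incomplete or
    // non-continuous frames in the jitter buffer.
    vcm->SetNackSettings(/*max_nack_list_size=*/250,
                         /*max_packet_age_to_nack=*/450,
                         /*max_incomplete_time_ms=*/1000);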
+
+ // Setting a desired delay to the VCM receiver. Video rendering will be
+ // delayed by at least desired_delay_ms.
+ virtual int SetMinReceiverDelay(int desired_delay_ms) = 0;
+
+ // Lets the sender suspend video when the rate drops below
+ // |threshold_bps|, and turns back on when the rate goes back up above
+ // |threshold_bps| + |window_bps|.
+ virtual void SuspendBelowMinBitrate() = 0;
+
+ // Returns true if SuspendBelowMinBitrate is engaged and the video has been
+ // suspended due to bandwidth limitations; otherwise false.
+ virtual bool VideoSuspended() const = 0;
+
+ virtual void RegisterPreDecodeImageCallback(
+ EncodedImageCallback* observer) = 0;
+ virtual void RegisterPostEncodeImageCallback(
+ EncodedImageCallback* post_encode_callback) = 0;
+ // Releases pending decode calls, permitting faster thread shutdown.
+ virtual void TriggerDecoderShutdown() = 0;
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_CODING_INCLUDE_VIDEO_CODING_H_
diff --git a/webrtc/modules/video_coding/main/interface/video_coding_defines.h b/webrtc/modules/video_coding/include/video_coding_defines.h
index fd38d64415..673a02b713 100644
--- a/webrtc/modules/video_coding/main/interface/video_coding_defines.h
+++ b/webrtc/modules/video_coding/include/video_coding_defines.h
@@ -8,33 +8,33 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_INTERFACE_VIDEO_CODING_DEFINES_H_
-#define WEBRTC_MODULES_INTERFACE_VIDEO_CODING_DEFINES_H_
+#ifndef WEBRTC_MODULES_VIDEO_CODING_INCLUDE_VIDEO_CODING_DEFINES_H_
+#define WEBRTC_MODULES_VIDEO_CODING_INCLUDE_VIDEO_CODING_DEFINES_H_
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/typedefs.h"
#include "webrtc/video_frame.h"
namespace webrtc {
// Error codes
-#define VCM_FRAME_NOT_READY 3
-#define VCM_REQUEST_SLI 2
-#define VCM_MISSING_CALLBACK 1
-#define VCM_OK 0
-#define VCM_GENERAL_ERROR -1
-#define VCM_LEVEL_EXCEEDED -2
-#define VCM_MEMORY -3
-#define VCM_PARAMETER_ERROR -4
-#define VCM_UNKNOWN_PAYLOAD -5
-#define VCM_CODEC_ERROR -6
-#define VCM_UNINITIALIZED -7
+#define VCM_FRAME_NOT_READY 3
+#define VCM_REQUEST_SLI 2
+#define VCM_MISSING_CALLBACK 1
+#define VCM_OK 0
+#define VCM_GENERAL_ERROR -1
+#define VCM_LEVEL_EXCEEDED -2
+#define VCM_MEMORY -3
+#define VCM_PARAMETER_ERROR -4
+#define VCM_UNKNOWN_PAYLOAD -5
+#define VCM_CODEC_ERROR -6
+#define VCM_UNINITIALIZED -7
#define VCM_NO_CODEC_REGISTERED -8
#define VCM_JITTER_BUFFER_ERROR -9
-#define VCM_OLD_PACKET_ERROR -10
-#define VCM_NO_FRAME_DECODED -11
-#define VCM_ERROR_REQUEST_SLI -12
-#define VCM_NOT_IMPLEMENTED -20
+#define VCM_OLD_PACKET_ERROR -10
+#define VCM_NO_FRAME_DECODED -11
+#define VCM_ERROR_REQUEST_SLI -12
+#define VCM_NOT_IMPLEMENTED -20
enum { kDefaultStartBitrateKbps = 300 };
@@ -62,40 +62,42 @@ class VCMPacketizationCallback {
const RTPFragmentationHeader& fragmentationHeader,
const RTPVideoHeader* rtpVideoHdr) = 0;
+ virtual void OnEncoderImplementationName(const char* implementation_name) {}
+
protected:
- virtual ~VCMPacketizationCallback() {
- }
+ virtual ~VCMPacketizationCallback() {}
};
-// Callback class used for passing decoded frames which are ready to be rendered.
+// Callback class used for passing decoded frames which are ready to be
+// rendered.
class VCMReceiveCallback {
public:
- virtual int32_t FrameToRender(VideoFrame& videoFrame) = 0;
- virtual int32_t ReceivedDecodedReferenceFrame(
- const uint64_t pictureId) {
+ virtual int32_t FrameToRender(VideoFrame& videoFrame) = 0; // NOLINT
+ virtual int32_t ReceivedDecodedReferenceFrame(const uint64_t pictureId) {
return -1;
}
// Called when the current receive codec changes.
virtual void OnIncomingPayloadType(int payload_type) {}
+ virtual void OnDecoderImplementationName(const char* implementation_name) {}
protected:
- virtual ~VCMReceiveCallback() {
- }
+ virtual ~VCMReceiveCallback() {}
};
-// Callback class used for informing the user of the bit rate and frame rate produced by the
+// Callback class used for informing the user of the bit rate and frame rate
+// produced by the encoder.
class VCMSendStatisticsCallback {
public:
virtual int32_t SendStatistics(const uint32_t bitRate,
- const uint32_t frameRate) = 0;
+ const uint32_t frameRate) = 0;
protected:
- virtual ~VCMSendStatisticsCallback() {
- }
+ virtual ~VCMSendStatisticsCallback() {}
};
-// Callback class used for informing the user of the incoming bit rate and frame rate.
+// Callback class used for informing the user of the incoming bit rate and frame
+// rate.
class VCMReceiveStatisticsCallback {
public:
virtual void OnReceiveRatesUpdated(uint32_t bitRate, uint32_t frameRate) = 0;
@@ -103,8 +105,7 @@ class VCMReceiveStatisticsCallback {
virtual void OnFrameCountsUpdated(const FrameCounts& frame_counts) = 0;
protected:
- virtual ~VCMReceiveStatisticsCallback() {
- }
+ virtual ~VCMReceiveStatisticsCallback() {}
};
// Callback class used for informing the user of decode timing info.
@@ -133,8 +134,7 @@ class VCMProtectionCallback {
uint32_t* sent_fec_rate_bps) = 0;
protected:
- virtual ~VCMProtectionCallback() {
- }
+ virtual ~VCMProtectionCallback() {}
};
class VideoEncoderRateObserver {
@@ -143,31 +143,30 @@ class VideoEncoderRateObserver {
virtual void OnSetRates(uint32_t bitrate_bps, int framerate) = 0;
};
-// Callback class used for telling the user about what frame type needed to continue decoding.
+// Callback class used for telling the user what frame type is needed to
+// continue decoding.
// Typically a key frame when the stream has been corrupted in some way.
class VCMFrameTypeCallback {
public:
virtual int32_t RequestKeyFrame() = 0;
- virtual int32_t SliceLossIndicationRequest(
- const uint64_t pictureId) {
+ virtual int32_t SliceLossIndicationRequest(const uint64_t pictureId) {
return -1;
}
protected:
- virtual ~VCMFrameTypeCallback() {
- }
+ virtual ~VCMFrameTypeCallback() {}
};
-// Callback class used for telling the user about which packet sequence numbers are currently
+// Callback class used for telling the user about which packet sequence numbers
+// are currently missing and need to be resent.
class VCMPacketRequestCallback {
public:
virtual int32_t ResendPackets(const uint16_t* sequenceNumbers,
- uint16_t length) = 0;
+ uint16_t length) = 0;
protected:
- virtual ~VCMPacketRequestCallback() {
- }
+ virtual ~VCMPacketRequestCallback() {}
};
// Callback used to inform the user of the desired resolution
@@ -175,14 +174,13 @@ class VCMPacketRequestCallback {
class VCMQMSettingsCallback {
public:
virtual int32_t SetVideoQMSettings(const uint32_t frameRate,
- const uint32_t width,
- const uint32_t height) = 0;
+ const uint32_t width,
+ const uint32_t height) = 0;
virtual void SetTargetFramerate(int frame_rate) = 0;
protected:
- virtual ~VCMQMSettingsCallback() {
- }
+ virtual ~VCMQMSettingsCallback() {}
};
// Callback class used for telling the user about the size (in time) of the
@@ -192,10 +190,9 @@ class VCMRenderBufferSizeCallback {
virtual void RenderBufferSizeMs(int buffer_size_ms) = 0;
protected:
- virtual ~VCMRenderBufferSizeCallback() {
- }
+ virtual ~VCMRenderBufferSizeCallback() {}
};
} // namespace webrtc
-#endif // WEBRTC_MODULES_INTERFACE_VIDEO_CODING_DEFINES_H_
+#endif // WEBRTC_MODULES_VIDEO_CODING_INCLUDE_VIDEO_CODING_DEFINES_H_
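The callback interfaces in video_coding_defines.h are consumed by subclassing. A minimal receive-side sketch, assuming only the header above; the class name LoggingReceiveCallback is hypothetical and not part of the WebRTC tree:

#include "webrtc/modules/video_coding/include/video_coding_defines.h"

namespace webrtc {

// Illustrative sink for decoded frames; each override below corresponds to
// one of the virtual methods declared in VCMReceiveCallback above.
class LoggingReceiveCallback : public VCMReceiveCallback {
 public:
  int32_t FrameToRender(VideoFrame& videoFrame) override {  // NOLINT
    // Hand the decoded frame to a renderer; VCM_OK reports success.
    return VCM_OK;
  }
  void OnIncomingPayloadType(int payload_type) override {
    // Fired when the receive codec changes; payload_type identifies it.
  }
  void OnDecoderImplementationName(const char* implementation_name) override {
    // Reports which decoder implementation (e.g. hardware vs. libvpx) is used.
  }
};

}  // namespace webrtc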
diff --git a/webrtc/modules/video_coding/include/video_error_codes.h b/webrtc/modules/video_coding/include/video_error_codes.h
new file mode 100644
index 0000000000..360aa87744
--- /dev/null
+++ b/webrtc/modules/video_coding/include/video_error_codes.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_INCLUDE_VIDEO_ERROR_CODES_H_
+#define WEBRTC_MODULES_VIDEO_CODING_INCLUDE_VIDEO_ERROR_CODES_H_
+
+// NOTE: in sync with video_coding_module_defines.h
+
+// Define return values
+
+#define WEBRTC_VIDEO_CODEC_REQUEST_SLI 2
+#define WEBRTC_VIDEO_CODEC_NO_OUTPUT 1
+#define WEBRTC_VIDEO_CODEC_OK 0
+#define WEBRTC_VIDEO_CODEC_ERROR -1
+#define WEBRTC_VIDEO_CODEC_LEVEL_EXCEEDED -2
+#define WEBRTC_VIDEO_CODEC_MEMORY -3
+#define WEBRTC_VIDEO_CODEC_ERR_PARAMETER -4
+#define WEBRTC_VIDEO_CODEC_ERR_SIZE -5
+#define WEBRTC_VIDEO_CODEC_TIMEOUT -6
+#define WEBRTC_VIDEO_CODEC_UNINITIALIZED -7
+#define WEBRTC_VIDEO_CODEC_ERR_REQUEST_SLI -12
+#define WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE -13
+#define WEBRTC_VIDEO_CODEC_TARGET_BITRATE_OVERSHOOT -14
+
+#endif // WEBRTC_MODULES_VIDEO_CODING_INCLUDE_VIDEO_ERROR_CODES_H_
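Callers conventionally branch on these codes by sign: negative values are errors, positive values are non-fatal conditions. A hedged sketch (the helper name is illustrative, not a real WebRTC API):

#include "webrtc/modules/video_coding/include/video_error_codes.h"

// Hypothetical helper demonstrating the sign convention of the codes above.
const char* DescribeDecodeResult(int32_t ret) {
  if (ret == WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE)
    return "hardware decoder requests software fallback";
  if (ret > WEBRTC_VIDEO_CODEC_OK)
    return "non-fatal condition (NO_OUTPUT or REQUEST_SLI)";
  if (ret < WEBRTC_VIDEO_CODEC_OK)
    return "error";
  return "ok";
}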
diff --git a/webrtc/modules/video_coding/inter_frame_delay.cc b/webrtc/modules/video_coding/inter_frame_delay.cc
new file mode 100644
index 0000000000..fb3b54d204
--- /dev/null
+++ b/webrtc/modules/video_coding/inter_frame_delay.cc
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/video_coding/inter_frame_delay.h"
+
+namespace webrtc {
+
+VCMInterFrameDelay::VCMInterFrameDelay(int64_t currentWallClock) {
+ Reset(currentWallClock);
+}
+
+// Resets the delay estimate
+void VCMInterFrameDelay::Reset(int64_t currentWallClock) {
+ _zeroWallClock = currentWallClock;
+ _wrapArounds = 0;
+ _prevWallClock = 0;
+ _prevTimestamp = 0;
+ _dTS = 0;
+}
+
+// Calculates the delay of a frame with the given timestamp.
+// This method is called when the frame is complete.
+bool VCMInterFrameDelay::CalculateDelay(uint32_t timestamp,
+ int64_t* delay,
+ int64_t currentWallClock) {
+ if (_prevWallClock == 0) {
+ // First set of data, initialization, wait for next frame
+ _prevWallClock = currentWallClock;
+ _prevTimestamp = timestamp;
+ *delay = 0;
+ return true;
+ }
+
+ int32_t prevWrapArounds = _wrapArounds;
+ CheckForWrapArounds(timestamp);
+
+ // This will be -1 for backward wrap arounds and +1 for forward wrap arounds
+ int32_t wrapAroundsSincePrev = _wrapArounds - prevWrapArounds;
+
+ // Account for reordering in jitter variance estimate in the future?
+ // Note that this also captures incomplete frames which are grabbed
+ // for decoding after a later frame has been complete, i.e. real
+ // packet losses.
+ if ((wrapAroundsSincePrev == 0 && timestamp < _prevTimestamp) ||
+ wrapAroundsSincePrev < 0) {
+ *delay = 0;
+ return false;
+ }
+
+  // Compute the compensated timestamp difference, convert it to ms, and
+  // round it to the closest integer.
+ _dTS = static_cast<int64_t>(
+ (timestamp + wrapAroundsSincePrev * (static_cast<int64_t>(1) << 32) -
+ _prevTimestamp) /
+ 90.0 +
+ 0.5);
+
+ // frameDelay is the difference of dT and dTS -- i.e. the difference of
+ // the wall clock time difference and the timestamp difference between
+ // two following frames.
+ *delay = static_cast<int64_t>(currentWallClock - _prevWallClock - _dTS);
+
+ _prevTimestamp = timestamp;
+ _prevWallClock = currentWallClock;
+
+ return true;
+}
+
+// Returns the current difference between incoming timestamps
+uint32_t VCMInterFrameDelay::CurrentTimeStampDiffMs() const {
+ if (_dTS < 0) {
+ return 0;
+ }
+ return static_cast<uint32_t>(_dTS);
+}
+
+// Investigates if the timestamp clock has overflowed since the last timestamp
+// and keeps track of the number of wrap arounds since reset.
+void VCMInterFrameDelay::CheckForWrapArounds(uint32_t timestamp) {
+ if (timestamp < _prevTimestamp) {
+    // This difference will probably be less than -2^31 if we have had a wrap
+    // around (e.g. timestamp = 1, _prevTimestamp = 2^32 - 1). Since it is
+    // cast to an int32_t, it should be positive.
+ if (static_cast<int32_t>(timestamp - _prevTimestamp) > 0) {
+ // Forward wrap around
+ _wrapArounds++;
+ }
+    // This difference will probably be less than -2^31 if we have had a
+    // backward wrap around. Since it is cast to an int32_t, it should be
+    // positive.
+ } else if (static_cast<int32_t>(_prevTimestamp - timestamp) > 0) {
+ // Backward wrap around
+ _wrapArounds--;
+ }
+}
+} // namespace webrtc
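To make the arithmetic in CalculateDelay() concrete, here is a worked example assuming the standard 90 kHz RTP video clock implied by the division by 90.0 above (all numbers are illustrative):

  _prevTimestamp  = 90000 ticks    timestamp        = 93000 ticks
  _prevWallClock  = 10000 ms       currentWallClock = 10040 ms

  _dTS   = trunc((93000 - 90000) / 90.0 + 0.5) = 33 ms   (nominal frame spacing)
  *delay = (10040 - 10000) - 33                = 7 ms    (network-induced jitter)

A forward wrap around adds 2^32 ticks to the timestamp difference before the division, which is why CheckForWrapArounds() runs first.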
diff --git a/webrtc/modules/video_coding/inter_frame_delay.h b/webrtc/modules/video_coding/inter_frame_delay.h
new file mode 100644
index 0000000000..94b73908bb
--- /dev/null
+++ b/webrtc/modules/video_coding/inter_frame_delay.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_INTER_FRAME_DELAY_H_
+#define WEBRTC_MODULES_VIDEO_CODING_INTER_FRAME_DELAY_H_
+
+#include "webrtc/typedefs.h"
+
+namespace webrtc {
+
+class VCMInterFrameDelay {
+ public:
+ explicit VCMInterFrameDelay(int64_t currentWallClock);
+
+  // Resets the estimate; |currentWallClock| becomes the new time base.
+ void Reset(int64_t currentWallClock);
+
+ // Calculates the delay of a frame with the given timestamp.
+ // This method is called when the frame is complete.
+ //
+ // Input:
+ // - timestamp : RTP timestamp of a received frame
+ // - *delay : Pointer to memory where the result should be
+ // stored
+ // - currentWallClock : The current time in milliseconds.
+ // Should be -1 for normal operation, only used
+ // for testing.
+ // Return value : true if OK, false when reordered timestamps
+ bool CalculateDelay(uint32_t timestamp,
+ int64_t* delay,
+ int64_t currentWallClock);
+
+ // Returns the current difference between incoming timestamps
+ //
+  // Return value         : Wrap-around compensated difference between
+  //                        incoming timestamps.
+ uint32_t CurrentTimeStampDiffMs() const;
+
+ private:
+  // Checks whether the RTP timestamp counter has had a wrap around
+  // between the current and the previously received frame.
+ //
+ // Input:
+  //          - timestamp         : RTP timestamp of the current frame.
+ void CheckForWrapArounds(uint32_t timestamp);
+
+ int64_t _zeroWallClock; // Local timestamp of the first video packet received
+ int32_t _wrapArounds; // Number of wrapArounds detected
+ // The previous timestamp passed to the delay estimate
+ uint32_t _prevTimestamp;
+ // The previous wall clock timestamp used by the delay estimate
+ int64_t _prevWallClock;
+ // Wrap-around compensated difference between incoming timestamps
+ int64_t _dTS;
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_CODING_INTER_FRAME_DELAY_H_
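A usage sketch for the class above; the caller function is hypothetical, and the RTP timestamp and clock are assumed to come from the depacketizer and system clock:

#include "webrtc/modules/video_coding/inter_frame_delay.h"

// Illustrative wrapper producing one jitter sample per complete frame.
int64_t JitterSampleMs(webrtc::VCMInterFrameDelay* estimator,
                       uint32_t rtp_timestamp,
                       int64_t now_ms) {
  int64_t delay_ms = 0;
  // CalculateDelay() returns false for reordered timestamps; such samples
  // must be discarded rather than fed to the jitter estimator.
  if (!estimator->CalculateDelay(rtp_timestamp, &delay_ms, now_ms))
    return 0;
  return delay_ms;
}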
diff --git a/webrtc/modules/video_coding/internal_defines.h b/webrtc/modules/video_coding/internal_defines.h
new file mode 100644
index 0000000000..e225726dea
--- /dev/null
+++ b/webrtc/modules/video_coding/internal_defines.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_INTERNAL_DEFINES_H_
+#define WEBRTC_MODULES_VIDEO_CODING_INTERNAL_DEFINES_H_
+
+#include "webrtc/typedefs.h"
+
+namespace webrtc {
+
+#define MASK_32_BITS(x) (0xFFFFFFFF & (x))
+
+inline uint32_t MaskWord64ToUWord32(int64_t w64) {
+ return static_cast<uint32_t>(MASK_32_BITS(w64));
+}
+
+#define VCM_MAX(a, b) (((a) > (b)) ? (a) : (b))
+#define VCM_MIN(a, b) (((a) < (b)) ? (a) : (b))
+
+#define VCM_DEFAULT_CODEC_WIDTH 352
+#define VCM_DEFAULT_CODEC_HEIGHT 288
+#define VCM_DEFAULT_FRAME_RATE 30
+#define VCM_MIN_BITRATE 30
+#define VCM_FLUSH_INDICATOR 4
+
+#define VCM_NO_RECEIVER_ID 0
+
+inline int32_t VCMId(const int32_t vcmId, const int32_t receiverId = 0) {
+ return static_cast<int32_t>((vcmId << 16) + receiverId);
+}
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_CODING_INTERNAL_DEFINES_H_
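Note that VCMId() packs the module id into the high 16 bits and the receiver id into the low 16 bits of a single 32-bit value; for example:

  VCMId(3, 7) == (3 << 16) + 7 == 0x00030007

MaskWord64ToUWord32() similarly truncates a 64-bit intermediate result to its low 32 bits.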
diff --git a/webrtc/modules/video_coding/main/source/jitter_buffer.cc b/webrtc/modules/video_coding/jitter_buffer.cc
index bfdd7867d9..640bcb4f22 100644
--- a/webrtc/modules/video_coding/main/source/jitter_buffer.cc
+++ b/webrtc/modules/video_coding/jitter_buffer.cc
@@ -7,7 +7,7 @@
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/video_coding/main/source/jitter_buffer.h"
+#include "webrtc/modules/video_coding/jitter_buffer.h"
#include <assert.h>
@@ -15,19 +15,19 @@
#include <utility>
#include "webrtc/base/checks.h"
+#include "webrtc/base/logging.h"
#include "webrtc/base/trace_event.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
-#include "webrtc/modules/video_coding/main/interface/video_coding.h"
-#include "webrtc/modules/video_coding/main/source/frame_buffer.h"
-#include "webrtc/modules/video_coding/main/source/inter_frame_delay.h"
-#include "webrtc/modules/video_coding/main/source/internal_defines.h"
-#include "webrtc/modules/video_coding/main/source/jitter_buffer_common.h"
-#include "webrtc/modules/video_coding/main/source/jitter_estimator.h"
-#include "webrtc/modules/video_coding/main/source/packet.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "webrtc/modules/video_coding/include/video_coding.h"
+#include "webrtc/modules/video_coding/frame_buffer.h"
+#include "webrtc/modules/video_coding/inter_frame_delay.h"
+#include "webrtc/modules/video_coding/internal_defines.h"
+#include "webrtc/modules/video_coding/jitter_buffer_common.h"
+#include "webrtc/modules/video_coding/jitter_estimator.h"
+#include "webrtc/modules/video_coding/packet.h"
#include "webrtc/system_wrappers/include/clock.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
#include "webrtc/system_wrappers/include/event_wrapper.h"
-#include "webrtc/system_wrappers/include/logging.h"
#include "webrtc/system_wrappers/include/metrics.h"
namespace webrtc {
@@ -38,6 +38,10 @@ static const uint32_t kSsCleanupIntervalSec = 60;
// Use this rtt if no value has been reported.
static const int64_t kDefaultRtt = 200;
+// Request a keyframe if no continuous frame has been received for this
+// number of milliseconds and NACKs are disabled.
+static const int64_t kMaxDiscontinuousFramesTime = 1000;
+
typedef std::pair<uint32_t, VCMFrameBuffer*> FrameListPair;
bool IsKeyFrame(FrameListPair pair) {
@@ -89,7 +93,7 @@ int FrameList::RecycleFramesUntilKeyFrame(FrameList::iterator* key_frame_it,
}
void FrameList::CleanUpOldOrEmptyFrames(VCMDecodingState* decoding_state,
- UnorderedFrameList* free_frames) {
+ UnorderedFrameList* free_frames) {
while (!empty()) {
VCMFrameBuffer* oldest_frame = Front();
bool remove_frame = false;
@@ -168,6 +172,7 @@ void Vp9SsMap::AdvanceFront(uint32_t timestamp) {
ss_map_[timestamp] = gof;
}
+// TODO(asapersson): Update according to updates in RTP payload profile.
bool Vp9SsMap::UpdatePacket(VCMPacket* packet) {
uint8_t gof_idx = packet->codecSpecificHeader.codecHeader.VP9.gof_idx;
if (gof_idx == kNoGofIdx)
@@ -186,7 +191,7 @@ bool Vp9SsMap::UpdatePacket(VCMPacket* packet) {
// TODO(asapersson): Set vp9.ref_picture_id[i] and add usage.
vp9->num_ref_pics = it->second.num_ref_pics[gof_idx];
- for (size_t i = 0; i < it->second.num_ref_pics[gof_idx]; ++i) {
+ for (uint8_t i = 0; i < it->second.num_ref_pics[gof_idx]; ++i) {
vp9->pid_diff[i] = it->second.pid_diff[gof_idx][i];
}
return true;
@@ -214,7 +219,7 @@ VCMJitterBuffer::VCMJitterBuffer(Clock* clock,
: clock_(clock),
running_(false),
crit_sect_(CriticalSectionWrapper::CreateCriticalSection()),
- frame_event_(event.Pass()),
+ frame_event_(std::move(event)),
max_number_of_frames_(kStartNumberOfFrames),
free_frames_(),
decodable_frames_(),
@@ -276,17 +281,18 @@ void VCMJitterBuffer::UpdateHistograms() {
return;
}
- RTC_HISTOGRAM_PERCENTAGE("WebRTC.Video.DiscardedPacketsInPercent",
- num_discarded_packets_ * 100 / num_packets_);
- RTC_HISTOGRAM_PERCENTAGE("WebRTC.Video.DuplicatedPacketsInPercent",
- num_duplicated_packets_ * 100 / num_packets_);
+ RTC_HISTOGRAM_PERCENTAGE_SPARSE("WebRTC.Video.DiscardedPacketsInPercent",
+ num_discarded_packets_ * 100 / num_packets_);
+ RTC_HISTOGRAM_PERCENTAGE_SPARSE("WebRTC.Video.DuplicatedPacketsInPercent",
+ num_duplicated_packets_ * 100 / num_packets_);
int total_frames =
receive_statistics_.key_frames + receive_statistics_.delta_frames;
if (total_frames > 0) {
- RTC_HISTOGRAM_COUNTS_100("WebRTC.Video.CompleteFramesReceivedPerSecond",
+ RTC_HISTOGRAM_COUNTS_SPARSE_100(
+ "WebRTC.Video.CompleteFramesReceivedPerSecond",
static_cast<int>((total_frames / elapsed_sec) + 0.5f));
- RTC_HISTOGRAM_COUNTS_1000(
+ RTC_HISTOGRAM_COUNTS_SPARSE_1000(
"WebRTC.Video.KeyFramesReceivedInPermille",
static_cast<int>(
(receive_statistics_.key_frames * 1000.0f / total_frames) + 0.5f));
@@ -316,7 +322,6 @@ void VCMJitterBuffer::Start() {
first_packet_since_reset_ = true;
rtt_ms_ = kDefaultRtt;
last_decoded_state_.Reset();
- vp9_ss_map_.Reset();
}
void VCMJitterBuffer::Stop() {
@@ -324,7 +329,6 @@ void VCMJitterBuffer::Stop() {
UpdateHistograms();
running_ = false;
last_decoded_state_.Reset();
- vp9_ss_map_.Reset();
// Make sure all frames are free and reset.
for (FrameList::iterator it = decodable_frames_.begin();
@@ -356,7 +360,6 @@ void VCMJitterBuffer::Flush() {
decodable_frames_.Reset(&free_frames_);
incomplete_frames_.Reset(&free_frames_);
last_decoded_state_.Reset(); // TODO(mikhal): sync reset.
- vp9_ss_map_.Reset();
num_consecutive_old_packets_ = 0;
// Also reset the jitter and delay estimates
jitter_estimate_.Reset();
@@ -428,8 +431,8 @@ void VCMJitterBuffer::IncomingRateStatistics(unsigned int* framerate,
if (incoming_bit_count_ == 0) {
*bitrate = 0;
} else {
- *bitrate = 10 * ((100 * incoming_bit_count_) /
- static_cast<unsigned int>(diff));
+ *bitrate =
+ 10 * ((100 * incoming_bit_count_) / static_cast<unsigned int>(diff));
}
incoming_bit_rate_ = *bitrate;
@@ -470,8 +473,8 @@ bool VCMJitterBuffer::CompleteSequenceWithNextFrame() {
// Returns immediately or waits up to |max_wait_time_ms| ms for a complete
// frame to arrive, |max_wait_time_ms| decided by the caller.
-bool VCMJitterBuffer::NextCompleteTimestamp(
- uint32_t max_wait_time_ms, uint32_t* timestamp) {
+bool VCMJitterBuffer::NextCompleteTimestamp(uint32_t max_wait_time_ms,
+ uint32_t* timestamp) {
crit_sect_->Enter();
if (!running_) {
crit_sect_->Leave();
@@ -481,13 +484,13 @@ bool VCMJitterBuffer::NextCompleteTimestamp(
if (decodable_frames_.empty() ||
decodable_frames_.Front()->GetState() != kStateComplete) {
- const int64_t end_wait_time_ms = clock_->TimeInMilliseconds() +
- max_wait_time_ms;
+ const int64_t end_wait_time_ms =
+ clock_->TimeInMilliseconds() + max_wait_time_ms;
int64_t wait_time_ms = max_wait_time_ms;
while (wait_time_ms > 0) {
crit_sect_->Leave();
const EventTypeWrapper ret =
- frame_event_->Wait(static_cast<uint32_t>(wait_time_ms));
+ frame_event_->Wait(static_cast<uint32_t>(wait_time_ms));
crit_sect_->Enter();
if (ret == kEventSignaled) {
// Are we shutting down the jitter buffer?
@@ -530,16 +533,25 @@ bool VCMJitterBuffer::NextMaybeIncompleteTimestamp(uint32_t* timestamp) {
CleanUpOldOrEmptyFrames();
+ VCMFrameBuffer* oldest_frame;
if (decodable_frames_.empty()) {
- return false;
- }
- VCMFrameBuffer* oldest_frame = decodable_frames_.Front();
- // If we have exactly one frame in the buffer, release it only if it is
- // complete. We know decodable_frames_ is not empty due to the previous
- // check.
- if (decodable_frames_.size() == 1 && incomplete_frames_.empty()
- && oldest_frame->GetState() != kStateComplete) {
- return false;
+ if (nack_mode_ != kNoNack || incomplete_frames_.size() <= 1) {
+ return false;
+ }
+ oldest_frame = incomplete_frames_.Front();
+ // Frame will only be removed from buffer if it is complete (or decodable).
+ if (oldest_frame->GetState() < kStateComplete) {
+ return false;
+ }
+ } else {
+ oldest_frame = decodable_frames_.Front();
+ // If we have exactly one frame in the buffer, release it only if it is
+ // complete. We know decodable_frames_ is not empty due to the previous
+ // check.
+ if (decodable_frames_.size() == 1 && incomplete_frames_.empty() &&
+ oldest_frame->GetState() != kStateComplete) {
+ return false;
+ }
}
*timestamp = oldest_frame->TimeStamp();
@@ -576,8 +588,7 @@ VCMEncodedFrame* VCMJitterBuffer::ExtractAndSetDecode(uint32_t timestamp) {
} else {
// Wait for this one to get complete.
waiting_for_completion_.frame_size = frame->Length();
- waiting_for_completion_.latest_packet_time =
- frame->LatestPacketTimeMs();
+ waiting_for_completion_.latest_packet_time = frame->LatestPacketTimeMs();
waiting_for_completion_.timestamp = frame->TimeStamp();
}
}
@@ -688,21 +699,6 @@ VCMFrameBufferEnum VCMJitterBuffer::InsertPacket(const VCMPacket& packet,
num_consecutive_old_packets_ = 0;
- if (packet.codec == kVideoCodecVP9) {
- if (packet.codecSpecificHeader.codecHeader.VP9.flexible_mode) {
- // TODO(asapersson): Add support for flexible mode.
- return kGeneralError;
- }
- if (!packet.codecSpecificHeader.codecHeader.VP9.flexible_mode) {
- if (vp9_ss_map_.Insert(packet))
- vp9_ss_map_.UpdateFrames(&incomplete_frames_);
-
- vp9_ss_map_.UpdatePacket(const_cast<VCMPacket*>(&packet));
- }
- if (!last_decoded_state_.in_initial_state())
- vp9_ss_map_.RemoveOld(last_decoded_state_.time_stamp());
- }
-
VCMFrameBuffer* frame;
FrameList* frame_list;
const VCMFrameBufferEnum error = GetFrame(packet, &frame, &frame_list);
@@ -745,8 +741,8 @@ VCMFrameBufferEnum VCMJitterBuffer::InsertPacket(const VCMPacket& packet,
frame->InsertPacket(packet, now_ms, decode_error_mode_, frame_data);
if (previous_state != kStateComplete) {
- TRACE_EVENT_ASYNC_BEGIN1("webrtc", "Video", frame->TimeStamp(),
- "timestamp", frame->TimeStamp());
+ TRACE_EVENT_ASYNC_BEGIN1("webrtc", "Video", frame->TimeStamp(), "timestamp",
+ frame->TimeStamp());
}
if (buffer_state > 0) {
@@ -763,8 +759,8 @@ VCMFrameBufferEnum VCMJitterBuffer::InsertPacket(const VCMPacket& packet,
buffer_state = kFlushIndicator;
}
- latest_received_sequence_number_ = LatestSequenceNumber(
- latest_received_sequence_number_, packet.seqNum);
+ latest_received_sequence_number_ =
+ LatestSequenceNumber(latest_received_sequence_number_, packet.seqNum);
}
}
@@ -796,6 +792,12 @@ VCMFrameBufferEnum VCMJitterBuffer::InsertPacket(const VCMPacket& packet,
FindAndInsertContinuousFrames(*frame);
} else {
incomplete_frames_.InsertFrame(frame);
+ // If NACKs are enabled, keyframes are triggered by |GetNackList|.
+ if (nack_mode_ == kNoNack &&
+ NonContinuousOrIncompleteDuration() >
+ 90 * kMaxDiscontinuousFramesTime) {
+ return kFlushIndicator;
+ }
}
break;
}
@@ -806,6 +808,12 @@ VCMFrameBufferEnum VCMJitterBuffer::InsertPacket(const VCMPacket& packet,
return kNoError;
} else {
incomplete_frames_.InsertFrame(frame);
+ // If NACKs are enabled, keyframes are triggered by |GetNackList|.
+ if (nack_mode_ == kNoNack &&
+ NonContinuousOrIncompleteDuration() >
+ 90 * kMaxDiscontinuousFramesTime) {
+ return kFlushIndicator;
+ }
}
break;
}
@@ -824,15 +832,15 @@ VCMFrameBufferEnum VCMJitterBuffer::InsertPacket(const VCMPacket& packet,
case kFlushIndicator:
free_frames_.push_back(frame);
return kFlushIndicator;
- default: assert(false);
+ default:
+ assert(false);
}
return buffer_state;
}
-bool VCMJitterBuffer::IsContinuousInState(const VCMFrameBuffer& frame,
+bool VCMJitterBuffer::IsContinuousInState(
+ const VCMFrameBuffer& frame,
const VCMDecodingState& decoding_state) const {
- if (decode_error_mode_ == kWithErrors)
- return true;
// Is this frame (complete or decodable) and continuous?
// kStateDecodable will never be set when decode_error_mode_ is false
// as SessionInfo determines this state based on the error mode (and frame
@@ -849,7 +857,7 @@ bool VCMJitterBuffer::IsContinuous(const VCMFrameBuffer& frame) const {
VCMDecodingState decoding_state;
decoding_state.CopyFrom(last_decoded_state_);
for (FrameList::const_iterator it = decodable_frames_.begin();
- it != decodable_frames_.end(); ++it) {
+ it != decodable_frames_.end(); ++it) {
VCMFrameBuffer* decodable_frame = it->second;
if (IsNewerTimestamp(decodable_frame->TimeStamp(), frame.TimeStamp())) {
break;
@@ -882,7 +890,7 @@ void VCMJitterBuffer::FindAndInsertContinuousFramesWithState(
// 1. Continuous base or sync layer.
// 2. The end of the list was reached.
for (FrameList::iterator it = incomplete_frames_.begin();
- it != incomplete_frames_.end();) {
+ it != incomplete_frames_.end();) {
VCMFrameBuffer* frame = it->second;
if (IsNewerTimestamp(original_decoded_state.time_stamp(),
frame->TimeStamp())) {
@@ -992,16 +1000,18 @@ std::vector<uint16_t> VCMJitterBuffer::GetNackList(bool* request_key_frame) {
if (last_decoded_state_.in_initial_state()) {
VCMFrameBuffer* next_frame = NextFrame();
const bool first_frame_is_key = next_frame &&
- next_frame->FrameType() == kVideoFrameKey &&
- next_frame->HaveFirstPacket();
+ next_frame->FrameType() == kVideoFrameKey &&
+ next_frame->HaveFirstPacket();
if (!first_frame_is_key) {
- bool have_non_empty_frame = decodable_frames_.end() != find_if(
- decodable_frames_.begin(), decodable_frames_.end(),
- HasNonEmptyState);
+ bool have_non_empty_frame =
+ decodable_frames_.end() != find_if(decodable_frames_.begin(),
+ decodable_frames_.end(),
+ HasNonEmptyState);
if (!have_non_empty_frame) {
- have_non_empty_frame = incomplete_frames_.end() != find_if(
- incomplete_frames_.begin(), incomplete_frames_.end(),
- HasNonEmptyState);
+ have_non_empty_frame =
+ incomplete_frames_.end() != find_if(incomplete_frames_.begin(),
+ incomplete_frames_.end(),
+ HasNonEmptyState);
}
bool found_key_frame = RecycleFramesUntilKeyFrame();
if (!found_key_frame) {
@@ -1020,8 +1030,8 @@ std::vector<uint16_t> VCMJitterBuffer::GetNackList(bool* request_key_frame) {
LOG_F(LS_WARNING) << "Too long non-decodable duration: "
<< non_continuous_incomplete_duration << " > "
<< 90 * max_incomplete_time_ms_;
- FrameList::reverse_iterator rit = find_if(incomplete_frames_.rbegin(),
- incomplete_frames_.rend(), IsKeyFrame);
+ FrameList::reverse_iterator rit = find_if(
+ incomplete_frames_.rbegin(), incomplete_frames_.rend(), IsKeyFrame);
if (rit == incomplete_frames_.rend()) {
// Request a key frame if we don't have one already.
*request_key_frame = true;
@@ -1061,8 +1071,7 @@ bool VCMJitterBuffer::UpdateNackList(uint16_t sequence_number) {
// Make sure we don't add packets which are already too old to be decoded.
if (!last_decoded_state_.in_initial_state()) {
latest_received_sequence_number_ = LatestSequenceNumber(
- latest_received_sequence_number_,
- last_decoded_state_.sequence_num());
+ latest_received_sequence_number_, last_decoded_state_.sequence_num());
}
if (IsNewerSequenceNumber(sequence_number,
latest_received_sequence_number_)) {
@@ -1112,8 +1121,8 @@ bool VCMJitterBuffer::MissingTooOldPacket(
if (missing_sequence_numbers_.empty()) {
return false;
}
- const uint16_t age_of_oldest_missing_packet = latest_sequence_number -
- *missing_sequence_numbers_.begin();
+ const uint16_t age_of_oldest_missing_packet =
+ latest_sequence_number - *missing_sequence_numbers_.begin();
// Recycle frames if the NACK list contains too old sequence numbers as
// the packets may have already been dropped by the sender.
return age_of_oldest_missing_packet > max_packet_age_to_nack_;
@@ -1121,8 +1130,8 @@ bool VCMJitterBuffer::MissingTooOldPacket(
bool VCMJitterBuffer::HandleTooOldPackets(uint16_t latest_sequence_number) {
bool key_frame_found = false;
- const uint16_t age_of_oldest_missing_packet = latest_sequence_number -
- *missing_sequence_numbers_.begin();
+ const uint16_t age_of_oldest_missing_packet =
+ latest_sequence_number - *missing_sequence_numbers_.begin();
LOG_F(LS_WARNING) << "NACK list contains too old sequence numbers: "
<< age_of_oldest_missing_packet << " > "
<< max_packet_age_to_nack_;
@@ -1136,9 +1145,9 @@ void VCMJitterBuffer::DropPacketsFromNackList(
uint16_t last_decoded_sequence_number) {
// Erase all sequence numbers from the NACK list which we won't need any
// longer.
- missing_sequence_numbers_.erase(missing_sequence_numbers_.begin(),
- missing_sequence_numbers_.upper_bound(
- last_decoded_sequence_number));
+ missing_sequence_numbers_.erase(
+ missing_sequence_numbers_.begin(),
+ missing_sequence_numbers_.upper_bound(last_decoded_sequence_number));
}
int64_t VCMJitterBuffer::LastDecodedTimestamp() const {
@@ -1222,11 +1231,11 @@ void VCMJitterBuffer::CountFrame(const VCMFrameBuffer& frame) {
incoming_frame_count_++;
if (frame.FrameType() == kVideoFrameKey) {
- TRACE_EVENT_ASYNC_STEP0("webrtc", "Video",
- frame.TimeStamp(), "KeyComplete");
+ TRACE_EVENT_ASYNC_STEP0("webrtc", "Video", frame.TimeStamp(),
+ "KeyComplete");
} else {
- TRACE_EVENT_ASYNC_STEP0("webrtc", "Video",
- frame.TimeStamp(), "DeltaComplete");
+ TRACE_EVENT_ASYNC_STEP0("webrtc", "Video", frame.TimeStamp(),
+ "DeltaComplete");
}
// Update receive statistics. We count all layers, thus when you use layers
@@ -1244,13 +1253,13 @@ void VCMJitterBuffer::CountFrame(const VCMFrameBuffer& frame) {
void VCMJitterBuffer::UpdateAveragePacketsPerFrame(int current_number_packets) {
if (frame_counter_ > kFastConvergeThreshold) {
- average_packets_per_frame_ = average_packets_per_frame_
- * (1 - kNormalConvergeMultiplier)
- + current_number_packets * kNormalConvergeMultiplier;
+ average_packets_per_frame_ =
+ average_packets_per_frame_ * (1 - kNormalConvergeMultiplier) +
+ current_number_packets * kNormalConvergeMultiplier;
} else if (frame_counter_ > 0) {
- average_packets_per_frame_ = average_packets_per_frame_
- * (1 - kFastConvergeMultiplier)
- + current_number_packets * kFastConvergeMultiplier;
+ average_packets_per_frame_ =
+ average_packets_per_frame_ * (1 - kFastConvergeMultiplier) +
+ current_number_packets * kFastConvergeMultiplier;
frame_counter_++;
} else {
average_packets_per_frame_ = current_number_packets;
@@ -1272,7 +1281,7 @@ void VCMJitterBuffer::CleanUpOldOrEmptyFrames() {
// Must be called from within |crit_sect_|.
bool VCMJitterBuffer::IsPacketRetransmitted(const VCMPacket& packet) const {
return missing_sequence_numbers_.find(packet.seqNum) !=
- missing_sequence_numbers_.end();
+ missing_sequence_numbers_.end();
}
// Must be called under the critical section |crit_sect_|. Should never be
@@ -1304,18 +1313,16 @@ void VCMJitterBuffer::UpdateJitterEstimate(const VCMFrameBuffer& frame,
// Must be called under the critical section |crit_sect_|. Should never be
// called with retransmitted frames, they must be filtered out before this
// function is called.
-void VCMJitterBuffer::UpdateJitterEstimate(
- int64_t latest_packet_time_ms,
- uint32_t timestamp,
- unsigned int frame_size,
- bool incomplete_frame) {
+void VCMJitterBuffer::UpdateJitterEstimate(int64_t latest_packet_time_ms,
+ uint32_t timestamp,
+ unsigned int frame_size,
+ bool incomplete_frame) {
if (latest_packet_time_ms == -1) {
return;
}
int64_t frame_delay;
- bool not_reordered = inter_frame_delay_.CalculateDelay(timestamp,
- &frame_delay,
- latest_packet_time_ms);
+ bool not_reordered = inter_frame_delay_.CalculateDelay(
+ timestamp, &frame_delay, latest_packet_time_ms);
// Filter out frames which have been reordered in time by the network
if (not_reordered) {
// Update the jitter estimate with the new samples
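The new kMaxDiscontinuousFramesTime checks added in InsertPacket() compare a duration measured in RTP ticks against a wall-clock limit, hence the factor 90 (ticks per millisecond at the 90 kHz video clock). A sketch of the predicate in isolation, with the jitter buffer internals replaced by parameters (the standalone function is hypothetical):

// Illustrative standalone version of the keyframe-request condition.
bool ShouldFlushForDiscontinuity(int64_t non_continuous_duration_ticks,
                                 bool nack_enabled) {
  const int64_t kMaxDiscontinuousFramesTimeMs = 1000;
  // 90 ticks per ms: 1000 ms of discontinuity == 90000 RTP ticks.
  return !nack_enabled &&
         non_continuous_duration_ticks > 90 * kMaxDiscontinuousFramesTimeMs;
}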
diff --git a/webrtc/modules/video_coding/main/source/jitter_buffer.h b/webrtc/modules/video_coding/jitter_buffer.h
index f4a3638f7d..01e27752d2 100644
--- a/webrtc/modules/video_coding/main/source/jitter_buffer.h
+++ b/webrtc/modules/video_coding/jitter_buffer.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_JITTER_BUFFER_H_
-#define WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_JITTER_BUFFER_H_
+#ifndef WEBRTC_MODULES_VIDEO_CODING_JITTER_BUFFER_H_
+#define WEBRTC_MODULES_VIDEO_CODING_JITTER_BUFFER_H_
#include <list>
#include <map>
@@ -18,22 +18,19 @@
#include "webrtc/base/constructormagic.h"
#include "webrtc/base/thread_annotations.h"
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/modules/video_coding/main/interface/video_coding.h"
-#include "webrtc/modules/video_coding/main/interface/video_coding_defines.h"
-#include "webrtc/modules/video_coding/main/source/decoding_state.h"
-#include "webrtc/modules/video_coding/main/source/inter_frame_delay.h"
-#include "webrtc/modules/video_coding/main/source/jitter_buffer_common.h"
-#include "webrtc/modules/video_coding/main/source/jitter_estimator.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/video_coding/include/video_coding.h"
+#include "webrtc/modules/video_coding/include/video_coding_defines.h"
+#include "webrtc/modules/video_coding/decoding_state.h"
+#include "webrtc/modules/video_coding/inter_frame_delay.h"
+#include "webrtc/modules/video_coding/jitter_buffer_common.h"
+#include "webrtc/modules/video_coding/jitter_estimator.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
#include "webrtc/typedefs.h"
namespace webrtc {
-enum VCMNackMode {
- kNack,
- kNoNack
-};
+enum VCMNackMode { kNack, kNoNack };
// forward declarations
class Clock;
@@ -54,8 +51,7 @@ struct VCMJitterSample {
class TimestampLessThan {
public:
- bool operator() (uint32_t timestamp1,
- uint32_t timestamp2) const {
+ bool operator()(uint32_t timestamp1, uint32_t timestamp2) const {
return IsNewerTimestamp(timestamp2, timestamp1);
}
};
@@ -68,7 +64,7 @@ class FrameList
VCMFrameBuffer* Front() const;
VCMFrameBuffer* Back() const;
int RecycleFramesUntilKeyFrame(FrameList::iterator* key_frame_it,
- UnorderedFrameList* free_frames);
+ UnorderedFrameList* free_frames);
void CleanUpOldOrEmptyFrames(VCMDecodingState* decoding_state,
UnorderedFrameList* free_frames);
void Reset(UnorderedFrameList* free_frames);
@@ -141,8 +137,7 @@ class VCMJitterBuffer {
int num_discarded_packets() const;
// Statistics, Calculate frame and bit rates.
- void IncomingRateStatistics(unsigned int* framerate,
- unsigned int* bitrate);
+ void IncomingRateStatistics(unsigned int* framerate, unsigned int* bitrate);
// Checks if the packet sequence will be complete if the next frame would be
// grabbed for decoding. That is, if a frame has been lost between the
@@ -177,8 +172,7 @@ class VCMJitterBuffer {
// Inserts a packet into a frame returned from GetFrame().
// If the return value is <= 0, |frame| is invalidated and the pointer must
// be dropped after this function returns.
- VCMFrameBufferEnum InsertPacket(const VCMPacket& packet,
- bool* retransmitted);
+ VCMFrameBufferEnum InsertPacket(const VCMPacket& packet, bool* retransmitted);
// Returns the estimated jitter in milliseconds.
uint32_t EstimatedJitterMs();
@@ -192,7 +186,8 @@ class VCMJitterBuffer {
// |low_rtt_nack_threshold_ms| is an RTT threshold in ms below which we expect
// to rely on NACK only, and therefore are using larger buffers to have time
// to wait for retransmissions.
- void SetNackMode(VCMNackMode mode, int64_t low_rtt_nack_threshold_ms,
+ void SetNackMode(VCMNackMode mode,
+ int64_t low_rtt_nack_threshold_ms,
int64_t high_rtt_nack_threshold_ms);
void SetNackSettings(size_t max_nack_list_size,
@@ -209,7 +204,7 @@ class VCMJitterBuffer {
// session. Changes will not influence frames already in the buffer.
void SetDecodeErrorMode(VCMDecodeErrorMode error_mode);
int64_t LastDecodedTimestamp() const;
- VCMDecodeErrorMode decode_error_mode() const {return decode_error_mode_;}
+ VCMDecodeErrorMode decode_error_mode() const { return decode_error_mode_; }
// Used to compute time of complete continuous frames. Returns the timestamps
// corresponding to the start and end of the continuous complete buffer.
@@ -220,8 +215,8 @@ class VCMJitterBuffer {
private:
class SequenceNumberLessThan {
public:
- bool operator() (const uint16_t& sequence_number1,
- const uint16_t& sequence_number2) const {
+ bool operator()(const uint16_t& sequence_number1,
+ const uint16_t& sequence_number2) const {
return IsNewerSequenceNumber(sequence_number2, sequence_number1);
}
};
@@ -338,8 +333,6 @@ class VCMJitterBuffer {
FrameList incomplete_frames_ GUARDED_BY(crit_sect_);
VCMDecodingState last_decoded_state_ GUARDED_BY(crit_sect_);
bool first_packet_since_reset_;
- // Contains scalability structure data for VP9.
- Vp9SsMap vp9_ss_map_ GUARDED_BY(crit_sect_);
// Statistics.
VCMReceiveStatisticsCallback* stats_callback_ GUARDED_BY(crit_sect_);
@@ -393,4 +386,4 @@ class VCMJitterBuffer {
};
} // namespace webrtc
-#endif // WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_JITTER_BUFFER_H_
+#endif // WEBRTC_MODULES_VIDEO_CODING_JITTER_BUFFER_H_
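A hedged configuration sketch using the setters declared above; the threshold values and the third SetNackSettings parameter name are illustrative assumptions, not values taken from the source:

// jitter_buffer is assumed to be a constructed webrtc::VCMJitterBuffer*.
jitter_buffer->SetNackMode(webrtc::kNack,
                           /*low_rtt_nack_threshold_ms=*/20,
                           /*high_rtt_nack_threshold_ms=*/100);
jitter_buffer->SetNackSettings(/*max_nack_list_size=*/250,
                               /*max_packet_age_to_nack=*/450,
                               /*max_incomplete_time_ms=*/1000);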
diff --git a/webrtc/modules/video_coding/main/source/jitter_buffer_common.h b/webrtc/modules/video_coding/jitter_buffer_common.h
index 97af78087a..65356f1d1b 100644
--- a/webrtc/modules/video_coding/main/source/jitter_buffer_common.h
+++ b/webrtc/modules/video_coding/jitter_buffer_common.h
@@ -19,11 +19,11 @@ namespace webrtc {
static const float kFastConvergeMultiplier = 0.4f;
static const float kNormalConvergeMultiplier = 0.2f;
-enum { kMaxNumberOfFrames = 300 };
-enum { kStartNumberOfFrames = 6 };
-enum { kMaxVideoDelayMs = 10000 };
+enum { kMaxNumberOfFrames = 300 };
+enum { kStartNumberOfFrames = 6 };
+enum { kMaxVideoDelayMs = 10000 };
enum { kPacketsPerFrameMultiplier = 5 };
-enum { kFastConvergeThreshold = 5};
+enum { kFastConvergeThreshold = 5 };
enum VCMJitterBufferEnum {
kMaxConsecutiveOldFrames = 60,
@@ -36,36 +36,36 @@ enum VCMJitterBufferEnum {
};
enum VCMFrameBufferEnum {
- kOutOfBoundsPacket = -7,
- kNotInitialized = -6,
- kOldPacket = -5,
- kGeneralError = -4,
- kFlushIndicator = -3, // Indicator that a flush has occurred.
- kTimeStampError = -2,
- kSizeError = -1,
- kNoError = 0,
- kIncomplete = 1, // Frame incomplete.
- kCompleteSession = 3, // at least one layer in the frame complete.
- kDecodableSession = 4, // Frame incomplete, but ready to be decoded
- kDuplicatePacket = 5 // We're receiving a duplicate packet.
+ kOutOfBoundsPacket = -7,
+ kNotInitialized = -6,
+ kOldPacket = -5,
+ kGeneralError = -4,
+ kFlushIndicator = -3, // Indicator that a flush has occurred.
+ kTimeStampError = -2,
+ kSizeError = -1,
+ kNoError = 0,
+ kIncomplete = 1, // Frame incomplete.
+  kCompleteSession = 3,  // At least one layer in the frame is complete.
+  kDecodableSession = 4,  // Frame incomplete, but ready to be decoded.
+ kDuplicatePacket = 5 // We're receiving a duplicate packet.
};
enum VCMFrameBufferStateEnum {
- kStateEmpty, // frame popped by the RTP receiver
- kStateIncomplete, // frame that have one or more packet(s) stored
- kStateComplete, // frame that have all packets
- kStateDecodable // Hybrid mode - frame can be decoded
+ kStateEmpty, // frame popped by the RTP receiver
+  kStateIncomplete,  // frame that has one or more packet(s) stored
+  kStateComplete,    // frame that has all packets
+ kStateDecodable // Hybrid mode - frame can be decoded
};
-enum { kH264StartCodeLengthBytes = 4};
+enum { kH264StartCodeLengthBytes = 4 };
// Used to indicate if a received packet contains a complete NALU (or equivalent)
enum VCMNaluCompleteness {
- kNaluUnset = 0, // Packet has not been filled.
- kNaluComplete = 1, // Packet can be decoded as is.
- kNaluStart, // Packet contain beginning of NALU
- kNaluIncomplete, // Packet is not beginning or end of NALU
- kNaluEnd, // Packet is the end of a NALU
+ kNaluUnset = 0, // Packet has not been filled.
+ kNaluComplete = 1, // Packet can be decoded as is.
+  kNaluStart,       // Packet contains the beginning of a NALU
+  kNaluIncomplete,  // Packet is neither the beginning nor the end of a NALU
+ kNaluEnd, // Packet is the end of a NALU
};
} // namespace webrtc
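InsertPacket() (declared in jitter_buffer.h above) returns one of these VCMFrameBufferEnum values per packet, and the unit tests below assert on exactly these codes. A sketch of the caller-side branching; the handler function is hypothetical:

// Illustrative handler for a VCMFrameBufferEnum returned by InsertPacket().
void HandleInsertResult(webrtc::VCMFrameBufferEnum state) {
  if (state == webrtc::kCompleteSession ||
      state == webrtc::kDecodableSession) {
    // A complete (or decodable) frame is available; wake the decode thread.
  } else if (state == webrtc::kFlushIndicator) {
    // The buffer flushed; the receiver should request a key frame.
  } else if (state < webrtc::kNoError) {
    // Negative codes are errors, e.g. kOldPacket or kTimeStampError.
  }
}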
diff --git a/webrtc/modules/video_coding/main/source/jitter_buffer_unittest.cc b/webrtc/modules/video_coding/jitter_buffer_unittest.cc
index d6c6d4985b..8abc1b5471 100644
--- a/webrtc/modules/video_coding/main/source/jitter_buffer_unittest.cc
+++ b/webrtc/modules/video_coding/jitter_buffer_unittest.cc
@@ -13,12 +13,12 @@
#include <list>
#include "testing/gtest/include/gtest/gtest.h"
-#include "webrtc/modules/video_coding/main/source/frame_buffer.h"
-#include "webrtc/modules/video_coding/main/source/jitter_buffer.h"
-#include "webrtc/modules/video_coding/main/source/media_opt_util.h"
-#include "webrtc/modules/video_coding/main/source/packet.h"
-#include "webrtc/modules/video_coding/main/source/test/stream_generator.h"
-#include "webrtc/modules/video_coding/main/test/test_util.h"
+#include "webrtc/modules/video_coding/frame_buffer.h"
+#include "webrtc/modules/video_coding/jitter_buffer.h"
+#include "webrtc/modules/video_coding/media_opt_util.h"
+#include "webrtc/modules/video_coding/packet.h"
+#include "webrtc/modules/video_coding/test/stream_generator.h"
+#include "webrtc/modules/video_coding/test/test_util.h"
#include "webrtc/system_wrappers/include/clock.h"
#include "webrtc/system_wrappers/include/metrics.h"
#include "webrtc/test/histogram.h"
@@ -26,13 +26,12 @@
namespace webrtc {
namespace {
- const uint32_t kProcessIntervalSec = 60;
+const uint32_t kProcessIntervalSec = 60;
} // namespace
class Vp9SsMapTest : public ::testing::Test {
protected:
- Vp9SsMapTest()
- : packet_(data_, 1400, 1234, 1, true) {}
+ Vp9SsMapTest() : packet_(data_, 1400, 1234, 1, true) {}
virtual void SetUp() {
packet_.isFirstPacket = true;
@@ -234,8 +233,8 @@ class TestBasicJitterBuffer : public ::testing::Test {
}
void CheckOutFrame(VCMEncodedFrame* frame_out,
- unsigned int size,
- bool startCode) {
+ unsigned int size,
+ bool startCode) {
ASSERT_TRUE(frame_out);
const uint8_t* outData = frame_out->Buffer();
@@ -280,7 +279,6 @@ class TestBasicJitterBuffer : public ::testing::Test {
rtc::scoped_ptr<VCMJitterBuffer> jitter_buffer_;
};
-
class TestRunningJitterBuffer : public ::testing::Test {
protected:
enum { kDataBufferSize = 10 };
@@ -294,8 +292,8 @@ class TestRunningJitterBuffer : public ::testing::Test {
rtc::scoped_ptr<EventWrapper>(event_factory_.CreateEvent()));
stream_generator_ = new StreamGenerator(0, clock_->TimeInMilliseconds());
jitter_buffer_->Start();
- jitter_buffer_->SetNackSettings(max_nack_list_size_,
- oldest_packet_to_nack_, 0);
+ jitter_buffer_->SetNackSettings(max_nack_list_size_, oldest_packet_to_nack_,
+ 0);
memset(data_buffer_, 0, kDataBufferSize);
}
@@ -396,9 +394,7 @@ class TestJitterBufferNack : public TestRunningJitterBuffer {
jitter_buffer_->SetNackMode(kNack, -1, -1);
}
- virtual void TearDown() {
- TestRunningJitterBuffer::TearDown();
- }
+ virtual void TearDown() { TestRunningJitterBuffer::TearDown(); }
};
TEST_F(TestBasicJitterBuffer, StopRunning) {
@@ -431,8 +427,8 @@ TEST_F(TestBasicJitterBuffer, SinglePacketFrame) {
// Insert the packet to the jitter buffer and get a frame.
bool retransmitted = false;
- EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
CheckOutFrame(frame_out, size_, false);
EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
@@ -450,8 +446,8 @@ TEST_F(TestBasicJitterBuffer, VerifyHistogramStats) {
// Insert single packet frame to the jitter buffer and get a frame.
bool retransmitted = false;
- EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
CheckOutFrame(frame_out, size_, false);
EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
@@ -460,25 +456,25 @@ TEST_F(TestBasicJitterBuffer, VerifyHistogramStats) {
// Verify that histograms are updated when the jitter buffer is stopped.
clock_->AdvanceTimeMilliseconds(metrics::kMinRunTimeInSeconds * 1000);
jitter_buffer_->Stop();
- EXPECT_EQ(0, test::LastHistogramSample(
- "WebRTC.Video.DiscardedPacketsInPercent"));
- EXPECT_EQ(0, test::LastHistogramSample(
- "WebRTC.Video.DuplicatedPacketsInPercent"));
+ EXPECT_EQ(
+ 0, test::LastHistogramSample("WebRTC.Video.DiscardedPacketsInPercent"));
+ EXPECT_EQ(
+ 0, test::LastHistogramSample("WebRTC.Video.DuplicatedPacketsInPercent"));
EXPECT_NE(-1, test::LastHistogramSample(
- "WebRTC.Video.CompleteFramesReceivedPerSecond"));
+ "WebRTC.Video.CompleteFramesReceivedPerSecond"));
EXPECT_EQ(1000, test::LastHistogramSample(
- "WebRTC.Video.KeyFramesReceivedInPermille"));
+ "WebRTC.Video.KeyFramesReceivedInPermille"));
// Verify that histograms are not updated if stop is called again.
jitter_buffer_->Stop();
+ EXPECT_EQ(
+ 1, test::NumHistogramSamples("WebRTC.Video.DiscardedPacketsInPercent"));
+ EXPECT_EQ(
+ 1, test::NumHistogramSamples("WebRTC.Video.DuplicatedPacketsInPercent"));
EXPECT_EQ(1, test::NumHistogramSamples(
- "WebRTC.Video.DiscardedPacketsInPercent"));
- EXPECT_EQ(1, test::NumHistogramSamples(
- "WebRTC.Video.DuplicatedPacketsInPercent"));
- EXPECT_EQ(1, test::NumHistogramSamples(
- "WebRTC.Video.CompleteFramesReceivedPerSecond"));
- EXPECT_EQ(1, test::NumHistogramSamples(
- "WebRTC.Video.KeyFramesReceivedInPermille"));
+ "WebRTC.Video.CompleteFramesReceivedPerSecond"));
+ EXPECT_EQ(
+ 1, test::NumHistogramSamples("WebRTC.Video.KeyFramesReceivedInPermille"));
}
TEST_F(TestBasicJitterBuffer, DualPacketFrame) {
@@ -487,8 +483,8 @@ TEST_F(TestBasicJitterBuffer, DualPacketFrame) {
packet_->markerBit = false;
bool retransmitted = false;
- EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
// Should not be complete.
EXPECT_TRUE(frame_out == NULL);
@@ -498,8 +494,8 @@ TEST_F(TestBasicJitterBuffer, DualPacketFrame) {
packet_->markerBit = true;
packet_->seqNum = seq_num_;
- EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
frame_out = DecodeCompleteFrame();
CheckOutFrame(frame_out, 2 * size_, false);
@@ -514,8 +510,8 @@ TEST_F(TestBasicJitterBuffer, 100PacketKeyFrame) {
packet_->markerBit = false;
bool retransmitted = false;
- EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
@@ -530,8 +526,8 @@ TEST_F(TestBasicJitterBuffer, 100PacketKeyFrame) {
packet_->markerBit = false;
packet_->seqNum = seq_num_;
- EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
loop++;
} while (loop < 98);
@@ -541,8 +537,8 @@ TEST_F(TestBasicJitterBuffer, 100PacketKeyFrame) {
packet_->markerBit = true;
packet_->seqNum = seq_num_;
- EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
frame_out = DecodeCompleteFrame();
@@ -558,8 +554,8 @@ TEST_F(TestBasicJitterBuffer, 100PacketDeltaFrame) {
packet_->markerBit = true;
bool retransmitted = false;
- EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
EXPECT_FALSE(frame_out == NULL);
jitter_buffer_->ReleaseFrame(frame_out);
@@ -570,8 +566,8 @@ TEST_F(TestBasicJitterBuffer, 100PacketDeltaFrame) {
packet_->frameType = kVideoFrameDelta;
packet_->timestamp += 33 * 90;
- EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
frame_out = DecodeCompleteFrame();
@@ -586,8 +582,8 @@ TEST_F(TestBasicJitterBuffer, 100PacketDeltaFrame) {
packet_->seqNum = seq_num_;
// Insert a packet into a frame.
- EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
loop++;
} while (loop < 98);
@@ -597,8 +593,8 @@ TEST_F(TestBasicJitterBuffer, 100PacketDeltaFrame) {
packet_->markerBit = true;
packet_->seqNum = seq_num_;
- EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
frame_out = DecodeCompleteFrame();
@@ -617,8 +613,8 @@ TEST_F(TestBasicJitterBuffer, PacketReorderingReverseOrder) {
packet_->timestamp = timestamp_;
bool retransmitted = false;
- EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
@@ -632,8 +628,8 @@ TEST_F(TestBasicJitterBuffer, PacketReorderingReverseOrder) {
packet_->markerBit = false;
packet_->seqNum = seq_num_;
- EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
loop++;
} while (loop < 98);
@@ -643,10 +639,10 @@ TEST_F(TestBasicJitterBuffer, PacketReorderingReverseOrder) {
packet_->markerBit = false;
packet_->seqNum = seq_num_;
- EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
- frame_out = DecodeCompleteFrame();;
+ frame_out = DecodeCompleteFrame();
CheckOutFrame(frame_out, 100 * size_, false);
@@ -660,8 +656,8 @@ TEST_F(TestBasicJitterBuffer, FrameReordering2Frames2PacketsEach) {
packet_->markerBit = false;
bool retransmitted = false;
- EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
@@ -672,23 +668,23 @@ TEST_F(TestBasicJitterBuffer, FrameReordering2Frames2PacketsEach) {
packet_->markerBit = true;
packet_->seqNum = seq_num_;
- EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
  // Check that we fail to get a frame since the seqnum is not continuous.
frame_out = DecodeCompleteFrame();
EXPECT_TRUE(frame_out == NULL);
seq_num_ -= 3;
- timestamp_ -= 33*90;
+ timestamp_ -= 33 * 90;
packet_->frameType = kVideoFrameKey;
packet_->isFirstPacket = true;
packet_->markerBit = false;
packet_->seqNum = seq_num_;
packet_->timestamp = timestamp_;
- EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
frame_out = DecodeCompleteFrame();
@@ -700,8 +696,8 @@ TEST_F(TestBasicJitterBuffer, FrameReordering2Frames2PacketsEach) {
packet_->markerBit = true;
packet_->seqNum = seq_num_;
- EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
frame_out = DecodeCompleteFrame();
CheckOutFrame(frame_out, 2 * size_, false);
@@ -781,8 +777,8 @@ TEST_F(TestBasicJitterBuffer, DuplicatePackets) {
EXPECT_EQ(0, jitter_buffer_->num_duplicated_packets());
bool retransmitted = false;
- EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
@@ -791,8 +787,8 @@ TEST_F(TestBasicJitterBuffer, DuplicatePackets) {
EXPECT_EQ(0, jitter_buffer_->num_duplicated_packets());
// Insert a packet into a frame.
- EXPECT_EQ(kDuplicatePacket, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kDuplicatePacket,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
EXPECT_EQ(2, jitter_buffer_->num_packets());
EXPECT_EQ(1, jitter_buffer_->num_duplicated_packets());
@@ -801,8 +797,8 @@ TEST_F(TestBasicJitterBuffer, DuplicatePackets) {
packet_->markerBit = true;
packet_->isFirstPacket = false;
- EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
frame_out = DecodeCompleteFrame();
ASSERT_TRUE(frame_out != NULL);
@@ -885,7 +881,6 @@ TEST_F(TestBasicJitterBuffer, TestSkipForwardVp9) {
packet_->codecSpecificHeader.codecHeader.VP9.spatial_idx = 0;
packet_->codecSpecificHeader.codecHeader.VP9.beginning_of_frame = true;
packet_->codecSpecificHeader.codecHeader.VP9.end_of_frame = true;
- packet_->codecSpecificHeader.codecHeader.VP9.temporal_idx = kNoTemporalIdx;
packet_->codecSpecificHeader.codecHeader.VP9.temporal_up_switch = false;
packet_->seqNum = 65485;
@@ -893,7 +888,7 @@ TEST_F(TestBasicJitterBuffer, TestSkipForwardVp9) {
packet_->frameType = kVideoFrameKey;
packet_->codecSpecificHeader.codecHeader.VP9.picture_id = 5;
packet_->codecSpecificHeader.codecHeader.VP9.tl0_pic_idx = 200;
- packet_->codecSpecificHeader.codecHeader.VP9.gof_idx = 0;
+ packet_->codecSpecificHeader.codecHeader.VP9.temporal_idx = 0;
packet_->codecSpecificHeader.codecHeader.VP9.ss_data_available = true;
packet_->codecSpecificHeader.codecHeader.VP9.gof.SetGofInfoVP9(
kTemporalStructureMode3); // kTemporalStructureMode3: 0-2-1-2..
@@ -905,7 +900,7 @@ TEST_F(TestBasicJitterBuffer, TestSkipForwardVp9) {
packet_->frameType = kVideoFrameDelta;
packet_->codecSpecificHeader.codecHeader.VP9.picture_id = 9;
packet_->codecSpecificHeader.codecHeader.VP9.tl0_pic_idx = 201;
- packet_->codecSpecificHeader.codecHeader.VP9.gof_idx = 0;
+ packet_->codecSpecificHeader.codecHeader.VP9.temporal_idx = 0;
packet_->codecSpecificHeader.codecHeader.VP9.ss_data_available = false;
EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_, &re));
@@ -939,22 +934,22 @@ TEST_F(TestBasicJitterBuffer, ReorderedVp9SsData_3TlLayers) {
packet_->codecSpecificHeader.codecHeader.VP9.spatial_idx = 0;
packet_->codecSpecificHeader.codecHeader.VP9.beginning_of_frame = true;
packet_->codecSpecificHeader.codecHeader.VP9.end_of_frame = true;
- packet_->codecSpecificHeader.codecHeader.VP9.temporal_idx = kNoTemporalIdx;
- packet_->codecSpecificHeader.codecHeader.VP9.temporal_up_switch = false;
packet_->codecSpecificHeader.codecHeader.VP9.tl0_pic_idx = 200;
packet_->seqNum = 65486;
packet_->timestamp = 6000;
packet_->frameType = kVideoFrameDelta;
packet_->codecSpecificHeader.codecHeader.VP9.picture_id = 6;
- packet_->codecSpecificHeader.codecHeader.VP9.gof_idx = 1;
+ packet_->codecSpecificHeader.codecHeader.VP9.temporal_idx = 2;
+ packet_->codecSpecificHeader.codecHeader.VP9.temporal_up_switch = true;
EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_, &re));
packet_->seqNum = 65487;
packet_->timestamp = 9000;
packet_->frameType = kVideoFrameDelta;
packet_->codecSpecificHeader.codecHeader.VP9.picture_id = 7;
- packet_->codecSpecificHeader.codecHeader.VP9.gof_idx = 2;
+ packet_->codecSpecificHeader.codecHeader.VP9.temporal_idx = 1;
+ packet_->codecSpecificHeader.codecHeader.VP9.temporal_up_switch = true;
EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_, &re));
// Insert first frame with SS data.
@@ -964,7 +959,8 @@ TEST_F(TestBasicJitterBuffer, ReorderedVp9SsData_3TlLayers) {
packet_->width = 352;
packet_->height = 288;
packet_->codecSpecificHeader.codecHeader.VP9.picture_id = 5;
- packet_->codecSpecificHeader.codecHeader.VP9.gof_idx = 0;
+ packet_->codecSpecificHeader.codecHeader.VP9.temporal_idx = 0;
+ packet_->codecSpecificHeader.codecHeader.VP9.temporal_up_switch = false;
packet_->codecSpecificHeader.codecHeader.VP9.ss_data_available = true;
packet_->codecSpecificHeader.codecHeader.VP9.gof.SetGofInfoVP9(
kTemporalStructureMode3); // kTemporalStructureMode3: 0-2-1-2..
@@ -1011,8 +1007,6 @@ TEST_F(TestBasicJitterBuffer, ReorderedVp9SsData_2Tl2SLayers) {
packet_->codecSpecificHeader.codecHeader.VP9.flexible_mode = false;
packet_->codecSpecificHeader.codecHeader.VP9.beginning_of_frame = true;
packet_->codecSpecificHeader.codecHeader.VP9.end_of_frame = true;
- packet_->codecSpecificHeader.codecHeader.VP9.temporal_idx = kNoTemporalIdx;
- packet_->codecSpecificHeader.codecHeader.VP9.temporal_up_switch = false;
packet_->codecSpecificHeader.codecHeader.VP9.tl0_pic_idx = 200;
packet_->isFirstPacket = true;
@@ -1022,7 +1016,8 @@ TEST_F(TestBasicJitterBuffer, ReorderedVp9SsData_2Tl2SLayers) {
packet_->frameType = kVideoFrameDelta;
packet_->codecSpecificHeader.codecHeader.VP9.spatial_idx = 0;
packet_->codecSpecificHeader.codecHeader.VP9.picture_id = 6;
- packet_->codecSpecificHeader.codecHeader.VP9.gof_idx = 1;
+ packet_->codecSpecificHeader.codecHeader.VP9.temporal_idx = 1;
+ packet_->codecSpecificHeader.codecHeader.VP9.temporal_up_switch = true;
EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_, &re));
packet_->isFirstPacket = false;
@@ -1031,7 +1026,8 @@ TEST_F(TestBasicJitterBuffer, ReorderedVp9SsData_2Tl2SLayers) {
packet_->frameType = kVideoFrameDelta;
packet_->codecSpecificHeader.codecHeader.VP9.spatial_idx = 1;
packet_->codecSpecificHeader.codecHeader.VP9.picture_id = 6;
- packet_->codecSpecificHeader.codecHeader.VP9.gof_idx = 1;
+ packet_->codecSpecificHeader.codecHeader.VP9.temporal_idx = 1;
+ packet_->codecSpecificHeader.codecHeader.VP9.temporal_up_switch = true;
EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_, &re));
packet_->isFirstPacket = false;
@@ -1041,7 +1037,8 @@ TEST_F(TestBasicJitterBuffer, ReorderedVp9SsData_2Tl2SLayers) {
packet_->frameType = kVideoFrameKey;
packet_->codecSpecificHeader.codecHeader.VP9.spatial_idx = 1;
packet_->codecSpecificHeader.codecHeader.VP9.picture_id = 5;
- packet_->codecSpecificHeader.codecHeader.VP9.gof_idx = 0;
+ packet_->codecSpecificHeader.codecHeader.VP9.temporal_idx = 0;
+ packet_->codecSpecificHeader.codecHeader.VP9.temporal_up_switch = false;
EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_, &re));
// Insert first frame with SS data.
@@ -1053,7 +1050,8 @@ TEST_F(TestBasicJitterBuffer, ReorderedVp9SsData_2Tl2SLayers) {
packet_->height = 288;
packet_->codecSpecificHeader.codecHeader.VP9.spatial_idx = 0;
packet_->codecSpecificHeader.codecHeader.VP9.picture_id = 5;
- packet_->codecSpecificHeader.codecHeader.VP9.gof_idx = 0;
+ packet_->codecSpecificHeader.codecHeader.VP9.temporal_idx = 0;
+ packet_->codecSpecificHeader.codecHeader.VP9.temporal_up_switch = false;
packet_->codecSpecificHeader.codecHeader.VP9.ss_data_available = true;
packet_->codecSpecificHeader.codecHeader.VP9.gof.SetGofInfoVP9(
kTemporalStructureMode2); // kTemporalStructureMode2: 0-1-0-1..
@@ -1084,8 +1082,8 @@ TEST_F(TestBasicJitterBuffer, H264InsertStartCode) {
packet_->insertStartCode = true;
bool retransmitted = false;
- EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
@@ -1097,8 +1095,8 @@ TEST_F(TestBasicJitterBuffer, H264InsertStartCode) {
packet_->markerBit = true;
packet_->seqNum = seq_num_;
- EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
frame_out = DecodeCompleteFrame();
CheckOutFrame(frame_out, size_ * 2 + 4 * 2, true);
@@ -1118,8 +1116,8 @@ TEST_F(TestBasicJitterBuffer, PacketLossWithSelectiveErrorsThresholdCheck) {
packet_->timestamp = timestamp_;
bool retransmitted = false;
- EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
uint32_t timestamp = 0;
EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
EXPECT_FALSE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
@@ -1127,8 +1125,8 @@ TEST_F(TestBasicJitterBuffer, PacketLossWithSelectiveErrorsThresholdCheck) {
packet_->isFirstPacket = false;
for (int i = 1; i < 9; ++i) {
packet_->seqNum++;
- EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
EXPECT_FALSE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
}
@@ -1137,8 +1135,8 @@ TEST_F(TestBasicJitterBuffer, PacketLossWithSelectiveErrorsThresholdCheck) {
packet_->markerBit = true;
packet_->seqNum++;
- EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
CheckOutFrame(frame_out, 10 * size_, false);
EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
@@ -1152,8 +1150,8 @@ TEST_F(TestBasicJitterBuffer, PacketLossWithSelectiveErrorsThresholdCheck) {
packet_->seqNum += 100;
packet_->timestamp += 33 * 90 * 8;
- EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kDecodableSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
EXPECT_FALSE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
@@ -1161,23 +1159,23 @@ TEST_F(TestBasicJitterBuffer, PacketLossWithSelectiveErrorsThresholdCheck) {
packet_->seqNum -= 99;
packet_->timestamp -= 33 * 90 * 7;
- EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kDecodableSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
EXPECT_TRUE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
packet_->isFirstPacket = false;
for (int i = 1; i < 8; ++i) {
packet_->seqNum++;
- EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kDecodableSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
EXPECT_TRUE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
}
packet_->seqNum++;
- EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kDecodableSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
EXPECT_TRUE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
@@ -1189,8 +1187,7 @@ TEST_F(TestBasicJitterBuffer, PacketLossWithSelectiveErrorsThresholdCheck) {
packet_->markerBit = true;
packet_->seqNum++;
- EXPECT_EQ(kOldPacket, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kOldPacket, jitter_buffer_->InsertPacket(*packet_, &retransmitted));
}
// Make sure first packet is present before a frame can be decoded.
@@ -1204,8 +1201,8 @@ TEST_F(TestBasicJitterBuffer, PacketLossWithSelectiveErrorsIncompleteKey) {
packet_->timestamp = timestamp_;
bool retransmitted = false;
- EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
CheckOutFrame(frame_out, size_, false);
EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
@@ -1217,9 +1214,9 @@ TEST_F(TestBasicJitterBuffer, PacketLossWithSelectiveErrorsIncompleteKey) {
packet_->isFirstPacket = false;
packet_->markerBit = false;
packet_->seqNum += 100;
- packet_->timestamp += 33*90*8;
- EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ packet_->timestamp += 33 * 90 * 8;
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
uint32_t timestamp;
EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
EXPECT_FALSE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
@@ -1228,10 +1225,10 @@ TEST_F(TestBasicJitterBuffer, PacketLossWithSelectiveErrorsIncompleteKey) {
packet_->frameType = kVideoFrameKey;
packet_->isFirstPacket = true;
packet_->seqNum -= 99;
- packet_->timestamp -= 33*90*7;
+ packet_->timestamp -= 33 * 90 * 7;
- EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
EXPECT_FALSE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
@@ -1240,8 +1237,8 @@ TEST_F(TestBasicJitterBuffer, PacketLossWithSelectiveErrorsIncompleteKey) {
packet_->isFirstPacket = false;
for (int i = 1; i < 5; ++i) {
packet_->seqNum++;
- EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
EXPECT_FALSE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
}
@@ -1249,8 +1246,8 @@ TEST_F(TestBasicJitterBuffer, PacketLossWithSelectiveErrorsIncompleteKey) {
// Complete key frame.
packet_->markerBit = true;
packet_->seqNum++;
- EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
frame_out = DecodeCompleteFrame();
CheckOutFrame(frame_out, 6 * size_, false);
EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
@@ -1268,8 +1265,8 @@ TEST_F(TestBasicJitterBuffer, PacketLossWithSelectiveErrorsMissingFirstPacket) {
packet_->timestamp = timestamp_;
bool retransmitted = false;
- EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
CheckOutFrame(frame_out, size_, false);
EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
@@ -1281,9 +1278,9 @@ TEST_F(TestBasicJitterBuffer, PacketLossWithSelectiveErrorsMissingFirstPacket) {
packet_->isFirstPacket = false;
packet_->markerBit = false;
packet_->seqNum += 100;
- packet_->timestamp += 33*90*8;
- EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ packet_->timestamp += 33 * 90 * 8;
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
uint32_t timestamp;
EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
EXPECT_FALSE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
@@ -1291,17 +1288,17 @@ TEST_F(TestBasicJitterBuffer, PacketLossWithSelectiveErrorsMissingFirstPacket) {
// Insert second frame with the first packet missing. Make sure we're waiting
// for the key frame to be complete.
packet_->seqNum -= 98;
- packet_->timestamp -= 33*90*7;
+ packet_->timestamp -= 33 * 90 * 7;
- EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
EXPECT_FALSE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
for (int i = 0; i < 5; ++i) {
packet_->seqNum++;
- EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
EXPECT_FALSE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
}
@@ -1309,8 +1306,8 @@ TEST_F(TestBasicJitterBuffer, PacketLossWithSelectiveErrorsMissingFirstPacket) {
// Add first packet. Frame should now be decodable, but incomplete.
packet_->isFirstPacket = true;
packet_->seqNum -= 6;
- EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kDecodableSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
EXPECT_TRUE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
@@ -1329,8 +1326,8 @@ TEST_F(TestBasicJitterBuffer, DiscontinuousStreamWhenDecodingWithErrors) {
packet_->seqNum = seq_num_;
packet_->timestamp = timestamp_;
bool retransmitted = false;
- EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
uint32_t next_timestamp;
EXPECT_TRUE(jitter_buffer_->NextCompleteTimestamp(0, &next_timestamp));
EXPECT_EQ(packet_->timestamp, next_timestamp);
@@ -1346,8 +1343,8 @@ TEST_F(TestBasicJitterBuffer, DiscontinuousStreamWhenDecodingWithErrors) {
packet_->markerBit = false;
packet_->seqNum = seq_num_;
packet_->timestamp = timestamp_;
- EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kDecodableSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
// Insert a packet (so the previous one will be released).
timestamp_ += 33 * 90;
seq_num_ += 2;
@@ -1356,8 +1353,8 @@ TEST_F(TestBasicJitterBuffer, DiscontinuousStreamWhenDecodingWithErrors) {
packet_->markerBit = false;
packet_->seqNum = seq_num_;
packet_->timestamp = timestamp_;
- EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kDecodableSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &next_timestamp));
EXPECT_TRUE(jitter_buffer_->NextMaybeIncompleteTimestamp(&next_timestamp));
EXPECT_EQ(packet_->timestamp - 33 * 90, next_timestamp);
@@ -1382,12 +1379,12 @@ TEST_F(TestBasicJitterBuffer, PacketLoss) {
packet_->completeNALU = kNaluStart;
bool retransmitted = false;
- EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kDecodableSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
for (int i = 0; i < 11; ++i) {
webrtc::FrameType frametype = kVideoFrameDelta;
seq_num_++;
- timestamp_ += 33*90;
+ timestamp_ += 33 * 90;
packet_->frameType = frametype;
packet_->isFirstPacket = true;
packet_->markerBit = false;
@@ -1395,8 +1392,8 @@ TEST_F(TestBasicJitterBuffer, PacketLoss) {
packet_->timestamp = timestamp_;
packet_->completeNALU = kNaluStart;
- EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kDecodableSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
@@ -1430,9 +1427,9 @@ TEST_F(TestBasicJitterBuffer, PacketLoss) {
CheckOutFrame(frame_out, size_, false);
if (i == 0) {
- EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
+ EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
} else {
- EXPECT_EQ(frametype, frame_out->FrameType());
+ EXPECT_EQ(frametype, frame_out->FrameType());
}
EXPECT_FALSE(frame_out->Complete());
EXPECT_FALSE(frame_out->MissingFrame());
@@ -1446,18 +1443,15 @@ TEST_F(TestBasicJitterBuffer, PacketLoss) {
timestamp_ -= 33 * 90;
packet_->timestamp = timestamp_ - 1000;
- EXPECT_EQ(kOldPacket, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kOldPacket, jitter_buffer_->InsertPacket(*packet_, &retransmitted));
packet_->timestamp = timestamp_ - 500;
- EXPECT_EQ(kOldPacket, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kOldPacket, jitter_buffer_->InsertPacket(*packet_, &retransmitted));
packet_->timestamp = timestamp_ - 100;
- EXPECT_EQ(kOldPacket, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kOldPacket, jitter_buffer_->InsertPacket(*packet_, &retransmitted));
EXPECT_EQ(3, jitter_buffer_->num_discarded_packets());
@@ -1476,8 +1470,8 @@ TEST_F(TestBasicJitterBuffer, DeltaFrame100PacketsWithSeqNumWrap) {
packet_->timestamp = timestamp_;
bool retransmitted = false;
- EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
@@ -1490,8 +1484,8 @@ TEST_F(TestBasicJitterBuffer, DeltaFrame100PacketsWithSeqNumWrap) {
packet_->markerBit = false;
packet_->seqNum = seq_num_;
- EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
frame_out = DecodeCompleteFrame();
@@ -1505,8 +1499,8 @@ TEST_F(TestBasicJitterBuffer, DeltaFrame100PacketsWithSeqNumWrap) {
packet_->markerBit = true;
packet_->seqNum = seq_num_;
- EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
frame_out = DecodeCompleteFrame();
@@ -1525,8 +1519,8 @@ TEST_F(TestBasicJitterBuffer, PacketReorderingReverseWithNegSeqNumWrap) {
packet_->seqNum = seq_num_;
bool retransmitted = false;
- EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
// Should not be complete.
@@ -1540,8 +1534,8 @@ TEST_F(TestBasicJitterBuffer, PacketReorderingReverseWithNegSeqNumWrap) {
packet_->markerBit = false;
packet_->seqNum = seq_num_;
- EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
frame_out = DecodeCompleteFrame();
@@ -1556,8 +1550,8 @@ TEST_F(TestBasicJitterBuffer, PacketReorderingReverseWithNegSeqNumWrap) {
packet_->markerBit = false;
packet_->seqNum = seq_num_;
- EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
frame_out = DecodeCompleteFrame();
CheckOutFrame(frame_out, 100 * size_, false);
@@ -1579,8 +1573,8 @@ TEST_F(TestBasicJitterBuffer, TestInsertOldFrame) {
packet_->seqNum = seq_num_;
bool retransmitted = false;
- EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
EXPECT_EQ(3000u, frame_out->TimeStamp());
@@ -1596,8 +1590,7 @@ TEST_F(TestBasicJitterBuffer, TestInsertOldFrame) {
packet_->seqNum = seq_num_;
packet_->timestamp = timestamp_;
- EXPECT_EQ(kOldPacket, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kOldPacket, jitter_buffer_->InsertPacket(*packet_, &retransmitted));
}
TEST_F(TestBasicJitterBuffer, TestInsertOldFrameWithSeqNumWrap) {
@@ -1615,8 +1608,8 @@ TEST_F(TestBasicJitterBuffer, TestInsertOldFrameWithSeqNumWrap) {
packet_->timestamp = timestamp_;
bool retransmitted = false;
- EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
EXPECT_EQ(timestamp_, frame_out->TimeStamp());
@@ -1635,10 +1628,8 @@ TEST_F(TestBasicJitterBuffer, TestInsertOldFrameWithSeqNumWrap) {
packet_->seqNum = seq_num_;
packet_->timestamp = timestamp_;
-
// This timestamp is old.
- EXPECT_EQ(kOldPacket, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kOldPacket, jitter_buffer_->InsertPacket(*packet_, &retransmitted));
}
TEST_F(TestBasicJitterBuffer, TimestampWrap) {
@@ -1655,8 +1646,8 @@ TEST_F(TestBasicJitterBuffer, TimestampWrap) {
packet_->timestamp = timestamp_;
bool retransmitted = false;
- EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
EXPECT_TRUE(frame_out == NULL);
@@ -1666,23 +1657,23 @@ TEST_F(TestBasicJitterBuffer, TimestampWrap) {
packet_->markerBit = true;
packet_->seqNum = seq_num_;
- EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
frame_out = DecodeCompleteFrame();
CheckOutFrame(frame_out, 2 * size_, false);
jitter_buffer_->ReleaseFrame(frame_out);
seq_num_++;
- timestamp_ += 33*90;
+ timestamp_ += 33 * 90;
packet_->frameType = kVideoFrameDelta;
packet_->isFirstPacket = true;
packet_->markerBit = false;
packet_->seqNum = seq_num_;
packet_->timestamp = timestamp_;
- EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
frame_out = DecodeCompleteFrame();
EXPECT_TRUE(frame_out == NULL);
@@ -1692,8 +1683,8 @@ TEST_F(TestBasicJitterBuffer, TimestampWrap) {
packet_->markerBit = true;
packet_->seqNum = seq_num_;
- EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
frame_out = DecodeCompleteFrame();
CheckOutFrame(frame_out, 2 * size_, false);
@@ -1715,8 +1706,8 @@ TEST_F(TestBasicJitterBuffer, 2FrameWithTimestampWrap) {
bool retransmitted = false;
// Insert first frame (session will be complete).
- EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
// Insert next frame.
seq_num_++;
@@ -1727,8 +1718,8 @@ TEST_F(TestBasicJitterBuffer, 2FrameWithTimestampWrap) {
packet_->seqNum = seq_num_;
packet_->timestamp = timestamp_;
- EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
EXPECT_EQ(0xffffff00, frame_out->TimeStamp());
@@ -1758,8 +1749,8 @@ TEST_F(TestBasicJitterBuffer, Insert2FramesReOrderedWithTimestampWrap) {
packet_->timestamp = timestamp_;
bool retransmitted = false;
- EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
// Insert second frame
seq_num_--;
@@ -1770,8 +1761,8 @@ TEST_F(TestBasicJitterBuffer, Insert2FramesReOrderedWithTimestampWrap) {
packet_->seqNum = seq_num_;
packet_->timestamp = timestamp_;
- EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
EXPECT_EQ(0xffffff00, frame_out->TimeStamp());
@@ -1798,12 +1789,12 @@ TEST_F(TestBasicJitterBuffer, DeltaFrameWithMoreThanMaxNumberOfPackets) {
packet_->seqNum = seq_num_;
if (firstPacket) {
- EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
firstPacket = false;
} else {
- EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
}
loop++;
@@ -1817,10 +1808,8 @@ TEST_F(TestBasicJitterBuffer, DeltaFrameWithMoreThanMaxNumberOfPackets) {
packet_->seqNum = seq_num_;
// Insert the packet -> frame recycled.
- EXPECT_EQ(kSizeError, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kSizeError, jitter_buffer_->InsertPacket(*packet_, &retransmitted));
EXPECT_TRUE(NULL == DecodeCompleteFrame());
-
}
TEST_F(TestBasicJitterBuffer, ExceedNumOfFrameWithSeqNumWrap) {
@@ -1832,13 +1821,18 @@ TEST_F(TestBasicJitterBuffer, ExceedNumOfFrameWithSeqNumWrap) {
// --------------------------------------------------------------
// |<-----------delta frames------------->|<------key frames----->|
+ // Make sure the jitter buffer doesn't request a keyframe after too many
+ // non-decodable frames.
+ jitter_buffer_->SetNackMode(kNack, -1, -1);
+ jitter_buffer_->SetNackSettings(kMaxNumberOfFrames, kMaxNumberOfFrames, 0);
+
int loop = 0;
seq_num_ = 65485;
uint32_t first_key_frame_timestamp = 0;
bool retransmitted = false;
// Insert MAX_NUMBER_OF_FRAMES frames.
do {
- timestamp_ += 33*90;
+ timestamp_ += 33 * 90;
seq_num_++;
packet_->isFirstPacket = true;
packet_->markerBit = true;
@@ -1851,8 +1845,8 @@ TEST_F(TestBasicJitterBuffer, ExceedNumOfFrameWithSeqNumWrap) {
}
// Insert frame.
- EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
loop++;
} while (loop < kMaxNumberOfFrames);
@@ -1860,7 +1854,7 @@ TEST_F(TestBasicJitterBuffer, ExceedNumOfFrameWithSeqNumWrap) {
// Max number of frames inserted.
// Insert one more frame.
- timestamp_ += 33*90;
+ timestamp_ += 33 * 90;
seq_num_++;
packet_->isFirstPacket = true;
packet_->markerBit = true;
@@ -1894,8 +1888,7 @@ TEST_F(TestBasicJitterBuffer, EmptyLastFrame) {
packet_->timestamp = timestamp_;
packet_->frameType = kEmptyFrame;
- EXPECT_EQ(kNoError, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kNoError, jitter_buffer_->InsertPacket(*packet_, &retransmitted));
VCMEncodedFrame* testFrame = DecodeIncompleteFrame();
// Timestamp should never be the last TS inserted.
if (testFrame != NULL) {
@@ -1919,8 +1912,8 @@ TEST_F(TestBasicJitterBuffer, H264IncompleteNalu) {
packet_->markerBit = false;
bool retransmitted = false;
- EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kDecodableSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
seq_num_ += 2; // Skip one packet.
packet_->seqNum = seq_num_;
@@ -1929,8 +1922,8 @@ TEST_F(TestBasicJitterBuffer, H264IncompleteNalu) {
packet_->completeNALU = kNaluIncomplete;
packet_->markerBit = false;
- EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kDecodableSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
seq_num_++;
packet_->seqNum = seq_num_;
@@ -1939,15 +1932,15 @@ TEST_F(TestBasicJitterBuffer, H264IncompleteNalu) {
packet_->completeNALU = kNaluEnd;
packet_->markerBit = false;
- EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kDecodableSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
seq_num_++;
packet_->seqNum = seq_num_;
packet_->completeNALU = kNaluComplete;
packet_->markerBit = true; // Last packet.
- EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kDecodableSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
// The JB will only output (incomplete) frames if a packet belonging to a
// subsequent frame was already inserted. Insert one packet of a subsequent
// frame. Place a high timestamp so the JB would always have a next frame
@@ -1960,8 +1953,8 @@ TEST_F(TestBasicJitterBuffer, H264IncompleteNalu) {
packet_->completeNALU = kNaluStart;
packet_->markerBit = false;
- EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kDecodableSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
VCMEncodedFrame* frame_out = DecodeIncompleteFrame();
@@ -1973,7 +1966,7 @@ TEST_F(TestBasicJitterBuffer, H264IncompleteNalu) {
// Test reordered start frame + 1 lost.
seq_num_ += 2; // Re-order 1 frame.
- timestamp_ += 33*90;
+ timestamp_ += 33 * 90;
insertedLength = 0;
packet_->seqNum = seq_num_;
@@ -1982,9 +1975,9 @@ TEST_F(TestBasicJitterBuffer, H264IncompleteNalu) {
packet_->isFirstPacket = false;
packet_->completeNALU = kNaluEnd;
packet_->markerBit = false;
- EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
- insertedLength += packet_->sizeBytes; // This packet should be decoded.
+ EXPECT_EQ(kDecodableSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+ insertedLength += packet_->sizeBytes; // This packet should be decoded.
seq_num_--;
packet_->seqNum = seq_num_;
packet_->timestamp = timestamp_;
@@ -1993,8 +1986,8 @@ TEST_F(TestBasicJitterBuffer, H264IncompleteNalu) {
packet_->completeNALU = kNaluStart;
packet_->markerBit = false;
- EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kDecodableSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
insertedLength += packet_->sizeBytes; // This packet should be decoded.
seq_num_ += 3; // One packet drop.
@@ -2004,8 +1997,8 @@ TEST_F(TestBasicJitterBuffer, H264IncompleteNalu) {
packet_->isFirstPacket = false;
packet_->completeNALU = kNaluComplete;
packet_->markerBit = false;
- EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kDecodableSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
insertedLength += packet_->sizeBytes; // This packet should be decoded.
seq_num_++;
packet_->seqNum = seq_num_;
@@ -2014,8 +2007,8 @@ TEST_F(TestBasicJitterBuffer, H264IncompleteNalu) {
packet_->isFirstPacket = false;
packet_->completeNALU = kNaluStart;
packet_->markerBit = false;
- EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kDecodableSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
// This packet should be decoded since it's the beginning of a NAL.
insertedLength += packet_->sizeBytes;
@@ -2026,8 +2019,8 @@ TEST_F(TestBasicJitterBuffer, H264IncompleteNalu) {
packet_->isFirstPacket = false;
packet_->completeNALU = kNaluEnd;
packet_->markerBit = true;
- EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kDecodableSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
// This packet should not be decoded because it is an incomplete NAL and it
// is the last one.
frame_out = DecodeIncompleteFrame();
@@ -2045,8 +2038,8 @@ TEST_F(TestBasicJitterBuffer, H264IncompleteNalu) {
emptypacket.isFirstPacket = true;
emptypacket.completeNALU = kNaluComplete;
emptypacket.markerBit = true;
- EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(emptypacket,
- &retransmitted));
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(emptypacket, &retransmitted));
// This packet should not be decoded because it is an incomplete NAL and it
// is the last one.
@@ -2067,8 +2060,8 @@ TEST_F(TestBasicJitterBuffer, H264IncompleteNalu) {
packet_->completeNALU = kNaluComplete;
packet_->markerBit = false;
- EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kDecodableSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
seq_num_++;
emptypacket.seqNum = seq_num_;
@@ -2077,8 +2070,8 @@ TEST_F(TestBasicJitterBuffer, H264IncompleteNalu) {
emptypacket.isFirstPacket = true;
emptypacket.completeNALU = kNaluComplete;
emptypacket.markerBit = true;
- EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(emptypacket,
- &retransmitted));
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(emptypacket, &retransmitted));
frame_out = DecodeCompleteFrame();
// Only the last NALU is complete.
@@ -2097,8 +2090,8 @@ TEST_F(TestBasicJitterBuffer, NextFrameWhenIncomplete) {
packet_->markerBit = true;
bool retransmitted = false;
- EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
EXPECT_TRUE(frame_out != NULL);
jitter_buffer_->ReleaseFrame(frame_out);
@@ -2109,9 +2102,8 @@ TEST_F(TestBasicJitterBuffer, NextFrameWhenIncomplete) {
packet_->isFirstPacket = false;
packet_->markerBit = false;
-
- EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kDecodableSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
frame_out = DecodeIncompleteFrame();
EXPECT_TRUE(frame_out == NULL);
@@ -2120,8 +2112,8 @@ TEST_F(TestBasicJitterBuffer, NextFrameWhenIncomplete) {
packet_->timestamp += 33 * 90;
packet_->isFirstPacket = true;
- EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kDecodableSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
frame_out = DecodeIncompleteFrame();
CheckOutFrame(frame_out, packet_->sizeBytes, false);
@@ -2129,6 +2121,10 @@ TEST_F(TestBasicJitterBuffer, NextFrameWhenIncomplete) {
}
TEST_F(TestRunningJitterBuffer, Full) {
+ // Make sure the jitter buffer doesn't request a keyframe after too many
+ // non-decodable frames.
+ jitter_buffer_->SetNackMode(kNack, -1, -1);
+ jitter_buffer_->SetNackSettings(kMaxNumberOfFrames, kMaxNumberOfFrames, 0);
// Insert a key frame and decode it.
EXPECT_GE(InsertFrame(kVideoFrameKey), kNoError);
EXPECT_TRUE(DecodeCompleteFrame());
@@ -2277,8 +2273,8 @@ TEST_F(TestJitterBufferNack, NackTooOldPackets) {
// old packet.
DropFrame(1);
// Insert a frame which should trigger a recycle until the next key frame.
- EXPECT_EQ(kFlushIndicator, InsertFrames(oldest_packet_to_nack_ + 1,
- kVideoFrameDelta));
+ EXPECT_EQ(kFlushIndicator,
+ InsertFrames(oldest_packet_to_nack_ + 1, kVideoFrameDelta));
EXPECT_FALSE(DecodeCompleteFrame());
bool request_key_frame = false;
@@ -2369,7 +2365,7 @@ TEST_F(TestJitterBufferNack, NackListBuiltBeforeFirstDecode) {
stream_generator_->Init(0, clock_->TimeInMilliseconds());
InsertFrame(kVideoFrameKey);
stream_generator_->GenerateFrame(kVideoFrameDelta, 2, 0,
- clock_->TimeInMilliseconds());
+ clock_->TimeInMilliseconds());
stream_generator_->NextPacket(NULL); // Drop packet.
EXPECT_EQ(kIncomplete, InsertPacketAndPop(0));
EXPECT_TRUE(DecodeCompleteFrame());
@@ -2397,8 +2393,8 @@ TEST_F(TestJitterBufferNack, VerifyRetransmittedFlag) {
EXPECT_EQ(1u, nack_list.size());
stream_generator_->PopPacket(&packet, 0);
EXPECT_EQ(packet.seqNum, nack_list[0]);
- EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(packet,
- &retransmitted));
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(packet, &retransmitted));
EXPECT_TRUE(retransmitted);
EXPECT_TRUE(DecodeCompleteFrame());
}
@@ -2406,7 +2402,7 @@ TEST_F(TestJitterBufferNack, VerifyRetransmittedFlag) {
TEST_F(TestJitterBufferNack, UseNackToRecoverFirstKeyFrame) {
stream_generator_->Init(0, clock_->TimeInMilliseconds());
stream_generator_->GenerateFrame(kVideoFrameKey, 3, 0,
- clock_->TimeInMilliseconds());
+ clock_->TimeInMilliseconds());
EXPECT_EQ(kIncomplete, InsertPacketAndPop(0));
// Drop second packet.
EXPECT_EQ(kIncomplete, InsertPacketAndPop(1));
@@ -2454,7 +2450,7 @@ TEST_F(TestJitterBufferNack, NormalOperation) {
// | 1 | 2 | .. | 8 | 9 | x | 11 | 12 | .. | 19 | x | 21 | .. | 100 |
// ----------------------------------------------------------------
stream_generator_->GenerateFrame(kVideoFrameKey, 100, 0,
- clock_->TimeInMilliseconds());
+ clock_->TimeInMilliseconds());
clock_->AdvanceTimeMilliseconds(kDefaultFramePeriodMs);
EXPECT_EQ(kDecodableSession, InsertPacketAndPop(0));
// Verify that the frame is incomplete.
@@ -2490,7 +2486,7 @@ TEST_F(TestJitterBufferNack, NormalOperationWrap) {
EXPECT_FALSE(request_key_frame);
EXPECT_TRUE(DecodeCompleteFrame());
stream_generator_->GenerateFrame(kVideoFrameDelta, 100, 0,
- clock_->TimeInMilliseconds());
+ clock_->TimeInMilliseconds());
EXPECT_EQ(kIncomplete, InsertPacketAndPop(0));
while (stream_generator_->PacketsRemaining() > 1) {
if (stream_generator_->NextSequenceNumber() % 10 != 0) {
@@ -2527,7 +2523,7 @@ TEST_F(TestJitterBufferNack, NormalOperationWrap2) {
clock_->TimeInMilliseconds());
clock_->AdvanceTimeMilliseconds(kDefaultFramePeriodMs);
for (int i = 0; i < 5; ++i) {
- if (stream_generator_->NextSequenceNumber() != 65535) {
+ if (stream_generator_->NextSequenceNumber() != 65535) {
EXPECT_EQ(kCompleteSession, InsertPacketAndPop(0));
EXPECT_FALSE(request_key_frame);
} else {
diff --git a/webrtc/modules/video_coding/jitter_estimator.cc b/webrtc/modules/video_coding/jitter_estimator.cc
new file mode 100644
index 0000000000..8270c60e01
--- /dev/null
+++ b/webrtc/modules/video_coding/jitter_estimator.cc
@@ -0,0 +1,443 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/video_coding/jitter_estimator.h"
+
+#include <assert.h>
+#include <math.h>
+#include <stdlib.h>
+#include <string.h>
+#include <string>
+
+#include "webrtc/modules/video_coding/internal_defines.h"
+#include "webrtc/modules/video_coding/rtt_filter.h"
+#include "webrtc/system_wrappers/include/clock.h"
+#include "webrtc/system_wrappers/include/field_trial.h"
+
+namespace webrtc {
+
+enum { kStartupDelaySamples = 30 };
+enum { kFsAccuStartupSamples = 5 };
+enum { kMaxFramerateEstimate = 200 };
+
+VCMJitterEstimator::VCMJitterEstimator(const Clock* clock,
+ int32_t vcmId,
+ int32_t receiverId)
+ : _vcmId(vcmId),
+ _receiverId(receiverId),
+ _phi(0.97),
+ _psi(0.9999),
+ _alphaCountMax(400),
+ _thetaLow(0.000001),
+ _nackLimit(3),
+ _numStdDevDelayOutlier(15),
+ _numStdDevFrameSizeOutlier(3),
+ _noiseStdDevs(2.33), // ~Less than 1% chance
+ // (look up in normal distribution table)...
+ _noiseStdDevOffset(30.0), // ...of getting 30 ms freezes
+ _rttFilter(),
+ fps_counter_(30), // TODO(sprang): Use an estimator with limit based on
+ // time, rather than number of samples.
+ low_rate_experiment_(kInit),
+ clock_(clock) {
+ Reset();
+}
+
+VCMJitterEstimator::~VCMJitterEstimator() {}
+
+VCMJitterEstimator& VCMJitterEstimator::operator=(
+ const VCMJitterEstimator& rhs) {
+ if (this != &rhs) {
+ memcpy(_thetaCov, rhs._thetaCov, sizeof(_thetaCov));
+ memcpy(_Qcov, rhs._Qcov, sizeof(_Qcov));
+
+ _vcmId = rhs._vcmId;
+ _receiverId = rhs._receiverId;
+ _avgFrameSize = rhs._avgFrameSize;
+ _varFrameSize = rhs._varFrameSize;
+ _maxFrameSize = rhs._maxFrameSize;
+ _fsSum = rhs._fsSum;
+ _fsCount = rhs._fsCount;
+ _lastUpdateT = rhs._lastUpdateT;
+ _prevEstimate = rhs._prevEstimate;
+ _prevFrameSize = rhs._prevFrameSize;
+ _avgNoise = rhs._avgNoise;
+ _alphaCount = rhs._alphaCount;
+ _filterJitterEstimate = rhs._filterJitterEstimate;
+ _startupCount = rhs._startupCount;
+ _latestNackTimestamp = rhs._latestNackTimestamp;
+ _nackCount = rhs._nackCount;
+ _rttFilter = rhs._rttFilter;
+ }
+ return *this;
+}
+
+// Resets the JitterEstimate
+void VCMJitterEstimator::Reset() {
+ _theta[0] = 1 / (512e3 / 8);
+ _theta[1] = 0;
+ _varNoise = 4.0;
+
+ _thetaCov[0][0] = 1e-4;
+ _thetaCov[1][1] = 1e2;
+ _thetaCov[0][1] = _thetaCov[1][0] = 0;
+ _Qcov[0][0] = 2.5e-10;
+ _Qcov[1][1] = 1e-10;
+ _Qcov[0][1] = _Qcov[1][0] = 0;
+ _avgFrameSize = 500;
+ _maxFrameSize = 500;
+ _varFrameSize = 100;
+ _lastUpdateT = -1;
+ _prevEstimate = -1.0;
+ _prevFrameSize = 0;
+ _avgNoise = 0.0;
+ _alphaCount = 1;
+ _filterJitterEstimate = 0.0;
+ _latestNackTimestamp = 0;
+ _nackCount = 0;
+ _fsSum = 0;
+ _fsCount = 0;
+ _startupCount = 0;
+ _rttFilter.Reset();
+ fps_counter_.Reset();
+}
+
+void VCMJitterEstimator::ResetNackCount() {
+ _nackCount = 0;
+}
+
+// Updates the estimates with the new measurements
+void VCMJitterEstimator::UpdateEstimate(int64_t frameDelayMS,
+ uint32_t frameSizeBytes,
+ bool incompleteFrame /* = false */) {
+ if (frameSizeBytes == 0) {
+ return;
+ }
+ int deltaFS = frameSizeBytes - _prevFrameSize;
+ if (_fsCount < kFsAccuStartupSamples) {
+ _fsSum += frameSizeBytes;
+ _fsCount++;
+ } else if (_fsCount == kFsAccuStartupSamples) {
+ // Seed the frame size filter with the average of the startup samples.
+ _avgFrameSize = static_cast<double>(_fsSum) / static_cast<double>(_fsCount);
+ _fsCount++;
+ }
+ if (!incompleteFrame || frameSizeBytes > _avgFrameSize) {
+ double avgFrameSize = _phi * _avgFrameSize + (1 - _phi) * frameSizeBytes;
+ if (frameSizeBytes < _avgFrameSize + 2 * sqrt(_varFrameSize)) {
+ // Only update the average frame size if this sample wasn't a
+ // key frame
+ _avgFrameSize = avgFrameSize;
+ }
+ // Update the variance anyway since we want to capture cases where we
+ // only get key frames.
+ _varFrameSize = VCM_MAX(_phi * _varFrameSize +
+ (1 - _phi) * (frameSizeBytes - avgFrameSize) *
+ (frameSizeBytes - avgFrameSize),
+ 1.0);
+ }
+
+ // Update max frameSize estimate
+ _maxFrameSize =
+ VCM_MAX(_psi * _maxFrameSize, static_cast<double>(frameSizeBytes));
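+ // Illustrative: with _psi = 0.9999 the running maximum decays by roughly
+ // 1% per 100 frames, so a single oversized key frame stops dominating
+ // _maxFrameSize after a bounded number of frames.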
+
+ if (_prevFrameSize == 0) {
+ _prevFrameSize = frameSizeBytes;
+ return;
+ }
+ _prevFrameSize = frameSizeBytes;
+
+ // Only update the Kalman filter if the sample is not considered
+ // an extreme outlier. Even if it is an extreme outlier from a
+ // delay point of view, if the frame size is also large the
+ // deviation is probably due to an incorrect line slope.
+ double deviation = DeviationFromExpectedDelay(frameDelayMS, deltaFS);
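+ // For intuition (reset-state numbers): with _varNoise = 4.0 the noise
+ // standard deviation is 2 ms, so only samples deviating by more than
+ // 15 * 2 = 30 ms from the Kalman line are treated as delay outliers,
+ // unless the frame size itself is an outlier.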
+
+ if (fabs(deviation) < _numStdDevDelayOutlier * sqrt(_varNoise) ||
+ frameSizeBytes >
+ _avgFrameSize + _numStdDevFrameSizeOutlier * sqrt(_varFrameSize)) {
+ // Update the variance of the deviation from the
+ // line given by the Kalman filter
+ EstimateRandomJitter(deviation, incompleteFrame);
+ // Prevent updating with frames which have been congested by a large
+ // frame, and therefore arrive almost at the same time as that frame.
+ // This can occur when we receive a large frame (key frame) which
+ // has been delayed. The next frame is of normal size (delta frame),
+ // and thus deltaFS will be << 0. This removes all frame samples
+ // which arrive after a key frame.
+ if ((!incompleteFrame || deviation >= 0.0) &&
+ static_cast<double>(deltaFS) > -0.25 * _maxFrameSize) {
+ // Update the Kalman filter with the new data
+ KalmanEstimateChannel(frameDelayMS, deltaFS);
+ }
+ } else {
+ int nStdDev =
+ (deviation >= 0) ? _numStdDevDelayOutlier : -_numStdDevDelayOutlier;
+ EstimateRandomJitter(nStdDev * sqrt(_varNoise), incompleteFrame);
+ }
+ // Post process the total estimated jitter
+ if (_startupCount >= kStartupDelaySamples) {
+ PostProcessEstimate();
+ } else {
+ _startupCount++;
+ }
+}
+
+// Updates the nack/packet ratio
+void VCMJitterEstimator::FrameNacked() {
+ // Wait until _nackLimit retransmissions have been received,
+ // then always add ~1 RTT delay.
+ // TODO(holmer): Should we ever remove the additional delay if
+ // the packet losses seem to have stopped? We could for instance scale
+ // the number of RTTs to add with the amount of retransmissions in a given
+ // time interval, or similar.
+ if (_nackCount < _nackLimit) {
+ _nackCount++;
+ }
+}
+
+// Updates Kalman estimate of the channel
+// The caller is expected to sanity check the inputs.
+void VCMJitterEstimator::KalmanEstimateChannel(int64_t frameDelayMS,
+ int32_t deltaFSBytes) {
+ double Mh[2];
+ double hMh_sigma;
+ double kalmanGain[2];
+ double measureRes;
+ double t00, t01;
+
+ // Kalman filtering
+
+ // Prediction
+ // M = M + Q
+ _thetaCov[0][0] += _Qcov[0][0];
+ _thetaCov[0][1] += _Qcov[0][1];
+ _thetaCov[1][0] += _Qcov[1][0];
+ _thetaCov[1][1] += _Qcov[1][1];
+
+ // Kalman gain
+ // K = M*h'/(sigma2n + h*M*h') = M*h'/(1 + h*M*h')
+ // h = [dFS 1]
+ // Mh = M*h'
+ // hMh_sigma = h*M*h' + R
+ Mh[0] = _thetaCov[0][0] * deltaFSBytes + _thetaCov[0][1];
+ Mh[1] = _thetaCov[1][0] * deltaFSBytes + _thetaCov[1][1];
+ // sigma weights measurements with a small deltaFS as noisy and
+ // measurements with large deltaFS as good
+ if (_maxFrameSize < 1.0) {
+ return;
+ }
+ double sigma = (300.0 * exp(-fabs(static_cast<double>(deltaFSBytes)) /
+ (1e0 * _maxFrameSize)) +
+ 1) *
+ sqrt(_varNoise);
+ if (sigma < 1.0) {
+ sigma = 1.0;
+ }
+ hMh_sigma = deltaFSBytes * Mh[0] + Mh[1] + sigma;
+ if ((hMh_sigma < 1e-9 && hMh_sigma >= 0) ||
+ (hMh_sigma > -1e-9 && hMh_sigma <= 0)) {
+ assert(false);
+ return;
+ }
+ kalmanGain[0] = Mh[0] / hMh_sigma;
+ kalmanGain[1] = Mh[1] / hMh_sigma;
+
+ // Correction
+ // theta = theta + K*(dT - h*theta)
+ measureRes = frameDelayMS - (deltaFSBytes * _theta[0] + _theta[1]);
+ _theta[0] += kalmanGain[0] * measureRes;
+ _theta[1] += kalmanGain[1] * measureRes;
+
+ if (_theta[0] < _thetaLow) {
+ _theta[0] = _thetaLow;
+ }
+
+ // M = (I - K*h)*M
+ t00 = _thetaCov[0][0];
+ t01 = _thetaCov[0][1];
+ _thetaCov[0][0] = (1 - kalmanGain[0] * deltaFSBytes) * t00 -
+ kalmanGain[0] * _thetaCov[1][0];
+ _thetaCov[0][1] = (1 - kalmanGain[0] * deltaFSBytes) * t01 -
+ kalmanGain[0] * _thetaCov[1][1];
+ _thetaCov[1][0] = _thetaCov[1][0] * (1 - kalmanGain[1]) -
+ kalmanGain[1] * deltaFSBytes * t00;
+ _thetaCov[1][1] = _thetaCov[1][1] * (1 - kalmanGain[1]) -
+ kalmanGain[1] * deltaFSBytes * t01;
+
+ // Covariance matrix, must be positive semi-definite
+ assert(_thetaCov[0][0] + _thetaCov[1][1] >= 0 &&
+ _thetaCov[0][0] * _thetaCov[1][1] -
+ _thetaCov[0][1] * _thetaCov[1][0] >=
+ 0 &&
+ _thetaCov[0][0] >= 0);
+}
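+// In state-space terms (a summary of the filter above, not extra behavior):
+//   state:       theta = [slope, offset]'
+//   observation: frameDelayMS = [deltaFSBytes 1] * theta + noise
+// so theta[0] tracks the extra delay contributed per byte of frame size
+// delta and theta[1] the size-independent delay offset.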
+
+// Calculate difference in delay between a sample and the
+// expected delay estimated by the Kalman filter
+double VCMJitterEstimator::DeviationFromExpectedDelay(
+ int64_t frameDelayMS,
+ int32_t deltaFSBytes) const {
+ return frameDelayMS - (_theta[0] * deltaFSBytes + _theta[1]);
+}
+
+// Estimates the random jitter by calculating the variance of the
+// sample distance from the line given by theta.
+void VCMJitterEstimator::EstimateRandomJitter(double d_dT,
+ bool incompleteFrame) {
+ uint64_t now = clock_->TimeInMicroseconds();
+ if (_lastUpdateT != -1) {
+ fps_counter_.AddSample(now - _lastUpdateT);
+ }
+ _lastUpdateT = now;
+
+ if (_alphaCount == 0) {
+ assert(false);
+ return;
+ }
+ double alpha =
+ static_cast<double>(_alphaCount - 1) / static_cast<double>(_alphaCount);
+ _alphaCount++;
+ if (_alphaCount > _alphaCountMax)
+ _alphaCount = _alphaCountMax;
+
+ if (LowRateExperimentEnabled()) {
+ // To avoid a low frame rate stream reacting more slowly to changes,
+ // scale the alpha weight relative to a 30 fps stream.
+ double fps = GetFrameRate();
+ if (fps > 0.0) {
+ double rate_scale = 30.0 / fps;
+ // At startup, there can be a lot of noise in the fps estimate.
+ // Interpolate rate_scale linearly, from 1.0 at sample #1, to 30.0 / fps
+ // at sample #kStartupDelaySamples.
+ if (_alphaCount < kStartupDelaySamples) {
+ rate_scale =
+ (_alphaCount * rate_scale + (kStartupDelaySamples - _alphaCount)) /
+ kStartupDelaySamples;
+ }
+ alpha = pow(alpha, rate_scale);
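+ // E.g. at fps = 10 (past startup) rate_scale = 30 / 10 = 3, and alpha^3
+ // makes the 10 fps stream forget old samples at roughly the same
+ // wall-clock rate as a 30 fps stream with plain alpha.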
+ }
+ }
+
+ double avgNoise = alpha * _avgNoise + (1 - alpha) * d_dT;
+ double varNoise =
+ alpha * _varNoise + (1 - alpha) * (d_dT - _avgNoise) * (d_dT - _avgNoise);
+ if (!incompleteFrame || varNoise > _varNoise) {
+ _avgNoise = avgNoise;
+ _varNoise = varNoise;
+ }
+ if (_varNoise < 1.0) {
+ // The variance should never be zero, since then we might get
+ // stuck and consider all samples as outliers.
+ _varNoise = 1.0;
+ }
+}
+
+double VCMJitterEstimator::NoiseThreshold() const {
+ double noiseThreshold = _noiseStdDevs * sqrt(_varNoise) - _noiseStdDevOffset;
+ if (noiseThreshold < 1.0) {
+ noiseThreshold = 1.0;
+ }
+ return noiseThreshold;
+}
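+// E.g. at the reset value _varNoise = 4.0 the raw threshold is
+// 2.33 * 2 - 30 = -25.3 ms, so the 1 ms floor applies until the measured
+// noise variance has grown well beyond its startup value.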
+
+// Calculates the current jitter estimate from the filtered estimates
+double VCMJitterEstimator::CalculateEstimate() {
+ double ret = _theta[0] * (_maxFrameSize - _avgFrameSize) + NoiseThreshold();
+
+ // A very low estimate (or negative) is neglected
+ if (ret < 1.0) {
+ if (_prevEstimate <= 0.01) {
+ ret = 1.0;
+ } else {
+ ret = _prevEstimate;
+ }
+ }
+ if (ret > 10000.0) { // Sanity
+ ret = 10000.0;
+ }
+ _prevEstimate = ret;
+ return ret;
+}
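+// Worked example with reset-state values: _theta[0] = 1 / 64000 and a
+// max-minus-average frame size spread of 8000 bytes give a size term of
+// 8000 / 64000 = 0.125 ms, so early estimates are dominated by
+// NoiseThreshold() until the Kalman slope has adapted.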
+
+void VCMJitterEstimator::PostProcessEstimate() {
+ _filterJitterEstimate = CalculateEstimate();
+}
+
+void VCMJitterEstimator::UpdateRtt(int64_t rttMs) {
+ _rttFilter.Update(rttMs);
+}
+
+void VCMJitterEstimator::UpdateMaxFrameSize(uint32_t frameSizeBytes) {
+ if (_maxFrameSize < frameSizeBytes) {
+ _maxFrameSize = frameSizeBytes;
+ }
+}
+
+// Returns the current filtered estimate if available,
+// otherwise tries to calculate an estimate.
+int VCMJitterEstimator::GetJitterEstimate(double rttMultiplier) {
+ double jitterMS = CalculateEstimate() + OPERATING_SYSTEM_JITTER;
+ if (_filterJitterEstimate > jitterMS)
+ jitterMS = _filterJitterEstimate;
+ if (_nackCount >= _nackLimit)
+ jitterMS += _rttFilter.RttMs() * rttMultiplier;
+
+ if (LowRateExperimentEnabled()) {
+ static const double kJitterScaleLowThreshold = 5.0;
+ static const double kJitterScaleHighThreshold = 10.0;
+ double fps = GetFrameRate();
+ // Ignore jitter for very low fps streams.
+ if (fps < kJitterScaleLowThreshold) {
+ if (fps == 0.0) {
+ return jitterMS;
+ }
+ return 0;
+ }
+
+ // Semi-low frame rate; scale by factor linearly interpolated from 0.0 at
+ // kJitterScaleLowThreshold to 1.0 at kJitterScaleHighThreshold.
+ if (fps < kJitterScaleHighThreshold) {
+ jitterMS =
+ (1.0 / (kJitterScaleHighThreshold - kJitterScaleLowThreshold)) *
+ (fps - kJitterScaleLowThreshold) * jitterMS;
+ }
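+ // E.g. at fps = 7.5 the factor is (7.5 - 5) / (10 - 5) = 0.5, halving
+ // the reported jitter for this semi-low frame rate.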
+ }
+
+ return static_cast<uint32_t>(jitterMS + 0.5);
+}
+
+bool VCMJitterEstimator::LowRateExperimentEnabled() {
+ if (low_rate_experiment_ == kInit) {
+ std::string group =
+ webrtc::field_trial::FindFullName("WebRTC-ReducedJitterDelay");
+ if (group == "Disabled") {
+ low_rate_experiment_ = kDisabled;
+ } else {
+ low_rate_experiment_ = kEnabled;
+ }
+ }
+ return low_rate_experiment_ == kEnabled;
+}
+
+double VCMJitterEstimator::GetFrameRate() const {
+ if (fps_counter_.count() == 0)
+ return 0;
+
+ double fps = 1000000.0 / fps_counter_.ComputeMean();
+ // Sanity check.
+ assert(fps >= 0.0);
+ if (fps > kMaxFramerateEstimate) {
+ fps = kMaxFramerateEstimate;
+ }
+ return fps;
+}
+} // namespace webrtc
diff --git a/webrtc/modules/video_coding/jitter_estimator.h b/webrtc/modules/video_coding/jitter_estimator.h
new file mode 100644
index 0000000000..a7b4b3e3df
--- /dev/null
+++ b/webrtc/modules/video_coding/jitter_estimator.h
@@ -0,0 +1,170 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_JITTER_ESTIMATOR_H_
+#define WEBRTC_MODULES_VIDEO_CODING_JITTER_ESTIMATOR_H_
+
+#include "webrtc/base/rollingaccumulator.h"
+#include "webrtc/modules/video_coding/rtt_filter.h"
+#include "webrtc/typedefs.h"
+
+namespace webrtc {
+
+class Clock;
+
+class VCMJitterEstimator {
+ public:
+ VCMJitterEstimator(const Clock* clock,
+ int32_t vcmId = 0,
+ int32_t receiverId = 0);
+ virtual ~VCMJitterEstimator();
+ VCMJitterEstimator& operator=(const VCMJitterEstimator& rhs);
+
+ // Resets the estimate to the initial state
+ void Reset();
+ void ResetNackCount();
+
+ // Updates the jitter estimate with the new data.
+ //
+ // Input:
+ // - frameDelay : Delay-delta calculated by UTILDelayEstimate,
+ // in milliseconds.
+ // - frameSize : Frame size of the current frame.
+ // - incompleteFrame : Flags if the frame is used to update the
+ // estimate before it was complete. Default is false.
+ void UpdateEstimate(int64_t frameDelayMS,
+ uint32_t frameSizeBytes,
+ bool incompleteFrame = false);
+
+ // Returns the current jitter estimate in milliseconds and also adds an
+ // RTT dependent term in cases of retransmission.
+ // Input:
+ // - rttMultiplier : RTT param multiplier (when applicable).
+ //
+ // Return value : Jitter estimate in milliseconds
+ int GetJitterEstimate(double rttMultiplier);
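+ //
+ // Typical call pattern (an illustrative sketch; the caller-side names
+ // here are assumptions, not part of this API):
+ //   VCMJitterEstimator estimator(clock);
+ //   estimator.UpdateEstimate(frame_delay_ms, frame_size_bytes);
+ //   int target_delay_ms = estimator.GetJitterEstimate(rtt_multiplier);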
+
+ // Updates the nack counter.
+ void FrameNacked();
+
+ // Updates the RTT filter.
+ //
+ // Input:
+ // - rttMs : RTT in ms
+ void UpdateRtt(int64_t rttMs);
+
+ void UpdateMaxFrameSize(uint32_t frameSizeBytes);
+
+ // A constant describing the delay from the jitter buffer
+ // to the delay on the receiving side which is not accounted
+ // for by the jitter buffer or the decoding delay estimate.
+ static const uint32_t OPERATING_SYSTEM_JITTER = 10;
+
+ protected:
+ // These are protected to make testing easier.
+ double _theta[2]; // Estimated line parameters (slope, offset)
+ double _varNoise; // Variance of the time-deviation from the line
+
+ virtual bool LowRateExperimentEnabled();
+
+ private:
+ // Updates the Kalman filter for the line describing
+ // the frame size dependent jitter.
+ //
+ // Input:
+ // - frameDelayMS : Delay-delta calculated by UTILDelayEstimate in
+ // milliseconds
+ // - deltaFSBytes : Frame size delta, i.e. frame size at time T
+ // minus frame size at time T-1.
+ void KalmanEstimateChannel(int64_t frameDelayMS, int32_t deltaFSBytes);
+
+ // Updates the random jitter estimate, i.e. the variance
+ // of the time deviations from the line given by the Kalman filter.
+ //
+ // Input:
+ // - d_dT : The deviation from the Kalman estimate.
+ // - incompleteFrame : True if the frame used to update the
+ // estimate was incomplete.
+ void EstimateRandomJitter(double d_dT, bool incompleteFrame);
+
+ double NoiseThreshold() const;
+
+ // Calculates the current jitter estimate.
+ //
+ // Return value : The current jitter estimate in milliseconds
+ double CalculateEstimate();
+
+ // Post process the calculated estimate
+ void PostProcessEstimate();
+
+ // Calculates the difference in delay between a sample and the
+ // expected delay estimated by the Kalman filter.
+ //
+ // Input:
+ // - frameDelayMS : Delay-delta calculated by UTILDelayEstimate in
+ // milliseconds
+ // - deltaFS : Frame size delta, i.e. frame size at time T
+ // minus frame size at time T-1.
+ //
+ // Return value : The difference in milliseconds
+ double DeviationFromExpectedDelay(int64_t frameDelayMS,
+ int32_t deltaFSBytes) const;
+
+ double GetFrameRate() const;
+
+ // Constants, filter parameters
+ int32_t _vcmId;
+ int32_t _receiverId;
+ const double _phi;
+ const double _psi;
+ const uint32_t _alphaCountMax;
+ const double _thetaLow;
+ const uint32_t _nackLimit;
+ const int32_t _numStdDevDelayOutlier;
+ const int32_t _numStdDevFrameSizeOutlier;
+ const double _noiseStdDevs;
+ const double _noiseStdDevOffset;
+
+ double _thetaCov[2][2]; // Estimate covariance
+ double _Qcov[2][2]; // Process noise covariance
+ double _avgFrameSize; // Average frame size
+ double _varFrameSize; // Frame size variance
+ double _maxFrameSize; // Largest frame size received (descending
+ // with a factor _psi)
+ uint32_t _fsSum;
+ uint32_t _fsCount;
+
+ int64_t _lastUpdateT;
+ double _prevEstimate; // The previously returned jitter estimate
+ uint32_t _prevFrameSize; // Frame size of the previous frame
+ double _avgNoise; // Average of the random jitter
+ uint32_t _alphaCount;
+ double _filterJitterEstimate; // The filtered sum of jitter estimates
+
+ uint32_t _startupCount;
+
+ int64_t
+ _latestNackTimestamp; // Timestamp in ms when the latest nack was seen
+ uint32_t _nackCount; // Keeps track of the number of nacks received,
+ // but never goes above _nackLimit
+ VCMRttFilter _rttFilter;
+
+ rtc::RollingAccumulator<uint64_t> fps_counter_;
+ enum ExperimentFlag { kInit, kEnabled, kDisabled };
+ ExperimentFlag low_rate_experiment_;
+ const Clock* clock_;
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_CODING_JITTER_ESTIMATOR_H_
diff --git a/webrtc/modules/video_coding/main/source/jitter_estimator_tests.cc b/webrtc/modules/video_coding/jitter_estimator_tests.cc
index c69c4bcdad..3d46ce2bcd 100644
--- a/webrtc/modules/video_coding/main/source/jitter_estimator_tests.cc
+++ b/webrtc/modules/video_coding/jitter_estimator_tests.cc
@@ -7,7 +7,7 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/video_coding/main/source/jitter_estimator.h"
+#include "webrtc/modules/video_coding/jitter_estimator.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/system_wrappers/include/clock.h"
diff --git a/webrtc/modules/video_coding/main/interface/video_coding.h b/webrtc/modules/video_coding/main/interface/video_coding.h
deleted file mode 100644
index 67f7b635cb..0000000000
--- a/webrtc/modules/video_coding/main/interface/video_coding.h
+++ /dev/null
@@ -1,544 +0,0 @@
-/*
- * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef WEBRTC_MODULES_INTERFACE_VIDEO_CODING_H_
-#define WEBRTC_MODULES_INTERFACE_VIDEO_CODING_H_
-
-#if defined(WEBRTC_WIN)
-// This is a workaround on Windows due to the fact that some Windows
-// headers define CreateEvent as a macro to either CreateEventW or CreateEventA.
-// This can cause problems since we use that name as well and could
-// declare them as one thing here whereas in another place a windows header
-// may have been included and then implementing CreateEvent() causes compilation
-// errors. So for consistency, we include the main windows header here.
-#include <windows.h>
-#endif
-
-#include "webrtc/modules/interface/module.h"
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/modules/video_coding/main/interface/video_coding_defines.h"
-#include "webrtc/system_wrappers/include/event_wrapper.h"
-#include "webrtc/video_frame.h"
-
-namespace webrtc
-{
-
-class Clock;
-class EncodedImageCallback;
-class VideoEncoder;
-class VideoDecoder;
-struct CodecSpecificInfo;
-
-class EventFactory {
- public:
- virtual ~EventFactory() {}
-
- virtual EventWrapper* CreateEvent() = 0;
-};
-
-class EventFactoryImpl : public EventFactory {
- public:
- virtual ~EventFactoryImpl() {}
-
- virtual EventWrapper* CreateEvent() {
- return EventWrapper::Create();
- }
-};
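A factory like EventFactoryImpl above is what the second Create() overload below consumes, letting the module allocate its own synchronization events. A minimal usage sketch, assuming only the Set()/Wait() surface of EventWrapper from webrtc/system_wrappers/include/event_wrapper.h; the 100 ms timeout is arbitrary:

    EventFactoryImpl factory;
    EventWrapper* event = factory.CreateEvent();
    event->Set();      // Signal the event...
    event->Wait(100);  // ...so this returns immediately (max wait in ms).
    delete event;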
-
-// Indicates which decode-with-errors mode should be used.
-enum VCMDecodeErrorMode {
- kNoErrors, // Never decode with errors. Video will freeze
- // if nack is disabled.
- kSelectiveErrors, // Frames that are determined decodable in
- // VCMSessionInfo may be decoded with missing
- // packets. As not all incomplete frames will be
- // decodable, video will freeze if nack is disabled.
- kWithErrors // Release frames as needed. Errors may be
- // introduced as some encoded frames may not be
- // complete.
-};
-
-class VideoCodingModule : public Module
-{
-public:
- enum SenderNackMode {
- kNackNone,
- kNackAll,
- kNackSelective
- };
-
- enum ReceiverRobustness {
- kNone,
- kHardNack,
- kSoftNack,
- kReferenceSelection
- };
-
- static VideoCodingModule* Create(
- Clock* clock,
- VideoEncoderRateObserver* encoder_rate_observer,
- VCMQMSettingsCallback* qm_settings_callback);
-
- static VideoCodingModule* Create(Clock* clock, EventFactory* event_factory);
-
- static void Destroy(VideoCodingModule* module);
-
- // Get number of supported codecs
- //
- // Return value : Number of supported codecs
- static uint8_t NumberOfCodecs();
-
- // Get supported codec settings using codec id
- //
- // Input:
- // - listId : Id or index of the codec to look up
- // - codec : Memory where the codec settings will be stored
- //
- // Return value : VCM_OK, on success
- // VCM_PARAMETER_ERROR if codec not supported or id too high
- static int32_t Codec(const uint8_t listId, VideoCodec* codec);
-
- // Get supported codec settings using codec type
- //
- // Input:
- // - codecType : The codec type to get settings for
- // - codec : Memory where the codec settings will be stored
- //
- // Return value : VCM_OK, on success
- // VCM_PARAMETER_ERROR if codec not supported
- static int32_t Codec(VideoCodecType codecType, VideoCodec* codec);
-
- /*
- * Sender
- */
-
- // Registers a codec to be used for encoding. Calling this
- // API multiple times overwrites any previously registered codecs.
- //
- // NOTE: Must be called on the thread that constructed the VCM instance.
- //
- // Input:
- // - sendCodec : Settings for the codec to be registered.
- // - numberOfCores : The number of cores the codec is allowed
- // to use.
- // - maxPayloadSize : The maximum size each payload is allowed
- // to have. Usually MTU - overhead.
- //
- // Return value : VCM_OK, on success.
- // < 0, on error.
- virtual int32_t RegisterSendCodec(const VideoCodec* sendCodec,
- uint32_t numberOfCores,
- uint32_t maxPayloadSize) = 0;
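Taken together with the static Codec() lookups above, this is the usual sender bootstrap. A hedged sketch, assuming vcm was obtained from Create() and that VCM_OK (0) signals success; one core and a 1200-byte payload cap are illustrative values, not recommendations:

    VideoCodec codec;
    if (VideoCodingModule::Codec(kVideoCodecVP8, &codec) == VCM_OK) {
      codec.startBitrate = 300;  // kbit/s, illustrative.
      codec.maxFramerate = 30;
      if (vcm->RegisterSendCodec(&codec, 1 /* cores */,
                                 1200 /* max payload bytes */) != VCM_OK) {
        // Registration failed; check the codec settings.
      }
    }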
-
- // Get the current send codec in use.
- //
- // If a codec has not been set yet, the |id| property of the return value
- // will be 0 and |name| empty.
- //
- // NOTE: This method intentionally does not hold locks and minimizes data
- // copying. It must be called on the thread where the VCM was constructed.
- virtual const VideoCodec& GetSendCodec() const = 0;
-
- // DEPRECATED: Use GetSendCodec() instead.
- //
- // API to get the current send codec in use.
- //
- // Input:
- // - currentSendCodec : Address where the sendCodec will be written.
- //
- // Return value : VCM_OK, on success.
- // < 0, on error.
- //
- // NOTE: The returned codec information is not guaranteed to be current when
- // the call returns. This method acquires a lock that is aligned with
- // video encoding, so callers should assume it may block for
- // several milliseconds.
- virtual int32_t SendCodec(VideoCodec* currentSendCodec) const = 0;
-
- // DEPRECATED: Use GetSendCodec() instead.
- //
- // API to get the current send codec type
- //
- // Return value : Codec type, on success.
- // kVideoCodecUnknown, on error or if no send codec is set
- // NOTE: Same notes apply as for SendCodec() above.
- virtual VideoCodecType SendCodec() const = 0;
-
- // Register an external encoder object. This can not be used together with
- // external decoder callbacks.
- //
- // Input:
- // - externalEncoder : Encoder object to be used for encoding frames inserted
- // with the AddVideoFrame API.
- // - payloadType : The payload type which this encoder is bound to.
- //
- // Return value : VCM_OK, on success.
- // < 0, on error.
- virtual int32_t RegisterExternalEncoder(VideoEncoder* externalEncoder,
- uint8_t payloadType,
- bool internalSource = false) = 0;
-
- // API to get currently configured encoder target bitrate in bits/s.
- //
- // Return value : 0, on success.
- // < 0, on error.
- virtual int Bitrate(unsigned int* bitrate) const = 0;
-
- // API to get currently configured encoder target frame rate.
- //
- // Return value : 0, on success.
- // < 0, on error.
- virtual int FrameRate(unsigned int* framerate) const = 0;
-
- // Sets the parameters describing the send channel. These parameters are inputs to the
- // Media Optimization inside the VCM and also specify the target bit rate for the
- // encoder. Bit rate used by NACK should already be compensated for by the user.
- //
- // Input:
- // - target_bitrate : The target bitrate for VCM in bits/s.
- // - lossRate : Fraction of packets lost during the past second.
- // (loss rate in percent = 100 * packetLoss / 255)
- // - rtt : Current round-trip time in ms.
- //
- // Return value : VCM_OK, on success.
- // < 0, on error.
- virtual int32_t SetChannelParameters(uint32_t target_bitrate,
- uint8_t lossRate,
- int64_t rtt) = 0;
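Note that lossRate is in RTP fraction-lost units, so a measured percentage must be scaled onto 0..255 before the call. A small sketch of that conversion, inverting the formula in the comment above; the 10% loss and the other values are examples only:

    // 10% loss -> fraction-of-255 units: 255 * 10 / 100 = 25 (truncated).
    const uint8_t lossRate = static_cast<uint8_t>(255 * 10 / 100);
    vcm->SetChannelParameters(500000 /* target bits/s */, lossRate,
                              80 /* rtt in ms */);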
-
- // Sets the parameters describing the receive channel. These parameters are inputs to the
- // Media Optimization inside the VCM.
- //
- // Input:
- // - rtt : Current round-trip time in ms; in a conference
- // scenario, the RTT of the link with the most
- // available bandwidth.
- //
- // Return value : VCM_OK, on success.
- // < 0, on error.
- virtual int32_t SetReceiveChannelParameters(int64_t rtt) = 0;
-
- // Register a transport callback which will be called to deliver the encoded data and
- // side information.
- //
- // Input:
- // - transport : The callback object to register.
- //
- // Return value : VCM_OK, on success.
- // < 0, on error.
- virtual int32_t RegisterTransportCallback(VCMPacketizationCallback* transport) = 0;
-
- // Register video output information callback which will be called to deliver information
- // about the video stream produced by the encoder, for instance the average frame rate and
- // bit rate.
- //
- // Input:
- // - outputInformation : The callback object to register.
- //
- // Return value : VCM_OK, on success.
- // < 0, on error.
- virtual int32_t RegisterSendStatisticsCallback(
- VCMSendStatisticsCallback* sendStats) = 0;
-
- // Register a video protection callback which will be called to deliver
- // the requested FEC rate and NACK status (on/off).
- //
- // Input:
- // - protection : The callback object to register.
- //
- // Return value : VCM_OK, on success.
- // < 0, on error.
- virtual int32_t RegisterProtectionCallback(VCMProtectionCallback* protection) = 0;
-
- // Enable or disable a video protection method.
- //
- // Input:
- // - videoProtection : The method to enable or disable.
- // - enable : True if the method should be enabled, false if
- // it should be disabled.
- //
- // Return value : VCM_OK, on success.
- // < 0, on error.
- virtual int32_t SetVideoProtection(VCMVideoProtection videoProtection,
- bool enable) = 0;
-
- // Add one raw video frame to the encoder. This function does all the necessary
- // processing, then decides what frame type to encode, or if the frame should be
- // dropped. If the frame should be encoded it passes the frame to the encoder
- // before it returns.
- //
- // Input:
- // - videoFrame : Video frame to encode.
- // - codecSpecificInfo : Extra codec information, e.g., pre-parsed in-band signaling.
- //
- // Return value : VCM_OK, on success.
- // < 0, on error.
- virtual int32_t AddVideoFrame(
- const VideoFrame& videoFrame,
- const VideoContentMetrics* contentMetrics = NULL,
- const CodecSpecificInfo* codecSpecificInfo = NULL) = 0;
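The send-side pattern is to push every captured frame through AddVideoFrame() and let the module decide whether to encode or drop it. A sketch; CaptureNextFrame() is a hypothetical stand-in for whatever source produces VideoFrame objects:

    VideoFrame frame;
    while (CaptureNextFrame(&frame)) {  // Hypothetical capture source.
      // Content metrics and codec-specific info default to NULL.
      if (vcm->AddVideoFrame(frame) != VCM_OK) {
        // The frame was rejected; a drop here is not necessarily an error.
      }
    }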
-
- // Next frame encoded should be an intra frame (keyframe).
- //
- // Return value : VCM_OK, on success.
- // < 0, on error.
- virtual int32_t IntraFrameRequest(int stream_index) = 0;
-
- // Enables or disables the frame dropper, which drops frames when the encoder
- // over-uses its bit rate. This API is designed to be used when the encoded frames
- // are supposed to be stored to an AVI file, or when the I420 codec is used and the
- // target bit rate shouldn't affect the frame rate.
- //
- // Input:
- // - enable : True to enable the setting, false to disable it.
- //
- // Return value : VCM_OK, on success.
- // < 0, on error.
- virtual int32_t EnableFrameDropper(bool enable) = 0;
-
-
- /*
- * Receiver
- */
-
- // Register possible receive codecs, can be called multiple times for different codecs.
- // The module will automatically switch between registered codecs depending on the
- // payload type of incoming frames. The actual decoder will be created when needed.
- //
- // Input:
- // - receiveCodec : Settings for the codec to be registered.
- // - numberOfCores : Number of CPU cores that the decoder is allowed to use.
- // - requireKeyFrame : Set this to true if you don't want any delta frames
- // to be decoded until the first key frame has been decoded.
- //
- // Return value : VCM_OK, on success.
- // < 0, on error.
- virtual int32_t RegisterReceiveCodec(const VideoCodec* receiveCodec,
- int32_t numberOfCores,
- bool requireKeyFrame = false) = 0;
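Mirroring the sender setup, the receiver registers every payload type it may be sent, and the module selects a decoder by payload type at runtime. A sketch under the same VCM_OK assumption; payload type 100 is illustrative:

    VideoCodec recvCodec;
    VideoCodingModule::Codec(kVideoCodecVP8, &recvCodec);
    recvCodec.plType = 100;  // Must match the incoming RTP payload type.
    if (vcm->RegisterReceiveCodec(&recvCodec, 1 /* cores */) != VCM_OK) {
      // Registration failed.
    }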
-
- // Register an externally defined decoder/renderer object. Can be a decoder only or a
- // decoder coupled with a renderer. Note that RegisterReceiveCodec must also be
- // called for the decoder to be used for decoding incoming streams.
- //
- // Input:
- // - externalDecoder : The external decoder/renderer object.
- // - payloadType : The payload type which this decoder should be
- // registered to.
- // - internalRenderTiming : True if the internal renderer (if any) of the decoder
- // object can make sure to render at a given time in ms.
- //
- // Return value : VCM_OK, on success.
- // < 0, on error.
- virtual int32_t RegisterExternalDecoder(VideoDecoder* externalDecoder,
- uint8_t payloadType,
- bool internalRenderTiming) = 0;
-
- // Register a receive callback. Will be called whenever there is a new frame ready
- // for rendering.
- //
- // Input:
- // - receiveCallback : The callback object to be used by the module when a
- // frame is ready for rendering.
- // De-register with a NULL pointer.
- //
- // Return value : VCM_OK, on success.
- // < 0, on error.
- virtual int32_t RegisterReceiveCallback(VCMReceiveCallback* receiveCallback) = 0;
-
- // Register a receive statistics callback which will be called to deliver information
- // about the video stream received by the receiving side of the VCM, for instance the
- // average frame rate and bit rate.
- //
- // Input:
- // - receiveStats : The callback object to register.
- //
- // Return value : VCM_OK, on success.
- // < 0, on error.
- virtual int32_t RegisterReceiveStatisticsCallback(
- VCMReceiveStatisticsCallback* receiveStats) = 0;
-
- // Register a decoder timing callback which will be called to deliver
- // information about the timing of the decoder in the receiving side of the
- // VCM, for instance the current and maximum frame decode latency.
- //
- // Input:
- // - decoderTiming : The callback object to register.
- //
- // Return value : VCM_OK, on success.
- // < 0, on error.
- virtual int32_t RegisterDecoderTimingCallback(
- VCMDecoderTimingCallback* decoderTiming) = 0;
-
- // Register a frame type request callback. This callback will be called when the
- // module needs to request specific frame types from the send side.
- //
- // Input:
- // - frameTypeCallback : The callback object to be used by the module when
- // requesting a specific type of frame from the send side.
- // De-register with a NULL pointer.
- //
- // Return value : VCM_OK, on success.
- // < 0, on error.
- virtual int32_t RegisterFrameTypeCallback(
- VCMFrameTypeCallback* frameTypeCallback) = 0;
-
- // Registers a callback which is called whenever the receive side of the VCM
- // encounters holes in the packet sequence and needs packets to be retransmitted.
- //
- // Input:
- // - callback : The callback to be registered in the VCM.
- //
- // Return value : VCM_OK, on success.
- // <0, on error.
- virtual int32_t RegisterPacketRequestCallback(
- VCMPacketRequestCallback* callback) = 0;
-
- // Waits for the next frame in the jitter buffer to become complete
- // (waits no longer than maxWaitTimeMs), then passes it to the decoder for decoding.
- // Should be called as often as possible to get the most out of the decoder.
- //
- // Return value : VCM_OK, on success.
- // < 0, on error.
- virtual int32_t Decode(uint16_t maxWaitTimeMs = 200) = 0;
-
- // Registers a callback which conveys the size of the render buffer.
- virtual int RegisterRenderBufferSizeCallback(
- VCMRenderBufferSizeCallback* callback) = 0;
-
- // Reset the decoder state to the initial state.
- //
- // Return value : VCM_OK, on success.
- // < 0, on error.
- virtual int32_t ResetDecoder() = 0;
-
- // API to get the codec which is currently used for decoding by the module.
- //
- // Input:
- // - currentReceiveCodec : Settings for the codec to be registered.
- //
- // Return value : VCM_OK, on success.
- // < 0, on error.
- virtual int32_t ReceiveCodec(VideoCodec* currentReceiveCodec) const = 0;
-
- // API to get the codec type currently used for decoding by the module.
- //
- // Return value : codec type, on success.
- // kVideoCodecUnknown, on error or if no receive codec is registered
- virtual VideoCodecType ReceiveCodec() const = 0;
-
- // Insert a parsed packet into the receiver side of the module. Will be placed in the
- // jitter buffer waiting for the frame to become complete. Returns as soon as the packet
- // has been placed in the jitter buffer.
- //
- // Input:
- // - incomingPayload : Payload of the packet.
- // - payloadLength : Length of the payload.
- // - rtpInfo : The parsed header.
- //
- // Return value : VCM_OK, on success.
- // < 0, on error.
- virtual int32_t IncomingPacket(const uint8_t* incomingPayload,
- size_t payloadLength,
- const WebRtcRTPHeader& rtpInfo) = 0;
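IncomingPacket() is the receive-side entry point: the caller depacketizes RTP and hands over payload plus parsed header, while Decode() drains the jitter buffer (typically on its own thread). A sketch; ParseRtp() is hypothetical and stands for the caller's existing RTP parsing:

    WebRtcRTPHeader rtpInfo;
    const uint8_t* payload = NULL;
    size_t payloadLength = 0;
    if (ParseRtp(packet, packetSize, &rtpInfo, &payload, &payloadLength)) {
      vcm->IncomingPacket(payload, payloadLength, rtpInfo);  // Returns fast.
    }
    // Elsewhere, on a decode thread:
    vcm->Decode(50 /* maxWaitTimeMs */);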
-
- // Minimum playout delay (Used for lip-sync). This is the minimum delay required
- // to sync with audio. Not included in VideoCodingModule::Delay()
- // Defaults to 0 ms.
- //
- // Input:
- // - minPlayoutDelayMs : Additional delay in ms.
- //
- // Return value : VCM_OK, on success.
- // < 0, on error.
- virtual int32_t SetMinimumPlayoutDelay(uint32_t minPlayoutDelayMs) = 0;
-
- // Set the time required by the renderer to render a frame.
- //
- // Input:
- // - timeMS : The time in ms required by the renderer to render a frame.
- //
- // Return value : VCM_OK, on success.
- // < 0, on error.
- virtual int32_t SetRenderDelay(uint32_t timeMS) = 0;
-
- // The total delay desired by the VCM. Can be less than the minimum
- // delay set with SetMinimumPlayoutDelay.
- //
- // Return value : Total delay in ms, on success.
- // < 0, on error.
- virtual int32_t Delay() const = 0;
-
- // Returns the number of packets discarded by the jitter buffer due to being
- // too late. This can include duplicated packets which arrived after the
- // frame was sent to the decoder. Therefore packets which were prematurely
- // NACKed will be counted.
- virtual uint32_t DiscardedPackets() const = 0;
-
-
- // Robustness APIs
-
- // Set the receiver robustness mode. The mode decides how the receiver
- // responds to losses in the stream. The type of counter-measure (soft or
- // hard NACK, dual decoder, RPS, etc.) is selected through the
- // robustnessMode parameter. The errorMode parameter decides if it is
- // allowed to display frames corrupted by losses. Note that not all
- // combinations of the two parameters are feasible. An error will be
- // returned for invalid combinations.
- // Input:
- // - robustnessMode : selected robustness mode.
- // - errorMode : selected error mode.
- //
- // Return value : VCM_OK, on success;
- // < 0, on error.
- virtual int SetReceiverRobustnessMode(ReceiverRobustness robustnessMode,
- VCMDecodeErrorMode errorMode) = 0;
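Since not every (robustnessMode, errorMode) pair is feasible, the return value has to be checked. A sketch of the common hard-NACK, no-corrupt-frames configuration; which other pairs are accepted is decided inside the module:

    if (vcm->SetReceiverRobustnessMode(VideoCodingModule::kHardNack,
                                       kNoErrors) < 0) {
      // Combination rejected; fall back to the default mode.
    }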
-
- // Set the decode error mode. The mode decides which errors (if any) are
- // allowed in decodable frames. Note that setting decode_error_mode to
- // anything other than kWithErrors without enabling nack will cause
- // long-term freezes (resulting from frequent key frame requests) if
- // packet loss occurs.
- virtual void SetDecodeErrorMode(VCMDecodeErrorMode decode_error_mode) = 0;
-
- // Sets the maximum number of sequence numbers that we are allowed to NACK
- // and the oldest sequence number that we will consider to NACK. If a
- // sequence number older than |max_packet_age_to_nack| is missing
- // a key frame will be requested. A key frame will also be requested if the
- // time of incomplete or non-continuous frames in the jitter buffer is above
- // |max_incomplete_time_ms|.
- virtual void SetNackSettings(size_t max_nack_list_size,
- int max_packet_age_to_nack,
- int max_incomplete_time_ms) = 0;
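A sketch with values of the kind commonly seen in WebRTC call-setup code (a 250-entry NACK list, a 450-packet age cutoff, incomplete-time requests disabled); treat them as illustrative, not prescribed defaults:

    vcm->SetNackSettings(250 /* max_nack_list_size */,
                         450 /* max_packet_age_to_nack */,
                         0 /* max_incomplete_time_ms; 0 disables the check */);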
-
- // Sets a desired delay for the VCM receiver. Video rendering will be
- // delayed by at least desired_delay_ms.
- virtual int SetMinReceiverDelay(int desired_delay_ms) = 0;
-
- // Lets the sender suspend video when the rate drops below
- // |threshold_bps|, and turns back on when the rate goes back up above
- // |threshold_bps| + |window_bps|.
- virtual void SuspendBelowMinBitrate() = 0;
-
- // Returns true if SuspendBelowMinBitrate is engaged and the video has been
- // suspended due to bandwidth limitations; otherwise false.
- virtual bool VideoSuspended() const = 0;
-
- virtual void RegisterPreDecodeImageCallback(
- EncodedImageCallback* observer) = 0;
- virtual void RegisterPostEncodeImageCallback(
- EncodedImageCallback* post_encode_callback) = 0;
- // Releases pending decode calls, permitting faster thread shutdown.
- virtual void TriggerDecoderShutdown() = 0;
-};
-
-} // namespace webrtc
-
-#endif // WEBRTC_MODULES_INTERFACE_VIDEO_CODING_H_
diff --git a/webrtc/modules/video_coding/main/source/OWNERS b/webrtc/modules/video_coding/main/source/OWNERS
deleted file mode 100644
index 3ee6b4bf5f..0000000000
--- a/webrtc/modules/video_coding/main/source/OWNERS
+++ /dev/null
@@ -1,5 +0,0 @@
-
-# These are for the common case of adding or renaming files. If you're doing
-# structural changes, please get a review from a reviewer in this file.
-per-file *.gyp=*
-per-file *.gypi=*
diff --git a/webrtc/modules/video_coding/main/source/codec_timer.cc b/webrtc/modules/video_coding/main/source/codec_timer.cc
deleted file mode 100644
index a462258813..0000000000
--- a/webrtc/modules/video_coding/main/source/codec_timer.cc
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "webrtc/modules/video_coding/main/source/codec_timer.h"
-
-#include <assert.h>
-
-namespace webrtc
-{
-
-// The first kIgnoredSampleCount samples will be ignored.
-static const int32_t kIgnoredSampleCount = 5;
-
-VCMCodecTimer::VCMCodecTimer()
-:
-_filteredMax(0),
-_ignoredSampleCount(0),
-_shortMax(0),
-_history()
-{
- Reset();
-}
-
-int32_t VCMCodecTimer::StopTimer(int64_t startTimeMs, int64_t nowMs)
-{
- const int32_t timeDiff = static_cast<int32_t>(nowMs - startTimeMs);
- MaxFilter(timeDiff, nowMs);
- return timeDiff;
-}
-
-void VCMCodecTimer::Reset()
-{
- _filteredMax = 0;
- _ignoredSampleCount = 0;
- _shortMax = 0;
- for (int i=0; i < MAX_HISTORY_SIZE; i++)
- {
- _history[i].shortMax = 0;
- _history[i].timeMs = -1;
- }
-}
-
-// Update the max-value filter
-void VCMCodecTimer::MaxFilter(int32_t decodeTime, int64_t nowMs)
-{
- if (_ignoredSampleCount >= kIgnoredSampleCount)
- {
- UpdateMaxHistory(decodeTime, nowMs);
- ProcessHistory(nowMs);
- }
- else
- {
- _ignoredSampleCount++;
- }
-}
-
-void
-VCMCodecTimer::UpdateMaxHistory(int32_t decodeTime, int64_t now)
-{
- if (_history[0].timeMs >= 0 &&
- now - _history[0].timeMs < SHORT_FILTER_MS)
- {
- if (decodeTime > _shortMax)
- {
- _shortMax = decodeTime;
- }
- }
- else
- {
- // Only add a new value to the history once a second
- if(_history[0].timeMs == -1)
- {
- // First, no shift
- _shortMax = decodeTime;
- }
- else
- {
- // Shift
- for(int i = (MAX_HISTORY_SIZE - 2); i >= 0 ; i--)
- {
- _history[i+1].shortMax = _history[i].shortMax;
- _history[i+1].timeMs = _history[i].timeMs;
- }
- }
- if (_shortMax == 0)
- {
- _shortMax = decodeTime;
- }
-
- _history[0].shortMax = _shortMax;
- _history[0].timeMs = now;
- _shortMax = 0;
- }
-}
-
-void
-VCMCodecTimer::ProcessHistory(int64_t nowMs)
-{
- _filteredMax = _shortMax;
- if (_history[0].timeMs == -1)
- {
- return;
- }
- for (int i=0; i < MAX_HISTORY_SIZE; i++)
- {
- if (_history[i].timeMs == -1)
- {
- break;
- }
- if (nowMs - _history[i].timeMs > MAX_HISTORY_SIZE * SHORT_FILTER_MS)
- {
- // This sample (and all samples after this) is too old
- break;
- }
- if (_history[i].shortMax > _filteredMax)
- {
- // This sample is the largest one this far into the history
- _filteredMax = _history[i].shortMax;
- }
- }
-}
-
-// Get the maximum observed time within a time window
-int32_t VCMCodecTimer::RequiredDecodeTimeMs(FrameType /*frameType*/) const
-{
- return _filteredMax;
-}
-
-}
diff --git a/webrtc/modules/video_coding/main/source/codec_timer.h b/webrtc/modules/video_coding/main/source/codec_timer.h
deleted file mode 100644
index 9268e8d817..0000000000
--- a/webrtc/modules/video_coding/main/source/codec_timer.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef WEBRTC_MODULES_VIDEO_CODING_CODEC_TIMER_H_
-#define WEBRTC_MODULES_VIDEO_CODING_CODEC_TIMER_H_
-
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/typedefs.h"
-
-namespace webrtc
-{
-
-// MAX_HISTORY_SIZE * SHORT_FILTER_MS defines the window size in milliseconds
-#define MAX_HISTORY_SIZE 10
-#define SHORT_FILTER_MS 1000
-
-class VCMShortMaxSample
-{
-public:
- VCMShortMaxSample() : shortMax(0), timeMs(-1) {};
-
- int32_t shortMax;
- int64_t timeMs;
-};
-
-class VCMCodecTimer
-{
-public:
- VCMCodecTimer();
-
- // Updates and returns the max filtered decode time.
- int32_t StopTimer(int64_t startTimeMs, int64_t nowMs);
-
- // Empty the list of timers.
- void Reset();
-
- // Get the required decode time in ms.
- int32_t RequiredDecodeTimeMs(FrameType frameType) const;
-
-private:
- void UpdateMaxHistory(int32_t decodeTime, int64_t now);
- void MaxFilter(int32_t newTime, int64_t nowMs);
- void ProcessHistory(int64_t nowMs);
-
- int32_t _filteredMax;
- // The number of samples ignored so far.
- int32_t _ignoredSampleCount;
- int32_t _shortMax;
- VCMShortMaxSample _history[MAX_HISTORY_SIZE];
-
-};
-
-} // namespace webrtc
-
-#endif // WEBRTC_MODULES_VIDEO_CODING_CODEC_TIMER_H_
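The timer above is driven once per decoded frame: StopTimer() feeds a sample and RequiredDecodeTimeMs() reads the filtered maximum over the MAX_HISTORY_SIZE * SHORT_FILTER_MS (10 s) window. A usage sketch, assuming any millisecond clock; DecodeOneFrame() is hypothetical:

    VCMCodecTimer timer;
    int64_t startMs = clock->TimeInMilliseconds();
    DecodeOneFrame();  // Hypothetical decode call being measured.
    timer.StopTimer(startMs, clock->TimeInMilliseconds());
    // After the first 5 warm-up samples are ignored, this is the window max:
    int32_t decodeBudgetMs = timer.RequiredDecodeTimeMs(kVideoFrameDelta);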
diff --git a/webrtc/modules/video_coding/main/source/encoded_frame.h b/webrtc/modules/video_coding/main/source/encoded_frame.h
deleted file mode 100644
index 608578c35d..0000000000
--- a/webrtc/modules/video_coding/main/source/encoded_frame.h
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef WEBRTC_MODULES_VIDEO_CODING_ENCODED_FRAME_H_
-#define WEBRTC_MODULES_VIDEO_CODING_ENCODED_FRAME_H_
-
-#include <vector>
-
-#include "webrtc/common_types.h"
-#include "webrtc/common_video/interface/video_image.h"
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
-#include "webrtc/modules/video_coding/main/interface/video_coding_defines.h"
-
-namespace webrtc
-{
-
-class VCMEncodedFrame : protected EncodedImage
-{
-public:
- VCMEncodedFrame();
- VCMEncodedFrame(const webrtc::EncodedImage& rhs);
- VCMEncodedFrame(const VCMEncodedFrame& rhs);
-
- ~VCMEncodedFrame();
- /**
- * Deletes the frame buffer and resets members to zero
- */
- void Free();
- /**
- * Set render time in milliseconds
- */
- void SetRenderTime(const int64_t renderTimeMs) {_renderTimeMs = renderTimeMs;}
-
- /**
- * Set the encoded frame size
- */
- void SetEncodedSize(uint32_t width, uint32_t height)
- { _encodedWidth = width; _encodedHeight = height; }
- /**
- * Get the encoded image
- */
- const webrtc::EncodedImage& EncodedImage() const
- { return static_cast<const webrtc::EncodedImage&>(*this); }
- /**
- * Get pointer to frame buffer
- */
- const uint8_t* Buffer() const {return _buffer;}
- /**
- * Get frame length
- */
- size_t Length() const {return _length;}
- /**
- * Get frame timestamp (90kHz)
- */
- uint32_t TimeStamp() const {return _timeStamp;}
- /**
- * Get render time in milliseconds
- */
- int64_t RenderTimeMs() const {return _renderTimeMs;}
- /**
- * Get frame type
- */
- webrtc::FrameType FrameType() const { return _frameType; }
- /**
- * Get frame rotation
- */
- VideoRotation rotation() const { return _rotation; }
- /**
- * True if this frame is complete, false otherwise
- */
- bool Complete() const { return _completeFrame; }
- /**
- * True if there's a frame missing before this frame
- */
- bool MissingFrame() const { return _missingFrame; }
- /**
- * Payload type of the encoded payload
- */
- uint8_t PayloadType() const { return _payloadType; }
- /**
- * Get codec specific info.
- * The returned pointer is only valid as long as the VCMEncodedFrame
- * is valid. Also, VCMEncodedFrame owns the pointer and will delete
- * the object.
- */
- const CodecSpecificInfo* CodecSpecific() const {return &_codecSpecificInfo;}
-
- const RTPFragmentationHeader* FragmentationHeader() const;
-
-protected:
- /**
- * Verifies that the currently allocated buffer size is larger than or equal to the input size.
- * If the current buffer size is smaller, a new allocation is made and the old buffer data
- * is copied to the new buffer.
- * Buffer size is updated to minimumSize.
- */
- void VerifyAndAllocate(size_t minimumSize);
-
- void Reset();
-
- void CopyCodecSpecific(const RTPVideoHeader* header);
-
- int64_t _renderTimeMs;
- uint8_t _payloadType;
- bool _missingFrame;
- CodecSpecificInfo _codecSpecificInfo;
- webrtc::VideoCodecType _codec;
- RTPFragmentationHeader _fragmentation;
- VideoRotation _rotation;
-
- // Video rotation is only set along with the last packet for each frame
- // (same as marker bit). This |_rotation_set| is only for debugging purpose
- // to ensure we don't set it twice for a frame.
- bool _rotation_set;
-};
-
-} // namespace webrtc
-
-#endif // WEBRTC_MODULES_VIDEO_CODING_ENCODED_FRAME_H_
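VerifyAndAllocate() above documents the usual grow-only buffer policy: reallocate and copy only when the requested size exceeds the current one. A minimal free-standing sketch of the same policy, using std::vector in place of the raw buffer members:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Grow-only: existing contents are preserved, and the buffer never
    // shrinks, mirroring the VerifyAndAllocate() contract above.
    void VerifyAndAllocateSketch(std::vector<uint8_t>* buffer,
                                 size_t minimumSize) {
      if (buffer->size() < minimumSize)
        buffer->resize(minimumSize);
    }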
diff --git a/webrtc/modules/video_coding/main/source/fec_tables_xor.h b/webrtc/modules/video_coding/main/source/fec_tables_xor.h
deleted file mode 100644
index 28c67b4565..0000000000
--- a/webrtc/modules/video_coding/main/source/fec_tables_xor.h
+++ /dev/null
@@ -1,6481 +0,0 @@
-/*
- * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef WEBRTC_MODULES_VIDEO_CODING_SOURCE_FEC_TABLES_XOR_H_
-#define WEBRTC_MODULES_VIDEO_CODING_SOURCE_FEC_TABLES_XOR_H_
-
-// This is a private header for media_opt_util.cc.
-// It should not be included by other files.
-
-namespace webrtc {
-
-// Table for Protection factor (code rate) of delta frames, for the XOR FEC.
-// Input is the packet loss and an effective rate (bits/frame).
-// Output is array kCodeRateXORTable[k], where k = rate_i*129 + loss_j;
-// loss_j = 0,1,...,128, and rate_i = 0,1,...,49 (6450 / 129 = 50 rate bins).
-static const int kSizeCodeRateXORTable = 6450;
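The indexing rule in the comment above maps directly to a lookup helper. A sketch, assuming the kCodeRateXORTable defined just below; the clamping bounds follow from 6450 = 50 * 129, i.e. 50 effective-rate bins of 129 loss entries each:

    // loss_j in [0, 128], rate_i in [0, 49]; returns the XOR FEC code rate.
    unsigned char CodeRateXor(int rate_i, int loss_j) {
      if (rate_i < 0) rate_i = 0;
      if (rate_i > 49) rate_i = 49;
      if (loss_j < 0) loss_j = 0;
      if (loss_j > 128) loss_j = 128;
      return kCodeRateXORTable[rate_i * 129 + loss_j];
    }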
-static const unsigned char kCodeRateXORTable[kSizeCodeRateXORTable] = {
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-11,
-11,
-11,
-11,
-11,
-11,
-11,
-11,
-11,
-11,
-11,
-11,
-11,
-11,
-11,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-8,
-8,
-8,
-8,
-8,
-8,
-8,
-8,
-8,
-8,
-8,
-8,
-8,
-8,
-8,
-30,
-30,
-30,
-30,
-30,
-30,
-30,
-30,
-30,
-30,
-30,
-30,
-30,
-30,
-30,
-56,
-56,
-56,
-56,
-56,
-56,
-56,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-87,
-87,
-87,
-87,
-87,
-87,
-87,
-87,
-87,
-87,
-87,
-87,
-87,
-87,
-87,
-87,
-87,
-87,
-87,
-87,
-87,
-87,
-87,
-87,
-78,
-78,
-78,
-78,
-78,
-78,
-78,
-78,
-78,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-6,
-6,
-6,
-23,
-23,
-23,
-23,
-23,
-23,
-23,
-23,
-23,
-23,
-23,
-23,
-23,
-23,
-23,
-44,
-44,
-44,
-44,
-44,
-44,
-50,
-50,
-50,
-50,
-50,
-50,
-50,
-50,
-50,
-68,
-68,
-68,
-68,
-68,
-68,
-68,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-105,
-105,
-105,
-105,
-105,
-105,
-105,
-105,
-105,
-105,
-105,
-105,
-105,
-105,
-105,
-105,
-105,
-105,
-105,
-105,
-105,
-105,
-105,
-105,
-88,
-88,
-88,
-88,
-88,
-88,
-88,
-88,
-88,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-5,
-5,
-5,
-5,
-5,
-5,
-19,
-19,
-19,
-36,
-41,
-41,
-41,
-41,
-41,
-41,
-41,
-41,
-41,
-41,
-41,
-41,
-41,
-41,
-55,
-55,
-55,
-55,
-55,
-55,
-69,
-69,
-69,
-69,
-69,
-69,
-69,
-69,
-69,
-75,
-75,
-80,
-80,
-80,
-80,
-80,
-97,
-97,
-97,
-97,
-97,
-97,
-97,
-97,
-97,
-97,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-116,
-116,
-116,
-116,
-116,
-116,
-116,
-116,
-116,
-116,
-116,
-116,
-116,
-116,
-116,
-116,
-116,
-116,
-116,
-116,
-116,
-116,
-116,
-116,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-4,
-16,
-16,
-16,
-16,
-16,
-16,
-30,
-35,
-35,
-47,
-58,
-58,
-58,
-58,
-58,
-58,
-58,
-58,
-58,
-58,
-58,
-58,
-58,
-58,
-63,
-63,
-63,
-63,
-63,
-63,
-77,
-77,
-77,
-77,
-77,
-77,
-77,
-82,
-82,
-82,
-82,
-94,
-94,
-94,
-94,
-94,
-105,
-105,
-105,
-105,
-110,
-110,
-110,
-110,
-110,
-110,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-115,
-115,
-115,
-115,
-115,
-115,
-115,
-115,
-115,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-4,
-14,
-27,
-27,
-27,
-27,
-27,
-31,
-41,
-52,
-52,
-56,
-69,
-69,
-69,
-69,
-69,
-69,
-69,
-69,
-69,
-69,
-69,
-69,
-69,
-69,
-69,
-69,
-69,
-69,
-69,
-69,
-79,
-79,
-79,
-79,
-83,
-83,
-83,
-94,
-94,
-94,
-94,
-106,
-106,
-106,
-106,
-106,
-115,
-115,
-115,
-115,
-125,
-125,
-125,
-125,
-125,
-125,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-0,
-0,
-0,
-3,
-3,
-3,
-17,
-28,
-38,
-38,
-38,
-38,
-38,
-47,
-51,
-63,
-63,
-63,
-72,
-72,
-72,
-72,
-72,
-72,
-72,
-76,
-76,
-76,
-76,
-80,
-80,
-80,
-80,
-80,
-80,
-80,
-80,
-80,
-84,
-84,
-84,
-84,
-93,
-93,
-93,
-105,
-105,
-105,
-105,
-114,
-114,
-114,
-114,
-114,
-124,
-124,
-124,
-124,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-0,
-0,
-0,
-12,
-12,
-12,
-35,
-43,
-47,
-47,
-47,
-47,
-47,
-58,
-58,
-66,
-66,
-66,
-70,
-70,
-70,
-70,
-70,
-73,
-73,
-82,
-82,
-82,
-86,
-94,
-94,
-94,
-94,
-94,
-94,
-94,
-94,
-94,
-94,
-94,
-94,
-94,
-105,
-105,
-105,
-114,
-114,
-114,
-114,
-117,
-117,
-117,
-117,
-117,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-0,
-0,
-0,
-24,
-24,
-24,
-49,
-53,
-53,
-53,
-53,
-53,
-53,
-61,
-61,
-64,
-64,
-64,
-64,
-70,
-70,
-70,
-70,
-78,
-78,
-88,
-88,
-88,
-96,
-106,
-106,
-106,
-106,
-106,
-106,
-106,
-106,
-106,
-106,
-112,
-112,
-112,
-120,
-120,
-120,
-124,
-124,
-124,
-124,
-124,
-124,
-124,
-124,
-124,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-0,
-0,
-5,
-36,
-36,
-36,
-55,
-55,
-55,
-55,
-55,
-55,
-55,
-58,
-58,
-58,
-58,
-58,
-64,
-78,
-78,
-78,
-78,
-87,
-87,
-94,
-94,
-94,
-103,
-110,
-110,
-110,
-110,
-110,
-110,
-110,
-110,
-116,
-116,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-0,
-0,
-18,
-43,
-43,
-43,
-53,
-53,
-53,
-53,
-53,
-53,
-53,
-53,
-58,
-58,
-58,
-58,
-71,
-87,
-87,
-87,
-87,
-94,
-94,
-97,
-97,
-97,
-109,
-111,
-111,
-111,
-111,
-111,
-111,
-111,
-111,
-125,
-125,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-0,
-0,
-31,
-46,
-46,
-46,
-48,
-48,
-48,
-48,
-48,
-48,
-48,
-48,
-66,
-66,
-66,
-66,
-80,
-93,
-93,
-93,
-93,
-95,
-95,
-95,
-95,
-100,
-115,
-115,
-115,
-115,
-115,
-115,
-115,
-115,
-115,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-0,
-4,
-40,
-45,
-45,
-45,
-45,
-45,
-45,
-45,
-45,
-49,
-49,
-49,
-74,
-74,
-74,
-74,
-86,
-90,
-90,
-90,
-90,
-95,
-95,
-95,
-95,
-106,
-120,
-120,
-120,
-120,
-120,
-120,
-120,
-120,
-120,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-0,
-14,
-42,
-42,
-42,
-42,
-42,
-42,
-42,
-42,
-46,
-56,
-56,
-56,
-80,
-80,
-80,
-80,
-84,
-84,
-84,
-84,
-88,
-99,
-99,
-99,
-99,
-111,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-0,
-26,
-40,
-40,
-40,
-40,
-40,
-40,
-40,
-40,
-54,
-66,
-66,
-66,
-80,
-80,
-80,
-80,
-80,
-80,
-80,
-84,
-94,
-106,
-106,
-106,
-106,
-116,
-120,
-120,
-120,
-120,
-120,
-120,
-120,
-120,
-124,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-3,
-34,
-38,
-38,
-38,
-38,
-38,
-42,
-42,
-42,
-63,
-72,
-72,
-76,
-80,
-80,
-80,
-80,
-80,
-80,
-80,
-89,
-101,
-114,
-114,
-114,
-114,
-118,
-118,
-118,
-118,
-118,
-118,
-118,
-118,
-118,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-12,
-36,
-36,
-36,
-36,
-36,
-36,
-49,
-49,
-49,
-69,
-73,
-76,
-86,
-86,
-86,
-86,
-86,
-86,
-86,
-86,
-97,
-109,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-22,
-34,
-34,
-34,
-34,
-38,
-38,
-57,
-57,
-57,
-69,
-73,
-82,
-92,
-92,
-92,
-92,
-92,
-92,
-96,
-96,
-104,
-117,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-29,
-33,
-33,
-33,
-33,
-44,
-44,
-62,
-62,
-62,
-69,
-77,
-87,
-95,
-95,
-95,
-95,
-95,
-95,
-107,
-107,
-110,
-120,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-31,
-31,
-31,
-31,
-31,
-51,
-51,
-62,
-65,
-65,
-73,
-83,
-91,
-94,
-94,
-94,
-94,
-97,
-97,
-114,
-114,
-114,
-122,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-29,
-29,
-29,
-29,
-29,
-56,
-56,
-59,
-70,
-70,
-79,
-86,
-89,
-89,
-89,
-89,
-89,
-100,
-100,
-116,
-116,
-116,
-122,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-28,
-28,
-28,
-28,
-28,
-57,
-57,
-57,
-76,
-76,
-83,
-86,
-86,
-86,
-86,
-86,
-89,
-104,
-104,
-114,
-114,
-114,
-124,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-27,
-27,
-27,
-27,
-30,
-55,
-55,
-55,
-80,
-80,
-83,
-86,
-86,
-86,
-86,
-86,
-93,
-108,
-108,
-111,
-111,
-111,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-26,
-26,
-26,
-26,
-36,
-53,
-53,
-53,
-80,
-80,
-80,
-90,
-90,
-90,
-90,
-90,
-98,
-107,
-107,
-107,
-107,
-107,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-26,
-26,
-26,
-28,
-42,
-52,
-54,
-54,
-78,
-78,
-78,
-95,
-95,
-95,
-97,
-97,
-104,
-106,
-106,
-106,
-106,
-106,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-24,
-24,
-24,
-33,
-47,
-49,
-58,
-58,
-74,
-74,
-74,
-97,
-97,
-97,
-106,
-106,
-108,
-108,
-108,
-108,
-108,
-108,
-124,
-124,
-124,
-124,
-124,
-124,
-124,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-24,
-24,
-24,
-39,
-48,
-50,
-63,
-63,
-72,
-74,
-74,
-96,
-96,
-96,
-109,
-111,
-111,
-111,
-111,
-111,
-111,
-111,
-119,
-119,
-122,
-122,
-122,
-122,
-122,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-23,
-23,
-23,
-43,
-46,
-54,
-66,
-66,
-69,
-77,
-77,
-92,
-92,
-92,
-105,
-113,
-113,
-113,
-113,
-113,
-113,
-113,
-115,
-117,
-123,
-123,
-123,
-123,
-123,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-22,
-22,
-22,
-44,
-44,
-59,
-67,
-67,
-67,
-81,
-81,
-89,
-89,
-89,
-97,
-112,
-112,
-112,
-112,
-112,
-112,
-112,
-112,
-119,
-126,
-126,
-126,
-126,
-126,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-21,
-21,
-24,
-43,
-45,
-63,
-65,
-65,
-67,
-85,
-85,
-87,
-87,
-87,
-91,
-109,
-109,
-109,
-111,
-111,
-111,
-111,
-111,
-123,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-21,
-21,
-28,
-42,
-50,
-63,
-63,
-66,
-71,
-85,
-85,
-85,
-85,
-87,
-92,
-106,
-106,
-108,
-114,
-114,
-114,
-114,
-114,
-125,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-20,
-20,
-34,
-41,
-54,
-62,
-62,
-69,
-75,
-82,
-82,
-82,
-82,
-92,
-98,
-105,
-105,
-110,
-117,
-117,
-117,
-117,
-117,
-124,
-124,
-126,
-126,
-126,
-126,
-126,
-126,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-20,
-20,
-38,
-40,
-58,
-60,
-60,
-73,
-78,
-80,
-80,
-80,
-80,
-100,
-105,
-107,
-107,
-113,
-118,
-118,
-118,
-118,
-118,
-120,
-120,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-19,
-21,
-38,
-40,
-58,
-58,
-60,
-75,
-77,
-77,
-77,
-81,
-81,
-107,
-109,
-109,
-109,
-114,
-116,
-116,
-116,
-116,
-116,
-116,
-116,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-18,
-25,
-37,
-44,
-56,
-56,
-63,
-75,
-75,
-75,
-75,
-88,
-88,
-111,
-111,
-111,
-111,
-112,
-112,
-112,
-112,
-112,
-112,
-112,
-114,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-18,
-30,
-36,
-48,
-55,
-55,
-67,
-73,
-73,
-73,
-73,
-97,
-97,
-110,
-110,
-110,
-110,
-110,
-110,
-110,
-110,
-110,
-110,
-110,
-116,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-18,
-34,
-36,
-52,
-55,
-55,
-70,
-72,
-73,
-73,
-73,
-102,
-104,
-108,
-108,
-108,
-108,
-109,
-109,
-109,
-109,
-109,
-109,
-109,
-119,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-17,
-35,
-35,
-52,
-59,
-59,
-70,
-70,
-76,
-76,
-76,
-99,
-105,
-105,
-105,
-105,
-105,
-111,
-111,
-111,
-111,
-111,
-111,
-111,
-121,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-17,
-34,
-36,
-51,
-61,
-62,
-70,
-70,
-80,
-80,
-80,
-93,
-103,
-103,
-103,
-103,
-103,
-112,
-112,
-112,
-112,
-112,
-116,
-118,
-124,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-16,
-33,
-39,
-50,
-59,
-65,
-72,
-72,
-82,
-82,
-82,
-91,
-100,
-100,
-100,
-100,
-100,
-109,
-109,
-109,
-109,
-109,
-121,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-16,
-32,
-43,
-48,
-54,
-66,
-75,
-75,
-81,
-83,
-83,
-92,
-97,
-97,
-97,
-99,
-99,
-105,
-105,
-105,
-105,
-105,
-123,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-15,
-31,
-46,
-47,
-49,
-69,
-77,
-77,
-81,
-85,
-85,
-93,
-95,
-95,
-95,
-100,
-100,
-102,
-102,
-102,
-102,
-102,
-120,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-15,
-30,
-46,
-48,
-48,
-70,
-75,
-79,
-82,
-87,
-87,
-92,
-94,
-94,
-94,
-103,
-103,
-103,
-103,
-103,
-104,
-104,
-115,
-120,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-15,
-30,
-45,
-50,
-50,
-68,
-70,
-80,
-85,
-89,
-89,
-90,
-95,
-95,
-95,
-104,
-104,
-104,
-104,
-104,
-109,
-109,
-112,
-114,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-14,
-29,
-44,
-54,
-54,
-64,
-64,
-83,
-87,
-88,
-88,
-88,
-98,
-98,
-98,
-103,
-103,
-103,
-103,
-103,
-113,
-113,
-113,
-113,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-14,
-29,
-43,
-56,
-56,
-61,
-61,
-84,
-85,
-88,
-88,
-88,
-100,
-100,
-100,
-102,
-102,
-102,
-102,
-102,
-113,
-116,
-116,
-116,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-14,
-28,
-42,
-57,
-57,
-62,
-62,
-80,
-80,
-91,
-91,
-91,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-109,
-119,
-119,
-119,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-14,
-28,
-42,
-56,
-56,
-65,
-66,
-76,
-76,
-92,
-92,
-92,
-97,
-97,
-97,
-101,
-101,
-101,
-101,
-101,
-106,
-121,
-121,
-121,
-126,
-126,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-13,
-27,
-41,
-55,
-55,
-67,
-72,
-74,
-74,
-90,
-90,
-90,
-91,
-91,
-91,
-105,
-105,
-105,
-105,
-105,
-107,
-122,
-122,
-122,
-123,
-123,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-13,
-27,
-40,
-54,
-54,
-67,
-76,
-76,
-76,
-85,
-85,
-85,
-85,
-85,
-85,
-112,
-112,
-112,
-112,
-112,
-112,
-121,
-121,
-121,
-121,
-121,
-126,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-
-
-};
-
-} // namespace webrtc
-
-#endif // WEBRTC_MODULES_VIDEO_CODING_SOURCE_FEC_TABLES_XOR_H_
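The deleted header above is a generated XOR FEC lookup table: each row ramps up from 0 and saturates at 127, the largest protection factor that fits in a signed byte, and media_opt_util.cc (further down in this patch) indexes it by an effective rate row and a packet loss column capped at kPacketLossMax. A minimal sketch of such a clamped row-major lookup; the table pointer and row layout here are assumptions for illustration, not the exact layout of the deleted header:

    #include <algorithm>
    #include <cstdint>

    const int kPacketLossMax = 129;  // Row width; mirrors media_opt_util.cc below.

    // Hypothetical lookup over a flattened table like the one deleted above.
    uint8_t LookupFecRate(const uint8_t* table, int rate_index, int loss_percent) {
      // Clamp the loss column so out-of-range loss reports reuse the last,
      // saturated entry (127) instead of reading past the end of the row.
      int loss = std::min(loss_percent, kPacketLossMax - 1);
      return table[rate_index * kPacketLossMax + loss];
    }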
diff --git a/webrtc/modules/video_coding/main/source/frame_buffer.cc b/webrtc/modules/video_coding/main/source/frame_buffer.cc
deleted file mode 100644
index 5b6680ec61..0000000000
--- a/webrtc/modules/video_coding/main/source/frame_buffer.cc
+++ /dev/null
@@ -1,297 +0,0 @@
-/*
- * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "webrtc/modules/video_coding/main/source/frame_buffer.h"
-
-#include <assert.h>
-#include <string.h>
-
-#include "webrtc/base/checks.h"
-#include "webrtc/modules/video_coding/main/source/packet.h"
-#include "webrtc/system_wrappers/include/logging.h"
-
-namespace webrtc {
-
-VCMFrameBuffer::VCMFrameBuffer()
- :
- _state(kStateEmpty),
- _nackCount(0),
- _latestPacketTimeMs(-1) {
-}
-
-VCMFrameBuffer::~VCMFrameBuffer() {
-}
-
-VCMFrameBuffer::VCMFrameBuffer(const VCMFrameBuffer& rhs)
-:
-VCMEncodedFrame(rhs),
-_state(rhs._state),
-_sessionInfo(),
-_nackCount(rhs._nackCount),
-_latestPacketTimeMs(rhs._latestPacketTimeMs) {
- _sessionInfo = rhs._sessionInfo;
- _sessionInfo.UpdateDataPointers(rhs._buffer, _buffer);
-}
-
-webrtc::FrameType
-VCMFrameBuffer::FrameType() const {
- return _sessionInfo.FrameType();
-}
-
-int32_t
-VCMFrameBuffer::GetLowSeqNum() const {
- return _sessionInfo.LowSequenceNumber();
-}
-
-int32_t
-VCMFrameBuffer::GetHighSeqNum() const {
- return _sessionInfo.HighSequenceNumber();
-}
-
-int VCMFrameBuffer::PictureId() const {
- return _sessionInfo.PictureId();
-}
-
-int VCMFrameBuffer::TemporalId() const {
- return _sessionInfo.TemporalId();
-}
-
-bool VCMFrameBuffer::LayerSync() const {
- return _sessionInfo.LayerSync();
-}
-
-int VCMFrameBuffer::Tl0PicId() const {
- return _sessionInfo.Tl0PicId();
-}
-
-bool VCMFrameBuffer::NonReference() const {
- return _sessionInfo.NonReference();
-}
-
-void VCMFrameBuffer::SetGofInfo(const GofInfoVP9& gof_info, size_t idx) {
- _sessionInfo.SetGofInfo(gof_info, idx);
- // TODO(asapersson): Consider adding hdr->VP9.ref_picture_id for testing.
- _codecSpecificInfo.codecSpecific.VP9.temporal_idx =
- gof_info.temporal_idx[idx];
- _codecSpecificInfo.codecSpecific.VP9.temporal_up_switch =
- gof_info.temporal_up_switch[idx];
-}
-
-bool
-VCMFrameBuffer::IsSessionComplete() const {
- return _sessionInfo.complete();
-}
-
-// Insert packet
-VCMFrameBufferEnum
-VCMFrameBuffer::InsertPacket(const VCMPacket& packet,
- int64_t timeInMs,
- VCMDecodeErrorMode decode_error_mode,
- const FrameData& frame_data) {
- assert(!(NULL == packet.dataPtr && packet.sizeBytes > 0));
- if (packet.dataPtr != NULL) {
- _payloadType = packet.payloadType;
- }
-
- if (kStateEmpty == _state) {
- // First packet (empty and/or media) inserted into this frame.
-    // Store some info and set some initial values.
- _timeStamp = packet.timestamp;
- // We only take the ntp timestamp of the first packet of a frame.
- ntp_time_ms_ = packet.ntp_time_ms_;
- _codec = packet.codec;
- if (packet.frameType != kEmptyFrame) {
- // first media packet
- SetState(kStateIncomplete);
- }
- }
-
- uint32_t requiredSizeBytes = Length() + packet.sizeBytes +
- (packet.insertStartCode ? kH264StartCodeLengthBytes : 0);
- if (requiredSizeBytes >= _size) {
- const uint8_t* prevBuffer = _buffer;
- const uint32_t increments = requiredSizeBytes /
- kBufferIncStepSizeBytes +
- (requiredSizeBytes %
- kBufferIncStepSizeBytes > 0);
- const uint32_t newSize = _size +
- increments * kBufferIncStepSizeBytes;
- if (newSize > kMaxJBFrameSizeBytes) {
- LOG(LS_ERROR) << "Failed to insert packet due to frame being too "
- "big.";
- return kSizeError;
- }
- VerifyAndAllocate(newSize);
- _sessionInfo.UpdateDataPointers(prevBuffer, _buffer);
- }
-
- if (packet.width > 0 && packet.height > 0) {
- _encodedWidth = packet.width;
- _encodedHeight = packet.height;
- }
-
-  // Don't copy payload-specific data for empty packets (e.g. padding packets).
- if (packet.sizeBytes > 0)
- CopyCodecSpecific(&packet.codecSpecificHeader);
-
- int retVal = _sessionInfo.InsertPacket(packet, _buffer,
- decode_error_mode,
- frame_data);
- if (retVal == -1) {
- return kSizeError;
- } else if (retVal == -2) {
- return kDuplicatePacket;
- } else if (retVal == -3) {
- return kOutOfBoundsPacket;
- }
-  // Update the frame length.
- _length = Length() + static_cast<uint32_t>(retVal);
-
- _latestPacketTimeMs = timeInMs;
-
- // http://www.etsi.org/deliver/etsi_ts/126100_126199/126114/12.07.00_60/
- // ts_126114v120700p.pdf Section 7.4.5.
- // The MTSI client shall add the payload bytes as defined in this clause
- // onto the last RTP packet in each group of packets which make up a key
- // frame (I-frame or IDR frame in H.264 (AVC), or an IRAP picture in H.265
- // (HEVC)).
- if (packet.markerBit) {
- RTC_DCHECK(!_rotation_set);
- _rotation = packet.codecSpecificHeader.rotation;
- _rotation_set = true;
- }
-
- if (_sessionInfo.complete()) {
- SetState(kStateComplete);
- return kCompleteSession;
- } else if (_sessionInfo.decodable()) {
- SetState(kStateDecodable);
- return kDecodableSession;
- }
- return kIncomplete;
-}
-
-int64_t
-VCMFrameBuffer::LatestPacketTimeMs() const {
- return _latestPacketTimeMs;
-}
-
-void
-VCMFrameBuffer::IncrementNackCount() {
- _nackCount++;
-}
-
-int16_t
-VCMFrameBuffer::GetNackCount() const {
- return _nackCount;
-}
-
-bool
-VCMFrameBuffer::HaveFirstPacket() const {
- return _sessionInfo.HaveFirstPacket();
-}
-
-bool
-VCMFrameBuffer::HaveLastPacket() const {
- return _sessionInfo.HaveLastPacket();
-}
-
-int
-VCMFrameBuffer::NumPackets() const {
- return _sessionInfo.NumPackets();
-}
-
-void
-VCMFrameBuffer::Reset() {
- _length = 0;
- _timeStamp = 0;
- _sessionInfo.Reset();
- _payloadType = 0;
- _nackCount = 0;
- _latestPacketTimeMs = -1;
- _state = kStateEmpty;
- VCMEncodedFrame::Reset();
-}
-
-// Set state of frame
-void
-VCMFrameBuffer::SetState(VCMFrameBufferStateEnum state) {
- if (_state == state) {
- return;
- }
- switch (state) {
- case kStateIncomplete:
-      // We can only reach this state from kStateEmpty.
- assert(_state == kStateEmpty);
-
- // Do nothing, we received a packet
- break;
-
- case kStateComplete:
- assert(_state == kStateEmpty ||
- _state == kStateIncomplete ||
- _state == kStateDecodable);
-
- break;
-
- case kStateEmpty:
- // Should only be set to empty through Reset().
- assert(false);
- break;
-
- case kStateDecodable:
- assert(_state == kStateEmpty ||
- _state == kStateIncomplete);
- break;
- }
- _state = state;
-}
-
-// Get current state of frame
-VCMFrameBufferStateEnum
-VCMFrameBuffer::GetState() const {
- return _state;
-}
-
-// Get current state and timestamp of frame
-VCMFrameBufferStateEnum
-VCMFrameBuffer::GetState(uint32_t& timeStamp) const {
- timeStamp = TimeStamp();
- return GetState();
-}
-
-bool
-VCMFrameBuffer::IsRetransmitted() const {
- return _sessionInfo.session_nack();
-}
-
-void
-VCMFrameBuffer::PrepareForDecode(bool continuous) {
-#ifdef INDEPENDENT_PARTITIONS
- if (_codec == kVideoCodecVP8) {
- _length =
- _sessionInfo.BuildVP8FragmentationHeader(_buffer, _length,
- &_fragmentation);
- } else {
- size_t bytes_removed = _sessionInfo.MakeDecodable();
- _length -= bytes_removed;
- }
-#else
- size_t bytes_removed = _sessionInfo.MakeDecodable();
- _length -= bytes_removed;
-#endif
- // Transfer frame information to EncodedFrame and create any codec
- // specific information.
- _frameType = _sessionInfo.FrameType();
- _completeFrame = _sessionInfo.complete();
- _missingFrame = !continuous;
-}
-
-} // namespace webrtc
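InsertPacket above grows the frame buffer in whole multiples of kBufferIncStepSizeBytes, rounding the required size up to the next step and bailing out with kSizeError past kMaxJBFrameSizeBytes. A self-contained sketch of the same rounding, with an illustrative step size (the real constant lives in the VCM headers, not in this file):

    #include <cstdint>

    const uint32_t kBufferIncStepSizeBytes = 3000;  // Illustrative value only.

    uint32_t RoundUpToStep(uint32_t currentSize, uint32_t requiredSizeBytes) {
      // Integer division rounds down; add one extra step when a remainder exists.
      uint32_t increments = requiredSizeBytes / kBufferIncStepSizeBytes +
                            (requiredSizeBytes % kBufferIncStepSizeBytes > 0);
      return currentSize + increments * kBufferIncStepSizeBytes;
    }

For example, requiredSizeBytes = 3001 gives 3001 / 3000 = 1 plus a remainder, so two increments are added and the buffer grows by 6000 bytes.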
diff --git a/webrtc/modules/video_coding/main/source/generic_decoder.cc b/webrtc/modules/video_coding/main/source/generic_decoder.cc
deleted file mode 100644
index 8b2d3974de..0000000000
--- a/webrtc/modules/video_coding/main/source/generic_decoder.cc
+++ /dev/null
@@ -1,198 +0,0 @@
-/*
- * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "webrtc/modules/video_coding/main/interface/video_coding.h"
-#include "webrtc/modules/video_coding/main/source/generic_decoder.h"
-#include "webrtc/modules/video_coding/main/source/internal_defines.h"
-#include "webrtc/system_wrappers/include/clock.h"
-#include "webrtc/system_wrappers/include/logging.h"
-
-namespace webrtc {
-
-VCMDecodedFrameCallback::VCMDecodedFrameCallback(VCMTiming& timing,
- Clock* clock)
-:
-_critSect(CriticalSectionWrapper::CreateCriticalSection()),
-_clock(clock),
-_receiveCallback(NULL),
-_timing(timing),
-_timestampMap(kDecoderFrameMemoryLength),
-_lastReceivedPictureID(0)
-{
-}
-
-VCMDecodedFrameCallback::~VCMDecodedFrameCallback()
-{
- delete _critSect;
-}
-
-void VCMDecodedFrameCallback::SetUserReceiveCallback(
- VCMReceiveCallback* receiveCallback)
-{
- CriticalSectionScoped cs(_critSect);
- _receiveCallback = receiveCallback;
-}
-
-VCMReceiveCallback* VCMDecodedFrameCallback::UserReceiveCallback()
-{
- CriticalSectionScoped cs(_critSect);
- return _receiveCallback;
-}
-
-int32_t VCMDecodedFrameCallback::Decoded(VideoFrame& decodedImage) {
- // TODO(holmer): We should improve this so that we can handle multiple
- // callbacks from one call to Decode().
- VCMFrameInformation* frameInfo;
- VCMReceiveCallback* callback;
- {
- CriticalSectionScoped cs(_critSect);
- frameInfo = _timestampMap.Pop(decodedImage.timestamp());
- callback = _receiveCallback;
- }
-
- if (frameInfo == NULL) {
- LOG(LS_WARNING) << "Too many frames backed up in the decoder, dropping "
- "this one.";
- return WEBRTC_VIDEO_CODEC_OK;
- }
-
- _timing.StopDecodeTimer(
- decodedImage.timestamp(),
- frameInfo->decodeStartTimeMs,
- _clock->TimeInMilliseconds(),
- frameInfo->renderTimeMs);
-
- if (callback != NULL)
- {
- decodedImage.set_render_time_ms(frameInfo->renderTimeMs);
- decodedImage.set_rotation(frameInfo->rotation);
- callback->FrameToRender(decodedImage);
- }
- return WEBRTC_VIDEO_CODEC_OK;
-}
-
-int32_t
-VCMDecodedFrameCallback::ReceivedDecodedReferenceFrame(
- const uint64_t pictureId)
-{
- CriticalSectionScoped cs(_critSect);
- if (_receiveCallback != NULL)
- {
- return _receiveCallback->ReceivedDecodedReferenceFrame(pictureId);
- }
- return -1;
-}
-
-int32_t
-VCMDecodedFrameCallback::ReceivedDecodedFrame(const uint64_t pictureId)
-{
- _lastReceivedPictureID = pictureId;
- return 0;
-}
-
-uint64_t VCMDecodedFrameCallback::LastReceivedPictureID() const
-{
- return _lastReceivedPictureID;
-}
-
-void VCMDecodedFrameCallback::Map(uint32_t timestamp,
- VCMFrameInformation* frameInfo) {
- CriticalSectionScoped cs(_critSect);
- _timestampMap.Add(timestamp, frameInfo);
-}
-
-int32_t VCMDecodedFrameCallback::Pop(uint32_t timestamp)
-{
- CriticalSectionScoped cs(_critSect);
- if (_timestampMap.Pop(timestamp) == NULL)
- {
- return VCM_GENERAL_ERROR;
- }
- return VCM_OK;
-}
-
-VCMGenericDecoder::VCMGenericDecoder(VideoDecoder& decoder, bool isExternal)
-:
-_callback(NULL),
-_frameInfos(),
-_nextFrameInfoIdx(0),
-_decoder(decoder),
-_codecType(kVideoCodecUnknown),
-_isExternal(isExternal),
-_keyFrameDecoded(false)
-{
-}
-
-VCMGenericDecoder::~VCMGenericDecoder()
-{
-}
-
-int32_t VCMGenericDecoder::InitDecode(const VideoCodec* settings,
- int32_t numberOfCores)
-{
- _codecType = settings->codecType;
-
- return _decoder.InitDecode(settings, numberOfCores);
-}
-
-int32_t VCMGenericDecoder::Decode(const VCMEncodedFrame& frame,
- int64_t nowMs)
-{
- _frameInfos[_nextFrameInfoIdx].decodeStartTimeMs = nowMs;
- _frameInfos[_nextFrameInfoIdx].renderTimeMs = frame.RenderTimeMs();
- _frameInfos[_nextFrameInfoIdx].rotation = frame.rotation();
- _callback->Map(frame.TimeStamp(), &_frameInfos[_nextFrameInfoIdx]);
-
- _nextFrameInfoIdx = (_nextFrameInfoIdx + 1) % kDecoderFrameMemoryLength;
- int32_t ret = _decoder.Decode(frame.EncodedImage(),
- frame.MissingFrame(),
- frame.FragmentationHeader(),
- frame.CodecSpecific(),
- frame.RenderTimeMs());
-
- if (ret < WEBRTC_VIDEO_CODEC_OK)
- {
- LOG(LS_WARNING) << "Failed to decode frame with timestamp "
- << frame.TimeStamp() << ", error code: " << ret;
- _callback->Pop(frame.TimeStamp());
- return ret;
- }
- else if (ret == WEBRTC_VIDEO_CODEC_NO_OUTPUT ||
- ret == WEBRTC_VIDEO_CODEC_REQUEST_SLI)
- {
- // No output
- _callback->Pop(frame.TimeStamp());
- }
- return ret;
-}
-
-int32_t
-VCMGenericDecoder::Release()
-{
- return _decoder.Release();
-}
-
-int32_t VCMGenericDecoder::Reset()
-{
- return _decoder.Reset();
-}
-
-int32_t VCMGenericDecoder::RegisterDecodeCompleteCallback(VCMDecodedFrameCallback* callback)
-{
- _callback = callback;
- return _decoder.RegisterDecodeCompleteCallback(callback);
-}
-
-bool VCMGenericDecoder::External() const
-{
- return _isExternal;
-}
-
-} // namespace webrtc
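Decode() above stashes per-frame metadata (decode start time, render time, rotation) in a fixed ring of kDecoderFrameMemoryLength entries and registers it in the timestamp map; Decoded() then pops the entry for the matching RTP timestamp when the picture comes out, and a missing entry means the decoder backed up far enough that the slot was reused, so the frame is dropped. A reduced sketch of that round-trip, using std::map in place of VCMTimestampMap:

    #include <cstdint>
    #include <map>

    struct FrameInfo {
      int64_t decodeStartTimeMs;
      int64_t renderTimeMs;
    };

    std::map<uint32_t, FrameInfo> pending;  // Keyed by RTP timestamp.

    // Map() equivalent: remember metadata before handing the frame to the decoder.
    void OnDecodeStart(uint32_t timestamp, const FrameInfo& info) {
      pending[timestamp] = info;
    }

    // Pop() equivalent: retrieve and erase on decode completion.
    bool OnDecoded(uint32_t timestamp, FrameInfo* out) {
      auto it = pending.find(timestamp);
      if (it == pending.end())
        return false;  // Entry gone: too many frames backed up, drop this one.
      *out = it->second;
      pending.erase(it);
      return true;
    }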
diff --git a/webrtc/modules/video_coding/main/source/generic_decoder.h b/webrtc/modules/video_coding/main/source/generic_decoder.h
deleted file mode 100644
index 09929e64f4..0000000000
--- a/webrtc/modules/video_coding/main/source/generic_decoder.h
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef WEBRTC_MODULES_VIDEO_CODING_GENERIC_DECODER_H_
-#define WEBRTC_MODULES_VIDEO_CODING_GENERIC_DECODER_H_
-
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
-#include "webrtc/modules/video_coding/main/source/encoded_frame.h"
-#include "webrtc/modules/video_coding/main/source/timestamp_map.h"
-#include "webrtc/modules/video_coding/main/source/timing.h"
-
-namespace webrtc
-{
-
-class VCMReceiveCallback;
-
-enum { kDecoderFrameMemoryLength = 10 };
-
-struct VCMFrameInformation
-{
- int64_t renderTimeMs;
- int64_t decodeStartTimeMs;
- void* userData;
- VideoRotation rotation;
-};
-
-class VCMDecodedFrameCallback : public DecodedImageCallback
-{
-public:
- VCMDecodedFrameCallback(VCMTiming& timing, Clock* clock);
- virtual ~VCMDecodedFrameCallback();
- void SetUserReceiveCallback(VCMReceiveCallback* receiveCallback);
- VCMReceiveCallback* UserReceiveCallback();
-
- virtual int32_t Decoded(VideoFrame& decodedImage);
- virtual int32_t ReceivedDecodedReferenceFrame(const uint64_t pictureId);
- virtual int32_t ReceivedDecodedFrame(const uint64_t pictureId);
-
- uint64_t LastReceivedPictureID() const;
-
- void Map(uint32_t timestamp, VCMFrameInformation* frameInfo);
- int32_t Pop(uint32_t timestamp);
-
-private:
- // Protect |_receiveCallback| and |_timestampMap|.
- CriticalSectionWrapper* _critSect;
- Clock* _clock;
- VCMReceiveCallback* _receiveCallback; // Guarded by |_critSect|.
- VCMTiming& _timing;
- VCMTimestampMap _timestampMap; // Guarded by |_critSect|.
- uint64_t _lastReceivedPictureID;
-};
-
-
-class VCMGenericDecoder
-{
- friend class VCMCodecDataBase;
-public:
- VCMGenericDecoder(VideoDecoder& decoder, bool isExternal = false);
- ~VCMGenericDecoder();
-
- /**
- * Initialize the decoder with the information from the VideoCodec
- */
- int32_t InitDecode(const VideoCodec* settings,
- int32_t numberOfCores);
-
- /**
-     * Decode to a raw I420 frame.
-     *
-     * inputFrame : Reference to the encoded video frame.
- */
- int32_t Decode(const VCMEncodedFrame& inputFrame, int64_t nowMs);
-
- /**
- * Free the decoder memory
- */
- int32_t Release();
-
- /**
- * Reset the decoder state, prepare for a new call
- */
- int32_t Reset();
-
- /**
- * Set decode callback. Deregistering while decoding is illegal.
- */
- int32_t RegisterDecodeCompleteCallback(VCMDecodedFrameCallback* callback);
-
- bool External() const;
-
-private:
- VCMDecodedFrameCallback* _callback;
- VCMFrameInformation _frameInfos[kDecoderFrameMemoryLength];
- uint32_t _nextFrameInfoIdx;
- VideoDecoder& _decoder;
- VideoCodecType _codecType;
- bool _isExternal;
- bool _keyFrameDecoded;
-};
-
-} // namespace webrtc
-
-#endif // WEBRTC_MODULES_VIDEO_CODING_GENERIC_DECODER_H_
diff --git a/webrtc/modules/video_coding/main/source/generic_encoder.h b/webrtc/modules/video_coding/main/source/generic_encoder.h
deleted file mode 100644
index 3a7132860f..0000000000
--- a/webrtc/modules/video_coding/main/source/generic_encoder.h
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef WEBRTC_MODULES_VIDEO_CODING_GENERIC_ENCODER_H_
-#define WEBRTC_MODULES_VIDEO_CODING_GENERIC_ENCODER_H_
-
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
-#include "webrtc/modules/video_coding/main/interface/video_coding_defines.h"
-
-#include <stdio.h>
-
-#include "webrtc/base/criticalsection.h"
-#include "webrtc/base/scoped_ptr.h"
-
-namespace webrtc {
-class CriticalSectionWrapper;
-
-namespace media_optimization {
-class MediaOptimization;
-} // namespace media_optimization
-
-struct EncoderParameters {
- uint32_t target_bitrate;
- uint8_t loss_rate;
- int64_t rtt;
- uint32_t input_frame_rate;
-};
-
-/**************************************/
-/*   VCMEncodedFrameCallback class    */
-/**************************************/
-class VCMEncodedFrameCallback : public EncodedImageCallback
-{
-public:
- VCMEncodedFrameCallback(EncodedImageCallback* post_encode_callback);
- virtual ~VCMEncodedFrameCallback();
-
- /*
- * Callback implementation - codec encode complete
- */
- int32_t Encoded(
- const EncodedImage& encodedImage,
- const CodecSpecificInfo* codecSpecificInfo = NULL,
- const RTPFragmentationHeader* fragmentationHeader = NULL);
- /*
-    * Set the transport (packetization) callback.
- */
- int32_t SetTransportCallback(VCMPacketizationCallback* transport);
- /**
-    * Set media optimization.
-    */
-    void SetMediaOpt(media_optimization::MediaOptimization* mediaOpt);
-
-    void SetPayloadType(uint8_t payloadType) { _payloadType = payloadType; }
-    void SetInternalSource(bool internalSource) { _internalSource = internalSource; }
-
- void SetRotation(VideoRotation rotation) { _rotation = rotation; }
-
-private:
- VCMPacketizationCallback* _sendCallback;
- media_optimization::MediaOptimization* _mediaOpt;
- uint8_t _payloadType;
- bool _internalSource;
- VideoRotation _rotation;
-
- EncodedImageCallback* post_encode_callback_;
-
-#ifdef DEBUG_ENCODER_BIT_STREAM
- FILE* _bitStreamAfterEncoder;
-#endif
-}; // end of VCMEncodedFrameCallback class
-
-
-/******************************/
-/* VCMGenericEncoder class */
-/******************************/
-class VCMGenericEncoder
-{
- friend class VCMCodecDataBase;
-public:
- VCMGenericEncoder(VideoEncoder* encoder,
- VideoEncoderRateObserver* rate_observer,
- VCMEncodedFrameCallback* encoded_frame_callback,
- bool internalSource);
- ~VCMGenericEncoder();
- /**
- * Free encoder memory
- */
- int32_t Release();
- /**
- * Initialize the encoder with the information from the VideoCodec
- */
- int32_t InitEncode(const VideoCodec* settings,
- int32_t numberOfCores,
- size_t maxPayloadSize);
- /**
-     * Encode raw image.
-     * inputFrame : Frame containing the raw image
-     * codecSpecificInfo : Codec-specific data
-     * frameTypes : The requested frame types to encode
- */
- int32_t Encode(const VideoFrame& inputFrame,
- const CodecSpecificInfo* codecSpecificInfo,
- const std::vector<FrameType>& frameTypes);
-
- void SetEncoderParameters(const EncoderParameters& params);
- EncoderParameters GetEncoderParameters() const;
-
- int32_t SetPeriodicKeyFrames(bool enable);
-
- int32_t RequestFrame(const std::vector<FrameType>& frame_types);
-
- bool InternalSource() const;
-
- void OnDroppedFrame();
-
- bool SupportsNativeHandle() const;
-
- int GetTargetFramerate();
-
-private:
- VideoEncoder* const encoder_;
- VideoEncoderRateObserver* const rate_observer_;
- VCMEncodedFrameCallback* const vcm_encoded_frame_callback_;
- const bool internal_source_;
- mutable rtc::CriticalSection params_lock_;
- EncoderParameters encoder_params_ GUARDED_BY(params_lock_);
- VideoRotation rotation_;
- bool is_screenshare_;
-}; // end of VCMGenericEncoder class
-
-} // namespace webrtc
-
-#endif // WEBRTC_MODULES_VIDEO_CODING_GENERIC_ENCODER_H_
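encoder_params_ above is annotated GUARDED_BY(params_lock_), so the setter and getter declared here are expected to take the critical section and move the struct by value so no caller ever touches it unlocked. A plausible pair of definitions under that assumption; the real bodies live in generic_encoder.cc, which is not part of this hunk:

    // Sketch only; matches the declarations above, not the actual .cc file.
    void VCMGenericEncoder::SetEncoderParameters(const EncoderParameters& params) {
      rtc::CritScope lock(&params_lock_);
      encoder_params_ = params;
    }

    EncoderParameters VCMGenericEncoder::GetEncoderParameters() const {
      rtc::CritScope lock(&params_lock_);
      return encoder_params_;  // Copy out so callers never hold the lock.
    }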
diff --git a/webrtc/modules/video_coding/main/source/inter_frame_delay.cc b/webrtc/modules/video_coding/main/source/inter_frame_delay.cc
deleted file mode 100644
index 4786917e16..0000000000
--- a/webrtc/modules/video_coding/main/source/inter_frame_delay.cc
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "webrtc/modules/video_coding/main/source/inter_frame_delay.h"
-
-namespace webrtc {
-
-VCMInterFrameDelay::VCMInterFrameDelay(int64_t currentWallClock)
-{
- Reset(currentWallClock);
-}
-
-// Resets the delay estimate
-void
-VCMInterFrameDelay::Reset(int64_t currentWallClock)
-{
- _zeroWallClock = currentWallClock;
- _wrapArounds = 0;
- _prevWallClock = 0;
- _prevTimestamp = 0;
- _dTS = 0;
-}
-
-// Calculates the delay of a frame with the given timestamp.
-// This method is called when the frame is complete.
-bool
-VCMInterFrameDelay::CalculateDelay(uint32_t timestamp,
- int64_t *delay,
- int64_t currentWallClock)
-{
- if (_prevWallClock == 0)
- {
- // First set of data, initialization, wait for next frame
- _prevWallClock = currentWallClock;
- _prevTimestamp = timestamp;
- *delay = 0;
- return true;
- }
-
- int32_t prevWrapArounds = _wrapArounds;
- CheckForWrapArounds(timestamp);
-
- // This will be -1 for backward wrap arounds and +1 for forward wrap arounds
- int32_t wrapAroundsSincePrev = _wrapArounds - prevWrapArounds;
-
- // Account for reordering in jitter variance estimate in the future?
- // Note that this also captures incomplete frames which are grabbed
- // for decoding after a later frame has been complete, i.e. real
- // packet losses.
- if ((wrapAroundsSincePrev == 0 && timestamp < _prevTimestamp) || wrapAroundsSincePrev < 0)
- {
- *delay = 0;
- return false;
- }
-
- // Compute the compensated timestamp difference and convert it to ms and
- // round it to closest integer.
- _dTS = static_cast<int64_t>((timestamp + wrapAroundsSincePrev *
- (static_cast<int64_t>(1)<<32) - _prevTimestamp) / 90.0 + 0.5);
-
- // frameDelay is the difference of dT and dTS -- i.e. the difference of
- // the wall clock time difference and the timestamp difference between
- // two following frames.
- *delay = static_cast<int64_t>(currentWallClock - _prevWallClock - _dTS);
-
- _prevTimestamp = timestamp;
- _prevWallClock = currentWallClock;
-
- return true;
-}
-
-// Returns the current difference between incoming timestamps
-uint32_t VCMInterFrameDelay::CurrentTimeStampDiffMs() const
-{
- if (_dTS < 0)
- {
- return 0;
- }
- return static_cast<uint32_t>(_dTS);
-}
-
-// Checks whether the timestamp clock has wrapped around since the last
-// timestamp and keeps track of the number of wrap-arounds since reset.
-void
-VCMInterFrameDelay::CheckForWrapArounds(uint32_t timestamp)
-{
- if (timestamp < _prevTimestamp)
- {
-        // This difference will probably be less than -2^31 if we have had a
-        // wrap around (e.g. timestamp = 1, _prevTimestamp = 2^32 - 1). Since
-        // it is cast to an int32_t, it should be positive.
- if (static_cast<int32_t>(timestamp - _prevTimestamp) > 0)
- {
- // Forward wrap around
- _wrapArounds++;
- }
- }
-    // This difference will probably be less than -2^31 if we have had a
-    // backward wrap around. Since it is cast to an int32_t, it should be
-    // positive.
- else if (static_cast<int32_t>(_prevTimestamp - timestamp) > 0)
- {
- // Backward wrap around
- _wrapArounds--;
- }
-}
-
-} // namespace webrtc
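In formula form, CalculateDelay above computes, with t in wall-clock milliseconds, TS in 90 kHz RTP ticks, and w the net wrap count since the previous frame:

    delay = (t - t_prev) - round((TS + w * 2^32 - TS_prev) / 90)

The wrap detection leans on modulo-2^32 unsigned arithmetic: the unsigned difference of two timestamps lands in [0, 2^31) exactly when the forward distance is short. A self-contained check of that trick (two's-complement conversion assumed, as in the cast above):

    #include <cassert>
    #include <cstdint>

    bool IsForwardWrap(uint32_t timestamp, uint32_t prev) {
      // timestamp - prev wraps modulo 2^32; a small positive result after the
      // signed cast means the clock moved forward across the wrap boundary.
      return timestamp < prev && static_cast<int32_t>(timestamp - prev) > 0;
    }

    int main() {
      assert(IsForwardWrap(1u, 0xFFFFFFFFu));  // Wrapped: forward distance is 2.
      assert(!IsForwardWrap(1u, 100u));        // Plain reordering, no wrap.
      return 0;
    }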
diff --git a/webrtc/modules/video_coding/main/source/inter_frame_delay.h b/webrtc/modules/video_coding/main/source/inter_frame_delay.h
deleted file mode 100644
index 58b326ae96..0000000000
--- a/webrtc/modules/video_coding/main/source/inter_frame_delay.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef WEBRTC_MODULES_VIDEO_CODING_INTER_FRAME_DELAY_H_
-#define WEBRTC_MODULES_VIDEO_CODING_INTER_FRAME_DELAY_H_
-
-#include "webrtc/typedefs.h"
-
-namespace webrtc
-{
-
-class VCMInterFrameDelay
-{
-public:
- VCMInterFrameDelay(int64_t currentWallClock);
-
-    // Resets the estimate; currentWallClock becomes the new time base.
- void Reset(int64_t currentWallClock);
-
- // Calculates the delay of a frame with the given timestamp.
- // This method is called when the frame is complete.
- //
- // Input:
- // - timestamp : RTP timestamp of a received frame
- // - *delay : Pointer to memory where the result should be stored
- // - currentWallClock : The current time in milliseconds.
- // Should be -1 for normal operation, only used for testing.
- // Return value : true if OK, false when reordered timestamps
- bool CalculateDelay(uint32_t timestamp,
- int64_t *delay,
- int64_t currentWallClock);
-
- // Returns the current difference between incoming timestamps
- //
- // Return value : Wrap-around compensated difference between incoming
- // timestamps.
- uint32_t CurrentTimeStampDiffMs() const;
-
-private:
-    // Checks whether the RTP timestamp counter has wrapped around
-    // between the current and the previously received frame.
- //
- // Input:
-    // - timestamp : RTP timestamp of the current frame.
- void CheckForWrapArounds(uint32_t timestamp);
-
- int64_t _zeroWallClock; // Local timestamp of the first video packet received
- int32_t _wrapArounds; // Number of wrapArounds detected
- // The previous timestamp passed to the delay estimate
- uint32_t _prevTimestamp;
- // The previous wall clock timestamp used by the delay estimate
- int64_t _prevWallClock;
- // Wrap-around compensated difference between incoming timestamps
- int64_t _dTS;
-};
-
-} // namespace webrtc
-
-#endif // WEBRTC_MODULES_VIDEO_CODING_INTER_FRAME_DELAY_H_
diff --git a/webrtc/modules/video_coding/main/source/internal_defines.h b/webrtc/modules/video_coding/main/source/internal_defines.h
deleted file mode 100644
index adc940f20d..0000000000
--- a/webrtc/modules/video_coding/main/source/internal_defines.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef WEBRTC_MODULES_VIDEO_CODING_SOURCE_INTERNAL_DEFINES_H_
-#define WEBRTC_MODULES_VIDEO_CODING_SOURCE_INTERNAL_DEFINES_H_
-
-#include "webrtc/typedefs.h"
-
-namespace webrtc
-{
-
-#define MASK_32_BITS(x) (0xFFFFFFFF & (x))
-
-inline uint32_t MaskWord64ToUWord32(int64_t w64)
-{
- return static_cast<uint32_t>(MASK_32_BITS(w64));
-}
-
-#define VCM_MAX(a, b) (((a) > (b)) ? (a) : (b))
-#define VCM_MIN(a, b) (((a) < (b)) ? (a) : (b))
-
-#define VCM_DEFAULT_CODEC_WIDTH 352
-#define VCM_DEFAULT_CODEC_HEIGHT 288
-#define VCM_DEFAULT_FRAME_RATE 30
-#define VCM_MIN_BITRATE 30
-#define VCM_FLUSH_INDICATOR 4
-
-// Helper macros for creating the static codec list
-#define VCM_NO_CODEC_IDX -1
-#ifdef VIDEOCODEC_VP8
- #define VCM_VP8_IDX (VCM_NO_CODEC_IDX + 1)
-#else
- #define VCM_VP8_IDX VCM_NO_CODEC_IDX
-#endif
-#ifdef VIDEOCODEC_VP9
- #define VCM_VP9_IDX (VCM_VP8_IDX + 1)
-#else
- #define VCM_VP9_IDX VCM_VP8_IDX
-#endif
-#ifdef VIDEOCODEC_H264
- #define VCM_H264_IDX (VCM_VP9_IDX + 1)
-#else
- #define VCM_H264_IDX VCM_VP9_IDX
-#endif
-#ifdef VIDEOCODEC_I420
- #define VCM_I420_IDX (VCM_H264_IDX + 1)
-#else
- #define VCM_I420_IDX VCM_H264_IDX
-#endif
-#define VCM_NUM_VIDEO_CODECS_AVAILABLE (VCM_I420_IDX + 1)
-
-#define VCM_NO_RECEIVER_ID 0
-
-inline int32_t VCMId(const int32_t vcmId, const int32_t receiverId = 0)
-{
- return static_cast<int32_t>((vcmId << 16) + receiverId);
-}
-
-} // namespace webrtc
-
-#endif // WEBRTC_MODULES_VIDEO_CODING_SOURCE_INTERNAL_DEFINES_H_
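VCMId above packs the module id into the high 16 bits and the receiver id into the low 16, so the pair travels in a single int32_t. A quick worked example:

    #include <cassert>
    #include <cstdint>

    int32_t VCMId(int32_t vcmId, int32_t receiverId = 0) {
      return static_cast<int32_t>((vcmId << 16) + receiverId);
    }

    int main() {
      assert(VCMId(1, 2) == 0x00010002);  // 1 << 16 is 65536; plus 2 is 65538.
      return 0;
    }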
diff --git a/webrtc/modules/video_coding/main/source/jitter_estimator.cc b/webrtc/modules/video_coding/main/source/jitter_estimator.cc
deleted file mode 100644
index 5894c88d72..0000000000
--- a/webrtc/modules/video_coding/main/source/jitter_estimator.cc
+++ /dev/null
@@ -1,482 +0,0 @@
-/*
- * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "webrtc/modules/video_coding/main/source/internal_defines.h"
-#include "webrtc/modules/video_coding/main/source/jitter_estimator.h"
-#include "webrtc/modules/video_coding/main/source/rtt_filter.h"
-#include "webrtc/system_wrappers/include/clock.h"
-#include "webrtc/system_wrappers/include/field_trial.h"
-
-#include <assert.h>
-#include <math.h>
-#include <stdlib.h>
-#include <string.h>
-
-namespace webrtc {
-
-enum { kStartupDelaySamples = 30 };
-enum { kFsAccuStartupSamples = 5 };
-enum { kMaxFramerateEstimate = 200 };
-
-VCMJitterEstimator::VCMJitterEstimator(const Clock* clock,
- int32_t vcmId,
- int32_t receiverId)
- : _vcmId(vcmId),
- _receiverId(receiverId),
- _phi(0.97),
- _psi(0.9999),
- _alphaCountMax(400),
- _thetaLow(0.000001),
- _nackLimit(3),
- _numStdDevDelayOutlier(15),
- _numStdDevFrameSizeOutlier(3),
- _noiseStdDevs(2.33), // ~Less than 1% chance
- // (look up in normal distribution table)...
- _noiseStdDevOffset(30.0), // ...of getting 30 ms freezes
- _rttFilter(),
- fps_counter_(30), // TODO(sprang): Use an estimator with limit based on
- // time, rather than number of samples.
- low_rate_experiment_(kInit),
- clock_(clock) {
- Reset();
-}
-
-VCMJitterEstimator::~VCMJitterEstimator() {
-}
-
-VCMJitterEstimator&
-VCMJitterEstimator::operator=(const VCMJitterEstimator& rhs)
-{
- if (this != &rhs)
- {
- memcpy(_thetaCov, rhs._thetaCov, sizeof(_thetaCov));
- memcpy(_Qcov, rhs._Qcov, sizeof(_Qcov));
-
- _vcmId = rhs._vcmId;
- _receiverId = rhs._receiverId;
- _avgFrameSize = rhs._avgFrameSize;
- _varFrameSize = rhs._varFrameSize;
- _maxFrameSize = rhs._maxFrameSize;
- _fsSum = rhs._fsSum;
- _fsCount = rhs._fsCount;
- _lastUpdateT = rhs._lastUpdateT;
- _prevEstimate = rhs._prevEstimate;
- _prevFrameSize = rhs._prevFrameSize;
- _avgNoise = rhs._avgNoise;
- _alphaCount = rhs._alphaCount;
- _filterJitterEstimate = rhs._filterJitterEstimate;
- _startupCount = rhs._startupCount;
- _latestNackTimestamp = rhs._latestNackTimestamp;
- _nackCount = rhs._nackCount;
- _rttFilter = rhs._rttFilter;
- }
- return *this;
-}
-
-// Resets the JitterEstimate
-void
-VCMJitterEstimator::Reset()
-{
- _theta[0] = 1/(512e3/8);
- _theta[1] = 0;
- _varNoise = 4.0;
-
- _thetaCov[0][0] = 1e-4;
- _thetaCov[1][1] = 1e2;
- _thetaCov[0][1] = _thetaCov[1][0] = 0;
- _Qcov[0][0] = 2.5e-10;
- _Qcov[1][1] = 1e-10;
- _Qcov[0][1] = _Qcov[1][0] = 0;
- _avgFrameSize = 500;
- _maxFrameSize = 500;
- _varFrameSize = 100;
- _lastUpdateT = -1;
- _prevEstimate = -1.0;
- _prevFrameSize = 0;
- _avgNoise = 0.0;
- _alphaCount = 1;
- _filterJitterEstimate = 0.0;
- _latestNackTimestamp = 0;
- _nackCount = 0;
- _fsSum = 0;
- _fsCount = 0;
- _startupCount = 0;
- _rttFilter.Reset();
- fps_counter_.Reset();
-}
-
-void
-VCMJitterEstimator::ResetNackCount()
-{
- _nackCount = 0;
-}
-
-// Updates the estimates with the new measurements
-void
-VCMJitterEstimator::UpdateEstimate(int64_t frameDelayMS, uint32_t frameSizeBytes,
- bool incompleteFrame /* = false */)
-{
- if (frameSizeBytes == 0)
- {
- return;
- }
- int deltaFS = frameSizeBytes - _prevFrameSize;
- if (_fsCount < kFsAccuStartupSamples)
- {
- _fsSum += frameSizeBytes;
- _fsCount++;
- }
- else if (_fsCount == kFsAccuStartupSamples)
- {
-        // Seed the average frame size with the mean of the startup samples.
- _avgFrameSize = static_cast<double>(_fsSum) /
- static_cast<double>(_fsCount);
- _fsCount++;
- }
- if (!incompleteFrame || frameSizeBytes > _avgFrameSize)
- {
- double avgFrameSize = _phi * _avgFrameSize +
- (1 - _phi) * frameSizeBytes;
- if (frameSizeBytes < _avgFrameSize + 2 * sqrt(_varFrameSize))
- {
- // Only update the average frame size if this sample wasn't a
- // key frame
- _avgFrameSize = avgFrameSize;
- }
- // Update the variance anyway since we want to capture cases where we only get
- // key frames.
- _varFrameSize = VCM_MAX(_phi * _varFrameSize + (1 - _phi) *
- (frameSizeBytes - avgFrameSize) *
- (frameSizeBytes - avgFrameSize), 1.0);
- }
-
- // Update max frameSize estimate
- _maxFrameSize = VCM_MAX(_psi * _maxFrameSize, static_cast<double>(frameSizeBytes));
-
- if (_prevFrameSize == 0)
- {
- _prevFrameSize = frameSizeBytes;
- return;
- }
- _prevFrameSize = frameSizeBytes;
-
- // Only update the Kalman filter if the sample is not considered
- // an extreme outlier. Even if it is an extreme outlier from a
- // delay point of view, if the frame size also is large the
- // deviation is probably due to an incorrect line slope.
- double deviation = DeviationFromExpectedDelay(frameDelayMS, deltaFS);
-
- if (fabs(deviation) < _numStdDevDelayOutlier * sqrt(_varNoise) ||
- frameSizeBytes > _avgFrameSize + _numStdDevFrameSizeOutlier * sqrt(_varFrameSize))
- {
- // Update the variance of the deviation from the
- // line given by the Kalman filter
- EstimateRandomJitter(deviation, incompleteFrame);
- // Prevent updating with frames which have been congested by a large
- // frame, and therefore arrives almost at the same time as that frame.
- // This can occur when we receive a large frame (key frame) which
- // has been delayed. The next frame is of normal size (delta frame),
-    // and thus deltaFS will be << 0. This removes all frame samples
-    // which arrive immediately after a key frame.
- if ((!incompleteFrame || deviation >= 0.0) &&
- static_cast<double>(deltaFS) > - 0.25 * _maxFrameSize)
- {
- // Update the Kalman filter with the new data
- KalmanEstimateChannel(frameDelayMS, deltaFS);
- }
- }
- else
- {
- int nStdDev = (deviation >= 0) ? _numStdDevDelayOutlier : -_numStdDevDelayOutlier;
- EstimateRandomJitter(nStdDev * sqrt(_varNoise), incompleteFrame);
- }
- // Post process the total estimated jitter
- if (_startupCount >= kStartupDelaySamples)
- {
- PostProcessEstimate();
- }
- else
- {
- _startupCount++;
- }
-}
-
-// Updates the nack/packet ratio
-void
-VCMJitterEstimator::FrameNacked()
-{
-    // Wait until _nackLimit retransmissions have been received,
- // then always add ~1 RTT delay.
-    // TODO(holmer): Should we ever remove the additional delay if
-    // the packet losses seem to have stopped? We could for instance scale
- // the number of RTTs to add with the amount of retransmissions in a given
- // time interval, or similar.
- if (_nackCount < _nackLimit)
- {
- _nackCount++;
- }
-}
-
-// Updates Kalman estimate of the channel
-// The caller is expected to sanity check the inputs.
-void
-VCMJitterEstimator::KalmanEstimateChannel(int64_t frameDelayMS,
- int32_t deltaFSBytes)
-{
- double Mh[2];
- double hMh_sigma;
- double kalmanGain[2];
- double measureRes;
- double t00, t01;
-
- // Kalman filtering
-
- // Prediction
- // M = M + Q
- _thetaCov[0][0] += _Qcov[0][0];
- _thetaCov[0][1] += _Qcov[0][1];
- _thetaCov[1][0] += _Qcov[1][0];
- _thetaCov[1][1] += _Qcov[1][1];
-
- // Kalman gain
- // K = M*h'/(sigma2n + h*M*h') = M*h'/(1 + h*M*h')
- // h = [dFS 1]
- // Mh = M*h'
- // hMh_sigma = h*M*h' + R
- Mh[0] = _thetaCov[0][0] * deltaFSBytes + _thetaCov[0][1];
- Mh[1] = _thetaCov[1][0] * deltaFSBytes + _thetaCov[1][1];
- // sigma weights measurements with a small deltaFS as noisy and
- // measurements with large deltaFS as good
- if (_maxFrameSize < 1.0)
- {
- return;
- }
- double sigma = (300.0 * exp(-fabs(static_cast<double>(deltaFSBytes)) /
- (1e0 * _maxFrameSize)) + 1) * sqrt(_varNoise);
- if (sigma < 1.0)
- {
- sigma = 1.0;
- }
- hMh_sigma = deltaFSBytes * Mh[0] + Mh[1] + sigma;
- if ((hMh_sigma < 1e-9 && hMh_sigma >= 0) || (hMh_sigma > -1e-9 && hMh_sigma <= 0))
- {
- assert(false);
- return;
- }
- kalmanGain[0] = Mh[0] / hMh_sigma;
- kalmanGain[1] = Mh[1] / hMh_sigma;
-
- // Correction
- // theta = theta + K*(dT - h*theta)
- measureRes = frameDelayMS - (deltaFSBytes * _theta[0] + _theta[1]);
- _theta[0] += kalmanGain[0] * measureRes;
- _theta[1] += kalmanGain[1] * measureRes;
-
- if (_theta[0] < _thetaLow)
- {
- _theta[0] = _thetaLow;
- }
-
- // M = (I - K*h)*M
- t00 = _thetaCov[0][0];
- t01 = _thetaCov[0][1];
- _thetaCov[0][0] = (1 - kalmanGain[0] * deltaFSBytes) * t00 -
- kalmanGain[0] * _thetaCov[1][0];
- _thetaCov[0][1] = (1 - kalmanGain[0] * deltaFSBytes) * t01 -
- kalmanGain[0] * _thetaCov[1][1];
- _thetaCov[1][0] = _thetaCov[1][0] * (1 - kalmanGain[1]) -
- kalmanGain[1] * deltaFSBytes * t00;
- _thetaCov[1][1] = _thetaCov[1][1] * (1 - kalmanGain[1]) -
- kalmanGain[1] * deltaFSBytes * t01;
-
- // Covariance matrix, must be positive semi-definite
- assert(_thetaCov[0][0] + _thetaCov[1][1] >= 0 &&
- _thetaCov[0][0] * _thetaCov[1][1] - _thetaCov[0][1] * _thetaCov[1][0] >= 0 &&
- _thetaCov[0][0] >= 0);
-}
-
-// Calculate difference in delay between a sample and the
-// expected delay estimated by the Kalman filter
-double
-VCMJitterEstimator::DeviationFromExpectedDelay(int64_t frameDelayMS,
- int32_t deltaFSBytes) const
-{
- return frameDelayMS - (_theta[0] * deltaFSBytes + _theta[1]);
-}
-
-// Estimates the random jitter by calculating the variance of the
-// sample distance from the line given by theta.
-void VCMJitterEstimator::EstimateRandomJitter(double d_dT,
- bool incompleteFrame) {
- uint64_t now = clock_->TimeInMicroseconds();
- if (_lastUpdateT != -1) {
- fps_counter_.AddSample(now - _lastUpdateT);
- }
- _lastUpdateT = now;
-
- if (_alphaCount == 0) {
- assert(false);
- return;
- }
- double alpha =
- static_cast<double>(_alphaCount - 1) / static_cast<double>(_alphaCount);
- _alphaCount++;
- if (_alphaCount > _alphaCountMax)
- _alphaCount = _alphaCountMax;
-
- if (LowRateExperimentEnabled()) {
-    // To avoid a low frame rate stream reacting more slowly to changes,
-    // scale the alpha weight relative to a 30 fps stream.
- double fps = GetFrameRate();
- if (fps > 0.0) {
- double rate_scale = 30.0 / fps;
- // At startup, there can be a lot of noise in the fps estimate.
- // Interpolate rate_scale linearly, from 1.0 at sample #1, to 30.0 / fps
- // at sample #kStartupDelaySamples.
- if (_alphaCount < kStartupDelaySamples) {
- rate_scale =
- (_alphaCount * rate_scale + (kStartupDelaySamples - _alphaCount)) /
- kStartupDelaySamples;
- }
- alpha = pow(alpha, rate_scale);
- }
- }
-
- double avgNoise = alpha * _avgNoise + (1 - alpha) * d_dT;
- double varNoise =
- alpha * _varNoise + (1 - alpha) * (d_dT - _avgNoise) * (d_dT - _avgNoise);
- if (!incompleteFrame || varNoise > _varNoise) {
- _avgNoise = avgNoise;
- _varNoise = varNoise;
- }
- if (_varNoise < 1.0) {
- // The variance should never be zero, since we might get
- // stuck and consider all samples as outliers.
- _varNoise = 1.0;
- }
-}
-
-double
-VCMJitterEstimator::NoiseThreshold() const
-{
- double noiseThreshold = _noiseStdDevs * sqrt(_varNoise) - _noiseStdDevOffset;
- if (noiseThreshold < 1.0)
- {
- noiseThreshold = 1.0;
- }
- return noiseThreshold;
-}
-
-// Calculates the current jitter estimate from the filtered estimates
-double
-VCMJitterEstimator::CalculateEstimate()
-{
- double ret = _theta[0] * (_maxFrameSize - _avgFrameSize) + NoiseThreshold();
-
-    // A very low (or negative) estimate is ignored.
- if (ret < 1.0) {
- if (_prevEstimate <= 0.01)
- {
- ret = 1.0;
- }
- else
- {
- ret = _prevEstimate;
- }
- }
- if (ret > 10000.0) // Sanity
- {
- ret = 10000.0;
- }
- _prevEstimate = ret;
- return ret;
-}
-
-void
-VCMJitterEstimator::PostProcessEstimate()
-{
- _filterJitterEstimate = CalculateEstimate();
-}
-
-void
-VCMJitterEstimator::UpdateRtt(int64_t rttMs)
-{
- _rttFilter.Update(rttMs);
-}
-
-void
-VCMJitterEstimator::UpdateMaxFrameSize(uint32_t frameSizeBytes)
-{
- if (_maxFrameSize < frameSizeBytes)
- {
- _maxFrameSize = frameSizeBytes;
- }
-}
-
-// Returns the current filtered estimate if available,
-// otherwise tries to calculate an estimate.
-int VCMJitterEstimator::GetJitterEstimate(double rttMultiplier) {
- double jitterMS = CalculateEstimate() + OPERATING_SYSTEM_JITTER;
- if (_filterJitterEstimate > jitterMS)
- jitterMS = _filterJitterEstimate;
- if (_nackCount >= _nackLimit)
- jitterMS += _rttFilter.RttMs() * rttMultiplier;
-
- if (LowRateExperimentEnabled()) {
- static const double kJitterScaleLowThreshold = 5.0;
- static const double kJitterScaleHighThreshold = 10.0;
- double fps = GetFrameRate();
- // Ignore jitter for very low fps streams.
- if (fps < kJitterScaleLowThreshold) {
- if (fps == 0.0) {
- return jitterMS;
- }
- return 0;
- }
-
- // Semi-low frame rate; scale by factor linearly interpolated from 0.0 at
- // kJitterScaleLowThreshold to 1.0 at kJitterScaleHighThreshold.
- if (fps < kJitterScaleHighThreshold) {
- jitterMS =
- (1.0 / (kJitterScaleHighThreshold - kJitterScaleLowThreshold)) *
- (fps - kJitterScaleLowThreshold) * jitterMS;
- }
- }
-
-  return static_cast<int>(jitterMS + 0.5);
-}
-
-bool VCMJitterEstimator::LowRateExperimentEnabled() {
- if (low_rate_experiment_ == kInit) {
- std::string group =
- webrtc::field_trial::FindFullName("WebRTC-ReducedJitterDelay");
- if (group == "Disabled") {
- low_rate_experiment_ = kDisabled;
- } else {
- low_rate_experiment_ = kEnabled;
- }
- }
-  return low_rate_experiment_ == kEnabled;
-}
-
-double VCMJitterEstimator::GetFrameRate() const {
- if (fps_counter_.count() == 0)
- return 0;
-
- double fps = 1000000.0 / fps_counter_.ComputeMean();
- // Sanity check.
- assert(fps >= 0.0);
- if (fps > kMaxFramerateEstimate) {
- fps = kMaxFramerateEstimate;
- }
- return fps;
-}
-
-} // namespace webrtc
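Pulling the estimator above together: the Kalman filter tracks frameDelayMS ≈ theta[0] * deltaFS + theta[1], and CalculateEstimate then prices in the worst plausible frame, giving jitter = theta[0] * (maxFrameSize - avgFrameSize) + max(1, noiseStdDevs * sqrt(varNoise) - noiseStdDevOffset). A small numeric check with assumed filter state (not taken from a real trace):

    #include <algorithm>
    #include <cmath>
    #include <cstdio>

    int main() {
      double theta0 = 1.0 / (512e3 / 8);  // Initial slope from Reset(), ms/byte.
      double maxFrameSize = 25000.0;      // Assumed largest recent frame, bytes.
      double avgFrameSize = 5000.0;       // Assumed average frame, bytes.
      double varNoise = 16.0;             // Assumed noise variance, ms^2.
      // _noiseStdDevs = 2.33 and _noiseStdDevOffset = 30.0, as in the ctor above.
      double noiseThreshold = std::max(1.0, 2.33 * std::sqrt(varNoise) - 30.0);
      double jitterMs = theta0 * (maxFrameSize - avgFrameSize) + noiseThreshold;
      std::printf("jitter estimate: %.2f ms\n", jitterMs);  // ~1.31 ms here.
      return 0;
    }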
diff --git a/webrtc/modules/video_coding/main/source/jitter_estimator.h b/webrtc/modules/video_coding/main/source/jitter_estimator.h
deleted file mode 100644
index 46ed67ba1d..0000000000
--- a/webrtc/modules/video_coding/main/source/jitter_estimator.h
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
- * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef WEBRTC_MODULES_VIDEO_CODING_JITTER_ESTIMATOR_H_
-#define WEBRTC_MODULES_VIDEO_CODING_JITTER_ESTIMATOR_H_
-
-#include "webrtc/base/rollingaccumulator.h"
-#include "webrtc/modules/video_coding/main/source/rtt_filter.h"
-#include "webrtc/typedefs.h"
-
-namespace webrtc
-{
-
-class Clock;
-
-class VCMJitterEstimator
-{
-public:
- VCMJitterEstimator(const Clock* clock,
- int32_t vcmId = 0,
- int32_t receiverId = 0);
- virtual ~VCMJitterEstimator();
- VCMJitterEstimator& operator=(const VCMJitterEstimator& rhs);
-
- // Resets the estimate to the initial state
- void Reset();
- void ResetNackCount();
-
- // Updates the jitter estimate with the new data.
- //
- // Input:
- // - frameDelay : Delay-delta calculated by UTILDelayEstimate in milliseconds
- // - frameSize : Frame size of the current frame.
- // - incompleteFrame : Flags if the frame is used to update the estimate before it
- // was complete. Default is false.
- void UpdateEstimate(int64_t frameDelayMS,
- uint32_t frameSizeBytes,
- bool incompleteFrame = false);
-
-    // Returns the current jitter estimate in milliseconds and also
-    // adds an RTT dependent term in cases of retransmission.
- // Input:
- // - rttMultiplier : RTT param multiplier (when applicable).
- //
- // Return value : Jitter estimate in milliseconds
- int GetJitterEstimate(double rttMultiplier);
-
- // Updates the nack counter.
- void FrameNacked();
-
- // Updates the RTT filter.
- //
- // Input:
- // - rttMs : RTT in ms
- void UpdateRtt(int64_t rttMs);
-
- void UpdateMaxFrameSize(uint32_t frameSizeBytes);
-
- // A constant describing the delay from the jitter buffer
- // to the delay on the receiving side which is not accounted
- // for by the jitter buffer nor the decoding delay estimate.
- static const uint32_t OPERATING_SYSTEM_JITTER = 10;
-
-protected:
- // These are protected for better testing possibilities
- double _theta[2]; // Estimated line parameters (slope, offset)
- double _varNoise; // Variance of the time-deviation from the line
-
- virtual bool LowRateExperimentEnabled();
-
-private:
- // Updates the Kalman filter for the line describing
- // the frame size dependent jitter.
- //
- // Input:
- // - frameDelayMS : Delay-delta calculated by UTILDelayEstimate in milliseconds
- // - deltaFSBytes : Frame size delta, i.e.
- // : frame size at time T minus frame size at time T-1
- void KalmanEstimateChannel(int64_t frameDelayMS, int32_t deltaFSBytes);
-
- // Updates the random jitter estimate, i.e. the variance
- // of the time deviations from the line given by the Kalman filter.
- //
- // Input:
-    // - d_dT : The deviation from the Kalman estimate
-    // - incompleteFrame : True if the frame used to update the
-    // estimate was incomplete
- void EstimateRandomJitter(double d_dT, bool incompleteFrame);
-
- double NoiseThreshold() const;
-
- // Calculates the current jitter estimate.
- //
- // Return value : The current jitter estimate in milliseconds
- double CalculateEstimate();
-
- // Post process the calculated estimate
- void PostProcessEstimate();
-
- // Calculates the difference in delay between a sample and the
- // expected delay estimated by the Kalman filter.
- //
- // Input:
- // - frameDelayMS : Delay-delta calculated by UTILDelayEstimate in milliseconds
- // - deltaFS : Frame size delta, i.e. frame size at time
- // T minus frame size at time T-1
- //
- // Return value : The difference in milliseconds
- double DeviationFromExpectedDelay(int64_t frameDelayMS,
- int32_t deltaFSBytes) const;
-
- double GetFrameRate() const;
-
- // Constants, filter parameters
- int32_t _vcmId;
- int32_t _receiverId;
- const double _phi;
- const double _psi;
- const uint32_t _alphaCountMax;
- const double _thetaLow;
- const uint32_t _nackLimit;
- const int32_t _numStdDevDelayOutlier;
- const int32_t _numStdDevFrameSizeOutlier;
- const double _noiseStdDevs;
- const double _noiseStdDevOffset;
-
- double _thetaCov[2][2]; // Estimate covariance
- double _Qcov[2][2]; // Process noise covariance
- double _avgFrameSize; // Average frame size
- double _varFrameSize; // Frame size variance
- double _maxFrameSize; // Largest frame size received (descending
- // with a factor _psi)
- uint32_t _fsSum;
- uint32_t _fsCount;
-
- int64_t _lastUpdateT;
- double _prevEstimate; // The previously returned jitter estimate
- uint32_t _prevFrameSize; // Frame size of the previous frame
- double _avgNoise; // Average of the random jitter
- uint32_t _alphaCount;
- double _filterJitterEstimate; // The filtered sum of jitter estimates
-
- uint32_t _startupCount;
-
- int64_t _latestNackTimestamp; // Timestamp in ms when the latest nack was seen
- uint32_t _nackCount; // Keeps track of the number of nacks received,
- // but never goes above _nackLimit
- VCMRttFilter _rttFilter;
-
- rtc::RollingAccumulator<uint64_t> fps_counter_;
- enum ExperimentFlag { kInit, kEnabled, kDisabled };
- ExperimentFlag low_rate_experiment_;
- const Clock* clock_;
-};
-
-} // namespace webrtc
-
-#endif // WEBRTC_MODULES_VIDEO_CODING_JITTER_ESTIMATOR_H_
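A minimal usage sketch of the interface deleted above; the Clock::GetRealTimeClock() accessor and the include paths are assumptions based on the pre-move tree shown in this diff, and all sample values are made up:

// Hedged sketch, not part of this commit: drive the estimator with one
// synthetic frame sample and read the jitter estimate back.
#include "webrtc/modules/video_coding/main/source/jitter_estimator.h"
#include "webrtc/system_wrappers/include/clock.h"

int main() {
  webrtc::VCMJitterEstimator estimator(webrtc::Clock::GetRealTimeClock());
  // One update per received frame: delay delta (ms) and frame size (bytes).
  estimator.UpdateEstimate(/*frameDelayMS=*/5, /*frameSizeBytes=*/12000);
  estimator.FrameNacked();            // a retransmission was requested
  estimator.UpdateRtt(/*rttMs=*/80);  // keep the internal RTT filter current
  // rttMultiplier scales the RTT-dependent term added for retransmissions.
  int jitter_ms = estimator.GetJitterEstimate(/*rttMultiplier=*/1.0);
  return jitter_ms >= 0 ? 0 : 1;
}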
diff --git a/webrtc/modules/video_coding/main/source/media_opt_util.cc b/webrtc/modules/video_coding/main/source/media_opt_util.cc
deleted file mode 100644
index 51decbed97..0000000000
--- a/webrtc/modules/video_coding/main/source/media_opt_util.cc
+++ /dev/null
@@ -1,774 +0,0 @@
-/*
- * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "webrtc/modules/video_coding/main/source/media_opt_util.h"
-
-#include <algorithm>
-#include <float.h>
-#include <limits.h>
-#include <math.h>
-
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/modules/video_coding/codecs/vp8/include/vp8_common_types.h"
-#include "webrtc/modules/video_coding/main/interface/video_coding_defines.h"
-#include "webrtc/modules/video_coding/main/source/fec_tables_xor.h"
-#include "webrtc/modules/video_coding/main/source/nack_fec_tables.h"
-
-namespace webrtc {
-// Max value of loss rates in off-line model
-static const int kPacketLossMax = 129;
-
-namespace media_optimization {
-
-VCMProtectionMethod::VCMProtectionMethod()
- : _effectivePacketLoss(0),
- _protectionFactorK(0),
- _protectionFactorD(0),
- _scaleProtKey(2.0f),
- _maxPayloadSize(1460),
- _qmRobustness(new VCMQmRobustness()),
- _useUepProtectionK(false),
- _useUepProtectionD(true),
- _corrFecCost(1.0),
- _type(kNone) {
-}
-
-VCMProtectionMethod::~VCMProtectionMethod()
-{
- delete _qmRobustness;
-}
-void
-VCMProtectionMethod::UpdateContentMetrics(const
- VideoContentMetrics* contentMetrics)
-{
- _qmRobustness->UpdateContent(contentMetrics);
-}
-
-VCMNackFecMethod::VCMNackFecMethod(int64_t lowRttNackThresholdMs,
- int64_t highRttNackThresholdMs)
- : VCMFecMethod(),
- _lowRttNackMs(lowRttNackThresholdMs),
- _highRttNackMs(highRttNackThresholdMs),
- _maxFramesFec(1) {
- assert(lowRttNackThresholdMs >= -1 && highRttNackThresholdMs >= -1);
- assert(highRttNackThresholdMs == -1 ||
- lowRttNackThresholdMs <= highRttNackThresholdMs);
- assert(lowRttNackThresholdMs > -1 || highRttNackThresholdMs == -1);
- _type = kNackFec;
-}
-
-VCMNackFecMethod::~VCMNackFecMethod()
-{
- //
-}
-bool
-VCMNackFecMethod::ProtectionFactor(const VCMProtectionParameters* parameters)
-{
- // Hybrid Nack FEC has three operational modes:
- // 1. Low RTT (below kLowRttNackMs) - Nack only: Set FEC rate
- // (_protectionFactorD) to zero. -1 means no FEC.
- // 2. High RTT (above _highRttNackMs) - FEC Only: Keep FEC factors.
- // -1 means always allow NACK.
- // 3. Medium RTT values - Hybrid mode: We will only nack the
- // residual following the decoding of the FEC (refer to JB logic). FEC
- // delta protection factor will be adjusted based on the RTT.
-
- // Otherwise: we count on FEC; if the RTT is below a threshold, then we
- // nack the residual, based on a decision made in the JB.
-
- // Compute the protection factors
- VCMFecMethod::ProtectionFactor(parameters);
- if (_lowRttNackMs == -1 || parameters->rtt < _lowRttNackMs)
- {
- _protectionFactorD = 0;
- VCMFecMethod::UpdateProtectionFactorD(_protectionFactorD);
- }
-
- // When in Hybrid mode (RTT range), adjust FEC rates based on the
- // RTT (NACK effectiveness) - adjustment factor is in the range [0,1].
- else if (_highRttNackMs == -1 || parameters->rtt < _highRttNackMs)
- {
- // TODO(mikhal): Disabling adjustment temporarily.
- // uint16_t rttIndex = (uint16_t) parameters->rtt;
-        float adjustRtt = 1.0f;  // (float)VCMNackFecTable[rttIndex] / 100.0f;
-
- // Adjust FEC with NACK on (for delta frame only)
- // table depends on RTT relative to rttMax (NACK Threshold)
- _protectionFactorD = static_cast<uint8_t>
- (adjustRtt *
- static_cast<float>(_protectionFactorD));
- // update FEC rates after applying adjustment
- VCMFecMethod::UpdateProtectionFactorD(_protectionFactorD);
- }
-
- return true;
-}
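The RTT branching above reduces to a three-way mode decision; a self-contained restatement with illustrative names, where -1 keeps the same sentinel meaning as the thresholds above:

#include <cstdint>
#include <cstdio>

// Illustrative restatement of the three NACK/FEC modes keyed on RTT.
enum class HybridMode { kNackOnly, kHybrid, kFecOnly };

HybridMode SelectMode(int64_t rtt_ms, int64_t low_ms, int64_t high_ms) {
  if (low_ms == -1 || rtt_ms < low_ms) return HybridMode::kNackOnly;
  if (high_ms == -1 || rtt_ms < high_ms) return HybridMode::kHybrid;
  return HybridMode::kFecOnly;  // above the high threshold: keep FEC factors
}

int main() {
  // With kLowRttNackMs = 20 and an unbounded high threshold (-1),
  // a 10 ms RTT lands in NACK-only and 100 ms lands in hybrid.
  std::printf("%d %d\n", static_cast<int>(SelectMode(10, 20, -1)),
              static_cast<int>(SelectMode(100, 20, -1)));
  return 0;
}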
-
-int VCMNackFecMethod::ComputeMaxFramesFec(
- const VCMProtectionParameters* parameters) {
- if (parameters->numLayers > 2) {
- // For more than 2 temporal layers we will only have FEC on the base layer,
- // and the base layers will be pretty far apart. Therefore we force one
- // frame FEC.
- return 1;
- }
- // We set the max number of frames to base the FEC on so that on average
- // we will have complete frames in one RTT. Note that this is an upper
- // bound, and that the actual number of frames used for FEC is decided by the
- // RTP module based on the actual number of packets and the protection factor.
- float base_layer_framerate = parameters->frameRate /
- static_cast<float>(1 << (parameters->numLayers - 1));
- int max_frames_fec = std::max(static_cast<int>(
- 2.0f * base_layer_framerate * parameters->rtt /
- 1000.0f + 0.5f), 1);
- // |kUpperLimitFramesFec| is the upper limit on how many frames we
- // allow any FEC to be based on.
- if (max_frames_fec > kUpperLimitFramesFec) {
- max_frames_fec = kUpperLimitFramesFec;
- }
- return max_frames_fec;
-}
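Plugging representative numbers into the bound above, as a standalone recomputation rather than WebRTC code:

#include <algorithm>
#include <cstdio>

int main() {
  // 30 fps with 2 temporal layers -> the base layer runs at 15 fps.
  const float base_layer_framerate = 30.0f / static_cast<float>(1 << (2 - 1));
  const float rtt_ms = 100.0f;
  // 2 * 15 * 100 / 1000 + 0.5 = 3.5, truncated to 3 frames of FEC.
  int max_frames_fec = std::max(
      static_cast<int>(2.0f * base_layer_framerate * rtt_ms / 1000.0f + 0.5f),
      1);
  max_frames_fec = std::min(max_frames_fec, 6);  // kUpperLimitFramesFec cap
  std::printf("max_frames_fec = %d\n", max_frames_fec);  // prints 3
  return 0;
}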
-
-int VCMNackFecMethod::MaxFramesFec() const {
- return _maxFramesFec;
-}
-
-bool VCMNackFecMethod::BitRateTooLowForFec(
- const VCMProtectionParameters* parameters) {
- // Bitrate below which we turn off FEC, regardless of reported packet loss.
- // The condition should depend on resolution and content. For now, use
- // threshold on bytes per frame, with some effect for the frame size.
- // The condition for turning off FEC is also based on other factors,
- // such as |_numLayers|, |_maxFramesFec|, and |_rtt|.
- int estimate_bytes_per_frame = 1000 * BitsPerFrame(parameters) / 8;
- int max_bytes_per_frame = kMaxBytesPerFrameForFec;
- int num_pixels = parameters->codecWidth * parameters->codecHeight;
- if (num_pixels <= 352 * 288) {
- max_bytes_per_frame = kMaxBytesPerFrameForFecLow;
- } else if (num_pixels > 640 * 480) {
- max_bytes_per_frame = kMaxBytesPerFrameForFecHigh;
- }
- // TODO (marpan): add condition based on maximum frames used for FEC,
- // and expand condition based on frame size.
- // Max round trip time threshold in ms.
- const int64_t kMaxRttTurnOffFec = 200;
- if (estimate_bytes_per_frame < max_bytes_per_frame &&
- parameters->numLayers < 3 &&
- parameters->rtt < kMaxRttTurnOffFec) {
- return true;
- }
- return false;
-}
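The resolution check above selects one of three byte budgets; restated standalone, with the numeric limits copied from the kMaxBytesPerFrameForFec* enums declared in media_opt_util.h:

#include <cstdio>

// Picks the max bytes-per-frame budget below which FEC may be turned off.
int MaxBytesPerFrameForFec(int width, int height) {
  const int num_pixels = width * height;
  if (num_pixels <= 352 * 288) return 400;  // kMaxBytesPerFrameForFecLow
  if (num_pixels > 640 * 480) return 1000;  // kMaxBytesPerFrameForFecHigh
  return 700;                               // kMaxBytesPerFrameForFec
}

int main() {
  std::printf("%d %d %d\n",
              MaxBytesPerFrameForFec(352, 288),    // CIF  -> 400
              MaxBytesPerFrameForFec(640, 480),    // VGA  -> 700
              MaxBytesPerFrameForFec(1280, 720));  // 720p -> 1000
  return 0;
}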
-
-bool
-VCMNackFecMethod::EffectivePacketLoss(const VCMProtectionParameters* parameters)
-{
- // Set the effective packet loss for encoder (based on FEC code).
- // Compute the effective packet loss and residual packet loss due to FEC.
- VCMFecMethod::EffectivePacketLoss(parameters);
- return true;
-}
-
-bool
-VCMNackFecMethod::UpdateParameters(const VCMProtectionParameters* parameters)
-{
- ProtectionFactor(parameters);
- EffectivePacketLoss(parameters);
- _maxFramesFec = ComputeMaxFramesFec(parameters);
- if (BitRateTooLowForFec(parameters)) {
- _protectionFactorK = 0;
- _protectionFactorD = 0;
- }
-
- // Protection/fec rates obtained above are defined relative to total number
- // of packets (total rate: source + fec) FEC in RTP module assumes
- // protection factor is defined relative to source number of packets so we
- // should convert the factor to reduce mismatch between mediaOpt's rate and
- // the actual one
- _protectionFactorK = VCMFecMethod::ConvertFECRate(_protectionFactorK);
- _protectionFactorD = VCMFecMethod::ConvertFECRate(_protectionFactorD);
-
- return true;
-}
-
-VCMNackMethod::VCMNackMethod():
-VCMProtectionMethod()
-{
- _type = kNack;
-}
-
-VCMNackMethod::~VCMNackMethod()
-{
- //
-}
-
-bool
-VCMNackMethod::EffectivePacketLoss(const VCMProtectionParameters* parameter)
-{
- // Effective Packet Loss, NA in current version.
- _effectivePacketLoss = 0;
- return true;
-}
-
-bool
-VCMNackMethod::UpdateParameters(const VCMProtectionParameters* parameters)
-{
- // Compute the effective packet loss
- EffectivePacketLoss(parameters);
-
- // nackCost = (bitRate - nackCost) * (lossPr)
- return true;
-}
-
-VCMFecMethod::VCMFecMethod():
-VCMProtectionMethod()
-{
- _type = kFec;
-}
-VCMFecMethod::~VCMFecMethod()
-{
- //
-}
-
-uint8_t
-VCMFecMethod::BoostCodeRateKey(uint8_t packetFrameDelta,
- uint8_t packetFrameKey) const
-{
- uint8_t boostRateKey = 2;
- // Default: ratio scales the FEC protection up for I frames
- uint8_t ratio = 1;
-
- if (packetFrameDelta > 0)
- {
-        ratio = (uint8_t) (packetFrameKey / packetFrameDelta);
- }
- ratio = VCM_MAX(boostRateKey, ratio);
-
- return ratio;
-}
-
-uint8_t
-VCMFecMethod::ConvertFECRate(uint8_t codeRateRTP) const
-{
- return static_cast<uint8_t> (VCM_MIN(255,(0.5 + 255.0 * codeRateRTP /
- (float)(255 - codeRateRTP))));
-}
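A quick standalone check of this conversion: a factor defined relative to the total packet count (source + FEC) maps to a larger factor relative to source packets alone:

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Standalone restatement of ConvertFECRate: total-relative -> source-relative.
uint8_t ConvertFecRate(uint8_t code_rate_rtp) {
  return static_cast<uint8_t>(std::min(
      255.0, 0.5 + 255.0 * code_rate_rtp / (255.0 - code_rate_rtp)));
}

int main() {
  // 85/255 of all packets are FEC -> 85/170, i.e. half the source count.
  std::printf("%d\n", ConvertFecRate(85));   // prints 128 (~0.5 * 255)
  std::printf("%d\n", ConvertFecRate(128));  // prints 255 (saturates)
  return 0;
}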
-
-// Update FEC with protectionFactorD
-void
-VCMFecMethod::UpdateProtectionFactorD(uint8_t protectionFactorD)
-{
- _protectionFactorD = protectionFactorD;
-}
-
-// Update FEC with protectionFactorK
-void
-VCMFecMethod::UpdateProtectionFactorK(uint8_t protectionFactorK)
-{
- _protectionFactorK = protectionFactorK;
-}
-
-bool
-VCMFecMethod::ProtectionFactor(const VCMProtectionParameters* parameters)
-{
- // FEC PROTECTION SETTINGS: varies with packet loss and bitrate
-
- // No protection if (filtered) packetLoss is 0
- uint8_t packetLoss = (uint8_t) (255 * parameters->lossPr);
- if (packetLoss == 0)
- {
- _protectionFactorK = 0;
- _protectionFactorD = 0;
- return true;
- }
-
- // Parameters for FEC setting:
- // first partition size, thresholds, table pars, spatial resoln fac.
-
- // First partition protection: ~ 20%
- uint8_t firstPartitionProt = (uint8_t) (255 * 0.20);
-
- // Minimum protection level needed to generate one FEC packet for one
- // source packet/frame (in RTP sender)
- uint8_t minProtLevelFec = 85;
-
-    // Threshold on packetLoss and bitRate/frameRate (=average #packets),
- // above which we allocate protection to cover at least first partition.
- uint8_t lossThr = 0;
- uint8_t packetNumThr = 1;
-
- // Parameters for range of rate index of table.
- const uint8_t ratePar1 = 5;
- const uint8_t ratePar2 = 49;
-
- // Spatial resolution size, relative to a reference size.
- float spatialSizeToRef = static_cast<float>
- (parameters->codecWidth * parameters->codecHeight) /
- (static_cast<float>(704 * 576));
- // resolnFac: This parameter will generally increase/decrease the FEC rate
- // (for fixed bitRate and packetLoss) based on system size.
- // Use a smaller exponent (< 1) to control/soften system size effect.
- const float resolnFac = 1.0 / powf(spatialSizeToRef, 0.3f);
-
- const int bitRatePerFrame = BitsPerFrame(parameters);
-
-
- // Average number of packets per frame (source and fec):
- const uint8_t avgTotPackets = 1 + (uint8_t)
- ((float) bitRatePerFrame * 1000.0
- / (float) (8.0 * _maxPayloadSize) + 0.5);
-
- // FEC rate parameters: for P and I frame
- uint8_t codeRateDelta = 0;
- uint8_t codeRateKey = 0;
-
- // Get index for table: the FEC protection depends on an effective rate.
- // The range on the rate index corresponds to rates (bps)
- // from ~200k to ~8000k, for 30fps
- const uint16_t effRateFecTable = static_cast<uint16_t>
- (resolnFac * bitRatePerFrame);
- uint8_t rateIndexTable =
- (uint8_t) VCM_MAX(VCM_MIN((effRateFecTable - ratePar1) /
- ratePar1, ratePar2), 0);
-
- // Restrict packet loss range to 50:
- // current tables defined only up to 50%
- if (packetLoss >= kPacketLossMax)
- {
- packetLoss = kPacketLossMax - 1;
- }
- uint16_t indexTable = rateIndexTable * kPacketLossMax + packetLoss;
-
- // Check on table index
- assert(indexTable < kSizeCodeRateXORTable);
-
- // Protection factor for P frame
- codeRateDelta = kCodeRateXORTable[indexTable];
-
- if (packetLoss > lossThr && avgTotPackets > packetNumThr)
- {
- // Set a minimum based on first partition size.
- if (codeRateDelta < firstPartitionProt)
- {
- codeRateDelta = firstPartitionProt;
- }
- }
-
- // Check limit on amount of protection for P frame; 50% is max.
- if (codeRateDelta >= kPacketLossMax)
- {
- codeRateDelta = kPacketLossMax - 1;
- }
-
- float adjustFec = 1.0f;
- // Avoid additional adjustments when layers are active.
-    // TODO(mikhal/marco): Update adjustment based on layer info.
- if (parameters->numLayers == 1)
- {
- adjustFec = _qmRobustness->AdjustFecFactor(codeRateDelta,
- parameters->bitRate,
- parameters->frameRate,
- parameters->rtt,
- packetLoss);
- }
-
- codeRateDelta = static_cast<uint8_t>(codeRateDelta * adjustFec);
-
- // For Key frame:
- // Effectively at a higher rate, so we scale/boost the rate
- // The boost factor may depend on several factors: ratio of packet
- // number of I to P frames, how much protection placed on P frames, etc.
- const uint8_t packetFrameDelta = (uint8_t)
- (0.5 + parameters->packetsPerFrame);
- const uint8_t packetFrameKey = (uint8_t)
- (0.5 + parameters->packetsPerFrameKey);
- const uint8_t boostKey = BoostCodeRateKey(packetFrameDelta,
- packetFrameKey);
-
- rateIndexTable = (uint8_t) VCM_MAX(VCM_MIN(
- 1 + (boostKey * effRateFecTable - ratePar1) /
- ratePar1,ratePar2),0);
- uint16_t indexTableKey = rateIndexTable * kPacketLossMax + packetLoss;
-
-    indexTableKey = VCM_MIN(indexTableKey, kSizeCodeRateXORTable - 1);
-
- // Check on table index
- assert(indexTableKey < kSizeCodeRateXORTable);
-
- // Protection factor for I frame
- codeRateKey = kCodeRateXORTable[indexTableKey];
-
- // Boosting for Key frame.
- int boostKeyProt = _scaleProtKey * codeRateDelta;
- if (boostKeyProt >= kPacketLossMax)
- {
- boostKeyProt = kPacketLossMax - 1;
- }
-
- // Make sure I frame protection is at least larger than P frame protection,
- // and at least as high as filtered packet loss.
- codeRateKey = static_cast<uint8_t> (VCM_MAX(packetLoss,
- VCM_MAX(boostKeyProt, codeRateKey)));
-
- // Check limit on amount of protection for I frame: 50% is max.
- if (codeRateKey >= kPacketLossMax)
- {
- codeRateKey = kPacketLossMax - 1;
- }
-
- _protectionFactorK = codeRateKey;
- _protectionFactorD = codeRateDelta;
-
- // Generally there is a rate mis-match between the FEC cost estimated
- // in mediaOpt and the actual FEC cost sent out in RTP module.
- // This is more significant at low rates (small # of source packets), where
- // the granularity of the FEC decreases. In this case, non-zero protection
- // in mediaOpt may generate 0 FEC packets in RTP sender (since actual #FEC
- // is based on rounding off protectionFactor on actual source packet number).
-    // The correction factor (_corrFecCost) attempts to correct this, at least
- // for cases of low rates (small #packets) and low protection levels.
-
- float numPacketsFl = 1.0f + ((float) bitRatePerFrame * 1000.0
- / (float) (8.0 * _maxPayloadSize) + 0.5);
-
- const float estNumFecGen = 0.5f + static_cast<float> (_protectionFactorD *
- numPacketsFl / 255.0f);
-
-
- // We reduce cost factor (which will reduce overhead for FEC and
- // hybrid method) and not the protectionFactor.
- _corrFecCost = 1.0f;
- if (estNumFecGen < 1.1f && _protectionFactorD < minProtLevelFec)
- {
- _corrFecCost = 0.5f;
- }
- if (estNumFecGen < 0.9f && _protectionFactorD < minProtLevelFec)
- {
- _corrFecCost = 0.0f;
- }
-
- // TODO (marpan): Set the UEP protection on/off for Key and Delta frames
- _useUepProtectionK = _qmRobustness->SetUepProtection(codeRateKey,
- parameters->bitRate,
- packetLoss,
- 0);
-
- _useUepProtectionD = _qmRobustness->SetUepProtection(codeRateDelta,
- parameters->bitRate,
- packetLoss,
- 1);
-
- // DONE WITH FEC PROTECTION SETTINGS
- return true;
-}
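The (rate index, packet loss) pair above flattens into a single table index; a standalone recomputation with illustrative inputs:

#include <algorithm>
#include <cstdio>

int main() {
  const int kPacketLossMax = 129;   // loss axis length of the XOR FEC table
  const int ratePar1 = 5;
  const int ratePar2 = 49;
  const int effRateFecTable = 100;  // illustrative effective rate
  const int packetLoss = 26;        // ~10% loss in the 0..255 domain
  int rateIndexTable =
      std::max(std::min((effRateFecTable - ratePar1) / ratePar1, ratePar2), 0);
  // Rows are rate indices, columns are loss values: 19 * 129 + 26 = 2477.
  int indexTable = rateIndexTable * kPacketLossMax + packetLoss;
  std::printf("%d %d\n", rateIndexTable, indexTable);
  return 0;
}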
-
-int VCMFecMethod::BitsPerFrame(const VCMProtectionParameters* parameters) {
- // When temporal layers are available FEC will only be applied on the base
- // layer.
- const float bitRateRatio =
- kVp8LayerRateAlloction[parameters->numLayers - 1][0];
- float frameRateRatio = powf(1 / 2.0, parameters->numLayers - 1);
- float bitRate = parameters->bitRate * bitRateRatio;
- float frameRate = parameters->frameRate * frameRateRatio;
-
- // TODO(mikhal): Update factor following testing.
- float adjustmentFactor = 1;
-
- // Average bits per frame (units of kbits)
- return static_cast<int>(adjustmentFactor * bitRate / frameRate);
-}
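A worked example of the computation above, with an assumed base-layer bitrate share of 0.6 (the actual kVp8LayerRateAlloction entry lives in vp8_common_types.h and is not shown in this diff):

#include <cstdio>

int main() {
  // 500 kbits/s at 30 fps with 2 temporal layers.
  const double kBaseLayerBitrateShare = 0.6;         // assumed VP8 share
  double bit_rate = 500.0 * kBaseLayerBitrateShare;  // 300 kbits/s on base
  double frame_rate = 30.0 * 0.5;                    // (1/2)^(numLayers - 1)
  // Average bits per base-layer frame, in kbits: 300 / 15 = 20.
  std::printf("%d\n", static_cast<int>(bit_rate / frame_rate));
  return 0;
}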
-
-bool
-VCMFecMethod::EffectivePacketLoss(const VCMProtectionParameters* parameters)
-{
- // Effective packet loss to encoder is based on RPL (residual packet loss)
- // this is a soft setting based on degree of FEC protection
- // RPL = received/input packet loss - average_FEC_recovery
- // note: received/input packet loss may be filtered based on FilteredLoss
-
- // Effective Packet Loss, NA in current version.
- _effectivePacketLoss = 0;
-
- return true;
-}
-
-bool
-VCMFecMethod::UpdateParameters(const VCMProtectionParameters* parameters)
-{
- // Compute the protection factor
- ProtectionFactor(parameters);
-
- // Compute the effective packet loss
- EffectivePacketLoss(parameters);
-
-    // Protection/fec rates obtained above are defined relative to total number
- // of packets (total rate: source+fec) FEC in RTP module assumes protection
- // factor is defined relative to source number of packets so we should
- // convert the factor to reduce mismatch between mediaOpt suggested rate and
- // the actual rate
- _protectionFactorK = ConvertFECRate(_protectionFactorK);
- _protectionFactorD = ConvertFECRate(_protectionFactorD);
-
- return true;
-}
-VCMLossProtectionLogic::VCMLossProtectionLogic(int64_t nowMs):
-_currentParameters(),
-_rtt(0),
-_lossPr(0.0f),
-_bitRate(0.0f),
-_frameRate(0.0f),
-_keyFrameSize(0.0f),
-_fecRateKey(0),
-_fecRateDelta(0),
-_lastPrUpdateT(0),
-_lossPr255(0.9999f),
-_lossPrHistory(),
-_shortMaxLossPr255(0),
-_packetsPerFrame(0.9999f),
-_packetsPerFrameKey(0.9999f),
-_codecWidth(0),
-_codecHeight(0),
-_numLayers(1)
-{
- Reset(nowMs);
-}
-
-VCMLossProtectionLogic::~VCMLossProtectionLogic()
-{
- Release();
-}
-
-void VCMLossProtectionLogic::SetMethod(
- enum VCMProtectionMethodEnum newMethodType) {
- if (_selectedMethod && _selectedMethod->Type() == newMethodType)
- return;
-
- switch(newMethodType) {
- case kNack:
- _selectedMethod.reset(new VCMNackMethod());
- break;
- case kFec:
- _selectedMethod.reset(new VCMFecMethod());
- break;
- case kNackFec:
- _selectedMethod.reset(new VCMNackFecMethod(kLowRttNackMs, -1));
- break;
- case kNone:
- _selectedMethod.reset();
- break;
- }
- UpdateMethod();
-}
-
-void
-VCMLossProtectionLogic::UpdateRtt(int64_t rtt)
-{
- _rtt = rtt;
-}
-
-void
-VCMLossProtectionLogic::UpdateMaxLossHistory(uint8_t lossPr255,
- int64_t now)
-{
- if (_lossPrHistory[0].timeMs >= 0 &&
- now - _lossPrHistory[0].timeMs < kLossPrShortFilterWinMs)
- {
- if (lossPr255 > _shortMaxLossPr255)
- {
- _shortMaxLossPr255 = lossPr255;
- }
- }
- else
- {
- // Only add a new value to the history once a second
- if (_lossPrHistory[0].timeMs == -1)
- {
- // First, no shift
- _shortMaxLossPr255 = lossPr255;
- }
- else
- {
- // Shift
- for (int32_t i = (kLossPrHistorySize - 2); i >= 0; i--)
- {
- _lossPrHistory[i + 1].lossPr255 = _lossPrHistory[i].lossPr255;
- _lossPrHistory[i + 1].timeMs = _lossPrHistory[i].timeMs;
- }
- }
- if (_shortMaxLossPr255 == 0)
- {
- _shortMaxLossPr255 = lossPr255;
- }
-
- _lossPrHistory[0].lossPr255 = _shortMaxLossPr255;
- _lossPrHistory[0].timeMs = now;
- _shortMaxLossPr255 = 0;
- }
-}
-
-uint8_t
-VCMLossProtectionLogic::MaxFilteredLossPr(int64_t nowMs) const
-{
- uint8_t maxFound = _shortMaxLossPr255;
- if (_lossPrHistory[0].timeMs == -1)
- {
- return maxFound;
- }
- for (int32_t i = 0; i < kLossPrHistorySize; i++)
- {
- if (_lossPrHistory[i].timeMs == -1)
- {
- break;
- }
- if (nowMs - _lossPrHistory[i].timeMs >
- kLossPrHistorySize * kLossPrShortFilterWinMs)
- {
- // This sample (and all samples after this) is too old
- break;
- }
- if (_lossPrHistory[i].lossPr255 > maxFound)
- {
-            // This sample is the largest one so far in the history
- maxFound = _lossPrHistory[i].lossPr255;
- }
- }
- return maxFound;
-}
-
-uint8_t VCMLossProtectionLogic::FilteredLoss(
- int64_t nowMs,
- FilterPacketLossMode filter_mode,
- uint8_t lossPr255) {
-
- // Update the max window filter.
- UpdateMaxLossHistory(lossPr255, nowMs);
-
- // Update the recursive average filter.
- _lossPr255.Apply(static_cast<float> (nowMs - _lastPrUpdateT),
- static_cast<float> (lossPr255));
- _lastPrUpdateT = nowMs;
-
- // Filtered loss: default is received loss (no filtering).
- uint8_t filtered_loss = lossPr255;
-
- switch (filter_mode) {
- case kNoFilter:
- break;
- case kAvgFilter:
- filtered_loss = static_cast<uint8_t>(_lossPr255.filtered() + 0.5);
- break;
- case kMaxFilter:
- filtered_loss = MaxFilteredLossPr(nowMs);
- break;
- }
-
- return filtered_loss;
-}
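Of the three modes, kAvgFilter delegates to rtc::ExpFilter, which weights history by a base constant raised to the elapsed time; a minimal standalone equivalent using the same 0.9999 base as _lossPr255:

#include <cmath>
#include <cstdio>

int main() {
  // filtered = a^dt * filtered + (1 - a^dt) * sample, as in rtc::ExpFilter.
  const double alpha = 0.9999;  // per-millisecond base, as in _lossPr255
  double filtered = 0.0;
  // Three loss reports in the 0..255 domain, arriving 1000 ms apart.
  const double samples[] = {26.0, 51.0, 13.0};
  for (double sample : samples) {
    const double a = std::pow(alpha, 1000.0);  // dt = 1000 ms => a ~= 0.905
    filtered = a * filtered + (1.0 - a) * sample;
    std::printf("%.2f\n", filtered);  // ~2.47, ~7.09, ~7.65
  }
  return 0;
}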
-
-void
-VCMLossProtectionLogic::UpdateFilteredLossPr(uint8_t packetLossEnc)
-{
- _lossPr = (float) packetLossEnc / (float) 255.0;
-}
-
-void
-VCMLossProtectionLogic::UpdateBitRate(float bitRate)
-{
- _bitRate = bitRate;
-}
-
-void
-VCMLossProtectionLogic::UpdatePacketsPerFrame(float nPackets, int64_t nowMs)
-{
- _packetsPerFrame.Apply(static_cast<float>(nowMs - _lastPacketPerFrameUpdateT),
- nPackets);
- _lastPacketPerFrameUpdateT = nowMs;
-}
-
-void
-VCMLossProtectionLogic::UpdatePacketsPerFrameKey(float nPackets, int64_t nowMs)
-{
- _packetsPerFrameKey.Apply(static_cast<float>(nowMs -
- _lastPacketPerFrameUpdateTKey), nPackets);
- _lastPacketPerFrameUpdateTKey = nowMs;
-}
-
-void
-VCMLossProtectionLogic::UpdateKeyFrameSize(float keyFrameSize)
-{
- _keyFrameSize = keyFrameSize;
-}
-
-void
-VCMLossProtectionLogic::UpdateFrameSize(uint16_t width,
- uint16_t height)
-{
- _codecWidth = width;
- _codecHeight = height;
-}
-
-void VCMLossProtectionLogic::UpdateNumLayers(int numLayers) {
- _numLayers = (numLayers == 0) ? 1 : numLayers;
-}
-
-bool
-VCMLossProtectionLogic::UpdateMethod()
-{
- if (!_selectedMethod)
- return false;
- _currentParameters.rtt = _rtt;
- _currentParameters.lossPr = _lossPr;
- _currentParameters.bitRate = _bitRate;
- _currentParameters.frameRate = _frameRate; // rename actual frame rate?
- _currentParameters.keyFrameSize = _keyFrameSize;
- _currentParameters.fecRateDelta = _fecRateDelta;
- _currentParameters.fecRateKey = _fecRateKey;
- _currentParameters.packetsPerFrame = _packetsPerFrame.filtered();
- _currentParameters.packetsPerFrameKey = _packetsPerFrameKey.filtered();
- _currentParameters.codecWidth = _codecWidth;
- _currentParameters.codecHeight = _codecHeight;
- _currentParameters.numLayers = _numLayers;
- return _selectedMethod->UpdateParameters(&_currentParameters);
-}
-
-VCMProtectionMethod*
-VCMLossProtectionLogic::SelectedMethod() const
-{
- return _selectedMethod.get();
-}
-
-VCMProtectionMethodEnum VCMLossProtectionLogic::SelectedType() const {
- return _selectedMethod ? _selectedMethod->Type() : kNone;
-}
-
-void
-VCMLossProtectionLogic::Reset(int64_t nowMs)
-{
- _lastPrUpdateT = nowMs;
- _lastPacketPerFrameUpdateT = nowMs;
- _lastPacketPerFrameUpdateTKey = nowMs;
- _lossPr255.Reset(0.9999f);
- _packetsPerFrame.Reset(0.9999f);
- _fecRateDelta = _fecRateKey = 0;
- for (int32_t i = 0; i < kLossPrHistorySize; i++)
- {
- _lossPrHistory[i].lossPr255 = 0;
- _lossPrHistory[i].timeMs = -1;
- }
- _shortMaxLossPr255 = 0;
- Release();
-}
-
-void VCMLossProtectionLogic::Release() {
- _selectedMethod.reset();
-}
-
-} // namespace media_optimization
-} // namespace webrtc
diff --git a/webrtc/modules/video_coding/main/source/media_opt_util.h b/webrtc/modules/video_coding/main/source/media_opt_util.h
deleted file mode 100644
index 2085bbcde9..0000000000
--- a/webrtc/modules/video_coding/main/source/media_opt_util.h
+++ /dev/null
@@ -1,364 +0,0 @@
-/*
- * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef WEBRTC_MODULES_VIDEO_CODING_MEDIA_OPT_UTIL_H_
-#define WEBRTC_MODULES_VIDEO_CODING_MEDIA_OPT_UTIL_H_
-
-#include <math.h>
-#include <stdlib.h>
-
-#include "webrtc/base/exp_filter.h"
-#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/modules/video_coding/main/source/internal_defines.h"
-#include "webrtc/modules/video_coding/main/source/qm_select.h"
-#include "webrtc/system_wrappers/include/trace.h"
-#include "webrtc/typedefs.h"
-
-namespace webrtc {
-namespace media_optimization {
-
-// Number of time periods used for (max) window filter for packet loss
-// TODO (marpan): set reasonable window size for filtered packet loss,
-// adjustment should be based on logged/real data of loss stats/correlation.
-enum { kLossPrHistorySize = 10 };
-
-// 1000 ms, total filter length is (kLossPrHistorySize * 1000) ms
-enum { kLossPrShortFilterWinMs = 1000 };
-
-// The type of filter used on the received packet loss reports.
-enum FilterPacketLossMode {
- kNoFilter, // No filtering on received loss.
- kAvgFilter, // Recursive average filter.
- kMaxFilter // Max-window filter, over the time interval of:
- // (kLossPrHistorySize * kLossPrShortFilterWinMs) ms.
-};
-
-// Thresholds for hybrid NACK/FEC
-// common to media optimization and the jitter buffer.
-const int64_t kLowRttNackMs = 20;
-
-struct VCMProtectionParameters
-{
- VCMProtectionParameters() : rtt(0), lossPr(0.0f), bitRate(0.0f),
- packetsPerFrame(0.0f), packetsPerFrameKey(0.0f), frameRate(0.0f),
- keyFrameSize(0.0f), fecRateDelta(0), fecRateKey(0),
- codecWidth(0), codecHeight(0),
- numLayers(1)
- {}
-
- int64_t rtt;
- float lossPr;
- float bitRate;
- float packetsPerFrame;
- float packetsPerFrameKey;
- float frameRate;
- float keyFrameSize;
- uint8_t fecRateDelta;
- uint8_t fecRateKey;
- uint16_t codecWidth;
- uint16_t codecHeight;
- int numLayers;
-};
-
-
-/******************************/
-/* VCMProtectionMethod class */
-/******************************/
-
-enum VCMProtectionMethodEnum
-{
- kNack,
- kFec,
- kNackFec,
- kNone
-};
-
-class VCMLossProbabilitySample
-{
-public:
- VCMLossProbabilitySample() : lossPr255(0), timeMs(-1) {};
-
- uint8_t lossPr255;
- int64_t timeMs;
-};
-
-
-class VCMProtectionMethod
-{
-public:
- VCMProtectionMethod();
- virtual ~VCMProtectionMethod();
-
- // Updates the efficiency of the method using the parameters provided
- //
- // Input:
- // - parameters : Parameters used to calculate efficiency
- //
- // Return value : True if this method is recommended in
- // the given conditions.
- virtual bool UpdateParameters(const VCMProtectionParameters* parameters) = 0;
-
- // Returns the protection type
- //
- // Return value : The protection type
- enum VCMProtectionMethodEnum Type() const { return _type; }
-
- // Returns the effective packet loss for ER, required by this protection method
- //
- // Return value : Required effective packet loss
- virtual uint8_t RequiredPacketLossER() { return _effectivePacketLoss; }
-
- // Extracts the FEC protection factor for Key frame, required by this protection method
- //
- // Return value : Required protectionFactor for Key frame
- virtual uint8_t RequiredProtectionFactorK() { return _protectionFactorK; }
-
- // Extracts the FEC protection factor for Delta frame, required by this protection method
- //
- // Return value : Required protectionFactor for delta frame
- virtual uint8_t RequiredProtectionFactorD() { return _protectionFactorD; }
-
- // Extracts whether the FEC Unequal protection (UEP) is used for Key frame.
- //
- // Return value : Required Unequal protection on/off state.
- virtual bool RequiredUepProtectionK() { return _useUepProtectionK; }
-
-    // Extracts whether the FEC Unequal protection (UEP) is used for Delta frame.
- //
- // Return value : Required Unequal protection on/off state.
- virtual bool RequiredUepProtectionD() { return _useUepProtectionD; }
-
- virtual int MaxFramesFec() const { return 1; }
-
- // Updates content metrics
- void UpdateContentMetrics(const VideoContentMetrics* contentMetrics);
-
-protected:
-
- uint8_t _effectivePacketLoss;
- uint8_t _protectionFactorK;
- uint8_t _protectionFactorD;
- // Estimation of residual loss after the FEC
- float _scaleProtKey;
- int32_t _maxPayloadSize;
-
- VCMQmRobustness* _qmRobustness;
- bool _useUepProtectionK;
- bool _useUepProtectionD;
- float _corrFecCost;
- enum VCMProtectionMethodEnum _type;
-};
-
-class VCMNackMethod : public VCMProtectionMethod
-{
-public:
- VCMNackMethod();
- virtual ~VCMNackMethod();
- virtual bool UpdateParameters(const VCMProtectionParameters* parameters);
- // Get the effective packet loss
- bool EffectivePacketLoss(const VCMProtectionParameters* parameter);
-};
-
-class VCMFecMethod : public VCMProtectionMethod
-{
-public:
- VCMFecMethod();
- virtual ~VCMFecMethod();
- virtual bool UpdateParameters(const VCMProtectionParameters* parameters);
- // Get the effective packet loss for ER
- bool EffectivePacketLoss(const VCMProtectionParameters* parameters);
- // Get the FEC protection factors
- bool ProtectionFactor(const VCMProtectionParameters* parameters);
- // Get the boost for key frame protection
- uint8_t BoostCodeRateKey(uint8_t packetFrameDelta,
- uint8_t packetFrameKey) const;
- // Convert the rates: defined relative to total# packets or source# packets
- uint8_t ConvertFECRate(uint8_t codeRate) const;
- // Get the average effective recovery from FEC: for random loss model
- float AvgRecoveryFEC(const VCMProtectionParameters* parameters) const;
- // Update FEC with protectionFactorD
- void UpdateProtectionFactorD(uint8_t protectionFactorD);
- // Update FEC with protectionFactorK
- void UpdateProtectionFactorK(uint8_t protectionFactorK);
- // Compute the bits per frame. Account for temporal layers when applicable.
- int BitsPerFrame(const VCMProtectionParameters* parameters);
-
-protected:
- enum { kUpperLimitFramesFec = 6 };
- // Thresholds values for the bytes/frame and round trip time, below which we
- // may turn off FEC, depending on |_numLayers| and |_maxFramesFec|.
- // Max bytes/frame for VGA, corresponds to ~140k at 25fps.
- enum { kMaxBytesPerFrameForFec = 700 };
- // Max bytes/frame for CIF and lower: corresponds to ~80k at 25fps.
- enum { kMaxBytesPerFrameForFecLow = 400 };
- // Max bytes/frame for frame size larger than VGA, ~200k at 25fps.
- enum { kMaxBytesPerFrameForFecHigh = 1000 };
-};
-
-
-class VCMNackFecMethod : public VCMFecMethod
-{
-public:
- VCMNackFecMethod(int64_t lowRttNackThresholdMs,
- int64_t highRttNackThresholdMs);
- virtual ~VCMNackFecMethod();
- virtual bool UpdateParameters(const VCMProtectionParameters* parameters);
- // Get the effective packet loss for ER
- bool EffectivePacketLoss(const VCMProtectionParameters* parameters);
- // Get the protection factors
- bool ProtectionFactor(const VCMProtectionParameters* parameters);
- // Get the max number of frames the FEC is allowed to be based on.
- int MaxFramesFec() const;
- // Turn off the FEC based on low bitrate and other factors.
- bool BitRateTooLowForFec(const VCMProtectionParameters* parameters);
-private:
- int ComputeMaxFramesFec(const VCMProtectionParameters* parameters);
-
- int64_t _lowRttNackMs;
- int64_t _highRttNackMs;
- int _maxFramesFec;
-};
-
-class VCMLossProtectionLogic
-{
-public:
- VCMLossProtectionLogic(int64_t nowMs);
- ~VCMLossProtectionLogic();
-
- // Set the protection method to be used
- //
- // Input:
- // - newMethodType : New requested protection method type. If one
- // is already set, it will be deleted and replaced
- void SetMethod(VCMProtectionMethodEnum newMethodType);
-
- // Update the round-trip time
- //
- // Input:
-    //          - rtt : Round-trip time in milliseconds.
- void UpdateRtt(int64_t rtt);
-
- // Update the filtered packet loss.
- //
- // Input:
- // - packetLossEnc : The reported packet loss filtered
- // (max window or average)
- void UpdateFilteredLossPr(uint8_t packetLossEnc);
-
- // Update the current target bit rate.
- //
- // Input:
- // - bitRate : The current target bit rate in kbits/s
- void UpdateBitRate(float bitRate);
-
- // Update the number of packets per frame estimate, for delta frames
- //
- // Input:
- // - nPackets : Number of packets in the latest sent frame.
- void UpdatePacketsPerFrame(float nPackets, int64_t nowMs);
-
- // Update the number of packets per frame estimate, for key frames
- //
- // Input:
-    //          - nPackets : Number of packets in the latest sent frame.
- void UpdatePacketsPerFrameKey(float nPackets, int64_t nowMs);
-
- // Update the keyFrameSize estimate
- //
- // Input:
- // - keyFrameSize : The size of the latest sent key frame.
- void UpdateKeyFrameSize(float keyFrameSize);
-
- // Update the frame rate
- //
- // Input:
- // - frameRate : The current target frame rate.
- void UpdateFrameRate(float frameRate) { _frameRate = frameRate; }
-
- // Update the frame size
- //
- // Input:
- // - width : The codec frame width.
- // - height : The codec frame height.
- void UpdateFrameSize(uint16_t width, uint16_t height);
-
- // Update the number of active layers
- //
- // Input:
- // - numLayers : Number of layers used.
- void UpdateNumLayers(int numLayers);
-
-    // The amount of packet loss to cover with FEC.
-    //
-    // Input:
-    //          - fecRateKey      : Packet loss to cover with FEC when
-    //                              sending key frames.
-    //          - fecRateDelta    : Packet loss to cover with FEC when
-    //                              sending delta frames.
- void UpdateFECRates(uint8_t fecRateKey, uint8_t fecRateDelta)
- { _fecRateKey = fecRateKey;
- _fecRateDelta = fecRateDelta; }
-
- // Update the protection methods with the current VCMProtectionParameters
- // and set the requested protection settings.
- // Return value : Returns true on update
- bool UpdateMethod();
-
- // Returns the method currently selected.
- //
- // Return value : The protection method currently selected.
- VCMProtectionMethod* SelectedMethod() const;
-
- // Return the protection type of the currently selected method
- VCMProtectionMethodEnum SelectedType() const;
-
- // Updates the filtered loss for the average and max window packet loss,
- // and returns the filtered loss probability in the interval [0, 255].
- // The returned filtered loss value depends on the parameter |filter_mode|.
- // The input parameter |lossPr255| is the received packet loss.
-
- // Return value : The filtered loss probability
- uint8_t FilteredLoss(int64_t nowMs, FilterPacketLossMode filter_mode,
- uint8_t lossPr255);
-
- void Reset(int64_t nowMs);
-
- void Release();
-
-private:
-    // Updates the max-window loss history with the latest loss sample.
- void UpdateMaxLossHistory(uint8_t lossPr255, int64_t now);
- uint8_t MaxFilteredLossPr(int64_t nowMs) const;
- rtc::scoped_ptr<VCMProtectionMethod> _selectedMethod;
- VCMProtectionParameters _currentParameters;
- int64_t _rtt;
- float _lossPr;
- float _bitRate;
- float _frameRate;
- float _keyFrameSize;
- uint8_t _fecRateKey;
- uint8_t _fecRateDelta;
- int64_t _lastPrUpdateT;
- int64_t _lastPacketPerFrameUpdateT;
- int64_t _lastPacketPerFrameUpdateTKey;
- rtc::ExpFilter _lossPr255;
- VCMLossProbabilitySample _lossPrHistory[kLossPrHistorySize];
- uint8_t _shortMaxLossPr255;
- rtc::ExpFilter _packetsPerFrame;
- rtc::ExpFilter _packetsPerFrameKey;
- uint16_t _codecWidth;
- uint16_t _codecHeight;
- int _numLayers;
-};
-
-} // namespace media_optimization
-} // namespace webrtc
-
-#endif // WEBRTC_MODULES_VIDEO_CODING_MEDIA_OPT_UTIL_H_
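Taken together, the class above is driven as a feed-then-query loop; a hedged usage sketch against the declarations in this header, with made-up media parameters and the pre-move include path:

#include "webrtc/modules/video_coding/main/source/media_opt_util.h"

using webrtc::media_optimization::VCMLossProtectionLogic;

int main() {
  int64_t now_ms = 0;
  VCMLossProtectionLogic logic(now_ms);
  logic.SetMethod(webrtc::media_optimization::kNackFec);
  // Feed current channel/media state (illustrative values).
  logic.UpdateRtt(80);                 // ms
  logic.UpdateBitRate(500.0f);         // kbits/s
  logic.UpdateFrameRate(30.0f);
  logic.UpdateFrameSize(640, 480);
  logic.UpdateNumLayers(1);
  logic.UpdatePacketsPerFrame(3.0f, now_ms);
  logic.UpdateFilteredLossPr(26);      // ~10% loss in the 0..255 domain
  // Recompute protection settings and read the selected method back.
  logic.UpdateMethod();
  webrtc::media_optimization::VCMProtectionMethod* method =
      logic.SelectedMethod();
  return method != nullptr ? 0 : 1;
}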
diff --git a/webrtc/modules/video_coding/main/source/nack_fec_tables.h b/webrtc/modules/video_coding/main/source/nack_fec_tables.h
deleted file mode 100644
index b82bb1b4ba..0000000000
--- a/webrtc/modules/video_coding/main/source/nack_fec_tables.h
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef WEBRTC_MODULES_VIDEO_CODING_SOURCE_NACK_FEC_TABLES_H_
-#define WEBRTC_MODULES_VIDEO_CODING_SOURCE_NACK_FEC_TABLES_H_
-
-namespace webrtc
-{
-
-// Table for adjusting FEC rate for NACK/FEC protection method
-// Table values are built as a sigmoid function, ranging from 0 to 100, based on
-// the HybridNackTH values defined in media_opt_util.h.
-const uint16_t VCMNackFecTable[100] = {
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-1,
-1,
-1,
-1,
-1,
-2,
-2,
-2,
-3,
-3,
-4,
-5,
-6,
-7,
-9,
-10,
-12,
-15,
-18,
-21,
-24,
-28,
-32,
-37,
-41,
-46,
-51,
-56,
-61,
-66,
-70,
-74,
-78,
-81,
-84,
-86,
-89,
-90,
-92,
-93,
-95,
-95,
-96,
-97,
-97,
-98,
-98,
-99,
-99,
-99,
-99,
-99,
-99,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-
-};
-
-} // namespace webrtc
-
-#endif // WEBRTC_MODULES_VIDEO_CODING_SOURCE_NACK_FEC_TABLES_H_
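The table is dimensioned for RTT indices 0 through 99 and scaled by 100; the lookup that VCMNackFecMethod::ProtectionFactor currently keeps disabled would use it roughly as below (a sketch, with a clamp added so the illustrative index stays in range):

#include <algorithm>
#include <cstdint>
#include <cstdio>

#include "webrtc/modules/video_coding/main/source/nack_fec_tables.h"

int main() {
  // Map an RTT (ms) to a [0,1] FEC adjustment factor via the sigmoid table.
  int64_t rtt_ms = 45;
  const uint16_t rttIndex =
      static_cast<uint16_t>(std::min<int64_t>(rtt_ms, 99));  // clamp to table
  const float adjustRtt =
      static_cast<float>(webrtc::VCMNackFecTable[rttIndex]) / 100.0f;
  std::printf("%.2f\n", adjustRtt);  // 0.84 for index 45, per the table above
  return 0;
}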
diff --git a/webrtc/modules/video_coding/main/source/packet.h b/webrtc/modules/video_coding/main/source/packet.h
deleted file mode 100644
index 80bf532502..0000000000
--- a/webrtc/modules/video_coding/main/source/packet.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef WEBRTC_MODULES_VIDEO_CODING_PACKET_H_
-#define WEBRTC_MODULES_VIDEO_CODING_PACKET_H_
-
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/modules/video_coding/main/source/jitter_buffer_common.h"
-#include "webrtc/typedefs.h"
-
-namespace webrtc {
-
-class VCMPacket {
-public:
- VCMPacket();
- VCMPacket(const uint8_t* ptr,
- const size_t size,
- const WebRtcRTPHeader& rtpHeader);
- VCMPacket(const uint8_t* ptr,
- size_t size,
- uint16_t seqNum,
- uint32_t timestamp,
- bool markerBit);
-
- void Reset();
-
- uint8_t payloadType;
- uint32_t timestamp;
- // NTP time of the capture time in local timebase in milliseconds.
- int64_t ntp_time_ms_;
- uint16_t seqNum;
- const uint8_t* dataPtr;
- size_t sizeBytes;
- bool markerBit;
-
- FrameType frameType;
- VideoCodecType codec;
-
-  bool isFirstPacket; // Is this the first packet in a frame.
- VCMNaluCompleteness completeNALU; // Default is kNaluIncomplete.
- bool insertStartCode; // True if a start code should be inserted before this
- // packet.
- int width;
- int height;
- RTPVideoHeader codecSpecificHeader;
-
-protected:
- void CopyCodecSpecifics(const RTPVideoHeader& videoHeader);
-};
-
-} // namespace webrtc
-#endif // WEBRTC_MODULES_VIDEO_CODING_PACKET_H_
diff --git a/webrtc/modules/video_coding/main/source/rtt_filter.cc b/webrtc/modules/video_coding/main/source/rtt_filter.cc
deleted file mode 100644
index 5742e8fa89..0000000000
--- a/webrtc/modules/video_coding/main/source/rtt_filter.cc
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "webrtc/modules/video_coding/main/source/internal_defines.h"
-#include "webrtc/modules/video_coding/main/source/rtt_filter.h"
-
-#include <math.h>
-#include <stdlib.h>
-#include <string.h>
-
-namespace webrtc {
-
-VCMRttFilter::VCMRttFilter()
- : _filtFactMax(35),
- _jumpStdDevs(2.5),
- _driftStdDevs(3.5),
- _detectThreshold(kMaxDriftJumpCount) {
- Reset();
-}
-
-VCMRttFilter&
-VCMRttFilter::operator=(const VCMRttFilter& rhs)
-{
- if (this != &rhs)
- {
- _gotNonZeroUpdate = rhs._gotNonZeroUpdate;
- _avgRtt = rhs._avgRtt;
- _varRtt = rhs._varRtt;
- _maxRtt = rhs._maxRtt;
- _filtFactCount = rhs._filtFactCount;
- _jumpCount = rhs._jumpCount;
- _driftCount = rhs._driftCount;
- memcpy(_jumpBuf, rhs._jumpBuf, sizeof(_jumpBuf));
- memcpy(_driftBuf, rhs._driftBuf, sizeof(_driftBuf));
- }
- return *this;
-}
-
-void
-VCMRttFilter::Reset()
-{
- _gotNonZeroUpdate = false;
- _avgRtt = 0;
- _varRtt = 0;
- _maxRtt = 0;
- _filtFactCount = 1;
- _jumpCount = 0;
- _driftCount = 0;
-    memset(_jumpBuf, 0, sizeof(_jumpBuf));
-    memset(_driftBuf, 0, sizeof(_driftBuf));
-}
-
-void
-VCMRttFilter::Update(int64_t rttMs)
-{
- if (!_gotNonZeroUpdate)
- {
- if (rttMs == 0)
- {
- return;
- }
- _gotNonZeroUpdate = true;
- }
-
- // Sanity check
- if (rttMs > 3000)
- {
- rttMs = 3000;
- }
-
- double filtFactor = 0;
- if (_filtFactCount > 1)
- {
- filtFactor = static_cast<double>(_filtFactCount - 1) / _filtFactCount;
- }
- _filtFactCount++;
- if (_filtFactCount > _filtFactMax)
- {
- // This prevents filtFactor from going above
- // (_filtFactMax - 1) / _filtFactMax,
- // e.g., _filtFactMax = 50 => filtFactor = 49/50 = 0.98
- _filtFactCount = _filtFactMax;
- }
- double oldAvg = _avgRtt;
- double oldVar = _varRtt;
- _avgRtt = filtFactor * _avgRtt + (1 - filtFactor) * rttMs;
- _varRtt = filtFactor * _varRtt + (1 - filtFactor) *
- (rttMs - _avgRtt) * (rttMs - _avgRtt);
- _maxRtt = VCM_MAX(rttMs, _maxRtt);
- if (!JumpDetection(rttMs) || !DriftDetection(rttMs))
- {
- // In some cases we don't want to update the statistics
- _avgRtt = oldAvg;
- _varRtt = oldVar;
- }
-}
-
-bool
-VCMRttFilter::JumpDetection(int64_t rttMs)
-{
- double diffFromAvg = _avgRtt - rttMs;
- if (fabs(diffFromAvg) > _jumpStdDevs * sqrt(_varRtt))
- {
- int diffSign = (diffFromAvg >= 0) ? 1 : -1;
- int jumpCountSign = (_jumpCount >= 0) ? 1 : -1;
- if (diffSign != jumpCountSign)
- {
-            // Since the signs differ, the samples currently
-            // in the buffer are useless, as they represent a
-            // jump in a different direction.
- _jumpCount = 0;
- }
- if (abs(_jumpCount) < kMaxDriftJumpCount)
- {
- // Update the buffer used for the short time
- // statistics.
- // The sign of the diff is used for updating the counter since
- // we want to use the same buffer for keeping track of when
- // the RTT jumps down and up.
- _jumpBuf[abs(_jumpCount)] = rttMs;
- _jumpCount += diffSign;
- }
- if (abs(_jumpCount) >= _detectThreshold)
- {
- // Detected an RTT jump
- ShortRttFilter(_jumpBuf, abs(_jumpCount));
- _filtFactCount = _detectThreshold + 1;
- _jumpCount = 0;
- }
- else
- {
- return false;
- }
- }
- else
- {
- _jumpCount = 0;
- }
- return true;
-}
-
-bool
-VCMRttFilter::DriftDetection(int64_t rttMs)
-{
- if (_maxRtt - _avgRtt > _driftStdDevs * sqrt(_varRtt))
- {
- if (_driftCount < kMaxDriftJumpCount)
- {
- // Update the buffer used for the short time
- // statistics.
- _driftBuf[_driftCount] = rttMs;
- _driftCount++;
- }
- if (_driftCount >= _detectThreshold)
- {
- // Detected an RTT drift
- ShortRttFilter(_driftBuf, _driftCount);
- _filtFactCount = _detectThreshold + 1;
- _driftCount = 0;
- }
- }
- else
- {
- _driftCount = 0;
- }
- return true;
-}
-
-void
-VCMRttFilter::ShortRttFilter(int64_t* buf, uint32_t length)
-{
- if (length == 0)
- {
- return;
- }
- _maxRtt = 0;
- _avgRtt = 0;
- for (uint32_t i=0; i < length; i++)
- {
- if (buf[i] > _maxRtt)
- {
- _maxRtt = buf[i];
- }
- _avgRtt += buf[i];
- }
- _avgRtt = _avgRtt / static_cast<double>(length);
-}
-
-int64_t
-VCMRttFilter::RttMs() const
-{
- return static_cast<int64_t>(_maxRtt + 0.5);
-}
-
-}
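The averaging core above uses a filter factor that grows as (n - 1)/n until it saturates at 34/35, so the effective window expands and then holds; a standalone trace with jump and drift detection omitted:

#include <cstdio>

int main() {
  const unsigned filt_fact_max = 35;  // same cap as _filtFactMax above
  unsigned count = 1;
  double avg_rtt = 0.0;
  const double samples[] = {100.0, 100.0, 160.0, 100.0};
  for (double rtt_ms : samples) {
    double filt_factor = 0.0;
    if (count > 1)
      filt_factor = static_cast<double>(count - 1) / count;
    if (++count > filt_fact_max)
      count = filt_fact_max;  // window stops growing here
    avg_rtt = filt_factor * avg_rtt + (1.0 - filt_factor) * rtt_ms;
    std::printf("%.1f\n", avg_rtt);  // 100.0, 100.0, 120.0, 115.0
  }
  return 0;
}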
diff --git a/webrtc/modules/video_coding/main/source/rtt_filter.h b/webrtc/modules/video_coding/main/source/rtt_filter.h
deleted file mode 100644
index 9e14a1ab39..0000000000
--- a/webrtc/modules/video_coding/main/source/rtt_filter.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef WEBRTC_MODULES_VIDEO_CODING_RTT_FILTER_H_
-#define WEBRTC_MODULES_VIDEO_CODING_RTT_FILTER_H_
-
-#include "webrtc/typedefs.h"
-
-namespace webrtc
-{
-
-class VCMRttFilter
-{
-public:
- VCMRttFilter();
-
- VCMRttFilter& operator=(const VCMRttFilter& rhs);
-
- // Resets the filter.
- void Reset();
- // Updates the filter with a new sample.
- void Update(int64_t rttMs);
- // A getter function for the current RTT level in ms.
- int64_t RttMs() const;
-
-private:
- // The size of the drift and jump memory buffers
- // and thus also the detection threshold for these
- // detectors in number of samples.
- enum { kMaxDriftJumpCount = 5 };
- // Detects RTT jumps by comparing the difference between
- // samples and average to the standard deviation.
- // Returns true if the long time statistics should be updated
- // and false otherwise
- bool JumpDetection(int64_t rttMs);
- // Detects RTT drifts by comparing the difference between
- // max and average to the standard deviation.
- // Returns true if the long time statistics should be updated
- // and false otherwise
- bool DriftDetection(int64_t rttMs);
- // Computes the short time average and maximum of the vector buf.
- void ShortRttFilter(int64_t* buf, uint32_t length);
-
- bool _gotNonZeroUpdate;
- double _avgRtt;
- double _varRtt;
- int64_t _maxRtt;
- uint32_t _filtFactCount;
- const uint32_t _filtFactMax;
- const double _jumpStdDevs;
- const double _driftStdDevs;
- int32_t _jumpCount;
- int32_t _driftCount;
- const int32_t _detectThreshold;
- int64_t _jumpBuf[kMaxDriftJumpCount];
- int64_t _driftBuf[kMaxDriftJumpCount];
-};
-
-} // namespace webrtc
-
-#endif // WEBRTC_MODULES_VIDEO_CODING_RTT_FILTER_H_
diff --git a/webrtc/modules/video_coding/main/test/video_source.h b/webrtc/modules/video_coding/main/test/video_source.h
deleted file mode 100644
index 05deb4a39b..0000000000
--- a/webrtc/modules/video_coding/main/test/video_source.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef WEBRTC_MODULES_VIDEO_CODING_TEST_VIDEO_SOURCE_H_
-#define WEBRTC_MODULES_VIDEO_CODING_TEST_VIDEO_SOURCE_H_
-
-#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
-#include "webrtc/typedefs.h"
-
-#include <string>
-
-enum VideoSize
- {
- kUndefined,
- kSQCIF, // 128*96 = 12 288
- kQQVGA, // 160*120 = 19 200
- kQCIF, // 176*144 = 25 344
- kCGA, // 320*200 = 64 000
- kQVGA, // 320*240 = 76 800
- kSIF, // 352*240 = 84 480
- kWQVGA, // 400*240 = 96 000
- kCIF, // 352*288 = 101 376
- kW288p, // 512*288 = 147 456 (WCIF)
- k448p, // 576*448 = 281 088
- kVGA, // 640*480 = 307 200
- k432p, // 720*432 = 311 040
- kW432p, // 768*432 = 331 776
- k4SIF, // 704*480 = 337 920
- kW448p, // 768*448 = 344 064
- kNTSC, // 720*480 = 345 600
- kFW448p, // 800*448 = 358 400
- kWVGA, // 800*480 = 384 000
- k4CIF, // 704*576 = 405 504
- kSVGA, // 800*600 = 480 000
- kW544p, // 960*544 = 522 240
- kW576p, // 1024*576 = 589 824 (W4CIF)
- kHD, // 960*720 = 691 200
- kXGA, // 1024*768 = 786 432
- kWHD, // 1280*720 = 921 600
- kFullHD, // 1440*1080 = 1 555 200
- kWFullHD, // 1920*1080 = 2 073 600
-
- kNumberOfVideoSizes
- };
-
-
-class VideoSource
-{
-public:
- VideoSource();
- VideoSource(std::string fileName, VideoSize size, float frameRate, webrtc::VideoType type = webrtc::kI420);
- VideoSource(std::string fileName, uint16_t width, uint16_t height,
- float frameRate = 30, webrtc::VideoType type = webrtc::kI420);
-
- std::string GetFileName() const { return _fileName; }
- uint16_t GetWidth() const { return _width; }
- uint16_t GetHeight() const { return _height; }
- webrtc::VideoType GetType() const { return _type; }
- float GetFrameRate() const { return _frameRate; }
- int GetWidthHeight( VideoSize size);
-
- // Returns the filename with the path (including the leading slash) removed.
- std::string GetName() const;
-
- size_t GetFrameLength() const;
-
-private:
- std::string _fileName;
- uint16_t _width;
- uint16_t _height;
- webrtc::VideoType _type;
- float _frameRate;
-};
-
-#endif // WEBRTC_MODULES_VIDEO_CODING_TEST_VIDEO_SOURCE_H_
diff --git a/webrtc/modules/video_coding/media_opt_util.cc b/webrtc/modules/video_coding/media_opt_util.cc
new file mode 100644
index 0000000000..d57e9c8dd2
--- /dev/null
+++ b/webrtc/modules/video_coding/media_opt_util.cc
@@ -0,0 +1,682 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/video_coding/media_opt_util.h"
+
+#include <float.h>
+#include <limits.h>
+#include <math.h>
+
+#include <algorithm>
+
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/video_coding/codecs/vp8/include/vp8_common_types.h"
+#include "webrtc/modules/video_coding/include/video_coding_defines.h"
+#include "webrtc/modules/video_coding/fec_tables_xor.h"
+#include "webrtc/modules/video_coding/nack_fec_tables.h"
+
+namespace webrtc {
+// Max value of loss rates in off-line model
+static const int kPacketLossMax = 129;
+
+namespace media_optimization {
+
+VCMProtectionMethod::VCMProtectionMethod()
+ : _effectivePacketLoss(0),
+ _protectionFactorK(0),
+ _protectionFactorD(0),
+ _scaleProtKey(2.0f),
+ _maxPayloadSize(1460),
+ _qmRobustness(new VCMQmRobustness()),
+ _useUepProtectionK(false),
+ _useUepProtectionD(true),
+ _corrFecCost(1.0),
+ _type(kNone) {}
+
+VCMProtectionMethod::~VCMProtectionMethod() {
+ delete _qmRobustness;
+}
+void VCMProtectionMethod::UpdateContentMetrics(
+ const VideoContentMetrics* contentMetrics) {
+ _qmRobustness->UpdateContent(contentMetrics);
+}
+
+VCMNackFecMethod::VCMNackFecMethod(int64_t lowRttNackThresholdMs,
+ int64_t highRttNackThresholdMs)
+ : VCMFecMethod(),
+ _lowRttNackMs(lowRttNackThresholdMs),
+ _highRttNackMs(highRttNackThresholdMs),
+ _maxFramesFec(1) {
+ assert(lowRttNackThresholdMs >= -1 && highRttNackThresholdMs >= -1);
+ assert(highRttNackThresholdMs == -1 ||
+ lowRttNackThresholdMs <= highRttNackThresholdMs);
+ assert(lowRttNackThresholdMs > -1 || highRttNackThresholdMs == -1);
+ _type = kNackFec;
+}
+
+VCMNackFecMethod::~VCMNackFecMethod() {
+ //
+}
+bool VCMNackFecMethod::ProtectionFactor(
+ const VCMProtectionParameters* parameters) {
+ // Hybrid Nack FEC has three operational modes:
+ // 1. Low RTT (below kLowRttNackMs) - Nack only: Set FEC rate
+ // (_protectionFactorD) to zero. -1 means no FEC.
+ // 2. High RTT (above _highRttNackMs) - FEC Only: Keep FEC factors.
+ // -1 means always allow NACK.
+ // 3. Medium RTT values - Hybrid mode: We will only nack the
+ // residual following the decoding of the FEC (refer to JB logic). FEC
+ // delta protection factor will be adjusted based on the RTT.
+
+ // Otherwise: we count on FEC; if the RTT is below a threshold, then we
+ // nack the residual, based on a decision made in the JB.
+
+ // Compute the protection factors
+ VCMFecMethod::ProtectionFactor(parameters);
+ if (_lowRttNackMs == -1 || parameters->rtt < _lowRttNackMs) {
+ _protectionFactorD = 0;
+ VCMFecMethod::UpdateProtectionFactorD(_protectionFactorD);
+
+ // When in Hybrid mode (RTT range), adjust FEC rates based on the
+ // RTT (NACK effectiveness) - adjustment factor is in the range [0,1].
+ } else if (_highRttNackMs == -1 || parameters->rtt < _highRttNackMs) {
+ // TODO(mikhal): Disabling adjustment temporarily.
+ // uint16_t rttIndex = (uint16_t) parameters->rtt;
+ float adjustRtt = 1.0f; // (float)VCMNackFecTable[rttIndex] / 100.0f;
+
+ // Adjust FEC with NACK on (for delta frame only)
+ // table depends on RTT relative to rttMax (NACK Threshold)
+ _protectionFactorD = static_cast<uint8_t>(
+ adjustRtt * static_cast<float>(_protectionFactorD));
+ // update FEC rates after applying adjustment
+ VCMFecMethod::UpdateProtectionFactorD(_protectionFactorD);
+ }
+
+ return true;
+}
+
+int VCMNackFecMethod::ComputeMaxFramesFec(
+ const VCMProtectionParameters* parameters) {
+ if (parameters->numLayers > 2) {
+ // For more than 2 temporal layers we will only have FEC on the base layer,
+ // and the base layers will be pretty far apart. Therefore we force one
+ // frame FEC.
+ return 1;
+ }
+ // We set the max number of frames to base the FEC on so that on average
+ // we will have complete frames in one RTT. Note that this is an upper
+ // bound, and that the actual number of frames used for FEC is decided by the
+ // RTP module based on the actual number of packets and the protection factor.
+ float base_layer_framerate =
+ parameters->frameRate /
+ static_cast<float>(1 << (parameters->numLayers - 1));
+ int max_frames_fec = std::max(
+ static_cast<int>(2.0f * base_layer_framerate * parameters->rtt / 1000.0f +
+ 0.5f),
+ 1);
+ // |kUpperLimitFramesFec| is the upper limit on how many frames we
+ // allow any FEC to be based on.
+ if (max_frames_fec > kUpperLimitFramesFec) {
+ max_frames_fec = kUpperLimitFramesFec;
+ }
+ return max_frames_fec;
+}
+
+int VCMNackFecMethod::MaxFramesFec() const {
+ return _maxFramesFec;
+}
+
+bool VCMNackFecMethod::BitRateTooLowForFec(
+ const VCMProtectionParameters* parameters) {
+ // Bitrate below which we turn off FEC, regardless of reported packet loss.
+ // The condition should depend on resolution and content. For now, use
+ // threshold on bytes per frame, with some effect for the frame size.
+ // The condition for turning off FEC is also based on other factors,
+ // such as |_numLayers|, |_maxFramesFec|, and |_rtt|.
+ int estimate_bytes_per_frame = 1000 * BitsPerFrame(parameters) / 8;
+ int max_bytes_per_frame = kMaxBytesPerFrameForFec;
+ int num_pixels = parameters->codecWidth * parameters->codecHeight;
+ if (num_pixels <= 352 * 288) {
+ max_bytes_per_frame = kMaxBytesPerFrameForFecLow;
+ } else if (num_pixels > 640 * 480) {
+ max_bytes_per_frame = kMaxBytesPerFrameForFecHigh;
+ }
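+  // Illustrative example (assuming a single layer receives the full rate):
+  // at 300 kbps and 25 fps, BitsPerFrame() is about 12 kbits, i.e.
+  // 1000 * 12 / 8 = 1500 bytes per frame. That exceeds even the largest
+  // threshold (kMaxBytesPerFrameForFecHigh), so this condition alone would
+  // not turn FEC off.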
+ // TODO(marpan): add condition based on maximum frames used for FEC,
+ // and expand condition based on frame size.
+ // Max round trip time threshold in ms.
+ const int64_t kMaxRttTurnOffFec = 200;
+ if (estimate_bytes_per_frame < max_bytes_per_frame &&
+ parameters->numLayers < 3 && parameters->rtt < kMaxRttTurnOffFec) {
+ return true;
+ }
+ return false;
+}
+
+bool VCMNackFecMethod::EffectivePacketLoss(
+ const VCMProtectionParameters* parameters) {
+ // Set the effective packet loss for encoder (based on FEC code).
+ // Compute the effective packet loss and residual packet loss due to FEC.
+ VCMFecMethod::EffectivePacketLoss(parameters);
+ return true;
+}
+
+bool VCMNackFecMethod::UpdateParameters(
+ const VCMProtectionParameters* parameters) {
+ ProtectionFactor(parameters);
+ EffectivePacketLoss(parameters);
+ _maxFramesFec = ComputeMaxFramesFec(parameters);
+ if (BitRateTooLowForFec(parameters)) {
+ _protectionFactorK = 0;
+ _protectionFactorD = 0;
+ }
+
+  // Protection/fec rates obtained above are defined relative to the total
+  // number of packets (total rate: source + fec). The FEC in the RTP module
+  // assumes the protection factor is defined relative to the source number
+  // of packets, so we convert the factor to reduce the mismatch between
+  // mediaOpt's rate and the actual one.
+ _protectionFactorK = VCMFecMethod::ConvertFECRate(_protectionFactorK);
+ _protectionFactorD = VCMFecMethod::ConvertFECRate(_protectionFactorD);
+
+ return true;
+}
+
+VCMNackMethod::VCMNackMethod() : VCMProtectionMethod() {
+ _type = kNack;
+}
+
+VCMNackMethod::~VCMNackMethod() {
+ //
+}
+
+bool VCMNackMethod::EffectivePacketLoss(
+ const VCMProtectionParameters* parameter) {
+ // Effective Packet Loss, NA in current version.
+ _effectivePacketLoss = 0;
+ return true;
+}
+
+bool VCMNackMethod::UpdateParameters(
+ const VCMProtectionParameters* parameters) {
+ // Compute the effective packet loss
+ EffectivePacketLoss(parameters);
+
+  // A NACK cost estimate, e.g. nackCost = (bitRate - nackCost) * lossPr,
+  // is not computed in the current version.
+ return true;
+}
+
+VCMFecMethod::VCMFecMethod() : VCMProtectionMethod() {
+ _type = kFec;
+}
+VCMFecMethod::~VCMFecMethod() {
+ //
+}
+
+uint8_t VCMFecMethod::BoostCodeRateKey(uint8_t packetFrameDelta,
+ uint8_t packetFrameKey) const {
+ uint8_t boostRateKey = 2;
+ // Default: ratio scales the FEC protection up for I frames
+ uint8_t ratio = 1;
+
+ if (packetFrameDelta > 0) {
+    ratio = (uint8_t)(packetFrameKey / packetFrameDelta);
+ }
+ ratio = VCM_MAX(boostRateKey, ratio);
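+  // E.g. (illustrative): packetFrameKey = 8 and packetFrameDelta = 2 give
+  // ratio = 4, so key frame protection is scaled up 4x; the boostRateKey
+  // floor of 2 applies when key frames are not much larger than delta
+  // frames.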
+
+ return ratio;
+}
+
+uint8_t VCMFecMethod::ConvertFECRate(uint8_t codeRateRTP) const {
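+  // Worked example (illustrative): codeRateRTP = 51 means FEC is ~20% of
+  // the total (source + FEC) packet count. Relative to source packets that
+  // is 0.2 / 0.8 = 25%: 0.5 + 255.0 * 51 / (255 - 51) = 64.25 -> 64, and
+  // 64 / 255 ~= 0.25.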
+ return static_cast<uint8_t>(VCM_MIN(
+ 255,
+ (0.5 + 255.0 * codeRateRTP / static_cast<float>(255 - codeRateRTP))));
+}
+
+// Update FEC with protectionFactorD
+void VCMFecMethod::UpdateProtectionFactorD(uint8_t protectionFactorD) {
+ _protectionFactorD = protectionFactorD;
+}
+
+// Update FEC with protectionFactorK
+void VCMFecMethod::UpdateProtectionFactorK(uint8_t protectionFactorK) {
+ _protectionFactorK = protectionFactorK;
+}
+
+bool VCMFecMethod::ProtectionFactor(const VCMProtectionParameters* parameters) {
+ // FEC PROTECTION SETTINGS: varies with packet loss and bitrate
+
+ // No protection if (filtered) packetLoss is 0
+ uint8_t packetLoss = (uint8_t)(255 * parameters->lossPr);
+ if (packetLoss == 0) {
+ _protectionFactorK = 0;
+ _protectionFactorD = 0;
+ return true;
+ }
+
+  // Parameters for FEC setting:
+  // first partition size, thresholds, table parameters, spatial resolution
+  // factor.
+
+ // First partition protection: ~ 20%
+ uint8_t firstPartitionProt = (uint8_t)(255 * 0.20);
+
+ // Minimum protection level needed to generate one FEC packet for one
+ // source packet/frame (in RTP sender)
+ uint8_t minProtLevelFec = 85;
+
+  // Threshold on packetLoss and bitRate/frameRate (=average #packets),
+  // above which we allocate protection to cover at least the first
+  // partition.
+ uint8_t lossThr = 0;
+ uint8_t packetNumThr = 1;
+
+ // Parameters for range of rate index of table.
+ const uint8_t ratePar1 = 5;
+ const uint8_t ratePar2 = 49;
+
+ // Spatial resolution size, relative to a reference size.
+ float spatialSizeToRef =
+ static_cast<float>(parameters->codecWidth * parameters->codecHeight) /
+ (static_cast<float>(704 * 576));
+ // resolnFac: This parameter will generally increase/decrease the FEC rate
+ // (for fixed bitRate and packetLoss) based on system size.
+ // Use a smaller exponent (< 1) to control/soften system size effect.
+ const float resolnFac = 1.0 / powf(spatialSizeToRef, 0.3f);
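+  // E.g. (illustrative): for QCIF (176x144), spatialSizeToRef = 1/16 and
+  // resolnFac = 16^0.3 ~= 2.3, so the effective rate used for the table
+  // lookup below is about 2.3x the nominal bits per frame.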
+
+ const int bitRatePerFrame = BitsPerFrame(parameters);
+
+ // Average number of packets per frame (source and fec):
+ const uint8_t avgTotPackets =
+ 1 + (uint8_t)(static_cast<float>(bitRatePerFrame) * 1000.0 /
+ static_cast<float>(8.0 * _maxPayloadSize) +
+ 0.5);
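+  // E.g. (illustrative): with bitRatePerFrame = 20 kbits and
+  // _maxPayloadSize = 1400 bytes, avgTotPackets = 1 + (uint8_t)(20000 /
+  // 11200 + 0.5) = 1 + 2 = 3 packets per frame.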
+
+ // FEC rate parameters: for P and I frame
+ uint8_t codeRateDelta = 0;
+ uint8_t codeRateKey = 0;
+
+ // Get index for table: the FEC protection depends on an effective rate.
+ // The range on the rate index corresponds to rates (bps)
+ // from ~200k to ~8000k, for 30fps
+ const uint16_t effRateFecTable =
+ static_cast<uint16_t>(resolnFac * bitRatePerFrame);
+ uint8_t rateIndexTable = (uint8_t)VCM_MAX(
+ VCM_MIN((effRateFecTable - ratePar1) / ratePar1, ratePar2), 0);
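+  // E.g. (illustrative): effRateFecTable = 20 (20 kbits/frame with
+  // resolnFac = 1) gives rateIndexTable = (20 - 5) / 5 = 3, i.e. the
+  // fourth rate row of the loss table.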
+
+  // Restrict the packet loss range to 50%:
+  // current tables are defined only up to 50% loss.
+ if (packetLoss >= kPacketLossMax) {
+ packetLoss = kPacketLossMax - 1;
+ }
+ uint16_t indexTable = rateIndexTable * kPacketLossMax + packetLoss;
+
+ // Check on table index
+ assert(indexTable < kSizeCodeRateXORTable);
+
+ // Protection factor for P frame
+ codeRateDelta = kCodeRateXORTable[indexTable];
+
+ if (packetLoss > lossThr && avgTotPackets > packetNumThr) {
+ // Set a minimum based on first partition size.
+ if (codeRateDelta < firstPartitionProt) {
+ codeRateDelta = firstPartitionProt;
+ }
+ }
+
+ // Check limit on amount of protection for P frame; 50% is max.
+ if (codeRateDelta >= kPacketLossMax) {
+ codeRateDelta = kPacketLossMax - 1;
+ }
+
+ float adjustFec = 1.0f;
+ // Avoid additional adjustments when layers are active.
+  // TODO(mikhal/marco): Update adjustment based on layer info.
+ if (parameters->numLayers == 1) {
+ adjustFec = _qmRobustness->AdjustFecFactor(
+ codeRateDelta, parameters->bitRate, parameters->frameRate,
+ parameters->rtt, packetLoss);
+ }
+
+ codeRateDelta = static_cast<uint8_t>(codeRateDelta * adjustFec);
+
+  // For Key frame:
+  // Effectively at a higher rate, so we scale/boost the rate.
+  // The boost factor may depend on several factors: the ratio of packet
+  // counts in I vs. P frames, how much protection is placed on P frames,
+  // etc.
+ const uint8_t packetFrameDelta = (uint8_t)(0.5 + parameters->packetsPerFrame);
+ const uint8_t packetFrameKey =
+ (uint8_t)(0.5 + parameters->packetsPerFrameKey);
+ const uint8_t boostKey = BoostCodeRateKey(packetFrameDelta, packetFrameKey);
+
+ rateIndexTable = (uint8_t)VCM_MAX(
+ VCM_MIN(1 + (boostKey * effRateFecTable - ratePar1) / ratePar1, ratePar2),
+ 0);
+ uint16_t indexTableKey = rateIndexTable * kPacketLossMax + packetLoss;
+
+  indexTableKey = VCM_MIN(indexTableKey, kSizeCodeRateXORTable - 1);
+
+ // Check on table index
+ assert(indexTableKey < kSizeCodeRateXORTable);
+
+ // Protection factor for I frame
+ codeRateKey = kCodeRateXORTable[indexTableKey];
+
+ // Boosting for Key frame.
+ int boostKeyProt = _scaleProtKey * codeRateDelta;
+ if (boostKeyProt >= kPacketLossMax) {
+ boostKeyProt = kPacketLossMax - 1;
+ }
+
+  // Make sure the I frame protection is at least as large as the boosted
+  // P frame protection, and at least as high as the filtered packet loss.
+ codeRateKey = static_cast<uint8_t>(
+ VCM_MAX(packetLoss, VCM_MAX(boostKeyProt, codeRateKey)));
+
+ // Check limit on amount of protection for I frame: 50% is max.
+ if (codeRateKey >= kPacketLossMax) {
+ codeRateKey = kPacketLossMax - 1;
+ }
+
+ _protectionFactorK = codeRateKey;
+ _protectionFactorD = codeRateDelta;
+
+ // Generally there is a rate mis-match between the FEC cost estimated
+ // in mediaOpt and the actual FEC cost sent out in RTP module.
+ // This is more significant at low rates (small # of source packets), where
+ // the granularity of the FEC decreases. In this case, non-zero protection
+ // in mediaOpt may generate 0 FEC packets in RTP sender (since actual #FEC
+ // is based on rounding off protectionFactor on actual source packet number).
+  // The correction factor (_corrFecCost) attempts to correct this, at least
+  // for cases of low rates (small #packets) and low protection levels.
+
+ float numPacketsFl = 1.0f + (static_cast<float>(bitRatePerFrame) * 1000.0 /
+ static_cast<float>(8.0 * _maxPayloadSize) +
+ 0.5);
+
+ const float estNumFecGen =
+ 0.5f + static_cast<float>(_protectionFactorD * numPacketsFl / 255.0f);
+
+  // We reduce the cost factor (which will reduce the overhead for the FEC
+  // and hybrid methods) rather than the protectionFactor itself.
+ _corrFecCost = 1.0f;
+ if (estNumFecGen < 1.1f && _protectionFactorD < minProtLevelFec) {
+ _corrFecCost = 0.5f;
+ }
+ if (estNumFecGen < 0.9f && _protectionFactorD < minProtLevelFec) {
+ _corrFecCost = 0.0f;
+ }
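+  // E.g. (illustrative): with numPacketsFl ~= 1.7 and _protectionFactorD =
+  // 20 (< minProtLevelFec), estNumFecGen = 0.5 + 20 * 1.7 / 255 ~= 0.63,
+  // so _corrFecCost is set to 0.0 and the estimated FEC cost is zeroed.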
+
+ // TODO(marpan): Set the UEP protection on/off for Key and Delta frames
+ _useUepProtectionK = _qmRobustness->SetUepProtection(
+ codeRateKey, parameters->bitRate, packetLoss, 0);
+
+ _useUepProtectionD = _qmRobustness->SetUepProtection(
+ codeRateDelta, parameters->bitRate, packetLoss, 1);
+
+ // DONE WITH FEC PROTECTION SETTINGS
+ return true;
+}
+
+int VCMFecMethod::BitsPerFrame(const VCMProtectionParameters* parameters) {
+  // When temporal layers are available, FEC will only be applied to the
+  // base layer.
+ const float bitRateRatio =
+ kVp8LayerRateAlloction[parameters->numLayers - 1][0];
+ float frameRateRatio = powf(1 / 2.0, parameters->numLayers - 1);
+ float bitRate = parameters->bitRate * bitRateRatio;
+ float frameRate = parameters->frameRate * frameRateRatio;
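+  // Illustrative example (assuming a base-layer rate allocation of 0.6 for
+  // two layers): with bitRate = 600 kbps and frameRate = 30 fps, the base
+  // layer runs at 15 fps and this returns about 0.6 * 600 / 15 = 24
+  // kbits/frame.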
+
+ // TODO(mikhal): Update factor following testing.
+ float adjustmentFactor = 1;
+
+ // Average bits per frame (units of kbits)
+ return static_cast<int>(adjustmentFactor * bitRate / frameRate);
+}
+
+bool VCMFecMethod::EffectivePacketLoss(
+ const VCMProtectionParameters* parameters) {
+  // Effective packet loss to encoder is based on RPL (residual packet
+  // loss). This is a soft setting based on the degree of FEC protection:
+  // RPL = received/input packet loss - average_FEC_recovery.
+  // Note: received/input packet loss may be filtered based on FilteredLoss.
+
+ // Effective Packet Loss, NA in current version.
+ _effectivePacketLoss = 0;
+
+ return true;
+}
+
+bool VCMFecMethod::UpdateParameters(const VCMProtectionParameters* parameters) {
+ // Compute the protection factor
+ ProtectionFactor(parameters);
+
+ // Compute the effective packet loss
+ EffectivePacketLoss(parameters);
+
+  // Protection/fec rates obtained above are defined relative to the total
+  // number of packets (total rate: source + fec). The FEC in the RTP module
+  // assumes the protection factor is defined relative to the source number
+  // of packets, so we convert the factor to reduce the mismatch between
+  // mediaOpt's suggested rate and the actual rate.
+ _protectionFactorK = ConvertFECRate(_protectionFactorK);
+ _protectionFactorD = ConvertFECRate(_protectionFactorD);
+
+ return true;
+}
+VCMLossProtectionLogic::VCMLossProtectionLogic(int64_t nowMs)
+ : _currentParameters(),
+ _rtt(0),
+ _lossPr(0.0f),
+ _bitRate(0.0f),
+ _frameRate(0.0f),
+ _keyFrameSize(0.0f),
+ _fecRateKey(0),
+ _fecRateDelta(0),
+ _lastPrUpdateT(0),
+ _lossPr255(0.9999f),
+ _lossPrHistory(),
+ _shortMaxLossPr255(0),
+ _packetsPerFrame(0.9999f),
+ _packetsPerFrameKey(0.9999f),
+ _codecWidth(0),
+ _codecHeight(0),
+ _numLayers(1) {
+ Reset(nowMs);
+}
+
+VCMLossProtectionLogic::~VCMLossProtectionLogic() {
+ Release();
+}
+
+void VCMLossProtectionLogic::SetMethod(
+ enum VCMProtectionMethodEnum newMethodType) {
+ if (_selectedMethod && _selectedMethod->Type() == newMethodType)
+ return;
+
+ switch (newMethodType) {
+ case kNack:
+ _selectedMethod.reset(new VCMNackMethod());
+ break;
+ case kFec:
+ _selectedMethod.reset(new VCMFecMethod());
+ break;
+ case kNackFec:
+ _selectedMethod.reset(new VCMNackFecMethod(kLowRttNackMs, -1));
+ break;
+ case kNone:
+ _selectedMethod.reset();
+ break;
+ }
+ UpdateMethod();
+}
+
+void VCMLossProtectionLogic::UpdateRtt(int64_t rtt) {
+ _rtt = rtt;
+}
+
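+// Maintains a max-window filter over the reported loss: within each
+// kLossPrShortFilterWinMs window the maximum reported loss is tracked, and
+// once per window the history is shifted and that maximum is stored in
+// slot 0.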
+void VCMLossProtectionLogic::UpdateMaxLossHistory(uint8_t lossPr255,
+ int64_t now) {
+ if (_lossPrHistory[0].timeMs >= 0 &&
+ now - _lossPrHistory[0].timeMs < kLossPrShortFilterWinMs) {
+ if (lossPr255 > _shortMaxLossPr255) {
+ _shortMaxLossPr255 = lossPr255;
+ }
+ } else {
+ // Only add a new value to the history once a second
+ if (_lossPrHistory[0].timeMs == -1) {
+ // First, no shift
+ _shortMaxLossPr255 = lossPr255;
+ } else {
+ // Shift
+ for (int32_t i = (kLossPrHistorySize - 2); i >= 0; i--) {
+ _lossPrHistory[i + 1].lossPr255 = _lossPrHistory[i].lossPr255;
+ _lossPrHistory[i + 1].timeMs = _lossPrHistory[i].timeMs;
+ }
+ }
+ if (_shortMaxLossPr255 == 0) {
+ _shortMaxLossPr255 = lossPr255;
+ }
+
+ _lossPrHistory[0].lossPr255 = _shortMaxLossPr255;
+ _lossPrHistory[0].timeMs = now;
+ _shortMaxLossPr255 = 0;
+ }
+}
+
+uint8_t VCMLossProtectionLogic::MaxFilteredLossPr(int64_t nowMs) const {
+ uint8_t maxFound = _shortMaxLossPr255;
+ if (_lossPrHistory[0].timeMs == -1) {
+ return maxFound;
+ }
+ for (int32_t i = 0; i < kLossPrHistorySize; i++) {
+ if (_lossPrHistory[i].timeMs == -1) {
+ break;
+ }
+ if (nowMs - _lossPrHistory[i].timeMs >
+ kLossPrHistorySize * kLossPrShortFilterWinMs) {
+ // This sample (and all samples after this) is too old
+ break;
+ }
+ if (_lossPrHistory[i].lossPr255 > maxFound) {
+      // This sample is the largest one so far in the history.
+ maxFound = _lossPrHistory[i].lossPr255;
+ }
+ }
+ return maxFound;
+}
+
+uint8_t VCMLossProtectionLogic::FilteredLoss(int64_t nowMs,
+ FilterPacketLossMode filter_mode,
+ uint8_t lossPr255) {
+ // Update the max window filter.
+ UpdateMaxLossHistory(lossPr255, nowMs);
+
+ // Update the recursive average filter.
+ _lossPr255.Apply(static_cast<float>(nowMs - _lastPrUpdateT),
+ static_cast<float>(lossPr255));
+ _lastPrUpdateT = nowMs;
+
+ // Filtered loss: default is received loss (no filtering).
+ uint8_t filtered_loss = lossPr255;
+
+ switch (filter_mode) {
+ case kNoFilter:
+ break;
+ case kAvgFilter:
+ filtered_loss = static_cast<uint8_t>(_lossPr255.filtered() + 0.5);
+ break;
+ case kMaxFilter:
+ filtered_loss = MaxFilteredLossPr(nowMs);
+ break;
+ }
+
+ return filtered_loss;
+}
+
+void VCMLossProtectionLogic::UpdateFilteredLossPr(uint8_t packetLossEnc) {
+ _lossPr = static_cast<float>(packetLossEnc) / 255.0;
+}
+
+void VCMLossProtectionLogic::UpdateBitRate(float bitRate) {
+ _bitRate = bitRate;
+}
+
+void VCMLossProtectionLogic::UpdatePacketsPerFrame(float nPackets,
+ int64_t nowMs) {
+ _packetsPerFrame.Apply(static_cast<float>(nowMs - _lastPacketPerFrameUpdateT),
+ nPackets);
+ _lastPacketPerFrameUpdateT = nowMs;
+}
+
+void VCMLossProtectionLogic::UpdatePacketsPerFrameKey(float nPackets,
+ int64_t nowMs) {
+ _packetsPerFrameKey.Apply(
+ static_cast<float>(nowMs - _lastPacketPerFrameUpdateTKey), nPackets);
+ _lastPacketPerFrameUpdateTKey = nowMs;
+}
+
+void VCMLossProtectionLogic::UpdateKeyFrameSize(float keyFrameSize) {
+ _keyFrameSize = keyFrameSize;
+}
+
+void VCMLossProtectionLogic::UpdateFrameSize(uint16_t width, uint16_t height) {
+ _codecWidth = width;
+ _codecHeight = height;
+}
+
+void VCMLossProtectionLogic::UpdateNumLayers(int numLayers) {
+ _numLayers = (numLayers == 0) ? 1 : numLayers;
+}
+
+bool VCMLossProtectionLogic::UpdateMethod() {
+ if (!_selectedMethod)
+ return false;
+ _currentParameters.rtt = _rtt;
+ _currentParameters.lossPr = _lossPr;
+ _currentParameters.bitRate = _bitRate;
+ _currentParameters.frameRate = _frameRate; // rename actual frame rate?
+ _currentParameters.keyFrameSize = _keyFrameSize;
+ _currentParameters.fecRateDelta = _fecRateDelta;
+ _currentParameters.fecRateKey = _fecRateKey;
+ _currentParameters.packetsPerFrame = _packetsPerFrame.filtered();
+ _currentParameters.packetsPerFrameKey = _packetsPerFrameKey.filtered();
+ _currentParameters.codecWidth = _codecWidth;
+ _currentParameters.codecHeight = _codecHeight;
+ _currentParameters.numLayers = _numLayers;
+ return _selectedMethod->UpdateParameters(&_currentParameters);
+}
+
+VCMProtectionMethod* VCMLossProtectionLogic::SelectedMethod() const {
+ return _selectedMethod.get();
+}
+
+VCMProtectionMethodEnum VCMLossProtectionLogic::SelectedType() const {
+ return _selectedMethod ? _selectedMethod->Type() : kNone;
+}
+
+void VCMLossProtectionLogic::Reset(int64_t nowMs) {
+ _lastPrUpdateT = nowMs;
+ _lastPacketPerFrameUpdateT = nowMs;
+ _lastPacketPerFrameUpdateTKey = nowMs;
+ _lossPr255.Reset(0.9999f);
+ _packetsPerFrame.Reset(0.9999f);
+ _fecRateDelta = _fecRateKey = 0;
+ for (int32_t i = 0; i < kLossPrHistorySize; i++) {
+ _lossPrHistory[i].lossPr255 = 0;
+ _lossPrHistory[i].timeMs = -1;
+ }
+ _shortMaxLossPr255 = 0;
+ Release();
+}
+
+void VCMLossProtectionLogic::Release() {
+ _selectedMethod.reset();
+}
+
+} // namespace media_optimization
+} // namespace webrtc
diff --git a/webrtc/modules/video_coding/media_opt_util.h b/webrtc/modules/video_coding/media_opt_util.h
new file mode 100644
index 0000000000..a016a03eab
--- /dev/null
+++ b/webrtc/modules/video_coding/media_opt_util.h
@@ -0,0 +1,361 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_MEDIA_OPT_UTIL_H_
+#define WEBRTC_MODULES_VIDEO_CODING_MEDIA_OPT_UTIL_H_
+
+#include <math.h>
+#include <stdlib.h>
+
+#include "webrtc/base/exp_filter.h"
+#include "webrtc/base/scoped_ptr.h"
+#include "webrtc/modules/video_coding/internal_defines.h"
+#include "webrtc/modules/video_coding/qm_select.h"
+#include "webrtc/system_wrappers/include/trace.h"
+#include "webrtc/typedefs.h"
+
+namespace webrtc {
+namespace media_optimization {
+
+// Number of time periods used for (max) window filter for packet loss
+// TODO(marpan): set reasonable window size for filtered packet loss,
+// adjustment should be based on logged/real data of loss stats/correlation.
+enum { kLossPrHistorySize = 10 };
+
+// 1000 ms, total filter length is (kLossPrHistorySize * 1000) ms
+enum { kLossPrShortFilterWinMs = 1000 };
+
+// The type of filter used on the received packet loss reports.
+enum FilterPacketLossMode {
+ kNoFilter, // No filtering on received loss.
+ kAvgFilter, // Recursive average filter.
+ kMaxFilter // Max-window filter, over the time interval of:
+ // (kLossPrHistorySize * kLossPrShortFilterWinMs) ms.
+};
+
+// Thresholds for hybrid NACK/FEC
+// common to media optimization and the jitter buffer.
+const int64_t kLowRttNackMs = 20;
+
+struct VCMProtectionParameters {
+ VCMProtectionParameters()
+ : rtt(0),
+ lossPr(0.0f),
+ bitRate(0.0f),
+ packetsPerFrame(0.0f),
+ packetsPerFrameKey(0.0f),
+ frameRate(0.0f),
+ keyFrameSize(0.0f),
+ fecRateDelta(0),
+ fecRateKey(0),
+ codecWidth(0),
+ codecHeight(0),
+ numLayers(1) {}
+
+ int64_t rtt;
+ float lossPr;
+ float bitRate;
+ float packetsPerFrame;
+ float packetsPerFrameKey;
+ float frameRate;
+ float keyFrameSize;
+ uint8_t fecRateDelta;
+ uint8_t fecRateKey;
+ uint16_t codecWidth;
+ uint16_t codecHeight;
+ int numLayers;
+};
+
+/******************************/
+/* VCMProtectionMethod class */
+/******************************/
+
+enum VCMProtectionMethodEnum { kNack, kFec, kNackFec, kNone };
+
+class VCMLossProbabilitySample {
+ public:
+ VCMLossProbabilitySample() : lossPr255(0), timeMs(-1) {}
+
+ uint8_t lossPr255;
+ int64_t timeMs;
+};
+
+class VCMProtectionMethod {
+ public:
+ VCMProtectionMethod();
+ virtual ~VCMProtectionMethod();
+
+ // Updates the efficiency of the method using the parameters provided
+ //
+ // Input:
+ // - parameters : Parameters used to calculate efficiency
+ //
+ // Return value : True if this method is recommended in
+ // the given conditions.
+ virtual bool UpdateParameters(const VCMProtectionParameters* parameters) = 0;
+
+ // Returns the protection type
+ //
+ // Return value : The protection type
+ enum VCMProtectionMethodEnum Type() const { return _type; }
+
+ // Returns the effective packet loss for ER, required by this protection
+ // method
+ //
+ // Return value : Required effective packet loss
+ virtual uint8_t RequiredPacketLossER() { return _effectivePacketLoss; }
+
+ // Extracts the FEC protection factor for Key frame, required by this
+ // protection method
+ //
+ // Return value : Required protectionFactor for Key frame
+ virtual uint8_t RequiredProtectionFactorK() { return _protectionFactorK; }
+
+ // Extracts the FEC protection factor for Delta frame, required by this
+ // protection method
+ //
+ // Return value : Required protectionFactor for delta frame
+ virtual uint8_t RequiredProtectionFactorD() { return _protectionFactorD; }
+
+ // Extracts whether the FEC Unequal protection (UEP) is used for Key frame.
+ //
+ // Return value : Required Unequal protection on/off state.
+ virtual bool RequiredUepProtectionK() { return _useUepProtectionK; }
+
+  // Extracts whether the FEC Unequal protection (UEP) is used for Delta
+ // frame.
+ //
+ // Return value : Required Unequal protection on/off state.
+ virtual bool RequiredUepProtectionD() { return _useUepProtectionD; }
+
+ virtual int MaxFramesFec() const { return 1; }
+
+ // Updates content metrics
+ void UpdateContentMetrics(const VideoContentMetrics* contentMetrics);
+
+ protected:
+ uint8_t _effectivePacketLoss;
+ uint8_t _protectionFactorK;
+ uint8_t _protectionFactorD;
+  // Scale factor used to boost the key frame protection.
+ float _scaleProtKey;
+ int32_t _maxPayloadSize;
+
+ VCMQmRobustness* _qmRobustness;
+ bool _useUepProtectionK;
+ bool _useUepProtectionD;
+ float _corrFecCost;
+ enum VCMProtectionMethodEnum _type;
+};
+
+class VCMNackMethod : public VCMProtectionMethod {
+ public:
+ VCMNackMethod();
+ virtual ~VCMNackMethod();
+ virtual bool UpdateParameters(const VCMProtectionParameters* parameters);
+ // Get the effective packet loss
+ bool EffectivePacketLoss(const VCMProtectionParameters* parameter);
+};
+
+class VCMFecMethod : public VCMProtectionMethod {
+ public:
+ VCMFecMethod();
+ virtual ~VCMFecMethod();
+ virtual bool UpdateParameters(const VCMProtectionParameters* parameters);
+ // Get the effective packet loss for ER
+ bool EffectivePacketLoss(const VCMProtectionParameters* parameters);
+ // Get the FEC protection factors
+ bool ProtectionFactor(const VCMProtectionParameters* parameters);
+ // Get the boost for key frame protection
+ uint8_t BoostCodeRateKey(uint8_t packetFrameDelta,
+ uint8_t packetFrameKey) const;
+  // Convert the rates: defined relative to the total # of packets or the
+  // source # of packets.
+ uint8_t ConvertFECRate(uint8_t codeRate) const;
+ // Get the average effective recovery from FEC: for random loss model
+ float AvgRecoveryFEC(const VCMProtectionParameters* parameters) const;
+ // Update FEC with protectionFactorD
+ void UpdateProtectionFactorD(uint8_t protectionFactorD);
+ // Update FEC with protectionFactorK
+ void UpdateProtectionFactorK(uint8_t protectionFactorK);
+ // Compute the bits per frame. Account for temporal layers when applicable.
+ int BitsPerFrame(const VCMProtectionParameters* parameters);
+
+ protected:
+ enum { kUpperLimitFramesFec = 6 };
+  // Threshold values for the bytes/frame and round trip time, below which
+  // we may turn off FEC, depending on |_numLayers| and |_maxFramesFec|.
+  // Max bytes/frame for VGA, corresponds to ~140 kbps at 25 fps.
+  enum { kMaxBytesPerFrameForFec = 700 };
+  // Max bytes/frame for CIF and lower: corresponds to ~80 kbps at 25 fps.
+  enum { kMaxBytesPerFrameForFecLow = 400 };
+  // Max bytes/frame for frame sizes larger than VGA, ~200 kbps at 25 fps.
+  enum { kMaxBytesPerFrameForFecHigh = 1000 };
+};
+
+class VCMNackFecMethod : public VCMFecMethod {
+ public:
+ VCMNackFecMethod(int64_t lowRttNackThresholdMs,
+ int64_t highRttNackThresholdMs);
+ virtual ~VCMNackFecMethod();
+ virtual bool UpdateParameters(const VCMProtectionParameters* parameters);
+ // Get the effective packet loss for ER
+ bool EffectivePacketLoss(const VCMProtectionParameters* parameters);
+ // Get the protection factors
+ bool ProtectionFactor(const VCMProtectionParameters* parameters);
+ // Get the max number of frames the FEC is allowed to be based on.
+ int MaxFramesFec() const;
+ // Turn off the FEC based on low bitrate and other factors.
+ bool BitRateTooLowForFec(const VCMProtectionParameters* parameters);
+
+ private:
+ int ComputeMaxFramesFec(const VCMProtectionParameters* parameters);
+
+ int64_t _lowRttNackMs;
+ int64_t _highRttNackMs;
+ int _maxFramesFec;
+};
+
+class VCMLossProtectionLogic {
+ public:
+ explicit VCMLossProtectionLogic(int64_t nowMs);
+ ~VCMLossProtectionLogic();
+
+ // Set the protection method to be used
+ //
+ // Input:
+ // - newMethodType : New requested protection method type. If one
+ // is already set, it will be deleted and replaced
+ void SetMethod(VCMProtectionMethodEnum newMethodType);
+
+ // Update the round-trip time
+ //
+ // Input:
+  //          - rtt : Round-trip time in milliseconds.
+ void UpdateRtt(int64_t rtt);
+
+ // Update the filtered packet loss.
+ //
+ // Input:
+ // - packetLossEnc : The reported packet loss filtered
+ // (max window or average)
+ void UpdateFilteredLossPr(uint8_t packetLossEnc);
+
+ // Update the current target bit rate.
+ //
+ // Input:
+ // - bitRate : The current target bit rate in kbits/s
+ void UpdateBitRate(float bitRate);
+
+ // Update the number of packets per frame estimate, for delta frames
+ //
+ // Input:
+ // - nPackets : Number of packets in the latest sent frame.
+ void UpdatePacketsPerFrame(float nPackets, int64_t nowMs);
+
+ // Update the number of packets per frame estimate, for key frames
+ //
+ // Input:
+  //          - nPackets : Number of packets in the latest sent frame.
+ void UpdatePacketsPerFrameKey(float nPackets, int64_t nowMs);
+
+ // Update the keyFrameSize estimate
+ //
+ // Input:
+ // - keyFrameSize : The size of the latest sent key frame.
+ void UpdateKeyFrameSize(float keyFrameSize);
+
+ // Update the frame rate
+ //
+ // Input:
+ // - frameRate : The current target frame rate.
+ void UpdateFrameRate(float frameRate) { _frameRate = frameRate; }
+
+ // Update the frame size
+ //
+ // Input:
+ // - width : The codec frame width.
+ // - height : The codec frame height.
+ void UpdateFrameSize(uint16_t width, uint16_t height);
+
+ // Update the number of active layers
+ //
+ // Input:
+ // - numLayers : Number of layers used.
+ void UpdateNumLayers(int numLayers);
+
+ // The amount of packet loss to cover for with FEC.
+ //
+ // Input:
+ // - fecRateKey : Packet loss to cover for with FEC when
+ // sending key frames.
+ // - fecRateDelta : Packet loss to cover for with FEC when
+ // sending delta frames.
+ void UpdateFECRates(uint8_t fecRateKey, uint8_t fecRateDelta) {
+ _fecRateKey = fecRateKey;
+ _fecRateDelta = fecRateDelta;
+ }
+
+ // Update the protection methods with the current VCMProtectionParameters
+ // and set the requested protection settings.
+ // Return value : Returns true on update
+ bool UpdateMethod();
+
+ // Returns the method currently selected.
+ //
+ // Return value : The protection method currently selected.
+ VCMProtectionMethod* SelectedMethod() const;
+
+ // Return the protection type of the currently selected method
+ VCMProtectionMethodEnum SelectedType() const;
+
+ // Updates the filtered loss for the average and max window packet loss,
+ // and returns the filtered loss probability in the interval [0, 255].
+ // The returned filtered loss value depends on the parameter |filter_mode|.
+ // The input parameter |lossPr255| is the received packet loss.
+
+ // Return value : The filtered loss probability
+ uint8_t FilteredLoss(int64_t nowMs,
+ FilterPacketLossMode filter_mode,
+ uint8_t lossPr255);
+
+ void Reset(int64_t nowMs);
+
+ void Release();
+
+ private:
+  // Updates the max-window packet loss history.
+ void UpdateMaxLossHistory(uint8_t lossPr255, int64_t now);
+ uint8_t MaxFilteredLossPr(int64_t nowMs) const;
+ rtc::scoped_ptr<VCMProtectionMethod> _selectedMethod;
+ VCMProtectionParameters _currentParameters;
+ int64_t _rtt;
+ float _lossPr;
+ float _bitRate;
+ float _frameRate;
+ float _keyFrameSize;
+ uint8_t _fecRateKey;
+ uint8_t _fecRateDelta;
+ int64_t _lastPrUpdateT;
+ int64_t _lastPacketPerFrameUpdateT;
+ int64_t _lastPacketPerFrameUpdateTKey;
+ rtc::ExpFilter _lossPr255;
+ VCMLossProbabilitySample _lossPrHistory[kLossPrHistorySize];
+ uint8_t _shortMaxLossPr255;
+ rtc::ExpFilter _packetsPerFrame;
+ rtc::ExpFilter _packetsPerFrameKey;
+ uint16_t _codecWidth;
+ uint16_t _codecHeight;
+ int _numLayers;
+};
+
+} // namespace media_optimization
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_CODING_MEDIA_OPT_UTIL_H_
diff --git a/webrtc/modules/video_coding/main/source/media_optimization.cc b/webrtc/modules/video_coding/media_optimization.cc
index cc73d3803d..a234a06f9b 100644
--- a/webrtc/modules/video_coding/main/source/media_optimization.cc
+++ b/webrtc/modules/video_coding/media_optimization.cc
@@ -8,13 +8,13 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/video_coding/main/source/media_optimization.h"
+#include "webrtc/modules/video_coding/media_optimization.h"
-#include "webrtc/modules/video_coding/main/source/content_metrics_processing.h"
-#include "webrtc/modules/video_coding/main/source/qm_select.h"
-#include "webrtc/modules/video_coding/utility/include/frame_dropper.h"
+#include "webrtc/base/logging.h"
+#include "webrtc/modules/video_coding/content_metrics_processing.h"
+#include "webrtc/modules/video_coding/qm_select.h"
+#include "webrtc/modules/video_coding/utility/frame_dropper.h"
#include "webrtc/system_wrappers/include/clock.h"
-#include "webrtc/system_wrappers/include/logging.h"
namespace webrtc {
namespace media_optimization {
@@ -53,11 +53,9 @@ void UpdateProtectionCallback(
key_fec_params.fec_mask_type = kFecMaskRandom;
// TODO(Marco): Pass FEC protection values per layer.
- video_protection_callback->ProtectionRequest(&delta_fec_params,
- &key_fec_params,
- video_rate_bps,
- nack_overhead_rate_bps,
- fec_overhead_rate_bps);
+ video_protection_callback->ProtectionRequest(
+ &delta_fec_params, &key_fec_params, video_rate_bps,
+ nack_overhead_rate_bps, fec_overhead_rate_bps);
}
} // namespace
@@ -115,8 +113,8 @@ MediaOptimization::~MediaOptimization(void) {
void MediaOptimization::Reset() {
CriticalSectionScoped lock(crit_sect_.get());
- SetEncodingDataInternal(
- kVideoCodecUnknown, 0, 0, 0, 0, 0, 0, max_payload_size_);
+ SetEncodingDataInternal(kVideoCodecUnknown, 0, 0, 0, 0, 0, 0,
+ max_payload_size_);
memset(incoming_frame_times_, -1, sizeof(incoming_frame_times_));
incoming_frame_rate_ = 0.0;
frame_dropper_->Reset();
@@ -149,14 +147,8 @@ void MediaOptimization::SetEncodingData(VideoCodecType send_codec_type,
int num_layers,
int32_t mtu) {
CriticalSectionScoped lock(crit_sect_.get());
- SetEncodingDataInternal(send_codec_type,
- max_bit_rate,
- frame_rate,
- target_bitrate,
- width,
- height,
- num_layers,
- mtu);
+ SetEncodingDataInternal(send_codec_type, max_bit_rate, frame_rate,
+ target_bitrate, width, height, num_layers, mtu);
}
void MediaOptimization::SetEncodingDataInternal(VideoCodecType send_codec_type,
@@ -190,11 +182,8 @@ void MediaOptimization::SetEncodingDataInternal(VideoCodecType send_codec_type,
codec_height_ = height;
num_layers_ = (num_layers <= 1) ? 1 : num_layers; // Can also be zero.
max_payload_size_ = mtu;
- qm_resolution_->Initialize(target_bitrate_kbps,
- user_frame_rate_,
- codec_width_,
- codec_height_,
- num_layers_);
+ qm_resolution_->Initialize(target_bitrate_kbps, user_frame_rate_,
+ codec_width_, codec_height_, num_layers_);
}
uint32_t MediaOptimization::SetTargetRates(
@@ -256,10 +245,8 @@ uint32_t MediaOptimization::SetTargetRates(
// overhead data actually transmitted (including headers) the last
// second.
if (protection_callback) {
- UpdateProtectionCallback(selected_method,
- &sent_video_rate_bps,
- &sent_nack_rate_bps,
- &sent_fec_rate_bps,
+ UpdateProtectionCallback(selected_method, &sent_video_rate_bps,
+ &sent_nack_rate_bps, &sent_fec_rate_bps,
protection_callback);
}
uint32_t sent_total_rate_bps =
@@ -296,10 +283,8 @@ uint32_t MediaOptimization::SetTargetRates(
if (enable_qm_ && qmsettings_callback) {
// Update QM with rates.
- qm_resolution_->UpdateRates(target_video_bitrate_kbps,
- sent_video_rate_kbps,
- incoming_frame_rate_,
- fraction_lost_);
+ qm_resolution_->UpdateRates(target_video_bitrate_kbps, sent_video_rate_kbps,
+ incoming_frame_rate_, fraction_lost_);
// Check for QM selection.
bool select_qm = CheckStatusForQMchange();
if (select_qm) {
@@ -514,8 +499,7 @@ void MediaOptimization::UpdateSentBitrate(int64_t now_ms) {
}
size_t framesize_sum = 0;
for (FrameSampleList::iterator it = encoded_frame_samples_.begin();
- it != encoded_frame_samples_.end();
- ++it) {
+ it != encoded_frame_samples_.end(); ++it) {
framesize_sum += it->size_bytes;
}
float denom = static_cast<float>(
@@ -565,7 +549,8 @@ bool MediaOptimization::QMUpdate(
}
LOG(LS_INFO) << "Media optimizer requests the video resolution to be changed "
- "to " << qm->codec_width << "x" << qm->codec_height << "@"
+ "to "
+ << qm->codec_width << "x" << qm->codec_height << "@"
<< qm->frame_rate;
// Update VPM with new target frame rate and frame size.
@@ -574,11 +559,11 @@ bool MediaOptimization::QMUpdate(
// will vary/fluctuate, and since we don't want to change the state of the
// VPM frame dropper, unless a temporal action was selected, we use the
// quantity |qm->frame_rate| for updating.
- video_qmsettings_callback->SetVideoQMSettings(
- qm->frame_rate, codec_width_, codec_height_);
+ video_qmsettings_callback->SetVideoQMSettings(qm->frame_rate, codec_width_,
+ codec_height_);
content_->UpdateFrameRate(qm->frame_rate);
- qm_resolution_->UpdateCodecParameters(
- qm->frame_rate, codec_width_, codec_height_);
+ qm_resolution_->UpdateCodecParameters(qm->frame_rate, codec_width_,
+ codec_height_);
return true;
}
diff --git a/webrtc/modules/video_coding/main/source/media_optimization.h b/webrtc/modules/video_coding/media_optimization.h
index c4feeff743..54389bf5b5 100644
--- a/webrtc/modules/video_coding/main/source/media_optimization.h
+++ b/webrtc/modules/video_coding/media_optimization.h
@@ -8,16 +8,16 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_MEDIA_OPTIMIZATION_H_
-#define WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_MEDIA_OPTIMIZATION_H_
+#ifndef WEBRTC_MODULES_VIDEO_CODING_MEDIA_OPTIMIZATION_H_
+#define WEBRTC_MODULES_VIDEO_CODING_MEDIA_OPTIMIZATION_H_
#include <list>
#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/modules/video_coding/main/interface/video_coding.h"
-#include "webrtc/modules/video_coding/main/source/media_opt_util.h"
-#include "webrtc/modules/video_coding/main/source/qm_select.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/video_coding/include/video_coding.h"
+#include "webrtc/modules/video_coding/media_opt_util.h"
+#include "webrtc/modules/video_coding/qm_select.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
namespace webrtc {
@@ -85,15 +85,9 @@ class MediaOptimization {
uint32_t SentBitRate();
private:
- enum {
- kFrameCountHistorySize = 90
- };
- enum {
- kFrameHistoryWinMs = 2000
- };
- enum {
- kBitrateAverageWinMs = 1000
- };
+ enum { kFrameCountHistorySize = 90 };
+ enum { kFrameHistoryWinMs = 2000 };
+ enum { kBitrateAverageWinMs = 1000 };
struct EncodedFrameSample;
typedef std::list<EncodedFrameSample> FrameSampleList;
@@ -177,4 +171,4 @@ class MediaOptimization {
} // namespace media_optimization
} // namespace webrtc
-#endif // WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_MEDIA_OPTIMIZATION_H_
+#endif // WEBRTC_MODULES_VIDEO_CODING_MEDIA_OPTIMIZATION_H_
diff --git a/webrtc/modules/video_coding/main/source/media_optimization_unittest.cc b/webrtc/modules/video_coding/media_optimization_unittest.cc
index be528d9932..3f8ac5d075 100644
--- a/webrtc/modules/video_coding/main/source/media_optimization_unittest.cc
+++ b/webrtc/modules/video_coding/media_optimization_unittest.cc
@@ -9,7 +9,7 @@
*/
#include "testing/gtest/include/gtest/gtest.h"
-#include "webrtc/modules/video_coding/main/source/media_optimization.h"
+#include "webrtc/modules/video_coding/media_optimization.h"
#include "webrtc/system_wrappers/include/clock.h"
namespace webrtc {
@@ -51,7 +51,6 @@ class TestMediaOptimization : public ::testing::Test {
uint32_t next_timestamp_;
};
-
TEST_F(TestMediaOptimization, VerifyMuting) {
// Enable video suspension with these limits.
// Suspend the video when the rate is below 50 kbps and resume when it gets
diff --git a/webrtc/modules/video_coding/nack_fec_tables.h b/webrtc/modules/video_coding/nack_fec_tables.h
new file mode 100644
index 0000000000..f9f5ad97ac
--- /dev/null
+++ b/webrtc/modules/video_coding/nack_fec_tables.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_NACK_FEC_TABLES_H_
+#define WEBRTC_MODULES_VIDEO_CODING_NACK_FEC_TABLES_H_
+
+namespace webrtc {
+
+// Table for adjusting FEC rate for NACK/FEC protection method
+// Table values are built as a sigmoid function, ranging from 0 to 100, based on
+// the HybridNackTH values defined in media_opt_util.h.
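+// E.g. (per the currently disabled RTT adjustment in
+// VCMNackFecMethod::ProtectionFactor): an RTT of 30 ms would index
+// VCMNackFecTable[30] == 21, scaling the delta FEC factor to 21% of its
+// nominal value when NACK is expected to be effective.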
+const uint16_t VCMNackFecTable[100] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1,
+ 1, 2, 2, 2, 3, 3, 4, 5, 6, 7, 9, 10, 12, 15, 18,
+ 21, 24, 28, 32, 37, 41, 46, 51, 56, 61, 66, 70, 74, 78, 81,
+ 84, 86, 89, 90, 92, 93, 95, 95, 96, 97, 97, 98, 98, 99, 99,
+ 99, 99, 99, 99, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
+ 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
+ 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_CODING_NACK_FEC_TABLES_H_
diff --git a/webrtc/modules/video_coding/main/source/packet.cc b/webrtc/modules/video_coding/packet.cc
index fd5a6abb8c..e25de2ed6c 100644
--- a/webrtc/modules/video_coding/main/source/packet.cc
+++ b/webrtc/modules/video_coding/packet.cc
@@ -8,11 +8,12 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/modules/video_coding/main/source/packet.h"
+#include "webrtc/modules/video_coding/packet.h"
#include <assert.h>
+#include "webrtc/modules/include/module_common_types.h"
+
namespace webrtc {
VCMPacket::VCMPacket()
@@ -34,49 +35,47 @@ VCMPacket::VCMPacket()
VCMPacket::VCMPacket(const uint8_t* ptr,
const size_t size,
- const WebRtcRTPHeader& rtpHeader) :
- payloadType(rtpHeader.header.payloadType),
- timestamp(rtpHeader.header.timestamp),
- ntp_time_ms_(rtpHeader.ntp_time_ms),
- seqNum(rtpHeader.header.sequenceNumber),
- dataPtr(ptr),
- sizeBytes(size),
- markerBit(rtpHeader.header.markerBit),
+ const WebRtcRTPHeader& rtpHeader)
+ : payloadType(rtpHeader.header.payloadType),
+ timestamp(rtpHeader.header.timestamp),
+ ntp_time_ms_(rtpHeader.ntp_time_ms),
+ seqNum(rtpHeader.header.sequenceNumber),
+ dataPtr(ptr),
+ sizeBytes(size),
+ markerBit(rtpHeader.header.markerBit),
- frameType(rtpHeader.frameType),
- codec(kVideoCodecUnknown),
- isFirstPacket(rtpHeader.type.Video.isFirstPacket),
- completeNALU(kNaluComplete),
- insertStartCode(false),
- width(rtpHeader.type.Video.width),
- height(rtpHeader.type.Video.height),
- codecSpecificHeader(rtpHeader.type.Video)
-{
- CopyCodecSpecifics(rtpHeader.type.Video);
+ frameType(rtpHeader.frameType),
+ codec(kVideoCodecUnknown),
+ isFirstPacket(rtpHeader.type.Video.isFirstPacket),
+ completeNALU(kNaluComplete),
+ insertStartCode(false),
+ width(rtpHeader.type.Video.width),
+ height(rtpHeader.type.Video.height),
+ codecSpecificHeader(rtpHeader.type.Video) {
+ CopyCodecSpecifics(rtpHeader.type.Video);
}
VCMPacket::VCMPacket(const uint8_t* ptr,
size_t size,
uint16_t seq,
uint32_t ts,
- bool mBit) :
- payloadType(0),
- timestamp(ts),
- ntp_time_ms_(0),
- seqNum(seq),
- dataPtr(ptr),
- sizeBytes(size),
- markerBit(mBit),
+ bool mBit)
+ : payloadType(0),
+ timestamp(ts),
+ ntp_time_ms_(0),
+ seqNum(seq),
+ dataPtr(ptr),
+ sizeBytes(size),
+ markerBit(mBit),
- frameType(kVideoFrameDelta),
- codec(kVideoCodecUnknown),
- isFirstPacket(false),
- completeNALU(kNaluComplete),
- insertStartCode(false),
- width(0),
- height(0),
- codecSpecificHeader()
-{}
+ frameType(kVideoFrameDelta),
+ codec(kVideoCodecUnknown),
+ isFirstPacket(false),
+ completeNALU(kNaluComplete),
+ insertStartCode(false),
+ width(0),
+ height(0),
+ codecSpecificHeader() {}
void VCMPacket::Reset() {
payloadType = 0;
diff --git a/webrtc/modules/video_coding/packet.h b/webrtc/modules/video_coding/packet.h
new file mode 100644
index 0000000000..b77c1df039
--- /dev/null
+++ b/webrtc/modules/video_coding/packet.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_PACKET_H_
+#define WEBRTC_MODULES_VIDEO_CODING_PACKET_H_
+
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/video_coding/jitter_buffer_common.h"
+#include "webrtc/typedefs.h"
+
+namespace webrtc {
+
+class VCMPacket {
+ public:
+ VCMPacket();
+ VCMPacket(const uint8_t* ptr,
+ const size_t size,
+ const WebRtcRTPHeader& rtpHeader);
+ VCMPacket(const uint8_t* ptr,
+ size_t size,
+ uint16_t seqNum,
+ uint32_t timestamp,
+ bool markerBit);
+
+ void Reset();
+
+ uint8_t payloadType;
+ uint32_t timestamp;
+ // NTP time of the capture time in local timebase in milliseconds.
+ int64_t ntp_time_ms_;
+ uint16_t seqNum;
+ const uint8_t* dataPtr;
+ size_t sizeBytes;
+ bool markerBit;
+
+ FrameType frameType;
+ VideoCodecType codec;
+
+  bool isFirstPacket; // Is this the first packet in a frame.
+ VCMNaluCompleteness completeNALU; // Default is kNaluIncomplete.
+ bool insertStartCode; // True if a start code should be inserted before this
+ // packet.
+ int width;
+ int height;
+ RTPVideoHeader codecSpecificHeader;
+
+ protected:
+ void CopyCodecSpecifics(const RTPVideoHeader& videoHeader);
+};
+
+} // namespace webrtc
+#endif // WEBRTC_MODULES_VIDEO_CODING_PACKET_H_
diff --git a/webrtc/modules/video_coding/main/source/qm_select.cc b/webrtc/modules/video_coding/qm_select.cc
index e86d0755c0..9da42bb33c 100644
--- a/webrtc/modules/video_coding/main/source/qm_select.cc
+++ b/webrtc/modules/video_coding/qm_select.cc
@@ -8,14 +8,14 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/video_coding/main/source/qm_select.h"
+#include "webrtc/modules/video_coding/qm_select.h"
#include <math.h>
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/modules/video_coding/main/interface/video_coding_defines.h"
-#include "webrtc/modules/video_coding/main/source/internal_defines.h"
-#include "webrtc/modules/video_coding/main/source/qm_select_data.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/video_coding/include/video_coding_defines.h"
+#include "webrtc/modules/video_coding/internal_defines.h"
+#include "webrtc/modules/video_coding/qm_select_data.h"
#include "webrtc/system_wrappers/include/trace.h"
namespace webrtc {
@@ -36,8 +36,7 @@ VCMQmMethod::VCMQmMethod()
ResetQM();
}
-VCMQmMethod::~VCMQmMethod() {
-}
+VCMQmMethod::~VCMQmMethod() {}
void VCMQmMethod::ResetQM() {
aspect_ratio_ = 1.0f;
@@ -52,7 +51,7 @@ uint8_t VCMQmMethod::ComputeContentClass() {
return content_class_ = 3 * motion_.level + spatial_.level;
}
-void VCMQmMethod::UpdateContent(const VideoContentMetrics* contentMetrics) {
+void VCMQmMethod::UpdateContent(const VideoContentMetrics* contentMetrics) {
content_metrics_ = contentMetrics;
}
@@ -64,7 +63,7 @@ void VCMQmMethod::ComputeMotionNFD() {
if (motion_.value < kLowMotionNfd) {
motion_.level = kLow;
} else if (motion_.value > kHighMotionNfd) {
- motion_.level = kHigh;
+ motion_.level = kHigh;
} else {
motion_.level = kDefault;
}
@@ -75,7 +74,7 @@ void VCMQmMethod::ComputeSpatial() {
float spatial_err_h = 0.0;
float spatial_err_v = 0.0;
if (content_metrics_) {
- spatial_err = content_metrics_->spatial_pred_err;
+ spatial_err = content_metrics_->spatial_pred_err;
spatial_err_h = content_metrics_->spatial_pred_err_h;
spatial_err_v = content_metrics_->spatial_pred_err_v;
}
@@ -94,8 +93,7 @@ void VCMQmMethod::ComputeSpatial() {
}
}
-ImageType VCMQmMethod::GetImageType(uint16_t width,
- uint16_t height) {
+ImageType VCMQmMethod::GetImageType(uint16_t width, uint16_t height) {
// Get the image type for the encoder frame size.
uint32_t image_size = width * height;
if (image_size == kSizeOfImageType[kQCIF]) {
@@ -142,7 +140,7 @@ FrameRateLevelClass VCMQmMethod::FrameRateLevel(float avg_framerate) {
} else if (avg_framerate <= kMiddleFrameRate) {
return kFrameRateMiddle1;
} else if (avg_framerate <= kHighFrameRate) {
- return kFrameRateMiddle2;
+ return kFrameRateMiddle2;
} else {
return kFrameRateHigh;
}
@@ -150,8 +148,7 @@ FrameRateLevelClass VCMQmMethod::FrameRateLevel(float avg_framerate) {
// RESOLUTION CLASS
-VCMQmResolution::VCMQmResolution()
- : qm_(new VCMResolutionScale()) {
+VCMQmResolution::VCMQmResolution() : qm_(new VCMResolutionScale()) {
Reset();
}
@@ -174,7 +171,7 @@ void VCMQmResolution::ResetRates() {
void VCMQmResolution::ResetDownSamplingState() {
state_dec_factor_spatial_ = 1.0;
- state_dec_factor_temporal_ = 1.0;
+ state_dec_factor_temporal_ = 1.0;
for (int i = 0; i < kDownActionHistorySize; i++) {
down_action_history_[i].spatial = kNoChangeSpatial;
down_action_history_[i].temporal = kNoChangeTemporal;
@@ -225,11 +222,12 @@ int VCMQmResolution::Initialize(float bitrate,
buffer_level_ = kInitBufferLevel * target_bitrate_;
// Per-frame bandwidth.
per_frame_bandwidth_ = target_bitrate_ / user_framerate;
- init_ = true;
+ init_ = true;
return VCM_OK;
}
-void VCMQmResolution::UpdateCodecParameters(float frame_rate, uint16_t width,
+void VCMQmResolution::UpdateCodecParameters(float frame_rate,
+ uint16_t width,
uint16_t height) {
width_ = width;
height_ = height;
@@ -283,12 +281,12 @@ void VCMQmResolution::UpdateRates(float target_bitrate,
// Update with the current new target and frame rate:
// these values are ones the encoder will use for the current/next ~1sec.
- target_bitrate_ = target_bitrate;
+ target_bitrate_ = target_bitrate;
incoming_framerate_ = incoming_framerate;
sum_incoming_framerate_ += incoming_framerate_;
// Update the per_frame_bandwidth:
// this is the per_frame_bw for the current/next ~1sec.
- per_frame_bandwidth_ = 0.0f;
+ per_frame_bandwidth_ = 0.0f;
if (incoming_framerate_ > 0.0f) {
per_frame_bandwidth_ = target_bitrate_ / incoming_framerate_;
}
@@ -313,7 +311,7 @@ int VCMQmResolution::SelectResolution(VCMResolutionScale** qm) {
}
if (content_metrics_ == NULL) {
Reset();
- *qm = qm_;
+ *qm = qm_;
return VCM_OK;
}
@@ -376,31 +374,31 @@ void VCMQmResolution::ComputeRatesForSelection() {
avg_rate_mismatch_sgn_ = 0.0f;
avg_packet_loss_ = 0.0f;
if (frame_cnt_ > 0) {
- avg_ratio_buffer_low_ = static_cast<float>(low_buffer_cnt_) /
- static_cast<float>(frame_cnt_);
+ avg_ratio_buffer_low_ =
+ static_cast<float>(low_buffer_cnt_) / static_cast<float>(frame_cnt_);
}
if (update_rate_cnt_ > 0) {
- avg_rate_mismatch_ = static_cast<float>(sum_rate_MM_) /
- static_cast<float>(update_rate_cnt_);
+ avg_rate_mismatch_ =
+ static_cast<float>(sum_rate_MM_) / static_cast<float>(update_rate_cnt_);
avg_rate_mismatch_sgn_ = static_cast<float>(sum_rate_MM_sgn_) /
- static_cast<float>(update_rate_cnt_);
+ static_cast<float>(update_rate_cnt_);
avg_target_rate_ = static_cast<float>(sum_target_rate_) /
- static_cast<float>(update_rate_cnt_);
+ static_cast<float>(update_rate_cnt_);
avg_incoming_framerate_ = static_cast<float>(sum_incoming_framerate_) /
- static_cast<float>(update_rate_cnt_);
- avg_packet_loss_ = static_cast<float>(sum_packet_loss_) /
- static_cast<float>(update_rate_cnt_);
+ static_cast<float>(update_rate_cnt_);
+ avg_packet_loss_ = static_cast<float>(sum_packet_loss_) /
+ static_cast<float>(update_rate_cnt_);
}
// For selection we may want to weight some quantities more heavily
// with the current (i.e., next ~1sec) rate values.
- avg_target_rate_ = kWeightRate * avg_target_rate_ +
- (1.0 - kWeightRate) * target_bitrate_;
+ avg_target_rate_ =
+ kWeightRate * avg_target_rate_ + (1.0 - kWeightRate) * target_bitrate_;
avg_incoming_framerate_ = kWeightRate * avg_incoming_framerate_ +
- (1.0 - kWeightRate) * incoming_framerate_;
+ (1.0 - kWeightRate) * incoming_framerate_;
// Use base layer frame rate for temporal layers: this will favor spatial.
assert(num_layers_ > 0);
- framerate_level_ = FrameRateLevel(
- avg_incoming_framerate_ / static_cast<float>(1 << (num_layers_ - 1)));
+ framerate_level_ = FrameRateLevel(avg_incoming_framerate_ /
+ static_cast<float>(1 << (num_layers_ - 1)));
}
void VCMQmResolution::ComputeEncoderState() {
@@ -412,7 +410,7 @@ void VCMQmResolution::ComputeEncoderState() {
// 2) rate mis-match is high, and consistent over-shooting by encoder.
if ((avg_ratio_buffer_low_ > kMaxBufferLow) ||
((avg_rate_mismatch_ > kMaxRateMisMatch) &&
- (avg_rate_mismatch_sgn_ < -kRateOverShoot))) {
+ (avg_rate_mismatch_sgn_ < -kRateOverShoot))) {
encoder_state_ = kStressedEncoding;
}
// Assign easy state if:
@@ -435,9 +433,9 @@ bool VCMQmResolution::GoingUpResolution() {
// Modify the fac_width/height for this case.
if (down_action_history_[0].spatial == kOneQuarterSpatialUniform) {
fac_width = kFactorWidthSpatial[kOneQuarterSpatialUniform] /
- kFactorWidthSpatial[kOneHalfSpatialUniform];
+ kFactorWidthSpatial[kOneHalfSpatialUniform];
fac_height = kFactorHeightSpatial[kOneQuarterSpatialUniform] /
- kFactorHeightSpatial[kOneHalfSpatialUniform];
+ kFactorHeightSpatial[kOneHalfSpatialUniform];
}
// Check if we should go up both spatially and temporally.
@@ -459,8 +457,8 @@ bool VCMQmResolution::GoingUpResolution() {
kTransRateScaleUpSpatial);
}
if (down_action_history_[0].temporal != kNoChangeTemporal) {
- selected_up_temporal = ConditionForGoingUp(1.0f, 1.0f, fac_temp,
- kTransRateScaleUpTemp);
+ selected_up_temporal =
+ ConditionForGoingUp(1.0f, 1.0f, fac_temp, kTransRateScaleUpTemp);
}
if (selected_up_spatial && !selected_up_temporal) {
action_.spatial = down_action_history_[0].spatial;
@@ -484,13 +482,13 @@ bool VCMQmResolution::ConditionForGoingUp(float fac_width,
float fac_height,
float fac_temp,
float scale_fac) {
- float estimated_transition_rate_up = GetTransitionRate(fac_width, fac_height,
- fac_temp, scale_fac);
+ float estimated_transition_rate_up =
+ GetTransitionRate(fac_width, fac_height, fac_temp, scale_fac);
// Go back up if:
// 1) target rate is above threshold and current encoder state is stable, or
// 2) encoder state is easy (encoder is significantly under-shooting target).
if (((avg_target_rate_ > estimated_transition_rate_up) &&
- (encoder_state_ == kStableEncoding)) ||
+ (encoder_state_ == kStableEncoding)) ||
(encoder_state_ == kEasyEncoding)) {
return true;
} else {
@@ -505,7 +503,7 @@ bool VCMQmResolution::GoingDownResolution() {
// Resolution reduction if:
// (1) target rate is below transition rate, or
// (2) encoder is in stressed state and target rate below a max threshold.
- if ((avg_target_rate_ < estimated_transition_rate_down ) ||
+ if ((avg_target_rate_ < estimated_transition_rate_down) ||
(encoder_state_ == kStressedEncoding && avg_target_rate_ < max_rate)) {
// Get the down-sampling action: based on content class, and how low
// average target rate is relative to transition rate.
@@ -529,9 +527,7 @@ bool VCMQmResolution::GoingDownResolution() {
action_.spatial = kNoChangeSpatial;
break;
}
- default: {
- assert(false);
- }
+ default: { assert(false); }
}
switch (temp_fact) {
case 3: {
@@ -546,9 +542,7 @@ bool VCMQmResolution::GoingDownResolution() {
action_.temporal = kNoChangeTemporal;
break;
}
- default: {
- assert(false);
- }
+ default: { assert(false); }
}
// Only allow for one action (spatial or temporal) at a given time.
assert(action_.temporal == kNoChangeTemporal ||
@@ -572,9 +566,9 @@ float VCMQmResolution::GetTransitionRate(float fac_width,
float fac_height,
float fac_temp,
float scale_fac) {
- ImageType image_type = GetImageType(
- static_cast<uint16_t>(fac_width * width_),
- static_cast<uint16_t>(fac_height * height_));
+ ImageType image_type =
+ GetImageType(static_cast<uint16_t>(fac_width * width_),
+ static_cast<uint16_t>(fac_height * height_));
FrameRateLevelClass framerate_level =
FrameRateLevel(fac_temp * avg_incoming_framerate_);
@@ -589,13 +583,13 @@ float VCMQmResolution::GetTransitionRate(float fac_width,
// Nominal values based on image format (frame size and frame rate).
float max_rate = kFrameRateFac[framerate_level] * kMaxRateQm[image_type];
- uint8_t image_class = image_type > kVGA ? 1: 0;
+ uint8_t image_class = image_type > kVGA ? 1 : 0;
uint8_t table_index = image_class * 9 + content_class_;
// Scale factor for down-sampling transition threshold:
// factor based on the content class and the image size.
float scaleTransRate = kScaleTransRateQm[table_index];
// Threshold bitrate for resolution action.
- return static_cast<float> (scale_fac * scaleTransRate * max_rate);
+ return static_cast<float>(scale_fac * scaleTransRate * max_rate);
}
void VCMQmResolution::UpdateDownsamplingState(UpDownAction up_down) {
@@ -605,9 +599,9 @@ void VCMQmResolution::UpdateDownsamplingState(UpDownAction up_down) {
// If last spatial action was 1/2x1/2, we undo it in two steps, so the
// spatial scale factor in this first step is modified as (4.0/3.0 / 2.0).
if (action_.spatial == kOneQuarterSpatialUniform) {
- qm_->spatial_width_fact =
- 1.0f * kFactorWidthSpatial[kOneHalfSpatialUniform] /
- kFactorWidthSpatial[kOneQuarterSpatialUniform];
+ qm_->spatial_width_fact = 1.0f *
+ kFactorWidthSpatial[kOneHalfSpatialUniform] /
+ kFactorWidthSpatial[kOneQuarterSpatialUniform];
qm_->spatial_height_fact =
1.0f * kFactorHeightSpatial[kOneHalfSpatialUniform] /
kFactorHeightSpatial[kOneQuarterSpatialUniform];
@@ -628,17 +622,18 @@ void VCMQmResolution::UpdateDownsamplingState(UpDownAction up_down) {
}
UpdateCodecResolution();
state_dec_factor_spatial_ = state_dec_factor_spatial_ *
- qm_->spatial_width_fact * qm_->spatial_height_fact;
+ qm_->spatial_width_fact *
+ qm_->spatial_height_fact;
state_dec_factor_temporal_ = state_dec_factor_temporal_ * qm_->temporal_fact;
}
-void VCMQmResolution::UpdateCodecResolution() {
+void VCMQmResolution::UpdateCodecResolution() {
if (action_.spatial != kNoChangeSpatial) {
qm_->change_resolution_spatial = true;
- qm_->codec_width = static_cast<uint16_t>(width_ /
- qm_->spatial_width_fact + 0.5f);
- qm_->codec_height = static_cast<uint16_t>(height_ /
- qm_->spatial_height_fact + 0.5f);
+ qm_->codec_width =
+ static_cast<uint16_t>(width_ / qm_->spatial_width_fact + 0.5f);
+ qm_->codec_height =
+ static_cast<uint16_t>(height_ / qm_->spatial_height_fact + 0.5f);
// Size should not exceed native sizes.
assert(qm_->codec_width <= native_width_);
assert(qm_->codec_height <= native_height_);
@@ -662,8 +657,9 @@ void VCMQmResolution::UpdateCodecResolution() {
}
uint8_t VCMQmResolution::RateClass(float transition_rate) {
- return avg_target_rate_ < (kFacLowRate * transition_rate) ? 0:
- (avg_target_rate_ >= transition_rate ? 2 : 1);
+ return avg_target_rate_ < (kFacLowRate * transition_rate)
+ ? 0
+ : (avg_target_rate_ >= transition_rate ? 2 : 1);
}
// TODO(marpan): Would be better to capture these frame rate adjustments by
@@ -698,15 +694,14 @@ void VCMQmResolution::AdjustAction() {
}
// Never use temporal action if number of temporal layers is above 2.
if (num_layers_ > 2) {
- if (action_.temporal != kNoChangeTemporal) {
+ if (action_.temporal != kNoChangeTemporal) {
action_.spatial = kOneHalfSpatialUniform;
}
action_.temporal = kNoChangeTemporal;
}
// If spatial action was selected, we need to make sure the frame sizes
// are multiples of two. Otherwise switch to 2/3 temporal.
- if (action_.spatial != kNoChangeSpatial &&
- !EvenFrameSize()) {
+ if (action_.spatial != kNoChangeSpatial && !EvenFrameSize()) {
action_.spatial = kNoChangeSpatial;
// Only one action (spatial or temporal) is allowed at a given time, so need
// to check whether temporal action is currently selected.
@@ -722,35 +717,36 @@ void VCMQmResolution::ConvertSpatialFractionalToWhole() {
bool found = false;
int isel = kDownActionHistorySize;
for (int i = 0; i < kDownActionHistorySize; ++i) {
- if (down_action_history_[i].spatial == kOneHalfSpatialUniform) {
+ if (down_action_history_[i].spatial == kOneHalfSpatialUniform) {
isel = i;
found = true;
break;
}
}
if (found) {
- action_.spatial = kOneQuarterSpatialUniform;
- state_dec_factor_spatial_ = state_dec_factor_spatial_ /
- (kFactorWidthSpatial[kOneHalfSpatialUniform] *
- kFactorHeightSpatial[kOneHalfSpatialUniform]);
- // Check if switching to 1/2x1/2 (=1/4) spatial is allowed.
- ConstrainAmountOfDownSampling();
- if (action_.spatial == kNoChangeSpatial) {
- // Not allowed. Go back to 3/4x3/4 spatial.
- action_.spatial = kOneHalfSpatialUniform;
- state_dec_factor_spatial_ = state_dec_factor_spatial_ *
- kFactorWidthSpatial[kOneHalfSpatialUniform] *
- kFactorHeightSpatial[kOneHalfSpatialUniform];
- } else {
- // Switching is allowed. Remove 3/4x3/4 from the history, and update
- // the frame size.
- for (int i = isel; i < kDownActionHistorySize - 1; ++i) {
- down_action_history_[i].spatial =
- down_action_history_[i + 1].spatial;
- }
- width_ = width_ * kFactorWidthSpatial[kOneHalfSpatialUniform];
- height_ = height_ * kFactorHeightSpatial[kOneHalfSpatialUniform];
- }
+ action_.spatial = kOneQuarterSpatialUniform;
+ state_dec_factor_spatial_ =
+ state_dec_factor_spatial_ /
+ (kFactorWidthSpatial[kOneHalfSpatialUniform] *
+ kFactorHeightSpatial[kOneHalfSpatialUniform]);
+ // Check if switching to 1/2x1/2 (=1/4) spatial is allowed.
+ ConstrainAmountOfDownSampling();
+ if (action_.spatial == kNoChangeSpatial) {
+ // Not allowed. Go back to 3/4x3/4 spatial.
+ action_.spatial = kOneHalfSpatialUniform;
+ state_dec_factor_spatial_ =
+ state_dec_factor_spatial_ *
+ kFactorWidthSpatial[kOneHalfSpatialUniform] *
+ kFactorHeightSpatial[kOneHalfSpatialUniform];
+ } else {
+ // Switching is allowed. Remove 3/4x3/4 from the history, and update
+ // the frame size.
+ for (int i = isel; i < kDownActionHistorySize - 1; ++i) {
+ down_action_history_[i].spatial = down_action_history_[i + 1].spatial;
+ }
+ width_ = width_ * kFactorWidthSpatial[kOneHalfSpatialUniform];
+ height_ = height_ * kFactorHeightSpatial[kOneHalfSpatialUniform];
+ }
}
}
}
@@ -815,8 +811,8 @@ void VCMQmResolution::ConstrainAmountOfDownSampling() {
float spatial_width_fact = kFactorWidthSpatial[action_.spatial];
float spatial_height_fact = kFactorHeightSpatial[action_.spatial];
float temporal_fact = kFactorTemporal[action_.temporal];
- float new_dec_factor_spatial = state_dec_factor_spatial_ *
- spatial_width_fact * spatial_height_fact;
+ float new_dec_factor_spatial =
+ state_dec_factor_spatial_ * spatial_width_fact * spatial_height_fact;
float new_dec_factor_temp = state_dec_factor_temporal_ * temporal_fact;
// No spatial sampling if current frame size is too small, or if the
@@ -908,8 +904,7 @@ VCMQmRobustness::VCMQmRobustness() {
Reset();
}
-VCMQmRobustness::~VCMQmRobustness() {
-}
+VCMQmRobustness::~VCMQmRobustness() {}
void VCMQmRobustness::Reset() {
prev_total_rate_ = 0.0f;
@@ -928,7 +923,7 @@ float VCMQmRobustness::AdjustFecFactor(uint8_t code_rate_delta,
int64_t rtt_time,
uint8_t packet_loss) {
// Default: no adjustment
- float adjust_fec = 1.0f;
+ float adjust_fec = 1.0f;
if (content_metrics_ == NULL) {
return adjust_fec;
}
@@ -955,4 +950,4 @@ bool VCMQmRobustness::SetUepProtection(uint8_t code_rate_delta,
// Default.
return false;
}
-} // namespace
+} // namespace webrtc
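A minimal standalone sketch of the three-way rate classification reformatted above, assuming kFacLowRate = 0.5f as defined in qm_select_data.h: rates below half the transition rate map to class 0, rates at or above the transition rate to class 2, and everything in between to class 1.

#include <cstdint>

namespace {

const float kFacLowRate = 0.5f;  // Mirrors the constant in qm_select_data.h.

// Classify the average target rate against the transitional rate.
uint8_t RateClass(float avg_target_rate, float transition_rate) {
  return avg_target_rate < (kFacLowRate * transition_rate)
             ? 0
             : (avg_target_rate >= transition_rate ? 2 : 1);
}

}  // namespace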
diff --git a/webrtc/modules/video_coding/main/source/qm_select.h b/webrtc/modules/video_coding/qm_select.h
index 079e7f8879..764b5ed8e3 100644
--- a/webrtc/modules/video_coding/main/source/qm_select.h
+++ b/webrtc/modules/video_coding/qm_select.h
@@ -30,8 +30,7 @@ struct VCMResolutionScale {
spatial_height_fact(1.0f),
temporal_fact(1.0f),
change_resolution_spatial(false),
- change_resolution_temporal(false) {
- }
+ change_resolution_temporal(false) {}
uint16_t codec_width;
uint16_t codec_height;
float frame_rate;
@@ -43,20 +42,20 @@ struct VCMResolutionScale {
};
enum ImageType {
- kQCIF = 0, // 176x144
- kHCIF, // 264x216 = half(~3/4x3/4) CIF.
- kQVGA, // 320x240 = quarter VGA.
- kCIF, // 352x288
- kHVGA, // 480x360 = half(~3/4x3/4) VGA.
- kVGA, // 640x480
- kQFULLHD, // 960x540 = quarter FULLHD, and half(~3/4x3/4) WHD.
- kWHD, // 1280x720
- kFULLHD, // 1920x1080
+ kQCIF = 0, // 176x144
+ kHCIF, // 264x216 = half(~3/4x3/4) CIF.
+ kQVGA, // 320x240 = quarter VGA.
+ kCIF, // 352x288
+ kHVGA, // 480x360 = half(~3/4x3/4) VGA.
+ kVGA, // 640x480
+ kQFULLHD, // 960x540 = quarter FULLHD, and half(~3/4x3/4) WHD.
+ kWHD, // 1280x720
+ kFULLHD, // 1920x1080
kNumImageTypes
};
-const uint32_t kSizeOfImageType[kNumImageTypes] =
-{ 25344, 57024, 76800, 101376, 172800, 307200, 518400, 921600, 2073600 };
+const uint32_t kSizeOfImageType[kNumImageTypes] = {
+ 25344, 57024, 76800, 101376, 172800, 307200, 518400, 921600, 2073600};
enum FrameRateLevelClass {
kFrameRateLow,
@@ -65,17 +64,10 @@ enum FrameRateLevelClass {
kFrameRateHigh
};
-enum ContentLevelClass {
- kLow,
- kHigh,
- kDefault
-};
+enum ContentLevelClass { kLow, kHigh, kDefault };
struct VCMContFeature {
- VCMContFeature()
- : value(0.0f),
- level(kDefault) {
- }
+ VCMContFeature() : value(0.0f), level(kDefault) {}
void Reset() {
value = 0.0f;
level = kDefault;
@@ -84,43 +76,34 @@ struct VCMContFeature {
ContentLevelClass level;
};
-enum UpDownAction {
- kUpResolution,
- kDownResolution
-};
+enum UpDownAction { kUpResolution, kDownResolution };
enum SpatialAction {
kNoChangeSpatial,
- kOneHalfSpatialUniform, // 3/4 x 3/4: 9/6 ~1/2 pixel reduction.
- kOneQuarterSpatialUniform, // 1/2 x 1/2: 1/4 pixel reduction.
+ kOneHalfSpatialUniform, // 3/4 x 3/4: 9/16 ~1/2 pixel reduction.
+ kOneQuarterSpatialUniform, // 1/2 x 1/2: 1/4 pixel reduction.
kNumModesSpatial
};
enum TemporalAction {
kNoChangeTemporal,
- kTwoThirdsTemporal, // 2/3 frame rate reduction
- kOneHalfTemporal, // 1/2 frame rate reduction
+ kTwoThirdsTemporal, // 2/3 frame rate reduction
+ kOneHalfTemporal, // 1/2 frame rate reduction
kNumModesTemporal
};
struct ResolutionAction {
- ResolutionAction()
- : spatial(kNoChangeSpatial),
- temporal(kNoChangeTemporal) {
- }
+ ResolutionAction() : spatial(kNoChangeSpatial), temporal(kNoChangeTemporal) {}
SpatialAction spatial;
TemporalAction temporal;
};
// Down-sampling factors for spatial (width and height), and temporal.
-const float kFactorWidthSpatial[kNumModesSpatial] =
- { 1.0f, 4.0f / 3.0f, 2.0f };
+const float kFactorWidthSpatial[kNumModesSpatial] = {1.0f, 4.0f / 3.0f, 2.0f};
-const float kFactorHeightSpatial[kNumModesSpatial] =
- { 1.0f, 4.0f / 3.0f, 2.0f };
+const float kFactorHeightSpatial[kNumModesSpatial] = {1.0f, 4.0f / 3.0f, 2.0f};
-const float kFactorTemporal[kNumModesTemporal] =
- { 1.0f, 1.5f, 2.0f };
+const float kFactorTemporal[kNumModesTemporal] = {1.0f, 1.5f, 2.0f};
enum EncoderState {
kStableEncoding, // Low rate mis-match, stable buffer levels.
@@ -297,7 +280,7 @@ class VCMQmResolution : public VCMQmMethod {
// Select the directional (1x2 or 2x1) spatial down-sampling action.
void SelectSpatialDirectionMode(float transition_rate);
- enum { kDownActionHistorySize = 10};
+ enum { kDownActionHistorySize = 10 };
VCMResolutionScale* qm_;
// Encoder rate control parameters.
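An illustrative sketch, not the actual GetImageType from qm_select.cc: one plausible way to map a frame size onto the ImageType table above is a nearest-pixel-count search over kSizeOfImageType. The helper name NearestImageType is hypothetical.

#include <cstdint>
#include <cstdlib>

#include "webrtc/modules/video_coding/qm_select.h"

// Pick the ImageType whose nominal pixel count is closest to width * height.
webrtc::ImageType NearestImageType(uint16_t width, uint16_t height) {
  const int64_t size = static_cast<int64_t>(width) * height;
  int best = 0;
  for (int i = 1; i < webrtc::kNumImageTypes; ++i) {
    const int64_t diff = static_cast<int64_t>(webrtc::kSizeOfImageType[i]) - size;
    const int64_t best_diff =
        static_cast<int64_t>(webrtc::kSizeOfImageType[best]) - size;
    if (std::llabs(diff) < std::llabs(best_diff))
      best = i;
  }
  return static_cast<webrtc::ImageType>(best);
}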
diff --git a/webrtc/modules/video_coding/main/source/qm_select_data.h b/webrtc/modules/video_coding/qm_select_data.h
index dc6bce4811..49190ef53b 100644
--- a/webrtc/modules/video_coding/main/source/qm_select_data.h
+++ b/webrtc/modules/video_coding/qm_select_data.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_VIDEO_CODING_SOURCE_QM_SELECT_DATA_H_
-#define WEBRTC_MODULES_VIDEO_CODING_SOURCE_QM_SELECT_DATA_H_
+#ifndef WEBRTC_MODULES_VIDEO_CODING_QM_SELECT_DATA_H_
+#define WEBRTC_MODULES_VIDEO_CODING_QM_SELECT_DATA_H_
/***************************************************************
*QMSelectData.h
@@ -69,36 +69,36 @@ const uint16_t kMaxRateQm[9] = {
// Frame rate scale for maximum transition rate.
const float kFrameRateFac[4] = {
- 0.5f, // Low
- 0.7f, // Middle level 1
- 0.85f, // Middle level 2
- 1.0f, // High
+ 0.5f, // Low
+ 0.7f, // Middle level 1
+ 0.85f, // Middle level 2
+ 1.0f, // High
};
// Scale for transitional rate: based on content class
// motion=L/H/D, spatial=L/H/D: for low, high, middle levels
const float kScaleTransRateQm[18] = {
// VGA and lower
- 0.40f, // L, L
- 0.50f, // L, H
- 0.40f, // L, D
- 0.60f, // H ,L
- 0.60f, // H, H
- 0.60f, // H, D
- 0.50f, // D, L
- 0.50f, // D, D
- 0.50f, // D, H
+ 0.40f, // L, L
+ 0.50f, // L, H
+ 0.40f, // L, D
+ 0.60f, // H, L
+ 0.60f, // H, H
+ 0.60f, // H, D
+ 0.50f, // D, L
+ 0.50f, // D, D
+ 0.50f, // D, H
// over VGA
- 0.40f, // L, L
- 0.50f, // L, H
- 0.40f, // L, D
- 0.60f, // H ,L
- 0.60f, // H, H
- 0.60f, // H, D
- 0.50f, // D, L
- 0.50f, // D, D
- 0.50f, // D, H
+ 0.40f, // L, L
+ 0.50f, // L, H
+ 0.40f, // L, D
+ 0.60f, // H, L
+ 0.60f, // H, H
+ 0.60f, // H, D
+ 0.50f, // D, L
+ 0.50f, // D, D
+ 0.50f, // D, H
};
// Threshold on the target rate relative to transitional rate.
@@ -108,73 +108,73 @@ const float kFacLowRate = 0.5f;
// motion=L/H/D, spatial=L/H/D, for low, high, middle levels;
// rate = 0/1/2, for target rate state relative to transition rate.
const uint8_t kSpatialAction[27] = {
-// rateClass = 0:
- 1, // L, L
- 1, // L, H
- 1, // L, D
- 4, // H ,L
- 1, // H, H
- 4, // H, D
- 4, // D, L
- 1, // D, H
- 2, // D, D
-
-// rateClass = 1:
- 1, // L, L
- 1, // L, H
- 1, // L, D
- 2, // H ,L
- 1, // H, H
- 2, // H, D
- 2, // D, L
- 1, // D, H
- 2, // D, D
-
-// rateClass = 2:
- 1, // L, L
- 1, // L, H
- 1, // L, D
- 2, // H ,L
- 1, // H, H
- 2, // H, D
- 2, // D, L
- 1, // D, H
- 2, // D, D
+ // rateClass = 0:
+ 1, // L, L
+ 1, // L, H
+ 1, // L, D
+ 4, // H, L
+ 1, // H, H
+ 4, // H, D
+ 4, // D, L
+ 1, // D, H
+ 2, // D, D
+
+ // rateClass = 1:
+ 1, // L, L
+ 1, // L, H
+ 1, // L, D
+ 2, // H, L
+ 1, // H, H
+ 2, // H, D
+ 2, // D, L
+ 1, // D, H
+ 2, // D, D
+
+ // rateClass = 2:
+ 1, // L, L
+ 1, // L, H
+ 1, // L, D
+ 2, // H, L
+ 1, // H, H
+ 2, // H, D
+ 2, // D, L
+ 1, // D, H
+ 2, // D, D
};
const uint8_t kTemporalAction[27] = {
-// rateClass = 0:
- 3, // L, L
- 2, // L, H
- 2, // L, D
- 1, // H ,L
- 3, // H, H
- 1, // H, D
- 1, // D, L
- 2, // D, H
- 1, // D, D
-
-// rateClass = 1:
- 3, // L, L
- 3, // L, H
- 3, // L, D
- 1, // H ,L
- 3, // H, H
- 1, // H, D
- 1, // D, L
- 3, // D, H
- 1, // D, D
-
-// rateClass = 2:
- 1, // L, L
- 3, // L, H
- 3, // L, D
- 1, // H ,L
- 3, // H, H
- 1, // H, D
- 1, // D, L
- 3, // D, H
- 1, // D, D
+ // rateClass = 0:
+ 3, // L, L
+ 2, // L, H
+ 2, // L, D
+ 1, // H, L
+ 3, // H, H
+ 1, // H, D
+ 1, // D, L
+ 2, // D, H
+ 1, // D, D
+
+ // rateClass = 1:
+ 3, // L, L
+ 3, // L, H
+ 3, // L, D
+ 1, // H, L
+ 3, // H, H
+ 1, // H, D
+ 1, // D, L
+ 3, // D, H
+ 1, // D, D
+
+ // rateClass = 2:
+ 1, // L, L
+ 3, // L, H
+ 3, // L, D
+ 1, // H, L
+ 3, // H, H
+ 1, // H, D
+ 1, // D, L
+ 3, // D, H
+ 1, // D, D
};
// Control the total amount of down-sampling allowed.
@@ -224,4 +224,4 @@ const float kSpatialErrVertVsHoriz = 0.1f; // percentage to favor H over V
} // namespace webrtc
-#endif // WEBRTC_MODULES_VIDEO_CODING_SOURCE_QM_SELECT_DATA_H_
+#endif // WEBRTC_MODULES_VIDEO_CODING_QM_SELECT_DATA_H_
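A hedged sketch of how these 27-entry tables appear to be indexed, by analogy with the table_index = image_class * 9 + content_class_ lookup shown in qm_select.cc: nine content classes (motion level x spatial level) per rate class. The helper names are hypothetical; the real lookup lives in qm_select.cc.

#include <cstdint>

#include "webrtc/modules/video_coding/qm_select_data.h"

// rate_class in {0, 1, 2}; content_class in {0, ..., 8}.
uint8_t SpatialActionFor(uint8_t rate_class, uint8_t content_class) {
  return webrtc::kSpatialAction[rate_class * 9 + content_class];
}

uint8_t TemporalActionFor(uint8_t rate_class, uint8_t content_class) {
  return webrtc::kTemporalAction[rate_class * 9 + content_class];
}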
diff --git a/webrtc/modules/video_coding/main/source/qm_select_unittest.cc b/webrtc/modules/video_coding/qm_select_unittest.cc
index 6abc0d3099..f8542ec676 100644
--- a/webrtc/modules/video_coding/main/source/qm_select_unittest.cc
+++ b/webrtc/modules/video_coding/qm_select_unittest.cc
@@ -15,8 +15,8 @@
#include "testing/gtest/include/gtest/gtest.h"
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/modules/video_coding/main/source/qm_select.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/video_coding/qm_select.h"
namespace webrtc {
@@ -32,10 +32,9 @@ const float kTemporalHigh = 0.1f;
class QmSelectTest : public ::testing::Test {
protected:
QmSelectTest()
- : qm_resolution_(new VCMQmResolution()),
- content_metrics_(new VideoContentMetrics()),
- qm_scale_(NULL) {
- }
+ : qm_resolution_(new VCMQmResolution()),
+ content_metrics_(new VideoContentMetrics()),
+ qm_scale_(NULL) {}
VCMQmResolution* qm_resolution_;
VideoContentMetrics* content_metrics_;
VCMResolutionScale* qm_scale_;
@@ -87,8 +86,8 @@ TEST_F(QmSelectTest, HandleInputs) {
qm_resolution_->UpdateContent(content_metrics);
// Content metrics are NULL: Expect success and no down-sampling action.
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0, 1.0, 1.0, 640, 480,
- 30.0f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 1.0, 1.0, 1.0, 640, 480, 30.0f));
}
// TODO(marpan): Add a test for number of temporal layers > 1.
@@ -118,8 +117,8 @@ TEST_F(QmSelectTest, NoActionHighRate) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(0, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 640, 480,
- 30.0f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 640, 480, 30.0f));
}
// Rate is well below transition, down-sampling action is taken,
@@ -149,40 +148,40 @@ TEST_F(QmSelectTest, DownActionLowRate) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(3, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240,
- 30.0f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240, 30.0f));
qm_resolution_->ResetDownSamplingState();
// Low motion, low spatial: 2/3 temporal is expected.
UpdateQmContentData(kTemporalLow, kSpatialLow, kSpatialLow, kSpatialLow);
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(0, qm_resolution_->ComputeContentClass());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 640, 480,
- 20.5f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 640, 480, 20.5f));
qm_resolution_->ResetDownSamplingState();
// Medium motion, low spatial: 2x2 spatial expected.
UpdateQmContentData(kTemporalMedium, kSpatialLow, kSpatialLow, kSpatialLow);
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(6, qm_resolution_->ComputeContentClass());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240,
- 30.0f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240, 30.0f));
qm_resolution_->ResetDownSamplingState();
// High motion, high spatial: 2/3 temporal expected.
UpdateQmContentData(kTemporalHigh, kSpatialHigh, kSpatialHigh, kSpatialHigh);
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(4, qm_resolution_->ComputeContentClass());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 640, 480,
- 20.5f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 640, 480, 20.5f));
qm_resolution_->ResetDownSamplingState();
// Low motion, high spatial: 1/2 temporal expected.
UpdateQmContentData(kTemporalLow, kSpatialHigh, kSpatialHigh, kSpatialHigh);
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 2.0f, 640, 480,
- 15.5f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 2.0f, 640, 480, 15.5f));
qm_resolution_->ResetDownSamplingState();
// Medium motion, high spatial: 1/2 temporal expected.
@@ -190,8 +189,8 @@ TEST_F(QmSelectTest, DownActionLowRate) {
kSpatialHigh);
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(7, qm_resolution_->ComputeContentClass());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 2.0f, 640, 480,
- 15.5f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 2.0f, 640, 480, 15.5f));
qm_resolution_->ResetDownSamplingState();
// High motion, medium spatial: 2x2 spatial expected.
@@ -200,8 +199,8 @@ TEST_F(QmSelectTest, DownActionLowRate) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(5, qm_resolution_->ComputeContentClass());
// Target frame rate for frame dropper should be the same as previous == 15.
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240,
- 30.0f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240, 30.0f));
qm_resolution_->ResetDownSamplingState();
// Low motion, medium spatial: high frame rate, so 1/2 temporal expected.
@@ -209,8 +208,8 @@ TEST_F(QmSelectTest, DownActionLowRate) {
kSpatialMedium);
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(2, qm_resolution_->ComputeContentClass());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 2.0f, 640, 480,
- 15.5f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 2.0f, 640, 480, 15.5f));
qm_resolution_->ResetDownSamplingState();
// Medium motion, medium spatial: high frame rate, so 2/3 temporal expected.
@@ -218,8 +217,8 @@ TEST_F(QmSelectTest, DownActionLowRate) {
kSpatialMedium);
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(8, qm_resolution_->ComputeContentClass());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 640, 480,
- 20.5f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 640, 480, 20.5f));
}
// Rate mis-match is high, and we have over-shooting.
@@ -249,16 +248,16 @@ TEST_F(QmSelectTest, DownActionHighRateMMOvershoot) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(3, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStressedEncoding, qm_resolution_->GetEncoderState());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 4.0f / 3.0f, 4.0f / 3.0f,
- 1.0f, 480, 360, 30.0f));
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 4.0f / 3.0f, 4.0f / 3.0f, 1.0f,
+ 480, 360, 30.0f));
qm_resolution_->ResetDownSamplingState();
// Low motion, high spatial
UpdateQmContentData(kTemporalLow, kSpatialHigh, kSpatialHigh, kSpatialHigh);
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 640, 480,
- 20.5f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 640, 480, 20.5f));
}
// Rate mis-match is high, target rate is below max for down-sampling,
@@ -288,16 +287,16 @@ TEST_F(QmSelectTest, NoActionHighRateMMUndershoot) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(3, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kEasyEncoding, qm_resolution_->GetEncoderState());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 640, 480,
- 30.0f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 640, 480, 30.0f));
qm_resolution_->ResetDownSamplingState();
// Low motion, high spatial
UpdateQmContentData(kTemporalLow, kSpatialHigh, kSpatialHigh, kSpatialHigh);
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 640, 480,
- 30.0f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 640, 480, 30.0f));
}
// Buffer is underflowing, and target rate is below max for down-sampling,
@@ -332,16 +331,16 @@ TEST_F(QmSelectTest, DownActionBufferUnderflow) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(3, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStressedEncoding, qm_resolution_->GetEncoderState());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 4.0f / 3.0f, 4.0f / 3.0f,
- 1.0f, 480, 360, 30.0f));
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 4.0f / 3.0f, 4.0f / 3.0f, 1.0f,
+ 480, 360, 30.0f));
qm_resolution_->ResetDownSamplingState();
// Low motion, high spatial
UpdateQmContentData(kTemporalLow, kSpatialHigh, kSpatialHigh, kSpatialHigh);
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 640, 480,
- 20.5f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 640, 480, 20.5f));
}
// Target rate is below max for down-sampling, but buffer level is stable,
@@ -376,16 +375,16 @@ TEST_F(QmSelectTest, NoActionBufferStable) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(3, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 640, 480,
- 30.0f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 640, 480, 30.0f));
qm_resolution_->ResetDownSamplingState();
// Low motion, high spatial
UpdateQmContentData(kTemporalLow, kSpatialHigh, kSpatialHigh, kSpatialHigh);
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 640, 480,
- 30.0f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 640, 480, 30.0f));
}
// Very low rate, but no spatial down-sampling below some size (QCIF).
@@ -414,8 +413,8 @@ TEST_F(QmSelectTest, LimitDownSpatialAction) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(3, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 176, 144,
- 30.0f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 176, 144, 30.0f));
}
// Very low rate, but no frame reduction below some frame_rate (8fps).
@@ -445,8 +444,8 @@ TEST_F(QmSelectTest, LimitDownTemporalAction) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(2, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 640, 480,
- 8.0f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 640, 480, 8.0f));
}
// Two stages: spatial down-sample and then back up spatially,
@@ -468,7 +467,7 @@ TEST_F(QmSelectTest, 2StageDownSpatialUpSpatial) {
int incoming_frame_rate[] = {30, 30, 30};
uint8_t fraction_lost[] = {10, 10, 10};
UpdateQmRateData(target_rate, encoder_sent_rate, incoming_frame_rate,
- fraction_lost, 3);
+ fraction_lost, 3);
// Update content: motion level, and 3 spatial prediction errors.
// High motion, low spatial.
@@ -476,8 +475,8 @@ TEST_F(QmSelectTest, 2StageDownSpatialUpSpatial) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(3, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240,
- 30.0f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240, 30.0f));
// Reset and go up in rate: expected to go back up, in 2 stages of 3/4.
qm_resolution_->ResetRates();
@@ -493,8 +492,8 @@ TEST_F(QmSelectTest, 2StageDownSpatialUpSpatial) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
float scale = (4.0f / 3.0f) / 2.0f;
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, scale, scale, 1.0f, 480, 360,
- 30.0f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, scale, scale, 1.0f, 480, 360, 30.0f));
qm_resolution_->UpdateCodecParameters(30.0f, 480, 360);
EXPECT_EQ(4, qm_resolution_->GetImageType(480, 360));
@@ -522,7 +521,7 @@ TEST_F(QmSelectTest, 2StageDownSpatialUpSpatialUndershoot) {
int incoming_frame_rate[] = {30, 30, 30};
uint8_t fraction_lost[] = {10, 10, 10};
UpdateQmRateData(target_rate, encoder_sent_rate, incoming_frame_rate,
- fraction_lost, 3);
+ fraction_lost, 3);
// Update content: motion level, and 3 spatial prediction errors.
// High motion, low spatial.
@@ -530,8 +529,8 @@ TEST_F(QmSelectTest, 2StageDownSpatialUpSpatialUndershoot) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(3, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240,
- 30.0f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240, 30.0f));
// Reset rates and simulate an under-shooting scenario: expect to go back up.
// Goes up spatially in two stages for 1/2x1/2 down-sampling.
@@ -548,8 +547,8 @@ TEST_F(QmSelectTest, 2StageDownSpatialUpSpatialUndershoot) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(kEasyEncoding, qm_resolution_->GetEncoderState());
float scale = (4.0f / 3.0f) / 2.0f;
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, scale, scale, 1.0f, 480, 360,
- 30.0f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, scale, scale, 1.0f, 480, 360, 30.0f));
qm_resolution_->UpdateCodecParameters(30.0f, 480, 360);
EXPECT_EQ(4, qm_resolution_->GetImageType(480, 360));
@@ -577,7 +576,7 @@ TEST_F(QmSelectTest, 2StageDownSpatialNoActionUp) {
int incoming_frame_rate[] = {30, 30, 30};
uint8_t fraction_lost[] = {10, 10, 10};
UpdateQmRateData(target_rate, encoder_sent_rate, incoming_frame_rate,
- fraction_lost, 3);
+ fraction_lost, 3);
// Update content: motion level, and 3 spatial prediction errors.
// High motion, low spatial.
@@ -585,8 +584,8 @@ TEST_F(QmSelectTest, 2StageDownSpatialNoActionUp) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(3, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240,
- 30.0f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240, 30.0f));
// Reset and simulate large rate mis-match: expect no action to go back up.
qm_resolution_->ResetRates();
@@ -601,8 +600,8 @@ TEST_F(QmSelectTest, 2StageDownSpatialNoActionUp) {
fraction_lost2, 5);
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(kStressedEncoding, qm_resolution_->GetEncoderState());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 320, 240,
- 30.0f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 320, 240, 30.0f));
}
// Two stages: temporally down-sample and then back up temporally,
@@ -632,8 +631,8 @@ TEST_F(QmSelectTest, 2StatgeDownTemporalUpTemporal) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 2.0f, 640, 480,
- 15.5f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 2.0f, 640, 480, 15.5f));
// Reset rates and go up in rate: expect to go back up.
qm_resolution_->ResetRates();
@@ -646,8 +645,8 @@ TEST_F(QmSelectTest, 2StatgeDownTemporalUpTemporal) {
fraction_lost2, 5);
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 0.5f, 640, 480,
- 30.0f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 0.5f, 640, 480, 30.0f));
}
// Two stages: temporal down-sample and then back up temporally, since encoder
@@ -669,7 +668,7 @@ TEST_F(QmSelectTest, 2StatgeDownTemporalUpTemporalUndershoot) {
int incoming_frame_rate[] = {30, 30, 30};
uint8_t fraction_lost[] = {10, 10, 10};
UpdateQmRateData(target_rate, encoder_sent_rate, incoming_frame_rate,
- fraction_lost, 3);
+ fraction_lost, 3);
// Update content: motion level, and 3 spatial prediction errors.
// Low motion, high spatial.
@@ -677,8 +676,8 @@ TEST_F(QmSelectTest, 2StatgeDownTemporalUpTemporalUndershoot) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 2.0f, 640, 480,
- 15.5f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 2.0f, 640, 480, 15.5f));
// Reset rates and simulate an under-shooting scenario: expect to go back up.
qm_resolution_->ResetRates();
@@ -691,8 +690,8 @@ TEST_F(QmSelectTest, 2StatgeDownTemporalUpTemporalUndershoot) {
fraction_lost2, 5);
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(kEasyEncoding, qm_resolution_->GetEncoderState());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 0.5f, 640, 480,
- 30.0f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 0.5f, 640, 480, 30.0f));
}
// Two stages: temporal down-sample and then no action to go up,
@@ -736,8 +735,8 @@ TEST_F(QmSelectTest, 2StageDownTemporalNoActionUp) {
fraction_lost2, 5);
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(kStressedEncoding, qm_resolution_->GetEncoderState());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 640, 480,
- 15.0f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 640, 480, 15.0f));
}
// 3 stages: spatial down-sample, followed by temporal down-sample,
// and then go up to full state, as encoding rate has increased.
@@ -766,8 +765,8 @@ TEST_F(QmSelectTest, 3StageDownSpatialTemporlaUpSpatialTemporal) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(3, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240,
- 30.0f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240, 30.0f));
// Change content data: expect temporal down-sample.
qm_resolution_->UpdateCodecParameters(30.0f, 320, 240);
@@ -780,7 +779,7 @@ TEST_F(QmSelectTest, 3StageDownSpatialTemporlaUpSpatialTemporal) {
int incoming_frame_rate2[] = {30, 30, 30, 30, 30};
uint8_t fraction_lost2[] = {10, 10, 10, 10, 10};
UpdateQmRateData(target_rate2, encoder_sent_rate2, incoming_frame_rate2,
- fraction_lost2, 5);
+ fraction_lost2, 5);
// Update content: motion level, and 3 spatial prediction errors.
// Low motion, high spatial.
@@ -788,8 +787,8 @@ TEST_F(QmSelectTest, 3StageDownSpatialTemporlaUpSpatialTemporal) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 320, 240,
- 20.5f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 320, 240, 20.5f));
// Reset rates and go high up in rate: expect to go back up both spatial
// and temporally. The 1/2x1/2 spatial is undone in two stages.
@@ -806,8 +805,8 @@ TEST_F(QmSelectTest, 3StageDownSpatialTemporlaUpSpatialTemporal) {
EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
float scale = (4.0f / 3.0f) / 2.0f;
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, scale, scale, 2.0f / 3.0f,
- 480, 360, 30.0f));
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, scale, scale, 2.0f / 3.0f, 480,
+ 360, 30.0f));
qm_resolution_->UpdateCodecParameters(30.0f, 480, 360);
EXPECT_EQ(4, qm_resolution_->GetImageType(480, 360));
@@ -842,8 +841,8 @@ TEST_F(QmSelectTest, NoActionTooMuchDownSampling) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(3, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 640, 360,
- 30.0f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 640, 360, 30.0f));
// Reset and lower rates to get another spatial action (3/4x3/4).
// Lower the frame rate for spatial to be selected again.
@@ -865,8 +864,8 @@ TEST_F(QmSelectTest, NoActionTooMuchDownSampling) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(5, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 4.0f / 3.0f, 4.0f / 3.0f,
- 1.0f, 480, 270, 10.0f));
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 4.0f / 3.0f, 4.0f / 3.0f, 1.0f,
+ 480, 270, 10.0f));
// Reset and go to very low rate: no action should be taken,
// we went down too much already.
@@ -883,8 +882,8 @@ TEST_F(QmSelectTest, NoActionTooMuchDownSampling) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(5, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 480, 270,
- 10.0f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 480, 270, 10.0f));
}
// Multiple down-sampling stages and then undo all of them.
@@ -917,8 +916,8 @@ TEST_F(QmSelectTest, MultipleStagesCheckActionHistory1) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(6, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 4.0f / 3.0f, 4.0f / 3.0f,
- 1.0f, 480, 360, 30.0f));
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 4.0f / 3.0f, 4.0f / 3.0f, 1.0f,
+ 480, 360, 30.0f));
// Go down 2/3 temporal.
qm_resolution_->UpdateCodecParameters(30.0f, 480, 360);
EXPECT_EQ(4, qm_resolution_->GetImageType(480, 360));
@@ -936,8 +935,8 @@ TEST_F(QmSelectTest, MultipleStagesCheckActionHistory1) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 480, 360,
- 20.5f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 480, 360, 20.5f));
// Go down 3/4x3/4 spatial:
qm_resolution_->UpdateCodecParameters(20.0f, 480, 360);
@@ -947,7 +946,7 @@ TEST_F(QmSelectTest, MultipleStagesCheckActionHistory1) {
int incoming_frame_rate3[] = {20, 20, 20, 20, 20};
uint8_t fraction_lost3[] = {10, 10, 10, 10, 10};
UpdateQmRateData(target_rate3, encoder_sent_rate3, incoming_frame_rate3,
- fraction_lost3, 5);
+ fraction_lost3, 5);
// Update content: motion level, and 3 spatial prediction errors.
// High motion, low spatial.
@@ -957,8 +956,8 @@ TEST_F(QmSelectTest, MultipleStagesCheckActionHistory1) {
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
// The two spatial actions of 3/4x3/4 are converted to 1/2x1/2,
// so scale factor is 2.0.
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240,
- 20.0f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240, 20.0f));
// Reset rates and go high up in rate: expect to go up:
// 1/2x1/2 spatial and 1/2 temporally.
@@ -1018,8 +1017,8 @@ TEST_F(QmSelectTest, MultipleStagesCheckActionHistory2) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(6, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240,
- 30.0f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240, 30.0f));
// Go down 2/3 temporal.
qm_resolution_->UpdateCodecParameters(30.0f, 320, 240);
@@ -1039,8 +1038,8 @@ TEST_F(QmSelectTest, MultipleStagesCheckActionHistory2) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(7, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 320, 240,
- 20.5f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 320, 240, 20.5f));
// Go up 2/3 temporally.
qm_resolution_->UpdateCodecParameters(20.0f, 320, 240);
@@ -1076,8 +1075,8 @@ TEST_F(QmSelectTest, MultipleStagesCheckActionHistory2) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 320, 240,
- 20.5f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 320, 240, 20.5f));
// Go up spatial and temporal. Spatial undoing is done in 2 stages.
qm_resolution_->UpdateCodecParameters(20.5f, 320, 240);
@@ -1092,8 +1091,8 @@ TEST_F(QmSelectTest, MultipleStagesCheckActionHistory2) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
float scale = (4.0f / 3.0f) / 2.0f;
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, scale, scale, 2.0f / 3.0f,
- 480, 360, 30.0f));
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, scale, scale, 2.0f / 3.0f, 480,
+ 360, 30.0f));
qm_resolution_->UpdateCodecParameters(30.0f, 480, 360);
EXPECT_EQ(4, qm_resolution_->GetImageType(480, 360));
@@ -1131,8 +1130,8 @@ TEST_F(QmSelectTest, MultipleStagesCheckActionHistory3) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(6, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 4.0f / 3.0f, 4.0f / 3.0f,
- 1.0f, 480, 360, 30.0f));
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 4.0f / 3.0f, 4.0f / 3.0f, 1.0f,
+ 480, 360, 30.0f));
// Go down 2/3 temporal.
qm_resolution_->UpdateCodecParameters(30.0f, 480, 360);
@@ -1151,8 +1150,8 @@ TEST_F(QmSelectTest, MultipleStagesCheckActionHistory3) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 480, 360,
- 20.5f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 480, 360, 20.5f));
// Go up 2/3 temporal.
qm_resolution_->UpdateCodecParameters(20.5f, 480, 360);
@@ -1184,8 +1183,8 @@ TEST_F(QmSelectTest, MultipleStagesCheckActionHistory3) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 3.0f / 4.0f, 3.0f / 4.0f,
- 1.0f, 640, 480, 30.0f));
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 3.0f / 4.0f, 3.0f / 4.0f, 1.0f,
+ 640, 480, 30.0f));
}
// Two stages of 3/4x3/4 converted to one stage of 1/2x1/2.
@@ -1215,8 +1214,8 @@ TEST_F(QmSelectTest, ConvertThreeQuartersToOneHalf) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(6, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 4.0f / 3.0f, 4.0f / 3.0f,
- 1.0f, 480, 360, 30.0f));
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 4.0f / 3.0f, 4.0f / 3.0f, 1.0f,
+ 480, 360, 30.0f));
// Set rates to go down another 3/4 spatial. Should be converted to 1/2.
qm_resolution_->UpdateCodecParameters(30.0f, 480, 360);
@@ -1235,8 +1234,8 @@ TEST_F(QmSelectTest, ConvertThreeQuartersToOneHalf) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(6, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240,
- 30.0f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240, 30.0f));
}
void QmSelectTest::InitQmNativeData(float initial_bit_rate,
@@ -1244,11 +1243,9 @@ void QmSelectTest::InitQmNativeData(float initial_bit_rate,
int native_width,
int native_height,
int num_layers) {
- EXPECT_EQ(0, qm_resolution_->Initialize(initial_bit_rate,
- user_frame_rate,
- native_width,
- native_height,
- num_layers));
+ EXPECT_EQ(
+ 0, qm_resolution_->Initialize(initial_bit_rate, user_frame_rate,
+ native_width, native_height, num_layers));
}
void QmSelectTest::UpdateQmContentData(float motion_metric,
@@ -1281,8 +1278,7 @@ void QmSelectTest::UpdateQmRateData(int* target_rate,
float encoder_sent_rate_update = encoder_sent_rate[i];
float incoming_frame_rate_update = incoming_frame_rate[i];
uint8_t fraction_lost_update = fraction_lost[i];
- qm_resolution_->UpdateRates(target_rate_update,
- encoder_sent_rate_update,
+ qm_resolution_->UpdateRates(target_rate_update, encoder_sent_rate_update,
incoming_frame_rate_update,
fraction_lost_update);
}
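A condensed sketch of the selector flow these tests exercise, pieced together from the calls above (Initialize, UpdateRates, UpdateContent, SelectResolution). The rate and format values are illustrative and error handling is elided.

#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/modules/video_coding/qm_select.h"

// Drive one resolution decision for a native 640x480, 30 fps, 1-layer stream.
void SelectOnce(webrtc::VCMQmResolution* qm_resolution,
                webrtc::VideoContentMetrics* metrics) {
  webrtc::VCMResolutionScale* qm_scale = NULL;
  qm_resolution->Initialize(300.0f, 30.0f, 640, 480, 1);
  qm_resolution->UpdateRates(300.0f, 300.0f, 30.0f, 0);  // One rate sample.
  qm_resolution->UpdateContent(metrics);
  if (qm_resolution->SelectResolution(&qm_scale) == 0 && qm_scale != NULL) {
    // qm_scale->codec_width, codec_height and frame_rate hold the new target.
  }
}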
diff --git a/webrtc/modules/video_coding/main/source/receiver.cc b/webrtc/modules/video_coding/receiver.cc
index 0707a9c3cd..fa2a2dca29 100644
--- a/webrtc/modules/video_coding/main/source/receiver.cc
+++ b/webrtc/modules/video_coding/receiver.cc
@@ -8,18 +8,20 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/video_coding/main/source/receiver.h"
+#include "webrtc/modules/video_coding/receiver.h"
#include <assert.h>
#include <cstdlib>
+#include <utility>
+#include <vector>
+#include "webrtc/base/logging.h"
#include "webrtc/base/trace_event.h"
-#include "webrtc/modules/video_coding/main/source/encoded_frame.h"
-#include "webrtc/modules/video_coding/main/source/internal_defines.h"
-#include "webrtc/modules/video_coding/main/source/media_opt_util.h"
+#include "webrtc/modules/video_coding/encoded_frame.h"
+#include "webrtc/modules/video_coding/internal_defines.h"
+#include "webrtc/modules/video_coding/media_opt_util.h"
#include "webrtc/system_wrappers/include/clock.h"
-#include "webrtc/system_wrappers/include/logging.h"
namespace webrtc {
@@ -40,9 +42,9 @@ VCMReceiver::VCMReceiver(VCMTiming* timing,
rtc::scoped_ptr<EventWrapper> jitter_buffer_event)
: crit_sect_(CriticalSectionWrapper::CreateCriticalSection()),
clock_(clock),
- jitter_buffer_(clock_, jitter_buffer_event.Pass()),
+ jitter_buffer_(clock_, std::move(jitter_buffer_event)),
timing_(timing),
- render_wait_event_(receiver_event.Pass()),
+ render_wait_event_(std::move(receiver_event)),
max_video_delay_ms_(kMaxVideoDelayMs) {
Reset();
}
@@ -71,8 +73,8 @@ int32_t VCMReceiver::InsertPacket(const VCMPacket& packet,
// Insert the packet into the jitter buffer. The packet can either be empty or
// contain media at this point.
bool retransmitted = false;
- const VCMFrameBufferEnum ret = jitter_buffer_.InsertPacket(packet,
- &retransmitted);
+ const VCMFrameBufferEnum ret =
+ jitter_buffer_.InsertPacket(packet, &retransmitted);
if (ret == kOldPacket) {
return VCM_OK;
} else if (ret == kFlushIndicator) {
@@ -95,13 +97,13 @@ void VCMReceiver::TriggerDecoderShutdown() {
}
VCMEncodedFrame* VCMReceiver::FrameForDecoding(uint16_t max_wait_time_ms,
- int64_t& next_render_time_ms,
- bool render_timing) {
+ int64_t* next_render_time_ms,
+ bool prefer_late_decoding) {
const int64_t start_time_ms = clock_->TimeInMilliseconds();
uint32_t frame_timestamp = 0;
// Exhaust wait time to get a complete frame for decoding.
- bool found_frame = jitter_buffer_.NextCompleteTimestamp(
- max_wait_time_ms, &frame_timestamp);
+ bool found_frame =
+ jitter_buffer_.NextCompleteTimestamp(max_wait_time_ms, &frame_timestamp);
if (!found_frame)
found_frame = jitter_buffer_.NextMaybeIncompleteTimestamp(&frame_timestamp);
@@ -113,14 +115,14 @@ VCMEncodedFrame* VCMReceiver::FrameForDecoding(uint16_t max_wait_time_ms,
timing_->SetJitterDelay(jitter_buffer_.EstimatedJitterMs());
const int64_t now_ms = clock_->TimeInMilliseconds();
timing_->UpdateCurrentDelay(frame_timestamp);
- next_render_time_ms = timing_->RenderTimeMs(frame_timestamp, now_ms);
+ *next_render_time_ms = timing_->RenderTimeMs(frame_timestamp, now_ms);
// Check render timing.
bool timing_error = false;
// Assume that render timing errors are due to changes in the video stream.
- if (next_render_time_ms < 0) {
+ if (*next_render_time_ms < 0) {
timing_error = true;
- } else if (std::abs(next_render_time_ms - now_ms) > max_video_delay_ms_) {
- int frame_delay = static_cast<int>(std::abs(next_render_time_ms - now_ms));
+ } else if (std::abs(*next_render_time_ms - now_ms) > max_video_delay_ms_) {
+ int frame_delay = static_cast<int>(std::abs(*next_render_time_ms - now_ms));
LOG(LS_WARNING) << "A frame about to be decoded is out of the configured "
<< "delay bounds (" << frame_delay << " > "
<< max_video_delay_ms_
@@ -140,14 +142,15 @@ VCMEncodedFrame* VCMReceiver::FrameForDecoding(uint16_t max_wait_time_ms,
return NULL;
}
- if (!render_timing) {
+ if (prefer_late_decoding) {
// Decode frame as close as possible to the render timestamp.
- const int32_t available_wait_time = max_wait_time_ms -
+ const int32_t available_wait_time =
+ max_wait_time_ms -
static_cast<int32_t>(clock_->TimeInMilliseconds() - start_time_ms);
- uint16_t new_max_wait_time = static_cast<uint16_t>(
- VCM_MAX(available_wait_time, 0));
+ uint16_t new_max_wait_time =
+ static_cast<uint16_t>(VCM_MAX(available_wait_time, 0));
uint32_t wait_time_ms = timing_->MaxWaitingTime(
- next_render_time_ms, clock_->TimeInMilliseconds());
+ *next_render_time_ms, clock_->TimeInMilliseconds());
if (new_max_wait_time < wait_time_ms) {
// We're not allowed to wait until the frame is supposed to be rendered,
// waiting as long as we're allowed to avoid busy looping, and then return
@@ -164,9 +167,9 @@ VCMEncodedFrame* VCMReceiver::FrameForDecoding(uint16_t max_wait_time_ms,
if (frame == NULL) {
return NULL;
}
- frame->SetRenderTime(next_render_time_ms);
- TRACE_EVENT_ASYNC_STEP1("webrtc", "Video", frame->TimeStamp(),
- "SetRenderTS", "render_time", next_render_time_ms);
+ frame->SetRenderTime(*next_render_time_ms);
+ TRACE_EVENT_ASYNC_STEP1("webrtc", "Video", frame->TimeStamp(), "SetRenderTS",
+ "render_time", *next_render_time_ms);
if (!frame->Complete()) {
// Update stats for incomplete frames.
bool retransmitted = false;
@@ -186,8 +189,7 @@ void VCMReceiver::ReleaseFrame(VCMEncodedFrame* frame) {
jitter_buffer_.ReleaseFrame(frame);
}
-void VCMReceiver::ReceiveStatistics(uint32_t* bitrate,
- uint32_t* framerate) {
+void VCMReceiver::ReceiveStatistics(uint32_t* bitrate, uint32_t* framerate) {
assert(bitrate);
assert(framerate);
jitter_buffer_.IncomingRateStatistics(framerate, bitrate);
@@ -209,8 +211,7 @@ void VCMReceiver::SetNackMode(VCMNackMode nackMode,
void VCMReceiver::SetNackSettings(size_t max_nack_list_size,
int max_packet_age_to_nack,
int max_incomplete_time_ms) {
- jitter_buffer_.SetNackSettings(max_nack_list_size,
- max_packet_age_to_nack,
+ jitter_buffer_.SetNackSettings(max_nack_list_size, max_packet_age_to_nack,
max_incomplete_time_ms);
}
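A minimal sketch of the ownership-transfer change above: the deprecated rtc::scoped_ptr::Pass() is replaced by std::move, which is why <utility> is now included. std::unique_ptr stands in for rtc::scoped_ptr to keep the sketch self-contained.

#include <memory>
#include <utility>

class EventWrapper {};

class JitterBufferLike {
 public:
  // Takes ownership of the event, as the jitter buffer does.
  explicit JitterBufferLike(std::unique_ptr<EventWrapper> event)
      : event_(std::move(event)) {}  // Previously: event.Pass().

 private:
  std::unique_ptr<EventWrapper> event_;
};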
diff --git a/webrtc/modules/video_coding/main/source/receiver.h b/webrtc/modules/video_coding/receiver.h
index e2515d438f..ff0eef8a6a 100644
--- a/webrtc/modules/video_coding/main/source/receiver.h
+++ b/webrtc/modules/video_coding/receiver.h
@@ -8,15 +8,17 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_RECEIVER_H_
-#define WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_RECEIVER_H_
+#ifndef WEBRTC_MODULES_VIDEO_CODING_RECEIVER_H_
+#define WEBRTC_MODULES_VIDEO_CODING_RECEIVER_H_
-#include "webrtc/modules/video_coding/main/source/jitter_buffer.h"
-#include "webrtc/modules/video_coding/main/source/packet.h"
-#include "webrtc/modules/video_coding/main/source/timing.h"
+#include <vector>
+
+#include "webrtc/modules/video_coding/jitter_buffer.h"
+#include "webrtc/modules/video_coding/packet.h"
+#include "webrtc/modules/video_coding/timing.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
-#include "webrtc/modules/video_coding/main/interface/video_coding.h"
-#include "webrtc/modules/video_coding/main/interface/video_coding_defines.h"
+#include "webrtc/modules/video_coding/include/video_coding.h"
+#include "webrtc/modules/video_coding/include/video_coding_defines.h"
namespace webrtc {
@@ -25,9 +27,7 @@ class VCMEncodedFrame;
class VCMReceiver {
public:
- VCMReceiver(VCMTiming* timing,
- Clock* clock,
- EventFactory* event_factory);
+ VCMReceiver(VCMTiming* timing, Clock* clock, EventFactory* event_factory);
// Using this constructor, you can specify a different event factory for the
// jitter buffer. Useful for unit tests when you want to simulate incoming
@@ -46,8 +46,8 @@ class VCMReceiver {
uint16_t frame_width,
uint16_t frame_height);
VCMEncodedFrame* FrameForDecoding(uint16_t max_wait_time_ms,
- int64_t& next_render_time_ms,
- bool render_timing = true);
+ int64_t* next_render_time_ms,
+ bool prefer_late_decoding);
void ReleaseFrame(VCMEncodedFrame* frame);
void ReceiveStatistics(uint32_t* bitrate, uint32_t* framerate);
uint32_t DiscardedPackets() const;
@@ -89,4 +89,4 @@ class VCMReceiver {
} // namespace webrtc
-#endif // WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_RECEIVER_H_
+#endif // WEBRTC_MODULES_VIDEO_CODING_RECEIVER_H_
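A hedged usage sketch for the updated FrameForDecoding signature: the render time is now a pointer out-parameter and prefer_late_decoding must be passed explicitly, since the old render_timing default argument is gone. This mirrors the call shape in receiver_unittest.cc below; the helper name is hypothetical.

#include <cstdint>

#include "webrtc/modules/video_coding/receiver.h"

// Fetch, use, and release the next decodable frame, waiting up to 10 ms.
void DecodeOneFrame(webrtc::VCMReceiver* receiver) {
  int64_t next_render_time_ms = 0;
  webrtc::VCMEncodedFrame* frame =
      receiver->FrameForDecoding(10, &next_render_time_ms, false);
  if (frame != NULL)
    receiver->ReleaseFrame(frame);
}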
diff --git a/webrtc/modules/video_coding/main/source/receiver_unittest.cc b/webrtc/modules/video_coding/receiver_unittest.cc
index 359b241e72..1f3a144bad 100644
--- a/webrtc/modules/video_coding/main/source/receiver_unittest.cc
+++ b/webrtc/modules/video_coding/receiver_unittest.cc
@@ -11,14 +11,16 @@
#include <list>
#include <queue>
+#include <vector>
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/base/checks.h"
-#include "webrtc/modules/video_coding/main/source/packet.h"
-#include "webrtc/modules/video_coding/main/source/receiver.h"
-#include "webrtc/modules/video_coding/main/source/test/stream_generator.h"
-#include "webrtc/modules/video_coding/main/source/timing.h"
-#include "webrtc/modules/video_coding/main/test/test_util.h"
+#include "webrtc/modules/video_coding/encoded_frame.h"
+#include "webrtc/modules/video_coding/packet.h"
+#include "webrtc/modules/video_coding/receiver.h"
+#include "webrtc/modules/video_coding/test/stream_generator.h"
+#include "webrtc/modules/video_coding/timing.h"
+#include "webrtc/modules/video_coding/test/test_util.h"
#include "webrtc/system_wrappers/include/clock.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
@@ -33,14 +35,11 @@ class TestVCMReceiver : public ::testing::Test {
: clock_(new SimulatedClock(0)),
timing_(clock_.get()),
receiver_(&timing_, clock_.get(), &event_factory_) {
-
- stream_generator_.reset(new
- StreamGenerator(0, clock_->TimeInMilliseconds()));
+ stream_generator_.reset(
+ new StreamGenerator(0, clock_->TimeInMilliseconds()));
}
- virtual void SetUp() {
- receiver_.Reset();
- }
+ virtual void SetUp() { receiver_.Reset(); }
int32_t InsertPacket(int index) {
VCMPacket packet;
@@ -78,7 +77,7 @@ class TestVCMReceiver : public ::testing::Test {
bool DecodeNextFrame() {
int64_t render_time_ms = 0;
VCMEncodedFrame* frame =
- receiver_.FrameForDecoding(0, render_time_ms, false);
+ receiver_.FrameForDecoding(0, &render_time_ms, false);
if (!frame)
return false;
receiver_.ReleaseFrame(frame);
@@ -115,7 +114,7 @@ TEST_F(TestVCMReceiver, RenderBufferSize_SkipToKeyFrame) {
EXPECT_GE(InsertFrame(kVideoFrameDelta, true), kNoError);
}
EXPECT_EQ((kNumOfFrames - 1) * kDefaultFramePeriodMs,
- receiver_.RenderBufferSizeMs());
+ receiver_.RenderBufferSizeMs());
}
TEST_F(TestVCMReceiver, RenderBufferSize_NotAllComplete) {
@@ -131,7 +130,7 @@ TEST_F(TestVCMReceiver, RenderBufferSize_NotAllComplete) {
EXPECT_GE(InsertFrame(kVideoFrameDelta, true), kNoError);
}
EXPECT_EQ((num_of_frames - 1) * kDefaultFramePeriodMs,
- receiver_.RenderBufferSizeMs());
+ receiver_.RenderBufferSizeMs());
}
TEST_F(TestVCMReceiver, RenderBufferSize_NoKeyFrame) {
@@ -141,7 +140,8 @@ TEST_F(TestVCMReceiver, RenderBufferSize_NoKeyFrame) {
EXPECT_GE(InsertFrame(kVideoFrameDelta, true), kNoError);
}
int64_t next_render_time_ms = 0;
- VCMEncodedFrame* frame = receiver_.FrameForDecoding(10, next_render_time_ms);
+ VCMEncodedFrame* frame =
+ receiver_.FrameForDecoding(10, &next_render_time_ms, false);
EXPECT_TRUE(frame == NULL);
receiver_.ReleaseFrame(frame);
EXPECT_GE(InsertFrame(kVideoFrameDelta, false), kNoError);
@@ -159,7 +159,7 @@ TEST_F(TestVCMReceiver, NonDecodableDuration_Empty) {
const int kMaxNonDecodableDuration = 500;
const int kMinDelayMs = 500;
receiver_.SetNackSettings(kMaxNackListSize, kMaxPacketAgeToNack,
- kMaxNonDecodableDuration);
+ kMaxNonDecodableDuration);
EXPECT_GE(InsertFrame(kVideoFrameKey, true), kNoError);
// Advance time until it's time to decode the key frame.
clock_->AdvanceTimeMilliseconds(kMinDelayMs);
@@ -176,7 +176,7 @@ TEST_F(TestVCMReceiver, NonDecodableDuration_NoKeyFrame) {
const int kMaxPacketAgeToNack = 1000;
const int kMaxNonDecodableDuration = 500;
receiver_.SetNackSettings(kMaxNackListSize, kMaxPacketAgeToNack,
- kMaxNonDecodableDuration);
+ kMaxNonDecodableDuration);
const int kNumFrames = kDefaultFrameRate * kMaxNonDecodableDuration / 1000;
for (int i = 0; i < kNumFrames; ++i) {
EXPECT_GE(InsertFrame(kVideoFrameDelta, true), kNoError);
@@ -192,24 +192,23 @@ TEST_F(TestVCMReceiver, NonDecodableDuration_OneIncomplete) {
const size_t kMaxNackListSize = 1000;
const int kMaxPacketAgeToNack = 1000;
const int kMaxNonDecodableDuration = 500;
- const int kMaxNonDecodableDurationFrames = (kDefaultFrameRate *
- kMaxNonDecodableDuration + 500) / 1000;
+ const int kMaxNonDecodableDurationFrames =
+ (kDefaultFrameRate * kMaxNonDecodableDuration + 500) / 1000;
const int kMinDelayMs = 500;
receiver_.SetNackSettings(kMaxNackListSize, kMaxPacketAgeToNack,
- kMaxNonDecodableDuration);
+ kMaxNonDecodableDuration);
receiver_.SetMinReceiverDelay(kMinDelayMs);
int64_t key_frame_inserted = clock_->TimeInMilliseconds();
EXPECT_GE(InsertFrame(kVideoFrameKey, true), kNoError);
// Insert an incomplete frame.
EXPECT_GE(InsertFrame(kVideoFrameDelta, false), kNoError);
// Insert enough frames to have too long non-decodable sequence.
- for (int i = 0; i < kMaxNonDecodableDurationFrames;
- ++i) {
+ for (int i = 0; i < kMaxNonDecodableDurationFrames; ++i) {
EXPECT_GE(InsertFrame(kVideoFrameDelta, true), kNoError);
}
// Advance time until it's time to decode the key frame.
clock_->AdvanceTimeMilliseconds(kMinDelayMs - clock_->TimeInMilliseconds() -
- key_frame_inserted);
+ key_frame_inserted);
EXPECT_TRUE(DecodeNextFrame());
// Make sure we get a key frame request.
bool request_key_frame = false;
@@ -223,11 +222,11 @@ TEST_F(TestVCMReceiver, NonDecodableDuration_NoTrigger) {
const size_t kMaxNackListSize = 1000;
const int kMaxPacketAgeToNack = 1000;
const int kMaxNonDecodableDuration = 500;
- const int kMaxNonDecodableDurationFrames = (kDefaultFrameRate *
- kMaxNonDecodableDuration + 500) / 1000;
+ const int kMaxNonDecodableDurationFrames =
+ (kDefaultFrameRate * kMaxNonDecodableDuration + 500) / 1000;
const int kMinDelayMs = 500;
receiver_.SetNackSettings(kMaxNackListSize, kMaxPacketAgeToNack,
- kMaxNonDecodableDuration);
+ kMaxNonDecodableDuration);
receiver_.SetMinReceiverDelay(kMinDelayMs);
int64_t key_frame_inserted = clock_->TimeInMilliseconds();
EXPECT_GE(InsertFrame(kVideoFrameKey, true), kNoError);
@@ -235,13 +234,12 @@ TEST_F(TestVCMReceiver, NonDecodableDuration_NoTrigger) {
EXPECT_GE(InsertFrame(kVideoFrameDelta, false), kNoError);
// Insert all but one frame to not trigger a key frame request due to
// too long duration of non-decodable frames.
- for (int i = 0; i < kMaxNonDecodableDurationFrames - 1;
- ++i) {
+ for (int i = 0; i < kMaxNonDecodableDurationFrames - 1; ++i) {
EXPECT_GE(InsertFrame(kVideoFrameDelta, true), kNoError);
}
// Advance time until it's time to decode the key frame.
clock_->AdvanceTimeMilliseconds(kMinDelayMs - clock_->TimeInMilliseconds() -
- key_frame_inserted);
+ key_frame_inserted);
EXPECT_TRUE(DecodeNextFrame());
// Make sure we don't get a key frame request since we haven't generated
// enough frames.
@@ -256,25 +254,24 @@ TEST_F(TestVCMReceiver, NonDecodableDuration_NoTrigger2) {
const size_t kMaxNackListSize = 1000;
const int kMaxPacketAgeToNack = 1000;
const int kMaxNonDecodableDuration = 500;
- const int kMaxNonDecodableDurationFrames = (kDefaultFrameRate *
- kMaxNonDecodableDuration + 500) / 1000;
+ const int kMaxNonDecodableDurationFrames =
+ (kDefaultFrameRate * kMaxNonDecodableDuration + 500) / 1000;
const int kMinDelayMs = 500;
receiver_.SetNackSettings(kMaxNackListSize, kMaxPacketAgeToNack,
- kMaxNonDecodableDuration);
+ kMaxNonDecodableDuration);
receiver_.SetMinReceiverDelay(kMinDelayMs);
int64_t key_frame_inserted = clock_->TimeInMilliseconds();
EXPECT_GE(InsertFrame(kVideoFrameKey, true), kNoError);
// Insert enough frames to have too long non-decodable sequence, except that
// we don't have any losses.
- for (int i = 0; i < kMaxNonDecodableDurationFrames;
- ++i) {
+ for (int i = 0; i < kMaxNonDecodableDurationFrames; ++i) {
EXPECT_GE(InsertFrame(kVideoFrameDelta, true), kNoError);
}
// Insert an incomplete frame.
EXPECT_GE(InsertFrame(kVideoFrameDelta, false), kNoError);
// Advance time until it's time to decode the key frame.
clock_->AdvanceTimeMilliseconds(kMinDelayMs - clock_->TimeInMilliseconds() -
- key_frame_inserted);
+ key_frame_inserted);
EXPECT_TRUE(DecodeNextFrame());
// Make sure we don't get a key frame request since the non-decodable duration
// is only one frame.
@@ -289,25 +286,24 @@ TEST_F(TestVCMReceiver, NonDecodableDuration_KeyFrameAfterIncompleteFrames) {
const size_t kMaxNackListSize = 1000;
const int kMaxPacketAgeToNack = 1000;
const int kMaxNonDecodableDuration = 500;
- const int kMaxNonDecodableDurationFrames = (kDefaultFrameRate *
- kMaxNonDecodableDuration + 500) / 1000;
+ const int kMaxNonDecodableDurationFrames =
+ (kDefaultFrameRate * kMaxNonDecodableDuration + 500) / 1000;
const int kMinDelayMs = 500;
receiver_.SetNackSettings(kMaxNackListSize, kMaxPacketAgeToNack,
- kMaxNonDecodableDuration);
+ kMaxNonDecodableDuration);
receiver_.SetMinReceiverDelay(kMinDelayMs);
int64_t key_frame_inserted = clock_->TimeInMilliseconds();
EXPECT_GE(InsertFrame(kVideoFrameKey, true), kNoError);
// Insert an incomplete frame.
EXPECT_GE(InsertFrame(kVideoFrameDelta, false), kNoError);
// Insert enough frames to have too long non-decodable sequence.
- for (int i = 0; i < kMaxNonDecodableDurationFrames;
- ++i) {
+ for (int i = 0; i < kMaxNonDecodableDurationFrames; ++i) {
EXPECT_GE(InsertFrame(kVideoFrameDelta, true), kNoError);
}
EXPECT_GE(InsertFrame(kVideoFrameKey, true), kNoError);
// Advance time until it's time to decode the key frame.
clock_->AdvanceTimeMilliseconds(kMinDelayMs - clock_->TimeInMilliseconds() -
- key_frame_inserted);
+ key_frame_inserted);
EXPECT_TRUE(DecodeNextFrame());
// Make sure we don't get a key frame request since we have a key frame
// in the list.
@@ -338,7 +334,7 @@ class SimulatedClockWithFrames : public SimulatedClock {
// Return true if some frame arrives between now and now+|milliseconds|.
bool AdvanceTimeMilliseconds(int64_t milliseconds, bool stop_on_frame) {
return AdvanceTimeMicroseconds(milliseconds * 1000, stop_on_frame);
- };
+ }
bool AdvanceTimeMicroseconds(int64_t microseconds, bool stop_on_frame) {
int64_t start_time = TimeInMicroseconds();
@@ -362,7 +358,7 @@ class SimulatedClockWithFrames : public SimulatedClock {
SimulatedClock::AdvanceTimeMicroseconds(end_time - TimeInMicroseconds());
}
return frame_injected;
- };
+ }
// Input timestamps are in unit Milliseconds.
// And |arrive_timestamps| must be positive and in increasing order.
@@ -429,7 +425,7 @@ class FrameInjectEvent : public EventWrapper {
bool Set() override { return true; }
- EventTypeWrapper Wait(unsigned long max_time) override {
+ EventTypeWrapper Wait(unsigned long max_time) override { // NOLINT
if (clock_->AdvanceTimeMilliseconds(max_time, stop_on_frame_) &&
stop_on_frame_) {
return EventTypeWrapper::kEventSignaled;
@@ -445,7 +441,6 @@ class FrameInjectEvent : public EventWrapper {
class VCMReceiverTimingTest : public ::testing::Test {
protected:
-
VCMReceiverTimingTest()
: clock_(&stream_generator_, &receiver_),
@@ -458,7 +453,6 @@ class VCMReceiverTimingTest : public ::testing::Test {
rtc::scoped_ptr<EventWrapper>(
new FrameInjectEvent(&clock_, true))) {}
-
virtual void SetUp() { receiver_.Reset(); }
SimulatedClockWithFrames clock_;
@@ -504,7 +498,7 @@ TEST_F(VCMReceiverTimingTest, FrameForDecoding) {
while (num_frames_return < kNumFrames) {
int64_t start_time = clock_.TimeInMilliseconds();
VCMEncodedFrame* frame =
- receiver_.FrameForDecoding(kMaxWaitTime, next_render_time, false);
+ receiver_.FrameForDecoding(kMaxWaitTime, &next_render_time, false);
int64_t end_time = clock_.TimeInMilliseconds();
// In any case the FrameForDecoding should not wait longer than
@@ -523,4 +517,59 @@ TEST_F(VCMReceiverTimingTest, FrameForDecoding) {
}
}
+// Test whether VCMReceiver::FrameForDecoding handles the parameters
+// |prefer_late_decoding| and |max_wait_time_ms| correctly:
+// 1. The call should never take more than |max_wait_time_ms| to return.
+// 2. If the function exits before now + |max_wait_time_ms|, a frame must be
+//    returned, and the end time must equal the render timestamp minus the
+//    delay for decoding and rendering.
+TEST_F(VCMReceiverTimingTest, FrameForDecodingPreferLateDecoding) {
+ const size_t kNumFrames = 100;
+ const int kFramePeriod = 40;
+
+ int64_t arrive_timestamps[kNumFrames];
+ int64_t render_timestamps[kNumFrames];
+ int64_t next_render_time;
+
+ int render_delay_ms;
+ int max_decode_ms;
+ int dummy;
+ timing_.GetTimings(&dummy, &max_decode_ms, &dummy, &dummy, &dummy, &dummy,
+ &render_delay_ms);
+
+ // Construct test samples.
+  // render_timestamps are the timestamps stored in the frame;
+  // arrive_timestamps controls when the frame packet is received.
+ for (size_t i = 0; i < kNumFrames; i++) {
+    // Preset the frame rate to 25 Hz, but add a reasonable deviation to
+    // arrive_timestamps to mimic Internet jitter.
+ arrive_timestamps[i] =
+ (i + 1) * kFramePeriod + (i % 10) * ((i % 2) ? 1 : -1);
+ render_timestamps[i] = (i + 1) * kFramePeriod;
+ }
+
+ clock_.SetFrames(arrive_timestamps, render_timestamps, kNumFrames);
+
+ // Record how many frames we finally get out of the receiver.
+ size_t num_frames_return = 0;
+ const int64_t kMaxWaitTime = 30;
+ bool prefer_late_decoding = true;
+ while (num_frames_return < kNumFrames) {
+ int64_t start_time = clock_.TimeInMilliseconds();
+
+ VCMEncodedFrame* frame = receiver_.FrameForDecoding(
+ kMaxWaitTime, &next_render_time, prefer_late_decoding);
+ int64_t end_time = clock_.TimeInMilliseconds();
+ if (frame) {
+ EXPECT_EQ(frame->RenderTimeMs() - max_decode_ms - render_delay_ms,
+ end_time);
+ receiver_.ReleaseFrame(frame);
+ ++num_frames_return;
+ } else {
+ EXPECT_EQ(kMaxWaitTime, end_time - start_time);
+ }
+ }
+}
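For intuition, a worked instance of the EXPECT_EQ above, with hypothetical numbers not taken from the test: if a frame carries RenderTimeMs() == 200 while the timing module reports max_decode_ms == 25 and render_delay_ms == 10, then with prefer_late_decoding the receiver should return the frame at 200 - 25 - 10 == 165 ms, i.e. as late as possible while still leaving time to decode and render.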
+
} // namespace webrtc
diff --git a/webrtc/modules/video_coding/rtt_filter.cc b/webrtc/modules/video_coding/rtt_filter.cc
new file mode 100644
index 0000000000..742f70f1c1
--- /dev/null
+++ b/webrtc/modules/video_coding/rtt_filter.cc
@@ -0,0 +1,165 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/video_coding/rtt_filter.h"
+
+#include <math.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "webrtc/modules/video_coding/internal_defines.h"
+
+namespace webrtc {
+
+VCMRttFilter::VCMRttFilter()
+ : _filtFactMax(35),
+ _jumpStdDevs(2.5),
+ _driftStdDevs(3.5),
+ _detectThreshold(kMaxDriftJumpCount) {
+ Reset();
+}
+
+VCMRttFilter& VCMRttFilter::operator=(const VCMRttFilter& rhs) {
+ if (this != &rhs) {
+ _gotNonZeroUpdate = rhs._gotNonZeroUpdate;
+ _avgRtt = rhs._avgRtt;
+ _varRtt = rhs._varRtt;
+ _maxRtt = rhs._maxRtt;
+ _filtFactCount = rhs._filtFactCount;
+ _jumpCount = rhs._jumpCount;
+ _driftCount = rhs._driftCount;
+ memcpy(_jumpBuf, rhs._jumpBuf, sizeof(_jumpBuf));
+ memcpy(_driftBuf, rhs._driftBuf, sizeof(_driftBuf));
+ }
+ return *this;
+}
+
+void VCMRttFilter::Reset() {
+ _gotNonZeroUpdate = false;
+ _avgRtt = 0;
+ _varRtt = 0;
+ _maxRtt = 0;
+ _filtFactCount = 1;
+ _jumpCount = 0;
+ _driftCount = 0;
+  memset(_jumpBuf, 0, sizeof(_jumpBuf));
+  memset(_driftBuf, 0, sizeof(_driftBuf));
+}
+
+void VCMRttFilter::Update(int64_t rttMs) {
+ if (!_gotNonZeroUpdate) {
+ if (rttMs == 0) {
+ return;
+ }
+ _gotNonZeroUpdate = true;
+ }
+
+ // Sanity check
+ if (rttMs > 3000) {
+ rttMs = 3000;
+ }
+
+ double filtFactor = 0;
+ if (_filtFactCount > 1) {
+ filtFactor = static_cast<double>(_filtFactCount - 1) / _filtFactCount;
+ }
+ _filtFactCount++;
+ if (_filtFactCount > _filtFactMax) {
+ // This prevents filtFactor from going above
+ // (_filtFactMax - 1) / _filtFactMax,
+ // e.g., _filtFactMax = 50 => filtFactor = 49/50 = 0.98
+ _filtFactCount = _filtFactMax;
+ }
+ double oldAvg = _avgRtt;
+ double oldVar = _varRtt;
+ _avgRtt = filtFactor * _avgRtt + (1 - filtFactor) * rttMs;
+ _varRtt = filtFactor * _varRtt +
+ (1 - filtFactor) * (rttMs - _avgRtt) * (rttMs - _avgRtt);
+ _maxRtt = VCM_MAX(rttMs, _maxRtt);
+ if (!JumpDetection(rttMs) || !DriftDetection(rttMs)) {
+ // In some cases we don't want to update the statistics
+ _avgRtt = oldAvg;
+ _varRtt = oldVar;
+ }
+}
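For reference, the update above is a standard exponentially weighted mean/variance recurrence. A self-contained sketch of the same arithmetic (hypothetical free function, not part of this patch; alpha corresponds to filtFactor, saturating at 34/35 given _filtFactMax = 35 in the constructor):

#include <cstdint>

// One filter step; avg and var persist between calls.
void EwmaStep(double alpha, int64_t rtt_ms, double* avg, double* var) {
  *avg = alpha * *avg + (1 - alpha) * rtt_ms;
  // The deviation is taken against the *updated* average, matching
  // VCMRttFilter::Update above.
  const double diff = rtt_ms - *avg;
  *var = alpha * *var + (1 - alpha) * diff * diff;
}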
+
+bool VCMRttFilter::JumpDetection(int64_t rttMs) {
+ double diffFromAvg = _avgRtt - rttMs;
+ if (fabs(diffFromAvg) > _jumpStdDevs * sqrt(_varRtt)) {
+ int diffSign = (diffFromAvg >= 0) ? 1 : -1;
+ int jumpCountSign = (_jumpCount >= 0) ? 1 : -1;
+ if (diffSign != jumpCountSign) {
+      // Since the signs differ, the samples currently
+      // in the buffer are useless, as they represent a
+      // jump in a different direction.
+ _jumpCount = 0;
+ }
+ if (abs(_jumpCount) < kMaxDriftJumpCount) {
+      // Update the buffer used for the short-term statistics.
+      // The sign of the diff is used for updating the counter, since
+      // we want to use the same buffer to keep track of when
+      // the RTT jumps both down and up.
+ _jumpBuf[abs(_jumpCount)] = rttMs;
+ _jumpCount += diffSign;
+ }
+ if (abs(_jumpCount) >= _detectThreshold) {
+ // Detected an RTT jump
+ ShortRttFilter(_jumpBuf, abs(_jumpCount));
+ _filtFactCount = _detectThreshold + 1;
+ _jumpCount = 0;
+ } else {
+ return false;
+ }
+ } else {
+ _jumpCount = 0;
+ }
+ return true;
+}
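A short trace of the sign bookkeeping above, with hypothetical values: if _avgRtt is near 100 ms and a 300 ms sample clears the 2.5-standard-deviation gate, diffFromAvg = -200 gives diffSign = -1, so _jumpCount steps toward -kMaxDriftJumpCount. If a later 50 ms sample also clears the gate, the sign flips, the buffered samples are discarded (_jumpCount = 0), and counting restarts in the other direction.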
+
+bool VCMRttFilter::DriftDetection(int64_t rttMs) {
+ if (_maxRtt - _avgRtt > _driftStdDevs * sqrt(_varRtt)) {
+ if (_driftCount < kMaxDriftJumpCount) {
+      // Update the buffer used for the short-term statistics.
+ _driftBuf[_driftCount] = rttMs;
+ _driftCount++;
+ }
+ if (_driftCount >= _detectThreshold) {
+ // Detected an RTT drift
+ ShortRttFilter(_driftBuf, _driftCount);
+ _filtFactCount = _detectThreshold + 1;
+ _driftCount = 0;
+ }
+ } else {
+ _driftCount = 0;
+ }
+ return true;
+}
+
+void VCMRttFilter::ShortRttFilter(int64_t* buf, uint32_t length) {
+ if (length == 0) {
+ return;
+ }
+ _maxRtt = 0;
+ _avgRtt = 0;
+ for (uint32_t i = 0; i < length; i++) {
+ if (buf[i] > _maxRtt) {
+ _maxRtt = buf[i];
+ }
+ _avgRtt += buf[i];
+ }
+ _avgRtt = _avgRtt / static_cast<double>(length);
+}
+
+int64_t VCMRttFilter::RttMs() const {
+ return static_cast<int64_t>(_maxRtt + 0.5);
+}
+} // namespace webrtc
diff --git a/webrtc/modules/video_coding/rtt_filter.h b/webrtc/modules/video_coding/rtt_filter.h
new file mode 100644
index 0000000000..f5de532cfc
--- /dev/null
+++ b/webrtc/modules/video_coding/rtt_filter.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_RTT_FILTER_H_
+#define WEBRTC_MODULES_VIDEO_CODING_RTT_FILTER_H_
+
+#include "webrtc/typedefs.h"
+
+namespace webrtc {
+
+class VCMRttFilter {
+ public:
+ VCMRttFilter();
+
+ VCMRttFilter& operator=(const VCMRttFilter& rhs);
+
+ // Resets the filter.
+ void Reset();
+ // Updates the filter with a new sample.
+ void Update(int64_t rttMs);
+ // A getter function for the current RTT level in ms.
+ int64_t RttMs() const;
+
+ private:
+ // The size of the drift and jump memory buffers
+ // and thus also the detection threshold for these
+ // detectors in number of samples.
+ enum { kMaxDriftJumpCount = 5 };
+  // Detects RTT jumps by comparing the difference between
+  // the samples and the average to the standard deviation.
+  // Returns true if the long-term statistics should be updated
+  // and false otherwise.
+  bool JumpDetection(int64_t rttMs);
+  // Detects RTT drifts by comparing the difference between
+  // the maximum and the average to the standard deviation.
+  // Returns true if the long-term statistics should be updated
+  // and false otherwise.
+  bool DriftDetection(int64_t rttMs);
+  // Computes the short-term average and maximum of the vector buf.
+  void ShortRttFilter(int64_t* buf, uint32_t length);
+
+ bool _gotNonZeroUpdate;
+ double _avgRtt;
+ double _varRtt;
+ int64_t _maxRtt;
+ uint32_t _filtFactCount;
+ const uint32_t _filtFactMax;
+ const double _jumpStdDevs;
+ const double _driftStdDevs;
+ int32_t _jumpCount;
+ int32_t _driftCount;
+ const int32_t _detectThreshold;
+ int64_t _jumpBuf[kMaxDriftJumpCount];
+ int64_t _driftBuf[kMaxDriftJumpCount];
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_CODING_RTT_FILTER_H_
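A minimal usage sketch of the class declared above (caller code hypothetical, not part of this patch):

#include "webrtc/modules/video_coding/rtt_filter.h"

int64_t FilteredRtt() {
  webrtc::VCMRttFilter filter;  // the constructor calls Reset()
  filter.Update(100);           // feed RTT samples in milliseconds
  filter.Update(110);
  filter.Update(350);           // a candidate jump sample
  return filter.RttMs();        // rounded maximum of the filtered RTT
}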
diff --git a/webrtc/modules/video_coding/main/source/session_info.cc b/webrtc/modules/video_coding/session_info.cc
index 9a1bc54e52..8701098639 100644
--- a/webrtc/modules/video_coding/main/source/session_info.cc
+++ b/webrtc/modules/video_coding/session_info.cc
@@ -8,10 +8,10 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/video_coding/main/source/session_info.h"
+#include "webrtc/modules/video_coding/session_info.h"
-#include "webrtc/modules/video_coding/main/source/packet.h"
-#include "webrtc/system_wrappers/include/logging.h"
+#include "webrtc/base/logging.h"
+#include "webrtc/modules/video_coding/packet.h"
namespace webrtc {
@@ -32,8 +32,7 @@ VCMSessionInfo::VCMSessionInfo()
empty_seq_num_low_(-1),
empty_seq_num_high_(-1),
first_packet_seq_num_(-1),
- last_packet_seq_num_(-1) {
-}
+ last_packet_seq_num_(-1) {}
void VCMSessionInfo::UpdateDataPointers(const uint8_t* old_base_ptr,
const uint8_t* new_base_ptr) {
@@ -88,8 +87,8 @@ bool VCMSessionInfo::LayerSync() const {
if (packets_.front().codecSpecificHeader.codec == kRtpVideoVp8) {
return packets_.front().codecSpecificHeader.codecHeader.VP8.layerSync;
} else if (packets_.front().codecSpecificHeader.codec == kRtpVideoVp9) {
- return
- packets_.front().codecSpecificHeader.codecHeader.VP9.temporal_up_switch;
+ return packets_.front()
+ .codecSpecificHeader.codecHeader.VP9.temporal_up_switch;
} else {
return false;
}
@@ -126,7 +125,7 @@ void VCMSessionInfo::SetGofInfo(const GofInfoVP9& gof_info, size_t idx) {
gof_info.temporal_up_switch[idx];
packets_.front().codecSpecificHeader.codecHeader.VP9.num_ref_pics =
gof_info.num_ref_pics[idx];
- for (size_t i = 0; i < gof_info.num_ref_pics[idx]; ++i) {
+ for (uint8_t i = 0; i < gof_info.num_ref_pics[idx]; ++i) {
packets_.front().codecSpecificHeader.codecHeader.VP9.pid_diff[i] =
gof_info.pid_diff[idx][i];
}
@@ -193,9 +192,7 @@ size_t VCMSessionInfo::InsertBuffer(uint8_t* frame_buffer,
while (nalu_ptr < packet_buffer + packet.sizeBytes) {
size_t length = BufferToUWord16(nalu_ptr);
nalu_ptr += kLengthFieldLength;
- frame_buffer_ptr += Insert(nalu_ptr,
- length,
- packet.insertStartCode,
+ frame_buffer_ptr += Insert(nalu_ptr, length, packet.insertStartCode,
const_cast<uint8_t*>(frame_buffer_ptr));
nalu_ptr += length;
}
@@ -203,14 +200,12 @@ size_t VCMSessionInfo::InsertBuffer(uint8_t* frame_buffer,
return packet.sizeBytes;
}
ShiftSubsequentPackets(
- packet_it,
- packet.sizeBytes +
- (packet.insertStartCode ? kH264StartCodeLengthBytes : 0));
-
- packet.sizeBytes = Insert(packet_buffer,
- packet.sizeBytes,
- packet.insertStartCode,
- const_cast<uint8_t*>(packet.dataPtr));
+ packet_it, packet.sizeBytes +
+ (packet.insertStartCode ? kH264StartCodeLengthBytes : 0));
+
+ packet.sizeBytes =
+ Insert(packet_buffer, packet.sizeBytes, packet.insertStartCode,
+ const_cast<uint8_t*>(packet.dataPtr));
return packet.sizeBytes;
}
@@ -223,8 +218,7 @@ size_t VCMSessionInfo::Insert(const uint8_t* buffer,
memcpy(frame_buffer, startCode, kH264StartCodeLengthBytes);
}
memcpy(frame_buffer + (insert_start_code ? kH264StartCodeLengthBytes : 0),
- buffer,
- length);
+ buffer, length);
length += (insert_start_code ? kH264StartCodeLengthBytes : 0);
return length;
@@ -276,13 +270,12 @@ void VCMSessionInfo::UpdateDecodableSession(const FrameData& frame_data) {
// thresholds.
const float kLowPacketPercentageThreshold = 0.2f;
const float kHighPacketPercentageThreshold = 0.8f;
- if (frame_data.rtt_ms < kRttThreshold
- || frame_type_ == kVideoFrameKey
- || !HaveFirstPacket()
- || (NumPackets() <= kHighPacketPercentageThreshold
- * frame_data.rolling_average_packets_per_frame
- && NumPackets() > kLowPacketPercentageThreshold
- * frame_data.rolling_average_packets_per_frame))
+ if (frame_data.rtt_ms < kRttThreshold || frame_type_ == kVideoFrameKey ||
+ !HaveFirstPacket() ||
+ (NumPackets() <= kHighPacketPercentageThreshold *
+ frame_data.rolling_average_packets_per_frame &&
+ NumPackets() > kLowPacketPercentageThreshold *
+ frame_data.rolling_average_packets_per_frame))
return;
decodable_ = true;
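Restated as a standalone predicate, the reformatted condition above marks a session decodable only when every guard passes (sketch only; kRttThreshold is defined earlier in the function and its value is not shown in this hunk, so it is passed in as a parameter here):

bool ShouldMarkDecodable(int64_t rtt_ms, int64_t rtt_threshold, bool key_frame,
                         bool have_first_packet, int num_packets,
                         float avg_packets_per_frame) {
  if (rtt_ms < rtt_threshold || key_frame || !have_first_packet)
    return false;
  // Stay undecodable while the packet count sits inside the
  // (20%, 80%] band of the rolling average packets-per-frame.
  const bool in_band = num_packets <= 0.8f * avg_packets_per_frame &&
                       num_packets > 0.2f * avg_packets_per_frame;
  return !in_band;
}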
@@ -308,7 +301,7 @@ VCMSessionInfo::PacketIterator VCMSessionInfo::FindNaluEnd(
// Find the end of the NAL unit.
for (; packet_it != packets_.end(); ++packet_it) {
if (((*packet_it).completeNALU == kNaluComplete &&
- (*packet_it).sizeBytes > 0) ||
+ (*packet_it).sizeBytes > 0) ||
// Found next NALU.
(*packet_it).completeNALU == kNaluStart)
return --packet_it;
@@ -348,7 +341,7 @@ size_t VCMSessionInfo::BuildVP8FragmentationHeader(
memset(fragmentation->fragmentationLength, 0,
kMaxVP8Partitions * sizeof(size_t));
if (packets_.empty())
- return new_length;
+ return new_length;
PacketIterator it = FindNextPartitionBeginning(packets_.begin());
while (it != packets_.end()) {
const int partition_id =
@@ -371,7 +364,7 @@ size_t VCMSessionInfo::BuildVP8FragmentationHeader(
// Set all empty fragments to start where the previous fragment ends,
// and have zero length.
if (fragmentation->fragmentationLength[0] == 0)
- fragmentation->fragmentationOffset[0] = 0;
+ fragmentation->fragmentationOffset[0] = 0;
for (int i = 1; i < fragmentation->fragmentationVectorSize; ++i) {
if (fragmentation->fragmentationLength[i] == 0)
fragmentation->fragmentationOffset[i] =
@@ -379,7 +372,7 @@ size_t VCMSessionInfo::BuildVP8FragmentationHeader(
fragmentation->fragmentationLength[i - 1];
assert(i == 0 ||
fragmentation->fragmentationOffset[i] >=
- fragmentation->fragmentationOffset[i - 1]);
+ fragmentation->fragmentationOffset[i - 1]);
}
assert(new_length <= frame_buffer_length);
return new_length;
@@ -424,8 +417,8 @@ bool VCMSessionInfo::InSequence(const PacketIterator& packet_it,
// If the two iterators are pointing to the same packet they are considered
// to be in sequence.
return (packet_it == prev_packet_it ||
- (static_cast<uint16_t>((*prev_packet_it).seqNum + 1) ==
- (*packet_it).seqNum));
+ (static_cast<uint16_t>((*prev_packet_it).seqNum + 1) ==
+ (*packet_it).seqNum));
}
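The uint16_t cast in the return statement keeps the comparison correct across sequence-number wrap-around: static_cast<uint16_t>(65535 + 1) == 0, so a packet with seqNum 0 that directly follows seqNum 65535 is still treated as in sequence.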
size_t VCMSessionInfo::MakeDecodable() {
@@ -435,8 +428,7 @@ size_t VCMSessionInfo::MakeDecodable() {
}
PacketIterator it = packets_.begin();
// Make sure we remove the first NAL unit if it's not decodable.
- if ((*it).completeNALU == kNaluIncomplete ||
- (*it).completeNALU == kNaluEnd) {
+ if ((*it).completeNALU == kNaluIncomplete || (*it).completeNALU == kNaluEnd) {
PacketIterator nalu_end = FindNaluEnd(it);
return_length += DeletePacketData(it, nalu_end);
it = nalu_end;
@@ -445,7 +437,7 @@ size_t VCMSessionInfo::MakeDecodable() {
// Take care of the rest of the NAL units.
for (; it != packets_.end(); ++it) {
bool start_of_nalu = ((*it).completeNALU == kNaluStart ||
- (*it).completeNALU == kNaluComplete);
+ (*it).completeNALU == kNaluComplete);
if (!start_of_nalu && !InSequence(it, prev_it)) {
// Found a sequence number gap due to packet loss.
PacketIterator nalu_end = FindNaluEnd(it);
@@ -463,18 +455,15 @@ void VCMSessionInfo::SetNotDecodableIfIncomplete() {
decodable_ = false;
}
-bool
-VCMSessionInfo::HaveFirstPacket() const {
+bool VCMSessionInfo::HaveFirstPacket() const {
return !packets_.empty() && (first_packet_seq_num_ != -1);
}
-bool
-VCMSessionInfo::HaveLastPacket() const {
+bool VCMSessionInfo::HaveLastPacket() const {
return !packets_.empty() && (last_packet_seq_num_ != -1);
}
-bool
-VCMSessionInfo::session_nack() const {
+bool VCMSessionInfo::session_nack() const {
return session_nack_;
}
@@ -502,8 +491,8 @@ int VCMSessionInfo::InsertPacket(const VCMPacket& packet,
break;
// Check for duplicate packets.
- if (rit != packets_.rend() &&
- (*rit).seqNum == packet.seqNum && (*rit).sizeBytes > 0)
+ if (rit != packets_.rend() && (*rit).seqNum == packet.seqNum &&
+ (*rit).sizeBytes > 0)
return -2;
if (packet.codec == kVideoCodecH264) {
@@ -572,8 +561,8 @@ void VCMSessionInfo::InformOfEmptyPacket(uint16_t seq_num) {
empty_seq_num_high_ = seq_num;
else
empty_seq_num_high_ = LatestSequenceNumber(seq_num, empty_seq_num_high_);
- if (empty_seq_num_low_ == -1 || IsNewerSequenceNumber(empty_seq_num_low_,
- seq_num))
+ if (empty_seq_num_low_ == -1 ||
+ IsNewerSequenceNumber(empty_seq_num_low_, seq_num))
empty_seq_num_low_ = seq_num;
}
diff --git a/webrtc/modules/video_coding/main/source/session_info.h b/webrtc/modules/video_coding/session_info.h
index 88071e19d5..e9ff25166d 100644
--- a/webrtc/modules/video_coding/main/source/session_info.h
+++ b/webrtc/modules/video_coding/session_info.h
@@ -8,14 +8,14 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_SESSION_INFO_H_
-#define WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_SESSION_INFO_H_
+#ifndef WEBRTC_MODULES_VIDEO_CODING_SESSION_INFO_H_
+#define WEBRTC_MODULES_VIDEO_CODING_SESSION_INFO_H_
#include <list>
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/modules/video_coding/main/interface/video_coding.h"
-#include "webrtc/modules/video_coding/main/source/packet.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/video_coding/include/video_coding.h"
+#include "webrtc/modules/video_coding/packet.h"
#include "webrtc/typedefs.h"
namespace webrtc {
@@ -116,8 +116,7 @@ class VCMSessionInfo {
PacketIterator FindPartitionEnd(PacketIterator it) const;
static bool InSequence(const PacketIterator& it,
const PacketIterator& prev_it);
- size_t InsertBuffer(uint8_t* frame_buffer,
- PacketIterator packetIterator);
+ size_t InsertBuffer(uint8_t* frame_buffer, PacketIterator packetIterator);
size_t Insert(const uint8_t* buffer,
size_t length,
bool insert_start_code,
@@ -126,8 +125,7 @@ class VCMSessionInfo {
PacketIterator FindNaluEnd(PacketIterator packet_iter) const;
// Deletes the data of all packets between |start| and |end|, inclusively.
// Note that this function doesn't delete the actual packets.
- size_t DeletePacketData(PacketIterator start,
- PacketIterator end);
+ size_t DeletePacketData(PacketIterator start, PacketIterator end);
void UpdateCompleteSession();
// When enabled, determine if session is decodable, i.e. incomplete but
@@ -169,4 +167,4 @@ class VCMSessionInfo {
} // namespace webrtc
-#endif // WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_SESSION_INFO_H_
+#endif // WEBRTC_MODULES_VIDEO_CODING_SESSION_INFO_H_
diff --git a/webrtc/modules/video_coding/main/source/session_info_unittest.cc b/webrtc/modules/video_coding/session_info_unittest.cc
index 58c352d3fc..4019d63a5f 100644
--- a/webrtc/modules/video_coding/main/source/session_info_unittest.cc
+++ b/webrtc/modules/video_coding/session_info_unittest.cc
@@ -11,9 +11,9 @@
#include <string.h>
#include "testing/gtest/include/gtest/gtest.h"
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/modules/video_coding/main/source/packet.h"
-#include "webrtc/modules/video_coding/main/source/session_info.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/video_coding/packet.h"
+#include "webrtc/modules/video_coding/session_info.h"
namespace webrtc {
@@ -81,7 +81,7 @@ class TestVP8Partitions : public TestSessionInfo {
fragmentation_.fragmentationLength[partition_id]);
for (int i = 0; i < packets_expected; ++i) {
size_t packet_index = fragmentation_.fragmentationOffset[partition_id] +
- i * packet_buffer_size();
+ i * packet_buffer_size();
if (packet_index + packet_buffer_size() > frame_buffer_size())
return false;
VerifyPacket(frame_buffer_ + packet_index, start_value + i);
@@ -122,8 +122,7 @@ class TestNackList : public TestSessionInfo {
memset(seq_num_list_, 0, sizeof(seq_num_list_));
}
- void BuildSeqNumList(uint16_t low,
- uint16_t high) {
+ void BuildSeqNumList(uint16_t low, uint16_t high) {
size_t i = 0;
while (low != high + 1) {
EXPECT_LT(i, kMaxSeqNumListLength);
@@ -173,14 +172,11 @@ TEST_F(TestSessionInfo, TestSimpleAPIs) {
// To make things more difficult we will make sure to have a wrap here.
packet_.isFirstPacket = false;
packet_.markerBit = true;
- packet_.seqNum = 2;
+ packet_.seqNum = 2;
packet_.sizeBytes = 0;
packet_.frameType = kEmptyFrame;
- EXPECT_EQ(0,
- session_.InsertPacket(packet_,
- frame_buffer_,
- kNoErrors,
- frame_data));
+ EXPECT_EQ(
+ 0, session_.InsertPacket(packet_, frame_buffer_, kNoErrors, frame_data));
EXPECT_EQ(packet_.seqNum, session_.HighSequenceNumber());
}
@@ -198,9 +194,8 @@ TEST_F(TestSessionInfo, NormalOperation) {
packet_.seqNum += 1;
FillPacket(i);
ASSERT_EQ(packet_buffer_size(),
- static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
- kNoErrors,
- frame_data)));
+ static_cast<size_t>(session_.InsertPacket(
+ packet_, frame_buffer_, kNoErrors, frame_data)));
}
packet_.seqNum += 1;
@@ -223,9 +218,8 @@ TEST_F(TestSessionInfo, ErrorsEqualDecodableState) {
packet_.markerBit = false;
FillPacket(3);
EXPECT_EQ(packet_buffer_size(),
- static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
- kWithErrors,
- frame_data)));
+ static_cast<size_t>(session_.InsertPacket(
+ packet_, frame_buffer_, kWithErrors, frame_data)));
EXPECT_TRUE(session_.decodable());
}
@@ -237,18 +231,16 @@ TEST_F(TestSessionInfo, SelectiveDecodableState) {
frame_data.rolling_average_packets_per_frame = 11;
frame_data.rtt_ms = 150;
EXPECT_EQ(packet_buffer_size(),
- static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
- kSelectiveErrors,
- frame_data)));
+ static_cast<size_t>(session_.InsertPacket(
+ packet_, frame_buffer_, kSelectiveErrors, frame_data)));
EXPECT_FALSE(session_.decodable());
packet_.seqNum -= 1;
FillPacket(0);
packet_.isFirstPacket = true;
EXPECT_EQ(packet_buffer_size(),
- static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
- kSelectiveErrors,
- frame_data)));
+ static_cast<size_t>(session_.InsertPacket(
+ packet_, frame_buffer_, kSelectiveErrors, frame_data)));
EXPECT_TRUE(session_.decodable());
packet_.isFirstPacket = false;
@@ -256,19 +248,17 @@ TEST_F(TestSessionInfo, SelectiveDecodableState) {
for (int i = 2; i < 8; ++i) {
packet_.seqNum += 1;
FillPacket(i);
- EXPECT_EQ(packet_buffer_size(),
- static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
- kSelectiveErrors,
- frame_data)));
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(
+ packet_, frame_buffer_, kSelectiveErrors, frame_data)));
EXPECT_TRUE(session_.decodable());
}
packet_.seqNum += 1;
FillPacket(8);
EXPECT_EQ(packet_buffer_size(),
- static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
- kSelectiveErrors,
- frame_data)));
+ static_cast<size_t>(session_.InsertPacket(
+ packet_, frame_buffer_, kSelectiveErrors, frame_data)));
EXPECT_TRUE(session_.decodable());
}
@@ -285,18 +275,14 @@ TEST_F(TestSessionInfo, OutOfBoundsPackets1PacketFrame) {
packet_.isFirstPacket = true;
packet_.markerBit = true;
FillPacket(1);
- EXPECT_EQ(-3, session_.InsertPacket(packet_,
- frame_buffer_,
- kNoErrors,
- frame_data));
+ EXPECT_EQ(
+ -3, session_.InsertPacket(packet_, frame_buffer_, kNoErrors, frame_data));
packet_.seqNum = 0x0000;
packet_.isFirstPacket = false;
packet_.markerBit = false;
FillPacket(1);
- EXPECT_EQ(-3, session_.InsertPacket(packet_,
- frame_buffer_,
- kNoErrors,
- frame_data));
+ EXPECT_EQ(
+ -3, session_.InsertPacket(packet_, frame_buffer_, kNoErrors, frame_data));
}
TEST_F(TestSessionInfo, SetMarkerBitOnce) {
@@ -311,10 +297,8 @@ TEST_F(TestSessionInfo, SetMarkerBitOnce) {
packet_.isFirstPacket = true;
packet_.markerBit = true;
FillPacket(1);
- EXPECT_EQ(-3, session_.InsertPacket(packet_,
- frame_buffer_,
- kNoErrors,
- frame_data));
+ EXPECT_EQ(
+ -3, session_.InsertPacket(packet_, frame_buffer_, kNoErrors, frame_data));
}
TEST_F(TestSessionInfo, OutOfBoundsPacketsBase) {
@@ -331,10 +315,8 @@ TEST_F(TestSessionInfo, OutOfBoundsPacketsBase) {
packet_.isFirstPacket = true;
packet_.markerBit = true;
FillPacket(1);
- EXPECT_EQ(-3, session_.InsertPacket(packet_,
- frame_buffer_,
- kNoErrors,
- frame_data));
+ EXPECT_EQ(
+ -3, session_.InsertPacket(packet_, frame_buffer_, kNoErrors, frame_data));
packet_.seqNum = 0x0006;
packet_.isFirstPacket = true;
packet_.markerBit = true;
@@ -346,10 +328,8 @@ TEST_F(TestSessionInfo, OutOfBoundsPacketsBase) {
packet_.isFirstPacket = false;
packet_.markerBit = true;
FillPacket(1);
- EXPECT_EQ(-3, session_.InsertPacket(packet_,
- frame_buffer_,
- kNoErrors,
- frame_data));
+ EXPECT_EQ(
+ -3, session_.InsertPacket(packet_, frame_buffer_, kNoErrors, frame_data));
}
TEST_F(TestSessionInfo, OutOfBoundsPacketsWrap) {
@@ -379,20 +359,14 @@ TEST_F(TestSessionInfo, OutOfBoundsPacketsWrap) {
packet_.isFirstPacket = false;
packet_.markerBit = false;
FillPacket(1);
- EXPECT_EQ(-3,
- session_.InsertPacket(packet_,
- frame_buffer_,
- kNoErrors,
- frame_data));
+ EXPECT_EQ(
+ -3, session_.InsertPacket(packet_, frame_buffer_, kNoErrors, frame_data));
packet_.seqNum = 0x0006;
packet_.isFirstPacket = false;
packet_.markerBit = false;
FillPacket(1);
- EXPECT_EQ(-3,
- session_.InsertPacket(packet_,
- frame_buffer_,
- kNoErrors,
- frame_data));
+ EXPECT_EQ(
+ -3, session_.InsertPacket(packet_, frame_buffer_, kNoErrors, frame_data));
}
TEST_F(TestSessionInfo, OutOfBoundsOutOfOrder) {
@@ -417,10 +391,8 @@ TEST_F(TestSessionInfo, OutOfBoundsOutOfOrder) {
packet_.isFirstPacket = false;
packet_.markerBit = false;
FillPacket(1);
- EXPECT_EQ(-3, session_.InsertPacket(packet_,
- frame_buffer_,
- kNoErrors,
- frame_data));
+ EXPECT_EQ(
+ -3, session_.InsertPacket(packet_, frame_buffer_, kNoErrors, frame_data));
packet_.seqNum = 0x0010;
packet_.isFirstPacket = false;
packet_.markerBit = false;
@@ -440,10 +412,8 @@ TEST_F(TestSessionInfo, OutOfBoundsOutOfOrder) {
packet_.isFirstPacket = false;
packet_.markerBit = false;
FillPacket(1);
- EXPECT_EQ(-3, session_.InsertPacket(packet_,
- frame_buffer_,
- kNoErrors,
- frame_data));
+ EXPECT_EQ(
+ -3, session_.InsertPacket(packet_, frame_buffer_, kNoErrors, frame_data));
}
TEST_F(TestVP8Partitions, TwoPartitionsOneLoss) {
@@ -455,8 +425,8 @@ TEST_F(TestVP8Partitions, TwoPartitionsOneLoss) {
packet_header_.header.markerBit = false;
packet_header_.header.sequenceNumber = 0;
FillPacket(0);
- VCMPacket* packet = new VCMPacket(packet_buffer_, packet_buffer_size(),
- packet_header_);
+ VCMPacket* packet =
+ new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
kNoErrors, frame_data)));
@@ -505,8 +475,8 @@ TEST_F(TestVP8Partitions, TwoPartitionsOneLoss2) {
packet_header_.header.markerBit = false;
packet_header_.header.sequenceNumber = 1;
FillPacket(1);
- VCMPacket* packet = new VCMPacket(packet_buffer_, packet_buffer_size(),
- packet_header_);
+ VCMPacket* packet =
+ new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
kNoErrors, frame_data)));
@@ -567,8 +537,8 @@ TEST_F(TestVP8Partitions, TwoPartitionsNoLossWrap) {
packet_header_.header.markerBit = false;
packet_header_.header.sequenceNumber = 0xfffd;
FillPacket(0);
- VCMPacket* packet = new VCMPacket(packet_buffer_, packet_buffer_size(),
- packet_header_);
+ VCMPacket* packet =
+ new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
kNoErrors, frame_data)));
@@ -629,8 +599,8 @@ TEST_F(TestVP8Partitions, TwoPartitionsLossWrap) {
packet_header_.header.markerBit = false;
packet_header_.header.sequenceNumber = 0xfffd;
FillPacket(0);
- VCMPacket* packet = new VCMPacket(packet_buffer_, packet_buffer_size(),
- packet_header_);
+ VCMPacket* packet =
+ new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
kNoErrors, frame_data)));
@@ -682,7 +652,6 @@ TEST_F(TestVP8Partitions, TwoPartitionsLossWrap) {
EXPECT_TRUE(VerifyPartition(1, 1, 2));
}
-
TEST_F(TestVP8Partitions, ThreePartitionsOneMissing) {
// Partition 1 |Partition 2 | Partition 3
// [ 1 ] [ 2 ] | | [ 5 ] | [ 6 ]
@@ -692,8 +661,8 @@ TEST_F(TestVP8Partitions, ThreePartitionsOneMissing) {
packet_header_.header.markerBit = false;
packet_header_.header.sequenceNumber = 1;
FillPacket(1);
- VCMPacket* packet = new VCMPacket(packet_buffer_, packet_buffer_size(),
- packet_header_);
+ VCMPacket* packet =
+ new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
kNoErrors, frame_data)));
@@ -754,8 +723,8 @@ TEST_F(TestVP8Partitions, ThreePartitionsLossInSecond) {
packet_header_.header.markerBit = false;
packet_header_.header.sequenceNumber = 1;
FillPacket(1);
- VCMPacket* packet = new VCMPacket(packet_buffer_, packet_buffer_size(),
- packet_header_);
+ VCMPacket* packet =
+ new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
kNoErrors, frame_data)));
@@ -767,8 +736,7 @@ TEST_F(TestVP8Partitions, ThreePartitionsLossInSecond) {
packet_header_.header.markerBit = false;
packet_header_.header.sequenceNumber += 1;
FillPacket(2);
- packet = new VCMPacket(packet_buffer_, packet_buffer_size(),
- packet_header_);
+ packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
kNoErrors, frame_data)));
@@ -841,8 +809,8 @@ TEST_F(TestVP8Partitions, AggregationOverTwoPackets) {
packet_header_.header.markerBit = false;
packet_header_.header.sequenceNumber = 0;
FillPacket(0);
- VCMPacket* packet = new VCMPacket(packet_buffer_, packet_buffer_size(),
- packet_header_);
+ VCMPacket* packet =
+ new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
kNoErrors, frame_data)));
@@ -892,10 +860,8 @@ TEST_F(TestNalUnits, OnlyReceivedEmptyPacket) {
packet_.sizeBytes = 0;
packet_.seqNum = 0;
packet_.markerBit = false;
- EXPECT_EQ(0, session_.InsertPacket(packet_,
- frame_buffer_,
- kNoErrors,
- frame_data));
+ EXPECT_EQ(
+ 0, session_.InsertPacket(packet_, frame_buffer_, kNoErrors, frame_data));
EXPECT_EQ(0U, session_.MakeDecodable());
EXPECT_EQ(0U, session_.SessionLength());
diff --git a/webrtc/modules/video_coding/main/test/plotJitterEstimate.m b/webrtc/modules/video_coding/test/plotJitterEstimate.m
index d6185f55da..d6185f55da 100644
--- a/webrtc/modules/video_coding/main/test/plotJitterEstimate.m
+++ b/webrtc/modules/video_coding/test/plotJitterEstimate.m
diff --git a/webrtc/modules/video_coding/main/test/plotReceiveTrace.m b/webrtc/modules/video_coding/test/plotReceiveTrace.m
index 4d262aa165..4d262aa165 100644
--- a/webrtc/modules/video_coding/main/test/plotReceiveTrace.m
+++ b/webrtc/modules/video_coding/test/plotReceiveTrace.m
diff --git a/webrtc/modules/video_coding/main/test/plotTimingTest.m b/webrtc/modules/video_coding/test/plotTimingTest.m
index 52a6f303cd..52a6f303cd 100644
--- a/webrtc/modules/video_coding/main/test/plotTimingTest.m
+++ b/webrtc/modules/video_coding/test/plotTimingTest.m
diff --git a/webrtc/modules/video_coding/main/test/receiver_tests.h b/webrtc/modules/video_coding/test/receiver_tests.h
index 6d7b7beeb5..d6bac07392 100644
--- a/webrtc/modules/video_coding/main/test/receiver_tests.h
+++ b/webrtc/modules/video_coding/test/receiver_tests.h
@@ -11,20 +11,20 @@
#ifndef WEBRTC_MODULES_VIDEO_CODING_TEST_RECEIVER_TESTS_H_
#define WEBRTC_MODULES_VIDEO_CODING_TEST_RECEIVER_TESTS_H_
-#include "webrtc/common_types.h"
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp.h"
-#include "webrtc/modules/video_coding/main/interface/video_coding.h"
-#include "webrtc/modules/video_coding/main/test/test_util.h"
-#include "webrtc/modules/video_coding/main/test/video_source.h"
-#include "webrtc/typedefs.h"
-
#include <stdio.h>
#include <string>
+#include "webrtc/common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp.h"
+#include "webrtc/modules/video_coding/include/video_coding.h"
+#include "webrtc/modules/video_coding/test/test_util.h"
+#include "webrtc/modules/video_coding/test/video_source.h"
+#include "webrtc/typedefs.h"
+
class RtpDataCallback : public webrtc::NullRtpData {
public:
- RtpDataCallback(webrtc::VideoCodingModule* vcm) : vcm_(vcm) {}
+ explicit RtpDataCallback(webrtc::VideoCodingModule* vcm) : vcm_(vcm) {}
virtual ~RtpDataCallback() {}
int32_t OnReceivedPayloadData(
@@ -40,4 +40,4 @@ class RtpDataCallback : public webrtc::NullRtpData {
int RtpPlay(const CmdArgs& args);
-#endif // WEBRTC_MODULES_VIDEO_CODING_TEST_RECEIVER_TESTS_H_
+#endif // WEBRTC_MODULES_VIDEO_CODING_TEST_RECEIVER_TESTS_H_
diff --git a/webrtc/modules/video_coding/main/test/release_test.h b/webrtc/modules/video_coding/test/release_test.h
index e90dcaef01..ab9b2159d9 100644
--- a/webrtc/modules/video_coding/main/test/release_test.h
+++ b/webrtc/modules/video_coding/test/release_test.h
@@ -8,10 +8,10 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef RELEASE_TEST_H
-#define RELEASE_TEST_H
+#ifndef WEBRTC_MODULES_VIDEO_CODING_TEST_RELEASE_TEST_H_
+#define WEBRTC_MODULES_VIDEO_CODING_TEST_RELEASE_TEST_H_
int ReleaseTest();
int ReleaseTestPart2();
-#endif
+#endif // WEBRTC_MODULES_VIDEO_CODING_TEST_RELEASE_TEST_H_
diff --git a/webrtc/modules/video_coding/main/test/rtp_player.cc b/webrtc/modules/video_coding/test/rtp_player.cc
index 6717cf227d..9b6490618c 100644
--- a/webrtc/modules/video_coding/main/test/rtp_player.cc
+++ b/webrtc/modules/video_coding/test/rtp_player.cc
@@ -8,27 +8,27 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/video_coding/main/test/rtp_player.h"
+#include "webrtc/modules/video_coding/test/rtp_player.h"
#include <stdio.h>
#include <map>
#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_header_parser.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_payload_registry.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_receiver.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp.h"
-#include "webrtc/modules/video_coding/main/source/internal_defines.h"
-#include "webrtc/modules/video_coding/main/test/test_util.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_header_parser.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_payload_registry.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_receiver.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp.h"
+#include "webrtc/modules/video_coding/internal_defines.h"
+#include "webrtc/modules/video_coding/test/test_util.h"
#include "webrtc/system_wrappers/include/clock.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
#include "webrtc/test/rtp_file_reader.h"
#if 1
-# define DEBUG_LOG1(text, arg)
+#define DEBUG_LOG1(text, arg)
#else
-# define DEBUG_LOG1(text, arg) (printf(text "\n", arg))
+#define DEBUG_LOG1(text, arg) (printf(text "\n", arg))
#endif
namespace webrtc {
@@ -41,7 +41,9 @@ enum {
class RawRtpPacket {
public:
- RawRtpPacket(const uint8_t* data, size_t length, uint32_t ssrc,
+ RawRtpPacket(const uint8_t* data,
+ size_t length,
+ uint32_t ssrc,
uint16_t seq_num)
: data_(new uint8_t[length]),
length_(length),
@@ -140,7 +142,7 @@ class LostPackets {
CriticalSectionScoped cs(crit_sect_.get());
int count = 0;
for (ConstRtpPacketIterator it = packets_.begin(); it != packets_.end();
- ++it) {
+ ++it) {
if ((*it)->resend_time_ms() >= 0) {
count++;
}
@@ -164,7 +166,7 @@ class LostPackets {
printf("Packets still lost: %zd\n", packets_.size());
printf("Sequence numbers:\n");
for (ConstRtpPacketIterator it = packets_.begin(); it != packets_.end();
- ++it) {
+ ++it) {
printf("%u, ", (*it)->seq_num());
}
printf("\n");
@@ -231,17 +233,14 @@ class SsrcHandlers {
kDefaultTransmissionTimeOffsetExtensionId);
for (PayloadTypesIterator it = payload_types_.begin();
- it != payload_types_.end(); ++it) {
+ it != payload_types_.end(); ++it) {
VideoCodec codec;
memset(&codec, 0, sizeof(codec));
- strncpy(codec.plName, it->name().c_str(), sizeof(codec.plName)-1);
+ strncpy(codec.plName, it->name().c_str(), sizeof(codec.plName) - 1);
codec.plType = it->payload_type();
codec.codecType = it->codec_type();
- if (handler->rtp_module_->RegisterReceivePayload(codec.plName,
- codec.plType,
- 90000,
- 0,
- codec.maxBitrate) < 0) {
+ if (handler->rtp_module_->RegisterReceivePayload(
+ codec.plName, codec.plType, 90000, 0, codec.maxBitrate) < 0) {
return -1;
}
}
@@ -267,7 +266,8 @@ class SsrcHandlers {
private:
class Handler : public RtpStreamInterface {
public:
- Handler(uint32_t ssrc, const PayloadTypes& payload_types,
+ Handler(uint32_t ssrc,
+ const PayloadTypes& payload_types,
LostPackets* lost_packets)
: rtp_header_parser_(RtpHeaderParser::Create()),
rtp_payload_registry_(new RTPPayloadRegistry(
@@ -290,9 +290,7 @@ class SsrcHandlers {
}
virtual uint32_t ssrc() const { return ssrc_; }
- virtual const PayloadTypes& payload_types() const {
- return payload_types_;
- }
+ virtual const PayloadTypes& payload_types() const { return payload_types_; }
rtc::scoped_ptr<RtpHeaderParser> rtp_header_parser_;
rtc::scoped_ptr<RTPPayloadRegistry> rtp_payload_registry_;
@@ -351,8 +349,7 @@ class RtpPlayerImpl : public RtpPlayerInterface {
virtual int NextPacket(int64_t time_now) {
// Send any packets ready to be resent.
for (RawRtpPacket* packet = lost_packets_.NextPacketToResend(time_now);
- packet != NULL;
- packet = lost_packets_.NextPacketToResend(time_now)) {
+ packet != NULL; packet = lost_packets_.NextPacketToResend(time_now)) {
int ret = SendPacket(packet->data(), packet->length());
if (ret > 0) {
printf("Resend: %08x:%u\n", packet->ssrc(), packet->seq_num());
@@ -392,8 +389,7 @@ class RtpPlayerImpl : public RtpPlayerInterface {
if (!packet_source_->NextPacket(&next_packet_)) {
end_of_file_ = true;
return 0;
- }
- else if (next_packet_.length == 0) {
+ } else if (next_packet_.length == 0) {
return 0;
}
}
@@ -406,7 +402,7 @@ class RtpPlayerImpl : public RtpPlayerInterface {
virtual uint32_t TimeUntilNextPacket() const {
int64_t time_left = (next_rtp_time_ - first_packet_rtp_time_) -
- (clock_->TimeInMilliseconds() - first_packet_time_ms_);
+ (clock_->TimeInMilliseconds() - first_packet_time_ms_);
if (time_left < 0) {
return 0;
}
@@ -438,7 +434,7 @@ class RtpPlayerImpl : public RtpPlayerInterface {
if (no_loss_startup_ > 0) {
no_loss_startup_--;
- } else if ((rand() + 1.0)/(RAND_MAX + 1.0) < loss_rate_) {
+ } else if ((rand() + 1.0) / (RAND_MAX + 1.0) < loss_rate_) { // NOLINT
uint16_t seq_num = header.sequenceNumber;
lost_packets_.AddPacket(new RawRtpPacket(data, length, ssrc, seq_num));
DEBUG_LOG1("Dropped packet: %d!", header.header.sequenceNumber);
@@ -470,9 +466,12 @@ class RtpPlayerImpl : public RtpPlayerInterface {
};
RtpPlayerInterface* Create(const std::string& input_filename,
- PayloadSinkFactoryInterface* payload_sink_factory, Clock* clock,
- const PayloadTypes& payload_types, float loss_rate, int64_t rtt_ms,
- bool reordering) {
+ PayloadSinkFactoryInterface* payload_sink_factory,
+ Clock* clock,
+ const PayloadTypes& payload_types,
+ float loss_rate,
+ int64_t rtt_ms,
+ bool reordering) {
rtc::scoped_ptr<test::RtpFileReader> packet_source(
test::RtpFileReader::Create(test::RtpFileReader::kRtpDump,
input_filename));
diff --git a/webrtc/modules/video_coding/main/test/rtp_player.h b/webrtc/modules/video_coding/test/rtp_player.h
index 7459231416..e50fb9ac70 100644
--- a/webrtc/modules/video_coding/main/test/rtp_player.h
+++ b/webrtc/modules/video_coding/test/rtp_player.h
@@ -14,8 +14,8 @@
#include <string>
#include <vector>
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
-#include "webrtc/modules/video_coding/main/interface/video_coding_defines.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "webrtc/modules/video_coding/include/video_coding_defines.h"
namespace webrtc {
class Clock;
@@ -24,12 +24,12 @@ namespace rtpplayer {
class PayloadCodecTuple {
public:
- PayloadCodecTuple(uint8_t payload_type, const std::string& codec_name,
+ PayloadCodecTuple(uint8_t payload_type,
+ const std::string& codec_name,
VideoCodecType codec_type)
: name_(codec_name),
payload_type_(payload_type),
- codec_type_(codec_type) {
- }
+ codec_type_(codec_type) {}
const std::string& name() const { return name_; }
uint8_t payload_type() const { return payload_type_; }
@@ -87,11 +87,14 @@ class RtpPlayerInterface {
};
RtpPlayerInterface* Create(const std::string& inputFilename,
- PayloadSinkFactoryInterface* payloadSinkFactory, Clock* clock,
- const PayloadTypes& payload_types, float lossRate, int64_t rttMs,
- bool reordering);
+ PayloadSinkFactoryInterface* payloadSinkFactory,
+ Clock* clock,
+ const PayloadTypes& payload_types,
+ float lossRate,
+ int64_t rttMs,
+ bool reordering);
} // namespace rtpplayer
} // namespace webrtc
-#endif // WEBRTC_MODULES_VIDEO_CODING_TEST_RTP_PLAYER_H_
+#endif // WEBRTC_MODULES_VIDEO_CODING_TEST_RTP_PLAYER_H_
diff --git a/webrtc/modules/video_coding/main/source/test/stream_generator.cc b/webrtc/modules/video_coding/test/stream_generator.cc
index b365d96dc0..167d55faff 100644
--- a/webrtc/modules/video_coding/main/source/test/stream_generator.cc
+++ b/webrtc/modules/video_coding/test/stream_generator.cc
@@ -8,22 +8,21 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/video_coding/main/source/test/stream_generator.h"
+#include "webrtc/modules/video_coding/test/stream_generator.h"
#include <string.h>
#include <list>
#include "testing/gtest/include/gtest/gtest.h"
-#include "webrtc/modules/video_coding/main/source/packet.h"
-#include "webrtc/modules/video_coding/main/test/test_util.h"
+#include "webrtc/modules/video_coding/packet.h"
+#include "webrtc/modules/video_coding/test/test_util.h"
#include "webrtc/system_wrappers/include/clock.h"
namespace webrtc {
StreamGenerator::StreamGenerator(uint16_t start_seq_num, int64_t current_time)
- : packets_(), sequence_number_(start_seq_num), start_time_(current_time) {
-}
+ : packets_(), sequence_number_(start_seq_num), start_time_(current_time) {}
void StreamGenerator::Init(uint16_t start_seq_num, int64_t current_time) {
packets_.clear();
@@ -41,8 +40,8 @@ void StreamGenerator::GenerateFrame(FrameType type,
const int packet_size =
(kFrameSize + num_media_packets / 2) / num_media_packets;
bool marker_bit = (i == num_media_packets - 1);
- packets_.push_back(GeneratePacket(
- sequence_number_, timestamp, packet_size, (i == 0), marker_bit, type));
+ packets_.push_back(GeneratePacket(sequence_number_, timestamp, packet_size,
+ (i == 0), marker_bit, type));
++sequence_number_;
}
for (int i = 0; i < num_empty_packets; ++i) {
@@ -104,7 +103,9 @@ bool StreamGenerator::NextPacket(VCMPacket* packet) {
return true;
}
-void StreamGenerator::DropLastPacket() { packets_.pop_back(); }
+void StreamGenerator::DropLastPacket() {
+ packets_.pop_back();
+}
uint16_t StreamGenerator::NextSequenceNumber() const {
if (packets_.empty())
@@ -112,7 +113,9 @@ uint16_t StreamGenerator::NextSequenceNumber() const {
return packets_.front().seqNum;
}
-int StreamGenerator::PacketsRemaining() const { return packets_.size(); }
+int StreamGenerator::PacketsRemaining() const {
+ return packets_.size();
+}
std::list<VCMPacket>::iterator StreamGenerator::GetPacketIterator(int index) {
std::list<VCMPacket>::iterator it = packets_.begin();
diff --git a/webrtc/modules/video_coding/main/source/test/stream_generator.h b/webrtc/modules/video_coding/test/stream_generator.h
index 7902d16706..36b26db92e 100644
--- a/webrtc/modules/video_coding/main/source/test/stream_generator.h
+++ b/webrtc/modules/video_coding/test/stream_generator.h
@@ -7,13 +7,13 @@
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_TEST_STREAM_GENERATOR_H_
-#define WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_TEST_STREAM_GENERATOR_H_
+#ifndef WEBRTC_MODULES_VIDEO_CODING_TEST_STREAM_GENERATOR_H_
+#define WEBRTC_MODULES_VIDEO_CODING_TEST_STREAM_GENERATOR_H_
#include <list>
-#include "webrtc/modules/video_coding/main/source/packet.h"
-#include "webrtc/modules/video_coding/main/test/test_util.h"
+#include "webrtc/modules/video_coding/packet.h"
+#include "webrtc/modules/video_coding/test/test_util.h"
#include "webrtc/typedefs.h"
namespace webrtc {
@@ -69,4 +69,4 @@ class StreamGenerator {
} // namespace webrtc
-#endif // WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_TEST_STREAM_GENERATOR_H_
+#endif // WEBRTC_MODULES_VIDEO_CODING_TEST_STREAM_GENERATOR_H_
diff --git a/webrtc/modules/video_coding/main/test/subfigure.m b/webrtc/modules/video_coding/test/subfigure.m
index eadfcb69bd..eadfcb69bd 100644
--- a/webrtc/modules/video_coding/main/test/subfigure.m
+++ b/webrtc/modules/video_coding/test/subfigure.m
diff --git a/webrtc/modules/video_coding/main/test/test_util.cc b/webrtc/modules/video_coding/test/test_util.cc
index cd858da288..7ff663e395 100644
--- a/webrtc/modules/video_coding/main/test/test_util.cc
+++ b/webrtc/modules/video_coding/test/test_util.cc
@@ -8,7 +8,7 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/video_coding/main/test/test_util.h"
+#include "webrtc/modules/video_coding/test/test_util.h"
#include <assert.h>
#include <math.h>
@@ -17,7 +17,7 @@
#include <sstream>
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
-#include "webrtc/modules/video_coding/main/source/internal_defines.h"
+#include "webrtc/modules/video_coding/internal_defines.h"
#include "webrtc/test/testsupport/fileutils.h"
CmdArgs::CmdArgs()
@@ -28,12 +28,12 @@ CmdArgs::CmdArgs()
rtt(0),
inputFile(webrtc::test::ProjectRootPath() + "/resources/foreman_cif.yuv"),
outputFile(webrtc::test::OutputPath() +
- "video_coding_test_output_352x288.yuv") {
-}
+ "video_coding_test_output_352x288.yuv") {}
namespace {
-void SplitFilename(const std::string& filename, std::string* basename,
+void SplitFilename(const std::string& filename,
+ std::string* basename,
std::string* extension) {
assert(basename);
assert(extension);
@@ -41,7 +41,7 @@ void SplitFilename(const std::string& filename, std::string* basename,
std::string::size_type idx;
idx = filename.rfind('.');
- if(idx != std::string::npos) {
+ if (idx != std::string::npos) {
*basename = filename.substr(0, idx);
*extension = filename.substr(idx + 1);
} else {
@@ -50,21 +50,24 @@ void SplitFilename(const std::string& filename, std::string* basename,
}
}
-std::string AppendWidthHeightCount(const std::string& filename, int width,
- int height, int count) {
+std::string AppendWidthHeightCount(const std::string& filename,
+ int width,
+ int height,
+ int count) {
std::string basename;
std::string extension;
SplitFilename(filename, &basename, &extension);
std::stringstream ss;
- ss << basename << "_" << count << "." << width << "_" << height << "." <<
- extension;
+ ss << basename << "_" << count << "." << width << "_" << height << "."
+ << extension;
return ss.str();
}
} // namespace
FileOutputFrameReceiver::FileOutputFrameReceiver(
- const std::string& base_out_filename, uint32_t ssrc)
+ const std::string& base_out_filename,
+ uint32_t ssrc)
: out_filename_(),
out_file_(NULL),
timing_file_(NULL),
@@ -80,8 +83,8 @@ FileOutputFrameReceiver::FileOutputFrameReceiver(
SplitFilename(base_out_filename, &basename, &extension);
}
std::stringstream ss;
- ss << basename << "_" << std::hex << std::setw(8) << std::setfill('0') <<
- ssrc << "." << extension;
+ ss << basename << "_" << std::hex << std::setw(8) << std::setfill('0') << ssrc
+ << "." << extension;
out_filename_ = ss.str();
}
@@ -113,8 +116,8 @@ int32_t FileOutputFrameReceiver::FrameToRender(
printf("New size: %dx%d\n", video_frame.width(), video_frame.height());
width_ = video_frame.width();
height_ = video_frame.height();
- std::string filename_with_width_height = AppendWidthHeightCount(
- out_filename_, width_, height_, count_);
+ std::string filename_with_width_height =
+ AppendWidthHeightCount(out_filename_, width_, height_, count_);
++count_;
out_file_ = fopen(filename_with_width_height.c_str(), "wb");
if (out_file_ == NULL) {
@@ -122,7 +125,7 @@ int32_t FileOutputFrameReceiver::FrameToRender(
}
}
fprintf(timing_file_, "%u, %u\n", video_frame.timestamp(),
- webrtc::MaskWord64ToUWord32(video_frame.render_time_ms()));
+ webrtc::MaskWord64ToUWord32(video_frame.render_time_ms()));
if (PrintVideoFrame(video_frame, out_file_) < 0) {
return -1;
}
@@ -130,7 +133,7 @@ int32_t FileOutputFrameReceiver::FrameToRender(
}
webrtc::RtpVideoCodecTypes ConvertCodecType(const char* plname) {
- if (strncmp(plname,"VP8" , 3) == 0) {
+ if (strncmp(plname, "VP8", 3) == 0) {
return webrtc::kRtpVideoVp8;
} else {
// Default value.
diff --git a/webrtc/modules/video_coding/main/test/test_util.h b/webrtc/modules/video_coding/test/test_util.h
index 27f66fe011..45b88b9b50 100644
--- a/webrtc/modules/video_coding/main/test/test_util.h
+++ b/webrtc/modules/video_coding/test/test_util.h
@@ -18,8 +18,8 @@
#include <string>
#include "webrtc/base/constructormagic.h"
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/modules/video_coding/main/interface/video_coding.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/video_coding/include/video_coding.h"
#include "webrtc/system_wrappers/include/event_wrapper.h"
enum { kMaxNackListSize = 250 };
@@ -33,11 +33,13 @@ class NullEvent : public webrtc::EventWrapper {
virtual bool Reset() { return true; }
- virtual webrtc::EventTypeWrapper Wait(unsigned long max_time) {
+ virtual webrtc::EventTypeWrapper Wait(unsigned long max_time) { // NOLINT
return webrtc::kEventTimeout;
}
- virtual bool StartTimer(bool periodic, unsigned long time) { return true; }
+ virtual bool StartTimer(bool periodic, unsigned long time) { // NOLINT
+ return true;
+ }
virtual bool StopTimer() { return true; }
};
@@ -46,9 +48,7 @@ class NullEventFactory : public webrtc::EventFactory {
public:
virtual ~NullEventFactory() {}
- virtual webrtc::EventWrapper* CreateEvent() {
- return new NullEvent;
- }
+ virtual webrtc::EventWrapper* CreateEvent() { return new NullEvent; }
};
class FileOutputFrameReceiver : public webrtc::VCMReceiveCallback {
@@ -57,7 +57,7 @@ class FileOutputFrameReceiver : public webrtc::VCMReceiveCallback {
virtual ~FileOutputFrameReceiver();
// VCMReceiveCallback
- virtual int32_t FrameToRender(webrtc::VideoFrame& video_frame);
+ virtual int32_t FrameToRender(webrtc::VideoFrame& video_frame); // NOLINT
private:
std::string out_filename_;
@@ -83,4 +83,4 @@ class CmdArgs {
std::string outputFile;
};
-#endif
+#endif // WEBRTC_MODULES_VIDEO_CODING_TEST_TEST_UTIL_H_
diff --git a/webrtc/modules/video_coding/main/test/tester_main.cc b/webrtc/modules/video_coding/test/tester_main.cc
index 2885f00bd5..33ca82007d 100644
--- a/webrtc/modules/video_coding/main/test/tester_main.cc
+++ b/webrtc/modules/video_coding/test/tester_main.cc
@@ -8,25 +8,27 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-
#include <stdlib.h>
#include <string.h>
#include "gflags/gflags.h"
-#include "webrtc/modules/video_coding/main/interface/video_coding.h"
-#include "webrtc/modules/video_coding/main/test/receiver_tests.h"
+#include "webrtc/modules/video_coding/include/video_coding.h"
+#include "webrtc/modules/video_coding/test/receiver_tests.h"
#include "webrtc/test/testsupport/fileutils.h"
DEFINE_string(codec, "VP8", "Codec to use (VP8 or I420).");
DEFINE_int32(width, 352, "Width in pixels of the frames in the input file.");
DEFINE_int32(height, 288, "Height in pixels of the frames in the input file.");
DEFINE_int32(rtt, 0, "RTT (round-trip time), in milliseconds.");
-DEFINE_string(input_filename, webrtc::test::ProjectRootPath() +
- "/resources/foreman_cif.yuv", "Input file.");
-DEFINE_string(output_filename, webrtc::test::OutputPath() +
- "video_coding_test_output_352x288.yuv", "Output file.");
+DEFINE_string(input_filename,
+ webrtc::test::ProjectRootPath() + "/resources/foreman_cif.yuv",
+ "Input file.");
+DEFINE_string(output_filename,
+ webrtc::test::OutputPath() +
+ "video_coding_test_output_352x288.yuv",
+ "Output file.");
-using namespace webrtc;
+namespace webrtc {
/*
* Build with EVENT_DEBUG defined
@@ -36,36 +38,37 @@ using namespace webrtc;
int vcmMacrosTests = 0;
int vcmMacrosErrors = 0;
-int ParseArguments(CmdArgs& args) {
- args.width = FLAGS_width;
- args.height = FLAGS_height;
- if (args.width < 1 || args.height < 1) {
+int ParseArguments(CmdArgs* args) {
+ args->width = FLAGS_width;
+ args->height = FLAGS_height;
+ if (args->width < 1 || args->height < 1) {
return -1;
}
- args.codecName = FLAGS_codec;
- if (args.codecName == "VP8") {
- args.codecType = kVideoCodecVP8;
- } else if (args.codecName == "VP9") {
- args.codecType = kVideoCodecVP9;
- } else if (args.codecName == "I420") {
- args.codecType = kVideoCodecI420;
+ args->codecName = FLAGS_codec;
+ if (args->codecName == "VP8") {
+ args->codecType = kVideoCodecVP8;
+ } else if (args->codecName == "VP9") {
+ args->codecType = kVideoCodecVP9;
+ } else if (args->codecName == "I420") {
+ args->codecType = kVideoCodecI420;
} else {
- printf("Invalid codec: %s\n", args.codecName.c_str());
+ printf("Invalid codec: %s\n", args->codecName.c_str());
return -1;
}
- args.inputFile = FLAGS_input_filename;
- args.outputFile = FLAGS_output_filename;
- args.rtt = FLAGS_rtt;
+ args->inputFile = FLAGS_input_filename;
+ args->outputFile = FLAGS_output_filename;
+ args->rtt = FLAGS_rtt;
return 0;
}
+} // namespace webrtc
-int main(int argc, char **argv) {
+int main(int argc, char** argv) {
// Initialize WebRTC fileutils.h so paths to resources can be resolved.
webrtc::test::SetExecutablePath(argv[0]);
google::ParseCommandLineFlags(&argc, &argv, true);
CmdArgs args;
- if (ParseArguments(args) != 0) {
+ if (webrtc::ParseArguments(&args) != 0) {
printf("Unable to parse input arguments\n");
return -1;
}
diff --git a/webrtc/modules/video_coding/main/test/vcm_payload_sink_factory.cc b/webrtc/modules/video_coding/test/vcm_payload_sink_factory.cc
index 2d874cd1bd..c9ec372f41 100644
--- a/webrtc/modules/video_coding/main/test/vcm_payload_sink_factory.cc
+++ b/webrtc/modules/video_coding/test/vcm_payload_sink_factory.cc
@@ -8,23 +8,22 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/video_coding/main/test/vcm_payload_sink_factory.h"
+#include "webrtc/modules/video_coding/test/vcm_payload_sink_factory.h"
#include <assert.h>
#include <algorithm>
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp.h"
-#include "webrtc/modules/video_coding/main/test/test_util.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp.h"
+#include "webrtc/modules/video_coding/test/test_util.h"
#include "webrtc/system_wrappers/include/clock.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
namespace webrtc {
namespace rtpplayer {
-class VcmPayloadSinkFactory::VcmPayloadSink
- : public PayloadSinkInterface,
- public VCMPacketRequestCallback {
+class VcmPayloadSinkFactory::VcmPayloadSink : public PayloadSinkInterface,
+ public VCMPacketRequestCallback {
public:
VcmPayloadSink(VcmPayloadSinkFactory* factory,
RtpStreamInterface* stream,
@@ -43,9 +42,7 @@ class VcmPayloadSinkFactory::VcmPayloadSink
vcm_->RegisterReceiveCallback(frame_receiver_.get());
}
- virtual ~VcmPayloadSink() {
- factory_->Remove(this);
- }
+ virtual ~VcmPayloadSink() { factory_->Remove(this); }
// PayloadSinkInterface
int32_t OnReceivedPayloadData(const uint8_t* payload_data,
@@ -136,14 +133,11 @@ PayloadSinkInterface* VcmPayloadSinkFactory::Create(
}
const PayloadTypes& plt = stream->payload_types();
- for (PayloadTypesIterator it = plt.begin(); it != plt.end();
- ++it) {
+ for (PayloadTypesIterator it = plt.begin(); it != plt.end(); ++it) {
if (it->codec_type() != kVideoCodecULPFEC &&
it->codec_type() != kVideoCodecRED) {
VideoCodec codec;
- if (VideoCodingModule::Codec(it->codec_type(), &codec) < 0) {
- return NULL;
- }
+ VideoCodingModule::Codec(it->codec_type(), &codec);
codec.plType = it->payload_type();
if (vcm->RegisterReceiveCodec(&codec, 1) < 0) {
return NULL;
diff --git a/webrtc/modules/video_coding/main/test/vcm_payload_sink_factory.h b/webrtc/modules/video_coding/test/vcm_payload_sink_factory.h
index ec94bdc382..dae53b0c08 100644
--- a/webrtc/modules/video_coding/main/test/vcm_payload_sink_factory.h
+++ b/webrtc/modules/video_coding/test/vcm_payload_sink_factory.h
@@ -8,13 +8,16 @@
* be found in the AUTHORS file in the root of the source tree.
*/
+#ifndef WEBRTC_MODULES_VIDEO_CODING_TEST_VCM_PAYLOAD_SINK_FACTORY_H_
+#define WEBRTC_MODULES_VIDEO_CODING_TEST_VCM_PAYLOAD_SINK_FACTORY_H_
+
#include <string>
#include <vector>
#include "webrtc/base/constructormagic.h"
#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/modules/video_coding/main/interface/video_coding_defines.h"
-#include "webrtc/modules/video_coding/main/test/rtp_player.h"
+#include "webrtc/modules/video_coding/include/video_coding_defines.h"
+#include "webrtc/modules/video_coding/test/rtp_player.h"
class NullEventFactory;
@@ -26,9 +29,11 @@ namespace rtpplayer {
class VcmPayloadSinkFactory : public PayloadSinkFactoryInterface {
public:
VcmPayloadSinkFactory(const std::string& base_out_filename,
- Clock* clock, bool protection_enabled,
+ Clock* clock,
+ bool protection_enabled,
VCMVideoProtection protection_method,
- int64_t rtt_ms, uint32_t render_delay_ms,
+ int64_t rtt_ms,
+ uint32_t render_delay_ms,
uint32_t min_playout_delay_ms);
virtual ~VcmPayloadSinkFactory();
@@ -61,3 +66,5 @@ class VcmPayloadSinkFactory : public PayloadSinkFactoryInterface {
};
} // namespace rtpplayer
} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_CODING_TEST_VCM_PAYLOAD_SINK_FACTORY_H_
diff --git a/webrtc/modules/video_coding/main/test/video_rtp_play.cc b/webrtc/modules/video_coding/test/video_rtp_play.cc
index 8460601bf5..cb092e381e 100644
--- a/webrtc/modules/video_coding/main/test/video_rtp_play.cc
+++ b/webrtc/modules/video_coding/test/video_rtp_play.cc
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/video_coding/main/test/receiver_tests.h"
-#include "webrtc/modules/video_coding/main/test/vcm_payload_sink_factory.h"
+#include "webrtc/modules/video_coding/test/receiver_tests.h"
+#include "webrtc/modules/video_coding/test/vcm_payload_sink_factory.h"
#include "webrtc/system_wrappers/include/trace.h"
#include "webrtc/test/testsupport/fileutils.h"
@@ -48,9 +48,9 @@ int RtpPlay(const CmdArgs& args) {
output_file = webrtc::test::OutputPath() + "RtpPlay_decoded.yuv";
webrtc::SimulatedClock clock(0);
- webrtc::rtpplayer::VcmPayloadSinkFactory factory(output_file, &clock,
- kConfigProtectionEnabled, kConfigProtectionMethod, kConfigRttMs,
- kConfigRenderDelayMs, kConfigMinPlayoutDelayMs);
+ webrtc::rtpplayer::VcmPayloadSinkFactory factory(
+ output_file, &clock, kConfigProtectionEnabled, kConfigProtectionMethod,
+ kConfigRttMs, kConfigRenderDelayMs, kConfigMinPlayoutDelayMs);
rtc::scoped_ptr<webrtc::rtpplayer::RtpPlayerInterface> rtp_player(
webrtc::rtpplayer::Create(args.inputFile, &factory, &clock, payload_types,
kConfigLossRate, kConfigRttMs,
@@ -63,7 +63,7 @@ int RtpPlay(const CmdArgs& args) {
while ((ret = rtp_player->NextPacket(clock.TimeInMilliseconds())) == 0) {
ret = factory.DecodeAndProcessAll(true);
if (ret < 0 || (kConfigMaxRuntimeMs > -1 &&
- clock.TimeInMilliseconds() >= kConfigMaxRuntimeMs)) {
+ clock.TimeInMilliseconds() >= kConfigMaxRuntimeMs)) {
break;
}
clock.AdvanceTimeMilliseconds(1);
diff --git a/webrtc/modules/video_coding/test/video_source.h b/webrtc/modules/video_coding/test/video_source.h
new file mode 100644
index 0000000000..19d7f50b26
--- /dev/null
+++ b/webrtc/modules/video_coding/test/video_source.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_TEST_VIDEO_SOURCE_H_
+#define WEBRTC_MODULES_VIDEO_CODING_TEST_VIDEO_SOURCE_H_
+
+#include <string>
+
+#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
+#include "webrtc/typedefs.h"
+
+enum VideoSize {
+ kUndefined,
+ kSQCIF, // 128*96 = 12 288
+ kQQVGA, // 160*120 = 19 200
+ kQCIF, // 176*144 = 25 344
+ kCGA, // 320*200 = 64 000
+ kQVGA, // 320*240 = 76 800
+ kSIF, // 352*240 = 84 480
+ kWQVGA, // 400*240 = 96 000
+ kCIF, // 352*288 = 101 376
+ kW288p, // 512*288 = 147 456 (WCIF)
+ k448p, // 576*448 = 281 088
+ kVGA, // 640*480 = 307 200
+ k432p, // 720*432 = 311 040
+ kW432p, // 768*432 = 331 776
+ k4SIF, // 704*480 = 337 920
+ kW448p, // 768*448 = 344 064
+ kNTSC, // 720*480 = 345 600
+ kFW448p, // 800*448 = 358 400
+ kWVGA, // 800*480 = 384 000
+ k4CIF, // 704*576 = 405 504
+ kSVGA, // 800*600 = 480 000
+ kW544p, // 960*544 = 522 240
+ kW576p, // 1024*576 = 589 824 (W4CIF)
+ kHD, // 960*720 = 691 200
+ kXGA, // 1024*768 = 786 432
+ kWHD, // 1280*720 = 921 600
+ kFullHD, // 1440*1080 = 1 555 200
+ kWFullHD, // 1920*1080 = 2 073 600
+
+ kNumberOfVideoSizes
+};
+
+class VideoSource {
+ public:
+ VideoSource();
+ VideoSource(std::string fileName,
+ VideoSize size,
+ float frameRate,
+ webrtc::VideoType type = webrtc::kI420);
+ VideoSource(std::string fileName,
+ uint16_t width,
+ uint16_t height,
+ float frameRate = 30,
+ webrtc::VideoType type = webrtc::kI420);
+
+ std::string GetFileName() const { return _fileName; }
+ uint16_t GetWidth() const { return _width; }
+ uint16_t GetHeight() const { return _height; }
+ webrtc::VideoType GetType() const { return _type; }
+ float GetFrameRate() const { return _frameRate; }
+ int GetWidthHeight(VideoSize size);
+
+ // Returns the filename with the path (including the leading slash) removed.
+ std::string GetName() const;
+
+ size_t GetFrameLength() const;
+
+ private:
+ std::string _fileName;
+ uint16_t _width;
+ uint16_t _height;
+ webrtc::VideoType _type;
+ float _frameRate;
+};
+
+#endif // WEBRTC_MODULES_VIDEO_CODING_TEST_VIDEO_SOURCE_H_
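The header above only declares GetFrameLength(); its definition is not part of this diff. For the default webrtc::kI420 type the value follows directly from the 4:2:0 layout, so a plausible sketch is the following (an assumption, not the committed implementation — the real code likely delegates to webrtc::CalcBufferSize() from the already-included webrtc_libyuv.h):

    // Sketch: byte size of one I420 frame. A WxH frame holds W*H luma
    // samples plus two (W/2)x(H/2) chroma planes, i.e. 3/2 bytes per pixel.
    size_t VideoSource::GetFrameLength() const {
      return static_cast<size_t>(_width) * _height * 3 / 2;
    }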
diff --git a/webrtc/modules/video_coding/main/source/timestamp_map.cc b/webrtc/modules/video_coding/timestamp_map.cc
index c68a5af7ba..97d2777658 100644
--- a/webrtc/modules/video_coding/main/source/timestamp_map.cc
+++ b/webrtc/modules/video_coding/timestamp_map.cc
@@ -11,8 +11,8 @@
#include <assert.h>
#include <stdlib.h>
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/modules/video_coding/main/source/timestamp_map.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/video_coding/timestamp_map.h"
namespace webrtc {
@@ -20,11 +20,9 @@ VCMTimestampMap::VCMTimestampMap(size_t capacity)
: ring_buffer_(new TimestampDataTuple[capacity]),
capacity_(capacity),
next_add_idx_(0),
- next_pop_idx_(0) {
-}
+ next_pop_idx_(0) {}
-VCMTimestampMap::~VCMTimestampMap() {
-}
+VCMTimestampMap::~VCMTimestampMap() {}
void VCMTimestampMap::Add(uint32_t timestamp, VCMFrameInformation* data) {
ring_buffer_[next_add_idx_].timestamp = timestamp;
@@ -62,4 +60,4 @@ VCMFrameInformation* VCMTimestampMap::Pop(uint32_t timestamp) {
bool VCMTimestampMap::IsEmpty() const {
return (next_add_idx_ == next_pop_idx_);
}
-}
+} // namespace webrtc
diff --git a/webrtc/modules/video_coding/main/source/timestamp_map.h b/webrtc/modules/video_coding/timestamp_map.h
index 3d6f1bca0f..435d05895c 100644
--- a/webrtc/modules/video_coding/main/source/timestamp_map.h
+++ b/webrtc/modules/video_coding/timestamp_map.h
@@ -44,4 +44,4 @@ class VCMTimestampMap {
} // namespace webrtc
-#endif // WEBRTC_MODULES_VIDEO_CODING_TIMESTAMP_MAP_H_
+#endif // WEBRTC_MODULES_VIDEO_CODING_TIMESTAMP_MAP_H_
diff --git a/webrtc/modules/video_coding/main/source/timing.cc b/webrtc/modules/video_coding/timing.cc
index 8d59135876..08dc307524 100644
--- a/webrtc/modules/video_coding/main/source/timing.cc
+++ b/webrtc/modules/video_coding/timing.cc
@@ -8,19 +8,19 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/video_coding/main/source/timing.h"
+#include "webrtc/modules/video_coding/timing.h"
-#include "webrtc/modules/video_coding/main/source/internal_defines.h"
-#include "webrtc/modules/video_coding/main/source/jitter_buffer_common.h"
+#include <algorithm>
+
+#include "webrtc/modules/video_coding/internal_defines.h"
+#include "webrtc/modules/video_coding/jitter_buffer_common.h"
#include "webrtc/system_wrappers/include/clock.h"
#include "webrtc/system_wrappers/include/metrics.h"
#include "webrtc/system_wrappers/include/timestamp_extrapolator.h"
-
namespace webrtc {
-VCMTiming::VCMTiming(Clock* clock,
- VCMTiming* master_timing)
+VCMTiming::VCMTiming(Clock* clock, VCMTiming* master_timing)
: crit_sect_(CriticalSectionWrapper::CreateCriticalSection()),
clock_(clock),
master_(false),
@@ -62,14 +62,16 @@ void VCMTiming::UpdateHistograms() const {
if (elapsed_sec < metrics::kMinRunTimeInSeconds) {
return;
}
- RTC_HISTOGRAM_COUNTS_100("WebRTC.Video.DecodedFramesPerSecond",
+ RTC_HISTOGRAM_COUNTS_SPARSE_100(
+ "WebRTC.Video.DecodedFramesPerSecond",
static_cast<int>((num_decoded_frames_ / elapsed_sec) + 0.5f));
- RTC_HISTOGRAM_PERCENTAGE("WebRTC.Video.DelayedFramesToRenderer",
+ RTC_HISTOGRAM_PERCENTAGE_SPARSE(
+ "WebRTC.Video.DelayedFramesToRenderer",
num_delayed_decoded_frames_ * 100 / num_decoded_frames_);
if (num_delayed_decoded_frames_ > 0) {
- RTC_HISTOGRAM_COUNTS_1000(
+ RTC_HISTOGRAM_COUNTS_SPARSE_1000(
"WebRTC.Video.DelayedFramesToRenderer_AvgDelayInMs",
- sum_missed_render_deadline_ms_ / num_delayed_decoded_frames_);
+ sum_missed_render_deadline_ms_ / num_delayed_decoded_frames_);
}
}
@@ -118,8 +120,8 @@ void VCMTiming::UpdateCurrentDelay(uint32_t frame_timestamp) {
// Not initialized, set current delay to target.
current_delay_ms_ = target_delay_ms;
} else if (target_delay_ms != current_delay_ms_) {
- int64_t delay_diff_ms = static_cast<int64_t>(target_delay_ms) -
- current_delay_ms_;
+ int64_t delay_diff_ms =
+ static_cast<int64_t>(target_delay_ms) - current_delay_ms_;
// Never change the delay with more than 100 ms every second. If we're
// changing the delay in too large steps we will get noticeable freezes. By
// limiting the change we can increase the delay in smaller steps, which
@@ -128,11 +130,13 @@ void VCMTiming::UpdateCurrentDelay(uint32_t frame_timestamp) {
int64_t max_change_ms = 0;
if (frame_timestamp < 0x0000ffff && prev_frame_timestamp_ > 0xffff0000) {
// wrap
- max_change_ms = kDelayMaxChangeMsPerS * (frame_timestamp +
- (static_cast<int64_t>(1) << 32) - prev_frame_timestamp_) / 90000;
+ max_change_ms = kDelayMaxChangeMsPerS *
+ (frame_timestamp + (static_cast<int64_t>(1) << 32) -
+ prev_frame_timestamp_) /
+ 90000;
} else {
max_change_ms = kDelayMaxChangeMsPerS *
- (frame_timestamp - prev_frame_timestamp_) / 90000;
+ (frame_timestamp - prev_frame_timestamp_) / 90000;
}
if (max_change_ms <= 0) {
// Any changes less than 1 ms are truncated and
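The hunk above caps how quickly the target delay may move: at most kDelayMaxChangeMsPerS (100, per the comment) milliseconds of change per second of 90 kHz RTP time, with the elapsed time computed across a 32-bit timestamp wrap when one is detected. A standalone sketch of that computation, with names mirroring the members above:

    #include <cstdint>

    // Sketch: maximum permitted delay change given two RTP timestamps.
    int64_t MaxDelayChangeMs(uint32_t prev_ts, uint32_t ts) {
      const int64_t kDelayMaxChangeMsPerS = 100;
      int64_t elapsed_ticks;
      if (ts < 0x0000ffff && prev_ts > 0xffff0000) {
        // The 32-bit timestamp wrapped between the two frames.
        elapsed_ticks = ts + (static_cast<int64_t>(1) << 32) - prev_ts;
      } else {
        elapsed_ticks = static_cast<int64_t>(ts) - prev_ts;
      }
      // 90 000 ticks per second; scale the per-second budget accordingly.
      return kDelayMaxChangeMsPerS * elapsed_ticks / 90000;
    }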
@@ -153,7 +157,7 @@ void VCMTiming::UpdateCurrentDelay(int64_t render_time_ms,
CriticalSectionScoped cs(crit_sect_);
uint32_t target_delay_ms = TargetDelayInternal();
int64_t delayed_ms = actual_decode_time_ms -
- (render_time_ms - MaxDecodeTimeMs() - render_delay_ms_);
+ (render_time_ms - MaxDecodeTimeMs() - render_delay_ms_);
if (delayed_ms < 0) {
return;
}
@@ -165,13 +169,13 @@ void VCMTiming::UpdateCurrentDelay(int64_t render_time_ms,
}
int32_t VCMTiming::StopDecodeTimer(uint32_t time_stamp,
- int64_t start_time_ms,
+ int32_t decode_time_ms,
int64_t now_ms,
int64_t render_time_ms) {
CriticalSectionScoped cs(crit_sect_);
- int32_t time_diff_ms = codec_timer_.StopTimer(start_time_ms, now_ms);
- assert(time_diff_ms >= 0);
- last_decode_ms_ = time_diff_ms;
+ codec_timer_.MaxFilter(decode_time_ms, now_ms);
+ assert(decode_time_ms >= 0);
+ last_decode_ms_ = decode_time_ms;
// Update stats.
++num_decoded_frames_;
@@ -191,8 +195,8 @@ void VCMTiming::IncomingTimestamp(uint32_t time_stamp, int64_t now_ms) {
ts_extrapolator_->Update(now_ms, time_stamp);
}
-int64_t VCMTiming::RenderTimeMs(uint32_t frame_timestamp, int64_t now_ms)
- const {
+int64_t VCMTiming::RenderTimeMs(uint32_t frame_timestamp,
+ int64_t now_ms) const {
CriticalSectionScoped cs(crit_sect_);
const int64_t render_time_ms = RenderTimeMsInternal(frame_timestamp, now_ms);
return render_time_ms;
@@ -201,7 +205,7 @@ int64_t VCMTiming::RenderTimeMs(uint32_t frame_timestamp, int64_t now_ms)
int64_t VCMTiming::RenderTimeMsInternal(uint32_t frame_timestamp,
int64_t now_ms) const {
int64_t estimated_complete_time_ms =
- ts_extrapolator_->ExtrapolateLocalTime(frame_timestamp);
+ ts_extrapolator_->ExtrapolateLocalTime(frame_timestamp);
if (estimated_complete_time_ms == -1) {
estimated_complete_time_ms = now_ms;
}
@@ -212,19 +216,19 @@ int64_t VCMTiming::RenderTimeMsInternal(uint32_t frame_timestamp,
}
// Must be called from inside a critical section.
-int32_t VCMTiming::MaxDecodeTimeMs(FrameType frame_type /*= kVideoFrameDelta*/)
- const {
+int32_t VCMTiming::MaxDecodeTimeMs(
+ FrameType frame_type /*= kVideoFrameDelta*/) const {
const int32_t decode_time_ms = codec_timer_.RequiredDecodeTimeMs(frame_type);
assert(decode_time_ms >= 0);
return decode_time_ms;
}
-uint32_t VCMTiming::MaxWaitingTime(int64_t render_time_ms, int64_t now_ms)
- const {
+uint32_t VCMTiming::MaxWaitingTime(int64_t render_time_ms,
+ int64_t now_ms) const {
CriticalSectionScoped cs(crit_sect_);
- const int64_t max_wait_time_ms = render_time_ms - now_ms -
- MaxDecodeTimeMs() - render_delay_ms_;
+ const int64_t max_wait_time_ms =
+ render_time_ms - now_ms - MaxDecodeTimeMs() - render_delay_ms_;
if (max_wait_time_ms < 0) {
return 0;
@@ -232,8 +236,8 @@ uint32_t VCMTiming::MaxWaitingTime(int64_t render_time_ms, int64_t now_ms)
return static_cast<uint32_t>(max_wait_time_ms);
}
-bool VCMTiming::EnoughTimeToDecode(uint32_t available_processing_time_ms)
- const {
+bool VCMTiming::EnoughTimeToDecode(
+ uint32_t available_processing_time_ms) const {
CriticalSectionScoped cs(crit_sect_);
int32_t max_decode_time_ms = MaxDecodeTimeMs();
if (max_decode_time_ms < 0) {
@@ -246,7 +250,8 @@ bool VCMTiming::EnoughTimeToDecode(uint32_t available_processing_time_ms)
max_decode_time_ms = 1;
}
return static_cast<int32_t>(available_processing_time_ms) -
- max_decode_time_ms > 0;
+ max_decode_time_ms >
+ 0;
}
uint32_t VCMTiming::TargetVideoDelay() const {
@@ -256,7 +261,7 @@ uint32_t VCMTiming::TargetVideoDelay() const {
uint32_t VCMTiming::TargetDelayInternal() const {
return std::max(min_playout_delay_ms_,
- jitter_delay_ms_ + MaxDecodeTimeMs() + render_delay_ms_);
+ jitter_delay_ms_ + MaxDecodeTimeMs() + render_delay_ms_);
}
void VCMTiming::GetTimings(int* decode_ms,
diff --git a/webrtc/modules/video_coding/main/source/timing.h b/webrtc/modules/video_coding/timing.h
index d3b8fa673f..a4d0cf4543 100644
--- a/webrtc/modules/video_coding/main/source/timing.h
+++ b/webrtc/modules/video_coding/timing.h
@@ -8,11 +8,11 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_TIMING_H_
-#define WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_TIMING_H_
+#ifndef WEBRTC_MODULES_VIDEO_CODING_TIMING_H_
+#define WEBRTC_MODULES_VIDEO_CODING_TIMING_H_
#include "webrtc/base/thread_annotations.h"
-#include "webrtc/modules/video_coding/main/source/codec_timer.h"
+#include "webrtc/modules/video_coding/codec_timer.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
#include "webrtc/typedefs.h"
@@ -25,8 +25,7 @@ class VCMTiming {
public:
// The primary timing component should be passed
// if this is the dual timing component.
- VCMTiming(Clock* clock,
- VCMTiming* master_timing = NULL);
+ explicit VCMTiming(Clock* clock, VCMTiming* master_timing = NULL);
~VCMTiming();
// Resets the timing to the initial state.
@@ -58,7 +57,7 @@ class VCMTiming {
// Stops the decoder timer, should be called when the decoder returns a frame
// or when the decoded frame callback is called.
int32_t StopDecodeTimer(uint32_t time_stamp,
- int64_t start_time_ms,
+ int32_t decode_time_ms,
int64_t now_ms,
int64_t render_time_ms);
@@ -124,4 +123,4 @@ class VCMTiming {
};
} // namespace webrtc
-#endif // WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_TIMING_H_
+#endif // WEBRTC_MODULES_VIDEO_CODING_TIMING_H_
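The StopDecodeTimer() change above shifts the measurement to the caller: instead of passing the decode start time and having VCMTiming subtract it from now_ms, the caller now passes the already-computed decode duration, which timing.cc feeds into the codec timer's max filter. A sketch of the new call pattern, mirroring the updated unit test below (DecodeFrame() is a hypothetical stand-in for the actual decode step):

    int64_t start_ms = clock->TimeInMilliseconds();
    DecodeFrame(frame);  // hypothetical decode call
    int64_t now_ms = clock->TimeInMilliseconds();
    int32_t decode_time_ms = static_cast<int32_t>(now_ms - start_ms);
    timing->StopDecodeTimer(frame.timestamp(), decode_time_ms, now_ms,
                            timing->RenderTimeMs(frame.timestamp(), now_ms));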
diff --git a/webrtc/modules/video_coding/main/source/timing_unittest.cc b/webrtc/modules/video_coding/timing_unittest.cc
index 694a600c2a..2e8df83683 100644
--- a/webrtc/modules/video_coding/main/source/timing_unittest.cc
+++ b/webrtc/modules/video_coding/timing_unittest.cc
@@ -14,10 +14,10 @@
#include "testing/gtest/include/gtest/gtest.h"
-#include "webrtc/modules/video_coding/main/interface/video_coding.h"
-#include "webrtc/modules/video_coding/main/source/internal_defines.h"
-#include "webrtc/modules/video_coding/main/source/timing.h"
-#include "webrtc/modules/video_coding/main/test/test_util.h"
+#include "webrtc/modules/video_coding/include/video_coding.h"
+#include "webrtc/modules/video_coding/internal_defines.h"
+#include "webrtc/modules/video_coding/timing.h"
+#include "webrtc/modules/video_coding/test/test_util.h"
#include "webrtc/system_wrappers/include/clock.h"
#include "webrtc/system_wrappers/include/trace.h"
#include "webrtc/test/testsupport/fileutils.h"
@@ -55,8 +55,9 @@ TEST(ReceiverTiming, Tests) {
clock.AdvanceTimeMilliseconds(1000);
timing.SetJitterDelay(jitterDelayMs);
timing.UpdateCurrentDelay(timeStamp);
- waitTime = timing.MaxWaitingTime(timing.RenderTimeMs(
- timeStamp, clock.TimeInMilliseconds()), clock.TimeInMilliseconds());
+ waitTime = timing.MaxWaitingTime(
+ timing.RenderTimeMs(timeStamp, clock.TimeInMilliseconds()),
+ clock.TimeInMilliseconds());
// Since we gradually increase the delay we only get 100 ms every second.
EXPECT_EQ(jitterDelayMs - 10, waitTime);
@@ -85,9 +86,10 @@ TEST(ReceiverTiming, Tests) {
for (int i = 0; i < 10; i++) {
int64_t startTimeMs = clock.TimeInMilliseconds();
clock.AdvanceTimeMilliseconds(10);
- timing.StopDecodeTimer(timeStamp, startTimeMs,
- clock.TimeInMilliseconds(), timing.RenderTimeMs(
- timeStamp, clock.TimeInMilliseconds()));
+ timing.StopDecodeTimer(
+ timeStamp, clock.TimeInMilliseconds() - startTimeMs,
+ clock.TimeInMilliseconds(),
+ timing.RenderTimeMs(timeStamp, clock.TimeInMilliseconds()));
timeStamp += 90000 / 25;
clock.AdvanceTimeMilliseconds(1000 / 25 - 10);
timing.IncomingTimestamp(timeStamp, clock.TimeInMilliseconds());
@@ -105,7 +107,7 @@ TEST(ReceiverTiming, Tests) {
uint32_t minTotalDelayMs = 200;
timing.set_min_playout_delay(minTotalDelayMs);
clock.AdvanceTimeMilliseconds(5000);
- timeStamp += 5*90000;
+ timeStamp += 5 * 90000;
timing.UpdateCurrentDelay(timeStamp);
const int kRenderDelayMs = 10;
timing.set_render_delay(kRenderDelayMs);
@@ -121,7 +123,7 @@ TEST(ReceiverTiming, Tests) {
// Reset playout delay.
timing.set_min_playout_delay(0);
clock.AdvanceTimeMilliseconds(5000);
- timeStamp += 5*90000;
+ timeStamp += 5 * 90000;
timing.UpdateCurrentDelay(timeStamp);
}
@@ -135,8 +137,8 @@ TEST(ReceiverTiming, WrapAround) {
timing.IncomingTimestamp(timestamp, clock.TimeInMilliseconds());
clock.AdvanceTimeMilliseconds(1000 / kFramerate);
timestamp += 90000 / kFramerate;
- int64_t render_time = timing.RenderTimeMs(0xFFFFFFFFu,
- clock.TimeInMilliseconds());
+ int64_t render_time =
+ timing.RenderTimeMs(0xFFFFFFFFu, clock.TimeInMilliseconds());
EXPECT_EQ(3 * 1000 / kFramerate, render_time);
render_time = timing.RenderTimeMs(89u, // One second later in 90 kHz.
clock.TimeInMilliseconds());
diff --git a/webrtc/modules/video_coding/utility/frame_dropper.cc b/webrtc/modules/video_coding/utility/frame_dropper.cc
index 5262c5b88a..a0aa67be4e 100644
--- a/webrtc/modules/video_coding/utility/frame_dropper.cc
+++ b/webrtc/modules/video_coding/utility/frame_dropper.cc
@@ -8,12 +8,11 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/video_coding/utility/include/frame_dropper.h"
+#include "webrtc/modules/video_coding/utility/frame_dropper.h"
#include "webrtc/system_wrappers/include/trace.h"
-namespace webrtc
-{
+namespace webrtc {
const float kDefaultKeyFrameSizeAvgKBits = 0.9f;
const float kDefaultKeyFrameRatio = 0.99f;
@@ -22,339 +21,266 @@ const float kDefaultDropRatioMax = 0.96f;
const float kDefaultMaxTimeToDropFrames = 4.0f; // In seconds.
FrameDropper::FrameDropper()
-:
-_keyFrameSizeAvgKbits(kDefaultKeyFrameSizeAvgKBits),
-_keyFrameRatio(kDefaultKeyFrameRatio),
-_dropRatio(kDefaultDropRatioAlpha, kDefaultDropRatioMax),
-_enabled(true),
-_max_time_drops(kDefaultMaxTimeToDropFrames)
-{
- Reset();
+ : _keyFrameSizeAvgKbits(kDefaultKeyFrameSizeAvgKBits),
+ _keyFrameRatio(kDefaultKeyFrameRatio),
+ _dropRatio(kDefaultDropRatioAlpha, kDefaultDropRatioMax),
+ _enabled(true),
+ _max_time_drops(kDefaultMaxTimeToDropFrames) {
+ Reset();
}
FrameDropper::FrameDropper(float max_time_drops)
-:
-_keyFrameSizeAvgKbits(kDefaultKeyFrameSizeAvgKBits),
-_keyFrameRatio(kDefaultKeyFrameRatio),
-_dropRatio(kDefaultDropRatioAlpha, kDefaultDropRatioMax),
-_enabled(true),
-_max_time_drops(max_time_drops)
-{
- Reset();
+ : _keyFrameSizeAvgKbits(kDefaultKeyFrameSizeAvgKBits),
+ _keyFrameRatio(kDefaultKeyFrameRatio),
+ _dropRatio(kDefaultDropRatioAlpha, kDefaultDropRatioMax),
+ _enabled(true),
+ _max_time_drops(max_time_drops) {
+ Reset();
}
-void
-FrameDropper::Reset()
-{
- _keyFrameRatio.Reset(0.99f);
- _keyFrameRatio.Apply(1.0f, 1.0f/300.0f); // 1 key frame every 10th second in 30 fps
- _keyFrameSizeAvgKbits.Reset(0.9f);
- _keyFrameCount = 0;
- _accumulator = 0.0f;
- _accumulatorMax = 150.0f; // assume 300 kb/s and 0.5 s window
- _targetBitRate = 300.0f;
- _incoming_frame_rate = 30;
- _keyFrameSpreadFrames = 0.5f * _incoming_frame_rate;
- _dropNext = false;
- _dropRatio.Reset(0.9f);
- _dropRatio.Apply(0.0f, 0.0f); // Initialize to 0
- _dropCount = 0;
- _windowSize = 0.5f;
- _wasBelowMax = true;
- _fastMode = false; // start with normal (non-aggressive) mode
- // Cap for the encoder buffer level/accumulator, in secs.
- _cap_buffer_size = 3.0f;
- // Cap on maximum amount of dropped frames between kept frames, in secs.
- _max_time_drops = 4.0f;
+void FrameDropper::Reset() {
+ _keyFrameRatio.Reset(0.99f);
+ _keyFrameRatio.Apply(
+      1.0f, 1.0f / 300.0f);  // one key frame every 10 seconds at 30 fps
+ _keyFrameSizeAvgKbits.Reset(0.9f);
+ _keyFrameCount = 0;
+ _accumulator = 0.0f;
+ _accumulatorMax = 150.0f; // assume 300 kb/s and 0.5 s window
+ _targetBitRate = 300.0f;
+ _incoming_frame_rate = 30;
+ _keyFrameSpreadFrames = 0.5f * _incoming_frame_rate;
+ _dropNext = false;
+ _dropRatio.Reset(0.9f);
+ _dropRatio.Apply(0.0f, 0.0f); // Initialize to 0
+ _dropCount = 0;
+ _windowSize = 0.5f;
+ _wasBelowMax = true;
+ _fastMode = false; // start with normal (non-aggressive) mode
+ // Cap for the encoder buffer level/accumulator, in secs.
+ _cap_buffer_size = 3.0f;
+ // Cap on maximum amount of dropped frames between kept frames, in secs.
+ _max_time_drops = 4.0f;
}
-void
-FrameDropper::Enable(bool enable)
-{
- _enabled = enable;
+void FrameDropper::Enable(bool enable) {
+ _enabled = enable;
}
-void
-FrameDropper::Fill(size_t frameSizeBytes, bool deltaFrame)
-{
- if (!_enabled)
- {
- return;
- }
- float frameSizeKbits = 8.0f * static_cast<float>(frameSizeBytes) / 1000.0f;
- if (!deltaFrame && !_fastMode) // fast mode does not treat key-frames any different
- {
- _keyFrameSizeAvgKbits.Apply(1, frameSizeKbits);
- _keyFrameRatio.Apply(1.0, 1.0);
- if (frameSizeKbits > _keyFrameSizeAvgKbits.filtered())
- {
- // Remove the average key frame size since we
- // compensate for key frames when adding delta
- // frames.
- frameSizeKbits -= _keyFrameSizeAvgKbits.filtered();
- }
- else
- {
- // Shouldn't be negative, so zero is the lower bound.
- frameSizeKbits = 0;
- }
- if (_keyFrameRatio.filtered() > 1e-5 &&
- 1 / _keyFrameRatio.filtered() < _keyFrameSpreadFrames)
- {
- // We are sending key frames more often than our upper bound for
- // how much we allow the key frame compensation to be spread
- // out in time. Therefor we must use the key frame ratio rather
- // than keyFrameSpreadFrames.
- _keyFrameCount =
- static_cast<int32_t>(1 / _keyFrameRatio.filtered() + 0.5);
- }
- else
- {
- // Compensate for the key frame the following frames
- _keyFrameCount = static_cast<int32_t>(_keyFrameSpreadFrames + 0.5);
- }
+void FrameDropper::Fill(size_t frameSizeBytes, bool deltaFrame) {
+ if (!_enabled) {
+ return;
+ }
+ float frameSizeKbits = 8.0f * static_cast<float>(frameSizeBytes) / 1000.0f;
+ if (!deltaFrame &&
+      !_fastMode) {  // fast mode does not treat key frames any differently
+ _keyFrameSizeAvgKbits.Apply(1, frameSizeKbits);
+ _keyFrameRatio.Apply(1.0, 1.0);
+ if (frameSizeKbits > _keyFrameSizeAvgKbits.filtered()) {
+ // Remove the average key frame size since we
+ // compensate for key frames when adding delta
+ // frames.
+ frameSizeKbits -= _keyFrameSizeAvgKbits.filtered();
+ } else {
+ // Shouldn't be negative, so zero is the lower bound.
+ frameSizeKbits = 0;
}
- else
- {
- // Decrease the keyFrameRatio
- _keyFrameRatio.Apply(1.0, 0.0);
+ if (_keyFrameRatio.filtered() > 1e-5 &&
+ 1 / _keyFrameRatio.filtered() < _keyFrameSpreadFrames) {
+ // We are sending key frames more often than our upper bound for
+ // how much we allow the key frame compensation to be spread
+      // out in time. Therefore we must use the key frame ratio rather
+ // than keyFrameSpreadFrames.
+ _keyFrameCount =
+ static_cast<int32_t>(1 / _keyFrameRatio.filtered() + 0.5);
+ } else {
+ // Compensate for the key frame the following frames
+ _keyFrameCount = static_cast<int32_t>(_keyFrameSpreadFrames + 0.5);
}
- // Change the level of the accumulator (bucket)
- _accumulator += frameSizeKbits;
- CapAccumulator();
+ } else {
+ // Decrease the keyFrameRatio
+ _keyFrameRatio.Apply(1.0, 0.0);
+ }
+ // Change the level of the accumulator (bucket)
+ _accumulator += frameSizeKbits;
+ CapAccumulator();
}
-void
-FrameDropper::Leak(uint32_t inputFrameRate)
-{
- if (!_enabled)
- {
- return;
- }
- if (inputFrameRate < 1)
- {
- return;
- }
- if (_targetBitRate < 0.0f)
- {
- return;
- }
- _keyFrameSpreadFrames = 0.5f * inputFrameRate;
- // T is the expected bits per frame (target). If all frames were the same size,
- // we would get T bits per frame. Notice that T is also weighted to be able to
- // force a lower frame rate if wanted.
- float T = _targetBitRate / inputFrameRate;
- if (_keyFrameCount > 0)
- {
- // Perform the key frame compensation
- if (_keyFrameRatio.filtered() > 0 &&
- 1 / _keyFrameRatio.filtered() < _keyFrameSpreadFrames)
- {
- T -= _keyFrameSizeAvgKbits.filtered() * _keyFrameRatio.filtered();
- }
- else
- {
- T -= _keyFrameSizeAvgKbits.filtered() / _keyFrameSpreadFrames;
- }
- _keyFrameCount--;
- }
- _accumulator -= T;
- if (_accumulator < 0.0f)
- {
- _accumulator = 0.0f;
+void FrameDropper::Leak(uint32_t inputFrameRate) {
+ if (!_enabled) {
+ return;
+ }
+ if (inputFrameRate < 1) {
+ return;
+ }
+ if (_targetBitRate < 0.0f) {
+ return;
+ }
+ _keyFrameSpreadFrames = 0.5f * inputFrameRate;
+  // T is the expected bits per frame (target). If all frames were the same
+  // size, we would get T bits per frame. Notice that T is also weighted to
+  // be able to force a lower frame rate if wanted.
+ float T = _targetBitRate / inputFrameRate;
+ if (_keyFrameCount > 0) {
+ // Perform the key frame compensation
+ if (_keyFrameRatio.filtered() > 0 &&
+ 1 / _keyFrameRatio.filtered() < _keyFrameSpreadFrames) {
+ T -= _keyFrameSizeAvgKbits.filtered() * _keyFrameRatio.filtered();
+ } else {
+ T -= _keyFrameSizeAvgKbits.filtered() / _keyFrameSpreadFrames;
}
- UpdateRatio();
+ _keyFrameCount--;
+ }
+ _accumulator -= T;
+ if (_accumulator < 0.0f) {
+ _accumulator = 0.0f;
+ }
+ UpdateRatio();
}
-void
-FrameDropper::UpdateNack(uint32_t nackBytes)
-{
- if (!_enabled)
- {
- return;
- }
- _accumulator += static_cast<float>(nackBytes) * 8.0f / 1000.0f;
+void FrameDropper::UpdateNack(uint32_t nackBytes) {
+ if (!_enabled) {
+ return;
+ }
+ _accumulator += static_cast<float>(nackBytes) * 8.0f / 1000.0f;
}
-void
-FrameDropper::FillBucket(float inKbits, float outKbits)
-{
- _accumulator += (inKbits - outKbits);
+void FrameDropper::FillBucket(float inKbits, float outKbits) {
+ _accumulator += (inKbits - outKbits);
}
-void
-FrameDropper::UpdateRatio()
-{
- if (_accumulator > 1.3f * _accumulatorMax)
- {
- // Too far above accumulator max, react faster
- _dropRatio.UpdateBase(0.8f);
+void FrameDropper::UpdateRatio() {
+ if (_accumulator > 1.3f * _accumulatorMax) {
+ // Too far above accumulator max, react faster
+ _dropRatio.UpdateBase(0.8f);
+ } else {
+ // Go back to normal reaction
+ _dropRatio.UpdateBase(0.9f);
+ }
+ if (_accumulator > _accumulatorMax) {
+ // We are above accumulator max, and should ideally
+ // drop a frame. Increase the dropRatio and drop
+ // the frame later.
+ if (_wasBelowMax) {
+ _dropNext = true;
}
- else
- {
- // Go back to normal reaction
- _dropRatio.UpdateBase(0.9f);
+ if (_fastMode) {
+ // always drop in aggressive mode
+ _dropNext = true;
}
- if (_accumulator > _accumulatorMax)
- {
- // We are above accumulator max, and should ideally
- // drop a frame. Increase the dropRatio and drop
- // the frame later.
- if (_wasBelowMax)
- {
- _dropNext = true;
- }
- if (_fastMode)
- {
- // always drop in aggressive mode
- _dropNext = true;
- }
- _dropRatio.Apply(1.0f, 1.0f);
- _dropRatio.UpdateBase(0.9f);
- }
- else
- {
- _dropRatio.Apply(1.0f, 0.0f);
- }
- _wasBelowMax = _accumulator < _accumulatorMax;
+ _dropRatio.Apply(1.0f, 1.0f);
+ _dropRatio.UpdateBase(0.9f);
+ } else {
+ _dropRatio.Apply(1.0f, 0.0f);
+ }
+ _wasBelowMax = _accumulator < _accumulatorMax;
}
-// This function signals when to drop frames to the caller. It makes use of the dropRatio
+// This function signals when to drop frames to the caller. It uses dropRatio
// to smooth out the drops over time.
-bool
-FrameDropper::DropFrame()
-{
- if (!_enabled)
- {
- return false;
+bool FrameDropper::DropFrame() {
+ if (!_enabled) {
+ return false;
+ }
+ if (_dropNext) {
+ _dropNext = false;
+ _dropCount = 0;
+ }
+
+ if (_dropRatio.filtered() >= 0.5f) { // Drops per keep
+ // limit is the number of frames we should drop between each kept frame
+ // to keep our drop ratio. limit is positive in this case.
+ float denom = 1.0f - _dropRatio.filtered();
+ if (denom < 1e-5) {
+ denom = 1e-5f;
+ }
+ int32_t limit = static_cast<int32_t>(1.0f / denom - 1.0f + 0.5f);
+ // Put a bound on the max amount of dropped frames between each kept
+ // frame, in terms of frame rate and window size (secs).
+ int max_limit = static_cast<int>(_incoming_frame_rate * _max_time_drops);
+ if (limit > max_limit) {
+ limit = max_limit;
}
- if (_dropNext)
- {
- _dropNext = false;
+ if (_dropCount < 0) {
+ // Reset the _dropCount since it was negative and should be positive.
+ if (_dropRatio.filtered() > 0.4f) {
+ _dropCount = -_dropCount;
+ } else {
_dropCount = 0;
+ }
}
-
- if (_dropRatio.filtered() >= 0.5f) // Drops per keep
- {
- // limit is the number of frames we should drop between each kept frame
- // to keep our drop ratio. limit is positive in this case.
- float denom = 1.0f - _dropRatio.filtered();
- if (denom < 1e-5)
- {
- denom = (float)1e-5;
- }
- int32_t limit = static_cast<int32_t>(1.0f / denom - 1.0f + 0.5f);
- // Put a bound on the max amount of dropped frames between each kept
- // frame, in terms of frame rate and window size (secs).
- int max_limit = static_cast<int>(_incoming_frame_rate *
- _max_time_drops);
- if (limit > max_limit) {
- limit = max_limit;
- }
- if (_dropCount < 0)
- {
- // Reset the _dropCount since it was negative and should be positive.
- if (_dropRatio.filtered() > 0.4f)
- {
- _dropCount = -_dropCount;
- }
- else
- {
- _dropCount = 0;
- }
- }
- if (_dropCount < limit)
- {
- // As long we are below the limit we should drop frames.
- _dropCount++;
- return true;
- }
- else
- {
- // Only when we reset _dropCount a frame should be kept.
- _dropCount = 0;
- return false;
- }
+ if (_dropCount < limit) {
+ // As long we are below the limit we should drop frames.
+ _dropCount++;
+ return true;
+ } else {
+ // Only when we reset _dropCount a frame should be kept.
+ _dropCount = 0;
+ return false;
}
- else if (_dropRatio.filtered() > 0.0f &&
- _dropRatio.filtered() < 0.5f) // Keeps per drop
- {
- // limit is the number of frames we should keep between each drop
- // in order to keep the drop ratio. limit is negative in this case,
- // and the _dropCount is also negative.
- float denom = _dropRatio.filtered();
- if (denom < 1e-5)
- {
- denom = (float)1e-5;
- }
- int32_t limit = -static_cast<int32_t>(1.0f / denom - 1.0f + 0.5f);
- if (_dropCount > 0)
- {
- // Reset the _dropCount since we have a positive
- // _dropCount, and it should be negative.
- if (_dropRatio.filtered() < 0.6f)
- {
- _dropCount = -_dropCount;
- }
- else
- {
- _dropCount = 0;
- }
- }
- if (_dropCount > limit)
- {
- if (_dropCount == 0)
- {
- // Drop frames when we reset _dropCount.
- _dropCount--;
- return true;
- }
- else
- {
- // Keep frames as long as we haven't reached limit.
- _dropCount--;
- return false;
- }
- }
- else
- {
- _dropCount = 0;
- return false;
- }
+ } else if (_dropRatio.filtered() > 0.0f &&
+ _dropRatio.filtered() < 0.5f) { // Keeps per drop
+ // limit is the number of frames we should keep between each drop
+ // in order to keep the drop ratio. limit is negative in this case,
+ // and the _dropCount is also negative.
+ float denom = _dropRatio.filtered();
+ if (denom < 1e-5) {
+ denom = 1e-5f;
}
- _dropCount = 0;
- return false;
+ int32_t limit = -static_cast<int32_t>(1.0f / denom - 1.0f + 0.5f);
+ if (_dropCount > 0) {
+ // Reset the _dropCount since we have a positive
+ // _dropCount, and it should be negative.
+ if (_dropRatio.filtered() < 0.6f) {
+ _dropCount = -_dropCount;
+ } else {
+ _dropCount = 0;
+ }
+ }
+ if (_dropCount > limit) {
+ if (_dropCount == 0) {
+ // Drop frames when we reset _dropCount.
+ _dropCount--;
+ return true;
+ } else {
+ // Keep frames as long as we haven't reached limit.
+ _dropCount--;
+ return false;
+ }
+ } else {
+ _dropCount = 0;
+ return false;
+ }
+ }
+ _dropCount = 0;
+ return false;
- // A simpler version, unfiltered and quicker
- //bool dropNext = _dropNext;
- //_dropNext = false;
- //return dropNext;
+ // A simpler version, unfiltered and quicker
+ // bool dropNext = _dropNext;
+ // _dropNext = false;
+ // return dropNext;
}
-void
-FrameDropper::SetRates(float bitRate, float incoming_frame_rate)
-{
- // Bit rate of -1 means infinite bandwidth.
- _accumulatorMax = bitRate * _windowSize; // bitRate * windowSize (in seconds)
- if (_targetBitRate > 0.0f && bitRate < _targetBitRate && _accumulator > _accumulatorMax)
- {
- // Rescale the accumulator level if the accumulator max decreases
- _accumulator = bitRate / _targetBitRate * _accumulator;
- }
- _targetBitRate = bitRate;
- CapAccumulator();
- _incoming_frame_rate = incoming_frame_rate;
+void FrameDropper::SetRates(float bitRate, float incoming_frame_rate) {
+ // Bit rate of -1 means infinite bandwidth.
+ _accumulatorMax = bitRate * _windowSize; // bitRate * windowSize (in seconds)
+ if (_targetBitRate > 0.0f && bitRate < _targetBitRate &&
+ _accumulator > _accumulatorMax) {
+ // Rescale the accumulator level if the accumulator max decreases
+ _accumulator = bitRate / _targetBitRate * _accumulator;
+ }
+ _targetBitRate = bitRate;
+ CapAccumulator();
+ _incoming_frame_rate = incoming_frame_rate;
}
-float
-FrameDropper::ActualFrameRate(uint32_t inputFrameRate) const
-{
- if (!_enabled)
- {
- return static_cast<float>(inputFrameRate);
- }
- return inputFrameRate * (1.0f - _dropRatio.filtered());
+float FrameDropper::ActualFrameRate(uint32_t inputFrameRate) const {
+ if (!_enabled) {
+ return static_cast<float>(inputFrameRate);
+ }
+ return inputFrameRate * (1.0f - _dropRatio.filtered());
}
// Put a cap on the accumulator, i.e., don't let it grow beyond some level.
@@ -366,5 +292,4 @@ void FrameDropper::CapAccumulator() {
_accumulator = max_accumulator;
}
}
-
-}
+} // namespace webrtc
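The drops-per-keep branch in DropFrame() above turns the filtered drop ratio r (>= 0.5) into an integer run length, limit = round(1/(1 - r) - 1), the number of consecutive frames to drop between kept frames; for r below 0.5 the mirrored branch instead keeps round(1/r - 1) frames between single drops. A standalone sketch of the first branch with a worked value (r = 0.75 yields 3: drop three frames, keep one):

    #include <algorithm>
    #include <cstdint>

    // Sketch: consecutive drops between kept frames for drop ratio r in
    // [0.5, 1), bounded by frame_rate * max_time_drops as in DropFrame().
    int32_t DropsPerKeep(float r, float frame_rate, float max_time_drops) {
      float denom = 1.0f - r;
      if (denom < 1e-5f)
        denom = 1e-5f;
      int32_t limit = static_cast<int32_t>(1.0f / denom - 1.0f + 0.5f);
      int32_t max_limit = static_cast<int32_t>(frame_rate * max_time_drops);
      return std::min(limit, max_limit);
    }
    // DropsPerKeep(0.75f, 30.0f, 4.0f) == 3: drop 3, keep 1, repeat.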
diff --git a/webrtc/modules/video_coding/utility/frame_dropper.h b/webrtc/modules/video_coding/utility/frame_dropper.h
new file mode 100644
index 0000000000..7ec85ea880
--- /dev/null
+++ b/webrtc/modules/video_coding/utility/frame_dropper.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_UTILITY_FRAME_DROPPER_H_
+#define WEBRTC_MODULES_VIDEO_CODING_UTILITY_FRAME_DROPPER_H_
+
+#include <cstddef>
+
+#include "webrtc/base/exp_filter.h"
+#include "webrtc/typedefs.h"
+
+namespace webrtc {
+
+// The Frame Dropper implements a variant of the leaky bucket algorithm
+// for keeping track of when to drop frames to avoid bit rate
+// overuse when the encoder can't keep to its target bit rate.
+class FrameDropper {
+ public:
+ FrameDropper();
+ explicit FrameDropper(float max_time_drops);
+ virtual ~FrameDropper() {}
+
+ // Resets the FrameDropper to its initial state.
+ // This means that the frameRateWeight is set to its
+ // default value as well.
+ virtual void Reset();
+
+ virtual void Enable(bool enable);
+ // Answers the question if it's time to drop a frame
+ // if we want to reach a given frame rate. Must be
+ // called for every frame.
+ //
+ // Return value : True if we should drop the current frame
+ virtual bool DropFrame();
+ // Updates the FrameDropper with the size of the latest encoded
+ // frame. The FrameDropper calculates a new drop ratio (can be
+ // seen as the probability to drop a frame) and updates its
+ // internal statistics.
+ //
+ // Input:
+ // - frameSizeBytes : The size of the latest frame
+ // returned from the encoder.
+ // - deltaFrame : True if the encoder returned
+  //                 a delta frame (false means a key frame).
+ virtual void Fill(size_t frameSizeBytes, bool deltaFrame);
+
+ virtual void Leak(uint32_t inputFrameRate);
+
+ void UpdateNack(uint32_t nackBytes);
+
+ // Sets the target bit rate and the frame rate produced by
+ // the camera.
+ //
+ // Input:
+ // - bitRate : The target bit rate
+ virtual void SetRates(float bitRate, float incoming_frame_rate);
+
+ // Return value : The current average frame rate produced
+ // if the DropFrame() function is used as
+ // instruction of when to drop frames.
+ virtual float ActualFrameRate(uint32_t inputFrameRate) const;
+
+ private:
+ void FillBucket(float inKbits, float outKbits);
+ void UpdateRatio();
+ void CapAccumulator();
+
+ rtc::ExpFilter _keyFrameSizeAvgKbits;
+ rtc::ExpFilter _keyFrameRatio;
+ float _keyFrameSpreadFrames;
+ int32_t _keyFrameCount;
+ float _accumulator;
+ float _accumulatorMax;
+ float _targetBitRate;
+ bool _dropNext;
+ rtc::ExpFilter _dropRatio;
+ int32_t _dropCount;
+ float _windowSize;
+ float _incoming_frame_rate;
+ bool _wasBelowMax;
+ bool _enabled;
+ bool _fastMode;
+ float _cap_buffer_size;
+ float _max_time_drops;
+};  // end of FrameDropper class
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_CODING_UTILITY_FRAME_DROPPER_H_
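A minimal usage sketch of the API above, following the leaky-bucket contract stated in the comments: SetRates() sizes the bucket, Leak() drains it at the target rate once per frame interval, DropFrame() is consulted for every frame, and Fill() adds each encoded frame's bits. The encoder call is a hypothetical stand-in, and the ordering is one plausible arrangement rather than a documented requirement:

    #include "webrtc/modules/video_coding/utility/frame_dropper.h"

    void EncodeNextFrame(size_t* size_bytes, bool* delta_frame);  // hypothetical

    void EncodeLoop(webrtc::FrameDropper* dropper) {
      dropper->SetRates(300.0f /* kbps */, 30.0f /* fps */);
      for (;;) {
        dropper->Leak(30);  // drain at the incoming frame rate
        if (dropper->DropFrame())
          continue;  // skip encoding this frame
        size_t size_bytes;
        bool delta_frame;
        EncodeNextFrame(&size_bytes, &delta_frame);
        dropper->Fill(size_bytes, delta_frame);  // add the frame's bits
      }
    }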
diff --git a/webrtc/modules/video_coding/utility/include/frame_dropper.h b/webrtc/modules/video_coding/utility/include/frame_dropper.h
deleted file mode 100644
index 2b78a7264f..0000000000
--- a/webrtc/modules/video_coding/utility/include/frame_dropper.h
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef WEBRTC_MODULES_VIDEO_CODING_UTILITY_INCLUDE_FRAME_DROPPER_H_
-#define WEBRTC_MODULES_VIDEO_CODING_UTILITY_INCLUDE_FRAME_DROPPER_H_
-
-#include <cstddef>
-
-#include "webrtc/base/exp_filter.h"
-#include "webrtc/typedefs.h"
-
-namespace webrtc
-{
-
-// The Frame Dropper implements a variant of the leaky bucket algorithm
-// for keeping track of when to drop frames to avoid bit rate
-// over use when the encoder can't keep its bit rate.
-class FrameDropper
-{
-public:
- FrameDropper();
- explicit FrameDropper(float max_time_drops);
- virtual ~FrameDropper() {}
-
- // Resets the FrameDropper to its initial state.
- // This means that the frameRateWeight is set to its
- // default value as well.
- virtual void Reset();
-
- virtual void Enable(bool enable);
- // Answers the question if it's time to drop a frame
- // if we want to reach a given frame rate. Must be
- // called for every frame.
- //
- // Return value : True if we should drop the current frame
- virtual bool DropFrame();
- // Updates the FrameDropper with the size of the latest encoded
- // frame. The FrameDropper calculates a new drop ratio (can be
- // seen as the probability to drop a frame) and updates its
- // internal statistics.
- //
- // Input:
- // - frameSizeBytes : The size of the latest frame
- // returned from the encoder.
- // - deltaFrame : True if the encoder returned
- // a key frame.
- virtual void Fill(size_t frameSizeBytes, bool deltaFrame);
-
- virtual void Leak(uint32_t inputFrameRate);
-
- void UpdateNack(uint32_t nackBytes);
-
- // Sets the target bit rate and the frame rate produced by
- // the camera.
- //
- // Input:
- // - bitRate : The target bit rate
- virtual void SetRates(float bitRate, float incoming_frame_rate);
-
- // Return value : The current average frame rate produced
- // if the DropFrame() function is used as
- // instruction of when to drop frames.
- virtual float ActualFrameRate(uint32_t inputFrameRate) const;
-
-private:
- void FillBucket(float inKbits, float outKbits);
- void UpdateRatio();
- void CapAccumulator();
-
- rtc::ExpFilter _keyFrameSizeAvgKbits;
- rtc::ExpFilter _keyFrameRatio;
- float _keyFrameSpreadFrames;
- int32_t _keyFrameCount;
- float _accumulator;
- float _accumulatorMax;
- float _targetBitRate;
- bool _dropNext;
- rtc::ExpFilter _dropRatio;
- int32_t _dropCount;
- float _windowSize;
- float _incoming_frame_rate;
- bool _wasBelowMax;
- bool _enabled;
- bool _fastMode;
- float _cap_buffer_size;
- float _max_time_drops;
-}; // end of VCMFrameDropper class
-
-} // namespace webrtc
-
-#endif // WEBRTC_MODULES_VIDEO_CODING_UTILITY_INCLUDE_FRAME_DROPPER_H_
diff --git a/webrtc/modules/video_coding/utility/include/mock/mock_frame_dropper.h b/webrtc/modules/video_coding/utility/include/mock/mock_frame_dropper.h
deleted file mode 100644
index 1e31e5442a..0000000000
--- a/webrtc/modules/video_coding/utility/include/mock/mock_frame_dropper.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-#ifndef WEBRTC_MODULES_VIDEO_CODING_UTILITY_INCLUDE_MOCK_MOCK_FRAME_DROPPER_H_
-#define WEBRTC_MODULES_VIDEO_CODING_UTILITY_INCLUDE_MOCK_MOCK_FRAME_DROPPER_H_
-
-#include <string>
-
-#include "testing/gmock/include/gmock/gmock.h"
-#include "webrtc/modules/video_coding/utility/include/frame_dropper.h"
-#include "webrtc/typedefs.h"
-
-namespace webrtc {
-
-class MockFrameDropper : public FrameDropper {
- public:
- MOCK_METHOD0(Reset,
- void());
- MOCK_METHOD1(Enable,
- void(bool enable));
- MOCK_METHOD0(DropFrame,
- bool());
- MOCK_METHOD2(Fill,
- void(size_t frameSizeBytes, bool deltaFrame));
- MOCK_METHOD1(Leak,
- void(uint32_t inputFrameRate));
- MOCK_METHOD2(SetRates,
- void(float bitRate, float incoming_frame_rate));
- MOCK_CONST_METHOD1(ActualFrameRate,
- float(uint32_t inputFrameRate));
-};
-
-} // namespace webrtc
-
-#endif // WEBRTC_MODULES_VIDEO_CODING_UTILITY_INCLUDE_MOCK_MOCK_FRAME_DROPPER_H_
diff --git a/webrtc/modules/video_coding/utility/include/vp8_header_parser.h b/webrtc/modules/video_coding/utility/include/vp8_header_parser.h
deleted file mode 100644
index 88796ecd0e..0000000000
--- a/webrtc/modules/video_coding/utility/include/vp8_header_parser.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef WEBRTC_MODULES_VIDEO_CODING_UTILITY_VP8_PARSE_HEADER_H_
-#define WEBRTC_MODULES_VIDEO_CODING_UTILITY_VP8_PARSE_HEADER_H_
-
-namespace webrtc {
-
-namespace vp8 {
-
-enum {
- MB_FEATURE_TREE_PROBS = 3,
- NUM_MB_SEGMENTS = 4,
- NUM_REF_LF_DELTAS = 4,
- NUM_MODE_LF_DELTAS = 4,
-};
-
-typedef struct VP8BitReader VP8BitReader;
-struct VP8BitReader {
- // Boolean decoder.
- uint32_t value_; // Current value.
- uint32_t range_; // Current range minus 1. In [127, 254] interval.
- int bits_; // Number of valid bits left.
- // Read buffer.
- const uint8_t* buf_; // Next byte to be read.
- const uint8_t* buf_end_; // End of read buffer.
- int eof_; // True if input is exhausted.
-};
-
-const uint8_t kVP8Log2Range[128] = {
- 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4,
- 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 0
-};
-
-// range = ((range - 1) << kVP8Log2Range[range]) + 1
-const uint8_t kVP8NewRange[128] = {
- 127, 127, 191, 127, 159, 191, 223, 127,
- 143, 159, 175, 191, 207, 223, 239, 127,
- 135, 143, 151, 159, 167, 175, 183, 191,
- 199, 207, 215, 223, 231, 239, 247, 127,
- 131, 135, 139, 143, 147, 151, 155, 159,
- 163, 167, 171, 175, 179, 183, 187, 191,
- 195, 199, 203, 207, 211, 215, 219, 223,
- 227, 231, 235, 239, 243, 247, 251, 127,
- 129, 131, 133, 135, 137, 139, 141, 143,
- 145, 147, 149, 151, 153, 155, 157, 159,
- 161, 163, 165, 167, 169, 171, 173, 175,
- 177, 179, 181, 183, 185, 187, 189, 191,
- 193, 195, 197, 199, 201, 203, 205, 207,
- 209, 211, 213, 215, 217, 219, 221, 223,
- 225, 227, 229, 231, 233, 235, 237, 239,
- 241, 243, 245, 247, 249, 251, 253, 127
-};
-
-// Gets the QP, QP range: [0, 127].
-// Returns true on success, false otherwise.
-bool GetQp(const uint8_t* buf, size_t length, int* qp);
-
-} // namespace vp8
-
-} // namespace webrtc
-
-#endif // WEBRTC_MODULES_VIDEO_CODING_UTILITY_VP8_PARSE_HEADER_H_
diff --git a/webrtc/modules/video_coding/utility/mock/mock_frame_dropper.h b/webrtc/modules/video_coding/utility/mock/mock_frame_dropper.h
new file mode 100644
index 0000000000..b68a4b8d5d
--- /dev/null
+++ b/webrtc/modules/video_coding/utility/mock/mock_frame_dropper.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef WEBRTC_MODULES_VIDEO_CODING_UTILITY_MOCK_MOCK_FRAME_DROPPER_H_
+#define WEBRTC_MODULES_VIDEO_CODING_UTILITY_MOCK_MOCK_FRAME_DROPPER_H_
+
+#include <string>
+
+#include "testing/gmock/include/gmock/gmock.h"
+#include "webrtc/modules/video_coding/utility/frame_dropper.h"
+#include "webrtc/typedefs.h"
+
+namespace webrtc {
+
+class MockFrameDropper : public FrameDropper {
+ public:
+ MOCK_METHOD0(Reset, void());
+ MOCK_METHOD1(Enable, void(bool enable));
+ MOCK_METHOD0(DropFrame, bool());
+ MOCK_METHOD2(Fill, void(size_t frameSizeBytes, bool deltaFrame));
+ MOCK_METHOD1(Leak, void(uint32_t inputFrameRate));
+ MOCK_METHOD2(SetRates, void(float bitRate, float incoming_frame_rate));
+ MOCK_CONST_METHOD1(ActualFrameRate, float(uint32_t inputFrameRate));
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_CODING_UTILITY_MOCK_MOCK_FRAME_DROPPER_H_
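Since the mock above derives from FrameDropper with every virtual mocked out, a test can substitute it wherever a FrameDropper is consumed. A short gmock sketch (the test name and scenario are assumptions):

    #include "testing/gmock/include/gmock/gmock.h"
    #include "testing/gtest/include/gtest/gtest.h"
    #include "webrtc/modules/video_coding/utility/mock/mock_frame_dropper.h"

    using ::testing::Return;

    TEST(MockFrameDropperExample, ForcesADrop) {
      webrtc::MockFrameDropper dropper;
      EXPECT_CALL(dropper, DropFrame()).WillOnce(Return(true));
      EXPECT_TRUE(dropper.DropFrame());  // drop decision fully scripted
    }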
diff --git a/webrtc/modules/video_coding/utility/include/moving_average.h b/webrtc/modules/video_coding/utility/moving_average.h
index 49c42c4ed4..494bfd51fb 100644
--- a/webrtc/modules/video_coding/utility/include/moving_average.h
+++ b/webrtc/modules/video_coding/utility/moving_average.h
@@ -16,7 +16,7 @@
#include "webrtc/typedefs.h"
namespace webrtc {
-template<class T>
+template <class T>
class MovingAverage {
public:
MovingAverage();
@@ -30,17 +30,17 @@ class MovingAverage {
std::list<T> samples_;
};
-template<class T>
-MovingAverage<T>::MovingAverage() : sum_(static_cast<T>(0)) {
-}
+template <class T>
+MovingAverage<T>::MovingAverage()
+ : sum_(static_cast<T>(0)) {}
-template<class T>
+template <class T>
void MovingAverage<T>::AddSample(T sample) {
samples_.push_back(sample);
sum_ += sample;
}
-template<class T>
+template <class T>
bool MovingAverage<T>::GetAverage(size_t num_samples, T* avg) {
if (num_samples > samples_.size())
return false;
@@ -55,17 +55,17 @@ bool MovingAverage<T>::GetAverage(size_t num_samples, T* avg) {
return true;
}
-template<class T>
+template <class T>
void MovingAverage<T>::Reset() {
sum_ = static_cast<T>(0);
samples_.clear();
}
-template<class T>
+template <class T>
int MovingAverage<T>::size() {
return samples_.size();
}
} // namespace webrtc
-#endif // WEBRTC_MODULES_VIDEO_CODING_MOVING_AVERAGE_SCALER_H_
+#endif // WEBRTC_MODULES_VIDEO_CODING_UTILITY_MOVING_AVERAGE_H_
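A usage sketch for the template above. From the visible code, GetAverage(num_samples, &avg) fails when fewer than num_samples values have been added; the summation body falls outside the hunk shown, but in this utility it averages the most recent num_samples entries:

    #include "webrtc/modules/video_coding/utility/moving_average.h"

    void Example() {
      webrtc::MovingAverage<int> qp_average;
      qp_average.AddSample(30);
      qp_average.AddSample(34);
      qp_average.AddSample(38);
      int avg = 0;
      if (qp_average.GetAverage(2, &avg)) {
        // avg == 36: mean of the two most recent samples (34 and 38).
      }
      bool ok = qp_average.GetAverage(5, &avg);  // false: only 3 samples.
      (void)ok;
    }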
diff --git a/webrtc/modules/video_coding/utility/qp_parser.cc b/webrtc/modules/video_coding/utility/qp_parser.cc
index 62ce31351e..0916cb0094 100644
--- a/webrtc/modules/video_coding/utility/qp_parser.cc
+++ b/webrtc/modules/video_coding/utility/qp_parser.cc
@@ -8,10 +8,10 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/video_coding/utility/include/qp_parser.h"
+#include "webrtc/modules/video_coding/utility/qp_parser.h"
#include "webrtc/common_types.h"
-#include "webrtc/modules/video_coding/utility/include/vp8_header_parser.h"
+#include "webrtc/modules/video_coding/utility/vp8_header_parser.h"
namespace webrtc {
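qp_parser.cc now pulls vp8_header_parser.h from its new utility/ location. Per the documentation in the removed copy of that header above, vp8::GetQp() extracts the quantizer (range [0, 127]) from an encoded VP8 payload and returns true on success, so a caller-side sketch looks like this (the buffer names are assumptions):

    #include <cstddef>
    #include <cstdint>

    #include "webrtc/modules/video_coding/utility/vp8_header_parser.h"

    void ReadQp(const uint8_t* encoded_buf, size_t encoded_length) {
      int qp = 0;
      if (webrtc::vp8::GetQp(encoded_buf, encoded_length, &qp)) {
        // qp now holds the frame's quantizer, in [0, 127].
      }
    }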
diff --git a/webrtc/modules/video_coding/utility/include/qp_parser.h b/webrtc/modules/video_coding/utility/qp_parser.h
index 805b37b45c..0b644ef61c 100644
--- a/webrtc/modules/video_coding/utility/include/qp_parser.h
+++ b/webrtc/modules/video_coding/utility/qp_parser.h
@@ -11,7 +11,7 @@
#ifndef WEBRTC_MODULES_VIDEO_CODING_UTILITY_QP_PARSER_H_
#define WEBRTC_MODULES_VIDEO_CODING_UTILITY_QP_PARSER_H_
-#include "webrtc/modules/video_coding/main/source/encoded_frame.h"
+#include "webrtc/modules/video_coding/encoded_frame.h"
namespace webrtc {
diff --git a/webrtc/modules/video_coding/utility/quality_scaler.cc b/webrtc/modules/video_coding/utility/quality_scaler.cc
index ec7715230e..76bf9f5b03 100644
--- a/webrtc/modules/video_coding/utility/quality_scaler.cc
+++ b/webrtc/modules/video_coding/utility/quality_scaler.cc
@@ -7,7 +7,7 @@
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/video_coding/utility/include/quality_scaler.h"
+#include "webrtc/modules/video_coding/utility/quality_scaler.h"
namespace webrtc {
@@ -26,8 +26,7 @@ QualityScaler::QualityScaler()
downscale_shift_(0),
framerate_down_(false),
min_width_(kDefaultMinDownscaleDimension),
- min_height_(kDefaultMinDownscaleDimension) {
-}
+ min_height_(kDefaultMinDownscaleDimension) {}
void QualityScaler::Init(int low_qp_threshold,
int high_qp_threshold,
@@ -91,7 +90,7 @@ void QualityScaler::OnEncodeFrame(const VideoFrame& frame) {
AdjustScale(false);
}
} else if (average_qp_.GetAverage(num_samples_, &avg_qp) &&
- avg_qp <= low_qp_threshold_) {
+ avg_qp <= low_qp_threshold_) {
if (use_framerate_reduction_ && framerate_down_) {
target_framerate_ = -1;
framerate_down_ = false;
@@ -104,7 +103,7 @@ void QualityScaler::OnEncodeFrame(const VideoFrame& frame) {
assert(downscale_shift_ >= 0);
for (int shift = downscale_shift_;
shift > 0 && (res_.width / 2 >= min_width_) &&
- (res_.height / 2 >= min_height_);
+ (res_.height / 2 >= min_height_);
--shift) {
res_.width /= 2;
res_.height /= 2;
@@ -124,13 +123,8 @@ const VideoFrame& QualityScaler::GetScaledFrame(const VideoFrame& frame) {
if (res.width == frame.width())
return frame;
- scaler_.Set(frame.width(),
- frame.height(),
- res.width,
- res.height,
- kI420,
- kI420,
- kScaleBox);
+ scaler_.Set(frame.width(), frame.height(), res.width, res.height, kI420,
+ kI420, kScaleBox);
if (scaler_.Scale(frame, &scaled_frame_) != 0)
return frame;
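The loop reflowed in the hunk above is the core of QualityScaler's spatial adaptation: each unit of downscale_shift_ halves both dimensions until another halving would violate the configured minimums. A standalone sketch of that arithmetic; the names are illustrative, not the class members:

struct Resolution {
  int width;
  int height;
};

Resolution ApplyDownscaleShift(Resolution res, int shift,
                               int min_width, int min_height) {
  for (; shift > 0 && res.width / 2 >= min_width &&
         res.height / 2 >= min_height;
       --shift) {
    res.width /= 2;   // One shift halves both dimensions.
    res.height /= 2;
  }
  return res;
}
// E.g. 1280x720 with shift 2 and 180x180 minimums yields 320x180.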
diff --git a/webrtc/modules/video_coding/utility/include/quality_scaler.h b/webrtc/modules/video_coding/utility/quality_scaler.h
index 29a1496c05..a1233cca51 100644
--- a/webrtc/modules/video_coding/utility/include/quality_scaler.h
+++ b/webrtc/modules/video_coding/utility/quality_scaler.h
@@ -12,7 +12,7 @@
#define WEBRTC_MODULES_VIDEO_CODING_UTILITY_QUALITY_SCALER_H_
#include "webrtc/common_video/libyuv/include/scaler.h"
-#include "webrtc/modules/video_coding/utility/include/moving_average.h"
+#include "webrtc/modules/video_coding/utility/moving_average.h"
namespace webrtc {
class QualityScaler {
diff --git a/webrtc/modules/video_coding/utility/quality_scaler_unittest.cc b/webrtc/modules/video_coding/utility/quality_scaler_unittest.cc
index 2ce1107472..bad73a748c 100644
--- a/webrtc/modules/video_coding/utility/quality_scaler_unittest.cc
+++ b/webrtc/modules/video_coding/utility/quality_scaler_unittest.cc
@@ -8,7 +8,7 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/video_coding/utility/include/quality_scaler.h"
+#include "webrtc/modules/video_coding/utility/quality_scaler.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -33,6 +33,7 @@ class QualityScalerTest : public ::testing::Test {
int width;
int height;
};
+
protected:
enum ScaleDirection {
kKeepScaleAtHighQp,
@@ -43,8 +44,8 @@ class QualityScalerTest : public ::testing::Test {
enum BadQualityMetric { kDropFrame, kReportLowQP };
QualityScalerTest() {
- input_frame_.CreateEmptyFrame(
- kWidth, kHeight, kWidth, kHalfWidth, kHalfWidth);
+ input_frame_.CreateEmptyFrame(kWidth, kHeight, kWidth, kHalfWidth,
+ kHalfWidth);
qs_.Init(kMaxQp / QualityScaler::kDefaultLowQpDenominator, kHighQp, false);
qs_.ReportFramerate(kFramerate);
qs_.OnEncodeFrame(input_frame_);
@@ -97,7 +98,8 @@ class QualityScalerTest : public ::testing::Test {
int num_second,
int initial_framerate);
- void VerifyQualityAdaptation(int initial_framerate, int seconds,
+ void VerifyQualityAdaptation(int initial_framerate,
+ int seconds,
bool expect_spatial_resize,
bool expect_framerate_reduction);
@@ -183,8 +185,8 @@ TEST_F(QualityScalerTest, DoesNotDownscaleAfterHalfFramedrop) {
void QualityScalerTest::ContinuouslyDownscalesByHalfDimensionsAndBackUp() {
const int initial_min_dimension = input_frame_.width() < input_frame_.height()
- ? input_frame_.width()
- : input_frame_.height();
+ ? input_frame_.width()
+ : input_frame_.height();
int min_dimension = initial_min_dimension;
int current_shift = 0;
// Drop all frames to force-trigger downscaling.
@@ -229,14 +231,14 @@ TEST_F(QualityScalerTest,
const int kOddWidth = 517;
const int kHalfOddWidth = (kOddWidth + 1) / 2;
const int kOddHeight = 1239;
- input_frame_.CreateEmptyFrame(
- kOddWidth, kOddHeight, kOddWidth, kHalfOddWidth, kHalfOddWidth);
+ input_frame_.CreateEmptyFrame(kOddWidth, kOddHeight, kOddWidth, kHalfOddWidth,
+ kHalfOddWidth);
ContinuouslyDownscalesByHalfDimensionsAndBackUp();
}
void QualityScalerTest::DoesNotDownscaleFrameDimensions(int width, int height) {
- input_frame_.CreateEmptyFrame(
- width, height, width, (width + 1) / 2, (width + 1) / 2);
+ input_frame_.CreateEmptyFrame(width, height, width, (width + 1) / 2,
+ (width + 1) / 2);
for (int i = 0; i < kFramerate * kNumSeconds; ++i) {
qs_.ReportDroppedFrame();
@@ -259,7 +261,9 @@ TEST_F(QualityScalerTest, DoesNotDownscaleFrom1Px) {
}
QualityScalerTest::Resolution QualityScalerTest::TriggerResolutionChange(
- BadQualityMetric dropframe_lowqp, int num_second, int initial_framerate) {
+ BadQualityMetric dropframe_lowqp,
+ int num_second,
+ int initial_framerate) {
QualityScalerTest::Resolution res;
res.framerate = initial_framerate;
qs_.OnEncodeFrame(input_frame_);
@@ -288,7 +292,9 @@ QualityScalerTest::Resolution QualityScalerTest::TriggerResolutionChange(
}
void QualityScalerTest::VerifyQualityAdaptation(
- int initial_framerate, int seconds, bool expect_spatial_resize,
+ int initial_framerate,
+ int seconds,
+ bool expect_spatial_resize,
bool expect_framerate_reduction) {
const int kDisabledBadQpThreshold = kMaxQp + 1;
qs_.Init(kMaxQp / QualityScaler::kDefaultLowQpDenominator,
@@ -298,8 +304,8 @@ void QualityScalerTest::VerifyQualityAdaptation(
int init_height = qs_.GetScaledResolution().height;
// Test reducing framerate by dropping frame continuously.
- QualityScalerTest::Resolution res = TriggerResolutionChange(
- kDropFrame, seconds, initial_framerate);
+ QualityScalerTest::Resolution res =
+ TriggerResolutionChange(kDropFrame, seconds, initial_framerate);
if (expect_framerate_reduction) {
EXPECT_LT(res.framerate, initial_framerate);
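The odd-dimension tests above pass (width + 1) / 2 as the chroma strides because I420 subsamples chroma by two in each direction and the plane width must round up, not down. A one-liner making the rounding explicit:

int ChromaWidth(int luma_width) {
  // Rounds up, so an odd luma width keeps its last chroma column.
  return (luma_width + 1) / 2;
}
// E.g. the kOddWidth of 517 used above gives a chroma width of 259.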
diff --git a/webrtc/modules/video_coding/utility/video_coding_utility.gyp b/webrtc/modules/video_coding/utility/video_coding_utility.gyp
index f0764bb7bf..42cbb3d4e0 100644
--- a/webrtc/modules/video_coding/utility/video_coding_utility.gyp
+++ b/webrtc/modules/video_coding/utility/video_coding_utility.gyp
@@ -19,14 +19,14 @@
],
'sources': [
'frame_dropper.cc',
- 'include/frame_dropper.h',
- 'include/moving_average.h',
- 'include/qp_parser.h',
- 'include/quality_scaler.h',
- 'include/vp8_header_parser.h',
+ 'frame_dropper.h',
+ 'moving_average.h',
'qp_parser.cc',
+ 'qp_parser.h',
'quality_scaler.cc',
+ 'quality_scaler.h',
'vp8_header_parser.cc',
+ 'vp8_header_parser.h',
],
},
], # targets
diff --git a/webrtc/modules/video_coding/utility/vp8_header_parser.cc b/webrtc/modules/video_coding/utility/vp8_header_parser.cc
index dc5a0e5d15..631385d0f2 100644
--- a/webrtc/modules/video_coding/utility/vp8_header_parser.cc
+++ b/webrtc/modules/video_coding/utility/vp8_header_parser.cc
@@ -7,12 +7,9 @@
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
-#include <stdint.h>
-#include <stdio.h>
+#include "webrtc/modules/video_coding/utility/vp8_header_parser.h"
-#include "webrtc/modules/video_coding/utility/include/vp8_header_parser.h"
-
-#include "webrtc/system_wrappers/include/logging.h"
+#include "webrtc/base/logging.h"
namespace webrtc {
@@ -46,12 +43,12 @@ static void VP8LoadNewBytes(VP8BitReader* const br) {
const uint32_t in_bits = *(const uint32_t*)(br->buf_);
br->buf_ += BITS >> 3;
#if defined(WEBRTC_ARCH_BIG_ENDIAN)
- bits = static_cast<uint32_t>(in_bits);
- if (BITS != 8 * sizeof(uint32_t))
- bits >>= (8 * sizeof(uint32_t) - BITS);
+ bits = static_cast<uint32_t>(in_bits);
+ if (BITS != 8 * sizeof(uint32_t))
+ bits >>= (8 * sizeof(uint32_t) - BITS);
#else
- bits = BSwap32(in_bits);
- bits >>= 32 - BITS;
+ bits = BSwap32(in_bits);
+ bits >>= 32 - BITS;
#endif
br->value_ = bits | (br->value_ << BITS);
br->bits_ += BITS;
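The re-indented block above is the endian split in the bit reader's refill path: a big-endian host already has the stream's first byte in the high bits, while a little-endian host must byte-swap first. A sketch, under the assumption that BSwap32 behaves like the usual full 32-bit byte reversal:

#include <stdint.h>

// Hypothetical stand-in for the BSwap32 the parser calls; production
// builds typically map this to a compiler intrinsic.
static uint32_t BSwap32Sketch(uint32_t x) {
  return ((x & 0x000000FFu) << 24) | ((x & 0x0000FF00u) << 8) |
         ((x & 0x00FF0000u) >> 8) | ((x & 0xFF000000u) >> 24);
}
// On a little-endian target, loading the stream bytes 0x01 0x02 0x03 0x04
// as a uint32_t gives 0x04030201; the swap restores stream order,
// 0x01020304, and ">> (32 - BITS)" then keeps only the BITS newest bits.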
@@ -63,12 +60,12 @@ static void VP8LoadNewBytes(VP8BitReader* const br) {
static void VP8InitBitReader(VP8BitReader* const br,
const uint8_t* const start,
const uint8_t* const end) {
- br->range_ = 255 - 1;
- br->buf_ = start;
+ br->range_ = 255 - 1;
+ br->buf_ = start;
br->buf_end_ = end;
- br->value_ = 0;
- br->bits_ = -8; // To load the very first 8bits.
- br->eof_ = 0;
+ br->value_ = 0;
+ br->bits_ = -8; // To load the very first 8bits.
+ br->eof_ = 0;
VP8LoadNewBytes(br);
}
@@ -125,7 +122,7 @@ static void ParseSegmentHeader(VP8BitReader* br) {
int s;
VP8Get(br);
for (s = 0; s < NUM_MB_SEGMENTS; ++s) {
- VP8Get(br) ? VP8GetSignedValue(br, 7) : 0;
+ VP8Get(br) ? VP8GetSignedValue(br, 7) : 0;
}
for (s = 0; s < NUM_MB_SEGMENTS; ++s) {
VP8Get(br) ? VP8GetSignedValue(br, 6) : 0;
diff --git a/webrtc/modules/video_coding/utility/vp8_header_parser.h b/webrtc/modules/video_coding/utility/vp8_header_parser.h
new file mode 100644
index 0000000000..b0c684c578
--- /dev/null
+++ b/webrtc/modules/video_coding/utility/vp8_header_parser.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_UTILITY_VP8_HEADER_PARSER_H_
+#define WEBRTC_MODULES_VIDEO_CODING_UTILITY_VP8_HEADER_PARSER_H_
+
+#include <stdint.h>
+#include <stdio.h>
+
+namespace webrtc {
+
+namespace vp8 {
+
+enum {
+ MB_FEATURE_TREE_PROBS = 3,
+ NUM_MB_SEGMENTS = 4,
+ NUM_REF_LF_DELTAS = 4,
+ NUM_MODE_LF_DELTAS = 4,
+};
+
+typedef struct VP8BitReader VP8BitReader;
+struct VP8BitReader {
+ // Boolean decoder.
+ uint32_t value_; // Current value.
+ uint32_t range_; // Current range minus 1. In [127, 254] interval.
+ int bits_; // Number of valid bits left.
+ // Read buffer.
+ const uint8_t* buf_; // Next byte to be read.
+ const uint8_t* buf_end_; // End of read buffer.
+ int eof_; // True if input is exhausted.
+};
+
+const uint8_t kVP8Log2Range[128] = {
+ 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0};
+
+// range = ((range - 1) << kVP8Log2Range[range]) + 1
+const uint8_t kVP8NewRange[128] = {
+ 127, 127, 191, 127, 159, 191, 223, 127, 143, 159, 175, 191, 207, 223, 239,
+ 127, 135, 143, 151, 159, 167, 175, 183, 191, 199, 207, 215, 223, 231, 239,
+ 247, 127, 131, 135, 139, 143, 147, 151, 155, 159, 163, 167, 171, 175, 179,
+ 183, 187, 191, 195, 199, 203, 207, 211, 215, 219, 223, 227, 231, 235, 239,
+ 243, 247, 251, 127, 129, 131, 133, 135, 137, 139, 141, 143, 145, 147, 149,
+ 151, 153, 155, 157, 159, 161, 163, 165, 167, 169, 171, 173, 175, 177, 179,
+ 181, 183, 185, 187, 189, 191, 193, 195, 197, 199, 201, 203, 205, 207, 209,
+ 211, 213, 215, 217, 219, 221, 223, 225, 227, 229, 231, 233, 235, 237, 239,
+ 241, 243, 245, 247, 249, 251, 253, 127};
+
+// Gets the QP, QP range: [0, 127].
+// Returns true on success, false otherwise.
+bool GetQp(const uint8_t* buf, size_t length, int* qp);
+
+} // namespace vp8
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_CODING_UTILITY_VP8_HEADER_PARSER_H_
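For orientation, a hedged usage sketch of the GetQp() entry point this new header exposes; the payload is assumed to be a raw VP8 bitstream buffer, and the failure semantics beyond the declared bool return are an assumption:

#include <stddef.h>
#include <stdint.h>

#include "webrtc/modules/video_coding/utility/vp8_header_parser.h"

int ParseQpOrDefault(const uint8_t* payload, size_t length) {
  int qp = -1;
  // GetQp() returns false on input it cannot parse; callers should then
  // treat the QP as unavailable.
  if (!webrtc::vp8::GetQp(payload, length, &qp))
    return -1;
  return qp;  // In [0, 127] per the declaration above.
}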
diff --git a/webrtc/modules/video_coding/video_coding.gypi b/webrtc/modules/video_coding/video_coding.gypi
index b292e0ae31..438d8f1c1f 100644
--- a/webrtc/modules/video_coding/video_coding.gypi
+++ b/webrtc/modules/video_coding/video_coding.gypi
@@ -22,61 +22,61 @@
],
'sources': [
# interfaces
- 'main/interface/video_coding.h',
- 'main/interface/video_coding_defines.h',
+ 'include/video_coding.h',
+ 'include/video_coding_defines.h',
# headers
- 'main/source/codec_database.h',
- 'main/source/codec_timer.h',
- 'main/source/content_metrics_processing.h',
- 'main/source/decoding_state.h',
- 'main/source/encoded_frame.h',
- 'main/source/fec_tables_xor.h',
- 'main/source/frame_buffer.h',
- 'main/source/generic_decoder.h',
- 'main/source/generic_encoder.h',
- 'main/source/inter_frame_delay.h',
- 'main/source/internal_defines.h',
- 'main/source/jitter_buffer.h',
- 'main/source/jitter_buffer_common.h',
- 'main/source/jitter_estimator.h',
- 'main/source/media_opt_util.h',
- 'main/source/media_optimization.h',
- 'main/source/nack_fec_tables.h',
- 'main/source/packet.h',
- 'main/source/qm_select_data.h',
- 'main/source/qm_select.h',
- 'main/source/receiver.h',
- 'main/source/rtt_filter.h',
- 'main/source/session_info.h',
- 'main/source/timestamp_map.h',
- 'main/source/timing.h',
- 'main/source/video_coding_impl.h',
+ 'codec_database.h',
+ 'codec_timer.h',
+ 'content_metrics_processing.h',
+ 'decoding_state.h',
+ 'encoded_frame.h',
+ 'fec_tables_xor.h',
+ 'frame_buffer.h',
+ 'generic_decoder.h',
+ 'generic_encoder.h',
+ 'inter_frame_delay.h',
+ 'internal_defines.h',
+ 'jitter_buffer.h',
+ 'jitter_buffer_common.h',
+ 'jitter_estimator.h',
+ 'media_opt_util.h',
+ 'media_optimization.h',
+ 'nack_fec_tables.h',
+ 'packet.h',
+ 'qm_select_data.h',
+ 'qm_select.h',
+ 'receiver.h',
+ 'rtt_filter.h',
+ 'session_info.h',
+ 'timestamp_map.h',
+ 'timing.h',
+ 'video_coding_impl.h',
# sources
- 'main/source/codec_database.cc',
- 'main/source/codec_timer.cc',
- 'main/source/content_metrics_processing.cc',
- 'main/source/decoding_state.cc',
- 'main/source/encoded_frame.cc',
- 'main/source/frame_buffer.cc',
- 'main/source/generic_decoder.cc',
- 'main/source/generic_encoder.cc',
- 'main/source/inter_frame_delay.cc',
- 'main/source/jitter_buffer.cc',
- 'main/source/jitter_estimator.cc',
- 'main/source/media_opt_util.cc',
- 'main/source/media_optimization.cc',
- 'main/source/packet.cc',
- 'main/source/qm_select.cc',
- 'main/source/receiver.cc',
- 'main/source/rtt_filter.cc',
- 'main/source/session_info.cc',
- 'main/source/timestamp_map.cc',
- 'main/source/timing.cc',
- 'main/source/video_coding_impl.cc',
- 'main/source/video_sender.cc',
- 'main/source/video_receiver.cc',
+ 'codec_database.cc',
+ 'codec_timer.cc',
+ 'content_metrics_processing.cc',
+ 'decoding_state.cc',
+ 'encoded_frame.cc',
+ 'frame_buffer.cc',
+ 'generic_decoder.cc',
+ 'generic_encoder.cc',
+ 'inter_frame_delay.cc',
+ 'jitter_buffer.cc',
+ 'jitter_estimator.cc',
+ 'media_opt_util.cc',
+ 'media_optimization.cc',
+ 'packet.cc',
+ 'qm_select.cc',
+ 'receiver.cc',
+ 'rtt_filter.cc',
+ 'session_info.cc',
+ 'timestamp_map.cc',
+ 'timing.cc',
+ 'video_coding_impl.cc',
+ 'video_sender.cc',
+ 'video_receiver.cc',
], # source
# TODO(jschuh): Bug 1348: fix size_t to int truncations.
'msvs_disabled_warnings': [ 4267, ],
diff --git a/webrtc/modules/video_coding/main/source/video_coding_impl.cc b/webrtc/modules/video_coding/video_coding_impl.cc
index b0a6754cbd..1e26a7e243 100644
--- a/webrtc/modules/video_coding/main/source/video_coding_impl.cc
+++ b/webrtc/modules/video_coding/video_coding_impl.cc
@@ -8,33 +8,33 @@
* be found in the AUTHORS file in the root of the source tree.
*/
+#include "webrtc/modules/video_coding/video_coding_impl.h"
+
+#include <algorithm>
+
#include "webrtc/common_types.h"
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
-#include "webrtc/modules/video_coding/main/source/encoded_frame.h"
-#include "webrtc/modules/video_coding/main/source/jitter_buffer.h"
-#include "webrtc/modules/video_coding/main/source/packet.h"
-#include "webrtc/modules/video_coding/main/source/video_coding_impl.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
+#include "webrtc/modules/video_coding/encoded_frame.h"
+#include "webrtc/modules/video_coding/jitter_buffer.h"
+#include "webrtc/modules/video_coding/packet.h"
#include "webrtc/system_wrappers/include/clock.h"
namespace webrtc {
namespace vcm {
-int64_t
-VCMProcessTimer::Period() const {
- return _periodMs;
+int64_t VCMProcessTimer::Period() const {
+ return _periodMs;
}
-int64_t
-VCMProcessTimer::TimeUntilProcess() const {
- const int64_t time_since_process = _clock->TimeInMilliseconds() - _latestMs;
- const int64_t time_until_process = _periodMs - time_since_process;
- return std::max<int64_t>(time_until_process, 0);
+int64_t VCMProcessTimer::TimeUntilProcess() const {
+ const int64_t time_since_process = _clock->TimeInMilliseconds() - _latestMs;
+ const int64_t time_until_process = _periodMs - time_since_process;
+ return std::max<int64_t>(time_until_process, 0);
}
-void
-VCMProcessTimer::Processed() {
- _latestMs = _clock->TimeInMilliseconds();
+void VCMProcessTimer::Processed() {
+ _latestMs = _clock->TimeInMilliseconds();
}
} // namespace vcm
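The reformatted VCMProcessTimer methods above implement a simple countdown: the time until the next Process() is the period minus the time elapsed since the last one, clamped at zero. A standalone restatement with a worked value:

#include <stdint.h>

int64_t TimeUntilProcessSketch(int64_t now_ms, int64_t latest_ms,
                               int64_t period_ms) {
  const int64_t since = now_ms - latest_ms;
  const int64_t until = period_ms - since;
  return until > 0 ? until : 0;  // The std::max<int64_t>(until, 0) above.
}
// period 100, latest 1000, now 1040 -> 60 ms; any now >= 1100 -> 0.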
@@ -59,8 +59,8 @@ class EncodedImageCallbackWrapper : public EncodedImageCallback {
const RTPFragmentationHeader* fragmentation) {
CriticalSectionScoped cs(cs_.get());
if (callback_)
- return callback_->Encoded(
- encoded_image, codec_specific_info, fragmentation);
+ return callback_->Encoded(encoded_image, codec_specific_info,
+ fragmentation);
return 0;
}
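The wrapper reflowed above exists so the post-encode callback can be swapped at runtime: Register() and every forwarded Encoded() take the same lock. A minimal sketch of that shape, with std::mutex standing in for CriticalSectionScoped and a plain function pointer for the callback interface:

#include <mutex>

class CallbackWrapperSketch {
 public:
  void Register(int (*cb)(int)) {
    std::lock_guard<std::mutex> lock(mu_);
    cb_ = cb;
  }
  int Forward(int arg) {
    std::lock_guard<std::mutex> lock(mu_);
    return cb_ ? cb_(arg) : 0;  // 0 when nothing is registered, as above.
  }

 private:
  std::mutex mu_;
  int (*cb_)(int) = nullptr;
};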
@@ -77,30 +77,26 @@ class VideoCodingModuleImpl : public VideoCodingModule {
VideoEncoderRateObserver* encoder_rate_observer,
VCMQMSettingsCallback* qm_settings_callback)
: VideoCodingModule(),
- sender_(new vcm::VideoSender(clock,
- &post_encode_callback_,
- encoder_rate_observer,
- qm_settings_callback)),
- receiver_(new vcm::VideoReceiver(clock, event_factory)),
+ sender_(clock,
+ &post_encode_callback_,
+ encoder_rate_observer,
+ qm_settings_callback),
+ receiver_(clock, event_factory),
own_event_factory_(owns_event_factory ? event_factory : NULL) {}
- virtual ~VideoCodingModuleImpl() {
- sender_.reset();
- receiver_.reset();
- own_event_factory_.reset();
- }
+ virtual ~VideoCodingModuleImpl() { own_event_factory_.reset(); }
int64_t TimeUntilNextProcess() override {
- int64_t sender_time = sender_->TimeUntilNextProcess();
- int64_t receiver_time = receiver_->TimeUntilNextProcess();
+ int64_t sender_time = sender_.TimeUntilNextProcess();
+ int64_t receiver_time = receiver_.TimeUntilNextProcess();
assert(sender_time >= 0);
assert(receiver_time >= 0);
return VCM_MIN(sender_time, receiver_time);
}
int32_t Process() override {
- int32_t sender_return = sender_->Process();
- int32_t receiver_return = receiver_->Process();
+ int32_t sender_return = sender_.Process();
+ int32_t receiver_return = receiver_.Process();
if (sender_return != VCM_OK)
return sender_return;
return receiver_return;
@@ -109,192 +105,176 @@ class VideoCodingModuleImpl : public VideoCodingModule {
int32_t RegisterSendCodec(const VideoCodec* sendCodec,
uint32_t numberOfCores,
uint32_t maxPayloadSize) override {
- return sender_->RegisterSendCodec(sendCodec, numberOfCores, maxPayloadSize);
- }
-
- const VideoCodec& GetSendCodec() const override {
- return sender_->GetSendCodec();
- }
-
- // DEPRECATED.
- int32_t SendCodec(VideoCodec* currentSendCodec) const override {
- return sender_->SendCodecBlocking(currentSendCodec);
- }
-
- // DEPRECATED.
- VideoCodecType SendCodec() const override {
- return sender_->SendCodecBlocking();
+ return sender_.RegisterSendCodec(sendCodec, numberOfCores, maxPayloadSize);
}
int32_t RegisterExternalEncoder(VideoEncoder* externalEncoder,
uint8_t payloadType,
bool internalSource) override {
- return sender_->RegisterExternalEncoder(
- externalEncoder, payloadType, internalSource);
+ sender_.RegisterExternalEncoder(externalEncoder, payloadType,
+ internalSource);
+ return 0;
}
int Bitrate(unsigned int* bitrate) const override {
- return sender_->Bitrate(bitrate);
+ return sender_.Bitrate(bitrate);
}
int FrameRate(unsigned int* framerate) const override {
- return sender_->FrameRate(framerate);
+ return sender_.FrameRate(framerate);
}
int32_t SetChannelParameters(uint32_t target_bitrate, // bits/s.
uint8_t lossRate,
int64_t rtt) override {
- return sender_->SetChannelParameters(target_bitrate, lossRate, rtt);
+ return sender_.SetChannelParameters(target_bitrate, lossRate, rtt);
}
int32_t RegisterTransportCallback(
VCMPacketizationCallback* transport) override {
- return sender_->RegisterTransportCallback(transport);
+ return sender_.RegisterTransportCallback(transport);
}
int32_t RegisterSendStatisticsCallback(
VCMSendStatisticsCallback* sendStats) override {
- return sender_->RegisterSendStatisticsCallback(sendStats);
+ return sender_.RegisterSendStatisticsCallback(sendStats);
}
int32_t RegisterProtectionCallback(
VCMProtectionCallback* protection) override {
- return sender_->RegisterProtectionCallback(protection);
+ return sender_.RegisterProtectionCallback(protection);
}
int32_t SetVideoProtection(VCMVideoProtection videoProtection,
bool enable) override {
// TODO(pbos): Remove enable from receive-side protection modes as well.
if (enable)
- sender_->SetVideoProtection(videoProtection);
- return receiver_->SetVideoProtection(videoProtection, enable);
+ sender_.SetVideoProtection(videoProtection);
+ return receiver_.SetVideoProtection(videoProtection, enable);
}
int32_t AddVideoFrame(const VideoFrame& videoFrame,
const VideoContentMetrics* contentMetrics,
const CodecSpecificInfo* codecSpecificInfo) override {
- return sender_->AddVideoFrame(
- videoFrame, contentMetrics, codecSpecificInfo);
+ return sender_.AddVideoFrame(videoFrame, contentMetrics, codecSpecificInfo);
}
int32_t IntraFrameRequest(int stream_index) override {
- return sender_->IntraFrameRequest(stream_index);
+ return sender_.IntraFrameRequest(stream_index);
}
int32_t EnableFrameDropper(bool enable) override {
- return sender_->EnableFrameDropper(enable);
+ return sender_.EnableFrameDropper(enable);
}
void SuspendBelowMinBitrate() override {
- return sender_->SuspendBelowMinBitrate();
+ return sender_.SuspendBelowMinBitrate();
}
- bool VideoSuspended() const override { return sender_->VideoSuspended(); }
+ bool VideoSuspended() const override { return sender_.VideoSuspended(); }
int32_t RegisterReceiveCodec(const VideoCodec* receiveCodec,
int32_t numberOfCores,
bool requireKeyFrame) override {
- return receiver_->RegisterReceiveCodec(
- receiveCodec, numberOfCores, requireKeyFrame);
+ return receiver_.RegisterReceiveCodec(receiveCodec, numberOfCores,
+ requireKeyFrame);
}
- int32_t RegisterExternalDecoder(VideoDecoder* externalDecoder,
- uint8_t payloadType,
- bool internalRenderTiming) override {
- return receiver_->RegisterExternalDecoder(
- externalDecoder, payloadType, internalRenderTiming);
+ void RegisterExternalDecoder(VideoDecoder* externalDecoder,
+ uint8_t payloadType) override {
+ receiver_.RegisterExternalDecoder(externalDecoder, payloadType);
}
int32_t RegisterReceiveCallback(
VCMReceiveCallback* receiveCallback) override {
- return receiver_->RegisterReceiveCallback(receiveCallback);
+ return receiver_.RegisterReceiveCallback(receiveCallback);
}
int32_t RegisterReceiveStatisticsCallback(
VCMReceiveStatisticsCallback* receiveStats) override {
- return receiver_->RegisterReceiveStatisticsCallback(receiveStats);
+ return receiver_.RegisterReceiveStatisticsCallback(receiveStats);
}
int32_t RegisterDecoderTimingCallback(
VCMDecoderTimingCallback* decoderTiming) override {
- return receiver_->RegisterDecoderTimingCallback(decoderTiming);
+ return receiver_.RegisterDecoderTimingCallback(decoderTiming);
}
int32_t RegisterFrameTypeCallback(
VCMFrameTypeCallback* frameTypeCallback) override {
- return receiver_->RegisterFrameTypeCallback(frameTypeCallback);
+ return receiver_.RegisterFrameTypeCallback(frameTypeCallback);
}
int32_t RegisterPacketRequestCallback(
VCMPacketRequestCallback* callback) override {
- return receiver_->RegisterPacketRequestCallback(callback);
+ return receiver_.RegisterPacketRequestCallback(callback);
}
int RegisterRenderBufferSizeCallback(
VCMRenderBufferSizeCallback* callback) override {
- return receiver_->RegisterRenderBufferSizeCallback(callback);
+ return receiver_.RegisterRenderBufferSizeCallback(callback);
}
int32_t Decode(uint16_t maxWaitTimeMs) override {
- return receiver_->Decode(maxWaitTimeMs);
+ return receiver_.Decode(maxWaitTimeMs);
}
- int32_t ResetDecoder() override { return receiver_->ResetDecoder(); }
+ int32_t ResetDecoder() override { return receiver_.ResetDecoder(); }
int32_t ReceiveCodec(VideoCodec* currentReceiveCodec) const override {
- return receiver_->ReceiveCodec(currentReceiveCodec);
+ return receiver_.ReceiveCodec(currentReceiveCodec);
}
VideoCodecType ReceiveCodec() const override {
- return receiver_->ReceiveCodec();
+ return receiver_.ReceiveCodec();
}
int32_t IncomingPacket(const uint8_t* incomingPayload,
size_t payloadLength,
const WebRtcRTPHeader& rtpInfo) override {
- return receiver_->IncomingPacket(incomingPayload, payloadLength, rtpInfo);
+ return receiver_.IncomingPacket(incomingPayload, payloadLength, rtpInfo);
}
int32_t SetMinimumPlayoutDelay(uint32_t minPlayoutDelayMs) override {
- return receiver_->SetMinimumPlayoutDelay(minPlayoutDelayMs);
+ return receiver_.SetMinimumPlayoutDelay(minPlayoutDelayMs);
}
int32_t SetRenderDelay(uint32_t timeMS) override {
- return receiver_->SetRenderDelay(timeMS);
+ return receiver_.SetRenderDelay(timeMS);
}
- int32_t Delay() const override { return receiver_->Delay(); }
+ int32_t Delay() const override { return receiver_.Delay(); }
uint32_t DiscardedPackets() const override {
- return receiver_->DiscardedPackets();
+ return receiver_.DiscardedPackets();
}
int SetReceiverRobustnessMode(ReceiverRobustness robustnessMode,
VCMDecodeErrorMode errorMode) override {
- return receiver_->SetReceiverRobustnessMode(robustnessMode, errorMode);
+ return receiver_.SetReceiverRobustnessMode(robustnessMode, errorMode);
}
void SetNackSettings(size_t max_nack_list_size,
int max_packet_age_to_nack,
int max_incomplete_time_ms) override {
- return receiver_->SetNackSettings(
- max_nack_list_size, max_packet_age_to_nack, max_incomplete_time_ms);
+ return receiver_.SetNackSettings(max_nack_list_size, max_packet_age_to_nack,
+ max_incomplete_time_ms);
}
void SetDecodeErrorMode(VCMDecodeErrorMode decode_error_mode) override {
- return receiver_->SetDecodeErrorMode(decode_error_mode);
+ return receiver_.SetDecodeErrorMode(decode_error_mode);
}
int SetMinReceiverDelay(int desired_delay_ms) override {
- return receiver_->SetMinReceiverDelay(desired_delay_ms);
+ return receiver_.SetMinReceiverDelay(desired_delay_ms);
}
int32_t SetReceiveChannelParameters(int64_t rtt) override {
- return receiver_->SetReceiveChannelParameters(rtt);
+ return receiver_.SetReceiveChannelParameters(rtt);
}
void RegisterPreDecodeImageCallback(EncodedImageCallback* observer) override {
- receiver_->RegisterPreDecodeImageCallback(observer);
+ receiver_.RegisterPreDecodeImageCallback(observer);
}
void RegisterPostEncodeImageCallback(
@@ -302,36 +282,18 @@ class VideoCodingModuleImpl : public VideoCodingModule {
post_encode_callback_.Register(observer);
}
- void TriggerDecoderShutdown() override {
- receiver_->TriggerDecoderShutdown();
- }
+ void TriggerDecoderShutdown() override { receiver_.TriggerDecoderShutdown(); }
private:
EncodedImageCallbackWrapper post_encode_callback_;
- // TODO(tommi): Change sender_ and receiver_ to be non pointers
- // (construction is 1 alloc instead of 3).
- rtc::scoped_ptr<vcm::VideoSender> sender_;
- rtc::scoped_ptr<vcm::VideoReceiver> receiver_;
+ vcm::VideoSender sender_;
+ vcm::VideoReceiver receiver_;
rtc::scoped_ptr<EventFactory> own_event_factory_;
};
} // namespace
-uint8_t VideoCodingModule::NumberOfCodecs() {
- return VCMCodecDataBase::NumberOfCodecs();
-}
-
-int32_t VideoCodingModule::Codec(uint8_t listId, VideoCodec* codec) {
- if (codec == NULL) {
- return VCM_PARAMETER_ERROR;
- }
- return VCMCodecDataBase::Codec(listId, codec) ? 0 : -1;
-}
-
-int32_t VideoCodingModule::Codec(VideoCodecType codecType, VideoCodec* codec) {
- if (codec == NULL) {
- return VCM_PARAMETER_ERROR;
- }
- return VCMCodecDataBase::Codec(codecType, codec) ? 0 : -1;
+void VideoCodingModule::Codec(VideoCodecType codecType, VideoCodec* codec) {
+ VCMCodecDataBase::Codec(codecType, codec);
}
VideoCodingModule* VideoCodingModule::Create(
@@ -342,9 +304,8 @@ VideoCodingModule* VideoCodingModule::Create(
encoder_rate_observer, qm_settings_callback);
}
-VideoCodingModule* VideoCodingModule::Create(
- Clock* clock,
- EventFactory* event_factory) {
+VideoCodingModule* VideoCodingModule::Create(Clock* clock,
+ EventFactory* event_factory) {
assert(clock);
assert(event_factory);
return new VideoCodingModuleImpl(clock, event_factory, false, nullptr,
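The change above resolves the deleted TODO(tommi): sender_ and receiver_ become direct members instead of scoped_ptr fields, so constructing the module is one allocation instead of three and the destructor shrinks to the one pointer that still needs conditional ownership. An illustrative sketch of the two shapes — std::unique_ptr stands in for rtc::scoped_ptr, and the types are made up:

#include <memory>

struct Part {
  explicit Part(int id) : id(id) {}
  int id;
};

// Before: one extra heap allocation per part, released explicitly.
class HeapOwner {
 public:
  HeapOwner() : part_(new Part(1)) {}
  ~HeapOwner() { part_.reset(); }  // The explicit resets the diff deletes.

 private:
  std::unique_ptr<Part> part_;  // Stand-in for rtc::scoped_ptr.
};

// After: the part is embedded, so constructing the owner is a single
// allocation and destruction falls out of member order automatically.
class InlineOwner {
 public:
  InlineOwner() : part_(1) {}

 private:
  Part part_;
};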
diff --git a/webrtc/modules/video_coding/main/source/video_coding_impl.h b/webrtc/modules/video_coding/video_coding_impl.h
index 57f38dad13..f105fa9c18 100644
--- a/webrtc/modules/video_coding/main/source/video_coding_impl.h
+++ b/webrtc/modules/video_coding/video_coding_impl.h
@@ -11,21 +11,21 @@
#ifndef WEBRTC_MODULES_VIDEO_CODING_VIDEO_CODING_IMPL_H_
#define WEBRTC_MODULES_VIDEO_CODING_VIDEO_CODING_IMPL_H_
-#include "webrtc/modules/video_coding/main/interface/video_coding.h"
+#include "webrtc/modules/video_coding/include/video_coding.h"
#include <vector>
#include "webrtc/base/thread_annotations.h"
#include "webrtc/base/thread_checker.h"
-#include "webrtc/modules/video_coding/main/source/codec_database.h"
-#include "webrtc/modules/video_coding/main/source/frame_buffer.h"
-#include "webrtc/modules/video_coding/main/source/generic_decoder.h"
-#include "webrtc/modules/video_coding/main/source/generic_encoder.h"
-#include "webrtc/modules/video_coding/main/source/jitter_buffer.h"
-#include "webrtc/modules/video_coding/main/source/media_optimization.h"
-#include "webrtc/modules/video_coding/main/source/receiver.h"
-#include "webrtc/modules/video_coding/main/source/timing.h"
-#include "webrtc/modules/video_coding/utility/include/qp_parser.h"
+#include "webrtc/modules/video_coding/codec_database.h"
+#include "webrtc/modules/video_coding/frame_buffer.h"
+#include "webrtc/modules/video_coding/generic_decoder.h"
+#include "webrtc/modules/video_coding/generic_encoder.h"
+#include "webrtc/modules/video_coding/jitter_buffer.h"
+#include "webrtc/modules/video_coding/media_optimization.h"
+#include "webrtc/modules/video_coding/receiver.h"
+#include "webrtc/modules/video_coding/timing.h"
+#include "webrtc/modules/video_coding/utility/qp_parser.h"
#include "webrtc/system_wrappers/include/clock.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
@@ -67,24 +67,10 @@ class VideoSender {
int32_t RegisterSendCodec(const VideoCodec* sendCodec,
uint32_t numberOfCores,
uint32_t maxPayloadSize);
- // Non-blocking access to the currently active send codec configuration.
- // Must be called from the same thread as the VideoSender instance was
- // created on.
- const VideoCodec& GetSendCodec() const;
- // Get a copy of the currently configured send codec.
- // This method acquires a lock to copy the current configuration out,
- // so it can block and the returned information is not guaranteed to be
- // accurate upon return. Consider using GetSendCodec() instead and make
- // decisions on that thread with regards to the current codec.
- int32_t SendCodecBlocking(VideoCodec* currentSendCodec) const;
-
- // Same as SendCodecBlocking. Try to use GetSendCodec() instead.
- VideoCodecType SendCodecBlocking() const;
-
- int32_t RegisterExternalEncoder(VideoEncoder* externalEncoder,
- uint8_t payloadType,
- bool internalSource);
+ void RegisterExternalEncoder(VideoEncoder* externalEncoder,
+ uint8_t payloadType,
+ bool internalSource);
int Bitrate(unsigned int* bitrate) const;
int FrameRate(unsigned int* framerate) const;
@@ -150,9 +136,8 @@ class VideoReceiver {
int32_t numberOfCores,
bool requireKeyFrame);
- int32_t RegisterExternalDecoder(VideoDecoder* externalDecoder,
- uint8_t payloadType,
- bool internalRenderTiming);
+ void RegisterExternalDecoder(VideoDecoder* externalDecoder,
+ uint8_t payloadType);
int32_t RegisterReceiveCallback(VCMReceiveCallback* receiveCallback);
int32_t RegisterReceiveStatisticsCallback(
VCMReceiveStatisticsCallback* receiveStats);
diff --git a/webrtc/modules/video_coding/main/source/video_coding_robustness_unittest.cc b/webrtc/modules/video_coding/video_coding_robustness_unittest.cc
index ac6e16bd80..dd6565d505 100644
--- a/webrtc/modules/video_coding/main/source/video_coding_robustness_unittest.cc
+++ b/webrtc/modules/video_coding/video_coding_robustness_unittest.cc
@@ -10,10 +10,10 @@
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
-#include "webrtc/modules/video_coding/codecs/interface/mock/mock_video_codec_interface.h"
-#include "webrtc/modules/video_coding/main/interface/mock/mock_vcm_callbacks.h"
-#include "webrtc/modules/video_coding/main/interface/video_coding.h"
-#include "webrtc/modules/video_coding/main/test/test_util.h"
+#include "webrtc/modules/video_coding/include/mock/mock_video_codec_interface.h"
+#include "webrtc/modules/video_coding/include/mock/mock_vcm_callbacks.h"
+#include "webrtc/modules/video_coding/include/video_coding.h"
+#include "webrtc/modules/video_coding/test/test_util.h"
#include "webrtc/system_wrappers/include/clock.h"
namespace webrtc {
@@ -42,16 +42,12 @@ class VCMRobustnessTest : public ::testing::Test {
vcm_->SetNackSettings(kMaxNackListSize, kMaxPacketAgeToNack, 0);
ASSERT_EQ(0, vcm_->RegisterFrameTypeCallback(&frame_type_callback_));
ASSERT_EQ(0, vcm_->RegisterPacketRequestCallback(&request_callback_));
- ASSERT_EQ(VCM_OK, vcm_->Codec(kVideoCodecVP8, &video_codec_));
+ VideoCodingModule::Codec(kVideoCodecVP8, &video_codec_);
ASSERT_EQ(VCM_OK, vcm_->RegisterReceiveCodec(&video_codec_, 1));
- ASSERT_EQ(VCM_OK, vcm_->RegisterExternalDecoder(&decoder_,
- video_codec_.plType,
- true));
+ vcm_->RegisterExternalDecoder(&decoder_, video_codec_.plType);
}
- virtual void TearDown() {
- VideoCodingModule::Destroy(vcm_);
- }
+ virtual void TearDown() { VideoCodingModule::Destroy(vcm_); }
void InsertPacket(uint32_t timestamp,
uint16_t seq_no,
@@ -89,19 +85,17 @@ TEST_F(VCMRobustnessTest, TestHardNack) {
.With(Args<0, 1>(ElementsAre(6, 7)))
.Times(1);
for (int ts = 0; ts <= 6000; ts += 3000) {
- EXPECT_CALL(decoder_, Decode(AllOf(Field(&EncodedImage::_timeStamp, ts),
- Field(&EncodedImage::_length,
- kPayloadLen * 3),
- Field(&EncodedImage::_completeFrame,
- true)),
- false, _, _, _))
+ EXPECT_CALL(decoder_,
+ Decode(AllOf(Field(&EncodedImage::_timeStamp, ts),
+ Field(&EncodedImage::_length, kPayloadLen * 3),
+ Field(&EncodedImage::_completeFrame, true)),
+ false, _, _, _))
.Times(1)
.InSequence(s);
}
ASSERT_EQ(VCM_OK, vcm_->SetReceiverRobustnessMode(
- VideoCodingModule::kHardNack,
- kNoErrors));
+ VideoCodingModule::kHardNack, kNoErrors));
InsertPacket(0, 0, true, false, kVideoFrameKey);
InsertPacket(0, 1, false, false, kVideoFrameKey);
@@ -138,14 +132,11 @@ TEST_F(VCMRobustnessTest, TestHardNack) {
}
TEST_F(VCMRobustnessTest, TestHardNackNoneDecoded) {
- EXPECT_CALL(request_callback_, ResendPackets(_, _))
- .Times(0);
- EXPECT_CALL(frame_type_callback_, RequestKeyFrame())
- .Times(1);
+ EXPECT_CALL(request_callback_, ResendPackets(_, _)).Times(0);
+ EXPECT_CALL(frame_type_callback_, RequestKeyFrame()).Times(1);
ASSERT_EQ(VCM_OK, vcm_->SetReceiverRobustnessMode(
- VideoCodingModule::kHardNack,
- kNoErrors));
+ VideoCodingModule::kHardNack, kNoErrors));
InsertPacket(3000, 3, true, false, kVideoFrameDelta);
InsertPacket(3000, 4, false, false, kVideoFrameDelta);
@@ -168,46 +159,43 @@ TEST_F(VCMRobustnessTest, TestModeNoneWithErrors) {
.With(Args<0, 1>(ElementsAre(4)))
.Times(0);
- EXPECT_CALL(decoder_, Copy())
- .Times(0);
- EXPECT_CALL(decoderCopy_, Copy())
- .Times(0);
+ EXPECT_CALL(decoder_, Copy()).Times(0);
+ EXPECT_CALL(decoderCopy_, Copy()).Times(0);
// Decode operations
- EXPECT_CALL(decoder_, Decode(AllOf(Field(&EncodedImage::_timeStamp, 0),
- Field(&EncodedImage::_completeFrame,
- true)),
- false, _, _, _))
- .Times(1)
- .InSequence(s1);
- EXPECT_CALL(decoder_, Decode(AllOf(Field(&EncodedImage::_timeStamp, 3000),
- Field(&EncodedImage::_completeFrame,
- false)),
- false, _, _, _))
- .Times(1)
- .InSequence(s1);
- EXPECT_CALL(decoder_, Decode(AllOf(Field(&EncodedImage::_timeStamp, 6000),
- Field(&EncodedImage::_completeFrame,
- true)),
- false, _, _, _))
- .Times(1)
- .InSequence(s1);
- EXPECT_CALL(decoder_, Decode(AllOf(Field(&EncodedImage::_timeStamp, 9000),
- Field(&EncodedImage::_completeFrame,
- true)),
- false, _, _, _))
- .Times(1)
- .InSequence(s1);
-
- ASSERT_EQ(VCM_OK, vcm_->SetReceiverRobustnessMode(
- VideoCodingModule::kNone,
- kWithErrors));
+ EXPECT_CALL(decoder_,
+ Decode(AllOf(Field(&EncodedImage::_timeStamp, 0),
+ Field(&EncodedImage::_completeFrame, true)),
+ false, _, _, _))
+ .Times(1)
+ .InSequence(s1);
+ EXPECT_CALL(decoder_,
+ Decode(AllOf(Field(&EncodedImage::_timeStamp, 3000),
+ Field(&EncodedImage::_completeFrame, false)),
+ false, _, _, _))
+ .Times(1)
+ .InSequence(s1);
+ EXPECT_CALL(decoder_,
+ Decode(AllOf(Field(&EncodedImage::_timeStamp, 6000),
+ Field(&EncodedImage::_completeFrame, true)),
+ false, _, _, _))
+ .Times(1)
+ .InSequence(s1);
+ EXPECT_CALL(decoder_,
+ Decode(AllOf(Field(&EncodedImage::_timeStamp, 9000),
+ Field(&EncodedImage::_completeFrame, true)),
+ false, _, _, _))
+ .Times(1)
+ .InSequence(s1);
+
+ ASSERT_EQ(VCM_OK, vcm_->SetReceiverRobustnessMode(VideoCodingModule::kNone,
+ kWithErrors));
InsertPacket(0, 0, true, false, kVideoFrameKey);
InsertPacket(0, 1, false, false, kVideoFrameKey);
InsertPacket(0, 2, false, true, kVideoFrameKey);
- EXPECT_EQ(VCM_OK, vcm_->Decode(0)); // Decode timestamp 0.
- EXPECT_EQ(VCM_OK, vcm_->Process()); // Expect no NACK list.
+ EXPECT_EQ(VCM_OK, vcm_->Decode(33)); // Decode timestamp 0.
+ EXPECT_EQ(VCM_OK, vcm_->Process()); // Expect no NACK list.
clock_->AdvanceTimeMilliseconds(33);
InsertPacket(3000, 3, true, false, kVideoFrameDelta);
@@ -224,8 +212,8 @@ TEST_F(VCMRobustnessTest, TestModeNoneWithErrors) {
EXPECT_EQ(VCM_OK, vcm_->Process()); // Expect no NACK list.
clock_->AdvanceTimeMilliseconds(10);
- EXPECT_EQ(VCM_OK, vcm_->Decode(0)); // Decode timestamp 6000 complete.
- EXPECT_EQ(VCM_OK, vcm_->Process()); // Expect no NACK list.
+ EXPECT_EQ(VCM_OK, vcm_->Decode(23)); // Decode timestamp 6000 complete.
+ EXPECT_EQ(VCM_OK, vcm_->Process()); // Expect no NACK list.
clock_->AdvanceTimeMilliseconds(23);
InsertPacket(3000, 4, false, false, kVideoFrameDelta);
@@ -233,6 +221,6 @@ TEST_F(VCMRobustnessTest, TestModeNoneWithErrors) {
InsertPacket(9000, 9, true, false, kVideoFrameDelta);
InsertPacket(9000, 10, false, false, kVideoFrameDelta);
InsertPacket(9000, 11, false, true, kVideoFrameDelta);
- EXPECT_EQ(VCM_OK, vcm_->Decode(0)); // Decode timestamp 9000 complete.
+ EXPECT_EQ(VCM_OK, vcm_->Decode(33)); // Decode timestamp 9000 complete.
}
} // namespace webrtc
diff --git a/webrtc/modules/video_coding/video_coding_test.gypi b/webrtc/modules/video_coding/video_coding_test.gypi
index 5d720ebb63..fc2fec6c98 100644
--- a/webrtc/modules/video_coding/video_coding_test.gypi
+++ b/webrtc/modules/video_coding/video_coding_test.gypi
@@ -19,16 +19,16 @@
],
'sources': [
# headers
- 'main/test/receiver_tests.h',
- 'main/test/rtp_player.h',
- 'main/test/vcm_payload_sink_factory.h',
+ 'test/receiver_tests.h',
+ 'test/rtp_player.h',
+ 'test/vcm_payload_sink_factory.h',
# sources
- 'main/test/rtp_player.cc',
- 'main/test/test_util.cc',
- 'main/test/tester_main.cc',
- 'main/test/vcm_payload_sink_factory.cc',
- 'main/test/video_rtp_play.cc',
+ 'test/rtp_player.cc',
+ 'test/test_util.cc',
+ 'test/tester_main.cc',
+ 'test/vcm_payload_sink_factory.cc',
+ 'test/video_rtp_play.cc',
], # sources
},
],
diff --git a/webrtc/modules/video_coding/main/source/video_receiver.cc b/webrtc/modules/video_coding/video_receiver.cc
index 77c069cf2d..02c0da8f48 100644
--- a/webrtc/modules/video_coding/main/source/video_receiver.cc
+++ b/webrtc/modules/video_coding/video_receiver.cc
@@ -9,16 +9,16 @@
*/
#include "webrtc/base/checks.h"
+#include "webrtc/base/logging.h"
#include "webrtc/base/trace_event.h"
#include "webrtc/common_types.h"
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
-#include "webrtc/modules/video_coding/main/source/encoded_frame.h"
-#include "webrtc/modules/video_coding/main/source/jitter_buffer.h"
-#include "webrtc/modules/video_coding/main/source/packet.h"
-#include "webrtc/modules/video_coding/main/source/video_coding_impl.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
+#include "webrtc/modules/video_coding/encoded_frame.h"
+#include "webrtc/modules/video_coding/jitter_buffer.h"
+#include "webrtc/modules/video_coding/packet.h"
+#include "webrtc/modules/video_coding/video_coding_impl.h"
#include "webrtc/system_wrappers/include/clock.h"
-#include "webrtc/system_wrappers/include/logging.h"
// #define DEBUG_DECODER_BIT_STREAM
@@ -31,7 +31,7 @@ VideoReceiver::VideoReceiver(Clock* clock, EventFactory* event_factory)
_receiveCritSect(CriticalSectionWrapper::CreateCriticalSection()),
_timing(clock_),
_receiver(&_timing, clock_, event_factory),
- _decodedFrameCallback(_timing, clock_),
+ _decodedFrameCallback(&_timing, clock_),
_frameTypeCallback(NULL),
_receiveStatsCallback(NULL),
_decoderTimingCallback(NULL),
@@ -84,20 +84,12 @@ int32_t VideoReceiver::Process() {
int jitter_buffer_ms;
int min_playout_delay_ms;
int render_delay_ms;
- _timing.GetTimings(&decode_ms,
- &max_decode_ms,
- &current_delay_ms,
- &target_delay_ms,
- &jitter_buffer_ms,
- &min_playout_delay_ms,
- &render_delay_ms);
- _decoderTimingCallback->OnDecoderTiming(decode_ms,
- max_decode_ms,
- current_delay_ms,
- target_delay_ms,
- jitter_buffer_ms,
- min_playout_delay_ms,
- render_delay_ms);
+ _timing.GetTimings(&decode_ms, &max_decode_ms, &current_delay_ms,
+ &target_delay_ms, &jitter_buffer_ms,
+ &min_playout_delay_ms, &render_delay_ms);
+ _decoderTimingCallback->OnDecoderTiming(
+ decode_ms, max_decode_ms, current_delay_ms, target_delay_ms,
+ jitter_buffer_ms, min_playout_delay_ms, render_delay_ms);
}
// Size of render buffer.
@@ -235,21 +227,17 @@ int32_t VideoReceiver::RegisterDecoderTimingCallback(
return VCM_OK;
}
-// Register an externally defined decoder/render object.
-// Can be a decoder only or a decoder coupled with a renderer.
-int32_t VideoReceiver::RegisterExternalDecoder(VideoDecoder* externalDecoder,
- uint8_t payloadType,
- bool internalRenderTiming) {
+// Register an externally defined decoder object.
+void VideoReceiver::RegisterExternalDecoder(VideoDecoder* externalDecoder,
+ uint8_t payloadType) {
CriticalSectionScoped cs(_receiveCritSect);
if (externalDecoder == NULL) {
// Make sure the VCM updates the decoder next time it decodes.
_decoder = NULL;
- return _codecDataBase.DeregisterExternalDecoder(payloadType) ? 0 : -1;
+ RTC_CHECK(_codecDataBase.DeregisterExternalDecoder(payloadType));
+ return;
}
- return _codecDataBase.RegisterExternalDecoder(
- externalDecoder, payloadType, internalRenderTiming)
- ? 0
- : -1;
+ _codecDataBase.RegisterExternalDecoder(externalDecoder, payloadType);
}
// Register a frame type request callback.
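The rewritten registration above changes the contract in two ways: a null decoder now means deregistration, and a failed deregistration trips RTC_CHECK instead of returning -1 for the caller to ignore. A toy registry with the same contract; assert stands in for RTC_CHECK and all names are hypothetical:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <map>

struct Decoder {};

class RegistrySketch {
 public:
  // A null decoder means "deregister", mirroring the API above.
  void Register(Decoder* decoder, uint8_t payload_type) {
    if (!decoder) {
      const size_t erased = decoders_.erase(payload_type);
      assert(erased == 1);  // Stands in for RTC_CHECK: fatal on misuse.
      (void)erased;
      return;
    }
    decoders_[payload_type] = decoder;
  }

 private:
  std::map<uint8_t, Decoder*> decoders_;
};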
@@ -282,52 +270,46 @@ void VideoReceiver::TriggerDecoderShutdown() {
// Should be called as often as possible to get the most out of the decoder.
int32_t VideoReceiver::Decode(uint16_t maxWaitTimeMs) {
int64_t nextRenderTimeMs;
- bool supports_render_scheduling;
+ bool prefer_late_decoding = false;
{
CriticalSectionScoped cs(_receiveCritSect);
- supports_render_scheduling = _codecDataBase.SupportsRenderScheduling();
+ prefer_late_decoding = _codecDataBase.PrefersLateDecoding();
}
VCMEncodedFrame* frame = _receiver.FrameForDecoding(
- maxWaitTimeMs, nextRenderTimeMs, supports_render_scheduling);
+ maxWaitTimeMs, &nextRenderTimeMs, prefer_late_decoding);
- if (frame == NULL) {
+ if (!frame)
return VCM_FRAME_NOT_READY;
- } else {
- CriticalSectionScoped cs(_receiveCritSect);
- // If this frame was too late, we should adjust the delay accordingly
- _timing.UpdateCurrentDelay(frame->RenderTimeMs(),
- clock_->TimeInMilliseconds());
+ CriticalSectionScoped cs(_receiveCritSect);
- if (pre_decode_image_callback_) {
- EncodedImage encoded_image(frame->EncodedImage());
- int qp = -1;
- if (qp_parser_.GetQp(*frame, &qp)) {
- encoded_image.qp_ = qp;
- }
- pre_decode_image_callback_->Encoded(
- encoded_image, frame->CodecSpecific(), NULL);
+ // If this frame was too late, we should adjust the delay accordingly
+ _timing.UpdateCurrentDelay(frame->RenderTimeMs(),
+ clock_->TimeInMilliseconds());
+
+ if (pre_decode_image_callback_) {
+ EncodedImage encoded_image(frame->EncodedImage());
+ int qp = -1;
+ if (qp_parser_.GetQp(*frame, &qp)) {
+ encoded_image.qp_ = qp;
}
+ pre_decode_image_callback_->Encoded(encoded_image, frame->CodecSpecific(),
+ NULL);
+ }
#ifdef DEBUG_DECODER_BIT_STREAM
- if (_bitStreamBeforeDecoder != NULL) {
- // Write bit stream to file for debugging purposes
- if (fwrite(
- frame->Buffer(), 1, frame->Length(), _bitStreamBeforeDecoder) !=
- frame->Length()) {
- return -1;
- }
- }
-#endif
- const int32_t ret = Decode(*frame);
- _receiver.ReleaseFrame(frame);
- frame = NULL;
- if (ret != VCM_OK) {
- return ret;
+ if (_bitStreamBeforeDecoder != NULL) {
+ // Write bit stream to file for debugging purposes
+ if (fwrite(frame->Buffer(), 1, frame->Length(), _bitStreamBeforeDecoder) !=
+ frame->Length()) {
+ return -1;
}
}
- return VCM_OK;
+#endif
+ const int32_t ret = Decode(*frame);
+ _receiver.ReleaseFrame(frame);
+ return ret;
}
int32_t VideoReceiver::RequestSliceLossIndication(
@@ -363,21 +345,10 @@ int32_t VideoReceiver::RequestKeyFrame() {
// Must be called from inside the receive side critical section.
int32_t VideoReceiver::Decode(const VCMEncodedFrame& frame) {
- TRACE_EVENT_ASYNC_STEP1("webrtc",
- "Video",
- frame.TimeStamp(),
- "Decode",
- "type",
- frame.FrameType());
+ TRACE_EVENT_ASYNC_STEP1("webrtc", "Video", frame.TimeStamp(), "Decode",
+ "type", frame.FrameType());
// Change decoder if payload type has changed
- const bool renderTimingBefore = _codecDataBase.SupportsRenderScheduling();
- _decoder =
- _codecDataBase.GetDecoder(frame.PayloadType(), &_decodedFrameCallback);
- if (renderTimingBefore != _codecDataBase.SupportsRenderScheduling()) {
- // Make sure we reset the decode time estimate since it will
- // be zero for codecs without render timing.
- _timing.ResetDecodeTime();
- }
+ _decoder = _codecDataBase.GetDecoder(frame, &_decodedFrameCallback);
if (_decoder == NULL) {
return VCM_NO_CODEC_REGISTERED;
}
@@ -436,8 +407,8 @@ int32_t VideoReceiver::RegisterReceiveCodec(const VideoCodec* receiveCodec,
if (receiveCodec == NULL) {
return VCM_PARAMETER_ERROR;
}
- if (!_codecDataBase.RegisterReceiveCodec(
- receiveCodec, numberOfCores, requireKeyFrame)) {
+ if (!_codecDataBase.RegisterReceiveCodec(receiveCodec, numberOfCores,
+ requireKeyFrame)) {
return -1;
}
return 0;
@@ -463,9 +434,7 @@ int32_t VideoReceiver::IncomingPacket(const uint8_t* incomingPayload,
size_t payloadLength,
const WebRtcRTPHeader& rtpInfo) {
if (rtpInfo.frameType == kVideoFrameKey) {
- TRACE_EVENT1("webrtc",
- "VCM::PacketKeyFrame",
- "seqnum",
+ TRACE_EVENT1("webrtc", "VCM::PacketKeyFrame", "seqnum",
rtpInfo.header.sequenceNumber);
}
if (incomingPayload == NULL) {
@@ -504,7 +473,9 @@ int32_t VideoReceiver::SetRenderDelay(uint32_t timeMS) {
}
// Current video delay
-int32_t VideoReceiver::Delay() const { return _timing.TargetVideoDelay(); }
+int32_t VideoReceiver::Delay() const {
+ return _timing.TargetVideoDelay();
+}
uint32_t VideoReceiver::DiscardedPackets() const {
return _receiver.DiscardedPackets();
@@ -560,8 +531,8 @@ void VideoReceiver::SetNackSettings(size_t max_nack_list_size,
CriticalSectionScoped process_cs(process_crit_sect_.get());
max_nack_list_size_ = max_nack_list_size;
}
- _receiver.SetNackSettings(
- max_nack_list_size, max_packet_age_to_nack, max_incomplete_time_ms);
+ _receiver.SetNackSettings(max_nack_list_size, max_packet_age_to_nack,
+ max_incomplete_time_ms);
}
int VideoReceiver::SetMinReceiverDelay(int desired_delay_ms) {
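The Decode() restructuring earlier in this file is purely control-flow: an early return replaces the wrapping else, and the tail captures the decode result before releasing the frame so a single return covers both outcomes. The shape, reduced to a sketch with made-up types:

#include <stdint.h>

struct Frame {
  int32_t decode_result;
};

struct ReceiverSketch {
  void ReleaseFrame(Frame* frame) { delete frame; }
};

int32_t ConsumeSketch(ReceiverSketch* receiver, Frame* frame) {
  if (!frame)
    return -1;  // Guard clause: the old wrapping else is gone.
  const int32_t ret = frame->decode_result;  // Capture the result first...
  receiver->ReleaseFrame(frame);             // ...then release, exactly once.
  return ret;  // One exit covers both the OK and the error value.
}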
diff --git a/webrtc/modules/video_coding/main/source/video_receiver_unittest.cc b/webrtc/modules/video_coding/video_receiver_unittest.cc
index 75ea29a1ec..820ce9ae2d 100644
--- a/webrtc/modules/video_coding/main/source/video_receiver_unittest.cc
+++ b/webrtc/modules/video_coding/video_receiver_unittest.cc
@@ -12,11 +12,11 @@
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/modules/video_coding/codecs/interface/mock/mock_video_codec_interface.h"
-#include "webrtc/modules/video_coding/main/interface/mock/mock_vcm_callbacks.h"
-#include "webrtc/modules/video_coding/main/interface/video_coding.h"
-#include "webrtc/modules/video_coding/main/source/video_coding_impl.h"
-#include "webrtc/modules/video_coding/main/test/test_util.h"
+#include "webrtc/modules/video_coding/include/mock/mock_video_codec_interface.h"
+#include "webrtc/modules/video_coding/include/mock/mock_vcm_callbacks.h"
+#include "webrtc/modules/video_coding/include/video_coding.h"
+#include "webrtc/modules/video_coding/video_coding_impl.h"
+#include "webrtc/modules/video_coding/test/test_util.h"
#include "webrtc/system_wrappers/include/clock.h"
using ::testing::_;
@@ -34,14 +34,12 @@ class TestVideoReceiver : public ::testing::Test {
virtual void SetUp() {
receiver_.reset(new VideoReceiver(&clock_, &event_factory_));
- EXPECT_EQ(0, receiver_->RegisterExternalDecoder(&decoder_,
- kUnusedPayloadType, true));
+ receiver_->RegisterExternalDecoder(&decoder_, kUnusedPayloadType);
const size_t kMaxNackListSize = 250;
const int kMaxPacketAgeToNack = 450;
receiver_->SetNackSettings(kMaxNackListSize, kMaxPacketAgeToNack, 0);
- memset(&settings_, 0, sizeof(settings_));
- EXPECT_EQ(0, VideoCodingModule::Codec(kVideoCodecVP8, &settings_));
+ VideoCodingModule::Codec(kVideoCodecVP8, &settings_);
settings_.plType = kUnusedPayloadType; // Use the mocked encoder.
EXPECT_EQ(0, receiver_->RegisterReceiveCodec(&settings_, 1, true));
}
@@ -56,7 +54,7 @@ class TestVideoReceiver : public ::testing::Test {
}
EXPECT_EQ(0, receiver_->Process());
EXPECT_CALL(decoder_, Decode(_, _, _, _, _)).Times(0);
- EXPECT_EQ(VCM_FRAME_NOT_READY, receiver_->Decode(0));
+ EXPECT_EQ(VCM_FRAME_NOT_READY, receiver_->Decode(100));
}
void InsertAndVerifyDecodableFrame(const uint8_t* payload,
@@ -68,7 +66,7 @@ class TestVideoReceiver : public ::testing::Test {
EXPECT_CALL(packet_request_callback_, ResendPackets(_, _)).Times(0);
EXPECT_EQ(0, receiver_->Process());
EXPECT_CALL(decoder_, Decode(_, _, _, _, _)).Times(1);
- EXPECT_EQ(0, receiver_->Decode(0));
+ EXPECT_EQ(0, receiver_->Decode(100));
}
SimulatedClock clock_;
diff --git a/webrtc/modules/video_coding/main/source/video_sender.cc b/webrtc/modules/video_coding/video_sender.cc
index 98230b1e9e..ac901f95b9 100644
--- a/webrtc/modules/video_coding/main/source/video_sender.cc
+++ b/webrtc/modules/video_coding/video_sender.cc
@@ -8,18 +8,18 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/common_types.h"
#include <algorithm> // std::max
#include "webrtc/base/checks.h"
+#include "webrtc/base/logging.h"
+#include "webrtc/common_types.h"
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
-#include "webrtc/modules/video_coding/main/source/encoded_frame.h"
-#include "webrtc/modules/video_coding/main/source/video_coding_impl.h"
-#include "webrtc/modules/video_coding/utility/include/quality_scaler.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
+#include "webrtc/modules/video_coding/encoded_frame.h"
+#include "webrtc/modules/video_coding/utility/quality_scaler.h"
+#include "webrtc/modules/video_coding/video_coding_impl.h"
#include "webrtc/system_wrappers/include/clock.h"
-#include "webrtc/system_wrappers/include/logging.h"
namespace webrtc {
namespace vcm {
@@ -126,57 +126,34 @@ int32_t VideoSender::RegisterSendCodec(const VideoCodec* sendCodec,
_nextFrameTypes.resize(VCM_MAX(sendCodec->numberOfSimulcastStreams, 1),
kVideoFrameDelta);
- _mediaOpt.SetEncodingData(sendCodec->codecType,
- sendCodec->maxBitrate * 1000,
- sendCodec->startBitrate * 1000,
- sendCodec->width,
- sendCodec->height,
- sendCodec->maxFramerate,
- numLayers,
- maxPayloadSize);
+ _mediaOpt.SetEncodingData(sendCodec->codecType, sendCodec->maxBitrate * 1000,
+ sendCodec->startBitrate * 1000, sendCodec->width,
+ sendCodec->height, sendCodec->maxFramerate,
+ numLayers, maxPayloadSize);
return VCM_OK;
}
-const VideoCodec& VideoSender::GetSendCodec() const {
- RTC_DCHECK(main_thread_.CalledOnValidThread());
- return current_codec_;
-}
-
-int32_t VideoSender::SendCodecBlocking(VideoCodec* currentSendCodec) const {
- rtc::CritScope lock(&send_crit_);
- if (currentSendCodec == nullptr) {
- return VCM_PARAMETER_ERROR;
- }
- return _codecDataBase.SendCodec(currentSendCodec) ? 0 : -1;
-}
-
-VideoCodecType VideoSender::SendCodecBlocking() const {
- rtc::CritScope lock(&send_crit_);
- return _codecDataBase.SendCodec();
-}
-
// Register an external encoder object.
// This cannot be used together with external encoder callbacks.
-int32_t VideoSender::RegisterExternalEncoder(VideoEncoder* externalEncoder,
- uint8_t payloadType,
- bool internalSource /*= false*/) {
+void VideoSender::RegisterExternalEncoder(VideoEncoder* externalEncoder,
+ uint8_t payloadType,
+ bool internalSource /*= false*/) {
RTC_DCHECK(main_thread_.CalledOnValidThread());
rtc::CritScope lock(&send_crit_);
if (externalEncoder == nullptr) {
bool wasSendCodec = false;
- const bool ret =
- _codecDataBase.DeregisterExternalEncoder(payloadType, &wasSendCodec);
+ RTC_CHECK(
+ _codecDataBase.DeregisterExternalEncoder(payloadType, &wasSendCodec));
if (wasSendCodec) {
// Make sure the VCM doesn't use the de-registered codec
_encoder = nullptr;
}
- return ret ? 0 : -1;
+ return;
}
- _codecDataBase.RegisterExternalEncoder(
- externalEncoder, payloadType, internalSource);
- return 0;
+ _codecDataBase.RegisterExternalEncoder(externalEncoder, payloadType,
+ internalSource);
}
// Get encode bitrate
@@ -369,7 +346,6 @@ void VideoSender::SuspendBelowMinBitrate() {
}
bool VideoSender::VideoSuspended() const {
- rtc::CritScope lock(&send_crit_);
return _mediaOpt.IsVideoSuspended();
}
} // namespace vcm
diff --git a/webrtc/modules/video_coding/main/source/video_sender_unittest.cc b/webrtc/modules/video_coding/video_sender_unittest.cc
index e9c8bd79b6..741c7b7a60 100644
--- a/webrtc/modules/video_coding/main/source/video_sender_unittest.cc
+++ b/webrtc/modules/video_coding/video_sender_unittest.cc
@@ -13,18 +13,17 @@
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/common.h"
-#include "webrtc/modules/video_coding/codecs/interface/mock/mock_video_codec_interface.h"
+#include "webrtc/modules/video_coding/include/mock/mock_video_codec_interface.h"
#include "webrtc/modules/video_coding/codecs/vp8/include/vp8.h"
#include "webrtc/modules/video_coding/codecs/vp8/include/vp8_common_types.h"
#include "webrtc/modules/video_coding/codecs/vp8/temporal_layers.h"
-#include "webrtc/modules/video_coding/main/interface/mock/mock_vcm_callbacks.h"
-#include "webrtc/modules/video_coding/main/interface/video_coding.h"
-#include "webrtc/modules/video_coding/main/source/video_coding_impl.h"
-#include "webrtc/modules/video_coding/main/test/test_util.h"
+#include "webrtc/modules/video_coding/include/mock/mock_vcm_callbacks.h"
+#include "webrtc/modules/video_coding/include/video_coding.h"
+#include "webrtc/modules/video_coding/video_coding_impl.h"
+#include "webrtc/modules/video_coding/test/test_util.h"
#include "webrtc/system_wrappers/include/clock.h"
#include "webrtc/test/frame_generator.h"
#include "webrtc/test/testsupport/fileutils.h"
-#include "webrtc/test/testsupport/gtest_disable.h"
using ::testing::_;
using ::testing::AllOf;
@@ -41,9 +40,7 @@ using webrtc::test::FrameGenerator;
namespace webrtc {
namespace vcm {
namespace {
-enum {
- kMaxNumberOfTemporalLayers = 3
-};
+enum { kMaxNumberOfTemporalLayers = 3 };
struct Vp8StreamInfo {
float framerate_fps[kMaxNumberOfTemporalLayers];
@@ -87,7 +84,7 @@ class EmptyFrameGenerator : public FrameGenerator {
class PacketizationCallback : public VCMPacketizationCallback {
public:
- PacketizationCallback(Clock* clock)
+ explicit PacketizationCallback(Clock* clock)
: clock_(clock), start_time_ms_(clock_->TimeInMilliseconds()) {}
virtual ~PacketizationCallback() {}
@@ -207,22 +204,15 @@ class TestVideoSenderWithMockEncoder : public TestVideoSender {
void SetUp() override {
TestVideoSender::SetUp();
- EXPECT_EQ(
- 0,
- sender_->RegisterExternalEncoder(&encoder_, kUnusedPayloadType, false));
- memset(&settings_, 0, sizeof(settings_));
- EXPECT_EQ(0, VideoCodingModule::Codec(kVideoCodecVP8, &settings_));
+ sender_->RegisterExternalEncoder(&encoder_, kUnusedPayloadType, false);
+ VideoCodingModule::Codec(kVideoCodecVP8, &settings_);
settings_.numberOfSimulcastStreams = kNumberOfStreams;
- ConfigureStream(kDefaultWidth / 4,
- kDefaultHeight / 4,
- 100,
+ ConfigureStream(kDefaultWidth / 4, kDefaultHeight / 4, 100,
&settings_.simulcastStream[0]);
- ConfigureStream(kDefaultWidth / 2,
- kDefaultHeight / 2,
- 500,
+ ConfigureStream(kDefaultWidth / 2, kDefaultHeight / 2, 500,
&settings_.simulcastStream[1]);
- ConfigureStream(
- kDefaultWidth, kDefaultHeight, 1200, &settings_.simulcastStream[2]);
+ ConfigureStream(kDefaultWidth, kDefaultHeight, 1200,
+ &settings_.simulcastStream[2]);
settings_.plType = kUnusedPayloadType; // Use the mocked encoder.
generator_.reset(
new EmptyFrameGenerator(settings_.width, settings_.height));
@@ -246,12 +236,11 @@ class TestVideoSenderWithMockEncoder : public TestVideoSender {
assert(stream < kNumberOfStreams);
std::vector<FrameType> frame_types(kNumberOfStreams, kVideoFrameDelta);
frame_types[stream] = kVideoFrameKey;
- EXPECT_CALL(
- encoder_,
- Encode(_,
- _,
- Pointee(ElementsAreArray(&frame_types[0], frame_types.size()))))
- .Times(1).WillRepeatedly(Return(0));
+ EXPECT_CALL(encoder_,
+ Encode(_, _, Pointee(ElementsAreArray(&frame_types[0],
+ frame_types.size()))))
+ .Times(1)
+ .WillRepeatedly(Return(0));
}
static void ConfigureStream(int width,
@@ -300,11 +289,9 @@ TEST_F(TestVideoSenderWithMockEncoder, TestIntraRequests) {
TEST_F(TestVideoSenderWithMockEncoder, TestIntraRequestsInternalCapture) {
// De-register current external encoder.
- EXPECT_EQ(0,
- sender_->RegisterExternalEncoder(NULL, kUnusedPayloadType, false));
+ sender_->RegisterExternalEncoder(nullptr, kUnusedPayloadType, false);
// Register encoder with internal capture.
- EXPECT_EQ(
- 0, sender_->RegisterExternalEncoder(&encoder_, kUnusedPayloadType, true));
+ sender_->RegisterExternalEncoder(&encoder_, kUnusedPayloadType, true);
EXPECT_EQ(0, sender_->RegisterSendCodec(&settings_, 1, 1200));
ExpectIntraRequest(0);
EXPECT_EQ(0, sender_->IntraFrameRequest(0));
@@ -384,8 +371,7 @@ class TestVideoSenderWithVp8 : public TestVideoSender {
codec_.startBitrate = codec_bitrate_kbps_;
codec_.maxBitrate = codec_bitrate_kbps_;
encoder_.reset(VP8Encoder::Create());
- ASSERT_EQ(0, sender_->RegisterExternalEncoder(encoder_.get(), codec_.plType,
- false));
+ sender_->RegisterExternalEncoder(encoder_.get(), codec_.plType, false);
EXPECT_EQ(0, sender_->RegisterSendCodec(&codec_, 1, 1200));
}
@@ -393,8 +379,7 @@ class TestVideoSenderWithVp8 : public TestVideoSender {
int height,
int temporal_layers) {
VideoCodec codec;
- memset(&codec, 0, sizeof(codec));
- EXPECT_EQ(0, VideoCodingModule::Codec(kVideoCodecVP8, &codec));
+ VideoCodingModule::Codec(kVideoCodecVP8, &codec);
codec.width = width;
codec.height = height;
codec.codecSpecific.VP8.numberOfTemporalLayers = temporal_layers;
@@ -436,8 +421,12 @@ class TestVideoSenderWithVp8 : public TestVideoSender {
int available_bitrate_kbps_;
};
-TEST_F(TestVideoSenderWithVp8,
- DISABLED_ON_IOS(DISABLED_ON_ANDROID(FixedTemporalLayersStrategy))) {
+#if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS)
+#define MAYBE_FixedTemporalLayersStrategy DISABLED_FixedTemporalLayersStrategy
+#else
+#define MAYBE_FixedTemporalLayersStrategy FixedTemporalLayersStrategy
+#endif
+TEST_F(TestVideoSenderWithVp8, MAYBE_FixedTemporalLayersStrategy) {
const int low_b = codec_bitrate_kbps_ * kVp8LayerRateAlloction[2][0];
const int mid_b = codec_bitrate_kbps_ * kVp8LayerRateAlloction[2][1];
const int high_b = codec_bitrate_kbps_ * kVp8LayerRateAlloction[2][2];
@@ -451,8 +440,13 @@ TEST_F(TestVideoSenderWithVp8,
}
}
-TEST_F(TestVideoSenderWithVp8,
- DISABLED_ON_IOS(DISABLED_ON_ANDROID(RealTimeTemporalLayersStrategy))) {
+#if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS)
+#define MAYBE_RealTimeTemporalLayersStrategy \
+ DISABLED_RealTimeTemporalLayersStrategy
+#else
+#define MAYBE_RealTimeTemporalLayersStrategy RealTimeTemporalLayersStrategy
+#endif
+TEST_F(TestVideoSenderWithVp8, MAYBE_RealTimeTemporalLayersStrategy) {
Config extra_options;
extra_options.Set<TemporalLayers::Factory>(
new RealTimeTemporalLayersFactory());
diff --git a/webrtc/modules/video_processing/BUILD.gn b/webrtc/modules/video_processing/BUILD.gn
index 00d2911eef..6d411edda1 100644
--- a/webrtc/modules/video_processing/BUILD.gn
+++ b/webrtc/modules/video_processing/BUILD.gn
@@ -6,30 +6,37 @@
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
+import("//build/config/arm.gni")
import("../../build/webrtc.gni")
build_video_processing_sse2 = current_cpu == "x86" || current_cpu == "x64"
source_set("video_processing") {
sources = [
- "main/interface/video_processing.h",
- "main/interface/video_processing_defines.h",
- "main/source/brighten.cc",
- "main/source/brighten.h",
- "main/source/brightness_detection.cc",
- "main/source/brightness_detection.h",
- "main/source/content_analysis.cc",
- "main/source/content_analysis.h",
- "main/source/deflickering.cc",
- "main/source/deflickering.h",
- "main/source/frame_preprocessor.cc",
- "main/source/frame_preprocessor.h",
- "main/source/spatial_resampler.cc",
- "main/source/spatial_resampler.h",
- "main/source/video_decimator.cc",
- "main/source/video_decimator.h",
- "main/source/video_processing_impl.cc",
- "main/source/video_processing_impl.h",
+ "brightness_detection.cc",
+ "brightness_detection.h",
+ "content_analysis.cc",
+ "content_analysis.h",
+ "deflickering.cc",
+ "deflickering.h",
+ "frame_preprocessor.cc",
+ "frame_preprocessor.h",
+ "include/video_processing.h",
+ "include/video_processing_defines.h",
+ "spatial_resampler.cc",
+ "spatial_resampler.h",
+ "util/denoiser_filter.cc",
+ "util/denoiser_filter.h",
+ "util/denoiser_filter_c.cc",
+ "util/denoiser_filter_c.h",
+ "util/skin_detection.cc",
+ "util/skin_detection.h",
+ "video_decimator.cc",
+ "video_decimator.h",
+ "video_denoiser.cc",
+ "video_denoiser.h",
+ "video_processing_impl.cc",
+ "video_processing_impl.h",
]
deps = [
@@ -41,6 +48,9 @@ source_set("video_processing") {
if (build_video_processing_sse2) {
deps += [ ":video_processing_sse2" ]
}
+ if (rtc_build_with_neon) {
+ deps += [ ":video_processing_neon" ]
+ }
configs += [ "../..:common_config" ]
public_configs = [ "../..:common_inherited_config" ]
@@ -55,7 +65,9 @@ source_set("video_processing") {
if (build_video_processing_sse2) {
source_set("video_processing_sse2") {
sources = [
- "main/source/content_analysis_sse2.cc",
+ "content_analysis_sse2.cc",
+ "util/denoiser_filter_sse2.cc",
+ "util/denoiser_filter_sse2.h",
]
configs += [ "../..:common_config" ]
@@ -72,3 +84,18 @@ if (build_video_processing_sse2) {
}
}
}
+
+if (rtc_build_with_neon) {
+ source_set("video_processing_neon") {
+ sources = [
+ "util/denoiser_filter_neon.cc",
+ "util/denoiser_filter_neon.h",
+ ]
+ if (current_cpu != "arm64") {
+ configs -= [ "//build/config/compiler:compiler_arm_fpu" ]
+ cflags = [ "-mfpu=neon" ]
+ }
+ configs += [ "../..:common_config" ]
+ public_configs = [ "../..:common_inherited_config" ]
+ }
+}
diff --git a/webrtc/modules/video_processing/OWNERS b/webrtc/modules/video_processing/OWNERS
index f452c9ed83..389d632dfd 100644
--- a/webrtc/modules/video_processing/OWNERS
+++ b/webrtc/modules/video_processing/OWNERS
@@ -1,4 +1,9 @@
stefan@webrtc.org
marpan@webrtc.org
+# These are for the common case of adding or renaming files. If you're doing
+# structural changes, please get a review from a reviewer in this file.
+per-file *.gyp=*
+per-file *.gypi=*
+
per-file BUILD.gn=kjellander@webrtc.org
diff --git a/webrtc/modules/video_processing/main/source/brightness_detection.cc b/webrtc/modules/video_processing/brightness_detection.cc
index bae225b3b0..7455cf9759 100644
--- a/webrtc/modules/video_processing/main/source/brightness_detection.cc
+++ b/webrtc/modules/video_processing/brightness_detection.cc
@@ -8,11 +8,12 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/video_processing/main/interface/video_processing.h"
-#include "webrtc/modules/video_processing/main/source/brightness_detection.h"
+#include "webrtc/modules/video_processing/brightness_detection.h"
#include <math.h>
+#include "webrtc/modules/video_processing/include/video_processing.h"
+
namespace webrtc {
VPMBrightnessDetection::VPMBrightnessDetection() {
@@ -28,14 +29,14 @@ void VPMBrightnessDetection::Reset() {
int32_t VPMBrightnessDetection::ProcessFrame(
const VideoFrame& frame,
- const VideoProcessingModule::FrameStats& stats) {
+ const VideoProcessing::FrameStats& stats) {
if (frame.IsZeroSize()) {
return VPM_PARAMETER_ERROR;
}
int width = frame.width();
int height = frame.height();
- if (!VideoProcessingModule::ValidFrameStats(stats)) {
+ if (!VideoProcessing::ValidFrameStats(stats)) {
return VPM_PARAMETER_ERROR;
}
@@ -62,11 +63,11 @@ int32_t VPMBrightnessDetection::ProcessFrame(
// Standard deviation of Y
const uint8_t* buffer = frame.buffer(kYPlane);
float std_y = 0;
- for (int h = 0; h < height; h += (1 << stats.subSamplHeight)) {
- int row = h*width;
- for (int w = 0; w < width; w += (1 << stats.subSamplWidth)) {
- std_y += (buffer[w + row] - stats.mean) * (buffer[w + row] -
- stats.mean);
+ for (int h = 0; h < height; h += (1 << stats.sub_sampling_factor)) {
+ int row = h * width;
+ for (int w = 0; w < width; w += (1 << stats.sub_sampling_factor)) {
+ std_y +=
+ (buffer[w + row] - stats.mean) * (buffer[w + row] - stats.mean);
}
}
std_y = sqrt(std_y / stats.num_pixels);
@@ -81,37 +82,39 @@ int32_t VPMBrightnessDetection::ProcessFrame(
float posPerc95 = stats.num_pixels * 0.95f;
for (uint32_t i = 0; i < 256; i++) {
sum += stats.hist[i];
- if (sum < pos_perc05) perc05 = i; // 5th perc.
- if (sum < pos_median) median_y = i; // 50th perc.
+ if (sum < pos_perc05)
+ perc05 = i; // 5th perc.
+ if (sum < pos_median)
+ median_y = i; // 50th perc.
if (sum < posPerc95)
- perc95 = i; // 95th perc.
+ perc95 = i; // 95th perc.
else
break;
}
- // Check if image is too dark
- if ((std_y < 55) && (perc05 < 50)) {
- if (median_y < 60 || stats.mean < 80 || perc95 < 130 ||
- prop_low > 0.20) {
- frame_cnt_dark_++;
- } else {
- frame_cnt_dark_ = 0;
- }
+ // Check if image is too dark
+ if ((std_y < 55) && (perc05 < 50)) {
+ if (median_y < 60 || stats.mean < 80 || perc95 < 130 ||
+ prop_low > 0.20) {
+ frame_cnt_dark_++;
} else {
frame_cnt_dark_ = 0;
}
+ } else {
+ frame_cnt_dark_ = 0;
+ }
- // Check if image is too bright
- if ((std_y < 52) && (perc95 > 200) && (median_y > 160)) {
- if (median_y > 185 || stats.mean > 185 || perc05 > 140 ||
- prop_high > 0.25) {
- frame_cnt_bright_++;
- } else {
- frame_cnt_bright_ = 0;
- }
+ // Check if image is too bright
+ if ((std_y < 52) && (perc95 > 200) && (median_y > 160)) {
+ if (median_y > 185 || stats.mean > 185 || perc05 > 140 ||
+ prop_high > 0.25) {
+ frame_cnt_bright_++;
} else {
frame_cnt_bright_ = 0;
}
+ } else {
+ frame_cnt_bright_ = 0;
+ }
} else {
frame_cnt_dark_ = 0;
frame_cnt_bright_ = 0;
@@ -122,11 +125,11 @@ int32_t VPMBrightnessDetection::ProcessFrame(
}
if (frame_cnt_dark_ > frame_cnt_alarm) {
- return VideoProcessingModule::kDarkWarning;
+ return VideoProcessing::kDarkWarning;
} else if (frame_cnt_bright_ > frame_cnt_alarm) {
- return VideoProcessingModule::kBrightWarning;
+ return VideoProcessing::kBrightWarning;
} else {
- return VideoProcessingModule::kNoWarning;
+ return VideoProcessing::kNoWarning;
}
}
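
The percentile scan above walks the 256-bin luma histogram once, tracking the cumulative pixel count against the 5th, 50th, and 95th percentile thresholds. A standalone sketch of the same idea; the helper name and the >= threshold convention are assumptions, not WebRTC API:

    #include <cstdint>

    // Returns the smallest luma value whose cumulative histogram count
    // reaches |fraction| of all pixels (e.g. 0.05f, 0.5f, 0.95f).
    uint8_t HistogramPercentile(const uint32_t hist[256], uint32_t num_pixels,
                                float fraction) {
      const float threshold = num_pixels * fraction;
      uint32_t sum = 0;
      for (int i = 0; i < 256; ++i) {
        sum += hist[i];
        if (sum >= threshold)
          return static_cast<uint8_t>(i);
      }
      return 255;
    }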
diff --git a/webrtc/modules/video_processing/main/source/brightness_detection.h b/webrtc/modules/video_processing/brightness_detection.h
index 48532b4a20..78a7ac5e0b 100644
--- a/webrtc/modules/video_processing/main/source/brightness_detection.h
+++ b/webrtc/modules/video_processing/brightness_detection.h
@@ -8,12 +8,10 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-/*
- * brightness_detection.h
- */
-#ifndef MODULES_VIDEO_PROCESSING_MAIN_SOURCE_BRIGHTNESS_DETECTION_H
-#define MODULES_VIDEO_PROCESSING_MAIN_SOURCE_BRIGHTNESS_DETECTION_H
-#include "webrtc/modules/video_processing/main/interface/video_processing.h"
+#ifndef WEBRTC_MODULES_VIDEO_PROCESSING_BRIGHTNESS_DETECTION_H_
+#define WEBRTC_MODULES_VIDEO_PROCESSING_BRIGHTNESS_DETECTION_H_
+
+#include "webrtc/modules/video_processing/include/video_processing.h"
#include "webrtc/typedefs.h"
namespace webrtc {
@@ -25,7 +23,7 @@ class VPMBrightnessDetection {
void Reset();
int32_t ProcessFrame(const VideoFrame& frame,
- const VideoProcessingModule::FrameStats& stats);
+ const VideoProcessing::FrameStats& stats);
private:
uint32_t frame_cnt_bright_;
@@ -34,4 +32,4 @@ class VPMBrightnessDetection {
} // namespace webrtc
-#endif // MODULES_VIDEO_PROCESSING_MAIN_SOURCE_BRIGHTNESS_DETECTION_H
+#endif // WEBRTC_MODULES_VIDEO_PROCESSING_BRIGHTNESS_DETECTION_H_
diff --git a/webrtc/modules/video_processing/main/source/content_analysis.cc b/webrtc/modules/video_processing/content_analysis.cc
index d29db27408..54c04da466 100644
--- a/webrtc/modules/video_processing/main/source/content_analysis.cc
+++ b/webrtc/modules/video_processing/content_analysis.cc
@@ -7,7 +7,7 @@
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/video_processing/main/source/content_analysis.h"
+#include "webrtc/modules/video_processing/content_analysis.h"
#include <math.h>
#include <stdlib.h>
@@ -72,7 +72,7 @@ VideoContentMetrics* VPMContentAnalysis::ComputeContentMetrics(
// Saving current frame as previous one: Y only.
memcpy(prev_frame_, orig_frame_, width_ * height_);
- first_frame_ = false;
+ first_frame_ = false;
ca_Init_ = true;
return ContentMetrics();
@@ -85,7 +85,7 @@ int32_t VPMContentAnalysis::Release() {
}
if (prev_frame_ != NULL) {
- delete [] prev_frame_;
+ delete[] prev_frame_;
prev_frame_ = NULL;
}
@@ -106,11 +106,11 @@ int32_t VPMContentAnalysis::Initialize(int width, int height) {
skip_num_ = 1;
// use skipNum = 2 for 4CIF, WHD
- if ( (height_ >= 576) && (width_ >= 704) ) {
+ if ((height_ >= 576) && (width_ >= 704)) {
skip_num_ = 2;
}
  // use skipNum = 4 for FULL_HD images
- if ( (height_ >= 1080) && (width_ >= 1920) ) {
+ if ((height_ >= 1080) && (width_ >= 1920)) {
skip_num_ = 4;
}
@@ -119,7 +119,7 @@ int32_t VPMContentAnalysis::Initialize(int width, int height) {
}
if (prev_frame_ != NULL) {
- delete [] prev_frame_;
+ delete[] prev_frame_;
}
// Spatial Metrics don't work on a border of 8. Minimum processing
@@ -135,12 +135,12 @@ int32_t VPMContentAnalysis::Initialize(int width, int height) {
}
prev_frame_ = new uint8_t[width_ * height_]; // Y only.
- if (prev_frame_ == NULL) return VPM_MEMORY;
+ if (prev_frame_ == NULL)
+ return VPM_MEMORY;
return VPM_OK;
}
-
// Compute motion metrics: magnitude over non-zero motion vectors,
// and size of zero cluster
int32_t VPMContentAnalysis::ComputeMotionMetrics() {
@@ -163,36 +163,41 @@ int32_t VPMContentAnalysis::TemporalDiffMetric_C() {
uint64_t pixelSqSum = 0;
uint32_t num_pixels = 0; // Counter for # of pixels.
- const int width_end = ((width_ - 2*border_) & -16) + border_;
+ const int width_end = ((width_ - 2 * border_) & -16) + border_;
for (int i = border_; i < sizei - border_; i += skip_num_) {
for (int j = border_; j < width_end; j++) {
num_pixels += 1;
- int ssn = i * sizej + j;
+ int ssn = i * sizej + j;
- uint8_t currPixel = orig_frame_[ssn];
- uint8_t prevPixel = prev_frame_[ssn];
+ uint8_t currPixel = orig_frame_[ssn];
+ uint8_t prevPixel = prev_frame_[ssn];
- tempDiffSum += (uint32_t)abs((int16_t)(currPixel - prevPixel));
- pixelSum += (uint32_t) currPixel;
- pixelSqSum += (uint64_t) (currPixel * currPixel);
+ tempDiffSum +=
+ static_cast<uint32_t>(abs((int16_t)(currPixel - prevPixel)));
+ pixelSum += static_cast<uint32_t>(currPixel);
+ pixelSqSum += static_cast<uint64_t>(currPixel * currPixel);
}
}
// Default.
motion_magnitude_ = 0.0f;
- if (tempDiffSum == 0) return VPM_OK;
+ if (tempDiffSum == 0)
+ return VPM_OK;
// Normalize over all pixels.
- float const tempDiffAvg = (float)tempDiffSum / (float)(num_pixels);
- float const pixelSumAvg = (float)pixelSum / (float)(num_pixels);
- float const pixelSqSumAvg = (float)pixelSqSum / (float)(num_pixels);
+ float const tempDiffAvg =
+ static_cast<float>(tempDiffSum) / static_cast<float>(num_pixels);
+ float const pixelSumAvg =
+ static_cast<float>(pixelSum) / static_cast<float>(num_pixels);
+ float const pixelSqSumAvg =
+ static_cast<float>(pixelSqSum) / static_cast<float>(num_pixels);
float contrast = pixelSqSumAvg - (pixelSumAvg * pixelSumAvg);
if (contrast > 0.0) {
contrast = sqrt(contrast);
- motion_magnitude_ = tempDiffAvg/contrast;
+ motion_magnitude_ = tempDiffAvg / contrast;
}
return VPM_OK;
}
@@ -216,39 +221,40 @@ int32_t VPMContentAnalysis::ComputeSpatialMetrics_C() {
uint32_t spatialErrHSum = 0;
// make sure work section is a multiple of 16
- const int width_end = ((sizej - 2*border_) & -16) + border_;
+ const int width_end = ((sizej - 2 * border_) & -16) + border_;
for (int i = border_; i < sizei - border_; i += skip_num_) {
for (int j = border_; j < width_end; j++) {
- int ssn1= i * sizej + j;
- int ssn2 = (i + 1) * sizej + j; // bottom
- int ssn3 = (i - 1) * sizej + j; // top
- int ssn4 = i * sizej + j + 1; // right
- int ssn5 = i * sizej + j - 1; // left
+ int ssn1 = i * sizej + j;
+ int ssn2 = (i + 1) * sizej + j; // bottom
+ int ssn3 = (i - 1) * sizej + j; // top
+ int ssn4 = i * sizej + j + 1; // right
+ int ssn5 = i * sizej + j - 1; // left
- uint16_t refPixel1 = orig_frame_[ssn1] << 1;
- uint16_t refPixel2 = orig_frame_[ssn1] << 2;
+ uint16_t refPixel1 = orig_frame_[ssn1] << 1;
+ uint16_t refPixel2 = orig_frame_[ssn1] << 2;
uint8_t bottPixel = orig_frame_[ssn2];
uint8_t topPixel = orig_frame_[ssn3];
uint8_t rightPixel = orig_frame_[ssn4];
uint8_t leftPixel = orig_frame_[ssn5];
- spatialErrSum += (uint32_t) abs((int16_t)(refPixel2
- - (uint16_t)(bottPixel + topPixel + leftPixel + rightPixel)));
- spatialErrVSum += (uint32_t) abs((int16_t)(refPixel1
- - (uint16_t)(bottPixel + topPixel)));
- spatialErrHSum += (uint32_t) abs((int16_t)(refPixel1
- - (uint16_t)(leftPixel + rightPixel)));
+ spatialErrSum += static_cast<uint32_t>(abs(static_cast<int16_t>(
+ refPixel2 - static_cast<uint16_t>(bottPixel + topPixel + leftPixel +
+ rightPixel))));
+ spatialErrVSum += static_cast<uint32_t>(abs(static_cast<int16_t>(
+ refPixel1 - static_cast<uint16_t>(bottPixel + topPixel))));
+ spatialErrHSum += static_cast<uint32_t>(abs(static_cast<int16_t>(
+ refPixel1 - static_cast<uint16_t>(leftPixel + rightPixel))));
pixelMSA += orig_frame_[ssn1];
}
}
// Normalize over all pixels.
- const float spatialErr = (float)(spatialErrSum >> 2);
- const float spatialErrH = (float)(spatialErrHSum >> 1);
- const float spatialErrV = (float)(spatialErrVSum >> 1);
- const float norm = (float)pixelMSA;
+ const float spatialErr = static_cast<float>(spatialErrSum >> 2);
+ const float spatialErrH = static_cast<float>(spatialErrHSum >> 1);
+ const float spatialErrV = static_cast<float>(spatialErrVSum >> 1);
+ const float norm = static_cast<float>(pixelMSA);
// 2X2:
spatial_pred_err_ = spatialErr / norm;
@@ -260,7 +266,8 @@ int32_t VPMContentAnalysis::ComputeSpatialMetrics_C() {
}
VideoContentMetrics* VPMContentAnalysis::ContentMetrics() {
- if (ca_Init_ == false) return NULL;
+ if (ca_Init_ == false)
+ return NULL;
content_metrics_->spatial_pred_err = spatial_pred_err_;
content_metrics_->spatial_pred_err_h = spatial_pred_err_h_;
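
TemporalDiffMetric_C above reduces to one number: the mean absolute luma difference between consecutive frames, normalized by the current frame's contrast (standard deviation). A scalar sketch of that computation, assuming two equally sized Y planes; the helper is illustrative, not part of VPMContentAnalysis:

    #include <cmath>
    #include <cstdint>
    #include <cstdlib>

    float MotionMagnitude(const uint8_t* cur, const uint8_t* prev,
                          int num_pixels) {
      uint64_t diff_sum = 0, sum = 0, sq_sum = 0;
      for (int i = 0; i < num_pixels; ++i) {
        diff_sum += std::abs(cur[i] - prev[i]);
        sum += cur[i];
        sq_sum += static_cast<uint64_t>(cur[i]) * cur[i];
      }
      if (num_pixels == 0 || diff_sum == 0)
        return 0.0f;
      const float mean = static_cast<float>(sum) / num_pixels;
      const float variance =
          static_cast<float>(sq_sum) / num_pixels - mean * mean;
      if (variance <= 0.0f)
        return 0.0f;
      // Mean temporal difference over contrast, as in the metric above.
      return (static_cast<float>(diff_sum) / num_pixels) / std::sqrt(variance);
    }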
diff --git a/webrtc/modules/video_processing/main/source/content_analysis.h b/webrtc/modules/video_processing/content_analysis.h
index 510c1b4a55..d3a11bd091 100644
--- a/webrtc/modules/video_processing/main/source/content_analysis.h
+++ b/webrtc/modules/video_processing/content_analysis.h
@@ -8,11 +8,11 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_VIDEO_PROCESSING_MAIN_SOURCE_CONTENT_ANALYSIS_H
-#define WEBRTC_MODULES_VIDEO_PROCESSING_MAIN_SOURCE_CONTENT_ANALYSIS_H
+#ifndef WEBRTC_MODULES_VIDEO_PROCESSING_CONTENT_ANALYSIS_H_
+#define WEBRTC_MODULES_VIDEO_PROCESSING_CONTENT_ANALYSIS_H_
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/modules/video_processing/main/interface/video_processing_defines.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/video_processing/include/video_processing_defines.h"
#include "webrtc/typedefs.h"
#include "webrtc/video_frame.h"
@@ -72,16 +72,16 @@ class VPMContentAnalysis {
int border_;
// Content Metrics: Stores the local average of the metrics.
- float motion_magnitude_; // motion class
- float spatial_pred_err_; // spatial class
+ float motion_magnitude_; // motion class
+ float spatial_pred_err_; // spatial class
float spatial_pred_err_h_; // spatial class
float spatial_pred_err_v_; // spatial class
bool first_frame_;
bool ca_Init_;
- VideoContentMetrics* content_metrics_;
+ VideoContentMetrics* content_metrics_;
};
} // namespace webrtc
-#endif // WEBRTC_MODULES_VIDEO_PROCESSING_MAIN_SOURCE_CONTENT_ANALYSIS_H
+#endif // WEBRTC_MODULES_VIDEO_PROCESSING_CONTENT_ANALYSIS_H_
diff --git a/webrtc/modules/video_processing/main/source/content_analysis_sse2.cc b/webrtc/modules/video_processing/content_analysis_sse2.cc
index 17b64ff280..7a60a89b45 100644
--- a/webrtc/modules/video_processing/main/source/content_analysis_sse2.cc
+++ b/webrtc/modules/video_processing/content_analysis_sse2.cc
@@ -8,7 +8,7 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/video_processing/main/source/content_analysis.h"
+#include "webrtc/modules/video_processing/content_analysis.h"
#include <emmintrin.h>
#include <math.h>
@@ -16,22 +16,22 @@
namespace webrtc {
int32_t VPMContentAnalysis::TemporalDiffMetric_SSE2() {
- uint32_t num_pixels = 0; // counter for # of pixels
- const uint8_t* imgBufO = orig_frame_ + border_*width_ + border_;
- const uint8_t* imgBufP = prev_frame_ + border_*width_ + border_;
+ uint32_t num_pixels = 0; // counter for # of pixels
+ const uint8_t* imgBufO = orig_frame_ + border_ * width_ + border_;
+ const uint8_t* imgBufP = prev_frame_ + border_ * width_ + border_;
- const int32_t width_end = ((width_ - 2*border_) & -16) + border_;
+ const int32_t width_end = ((width_ - 2 * border_) & -16) + border_;
- __m128i sad_64 = _mm_setzero_si128();
- __m128i sum_64 = _mm_setzero_si128();
+ __m128i sad_64 = _mm_setzero_si128();
+ __m128i sum_64 = _mm_setzero_si128();
__m128i sqsum_64 = _mm_setzero_si128();
- const __m128i z = _mm_setzero_si128();
+ const __m128i z = _mm_setzero_si128();
- for (uint16_t i = 0; i < (height_ - 2*border_); i += skip_num_) {
- __m128i sqsum_32 = _mm_setzero_si128();
+ for (uint16_t i = 0; i < (height_ - 2 * border_); i += skip_num_) {
+ __m128i sqsum_32 = _mm_setzero_si128();
- const uint8_t *lineO = imgBufO;
- const uint8_t *lineP = imgBufP;
+ const uint8_t* lineO = imgBufO;
+ const uint8_t* lineP = imgBufP;
// Work on 16 pixels at a time. For HD content with a width of 1920
// this loop will run ~67 times (depending on border). Maximum for
@@ -49,14 +49,14 @@ int32_t VPMContentAnalysis::TemporalDiffMetric_SSE2() {
lineP += 16;
// Abs pixel difference between frames.
- sad_64 = _mm_add_epi64 (sad_64, _mm_sad_epu8(o, p));
+ sad_64 = _mm_add_epi64(sad_64, _mm_sad_epu8(o, p));
// sum of all pixels in frame
- sum_64 = _mm_add_epi64 (sum_64, _mm_sad_epu8(o, z));
+ sum_64 = _mm_add_epi64(sum_64, _mm_sad_epu8(o, z));
// Squared sum of all pixels in frame.
- const __m128i olo = _mm_unpacklo_epi8(o,z);
- const __m128i ohi = _mm_unpackhi_epi8(o,z);
+ const __m128i olo = _mm_unpacklo_epi8(o, z);
+ const __m128i ohi = _mm_unpackhi_epi8(o, z);
const __m128i sqsum_32_lo = _mm_madd_epi16(olo, olo);
const __m128i sqsum_32_hi = _mm_madd_epi16(ohi, ohi);
@@ -66,9 +66,9 @@ int32_t VPMContentAnalysis::TemporalDiffMetric_SSE2() {
}
// Add to 64 bit running sum as to not roll over.
- sqsum_64 = _mm_add_epi64(sqsum_64,
- _mm_add_epi64(_mm_unpackhi_epi32(sqsum_32,z),
- _mm_unpacklo_epi32(sqsum_32,z)));
+ sqsum_64 =
+ _mm_add_epi64(sqsum_64, _mm_add_epi64(_mm_unpackhi_epi32(sqsum_32, z),
+ _mm_unpacklo_epi32(sqsum_32, z)));
imgBufO += width_ * skip_num_;
imgBufP += width_ * skip_num_;
@@ -81,13 +81,13 @@ int32_t VPMContentAnalysis::TemporalDiffMetric_SSE2() {
// Bring sums out of vector registers and into integer register
// domain, summing them along the way.
- _mm_store_si128 (&sad_final_128, sad_64);
- _mm_store_si128 (&sum_final_128, sum_64);
- _mm_store_si128 (&sqsum_final_128, sqsum_64);
+ _mm_store_si128(&sad_final_128, sad_64);
+ _mm_store_si128(&sum_final_128, sum_64);
+ _mm_store_si128(&sqsum_final_128, sqsum_64);
- uint64_t *sad_final_64 = reinterpret_cast<uint64_t*>(&sad_final_128);
- uint64_t *sum_final_64 = reinterpret_cast<uint64_t*>(&sum_final_128);
- uint64_t *sqsum_final_64 = reinterpret_cast<uint64_t*>(&sqsum_final_128);
+ uint64_t* sad_final_64 = reinterpret_cast<uint64_t*>(&sad_final_128);
+ uint64_t* sum_final_64 = reinterpret_cast<uint64_t*>(&sum_final_128);
+ uint64_t* sqsum_final_64 = reinterpret_cast<uint64_t*>(&sqsum_final_128);
const uint32_t pixelSum = sum_final_64[0] + sum_final_64[1];
const uint64_t pixelSqSum = sqsum_final_64[0] + sqsum_final_64[1];
@@ -96,27 +96,31 @@ int32_t VPMContentAnalysis::TemporalDiffMetric_SSE2() {
// Default.
motion_magnitude_ = 0.0f;
- if (tempDiffSum == 0) return VPM_OK;
+ if (tempDiffSum == 0)
+ return VPM_OK;
// Normalize over all pixels.
- const float tempDiffAvg = (float)tempDiffSum / (float)(num_pixels);
- const float pixelSumAvg = (float)pixelSum / (float)(num_pixels);
- const float pixelSqSumAvg = (float)pixelSqSum / (float)(num_pixels);
+ const float tempDiffAvg =
+ static_cast<float>(tempDiffSum) / static_cast<float>(num_pixels);
+ const float pixelSumAvg =
+ static_cast<float>(pixelSum) / static_cast<float>(num_pixels);
+ const float pixelSqSumAvg =
+ static_cast<float>(pixelSqSum) / static_cast<float>(num_pixels);
float contrast = pixelSqSumAvg - (pixelSumAvg * pixelSumAvg);
if (contrast > 0.0) {
contrast = sqrt(contrast);
- motion_magnitude_ = tempDiffAvg/contrast;
+ motion_magnitude_ = tempDiffAvg / contrast;
}
return VPM_OK;
}
int32_t VPMContentAnalysis::ComputeSpatialMetrics_SSE2() {
- const uint8_t* imgBuf = orig_frame_ + border_*width_;
+ const uint8_t* imgBuf = orig_frame_ + border_ * width_;
const int32_t width_end = ((width_ - 2 * border_) & -16) + border_;
- __m128i se_32 = _mm_setzero_si128();
+ __m128i se_32 = _mm_setzero_si128();
__m128i sev_32 = _mm_setzero_si128();
__m128i seh_32 = _mm_setzero_si128();
__m128i msa_32 = _mm_setzero_si128();
@@ -127,8 +131,8 @@ int32_t VPMContentAnalysis::ComputeSpatialMetrics_SSE2() {
// value is maxed out at 65529 for every row, 65529*1080 = 70777800, which
// will not roll over a 32 bit accumulator.
// skip_num_ is also used to reduce the number of rows
- for (int32_t i = 0; i < (height_ - 2*border_); i += skip_num_) {
- __m128i se_16 = _mm_setzero_si128();
+ for (int32_t i = 0; i < (height_ - 2 * border_); i += skip_num_) {
+ __m128i se_16 = _mm_setzero_si128();
__m128i sev_16 = _mm_setzero_si128();
__m128i seh_16 = _mm_setzero_si128();
__m128i msa_16 = _mm_setzero_si128();
@@ -143,9 +147,9 @@ int32_t VPMContentAnalysis::ComputeSpatialMetrics_SSE2() {
// border_ could also be adjusted to concentrate on just the center of
      // the images for an HD capture in order to reduce the possibility of
// rollover.
- const uint8_t *lineTop = imgBuf - width_ + border_;
- const uint8_t *lineCen = imgBuf + border_;
- const uint8_t *lineBot = imgBuf + width_ + border_;
+ const uint8_t* lineTop = imgBuf - width_ + border_;
+ const uint8_t* lineCen = imgBuf + border_;
+ const uint8_t* lineBot = imgBuf + width_ + border_;
for (int32_t j = 0; j < width_end - border_; j += 16) {
const __m128i t = _mm_loadu_si128((__m128i*)(lineTop));
@@ -159,20 +163,20 @@ int32_t VPMContentAnalysis::ComputeSpatialMetrics_SSE2() {
lineBot += 16;
// center pixel unpacked
- __m128i clo = _mm_unpacklo_epi8(c,z);
- __m128i chi = _mm_unpackhi_epi8(c,z);
+ __m128i clo = _mm_unpacklo_epi8(c, z);
+ __m128i chi = _mm_unpackhi_epi8(c, z);
// left right pixels unpacked and added together
- const __m128i lrlo = _mm_add_epi16(_mm_unpacklo_epi8(l,z),
- _mm_unpacklo_epi8(r,z));
- const __m128i lrhi = _mm_add_epi16(_mm_unpackhi_epi8(l,z),
- _mm_unpackhi_epi8(r,z));
+ const __m128i lrlo =
+ _mm_add_epi16(_mm_unpacklo_epi8(l, z), _mm_unpacklo_epi8(r, z));
+ const __m128i lrhi =
+ _mm_add_epi16(_mm_unpackhi_epi8(l, z), _mm_unpackhi_epi8(r, z));
// top & bottom pixels unpacked and added together
- const __m128i tblo = _mm_add_epi16(_mm_unpacklo_epi8(t,z),
- _mm_unpacklo_epi8(b,z));
- const __m128i tbhi = _mm_add_epi16(_mm_unpackhi_epi8(t,z),
- _mm_unpackhi_epi8(b,z));
+ const __m128i tblo =
+ _mm_add_epi16(_mm_unpacklo_epi8(t, z), _mm_unpacklo_epi8(b, z));
+ const __m128i tbhi =
+ _mm_add_epi16(_mm_unpackhi_epi8(t, z), _mm_unpackhi_epi8(b, z));
// running sum of all pixels
msa_16 = _mm_add_epi16(msa_16, _mm_add_epi16(chi, clo));
@@ -190,29 +194,32 @@ int32_t VPMContentAnalysis::ComputeSpatialMetrics_SSE2() {
const __m128i sethi = _mm_subs_epi16(chi, _mm_add_epi16(lrhi, tbhi));
// Add to 16 bit running sum
- se_16 = _mm_add_epi16(se_16, _mm_max_epi16(setlo,
- _mm_subs_epi16(z, setlo)));
- se_16 = _mm_add_epi16(se_16, _mm_max_epi16(sethi,
- _mm_subs_epi16(z, sethi)));
- sev_16 = _mm_add_epi16(sev_16, _mm_max_epi16(sevtlo,
- _mm_subs_epi16(z, sevtlo)));
- sev_16 = _mm_add_epi16(sev_16, _mm_max_epi16(sevthi,
- _mm_subs_epi16(z, sevthi)));
- seh_16 = _mm_add_epi16(seh_16, _mm_max_epi16(sehtlo,
- _mm_subs_epi16(z, sehtlo)));
- seh_16 = _mm_add_epi16(seh_16, _mm_max_epi16(sehthi,
- _mm_subs_epi16(z, sehthi)));
+ se_16 =
+ _mm_add_epi16(se_16, _mm_max_epi16(setlo, _mm_subs_epi16(z, setlo)));
+ se_16 =
+ _mm_add_epi16(se_16, _mm_max_epi16(sethi, _mm_subs_epi16(z, sethi)));
+ sev_16 = _mm_add_epi16(sev_16,
+ _mm_max_epi16(sevtlo, _mm_subs_epi16(z, sevtlo)));
+ sev_16 = _mm_add_epi16(sev_16,
+ _mm_max_epi16(sevthi, _mm_subs_epi16(z, sevthi)));
+ seh_16 = _mm_add_epi16(seh_16,
+ _mm_max_epi16(sehtlo, _mm_subs_epi16(z, sehtlo)));
+ seh_16 = _mm_add_epi16(seh_16,
+ _mm_max_epi16(sehthi, _mm_subs_epi16(z, sehthi)));
}
// Add to 32 bit running sum as to not roll over.
- se_32 = _mm_add_epi32(se_32, _mm_add_epi32(_mm_unpackhi_epi16(se_16,z),
- _mm_unpacklo_epi16(se_16,z)));
- sev_32 = _mm_add_epi32(sev_32, _mm_add_epi32(_mm_unpackhi_epi16(sev_16,z),
- _mm_unpacklo_epi16(sev_16,z)));
- seh_32 = _mm_add_epi32(seh_32, _mm_add_epi32(_mm_unpackhi_epi16(seh_16,z),
- _mm_unpacklo_epi16(seh_16,z)));
- msa_32 = _mm_add_epi32(msa_32, _mm_add_epi32(_mm_unpackhi_epi16(msa_16,z),
- _mm_unpacklo_epi16(msa_16,z)));
+ se_32 = _mm_add_epi32(se_32, _mm_add_epi32(_mm_unpackhi_epi16(se_16, z),
+ _mm_unpacklo_epi16(se_16, z)));
+ sev_32 =
+ _mm_add_epi32(sev_32, _mm_add_epi32(_mm_unpackhi_epi16(sev_16, z),
+ _mm_unpacklo_epi16(sev_16, z)));
+ seh_32 =
+ _mm_add_epi32(seh_32, _mm_add_epi32(_mm_unpackhi_epi16(seh_16, z),
+ _mm_unpacklo_epi16(seh_16, z)));
+ msa_32 =
+ _mm_add_epi32(msa_32, _mm_add_epi32(_mm_unpackhi_epi16(msa_16, z),
+ _mm_unpacklo_epi16(msa_16, z)));
imgBuf += width_ * skip_num_;
}
@@ -224,30 +231,30 @@ int32_t VPMContentAnalysis::ComputeSpatialMetrics_SSE2() {
// Bring sums out of vector registers and into integer register
// domain, summing them along the way.
- _mm_store_si128 (&se_128, _mm_add_epi64(_mm_unpackhi_epi32(se_32,z),
- _mm_unpacklo_epi32(se_32,z)));
- _mm_store_si128 (&sev_128, _mm_add_epi64(_mm_unpackhi_epi32(sev_32,z),
- _mm_unpacklo_epi32(sev_32,z)));
- _mm_store_si128 (&seh_128, _mm_add_epi64(_mm_unpackhi_epi32(seh_32,z),
- _mm_unpacklo_epi32(seh_32,z)));
- _mm_store_si128 (&msa_128, _mm_add_epi64(_mm_unpackhi_epi32(msa_32,z),
- _mm_unpacklo_epi32(msa_32,z)));
-
- uint64_t *se_64 = reinterpret_cast<uint64_t*>(&se_128);
- uint64_t *sev_64 = reinterpret_cast<uint64_t*>(&sev_128);
- uint64_t *seh_64 = reinterpret_cast<uint64_t*>(&seh_128);
- uint64_t *msa_64 = reinterpret_cast<uint64_t*>(&msa_128);
-
- const uint32_t spatialErrSum = se_64[0] + se_64[1];
+ _mm_store_si128(&se_128, _mm_add_epi64(_mm_unpackhi_epi32(se_32, z),
+ _mm_unpacklo_epi32(se_32, z)));
+ _mm_store_si128(&sev_128, _mm_add_epi64(_mm_unpackhi_epi32(sev_32, z),
+ _mm_unpacklo_epi32(sev_32, z)));
+ _mm_store_si128(&seh_128, _mm_add_epi64(_mm_unpackhi_epi32(seh_32, z),
+ _mm_unpacklo_epi32(seh_32, z)));
+ _mm_store_si128(&msa_128, _mm_add_epi64(_mm_unpackhi_epi32(msa_32, z),
+ _mm_unpacklo_epi32(msa_32, z)));
+
+ uint64_t* se_64 = reinterpret_cast<uint64_t*>(&se_128);
+ uint64_t* sev_64 = reinterpret_cast<uint64_t*>(&sev_128);
+ uint64_t* seh_64 = reinterpret_cast<uint64_t*>(&seh_128);
+ uint64_t* msa_64 = reinterpret_cast<uint64_t*>(&msa_128);
+
+ const uint32_t spatialErrSum = se_64[0] + se_64[1];
const uint32_t spatialErrVSum = sev_64[0] + sev_64[1];
const uint32_t spatialErrHSum = seh_64[0] + seh_64[1];
const uint32_t pixelMSA = msa_64[0] + msa_64[1];
// Normalize over all pixels.
- const float spatialErr = (float)(spatialErrSum >> 2);
- const float spatialErrH = (float)(spatialErrHSum >> 1);
- const float spatialErrV = (float)(spatialErrVSum >> 1);
- const float norm = (float)pixelMSA;
+ const float spatialErr = static_cast<float>(spatialErrSum >> 2);
+ const float spatialErrH = static_cast<float>(spatialErrHSum >> 1);
+ const float spatialErrV = static_cast<float>(spatialErrVSum >> 1);
+ const float norm = static_cast<float>(pixelMSA);
// 2X2:
spatial_pred_err_ = spatialErr / norm;
@@ -258,7 +265,7 @@ int32_t VPMContentAnalysis::ComputeSpatialMetrics_SSE2() {
// 2X1:
spatial_pred_err_v_ = spatialErrV / norm;
- return VPM_OK;
+ return VPM_OK;
}
} // namespace webrtc
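
Both SSE2 metrics finish the same way: lane sums are widened to 64 bits inside vector registers, stored, and added in the scalar domain. A minimal standalone sketch of that horizontal reduction (an assumed helper, not WebRTC API):

    #include <emmintrin.h>
    #include <cstdint>

    // Sums the two 64-bit lanes of |v| in the integer register domain.
    uint64_t HorizontalSum64(__m128i v) {
      alignas(16) uint64_t lanes[2];
      _mm_store_si128(reinterpret_cast<__m128i*>(lanes), v);
      return lanes[0] + lanes[1];
    }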
diff --git a/webrtc/modules/video_processing/main/source/deflickering.cc b/webrtc/modules/video_processing/deflickering.cc
index 19bc641ac9..0e936ce9b7 100644
--- a/webrtc/modules/video_processing/main/source/deflickering.cc
+++ b/webrtc/modules/video_processing/deflickering.cc
@@ -8,13 +8,13 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/video_processing/main/source/deflickering.h"
+#include "webrtc/modules/video_processing/deflickering.h"
#include <math.h>
#include <stdlib.h>
+#include "webrtc/base/logging.h"
#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
-#include "webrtc/system_wrappers/include/logging.h"
#include "webrtc/system_wrappers/include/sort.h"
namespace webrtc {
@@ -40,16 +40,17 @@ enum { kLog2OfDownsamplingFactor = 3 };
// >> fprintf('%d, ', probUW16)
// Resolution reduced to avoid overflow when multiplying with the
// (potentially) large number of pixels.
-const uint16_t VPMDeflickering::prob_uw16_[kNumProbs] = {102, 205, 410, 614,
- 819, 1024, 1229, 1434, 1638, 1843, 1946, 1987}; // <Q11>
+const uint16_t VPMDeflickering::prob_uw16_[kNumProbs] = {
+ 102, 205, 410, 614, 819, 1024,
+ 1229, 1434, 1638, 1843, 1946, 1987}; // <Q11>
// To generate in Matlab:
// >> numQuants = 14; maxOnlyLength = 5;
// >> weightUW16 = round(2^15 *
// [linspace(0.5, 1.0, numQuants - maxOnlyLength)]);
// >> fprintf('%d, %d,\n ', weightUW16);
-const uint16_t VPMDeflickering::weight_uw16_[kNumQuants - kMaxOnlyLength] =
- {16384, 18432, 20480, 22528, 24576, 26624, 28672, 30720, 32768}; // <Q15>
+const uint16_t VPMDeflickering::weight_uw16_[kNumQuants - kMaxOnlyLength] = {
+ 16384, 18432, 20480, 22528, 24576, 26624, 28672, 30720, 32768}; // <Q15>
VPMDeflickering::VPMDeflickering() {
Reset();
@@ -70,8 +71,8 @@ void VPMDeflickering::Reset() {
quant_hist_uw8_[0][kNumQuants - 1] = 255;
for (int32_t i = 0; i < kNumProbs; i++) {
// Unsigned round. <Q0>
- quant_hist_uw8_[0][i + 1] = static_cast<uint8_t>(
- (prob_uw16_[i] * 255 + (1 << 10)) >> 11);
+ quant_hist_uw8_[0][i + 1] =
+ static_cast<uint8_t>((prob_uw16_[i] * 255 + (1 << 10)) >> 11);
}
for (int32_t i = 1; i < kFrameHistory_size; i++) {
@@ -80,9 +81,8 @@ void VPMDeflickering::Reset() {
}
}
-int32_t VPMDeflickering::ProcessFrame(
- VideoFrame* frame,
- VideoProcessingModule::FrameStats* stats) {
+int32_t VPMDeflickering::ProcessFrame(VideoFrame* frame,
+ VideoProcessing::FrameStats* stats) {
assert(frame);
uint32_t frame_memory;
uint8_t quant_uw8[kNumQuants];
@@ -107,11 +107,12 @@ int32_t VPMDeflickering::ProcessFrame(
return VPM_GENERAL_ERROR;
}
- if (!VideoProcessingModule::ValidFrameStats(*stats)) {
+ if (!VideoProcessing::ValidFrameStats(*stats)) {
return VPM_GENERAL_ERROR;
}
- if (PreDetection(frame->timestamp(), *stats) == -1) return VPM_GENERAL_ERROR;
+ if (PreDetection(frame->timestamp(), *stats) == -1)
+ return VPM_GENERAL_ERROR;
// Flicker detection
int32_t det_flicker = DetectFlicker();
@@ -124,13 +125,13 @@ int32_t VPMDeflickering::ProcessFrame(
// Size of luminance component.
const uint32_t y_size = height * width;
- const uint32_t y_sub_size = width * (((height - 1) >>
- kLog2OfDownsamplingFactor) + 1);
+ const uint32_t y_sub_size =
+ width * (((height - 1) >> kLog2OfDownsamplingFactor) + 1);
uint8_t* y_sorted = new uint8_t[y_sub_size];
uint32_t sort_row_idx = 0;
for (int i = 0; i < height; i += kDownsamplingFactor) {
- memcpy(y_sorted + sort_row_idx * width,
- frame->buffer(kYPlane) + i * width, width);
+ memcpy(y_sorted + sort_row_idx * width, frame->buffer(kYPlane) + i * width,
+ width);
sort_row_idx++;
}
@@ -153,12 +154,12 @@ int32_t VPMDeflickering::ProcessFrame(
quant_uw8[i + 1] = y_sorted[prob_idx_uw32];
}
- delete [] y_sorted;
+ delete[] y_sorted;
y_sorted = NULL;
// Shift history for new frame.
memmove(quant_hist_uw8_[1], quant_hist_uw8_[0],
- (kFrameHistory_size - 1) * kNumQuants * sizeof(uint8_t));
+ (kFrameHistory_size - 1) * kNumQuants * sizeof(uint8_t));
// Store current frame in history.
memcpy(quant_hist_uw8_[0], quant_uw8, kNumQuants * sizeof(uint8_t));
@@ -190,9 +191,10 @@ int32_t VPMDeflickering::ProcessFrame(
// target = w * maxquant_uw8 + (1 - w) * minquant_uw8
// Weights w = |weight_uw16_| are in Q15, hence the final output has to be
// right shifted by 8 to end up in Q7.
- target_quant_uw16[i] = static_cast<uint16_t>((
- weight_uw16_[i] * maxquant_uw8[i] +
- ((1 << 15) - weight_uw16_[i]) * minquant_uw8[i]) >> 8); // <Q7>
+ target_quant_uw16[i] = static_cast<uint16_t>(
+ (weight_uw16_[i] * maxquant_uw8[i] +
+ ((1 << 15) - weight_uw16_[i]) * minquant_uw8[i]) >>
+ 8); // <Q7>
}
for (int32_t i = kNumQuants - kMaxOnlyLength; i < kNumQuants; i++) {
@@ -203,13 +205,14 @@ int32_t VPMDeflickering::ProcessFrame(
uint16_t mapUW16; // <Q7>
for (int32_t i = 1; i < kNumQuants; i++) {
// As quant and targetQuant are limited to UWord8, it's safe to use Q7 here.
- tmp_uw32 = static_cast<uint32_t>(target_quant_uw16[i] -
- target_quant_uw16[i - 1]);
+ tmp_uw32 =
+ static_cast<uint32_t>(target_quant_uw16[i] - target_quant_uw16[i - 1]);
tmp_uw16 = static_cast<uint16_t>(quant_uw8[i] - quant_uw8[i - 1]); // <Q0>
if (tmp_uw16 > 0) {
- increment_uw16 = static_cast<uint16_t>(WebRtcSpl_DivU32U16(tmp_uw32,
- tmp_uw16)); // <Q7>
+ increment_uw16 =
+ static_cast<uint16_t>(WebRtcSpl_DivU32U16(tmp_uw32,
+ tmp_uw16)); // <Q7>
} else {
// The value is irrelevant; the loop below will only iterate once.
increment_uw16 = 0;
@@ -230,7 +233,7 @@ int32_t VPMDeflickering::ProcessFrame(
}
// Frame was altered, so reset stats.
- VideoProcessingModule::ClearFrameStats(stats);
+ VideoProcessing::ClearFrameStats(stats);
return VPM_OK;
}
@@ -247,8 +250,9 @@ int32_t VPMDeflickering::ProcessFrame(
zero.\n
-1: Error
*/
-int32_t VPMDeflickering::PreDetection(const uint32_t timestamp,
- const VideoProcessingModule::FrameStats& stats) {
+int32_t VPMDeflickering::PreDetection(
+ const uint32_t timestamp,
+ const VideoProcessing::FrameStats& stats) {
int32_t mean_val; // Mean value of frame (Q4)
uint32_t frame_rate = 0;
int32_t meanBufferLength; // Temp variable.
@@ -257,16 +261,16 @@ int32_t VPMDeflickering::PreDetection(const uint32_t timestamp,
// Update mean value buffer.
// This should be done even though we might end up in an unreliable detection.
memmove(mean_buffer_ + 1, mean_buffer_,
- (kMeanBufferLength - 1) * sizeof(int32_t));
+ (kMeanBufferLength - 1) * sizeof(int32_t));
mean_buffer_[0] = mean_val;
// Update timestamp buffer.
// This should be done even though we might end up in an unreliable detection.
- memmove(timestamp_buffer_ + 1, timestamp_buffer_, (kMeanBufferLength - 1) *
- sizeof(uint32_t));
+ memmove(timestamp_buffer_ + 1, timestamp_buffer_,
+ (kMeanBufferLength - 1) * sizeof(uint32_t));
timestamp_buffer_[0] = timestamp;
-/* Compute current frame rate (Q4) */
+ /* Compute current frame rate (Q4) */
if (timestamp_buffer_[kMeanBufferLength - 1] != 0) {
frame_rate = ((90000 << 4) * (kMeanBufferLength - 1));
frame_rate /=
@@ -315,22 +319,22 @@ int32_t VPMDeflickering::PreDetection(const uint32_t timestamp,
-1: Error
*/
int32_t VPMDeflickering::DetectFlicker() {
- uint32_t i;
- int32_t freqEst; // (Q4) Frequency estimate to base detection upon
- int32_t ret_val = -1;
+ uint32_t i;
+ int32_t freqEst; // (Q4) Frequency estimate to base detection upon
+ int32_t ret_val = -1;
/* Sanity check for mean_buffer_length_ */
if (mean_buffer_length_ < 2) {
/* Not possible to estimate frequency */
- return(2);
+ return 2;
}
// Count zero crossings with a dead zone to be robust against noise. If the
  // noise std is 2 pixels, this corresponds to about a 95% confidence interval.
int32_t deadzone = (kZeroCrossingDeadzone << kmean_valueScaling); // Q4
int32_t meanOfBuffer = 0; // Mean value of mean value buffer.
- int32_t numZeros = 0; // Number of zeros that cross the dead-zone.
- int32_t cntState = 0; // State variable for zero crossing regions.
- int32_t cntStateOld = 0; // Previous state for zero crossing regions.
+ int32_t numZeros = 0; // Number of zeros that cross the dead-zone.
+ int32_t cntState = 0; // State variable for zero crossing regions.
+ int32_t cntStateOld = 0; // Previous state for zero crossing regions.
for (i = 0; i < mean_buffer_length_; i++) {
meanOfBuffer += mean_buffer_[i];
@@ -371,7 +375,7 @@ int32_t VPMDeflickering::DetectFlicker() {
int32_t freqAlias = freqEst;
if (freqEst > kMinFrequencyToDetect) {
uint8_t aliasState = 1;
- while(freqState == 0) {
+ while (freqState == 0) {
/* Increase frequency */
freqAlias += (aliasState * frame_rate_);
freqAlias += ((freqEst << 1) * (1 - (aliasState << 1)));
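
The deflickering math stays in fixed point throughout: probabilities in Q11, blend weights in Q15, frame rate in Q4, and quantile targets in Q7. As a worked sketch of the Q15 blend used for target_quant_uw16 above (a Q15 weight times a Q0 sample gives Q15; shifting right by 8 lands in Q7; the helper name is illustrative):

    #include <cstdint>

    // target = w * max + (1 - w) * min, with w in Q15 and samples in Q0.
    uint16_t BlendQ7(uint16_t w_q15, uint8_t max_q0, uint8_t min_q0) {
      const uint32_t acc = static_cast<uint32_t>(w_q15) * max_q0 +
                           (static_cast<uint32_t>(1 << 15) - w_q15) * min_q0;
      return static_cast<uint16_t>(acc >> 8);  // <Q7>; at most 32640, so it fits.
    }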
diff --git a/webrtc/modules/video_processing/main/source/deflickering.h b/webrtc/modules/video_processing/deflickering.h
index 36e6845d71..3ff2723aba 100644
--- a/webrtc/modules/video_processing/main/source/deflickering.h
+++ b/webrtc/modules/video_processing/deflickering.h
@@ -8,12 +8,12 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_VIDEO_PROCESSING_MAIN_SOURCEdeflickering__H
-#define WEBRTC_MODULES_VIDEO_PROCESSING_MAIN_SOURCEdeflickering__H
+#ifndef WEBRTC_MODULES_VIDEO_PROCESSING_DEFLICKERING_H_
+#define WEBRTC_MODULES_VIDEO_PROCESSING_DEFLICKERING_H_
#include <string.h> // NULL
-#include "webrtc/modules/video_processing/main/interface/video_processing.h"
+#include "webrtc/modules/video_processing/include/video_processing.h"
#include "webrtc/typedefs.h"
namespace webrtc {
@@ -24,12 +24,11 @@ class VPMDeflickering {
~VPMDeflickering();
void Reset();
- int32_t ProcessFrame(VideoFrame* frame,
- VideoProcessingModule::FrameStats* stats);
+ int32_t ProcessFrame(VideoFrame* frame, VideoProcessing::FrameStats* stats);
private:
int32_t PreDetection(uint32_t timestamp,
- const VideoProcessingModule::FrameStats& stats);
+ const VideoProcessing::FrameStats& stats);
int32_t DetectFlicker();
@@ -39,13 +38,13 @@ class VPMDeflickering {
enum { kNumQuants = kNumProbs + 2 };
enum { kMaxOnlyLength = 5 };
- uint32_t mean_buffer_length_;
- uint8_t detection_state_; // 0: No flickering
- // 1: Flickering detected
- // 2: In flickering
- int32_t mean_buffer_[kMeanBufferLength];
- uint32_t timestamp_buffer_[kMeanBufferLength];
- uint32_t frame_rate_;
+ uint32_t mean_buffer_length_;
+ uint8_t detection_state_; // 0: No flickering
+ // 1: Flickering detected
+ // 2: In flickering
+ int32_t mean_buffer_[kMeanBufferLength];
+ uint32_t timestamp_buffer_[kMeanBufferLength];
+ uint32_t frame_rate_;
static const uint16_t prob_uw16_[kNumProbs];
static const uint16_t weight_uw16_[kNumQuants - kMaxOnlyLength];
uint8_t quant_hist_uw8_[kFrameHistory_size][kNumQuants];
@@ -53,4 +52,4 @@ class VPMDeflickering {
} // namespace webrtc
-#endif // WEBRTC_MODULES_VIDEO_PROCESSING_MAIN_SOURCEdeflickering__H
+#endif // WEBRTC_MODULES_VIDEO_PROCESSING_DEFLICKERING_H_
diff --git a/webrtc/modules/video_processing/main/source/frame_preprocessor.cc b/webrtc/modules/video_processing/frame_preprocessor.cc
index a9d77c2e0c..6778a597be 100644
--- a/webrtc/modules/video_processing/main/source/frame_preprocessor.cc
+++ b/webrtc/modules/video_processing/frame_preprocessor.cc
@@ -8,12 +8,14 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/video_processing/main/source/frame_preprocessor.h"
+#include "webrtc/modules/video_processing/frame_preprocessor.h"
+
+#include "webrtc/modules/video_processing/video_denoiser.h"
namespace webrtc {
VPMFramePreprocessor::VPMFramePreprocessor()
- : content_metrics_(NULL),
+ : content_metrics_(nullptr),
resampled_frame_(),
enable_ca_(false),
frame_cnt_(0) {
@@ -24,21 +26,21 @@ VPMFramePreprocessor::VPMFramePreprocessor()
VPMFramePreprocessor::~VPMFramePreprocessor() {
Reset();
- delete spatial_resampler_;
delete ca_;
delete vd_;
+ delete spatial_resampler_;
}
-void VPMFramePreprocessor::Reset() {
+void VPMFramePreprocessor::Reset() {
ca_->Release();
vd_->Reset();
- content_metrics_ = NULL;
+ content_metrics_ = nullptr;
spatial_resampler_->Reset();
enable_ca_ = false;
frame_cnt_ = 0;
}
-void VPMFramePreprocessor::EnableTemporalDecimation(bool enable) {
+void VPMFramePreprocessor::EnableTemporalDecimation(bool enable) {
vd_->EnableTemporalDecimation(enable);
}
@@ -46,20 +48,22 @@ void VPMFramePreprocessor::EnableContentAnalysis(bool enable) {
enable_ca_ = enable;
}
-void VPMFramePreprocessor::SetInputFrameResampleMode(
+void VPMFramePreprocessor::SetInputFrameResampleMode(
VideoFrameResampling resampling_mode) {
spatial_resampler_->SetInputFrameResampleMode(resampling_mode);
}
-int32_t VPMFramePreprocessor::SetTargetResolution(
- uint32_t width, uint32_t height, uint32_t frame_rate) {
- if ( (width == 0) || (height == 0) || (frame_rate == 0)) {
+int32_t VPMFramePreprocessor::SetTargetResolution(uint32_t width,
+ uint32_t height,
+ uint32_t frame_rate) {
+ if ((width == 0) || (height == 0) || (frame_rate == 0)) {
return VPM_PARAMETER_ERROR;
}
int32_t ret_val = 0;
ret_val = spatial_resampler_->SetTargetFrameSize(width, height);
- if (ret_val < 0) return ret_val;
+ if (ret_val < 0)
+ return ret_val;
vd_->SetTargetFramerate(frame_rate);
return VPM_OK;
@@ -78,59 +82,60 @@ void VPMFramePreprocessor::UpdateIncomingframe_rate() {
vd_->UpdateIncomingframe_rate();
}
-uint32_t VPMFramePreprocessor::Decimatedframe_rate() {
- return vd_->Decimatedframe_rate();
+uint32_t VPMFramePreprocessor::GetDecimatedFrameRate() {
+ return vd_->GetDecimatedFrameRate();
}
-
-uint32_t VPMFramePreprocessor::DecimatedWidth() const {
+uint32_t VPMFramePreprocessor::GetDecimatedWidth() const {
return spatial_resampler_->TargetWidth();
}
-
-uint32_t VPMFramePreprocessor::DecimatedHeight() const {
+uint32_t VPMFramePreprocessor::GetDecimatedHeight() const {
return spatial_resampler_->TargetHeight();
}
-int32_t VPMFramePreprocessor::PreprocessFrame(const VideoFrame& frame,
- VideoFrame** processed_frame) {
+void VPMFramePreprocessor::EnableDenosing(bool enable) {
+ denoiser_.reset(new VideoDenoiser(true));
+}
+
+const VideoFrame* VPMFramePreprocessor::PreprocessFrame(
+ const VideoFrame& frame) {
if (frame.IsZeroSize()) {
- return VPM_PARAMETER_ERROR;
+ return nullptr;
}
vd_->UpdateIncomingframe_rate();
-
if (vd_->DropFrame()) {
- return 1; // drop 1 frame
+ return nullptr;
}
- // Resizing incoming frame if needed. Otherwise, remains NULL.
- // We are not allowed to resample the input frame (must make a copy of it).
- *processed_frame = NULL;
- if (spatial_resampler_->ApplyResample(frame.width(), frame.height())) {
- int32_t ret = spatial_resampler_->ResampleFrame(frame, &resampled_frame_);
- if (ret != VPM_OK) return ret;
- *processed_frame = &resampled_frame_;
+ const VideoFrame* current_frame = &frame;
+ if (denoiser_) {
+ denoiser_->DenoiseFrame(*current_frame, &denoised_frame_);
+ current_frame = &denoised_frame_;
+ }
+
+ if (spatial_resampler_->ApplyResample(current_frame->width(),
+ current_frame->height())) {
+ if (spatial_resampler_->ResampleFrame(*current_frame, &resampled_frame_) !=
+ VPM_OK) {
+ return nullptr;
+ }
+ current_frame = &resampled_frame_;
}
// Perform content analysis on the frame to be encoded.
- if (enable_ca_) {
+ if (enable_ca_ && frame_cnt_ % kSkipFrameCA == 0) {
// Compute new metrics every |kSkipFramesCA| frames, starting with
// the first frame.
- if (frame_cnt_ % kSkipFrameCA == 0) {
- if (*processed_frame == NULL) {
- content_metrics_ = ca_->ComputeContentMetrics(frame);
- } else {
- content_metrics_ = ca_->ComputeContentMetrics(resampled_frame_);
- }
- }
- ++frame_cnt_;
+ content_metrics_ = ca_->ComputeContentMetrics(*current_frame);
}
- return VPM_OK;
+ ++frame_cnt_;
+ return current_frame;
}
-VideoContentMetrics* VPMFramePreprocessor::ContentMetrics() const {
+VideoContentMetrics* VPMFramePreprocessor::GetContentMetrics() const {
return content_metrics_;
}
-} // namespace
+} // namespace webrtc
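
The preprocessing contract changes shape here: instead of an int32_t status plus a VideoFrame** out-parameter, PreprocessFrame returns a pointer to the frame the encoder should consume, or nullptr when the decimator drops the frame or resampling fails. A hypothetical caller under the new contract (EncodeFrame is an illustrative hook, not WebRTC API):

    const webrtc::VideoFrame* to_encode =
        preprocessor->PreprocessFrame(raw_frame);
    if (to_encode == nullptr)
      return;  // Dropped by temporal decimation, or a preprocessing error.
    // |to_encode| may alias |raw_frame|, the denoised frame, or the
    // resampled frame; it remains owned by the preprocessor.
    EncodeFrame(*to_encode);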
diff --git a/webrtc/modules/video_processing/main/source/frame_preprocessor.h b/webrtc/modules/video_processing/frame_preprocessor.h
index 895e457cc6..5bdc576f37 100644
--- a/webrtc/modules/video_processing/main/source/frame_preprocessor.h
+++ b/webrtc/modules/video_processing/frame_preprocessor.h
@@ -8,20 +8,23 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-/*
- * frame_preprocessor.h
- */
-#ifndef WEBRTC_MODULES_VIDEO_PROCESSING_MAIN_SOURCE_FRAME_PREPROCESSOR_H
-#define WEBRTC_MODULES_VIDEO_PROCESSING_MAIN_SOURCE_FRAME_PREPROCESSOR_H
-
-#include "webrtc/modules/video_processing/main/interface/video_processing.h"
-#include "webrtc/modules/video_processing/main/source/content_analysis.h"
-#include "webrtc/modules/video_processing/main/source/spatial_resampler.h"
-#include "webrtc/modules/video_processing/main/source/video_decimator.h"
+#ifndef WEBRTC_MODULES_VIDEO_PROCESSING_FRAME_PREPROCESSOR_H_
+#define WEBRTC_MODULES_VIDEO_PROCESSING_FRAME_PREPROCESSOR_H_
+
+#include "webrtc/base/scoped_ptr.h"
+#include "webrtc/modules/video_processing/include/video_processing.h"
+#include "webrtc/modules/video_processing/content_analysis.h"
+#include "webrtc/modules/video_processing/spatial_resampler.h"
+#include "webrtc/modules/video_processing/video_decimator.h"
#include "webrtc/typedefs.h"
+#include "webrtc/video_frame.h"
namespace webrtc {
+class VideoDenoiser;
+
+// All pointers/members in this class are assumed to be protected by the class
+// owner.
class VPMFramePreprocessor {
public:
VPMFramePreprocessor();
@@ -38,7 +41,8 @@ class VPMFramePreprocessor {
void EnableContentAnalysis(bool enable);
// Set target resolution: frame rate and dimension.
- int32_t SetTargetResolution(uint32_t width, uint32_t height,
+ int32_t SetTargetResolution(uint32_t width,
+ uint32_t height,
uint32_t frame_rate);
// Set target frame rate.
@@ -50,14 +54,14 @@ class VPMFramePreprocessor {
int32_t updateIncomingFrameSize(uint32_t width, uint32_t height);
  // Get decimated values: frame rate/dimension.
- uint32_t Decimatedframe_rate();
- uint32_t DecimatedWidth() const;
- uint32_t DecimatedHeight() const;
+ uint32_t GetDecimatedFrameRate();
+ uint32_t GetDecimatedWidth() const;
+ uint32_t GetDecimatedHeight() const;
// Preprocess output:
- int32_t PreprocessFrame(const VideoFrame& frame,
- VideoFrame** processed_frame);
- VideoContentMetrics* ContentMetrics() const;
+ void EnableDenosing(bool enable);
+ const VideoFrame* PreprocessFrame(const VideoFrame& frame);
+ VideoContentMetrics* GetContentMetrics() const;
private:
// The content does not change so much every frame, so to reduce complexity
@@ -65,15 +69,16 @@ class VPMFramePreprocessor {
enum { kSkipFrameCA = 2 };
VideoContentMetrics* content_metrics_;
+ VideoFrame denoised_frame_;
VideoFrame resampled_frame_;
VPMSpatialResampler* spatial_resampler_;
VPMContentAnalysis* ca_;
VPMVideoDecimator* vd_;
+ rtc::scoped_ptr<VideoDenoiser> denoiser_;
bool enable_ca_;
- int frame_cnt_;
-
+ uint32_t frame_cnt_;
};
} // namespace webrtc
-#endif // WEBRTC_MODULES_VIDEO_PROCESSING_MAIN_SOURCE_FRAME_PREPROCESSOR_H
+#endif // WEBRTC_MODULES_VIDEO_PROCESSING_FRAME_PREPROCESSOR_H_
diff --git a/webrtc/modules/video_processing/include/video_processing.h b/webrtc/modules/video_processing/include/video_processing.h
new file mode 100644
index 0000000000..a8d6358887
--- /dev/null
+++ b/webrtc/modules/video_processing/include/video_processing.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_PROCESSING_INCLUDE_VIDEO_PROCESSING_H_
+#define WEBRTC_MODULES_VIDEO_PROCESSING_INCLUDE_VIDEO_PROCESSING_H_
+
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/video_processing/include/video_processing_defines.h"
+#include "webrtc/video_frame.h"
+
+// The module is largely intended to process video streams, except functionality
+// provided by static functions, which operate independently of previous frames. It
+// is recommended, but not required that a unique instance be used for each
+// concurrently processed stream. Similarly, it is recommended to call Reset()
+// before switching to a new stream, but this is not absolutely required.
+//
+// The module provides basic thread safety by permitting only a single function
+// to execute concurrently.
+
+namespace webrtc {
+
+class VideoProcessing {
+ public:
+ struct FrameStats {
+ uint32_t hist[256]; // Frame histogram.
+ uint32_t mean;
+ uint32_t sum;
+ uint32_t num_pixels;
+ uint32_t sub_sampling_factor; // Sub-sampling factor, in powers of 2.
+ };
+
+ enum BrightnessWarning { kNoWarning, kDarkWarning, kBrightWarning };
+
+ static VideoProcessing* Create();
+ virtual ~VideoProcessing() {}
+
+ // Retrieves statistics for the input frame. This function must be used to
+ // prepare a FrameStats struct for use in certain VPM functions.
+ static void GetFrameStats(const VideoFrame& frame, FrameStats* stats);
+
+ // Checks the validity of a FrameStats struct. Currently, valid implies only
+ // that it has changed from its initialized state.
+ static bool ValidFrameStats(const FrameStats& stats);
+
+ static void ClearFrameStats(FrameStats* stats);
+
+ // Increases/decreases the luminance value. 'delta' can be in the range {}
+ static void Brighten(int delta, VideoFrame* frame);
+
+ // Detects and removes camera flicker from a video stream. Every frame from
+ // the stream must be passed in. A frame will only be altered if flicker has
+ // been detected. Has a fixed-point implementation.
+ // Frame statistics provided by GetFrameStats(). On return the stats will
+ // be reset to zero if the frame was altered. Call GetFrameStats() again
+ // if the statistics for the altered frame are required.
+ virtual int32_t Deflickering(VideoFrame* frame, FrameStats* stats) = 0;
+
+ // Detects if a video frame is excessively bright or dark. Returns a
+ // warning if this is the case. Multiple frames should be passed in before
+ // expecting a warning. Has a floating-point implementation.
+ virtual int32_t BrightnessDetection(const VideoFrame& frame,
+ const FrameStats& stats) = 0;
+
+ // The following functions refer to the pre-processor unit within VPM. The
+ // pre-processor performs spatial/temporal decimation and content analysis on
+ // the frames prior to encoding.
+
+ // Enable/disable temporal decimation
+ virtual void EnableTemporalDecimation(bool enable) = 0;
+
+ virtual int32_t SetTargetResolution(uint32_t width,
+ uint32_t height,
+ uint32_t frame_rate) = 0;
+
+ virtual void SetTargetFramerate(int frame_rate) = 0;
+
+ virtual uint32_t GetDecimatedFrameRate() = 0;
+ virtual uint32_t GetDecimatedWidth() const = 0;
+ virtual uint32_t GetDecimatedHeight() const = 0;
+
+ // Set the spatial resampling settings of the VPM according to
+ // VideoFrameResampling.
+ virtual void SetInputFrameResampleMode(
+ VideoFrameResampling resampling_mode) = 0;
+
+ virtual void EnableDenosing(bool enable) = 0;
+ virtual const VideoFrame* PreprocessFrame(const VideoFrame& frame) = 0;
+
+ virtual VideoContentMetrics* GetContentMetrics() const = 0;
+ virtual void EnableContentAnalysis(bool enable) = 0;
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_PROCESSING_INCLUDE_VIDEO_PROCESSING_H_
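For orientation, a minimal sketch of driving the interface above end to end. The function name is hypothetical and the frame is assumed to be a valid I420 VideoFrame obtained elsewhere; error handling is elided:

#include "webrtc/modules/video_processing/include/video_processing.h"

namespace {

// Sketch only: runs deflickering and brightness detection on one frame.
void ProcessOneFrame(webrtc::VideoFrame* frame) {
  webrtc::VideoProcessing* vp = webrtc::VideoProcessing::Create();
  webrtc::VideoProcessing::FrameStats stats;
  webrtc::VideoProcessing::GetFrameStats(*frame, &stats);
  if (webrtc::VideoProcessing::ValidFrameStats(stats)) {
    vp->Deflickering(frame, &stats);  // Zeroes |stats| if the frame changed.
    webrtc::VideoProcessing::GetFrameStats(*frame, &stats);
    int32_t warning = vp->BrightnessDetection(*frame, stats);
    // |warning| is kNoWarning, kDarkWarning or kBrightWarning, or -1 on error.
    (void)warning;
  }
  delete vp;  // Teardown is a plain delete; there is no Destroy() anymore.
}

}  // namespace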
diff --git a/webrtc/modules/video_processing/main/interface/video_processing_defines.h b/webrtc/modules/video_processing/include/video_processing_defines.h
index 93a0658966..9cc71bde27 100644
--- a/webrtc/modules/video_processing/main/interface/video_processing_defines.h
+++ b/webrtc/modules/video_processing/include/video_processing_defines.h
@@ -13,29 +13,29 @@
* This header file includes the definitions used in the video processor module
*/
-#ifndef WEBRTC_MODULES_INTERFACE_VIDEO_PROCESSING_DEFINES_H
-#define WEBRTC_MODULES_INTERFACE_VIDEO_PROCESSING_DEFINES_H
+#ifndef WEBRTC_MODULES_VIDEO_PROCESSING_INCLUDE_VIDEO_PROCESSING_DEFINES_H_
+#define WEBRTC_MODULES_VIDEO_PROCESSING_INCLUDE_VIDEO_PROCESSING_DEFINES_H_
#include "webrtc/typedefs.h"
namespace webrtc {
// Error codes
-#define VPM_OK 0
-#define VPM_GENERAL_ERROR -1
-#define VPM_MEMORY -2
-#define VPM_PARAMETER_ERROR -3
-#define VPM_SCALE_ERROR -4
-#define VPM_UNINITIALIZED -5
-#define VPM_UNIMPLEMENTED -6
+#define VPM_OK 0
+#define VPM_GENERAL_ERROR -1
+#define VPM_MEMORY -2
+#define VPM_PARAMETER_ERROR -3
+#define VPM_SCALE_ERROR -4
+#define VPM_UNINITIALIZED -5
+#define VPM_UNIMPLEMENTED -6
enum VideoFrameResampling {
- kNoRescaling, // Disables rescaling.
- kFastRescaling, // Point filter.
- kBiLinear, // Bi-linear interpolation.
- kBox, // Box inteprolation.
+ kNoRescaling, // Disables rescaling.
+ kFastRescaling, // Point filter.
+ kBiLinear, // Bi-linear interpolation.
+  kBox,            // Box interpolation.
};
} // namespace webrtc
-#endif // WEBRTC_MODULES_INTERFACE_VIDEO_PROCESSING_DEFINES_H
+#endif // WEBRTC_MODULES_VIDEO_PROCESSING_INCLUDE_VIDEO_PROCESSING_DEFINES_H_
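The VPM_* codes above are plain negative-integer statuses. A sketch of the intended call-site pattern; the function name and the printf are illustrative only, and |vp| is assumed to come from VideoProcessing::Create():

#include <cstdio>

#include "webrtc/modules/video_processing/include/video_processing.h"

// Sketch: configure the pre-processor and branch on the returned status.
int32_t ConfigureTarget(webrtc::VideoProcessing* vp) {
  int32_t ret = vp->SetTargetResolution(640, 480, 30);
  if (ret != VPM_OK) {
    // Negative values identify the failure, e.g. VPM_PARAMETER_ERROR == -3.
    std::printf("SetTargetResolution failed: %d\n", ret);
  }
  return ret;
}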
diff --git a/webrtc/modules/video_processing/main/interface/video_processing.h b/webrtc/modules/video_processing/main/interface/video_processing.h
deleted file mode 100644
index 30af99fb8e..0000000000
--- a/webrtc/modules/video_processing/main/interface/video_processing.h
+++ /dev/null
@@ -1,270 +0,0 @@
-/*
- * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-/*
- * video_processing.h
- * This header file contains the API required for the video
- * processing module class.
- */
-
-
-#ifndef WEBRTC_MODULES_INTERFACE_VIDEO_PROCESSING_H
-#define WEBRTC_MODULES_INTERFACE_VIDEO_PROCESSING_H
-
-#include "webrtc/modules/interface/module.h"
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/modules/video_processing/main/interface/video_processing_defines.h"
-#include "webrtc/video_frame.h"
-
-/**
- The module is largely intended to process video streams, except functionality
- provided by static functions which operate independent of previous frames. It
- is recommended, but not required that a unique instance be used for each
- concurrently processed stream. Similarly, it is recommended to call Reset()
- before switching to a new stream, but this is not absolutely required.
-
- The module provides basic thread safety by permitting only a single function
- to execute concurrently.
-*/
-
-namespace webrtc {
-
-class VideoProcessingModule : public Module {
- public:
- /**
- Structure to hold frame statistics. Populate it with GetFrameStats().
- */
- struct FrameStats {
- FrameStats() :
- mean(0),
- sum(0),
- num_pixels(0),
- subSamplWidth(0),
- subSamplHeight(0) {
- memset(hist, 0, sizeof(hist));
- }
-
- uint32_t hist[256]; // FRame histogram.
- uint32_t mean; // Frame Mean value.
- uint32_t sum; // Sum of frame.
- uint32_t num_pixels; // Number of pixels.
- uint8_t subSamplWidth; // Subsampling rate of width in powers of 2.
- uint8_t subSamplHeight; // Subsampling rate of height in powers of 2.
-};
-
- /**
- Specifies the warning types returned by BrightnessDetection().
- */
- enum BrightnessWarning {
- kNoWarning, // Frame has acceptable brightness.
- kDarkWarning, // Frame is too dark.
- kBrightWarning // Frame is too bright.
- };
-
- /*
- Creates a VPM object.
-
- \param[in] id
- Unique identifier of this object.
-
- \return Pointer to a VPM object.
- */
- static VideoProcessingModule* Create();
-
- /**
- Destroys a VPM object.
-
- \param[in] module
- Pointer to the VPM object to destroy.
- */
- static void Destroy(VideoProcessingModule* module);
-
- /**
- Not supported.
- */
- int64_t TimeUntilNextProcess() override { return -1; }
-
- /**
- Not supported.
- */
- int32_t Process() override { return -1; }
-
- /**
- Resets all processing components to their initial states. This should be
- called whenever a new video stream is started.
- */
- virtual void Reset() = 0;
-
- /**
- Retrieves statistics for the input frame. This function must be used to
- prepare a FrameStats struct for use in certain VPM functions.
-
- \param[out] stats
- The frame statistics will be stored here on return.
-
- \param[in] frame
- Reference to the video frame.
-
- \return 0 on success, -1 on failure.
- */
- static int32_t GetFrameStats(FrameStats* stats, const VideoFrame& frame);
-
- /**
- Checks the validity of a FrameStats struct. Currently, valid implies only
- that is had changed from its initialized state.
-
- \param[in] stats
- Frame statistics.
-
- \return True on valid stats, false on invalid stats.
- */
- static bool ValidFrameStats(const FrameStats& stats);
-
- /**
- Returns a FrameStats struct to its intialized state.
-
- \param[in,out] stats
- Frame statistics.
- */
- static void ClearFrameStats(FrameStats* stats);
-
- /**
- Increases/decreases the luminance value.
-
- \param[in,out] frame
- Pointer to the video frame.
-
- \param[in] delta
- The amount to change the chrominance value of every single pixel.
- Can be < 0 also.
-
- \return 0 on success, -1 on failure.
- */
- static int32_t Brighten(VideoFrame* frame, int delta);
-
- /**
- Detects and removes camera flicker from a video stream. Every frame from
- the stream must be passed in. A frame will only be altered if flicker has
- been detected. Has a fixed-point implementation.
-
- \param[in,out] frame
- Pointer to the video frame.
-
- \param[in,out] stats
- Frame statistics provided by GetFrameStats(). On return the stats will
- be reset to zero if the frame was altered. Call GetFrameStats() again
- if the statistics for the altered frame are required.
-
- \return 0 on success, -1 on failure.
- */
- virtual int32_t Deflickering(VideoFrame* frame, FrameStats* stats) = 0;
-
- /**
- Detects if a video frame is excessively bright or dark. Returns a
- warning if this is the case. Multiple frames should be passed in before
- expecting a warning. Has a floating-point implementation.
-
- \param[in] frame
- Pointer to the video frame.
-
- \param[in] stats
- Frame statistics provided by GetFrameStats().
-
- \return A member of BrightnessWarning on success, -1 on error
- */
- virtual int32_t BrightnessDetection(const VideoFrame& frame,
- const FrameStats& stats) = 0;
-
- /**
- The following functions refer to the pre-processor unit within VPM. The
- pre-processor perfoms spatial/temporal decimation and content analysis on
- the frames prior to encoding.
- */
-
- /**
- Enable/disable temporal decimation
-
- \param[in] enable when true, temporal decimation is enabled
- */
- virtual void EnableTemporalDecimation(bool enable) = 0;
-
- /**
- Set target resolution
-
- \param[in] width
- Target width
-
- \param[in] height
- Target height
-
- \param[in] frame_rate
- Target frame_rate
-
- \return VPM_OK on success, a negative value on error (see error codes)
-
- */
- virtual int32_t SetTargetResolution(uint32_t width,
- uint32_t height,
- uint32_t frame_rate) = 0;
-
- virtual void SetTargetFramerate(int frame_rate) {}
-
- /**
- Get decimated(target) frame rate
- */
- virtual uint32_t Decimatedframe_rate() = 0;
-
- /**
- Get decimated(target) frame width
- */
- virtual uint32_t DecimatedWidth() const = 0;
-
- /**
- Get decimated(target) frame height
- */
- virtual uint32_t DecimatedHeight() const = 0 ;
-
- /**
- Set the spatial resampling settings of the VPM: The resampler may either be
- disabled or one of the following:
- scaling to a close to target dimension followed by crop/pad
-
- \param[in] resampling_mode
- Set resampling mode (a member of VideoFrameResampling)
- */
- virtual void SetInputFrameResampleMode(VideoFrameResampling
- resampling_mode) = 0;
-
- /**
- Get Processed (decimated) frame
-
- \param[in] frame pointer to the video frame.
- \param[in] processed_frame pointer (double) to the processed frame. If no
- processing is required, processed_frame will be NULL.
-
- \return VPM_OK on success, a negative value on error (see error codes)
- */
- virtual int32_t PreprocessFrame(const VideoFrame& frame,
- VideoFrame** processed_frame) = 0;
-
- /**
- Return content metrics for the last processed frame
- */
- virtual VideoContentMetrics* ContentMetrics() const = 0 ;
-
- /**
- Enable content analysis
- */
- virtual void EnableContentAnalysis(bool enable) = 0;
-};
-
-} // namespace webrtc
-
-#endif // WEBRTC_MODULES_INTERFACE_VIDEO_PROCESSING_H
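Taken together with the new header earlier in this change, the deletion above reduces to the following call-site migration (a sketch, assuming |frame| is a valid VideoFrame):

// Before: module-style API with int32_t status returns.
VideoProcessingModule* vpm = VideoProcessingModule::Create();
VideoProcessingModule::FrameStats stats;
if (VideoProcessingModule::GetFrameStats(&stats, frame) == 0) {
  vpm->Deflickering(&frame, &stats);
}
VideoProcessingModule::Destroy(vpm);

// After: GetFrameStats() is void and takes (frame, stats); validity is
// checked separately, and teardown is a plain delete.
VideoProcessing* vp = VideoProcessing::Create();
VideoProcessing::FrameStats new_stats;
VideoProcessing::GetFrameStats(frame, &new_stats);
if (VideoProcessing::ValidFrameStats(new_stats)) {
  vp->Deflickering(&frame, &new_stats);
}
delete vp;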
diff --git a/webrtc/modules/video_processing/main/source/OWNERS b/webrtc/modules/video_processing/main/source/OWNERS
deleted file mode 100644
index 3ee6b4bf5f..0000000000
--- a/webrtc/modules/video_processing/main/source/OWNERS
+++ /dev/null
@@ -1,5 +0,0 @@
-
-# These are for the common case of adding or renaming files. If you're doing
-# structural changes, please get a review from a reviewer in this file.
-per-file *.gyp=*
-per-file *.gypi=*
diff --git a/webrtc/modules/video_processing/main/source/brighten.cc b/webrtc/modules/video_processing/main/source/brighten.cc
deleted file mode 100644
index 1fe813e7b0..0000000000
--- a/webrtc/modules/video_processing/main/source/brighten.cc
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "webrtc/modules/video_processing/main/source/brighten.h"
-
-#include <stdlib.h>
-
-namespace webrtc {
-namespace VideoProcessing {
-
-int32_t Brighten(VideoFrame* frame, int delta) {
- assert(frame);
- if (frame->IsZeroSize()) {
- return VPM_PARAMETER_ERROR;
- }
- if (frame->width() <= 0 || frame->height() <= 0) {
- return VPM_PARAMETER_ERROR;
- }
-
- int num_pixels = frame->width() * frame->height();
-
- int look_up[256];
- for (int i = 0; i < 256; i++) {
- int val = i + delta;
- look_up[i] = ((((val < 0) ? 0 : val) > 255) ? 255 : val);
- }
-
- uint8_t* temp_ptr = frame->buffer(kYPlane);
-
- for (int i = 0; i < num_pixels; i++) {
- *temp_ptr = static_cast<uint8_t>(look_up[*temp_ptr]);
- temp_ptr++;
- }
- return VPM_OK;
-}
-
-} // namespace VideoProcessing
-} // namespace webrtc
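The deleted Brighten() is a clamped lookup-table pass over the Y plane. Note that, as written, the expression ((((val < 0) ? 0 : val) > 255) ? 255 : val) yields val itself when val < 0, later wrapped by the uint8_t cast. A self-contained sketch of the same pass with both ends clamped (the function name is illustrative):

#include <cstdint>

// Builds a [0, 255]-clamped lookup table for |delta| and applies it in place
// to |num_pixels| luma samples -- the same structure as the deleted loop.
void BrightenLuma(uint8_t* y_plane, int num_pixels, int delta) {
  uint8_t look_up[256];
  for (int i = 0; i < 256; ++i) {
    int val = i + delta;
    look_up[i] = static_cast<uint8_t>(val < 0 ? 0 : (val > 255 ? 255 : val));
  }
  for (int i = 0; i < num_pixels; ++i)
    y_plane[i] = look_up[y_plane[i]];
}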
diff --git a/webrtc/modules/video_processing/main/source/brighten.h b/webrtc/modules/video_processing/main/source/brighten.h
deleted file mode 100644
index 151d7a3b51..0000000000
--- a/webrtc/modules/video_processing/main/source/brighten.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef MODULES_VIDEO_PROCESSING_MAIN_SOURCE_BRIGHTEN_H_
-#define MODULES_VIDEO_PROCESSING_MAIN_SOURCE_BRIGHTEN_H_
-
-#include "webrtc/modules/video_processing/main/interface/video_processing.h"
-#include "webrtc/typedefs.h"
-
-namespace webrtc {
-namespace VideoProcessing {
-
-int32_t Brighten(VideoFrame* frame, int delta);
-
-} // namespace VideoProcessing
-} // namespace webrtc
-
-#endif // MODULES_VIDEO_PROCESSING_MAIN_SOURCE_BRIGHTEN_H_
diff --git a/webrtc/modules/video_processing/main/source/video_processing_impl.cc b/webrtc/modules/video_processing/main/source/video_processing_impl.cc
deleted file mode 100644
index eaaf14f6ad..0000000000
--- a/webrtc/modules/video_processing/main/source/video_processing_impl.cc
+++ /dev/null
@@ -1,183 +0,0 @@
-/*
- * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-
-#include "webrtc/modules/video_processing/main/source/video_processing_impl.h"
-#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
-#include "webrtc/system_wrappers/include/logging.h"
-
-#include <assert.h>
-
-namespace webrtc {
-
-namespace {
-void SetSubSampling(VideoProcessingModule::FrameStats* stats,
- const int32_t width,
- const int32_t height) {
- if (width * height >= 640 * 480) {
- stats->subSamplWidth = 3;
- stats->subSamplHeight = 3;
- } else if (width * height >= 352 * 288) {
- stats->subSamplWidth = 2;
- stats->subSamplHeight = 2;
- } else if (width * height >= 176 * 144) {
- stats->subSamplWidth = 1;
- stats->subSamplHeight = 1;
- } else {
- stats->subSamplWidth = 0;
- stats->subSamplHeight = 0;
- }
-}
-} // namespace
-
-VideoProcessingModule* VideoProcessingModule::Create() {
- return new VideoProcessingModuleImpl();
-}
-
-void VideoProcessingModule::Destroy(VideoProcessingModule* module) {
- if (module)
- delete static_cast<VideoProcessingModuleImpl*>(module);
-}
-
-VideoProcessingModuleImpl::VideoProcessingModuleImpl() {}
-VideoProcessingModuleImpl::~VideoProcessingModuleImpl() {}
-
-void VideoProcessingModuleImpl::Reset() {
- rtc::CritScope mutex(&mutex_);
- deflickering_.Reset();
- brightness_detection_.Reset();
- frame_pre_processor_.Reset();
-}
-
-int32_t VideoProcessingModule::GetFrameStats(FrameStats* stats,
- const VideoFrame& frame) {
- if (frame.IsZeroSize()) {
- LOG(LS_ERROR) << "Zero size frame.";
- return VPM_PARAMETER_ERROR;
- }
-
- int width = frame.width();
- int height = frame.height();
-
- ClearFrameStats(stats); // The histogram needs to be zeroed out.
- SetSubSampling(stats, width, height);
-
- const uint8_t* buffer = frame.buffer(kYPlane);
- // Compute histogram and sum of frame
- for (int i = 0; i < height; i += (1 << stats->subSamplHeight)) {
- int k = i * width;
- for (int j = 0; j < width; j += (1 << stats->subSamplWidth)) {
- stats->hist[buffer[k + j]]++;
- stats->sum += buffer[k + j];
- }
- }
-
- stats->num_pixels = (width * height) / ((1 << stats->subSamplWidth) *
- (1 << stats->subSamplHeight));
- assert(stats->num_pixels > 0);
-
- // Compute mean value of frame
- stats->mean = stats->sum / stats->num_pixels;
-
- return VPM_OK;
-}
-
-bool VideoProcessingModule::ValidFrameStats(const FrameStats& stats) {
- if (stats.num_pixels == 0) {
- LOG(LS_WARNING) << "Invalid frame stats.";
- return false;
- }
- return true;
-}
-
-void VideoProcessingModule::ClearFrameStats(FrameStats* stats) {
- stats->mean = 0;
- stats->sum = 0;
- stats->num_pixels = 0;
- stats->subSamplWidth = 0;
- stats->subSamplHeight = 0;
- memset(stats->hist, 0, sizeof(stats->hist));
-}
-
-int32_t VideoProcessingModule::Brighten(VideoFrame* frame, int delta) {
- return VideoProcessing::Brighten(frame, delta);
-}
-
-int32_t VideoProcessingModuleImpl::Deflickering(VideoFrame* frame,
- FrameStats* stats) {
- rtc::CritScope mutex(&mutex_);
- return deflickering_.ProcessFrame(frame, stats);
-}
-
-int32_t VideoProcessingModuleImpl::BrightnessDetection(
- const VideoFrame& frame,
- const FrameStats& stats) {
- rtc::CritScope mutex(&mutex_);
- return brightness_detection_.ProcessFrame(frame, stats);
-}
-
-
-void VideoProcessingModuleImpl::EnableTemporalDecimation(bool enable) {
- rtc::CritScope mutex(&mutex_);
- frame_pre_processor_.EnableTemporalDecimation(enable);
-}
-
-
-void VideoProcessingModuleImpl::SetInputFrameResampleMode(VideoFrameResampling
- resampling_mode) {
- rtc::CritScope cs(&mutex_);
- frame_pre_processor_.SetInputFrameResampleMode(resampling_mode);
-}
-
-int32_t VideoProcessingModuleImpl::SetTargetResolution(uint32_t width,
- uint32_t height,
- uint32_t frame_rate) {
- rtc::CritScope cs(&mutex_);
- return frame_pre_processor_.SetTargetResolution(width, height, frame_rate);
-}
-
-void VideoProcessingModuleImpl::SetTargetFramerate(int frame_rate) {
- rtc::CritScope cs(&mutex_);
- frame_pre_processor_.SetTargetFramerate(frame_rate);
-}
-
-uint32_t VideoProcessingModuleImpl::Decimatedframe_rate() {
- rtc::CritScope cs(&mutex_);
- return frame_pre_processor_.Decimatedframe_rate();
-}
-
-uint32_t VideoProcessingModuleImpl::DecimatedWidth() const {
- rtc::CritScope cs(&mutex_);
- return frame_pre_processor_.DecimatedWidth();
-}
-
-uint32_t VideoProcessingModuleImpl::DecimatedHeight() const {
- rtc::CritScope cs(&mutex_);
- return frame_pre_processor_.DecimatedHeight();
-}
-
-int32_t VideoProcessingModuleImpl::PreprocessFrame(
- const VideoFrame& frame,
- VideoFrame** processed_frame) {
- rtc::CritScope mutex(&mutex_);
- return frame_pre_processor_.PreprocessFrame(frame, processed_frame);
-}
-
-VideoContentMetrics* VideoProcessingModuleImpl::ContentMetrics() const {
- rtc::CritScope mutex(&mutex_);
- return frame_pre_processor_.ContentMetrics();
-}
-
-void VideoProcessingModuleImpl::EnableContentAnalysis(bool enable) {
- rtc::CritScope mutex(&mutex_);
- frame_pre_processor_.EnableContentAnalysis(enable);
-}
-
-} // namespace webrtc
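The deleted GetFrameStats() samples the Y plane on a power-of-two grid chosen by SetSubSampling(): for VGA and larger both factors are 3, so the loops step 8 pixels in each direction and a 640x480 frame contributes (640*480)/(8*8) = 4800 samples. The selection and the resulting sample count, as a standalone sketch with illustrative names:

// Mirrors the thresholds in the deleted SetSubSampling().
int SubSamplingFactor(int width, int height) {
  const int pixels = width * height;
  if (pixels >= 640 * 480) return 3;  // sample every 8th pixel per axis
  if (pixels >= 352 * 288) return 2;  // every 4th
  if (pixels >= 176 * 144) return 1;  // every 2nd
  return 0;                           // every pixel
}

int SampledPixels(int width, int height) {
  const int f = SubSamplingFactor(width, height);
  return (width * height) / ((1 << f) * (1 << f));
}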
diff --git a/webrtc/modules/video_processing/main/source/video_processing_impl.h b/webrtc/modules/video_processing/main/source/video_processing_impl.h
deleted file mode 100644
index fed5197f49..0000000000
--- a/webrtc/modules/video_processing/main/source/video_processing_impl.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef WEBRTC_MODULE_VIDEO_PROCESSING_IMPL_H
-#define WEBRTC_MODULE_VIDEO_PROCESSING_IMPL_H
-
-#include "webrtc/base/criticalsection.h"
-#include "webrtc/modules/video_processing/main/interface/video_processing.h"
-#include "webrtc/modules/video_processing/main/source/brighten.h"
-#include "webrtc/modules/video_processing/main/source/brightness_detection.h"
-#include "webrtc/modules/video_processing/main/source/deflickering.h"
-#include "webrtc/modules/video_processing/main/source/frame_preprocessor.h"
-
-namespace webrtc {
-class CriticalSectionWrapper;
-
-class VideoProcessingModuleImpl : public VideoProcessingModule {
- public:
- VideoProcessingModuleImpl();
- ~VideoProcessingModuleImpl() override;
-
- void Reset() override;
-
- int32_t Deflickering(VideoFrame* frame, FrameStats* stats) override;
-
- int32_t BrightnessDetection(const VideoFrame& frame,
- const FrameStats& stats) override;
-
- // Frame pre-processor functions
-
- // Enable temporal decimation
- void EnableTemporalDecimation(bool enable) override;
-
- void SetInputFrameResampleMode(VideoFrameResampling resampling_mode) override;
-
- // Enable content analysis
- void EnableContentAnalysis(bool enable) override;
-
- // Set Target Resolution: frame rate and dimension
- int32_t SetTargetResolution(uint32_t width,
- uint32_t height,
- uint32_t frame_rate) override;
-
- void SetTargetFramerate(int frame_rate) override;
-
- // Get decimated values: frame rate/dimension
- uint32_t Decimatedframe_rate() override;
- uint32_t DecimatedWidth() const override;
- uint32_t DecimatedHeight() const override;
-
- // Preprocess:
- // Pre-process incoming frame: Sample when needed and compute content
- // metrics when enabled.
- // If no resampling takes place - processed_frame is set to NULL.
- int32_t PreprocessFrame(const VideoFrame& frame,
- VideoFrame** processed_frame) override;
- VideoContentMetrics* ContentMetrics() const override;
-
- private:
- mutable rtc::CriticalSection mutex_;
- VPMDeflickering deflickering_ GUARDED_BY(mutex_);
- VPMBrightnessDetection brightness_detection_;
- VPMFramePreprocessor frame_pre_processor_;
-};
-
-} // namespace
-
-#endif
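The single-function-at-a-time guarantee mentioned in the interface comment is realized with one rtc::CriticalSection: every virtual entry point in the deleted .cc takes the lock on entry. The pattern, condensed into a hypothetical class (note that only deflickering_ carries a GUARDED_BY annotation above, although all three members are accessed under the same lock):

#include "webrtc/base/criticalsection.h"

class SerializedComponent {
 public:
  int32_t DoWork() {
    rtc::CritScope cs(&mutex_);  // Serializes all entry points.
    // ... guarded work ...
    return 0;
  }

 private:
  mutable rtc::CriticalSection mutex_;
};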
diff --git a/webrtc/modules/video_processing/main/test/unit_test/brightness_detection_test.cc b/webrtc/modules/video_processing/main/test/unit_test/brightness_detection_test.cc
deleted file mode 100644
index 4d0de3ac98..0000000000
--- a/webrtc/modules/video_processing/main/test/unit_test/brightness_detection_test.cc
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
-#include "webrtc/modules/video_processing/main/interface/video_processing.h"
-#include "webrtc/modules/video_processing/main/test/unit_test/video_processing_unittest.h"
-#include "webrtc/test/testsupport/gtest_disable.h"
-
-using namespace webrtc;
-
-TEST_F(VideoProcessingModuleTest, DISABLED_ON_IOS(BrightnessDetection))
-{
- uint32_t frameNum = 0;
- int32_t brightnessWarning = 0;
- uint32_t warningCount = 0;
- rtc::scoped_ptr<uint8_t[]> video_buffer(new uint8_t[frame_length_]);
- while (fread(video_buffer.get(), 1, frame_length_, source_file_) ==
- frame_length_)
- {
- EXPECT_EQ(0, ConvertToI420(kI420, video_buffer.get(), 0, 0, width_,
- height_, 0, kVideoRotation_0, &video_frame_));
- frameNum++;
- VideoProcessingModule::FrameStats stats;
- ASSERT_EQ(0, vpm_->GetFrameStats(&stats, video_frame_));
- ASSERT_GE(brightnessWarning = vpm_->BrightnessDetection(video_frame_,
- stats), 0);
- if (brightnessWarning != VideoProcessingModule::kNoWarning)
- {
- warningCount++;
- }
- }
- ASSERT_NE(0, feof(source_file_)) << "Error reading source file";
-
- // Expect few warnings
- float warningProportion = static_cast<float>(warningCount) / frameNum * 100;
- printf("\nWarning proportions:\n");
- printf("Stock foreman: %.1f %%\n", warningProportion);
- EXPECT_LT(warningProportion, 10);
-
- rewind(source_file_);
- frameNum = 0;
- warningCount = 0;
- while (fread(video_buffer.get(), 1, frame_length_, source_file_) ==
- frame_length_ &&
- frameNum < 300)
- {
- EXPECT_EQ(0, ConvertToI420(kI420, video_buffer.get(), 0, 0, width_,
- height_, 0, kVideoRotation_0, &video_frame_));
- frameNum++;
-
- uint8_t* frame = video_frame_.buffer(kYPlane);
- uint32_t yTmp = 0;
- for (int yIdx = 0; yIdx < width_ * height_; yIdx++)
- {
- yTmp = frame[yIdx] << 1;
- if (yTmp > 255)
- {
- yTmp = 255;
- }
- frame[yIdx] = static_cast<uint8_t>(yTmp);
- }
-
- VideoProcessingModule::FrameStats stats;
- ASSERT_EQ(0, vpm_->GetFrameStats(&stats, video_frame_));
- ASSERT_GE(brightnessWarning = vpm_->BrightnessDetection(video_frame_,
- stats), 0);
- EXPECT_NE(VideoProcessingModule::kDarkWarning, brightnessWarning);
- if (brightnessWarning == VideoProcessingModule::kBrightWarning)
- {
- warningCount++;
- }
- }
- ASSERT_NE(0, feof(source_file_)) << "Error reading source file";
-
- // Expect many brightness warnings
- warningProportion = static_cast<float>(warningCount) / frameNum * 100;
- printf("Bright foreman: %.1f %%\n", warningProportion);
- EXPECT_GT(warningProportion, 95);
-
- rewind(source_file_);
- frameNum = 0;
- warningCount = 0;
- while (fread(video_buffer.get(), 1, frame_length_, source_file_) ==
- frame_length_ && frameNum < 300)
- {
- EXPECT_EQ(0, ConvertToI420(kI420, video_buffer.get(), 0, 0, width_,
- height_, 0, kVideoRotation_0, &video_frame_));
- frameNum++;
-
- uint8_t* y_plane = video_frame_.buffer(kYPlane);
- int32_t yTmp = 0;
- for (int yIdx = 0; yIdx < width_ * height_; yIdx++)
- {
- yTmp = y_plane[yIdx] >> 1;
- y_plane[yIdx] = static_cast<uint8_t>(yTmp);
- }
-
- VideoProcessingModule::FrameStats stats;
- ASSERT_EQ(0, vpm_->GetFrameStats(&stats, video_frame_));
- ASSERT_GE(brightnessWarning = vpm_->BrightnessDetection(video_frame_,
- stats), 0);
- EXPECT_NE(VideoProcessingModule::kBrightWarning, brightnessWarning);
- if (brightnessWarning == VideoProcessingModule::kDarkWarning)
- {
- warningCount++;
- }
- }
- ASSERT_NE(0, feof(source_file_)) << "Error reading source file";
-
- // Expect many darkness warnings
- warningProportion = static_cast<float>(warningCount) / frameNum * 100;
- printf("Dark foreman: %.1f %%\n\n", warningProportion);
- EXPECT_GT(warningProportion, 90);
-}
diff --git a/webrtc/modules/video_processing/main/test/unit_test/deflickering_test.cc b/webrtc/modules/video_processing/main/test/unit_test/deflickering_test.cc
deleted file mode 100644
index 83d09ef486..0000000000
--- a/webrtc/modules/video_processing/main/test/unit_test/deflickering_test.cc
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <stdio.h>
-#include <stdlib.h>
-
-#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
-#include "webrtc/modules/video_processing/main/interface/video_processing.h"
-#include "webrtc/modules/video_processing/main/test/unit_test/video_processing_unittest.h"
-#include "webrtc/system_wrappers/include/tick_util.h"
-#include "webrtc/test/testsupport/fileutils.h"
-#include "webrtc/test/testsupport/gtest_disable.h"
-
-namespace webrtc {
-
-TEST_F(VideoProcessingModuleTest, DISABLED_ON_IOS(Deflickering))
-{
- enum { NumRuns = 30 };
- uint32_t frameNum = 0;
- const uint32_t frame_rate = 15;
-
- int64_t min_runtime = 0;
- int64_t avg_runtime = 0;
-
- // Close automatically opened Foreman.
- fclose(source_file_);
- const std::string input_file =
- webrtc::test::ResourcePath("deflicker_before_cif_short", "yuv");
- source_file_ = fopen(input_file.c_str(), "rb");
- ASSERT_TRUE(source_file_ != NULL) <<
- "Cannot read input file: " << input_file << "\n";
-
- const std::string output_file =
- webrtc::test::OutputPath() + "deflicker_output_cif_short.yuv";
- FILE* deflickerFile = fopen(output_file.c_str(), "wb");
- ASSERT_TRUE(deflickerFile != NULL) <<
- "Could not open output file: " << output_file << "\n";
-
- printf("\nRun time [us / frame]:\n");
- rtc::scoped_ptr<uint8_t[]> video_buffer(new uint8_t[frame_length_]);
- for (uint32_t run_idx = 0; run_idx < NumRuns; run_idx++)
- {
- TickTime t0;
- TickTime t1;
- TickInterval acc_ticks;
- uint32_t timeStamp = 1;
-
- frameNum = 0;
- while (fread(video_buffer.get(), 1, frame_length_, source_file_) ==
- frame_length_)
- {
- frameNum++;
- EXPECT_EQ(
- 0, ConvertToI420(kI420, video_buffer.get(), 0, 0, width_,
- height_, 0, kVideoRotation_0, &video_frame_));
- video_frame_.set_timestamp(timeStamp);
-
- t0 = TickTime::Now();
- VideoProcessingModule::FrameStats stats;
- ASSERT_EQ(0, vpm_->GetFrameStats(&stats, video_frame_));
- ASSERT_EQ(0, vpm_->Deflickering(&video_frame_, &stats));
- t1 = TickTime::Now();
- acc_ticks += (t1 - t0);
-
- if (run_idx == 0)
- {
- if (PrintVideoFrame(video_frame_, deflickerFile) < 0) {
- return;
- }
- }
- timeStamp += (90000 / frame_rate);
- }
- ASSERT_NE(0, feof(source_file_)) << "Error reading source file";
-
- printf("%u\n", static_cast<int>(acc_ticks.Microseconds() / frameNum));
- if (acc_ticks.Microseconds() < min_runtime || run_idx == 0)
- {
- min_runtime = acc_ticks.Microseconds();
- }
- avg_runtime += acc_ticks.Microseconds();
-
- rewind(source_file_);
- }
- ASSERT_EQ(0, fclose(deflickerFile));
- // TODO(kjellander): Add verification of deflicker output file.
-
- printf("\nAverage run time = %d us / frame\n",
- static_cast<int>(avg_runtime / frameNum / NumRuns));
- printf("Min run time = %d us / frame\n\n",
- static_cast<int>(min_runtime / frameNum));
-}
-
-} // namespace webrtc
diff --git a/webrtc/modules/video_processing/main/source/spatial_resampler.cc b/webrtc/modules/video_processing/spatial_resampler.cc
index 9360e68b41..cdbe0efac1 100644
--- a/webrtc/modules/video_processing/main/source/spatial_resampler.cc
+++ b/webrtc/modules/video_processing/spatial_resampler.cc
@@ -8,8 +8,7 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/video_processing/main/source/spatial_resampler.h"
-
+#include "webrtc/modules/video_processing/spatial_resampler.h"
namespace webrtc {
@@ -21,12 +20,13 @@ VPMSimpleSpatialResampler::VPMSimpleSpatialResampler()
VPMSimpleSpatialResampler::~VPMSimpleSpatialResampler() {}
-
int32_t VPMSimpleSpatialResampler::SetTargetFrameSize(int32_t width,
int32_t height) {
- if (resampling_mode_ == kNoRescaling) return VPM_OK;
+ if (resampling_mode_ == kNoRescaling)
+ return VPM_OK;
- if (width < 1 || height < 1) return VPM_PARAMETER_ERROR;
+ if (width < 1 || height < 1)
+ return VPM_PARAMETER_ERROR;
target_width_ = width;
target_height_ = height;
@@ -48,11 +48,11 @@ void VPMSimpleSpatialResampler::Reset() {
int32_t VPMSimpleSpatialResampler::ResampleFrame(const VideoFrame& inFrame,
VideoFrame* outFrame) {
// Don't copy if frame remains as is.
- if (resampling_mode_ == kNoRescaling)
- return VPM_OK;
+ if (resampling_mode_ == kNoRescaling) {
+ return VPM_OK;
// Check if re-sampling is needed
- else if ((inFrame.width() == target_width_) &&
- (inFrame.height() == target_height_)) {
+ } else if ((inFrame.width() == target_width_) &&
+ (inFrame.height() == target_height_)) {
return VPM_OK;
}
@@ -60,8 +60,8 @@ int32_t VPMSimpleSpatialResampler::ResampleFrame(const VideoFrame& inFrame,
// TODO(mikhal/marpan): Should we allow for setting the filter mode in
// _scale.Set() with |resampling_mode_|?
int ret_val = 0;
- ret_val = scaler_.Set(inFrame.width(), inFrame.height(),
- target_width_, target_height_, kI420, kI420, kScaleBox);
+ ret_val = scaler_.Set(inFrame.width(), inFrame.height(), target_width_,
+ target_height_, kI420, kI420, kScaleBox);
if (ret_val < 0)
return ret_val;
@@ -86,10 +86,9 @@ int32_t VPMSimpleSpatialResampler::TargetWidth() {
return target_width_;
}
-bool VPMSimpleSpatialResampler::ApplyResample(int32_t width,
- int32_t height) {
+bool VPMSimpleSpatialResampler::ApplyResample(int32_t width, int32_t height) {
if ((width == target_width_ && height == target_height_) ||
- resampling_mode_ == kNoRescaling)
+ resampling_mode_ == kNoRescaling)
return false;
else
return true;
diff --git a/webrtc/modules/video_processing/main/source/spatial_resampler.h b/webrtc/modules/video_processing/spatial_resampler.h
index f965a40a83..51820e24e5 100644
--- a/webrtc/modules/video_processing/main/source/spatial_resampler.h
+++ b/webrtc/modules/video_processing/spatial_resampler.h
@@ -8,13 +8,13 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_VIDEO_PROCESSING_MAIN_SOURCE_SPATIAL_RESAMPLER_H
-#define WEBRTC_MODULES_VIDEO_PROCESSING_MAIN_SOURCE_SPATIAL_RESAMPLER_H
+#ifndef WEBRTC_MODULES_VIDEO_PROCESSING_SPATIAL_RESAMPLER_H_
+#define WEBRTC_MODULES_VIDEO_PROCESSING_SPATIAL_RESAMPLER_H_
#include "webrtc/typedefs.h"
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/modules/video_processing/main/interface/video_processing_defines.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/video_processing/include/video_processing_defines.h"
#include "webrtc/common_video/libyuv/include/scaler.h"
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
@@ -23,10 +23,10 @@ namespace webrtc {
class VPMSpatialResampler {
public:
- virtual ~VPMSpatialResampler() {};
+ virtual ~VPMSpatialResampler() {}
virtual int32_t SetTargetFrameSize(int32_t width, int32_t height) = 0;
- virtual void SetInputFrameResampleMode(VideoFrameResampling
- resampling_mode) = 0;
+ virtual void SetInputFrameResampleMode(
+ VideoFrameResampling resampling_mode) = 0;
virtual void Reset() = 0;
virtual int32_t ResampleFrame(const VideoFrame& inFrame,
VideoFrame* outFrame) = 0;
@@ -49,13 +49,12 @@ class VPMSimpleSpatialResampler : public VPMSpatialResampler {
virtual bool ApplyResample(int32_t width, int32_t height);
private:
-
- VideoFrameResampling resampling_mode_;
- int32_t target_width_;
- int32_t target_height_;
- Scaler scaler_;
+ VideoFrameResampling resampling_mode_;
+ int32_t target_width_;
+ int32_t target_height_;
+ Scaler scaler_;
};
} // namespace webrtc
-#endif // WEBRTC_MODULES_VIDEO_PROCESSING_MAIN_SOURCE_SPATIAL_RESAMPLER_H
+#endif // WEBRTC_MODULES_VIDEO_PROCESSING_SPATIAL_RESAMPLER_H_
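A sketch of the resampler in isolation, assuming a valid source frame; kBox is the filter mode from the defines header earlier in this change, and the function name is illustrative:

#include "webrtc/modules/video_processing/spatial_resampler.h"

// Downscale |in| to QVGA; returns VPM_OK without copying if the frame is
// already at the target size or rescaling is disabled.
int32_t DownscaleToQvga(const webrtc::VideoFrame& in, webrtc::VideoFrame* out) {
  webrtc::VPMSimpleSpatialResampler resampler;
  resampler.SetInputFrameResampleMode(webrtc::kBox);
  int32_t ret = resampler.SetTargetFrameSize(320, 240);
  if (ret != VPM_OK)
    return ret;
  return resampler.ResampleFrame(in, out);
}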
diff --git a/webrtc/modules/video_processing/test/brightness_detection_test.cc b/webrtc/modules/video_processing/test/brightness_detection_test.cc
new file mode 100644
index 0000000000..669bb183e5
--- /dev/null
+++ b/webrtc/modules/video_processing/test/brightness_detection_test.cc
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
+#include "webrtc/modules/video_processing/include/video_processing.h"
+#include "webrtc/modules/video_processing/test/video_processing_unittest.h"
+
+namespace webrtc {
+
+#if defined(WEBRTC_IOS)
+#define MAYBE_BrightnessDetection DISABLED_BrightnessDetection
+#else
+#define MAYBE_BrightnessDetection BrightnessDetection
+#endif
+TEST_F(VideoProcessingTest, MAYBE_BrightnessDetection) {
+ uint32_t frameNum = 0;
+ int32_t brightnessWarning = 0;
+ uint32_t warningCount = 0;
+ rtc::scoped_ptr<uint8_t[]> video_buffer(new uint8_t[frame_length_]);
+ while (fread(video_buffer.get(), 1, frame_length_, source_file_) ==
+ frame_length_) {
+ EXPECT_EQ(0, ConvertToI420(kI420, video_buffer.get(), 0, 0, width_, height_,
+ 0, kVideoRotation_0, &video_frame_));
+ frameNum++;
+ VideoProcessing::FrameStats stats;
+ vp_->GetFrameStats(video_frame_, &stats);
+ EXPECT_GT(stats.num_pixels, 0u);
+ ASSERT_GE(brightnessWarning = vp_->BrightnessDetection(video_frame_, stats),
+ 0);
+ if (brightnessWarning != VideoProcessing::kNoWarning) {
+ warningCount++;
+ }
+ }
+ ASSERT_NE(0, feof(source_file_)) << "Error reading source file";
+
+ // Expect few warnings
+ float warningProportion = static_cast<float>(warningCount) / frameNum * 100;
+ printf("\nWarning proportions:\n");
+ printf("Stock foreman: %.1f %%\n", warningProportion);
+ EXPECT_LT(warningProportion, 10);
+
+ rewind(source_file_);
+ frameNum = 0;
+ warningCount = 0;
+ while (fread(video_buffer.get(), 1, frame_length_, source_file_) ==
+ frame_length_ &&
+ frameNum < 300) {
+ EXPECT_EQ(0, ConvertToI420(kI420, video_buffer.get(), 0, 0, width_, height_,
+ 0, kVideoRotation_0, &video_frame_));
+ frameNum++;
+
+ uint8_t* frame = video_frame_.buffer(kYPlane);
+ uint32_t yTmp = 0;
+ for (int yIdx = 0; yIdx < width_ * height_; yIdx++) {
+ yTmp = frame[yIdx] << 1;
+ if (yTmp > 255) {
+ yTmp = 255;
+ }
+ frame[yIdx] = static_cast<uint8_t>(yTmp);
+ }
+
+ VideoProcessing::FrameStats stats;
+ vp_->GetFrameStats(video_frame_, &stats);
+ EXPECT_GT(stats.num_pixels, 0u);
+ ASSERT_GE(brightnessWarning = vp_->BrightnessDetection(video_frame_, stats),
+ 0);
+ EXPECT_NE(VideoProcessing::kDarkWarning, brightnessWarning);
+ if (brightnessWarning == VideoProcessing::kBrightWarning) {
+ warningCount++;
+ }
+ }
+ ASSERT_NE(0, feof(source_file_)) << "Error reading source file";
+
+ // Expect many brightness warnings
+ warningProportion = static_cast<float>(warningCount) / frameNum * 100;
+ printf("Bright foreman: %.1f %%\n", warningProportion);
+ EXPECT_GT(warningProportion, 95);
+
+ rewind(source_file_);
+ frameNum = 0;
+ warningCount = 0;
+ while (fread(video_buffer.get(), 1, frame_length_, source_file_) ==
+ frame_length_ &&
+ frameNum < 300) {
+ EXPECT_EQ(0, ConvertToI420(kI420, video_buffer.get(), 0, 0, width_, height_,
+ 0, kVideoRotation_0, &video_frame_));
+ frameNum++;
+
+ uint8_t* y_plane = video_frame_.buffer(kYPlane);
+ int32_t yTmp = 0;
+ for (int yIdx = 0; yIdx < width_ * height_; yIdx++) {
+ yTmp = y_plane[yIdx] >> 1;
+ y_plane[yIdx] = static_cast<uint8_t>(yTmp);
+ }
+
+ VideoProcessing::FrameStats stats;
+ vp_->GetFrameStats(video_frame_, &stats);
+ EXPECT_GT(stats.num_pixels, 0u);
+ ASSERT_GE(brightnessWarning = vp_->BrightnessDetection(video_frame_, stats),
+ 0);
+ EXPECT_NE(VideoProcessing::kBrightWarning, brightnessWarning);
+ if (brightnessWarning == VideoProcessing::kDarkWarning) {
+ warningCount++;
+ }
+ }
+ ASSERT_NE(0, feof(source_file_)) << "Error reading source file";
+
+ // Expect many darkness warnings
+ warningProportion = static_cast<float>(warningCount) / frameNum * 100;
+ printf("Dark foreman: %.1f %%\n\n", warningProportion);
+ EXPECT_GT(warningProportion, 90);
+}
+} // namespace webrtc
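The second and third passes above synthesize over- and under-exposed inputs by shifting each luma sample: y << 1, clamped at 255, roughly doubles the brightness, and y >> 1 halves it. The per-sample operations in isolation (illustrative helper names):

#include <cstdint>

uint8_t BrightenSample(uint8_t y) {  // as in the bright-foreman pass
  const uint32_t doubled = static_cast<uint32_t>(y) << 1;
  return static_cast<uint8_t>(doubled > 255 ? 255 : doubled);
}

uint8_t DarkenSample(uint8_t y) {  // as in the dark-foreman pass
  return static_cast<uint8_t>(y >> 1);
}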
diff --git a/webrtc/modules/video_processing/main/test/unit_test/content_metrics_test.cc b/webrtc/modules/video_processing/test/content_metrics_test.cc
index d9c1309d9b..782f9cff59 100644
--- a/webrtc/modules/video_processing/main/test/unit_test/content_metrics_test.cc
+++ b/webrtc/modules/video_processing/test/content_metrics_test.cc
@@ -9,28 +9,32 @@
*/
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
-#include "webrtc/modules/video_processing/main/interface/video_processing.h"
-#include "webrtc/modules/video_processing/main/source/content_analysis.h"
-#include "webrtc/modules/video_processing/main/test/unit_test/video_processing_unittest.h"
-#include "webrtc/test/testsupport/gtest_disable.h"
+#include "webrtc/modules/video_processing/include/video_processing.h"
+#include "webrtc/modules/video_processing/content_analysis.h"
+#include "webrtc/modules/video_processing/test/video_processing_unittest.h"
namespace webrtc {
-TEST_F(VideoProcessingModuleTest, DISABLED_ON_IOS(ContentAnalysis)) {
- VPMContentAnalysis ca__c(false);
- VPMContentAnalysis ca__sse(true);
- VideoContentMetrics *_cM_c, *_cM_SSE;
+#if defined(WEBRTC_IOS)
+TEST_F(VideoProcessingTest, DISABLED_ContentAnalysis) {
+#else
+TEST_F(VideoProcessingTest, ContentAnalysis) {
+#endif
+ VPMContentAnalysis ca__c(false);
+ VPMContentAnalysis ca__sse(true);
+ VideoContentMetrics* _cM_c;
+ VideoContentMetrics* _cM_SSE;
- ca__c.Initialize(width_,height_);
- ca__sse.Initialize(width_,height_);
+ ca__c.Initialize(width_, height_);
+ ca__sse.Initialize(width_, height_);
rtc::scoped_ptr<uint8_t[]> video_buffer(new uint8_t[frame_length_]);
- while (fread(video_buffer.get(), 1, frame_length_, source_file_)
- == frame_length_) {
+ while (fread(video_buffer.get(), 1, frame_length_, source_file_) ==
+ frame_length_) {
// Using ConvertToI420 to add stride to the image.
EXPECT_EQ(0, ConvertToI420(kI420, video_buffer.get(), 0, 0, width_, height_,
0, kVideoRotation_0, &video_frame_));
- _cM_c = ca__c.ComputeContentMetrics(video_frame_);
+ _cM_c = ca__c.ComputeContentMetrics(video_frame_);
_cM_SSE = ca__sse.ComputeContentMetrics(video_frame_);
ASSERT_EQ(_cM_c->spatial_pred_err, _cM_SSE->spatial_pred_err);
diff --git a/webrtc/modules/video_processing/main/test/unit_test/createTable.m b/webrtc/modules/video_processing/test/createTable.m
index 2c7fb522f6..fe8777ee71 100644
--- a/webrtc/modules/video_processing/main/test/unit_test/createTable.m
+++ b/webrtc/modules/video_processing/test/createTable.m
@@ -31,7 +31,7 @@ A=(1-B)/r0;
f0=A*x0.^2+B*x0; % compander function in zone 1
% equation system for finding second zone parameters
-M=[r0^3 r0^2 r0 1;
+M=[r0^3 r0^2 r0 1;
3*r0^2 2*r0 1 0;
3*r1^2 2*r1 1 0;
r1^3 r1^2 r1 1];
@@ -173,7 +173,7 @@ for k=1:size(y,3)
end
end
end
-
+
fprintf('\nWriting modified test file...')
writeYUV420file('../out/Debug/foremanColorEnhanced.yuv',y,unew,vnew);
fprintf(' done\n');
diff --git a/webrtc/modules/video_processing/test/deflickering_test.cc b/webrtc/modules/video_processing/test/deflickering_test.cc
new file mode 100644
index 0000000000..5410015b06
--- /dev/null
+++ b/webrtc/modules/video_processing/test/deflickering_test.cc
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
+#include "webrtc/modules/video_processing/include/video_processing.h"
+#include "webrtc/modules/video_processing/test/video_processing_unittest.h"
+#include "webrtc/system_wrappers/include/tick_util.h"
+#include "webrtc/test/testsupport/fileutils.h"
+
+namespace webrtc {
+
+#if defined(WEBRTC_IOS)
+TEST_F(VideoProcessingTest, DISABLED_Deflickering) {
+#else
+TEST_F(VideoProcessingTest, Deflickering) {
+#endif
+ enum { NumRuns = 30 };
+ uint32_t frameNum = 0;
+ const uint32_t frame_rate = 15;
+
+ int64_t min_runtime = 0;
+ int64_t avg_runtime = 0;
+
+ // Close automatically opened Foreman.
+ fclose(source_file_);
+ const std::string input_file =
+ webrtc::test::ResourcePath("deflicker_before_cif_short", "yuv");
+ source_file_ = fopen(input_file.c_str(), "rb");
+ ASSERT_TRUE(source_file_ != NULL) << "Cannot read input file: " << input_file
+ << "\n";
+
+ const std::string output_file =
+ webrtc::test::OutputPath() + "deflicker_output_cif_short.yuv";
+ FILE* deflickerFile = fopen(output_file.c_str(), "wb");
+ ASSERT_TRUE(deflickerFile != NULL)
+ << "Could not open output file: " << output_file << "\n";
+
+ printf("\nRun time [us / frame]:\n");
+ rtc::scoped_ptr<uint8_t[]> video_buffer(new uint8_t[frame_length_]);
+ for (uint32_t run_idx = 0; run_idx < NumRuns; run_idx++) {
+ TickTime t0;
+ TickTime t1;
+ TickInterval acc_ticks;
+ uint32_t timeStamp = 1;
+
+ frameNum = 0;
+ while (fread(video_buffer.get(), 1, frame_length_, source_file_) ==
+ frame_length_) {
+ frameNum++;
+ EXPECT_EQ(0, ConvertToI420(kI420, video_buffer.get(), 0, 0, width_,
+ height_, 0, kVideoRotation_0, &video_frame_));
+ video_frame_.set_timestamp(timeStamp);
+
+ t0 = TickTime::Now();
+ VideoProcessing::FrameStats stats;
+ vp_->GetFrameStats(video_frame_, &stats);
+ EXPECT_GT(stats.num_pixels, 0u);
+ ASSERT_EQ(0, vp_->Deflickering(&video_frame_, &stats));
+ t1 = TickTime::Now();
+ acc_ticks += (t1 - t0);
+
+ if (run_idx == 0) {
+ if (PrintVideoFrame(video_frame_, deflickerFile) < 0) {
+ return;
+ }
+ }
+ timeStamp += (90000 / frame_rate);
+ }
+ ASSERT_NE(0, feof(source_file_)) << "Error reading source file";
+
+ printf("%u\n", static_cast<int>(acc_ticks.Microseconds() / frameNum));
+ if (acc_ticks.Microseconds() < min_runtime || run_idx == 0) {
+ min_runtime = acc_ticks.Microseconds();
+ }
+ avg_runtime += acc_ticks.Microseconds();
+
+ rewind(source_file_);
+ }
+ ASSERT_EQ(0, fclose(deflickerFile));
+ // TODO(kjellander): Add verification of deflicker output file.
+
+ printf("\nAverage run time = %d us / frame\n",
+ static_cast<int>(avg_runtime / frameNum / NumRuns));
+ printf("Min run time = %d us / frame\n\n",
+ static_cast<int>(min_runtime / frameNum));
+}
+
+} // namespace webrtc
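The timeStamp += (90000 / frame_rate) step in the loop above advances what is, by convention, the 90 kHz RTP video clock; at the test's 15 fps each frame adds 6000 ticks. A one-line sanity check (assumes C++11 static_assert):

static_assert(90000 / 15 == 6000,
              "each frame advances the 90 kHz timestamp by 6000 ticks");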
diff --git a/webrtc/modules/video_processing/test/denoiser_test.cc b/webrtc/modules/video_processing/test/denoiser_test.cc
new file mode 100644
index 0000000000..551a77617d
--- /dev/null
+++ b/webrtc/modules/video_processing/test/denoiser_test.cc
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <string.h>
+
+#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
+#include "webrtc/modules/video_processing/include/video_processing.h"
+#include "webrtc/modules/video_processing/test/video_processing_unittest.h"
+#include "webrtc/modules/video_processing/video_denoiser.h"
+
+namespace webrtc {
+
+TEST_F(VideoProcessingTest, CopyMem) {
+ rtc::scoped_ptr<DenoiserFilter> df_c(DenoiserFilter::Create(false));
+ rtc::scoped_ptr<DenoiserFilter> df_sse_neon(DenoiserFilter::Create(true));
+ uint8_t src[16 * 16], dst[16 * 16];
+ for (int i = 0; i < 16; ++i) {
+ for (int j = 0; j < 16; ++j) {
+ src[i * 16 + j] = i * 16 + j;
+ }
+ }
+
+ memset(dst, 0, 8 * 8);
+ df_c->CopyMem8x8(src, 8, dst, 8);
+ EXPECT_EQ(0, memcmp(src, dst, 8 * 8));
+
+ memset(dst, 0, 16 * 16);
+ df_c->CopyMem16x16(src, 16, dst, 16);
+ EXPECT_EQ(0, memcmp(src, dst, 16 * 16));
+
+ memset(dst, 0, 8 * 8);
+ df_sse_neon->CopyMem16x16(src, 8, dst, 8);
+ EXPECT_EQ(0, memcmp(src, dst, 8 * 8));
+
+ memset(dst, 0, 16 * 16);
+ df_sse_neon->CopyMem16x16(src, 16, dst, 16);
+ EXPECT_EQ(0, memcmp(src, dst, 16 * 16));
+}
+
+TEST_F(VideoProcessingTest, Variance) {
+ rtc::scoped_ptr<DenoiserFilter> df_c(DenoiserFilter::Create(false));
+ rtc::scoped_ptr<DenoiserFilter> df_sse_neon(DenoiserFilter::Create(true));
+ uint8_t src[16 * 16], dst[16 * 16];
+ uint32_t sum = 0, sse = 0, var;
+ for (int i = 0; i < 16; ++i) {
+ for (int j = 0; j < 16; ++j) {
+ src[i * 16 + j] = i * 16 + j;
+ }
+ }
+ // Compute the 16x8 variance of the 16x16 block.
+ for (int i = 0; i < 8; ++i) {
+ for (int j = 0; j < 16; ++j) {
+ sum += (i * 32 + j);
+ sse += (i * 32 + j) * (i * 32 + j);
+ }
+ }
+ var = sse - ((sum * sum) >> 7);
+ memset(dst, 0, 16 * 16);
+ EXPECT_EQ(var, df_c->Variance16x8(src, 16, dst, 16, &sse));
+ EXPECT_EQ(var, df_sse_neon->Variance16x8(src, 16, dst, 16, &sse));
+}
+
+TEST_F(VideoProcessingTest, MbDenoise) {
+ rtc::scoped_ptr<DenoiserFilter> df_c(DenoiserFilter::Create(false));
+ rtc::scoped_ptr<DenoiserFilter> df_sse_neon(DenoiserFilter::Create(true));
+ uint8_t running_src[16 * 16], src[16 * 16], dst[16 * 16], dst_ref[16 * 16];
+
+ // Test case: |diff| <= |3 + shift_inc1|
+ for (int i = 0; i < 16; ++i) {
+ for (int j = 0; j < 16; ++j) {
+ running_src[i * 16 + j] = i * 11 + j;
+ src[i * 16 + j] = i * 11 + j + 2;
+ dst_ref[i * 16 + j] = running_src[i * 16 + j];
+ }
+ }
+ memset(dst, 0, 16 * 16);
+ df_c->MbDenoise(running_src, 16, dst, 16, src, 16, 0, 1);
+ EXPECT_EQ(0, memcmp(dst, dst_ref, 16 * 16));
+
+ // Test case: |diff| >= |4 + shift_inc1|
+ for (int i = 0; i < 16; ++i) {
+ for (int j = 0; j < 16; ++j) {
+ running_src[i * 16 + j] = i * 11 + j;
+ src[i * 16 + j] = i * 11 + j + 5;
+ dst_ref[i * 16 + j] = src[i * 16 + j] - 2;
+ }
+ }
+ memset(dst, 0, 16 * 16);
+ df_c->MbDenoise(running_src, 16, dst, 16, src, 16, 0, 1);
+ EXPECT_EQ(0, memcmp(dst, dst_ref, 16 * 16));
+ memset(dst, 0, 16 * 16);
+ df_sse_neon->MbDenoise(running_src, 16, dst, 16, src, 16, 0, 1);
+ EXPECT_EQ(0, memcmp(dst, dst_ref, 16 * 16));
+
+ // Test case: |diff| >= 8
+ for (int i = 0; i < 16; ++i) {
+ for (int j = 0; j < 16; ++j) {
+ running_src[i * 16 + j] = i * 11 + j;
+ src[i * 16 + j] = i * 11 + j + 8;
+ dst_ref[i * 16 + j] = src[i * 16 + j] - 6;
+ }
+ }
+ memset(dst, 0, 16 * 16);
+ df_c->MbDenoise(running_src, 16, dst, 16, src, 16, 0, 1);
+ EXPECT_EQ(0, memcmp(dst, dst_ref, 16 * 16));
+ memset(dst, 0, 16 * 16);
+ df_sse_neon->MbDenoise(running_src, 16, dst, 16, src, 16, 0, 1);
+ EXPECT_EQ(0, memcmp(dst, dst_ref, 16 * 16));
+
+ // Test case: |diff| > 15
+ for (int i = 0; i < 16; ++i) {
+ for (int j = 0; j < 16; ++j) {
+ running_src[i * 16 + j] = i * 11 + j;
+ src[i * 16 + j] = i * 11 + j + 16;
+ }
+ }
+ memset(dst, 0, 16 * 16);
+ DenoiserDecision decision =
+ df_c->MbDenoise(running_src, 16, dst, 16, src, 16, 0, 1);
+ EXPECT_EQ(COPY_BLOCK, decision);
+ decision = df_sse_neon->MbDenoise(running_src, 16, dst, 16, src, 16, 0, 1);
+ EXPECT_EQ(COPY_BLOCK, decision);
+}
+
+TEST_F(VideoProcessingTest, Denoiser) {
+ // Create pure C denoiser.
+ VideoDenoiser denoiser_c(false);
+ // Create SSE or NEON denoiser.
+ VideoDenoiser denoiser_sse_neon(true);
+ VideoFrame denoised_frame_c;
+ VideoFrame denoised_frame_sse_neon;
+
+ rtc::scoped_ptr<uint8_t[]> video_buffer(new uint8_t[frame_length_]);
+ while (fread(video_buffer.get(), 1, frame_length_, source_file_) ==
+ frame_length_) {
+ // Using ConvertToI420 to add stride to the image.
+ EXPECT_EQ(0, ConvertToI420(kI420, video_buffer.get(), 0, 0, width_, height_,
+ 0, kVideoRotation_0, &video_frame_));
+
+ denoiser_c.DenoiseFrame(video_frame_, &denoised_frame_c);
+ denoiser_sse_neon.DenoiseFrame(video_frame_, &denoised_frame_sse_neon);
+
+ // Denoising results should be the same for C and SSE/NEON denoiser.
+ ASSERT_EQ(true, denoised_frame_c.EqualsFrame(denoised_frame_sse_neon));
+ }
+ ASSERT_NE(0, feof(source_file_)) << "Error reading source file";
+}
+
+} // namespace webrtc
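The expected value in the Variance test above uses the standard shortcut: the sum of squared deviations equals sse - sum*sum/n, where sum and sse are the raw sum and sum of squares. With n = 128 samples in a 16x8 block, the division by n is the >> 7 in the test. The identity in standalone form (illustrative name; mirrors the test's expression and its uint32_t arithmetic):

#include <cstdint>

// Sum of squared deviations over n = 128 samples, from the raw sum and the
// raw sum of squares: sse - sum^2 / 128.
uint32_t SumSquaredDeviations16x8(uint32_t sum, uint32_t sse) {
  return sse - ((sum * sum) >> 7);
}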
diff --git a/webrtc/modules/video_processing/main/test/unit_test/readYUV420file.m b/webrtc/modules/video_processing/test/readYUV420file.m
index 03013efd3a..f409820283 100644
--- a/webrtc/modules/video_processing/main/test/unit_test/readYUV420file.m
+++ b/webrtc/modules/video_processing/test/readYUV420file.m
@@ -10,7 +10,7 @@ end
nPx=width*height;
% nPx bytes luminance, nPx/4 bytes U, nPx/4 bytes V
-frameSizeBytes = nPx*1.5;
+frameSizeBytes = nPx*1.5;
% calculate number of frames
fseek(fid,0,'eof'); % move to end of file
@@ -27,19 +27,19 @@ V=uint8(zeros(height/2,width/2,numFrames));
[X,nBytes]=fread(fid, frameSizeBytes, 'uchar');
for k=1:numFrames
-
+
% Store luminance
Y(:,:,k)=uint8(reshape(X(1:nPx), width, height).');
-
+
% Store U channel
U(:,:,k)=uint8(reshape(X(nPx + (1:nPx/4)), width/2, height/2).');
% Store V channel
V(:,:,k)=uint8(reshape(X(nPx + nPx/4 + (1:nPx/4)), width/2, height/2).');
-
+
% Read next frame
[X,nBytes]=fread(fid, frameSizeBytes, 'uchar');
end
-
+
fclose(fid);
diff --git a/webrtc/modules/video_processing/main/test/unit_test/video_processing_unittest.cc b/webrtc/modules/video_processing/test/video_processing_unittest.cc
index 11ccc4891b..2fd8fb6673 100644
--- a/webrtc/modules/video_processing/main/test/unit_test/video_processing_unittest.cc
+++ b/webrtc/modules/video_processing/test/video_processing_unittest.cc
@@ -8,15 +8,15 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/video_processing/main/test/unit_test/video_processing_unittest.h"
+#include "webrtc/modules/video_processing/test/video_processing_unittest.h"
+
+#include <gflags/gflags.h>
#include <string>
-#include <gflags/gflags.h>
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
#include "webrtc/system_wrappers/include/tick_util.h"
#include "webrtc/test/testsupport/fileutils.h"
-#include "webrtc/test/testsupport/gtest_disable.h"
namespace webrtc {
@@ -30,8 +30,8 @@ DEFINE_bool(gen_files, false, "Output files for visual inspection.");
static void PreprocessFrameAndVerify(const VideoFrame& source,
int target_width,
int target_height,
- VideoProcessingModule* vpm,
- VideoFrame** out_frame);
+ VideoProcessing* vpm,
+ const VideoFrame* out_frame);
static void CropFrame(const uint8_t* source_data,
int source_width,
int source_height,
@@ -49,14 +49,14 @@ static void TestSize(const VideoFrame& source_frame,
int target_width,
int target_height,
double expected_psnr,
- VideoProcessingModule* vpm);
+ VideoProcessing* vpm);
static bool CompareFrames(const webrtc::VideoFrame& frame1,
const webrtc::VideoFrame& frame2);
static void WriteProcessedFrameForVisualInspection(const VideoFrame& source,
const VideoFrame& processed);
-VideoProcessingModuleTest::VideoProcessingModuleTest()
- : vpm_(NULL),
+VideoProcessingTest::VideoProcessingTest()
+ : vp_(NULL),
source_file_(NULL),
width_(352),
half_width_((width_ + 1) / 2),
@@ -65,155 +65,179 @@ VideoProcessingModuleTest::VideoProcessingModuleTest()
size_uv_(half_width_ * ((height_ + 1) / 2)),
frame_length_(CalcBufferSize(kI420, width_, height_)) {}
-void VideoProcessingModuleTest::SetUp() {
- vpm_ = VideoProcessingModule::Create();
- ASSERT_TRUE(vpm_ != NULL);
+void VideoProcessingTest::SetUp() {
+ vp_ = VideoProcessing::Create();
+ ASSERT_TRUE(vp_ != NULL);
ASSERT_EQ(0, video_frame_.CreateEmptyFrame(width_, height_, width_,
- half_width_, half_width_));
+ half_width_, half_width_));
// Clear video frame so DrMemory/Valgrind will allow reads of the buffer.
memset(video_frame_.buffer(kYPlane), 0, video_frame_.allocated_size(kYPlane));
memset(video_frame_.buffer(kUPlane), 0, video_frame_.allocated_size(kUPlane));
memset(video_frame_.buffer(kVPlane), 0, video_frame_.allocated_size(kVPlane));
const std::string video_file =
webrtc::test::ResourcePath("foreman_cif", "yuv");
- source_file_ = fopen(video_file.c_str(),"rb");
- ASSERT_TRUE(source_file_ != NULL) <<
- "Cannot read source file: " + video_file + "\n";
+ source_file_ = fopen(video_file.c_str(), "rb");
+ ASSERT_TRUE(source_file_ != NULL)
+ << "Cannot read source file: " + video_file + "\n";
}
-void VideoProcessingModuleTest::TearDown() {
- if (source_file_ != NULL) {
+void VideoProcessingTest::TearDown() {
+ if (source_file_ != NULL) {
ASSERT_EQ(0, fclose(source_file_));
}
source_file_ = NULL;
-
- if (vpm_ != NULL) {
- VideoProcessingModule::Destroy(vpm_);
- }
- vpm_ = NULL;
+ delete vp_;
+ vp_ = NULL;
}
-TEST_F(VideoProcessingModuleTest, DISABLED_ON_IOS(HandleNullBuffer)) {
+#if defined(WEBRTC_IOS)
+TEST_F(VideoProcessingTest, DISABLED_HandleNullBuffer) {
+#else
+TEST_F(VideoProcessingTest, HandleNullBuffer) {
+#endif
// TODO(mikhal/stefan): Do we need this one?
- VideoProcessingModule::FrameStats stats;
+ VideoProcessing::FrameStats stats;
// Video frame with unallocated buffer.
VideoFrame videoFrame;
- EXPECT_EQ(-3, vpm_->GetFrameStats(&stats, videoFrame));
+ vp_->GetFrameStats(videoFrame, &stats);
+ EXPECT_EQ(stats.num_pixels, 0u);
- EXPECT_EQ(-1, vpm_->Deflickering(&videoFrame, &stats));
+ EXPECT_EQ(-1, vp_->Deflickering(&videoFrame, &stats));
- EXPECT_EQ(-3, vpm_->BrightnessDetection(videoFrame, stats));
+ EXPECT_EQ(-3, vp_->BrightnessDetection(videoFrame, stats));
}
-TEST_F(VideoProcessingModuleTest, DISABLED_ON_IOS(HandleBadStats)) {
- VideoProcessingModule::FrameStats stats;
+#if defined(WEBRTC_IOS)
+TEST_F(VideoProcessingTest, DISABLED_HandleBadStats) {
+#else
+TEST_F(VideoProcessingTest, HandleBadStats) {
+#endif
+ VideoProcessing::FrameStats stats;
+ vp_->ClearFrameStats(&stats);
rtc::scoped_ptr<uint8_t[]> video_buffer(new uint8_t[frame_length_]);
- ASSERT_EQ(frame_length_, fread(video_buffer.get(), 1, frame_length_,
- source_file_));
+ ASSERT_EQ(frame_length_,
+ fread(video_buffer.get(), 1, frame_length_, source_file_));
EXPECT_EQ(0, ConvertToI420(kI420, video_buffer.get(), 0, 0, width_, height_,
0, kVideoRotation_0, &video_frame_));
- EXPECT_EQ(-1, vpm_->Deflickering(&video_frame_, &stats));
+ EXPECT_EQ(-1, vp_->Deflickering(&video_frame_, &stats));
- EXPECT_EQ(-3, vpm_->BrightnessDetection(video_frame_, stats));
+ EXPECT_EQ(-3, vp_->BrightnessDetection(video_frame_, stats));
}
-TEST_F(VideoProcessingModuleTest, DISABLED_ON_IOS(IdenticalResultsAfterReset)) {
+#if defined(WEBRTC_IOS)
+TEST_F(VideoProcessingTest, DISABLED_IdenticalResultsAfterReset) {
+#else
+TEST_F(VideoProcessingTest, IdenticalResultsAfterReset) {
+#endif
VideoFrame video_frame2;
- VideoProcessingModule::FrameStats stats;
+ VideoProcessing::FrameStats stats;
// Only testing non-static functions here.
rtc::scoped_ptr<uint8_t[]> video_buffer(new uint8_t[frame_length_]);
- ASSERT_EQ(frame_length_, fread(video_buffer.get(), 1, frame_length_,
- source_file_));
+ ASSERT_EQ(frame_length_,
+ fread(video_buffer.get(), 1, frame_length_, source_file_));
EXPECT_EQ(0, ConvertToI420(kI420, video_buffer.get(), 0, 0, width_, height_,
0, kVideoRotation_0, &video_frame_));
- ASSERT_EQ(0, vpm_->GetFrameStats(&stats, video_frame_));
+ vp_->GetFrameStats(video_frame_, &stats);
+ EXPECT_GT(stats.num_pixels, 0u);
ASSERT_EQ(0, video_frame2.CopyFrame(video_frame_));
- ASSERT_EQ(0, vpm_->Deflickering(&video_frame_, &stats));
- vpm_->Reset();
+ ASSERT_EQ(0, vp_->Deflickering(&video_frame_, &stats));
+
// Retrieve frame stats again in case Deflickering() has zeroed them.
- ASSERT_EQ(0, vpm_->GetFrameStats(&stats, video_frame2));
- ASSERT_EQ(0, vpm_->Deflickering(&video_frame2, &stats));
+ vp_->GetFrameStats(video_frame2, &stats);
+ EXPECT_GT(stats.num_pixels, 0u);
+ ASSERT_EQ(0, vp_->Deflickering(&video_frame2, &stats));
EXPECT_TRUE(CompareFrames(video_frame_, video_frame2));
- ASSERT_EQ(frame_length_, fread(video_buffer.get(), 1, frame_length_,
- source_file_));
+ ASSERT_EQ(frame_length_,
+ fread(video_buffer.get(), 1, frame_length_, source_file_));
EXPECT_EQ(0, ConvertToI420(kI420, video_buffer.get(), 0, 0, width_, height_,
0, kVideoRotation_0, &video_frame_));
- ASSERT_EQ(0, vpm_->GetFrameStats(&stats, video_frame_));
+ vp_->GetFrameStats(video_frame_, &stats);
+ EXPECT_GT(stats.num_pixels, 0u);
video_frame2.CopyFrame(video_frame_);
- ASSERT_EQ(0, vpm_->BrightnessDetection(video_frame_, stats));
- vpm_->Reset();
- ASSERT_EQ(0, vpm_->BrightnessDetection(video_frame2, stats));
+ ASSERT_EQ(0, vp_->BrightnessDetection(video_frame_, stats));
+
+ ASSERT_EQ(0, vp_->BrightnessDetection(video_frame2, stats));
EXPECT_TRUE(CompareFrames(video_frame_, video_frame2));
}
-TEST_F(VideoProcessingModuleTest, DISABLED_ON_IOS(FrameStats)) {
- VideoProcessingModule::FrameStats stats;
+#if defined(WEBRTC_IOS)
+TEST_F(VideoProcessingTest, DISABLED_FrameStats) {
+#else
+TEST_F(VideoProcessingTest, FrameStats) {
+#endif
+ VideoProcessing::FrameStats stats;
+ vp_->ClearFrameStats(&stats);
rtc::scoped_ptr<uint8_t[]> video_buffer(new uint8_t[frame_length_]);
- ASSERT_EQ(frame_length_, fread(video_buffer.get(), 1, frame_length_,
- source_file_));
+ ASSERT_EQ(frame_length_,
+ fread(video_buffer.get(), 1, frame_length_, source_file_));
EXPECT_EQ(0, ConvertToI420(kI420, video_buffer.get(), 0, 0, width_, height_,
0, kVideoRotation_0, &video_frame_));
- EXPECT_FALSE(vpm_->ValidFrameStats(stats));
- EXPECT_EQ(0, vpm_->GetFrameStats(&stats, video_frame_));
- EXPECT_TRUE(vpm_->ValidFrameStats(stats));
+ EXPECT_FALSE(vp_->ValidFrameStats(stats));
+ vp_->GetFrameStats(video_frame_, &stats);
+ EXPECT_GT(stats.num_pixels, 0u);
+ EXPECT_TRUE(vp_->ValidFrameStats(stats));
printf("\nFrameStats\n");
- printf("mean: %u\nnum_pixels: %u\nsubSamplWidth: "
- "%u\nsumSamplHeight: %u\nsum: %u\n\n",
+ printf("mean: %u\nnum_pixels: %u\nsubSamplFactor: %u\nsum: %u\n\n",
static_cast<unsigned int>(stats.mean),
static_cast<unsigned int>(stats.num_pixels),
- static_cast<unsigned int>(stats.subSamplHeight),
- static_cast<unsigned int>(stats.subSamplWidth),
+ static_cast<unsigned int>(stats.sub_sampling_factor),
static_cast<unsigned int>(stats.sum));
- vpm_->ClearFrameStats(&stats);
- EXPECT_FALSE(vpm_->ValidFrameStats(stats));
+ vp_->ClearFrameStats(&stats);
+ EXPECT_FALSE(vp_->ValidFrameStats(stats));
}
-TEST_F(VideoProcessingModuleTest, DISABLED_ON_IOS(PreprocessorLogic)) {
+#if defined(WEBRTC_IOS)
+TEST_F(VideoProcessingTest, DISABLED_PreprocessorLogic) {
+#else
+TEST_F(VideoProcessingTest, PreprocessorLogic) {
+#endif
// Disable temporal sampling (frame dropping).
- vpm_->EnableTemporalDecimation(false);
+ vp_->EnableTemporalDecimation(false);
int resolution = 100;
- EXPECT_EQ(VPM_OK, vpm_->SetTargetResolution(resolution, resolution, 15));
- EXPECT_EQ(VPM_OK, vpm_->SetTargetResolution(resolution, resolution, 30));
+ EXPECT_EQ(VPM_OK, vp_->SetTargetResolution(resolution, resolution, 15));
+ EXPECT_EQ(VPM_OK, vp_->SetTargetResolution(resolution, resolution, 30));
// Disable spatial sampling.
- vpm_->SetInputFrameResampleMode(kNoRescaling);
- EXPECT_EQ(VPM_OK, vpm_->SetTargetResolution(resolution, resolution, 30));
+ vp_->SetInputFrameResampleMode(kNoRescaling);
+ EXPECT_EQ(VPM_OK, vp_->SetTargetResolution(resolution, resolution, 30));
- VideoFrame* out_frame = NULL;
+ const VideoFrame* out_frame = nullptr;
// Set rescaling => output frame != NULL.
- vpm_->SetInputFrameResampleMode(kFastRescaling);
- PreprocessFrameAndVerify(video_frame_, resolution, resolution, vpm_,
- &out_frame);
+ vp_->SetInputFrameResampleMode(kFastRescaling);
+ PreprocessFrameAndVerify(video_frame_, resolution, resolution, vp_,
+ &out_frame);
- // No rescaling=> output frame = NULL.
+ // No rescaling => the original frame is returned.
- vpm_->SetInputFrameResampleMode(kNoRescaling);
- EXPECT_EQ(VPM_OK, vpm_->PreprocessFrame(video_frame_, &out_frame));
- EXPECT_TRUE(out_frame == NULL);
+ vp_->SetInputFrameResampleMode(kNoRescaling);
+ EXPECT_TRUE(vp_->PreprocessFrame(video_frame_) != nullptr);
}
-TEST_F(VideoProcessingModuleTest, DISABLED_ON_IOS(Resampler)) {
+#if defined(WEBRTC_IOS)
+TEST_F(VideoProcessingTest, DISABLED_Resampler) {
+#else
+TEST_F(VideoProcessingTest, Resampler) {
+#endif
enum { NumRuns = 1 };
int64_t min_runtime = 0;
int64_t total_runtime = 0;
rewind(source_file_);
- ASSERT_TRUE(source_file_ != NULL) <<
- "Cannot read input file \n";
+ ASSERT_TRUE(source_file_ != NULL) << "Cannot read input file\n";
// Content analysis is not needed here.
- vpm_->EnableContentAnalysis(false);
+ vp_->EnableContentAnalysis(false);
// No temporal decimation.
- vpm_->EnableTemporalDecimation(false);
+ vp_->EnableTemporalDecimation(false);
// Read one test frame.
rtc::scoped_ptr<uint8_t[]> video_buffer(new uint8_t[frame_length_]);
- ASSERT_EQ(frame_length_, fread(video_buffer.get(), 1, frame_length_,
- source_file_));
+ ASSERT_EQ(frame_length_,
+ fread(video_buffer.get(), 1, frame_length_, source_file_));
// Using ConvertToI420 to add stride to the image.
EXPECT_EQ(0, ConvertToI420(kI420, video_buffer.get(), 0, 0, width_, height_,
0, kVideoRotation_0, &video_frame_));
@@ -231,43 +255,43 @@ TEST_F(VideoProcessingModuleTest, DISABLED_ON_IOS(Resampler)) {
// Test scaling to different sizes: source is of |width|/|height| = 352/288.
// Pure scaling:
- TestSize(video_frame_, video_frame_, width_ / 4, height_ / 4, 25.2, vpm_);
- TestSize(video_frame_, video_frame_, width_ / 2, height_ / 2, 28.1, vpm_);
+ TestSize(video_frame_, video_frame_, width_ / 4, height_ / 4, 25.2, vp_);
+ TestSize(video_frame_, video_frame_, width_ / 2, height_ / 2, 28.1, vp_);
// No resampling:
- TestSize(video_frame_, video_frame_, width_, height_, -1, vpm_);
- TestSize(video_frame_, video_frame_, 2 * width_, 2 * height_, 32.2, vpm_);
+ TestSize(video_frame_, video_frame_, width_, height_, -1, vp_);
+ TestSize(video_frame_, video_frame_, 2 * width_, 2 * height_, 32.2, vp_);
// Scaling and cropping. The cropped source frame is the largest center
// aligned region that can be used from the source while preserving aspect
// ratio.
CropFrame(video_buffer.get(), width_, height_, 0, 56, 352, 176,
&cropped_source_frame);
- TestSize(video_frame_, cropped_source_frame, 100, 50, 24.0, vpm_);
+ TestSize(video_frame_, cropped_source_frame, 100, 50, 24.0, vp_);
CropFrame(video_buffer.get(), width_, height_, 0, 30, 352, 225,
&cropped_source_frame);
- TestSize(video_frame_, cropped_source_frame, 400, 256, 31.3, vpm_);
+ TestSize(video_frame_, cropped_source_frame, 400, 256, 31.3, vp_);
CropFrame(video_buffer.get(), width_, height_, 68, 0, 216, 288,
&cropped_source_frame);
- TestSize(video_frame_, cropped_source_frame, 480, 640, 32.15, vpm_);
+ TestSize(video_frame_, cropped_source_frame, 480, 640, 32.15, vp_);
CropFrame(video_buffer.get(), width_, height_, 0, 12, 352, 264,
&cropped_source_frame);
- TestSize(video_frame_, cropped_source_frame, 960, 720, 32.2, vpm_);
+ TestSize(video_frame_, cropped_source_frame, 960, 720, 32.2, vp_);
CropFrame(video_buffer.get(), width_, height_, 0, 44, 352, 198,
&cropped_source_frame);
- TestSize(video_frame_, cropped_source_frame, 1280, 720, 32.15, vpm_);
+ TestSize(video_frame_, cropped_source_frame, 1280, 720, 32.15, vp_);
// Upsampling to odd size.
CropFrame(video_buffer.get(), width_, height_, 0, 26, 352, 233,
&cropped_source_frame);
- TestSize(video_frame_, cropped_source_frame, 501, 333, 32.05, vpm_);
+ TestSize(video_frame_, cropped_source_frame, 501, 333, 32.05, vp_);
// Downsample to odd size.
CropFrame(video_buffer.get(), width_, height_, 0, 34, 352, 219,
&cropped_source_frame);
- TestSize(video_frame_, cropped_source_frame, 281, 175, 29.3, vpm_);
+ TestSize(video_frame_, cropped_source_frame, 281, 175, 29.3, vp_);
// Stop timer.
const int64_t runtime = (TickTime::Now() - time_start).Microseconds();
@@ -279,30 +303,30 @@ TEST_F(VideoProcessingModuleTest, DISABLED_ON_IOS(Resampler)) {
printf("\nAverage run time = %d us / frame\n",
static_cast<int>(total_runtime));
- printf("Min run time = %d us / frame\n\n",
- static_cast<int>(min_runtime));
+ printf("Min run time = %d us / frame\n\n", static_cast<int>(min_runtime));
}
void PreprocessFrameAndVerify(const VideoFrame& source,
int target_width,
int target_height,
- VideoProcessingModule* vpm,
- VideoFrame** out_frame) {
+ VideoProcessing* vpm,
+ const VideoFrame** out_frame) {
ASSERT_EQ(VPM_OK, vpm->SetTargetResolution(target_width, target_height, 30));
- ASSERT_EQ(VPM_OK, vpm->PreprocessFrame(source, out_frame));
+ *out_frame = vpm->PreprocessFrame(source);
+ ASSERT_TRUE(*out_frame != nullptr);
- // If no resizing is needed, expect NULL.
+ // If no resizing is needed, expect the original frame.
if (target_width == source.width() && target_height == source.height()) {
- EXPECT_EQ(NULL, *out_frame);
+ EXPECT_EQ(&source, *out_frame);
return;
}
// Verify the resampled frame.
- EXPECT_TRUE(*out_frame != NULL);
- EXPECT_EQ(source.render_time_ms(), (*out_frame)->render_time_ms());
- EXPECT_EQ(source.timestamp(), (*out_frame)->timestamp());
- EXPECT_EQ(target_width, (*out_frame)->width());
- EXPECT_EQ(target_height, (*out_frame)->height());
+ EXPECT_EQ(source.render_time_ms(), (*out_frame)->render_time_ms());
+ EXPECT_EQ(source.timestamp(), (*out_frame)->timestamp());
+ EXPECT_EQ(target_width, (*out_frame)->width());
+ EXPECT_EQ(target_height, (*out_frame)->height());
}
void CropFrame(const uint8_t* source_data,
@@ -326,12 +350,12 @@ void TestSize(const VideoFrame& source_frame,
int target_width,
int target_height,
double expected_psnr,
- VideoProcessingModule* vpm) {
+ VideoProcessing* vpm) {
// Resample source_frame to out_frame.
- VideoFrame* out_frame = NULL;
+ const VideoFrame* out_frame = nullptr;
vpm->SetInputFrameResampleMode(kBox);
PreprocessFrameAndVerify(source_frame, target_width, target_height, vpm,
&out_frame);
- if (out_frame == NULL)
+ if (out_frame == &source_frame)
return;
WriteProcessedFrameForVisualInspection(source_frame, *out_frame);
@@ -340,21 +364,22 @@ void TestSize(const VideoFrame& source_frame,
VideoFrame resampled_source_frame;
resampled_source_frame.CopyFrame(*out_frame);
PreprocessFrameAndVerify(resampled_source_frame, cropped_source_frame.width(),
cropped_source_frame.height(), vpm, &out_frame);
WriteProcessedFrameForVisualInspection(resampled_source_frame, *out_frame);
// Compute PSNR against the cropped source frame and check expectation.
double psnr = I420PSNR(&cropped_source_frame, out_frame);
EXPECT_GT(psnr, expected_psnr);
- printf("PSNR: %f. PSNR is between source of size %d %d, and a modified "
- "source which is scaled down/up to: %d %d, and back to source size \n",
- psnr, source_frame.width(), source_frame.height(),
- target_width, target_height);
+ printf(
+ "PSNR: %f between the %d x %d source and a version scaled to "
+ "%d x %d and back to the source size.\n",
+ psnr, source_frame.width(), source_frame.height(), target_width,
+ target_height);
}
bool CompareFrames(const webrtc::VideoFrame& frame1,
const webrtc::VideoFrame& frame2) {
- for (int plane = 0; plane < webrtc::kNumOfPlanes; plane ++) {
+ for (int plane = 0; plane < webrtc::kNumOfPlanes; plane++) {
webrtc::PlaneType plane_type = static_cast<webrtc::PlaneType>(plane);
int allocated_size1 = frame1.allocated_size(plane_type);
int allocated_size2 = frame2.allocated_size(plane_type);
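
For orientation, the refactor above replaces the out-parameter API with a
pointer return. A minimal caller sketch under the signature implied by the
tests (Encode() is a hypothetical sink, not part of the patch):

#include "webrtc/modules/video_processing/include/video_processing.h"

static void Encode(const webrtc::VideoFrame&) {}  // Hypothetical sink.

static void ProcessCapturedFrame(webrtc::VideoProcessing* vp,
                                 const webrtc::VideoFrame& captured_frame) {
  const webrtc::VideoFrame* processed = vp->PreprocessFrame(captured_frame);
  if (processed == nullptr)
    return;  // Dropped, e.g. by temporal decimation.
  // |processed| is |&captured_frame| when no resampling was needed,
  // otherwise a frame scaled to the target resolution and owned by |vp|.
  Encode(*processed);
}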
diff --git a/webrtc/modules/video_processing/main/test/unit_test/video_processing_unittest.h b/webrtc/modules/video_processing/test/video_processing_unittest.h
index 4a4fda41e6..3433c6ca86 100644
--- a/webrtc/modules/video_processing/main/test/unit_test/video_processing_unittest.h
+++ b/webrtc/modules/video_processing/test/video_processing_unittest.h
@@ -8,19 +8,21 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_VIDEO_PROCESSING_MAIN_TEST_UNIT_TEST_VIDEO_PROCESSING_UNITTEST_H
-#define WEBRTC_MODULES_VIDEO_PROCESSING_MAIN_TEST_UNIT_TEST_VIDEO_PROCESSING_UNITTEST_H
+#ifndef WEBRTC_MODULES_VIDEO_PROCESSING_TEST_VIDEO_PROCESSING_UNITTEST_H_
+#define WEBRTC_MODULES_VIDEO_PROCESSING_TEST_VIDEO_PROCESSING_UNITTEST_H_
+
+#include <string>
#include "testing/gtest/include/gtest/gtest.h"
-#include "webrtc/modules/video_processing/main/interface/video_processing.h"
+#include "webrtc/modules/video_processing/include/video_processing.h"
#include "webrtc/system_wrappers/include/trace.h"
#include "webrtc/test/testsupport/fileutils.h"
namespace webrtc {
-class VideoProcessingModuleTest : public ::testing::Test {
+class VideoProcessingTest : public ::testing::Test {
protected:
- VideoProcessingModuleTest();
+ VideoProcessingTest();
virtual void SetUp();
virtual void TearDown();
static void SetUpTestCase() {
@@ -28,10 +30,8 @@ class VideoProcessingModuleTest : public ::testing::Test {
std::string trace_file = webrtc::test::OutputPath() + "VPMTrace.txt";
ASSERT_EQ(0, Trace::SetTraceFile(trace_file.c_str()));
}
- static void TearDownTestCase() {
- Trace::ReturnTrace();
- }
- VideoProcessingModule* vpm_;
+ static void TearDownTestCase() { Trace::ReturnTrace(); }
+ VideoProcessing* vp_;
FILE* source_file_;
VideoFrame video_frame_;
const int width_;
@@ -44,4 +44,4 @@ class VideoProcessingModuleTest : public ::testing::Test {
} // namespace webrtc
-#endif // WEBRTC_MODULES_VIDEO_PROCESSING_MAIN_TEST_UNIT_TEST_VIDEO_PROCESSING_UNITTEST_H
+#endif // WEBRTC_MODULES_VIDEO_PROCESSING_TEST_VIDEO_PROCESSING_UNITTEST_H_
diff --git a/webrtc/modules/video_processing/main/test/unit_test/writeYUV420file.m b/webrtc/modules/video_processing/test/writeYUV420file.m
index 69a8808338..359445009b 100644
--- a/webrtc/modules/video_processing/main/test/unit_test/writeYUV420file.m
+++ b/webrtc/modules/video_processing/test/writeYUV420file.m
@@ -11,10 +11,10 @@ numFrames=size(Y,3);
for k=1:numFrames
% Write luminance
fwrite(fid,uint8(Y(:,:,k).'), 'uchar');
-
+
% Write U channel
fwrite(fid,uint8(U(:,:,k).'), 'uchar');
-
+
% Write V channel
fwrite(fid,uint8(V(:,:,k).'), 'uchar');
end
diff --git a/webrtc/modules/video_processing/util/denoiser_filter.cc b/webrtc/modules/video_processing/util/denoiser_filter.cc
new file mode 100644
index 0000000000..fbc2435cb5
--- /dev/null
+++ b/webrtc/modules/video_processing/util/denoiser_filter.cc
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/base/checks.h"
+#include "webrtc/modules/video_processing/util/denoiser_filter.h"
+#include "webrtc/modules/video_processing/util/denoiser_filter_c.h"
+#include "webrtc/modules/video_processing/util/denoiser_filter_neon.h"
+#include "webrtc/modules/video_processing/util/denoiser_filter_sse2.h"
+#include "webrtc/system_wrappers/include/cpu_features_wrapper.h"
+
+namespace webrtc {
+
+const int kMotionMagnitudeThreshold = 8 * 3;
+const int kSumDiffThreshold = 16 * 16 * 2;
+const int kSumDiffThresholdHigh = 600;
+
+rtc::scoped_ptr<DenoiserFilter> DenoiserFilter::Create(
+ bool runtime_cpu_detection) {
+ rtc::scoped_ptr<DenoiserFilter> filter;
+
+ if (runtime_cpu_detection) {
+// If we know the minimum architecture at compile time, avoid CPU detection.
+#if defined(WEBRTC_ARCH_X86_FAMILY)
+ // x86 CPU detection required.
+ if (WebRtc_GetCPUInfo(kSSE2)) {
+ filter.reset(new DenoiserFilterSSE2());
+ } else {
+ filter.reset(new DenoiserFilterC());
+ }
+#elif defined(WEBRTC_DETECT_NEON)
+ if (WebRtc_GetCPUFeaturesARM() & kCPUFeatureNEON) {
+ filter.reset(new DenoiserFilterNEON());
+ } else {
+ filter.reset(new DenoiserFilterC());
+ }
+#else
+ filter.reset(new DenoiserFilterC());
+#endif
+ } else {
+ filter.reset(new DenoiserFilterC());
+ }
+
+ RTC_DCHECK(filter.get() != nullptr);
+ return filter;
+}
+
+} // namespace webrtc
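
A usage sketch for the factory above, assuming only the interface shown:
Create() picks the SSE2 or NEON implementation when the CPU supports it and
falls back to the plain C one otherwise.

#include "webrtc/modules/video_processing/util/denoiser_filter.h"

void CopyBlockWithBestFilter(const uint8_t* src, uint8_t* dst, int stride) {
  rtc::scoped_ptr<webrtc::DenoiserFilter> filter =
      webrtc::DenoiserFilter::Create(true /* runtime_cpu_detection */);
  // All implementations share the same 16x16 block interface.
  filter->CopyMem16x16(src, stride, dst, stride);
}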
diff --git a/webrtc/modules/video_processing/util/denoiser_filter.h b/webrtc/modules/video_processing/util/denoiser_filter.h
new file mode 100644
index 0000000000..5d5a61c59c
--- /dev/null
+++ b/webrtc/modules/video_processing/util/denoiser_filter.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_PROCESSING_UTIL_DENOISER_FILTER_H_
+#define WEBRTC_MODULES_VIDEO_PROCESSING_UTIL_DENOISER_FILTER_H_
+
+#include "webrtc/base/scoped_ptr.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/video_processing/include/video_processing_defines.h"
+
+namespace webrtc {
+
+extern const int kMotionMagnitudeThreshold;
+extern const int kSumDiffThreshold;
+extern const int kSumDiffThresholdHigh;
+
+enum DenoiserDecision { COPY_BLOCK, FILTER_BLOCK };
+struct DenoiseMetrics {
+ uint32_t var;
+ uint32_t sad;
+ uint8_t denoise;
+ bool is_skin;
+};
+
+class DenoiserFilter {
+ public:
+ static rtc::scoped_ptr<DenoiserFilter> Create(bool runtime_cpu_detection);
+
+ virtual ~DenoiserFilter() {}
+
+ virtual void CopyMem16x16(const uint8_t* src,
+ int src_stride,
+ uint8_t* dst,
+ int dst_stride) = 0;
+ virtual void CopyMem8x8(const uint8_t* src,
+ int src_stride,
+ uint8_t* dst,
+ int dst_stride) = 0;
+ virtual uint32_t Variance16x8(const uint8_t* a,
+ int a_stride,
+ const uint8_t* b,
+ int b_stride,
+ unsigned int* sse) = 0;
+ virtual DenoiserDecision MbDenoise(uint8_t* mc_running_avg_y,
+ int mc_avg_y_stride,
+ uint8_t* running_avg_y,
+ int avg_y_stride,
+ const uint8_t* sig,
+ int sig_stride,
+ uint8_t motion_magnitude,
+ int increase_denoising) = 0;
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_PROCESSING_UTIL_DENOISER_FILTER_H_
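
A minimal caller sketch for MbDenoise(), modeled on the VP8 denoiser this
interface mirrors (names are illustrative): FILTER_BLOCK means the filtered
macroblock was written to |running_avg_y|; on COPY_BLOCK the caller keeps
the source pixels instead.

void DenoiseMb(webrtc::DenoiserFilter* filter,
               uint8_t* mc_running_avg_y,
               uint8_t* running_avg_y,
               const uint8_t* src_y,
               int stride,
               uint8_t motion_magnitude) {
  if (filter->MbDenoise(mc_running_avg_y, stride, running_avg_y, stride,
                        src_y, stride, motion_magnitude,
                        0 /* increase_denoising */) == webrtc::COPY_BLOCK) {
    filter->CopyMem16x16(src_y, stride, running_avg_y, stride);
  }
}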
diff --git a/webrtc/modules/video_processing/util/denoiser_filter_c.cc b/webrtc/modules/video_processing/util/denoiser_filter_c.cc
new file mode 100644
index 0000000000..6323980e18
--- /dev/null
+++ b/webrtc/modules/video_processing/util/denoiser_filter_c.cc
@@ -0,0 +1,194 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdlib.h>
+
+#include "webrtc/modules/video_processing/util/denoiser_filter_c.h"
+
+namespace webrtc {
+
+void DenoiserFilterC::CopyMem16x16(const uint8_t* src,
+ int src_stride,
+ uint8_t* dst,
+ int dst_stride) {
+ for (int i = 0; i < 16; i++) {
+ memcpy(dst, src, 16);
+ src += src_stride;
+ dst += dst_stride;
+ }
+}
+
+void DenoiserFilterC::CopyMem8x8(const uint8_t* src,
+ int src_stride,
+ uint8_t* dst,
+ int dst_stride) {
+ for (int i = 0; i < 8; i++) {
+ memcpy(dst, src, 8);
+ src += src_stride;
+ dst += dst_stride;
+ }
+}
+
+uint32_t DenoiserFilterC::Variance16x8(const uint8_t* a,
+ int a_stride,
+ const uint8_t* b,
+ int b_stride,
+ uint32_t* sse) {
+ int sum = 0;
+ *sse = 0;
+ a_stride <<= 1;
+ b_stride <<= 1;
+
+ for (int i = 0; i < 8; i++) {
+ for (int j = 0; j < 16; j++) {
+ const int diff = a[j] - b[j];
+ sum += diff;
+ *sse += diff * diff;
+ }
+
+ a += a_stride;
+ b += b_stride;
+ }
+ return *sse - ((static_cast<int64_t>(sum) * sum) >> 7);
+}
+
+DenoiserDecision DenoiserFilterC::MbDenoise(uint8_t* mc_running_avg_y,
+ int mc_avg_y_stride,
+ uint8_t* running_avg_y,
+ int avg_y_stride,
+ const uint8_t* sig,
+ int sig_stride,
+ uint8_t motion_magnitude,
+ int increase_denoising) {
+ int sum_diff_thresh = 0;
+ int sum_diff = 0;
+ int adj_val[3] = {3, 4, 6};
+ int shift_inc1 = 0;
+ int shift_inc2 = 1;
+ int col_sum[16] = {0};
+ if (motion_magnitude <= kMotionMagnitudeThreshold) {
+ if (increase_denoising) {
+ shift_inc1 = 1;
+ shift_inc2 = 2;
+ }
+ adj_val[0] += shift_inc2;
+ adj_val[1] += shift_inc2;
+ adj_val[2] += shift_inc2;
+ }
+
+ for (int r = 0; r < 16; ++r) {
+ for (int c = 0; c < 16; ++c) {
+ int diff = 0;
+ int adjustment = 0;
+ int absdiff = 0;
+
+ diff = mc_running_avg_y[c] - sig[c];
+ absdiff = abs(diff);
+
+ // When |diff| <= 3 + shift_inc1, use the pixel value from the
+ // last denoised frame.
+ if (absdiff <= 3 + shift_inc1) {
+ running_avg_y[c] = mc_running_avg_y[c];
+ col_sum[c] += diff;
+ } else {
+ if (absdiff >= 4 + shift_inc1 && absdiff <= 7)
+ adjustment = adj_val[0];
+ else if (absdiff >= 8 && absdiff <= 15)
+ adjustment = adj_val[1];
+ else
+ adjustment = adj_val[2];
+
+ if (diff > 0) {
+ if ((sig[c] + adjustment) > 255)
+ running_avg_y[c] = 255;
+ else
+ running_avg_y[c] = sig[c] + adjustment;
+
+ col_sum[c] += adjustment;
+ } else {
+ if ((sig[c] - adjustment) < 0)
+ running_avg_y[c] = 0;
+ else
+ running_avg_y[c] = sig[c] - adjustment;
+
+ col_sum[c] -= adjustment;
+ }
+ }
+ }
+
+ // Update pointers for next iteration.
+ sig += sig_stride;
+ mc_running_avg_y += mc_avg_y_stride;
+ running_avg_y += avg_y_stride;
+ }
+
+ for (int c = 0; c < 16; ++c) {
+ if (col_sum[c] >= 128) {
+ col_sum[c] = 127;
+ }
+ sum_diff += col_sum[c];
+ }
+
+ sum_diff_thresh = kSumDiffThreshold;
+ if (increase_denoising)
+ sum_diff_thresh = kSumDiffThresholdHigh;
+ if (abs(sum_diff) > sum_diff_thresh) {
+ int delta = ((abs(sum_diff) - sum_diff_thresh) >> 8) + 1;
+ // Only apply the adjustment for max delta up to 3.
+ if (delta < 4) {
+ sig -= sig_stride * 16;
+ mc_running_avg_y -= mc_avg_y_stride * 16;
+ running_avg_y -= avg_y_stride * 16;
+ for (int r = 0; r < 16; ++r) {
+ for (int c = 0; c < 16; ++c) {
+ int diff = mc_running_avg_y[c] - sig[c];
+ int adjustment = abs(diff);
+ if (adjustment > delta)
+ adjustment = delta;
+ if (diff > 0) {
+ // Bring denoised signal down.
+ if (running_avg_y[c] - adjustment < 0)
+ running_avg_y[c] = 0;
+ else
+ running_avg_y[c] = running_avg_y[c] - adjustment;
+ col_sum[c] -= adjustment;
+ } else if (diff < 0) {
+ // Bring denoised signal up.
+ if (running_avg_y[c] + adjustment > 255)
+ running_avg_y[c] = 255;
+ else
+ running_avg_y[c] = running_avg_y[c] + adjustment;
+ col_sum[c] += adjustment;
+ }
+ }
+ sig += sig_stride;
+ mc_running_avg_y += mc_avg_y_stride;
+ running_avg_y += avg_y_stride;
+ }
+
+ sum_diff = 0;
+ for (int c = 0; c < 16; ++c) {
+ if (col_sum[c] >= 128) {
+ col_sum[c] = 127;
+ }
+ sum_diff += col_sum[c];
+ }
+
+ if (abs(sum_diff) > sum_diff_thresh)
+ return COPY_BLOCK;
+ } else {
+ return COPY_BLOCK;
+ }
+ }
+
+ return FILTER_BLOCK;
+}
+
+} // namespace webrtc
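
For reference, with increase_denoising == 0 and motion_magnitude <=
kMotionMagnitudeThreshold (so shift_inc1 == 0, shift_inc2 == 1 and
adj_val == {4, 5, 7}), the per-pixel mapping in MbDenoise() above works
out to:

  |diff| <= 3        ->  take mc_running_avg_y[c] unchanged
  4 <= |diff| <= 7   ->  move sig[c] toward the running average by 4
  8 <= |diff| <= 15  ->  move by 5
  |diff| >= 16       ->  move by 7

Variance16x8() likewise reduces to Var = SSE - Sum^2 / 128 over the
N = 16 * 8 = 128 sampled pixels (the ">> 7"); the strides are doubled, so
only every other row of the 16x16 block is read.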
diff --git a/webrtc/modules/video_processing/util/denoiser_filter_c.h b/webrtc/modules/video_processing/util/denoiser_filter_c.h
new file mode 100644
index 0000000000..fe46ac38ec
--- /dev/null
+++ b/webrtc/modules/video_processing/util/denoiser_filter_c.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_PROCESSING_UTIL_DENOISER_FILTER_C_H_
+#define WEBRTC_MODULES_VIDEO_PROCESSING_UTIL_DENOISER_FILTER_C_H_
+
+#include "webrtc/modules/video_processing/util/denoiser_filter.h"
+
+namespace webrtc {
+
+class DenoiserFilterC : public DenoiserFilter {
+ public:
+ DenoiserFilterC() {}
+ void CopyMem16x16(const uint8_t* src,
+ int src_stride,
+ uint8_t* dst,
+ int dst_stride) override;
+ void CopyMem8x8(const uint8_t* src,
+ int src_stride,
+ uint8_t* dst,
+ int dst_stride) override;
+ uint32_t Variance16x8(const uint8_t* a,
+ int a_stride,
+ const uint8_t* b,
+ int b_stride,
+ unsigned int* sse) override;
+ DenoiserDecision MbDenoise(uint8_t* mc_running_avg_y,
+ int mc_avg_y_stride,
+ uint8_t* running_avg_y,
+ int avg_y_stride,
+ const uint8_t* sig,
+ int sig_stride,
+ uint8_t motion_magnitude,
+ int increase_denoising) override;
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_PROCESSING_UTIL_DENOISER_FILTER_C_H_
diff --git a/webrtc/modules/video_processing/util/denoiser_filter_neon.cc b/webrtc/modules/video_processing/util/denoiser_filter_neon.cc
new file mode 100644
index 0000000000..b522bf002b
--- /dev/null
+++ b/webrtc/modules/video_processing/util/denoiser_filter_neon.cc
@@ -0,0 +1,283 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <arm_neon.h>
+
+#include "webrtc/modules/video_processing/util/denoiser_filter_neon.h"
+
+namespace webrtc {
+
+static int HorizontalAddS16x8(const int16x8_t v_16x8) {
+ const int32x4_t a = vpaddlq_s16(v_16x8);
+ const int64x2_t b = vpaddlq_s32(a);
+ const int32x2_t c = vadd_s32(vreinterpret_s32_s64(vget_low_s64(b)),
+ vreinterpret_s32_s64(vget_high_s64(b)));
+ return vget_lane_s32(c, 0);
+}
+
+static int HorizontalAddS32x4(const int32x4_t v_32x4) {
+ const int64x2_t b = vpaddlq_s32(v_32x4);
+ const int32x2_t c = vadd_s32(vreinterpret_s32_s64(vget_low_s64(b)),
+ vreinterpret_s32_s64(vget_high_s64(b)));
+ return vget_lane_s32(c, 0);
+}
+
+static void VarianceNeonW8(const uint8_t* a,
+ int a_stride,
+ const uint8_t* b,
+ int b_stride,
+ int w,
+ int h,
+ uint32_t* sse,
+ int64_t* sum) {
+ int16x8_t v_sum = vdupq_n_s16(0);
+ int32x4_t v_sse_lo = vdupq_n_s32(0);
+ int32x4_t v_sse_hi = vdupq_n_s32(0);
+
+ for (int i = 0; i < h; ++i) {
+ for (int j = 0; j < w; j += 8) {
+ const uint8x8_t v_a = vld1_u8(&a[j]);
+ const uint8x8_t v_b = vld1_u8(&b[j]);
+ const uint16x8_t v_diff = vsubl_u8(v_a, v_b);
+ const int16x8_t sv_diff = vreinterpretq_s16_u16(v_diff);
+ v_sum = vaddq_s16(v_sum, sv_diff);
+ v_sse_lo =
+ vmlal_s16(v_sse_lo, vget_low_s16(sv_diff), vget_low_s16(sv_diff));
+ v_sse_hi =
+ vmlal_s16(v_sse_hi, vget_high_s16(sv_diff), vget_high_s16(sv_diff));
+ }
+ a += a_stride;
+ b += b_stride;
+ }
+
+ *sum = HorizontalAddS16x8(v_sum);
+ *sse =
+ static_cast<uint32_t>(HorizontalAddS32x4(vaddq_s32(v_sse_lo, v_sse_hi)));
+}
+
+void DenoiserFilterNEON::CopyMem16x16(const uint8_t* src,
+ int src_stride,
+ uint8_t* dst,
+ int dst_stride) {
+ uint8x16_t qtmp;
+ for (int r = 0; r < 16; r++) {
+ qtmp = vld1q_u8(src);
+ vst1q_u8(dst, qtmp);
+ src += src_stride;
+ dst += dst_stride;
+ }
+}
+
+void DenoiserFilterNEON::CopyMem8x8(const uint8_t* src,
+ int src_stride,
+ uint8_t* dst,
+ int dst_stride) {
+ uint8x8_t vtmp;
+
+ for (int r = 0; r < 8; r++) {
+ vtmp = vld1_u8(src);
+ vst1_u8(dst, vtmp);
+ src += src_stride;
+ dst += dst_stride;
+ }
+}
+
+uint32_t DenoiserFilterNEON::Variance16x8(const uint8_t* a,
+ int a_stride,
+ const uint8_t* b,
+ int b_stride,
+ uint32_t* sse) {
+ int64_t sum = 0;
+ VarianceNeonW8(a, a_stride << 1, b, b_stride << 1, 16, 8, sse, &sum);
+ return *sse - ((sum * sum) >> 7);
+}
+
+DenoiserDecision DenoiserFilterNEON::MbDenoise(uint8_t* mc_running_avg_y,
+ int mc_running_avg_y_stride,
+ uint8_t* running_avg_y,
+ int running_avg_y_stride,
+ const uint8_t* sig,
+ int sig_stride,
+ uint8_t motion_magnitude,
+ int increase_denoising) {
+ // If motion_magnitude is small, make the denoiser more aggressive by
+ // increasing the adjustment for each level: the level 1 adjustment is
+ // increased while the deltas between levels stay the same.
+ int shift_inc =
+ (increase_denoising && motion_magnitude <= kMotionMagnitudeThreshold) ? 1
+ : 0;
+ const uint8x16_t v_level1_adjustment = vmovq_n_u8(
+ (motion_magnitude <= kMotionMagnitudeThreshold) ? 4 + shift_inc : 3);
+ const uint8x16_t v_delta_level_1_and_2 = vdupq_n_u8(1);
+ const uint8x16_t v_delta_level_2_and_3 = vdupq_n_u8(2);
+ const uint8x16_t v_level1_threshold = vmovq_n_u8(4 + shift_inc);
+ const uint8x16_t v_level2_threshold = vdupq_n_u8(8);
+ const uint8x16_t v_level3_threshold = vdupq_n_u8(16);
+ int64x2_t v_sum_diff_total = vdupq_n_s64(0);
+
+ // Go over lines.
+ for (int r = 0; r < 16; ++r) {
+ // Load inputs.
+ const uint8x16_t v_sig = vld1q_u8(sig);
+ const uint8x16_t v_mc_running_avg_y = vld1q_u8(mc_running_avg_y);
+
+ // Calculate absolute difference and sign masks.
+ const uint8x16_t v_abs_diff = vabdq_u8(v_sig, v_mc_running_avg_y);
+ const uint8x16_t v_diff_pos_mask = vcltq_u8(v_sig, v_mc_running_avg_y);
+ const uint8x16_t v_diff_neg_mask = vcgtq_u8(v_sig, v_mc_running_avg_y);
+
+ // Figure out which level the absolute difference puts us in.
+ const uint8x16_t v_level1_mask = vcleq_u8(v_level1_threshold, v_abs_diff);
+ const uint8x16_t v_level2_mask = vcleq_u8(v_level2_threshold, v_abs_diff);
+ const uint8x16_t v_level3_mask = vcleq_u8(v_level3_threshold, v_abs_diff);
+
+ // Calculate absolute adjustments for level 1, 2 and 3.
+ const uint8x16_t v_level2_adjustment =
+ vandq_u8(v_level2_mask, v_delta_level_1_and_2);
+ const uint8x16_t v_level3_adjustment =
+ vandq_u8(v_level3_mask, v_delta_level_2_and_3);
+ const uint8x16_t v_level1and2_adjustment =
+ vaddq_u8(v_level1_adjustment, v_level2_adjustment);
+ const uint8x16_t v_level1and2and3_adjustment =
+ vaddq_u8(v_level1and2_adjustment, v_level3_adjustment);
+
+ // Figure adjustment absolute value by selecting between the absolute
+ // difference if in level0 or the value for level 1, 2 and 3.
+ const uint8x16_t v_abs_adjustment =
+ vbslq_u8(v_level1_mask, v_level1and2and3_adjustment, v_abs_diff);
+
+ // Calculate positive and negative adjustments. Apply them to the signal
+ // and accumulate them. Adjustments are less than eight and the maximum
+ // sum of them (7 * 16) can fit in a signed char.
+ const uint8x16_t v_pos_adjustment =
+ vandq_u8(v_diff_pos_mask, v_abs_adjustment);
+ const uint8x16_t v_neg_adjustment =
+ vandq_u8(v_diff_neg_mask, v_abs_adjustment);
+
+ uint8x16_t v_running_avg_y = vqaddq_u8(v_sig, v_pos_adjustment);
+ v_running_avg_y = vqsubq_u8(v_running_avg_y, v_neg_adjustment);
+
+ // Store results.
+ vst1q_u8(running_avg_y, v_running_avg_y);
+
+ // Sum all the accumulators to have the sum of all pixel differences
+ // for this macroblock.
+ {
+ const int8x16_t v_sum_diff =
+ vqsubq_s8(vreinterpretq_s8_u8(v_pos_adjustment),
+ vreinterpretq_s8_u8(v_neg_adjustment));
+ const int16x8_t fe_dc_ba_98_76_54_32_10 = vpaddlq_s8(v_sum_diff);
+ const int32x4_t fedc_ba98_7654_3210 =
+ vpaddlq_s16(fe_dc_ba_98_76_54_32_10);
+ const int64x2_t fedcba98_76543210 = vpaddlq_s32(fedc_ba98_7654_3210);
+
+ v_sum_diff_total = vqaddq_s64(v_sum_diff_total, fedcba98_76543210);
+ }
+
+ // Update pointers for next iteration.
+ sig += sig_stride;
+ mc_running_avg_y += mc_running_avg_y_stride;
+ running_avg_y += running_avg_y_stride;
+ }
+
+ // Too many adjustments => copy the block.
+ {
+ int64x1_t x = vqadd_s64(vget_high_s64(v_sum_diff_total),
+ vget_low_s64(v_sum_diff_total));
+ int sum_diff = vget_lane_s32(vabs_s32(vreinterpret_s32_s64(x)), 0);
+ int sum_diff_thresh = kSumDiffThreshold;
+
+ if (increase_denoising)
+ sum_diff_thresh = kSumDiffThresholdHigh;
+ if (sum_diff > sum_diff_thresh) {
+ // Before returning to copy the block (i.e., apply no denoising),
+ // check if we can still apply some (weaker) temporal filtering to
+ // this block, that would otherwise not be denoised at all. Simplest
+ // is to apply an additional adjustment to running_avg_y to bring it
+ // closer to sig. The adjustment is capped by a maximum delta, and
+ // chosen such that in most cases the resulting sum_diff will be
+ // within the acceptable range given by sum_diff_thresh.
+
+ // The delta is set by the excess of absolute pixel diff over the
+ // threshold.
+ int delta = ((sum_diff - sum_diff_thresh) >> 8) + 1;
+ // Only apply the adjustment for max delta up to 3.
+ if (delta < 4) {
+ const uint8x16_t k_delta = vmovq_n_u8(delta);
+ sig -= sig_stride * 16;
+ mc_running_avg_y -= mc_running_avg_y_stride * 16;
+ running_avg_y -= running_avg_y_stride * 16;
+ for (int r = 0; r < 16; ++r) {
+ uint8x16_t v_running_avg_y = vld1q_u8(running_avg_y);
+ const uint8x16_t v_sig = vld1q_u8(sig);
+ const uint8x16_t v_mc_running_avg_y = vld1q_u8(mc_running_avg_y);
+
+ // Calculate absolute difference and sign masks.
+ const uint8x16_t v_abs_diff = vabdq_u8(v_sig, v_mc_running_avg_y);
+ const uint8x16_t v_diff_pos_mask =
+ vcltq_u8(v_sig, v_mc_running_avg_y);
+ const uint8x16_t v_diff_neg_mask =
+ vcgtq_u8(v_sig, v_mc_running_avg_y);
+ // Clamp absolute difference to delta to get the adjustment.
+ const uint8x16_t v_abs_adjustment = vminq_u8(v_abs_diff, (k_delta));
+
+ const uint8x16_t v_pos_adjustment =
+ vandq_u8(v_diff_pos_mask, v_abs_adjustment);
+ const uint8x16_t v_neg_adjustment =
+ vandq_u8(v_diff_neg_mask, v_abs_adjustment);
+
+ v_running_avg_y = vqsubq_u8(v_running_avg_y, v_pos_adjustment);
+ v_running_avg_y = vqaddq_u8(v_running_avg_y, v_neg_adjustment);
+
+ // Store results.
+ vst1q_u8(running_avg_y, v_running_avg_y);
+
+ {
+ const int8x16_t v_sum_diff =
+ vqsubq_s8(vreinterpretq_s8_u8(v_neg_adjustment),
+ vreinterpretq_s8_u8(v_pos_adjustment));
+
+ const int16x8_t fe_dc_ba_98_76_54_32_10 = vpaddlq_s8(v_sum_diff);
+ const int32x4_t fedc_ba98_7654_3210 =
+ vpaddlq_s16(fe_dc_ba_98_76_54_32_10);
+ const int64x2_t fedcba98_76543210 =
+ vpaddlq_s32(fedc_ba98_7654_3210);
+
+ v_sum_diff_total = vqaddq_s64(v_sum_diff_total, fedcba98_76543210);
+ }
+ // Update pointers for next iteration.
+ sig += sig_stride;
+ mc_running_avg_y += mc_running_avg_y_stride;
+ running_avg_y += running_avg_y_stride;
+ }
+ {
+ // Update the sum of all pixel differences of this MB.
+ x = vqadd_s64(vget_high_s64(v_sum_diff_total),
+ vget_low_s64(v_sum_diff_total));
+ sum_diff = vget_lane_s32(vabs_s32(vreinterpret_s32_s64(x)), 0);
+
+ if (sum_diff > sum_diff_thresh) {
+ return COPY_BLOCK;
+ }
+ }
+ } else {
+ return COPY_BLOCK;
+ }
+ }
+ }
+
+ // Tell above level that block was filtered.
+ running_avg_y -= running_avg_y_stride * 16;
+ sig -= sig_stride * 16;
+
+ return FILTER_BLOCK;
+}
+
+} // namespace webrtc
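
For clarity, a scalar equivalent of the reduction helpers above: the
vpaddlq_s16/vpaddlq_s32 chains are pairwise-widening adds that collapse the
vector lanes into a single sum.

#include <stdint.h>

static int HorizontalAddS16x8Reference(const int16_t v[8]) {
  int sum = 0;
  for (int i = 0; i < 8; ++i)
    sum += v[i];  // Same result as the NEON pairwise-add chain.
  return sum;
}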
diff --git a/webrtc/modules/video_processing/util/denoiser_filter_neon.h b/webrtc/modules/video_processing/util/denoiser_filter_neon.h
new file mode 100644
index 0000000000..bc87ba788e
--- /dev/null
+++ b/webrtc/modules/video_processing/util/denoiser_filter_neon.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_PROCESSING_UTIL_DENOISER_FILTER_NEON_H_
+#define WEBRTC_MODULES_VIDEO_PROCESSING_UTIL_DENOISER_FILTER_NEON_H_
+
+#include "webrtc/modules/video_processing/util/denoiser_filter.h"
+
+namespace webrtc {
+
+class DenoiserFilterNEON : public DenoiserFilter {
+ public:
+ DenoiserFilterNEON() {}
+ void CopyMem16x16(const uint8_t* src,
+ int src_stride,
+ uint8_t* dst,
+ int dst_stride) override;
+ void CopyMem8x8(const uint8_t* src,
+ int src_stride,
+ uint8_t* dst,
+ int dst_stride) override;
+ uint32_t Variance16x8(const uint8_t* a,
+ int a_stride,
+ const uint8_t* b,
+ int b_stride,
+ unsigned int* sse) override;
+ DenoiserDecision MbDenoise(uint8_t* mc_running_avg_y,
+ int mc_avg_y_stride,
+ uint8_t* running_avg_y,
+ int avg_y_stride,
+ const uint8_t* sig,
+ int sig_stride,
+ uint8_t motion_magnitude,
+ int increase_denoising) override;
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_PROCESSING_UTIL_DENOISER_FILTER_NEON_H_
diff --git a/webrtc/modules/video_processing/util/denoiser_filter_sse2.cc b/webrtc/modules/video_processing/util/denoiser_filter_sse2.cc
new file mode 100644
index 0000000000..903d7b1ec6
--- /dev/null
+++ b/webrtc/modules/video_processing/util/denoiser_filter_sse2.cc
@@ -0,0 +1,280 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <emmintrin.h>
+
+#include "webrtc/modules/video_processing/util/denoiser_filter_sse2.h"
+
+namespace webrtc {
+
+static void Get8x8varSse2(const uint8_t* src,
+ int src_stride,
+ const uint8_t* ref,
+ int ref_stride,
+ unsigned int* sse,
+ int* sum) {
+ const __m128i zero = _mm_setzero_si128();
+ __m128i vsum = _mm_setzero_si128();
+ __m128i vsse = _mm_setzero_si128();
+
+ for (int i = 0; i < 8; i += 2) {
+ const __m128i src0 = _mm_unpacklo_epi8(
+ _mm_loadl_epi64((const __m128i*)(src + i * src_stride)), zero);
+ const __m128i ref0 = _mm_unpacklo_epi8(
+ _mm_loadl_epi64((const __m128i*)(ref + i * ref_stride)), zero);
+ const __m128i diff0 = _mm_sub_epi16(src0, ref0);
+
+ const __m128i src1 = _mm_unpacklo_epi8(
+ _mm_loadl_epi64((const __m128i*)(src + (i + 1) * src_stride)), zero);
+ const __m128i ref1 = _mm_unpacklo_epi8(
+ _mm_loadl_epi64((const __m128i*)(ref + (i + 1) * ref_stride)), zero);
+ const __m128i diff1 = _mm_sub_epi16(src1, ref1);
+
+ vsum = _mm_add_epi16(vsum, diff0);
+ vsum = _mm_add_epi16(vsum, diff1);
+ vsse = _mm_add_epi32(vsse, _mm_madd_epi16(diff0, diff0));
+ vsse = _mm_add_epi32(vsse, _mm_madd_epi16(diff1, diff1));
+ }
+
+ // sum
+ vsum = _mm_add_epi16(vsum, _mm_srli_si128(vsum, 8));
+ vsum = _mm_add_epi16(vsum, _mm_srli_si128(vsum, 4));
+ vsum = _mm_add_epi16(vsum, _mm_srli_si128(vsum, 2));
+ *sum = static_cast<int16_t>(_mm_extract_epi16(vsum, 0));
+
+ // sse
+ vsse = _mm_add_epi32(vsse, _mm_srli_si128(vsse, 8));
+ vsse = _mm_add_epi32(vsse, _mm_srli_si128(vsse, 4));
+ *sse = _mm_cvtsi128_si32(vsse);
+}
+
+static void VarianceSSE2(const unsigned char* src,
+ int src_stride,
+ const unsigned char* ref,
+ int ref_stride,
+ int w,
+ int h,
+ uint32_t* sse,
+ int64_t* sum,
+ int block_size) {
+ *sse = 0;
+ *sum = 0;
+
+ for (int i = 0; i < h; i += block_size) {
+ for (int j = 0; j < w; j += block_size) {
+ uint32_t sse0 = 0;
+ int32_t sum0 = 0;
+
+ Get8x8varSse2(src + src_stride * i + j, src_stride,
+ ref + ref_stride * i + j, ref_stride, &sse0, &sum0);
+ *sse += sse0;
+ *sum += sum0;
+ }
+ }
+}
+
+// Compute the sum of all pixel differences of this MB.
+static uint32_t AbsSumDiff16x1(__m128i acc_diff) {
+ const __m128i k_1 = _mm_set1_epi16(1);
+ const __m128i acc_diff_lo =
+ _mm_srai_epi16(_mm_unpacklo_epi8(acc_diff, acc_diff), 8);
+ const __m128i acc_diff_hi =
+ _mm_srai_epi16(_mm_unpackhi_epi8(acc_diff, acc_diff), 8);
+ const __m128i acc_diff_16 = _mm_add_epi16(acc_diff_lo, acc_diff_hi);
+ const __m128i hg_fe_dc_ba = _mm_madd_epi16(acc_diff_16, k_1);
+ const __m128i hgfe_dcba =
+ _mm_add_epi32(hg_fe_dc_ba, _mm_srli_si128(hg_fe_dc_ba, 8));
+ const __m128i hgfedcba =
+ _mm_add_epi32(hgfe_dcba, _mm_srli_si128(hgfe_dcba, 4));
+ unsigned int sum_diff = abs(_mm_cvtsi128_si32(hgfedcba));
+
+ return sum_diff;
+}
+
+// TODO(jackychen): Optimize this function using SSE2.
+void DenoiserFilterSSE2::CopyMem16x16(const uint8_t* src,
+ int src_stride,
+ uint8_t* dst,
+ int dst_stride) {
+ for (int i = 0; i < 16; i++) {
+ memcpy(dst, src, 16);
+ src += src_stride;
+ dst += dst_stride;
+ }
+}
+
+// TODO(jackychen): Optimize this function using SSE2.
+void DenoiserFilterSSE2::CopyMem8x8(const uint8_t* src,
+ int src_stride,
+ uint8_t* dst,
+ int dst_stride) {
+ for (int i = 0; i < 8; i++) {
+ memcpy(dst, src, 8);
+ src += src_stride;
+ dst += dst_stride;
+ }
+}
+
+uint32_t DenoiserFilterSSE2::Variance16x8(const uint8_t* src,
+ int src_stride,
+ const uint8_t* ref,
+ int ref_stride,
+ uint32_t* sse) {
+ int64_t sum = 0;
+ VarianceSSE2(src, src_stride << 1, ref, ref_stride << 1, 16, 8, sse, &sum, 8);
+ return *sse - ((sum * sum) >> 7);
+}
+
+DenoiserDecision DenoiserFilterSSE2::MbDenoise(uint8_t* mc_running_avg_y,
+ int mc_avg_y_stride,
+ uint8_t* running_avg_y,
+ int avg_y_stride,
+ const uint8_t* sig,
+ int sig_stride,
+ uint8_t motion_magnitude,
+ int increase_denoising) {
+ int shift_inc =
+ (increase_denoising && motion_magnitude <= kMotionMagnitudeThreshold) ? 1
+ : 0;
+ __m128i acc_diff = _mm_setzero_si128();
+ const __m128i k_0 = _mm_setzero_si128();
+ const __m128i k_4 = _mm_set1_epi8(4 + shift_inc);
+ const __m128i k_8 = _mm_set1_epi8(8);
+ const __m128i k_16 = _mm_set1_epi8(16);
+ // Modify each level's adjustment according to motion_magnitude.
+ const __m128i l3 = _mm_set1_epi8(
+ (motion_magnitude <= kMotionMagnitudeThreshold) ? 7 + shift_inc : 6);
+ // Difference between level 3 and level 2 is 2.
+ const __m128i l32 = _mm_set1_epi8(2);
+ // Difference between level 2 and level 1 is 1.
+ const __m128i l21 = _mm_set1_epi8(1);
+
+ for (int r = 0; r < 16; ++r) {
+ // Calculate differences.
+ const __m128i v_sig =
+ _mm_loadu_si128(reinterpret_cast<const __m128i*>(&sig[0]));
+ const __m128i v_mc_running_avg_y =
+ _mm_loadu_si128(reinterpret_cast<__m128i*>(&mc_running_avg_y[0]));
+ __m128i v_running_avg_y;
+ const __m128i pdiff = _mm_subs_epu8(v_mc_running_avg_y, v_sig);
+ const __m128i ndiff = _mm_subs_epu8(v_sig, v_mc_running_avg_y);
+ // Obtain the sign. FF if diff is negative.
+ const __m128i diff_sign = _mm_cmpeq_epi8(pdiff, k_0);
+ // Clamp absolute difference to 16 to be used to get mask. Doing this
+ // allows us to use _mm_cmpgt_epi8, which operates on signed byte.
+ const __m128i clamped_absdiff =
+ _mm_min_epu8(_mm_or_si128(pdiff, ndiff), k_16);
+ // Get masks for l2 l1 and l0 adjustments.
+ const __m128i mask2 = _mm_cmpgt_epi8(k_16, clamped_absdiff);
+ const __m128i mask1 = _mm_cmpgt_epi8(k_8, clamped_absdiff);
+ const __m128i mask0 = _mm_cmpgt_epi8(k_4, clamped_absdiff);
+ // Get adjustments for l2, l1, and l0.
+ __m128i adj2 = _mm_and_si128(mask2, l32);
+ const __m128i adj1 = _mm_and_si128(mask1, l21);
+ const __m128i adj0 = _mm_and_si128(mask0, clamped_absdiff);
+ __m128i adj, padj, nadj;
+
+ // Combine the adjustments and get absolute adjustments.
+ adj2 = _mm_add_epi8(adj2, adj1);
+ adj = _mm_sub_epi8(l3, adj2);
+ adj = _mm_andnot_si128(mask0, adj);
+ adj = _mm_or_si128(adj, adj0);
+
+ // Restore the sign and get positive and negative adjustments.
+ padj = _mm_andnot_si128(diff_sign, adj);
+ nadj = _mm_and_si128(diff_sign, adj);
+
+ // Calculate filtered value.
+ v_running_avg_y = _mm_adds_epu8(v_sig, padj);
+ v_running_avg_y = _mm_subs_epu8(v_running_avg_y, nadj);
+ _mm_storeu_si128(reinterpret_cast<__m128i*>(running_avg_y),
+ v_running_avg_y);
+
+ // Adjustments <=7, and each element in acc_diff can fit in signed
+ // char.
+ acc_diff = _mm_adds_epi8(acc_diff, padj);
+ acc_diff = _mm_subs_epi8(acc_diff, nadj);
+
+ // Update pointers for next iteration.
+ sig += sig_stride;
+ mc_running_avg_y += mc_avg_y_stride;
+ running_avg_y += avg_y_stride;
+ }
+
+ {
+ // Compute the sum of all pixel differences of this MB.
+ unsigned int abs_sum_diff = AbsSumDiff16x1(acc_diff);
+ unsigned int sum_diff_thresh = kSumDiffThreshold;
+ if (increase_denoising)
+ sum_diff_thresh = kSumDiffThresholdHigh;
+ if (abs_sum_diff > sum_diff_thresh) {
+ // Before returning to copy the block (i.e., apply no denoising),
+ // check if we can still apply some (weaker) temporal filtering to
+ // this block, that would otherwise not be denoised at all. Simplest
+ // is to apply an additional adjustment to running_avg_y to bring it
+ // closer to sig. The adjustment is capped by a maximum delta, and
+ // chosen such that in most cases the resulting sum_diff will be
+ // within the acceptable range given by sum_diff_thresh.
+
+ // The delta is set by the excess of absolute pixel diff over the
+ // threshold.
+ int delta = ((abs_sum_diff - sum_diff_thresh) >> 8) + 1;
+ // Only apply the adjustment for max delta up to 3.
+ if (delta < 4) {
+ const __m128i k_delta = _mm_set1_epi8(delta);
+ sig -= sig_stride * 16;
+ mc_running_avg_y -= mc_avg_y_stride * 16;
+ running_avg_y -= avg_y_stride * 16;
+ for (int r = 0; r < 16; ++r) {
+ __m128i v_running_avg_y =
+ _mm_loadu_si128(reinterpret_cast<__m128i*>(&running_avg_y[0]));
+ // Calculate differences.
+ const __m128i v_sig =
+ _mm_loadu_si128(reinterpret_cast<const __m128i*>(&sig[0]));
+ const __m128i v_mc_running_avg_y =
+ _mm_loadu_si128(reinterpret_cast<__m128i*>(&mc_running_avg_y[0]));
+ const __m128i pdiff = _mm_subs_epu8(v_mc_running_avg_y, v_sig);
+ const __m128i ndiff = _mm_subs_epu8(v_sig, v_mc_running_avg_y);
+ // Obtain the sign. FF if diff is negative.
+ const __m128i diff_sign = _mm_cmpeq_epi8(pdiff, k_0);
+ // Clamp absolute difference to delta to get the adjustment.
+ const __m128i adj = _mm_min_epu8(_mm_or_si128(pdiff, ndiff), k_delta);
+ // Restore the sign and get positive and negative adjustments.
+ __m128i padj, nadj;
+ padj = _mm_andnot_si128(diff_sign, adj);
+ nadj = _mm_and_si128(diff_sign, adj);
+ // Calculate filtered value.
+ v_running_avg_y = _mm_subs_epu8(v_running_avg_y, padj);
+ v_running_avg_y = _mm_adds_epu8(v_running_avg_y, nadj);
+ _mm_storeu_si128(reinterpret_cast<__m128i*>(running_avg_y),
+ v_running_avg_y);
+
+ // Accumulate the adjustments.
+ acc_diff = _mm_subs_epi8(acc_diff, padj);
+ acc_diff = _mm_adds_epi8(acc_diff, nadj);
+
+ // Update pointers for next iteration.
+ sig += sig_stride;
+ mc_running_avg_y += mc_avg_y_stride;
+ running_avg_y += avg_y_stride;
+ }
+ abs_sum_diff = AbsSumDiff16x1(acc_diff);
+ if (abs_sum_diff > sum_diff_thresh) {
+ return COPY_BLOCK;
+ }
+ } else {
+ return COPY_BLOCK;
+ }
+ }
+ }
+ return FILTER_BLOCK;
+}
+
+} // namespace webrtc
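
A scalar view of AbsSumDiff16x1() above: |acc_diff| holds sixteen signed
8-bit per-column accumulators, and the intrinsics sign-extend them, add them
pairwise and take the absolute value of the total.

#include <stdint.h>
#include <stdlib.h>

static unsigned int AbsSumDiff16x1Reference(const int8_t acc[16]) {
  int sum = 0;
  for (int i = 0; i < 16; ++i)
    sum += acc[i];
  return static_cast<unsigned int>(abs(sum));
}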
diff --git a/webrtc/modules/video_processing/util/denoiser_filter_sse2.h b/webrtc/modules/video_processing/util/denoiser_filter_sse2.h
new file mode 100644
index 0000000000..31d8510902
--- /dev/null
+++ b/webrtc/modules/video_processing/util/denoiser_filter_sse2.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_PROCESSING_UTIL_DENOISER_FILTER_SSE2_H_
+#define WEBRTC_MODULES_VIDEO_PROCESSING_UTIL_DENOISER_FILTER_SSE2_H_
+
+#include "webrtc/modules/video_processing/util/denoiser_filter.h"
+
+namespace webrtc {
+
+class DenoiserFilterSSE2 : public DenoiserFilter {
+ public:
+ DenoiserFilterSSE2() {}
+ void CopyMem16x16(const uint8_t* src,
+ int src_stride,
+ uint8_t* dst,
+ int dst_stride) override;
+ void CopyMem8x8(const uint8_t* src,
+ int src_stride,
+ uint8_t* dst,
+ int dst_stride) override;
+ uint32_t Variance16x8(const uint8_t* a,
+ int a_stride,
+ const uint8_t* b,
+ int b_stride,
+ unsigned int* sse) override;
+ DenoiserDecision MbDenoise(uint8_t* mc_running_avg_y,
+ int mc_avg_y_stride,
+ uint8_t* running_avg_y,
+ int avg_y_stride,
+ const uint8_t* sig,
+ int sig_stride,
+ uint8_t motion_magnitude,
+ int increase_denoising) override;
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_PROCESSING_UTIL_DENOISER_FILTER_SSE2_H_
diff --git a/webrtc/modules/video_processing/util/skin_detection.cc b/webrtc/modules/video_processing/util/skin_detection.cc
new file mode 100644
index 0000000000..bf631ce2f6
--- /dev/null
+++ b/webrtc/modules/video_processing/util/skin_detection.cc
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <limits.h>
+#include <math.h>
+
+#include "webrtc/modules/video_processing/util/skin_detection.h"
+
+namespace webrtc {
+
+// Fixed-point skin color model parameters.
+static const int skin_mean[2] = {7463, 9614}; // q6
+static const int skin_inv_cov[4] = {4107, 1663, 1663, 2157}; // q16
+static const int skin_threshold = 1570636; // q18
+
+// Thresholds on luminance.
+static const int y_low = 20;
+static const int y_high = 220;
+
+// Evaluates the Mahalanobis distance measure for the input CbCr values.
+static int EvaluateSkinColorDifference(int cb, int cr) {
+ const int cb_q6 = cb << 6;
+ const int cr_q6 = cr << 6;
+ const int cb_diff_q12 = (cb_q6 - skin_mean[0]) * (cb_q6 - skin_mean[0]);
+ const int cbcr_diff_q12 = (cb_q6 - skin_mean[0]) * (cr_q6 - skin_mean[1]);
+ const int cr_diff_q12 = (cr_q6 - skin_mean[1]) * (cr_q6 - skin_mean[1]);
+ const int cb_diff_q2 = (cb_diff_q12 + (1 << 9)) >> 10;
+ const int cbcr_diff_q2 = (cbcr_diff_q12 + (1 << 9)) >> 10;
+ const int cr_diff_q2 = (cr_diff_q12 + (1 << 9)) >> 10;
+ const int skin_diff =
+ skin_inv_cov[0] * cb_diff_q2 + skin_inv_cov[1] * cbcr_diff_q2 +
+ skin_inv_cov[2] * cbcr_diff_q2 + skin_inv_cov[3] * cr_diff_q2;
+ return skin_diff;
+}
+
+bool MbHasSkinColor(const uint8_t* y_src,
+ const uint8_t* u_src,
+ const uint8_t* v_src,
+ const int stride_y,
+ const int stride_u,
+ const int stride_v,
+ const int mb_row,
+ const int mb_col) {
+ const uint8_t* y = y_src + ((mb_row << 4) + 8) * stride_y + (mb_col << 4) + 8;
+ const uint8_t* u = u_src + ((mb_row << 3) + 4) * stride_u + (mb_col << 3) + 4;
+ const uint8_t* v = v_src + ((mb_row << 3) + 4) * stride_v + (mb_col << 3) + 4;
+ // Use the 2x2 average around the center pixel to classify the MB.
+ uint8_t y_avg = (*y + *(y + 1) + *(y + stride_y) + *(y + stride_y + 1)) >> 2;
+ uint8_t u_avg = (*u + *(u + 1) + *(u + stride_u) + *(u + stride_u + 1)) >> 2;
+ uint8_t v_avg = (*v + *(v + 1) + *(v + stride_v) + *(v + stride_v + 1)) >> 2;
+ // Ignore MB with too high or low brightness.
+ if (y_avg < y_low || y_avg > y_high)
+ return false;
+ else
+ return (EvaluateSkinColorDifference(u_avg, v_avg) < skin_threshold);
+}
+
+} // namespace webrtc
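
A floating-point rendering of the fixed-point model above (a sketch for
clarity, not a drop-in replacement): skin_mean is q6, skin_inv_cov is q16
and skin_threshold is q18, so the test is the Mahalanobis distance
d = (x - mu)^T * InvCov * (x - mu) < ~5.99 on the (Cb, Cr) pair.

static bool IsSkinColorFloat(double cb, double cr) {
  const double mu_cb = 7463.0 / 64.0;   // ~116.6, skin_mean[0] unscaled.
  const double mu_cr = 9614.0 / 64.0;   // ~150.2, skin_mean[1] unscaled.
  const double inv00 = 4107.0 / 65536.0;
  const double inv01 = 1663.0 / 65536.0;
  const double inv11 = 2157.0 / 65536.0;
  const double dcb = cb - mu_cb;
  const double dcr = cr - mu_cr;
  const double d =
      inv00 * dcb * dcb + 2.0 * inv01 * dcb * dcr + inv11 * dcr * dcr;
  return d < 1570636.0 / 262144.0;  // skin_threshold unscaled, ~5.99.
}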
diff --git a/webrtc/modules/video_processing/util/skin_detection.h b/webrtc/modules/video_processing/util/skin_detection.h
new file mode 100755
index 0000000000..561c03c425
--- /dev/null
+++ b/webrtc/modules/video_processing/util/skin_detection.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_PROCESSING_UTIL_SKIN_DETECTION_H_
+#define WEBRTC_MODULES_VIDEO_PROCESSING_UTIL_SKIN_DETECTION_H_
+
+namespace webrtc {
+
+typedef unsigned char uint8_t;
+bool MbHasSkinColor(const uint8_t* y_src,
+ const uint8_t* u_src,
+ const uint8_t* v_src,
+ const int stride_y,
+ const int stride_u,
+ const int stride_v,
+ const int mb_row,
+ const int mb_col);
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_PROCESSING_UTIL_SKIN_DETECTION_H_
diff --git a/webrtc/modules/video_processing/main/source/video_decimator.cc b/webrtc/modules/video_processing/video_decimator.cc
index 34c29c1677..63e347b026 100644
--- a/webrtc/modules/video_processing/main/source/video_decimator.cc
+++ b/webrtc/modules/video_processing/video_decimator.cc
@@ -9,8 +9,8 @@
*/
#include "webrtc/base/checks.h"
-#include "webrtc/modules/video_processing/main/interface/video_processing.h"
-#include "webrtc/modules/video_processing/main/source/video_decimator.h"
+#include "webrtc/modules/video_processing/include/video_processing.h"
+#include "webrtc/modules/video_processing/video_decimator.h"
#include "webrtc/system_wrappers/include/tick_util.h"
#define VD_MIN(a, b) ((a) < (b)) ? (a) : (b)
@@ -23,7 +23,7 @@ VPMVideoDecimator::VPMVideoDecimator() {
VPMVideoDecimator::~VPMVideoDecimator() {}
-void VPMVideoDecimator::Reset() {
+void VPMVideoDecimator::Reset() {
overshoot_modifier_ = 0;
drop_count_ = 0;
keep_count_ = 0;
@@ -43,14 +43,17 @@ void VPMVideoDecimator::SetTargetFramerate(int frame_rate) {
}
bool VPMVideoDecimator::DropFrame() {
- if (!enable_temporal_decimation_) return false;
+ if (!enable_temporal_decimation_)
+ return false;
- if (incoming_frame_rate_ <= 0) return false;
+ if (incoming_frame_rate_ <= 0)
+ return false;
const uint32_t incomingframe_rate =
static_cast<uint32_t>(incoming_frame_rate_ + 0.5f);
- if (target_frame_rate_ == 0) return true;
+ if (target_frame_rate_ == 0)
+ return true;
bool drop = false;
if (incomingframe_rate > target_frame_rate_) {
@@ -61,44 +64,43 @@ bool VPMVideoDecimator::DropFrame() {
overshoot_modifier_ = 0;
}
- if (overshoot && 2 * overshoot < (int32_t) incomingframe_rate) {
+ if (overshoot && 2 * overshoot < (int32_t)incomingframe_rate) {
if (drop_count_) { // Just got here so drop to be sure.
- drop_count_ = 0;
- return true;
+ drop_count_ = 0;
+ return true;
}
const uint32_t dropVar = incomingframe_rate / overshoot;
if (keep_count_ >= dropVar) {
- drop = true;
- overshoot_modifier_ = -((int32_t) incomingframe_rate % overshoot) / 3;
- keep_count_ = 1;
+ drop = true;
+ overshoot_modifier_ = -((int32_t)incomingframe_rate % overshoot) / 3;
+ keep_count_ = 1;
} else {
- keep_count_++;
+ keep_count_++;
}
} else {
keep_count_ = 0;
const uint32_t dropVar = overshoot / target_frame_rate_;
if (drop_count_ < dropVar) {
- drop = true;
- drop_count_++;
+ drop = true;
+ drop_count_++;
} else {
- overshoot_modifier_ = overshoot % target_frame_rate_;
- drop = false;
- drop_count_ = 0;
+ overshoot_modifier_ = overshoot % target_frame_rate_;
+ drop = false;
+ drop_count_ = 0;
}
}
}
return drop;
}
-
-uint32_t VPMVideoDecimator::Decimatedframe_rate() {
-ProcessIncomingframe_rate(TickTime::MillisecondTimestamp());
+uint32_t VPMVideoDecimator::GetDecimatedFrameRate() {
+ ProcessIncomingframe_rate(TickTime::MillisecondTimestamp());
if (!enable_temporal_decimation_) {
return static_cast<uint32_t>(incoming_frame_rate_ + 0.5f);
}
return VD_MIN(target_frame_rate_,
- static_cast<uint32_t>(incoming_frame_rate_ + 0.5f));
+ static_cast<uint32_t>(incoming_frame_rate_ + 0.5f));
}
uint32_t VPMVideoDecimator::Inputframe_rate() {
@@ -113,7 +115,7 @@ void VPMVideoDecimator::UpdateIncomingframe_rate() {
} else {
// Shift.
for (int i = kFrameCountHistory_size - 2; i >= 0; i--) {
- incoming_frame_times_[i+1] = incoming_frame_times_[i];
+ incoming_frame_times_[i + 1] = incoming_frame_times_[i];
}
}
incoming_frame_times_[0] = now;
@@ -133,7 +135,7 @@ void VPMVideoDecimator::ProcessIncomingframe_rate(int64_t now) {
}
}
if (num > 1) {
- int64_t diff = now - incoming_frame_times_[num-1];
+ int64_t diff = now - incoming_frame_times_[num - 1];
incoming_frame_rate_ = 1.0;
if (diff > 0) {
incoming_frame_rate_ = nrOfFrames * 1000.0f / static_cast<float>(diff);
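
A worked pass through DropFrame(), assuming a steady 30 fps input and a
15 fps target: overshoot = 30 - 15 = 15, and 2 * 15 is not less than 30, so
the second branch runs with dropVar = overshoot / target_frame_rate_ = 1.
The decimator then alternates one dropped frame (drop_count_ goes 0 -> 1)
with one kept frame (drop_count_ resets and overshoot_modifier_ becomes
15 % 15 = 0), halving the stream to the requested 15 fps.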
diff --git a/webrtc/modules/video_processing/main/source/video_decimator.h b/webrtc/modules/video_processing/video_decimator.h
index 3d4573caf8..1b871df8c3 100644
--- a/webrtc/modules/video_processing/main/source/video_decimator.h
+++ b/webrtc/modules/video_processing/video_decimator.h
@@ -8,10 +8,10 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_VIDEO_PROCESSING_MAIN_SOURCE_VIDEO_DECIMATOR_H
-#define WEBRTC_MODULES_VIDEO_PROCESSING_MAIN_SOURCE_VIDEO_DECIMATOR_H
+#ifndef WEBRTC_MODULES_VIDEO_PROCESSING_VIDEO_DECIMATOR_H_
+#define WEBRTC_MODULES_VIDEO_PROCESSING_VIDEO_DECIMATOR_H_
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/typedefs.h"
namespace webrtc {
@@ -32,7 +32,7 @@ class VPMVideoDecimator {
void UpdateIncomingframe_rate();
// Get Decimated Frame Rate/Dimensions.
- uint32_t Decimatedframe_rate();
+ uint32_t GetDecimatedFrameRate();
// Get input frame rate.
uint32_t Inputframe_rate();
@@ -40,8 +40,8 @@ class VPMVideoDecimator {
private:
void ProcessIncomingframe_rate(int64_t now);
- enum { kFrameCountHistory_size = 90};
- enum { kFrameHistoryWindowMs = 2000};
+ enum { kFrameCountHistory_size = 90 };
+ enum { kFrameHistoryWindowMs = 2000 };
// Temporal decimation.
int32_t overshoot_modifier_;
@@ -55,4 +55,4 @@ class VPMVideoDecimator {
} // namespace webrtc
-#endif // WEBRTC_MODULES_VIDEO_PROCESSING_MAIN_SOURCE_VIDEO_DECIMATOR_H
+#endif // WEBRTC_MODULES_VIDEO_PROCESSING_VIDEO_DECIMATOR_H_
diff --git a/webrtc/modules/video_processing/video_denoiser.cc b/webrtc/modules/video_processing/video_denoiser.cc
new file mode 100644
index 0000000000..4902a89491
--- /dev/null
+++ b/webrtc/modules/video_processing/video_denoiser.cc
@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "webrtc/common_video/libyuv/include/scaler.h"
+#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
+#include "webrtc/modules/video_processing/video_denoiser.h"
+
+namespace webrtc {
+
+VideoDenoiser::VideoDenoiser(bool runtime_cpu_detection)
+ : width_(0),
+ height_(0),
+ filter_(DenoiserFilter::Create(runtime_cpu_detection)) {}
+
+void VideoDenoiser::TrailingReduction(int mb_rows,
+ int mb_cols,
+ const uint8_t* y_src,
+ int stride_y,
+ uint8_t* y_dst) {
+ for (int mb_row = 1; mb_row < mb_rows - 1; ++mb_row) {
+ for (int mb_col = 1; mb_col < mb_cols - 1; ++mb_col) {
+ int mb_index = mb_row * mb_cols + mb_col;
+ uint8_t* mb_dst = y_dst + (mb_row << 4) * stride_y + (mb_col << 4);
+ const uint8_t* mb_src = y_src + (mb_row << 4) * stride_y + (mb_col << 4);
+      // If the number of denoised neighbors is below a threshold, do NOT
+      // denoise the block; a different threshold is used for skin MBs.
+      // The change of denoising status does not propagate.
+ if (metrics_[mb_index].is_skin) {
+        // The threshold is higher (more strict) for non-skin MBs, where
+        // trailing artifacts usually happen.
+ if (metrics_[mb_index].denoise &&
+ metrics_[mb_index + 1].denoise + metrics_[mb_index - 1].denoise +
+ metrics_[mb_index + mb_cols].denoise +
+ metrics_[mb_index - mb_cols].denoise <=
+ 2) {
+ metrics_[mb_index].denoise = 0;
+ filter_->CopyMem16x16(mb_src, stride_y, mb_dst, stride_y);
+ }
+ } else if (metrics_[mb_index].denoise &&
+ metrics_[mb_index + 1].denoise +
+ metrics_[mb_index - 1].denoise +
+ metrics_[mb_index + mb_cols + 1].denoise +
+ metrics_[mb_index + mb_cols - 1].denoise +
+ metrics_[mb_index - mb_cols + 1].denoise +
+ metrics_[mb_index - mb_cols - 1].denoise +
+ metrics_[mb_index + mb_cols].denoise +
+ metrics_[mb_index - mb_cols].denoise <=
+ 7) {
+ filter_->CopyMem16x16(mb_src, stride_y, mb_dst, stride_y);
+ }
+ }
+ }
+}
+
+void VideoDenoiser::DenoiseFrame(const VideoFrame& frame,
+ VideoFrame* denoised_frame) {
+ int stride_y = frame.stride(kYPlane);
+ int stride_u = frame.stride(kUPlane);
+ int stride_v = frame.stride(kVPlane);
+  // If the frame size differs from the previous frame's, skip denoising and
+  // copy the current frame through unchanged.
+ if (width_ != frame.width() || height_ != frame.height()) {
+ width_ = frame.width();
+ height_ = frame.height();
+ denoised_frame->CreateFrame(frame.buffer(kYPlane), frame.buffer(kUPlane),
+ frame.buffer(kVPlane), width_, height_,
+ stride_y, stride_u, stride_v);
+    // Copy the timing parameters to the output frame.
+ denoised_frame->set_timestamp(frame.timestamp());
+ denoised_frame->set_render_time_ms(frame.render_time_ms());
+ return;
+ }
+  // Frame dimensions in 16x16 macroblocks.
+ int mb_cols = width_ >> 4;
+ int mb_rows = height_ >> 4;
+ if (metrics_.get() == nullptr)
+ metrics_.reset(new DenoiseMetrics[mb_cols * mb_rows]());
+ // Denoise on Y plane.
+ uint8_t* y_dst = denoised_frame->buffer(kYPlane);
+ uint8_t* u_dst = denoised_frame->buffer(kUPlane);
+ uint8_t* v_dst = denoised_frame->buffer(kVPlane);
+ const uint8_t* y_src = frame.buffer(kYPlane);
+ const uint8_t* u_src = frame.buffer(kUPlane);
+ const uint8_t* v_src = frame.buffer(kVPlane);
+ // Temporary buffer to store denoising result.
+ uint8_t y_tmp[16 * 16] = {0};
+ for (int mb_row = 0; mb_row < mb_rows; ++mb_row) {
+ for (int mb_col = 0; mb_col < mb_cols; ++mb_col) {
+ const uint8_t* mb_src = y_src + (mb_row << 4) * stride_y + (mb_col << 4);
+ uint8_t* mb_dst = y_dst + (mb_row << 4) * stride_y + (mb_col << 4);
+ int mb_index = mb_row * mb_cols + mb_col;
+ // Denoise each MB at the very start and save the result to a temporary
+ // buffer.
+ if (filter_->MbDenoise(mb_dst, stride_y, y_tmp, 16, mb_src, stride_y, 0,
+ 1) == FILTER_BLOCK) {
+ uint32_t thr_var = 0;
+ // Save var and sad to the buffer.
+ metrics_[mb_index].var = filter_->Variance16x8(
+ mb_dst, stride_y, y_tmp, 16, &metrics_[mb_index].sad);
+ // Get skin map.
+ metrics_[mb_index].is_skin = MbHasSkinColor(
+ y_src, u_src, v_src, stride_y, stride_u, stride_v, mb_row, mb_col);
+        // The variance threshold differs for skin and non-skin MBs;
+        // skin MBs use a small threshold to reduce blockiness.
+ thr_var = metrics_[mb_index].is_skin ? 128 : 12 * 128;
+ if (metrics_[mb_index].var > thr_var) {
+ metrics_[mb_index].denoise = 0;
+ // Use the source MB.
+ filter_->CopyMem16x16(mb_src, stride_y, mb_dst, stride_y);
+ } else {
+ metrics_[mb_index].denoise = 1;
+ // Use the denoised MB.
+ filter_->CopyMem16x16(y_tmp, 16, mb_dst, stride_y);
+ }
+ } else {
+ metrics_[mb_index].denoise = 0;
+ filter_->CopyMem16x16(mb_src, stride_y, mb_dst, stride_y);
+ }
+ // Copy source U/V plane.
+ const uint8_t* mb_src_u =
+ u_src + (mb_row << 3) * stride_u + (mb_col << 3);
+ const uint8_t* mb_src_v =
+ v_src + (mb_row << 3) * stride_v + (mb_col << 3);
+ uint8_t* mb_dst_u = u_dst + (mb_row << 3) * stride_u + (mb_col << 3);
+ uint8_t* mb_dst_v = v_dst + (mb_row << 3) * stride_v + (mb_col << 3);
+ filter_->CopyMem8x8(mb_src_u, stride_u, mb_dst_u, stride_u);
+ filter_->CopyMem8x8(mb_src_v, stride_v, mb_dst_v, stride_v);
+ }
+ }
+  // Second round.
+  // Reduce trailing artifacts and blockiness by referring to the
+  // neighbors' denoising status.
+ TrailingReduction(mb_rows, mb_cols, y_src, stride_y, y_dst);
+
+  // Copy the timing parameters to the output frame.
+ denoised_frame->set_timestamp(frame.timestamp());
+ denoised_frame->set_render_time_ms(frame.render_time_ms());
+ return;
+}
+
+} // namespace webrtc
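The block addressing above is the usual planar-YUV macroblock layout; the shifts are just multiplies by the block size. Hypothetical helpers (illustration only, not part of the patch) spelling it out:

    #include <cstdint>

    // A 16x16 luma MB at (mb_row, mb_col) starts mb_row * 16 rows down the
    // plane and mb_col * 16 bytes across; the patch writes these as << 4.
    inline const uint8_t* LumaMb(const uint8_t* y, int stride_y,
                                 int mb_row, int mb_col) {
      return y + (mb_row << 4) * stride_y + (mb_col << 4);
    }

    // The co-located 8x8 chroma block (half resolution) uses << 3.
    inline const uint8_t* ChromaMb(const uint8_t* c, int stride_c,
                                   int mb_row, int mb_col) {
      return c + (mb_row << 3) * stride_c + (mb_col << 3);
    }

Read together with TrailingReduction(): a skin MB keeps its denoised result only if more than two of its four cross neighbors were also denoised, while a non-skin MB is reverted to the source unless all eight surrounding MBs were denoised.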
diff --git a/webrtc/modules/video_processing/video_denoiser.h b/webrtc/modules/video_processing/video_denoiser.h
new file mode 100644
index 0000000000..107a15ca07
--- /dev/null
+++ b/webrtc/modules/video_processing/video_denoiser.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_PROCESSING_VIDEO_DENOISER_H_
+#define WEBRTC_MODULES_VIDEO_PROCESSING_VIDEO_DENOISER_H_
+
+#include "webrtc/modules/video_processing/util/denoiser_filter.h"
+#include "webrtc/modules/video_processing/util/skin_detection.h"
+
+namespace webrtc {
+
+class VideoDenoiser {
+ public:
+ explicit VideoDenoiser(bool runtime_cpu_detection);
+ void DenoiseFrame(const VideoFrame& frame, VideoFrame* denoised_frame);
+
+ private:
+ void TrailingReduction(int mb_rows,
+ int mb_cols,
+ const uint8_t* y_src,
+ int stride_y,
+ uint8_t* y_dst);
+ int width_;
+ int height_;
+ rtc::scoped_ptr<DenoiseMetrics[]> metrics_;
+ rtc::scoped_ptr<DenoiserFilter> filter_;
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_PROCESSING_VIDEO_DENOISER_H_
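A minimal usage sketch (assuming `input` is a valid webrtc::VideoFrame): the first frame after a size change only initializes internal state and is copied through; subsequent frames of the same size are actually denoised.

    webrtc::VideoDenoiser denoiser(true /* runtime_cpu_detection */);
    webrtc::VideoFrame denoised;
    denoiser.DenoiseFrame(input, &denoised);  // Size change: copied through.
    denoiser.DenoiseFrame(input, &denoised);  // Denoised from here on.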
diff --git a/webrtc/modules/video_processing/video_processing.gypi b/webrtc/modules/video_processing/video_processing.gypi
index 5827a5b1a6..7418c455a2 100644
--- a/webrtc/modules/video_processing/video_processing.gypi
+++ b/webrtc/modules/video_processing/video_processing.gypi
@@ -18,29 +18,38 @@
'<(webrtc_root)/system_wrappers/system_wrappers.gyp:system_wrappers',
],
'sources': [
- 'main/interface/video_processing.h',
- 'main/interface/video_processing_defines.h',
- 'main/source/brighten.cc',
- 'main/source/brighten.h',
- 'main/source/brightness_detection.cc',
- 'main/source/brightness_detection.h',
- 'main/source/content_analysis.cc',
- 'main/source/content_analysis.h',
- 'main/source/deflickering.cc',
- 'main/source/deflickering.h',
- 'main/source/frame_preprocessor.cc',
- 'main/source/frame_preprocessor.h',
- 'main/source/spatial_resampler.cc',
- 'main/source/spatial_resampler.h',
- 'main/source/video_decimator.cc',
- 'main/source/video_decimator.h',
- 'main/source/video_processing_impl.cc',
- 'main/source/video_processing_impl.h',
+ 'include/video_processing.h',
+ 'include/video_processing_defines.h',
+ 'brightness_detection.cc',
+ 'brightness_detection.h',
+ 'content_analysis.cc',
+ 'content_analysis.h',
+ 'deflickering.cc',
+ 'deflickering.h',
+ 'frame_preprocessor.cc',
+ 'frame_preprocessor.h',
+ 'spatial_resampler.cc',
+ 'spatial_resampler.h',
+ 'video_decimator.cc',
+ 'video_decimator.h',
+ 'video_processing_impl.cc',
+ 'video_processing_impl.h',
+ 'video_denoiser.cc',
+ 'video_denoiser.h',
+ 'util/denoiser_filter.cc',
+ 'util/denoiser_filter.h',
+ 'util/denoiser_filter_c.cc',
+ 'util/denoiser_filter_c.h',
+ 'util/skin_detection.cc',
+ 'util/skin_detection.h',
],
'conditions': [
['target_arch=="ia32" or target_arch=="x64"', {
'dependencies': [ 'video_processing_sse2', ],
}],
+ ['target_arch=="arm" or target_arch == "arm64"', {
+ 'dependencies': [ 'video_processing_neon', ],
+ }],
],
},
],
@@ -51,7 +60,9 @@
'target_name': 'video_processing_sse2',
'type': 'static_library',
'sources': [
- 'main/source/content_analysis_sse2.cc',
+ 'content_analysis_sse2.cc',
+ 'util/denoiser_filter_sse2.cc',
+ 'util/denoiser_filter_sse2.h',
],
'conditions': [
['os_posix==1 and OS!="mac"', {
@@ -66,6 +77,19 @@
},
],
}],
+ ['target_arch=="arm" or target_arch == "arm64"', {
+ 'targets': [
+ {
+ 'target_name': 'video_processing_neon',
+ 'type': 'static_library',
+ 'includes': [ '../../build/arm_neon.gypi', ],
+ 'sources': [
+ 'util/denoiser_filter_neon.cc',
+ 'util/denoiser_filter_neon.h',
+ ],
+ },
+ ],
+ }],
],
}
diff --git a/webrtc/modules/video_processing/video_processing_impl.cc b/webrtc/modules/video_processing/video_processing_impl.cc
new file mode 100644
index 0000000000..f34886f10f
--- /dev/null
+++ b/webrtc/modules/video_processing/video_processing_impl.cc
@@ -0,0 +1,179 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/video_processing/video_processing_impl.h"
+
+#include <assert.h>
+
+#include "webrtc/base/checks.h"
+#include "webrtc/base/logging.h"
+#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
+
+namespace webrtc {
+
+namespace {
+
+int GetSubSamplingFactor(int width, int height) {
+ if (width * height >= 640 * 480) {
+ return 3;
+ } else if (width * height >= 352 * 288) {
+ return 2;
+ } else if (width * height >= 176 * 144) {
+ return 1;
+ } else {
+ return 0;
+ }
+}
+} // namespace
+
+VideoProcessing* VideoProcessing::Create() {
+ return new VideoProcessingImpl();
+}
+
+VideoProcessingImpl::VideoProcessingImpl() {}
+VideoProcessingImpl::~VideoProcessingImpl() {}
+
+void VideoProcessing::GetFrameStats(const VideoFrame& frame,
+ FrameStats* stats) {
+ ClearFrameStats(stats); // The histogram needs to be zeroed out.
+ if (frame.IsZeroSize()) {
+ return;
+ }
+
+ int width = frame.width();
+ int height = frame.height();
+ stats->sub_sampling_factor = GetSubSamplingFactor(width, height);
+
+ const uint8_t* buffer = frame.buffer(kYPlane);
+  // Compute the histogram and sum of the frame.
+ for (int i = 0; i < height; i += (1 << stats->sub_sampling_factor)) {
+ int k = i * width;
+ for (int j = 0; j < width; j += (1 << stats->sub_sampling_factor)) {
+ stats->hist[buffer[k + j]]++;
+ stats->sum += buffer[k + j];
+ }
+ }
+
+ stats->num_pixels = (width * height) / ((1 << stats->sub_sampling_factor) *
+ (1 << stats->sub_sampling_factor));
+ assert(stats->num_pixels > 0);
+
+  // Compute the mean value of the frame.
+ stats->mean = stats->sum / stats->num_pixels;
+}
+
+bool VideoProcessing::ValidFrameStats(const FrameStats& stats) {
+ if (stats.num_pixels == 0) {
+ LOG(LS_WARNING) << "Invalid frame stats.";
+ return false;
+ }
+ return true;
+}
+
+void VideoProcessing::ClearFrameStats(FrameStats* stats) {
+ stats->mean = 0;
+ stats->sum = 0;
+ stats->num_pixels = 0;
+ stats->sub_sampling_factor = 0;
+ memset(stats->hist, 0, sizeof(stats->hist));
+}
+
+void VideoProcessing::Brighten(int delta, VideoFrame* frame) {
+ RTC_DCHECK(!frame->IsZeroSize());
+ RTC_DCHECK(frame->width() > 0);
+ RTC_DCHECK(frame->height() > 0);
+
+ int num_pixels = frame->width() * frame->height();
+
+ int look_up[256];
+ for (int i = 0; i < 256; i++) {
+ int val = i + delta;
+ look_up[i] = ((((val < 0) ? 0 : val) > 255) ? 255 : val);
+ }
+
+ uint8_t* temp_ptr = frame->buffer(kYPlane);
+ for (int i = 0; i < num_pixels; i++) {
+ *temp_ptr = static_cast<uint8_t>(look_up[*temp_ptr]);
+ temp_ptr++;
+ }
+}
+
+int32_t VideoProcessingImpl::Deflickering(VideoFrame* frame,
+ FrameStats* stats) {
+ rtc::CritScope mutex(&mutex_);
+ return deflickering_.ProcessFrame(frame, stats);
+}
+
+int32_t VideoProcessingImpl::BrightnessDetection(const VideoFrame& frame,
+ const FrameStats& stats) {
+ rtc::CritScope mutex(&mutex_);
+ return brightness_detection_.ProcessFrame(frame, stats);
+}
+
+void VideoProcessingImpl::EnableTemporalDecimation(bool enable) {
+ rtc::CritScope mutex(&mutex_);
+ frame_pre_processor_.EnableTemporalDecimation(enable);
+}
+
+void VideoProcessingImpl::SetInputFrameResampleMode(
+ VideoFrameResampling resampling_mode) {
+ rtc::CritScope cs(&mutex_);
+ frame_pre_processor_.SetInputFrameResampleMode(resampling_mode);
+}
+
+int32_t VideoProcessingImpl::SetTargetResolution(uint32_t width,
+ uint32_t height,
+ uint32_t frame_rate) {
+ rtc::CritScope cs(&mutex_);
+ return frame_pre_processor_.SetTargetResolution(width, height, frame_rate);
+}
+
+void VideoProcessingImpl::SetTargetFramerate(int frame_rate) {
+ rtc::CritScope cs(&mutex_);
+ frame_pre_processor_.SetTargetFramerate(frame_rate);
+}
+
+uint32_t VideoProcessingImpl::GetDecimatedFrameRate() {
+ rtc::CritScope cs(&mutex_);
+ return frame_pre_processor_.GetDecimatedFrameRate();
+}
+
+uint32_t VideoProcessingImpl::GetDecimatedWidth() const {
+ rtc::CritScope cs(&mutex_);
+ return frame_pre_processor_.GetDecimatedWidth();
+}
+
+uint32_t VideoProcessingImpl::GetDecimatedHeight() const {
+ rtc::CritScope cs(&mutex_);
+ return frame_pre_processor_.GetDecimatedHeight();
+}
+
+void VideoProcessingImpl::EnableDenosing(bool enable) {
+ rtc::CritScope cs(&mutex_);
+ frame_pre_processor_.EnableDenosing(enable);
+}
+
+const VideoFrame* VideoProcessingImpl::PreprocessFrame(
+ const VideoFrame& frame) {
+ rtc::CritScope mutex(&mutex_);
+ return frame_pre_processor_.PreprocessFrame(frame);
+}
+
+VideoContentMetrics* VideoProcessingImpl::GetContentMetrics() const {
+ rtc::CritScope mutex(&mutex_);
+ return frame_pre_processor_.GetContentMetrics();
+}
+
+void VideoProcessingImpl::EnableContentAnalysis(bool enable) {
+ rtc::CritScope mutex(&mutex_);
+ frame_pre_processor_.EnableContentAnalysis(enable);
+}
+
+} // namespace webrtc
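The subsampling in GetFrameStats() is worth a worked example; this is plain arithmetic, no new API:

    #include <cassert>

    int main() {
      const int width = 640, height = 480;
      const int factor = 3;          // GetSubSamplingFactor(640, 480).
      const int step = 1 << factor;  // Both loops step every 8th pixel.
      assert((width * height) / (step * step) == 4800);  // num_pixels.
    }

So for VGA input, 4800 samples feed the histogram, the sum, and hence the mean.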
diff --git a/webrtc/modules/video_processing/video_processing_impl.h b/webrtc/modules/video_processing/video_processing_impl.h
new file mode 100644
index 0000000000..edbaba12fa
--- /dev/null
+++ b/webrtc/modules/video_processing/video_processing_impl.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_PROCESSING_VIDEO_PROCESSING_IMPL_H_
+#define WEBRTC_MODULES_VIDEO_PROCESSING_VIDEO_PROCESSING_IMPL_H_
+
+#include "webrtc/base/criticalsection.h"
+#include "webrtc/modules/video_processing/include/video_processing.h"
+#include "webrtc/modules/video_processing/brightness_detection.h"
+#include "webrtc/modules/video_processing/deflickering.h"
+#include "webrtc/modules/video_processing/frame_preprocessor.h"
+
+namespace webrtc {
+class CriticalSectionWrapper;
+
+class VideoProcessingImpl : public VideoProcessing {
+ public:
+ VideoProcessingImpl();
+ ~VideoProcessingImpl() override;
+
+ // Implements VideoProcessing.
+ int32_t Deflickering(VideoFrame* frame, FrameStats* stats) override;
+ int32_t BrightnessDetection(const VideoFrame& frame,
+ const FrameStats& stats) override;
+ void EnableTemporalDecimation(bool enable) override;
+ void SetInputFrameResampleMode(VideoFrameResampling resampling_mode) override;
+ void EnableContentAnalysis(bool enable) override;
+ int32_t SetTargetResolution(uint32_t width,
+ uint32_t height,
+ uint32_t frame_rate) override;
+ void SetTargetFramerate(int frame_rate) override;
+ uint32_t GetDecimatedFrameRate() override;
+ uint32_t GetDecimatedWidth() const override;
+ uint32_t GetDecimatedHeight() const override;
+ void EnableDenosing(bool enable) override;
+ const VideoFrame* PreprocessFrame(const VideoFrame& frame) override;
+ VideoContentMetrics* GetContentMetrics() const override;
+
+ private:
+ mutable rtc::CriticalSection mutex_;
+ VPMDeflickering deflickering_ GUARDED_BY(mutex_);
+ VPMBrightnessDetection brightness_detection_;
+ VPMFramePreprocessor frame_pre_processor_;
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_PROCESSING_VIDEO_PROCESSING_IMPL_H_
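Only deflickering_ carries a GUARDED_BY annotation here, although the .cc above takes mutex_ around every member access. If that invariant is intended (an assumption; the patch does not say), the other members could be annotated the same way:

    VPMDeflickering deflickering_ GUARDED_BY(mutex_);
    VPMBrightnessDetection brightness_detection_ GUARDED_BY(mutex_);
    VPMFramePreprocessor frame_pre_processor_ GUARDED_BY(mutex_);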
diff --git a/webrtc/modules/video_render/BUILD.gn b/webrtc/modules/video_render/BUILD.gn
index 80f23870aa..0771bd7080 100644
--- a/webrtc/modules/video_render/BUILD.gn
+++ b/webrtc/modules/video_render/BUILD.gn
@@ -13,8 +13,8 @@ source_set("video_render_module") {
"external/video_render_external_impl.cc",
"external/video_render_external_impl.h",
"i_video_render.h",
- "include/video_render.h",
- "include/video_render_defines.h",
+ "video_render.h",
+ "video_render_defines.h",
"video_render_impl.h",
]
diff --git a/webrtc/modules/video_render/android/video_render_android_impl.cc b/webrtc/modules/video_render/android/video_render_android_impl.cc
index c647501963..9affb23d99 100644
--- a/webrtc/modules/video_render/android/video_render_android_impl.cc
+++ b/webrtc/modules/video_render/android/video_render_android_impl.cc
@@ -141,18 +141,13 @@ int32_t VideoRenderAndroid::StartRender() {
return 0;
}
- _javaRenderThread = ThreadWrapper::CreateThread(JavaRenderThreadFun, this,
- "AndroidRenderThread");
+ _javaRenderThread.reset(new rtc::PlatformThread(JavaRenderThreadFun, this,
+ "AndroidRenderThread"));
- if (_javaRenderThread->Start())
- WEBRTC_TRACE(kTraceInfo, kTraceVideoRenderer, _id,
- "%s: thread started", __FUNCTION__);
- else {
- WEBRTC_TRACE(kTraceError, kTraceVideoRenderer, _id,
- "%s: Could not start send thread", __FUNCTION__);
- return -1;
- }
- _javaRenderThread->SetPriority(kRealtimePriority);
+ _javaRenderThread->Start();
+ WEBRTC_TRACE(kTraceInfo, kTraceVideoRenderer, _id, "%s: thread started",
+ __FUNCTION__);
+ _javaRenderThread->SetPriority(rtc::kRealtimePriority);
return 0;
}
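The same migration recurs across the render backends below: the ThreadWrapper factory plus checked Start() becomes direct construction of an rtc::PlatformThread with an unchecked Start(); the dropped error branch suggests Start() no longer reports failure in this revision. The pattern in isolation (member name abbreviated):

    // Old:
    //   thread_ = ThreadWrapper::CreateThread(Run, this, "Name");
    //   if (!thread_->Start())
    //     return -1;
    //   thread_->SetPriority(kRealtimePriority);
    // New:
    thread_.reset(new rtc::PlatformThread(Run, this, "Name"));
    thread_->Start();
    thread_->SetPriority(rtc::kRealtimePriority);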
diff --git a/webrtc/modules/video_render/android/video_render_android_impl.h b/webrtc/modules/video_render/android/video_render_android_impl.h
index 34950db7d1..e5b7de4643 100644
--- a/webrtc/modules/video_render/android/video_render_android_impl.h
+++ b/webrtc/modules/video_render/android/video_render_android_impl.h
@@ -15,8 +15,8 @@
#include <map>
+#include "webrtc/base/platform_thread.h"
#include "webrtc/modules/video_render/i_video_render.h"
-#include "webrtc/system_wrappers/include/thread_wrapper.h"
namespace webrtc {
@@ -144,7 +144,8 @@ class VideoRenderAndroid: IVideoRender {
EventWrapper& _javaRenderEvent;
int64_t _lastJavaRenderEvent;
JNIEnv* _javaRenderJniEnv; // JNIEnv for the java render thread.
- rtc::scoped_ptr<ThreadWrapper> _javaRenderThread;
+ // TODO(pbos): Remove scoped_ptr and use the member directly.
+ rtc::scoped_ptr<rtc::PlatformThread> _javaRenderThread;
};
} // namespace webrtc
diff --git a/webrtc/modules/video_render/android/video_render_android_native_opengl2.h b/webrtc/modules/video_render/android/video_render_android_native_opengl2.h
index b748b2dd47..8be247b834 100644
--- a/webrtc/modules/video_render/android/video_render_android_native_opengl2.h
+++ b/webrtc/modules/video_render/android/video_render_android_native_opengl2.h
@@ -15,7 +15,7 @@
#include "webrtc/modules/video_render/android/video_render_android_impl.h"
#include "webrtc/modules/video_render/android/video_render_opengles20.h"
-#include "webrtc/modules/video_render/include/video_render_defines.h"
+#include "webrtc/modules/video_render/video_render_defines.h"
namespace webrtc {
diff --git a/webrtc/modules/video_render/android/video_render_android_surface_view.h b/webrtc/modules/video_render/android/video_render_android_surface_view.h
index 480e8b5106..0f029b54f3 100644
--- a/webrtc/modules/video_render/android/video_render_android_surface_view.h
+++ b/webrtc/modules/video_render/android/video_render_android_surface_view.h
@@ -14,7 +14,7 @@
#include <jni.h>
#include "webrtc/modules/video_render/android/video_render_android_impl.h"
-#include "webrtc/modules/video_render/include/video_render_defines.h"
+#include "webrtc/modules/video_render/video_render_defines.h"
namespace webrtc {
diff --git a/webrtc/modules/video_render/android/video_render_opengles20.h b/webrtc/modules/video_render/android/video_render_opengles20.h
index 44ab4c04bc..57e2a10d42 100644
--- a/webrtc/modules/video_render/android/video_render_opengles20.h
+++ b/webrtc/modules/video_render/android/video_render_opengles20.h
@@ -11,7 +11,7 @@
#ifndef WEBRTC_MODULES_VIDEO_RENDER_MAIN_SOURCE_ANDROID_VIDEO_RENDER_OPENGLES20_H_
#define WEBRTC_MODULES_VIDEO_RENDER_MAIN_SOURCE_ANDROID_VIDEO_RENDER_OPENGLES20_H_
-#include "webrtc/modules/video_render/include/video_render_defines.h"
+#include "webrtc/modules/video_render/video_render_defines.h"
#include <GLES2/gl2.h>
#include <GLES2/gl2ext.h>
diff --git a/webrtc/modules/video_render/external/video_render_external_impl.h b/webrtc/modules/video_render/external/video_render_external_impl.h
index 9230e60acc..a8b663fff7 100644
--- a/webrtc/modules/video_render/external/video_render_external_impl.h
+++ b/webrtc/modules/video_render/external/video_render_external_impl.h
@@ -11,7 +11,7 @@
#ifndef WEBRTC_MODULES_VIDEO_RENDER_MAIN_SOURCE_EXTERNAL_VIDEO_RENDER_EXTERNAL_IMPL_H_
#define WEBRTC_MODULES_VIDEO_RENDER_MAIN_SOURCE_EXTERNAL_VIDEO_RENDER_EXTERNAL_IMPL_H_
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/modules/video_render/i_video_render.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
diff --git a/webrtc/modules/video_render/i_video_render.h b/webrtc/modules/video_render/i_video_render.h
index ff1cce782e..e6ec7a4680 100644
--- a/webrtc/modules/video_render/i_video_render.h
+++ b/webrtc/modules/video_render/i_video_render.h
@@ -11,7 +11,7 @@
#ifndef WEBRTC_MODULES_VIDEO_RENDER_MAIN_SOURCE_I_VIDEO_RENDER_H_
#define WEBRTC_MODULES_VIDEO_RENDER_MAIN_SOURCE_I_VIDEO_RENDER_H_
-#include "webrtc/modules/video_render/include/video_render.h"
+#include "webrtc/modules/video_render/video_render.h"
namespace webrtc {
diff --git a/webrtc/modules/video_render/ios/open_gles20.h b/webrtc/modules/video_render/ios/open_gles20.h
index f74955fee4..880ddb5231 100644
--- a/webrtc/modules/video_render/ios/open_gles20.h
+++ b/webrtc/modules/video_render/ios/open_gles20.h
@@ -13,7 +13,7 @@
#include <OpenGLES/ES2/glext.h>
-#include "webrtc/modules/video_render/include/video_render_defines.h"
+#include "webrtc/modules/video_render/video_render_defines.h"
/*
* This OpenGles20 is the class of renderer for VideoFrame into a GLES 2.0
diff --git a/webrtc/modules/video_render/ios/video_render_ios_channel.h b/webrtc/modules/video_render/ios/video_render_ios_channel.h
index 375a5ee719..a15ba393dc 100644
--- a/webrtc/modules/video_render/ios/video_render_ios_channel.h
+++ b/webrtc/modules/video_render/ios/video_render_ios_channel.h
@@ -11,7 +11,7 @@
#ifndef WEBRTC_MODULES_VIDEO_RENDER_IOS_VIDEO_RENDER_IOS_CHANNEL_H_
#define WEBRTC_MODULES_VIDEO_RENDER_IOS_VIDEO_RENDER_IOS_CHANNEL_H_
-#include "webrtc/modules/video_render/include/video_render_defines.h"
+#include "webrtc/modules/video_render/video_render_defines.h"
#include "webrtc/modules/video_render/ios/video_render_ios_view.h"
namespace webrtc {
diff --git a/webrtc/modules/video_render/ios/video_render_ios_gles20.h b/webrtc/modules/video_render/ios/video_render_ios_gles20.h
index b6da12aab8..d703630d92 100644
--- a/webrtc/modules/video_render/ios/video_render_ios_gles20.h
+++ b/webrtc/modules/video_render/ios/video_render_ios_gles20.h
@@ -14,10 +14,10 @@
#include <list>
#include <map>
+#include "webrtc/base/platform_thread.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/modules/video_render/ios/video_render_ios_channel.h"
#include "webrtc/modules/video_render/ios/video_render_ios_view.h"
-#include "webrtc/system_wrappers/include/thread_wrapper.h"
namespace webrtc {
@@ -64,7 +64,8 @@ class VideoRenderIosGles20 {
private:
rtc::scoped_ptr<CriticalSectionWrapper> gles_crit_sec_;
EventTimerWrapper* screen_update_event_;
- rtc::scoped_ptr<ThreadWrapper> screen_update_thread_;
+ // TODO(pbos): Remove scoped_ptr and use member directly.
+ rtc::scoped_ptr<rtc::PlatformThread> screen_update_thread_;
VideoRenderIosView* view_;
Rect window_rect_;
diff --git a/webrtc/modules/video_render/ios/video_render_ios_gles20.mm b/webrtc/modules/video_render/ios/video_render_ios_gles20.mm
index 3a276d6030..6ad5db8b8c 100644
--- a/webrtc/modules/video_render/ios/video_render_ios_gles20.mm
+++ b/webrtc/modules/video_render/ios/video_render_ios_gles20.mm
@@ -32,15 +32,15 @@ VideoRenderIosGles20::VideoRenderIosGles20(VideoRenderIosView* view,
z_order_to_channel_(),
gles_context_([view context]),
is_rendering_(true) {
- screen_update_thread_ = ThreadWrapper::CreateThread(
- ScreenUpdateThreadProc, this, "ScreenUpdateGles20");
+ screen_update_thread_.reset(new rtc::PlatformThread(
+ ScreenUpdateThreadProc, this, "ScreenUpdateGles20"));
screen_update_event_ = EventTimerWrapper::Create();
GetWindowRect(window_rect_);
}
VideoRenderIosGles20::~VideoRenderIosGles20() {
// Signal event to exit thread, then delete it
- ThreadWrapper* thread_wrapper = screen_update_thread_.release();
+ rtc::PlatformThread* thread_wrapper = screen_update_thread_.release();
if (thread_wrapper) {
screen_update_event_->Set();
@@ -83,7 +83,7 @@ int VideoRenderIosGles20::Init() {
}
screen_update_thread_->Start();
- screen_update_thread_->SetPriority(kRealtimePriority);
+ screen_update_thread_->SetPriority(rtc::kRealtimePriority);
// Start the event triggering the render process
unsigned int monitor_freq = 60;
diff --git a/webrtc/modules/video_render/linux/video_x11_channel.h b/webrtc/modules/video_render/linux/video_x11_channel.h
index 8cbd2a27d1..6eb402e12e 100644
--- a/webrtc/modules/video_render/linux/video_x11_channel.h
+++ b/webrtc/modules/video_render/linux/video_x11_channel.h
@@ -13,7 +13,7 @@
#include <sys/shm.h>
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
-#include "webrtc/modules/video_render/include/video_render_defines.h"
+#include "webrtc/modules/video_render/video_render_defines.h"
#include <X11/Xlib.h>
#include <X11/Xutil.h>
diff --git a/webrtc/modules/video_render/linux/video_x11_render.h b/webrtc/modules/video_render/linux/video_x11_render.h
index 265ef7cfab..23b83bd67b 100644
--- a/webrtc/modules/video_render/linux/video_x11_render.h
+++ b/webrtc/modules/video_render/linux/video_x11_render.h
@@ -11,7 +11,7 @@
#ifndef WEBRTC_MODULES_VIDEO_RENDER_MAIN_SOURCE_LINUX_VIDEO_X11_RENDER_H_
#define WEBRTC_MODULES_VIDEO_RENDER_MAIN_SOURCE_LINUX_VIDEO_X11_RENDER_H_
-#include "webrtc/modules/video_render/include/video_render_defines.h"
+#include "webrtc/modules/video_render/video_render_defines.h"
#include <X11/Xlib.h>
#include <map>
diff --git a/webrtc/modules/video_render/mac/video_render_agl.cc b/webrtc/modules/video_render/mac/video_render_agl.cc
index dc157d597b..3243563b2b 100644
--- a/webrtc/modules/video_render/mac/video_render_agl.cc
+++ b/webrtc/modules/video_render/mac/video_render_agl.cc
@@ -395,8 +395,8 @@ _renderingIsPaused( false),
{
//WEBRTC_TRACE(kTraceInfo, kTraceVideoRenderer, _id, "%s");
- _screenUpdateThread = ThreadWrapper::CreateThread(
- ScreenUpdateThreadProc, this, "ScreenUpdate");
+ _screenUpdateThread.reset(
+ new rtc::PlatformThread(ScreenUpdateThreadProc, this, "ScreenUpdate"));
_screenUpdateEvent = EventWrapper::Create();
if(!IsValidWindowPtr(_windowRef))
@@ -512,8 +512,8 @@ _renderingIsPaused( false),
//WEBRTC_TRACE(kTraceDebug, "%s:%d Constructor", __FUNCTION__, __LINE__);
// _renderCritSec = CriticalSectionWrapper::CreateCriticalSection();
- _screenUpdateThread = ThreadWrapper::CreateThread(
- ScreenUpdateThreadProc, this, "ScreenUpdateThread");
+ _screenUpdateThread.reset(new rtc::PlatformThread(
+ ScreenUpdateThreadProc, this, "ScreenUpdateThread"));
_screenUpdateEvent = EventWrapper::Create();
GetWindowRect(_windowRect);
@@ -677,7 +677,7 @@ VideoRenderAGL::~VideoRenderAGL()
#endif
// Signal event to exit thread, then delete it
- ThreadWrapper* tmpPtr = _screenUpdateThread.release();
+ rtc::PlatformThread* tmpPtr = _screenUpdateThread.release();
if (tmpPtr)
{
@@ -739,7 +739,7 @@ int VideoRenderAGL::Init()
return -1;
}
_screenUpdateThread->Start();
- _screenUpdateThread->SetPriority(kRealtimePriority);
+ _screenUpdateThread->SetPriority(rtc::kRealtimePriority);
// Start the event triggering the render process
unsigned int monitorFreq = 60;
@@ -856,7 +856,7 @@ int VideoRenderAGL::DeleteAGLChannel(int channel)
int VideoRenderAGL::StopThread()
{
CriticalSectionScoped cs(&_renderCritSec);
- ThreadWrapper* tmpPtr = _screenUpdateThread.release();
+ rtc::PlatformThread* tmpPtr = _screenUpdateThread.release();
if (tmpPtr)
{
@@ -1880,7 +1880,7 @@ int32_t VideoRenderAGL::StartRender()
UnlockAGLCntx();
return -1;
}
- _screenUpdateThread->SetPriority(kRealtimePriority);
+ _screenUpdateThread->SetPriority(rtc::kRealtimePriority);
if(FALSE == _screenUpdateEvent->StartTimer(true, 1000/MONITOR_FREQ))
{
//WEBRTC_TRACE(kTraceError, kTraceVideoRenderer, _id, "%s:%d Failed to start screenUpdateEvent", __FUNCTION__, __LINE__);
@@ -1891,8 +1891,8 @@ int32_t VideoRenderAGL::StartRender()
return 0;
}
- _screenUpdateThread = ThreadWrapper::CreateThread(ScreenUpdateThreadProc,
- this, "ScreenUpdate");
+ _screenUpdateThread.reset(
+ new rtc::PlatformThread(ScreenUpdateThreadProc, this, "ScreenUpdate"));
_screenUpdateEvent = EventWrapper::Create();
if (!_screenUpdateThread)
@@ -1903,14 +1903,13 @@ int32_t VideoRenderAGL::StartRender()
}
_screenUpdateThread->Start();
- _screenUpdateThread->SetPriority(kRealtimePriority);
+ _screenUpdateThread->SetPriority(rtc::kRealtimePriority);
_screenUpdateEvent->StartTimer(true, 1000/MONITOR_FREQ);
//WEBRTC_TRACE(kTraceInfo, kTraceVideoRenderer, _id, "%s:%d Started screenUpdateThread", __FUNCTION__, __LINE__);
UnlockAGLCntx();
return 0;
-
}
int32_t VideoRenderAGL::StopRender()
diff --git a/webrtc/modules/video_render/mac/video_render_agl.h b/webrtc/modules/video_render/mac/video_render_agl.h
index 8710228754..e1da8faf83 100644
--- a/webrtc/modules/video_render/mac/video_render_agl.h
+++ b/webrtc/modules/video_render/mac/video_render_agl.h
@@ -15,8 +15,8 @@
#ifndef WEBRTC_MODULES_VIDEO_RENDER_MAIN_SOURCE_MAC_VIDEO_RENDER_AGL_H_
#define WEBRTC_MODULES_VIDEO_RENDER_MAIN_SOURCE_MAC_VIDEO_RENDER_AGL_H_
-#include "webrtc/modules/video_render/include/video_render_defines.h"
-#include "webrtc/system_wrappers/include/thread_wrapper.h"
+#include "webrtc/base/platform_thread.h"
+#include "webrtc/modules/video_render/video_render_defines.h"
#define NEW_HIVIEW_PARENT_EVENT_HANDLER 1
#define NEW_HIVIEW_EVENT_HANDLER 1
@@ -142,7 +142,8 @@ class VideoRenderAGL {
bool _fullScreen;
int _id;
webrtc::CriticalSectionWrapper& _renderCritSec;
- rtc::scoped_ptr<webrtc::ThreadWrapper> _screenUpdateThread;
+ // TODO(pbos): Remove scoped_ptr and use PlatformThread directly.
+ rtc::scoped_ptr<rtc::PlatformThread> _screenUpdateThread;
webrtc::EventWrapper* _screenUpdateEvent;
bool _isHIViewRef;
AGLContext _aglContext;
diff --git a/webrtc/modules/video_render/mac/video_render_nsopengl.h b/webrtc/modules/video_render/mac/video_render_nsopengl.h
index 5dab4d266f..a888b68a97 100644
--- a/webrtc/modules/video_render/mac/video_render_nsopengl.h
+++ b/webrtc/modules/video_render/mac/video_render_nsopengl.h
@@ -23,16 +23,19 @@
#include <map>
#include "webrtc/base/thread_annotations.h"
-#include "webrtc/modules/video_render/include/video_render_defines.h"
+#include "webrtc/modules/video_render/video_render_defines.h"
#import "webrtc/modules/video_render/mac/cocoa_full_screen_window.h"
#import "webrtc/modules/video_render/mac/cocoa_render_view.h"
class Trace;
+namespace rtc {
+class PlatformThread;
+} // namespace rtc
+
namespace webrtc {
class EventTimerWrapper;
-class ThreadWrapper;
class VideoRenderNSOpenGL;
class CriticalSectionWrapper;
@@ -166,7 +169,8 @@ private: // variables
bool _fullScreen;
int _id;
CriticalSectionWrapper& _nsglContextCritSec;
- rtc::scoped_ptr<ThreadWrapper> _screenUpdateThread;
+ // TODO(pbos): Remove scoped_ptr and use PlatformThread directly.
+ rtc::scoped_ptr<rtc::PlatformThread> _screenUpdateThread;
EventTimerWrapper* _screenUpdateEvent;
NSOpenGLContext* _nsglContext;
NSOpenGLContext* _nsglFullScreenContext;
diff --git a/webrtc/modules/video_render/mac/video_render_nsopengl.mm b/webrtc/modules/video_render/mac/video_render_nsopengl.mm
index b5150eb668..b7683a96af 100644
--- a/webrtc/modules/video_render/mac/video_render_nsopengl.mm
+++ b/webrtc/modules/video_render/mac/video_render_nsopengl.mm
@@ -11,11 +11,11 @@
#include "webrtc/engine_configurations.h"
#if defined(COCOA_RENDERING)
+#include "webrtc/base/platform_thread.h"
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
#include "webrtc/modules/video_render/mac/video_render_nsopengl.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
#include "webrtc/system_wrappers/include/event_wrapper.h"
-#include "webrtc/system_wrappers/include/thread_wrapper.h"
#include "webrtc/system_wrappers/include/trace.h"
namespace webrtc {
@@ -378,8 +378,8 @@ _renderingIsPaused (FALSE),
_windowRefSuperView(NULL),
_windowRefSuperViewFrame(NSMakeRect(0,0,0,0))
{
- _screenUpdateThread = ThreadWrapper::CreateThread(ScreenUpdateThreadProc,
- this, "ScreenUpdateNSOpenGL");
+ _screenUpdateThread.reset(new rtc::PlatformThread(
+ ScreenUpdateThreadProc, this, "ScreenUpdateNSOpenGL"));
}
int VideoRenderNSOpenGL::ChangeWindow(CocoaRenderView* newWindowRef)
@@ -427,15 +427,15 @@ int32_t VideoRenderNSOpenGL::StartRender()
WEBRTC_TRACE(kTraceDebug, kTraceVideoRenderer, _id, "Restarting screenUpdateThread");
// we already have the thread. Most likely StopRender() was called and they were paused
- if(FALSE == _screenUpdateThread->Start() ||
- FALSE == _screenUpdateEvent->StartTimer(true, 1000/MONITOR_FREQ))
- {
+ _screenUpdateThread->Start();
+ if (FALSE ==
+ _screenUpdateEvent->StartTimer(true, 1000 / MONITOR_FREQ)) {
WEBRTC_TRACE(kTraceError, kTraceVideoRenderer, _id, "Failed to restart screenUpdateThread or screenUpdateEvent");
UnlockAGLCntx();
return -1;
}
- _screenUpdateThread->SetPriority(kRealtimePriority);
+ _screenUpdateThread->SetPriority(rtc::kRealtimePriority);
UnlockAGLCntx();
return 0;
@@ -471,8 +471,8 @@ int32_t VideoRenderNSOpenGL::StopRender()
return 0;
}
- if(FALSE == _screenUpdateThread->Stop() || FALSE == _screenUpdateEvent->StopTimer())
- {
+ _screenUpdateThread->Stop();
+ if (FALSE == _screenUpdateEvent->StopTimer()) {
_renderingIsPaused = FALSE;
UnlockAGLCntx();
@@ -657,17 +657,15 @@ VideoRenderNSOpenGL::~VideoRenderNSOpenGL()
}
// Signal event to exit thread, then delete it
- ThreadWrapper* tmpPtr = _screenUpdateThread.release();
+ rtc::PlatformThread* tmpPtr = _screenUpdateThread.release();
if (tmpPtr)
{
_screenUpdateEvent->Set();
_screenUpdateEvent->StopTimer();
- if (tmpPtr->Stop())
- {
- delete tmpPtr;
- }
+ tmpPtr->Stop();
+ delete tmpPtr;
delete _screenUpdateEvent;
_screenUpdateEvent = NULL;
}
@@ -716,7 +714,7 @@ int VideoRenderNSOpenGL::Init()
}
_screenUpdateThread->Start();
- _screenUpdateThread->SetPriority(kRealtimePriority);
+ _screenUpdateThread->SetPriority(rtc::kRealtimePriority);
// Start the event triggering the render process
unsigned int monitorFreq = 60;
@@ -864,17 +862,15 @@ int32_t VideoRenderNSOpenGL::GetChannelProperties(const uint16_t streamId,
int VideoRenderNSOpenGL::StopThread()
{
- ThreadWrapper* tmpPtr = _screenUpdateThread.release();
+ rtc::PlatformThread* tmpPtr = _screenUpdateThread.release();
WEBRTC_TRACE(kTraceInfo, kTraceVideoRenderer, _id,
"%s Stopping thread ", __FUNCTION__, tmpPtr);
if (tmpPtr)
{
_screenUpdateEvent->Set();
- if (tmpPtr->Stop())
- {
- delete tmpPtr;
- }
+ tmpPtr->Stop();
+ delete tmpPtr;
}
delete _screenUpdateEvent;
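The teardown change above also removes a latent leak: the old code deleted the thread object only when Stop() reported success, so a failed stop leaked it. With rtc::PlatformThread the stop is treated as unconditional:

    // Old: tmpPtr leaked whenever Stop() returned false.
    //   if (tmpPtr->Stop())
    //     delete tmpPtr;
    // New:
    tmpPtr->Stop();
    delete tmpPtr;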
diff --git a/webrtc/modules/video_render/test/testAPI/testAPI.cc b/webrtc/modules/video_render/test/testAPI/testAPI.cc
index 256d031c0c..cea2f6b56f 100644
--- a/webrtc/modules/video_render/test/testAPI/testAPI.cc
+++ b/webrtc/modules/video_render/test/testAPI/testAPI.cc
@@ -32,10 +32,10 @@
#endif
#include "webrtc/common_types.h"
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/modules/utility/interface/process_thread.h"
-#include "webrtc/modules/video_render/include/video_render.h"
-#include "webrtc/modules/video_render/include/video_render_defines.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/utility/include/process_thread.h"
+#include "webrtc/modules/video_render/video_render.h"
+#include "webrtc/modules/video_render/video_render_defines.h"
#include "webrtc/system_wrappers/include/sleep.h"
#include "webrtc/system_wrappers/include/tick_util.h"
#include "webrtc/system_wrappers/include/trace.h"
@@ -244,7 +244,7 @@ int WebRtcCreateWindow(Window *outWindow, Display **outDisplay, int winNum, int
return 0;
}
-#endif // LINUX
+#endif // WEBRTC_LINUX
// Note: Mac code is in testApi_mac.mm.
diff --git a/webrtc/modules/video_render/test/testAPI/testAPI.h b/webrtc/modules/video_render/test/testAPI/testAPI.h
index 8b14e84931..0655a5b434 100644
--- a/webrtc/modules/video_render/test/testAPI/testAPI.h
+++ b/webrtc/modules/video_render/test/testAPI/testAPI.h
@@ -11,7 +11,7 @@
#ifndef WEBRTC_MODULES_VIDEO_RENDER_MAIN_TEST_TESTAPI_TESTAPI_H
#define WEBRTC_MODULES_VIDEO_RENDER_MAIN_TEST_TESTAPI_TESTAPI_H
-#include "webrtc/modules/video_render/include/video_render_defines.h"
+#include "webrtc/modules/video_render/video_render_defines.h"
void RunVideoRenderTests(void* window, webrtc::VideoRenderType windowType);
diff --git a/webrtc/modules/video_render/test/testAPI/testAPI_mac.mm b/webrtc/modules/video_render/test/testAPI/testAPI_mac.mm
index dd57397c73..dfee4c7298 100644
--- a/webrtc/modules/video_render/test/testAPI/testAPI_mac.mm
+++ b/webrtc/modules/video_render/test/testAPI/testAPI_mac.mm
@@ -20,10 +20,10 @@
#import "webrtc/modules/video_render/mac/cocoa_render_view.h"
#include "webrtc/common_types.h"
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/modules/utility/interface/process_thread.h"
-#include "webrtc/modules/video_render/include/video_render.h"
-#include "webrtc/modules/video_render/include/video_render_defines.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/utility/include/process_thread.h"
+#include "webrtc/modules/video_render/video_render.h"
+#include "webrtc/modules/video_render/video_render_defines.h"
#include "webrtc/system_wrappers/include/tick_util.h"
#include "webrtc/system_wrappers/include/trace.h"
diff --git a/webrtc/modules/video_render/video_render.gypi b/webrtc/modules/video_render/video_render.gypi
index 63f69b0a63..e8cc03a4b0 100644
--- a/webrtc/modules/video_render/video_render.gypi
+++ b/webrtc/modules/video_render/video_render.gypi
@@ -25,8 +25,8 @@
'external/video_render_external_impl.cc',
'external/video_render_external_impl.h',
'i_video_render.h',
- 'include/video_render.h',
- 'include/video_render_defines.h',
+ 'video_render.h',
+ 'video_render_defines.h',
'video_render_impl.h',
],
},
@@ -149,13 +149,28 @@
'<(directx_sdk_path)/Include',
],
}],
+ ['OS=="win" and clang==1', {
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'AdditionalOptions': [
+ # Disable warnings failing when compiling with Clang on Windows.
+ # https://bugs.chromium.org/p/webrtc/issues/detail?id=5366
+ '-Wno-comment',
+ '-Wno-reorder',
+ '-Wno-unused-value',
+ '-Wno-unused-private-field',
+ ],
+ },
+ },
+ }],
] # conditions
},
],
}], # build_with_chromium==0
- ['include_tests==1', {
+ ['include_tests==1 and OS!="ios"', {
'targets': [
{
+ # Does not compile on iOS: webrtc:4755.
'target_name': 'video_render_tests',
'type': 'executable',
'dependencies': [
@@ -197,7 +212,7 @@
] # conditions
}, # video_render_module_test
], # targets
- }], # include_tests==1
+ }], # include_tests==1 and OS!=ios
], # conditions
}
diff --git a/webrtc/modules/video_render/include/video_render.h b/webrtc/modules/video_render/video_render.h
index 51fcce10c3..a193a187e7 100644
--- a/webrtc/modules/video_render/include/video_render.h
+++ b/webrtc/modules/video_render/video_render.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_VIDEO_RENDER_MAIN_INTERFACE_VIDEO_RENDER_H_
-#define WEBRTC_MODULES_VIDEO_RENDER_MAIN_INTERFACE_VIDEO_RENDER_H_
+#ifndef WEBRTC_MODULES_VIDEO_RENDER_VIDEO_RENDER_H_
+#define WEBRTC_MODULES_VIDEO_RENDER_VIDEO_RENDER_H_
/*
* video_render.h
@@ -20,8 +20,8 @@
*
*/
-#include "webrtc/modules/interface/module.h"
-#include "webrtc/modules/video_render/include/video_render_defines.h"
+#include "webrtc/modules/include/module.h"
+#include "webrtc/modules/video_render/video_render_defines.h"
namespace webrtc {
@@ -265,4 +265,4 @@ public:
const uint32_t timeout) = 0;
};
} // namespace webrtc
-#endif // WEBRTC_MODULES_VIDEO_RENDER_MAIN_INTERFACE_VIDEO_RENDER_H_
+#endif // WEBRTC_MODULES_VIDEO_RENDER_VIDEO_RENDER_H_
diff --git a/webrtc/modules/video_render/include/video_render_defines.h b/webrtc/modules/video_render/video_render_defines.h
index f8f48035ea..999707cb6e 100644
--- a/webrtc/modules/video_render/include/video_render_defines.h
+++ b/webrtc/modules/video_render/video_render_defines.h
@@ -8,12 +8,12 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_VIDEO_RENDER_MAIN_INTERFACE_VIDEO_RENDER_DEFINES_H_
-#define WEBRTC_MODULES_VIDEO_RENDER_MAIN_INTERFACE_VIDEO_RENDER_DEFINES_H_
+#ifndef WEBRTC_MODULES_VIDEO_RENDER_VIDEO_RENDER_DEFINES_H_
+#define WEBRTC_MODULES_VIDEO_RENDER_VIDEO_RENDER_DEFINES_H_
#include "webrtc/common_types.h"
-#include "webrtc/common_video/interface/incoming_video_stream.h"
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/common_video/include/incoming_video_stream.h"
+#include "webrtc/modules/include/module_common_types.h"
namespace webrtc
{
@@ -67,4 +67,4 @@ enum StretchMode
} // namespace webrtc
-#endif // WEBRTC_MODULES_VIDEO_RENDER_MAIN_INTERFACE_VIDEO_RENDER_DEFINES_H_
+#endif // WEBRTC_MODULES_VIDEO_RENDER_VIDEO_RENDER_DEFINES_H_
diff --git a/webrtc/modules/video_render/video_render_impl.cc b/webrtc/modules/video_render/video_render_impl.cc
index 32ec2b0e75..d2a074b4c4 100644
--- a/webrtc/modules/video_render/video_render_impl.cc
+++ b/webrtc/modules/video_render/video_render_impl.cc
@@ -10,11 +10,11 @@
#include <assert.h>
-#include "webrtc/common_video/interface/incoming_video_stream.h"
+#include "webrtc/common_video/include/incoming_video_stream.h"
#include "webrtc/engine_configurations.h"
#include "webrtc/modules/video_render/external/video_render_external_impl.h"
#include "webrtc/modules/video_render/i_video_render.h"
-#include "webrtc/modules/video_render/include/video_render_defines.h"
+#include "webrtc/modules/video_render/video_render_defines.h"
#include "webrtc/modules/video_render/video_render_impl.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
#include "webrtc/system_wrappers/include/trace.h"
@@ -197,7 +197,8 @@ ModuleVideoRenderImpl::AddIncomingRenderStream(const uint32_t streamId,
}
// Create platform independent code
- IncomingVideoStream* ptrIncomingStream = new IncomingVideoStream(streamId);
+ IncomingVideoStream* ptrIncomingStream =
+ new IncomingVideoStream(streamId, false);
ptrIncomingStream->SetRenderCallback(ptrRenderCallback);
VideoRenderCallback* moduleCallback = ptrIncomingStream->ModuleCallback();
diff --git a/webrtc/modules/video_render/video_render_impl.h b/webrtc/modules/video_render/video_render_impl.h
index cc00897cc3..ce93cea6b5 100644
--- a/webrtc/modules/video_render/video_render_impl.h
+++ b/webrtc/modules/video_render/video_render_impl.h
@@ -14,7 +14,7 @@
#include <map>
#include "webrtc/engine_configurations.h"
-#include "webrtc/modules/video_render/include/video_render.h"
+#include "webrtc/modules/video_render/video_render.h"
namespace webrtc {
class CriticalSectionWrapper;
diff --git a/webrtc/modules/video_render/video_render_internal_impl.cc b/webrtc/modules/video_render/video_render_internal_impl.cc
index 09ebc7e548..1fed26e9c4 100644
--- a/webrtc/modules/video_render/video_render_internal_impl.cc
+++ b/webrtc/modules/video_render/video_render_internal_impl.cc
@@ -10,10 +10,10 @@
#include <assert.h>
-#include "webrtc/common_video/interface/incoming_video_stream.h"
+#include "webrtc/common_video/include/incoming_video_stream.h"
#include "webrtc/engine_configurations.h"
#include "webrtc/modules/video_render/i_video_render.h"
-#include "webrtc/modules/video_render/include/video_render_defines.h"
+#include "webrtc/modules/video_render/video_render_defines.h"
#include "webrtc/modules/video_render/video_render_impl.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
#include "webrtc/system_wrappers/include/trace.h"
@@ -420,7 +420,8 @@ ModuleVideoRenderImpl::AddIncomingRenderStream(const uint32_t streamId,
}
// Create platform independent code
- IncomingVideoStream* ptrIncomingStream = new IncomingVideoStream(streamId);
+ IncomingVideoStream* ptrIncomingStream =
+ new IncomingVideoStream(streamId, false);
ptrIncomingStream->SetRenderCallback(ptrRenderCallback);
VideoRenderCallback* moduleCallback = ptrIncomingStream->ModuleCallback();
diff --git a/webrtc/modules/video_render/windows/i_video_render_win.h b/webrtc/modules/video_render/windows/i_video_render_win.h
index 56731e3770..6dbb4fd3cb 100644
--- a/webrtc/modules/video_render/windows/i_video_render_win.h
+++ b/webrtc/modules/video_render/windows/i_video_render_win.h
@@ -11,7 +11,7 @@
#ifndef WEBRTC_MODULES_VIDEO_RENDER_MAIN_SOURCE_WINDOWS_I_VIDEO_RENDER_WIN_H_
#define WEBRTC_MODULES_VIDEO_RENDER_MAIN_SOURCE_WINDOWS_I_VIDEO_RENDER_WIN_H_
-#include "webrtc/modules/video_render/include/video_render.h"
+#include "webrtc/modules/video_render/video_render.h"
namespace webrtc {
diff --git a/webrtc/modules/video_render/windows/video_render_direct3d9.cc b/webrtc/modules/video_render/windows/video_render_direct3d9.cc
index 24dd0efddb..83835aebb8 100644
--- a/webrtc/modules/video_render/windows/video_render_direct3d9.cc
+++ b/webrtc/modules/video_render/windows/video_render_direct3d9.cc
@@ -294,8 +294,8 @@ VideoRenderDirect3D9::VideoRenderDirect3D9(Trace* trace,
_totalMemory(0),
_availableMemory(0)
{
- _screenUpdateThread = ThreadWrapper::CreateThread(
- ScreenUpdateThreadProc, this, "ScreenUpdateThread");
+ _screenUpdateThread.reset(new rtc::PlatformThread(
+ ScreenUpdateThreadProc, this, "ScreenUpdateThread"));
_screenUpdateEvent = EventTimerWrapper::Create();
SetRect(&_originalHwndRect, 0, 0, 0, 0);
}
@@ -305,7 +305,7 @@ VideoRenderDirect3D9::~VideoRenderDirect3D9()
//NOTE: we should not enter CriticalSection in here!
// Signal event to exit thread, then delete it
- ThreadWrapper* tmpPtr = _screenUpdateThread.release();
+ rtc::PlatformThread* tmpPtr = _screenUpdateThread.release();
if (tmpPtr)
{
_screenUpdateEvent->Set();
@@ -546,7 +546,7 @@ int32_t VideoRenderDirect3D9::Init()
return -1;
}
_screenUpdateThread->Start();
- _screenUpdateThread->SetPriority(kRealtimePriority);
+ _screenUpdateThread->SetPriority(rtc::kRealtimePriority);
// Start the event triggering the render process
unsigned int monitorFreq = 60;
diff --git a/webrtc/modules/video_render/windows/video_render_direct3d9.h b/webrtc/modules/video_render/windows/video_render_direct3d9.h
index c8f6639d9a..5a1f207934 100644
--- a/webrtc/modules/video_render/windows/video_render_direct3d9.h
+++ b/webrtc/modules/video_render/windows/video_render_direct3d9.h
@@ -19,8 +19,8 @@
#include <Map>
// Added
-#include "webrtc/modules/video_render/include/video_render_defines.h"
-#include "webrtc/system_wrappers/include/thread_wrapper.h"
+#include "webrtc/base/platform_thread.h"
+#include "webrtc/modules/video_render/video_render_defines.h"
#pragma comment(lib, "d3d9.lib") // located in DirectX SDK
@@ -203,7 +203,8 @@ private:
CriticalSectionWrapper& _refD3DCritsect;
Trace* _trace;
- rtc::scoped_ptr<ThreadWrapper> _screenUpdateThread;
+ // TODO(pbos): Remove scoped_ptr and use PlatformThread directly.
+ rtc::scoped_ptr<rtc::PlatformThread> _screenUpdateThread;
EventTimerWrapper* _screenUpdateEvent;
HWND _hWnd;