aboutsummaryrefslogtreecommitdiff
path: root/webrtc/modules/video_coding
diff options
context:
space:
mode:
Diffstat (limited to 'webrtc/modules/video_coding')
-rw-r--r--webrtc/modules/video_coding/BUILD.gn146
-rw-r--r--webrtc/modules/video_coding/OWNERS5
-rw-r--r--webrtc/modules/video_coding/codec_database.cc (renamed from webrtc/modules/video_coding/main/source/codec_database.cc)265
-rw-r--r--webrtc/modules/video_coding/codec_database.h (renamed from webrtc/modules/video_coding/main/source/codec_database.h)51
-rw-r--r--webrtc/modules/video_coding/codec_timer.cc96
-rw-r--r--webrtc/modules/video_coding/codec_timer.h57
-rw-r--r--webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_decoder.cc19
-rw-r--r--webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_decoder.h2
-rw-r--r--webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.cc43
-rw-r--r--webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.h2
-rw-r--r--webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu.cc9
-rw-r--r--webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu.h17
-rw-r--r--webrtc/modules/video_coding/codecs/h264/include/h264.h2
-rw-r--r--webrtc/modules/video_coding/codecs/i420/i420.cc55
-rw-r--r--webrtc/modules/video_coding/codecs/i420/include/i420.h145
-rw-r--r--webrtc/modules/video_coding/codecs/interface/mock/mock_video_codec_interface.h43
-rw-r--r--webrtc/modules/video_coding/codecs/interface/video_codec_interface.h28
-rw-r--r--webrtc/modules/video_coding/codecs/interface/video_error_codes.h9
-rw-r--r--webrtc/modules/video_coding/codecs/test/packet_manipulator.cc8
-rw-r--r--webrtc/modules/video_coding/codecs/test/packet_manipulator.h13
-rw-r--r--webrtc/modules/video_coding/codecs/test/packet_manipulator_unittest.cc27
-rw-r--r--webrtc/modules/video_coding/codecs/test/predictive_packet_manipulator.cc17
-rw-r--r--webrtc/modules/video_coding/codecs/test/predictive_packet_manipulator.h1
-rw-r--r--webrtc/modules/video_coding/codecs/test/stats.cc69
-rw-r--r--webrtc/modules/video_coding/codecs/test/stats.h2
-rw-r--r--webrtc/modules/video_coding/codecs/test/stats_unittest.cc16
-rw-r--r--webrtc/modules/video_coding/codecs/test/videoprocessor.cc67
-rw-r--r--webrtc/modules/video_coding/codecs/test/videoprocessor.h15
-rw-r--r--webrtc/modules/video_coding/codecs/test/videoprocessor_integrationtest.cc346
-rw-r--r--webrtc/modules/video_coding/codecs/test/videoprocessor_unittest.cc44
-rw-r--r--webrtc/modules/video_coding/codecs/tools/video_quality_measurement.cc320
-rw-r--r--webrtc/modules/video_coding/codecs/vp8/default_temporal_layers.cc26
-rw-r--r--webrtc/modules/video_coding/codecs/vp8/default_temporal_layers_unittest.cc157
-rw-r--r--webrtc/modules/video_coding/codecs/vp8/include/vp8.h9
-rw-r--r--webrtc/modules/video_coding/codecs/vp8/include/vp8_common_types.h14
-rw-r--r--webrtc/modules/video_coding/codecs/vp8/realtime_temporal_layers.cc43
-rw-r--r--webrtc/modules/video_coding/codecs/vp8/reference_picture_selection.cc20
-rw-r--r--webrtc/modules/video_coding/codecs/vp8/reference_picture_selection_unittest.cc40
-rw-r--r--webrtc/modules/video_coding/codecs/vp8/screenshare_layers.cc6
-rw-r--r--webrtc/modules/video_coding/codecs/vp8/screenshare_layers.h2
-rw-r--r--webrtc/modules/video_coding/codecs/vp8/screenshare_layers_unittest.cc4
-rw-r--r--webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.cc58
-rw-r--r--webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.h10
-rw-r--r--webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter_unittest.cc33
-rw-r--r--webrtc/modules/video_coding/codecs/vp8/simulcast_unittest.cc14
-rw-r--r--webrtc/modules/video_coding/codecs/vp8/simulcast_unittest.h282
-rw-r--r--webrtc/modules/video_coding/codecs/vp8/temporal_layers.h5
-rw-r--r--webrtc/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc22
-rw-r--r--webrtc/modules/video_coding/codecs/vp8/vp8_factory.h1
-rw-r--r--webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc262
-rw-r--r--webrtc/modules/video_coding/codecs/vp8/vp8_impl.h24
-rw-r--r--webrtc/modules/video_coding/codecs/vp8/vp8_sequence_coder.cc93
-rw-r--r--webrtc/modules/video_coding/codecs/vp9/include/vp9.h3
-rw-r--r--webrtc/modules/video_coding/codecs/vp9/screenshare_layers.cc93
-rw-r--r--webrtc/modules/video_coding/codecs/vp9/screenshare_layers.h66
-rw-r--r--webrtc/modules/video_coding/codecs/vp9/screenshare_layers_unittest.cc323
-rw-r--r--webrtc/modules/video_coding/codecs/vp9/vp9.gyp32
-rw-r--r--webrtc/modules/video_coding/codecs/vp9/vp9_dummy_impl.cc19
-rw-r--r--webrtc/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.cc2
-rw-r--r--webrtc/modules/video_coding/codecs/vp9/vp9_impl.cc411
-rw-r--r--webrtc/modules/video_coding/codecs/vp9/vp9_impl.h49
-rw-r--r--webrtc/modules/video_coding/content_metrics_processing.cc (renamed from webrtc/modules/video_coding/main/source/content_metrics_processing.cc)49
-rw-r--r--webrtc/modules/video_coding/content_metrics_processing.h (renamed from webrtc/modules/video_coding/main/source/content_metrics_processing.h)16
-rw-r--r--webrtc/modules/video_coding/decoding_state.cc (renamed from webrtc/modules/video_coding/main/source/decoding_state.cc)86
-rw-r--r--webrtc/modules/video_coding/decoding_state.h (renamed from webrtc/modules/video_coding/main/source/decoding_state.h)26
-rw-r--r--webrtc/modules/video_coding/decoding_state_unittest.cc (renamed from webrtc/modules/video_coding/main/source/decoding_state_unittest.cc)260
-rw-r--r--webrtc/modules/video_coding/encoded_frame.cc (renamed from webrtc/modules/video_coding/main/source/encoded_frame.cc)118
-rw-r--r--webrtc/modules/video_coding/encoded_frame.h132
-rw-r--r--webrtc/modules/video_coding/fec_tables_xor.h459
-rw-r--r--webrtc/modules/video_coding/frame_buffer.cc270
-rw-r--r--webrtc/modules/video_coding/frame_buffer.h (renamed from webrtc/modules/video_coding/main/source/frame_buffer.h)18
-rw-r--r--webrtc/modules/video_coding/generic_decoder.cc192
-rw-r--r--webrtc/modules/video_coding/generic_decoder.h111
-rw-r--r--webrtc/modules/video_coding/generic_encoder.cc (renamed from webrtc/modules/video_coding/main/source/generic_encoder.cc)103
-rw-r--r--webrtc/modules/video_coding/generic_encoder.h149
-rw-r--r--webrtc/modules/video_coding/include/mock/mock_vcm_callbacks.h (renamed from webrtc/modules/video_coding/main/interface/mock/mock_vcm_callbacks.h)15
-rw-r--r--webrtc/modules/video_coding/include/mock/mock_video_codec_interface.h81
-rw-r--r--webrtc/modules/video_coding/include/video_codec_interface.h99
-rw-r--r--webrtc/modules/video_coding/include/video_coding.h519
-rw-r--r--webrtc/modules/video_coding/include/video_coding_defines.h (renamed from webrtc/modules/video_coding/main/interface/video_coding_defines.h)99
-rw-r--r--webrtc/modules/video_coding/include/video_error_codes.h32
-rw-r--r--webrtc/modules/video_coding/inter_frame_delay.cc107
-rw-r--r--webrtc/modules/video_coding/inter_frame_delay.h67
-rw-r--r--webrtc/modules/video_coding/internal_defines.h41
-rw-r--r--webrtc/modules/video_coding/jitter_buffer.cc (renamed from webrtc/modules/video_coding/main/source/jitter_buffer.cc)213
-rw-r--r--webrtc/modules/video_coding/jitter_buffer.h (renamed from webrtc/modules/video_coding/main/source/jitter_buffer.h)47
-rw-r--r--webrtc/modules/video_coding/jitter_buffer_common.h (renamed from webrtc/modules/video_coding/main/source/jitter_buffer_common.h)52
-rw-r--r--webrtc/modules/video_coding/jitter_buffer_unittest.cc (renamed from webrtc/modules/video_coding/main/source/jitter_buffer_unittest.cc)488
-rw-r--r--webrtc/modules/video_coding/jitter_estimator.cc443
-rw-r--r--webrtc/modules/video_coding/jitter_estimator.h170
-rw-r--r--webrtc/modules/video_coding/jitter_estimator_tests.cc (renamed from webrtc/modules/video_coding/main/source/jitter_estimator_tests.cc)2
-rw-r--r--webrtc/modules/video_coding/main/interface/video_coding.h544
-rw-r--r--webrtc/modules/video_coding/main/source/OWNERS5
-rw-r--r--webrtc/modules/video_coding/main/source/codec_timer.cc136
-rw-r--r--webrtc/modules/video_coding/main/source/codec_timer.h62
-rw-r--r--webrtc/modules/video_coding/main/source/encoded_frame.h127
-rw-r--r--webrtc/modules/video_coding/main/source/fec_tables_xor.h6481
-rw-r--r--webrtc/modules/video_coding/main/source/frame_buffer.cc297
-rw-r--r--webrtc/modules/video_coding/main/source/generic_decoder.cc198
-rw-r--r--webrtc/modules/video_coding/main/source/generic_decoder.h112
-rw-r--r--webrtc/modules/video_coding/main/source/generic_encoder.h142
-rw-r--r--webrtc/modules/video_coding/main/source/inter_frame_delay.cc114
-rw-r--r--webrtc/modules/video_coding/main/source/inter_frame_delay.h66
-rw-r--r--webrtc/modules/video_coding/main/source/internal_defines.h68
-rw-r--r--webrtc/modules/video_coding/main/source/jitter_estimator.cc482
-rw-r--r--webrtc/modules/video_coding/main/source/jitter_estimator.h165
-rw-r--r--webrtc/modules/video_coding/main/source/media_opt_util.cc774
-rw-r--r--webrtc/modules/video_coding/main/source/media_opt_util.h364
-rw-r--r--webrtc/modules/video_coding/main/source/nack_fec_tables.h126
-rw-r--r--webrtc/modules/video_coding/main/source/packet.h59
-rw-r--r--webrtc/modules/video_coding/main/source/rtt_filter.cc202
-rw-r--r--webrtc/modules/video_coding/main/source/rtt_filter.h68
-rw-r--r--webrtc/modules/video_coding/main/test/video_source.h82
-rw-r--r--webrtc/modules/video_coding/media_opt_util.cc682
-rw-r--r--webrtc/modules/video_coding/media_opt_util.h361
-rw-r--r--webrtc/modules/video_coding/media_optimization.cc (renamed from webrtc/modules/video_coding/main/source/media_optimization.cc)65
-rw-r--r--webrtc/modules/video_coding/media_optimization.h (renamed from webrtc/modules/video_coding/main/source/media_optimization.h)26
-rw-r--r--webrtc/modules/video_coding/media_optimization_unittest.cc (renamed from webrtc/modules/video_coding/main/source/media_optimization_unittest.cc)3
-rw-r--r--webrtc/modules/video_coding/nack_fec_tables.h31
-rw-r--r--webrtc/modules/video_coding/packet.cc (renamed from webrtc/modules/video_coding/main/source/packet.cc)73
-rw-r--r--webrtc/modules/video_coding/packet.h59
-rw-r--r--webrtc/modules/video_coding/qm_select.cc (renamed from webrtc/modules/video_coding/main/source/qm_select.cc)191
-rw-r--r--webrtc/modules/video_coding/qm_select.h (renamed from webrtc/modules/video_coding/main/source/qm_select.h)65
-rw-r--r--webrtc/modules/video_coding/qm_select_data.h (renamed from webrtc/modules/video_coding/main/source/qm_select_data.h)178
-rw-r--r--webrtc/modules/video_coding/qm_select_unittest.cc (renamed from webrtc/modules/video_coding/main/source/qm_select_unittest.cc)234
-rw-r--r--webrtc/modules/video_coding/receiver.cc (renamed from webrtc/modules/video_coding/main/source/receiver.cc)59
-rw-r--r--webrtc/modules/video_coding/receiver.h (renamed from webrtc/modules/video_coding/main/source/receiver.h)26
-rw-r--r--webrtc/modules/video_coding/receiver_unittest.cc (renamed from webrtc/modules/video_coding/main/source/receiver_unittest.cc)143
-rw-r--r--webrtc/modules/video_coding/rtt_filter.cc165
-rw-r--r--webrtc/modules/video_coding/rtt_filter.h66
-rw-r--r--webrtc/modules/video_coding/session_info.cc (renamed from webrtc/modules/video_coding/main/source/session_info.cc)83
-rw-r--r--webrtc/modules/video_coding/session_info.h (renamed from webrtc/modules/video_coding/main/source/session_info.h)18
-rw-r--r--webrtc/modules/video_coding/session_info_unittest.cc (renamed from webrtc/modules/video_coding/main/source/session_info_unittest.cc)146
-rw-r--r--webrtc/modules/video_coding/test/plotJitterEstimate.m (renamed from webrtc/modules/video_coding/main/test/plotJitterEstimate.m)0
-rw-r--r--webrtc/modules/video_coding/test/plotReceiveTrace.m (renamed from webrtc/modules/video_coding/main/test/plotReceiveTrace.m)0
-rw-r--r--webrtc/modules/video_coding/test/plotTimingTest.m (renamed from webrtc/modules/video_coding/main/test/plotTimingTest.m)0
-rw-r--r--webrtc/modules/video_coding/test/receiver_tests.h (renamed from webrtc/modules/video_coding/main/test/receiver_tests.h)20
-rw-r--r--webrtc/modules/video_coding/test/release_test.h (renamed from webrtc/modules/video_coding/main/test/release_test.h)6
-rw-r--r--webrtc/modules/video_coding/test/rtp_player.cc (renamed from webrtc/modules/video_coding/main/test/rtp_player.cc)63
-rw-r--r--webrtc/modules/video_coding/test/rtp_player.h (renamed from webrtc/modules/video_coding/main/test/rtp_player.h)21
-rw-r--r--webrtc/modules/video_coding/test/stream_generator.cc (renamed from webrtc/modules/video_coding/main/source/test/stream_generator.cc)21
-rw-r--r--webrtc/modules/video_coding/test/stream_generator.h (renamed from webrtc/modules/video_coding/main/source/test/stream_generator.h)10
-rw-r--r--webrtc/modules/video_coding/test/subfigure.m (renamed from webrtc/modules/video_coding/main/test/subfigure.m)0
-rw-r--r--webrtc/modules/video_coding/test/test_util.cc (renamed from webrtc/modules/video_coding/main/test/test_util.cc)37
-rw-r--r--webrtc/modules/video_coding/test/test_util.h (renamed from webrtc/modules/video_coding/main/test/test_util.h)18
-rw-r--r--webrtc/modules/video_coding/test/tester_main.cc (renamed from webrtc/modules/video_coding/main/test/tester_main.cc)53
-rw-r--r--webrtc/modules/video_coding/test/vcm_payload_sink_factory.cc (renamed from webrtc/modules/video_coding/main/test/vcm_payload_sink_factory.cc)22
-rw-r--r--webrtc/modules/video_coding/test/vcm_payload_sink_factory.h (renamed from webrtc/modules/video_coding/main/test/vcm_payload_sink_factory.h)15
-rw-r--r--webrtc/modules/video_coding/test/video_rtp_play.cc (renamed from webrtc/modules/video_coding/main/test/video_rtp_play.cc)12
-rw-r--r--webrtc/modules/video_coding/test/video_source.h85
-rw-r--r--webrtc/modules/video_coding/timestamp_map.cc (renamed from webrtc/modules/video_coding/main/source/timestamp_map.cc)12
-rw-r--r--webrtc/modules/video_coding/timestamp_map.h (renamed from webrtc/modules/video_coding/main/source/timestamp_map.h)2
-rw-r--r--webrtc/modules/video_coding/timing.cc (renamed from webrtc/modules/video_coding/main/source/timing.cc)71
-rw-r--r--webrtc/modules/video_coding/timing.h (renamed from webrtc/modules/video_coding/main/source/timing.h)13
-rw-r--r--webrtc/modules/video_coding/timing_unittest.cc (renamed from webrtc/modules/video_coding/main/source/timing_unittest.cc)28
-rw-r--r--webrtc/modules/video_coding/utility/frame_dropper.cc529
-rw-r--r--webrtc/modules/video_coding/utility/frame_dropper.h96
-rw-r--r--webrtc/modules/video_coding/utility/include/frame_dropper.h98
-rw-r--r--webrtc/modules/video_coding/utility/include/mock/mock_frame_dropper.h41
-rw-r--r--webrtc/modules/video_coding/utility/include/vp8_header_parser.h77
-rw-r--r--webrtc/modules/video_coding/utility/mock/mock_frame_dropper.h34
-rw-r--r--webrtc/modules/video_coding/utility/moving_average.h (renamed from webrtc/modules/video_coding/utility/include/moving_average.h)18
-rw-r--r--webrtc/modules/video_coding/utility/qp_parser.cc4
-rw-r--r--webrtc/modules/video_coding/utility/qp_parser.h (renamed from webrtc/modules/video_coding/utility/include/qp_parser.h)2
-rw-r--r--webrtc/modules/video_coding/utility/quality_scaler.cc18
-rw-r--r--webrtc/modules/video_coding/utility/quality_scaler.h (renamed from webrtc/modules/video_coding/utility/include/quality_scaler.h)2
-rw-r--r--webrtc/modules/video_coding/utility/quality_scaler_unittest.cc34
-rw-r--r--webrtc/modules/video_coding/utility/video_coding_utility.gyp10
-rw-r--r--webrtc/modules/video_coding/utility/vp8_header_parser.cc29
-rw-r--r--webrtc/modules/video_coding/utility/vp8_header_parser.h68
-rw-r--r--webrtc/modules/video_coding/video_coding.gypi102
-rw-r--r--webrtc/modules/video_coding/video_coding_impl.cc (renamed from webrtc/modules/video_coding/main/source/video_coding_impl.cc)197
-rw-r--r--webrtc/modules/video_coding/video_coding_impl.h (renamed from webrtc/modules/video_coding/main/source/video_coding_impl.h)45
-rw-r--r--webrtc/modules/video_coding/video_coding_robustness_unittest.cc (renamed from webrtc/modules/video_coding/main/source/video_coding_robustness_unittest.cc)112
-rw-r--r--webrtc/modules/video_coding/video_coding_test.gypi16
-rw-r--r--webrtc/modules/video_coding/video_receiver.cc (renamed from webrtc/modules/video_coding/main/source/video_receiver.cc)141
-rw-r--r--webrtc/modules/video_coding/video_receiver_unittest.cc (renamed from webrtc/modules/video_coding/main/source/video_receiver_unittest.cc)20
-rw-r--r--webrtc/modules/video_coding/video_sender.cc (renamed from webrtc/modules/video_coding/main/source/video_sender.cc)60
-rw-r--r--webrtc/modules/video_coding/video_sender_unittest.cc (renamed from webrtc/modules/video_coding/main/source/video_sender_unittest.cc)76
179 files changed, 9323 insertions, 14889 deletions
diff --git a/webrtc/modules/video_coding/BUILD.gn b/webrtc/modules/video_coding/BUILD.gn
index 9e8cd47e53..32ac627ed2 100644
--- a/webrtc/modules/video_coding/BUILD.gn
+++ b/webrtc/modules/video_coding/BUILD.gn
@@ -10,57 +10,57 @@ import("../../build/webrtc.gni")
source_set("video_coding") {
sources = [
- "main/interface/video_coding.h",
- "main/interface/video_coding_defines.h",
- "main/source/codec_database.cc",
- "main/source/codec_database.h",
- "main/source/codec_timer.cc",
- "main/source/codec_timer.h",
- "main/source/content_metrics_processing.cc",
- "main/source/content_metrics_processing.h",
- "main/source/decoding_state.cc",
- "main/source/decoding_state.h",
- "main/source/encoded_frame.cc",
- "main/source/encoded_frame.h",
- "main/source/fec_tables_xor.h",
- "main/source/frame_buffer.cc",
- "main/source/frame_buffer.h",
- "main/source/generic_decoder.cc",
- "main/source/generic_decoder.h",
- "main/source/generic_encoder.cc",
- "main/source/generic_encoder.h",
- "main/source/inter_frame_delay.cc",
- "main/source/inter_frame_delay.h",
- "main/source/internal_defines.h",
- "main/source/jitter_buffer.cc",
- "main/source/jitter_buffer.h",
- "main/source/jitter_buffer_common.h",
- "main/source/jitter_estimator.cc",
- "main/source/jitter_estimator.h",
- "main/source/media_opt_util.cc",
- "main/source/media_opt_util.h",
- "main/source/media_optimization.cc",
- "main/source/media_optimization.h",
- "main/source/nack_fec_tables.h",
- "main/source/packet.cc",
- "main/source/packet.h",
- "main/source/qm_select.cc",
- "main/source/qm_select.h",
- "main/source/qm_select_data.h",
- "main/source/receiver.cc",
- "main/source/receiver.h",
- "main/source/rtt_filter.cc",
- "main/source/rtt_filter.h",
- "main/source/session_info.cc",
- "main/source/session_info.h",
- "main/source/timestamp_map.cc",
- "main/source/timestamp_map.h",
- "main/source/timing.cc",
- "main/source/timing.h",
- "main/source/video_coding_impl.cc",
- "main/source/video_coding_impl.h",
- "main/source/video_receiver.cc",
- "main/source/video_sender.cc",
+ "codec_database.cc",
+ "codec_database.h",
+ "codec_timer.cc",
+ "codec_timer.h",
+ "content_metrics_processing.cc",
+ "content_metrics_processing.h",
+ "decoding_state.cc",
+ "decoding_state.h",
+ "encoded_frame.cc",
+ "encoded_frame.h",
+ "fec_tables_xor.h",
+ "frame_buffer.cc",
+ "frame_buffer.h",
+ "generic_decoder.cc",
+ "generic_decoder.h",
+ "generic_encoder.cc",
+ "generic_encoder.h",
+ "include/video_coding.h",
+ "include/video_coding_defines.h",
+ "inter_frame_delay.cc",
+ "inter_frame_delay.h",
+ "internal_defines.h",
+ "jitter_buffer.cc",
+ "jitter_buffer.h",
+ "jitter_buffer_common.h",
+ "jitter_estimator.cc",
+ "jitter_estimator.h",
+ "media_opt_util.cc",
+ "media_opt_util.h",
+ "media_optimization.cc",
+ "media_optimization.h",
+ "nack_fec_tables.h",
+ "packet.cc",
+ "packet.h",
+ "qm_select.cc",
+ "qm_select.h",
+ "qm_select_data.h",
+ "receiver.cc",
+ "receiver.h",
+ "rtt_filter.cc",
+ "rtt_filter.h",
+ "session_info.cc",
+ "session_info.h",
+ "timestamp_map.cc",
+ "timestamp_map.h",
+ "timing.cc",
+ "timing.h",
+ "video_coding_impl.cc",
+ "video_coding_impl.h",
+ "video_receiver.cc",
+ "video_sender.cc",
]
configs += [ "../..:common_config" ]
@@ -94,14 +94,14 @@ source_set("video_coding") {
source_set("video_coding_utility") {
sources = [
"utility/frame_dropper.cc",
- "utility/include/frame_dropper.h",
- "utility/include/moving_average.h",
- "utility/include/qp_parser.h",
- "utility/include/quality_scaler.h",
- "utility/include/vp8_header_parser.h",
+ "utility/frame_dropper.h",
+ "utility/moving_average.h",
"utility/qp_parser.cc",
+ "utility/qp_parser.h",
"utility/quality_scaler.cc",
+ "utility/quality_scaler.h",
"utility/vp8_header_parser.cc",
+ "utility/vp8_header_parser.h",
]
configs += [ "../..:common_config" ]
@@ -136,6 +136,18 @@ source_set("webrtc_h264") {
deps = [
"../../system_wrappers",
]
+
+ if (use_third_party_h264) {
+ # Dependency added so that variables use_openh264 and ffmpeg_branding are
+ # recognized build arguments (avoid "Build argument has no effect" error).
+ # The variables and dependencies will be used for real as soon as
+ # https://codereview.webrtc.org/1306813009/ lands. In the meantime, the
+ # build arguments are to be used by waterfall/trybots.
+ deps += [
+ "//third_party/ffmpeg:ffmpeg",
+ "//third_party/openh264:encoder",
+ ]
+ }
}
# TODO(tkchin): Source set for webrtc_h264_video_toolbox. Currently not
@@ -209,19 +221,15 @@ source_set("webrtc_vp8") {
}
source_set("webrtc_vp9") {
- if (rtc_build_vp9) {
- sources = [
- "codecs/vp9/include/vp9.h",
- "codecs/vp9/vp9_frame_buffer_pool.cc",
- "codecs/vp9/vp9_frame_buffer_pool.h",
- "codecs/vp9/vp9_impl.cc",
- "codecs/vp9/vp9_impl.h",
- ]
- } else {
- sources = [
- "codecs/vp9/vp9_dummy_impl.cc",
- ]
- }
+ sources = [
+ "codecs/vp9/include/vp9.h",
+ "codecs/vp9/screenshare_layers.cc",
+ "codecs/vp9/screenshare_layers.h",
+ "codecs/vp9/vp9_frame_buffer_pool.cc",
+ "codecs/vp9/vp9_frame_buffer_pool.h",
+ "codecs/vp9/vp9_impl.cc",
+ "codecs/vp9/vp9_impl.h",
+ ]
configs += [ "../..:common_config" ]
public_configs = [ "../..:common_inherited_config" ]
diff --git a/webrtc/modules/video_coding/OWNERS b/webrtc/modules/video_coding/OWNERS
index f452c9ed83..389d632dfd 100644
--- a/webrtc/modules/video_coding/OWNERS
+++ b/webrtc/modules/video_coding/OWNERS
@@ -1,4 +1,9 @@
stefan@webrtc.org
marpan@webrtc.org
+# These are for the common case of adding or renaming files. If you're doing
+# structural changes, please get a review from a reviewer in this file.
+per-file *.gyp=*
+per-file *.gypi=*
+
per-file BUILD.gn=kjellander@webrtc.org
diff --git a/webrtc/modules/video_coding/main/source/codec_database.cc b/webrtc/modules/video_coding/codec_database.cc
index bfdc609e3c..1fae435bab 100644
--- a/webrtc/modules/video_coding/main/source/codec_database.cc
+++ b/webrtc/modules/video_coding/codec_database.cc
@@ -8,26 +8,18 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/video_coding/main/source/codec_database.h"
+#include "webrtc/modules/video_coding/codec_database.h"
#include <assert.h>
#include "webrtc/base/checks.h"
+#include "webrtc/base/logging.h"
#include "webrtc/engine_configurations.h"
-#ifdef VIDEOCODEC_H264
#include "webrtc/modules/video_coding/codecs/h264/include/h264.h"
-#endif
-#ifdef VIDEOCODEC_I420
#include "webrtc/modules/video_coding/codecs/i420/include/i420.h"
-#endif
-#ifdef VIDEOCODEC_VP8
#include "webrtc/modules/video_coding/codecs/vp8/include/vp8.h"
-#endif
-#ifdef VIDEOCODEC_VP9
#include "webrtc/modules/video_coding/codecs/vp9/include/vp9.h"
-#endif
-#include "webrtc/modules/video_coding/main/source/internal_defines.h"
-#include "webrtc/system_wrappers/include/logging.h"
+#include "webrtc/modules/video_coding/internal_defines.h"
namespace {
const size_t kDefaultPayloadSize = 1440;
@@ -74,9 +66,9 @@ VideoCodecH264 VideoEncoder::GetDefaultH264Settings() {
h264_settings.profile = kProfileBase;
h264_settings.frameDroppingOn = true;
h264_settings.keyFrameInterval = 3000;
- h264_settings.spsData = NULL;
+ h264_settings.spsData = nullptr;
h264_settings.spsLen = 0;
- h264_settings.ppsData = NULL;
+ h264_settings.ppsData = nullptr;
h264_settings.ppsLen = 0;
return h264_settings;
@@ -93,12 +85,9 @@ VCMDecoderMapItem::VCMDecoderMapItem(VideoCodec* settings,
VCMExtDecoderMapItem::VCMExtDecoderMapItem(
VideoDecoder* external_decoder_instance,
- uint8_t payload_type,
- bool internal_render_timing)
+ uint8_t payload_type)
: payload_type(payload_type),
- external_decoder_instance(external_decoder_instance),
- internal_render_timing(internal_render_timing) {
-}
+ external_decoder_instance(external_decoder_instance) {}
VCMCodecDataBase::VCMCodecDataBase(
VideoEncoderRateObserver* encoder_rate_observer,
@@ -110,35 +99,27 @@ VCMCodecDataBase::VCMCodecDataBase(
send_codec_(),
receive_codec_(),
encoder_payload_type_(0),
- external_encoder_(NULL),
+ external_encoder_(nullptr),
internal_source_(false),
encoder_rate_observer_(encoder_rate_observer),
encoded_frame_callback_(encoded_frame_callback),
- ptr_decoder_(NULL),
+ ptr_decoder_(nullptr),
dec_map_(),
dec_external_map_() {}
VCMCodecDataBase::~VCMCodecDataBase() {
- ResetSender();
- ResetReceiver();
-}
-
-int VCMCodecDataBase::NumberOfCodecs() {
- return VCM_NUM_VIDEO_CODECS_AVAILABLE;
+ DeleteEncoder();
+ ReleaseDecoder(ptr_decoder_);
+ for (auto& kv : dec_map_)
+ delete kv.second;
+ for (auto& kv : dec_external_map_)
+ delete kv.second;
}
-bool VCMCodecDataBase::Codec(int list_id,
- VideoCodec* settings) {
- if (!settings) {
- return false;
- }
- if (list_id >= VCM_NUM_VIDEO_CODECS_AVAILABLE) {
- return false;
- }
+void VCMCodecDataBase::Codec(VideoCodecType codec_type, VideoCodec* settings) {
memset(settings, 0, sizeof(VideoCodec));
- switch (list_id) {
-#ifdef VIDEOCODEC_VP8
- case VCM_VP8_IDX: {
+ switch (codec_type) {
+ case kVideoCodecVP8:
strncpy(settings->plName, "VP8", 4);
settings->codecType = kVideoCodecVP8;
// 96 to 127 dynamic payload types for video codecs.
@@ -152,11 +133,8 @@ bool VCMCodecDataBase::Codec(int list_id,
settings->numberOfSimulcastStreams = 0;
settings->qpMax = 56;
settings->codecSpecific.VP8 = VideoEncoder::GetDefaultVp8Settings();
- return true;
- }
-#endif
-#ifdef VIDEOCODEC_VP9
- case VCM_VP9_IDX: {
+ return;
+ case kVideoCodecVP9:
strncpy(settings->plName, "VP9", 4);
settings->codecType = kVideoCodecVP9;
// 96 to 127 dynamic payload types for video codecs.
@@ -170,11 +148,8 @@ bool VCMCodecDataBase::Codec(int list_id,
settings->numberOfSimulcastStreams = 0;
settings->qpMax = 56;
settings->codecSpecific.VP9 = VideoEncoder::GetDefaultVp9Settings();
- return true;
- }
-#endif
-#ifdef VIDEOCODEC_H264
- case VCM_H264_IDX: {
+ return;
+ case kVideoCodecH264:
strncpy(settings->plName, "H264", 5);
settings->codecType = kVideoCodecH264;
// 96 to 127 dynamic payload types for video codecs.
@@ -188,11 +163,8 @@ bool VCMCodecDataBase::Codec(int list_id,
settings->numberOfSimulcastStreams = 0;
settings->qpMax = 56;
settings->codecSpecific.H264 = VideoEncoder::GetDefaultH264Settings();
- return true;
- }
-#endif
-#ifdef VIDEOCODEC_I420
- case VCM_I420_IDX: {
+ return;
+ case kVideoCodecI420:
strncpy(settings->plName, "I420", 5);
settings->codecType = kVideoCodecI420;
// 96 to 127 dynamic payload types for video codecs.
@@ -207,32 +179,14 @@ bool VCMCodecDataBase::Codec(int list_id,
settings->height = VCM_DEFAULT_CODEC_HEIGHT;
settings->minBitrate = VCM_MIN_BITRATE;
settings->numberOfSimulcastStreams = 0;
- return true;
- }
-#endif
- default: {
- return false;
- }
- }
-}
-
-bool VCMCodecDataBase::Codec(VideoCodecType codec_type,
- VideoCodec* settings) {
- for (int i = 0; i < VCMCodecDataBase::NumberOfCodecs(); i++) {
- const bool ret = VCMCodecDataBase::Codec(i, settings);
- if (!ret) {
- return false;
- }
- if (codec_type == settings->codecType) {
- return true;
- }
+ return;
+ case kVideoCodecRED:
+ case kVideoCodecULPFEC:
+ case kVideoCodecGeneric:
+ case kVideoCodecUnknown:
+ RTC_NOTREACHED();
+ return;
}
- return false;
-}
-
-void VCMCodecDataBase::ResetSender() {
- DeleteEncoder();
- periodic_key_frames_ = false;
}
// Assuming only one registered encoder - since only one used, no need for more.
@@ -264,8 +218,9 @@ bool VCMCodecDataBase::SetSendCodec(const VideoCodec* send_codec,
if (new_send_codec.maxBitrate == 0) {
// max is one bit per pixel
new_send_codec.maxBitrate = (static_cast<int>(send_codec->height) *
- static_cast<int>(send_codec->width) *
- static_cast<int>(send_codec->maxFramerate)) / 1000;
+ static_cast<int>(send_codec->width) *
+ static_cast<int>(send_codec->maxFramerate)) /
+ 1000;
if (send_codec->startBitrate > new_send_codec.maxBitrate) {
// But if the user tries to set a higher start bit rate we will
// increase the max accordingly.
@@ -328,8 +283,8 @@ VideoCodecType VCMCodecDataBase::SendCodec() const {
return send_codec_.codecType;
}
-bool VCMCodecDataBase::DeregisterExternalEncoder(
- uint8_t payload_type, bool* was_send_codec) {
+bool VCMCodecDataBase::DeregisterExternalEncoder(uint8_t payload_type,
+ bool* was_send_codec) {
assert(was_send_codec);
*was_send_codec = false;
if (encoder_payload_type_ != payload_type) {
@@ -342,15 +297,14 @@ bool VCMCodecDataBase::DeregisterExternalEncoder(
*was_send_codec = true;
}
encoder_payload_type_ = 0;
- external_encoder_ = NULL;
+ external_encoder_ = nullptr;
internal_source_ = false;
return true;
}
-void VCMCodecDataBase::RegisterExternalEncoder(
- VideoEncoder* external_encoder,
- uint8_t payload_type,
- bool internal_source) {
+void VCMCodecDataBase::RegisterExternalEncoder(VideoEncoder* external_encoder,
+ uint8_t payload_type,
+ bool internal_source) {
// Since only one encoder can be used at a given time, only one external
// encoder can be registered/used.
external_encoder_ = external_encoder;
@@ -360,9 +314,8 @@ void VCMCodecDataBase::RegisterExternalEncoder(
}
bool VCMCodecDataBase::RequiresEncoderReset(const VideoCodec& new_send_codec) {
- if (ptr_encoder_ == NULL) {
+ if (!ptr_encoder_)
return true;
- }
// Does not check startBitrate or maxFramerate
if (new_send_codec.codecType != send_codec_.codecType ||
@@ -419,8 +372,7 @@ bool VCMCodecDataBase::RequiresEncoderReset(const VideoCodec& new_send_codec) {
++i) {
if (memcmp(&new_send_codec.simulcastStream[i],
&send_codec_.simulcastStream[i],
- sizeof(new_send_codec.simulcastStream[i])) !=
- 0) {
+ sizeof(new_send_codec.simulcastStream[i])) != 0) {
return true;
}
}
@@ -440,22 +392,6 @@ bool VCMCodecDataBase::SetPeriodicKeyFrames(bool enable) {
return true;
}
-void VCMCodecDataBase::ResetReceiver() {
- ReleaseDecoder(ptr_decoder_);
- ptr_decoder_ = NULL;
- memset(&receive_codec_, 0, sizeof(VideoCodec));
- while (!dec_map_.empty()) {
- DecoderMap::iterator it = dec_map_.begin();
- delete (*it).second;
- dec_map_.erase(it);
- }
- while (!dec_external_map_.empty()) {
- ExternalDecoderMap::iterator external_it = dec_external_map_.begin();
- delete (*external_it).second;
- dec_external_map_.erase(external_it);
- }
-}
-
bool VCMCodecDataBase::DeregisterExternalDecoder(uint8_t payload_type) {
ExternalDecoderMap::iterator it = dec_external_map_.find(payload_type);
if (it == dec_external_map_.end()) {
@@ -465,43 +401,36 @@ bool VCMCodecDataBase::DeregisterExternalDecoder(uint8_t payload_type) {
// We can't use payload_type to check if the decoder is currently in use,
// because payload type may be out of date (e.g. before we decode the first
// frame after RegisterReceiveCodec)
- if (ptr_decoder_ != NULL &&
- &ptr_decoder_->_decoder == (*it).second->external_decoder_instance) {
+ if (ptr_decoder_ != nullptr &&
+ ptr_decoder_->_decoder == (*it).second->external_decoder_instance) {
// Release it if it was registered and in use.
ReleaseDecoder(ptr_decoder_);
- ptr_decoder_ = NULL;
+ ptr_decoder_ = nullptr;
}
DeregisterReceiveCodec(payload_type);
- delete (*it).second;
+ delete it->second;
dec_external_map_.erase(it);
return true;
}
// Add the external encoder object to the list of external decoders.
// Won't be registered as a receive codec until RegisterReceiveCodec is called.
-bool VCMCodecDataBase::RegisterExternalDecoder(
- VideoDecoder* external_decoder,
- uint8_t payload_type,
- bool internal_render_timing) {
+void VCMCodecDataBase::RegisterExternalDecoder(VideoDecoder* external_decoder,
+ uint8_t payload_type) {
// Check if payload value already exists, if so - erase old and insert new.
- VCMExtDecoderMapItem* ext_decoder = new VCMExtDecoderMapItem(
- external_decoder, payload_type, internal_render_timing);
- if (!ext_decoder) {
- return false;
- }
+ VCMExtDecoderMapItem* ext_decoder =
+ new VCMExtDecoderMapItem(external_decoder, payload_type);
DeregisterExternalDecoder(payload_type);
dec_external_map_[payload_type] = ext_decoder;
- return true;
}
bool VCMCodecDataBase::DecoderRegistered() const {
return !dec_map_.empty();
}
-bool VCMCodecDataBase::RegisterReceiveCodec(
- const VideoCodec* receive_codec,
- int number_of_cores,
- bool require_key_frame) {
+bool VCMCodecDataBase::RegisterReceiveCodec(const VideoCodec* receive_codec,
+ int number_of_cores,
+ bool require_key_frame) {
if (number_of_cores < 0) {
return false;
}
@@ -511,20 +440,17 @@ bool VCMCodecDataBase::RegisterReceiveCodec(
return false;
}
VideoCodec* new_receive_codec = new VideoCodec(*receive_codec);
- dec_map_[receive_codec->plType] = new VCMDecoderMapItem(new_receive_codec,
- number_of_cores,
- require_key_frame);
+ dec_map_[receive_codec->plType] = new VCMDecoderMapItem(
+ new_receive_codec, number_of_cores, require_key_frame);
return true;
}
-bool VCMCodecDataBase::DeregisterReceiveCodec(
- uint8_t payload_type) {
+bool VCMCodecDataBase::DeregisterReceiveCodec(uint8_t payload_type) {
DecoderMap::iterator it = dec_map_.find(payload_type);
if (it == dec_map_.end()) {
return false;
}
- VCMDecoderMapItem* dec_item = (*it).second;
- delete dec_item;
+ delete it->second;
dec_map_.erase(it);
if (receive_codec_.plType == payload_type) {
// This codec is currently in use.
@@ -550,49 +476,50 @@ VideoCodecType VCMCodecDataBase::ReceiveCodec() const {
}
VCMGenericDecoder* VCMCodecDataBase::GetDecoder(
- uint8_t payload_type, VCMDecodedFrameCallback* decoded_frame_callback) {
+ const VCMEncodedFrame& frame,
+ VCMDecodedFrameCallback* decoded_frame_callback) {
+ uint8_t payload_type = frame.PayloadType();
if (payload_type == receive_codec_.plType || payload_type == 0) {
return ptr_decoder_;
}
// Check for exisitng decoder, if exists - delete.
if (ptr_decoder_) {
ReleaseDecoder(ptr_decoder_);
- ptr_decoder_ = NULL;
+ ptr_decoder_ = nullptr;
memset(&receive_codec_, 0, sizeof(VideoCodec));
}
- ptr_decoder_ = CreateAndInitDecoder(payload_type, &receive_codec_);
+ ptr_decoder_ = CreateAndInitDecoder(frame, &receive_codec_);
if (!ptr_decoder_) {
- return NULL;
+ return nullptr;
}
VCMReceiveCallback* callback = decoded_frame_callback->UserReceiveCallback();
- if (callback) callback->OnIncomingPayloadType(receive_codec_.plType);
- if (ptr_decoder_->RegisterDecodeCompleteCallback(decoded_frame_callback)
- < 0) {
+ if (callback)
+ callback->OnIncomingPayloadType(receive_codec_.plType);
+ if (ptr_decoder_->RegisterDecodeCompleteCallback(decoded_frame_callback) <
+ 0) {
ReleaseDecoder(ptr_decoder_);
- ptr_decoder_ = NULL;
+ ptr_decoder_ = nullptr;
memset(&receive_codec_, 0, sizeof(VideoCodec));
- return NULL;
+ return nullptr;
}
return ptr_decoder_;
}
void VCMCodecDataBase::ReleaseDecoder(VCMGenericDecoder* decoder) const {
if (decoder) {
- assert(&decoder->_decoder);
+ assert(decoder->_decoder);
decoder->Release();
if (!decoder->External()) {
- delete &decoder->_decoder;
+ delete decoder->_decoder;
}
delete decoder;
}
}
-bool VCMCodecDataBase::SupportsRenderScheduling() const {
- const VCMExtDecoderMapItem* ext_item = FindExternalDecoderItem(
- receive_codec_.plType);
- if (ext_item == nullptr)
+bool VCMCodecDataBase::PrefersLateDecoding() const {
+ if (!ptr_decoder_)
return true;
- return ext_item->internal_render_timing;
+ return ptr_decoder_->PrefersLateDecoding();
}
bool VCMCodecDataBase::MatchesCurrentResolution(int width, int height) const {
@@ -600,33 +527,43 @@ bool VCMCodecDataBase::MatchesCurrentResolution(int width, int height) const {
}
VCMGenericDecoder* VCMCodecDataBase::CreateAndInitDecoder(
- uint8_t payload_type,
+ const VCMEncodedFrame& frame,
VideoCodec* new_codec) const {
+ uint8_t payload_type = frame.PayloadType();
assert(new_codec);
const VCMDecoderMapItem* decoder_item = FindDecoderItem(payload_type);
if (!decoder_item) {
LOG(LS_ERROR) << "Can't find a decoder associated with payload type: "
<< static_cast<int>(payload_type);
- return NULL;
+ return nullptr;
}
- VCMGenericDecoder* ptr_decoder = NULL;
+ VCMGenericDecoder* ptr_decoder = nullptr;
const VCMExtDecoderMapItem* external_dec_item =
FindExternalDecoderItem(payload_type);
if (external_dec_item) {
// External codec.
ptr_decoder = new VCMGenericDecoder(
- *external_dec_item->external_decoder_instance, true);
+ external_dec_item->external_decoder_instance, true);
} else {
// Create decoder.
ptr_decoder = CreateDecoder(decoder_item->settings->codecType);
}
if (!ptr_decoder)
- return NULL;
+ return nullptr;
+ // Copy over input resolutions to prevent codec reinitialization due to
+ // the first frame being of a different resolution than the database values.
+ // This is best effort, since there's no guarantee that width/height have been
+ // parsed yet (and may be zero).
+ if (frame.EncodedImage()._encodedWidth > 0 &&
+ frame.EncodedImage()._encodedHeight > 0) {
+ decoder_item->settings->width = frame.EncodedImage()._encodedWidth;
+ decoder_item->settings->height = frame.EncodedImage()._encodedHeight;
+ }
if (ptr_decoder->InitDecode(decoder_item->settings.get(),
decoder_item->number_of_cores) < 0) {
ReleaseDecoder(ptr_decoder);
- return NULL;
+ return nullptr;
}
memcpy(new_codec, decoder_item->settings.get(), sizeof(VideoCodec));
return ptr_decoder;
@@ -641,30 +578,22 @@ void VCMCodecDataBase::DeleteEncoder() {
VCMGenericDecoder* VCMCodecDataBase::CreateDecoder(VideoCodecType type) const {
switch (type) {
-#ifdef VIDEOCODEC_VP8
case kVideoCodecVP8:
- return new VCMGenericDecoder(*(VP8Decoder::Create()));
-#endif
-#ifdef VIDEOCODEC_VP9
+ return new VCMGenericDecoder(VP8Decoder::Create());
case kVideoCodecVP9:
- return new VCMGenericDecoder(*(VP9Decoder::Create()));
-#endif
-#ifdef VIDEOCODEC_I420
+ return new VCMGenericDecoder(VP9Decoder::Create());
case kVideoCodecI420:
- return new VCMGenericDecoder(*(new I420Decoder));
-#endif
-#ifdef VIDEOCODEC_H264
+ return new VCMGenericDecoder(new I420Decoder());
case kVideoCodecH264:
if (H264Decoder::IsSupported()) {
- return new VCMGenericDecoder(*(H264Decoder::Create()));
+ return new VCMGenericDecoder(H264Decoder::Create());
}
break;
-#endif
default:
break;
}
LOG(LS_WARNING) << "No internal decoder of this type exists.";
- return NULL;
+ return nullptr;
}
const VCMDecoderMapItem* VCMCodecDataBase::FindDecoderItem(
@@ -673,7 +602,7 @@ const VCMDecoderMapItem* VCMCodecDataBase::FindDecoderItem(
if (it != dec_map_.end()) {
return (*it).second;
}
- return NULL;
+ return nullptr;
}
const VCMExtDecoderMapItem* VCMCodecDataBase::FindExternalDecoderItem(
@@ -682,6 +611,6 @@ const VCMExtDecoderMapItem* VCMCodecDataBase::FindExternalDecoderItem(
if (it != dec_external_map_.end()) {
return (*it).second;
}
- return NULL;
+ return nullptr;
}
} // namespace webrtc
diff --git a/webrtc/modules/video_coding/main/source/codec_database.h b/webrtc/modules/video_coding/codec_database.h
index 93aa9c3ba8..62ec30a46e 100644
--- a/webrtc/modules/video_coding/main/source/codec_database.h
+++ b/webrtc/modules/video_coding/codec_database.h
@@ -8,16 +8,16 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_CODEC_DATABASE_H_
-#define WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_CODEC_DATABASE_H_
+#ifndef WEBRTC_MODULES_VIDEO_CODING_CODEC_DATABASE_H_
+#define WEBRTC_MODULES_VIDEO_CODING_CODEC_DATABASE_H_
#include <map>
#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
-#include "webrtc/modules/video_coding/main/interface/video_coding.h"
-#include "webrtc/modules/video_coding/main/source/generic_decoder.h"
-#include "webrtc/modules/video_coding/main/source/generic_encoder.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
+#include "webrtc/modules/video_coding/include/video_coding.h"
+#include "webrtc/modules/video_coding/generic_decoder.h"
+#include "webrtc/modules/video_coding/generic_encoder.h"
#include "webrtc/typedefs.h"
namespace webrtc {
@@ -36,12 +36,10 @@ struct VCMDecoderMapItem {
struct VCMExtDecoderMapItem {
public:
VCMExtDecoderMapItem(VideoDecoder* external_decoder_instance,
- uint8_t payload_type,
- bool internal_render_timing);
+ uint8_t payload_type);
uint8_t payload_type;
VideoDecoder* external_decoder_instance;
- bool internal_render_timing;
};
class VCMCodecDataBase {
@@ -51,16 +49,8 @@ class VCMCodecDataBase {
~VCMCodecDataBase();
// Sender Side
- // Returns the number of supported codecs (or -1 in case of error).
- static int NumberOfCodecs();
-
- // Returns the default settings for the codec with id |list_id|.
- static bool Codec(int list_id, VideoCodec* settings);
-
// Returns the default settings for the codec with type |codec_type|.
- static bool Codec(VideoCodecType codec_type, VideoCodec* settings);
-
- void ResetSender();
+ static void Codec(VideoCodecType codec_type, VideoCodec* settings);
// Sets the sender side codec and initiates the desired codec given the
// VideoCodec struct.
@@ -94,19 +84,12 @@ class VCMCodecDataBase {
bool SetPeriodicKeyFrames(bool enable);
- // Receiver Side
- void ResetReceiver();
-
// Deregisters an external decoder object specified by |payload_type|.
bool DeregisterExternalDecoder(uint8_t payload_type);
// Registers an external decoder object to the payload type |payload_type|.
- // |internal_render_timing| is set to true if the |external_decoder| has
- // built in rendering which is able to obey the render timestamps of the
- // encoded frames.
- bool RegisterExternalDecoder(VideoDecoder* external_decoder,
- uint8_t payload_type,
- bool internal_render_timing);
+ void RegisterExternalDecoder(VideoDecoder* external_decoder,
+ uint8_t payload_type);
bool DecoderRegistered() const;
@@ -128,16 +111,16 @@ class VCMCodecDataBase {
// NULL is returned if no encoder with the specified payload type was found
// and the function failed to create one.
VCMGenericDecoder* GetDecoder(
- uint8_t payload_type, VCMDecodedFrameCallback* decoded_frame_callback);
+ const VCMEncodedFrame& frame,
+ VCMDecodedFrameCallback* decoded_frame_callback);
// Deletes the memory of the decoder instance |decoder|. Used to delete
// deep copies returned by CreateDecoderCopy().
void ReleaseDecoder(VCMGenericDecoder* decoder) const;
- // Returns true if the currently active decoder supports render scheduling,
- // that is, it is able to render frames according to the render timestamp of
- // the encoded frames.
- bool SupportsRenderScheduling() const;
+ // Returns true if the currently active decoder prefer to decode frames late.
+ // That means that frames must be decoded near the render times stamp.
+ bool PrefersLateDecoding() const;
bool MatchesCurrentResolution(int width, int height) const;
@@ -145,7 +128,7 @@ class VCMCodecDataBase {
typedef std::map<uint8_t, VCMDecoderMapItem*> DecoderMap;
typedef std::map<uint8_t, VCMExtDecoderMapItem*> ExternalDecoderMap;
- VCMGenericDecoder* CreateAndInitDecoder(uint8_t payload_type,
+ VCMGenericDecoder* CreateAndInitDecoder(const VCMEncodedFrame& frame,
VideoCodec* new_codec) const;
// Determines whether a new codec has to be created or not.
@@ -181,4 +164,4 @@ class VCMCodecDataBase {
} // namespace webrtc
-#endif // WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_CODEC_DATABASE_H_
+#endif // WEBRTC_MODULES_VIDEO_CODING_CODEC_DATABASE_H_
diff --git a/webrtc/modules/video_coding/codec_timer.cc b/webrtc/modules/video_coding/codec_timer.cc
new file mode 100644
index 0000000000..60add8fc4b
--- /dev/null
+++ b/webrtc/modules/video_coding/codec_timer.cc
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/video_coding/codec_timer.h"
+
+#include <assert.h>
+
+namespace webrtc {
+
+// The first kIgnoredSampleCount samples will be ignored.
+static const int32_t kIgnoredSampleCount = 5;
+
+VCMCodecTimer::VCMCodecTimer()
+ : _filteredMax(0), _ignoredSampleCount(0), _shortMax(0), _history() {
+ Reset();
+}
+
+void VCMCodecTimer::Reset() {
+ _filteredMax = 0;
+ _ignoredSampleCount = 0;
+ _shortMax = 0;
+ for (int i = 0; i < MAX_HISTORY_SIZE; i++) {
+ _history[i].shortMax = 0;
+ _history[i].timeMs = -1;
+ }
+}
+
+// Update the max-value filter
+void VCMCodecTimer::MaxFilter(int32_t decodeTime, int64_t nowMs) {
+ if (_ignoredSampleCount >= kIgnoredSampleCount) {
+ UpdateMaxHistory(decodeTime, nowMs);
+ ProcessHistory(nowMs);
+ } else {
+ _ignoredSampleCount++;
+ }
+}
+
+void VCMCodecTimer::UpdateMaxHistory(int32_t decodeTime, int64_t now) {
+ if (_history[0].timeMs >= 0 && now - _history[0].timeMs < SHORT_FILTER_MS) {
+ if (decodeTime > _shortMax) {
+ _shortMax = decodeTime;
+ }
+ } else {
+ // Only add a new value to the history once a second
+ if (_history[0].timeMs == -1) {
+ // First, no shift
+ _shortMax = decodeTime;
+ } else {
+ // Shift
+ for (int i = (MAX_HISTORY_SIZE - 2); i >= 0; i--) {
+ _history[i + 1].shortMax = _history[i].shortMax;
+ _history[i + 1].timeMs = _history[i].timeMs;
+ }
+ }
+ if (_shortMax == 0) {
+ _shortMax = decodeTime;
+ }
+
+ _history[0].shortMax = _shortMax;
+ _history[0].timeMs = now;
+ _shortMax = 0;
+ }
+}
+
+void VCMCodecTimer::ProcessHistory(int64_t nowMs) {
+ _filteredMax = _shortMax;
+ if (_history[0].timeMs == -1) {
+ return;
+ }
+ for (int i = 0; i < MAX_HISTORY_SIZE; i++) {
+ if (_history[i].timeMs == -1) {
+ break;
+ }
+ if (nowMs - _history[i].timeMs > MAX_HISTORY_SIZE * SHORT_FILTER_MS) {
+ // This sample (and all samples after this) is too old
+ break;
+ }
+ if (_history[i].shortMax > _filteredMax) {
+ // This sample is the largest one this far into the history
+ _filteredMax = _history[i].shortMax;
+ }
+ }
+}
+
+// Get the maximum observed time within a time window
+int32_t VCMCodecTimer::RequiredDecodeTimeMs(FrameType /*frameType*/) const {
+ return _filteredMax;
+}
+} // namespace webrtc
diff --git a/webrtc/modules/video_coding/codec_timer.h b/webrtc/modules/video_coding/codec_timer.h
new file mode 100644
index 0000000000..8ebd82ab9c
--- /dev/null
+++ b/webrtc/modules/video_coding/codec_timer.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_CODEC_TIMER_H_
+#define WEBRTC_MODULES_VIDEO_CODING_CODEC_TIMER_H_
+
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/typedefs.h"
+
+namespace webrtc {
+
+// MAX_HISTORY_SIZE * SHORT_FILTER_MS defines the window size in milliseconds
+#define MAX_HISTORY_SIZE 10
+#define SHORT_FILTER_MS 1000
+
+class VCMShortMaxSample {
+ public:
+ VCMShortMaxSample() : shortMax(0), timeMs(-1) {}
+
+ int32_t shortMax;
+ int64_t timeMs;
+};
+
+class VCMCodecTimer {
+ public:
+ VCMCodecTimer();
+
+ // Updates the max filtered decode time.
+ void MaxFilter(int32_t newDecodeTimeMs, int64_t nowMs);
+
+ // Empty the list of timers.
+ void Reset();
+
+ // Get the required decode time in ms.
+ int32_t RequiredDecodeTimeMs(FrameType frameType) const;
+
+ private:
+ void UpdateMaxHistory(int32_t decodeTime, int64_t now);
+ void ProcessHistory(int64_t nowMs);
+
+ int32_t _filteredMax;
+ // The number of samples ignored so far.
+ int32_t _ignoredSampleCount;
+ int32_t _shortMax;
+ VCMShortMaxSample _history[MAX_HISTORY_SIZE];
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_CODING_CODEC_TIMER_H_
diff --git a/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_decoder.cc b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_decoder.cc
index 61ef80bbf1..6fee2e6f36 100644
--- a/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_decoder.cc
+++ b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_decoder.cc
@@ -16,7 +16,7 @@
#include "libyuv/convert.h"
#include "webrtc/base/checks.h"
#include "webrtc/base/logging.h"
-#include "webrtc/common_video/interface/video_frame_buffer.h"
+#include "webrtc/common_video/include/video_frame_buffer.h"
#include "webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu.h"
#include "webrtc/video_frame.h"
@@ -106,8 +106,7 @@ namespace webrtc {
H264VideoToolboxDecoder::H264VideoToolboxDecoder()
: callback_(nullptr),
video_format_(nullptr),
- decompression_session_(nullptr) {
-}
+ decompression_session_(nullptr) {}
H264VideoToolboxDecoder::~H264VideoToolboxDecoder() {
DestroyDecompressionSession();
@@ -129,8 +128,7 @@ int H264VideoToolboxDecoder::Decode(
CMSampleBufferRef sample_buffer = nullptr;
if (!H264AnnexBBufferToCMSampleBuffer(input_image._buffer,
- input_image._length,
- video_format_,
+ input_image._length, video_format_,
&sample_buffer)) {
return WEBRTC_VIDEO_CODEC_ERROR;
}
@@ -206,11 +204,8 @@ int H264VideoToolboxDecoder::ResetDecompressionSession() {
int64_t nv12type = kCVPixelFormatType_420YpCbCr8BiPlanarFullRange;
CFNumberRef pixel_format =
CFNumberCreate(nullptr, kCFNumberLongType, &nv12type);
- CFTypeRef values[attributes_size] = {
- kCFBooleanTrue,
- io_surface_value,
- pixel_format
- };
+ CFTypeRef values[attributes_size] = {kCFBooleanTrue, io_surface_value,
+ pixel_format};
CFDictionaryRef attributes =
internal::CreateCFDictionary(keys, values, attributes_size);
if (io_surface_value) {
@@ -266,6 +261,10 @@ void H264VideoToolboxDecoder::SetVideoFormat(
}
}
+const char* H264VideoToolboxDecoder::ImplementationName() const {
+ return "VideoToolbox";
+}
+
} // namespace webrtc
#endif // defined(WEBRTC_VIDEO_TOOLBOX_SUPPORTED)
diff --git a/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_decoder.h b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_decoder.h
index f54ddb9efd..6d64307a82 100644
--- a/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_decoder.h
+++ b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_decoder.h
@@ -45,6 +45,8 @@ class H264VideoToolboxDecoder : public H264Decoder {
int Reset() override;
+ const char* ImplementationName() const override;
+
private:
int ResetDecompressionSession();
void ConfigureDecompressionSession();
diff --git a/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.cc b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.cc
index d677f8b812..7df4ec74ba 100644
--- a/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.cc
+++ b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.cc
@@ -99,11 +99,7 @@ struct FrameEncodeParams {
int32_t h,
int64_t rtms,
uint32_t ts)
- : callback(cb),
- width(w),
- height(h),
- render_time_ms(rtms),
- timestamp(ts) {
+ : callback(cb), width(w), height(h), render_time_ms(rtms), timestamp(ts) {
if (csi) {
codec_specific_info = *csi;
} else {
@@ -146,9 +142,8 @@ bool CopyVideoFrameToPixelBuffer(const webrtc::VideoFrame& frame,
int ret = libyuv::I420ToNV12(
frame.buffer(webrtc::kYPlane), frame.stride(webrtc::kYPlane),
frame.buffer(webrtc::kUPlane), frame.stride(webrtc::kUPlane),
- frame.buffer(webrtc::kVPlane), frame.stride(webrtc::kVPlane),
- dst_y, dst_stride_y, dst_uv, dst_stride_uv,
- frame.width(), frame.height());
+ frame.buffer(webrtc::kVPlane), frame.stride(webrtc::kVPlane), dst_y,
+ dst_stride_y, dst_uv, dst_stride_uv, frame.width(), frame.height());
CVPixelBufferUnlockBaseAddress(pixel_buffer, 0);
if (ret) {
LOG(LS_ERROR) << "Error converting I420 VideoFrame to NV12 :" << ret;
@@ -188,10 +183,8 @@ void VTCompressionOutputCallback(void* encoder,
// TODO(tkchin): Allocate buffers through a pool.
rtc::scoped_ptr<rtc::Buffer> buffer(new rtc::Buffer());
rtc::scoped_ptr<webrtc::RTPFragmentationHeader> header;
- if (!H264CMSampleBufferToAnnexBBuffer(sample_buffer,
- is_keyframe,
- buffer.get(),
- header.accept())) {
+ if (!H264CMSampleBufferToAnnexBBuffer(sample_buffer, is_keyframe,
+ buffer.get(), header.accept())) {
return;
}
webrtc::EncodedImage frame(buffer->data(), buffer->size(), buffer->size());
@@ -215,8 +208,7 @@ void VTCompressionOutputCallback(void* encoder,
namespace webrtc {
H264VideoToolboxEncoder::H264VideoToolboxEncoder()
- : callback_(nullptr), compression_session_(nullptr) {
-}
+ : callback_(nullptr), compression_session_(nullptr) {}
H264VideoToolboxEncoder::~H264VideoToolboxEncoder() {
DestroyCompressionSession();
@@ -289,8 +281,8 @@ int H264VideoToolboxEncoder::Encode(
CMTimeMake(input_image.render_time_ms(), 1000);
CFDictionaryRef frame_properties = nullptr;
if (is_keyframe_required) {
- CFTypeRef keys[] = { kVTEncodeFrameOptionKey_ForceKeyFrame };
- CFTypeRef values[] = { kCFBooleanTrue };
+ CFTypeRef keys[] = {kVTEncodeFrameOptionKey_ForceKeyFrame};
+ CFTypeRef values[] = {kCFBooleanTrue};
frame_properties = internal::CreateCFDictionary(keys, values, 1);
}
rtc::scoped_ptr<internal::FrameEncodeParams> encode_params;
@@ -359,11 +351,8 @@ int H264VideoToolboxEncoder::ResetCompressionSession() {
int64_t nv12type = kCVPixelFormatType_420YpCbCr8BiPlanarFullRange;
CFNumberRef pixel_format =
CFNumberCreate(nullptr, kCFNumberLongType, &nv12type);
- CFTypeRef values[attributes_size] = {
- kCFBooleanTrue,
- io_surface_value,
- pixel_format
- };
+ CFTypeRef values[attributes_size] = {kCFBooleanTrue, io_surface_value,
+ pixel_format};
CFDictionaryRef source_attributes =
internal::CreateCFDictionary(keys, values, attributes_size);
if (io_surface_value) {
@@ -376,15 +365,11 @@ int H264VideoToolboxEncoder::ResetCompressionSession() {
}
OSStatus status = VTCompressionSessionCreate(
nullptr, // use default allocator
- width_,
- height_,
- kCMVideoCodecType_H264,
+ width_, height_, kCMVideoCodecType_H264,
nullptr, // use default encoder
source_attributes,
nullptr, // use default compressed data allocator
- internal::VTCompressionOutputCallback,
- this,
- &compression_session_);
+ internal::VTCompressionOutputCallback, this, &compression_session_);
if (source_attributes) {
CFRelease(source_attributes);
source_attributes = nullptr;
@@ -434,6 +419,10 @@ void H264VideoToolboxEncoder::DestroyCompressionSession() {
}
}
+const char* H264VideoToolboxEncoder::ImplementationName() const {
+ return "VideoToolbox";
+}
+
} // namespace webrtc
#endif // defined(WEBRTC_VIDEO_TOOLBOX_SUPPORTED)
diff --git a/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.h b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.h
index f4fb86fa04..269e0411b2 100644
--- a/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.h
+++ b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.h
@@ -48,6 +48,8 @@ class H264VideoToolboxEncoder : public H264Encoder {
int Release() override;
+ const char* ImplementationName() const override;
+
private:
int ResetCompressionSession();
void ConfigureCompressionSession();
diff --git a/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu.cc b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu.cc
index caca96d3d8..322c213f7b 100644
--- a/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu.cc
+++ b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu.cc
@@ -154,11 +154,10 @@ bool H264CMSampleBufferToAnnexBBuffer(
return true;
}
-bool H264AnnexBBufferToCMSampleBuffer(
- const uint8_t* annexb_buffer,
- size_t annexb_buffer_size,
- CMVideoFormatDescriptionRef video_format,
- CMSampleBufferRef* out_sample_buffer) {
+bool H264AnnexBBufferToCMSampleBuffer(const uint8_t* annexb_buffer,
+ size_t annexb_buffer_size,
+ CMVideoFormatDescriptionRef video_format,
+ CMSampleBufferRef* out_sample_buffer) {
RTC_DCHECK(annexb_buffer);
RTC_DCHECK(out_sample_buffer);
*out_sample_buffer = nullptr;
diff --git a/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu.h b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu.h
index 230dea94a0..31ef525816 100644
--- a/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu.h
+++ b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu.h
@@ -9,8 +9,8 @@
*
*/
-#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_H264_H264_VIDEO_TOOLBOX_NALU_H
-#define WEBRTC_MODULES_VIDEO_CODING_CODECS_H264_H264_VIDEO_TOOLBOX_NALU_H
+#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_H264_H264_VIDEO_TOOLBOX_NALU_H_
+#define WEBRTC_MODULES_VIDEO_CODING_CODECS_H264_H264_VIDEO_TOOLBOX_NALU_H_
#include "webrtc/modules/video_coding/codecs/h264/include/h264.h"
@@ -19,7 +19,7 @@
#include <CoreMedia/CoreMedia.h>
#include "webrtc/base/buffer.h"
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
namespace webrtc {
@@ -39,11 +39,10 @@ bool H264CMSampleBufferToAnnexBBuffer(
// If |is_keyframe| is true then |video_format| is ignored since the format will
// be read from the buffer. Otherwise |video_format| must be provided.
// Caller is responsible for releasing the created sample buffer.
-bool H264AnnexBBufferToCMSampleBuffer(
- const uint8_t* annexb_buffer,
- size_t annexb_buffer_size,
- CMVideoFormatDescriptionRef video_format,
- CMSampleBufferRef* out_sample_buffer);
+bool H264AnnexBBufferToCMSampleBuffer(const uint8_t* annexb_buffer,
+ size_t annexb_buffer_size,
+ CMVideoFormatDescriptionRef video_format,
+ CMSampleBufferRef* out_sample_buffer);
// Helper class for reading NALUs from an RTP Annex B buffer.
class AnnexBBufferReader final {
@@ -97,4 +96,4 @@ class AvccBufferWriter final {
} // namespace webrtc
#endif // defined(WEBRTC_VIDEO_TOOLBOX_SUPPORTED)
-#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_H264_H264_VIDEO_TOOLBOX_NALU_H
+#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_H264_H264_VIDEO_TOOLBOX_NALU_H_
diff --git a/webrtc/modules/video_coding/codecs/h264/include/h264.h b/webrtc/modules/video_coding/codecs/h264/include/h264.h
index 3f52839a6c..50ca57c1c9 100644
--- a/webrtc/modules/video_coding/codecs/h264/include/h264.h
+++ b/webrtc/modules/video_coding/codecs/h264/include/h264.h
@@ -23,7 +23,7 @@
#endif // defined(WEBRTC_IOS) || defined(WEBRTC_MAC)
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
namespace webrtc {
diff --git a/webrtc/modules/video_coding/codecs/i420/i420.cc b/webrtc/modules/video_coding/codecs/i420/i420.cc
index cf546a07a1..7f06b4cf7d 100644
--- a/webrtc/modules/video_coding/codecs/i420/i420.cc
+++ b/webrtc/modules/video_coding/codecs/i420/i420.cc
@@ -21,20 +21,19 @@ const size_t kI420HeaderSize = 4;
namespace webrtc {
-I420Encoder::I420Encoder() : _inited(false), _encodedImage(),
- _encodedCompleteCallback(NULL) {
-}
+I420Encoder::I420Encoder()
+ : _inited(false), _encodedImage(), _encodedCompleteCallback(NULL) {}
I420Encoder::~I420Encoder() {
_inited = false;
- delete [] _encodedImage._buffer;
+ delete[] _encodedImage._buffer;
}
int I420Encoder::Release() {
// Should allocate an encoded frame and then release it here, for that we
// actually need an init flag.
if (_encodedImage._buffer != NULL) {
- delete [] _encodedImage._buffer;
+ delete[] _encodedImage._buffer;
_encodedImage._buffer = NULL;
}
_inited = false;
@@ -53,7 +52,7 @@ int I420Encoder::InitEncode(const VideoCodec* codecSettings,
// Allocating encoded memory.
if (_encodedImage._buffer != NULL) {
- delete [] _encodedImage._buffer;
+ delete[] _encodedImage._buffer;
_encodedImage._buffer = NULL;
_encodedImage._size = 0;
}
@@ -101,18 +100,18 @@ int I420Encoder::Encode(const VideoFrame& inputImage,
kI420HeaderSize;
if (_encodedImage._size > req_length) {
// Reallocate buffer.
- delete [] _encodedImage._buffer;
+ delete[] _encodedImage._buffer;
_encodedImage._buffer = new uint8_t[req_length];
_encodedImage._size = req_length;
}
- uint8_t *buffer = _encodedImage._buffer;
+ uint8_t* buffer = _encodedImage._buffer;
buffer = InsertHeader(buffer, width, height);
- int ret_length = ExtractBuffer(inputImage, req_length - kI420HeaderSize,
- buffer);
+ int ret_length =
+ ExtractBuffer(inputImage, req_length - kI420HeaderSize, buffer);
if (ret_length < 0)
return WEBRTC_VIDEO_CODEC_MEMORY;
_encodedImage._length = ret_length + kI420HeaderSize;
@@ -121,7 +120,8 @@ int I420Encoder::Encode(const VideoFrame& inputImage,
return WEBRTC_VIDEO_CODEC_OK;
}
-uint8_t* I420Encoder::InsertHeader(uint8_t *buffer, uint16_t width,
+uint8_t* I420Encoder::InsertHeader(uint8_t* buffer,
+ uint16_t width,
uint16_t height) {
*buffer++ = static_cast<uint8_t>(width >> 8);
*buffer++ = static_cast<uint8_t>(width & 0xFF);
@@ -130,30 +130,29 @@ uint8_t* I420Encoder::InsertHeader(uint8_t *buffer, uint16_t width,
return buffer;
}
-int
-I420Encoder::RegisterEncodeCompleteCallback(EncodedImageCallback* callback) {
+int I420Encoder::RegisterEncodeCompleteCallback(
+ EncodedImageCallback* callback) {
_encodedCompleteCallback = callback;
return WEBRTC_VIDEO_CODEC_OK;
}
-
-I420Decoder::I420Decoder() : _decodedImage(), _width(0), _height(0),
- _inited(false), _decodeCompleteCallback(NULL) {
-}
+I420Decoder::I420Decoder()
+ : _decodedImage(),
+ _width(0),
+ _height(0),
+ _inited(false),
+ _decodeCompleteCallback(NULL) {}
I420Decoder::~I420Decoder() {
Release();
}
-int
-I420Decoder::Reset() {
+int I420Decoder::Reset() {
return WEBRTC_VIDEO_CODEC_OK;
}
-
-int
-I420Decoder::InitDecode(const VideoCodec* codecSettings,
- int /*numberOfCores */) {
+int I420Decoder::InitDecode(const VideoCodec* codecSettings,
+ int /*numberOfCores */) {
if (codecSettings == NULL) {
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
} else if (codecSettings->width < 1 || codecSettings->height < 1) {
@@ -165,7 +164,8 @@ I420Decoder::InitDecode(const VideoCodec* codecSettings,
return WEBRTC_VIDEO_CODEC_OK;
}
-int I420Decoder::Decode(const EncodedImage& inputImage, bool /*missingFrames*/,
+int I420Decoder::Decode(const EncodedImage& inputImage,
+ bool /*missingFrames*/,
const RTPFragmentationHeader* /*fragmentation*/,
const CodecSpecificInfo* /*codecSpecificInfo*/,
int64_t /*renderTimeMs*/) {
@@ -203,8 +203,8 @@ int I420Decoder::Decode(const EncodedImage& inputImage, bool /*missingFrames*/,
}
// Set decoded image parameters.
int half_width = (_width + 1) / 2;
- _decodedImage.CreateEmptyFrame(_width, _height,
- _width, half_width, half_width);
+ _decodedImage.CreateEmptyFrame(_width, _height, _width, half_width,
+ half_width);
// Converting from buffer to plane representation.
int ret = ConvertToI420(kI420, buffer, 0, 0, _width, _height, 0,
kVideoRotation_0, &_decodedImage);
@@ -218,7 +218,8 @@ int I420Decoder::Decode(const EncodedImage& inputImage, bool /*missingFrames*/,
}
const uint8_t* I420Decoder::ExtractHeader(const uint8_t* buffer,
- uint16_t* width, uint16_t* height) {
+ uint16_t* width,
+ uint16_t* height) {
*width = static_cast<uint16_t>(*buffer++) << 8;
*width |= *buffer++;
*height = static_cast<uint16_t>(*buffer++) << 8;
diff --git a/webrtc/modules/video_coding/codecs/i420/include/i420.h b/webrtc/modules/video_coding/codecs/i420/include/i420.h
index 8990ccf878..9f77845e96 100644
--- a/webrtc/modules/video_coding/codecs/i420/include/i420.h
+++ b/webrtc/modules/video_coding/codecs/i420/include/i420.h
@@ -8,12 +8,12 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_I420_MAIN_INTERFACE_I420_H_
-#define WEBRTC_MODULES_VIDEO_CODING_CODECS_I420_MAIN_INTERFACE_I420_H_
+#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_I420_INCLUDE_I420_H_
+#define WEBRTC_MODULES_VIDEO_CODING_CODECS_I420_INCLUDE_I420_H_
#include <vector>
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
#include "webrtc/typedefs.h"
namespace webrtc {
@@ -24,45 +24,45 @@ class I420Encoder : public VideoEncoder {
virtual ~I420Encoder();
-// Initialize the encoder with the information from the VideoCodec.
-//
-// Input:
-// - codecSettings : Codec settings.
-// - numberOfCores : Number of cores available for the encoder.
-// - maxPayloadSize : The maximum size each payload is allowed
-// to have. Usually MTU - overhead.
-//
-// Return value : WEBRTC_VIDEO_CODEC_OK if OK.
-// <0 - Error
+ // Initialize the encoder with the information from the VideoCodec.
+ //
+ // Input:
+ // - codecSettings : Codec settings.
+ // - numberOfCores : Number of cores available for the encoder.
+ // - maxPayloadSize : The maximum size each payload is allowed
+ // to have. Usually MTU - overhead.
+ //
+ // Return value : WEBRTC_VIDEO_CODEC_OK if OK.
+ // <0 - Error
int InitEncode(const VideoCodec* codecSettings,
int /*numberOfCores*/,
size_t /*maxPayloadSize*/) override;
-// "Encode" an I420 image (as a part of a video stream). The encoded image
-// will be returned to the user via the encode complete callback.
-//
-// Input:
-// - inputImage : Image to be encoded.
-// - codecSpecificInfo : Pointer to codec specific data.
-// - frameType : Frame type to be sent (Key /Delta).
-//
-// Return value : WEBRTC_VIDEO_CODEC_OK if OK.
-// <0 - Error
+ // "Encode" an I420 image (as a part of a video stream). The encoded image
+ // will be returned to the user via the encode complete callback.
+ //
+ // Input:
+ // - inputImage : Image to be encoded.
+ // - codecSpecificInfo : Pointer to codec specific data.
+ // - frameType : Frame type to be sent (Key /Delta).
+ //
+ // Return value : WEBRTC_VIDEO_CODEC_OK if OK.
+ // <0 - Error
int Encode(const VideoFrame& inputImage,
const CodecSpecificInfo* /*codecSpecificInfo*/,
const std::vector<FrameType>* /*frame_types*/) override;
-// Register an encode complete callback object.
-//
-// Input:
-// - callback : Callback object which handles encoded images.
-//
-// Return value : WEBRTC_VIDEO_CODEC_OK if OK, < 0 otherwise.
+ // Register an encode complete callback object.
+ //
+ // Input:
+ // - callback : Callback object which handles encoded images.
+ //
+ // Return value : WEBRTC_VIDEO_CODEC_OK if OK, < 0 otherwise.
int RegisterEncodeCompleteCallback(EncodedImageCallback* callback) override;
-// Free encoder memory.
-//
-// Return value : WEBRTC_VIDEO_CODEC_OK if OK, < 0 otherwise.
+ // Free encoder memory.
+ //
+ // Return value : WEBRTC_VIDEO_CODEC_OK if OK, < 0 otherwise.
int Release() override;
int SetRates(uint32_t /*newBitRate*/, uint32_t /*frameRate*/) override {
@@ -76,12 +76,13 @@ class I420Encoder : public VideoEncoder {
void OnDroppedFrame() override {}
private:
- static uint8_t* InsertHeader(uint8_t* buffer, uint16_t width,
+ static uint8_t* InsertHeader(uint8_t* buffer,
+ uint16_t width,
uint16_t height);
- bool _inited;
- EncodedImage _encodedImage;
- EncodedImageCallback* _encodedCompleteCallback;
+ bool _inited;
+ EncodedImage _encodedImage;
+ EncodedImageCallback* _encodedCompleteCallback;
}; // class I420Encoder
class I420Decoder : public VideoDecoder {
@@ -90,50 +91,50 @@ class I420Decoder : public VideoDecoder {
virtual ~I420Decoder();
-// Initialize the decoder.
-// The user must notify the codec of width and height values.
-//
-// Return value : WEBRTC_VIDEO_CODEC_OK.
-// <0 - Errors
+ // Initialize the decoder.
+ // The user must notify the codec of width and height values.
+ //
+ // Return value : WEBRTC_VIDEO_CODEC_OK.
+ // <0 - Errors
int InitDecode(const VideoCodec* codecSettings,
int /*numberOfCores*/) override;
-// Decode encoded image (as a part of a video stream). The decoded image
-// will be returned to the user through the decode complete callback.
-//
-// Input:
-// - inputImage : Encoded image to be decoded
-// - missingFrames : True if one or more frames have been lost
-// since the previous decode call.
-// - codecSpecificInfo : pointer to specific codec data
-// - renderTimeMs : Render time in Ms
-//
-// Return value : WEBRTC_VIDEO_CODEC_OK if OK
-// <0 - Error
+ // Decode encoded image (as a part of a video stream). The decoded image
+ // will be returned to the user through the decode complete callback.
+ //
+ // Input:
+ // - inputImage : Encoded image to be decoded
+ // - missingFrames : True if one or more frames have been lost
+ // since the previous decode call.
+ // - codecSpecificInfo : pointer to specific codec data
+ // - renderTimeMs : Render time in Ms
+ //
+ // Return value : WEBRTC_VIDEO_CODEC_OK if OK
+ // <0 - Error
int Decode(const EncodedImage& inputImage,
bool missingFrames,
const RTPFragmentationHeader* /*fragmentation*/,
const CodecSpecificInfo* /*codecSpecificInfo*/,
int64_t /*renderTimeMs*/) override;
-// Register a decode complete callback object.
-//
-// Input:
-// - callback : Callback object which handles decoded images.
-//
-// Return value : WEBRTC_VIDEO_CODEC_OK if OK, < 0 otherwise.
+ // Register a decode complete callback object.
+ //
+ // Input:
+ // - callback : Callback object which handles decoded images.
+ //
+ // Return value : WEBRTC_VIDEO_CODEC_OK if OK, < 0 otherwise.
int RegisterDecodeCompleteCallback(DecodedImageCallback* callback) override;
-// Free decoder memory.
-//
-// Return value : WEBRTC_VIDEO_CODEC_OK if OK.
-// <0 - Error
+ // Free decoder memory.
+ //
+ // Return value : WEBRTC_VIDEO_CODEC_OK if OK.
+ // <0 - Error
int Release() override;
-// Reset decoder state and prepare for a new call.
-//
-// Return value : WEBRTC_VIDEO_CODEC_OK.
-// <0 - Error
+ // Reset decoder state and prepare for a new call.
+ //
+ // Return value : WEBRTC_VIDEO_CODEC_OK.
+ // <0 - Error
int Reset() override;
private:
@@ -142,12 +143,12 @@ class I420Decoder : public VideoDecoder {
uint16_t* height);
VideoFrame _decodedImage;
- int _width;
- int _height;
- bool _inited;
- DecodedImageCallback* _decodeCompleteCallback;
+ int _width;
+ int _height;
+ bool _inited;
+ DecodedImageCallback* _decodeCompleteCallback;
}; // class I420Decoder
} // namespace webrtc
-#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_I420_MAIN_INTERFACE_I420_H_
+#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_I420_INCLUDE_I420_H_
diff --git a/webrtc/modules/video_coding/codecs/interface/mock/mock_video_codec_interface.h b/webrtc/modules/video_coding/codecs/interface/mock/mock_video_codec_interface.h
index 6c926d4794..d727e896ad 100644
--- a/webrtc/modules/video_coding/codecs/interface/mock/mock_video_codec_interface.h
+++ b/webrtc/modules/video_coding/codecs/interface/mock/mock_video_codec_interface.h
@@ -11,27 +11,32 @@
#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_INTERFACE_MOCK_MOCK_VIDEO_CODEC_INTERFACE_H_
#define WEBRTC_MODULES_VIDEO_CODING_CODECS_INTERFACE_MOCK_MOCK_VIDEO_CODEC_INTERFACE_H_
+#pragma message("WARNING: video_coding/codecs/interface is DEPRECATED; "
+ "use video_coding/include")
#include <string>
+#include <vector>
#include "testing/gmock/include/gmock/gmock.h"
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
#include "webrtc/typedefs.h"
namespace webrtc {
class MockEncodedImageCallback : public EncodedImageCallback {
public:
- MOCK_METHOD3(Encoded, int32_t(const EncodedImage& encodedImage,
- const CodecSpecificInfo* codecSpecificInfo,
- const RTPFragmentationHeader* fragmentation));
+ MOCK_METHOD3(Encoded,
+ int32_t(const EncodedImage& encodedImage,
+ const CodecSpecificInfo* codecSpecificInfo,
+ const RTPFragmentationHeader* fragmentation));
};
class MockVideoEncoder : public VideoEncoder {
public:
- MOCK_CONST_METHOD2(Version, int32_t(int8_t *version, int32_t length));
- MOCK_METHOD3(InitEncode, int32_t(const VideoCodec* codecSettings,
- int32_t numberOfCores,
- size_t maxPayloadSize));
+ MOCK_CONST_METHOD2(Version, int32_t(int8_t* version, int32_t length));
+ MOCK_METHOD3(InitEncode,
+ int32_t(const VideoCodec* codecSettings,
+ int32_t numberOfCores,
+ size_t maxPayloadSize));
MOCK_METHOD3(Encode,
int32_t(const VideoFrame& inputImage,
const CodecSpecificInfo* codecSpecificInfo,
@@ -47,22 +52,24 @@ class MockVideoEncoder : public VideoEncoder {
class MockDecodedImageCallback : public DecodedImageCallback {
public:
- MOCK_METHOD1(Decoded, int32_t(VideoFrame& decodedImage));
+ MOCK_METHOD1(Decoded, int32_t(const VideoFrame& decodedImage));
+ MOCK_METHOD2(Decoded,
+ int32_t(const VideoFrame& decodedImage, int64_t decode_time_ms));
MOCK_METHOD1(ReceivedDecodedReferenceFrame,
int32_t(const uint64_t pictureId));
- MOCK_METHOD1(ReceivedDecodedFrame,
- int32_t(const uint64_t pictureId));
+ MOCK_METHOD1(ReceivedDecodedFrame, int32_t(const uint64_t pictureId));
};
class MockVideoDecoder : public VideoDecoder {
public:
- MOCK_METHOD2(InitDecode, int32_t(const VideoCodec* codecSettings,
- int32_t numberOfCores));
- MOCK_METHOD5(Decode, int32_t(const EncodedImage& inputImage,
- bool missingFrames,
- const RTPFragmentationHeader* fragmentation,
- const CodecSpecificInfo* codecSpecificInfo,
- int64_t renderTimeMs));
+ MOCK_METHOD2(InitDecode,
+ int32_t(const VideoCodec* codecSettings, int32_t numberOfCores));
+ MOCK_METHOD5(Decode,
+ int32_t(const EncodedImage& inputImage,
+ bool missingFrames,
+ const RTPFragmentationHeader* fragmentation,
+ const CodecSpecificInfo* codecSpecificInfo,
+ int64_t renderTimeMs));
MOCK_METHOD1(RegisterDecodeCompleteCallback,
int32_t(DecodedImageCallback* callback));
MOCK_METHOD0(Release, int32_t());
diff --git a/webrtc/modules/video_coding/codecs/interface/video_codec_interface.h b/webrtc/modules/video_coding/codecs/interface/video_codec_interface.h
index 6363ab7332..6bcfa909bd 100644
--- a/webrtc/modules/video_coding/codecs/interface/video_codec_interface.h
+++ b/webrtc/modules/video_coding/codecs/interface/video_codec_interface.h
@@ -8,23 +8,24 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_INTERFACE_VIDEO_CODEC_INTERFACE_H
-#define WEBRTC_MODULES_VIDEO_CODING_CODECS_INTERFACE_VIDEO_CODEC_INTERFACE_H
+#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_INTERFACE_VIDEO_CODEC_INTERFACE_H_
+#define WEBRTC_MODULES_VIDEO_CODING_CODECS_INTERFACE_VIDEO_CODEC_INTERFACE_H_
+#pragma message("WARNING: video_coding/codecs/interface is DEPRECATED; "
+ "use video_coding/include")
#include <vector>
#include "webrtc/common_types.h"
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/modules/video_coding/codecs/interface/video_error_codes.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/video_coding/include/video_error_codes.h"
#include "webrtc/typedefs.h"
#include "webrtc/video_decoder.h"
#include "webrtc/video_encoder.h"
#include "webrtc/video_frame.h"
-namespace webrtc
-{
+namespace webrtc {
-class RTPFragmentationHeader; // forward declaration
+class RTPFragmentationHeader; // forward declaration
// Note: if any pointers are added to this struct, it must be fitted
// with a copy-constructor. See below.
@@ -68,6 +69,10 @@ struct CodecSpecificInfoVP9 {
uint16_t width[kMaxVp9NumberOfSpatialLayers];
uint16_t height[kMaxVp9NumberOfSpatialLayers];
GofInfoVP9 gof;
+
+ // Frame reference data.
+ uint8_t num_ref_pics;
+ uint8_t p_diff[kMaxVp9RefPics];
};
struct CodecSpecificInfoGeneric {
@@ -86,12 +91,11 @@ union CodecSpecificInfoUnion {
// Note: if any pointers are added to this struct or its sub-structs, it
// must be fitted with a copy-constructor. This is because it is copied
// in the copy-constructor of VCMEncodedFrame.
-struct CodecSpecificInfo
-{
- VideoCodecType codecType;
- CodecSpecificInfoUnion codecSpecific;
+struct CodecSpecificInfo {
+ VideoCodecType codecType;
+ CodecSpecificInfoUnion codecSpecific;
};
} // namespace webrtc
-#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_INTERFACE_VIDEO_CODEC_INTERFACE_H
+#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_INTERFACE_VIDEO_CODEC_INTERFACE_H_
diff --git a/webrtc/modules/video_coding/codecs/interface/video_error_codes.h b/webrtc/modules/video_coding/codecs/interface/video_error_codes.h
index 28e5a32d43..ea8829df80 100644
--- a/webrtc/modules/video_coding/codecs/interface/video_error_codes.h
+++ b/webrtc/modules/video_coding/codecs/interface/video_error_codes.h
@@ -8,8 +8,11 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_INTERFACE_VIDEO_ERROR_CODES_H
-#define WEBRTC_MODULES_VIDEO_CODING_CODECS_INTERFACE_VIDEO_ERROR_CODES_H
+#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_INTERFACE_VIDEO_ERROR_CODES_H_
+#define WEBRTC_MODULES_VIDEO_CODING_CODECS_INTERFACE_VIDEO_ERROR_CODES_H_
+
+#pragma message("WARNING: video_coding/codecs/interface is DEPRECATED; "
+ "use video_coding/include")
// NOTE: in sync with video_coding_module_defines.h
@@ -29,4 +32,4 @@
#define WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE -13
#define WEBRTC_VIDEO_CODEC_TARGET_BITRATE_OVERSHOOT -14
-#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_INTERFACE_VIDEO_ERROR_CODES_H
+#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_INTERFACE_VIDEO_ERROR_CODES_H_
diff --git a/webrtc/modules/video_coding/codecs/test/packet_manipulator.cc b/webrtc/modules/video_coding/codecs/test/packet_manipulator.cc
index 36ba0e8272..b554b4e9ae 100644
--- a/webrtc/modules/video_coding/codecs/test/packet_manipulator.cc
+++ b/webrtc/modules/video_coding/codecs/test/packet_manipulator.cc
@@ -57,7 +57,7 @@ int PacketManipulatorImpl::ManipulatePackets(
active_burst_packets_--;
nbr_packets_dropped++;
} else if (RandomUniform() < config_.packet_loss_probability ||
- packet_loss_has_occurred) {
+ packet_loss_has_occurred) {
packet_loss_has_occurred = true;
nbr_packets_dropped++;
if (config_.packet_loss_mode == kBurst) {
@@ -91,9 +91,9 @@ inline double PacketManipulatorImpl::RandomUniform() {
// get the same behavior as long as we're using a fixed initial seed.
critsect_->Enter();
srand(random_seed_);
- random_seed_ = rand();
+ random_seed_ = rand(); // NOLINT (rand_r instead of rand)
critsect_->Leave();
- return (random_seed_ + 1.0)/(RAND_MAX + 1.0);
+ return (random_seed_ + 1.0) / (RAND_MAX + 1.0);
}
const char* PacketLossModeToStr(PacketLossMode e) {
@@ -109,4 +109,4 @@ const char* PacketLossModeToStr(PacketLossMode e) {
}
} // namespace test
-} // namespace webrtcc
+} // namespace webrtc
diff --git a/webrtc/modules/video_coding/codecs/test/packet_manipulator.h b/webrtc/modules/video_coding/codecs/test/packet_manipulator.h
index 16a9dc22ef..3334be072b 100644
--- a/webrtc/modules/video_coding/codecs/test/packet_manipulator.h
+++ b/webrtc/modules/video_coding/codecs/test/packet_manipulator.h
@@ -13,7 +13,7 @@
#include <stdlib.h>
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
#include "webrtc/test/testsupport/packet_reader.h"
@@ -36,10 +36,11 @@ const char* PacketLossModeToStr(PacketLossMode e);
// scenarios caused by network interference.
struct NetworkingConfig {
NetworkingConfig()
- : packet_size_in_bytes(1500), max_payload_size_in_bytes(1440),
- packet_loss_mode(kUniform), packet_loss_probability(0.0),
- packet_loss_burst_length(1) {
- }
+ : packet_size_in_bytes(1500),
+ max_payload_size_in_bytes(1440),
+ packet_loss_mode(kUniform),
+ packet_loss_probability(0.0),
+ packet_loss_burst_length(1) {}
// Packet size in bytes. Default: 1500 bytes.
size_t packet_size_in_bytes;
@@ -93,9 +94,11 @@ class PacketManipulatorImpl : public PacketManipulator {
virtual ~PacketManipulatorImpl();
int ManipulatePackets(webrtc::EncodedImage* encoded_image) override;
virtual void InitializeRandomSeed(unsigned int seed);
+
protected:
// Returns a uniformly distributed random value between 0.0 and 1.0
virtual double RandomUniform();
+
private:
PacketReader* packet_reader_;
const NetworkingConfig& config_;
diff --git a/webrtc/modules/video_coding/codecs/test/packet_manipulator_unittest.cc b/webrtc/modules/video_coding/codecs/test/packet_manipulator_unittest.cc
index ace7bc0507..8c3d30dc0d 100644
--- a/webrtc/modules/video_coding/codecs/test/packet_manipulator_unittest.cc
+++ b/webrtc/modules/video_coding/codecs/test/packet_manipulator_unittest.cc
@@ -13,7 +13,7 @@
#include <queue>
#include "testing/gtest/include/gtest/gtest.h"
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
#include "webrtc/modules/video_coding/codecs/test/predictive_packet_manipulator.h"
#include "webrtc/test/testsupport/unittest_utils.h"
#include "webrtc/typedefs.h"
@@ -25,7 +25,7 @@ const double kNeverDropProbability = 0.0;
const double kAlwaysDropProbability = 1.0;
const int kBurstLength = 1;
-class PacketManipulatorTest: public PacketRelatedTest {
+class PacketManipulatorTest : public PacketRelatedTest {
protected:
PacketReader packet_reader_;
EncodedImage image_;
@@ -50,19 +50,15 @@ class PacketManipulatorTest: public PacketRelatedTest {
virtual ~PacketManipulatorTest() {}
- void SetUp() {
- PacketRelatedTest::SetUp();
- }
+ void SetUp() { PacketRelatedTest::SetUp(); }
- void TearDown() {
- PacketRelatedTest::TearDown();
- }
+ void TearDown() { PacketRelatedTest::TearDown(); }
void VerifyPacketLoss(int expected_nbr_packets_dropped,
int actual_nbr_packets_dropped,
size_t expected_packet_data_length,
uint8_t* expected_packet_data,
- EncodedImage& actual_image) {
+ const EncodedImage& actual_image) {
EXPECT_EQ(expected_nbr_packets_dropped, actual_nbr_packets_dropped);
EXPECT_EQ(expected_packet_data_length, image_._length);
EXPECT_EQ(0, memcmp(expected_packet_data, actual_image._buffer,
@@ -75,10 +71,10 @@ TEST_F(PacketManipulatorTest, Constructor) {
}
TEST_F(PacketManipulatorTest, DropNone) {
- PacketManipulatorImpl manipulator(&packet_reader_, no_drop_config_, false);
+ PacketManipulatorImpl manipulator(&packet_reader_, no_drop_config_, false);
int nbr_packets_dropped = manipulator.ManipulatePackets(&image_);
- VerifyPacketLoss(0, nbr_packets_dropped, kPacketDataLength,
- packet_data_, image_);
+ VerifyPacketLoss(0, nbr_packets_dropped, kPacketDataLength, packet_data_,
+ image_);
}
TEST_F(PacketManipulatorTest, UniformDropNoneSmallFrame) {
@@ -87,15 +83,14 @@ TEST_F(PacketManipulatorTest, UniformDropNoneSmallFrame) {
PacketManipulatorImpl manipulator(&packet_reader_, no_drop_config_, false);
int nbr_packets_dropped = manipulator.ManipulatePackets(&image_);
- VerifyPacketLoss(0, nbr_packets_dropped, data_length,
- packet_data_, image_);
+ VerifyPacketLoss(0, nbr_packets_dropped, data_length, packet_data_, image_);
}
TEST_F(PacketManipulatorTest, UniformDropAll) {
PacketManipulatorImpl manipulator(&packet_reader_, drop_config_, false);
int nbr_packets_dropped = manipulator.ManipulatePackets(&image_);
- VerifyPacketLoss(kPacketDataNumberOfPackets, nbr_packets_dropped,
- 0, packet_data_, image_);
+ VerifyPacketLoss(kPacketDataNumberOfPackets, nbr_packets_dropped, 0,
+ packet_data_, image_);
}
// Use our customized test class to make the second packet being lost
diff --git a/webrtc/modules/video_coding/codecs/test/predictive_packet_manipulator.cc b/webrtc/modules/video_coding/codecs/test/predictive_packet_manipulator.cc
index c92cfa48a7..9eba205a88 100644
--- a/webrtc/modules/video_coding/codecs/test/predictive_packet_manipulator.cc
+++ b/webrtc/modules/video_coding/codecs/test/predictive_packet_manipulator.cc
@@ -19,13 +19,11 @@ namespace webrtc {
namespace test {
PredictivePacketManipulator::PredictivePacketManipulator(
- PacketReader* packet_reader, const NetworkingConfig& config)
- : PacketManipulatorImpl(packet_reader, config, false) {
-}
-
-PredictivePacketManipulator::~PredictivePacketManipulator() {
-}
+ PacketReader* packet_reader,
+ const NetworkingConfig& config)
+ : PacketManipulatorImpl(packet_reader, config, false) {}
+PredictivePacketManipulator::~PredictivePacketManipulator() {}
void PredictivePacketManipulator::AddRandomResult(double result) {
assert(result >= 0.0 && result <= 1.0);
@@ -33,8 +31,9 @@ void PredictivePacketManipulator::AddRandomResult(double result) {
}
double PredictivePacketManipulator::RandomUniform() {
- if(random_results_.size() == 0u) {
- fprintf(stderr, "No more stored results, please make sure AddRandomResult()"
+ if (random_results_.size() == 0u) {
+ fprintf(stderr,
+ "No more stored results, please make sure AddRandomResult() "
"is called same amount of times you're going to invoke the "
"RandomUniform() function, i.e. once per packet.\n");
assert(false);
@@ -45,4 +44,4 @@ double PredictivePacketManipulator::RandomUniform() {
}
} // namespace test
-} // namespace webrtcc
+} // namespace webrtc
diff --git a/webrtc/modules/video_coding/codecs/test/predictive_packet_manipulator.h b/webrtc/modules/video_coding/codecs/test/predictive_packet_manipulator.h
index 082712d870..45c7848c67 100644
--- a/webrtc/modules/video_coding/codecs/test/predictive_packet_manipulator.h
+++ b/webrtc/modules/video_coding/codecs/test/predictive_packet_manipulator.h
@@ -31,6 +31,7 @@ class PredictivePacketManipulator : public PacketManipulatorImpl {
// FIFO queue so they will be returned in the same order they were added.
// Result parameter must be 0.0 to 1.0.
void AddRandomResult(double result);
+
protected:
// Returns a uniformly distributed random value between 0.0 and 1.0
double RandomUniform() override;
diff --git a/webrtc/modules/video_coding/codecs/test/stats.cc b/webrtc/modules/video_coding/codecs/test/stats.cc
index f87407d223..478b2f4901 100644
--- a/webrtc/modules/video_coding/codecs/test/stats.cc
+++ b/webrtc/modules/video_coding/codecs/test/stats.cc
@@ -39,19 +39,19 @@ Stats::Stats() {}
Stats::~Stats() {}
bool LessForEncodeTime(const FrameStatistic& s1, const FrameStatistic& s2) {
- return s1.encode_time_in_us < s2.encode_time_in_us;
+ return s1.encode_time_in_us < s2.encode_time_in_us;
}
bool LessForDecodeTime(const FrameStatistic& s1, const FrameStatistic& s2) {
- return s1.decode_time_in_us < s2.decode_time_in_us;
+ return s1.decode_time_in_us < s2.decode_time_in_us;
}
bool LessForEncodedSize(const FrameStatistic& s1, const FrameStatistic& s2) {
- return s1.encoded_frame_length_in_bytes < s2.encoded_frame_length_in_bytes;
+ return s1.encoded_frame_length_in_bytes < s2.encoded_frame_length_in_bytes;
}
bool LessForBitRate(const FrameStatistic& s1, const FrameStatistic& s2) {
- return s1.bit_rate_in_kbps < s2.bit_rate_in_kbps;
+ return s1.bit_rate_in_kbps < s2.bit_rate_in_kbps;
}
FrameStatistic& Stats::NewFrame(int frame_number) {
@@ -78,8 +78,7 @@ void Stats::PrintSummary() {
size_t nbr_keyframes = 0;
size_t nbr_nonkeyframes = 0;
- for (FrameStatisticsIterator it = stats_.begin();
- it != stats_.end(); ++it) {
+ for (FrameStatisticsIterator it = stats_.begin(); it != stats_.end(); ++it) {
total_encoding_time_in_us += it->encode_time_in_us;
total_decoding_time_in_us += it->decode_time_in_us;
total_encoded_frames_lengths += it->encoded_frame_length_in_bytes;
@@ -96,15 +95,13 @@ void Stats::PrintSummary() {
// ENCODING
printf("Encoding time:\n");
- frame = std::min_element(stats_.begin(),
- stats_.end(), LessForEncodeTime);
- printf(" Min : %7d us (frame %d)\n",
- frame->encode_time_in_us, frame->frame_number);
+ frame = std::min_element(stats_.begin(), stats_.end(), LessForEncodeTime);
+ printf(" Min : %7d us (frame %d)\n", frame->encode_time_in_us,
+ frame->frame_number);
- frame = std::max_element(stats_.begin(),
- stats_.end(), LessForEncodeTime);
- printf(" Max : %7d us (frame %d)\n",
- frame->encode_time_in_us, frame->frame_number);
+ frame = std::max_element(stats_.begin(), stats_.end(), LessForEncodeTime);
+ printf(" Max : %7d us (frame %d)\n", frame->encode_time_in_us,
+ frame->frame_number);
printf(" Average : %7d us\n",
static_cast<int>(total_encoding_time_in_us / stats_.size()));
@@ -115,7 +112,7 @@ void Stats::PrintSummary() {
// failures)
std::vector<FrameStatistic> decoded_frames;
for (std::vector<FrameStatistic>::iterator it = stats_.begin();
- it != stats_.end(); ++it) {
+ it != stats_.end(); ++it) {
if (it->decoding_successful) {
decoded_frames.push_back(*it);
}
@@ -123,15 +120,15 @@ void Stats::PrintSummary() {
if (decoded_frames.size() == 0) {
printf("No successfully decoded frames exist in this statistics.\n");
} else {
- frame = std::min_element(decoded_frames.begin(),
- decoded_frames.end(), LessForDecodeTime);
- printf(" Min : %7d us (frame %d)\n",
- frame->decode_time_in_us, frame->frame_number);
+ frame = std::min_element(decoded_frames.begin(), decoded_frames.end(),
+ LessForDecodeTime);
+ printf(" Min : %7d us (frame %d)\n", frame->decode_time_in_us,
+ frame->frame_number);
- frame = std::max_element(decoded_frames.begin(),
- decoded_frames.end(), LessForDecodeTime);
- printf(" Max : %7d us (frame %d)\n",
- frame->decode_time_in_us, frame->frame_number);
+ frame = std::max_element(decoded_frames.begin(), decoded_frames.end(),
+ LessForDecodeTime);
+ printf(" Max : %7d us (frame %d)\n", frame->decode_time_in_us,
+ frame->frame_number);
printf(" Average : %7d us\n",
static_cast<int>(total_decoding_time_in_us / decoded_frames.size()));
@@ -141,13 +138,11 @@ void Stats::PrintSummary() {
// SIZE
printf("Frame sizes:\n");
- frame = std::min_element(stats_.begin(),
- stats_.end(), LessForEncodedSize);
+ frame = std::min_element(stats_.begin(), stats_.end(), LessForEncodedSize);
printf(" Min : %7" PRIuS " bytes (frame %d)\n",
frame->encoded_frame_length_in_bytes, frame->frame_number);
- frame = std::max_element(stats_.begin(),
- stats_.end(), LessForEncodedSize);
+ frame = std::max_element(stats_.begin(), stats_.end(), LessForEncodedSize);
printf(" Max : %7" PRIuS " bytes (frame %d)\n",
frame->encoded_frame_length_in_bytes, frame->frame_number);
@@ -167,21 +162,17 @@ void Stats::PrintSummary() {
// BIT RATE
printf("Bit rates:\n");
- frame = std::min_element(stats_.begin(),
- stats_.end(), LessForBitRate);
- printf(" Min bit rate: %7d kbps (frame %d)\n",
- frame->bit_rate_in_kbps, frame->frame_number);
+ frame = std::min_element(stats_.begin(), stats_.end(), LessForBitRate);
+ printf(" Min bit rate: %7d kbps (frame %d)\n", frame->bit_rate_in_kbps,
+ frame->frame_number);
- frame = std::max_element(stats_.begin(),
- stats_.end(), LessForBitRate);
- printf(" Max bit rate: %7d kbps (frame %d)\n",
- frame->bit_rate_in_kbps, frame->frame_number);
+ frame = std::max_element(stats_.begin(), stats_.end(), LessForBitRate);
+ printf(" Max bit rate: %7d kbps (frame %d)\n", frame->bit_rate_in_kbps,
+ frame->frame_number);
printf("\n");
- printf("Total encoding time : %7d ms.\n",
- total_encoding_time_in_us / 1000);
- printf("Total decoding time : %7d ms.\n",
- total_decoding_time_in_us / 1000);
+ printf("Total encoding time : %7d ms.\n", total_encoding_time_in_us / 1000);
+ printf("Total decoding time : %7d ms.\n", total_decoding_time_in_us / 1000);
printf("Total processing time: %7d ms.\n",
(total_encoding_time_in_us + total_decoding_time_in_us) / 1000);
}
diff --git a/webrtc/modules/video_coding/codecs/test/stats.h b/webrtc/modules/video_coding/codecs/test/stats.h
index 83ba108bb7..9092631ca1 100644
--- a/webrtc/modules/video_coding/codecs/test/stats.h
+++ b/webrtc/modules/video_coding/codecs/test/stats.h
@@ -13,7 +13,7 @@
#include <vector>
-#include "webrtc/common_video/interface/video_image.h"
+#include "webrtc/common_video/include/video_image.h"
namespace webrtc {
namespace test {
diff --git a/webrtc/modules/video_coding/codecs/test/stats_unittest.cc b/webrtc/modules/video_coding/codecs/test/stats_unittest.cc
index a2d27e71d6..0403ccfdb3 100644
--- a/webrtc/modules/video_coding/codecs/test/stats_unittest.cc
+++ b/webrtc/modules/video_coding/codecs/test/stats_unittest.cc
@@ -16,21 +16,15 @@
namespace webrtc {
namespace test {
-class StatsTest: public testing::Test {
+class StatsTest : public testing::Test {
protected:
- StatsTest() {
- }
+ StatsTest() {}
- virtual ~StatsTest() {
- }
+ virtual ~StatsTest() {}
- void SetUp() {
- stats_ = new Stats();
- }
+ void SetUp() { stats_ = new Stats(); }
- void TearDown() {
- delete stats_;
- }
+ void TearDown() { delete stats_; }
Stats* stats_;
};
diff --git a/webrtc/modules/video_coding/codecs/test/videoprocessor.cc b/webrtc/modules/video_coding/codecs/test/videoprocessor.cc
index c814dfe0e7..7376000bd5 100644
--- a/webrtc/modules/video_coding/codecs/test/videoprocessor.cc
+++ b/webrtc/modules/video_coding/codecs/test/videoprocessor.cc
@@ -93,14 +93,18 @@ bool VideoProcessorImpl::Init() {
int32_t register_result =
encoder_->RegisterEncodeCompleteCallback(encode_callback_);
if (register_result != WEBRTC_VIDEO_CODEC_OK) {
- fprintf(stderr, "Failed to register encode complete callback, return code: "
- "%d\n", register_result);
+ fprintf(stderr,
+ "Failed to register encode complete callback, return code: "
+ "%d\n",
+ register_result);
return false;
}
register_result = decoder_->RegisterDecodeCompleteCallback(decode_callback_);
if (register_result != WEBRTC_VIDEO_CODEC_OK) {
- fprintf(stderr, "Failed to register decode complete callback, return code: "
- "%d\n", register_result);
+ fprintf(stderr,
+ "Failed to register decode complete callback, return code: "
+ "%d\n",
+ register_result);
return false;
}
// Init the encoder and decoder
@@ -146,13 +150,14 @@ VideoProcessorImpl::~VideoProcessorImpl() {
delete decode_callback_;
}
-
void VideoProcessorImpl::SetRates(int bit_rate, int frame_rate) {
int set_rates_result = encoder_->SetRates(bit_rate, frame_rate);
assert(set_rates_result >= 0);
if (set_rates_result < 0) {
- fprintf(stderr, "Failed to update encoder with new rate %d, "
- "return code: %d\n", bit_rate, set_rates_result);
+ fprintf(stderr,
+ "Failed to update encoder with new rate %d, "
+ "return code: %d\n",
+ bit_rate, set_rates_result);
}
num_dropped_frames_ = 0;
num_spatial_resizes_ = 0;
@@ -175,7 +180,7 @@ int VideoProcessorImpl::NumberSpatialResizes() {
}
bool VideoProcessorImpl::ProcessFrame(int frame_number) {
- assert(frame_number >=0);
+ assert(frame_number >= 0);
if (!initialized_) {
fprintf(stderr, "Attempting to use uninitialized VideoProcessor!\n");
return false;
@@ -186,10 +191,8 @@ bool VideoProcessorImpl::ProcessFrame(int frame_number) {
}
if (frame_reader_->ReadFrame(source_buffer_)) {
// Copy the source frame to the newly read frame data.
- source_frame_.CreateFrame(source_buffer_,
- config_.codec_settings->width,
- config_.codec_settings->height,
- kVideoRotation_0);
+ source_frame_.CreateFrame(source_buffer_, config_.codec_settings->width,
+ config_.codec_settings->height, kVideoRotation_0);
// Ensure we have a new statistics data object we can fill:
FrameStatistic& stat = stats_->NewFrame(frame_number);
@@ -224,10 +227,10 @@ bool VideoProcessorImpl::ProcessFrame(int frame_number) {
void VideoProcessorImpl::FrameEncoded(const EncodedImage& encoded_image) {
// Timestamp is frame number, so this gives us #dropped frames.
- int num_dropped_from_prev_encode = encoded_image._timeStamp -
- prev_time_stamp_ - 1;
- num_dropped_frames_ += num_dropped_from_prev_encode;
- prev_time_stamp_ = encoded_image._timeStamp;
+ int num_dropped_from_prev_encode =
+ encoded_image._timeStamp - prev_time_stamp_ - 1;
+ num_dropped_frames_ += num_dropped_from_prev_encode;
+ prev_time_stamp_ = encoded_image._timeStamp;
if (num_dropped_from_prev_encode > 0) {
// For dropped frames, we write out the last decoded frame to avoid getting
// out of sync for the computation of PSNR and SSIM.
@@ -244,15 +247,16 @@ void VideoProcessorImpl::FrameEncoded(const EncodedImage& encoded_image) {
TickTime encode_stop = TickTime::Now();
int frame_number = encoded_image._timeStamp;
FrameStatistic& stat = stats_->stats_[frame_number];
- stat.encode_time_in_us = GetElapsedTimeMicroseconds(encode_start_,
- encode_stop);
+ stat.encode_time_in_us =
+ GetElapsedTimeMicroseconds(encode_start_, encode_stop);
stat.encoding_successful = true;
stat.encoded_frame_length_in_bytes = encoded_image._length;
stat.frame_number = encoded_image._timeStamp;
stat.frame_type = encoded_image._frameType;
stat.bit_rate_in_kbps = encoded_image._length * bit_rate_factor_;
- stat.total_packets = encoded_image._length /
- config_.networking_config.packet_size_in_bytes + 1;
+ stat.total_packets =
+ encoded_image._length / config_.networking_config.packet_size_in_bytes +
+ 1;
// Perform packet loss if criteria is fullfilled:
bool exclude_this_frame = false;
@@ -280,7 +284,7 @@ void VideoProcessorImpl::FrameEncoded(const EncodedImage& encoded_image) {
copied_image._buffer = copied_buffer.get();
if (!exclude_this_frame) {
stat.packets_dropped =
- packet_manipulator_->ManipulatePackets(&copied_image);
+ packet_manipulator_->ManipulatePackets(&copied_image);
}
// Keep track of if frames are lost due to packet loss so we can tell
@@ -305,26 +309,25 @@ void VideoProcessorImpl::FrameDecoded(const VideoFrame& image) {
int frame_number = image.timestamp();
// Report stats
FrameStatistic& stat = stats_->stats_[frame_number];
- stat.decode_time_in_us = GetElapsedTimeMicroseconds(decode_start_,
- decode_stop);
+ stat.decode_time_in_us =
+ GetElapsedTimeMicroseconds(decode_start_, decode_stop);
stat.decoding_successful = true;
// Check for resize action (either down or up):
if (static_cast<int>(image.width()) != last_encoder_frame_width_ ||
- static_cast<int>(image.height()) != last_encoder_frame_height_ ) {
+ static_cast<int>(image.height()) != last_encoder_frame_height_) {
++num_spatial_resizes_;
last_encoder_frame_width_ = image.width();
last_encoder_frame_height_ = image.height();
}
// Check if codec size is different from native/original size, and if so,
// upsample back to original size: needed for PSNR and SSIM computations.
- if (image.width() != config_.codec_settings->width ||
+ if (image.width() != config_.codec_settings->width ||
image.height() != config_.codec_settings->height) {
VideoFrame up_image;
- int ret_val = scaler_.Set(image.width(), image.height(),
- config_.codec_settings->width,
- config_.codec_settings->height,
- kI420, kI420, kScaleBilinear);
+ int ret_val = scaler_.Set(
+ image.width(), image.height(), config_.codec_settings->width,
+ config_.codec_settings->height, kI420, kI420, kScaleBilinear);
assert(ret_val >= 0);
if (ret_val < 0) {
fprintf(stderr, "Failed to set scalar for frame: %d, return code: %d\n",
@@ -366,7 +369,8 @@ void VideoProcessorImpl::FrameDecoded(const VideoFrame& image) {
}
int VideoProcessorImpl::GetElapsedTimeMicroseconds(
- const webrtc::TickTime& start, const webrtc::TickTime& stop) {
+ const webrtc::TickTime& start,
+ const webrtc::TickTime& stop) {
uint64_t encode_time = (stop - start).Microseconds();
assert(encode_time <
static_cast<unsigned int>(std::numeric_limits<int>::max()));
@@ -404,8 +408,7 @@ const char* VideoCodecTypeToStr(webrtc::VideoCodecType e) {
}
// Callbacks
-int32_t
-VideoProcessorImpl::VideoProcessorEncodeCompleteCallback::Encoded(
+int32_t VideoProcessorImpl::VideoProcessorEncodeCompleteCallback::Encoded(
const EncodedImage& encoded_image,
const webrtc::CodecSpecificInfo* codec_specific_info,
const webrtc::RTPFragmentationHeader* fragmentation) {
diff --git a/webrtc/modules/video_coding/codecs/test/videoprocessor.h b/webrtc/modules/video_coding/codecs/test/videoprocessor.h
index 0b094ae73e..3ee08fd46a 100644
--- a/webrtc/modules/video_coding/codecs/test/videoprocessor.h
+++ b/webrtc/modules/video_coding/codecs/test/videoprocessor.h
@@ -13,9 +13,10 @@
#include <string>
+#include "webrtc/base/checks.h"
#include "webrtc/common_video/libyuv/include/scaler.h"
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
#include "webrtc/modules/video_coding/codecs/test/packet_manipulator.h"
#include "webrtc/modules/video_coding/codecs/test/stats.h"
#include "webrtc/system_wrappers/include/tick_util.h"
@@ -242,12 +243,16 @@ class VideoProcessorImpl : public VideoProcessor {
// Callback class required to implement according to the VideoDecoder API.
class VideoProcessorDecodeCompleteCallback
- : public webrtc::DecodedImageCallback {
+ : public webrtc::DecodedImageCallback {
public:
- explicit VideoProcessorDecodeCompleteCallback(VideoProcessorImpl* vp)
- : video_processor_(vp) {
+ explicit VideoProcessorDecodeCompleteCallback(VideoProcessorImpl* vp)
+ : video_processor_(vp) {}
+ int32_t Decoded(webrtc::VideoFrame& image) override;
+ int32_t Decoded(webrtc::VideoFrame& image,
+ int64_t decode_time_ms) override {
+ RTC_NOTREACHED();
+ return -1;
}
- int32_t Decoded(webrtc::VideoFrame& image) override;
private:
VideoProcessorImpl* video_processor_;
diff --git a/webrtc/modules/video_coding/codecs/test/videoprocessor_integrationtest.cc b/webrtc/modules/video_coding/codecs/test/videoprocessor_integrationtest.cc
index 3d6aedb22a..7b92616e1b 100644
--- a/webrtc/modules/video_coding/codecs/test/videoprocessor_integrationtest.cc
+++ b/webrtc/modules/video_coding/codecs/test/videoprocessor_integrationtest.cc
@@ -12,17 +12,16 @@
#include "testing/gtest/include/gtest/gtest.h"
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
#include "webrtc/modules/video_coding/codecs/test/packet_manipulator.h"
#include "webrtc/modules/video_coding/codecs/test/videoprocessor.h"
#include "webrtc/modules/video_coding/codecs/vp8/include/vp8.h"
#include "webrtc/modules/video_coding/codecs/vp9/include/vp9.h"
#include "webrtc/modules/video_coding/codecs/vp8/include/vp8_common_types.h"
-#include "webrtc/modules/video_coding/main/interface/video_coding.h"
+#include "webrtc/modules/video_coding/include/video_coding.h"
#include "webrtc/test/testsupport/fileutils.h"
#include "webrtc/test/testsupport/frame_reader.h"
#include "webrtc/test/testsupport/frame_writer.h"
-#include "webrtc/test/testsupport/gtest_disable.h"
#include "webrtc/test/testsupport/metrics/video_metrics.h"
#include "webrtc/test/testsupport/packet_reader.h"
#include "webrtc/typedefs.h"
@@ -81,7 +80,6 @@ struct RateControlMetrics {
int num_key_frames;
};
-
// Sequence used is foreman (CIF): may be better to use VGA for resize test.
const int kCIFWidth = 352;
const int kCIFHeight = 288;
@@ -101,7 +99,7 @@ const float kScaleKeyFrameSize = 0.5f;
// dropping/spatial resize, and temporal layers. The limits for the rate
// control metrics are set to be fairly conservative, so failure should only
// happen when some significant regression or breakdown occurs.
-class VideoProcessorIntegrationTest: public testing::Test {
+class VideoProcessorIntegrationTest : public testing::Test {
protected:
VideoEncoder* encoder_;
VideoDecoder* decoder_;
@@ -148,7 +146,6 @@ class VideoProcessorIntegrationTest: public testing::Test {
bool frame_dropper_on_;
bool spatial_resize_on_;
-
VideoProcessorIntegrationTest() {}
virtual ~VideoProcessorIntegrationTest() {}
@@ -165,14 +162,13 @@ class VideoProcessorIntegrationTest: public testing::Test {
// CIF is currently used for all tests below.
// Setup the TestConfig struct for processing of a clip in CIF resolution.
- config_.input_filename =
- webrtc::test::ResourcePath("foreman_cif", "yuv");
+ config_.input_filename = webrtc::test::ResourcePath("foreman_cif", "yuv");
// Generate an output filename in a safe way.
config_.output_filename = webrtc::test::TempFilename(
webrtc::test::OutputPath(), "videoprocessor_integrationtest");
- config_.frame_length_in_bytes = CalcBufferSize(kI420,
- kCIFWidth, kCIFHeight);
+ config_.frame_length_in_bytes =
+ CalcBufferSize(kI420, kCIFWidth, kCIFHeight);
config_.verbose = false;
// Only allow encoder/decoder to use single core, for predictability.
config_.use_single_core = true;
@@ -188,52 +184,46 @@ class VideoProcessorIntegrationTest: public testing::Test {
// These features may be set depending on the test.
switch (config_.codec_settings->codecType) {
- case kVideoCodecVP8:
- config_.codec_settings->codecSpecific.VP8.errorConcealmentOn =
- error_concealment_on_;
- config_.codec_settings->codecSpecific.VP8.denoisingOn =
- denoising_on_;
- config_.codec_settings->codecSpecific.VP8.numberOfTemporalLayers =
- num_temporal_layers_;
- config_.codec_settings->codecSpecific.VP8.frameDroppingOn =
- frame_dropper_on_;
- config_.codec_settings->codecSpecific.VP8.automaticResizeOn =
- spatial_resize_on_;
- config_.codec_settings->codecSpecific.VP8.keyFrameInterval =
- kBaseKeyFrameInterval;
- break;
- case kVideoCodecVP9:
- config_.codec_settings->codecSpecific.VP9.denoisingOn =
- denoising_on_;
- config_.codec_settings->codecSpecific.VP9.numberOfTemporalLayers =
- num_temporal_layers_;
- config_.codec_settings->codecSpecific.VP9.frameDroppingOn =
- frame_dropper_on_;
- config_.codec_settings->codecSpecific.VP9.automaticResizeOn =
- spatial_resize_on_;
- config_.codec_settings->codecSpecific.VP9.keyFrameInterval =
- kBaseKeyFrameInterval;
- break;
- default:
- assert(false);
- break;
- }
- frame_reader_ =
- new webrtc::test::FrameReaderImpl(config_.input_filename,
- config_.frame_length_in_bytes);
- frame_writer_ =
- new webrtc::test::FrameWriterImpl(config_.output_filename,
- config_.frame_length_in_bytes);
+ case kVideoCodecVP8:
+ config_.codec_settings->codecSpecific.VP8.errorConcealmentOn =
+ error_concealment_on_;
+ config_.codec_settings->codecSpecific.VP8.denoisingOn = denoising_on_;
+ config_.codec_settings->codecSpecific.VP8.numberOfTemporalLayers =
+ num_temporal_layers_;
+ config_.codec_settings->codecSpecific.VP8.frameDroppingOn =
+ frame_dropper_on_;
+ config_.codec_settings->codecSpecific.VP8.automaticResizeOn =
+ spatial_resize_on_;
+ config_.codec_settings->codecSpecific.VP8.keyFrameInterval =
+ kBaseKeyFrameInterval;
+ break;
+ case kVideoCodecVP9:
+ config_.codec_settings->codecSpecific.VP9.denoisingOn = denoising_on_;
+ config_.codec_settings->codecSpecific.VP9.numberOfTemporalLayers =
+ num_temporal_layers_;
+ config_.codec_settings->codecSpecific.VP9.frameDroppingOn =
+ frame_dropper_on_;
+ config_.codec_settings->codecSpecific.VP9.automaticResizeOn =
+ spatial_resize_on_;
+ config_.codec_settings->codecSpecific.VP9.keyFrameInterval =
+ kBaseKeyFrameInterval;
+ break;
+ default:
+ assert(false);
+ break;
+ }
+ frame_reader_ = new webrtc::test::FrameReaderImpl(
+ config_.input_filename, config_.frame_length_in_bytes);
+ frame_writer_ = new webrtc::test::FrameWriterImpl(
+ config_.output_filename, config_.frame_length_in_bytes);
ASSERT_TRUE(frame_reader_->Init());
ASSERT_TRUE(frame_writer_->Init());
packet_manipulator_ = new webrtc::test::PacketManipulatorImpl(
&packet_reader_, config_.networking_config, config_.verbose);
- processor_ = new webrtc::test::VideoProcessorImpl(encoder_, decoder_,
- frame_reader_,
- frame_writer_,
- packet_manipulator_,
- config_, &stats_);
+ processor_ = new webrtc::test::VideoProcessorImpl(
+ encoder_, decoder_, frame_reader_, frame_writer_, packet_manipulator_,
+ config_, &stats_);
ASSERT_TRUE(processor_->Init());
}
@@ -247,7 +237,7 @@ class VideoProcessorIntegrationTest: public testing::Test {
encoding_bitrate_[i] = 0.0f;
// Update layer per-frame-bandwidth.
per_frame_bandwidth_[i] = static_cast<float>(bit_rate_layer_[i]) /
- static_cast<float>(frame_rate_layer_[i]);
+ static_cast<float>(frame_rate_layer_[i]);
}
// Set maximum size of key frames, following setting in the VP8 wrapper.
float max_key_size = kScaleKeyFrameSize * kOptimalBufferSize * frame_rate_;
@@ -274,28 +264,28 @@ class VideoProcessorIntegrationTest: public testing::Test {
// Update rate mismatch relative to per-frame bandwidth for delta frames.
if (frame_type == kVideoFrameDelta) {
// TODO(marpan): Should we count dropped (zero size) frames in mismatch?
- sum_frame_size_mismatch_[layer_] += fabs(encoded_size_kbits -
- per_frame_bandwidth_[layer_]) /
- per_frame_bandwidth_[layer_];
+ sum_frame_size_mismatch_[layer_] +=
+ fabs(encoded_size_kbits - per_frame_bandwidth_[layer_]) /
+ per_frame_bandwidth_[layer_];
} else {
- float target_size = (frame_num == 1) ? target_size_key_frame_initial_ :
- target_size_key_frame_;
- sum_key_frame_size_mismatch_ += fabs(encoded_size_kbits - target_size) /
- target_size;
+ float target_size = (frame_num == 1) ? target_size_key_frame_initial_
+ : target_size_key_frame_;
+ sum_key_frame_size_mismatch_ +=
+ fabs(encoded_size_kbits - target_size) / target_size;
num_key_frames_ += 1;
}
sum_encoded_frame_size_[layer_] += encoded_size_kbits;
// Encoding bitrate per layer: from the start of the update/run to the
// current frame.
encoding_bitrate_[layer_] = sum_encoded_frame_size_[layer_] *
- frame_rate_layer_[layer_] /
- num_frames_per_update_[layer_];
+ frame_rate_layer_[layer_] /
+ num_frames_per_update_[layer_];
// Total encoding rate: from the start of the update/run to current frame.
sum_encoded_frame_size_total_ += encoded_size_kbits;
- encoding_bitrate_total_ = sum_encoded_frame_size_total_ * frame_rate_ /
- num_frames_total_;
- perc_encoding_rate_mismatch_ = 100 * fabs(encoding_bitrate_total_ -
- bit_rate_) / bit_rate_;
+ encoding_bitrate_total_ =
+ sum_encoded_frame_size_total_ * frame_rate_ / num_frames_total_;
+ perc_encoding_rate_mismatch_ =
+ 100 * fabs(encoding_bitrate_total_ - bit_rate_) / bit_rate_;
if (perc_encoding_rate_mismatch_ < kPercTargetvsActualMismatch &&
!encoding_rate_within_target_) {
num_frames_to_hit_target_ = num_frames_total_;
@@ -314,34 +304,38 @@ class VideoProcessorIntegrationTest: public testing::Test {
int num_key_frames) {
int num_dropped_frames = processor_->NumberDroppedFrames();
int num_resize_actions = processor_->NumberSpatialResizes();
- printf("For update #: %d,\n "
+ printf(
+ "For update #: %d,\n "
" Target Bitrate: %d,\n"
" Encoding bitrate: %f,\n"
" Frame rate: %d \n",
update_index, bit_rate_, encoding_bitrate_total_, frame_rate_);
- printf(" Number of frames to approach target rate = %d, \n"
- " Number of dropped frames = %d, \n"
- " Number of spatial resizes = %d, \n",
- num_frames_to_hit_target_, num_dropped_frames, num_resize_actions);
+ printf(
+ " Number of frames to approach target rate = %d, \n"
+ " Number of dropped frames = %d, \n"
+ " Number of spatial resizes = %d, \n",
+ num_frames_to_hit_target_, num_dropped_frames, num_resize_actions);
EXPECT_LE(perc_encoding_rate_mismatch_, max_encoding_rate_mismatch);
if (num_key_frames_ > 0) {
- int perc_key_frame_size_mismatch = 100 * sum_key_frame_size_mismatch_ /
- num_key_frames_;
- printf(" Number of Key frames: %d \n"
- " Key frame rate mismatch: %d \n",
- num_key_frames_, perc_key_frame_size_mismatch);
+ int perc_key_frame_size_mismatch =
+ 100 * sum_key_frame_size_mismatch_ / num_key_frames_;
+ printf(
+ " Number of Key frames: %d \n"
+ " Key frame rate mismatch: %d \n",
+ num_key_frames_, perc_key_frame_size_mismatch);
EXPECT_LE(perc_key_frame_size_mismatch, max_key_frame_size_mismatch);
}
printf("\n");
printf("Rates statistics for Layer data \n");
- for (int i = 0; i < num_temporal_layers_ ; i++) {
+ for (int i = 0; i < num_temporal_layers_; i++) {
printf("Layer #%d \n", i);
- int perc_frame_size_mismatch = 100 * sum_frame_size_mismatch_[i] /
- num_frames_per_update_[i];
- int perc_encoding_rate_mismatch = 100 * fabs(encoding_bitrate_[i] -
- bit_rate_layer_[i]) /
- bit_rate_layer_[i];
- printf(" Target Layer Bit rate: %f \n"
+ int perc_frame_size_mismatch =
+ 100 * sum_frame_size_mismatch_[i] / num_frames_per_update_[i];
+ int perc_encoding_rate_mismatch =
+ 100 * fabs(encoding_bitrate_[i] - bit_rate_layer_[i]) /
+ bit_rate_layer_[i];
+ printf(
+ " Target Layer Bit rate: %f \n"
" Layer frame rate: %f, \n"
" Layer per frame bandwidth: %f, \n"
" Layer Encoding bit rate: %f, \n"
@@ -366,13 +360,13 @@ class VideoProcessorIntegrationTest: public testing::Test {
if (num_temporal_layers_ == 1) {
layer_ = 0;
} else if (num_temporal_layers_ == 2) {
- // layer 0: 0 2 4 ...
- // layer 1: 1 3
- if (frame_number % 2 == 0) {
- layer_ = 0;
- } else {
- layer_ = 1;
- }
+ // layer 0: 0 2 4 ...
+ // layer 1: 1 3
+ if (frame_number % 2 == 0) {
+ layer_ = 0;
+ } else {
+ layer_ = 1;
+ }
} else if (num_temporal_layers_ == 3) {
// layer 0: 0 4 8 ...
// layer 1: 2 6
@@ -391,20 +385,20 @@ class VideoProcessorIntegrationTest: public testing::Test {
// Set the bitrate and frame rate per layer, for up to 3 layers.
void SetLayerRates() {
- assert(num_temporal_layers_<= 3);
+ assert(num_temporal_layers_ <= 3);
for (int i = 0; i < num_temporal_layers_; i++) {
float bit_rate_ratio =
kVp8LayerRateAlloction[num_temporal_layers_ - 1][i];
if (i > 0) {
- float bit_rate_delta_ratio = kVp8LayerRateAlloction
- [num_temporal_layers_ - 1][i] -
+ float bit_rate_delta_ratio =
+ kVp8LayerRateAlloction[num_temporal_layers_ - 1][i] -
kVp8LayerRateAlloction[num_temporal_layers_ - 1][i - 1];
bit_rate_layer_[i] = bit_rate_ * bit_rate_delta_ratio;
} else {
bit_rate_layer_[i] = bit_rate_ * bit_rate_ratio;
}
- frame_rate_layer_[i] = frame_rate_ / static_cast<float>(
- 1 << (num_temporal_layers_ - 1));
+ frame_rate_layer_[i] =
+ frame_rate_ / static_cast<float>(1 << (num_temporal_layers_ - 1));
}
if (num_temporal_layers_ == 3) {
frame_rate_layer_[2] = frame_rate_ / 2.0f;
@@ -437,12 +431,12 @@ class VideoProcessorIntegrationTest: public testing::Test {
spatial_resize_on_ = process.spatial_resize_on;
SetUpCodecConfig();
// Update the layers and the codec with the initial rates.
- bit_rate_ = rate_profile.target_bit_rate[0];
+ bit_rate_ = rate_profile.target_bit_rate[0];
frame_rate_ = rate_profile.input_frame_rate[0];
SetLayerRates();
// Set the initial target size for key frame.
- target_size_key_frame_initial_ = 0.5 * kInitialBufferSize *
- bit_rate_layer_[0];
+ target_size_key_frame_initial_ =
+ 0.5 * kInitialBufferSize * bit_rate_layer_[0];
processor_->SetRates(bit_rate_, frame_rate_);
// Process each frame, up to |num_frames|.
int num_frames = rate_profile.num_frames;
@@ -452,7 +446,7 @@ class VideoProcessorIntegrationTest: public testing::Test {
int frame_number = 0;
FrameType frame_type = kVideoFrameDelta;
while (processor_->ProcessFrame(frame_number) &&
- frame_number < num_frames) {
+ frame_number < num_frames) {
// Get the layer index for the frame |frame_number|.
LayerIndexForFrame(frame_number);
// Get the frame_type.
@@ -468,8 +462,7 @@ class VideoProcessorIntegrationTest: public testing::Test {
if (frame_number ==
rate_profile.frame_index_rate_update[update_index + 1]) {
VerifyRateControl(
- update_index,
- rc_metrics[update_index].max_key_frame_size_mismatch,
+ update_index, rc_metrics[update_index].max_key_frame_size_mismatch,
rc_metrics[update_index].max_delta_frame_size_mismatch,
rc_metrics[update_index].max_encoding_rate_mismatch,
rc_metrics[update_index].max_time_hit_target,
@@ -478,23 +471,22 @@ class VideoProcessorIntegrationTest: public testing::Test {
rc_metrics[update_index].num_key_frames);
// Update layer rates and the codec with new rates.
++update_index;
- bit_rate_ = rate_profile.target_bit_rate[update_index];
+ bit_rate_ = rate_profile.target_bit_rate[update_index];
frame_rate_ = rate_profile.input_frame_rate[update_index];
SetLayerRates();
- ResetRateControlMetrics(rate_profile.
- frame_index_rate_update[update_index + 1]);
+ ResetRateControlMetrics(
+ rate_profile.frame_index_rate_update[update_index + 1]);
processor_->SetRates(bit_rate_, frame_rate_);
}
}
- VerifyRateControl(
- update_index,
- rc_metrics[update_index].max_key_frame_size_mismatch,
- rc_metrics[update_index].max_delta_frame_size_mismatch,
- rc_metrics[update_index].max_encoding_rate_mismatch,
- rc_metrics[update_index].max_time_hit_target,
- rc_metrics[update_index].max_num_dropped_frames,
- rc_metrics[update_index].num_spatial_resizes,
- rc_metrics[update_index].num_key_frames);
+ VerifyRateControl(update_index,
+ rc_metrics[update_index].max_key_frame_size_mismatch,
+ rc_metrics[update_index].max_delta_frame_size_mismatch,
+ rc_metrics[update_index].max_encoding_rate_mismatch,
+ rc_metrics[update_index].max_time_hit_target,
+ rc_metrics[update_index].max_num_dropped_frames,
+ rc_metrics[update_index].num_spatial_resizes,
+ rc_metrics[update_index].num_key_frames);
EXPECT_EQ(num_frames, frame_number);
EXPECT_EQ(num_frames + 1, static_cast<int>(stats_.stats_.size()));
@@ -507,16 +499,14 @@ class VideoProcessorIntegrationTest: public testing::Test {
// TODO(marpan): should compute these quality metrics per SetRates update.
webrtc::test::QualityMetricsResult psnr_result, ssim_result;
- EXPECT_EQ(0, webrtc::test::I420MetricsFromFiles(
- config_.input_filename.c_str(),
- config_.output_filename.c_str(),
- config_.codec_settings->width,
- config_.codec_settings->height,
- &psnr_result,
- &ssim_result));
+ EXPECT_EQ(
+ 0, webrtc::test::I420MetricsFromFiles(
+ config_.input_filename.c_str(), config_.output_filename.c_str(),
+ config_.codec_settings->width, config_.codec_settings->height,
+ &psnr_result, &ssim_result));
printf("PSNR avg: %f, min: %f SSIM avg: %f, min: %f\n",
- psnr_result.average, psnr_result.min,
- ssim_result.average, ssim_result.min);
+ psnr_result.average, psnr_result.min, ssim_result.average,
+ ssim_result.min);
stats_.PrintSummary();
EXPECT_GT(psnr_result.average, quality_metrics.minimum_avg_psnr);
EXPECT_GT(psnr_result.min, quality_metrics.minimum_min_psnr);
@@ -549,7 +539,7 @@ void SetCodecParameters(CodecConfigPars* process_settings,
bool spatial_resize_on) {
process_settings->codec_type = codec_type;
process_settings->packet_loss = packet_loss;
- process_settings->key_frame_interval = key_frame_interval;
+ process_settings->key_frame_interval = key_frame_interval;
process_settings->num_temporal_layers = num_temporal_layers,
process_settings->error_concealment_on = error_concealment_on;
process_settings->denoising_on = denoising_on;
@@ -608,9 +598,7 @@ TEST_F(VideoProcessorIntegrationTest, Process0PercentPacketLossVP9) {
// Metrics for rate control.
RateControlMetrics rc_metrics[1];
SetRateControlMetrics(rc_metrics, 0, 0, 40, 20, 10, 20, 0, 1);
- ProcessFramesAndVerify(quality_metrics,
- rate_profile,
- process_settings,
+ ProcessFramesAndVerify(quality_metrics, rate_profile, process_settings,
rc_metrics);
}
@@ -632,13 +620,10 @@ TEST_F(VideoProcessorIntegrationTest, Process5PercentPacketLossVP9) {
// Metrics for rate control.
RateControlMetrics rc_metrics[1];
SetRateControlMetrics(rc_metrics, 0, 0, 40, 20, 10, 20, 0, 1);
- ProcessFramesAndVerify(quality_metrics,
- rate_profile,
- process_settings,
+ ProcessFramesAndVerify(quality_metrics, rate_profile, process_settings,
rc_metrics);
}
-
// VP9: Run with no packet loss, with varying bitrate (3 rate updates):
// low to high to medium. Check that quality and encoder response to the new
// target rate/per-frame bandwidth (for each rate update) is within limits.
@@ -657,15 +642,13 @@ TEST_F(VideoProcessorIntegrationTest, ProcessNoLossChangeBitRateVP9) {
false, true, false);
// Metrics for expected quality.
QualityMetrics quality_metrics;
- SetQualityMetrics(&quality_metrics, 35.9, 30.0, 0.90, 0.85);
+ SetQualityMetrics(&quality_metrics, 35.7, 30.0, 0.90, 0.85);
// Metrics for rate control.
RateControlMetrics rc_metrics[3];
SetRateControlMetrics(rc_metrics, 0, 0, 30, 20, 20, 30, 0, 1);
SetRateControlMetrics(rc_metrics, 1, 2, 0, 20, 20, 60, 0, 0);
SetRateControlMetrics(rc_metrics, 2, 0, 0, 25, 20, 40, 0, 0);
- ProcessFramesAndVerify(quality_metrics,
- rate_profile,
- process_settings,
+ ProcessFramesAndVerify(quality_metrics, rate_profile, process_settings,
rc_metrics);
}
@@ -695,12 +678,10 @@ TEST_F(VideoProcessorIntegrationTest,
SetQualityMetrics(&quality_metrics, 31.5, 18.0, 0.80, 0.44);
// Metrics for rate control.
RateControlMetrics rc_metrics[3];
- SetRateControlMetrics(rc_metrics, 0, 35, 50, 70, 15, 45, 0, 1);
+ SetRateControlMetrics(rc_metrics, 0, 38, 50, 75, 15, 45, 0, 1);
SetRateControlMetrics(rc_metrics, 1, 10, 0, 40, 10, 30, 0, 0);
SetRateControlMetrics(rc_metrics, 2, 5, 0, 30, 5, 20, 0, 0);
- ProcessFramesAndVerify(quality_metrics,
- rate_profile,
- process_settings,
+ ProcessFramesAndVerify(quality_metrics, rate_profile, process_settings,
rc_metrics);
}
@@ -721,19 +702,13 @@ TEST_F(VideoProcessorIntegrationTest, ProcessNoLossDenoiserOnVP9) {
// Metrics for rate control.
RateControlMetrics rc_metrics[1];
SetRateControlMetrics(rc_metrics, 0, 0, 40, 20, 10, 20, 0, 1);
- ProcessFramesAndVerify(quality_metrics,
- rate_profile,
- process_settings,
+ ProcessFramesAndVerify(quality_metrics, rate_profile, process_settings,
rc_metrics);
}
// Run with no packet loss, at low bitrate.
-// spatial_resize is on, and for this low bitrate expect two resizes during the
-// sequence; first resize is 3/4, second is 1/2 (from original).
+// spatial_resize is on, for this low bitrate expect one resize in sequence.
// Resize happens on delta frame. Expect only one key frame (first frame).
-// Disable for msan, see
-// https://code.google.com/p/webrtc/issues/detail?id=5110 for details.
-#if !defined(MEMORY_SANITIZER)
TEST_F(VideoProcessorIntegrationTest, ProcessNoLossSpatialResizeFrameDropVP9) {
config_.networking_config.packet_loss_probability = 0;
// Bitrate and frame rate profile.
@@ -743,20 +718,17 @@ TEST_F(VideoProcessorIntegrationTest, ProcessNoLossSpatialResizeFrameDropVP9) {
rate_profile.num_frames = kNbrFramesLong;
// Codec/network settings.
CodecConfigPars process_settings;
- SetCodecParameters(&process_settings, kVideoCodecVP9, 0.0f, -1,
- 1, false, false, true, true);
+ SetCodecParameters(&process_settings, kVideoCodecVP9, 0.0f, -1, 1, false,
+ false, true, true);
// Metrics for expected quality.
QualityMetrics quality_metrics;
- SetQualityMetrics(&quality_metrics, 25.0, 13.0, 0.70, 0.40);
+ SetQualityMetrics(&quality_metrics, 24.0, 13.0, 0.65, 0.37);
// Metrics for rate control.
RateControlMetrics rc_metrics[1];
- SetRateControlMetrics(rc_metrics, 0, 180, 70, 130, 15, 80, 2, 1);
- ProcessFramesAndVerify(quality_metrics,
- rate_profile,
- process_settings,
+ SetRateControlMetrics(rc_metrics, 0, 228, 70, 160, 15, 80, 1, 1);
+ ProcessFramesAndVerify(quality_metrics, rate_profile, process_settings,
rc_metrics);
}
-#endif
// TODO(marpan): Add temporal layer test for VP9, once changes are in
// vp9 wrapper for this.
@@ -780,9 +752,7 @@ TEST_F(VideoProcessorIntegrationTest, ProcessZeroPacketLoss) {
// Metrics for rate control.
RateControlMetrics rc_metrics[1];
SetRateControlMetrics(rc_metrics, 0, 0, 40, 20, 10, 15, 0, 1);
- ProcessFramesAndVerify(quality_metrics,
- rate_profile,
- process_settings,
+ ProcessFramesAndVerify(quality_metrics, rate_profile, process_settings,
rc_metrics);
}
@@ -804,9 +774,7 @@ TEST_F(VideoProcessorIntegrationTest, Process5PercentPacketLoss) {
// Metrics for rate control.
RateControlMetrics rc_metrics[1];
SetRateControlMetrics(rc_metrics, 0, 0, 40, 20, 10, 15, 0, 1);
- ProcessFramesAndVerify(quality_metrics,
- rate_profile,
- process_settings,
+ ProcessFramesAndVerify(quality_metrics, rate_profile, process_settings,
rc_metrics);
}
@@ -828,9 +796,7 @@ TEST_F(VideoProcessorIntegrationTest, Process10PercentPacketLoss) {
// Metrics for rate control.
RateControlMetrics rc_metrics[1];
SetRateControlMetrics(rc_metrics, 0, 0, 40, 20, 10, 15, 0, 1);
- ProcessFramesAndVerify(quality_metrics,
- rate_profile,
- process_settings,
+ ProcessFramesAndVerify(quality_metrics, rate_profile, process_settings,
rc_metrics);
}
@@ -847,8 +813,13 @@ TEST_F(VideoProcessorIntegrationTest, Process10PercentPacketLoss) {
// low to high to medium. Check that quality and encoder response to the new
// target rate/per-frame bandwidth (for each rate update) is within limits.
// One key frame (first frame only) in sequence.
-TEST_F(VideoProcessorIntegrationTest,
- DISABLED_ON_ANDROID(ProcessNoLossChangeBitRateVP8)) {
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_ProcessNoLossChangeBitRateVP8 \
+ DISABLED_ProcessNoLossChangeBitRateVP8
+#else
+#define MAYBE_ProcessNoLossChangeBitRateVP8 ProcessNoLossChangeBitRateVP8
+#endif
+TEST_F(VideoProcessorIntegrationTest, MAYBE_ProcessNoLossChangeBitRateVP8) {
// Bitrate and frame rate profile.
RateProfile rate_profile;
SetRateProfilePars(&rate_profile, 0, 200, 30, 0);
@@ -868,9 +839,7 @@ TEST_F(VideoProcessorIntegrationTest,
SetRateControlMetrics(rc_metrics, 0, 0, 45, 20, 10, 15, 0, 1);
SetRateControlMetrics(rc_metrics, 1, 0, 0, 25, 20, 10, 0, 0);
SetRateControlMetrics(rc_metrics, 2, 0, 0, 25, 15, 10, 0, 0);
- ProcessFramesAndVerify(quality_metrics,
- rate_profile,
- process_settings,
+ ProcessFramesAndVerify(quality_metrics, rate_profile, process_settings,
rc_metrics);
}
@@ -881,8 +850,15 @@ TEST_F(VideoProcessorIntegrationTest,
// for the rate control metrics can be lower. One key frame (first frame only).
// Note: quality after update should be higher but we currently compute quality
// metrics averaged over whole sequence run.
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_ProcessNoLossChangeFrameRateFrameDropVP8 \
+ DISABLED_ProcessNoLossChangeFrameRateFrameDropVP8
+#else
+#define MAYBE_ProcessNoLossChangeFrameRateFrameDropVP8 \
+ ProcessNoLossChangeFrameRateFrameDropVP8
+#endif
TEST_F(VideoProcessorIntegrationTest,
- DISABLED_ON_ANDROID(ProcessNoLossChangeFrameRateFrameDropVP8)) {
+ MAYBE_ProcessNoLossChangeFrameRateFrameDropVP8) {
config_.networking_config.packet_loss_probability = 0;
// Bitrate and frame rate profile.
RateProfile rate_profile;
@@ -903,16 +879,21 @@ TEST_F(VideoProcessorIntegrationTest,
SetRateControlMetrics(rc_metrics, 0, 40, 20, 75, 15, 60, 0, 1);
SetRateControlMetrics(rc_metrics, 1, 10, 0, 25, 10, 35, 0, 0);
SetRateControlMetrics(rc_metrics, 2, 0, 0, 20, 10, 15, 0, 0);
- ProcessFramesAndVerify(quality_metrics,
- rate_profile,
- process_settings,
+ ProcessFramesAndVerify(quality_metrics, rate_profile, process_settings,
rc_metrics);
}
// Run with no packet loss, at low bitrate. During this time we should've
// resized once. Expect 2 key frames generated (first and one for resize).
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_ProcessNoLossSpatialResizeFrameDropVP8 \
+ DISABLED_ProcessNoLossSpatialResizeFrameDropVP8
+#else
+#define MAYBE_ProcessNoLossSpatialResizeFrameDropVP8 \
+ ProcessNoLossSpatialResizeFrameDropVP8
+#endif
TEST_F(VideoProcessorIntegrationTest,
- DISABLED_ON_ANDROID(ProcessNoLossSpatialResizeFrameDropVP8)) {
+ MAYBE_ProcessNoLossSpatialResizeFrameDropVP8) {
config_.networking_config.packet_loss_probability = 0;
// Bitrate and frame rate profile.
RateProfile rate_profile;
@@ -921,17 +902,15 @@ TEST_F(VideoProcessorIntegrationTest,
rate_profile.num_frames = kNbrFramesLong;
// Codec/network settings.
CodecConfigPars process_settings;
- SetCodecParameters(&process_settings, kVideoCodecVP8, 0.0f, -1,
- 1, false, true, true, true);
+ SetCodecParameters(&process_settings, kVideoCodecVP8, 0.0f, -1, 1, false,
+ true, true, true);
// Metrics for expected quality.
QualityMetrics quality_metrics;
SetQualityMetrics(&quality_metrics, 25.0, 15.0, 0.70, 0.40);
// Metrics for rate control.
RateControlMetrics rc_metrics[1];
SetRateControlMetrics(rc_metrics, 0, 160, 60, 120, 20, 70, 1, 2);
- ProcessFramesAndVerify(quality_metrics,
- rate_profile,
- process_settings,
+ ProcessFramesAndVerify(quality_metrics, rate_profile, process_settings,
rc_metrics);
}
@@ -940,8 +919,13 @@ TEST_F(VideoProcessorIntegrationTest,
// encoding rate mismatch are applied to each layer.
// No dropped frames in this test, and internal spatial resizer is off.
// One key frame (first frame only) in sequence, so no spatial resizing.
-TEST_F(VideoProcessorIntegrationTest,
- DISABLED_ON_ANDROID(ProcessNoLossTemporalLayersVP8)) {
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_ProcessNoLossTemporalLayersVP8 \
+ DISABLED_ProcessNoLossTemporalLayersVP8
+#else
+#define MAYBE_ProcessNoLossTemporalLayersVP8 ProcessNoLossTemporalLayersVP8
+#endif
+TEST_F(VideoProcessorIntegrationTest, MAYBE_ProcessNoLossTemporalLayersVP8) {
config_.networking_config.packet_loss_probability = 0;
// Bitrate and frame rate profile.
RateProfile rate_profile;
@@ -960,9 +944,7 @@ TEST_F(VideoProcessorIntegrationTest,
RateControlMetrics rc_metrics[2];
SetRateControlMetrics(rc_metrics, 0, 0, 20, 30, 10, 10, 0, 1);
SetRateControlMetrics(rc_metrics, 1, 0, 0, 30, 15, 10, 0, 0);
- ProcessFramesAndVerify(quality_metrics,
- rate_profile,
- process_settings,
+ ProcessFramesAndVerify(quality_metrics, rate_profile, process_settings,
rc_metrics);
}
} // namespace webrtc
diff --git a/webrtc/modules/video_coding/codecs/test/videoprocessor_unittest.cc b/webrtc/modules/video_coding/codecs/test/videoprocessor_unittest.cc
index 88b5467f1f..148d8dc74a 100644
--- a/webrtc/modules/video_coding/codecs/test/videoprocessor_unittest.cc
+++ b/webrtc/modules/video_coding/codecs/test/videoprocessor_unittest.cc
@@ -10,10 +10,10 @@
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
-#include "webrtc/modules/video_coding/codecs/interface/mock/mock_video_codec_interface.h"
+#include "webrtc/modules/video_coding/include/mock/mock_video_codec_interface.h"
#include "webrtc/modules/video_coding/codecs/test/mock/mock_packet_manipulator.h"
#include "webrtc/modules/video_coding/codecs/test/videoprocessor.h"
-#include "webrtc/modules/video_coding/main/interface/video_coding.h"
+#include "webrtc/modules/video_coding/include/video_coding.h"
#include "webrtc/test/testsupport/mock/mock_frame_reader.h"
#include "webrtc/test/testsupport/mock/mock_frame_writer.h"
#include "webrtc/test/testsupport/packet_reader.h"
@@ -29,7 +29,7 @@ namespace test {
// Very basic testing for VideoProcessor. It's mostly tested by running the
// video_quality_measurement program.
-class VideoProcessorTest: public testing::Test {
+class VideoProcessorTest : public testing::Test {
protected:
MockVideoEncoder encoder_mock_;
MockVideoDecoder decoder_mock_;
@@ -53,44 +53,34 @@ class VideoProcessorTest: public testing::Test {
void TearDown() {}
void ExpectInit() {
- EXPECT_CALL(encoder_mock_, InitEncode(_, _, _))
- .Times(1);
+ EXPECT_CALL(encoder_mock_, InitEncode(_, _, _)).Times(1);
EXPECT_CALL(encoder_mock_, RegisterEncodeCompleteCallback(_))
- .Times(AtLeast(1));
- EXPECT_CALL(decoder_mock_, InitDecode(_, _))
- .Times(1);
+ .Times(AtLeast(1));
+ EXPECT_CALL(decoder_mock_, InitDecode(_, _)).Times(1);
EXPECT_CALL(decoder_mock_, RegisterDecodeCompleteCallback(_))
- .Times(AtLeast(1));
- EXPECT_CALL(frame_reader_mock_, NumberOfFrames())
- .WillOnce(Return(1));
- EXPECT_CALL(frame_reader_mock_, FrameLength())
- .WillOnce(Return(152064));
+ .Times(AtLeast(1));
+ EXPECT_CALL(frame_reader_mock_, NumberOfFrames()).WillOnce(Return(1));
+ EXPECT_CALL(frame_reader_mock_, FrameLength()).WillOnce(Return(152064));
}
};
TEST_F(VideoProcessorTest, Init) {
ExpectInit();
- VideoProcessorImpl video_processor(&encoder_mock_, &decoder_mock_,
- &frame_reader_mock_,
- &frame_writer_mock_,
- &packet_manipulator_mock_, config_,
- &stats_);
+ VideoProcessorImpl video_processor(
+ &encoder_mock_, &decoder_mock_, &frame_reader_mock_, &frame_writer_mock_,
+ &packet_manipulator_mock_, config_, &stats_);
ASSERT_TRUE(video_processor.Init());
}
TEST_F(VideoProcessorTest, ProcessFrame) {
ExpectInit();
- EXPECT_CALL(encoder_mock_, Encode(_, _, _))
- .Times(1);
- EXPECT_CALL(frame_reader_mock_, ReadFrame(_))
- .WillOnce(Return(true));
+ EXPECT_CALL(encoder_mock_, Encode(_, _, _)).Times(1);
+ EXPECT_CALL(frame_reader_mock_, ReadFrame(_)).WillOnce(Return(true));
// Since we don't return any callback from the mock, the decoder will not
// be more than initialized...
- VideoProcessorImpl video_processor(&encoder_mock_, &decoder_mock_,
- &frame_reader_mock_,
- &frame_writer_mock_,
- &packet_manipulator_mock_, config_,
- &stats_);
+ VideoProcessorImpl video_processor(
+ &encoder_mock_, &decoder_mock_, &frame_reader_mock_, &frame_writer_mock_,
+ &packet_manipulator_mock_, config_, &stats_);
ASSERT_TRUE(video_processor.Init());
video_processor.ProcessFrame(0);
}
diff --git a/webrtc/modules/video_coding/codecs/tools/video_quality_measurement.cc b/webrtc/modules/video_coding/codecs/tools/video_quality_measurement.cc
index 22be5a83cc..37fad483f7 100644
--- a/webrtc/modules/video_coding/codecs/tools/video_quality_measurement.cc
+++ b/webrtc/modules/video_coding/codecs/tools/video_quality_measurement.cc
@@ -16,7 +16,7 @@
#include <sys/stat.h> // To check for directory existence.
#ifndef S_ISDIR // Not defined in stat.h on Windows.
-#define S_ISDIR(mode) (((mode) & S_IFMT) == S_IFDIR)
+#define S_ISDIR(mode) (((mode)&S_IFMT) == S_IFDIR)
#endif
#include "gflags/gflags.h"
@@ -26,7 +26,7 @@
#include "webrtc/modules/video_coding/codecs/test/stats.h"
#include "webrtc/modules/video_coding/codecs/test/videoprocessor.h"
#include "webrtc/modules/video_coding/codecs/vp8/include/vp8.h"
-#include "webrtc/modules/video_coding/main/interface/video_coding.h"
+#include "webrtc/modules/video_coding/include/video_coding.h"
#include "webrtc/system_wrappers/include/trace.h"
#include "webrtc/test/testsupport/frame_reader.h"
#include "webrtc/test/testsupport/frame_writer.h"
@@ -34,68 +34,102 @@
#include "webrtc/test/testsupport/packet_reader.h"
DEFINE_string(test_name, "Quality test", "The name of the test to run. ");
-DEFINE_string(test_description, "", "A more detailed description about what "
+DEFINE_string(test_description,
+ "",
+ "A more detailed description about what "
"the current test is about.");
-DEFINE_string(input_filename, "", "Input file. "
+DEFINE_string(input_filename,
+ "",
+ "Input file. "
"The source video file to be encoded and decoded. Must be in "
".yuv format");
DEFINE_int32(width, -1, "Width in pixels of the frames in the input file.");
DEFINE_int32(height, -1, "Height in pixels of the frames in the input file.");
-DEFINE_int32(framerate, 30, "Frame rate of the input file, in FPS "
+DEFINE_int32(framerate,
+ 30,
+ "Frame rate of the input file, in FPS "
"(frames-per-second). ");
-DEFINE_string(output_dir, ".", "Output directory. "
+DEFINE_string(output_dir,
+ ".",
+ "Output directory. "
"The directory where the output file will be put. Must already "
"exist.");
-DEFINE_bool(use_single_core, false, "Force using a single core. If set to "
+DEFINE_bool(use_single_core,
+ false,
+ "Force using a single core. If set to "
"true, only one core will be used for processing. Using a single "
"core is necessary to get a deterministic behavior for the"
"encoded frames - using multiple cores will produce different "
"encoded frames since multiple cores are competing to consume the "
"byte budget for each frame in parallel. If set to false, "
"the maximum detected number of cores will be used. ");
-DEFINE_bool(disable_fixed_random_seed , false, "Set this flag to disable the"
+DEFINE_bool(disable_fixed_random_seed,
+ false,
+ "Set this flag to disable the"
"usage of a fixed random seed for the random generator used "
"for packet loss. Disabling this will cause consecutive runs "
"loose packets at different locations, which is bad for "
"reproducibility.");
-DEFINE_string(output_filename, "", "Output file. "
+DEFINE_string(output_filename,
+ "",
+ "Output file. "
"The name of the output video file resulting of the processing "
"of the source file. By default this is the same name as the "
"input file with '_out' appended before the extension.");
DEFINE_int32(bitrate, 500, "Bit rate in kilobits/second.");
-DEFINE_int32(keyframe_interval, 0, "Forces a keyframe every Nth frame. "
+DEFINE_int32(keyframe_interval,
+ 0,
+ "Forces a keyframe every Nth frame. "
"0 means the encoder decides when to insert keyframes. Note that "
"the encoder may create a keyframe in other locations in addition "
"to the interval that is set using this parameter.");
-DEFINE_int32(temporal_layers, 0, "The number of temporal layers to use "
+DEFINE_int32(temporal_layers,
+ 0,
+ "The number of temporal layers to use "
"(VP8 specific codec setting). Must be 0-4.");
-DEFINE_int32(packet_size, 1500, "Simulated network packet size in bytes (MTU). "
+DEFINE_int32(packet_size,
+ 1500,
+ "Simulated network packet size in bytes (MTU). "
"Used for packet loss simulation.");
-DEFINE_int32(max_payload_size, 1440, "Max payload size in bytes for the "
+DEFINE_int32(max_payload_size,
+ 1440,
+ "Max payload size in bytes for the "
"encoder.");
-DEFINE_string(packet_loss_mode, "uniform", "Packet loss mode. Two different "
+DEFINE_string(packet_loss_mode,
+ "uniform",
+ "Packet loss mode. Two different "
"packet loss models are supported: uniform or burst. This "
"setting has no effect unless packet_loss_rate is >0. ");
-DEFINE_double(packet_loss_probability, 0.0, "Packet loss probability. A value "
+DEFINE_double(packet_loss_probability,
+ 0.0,
+ "Packet loss probability. A value "
"between 0.0 and 1.0 that defines the probability of a packet "
"being lost. 0.1 means 10% and so on.");
-DEFINE_int32(packet_loss_burst_length, 1, "Packet loss burst length. Defines "
+DEFINE_int32(packet_loss_burst_length,
+ 1,
+ "Packet loss burst length. Defines "
"how many packets will be lost in a burst when a packet has been "
"decided to be lost. Must be >=1.");
-DEFINE_bool(csv, false, "CSV output. Enabling this will output all frame "
+DEFINE_bool(csv,
+ false,
+ "CSV output. Enabling this will output all frame "
"statistics at the end of execution. Recommended to run combined "
"with --noverbose to avoid mixing output.");
-DEFINE_bool(python, false, "Python output. Enabling this will output all frame "
+DEFINE_bool(python,
+ false,
+ "Python output. Enabling this will output all frame "
"statistics as a Python script at the end of execution. "
"Recommended to run combine with --noverbose to avoid mixing "
"output.");
-DEFINE_bool(verbose, true, "Verbose mode. Prints a lot of debugging info. "
+DEFINE_bool(verbose,
+ true,
+ "Verbose mode. Prints a lot of debugging info. "
"Suitable for tracking progress but not for capturing output. "
"Disable with --noverbose flag.");
// Custom log method that only prints if the verbose flag is given.
// Supports all the standard printf parameters and formatting (just forwarded).
-int Log(const char *format, ...) {
+int Log(const char* format, ...) {
int result = 0;
if (FLAGS_verbose) {
va_list args;
@@ -132,9 +166,9 @@ int HandleCommandLineFlags(webrtc::test::TestConfig* config) {
// Verify the output dir exists.
struct stat dir_info;
if (!(stat(FLAGS_output_dir.c_str(), &dir_info) == 0 &&
- S_ISDIR(dir_info.st_mode))) {
+ S_ISDIR(dir_info.st_mode))) {
fprintf(stderr, "Cannot find output directory: %s\n",
- FLAGS_output_dir.c_str());
+ FLAGS_output_dir.c_str());
return 3;
}
config->output_dir = FLAGS_output_dir;
@@ -148,16 +182,16 @@ int HandleCommandLineFlags(webrtc::test::TestConfig* config) {
startIndex = 0;
}
FLAGS_output_filename =
- FLAGS_input_filename.substr(startIndex,
- FLAGS_input_filename.find_last_of(".")
- - startIndex) + "_out.yuv";
+ FLAGS_input_filename.substr(
+ startIndex, FLAGS_input_filename.find_last_of(".") - startIndex) +
+ "_out.yuv";
}
// Verify output file can be written.
if (FLAGS_output_dir == ".") {
config->output_filename = FLAGS_output_filename;
} else {
- config->output_filename = FLAGS_output_dir + "/"+ FLAGS_output_filename;
+ config->output_filename = FLAGS_output_dir + "/" + FLAGS_output_filename;
}
test_file = fopen(config->output_filename.c_str(), "wb");
if (test_file == NULL) {
@@ -232,27 +266,32 @@ int HandleCommandLineFlags(webrtc::test::TestConfig* config) {
// Check packet loss settings
if (FLAGS_packet_loss_mode != "uniform" &&
FLAGS_packet_loss_mode != "burst") {
- fprintf(stderr, "Unsupported packet loss mode, must be 'uniform' or "
+ fprintf(stderr,
+ "Unsupported packet loss mode, must be 'uniform' or "
"'burst'\n.");
return 10;
}
config->networking_config.packet_loss_mode = webrtc::test::kUniform;
if (FLAGS_packet_loss_mode == "burst") {
- config->networking_config.packet_loss_mode = webrtc::test::kBurst;
+ config->networking_config.packet_loss_mode = webrtc::test::kBurst;
}
if (FLAGS_packet_loss_probability < 0.0 ||
FLAGS_packet_loss_probability > 1.0) {
- fprintf(stderr, "Invalid packet loss probability. Must be 0.0 - 1.0, "
- "was: %f\n", FLAGS_packet_loss_probability);
+ fprintf(stderr,
+ "Invalid packet loss probability. Must be 0.0 - 1.0, "
+ "was: %f\n",
+ FLAGS_packet_loss_probability);
return 11;
}
config->networking_config.packet_loss_probability =
FLAGS_packet_loss_probability;
if (FLAGS_packet_loss_burst_length < 1) {
- fprintf(stderr, "Invalid packet loss burst length, must be >=1, "
- "was: %d\n", FLAGS_packet_loss_burst_length);
+ fprintf(stderr,
+ "Invalid packet loss burst length, must be >=1, "
+ "was: %d\n",
+ FLAGS_packet_loss_burst_length);
return 12;
}
config->networking_config.packet_loss_burst_length =
@@ -264,10 +303,9 @@ int HandleCommandLineFlags(webrtc::test::TestConfig* config) {
void CalculateSsimVideoMetrics(webrtc::test::TestConfig* config,
webrtc::test::QualityMetricsResult* result) {
Log("Calculating SSIM...\n");
- I420SSIMFromFiles(config->input_filename.c_str(),
- config->output_filename.c_str(),
- config->codec_settings->width,
- config->codec_settings->height, result);
+ I420SSIMFromFiles(
+ config->input_filename.c_str(), config->output_filename.c_str(),
+ config->codec_settings->width, config->codec_settings->height, result);
Log(" Average: %3.2f\n", result->average);
Log(" Min : %3.2f (frame %d)\n", result->min, result->min_frame_number);
Log(" Max : %3.2f (frame %d)\n", result->max, result->max_frame_number);
@@ -276,10 +314,9 @@ void CalculateSsimVideoMetrics(webrtc::test::TestConfig* config,
void CalculatePsnrVideoMetrics(webrtc::test::TestConfig* config,
webrtc::test::QualityMetricsResult* result) {
Log("Calculating PSNR...\n");
- I420PSNRFromFiles(config->input_filename.c_str(),
- config->output_filename.c_str(),
- config->codec_settings->width,
- config->codec_settings->height, result);
+ I420PSNRFromFiles(
+ config->input_filename.c_str(), config->output_filename.c_str(),
+ config->codec_settings->width, config->codec_settings->height, result);
Log(" Average: %3.2f\n", result->average);
Log(" Min : %3.2f (frame %d)\n", result->min, result->min_frame_number);
Log(" Max : %3.2f (frame %d)\n", result->max, result->max_frame_number);
@@ -309,9 +346,11 @@ void PrintConfigurationSummary(const webrtc::test::TestConfig& config) {
void PrintCsvOutput(const webrtc::test::Stats& stats,
const webrtc::test::QualityMetricsResult& ssim_result,
const webrtc::test::QualityMetricsResult& psnr_result) {
- Log("\nCSV output (recommended to run with --noverbose to skip the "
- "above output)\n");
- printf("frame_number encoding_successful decoding_successful "
+ Log(
+ "\nCSV output (recommended to run with --noverbose to skip the "
+ "above output)\n");
+ printf(
+ "frame_number encoding_successful decoding_successful "
"encode_return_code decode_return_code "
"encode_time_in_us decode_time_in_us "
"bit_rate_in_kbps encoded_frame_length_in_bytes frame_type "
@@ -322,22 +361,13 @@ void PrintCsvOutput(const webrtc::test::Stats& stats,
const webrtc::test::FrameStatistic& f = stats.stats_[i];
const webrtc::test::FrameResult& ssim = ssim_result.frames[i];
const webrtc::test::FrameResult& psnr = psnr_result.frames[i];
- printf("%4d, %d, %d, %2d, %2d, %6d, %6d, %5d, %7" PRIuS ", %d, %2d, %2"
- PRIuS ", %5.3f, %5.2f\n",
- f.frame_number,
- f.encoding_successful,
- f.decoding_successful,
- f.encode_return_code,
- f.decode_return_code,
- f.encode_time_in_us,
- f.decode_time_in_us,
- f.bit_rate_in_kbps,
- f.encoded_frame_length_in_bytes,
- f.frame_type,
- f.packets_dropped,
- f.total_packets,
- ssim.value,
- psnr.value);
+ printf("%4d, %d, %d, %2d, %2d, %6d, %6d, %5d, %7" PRIuS
+ ", %d, %2d, %2" PRIuS ", %5.3f, %5.2f\n",
+ f.frame_number, f.encoding_successful, f.decoding_successful,
+ f.encode_return_code, f.decode_return_code, f.encode_time_in_us,
+ f.decode_time_in_us, f.bit_rate_in_kbps,
+ f.encoded_frame_length_in_bytes, f.frame_type, f.packets_dropped,
+ f.total_packets, ssim.value, psnr.value);
}
}
@@ -345,91 +375,85 @@ void PrintPythonOutput(const webrtc::test::TestConfig& config,
const webrtc::test::Stats& stats,
const webrtc::test::QualityMetricsResult& ssim_result,
const webrtc::test::QualityMetricsResult& psnr_result) {
- Log("\nPython output (recommended to run with --noverbose to skip the "
- "above output)\n");
- printf("test_configuration = ["
- "{'name': 'name', 'value': '%s'},\n"
- "{'name': 'description', 'value': '%s'},\n"
- "{'name': 'test_number', 'value': '%d'},\n"
- "{'name': 'input_filename', 'value': '%s'},\n"
- "{'name': 'output_filename', 'value': '%s'},\n"
- "{'name': 'output_dir', 'value': '%s'},\n"
- "{'name': 'packet_size_in_bytes', 'value': '%" PRIuS "'},\n"
- "{'name': 'max_payload_size_in_bytes', 'value': '%" PRIuS "'},\n"
- "{'name': 'packet_loss_mode', 'value': '%s'},\n"
- "{'name': 'packet_loss_probability', 'value': '%f'},\n"
- "{'name': 'packet_loss_burst_length', 'value': '%d'},\n"
- "{'name': 'exclude_frame_types', 'value': '%s'},\n"
- "{'name': 'frame_length_in_bytes', 'value': '%" PRIuS "'},\n"
- "{'name': 'use_single_core', 'value': '%s'},\n"
- "{'name': 'keyframe_interval;', 'value': '%d'},\n"
- "{'name': 'video_codec_type', 'value': '%s'},\n"
- "{'name': 'width', 'value': '%d'},\n"
- "{'name': 'height', 'value': '%d'},\n"
- "{'name': 'bit_rate_in_kbps', 'value': '%d'},\n"
- "]\n",
- config.name.c_str(),
- config.description.c_str(),
- config.test_number,
- config.input_filename.c_str(),
- config.output_filename.c_str(),
- config.output_dir.c_str(),
- config.networking_config.packet_size_in_bytes,
- config.networking_config.max_payload_size_in_bytes,
- PacketLossModeToStr(config.networking_config.packet_loss_mode),
- config.networking_config.packet_loss_probability,
- config.networking_config.packet_loss_burst_length,
- ExcludeFrameTypesToStr(config.exclude_frame_types),
- config.frame_length_in_bytes,
- config.use_single_core ? "True " : "False",
- config.keyframe_interval,
- webrtc::test::VideoCodecTypeToStr(config.codec_settings->codecType),
- config.codec_settings->width,
- config.codec_settings->height,
- config.codec_settings->startBitrate);
- printf("frame_data_types = {"
- "'frame_number': ('number', 'Frame number'),\n"
- "'encoding_successful': ('boolean', 'Encoding successful?'),\n"
- "'decoding_successful': ('boolean', 'Decoding successful?'),\n"
- "'encode_time': ('number', 'Encode time (us)'),\n"
- "'decode_time': ('number', 'Decode time (us)'),\n"
- "'encode_return_code': ('number', 'Encode return code'),\n"
- "'decode_return_code': ('number', 'Decode return code'),\n"
- "'bit_rate': ('number', 'Bit rate (kbps)'),\n"
- "'encoded_frame_length': "
- "('number', 'Encoded frame length (bytes)'),\n"
- "'frame_type': ('string', 'Frame type'),\n"
- "'packets_dropped': ('number', 'Packets dropped'),\n"
- "'total_packets': ('number', 'Total packets'),\n"
- "'ssim': ('number', 'SSIM'),\n"
- "'psnr': ('number', 'PSNR (dB)'),\n"
- "}\n");
+ Log(
+ "\nPython output (recommended to run with --noverbose to skip the "
+ "above output)\n");
+ printf(
+ "test_configuration = ["
+ "{'name': 'name', 'value': '%s'},\n"
+ "{'name': 'description', 'value': '%s'},\n"
+ "{'name': 'test_number', 'value': '%d'},\n"
+ "{'name': 'input_filename', 'value': '%s'},\n"
+ "{'name': 'output_filename', 'value': '%s'},\n"
+ "{'name': 'output_dir', 'value': '%s'},\n"
+ "{'name': 'packet_size_in_bytes', 'value': '%" PRIuS
+ "'},\n"
+ "{'name': 'max_payload_size_in_bytes', 'value': '%" PRIuS
+ "'},\n"
+ "{'name': 'packet_loss_mode', 'value': '%s'},\n"
+ "{'name': 'packet_loss_probability', 'value': '%f'},\n"
+ "{'name': 'packet_loss_burst_length', 'value': '%d'},\n"
+ "{'name': 'exclude_frame_types', 'value': '%s'},\n"
+ "{'name': 'frame_length_in_bytes', 'value': '%" PRIuS
+ "'},\n"
+ "{'name': 'use_single_core', 'value': '%s'},\n"
+ "{'name': 'keyframe_interval;', 'value': '%d'},\n"
+ "{'name': 'video_codec_type', 'value': '%s'},\n"
+ "{'name': 'width', 'value': '%d'},\n"
+ "{'name': 'height', 'value': '%d'},\n"
+ "{'name': 'bit_rate_in_kbps', 'value': '%d'},\n"
+ "]\n",
+ config.name.c_str(), config.description.c_str(), config.test_number,
+ config.input_filename.c_str(), config.output_filename.c_str(),
+ config.output_dir.c_str(), config.networking_config.packet_size_in_bytes,
+ config.networking_config.max_payload_size_in_bytes,
+ PacketLossModeToStr(config.networking_config.packet_loss_mode),
+ config.networking_config.packet_loss_probability,
+ config.networking_config.packet_loss_burst_length,
+ ExcludeFrameTypesToStr(config.exclude_frame_types),
+ config.frame_length_in_bytes, config.use_single_core ? "True " : "False",
+ config.keyframe_interval,
+ webrtc::test::VideoCodecTypeToStr(config.codec_settings->codecType),
+ config.codec_settings->width, config.codec_settings->height,
+ config.codec_settings->startBitrate);
+ printf(
+ "frame_data_types = {"
+ "'frame_number': ('number', 'Frame number'),\n"
+ "'encoding_successful': ('boolean', 'Encoding successful?'),\n"
+ "'decoding_successful': ('boolean', 'Decoding successful?'),\n"
+ "'encode_time': ('number', 'Encode time (us)'),\n"
+ "'decode_time': ('number', 'Decode time (us)'),\n"
+ "'encode_return_code': ('number', 'Encode return code'),\n"
+ "'decode_return_code': ('number', 'Decode return code'),\n"
+ "'bit_rate': ('number', 'Bit rate (kbps)'),\n"
+ "'encoded_frame_length': "
+ "('number', 'Encoded frame length (bytes)'),\n"
+ "'frame_type': ('string', 'Frame type'),\n"
+ "'packets_dropped': ('number', 'Packets dropped'),\n"
+ "'total_packets': ('number', 'Total packets'),\n"
+ "'ssim': ('number', 'SSIM'),\n"
+ "'psnr': ('number', 'PSNR (dB)'),\n"
+ "}\n");
printf("frame_data = [");
for (unsigned int i = 0; i < stats.stats_.size(); ++i) {
const webrtc::test::FrameStatistic& f = stats.stats_[i];
const webrtc::test::FrameResult& ssim = ssim_result.frames[i];
const webrtc::test::FrameResult& psnr = psnr_result.frames[i];
- printf("{'frame_number': %d, "
- "'encoding_successful': %s, 'decoding_successful': %s, "
- "'encode_time': %d, 'decode_time': %d, "
- "'encode_return_code': %d, 'decode_return_code': %d, "
- "'bit_rate': %d, 'encoded_frame_length': %" PRIuS ", "
- "'frame_type': %s, 'packets_dropped': %d, "
- "'total_packets': %" PRIuS ", 'ssim': %f, 'psnr': %f},\n",
- f.frame_number,
- f.encoding_successful ? "True " : "False",
- f.decoding_successful ? "True " : "False",
- f.encode_time_in_us,
- f.decode_time_in_us,
- f.encode_return_code,
- f.decode_return_code,
- f.bit_rate_in_kbps,
- f.encoded_frame_length_in_bytes,
- f.frame_type == webrtc::kVideoFrameDelta ? "'Delta'" : "'Other'",
- f.packets_dropped,
- f.total_packets,
- ssim.value,
- psnr.value);
+ printf(
+ "{'frame_number': %d, "
+ "'encoding_successful': %s, 'decoding_successful': %s, "
+ "'encode_time': %d, 'decode_time': %d, "
+ "'encode_return_code': %d, 'decode_return_code': %d, "
+ "'bit_rate': %d, 'encoded_frame_length': %" PRIuS
+ ", "
+ "'frame_type': %s, 'packets_dropped': %d, "
+ "'total_packets': %" PRIuS ", 'ssim': %f, 'psnr': %f},\n",
+ f.frame_number, f.encoding_successful ? "True " : "False",
+ f.decoding_successful ? "True " : "False", f.encode_time_in_us,
+ f.decode_time_in_us, f.encode_return_code, f.decode_return_code,
+ f.bit_rate_in_kbps, f.encoded_frame_length_in_bytes,
+ f.frame_type == webrtc::kVideoFrameDelta ? "'Delta'" : "'Other'",
+ f.packets_dropped, f.total_packets, ssim.value, psnr.value);
}
printf("]\n");
}
@@ -438,10 +462,14 @@ void PrintPythonOutput(const webrtc::test::TestConfig& config,
// The input file must be in YUV format.
int main(int argc, char* argv[]) {
std::string program_name = argv[0];
- std::string usage = "Quality test application for video comparisons.\n"
- "Run " + program_name + " --helpshort for usage.\n"
- "Example usage:\n" + program_name +
- " --input_filename=filename.yuv --width=352 --height=288\n";
+ std::string usage =
+ "Quality test application for video comparisons.\n"
+ "Run " +
+ program_name +
+ " --helpshort for usage.\n"
+ "Example usage:\n" +
+ program_name +
+ " --input_filename=filename.yuv --width=352 --height=288\n";
google::SetUsageMessage(usage);
google::ParseCommandLineFlags(&argc, &argv, true);
@@ -478,10 +506,8 @@ int main(int argc, char* argv[]) {
packet_manipulator.InitializeRandomSeed(time(NULL));
}
webrtc::test::VideoProcessor* processor =
- new webrtc::test::VideoProcessorImpl(encoder, decoder,
- &frame_reader,
- &frame_writer,
- &packet_manipulator,
+ new webrtc::test::VideoProcessorImpl(encoder, decoder, &frame_reader,
+ &frame_writer, &packet_manipulator,
config, &stats);
processor->Init();
diff --git a/webrtc/modules/video_coding/codecs/vp8/default_temporal_layers.cc b/webrtc/modules/video_coding/codecs/vp8/default_temporal_layers.cc
index da6008ba3d..9226fa774c 100644
--- a/webrtc/modules/video_coding/codecs/vp8/default_temporal_layers.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/default_temporal_layers.cc
@@ -13,8 +13,8 @@
#include <stdlib.h>
#include <string.h>
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
#include "webrtc/modules/video_coding/codecs/vp8/include/vp8_common_types.h"
#include "vpx/vpx_encoder.h"
@@ -41,7 +41,7 @@ int DefaultTemporalLayers::CurrentLayerId() const {
int index = pattern_idx_ % temporal_ids_length_;
assert(index >= 0);
return temporal_ids_[index];
- }
+}
bool DefaultTemporalLayers::ConfigureBitrates(int bitrateKbit,
int max_bitrate_kbit,
@@ -56,8 +56,7 @@ bool DefaultTemporalLayers::ConfigureBitrates(int bitrateKbit,
cfg->ts_periodicity = temporal_ids_length_;
cfg->ts_target_bitrate[0] = bitrateKbit;
cfg->ts_rate_decimator[0] = 1;
- memcpy(cfg->ts_layer_id,
- temporal_ids_,
+ memcpy(cfg->ts_layer_id, temporal_ids_,
sizeof(unsigned int) * temporal_ids_length_);
temporal_pattern_length_ = 1;
temporal_pattern_[0] = kTemporalUpdateLastRefAll;
@@ -74,8 +73,7 @@ bool DefaultTemporalLayers::ConfigureBitrates(int bitrateKbit,
cfg->ts_target_bitrate[1] = bitrateKbit;
cfg->ts_rate_decimator[0] = 2;
cfg->ts_rate_decimator[1] = 1;
- memcpy(cfg->ts_layer_id,
- temporal_ids_,
+ memcpy(cfg->ts_layer_id, temporal_ids_,
sizeof(unsigned int) * temporal_ids_length_);
temporal_pattern_length_ = 8;
temporal_pattern_[0] = kTemporalUpdateLastAndGoldenRefAltRef;
@@ -103,8 +101,7 @@ bool DefaultTemporalLayers::ConfigureBitrates(int bitrateKbit,
cfg->ts_rate_decimator[0] = 4;
cfg->ts_rate_decimator[1] = 2;
cfg->ts_rate_decimator[2] = 1;
- memcpy(cfg->ts_layer_id,
- temporal_ids_,
+ memcpy(cfg->ts_layer_id, temporal_ids_,
sizeof(unsigned int) * temporal_ids_length_);
temporal_pattern_length_ = 8;
temporal_pattern_[0] = kTemporalUpdateLastAndGoldenRefAltRef;
@@ -138,8 +135,7 @@ bool DefaultTemporalLayers::ConfigureBitrates(int bitrateKbit,
cfg->ts_rate_decimator[1] = 4;
cfg->ts_rate_decimator[2] = 2;
cfg->ts_rate_decimator[3] = 1;
- memcpy(cfg->ts_layer_id,
- temporal_ids_,
+ memcpy(cfg->ts_layer_id, temporal_ids_,
sizeof(unsigned int) * temporal_ids_length_);
temporal_pattern_length_ = 16;
temporal_pattern_[0] = kTemporalUpdateLast;
@@ -243,7 +239,7 @@ int DefaultTemporalLayers::EncodeFlags(uint32_t timestamp) {
void DefaultTemporalLayers::PopulateCodecSpecific(
bool base_layer_sync,
- CodecSpecificInfoVP8 *vp8_info,
+ CodecSpecificInfoVP8* vp8_info,
uint32_t timestamp) {
assert(number_of_temporal_layers_ > 0);
assert(0 < temporal_ids_length_);
@@ -254,8 +250,8 @@ void DefaultTemporalLayers::PopulateCodecSpecific(
vp8_info->tl0PicIdx = kNoTl0PicIdx;
} else {
if (base_layer_sync) {
- vp8_info->temporalIdx = 0;
- vp8_info->layerSync = true;
+ vp8_info->temporalIdx = 0;
+ vp8_info->layerSync = true;
} else {
vp8_info->temporalIdx = CurrentLayerId();
TemporalReferences temporal_reference =
@@ -267,7 +263,7 @@ void DefaultTemporalLayers::PopulateCodecSpecific(
kTemporalUpdateGoldenWithoutDependencyRefAltRef ||
temporal_reference == kTemporalUpdateNoneNoRefGoldenRefAltRef ||
(temporal_reference == kTemporalUpdateNone &&
- number_of_temporal_layers_ == 4)) {
+ number_of_temporal_layers_ == 4)) {
vp8_info->layerSync = true;
} else {
vp8_info->layerSync = false;
diff --git a/webrtc/modules/video_coding/codecs/vp8/default_temporal_layers_unittest.cc b/webrtc/modules/video_coding/codecs/vp8/default_temporal_layers_unittest.cc
index 34121cbcf6..461ba69a72 100644
--- a/webrtc/modules/video_coding/codecs/vp8/default_temporal_layers_unittest.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/default_temporal_layers_unittest.cc
@@ -8,9 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-
#include "testing/gtest/include/gtest/gtest.h"
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
#include "webrtc/modules/video_coding/codecs/vp8/default_temporal_layers.h"
#include "vpx/vpx_encoder.h"
@@ -19,47 +18,36 @@
namespace webrtc {
enum {
- kTemporalUpdateLast = VP8_EFLAG_NO_UPD_GF |
- VP8_EFLAG_NO_UPD_ARF |
+ kTemporalUpdateLast = VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF |
VP8_EFLAG_NO_REF_GF |
VP8_EFLAG_NO_REF_ARF,
- kTemporalUpdateGoldenWithoutDependency = VP8_EFLAG_NO_REF_GF |
- VP8_EFLAG_NO_REF_ARF |
- VP8_EFLAG_NO_UPD_ARF |
- VP8_EFLAG_NO_UPD_LAST,
- kTemporalUpdateGolden = VP8_EFLAG_NO_REF_ARF |
- VP8_EFLAG_NO_UPD_ARF |
- VP8_EFLAG_NO_UPD_LAST,
- kTemporalUpdateAltrefWithoutDependency = VP8_EFLAG_NO_REF_ARF |
- VP8_EFLAG_NO_REF_GF |
- VP8_EFLAG_NO_UPD_GF |
- VP8_EFLAG_NO_UPD_LAST,
- kTemporalUpdateAltref = VP8_EFLAG_NO_UPD_GF |
- VP8_EFLAG_NO_UPD_LAST,
- kTemporalUpdateNone = VP8_EFLAG_NO_UPD_GF |
- VP8_EFLAG_NO_UPD_ARF |
+ kTemporalUpdateGoldenWithoutDependency =
+ VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_ARF |
+ VP8_EFLAG_NO_UPD_LAST,
+ kTemporalUpdateGolden =
+ VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST,
+ kTemporalUpdateAltrefWithoutDependency =
+ VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_GF |
+ VP8_EFLAG_NO_UPD_LAST,
+ kTemporalUpdateAltref = VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_LAST,
+ kTemporalUpdateNone = VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF |
VP8_EFLAG_NO_UPD_LAST |
VP8_EFLAG_NO_UPD_ENTROPY,
- kTemporalUpdateNoneNoRefAltRef = VP8_EFLAG_NO_REF_ARF |
- VP8_EFLAG_NO_UPD_GF |
+ kTemporalUpdateNoneNoRefAltRef = VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_GF |
VP8_EFLAG_NO_UPD_ARF |
VP8_EFLAG_NO_UPD_LAST |
VP8_EFLAG_NO_UPD_ENTROPY,
- kTemporalUpdateNoneNoRefGolden = VP8_EFLAG_NO_REF_GF |
- VP8_EFLAG_NO_UPD_GF |
+ kTemporalUpdateNoneNoRefGolden = VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_GF |
VP8_EFLAG_NO_UPD_ARF |
VP8_EFLAG_NO_UPD_LAST |
VP8_EFLAG_NO_UPD_ENTROPY,
- kTemporalUpdateGoldenWithoutDependencyRefAltRef = VP8_EFLAG_NO_REF_GF |
- VP8_EFLAG_NO_UPD_ARF |
- VP8_EFLAG_NO_UPD_LAST,
- kTemporalUpdateGoldenRefAltRef = VP8_EFLAG_NO_UPD_ARF |
- VP8_EFLAG_NO_UPD_LAST,
- kTemporalUpdateLastRefAltRef = VP8_EFLAG_NO_UPD_GF |
- VP8_EFLAG_NO_UPD_ARF |
- VP8_EFLAG_NO_REF_GF,
- kTemporalUpdateLastAndGoldenRefAltRef = VP8_EFLAG_NO_UPD_ARF |
- VP8_EFLAG_NO_REF_GF,
+ kTemporalUpdateGoldenWithoutDependencyRefAltRef =
+ VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST,
+ kTemporalUpdateGoldenRefAltRef = VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST,
+ kTemporalUpdateLastRefAltRef =
+ VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_REF_GF,
+ kTemporalUpdateLastAndGoldenRefAltRef =
+ VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_REF_GF,
};
TEST(TemporalLayersTest, 2Layers) {
@@ -68,29 +56,30 @@ TEST(TemporalLayersTest, 2Layers) {
CodecSpecificInfoVP8 vp8_info;
tl.ConfigureBitrates(500, 500, 30, &cfg);
- int expected_flags[16] = { kTemporalUpdateLastAndGoldenRefAltRef,
- kTemporalUpdateGoldenWithoutDependencyRefAltRef,
- kTemporalUpdateLastRefAltRef,
- kTemporalUpdateGoldenRefAltRef,
- kTemporalUpdateLastRefAltRef,
- kTemporalUpdateGoldenRefAltRef,
- kTemporalUpdateLastRefAltRef,
- kTemporalUpdateNone,
- kTemporalUpdateLastAndGoldenRefAltRef,
- kTemporalUpdateGoldenWithoutDependencyRefAltRef,
- kTemporalUpdateLastRefAltRef,
- kTemporalUpdateGoldenRefAltRef,
- kTemporalUpdateLastRefAltRef,
- kTemporalUpdateGoldenRefAltRef,
- kTemporalUpdateLastRefAltRef,
- kTemporalUpdateNone,
- };
- int expected_temporal_idx[16] =
- { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1 };
-
- bool expected_layer_sync[16] =
- { false, true, false, false, false, false, false, false,
- false, true, false, false, false, false, false, false };
+ int expected_flags[16] = {
+ kTemporalUpdateLastAndGoldenRefAltRef,
+ kTemporalUpdateGoldenWithoutDependencyRefAltRef,
+ kTemporalUpdateLastRefAltRef,
+ kTemporalUpdateGoldenRefAltRef,
+ kTemporalUpdateLastRefAltRef,
+ kTemporalUpdateGoldenRefAltRef,
+ kTemporalUpdateLastRefAltRef,
+ kTemporalUpdateNone,
+ kTemporalUpdateLastAndGoldenRefAltRef,
+ kTemporalUpdateGoldenWithoutDependencyRefAltRef,
+ kTemporalUpdateLastRefAltRef,
+ kTemporalUpdateGoldenRefAltRef,
+ kTemporalUpdateLastRefAltRef,
+ kTemporalUpdateGoldenRefAltRef,
+ kTemporalUpdateLastRefAltRef,
+ kTemporalUpdateNone,
+ };
+ int expected_temporal_idx[16] = {0, 1, 0, 1, 0, 1, 0, 1,
+ 0, 1, 0, 1, 0, 1, 0, 1};
+
+ bool expected_layer_sync[16] = {false, true, false, false, false, false,
+ false, false, false, true, false, false,
+ false, false, false, false};
uint32_t timestamp = 0;
for (int i = 0; i < 16; ++i) {
@@ -108,29 +97,30 @@ TEST(TemporalLayersTest, 3Layers) {
CodecSpecificInfoVP8 vp8_info;
tl.ConfigureBitrates(500, 500, 30, &cfg);
- int expected_flags[16] = { kTemporalUpdateLastAndGoldenRefAltRef,
- kTemporalUpdateNoneNoRefGolden,
- kTemporalUpdateGoldenWithoutDependencyRefAltRef,
- kTemporalUpdateNone,
- kTemporalUpdateLastRefAltRef,
- kTemporalUpdateNone,
- kTemporalUpdateGoldenRefAltRef,
- kTemporalUpdateNone,
- kTemporalUpdateLastAndGoldenRefAltRef,
- kTemporalUpdateNoneNoRefGolden,
- kTemporalUpdateGoldenWithoutDependencyRefAltRef,
- kTemporalUpdateNone,
- kTemporalUpdateLastRefAltRef,
- kTemporalUpdateNone,
- kTemporalUpdateGoldenRefAltRef,
- kTemporalUpdateNone,
+ int expected_flags[16] = {
+ kTemporalUpdateLastAndGoldenRefAltRef,
+ kTemporalUpdateNoneNoRefGolden,
+ kTemporalUpdateGoldenWithoutDependencyRefAltRef,
+ kTemporalUpdateNone,
+ kTemporalUpdateLastRefAltRef,
+ kTemporalUpdateNone,
+ kTemporalUpdateGoldenRefAltRef,
+ kTemporalUpdateNone,
+ kTemporalUpdateLastAndGoldenRefAltRef,
+ kTemporalUpdateNoneNoRefGolden,
+ kTemporalUpdateGoldenWithoutDependencyRefAltRef,
+ kTemporalUpdateNone,
+ kTemporalUpdateLastRefAltRef,
+ kTemporalUpdateNone,
+ kTemporalUpdateGoldenRefAltRef,
+ kTemporalUpdateNone,
};
- int expected_temporal_idx[16] =
- { 0, 2, 1, 2, 0, 2, 1, 2, 0, 2, 1, 2, 0, 2, 1, 2 };
+ int expected_temporal_idx[16] = {0, 2, 1, 2, 0, 2, 1, 2,
+ 0, 2, 1, 2, 0, 2, 1, 2};
- bool expected_layer_sync[16] =
- { false, true, true, false, false, false, false, false,
- false, true, true, false, false, false, false, false };
+ bool expected_layer_sync[16] = {false, true, true, false, false, false,
+ false, false, false, true, true, false,
+ false, false, false, false};
unsigned int timestamp = 0;
for (int i = 0; i < 16; ++i) {
@@ -165,12 +155,12 @@ TEST(TemporalLayersTest, 4Layers) {
kTemporalUpdateAltref,
kTemporalUpdateNone,
};
- int expected_temporal_idx[16] =
- { 0, 3, 2, 3, 1, 3, 2, 3, 0, 3, 2, 3, 1, 3, 2, 3 };
+ int expected_temporal_idx[16] = {0, 3, 2, 3, 1, 3, 2, 3,
+ 0, 3, 2, 3, 1, 3, 2, 3};
- bool expected_layer_sync[16] =
- { false, true, true, true, true, true, false, true,
- false, true, false, true, false, true, false, true };
+ bool expected_layer_sync[16] = {false, true, true, true, true, true,
+ false, true, false, true, false, true,
+ false, true, false, true};
uint32_t timestamp = 0;
for (int i = 0; i < 16; ++i) {
@@ -198,8 +188,7 @@ TEST(TemporalLayersTest, KeyFrame) {
kTemporalUpdateGoldenRefAltRef,
kTemporalUpdateNone,
};
- int expected_temporal_idx[8] =
- { 0, 0, 0, 0, 0, 0, 0, 2};
+ int expected_temporal_idx[8] = {0, 0, 0, 0, 0, 0, 0, 2};
uint32_t timestamp = 0;
for (int i = 0; i < 7; ++i) {
diff --git a/webrtc/modules/video_coding/codecs/vp8/include/vp8.h b/webrtc/modules/video_coding/codecs/vp8/include/vp8.h
index f5dae471d2..dd3514235d 100644
--- a/webrtc/modules/video_coding/codecs/vp8/include/vp8.h
+++ b/webrtc/modules/video_coding/codecs/vp8/include/vp8.h
@@ -13,7 +13,7 @@
#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_INCLUDE_VP8_H_
#define WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_INCLUDE_VP8_H_
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
namespace webrtc {
@@ -21,16 +21,15 @@ class VP8Encoder : public VideoEncoder {
public:
static VP8Encoder* Create();
- virtual ~VP8Encoder() {};
+ virtual ~VP8Encoder() {}
}; // end of VP8Encoder class
-
class VP8Decoder : public VideoDecoder {
public:
static VP8Decoder* Create();
- virtual ~VP8Decoder() {};
+ virtual ~VP8Decoder() {}
}; // end of VP8Decoder class
} // namespace webrtc
-#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_INCLUDE_VP8_H_
+#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_INCLUDE_VP8_H_
diff --git a/webrtc/modules/video_coding/codecs/vp8/include/vp8_common_types.h b/webrtc/modules/video_coding/codecs/vp8/include/vp8_common_types.h
index c2cefdd94e..7a27e4429a 100644
--- a/webrtc/modules/video_coding/codecs/vp8/include/vp8_common_types.h
+++ b/webrtc/modules/video_coding/codecs/vp8/include/vp8_common_types.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_COMMON_TYPES_H_
-#define WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_COMMON_TYPES_H_
+#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_INCLUDE_VP8_COMMON_TYPES_H_
+#define WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_INCLUDE_VP8_COMMON_TYPES_H_
#include "webrtc/common_types.h"
@@ -19,11 +19,11 @@ namespace webrtc {
// Values as required for the VP8 codec (accumulating).
static const float
kVp8LayerRateAlloction[kMaxTemporalStreams][kMaxTemporalStreams] = {
- {1.0f, 1.0f, 1.0f, 1.0f}, // 1 layer
- {0.6f, 1.0f, 1.0f, 1.0f}, // 2 layers {60%, 40%}
- {0.4f, 0.6f, 1.0f, 1.0f}, // 3 layers {40%, 20%, 40%}
- {0.25f, 0.4f, 0.6f, 1.0f} // 4 layers {25%, 15%, 20%, 40%}
+ {1.0f, 1.0f, 1.0f, 1.0f}, // 1 layer
+ {0.6f, 1.0f, 1.0f, 1.0f}, // 2 layers {60%, 40%}
+ {0.4f, 0.6f, 1.0f, 1.0f}, // 3 layers {40%, 20%, 40%}
+ {0.25f, 0.4f, 0.6f, 1.0f} // 4 layers {25%, 15%, 20%, 40%}
};
} // namespace webrtc
-#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_COMMON_TYPES_H_
+#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_INCLUDE_VP8_COMMON_TYPES_H_
diff --git a/webrtc/modules/video_coding/codecs/vp8/realtime_temporal_layers.cc b/webrtc/modules/video_coding/codecs/vp8/realtime_temporal_layers.cc
index 15b5af9200..d22601358f 100644
--- a/webrtc/modules/video_coding/codecs/vp8/realtime_temporal_layers.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/realtime_temporal_layers.cc
@@ -12,7 +12,7 @@
#include "vpx/vpx_encoder.h"
#include "vpx/vp8cx.h"
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
#include "webrtc/modules/video_coding/codecs/vp8/include/vp8_common_types.h"
#include "webrtc/modules/video_coding/codecs/vp8/temporal_layers.h"
@@ -23,7 +23,8 @@ namespace webrtc {
namespace {
enum {
kTemporalUpdateLast = VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF |
- VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF,
+ VP8_EFLAG_NO_REF_GF |
+ VP8_EFLAG_NO_REF_ARF,
kTemporalUpdateGolden =
VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST,
@@ -37,13 +38,15 @@ enum {
kTemporalUpdateAltref | VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_REF_GF,
kTemporalUpdateNone = VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF |
- VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_ENTROPY,
+ VP8_EFLAG_NO_UPD_LAST |
+ VP8_EFLAG_NO_UPD_ENTROPY,
kTemporalUpdateNoneNoRefAltref = kTemporalUpdateNone | VP8_EFLAG_NO_REF_ARF,
kTemporalUpdateNoneNoRefGoldenRefAltRef =
VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF |
- VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_ENTROPY,
+ VP8_EFLAG_NO_UPD_LAST |
+ VP8_EFLAG_NO_UPD_ENTROPY,
kTemporalUpdateGoldenWithoutDependencyRefAltRef =
VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST,
@@ -133,12 +136,14 @@ class RealTimeTemporalLayers : public TemporalLayers {
layer_ids_length_ = sizeof(layer_ids) / sizeof(*layer_ids);
static const int encode_flags[] = {
- kTemporalUpdateLastAndGoldenRefAltRef,
- kTemporalUpdateGoldenWithoutDependencyRefAltRef,
- kTemporalUpdateLastRefAltRef, kTemporalUpdateGoldenRefAltRef,
- kTemporalUpdateLastRefAltRef, kTemporalUpdateGoldenRefAltRef,
- kTemporalUpdateLastRefAltRef, kTemporalUpdateNone
- };
+ kTemporalUpdateLastAndGoldenRefAltRef,
+ kTemporalUpdateGoldenWithoutDependencyRefAltRef,
+ kTemporalUpdateLastRefAltRef,
+ kTemporalUpdateGoldenRefAltRef,
+ kTemporalUpdateLastRefAltRef,
+ kTemporalUpdateGoldenRefAltRef,
+ kTemporalUpdateLastRefAltRef,
+ kTemporalUpdateNone};
encode_flags_length_ = sizeof(encode_flags) / sizeof(*layer_ids);
encode_flags_ = encode_flags;
@@ -153,12 +158,14 @@ class RealTimeTemporalLayers : public TemporalLayers {
layer_ids_length_ = sizeof(layer_ids) / sizeof(*layer_ids);
static const int encode_flags[] = {
- kTemporalUpdateLastAndGoldenRefAltRef,
- kTemporalUpdateNoneNoRefGoldenRefAltRef,
- kTemporalUpdateGoldenWithoutDependencyRefAltRef, kTemporalUpdateNone,
- kTemporalUpdateLastRefAltRef, kTemporalUpdateNone,
- kTemporalUpdateGoldenRefAltRef, kTemporalUpdateNone
- };
+ kTemporalUpdateLastAndGoldenRefAltRef,
+ kTemporalUpdateNoneNoRefGoldenRefAltRef,
+ kTemporalUpdateGoldenWithoutDependencyRefAltRef,
+ kTemporalUpdateNone,
+ kTemporalUpdateLastRefAltRef,
+ kTemporalUpdateNone,
+ kTemporalUpdateGoldenRefAltRef,
+ kTemporalUpdateNone};
encode_flags_length_ = sizeof(encode_flags) / sizeof(*layer_ids);
encode_flags_ = encode_flags;
@@ -172,8 +179,8 @@ class RealTimeTemporalLayers : public TemporalLayers {
assert(false);
return false;
}
- memcpy(
- cfg->ts_layer_id, layer_ids_, sizeof(unsigned int) * layer_ids_length_);
+ memcpy(cfg->ts_layer_id, layer_ids_,
+ sizeof(unsigned int) * layer_ids_length_);
return true;
}
diff --git a/webrtc/modules/video_coding/codecs/vp8/reference_picture_selection.cc b/webrtc/modules/video_coding/codecs/vp8/reference_picture_selection.cc
index a922e35712..1838e32eb7 100644
--- a/webrtc/modules/video_coding/codecs/vp8/reference_picture_selection.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/reference_picture_selection.cc
@@ -25,8 +25,7 @@ ReferencePictureSelection::ReferencePictureSelection()
last_sent_ref_update_time_(0),
established_ref_picture_id_(0),
last_refresh_time_(0),
- rtt_(0) {
-}
+ rtt_(0) {}
void ReferencePictureSelection::Init() {
update_golden_next_ = true;
@@ -62,7 +61,8 @@ bool ReferencePictureSelection::ReceivedSLI(uint32_t now_ts) {
return send_refresh;
}
-int ReferencePictureSelection::EncodeFlags(int picture_id, bool send_refresh,
+int ReferencePictureSelection::EncodeFlags(int picture_id,
+ bool send_refresh,
uint32_t now_ts) {
int flags = 0;
// We can't refresh the decoder until we have established the key frame.
@@ -87,12 +87,12 @@ int ReferencePictureSelection::EncodeFlags(int picture_id, bool send_refresh,
received_ack_) {
flags |= VP8_EFLAG_NO_REF_LAST; // Don't reference the last frame.
if (update_golden_next_) {
- flags |= VP8_EFLAG_FORCE_GF; // Update the golden reference.
+ flags |= VP8_EFLAG_FORCE_GF; // Update the golden reference.
flags |= VP8_EFLAG_NO_UPD_ARF; // Don't update alt-ref.
- flags |= VP8_EFLAG_NO_REF_GF; // Don't reference the golden frame.
+ flags |= VP8_EFLAG_NO_REF_GF; // Don't reference the golden frame.
} else {
- flags |= VP8_EFLAG_FORCE_ARF; // Update the alt-ref reference.
- flags |= VP8_EFLAG_NO_UPD_GF; // Don't update the golden frame.
+ flags |= VP8_EFLAG_FORCE_ARF; // Update the alt-ref reference.
+ flags |= VP8_EFLAG_NO_UPD_GF; // Don't update the golden frame.
flags |= VP8_EFLAG_NO_REF_ARF; // Don't reference the alt-ref frame.
}
last_sent_ref_picture_id_ = picture_id;
@@ -103,9 +103,9 @@ int ReferencePictureSelection::EncodeFlags(int picture_id, bool send_refresh,
if (established_golden_)
flags |= VP8_EFLAG_NO_REF_ARF; // Don't reference the alt-ref frame.
else
- flags |= VP8_EFLAG_NO_REF_GF; // Don't reference the golden frame.
- flags |= VP8_EFLAG_NO_UPD_GF; // Don't update the golden frame.
- flags |= VP8_EFLAG_NO_UPD_ARF; // Don't update the alt-ref frame.
+ flags |= VP8_EFLAG_NO_REF_GF; // Don't reference the golden frame.
+ flags |= VP8_EFLAG_NO_UPD_GF; // Don't update the golden frame.
+ flags |= VP8_EFLAG_NO_UPD_ARF; // Don't update the alt-ref frame.
}
return flags;
}
diff --git a/webrtc/modules/video_coding/codecs/vp8/reference_picture_selection_unittest.cc b/webrtc/modules/video_coding/codecs/vp8/reference_picture_selection_unittest.cc
index c6474e5bd1..742bb96e91 100644
--- a/webrtc/modules/video_coding/codecs/vp8/reference_picture_selection_unittest.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/reference_picture_selection_unittest.cc
@@ -22,25 +22,19 @@ static const uint32_t kMinUpdateInterval = 10;
// Should match the values set in reference_picture_selection.h
static const int kRtt = 10;
-static const int kNoPropagationGolden = VP8_EFLAG_NO_REF_ARF |
- VP8_EFLAG_NO_UPD_GF |
- VP8_EFLAG_NO_UPD_ARF;
-static const int kNoPropagationAltRef = VP8_EFLAG_NO_REF_GF |
- VP8_EFLAG_NO_UPD_GF |
- VP8_EFLAG_NO_UPD_ARF;
-static const int kPropagateGolden = VP8_EFLAG_FORCE_GF |
- VP8_EFLAG_NO_UPD_ARF |
- VP8_EFLAG_NO_REF_GF |
- VP8_EFLAG_NO_REF_LAST;
-static const int kPropagateAltRef = VP8_EFLAG_FORCE_ARF |
- VP8_EFLAG_NO_UPD_GF |
- VP8_EFLAG_NO_REF_ARF |
- VP8_EFLAG_NO_REF_LAST;
-static const int kRefreshFromGolden = VP8_EFLAG_NO_REF_LAST |
- VP8_EFLAG_NO_REF_ARF;
-static const int kRefreshFromAltRef = VP8_EFLAG_NO_REF_LAST |
- VP8_EFLAG_NO_REF_GF;
-
+static const int kNoPropagationGolden =
+ VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF;
+static const int kNoPropagationAltRef =
+ VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF;
+static const int kPropagateGolden = VP8_EFLAG_FORCE_GF | VP8_EFLAG_NO_UPD_ARF |
+ VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_LAST;
+static const int kPropagateAltRef = VP8_EFLAG_FORCE_ARF | VP8_EFLAG_NO_UPD_GF |
+ VP8_EFLAG_NO_REF_ARF |
+ VP8_EFLAG_NO_REF_LAST;
+static const int kRefreshFromGolden =
+ VP8_EFLAG_NO_REF_LAST | VP8_EFLAG_NO_REF_ARF;
+static const int kRefreshFromAltRef =
+ VP8_EFLAG_NO_REF_LAST | VP8_EFLAG_NO_REF_GF;
class TestRPS : public ::testing::Test {
protected:
@@ -84,15 +78,15 @@ TEST_F(TestRPS, TestDecoderRefresh) {
EXPECT_EQ(rps_.ReceivedSLI(90 * time), true);
// Enough time have elapsed since the previous reference propagation, we will
// therefore get both a refresh from golden and a propagation of alt-ref.
- EXPECT_EQ(rps_.EncodeFlags(5, true, 90 * time), kRefreshFromGolden |
- kPropagateAltRef);
+ EXPECT_EQ(rps_.EncodeFlags(5, true, 90 * time),
+ kRefreshFromGolden | kPropagateAltRef);
rps_.ReceivedRPSI(5);
time += kRtt + 1;
// Enough time for a new refresh, but not enough time for a reference
// propagation.
EXPECT_EQ(rps_.ReceivedSLI(90 * time), true);
- EXPECT_EQ(rps_.EncodeFlags(6, true, 90 * time), kRefreshFromAltRef |
- kNoPropagationAltRef);
+ EXPECT_EQ(rps_.EncodeFlags(6, true, 90 * time),
+ kRefreshFromAltRef | kNoPropagationAltRef);
}
TEST_F(TestRPS, TestWrap) {
diff --git a/webrtc/modules/video_coding/codecs/vp8/screenshare_layers.cc b/webrtc/modules/video_coding/codecs/vp8/screenshare_layers.cc
index 0fbb2a6c40..536587a13e 100644
--- a/webrtc/modules/video_coding/codecs/vp8/screenshare_layers.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/screenshare_layers.cc
@@ -11,10 +11,12 @@
#include <stdlib.h>
+#include <algorithm>
+
#include "webrtc/base/checks.h"
#include "vpx/vpx_encoder.h"
#include "vpx/vp8cx.h"
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
namespace webrtc {
@@ -188,7 +190,7 @@ void ScreenshareLayers::FrameEncoded(unsigned int size,
}
void ScreenshareLayers::PopulateCodecSpecific(bool base_layer_sync,
- CodecSpecificInfoVP8 *vp8_info,
+ CodecSpecificInfoVP8* vp8_info,
uint32_t timestamp) {
int64_t unwrapped_timestamp = time_wrap_handler_.Unwrap(timestamp);
if (number_of_temporal_layers_ == 1) {
diff --git a/webrtc/modules/video_coding/codecs/vp8/screenshare_layers.h b/webrtc/modules/video_coding/codecs/vp8/screenshare_layers.h
index 90a8b1b883..7628758209 100644
--- a/webrtc/modules/video_coding/codecs/vp8/screenshare_layers.h
+++ b/webrtc/modules/video_coding/codecs/vp8/screenshare_layers.h
@@ -15,7 +15,7 @@
#include "webrtc/base/timeutils.h"
#include "webrtc/modules/video_coding/codecs/vp8/temporal_layers.h"
-#include "webrtc/modules/video_coding/utility/include/frame_dropper.h"
+#include "webrtc/modules/video_coding/utility/frame_dropper.h"
#include "webrtc/typedefs.h"
namespace webrtc {
diff --git a/webrtc/modules/video_coding/codecs/vp8/screenshare_layers_unittest.cc b/webrtc/modules/video_coding/codecs/vp8/screenshare_layers_unittest.cc
index 628e336568..f31ed5e4d8 100644
--- a/webrtc/modules/video_coding/codecs/vp8/screenshare_layers_unittest.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/screenshare_layers_unittest.cc
@@ -12,9 +12,9 @@
#include "vpx/vpx_encoder.h"
#include "vpx/vp8cx.h"
#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
#include "webrtc/modules/video_coding/codecs/vp8/screenshare_layers.h"
-#include "webrtc/modules/video_coding/utility/include/mock/mock_frame_dropper.h"
+#include "webrtc/modules/video_coding/utility/mock/mock_frame_dropper.h"
using ::testing::_;
using ::testing::NiceMock;
diff --git a/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.cc b/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.cc
index 5dc4ac78f1..40e438f7e4 100644
--- a/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.cc
@@ -215,9 +215,7 @@ int SimulcastEncoderAdapter::InitEncode(const VideoCodec* inst,
}
VideoEncoder* encoder = factory_->Create();
- ret = encoder->InitEncode(&stream_codec,
- number_of_cores,
- max_payload_size);
+ ret = encoder->InitEncode(&stream_codec, number_of_cores, max_payload_size);
if (ret < 0) {
Release();
return ret;
@@ -284,35 +282,25 @@ int SimulcastEncoderAdapter::Encode(
// scale it to match what the encoder expects (below).
if ((dst_width == src_width && dst_height == src_height) ||
input_image.IsZeroSize()) {
- streaminfos_[stream_idx].encoder->Encode(input_image,
- codec_specific_info,
+ streaminfos_[stream_idx].encoder->Encode(input_image, codec_specific_info,
&stream_frame_types);
} else {
VideoFrame dst_frame;
// Making sure that destination frame is of sufficient size.
// Aligning stride values based on width.
- dst_frame.CreateEmptyFrame(dst_width, dst_height,
- dst_width, (dst_width + 1) / 2,
- (dst_width + 1) / 2);
- libyuv::I420Scale(input_image.buffer(kYPlane),
- input_image.stride(kYPlane),
- input_image.buffer(kUPlane),
- input_image.stride(kUPlane),
- input_image.buffer(kVPlane),
- input_image.stride(kVPlane),
- src_width, src_height,
- dst_frame.buffer(kYPlane),
- dst_frame.stride(kYPlane),
- dst_frame.buffer(kUPlane),
- dst_frame.stride(kUPlane),
- dst_frame.buffer(kVPlane),
- dst_frame.stride(kVPlane),
- dst_width, dst_height,
- libyuv::kFilterBilinear);
+ dst_frame.CreateEmptyFrame(dst_width, dst_height, dst_width,
+ (dst_width + 1) / 2, (dst_width + 1) / 2);
+ libyuv::I420Scale(
+ input_image.buffer(kYPlane), input_image.stride(kYPlane),
+ input_image.buffer(kUPlane), input_image.stride(kUPlane),
+ input_image.buffer(kVPlane), input_image.stride(kVPlane), src_width,
+ src_height, dst_frame.buffer(kYPlane), dst_frame.stride(kYPlane),
+ dst_frame.buffer(kUPlane), dst_frame.stride(kUPlane),
+ dst_frame.buffer(kVPlane), dst_frame.stride(kVPlane), dst_width,
+ dst_height, libyuv::kFilterBilinear);
dst_frame.set_timestamp(input_image.timestamp());
dst_frame.set_render_time_ms(input_image.render_time_ms());
- streaminfos_[stream_idx].encoder->Encode(dst_frame,
- codec_specific_info,
+ streaminfos_[stream_idx].encoder->Encode(dst_frame, codec_specific_info,
&stream_frame_types);
}
}
@@ -426,16 +414,17 @@ uint32_t SimulcastEncoderAdapter::GetStreamBitrate(
// current stream's |targetBitrate|, otherwise it's capped by |maxBitrate|.
if (stream_idx < codec_.numberOfSimulcastStreams - 1) {
unsigned int max_rate = codec_.simulcastStream[stream_idx].maxBitrate;
- if (new_bitrate_kbit >= SumStreamTargetBitrate(stream_idx + 1, codec_) +
- codec_.simulcastStream[stream_idx + 1].minBitrate) {
+ if (new_bitrate_kbit >=
+ SumStreamTargetBitrate(stream_idx + 1, codec_) +
+ codec_.simulcastStream[stream_idx + 1].minBitrate) {
max_rate = codec_.simulcastStream[stream_idx].targetBitrate;
}
return std::min(new_bitrate_kbit - sum_target_lower_streams, max_rate);
} else {
- // For the highest stream (highest resolution), the |targetBitRate| and
- // |maxBitrate| are not used. Any excess bitrate (above the targets of
- // all lower streams) is given to this (highest resolution) stream.
- return new_bitrate_kbit - sum_target_lower_streams;
+ // For the highest stream (highest resolution), the |targetBitRate| and
+ // |maxBitrate| are not used. Any excess bitrate (above the targets of
+ // all lower streams) is given to this (highest resolution) stream.
+ return new_bitrate_kbit - sum_target_lower_streams;
}
} else {
// Not enough bitrate for this stream.
@@ -507,4 +496,11 @@ bool SimulcastEncoderAdapter::SupportsNativeHandle() const {
return streaminfos_[0].encoder->SupportsNativeHandle();
}
+const char* SimulcastEncoderAdapter::ImplementationName() const {
+ // We should not be calling this method before streaminfos_ are configured.
+ RTC_DCHECK(!streaminfos_.empty());
+ // TODO(pbos): Support multiple implementation names for different encoders.
+ return streaminfos_[0].encoder->ImplementationName();
+}
+
} // namespace webrtc
diff --git a/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.h b/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.h
index afec024abc..05a96c7336 100644
--- a/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.h
+++ b/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.h
@@ -59,6 +59,7 @@ class SimulcastEncoderAdapter : public VP8Encoder {
int GetTargetFramerate() override;
bool SupportsNativeHandle() const override;
+ const char* ImplementationName() const override;
private:
struct StreamInfo {
@@ -71,8 +72,8 @@ class SimulcastEncoderAdapter : public VP8Encoder {
send_stream(true) {}
StreamInfo(VideoEncoder* encoder,
EncodedImageCallback* callback,
- unsigned short width,
- unsigned short height,
+ uint16_t width,
+ uint16_t height,
bool send_stream)
: encoder(encoder),
callback(callback),
@@ -83,8 +84,8 @@ class SimulcastEncoderAdapter : public VP8Encoder {
// Deleted by SimulcastEncoderAdapter::Release().
VideoEncoder* encoder;
EncodedImageCallback* callback;
- unsigned short width;
- unsigned short height;
+ uint16_t width;
+ uint16_t height;
bool key_frame_request;
bool send_stream;
};
@@ -118,4 +119,3 @@ class SimulcastEncoderAdapter : public VP8Encoder {
} // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_SIMULCAST_ENCODER_ADAPTER_H_
-
diff --git a/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter_unittest.cc b/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter_unittest.cc
index 218b5e2d1a..86b8e0b345 100644
--- a/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter_unittest.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter_unittest.cc
@@ -11,7 +11,7 @@
#include <vector>
#include "testing/gmock/include/gmock/gmock.h"
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
#include "webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.h"
#include "webrtc/modules/video_coding/codecs/vp8/simulcast_unittest.h"
#include "webrtc/modules/video_coding/codecs/vp8/vp8_factory.h"
@@ -27,12 +27,10 @@ static VP8Encoder* CreateTestEncoderAdapter() {
class TestSimulcastEncoderAdapter : public TestVp8Simulcast {
public:
TestSimulcastEncoderAdapter()
- : TestVp8Simulcast(CreateTestEncoderAdapter(),
- VP8Decoder::Create()) {}
+ : TestVp8Simulcast(CreateTestEncoderAdapter(), VP8Decoder::Create()) {}
+
protected:
- virtual void SetUp() {
- TestVp8Simulcast::SetUp();
- }
+ virtual void SetUp() { TestVp8Simulcast::SetUp(); }
virtual void TearDown() {
TestVp8Simulcast::TearDown();
VP8EncoderFactoryConfig::set_use_simulcast_adapter(false);
@@ -97,8 +95,7 @@ TEST_F(TestSimulcastEncoderAdapter, TestSpatioTemporalLayers321PatternEncoder) {
// TODO(ronghuawu): Enable this test when SkipEncodingUnusedStreams option is
// implemented for SimulcastEncoderAdapter.
-TEST_F(TestSimulcastEncoderAdapter,
- DISABLED_TestSkipEncodingUnusedStreams) {
+TEST_F(TestSimulcastEncoderAdapter, DISABLED_TestSkipEncodingUnusedStreams) {
TestVp8Simulcast::TestSkipEncodingUnusedStreams();
}
@@ -127,23 +124,17 @@ class MockVideoEncoder : public VideoEncoder {
return 0;
}
- int32_t Release() override {
- return 0;
- }
+ int32_t Release() override { return 0; }
int32_t SetRates(uint32_t newBitRate, uint32_t frameRate) override {
return 0;
}
- MOCK_METHOD2(SetChannelParameters,
- int32_t(uint32_t packetLoss, int64_t rtt));
+ MOCK_METHOD2(SetChannelParameters, int32_t(uint32_t packetLoss, int64_t rtt));
- bool SupportsNativeHandle() const override {
- return supports_native_handle_;
- }
+ bool SupportsNativeHandle() const override { return supports_native_handle_; }
- virtual ~MockVideoEncoder() {
- }
+ virtual ~MockVideoEncoder() {}
const VideoCodec& codec() const { return codec_; }
@@ -200,7 +191,8 @@ class TestSimulcastEncoderAdapterFakeHelper {
EXPECT_TRUE(!factory_->encoders().empty());
for (size_t i = 0; i < factory_->encoders().size(); ++i) {
EXPECT_CALL(*factory_->encoders()[i],
- SetChannelParameters(packetLoss, rtt)).Times(1);
+ SetChannelParameters(packetLoss, rtt))
+ .Times(1);
}
}
@@ -249,8 +241,7 @@ class TestSimulcastEncoderAdapterFake : public ::testing::Test,
void SetupCodec() {
TestVp8Simulcast::DefaultSettings(
- &codec_,
- static_cast<const int*>(kTestTemporalLayerProfile));
+ &codec_, static_cast<const int*>(kTestTemporalLayerProfile));
EXPECT_EQ(0, adapter_->InitEncode(&codec_, 1, 1200));
adapter_->RegisterEncodeCompleteCallback(this);
}
diff --git a/webrtc/modules/video_coding/codecs/vp8/simulcast_unittest.cc b/webrtc/modules/video_coding/codecs/vp8/simulcast_unittest.cc
index 373a55237f..f23affee41 100644
--- a/webrtc/modules/video_coding/codecs/vp8/simulcast_unittest.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/simulcast_unittest.cc
@@ -13,18 +13,14 @@
namespace webrtc {
namespace testing {
-class TestVp8Impl
- : public TestVp8Simulcast {
+class TestVp8Impl : public TestVp8Simulcast {
public:
TestVp8Impl()
- : TestVp8Simulcast(VP8Encoder::Create(), VP8Decoder::Create()) {}
+ : TestVp8Simulcast(VP8Encoder::Create(), VP8Decoder::Create()) {}
+
protected:
- virtual void SetUp() {
- TestVp8Simulcast::SetUp();
- }
- virtual void TearDown() {
- TestVp8Simulcast::TearDown();
- }
+ virtual void SetUp() { TestVp8Simulcast::SetUp(); }
+ virtual void TearDown() { TestVp8Simulcast::TearDown(); }
};
TEST_F(TestVp8Impl, TestKeyFrameRequestsOnAllStreams) {
diff --git a/webrtc/modules/video_coding/codecs/vp8/simulcast_unittest.h b/webrtc/modules/video_coding/codecs/vp8/simulcast_unittest.h
index e4fc986545..7a7a2c253b 100644
--- a/webrtc/modules/video_coding/codecs/vp8/simulcast_unittest.h
+++ b/webrtc/modules/video_coding/codecs/vp8/simulcast_unittest.h
@@ -14,10 +14,11 @@
#include <algorithm>
#include <vector>
+#include "webrtc/base/checks.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/common.h"
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
-#include "webrtc/modules/video_coding/codecs/interface/mock/mock_video_codec_interface.h"
+#include "webrtc/modules/video_coding/include/mock/mock_video_codec_interface.h"
#include "webrtc/modules/video_coding/codecs/vp8/include/vp8.h"
#include "webrtc/modules/video_coding/codecs/vp8/temporal_layers.h"
#include "webrtc/video_frame.h"
@@ -43,10 +44,8 @@ const int kMinBitrates[kNumberOfSimulcastStreams] = {50, 150, 600};
const int kTargetBitrates[kNumberOfSimulcastStreams] = {100, 450, 1000};
const int kDefaultTemporalLayerProfile[3] = {3, 3, 3};
-template<typename T> void SetExpectedValues3(T value0,
- T value1,
- T value2,
- T* expected_values) {
+template <typename T>
+void SetExpectedValues3(T value0, T value1, T value2, T* expected_values) {
expected_values[0] = value0;
expected_values[1] = value1;
expected_values[2] = value2;
@@ -54,15 +53,14 @@ template<typename T> void SetExpectedValues3(T value0,
class Vp8TestEncodedImageCallback : public EncodedImageCallback {
public:
- Vp8TestEncodedImageCallback()
- : picture_id_(-1) {
+ Vp8TestEncodedImageCallback() : picture_id_(-1) {
memset(temporal_layer_, -1, sizeof(temporal_layer_));
memset(layer_sync_, false, sizeof(layer_sync_));
}
~Vp8TestEncodedImageCallback() {
- delete [] encoded_key_frame_._buffer;
- delete [] encoded_frame_._buffer;
+ delete[] encoded_key_frame_._buffer;
+ delete[] encoded_frame_._buffer;
}
virtual int32_t Encoded(const EncodedImage& encoded_image,
@@ -71,22 +69,20 @@ class Vp8TestEncodedImageCallback : public EncodedImageCallback {
// Only store the base layer.
if (codec_specific_info->codecSpecific.VP8.simulcastIdx == 0) {
if (encoded_image._frameType == kVideoFrameKey) {
- delete [] encoded_key_frame_._buffer;
+ delete[] encoded_key_frame_._buffer;
encoded_key_frame_._buffer = new uint8_t[encoded_image._size];
encoded_key_frame_._size = encoded_image._size;
encoded_key_frame_._length = encoded_image._length;
encoded_key_frame_._frameType = kVideoFrameKey;
encoded_key_frame_._completeFrame = encoded_image._completeFrame;
- memcpy(encoded_key_frame_._buffer,
- encoded_image._buffer,
+ memcpy(encoded_key_frame_._buffer, encoded_image._buffer,
encoded_image._length);
} else {
- delete [] encoded_frame_._buffer;
+ delete[] encoded_frame_._buffer;
encoded_frame_._buffer = new uint8_t[encoded_image._size];
encoded_frame_._size = encoded_image._size;
encoded_frame_._length = encoded_image._length;
- memcpy(encoded_frame_._buffer,
- encoded_image._buffer,
+ memcpy(encoded_frame_._buffer, encoded_image._buffer,
encoded_image._length);
}
}
@@ -97,8 +93,10 @@ class Vp8TestEncodedImageCallback : public EncodedImageCallback {
codec_specific_info->codecSpecific.VP8.temporalIdx;
return 0;
}
- void GetLastEncodedFrameInfo(int* picture_id, int* temporal_layer,
- bool* layer_sync, int stream) {
+ void GetLastEncodedFrameInfo(int* picture_id,
+ int* temporal_layer,
+ bool* layer_sync,
+ int stream) {
*picture_id = picture_id_;
*temporal_layer = temporal_layer_[stream];
*layer_sync = layer_sync_[stream];
@@ -120,10 +118,8 @@ class Vp8TestEncodedImageCallback : public EncodedImageCallback {
class Vp8TestDecodedImageCallback : public DecodedImageCallback {
public:
- Vp8TestDecodedImageCallback()
- : decoded_frames_(0) {
- }
- virtual int32_t Decoded(VideoFrame& decoded_image) {
+ Vp8TestDecodedImageCallback() : decoded_frames_(0) {}
+ int32_t Decoded(VideoFrame& decoded_image) override {
for (int i = 0; i < decoded_image.width(); ++i) {
EXPECT_NEAR(kColorY, decoded_image.buffer(kYPlane)[i], 1);
}
@@ -136,9 +132,11 @@ class Vp8TestDecodedImageCallback : public DecodedImageCallback {
decoded_frames_++;
return 0;
}
- int DecodedFrames() {
- return decoded_frames_;
+ int32_t Decoded(VideoFrame& decoded_image, int64_t decode_time_ms) override {
+ RTC_NOTREACHED();
+ return -1;
}
+ int DecodedFrames() { return decoded_frames_; }
private:
int decoded_frames_;
@@ -161,8 +159,7 @@ class SkipEncodingUnusedStreamsTest {
std::vector<unsigned int> configured_bitrates;
for (std::vector<TemporalLayers*>::const_iterator it =
spy_factory->spying_layers_.begin();
- it != spy_factory->spying_layers_.end();
- ++it) {
+ it != spy_factory->spying_layers_.end(); ++it) {
configured_bitrates.push_back(
static_cast<SpyingTemporalLayers*>(*it)->configured_bitrate_);
}
@@ -185,8 +182,8 @@ class SkipEncodingUnusedStreamsTest {
int framerate,
vpx_codec_enc_cfg_t* cfg) override {
configured_bitrate_ = bitrate_kbit;
- return layers_->ConfigureBitrates(
- bitrate_kbit, max_bitrate_kbit, framerate, cfg);
+ return layers_->ConfigureBitrates(bitrate_kbit, max_bitrate_kbit,
+ framerate, cfg);
}
void PopulateCodecSpecific(bool base_layer_sync,
@@ -228,16 +225,15 @@ class SkipEncodingUnusedStreamsTest {
class TestVp8Simulcast : public ::testing::Test {
public:
TestVp8Simulcast(VP8Encoder* encoder, VP8Decoder* decoder)
- : encoder_(encoder),
- decoder_(decoder) {}
+ : encoder_(encoder), decoder_(decoder) {}
// Creates an VideoFrame from |plane_colors|.
static void CreateImage(VideoFrame* frame, int plane_colors[kNumOfPlanes]) {
for (int plane_num = 0; plane_num < kNumOfPlanes; ++plane_num) {
- int width = (plane_num != kYPlane ? (frame->width() + 1) / 2 :
- frame->width());
- int height = (plane_num != kYPlane ? (frame->height() + 1) / 2 :
- frame->height());
+ int width =
+ (plane_num != kYPlane ? (frame->width() + 1) / 2 : frame->width());
+ int height =
+ (plane_num != kYPlane ? (frame->height() + 1) / 2 : frame->height());
PlaneType plane_type = static_cast<PlaneType>(plane_num);
uint8_t* data = frame->buffer(plane_type);
// Setting allocated area to zero - setting only image size to
@@ -267,24 +263,15 @@ class TestVp8Simulcast : public ::testing::Test {
settings->height = kDefaultHeight;
settings->numberOfSimulcastStreams = kNumberOfSimulcastStreams;
ASSERT_EQ(3, kNumberOfSimulcastStreams);
- ConfigureStream(kDefaultWidth / 4, kDefaultHeight / 4,
- kMaxBitrates[0],
- kMinBitrates[0],
- kTargetBitrates[0],
- &settings->simulcastStream[0],
- temporal_layer_profile[0]);
- ConfigureStream(kDefaultWidth / 2, kDefaultHeight / 2,
- kMaxBitrates[1],
- kMinBitrates[1],
- kTargetBitrates[1],
- &settings->simulcastStream[1],
- temporal_layer_profile[1]);
- ConfigureStream(kDefaultWidth, kDefaultHeight,
- kMaxBitrates[2],
- kMinBitrates[2],
- kTargetBitrates[2],
- &settings->simulcastStream[2],
- temporal_layer_profile[2]);
+ ConfigureStream(kDefaultWidth / 4, kDefaultHeight / 4, kMaxBitrates[0],
+ kMinBitrates[0], kTargetBitrates[0],
+ &settings->simulcastStream[0], temporal_layer_profile[0]);
+ ConfigureStream(kDefaultWidth / 2, kDefaultHeight / 2, kMaxBitrates[1],
+ kMinBitrates[1], kTargetBitrates[1],
+ &settings->simulcastStream[1], temporal_layer_profile[1]);
+ ConfigureStream(kDefaultWidth, kDefaultHeight, kMaxBitrates[2],
+ kMinBitrates[2], kTargetBitrates[2],
+ &settings->simulcastStream[2], temporal_layer_profile[2]);
settings->codecSpecific.VP8.resilience = kResilientStream;
settings->codecSpecific.VP8.denoisingOn = true;
settings->codecSpecific.VP8.errorConcealmentOn = false;
@@ -312,9 +299,7 @@ class TestVp8Simulcast : public ::testing::Test {
}
protected:
- virtual void SetUp() {
- SetUpCodec(kDefaultTemporalLayerProfile);
- }
+ virtual void SetUp() { SetUpCodec(kDefaultTemporalLayerProfile); }
virtual void SetUpCodec(const int* temporal_layer_profile) {
encoder_->RegisterEncodeCompleteCallback(&encoder_callback_);
@@ -323,14 +308,14 @@ class TestVp8Simulcast : public ::testing::Test {
EXPECT_EQ(0, encoder_->InitEncode(&settings_, 1, 1200));
EXPECT_EQ(0, decoder_->InitDecode(&settings_, 1));
int half_width = (kDefaultWidth + 1) / 2;
- input_frame_.CreateEmptyFrame(kDefaultWidth, kDefaultHeight,
- kDefaultWidth, half_width, half_width);
+ input_frame_.CreateEmptyFrame(kDefaultWidth, kDefaultHeight, kDefaultWidth,
+ half_width, half_width);
memset(input_frame_.buffer(kYPlane), 0,
- input_frame_.allocated_size(kYPlane));
+ input_frame_.allocated_size(kYPlane));
memset(input_frame_.buffer(kUPlane), 0,
- input_frame_.allocated_size(kUPlane));
+ input_frame_.allocated_size(kUPlane));
memset(input_frame_.buffer(kVPlane), 0,
- input_frame_.allocated_size(kVPlane));
+ input_frame_.allocated_size(kVPlane));
}
virtual void TearDown() {
@@ -342,28 +327,34 @@ class TestVp8Simulcast : public ::testing::Test {
ASSERT_GE(expected_video_streams, 0);
ASSERT_LE(expected_video_streams, kNumberOfSimulcastStreams);
if (expected_video_streams >= 1) {
- EXPECT_CALL(encoder_callback_, Encoded(
- AllOf(Field(&EncodedImage::_frameType, frame_type),
- Field(&EncodedImage::_encodedWidth, kDefaultWidth / 4),
- Field(&EncodedImage::_encodedHeight, kDefaultHeight / 4)), _, _)
- )
+ EXPECT_CALL(
+ encoder_callback_,
+ Encoded(
+ AllOf(Field(&EncodedImage::_frameType, frame_type),
+ Field(&EncodedImage::_encodedWidth, kDefaultWidth / 4),
+ Field(&EncodedImage::_encodedHeight, kDefaultHeight / 4)),
+ _, _))
.Times(1)
.WillRepeatedly(Return(0));
}
if (expected_video_streams >= 2) {
- EXPECT_CALL(encoder_callback_, Encoded(
- AllOf(Field(&EncodedImage::_frameType, frame_type),
- Field(&EncodedImage::_encodedWidth, kDefaultWidth / 2),
- Field(&EncodedImage::_encodedHeight, kDefaultHeight / 2)), _, _)
- )
+ EXPECT_CALL(
+ encoder_callback_,
+ Encoded(
+ AllOf(Field(&EncodedImage::_frameType, frame_type),
+ Field(&EncodedImage::_encodedWidth, kDefaultWidth / 2),
+ Field(&EncodedImage::_encodedHeight, kDefaultHeight / 2)),
+ _, _))
.Times(1)
.WillRepeatedly(Return(0));
}
if (expected_video_streams >= 3) {
- EXPECT_CALL(encoder_callback_, Encoded(
- AllOf(Field(&EncodedImage::_frameType, frame_type),
- Field(&EncodedImage::_encodedWidth, kDefaultWidth),
- Field(&EncodedImage::_encodedHeight, kDefaultHeight)), _, _))
+ EXPECT_CALL(
+ encoder_callback_,
+ Encoded(AllOf(Field(&EncodedImage::_frameType, frame_type),
+ Field(&EncodedImage::_encodedWidth, kDefaultWidth),
+ Field(&EncodedImage::_encodedHeight, kDefaultHeight)),
+ _, _))
.Times(1)
.WillRepeatedly(Return(0));
}
@@ -477,8 +468,8 @@ class TestVp8Simulcast : public ::testing::Test {
void TestPaddingOneStreamTwoMaxedOut() {
// We are just below limit of sending third stream, so we should get
// first stream's rate maxed out at |targetBitrate|, second at |maxBitrate|.
- encoder_->SetRates(kTargetBitrates[0] + kTargetBitrates[1] +
- kMinBitrates[2] - 1, 30);
+ encoder_->SetRates(
+ kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] - 1, 30);
std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
kVideoFrameDelta);
ExpectStreams(kVideoFrameKey, 2);
@@ -491,8 +482,8 @@ class TestVp8Simulcast : public ::testing::Test {
void TestSendAllStreams() {
// We have just enough to send all streams.
- encoder_->SetRates(kTargetBitrates[0] + kTargetBitrates[1] +
- kMinBitrates[2], 30);
+ encoder_->SetRates(
+ kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2], 30);
std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
kVideoFrameDelta);
ExpectStreams(kVideoFrameKey, 3);
@@ -505,8 +496,7 @@ class TestVp8Simulcast : public ::testing::Test {
void TestDisablingStreams() {
// We should get three media streams.
- encoder_->SetRates(kMaxBitrates[0] + kMaxBitrates[1] +
- kMaxBitrates[2], 30);
+ encoder_->SetRates(kMaxBitrates[0] + kMaxBitrates[1] + kMaxBitrates[2], 30);
std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
kVideoFrameDelta);
ExpectStreams(kVideoFrameKey, 3);
@@ -517,8 +507,8 @@ class TestVp8Simulcast : public ::testing::Test {
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
// We should only get two streams and padding for one.
- encoder_->SetRates(kTargetBitrates[0] + kTargetBitrates[1] +
- kMinBitrates[2] / 2, 30);
+ encoder_->SetRates(
+ kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] / 2, 30);
ExpectStreams(kVideoFrameDelta, 2);
input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
@@ -537,16 +527,16 @@ class TestVp8Simulcast : public ::testing::Test {
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
// We should only get two streams and padding for one.
- encoder_->SetRates(kTargetBitrates[0] + kTargetBitrates[1] +
- kMinBitrates[2] / 2, 30);
+ encoder_->SetRates(
+ kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] / 2, 30);
// We get a key frame because a new stream is being enabled.
ExpectStreams(kVideoFrameKey, 2);
input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
// We should get all three streams.
- encoder_->SetRates(kTargetBitrates[0] + kTargetBitrates[1] +
- kTargetBitrates[2], 30);
+ encoder_->SetRates(
+ kTargetBitrates[0] + kTargetBitrates[1] + kTargetBitrates[2], 30);
// We get a key frame because a new stream is being enabled.
ExpectStreams(kVideoFrameKey, 3);
input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
@@ -571,20 +561,20 @@ class TestVp8Simulcast : public ::testing::Test {
input_frame_.CreateEmptyFrame(settings_.width, settings_.height,
settings_.width, half_width, half_width);
memset(input_frame_.buffer(kYPlane), 0,
- input_frame_.allocated_size(kYPlane));
+ input_frame_.allocated_size(kYPlane));
memset(input_frame_.buffer(kUPlane), 0,
- input_frame_.allocated_size(kUPlane));
+ input_frame_.allocated_size(kUPlane));
memset(input_frame_.buffer(kVPlane), 0,
- input_frame_.allocated_size(kVPlane));
+ input_frame_.allocated_size(kVPlane));
// The for loop above did not set the bitrate of the highest layer.
- settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1].
- maxBitrate = 0;
+ settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1]
+ .maxBitrate = 0;
// The highest layer has to correspond to the non-simulcast resolution.
- settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1].
- width = settings_.width;
- settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1].
- height = settings_.height;
+ settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1].width =
+ settings_.width;
+ settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1].height =
+ settings_.height;
EXPECT_EQ(0, encoder_->InitEncode(&settings_, 1, 1200));
// Encode one frame and verify.
@@ -612,21 +602,17 @@ class TestVp8Simulcast : public ::testing::Test {
input_frame_.CreateEmptyFrame(settings_.width, settings_.height,
settings_.width, half_width, half_width);
memset(input_frame_.buffer(kYPlane), 0,
- input_frame_.allocated_size(kYPlane));
+ input_frame_.allocated_size(kYPlane));
memset(input_frame_.buffer(kUPlane), 0,
- input_frame_.allocated_size(kUPlane));
+ input_frame_.allocated_size(kUPlane));
memset(input_frame_.buffer(kVPlane), 0,
- input_frame_.allocated_size(kVPlane));
+ input_frame_.allocated_size(kVPlane));
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
}
- void TestSwitchingToOneStream() {
- SwitchingToOneStream(1024, 768);
- }
+ void TestSwitchingToOneStream() { SwitchingToOneStream(1024, 768); }
- void TestSwitchingToOneOddStream() {
- SwitchingToOneStream(1023, 769);
- }
+ void TestSwitchingToOneOddStream() { SwitchingToOneStream(1023, 769); }
void TestRPSIEncoder() {
Vp8TestEncodedImageCallback encoder_callback;
@@ -777,67 +763,55 @@ class TestVp8Simulcast : public ::testing::Test {
encoder_->RegisterEncodeCompleteCallback(&encoder_callback);
encoder_->SetRates(kMaxBitrates[2], 30); // To get all three streams.
- int expected_temporal_idx[3] = { -1, -1, -1};
+ int expected_temporal_idx[3] = {-1, -1, -1};
bool expected_layer_sync[3] = {false, false, false};
// First frame: #0.
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
SetExpectedValues3<int>(0, 0, 0, expected_temporal_idx);
SetExpectedValues3<bool>(true, true, true, expected_layer_sync);
- VerifyTemporalIdxAndSyncForAllSpatialLayers(&encoder_callback,
- expected_temporal_idx,
- expected_layer_sync,
- 3);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
// Next frame: #1.
input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
SetExpectedValues3<int>(2, 2, 2, expected_temporal_idx);
SetExpectedValues3<bool>(true, true, true, expected_layer_sync);
- VerifyTemporalIdxAndSyncForAllSpatialLayers(&encoder_callback,
- expected_temporal_idx,
- expected_layer_sync,
- 3);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
// Next frame: #2.
input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
SetExpectedValues3<int>(1, 1, 1, expected_temporal_idx);
SetExpectedValues3<bool>(true, true, true, expected_layer_sync);
- VerifyTemporalIdxAndSyncForAllSpatialLayers(&encoder_callback,
- expected_temporal_idx,
- expected_layer_sync,
- 3);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
// Next frame: #3.
input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
SetExpectedValues3<int>(2, 2, 2, expected_temporal_idx);
SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
- VerifyTemporalIdxAndSyncForAllSpatialLayers(&encoder_callback,
- expected_temporal_idx,
- expected_layer_sync,
- 3);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
// Next frame: #4.
input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
SetExpectedValues3<int>(0, 0, 0, expected_temporal_idx);
SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
- VerifyTemporalIdxAndSyncForAllSpatialLayers(&encoder_callback,
- expected_temporal_idx,
- expected_layer_sync,
- 3);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
// Next frame: #5.
input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
SetExpectedValues3<int>(2, 2, 2, expected_temporal_idx);
SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
- VerifyTemporalIdxAndSyncForAllSpatialLayers(&encoder_callback,
- expected_temporal_idx,
- expected_layer_sync,
- 3);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
}
// Test the layer pattern and sync flag for various spatial-temporal patterns.
@@ -858,67 +832,55 @@ class TestVp8Simulcast : public ::testing::Test {
encoder_->RegisterEncodeCompleteCallback(&encoder_callback);
encoder_->SetRates(kMaxBitrates[2], 30); // To get all three streams.
- int expected_temporal_idx[3] = { -1, -1, -1};
+ int expected_temporal_idx[3] = {-1, -1, -1};
bool expected_layer_sync[3] = {false, false, false};
// First frame: #0.
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
SetExpectedValues3<int>(0, 0, 255, expected_temporal_idx);
SetExpectedValues3<bool>(true, true, false, expected_layer_sync);
- VerifyTemporalIdxAndSyncForAllSpatialLayers(&encoder_callback,
- expected_temporal_idx,
- expected_layer_sync,
- 3);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
// Next frame: #1.
input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
SetExpectedValues3<int>(2, 1, 255, expected_temporal_idx);
SetExpectedValues3<bool>(true, true, false, expected_layer_sync);
- VerifyTemporalIdxAndSyncForAllSpatialLayers(&encoder_callback,
- expected_temporal_idx,
- expected_layer_sync,
- 3);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
// Next frame: #2.
input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
SetExpectedValues3<int>(1, 0, 255, expected_temporal_idx);
SetExpectedValues3<bool>(true, false, false, expected_layer_sync);
- VerifyTemporalIdxAndSyncForAllSpatialLayers(&encoder_callback,
- expected_temporal_idx,
- expected_layer_sync,
- 3);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
// Next frame: #3.
input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
SetExpectedValues3<int>(2, 1, 255, expected_temporal_idx);
SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
- VerifyTemporalIdxAndSyncForAllSpatialLayers(&encoder_callback,
- expected_temporal_idx,
- expected_layer_sync,
- 3);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
// Next frame: #4.
input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
SetExpectedValues3<int>(0, 0, 255, expected_temporal_idx);
SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
- VerifyTemporalIdxAndSyncForAllSpatialLayers(&encoder_callback,
- expected_temporal_idx,
- expected_layer_sync,
- 3);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
// Next frame: #5.
input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
SetExpectedValues3<int>(2, 1, 255, expected_temporal_idx);
SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
- VerifyTemporalIdxAndSyncForAllSpatialLayers(&encoder_callback,
- expected_temporal_idx,
- expected_layer_sync,
- 3);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
}
void TestStrideEncodeDecode() {
@@ -932,8 +894,8 @@ class TestVp8Simulcast : public ::testing::Test {
// 1. stride > width 2. stride_y != stride_uv/2
int stride_y = kDefaultWidth + 20;
int stride_uv = ((kDefaultWidth + 1) / 2) + 5;
- input_frame_.CreateEmptyFrame(kDefaultWidth, kDefaultHeight,
- stride_y, stride_uv, stride_uv);
+ input_frame_.CreateEmptyFrame(kDefaultWidth, kDefaultHeight, stride_y,
+ stride_uv, stride_uv);
// Set color.
int plane_offset[kNumOfPlanes];
plane_offset[kYPlane] = kColorY;
@@ -963,10 +925,9 @@ class TestVp8Simulcast : public ::testing::Test {
void TestSkipEncodingUnusedStreams() {
SkipEncodingUnusedStreamsTest test;
std::vector<unsigned int> configured_bitrate =
- test.RunTest(encoder_.get(),
- &settings_,
- 1); // Target bit rate 1, to force all streams but the
- // base one to be exceeding bandwidth constraints.
+ test.RunTest(encoder_.get(), &settings_,
+ 1); // Target bit rate 1, to force all streams but the
+ // base one to be exceeding bandwidth constraints.
EXPECT_EQ(static_cast<size_t>(kNumberOfSimulcastStreams),
configured_bitrate.size());
@@ -975,8 +936,7 @@ class TestVp8Simulcast : public ::testing::Test {
int stream = 0;
for (std::vector<unsigned int>::const_iterator it =
configured_bitrate.begin();
- it != configured_bitrate.end();
- ++it) {
+ it != configured_bitrate.end(); ++it) {
if (stream == 0) {
EXPECT_EQ(min_bitrate, *it);
} else {
diff --git a/webrtc/modules/video_coding/codecs/vp8/temporal_layers.h b/webrtc/modules/video_coding/codecs/vp8/temporal_layers.h
index 7607210d5c..47112c64aa 100644
--- a/webrtc/modules/video_coding/codecs/vp8/temporal_layers.h
+++ b/webrtc/modules/video_coding/codecs/vp8/temporal_layers.h
@@ -14,7 +14,8 @@
#include "vpx/vpx_encoder.h"
-#include "webrtc/common_video/interface/video_image.h"
+#include "webrtc/common.h"
+#include "webrtc/common_video/include/video_image.h"
#include "webrtc/typedefs.h"
namespace webrtc {
@@ -30,6 +31,8 @@ class TemporalLayers {
virtual ~Factory() {}
virtual TemporalLayers* Create(int temporal_layers,
uint8_t initial_tl0_pic_idx) const;
+ static const ConfigOptionID identifier =
+ ConfigOptionID::kTemporalLayersFactory;
};
virtual ~TemporalLayers() {}
diff --git a/webrtc/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc b/webrtc/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc
index 5ec674f16a..c3d77da063 100644
--- a/webrtc/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc
@@ -11,12 +11,12 @@
#include <stdio.h>
#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/base/checks.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
#include "webrtc/modules/video_coding/codecs/vp8/include/vp8.h"
#include "webrtc/system_wrappers/include/tick_util.h"
#include "webrtc/test/testsupport/fileutils.h"
-#include "webrtc/test/testsupport/gtest_disable.h"
namespace webrtc {
@@ -78,7 +78,11 @@ class Vp8UnitTestDecodeCompleteCallback : public webrtc::DecodedImageCallback {
public:
explicit Vp8UnitTestDecodeCompleteCallback(VideoFrame* frame)
: decoded_frame_(frame), decode_complete(false) {}
- int Decoded(webrtc::VideoFrame& frame);
+ int32_t Decoded(VideoFrame& frame) override;
+ int32_t Decoded(VideoFrame& frame, int64_t decode_time_ms) override {
+ RTC_NOTREACHED();
+ return -1;
+ }
bool DecodeComplete();
private:
@@ -216,7 +220,12 @@ TEST_F(TestVp8Impl, EncoderParameterTest) {
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->InitDecode(&codec_inst_, 1));
}
-TEST_F(TestVp8Impl, DISABLED_ON_ANDROID(AlignedStrideEncodeDecode)) {
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_AlignedStrideEncodeDecode DISABLED_AlignedStrideEncodeDecode
+#else
+#define MAYBE_AlignedStrideEncodeDecode AlignedStrideEncodeDecode
+#endif
+TEST_F(TestVp8Impl, MAYBE_AlignedStrideEncodeDecode) {
SetUpEncodeDecode();
encoder_->Encode(input_frame_, NULL, NULL);
EXPECT_GT(WaitForEncodedFrame(), 0u);
@@ -232,7 +241,12 @@ TEST_F(TestVp8Impl, DISABLED_ON_ANDROID(AlignedStrideEncodeDecode)) {
EXPECT_EQ(kTestNtpTimeMs, decoded_frame_.ntp_time_ms());
}
-TEST_F(TestVp8Impl, DISABLED_ON_ANDROID(DecodeWithACompleteKeyFrame)) {
+#if defined(WEBRTC_ANDROID)
+#define MAYBE_DecodeWithACompleteKeyFrame DISABLED_DecodeWithACompleteKeyFrame
+#else
+#define MAYBE_DecodeWithACompleteKeyFrame DecodeWithACompleteKeyFrame
+#endif
+TEST_F(TestVp8Impl, MAYBE_DecodeWithACompleteKeyFrame) {
SetUpEncodeDecode();
encoder_->Encode(input_frame_, NULL, NULL);
EXPECT_GT(WaitForEncodedFrame(), 0u);
diff --git a/webrtc/modules/video_coding/codecs/vp8/vp8_factory.h b/webrtc/modules/video_coding/codecs/vp8/vp8_factory.h
index 84745ea5a1..52f8aa30b8 100644
--- a/webrtc/modules/video_coding/codecs/vp8/vp8_factory.h
+++ b/webrtc/modules/video_coding/codecs/vp8/vp8_factory.h
@@ -32,4 +32,3 @@ class VP8EncoderFactoryConfig {
} // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_VP8_FACTORY_H_
-
diff --git a/webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc b/webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc
index 029ccd1f27..5a04f6a43d 100644
--- a/webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc
@@ -16,7 +16,7 @@
#include <algorithm>
// NOTE(ajm): Path provided by gyp.
-#include "libyuv/scale.h" // NOLINT
+#include "libyuv/scale.h" // NOLINT
#include "libyuv/convert.h" // NOLINT
#include "webrtc/base/checks.h"
@@ -24,8 +24,8 @@
#include "webrtc/common.h"
#include "webrtc/common_types.h"
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
#include "webrtc/modules/video_coding/codecs/vp8/include/vp8_common_types.h"
#include "webrtc/modules/video_coding/codecs/vp8/screenshare_layers.h"
#include "webrtc/modules/video_coding/codecs/vp8/temporal_layers.h"
@@ -68,10 +68,9 @@ std::vector<int> GetStreamBitratesKbps(const VideoCodec& codec,
std::vector<int> bitrates_kbps(codec.numberOfSimulcastStreams);
// Allocate min -> target bitrates as long as we have bitrate to spend.
size_t last_active_stream = 0;
- for (size_t i = 0;
- i < static_cast<size_t>(codec.numberOfSimulcastStreams) &&
- bitrate_to_allocate_kbps >=
- static_cast<int>(codec.simulcastStream[i].minBitrate);
+ for (size_t i = 0; i < static_cast<size_t>(codec.numberOfSimulcastStreams) &&
+ bitrate_to_allocate_kbps >=
+ static_cast<int>(codec.simulcastStream[i].minBitrate);
++i) {
last_active_stream = i;
int allocated_bitrate_kbps =
@@ -132,7 +131,7 @@ bool ValidSimulcastResolutions(const VideoCodec& codec, int num_streams) {
return true;
}
-int NumStreamsDisabled(std::vector<bool>& streams) {
+int NumStreamsDisabled(const std::vector<bool>& streams) {
int num_disabled = 0;
for (bool stream : streams) {
if (!stream)
@@ -183,7 +182,7 @@ int VP8EncoderImpl::Release() {
while (!encoded_images_.empty()) {
EncodedImage& image = encoded_images_.back();
- delete [] image._buffer;
+ delete[] image._buffer;
encoded_images_.pop_back();
}
while (!encoders_.empty()) {
@@ -289,10 +288,8 @@ int VP8EncoderImpl::SetRates(uint32_t new_bitrate_kbit,
target_bitrate = tl0_bitrate;
}
configurations_[i].rc_target_bitrate = target_bitrate;
- temporal_layers_[stream_idx]->ConfigureBitrates(target_bitrate,
- max_bitrate,
- framerate,
- &configurations_[i]);
+ temporal_layers_[stream_idx]->ConfigureBitrates(
+ target_bitrate, max_bitrate, framerate, &configurations_[i]);
if (vpx_codec_enc_config_set(&encoders_[i], &configurations_[i])) {
return WEBRTC_VIDEO_CODEC_ERROR;
}
@@ -301,6 +298,10 @@ int VP8EncoderImpl::SetRates(uint32_t new_bitrate_kbit,
return WEBRTC_VIDEO_CODEC_OK;
}
+const char* VP8EncoderImpl::ImplementationName() const {
+ return "libvpx";
+}
+
void VP8EncoderImpl::SetStreamState(bool send_stream,
int stream_idx) {
if (send_stream && !send_stream_[stream_idx]) {
@@ -311,8 +312,8 @@ void VP8EncoderImpl::SetStreamState(bool send_stream,
}
void VP8EncoderImpl::SetupTemporalLayers(int num_streams,
- int num_temporal_layers,
- const VideoCodec& codec) {
+ int num_temporal_layers,
+ const VideoCodec& codec) {
const Config default_options;
const TemporalLayers::Factory& tl_factory =
(codec.extra_options ? codec.extra_options : &default_options)
@@ -330,15 +331,16 @@ void VP8EncoderImpl::SetupTemporalLayers(int num_streams,
for (int i = 0; i < num_streams; ++i) {
// TODO(andresp): crash if layers is invalid.
int layers = codec.simulcastStream[i].numberOfTemporalLayers;
- if (layers < 1) layers = 1;
+ if (layers < 1)
+ layers = 1;
temporal_layers_.push_back(tl_factory.Create(layers, rand()));
}
}
}
int VP8EncoderImpl::InitEncode(const VideoCodec* inst,
- int number_of_cores,
- size_t /*maxPayloadSize */) {
+ int number_of_cores,
+ size_t /*maxPayloadSize */) {
if (inst == NULL) {
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
}
@@ -375,12 +377,13 @@ int VP8EncoderImpl::InitEncode(const VideoCodec* inst,
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
}
- int num_temporal_layers = doing_simulcast ?
- inst->simulcastStream[0].numberOfTemporalLayers :
- inst->codecSpecific.VP8.numberOfTemporalLayers;
+ int num_temporal_layers =
+ doing_simulcast ? inst->simulcastStream[0].numberOfTemporalLayers
+ : inst->codecSpecific.VP8.numberOfTemporalLayers;
// TODO(andresp): crash if num temporal layers is bananas.
- if (num_temporal_layers < 1) num_temporal_layers = 1;
+ if (num_temporal_layers < 1)
+ num_temporal_layers = 1;
SetupTemporalLayers(number_of_streams, num_temporal_layers, *inst);
feedback_mode_ = inst->codecSpecific.VP8.feedbackModeOn;
@@ -410,7 +413,7 @@ int VP8EncoderImpl::InitEncode(const VideoCodec* inst,
int idx = number_of_streams - 1;
for (int i = 0; i < (number_of_streams - 1); ++i, --idx) {
int gcd = GCD(inst->simulcastStream[idx].width,
- inst->simulcastStream[idx-1].width);
+ inst->simulcastStream[idx - 1].width);
downsampling_factors_[i].num = inst->simulcastStream[idx].width / gcd;
downsampling_factors_[i].den = inst->simulcastStream[idx - 1].width / gcd;
send_stream_[i] = false;
@@ -422,20 +425,20 @@ int VP8EncoderImpl::InitEncode(const VideoCodec* inst,
}
for (int i = 0; i < number_of_streams; ++i) {
// Random start, 16 bits is enough.
- picture_id_[i] = static_cast<uint16_t>(rand()) & 0x7FFF;
+ picture_id_[i] = static_cast<uint16_t>(rand()) & 0x7FFF; // NOLINT
last_key_frame_picture_id_[i] = -1;
// allocate memory for encoded image
if (encoded_images_[i]._buffer != NULL) {
- delete [] encoded_images_[i]._buffer;
+ delete[] encoded_images_[i]._buffer;
}
- encoded_images_[i]._size = CalcBufferSize(kI420,
- codec_.width, codec_.height);
+ encoded_images_[i]._size =
+ CalcBufferSize(kI420, codec_.width, codec_.height);
encoded_images_[i]._buffer = new uint8_t[encoded_images_[i]._size];
encoded_images_[i]._completeFrame = true;
}
// populate encoder configuration with default values
- if (vpx_codec_enc_config_default(vpx_codec_vp8_cx(),
- &configurations_[0], 0)) {
+ if (vpx_codec_enc_config_default(vpx_codec_vp8_cx(), &configurations_[0],
+ 0)) {
return WEBRTC_VIDEO_CODEC_ERROR;
}
// setting the time base of the codec
@@ -459,8 +462,8 @@ int VP8EncoderImpl::InitEncode(const VideoCodec* inst,
break;
case kResilientFrames:
#ifdef INDEPENDENT_PARTITIONS
- configurations_[0]-g_error_resilient = VPX_ERROR_RESILIENT_DEFAULT |
- VPX_ERROR_RESILIENT_PARTITIONS;
+ configurations_[0] - g_error_resilient =
+ VPX_ERROR_RESILIENT_DEFAULT | VPX_ERROR_RESILIENT_PARTITIONS;
break;
#else
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; // Not supported
@@ -536,20 +539,18 @@ int VP8EncoderImpl::InitEncode(const VideoCodec* inst,
// Determine number of threads based on the image size and #cores.
// TODO(fbarchard): Consider number of Simulcast layers.
- configurations_[0].g_threads = NumberOfThreads(configurations_[0].g_w,
- configurations_[0].g_h,
- number_of_cores);
+ configurations_[0].g_threads = NumberOfThreads(
+ configurations_[0].g_w, configurations_[0].g_h, number_of_cores);
// Creating a wrapper to the image - setting image data to NULL.
// Actual pointer will be set in encode. Setting align to 1, as it
// is meaningless (no memory allocation is done here).
- vpx_img_wrap(&raw_images_[0], VPX_IMG_FMT_I420, inst->width, inst->height,
- 1, NULL);
+ vpx_img_wrap(&raw_images_[0], VPX_IMG_FMT_I420, inst->width, inst->height, 1,
+ NULL);
if (encoders_.size() == 1) {
configurations_[0].rc_target_bitrate = inst->startBitrate;
- temporal_layers_[0]->ConfigureBitrates(inst->startBitrate,
- inst->maxBitrate,
+ temporal_layers_[0]->ConfigureBitrates(inst->startBitrate, inst->maxBitrate,
inst->maxFramerate,
&configurations_[0]);
} else {
@@ -641,20 +642,15 @@ int VP8EncoderImpl::InitAndSetControlSettings() {
flags |= VPX_CODEC_USE_OUTPUT_PARTITION;
if (encoders_.size() > 1) {
- int error = vpx_codec_enc_init_multi(&encoders_[0],
- vpx_codec_vp8_cx(),
- &configurations_[0],
- encoders_.size(),
- flags,
- &downsampling_factors_[0]);
+ int error = vpx_codec_enc_init_multi(&encoders_[0], vpx_codec_vp8_cx(),
+ &configurations_[0], encoders_.size(),
+ flags, &downsampling_factors_[0]);
if (error) {
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
}
} else {
- if (vpx_codec_enc_init(&encoders_[0],
- vpx_codec_vp8_cx(),
- &configurations_[0],
- flags)) {
+ if (vpx_codec_enc_init(&encoders_[0], vpx_codec_vp8_cx(),
+ &configurations_[0], flags)) {
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
}
}
@@ -671,13 +667,13 @@ int VP8EncoderImpl::InitAndSetControlSettings() {
#else
denoiser_state = kDenoiserOnAdaptive;
#endif
- vpx_codec_control(&encoders_[0], VP8E_SET_NOISE_SENSITIVITY,
- codec_.codecSpecific.VP8.denoisingOn ?
- denoiser_state : kDenoiserOff);
+ vpx_codec_control(
+ &encoders_[0], VP8E_SET_NOISE_SENSITIVITY,
+ codec_.codecSpecific.VP8.denoisingOn ? denoiser_state : kDenoiserOff);
if (encoders_.size() > 2) {
- vpx_codec_control(&encoders_[1], VP8E_SET_NOISE_SENSITIVITY,
- codec_.codecSpecific.VP8.denoisingOn ?
- denoiser_state : kDenoiserOff);
+ vpx_codec_control(
+ &encoders_[1], VP8E_SET_NOISE_SENSITIVITY,
+ codec_.codecSpecific.VP8.denoisingOn ? denoiser_state : kDenoiserOff);
}
for (size_t i = 0; i < encoders_.size(); ++i) {
// Allow more screen content to be detected as static.
@@ -710,14 +706,12 @@ uint32_t VP8EncoderImpl::MaxIntraTarget(uint32_t optimalBuffersize) {
// Don't go below 3 times the per frame bandwidth.
const uint32_t minIntraTh = 300;
- return (targetPct < minIntraTh) ? minIntraTh: targetPct;
+ return (targetPct < minIntraTh) ? minIntraTh : targetPct;
}
int VP8EncoderImpl::Encode(const VideoFrame& frame,
const CodecSpecificInfo* codec_specific_info,
const std::vector<FrameType>* frame_types) {
- TRACE_EVENT1("webrtc", "VP8::Encode", "timestamp", frame.timestamp());
-
if (!inited_)
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
if (frame.IsZeroSize())
@@ -731,7 +725,7 @@ int VP8EncoderImpl::Encode(const VideoFrame& frame,
quality_scaler_enabled_ ? quality_scaler_.GetScaledFrame(frame) : frame;
if (quality_scaler_enabled_ && (input_image.width() != codec_.width ||
- input_image.height() != codec_.height)) {
+ input_image.height() != codec_.height)) {
int ret = UpdateCodecFrameSize(input_image);
if (ret < 0)
return ret;
@@ -747,11 +741,11 @@ int VP8EncoderImpl::Encode(const VideoFrame& frame,
// Image in vpx_image_t format.
// Input image is const. VP8's raw image is not defined as const.
raw_images_[0].planes[VPX_PLANE_Y] =
- const_cast<uint8_t*>(input_image.buffer(kYPlane));
+ const_cast<uint8_t*>(input_image.buffer(kYPlane));
raw_images_[0].planes[VPX_PLANE_U] =
- const_cast<uint8_t*>(input_image.buffer(kUPlane));
+ const_cast<uint8_t*>(input_image.buffer(kUPlane));
raw_images_[0].planes[VPX_PLANE_V] =
- const_cast<uint8_t*>(input_image.buffer(kVPlane));
+ const_cast<uint8_t*>(input_image.buffer(kVPlane));
raw_images_[0].stride[VPX_PLANE_Y] = input_image.stride(kYPlane);
raw_images_[0].stride[VPX_PLANE_U] = input_image.stride(kUPlane);
@@ -760,17 +754,17 @@ int VP8EncoderImpl::Encode(const VideoFrame& frame,
for (size_t i = 1; i < encoders_.size(); ++i) {
// Scale the image down a number of times by downsampling factor
libyuv::I420Scale(
- raw_images_[i-1].planes[VPX_PLANE_Y],
- raw_images_[i-1].stride[VPX_PLANE_Y],
- raw_images_[i-1].planes[VPX_PLANE_U],
- raw_images_[i-1].stride[VPX_PLANE_U],
- raw_images_[i-1].planes[VPX_PLANE_V],
- raw_images_[i-1].stride[VPX_PLANE_V],
- raw_images_[i-1].d_w, raw_images_[i-1].d_h,
- raw_images_[i].planes[VPX_PLANE_Y], raw_images_[i].stride[VPX_PLANE_Y],
- raw_images_[i].planes[VPX_PLANE_U], raw_images_[i].stride[VPX_PLANE_U],
- raw_images_[i].planes[VPX_PLANE_V], raw_images_[i].stride[VPX_PLANE_V],
- raw_images_[i].d_w, raw_images_[i].d_h, libyuv::kFilterBilinear);
+ raw_images_[i - 1].planes[VPX_PLANE_Y],
+ raw_images_[i - 1].stride[VPX_PLANE_Y],
+ raw_images_[i - 1].planes[VPX_PLANE_U],
+ raw_images_[i - 1].stride[VPX_PLANE_U],
+ raw_images_[i - 1].planes[VPX_PLANE_V],
+ raw_images_[i - 1].stride[VPX_PLANE_V], raw_images_[i - 1].d_w,
+ raw_images_[i - 1].d_h, raw_images_[i].planes[VPX_PLANE_Y],
+ raw_images_[i].stride[VPX_PLANE_Y], raw_images_[i].planes[VPX_PLANE_U],
+ raw_images_[i].stride[VPX_PLANE_U], raw_images_[i].planes[VPX_PLANE_V],
+ raw_images_[i].stride[VPX_PLANE_V], raw_images_[i].d_w,
+ raw_images_[i].d_h, libyuv::kFilterBilinear);
}
vpx_enc_frame_flags_t flags[kMaxSimulcastStreams];
for (size_t i = 0; i < encoders_.size(); ++i) {
@@ -805,8 +799,8 @@ int VP8EncoderImpl::Encode(const VideoFrame& frame,
if (send_key_frame) {
// Adapt the size of the key frame when in screenshare with 1 temporal
// layer.
- if (encoders_.size() == 1 && codec_.mode == kScreensharing
- && codec_.codecSpecific.VP8.numberOfTemporalLayers <= 1) {
+ if (encoders_.size() == 1 && codec_.mode == kScreensharing &&
+ codec_.codecSpecific.VP8.numberOfTemporalLayers <= 1) {
const uint32_t forceKeyFrameIntraTh = 100;
vpx_codec_control(&(encoders_[0]), VP8E_SET_MAX_INTRA_BITRATE_PCT,
forceKeyFrameIntraTh);
@@ -818,13 +812,12 @@ int VP8EncoderImpl::Encode(const VideoFrame& frame,
}
std::fill(key_frame_request_.begin(), key_frame_request_.end(), false);
} else if (codec_specific_info &&
- codec_specific_info->codecType == kVideoCodecVP8) {
+ codec_specific_info->codecType == kVideoCodecVP8) {
if (feedback_mode_) {
// Handle RPSI and SLI messages and set up the appropriate encode flags.
bool sendRefresh = false;
if (codec_specific_info->codecSpecific.VP8.hasReceivedRPSI) {
- rps_.ReceivedRPSI(
- codec_specific_info->codecSpecific.VP8.pictureIdRPSI);
+ rps_.ReceivedRPSI(codec_specific_info->codecSpecific.VP8.pictureIdRPSI);
}
if (codec_specific_info->codecSpecific.VP8.hasReceivedSLI) {
sendRefresh = rps_.ReceivedSLI(input_image.timestamp());
@@ -876,8 +869,7 @@ int VP8EncoderImpl::Encode(const VideoFrame& frame,
}
vpx_codec_control(&encoders_[i], VP8E_SET_FRAME_FLAGS, flags[stream_idx]);
- vpx_codec_control(&encoders_[i],
- VP8E_SET_TEMPORAL_LAYER_ID,
+ vpx_codec_control(&encoders_[i], VP8E_SET_TEMPORAL_LAYER_ID,
temporal_layers_[stream_idx]->CurrentLayerId());
}
// TODO(holmer): Ideally the duration should be the timestamp diff of this
@@ -895,7 +887,7 @@ int VP8EncoderImpl::Encode(const VideoFrame& frame,
// Reset specific intra frame thresholds, following the key frame.
if (send_key_frame) {
vpx_codec_control(&(encoders_[0]), VP8E_SET_MAX_INTRA_BITRATE_PCT,
- rc_max_intra_target_);
+ rc_max_intra_target_);
}
if (error)
return WEBRTC_VIDEO_CODEC_ERROR;
@@ -913,8 +905,7 @@ int VP8EncoderImpl::UpdateCodecFrameSize(const VideoFrame& input_image) {
codec_.simulcastStream[0].height = input_image.height();
}
// Update the cpu_speed setting for resolution change.
- vpx_codec_control(&(encoders_[0]),
- VP8E_SET_CPUUSED,
+ vpx_codec_control(&(encoders_[0]), VP8E_SET_CPUUSED,
SetCpuSpeed(codec_.width, codec_.height));
raw_images_[0].w = codec_.width;
raw_images_[0].h = codec_.height;
@@ -947,13 +938,12 @@ void VP8EncoderImpl::PopulateCodecSpecific(
}
vp8Info->simulcastIdx = stream_idx;
vp8Info->keyIdx = kNoKeyIdx; // TODO(hlundin) populate this
- vp8Info->nonReference = (pkt.data.frame.flags & VPX_FRAME_IS_DROPPABLE) ?
- true : false;
+ vp8Info->nonReference =
+ (pkt.data.frame.flags & VPX_FRAME_IS_DROPPABLE) ? true : false;
bool base_layer_sync_point = (pkt.data.frame.flags & VPX_FRAME_IS_KEY) ||
- only_predicting_from_key_frame;
+ only_predicting_from_key_frame;
temporal_layers_[stream_idx]->PopulateCodecSpecific(base_layer_sync_point,
- vp8Info,
- timestamp);
+ vp8Info, timestamp);
// Prepare next.
picture_id_[stream_idx] = (picture_id_[stream_idx] + 1) & 0x7FFF;
}
@@ -966,27 +956,26 @@ int VP8EncoderImpl::GetEncodedPartitions(const VideoFrame& input_image,
int stream_idx = static_cast<int>(encoders_.size()) - 1;
int result = WEBRTC_VIDEO_CODEC_OK;
for (size_t encoder_idx = 0; encoder_idx < encoders_.size();
- ++encoder_idx, --stream_idx) {
+ ++encoder_idx, --stream_idx) {
vpx_codec_iter_t iter = NULL;
int part_idx = 0;
encoded_images_[encoder_idx]._length = 0;
encoded_images_[encoder_idx]._frameType = kVideoFrameDelta;
RTPFragmentationHeader frag_info;
// token_partitions_ is number of bits used.
- frag_info.VerifyAndAllocateFragmentationHeader((1 << token_partitions_)
- + 1);
+ frag_info.VerifyAndAllocateFragmentationHeader((1 << token_partitions_) +
+ 1);
CodecSpecificInfo codec_specific;
- const vpx_codec_cx_pkt_t *pkt = NULL;
- while ((pkt = vpx_codec_get_cx_data(&encoders_[encoder_idx],
- &iter)) != NULL) {
+ const vpx_codec_cx_pkt_t* pkt = NULL;
+ while ((pkt = vpx_codec_get_cx_data(&encoders_[encoder_idx], &iter)) !=
+ NULL) {
switch (pkt->kind) {
case VPX_CODEC_CX_FRAME_PKT: {
uint32_t length = encoded_images_[encoder_idx]._length;
memcpy(&encoded_images_[encoder_idx]._buffer[length],
- pkt->data.frame.buf,
- pkt->data.frame.sz);
+ pkt->data.frame.buf, pkt->data.frame.sz);
frag_info.fragmentationOffset[part_idx] = length;
- frag_info.fragmentationLength[part_idx] = pkt->data.frame.sz;
+ frag_info.fragmentationLength[part_idx] = pkt->data.frame.sz;
frag_info.fragmentationPlType[part_idx] = 0; // not known here
frag_info.fragmentationTimeDiff[part_idx] = 0;
encoded_images_[encoder_idx]._length += pkt->data.frame.sz;
@@ -1063,7 +1052,6 @@ int VP8EncoderImpl::RegisterEncodeCompleteCallback(
return WEBRTC_VIDEO_CODEC_OK;
}
-
VP8DecoderImpl::VP8DecoderImpl()
: decode_complete_callback_(NULL),
inited_(false),
@@ -1075,8 +1063,7 @@ VP8DecoderImpl::VP8DecoderImpl()
propagation_cnt_(-1),
last_frame_width_(0),
last_frame_height_(0),
- key_frame_required_(true) {
-}
+ key_frame_required_(true) {}
VP8DecoderImpl::~VP8DecoderImpl() {
inited_ = true; // in order to do the actual release
@@ -1092,8 +1079,7 @@ int VP8DecoderImpl::Reset() {
return WEBRTC_VIDEO_CODEC_OK;
}
-int VP8DecoderImpl::InitDecode(const VideoCodec* inst,
- int number_of_cores) {
+int VP8DecoderImpl::InitDecode(const VideoCodec* inst, int number_of_cores) {
int ret_val = Release();
if (ret_val < 0) {
return ret_val;
@@ -1104,12 +1090,12 @@ int VP8DecoderImpl::InitDecode(const VideoCodec* inst,
if (inst && inst->codecType == kVideoCodecVP8) {
feedback_mode_ = inst->codecSpecific.VP8.feedbackModeOn;
}
- vpx_codec_dec_cfg_t cfg;
+ vpx_codec_dec_cfg_t cfg;
// Setting number of threads to a constant value (1)
cfg.threads = 1;
cfg.h = cfg.w = 0; // set after decode
-vpx_codec_flags_t flags = 0;
+ vpx_codec_flags_t flags = 0;
#if !defined(WEBRTC_ARCH_ARM) && !defined(WEBRTC_ARCH_ARM64)
flags = VPX_CODEC_USE_POSTPROC;
#ifdef INDEPENDENT_PARTITIONS
@@ -1134,10 +1120,10 @@ vpx_codec_flags_t flags = 0;
}
int VP8DecoderImpl::Decode(const EncodedImage& input_image,
- bool missing_frames,
- const RTPFragmentationHeader* fragmentation,
- const CodecSpecificInfo* codec_specific_info,
- int64_t /*render_time_ms*/) {
+ bool missing_frames,
+ const RTPFragmentationHeader* fragmentation,
+ const CodecSpecificInfo* codec_specific_info,
+ int64_t /*render_time_ms*/) {
if (!inited_) {
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
}
@@ -1188,9 +1174,9 @@ int VP8DecoderImpl::Decode(const EncodedImage& input_image,
if (input_image._frameType == kVideoFrameKey &&
input_image._completeFrame) {
propagation_cnt_ = -1;
- // Start count on first loss.
+ // Start count on first loss.
} else if ((!input_image._completeFrame || missing_frames) &&
- propagation_cnt_ == -1) {
+ propagation_cnt_ == -1) {
propagation_cnt_ = 0;
}
if (propagation_cnt_ >= 0) {
@@ -1242,15 +1228,15 @@ int VP8DecoderImpl::Decode(const EncodedImage& input_image,
if (input_image._frameType == kVideoFrameKey && input_image._buffer != NULL) {
const uint32_t bytes_to_copy = input_image._length;
if (last_keyframe_._size < bytes_to_copy) {
- delete [] last_keyframe_._buffer;
+ delete[] last_keyframe_._buffer;
last_keyframe_._buffer = NULL;
last_keyframe_._size = 0;
}
uint8_t* temp_buffer = last_keyframe_._buffer; // Save buffer ptr.
- uint32_t temp_size = last_keyframe_._size; // Save size.
- last_keyframe_ = input_image; // Shallow copy.
- last_keyframe_._buffer = temp_buffer; // Restore buffer ptr.
- last_keyframe_._size = temp_size; // Restore buffer size.
+ uint32_t temp_size = last_keyframe_._size; // Save size.
+ last_keyframe_ = input_image; // Shallow copy.
+ last_keyframe_._buffer = temp_buffer; // Restore buffer ptr.
+ last_keyframe_._size = temp_size; // Restore buffer size.
if (!last_keyframe_._buffer) {
// Allocate memory.
last_keyframe_._size = bytes_to_copy;
@@ -1300,7 +1286,8 @@ int VP8DecoderImpl::Decode(const EncodedImage& input_image,
}
if (picture_id > -1) {
if (((reference_updates & VP8_GOLD_FRAME) ||
- (reference_updates & VP8_ALTR_FRAME)) && !corrupted) {
+ (reference_updates & VP8_ALTR_FRAME)) &&
+ !corrupted) {
decode_complete_callback_->ReceivedDecodedReferenceFrame(picture_id);
}
decode_complete_callback_->ReceivedDecodedFrame(picture_id);
@@ -1323,14 +1310,10 @@ int VP8DecoderImpl::DecodePartitions(
const EncodedImage& input_image,
const RTPFragmentationHeader* fragmentation) {
for (int i = 0; i < fragmentation->fragmentationVectorSize; ++i) {
- const uint8_t* partition = input_image._buffer +
- fragmentation->fragmentationOffset[i];
- const uint32_t partition_length =
- fragmentation->fragmentationLength[i];
- if (vpx_codec_decode(decoder_,
- partition,
- partition_length,
- 0,
+ const uint8_t* partition =
+ input_image._buffer + fragmentation->fragmentationOffset[i];
+ const uint32_t partition_length = fragmentation->fragmentationLength[i];
+ if (vpx_codec_decode(decoder_, partition, partition_length, 0,
VPX_DL_REALTIME)) {
return WEBRTC_VIDEO_CODEC_ERROR;
}
@@ -1343,8 +1326,8 @@ int VP8DecoderImpl::DecodePartitions(
}
int VP8DecoderImpl::ReturnFrame(const vpx_image_t* img,
- uint32_t timestamp,
- int64_t ntp_time_ms) {
+ uint32_t timestamp,
+ int64_t ntp_time_ms) {
if (img == NULL) {
// Decoder OK and NULL image => No show frame
return WEBRTC_VIDEO_CODEC_NO_OUTPUT;
@@ -1354,14 +1337,13 @@ int VP8DecoderImpl::ReturnFrame(const vpx_image_t* img,
// Allocate memory for decoded image.
VideoFrame decoded_image(buffer_pool_.CreateBuffer(img->d_w, img->d_h),
timestamp, 0, kVideoRotation_0);
- libyuv::I420Copy(
- img->planes[VPX_PLANE_Y], img->stride[VPX_PLANE_Y],
- img->planes[VPX_PLANE_U], img->stride[VPX_PLANE_U],
- img->planes[VPX_PLANE_V], img->stride[VPX_PLANE_V],
- decoded_image.buffer(kYPlane), decoded_image.stride(kYPlane),
- decoded_image.buffer(kUPlane), decoded_image.stride(kUPlane),
- decoded_image.buffer(kVPlane), decoded_image.stride(kVPlane),
- img->d_w, img->d_h);
+ libyuv::I420Copy(img->planes[VPX_PLANE_Y], img->stride[VPX_PLANE_Y],
+ img->planes[VPX_PLANE_U], img->stride[VPX_PLANE_U],
+ img->planes[VPX_PLANE_V], img->stride[VPX_PLANE_V],
+ decoded_image.buffer(kYPlane), decoded_image.stride(kYPlane),
+ decoded_image.buffer(kUPlane), decoded_image.stride(kUPlane),
+ decoded_image.buffer(kVPlane), decoded_image.stride(kVPlane),
+ img->d_w, img->d_h);
decoded_image.set_ntp_time_ms(ntp_time_ms);
int ret = decode_complete_callback_->Decoded(decoded_image);
if (ret != 0)
@@ -1380,7 +1362,7 @@ int VP8DecoderImpl::RegisterDecodeCompleteCallback(
int VP8DecoderImpl::Release() {
if (last_keyframe_._buffer != NULL) {
- delete [] last_keyframe_._buffer;
+ delete[] last_keyframe_._buffer;
last_keyframe_._buffer = NULL;
}
if (decoder_ != NULL) {
@@ -1400,15 +1382,19 @@ int VP8DecoderImpl::Release() {
return WEBRTC_VIDEO_CODEC_OK;
}
+const char* VP8DecoderImpl::ImplementationName() const {
+ return "libvpx";
+}
+
int VP8DecoderImpl::CopyReference(VP8DecoderImpl* copy) {
// The type of frame to copy should be set in ref_frame_->frame_type
// before the call to this function.
- if (vpx_codec_control(decoder_, VP8_COPY_REFERENCE, ref_frame_)
- != VPX_CODEC_OK) {
+ if (vpx_codec_control(decoder_, VP8_COPY_REFERENCE, ref_frame_) !=
+ VPX_CODEC_OK) {
return -1;
}
- if (vpx_codec_control(copy->decoder_, VP8_SET_REFERENCE, ref_frame_)
- != VPX_CODEC_OK) {
+ if (vpx_codec_control(copy->decoder_, VP8_SET_REFERENCE, ref_frame_) !=
+ VPX_CODEC_OK) {
return -1;
}
return 0;
diff --git a/webrtc/modules/video_coding/codecs/vp8/vp8_impl.h b/webrtc/modules/video_coding/codecs/vp8/vp8_impl.h
index ba14ed5841..9d5fb713a4 100644
--- a/webrtc/modules/video_coding/codecs/vp8/vp8_impl.h
+++ b/webrtc/modules/video_coding/codecs/vp8/vp8_impl.h
@@ -22,12 +22,12 @@
#include "vpx/vp8cx.h"
#include "vpx/vp8dx.h"
-#include "webrtc/common_video/interface/i420_buffer_pool.h"
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "webrtc/common_video/include/i420_buffer_pool.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
#include "webrtc/modules/video_coding/codecs/vp8/include/vp8.h"
#include "webrtc/modules/video_coding/codecs/vp8/reference_picture_selection.h"
-#include "webrtc/modules/video_coding/utility/include/frame_dropper.h"
-#include "webrtc/modules/video_coding/utility/include/quality_scaler.h"
+#include "webrtc/modules/video_coding/utility/frame_dropper.h"
+#include "webrtc/modules/video_coding/utility/quality_scaler.h"
#include "webrtc/video_frame.h"
namespace webrtc {
@@ -58,8 +58,11 @@ class VP8EncoderImpl : public VP8Encoder {
void OnDroppedFrame() override {}
+ const char* ImplementationName() const override;
+
private:
- void SetupTemporalLayers(int num_streams, int num_temporal_layers,
+ void SetupTemporalLayers(int num_streams,
+ int num_temporal_layers,
const VideoCodec& codec);
// Set the cpu_speed setting for encoder based on resolution and/or platform.
@@ -126,15 +129,17 @@ class VP8DecoderImpl : public VP8Decoder {
int InitDecode(const VideoCodec* inst, int number_of_cores) override;
int Decode(const EncodedImage& input_image,
- bool missing_frames,
- const RTPFragmentationHeader* fragmentation,
- const CodecSpecificInfo* codec_specific_info,
- int64_t /*render_time_ms*/) override;
+ bool missing_frames,
+ const RTPFragmentationHeader* fragmentation,
+ const CodecSpecificInfo* codec_specific_info,
+ int64_t /*render_time_ms*/) override;
int RegisterDecodeCompleteCallback(DecodedImageCallback* callback) override;
int Release() override;
int Reset() override;
+ const char* ImplementationName() const override;
+
private:
// Copy reference image from this _decoder to the _decoder in copyTo. Set
// which frame type to copy in _refFrame->frame_type before the call to
@@ -165,4 +170,3 @@ class VP8DecoderImpl : public VP8Decoder {
} // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_VP8_IMPL_H_
-
diff --git a/webrtc/modules/video_coding/codecs/vp8/vp8_sequence_coder.cc b/webrtc/modules/video_coding/codecs/vp8/vp8_sequence_coder.cc
index 5843d83fa7..9e546653db 100644
--- a/webrtc/modules/video_coding/codecs/vp8/vp8_sequence_coder.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/vp8_sequence_coder.cc
@@ -1,4 +1,4 @@
- /*
+/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
@@ -9,8 +9,9 @@
*/
#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/base/checks.h"
#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/common_video/interface/video_image.h"
+#include "webrtc/common_video/include/video_image.h"
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
#include "webrtc/modules/video_coding/codecs/vp8/include/vp8.h"
#include "webrtc/system_wrappers/include/tick_util.h"
@@ -22,8 +23,7 @@
class Vp8SequenceCoderEncodeCallback : public webrtc::EncodedImageCallback {
public:
explicit Vp8SequenceCoderEncodeCallback(FILE* encoded_file)
- : encoded_file_(encoded_file),
- encoded_bytes_(0) {}
+ : encoded_file_(encoded_file), encoded_bytes_(0) {}
~Vp8SequenceCoderEncodeCallback();
int Encoded(const webrtc::EncodedImage& encoded_image,
const webrtc::CodecSpecificInfo* codecSpecificInfo,
@@ -31,6 +31,7 @@ class Vp8SequenceCoderEncodeCallback : public webrtc::EncodedImageCallback {
// Returns the encoded image.
webrtc::EncodedImage encoded_image() { return encoded_image_; }
size_t encoded_bytes() { return encoded_bytes_; }
+
private:
webrtc::EncodedImage encoded_image_;
FILE* encoded_file_;
@@ -38,7 +39,7 @@ class Vp8SequenceCoderEncodeCallback : public webrtc::EncodedImageCallback {
};
Vp8SequenceCoderEncodeCallback::~Vp8SequenceCoderEncodeCallback() {
- delete [] encoded_image_._buffer;
+ delete[] encoded_image_._buffer;
encoded_image_._buffer = NULL;
}
int Vp8SequenceCoderEncodeCallback::Encoded(
@@ -46,7 +47,7 @@ int Vp8SequenceCoderEncodeCallback::Encoded(
const webrtc::CodecSpecificInfo* codecSpecificInfo,
const webrtc::RTPFragmentationHeader* fragmentation) {
if (encoded_image_._size < encoded_image._size) {
- delete [] encoded_image_._buffer;
+ delete[] encoded_image_._buffer;
encoded_image_._buffer = NULL;
encoded_image_._buffer = new uint8_t[encoded_image._size];
encoded_image_._size = encoded_image._size;
@@ -68,7 +69,11 @@ class Vp8SequenceCoderDecodeCallback : public webrtc::DecodedImageCallback {
public:
explicit Vp8SequenceCoderDecodeCallback(FILE* decoded_file)
: decoded_file_(decoded_file) {}
- int Decoded(webrtc::VideoFrame& frame);
+ int32_t Decoded(webrtc::VideoFrame& frame) override;
+ int32_t Decoded(webrtc::VideoFrame& frame, int64_t decode_time_ms) override {
+ RTC_NOTREACHED();
+ return -1;
+ }
bool DecodeComplete();
private:
@@ -80,16 +85,16 @@ int Vp8SequenceCoderDecodeCallback::Decoded(webrtc::VideoFrame& image) {
return 0;
}
-int SequenceCoder(webrtc::test::CommandLineParser& parser) {
- int width = strtol((parser.GetFlag("w")).c_str(), NULL, 10);
- int height = strtol((parser.GetFlag("h")).c_str(), NULL, 10);
- int framerate = strtol((parser.GetFlag("f")).c_str(), NULL, 10);
+int SequenceCoder(webrtc::test::CommandLineParser* parser) {
+ int width = strtol((parser->GetFlag("w")).c_str(), NULL, 10);
+ int height = strtol((parser->GetFlag("h")).c_str(), NULL, 10);
+ int framerate = strtol((parser->GetFlag("f")).c_str(), NULL, 10);
if (width <= 0 || height <= 0 || framerate <= 0) {
fprintf(stderr, "Error: Resolution cannot be <= 0!\n");
return -1;
}
- int target_bitrate = strtol((parser.GetFlag("b")).c_str(), NULL, 10);
+ int target_bitrate = strtol((parser->GetFlag("b")).c_str(), NULL, 10);
if (target_bitrate <= 0) {
fprintf(stderr, "Error: Bit-rate cannot be <= 0!\n");
return -1;
@@ -97,20 +102,20 @@ int SequenceCoder(webrtc::test::CommandLineParser& parser) {
// SetUp
// Open input file.
- std::string encoded_file_name = parser.GetFlag("encoded_file");
+ std::string encoded_file_name = parser->GetFlag("encoded_file");
FILE* encoded_file = fopen(encoded_file_name.c_str(), "wb");
if (encoded_file == NULL) {
fprintf(stderr, "Error: Cannot open encoded file\n");
return -1;
}
- std::string input_file_name = parser.GetFlag("input_file");
+ std::string input_file_name = parser->GetFlag("input_file");
FILE* input_file = fopen(input_file_name.c_str(), "rb");
if (input_file == NULL) {
fprintf(stderr, "Error: Cannot open input file\n");
return -1;
}
// Open output file.
- std::string output_file_name = parser.GetFlag("output_file");
+ std::string output_file_name = parser->GetFlag("output_file");
FILE* output_file = fopen(output_file_name.c_str(), "wb");
if (output_file == NULL) {
fprintf(stderr, "Error: Cannot open output file\n");
@@ -118,8 +123,8 @@ int SequenceCoder(webrtc::test::CommandLineParser& parser) {
}
// Get range of frames: will encode num_frames following start_frame).
- int start_frame = strtol((parser.GetFlag("start_frame")).c_str(), NULL, 10);
- int num_frames = strtol((parser.GetFlag("num_frames")).c_str(), NULL, 10);
+ int start_frame = strtol((parser->GetFlag("start_frame")).c_str(), NULL, 10);
+ int num_frames = strtol((parser->GetFlag("num_frames")).c_str(), NULL, 10);
// Codec SetUp.
webrtc::VideoCodec inst;
@@ -157,8 +162,8 @@ int SequenceCoder(webrtc::test::CommandLineParser& parser) {
int frames_processed = 0;
input_frame.CreateEmptyFrame(width, height, width, half_width, half_width);
while (!feof(input_file) &&
- (num_frames == -1 || frames_processed < num_frames)) {
- if (fread(frame_buffer.get(), 1, length, input_file) != length)
+ (num_frames == -1 || frames_processed < num_frames)) {
+ if (fread(frame_buffer.get(), 1, length, input_file) != length)
continue;
if (frame_cnt >= start_frame) {
webrtc::ConvertToI420(webrtc::kI420, frame_buffer.get(), 0, 0, width,
@@ -179,33 +184,35 @@ int SequenceCoder(webrtc::test::CommandLineParser& parser) {
printf("Actual bitrate: %f kbps\n", actual_bit_rate / 1000);
webrtc::test::QualityMetricsResult psnr_result, ssim_result;
EXPECT_EQ(0, webrtc::test::I420MetricsFromFiles(
- input_file_name.c_str(), output_file_name.c_str(),
- inst.width, inst.height,
- &psnr_result, &ssim_result));
+ input_file_name.c_str(), output_file_name.c_str(),
+ inst.width, inst.height, &psnr_result, &ssim_result));
printf("PSNR avg: %f[dB], min: %f[dB]\nSSIM avg: %f, min: %f\n",
- psnr_result.average, psnr_result.min,
- ssim_result.average, ssim_result.min);
+ psnr_result.average, psnr_result.min, ssim_result.average,
+ ssim_result.min);
return frame_cnt;
}
int main(int argc, char** argv) {
std::string program_name = argv[0];
- std::string usage = "Encode and decodes a video sequence, and writes"
- "results to a file.\n"
- "Example usage:\n" + program_name + " functionality"
- " --w=352 --h=288 --input_file=input.yuv --output_file=output.yuv "
- " Command line flags:\n"
- " - width(int): The width of the input file. Default: 352\n"
- " - height(int): The height of the input file. Default: 288\n"
- " - input_file(string): The YUV file to encode."
- " Default: foreman.yuv\n"
- " - encoded_file(string): The vp8 encoded file (encoder output)."
- " Default: vp8_encoded.vp8\n"
- " - output_file(string): The yuv decoded file (decoder output)."
- " Default: vp8_decoded.yuv\n."
- " - start_frame - frame number in which encoding will begin. Default: 0"
- " - num_frames - Number of frames to be processed. "
- " Default: -1 (entire sequence).";
+ std::string usage =
+ "Encode and decodes a video sequence, and writes"
+ "results to a file.\n"
+ "Example usage:\n" +
+ program_name +
+ " functionality"
+ " --w=352 --h=288 --input_file=input.yuv --output_file=output.yuv "
+ " Command line flags:\n"
+ " - width(int): The width of the input file. Default: 352\n"
+ " - height(int): The height of the input file. Default: 288\n"
+ " - input_file(string): The YUV file to encode."
+ " Default: foreman.yuv\n"
+ " - encoded_file(string): The vp8 encoded file (encoder output)."
+ " Default: vp8_encoded.vp8\n"
+ " - output_file(string): The yuv decoded file (decoder output)."
+ " Default: vp8_decoded.yuv\n."
+ " - start_frame - frame number in which encoding will begin. Default: 0"
+ " - num_frames - Number of frames to be processed. "
+ " Default: -1 (entire sequence).";
webrtc::test::CommandLineParser parser;
@@ -223,8 +230,8 @@ int main(int argc, char** argv) {
parser.SetFlag("output_file", webrtc::test::OutputPath() + "vp8_decoded.yuv");
parser.SetFlag("encoded_file",
webrtc::test::OutputPath() + "vp8_encoded.vp8");
- parser.SetFlag("input_file", webrtc::test::ResourcePath("foreman_cif",
- "yuv"));
+ parser.SetFlag("input_file",
+ webrtc::test::ResourcePath("foreman_cif", "yuv"));
parser.SetFlag("help", "false");
parser.ProcessFlags();
@@ -234,5 +241,5 @@ int main(int argc, char** argv) {
}
parser.PrintEnteredFlags();
- return SequenceCoder(parser);
+ return SequenceCoder(&parser);
}
diff --git a/webrtc/modules/video_coding/codecs/vp9/include/vp9.h b/webrtc/modules/video_coding/codecs/vp9/include/vp9.h
index cd77f72dcb..3bcbe46b3a 100644
--- a/webrtc/modules/video_coding/codecs/vp9/include/vp9.h
+++ b/webrtc/modules/video_coding/codecs/vp9/include/vp9.h
@@ -12,7 +12,7 @@
#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_VP9_INCLUDE_VP9_H_
#define WEBRTC_MODULES_VIDEO_CODING_CODECS_VP9_INCLUDE_VP9_H_
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
namespace webrtc {
@@ -23,7 +23,6 @@ class VP9Encoder : public VideoEncoder {
virtual ~VP9Encoder() {}
};
-
class VP9Decoder : public VideoDecoder {
public:
static VP9Decoder* Create();
diff --git a/webrtc/modules/video_coding/codecs/vp9/screenshare_layers.cc b/webrtc/modules/video_coding/codecs/vp9/screenshare_layers.cc
new file mode 100644
index 0000000000..c7ed78a192
--- /dev/null
+++ b/webrtc/modules/video_coding/codecs/vp9/screenshare_layers.cc
@@ -0,0 +1,93 @@
+/* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+*
+* Use of this source code is governed by a BSD-style license
+* that can be found in the LICENSE file in the root of the source
+* tree. An additional intellectual property rights grant can be found
+* in the file PATENTS. All contributing project authors may
+* be found in the AUTHORS file in the root of the source tree.
+*/
+
+#include <algorithm>
+#include "webrtc/modules/video_coding/codecs/vp9/screenshare_layers.h"
+#include "webrtc/base/checks.h"
+
+namespace webrtc {
+
+ScreenshareLayersVP9::ScreenshareLayersVP9(uint8_t num_layers)
+ : num_layers_(num_layers),
+ start_layer_(0),
+ last_timestamp_(0),
+ timestamp_initialized_(false) {
+ RTC_DCHECK_GT(num_layers, 0);
+ RTC_DCHECK_LE(num_layers, kMaxVp9NumberOfSpatialLayers);
+ memset(bits_used_, 0, sizeof(bits_used_));
+ memset(threshold_kbps_, 0, sizeof(threshold_kbps_));
+}
+
+uint8_t ScreenshareLayersVP9::GetStartLayer() const {
+ return start_layer_;
+}
+
+void ScreenshareLayersVP9::ConfigureBitrate(int threshold_kbps,
+ uint8_t layer_id) {
+ // The upper layer is always the layer we spill frames
+ // to when the bitrate becomes to high, therefore setting
+ // a max limit is not allowed. The top layer bitrate is
+ // never used either so configuring it makes no difference.
+ RTC_DCHECK_LT(layer_id, num_layers_ - 1);
+ threshold_kbps_[layer_id] = threshold_kbps;
+}
+
+void ScreenshareLayersVP9::LayerFrameEncoded(unsigned int size_bytes,
+ uint8_t layer_id) {
+ RTC_DCHECK_LT(layer_id, num_layers_);
+ bits_used_[layer_id] += size_bytes * 8;
+}
+
+VP9EncoderImpl::SuperFrameRefSettings
+ScreenshareLayersVP9::GetSuperFrameSettings(uint32_t timestamp,
+ bool is_keyframe) {
+ VP9EncoderImpl::SuperFrameRefSettings settings;
+ if (!timestamp_initialized_) {
+ last_timestamp_ = timestamp;
+ timestamp_initialized_ = true;
+ }
+ float time_diff = (timestamp - last_timestamp_) / 90.f;
+ float total_bits_used = 0;
+ float total_threshold_kbps = 0;
+ start_layer_ = 0;
+
+ // Up to (num_layers - 1) because we only have
+ // (num_layers - 1) thresholds to check.
+ for (int layer_id = 0; layer_id < num_layers_ - 1; ++layer_id) {
+ bits_used_[layer_id] = std::max(
+ 0.f, bits_used_[layer_id] - time_diff * threshold_kbps_[layer_id]);
+ total_bits_used += bits_used_[layer_id];
+ total_threshold_kbps += threshold_kbps_[layer_id];
+
+ // If this is a keyframe then there should be no
+ // references to any previous frames.
+ if (!is_keyframe) {
+ settings.layer[layer_id].ref_buf1 = layer_id;
+ if (total_bits_used > total_threshold_kbps * 1000)
+ start_layer_ = layer_id + 1;
+ }
+
+ settings.layer[layer_id].upd_buf = layer_id;
+ }
+ // Since the above loop does not iterate over the last layer
+ // the reference of the last layer has to be set after the loop,
+ // and if this is a keyframe there should be no references to
+ // any previous frames.
+ if (!is_keyframe)
+ settings.layer[num_layers_ - 1].ref_buf1 = num_layers_ - 1;
+
+ settings.layer[num_layers_ - 1].upd_buf = num_layers_ - 1;
+ settings.is_keyframe = is_keyframe;
+ settings.start_layer = start_layer_;
+ settings.stop_layer = num_layers_ - 1;
+ last_timestamp_ = timestamp;
+ return settings;
+}
+
+} // namespace webrtc
diff --git a/webrtc/modules/video_coding/codecs/vp9/screenshare_layers.h b/webrtc/modules/video_coding/codecs/vp9/screenshare_layers.h
new file mode 100644
index 0000000000..5a901ae359
--- /dev/null
+++ b/webrtc/modules/video_coding/codecs/vp9/screenshare_layers.h
@@ -0,0 +1,66 @@
+/* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+*
+* Use of this source code is governed by a BSD-style license
+* that can be found in the LICENSE file in the root of the source
+* tree. An additional intellectual property rights grant can be found
+* in the file PATENTS. All contributing project authors may
+* be found in the AUTHORS file in the root of the source tree.
+*/
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_VP9_SCREENSHARE_LAYERS_H_
+#define WEBRTC_MODULES_VIDEO_CODING_CODECS_VP9_SCREENSHARE_LAYERS_H_
+
+#include "webrtc/modules/video_coding/codecs/vp9/vp9_impl.h"
+
+namespace webrtc {
+
+class ScreenshareLayersVP9 {
+ public:
+ explicit ScreenshareLayersVP9(uint8_t num_layers);
+
+  // Sets the target bitrate for the layer with id layer_id.
+ void ConfigureBitrate(int threshold_kbps, uint8_t layer_id);
+
+ // The current start layer.
+ uint8_t GetStartLayer() const;
+
+ // Update the layer with the size of the layer frame.
+ void LayerFrameEncoded(unsigned int size_bytes, uint8_t layer_id);
+
+ // Get the layer settings for the next superframe.
+ //
+  // In short, each time GetSuperFrameSettings is called the
+  // bitrate of every layer is calculated, and if the cumulative
+  // bitrate exceeds the configured cumulative bitrates
+  // (set via ConfigureBitrate) up to and including that
+  // layer, then the resulting encoding settings for the
+  // superframe will only encode layers above that layer.
+ VP9EncoderImpl::SuperFrameRefSettings GetSuperFrameSettings(
+ uint32_t timestamp,
+ bool is_keyframe);
+
+ private:
+  // The number of layers that are used.
+ uint8_t num_layers_;
+
+ // The index of the first layer to encode.
+ uint8_t start_layer_;
+
+  // Cumulative target kbps for the different layers.
+ float threshold_kbps_[kMaxVp9NumberOfSpatialLayers - 1];
+
+  // How many bits have been used for a certain layer. Increased in
+  // LayerFrameEncoded() by the size of the encoded frame and decreased in
+  // GetSuperFrameSettings() depending on the time between frames.
+ float bits_used_[kMaxVp9NumberOfSpatialLayers];
+
+ // Timestamp of last frame.
+ uint32_t last_timestamp_;
+
+ // If the last_timestamp_ has been set.
+ bool timestamp_initialized_;
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_VP9_SCREENSHARE_LAYERS_H_
diff --git a/webrtc/modules/video_coding/codecs/vp9/screenshare_layers_unittest.cc b/webrtc/modules/video_coding/codecs/vp9/screenshare_layers_unittest.cc
new file mode 100644
index 0000000000..5eb7b237ac
--- /dev/null
+++ b/webrtc/modules/video_coding/codecs/vp9/screenshare_layers_unittest.cc
@@ -0,0 +1,323 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <limits>
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "vpx/vp8cx.h"
+#include "webrtc/base/logging.h"
+#include "webrtc/modules/video_coding/codecs/vp9/screenshare_layers.h"
+#include "webrtc/modules/video_coding/codecs/vp9/vp9_impl.h"
+#include "webrtc/system_wrappers/include/clock.h"
+
+namespace webrtc {
+
+typedef VP9EncoderImpl::SuperFrameRefSettings Settings;
+
+const uint32_t kTickFrequency = 90000;
+
+class ScreenshareLayerTestVP9 : public ::testing::Test {
+ protected:
+ ScreenshareLayerTestVP9() : clock_(0) {}
+ virtual ~ScreenshareLayerTestVP9() {}
+
+ void InitScreenshareLayers(int layers) {
+ layers_.reset(new ScreenshareLayersVP9(layers));
+ }
+
+ void ConfigureBitrateForLayer(int kbps, uint8_t layer_id) {
+ layers_->ConfigureBitrate(kbps, layer_id);
+ }
+
+ void AdvanceTime(int64_t milliseconds) {
+ clock_.AdvanceTimeMilliseconds(milliseconds);
+ }
+
+ void AddKilobitsToLayer(int kilobits, uint8_t layer_id) {
+ layers_->LayerFrameEncoded(kilobits * 1000 / 8, layer_id);
+ }
+
+ void EqualRefsForLayer(const Settings& actual, uint8_t layer_id) {
+ EXPECT_EQ(expected_.layer[layer_id].upd_buf,
+ actual.layer[layer_id].upd_buf);
+ EXPECT_EQ(expected_.layer[layer_id].ref_buf1,
+ actual.layer[layer_id].ref_buf1);
+ EXPECT_EQ(expected_.layer[layer_id].ref_buf2,
+ actual.layer[layer_id].ref_buf2);
+ EXPECT_EQ(expected_.layer[layer_id].ref_buf3,
+ actual.layer[layer_id].ref_buf3);
+ }
+
+ void EqualRefs(const Settings& actual) {
+ for (unsigned int layer_id = 0; layer_id < kMaxVp9NumberOfSpatialLayers;
+ ++layer_id) {
+ EqualRefsForLayer(actual, layer_id);
+ }
+ }
+
+ void EqualStartStopKeyframe(const Settings& actual) {
+ EXPECT_EQ(expected_.start_layer, actual.start_layer);
+ EXPECT_EQ(expected_.stop_layer, actual.stop_layer);
+ EXPECT_EQ(expected_.is_keyframe, actual.is_keyframe);
+ }
+
+  // Check that the settings returned by GetSuperFrameSettings() are
+  // equal to the expected_ settings.
+ void EqualToExpected() {
+ uint32_t frame_timestamp_ =
+ clock_.TimeInMilliseconds() * (kTickFrequency / 1000);
+ Settings actual =
+ layers_->GetSuperFrameSettings(frame_timestamp_, expected_.is_keyframe);
+ EqualRefs(actual);
+ EqualStartStopKeyframe(actual);
+ }
+
+ Settings expected_;
+ SimulatedClock clock_;
+ rtc::scoped_ptr<ScreenshareLayersVP9> layers_;
+};
+
+TEST_F(ScreenshareLayerTestVP9, NoRefsOnKeyFrame) {
+ const int kNumLayers = kMaxVp9NumberOfSpatialLayers;
+ InitScreenshareLayers(kNumLayers);
+ expected_.start_layer = 0;
+ expected_.stop_layer = kNumLayers - 1;
+
+ for (int l = 0; l < kNumLayers; ++l) {
+ expected_.layer[l].upd_buf = l;
+ }
+ expected_.is_keyframe = true;
+ EqualToExpected();
+
+ for (int l = 0; l < kNumLayers; ++l) {
+ expected_.layer[l].ref_buf1 = l;
+ }
+ expected_.is_keyframe = false;
+ EqualToExpected();
+}
+
+// Verify that it is not possible to send at a high bitrate (over the
+// threshold) right after a longer period of low bitrate.
+TEST_F(ScreenshareLayerTestVP9, DontAccumelateAvailableBitsOverTime) {
+ InitScreenshareLayers(2);
+ ConfigureBitrateForLayer(100, 0);
+
+ expected_.layer[0].upd_buf = 0;
+ expected_.layer[0].ref_buf1 = 0;
+ expected_.layer[1].upd_buf = 1;
+ expected_.layer[1].ref_buf1 = 1;
+ expected_.start_layer = 0;
+ expected_.stop_layer = 1;
+
+ // Send 10 frames at a low bitrate (50 kbps)
+ for (int i = 0; i < 10; ++i) {
+ AdvanceTime(200);
+ EqualToExpected();
+ AddKilobitsToLayer(10, 0);
+ }
+
+ AdvanceTime(200);
+ EqualToExpected();
+ AddKilobitsToLayer(301, 0);
+
+ // Send 10 frames at a high bitrate (200 kbps)
+ expected_.start_layer = 1;
+ for (int i = 0; i < 10; ++i) {
+ AdvanceTime(200);
+ EqualToExpected();
+ AddKilobitsToLayer(40, 1);
+ }
+}
+
+// Test that used bits are accumulated over layers, as they should be.
+TEST_F(ScreenshareLayerTestVP9, AccumelateUsedBitsOverLayers) {
+ const int kNumLayers = kMaxVp9NumberOfSpatialLayers;
+ InitScreenshareLayers(kNumLayers);
+ for (int l = 0; l < kNumLayers - 1; ++l)
+ ConfigureBitrateForLayer(100, l);
+ for (int l = 0; l < kNumLayers; ++l) {
+ expected_.layer[l].upd_buf = l;
+ expected_.layer[l].ref_buf1 = l;
+ }
+
+ expected_.start_layer = 0;
+ expected_.stop_layer = kNumLayers - 1;
+ EqualToExpected();
+
+ for (int layer = 0; layer < kNumLayers - 1; ++layer) {
+ expected_.start_layer = layer;
+ EqualToExpected();
+ AddKilobitsToLayer(101, layer);
+ }
+}
+
+// General testing of the bitrate controller.
+TEST_F(ScreenshareLayerTestVP9, 2LayerBitrate) {
+ InitScreenshareLayers(2);
+ ConfigureBitrateForLayer(100, 0);
+
+ expected_.layer[0].upd_buf = 0;
+ expected_.layer[1].upd_buf = 1;
+ expected_.layer[0].ref_buf1 = -1;
+ expected_.layer[1].ref_buf1 = -1;
+ expected_.start_layer = 0;
+ expected_.stop_layer = 1;
+
+ expected_.is_keyframe = true;
+ EqualToExpected();
+ AddKilobitsToLayer(100, 0);
+
+ expected_.layer[0].ref_buf1 = 0;
+ expected_.layer[1].ref_buf1 = 1;
+ expected_.is_keyframe = false;
+ AdvanceTime(199);
+ EqualToExpected();
+ AddKilobitsToLayer(100, 0);
+
+ expected_.start_layer = 1;
+ for (int frame = 0; frame < 3; ++frame) {
+ AdvanceTime(200);
+ EqualToExpected();
+ AddKilobitsToLayer(100, 1);
+ }
+
+ // Just before enough bits become available for L0 @0.999 seconds.
+ AdvanceTime(199);
+ EqualToExpected();
+ AddKilobitsToLayer(100, 1);
+
+ // Just after enough bits become available for L0 @1.0001 seconds.
+ expected_.start_layer = 0;
+ AdvanceTime(2);
+ EqualToExpected();
+ AddKilobitsToLayer(100, 0);
+
+ // Keyframes always encode all layers, even if it is over budget.
+ expected_.layer[0].ref_buf1 = -1;
+ expected_.layer[1].ref_buf1 = -1;
+ expected_.is_keyframe = true;
+ AdvanceTime(499);
+ EqualToExpected();
+ expected_.layer[0].ref_buf1 = 0;
+ expected_.layer[1].ref_buf1 = 1;
+ expected_.start_layer = 1;
+ expected_.is_keyframe = false;
+ EqualToExpected();
+ AddKilobitsToLayer(100, 0);
+
+  // 400 kb in L0 --> @3 second mark to fall below the threshold.
+  // Just before @2.999 seconds.
+ expected_.is_keyframe = false;
+ AdvanceTime(1499);
+ EqualToExpected();
+ AddKilobitsToLayer(100, 1);
+
+ // just after @3.001 seconds.
+ expected_.start_layer = 0;
+ AdvanceTime(2);
+ EqualToExpected();
+ AddKilobitsToLayer(100, 0);
+}
+
+// General testing of the bitrate controller.
+TEST_F(ScreenshareLayerTestVP9, 3LayerBitrate) {
+ InitScreenshareLayers(3);
+ ConfigureBitrateForLayer(100, 0);
+ ConfigureBitrateForLayer(100, 1);
+
+ for (int l = 0; l < 3; ++l) {
+ expected_.layer[l].upd_buf = l;
+ expected_.layer[l].ref_buf1 = l;
+ }
+ expected_.start_layer = 0;
+ expected_.stop_layer = 2;
+
+ EqualToExpected();
+ AddKilobitsToLayer(105, 0);
+ AddKilobitsToLayer(30, 1);
+
+ AdvanceTime(199);
+ EqualToExpected();
+ AddKilobitsToLayer(105, 0);
+ AddKilobitsToLayer(30, 1);
+
+ expected_.start_layer = 1;
+ AdvanceTime(200);
+ EqualToExpected();
+ AddKilobitsToLayer(130, 1);
+
+ expected_.start_layer = 2;
+ AdvanceTime(200);
+ EqualToExpected();
+
+ // 400 kb in L1 --> @1.0 second mark to fall below threshold.
+ // 210 kb in L0 --> @1.1 second mark to fall below threshold.
+ // Just before L1 @0.999 seconds.
+ AdvanceTime(399);
+ EqualToExpected();
+
+ // Just after L1 @1.001 seconds.
+ expected_.start_layer = 1;
+ AdvanceTime(2);
+ EqualToExpected();
+
+ // Just before L0 @1.099 seconds.
+ AdvanceTime(99);
+ EqualToExpected();
+
+ // Just after L0 @1.101 seconds.
+ expected_.start_layer = 0;
+ AdvanceTime(2);
+ EqualToExpected();
+
+ // @1.1 seconds
+ AdvanceTime(99);
+ EqualToExpected();
+ AddKilobitsToLayer(200, 1);
+
+ expected_.is_keyframe = true;
+ for (int l = 0; l < 3; ++l)
+ expected_.layer[l].ref_buf1 = -1;
+ AdvanceTime(200);
+ EqualToExpected();
+
+ expected_.is_keyframe = false;
+ expected_.start_layer = 2;
+ for (int l = 0; l < 3; ++l)
+ expected_.layer[l].ref_buf1 = l;
+ AdvanceTime(200);
+ EqualToExpected();
+}
+
+// Test that the bitrate calculations are
+// correct when the timestamp wraps.
+TEST_F(ScreenshareLayerTestVP9, TimestampWrap) {
+ InitScreenshareLayers(2);
+ ConfigureBitrateForLayer(100, 0);
+
+ expected_.layer[0].upd_buf = 0;
+ expected_.layer[0].ref_buf1 = 0;
+ expected_.layer[1].upd_buf = 1;
+ expected_.layer[1].ref_buf1 = 1;
+ expected_.start_layer = 0;
+ expected_.stop_layer = 1;
+
+ // Advance time to just before the timestamp wraps.
+ AdvanceTime(std::numeric_limits<uint32_t>::max() / (kTickFrequency / 1000));
+ EqualToExpected();
+ AddKilobitsToLayer(200, 0);
+
+ // Wrap
+ expected_.start_layer = 1;
+ AdvanceTime(1);
+ EqualToExpected();
+}
+
+} // namespace webrtc
diff --git a/webrtc/modules/video_coding/codecs/vp9/vp9.gyp b/webrtc/modules/video_coding/codecs/vp9/vp9.gyp
index 752521c5cb..8993d79bd7 100644
--- a/webrtc/modules/video_coding/codecs/vp9/vp9.gyp
+++ b/webrtc/modules/video_coding/codecs/vp9/vp9.gyp
@@ -14,30 +14,26 @@
{
'target_name': 'webrtc_vp9',
'type': 'static_library',
- 'dependencies': [
- '<(webrtc_root)/common_video/common_video.gyp:common_video',
- '<(webrtc_root)/modules/video_coding/utility/video_coding_utility.gyp:video_coding_utility',
- '<(webrtc_root)/system_wrappers/system_wrappers.gyp:system_wrappers',
- ],
'conditions': [
['build_libvpx==1', {
'dependencies': [
'<(libvpx_dir)/libvpx.gyp:libvpx_new',
],
}],
- ['build_vp9==1', {
- 'sources': [
- 'include/vp9.h',
- 'vp9_frame_buffer_pool.cc',
- 'vp9_frame_buffer_pool.h',
- 'vp9_impl.cc',
- 'vp9_impl.h',
- ],
- }, {
- 'sources': [
- 'vp9_dummy_impl.cc',
- ],
- }],
+ ],
+ 'dependencies': [
+ '<(webrtc_root)/common_video/common_video.gyp:common_video',
+ '<(webrtc_root)/modules/video_coding/utility/video_coding_utility.gyp:video_coding_utility',
+ '<(webrtc_root)/system_wrappers/system_wrappers.gyp:system_wrappers',
+ ],
+ 'sources': [
+ 'include/vp9.h',
+ 'screenshare_layers.cc',
+ 'screenshare_layers.h',
+ 'vp9_frame_buffer_pool.cc',
+ 'vp9_frame_buffer_pool.h',
+ 'vp9_impl.cc',
+ 'vp9_impl.h',
],
},
],
diff --git a/webrtc/modules/video_coding/codecs/vp9/vp9_dummy_impl.cc b/webrtc/modules/video_coding/codecs/vp9/vp9_dummy_impl.cc
deleted file mode 100644
index 491ccbe79c..0000000000
--- a/webrtc/modules/video_coding/codecs/vp9/vp9_dummy_impl.cc
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- *
- */
-
-// This file contains an implementation of empty webrtc VP9 encoder/decoder
-// factories so it is possible to build webrtc without linking with vp9.
-#include "webrtc/modules/video_coding/codecs/vp9/vp9_impl.h"
-
-namespace webrtc {
-VP9Encoder* VP9Encoder::Create() { return nullptr; }
-VP9Decoder* VP9Decoder::Create() { return nullptr; }
-}
diff --git a/webrtc/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.cc b/webrtc/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.cc
index bedbe68ca8..62c05d34fa 100644
--- a/webrtc/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.cc
+++ b/webrtc/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.cc
@@ -16,7 +16,7 @@
#include "vpx/vpx_frame_buffer.h"
#include "webrtc/base/checks.h"
-#include "webrtc/system_wrappers/include/logging.h"
+#include "webrtc/base/logging.h"
namespace webrtc {
diff --git a/webrtc/modules/video_coding/codecs/vp9/vp9_impl.cc b/webrtc/modules/video_coding/codecs/vp9/vp9_impl.cc
index 0ca7eeabe9..e554795519 100644
--- a/webrtc/modules/video_coding/codecs/vp9/vp9_impl.cc
+++ b/webrtc/modules/video_coding/codecs/vp9/vp9_impl.cc
@@ -21,36 +21,31 @@
#include "vpx/vp8cx.h"
#include "vpx/vp8dx.h"
-#include "webrtc/base/bind.h"
#include "webrtc/base/checks.h"
+#include "webrtc/base/keep_ref_until_done.h"
+#include "webrtc/base/logging.h"
#include "webrtc/base/trace_event.h"
#include "webrtc/common.h"
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/system_wrappers/include/logging.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/video_coding/codecs/vp9/screenshare_layers.h"
#include "webrtc/system_wrappers/include/tick_util.h"
-namespace {
-
-// VP9DecoderImpl::ReturnFrame helper function used with WrappedI420Buffer.
-static void WrappedI420BufferNoLongerUsedCb(
- webrtc::Vp9FrameBufferPool::Vp9FrameBuffer* img_buffer) {
- img_buffer->Release();
-}
-
-} // anonymous namespace
-
namespace webrtc {
// Only positive speeds, range for real-time coding currently is: 5 - 8.
// Lower means slower/better quality, higher means fastest/lower quality.
int GetCpuSpeed(int width, int height) {
+#if defined(WEBRTC_ARCH_ARM) || defined(WEBRTC_ARCH_ARM64)
+ return 8;
+#else
// For smaller resolutions, use lower speed setting (get some coding gain at
// the cost of increased encoding complexity).
if (width * height <= 352 * 288)
return 5;
else
return 7;
+#endif
}
VP9Encoder* VP9Encoder::Create() {
@@ -59,7 +54,7 @@ VP9Encoder* VP9Encoder::Create() {
void VP9EncoderImpl::EncoderOutputCodedPacketCallback(vpx_codec_cx_pkt* pkt,
void* user_data) {
- VP9EncoderImpl* enc = (VP9EncoderImpl*)(user_data);
+ VP9EncoderImpl* enc = static_cast<VP9EncoderImpl*>(user_data);
enc->GetEncodedLayerFrame(pkt);
}
@@ -76,9 +71,12 @@ VP9EncoderImpl::VP9EncoderImpl()
raw_(NULL),
input_image_(NULL),
tl0_pic_idx_(0),
- gof_idx_(0),
+ frames_since_kf_(0),
num_temporal_layers_(0),
- num_spatial_layers_(0) {
+ num_spatial_layers_(0),
+ frames_encoded_(0),
+      // Use two spatial layers when screensharing with flexible mode.
+ spatial_layer_(new ScreenshareLayersVP9(2)) {
memset(&codec_, 0, sizeof(codec_));
uint32_t seed = static_cast<uint32_t>(TickTime::MillisecondTimestamp());
srand(seed);
@@ -90,7 +88,7 @@ VP9EncoderImpl::~VP9EncoderImpl() {
int VP9EncoderImpl::Release() {
if (encoded_image_._buffer != NULL) {
- delete [] encoded_image_._buffer;
+ delete[] encoded_image_._buffer;
encoded_image_._buffer = NULL;
}
if (encoder_ != NULL) {
@@ -112,42 +110,72 @@ int VP9EncoderImpl::Release() {
return WEBRTC_VIDEO_CODEC_OK;
}
+bool VP9EncoderImpl::ExplicitlyConfiguredSpatialLayers() const {
+ // We check target_bitrate_bps of the 0th layer to see if the spatial layers
+ // (i.e. bitrates) were explicitly configured.
+ return num_spatial_layers_ > 1 &&
+ codec_.spatialLayers[0].target_bitrate_bps > 0;
+}
+
bool VP9EncoderImpl::SetSvcRates() {
- float rate_ratio[VPX_MAX_LAYERS] = {0};
- float total = 0;
uint8_t i = 0;
- for (i = 0; i < num_spatial_layers_; ++i) {
- if (svc_internal_.svc_params.scaling_factor_num[i] <= 0 ||
- svc_internal_.svc_params.scaling_factor_den[i] <= 0) {
+ if (ExplicitlyConfiguredSpatialLayers()) {
+ if (num_temporal_layers_ > 1) {
+ LOG(LS_ERROR) << "Multiple temporal layers when manually specifying "
+ "spatial layers not implemented yet!";
return false;
}
- rate_ratio[i] = static_cast<float>(
- svc_internal_.svc_params.scaling_factor_num[i]) /
- svc_internal_.svc_params.scaling_factor_den[i];
- total += rate_ratio[i];
- }
-
- for (i = 0; i < num_spatial_layers_; ++i) {
- config_->ss_target_bitrate[i] = static_cast<unsigned int>(
- config_->rc_target_bitrate * rate_ratio[i] / total);
- if (num_temporal_layers_ == 1) {
- config_->layer_target_bitrate[i] = config_->ss_target_bitrate[i];
- } else if (num_temporal_layers_ == 2) {
- config_->layer_target_bitrate[i * num_temporal_layers_] =
- config_->ss_target_bitrate[i] * 2 / 3;
- config_->layer_target_bitrate[i * num_temporal_layers_ + 1] =
- config_->ss_target_bitrate[i];
- } else if (num_temporal_layers_ == 3) {
- config_->layer_target_bitrate[i * num_temporal_layers_] =
- config_->ss_target_bitrate[i] / 2;
- config_->layer_target_bitrate[i * num_temporal_layers_ + 1] =
- config_->layer_target_bitrate[i * num_temporal_layers_] +
- (config_->ss_target_bitrate[i] / 4);
- config_->layer_target_bitrate[i * num_temporal_layers_ + 2] =
- config_->ss_target_bitrate[i];
- } else {
- return false;
+ int total_bitrate_bps = 0;
+ for (i = 0; i < num_spatial_layers_; ++i)
+ total_bitrate_bps += codec_.spatialLayers[i].target_bitrate_bps;
+ // If total bitrate differs now from what has been specified at the
+ // beginning, update the bitrates in the same ratio as before.
+ for (i = 0; i < num_spatial_layers_; ++i) {
+ config_->ss_target_bitrate[i] = config_->layer_target_bitrate[i] =
+ static_cast<int>(static_cast<int64_t>(config_->rc_target_bitrate) *
+ codec_.spatialLayers[i].target_bitrate_bps /
+ total_bitrate_bps);
+ }
+ } else {
+ float rate_ratio[VPX_MAX_LAYERS] = {0};
+ float total = 0;
+
+ for (i = 0; i < num_spatial_layers_; ++i) {
+ if (svc_internal_.svc_params.scaling_factor_num[i] <= 0 ||
+ svc_internal_.svc_params.scaling_factor_den[i] <= 0) {
+ LOG(LS_ERROR) << "Scaling factors not specified!";
+ return false;
+ }
+ rate_ratio[i] =
+ static_cast<float>(svc_internal_.svc_params.scaling_factor_num[i]) /
+ svc_internal_.svc_params.scaling_factor_den[i];
+ total += rate_ratio[i];
+ }
+
+ for (i = 0; i < num_spatial_layers_; ++i) {
+ config_->ss_target_bitrate[i] = static_cast<unsigned int>(
+ config_->rc_target_bitrate * rate_ratio[i] / total);
+ if (num_temporal_layers_ == 1) {
+ config_->layer_target_bitrate[i] = config_->ss_target_bitrate[i];
+ } else if (num_temporal_layers_ == 2) {
+ config_->layer_target_bitrate[i * num_temporal_layers_] =
+ config_->ss_target_bitrate[i] * 2 / 3;
+ config_->layer_target_bitrate[i * num_temporal_layers_ + 1] =
+ config_->ss_target_bitrate[i];
+ } else if (num_temporal_layers_ == 3) {
+ config_->layer_target_bitrate[i * num_temporal_layers_] =
+ config_->ss_target_bitrate[i] / 2;
+ config_->layer_target_bitrate[i * num_temporal_layers_ + 1] =
+ config_->layer_target_bitrate[i * num_temporal_layers_] +
+ (config_->ss_target_bitrate[i] / 4);
+ config_->layer_target_bitrate[i * num_temporal_layers_ + 2] =
+ config_->ss_target_bitrate[i];
+ } else {
+ LOG(LS_ERROR) << "Unsupported number of temporal layers: "
+ << num_temporal_layers_;
+ return false;
+ }
}
}
@@ -178,6 +206,7 @@ int VP9EncoderImpl::SetRates(uint32_t new_bitrate_kbit,
}
config_->rc_target_bitrate = new_bitrate_kbit;
codec_.maxFramerate = new_framerate;
+ spatial_layer_->ConfigureBitrate(new_bitrate_kbit, 0);
if (!SetSvcRates()) {
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
@@ -216,6 +245,7 @@ int VP9EncoderImpl::InitEncode(const VideoCodec* inst,
if (inst->codecSpecific.VP9.numberOfSpatialLayers > 2) {
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
}
+
int retVal = Release();
if (retVal < 0) {
return retVal;
@@ -237,10 +267,10 @@ int VP9EncoderImpl::InitEncode(const VideoCodec* inst,
num_temporal_layers_ = 1;
// Random start 16 bits is enough.
- picture_id_ = static_cast<uint16_t>(rand()) & 0x7FFF;
+ picture_id_ = static_cast<uint16_t>(rand()) & 0x7FFF; // NOLINT
// Allocate memory for encoded image
if (encoded_image_._buffer != NULL) {
- delete [] encoded_image_._buffer;
+ delete[] encoded_image_._buffer;
}
encoded_image_._size = CalcBufferSize(kI420, codec_.width, codec_.height);
encoded_image_._buffer = new uint8_t[encoded_image_._size];
@@ -248,8 +278,8 @@ int VP9EncoderImpl::InitEncode(const VideoCodec* inst,
// Creating a wrapper to the image - setting image data to NULL. Actual
// pointer will be set in encode. Setting align to 1, as it is meaningless
// (actual memory is not allocated).
- raw_ = vpx_img_wrap(NULL, VPX_IMG_FMT_I420, codec_.width, codec_.height,
- 1, NULL);
+ raw_ = vpx_img_wrap(NULL, VPX_IMG_FMT_I420, codec_.width, codec_.height, 1,
+ NULL);
// Populate encoder configuration with default values.
if (vpx_codec_enc_config_default(vpx_codec_vp9_cx(), config_, 0)) {
return WEBRTC_VIDEO_CODEC_ERROR;
@@ -264,8 +294,8 @@ int VP9EncoderImpl::InitEncode(const VideoCodec* inst,
config_->g_lag_in_frames = 0; // 0- no frame lagging
config_->g_threads = 1;
// Rate control settings.
- config_->rc_dropframe_thresh = inst->codecSpecific.VP9.frameDroppingOn ?
- 30 : 0;
+ config_->rc_dropframe_thresh =
+ inst->codecSpecific.VP9.frameDroppingOn ? 30 : 0;
config_->rc_end_usage = VPX_CBR;
config_->g_pass = VPX_RC_ONE_PASS;
config_->rc_min_quantizer = 2;
@@ -277,24 +307,32 @@ int VP9EncoderImpl::InitEncode(const VideoCodec* inst,
config_->rc_buf_sz = 1000;
// Set the maximum target size of any key-frame.
rc_max_intra_target_ = MaxIntraTarget(config_->rc_buf_optimal_sz);
- if (inst->codecSpecific.VP9.keyFrameInterval > 0) {
+ if (inst->codecSpecific.VP9.keyFrameInterval > 0) {
config_->kf_mode = VPX_KF_AUTO;
config_->kf_max_dist = inst->codecSpecific.VP9.keyFrameInterval;
+ // Needs to be set (in svc mode) to get correct periodic key frame interval
+ // (will have no effect in non-svc).
+ config_->kf_min_dist = config_->kf_max_dist;
} else {
config_->kf_mode = VPX_KF_DISABLED;
}
- config_->rc_resize_allowed = inst->codecSpecific.VP9.automaticResizeOn ?
- 1 : 0;
+ config_->rc_resize_allowed =
+ inst->codecSpecific.VP9.automaticResizeOn ? 1 : 0;
// Determine number of threads based on the image size and #cores.
- config_->g_threads = NumberOfThreads(config_->g_w,
- config_->g_h,
- number_of_cores);
+ config_->g_threads =
+ NumberOfThreads(config_->g_w, config_->g_h, number_of_cores);
cpu_speed_ = GetCpuSpeed(config_->g_w, config_->g_h);
// TODO(asapersson): Check configuration of temporal switch up and increase
// pattern length.
- if (num_temporal_layers_ == 1) {
+ is_flexible_mode_ = inst->codecSpecific.VP9.flexibleMode;
+ if (is_flexible_mode_) {
+ config_->temporal_layering_mode = VP9E_TEMPORAL_LAYERING_MODE_BYPASS;
+ config_->ts_number_layers = num_temporal_layers_;
+ if (codec_.mode == kScreensharing)
+ spatial_layer_->ConfigureBitrate(inst->startBitrate, 0);
+ } else if (num_temporal_layers_ == 1) {
gof_.SetGofInfoVP9(kTemporalStructureMode1);
config_->temporal_layering_mode = VP9E_TEMPORAL_LAYERING_MODE_NOLAYERING;
config_->ts_number_layers = 1;
@@ -326,7 +364,7 @@ int VP9EncoderImpl::InitEncode(const VideoCodec* inst,
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
}
- tl0_pic_idx_ = static_cast<uint8_t>(rand());
+ tl0_pic_idx_ = static_cast<uint8_t>(rand()); // NOLINT
return InitAndSetControlSettings(inst);
}
@@ -347,16 +385,28 @@ int VP9EncoderImpl::NumberOfThreads(int width,
}
int VP9EncoderImpl::InitAndSetControlSettings(const VideoCodec* inst) {
- config_->ss_number_layers = num_spatial_layers_;
-
- int scaling_factor_num = 256;
- for (int i = num_spatial_layers_ - 1; i >= 0; --i) {
+ // Set QP-min/max per spatial and temporal layer.
+ int tot_num_layers = num_spatial_layers_ * num_temporal_layers_;
+ for (int i = 0; i < tot_num_layers; ++i) {
svc_internal_.svc_params.max_quantizers[i] = config_->rc_max_quantizer;
svc_internal_.svc_params.min_quantizers[i] = config_->rc_min_quantizer;
- // 1:2 scaling in each dimension.
- svc_internal_.svc_params.scaling_factor_num[i] = scaling_factor_num;
- svc_internal_.svc_params.scaling_factor_den[i] = 256;
- scaling_factor_num /= 2;
+ }
+ config_->ss_number_layers = num_spatial_layers_;
+ if (ExplicitlyConfiguredSpatialLayers()) {
+ for (int i = 0; i < num_spatial_layers_; ++i) {
+ const auto& layer = codec_.spatialLayers[i];
+ svc_internal_.svc_params.scaling_factor_num[i] = layer.scaling_factor_num;
+ svc_internal_.svc_params.scaling_factor_den[i] = layer.scaling_factor_den;
+ }
+ } else {
+ int scaling_factor_num = 256;
+ for (int i = num_spatial_layers_ - 1; i >= 0; --i) {
+ // 1:2 scaling in each dimension.
+ svc_internal_.svc_params.scaling_factor_num[i] = scaling_factor_num;
+ svc_internal_.svc_params.scaling_factor_den[i] = 256;
+ if (codec_.mode != kScreensharing)
+ scaling_factor_num /= 2;
+ }
}
if (!SetSvcRates()) {
@@ -381,8 +431,10 @@ int VP9EncoderImpl::InitAndSetControlSettings(const VideoCodec* inst) {
}
// Register callback for getting each spatial layer.
vpx_codec_priv_output_cx_pkt_cb_pair_t cbp = {
- VP9EncoderImpl::EncoderOutputCodedPacketCallback, (void*)(this)};
- vpx_codec_control(encoder_, VP9E_REGISTER_CX_CALLBACK, (void*)(&cbp));
+ VP9EncoderImpl::EncoderOutputCodedPacketCallback,
+ reinterpret_cast<void*>(this)};
+ vpx_codec_control(encoder_, VP9E_REGISTER_CX_CALLBACK,
+ reinterpret_cast<void*>(&cbp));
// Control function to set the number of column tiles in encoding a frame, in
// log2 unit: e.g., 0 = 1 tile column, 1 = 2 tile columns, 2 = 4 tile columns.
@@ -417,7 +469,7 @@ uint32_t VP9EncoderImpl::MaxIntraTarget(uint32_t optimal_buffer_size) {
optimal_buffer_size * scale_par * codec_.maxFramerate / 10;
// Don't go below 3 times the per frame bandwidth.
const uint32_t min_intra_size = 300;
- return (target_pct < min_intra_size) ? min_intra_size: target_pct;
+ return (target_pct < min_intra_size) ? min_intra_size : target_pct;
}
int VP9EncoderImpl::Encode(const VideoFrame& input_image,
@@ -455,12 +507,35 @@ int VP9EncoderImpl::Encode(const VideoFrame& input_image,
raw_->stride[VPX_PLANE_U] = input_image.stride(kUPlane);
raw_->stride[VPX_PLANE_V] = input_image.stride(kVPlane);
- int flags = 0;
+ vpx_enc_frame_flags_t flags = 0;
bool send_keyframe = (frame_type == kVideoFrameKey);
if (send_keyframe) {
// Key frame request from caller.
flags = VPX_EFLAG_FORCE_KF;
}
+
+ if (is_flexible_mode_) {
+ SuperFrameRefSettings settings;
+
+ // These structs are copied when calling vpx_codec_control,
+ // therefore it is ok for them to go out of scope.
+ vpx_svc_ref_frame_config enc_layer_conf;
+ vpx_svc_layer_id layer_id;
+
+ if (codec_.mode == kRealtimeVideo) {
+ // Real time video not yet implemented in flexible mode.
+ RTC_NOTREACHED();
+ } else {
+ settings = spatial_layer_->GetSuperFrameSettings(input_image.timestamp(),
+ send_keyframe);
+ }
+ enc_layer_conf = GenerateRefsAndFlags(settings);
+ layer_id.temporal_layer_id = 0;
+ layer_id.spatial_layer_id = settings.start_layer;
+ vpx_codec_control(encoder_, VP9E_SET_SVC_LAYER_ID, &layer_id);
+ vpx_codec_control(encoder_, VP9E_SET_SVC_REF_FRAME_CONFIG, &enc_layer_conf);
+ }
+
assert(codec_.maxFramerate > 0);
uint32_t duration = 90000 / codec_.maxFramerate;
if (vpx_codec_encode(encoder_, raw_, timestamp_, duration, flags,
@@ -473,12 +548,12 @@ int VP9EncoderImpl::Encode(const VideoFrame& input_image,
}
void VP9EncoderImpl::PopulateCodecSpecific(CodecSpecificInfo* codec_specific,
- const vpx_codec_cx_pkt& pkt,
- uint32_t timestamp) {
+ const vpx_codec_cx_pkt& pkt,
+ uint32_t timestamp) {
assert(codec_specific != NULL);
codec_specific->codecType = kVideoCodecVP9;
- CodecSpecificInfoVP9 *vp9_info = &(codec_specific->codecSpecific.VP9);
- // TODO(asapersson): Set correct values.
+ CodecSpecificInfoVP9* vp9_info = &(codec_specific->codecSpecific.VP9);
+ // TODO(asapersson): Set correct value.
vp9_info->inter_pic_predicted =
(pkt.data.frame.flags & VPX_FRAME_IS_KEY) ? false : true;
vp9_info->flexible_mode = codec_.codecSpecific.VP9.flexibleMode;
@@ -486,9 +561,6 @@ void VP9EncoderImpl::PopulateCodecSpecific(CodecSpecificInfo* codec_specific,
!codec_.codecSpecific.VP9.flexibleMode)
? true
: false;
- if (pkt.data.frame.flags & VPX_FRAME_IS_KEY) {
- gof_idx_ = 0;
- }
vpx_svc_layer_id_t layer_id = {0};
vpx_codec_control(encoder_, VP9E_GET_SVC_LAYER_ID, &layer_id);
@@ -511,25 +583,31 @@ void VP9EncoderImpl::PopulateCodecSpecific(CodecSpecificInfo* codec_specific,
vp9_info->ss_data_available = false;
}
- if (vp9_info->flexible_mode) {
- vp9_info->gof_idx = kNoGofIdx;
+ // TODO(asapersson): this info has to be obtained from the encoder.
+ vp9_info->temporal_up_switch = false;
+
+ bool is_first_frame = false;
+ if (is_flexible_mode_) {
+ is_first_frame =
+ layer_id.spatial_layer_id == spatial_layer_->GetStartLayer();
} else {
- vp9_info->gof_idx =
- static_cast<uint8_t>(gof_idx_++ % gof_.num_frames_in_gof);
+ is_first_frame = layer_id.spatial_layer_id == 0;
}
- // TODO(asapersson): this info has to be obtained from the encoder.
- vp9_info->temporal_up_switch = true;
-
- if (layer_id.spatial_layer_id == 0) {
+ if (is_first_frame) {
picture_id_ = (picture_id_ + 1) & 0x7FFF;
// TODO(asapersson): this info has to be obtained from the encoder.
vp9_info->inter_layer_predicted = false;
+ ++frames_since_kf_;
} else {
// TODO(asapersson): this info has to be obtained from the encoder.
vp9_info->inter_layer_predicted = true;
}
+ if (pkt.data.frame.flags & VPX_FRAME_IS_KEY) {
+ frames_since_kf_ = 0;
+ }
+
vp9_info->picture_id = picture_id_;
if (!vp9_info->flexible_mode) {
@@ -542,6 +620,20 @@ void VP9EncoderImpl::PopulateCodecSpecific(CodecSpecificInfo* codec_specific,
// Always populate this, so that the packetizer can properly set the marker
// bit.
vp9_info->num_spatial_layers = num_spatial_layers_;
+
+ vp9_info->num_ref_pics = 0;
+ if (vp9_info->flexible_mode) {
+ vp9_info->gof_idx = kNoGofIdx;
+ vp9_info->num_ref_pics = num_ref_pics_[layer_id.spatial_layer_id];
+ for (int i = 0; i < num_ref_pics_[layer_id.spatial_layer_id]; ++i) {
+ vp9_info->p_diff[i] = p_diff_[layer_id.spatial_layer_id][i];
+ }
+ } else {
+ vp9_info->gof_idx =
+ static_cast<uint8_t>(frames_since_kf_ % gof_.num_frames_in_gof);
+ vp9_info->temporal_up_switch = gof_.temporal_up_switch[vp9_info->gof_idx];
+ }
+
if (vp9_info->ss_data_available) {
vp9_info->spatial_layer_resolution_present = true;
for (size_t i = 0; i < vp9_info->num_spatial_layers; ++i) {
@@ -577,6 +669,14 @@ int VP9EncoderImpl::GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt) {
frag_info.fragmentationPlType[part_idx] = 0;
frag_info.fragmentationTimeDiff[part_idx] = 0;
encoded_image_._length += static_cast<uint32_t>(pkt->data.frame.sz);
+
+ vpx_svc_layer_id_t layer_id = {0};
+ vpx_codec_control(encoder_, VP9E_GET_SVC_LAYER_ID, &layer_id);
+ if (is_flexible_mode_ && codec_.mode == kScreensharing)
+ spatial_layer_->LayerFrameEncoded(
+ static_cast<unsigned int>(encoded_image_._length),
+ layer_id.spatial_layer_id);
+
assert(encoded_image_._length <= encoded_image_._size);
// End of frame.
@@ -598,6 +698,108 @@ int VP9EncoderImpl::GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt) {
return WEBRTC_VIDEO_CODEC_OK;
}
+vpx_svc_ref_frame_config VP9EncoderImpl::GenerateRefsAndFlags(
+ const SuperFrameRefSettings& settings) {
+ static const vpx_enc_frame_flags_t kAllFlags =
+ VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_LAST |
+ VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_GF;
+ vpx_svc_ref_frame_config sf_conf = {};
+ if (settings.is_keyframe) {
+ // Used later on to make sure we don't make any invalid references.
+ memset(buffer_updated_at_frame_, -1, sizeof(buffer_updated_at_frame_));
+ for (int layer = settings.start_layer; layer <= settings.stop_layer;
+ ++layer) {
+ num_ref_pics_[layer] = 0;
+ buffer_updated_at_frame_[settings.layer[layer].upd_buf] = frames_encoded_;
+ // When encoding a keyframe only the alt_fb_idx is used
+ // to specify which layer ends up in which buffer.
+ sf_conf.alt_fb_idx[layer] = settings.layer[layer].upd_buf;
+ }
+ } else {
+ for (int layer_idx = settings.start_layer; layer_idx <= settings.stop_layer;
+ ++layer_idx) {
+ vpx_enc_frame_flags_t layer_flags = kAllFlags;
+ num_ref_pics_[layer_idx] = 0;
+ int8_t refs[3] = {settings.layer[layer_idx].ref_buf1,
+ settings.layer[layer_idx].ref_buf2,
+ settings.layer[layer_idx].ref_buf3};
+
+ for (unsigned int ref_idx = 0; ref_idx < kMaxVp9RefPics; ++ref_idx) {
+ if (refs[ref_idx] == -1)
+ continue;
+
+ RTC_DCHECK_GE(refs[ref_idx], 0);
+ RTC_DCHECK_LE(refs[ref_idx], 7);
+ // Easier to remove flags from all flags rather than having to
+ // build the flags from 0.
+ switch (num_ref_pics_[layer_idx]) {
+ case 0: {
+ sf_conf.lst_fb_idx[layer_idx] = refs[ref_idx];
+ layer_flags &= ~VP8_EFLAG_NO_REF_LAST;
+ break;
+ }
+ case 1: {
+ sf_conf.gld_fb_idx[layer_idx] = refs[ref_idx];
+ layer_flags &= ~VP8_EFLAG_NO_REF_GF;
+ break;
+ }
+ case 2: {
+ sf_conf.alt_fb_idx[layer_idx] = refs[ref_idx];
+ layer_flags &= ~VP8_EFLAG_NO_REF_ARF;
+ break;
+ }
+ }
+ // Make sure we don't reference a buffer that hasn't been
+ // used at all or hasn't been used since a keyframe.
+ RTC_DCHECK_NE(buffer_updated_at_frame_[refs[ref_idx]], -1);
+
+ p_diff_[layer_idx][num_ref_pics_[layer_idx]] =
+ frames_encoded_ - buffer_updated_at_frame_[refs[ref_idx]];
+ num_ref_pics_[layer_idx]++;
+ }
+
+ bool upd_buf_same_as_a_ref = false;
+ if (settings.layer[layer_idx].upd_buf != -1) {
+ for (unsigned int ref_idx = 0; ref_idx < kMaxVp9RefPics; ++ref_idx) {
+ if (settings.layer[layer_idx].upd_buf == refs[ref_idx]) {
+ switch (ref_idx) {
+ case 0: {
+ layer_flags &= ~VP8_EFLAG_NO_UPD_LAST;
+ break;
+ }
+ case 1: {
+ layer_flags &= ~VP8_EFLAG_NO_UPD_GF;
+ break;
+ }
+ case 2: {
+ layer_flags &= ~VP8_EFLAG_NO_UPD_ARF;
+ break;
+ }
+ }
+ upd_buf_same_as_a_ref = true;
+ break;
+ }
+ }
+ if (!upd_buf_same_as_a_ref) {
+ // If we have three references and a buffer is specified to be
+ // updated, then that buffer must be the same as one of the
+ // three references.
+ RTC_CHECK_LT(num_ref_pics_[layer_idx], kMaxVp9RefPics);
+
+ sf_conf.alt_fb_idx[layer_idx] = settings.layer[layer_idx].upd_buf;
+ layer_flags ^= VP8_EFLAG_NO_UPD_ARF;
+ }
+
+ int updated_buffer = settings.layer[layer_idx].upd_buf;
+ buffer_updated_at_frame_[updated_buffer] = frames_encoded_;
+ sf_conf.frame_flags[layer_idx] = layer_flags;
+ }
+ }
+ }
+ ++frames_encoded_;
+ return sf_conf;
+}
+
int VP9EncoderImpl::SetChannelParameters(uint32_t packet_loss, int64_t rtt) {
return WEBRTC_VIDEO_CODEC_OK;
}
@@ -608,6 +810,10 @@ int VP9EncoderImpl::RegisterEncodeCompleteCallback(
return WEBRTC_VIDEO_CODEC_OK;
}
+const char* VP9EncoderImpl::ImplementationName() const {
+ return "libvpx";
+}
+
VP9Decoder* VP9Decoder::Create() {
return new VP9DecoderImpl();
}
@@ -652,7 +858,7 @@ int VP9DecoderImpl::InitDecode(const VideoCodec* inst, int number_of_cores) {
if (decoder_ == NULL) {
decoder_ = new vpx_codec_ctx_t;
}
- vpx_codec_dec_cfg_t cfg;
+ vpx_codec_dec_cfg_t cfg;
// Setting number of threads to a constant value (1)
cfg.threads = 1;
cfg.h = cfg.w = 0; // set after decode
@@ -705,10 +911,8 @@ int VP9DecoderImpl::Decode(const EncodedImage& input_image,
}
// During decode libvpx may get and release buffers from |frame_buffer_pool_|.
// In practice libvpx keeps a few (~3-4) buffers alive at a time.
- if (vpx_codec_decode(decoder_,
- buffer,
- static_cast<unsigned int>(input_image._length),
- 0,
+ if (vpx_codec_decode(decoder_, buffer,
+ static_cast<unsigned int>(input_image._length), 0,
VPX_DL_REALTIME)) {
return WEBRTC_VIDEO_CODEC_ERROR;
}
@@ -730,24 +934,22 @@ int VP9DecoderImpl::ReturnFrame(const vpx_image_t* img, uint32_t timestamp) {
}
// This buffer contains all of |img|'s image data, a reference counted
- // Vp9FrameBuffer. Performing AddRef/Release ensures it is not released and
- // recycled during use (libvpx is done with the buffers after a few
+ // Vp9FrameBuffer. (libvpx is done with the buffers after a few
// vpx_codec_decode calls or vpx_codec_destroy).
Vp9FrameBufferPool::Vp9FrameBuffer* img_buffer =
static_cast<Vp9FrameBufferPool::Vp9FrameBuffer*>(img->fb_priv);
- img_buffer->AddRef();
// The buffer can be used directly by the VideoFrame (without copy) by
// using a WrappedI420Buffer.
rtc::scoped_refptr<WrappedI420Buffer> img_wrapped_buffer(
new rtc::RefCountedObject<webrtc::WrappedI420Buffer>(
- img->d_w, img->d_h,
- img->planes[VPX_PLANE_Y], img->stride[VPX_PLANE_Y],
- img->planes[VPX_PLANE_U], img->stride[VPX_PLANE_U],
- img->planes[VPX_PLANE_V], img->stride[VPX_PLANE_V],
+ img->d_w, img->d_h, img->planes[VPX_PLANE_Y],
+ img->stride[VPX_PLANE_Y], img->planes[VPX_PLANE_U],
+ img->stride[VPX_PLANE_U], img->planes[VPX_PLANE_V],
+ img->stride[VPX_PLANE_V],
// WrappedI420Buffer's mechanism for allowing the release of its frame
// buffer is through a callback function. This is where we should
// release |img_buffer|.
- rtc::Bind(&WrappedI420BufferNoLongerUsedCb, img_buffer)));
+ rtc::KeepRefUntilDone(img_buffer)));
VideoFrame decoded_image;
decoded_image.set_video_frame_buffer(img_wrapped_buffer);
@@ -781,4 +983,9 @@ int VP9DecoderImpl::Release() {
inited_ = false;
return WEBRTC_VIDEO_CODEC_OK;
}
+
+const char* VP9DecoderImpl::ImplementationName() const {
+ return "libvpx";
+}
+
} // namespace webrtc
diff --git a/webrtc/modules/video_coding/codecs/vp9/vp9_impl.h b/webrtc/modules/video_coding/codecs/vp9/vp9_impl.h
index f9c123079e..bfa4540304 100644
--- a/webrtc/modules/video_coding/codecs/vp9/vp9_impl.h
+++ b/webrtc/modules/video_coding/codecs/vp9/vp9_impl.h
@@ -9,8 +9,10 @@
*
*/
-#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_VP9_IMPL_H_
-#define WEBRTC_MODULES_VIDEO_CODING_CODECS_VP9_IMPL_H_
+#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_VP9_VP9_IMPL_H_
+#define WEBRTC_MODULES_VIDEO_CODING_CODECS_VP9_VP9_IMPL_H_
+
+#include <vector>
#include "webrtc/modules/video_coding/codecs/vp9/include/vp9.h"
#include "webrtc/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.h"
@@ -21,6 +23,8 @@
namespace webrtc {
+class ScreenshareLayersVP9;
+
class VP9EncoderImpl : public VP9Encoder {
public:
VP9EncoderImpl();
@@ -45,6 +49,22 @@ class VP9EncoderImpl : public VP9Encoder {
void OnDroppedFrame() override {}
+ const char* ImplementationName() const override;
+
+ struct LayerFrameRefSettings {
+ int8_t upd_buf = -1; // -1 - no update, 0..7 - update buffer 0..7
+ int8_t ref_buf1 = -1; // -1 - no reference, 0..7 - reference buffer 0..7
+ int8_t ref_buf2 = -1; // -1 - no reference, 0..7 - reference buffer 0..7
+ int8_t ref_buf3 = -1; // -1 - no reference, 0..7 - reference buffer 0..7
+ };
+
+ struct SuperFrameRefSettings {
+ LayerFrameRefSettings layer[kMaxVp9NumberOfSpatialLayers];
+ uint8_t start_layer = 0; // The first spatial layer to be encoded.
+ uint8_t stop_layer = 0; // The last spatial layer to be encoded.
+ bool is_keyframe = false;
+ };
+
private:
// Determine number of encoder threads to use.
int NumberOfThreads(int width, int height, int number_of_cores);
@@ -56,8 +76,18 @@ class VP9EncoderImpl : public VP9Encoder {
const vpx_codec_cx_pkt& pkt,
uint32_t timestamp);
+ bool ExplicitlyConfiguredSpatialLayers() const;
bool SetSvcRates();
+ // Used for flexible mode to set the flags and buffer references used
+ // by the encoder. Also calculates the references used by the RTP
+ // packetizer.
+ //
+ // Has to be called for every frame (keyframes included) to update the
+ // state used to calculate references.
+ vpx_svc_ref_frame_config GenerateRefsAndFlags(
+ const SuperFrameRefSettings& settings);
+
virtual int GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt);
// Callback function for outputting packets per spatial layer.
@@ -88,11 +118,18 @@ class VP9EncoderImpl : public VP9Encoder {
GofInfoVP9 gof_; // Contains each frame's temporal information for
// non-flexible mode.
uint8_t tl0_pic_idx_; // Only used in non-flexible mode.
- size_t gof_idx_; // Only used in non-flexible mode.
+ size_t frames_since_kf_;
uint8_t num_temporal_layers_;
uint8_t num_spatial_layers_;
-};
+ // Used for flexible mode.
+ bool is_flexible_mode_;
+ int64_t buffer_updated_at_frame_[kNumVp9Buffers];
+ int64_t frames_encoded_;
+ uint8_t num_ref_pics_[kMaxVp9NumberOfSpatialLayers];
+ uint8_t p_diff_[kMaxVp9NumberOfSpatialLayers][kMaxVp9RefPics];
+ rtc::scoped_ptr<ScreenshareLayersVP9> spatial_layer_;
+};
class VP9DecoderImpl : public VP9Decoder {
public:
@@ -114,6 +151,8 @@ class VP9DecoderImpl : public VP9Decoder {
int Reset() override;
+ const char* ImplementationName() const override;
+
private:
int ReturnFrame(const vpx_image_t* img, uint32_t timeStamp);
@@ -127,4 +166,4 @@ class VP9DecoderImpl : public VP9Decoder {
};
} // namespace webrtc
-#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_VP9_IMPL_H_
+#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_VP9_VP9_IMPL_H_
diff --git a/webrtc/modules/video_coding/main/source/content_metrics_processing.cc b/webrtc/modules/video_coding/content_metrics_processing.cc
index 757ffb0e46..0c3a6dbc6c 100644
--- a/webrtc/modules/video_coding/main/source/content_metrics_processing.cc
+++ b/webrtc/modules/video_coding/content_metrics_processing.cc
@@ -8,12 +8,12 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/video_coding/main/source/content_metrics_processing.h"
+#include "webrtc/modules/video_coding/content_metrics_processing.h"
#include <math.h>
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/modules/video_coding/main/interface/video_coding_defines.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/video_coding/include/video_coding_defines.h"
namespace webrtc {
//////////////////////////////////
@@ -38,15 +38,15 @@ int VCMContentMetricsProcessing::Reset() {
recursive_avg_->Reset();
uniform_avg_->Reset();
frame_cnt_uniform_avg_ = 0;
- avg_motion_level_ = 0.0f;
+ avg_motion_level_ = 0.0f;
avg_spatial_level_ = 0.0f;
return VCM_OK;
}
void VCMContentMetricsProcessing::UpdateFrameRate(uint32_t frameRate) {
// Update factor for recursive averaging.
- recursive_avg_factor_ = static_cast<float> (1000.0f) /
- static_cast<float>(frameRate * kQmMinIntervalMs);
+ recursive_avg_factor_ = static_cast<float>(1000.0f) /
+ static_cast<float>(frameRate * kQmMinIntervalMs);
}
VideoContentMetrics* VCMContentMetricsProcessing::LongTermAvgData() {
@@ -58,10 +58,10 @@ VideoContentMetrics* VCMContentMetricsProcessing::ShortTermAvgData() {
return NULL;
}
// Two metrics are used: motion and spatial level.
- uniform_avg_->motion_magnitude = avg_motion_level_ /
- static_cast<float>(frame_cnt_uniform_avg_);
- uniform_avg_->spatial_pred_err = avg_spatial_level_ /
- static_cast<float>(frame_cnt_uniform_avg_);
+ uniform_avg_->motion_magnitude =
+ avg_motion_level_ / static_cast<float>(frame_cnt_uniform_avg_);
+ uniform_avg_->spatial_pred_err =
+ avg_spatial_level_ / static_cast<float>(frame_cnt_uniform_avg_);
return uniform_avg_;
}
@@ -73,7 +73,7 @@ void VCMContentMetricsProcessing::ResetShortTermAvgData() {
}
int VCMContentMetricsProcessing::UpdateContentData(
- const VideoContentMetrics *contentMetrics) {
+ const VideoContentMetrics* contentMetrics) {
if (contentMetrics == NULL) {
return VCM_OK;
}
@@ -81,7 +81,7 @@ int VCMContentMetricsProcessing::UpdateContentData(
}
int VCMContentMetricsProcessing::ProcessContent(
- const VideoContentMetrics *contentMetrics) {
+ const VideoContentMetrics* contentMetrics) {
// Update the recursive averaged metrics: average is over longer window
// of time: over QmMinIntervalMs ms.
UpdateRecursiveAvg(contentMetrics);
@@ -92,34 +92,33 @@ int VCMContentMetricsProcessing::ProcessContent(
}
void VCMContentMetricsProcessing::UpdateUniformAvg(
- const VideoContentMetrics *contentMetrics) {
+ const VideoContentMetrics* contentMetrics) {
// Update frame counter.
frame_cnt_uniform_avg_ += 1;
// Update averaged metrics: motion and spatial level are used.
avg_motion_level_ += contentMetrics->motion_magnitude;
- avg_spatial_level_ += contentMetrics->spatial_pred_err;
+ avg_spatial_level_ += contentMetrics->spatial_pred_err;
return;
}
void VCMContentMetricsProcessing::UpdateRecursiveAvg(
- const VideoContentMetrics *contentMetrics) {
-
+ const VideoContentMetrics* contentMetrics) {
// Spatial metrics: 2x2, 1x2(H), 2x1(V).
- recursive_avg_->spatial_pred_err = (1 - recursive_avg_factor_) *
- recursive_avg_->spatial_pred_err +
+ recursive_avg_->spatial_pred_err =
+ (1 - recursive_avg_factor_) * recursive_avg_->spatial_pred_err +
recursive_avg_factor_ * contentMetrics->spatial_pred_err;
- recursive_avg_->spatial_pred_err_h = (1 - recursive_avg_factor_) *
- recursive_avg_->spatial_pred_err_h +
+ recursive_avg_->spatial_pred_err_h =
+ (1 - recursive_avg_factor_) * recursive_avg_->spatial_pred_err_h +
recursive_avg_factor_ * contentMetrics->spatial_pred_err_h;
- recursive_avg_->spatial_pred_err_v = (1 - recursive_avg_factor_) *
- recursive_avg_->spatial_pred_err_v +
+ recursive_avg_->spatial_pred_err_v =
+ (1 - recursive_avg_factor_) * recursive_avg_->spatial_pred_err_v +
recursive_avg_factor_ * contentMetrics->spatial_pred_err_v;
// Motion metric: Derived from NFD (normalized frame difference).
- recursive_avg_->motion_magnitude = (1 - recursive_avg_factor_) *
- recursive_avg_->motion_magnitude +
+ recursive_avg_->motion_magnitude =
+ (1 - recursive_avg_factor_) * recursive_avg_->motion_magnitude +
recursive_avg_factor_ * contentMetrics->motion_magnitude;
}
-} // namespace
+} // namespace webrtc
diff --git a/webrtc/modules/video_coding/main/source/content_metrics_processing.h b/webrtc/modules/video_coding/content_metrics_processing.h
index 3517f757d4..3f67ec19c9 100644
--- a/webrtc/modules/video_coding/main/source/content_metrics_processing.h
+++ b/webrtc/modules/video_coding/content_metrics_processing.h
@@ -18,14 +18,10 @@ namespace webrtc {
struct VideoContentMetrics;
// QM interval time (in ms)
-enum {
- kQmMinIntervalMs = 10000
-};
+enum { kQmMinIntervalMs = 10000 };
// Flag for NFD metric vs motion metric
-enum {
- kNfdMetric = 1
-};
+enum { kNfdMetric = 1 };
/**********************************/
/* Content Metrics Processing */
@@ -36,7 +32,7 @@ class VCMContentMetricsProcessing {
~VCMContentMetricsProcessing();
// Update class with latest metrics.
- int UpdateContentData(const VideoContentMetrics *contentMetrics);
+ int UpdateContentData(const VideoContentMetrics* contentMetrics);
// Reset the short-term averaged content data.
void ResetShortTermAvgData();
@@ -57,13 +53,13 @@ class VCMContentMetricsProcessing {
private:
// Compute working average.
- int ProcessContent(const VideoContentMetrics *contentMetrics);
+ int ProcessContent(const VideoContentMetrics* contentMetrics);
// Update the recursive averaged metrics: longer time average (~5/10 secs).
- void UpdateRecursiveAvg(const VideoContentMetrics *contentMetrics);
+ void UpdateRecursiveAvg(const VideoContentMetrics* contentMetrics);
// Update the uniform averaged metrics: shorter time average (~RTCP report).
- void UpdateUniformAvg(const VideoContentMetrics *contentMetrics);
+ void UpdateUniformAvg(const VideoContentMetrics* contentMetrics);
VideoContentMetrics* recursive_avg_;
VideoContentMetrics* uniform_avg_;
diff --git a/webrtc/modules/video_coding/main/source/decoding_state.cc b/webrtc/modules/video_coding/decoding_state.cc
index cc92f1c83f..89be9b66c1 100644
--- a/webrtc/modules/video_coding/main/source/decoding_state.cc
+++ b/webrtc/modules/video_coding/decoding_state.cc
@@ -8,12 +8,12 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/video_coding/main/source/decoding_state.h"
+#include "webrtc/modules/video_coding/decoding_state.h"
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/modules/video_coding/main/source/frame_buffer.h"
-#include "webrtc/modules/video_coding/main/source/jitter_buffer_common.h"
-#include "webrtc/modules/video_coding/main/source/packet.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/video_coding/frame_buffer.h"
+#include "webrtc/modules/video_coding/jitter_buffer_common.h"
+#include "webrtc/modules/video_coding/packet.h"
namespace webrtc {
@@ -24,7 +24,9 @@ VCMDecodingState::VCMDecodingState()
temporal_id_(kNoTemporalIdx),
tl0_pic_id_(kNoTl0PicIdx),
full_sync_(true),
- in_initial_state_(true) {}
+ in_initial_state_(true) {
+ memset(frame_decoded_, 0, sizeof(frame_decoded_));
+}
VCMDecodingState::~VCMDecodingState() {}
@@ -37,6 +39,7 @@ void VCMDecodingState::Reset() {
tl0_pic_id_ = kNoTl0PicIdx;
full_sync_ = true;
in_initial_state_ = true;
+ memset(frame_decoded_, 0, sizeof(frame_decoded_));
}
uint32_t VCMDecodingState::time_stamp() const {
@@ -63,12 +66,33 @@ bool VCMDecodingState::IsOldPacket(const VCMPacket* packet) const {
void VCMDecodingState::SetState(const VCMFrameBuffer* frame) {
assert(frame != NULL && frame->GetHighSeqNum() >= 0);
- UpdateSyncState(frame);
+ if (!UsingFlexibleMode(frame))
+ UpdateSyncState(frame);
sequence_num_ = static_cast<uint16_t>(frame->GetHighSeqNum());
time_stamp_ = frame->TimeStamp();
picture_id_ = frame->PictureId();
temporal_id_ = frame->TemporalId();
tl0_pic_id_ = frame->Tl0PicId();
+
+ if (UsingFlexibleMode(frame)) {
+ uint16_t frame_index = picture_id_ % kFrameDecodedLength;
+ if (in_initial_state_) {
+ frame_decoded_cleared_to_ = frame_index;
+ } else if (frame->FrameType() == kVideoFrameKey) {
+ memset(frame_decoded_, 0, sizeof(frame_decoded_));
+ frame_decoded_cleared_to_ = frame_index;
+ } else {
+ if (AheadOfFramesDecodedClearedTo(frame_index)) {
+ while (frame_decoded_cleared_to_ != frame_index) {
+ frame_decoded_cleared_to_ =
+ (frame_decoded_cleared_to_ + 1) % kFrameDecodedLength;
+ frame_decoded_[frame_decoded_cleared_to_] = false;
+ }
+ }
+ }
+ frame_decoded_[frame_index] = true;
+ }
+
in_initial_state_ = false;
}
@@ -80,6 +104,8 @@ void VCMDecodingState::CopyFrom(const VCMDecodingState& state) {
tl0_pic_id_ = state.tl0_pic_id_;
full_sync_ = state.full_sync_;
in_initial_state_ = state.in_initial_state_;
+ frame_decoded_cleared_to_ = state.frame_decoded_cleared_to_;
+ memcpy(frame_decoded_, state.frame_decoded_, sizeof(frame_decoded_));
}
bool VCMDecodingState::UpdateEmptyFrame(const VCMFrameBuffer* frame) {
@@ -140,8 +166,8 @@ void VCMDecodingState::UpdateSyncState(const VCMFrameBuffer* frame) {
full_sync_ = ContinuousPictureId(frame->PictureId());
}
} else {
- full_sync_ = ContinuousSeqNum(static_cast<uint16_t>(
- frame->GetLowSeqNum()));
+ full_sync_ =
+ ContinuousSeqNum(static_cast<uint16_t>(frame->GetLowSeqNum()));
}
}
}
@@ -173,7 +199,11 @@ bool VCMDecodingState::ContinuousFrame(const VCMFrameBuffer* frame) const {
if (!full_sync_ && !frame->LayerSync())
return false;
if (UsingPictureId(frame)) {
- return ContinuousPictureId(frame->PictureId());
+ if (UsingFlexibleMode(frame)) {
+ return ContinuousFrameRefs(frame);
+ } else {
+ return ContinuousPictureId(frame->PictureId());
+ }
} else {
return ContinuousSeqNum(static_cast<uint16_t>(frame->GetLowSeqNum()));
}
@@ -199,8 +229,7 @@ bool VCMDecodingState::ContinuousSeqNum(uint16_t seq_num) const {
return seq_num == static_cast<uint16_t>(sequence_num_ + 1);
}
-bool VCMDecodingState::ContinuousLayer(int temporal_id,
- int tl0_pic_id) const {
+bool VCMDecodingState::ContinuousLayer(int temporal_id, int tl0_pic_id) const {
// First, check if applicable.
if (temporal_id == kNoTemporalIdx || tl0_pic_id == kNoTl0PicIdx)
return false;
@@ -216,8 +245,41 @@ bool VCMDecodingState::ContinuousLayer(int temporal_id,
return (static_cast<uint8_t>(tl0_pic_id_ + 1) == tl0_pic_id);
}
+bool VCMDecodingState::ContinuousFrameRefs(const VCMFrameBuffer* frame) const {
+ uint8_t num_refs = frame->CodecSpecific()->codecSpecific.VP9.num_ref_pics;
+ for (uint8_t r = 0; r < num_refs; ++r) {
+ uint16_t frame_ref = frame->PictureId() -
+ frame->CodecSpecific()->codecSpecific.VP9.p_diff[r];
+ uint16_t frame_index = frame_ref % kFrameDecodedLength;
+ if (AheadOfFramesDecodedClearedTo(frame_index) ||
+ !frame_decoded_[frame_index]) {
+ return false;
+ }
+ }
+ return true;
+}
+
bool VCMDecodingState::UsingPictureId(const VCMFrameBuffer* frame) const {
return (frame->PictureId() != kNoPictureId && picture_id_ != kNoPictureId);
}
+bool VCMDecodingState::UsingFlexibleMode(const VCMFrameBuffer* frame) const {
+ return frame->CodecSpecific()->codecType == kVideoCodecVP9 &&
+ frame->CodecSpecific()->codecSpecific.VP9.flexible_mode;
+}
+
+// TODO(philipel): change how this check works; it currently
+// limits the max p_diff to 64.
+bool VCMDecodingState::AheadOfFramesDecodedClearedTo(uint16_t index) const {
+ // No way of knowing for sure if we are actually ahead of
+ // frame_decoded_cleared_to_. We just make the assumption
+ // that we are not trying to reference back to a very old
+ // index, but instead are referencing a newer index.
+ uint16_t diff =
+ index > frame_decoded_cleared_to_
+ ? kFrameDecodedLength - (index - frame_decoded_cleared_to_)
+ : frame_decoded_cleared_to_ - index;
+ return diff > kFrameDecodedLength / 2;
+}
+
} // namespace webrtc
diff --git a/webrtc/modules/video_coding/main/source/decoding_state.h b/webrtc/modules/video_coding/decoding_state.h
index 99ee335195..f4ea8ae081 100644
--- a/webrtc/modules/video_coding/main/source/decoding_state.h
+++ b/webrtc/modules/video_coding/decoding_state.h
@@ -21,6 +21,11 @@ class VCMPacket;
class VCMDecodingState {
public:
+ // The max number of bits used to reference back
+ // to a previous frame when using flexible mode.
+ static const uint16_t kNumRefBits = 7;
+ static const uint16_t kFrameDecodedLength = 1 << kNumRefBits;
+
VCMDecodingState();
~VCMDecodingState();
// Check for old frame
@@ -52,17 +57,24 @@ class VCMDecodingState {
bool ContinuousPictureId(int picture_id) const;
bool ContinuousSeqNum(uint16_t seq_num) const;
bool ContinuousLayer(int temporal_id, int tl0_pic_id) const;
+ bool ContinuousFrameRefs(const VCMFrameBuffer* frame) const;
bool UsingPictureId(const VCMFrameBuffer* frame) const;
+ bool UsingFlexibleMode(const VCMFrameBuffer* frame) const;
+ bool AheadOfFramesDecodedClearedTo(uint16_t index) const;
// Keep state of last decoded frame.
// TODO(mikhal/stefan): create designated classes to handle these types.
- uint16_t sequence_num_;
- uint32_t time_stamp_;
- int picture_id_;
- int temporal_id_;
- int tl0_pic_id_;
- bool full_sync_; // Sync flag when temporal layers are used.
- bool in_initial_state_;
+ uint16_t sequence_num_;
+ uint32_t time_stamp_;
+ int picture_id_;
+ int temporal_id_;
+ int tl0_pic_id_;
+ bool full_sync_; // Sync flag when temporal layers are used.
+ bool in_initial_state_;
+
+ // Used to check references in flexible mode.
+ bool frame_decoded_[kFrameDecodedLength];
+ uint16_t frame_decoded_cleared_to_;
};
} // namespace webrtc
diff --git a/webrtc/modules/video_coding/main/source/decoding_state_unittest.cc b/webrtc/modules/video_coding/decoding_state_unittest.cc
index feae701a65..5f5d0d38b1 100644
--- a/webrtc/modules/video_coding/main/source/decoding_state_unittest.cc
+++ b/webrtc/modules/video_coding/decoding_state_unittest.cc
@@ -11,11 +11,11 @@
#include <string.h>
#include "testing/gtest/include/gtest/gtest.h"
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/modules/video_coding/main/source/decoding_state.h"
-#include "webrtc/modules/video_coding/main/source/frame_buffer.h"
-#include "webrtc/modules/video_coding/main/source/jitter_buffer_common.h"
-#include "webrtc/modules/video_coding/main/source/packet.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/video_coding/decoding_state.h"
+#include "webrtc/modules/video_coding/frame_buffer.h"
+#include "webrtc/modules/video_coding/jitter_buffer_common.h"
+#include "webrtc/modules/video_coding/packet.h"
namespace webrtc {
@@ -446,4 +446,254 @@ TEST(TestDecodingState, PictureIdRepeat) {
EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
}
+TEST(TestDecodingState, FrameContinuityFlexibleModeKeyFrame) {
+ VCMDecodingState dec_state;
+ VCMFrameBuffer frame;
+ VCMPacket packet;
+ packet.isFirstPacket = true;
+ packet.timestamp = 1;
+ packet.seqNum = 0xffff;
+ uint8_t data[] = "I need a data pointer for this test!";
+ packet.sizeBytes = sizeof(data);
+ packet.dataPtr = data;
+ packet.codecSpecificHeader.codec = kRtpVideoVp9;
+
+ RTPVideoHeaderVP9& vp9_hdr = packet.codecSpecificHeader.codecHeader.VP9;
+ vp9_hdr.picture_id = 10;
+ vp9_hdr.flexible_mode = true;
+
+ FrameData frame_data;
+ frame_data.rtt_ms = 0;
+ frame_data.rolling_average_packets_per_frame = -1;
+
+ // Key frame as first frame
+ packet.frameType = kVideoFrameKey;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+
+ // Key frame again
+ vp9_hdr.picture_id = 11;
+ frame.Reset();
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+
+ // Ref to 11, continuous
+ frame.Reset();
+ packet.frameType = kVideoFrameDelta;
+ vp9_hdr.picture_id = 12;
+ vp9_hdr.num_ref_pics = 1;
+ vp9_hdr.pid_diff[0] = 1;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+}
+
+TEST(TestDecodingState, FrameContinuityFlexibleModeOutOfOrderFrames) {
+ VCMDecodingState dec_state;
+ VCMFrameBuffer frame;
+ VCMPacket packet;
+ packet.isFirstPacket = true;
+ packet.timestamp = 1;
+ packet.seqNum = 0xffff;
+ uint8_t data[] = "I need a data pointer for this test!";
+ packet.sizeBytes = sizeof(data);
+ packet.dataPtr = data;
+ packet.codecSpecificHeader.codec = kRtpVideoVp9;
+
+ RTPVideoHeaderVP9& vp9_hdr = packet.codecSpecificHeader.codecHeader.VP9;
+ vp9_hdr.picture_id = 10;
+ vp9_hdr.flexible_mode = true;
+
+ FrameData frame_data;
+ frame_data.rtt_ms = 0;
+ frame_data.rolling_average_packets_per_frame = -1;
+
+ // Key frame as first frame
+ packet.frameType = kVideoFrameKey;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+
+ // Ref to 10, continuous
+ frame.Reset();
+ packet.frameType = kVideoFrameDelta;
+ vp9_hdr.picture_id = 15;
+ vp9_hdr.num_ref_pics = 1;
+ vp9_hdr.pid_diff[0] = 5;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+
+ // Out of order, last id 15, this id 12, ref to 10, continuous
+ frame.Reset();
+ vp9_hdr.picture_id = 12;
+ vp9_hdr.pid_diff[0] = 2;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+
+ // Ref 10, 12, 15, continuous
+ frame.Reset();
+ vp9_hdr.picture_id = 20;
+ vp9_hdr.num_ref_pics = 3;
+ vp9_hdr.pid_diff[0] = 10;
+ vp9_hdr.pid_diff[1] = 8;
+ vp9_hdr.pid_diff[2] = 5;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+}
+
+TEST(TestDecodingState, FrameContinuityFlexibleModeGeneral) {
+ VCMDecodingState dec_state;
+ VCMFrameBuffer frame;
+ VCMPacket packet;
+ packet.isFirstPacket = true;
+ packet.timestamp = 1;
+ packet.seqNum = 0xffff;
+ uint8_t data[] = "I need a data pointer for this test!";
+ packet.sizeBytes = sizeof(data);
+ packet.dataPtr = data;
+ packet.codecSpecificHeader.codec = kRtpVideoVp9;
+
+ RTPVideoHeaderVP9& vp9_hdr = packet.codecSpecificHeader.codecHeader.VP9;
+ vp9_hdr.picture_id = 10;
+ vp9_hdr.flexible_mode = true;
+
+ FrameData frame_data;
+ frame_data.rtt_ms = 0;
+ frame_data.rolling_average_packets_per_frame = -1;
+
+ // Key frame as first frame
+ packet.frameType = kVideoFrameKey;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+
+ // Delta frame as first frame
+ frame.Reset();
+ packet.frameType = kVideoFrameDelta;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
+
+ // Key frame then delta frame
+ frame.Reset();
+ packet.frameType = kVideoFrameKey;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ dec_state.SetState(&frame);
+ frame.Reset();
+ packet.frameType = kVideoFrameDelta;
+ vp9_hdr.num_ref_pics = 1;
+ vp9_hdr.picture_id = 15;
+ vp9_hdr.pid_diff[0] = 5;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+
+ // Ref to 11, not continuous
+ frame.Reset();
+ vp9_hdr.picture_id = 16;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
+
+ // Ref to 15, continuous
+ frame.Reset();
+ vp9_hdr.picture_id = 16;
+ vp9_hdr.pid_diff[0] = 1;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+
+ // Ref to 11 and 15, not continuous
+ frame.Reset();
+ vp9_hdr.picture_id = 20;
+ vp9_hdr.num_ref_pics = 2;
+ vp9_hdr.pid_diff[0] = 9;
+ vp9_hdr.pid_diff[1] = 5;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
+
+ // Ref to 10, 15 and 16, continuous
+ frame.Reset();
+ vp9_hdr.picture_id = 22;
+ vp9_hdr.num_ref_pics = 3;
+ vp9_hdr.pid_diff[0] = 12;
+ vp9_hdr.pid_diff[1] = 7;
+ vp9_hdr.pid_diff[2] = 6;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+
+ // Key Frame, continuous
+ frame.Reset();
+ packet.frameType = kVideoFrameKey;
+ vp9_hdr.picture_id = VCMDecodingState::kFrameDecodedLength - 2;
+ vp9_hdr.num_ref_pics = 0;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+
+ // Frame at last index, ref to KF, continuous
+ frame.Reset();
+ packet.frameType = kVideoFrameDelta;
+ vp9_hdr.picture_id = VCMDecodingState::kFrameDecodedLength - 1;
+ vp9_hdr.num_ref_pics = 1;
+ vp9_hdr.pid_diff[0] = 1;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+
+ // Frame after wrapping buffer length, ref to last index, continuous
+ frame.Reset();
+ vp9_hdr.picture_id = 0;
+ vp9_hdr.num_ref_pics = 1;
+ vp9_hdr.pid_diff[0] = 1;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+
+ // Frame after wrapping start frame, ref to 0, continuous
+ frame.Reset();
+ vp9_hdr.picture_id = 20;
+ vp9_hdr.num_ref_pics = 1;
+ vp9_hdr.pid_diff[0] = 20;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+
+ // Frame after wrapping start frame, ref to 10, not continuous
+ frame.Reset();
+ vp9_hdr.picture_id = 23;
+ vp9_hdr.num_ref_pics = 1;
+ vp9_hdr.pid_diff[0] = 13;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
+
+ // Key frame, continuous
+ frame.Reset();
+ packet.frameType = kVideoFrameKey;
+ vp9_hdr.picture_id = 25;
+ vp9_hdr.num_ref_pics = 0;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+
+ // Ref to KF, continuous
+ frame.Reset();
+ packet.frameType = kVideoFrameDelta;
+ vp9_hdr.picture_id = 26;
+ vp9_hdr.num_ref_pics = 1;
+ vp9_hdr.pid_diff[0] = 1;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+
+ // Ref to frame previous to KF, not continuous
+ frame.Reset();
+ vp9_hdr.picture_id = 30;
+ vp9_hdr.num_ref_pics = 1;
+ vp9_hdr.pid_diff[0] = 30;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
+}
+
} // namespace webrtc
diff --git a/webrtc/modules/video_coding/main/source/encoded_frame.cc b/webrtc/modules/video_coding/encoded_frame.cc
index d86704d632..261074ae73 100644
--- a/webrtc/modules/video_coding/main/source/encoded_frame.cc
+++ b/webrtc/modules/video_coding/encoded_frame.cc
@@ -8,10 +8,10 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/video_coding/main/interface/video_coding_defines.h"
-#include "webrtc/modules/video_coding/main/source/encoded_frame.h"
-#include "webrtc/modules/video_coding/main/source/generic_encoder.h"
-#include "webrtc/modules/video_coding/main/source/jitter_buffer_common.h"
+#include "webrtc/modules/video_coding/include/video_coding_defines.h"
+#include "webrtc/modules/video_coding/encoded_frame.h"
+#include "webrtc/modules/video_coding/generic_encoder.h"
+#include "webrtc/modules/video_coding/jitter_buffer_common.h"
namespace webrtc {
@@ -24,7 +24,7 @@ VCMEncodedFrame::VCMEncodedFrame()
_fragmentation(),
_rotation(kVideoRotation_0),
_rotation_set(false) {
- _codecSpecificInfo.codecType = kVideoCodecUnknown;
+ _codecSpecificInfo.codecType = kVideoCodecUnknown;
}
VCMEncodedFrame::VCMEncodedFrame(const webrtc::EncodedImage& rhs)
@@ -36,15 +36,14 @@ VCMEncodedFrame::VCMEncodedFrame(const webrtc::EncodedImage& rhs)
_fragmentation(),
_rotation(kVideoRotation_0),
_rotation_set(false) {
- _codecSpecificInfo.codecType = kVideoCodecUnknown;
- _buffer = NULL;
- _size = 0;
- _length = 0;
- if (rhs._buffer != NULL)
- {
- VerifyAndAllocate(rhs._length);
- memcpy(_buffer, rhs._buffer, rhs._length);
- }
+ _codecSpecificInfo.codecType = kVideoCodecUnknown;
+ _buffer = NULL;
+ _size = 0;
+ _length = 0;
+ if (rhs._buffer != NULL) {
+ VerifyAndAllocate(rhs._length);
+ memcpy(_buffer, rhs._buffer, rhs._length);
+ }
}
VCMEncodedFrame::VCMEncodedFrame(const VCMEncodedFrame& rhs)
@@ -60,49 +59,43 @@ VCMEncodedFrame::VCMEncodedFrame(const VCMEncodedFrame& rhs)
_buffer = NULL;
_size = 0;
_length = 0;
- if (rhs._buffer != NULL)
- {
- VerifyAndAllocate(rhs._length);
- memcpy(_buffer, rhs._buffer, rhs._length);
- _length = rhs._length;
+ if (rhs._buffer != NULL) {
+ VerifyAndAllocate(rhs._length);
+ memcpy(_buffer, rhs._buffer, rhs._length);
+ _length = rhs._length;
}
_fragmentation.CopyFrom(rhs._fragmentation);
}
-VCMEncodedFrame::~VCMEncodedFrame()
-{
- Free();
+VCMEncodedFrame::~VCMEncodedFrame() {
+ Free();
}
-void VCMEncodedFrame::Free()
-{
- Reset();
- if (_buffer != NULL)
- {
- delete [] _buffer;
- _buffer = NULL;
- }
+void VCMEncodedFrame::Free() {
+ Reset();
+ if (_buffer != NULL) {
+ delete[] _buffer;
+ _buffer = NULL;
+ }
}
-void VCMEncodedFrame::Reset()
-{
- _renderTimeMs = -1;
- _timeStamp = 0;
- _payloadType = 0;
- _frameType = kVideoFrameDelta;
- _encodedWidth = 0;
- _encodedHeight = 0;
- _completeFrame = false;
- _missingFrame = false;
- _length = 0;
- _codecSpecificInfo.codecType = kVideoCodecUnknown;
- _codec = kVideoCodecUnknown;
- _rotation = kVideoRotation_0;
- _rotation_set = false;
+void VCMEncodedFrame::Reset() {
+ _renderTimeMs = -1;
+ _timeStamp = 0;
+ _payloadType = 0;
+ _frameType = kVideoFrameDelta;
+ _encodedWidth = 0;
+ _encodedHeight = 0;
+ _completeFrame = false;
+ _missingFrame = false;
+ _length = 0;
+ _codecSpecificInfo.codecType = kVideoCodecUnknown;
+ _codec = kVideoCodecUnknown;
+ _rotation = kVideoRotation_0;
+ _rotation_set = false;
}
-void VCMEncodedFrame::CopyCodecSpecific(const RTPVideoHeader* header)
-{
+void VCMEncodedFrame::CopyCodecSpecific(const RTPVideoHeader* header) {
if (header) {
switch (header->codec) {
case kRtpVideoVp8: {
@@ -147,6 +140,12 @@ void VCMEncodedFrame::CopyCodecSpecific(const RTPVideoHeader* header)
header->codecHeader.VP9.inter_pic_predicted;
_codecSpecificInfo.codecSpecific.VP9.flexible_mode =
header->codecHeader.VP9.flexible_mode;
+ _codecSpecificInfo.codecSpecific.VP9.num_ref_pics =
+ header->codecHeader.VP9.num_ref_pics;
+ for (uint8_t r = 0; r < header->codecHeader.VP9.num_ref_pics; ++r) {
+ _codecSpecificInfo.codecSpecific.VP9.p_diff[r] =
+ header->codecHeader.VP9.pid_diff[r];
+ }
_codecSpecificInfo.codecSpecific.VP9.ss_data_available =
header->codecHeader.VP9.ss_data_available;
if (header->codecHeader.VP9.picture_id != kNoPictureId) {
@@ -209,21 +208,18 @@ const RTPFragmentationHeader* VCMEncodedFrame::FragmentationHeader() const {
return &_fragmentation;
}
-void VCMEncodedFrame::VerifyAndAllocate(size_t minimumSize)
-{
- if(minimumSize > _size)
- {
- // create buffer of sufficient size
- uint8_t* newBuffer = new uint8_t[minimumSize];
- if(_buffer)
- {
- // copy old data
- memcpy(newBuffer, _buffer, _size);
- delete [] _buffer;
- }
- _buffer = newBuffer;
- _size = minimumSize;
+void VCMEncodedFrame::VerifyAndAllocate(size_t minimumSize) {
+ if (minimumSize > _size) {
+ // create buffer of sufficient size
+ uint8_t* newBuffer = new uint8_t[minimumSize];
+ if (_buffer) {
+ // copy old data
+ memcpy(newBuffer, _buffer, _size);
+ delete[] _buffer;
}
+ _buffer = newBuffer;
+ _size = minimumSize;
+ }
}
} // namespace webrtc
diff --git a/webrtc/modules/video_coding/encoded_frame.h b/webrtc/modules/video_coding/encoded_frame.h
new file mode 100644
index 0000000000..9034200980
--- /dev/null
+++ b/webrtc/modules/video_coding/encoded_frame.h
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_ENCODED_FRAME_H_
+#define WEBRTC_MODULES_VIDEO_CODING_ENCODED_FRAME_H_
+
+#include <vector>
+
+#include "webrtc/common_types.h"
+#include "webrtc/common_video/include/video_image.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
+#include "webrtc/modules/video_coding/include/video_coding_defines.h"
+
+namespace webrtc {
+
+class VCMEncodedFrame : protected EncodedImage {
+ public:
+ VCMEncodedFrame();
+ explicit VCMEncodedFrame(const webrtc::EncodedImage& rhs);
+ VCMEncodedFrame(const VCMEncodedFrame& rhs);
+
+ ~VCMEncodedFrame();
+ /**
+ * Deletes the encoded frame buffer and resets members to zero
+ */
+ void Free();
+ /**
+ * Set render time in milliseconds
+ */
+ void SetRenderTime(const int64_t renderTimeMs) {
+ _renderTimeMs = renderTimeMs;
+ }
+
+ /**
+ * Set the encoded frame size
+ */
+ void SetEncodedSize(uint32_t width, uint32_t height) {
+ _encodedWidth = width;
+ _encodedHeight = height;
+ }
+ /**
+ * Get the encoded image
+ */
+ const webrtc::EncodedImage& EncodedImage() const {
+ return static_cast<const webrtc::EncodedImage&>(*this);
+ }
+ /**
+ * Get pointer to frame buffer
+ */
+ const uint8_t* Buffer() const { return _buffer; }
+ /**
+ * Get frame length
+ */
+ size_t Length() const { return _length; }
+ /**
+ * Get frame timestamp (90kHz)
+ */
+ uint32_t TimeStamp() const { return _timeStamp; }
+ /**
+ * Get render time in milliseconds
+ */
+ int64_t RenderTimeMs() const { return _renderTimeMs; }
+ /**
+ * Get frame type
+ */
+ webrtc::FrameType FrameType() const { return _frameType; }
+ /**
+ * Get frame rotation
+ */
+ VideoRotation rotation() const { return _rotation; }
+ /**
+ * True if this frame is complete, false otherwise
+ */
+ bool Complete() const { return _completeFrame; }
+ /**
+ * True if there's a frame missing before this frame
+ */
+ bool MissingFrame() const { return _missingFrame; }
+ /**
+ * Payload type of the encoded payload
+ */
+ uint8_t PayloadType() const { return _payloadType; }
+ /**
+ * Get codec specific info.
+ * The returned pointer is only valid as long as the VCMEncodedFrame
+ * is valid. Also, VCMEncodedFrame owns the pointer and will delete
+ * the object.
+ */
+ const CodecSpecificInfo* CodecSpecific() const { return &_codecSpecificInfo; }
+
+ const RTPFragmentationHeader* FragmentationHeader() const;
+
+ protected:
+ /**
+ * Verifies that the currently allocated buffer size is larger than or
+ * equal to the input size.
+ * If the current buffer size is smaller, a new allocation is made and
+ * the old buffer data is copied over to the new buffer.
+ * The buffer size is then updated to minimumSize to reflect the new
+ * allocation.
+ */
+ void VerifyAndAllocate(size_t minimumSize);
+
+ void Reset();
+
+ void CopyCodecSpecific(const RTPVideoHeader* header);
+
+ int64_t _renderTimeMs;
+ uint8_t _payloadType;
+ bool _missingFrame;
+ CodecSpecificInfo _codecSpecificInfo;
+ webrtc::VideoCodecType _codec;
+ RTPFragmentationHeader _fragmentation;
+ VideoRotation _rotation;
+
+ // Video rotation is only set along with the last packet for each frame
+ // (same as marker bit). This |_rotation_set| is only for debugging purpose
+ // to ensure we don't set it twice for a frame.
+ bool _rotation_set;
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_CODING_ENCODED_FRAME_H_
diff --git a/webrtc/modules/video_coding/fec_tables_xor.h b/webrtc/modules/video_coding/fec_tables_xor.h
new file mode 100644
index 0000000000..fa5bd7bde4
--- /dev/null
+++ b/webrtc/modules/video_coding/fec_tables_xor.h
@@ -0,0 +1,459 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_FEC_TABLES_XOR_H_
+#define WEBRTC_MODULES_VIDEO_CODING_FEC_TABLES_XOR_H_
+
+// This is a private header for media_opt_util.cc.
+// It should not be included by other files.
+
+namespace webrtc {
+
+// Table for Protection factor (code rate) of delta frames, for the XOR FEC.
+// Input is the packet loss and an effective rate (bits/frame).
+// Output is array kCodeRateXORTable[k], where k = rate_i*129 + loss_j;
+// loss_j = 0,1,..128, and rate_i varies over some range.
+static const int kSizeCodeRateXORTable = 6450;
+static const unsigned char kCodeRateXORTable[kSizeCodeRateXORTable] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+ 11, 11, 11, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39,
+ 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39,
+ 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39,
+ 39, 39, 39, 39, 39, 39, 51, 51, 51, 51, 51, 51, 51, 51, 51,
+ 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51,
+ 51, 51, 51, 51, 51, 51, 51, 51, 51, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 30, 30, 30,
+ 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 56, 56, 56,
+ 56, 56, 56, 56, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65,
+ 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65,
+ 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65,
+ 87, 87, 87, 87, 87, 87, 87, 87, 87, 87, 87, 87, 87, 87, 87,
+ 87, 87, 87, 87, 87, 87, 87, 87, 87, 78, 78, 78, 78, 78, 78,
+ 78, 78, 78, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 6, 6, 6, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 44, 44, 44, 44, 44, 44, 50, 50, 50,
+ 50, 50, 50, 50, 50, 50, 68, 68, 68, 68, 68, 68, 68, 85, 85,
+ 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85,
+ 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85,
+ 85, 85, 85, 85, 85, 85, 85, 85, 85, 105, 105, 105, 105, 105, 105,
+ 105, 105, 105, 105, 105, 105, 105, 105, 105, 105, 105, 105, 105, 105, 105,
+ 105, 105, 105, 88, 88, 88, 88, 88, 88, 88, 88, 88, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 5, 5, 5, 5, 5, 5, 19, 19, 19,
+ 36, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41,
+ 55, 55, 55, 55, 55, 55, 69, 69, 69, 69, 69, 69, 69, 69, 69,
+ 75, 75, 80, 80, 80, 80, 80, 97, 97, 97, 97, 97, 97, 97, 97,
+ 97, 97, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102,
+ 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102,
+ 102, 102, 102, 116, 116, 116, 116, 116, 116, 116, 116, 116, 116, 116, 116,
+ 116, 116, 116, 116, 116, 116, 116, 116, 116, 116, 116, 116, 100, 100, 100,
+ 100, 100, 100, 100, 100, 100, 0, 0, 0, 0, 0, 0, 0, 0, 4,
+ 16, 16, 16, 16, 16, 16, 30, 35, 35, 47, 58, 58, 58, 58, 58,
+ 58, 58, 58, 58, 58, 58, 58, 58, 58, 63, 63, 63, 63, 63, 63,
+ 77, 77, 77, 77, 77, 77, 77, 82, 82, 82, 82, 94, 94, 94, 94,
+ 94, 105, 105, 105, 105, 110, 110, 110, 110, 110, 110, 122, 122, 122, 122,
+ 122, 122, 122, 122, 122, 122, 122, 122, 122, 122, 122, 122, 122, 122, 122,
+ 122, 122, 122, 122, 122, 122, 122, 122, 122, 122, 122, 122, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 115, 115, 115, 115, 115, 115, 115, 115, 115,
+ 0, 0, 0, 0, 0, 0, 0, 4, 14, 27, 27, 27, 27, 27, 31,
+ 41, 52, 52, 56, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69,
+ 69, 69, 69, 69, 69, 69, 69, 69, 69, 79, 79, 79, 79, 83, 83,
+ 83, 94, 94, 94, 94, 106, 106, 106, 106, 106, 115, 115, 115, 115, 125,
+ 125, 125, 125, 125, 125, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 0, 0, 0, 3, 3,
+ 3, 17, 28, 38, 38, 38, 38, 38, 47, 51, 63, 63, 63, 72, 72,
+ 72, 72, 72, 72, 72, 76, 76, 76, 76, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 84, 84, 84, 84, 93, 93, 93, 105, 105, 105, 105, 114,
+ 114, 114, 114, 114, 124, 124, 124, 124, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 0, 0, 0, 0, 12, 12, 12, 35, 43, 47, 47, 47,
+ 47, 47, 58, 58, 66, 66, 66, 70, 70, 70, 70, 70, 73, 73, 82,
+ 82, 82, 86, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94,
+ 94, 105, 105, 105, 114, 114, 114, 114, 117, 117, 117, 117, 117, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 0, 0,
+ 0, 24, 24, 24, 49, 53, 53, 53, 53, 53, 53, 61, 61, 64, 64,
+ 64, 64, 70, 70, 70, 70, 78, 78, 88, 88, 88, 96, 106, 106, 106,
+ 106, 106, 106, 106, 106, 106, 106, 112, 112, 112, 120, 120, 120, 124, 124,
+ 124, 124, 124, 124, 124, 124, 124, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 0, 0, 0, 5, 36, 36, 36, 55, 55,
+ 55, 55, 55, 55, 55, 58, 58, 58, 58, 58, 64, 78, 78, 78, 78,
+ 87, 87, 94, 94, 94, 103, 110, 110, 110, 110, 110, 110, 110, 110, 116,
+ 116, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 0, 0, 0, 18, 43, 43, 43, 53, 53, 53, 53, 53, 53, 53, 53,
+ 58, 58, 58, 58, 71, 87, 87, 87, 87, 94, 94, 97, 97, 97, 109,
+ 111, 111, 111, 111, 111, 111, 111, 111, 125, 125, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 0, 0, 31, 46, 46,
+ 46, 48, 48, 48, 48, 48, 48, 48, 48, 66, 66, 66, 66, 80, 93,
+ 93, 93, 93, 95, 95, 95, 95, 100, 115, 115, 115, 115, 115, 115, 115,
+ 115, 115, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 0, 0, 4, 40, 45, 45, 45, 45, 45, 45, 45, 45,
+ 49, 49, 49, 74, 74, 74, 74, 86, 90, 90, 90, 90, 95, 95, 95,
+ 95, 106, 120, 120, 120, 120, 120, 120, 120, 120, 120, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 0, 14,
+ 42, 42, 42, 42, 42, 42, 42, 42, 46, 56, 56, 56, 80, 80, 80,
+ 80, 84, 84, 84, 84, 88, 99, 99, 99, 99, 111, 122, 122, 122, 122,
+ 122, 122, 122, 122, 122, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 0, 0, 26, 40, 40, 40, 40, 40, 40,
+ 40, 40, 54, 66, 66, 66, 80, 80, 80, 80, 80, 80, 80, 84, 94,
+ 106, 106, 106, 106, 116, 120, 120, 120, 120, 120, 120, 120, 120, 124, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 0, 3, 34, 38, 38, 38, 38, 38, 42, 42, 42, 63, 72, 72, 76,
+ 80, 80, 80, 80, 80, 80, 80, 89, 101, 114, 114, 114, 114, 118, 118,
+ 118, 118, 118, 118, 118, 118, 118, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 12, 36, 36, 36, 36,
+ 36, 36, 49, 49, 49, 69, 73, 76, 86, 86, 86, 86, 86, 86, 86,
+ 86, 97, 109, 122, 122, 122, 122, 122, 122, 122, 122, 122, 122, 122, 122,
+ 122, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 0, 22, 34, 34, 34, 34, 38, 38, 57, 57, 57, 69,
+ 73, 82, 92, 92, 92, 92, 92, 92, 96, 96, 104, 117, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 29, 33,
+ 33, 33, 33, 44, 44, 62, 62, 62, 69, 77, 87, 95, 95, 95, 95,
+ 95, 95, 107, 107, 110, 120, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 0, 31, 31, 31, 31, 31, 51, 51, 62,
+ 65, 65, 73, 83, 91, 94, 94, 94, 94, 97, 97, 114, 114, 114, 122,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 0, 29, 29, 29, 29, 29, 56, 56, 59, 70, 70, 79, 86, 89, 89,
+ 89, 89, 89, 100, 100, 116, 116, 116, 122, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 28, 28, 28, 28, 28,
+ 57, 57, 57, 76, 76, 83, 86, 86, 86, 86, 86, 89, 104, 104, 114,
+ 114, 114, 124, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 0, 27, 27, 27, 27, 30, 55, 55, 55, 80, 80, 83,
+ 86, 86, 86, 86, 86, 93, 108, 108, 111, 111, 111, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 26, 26,
+ 26, 26, 36, 53, 53, 53, 80, 80, 80, 90, 90, 90, 90, 90, 98,
+ 107, 107, 107, 107, 107, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 0, 26, 26, 26, 28, 42, 52, 54, 54,
+ 78, 78, 78, 95, 95, 95, 97, 97, 104, 106, 106, 106, 106, 106, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 0, 24, 24, 24, 33, 47, 49, 58, 58, 74, 74, 74, 97, 97, 97,
+ 106, 106, 108, 108, 108, 108, 108, 108, 124, 124, 124, 124, 124, 124, 124,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 24, 24, 24, 39, 48,
+ 50, 63, 63, 72, 74, 74, 96, 96, 96, 109, 111, 111, 111, 111, 111,
+ 111, 111, 119, 119, 122, 122, 122, 122, 122, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 0, 23, 23, 23, 43, 46, 54, 66, 66, 69, 77, 77,
+ 92, 92, 92, 105, 113, 113, 113, 113, 113, 113, 113, 115, 117, 123, 123,
+ 123, 123, 123, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 22, 22,
+ 22, 44, 44, 59, 67, 67, 67, 81, 81, 89, 89, 89, 97, 112, 112,
+ 112, 112, 112, 112, 112, 112, 119, 126, 126, 126, 126, 126, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 0, 21, 21, 24, 43, 45, 63, 65, 65,
+ 67, 85, 85, 87, 87, 87, 91, 109, 109, 109, 111, 111, 111, 111, 111,
+ 123, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 0, 21, 21, 28, 42, 50, 63, 63, 66, 71, 85, 85, 85, 85, 87,
+ 92, 106, 106, 108, 114, 114, 114, 114, 114, 125, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 20, 20, 34, 41, 54,
+ 62, 62, 69, 75, 82, 82, 82, 82, 92, 98, 105, 105, 110, 117, 117,
+ 117, 117, 117, 124, 124, 126, 126, 126, 126, 126, 126, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 0, 20, 20, 38, 40, 58, 60, 60, 73, 78, 80, 80,
+ 80, 80, 100, 105, 107, 107, 113, 118, 118, 118, 118, 118, 120, 120, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 19, 21,
+ 38, 40, 58, 58, 60, 75, 77, 77, 77, 81, 81, 107, 109, 109, 109,
+ 114, 116, 116, 116, 116, 116, 116, 116, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 0, 18, 25, 37, 44, 56, 56, 63, 75,
+ 75, 75, 75, 88, 88, 111, 111, 111, 111, 112, 112, 112, 112, 112, 112,
+ 112, 114, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 0, 18, 30, 36, 48, 55, 55, 67, 73, 73, 73, 73, 97, 97, 110,
+ 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 116, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 18, 34, 36, 52, 55,
+ 55, 70, 72, 73, 73, 73, 102, 104, 108, 108, 108, 108, 109, 109, 109,
+ 109, 109, 109, 109, 119, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 0, 17, 35, 35, 52, 59, 59, 70, 70, 76, 76, 76,
+ 99, 105, 105, 105, 105, 105, 111, 111, 111, 111, 111, 111, 111, 121, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 17, 34,
+ 36, 51, 61, 62, 70, 70, 80, 80, 80, 93, 103, 103, 103, 103, 103,
+ 112, 112, 112, 112, 112, 116, 118, 124, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 0, 16, 33, 39, 50, 59, 65, 72, 72,
+ 82, 82, 82, 91, 100, 100, 100, 100, 100, 109, 109, 109, 109, 109, 121,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 0, 16, 32, 43, 48, 54, 66, 75, 75, 81, 83, 83, 92, 97, 97,
+ 97, 99, 99, 105, 105, 105, 105, 105, 123, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 15, 31, 46, 47, 49,
+ 69, 77, 77, 81, 85, 85, 93, 95, 95, 95, 100, 100, 102, 102, 102,
+ 102, 102, 120, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 0, 15, 30, 46, 48, 48, 70, 75, 79, 82, 87, 87,
+ 92, 94, 94, 94, 103, 103, 103, 103, 103, 104, 104, 115, 120, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 15, 30,
+ 45, 50, 50, 68, 70, 80, 85, 89, 89, 90, 95, 95, 95, 104, 104,
+ 104, 104, 104, 109, 109, 112, 114, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 0, 14, 29, 44, 54, 54, 64, 64, 83,
+ 87, 88, 88, 88, 98, 98, 98, 103, 103, 103, 103, 103, 113, 113, 113,
+ 113, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 0, 14, 29, 43, 56, 56, 61, 61, 84, 85, 88, 88, 88, 100, 100,
+ 100, 102, 102, 102, 102, 102, 113, 116, 116, 116, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 14, 28, 42, 57, 57,
+ 62, 62, 80, 80, 91, 91, 91, 100, 100, 100, 100, 100, 100, 100, 100,
+ 109, 119, 119, 119, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 0, 14, 28, 42, 56, 56, 65, 66, 76, 76, 92, 92,
+ 92, 97, 97, 97, 101, 101, 101, 101, 101, 106, 121, 121, 121, 126, 126,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 0, 13, 27,
+ 41, 55, 55, 67, 72, 74, 74, 90, 90, 90, 91, 91, 91, 105, 105,
+ 105, 105, 105, 107, 122, 122, 122, 123, 123, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 0, 13, 27, 40, 54, 54, 67, 76, 76,
+ 76, 85, 85, 85, 85, 85, 85, 112, 112, 112, 112, 112, 112, 121, 121,
+ 121, 121, 121, 126, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_CODING_FEC_TABLES_XOR_H_
diff --git a/webrtc/modules/video_coding/frame_buffer.cc b/webrtc/modules/video_coding/frame_buffer.cc
new file mode 100644
index 0000000000..b6ddeda4e7
--- /dev/null
+++ b/webrtc/modules/video_coding/frame_buffer.cc
@@ -0,0 +1,270 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/video_coding/frame_buffer.h"
+
+#include <assert.h>
+#include <string.h>
+
+#include "webrtc/base/checks.h"
+#include "webrtc/base/logging.h"
+#include "webrtc/modules/video_coding/packet.h"
+
+namespace webrtc {
+
+VCMFrameBuffer::VCMFrameBuffer()
+ : _state(kStateEmpty), _nackCount(0), _latestPacketTimeMs(-1) {}
+
+VCMFrameBuffer::~VCMFrameBuffer() {}
+
+VCMFrameBuffer::VCMFrameBuffer(const VCMFrameBuffer& rhs)
+ : VCMEncodedFrame(rhs),
+ _state(rhs._state),
+ _sessionInfo(),
+ _nackCount(rhs._nackCount),
+ _latestPacketTimeMs(rhs._latestPacketTimeMs) {
+ _sessionInfo = rhs._sessionInfo;
+ _sessionInfo.UpdateDataPointers(rhs._buffer, _buffer);
+}
+
+webrtc::FrameType VCMFrameBuffer::FrameType() const {
+ return _sessionInfo.FrameType();
+}
+
+int32_t VCMFrameBuffer::GetLowSeqNum() const {
+ return _sessionInfo.LowSequenceNumber();
+}
+
+int32_t VCMFrameBuffer::GetHighSeqNum() const {
+ return _sessionInfo.HighSequenceNumber();
+}
+
+int VCMFrameBuffer::PictureId() const {
+ return _sessionInfo.PictureId();
+}
+
+int VCMFrameBuffer::TemporalId() const {
+ return _sessionInfo.TemporalId();
+}
+
+bool VCMFrameBuffer::LayerSync() const {
+ return _sessionInfo.LayerSync();
+}
+
+int VCMFrameBuffer::Tl0PicId() const {
+ return _sessionInfo.Tl0PicId();
+}
+
+bool VCMFrameBuffer::NonReference() const {
+ return _sessionInfo.NonReference();
+}
+
+void VCMFrameBuffer::SetGofInfo(const GofInfoVP9& gof_info, size_t idx) {
+ _sessionInfo.SetGofInfo(gof_info, idx);
+ // TODO(asapersson): Consider adding hdr->VP9.ref_picture_id for testing.
+ _codecSpecificInfo.codecSpecific.VP9.temporal_idx =
+ gof_info.temporal_idx[idx];
+ _codecSpecificInfo.codecSpecific.VP9.temporal_up_switch =
+ gof_info.temporal_up_switch[idx];
+}
+
+bool VCMFrameBuffer::IsSessionComplete() const {
+ return _sessionInfo.complete();
+}
+
+// Insert packet
+VCMFrameBufferEnum VCMFrameBuffer::InsertPacket(
+ const VCMPacket& packet,
+ int64_t timeInMs,
+ VCMDecodeErrorMode decode_error_mode,
+ const FrameData& frame_data) {
+ assert(!(NULL == packet.dataPtr && packet.sizeBytes > 0));
+ if (packet.dataPtr != NULL) {
+ _payloadType = packet.payloadType;
+ }
+
+ if (kStateEmpty == _state) {
+ // First packet (empty and/or media) inserted into this frame.
+ // store some info and set some initial values.
+ _timeStamp = packet.timestamp;
+ // We only take the ntp timestamp of the first packet of a frame.
+ ntp_time_ms_ = packet.ntp_time_ms_;
+ _codec = packet.codec;
+ if (packet.frameType != kEmptyFrame) {
+ // first media packet
+ SetState(kStateIncomplete);
+ }
+ }
+
+ uint32_t requiredSizeBytes =
+ Length() + packet.sizeBytes +
+ (packet.insertStartCode ? kH264StartCodeLengthBytes : 0);
+ if (requiredSizeBytes >= _size) {
+ const uint8_t* prevBuffer = _buffer;
+ const uint32_t increments =
+ requiredSizeBytes / kBufferIncStepSizeBytes +
+ (requiredSizeBytes % kBufferIncStepSizeBytes > 0);
+ const uint32_t newSize = _size + increments * kBufferIncStepSizeBytes;
+ if (newSize > kMaxJBFrameSizeBytes) {
+ LOG(LS_ERROR) << "Failed to insert packet due to frame being too "
+ "big.";
+ return kSizeError;
+ }
+ VerifyAndAllocate(newSize);
+ _sessionInfo.UpdateDataPointers(prevBuffer, _buffer);
+ }
+
+ if (packet.width > 0 && packet.height > 0) {
+ _encodedWidth = packet.width;
+ _encodedHeight = packet.height;
+ }
+
+ // Don't copy payload specific data for empty packets (e.g padding packets).
+ if (packet.sizeBytes > 0)
+ CopyCodecSpecific(&packet.codecSpecificHeader);
+
+ int retVal =
+ _sessionInfo.InsertPacket(packet, _buffer, decode_error_mode, frame_data);
+ if (retVal == -1) {
+ return kSizeError;
+ } else if (retVal == -2) {
+ return kDuplicatePacket;
+ } else if (retVal == -3) {
+ return kOutOfBoundsPacket;
+ }
+ // update length
+ _length = Length() + static_cast<uint32_t>(retVal);
+
+ _latestPacketTimeMs = timeInMs;
+
+ // http://www.etsi.org/deliver/etsi_ts/126100_126199/126114/12.07.00_60/
+ // ts_126114v120700p.pdf Section 7.4.5.
+ // The MTSI client shall add the payload bytes as defined in this clause
+ // onto the last RTP packet in each group of packets which make up a key
+ // frame (I-frame or IDR frame in H.264 (AVC), or an IRAP picture in H.265
+ // (HEVC)).
+ if (packet.markerBit) {
+ RTC_DCHECK(!_rotation_set);
+ _rotation = packet.codecSpecificHeader.rotation;
+ _rotation_set = true;
+ }
+
+ if (_sessionInfo.complete()) {
+ SetState(kStateComplete);
+ return kCompleteSession;
+ } else if (_sessionInfo.decodable()) {
+ SetState(kStateDecodable);
+ return kDecodableSession;
+ }
+ return kIncomplete;
+}
+
+int64_t VCMFrameBuffer::LatestPacketTimeMs() const {
+ return _latestPacketTimeMs;
+}
+
+void VCMFrameBuffer::IncrementNackCount() {
+ _nackCount++;
+}
+
+int16_t VCMFrameBuffer::GetNackCount() const {
+ return _nackCount;
+}
+
+bool VCMFrameBuffer::HaveFirstPacket() const {
+ return _sessionInfo.HaveFirstPacket();
+}
+
+bool VCMFrameBuffer::HaveLastPacket() const {
+ return _sessionInfo.HaveLastPacket();
+}
+
+int VCMFrameBuffer::NumPackets() const {
+ return _sessionInfo.NumPackets();
+}
+
+void VCMFrameBuffer::Reset() {
+ _length = 0;
+ _timeStamp = 0;
+ _sessionInfo.Reset();
+ _payloadType = 0;
+ _nackCount = 0;
+ _latestPacketTimeMs = -1;
+ _state = kStateEmpty;
+ VCMEncodedFrame::Reset();
+}
+
+// Set state of frame
+void VCMFrameBuffer::SetState(VCMFrameBufferStateEnum state) {
+ if (_state == state) {
+ return;
+ }
+ switch (state) {
+ case kStateIncomplete:
+ // we can go to this state from state kStateEmpty
+ assert(_state == kStateEmpty);
+
+ // Do nothing, we received a packet
+ break;
+
+ case kStateComplete:
+ assert(_state == kStateEmpty || _state == kStateIncomplete ||
+ _state == kStateDecodable);
+
+ break;
+
+ case kStateEmpty:
+ // Should only be set to empty through Reset().
+ assert(false);
+ break;
+
+ case kStateDecodable:
+ assert(_state == kStateEmpty || _state == kStateIncomplete);
+ break;
+ }
+ _state = state;
+}
+
+// Get current state of frame
+VCMFrameBufferStateEnum VCMFrameBuffer::GetState() const {
+ return _state;
+}
+
+// Get current state of frame
+VCMFrameBufferStateEnum VCMFrameBuffer::GetState(uint32_t& timeStamp) const {
+ timeStamp = TimeStamp();
+ return GetState();
+}
+
+bool VCMFrameBuffer::IsRetransmitted() const {
+ return _sessionInfo.session_nack();
+}
+
+void VCMFrameBuffer::PrepareForDecode(bool continuous) {
+#ifdef INDEPENDENT_PARTITIONS
+ if (_codec == kVideoCodecVP8) {
+ _length = _sessionInfo.BuildVP8FragmentationHeader(_buffer, _length,
+ &_fragmentation);
+ } else {
+ size_t bytes_removed = _sessionInfo.MakeDecodable();
+ _length -= bytes_removed;
+ }
+#else
+ size_t bytes_removed = _sessionInfo.MakeDecodable();
+ _length -= bytes_removed;
+#endif
+ // Transfer frame information to EncodedFrame and create any codec
+ // specific information.
+ _frameType = _sessionInfo.FrameType();
+ _completeFrame = _sessionInfo.complete();
+ _missingFrame = !continuous;
+}
+
+} // namespace webrtc
diff --git a/webrtc/modules/video_coding/main/source/frame_buffer.h b/webrtc/modules/video_coding/frame_buffer.h
index ab4ff6574e..f5a707efe4 100644
--- a/webrtc/modules/video_coding/main/source/frame_buffer.h
+++ b/webrtc/modules/video_coding/frame_buffer.h
@@ -8,14 +8,14 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_FRAME_BUFFER_H_
-#define WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_FRAME_BUFFER_H_
-
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/modules/video_coding/main/interface/video_coding.h"
-#include "webrtc/modules/video_coding/main/source/encoded_frame.h"
-#include "webrtc/modules/video_coding/main/source/jitter_buffer_common.h"
-#include "webrtc/modules/video_coding/main/source/session_info.h"
+#ifndef WEBRTC_MODULES_VIDEO_CODING_FRAME_BUFFER_H_
+#define WEBRTC_MODULES_VIDEO_CODING_FRAME_BUFFER_H_
+
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/video_coding/include/video_coding.h"
+#include "webrtc/modules/video_coding/encoded_frame.h"
+#include "webrtc/modules/video_coding/jitter_buffer_common.h"
+#include "webrtc/modules/video_coding/session_info.h"
#include "webrtc/typedefs.h"
namespace webrtc {
@@ -89,4 +89,4 @@ class VCMFrameBuffer : public VCMEncodedFrame {
} // namespace webrtc
-#endif // WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_FRAME_BUFFER_H_
+#endif // WEBRTC_MODULES_VIDEO_CODING_FRAME_BUFFER_H_
diff --git a/webrtc/modules/video_coding/generic_decoder.cc b/webrtc/modules/video_coding/generic_decoder.cc
new file mode 100644
index 0000000000..5cbe0f5ba0
--- /dev/null
+++ b/webrtc/modules/video_coding/generic_decoder.cc
@@ -0,0 +1,192 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/base/logging.h"
+#include "webrtc/base/trace_event.h"
+#include "webrtc/modules/video_coding/include/video_coding.h"
+#include "webrtc/modules/video_coding/generic_decoder.h"
+#include "webrtc/modules/video_coding/internal_defines.h"
+#include "webrtc/system_wrappers/include/clock.h"
+
+namespace webrtc {
+
+VCMDecodedFrameCallback::VCMDecodedFrameCallback(VCMTiming* timing,
+ Clock* clock)
+ : _critSect(CriticalSectionWrapper::CreateCriticalSection()),
+ _clock(clock),
+ _receiveCallback(NULL),
+ _timing(timing),
+ _timestampMap(kDecoderFrameMemoryLength),
+ _lastReceivedPictureID(0) {}
+
+VCMDecodedFrameCallback::~VCMDecodedFrameCallback() {
+ delete _critSect;
+}
+
+void VCMDecodedFrameCallback::SetUserReceiveCallback(
+ VCMReceiveCallback* receiveCallback) {
+ CriticalSectionScoped cs(_critSect);
+ _receiveCallback = receiveCallback;
+}
+
+VCMReceiveCallback* VCMDecodedFrameCallback::UserReceiveCallback() {
+ CriticalSectionScoped cs(_critSect);
+ return _receiveCallback;
+}
+
+int32_t VCMDecodedFrameCallback::Decoded(VideoFrame& decodedImage) {
+ return Decoded(decodedImage, -1);
+}
+
+int32_t VCMDecodedFrameCallback::Decoded(VideoFrame& decodedImage,
+ int64_t decode_time_ms) {
+ TRACE_EVENT_INSTANT1("webrtc", "VCMDecodedFrameCallback::Decoded",
+ "timestamp", decodedImage.timestamp());
+ // TODO(holmer): We should improve this so that we can handle multiple
+ // callbacks from one call to Decode().
+ VCMFrameInformation* frameInfo;
+ VCMReceiveCallback* callback;
+ {
+ CriticalSectionScoped cs(_critSect);
+ frameInfo = _timestampMap.Pop(decodedImage.timestamp());
+ callback = _receiveCallback;
+ }
+
+ if (frameInfo == NULL) {
+ LOG(LS_WARNING) << "Too many frames backed up in the decoder, dropping "
+ "this one.";
+ return WEBRTC_VIDEO_CODEC_OK;
+ }
+
+ const int64_t now_ms = _clock->TimeInMilliseconds();
+ if (decode_time_ms < 0) {
+ decode_time_ms =
+ static_cast<int32_t>(now_ms - frameInfo->decodeStartTimeMs);
+ }
+ _timing->StopDecodeTimer(decodedImage.timestamp(), decode_time_ms, now_ms,
+ frameInfo->renderTimeMs);
+
+ if (callback != NULL) {
+ decodedImage.set_render_time_ms(frameInfo->renderTimeMs);
+ decodedImage.set_rotation(frameInfo->rotation);
+ callback->FrameToRender(decodedImage);
+ }
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int32_t VCMDecodedFrameCallback::ReceivedDecodedReferenceFrame(
+ const uint64_t pictureId) {
+ CriticalSectionScoped cs(_critSect);
+ if (_receiveCallback != NULL) {
+ return _receiveCallback->ReceivedDecodedReferenceFrame(pictureId);
+ }
+ return -1;
+}
+
+int32_t VCMDecodedFrameCallback::ReceivedDecodedFrame(
+ const uint64_t pictureId) {
+ _lastReceivedPictureID = pictureId;
+ return 0;
+}
+
+uint64_t VCMDecodedFrameCallback::LastReceivedPictureID() const {
+ return _lastReceivedPictureID;
+}
+
+void VCMDecodedFrameCallback::OnDecoderImplementationName(
+ const char* implementation_name) {
+ CriticalSectionScoped cs(_critSect);
+ if (_receiveCallback)
+ _receiveCallback->OnDecoderImplementationName(implementation_name);
+}
+
+void VCMDecodedFrameCallback::Map(uint32_t timestamp,
+ VCMFrameInformation* frameInfo) {
+ CriticalSectionScoped cs(_critSect);
+ _timestampMap.Add(timestamp, frameInfo);
+}
+
+int32_t VCMDecodedFrameCallback::Pop(uint32_t timestamp) {
+ CriticalSectionScoped cs(_critSect);
+ if (_timestampMap.Pop(timestamp) == NULL) {
+ return VCM_GENERAL_ERROR;
+ }
+ return VCM_OK;
+}
+
+VCMGenericDecoder::VCMGenericDecoder(VideoDecoder* decoder, bool isExternal)
+ : _callback(NULL),
+ _frameInfos(),
+ _nextFrameInfoIdx(0),
+ _decoder(decoder),
+ _codecType(kVideoCodecUnknown),
+ _isExternal(isExternal),
+ _keyFrameDecoded(false) {}
+
+VCMGenericDecoder::~VCMGenericDecoder() {}
+
+int32_t VCMGenericDecoder::InitDecode(const VideoCodec* settings,
+ int32_t numberOfCores) {
+ TRACE_EVENT0("webrtc", "VCMGenericDecoder::InitDecode");
+ _codecType = settings->codecType;
+
+ return _decoder->InitDecode(settings, numberOfCores);
+}
+
+int32_t VCMGenericDecoder::Decode(const VCMEncodedFrame& frame, int64_t nowMs) {
+ TRACE_EVENT1("webrtc", "VCMGenericDecoder::Decode", "timestamp",
+ frame.EncodedImage()._timeStamp);
+ _frameInfos[_nextFrameInfoIdx].decodeStartTimeMs = nowMs;
+ _frameInfos[_nextFrameInfoIdx].renderTimeMs = frame.RenderTimeMs();
+ _frameInfos[_nextFrameInfoIdx].rotation = frame.rotation();
+ _callback->Map(frame.TimeStamp(), &_frameInfos[_nextFrameInfoIdx]);
+
+ _nextFrameInfoIdx = (_nextFrameInfoIdx + 1) % kDecoderFrameMemoryLength;
+ int32_t ret = _decoder->Decode(frame.EncodedImage(), frame.MissingFrame(),
+ frame.FragmentationHeader(),
+ frame.CodecSpecific(), frame.RenderTimeMs());
+
+ _callback->OnDecoderImplementationName(_decoder->ImplementationName());
+ if (ret < WEBRTC_VIDEO_CODEC_OK) {
+ LOG(LS_WARNING) << "Failed to decode frame with timestamp "
+ << frame.TimeStamp() << ", error code: " << ret;
+ _callback->Pop(frame.TimeStamp());
+ return ret;
+ } else if (ret == WEBRTC_VIDEO_CODEC_NO_OUTPUT ||
+ ret == WEBRTC_VIDEO_CODEC_REQUEST_SLI) {
+ // No output
+ _callback->Pop(frame.TimeStamp());
+ }
+ return ret;
+}
+
+int32_t VCMGenericDecoder::Release() {
+ return _decoder->Release();
+}
+
+int32_t VCMGenericDecoder::Reset() {
+ return _decoder->Reset();
+}
+
+int32_t VCMGenericDecoder::RegisterDecodeCompleteCallback(
+ VCMDecodedFrameCallback* callback) {
+ _callback = callback;
+ return _decoder->RegisterDecodeCompleteCallback(callback);
+}
+
+bool VCMGenericDecoder::External() const {
+ return _isExternal;
+}
+
+bool VCMGenericDecoder::PrefersLateDecoding() const {
+ return _decoder->PrefersLateDecoding();
+}
+
+} // namespace webrtc
diff --git a/webrtc/modules/video_coding/generic_decoder.h b/webrtc/modules/video_coding/generic_decoder.h
new file mode 100644
index 0000000000..67ceabfc53
--- /dev/null
+++ b/webrtc/modules/video_coding/generic_decoder.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_GENERIC_DECODER_H_
+#define WEBRTC_MODULES_VIDEO_CODING_GENERIC_DECODER_H_
+
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
+#include "webrtc/modules/video_coding/encoded_frame.h"
+#include "webrtc/modules/video_coding/timestamp_map.h"
+#include "webrtc/modules/video_coding/timing.h"
+
+namespace webrtc {
+
+class VCMReceiveCallback;
+
+enum { kDecoderFrameMemoryLength = 10 };
+
+struct VCMFrameInformation {
+ int64_t renderTimeMs;
+ int64_t decodeStartTimeMs;
+ void* userData;
+ VideoRotation rotation;
+};
+
+class VCMDecodedFrameCallback : public DecodedImageCallback {
+ public:
+ VCMDecodedFrameCallback(VCMTiming* timing, Clock* clock);
+ virtual ~VCMDecodedFrameCallback();
+ void SetUserReceiveCallback(VCMReceiveCallback* receiveCallback);
+ VCMReceiveCallback* UserReceiveCallback();
+
+ virtual int32_t Decoded(VideoFrame& decodedImage); // NOLINT
+ virtual int32_t Decoded(VideoFrame& decodedImage, // NOLINT
+ int64_t decode_time_ms);
+ virtual int32_t ReceivedDecodedReferenceFrame(const uint64_t pictureId);
+ virtual int32_t ReceivedDecodedFrame(const uint64_t pictureId);
+
+ uint64_t LastReceivedPictureID() const;
+ void OnDecoderImplementationName(const char* implementation_name);
+
+ void Map(uint32_t timestamp, VCMFrameInformation* frameInfo);
+ int32_t Pop(uint32_t timestamp);
+
+ private:
+ // Protect |_receiveCallback| and |_timestampMap|.
+ CriticalSectionWrapper* _critSect;
+ Clock* _clock;
+ VCMReceiveCallback* _receiveCallback GUARDED_BY(_critSect);
+ VCMTiming* _timing;
+ VCMTimestampMap _timestampMap GUARDED_BY(_critSect);
+ uint64_t _lastReceivedPictureID;
+};
+
+class VCMGenericDecoder {
+ friend class VCMCodecDataBase;
+
+ public:
+ explicit VCMGenericDecoder(VideoDecoder* decoder, bool isExternal = false);
+ ~VCMGenericDecoder();
+
+ /**
+ * Initialize the decoder with the information from the VideoCodec
+ */
+ int32_t InitDecode(const VideoCodec* settings, int32_t numberOfCores);
+
+ /**
+ * Decode to a raw I420 frame,
+ *
+ * inputVideoBuffer reference to encoded video frame
+ */
+ int32_t Decode(const VCMEncodedFrame& inputFrame, int64_t nowMs);
+
+ /**
+ * Free the decoder memory
+ */
+ int32_t Release();
+
+ /**
+ * Reset the decoder state, prepare for a new call
+ */
+ int32_t Reset();
+
+ /**
+ * Set decode callback. Deregistering while decoding is illegal.
+ */
+ int32_t RegisterDecodeCompleteCallback(VCMDecodedFrameCallback* callback);
+
+ bool External() const;
+ bool PrefersLateDecoding() const;
+
+ private:
+ VCMDecodedFrameCallback* _callback;
+ VCMFrameInformation _frameInfos[kDecoderFrameMemoryLength];
+ uint32_t _nextFrameInfoIdx;
+ VideoDecoder* const _decoder;
+ VideoCodecType _codecType;
+ bool _isExternal;
+ bool _keyFrameDecoded;
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_CODING_GENERIC_DECODER_H_
diff --git a/webrtc/modules/video_coding/main/source/generic_encoder.cc b/webrtc/modules/video_coding/generic_encoder.cc
index de196040f0..c7444ce99f 100644
--- a/webrtc/modules/video_coding/main/source/generic_encoder.cc
+++ b/webrtc/modules/video_coding/generic_encoder.cc
@@ -8,13 +8,17 @@
* be found in the AUTHORS file in the root of the source tree.
*/
+#include "webrtc/modules/video_coding/generic_encoder.h"
+
+#include <vector>
+
#include "webrtc/base/checks.h"
+#include "webrtc/base/logging.h"
+#include "webrtc/base/trace_event.h"
#include "webrtc/engine_configurations.h"
-#include "webrtc/modules/video_coding/main/source/encoded_frame.h"
-#include "webrtc/modules/video_coding/main/source/generic_encoder.h"
-#include "webrtc/modules/video_coding/main/source/media_optimization.h"
+#include "webrtc/modules/video_coding/encoded_frame.h"
+#include "webrtc/modules/video_coding/media_optimization.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
-#include "webrtc/system_wrappers/include/logging.h"
namespace webrtc {
namespace {
@@ -27,8 +31,7 @@ void CopyCodecSpecific(const CodecSpecificInfo* info, RTPVideoHeader* rtp) {
rtp->codec = kRtpVideoVp8;
rtp->codecHeader.VP8.InitRTPVideoHeaderVP8();
rtp->codecHeader.VP8.pictureId = info->codecSpecific.VP8.pictureId;
- rtp->codecHeader.VP8.nonReference =
- info->codecSpecific.VP8.nonReference;
+ rtp->codecHeader.VP8.nonReference = info->codecSpecific.VP8.nonReference;
rtp->codecHeader.VP8.temporalIdx = info->codecSpecific.VP8.temporalIdx;
rtp->codecHeader.VP8.layerSync = info->codecSpecific.VP8.layerSync;
rtp->codecHeader.VP8.tl0PicIdx = info->codecSpecific.VP8.tl0PicIdx;
@@ -54,11 +57,9 @@ void CopyCodecSpecific(const CodecSpecificInfo* info, RTPVideoHeader* rtp) {
rtp->codecHeader.VP9.inter_layer_predicted =
info->codecSpecific.VP9.inter_layer_predicted;
rtp->codecHeader.VP9.gof_idx = info->codecSpecific.VP9.gof_idx;
-
- // Packetizer needs to know the number of spatial layers to correctly set
- // the marker bit, even when the number won't be written in the packet.
rtp->codecHeader.VP9.num_spatial_layers =
info->codecSpecific.VP9.num_spatial_layers;
+
if (info->codecSpecific.VP9.ss_data_available) {
rtp->codecHeader.VP9.spatial_layer_resolution_present =
info->codecSpecific.VP9.spatial_layer_resolution_present;
@@ -71,6 +72,10 @@ void CopyCodecSpecific(const CodecSpecificInfo* info, RTPVideoHeader* rtp) {
}
rtp->codecHeader.VP9.gof.CopyGofInfoVP9(info->codecSpecific.VP9.gof);
}
+
+ rtp->codecHeader.VP9.num_ref_pics = info->codecSpecific.VP9.num_ref_pics;
+ for (int i = 0; i < info->codecSpecific.VP9.num_ref_pics; ++i)
+ rtp->codecHeader.VP9.pid_diff[i] = info->codecSpecific.VP9.p_diff[i];
return;
}
case kVideoCodecH264:
@@ -86,7 +91,7 @@ void CopyCodecSpecific(const CodecSpecificInfo* info, RTPVideoHeader* rtp) {
}
} // namespace
-//#define DEBUG_ENCODER_BIT_STREAM
+// #define DEBUG_ENCODER_BIT_STREAM
VCMGenericEncoder::VCMGenericEncoder(
VideoEncoder* encoder,
@@ -110,6 +115,7 @@ int32_t VCMGenericEncoder::Release() {
int32_t VCMGenericEncoder::InitEncode(const VideoCodec* settings,
int32_t numberOfCores,
size_t maxPayloadSize) {
+ TRACE_EVENT0("webrtc", "VCMGenericEncoder::InitEncode");
{
rtc::CritScope lock(&params_lock_);
encoder_params_.target_bitrate = settings->startBitrate * 1000;
@@ -130,6 +136,9 @@ int32_t VCMGenericEncoder::InitEncode(const VideoCodec* settings,
int32_t VCMGenericEncoder::Encode(const VideoFrame& inputFrame,
const CodecSpecificInfo* codecSpecificInfo,
const std::vector<FrameType>& frameTypes) {
+ TRACE_EVENT1("webrtc", "VCMGenericEncoder::Encode", "timestamp",
+ inputFrame.timestamp());
+
for (FrameType frame_type : frameTypes)
RTC_DCHECK(frame_type == kVideoFrameKey || frame_type == kVideoFrameDelta);
@@ -143,6 +152,12 @@ int32_t VCMGenericEncoder::Encode(const VideoFrame& inputFrame,
vcm_encoded_frame_callback_->SetRotation(rotation_);
int32_t result = encoder_->Encode(inputFrame, codecSpecificInfo, &frameTypes);
+
+ if (vcm_encoded_frame_callback_) {
+ vcm_encoded_frame_callback_->SignalLastEncoderImplementationUsed(
+ encoder_->ImplementationName());
+ }
+
if (is_screenshare_ &&
result == WEBRTC_VIDEO_CODEC_TARGET_BITRATE_OVERSHOOT) {
// Target bitrate exceeded, encoder state has been reset - try again.
@@ -182,10 +197,8 @@ EncoderParameters VCMGenericEncoder::GetEncoderParameters() const {
return encoder_params_;
}
-int32_t
-VCMGenericEncoder::SetPeriodicKeyFrames(bool enable)
-{
- return encoder_->SetPeriodicKeyFrames(enable);
+int32_t VCMGenericEncoder::SetPeriodicKeyFrames(bool enable) {
+ return encoder_->SetPeriodicKeyFrames(enable);
}
int32_t VCMGenericEncoder::RequestFrame(
@@ -194,10 +207,8 @@ int32_t VCMGenericEncoder::RequestFrame(
return encoder_->Encode(image, NULL, &frame_types);
}
-bool
-VCMGenericEncoder::InternalSource() const
-{
- return internal_source_;
+bool VCMGenericEncoder::InternalSource() const {
+ return internal_source_;
}
void VCMGenericEncoder::OnDroppedFrame() {
@@ -212,12 +223,12 @@ int VCMGenericEncoder::GetTargetFramerate() {
return encoder_->GetTargetFramerate();
}
- /***************************
- * Callback Implementation
- ***************************/
+/***************************
+ * Callback Implementation
+ ***************************/
VCMEncodedFrameCallback::VCMEncodedFrameCallback(
EncodedImageCallback* post_encode_callback)
- : _sendCallback(),
+ : send_callback_(),
_mediaOpt(NULL),
_payloadType(0),
_internalSource(false),
@@ -229,39 +240,37 @@ VCMEncodedFrameCallback::VCMEncodedFrameCallback(
#endif
{
#ifdef DEBUG_ENCODER_BIT_STREAM
- _bitStreamAfterEncoder = fopen("encoderBitStream.bit", "wb");
+ _bitStreamAfterEncoder = fopen("encoderBitStream.bit", "wb");
#endif
}
-VCMEncodedFrameCallback::~VCMEncodedFrameCallback()
-{
+VCMEncodedFrameCallback::~VCMEncodedFrameCallback() {
#ifdef DEBUG_ENCODER_BIT_STREAM
- fclose(_bitStreamAfterEncoder);
+ fclose(_bitStreamAfterEncoder);
#endif
}
-int32_t
-VCMEncodedFrameCallback::SetTransportCallback(VCMPacketizationCallback* transport)
-{
- _sendCallback = transport;
- return VCM_OK;
+int32_t VCMEncodedFrameCallback::SetTransportCallback(
+ VCMPacketizationCallback* transport) {
+ send_callback_ = transport;
+ return VCM_OK;
}
int32_t VCMEncodedFrameCallback::Encoded(
- const EncodedImage& encodedImage,
+ const EncodedImage& encoded_image,
const CodecSpecificInfo* codecSpecificInfo,
const RTPFragmentationHeader* fragmentationHeader) {
- RTC_DCHECK(encodedImage._frameType == kVideoFrameKey ||
- encodedImage._frameType == kVideoFrameDelta);
- post_encode_callback_->Encoded(encodedImage, NULL, NULL);
+ TRACE_EVENT_INSTANT1("webrtc", "VCMEncodedFrameCallback::Encoded",
+ "timestamp", encoded_image._timeStamp);
+ post_encode_callback_->Encoded(encoded_image, NULL, NULL);
- if (_sendCallback == NULL) {
+ if (send_callback_ == NULL) {
return VCM_UNINITIALIZED;
}
#ifdef DEBUG_ENCODER_BIT_STREAM
if (_bitStreamAfterEncoder != NULL) {
- fwrite(encodedImage._buffer, 1, encodedImage._length,
+ fwrite(encoded_image._buffer, 1, encoded_image._length,
_bitStreamAfterEncoder);
}
#endif
@@ -274,25 +283,29 @@ int32_t VCMEncodedFrameCallback::Encoded(
}
rtpVideoHeader.rotation = _rotation;
- int32_t callbackReturn = _sendCallback->SendData(
- _payloadType, encodedImage, *fragmentationHeader, rtpVideoHeaderPtr);
+ int32_t callbackReturn = send_callback_->SendData(
+ _payloadType, encoded_image, *fragmentationHeader, rtpVideoHeaderPtr);
if (callbackReturn < 0) {
return callbackReturn;
}
if (_mediaOpt != NULL) {
- _mediaOpt->UpdateWithEncodedData(encodedImage);
+ _mediaOpt->UpdateWithEncodedData(encoded_image);
if (_internalSource)
return _mediaOpt->DropFrame(); // Signal to encoder to drop next frame.
}
return VCM_OK;
}
-void
-VCMEncodedFrameCallback::SetMediaOpt(
- media_optimization::MediaOptimization *mediaOpt)
-{
- _mediaOpt = mediaOpt;
+void VCMEncodedFrameCallback::SetMediaOpt(
+ media_optimization::MediaOptimization* mediaOpt) {
+ _mediaOpt = mediaOpt;
+}
+
+void VCMEncodedFrameCallback::SignalLastEncoderImplementationUsed(
+ const char* implementation_name) {
+ if (send_callback_)
+ send_callback_->OnEncoderImplementationName(implementation_name);
}
} // namespace webrtc
diff --git a/webrtc/modules/video_coding/generic_encoder.h b/webrtc/modules/video_coding/generic_encoder.h
new file mode 100644
index 0000000000..f739edb44f
--- /dev/null
+++ b/webrtc/modules/video_coding/generic_encoder.h
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_GENERIC_ENCODER_H_
+#define WEBRTC_MODULES_VIDEO_CODING_GENERIC_ENCODER_H_
+
+#include <stdio.h>
+#include <vector>
+
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
+#include "webrtc/modules/video_coding/include/video_coding_defines.h"
+
+#include "webrtc/base/criticalsection.h"
+#include "webrtc/base/scoped_ptr.h"
+
+namespace webrtc {
+class CriticalSectionWrapper;
+
+namespace media_optimization {
+class MediaOptimization;
+} // namespace media_optimization
+
+struct EncoderParameters {
+ uint32_t target_bitrate;
+ uint8_t loss_rate;
+ int64_t rtt;
+ uint32_t input_frame_rate;
+};
+
+/*************************************/
+/* VCMEncodedFrameCallback class */
+/***********************************/
+class VCMEncodedFrameCallback : public EncodedImageCallback {
+ public:
+ explicit VCMEncodedFrameCallback(
+ EncodedImageCallback* post_encode_callback);
+ virtual ~VCMEncodedFrameCallback();
+
+ /*
+ * Callback implementation - codec encode complete
+ */
+ int32_t Encoded(
+ const EncodedImage& encodedImage,
+ const CodecSpecificInfo* codecSpecificInfo = NULL,
+ const RTPFragmentationHeader* fragmentationHeader = NULL);
+ /*
+ * Callback implementation - generic encoder encode complete
+ */
+ int32_t SetTransportCallback(VCMPacketizationCallback* transport);
+ /**
+ * Set media Optimization
+ */
+ void SetMediaOpt(media_optimization::MediaOptimization* mediaOpt);
+
+ void SetPayloadType(uint8_t payloadType) {
+ _payloadType = payloadType;
+ }
+
+ void SetInternalSource(bool internalSource) {
+ _internalSource = internalSource;
+ }
+
+ void SetRotation(VideoRotation rotation) { _rotation = rotation; }
+ void SignalLastEncoderImplementationUsed(
+ const char* encoder_implementation_name);
+
+ private:
+ VCMPacketizationCallback* send_callback_;
+ media_optimization::MediaOptimization* _mediaOpt;
+ uint8_t _payloadType;
+ bool _internalSource;
+ VideoRotation _rotation;
+
+ EncodedImageCallback* post_encode_callback_;
+
+#ifdef DEBUG_ENCODER_BIT_STREAM
+ FILE* _bitStreamAfterEncoder;
+#endif
+}; // end of VCMEncodedFrameCallback class
+
+/******************************/
+/* VCMGenericEncoder class */
+/******************************/
+class VCMGenericEncoder {
+ friend class VCMCodecDataBase;
+
+ public:
+ VCMGenericEncoder(VideoEncoder* encoder,
+ VideoEncoderRateObserver* rate_observer,
+ VCMEncodedFrameCallback* encoded_frame_callback,
+ bool internalSource);
+ ~VCMGenericEncoder();
+ /**
+ * Free encoder memory
+ */
+ int32_t Release();
+ /**
+ * Initialize the encoder with the information from the VideoCodec
+ */
+ int32_t InitEncode(const VideoCodec* settings,
+ int32_t numberOfCores,
+ size_t maxPayloadSize);
+ /**
+ * Encode raw image
+ * inputFrame : Frame containing raw image
+ * codecSpecificInfo : Specific codec data
+ * cameraFrameRate : Request or information from the remote side
+ * frameType : The requested frame type to encode
+ */
+ int32_t Encode(const VideoFrame& inputFrame,
+ const CodecSpecificInfo* codecSpecificInfo,
+ const std::vector<FrameType>& frameTypes);
+
+ void SetEncoderParameters(const EncoderParameters& params);
+ EncoderParameters GetEncoderParameters() const;
+
+ int32_t SetPeriodicKeyFrames(bool enable);
+
+ int32_t RequestFrame(const std::vector<FrameType>& frame_types);
+
+ bool InternalSource() const;
+
+ void OnDroppedFrame();
+
+ bool SupportsNativeHandle() const;
+
+ int GetTargetFramerate();
+
+ private:
+ VideoEncoder* const encoder_;
+ VideoEncoderRateObserver* const rate_observer_;
+ VCMEncodedFrameCallback* const vcm_encoded_frame_callback_;
+ const bool internal_source_;
+ mutable rtc::CriticalSection params_lock_;
+ EncoderParameters encoder_params_ GUARDED_BY(params_lock_);
+ VideoRotation rotation_;
+ bool is_screenshare_;
+}; // end of VCMGenericEncoder class
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_CODING_GENERIC_ENCODER_H_
diff --git a/webrtc/modules/video_coding/main/interface/mock/mock_vcm_callbacks.h b/webrtc/modules/video_coding/include/mock/mock_vcm_callbacks.h
index 302d4a3a13..0185dae333 100644
--- a/webrtc/modules/video_coding/main/interface/mock/mock_vcm_callbacks.h
+++ b/webrtc/modules/video_coding/include/mock/mock_vcm_callbacks.h
@@ -8,11 +8,11 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_VIDEO_CODING_MAIN_INTERFACE_MOCK_MOCK_VCM_CALLBACKS_H_
-#define WEBRTC_MODULES_VIDEO_CODING_MAIN_INTERFACE_MOCK_MOCK_VCM_CALLBACKS_H_
+#ifndef WEBRTC_MODULES_VIDEO_CODING_INCLUDE_MOCK_MOCK_VCM_CALLBACKS_H_
+#define WEBRTC_MODULES_VIDEO_CODING_INCLUDE_MOCK_MOCK_VCM_CALLBACKS_H_
#include "testing/gmock/include/gmock/gmock.h"
-#include "webrtc/modules/video_coding/main/interface/video_coding_defines.h"
+#include "webrtc/modules/video_coding/include/video_coding_defines.h"
#include "webrtc/typedefs.h"
namespace webrtc {
@@ -20,16 +20,15 @@ namespace webrtc {
class MockVCMFrameTypeCallback : public VCMFrameTypeCallback {
public:
MOCK_METHOD0(RequestKeyFrame, int32_t());
- MOCK_METHOD1(SliceLossIndicationRequest,
- int32_t(const uint64_t pictureId));
+ MOCK_METHOD1(SliceLossIndicationRequest, int32_t(const uint64_t pictureId));
};
class MockPacketRequestCallback : public VCMPacketRequestCallback {
public:
- MOCK_METHOD2(ResendPackets, int32_t(const uint16_t* sequenceNumbers,
- uint16_t length));
+ MOCK_METHOD2(ResendPackets,
+ int32_t(const uint16_t* sequenceNumbers, uint16_t length));
};
} // namespace webrtc
-#endif // WEBRTC_MODULES_VIDEO_CODING_MAIN_INTERFACE_MOCK_MOCK_VCM_CALLBACKS_H_
+#endif // WEBRTC_MODULES_VIDEO_CODING_INCLUDE_MOCK_MOCK_VCM_CALLBACKS_H_
diff --git a/webrtc/modules/video_coding/include/mock/mock_video_codec_interface.h b/webrtc/modules/video_coding/include/mock/mock_video_codec_interface.h
new file mode 100644
index 0000000000..9cb4a83535
--- /dev/null
+++ b/webrtc/modules/video_coding/include/mock/mock_video_codec_interface.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_INCLUDE_MOCK_MOCK_VIDEO_CODEC_INTERFACE_H_
+#define WEBRTC_MODULES_VIDEO_CODING_INCLUDE_MOCK_MOCK_VIDEO_CODEC_INTERFACE_H_
+
+#include <string>
+#include <vector>
+
+#include "testing/gmock/include/gmock/gmock.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
+#include "webrtc/typedefs.h"
+
+namespace webrtc {
+
+class MockEncodedImageCallback : public EncodedImageCallback {
+ public:
+ MOCK_METHOD3(Encoded,
+ int32_t(const EncodedImage& encodedImage,
+ const CodecSpecificInfo* codecSpecificInfo,
+ const RTPFragmentationHeader* fragmentation));
+};
+
+class MockVideoEncoder : public VideoEncoder {
+ public:
+ MOCK_CONST_METHOD2(Version, int32_t(int8_t* version, int32_t length));
+ MOCK_METHOD3(InitEncode,
+ int32_t(const VideoCodec* codecSettings,
+ int32_t numberOfCores,
+ size_t maxPayloadSize));
+ MOCK_METHOD3(Encode,
+ int32_t(const VideoFrame& inputImage,
+ const CodecSpecificInfo* codecSpecificInfo,
+ const std::vector<FrameType>* frame_types));
+ MOCK_METHOD1(RegisterEncodeCompleteCallback,
+ int32_t(EncodedImageCallback* callback));
+ MOCK_METHOD0(Release, int32_t());
+ MOCK_METHOD0(Reset, int32_t());
+ MOCK_METHOD2(SetChannelParameters, int32_t(uint32_t packetLoss, int64_t rtt));
+ MOCK_METHOD2(SetRates, int32_t(uint32_t newBitRate, uint32_t frameRate));
+ MOCK_METHOD1(SetPeriodicKeyFrames, int32_t(bool enable));
+};
+
+class MockDecodedImageCallback : public DecodedImageCallback {
+ public:
+ MOCK_METHOD1(Decoded, int32_t(VideoFrame& decodedImage)); // NOLINT
+ MOCK_METHOD2(Decoded,
+ int32_t(VideoFrame& decodedImage, // NOLINT
+ int64_t decode_time_ms));
+ MOCK_METHOD1(ReceivedDecodedReferenceFrame,
+ int32_t(const uint64_t pictureId));
+ MOCK_METHOD1(ReceivedDecodedFrame, int32_t(const uint64_t pictureId));
+};
+
+class MockVideoDecoder : public VideoDecoder {
+ public:
+ MOCK_METHOD2(InitDecode,
+ int32_t(const VideoCodec* codecSettings, int32_t numberOfCores));
+ MOCK_METHOD5(Decode,
+ int32_t(const EncodedImage& inputImage,
+ bool missingFrames,
+ const RTPFragmentationHeader* fragmentation,
+ const CodecSpecificInfo* codecSpecificInfo,
+ int64_t renderTimeMs));
+ MOCK_METHOD1(RegisterDecodeCompleteCallback,
+ int32_t(DecodedImageCallback* callback));
+ MOCK_METHOD0(Release, int32_t());
+ MOCK_METHOD0(Reset, int32_t());
+ MOCK_METHOD0(Copy, VideoDecoder*());
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_CODING_INCLUDE_MOCK_MOCK_VIDEO_CODEC_INTERFACE_H_
diff --git a/webrtc/modules/video_coding/include/video_codec_interface.h b/webrtc/modules/video_coding/include/video_codec_interface.h
new file mode 100644
index 0000000000..19303c0d67
--- /dev/null
+++ b/webrtc/modules/video_coding/include/video_codec_interface.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_INCLUDE_VIDEO_CODEC_INTERFACE_H_
+#define WEBRTC_MODULES_VIDEO_CODING_INCLUDE_VIDEO_CODEC_INTERFACE_H_
+
+#include <vector>
+
+#include "webrtc/common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/video_coding/include/video_error_codes.h"
+#include "webrtc/typedefs.h"
+#include "webrtc/video_decoder.h"
+#include "webrtc/video_encoder.h"
+#include "webrtc/video_frame.h"
+
+namespace webrtc {
+
+class RTPFragmentationHeader; // forward declaration
+
+// Note: if any pointers are added to this struct, it must be fitted
+// with a copy-constructor. See below.
+struct CodecSpecificInfoVP8 {
+ bool hasReceivedSLI;
+ uint8_t pictureIdSLI;
+ bool hasReceivedRPSI;
+ uint64_t pictureIdRPSI;
+ int16_t pictureId; // Negative value to skip pictureId.
+ bool nonReference;
+ uint8_t simulcastIdx;
+ uint8_t temporalIdx;
+ bool layerSync;
+ int tl0PicIdx; // Negative value to skip tl0PicIdx.
+ int8_t keyIdx; // Negative value to skip keyIdx.
+};
+
+struct CodecSpecificInfoVP9 {
+ bool has_received_sli;
+ uint8_t picture_id_sli;
+ bool has_received_rpsi;
+ uint64_t picture_id_rpsi;
+ int16_t picture_id; // Negative value to skip pictureId.
+
+ bool inter_pic_predicted; // This layer frame is dependent on previously
+ // coded frame(s).
+ bool flexible_mode;
+ bool ss_data_available;
+
+ int tl0_pic_idx; // Negative value to skip tl0PicIdx.
+ uint8_t temporal_idx;
+ uint8_t spatial_idx;
+ bool temporal_up_switch;
+ bool inter_layer_predicted; // Frame is dependent on directly lower spatial
+ // layer frame.
+ uint8_t gof_idx;
+
+ // SS data.
+ size_t num_spatial_layers; // Always populated.
+ bool spatial_layer_resolution_present;
+ uint16_t width[kMaxVp9NumberOfSpatialLayers];
+ uint16_t height[kMaxVp9NumberOfSpatialLayers];
+ GofInfoVP9 gof;
+
+ // Frame reference data.
+ uint8_t num_ref_pics;
+ uint8_t p_diff[kMaxVp9RefPics];
+};
+
+struct CodecSpecificInfoGeneric {
+ uint8_t simulcast_idx;
+};
+
+struct CodecSpecificInfoH264 {};
+
+union CodecSpecificInfoUnion {
+ CodecSpecificInfoGeneric generic;
+ CodecSpecificInfoVP8 VP8;
+ CodecSpecificInfoVP9 VP9;
+ CodecSpecificInfoH264 H264;
+};
+
+// Note: if any pointers are added to this struct or its sub-structs, it
+// must be fitted with a copy-constructor. This is because it is copied
+// in the copy-constructor of VCMEncodedFrame.
+struct CodecSpecificInfo {
+ VideoCodecType codecType;
+ CodecSpecificInfoUnion codecSpecific;
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_CODING_INCLUDE_VIDEO_CODEC_INTERFACE_H_
diff --git a/webrtc/modules/video_coding/include/video_coding.h b/webrtc/modules/video_coding/include/video_coding.h
new file mode 100644
index 0000000000..c46896c823
--- /dev/null
+++ b/webrtc/modules/video_coding/include/video_coding.h
@@ -0,0 +1,519 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_INCLUDE_VIDEO_CODING_H_
+#define WEBRTC_MODULES_VIDEO_CODING_INCLUDE_VIDEO_CODING_H_
+
+#if defined(WEBRTC_WIN)
+// This is a workaround on Windows due to the fact that some Windows
+// headers define CreateEvent as a macro to either CreateEventW or CreateEventA.
+// This can cause problems since we use that name as well and could
+// declare them as one thing here whereas in another place a windows header
+// may have been included and then implementing CreateEvent() causes compilation
+// errors. So for consistency, we include the main windows header here.
+#include <windows.h>
+#endif
+
+#include "webrtc/modules/include/module.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/video_coding/include/video_coding_defines.h"
+#include "webrtc/system_wrappers/include/event_wrapper.h"
+#include "webrtc/video_frame.h"
+
+namespace webrtc {
+
+class Clock;
+class EncodedImageCallback;
+class VideoEncoder;
+class VideoDecoder;
+struct CodecSpecificInfo;
+
+class EventFactory {
+ public:
+ virtual ~EventFactory() {}
+
+ virtual EventWrapper* CreateEvent() = 0;
+};
+
+class EventFactoryImpl : public EventFactory {
+ public:
+ virtual ~EventFactoryImpl() {}
+
+ virtual EventWrapper* CreateEvent() { return EventWrapper::Create(); }
+};
+
+// Used to indicate which decode with errors mode should be used.
+enum VCMDecodeErrorMode {
+ kNoErrors, // Never decode with errors. Video will freeze
+ // if nack is disabled.
+ kSelectiveErrors, // Frames that are determined decodable in
+ // VCMSessionInfo may be decoded with missing
+ // packets. As not all incomplete frames will be
+ // decodable, video will freeze if nack is disabled.
+ kWithErrors // Release frames as needed. Errors may be
+ // introduced as some encoded frames may not be
+ // complete.
+};
+
+class VideoCodingModule : public Module {
+ public:
+ enum SenderNackMode { kNackNone, kNackAll, kNackSelective };
+
+ enum ReceiverRobustness { kNone, kHardNack, kSoftNack, kReferenceSelection };
+
+ static VideoCodingModule* Create(
+ Clock* clock,
+ VideoEncoderRateObserver* encoder_rate_observer,
+ VCMQMSettingsCallback* qm_settings_callback);
+
+ static VideoCodingModule* Create(Clock* clock, EventFactory* event_factory);
+
+ static void Destroy(VideoCodingModule* module);
+
+ // Get supported codec settings using codec type
+ //
+ // Input:
+ // - codecType : The codec type to get settings for
+ // - codec : Memory where the codec settings will be stored
+ //
+ // Note : This function returns void; the settings
+ // for the given codec type are written to |codec|.
+ static void Codec(VideoCodecType codecType, VideoCodec* codec);
+
+ /*
+ * Sender
+ */
+
+ // Registers a codec to be used for encoding. Calling this
+ // API multiple times overwrites any previously registered codecs.
+ //
+ // NOTE: Must be called on the thread that constructed the VCM instance.
+ //
+ // Input:
+ // - sendCodec : Settings for the codec to be registered.
+ // - numberOfCores : The number of cores the codec is allowed
+ // to use.
+ // - maxPayloadSize : The maximum size each payload is allowed
+ // to have. Usually MTU - overhead.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t RegisterSendCodec(const VideoCodec* sendCodec,
+ uint32_t numberOfCores,
+ uint32_t maxPayloadSize) = 0;
+
+ // Register an external encoder object. This can not be used together with
+ // external decoder callbacks.
+ //
+ // Input:
+ // - externalEncoder : Encoder object to be used for encoding frames
+ // inserted
+ // with the AddVideoFrame API.
+ // - payloadType : The payload type which this encoder is bound
+ // to.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ // TODO(pbos): Remove return type when unused elsewhere.
+ virtual int32_t RegisterExternalEncoder(VideoEncoder* externalEncoder,
+ uint8_t payloadType,
+ bool internalSource = false) = 0;
+
+ // API to get currently configured encoder target bitrate in bits/s.
+ //
+ // Return value : 0, on success.
+ // < 0, on error.
+ virtual int Bitrate(unsigned int* bitrate) const = 0;
+
+ // API to get currently configured encoder target frame rate.
+ //
+ // Return value : 0, on success.
+ // < 0, on error.
+ virtual int FrameRate(unsigned int* framerate) const = 0;
+
+ // Sets the parameters describing the send channel. These parameters are
+ // inputs to the
+ // Media Optimization inside the VCM and also specifies the target bit rate
+ // for the
+ // encoder. Bit rate used by NACK should already be compensated for by the
+ // user.
+ //
+ // Input:
+ // - target_bitrate : The target bitrate for VCM in bits/s.
+ // - lossRate : Fractions of lost packets the past second.
+ // (loss rate in percent = 100 * packetLoss /
+ // 255)
+ // - rtt : Current round-trip time in ms.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t SetChannelParameters(uint32_t target_bitrate,
+ uint8_t lossRate,
+ int64_t rtt) = 0;
+
+ // Sets the parameters describing the receive channel. These parameters are
+ // inputs to the
+ // Media Optimization inside the VCM.
+ //
+ // Input:
+ // - rtt : Current round-trip time in ms.
+ // with the most amount available bandwidth in
+ // a conference
+ // scenario
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t SetReceiveChannelParameters(int64_t rtt) = 0;
+
+ // Register a transport callback which will be called to deliver the encoded
+ // data and
+ // side information.
+ //
+ // Input:
+ // - transport : The callback object to register.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t RegisterTransportCallback(
+ VCMPacketizationCallback* transport) = 0;
+
+ // Register video output information callback which will be called to deliver
+ // information
+ // about the video stream produced by the encoder, for instance the average
+ // frame rate and
+ // bit rate.
+ //
+ // Input:
+ // - outputInformation : The callback object to register.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t RegisterSendStatisticsCallback(
+ VCMSendStatisticsCallback* sendStats) = 0;
+
+ // Register a video protection callback which will be called to deliver
+ // the requested FEC rate and NACK status (on/off).
+ //
+ // Input:
+ // - protection : The callback object to register.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t RegisterProtectionCallback(
+ VCMProtectionCallback* protection) = 0;
+
+ // Enable or disable a video protection method.
+ //
+ // Input:
+ // - videoProtection : The method to enable or disable.
+ // - enable : True if the method should be enabled, false if
+ // it should be disabled.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t SetVideoProtection(VCMVideoProtection videoProtection,
+ bool enable) = 0;
+
+ // Add one raw video frame to the encoder. This function does all the
+ // necessary
+ // processing, then decides what frame type to encode, or if the frame should
+ // be
+ // dropped. If the frame should be encoded it passes the frame to the encoder
+ // before it returns.
+ //
+ // Input:
+ // - videoFrame : Video frame to encode.
+ // - codecSpecificInfo : Extra codec information, e.g., pre-parsed
+ // in-band signaling.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t AddVideoFrame(
+ const VideoFrame& videoFrame,
+ const VideoContentMetrics* contentMetrics = NULL,
+ const CodecSpecificInfo* codecSpecificInfo = NULL) = 0;
+
+ // Next frame encoded should be an intra frame (keyframe).
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t IntraFrameRequest(int stream_index) = 0;
+
+ // Frame Dropper enable. Can be used to disable the frame dropping when the
+ // encoder
+ // over-uses its bit rate. This API is designed to be used when the encoded
+ // frames
+ // are supposed to be stored to an AVI file, or when the I420 codec is used
+ // and the
+ // target bit rate shouldn't affect the frame rate.
+ //
+ // Input:
+ // - enable : True to enable the setting, false to disable it.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t EnableFrameDropper(bool enable) = 0;
+
+ /*
+ * Receiver
+ */
+
+ // Register possible receive codecs, can be called multiple times for
+ // different codecs.
+ // The module will automatically switch between registered codecs depending on
+ // the
+ // payload type of incoming frames. The actual decoder will be created when
+ // needed.
+ //
+ // Input:
+ // - receiveCodec : Settings for the codec to be registered.
+ // - numberOfCores : Number of CPU cores that the decoder is allowed
+ // to use.
+ // - requireKeyFrame : Set this to true if you don't want any delta
+ // frames
+ // to be decoded until the first key frame has been
+ // decoded.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t RegisterReceiveCodec(const VideoCodec* receiveCodec,
+ int32_t numberOfCores,
+ bool requireKeyFrame = false) = 0;
+
+ // Register an externally defined decoder/renderer object. Can be a decoder
+ // only or a
+ // decoder coupled with a renderer. Note that RegisterReceiveCodec must be
+ // called to
+ // be used for decoding incoming streams.
+ //
+ // Input:
+ // - externalDecoder : The external decoder/renderer object.
+ // - payloadType : The payload type which this decoder should
+ // be
+ // registered to.
+ //
+ virtual void RegisterExternalDecoder(VideoDecoder* externalDecoder,
+ uint8_t payloadType) = 0;
+
+ // Register a receive callback. Will be called whenever there is a new frame
+ // ready
+ // for rendering.
+ //
+ // Input:
+ // - receiveCallback : The callback object to be used by the
+ // module when a
+ // frame is ready for rendering.
+ // De-register with a NULL pointer.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t RegisterReceiveCallback(
+ VCMReceiveCallback* receiveCallback) = 0;
+
+ // Register a receive statistics callback which will be called to deliver
+ // information
+ // about the video stream received by the receiving side of the VCM, for
+ // instance the
+ // average frame rate and bit rate.
+ //
+ // Input:
+ // - receiveStats : The callback object to register.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t RegisterReceiveStatisticsCallback(
+ VCMReceiveStatisticsCallback* receiveStats) = 0;
+
+ // Register a decoder timing callback which will be called to deliver
+ // information about the timing of the decoder in the receiving side of the
+ // VCM, for instance the current and maximum frame decode latency.
+ //
+ // Input:
+ // - decoderTiming : The callback object to register.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t RegisterDecoderTimingCallback(
+ VCMDecoderTimingCallback* decoderTiming) = 0;
+
+ // Register a frame type request callback. This callback will be called when
+ // the
+ // module needs to request specific frame types from the send side.
+ //
+ // Input:
+ // - frameTypeCallback : The callback object to be used by the
+ // module when
+ // requesting a specific type of frame from
+ // the send side.
+ // De-register with a NULL pointer.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t RegisterFrameTypeCallback(
+ VCMFrameTypeCallback* frameTypeCallback) = 0;
+
+ // Registers a callback which is called whenever the receive side of the VCM
+ // encounters holes in the packet sequence and needs packets to be
+ // retransmitted.
+ //
+ // Input:
+ // - callback : The callback to be registered in the VCM.
+ //
+ // Return value : VCM_OK, on success.
+ // <0, on error.
+ virtual int32_t RegisterPacketRequestCallback(
+ VCMPacketRequestCallback* callback) = 0;
+
+ // Waits for the next frame in the jitter buffer to become complete
+ // (waits no longer than maxWaitTimeMs), then passes it to the decoder for
+ // decoding.
+ // Should be called as often as possible to get the most out of the decoder.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t Decode(uint16_t maxWaitTimeMs = 200) = 0;
+
+ // Registers a callback which conveys the size of the render buffer.
+ virtual int RegisterRenderBufferSizeCallback(
+ VCMRenderBufferSizeCallback* callback) = 0;
+
+ // Reset the decoder state to the initial state.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t ResetDecoder() = 0;
+
+ // API to get the codec which is currently used for decoding by the module.
+ //
+ // Input:
+ // - currentReceiveCodec : Settings for the codec to be registered.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t ReceiveCodec(VideoCodec* currentReceiveCodec) const = 0;
+
+ // API to get the codec type currently used for decoding by the module.
+ //
+ // Return value : codec type, on success.
+ // kVideoCodecUnknown, on error or if no receive codec is
+ // registered
+ virtual VideoCodecType ReceiveCodec() const = 0;
+
+ // Insert a parsed packet into the receiver side of the module. Will be placed
+ // in the
+ // jitter buffer waiting for the frame to become complete. Returns as soon as
+ // the packet
+ // has been placed in the jitter buffer.
+ //
+ // Input:
+ // - incomingPayload : Payload of the packet.
+ // - payloadLength : Length of the payload.
+ // - rtpInfo : The parsed header.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t IncomingPacket(const uint8_t* incomingPayload,
+ size_t payloadLength,
+ const WebRtcRTPHeader& rtpInfo) = 0;
+
+ // Minimum playout delay (Used for lip-sync). This is the minimum delay
+ // required
+ // to sync with audio. Not included in VideoCodingModule::Delay()
+ // Defaults to 0 ms.
+ //
+ // Input:
+ // - minPlayoutDelayMs : Additional delay in ms.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t SetMinimumPlayoutDelay(uint32_t minPlayoutDelayMs) = 0;
+
+ // Set the time required by the renderer to render a frame.
+ //
+ // Input:
+ // - timeMS : The time in ms required by the renderer to render a
+ // frame.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t SetRenderDelay(uint32_t timeMS) = 0;
+
+ // The total delay desired by the VCM. Can be less than the minimum
+ // delay set with SetMinimumPlayoutDelay.
+ //
+ // Return value : Total delay in ms, on success.
+ // < 0, on error.
+ virtual int32_t Delay() const = 0;
+
+ // Returns the number of packets discarded by the jitter buffer due to being
+ // too late. This can include duplicated packets which arrived after the
+ // frame was sent to the decoder. Therefore packets which were prematurely
+ // NACKed will be counted.
+ virtual uint32_t DiscardedPackets() const = 0;
+
+ // Robustness APIs
+
+ // Set the receiver robustness mode. The mode decides how the receiver
+ // responds to losses in the stream. The type of counter-measure (soft or
+ // hard NACK, dual decoder, RPS, etc.) is selected through the
+ // robustnessMode parameter. The errorMode parameter decides if it is
+ // allowed to display frames corrupted by losses. Note that not all
+ // combinations of the two parameters are feasible. An error will be
+ // returned for invalid combinations.
+ // Input:
+ // - robustnessMode : selected robustness mode.
+ // - errorMode : selected error mode.
+ //
+ // Return value : VCM_OK, on success;
+ // < 0, on error.
+ virtual int SetReceiverRobustnessMode(ReceiverRobustness robustnessMode,
+ VCMDecodeErrorMode errorMode) = 0;
+
+ // Set the decode error mode. The mode decides which errors (if any) are
+ // allowed in decodable frames. Note that setting decode_error_mode to
+ // anything other than kWithErrors without enabling nack will cause
+ // long-term freezes (resulting from frequent key frame requests) if
+ // packet loss occurs.
+ virtual void SetDecodeErrorMode(VCMDecodeErrorMode decode_error_mode) = 0;
+
+ // Sets the maximum number of sequence numbers that we are allowed to NACK
+ // and the oldest sequence number that we will consider to NACK. If a
+ // sequence number older than |max_packet_age_to_nack| is missing
+ // a key frame will be requested. A key frame will also be requested if the
+ // time of incomplete or non-continuous frames in the jitter buffer is above
+ // |max_incomplete_time_ms|.
+ virtual void SetNackSettings(size_t max_nack_list_size,
+ int max_packet_age_to_nack,
+ int max_incomplete_time_ms) = 0;
+
+ // Setting a desired delay to the VCM receiver. Video rendering will be
+ // delayed by at least desired_delay_ms.
+ virtual int SetMinReceiverDelay(int desired_delay_ms) = 0;
+
+ // Lets the sender suspend video when the rate drops below
+ // |threshold_bps|, and turns back on when the rate goes back up above
+ // |threshold_bps| + |window_bps|.
+ virtual void SuspendBelowMinBitrate() = 0;
+
+ // Returns true if SuspendBelowMinBitrate is engaged and the video has been
+ // suspended due to bandwidth limitations; otherwise false.
+ virtual bool VideoSuspended() const = 0;
+
+ virtual void RegisterPreDecodeImageCallback(
+ EncodedImageCallback* observer) = 0;
+ virtual void RegisterPostEncodeImageCallback(
+ EncodedImageCallback* post_encode_callback) = 0;
+ // Releases pending decode calls, permitting faster thread shutdown.
+ virtual void TriggerDecoderShutdown() = 0;
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_CODING_INCLUDE_VIDEO_CODING_H_
diff --git a/webrtc/modules/video_coding/main/interface/video_coding_defines.h b/webrtc/modules/video_coding/include/video_coding_defines.h
index fd38d64415..673a02b713 100644
--- a/webrtc/modules/video_coding/main/interface/video_coding_defines.h
+++ b/webrtc/modules/video_coding/include/video_coding_defines.h
@@ -8,33 +8,33 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_INTERFACE_VIDEO_CODING_DEFINES_H_
-#define WEBRTC_MODULES_INTERFACE_VIDEO_CODING_DEFINES_H_
+#ifndef WEBRTC_MODULES_VIDEO_CODING_INCLUDE_VIDEO_CODING_DEFINES_H_
+#define WEBRTC_MODULES_VIDEO_CODING_INCLUDE_VIDEO_CODING_DEFINES_H_
-#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/typedefs.h"
#include "webrtc/video_frame.h"
namespace webrtc {
// Error codes
-#define VCM_FRAME_NOT_READY 3
-#define VCM_REQUEST_SLI 2
-#define VCM_MISSING_CALLBACK 1
-#define VCM_OK 0
-#define VCM_GENERAL_ERROR -1
-#define VCM_LEVEL_EXCEEDED -2
-#define VCM_MEMORY -3
-#define VCM_PARAMETER_ERROR -4
-#define VCM_UNKNOWN_PAYLOAD -5
-#define VCM_CODEC_ERROR -6
-#define VCM_UNINITIALIZED -7
+#define VCM_FRAME_NOT_READY 3
+#define VCM_REQUEST_SLI 2
+#define VCM_MISSING_CALLBACK 1
+#define VCM_OK 0
+#define VCM_GENERAL_ERROR -1
+#define VCM_LEVEL_EXCEEDED -2
+#define VCM_MEMORY -3
+#define VCM_PARAMETER_ERROR -4
+#define VCM_UNKNOWN_PAYLOAD -5
+#define VCM_CODEC_ERROR -6
+#define VCM_UNINITIALIZED -7
#define VCM_NO_CODEC_REGISTERED -8
#define VCM_JITTER_BUFFER_ERROR -9
-#define VCM_OLD_PACKET_ERROR -10
-#define VCM_NO_FRAME_DECODED -11
-#define VCM_ERROR_REQUEST_SLI -12
-#define VCM_NOT_IMPLEMENTED -20
+#define VCM_OLD_PACKET_ERROR -10
+#define VCM_NO_FRAME_DECODED -11
+#define VCM_ERROR_REQUEST_SLI -12
+#define VCM_NOT_IMPLEMENTED -20
enum { kDefaultStartBitrateKbps = 300 };
@@ -62,40 +62,42 @@ class VCMPacketizationCallback {
const RTPFragmentationHeader& fragmentationHeader,
const RTPVideoHeader* rtpVideoHdr) = 0;
+ virtual void OnEncoderImplementationName(const char* implementation_name) {}
+
protected:
- virtual ~VCMPacketizationCallback() {
- }
+ virtual ~VCMPacketizationCallback() {}
};
-// Callback class used for passing decoded frames which are ready to be rendered.
+// Callback class used for passing decoded frames which are ready to be
+// rendered.
class VCMReceiveCallback {
public:
- virtual int32_t FrameToRender(VideoFrame& videoFrame) = 0;
- virtual int32_t ReceivedDecodedReferenceFrame(
- const uint64_t pictureId) {
+ virtual int32_t FrameToRender(VideoFrame& videoFrame) = 0; // NOLINT
+ virtual int32_t ReceivedDecodedReferenceFrame(const uint64_t pictureId) {
return -1;
}
// Called when the current receive codec changes.
virtual void OnIncomingPayloadType(int payload_type) {}
+ virtual void OnDecoderImplementationName(const char* implementation_name) {}
protected:
- virtual ~VCMReceiveCallback() {
- }
+ virtual ~VCMReceiveCallback() {}
};
-// Callback class used for informing the user of the bit rate and frame rate produced by the
+// Callback class used for informing the user of the bit rate and frame rate
+// produced by the
// encoder.
class VCMSendStatisticsCallback {
public:
virtual int32_t SendStatistics(const uint32_t bitRate,
- const uint32_t frameRate) = 0;
+ const uint32_t frameRate) = 0;
protected:
- virtual ~VCMSendStatisticsCallback() {
- }
+ virtual ~VCMSendStatisticsCallback() {}
};
-// Callback class used for informing the user of the incoming bit rate and frame rate.
+// Callback class used for informing the user of the incoming bit rate and frame
+// rate.
class VCMReceiveStatisticsCallback {
public:
virtual void OnReceiveRatesUpdated(uint32_t bitRate, uint32_t frameRate) = 0;
@@ -103,8 +105,7 @@ class VCMReceiveStatisticsCallback {
virtual void OnFrameCountsUpdated(const FrameCounts& frame_counts) = 0;
protected:
- virtual ~VCMReceiveStatisticsCallback() {
- }
+ virtual ~VCMReceiveStatisticsCallback() {}
};
// Callback class used for informing the user of decode timing info.
@@ -133,8 +134,7 @@ class VCMProtectionCallback {
uint32_t* sent_fec_rate_bps) = 0;
protected:
- virtual ~VCMProtectionCallback() {
- }
+ virtual ~VCMProtectionCallback() {}
};
class VideoEncoderRateObserver {
@@ -143,31 +143,30 @@ class VideoEncoderRateObserver {
virtual void OnSetRates(uint32_t bitrate_bps, int framerate) = 0;
};
-// Callback class used for telling the user about what frame type needed to continue decoding.
+// Callback class used for telling the user about what frame type needed to
+// continue decoding.
// Typically a key frame when the stream has been corrupted in some way.
class VCMFrameTypeCallback {
public:
virtual int32_t RequestKeyFrame() = 0;
- virtual int32_t SliceLossIndicationRequest(
- const uint64_t pictureId) {
+ virtual int32_t SliceLossIndicationRequest(const uint64_t pictureId) {
return -1;
}
protected:
- virtual ~VCMFrameTypeCallback() {
- }
+ virtual ~VCMFrameTypeCallback() {}
};
-// Callback class used for telling the user about which packet sequence numbers are currently
+// Callback class used for telling the user about which packet sequence numbers
+// are currently
// missing and need to be resent.
class VCMPacketRequestCallback {
public:
virtual int32_t ResendPackets(const uint16_t* sequenceNumbers,
- uint16_t length) = 0;
+ uint16_t length) = 0;
protected:
- virtual ~VCMPacketRequestCallback() {
- }
+ virtual ~VCMPacketRequestCallback() {}
};
 // Callback used to inform the user of the desired resolution
@@ -175,14 +174,13 @@ class VCMPacketRequestCallback {
class VCMQMSettingsCallback {
public:
virtual int32_t SetVideoQMSettings(const uint32_t frameRate,
- const uint32_t width,
- const uint32_t height) = 0;
+ const uint32_t width,
+ const uint32_t height) = 0;
virtual void SetTargetFramerate(int frame_rate) = 0;
protected:
- virtual ~VCMQMSettingsCallback() {
- }
+ virtual ~VCMQMSettingsCallback() {}
};
// Callback class used for telling the user about the size (in time) of the
@@ -192,10 +190,9 @@ class VCMRenderBufferSizeCallback {
virtual void RenderBufferSizeMs(int buffer_size_ms) = 0;
protected:
- virtual ~VCMRenderBufferSizeCallback() {
- }
+ virtual ~VCMRenderBufferSizeCallback() {}
};
} // namespace webrtc
-#endif // WEBRTC_MODULES_INTERFACE_VIDEO_CODING_DEFINES_H_
+#endif // WEBRTC_MODULES_VIDEO_CODING_INCLUDE_VIDEO_CODING_DEFINES_H_
diff --git a/webrtc/modules/video_coding/include/video_error_codes.h b/webrtc/modules/video_coding/include/video_error_codes.h
new file mode 100644
index 0000000000..360aa87744
--- /dev/null
+++ b/webrtc/modules/video_coding/include/video_error_codes.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_INCLUDE_VIDEO_ERROR_CODES_H_
+#define WEBRTC_MODULES_VIDEO_CODING_INCLUDE_VIDEO_ERROR_CODES_H_
+
+// NOTE: in sync with video_coding_module_defines.h
+
+// Define return values
+
+#define WEBRTC_VIDEO_CODEC_REQUEST_SLI 2
+#define WEBRTC_VIDEO_CODEC_NO_OUTPUT 1
+#define WEBRTC_VIDEO_CODEC_OK 0
+#define WEBRTC_VIDEO_CODEC_ERROR -1
+#define WEBRTC_VIDEO_CODEC_LEVEL_EXCEEDED -2
+#define WEBRTC_VIDEO_CODEC_MEMORY -3
+#define WEBRTC_VIDEO_CODEC_ERR_PARAMETER -4
+#define WEBRTC_VIDEO_CODEC_ERR_SIZE -5
+#define WEBRTC_VIDEO_CODEC_TIMEOUT -6
+#define WEBRTC_VIDEO_CODEC_UNINITIALIZED -7
+#define WEBRTC_VIDEO_CODEC_ERR_REQUEST_SLI -12
+#define WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE -13
+#define WEBRTC_VIDEO_CODEC_TARGET_BITRATE_OVERSHOOT -14
+
+#endif // WEBRTC_MODULES_VIDEO_CODING_INCLUDE_VIDEO_ERROR_CODES_H_
diff --git a/webrtc/modules/video_coding/inter_frame_delay.cc b/webrtc/modules/video_coding/inter_frame_delay.cc
new file mode 100644
index 0000000000..fb3b54d204
--- /dev/null
+++ b/webrtc/modules/video_coding/inter_frame_delay.cc
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/video_coding/inter_frame_delay.h"
+
+namespace webrtc {
+
+VCMInterFrameDelay::VCMInterFrameDelay(int64_t currentWallClock) {
+ Reset(currentWallClock);
+}
+
+// Resets the delay estimate
+void VCMInterFrameDelay::Reset(int64_t currentWallClock) {
+ _zeroWallClock = currentWallClock;
+ _wrapArounds = 0;
+ _prevWallClock = 0;
+ _prevTimestamp = 0;
+ _dTS = 0;
+}
+
+// Calculates the delay of a frame with the given timestamp.
+// This method is called when the frame is complete.
+bool VCMInterFrameDelay::CalculateDelay(uint32_t timestamp,
+ int64_t* delay,
+ int64_t currentWallClock) {
+ if (_prevWallClock == 0) {
+ // First set of data, initialization, wait for next frame
+ _prevWallClock = currentWallClock;
+ _prevTimestamp = timestamp;
+ *delay = 0;
+ return true;
+ }
+
+ int32_t prevWrapArounds = _wrapArounds;
+ CheckForWrapArounds(timestamp);
+
+ // This will be -1 for backward wrap arounds and +1 for forward wrap arounds
+ int32_t wrapAroundsSincePrev = _wrapArounds - prevWrapArounds;
+
+ // Account for reordering in jitter variance estimate in the future?
+ // Note that this also captures incomplete frames which are grabbed
+ // for decoding after a later frame has been complete, i.e. real
+ // packet losses.
+ if ((wrapAroundsSincePrev == 0 && timestamp < _prevTimestamp) ||
+ wrapAroundsSincePrev < 0) {
+ *delay = 0;
+ return false;
+ }
+
+ // Compute the compensated timestamp difference and convert it to ms and
+ // round it to closest integer.
+ _dTS = static_cast<int64_t>(
+ (timestamp + wrapAroundsSincePrev * (static_cast<int64_t>(1) << 32) -
+ _prevTimestamp) /
+ 90.0 +
+ 0.5);
+
+ // frameDelay is the difference of dT and dTS -- i.e. the difference of
+ // the wall clock time difference and the timestamp difference between
+ // two following frames.
+ *delay = static_cast<int64_t>(currentWallClock - _prevWallClock - _dTS);
+
+ _prevTimestamp = timestamp;
+ _prevWallClock = currentWallClock;
+
+ return true;
+}
+
+// Returns the current difference between incoming timestamps
+uint32_t VCMInterFrameDelay::CurrentTimeStampDiffMs() const {
+ if (_dTS < 0) {
+ return 0;
+ }
+ return static_cast<uint32_t>(_dTS);
+}
+
+// Investigates if the timestamp clock has overflowed since the last timestamp
+// and
+// keeps track of the number of wrap arounds since reset.
+void VCMInterFrameDelay::CheckForWrapArounds(uint32_t timestamp) {
+ if (timestamp < _prevTimestamp) {
+ // This difference will probably be less than -2^31 if we have had a wrap
+ // around
+  // (e.g. timestamp = 1, _prevTimestamp = 2^32 - 1). Since it is cast to
+ // a Word32,
+ // it should be positive.
+ if (static_cast<int32_t>(timestamp - _prevTimestamp) > 0) {
+ // Forward wrap around
+ _wrapArounds++;
+ }
+ // This difference will probably be less than -2^31 if we have had a
+ // backward
+ // wrap around.
+ // Since it is cast to a Word32, it should be positive.
+ } else if (static_cast<int32_t>(_prevTimestamp - timestamp) > 0) {
+ // Backward wrap around
+ _wrapArounds--;
+ }
+}
+} // namespace webrtc
diff --git a/webrtc/modules/video_coding/inter_frame_delay.h b/webrtc/modules/video_coding/inter_frame_delay.h
new file mode 100644
index 0000000000..94b73908bb
--- /dev/null
+++ b/webrtc/modules/video_coding/inter_frame_delay.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_INTER_FRAME_DELAY_H_
+#define WEBRTC_MODULES_VIDEO_CODING_INTER_FRAME_DELAY_H_
+
+#include "webrtc/typedefs.h"
+
+namespace webrtc {
+
+class VCMInterFrameDelay {
+ public:
+ explicit VCMInterFrameDelay(int64_t currentWallClock);
+
+ // Resets the estimate. Zeros are given as parameters.
+ void Reset(int64_t currentWallClock);
+
+ // Calculates the delay of a frame with the given timestamp.
+ // This method is called when the frame is complete.
+ //
+ // Input:
+ // - timestamp : RTP timestamp of a received frame
+ // - *delay : Pointer to memory where the result should be
+ // stored
+ // - currentWallClock : The current time in milliseconds.
+ // Should be -1 for normal operation, only used
+ // for testing.
+ // Return value : true if OK, false when reordered timestamps
+ bool CalculateDelay(uint32_t timestamp,
+ int64_t* delay,
+ int64_t currentWallClock);
+
+ // Returns the current difference between incoming timestamps
+ //
+ // Return value : Wrap-around compensated difference between
+ // incoming
+ // timestamps.
+ uint32_t CurrentTimeStampDiffMs() const;
+
+ private:
+ // Controls if the RTP timestamp counter has had a wrap around
+ // between the current and the previously received frame.
+ //
+ // Input:
+  //          - timestamp         : RTP timestamp of the current frame.
+ void CheckForWrapArounds(uint32_t timestamp);
+
+ int64_t _zeroWallClock; // Local timestamp of the first video packet received
+ int32_t _wrapArounds; // Number of wrapArounds detected
+ // The previous timestamp passed to the delay estimate
+ uint32_t _prevTimestamp;
+ // The previous wall clock timestamp used by the delay estimate
+ int64_t _prevWallClock;
+ // Wrap-around compensated difference between incoming timestamps
+ int64_t _dTS;
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_CODING_INTER_FRAME_DELAY_H_
diff --git a/webrtc/modules/video_coding/internal_defines.h b/webrtc/modules/video_coding/internal_defines.h
new file mode 100644
index 0000000000..e225726dea
--- /dev/null
+++ b/webrtc/modules/video_coding/internal_defines.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_INTERNAL_DEFINES_H_
+#define WEBRTC_MODULES_VIDEO_CODING_INTERNAL_DEFINES_H_
+
+#include "webrtc/typedefs.h"
+
+namespace webrtc {
+
+#define MASK_32_BITS(x) (0xFFFFFFFF & (x))
+
+inline uint32_t MaskWord64ToUWord32(int64_t w64) {
+ return static_cast<uint32_t>(MASK_32_BITS(w64));
+}
+
+#define VCM_MAX(a, b) (((a) > (b)) ? (a) : (b))
+#define VCM_MIN(a, b) (((a) < (b)) ? (a) : (b))
+
+#define VCM_DEFAULT_CODEC_WIDTH 352
+#define VCM_DEFAULT_CODEC_HEIGHT 288
+#define VCM_DEFAULT_FRAME_RATE 30
+#define VCM_MIN_BITRATE 30
+#define VCM_FLUSH_INDICATOR 4
+
+#define VCM_NO_RECEIVER_ID 0
+
+inline int32_t VCMId(const int32_t vcmId, const int32_t receiverId = 0) {
+ return static_cast<int32_t>((vcmId << 16) + receiverId);
+}
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_CODING_INTERNAL_DEFINES_H_
diff --git a/webrtc/modules/video_coding/main/source/jitter_buffer.cc b/webrtc/modules/video_coding/jitter_buffer.cc
index bfdd7867d9..640bcb4f22 100644
--- a/webrtc/modules/video_coding/main/source/jitter_buffer.cc
+++ b/webrtc/modules/video_coding/jitter_buffer.cc
@@ -7,7 +7,7 @@
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/video_coding/main/source/jitter_buffer.h"
+#include "webrtc/modules/video_coding/jitter_buffer.h"
#include <assert.h>
@@ -15,19 +15,19 @@
#include <utility>
#include "webrtc/base/checks.h"
+#include "webrtc/base/logging.h"
#include "webrtc/base/trace_event.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
-#include "webrtc/modules/video_coding/main/interface/video_coding.h"
-#include "webrtc/modules/video_coding/main/source/frame_buffer.h"
-#include "webrtc/modules/video_coding/main/source/inter_frame_delay.h"
-#include "webrtc/modules/video_coding/main/source/internal_defines.h"
-#include "webrtc/modules/video_coding/main/source/jitter_buffer_common.h"
-#include "webrtc/modules/video_coding/main/source/jitter_estimator.h"
-#include "webrtc/modules/video_coding/main/source/packet.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "webrtc/modules/video_coding/include/video_coding.h"
+#include "webrtc/modules/video_coding/frame_buffer.h"
+#include "webrtc/modules/video_coding/inter_frame_delay.h"
+#include "webrtc/modules/video_coding/internal_defines.h"
+#include "webrtc/modules/video_coding/jitter_buffer_common.h"
+#include "webrtc/modules/video_coding/jitter_estimator.h"
+#include "webrtc/modules/video_coding/packet.h"
#include "webrtc/system_wrappers/include/clock.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
#include "webrtc/system_wrappers/include/event_wrapper.h"
-#include "webrtc/system_wrappers/include/logging.h"
#include "webrtc/system_wrappers/include/metrics.h"
namespace webrtc {
@@ -38,6 +38,10 @@ static const uint32_t kSsCleanupIntervalSec = 60;
// Use this rtt if no value has been reported.
static const int64_t kDefaultRtt = 200;
+// Request a keyframe if no continuous frame has been received for this
+// number of milliseconds and NACKs are disabled.
+static const int64_t kMaxDiscontinuousFramesTime = 1000;
+
typedef std::pair<uint32_t, VCMFrameBuffer*> FrameListPair;
bool IsKeyFrame(FrameListPair pair) {
@@ -89,7 +93,7 @@ int FrameList::RecycleFramesUntilKeyFrame(FrameList::iterator* key_frame_it,
}
void FrameList::CleanUpOldOrEmptyFrames(VCMDecodingState* decoding_state,
- UnorderedFrameList* free_frames) {
+ UnorderedFrameList* free_frames) {
while (!empty()) {
VCMFrameBuffer* oldest_frame = Front();
bool remove_frame = false;
@@ -168,6 +172,7 @@ void Vp9SsMap::AdvanceFront(uint32_t timestamp) {
ss_map_[timestamp] = gof;
}
+// TODO(asapersson): Update according to updates in RTP payload profile.
bool Vp9SsMap::UpdatePacket(VCMPacket* packet) {
uint8_t gof_idx = packet->codecSpecificHeader.codecHeader.VP9.gof_idx;
if (gof_idx == kNoGofIdx)
@@ -186,7 +191,7 @@ bool Vp9SsMap::UpdatePacket(VCMPacket* packet) {
// TODO(asapersson): Set vp9.ref_picture_id[i] and add usage.
vp9->num_ref_pics = it->second.num_ref_pics[gof_idx];
- for (size_t i = 0; i < it->second.num_ref_pics[gof_idx]; ++i) {
+ for (uint8_t i = 0; i < it->second.num_ref_pics[gof_idx]; ++i) {
vp9->pid_diff[i] = it->second.pid_diff[gof_idx][i];
}
return true;
@@ -214,7 +219,7 @@ VCMJitterBuffer::VCMJitterBuffer(Clock* clock,
: clock_(clock),
running_(false),
crit_sect_(CriticalSectionWrapper::CreateCriticalSection()),
- frame_event_(event.Pass()),
+ frame_event_(std::move(event)),
max_number_of_frames_(kStartNumberOfFrames),
free_frames_(),
decodable_frames_(),
@@ -276,17 +281,18 @@ void VCMJitterBuffer::UpdateHistograms() {
return;
}
- RTC_HISTOGRAM_PERCENTAGE("WebRTC.Video.DiscardedPacketsInPercent",
- num_discarded_packets_ * 100 / num_packets_);
- RTC_HISTOGRAM_PERCENTAGE("WebRTC.Video.DuplicatedPacketsInPercent",
- num_duplicated_packets_ * 100 / num_packets_);
+ RTC_HISTOGRAM_PERCENTAGE_SPARSE("WebRTC.Video.DiscardedPacketsInPercent",
+ num_discarded_packets_ * 100 / num_packets_);
+ RTC_HISTOGRAM_PERCENTAGE_SPARSE("WebRTC.Video.DuplicatedPacketsInPercent",
+ num_duplicated_packets_ * 100 / num_packets_);
int total_frames =
receive_statistics_.key_frames + receive_statistics_.delta_frames;
if (total_frames > 0) {
- RTC_HISTOGRAM_COUNTS_100("WebRTC.Video.CompleteFramesReceivedPerSecond",
+ RTC_HISTOGRAM_COUNTS_SPARSE_100(
+ "WebRTC.Video.CompleteFramesReceivedPerSecond",
static_cast<int>((total_frames / elapsed_sec) + 0.5f));
- RTC_HISTOGRAM_COUNTS_1000(
+ RTC_HISTOGRAM_COUNTS_SPARSE_1000(
"WebRTC.Video.KeyFramesReceivedInPermille",
static_cast<int>(
(receive_statistics_.key_frames * 1000.0f / total_frames) + 0.5f));
@@ -316,7 +322,6 @@ void VCMJitterBuffer::Start() {
first_packet_since_reset_ = true;
rtt_ms_ = kDefaultRtt;
last_decoded_state_.Reset();
- vp9_ss_map_.Reset();
}
void VCMJitterBuffer::Stop() {
@@ -324,7 +329,6 @@ void VCMJitterBuffer::Stop() {
UpdateHistograms();
running_ = false;
last_decoded_state_.Reset();
- vp9_ss_map_.Reset();
// Make sure all frames are free and reset.
for (FrameList::iterator it = decodable_frames_.begin();
@@ -356,7 +360,6 @@ void VCMJitterBuffer::Flush() {
decodable_frames_.Reset(&free_frames_);
incomplete_frames_.Reset(&free_frames_);
last_decoded_state_.Reset(); // TODO(mikhal): sync reset.
- vp9_ss_map_.Reset();
num_consecutive_old_packets_ = 0;
// Also reset the jitter and delay estimates
jitter_estimate_.Reset();
@@ -428,8 +431,8 @@ void VCMJitterBuffer::IncomingRateStatistics(unsigned int* framerate,
if (incoming_bit_count_ == 0) {
*bitrate = 0;
} else {
- *bitrate = 10 * ((100 * incoming_bit_count_) /
- static_cast<unsigned int>(diff));
+ *bitrate =
+ 10 * ((100 * incoming_bit_count_) / static_cast<unsigned int>(diff));
}
incoming_bit_rate_ = *bitrate;
@@ -470,8 +473,8 @@ bool VCMJitterBuffer::CompleteSequenceWithNextFrame() {
// Returns immediately or a |max_wait_time_ms| ms event hang waiting for a
// complete frame, |max_wait_time_ms| decided by caller.
-bool VCMJitterBuffer::NextCompleteTimestamp(
- uint32_t max_wait_time_ms, uint32_t* timestamp) {
+bool VCMJitterBuffer::NextCompleteTimestamp(uint32_t max_wait_time_ms,
+ uint32_t* timestamp) {
crit_sect_->Enter();
if (!running_) {
crit_sect_->Leave();
@@ -481,13 +484,13 @@ bool VCMJitterBuffer::NextCompleteTimestamp(
if (decodable_frames_.empty() ||
decodable_frames_.Front()->GetState() != kStateComplete) {
- const int64_t end_wait_time_ms = clock_->TimeInMilliseconds() +
- max_wait_time_ms;
+ const int64_t end_wait_time_ms =
+ clock_->TimeInMilliseconds() + max_wait_time_ms;
int64_t wait_time_ms = max_wait_time_ms;
while (wait_time_ms > 0) {
crit_sect_->Leave();
const EventTypeWrapper ret =
- frame_event_->Wait(static_cast<uint32_t>(wait_time_ms));
+ frame_event_->Wait(static_cast<uint32_t>(wait_time_ms));
crit_sect_->Enter();
if (ret == kEventSignaled) {
// Are we shutting down the jitter buffer?
@@ -530,16 +533,25 @@ bool VCMJitterBuffer::NextMaybeIncompleteTimestamp(uint32_t* timestamp) {
CleanUpOldOrEmptyFrames();
+ VCMFrameBuffer* oldest_frame;
if (decodable_frames_.empty()) {
- return false;
- }
- VCMFrameBuffer* oldest_frame = decodable_frames_.Front();
- // If we have exactly one frame in the buffer, release it only if it is
- // complete. We know decodable_frames_ is not empty due to the previous
- // check.
- if (decodable_frames_.size() == 1 && incomplete_frames_.empty()
- && oldest_frame->GetState() != kStateComplete) {
- return false;
+ if (nack_mode_ != kNoNack || incomplete_frames_.size() <= 1) {
+ return false;
+ }
+ oldest_frame = incomplete_frames_.Front();
+ // Frame will only be removed from buffer if it is complete (or decodable).
+ if (oldest_frame->GetState() < kStateComplete) {
+ return false;
+ }
+ } else {
+ oldest_frame = decodable_frames_.Front();
+ // If we have exactly one frame in the buffer, release it only if it is
+ // complete. We know decodable_frames_ is not empty due to the previous
+ // check.
+ if (decodable_frames_.size() == 1 && incomplete_frames_.empty() &&
+ oldest_frame->GetState() != kStateComplete) {
+ return false;
+ }
}
*timestamp = oldest_frame->TimeStamp();
@@ -576,8 +588,7 @@ VCMEncodedFrame* VCMJitterBuffer::ExtractAndSetDecode(uint32_t timestamp) {
} else {
// Wait for this one to get complete.
waiting_for_completion_.frame_size = frame->Length();
- waiting_for_completion_.latest_packet_time =
- frame->LatestPacketTimeMs();
+ waiting_for_completion_.latest_packet_time = frame->LatestPacketTimeMs();
waiting_for_completion_.timestamp = frame->TimeStamp();
}
}
@@ -688,21 +699,6 @@ VCMFrameBufferEnum VCMJitterBuffer::InsertPacket(const VCMPacket& packet,
num_consecutive_old_packets_ = 0;
- if (packet.codec == kVideoCodecVP9) {
- if (packet.codecSpecificHeader.codecHeader.VP9.flexible_mode) {
- // TODO(asapersson): Add support for flexible mode.
- return kGeneralError;
- }
- if (!packet.codecSpecificHeader.codecHeader.VP9.flexible_mode) {
- if (vp9_ss_map_.Insert(packet))
- vp9_ss_map_.UpdateFrames(&incomplete_frames_);
-
- vp9_ss_map_.UpdatePacket(const_cast<VCMPacket*>(&packet));
- }
- if (!last_decoded_state_.in_initial_state())
- vp9_ss_map_.RemoveOld(last_decoded_state_.time_stamp());
- }
-
VCMFrameBuffer* frame;
FrameList* frame_list;
const VCMFrameBufferEnum error = GetFrame(packet, &frame, &frame_list);
@@ -745,8 +741,8 @@ VCMFrameBufferEnum VCMJitterBuffer::InsertPacket(const VCMPacket& packet,
frame->InsertPacket(packet, now_ms, decode_error_mode_, frame_data);
if (previous_state != kStateComplete) {
- TRACE_EVENT_ASYNC_BEGIN1("webrtc", "Video", frame->TimeStamp(),
- "timestamp", frame->TimeStamp());
+ TRACE_EVENT_ASYNC_BEGIN1("webrtc", "Video", frame->TimeStamp(), "timestamp",
+ frame->TimeStamp());
}
if (buffer_state > 0) {
@@ -763,8 +759,8 @@ VCMFrameBufferEnum VCMJitterBuffer::InsertPacket(const VCMPacket& packet,
buffer_state = kFlushIndicator;
}
- latest_received_sequence_number_ = LatestSequenceNumber(
- latest_received_sequence_number_, packet.seqNum);
+ latest_received_sequence_number_ =
+ LatestSequenceNumber(latest_received_sequence_number_, packet.seqNum);
}
}
@@ -796,6 +792,12 @@ VCMFrameBufferEnum VCMJitterBuffer::InsertPacket(const VCMPacket& packet,
FindAndInsertContinuousFrames(*frame);
} else {
incomplete_frames_.InsertFrame(frame);
+ // If NACKs are enabled, keyframes are triggered by |GetNackList|.
+ if (nack_mode_ == kNoNack &&
+ NonContinuousOrIncompleteDuration() >
+ 90 * kMaxDiscontinuousFramesTime) {
+ return kFlushIndicator;
+ }
}
break;
}
@@ -806,6 +808,12 @@ VCMFrameBufferEnum VCMJitterBuffer::InsertPacket(const VCMPacket& packet,
return kNoError;
} else {
incomplete_frames_.InsertFrame(frame);
+ // If NACKs are enabled, keyframes are triggered by |GetNackList|.
+ if (nack_mode_ == kNoNack &&
+ NonContinuousOrIncompleteDuration() >
+ 90 * kMaxDiscontinuousFramesTime) {
+ return kFlushIndicator;
+ }
}
break;
}
@@ -824,15 +832,15 @@ VCMFrameBufferEnum VCMJitterBuffer::InsertPacket(const VCMPacket& packet,
case kFlushIndicator:
free_frames_.push_back(frame);
return kFlushIndicator;
- default: assert(false);
+ default:
+ assert(false);
}
return buffer_state;
}
-bool VCMJitterBuffer::IsContinuousInState(const VCMFrameBuffer& frame,
+bool VCMJitterBuffer::IsContinuousInState(
+ const VCMFrameBuffer& frame,
const VCMDecodingState& decoding_state) const {
- if (decode_error_mode_ == kWithErrors)
- return true;
// Is this frame (complete or decodable) and continuous?
// kStateDecodable will never be set when decode_error_mode_ is false
// as SessionInfo determines this state based on the error mode (and frame
@@ -849,7 +857,7 @@ bool VCMJitterBuffer::IsContinuous(const VCMFrameBuffer& frame) const {
VCMDecodingState decoding_state;
decoding_state.CopyFrom(last_decoded_state_);
for (FrameList::const_iterator it = decodable_frames_.begin();
- it != decodable_frames_.end(); ++it) {
+ it != decodable_frames_.end(); ++it) {
VCMFrameBuffer* decodable_frame = it->second;
if (IsNewerTimestamp(decodable_frame->TimeStamp(), frame.TimeStamp())) {
break;
@@ -882,7 +890,7 @@ void VCMJitterBuffer::FindAndInsertContinuousFramesWithState(
// 1. Continuous base or sync layer.
// 2. The end of the list was reached.
for (FrameList::iterator it = incomplete_frames_.begin();
- it != incomplete_frames_.end();) {
+ it != incomplete_frames_.end();) {
VCMFrameBuffer* frame = it->second;
if (IsNewerTimestamp(original_decoded_state.time_stamp(),
frame->TimeStamp())) {
@@ -992,16 +1000,18 @@ std::vector<uint16_t> VCMJitterBuffer::GetNackList(bool* request_key_frame) {
if (last_decoded_state_.in_initial_state()) {
VCMFrameBuffer* next_frame = NextFrame();
const bool first_frame_is_key = next_frame &&
- next_frame->FrameType() == kVideoFrameKey &&
- next_frame->HaveFirstPacket();
+ next_frame->FrameType() == kVideoFrameKey &&
+ next_frame->HaveFirstPacket();
if (!first_frame_is_key) {
- bool have_non_empty_frame = decodable_frames_.end() != find_if(
- decodable_frames_.begin(), decodable_frames_.end(),
- HasNonEmptyState);
+ bool have_non_empty_frame =
+ decodable_frames_.end() != find_if(decodable_frames_.begin(),
+ decodable_frames_.end(),
+ HasNonEmptyState);
if (!have_non_empty_frame) {
- have_non_empty_frame = incomplete_frames_.end() != find_if(
- incomplete_frames_.begin(), incomplete_frames_.end(),
- HasNonEmptyState);
+ have_non_empty_frame =
+ incomplete_frames_.end() != find_if(incomplete_frames_.begin(),
+ incomplete_frames_.end(),
+ HasNonEmptyState);
}
bool found_key_frame = RecycleFramesUntilKeyFrame();
if (!found_key_frame) {
@@ -1020,8 +1030,8 @@ std::vector<uint16_t> VCMJitterBuffer::GetNackList(bool* request_key_frame) {
LOG_F(LS_WARNING) << "Too long non-decodable duration: "
<< non_continuous_incomplete_duration << " > "
<< 90 * max_incomplete_time_ms_;
- FrameList::reverse_iterator rit = find_if(incomplete_frames_.rbegin(),
- incomplete_frames_.rend(), IsKeyFrame);
+ FrameList::reverse_iterator rit = find_if(
+ incomplete_frames_.rbegin(), incomplete_frames_.rend(), IsKeyFrame);
if (rit == incomplete_frames_.rend()) {
// Request a key frame if we don't have one already.
*request_key_frame = true;
@@ -1061,8 +1071,7 @@ bool VCMJitterBuffer::UpdateNackList(uint16_t sequence_number) {
// Make sure we don't add packets which are already too old to be decoded.
if (!last_decoded_state_.in_initial_state()) {
latest_received_sequence_number_ = LatestSequenceNumber(
- latest_received_sequence_number_,
- last_decoded_state_.sequence_num());
+ latest_received_sequence_number_, last_decoded_state_.sequence_num());
}
if (IsNewerSequenceNumber(sequence_number,
latest_received_sequence_number_)) {
@@ -1112,8 +1121,8 @@ bool VCMJitterBuffer::MissingTooOldPacket(
if (missing_sequence_numbers_.empty()) {
return false;
}
- const uint16_t age_of_oldest_missing_packet = latest_sequence_number -
- *missing_sequence_numbers_.begin();
+ const uint16_t age_of_oldest_missing_packet =
+ latest_sequence_number - *missing_sequence_numbers_.begin();
// Recycle frames if the NACK list contains too old sequence numbers as
// the packets may have already been dropped by the sender.
return age_of_oldest_missing_packet > max_packet_age_to_nack_;
@@ -1121,8 +1130,8 @@ bool VCMJitterBuffer::MissingTooOldPacket(
bool VCMJitterBuffer::HandleTooOldPackets(uint16_t latest_sequence_number) {
bool key_frame_found = false;
- const uint16_t age_of_oldest_missing_packet = latest_sequence_number -
- *missing_sequence_numbers_.begin();
+ const uint16_t age_of_oldest_missing_packet =
+ latest_sequence_number - *missing_sequence_numbers_.begin();
LOG_F(LS_WARNING) << "NACK list contains too old sequence numbers: "
<< age_of_oldest_missing_packet << " > "
<< max_packet_age_to_nack_;
@@ -1136,9 +1145,9 @@ void VCMJitterBuffer::DropPacketsFromNackList(
uint16_t last_decoded_sequence_number) {
// Erase all sequence numbers from the NACK list which we won't need any
// longer.
- missing_sequence_numbers_.erase(missing_sequence_numbers_.begin(),
- missing_sequence_numbers_.upper_bound(
- last_decoded_sequence_number));
+ missing_sequence_numbers_.erase(
+ missing_sequence_numbers_.begin(),
+ missing_sequence_numbers_.upper_bound(last_decoded_sequence_number));
}
int64_t VCMJitterBuffer::LastDecodedTimestamp() const {
@@ -1222,11 +1231,11 @@ void VCMJitterBuffer::CountFrame(const VCMFrameBuffer& frame) {
incoming_frame_count_++;
if (frame.FrameType() == kVideoFrameKey) {
- TRACE_EVENT_ASYNC_STEP0("webrtc", "Video",
- frame.TimeStamp(), "KeyComplete");
+ TRACE_EVENT_ASYNC_STEP0("webrtc", "Video", frame.TimeStamp(),
+ "KeyComplete");
} else {
- TRACE_EVENT_ASYNC_STEP0("webrtc", "Video",
- frame.TimeStamp(), "DeltaComplete");
+ TRACE_EVENT_ASYNC_STEP0("webrtc", "Video", frame.TimeStamp(),
+ "DeltaComplete");
}
// Update receive statistics. We count all layers, thus when you use layers
@@ -1244,13 +1253,13 @@ void VCMJitterBuffer::CountFrame(const VCMFrameBuffer& frame) {
void VCMJitterBuffer::UpdateAveragePacketsPerFrame(int current_number_packets) {
if (frame_counter_ > kFastConvergeThreshold) {
- average_packets_per_frame_ = average_packets_per_frame_
- * (1 - kNormalConvergeMultiplier)
- + current_number_packets * kNormalConvergeMultiplier;
+ average_packets_per_frame_ =
+ average_packets_per_frame_ * (1 - kNormalConvergeMultiplier) +
+ current_number_packets * kNormalConvergeMultiplier;
} else if (frame_counter_ > 0) {
- average_packets_per_frame_ = average_packets_per_frame_
- * (1 - kFastConvergeMultiplier)
- + current_number_packets * kFastConvergeMultiplier;
+ average_packets_per_frame_ =
+ average_packets_per_frame_ * (1 - kFastConvergeMultiplier) +
+ current_number_packets * kFastConvergeMultiplier;
frame_counter_++;
} else {
average_packets_per_frame_ = current_number_packets;
@@ -1272,7 +1281,7 @@ void VCMJitterBuffer::CleanUpOldOrEmptyFrames() {
// Must be called from within |crit_sect_|.
bool VCMJitterBuffer::IsPacketRetransmitted(const VCMPacket& packet) const {
return missing_sequence_numbers_.find(packet.seqNum) !=
- missing_sequence_numbers_.end();
+ missing_sequence_numbers_.end();
}
// Must be called under the critical section |crit_sect_|. Should never be
@@ -1304,18 +1313,16 @@ void VCMJitterBuffer::UpdateJitterEstimate(const VCMFrameBuffer& frame,
// Must be called under the critical section |crit_sect_|. Should never be
// called with retransmitted frames, they must be filtered out before this
// function is called.
-void VCMJitterBuffer::UpdateJitterEstimate(
- int64_t latest_packet_time_ms,
- uint32_t timestamp,
- unsigned int frame_size,
- bool incomplete_frame) {
+void VCMJitterBuffer::UpdateJitterEstimate(int64_t latest_packet_time_ms,
+ uint32_t timestamp,
+ unsigned int frame_size,
+ bool incomplete_frame) {
if (latest_packet_time_ms == -1) {
return;
}
int64_t frame_delay;
- bool not_reordered = inter_frame_delay_.CalculateDelay(timestamp,
- &frame_delay,
- latest_packet_time_ms);
+ bool not_reordered = inter_frame_delay_.CalculateDelay(
+ timestamp, &frame_delay, latest_packet_time_ms);
// Filter out frames which have been reordered in time by the network
if (not_reordered) {
// Update the jitter estimate with the new samples
diff --git a/webrtc/modules/video_coding/main/source/jitter_buffer.h b/webrtc/modules/video_coding/jitter_buffer.h
index f4a3638f7d..01e27752d2 100644
--- a/webrtc/modules/video_coding/main/source/jitter_buffer.h
+++ b/webrtc/modules/video_coding/jitter_buffer.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_JITTER_BUFFER_H_
-#define WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_JITTER_BUFFER_H_
+#ifndef WEBRTC_MODULES_VIDEO_CODING_JITTER_BUFFER_H_
+#define WEBRTC_MODULES_VIDEO_CODING_JITTER_BUFFER_H_
#include <list>
#include <map>
@@ -18,22 +18,19 @@
#include "webrtc/base/constructormagic.h"
#include "webrtc/base/thread_annotations.h"
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/modules/video_coding/main/interface/video_coding.h"
-#include "webrtc/modules/video_coding/main/interface/video_coding_defines.h"
-#include "webrtc/modules/video_coding/main/source/decoding_state.h"
-#include "webrtc/modules/video_coding/main/source/inter_frame_delay.h"
-#include "webrtc/modules/video_coding/main/source/jitter_buffer_common.h"
-#include "webrtc/modules/video_coding/main/source/jitter_estimator.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/video_coding/include/video_coding.h"
+#include "webrtc/modules/video_coding/include/video_coding_defines.h"
+#include "webrtc/modules/video_coding/decoding_state.h"
+#include "webrtc/modules/video_coding/inter_frame_delay.h"
+#include "webrtc/modules/video_coding/jitter_buffer_common.h"
+#include "webrtc/modules/video_coding/jitter_estimator.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
#include "webrtc/typedefs.h"
namespace webrtc {
-enum VCMNackMode {
- kNack,
- kNoNack
-};
+enum VCMNackMode { kNack, kNoNack };
// forward declarations
class Clock;
@@ -54,8 +51,7 @@ struct VCMJitterSample {
class TimestampLessThan {
public:
- bool operator() (uint32_t timestamp1,
- uint32_t timestamp2) const {
+ bool operator()(uint32_t timestamp1, uint32_t timestamp2) const {
return IsNewerTimestamp(timestamp2, timestamp1);
}
};
@@ -68,7 +64,7 @@ class FrameList
VCMFrameBuffer* Front() const;
VCMFrameBuffer* Back() const;
int RecycleFramesUntilKeyFrame(FrameList::iterator* key_frame_it,
- UnorderedFrameList* free_frames);
+ UnorderedFrameList* free_frames);
void CleanUpOldOrEmptyFrames(VCMDecodingState* decoding_state,
UnorderedFrameList* free_frames);
void Reset(UnorderedFrameList* free_frames);
@@ -141,8 +137,7 @@ class VCMJitterBuffer {
int num_discarded_packets() const;
// Statistics, Calculate frame and bit rates.
- void IncomingRateStatistics(unsigned int* framerate,
- unsigned int* bitrate);
+ void IncomingRateStatistics(unsigned int* framerate, unsigned int* bitrate);
// Checks if the packet sequence will be complete if the next frame would be
// grabbed for decoding. That is, if a frame has been lost between the
@@ -177,8 +172,7 @@ class VCMJitterBuffer {
// Inserts a packet into a frame returned from GetFrame().
// If the return value is <= 0, |frame| is invalidated and the pointer must
// be dropped after this function returns.
- VCMFrameBufferEnum InsertPacket(const VCMPacket& packet,
- bool* retransmitted);
+ VCMFrameBufferEnum InsertPacket(const VCMPacket& packet, bool* retransmitted);
// Returns the estimated jitter in milliseconds.
uint32_t EstimatedJitterMs();
@@ -192,7 +186,8 @@ class VCMJitterBuffer {
// |low_rtt_nack_threshold_ms| is an RTT threshold in ms below which we expect
// to rely on NACK only, and therefore are using larger buffers to have time
// to wait for retransmissions.
- void SetNackMode(VCMNackMode mode, int64_t low_rtt_nack_threshold_ms,
+ void SetNackMode(VCMNackMode mode,
+ int64_t low_rtt_nack_threshold_ms,
int64_t high_rtt_nack_threshold_ms);
void SetNackSettings(size_t max_nack_list_size,
@@ -209,7 +204,7 @@ class VCMJitterBuffer {
// session. Changes will not influence frames already in the buffer.
void SetDecodeErrorMode(VCMDecodeErrorMode error_mode);
int64_t LastDecodedTimestamp() const;
- VCMDecodeErrorMode decode_error_mode() const {return decode_error_mode_;}
+ VCMDecodeErrorMode decode_error_mode() const { return decode_error_mode_; }
// Used to compute time of complete continuous frames. Returns the timestamps
// corresponding to the start and end of the continuous complete buffer.
@@ -220,8 +215,8 @@ class VCMJitterBuffer {
private:
class SequenceNumberLessThan {
public:
- bool operator() (const uint16_t& sequence_number1,
- const uint16_t& sequence_number2) const {
+ bool operator()(const uint16_t& sequence_number1,
+ const uint16_t& sequence_number2) const {
return IsNewerSequenceNumber(sequence_number2, sequence_number1);
}
};
@@ -338,8 +333,6 @@ class VCMJitterBuffer {
FrameList incomplete_frames_ GUARDED_BY(crit_sect_);
VCMDecodingState last_decoded_state_ GUARDED_BY(crit_sect_);
bool first_packet_since_reset_;
- // Contains scalability structure data for VP9.
- Vp9SsMap vp9_ss_map_ GUARDED_BY(crit_sect_);
// Statistics.
VCMReceiveStatisticsCallback* stats_callback_ GUARDED_BY(crit_sect_);
@@ -393,4 +386,4 @@ class VCMJitterBuffer {
};
} // namespace webrtc
-#endif // WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_JITTER_BUFFER_H_
+#endif // WEBRTC_MODULES_VIDEO_CODING_JITTER_BUFFER_H_
diff --git a/webrtc/modules/video_coding/main/source/jitter_buffer_common.h b/webrtc/modules/video_coding/jitter_buffer_common.h
index 97af78087a..65356f1d1b 100644
--- a/webrtc/modules/video_coding/main/source/jitter_buffer_common.h
+++ b/webrtc/modules/video_coding/jitter_buffer_common.h
@@ -19,11 +19,11 @@ namespace webrtc {
static const float kFastConvergeMultiplier = 0.4f;
static const float kNormalConvergeMultiplier = 0.2f;
-enum { kMaxNumberOfFrames = 300 };
-enum { kStartNumberOfFrames = 6 };
-enum { kMaxVideoDelayMs = 10000 };
+enum { kMaxNumberOfFrames = 300 };
+enum { kStartNumberOfFrames = 6 };
+enum { kMaxVideoDelayMs = 10000 };
enum { kPacketsPerFrameMultiplier = 5 };
-enum { kFastConvergeThreshold = 5};
+enum { kFastConvergeThreshold = 5 };
enum VCMJitterBufferEnum {
kMaxConsecutiveOldFrames = 60,
@@ -36,36 +36,36 @@ enum VCMJitterBufferEnum {
};
enum VCMFrameBufferEnum {
- kOutOfBoundsPacket = -7,
- kNotInitialized = -6,
- kOldPacket = -5,
- kGeneralError = -4,
- kFlushIndicator = -3, // Indicator that a flush has occurred.
- kTimeStampError = -2,
- kSizeError = -1,
- kNoError = 0,
- kIncomplete = 1, // Frame incomplete.
- kCompleteSession = 3, // at least one layer in the frame complete.
- kDecodableSession = 4, // Frame incomplete, but ready to be decoded
- kDuplicatePacket = 5 // We're receiving a duplicate packet.
+ kOutOfBoundsPacket = -7,
+ kNotInitialized = -6,
+ kOldPacket = -5,
+ kGeneralError = -4,
+ kFlushIndicator = -3, // Indicator that a flush has occurred.
+ kTimeStampError = -2,
+ kSizeError = -1,
+ kNoError = 0,
+ kIncomplete = 1, // Frame incomplete.
+ kCompleteSession = 3, // at least one layer in the frame complete.
+ kDecodableSession = 4, // Frame incomplete, but ready to be decoded
+ kDuplicatePacket = 5 // We're receiving a duplicate packet.
};
enum VCMFrameBufferStateEnum {
- kStateEmpty, // frame popped by the RTP receiver
- kStateIncomplete, // frame that have one or more packet(s) stored
- kStateComplete, // frame that have all packets
- kStateDecodable // Hybrid mode - frame can be decoded
+ kStateEmpty, // frame popped by the RTP receiver
+ kStateIncomplete, // frame that have one or more packet(s) stored
+ kStateComplete, // frame that have all packets
+ kStateDecodable // Hybrid mode - frame can be decoded
};
-enum { kH264StartCodeLengthBytes = 4};
+enum { kH264StartCodeLengthBytes = 4 };
// Used to indicate if a received packet contain a complete NALU (or equivalent)
enum VCMNaluCompleteness {
- kNaluUnset = 0, // Packet has not been filled.
- kNaluComplete = 1, // Packet can be decoded as is.
- kNaluStart, // Packet contain beginning of NALU
- kNaluIncomplete, // Packet is not beginning or end of NALU
- kNaluEnd, // Packet is the end of a NALU
+ kNaluUnset = 0, // Packet has not been filled.
+ kNaluComplete = 1, // Packet can be decoded as is.
+ kNaluStart, // Packet contain beginning of NALU
+ kNaluIncomplete, // Packet is not beginning or end of NALU
+ kNaluEnd, // Packet is the end of a NALU
};
} // namespace webrtc
diff --git a/webrtc/modules/video_coding/main/source/jitter_buffer_unittest.cc b/webrtc/modules/video_coding/jitter_buffer_unittest.cc
index d6c6d4985b..8abc1b5471 100644
--- a/webrtc/modules/video_coding/main/source/jitter_buffer_unittest.cc
+++ b/webrtc/modules/video_coding/jitter_buffer_unittest.cc
@@ -13,12 +13,12 @@
#include <list>
#include "testing/gtest/include/gtest/gtest.h"
-#include "webrtc/modules/video_coding/main/source/frame_buffer.h"
-#include "webrtc/modules/video_coding/main/source/jitter_buffer.h"
-#include "webrtc/modules/video_coding/main/source/media_opt_util.h"
-#include "webrtc/modules/video_coding/main/source/packet.h"
-#include "webrtc/modules/video_coding/main/source/test/stream_generator.h"
-#include "webrtc/modules/video_coding/main/test/test_util.h"
+#include "webrtc/modules/video_coding/frame_buffer.h"
+#include "webrtc/modules/video_coding/jitter_buffer.h"
+#include "webrtc/modules/video_coding/media_opt_util.h"
+#include "webrtc/modules/video_coding/packet.h"
+#include "webrtc/modules/video_coding/test/stream_generator.h"
+#include "webrtc/modules/video_coding/test/test_util.h"
#include "webrtc/system_wrappers/include/clock.h"
#include "webrtc/system_wrappers/include/metrics.h"
#include "webrtc/test/histogram.h"
@@ -26,13 +26,12 @@
namespace webrtc {
namespace {
- const uint32_t kProcessIntervalSec = 60;
+const uint32_t kProcessIntervalSec = 60;
} // namespace
class Vp9SsMapTest : public ::testing::Test {
protected:
- Vp9SsMapTest()
- : packet_(data_, 1400, 1234, 1, true) {}
+ Vp9SsMapTest() : packet_(data_, 1400, 1234, 1, true) {}
virtual void SetUp() {
packet_.isFirstPacket = true;
@@ -234,8 +233,8 @@ class TestBasicJitterBuffer : public ::testing::Test {
}
void CheckOutFrame(VCMEncodedFrame* frame_out,
- unsigned int size,
- bool startCode) {
+ unsigned int size,
+ bool startCode) {
ASSERT_TRUE(frame_out);
const uint8_t* outData = frame_out->Buffer();
@@ -280,7 +279,6 @@ class TestBasicJitterBuffer : public ::testing::Test {
rtc::scoped_ptr<VCMJitterBuffer> jitter_buffer_;
};
-
class TestRunningJitterBuffer : public ::testing::Test {
protected:
enum { kDataBufferSize = 10 };
@@ -294,8 +292,8 @@ class TestRunningJitterBuffer : public ::testing::Test {
rtc::scoped_ptr<EventWrapper>(event_factory_.CreateEvent()));
stream_generator_ = new StreamGenerator(0, clock_->TimeInMilliseconds());
jitter_buffer_->Start();
- jitter_buffer_->SetNackSettings(max_nack_list_size_,
- oldest_packet_to_nack_, 0);
+ jitter_buffer_->SetNackSettings(max_nack_list_size_, oldest_packet_to_nack_,
+ 0);
memset(data_buffer_, 0, kDataBufferSize);
}
@@ -396,9 +394,7 @@ class TestJitterBufferNack : public TestRunningJitterBuffer {
jitter_buffer_->SetNackMode(kNack, -1, -1);
}
- virtual void TearDown() {
- TestRunningJitterBuffer::TearDown();
- }
+ virtual void TearDown() { TestRunningJitterBuffer::TearDown(); }
};
TEST_F(TestBasicJitterBuffer, StopRunning) {
@@ -431,8 +427,8 @@ TEST_F(TestBasicJitterBuffer, SinglePacketFrame) {
// Insert the packet to the jitter buffer and get a frame.
bool retransmitted = false;
- EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
CheckOutFrame(frame_out, size_, false);
EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
@@ -450,8 +446,8 @@ TEST_F(TestBasicJitterBuffer, VerifyHistogramStats) {
// Insert single packet frame to the jitter buffer and get a frame.
bool retransmitted = false;
- EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
CheckOutFrame(frame_out, size_, false);
EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
@@ -460,25 +456,25 @@ TEST_F(TestBasicJitterBuffer, VerifyHistogramStats) {
// Verify that histograms are updated when the jitter buffer is stopped.
clock_->AdvanceTimeMilliseconds(metrics::kMinRunTimeInSeconds * 1000);
jitter_buffer_->Stop();
- EXPECT_EQ(0, test::LastHistogramSample(
- "WebRTC.Video.DiscardedPacketsInPercent"));
- EXPECT_EQ(0, test::LastHistogramSample(
- "WebRTC.Video.DuplicatedPacketsInPercent"));
+ EXPECT_EQ(
+ 0, test::LastHistogramSample("WebRTC.Video.DiscardedPacketsInPercent"));
+ EXPECT_EQ(
+ 0, test::LastHistogramSample("WebRTC.Video.DuplicatedPacketsInPercent"));
EXPECT_NE(-1, test::LastHistogramSample(
- "WebRTC.Video.CompleteFramesReceivedPerSecond"));
+ "WebRTC.Video.CompleteFramesReceivedPerSecond"));
EXPECT_EQ(1000, test::LastHistogramSample(
- "WebRTC.Video.KeyFramesReceivedInPermille"));
+ "WebRTC.Video.KeyFramesReceivedInPermille"));
// Verify that histograms are not updated if stop is called again.
jitter_buffer_->Stop();
+ EXPECT_EQ(
+ 1, test::NumHistogramSamples("WebRTC.Video.DiscardedPacketsInPercent"));
+ EXPECT_EQ(
+ 1, test::NumHistogramSamples("WebRTC.Video.DuplicatedPacketsInPercent"));
EXPECT_EQ(1, test::NumHistogramSamples(
- "WebRTC.Video.DiscardedPacketsInPercent"));
- EXPECT_EQ(1, test::NumHistogramSamples(
- "WebRTC.Video.DuplicatedPacketsInPercent"));
- EXPECT_EQ(1, test::NumHistogramSamples(
- "WebRTC.Video.CompleteFramesReceivedPerSecond"));
- EXPECT_EQ(1, test::NumHistogramSamples(
- "WebRTC.Video.KeyFramesReceivedInPermille"));
+ "WebRTC.Video.CompleteFramesReceivedPerSecond"));
+ EXPECT_EQ(
+ 1, test::NumHistogramSamples("WebRTC.Video.KeyFramesReceivedInPermille"));
}
TEST_F(TestBasicJitterBuffer, DualPacketFrame) {
@@ -487,8 +483,8 @@ TEST_F(TestBasicJitterBuffer, DualPacketFrame) {
packet_->markerBit = false;
bool retransmitted = false;
- EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
// Should not be complete.
EXPECT_TRUE(frame_out == NULL);
@@ -498,8 +494,8 @@ TEST_F(TestBasicJitterBuffer, DualPacketFrame) {
packet_->markerBit = true;
packet_->seqNum = seq_num_;
- EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
frame_out = DecodeCompleteFrame();
CheckOutFrame(frame_out, 2 * size_, false);
@@ -514,8 +510,8 @@ TEST_F(TestBasicJitterBuffer, 100PacketKeyFrame) {
packet_->markerBit = false;
bool retransmitted = false;
- EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
@@ -530,8 +526,8 @@ TEST_F(TestBasicJitterBuffer, 100PacketKeyFrame) {
packet_->markerBit = false;
packet_->seqNum = seq_num_;
- EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
loop++;
} while (loop < 98);
@@ -541,8 +537,8 @@ TEST_F(TestBasicJitterBuffer, 100PacketKeyFrame) {
packet_->markerBit = true;
packet_->seqNum = seq_num_;
- EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
frame_out = DecodeCompleteFrame();
@@ -558,8 +554,8 @@ TEST_F(TestBasicJitterBuffer, 100PacketDeltaFrame) {
packet_->markerBit = true;
bool retransmitted = false;
- EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
EXPECT_FALSE(frame_out == NULL);
jitter_buffer_->ReleaseFrame(frame_out);
@@ -570,8 +566,8 @@ TEST_F(TestBasicJitterBuffer, 100PacketDeltaFrame) {
packet_->frameType = kVideoFrameDelta;
packet_->timestamp += 33 * 90;
- EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
frame_out = DecodeCompleteFrame();
@@ -586,8 +582,8 @@ TEST_F(TestBasicJitterBuffer, 100PacketDeltaFrame) {
packet_->seqNum = seq_num_;
// Insert a packet into a frame.
- EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
loop++;
} while (loop < 98);
@@ -597,8 +593,8 @@ TEST_F(TestBasicJitterBuffer, 100PacketDeltaFrame) {
packet_->markerBit = true;
packet_->seqNum = seq_num_;
- EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
frame_out = DecodeCompleteFrame();
@@ -617,8 +613,8 @@ TEST_F(TestBasicJitterBuffer, PacketReorderingReverseOrder) {
packet_->timestamp = timestamp_;
bool retransmitted = false;
- EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
@@ -632,8 +628,8 @@ TEST_F(TestBasicJitterBuffer, PacketReorderingReverseOrder) {
packet_->markerBit = false;
packet_->seqNum = seq_num_;
- EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
loop++;
} while (loop < 98);
@@ -643,10 +639,10 @@ TEST_F(TestBasicJitterBuffer, PacketReorderingReverseOrder) {
packet_->markerBit = false;
packet_->seqNum = seq_num_;
- EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
- frame_out = DecodeCompleteFrame();;
+ frame_out = DecodeCompleteFrame();
CheckOutFrame(frame_out, 100 * size_, false);
@@ -660,8 +656,8 @@ TEST_F(TestBasicJitterBuffer, FrameReordering2Frames2PacketsEach) {
packet_->markerBit = false;
bool retransmitted = false;
- EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
@@ -672,23 +668,23 @@ TEST_F(TestBasicJitterBuffer, FrameReordering2Frames2PacketsEach) {
packet_->markerBit = true;
packet_->seqNum = seq_num_;
- EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
// check that we fail to get frame since seqnum is not continuous
frame_out = DecodeCompleteFrame();
EXPECT_TRUE(frame_out == NULL);
seq_num_ -= 3;
- timestamp_ -= 33*90;
+ timestamp_ -= 33 * 90;
packet_->frameType = kVideoFrameKey;
packet_->isFirstPacket = true;
packet_->markerBit = false;
packet_->seqNum = seq_num_;
packet_->timestamp = timestamp_;
- EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
frame_out = DecodeCompleteFrame();
@@ -700,8 +696,8 @@ TEST_F(TestBasicJitterBuffer, FrameReordering2Frames2PacketsEach) {
packet_->markerBit = true;
packet_->seqNum = seq_num_;
- EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
frame_out = DecodeCompleteFrame();
CheckOutFrame(frame_out, 2 * size_, false);
@@ -781,8 +777,8 @@ TEST_F(TestBasicJitterBuffer, DuplicatePackets) {
EXPECT_EQ(0, jitter_buffer_->num_duplicated_packets());
bool retransmitted = false;
- EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
@@ -791,8 +787,8 @@ TEST_F(TestBasicJitterBuffer, DuplicatePackets) {
EXPECT_EQ(0, jitter_buffer_->num_duplicated_packets());
// Insert a packet into a frame.
- EXPECT_EQ(kDuplicatePacket, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kDuplicatePacket,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
EXPECT_EQ(2, jitter_buffer_->num_packets());
EXPECT_EQ(1, jitter_buffer_->num_duplicated_packets());
@@ -801,8 +797,8 @@ TEST_F(TestBasicJitterBuffer, DuplicatePackets) {
packet_->markerBit = true;
packet_->isFirstPacket = false;
- EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
frame_out = DecodeCompleteFrame();
ASSERT_TRUE(frame_out != NULL);
@@ -885,7 +881,6 @@ TEST_F(TestBasicJitterBuffer, TestSkipForwardVp9) {
packet_->codecSpecificHeader.codecHeader.VP9.spatial_idx = 0;
packet_->codecSpecificHeader.codecHeader.VP9.beginning_of_frame = true;
packet_->codecSpecificHeader.codecHeader.VP9.end_of_frame = true;
- packet_->codecSpecificHeader.codecHeader.VP9.temporal_idx = kNoTemporalIdx;
packet_->codecSpecificHeader.codecHeader.VP9.temporal_up_switch = false;
packet_->seqNum = 65485;
@@ -893,7 +888,7 @@ TEST_F(TestBasicJitterBuffer, TestSkipForwardVp9) {
packet_->frameType = kVideoFrameKey;
packet_->codecSpecificHeader.codecHeader.VP9.picture_id = 5;
packet_->codecSpecificHeader.codecHeader.VP9.tl0_pic_idx = 200;
- packet_->codecSpecificHeader.codecHeader.VP9.gof_idx = 0;
+ packet_->codecSpecificHeader.codecHeader.VP9.temporal_idx = 0;
packet_->codecSpecificHeader.codecHeader.VP9.ss_data_available = true;
packet_->codecSpecificHeader.codecHeader.VP9.gof.SetGofInfoVP9(
kTemporalStructureMode3); // kTemporalStructureMode3: 0-2-1-2..
@@ -905,7 +900,7 @@ TEST_F(TestBasicJitterBuffer, TestSkipForwardVp9) {
packet_->frameType = kVideoFrameDelta;
packet_->codecSpecificHeader.codecHeader.VP9.picture_id = 9;
packet_->codecSpecificHeader.codecHeader.VP9.tl0_pic_idx = 201;
- packet_->codecSpecificHeader.codecHeader.VP9.gof_idx = 0;
+ packet_->codecSpecificHeader.codecHeader.VP9.temporal_idx = 0;
packet_->codecSpecificHeader.codecHeader.VP9.ss_data_available = false;
EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_, &re));
@@ -939,22 +934,22 @@ TEST_F(TestBasicJitterBuffer, ReorderedVp9SsData_3TlLayers) {
packet_->codecSpecificHeader.codecHeader.VP9.spatial_idx = 0;
packet_->codecSpecificHeader.codecHeader.VP9.beginning_of_frame = true;
packet_->codecSpecificHeader.codecHeader.VP9.end_of_frame = true;
- packet_->codecSpecificHeader.codecHeader.VP9.temporal_idx = kNoTemporalIdx;
- packet_->codecSpecificHeader.codecHeader.VP9.temporal_up_switch = false;
packet_->codecSpecificHeader.codecHeader.VP9.tl0_pic_idx = 200;
packet_->seqNum = 65486;
packet_->timestamp = 6000;
packet_->frameType = kVideoFrameDelta;
packet_->codecSpecificHeader.codecHeader.VP9.picture_id = 6;
- packet_->codecSpecificHeader.codecHeader.VP9.gof_idx = 1;
+ packet_->codecSpecificHeader.codecHeader.VP9.temporal_idx = 2;
+ packet_->codecSpecificHeader.codecHeader.VP9.temporal_up_switch = true;
EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_, &re));
packet_->seqNum = 65487;
packet_->timestamp = 9000;
packet_->frameType = kVideoFrameDelta;
packet_->codecSpecificHeader.codecHeader.VP9.picture_id = 7;
- packet_->codecSpecificHeader.codecHeader.VP9.gof_idx = 2;
+ packet_->codecSpecificHeader.codecHeader.VP9.temporal_idx = 1;
+ packet_->codecSpecificHeader.codecHeader.VP9.temporal_up_switch = true;
EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_, &re));
// Insert first frame with SS data.
@@ -964,7 +959,8 @@ TEST_F(TestBasicJitterBuffer, ReorderedVp9SsData_3TlLayers) {
packet_->width = 352;
packet_->height = 288;
packet_->codecSpecificHeader.codecHeader.VP9.picture_id = 5;
- packet_->codecSpecificHeader.codecHeader.VP9.gof_idx = 0;
+ packet_->codecSpecificHeader.codecHeader.VP9.temporal_idx = 0;
+ packet_->codecSpecificHeader.codecHeader.VP9.temporal_up_switch = false;
packet_->codecSpecificHeader.codecHeader.VP9.ss_data_available = true;
packet_->codecSpecificHeader.codecHeader.VP9.gof.SetGofInfoVP9(
kTemporalStructureMode3); // kTemporalStructureMode3: 0-2-1-2..
@@ -1011,8 +1007,6 @@ TEST_F(TestBasicJitterBuffer, ReorderedVp9SsData_2Tl2SLayers) {
packet_->codecSpecificHeader.codecHeader.VP9.flexible_mode = false;
packet_->codecSpecificHeader.codecHeader.VP9.beginning_of_frame = true;
packet_->codecSpecificHeader.codecHeader.VP9.end_of_frame = true;
- packet_->codecSpecificHeader.codecHeader.VP9.temporal_idx = kNoTemporalIdx;
- packet_->codecSpecificHeader.codecHeader.VP9.temporal_up_switch = false;
packet_->codecSpecificHeader.codecHeader.VP9.tl0_pic_idx = 200;
packet_->isFirstPacket = true;
@@ -1022,7 +1016,8 @@ TEST_F(TestBasicJitterBuffer, ReorderedVp9SsData_2Tl2SLayers) {
packet_->frameType = kVideoFrameDelta;
packet_->codecSpecificHeader.codecHeader.VP9.spatial_idx = 0;
packet_->codecSpecificHeader.codecHeader.VP9.picture_id = 6;
- packet_->codecSpecificHeader.codecHeader.VP9.gof_idx = 1;
+ packet_->codecSpecificHeader.codecHeader.VP9.temporal_idx = 1;
+ packet_->codecSpecificHeader.codecHeader.VP9.temporal_up_switch = true;
EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_, &re));
packet_->isFirstPacket = false;
@@ -1031,7 +1026,8 @@ TEST_F(TestBasicJitterBuffer, ReorderedVp9SsData_2Tl2SLayers) {
packet_->frameType = kVideoFrameDelta;
packet_->codecSpecificHeader.codecHeader.VP9.spatial_idx = 1;
packet_->codecSpecificHeader.codecHeader.VP9.picture_id = 6;
- packet_->codecSpecificHeader.codecHeader.VP9.gof_idx = 1;
+ packet_->codecSpecificHeader.codecHeader.VP9.temporal_idx = 1;
+ packet_->codecSpecificHeader.codecHeader.VP9.temporal_up_switch = true;
EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_, &re));
packet_->isFirstPacket = false;
@@ -1041,7 +1037,8 @@ TEST_F(TestBasicJitterBuffer, ReorderedVp9SsData_2Tl2SLayers) {
packet_->frameType = kVideoFrameKey;
packet_->codecSpecificHeader.codecHeader.VP9.spatial_idx = 1;
packet_->codecSpecificHeader.codecHeader.VP9.picture_id = 5;
- packet_->codecSpecificHeader.codecHeader.VP9.gof_idx = 0;
+ packet_->codecSpecificHeader.codecHeader.VP9.temporal_idx = 0;
+ packet_->codecSpecificHeader.codecHeader.VP9.temporal_up_switch = false;
EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_, &re));
// Insert first frame with SS data.
@@ -1053,7 +1050,8 @@ TEST_F(TestBasicJitterBuffer, ReorderedVp9SsData_2Tl2SLayers) {
packet_->height = 288;
packet_->codecSpecificHeader.codecHeader.VP9.spatial_idx = 0;
packet_->codecSpecificHeader.codecHeader.VP9.picture_id = 5;
- packet_->codecSpecificHeader.codecHeader.VP9.gof_idx = 0;
+ packet_->codecSpecificHeader.codecHeader.VP9.temporal_idx = 0;
+ packet_->codecSpecificHeader.codecHeader.VP9.temporal_up_switch = false;
packet_->codecSpecificHeader.codecHeader.VP9.ss_data_available = true;
packet_->codecSpecificHeader.codecHeader.VP9.gof.SetGofInfoVP9(
kTemporalStructureMode2); // kTemporalStructureMode3: 0-1-0-1..
@@ -1084,8 +1082,8 @@ TEST_F(TestBasicJitterBuffer, H264InsertStartCode) {
packet_->insertStartCode = true;
bool retransmitted = false;
- EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
@@ -1097,8 +1095,8 @@ TEST_F(TestBasicJitterBuffer, H264InsertStartCode) {
packet_->markerBit = true;
packet_->seqNum = seq_num_;
- EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
frame_out = DecodeCompleteFrame();
CheckOutFrame(frame_out, size_ * 2 + 4 * 2, true);
@@ -1118,8 +1116,8 @@ TEST_F(TestBasicJitterBuffer, PacketLossWithSelectiveErrorsThresholdCheck) {
packet_->timestamp = timestamp_;
bool retransmitted = false;
- EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
uint32_t timestamp = 0;
EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
EXPECT_FALSE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
@@ -1127,8 +1125,8 @@ TEST_F(TestBasicJitterBuffer, PacketLossWithSelectiveErrorsThresholdCheck) {
packet_->isFirstPacket = false;
for (int i = 1; i < 9; ++i) {
packet_->seqNum++;
- EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
EXPECT_FALSE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
}
@@ -1137,8 +1135,8 @@ TEST_F(TestBasicJitterBuffer, PacketLossWithSelectiveErrorsThresholdCheck) {
packet_->markerBit = true;
packet_->seqNum++;
- EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
CheckOutFrame(frame_out, 10 * size_, false);
EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
@@ -1152,8 +1150,8 @@ TEST_F(TestBasicJitterBuffer, PacketLossWithSelectiveErrorsThresholdCheck) {
packet_->seqNum += 100;
packet_->timestamp += 33 * 90 * 8;
- EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kDecodableSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
EXPECT_FALSE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
@@ -1161,23 +1159,23 @@ TEST_F(TestBasicJitterBuffer, PacketLossWithSelectiveErrorsThresholdCheck) {
packet_->seqNum -= 99;
packet_->timestamp -= 33 * 90 * 7;
- EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kDecodableSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
EXPECT_TRUE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
packet_->isFirstPacket = false;
for (int i = 1; i < 8; ++i) {
packet_->seqNum++;
- EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kDecodableSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
EXPECT_TRUE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
}
packet_->seqNum++;
- EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kDecodableSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
EXPECT_TRUE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
@@ -1189,8 +1187,7 @@ TEST_F(TestBasicJitterBuffer, PacketLossWithSelectiveErrorsThresholdCheck) {
packet_->markerBit = true;
packet_->seqNum++;
- EXPECT_EQ(kOldPacket, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kOldPacket, jitter_buffer_->InsertPacket(*packet_, &retransmitted));
}
// Make sure first packet is present before a frame can be decoded.
@@ -1204,8 +1201,8 @@ TEST_F(TestBasicJitterBuffer, PacketLossWithSelectiveErrorsIncompleteKey) {
packet_->timestamp = timestamp_;
bool retransmitted = false;
- EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
CheckOutFrame(frame_out, size_, false);
EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
@@ -1217,9 +1214,9 @@ TEST_F(TestBasicJitterBuffer, PacketLossWithSelectiveErrorsIncompleteKey) {
packet_->isFirstPacket = false;
packet_->markerBit = false;
packet_->seqNum += 100;
- packet_->timestamp += 33*90*8;
- EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ packet_->timestamp += 33 * 90 * 8;
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
uint32_t timestamp;
EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
EXPECT_FALSE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
@@ -1228,10 +1225,10 @@ TEST_F(TestBasicJitterBuffer, PacketLossWithSelectiveErrorsIncompleteKey) {
packet_->frameType = kVideoFrameKey;
packet_->isFirstPacket = true;
packet_->seqNum -= 99;
- packet_->timestamp -= 33*90*7;
+ packet_->timestamp -= 33 * 90 * 7;
- EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
EXPECT_FALSE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
@@ -1240,8 +1237,8 @@ TEST_F(TestBasicJitterBuffer, PacketLossWithSelectiveErrorsIncompleteKey) {
packet_->isFirstPacket = false;
for (int i = 1; i < 5; ++i) {
packet_->seqNum++;
- EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
EXPECT_FALSE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
}
@@ -1249,8 +1246,8 @@ TEST_F(TestBasicJitterBuffer, PacketLossWithSelectiveErrorsIncompleteKey) {
// Complete key frame.
packet_->markerBit = true;
packet_->seqNum++;
- EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
frame_out = DecodeCompleteFrame();
CheckOutFrame(frame_out, 6 * size_, false);
EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
@@ -1268,8 +1265,8 @@ TEST_F(TestBasicJitterBuffer, PacketLossWithSelectiveErrorsMissingFirstPacket) {
packet_->timestamp = timestamp_;
bool retransmitted = false;
- EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
CheckOutFrame(frame_out, size_, false);
EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
@@ -1281,9 +1278,9 @@ TEST_F(TestBasicJitterBuffer, PacketLossWithSelectiveErrorsMissingFirstPacket) {
packet_->isFirstPacket = false;
packet_->markerBit = false;
packet_->seqNum += 100;
- packet_->timestamp += 33*90*8;
- EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ packet_->timestamp += 33 * 90 * 8;
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
uint32_t timestamp;
EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
EXPECT_FALSE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
@@ -1291,17 +1288,17 @@ TEST_F(TestBasicJitterBuffer, PacketLossWithSelectiveErrorsMissingFirstPacket) {
// Insert second frame with the first packet missing. Make sure we're waiting
// for the key frame to be complete.
packet_->seqNum -= 98;
- packet_->timestamp -= 33*90*7;
+ packet_->timestamp -= 33 * 90 * 7;
- EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
EXPECT_FALSE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
for (int i = 0; i < 5; ++i) {
packet_->seqNum++;
- EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
EXPECT_FALSE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
}
@@ -1309,8 +1306,8 @@ TEST_F(TestBasicJitterBuffer, PacketLossWithSelectiveErrorsMissingFirstPacket) {
// Add first packet. Frame should now be decodable, but incomplete.
packet_->isFirstPacket = true;
packet_->seqNum -= 6;
- EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kDecodableSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
EXPECT_TRUE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
@@ -1329,8 +1326,8 @@ TEST_F(TestBasicJitterBuffer, DiscontinuousStreamWhenDecodingWithErrors) {
packet_->seqNum = seq_num_;
packet_->timestamp = timestamp_;
bool retransmitted = false;
- EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
uint32_t next_timestamp;
EXPECT_TRUE(jitter_buffer_->NextCompleteTimestamp(0, &next_timestamp));
EXPECT_EQ(packet_->timestamp, next_timestamp);
@@ -1346,8 +1343,8 @@ TEST_F(TestBasicJitterBuffer, DiscontinuousStreamWhenDecodingWithErrors) {
packet_->markerBit = false;
packet_->seqNum = seq_num_;
packet_->timestamp = timestamp_;
- EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kDecodableSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
// Insert a packet (so the previous one will be released).
timestamp_ += 33 * 90;
seq_num_ += 2;
@@ -1356,8 +1353,8 @@ TEST_F(TestBasicJitterBuffer, DiscontinuousStreamWhenDecodingWithErrors) {
packet_->markerBit = false;
packet_->seqNum = seq_num_;
packet_->timestamp = timestamp_;
- EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kDecodableSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &next_timestamp));
EXPECT_TRUE(jitter_buffer_->NextMaybeIncompleteTimestamp(&next_timestamp));
EXPECT_EQ(packet_->timestamp - 33 * 90, next_timestamp);
@@ -1382,12 +1379,12 @@ TEST_F(TestBasicJitterBuffer, PacketLoss) {
packet_->completeNALU = kNaluStart;
bool retransmitted = false;
- EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kDecodableSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
for (int i = 0; i < 11; ++i) {
webrtc::FrameType frametype = kVideoFrameDelta;
seq_num_++;
- timestamp_ += 33*90;
+ timestamp_ += 33 * 90;
packet_->frameType = frametype;
packet_->isFirstPacket = true;
packet_->markerBit = false;
@@ -1395,8 +1392,8 @@ TEST_F(TestBasicJitterBuffer, PacketLoss) {
packet_->timestamp = timestamp_;
packet_->completeNALU = kNaluStart;
- EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kDecodableSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
@@ -1430,9 +1427,9 @@ TEST_F(TestBasicJitterBuffer, PacketLoss) {
CheckOutFrame(frame_out, size_, false);
if (i == 0) {
- EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
+ EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
} else {
- EXPECT_EQ(frametype, frame_out->FrameType());
+ EXPECT_EQ(frametype, frame_out->FrameType());
}
EXPECT_FALSE(frame_out->Complete());
EXPECT_FALSE(frame_out->MissingFrame());
@@ -1446,18 +1443,15 @@ TEST_F(TestBasicJitterBuffer, PacketLoss) {
timestamp_ -= 33 * 90;
packet_->timestamp = timestamp_ - 1000;
- EXPECT_EQ(kOldPacket, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kOldPacket, jitter_buffer_->InsertPacket(*packet_, &retransmitted));
packet_->timestamp = timestamp_ - 500;
- EXPECT_EQ(kOldPacket, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kOldPacket, jitter_buffer_->InsertPacket(*packet_, &retransmitted));
packet_->timestamp = timestamp_ - 100;
- EXPECT_EQ(kOldPacket, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kOldPacket, jitter_buffer_->InsertPacket(*packet_, &retransmitted));
EXPECT_EQ(3, jitter_buffer_->num_discarded_packets());
@@ -1476,8 +1470,8 @@ TEST_F(TestBasicJitterBuffer, DeltaFrame100PacketsWithSeqNumWrap) {
packet_->timestamp = timestamp_;
bool retransmitted = false;
- EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
@@ -1490,8 +1484,8 @@ TEST_F(TestBasicJitterBuffer, DeltaFrame100PacketsWithSeqNumWrap) {
packet_->markerBit = false;
packet_->seqNum = seq_num_;
- EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
frame_out = DecodeCompleteFrame();
@@ -1505,8 +1499,8 @@ TEST_F(TestBasicJitterBuffer, DeltaFrame100PacketsWithSeqNumWrap) {
packet_->markerBit = true;
packet_->seqNum = seq_num_;
- EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
frame_out = DecodeCompleteFrame();
@@ -1525,8 +1519,8 @@ TEST_F(TestBasicJitterBuffer, PacketReorderingReverseWithNegSeqNumWrap) {
packet_->seqNum = seq_num_;
bool retransmitted = false;
- EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
// Should not be complete.
@@ -1540,8 +1534,8 @@ TEST_F(TestBasicJitterBuffer, PacketReorderingReverseWithNegSeqNumWrap) {
packet_->markerBit = false;
packet_->seqNum = seq_num_;
- EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
frame_out = DecodeCompleteFrame();
@@ -1556,8 +1550,8 @@ TEST_F(TestBasicJitterBuffer, PacketReorderingReverseWithNegSeqNumWrap) {
packet_->markerBit = false;
packet_->seqNum = seq_num_;
- EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
frame_out = DecodeCompleteFrame();
CheckOutFrame(frame_out, 100 * size_, false);
@@ -1579,8 +1573,8 @@ TEST_F(TestBasicJitterBuffer, TestInsertOldFrame) {
packet_->seqNum = seq_num_;
bool retransmitted = false;
- EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
EXPECT_EQ(3000u, frame_out->TimeStamp());
@@ -1596,8 +1590,7 @@ TEST_F(TestBasicJitterBuffer, TestInsertOldFrame) {
packet_->seqNum = seq_num_;
packet_->timestamp = timestamp_;
- EXPECT_EQ(kOldPacket, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kOldPacket, jitter_buffer_->InsertPacket(*packet_, &retransmitted));
}
TEST_F(TestBasicJitterBuffer, TestInsertOldFrameWithSeqNumWrap) {
@@ -1615,8 +1608,8 @@ TEST_F(TestBasicJitterBuffer, TestInsertOldFrameWithSeqNumWrap) {
packet_->timestamp = timestamp_;
bool retransmitted = false;
- EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
EXPECT_EQ(timestamp_, frame_out->TimeStamp());
@@ -1635,10 +1628,8 @@ TEST_F(TestBasicJitterBuffer, TestInsertOldFrameWithSeqNumWrap) {
packet_->seqNum = seq_num_;
packet_->timestamp = timestamp_;
-
// This timestamp is old.
- EXPECT_EQ(kOldPacket, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kOldPacket, jitter_buffer_->InsertPacket(*packet_, &retransmitted));
}
TEST_F(TestBasicJitterBuffer, TimestampWrap) {
@@ -1655,8 +1646,8 @@ TEST_F(TestBasicJitterBuffer, TimestampWrap) {
packet_->timestamp = timestamp_;
bool retransmitted = false;
- EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
EXPECT_TRUE(frame_out == NULL);
@@ -1666,23 +1657,23 @@ TEST_F(TestBasicJitterBuffer, TimestampWrap) {
packet_->markerBit = true;
packet_->seqNum = seq_num_;
- EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
frame_out = DecodeCompleteFrame();
CheckOutFrame(frame_out, 2 * size_, false);
jitter_buffer_->ReleaseFrame(frame_out);
seq_num_++;
- timestamp_ += 33*90;
+ timestamp_ += 33 * 90;
packet_->frameType = kVideoFrameDelta;
packet_->isFirstPacket = true;
packet_->markerBit = false;
packet_->seqNum = seq_num_;
packet_->timestamp = timestamp_;
- EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
frame_out = DecodeCompleteFrame();
EXPECT_TRUE(frame_out == NULL);
@@ -1692,8 +1683,8 @@ TEST_F(TestBasicJitterBuffer, TimestampWrap) {
packet_->markerBit = true;
packet_->seqNum = seq_num_;
- EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
frame_out = DecodeCompleteFrame();
CheckOutFrame(frame_out, 2 * size_, false);
@@ -1715,8 +1706,8 @@ TEST_F(TestBasicJitterBuffer, 2FrameWithTimestampWrap) {
bool retransmitted = false;
// Insert first frame (session will be complete).
- EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
// Insert next frame.
seq_num_++;
@@ -1727,8 +1718,8 @@ TEST_F(TestBasicJitterBuffer, 2FrameWithTimestampWrap) {
packet_->seqNum = seq_num_;
packet_->timestamp = timestamp_;
- EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
EXPECT_EQ(0xffffff00, frame_out->TimeStamp());
@@ -1758,8 +1749,8 @@ TEST_F(TestBasicJitterBuffer, Insert2FramesReOrderedWithTimestampWrap) {
packet_->timestamp = timestamp_;
bool retransmitted = false;
- EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
// Insert second frame
seq_num_--;
@@ -1770,8 +1761,8 @@ TEST_F(TestBasicJitterBuffer, Insert2FramesReOrderedWithTimestampWrap) {
packet_->seqNum = seq_num_;
packet_->timestamp = timestamp_;
- EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
EXPECT_EQ(0xffffff00, frame_out->TimeStamp());
@@ -1798,12 +1789,12 @@ TEST_F(TestBasicJitterBuffer, DeltaFrameWithMoreThanMaxNumberOfPackets) {
packet_->seqNum = seq_num_;
if (firstPacket) {
- EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
firstPacket = false;
} else {
- EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kIncomplete,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
}
loop++;
@@ -1817,10 +1808,8 @@ TEST_F(TestBasicJitterBuffer, DeltaFrameWithMoreThanMaxNumberOfPackets) {
packet_->seqNum = seq_num_;
// Insert the packet -> frame recycled.
- EXPECT_EQ(kSizeError, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kSizeError, jitter_buffer_->InsertPacket(*packet_, &retransmitted));
EXPECT_TRUE(NULL == DecodeCompleteFrame());
-
}
TEST_F(TestBasicJitterBuffer, ExceedNumOfFrameWithSeqNumWrap) {
@@ -1832,13 +1821,18 @@ TEST_F(TestBasicJitterBuffer, ExceedNumOfFrameWithSeqNumWrap) {
// --------------------------------------------------------------
// |<-----------delta frames------------->|<------key frames----->|
+ // Make sure the jitter doesn't request a keyframe after too much non-
+ // decodable frames.
+ jitter_buffer_->SetNackMode(kNack, -1, -1);
+ jitter_buffer_->SetNackSettings(kMaxNumberOfFrames, kMaxNumberOfFrames, 0);
+
int loop = 0;
seq_num_ = 65485;
uint32_t first_key_frame_timestamp = 0;
bool retransmitted = false;
// Insert MAX_NUMBER_OF_FRAMES frames.
do {
- timestamp_ += 33*90;
+ timestamp_ += 33 * 90;
seq_num_++;
packet_->isFirstPacket = true;
packet_->markerBit = true;
@@ -1851,8 +1845,8 @@ TEST_F(TestBasicJitterBuffer, ExceedNumOfFrameWithSeqNumWrap) {
}
// Insert frame.
- EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
loop++;
} while (loop < kMaxNumberOfFrames);
@@ -1860,7 +1854,7 @@ TEST_F(TestBasicJitterBuffer, ExceedNumOfFrameWithSeqNumWrap) {
// Max number of frames inserted.
// Insert one more frame.
- timestamp_ += 33*90;
+ timestamp_ += 33 * 90;
seq_num_++;
packet_->isFirstPacket = true;
packet_->markerBit = true;
@@ -1894,8 +1888,7 @@ TEST_F(TestBasicJitterBuffer, EmptyLastFrame) {
packet_->timestamp = timestamp_;
packet_->frameType = kEmptyFrame;
- EXPECT_EQ(kNoError, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kNoError, jitter_buffer_->InsertPacket(*packet_, &retransmitted));
VCMEncodedFrame* testFrame = DecodeIncompleteFrame();
// Timestamp should never be the last TS inserted.
if (testFrame != NULL) {
@@ -1919,8 +1912,8 @@ TEST_F(TestBasicJitterBuffer, H264IncompleteNalu) {
packet_->markerBit = false;
bool retransmitted = false;
- EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kDecodableSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
seq_num_ += 2; // Skip one packet.
packet_->seqNum = seq_num_;
@@ -1929,8 +1922,8 @@ TEST_F(TestBasicJitterBuffer, H264IncompleteNalu) {
packet_->completeNALU = kNaluIncomplete;
packet_->markerBit = false;
- EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kDecodableSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
seq_num_++;
packet_->seqNum = seq_num_;
@@ -1939,15 +1932,15 @@ TEST_F(TestBasicJitterBuffer, H264IncompleteNalu) {
packet_->completeNALU = kNaluEnd;
packet_->markerBit = false;
- EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kDecodableSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
seq_num_++;
packet_->seqNum = seq_num_;
packet_->completeNALU = kNaluComplete;
packet_->markerBit = true; // Last packet.
- EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kDecodableSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
// The JB will only output (incomplete) frames if a packet belonging to a
// subsequent frame was already inserted. Insert one packet of a subsequent
// frame. place high timestamp so the JB would always have a next frame
@@ -1960,8 +1953,8 @@ TEST_F(TestBasicJitterBuffer, H264IncompleteNalu) {
packet_->completeNALU = kNaluStart;
packet_->markerBit = false;
- EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kDecodableSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
VCMEncodedFrame* frame_out = DecodeIncompleteFrame();
@@ -1973,7 +1966,7 @@ TEST_F(TestBasicJitterBuffer, H264IncompleteNalu) {
// Test reordered start frame + 1 lost.
seq_num_ += 2; // Re-order 1 frame.
- timestamp_ += 33*90;
+ timestamp_ += 33 * 90;
insertedLength = 0;
packet_->seqNum = seq_num_;
@@ -1982,9 +1975,9 @@ TEST_F(TestBasicJitterBuffer, H264IncompleteNalu) {
packet_->isFirstPacket = false;
packet_->completeNALU = kNaluEnd;
packet_->markerBit = false;
- EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
- insertedLength += packet_->sizeBytes; // This packet should be decoded.
+ EXPECT_EQ(kDecodableSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+ insertedLength += packet_->sizeBytes; // This packet should be decoded.
seq_num_--;
packet_->seqNum = seq_num_;
packet_->timestamp = timestamp_;
@@ -1993,8 +1986,8 @@ TEST_F(TestBasicJitterBuffer, H264IncompleteNalu) {
packet_->completeNALU = kNaluStart;
packet_->markerBit = false;
- EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kDecodableSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
insertedLength += packet_->sizeBytes; // This packet should be decoded.
seq_num_ += 3; // One packet drop.
@@ -2004,8 +1997,8 @@ TEST_F(TestBasicJitterBuffer, H264IncompleteNalu) {
packet_->isFirstPacket = false;
packet_->completeNALU = kNaluComplete;
packet_->markerBit = false;
- EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kDecodableSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
insertedLength += packet_->sizeBytes; // This packet should be decoded.
seq_num_++;
packet_->seqNum = seq_num_;
@@ -2014,8 +2007,8 @@ TEST_F(TestBasicJitterBuffer, H264IncompleteNalu) {
packet_->isFirstPacket = false;
packet_->completeNALU = kNaluStart;
packet_->markerBit = false;
- EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kDecodableSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
// This packet should be decoded since it's the beginning of a NAL.
insertedLength += packet_->sizeBytes;
@@ -2026,8 +2019,8 @@ TEST_F(TestBasicJitterBuffer, H264IncompleteNalu) {
packet_->isFirstPacket = false;
packet_->completeNALU = kNaluEnd;
packet_->markerBit = true;
- EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kDecodableSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
// This packet should not be decoded because it is an incomplete NAL if it
// is the last.
frame_out = DecodeIncompleteFrame();
@@ -2045,8 +2038,8 @@ TEST_F(TestBasicJitterBuffer, H264IncompleteNalu) {
emptypacket.isFirstPacket = true;
emptypacket.completeNALU = kNaluComplete;
emptypacket.markerBit = true;
- EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(emptypacket,
- &retransmitted));
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(emptypacket, &retransmitted));
// This packet should not be decoded because it is an incomplete NAL if it
// is the last.
@@ -2067,8 +2060,8 @@ TEST_F(TestBasicJitterBuffer, H264IncompleteNalu) {
packet_->completeNALU = kNaluComplete;
packet_->markerBit = false;
- EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kDecodableSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
seq_num_++;
emptypacket.seqNum = seq_num_;
@@ -2077,8 +2070,8 @@ TEST_F(TestBasicJitterBuffer, H264IncompleteNalu) {
emptypacket.isFirstPacket = true;
emptypacket.completeNALU = kNaluComplete;
emptypacket.markerBit = true;
- EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(emptypacket,
- &retransmitted));
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(emptypacket, &retransmitted));
frame_out = DecodeCompleteFrame();
// Only last NALU is complete
@@ -2097,8 +2090,8 @@ TEST_F(TestBasicJitterBuffer, NextFrameWhenIncomplete) {
packet_->markerBit = true;
bool retransmitted = false;
- EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
VCMEncodedFrame* frame_out = DecodeCompleteFrame();
EXPECT_TRUE(frame_out != NULL);
jitter_buffer_->ReleaseFrame(frame_out);
@@ -2109,9 +2102,8 @@ TEST_F(TestBasicJitterBuffer, NextFrameWhenIncomplete) {
packet_->isFirstPacket = false;
packet_->markerBit = false;
-
- EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kDecodableSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
frame_out = DecodeIncompleteFrame();
EXPECT_TRUE(frame_out == NULL);
@@ -2120,8 +2112,8 @@ TEST_F(TestBasicJitterBuffer, NextFrameWhenIncomplete) {
packet_->timestamp += 33 * 90;
packet_->isFirstPacket = true;
- EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
- &retransmitted));
+ EXPECT_EQ(kDecodableSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
frame_out = DecodeIncompleteFrame();
CheckOutFrame(frame_out, packet_->sizeBytes, false);
@@ -2129,6 +2121,10 @@ TEST_F(TestBasicJitterBuffer, NextFrameWhenIncomplete) {
}
TEST_F(TestRunningJitterBuffer, Full) {
+ // Make sure the jitter doesn't request a keyframe after too much non-
+ // decodable frames.
+ jitter_buffer_->SetNackMode(kNack, -1, -1);
+ jitter_buffer_->SetNackSettings(kMaxNumberOfFrames, kMaxNumberOfFrames, 0);
// Insert a key frame and decode it.
EXPECT_GE(InsertFrame(kVideoFrameKey), kNoError);
EXPECT_TRUE(DecodeCompleteFrame());
@@ -2277,8 +2273,8 @@ TEST_F(TestJitterBufferNack, NackTooOldPackets) {
// old packet.
DropFrame(1);
// Insert a frame which should trigger a recycle until the next key frame.
- EXPECT_EQ(kFlushIndicator, InsertFrames(oldest_packet_to_nack_ + 1,
- kVideoFrameDelta));
+ EXPECT_EQ(kFlushIndicator,
+ InsertFrames(oldest_packet_to_nack_ + 1, kVideoFrameDelta));
EXPECT_FALSE(DecodeCompleteFrame());
bool request_key_frame = false;
@@ -2369,7 +2365,7 @@ TEST_F(TestJitterBufferNack, NackListBuiltBeforeFirstDecode) {
stream_generator_->Init(0, clock_->TimeInMilliseconds());
InsertFrame(kVideoFrameKey);
stream_generator_->GenerateFrame(kVideoFrameDelta, 2, 0,
- clock_->TimeInMilliseconds());
+ clock_->TimeInMilliseconds());
stream_generator_->NextPacket(NULL); // Drop packet.
EXPECT_EQ(kIncomplete, InsertPacketAndPop(0));
EXPECT_TRUE(DecodeCompleteFrame());
@@ -2397,8 +2393,8 @@ TEST_F(TestJitterBufferNack, VerifyRetransmittedFlag) {
EXPECT_EQ(1u, nack_list.size());
stream_generator_->PopPacket(&packet, 0);
EXPECT_EQ(packet.seqNum, nack_list[0]);
- EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(packet,
- &retransmitted));
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(packet, &retransmitted));
EXPECT_TRUE(retransmitted);
EXPECT_TRUE(DecodeCompleteFrame());
}
@@ -2406,7 +2402,7 @@ TEST_F(TestJitterBufferNack, VerifyRetransmittedFlag) {
TEST_F(TestJitterBufferNack, UseNackToRecoverFirstKeyFrame) {
stream_generator_->Init(0, clock_->TimeInMilliseconds());
stream_generator_->GenerateFrame(kVideoFrameKey, 3, 0,
- clock_->TimeInMilliseconds());
+ clock_->TimeInMilliseconds());
EXPECT_EQ(kIncomplete, InsertPacketAndPop(0));
// Drop second packet.
EXPECT_EQ(kIncomplete, InsertPacketAndPop(1));
@@ -2454,7 +2450,7 @@ TEST_F(TestJitterBufferNack, NormalOperation) {
// | 1 | 2 | .. | 8 | 9 | x | 11 | 12 | .. | 19 | x | 21 | .. | 100 |
// ----------------------------------------------------------------
stream_generator_->GenerateFrame(kVideoFrameKey, 100, 0,
- clock_->TimeInMilliseconds());
+ clock_->TimeInMilliseconds());
clock_->AdvanceTimeMilliseconds(kDefaultFramePeriodMs);
EXPECT_EQ(kDecodableSession, InsertPacketAndPop(0));
// Verify that the frame is incomplete.
@@ -2490,7 +2486,7 @@ TEST_F(TestJitterBufferNack, NormalOperationWrap) {
EXPECT_FALSE(request_key_frame);
EXPECT_TRUE(DecodeCompleteFrame());
stream_generator_->GenerateFrame(kVideoFrameDelta, 100, 0,
- clock_->TimeInMilliseconds());
+ clock_->TimeInMilliseconds());
EXPECT_EQ(kIncomplete, InsertPacketAndPop(0));
while (stream_generator_->PacketsRemaining() > 1) {
if (stream_generator_->NextSequenceNumber() % 10 != 0) {
@@ -2527,7 +2523,7 @@ TEST_F(TestJitterBufferNack, NormalOperationWrap2) {
clock_->TimeInMilliseconds());
clock_->AdvanceTimeMilliseconds(kDefaultFramePeriodMs);
for (int i = 0; i < 5; ++i) {
- if (stream_generator_->NextSequenceNumber() != 65535) {
+ if (stream_generator_->NextSequenceNumber() != 65535) {
EXPECT_EQ(kCompleteSession, InsertPacketAndPop(0));
EXPECT_FALSE(request_key_frame);
} else {
diff --git a/webrtc/modules/video_coding/jitter_estimator.cc b/webrtc/modules/video_coding/jitter_estimator.cc
new file mode 100644
index 0000000000..8270c60e01
--- /dev/null
+++ b/webrtc/modules/video_coding/jitter_estimator.cc
@@ -0,0 +1,443 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/video_coding/jitter_estimator.h"
+
+#include <assert.h>
+#include <math.h>
+#include <stdlib.h>
+#include <string.h>
+#include <string>
+
+#include "webrtc/modules/video_coding/internal_defines.h"
+#include "webrtc/modules/video_coding/rtt_filter.h"
+#include "webrtc/system_wrappers/include/clock.h"
+#include "webrtc/system_wrappers/include/field_trial.h"
+
+namespace webrtc {
+
+enum { kStartupDelaySamples = 30 };
+enum { kFsAccuStartupSamples = 5 };
+enum { kMaxFramerateEstimate = 200 };
+
+VCMJitterEstimator::VCMJitterEstimator(const Clock* clock,
+ int32_t vcmId,
+ int32_t receiverId)
+ : _vcmId(vcmId),
+ _receiverId(receiverId),
+ _phi(0.97),
+ _psi(0.9999),
+ _alphaCountMax(400),
+ _thetaLow(0.000001),
+ _nackLimit(3),
+ _numStdDevDelayOutlier(15),
+ _numStdDevFrameSizeOutlier(3),
+ _noiseStdDevs(2.33), // ~Less than 1% chance
+ // (look up in normal distribution table)...
+ _noiseStdDevOffset(30.0), // ...of getting 30 ms freezes
+ _rttFilter(),
+ fps_counter_(30), // TODO(sprang): Use an estimator with limit based on
+ // time, rather than number of samples.
+ low_rate_experiment_(kInit),
+ clock_(clock) {
+ Reset();
+}
+
+VCMJitterEstimator::~VCMJitterEstimator() {}
+
+VCMJitterEstimator& VCMJitterEstimator::operator=(
+ const VCMJitterEstimator& rhs) {
+ if (this != &rhs) {
+ memcpy(_thetaCov, rhs._thetaCov, sizeof(_thetaCov));
+ memcpy(_Qcov, rhs._Qcov, sizeof(_Qcov));
+
+ _vcmId = rhs._vcmId;
+ _receiverId = rhs._receiverId;
+ _avgFrameSize = rhs._avgFrameSize;
+ _varFrameSize = rhs._varFrameSize;
+ _maxFrameSize = rhs._maxFrameSize;
+ _fsSum = rhs._fsSum;
+ _fsCount = rhs._fsCount;
+ _lastUpdateT = rhs._lastUpdateT;
+ _prevEstimate = rhs._prevEstimate;
+ _prevFrameSize = rhs._prevFrameSize;
+ _avgNoise = rhs._avgNoise;
+ _alphaCount = rhs._alphaCount;
+ _filterJitterEstimate = rhs._filterJitterEstimate;
+ _startupCount = rhs._startupCount;
+ _latestNackTimestamp = rhs._latestNackTimestamp;
+ _nackCount = rhs._nackCount;
+ _rttFilter = rhs._rttFilter;
+ }
+ return *this;
+}
+
+// Resets the JitterEstimate
+void VCMJitterEstimator::Reset() {
+ _theta[0] = 1 / (512e3 / 8);
+ _theta[1] = 0;
+ _varNoise = 4.0;
+
+ _thetaCov[0][0] = 1e-4;
+ _thetaCov[1][1] = 1e2;
+ _thetaCov[0][1] = _thetaCov[1][0] = 0;
+ _Qcov[0][0] = 2.5e-10;
+ _Qcov[1][1] = 1e-10;
+ _Qcov[0][1] = _Qcov[1][0] = 0;
+ _avgFrameSize = 500;
+ _maxFrameSize = 500;
+ _varFrameSize = 100;
+ _lastUpdateT = -1;
+ _prevEstimate = -1.0;
+ _prevFrameSize = 0;
+ _avgNoise = 0.0;
+ _alphaCount = 1;
+ _filterJitterEstimate = 0.0;
+ _latestNackTimestamp = 0;
+ _nackCount = 0;
+ _fsSum = 0;
+ _fsCount = 0;
+ _startupCount = 0;
+ _rttFilter.Reset();
+ fps_counter_.Reset();
+}
+
+void VCMJitterEstimator::ResetNackCount() {
+ _nackCount = 0;
+}
+
+// Updates the estimates with the new measurements
+void VCMJitterEstimator::UpdateEstimate(int64_t frameDelayMS,
+ uint32_t frameSizeBytes,
+ bool incompleteFrame /* = false */) {
+ if (frameSizeBytes == 0) {
+ return;
+ }
+ int deltaFS = frameSizeBytes - _prevFrameSize;
+ if (_fsCount < kFsAccuStartupSamples) {
+ _fsSum += frameSizeBytes;
+ _fsCount++;
+ } else if (_fsCount == kFsAccuStartupSamples) {
+ // Give the frame size filter
+ _avgFrameSize = static_cast<double>(_fsSum) / static_cast<double>(_fsCount);
+ _fsCount++;
+ }
+ if (!incompleteFrame || frameSizeBytes > _avgFrameSize) {
+ double avgFrameSize = _phi * _avgFrameSize + (1 - _phi) * frameSizeBytes;
+ if (frameSizeBytes < _avgFrameSize + 2 * sqrt(_varFrameSize)) {
+ // Only update the average frame size if this sample wasn't a
+ // key frame
+ _avgFrameSize = avgFrameSize;
+ }
+ // Update the variance anyway since we want to capture cases where we only
+ // get
+ // key frames.
+ _varFrameSize = VCM_MAX(_phi * _varFrameSize +
+ (1 - _phi) * (frameSizeBytes - avgFrameSize) *
+ (frameSizeBytes - avgFrameSize),
+ 1.0);
+ }
+
+ // Update max frameSize estimate
+ _maxFrameSize =
+ VCM_MAX(_psi * _maxFrameSize, static_cast<double>(frameSizeBytes));
+
+ if (_prevFrameSize == 0) {
+ _prevFrameSize = frameSizeBytes;
+ return;
+ }
+ _prevFrameSize = frameSizeBytes;
+
+ // Only update the Kalman filter if the sample is not considered
+ // an extreme outlier. Even if it is an extreme outlier from a
+ // delay point of view, if the frame size also is large the
+ // deviation is probably due to an incorrect line slope.
+ double deviation = DeviationFromExpectedDelay(frameDelayMS, deltaFS);
+
+ if (fabs(deviation) < _numStdDevDelayOutlier * sqrt(_varNoise) ||
+ frameSizeBytes >
+ _avgFrameSize + _numStdDevFrameSizeOutlier * sqrt(_varFrameSize)) {
+ // Update the variance of the deviation from the
+ // line given by the Kalman filter
+ EstimateRandomJitter(deviation, incompleteFrame);
+ // Prevent updating with frames which have been congested by a large
+ // frame, and therefore arrives almost at the same time as that frame.
+ // This can occur when we receive a large frame (key frame) which
+ // has been delayed. The next frame is of normal size (delta frame),
+ // and thus deltaFS will be << 0. This removes all frame samples
+ // which arrives after a key frame.
+ if ((!incompleteFrame || deviation >= 0.0) &&
+ static_cast<double>(deltaFS) > -0.25 * _maxFrameSize) {
+ // Update the Kalman filter with the new data
+ KalmanEstimateChannel(frameDelayMS, deltaFS);
+ }
+ } else {
+ int nStdDev =
+ (deviation >= 0) ? _numStdDevDelayOutlier : -_numStdDevDelayOutlier;
+ EstimateRandomJitter(nStdDev * sqrt(_varNoise), incompleteFrame);
+ }
+ // Post process the total estimated jitter
+ if (_startupCount >= kStartupDelaySamples) {
+ PostProcessEstimate();
+ } else {
+ _startupCount++;
+ }
+}
+
+// Updates the nack/packet ratio
+void VCMJitterEstimator::FrameNacked() {
+ // Wait until _nackLimit retransmissions has been received,
+ // then always add ~1 RTT delay.
+ // TODO(holmer): Should we ever remove the additional delay if the
+ // the packet losses seem to have stopped? We could for instance scale
+ // the number of RTTs to add with the amount of retransmissions in a given
+ // time interval, or similar.
+ if (_nackCount < _nackLimit) {
+ _nackCount++;
+ }
+}
+
+// Updates Kalman estimate of the channel
+// The caller is expected to sanity check the inputs.
+void VCMJitterEstimator::KalmanEstimateChannel(int64_t frameDelayMS,
+ int32_t deltaFSBytes) {
+ double Mh[2];
+ double hMh_sigma;
+ double kalmanGain[2];
+ double measureRes;
+ double t00, t01;
+
+ // Kalman filtering
+
+ // Prediction
+ // M = M + Q
+ _thetaCov[0][0] += _Qcov[0][0];
+ _thetaCov[0][1] += _Qcov[0][1];
+ _thetaCov[1][0] += _Qcov[1][0];
+ _thetaCov[1][1] += _Qcov[1][1];
+
+ // Kalman gain
+ // K = M*h'/(sigma2n + h*M*h') = M*h'/(1 + h*M*h')
+ // h = [dFS 1]
+ // Mh = M*h'
+ // hMh_sigma = h*M*h' + R
+ Mh[0] = _thetaCov[0][0] * deltaFSBytes + _thetaCov[0][1];
+ Mh[1] = _thetaCov[1][0] * deltaFSBytes + _thetaCov[1][1];
+ // sigma weights measurements with a small deltaFS as noisy and
+ // measurements with large deltaFS as good
+ if (_maxFrameSize < 1.0) {
+ return;
+ }
+ double sigma = (300.0 * exp(-fabs(static_cast<double>(deltaFSBytes)) /
+ (1e0 * _maxFrameSize)) +
+ 1) *
+ sqrt(_varNoise);
+ if (sigma < 1.0) {
+ sigma = 1.0;
+ }
+ hMh_sigma = deltaFSBytes * Mh[0] + Mh[1] + sigma;
+ if ((hMh_sigma < 1e-9 && hMh_sigma >= 0) ||
+ (hMh_sigma > -1e-9 && hMh_sigma <= 0)) {
+ assert(false);
+ return;
+ }
+ kalmanGain[0] = Mh[0] / hMh_sigma;
+ kalmanGain[1] = Mh[1] / hMh_sigma;
+
+ // Correction
+ // theta = theta + K*(dT - h*theta)
+ measureRes = frameDelayMS - (deltaFSBytes * _theta[0] + _theta[1]);
+ _theta[0] += kalmanGain[0] * measureRes;
+ _theta[1] += kalmanGain[1] * measureRes;
+
+ if (_theta[0] < _thetaLow) {
+ _theta[0] = _thetaLow;
+ }
+
+ // M = (I - K*h)*M
+ t00 = _thetaCov[0][0];
+ t01 = _thetaCov[0][1];
+ _thetaCov[0][0] = (1 - kalmanGain[0] * deltaFSBytes) * t00 -
+ kalmanGain[0] * _thetaCov[1][0];
+ _thetaCov[0][1] = (1 - kalmanGain[0] * deltaFSBytes) * t01 -
+ kalmanGain[0] * _thetaCov[1][1];
+ _thetaCov[1][0] = _thetaCov[1][0] * (1 - kalmanGain[1]) -
+ kalmanGain[1] * deltaFSBytes * t00;
+ _thetaCov[1][1] = _thetaCov[1][1] * (1 - kalmanGain[1]) -
+ kalmanGain[1] * deltaFSBytes * t01;
+
+ // Covariance matrix, must be positive semi-definite
+ assert(_thetaCov[0][0] + _thetaCov[1][1] >= 0 &&
+ _thetaCov[0][0] * _thetaCov[1][1] -
+ _thetaCov[0][1] * _thetaCov[1][0] >=
+ 0 &&
+ _thetaCov[0][0] >= 0);
+}
+
+// Calculate difference in delay between a sample and the
+// expected delay estimated by the Kalman filter
+double VCMJitterEstimator::DeviationFromExpectedDelay(
+ int64_t frameDelayMS,
+ int32_t deltaFSBytes) const {
+ return frameDelayMS - (_theta[0] * deltaFSBytes + _theta[1]);
+}
+
+// Estimates the random jitter by calculating the variance of the
+// sample distance from the line given by theta.
+void VCMJitterEstimator::EstimateRandomJitter(double d_dT,
+ bool incompleteFrame) {
+ uint64_t now = clock_->TimeInMicroseconds();
+ if (_lastUpdateT != -1) {
+ fps_counter_.AddSample(now - _lastUpdateT);
+ }
+ _lastUpdateT = now;
+
+ if (_alphaCount == 0) {
+ assert(false);
+ return;
+ }
+ double alpha =
+ static_cast<double>(_alphaCount - 1) / static_cast<double>(_alphaCount);
+ _alphaCount++;
+ if (_alphaCount > _alphaCountMax)
+ _alphaCount = _alphaCountMax;
+
+ if (LowRateExperimentEnabled()) {
+ // In order to avoid a low frame rate stream to react slower to changes,
+ // scale the alpha weight relative a 30 fps stream.
+ double fps = GetFrameRate();
+ if (fps > 0.0) {
+ double rate_scale = 30.0 / fps;
+ // At startup, there can be a lot of noise in the fps estimate.
+ // Interpolate rate_scale linearly, from 1.0 at sample #1, to 30.0 / fps
+ // at sample #kStartupDelaySamples.
+ if (_alphaCount < kStartupDelaySamples) {
+ rate_scale =
+ (_alphaCount * rate_scale + (kStartupDelaySamples - _alphaCount)) /
+ kStartupDelaySamples;
+ }
+ alpha = pow(alpha, rate_scale);
+ }
+ }
+
+ double avgNoise = alpha * _avgNoise + (1 - alpha) * d_dT;
+ double varNoise =
+ alpha * _varNoise + (1 - alpha) * (d_dT - _avgNoise) * (d_dT - _avgNoise);
+ if (!incompleteFrame || varNoise > _varNoise) {
+ _avgNoise = avgNoise;
+ _varNoise = varNoise;
+ }
+ if (_varNoise < 1.0) {
+ // The variance should never be zero, since we might get
+ // stuck and consider all samples as outliers.
+ _varNoise = 1.0;
+ }
+}
+
+double VCMJitterEstimator::NoiseThreshold() const {
+ double noiseThreshold = _noiseStdDevs * sqrt(_varNoise) - _noiseStdDevOffset;
+ if (noiseThreshold < 1.0) {
+ noiseThreshold = 1.0;
+ }
+ return noiseThreshold;
+}
+
+// Calculates the current jitter estimate from the filtered estimates
+double VCMJitterEstimator::CalculateEstimate() {
+ double ret = _theta[0] * (_maxFrameSize - _avgFrameSize) + NoiseThreshold();
+
+ // A very low estimate (or negative) is neglected
+ if (ret < 1.0) {
+ if (_prevEstimate <= 0.01) {
+ ret = 1.0;
+ } else {
+ ret = _prevEstimate;
+ }
+ }
+ if (ret > 10000.0) { // Sanity
+ ret = 10000.0;
+ }
+ _prevEstimate = ret;
+ return ret;
+}
+
+void VCMJitterEstimator::PostProcessEstimate() {
+ _filterJitterEstimate = CalculateEstimate();
+}
+
+void VCMJitterEstimator::UpdateRtt(int64_t rttMs) {
+ _rttFilter.Update(rttMs);
+}
+
+void VCMJitterEstimator::UpdateMaxFrameSize(uint32_t frameSizeBytes) {
+ if (_maxFrameSize < frameSizeBytes) {
+ _maxFrameSize = frameSizeBytes;
+ }
+}
+
+// Returns the current filtered estimate if available,
+// otherwise tries to calculate an estimate.
+int VCMJitterEstimator::GetJitterEstimate(double rttMultiplier) {
+ double jitterMS = CalculateEstimate() + OPERATING_SYSTEM_JITTER;
+ if (_filterJitterEstimate > jitterMS)
+ jitterMS = _filterJitterEstimate;
+ if (_nackCount >= _nackLimit)
+ jitterMS += _rttFilter.RttMs() * rttMultiplier;
+
+ if (LowRateExperimentEnabled()) {
+ static const double kJitterScaleLowThreshold = 5.0;
+ static const double kJitterScaleHighThreshold = 10.0;
+ double fps = GetFrameRate();
+ // Ignore jitter for very low fps streams.
+ if (fps < kJitterScaleLowThreshold) {
+ if (fps == 0.0) {
+ return jitterMS;
+ }
+ return 0;
+ }
+
+ // Semi-low frame rate; scale by factor linearly interpolated from 0.0 at
+ // kJitterScaleLowThreshold to 1.0 at kJitterScaleHighThreshold.
+ if (fps < kJitterScaleHighThreshold) {
+ jitterMS =
+ (1.0 / (kJitterScaleHighThreshold - kJitterScaleLowThreshold)) *
+ (fps - kJitterScaleLowThreshold) * jitterMS;
+ }
+ }
+
+ return static_cast<uint32_t>(jitterMS + 0.5);
+}
+
+bool VCMJitterEstimator::LowRateExperimentEnabled() {
+ if (low_rate_experiment_ == kInit) {
+ std::string group =
+ webrtc::field_trial::FindFullName("WebRTC-ReducedJitterDelay");
+ if (group == "Disabled") {
+ low_rate_experiment_ = kDisabled;
+ } else {
+ low_rate_experiment_ = kEnabled;
+ }
+ }
+ return low_rate_experiment_ == kEnabled ? true : false;
+}
+
+double VCMJitterEstimator::GetFrameRate() const {
+ if (fps_counter_.count() == 0)
+ return 0;
+
+ double fps = 1000000.0 / fps_counter_.ComputeMean();
+ // Sanity check.
+ assert(fps >= 0.0);
+ if (fps > kMaxFramerateEstimate) {
+ fps = kMaxFramerateEstimate;
+ }
+ return fps;
+}
+} // namespace webrtc
diff --git a/webrtc/modules/video_coding/jitter_estimator.h b/webrtc/modules/video_coding/jitter_estimator.h
new file mode 100644
index 0000000000..a7b4b3e3df
--- /dev/null
+++ b/webrtc/modules/video_coding/jitter_estimator.h
@@ -0,0 +1,170 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_JITTER_ESTIMATOR_H_
+#define WEBRTC_MODULES_VIDEO_CODING_JITTER_ESTIMATOR_H_
+
+#include "webrtc/base/rollingaccumulator.h"
+#include "webrtc/modules/video_coding/rtt_filter.h"
+#include "webrtc/typedefs.h"
+
+namespace webrtc {
+
+class Clock;
+
+class VCMJitterEstimator {
+ public:
+ VCMJitterEstimator(const Clock* clock,
+ int32_t vcmId = 0,
+ int32_t receiverId = 0);
+ virtual ~VCMJitterEstimator();
+ VCMJitterEstimator& operator=(const VCMJitterEstimator& rhs);
+
+ // Resets the estimate to the initial state
+ void Reset();
+ void ResetNackCount();
+
+ // Updates the jitter estimate with the new data.
+ //
+ // Input:
+ // - frameDelay : Delay-delta calculated by UTILDelayEstimate in
+ // milliseconds
+ // - frameSize : Frame size of the current frame.
+ // - incompleteFrame : Flags if the frame is used to update the
+ // estimate before it
+ // was complete. Default is false.
+ void UpdateEstimate(int64_t frameDelayMS,
+ uint32_t frameSizeBytes,
+ bool incompleteFrame = false);
+
+ // Returns the current jitter estimate in milliseconds and adds
+ // also adds an RTT dependent term in cases of retransmission.
+ // Input:
+ // - rttMultiplier : RTT param multiplier (when applicable).
+ //
+ // Return value : Jitter estimate in milliseconds
+ int GetJitterEstimate(double rttMultiplier);
+
+ // Updates the nack counter.
+ void FrameNacked();
+
+ // Updates the RTT filter.
+ //
+ // Input:
+ // - rttMs : RTT in ms
+ void UpdateRtt(int64_t rttMs);
+
+ void UpdateMaxFrameSize(uint32_t frameSizeBytes);
+
+ // A constant describing the delay from the jitter buffer
+ // to the delay on the receiving side which is not accounted
+ // for by the jitter buffer nor the decoding delay estimate.
+ static const uint32_t OPERATING_SYSTEM_JITTER = 10;
+
+ protected:
+ // These are protected for better testing possibilities
+ double _theta[2]; // Estimated line parameters (slope, offset)
+ double _varNoise; // Variance of the time-deviation from the line
+
+ virtual bool LowRateExperimentEnabled();
+
+ private:
+ // Updates the Kalman filter for the line describing
+ // the frame size dependent jitter.
+ //
+ // Input:
+ // - frameDelayMS : Delay-delta calculated by UTILDelayEstimate in
+ // milliseconds
+ // - deltaFSBytes : Frame size delta, i.e.
+ // : frame size at time T minus frame size at time
+ // T-1
+ void KalmanEstimateChannel(int64_t frameDelayMS, int32_t deltaFSBytes);
+
+ // Updates the random jitter estimate, i.e. the variance
+ // of the time deviations from the line given by the Kalman filter.
+ //
+ // Input:
+ // - d_dT : The deviation from the kalman estimate
+ // - incompleteFrame : True if the frame used to update the
+ // estimate
+ // with was incomplete
+ void EstimateRandomJitter(double d_dT, bool incompleteFrame);
+
+ double NoiseThreshold() const;
+
+ // Calculates the current jitter estimate.
+ //
+ // Return value : The current jitter estimate in milliseconds
+ double CalculateEstimate();
+
+ // Post process the calculated estimate
+ void PostProcessEstimate();
+
+ // Calculates the difference in delay between a sample and the
+ // expected delay estimated by the Kalman filter.
+ //
+ // Input:
+ // - frameDelayMS : Delay-delta calculated by UTILDelayEstimate in
+ // milliseconds
+ // - deltaFS : Frame size delta, i.e. frame size at time
+ // T minus frame size at time T-1
+ //
+ // Return value : The difference in milliseconds
+ double DeviationFromExpectedDelay(int64_t frameDelayMS,
+ int32_t deltaFSBytes) const;
+
+ double GetFrameRate() const;
+
+ // Constants, filter parameters
+ int32_t _vcmId;
+ int32_t _receiverId;
+ const double _phi;
+ const double _psi;
+ const uint32_t _alphaCountMax;
+ const double _thetaLow;
+ const uint32_t _nackLimit;
+ const int32_t _numStdDevDelayOutlier;
+ const int32_t _numStdDevFrameSizeOutlier;
+ const double _noiseStdDevs;
+ const double _noiseStdDevOffset;
+
+ double _thetaCov[2][2]; // Estimate covariance
+ double _Qcov[2][2]; // Process noise covariance
+ double _avgFrameSize; // Average frame size
+ double _varFrameSize; // Frame size variance
+ double _maxFrameSize; // Largest frame size received (descending
+ // with a factor _psi)
+ uint32_t _fsSum;
+ uint32_t _fsCount;
+
+ int64_t _lastUpdateT;
+ double _prevEstimate; // The previously returned jitter estimate
+ uint32_t _prevFrameSize; // Frame size of the previous frame
+ double _avgNoise; // Average of the random jitter
+ uint32_t _alphaCount;
+ double _filterJitterEstimate; // The filtered sum of jitter estimates
+
+ uint32_t _startupCount;
+
+ int64_t
+ _latestNackTimestamp; // Timestamp in ms when the latest nack was seen
+ uint32_t _nackCount; // Keeps track of the number of nacks received,
+ // but never goes above _nackLimit
+ VCMRttFilter _rttFilter;
+
+ rtc::RollingAccumulator<uint64_t> fps_counter_;
+ enum ExperimentFlag { kInit, kEnabled, kDisabled };
+ ExperimentFlag low_rate_experiment_;
+ const Clock* clock_;
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_CODING_JITTER_ESTIMATOR_H_
diff --git a/webrtc/modules/video_coding/main/source/jitter_estimator_tests.cc b/webrtc/modules/video_coding/jitter_estimator_tests.cc
index c69c4bcdad..3d46ce2bcd 100644
--- a/webrtc/modules/video_coding/main/source/jitter_estimator_tests.cc
+++ b/webrtc/modules/video_coding/jitter_estimator_tests.cc
@@ -7,7 +7,7 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/video_coding/main/source/jitter_estimator.h"
+#include "webrtc/modules/video_coding/jitter_estimator.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/system_wrappers/include/clock.h"
diff --git a/webrtc/modules/video_coding/main/interface/video_coding.h b/webrtc/modules/video_coding/main/interface/video_coding.h
deleted file mode 100644
index 67f7b635cb..0000000000
--- a/webrtc/modules/video_coding/main/interface/video_coding.h
+++ /dev/null
@@ -1,544 +0,0 @@
-/*
- * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef WEBRTC_MODULES_INTERFACE_VIDEO_CODING_H_
-#define WEBRTC_MODULES_INTERFACE_VIDEO_CODING_H_
-
-#if defined(WEBRTC_WIN)
-// This is a workaround on Windows due to the fact that some Windows
-// headers define CreateEvent as a macro to either CreateEventW or CreateEventA.
-// This can cause problems since we use that name as well and could
-// declare them as one thing here whereas in another place a windows header
-// may have been included and then implementing CreateEvent() causes compilation
-// errors. So for consistency, we include the main windows header here.
-#include <windows.h>
-#endif
-
-#include "webrtc/modules/interface/module.h"
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/modules/video_coding/main/interface/video_coding_defines.h"
-#include "webrtc/system_wrappers/include/event_wrapper.h"
-#include "webrtc/video_frame.h"
-
-namespace webrtc
-{
-
-class Clock;
-class EncodedImageCallback;
-class VideoEncoder;
-class VideoDecoder;
-struct CodecSpecificInfo;
-
-class EventFactory {
- public:
- virtual ~EventFactory() {}
-
- virtual EventWrapper* CreateEvent() = 0;
-};
-
-class EventFactoryImpl : public EventFactory {
- public:
- virtual ~EventFactoryImpl() {}
-
- virtual EventWrapper* CreateEvent() {
- return EventWrapper::Create();
- }
-};
-
-// Used to indicate which decode with errors mode should be used.
-enum VCMDecodeErrorMode {
- kNoErrors, // Never decode with errors. Video will freeze
- // if nack is disabled.
- kSelectiveErrors, // Frames that are determined decodable in
- // VCMSessionInfo may be decoded with missing
- // packets. As not all incomplete frames will be
- // decodable, video will freeze if nack is disabled.
- kWithErrors // Release frames as needed. Errors may be
- // introduced as some encoded frames may not be
- // complete.
-};
-
-class VideoCodingModule : public Module
-{
-public:
- enum SenderNackMode {
- kNackNone,
- kNackAll,
- kNackSelective
- };
-
- enum ReceiverRobustness {
- kNone,
- kHardNack,
- kSoftNack,
- kReferenceSelection
- };
-
- static VideoCodingModule* Create(
- Clock* clock,
- VideoEncoderRateObserver* encoder_rate_observer,
- VCMQMSettingsCallback* qm_settings_callback);
-
- static VideoCodingModule* Create(Clock* clock, EventFactory* event_factory);
-
- static void Destroy(VideoCodingModule* module);
-
- // Get number of supported codecs
- //
- // Return value : Number of supported codecs
- static uint8_t NumberOfCodecs();
-
- // Get supported codec settings with using id
- //
- // Input:
- // - listId : Id or index of the codec to look up
- // - codec : Memory where the codec settings will be stored
- //
- // Return value : VCM_OK, on success
- // VCM_PARAMETER_ERROR if codec not supported or id too high
- static int32_t Codec(const uint8_t listId, VideoCodec* codec);
-
- // Get supported codec settings using codec type
- //
- // Input:
- // - codecType : The codec type to get settings for
- // - codec : Memory where the codec settings will be stored
- //
- // Return value : VCM_OK, on success
- // VCM_PARAMETER_ERROR if codec not supported
- static int32_t Codec(VideoCodecType codecType, VideoCodec* codec);
-
- /*
- * Sender
- */
-
- // Registers a codec to be used for encoding. Calling this
- // API multiple times overwrites any previously registered codecs.
- //
- // NOTE: Must be called on the thread that constructed the VCM instance.
- //
- // Input:
- // - sendCodec : Settings for the codec to be registered.
- // - numberOfCores : The number of cores the codec is allowed
- // to use.
- // - maxPayloadSize : The maximum size each payload is allowed
- // to have. Usually MTU - overhead.
- //
- // Return value : VCM_OK, on success.
- // < 0, on error.
- virtual int32_t RegisterSendCodec(const VideoCodec* sendCodec,
- uint32_t numberOfCores,
- uint32_t maxPayloadSize) = 0;
-
- // Get the current send codec in use.
- //
- // If a codec has not been set yet, the |id| property of the return value
- // will be 0 and |name| empty.
- //
- // NOTE: This method intentionally does not hold locks and minimizes data
- // copying. It must be called on the thread where the VCM was constructed.
- virtual const VideoCodec& GetSendCodec() const = 0;
-
- // DEPRECATED: Use GetSendCodec() instead.
- //
- // API to get the current send codec in use.
- //
- // Input:
- // - currentSendCodec : Address where the sendCodec will be written.
- //
- // Return value : VCM_OK, on success.
- // < 0, on error.
- //
- // NOTE: The returned codec information is not guaranteed to be current when
- // the call returns. This method acquires a lock that is aligned with
- // video encoding, so it should be assumed to be allowed to block for
- // several milliseconds.
- virtual int32_t SendCodec(VideoCodec* currentSendCodec) const = 0;
-
- // DEPRECATED: Use GetSendCodec() instead.
- //
- // API to get the current send codec type
- //
- // Return value : Codec type, on success.
- // kVideoCodecUnknown, on error or if no send codec is set
- // NOTE: Same notes apply as for SendCodec() above.
- virtual VideoCodecType SendCodec() const = 0;
-
- // Register an external encoder object. This can not be used together with
- // external decoder callbacks.
- //
- // Input:
- // - externalEncoder : Encoder object to be used for encoding frames inserted
- // with the AddVideoFrame API.
- // - payloadType : The payload type bound which this encoder is bound to.
- //
- // Return value : VCM_OK, on success.
- // < 0, on error.
- virtual int32_t RegisterExternalEncoder(VideoEncoder* externalEncoder,
- uint8_t payloadType,
- bool internalSource = false) = 0;
-
- // API to get currently configured encoder target bitrate in bits/s.
- //
- // Return value : 0, on success.
- // < 0, on error.
- virtual int Bitrate(unsigned int* bitrate) const = 0;
-
- // API to get currently configured encoder target frame rate.
- //
- // Return value : 0, on success.
- // < 0, on error.
- virtual int FrameRate(unsigned int* framerate) const = 0;
-
- // Sets the parameters describing the send channel. These parameters are inputs to the
- // Media Optimization inside the VCM and also specifies the target bit rate for the
- // encoder. Bit rate used by NACK should already be compensated for by the user.
- //
- // Input:
- // - target_bitrate : The target bitrate for VCM in bits/s.
- // - lossRate : Fractions of lost packets the past second.
- // (loss rate in percent = 100 * packetLoss / 255)
- // - rtt : Current round-trip time in ms.
- //
- // Return value : VCM_OK, on success.
- // < 0, on error.
- virtual int32_t SetChannelParameters(uint32_t target_bitrate,
- uint8_t lossRate,
- int64_t rtt) = 0;
-
- // Sets the parameters describing the receive channel. These parameters are inputs to the
- // Media Optimization inside the VCM.
- //
- // Input:
- // - rtt : Current round-trip time in ms.
- // with the most amount available bandwidth in a conference
- // scenario
- //
- // Return value : VCM_OK, on success.
- // < 0, on error.
- virtual int32_t SetReceiveChannelParameters(int64_t rtt) = 0;
-
- // Register a transport callback which will be called to deliver the encoded data and
- // side information.
- //
- // Input:
- // - transport : The callback object to register.
- //
- // Return value : VCM_OK, on success.
- // < 0, on error.
- virtual int32_t RegisterTransportCallback(VCMPacketizationCallback* transport) = 0;
-
- // Register video output information callback which will be called to deliver information
- // about the video stream produced by the encoder, for instance the average frame rate and
- // bit rate.
- //
- // Input:
- // - outputInformation : The callback object to register.
- //
- // Return value : VCM_OK, on success.
- // < 0, on error.
- virtual int32_t RegisterSendStatisticsCallback(
- VCMSendStatisticsCallback* sendStats) = 0;
-
- // Register a video protection callback which will be called to deliver
- // the requested FEC rate and NACK status (on/off).
- //
- // Input:
- // - protection : The callback object to register.
- //
- // Return value : VCM_OK, on success.
- // < 0, on error.
- virtual int32_t RegisterProtectionCallback(VCMProtectionCallback* protection) = 0;
-
- // Enable or disable a video protection method.
- //
- // Input:
- // - videoProtection : The method to enable or disable.
- // - enable : True if the method should be enabled, false if
- // it should be disabled.
- //
- // Return value : VCM_OK, on success.
- // < 0, on error.
- virtual int32_t SetVideoProtection(VCMVideoProtection videoProtection,
- bool enable) = 0;
-
- // Add one raw video frame to the encoder. This function does all the necessary
- // processing, then decides what frame type to encode, or if the frame should be
- // dropped. If the frame should be encoded it passes the frame to the encoder
- // before it returns.
- //
- // Input:
- // - videoFrame : Video frame to encode.
- // - codecSpecificInfo : Extra codec information, e.g., pre-parsed in-band signaling.
- //
- // Return value : VCM_OK, on success.
- // < 0, on error.
- virtual int32_t AddVideoFrame(
- const VideoFrame& videoFrame,
- const VideoContentMetrics* contentMetrics = NULL,
- const CodecSpecificInfo* codecSpecificInfo = NULL) = 0;
-
- // Next frame encoded should be an intra frame (keyframe).
- //
- // Return value : VCM_OK, on success.
- // < 0, on error.
- virtual int32_t IntraFrameRequest(int stream_index) = 0;
-
- // Frame Dropper enable. Can be used to disable the frame dropping when the encoder
- // over-uses its bit rate. This API is designed to be used when the encoded frames
- // are supposed to be stored to an AVI file, or when the I420 codec is used and the
- // target bit rate shouldn't affect the frame rate.
- //
- // Input:
- // - enable : True to enable the setting, false to disable it.
- //
- // Return value : VCM_OK, on success.
- // < 0, on error.
- virtual int32_t EnableFrameDropper(bool enable) = 0;
-
-
- /*
- * Receiver
- */
-
- // Register possible receive codecs, can be called multiple times for different codecs.
- // The module will automatically switch between registered codecs depending on the
- // payload type of incoming frames. The actual decoder will be created when needed.
- //
- // Input:
- // - receiveCodec : Settings for the codec to be registered.
- // - numberOfCores : Number of CPU cores that the decoder is allowed to use.
- // - requireKeyFrame : Set this to true if you don't want any delta frames
- // to be decoded until the first key frame has been decoded.
- //
- // Return value : VCM_OK, on success.
- // < 0, on error.
- virtual int32_t RegisterReceiveCodec(const VideoCodec* receiveCodec,
- int32_t numberOfCores,
- bool requireKeyFrame = false) = 0;
-
- // Register an externally defined decoder/renderer object. Can be a decoder only or a
- // decoder coupled with a renderer. Note that RegisterReceiveCodec must be called to
- // be used for decoding incoming streams.
- //
- // Input:
- // - externalDecoder : The external decoder/renderer object.
- // - payloadType : The payload type which this decoder should be
- // registered to.
- // - internalRenderTiming : True if the internal renderer (if any) of the decoder
- // object can make sure to render at a given time in ms.
- //
- // Return value : VCM_OK, on success.
- // < 0, on error.
- virtual int32_t RegisterExternalDecoder(VideoDecoder* externalDecoder,
- uint8_t payloadType,
- bool internalRenderTiming) = 0;
-
- // Register a receive callback. Will be called whenever there is a new frame ready
- // for rendering.
- //
- // Input:
- // - receiveCallback : The callback object to be used by the module when a
- // frame is ready for rendering.
- // De-register with a NULL pointer.
- //
- // Return value : VCM_OK, on success.
- // < 0, on error.
- virtual int32_t RegisterReceiveCallback(VCMReceiveCallback* receiveCallback) = 0;
-
- // Register a receive statistics callback which will be called to deliver information
- // about the video stream received by the receiving side of the VCM, for instance the
- // average frame rate and bit rate.
- //
- // Input:
- // - receiveStats : The callback object to register.
- //
- // Return value : VCM_OK, on success.
- // < 0, on error.
- virtual int32_t RegisterReceiveStatisticsCallback(
- VCMReceiveStatisticsCallback* receiveStats) = 0;
-
- // Register a decoder timing callback which will be called to deliver
- // information about the timing of the decoder in the receiving side of the
- // VCM, for instance the current and maximum frame decode latency.
- //
- // Input:
- // - decoderTiming : The callback object to register.
- //
- // Return value : VCM_OK, on success.
- // < 0, on error.
- virtual int32_t RegisterDecoderTimingCallback(
- VCMDecoderTimingCallback* decoderTiming) = 0;
-
- // Register a frame type request callback. This callback will be called when the
- // module needs to request specific frame types from the send side.
- //
- // Input:
- // - frameTypeCallback : The callback object to be used by the module when
- // requesting a specific type of frame from the send side.
- // De-register with a NULL pointer.
- //
- // Return value : VCM_OK, on success.
- // < 0, on error.
- virtual int32_t RegisterFrameTypeCallback(
- VCMFrameTypeCallback* frameTypeCallback) = 0;
-
- // Registers a callback which is called whenever the receive side of the VCM
- // encounters holes in the packet sequence and needs packets to be retransmitted.
- //
- // Input:
- // - callback : The callback to be registered in the VCM.
- //
- // Return value : VCM_OK, on success.
- // <0, on error.
- virtual int32_t RegisterPacketRequestCallback(
- VCMPacketRequestCallback* callback) = 0;
-
- // Waits for the next frame in the jitter buffer to become complete
- // (waits no longer than maxWaitTimeMs), then passes it to the decoder for decoding.
- // Should be called as often as possible to get the most out of the decoder.
- //
- // Return value : VCM_OK, on success.
- // < 0, on error.
- virtual int32_t Decode(uint16_t maxWaitTimeMs = 200) = 0;
-
- // Registers a callback which conveys the size of the render buffer.
- virtual int RegisterRenderBufferSizeCallback(
- VCMRenderBufferSizeCallback* callback) = 0;
-
- // Reset the decoder state to the initial state.
- //
- // Return value : VCM_OK, on success.
- // < 0, on error.
- virtual int32_t ResetDecoder() = 0;
-
- // API to get the codec which is currently used for decoding by the module.
- //
- // Input:
- // - currentReceiveCodec : Settings for the codec to be registered.
- //
- // Return value : VCM_OK, on success.
- // < 0, on error.
- virtual int32_t ReceiveCodec(VideoCodec* currentReceiveCodec) const = 0;
-
- // API to get the codec type currently used for decoding by the module.
- //
- // Return value : codecy type, on success.
- // kVideoCodecUnknown, on error or if no receive codec is registered
- virtual VideoCodecType ReceiveCodec() const = 0;
-
- // Insert a parsed packet into the receiver side of the module. Will be placed in the
- // jitter buffer waiting for the frame to become complete. Returns as soon as the packet
- // has been placed in the jitter buffer.
- //
- // Input:
- // - incomingPayload : Payload of the packet.
- // - payloadLength : Length of the payload.
- // - rtpInfo : The parsed header.
- //
- // Return value : VCM_OK, on success.
- // < 0, on error.
- virtual int32_t IncomingPacket(const uint8_t* incomingPayload,
- size_t payloadLength,
- const WebRtcRTPHeader& rtpInfo) = 0;
-
- // Minimum playout delay (Used for lip-sync). This is the minimum delay required
- // to sync with audio. Not included in VideoCodingModule::Delay()
- // Defaults to 0 ms.
- //
- // Input:
- // - minPlayoutDelayMs : Additional delay in ms.
- //
- // Return value : VCM_OK, on success.
- // < 0, on error.
- virtual int32_t SetMinimumPlayoutDelay(uint32_t minPlayoutDelayMs) = 0;
-
- // Set the time required by the renderer to render a frame.
- //
- // Input:
- // - timeMS : The time in ms required by the renderer to render a frame.
- //
- // Return value : VCM_OK, on success.
- // < 0, on error.
- virtual int32_t SetRenderDelay(uint32_t timeMS) = 0;
-
- // The total delay desired by the VCM. Can be less than the minimum
- // delay set with SetMinimumPlayoutDelay.
- //
- // Return value : Total delay in ms, on success.
- // < 0, on error.
- virtual int32_t Delay() const = 0;
-
- // Returns the number of packets discarded by the jitter buffer due to being
- // too late. This can include duplicated packets which arrived after the
- // frame was sent to the decoder. Therefore packets which were prematurely
- // NACKed will be counted.
- virtual uint32_t DiscardedPackets() const = 0;
-
-
- // Robustness APIs
-
- // Set the receiver robustness mode. The mode decides how the receiver
- // responds to losses in the stream. The type of counter-measure (soft or
- // hard NACK, dual decoder, RPS, etc.) is selected through the
- // robustnessMode parameter. The errorMode parameter decides if it is
- // allowed to display frames corrupted by losses. Note that not all
- // combinations of the two parameters are feasible. An error will be
- // returned for invalid combinations.
- // Input:
- // - robustnessMode : selected robustness mode.
- // - errorMode : selected error mode.
- //
- // Return value : VCM_OK, on success;
- // < 0, on error.
- virtual int SetReceiverRobustnessMode(ReceiverRobustness robustnessMode,
- VCMDecodeErrorMode errorMode) = 0;
-
- // Set the decode error mode. The mode decides which errors (if any) are
- // allowed in decodable frames. Note that setting decode_error_mode to
- // anything other than kWithErrors without enabling nack will cause
- // long-term freezes (resulting from frequent key frame requests) if
- // packet loss occurs.
- virtual void SetDecodeErrorMode(VCMDecodeErrorMode decode_error_mode) = 0;
-
- // Sets the maximum number of sequence numbers that we are allowed to NACK
- // and the oldest sequence number that we will consider to NACK. If a
- // sequence number older than |max_packet_age_to_nack| is missing
- // a key frame will be requested. A key frame will also be requested if the
- // time of incomplete or non-continuous frames in the jitter buffer is above
- // |max_incomplete_time_ms|.
- virtual void SetNackSettings(size_t max_nack_list_size,
- int max_packet_age_to_nack,
- int max_incomplete_time_ms) = 0;
-
- // Setting a desired delay to the VCM receiver. Video rendering will be
- // delayed by at least desired_delay_ms.
- virtual int SetMinReceiverDelay(int desired_delay_ms) = 0;
-
- // Lets the sender suspend video when the rate drops below
- // |threshold_bps|, and turns back on when the rate goes back up above
- // |threshold_bps| + |window_bps|.
- virtual void SuspendBelowMinBitrate() = 0;
-
- // Returns true if SuspendBelowMinBitrate is engaged and the video has been
- // suspended due to bandwidth limitations; otherwise false.
- virtual bool VideoSuspended() const = 0;
-
- virtual void RegisterPreDecodeImageCallback(
- EncodedImageCallback* observer) = 0;
- virtual void RegisterPostEncodeImageCallback(
- EncodedImageCallback* post_encode_callback) = 0;
- // Releases pending decode calls, permitting faster thread shutdown.
- virtual void TriggerDecoderShutdown() = 0;
-};
-
-} // namespace webrtc
-
-#endif // WEBRTC_MODULES_INTERFACE_VIDEO_CODING_H_
diff --git a/webrtc/modules/video_coding/main/source/OWNERS b/webrtc/modules/video_coding/main/source/OWNERS
deleted file mode 100644
index 3ee6b4bf5f..0000000000
--- a/webrtc/modules/video_coding/main/source/OWNERS
+++ /dev/null
@@ -1,5 +0,0 @@
-
-# These are for the common case of adding or renaming files. If you're doing
-# structural changes, please get a review from a reviewer in this file.
-per-file *.gyp=*
-per-file *.gypi=*
diff --git a/webrtc/modules/video_coding/main/source/codec_timer.cc b/webrtc/modules/video_coding/main/source/codec_timer.cc
deleted file mode 100644
index a462258813..0000000000
--- a/webrtc/modules/video_coding/main/source/codec_timer.cc
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "webrtc/modules/video_coding/main/source/codec_timer.h"
-
-#include <assert.h>
-
-namespace webrtc
-{
-
-// The first kIgnoredSampleCount samples will be ignored.
-static const int32_t kIgnoredSampleCount = 5;
-
-VCMCodecTimer::VCMCodecTimer()
-:
-_filteredMax(0),
-_ignoredSampleCount(0),
-_shortMax(0),
-_history()
-{
- Reset();
-}
-
-int32_t VCMCodecTimer::StopTimer(int64_t startTimeMs, int64_t nowMs)
-{
- const int32_t timeDiff = static_cast<int32_t>(nowMs - startTimeMs);
- MaxFilter(timeDiff, nowMs);
- return timeDiff;
-}
-
-void VCMCodecTimer::Reset()
-{
- _filteredMax = 0;
- _ignoredSampleCount = 0;
- _shortMax = 0;
- for (int i=0; i < MAX_HISTORY_SIZE; i++)
- {
- _history[i].shortMax = 0;
- _history[i].timeMs = -1;
- }
-}
-
-// Update the max-value filter
-void VCMCodecTimer::MaxFilter(int32_t decodeTime, int64_t nowMs)
-{
- if (_ignoredSampleCount >= kIgnoredSampleCount)
- {
- UpdateMaxHistory(decodeTime, nowMs);
- ProcessHistory(nowMs);
- }
- else
- {
- _ignoredSampleCount++;
- }
-}
-
-void
-VCMCodecTimer::UpdateMaxHistory(int32_t decodeTime, int64_t now)
-{
- if (_history[0].timeMs >= 0 &&
- now - _history[0].timeMs < SHORT_FILTER_MS)
- {
- if (decodeTime > _shortMax)
- {
- _shortMax = decodeTime;
- }
- }
- else
- {
- // Only add a new value to the history once a second
- if(_history[0].timeMs == -1)
- {
- // First, no shift
- _shortMax = decodeTime;
- }
- else
- {
- // Shift
- for(int i = (MAX_HISTORY_SIZE - 2); i >= 0 ; i--)
- {
- _history[i+1].shortMax = _history[i].shortMax;
- _history[i+1].timeMs = _history[i].timeMs;
- }
- }
- if (_shortMax == 0)
- {
- _shortMax = decodeTime;
- }
-
- _history[0].shortMax = _shortMax;
- _history[0].timeMs = now;
- _shortMax = 0;
- }
-}
-
-void
-VCMCodecTimer::ProcessHistory(int64_t nowMs)
-{
- _filteredMax = _shortMax;
- if (_history[0].timeMs == -1)
- {
- return;
- }
- for (int i=0; i < MAX_HISTORY_SIZE; i++)
- {
- if (_history[i].timeMs == -1)
- {
- break;
- }
- if (nowMs - _history[i].timeMs > MAX_HISTORY_SIZE * SHORT_FILTER_MS)
- {
- // This sample (and all samples after this) is too old
- break;
- }
- if (_history[i].shortMax > _filteredMax)
- {
- // This sample is the largest one this far into the history
- _filteredMax = _history[i].shortMax;
- }
- }
-}
-
-// Get the maximum observed time within a time window
-int32_t VCMCodecTimer::RequiredDecodeTimeMs(FrameType /*frameType*/) const
-{
- return _filteredMax;
-}
-
-}
diff --git a/webrtc/modules/video_coding/main/source/codec_timer.h b/webrtc/modules/video_coding/main/source/codec_timer.h
deleted file mode 100644
index 9268e8d817..0000000000
--- a/webrtc/modules/video_coding/main/source/codec_timer.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef WEBRTC_MODULES_VIDEO_CODING_CODEC_TIMER_H_
-#define WEBRTC_MODULES_VIDEO_CODING_CODEC_TIMER_H_
-
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/typedefs.h"
-
-namespace webrtc
-{
-
-// MAX_HISTORY_SIZE * SHORT_FILTER_MS defines the window size in milliseconds
-#define MAX_HISTORY_SIZE 10
-#define SHORT_FILTER_MS 1000
-
-class VCMShortMaxSample
-{
-public:
- VCMShortMaxSample() : shortMax(0), timeMs(-1) {};
-
- int32_t shortMax;
- int64_t timeMs;
-};
-
-class VCMCodecTimer
-{
-public:
- VCMCodecTimer();
-
- // Updates and returns the max filtered decode time.
- int32_t StopTimer(int64_t startTimeMs, int64_t nowMs);
-
- // Empty the list of timers.
- void Reset();
-
- // Get the required decode time in ms.
- int32_t RequiredDecodeTimeMs(FrameType frameType) const;
-
-private:
- void UpdateMaxHistory(int32_t decodeTime, int64_t now);
- void MaxFilter(int32_t newTime, int64_t nowMs);
- void ProcessHistory(int64_t nowMs);
-
- int32_t _filteredMax;
- // The number of samples ignored so far.
- int32_t _ignoredSampleCount;
- int32_t _shortMax;
- VCMShortMaxSample _history[MAX_HISTORY_SIZE];
-
-};
-
-} // namespace webrtc
-
-#endif // WEBRTC_MODULES_VIDEO_CODING_CODEC_TIMER_H_
diff --git a/webrtc/modules/video_coding/main/source/encoded_frame.h b/webrtc/modules/video_coding/main/source/encoded_frame.h
deleted file mode 100644
index 608578c35d..0000000000
--- a/webrtc/modules/video_coding/main/source/encoded_frame.h
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef WEBRTC_MODULES_VIDEO_CODING_ENCODED_FRAME_H_
-#define WEBRTC_MODULES_VIDEO_CODING_ENCODED_FRAME_H_
-
-#include <vector>
-
-#include "webrtc/common_types.h"
-#include "webrtc/common_video/interface/video_image.h"
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
-#include "webrtc/modules/video_coding/main/interface/video_coding_defines.h"
-
-namespace webrtc
-{
-
-class VCMEncodedFrame : protected EncodedImage
-{
-public:
- VCMEncodedFrame();
- VCMEncodedFrame(const webrtc::EncodedImage& rhs);
- VCMEncodedFrame(const VCMEncodedFrame& rhs);
-
- ~VCMEncodedFrame();
- /**
- * Delete VideoFrame and resets members to zero
- */
- void Free();
- /**
- * Set render time in milliseconds
- */
- void SetRenderTime(const int64_t renderTimeMs) {_renderTimeMs = renderTimeMs;}
-
- /**
- * Set the encoded frame size
- */
- void SetEncodedSize(uint32_t width, uint32_t height)
- { _encodedWidth = width; _encodedHeight = height; }
- /**
- * Get the encoded image
- */
- const webrtc::EncodedImage& EncodedImage() const
- { return static_cast<const webrtc::EncodedImage&>(*this); }
- /**
- * Get pointer to frame buffer
- */
- const uint8_t* Buffer() const {return _buffer;}
- /**
- * Get frame length
- */
- size_t Length() const {return _length;}
- /**
- * Get frame timestamp (90kHz)
- */
- uint32_t TimeStamp() const {return _timeStamp;}
- /**
- * Get render time in milliseconds
- */
- int64_t RenderTimeMs() const {return _renderTimeMs;}
- /**
- * Get frame type
- */
- webrtc::FrameType FrameType() const { return _frameType; }
- /**
- * Get frame rotation
- */
- VideoRotation rotation() const { return _rotation; }
- /**
- * True if this frame is complete, false otherwise
- */
- bool Complete() const { return _completeFrame; }
- /**
- * True if there's a frame missing before this frame
- */
- bool MissingFrame() const { return _missingFrame; }
- /**
- * Payload type of the encoded payload
- */
- uint8_t PayloadType() const { return _payloadType; }
- /**
- * Get codec specific info.
- * The returned pointer is only valid as long as the VCMEncodedFrame
- * is valid. Also, VCMEncodedFrame owns the pointer and will delete
- * the object.
- */
- const CodecSpecificInfo* CodecSpecific() const {return &_codecSpecificInfo;}
-
- const RTPFragmentationHeader* FragmentationHeader() const;
-
-protected:
- /**
- * Verifies that current allocated buffer size is larger than or equal to the input size.
- * If the current buffer size is smaller, a new allocation is made and the old buffer data
- * is copied to the new buffer.
- * Buffer size is updated to minimumSize.
- */
- void VerifyAndAllocate(size_t minimumSize);
-
- void Reset();
-
- void CopyCodecSpecific(const RTPVideoHeader* header);
-
- int64_t _renderTimeMs;
- uint8_t _payloadType;
- bool _missingFrame;
- CodecSpecificInfo _codecSpecificInfo;
- webrtc::VideoCodecType _codec;
- RTPFragmentationHeader _fragmentation;
- VideoRotation _rotation;
-
- // Video rotation is only set along with the last packet for each frame
- // (same as marker bit). This |_rotation_set| is only for debugging purpose
- // to ensure we don't set it twice for a frame.
- bool _rotation_set;
-};
-
-} // namespace webrtc
-
-#endif // WEBRTC_MODULES_VIDEO_CODING_ENCODED_FRAME_H_
diff --git a/webrtc/modules/video_coding/main/source/fec_tables_xor.h b/webrtc/modules/video_coding/main/source/fec_tables_xor.h
deleted file mode 100644
index 28c67b4565..0000000000
--- a/webrtc/modules/video_coding/main/source/fec_tables_xor.h
+++ /dev/null
@@ -1,6481 +0,0 @@
-/*
- * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef WEBRTC_MODULES_VIDEO_CODING_SOURCE_FEC_TABLES_XOR_H_
-#define WEBRTC_MODULES_VIDEO_CODING_SOURCE_FEC_TABLES_XOR_H_
-
-// This is a private header for media_opt_util.cc.
-// It should not be included by other files.
-
-namespace webrtc {
-
-// Table for Protection factor (code rate) of delta frames, for the XOR FEC.
-// Input is the packet loss and an effective rate (bits/frame).
-// Output is array kCodeRateXORTable[k], where k = rate_i*129 + loss_j;
-// loss_j = 0,1,..128, and rate_i varies over some range.
-static const int kSizeCodeRateXORTable = 6450;
-static const unsigned char kCodeRateXORTable[kSizeCodeRateXORTable] = {
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-11,
-11,
-11,
-11,
-11,
-11,
-11,
-11,
-11,
-11,
-11,
-11,
-11,
-11,
-11,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-39,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-51,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-8,
-8,
-8,
-8,
-8,
-8,
-8,
-8,
-8,
-8,
-8,
-8,
-8,
-8,
-8,
-30,
-30,
-30,
-30,
-30,
-30,
-30,
-30,
-30,
-30,
-30,
-30,
-30,
-30,
-30,
-56,
-56,
-56,
-56,
-56,
-56,
-56,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-65,
-87,
-87,
-87,
-87,
-87,
-87,
-87,
-87,
-87,
-87,
-87,
-87,
-87,
-87,
-87,
-87,
-87,
-87,
-87,
-87,
-87,
-87,
-87,
-87,
-78,
-78,
-78,
-78,
-78,
-78,
-78,
-78,
-78,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-6,
-6,
-6,
-23,
-23,
-23,
-23,
-23,
-23,
-23,
-23,
-23,
-23,
-23,
-23,
-23,
-23,
-23,
-44,
-44,
-44,
-44,
-44,
-44,
-50,
-50,
-50,
-50,
-50,
-50,
-50,
-50,
-50,
-68,
-68,
-68,
-68,
-68,
-68,
-68,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-85,
-105,
-105,
-105,
-105,
-105,
-105,
-105,
-105,
-105,
-105,
-105,
-105,
-105,
-105,
-105,
-105,
-105,
-105,
-105,
-105,
-105,
-105,
-105,
-105,
-88,
-88,
-88,
-88,
-88,
-88,
-88,
-88,
-88,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-5,
-5,
-5,
-5,
-5,
-5,
-19,
-19,
-19,
-36,
-41,
-41,
-41,
-41,
-41,
-41,
-41,
-41,
-41,
-41,
-41,
-41,
-41,
-41,
-55,
-55,
-55,
-55,
-55,
-55,
-69,
-69,
-69,
-69,
-69,
-69,
-69,
-69,
-69,
-75,
-75,
-80,
-80,
-80,
-80,
-80,
-97,
-97,
-97,
-97,
-97,
-97,
-97,
-97,
-97,
-97,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-102,
-116,
-116,
-116,
-116,
-116,
-116,
-116,
-116,
-116,
-116,
-116,
-116,
-116,
-116,
-116,
-116,
-116,
-116,
-116,
-116,
-116,
-116,
-116,
-116,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-4,
-16,
-16,
-16,
-16,
-16,
-16,
-30,
-35,
-35,
-47,
-58,
-58,
-58,
-58,
-58,
-58,
-58,
-58,
-58,
-58,
-58,
-58,
-58,
-58,
-63,
-63,
-63,
-63,
-63,
-63,
-77,
-77,
-77,
-77,
-77,
-77,
-77,
-82,
-82,
-82,
-82,
-94,
-94,
-94,
-94,
-94,
-105,
-105,
-105,
-105,
-110,
-110,
-110,
-110,
-110,
-110,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-115,
-115,
-115,
-115,
-115,
-115,
-115,
-115,
-115,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-4,
-14,
-27,
-27,
-27,
-27,
-27,
-31,
-41,
-52,
-52,
-56,
-69,
-69,
-69,
-69,
-69,
-69,
-69,
-69,
-69,
-69,
-69,
-69,
-69,
-69,
-69,
-69,
-69,
-69,
-69,
-69,
-79,
-79,
-79,
-79,
-83,
-83,
-83,
-94,
-94,
-94,
-94,
-106,
-106,
-106,
-106,
-106,
-115,
-115,
-115,
-115,
-125,
-125,
-125,
-125,
-125,
-125,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-0,
-0,
-0,
-3,
-3,
-3,
-17,
-28,
-38,
-38,
-38,
-38,
-38,
-47,
-51,
-63,
-63,
-63,
-72,
-72,
-72,
-72,
-72,
-72,
-72,
-76,
-76,
-76,
-76,
-80,
-80,
-80,
-80,
-80,
-80,
-80,
-80,
-80,
-84,
-84,
-84,
-84,
-93,
-93,
-93,
-105,
-105,
-105,
-105,
-114,
-114,
-114,
-114,
-114,
-124,
-124,
-124,
-124,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-0,
-0,
-0,
-12,
-12,
-12,
-35,
-43,
-47,
-47,
-47,
-47,
-47,
-58,
-58,
-66,
-66,
-66,
-70,
-70,
-70,
-70,
-70,
-73,
-73,
-82,
-82,
-82,
-86,
-94,
-94,
-94,
-94,
-94,
-94,
-94,
-94,
-94,
-94,
-94,
-94,
-94,
-105,
-105,
-105,
-114,
-114,
-114,
-114,
-117,
-117,
-117,
-117,
-117,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-0,
-0,
-0,
-24,
-24,
-24,
-49,
-53,
-53,
-53,
-53,
-53,
-53,
-61,
-61,
-64,
-64,
-64,
-64,
-70,
-70,
-70,
-70,
-78,
-78,
-88,
-88,
-88,
-96,
-106,
-106,
-106,
-106,
-106,
-106,
-106,
-106,
-106,
-106,
-112,
-112,
-112,
-120,
-120,
-120,
-124,
-124,
-124,
-124,
-124,
-124,
-124,
-124,
-124,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-0,
-0,
-5,
-36,
-36,
-36,
-55,
-55,
-55,
-55,
-55,
-55,
-55,
-58,
-58,
-58,
-58,
-58,
-64,
-78,
-78,
-78,
-78,
-87,
-87,
-94,
-94,
-94,
-103,
-110,
-110,
-110,
-110,
-110,
-110,
-110,
-110,
-116,
-116,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-0,
-0,
-18,
-43,
-43,
-43,
-53,
-53,
-53,
-53,
-53,
-53,
-53,
-53,
-58,
-58,
-58,
-58,
-71,
-87,
-87,
-87,
-87,
-94,
-94,
-97,
-97,
-97,
-109,
-111,
-111,
-111,
-111,
-111,
-111,
-111,
-111,
-125,
-125,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-0,
-0,
-31,
-46,
-46,
-46,
-48,
-48,
-48,
-48,
-48,
-48,
-48,
-48,
-66,
-66,
-66,
-66,
-80,
-93,
-93,
-93,
-93,
-95,
-95,
-95,
-95,
-100,
-115,
-115,
-115,
-115,
-115,
-115,
-115,
-115,
-115,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-0,
-4,
-40,
-45,
-45,
-45,
-45,
-45,
-45,
-45,
-45,
-49,
-49,
-49,
-74,
-74,
-74,
-74,
-86,
-90,
-90,
-90,
-90,
-95,
-95,
-95,
-95,
-106,
-120,
-120,
-120,
-120,
-120,
-120,
-120,
-120,
-120,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-0,
-14,
-42,
-42,
-42,
-42,
-42,
-42,
-42,
-42,
-46,
-56,
-56,
-56,
-80,
-80,
-80,
-80,
-84,
-84,
-84,
-84,
-88,
-99,
-99,
-99,
-99,
-111,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-0,
-26,
-40,
-40,
-40,
-40,
-40,
-40,
-40,
-40,
-54,
-66,
-66,
-66,
-80,
-80,
-80,
-80,
-80,
-80,
-80,
-84,
-94,
-106,
-106,
-106,
-106,
-116,
-120,
-120,
-120,
-120,
-120,
-120,
-120,
-120,
-124,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-3,
-34,
-38,
-38,
-38,
-38,
-38,
-42,
-42,
-42,
-63,
-72,
-72,
-76,
-80,
-80,
-80,
-80,
-80,
-80,
-80,
-89,
-101,
-114,
-114,
-114,
-114,
-118,
-118,
-118,
-118,
-118,
-118,
-118,
-118,
-118,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-12,
-36,
-36,
-36,
-36,
-36,
-36,
-49,
-49,
-49,
-69,
-73,
-76,
-86,
-86,
-86,
-86,
-86,
-86,
-86,
-86,
-97,
-109,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-122,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-22,
-34,
-34,
-34,
-34,
-38,
-38,
-57,
-57,
-57,
-69,
-73,
-82,
-92,
-92,
-92,
-92,
-92,
-92,
-96,
-96,
-104,
-117,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-29,
-33,
-33,
-33,
-33,
-44,
-44,
-62,
-62,
-62,
-69,
-77,
-87,
-95,
-95,
-95,
-95,
-95,
-95,
-107,
-107,
-110,
-120,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-31,
-31,
-31,
-31,
-31,
-51,
-51,
-62,
-65,
-65,
-73,
-83,
-91,
-94,
-94,
-94,
-94,
-97,
-97,
-114,
-114,
-114,
-122,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-29,
-29,
-29,
-29,
-29,
-56,
-56,
-59,
-70,
-70,
-79,
-86,
-89,
-89,
-89,
-89,
-89,
-100,
-100,
-116,
-116,
-116,
-122,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-28,
-28,
-28,
-28,
-28,
-57,
-57,
-57,
-76,
-76,
-83,
-86,
-86,
-86,
-86,
-86,
-89,
-104,
-104,
-114,
-114,
-114,
-124,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-27,
-27,
-27,
-27,
-30,
-55,
-55,
-55,
-80,
-80,
-83,
-86,
-86,
-86,
-86,
-86,
-93,
-108,
-108,
-111,
-111,
-111,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-26,
-26,
-26,
-26,
-36,
-53,
-53,
-53,
-80,
-80,
-80,
-90,
-90,
-90,
-90,
-90,
-98,
-107,
-107,
-107,
-107,
-107,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-26,
-26,
-26,
-28,
-42,
-52,
-54,
-54,
-78,
-78,
-78,
-95,
-95,
-95,
-97,
-97,
-104,
-106,
-106,
-106,
-106,
-106,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-24,
-24,
-24,
-33,
-47,
-49,
-58,
-58,
-74,
-74,
-74,
-97,
-97,
-97,
-106,
-106,
-108,
-108,
-108,
-108,
-108,
-108,
-124,
-124,
-124,
-124,
-124,
-124,
-124,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-24,
-24,
-24,
-39,
-48,
-50,
-63,
-63,
-72,
-74,
-74,
-96,
-96,
-96,
-109,
-111,
-111,
-111,
-111,
-111,
-111,
-111,
-119,
-119,
-122,
-122,
-122,
-122,
-122,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-23,
-23,
-23,
-43,
-46,
-54,
-66,
-66,
-69,
-77,
-77,
-92,
-92,
-92,
-105,
-113,
-113,
-113,
-113,
-113,
-113,
-113,
-115,
-117,
-123,
-123,
-123,
-123,
-123,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-22,
-22,
-22,
-44,
-44,
-59,
-67,
-67,
-67,
-81,
-81,
-89,
-89,
-89,
-97,
-112,
-112,
-112,
-112,
-112,
-112,
-112,
-112,
-119,
-126,
-126,
-126,
-126,
-126,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-21,
-21,
-24,
-43,
-45,
-63,
-65,
-65,
-67,
-85,
-85,
-87,
-87,
-87,
-91,
-109,
-109,
-109,
-111,
-111,
-111,
-111,
-111,
-123,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-21,
-21,
-28,
-42,
-50,
-63,
-63,
-66,
-71,
-85,
-85,
-85,
-85,
-87,
-92,
-106,
-106,
-108,
-114,
-114,
-114,
-114,
-114,
-125,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-20,
-20,
-34,
-41,
-54,
-62,
-62,
-69,
-75,
-82,
-82,
-82,
-82,
-92,
-98,
-105,
-105,
-110,
-117,
-117,
-117,
-117,
-117,
-124,
-124,
-126,
-126,
-126,
-126,
-126,
-126,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-20,
-20,
-38,
-40,
-58,
-60,
-60,
-73,
-78,
-80,
-80,
-80,
-80,
-100,
-105,
-107,
-107,
-113,
-118,
-118,
-118,
-118,
-118,
-120,
-120,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-19,
-21,
-38,
-40,
-58,
-58,
-60,
-75,
-77,
-77,
-77,
-81,
-81,
-107,
-109,
-109,
-109,
-114,
-116,
-116,
-116,
-116,
-116,
-116,
-116,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-18,
-25,
-37,
-44,
-56,
-56,
-63,
-75,
-75,
-75,
-75,
-88,
-88,
-111,
-111,
-111,
-111,
-112,
-112,
-112,
-112,
-112,
-112,
-112,
-114,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-18,
-30,
-36,
-48,
-55,
-55,
-67,
-73,
-73,
-73,
-73,
-97,
-97,
-110,
-110,
-110,
-110,
-110,
-110,
-110,
-110,
-110,
-110,
-110,
-116,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-18,
-34,
-36,
-52,
-55,
-55,
-70,
-72,
-73,
-73,
-73,
-102,
-104,
-108,
-108,
-108,
-108,
-109,
-109,
-109,
-109,
-109,
-109,
-109,
-119,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-17,
-35,
-35,
-52,
-59,
-59,
-70,
-70,
-76,
-76,
-76,
-99,
-105,
-105,
-105,
-105,
-105,
-111,
-111,
-111,
-111,
-111,
-111,
-111,
-121,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-17,
-34,
-36,
-51,
-61,
-62,
-70,
-70,
-80,
-80,
-80,
-93,
-103,
-103,
-103,
-103,
-103,
-112,
-112,
-112,
-112,
-112,
-116,
-118,
-124,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-16,
-33,
-39,
-50,
-59,
-65,
-72,
-72,
-82,
-82,
-82,
-91,
-100,
-100,
-100,
-100,
-100,
-109,
-109,
-109,
-109,
-109,
-121,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-16,
-32,
-43,
-48,
-54,
-66,
-75,
-75,
-81,
-83,
-83,
-92,
-97,
-97,
-97,
-99,
-99,
-105,
-105,
-105,
-105,
-105,
-123,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-15,
-31,
-46,
-47,
-49,
-69,
-77,
-77,
-81,
-85,
-85,
-93,
-95,
-95,
-95,
-100,
-100,
-102,
-102,
-102,
-102,
-102,
-120,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-15,
-30,
-46,
-48,
-48,
-70,
-75,
-79,
-82,
-87,
-87,
-92,
-94,
-94,
-94,
-103,
-103,
-103,
-103,
-103,
-104,
-104,
-115,
-120,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-15,
-30,
-45,
-50,
-50,
-68,
-70,
-80,
-85,
-89,
-89,
-90,
-95,
-95,
-95,
-104,
-104,
-104,
-104,
-104,
-109,
-109,
-112,
-114,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-14,
-29,
-44,
-54,
-54,
-64,
-64,
-83,
-87,
-88,
-88,
-88,
-98,
-98,
-98,
-103,
-103,
-103,
-103,
-103,
-113,
-113,
-113,
-113,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-14,
-29,
-43,
-56,
-56,
-61,
-61,
-84,
-85,
-88,
-88,
-88,
-100,
-100,
-100,
-102,
-102,
-102,
-102,
-102,
-113,
-116,
-116,
-116,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-14,
-28,
-42,
-57,
-57,
-62,
-62,
-80,
-80,
-91,
-91,
-91,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-109,
-119,
-119,
-119,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-14,
-28,
-42,
-56,
-56,
-65,
-66,
-76,
-76,
-92,
-92,
-92,
-97,
-97,
-97,
-101,
-101,
-101,
-101,
-101,
-106,
-121,
-121,
-121,
-126,
-126,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-13,
-27,
-41,
-55,
-55,
-67,
-72,
-74,
-74,
-90,
-90,
-90,
-91,
-91,
-91,
-105,
-105,
-105,
-105,
-105,
-107,
-122,
-122,
-122,
-123,
-123,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-0,
-13,
-27,
-40,
-54,
-54,
-67,
-76,
-76,
-76,
-85,
-85,
-85,
-85,
-85,
-85,
-112,
-112,
-112,
-112,
-112,
-112,
-121,
-121,
-121,
-121,
-121,
-126,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-127,
-
-
-};
-
-} // namespace webrtc
-
-#endif // WEBRTC_MODULES_VIDEO_CODING_SOURCE_FEC_TABLES_XOR_H_
diff --git a/webrtc/modules/video_coding/main/source/frame_buffer.cc b/webrtc/modules/video_coding/main/source/frame_buffer.cc
deleted file mode 100644
index 5b6680ec61..0000000000
--- a/webrtc/modules/video_coding/main/source/frame_buffer.cc
+++ /dev/null
@@ -1,297 +0,0 @@
-/*
- * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "webrtc/modules/video_coding/main/source/frame_buffer.h"
-
-#include <assert.h>
-#include <string.h>
-
-#include "webrtc/base/checks.h"
-#include "webrtc/modules/video_coding/main/source/packet.h"
-#include "webrtc/system_wrappers/include/logging.h"
-
-namespace webrtc {
-
-VCMFrameBuffer::VCMFrameBuffer()
- :
- _state(kStateEmpty),
- _nackCount(0),
- _latestPacketTimeMs(-1) {
-}
-
-VCMFrameBuffer::~VCMFrameBuffer() {
-}
-
-VCMFrameBuffer::VCMFrameBuffer(const VCMFrameBuffer& rhs)
-:
-VCMEncodedFrame(rhs),
-_state(rhs._state),
-_sessionInfo(),
-_nackCount(rhs._nackCount),
-_latestPacketTimeMs(rhs._latestPacketTimeMs) {
- _sessionInfo = rhs._sessionInfo;
- _sessionInfo.UpdateDataPointers(rhs._buffer, _buffer);
-}
-
-webrtc::FrameType
-VCMFrameBuffer::FrameType() const {
- return _sessionInfo.FrameType();
-}
-
-int32_t
-VCMFrameBuffer::GetLowSeqNum() const {
- return _sessionInfo.LowSequenceNumber();
-}
-
-int32_t
-VCMFrameBuffer::GetHighSeqNum() const {
- return _sessionInfo.HighSequenceNumber();
-}
-
-int VCMFrameBuffer::PictureId() const {
- return _sessionInfo.PictureId();
-}
-
-int VCMFrameBuffer::TemporalId() const {
- return _sessionInfo.TemporalId();
-}
-
-bool VCMFrameBuffer::LayerSync() const {
- return _sessionInfo.LayerSync();
-}
-
-int VCMFrameBuffer::Tl0PicId() const {
- return _sessionInfo.Tl0PicId();
-}
-
-bool VCMFrameBuffer::NonReference() const {
- return _sessionInfo.NonReference();
-}
-
-void VCMFrameBuffer::SetGofInfo(const GofInfoVP9& gof_info, size_t idx) {
- _sessionInfo.SetGofInfo(gof_info, idx);
- // TODO(asapersson): Consider adding hdr->VP9.ref_picture_id for testing.
- _codecSpecificInfo.codecSpecific.VP9.temporal_idx =
- gof_info.temporal_idx[idx];
- _codecSpecificInfo.codecSpecific.VP9.temporal_up_switch =
- gof_info.temporal_up_switch[idx];
-}
-
-bool
-VCMFrameBuffer::IsSessionComplete() const {
- return _sessionInfo.complete();
-}
-
-// Insert packet
-VCMFrameBufferEnum
-VCMFrameBuffer::InsertPacket(const VCMPacket& packet,
- int64_t timeInMs,
- VCMDecodeErrorMode decode_error_mode,
- const FrameData& frame_data) {
- assert(!(NULL == packet.dataPtr && packet.sizeBytes > 0));
- if (packet.dataPtr != NULL) {
- _payloadType = packet.payloadType;
- }
-
- if (kStateEmpty == _state) {
- // First packet (empty and/or media) inserted into this frame.
- // store some info and set some initial values.
- _timeStamp = packet.timestamp;
- // We only take the ntp timestamp of the first packet of a frame.
- ntp_time_ms_ = packet.ntp_time_ms_;
- _codec = packet.codec;
- if (packet.frameType != kEmptyFrame) {
- // first media packet
- SetState(kStateIncomplete);
- }
- }
-
- uint32_t requiredSizeBytes = Length() + packet.sizeBytes +
- (packet.insertStartCode ? kH264StartCodeLengthBytes : 0);
- if (requiredSizeBytes >= _size) {
- const uint8_t* prevBuffer = _buffer;
- const uint32_t increments = requiredSizeBytes /
- kBufferIncStepSizeBytes +
- (requiredSizeBytes %
- kBufferIncStepSizeBytes > 0);
- const uint32_t newSize = _size +
- increments * kBufferIncStepSizeBytes;
- if (newSize > kMaxJBFrameSizeBytes) {
- LOG(LS_ERROR) << "Failed to insert packet due to frame being too "
- "big.";
- return kSizeError;
- }
- VerifyAndAllocate(newSize);
- _sessionInfo.UpdateDataPointers(prevBuffer, _buffer);
- }
-
- if (packet.width > 0 && packet.height > 0) {
- _encodedWidth = packet.width;
- _encodedHeight = packet.height;
- }
-
- // Don't copy payload specific data for empty packets (e.g padding packets).
- if (packet.sizeBytes > 0)
- CopyCodecSpecific(&packet.codecSpecificHeader);
-
- int retVal = _sessionInfo.InsertPacket(packet, _buffer,
- decode_error_mode,
- frame_data);
- if (retVal == -1) {
- return kSizeError;
- } else if (retVal == -2) {
- return kDuplicatePacket;
- } else if (retVal == -3) {
- return kOutOfBoundsPacket;
- }
- // update length
- _length = Length() + static_cast<uint32_t>(retVal);
-
- _latestPacketTimeMs = timeInMs;
-
- // http://www.etsi.org/deliver/etsi_ts/126100_126199/126114/12.07.00_60/
- // ts_126114v120700p.pdf Section 7.4.5.
- // The MTSI client shall add the payload bytes as defined in this clause
- // onto the last RTP packet in each group of packets which make up a key
- // frame (I-frame or IDR frame in H.264 (AVC), or an IRAP picture in H.265
- // (HEVC)).
- if (packet.markerBit) {
- RTC_DCHECK(!_rotation_set);
- _rotation = packet.codecSpecificHeader.rotation;
- _rotation_set = true;
- }
-
- if (_sessionInfo.complete()) {
- SetState(kStateComplete);
- return kCompleteSession;
- } else if (_sessionInfo.decodable()) {
- SetState(kStateDecodable);
- return kDecodableSession;
- }
- return kIncomplete;
-}
-
-int64_t
-VCMFrameBuffer::LatestPacketTimeMs() const {
- return _latestPacketTimeMs;
-}
-
-void
-VCMFrameBuffer::IncrementNackCount() {
- _nackCount++;
-}
-
-int16_t
-VCMFrameBuffer::GetNackCount() const {
- return _nackCount;
-}
-
-bool
-VCMFrameBuffer::HaveFirstPacket() const {
- return _sessionInfo.HaveFirstPacket();
-}
-
-bool
-VCMFrameBuffer::HaveLastPacket() const {
- return _sessionInfo.HaveLastPacket();
-}
-
-int
-VCMFrameBuffer::NumPackets() const {
- return _sessionInfo.NumPackets();
-}
-
-void
-VCMFrameBuffer::Reset() {
- _length = 0;
- _timeStamp = 0;
- _sessionInfo.Reset();
- _payloadType = 0;
- _nackCount = 0;
- _latestPacketTimeMs = -1;
- _state = kStateEmpty;
- VCMEncodedFrame::Reset();
-}
-
-// Set state of frame
-void
-VCMFrameBuffer::SetState(VCMFrameBufferStateEnum state) {
- if (_state == state) {
- return;
- }
- switch (state) {
- case kStateIncomplete:
- // we can go to this state from state kStateEmpty
- assert(_state == kStateEmpty);
-
- // Do nothing, we received a packet
- break;
-
- case kStateComplete:
- assert(_state == kStateEmpty ||
- _state == kStateIncomplete ||
- _state == kStateDecodable);
-
- break;
-
- case kStateEmpty:
- // Should only be set to empty through Reset().
- assert(false);
- break;
-
- case kStateDecodable:
- assert(_state == kStateEmpty ||
- _state == kStateIncomplete);
- break;
- }
- _state = state;
-}
-
-// Get current state of frame
-VCMFrameBufferStateEnum
-VCMFrameBuffer::GetState() const {
- return _state;
-}
-
-// Get current state of frame
-VCMFrameBufferStateEnum
-VCMFrameBuffer::GetState(uint32_t& timeStamp) const {
- timeStamp = TimeStamp();
- return GetState();
-}
-
-bool
-VCMFrameBuffer::IsRetransmitted() const {
- return _sessionInfo.session_nack();
-}
-
-void
-VCMFrameBuffer::PrepareForDecode(bool continuous) {
-#ifdef INDEPENDENT_PARTITIONS
- if (_codec == kVideoCodecVP8) {
- _length =
- _sessionInfo.BuildVP8FragmentationHeader(_buffer, _length,
- &_fragmentation);
- } else {
- size_t bytes_removed = _sessionInfo.MakeDecodable();
- _length -= bytes_removed;
- }
-#else
- size_t bytes_removed = _sessionInfo.MakeDecodable();
- _length -= bytes_removed;
-#endif
- // Transfer frame information to EncodedFrame and create any codec
- // specific information.
- _frameType = _sessionInfo.FrameType();
- _completeFrame = _sessionInfo.complete();
- _missingFrame = !continuous;
-}
-
-} // namespace webrtc
diff --git a/webrtc/modules/video_coding/main/source/generic_decoder.cc b/webrtc/modules/video_coding/main/source/generic_decoder.cc
deleted file mode 100644
index 8b2d3974de..0000000000
--- a/webrtc/modules/video_coding/main/source/generic_decoder.cc
+++ /dev/null
@@ -1,198 +0,0 @@
-/*
- * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "webrtc/modules/video_coding/main/interface/video_coding.h"
-#include "webrtc/modules/video_coding/main/source/generic_decoder.h"
-#include "webrtc/modules/video_coding/main/source/internal_defines.h"
-#include "webrtc/system_wrappers/include/clock.h"
-#include "webrtc/system_wrappers/include/logging.h"
-
-namespace webrtc {
-
-VCMDecodedFrameCallback::VCMDecodedFrameCallback(VCMTiming& timing,
- Clock* clock)
-:
-_critSect(CriticalSectionWrapper::CreateCriticalSection()),
-_clock(clock),
-_receiveCallback(NULL),
-_timing(timing),
-_timestampMap(kDecoderFrameMemoryLength),
-_lastReceivedPictureID(0)
-{
-}
-
-VCMDecodedFrameCallback::~VCMDecodedFrameCallback()
-{
- delete _critSect;
-}
-
-void VCMDecodedFrameCallback::SetUserReceiveCallback(
- VCMReceiveCallback* receiveCallback)
-{
- CriticalSectionScoped cs(_critSect);
- _receiveCallback = receiveCallback;
-}
-
-VCMReceiveCallback* VCMDecodedFrameCallback::UserReceiveCallback()
-{
- CriticalSectionScoped cs(_critSect);
- return _receiveCallback;
-}
-
-int32_t VCMDecodedFrameCallback::Decoded(VideoFrame& decodedImage) {
- // TODO(holmer): We should improve this so that we can handle multiple
- // callbacks from one call to Decode().
- VCMFrameInformation* frameInfo;
- VCMReceiveCallback* callback;
- {
- CriticalSectionScoped cs(_critSect);
- frameInfo = _timestampMap.Pop(decodedImage.timestamp());
- callback = _receiveCallback;
- }
-
- if (frameInfo == NULL) {
- LOG(LS_WARNING) << "Too many frames backed up in the decoder, dropping "
- "this one.";
- return WEBRTC_VIDEO_CODEC_OK;
- }
-
- _timing.StopDecodeTimer(
- decodedImage.timestamp(),
- frameInfo->decodeStartTimeMs,
- _clock->TimeInMilliseconds(),
- frameInfo->renderTimeMs);
-
- if (callback != NULL)
- {
- decodedImage.set_render_time_ms(frameInfo->renderTimeMs);
- decodedImage.set_rotation(frameInfo->rotation);
- callback->FrameToRender(decodedImage);
- }
- return WEBRTC_VIDEO_CODEC_OK;
-}
-
-int32_t
-VCMDecodedFrameCallback::ReceivedDecodedReferenceFrame(
- const uint64_t pictureId)
-{
- CriticalSectionScoped cs(_critSect);
- if (_receiveCallback != NULL)
- {
- return _receiveCallback->ReceivedDecodedReferenceFrame(pictureId);
- }
- return -1;
-}
-
-int32_t
-VCMDecodedFrameCallback::ReceivedDecodedFrame(const uint64_t pictureId)
-{
- _lastReceivedPictureID = pictureId;
- return 0;
-}
-
-uint64_t VCMDecodedFrameCallback::LastReceivedPictureID() const
-{
- return _lastReceivedPictureID;
-}
-
-void VCMDecodedFrameCallback::Map(uint32_t timestamp,
- VCMFrameInformation* frameInfo) {
- CriticalSectionScoped cs(_critSect);
- _timestampMap.Add(timestamp, frameInfo);
-}
-
-int32_t VCMDecodedFrameCallback::Pop(uint32_t timestamp)
-{
- CriticalSectionScoped cs(_critSect);
- if (_timestampMap.Pop(timestamp) == NULL)
- {
- return VCM_GENERAL_ERROR;
- }
- return VCM_OK;
-}
-
-VCMGenericDecoder::VCMGenericDecoder(VideoDecoder& decoder, bool isExternal)
-:
-_callback(NULL),
-_frameInfos(),
-_nextFrameInfoIdx(0),
-_decoder(decoder),
-_codecType(kVideoCodecUnknown),
-_isExternal(isExternal),
-_keyFrameDecoded(false)
-{
-}
-
-VCMGenericDecoder::~VCMGenericDecoder()
-{
-}
-
-int32_t VCMGenericDecoder::InitDecode(const VideoCodec* settings,
- int32_t numberOfCores)
-{
- _codecType = settings->codecType;
-
- return _decoder.InitDecode(settings, numberOfCores);
-}
-
-int32_t VCMGenericDecoder::Decode(const VCMEncodedFrame& frame,
- int64_t nowMs)
-{
- _frameInfos[_nextFrameInfoIdx].decodeStartTimeMs = nowMs;
- _frameInfos[_nextFrameInfoIdx].renderTimeMs = frame.RenderTimeMs();
- _frameInfos[_nextFrameInfoIdx].rotation = frame.rotation();
- _callback->Map(frame.TimeStamp(), &_frameInfos[_nextFrameInfoIdx]);
-
- _nextFrameInfoIdx = (_nextFrameInfoIdx + 1) % kDecoderFrameMemoryLength;
- int32_t ret = _decoder.Decode(frame.EncodedImage(),
- frame.MissingFrame(),
- frame.FragmentationHeader(),
- frame.CodecSpecific(),
- frame.RenderTimeMs());
-
- if (ret < WEBRTC_VIDEO_CODEC_OK)
- {
- LOG(LS_WARNING) << "Failed to decode frame with timestamp "
- << frame.TimeStamp() << ", error code: " << ret;
- _callback->Pop(frame.TimeStamp());
- return ret;
- }
- else if (ret == WEBRTC_VIDEO_CODEC_NO_OUTPUT ||
- ret == WEBRTC_VIDEO_CODEC_REQUEST_SLI)
- {
- // No output
- _callback->Pop(frame.TimeStamp());
- }
- return ret;
-}
-
-int32_t
-VCMGenericDecoder::Release()
-{
- return _decoder.Release();
-}
-
-int32_t VCMGenericDecoder::Reset()
-{
- return _decoder.Reset();
-}
-
-int32_t VCMGenericDecoder::RegisterDecodeCompleteCallback(VCMDecodedFrameCallback* callback)
-{
- _callback = callback;
- return _decoder.RegisterDecodeCompleteCallback(callback);
-}
-
-bool VCMGenericDecoder::External() const
-{
- return _isExternal;
-}
-
-} // namespace
diff --git a/webrtc/modules/video_coding/main/source/generic_decoder.h b/webrtc/modules/video_coding/main/source/generic_decoder.h
deleted file mode 100644
index 09929e64f4..0000000000
--- a/webrtc/modules/video_coding/main/source/generic_decoder.h
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef WEBRTC_MODULES_VIDEO_CODING_GENERIC_DECODER_H_
-#define WEBRTC_MODULES_VIDEO_CODING_GENERIC_DECODER_H_
-
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
-#include "webrtc/modules/video_coding/main/source/encoded_frame.h"
-#include "webrtc/modules/video_coding/main/source/timestamp_map.h"
-#include "webrtc/modules/video_coding/main/source/timing.h"
-
-namespace webrtc
-{
-
-class VCMReceiveCallback;
-
-enum { kDecoderFrameMemoryLength = 10 };
-
-struct VCMFrameInformation
-{
- int64_t renderTimeMs;
- int64_t decodeStartTimeMs;
- void* userData;
- VideoRotation rotation;
-};
-
-class VCMDecodedFrameCallback : public DecodedImageCallback
-{
-public:
- VCMDecodedFrameCallback(VCMTiming& timing, Clock* clock);
- virtual ~VCMDecodedFrameCallback();
- void SetUserReceiveCallback(VCMReceiveCallback* receiveCallback);
- VCMReceiveCallback* UserReceiveCallback();
-
- virtual int32_t Decoded(VideoFrame& decodedImage);
- virtual int32_t ReceivedDecodedReferenceFrame(const uint64_t pictureId);
- virtual int32_t ReceivedDecodedFrame(const uint64_t pictureId);
-
- uint64_t LastReceivedPictureID() const;
-
- void Map(uint32_t timestamp, VCMFrameInformation* frameInfo);
- int32_t Pop(uint32_t timestamp);
-
-private:
- // Protect |_receiveCallback| and |_timestampMap|.
- CriticalSectionWrapper* _critSect;
- Clock* _clock;
- VCMReceiveCallback* _receiveCallback; // Guarded by |_critSect|.
- VCMTiming& _timing;
- VCMTimestampMap _timestampMap; // Guarded by |_critSect|.
- uint64_t _lastReceivedPictureID;
-};
-
-
-class VCMGenericDecoder
-{
- friend class VCMCodecDataBase;
-public:
- VCMGenericDecoder(VideoDecoder& decoder, bool isExternal = false);
- ~VCMGenericDecoder();
-
- /**
- * Initialize the decoder with the information from the VideoCodec
- */
- int32_t InitDecode(const VideoCodec* settings,
- int32_t numberOfCores);
-
- /**
- * Decode to a raw I420 frame,
- *
- * inputVideoBuffer reference to encoded video frame
- */
- int32_t Decode(const VCMEncodedFrame& inputFrame, int64_t nowMs);
-
- /**
- * Free the decoder memory
- */
- int32_t Release();
-
- /**
- * Reset the decoder state, prepare for a new call
- */
- int32_t Reset();
-
- /**
- * Set decode callback. Deregistering while decoding is illegal.
- */
- int32_t RegisterDecodeCompleteCallback(VCMDecodedFrameCallback* callback);
-
- bool External() const;
-
-private:
- VCMDecodedFrameCallback* _callback;
- VCMFrameInformation _frameInfos[kDecoderFrameMemoryLength];
- uint32_t _nextFrameInfoIdx;
- VideoDecoder& _decoder;
- VideoCodecType _codecType;
- bool _isExternal;
- bool _keyFrameDecoded;
-};
-
-} // namespace webrtc
-
-#endif // WEBRTC_MODULES_VIDEO_CODING_GENERIC_DECODER_H_
diff --git a/webrtc/modules/video_coding/main/source/generic_encoder.h b/webrtc/modules/video_coding/main/source/generic_encoder.h
deleted file mode 100644
index 3a7132860f..0000000000
--- a/webrtc/modules/video_coding/main/source/generic_encoder.h
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef WEBRTC_MODULES_VIDEO_CODING_GENERIC_ENCODER_H_
-#define WEBRTC_MODULES_VIDEO_CODING_GENERIC_ENCODER_H_
-
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
-#include "webrtc/modules/video_coding/main/interface/video_coding_defines.h"
-
-#include <stdio.h>
-
-#include "webrtc/base/criticalsection.h"
-#include "webrtc/base/scoped_ptr.h"
-
-namespace webrtc {
-class CriticalSectionWrapper;
-
-namespace media_optimization {
-class MediaOptimization;
-} // namespace media_optimization
-
-struct EncoderParameters {
- uint32_t target_bitrate;
- uint8_t loss_rate;
- int64_t rtt;
- uint32_t input_frame_rate;
-};
-
-/*************************************/
-/* VCMEncodeFrameCallback class */
-/***********************************/
-class VCMEncodedFrameCallback : public EncodedImageCallback
-{
-public:
- VCMEncodedFrameCallback(EncodedImageCallback* post_encode_callback);
- virtual ~VCMEncodedFrameCallback();
-
- /*
- * Callback implementation - codec encode complete
- */
- int32_t Encoded(
- const EncodedImage& encodedImage,
- const CodecSpecificInfo* codecSpecificInfo = NULL,
- const RTPFragmentationHeader* fragmentationHeader = NULL);
- /*
- * Callback implementation - generic encoder encode complete
- */
- int32_t SetTransportCallback(VCMPacketizationCallback* transport);
- /**
- * Set media Optimization
- */
- void SetMediaOpt (media_optimization::MediaOptimization* mediaOpt);
-
- void SetPayloadType(uint8_t payloadType) { _payloadType = payloadType; };
- void SetInternalSource(bool internalSource) { _internalSource = internalSource; };
-
- void SetRotation(VideoRotation rotation) { _rotation = rotation; }
-
-private:
- VCMPacketizationCallback* _sendCallback;
- media_optimization::MediaOptimization* _mediaOpt;
- uint8_t _payloadType;
- bool _internalSource;
- VideoRotation _rotation;
-
- EncodedImageCallback* post_encode_callback_;
-
-#ifdef DEBUG_ENCODER_BIT_STREAM
- FILE* _bitStreamAfterEncoder;
-#endif
-};// end of VCMEncodeFrameCallback class
-
-
-/******************************/
-/* VCMGenericEncoder class */
-/******************************/
-class VCMGenericEncoder
-{
- friend class VCMCodecDataBase;
-public:
- VCMGenericEncoder(VideoEncoder* encoder,
- VideoEncoderRateObserver* rate_observer,
- VCMEncodedFrameCallback* encoded_frame_callback,
- bool internalSource);
- ~VCMGenericEncoder();
- /**
- * Free encoder memory
- */
- int32_t Release();
- /**
- * Initialize the encoder with the information from the VideoCodec
- */
- int32_t InitEncode(const VideoCodec* settings,
- int32_t numberOfCores,
- size_t maxPayloadSize);
- /**
- * Encode raw image
- * inputFrame : Frame containing raw image
- * codecSpecificInfo : Specific codec data
- * cameraFrameRate : Request or information from the remote side
- * frameType : The requested frame type to encode
- */
- int32_t Encode(const VideoFrame& inputFrame,
- const CodecSpecificInfo* codecSpecificInfo,
- const std::vector<FrameType>& frameTypes);
-
- void SetEncoderParameters(const EncoderParameters& params);
- EncoderParameters GetEncoderParameters() const;
-
- int32_t SetPeriodicKeyFrames(bool enable);
-
- int32_t RequestFrame(const std::vector<FrameType>& frame_types);
-
- bool InternalSource() const;
-
- void OnDroppedFrame();
-
- bool SupportsNativeHandle() const;
-
- int GetTargetFramerate();
-
-private:
- VideoEncoder* const encoder_;
- VideoEncoderRateObserver* const rate_observer_;
- VCMEncodedFrameCallback* const vcm_encoded_frame_callback_;
- const bool internal_source_;
- mutable rtc::CriticalSection params_lock_;
- EncoderParameters encoder_params_ GUARDED_BY(params_lock_);
- VideoRotation rotation_;
- bool is_screenshare_;
-}; // end of VCMGenericEncoder class
-
-} // namespace webrtc
-
-#endif // WEBRTC_MODULES_VIDEO_CODING_GENERIC_ENCODER_H_
diff --git a/webrtc/modules/video_coding/main/source/inter_frame_delay.cc b/webrtc/modules/video_coding/main/source/inter_frame_delay.cc
deleted file mode 100644
index 4786917e16..0000000000
--- a/webrtc/modules/video_coding/main/source/inter_frame_delay.cc
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "webrtc/modules/video_coding/main/source/inter_frame_delay.h"
-
-namespace webrtc {
-
-VCMInterFrameDelay::VCMInterFrameDelay(int64_t currentWallClock)
-{
- Reset(currentWallClock);
-}
-
-// Resets the delay estimate
-void
-VCMInterFrameDelay::Reset(int64_t currentWallClock)
-{
- _zeroWallClock = currentWallClock;
- _wrapArounds = 0;
- _prevWallClock = 0;
- _prevTimestamp = 0;
- _dTS = 0;
-}
-
-// Calculates the delay of a frame with the given timestamp.
-// This method is called when the frame is complete.
-bool
-VCMInterFrameDelay::CalculateDelay(uint32_t timestamp,
- int64_t *delay,
- int64_t currentWallClock)
-{
- if (_prevWallClock == 0)
- {
- // First set of data, initialization, wait for next frame
- _prevWallClock = currentWallClock;
- _prevTimestamp = timestamp;
- *delay = 0;
- return true;
- }
-
- int32_t prevWrapArounds = _wrapArounds;
- CheckForWrapArounds(timestamp);
-
- // This will be -1 for backward wrap arounds and +1 for forward wrap arounds
- int32_t wrapAroundsSincePrev = _wrapArounds - prevWrapArounds;
-
- // Account for reordering in jitter variance estimate in the future?
- // Note that this also captures incomplete frames which are grabbed
- // for decoding after a later frame has been complete, i.e. real
- // packet losses.
- if ((wrapAroundsSincePrev == 0 && timestamp < _prevTimestamp) || wrapAroundsSincePrev < 0)
- {
- *delay = 0;
- return false;
- }
-
- // Compute the compensated timestamp difference and convert it to ms and
- // round it to closest integer.
- _dTS = static_cast<int64_t>((timestamp + wrapAroundsSincePrev *
- (static_cast<int64_t>(1)<<32) - _prevTimestamp) / 90.0 + 0.5);
-
- // frameDelay is the difference of dT and dTS -- i.e. the difference of
- // the wall clock time difference and the timestamp difference between
- // two following frames.
- *delay = static_cast<int64_t>(currentWallClock - _prevWallClock - _dTS);
-
- _prevTimestamp = timestamp;
- _prevWallClock = currentWallClock;
-
- return true;
-}
-
-// Returns the current difference between incoming timestamps
-uint32_t VCMInterFrameDelay::CurrentTimeStampDiffMs() const
-{
- if (_dTS < 0)
- {
- return 0;
- }
- return static_cast<uint32_t>(_dTS);
-}
-
-// Investigates if the timestamp clock has overflowed since the last timestamp and
-// keeps track of the number of wrap arounds since reset.
-void
-VCMInterFrameDelay::CheckForWrapArounds(uint32_t timestamp)
-{
- if (timestamp < _prevTimestamp)
- {
- // This difference will probably be less than -2^31 if we have had a wrap around
- // (e.g. timestamp = 1, _previousTimestamp = 2^32 - 1). Since it is cast to a Word32,
- // it should be positive.
- if (static_cast<int32_t>(timestamp - _prevTimestamp) > 0)
- {
- // Forward wrap around
- _wrapArounds++;
- }
- }
- // This difference will probably be less than -2^31 if we have had a backward wrap around.
- // Since it is cast to a Word32, it should be positive.
- else if (static_cast<int32_t>(_prevTimestamp - timestamp) > 0)
- {
- // Backward wrap around
- _wrapArounds--;
- }
-}
-
-}
diff --git a/webrtc/modules/video_coding/main/source/inter_frame_delay.h b/webrtc/modules/video_coding/main/source/inter_frame_delay.h
deleted file mode 100644
index 58b326ae96..0000000000
--- a/webrtc/modules/video_coding/main/source/inter_frame_delay.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef WEBRTC_MODULES_VIDEO_CODING_INTER_FRAME_DELAY_H_
-#define WEBRTC_MODULES_VIDEO_CODING_INTER_FRAME_DELAY_H_
-
-#include "webrtc/typedefs.h"
-
-namespace webrtc
-{
-
-class VCMInterFrameDelay
-{
-public:
- VCMInterFrameDelay(int64_t currentWallClock);
-
- // Resets the estimate. Zeros are given as parameters.
- void Reset(int64_t currentWallClock);
-
- // Calculates the delay of a frame with the given timestamp.
- // This method is called when the frame is complete.
- //
- // Input:
- // - timestamp : RTP timestamp of a received frame
- // - *delay : Pointer to memory where the result should be stored
- // - currentWallClock : The current time in milliseconds.
- // Should be -1 for normal operation, only used for testing.
- // Return value : true if OK, false when reordered timestamps
- bool CalculateDelay(uint32_t timestamp,
- int64_t *delay,
- int64_t currentWallClock);
-
- // Returns the current difference between incoming timestamps
- //
- // Return value : Wrap-around compensated difference between incoming
- // timestamps.
- uint32_t CurrentTimeStampDiffMs() const;
-
-private:
- // Controls if the RTP timestamp counter has had a wrap around
- // between the current and the previously received frame.
- //
- // Input:
- // - timestmap : RTP timestamp of the current frame.
- void CheckForWrapArounds(uint32_t timestamp);
-
- int64_t _zeroWallClock; // Local timestamp of the first video packet received
- int32_t _wrapArounds; // Number of wrapArounds detected
- // The previous timestamp passed to the delay estimate
- uint32_t _prevTimestamp;
- // The previous wall clock timestamp used by the delay estimate
- int64_t _prevWallClock;
- // Wrap-around compensated difference between incoming timestamps
- int64_t _dTS;
-};
-
-} // namespace webrtc
-
-#endif // WEBRTC_MODULES_VIDEO_CODING_INTER_FRAME_DELAY_H_
diff --git a/webrtc/modules/video_coding/main/source/internal_defines.h b/webrtc/modules/video_coding/main/source/internal_defines.h
deleted file mode 100644
index adc940f20d..0000000000
--- a/webrtc/modules/video_coding/main/source/internal_defines.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef WEBRTC_MODULES_VIDEO_CODING_SOURCE_INTERNAL_DEFINES_H_
-#define WEBRTC_MODULES_VIDEO_CODING_SOURCE_INTERNAL_DEFINES_H_
-
-#include "webrtc/typedefs.h"
-
-namespace webrtc
-{
-
-#define MASK_32_BITS(x) (0xFFFFFFFF & (x))
-
-inline uint32_t MaskWord64ToUWord32(int64_t w64)
-{
- return static_cast<uint32_t>(MASK_32_BITS(w64));
-}
-
-#define VCM_MAX(a, b) (((a) > (b)) ? (a) : (b))
-#define VCM_MIN(a, b) (((a) < (b)) ? (a) : (b))
-
-#define VCM_DEFAULT_CODEC_WIDTH 352
-#define VCM_DEFAULT_CODEC_HEIGHT 288
-#define VCM_DEFAULT_FRAME_RATE 30
-#define VCM_MIN_BITRATE 30
-#define VCM_FLUSH_INDICATOR 4
-
-// Helper macros for creating the static codec list
-#define VCM_NO_CODEC_IDX -1
-#ifdef VIDEOCODEC_VP8
- #define VCM_VP8_IDX (VCM_NO_CODEC_IDX + 1)
-#else
- #define VCM_VP8_IDX VCM_NO_CODEC_IDX
-#endif
-#ifdef VIDEOCODEC_VP9
- #define VCM_VP9_IDX (VCM_VP8_IDX + 1)
-#else
- #define VCM_VP9_IDX VCM_VP8_IDX
-#endif
-#ifdef VIDEOCODEC_H264
- #define VCM_H264_IDX (VCM_VP9_IDX + 1)
-#else
- #define VCM_H264_IDX VCM_VP9_IDX
-#endif
-#ifdef VIDEOCODEC_I420
- #define VCM_I420_IDX (VCM_H264_IDX + 1)
-#else
- #define VCM_I420_IDX VCM_H264_IDX
-#endif
-#define VCM_NUM_VIDEO_CODECS_AVAILABLE (VCM_I420_IDX + 1)
-
-#define VCM_NO_RECEIVER_ID 0
-
-inline int32_t VCMId(const int32_t vcmId, const int32_t receiverId = 0)
-{
- return static_cast<int32_t>((vcmId << 16) + receiverId);
-}
-
-} // namespace webrtc
-
-#endif // WEBRTC_MODULES_VIDEO_CODING_SOURCE_INTERNAL_DEFINES_H_
diff --git a/webrtc/modules/video_coding/main/source/jitter_estimator.cc b/webrtc/modules/video_coding/main/source/jitter_estimator.cc
deleted file mode 100644
index 5894c88d72..0000000000
--- a/webrtc/modules/video_coding/main/source/jitter_estimator.cc
+++ /dev/null
@@ -1,482 +0,0 @@
-/*
- * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "webrtc/modules/video_coding/main/source/internal_defines.h"
-#include "webrtc/modules/video_coding/main/source/jitter_estimator.h"
-#include "webrtc/modules/video_coding/main/source/rtt_filter.h"
-#include "webrtc/system_wrappers/include/clock.h"
-#include "webrtc/system_wrappers/include/field_trial.h"
-
-#include <assert.h>
-#include <math.h>
-#include <stdlib.h>
-#include <string.h>
-
-namespace webrtc {
-
-enum { kStartupDelaySamples = 30 };
-enum { kFsAccuStartupSamples = 5 };
-enum { kMaxFramerateEstimate = 200 };
-
-VCMJitterEstimator::VCMJitterEstimator(const Clock* clock,
- int32_t vcmId,
- int32_t receiverId)
- : _vcmId(vcmId),
- _receiverId(receiverId),
- _phi(0.97),
- _psi(0.9999),
- _alphaCountMax(400),
- _thetaLow(0.000001),
- _nackLimit(3),
- _numStdDevDelayOutlier(15),
- _numStdDevFrameSizeOutlier(3),
- _noiseStdDevs(2.33), // ~Less than 1% chance
- // (look up in normal distribution table)...
- _noiseStdDevOffset(30.0), // ...of getting 30 ms freezes
- _rttFilter(),
- fps_counter_(30), // TODO(sprang): Use an estimator with limit based on
- // time, rather than number of samples.
- low_rate_experiment_(kInit),
- clock_(clock) {
- Reset();
-}
-
-VCMJitterEstimator::~VCMJitterEstimator() {
-}
-
-VCMJitterEstimator&
-VCMJitterEstimator::operator=(const VCMJitterEstimator& rhs)
-{
- if (this != &rhs)
- {
- memcpy(_thetaCov, rhs._thetaCov, sizeof(_thetaCov));
- memcpy(_Qcov, rhs._Qcov, sizeof(_Qcov));
-
- _vcmId = rhs._vcmId;
- _receiverId = rhs._receiverId;
- _avgFrameSize = rhs._avgFrameSize;
- _varFrameSize = rhs._varFrameSize;
- _maxFrameSize = rhs._maxFrameSize;
- _fsSum = rhs._fsSum;
- _fsCount = rhs._fsCount;
- _lastUpdateT = rhs._lastUpdateT;
- _prevEstimate = rhs._prevEstimate;
- _prevFrameSize = rhs._prevFrameSize;
- _avgNoise = rhs._avgNoise;
- _alphaCount = rhs._alphaCount;
- _filterJitterEstimate = rhs._filterJitterEstimate;
- _startupCount = rhs._startupCount;
- _latestNackTimestamp = rhs._latestNackTimestamp;
- _nackCount = rhs._nackCount;
- _rttFilter = rhs._rttFilter;
- }
- return *this;
-}
-
-// Resets the JitterEstimate
-void
-VCMJitterEstimator::Reset()
-{
- _theta[0] = 1/(512e3/8);
- _theta[1] = 0;
- _varNoise = 4.0;
-
- _thetaCov[0][0] = 1e-4;
- _thetaCov[1][1] = 1e2;
- _thetaCov[0][1] = _thetaCov[1][0] = 0;
- _Qcov[0][0] = 2.5e-10;
- _Qcov[1][1] = 1e-10;
- _Qcov[0][1] = _Qcov[1][0] = 0;
- _avgFrameSize = 500;
- _maxFrameSize = 500;
- _varFrameSize = 100;
- _lastUpdateT = -1;
- _prevEstimate = -1.0;
- _prevFrameSize = 0;
- _avgNoise = 0.0;
- _alphaCount = 1;
- _filterJitterEstimate = 0.0;
- _latestNackTimestamp = 0;
- _nackCount = 0;
- _fsSum = 0;
- _fsCount = 0;
- _startupCount = 0;
- _rttFilter.Reset();
- fps_counter_.Reset();
-}
-
-void
-VCMJitterEstimator::ResetNackCount()
-{
- _nackCount = 0;
-}
-
-// Updates the estimates with the new measurements
-void
-VCMJitterEstimator::UpdateEstimate(int64_t frameDelayMS, uint32_t frameSizeBytes,
- bool incompleteFrame /* = false */)
-{
- if (frameSizeBytes == 0)
- {
- return;
- }
- int deltaFS = frameSizeBytes - _prevFrameSize;
- if (_fsCount < kFsAccuStartupSamples)
- {
- _fsSum += frameSizeBytes;
- _fsCount++;
- }
- else if (_fsCount == kFsAccuStartupSamples)
- {
- // Give the frame size filter
- _avgFrameSize = static_cast<double>(_fsSum) /
- static_cast<double>(_fsCount);
- _fsCount++;
- }
- if (!incompleteFrame || frameSizeBytes > _avgFrameSize)
- {
- double avgFrameSize = _phi * _avgFrameSize +
- (1 - _phi) * frameSizeBytes;
- if (frameSizeBytes < _avgFrameSize + 2 * sqrt(_varFrameSize))
- {
- // Only update the average frame size if this sample wasn't a
- // key frame
- _avgFrameSize = avgFrameSize;
- }
- // Update the variance anyway since we want to capture cases where we only get
- // key frames.
- _varFrameSize = VCM_MAX(_phi * _varFrameSize + (1 - _phi) *
- (frameSizeBytes - avgFrameSize) *
- (frameSizeBytes - avgFrameSize), 1.0);
- }
-
- // Update max frameSize estimate
- _maxFrameSize = VCM_MAX(_psi * _maxFrameSize, static_cast<double>(frameSizeBytes));
-
- if (_prevFrameSize == 0)
- {
- _prevFrameSize = frameSizeBytes;
- return;
- }
- _prevFrameSize = frameSizeBytes;
-
- // Only update the Kalman filter if the sample is not considered
- // an extreme outlier. Even if it is an extreme outlier from a
- // delay point of view, if the frame size also is large the
- // deviation is probably due to an incorrect line slope.
- double deviation = DeviationFromExpectedDelay(frameDelayMS, deltaFS);
-
- if (fabs(deviation) < _numStdDevDelayOutlier * sqrt(_varNoise) ||
- frameSizeBytes > _avgFrameSize + _numStdDevFrameSizeOutlier * sqrt(_varFrameSize))
- {
- // Update the variance of the deviation from the
- // line given by the Kalman filter
- EstimateRandomJitter(deviation, incompleteFrame);
- // Prevent updating with frames which have been congested by a large
- // frame, and therefore arrives almost at the same time as that frame.
- // This can occur when we receive a large frame (key frame) which
- // has been delayed. The next frame is of normal size (delta frame),
- // and thus deltaFS will be << 0. This removes all frame samples
- // which arrives after a key frame.
- if ((!incompleteFrame || deviation >= 0.0) &&
- static_cast<double>(deltaFS) > - 0.25 * _maxFrameSize)
- {
- // Update the Kalman filter with the new data
- KalmanEstimateChannel(frameDelayMS, deltaFS);
- }
- }
- else
- {
- int nStdDev = (deviation >= 0) ? _numStdDevDelayOutlier : -_numStdDevDelayOutlier;
- EstimateRandomJitter(nStdDev * sqrt(_varNoise), incompleteFrame);
- }
- // Post process the total estimated jitter
- if (_startupCount >= kStartupDelaySamples)
- {
- PostProcessEstimate();
- }
- else
- {
- _startupCount++;
- }
-}
-
-// Updates the nack/packet ratio
-void
-VCMJitterEstimator::FrameNacked()
-{
- // Wait until _nackLimit retransmissions has been received,
- // then always add ~1 RTT delay.
- // TODO(holmer): Should we ever remove the additional delay if the
- // the packet losses seem to have stopped? We could for instance scale
- // the number of RTTs to add with the amount of retransmissions in a given
- // time interval, or similar.
- if (_nackCount < _nackLimit)
- {
- _nackCount++;
- }
-}
-
-// Updates Kalman estimate of the channel
-// The caller is expected to sanity check the inputs.
-void
-VCMJitterEstimator::KalmanEstimateChannel(int64_t frameDelayMS,
- int32_t deltaFSBytes)
-{
- double Mh[2];
- double hMh_sigma;
- double kalmanGain[2];
- double measureRes;
- double t00, t01;
-
- // Kalman filtering
-
- // Prediction
- // M = M + Q
- _thetaCov[0][0] += _Qcov[0][0];
- _thetaCov[0][1] += _Qcov[0][1];
- _thetaCov[1][0] += _Qcov[1][0];
- _thetaCov[1][1] += _Qcov[1][1];
-
- // Kalman gain
- // K = M*h'/(sigma2n + h*M*h') = M*h'/(1 + h*M*h')
- // h = [dFS 1]
- // Mh = M*h'
- // hMh_sigma = h*M*h' + R
- Mh[0] = _thetaCov[0][0] * deltaFSBytes + _thetaCov[0][1];
- Mh[1] = _thetaCov[1][0] * deltaFSBytes + _thetaCov[1][1];
- // sigma weights measurements with a small deltaFS as noisy and
- // measurements with large deltaFS as good
- if (_maxFrameSize < 1.0)
- {
- return;
- }
- double sigma = (300.0 * exp(-fabs(static_cast<double>(deltaFSBytes)) /
- (1e0 * _maxFrameSize)) + 1) * sqrt(_varNoise);
- if (sigma < 1.0)
- {
- sigma = 1.0;
- }
- hMh_sigma = deltaFSBytes * Mh[0] + Mh[1] + sigma;
- if ((hMh_sigma < 1e-9 && hMh_sigma >= 0) || (hMh_sigma > -1e-9 && hMh_sigma <= 0))
- {
- assert(false);
- return;
- }
- kalmanGain[0] = Mh[0] / hMh_sigma;
- kalmanGain[1] = Mh[1] / hMh_sigma;
-
- // Correction
- // theta = theta + K*(dT - h*theta)
- measureRes = frameDelayMS - (deltaFSBytes * _theta[0] + _theta[1]);
- _theta[0] += kalmanGain[0] * measureRes;
- _theta[1] += kalmanGain[1] * measureRes;
-
- if (_theta[0] < _thetaLow)
- {
- _theta[0] = _thetaLow;
- }
-
- // M = (I - K*h)*M
- t00 = _thetaCov[0][0];
- t01 = _thetaCov[0][1];
- _thetaCov[0][0] = (1 - kalmanGain[0] * deltaFSBytes) * t00 -
- kalmanGain[0] * _thetaCov[1][0];
- _thetaCov[0][1] = (1 - kalmanGain[0] * deltaFSBytes) * t01 -
- kalmanGain[0] * _thetaCov[1][1];
- _thetaCov[1][0] = _thetaCov[1][0] * (1 - kalmanGain[1]) -
- kalmanGain[1] * deltaFSBytes * t00;
- _thetaCov[1][1] = _thetaCov[1][1] * (1 - kalmanGain[1]) -
- kalmanGain[1] * deltaFSBytes * t01;
-
- // Covariance matrix, must be positive semi-definite
- assert(_thetaCov[0][0] + _thetaCov[1][1] >= 0 &&
- _thetaCov[0][0] * _thetaCov[1][1] - _thetaCov[0][1] * _thetaCov[1][0] >= 0 &&
- _thetaCov[0][0] >= 0);
-}
-
-// Calculate difference in delay between a sample and the
-// expected delay estimated by the Kalman filter
-double
-VCMJitterEstimator::DeviationFromExpectedDelay(int64_t frameDelayMS,
- int32_t deltaFSBytes) const
-{
- return frameDelayMS - (_theta[0] * deltaFSBytes + _theta[1]);
-}
-
-// Estimates the random jitter by calculating the variance of the
-// sample distance from the line given by theta.
-void VCMJitterEstimator::EstimateRandomJitter(double d_dT,
- bool incompleteFrame) {
- uint64_t now = clock_->TimeInMicroseconds();
- if (_lastUpdateT != -1) {
- fps_counter_.AddSample(now - _lastUpdateT);
- }
- _lastUpdateT = now;
-
- if (_alphaCount == 0) {
- assert(false);
- return;
- }
- double alpha =
- static_cast<double>(_alphaCount - 1) / static_cast<double>(_alphaCount);
- _alphaCount++;
- if (_alphaCount > _alphaCountMax)
- _alphaCount = _alphaCountMax;
-
- if (LowRateExperimentEnabled()) {
- // In order to avoid a low frame rate stream to react slower to changes,
- // scale the alpha weight relative a 30 fps stream.
- double fps = GetFrameRate();
- if (fps > 0.0) {
- double rate_scale = 30.0 / fps;
- // At startup, there can be a lot of noise in the fps estimate.
- // Interpolate rate_scale linearly, from 1.0 at sample #1, to 30.0 / fps
- // at sample #kStartupDelaySamples.
- if (_alphaCount < kStartupDelaySamples) {
- rate_scale =
- (_alphaCount * rate_scale + (kStartupDelaySamples - _alphaCount)) /
- kStartupDelaySamples;
- }
- alpha = pow(alpha, rate_scale);
- }
- }
-
- double avgNoise = alpha * _avgNoise + (1 - alpha) * d_dT;
- double varNoise =
- alpha * _varNoise + (1 - alpha) * (d_dT - _avgNoise) * (d_dT - _avgNoise);
- if (!incompleteFrame || varNoise > _varNoise) {
- _avgNoise = avgNoise;
- _varNoise = varNoise;
- }
- if (_varNoise < 1.0) {
- // The variance should never be zero, since we might get
- // stuck and consider all samples as outliers.
- _varNoise = 1.0;
- }
-}
-
-double
-VCMJitterEstimator::NoiseThreshold() const
-{
- double noiseThreshold = _noiseStdDevs * sqrt(_varNoise) - _noiseStdDevOffset;
- if (noiseThreshold < 1.0)
- {
- noiseThreshold = 1.0;
- }
- return noiseThreshold;
-}
-
-// Calculates the current jitter estimate from the filtered estimates
-double
-VCMJitterEstimator::CalculateEstimate()
-{
- double ret = _theta[0] * (_maxFrameSize - _avgFrameSize) + NoiseThreshold();
-
- // A very low estimate (or negative) is neglected
- if (ret < 1.0) {
- if (_prevEstimate <= 0.01)
- {
- ret = 1.0;
- }
- else
- {
- ret = _prevEstimate;
- }
- }
- if (ret > 10000.0) // Sanity
- {
- ret = 10000.0;
- }
- _prevEstimate = ret;
- return ret;
-}
-
-void
-VCMJitterEstimator::PostProcessEstimate()
-{
- _filterJitterEstimate = CalculateEstimate();
-}
-
-void
-VCMJitterEstimator::UpdateRtt(int64_t rttMs)
-{
- _rttFilter.Update(rttMs);
-}
-
-void
-VCMJitterEstimator::UpdateMaxFrameSize(uint32_t frameSizeBytes)
-{
- if (_maxFrameSize < frameSizeBytes)
- {
- _maxFrameSize = frameSizeBytes;
- }
-}
-
-// Returns the current filtered estimate if available,
-// otherwise tries to calculate an estimate.
-int VCMJitterEstimator::GetJitterEstimate(double rttMultiplier) {
- double jitterMS = CalculateEstimate() + OPERATING_SYSTEM_JITTER;
- if (_filterJitterEstimate > jitterMS)
- jitterMS = _filterJitterEstimate;
- if (_nackCount >= _nackLimit)
- jitterMS += _rttFilter.RttMs() * rttMultiplier;
-
- if (LowRateExperimentEnabled()) {
- static const double kJitterScaleLowThreshold = 5.0;
- static const double kJitterScaleHighThreshold = 10.0;
- double fps = GetFrameRate();
- // Ignore jitter for very low fps streams.
- if (fps < kJitterScaleLowThreshold) {
- if (fps == 0.0) {
- return jitterMS;
- }
- return 0;
- }
-
- // Semi-low frame rate; scale by factor linearly interpolated from 0.0 at
- // kJitterScaleLowThreshold to 1.0 at kJitterScaleHighThreshold.
- if (fps < kJitterScaleHighThreshold) {
- jitterMS =
- (1.0 / (kJitterScaleHighThreshold - kJitterScaleLowThreshold)) *
- (fps - kJitterScaleLowThreshold) * jitterMS;
- }
- }
-
- return static_cast<uint32_t>(jitterMS + 0.5);
-}
-
-bool VCMJitterEstimator::LowRateExperimentEnabled() {
- if (low_rate_experiment_ == kInit) {
- std::string group =
- webrtc::field_trial::FindFullName("WebRTC-ReducedJitterDelay");
- if (group == "Disabled") {
- low_rate_experiment_ = kDisabled;
- } else {
- low_rate_experiment_ = kEnabled;
- }
- }
- return low_rate_experiment_ == kEnabled ? true : false;
-}
-
-double VCMJitterEstimator::GetFrameRate() const {
- if (fps_counter_.count() == 0)
- return 0;
-
- double fps = 1000000.0 / fps_counter_.ComputeMean();
- // Sanity check.
- assert(fps >= 0.0);
- if (fps > kMaxFramerateEstimate) {
- fps = kMaxFramerateEstimate;
- }
- return fps;
-}
-
-}
diff --git a/webrtc/modules/video_coding/main/source/jitter_estimator.h b/webrtc/modules/video_coding/main/source/jitter_estimator.h
deleted file mode 100644
index 46ed67ba1d..0000000000
--- a/webrtc/modules/video_coding/main/source/jitter_estimator.h
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
- * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef WEBRTC_MODULES_VIDEO_CODING_JITTER_ESTIMATOR_H_
-#define WEBRTC_MODULES_VIDEO_CODING_JITTER_ESTIMATOR_H_
-
-#include "webrtc/base/rollingaccumulator.h"
-#include "webrtc/modules/video_coding/main/source/rtt_filter.h"
-#include "webrtc/typedefs.h"
-
-namespace webrtc
-{
-
-class Clock;
-
-class VCMJitterEstimator
-{
-public:
- VCMJitterEstimator(const Clock* clock,
- int32_t vcmId = 0,
- int32_t receiverId = 0);
- virtual ~VCMJitterEstimator();
- VCMJitterEstimator& operator=(const VCMJitterEstimator& rhs);
-
- // Resets the estimate to the initial state
- void Reset();
- void ResetNackCount();
-
- // Updates the jitter estimate with the new data.
- //
- // Input:
- // - frameDelay : Delay-delta calculated by UTILDelayEstimate in milliseconds
- // - frameSize : Frame size of the current frame.
- // - incompleteFrame : Flags if the frame is used to update the estimate before it
- // was complete. Default is false.
- void UpdateEstimate(int64_t frameDelayMS,
- uint32_t frameSizeBytes,
- bool incompleteFrame = false);
-
- // Returns the current jitter estimate in milliseconds and adds
- // also adds an RTT dependent term in cases of retransmission.
- // Input:
- // - rttMultiplier : RTT param multiplier (when applicable).
- //
- // Return value : Jitter estimate in milliseconds
- int GetJitterEstimate(double rttMultiplier);
-
- // Updates the nack counter.
- void FrameNacked();
-
- // Updates the RTT filter.
- //
- // Input:
- // - rttMs : RTT in ms
- void UpdateRtt(int64_t rttMs);
-
- void UpdateMaxFrameSize(uint32_t frameSizeBytes);
-
- // A constant describing the delay from the jitter buffer
- // to the delay on the receiving side which is not accounted
- // for by the jitter buffer nor the decoding delay estimate.
- static const uint32_t OPERATING_SYSTEM_JITTER = 10;
-
-protected:
- // These are protected for better testing possibilities
- double _theta[2]; // Estimated line parameters (slope, offset)
- double _varNoise; // Variance of the time-deviation from the line
-
- virtual bool LowRateExperimentEnabled();
-
-private:
- // Updates the Kalman filter for the line describing
- // the frame size dependent jitter.
- //
- // Input:
- // - frameDelayMS : Delay-delta calculated by UTILDelayEstimate in milliseconds
- // - deltaFSBytes : Frame size delta, i.e.
- // : frame size at time T minus frame size at time T-1
- void KalmanEstimateChannel(int64_t frameDelayMS, int32_t deltaFSBytes);
-
- // Updates the random jitter estimate, i.e. the variance
- // of the time deviations from the line given by the Kalman filter.
- //
- // Input:
- // - d_dT : The deviation from the kalman estimate
- // - incompleteFrame : True if the frame used to update the estimate
- // with was incomplete
- void EstimateRandomJitter(double d_dT, bool incompleteFrame);
-
- double NoiseThreshold() const;
-
- // Calculates the current jitter estimate.
- //
- // Return value : The current jitter estimate in milliseconds
- double CalculateEstimate();
-
- // Post process the calculated estimate
- void PostProcessEstimate();
-
- // Calculates the difference in delay between a sample and the
- // expected delay estimated by the Kalman filter.
- //
- // Input:
- // - frameDelayMS : Delay-delta calculated by UTILDelayEstimate in milliseconds
- // - deltaFS : Frame size delta, i.e. frame size at time
- // T minus frame size at time T-1
- //
- // Return value : The difference in milliseconds
- double DeviationFromExpectedDelay(int64_t frameDelayMS,
- int32_t deltaFSBytes) const;
-
- double GetFrameRate() const;
-
- // Constants, filter parameters
- int32_t _vcmId;
- int32_t _receiverId;
- const double _phi;
- const double _psi;
- const uint32_t _alphaCountMax;
- const double _thetaLow;
- const uint32_t _nackLimit;
- const int32_t _numStdDevDelayOutlier;
- const int32_t _numStdDevFrameSizeOutlier;
- const double _noiseStdDevs;
- const double _noiseStdDevOffset;
-
- double _thetaCov[2][2]; // Estimate covariance
- double _Qcov[2][2]; // Process noise covariance
- double _avgFrameSize; // Average frame size
- double _varFrameSize; // Frame size variance
- double _maxFrameSize; // Largest frame size received (descending
- // with a factor _psi)
- uint32_t _fsSum;
- uint32_t _fsCount;
-
- int64_t _lastUpdateT;
- double _prevEstimate; // The previously returned jitter estimate
- uint32_t _prevFrameSize; // Frame size of the previous frame
- double _avgNoise; // Average of the random jitter
- uint32_t _alphaCount;
- double _filterJitterEstimate; // The filtered sum of jitter estimates
-
- uint32_t _startupCount;
-
- int64_t _latestNackTimestamp; // Timestamp in ms when the latest nack was seen
- uint32_t _nackCount; // Keeps track of the number of nacks received,
- // but never goes above _nackLimit
- VCMRttFilter _rttFilter;
-
- rtc::RollingAccumulator<uint64_t> fps_counter_;
- enum ExperimentFlag { kInit, kEnabled, kDisabled };
- ExperimentFlag low_rate_experiment_;
- const Clock* clock_;
-};
-
-} // namespace webrtc
-
-#endif // WEBRTC_MODULES_VIDEO_CODING_JITTER_ESTIMATOR_H_
diff --git a/webrtc/modules/video_coding/main/source/media_opt_util.cc b/webrtc/modules/video_coding/main/source/media_opt_util.cc
deleted file mode 100644
index 51decbed97..0000000000
--- a/webrtc/modules/video_coding/main/source/media_opt_util.cc
+++ /dev/null
@@ -1,774 +0,0 @@
-/*
- * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "webrtc/modules/video_coding/main/source/media_opt_util.h"
-
-#include <algorithm>
-#include <float.h>
-#include <limits.h>
-#include <math.h>
-
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/modules/video_coding/codecs/vp8/include/vp8_common_types.h"
-#include "webrtc/modules/video_coding/main/interface/video_coding_defines.h"
-#include "webrtc/modules/video_coding/main/source/fec_tables_xor.h"
-#include "webrtc/modules/video_coding/main/source/nack_fec_tables.h"
-
-namespace webrtc {
-// Max value of loss rates in off-line model
-static const int kPacketLossMax = 129;
-
-namespace media_optimization {
-
-VCMProtectionMethod::VCMProtectionMethod()
- : _effectivePacketLoss(0),
- _protectionFactorK(0),
- _protectionFactorD(0),
- _scaleProtKey(2.0f),
- _maxPayloadSize(1460),
- _qmRobustness(new VCMQmRobustness()),
- _useUepProtectionK(false),
- _useUepProtectionD(true),
- _corrFecCost(1.0),
- _type(kNone) {
-}
-
-VCMProtectionMethod::~VCMProtectionMethod()
-{
- delete _qmRobustness;
-}
-void
-VCMProtectionMethod::UpdateContentMetrics(const
- VideoContentMetrics* contentMetrics)
-{
- _qmRobustness->UpdateContent(contentMetrics);
-}
-
-VCMNackFecMethod::VCMNackFecMethod(int64_t lowRttNackThresholdMs,
- int64_t highRttNackThresholdMs)
- : VCMFecMethod(),
- _lowRttNackMs(lowRttNackThresholdMs),
- _highRttNackMs(highRttNackThresholdMs),
- _maxFramesFec(1) {
- assert(lowRttNackThresholdMs >= -1 && highRttNackThresholdMs >= -1);
- assert(highRttNackThresholdMs == -1 ||
- lowRttNackThresholdMs <= highRttNackThresholdMs);
- assert(lowRttNackThresholdMs > -1 || highRttNackThresholdMs == -1);
- _type = kNackFec;
-}
-
-VCMNackFecMethod::~VCMNackFecMethod()
-{
- //
-}
-bool
-VCMNackFecMethod::ProtectionFactor(const VCMProtectionParameters* parameters)
-{
- // Hybrid Nack FEC has three operational modes:
- // 1. Low RTT (below kLowRttNackMs) - Nack only: Set FEC rate
- // (_protectionFactorD) to zero. -1 means no FEC.
- // 2. High RTT (above _highRttNackMs) - FEC Only: Keep FEC factors.
- // -1 means always allow NACK.
- // 3. Medium RTT values - Hybrid mode: We will only nack the
- // residual following the decoding of the FEC (refer to JB logic). FEC
- // delta protection factor will be adjusted based on the RTT.
-
- // Otherwise: we count on FEC; if the RTT is below a threshold, then we
- // nack the residual, based on a decision made in the JB.
-
- // Compute the protection factors
- VCMFecMethod::ProtectionFactor(parameters);
- if (_lowRttNackMs == -1 || parameters->rtt < _lowRttNackMs)
- {
- _protectionFactorD = 0;
- VCMFecMethod::UpdateProtectionFactorD(_protectionFactorD);
- }
-
- // When in Hybrid mode (RTT range), adjust FEC rates based on the
- // RTT (NACK effectiveness) - adjustment factor is in the range [0,1].
- else if (_highRttNackMs == -1 || parameters->rtt < _highRttNackMs)
- {
- // TODO(mikhal): Disabling adjustment temporarily.
- // uint16_t rttIndex = (uint16_t) parameters->rtt;
- float adjustRtt = 1.0f;// (float)VCMNackFecTable[rttIndex] / 100.0f;
-
- // Adjust FEC with NACK on (for delta frame only)
- // table depends on RTT relative to rttMax (NACK Threshold)
- _protectionFactorD = static_cast<uint8_t>
- (adjustRtt *
- static_cast<float>(_protectionFactorD));
- // update FEC rates after applying adjustment
- VCMFecMethod::UpdateProtectionFactorD(_protectionFactorD);
- }
-
- return true;
-}
-
-int VCMNackFecMethod::ComputeMaxFramesFec(
- const VCMProtectionParameters* parameters) {
- if (parameters->numLayers > 2) {
- // For more than 2 temporal layers we will only have FEC on the base layer,
- // and the base layers will be pretty far apart. Therefore we force one
- // frame FEC.
- return 1;
- }
- // We set the max number of frames to base the FEC on so that on average
- // we will have complete frames in one RTT. Note that this is an upper
- // bound, and that the actual number of frames used for FEC is decided by the
- // RTP module based on the actual number of packets and the protection factor.
- float base_layer_framerate = parameters->frameRate /
- static_cast<float>(1 << (parameters->numLayers - 1));
- int max_frames_fec = std::max(static_cast<int>(
- 2.0f * base_layer_framerate * parameters->rtt /
- 1000.0f + 0.5f), 1);
- // |kUpperLimitFramesFec| is the upper limit on how many frames we
- // allow any FEC to be based on.
- if (max_frames_fec > kUpperLimitFramesFec) {
- max_frames_fec = kUpperLimitFramesFec;
- }
- return max_frames_fec;
-}
-
-int VCMNackFecMethod::MaxFramesFec() const {
- return _maxFramesFec;
-}
-
-bool VCMNackFecMethod::BitRateTooLowForFec(
- const VCMProtectionParameters* parameters) {
- // Bitrate below which we turn off FEC, regardless of reported packet loss.
- // The condition should depend on resolution and content. For now, use
- // threshold on bytes per frame, with some effect for the frame size.
- // The condition for turning off FEC is also based on other factors,
- // such as |_numLayers|, |_maxFramesFec|, and |_rtt|.
- int estimate_bytes_per_frame = 1000 * BitsPerFrame(parameters) / 8;
- int max_bytes_per_frame = kMaxBytesPerFrameForFec;
- int num_pixels = parameters->codecWidth * parameters->codecHeight;
- if (num_pixels <= 352 * 288) {
- max_bytes_per_frame = kMaxBytesPerFrameForFecLow;
- } else if (num_pixels > 640 * 480) {
- max_bytes_per_frame = kMaxBytesPerFrameForFecHigh;
- }
- // TODO (marpan): add condition based on maximum frames used for FEC,
- // and expand condition based on frame size.
- // Max round trip time threshold in ms.
- const int64_t kMaxRttTurnOffFec = 200;
- if (estimate_bytes_per_frame < max_bytes_per_frame &&
- parameters->numLayers < 3 &&
- parameters->rtt < kMaxRttTurnOffFec) {
- return true;
- }
- return false;
-}
-
-bool
-VCMNackFecMethod::EffectivePacketLoss(const VCMProtectionParameters* parameters)
-{
- // Set the effective packet loss for encoder (based on FEC code).
- // Compute the effective packet loss and residual packet loss due to FEC.
- VCMFecMethod::EffectivePacketLoss(parameters);
- return true;
-}
-
-bool
-VCMNackFecMethod::UpdateParameters(const VCMProtectionParameters* parameters)
-{
- ProtectionFactor(parameters);
- EffectivePacketLoss(parameters);
- _maxFramesFec = ComputeMaxFramesFec(parameters);
- if (BitRateTooLowForFec(parameters)) {
- _protectionFactorK = 0;
- _protectionFactorD = 0;
- }
-
- // Protection/fec rates obtained above are defined relative to total number
- // of packets (total rate: source + fec) FEC in RTP module assumes
- // protection factor is defined relative to source number of packets so we
- // should convert the factor to reduce mismatch between mediaOpt's rate and
- // the actual one
- _protectionFactorK = VCMFecMethod::ConvertFECRate(_protectionFactorK);
- _protectionFactorD = VCMFecMethod::ConvertFECRate(_protectionFactorD);
-
- return true;
-}
-
-VCMNackMethod::VCMNackMethod():
-VCMProtectionMethod()
-{
- _type = kNack;
-}
-
-VCMNackMethod::~VCMNackMethod()
-{
- //
-}
-
-bool
-VCMNackMethod::EffectivePacketLoss(const VCMProtectionParameters* parameter)
-{
- // Effective Packet Loss, NA in current version.
- _effectivePacketLoss = 0;
- return true;
-}
-
-bool
-VCMNackMethod::UpdateParameters(const VCMProtectionParameters* parameters)
-{
- // Compute the effective packet loss
- EffectivePacketLoss(parameters);
-
- // nackCost = (bitRate - nackCost) * (lossPr)
- return true;
-}
-
-VCMFecMethod::VCMFecMethod():
-VCMProtectionMethod()
-{
- _type = kFec;
-}
-VCMFecMethod::~VCMFecMethod()
-{
- //
-}
-
-uint8_t
-VCMFecMethod::BoostCodeRateKey(uint8_t packetFrameDelta,
- uint8_t packetFrameKey) const
-{
- uint8_t boostRateKey = 2;
- // Default: ratio scales the FEC protection up for I frames
- uint8_t ratio = 1;
-
- if (packetFrameDelta > 0)
- {
- ratio = (int8_t) (packetFrameKey / packetFrameDelta);
- }
- ratio = VCM_MAX(boostRateKey, ratio);
-
- return ratio;
-}
-
-uint8_t
-VCMFecMethod::ConvertFECRate(uint8_t codeRateRTP) const
-{
- return static_cast<uint8_t> (VCM_MIN(255,(0.5 + 255.0 * codeRateRTP /
- (float)(255 - codeRateRTP))));
-}
-
-// Update FEC with protectionFactorD
-void
-VCMFecMethod::UpdateProtectionFactorD(uint8_t protectionFactorD)
-{
- _protectionFactorD = protectionFactorD;
-}
-
-// Update FEC with protectionFactorK
-void
-VCMFecMethod::UpdateProtectionFactorK(uint8_t protectionFactorK)
-{
- _protectionFactorK = protectionFactorK;
-}
-
-bool
-VCMFecMethod::ProtectionFactor(const VCMProtectionParameters* parameters)
-{
- // FEC PROTECTION SETTINGS: varies with packet loss and bitrate
-
- // No protection if (filtered) packetLoss is 0
- uint8_t packetLoss = (uint8_t) (255 * parameters->lossPr);
- if (packetLoss == 0)
- {
- _protectionFactorK = 0;
- _protectionFactorD = 0;
- return true;
- }
-
- // Parameters for FEC setting:
- // first partition size, thresholds, table pars, spatial resoln fac.
-
- // First partition protection: ~ 20%
- uint8_t firstPartitionProt = (uint8_t) (255 * 0.20);
-
- // Minimum protection level needed to generate one FEC packet for one
- // source packet/frame (in RTP sender)
- uint8_t minProtLevelFec = 85;
-
- // Threshold on packetLoss and bitRrate/frameRate (=average #packets),
- // above which we allocate protection to cover at least first partition.
- uint8_t lossThr = 0;
- uint8_t packetNumThr = 1;
-
- // Parameters for range of rate index of table.
- const uint8_t ratePar1 = 5;
- const uint8_t ratePar2 = 49;
-
- // Spatial resolution size, relative to a reference size.
- float spatialSizeToRef = static_cast<float>
- (parameters->codecWidth * parameters->codecHeight) /
- (static_cast<float>(704 * 576));
- // resolnFac: This parameter will generally increase/decrease the FEC rate
- // (for fixed bitRate and packetLoss) based on system size.
- // Use a smaller exponent (< 1) to control/soften system size effect.
- const float resolnFac = 1.0 / powf(spatialSizeToRef, 0.3f);
-
- const int bitRatePerFrame = BitsPerFrame(parameters);
-
-
- // Average number of packets per frame (source and fec):
- const uint8_t avgTotPackets = 1 + (uint8_t)
- ((float) bitRatePerFrame * 1000.0
- / (float) (8.0 * _maxPayloadSize) + 0.5);
-
- // FEC rate parameters: for P and I frame
- uint8_t codeRateDelta = 0;
- uint8_t codeRateKey = 0;
-
- // Get index for table: the FEC protection depends on an effective rate.
- // The range on the rate index corresponds to rates (bps)
- // from ~200k to ~8000k, for 30fps
- const uint16_t effRateFecTable = static_cast<uint16_t>
- (resolnFac * bitRatePerFrame);
- uint8_t rateIndexTable =
- (uint8_t) VCM_MAX(VCM_MIN((effRateFecTable - ratePar1) /
- ratePar1, ratePar2), 0);
-
- // Restrict packet loss range to 50:
- // current tables defined only up to 50%
- if (packetLoss >= kPacketLossMax)
- {
- packetLoss = kPacketLossMax - 1;
- }
- uint16_t indexTable = rateIndexTable * kPacketLossMax + packetLoss;
-
- // Check on table index
- assert(indexTable < kSizeCodeRateXORTable);
-
- // Protection factor for P frame
- codeRateDelta = kCodeRateXORTable[indexTable];
-
- if (packetLoss > lossThr && avgTotPackets > packetNumThr)
- {
- // Set a minimum based on first partition size.
- if (codeRateDelta < firstPartitionProt)
- {
- codeRateDelta = firstPartitionProt;
- }
- }
-
- // Check limit on amount of protection for P frame; 50% is max.
- if (codeRateDelta >= kPacketLossMax)
- {
- codeRateDelta = kPacketLossMax - 1;
- }
-
- float adjustFec = 1.0f;
- // Avoid additional adjustments when layers are active.
- // TODO(mikhal/marco): Update adjusmtent based on layer info.
- if (parameters->numLayers == 1)
- {
- adjustFec = _qmRobustness->AdjustFecFactor(codeRateDelta,
- parameters->bitRate,
- parameters->frameRate,
- parameters->rtt,
- packetLoss);
- }
-
- codeRateDelta = static_cast<uint8_t>(codeRateDelta * adjustFec);
-
- // For Key frame:
- // Effectively at a higher rate, so we scale/boost the rate
- // The boost factor may depend on several factors: ratio of packet
- // number of I to P frames, how much protection placed on P frames, etc.
- const uint8_t packetFrameDelta = (uint8_t)
- (0.5 + parameters->packetsPerFrame);
- const uint8_t packetFrameKey = (uint8_t)
- (0.5 + parameters->packetsPerFrameKey);
- const uint8_t boostKey = BoostCodeRateKey(packetFrameDelta,
- packetFrameKey);
-
- rateIndexTable = (uint8_t) VCM_MAX(VCM_MIN(
- 1 + (boostKey * effRateFecTable - ratePar1) /
- ratePar1,ratePar2),0);
- uint16_t indexTableKey = rateIndexTable * kPacketLossMax + packetLoss;
-
- indexTableKey = VCM_MIN(indexTableKey, kSizeCodeRateXORTable);
-
- // Check on table index
- assert(indexTableKey < kSizeCodeRateXORTable);
-
- // Protection factor for I frame
- codeRateKey = kCodeRateXORTable[indexTableKey];
-
- // Boosting for Key frame.
- int boostKeyProt = _scaleProtKey * codeRateDelta;
- if (boostKeyProt >= kPacketLossMax)
- {
- boostKeyProt = kPacketLossMax - 1;
- }
-
- // Make sure I frame protection is at least larger than P frame protection,
- // and at least as high as filtered packet loss.
- codeRateKey = static_cast<uint8_t> (VCM_MAX(packetLoss,
- VCM_MAX(boostKeyProt, codeRateKey)));
-
- // Check limit on amount of protection for I frame: 50% is max.
- if (codeRateKey >= kPacketLossMax)
- {
- codeRateKey = kPacketLossMax - 1;
- }
-
- _protectionFactorK = codeRateKey;
- _protectionFactorD = codeRateDelta;
-
- // Generally there is a rate mis-match between the FEC cost estimated
- // in mediaOpt and the actual FEC cost sent out in RTP module.
- // This is more significant at low rates (small # of source packets), where
- // the granularity of the FEC decreases. In this case, non-zero protection
- // in mediaOpt may generate 0 FEC packets in RTP sender (since actual #FEC
- // is based on rounding off protectionFactor on actual source packet number).
- // The correction factor (_corrFecCost) attempts to corrects this, at least
- // for cases of low rates (small #packets) and low protection levels.
-
- float numPacketsFl = 1.0f + ((float) bitRatePerFrame * 1000.0
- / (float) (8.0 * _maxPayloadSize) + 0.5);
-
- const float estNumFecGen = 0.5f + static_cast<float> (_protectionFactorD *
- numPacketsFl / 255.0f);
-
-
- // We reduce cost factor (which will reduce overhead for FEC and
- // hybrid method) and not the protectionFactor.
- _corrFecCost = 1.0f;
- if (estNumFecGen < 1.1f && _protectionFactorD < minProtLevelFec)
- {
- _corrFecCost = 0.5f;
- }
- if (estNumFecGen < 0.9f && _protectionFactorD < minProtLevelFec)
- {
- _corrFecCost = 0.0f;
- }
-
- // TODO (marpan): Set the UEP protection on/off for Key and Delta frames
- _useUepProtectionK = _qmRobustness->SetUepProtection(codeRateKey,
- parameters->bitRate,
- packetLoss,
- 0);
-
- _useUepProtectionD = _qmRobustness->SetUepProtection(codeRateDelta,
- parameters->bitRate,
- packetLoss,
- 1);
-
- // DONE WITH FEC PROTECTION SETTINGS
- return true;
-}
-
-int VCMFecMethod::BitsPerFrame(const VCMProtectionParameters* parameters) {
- // When temporal layers are available FEC will only be applied on the base
- // layer.
- const float bitRateRatio =
- kVp8LayerRateAlloction[parameters->numLayers - 1][0];
- float frameRateRatio = powf(1 / 2.0, parameters->numLayers - 1);
- float bitRate = parameters->bitRate * bitRateRatio;
- float frameRate = parameters->frameRate * frameRateRatio;
-
- // TODO(mikhal): Update factor following testing.
- float adjustmentFactor = 1;
-
- // Average bits per frame (units of kbits)
- return static_cast<int>(adjustmentFactor * bitRate / frameRate);
-}
-
-bool
-VCMFecMethod::EffectivePacketLoss(const VCMProtectionParameters* parameters)
-{
- // Effective packet loss to encoder is based on RPL (residual packet loss)
- // this is a soft setting based on degree of FEC protection
- // RPL = received/input packet loss - average_FEC_recovery
- // note: received/input packet loss may be filtered based on FilteredLoss
-
- // Effective Packet Loss, NA in current version.
- _effectivePacketLoss = 0;
-
- return true;
-}
-
-bool
-VCMFecMethod::UpdateParameters(const VCMProtectionParameters* parameters)
-{
- // Compute the protection factor
- ProtectionFactor(parameters);
-
- // Compute the effective packet loss
- EffectivePacketLoss(parameters);
-
- // Protection/fec rates obtained above is defined relative to total number
- // of packets (total rate: source+fec) FEC in RTP module assumes protection
- // factor is defined relative to source number of packets so we should
- // convert the factor to reduce mismatch between mediaOpt suggested rate and
- // the actual rate
- _protectionFactorK = ConvertFECRate(_protectionFactorK);
- _protectionFactorD = ConvertFECRate(_protectionFactorD);
-
- return true;
-}
-VCMLossProtectionLogic::VCMLossProtectionLogic(int64_t nowMs):
-_currentParameters(),
-_rtt(0),
-_lossPr(0.0f),
-_bitRate(0.0f),
-_frameRate(0.0f),
-_keyFrameSize(0.0f),
-_fecRateKey(0),
-_fecRateDelta(0),
-_lastPrUpdateT(0),
-_lossPr255(0.9999f),
-_lossPrHistory(),
-_shortMaxLossPr255(0),
-_packetsPerFrame(0.9999f),
-_packetsPerFrameKey(0.9999f),
-_codecWidth(0),
-_codecHeight(0),
-_numLayers(1)
-{
- Reset(nowMs);
-}
-
-VCMLossProtectionLogic::~VCMLossProtectionLogic()
-{
- Release();
-}
-
-void VCMLossProtectionLogic::SetMethod(
- enum VCMProtectionMethodEnum newMethodType) {
- if (_selectedMethod && _selectedMethod->Type() == newMethodType)
- return;
-
- switch(newMethodType) {
- case kNack:
- _selectedMethod.reset(new VCMNackMethod());
- break;
- case kFec:
- _selectedMethod.reset(new VCMFecMethod());
- break;
- case kNackFec:
- _selectedMethod.reset(new VCMNackFecMethod(kLowRttNackMs, -1));
- break;
- case kNone:
- _selectedMethod.reset();
- break;
- }
- UpdateMethod();
-}
-
-void
-VCMLossProtectionLogic::UpdateRtt(int64_t rtt)
-{
- _rtt = rtt;
-}
-
-void
-VCMLossProtectionLogic::UpdateMaxLossHistory(uint8_t lossPr255,
- int64_t now)
-{
- if (_lossPrHistory[0].timeMs >= 0 &&
- now - _lossPrHistory[0].timeMs < kLossPrShortFilterWinMs)
- {
- if (lossPr255 > _shortMaxLossPr255)
- {
- _shortMaxLossPr255 = lossPr255;
- }
- }
- else
- {
- // Only add a new value to the history once a second
- if (_lossPrHistory[0].timeMs == -1)
- {
- // First, no shift
- _shortMaxLossPr255 = lossPr255;
- }
- else
- {
- // Shift
- for (int32_t i = (kLossPrHistorySize - 2); i >= 0; i--)
- {
- _lossPrHistory[i + 1].lossPr255 = _lossPrHistory[i].lossPr255;
- _lossPrHistory[i + 1].timeMs = _lossPrHistory[i].timeMs;
- }
- }
- if (_shortMaxLossPr255 == 0)
- {
- _shortMaxLossPr255 = lossPr255;
- }
-
- _lossPrHistory[0].lossPr255 = _shortMaxLossPr255;
- _lossPrHistory[0].timeMs = now;
- _shortMaxLossPr255 = 0;
- }
-}
-
-uint8_t
-VCMLossProtectionLogic::MaxFilteredLossPr(int64_t nowMs) const
-{
- uint8_t maxFound = _shortMaxLossPr255;
- if (_lossPrHistory[0].timeMs == -1)
- {
- return maxFound;
- }
- for (int32_t i = 0; i < kLossPrHistorySize; i++)
- {
- if (_lossPrHistory[i].timeMs == -1)
- {
- break;
- }
- if (nowMs - _lossPrHistory[i].timeMs >
- kLossPrHistorySize * kLossPrShortFilterWinMs)
- {
- // This sample (and all samples after this) is too old
- break;
- }
- if (_lossPrHistory[i].lossPr255 > maxFound)
- {
- // This sample is the largest one this far into the history
- maxFound = _lossPrHistory[i].lossPr255;
- }
- }
- return maxFound;
-}
-
-uint8_t VCMLossProtectionLogic::FilteredLoss(
- int64_t nowMs,
- FilterPacketLossMode filter_mode,
- uint8_t lossPr255) {
-
- // Update the max window filter.
- UpdateMaxLossHistory(lossPr255, nowMs);
-
- // Update the recursive average filter.
- _lossPr255.Apply(static_cast<float> (nowMs - _lastPrUpdateT),
- static_cast<float> (lossPr255));
- _lastPrUpdateT = nowMs;
-
- // Filtered loss: default is received loss (no filtering).
- uint8_t filtered_loss = lossPr255;
-
- switch (filter_mode) {
- case kNoFilter:
- break;
- case kAvgFilter:
- filtered_loss = static_cast<uint8_t>(_lossPr255.filtered() + 0.5);
- break;
- case kMaxFilter:
- filtered_loss = MaxFilteredLossPr(nowMs);
- break;
- }
-
- return filtered_loss;
-}
-
-void
-VCMLossProtectionLogic::UpdateFilteredLossPr(uint8_t packetLossEnc)
-{
- _lossPr = (float) packetLossEnc / (float) 255.0;
-}
-
-void
-VCMLossProtectionLogic::UpdateBitRate(float bitRate)
-{
- _bitRate = bitRate;
-}
-
-void
-VCMLossProtectionLogic::UpdatePacketsPerFrame(float nPackets, int64_t nowMs)
-{
- _packetsPerFrame.Apply(static_cast<float>(nowMs - _lastPacketPerFrameUpdateT),
- nPackets);
- _lastPacketPerFrameUpdateT = nowMs;
-}
-
-void
-VCMLossProtectionLogic::UpdatePacketsPerFrameKey(float nPackets, int64_t nowMs)
-{
- _packetsPerFrameKey.Apply(static_cast<float>(nowMs -
- _lastPacketPerFrameUpdateTKey), nPackets);
- _lastPacketPerFrameUpdateTKey = nowMs;
-}
-
-void
-VCMLossProtectionLogic::UpdateKeyFrameSize(float keyFrameSize)
-{
- _keyFrameSize = keyFrameSize;
-}
-
-void
-VCMLossProtectionLogic::UpdateFrameSize(uint16_t width,
- uint16_t height)
-{
- _codecWidth = width;
- _codecHeight = height;
-}
-
-void VCMLossProtectionLogic::UpdateNumLayers(int numLayers) {
- _numLayers = (numLayers == 0) ? 1 : numLayers;
-}
-
-bool
-VCMLossProtectionLogic::UpdateMethod()
-{
- if (!_selectedMethod)
- return false;
- _currentParameters.rtt = _rtt;
- _currentParameters.lossPr = _lossPr;
- _currentParameters.bitRate = _bitRate;
- _currentParameters.frameRate = _frameRate; // rename actual frame rate?
- _currentParameters.keyFrameSize = _keyFrameSize;
- _currentParameters.fecRateDelta = _fecRateDelta;
- _currentParameters.fecRateKey = _fecRateKey;
- _currentParameters.packetsPerFrame = _packetsPerFrame.filtered();
- _currentParameters.packetsPerFrameKey = _packetsPerFrameKey.filtered();
- _currentParameters.codecWidth = _codecWidth;
- _currentParameters.codecHeight = _codecHeight;
- _currentParameters.numLayers = _numLayers;
- return _selectedMethod->UpdateParameters(&_currentParameters);
-}
-
-VCMProtectionMethod*
-VCMLossProtectionLogic::SelectedMethod() const
-{
- return _selectedMethod.get();
-}
-
-VCMProtectionMethodEnum VCMLossProtectionLogic::SelectedType() const {
- return _selectedMethod ? _selectedMethod->Type() : kNone;
-}
-
-void
-VCMLossProtectionLogic::Reset(int64_t nowMs)
-{
- _lastPrUpdateT = nowMs;
- _lastPacketPerFrameUpdateT = nowMs;
- _lastPacketPerFrameUpdateTKey = nowMs;
- _lossPr255.Reset(0.9999f);
- _packetsPerFrame.Reset(0.9999f);
- _fecRateDelta = _fecRateKey = 0;
- for (int32_t i = 0; i < kLossPrHistorySize; i++)
- {
- _lossPrHistory[i].lossPr255 = 0;
- _lossPrHistory[i].timeMs = -1;
- }
- _shortMaxLossPr255 = 0;
- Release();
-}
-
-void VCMLossProtectionLogic::Release() {
- _selectedMethod.reset();
-}
-
-} // namespace media_optimization
-} // namespace webrtc
diff --git a/webrtc/modules/video_coding/main/source/media_opt_util.h b/webrtc/modules/video_coding/main/source/media_opt_util.h
deleted file mode 100644
index 2085bbcde9..0000000000
--- a/webrtc/modules/video_coding/main/source/media_opt_util.h
+++ /dev/null
@@ -1,364 +0,0 @@
-/*
- * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef WEBRTC_MODULES_VIDEO_CODING_MEDIA_OPT_UTIL_H_
-#define WEBRTC_MODULES_VIDEO_CODING_MEDIA_OPT_UTIL_H_
-
-#include <math.h>
-#include <stdlib.h>
-
-#include "webrtc/base/exp_filter.h"
-#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/modules/video_coding/main/source/internal_defines.h"
-#include "webrtc/modules/video_coding/main/source/qm_select.h"
-#include "webrtc/system_wrappers/include/trace.h"
-#include "webrtc/typedefs.h"
-
-namespace webrtc {
-namespace media_optimization {
-
-// Number of time periods used for (max) window filter for packet loss
-// TODO (marpan): set reasonable window size for filtered packet loss,
-// adjustment should be based on logged/real data of loss stats/correlation.
-enum { kLossPrHistorySize = 10 };
-
-// 1000 ms, total filter length is (kLossPrHistorySize * 1000) ms
-enum { kLossPrShortFilterWinMs = 1000 };
-
-// The type of filter used on the received packet loss reports.
-enum FilterPacketLossMode {
- kNoFilter, // No filtering on received loss.
- kAvgFilter, // Recursive average filter.
- kMaxFilter // Max-window filter, over the time interval of:
- // (kLossPrHistorySize * kLossPrShortFilterWinMs) ms.
-};
-
-// Thresholds for hybrid NACK/FEC
-// common to media optimization and the jitter buffer.
-const int64_t kLowRttNackMs = 20;
-
-struct VCMProtectionParameters
-{
- VCMProtectionParameters() : rtt(0), lossPr(0.0f), bitRate(0.0f),
- packetsPerFrame(0.0f), packetsPerFrameKey(0.0f), frameRate(0.0f),
- keyFrameSize(0.0f), fecRateDelta(0), fecRateKey(0),
- codecWidth(0), codecHeight(0),
- numLayers(1)
- {}
-
- int64_t rtt;
- float lossPr;
- float bitRate;
- float packetsPerFrame;
- float packetsPerFrameKey;
- float frameRate;
- float keyFrameSize;
- uint8_t fecRateDelta;
- uint8_t fecRateKey;
- uint16_t codecWidth;
- uint16_t codecHeight;
- int numLayers;
-};
-
-
-/******************************/
-/* VCMProtectionMethod class */
-/******************************/
-
-enum VCMProtectionMethodEnum
-{
- kNack,
- kFec,
- kNackFec,
- kNone
-};
-
-class VCMLossProbabilitySample
-{
-public:
- VCMLossProbabilitySample() : lossPr255(0), timeMs(-1) {};
-
- uint8_t lossPr255;
- int64_t timeMs;
-};
-
-
-class VCMProtectionMethod
-{
-public:
- VCMProtectionMethod();
- virtual ~VCMProtectionMethod();
-
- // Updates the efficiency of the method using the parameters provided
- //
- // Input:
- // - parameters : Parameters used to calculate efficiency
- //
- // Return value : True if this method is recommended in
- // the given conditions.
- virtual bool UpdateParameters(const VCMProtectionParameters* parameters) = 0;
-
- // Returns the protection type
- //
- // Return value : The protection type
- enum VCMProtectionMethodEnum Type() const { return _type; }
-
- // Returns the effective packet loss for ER, required by this protection method
- //
- // Return value : Required effective packet loss
- virtual uint8_t RequiredPacketLossER() { return _effectivePacketLoss; }
-
- // Extracts the FEC protection factor for Key frame, required by this protection method
- //
- // Return value : Required protectionFactor for Key frame
- virtual uint8_t RequiredProtectionFactorK() { return _protectionFactorK; }
-
- // Extracts the FEC protection factor for Delta frame, required by this protection method
- //
- // Return value : Required protectionFactor for delta frame
- virtual uint8_t RequiredProtectionFactorD() { return _protectionFactorD; }
-
- // Extracts whether the FEC Unequal protection (UEP) is used for Key frame.
- //
- // Return value : Required Unequal protection on/off state.
- virtual bool RequiredUepProtectionK() { return _useUepProtectionK; }
-
- // Extracts whether the the FEC Unequal protection (UEP) is used for Delta frame.
- //
- // Return value : Required Unequal protection on/off state.
- virtual bool RequiredUepProtectionD() { return _useUepProtectionD; }
-
- virtual int MaxFramesFec() const { return 1; }
-
- // Updates content metrics
- void UpdateContentMetrics(const VideoContentMetrics* contentMetrics);
-
-protected:
-
- uint8_t _effectivePacketLoss;
- uint8_t _protectionFactorK;
- uint8_t _protectionFactorD;
- // Estimation of residual loss after the FEC
- float _scaleProtKey;
- int32_t _maxPayloadSize;
-
- VCMQmRobustness* _qmRobustness;
- bool _useUepProtectionK;
- bool _useUepProtectionD;
- float _corrFecCost;
- enum VCMProtectionMethodEnum _type;
-};
-
-class VCMNackMethod : public VCMProtectionMethod
-{
-public:
- VCMNackMethod();
- virtual ~VCMNackMethod();
- virtual bool UpdateParameters(const VCMProtectionParameters* parameters);
- // Get the effective packet loss
- bool EffectivePacketLoss(const VCMProtectionParameters* parameter);
-};
-
-class VCMFecMethod : public VCMProtectionMethod
-{
-public:
- VCMFecMethod();
- virtual ~VCMFecMethod();
- virtual bool UpdateParameters(const VCMProtectionParameters* parameters);
- // Get the effective packet loss for ER
- bool EffectivePacketLoss(const VCMProtectionParameters* parameters);
- // Get the FEC protection factors
- bool ProtectionFactor(const VCMProtectionParameters* parameters);
- // Get the boost for key frame protection
- uint8_t BoostCodeRateKey(uint8_t packetFrameDelta,
- uint8_t packetFrameKey) const;
- // Convert the rates: defined relative to total# packets or source# packets
- uint8_t ConvertFECRate(uint8_t codeRate) const;
- // Get the average effective recovery from FEC: for random loss model
- float AvgRecoveryFEC(const VCMProtectionParameters* parameters) const;
- // Update FEC with protectionFactorD
- void UpdateProtectionFactorD(uint8_t protectionFactorD);
- // Update FEC with protectionFactorK
- void UpdateProtectionFactorK(uint8_t protectionFactorK);
- // Compute the bits per frame. Account for temporal layers when applicable.
- int BitsPerFrame(const VCMProtectionParameters* parameters);
-
-protected:
- enum { kUpperLimitFramesFec = 6 };
- // Thresholds values for the bytes/frame and round trip time, below which we
- // may turn off FEC, depending on |_numLayers| and |_maxFramesFec|.
- // Max bytes/frame for VGA, corresponds to ~140k at 25fps.
- enum { kMaxBytesPerFrameForFec = 700 };
- // Max bytes/frame for CIF and lower: corresponds to ~80k at 25fps.
- enum { kMaxBytesPerFrameForFecLow = 400 };
- // Max bytes/frame for frame size larger than VGA, ~200k at 25fps.
- enum { kMaxBytesPerFrameForFecHigh = 1000 };
-};
-
-
-class VCMNackFecMethod : public VCMFecMethod
-{
-public:
- VCMNackFecMethod(int64_t lowRttNackThresholdMs,
- int64_t highRttNackThresholdMs);
- virtual ~VCMNackFecMethod();
- virtual bool UpdateParameters(const VCMProtectionParameters* parameters);
- // Get the effective packet loss for ER
- bool EffectivePacketLoss(const VCMProtectionParameters* parameters);
- // Get the protection factors
- bool ProtectionFactor(const VCMProtectionParameters* parameters);
- // Get the max number of frames the FEC is allowed to be based on.
- int MaxFramesFec() const;
- // Turn off the FEC based on low bitrate and other factors.
- bool BitRateTooLowForFec(const VCMProtectionParameters* parameters);
-private:
- int ComputeMaxFramesFec(const VCMProtectionParameters* parameters);
-
- int64_t _lowRttNackMs;
- int64_t _highRttNackMs;
- int _maxFramesFec;
-};
-
-class VCMLossProtectionLogic
-{
-public:
- VCMLossProtectionLogic(int64_t nowMs);
- ~VCMLossProtectionLogic();
-
- // Set the protection method to be used
- //
- // Input:
- // - newMethodType : New requested protection method type. If one
- // is already set, it will be deleted and replaced
- void SetMethod(VCMProtectionMethodEnum newMethodType);
-
- // Update the round-trip time
- //
- // Input:
- // - rtt : Round-trip time in seconds.
- void UpdateRtt(int64_t rtt);
-
- // Update the filtered packet loss.
- //
- // Input:
- // - packetLossEnc : The reported packet loss filtered
- // (max window or average)
- void UpdateFilteredLossPr(uint8_t packetLossEnc);
-
- // Update the current target bit rate.
- //
- // Input:
- // - bitRate : The current target bit rate in kbits/s
- void UpdateBitRate(float bitRate);
-
- // Update the number of packets per frame estimate, for delta frames
- //
- // Input:
- // - nPackets : Number of packets in the latest sent frame.
- void UpdatePacketsPerFrame(float nPackets, int64_t nowMs);
-
- // Update the number of packets per frame estimate, for key frames
- //
- // Input:
- // - nPackets : umber of packets in the latest sent frame.
- void UpdatePacketsPerFrameKey(float nPackets, int64_t nowMs);
-
- // Update the keyFrameSize estimate
- //
- // Input:
- // - keyFrameSize : The size of the latest sent key frame.
- void UpdateKeyFrameSize(float keyFrameSize);
-
- // Update the frame rate
- //
- // Input:
- // - frameRate : The current target frame rate.
- void UpdateFrameRate(float frameRate) { _frameRate = frameRate; }
-
- // Update the frame size
- //
- // Input:
- // - width : The codec frame width.
- // - height : The codec frame height.
- void UpdateFrameSize(uint16_t width, uint16_t height);
-
- // Update the number of active layers
- //
- // Input:
- // - numLayers : Number of layers used.
- void UpdateNumLayers(int numLayers);
-
- // The amount of packet loss to cover for with FEC.
- //
- // Input:
- // - fecRateKey : Packet loss to cover for with FEC when
- // sending key frames.
- // - fecRateDelta : Packet loss to cover for with FEC when
- // sending delta frames.
- void UpdateFECRates(uint8_t fecRateKey, uint8_t fecRateDelta)
- { _fecRateKey = fecRateKey;
- _fecRateDelta = fecRateDelta; }
-
- // Update the protection methods with the current VCMProtectionParameters
- // and set the requested protection settings.
- // Return value : Returns true on update
- bool UpdateMethod();
-
- // Returns the method currently selected.
- //
- // Return value : The protection method currently selected.
- VCMProtectionMethod* SelectedMethod() const;
-
- // Return the protection type of the currently selected method
- VCMProtectionMethodEnum SelectedType() const;
-
- // Updates the filtered loss for the average and max window packet loss,
- // and returns the filtered loss probability in the interval [0, 255].
- // The returned filtered loss value depends on the parameter |filter_mode|.
- // The input parameter |lossPr255| is the received packet loss.
-
- // Return value : The filtered loss probability
- uint8_t FilteredLoss(int64_t nowMs, FilterPacketLossMode filter_mode,
- uint8_t lossPr255);
-
- void Reset(int64_t nowMs);
-
- void Release();
-
-private:
- // Sets the available loss protection methods.
- void UpdateMaxLossHistory(uint8_t lossPr255, int64_t now);
- uint8_t MaxFilteredLossPr(int64_t nowMs) const;
- rtc::scoped_ptr<VCMProtectionMethod> _selectedMethod;
- VCMProtectionParameters _currentParameters;
- int64_t _rtt;
- float _lossPr;
- float _bitRate;
- float _frameRate;
- float _keyFrameSize;
- uint8_t _fecRateKey;
- uint8_t _fecRateDelta;
- int64_t _lastPrUpdateT;
- int64_t _lastPacketPerFrameUpdateT;
- int64_t _lastPacketPerFrameUpdateTKey;
- rtc::ExpFilter _lossPr255;
- VCMLossProbabilitySample _lossPrHistory[kLossPrHistorySize];
- uint8_t _shortMaxLossPr255;
- rtc::ExpFilter _packetsPerFrame;
- rtc::ExpFilter _packetsPerFrameKey;
- uint16_t _codecWidth;
- uint16_t _codecHeight;
- int _numLayers;
-};
-
-} // namespace media_optimization
-} // namespace webrtc
-
-#endif // WEBRTC_MODULES_VIDEO_CODING_MEDIA_OPT_UTIL_H_
diff --git a/webrtc/modules/video_coding/main/source/nack_fec_tables.h b/webrtc/modules/video_coding/main/source/nack_fec_tables.h
deleted file mode 100644
index b82bb1b4ba..0000000000
--- a/webrtc/modules/video_coding/main/source/nack_fec_tables.h
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef WEBRTC_MODULES_VIDEO_CODING_SOURCE_NACK_FEC_TABLES_H_
-#define WEBRTC_MODULES_VIDEO_CODING_SOURCE_NACK_FEC_TABLES_H_
-
-namespace webrtc
-{
-
-// Table for adjusting FEC rate for NACK/FEC protection method
-// Table values are built as a sigmoid function, ranging from 0 to 100, based on
-// the HybridNackTH values defined in media_opt_util.h.
-const uint16_t VCMNackFecTable[100] = {
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-0,
-1,
-1,
-1,
-1,
-1,
-2,
-2,
-2,
-3,
-3,
-4,
-5,
-6,
-7,
-9,
-10,
-12,
-15,
-18,
-21,
-24,
-28,
-32,
-37,
-41,
-46,
-51,
-56,
-61,
-66,
-70,
-74,
-78,
-81,
-84,
-86,
-89,
-90,
-92,
-93,
-95,
-95,
-96,
-97,
-97,
-98,
-98,
-99,
-99,
-99,
-99,
-99,
-99,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-
-};
-
-} // namespace webrtc
-
-#endif // WEBRTC_MODULES_VIDEO_CODING_SOURCE_NACK_FEC_TABLES_H_
diff --git a/webrtc/modules/video_coding/main/source/packet.h b/webrtc/modules/video_coding/main/source/packet.h
deleted file mode 100644
index 80bf532502..0000000000
--- a/webrtc/modules/video_coding/main/source/packet.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef WEBRTC_MODULES_VIDEO_CODING_PACKET_H_
-#define WEBRTC_MODULES_VIDEO_CODING_PACKET_H_
-
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/modules/video_coding/main/source/jitter_buffer_common.h"
-#include "webrtc/typedefs.h"
-
-namespace webrtc {
-
-class VCMPacket {
-public:
- VCMPacket();
- VCMPacket(const uint8_t* ptr,
- const size_t size,
- const WebRtcRTPHeader& rtpHeader);
- VCMPacket(const uint8_t* ptr,
- size_t size,
- uint16_t seqNum,
- uint32_t timestamp,
- bool markerBit);
-
- void Reset();
-
- uint8_t payloadType;
- uint32_t timestamp;
- // NTP time of the capture time in local timebase in milliseconds.
- int64_t ntp_time_ms_;
- uint16_t seqNum;
- const uint8_t* dataPtr;
- size_t sizeBytes;
- bool markerBit;
-
- FrameType frameType;
- VideoCodecType codec;
-
- bool isFirstPacket; // Is this first packet in a frame.
- VCMNaluCompleteness completeNALU; // Default is kNaluIncomplete.
- bool insertStartCode; // True if a start code should be inserted before this
- // packet.
- int width;
- int height;
- RTPVideoHeader codecSpecificHeader;
-
-protected:
- void CopyCodecSpecifics(const RTPVideoHeader& videoHeader);
-};
-
-} // namespace webrtc
-#endif // WEBRTC_MODULES_VIDEO_CODING_PACKET_H_
diff --git a/webrtc/modules/video_coding/main/source/rtt_filter.cc b/webrtc/modules/video_coding/main/source/rtt_filter.cc
deleted file mode 100644
index 5742e8fa89..0000000000
--- a/webrtc/modules/video_coding/main/source/rtt_filter.cc
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "webrtc/modules/video_coding/main/source/internal_defines.h"
-#include "webrtc/modules/video_coding/main/source/rtt_filter.h"
-
-#include <math.h>
-#include <stdlib.h>
-#include <string.h>
-
-namespace webrtc {
-
-VCMRttFilter::VCMRttFilter()
- : _filtFactMax(35),
- _jumpStdDevs(2.5),
- _driftStdDevs(3.5),
- _detectThreshold(kMaxDriftJumpCount) {
- Reset();
-}
-
-VCMRttFilter&
-VCMRttFilter::operator=(const VCMRttFilter& rhs)
-{
- if (this != &rhs)
- {
- _gotNonZeroUpdate = rhs._gotNonZeroUpdate;
- _avgRtt = rhs._avgRtt;
- _varRtt = rhs._varRtt;
- _maxRtt = rhs._maxRtt;
- _filtFactCount = rhs._filtFactCount;
- _jumpCount = rhs._jumpCount;
- _driftCount = rhs._driftCount;
- memcpy(_jumpBuf, rhs._jumpBuf, sizeof(_jumpBuf));
- memcpy(_driftBuf, rhs._driftBuf, sizeof(_driftBuf));
- }
- return *this;
-}
-
-void
-VCMRttFilter::Reset()
-{
- _gotNonZeroUpdate = false;
- _avgRtt = 0;
- _varRtt = 0;
- _maxRtt = 0;
- _filtFactCount = 1;
- _jumpCount = 0;
- _driftCount = 0;
- memset(_jumpBuf, 0, kMaxDriftJumpCount);
- memset(_driftBuf, 0, kMaxDriftJumpCount);
-}
-
-void
-VCMRttFilter::Update(int64_t rttMs)
-{
- if (!_gotNonZeroUpdate)
- {
- if (rttMs == 0)
- {
- return;
- }
- _gotNonZeroUpdate = true;
- }
-
- // Sanity check
- if (rttMs > 3000)
- {
- rttMs = 3000;
- }
-
- double filtFactor = 0;
- if (_filtFactCount > 1)
- {
- filtFactor = static_cast<double>(_filtFactCount - 1) / _filtFactCount;
- }
- _filtFactCount++;
- if (_filtFactCount > _filtFactMax)
- {
- // This prevents filtFactor from going above
- // (_filtFactMax - 1) / _filtFactMax,
- // e.g., _filtFactMax = 50 => filtFactor = 49/50 = 0.98
- _filtFactCount = _filtFactMax;
- }
- double oldAvg = _avgRtt;
- double oldVar = _varRtt;
- _avgRtt = filtFactor * _avgRtt + (1 - filtFactor) * rttMs;
- _varRtt = filtFactor * _varRtt + (1 - filtFactor) *
- (rttMs - _avgRtt) * (rttMs - _avgRtt);
- _maxRtt = VCM_MAX(rttMs, _maxRtt);
- if (!JumpDetection(rttMs) || !DriftDetection(rttMs))
- {
- // In some cases we don't want to update the statistics
- _avgRtt = oldAvg;
- _varRtt = oldVar;
- }
-}
-
-bool
-VCMRttFilter::JumpDetection(int64_t rttMs)
-{
- double diffFromAvg = _avgRtt - rttMs;
- if (fabs(diffFromAvg) > _jumpStdDevs * sqrt(_varRtt))
- {
- int diffSign = (diffFromAvg >= 0) ? 1 : -1;
- int jumpCountSign = (_jumpCount >= 0) ? 1 : -1;
- if (diffSign != jumpCountSign)
- {
- // Since the signs differ the samples currently
- // in the buffer is useless as they represent a
- // jump in a different direction.
- _jumpCount = 0;
- }
- if (abs(_jumpCount) < kMaxDriftJumpCount)
- {
- // Update the buffer used for the short time
- // statistics.
- // The sign of the diff is used for updating the counter since
- // we want to use the same buffer for keeping track of when
- // the RTT jumps down and up.
- _jumpBuf[abs(_jumpCount)] = rttMs;
- _jumpCount += diffSign;
- }
- if (abs(_jumpCount) >= _detectThreshold)
- {
- // Detected an RTT jump
- ShortRttFilter(_jumpBuf, abs(_jumpCount));
- _filtFactCount = _detectThreshold + 1;
- _jumpCount = 0;
- }
- else
- {
- return false;
- }
- }
- else
- {
- _jumpCount = 0;
- }
- return true;
-}
-
-bool
-VCMRttFilter::DriftDetection(int64_t rttMs)
-{
- if (_maxRtt - _avgRtt > _driftStdDevs * sqrt(_varRtt))
- {
- if (_driftCount < kMaxDriftJumpCount)
- {
- // Update the buffer used for the short time
- // statistics.
- _driftBuf[_driftCount] = rttMs;
- _driftCount++;
- }
- if (_driftCount >= _detectThreshold)
- {
- // Detected an RTT drift
- ShortRttFilter(_driftBuf, _driftCount);
- _filtFactCount = _detectThreshold + 1;
- _driftCount = 0;
- }
- }
- else
- {
- _driftCount = 0;
- }
- return true;
-}
-
-void
-VCMRttFilter::ShortRttFilter(int64_t* buf, uint32_t length)
-{
- if (length == 0)
- {
- return;
- }
- _maxRtt = 0;
- _avgRtt = 0;
- for (uint32_t i=0; i < length; i++)
- {
- if (buf[i] > _maxRtt)
- {
- _maxRtt = buf[i];
- }
- _avgRtt += buf[i];
- }
- _avgRtt = _avgRtt / static_cast<double>(length);
-}
-
-int64_t
-VCMRttFilter::RttMs() const
-{
- return static_cast<int64_t>(_maxRtt + 0.5);
-}
-
-}
diff --git a/webrtc/modules/video_coding/main/source/rtt_filter.h b/webrtc/modules/video_coding/main/source/rtt_filter.h
deleted file mode 100644
index 9e14a1ab39..0000000000
--- a/webrtc/modules/video_coding/main/source/rtt_filter.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef WEBRTC_MODULES_VIDEO_CODING_RTT_FILTER_H_
-#define WEBRTC_MODULES_VIDEO_CODING_RTT_FILTER_H_
-
-#include "webrtc/typedefs.h"
-
-namespace webrtc
-{
-
-class VCMRttFilter
-{
-public:
- VCMRttFilter();
-
- VCMRttFilter& operator=(const VCMRttFilter& rhs);
-
- // Resets the filter.
- void Reset();
- // Updates the filter with a new sample.
- void Update(int64_t rttMs);
- // A getter function for the current RTT level in ms.
- int64_t RttMs() const;
-
-private:
- // The size of the drift and jump memory buffers
- // and thus also the detection threshold for these
- // detectors in number of samples.
- enum { kMaxDriftJumpCount = 5 };
- // Detects RTT jumps by comparing the difference between
- // samples and average to the standard deviation.
- // Returns true if the long time statistics should be updated
- // and false otherwise
- bool JumpDetection(int64_t rttMs);
- // Detects RTT drifts by comparing the difference between
- // max and average to the standard deviation.
- // Returns true if the long time statistics should be updated
- // and false otherwise
- bool DriftDetection(int64_t rttMs);
- // Computes the short time average and maximum of the vector buf.
- void ShortRttFilter(int64_t* buf, uint32_t length);
-
- bool _gotNonZeroUpdate;
- double _avgRtt;
- double _varRtt;
- int64_t _maxRtt;
- uint32_t _filtFactCount;
- const uint32_t _filtFactMax;
- const double _jumpStdDevs;
- const double _driftStdDevs;
- int32_t _jumpCount;
- int32_t _driftCount;
- const int32_t _detectThreshold;
- int64_t _jumpBuf[kMaxDriftJumpCount];
- int64_t _driftBuf[kMaxDriftJumpCount];
-};
-
-} // namespace webrtc
-
-#endif // WEBRTC_MODULES_VIDEO_CODING_RTT_FILTER_H_
diff --git a/webrtc/modules/video_coding/main/test/video_source.h b/webrtc/modules/video_coding/main/test/video_source.h
deleted file mode 100644
index 05deb4a39b..0000000000
--- a/webrtc/modules/video_coding/main/test/video_source.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef WEBRTC_MODULES_VIDEO_CODING_TEST_VIDEO_SOURCE_H_
-#define WEBRTC_MODULES_VIDEO_CODING_TEST_VIDEO_SOURCE_H_
-
-#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
-#include "webrtc/typedefs.h"
-
-#include <string>
-
-enum VideoSize
- {
- kUndefined,
- kSQCIF, // 128*96 = 12 288
- kQQVGA, // 160*120 = 19 200
- kQCIF, // 176*144 = 25 344
- kCGA, // 320*200 = 64 000
- kQVGA, // 320*240 = 76 800
- kSIF, // 352*240 = 84 480
- kWQVGA, // 400*240 = 96 000
- kCIF, // 352*288 = 101 376
- kW288p, // 512*288 = 147 456 (WCIF)
- k448p, // 576*448 = 281 088
- kVGA, // 640*480 = 307 200
- k432p, // 720*432 = 311 040
- kW432p, // 768*432 = 331 776
- k4SIF, // 704*480 = 337 920
- kW448p, // 768*448 = 344 064
- kNTSC, // 720*480 = 345 600
- kFW448p, // 800*448 = 358 400
- kWVGA, // 800*480 = 384 000
- k4CIF, // 704*576 = 405 504
- kSVGA, // 800*600 = 480 000
- kW544p, // 960*544 = 522 240
- kW576p, // 1024*576 = 589 824 (W4CIF)
- kHD, // 960*720 = 691 200
- kXGA, // 1024*768 = 786 432
- kWHD, // 1280*720 = 921 600
- kFullHD, // 1440*1080 = 1 555 200
- kWFullHD, // 1920*1080 = 2 073 600
-
- kNumberOfVideoSizes
- };
-
-
-class VideoSource
-{
-public:
- VideoSource();
- VideoSource(std::string fileName, VideoSize size, float frameRate, webrtc::VideoType type = webrtc::kI420);
- VideoSource(std::string fileName, uint16_t width, uint16_t height,
- float frameRate = 30, webrtc::VideoType type = webrtc::kI420);
-
- std::string GetFileName() const { return _fileName; }
- uint16_t GetWidth() const { return _width; }
- uint16_t GetHeight() const { return _height; }
- webrtc::VideoType GetType() const { return _type; }
- float GetFrameRate() const { return _frameRate; }
- int GetWidthHeight( VideoSize size);
-
- // Returns the filename with the path (including the leading slash) removed.
- std::string GetName() const;
-
- size_t GetFrameLength() const;
-
-private:
- std::string _fileName;
- uint16_t _width;
- uint16_t _height;
- webrtc::VideoType _type;
- float _frameRate;
-};
-
-#endif // WEBRTC_MODULES_VIDEO_CODING_TEST_VIDEO_SOURCE_H_
diff --git a/webrtc/modules/video_coding/media_opt_util.cc b/webrtc/modules/video_coding/media_opt_util.cc
new file mode 100644
index 0000000000..d57e9c8dd2
--- /dev/null
+++ b/webrtc/modules/video_coding/media_opt_util.cc
@@ -0,0 +1,682 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/video_coding/media_opt_util.h"
+
+#include <float.h>
+#include <limits.h>
+#include <math.h>
+
+#include <algorithm>
+
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/video_coding/codecs/vp8/include/vp8_common_types.h"
+#include "webrtc/modules/video_coding/include/video_coding_defines.h"
+#include "webrtc/modules/video_coding/fec_tables_xor.h"
+#include "webrtc/modules/video_coding/nack_fec_tables.h"
+
+namespace webrtc {
+// Max value of loss rates in off-line model
+static const int kPacketLossMax = 129;
+
+namespace media_optimization {
+
+VCMProtectionMethod::VCMProtectionMethod()
+ : _effectivePacketLoss(0),
+ _protectionFactorK(0),
+ _protectionFactorD(0),
+ _scaleProtKey(2.0f),
+ _maxPayloadSize(1460),
+ _qmRobustness(new VCMQmRobustness()),
+ _useUepProtectionK(false),
+ _useUepProtectionD(true),
+ _corrFecCost(1.0),
+ _type(kNone) {}
+
+VCMProtectionMethod::~VCMProtectionMethod() {
+ delete _qmRobustness;
+}
+void VCMProtectionMethod::UpdateContentMetrics(
+ const VideoContentMetrics* contentMetrics) {
+ _qmRobustness->UpdateContent(contentMetrics);
+}
+
+VCMNackFecMethod::VCMNackFecMethod(int64_t lowRttNackThresholdMs,
+ int64_t highRttNackThresholdMs)
+ : VCMFecMethod(),
+ _lowRttNackMs(lowRttNackThresholdMs),
+ _highRttNackMs(highRttNackThresholdMs),
+ _maxFramesFec(1) {
+ assert(lowRttNackThresholdMs >= -1 && highRttNackThresholdMs >= -1);
+ assert(highRttNackThresholdMs == -1 ||
+ lowRttNackThresholdMs <= highRttNackThresholdMs);
+ assert(lowRttNackThresholdMs > -1 || highRttNackThresholdMs == -1);
+ _type = kNackFec;
+}
+
+VCMNackFecMethod::~VCMNackFecMethod() {
+ //
+}
+bool VCMNackFecMethod::ProtectionFactor(
+ const VCMProtectionParameters* parameters) {
+ // Hybrid Nack FEC has three operational modes:
+ // 1. Low RTT (below kLowRttNackMs) - Nack only: Set FEC rate
+ // (_protectionFactorD) to zero. -1 means no FEC.
+ // 2. High RTT (above _highRttNackMs) - FEC Only: Keep FEC factors.
+ // -1 means always allow NACK.
+ // 3. Medium RTT values - Hybrid mode: We will only nack the
+ // residual following the decoding of the FEC (refer to JB logic). FEC
+ // delta protection factor will be adjusted based on the RTT.
+
+ // Otherwise: we count on FEC; if the RTT is below a threshold, then we
+ // nack the residual, based on a decision made in the JB.
+
+ // Compute the protection factors
+ VCMFecMethod::ProtectionFactor(parameters);
+ if (_lowRttNackMs == -1 || parameters->rtt < _lowRttNackMs) {
+ _protectionFactorD = 0;
+ VCMFecMethod::UpdateProtectionFactorD(_protectionFactorD);
+
+ // When in Hybrid mode (RTT range), adjust FEC rates based on the
+ // RTT (NACK effectiveness) - adjustment factor is in the range [0,1].
+ } else if (_highRttNackMs == -1 || parameters->rtt < _highRttNackMs) {
+ // TODO(mikhal): Disabling adjustment temporarily.
+ // uint16_t rttIndex = (uint16_t) parameters->rtt;
+ float adjustRtt = 1.0f; // (float)VCMNackFecTable[rttIndex] / 100.0f;
+
+ // Adjust FEC with NACK on (for delta frame only)
+ // table depends on RTT relative to rttMax (NACK Threshold)
+ _protectionFactorD = static_cast<uint8_t>(
+ adjustRtt * static_cast<float>(_protectionFactorD));
+ // update FEC rates after applying adjustment
+ VCMFecMethod::UpdateProtectionFactorD(_protectionFactorD);
+ }
+
+ return true;
+}
+
+int VCMNackFecMethod::ComputeMaxFramesFec(
+ const VCMProtectionParameters* parameters) {
+ if (parameters->numLayers > 2) {
+ // For more than 2 temporal layers we will only have FEC on the base layer,
+ // and the base layers will be pretty far apart. Therefore we force one
+ // frame FEC.
+ return 1;
+ }
+ // We set the max number of frames to base the FEC on so that on average
+ // we will have complete frames in one RTT. Note that this is an upper
+ // bound, and that the actual number of frames used for FEC is decided by the
+ // RTP module based on the actual number of packets and the protection factor.
+ float base_layer_framerate =
+ parameters->frameRate /
+ static_cast<float>(1 << (parameters->numLayers - 1));
+ int max_frames_fec = std::max(
+ static_cast<int>(2.0f * base_layer_framerate * parameters->rtt / 1000.0f +
+ 0.5f),
+ 1);
+ // |kUpperLimitFramesFec| is the upper limit on how many frames we
+ // allow any FEC to be based on.
+ if (max_frames_fec > kUpperLimitFramesFec) {
+ max_frames_fec = kUpperLimitFramesFec;
+ }
+ return max_frames_fec;
+}
+
+int VCMNackFecMethod::MaxFramesFec() const {
+ return _maxFramesFec;
+}
+
+bool VCMNackFecMethod::BitRateTooLowForFec(
+ const VCMProtectionParameters* parameters) {
+ // Bitrate below which we turn off FEC, regardless of reported packet loss.
+ // The condition should depend on resolution and content. For now, use
+ // threshold on bytes per frame, with some effect for the frame size.
+ // The condition for turning off FEC is also based on other factors,
+ // such as |_numLayers|, |_maxFramesFec|, and |_rtt|.
+ int estimate_bytes_per_frame = 1000 * BitsPerFrame(parameters) / 8;
+ int max_bytes_per_frame = kMaxBytesPerFrameForFec;
+ int num_pixels = parameters->codecWidth * parameters->codecHeight;
+ if (num_pixels <= 352 * 288) {
+ max_bytes_per_frame = kMaxBytesPerFrameForFecLow;
+ } else if (num_pixels > 640 * 480) {
+ max_bytes_per_frame = kMaxBytesPerFrameForFecHigh;
+ }
+ // TODO(marpan): add condition based on maximum frames used for FEC,
+ // and expand condition based on frame size.
+ // Max round trip time threshold in ms.
+ const int64_t kMaxRttTurnOffFec = 200;
+ if (estimate_bytes_per_frame < max_bytes_per_frame &&
+ parameters->numLayers < 3 && parameters->rtt < kMaxRttTurnOffFec) {
+ return true;
+ }
+ return false;
+}
+
+bool VCMNackFecMethod::EffectivePacketLoss(
+ const VCMProtectionParameters* parameters) {
+ // Set the effective packet loss for encoder (based on FEC code).
+ // Compute the effective packet loss and residual packet loss due to FEC.
+ VCMFecMethod::EffectivePacketLoss(parameters);
+ return true;
+}
+
+bool VCMNackFecMethod::UpdateParameters(
+ const VCMProtectionParameters* parameters) {
+ ProtectionFactor(parameters);
+ EffectivePacketLoss(parameters);
+ _maxFramesFec = ComputeMaxFramesFec(parameters);
+ if (BitRateTooLowForFec(parameters)) {
+ _protectionFactorK = 0;
+ _protectionFactorD = 0;
+ }
+
+ // Protection/fec rates obtained above are defined relative to total number
+ // of packets (total rate: source + fec) FEC in RTP module assumes
+ // protection factor is defined relative to source number of packets so we
+ // should convert the factor to reduce mismatch between mediaOpt's rate and
+ // the actual one
+ _protectionFactorK = VCMFecMethod::ConvertFECRate(_protectionFactorK);
+ _protectionFactorD = VCMFecMethod::ConvertFECRate(_protectionFactorD);
+
+ return true;
+}
+
+VCMNackMethod::VCMNackMethod() : VCMProtectionMethod() {
+ _type = kNack;
+}
+
+VCMNackMethod::~VCMNackMethod() {
+ //
+}
+
+bool VCMNackMethod::EffectivePacketLoss(
+ const VCMProtectionParameters* parameter) {
+ // Effective Packet Loss, NA in current version.
+ _effectivePacketLoss = 0;
+ return true;
+}
+
+bool VCMNackMethod::UpdateParameters(
+ const VCMProtectionParameters* parameters) {
+ // Compute the effective packet loss
+ EffectivePacketLoss(parameters);
+
+ // nackCost = (bitRate - nackCost) * (lossPr)
+ return true;
+}
+
+VCMFecMethod::VCMFecMethod() : VCMProtectionMethod() {
+ _type = kFec;
+}
+VCMFecMethod::~VCMFecMethod() {
+ //
+}
+
+uint8_t VCMFecMethod::BoostCodeRateKey(uint8_t packetFrameDelta,
+ uint8_t packetFrameKey) const {
+ uint8_t boostRateKey = 2;
+ // Default: ratio scales the FEC protection up for I frames
+ uint8_t ratio = 1;
+
+ if (packetFrameDelta > 0) {
+ ratio = (int8_t)(packetFrameKey / packetFrameDelta);
+ }
+ ratio = VCM_MAX(boostRateKey, ratio);
+
+ return ratio;
+}
+
+uint8_t VCMFecMethod::ConvertFECRate(uint8_t codeRateRTP) const {
+ return static_cast<uint8_t>(VCM_MIN(
+ 255,
+ (0.5 + 255.0 * codeRateRTP / static_cast<float>(255 - codeRateRTP))));
+}
+
+// Update FEC with protectionFactorD
+void VCMFecMethod::UpdateProtectionFactorD(uint8_t protectionFactorD) {
+ _protectionFactorD = protectionFactorD;
+}
+
+// Update FEC with protectionFactorK
+void VCMFecMethod::UpdateProtectionFactorK(uint8_t protectionFactorK) {
+ _protectionFactorK = protectionFactorK;
+}
+
+bool VCMFecMethod::ProtectionFactor(const VCMProtectionParameters* parameters) {
+ // FEC PROTECTION SETTINGS: varies with packet loss and bitrate
+
+ // No protection if (filtered) packetLoss is 0
+ uint8_t packetLoss = (uint8_t)(255 * parameters->lossPr);
+ if (packetLoss == 0) {
+ _protectionFactorK = 0;
+ _protectionFactorD = 0;
+ return true;
+ }
+
+ // Parameters for FEC setting:
+  // first partition size, thresholds, table pars, spatial resolution factor.
+
+ // First partition protection: ~ 20%
+ uint8_t firstPartitionProt = (uint8_t)(255 * 0.20);
+
+ // Minimum protection level needed to generate one FEC packet for one
+ // source packet/frame (in RTP sender)
+ uint8_t minProtLevelFec = 85;
+
+  // Threshold on packetLoss and bitRate/frameRate (=average #packets),
+ // above which we allocate protection to cover at least first partition.
+ uint8_t lossThr = 0;
+ uint8_t packetNumThr = 1;
+
+ // Parameters for range of rate index of table.
+ const uint8_t ratePar1 = 5;
+ const uint8_t ratePar2 = 49;
+
+ // Spatial resolution size, relative to a reference size.
+ float spatialSizeToRef =
+ static_cast<float>(parameters->codecWidth * parameters->codecHeight) /
+ (static_cast<float>(704 * 576));
+ // resolnFac: This parameter will generally increase/decrease the FEC rate
+ // (for fixed bitRate and packetLoss) based on system size.
+ // Use a smaller exponent (< 1) to control/soften system size effect.
+ const float resolnFac = 1.0 / powf(spatialSizeToRef, 0.3f);
+
+ const int bitRatePerFrame = BitsPerFrame(parameters);
+
+ // Average number of packets per frame (source and fec):
+ const uint8_t avgTotPackets =
+ 1 + (uint8_t)(static_cast<float>(bitRatePerFrame) * 1000.0 /
+ static_cast<float>(8.0 * _maxPayloadSize) +
+ 0.5);
+
+ // FEC rate parameters: for P and I frame
+ uint8_t codeRateDelta = 0;
+ uint8_t codeRateKey = 0;
+
+ // Get index for table: the FEC protection depends on an effective rate.
+ // The range on the rate index corresponds to rates (bps)
+ // from ~200k to ~8000k, for 30fps
+ const uint16_t effRateFecTable =
+ static_cast<uint16_t>(resolnFac * bitRatePerFrame);
+ uint8_t rateIndexTable = (uint8_t)VCM_MAX(
+ VCM_MIN((effRateFecTable - ratePar1) / ratePar1, ratePar2), 0);
+
+ // Restrict packet loss range to 50:
+ // current tables defined only up to 50%
+ if (packetLoss >= kPacketLossMax) {
+ packetLoss = kPacketLossMax - 1;
+ }
+ uint16_t indexTable = rateIndexTable * kPacketLossMax + packetLoss;
+
+ // Check on table index
+ assert(indexTable < kSizeCodeRateXORTable);
+
+ // Protection factor for P frame
+ codeRateDelta = kCodeRateXORTable[indexTable];
+
+ if (packetLoss > lossThr && avgTotPackets > packetNumThr) {
+ // Set a minimum based on first partition size.
+ if (codeRateDelta < firstPartitionProt) {
+ codeRateDelta = firstPartitionProt;
+ }
+ }
+
+ // Check limit on amount of protection for P frame; 50% is max.
+ if (codeRateDelta >= kPacketLossMax) {
+ codeRateDelta = kPacketLossMax - 1;
+ }
+
+ float adjustFec = 1.0f;
+ // Avoid additional adjustments when layers are active.
+  // TODO(mikhal/marco): Update adjustment based on layer info.
+ if (parameters->numLayers == 1) {
+ adjustFec = _qmRobustness->AdjustFecFactor(
+ codeRateDelta, parameters->bitRate, parameters->frameRate,
+ parameters->rtt, packetLoss);
+ }
+
+ codeRateDelta = static_cast<uint8_t>(codeRateDelta * adjustFec);
+
+ // For Key frame:
+ // Effectively at a higher rate, so we scale/boost the rate
+ // The boost factor may depend on several factors: ratio of packet
+ // number of I to P frames, how much protection placed on P frames, etc.
+ const uint8_t packetFrameDelta = (uint8_t)(0.5 + parameters->packetsPerFrame);
+ const uint8_t packetFrameKey =
+ (uint8_t)(0.5 + parameters->packetsPerFrameKey);
+ const uint8_t boostKey = BoostCodeRateKey(packetFrameDelta, packetFrameKey);
+
+ rateIndexTable = (uint8_t)VCM_MAX(
+ VCM_MIN(1 + (boostKey * effRateFecTable - ratePar1) / ratePar1, ratePar2),
+ 0);
+ uint16_t indexTableKey = rateIndexTable * kPacketLossMax + packetLoss;
+
+ indexTableKey = VCM_MIN(indexTableKey, kSizeCodeRateXORTable);
+
+ // Check on table index
+ assert(indexTableKey < kSizeCodeRateXORTable);
+
+ // Protection factor for I frame
+ codeRateKey = kCodeRateXORTable[indexTableKey];
+
+ // Boosting for Key frame.
+ int boostKeyProt = _scaleProtKey * codeRateDelta;
+ if (boostKeyProt >= kPacketLossMax) {
+ boostKeyProt = kPacketLossMax - 1;
+ }
+
+ // Make sure I frame protection is at least larger than P frame protection,
+ // and at least as high as filtered packet loss.
+ codeRateKey = static_cast<uint8_t>(
+ VCM_MAX(packetLoss, VCM_MAX(boostKeyProt, codeRateKey)));
+
+ // Check limit on amount of protection for I frame: 50% is max.
+ if (codeRateKey >= kPacketLossMax) {
+ codeRateKey = kPacketLossMax - 1;
+ }
+
+ _protectionFactorK = codeRateKey;
+ _protectionFactorD = codeRateDelta;
+
+ // Generally there is a rate mis-match between the FEC cost estimated
+ // in mediaOpt and the actual FEC cost sent out in RTP module.
+ // This is more significant at low rates (small # of source packets), where
+ // the granularity of the FEC decreases. In this case, non-zero protection
+ // in mediaOpt may generate 0 FEC packets in RTP sender (since actual #FEC
+ // is based on rounding off protectionFactor on actual source packet number).
+  // The correction factor (_corrFecCost) attempts to correct this, at least
+ // for cases of low rates (small #packets) and low protection levels.
+
+ float numPacketsFl = 1.0f + (static_cast<float>(bitRatePerFrame) * 1000.0 /
+ static_cast<float>(8.0 * _maxPayloadSize) +
+ 0.5);
+
+ const float estNumFecGen =
+ 0.5f + static_cast<float>(_protectionFactorD * numPacketsFl / 255.0f);
+
+ // We reduce cost factor (which will reduce overhead for FEC and
+ // hybrid method) and not the protectionFactor.
+ _corrFecCost = 1.0f;
+ if (estNumFecGen < 1.1f && _protectionFactorD < minProtLevelFec) {
+ _corrFecCost = 0.5f;
+ }
+ if (estNumFecGen < 0.9f && _protectionFactorD < minProtLevelFec) {
+ _corrFecCost = 0.0f;
+ }
+
+ // TODO(marpan): Set the UEP protection on/off for Key and Delta frames
+ _useUepProtectionK = _qmRobustness->SetUepProtection(
+ codeRateKey, parameters->bitRate, packetLoss, 0);
+
+ _useUepProtectionD = _qmRobustness->SetUepProtection(
+ codeRateDelta, parameters->bitRate, packetLoss, 1);
+
+ // DONE WITH FEC PROTECTION SETTINGS
+ return true;
+}
+
+int VCMFecMethod::BitsPerFrame(const VCMProtectionParameters* parameters) {
+ // When temporal layers are available FEC will only be applied on the base
+ // layer.
+ const float bitRateRatio =
+ kVp8LayerRateAlloction[parameters->numLayers - 1][0];
+ float frameRateRatio = powf(1 / 2.0, parameters->numLayers - 1);
+ float bitRate = parameters->bitRate * bitRateRatio;
+ float frameRate = parameters->frameRate * frameRateRatio;
+
+ // TODO(mikhal): Update factor following testing.
+ float adjustmentFactor = 1;
+
+ // Average bits per frame (units of kbits)
+ return static_cast<int>(adjustmentFactor * bitRate / frameRate);
+}
+
+bool VCMFecMethod::EffectivePacketLoss(
+ const VCMProtectionParameters* parameters) {
+ // Effective packet loss to encoder is based on RPL (residual packet loss)
+ // this is a soft setting based on degree of FEC protection
+ // RPL = received/input packet loss - average_FEC_recovery
+ // note: received/input packet loss may be filtered based on FilteredLoss
+
+ // Effective Packet Loss, NA in current version.
+ _effectivePacketLoss = 0;
+
+ return true;
+}
+
+bool VCMFecMethod::UpdateParameters(const VCMProtectionParameters* parameters) {
+ // Compute the protection factor
+ ProtectionFactor(parameters);
+
+ // Compute the effective packet loss
+ EffectivePacketLoss(parameters);
+
+  // Protection/fec rates obtained above are defined relative to total number
+ // of packets (total rate: source+fec) FEC in RTP module assumes protection
+ // factor is defined relative to source number of packets so we should
+ // convert the factor to reduce mismatch between mediaOpt suggested rate and
+ // the actual rate
+ _protectionFactorK = ConvertFECRate(_protectionFactorK);
+ _protectionFactorD = ConvertFECRate(_protectionFactorD);
+
+ return true;
+}
+VCMLossProtectionLogic::VCMLossProtectionLogic(int64_t nowMs)
+ : _currentParameters(),
+ _rtt(0),
+ _lossPr(0.0f),
+ _bitRate(0.0f),
+ _frameRate(0.0f),
+ _keyFrameSize(0.0f),
+ _fecRateKey(0),
+ _fecRateDelta(0),
+ _lastPrUpdateT(0),
+ _lossPr255(0.9999f),
+ _lossPrHistory(),
+ _shortMaxLossPr255(0),
+ _packetsPerFrame(0.9999f),
+ _packetsPerFrameKey(0.9999f),
+ _codecWidth(0),
+ _codecHeight(0),
+ _numLayers(1) {
+ Reset(nowMs);
+}
+
+VCMLossProtectionLogic::~VCMLossProtectionLogic() {
+ Release();
+}
+
+void VCMLossProtectionLogic::SetMethod(
+ enum VCMProtectionMethodEnum newMethodType) {
+ if (_selectedMethod && _selectedMethod->Type() == newMethodType)
+ return;
+
+ switch (newMethodType) {
+ case kNack:
+ _selectedMethod.reset(new VCMNackMethod());
+ break;
+ case kFec:
+ _selectedMethod.reset(new VCMFecMethod());
+ break;
+ case kNackFec:
+ _selectedMethod.reset(new VCMNackFecMethod(kLowRttNackMs, -1));
+ break;
+ case kNone:
+ _selectedMethod.reset();
+ break;
+ }
+ UpdateMethod();
+}
+
+void VCMLossProtectionLogic::UpdateRtt(int64_t rtt) {
+ _rtt = rtt;
+}
+
+void VCMLossProtectionLogic::UpdateMaxLossHistory(uint8_t lossPr255,
+ int64_t now) {
+ if (_lossPrHistory[0].timeMs >= 0 &&
+ now - _lossPrHistory[0].timeMs < kLossPrShortFilterWinMs) {
+ if (lossPr255 > _shortMaxLossPr255) {
+ _shortMaxLossPr255 = lossPr255;
+ }
+ } else {
+ // Only add a new value to the history once a second
+ if (_lossPrHistory[0].timeMs == -1) {
+ // First, no shift
+ _shortMaxLossPr255 = lossPr255;
+ } else {
+ // Shift
+ for (int32_t i = (kLossPrHistorySize - 2); i >= 0; i--) {
+ _lossPrHistory[i + 1].lossPr255 = _lossPrHistory[i].lossPr255;
+ _lossPrHistory[i + 1].timeMs = _lossPrHistory[i].timeMs;
+ }
+ }
+ if (_shortMaxLossPr255 == 0) {
+ _shortMaxLossPr255 = lossPr255;
+ }
+
+ _lossPrHistory[0].lossPr255 = _shortMaxLossPr255;
+ _lossPrHistory[0].timeMs = now;
+ _shortMaxLossPr255 = 0;
+ }
+}
+
+uint8_t VCMLossProtectionLogic::MaxFilteredLossPr(int64_t nowMs) const {
+ uint8_t maxFound = _shortMaxLossPr255;
+ if (_lossPrHistory[0].timeMs == -1) {
+ return maxFound;
+ }
+ for (int32_t i = 0; i < kLossPrHistorySize; i++) {
+ if (_lossPrHistory[i].timeMs == -1) {
+ break;
+ }
+ if (nowMs - _lossPrHistory[i].timeMs >
+ kLossPrHistorySize * kLossPrShortFilterWinMs) {
+ // This sample (and all samples after this) is too old
+ break;
+ }
+ if (_lossPrHistory[i].lossPr255 > maxFound) {
+ // This sample is the largest one this far into the history
+ maxFound = _lossPrHistory[i].lossPr255;
+ }
+ }
+ return maxFound;
+}
+
+uint8_t VCMLossProtectionLogic::FilteredLoss(int64_t nowMs,
+ FilterPacketLossMode filter_mode,
+ uint8_t lossPr255) {
+ // Update the max window filter.
+ UpdateMaxLossHistory(lossPr255, nowMs);
+
+ // Update the recursive average filter.
+ _lossPr255.Apply(static_cast<float>(nowMs - _lastPrUpdateT),
+ static_cast<float>(lossPr255));
+ _lastPrUpdateT = nowMs;
+
+ // Filtered loss: default is received loss (no filtering).
+ uint8_t filtered_loss = lossPr255;
+
+ switch (filter_mode) {
+ case kNoFilter:
+ break;
+ case kAvgFilter:
+ filtered_loss = static_cast<uint8_t>(_lossPr255.filtered() + 0.5);
+ break;
+ case kMaxFilter:
+ filtered_loss = MaxFilteredLossPr(nowMs);
+ break;
+ }
+
+ return filtered_loss;
+}
+
+void VCMLossProtectionLogic::UpdateFilteredLossPr(uint8_t packetLossEnc) {
+ _lossPr = static_cast<float>(packetLossEnc) / 255.0;
+}
+
+void VCMLossProtectionLogic::UpdateBitRate(float bitRate) {
+ _bitRate = bitRate;
+}
+
+void VCMLossProtectionLogic::UpdatePacketsPerFrame(float nPackets,
+ int64_t nowMs) {
+ _packetsPerFrame.Apply(static_cast<float>(nowMs - _lastPacketPerFrameUpdateT),
+ nPackets);
+ _lastPacketPerFrameUpdateT = nowMs;
+}
+
+void VCMLossProtectionLogic::UpdatePacketsPerFrameKey(float nPackets,
+ int64_t nowMs) {
+ _packetsPerFrameKey.Apply(
+ static_cast<float>(nowMs - _lastPacketPerFrameUpdateTKey), nPackets);
+ _lastPacketPerFrameUpdateTKey = nowMs;
+}
+
+void VCMLossProtectionLogic::UpdateKeyFrameSize(float keyFrameSize) {
+ _keyFrameSize = keyFrameSize;
+}
+
+void VCMLossProtectionLogic::UpdateFrameSize(uint16_t width, uint16_t height) {
+ _codecWidth = width;
+ _codecHeight = height;
+}
+
+void VCMLossProtectionLogic::UpdateNumLayers(int numLayers) {
+ _numLayers = (numLayers == 0) ? 1 : numLayers;
+}
+
+bool VCMLossProtectionLogic::UpdateMethod() {
+ if (!_selectedMethod)
+ return false;
+ _currentParameters.rtt = _rtt;
+ _currentParameters.lossPr = _lossPr;
+ _currentParameters.bitRate = _bitRate;
+ _currentParameters.frameRate = _frameRate; // rename actual frame rate?
+ _currentParameters.keyFrameSize = _keyFrameSize;
+ _currentParameters.fecRateDelta = _fecRateDelta;
+ _currentParameters.fecRateKey = _fecRateKey;
+ _currentParameters.packetsPerFrame = _packetsPerFrame.filtered();
+ _currentParameters.packetsPerFrameKey = _packetsPerFrameKey.filtered();
+ _currentParameters.codecWidth = _codecWidth;
+ _currentParameters.codecHeight = _codecHeight;
+ _currentParameters.numLayers = _numLayers;
+ return _selectedMethod->UpdateParameters(&_currentParameters);
+}
+
+VCMProtectionMethod* VCMLossProtectionLogic::SelectedMethod() const {
+ return _selectedMethod.get();
+}
+
+VCMProtectionMethodEnum VCMLossProtectionLogic::SelectedType() const {
+ return _selectedMethod ? _selectedMethod->Type() : kNone;
+}
+
+void VCMLossProtectionLogic::Reset(int64_t nowMs) {
+ _lastPrUpdateT = nowMs;
+ _lastPacketPerFrameUpdateT = nowMs;
+ _lastPacketPerFrameUpdateTKey = nowMs;
+ _lossPr255.Reset(0.9999f);
+ _packetsPerFrame.Reset(0.9999f);
+ _fecRateDelta = _fecRateKey = 0;
+ for (int32_t i = 0; i < kLossPrHistorySize; i++) {
+ _lossPrHistory[i].lossPr255 = 0;
+ _lossPrHistory[i].timeMs = -1;
+ }
+ _shortMaxLossPr255 = 0;
+ Release();
+}
+
+void VCMLossProtectionLogic::Release() {
+ _selectedMethod.reset();
+}
+
+} // namespace media_optimization
+} // namespace webrtc
diff --git a/webrtc/modules/video_coding/media_opt_util.h b/webrtc/modules/video_coding/media_opt_util.h
new file mode 100644
index 0000000000..a016a03eab
--- /dev/null
+++ b/webrtc/modules/video_coding/media_opt_util.h
@@ -0,0 +1,361 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_MEDIA_OPT_UTIL_H_
+#define WEBRTC_MODULES_VIDEO_CODING_MEDIA_OPT_UTIL_H_
+
+#include <math.h>
+#include <stdlib.h>
+
+#include "webrtc/base/exp_filter.h"
+#include "webrtc/base/scoped_ptr.h"
+#include "webrtc/modules/video_coding/internal_defines.h"
+#include "webrtc/modules/video_coding/qm_select.h"
+#include "webrtc/system_wrappers/include/trace.h"
+#include "webrtc/typedefs.h"
+
+namespace webrtc {
+namespace media_optimization {
+
+// Number of time periods used for (max) window filter for packet loss
+// TODO(marpan): set reasonable window size for filtered packet loss,
+// adjustment should be based on logged/real data of loss stats/correlation.
+enum { kLossPrHistorySize = 10 };
+
+// 1000 ms, total filter length is (kLossPrHistorySize * 1000) ms
+enum { kLossPrShortFilterWinMs = 1000 };
+
+// The type of filter used on the received packet loss reports.
+enum FilterPacketLossMode {
+ kNoFilter, // No filtering on received loss.
+ kAvgFilter, // Recursive average filter.
+ kMaxFilter // Max-window filter, over the time interval of:
+ // (kLossPrHistorySize * kLossPrShortFilterWinMs) ms.
+};
+
+// Thresholds for hybrid NACK/FEC
+// common to media optimization and the jitter buffer.
+const int64_t kLowRttNackMs = 20;
+
+struct VCMProtectionParameters {
+ VCMProtectionParameters()
+ : rtt(0),
+ lossPr(0.0f),
+ bitRate(0.0f),
+ packetsPerFrame(0.0f),
+ packetsPerFrameKey(0.0f),
+ frameRate(0.0f),
+ keyFrameSize(0.0f),
+ fecRateDelta(0),
+ fecRateKey(0),
+ codecWidth(0),
+ codecHeight(0),
+ numLayers(1) {}
+
+ int64_t rtt;
+ float lossPr;
+ float bitRate;
+ float packetsPerFrame;
+ float packetsPerFrameKey;
+ float frameRate;
+ float keyFrameSize;
+ uint8_t fecRateDelta;
+ uint8_t fecRateKey;
+ uint16_t codecWidth;
+ uint16_t codecHeight;
+ int numLayers;
+};
+
+/******************************/
+/* VCMProtectionMethod class */
+/******************************/
+
+enum VCMProtectionMethodEnum { kNack, kFec, kNackFec, kNone };
+
+class VCMLossProbabilitySample {
+ public:
+ VCMLossProbabilitySample() : lossPr255(0), timeMs(-1) {}
+
+ uint8_t lossPr255;
+ int64_t timeMs;
+};
+
+class VCMProtectionMethod {
+ public:
+ VCMProtectionMethod();
+ virtual ~VCMProtectionMethod();
+
+ // Updates the efficiency of the method using the parameters provided
+ //
+ // Input:
+ // - parameters : Parameters used to calculate efficiency
+ //
+ // Return value : True if this method is recommended in
+ // the given conditions.
+ virtual bool UpdateParameters(const VCMProtectionParameters* parameters) = 0;
+
+ // Returns the protection type
+ //
+ // Return value : The protection type
+ enum VCMProtectionMethodEnum Type() const { return _type; }
+
+ // Returns the effective packet loss for ER, required by this protection
+ // method
+ //
+ // Return value : Required effective packet loss
+ virtual uint8_t RequiredPacketLossER() { return _effectivePacketLoss; }
+
+ // Extracts the FEC protection factor for Key frame, required by this
+ // protection method
+ //
+ // Return value : Required protectionFactor for Key frame
+ virtual uint8_t RequiredProtectionFactorK() { return _protectionFactorK; }
+
+ // Extracts the FEC protection factor for Delta frame, required by this
+ // protection method
+ //
+ // Return value : Required protectionFactor for delta frame
+ virtual uint8_t RequiredProtectionFactorD() { return _protectionFactorD; }
+
+ // Extracts whether the FEC Unequal protection (UEP) is used for Key frame.
+ //
+ // Return value : Required Unequal protection on/off state.
+ virtual bool RequiredUepProtectionK() { return _useUepProtectionK; }
+
+ // Extracts whether the FEC Unequal protection (UEP) is used for Delta
+ // frame.
+ //
+ // Return value : Required Unequal protection on/off state.
+ virtual bool RequiredUepProtectionD() { return _useUepProtectionD; }
+
+ virtual int MaxFramesFec() const { return 1; }
+
+ // Updates content metrics
+ void UpdateContentMetrics(const VideoContentMetrics* contentMetrics);
+
+ protected:
+ uint8_t _effectivePacketLoss;
+ uint8_t _protectionFactorK;
+ uint8_t _protectionFactorD;
+ // Estimation of residual loss after the FEC
+ float _scaleProtKey;
+ int32_t _maxPayloadSize;
+
+ VCMQmRobustness* _qmRobustness;
+ bool _useUepProtectionK;
+ bool _useUepProtectionD;
+ float _corrFecCost;
+ enum VCMProtectionMethodEnum _type;
+};
+
+class VCMNackMethod : public VCMProtectionMethod {
+ public:
+ VCMNackMethod();
+ virtual ~VCMNackMethod();
+ virtual bool UpdateParameters(const VCMProtectionParameters* parameters);
+ // Get the effective packet loss
+ bool EffectivePacketLoss(const VCMProtectionParameters* parameter);
+};
+
+class VCMFecMethod : public VCMProtectionMethod {
+ public:
+ VCMFecMethod();
+ virtual ~VCMFecMethod();
+ virtual bool UpdateParameters(const VCMProtectionParameters* parameters);
+ // Get the effective packet loss for ER
+ bool EffectivePacketLoss(const VCMProtectionParameters* parameters);
+ // Get the FEC protection factors
+ bool ProtectionFactor(const VCMProtectionParameters* parameters);
+ // Get the boost for key frame protection
+ uint8_t BoostCodeRateKey(uint8_t packetFrameDelta,
+ uint8_t packetFrameKey) const;
+ // Convert the rates: defined relative to total# packets or source# packets
+ uint8_t ConvertFECRate(uint8_t codeRate) const;
+ // Get the average effective recovery from FEC: for random loss model
+ float AvgRecoveryFEC(const VCMProtectionParameters* parameters) const;
+ // Update FEC with protectionFactorD
+ void UpdateProtectionFactorD(uint8_t protectionFactorD);
+ // Update FEC with protectionFactorK
+ void UpdateProtectionFactorK(uint8_t protectionFactorK);
+ // Compute the bits per frame. Account for temporal layers when applicable.
+ int BitsPerFrame(const VCMProtectionParameters* parameters);
+
+ protected:
+ enum { kUpperLimitFramesFec = 6 };
+ // Thresholds values for the bytes/frame and round trip time, below which we
+ // may turn off FEC, depending on |_numLayers| and |_maxFramesFec|.
+ // Max bytes/frame for VGA, corresponds to ~140k at 25fps.
+ enum { kMaxBytesPerFrameForFec = 700 };
+ // Max bytes/frame for CIF and lower: corresponds to ~80k at 25fps.
+ enum { kMaxBytesPerFrameForFecLow = 400 };
+ // Max bytes/frame for frame size larger than VGA, ~200k at 25fps.
+ enum { kMaxBytesPerFrameForFecHigh = 1000 };
+};
+
+class VCMNackFecMethod : public VCMFecMethod {
+ public:
+ VCMNackFecMethod(int64_t lowRttNackThresholdMs,
+ int64_t highRttNackThresholdMs);
+ virtual ~VCMNackFecMethod();
+ virtual bool UpdateParameters(const VCMProtectionParameters* parameters);
+ // Get the effective packet loss for ER
+ bool EffectivePacketLoss(const VCMProtectionParameters* parameters);
+ // Get the protection factors
+ bool ProtectionFactor(const VCMProtectionParameters* parameters);
+ // Get the max number of frames the FEC is allowed to be based on.
+ int MaxFramesFec() const;
+ // Turn off the FEC based on low bitrate and other factors.
+ bool BitRateTooLowForFec(const VCMProtectionParameters* parameters);
+
+ private:
+ int ComputeMaxFramesFec(const VCMProtectionParameters* parameters);
+
+ int64_t _lowRttNackMs;
+ int64_t _highRttNackMs;
+ int _maxFramesFec;
+};
+
+class VCMLossProtectionLogic {
+ public:
+ explicit VCMLossProtectionLogic(int64_t nowMs);
+ ~VCMLossProtectionLogic();
+
+ // Set the protection method to be used
+ //
+ // Input:
+ // - newMethodType : New requested protection method type. If one
+ // is already set, it will be deleted and replaced
+ void SetMethod(VCMProtectionMethodEnum newMethodType);
+
+ // Update the round-trip time
+ //
+ // Input:
+ // - rtt : Round-trip time in milliseconds.
+ void UpdateRtt(int64_t rtt);
+
+ // Update the filtered packet loss.
+ //
+ // Input:
+ // - packetLossEnc : The reported packet loss filtered
+ // (max window or average)
+ void UpdateFilteredLossPr(uint8_t packetLossEnc);
+
+ // Update the current target bit rate.
+ //
+ // Input:
+ // - bitRate : The current target bit rate in kbits/s
+ void UpdateBitRate(float bitRate);
+
+ // Update the number of packets per frame estimate, for delta frames
+ //
+ // Input:
+ // - nPackets : Number of packets in the latest sent frame.
+ void UpdatePacketsPerFrame(float nPackets, int64_t nowMs);
+
+ // Update the number of packets per frame estimate, for key frames
+ //
+ // Input:
+ // - nPackets : Number of packets in the latest sent frame.
+ void UpdatePacketsPerFrameKey(float nPackets, int64_t nowMs);
+
+ // Update the keyFrameSize estimate
+ //
+ // Input:
+ // - keyFrameSize : The size of the latest sent key frame.
+ void UpdateKeyFrameSize(float keyFrameSize);
+
+ // Update the frame rate
+ //
+ // Input:
+ // - frameRate : The current target frame rate.
+ void UpdateFrameRate(float frameRate) { _frameRate = frameRate; }
+
+ // Update the frame size
+ //
+ // Input:
+ // - width : The codec frame width.
+ // - height : The codec frame height.
+ void UpdateFrameSize(uint16_t width, uint16_t height);
+
+ // Update the number of active layers
+ //
+ // Input:
+ // - numLayers : Number of layers used.
+ void UpdateNumLayers(int numLayers);
+
+ // The amount of packet loss to cover for with FEC.
+ //
+ // Input:
+ // - fecRateKey : Packet loss to cover for with FEC when
+ // sending key frames.
+ // - fecRateDelta : Packet loss to cover for with FEC when
+ // sending delta frames.
+ void UpdateFECRates(uint8_t fecRateKey, uint8_t fecRateDelta) {
+ _fecRateKey = fecRateKey;
+ _fecRateDelta = fecRateDelta;
+ }
+
+ // Update the protection methods with the current VCMProtectionParameters
+ // and set the requested protection settings.
+ // Return value : Returns true on update
+ bool UpdateMethod();
+
+ // Returns the method currently selected.
+ //
+ // Return value : The protection method currently selected.
+ VCMProtectionMethod* SelectedMethod() const;
+
+ // Return the protection type of the currently selected method
+ VCMProtectionMethodEnum SelectedType() const;
+
+ // Updates the filtered loss for the average and max window packet loss,
+ // and returns the filtered loss probability in the interval [0, 255].
+ // The returned filtered loss value depends on the parameter |filter_mode|.
+ // The input parameter |lossPr255| is the received packet loss.
+
+ // Return value : The filtered loss probability
+ uint8_t FilteredLoss(int64_t nowMs,
+ FilterPacketLossMode filter_mode,
+ uint8_t lossPr255);
+
+ void Reset(int64_t nowMs);
+
+ void Release();
+
+ private:
+ // Sets the available loss protection methods.
+ void UpdateMaxLossHistory(uint8_t lossPr255, int64_t now);
+ uint8_t MaxFilteredLossPr(int64_t nowMs) const;
+ rtc::scoped_ptr<VCMProtectionMethod> _selectedMethod;
+ VCMProtectionParameters _currentParameters;
+ int64_t _rtt;
+ float _lossPr;
+ float _bitRate;
+ float _frameRate;
+ float _keyFrameSize;
+ uint8_t _fecRateKey;
+ uint8_t _fecRateDelta;
+ int64_t _lastPrUpdateT;
+ int64_t _lastPacketPerFrameUpdateT;
+ int64_t _lastPacketPerFrameUpdateTKey;
+ rtc::ExpFilter _lossPr255;
+ VCMLossProbabilitySample _lossPrHistory[kLossPrHistorySize];
+ uint8_t _shortMaxLossPr255;
+ rtc::ExpFilter _packetsPerFrame;
+ rtc::ExpFilter _packetsPerFrameKey;
+ uint16_t _codecWidth;
+ uint16_t _codecHeight;
+ int _numLayers;
+};
+
+} // namespace media_optimization
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_CODING_MEDIA_OPT_UTIL_H_
diff --git a/webrtc/modules/video_coding/main/source/media_optimization.cc b/webrtc/modules/video_coding/media_optimization.cc
index cc73d3803d..a234a06f9b 100644
--- a/webrtc/modules/video_coding/main/source/media_optimization.cc
+++ b/webrtc/modules/video_coding/media_optimization.cc
@@ -8,13 +8,13 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/video_coding/main/source/media_optimization.h"
+#include "webrtc/modules/video_coding/media_optimization.h"
-#include "webrtc/modules/video_coding/main/source/content_metrics_processing.h"
-#include "webrtc/modules/video_coding/main/source/qm_select.h"
-#include "webrtc/modules/video_coding/utility/include/frame_dropper.h"
+#include "webrtc/base/logging.h"
+#include "webrtc/modules/video_coding/content_metrics_processing.h"
+#include "webrtc/modules/video_coding/qm_select.h"
+#include "webrtc/modules/video_coding/utility/frame_dropper.h"
#include "webrtc/system_wrappers/include/clock.h"
-#include "webrtc/system_wrappers/include/logging.h"
namespace webrtc {
namespace media_optimization {
@@ -53,11 +53,9 @@ void UpdateProtectionCallback(
key_fec_params.fec_mask_type = kFecMaskRandom;
// TODO(Marco): Pass FEC protection values per layer.
- video_protection_callback->ProtectionRequest(&delta_fec_params,
- &key_fec_params,
- video_rate_bps,
- nack_overhead_rate_bps,
- fec_overhead_rate_bps);
+ video_protection_callback->ProtectionRequest(
+ &delta_fec_params, &key_fec_params, video_rate_bps,
+ nack_overhead_rate_bps, fec_overhead_rate_bps);
}
} // namespace
@@ -115,8 +113,8 @@ MediaOptimization::~MediaOptimization(void) {
void MediaOptimization::Reset() {
CriticalSectionScoped lock(crit_sect_.get());
- SetEncodingDataInternal(
- kVideoCodecUnknown, 0, 0, 0, 0, 0, 0, max_payload_size_);
+ SetEncodingDataInternal(kVideoCodecUnknown, 0, 0, 0, 0, 0, 0,
+ max_payload_size_);
memset(incoming_frame_times_, -1, sizeof(incoming_frame_times_));
incoming_frame_rate_ = 0.0;
frame_dropper_->Reset();
@@ -149,14 +147,8 @@ void MediaOptimization::SetEncodingData(VideoCodecType send_codec_type,
int num_layers,
int32_t mtu) {
CriticalSectionScoped lock(crit_sect_.get());
- SetEncodingDataInternal(send_codec_type,
- max_bit_rate,
- frame_rate,
- target_bitrate,
- width,
- height,
- num_layers,
- mtu);
+ SetEncodingDataInternal(send_codec_type, max_bit_rate, frame_rate,
+ target_bitrate, width, height, num_layers, mtu);
}
void MediaOptimization::SetEncodingDataInternal(VideoCodecType send_codec_type,
@@ -190,11 +182,8 @@ void MediaOptimization::SetEncodingDataInternal(VideoCodecType send_codec_type,
codec_height_ = height;
num_layers_ = (num_layers <= 1) ? 1 : num_layers; // Can also be zero.
max_payload_size_ = mtu;
- qm_resolution_->Initialize(target_bitrate_kbps,
- user_frame_rate_,
- codec_width_,
- codec_height_,
- num_layers_);
+ qm_resolution_->Initialize(target_bitrate_kbps, user_frame_rate_,
+ codec_width_, codec_height_, num_layers_);
}
uint32_t MediaOptimization::SetTargetRates(
@@ -256,10 +245,8 @@ uint32_t MediaOptimization::SetTargetRates(
// overhead data actually transmitted (including headers) the last
// second.
if (protection_callback) {
- UpdateProtectionCallback(selected_method,
- &sent_video_rate_bps,
- &sent_nack_rate_bps,
- &sent_fec_rate_bps,
+ UpdateProtectionCallback(selected_method, &sent_video_rate_bps,
+ &sent_nack_rate_bps, &sent_fec_rate_bps,
protection_callback);
}
uint32_t sent_total_rate_bps =
@@ -296,10 +283,8 @@ uint32_t MediaOptimization::SetTargetRates(
if (enable_qm_ && qmsettings_callback) {
// Update QM with rates.
- qm_resolution_->UpdateRates(target_video_bitrate_kbps,
- sent_video_rate_kbps,
- incoming_frame_rate_,
- fraction_lost_);
+ qm_resolution_->UpdateRates(target_video_bitrate_kbps, sent_video_rate_kbps,
+ incoming_frame_rate_, fraction_lost_);
// Check for QM selection.
bool select_qm = CheckStatusForQMchange();
if (select_qm) {
@@ -514,8 +499,7 @@ void MediaOptimization::UpdateSentBitrate(int64_t now_ms) {
}
size_t framesize_sum = 0;
for (FrameSampleList::iterator it = encoded_frame_samples_.begin();
- it != encoded_frame_samples_.end();
- ++it) {
+ it != encoded_frame_samples_.end(); ++it) {
framesize_sum += it->size_bytes;
}
float denom = static_cast<float>(
@@ -565,7 +549,8 @@ bool MediaOptimization::QMUpdate(
}
LOG(LS_INFO) << "Media optimizer requests the video resolution to be changed "
- "to " << qm->codec_width << "x" << qm->codec_height << "@"
+ "to "
+ << qm->codec_width << "x" << qm->codec_height << "@"
<< qm->frame_rate;
// Update VPM with new target frame rate and frame size.
@@ -574,11 +559,11 @@ bool MediaOptimization::QMUpdate(
// will vary/fluctuate, and since we don't want to change the state of the
// VPM frame dropper, unless a temporal action was selected, we use the
// quantity |qm->frame_rate| for updating.
- video_qmsettings_callback->SetVideoQMSettings(
- qm->frame_rate, codec_width_, codec_height_);
+ video_qmsettings_callback->SetVideoQMSettings(qm->frame_rate, codec_width_,
+ codec_height_);
content_->UpdateFrameRate(qm->frame_rate);
- qm_resolution_->UpdateCodecParameters(
- qm->frame_rate, codec_width_, codec_height_);
+ qm_resolution_->UpdateCodecParameters(qm->frame_rate, codec_width_,
+ codec_height_);
return true;
}
diff --git a/webrtc/modules/video_coding/main/source/media_optimization.h b/webrtc/modules/video_coding/media_optimization.h
index c4feeff743..54389bf5b5 100644
--- a/webrtc/modules/video_coding/main/source/media_optimization.h
+++ b/webrtc/modules/video_coding/media_optimization.h
@@ -8,16 +8,16 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_MEDIA_OPTIMIZATION_H_
-#define WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_MEDIA_OPTIMIZATION_H_
+#ifndef WEBRTC_MODULES_VIDEO_CODING_MEDIA_OPTIMIZATION_H_
+#define WEBRTC_MODULES_VIDEO_CODING_MEDIA_OPTIMIZATION_H_
#include <list>
#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/modules/video_coding/main/interface/video_coding.h"
-#include "webrtc/modules/video_coding/main/source/media_opt_util.h"
-#include "webrtc/modules/video_coding/main/source/qm_select.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/video_coding/include/video_coding.h"
+#include "webrtc/modules/video_coding/media_opt_util.h"
+#include "webrtc/modules/video_coding/qm_select.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
namespace webrtc {
@@ -85,15 +85,9 @@ class MediaOptimization {
uint32_t SentBitRate();
private:
- enum {
- kFrameCountHistorySize = 90
- };
- enum {
- kFrameHistoryWinMs = 2000
- };
- enum {
- kBitrateAverageWinMs = 1000
- };
+ enum { kFrameCountHistorySize = 90 };
+ enum { kFrameHistoryWinMs = 2000 };
+ enum { kBitrateAverageWinMs = 1000 };
struct EncodedFrameSample;
typedef std::list<EncodedFrameSample> FrameSampleList;
@@ -177,4 +171,4 @@ class MediaOptimization {
} // namespace media_optimization
} // namespace webrtc
-#endif // WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_MEDIA_OPTIMIZATION_H_
+#endif // WEBRTC_MODULES_VIDEO_CODING_MEDIA_OPTIMIZATION_H_
diff --git a/webrtc/modules/video_coding/main/source/media_optimization_unittest.cc b/webrtc/modules/video_coding/media_optimization_unittest.cc
index be528d9932..3f8ac5d075 100644
--- a/webrtc/modules/video_coding/main/source/media_optimization_unittest.cc
+++ b/webrtc/modules/video_coding/media_optimization_unittest.cc
@@ -9,7 +9,7 @@
*/
#include "testing/gtest/include/gtest/gtest.h"
-#include "webrtc/modules/video_coding/main/source/media_optimization.h"
+#include "webrtc/modules/video_coding/media_optimization.h"
#include "webrtc/system_wrappers/include/clock.h"
namespace webrtc {
@@ -51,7 +51,6 @@ class TestMediaOptimization : public ::testing::Test {
uint32_t next_timestamp_;
};
-
TEST_F(TestMediaOptimization, VerifyMuting) {
// Enable video suspension with these limits.
// Suspend the video when the rate is below 50 kbps and resume when it gets
diff --git a/webrtc/modules/video_coding/nack_fec_tables.h b/webrtc/modules/video_coding/nack_fec_tables.h
new file mode 100644
index 0000000000..f9f5ad97ac
--- /dev/null
+++ b/webrtc/modules/video_coding/nack_fec_tables.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_NACK_FEC_TABLES_H_
+#define WEBRTC_MODULES_VIDEO_CODING_NACK_FEC_TABLES_H_
+
+namespace webrtc {
+
+// Table for adjusting FEC rate for NACK/FEC protection method
+// Table values are built as a sigmoid function, ranging from 0 to 100, based on
+// the HybridNackTH values defined in media_opt_util.h.
+const uint16_t VCMNackFecTable[100] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1,
+ 1, 2, 2, 2, 3, 3, 4, 5, 6, 7, 9, 10, 12, 15, 18,
+ 21, 24, 28, 32, 37, 41, 46, 51, 56, 61, 66, 70, 74, 78, 81,
+ 84, 86, 89, 90, 92, 93, 95, 95, 96, 97, 97, 98, 98, 99, 99,
+ 99, 99, 99, 99, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
+ 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
+ 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_CODING_NACK_FEC_TABLES_H_
diff --git a/webrtc/modules/video_coding/main/source/packet.cc b/webrtc/modules/video_coding/packet.cc
index fd5a6abb8c..e25de2ed6c 100644
--- a/webrtc/modules/video_coding/main/source/packet.cc
+++ b/webrtc/modules/video_coding/packet.cc
@@ -8,11 +8,12 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/modules/video_coding/main/source/packet.h"
+#include "webrtc/modules/video_coding/packet.h"
#include <assert.h>
+#include "webrtc/modules/include/module_common_types.h"
+
namespace webrtc {
VCMPacket::VCMPacket()
@@ -34,49 +35,47 @@ VCMPacket::VCMPacket()
VCMPacket::VCMPacket(const uint8_t* ptr,
const size_t size,
- const WebRtcRTPHeader& rtpHeader) :
- payloadType(rtpHeader.header.payloadType),
- timestamp(rtpHeader.header.timestamp),
- ntp_time_ms_(rtpHeader.ntp_time_ms),
- seqNum(rtpHeader.header.sequenceNumber),
- dataPtr(ptr),
- sizeBytes(size),
- markerBit(rtpHeader.header.markerBit),
+ const WebRtcRTPHeader& rtpHeader)
+ : payloadType(rtpHeader.header.payloadType),
+ timestamp(rtpHeader.header.timestamp),
+ ntp_time_ms_(rtpHeader.ntp_time_ms),
+ seqNum(rtpHeader.header.sequenceNumber),
+ dataPtr(ptr),
+ sizeBytes(size),
+ markerBit(rtpHeader.header.markerBit),
- frameType(rtpHeader.frameType),
- codec(kVideoCodecUnknown),
- isFirstPacket(rtpHeader.type.Video.isFirstPacket),
- completeNALU(kNaluComplete),
- insertStartCode(false),
- width(rtpHeader.type.Video.width),
- height(rtpHeader.type.Video.height),
- codecSpecificHeader(rtpHeader.type.Video)
-{
- CopyCodecSpecifics(rtpHeader.type.Video);
+ frameType(rtpHeader.frameType),
+ codec(kVideoCodecUnknown),
+ isFirstPacket(rtpHeader.type.Video.isFirstPacket),
+ completeNALU(kNaluComplete),
+ insertStartCode(false),
+ width(rtpHeader.type.Video.width),
+ height(rtpHeader.type.Video.height),
+ codecSpecificHeader(rtpHeader.type.Video) {
+ CopyCodecSpecifics(rtpHeader.type.Video);
}
VCMPacket::VCMPacket(const uint8_t* ptr,
size_t size,
uint16_t seq,
uint32_t ts,
- bool mBit) :
- payloadType(0),
- timestamp(ts),
- ntp_time_ms_(0),
- seqNum(seq),
- dataPtr(ptr),
- sizeBytes(size),
- markerBit(mBit),
+ bool mBit)
+ : payloadType(0),
+ timestamp(ts),
+ ntp_time_ms_(0),
+ seqNum(seq),
+ dataPtr(ptr),
+ sizeBytes(size),
+ markerBit(mBit),
- frameType(kVideoFrameDelta),
- codec(kVideoCodecUnknown),
- isFirstPacket(false),
- completeNALU(kNaluComplete),
- insertStartCode(false),
- width(0),
- height(0),
- codecSpecificHeader()
-{}
+ frameType(kVideoFrameDelta),
+ codec(kVideoCodecUnknown),
+ isFirstPacket(false),
+ completeNALU(kNaluComplete),
+ insertStartCode(false),
+ width(0),
+ height(0),
+ codecSpecificHeader() {}
void VCMPacket::Reset() {
payloadType = 0;
diff --git a/webrtc/modules/video_coding/packet.h b/webrtc/modules/video_coding/packet.h
new file mode 100644
index 0000000000..b77c1df039
--- /dev/null
+++ b/webrtc/modules/video_coding/packet.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_PACKET_H_
+#define WEBRTC_MODULES_VIDEO_CODING_PACKET_H_
+
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/video_coding/jitter_buffer_common.h"
+#include "webrtc/typedefs.h"
+
+namespace webrtc {
+
+class VCMPacket {
+ public:
+ VCMPacket();
+ VCMPacket(const uint8_t* ptr,
+ const size_t size,
+ const WebRtcRTPHeader& rtpHeader);
+ VCMPacket(const uint8_t* ptr,
+ size_t size,
+ uint16_t seqNum,
+ uint32_t timestamp,
+ bool markerBit);
+
+ void Reset();
+
+ uint8_t payloadType;
+ uint32_t timestamp;
+ // NTP time of the capture time in local timebase in milliseconds.
+ int64_t ntp_time_ms_;
+ uint16_t seqNum;
+ const uint8_t* dataPtr;
+ size_t sizeBytes;
+ bool markerBit;
+
+ FrameType frameType;
+ VideoCodecType codec;
+
+ bool isFirstPacket; // Is this first packet in a frame.
+ VCMNaluCompleteness completeNALU; // Default is kNaluIncomplete.
+ bool insertStartCode; // True if a start code should be inserted before this
+ // packet.
+ int width;
+ int height;
+ RTPVideoHeader codecSpecificHeader;
+
+ protected:
+ void CopyCodecSpecifics(const RTPVideoHeader& videoHeader);
+};
+
+} // namespace webrtc
+#endif // WEBRTC_MODULES_VIDEO_CODING_PACKET_H_
diff --git a/webrtc/modules/video_coding/main/source/qm_select.cc b/webrtc/modules/video_coding/qm_select.cc
index e86d0755c0..9da42bb33c 100644
--- a/webrtc/modules/video_coding/main/source/qm_select.cc
+++ b/webrtc/modules/video_coding/qm_select.cc
@@ -8,14 +8,14 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/video_coding/main/source/qm_select.h"
+#include "webrtc/modules/video_coding/qm_select.h"
#include <math.h>
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/modules/video_coding/main/interface/video_coding_defines.h"
-#include "webrtc/modules/video_coding/main/source/internal_defines.h"
-#include "webrtc/modules/video_coding/main/source/qm_select_data.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/video_coding/include/video_coding_defines.h"
+#include "webrtc/modules/video_coding/internal_defines.h"
+#include "webrtc/modules/video_coding/qm_select_data.h"
#include "webrtc/system_wrappers/include/trace.h"
namespace webrtc {
@@ -36,8 +36,7 @@ VCMQmMethod::VCMQmMethod()
ResetQM();
}
-VCMQmMethod::~VCMQmMethod() {
-}
+VCMQmMethod::~VCMQmMethod() {}
void VCMQmMethod::ResetQM() {
aspect_ratio_ = 1.0f;
@@ -52,7 +51,7 @@ uint8_t VCMQmMethod::ComputeContentClass() {
return content_class_ = 3 * motion_.level + spatial_.level;
}
-void VCMQmMethod::UpdateContent(const VideoContentMetrics* contentMetrics) {
+void VCMQmMethod::UpdateContent(const VideoContentMetrics* contentMetrics) {
content_metrics_ = contentMetrics;
}
@@ -64,7 +63,7 @@ void VCMQmMethod::ComputeMotionNFD() {
if (motion_.value < kLowMotionNfd) {
motion_.level = kLow;
} else if (motion_.value > kHighMotionNfd) {
- motion_.level = kHigh;
+ motion_.level = kHigh;
} else {
motion_.level = kDefault;
}
@@ -75,7 +74,7 @@ void VCMQmMethod::ComputeSpatial() {
float spatial_err_h = 0.0;
float spatial_err_v = 0.0;
if (content_metrics_) {
- spatial_err = content_metrics_->spatial_pred_err;
+ spatial_err = content_metrics_->spatial_pred_err;
spatial_err_h = content_metrics_->spatial_pred_err_h;
spatial_err_v = content_metrics_->spatial_pred_err_v;
}
@@ -94,8 +93,7 @@ void VCMQmMethod::ComputeSpatial() {
}
}
-ImageType VCMQmMethod::GetImageType(uint16_t width,
- uint16_t height) {
+ImageType VCMQmMethod::GetImageType(uint16_t width, uint16_t height) {
// Get the image type for the encoder frame size.
uint32_t image_size = width * height;
if (image_size == kSizeOfImageType[kQCIF]) {
@@ -142,7 +140,7 @@ FrameRateLevelClass VCMQmMethod::FrameRateLevel(float avg_framerate) {
} else if (avg_framerate <= kMiddleFrameRate) {
return kFrameRateMiddle1;
} else if (avg_framerate <= kHighFrameRate) {
- return kFrameRateMiddle2;
+ return kFrameRateMiddle2;
} else {
return kFrameRateHigh;
}
@@ -150,8 +148,7 @@ FrameRateLevelClass VCMQmMethod::FrameRateLevel(float avg_framerate) {
// RESOLUTION CLASS
-VCMQmResolution::VCMQmResolution()
- : qm_(new VCMResolutionScale()) {
+VCMQmResolution::VCMQmResolution() : qm_(new VCMResolutionScale()) {
Reset();
}
@@ -174,7 +171,7 @@ void VCMQmResolution::ResetRates() {
void VCMQmResolution::ResetDownSamplingState() {
state_dec_factor_spatial_ = 1.0;
- state_dec_factor_temporal_ = 1.0;
+ state_dec_factor_temporal_ = 1.0;
for (int i = 0; i < kDownActionHistorySize; i++) {
down_action_history_[i].spatial = kNoChangeSpatial;
down_action_history_[i].temporal = kNoChangeTemporal;
@@ -225,11 +222,12 @@ int VCMQmResolution::Initialize(float bitrate,
buffer_level_ = kInitBufferLevel * target_bitrate_;
// Per-frame bandwidth.
per_frame_bandwidth_ = target_bitrate_ / user_framerate;
- init_ = true;
+ init_ = true;
return VCM_OK;
}
-void VCMQmResolution::UpdateCodecParameters(float frame_rate, uint16_t width,
+void VCMQmResolution::UpdateCodecParameters(float frame_rate,
+ uint16_t width,
uint16_t height) {
width_ = width;
height_ = height;
@@ -283,12 +281,12 @@ void VCMQmResolution::UpdateRates(float target_bitrate,
// Update with the current new target and frame rate:
// these values are ones the encoder will use for the current/next ~1sec.
- target_bitrate_ = target_bitrate;
+ target_bitrate_ = target_bitrate;
incoming_framerate_ = incoming_framerate;
sum_incoming_framerate_ += incoming_framerate_;
// Update the per_frame_bandwidth:
// this is the per_frame_bw for the current/next ~1sec.
- per_frame_bandwidth_ = 0.0f;
+ per_frame_bandwidth_ = 0.0f;
if (incoming_framerate_ > 0.0f) {
per_frame_bandwidth_ = target_bitrate_ / incoming_framerate_;
}
@@ -313,7 +311,7 @@ int VCMQmResolution::SelectResolution(VCMResolutionScale** qm) {
}
if (content_metrics_ == NULL) {
Reset();
- *qm = qm_;
+ *qm = qm_;
return VCM_OK;
}
@@ -376,31 +374,31 @@ void VCMQmResolution::ComputeRatesForSelection() {
avg_rate_mismatch_sgn_ = 0.0f;
avg_packet_loss_ = 0.0f;
if (frame_cnt_ > 0) {
- avg_ratio_buffer_low_ = static_cast<float>(low_buffer_cnt_) /
- static_cast<float>(frame_cnt_);
+ avg_ratio_buffer_low_ =
+ static_cast<float>(low_buffer_cnt_) / static_cast<float>(frame_cnt_);
}
if (update_rate_cnt_ > 0) {
- avg_rate_mismatch_ = static_cast<float>(sum_rate_MM_) /
- static_cast<float>(update_rate_cnt_);
+ avg_rate_mismatch_ =
+ static_cast<float>(sum_rate_MM_) / static_cast<float>(update_rate_cnt_);
avg_rate_mismatch_sgn_ = static_cast<float>(sum_rate_MM_sgn_) /
- static_cast<float>(update_rate_cnt_);
+ static_cast<float>(update_rate_cnt_);
avg_target_rate_ = static_cast<float>(sum_target_rate_) /
- static_cast<float>(update_rate_cnt_);
+ static_cast<float>(update_rate_cnt_);
avg_incoming_framerate_ = static_cast<float>(sum_incoming_framerate_) /
- static_cast<float>(update_rate_cnt_);
- avg_packet_loss_ = static_cast<float>(sum_packet_loss_) /
- static_cast<float>(update_rate_cnt_);
+ static_cast<float>(update_rate_cnt_);
+ avg_packet_loss_ = static_cast<float>(sum_packet_loss_) /
+ static_cast<float>(update_rate_cnt_);
}
// For selection we may want to weight some quantities more heavily
// with the current (i.e., next ~1sec) rate values.
- avg_target_rate_ = kWeightRate * avg_target_rate_ +
- (1.0 - kWeightRate) * target_bitrate_;
+ avg_target_rate_ =
+ kWeightRate * avg_target_rate_ + (1.0 - kWeightRate) * target_bitrate_;
avg_incoming_framerate_ = kWeightRate * avg_incoming_framerate_ +
- (1.0 - kWeightRate) * incoming_framerate_;
+ (1.0 - kWeightRate) * incoming_framerate_;
// Use base layer frame rate for temporal layers: this will favor spatial.
assert(num_layers_ > 0);
- framerate_level_ = FrameRateLevel(
- avg_incoming_framerate_ / static_cast<float>(1 << (num_layers_ - 1)));
+ framerate_level_ = FrameRateLevel(avg_incoming_framerate_ /
+ static_cast<float>(1 << (num_layers_ - 1)));
}
void VCMQmResolution::ComputeEncoderState() {
@@ -412,7 +410,7 @@ void VCMQmResolution::ComputeEncoderState() {
// 2) rate mis-match is high, and consistent over-shooting by encoder.
if ((avg_ratio_buffer_low_ > kMaxBufferLow) ||
((avg_rate_mismatch_ > kMaxRateMisMatch) &&
- (avg_rate_mismatch_sgn_ < -kRateOverShoot))) {
+ (avg_rate_mismatch_sgn_ < -kRateOverShoot))) {
encoder_state_ = kStressedEncoding;
}
// Assign easy state if:
@@ -435,9 +433,9 @@ bool VCMQmResolution::GoingUpResolution() {
// Modify the fac_width/height for this case.
if (down_action_history_[0].spatial == kOneQuarterSpatialUniform) {
fac_width = kFactorWidthSpatial[kOneQuarterSpatialUniform] /
- kFactorWidthSpatial[kOneHalfSpatialUniform];
+ kFactorWidthSpatial[kOneHalfSpatialUniform];
fac_height = kFactorHeightSpatial[kOneQuarterSpatialUniform] /
- kFactorHeightSpatial[kOneHalfSpatialUniform];
+ kFactorHeightSpatial[kOneHalfSpatialUniform];
}
// Check if we should go up both spatially and temporally.
@@ -459,8 +457,8 @@ bool VCMQmResolution::GoingUpResolution() {
kTransRateScaleUpSpatial);
}
if (down_action_history_[0].temporal != kNoChangeTemporal) {
- selected_up_temporal = ConditionForGoingUp(1.0f, 1.0f, fac_temp,
- kTransRateScaleUpTemp);
+ selected_up_temporal =
+ ConditionForGoingUp(1.0f, 1.0f, fac_temp, kTransRateScaleUpTemp);
}
if (selected_up_spatial && !selected_up_temporal) {
action_.spatial = down_action_history_[0].spatial;
@@ -484,13 +482,13 @@ bool VCMQmResolution::ConditionForGoingUp(float fac_width,
float fac_height,
float fac_temp,
float scale_fac) {
- float estimated_transition_rate_up = GetTransitionRate(fac_width, fac_height,
- fac_temp, scale_fac);
+ float estimated_transition_rate_up =
+ GetTransitionRate(fac_width, fac_height, fac_temp, scale_fac);
// Go back up if:
// 1) target rate is above threshold and current encoder state is stable, or
// 2) encoder state is easy (encoder is significantly under-shooting target).
if (((avg_target_rate_ > estimated_transition_rate_up) &&
- (encoder_state_ == kStableEncoding)) ||
+ (encoder_state_ == kStableEncoding)) ||
(encoder_state_ == kEasyEncoding)) {
return true;
} else {
@@ -505,7 +503,7 @@ bool VCMQmResolution::GoingDownResolution() {
// Resolution reduction if:
// (1) target rate is below transition rate, or
// (2) encoder is in stressed state and target rate below a max threshold.
- if ((avg_target_rate_ < estimated_transition_rate_down ) ||
+ if ((avg_target_rate_ < estimated_transition_rate_down) ||
(encoder_state_ == kStressedEncoding && avg_target_rate_ < max_rate)) {
// Get the down-sampling action: based on content class, and how low
// average target rate is relative to transition rate.
@@ -529,9 +527,7 @@ bool VCMQmResolution::GoingDownResolution() {
action_.spatial = kNoChangeSpatial;
break;
}
- default: {
- assert(false);
- }
+ default: { assert(false); }
}
switch (temp_fact) {
case 3: {
@@ -546,9 +542,7 @@ bool VCMQmResolution::GoingDownResolution() {
action_.temporal = kNoChangeTemporal;
break;
}
- default: {
- assert(false);
- }
+ default: { assert(false); }
}
// Only allow for one action (spatial or temporal) at a given time.
assert(action_.temporal == kNoChangeTemporal ||
@@ -572,9 +566,9 @@ float VCMQmResolution::GetTransitionRate(float fac_width,
float fac_height,
float fac_temp,
float scale_fac) {
- ImageType image_type = GetImageType(
- static_cast<uint16_t>(fac_width * width_),
- static_cast<uint16_t>(fac_height * height_));
+ ImageType image_type =
+ GetImageType(static_cast<uint16_t>(fac_width * width_),
+ static_cast<uint16_t>(fac_height * height_));
FrameRateLevelClass framerate_level =
FrameRateLevel(fac_temp * avg_incoming_framerate_);
@@ -589,13 +583,13 @@ float VCMQmResolution::GetTransitionRate(float fac_width,
// Nominal values based on image format (frame size and frame rate).
float max_rate = kFrameRateFac[framerate_level] * kMaxRateQm[image_type];
- uint8_t image_class = image_type > kVGA ? 1: 0;
+ uint8_t image_class = image_type > kVGA ? 1 : 0;
uint8_t table_index = image_class * 9 + content_class_;
// Scale factor for down-sampling transition threshold:
// factor based on the content class and the image size.
float scaleTransRate = kScaleTransRateQm[table_index];
// Threshold bitrate for resolution action.
- return static_cast<float> (scale_fac * scaleTransRate * max_rate);
+ return static_cast<float>(scale_fac * scaleTransRate * max_rate);
}
void VCMQmResolution::UpdateDownsamplingState(UpDownAction up_down) {
@@ -605,9 +599,9 @@ void VCMQmResolution::UpdateDownsamplingState(UpDownAction up_down) {
// If last spatial action was 1/2x1/2, we undo it in two steps, so the
// spatial scale factor in this first step is modified as (4.0/3.0 / 2.0).
if (action_.spatial == kOneQuarterSpatialUniform) {
- qm_->spatial_width_fact =
- 1.0f * kFactorWidthSpatial[kOneHalfSpatialUniform] /
- kFactorWidthSpatial[kOneQuarterSpatialUniform];
+ qm_->spatial_width_fact = 1.0f *
+ kFactorWidthSpatial[kOneHalfSpatialUniform] /
+ kFactorWidthSpatial[kOneQuarterSpatialUniform];
qm_->spatial_height_fact =
1.0f * kFactorHeightSpatial[kOneHalfSpatialUniform] /
kFactorHeightSpatial[kOneQuarterSpatialUniform];
@@ -628,17 +622,18 @@ void VCMQmResolution::UpdateDownsamplingState(UpDownAction up_down) {
}
UpdateCodecResolution();
state_dec_factor_spatial_ = state_dec_factor_spatial_ *
- qm_->spatial_width_fact * qm_->spatial_height_fact;
+ qm_->spatial_width_fact *
+ qm_->spatial_height_fact;
state_dec_factor_temporal_ = state_dec_factor_temporal_ * qm_->temporal_fact;
}
-void VCMQmResolution::UpdateCodecResolution() {
+void VCMQmResolution::UpdateCodecResolution() {
if (action_.spatial != kNoChangeSpatial) {
qm_->change_resolution_spatial = true;
- qm_->codec_width = static_cast<uint16_t>(width_ /
- qm_->spatial_width_fact + 0.5f);
- qm_->codec_height = static_cast<uint16_t>(height_ /
- qm_->spatial_height_fact + 0.5f);
+ qm_->codec_width =
+ static_cast<uint16_t>(width_ / qm_->spatial_width_fact + 0.5f);
+ qm_->codec_height =
+ static_cast<uint16_t>(height_ / qm_->spatial_height_fact + 0.5f);
// Size should not exceed native sizes.
assert(qm_->codec_width <= native_width_);
assert(qm_->codec_height <= native_height_);
@@ -662,8 +657,9 @@ void VCMQmResolution::UpdateCodecResolution() {
}
uint8_t VCMQmResolution::RateClass(float transition_rate) {
- return avg_target_rate_ < (kFacLowRate * transition_rate) ? 0:
- (avg_target_rate_ >= transition_rate ? 2 : 1);
+ return avg_target_rate_ < (kFacLowRate * transition_rate)
+ ? 0
+ : (avg_target_rate_ >= transition_rate ? 2 : 1);
}
// TODO(marpan): Would be better to capture these frame rate adjustments by
@@ -698,15 +694,14 @@ void VCMQmResolution::AdjustAction() {
}
// Never use temporal action if number of temporal layers is above 2.
if (num_layers_ > 2) {
- if (action_.temporal != kNoChangeTemporal) {
+ if (action_.temporal != kNoChangeTemporal) {
action_.spatial = kOneHalfSpatialUniform;
}
action_.temporal = kNoChangeTemporal;
}
// If spatial action was selected, we need to make sure the frame sizes
// are multiples of two. Otherwise switch to 2/3 temporal.
- if (action_.spatial != kNoChangeSpatial &&
- !EvenFrameSize()) {
+ if (action_.spatial != kNoChangeSpatial && !EvenFrameSize()) {
action_.spatial = kNoChangeSpatial;
// Only one action (spatial or temporal) is allowed at a given time, so need
// to check whether temporal action is currently selected.
@@ -722,35 +717,36 @@ void VCMQmResolution::ConvertSpatialFractionalToWhole() {
bool found = false;
int isel = kDownActionHistorySize;
for (int i = 0; i < kDownActionHistorySize; ++i) {
- if (down_action_history_[i].spatial == kOneHalfSpatialUniform) {
+ if (down_action_history_[i].spatial == kOneHalfSpatialUniform) {
isel = i;
found = true;
break;
}
}
if (found) {
- action_.spatial = kOneQuarterSpatialUniform;
- state_dec_factor_spatial_ = state_dec_factor_spatial_ /
- (kFactorWidthSpatial[kOneHalfSpatialUniform] *
- kFactorHeightSpatial[kOneHalfSpatialUniform]);
- // Check if switching to 1/2x1/2 (=1/4) spatial is allowed.
- ConstrainAmountOfDownSampling();
- if (action_.spatial == kNoChangeSpatial) {
- // Not allowed. Go back to 3/4x3/4 spatial.
- action_.spatial = kOneHalfSpatialUniform;
- state_dec_factor_spatial_ = state_dec_factor_spatial_ *
- kFactorWidthSpatial[kOneHalfSpatialUniform] *
- kFactorHeightSpatial[kOneHalfSpatialUniform];
- } else {
- // Switching is allowed. Remove 3/4x3/4 from the history, and update
- // the frame size.
- for (int i = isel; i < kDownActionHistorySize - 1; ++i) {
- down_action_history_[i].spatial =
- down_action_history_[i + 1].spatial;
- }
- width_ = width_ * kFactorWidthSpatial[kOneHalfSpatialUniform];
- height_ = height_ * kFactorHeightSpatial[kOneHalfSpatialUniform];
- }
+ action_.spatial = kOneQuarterSpatialUniform;
+ state_dec_factor_spatial_ =
+ state_dec_factor_spatial_ /
+ (kFactorWidthSpatial[kOneHalfSpatialUniform] *
+ kFactorHeightSpatial[kOneHalfSpatialUniform]);
+ // Check if switching to 1/2x1/2 (=1/4) spatial is allowed.
+ ConstrainAmountOfDownSampling();
+ if (action_.spatial == kNoChangeSpatial) {
+ // Not allowed. Go back to 3/4x3/4 spatial.
+ action_.spatial = kOneHalfSpatialUniform;
+ state_dec_factor_spatial_ =
+ state_dec_factor_spatial_ *
+ kFactorWidthSpatial[kOneHalfSpatialUniform] *
+ kFactorHeightSpatial[kOneHalfSpatialUniform];
+ } else {
+ // Switching is allowed. Remove 3/4x3/4 from the history, and update
+ // the frame size.
+ for (int i = isel; i < kDownActionHistorySize - 1; ++i) {
+ down_action_history_[i].spatial = down_action_history_[i + 1].spatial;
+ }
+ width_ = width_ * kFactorWidthSpatial[kOneHalfSpatialUniform];
+ height_ = height_ * kFactorHeightSpatial[kOneHalfSpatialUniform];
+ }
}
}
}
@@ -815,8 +811,8 @@ void VCMQmResolution::ConstrainAmountOfDownSampling() {
float spatial_width_fact = kFactorWidthSpatial[action_.spatial];
float spatial_height_fact = kFactorHeightSpatial[action_.spatial];
float temporal_fact = kFactorTemporal[action_.temporal];
- float new_dec_factor_spatial = state_dec_factor_spatial_ *
- spatial_width_fact * spatial_height_fact;
+ float new_dec_factor_spatial =
+ state_dec_factor_spatial_ * spatial_width_fact * spatial_height_fact;
float new_dec_factor_temp = state_dec_factor_temporal_ * temporal_fact;
// No spatial sampling if current frame size is too small, or if the
@@ -908,8 +904,7 @@ VCMQmRobustness::VCMQmRobustness() {
Reset();
}
-VCMQmRobustness::~VCMQmRobustness() {
-}
+VCMQmRobustness::~VCMQmRobustness() {}
void VCMQmRobustness::Reset() {
prev_total_rate_ = 0.0f;
@@ -928,7 +923,7 @@ float VCMQmRobustness::AdjustFecFactor(uint8_t code_rate_delta,
int64_t rtt_time,
uint8_t packet_loss) {
// Default: no adjustment
- float adjust_fec = 1.0f;
+ float adjust_fec = 1.0f;
if (content_metrics_ == NULL) {
return adjust_fec;
}
@@ -955,4 +950,4 @@ bool VCMQmRobustness::SetUepProtection(uint8_t code_rate_delta,
// Default.
return false;
}
-} // namespace
+} // namespace webrtc
diff --git a/webrtc/modules/video_coding/main/source/qm_select.h b/webrtc/modules/video_coding/qm_select.h
index 079e7f8879..764b5ed8e3 100644
--- a/webrtc/modules/video_coding/main/source/qm_select.h
+++ b/webrtc/modules/video_coding/qm_select.h
@@ -30,8 +30,7 @@ struct VCMResolutionScale {
spatial_height_fact(1.0f),
temporal_fact(1.0f),
change_resolution_spatial(false),
- change_resolution_temporal(false) {
- }
+ change_resolution_temporal(false) {}
uint16_t codec_width;
uint16_t codec_height;
float frame_rate;
@@ -43,20 +42,20 @@ struct VCMResolutionScale {
};
enum ImageType {
- kQCIF = 0, // 176x144
- kHCIF, // 264x216 = half(~3/4x3/4) CIF.
- kQVGA, // 320x240 = quarter VGA.
- kCIF, // 352x288
- kHVGA, // 480x360 = half(~3/4x3/4) VGA.
- kVGA, // 640x480
- kQFULLHD, // 960x540 = quarter FULLHD, and half(~3/4x3/4) WHD.
- kWHD, // 1280x720
- kFULLHD, // 1920x1080
+ kQCIF = 0, // 176x144
+ kHCIF, // 264x216 = half(~3/4x3/4) CIF.
+ kQVGA, // 320x240 = quarter VGA.
+ kCIF, // 352x288
+ kHVGA, // 480x360 = half(~3/4x3/4) VGA.
+ kVGA, // 640x480
+ kQFULLHD, // 960x540 = quarter FULLHD, and half(~3/4x3/4) WHD.
+ kWHD, // 1280x720
+ kFULLHD, // 1920x1080
kNumImageTypes
};
-const uint32_t kSizeOfImageType[kNumImageTypes] =
-{ 25344, 57024, 76800, 101376, 172800, 307200, 518400, 921600, 2073600 };
+const uint32_t kSizeOfImageType[kNumImageTypes] = {
+ 25344, 57024, 76800, 101376, 172800, 307200, 518400, 921600, 2073600};
enum FrameRateLevelClass {
kFrameRateLow,
@@ -65,17 +64,10 @@ enum FrameRateLevelClass {
kFrameRateHigh
};
-enum ContentLevelClass {
- kLow,
- kHigh,
- kDefault
-};
+enum ContentLevelClass { kLow, kHigh, kDefault };
struct VCMContFeature {
- VCMContFeature()
- : value(0.0f),
- level(kDefault) {
- }
+ VCMContFeature() : value(0.0f), level(kDefault) {}
void Reset() {
value = 0.0f;
level = kDefault;
@@ -84,43 +76,34 @@ struct VCMContFeature {
ContentLevelClass level;
};
-enum UpDownAction {
- kUpResolution,
- kDownResolution
-};
+enum UpDownAction { kUpResolution, kDownResolution };
enum SpatialAction {
kNoChangeSpatial,
- kOneHalfSpatialUniform, // 3/4 x 3/4: 9/6 ~1/2 pixel reduction.
- kOneQuarterSpatialUniform, // 1/2 x 1/2: 1/4 pixel reduction.
+ kOneHalfSpatialUniform, // 3/4 x 3/4: 9/6 ~1/2 pixel reduction.
+ kOneQuarterSpatialUniform, // 1/2 x 1/2: 1/4 pixel reduction.
kNumModesSpatial
};
enum TemporalAction {
kNoChangeTemporal,
- kTwoThirdsTemporal, // 2/3 frame rate reduction
- kOneHalfTemporal, // 1/2 frame rate reduction
+ kTwoThirdsTemporal, // 2/3 frame rate reduction
+ kOneHalfTemporal, // 1/2 frame rate reduction
kNumModesTemporal
};
struct ResolutionAction {
- ResolutionAction()
- : spatial(kNoChangeSpatial),
- temporal(kNoChangeTemporal) {
- }
+ ResolutionAction() : spatial(kNoChangeSpatial), temporal(kNoChangeTemporal) {}
SpatialAction spatial;
TemporalAction temporal;
};
// Down-sampling factors for spatial (width and height), and temporal.
-const float kFactorWidthSpatial[kNumModesSpatial] =
- { 1.0f, 4.0f / 3.0f, 2.0f };
+const float kFactorWidthSpatial[kNumModesSpatial] = {1.0f, 4.0f / 3.0f, 2.0f};
-const float kFactorHeightSpatial[kNumModesSpatial] =
- { 1.0f, 4.0f / 3.0f, 2.0f };
+const float kFactorHeightSpatial[kNumModesSpatial] = {1.0f, 4.0f / 3.0f, 2.0f};
-const float kFactorTemporal[kNumModesTemporal] =
- { 1.0f, 1.5f, 2.0f };
+const float kFactorTemporal[kNumModesTemporal] = {1.0f, 1.5f, 2.0f};
enum EncoderState {
kStableEncoding, // Low rate mis-match, stable buffer levels.
@@ -297,7 +280,7 @@ class VCMQmResolution : public VCMQmMethod {
// Select the directional (1x2 or 2x1) spatial down-sampling action.
void SelectSpatialDirectionMode(float transition_rate);
- enum { kDownActionHistorySize = 10};
+ enum { kDownActionHistorySize = 10 };
VCMResolutionScale* qm_;
// Encoder rate control parameters.
diff --git a/webrtc/modules/video_coding/main/source/qm_select_data.h b/webrtc/modules/video_coding/qm_select_data.h
index dc6bce4811..49190ef53b 100644
--- a/webrtc/modules/video_coding/main/source/qm_select_data.h
+++ b/webrtc/modules/video_coding/qm_select_data.h
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_VIDEO_CODING_SOURCE_QM_SELECT_DATA_H_
-#define WEBRTC_MODULES_VIDEO_CODING_SOURCE_QM_SELECT_DATA_H_
+#ifndef WEBRTC_MODULES_VIDEO_CODING_QM_SELECT_DATA_H_
+#define WEBRTC_MODULES_VIDEO_CODING_QM_SELECT_DATA_H_
/***************************************************************
*QMSelectData.h
@@ -69,36 +69,36 @@ const uint16_t kMaxRateQm[9] = {
// Frame rate scale for maximum transition rate.
const float kFrameRateFac[4] = {
- 0.5f, // Low
- 0.7f, // Middle level 1
- 0.85f, // Middle level 2
- 1.0f, // High
+ 0.5f, // Low
+ 0.7f, // Middle level 1
+ 0.85f, // Middle level 2
+ 1.0f, // High
};
// Scale for transitional rate: based on content class
// motion=L/H/D,spatial==L/H/D: for low, high, middle levels
const float kScaleTransRateQm[18] = {
// VGA and lower
- 0.40f, // L, L
- 0.50f, // L, H
- 0.40f, // L, D
- 0.60f, // H ,L
- 0.60f, // H, H
- 0.60f, // H, D
- 0.50f, // D, L
- 0.50f, // D, D
- 0.50f, // D, H
+ 0.40f, // L, L
+ 0.50f, // L, H
+ 0.40f, // L, D
+ 0.60f, // H ,L
+ 0.60f, // H, H
+ 0.60f, // H, D
+ 0.50f, // D, L
+ 0.50f, // D, D
+ 0.50f, // D, H
// over VGA
- 0.40f, // L, L
- 0.50f, // L, H
- 0.40f, // L, D
- 0.60f, // H ,L
- 0.60f, // H, H
- 0.60f, // H, D
- 0.50f, // D, L
- 0.50f, // D, D
- 0.50f, // D, H
+ 0.40f, // L, L
+ 0.50f, // L, H
+ 0.40f, // L, D
+ 0.60f, // H ,L
+ 0.60f, // H, H
+ 0.60f, // H, D
+ 0.50f, // D, L
+ 0.50f, // D, D
+ 0.50f, // D, H
};
// Threshold on the target rate relative to transitional rate.
@@ -108,73 +108,73 @@ const float kFacLowRate = 0.5f;
// motion=L/H/D,spatial==L/H/D, for low, high, middle levels;
// rate = 0/1/2, for target rate state relative to transition rate.
const uint8_t kSpatialAction[27] = {
-// rateClass = 0:
- 1, // L, L
- 1, // L, H
- 1, // L, D
- 4, // H ,L
- 1, // H, H
- 4, // H, D
- 4, // D, L
- 1, // D, H
- 2, // D, D
-
-// rateClass = 1:
- 1, // L, L
- 1, // L, H
- 1, // L, D
- 2, // H ,L
- 1, // H, H
- 2, // H, D
- 2, // D, L
- 1, // D, H
- 2, // D, D
-
-// rateClass = 2:
- 1, // L, L
- 1, // L, H
- 1, // L, D
- 2, // H ,L
- 1, // H, H
- 2, // H, D
- 2, // D, L
- 1, // D, H
- 2, // D, D
+ // rateClass = 0:
+ 1, // L, L
+ 1, // L, H
+ 1, // L, D
+ 4, // H ,L
+ 1, // H, H
+ 4, // H, D
+ 4, // D, L
+ 1, // D, H
+ 2, // D, D
+
+ // rateClass = 1:
+ 1, // L, L
+ 1, // L, H
+ 1, // L, D
+ 2, // H ,L
+ 1, // H, H
+ 2, // H, D
+ 2, // D, L
+ 1, // D, H
+ 2, // D, D
+
+ // rateClass = 2:
+ 1, // L, L
+ 1, // L, H
+ 1, // L, D
+ 2, // H ,L
+ 1, // H, H
+ 2, // H, D
+ 2, // D, L
+ 1, // D, H
+ 2, // D, D
};
const uint8_t kTemporalAction[27] = {
-// rateClass = 0:
- 3, // L, L
- 2, // L, H
- 2, // L, D
- 1, // H ,L
- 3, // H, H
- 1, // H, D
- 1, // D, L
- 2, // D, H
- 1, // D, D
-
-// rateClass = 1:
- 3, // L, L
- 3, // L, H
- 3, // L, D
- 1, // H ,L
- 3, // H, H
- 1, // H, D
- 1, // D, L
- 3, // D, H
- 1, // D, D
-
-// rateClass = 2:
- 1, // L, L
- 3, // L, H
- 3, // L, D
- 1, // H ,L
- 3, // H, H
- 1, // H, D
- 1, // D, L
- 3, // D, H
- 1, // D, D
+ // rateClass = 0:
+ 3, // L, L
+ 2, // L, H
+ 2, // L, D
+ 1, // H ,L
+ 3, // H, H
+ 1, // H, D
+ 1, // D, L
+ 2, // D, H
+ 1, // D, D
+
+ // rateClass = 1:
+ 3, // L, L
+ 3, // L, H
+ 3, // L, D
+ 1, // H ,L
+ 3, // H, H
+ 1, // H, D
+ 1, // D, L
+ 3, // D, H
+ 1, // D, D
+
+ // rateClass = 2:
+ 1, // L, L
+ 3, // L, H
+ 3, // L, D
+ 1, // H ,L
+ 3, // H, H
+ 1, // H, D
+ 1, // D, L
+ 3, // D, H
+ 1, // D, D
};
// Control the total amount of down-sampling allowed.
@@ -224,4 +224,4 @@ const float kSpatialErrVertVsHoriz = 0.1f; // percentage to favor H over V
} // namespace webrtc
-#endif // WEBRTC_MODULES_VIDEO_CODING_SOURCE_QM_SELECT_DATA_H_
+#endif // WEBRTC_MODULES_VIDEO_CODING_QM_SELECT_DATA_H_
diff --git a/webrtc/modules/video_coding/main/source/qm_select_unittest.cc b/webrtc/modules/video_coding/qm_select_unittest.cc
index 6abc0d3099..f8542ec676 100644
--- a/webrtc/modules/video_coding/main/source/qm_select_unittest.cc
+++ b/webrtc/modules/video_coding/qm_select_unittest.cc
@@ -15,8 +15,8 @@
#include "testing/gtest/include/gtest/gtest.h"
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/modules/video_coding/main/source/qm_select.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/video_coding/qm_select.h"
namespace webrtc {
@@ -32,10 +32,9 @@ const float kTemporalHigh = 0.1f;
class QmSelectTest : public ::testing::Test {
protected:
QmSelectTest()
- : qm_resolution_(new VCMQmResolution()),
- content_metrics_(new VideoContentMetrics()),
- qm_scale_(NULL) {
- }
+ : qm_resolution_(new VCMQmResolution()),
+ content_metrics_(new VideoContentMetrics()),
+ qm_scale_(NULL) {}
VCMQmResolution* qm_resolution_;
VideoContentMetrics* content_metrics_;
VCMResolutionScale* qm_scale_;
@@ -87,8 +86,8 @@ TEST_F(QmSelectTest, HandleInputs) {
qm_resolution_->UpdateContent(content_metrics);
// Content metrics are NULL: Expect success and no down-sampling action.
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0, 1.0, 1.0, 640, 480,
- 30.0f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 1.0, 1.0, 1.0, 640, 480, 30.0f));
}
// TODO(marpan): Add a test for number of temporal layers > 1.
@@ -118,8 +117,8 @@ TEST_F(QmSelectTest, NoActionHighRate) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(0, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 640, 480,
- 30.0f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 640, 480, 30.0f));
}
// Rate is well below transition, down-sampling action is taken,
@@ -149,40 +148,40 @@ TEST_F(QmSelectTest, DownActionLowRate) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(3, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240,
- 30.0f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240, 30.0f));
qm_resolution_->ResetDownSamplingState();
// Low motion, low spatial: 2/3 temporal is expected.
UpdateQmContentData(kTemporalLow, kSpatialLow, kSpatialLow, kSpatialLow);
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(0, qm_resolution_->ComputeContentClass());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 640, 480,
- 20.5f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 640, 480, 20.5f));
qm_resolution_->ResetDownSamplingState();
// Medium motion, low spatial: 2x2 spatial expected.
UpdateQmContentData(kTemporalMedium, kSpatialLow, kSpatialLow, kSpatialLow);
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(6, qm_resolution_->ComputeContentClass());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240,
- 30.0f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240, 30.0f));
qm_resolution_->ResetDownSamplingState();
// High motion, high spatial: 2/3 temporal expected.
UpdateQmContentData(kTemporalHigh, kSpatialHigh, kSpatialHigh, kSpatialHigh);
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(4, qm_resolution_->ComputeContentClass());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 640, 480,
- 20.5f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 640, 480, 20.5f));
qm_resolution_->ResetDownSamplingState();
// Low motion, high spatial: 1/2 temporal expected.
UpdateQmContentData(kTemporalLow, kSpatialHigh, kSpatialHigh, kSpatialHigh);
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 2.0f, 640, 480,
- 15.5f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 2.0f, 640, 480, 15.5f));
qm_resolution_->ResetDownSamplingState();
// Medium motion, high spatial: 1/2 temporal expected.
@@ -190,8 +189,8 @@ TEST_F(QmSelectTest, DownActionLowRate) {
kSpatialHigh);
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(7, qm_resolution_->ComputeContentClass());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 2.0f, 640, 480,
- 15.5f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 2.0f, 640, 480, 15.5f));
qm_resolution_->ResetDownSamplingState();
// High motion, medium spatial: 2x2 spatial expected.
@@ -200,8 +199,8 @@ TEST_F(QmSelectTest, DownActionLowRate) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(5, qm_resolution_->ComputeContentClass());
// Target frame rate for frame dropper should be the same as previous == 15.
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240,
- 30.0f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240, 30.0f));
qm_resolution_->ResetDownSamplingState();
// Low motion, medium spatial: high frame rate, so 1/2 temporal expected.
@@ -209,8 +208,8 @@ TEST_F(QmSelectTest, DownActionLowRate) {
kSpatialMedium);
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(2, qm_resolution_->ComputeContentClass());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 2.0f, 640, 480,
- 15.5f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 2.0f, 640, 480, 15.5f));
qm_resolution_->ResetDownSamplingState();
// Medium motion, medium spatial: high frame rate, so 2/3 temporal expected.
@@ -218,8 +217,8 @@ TEST_F(QmSelectTest, DownActionLowRate) {
kSpatialMedium);
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(8, qm_resolution_->ComputeContentClass());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 640, 480,
- 20.5f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 640, 480, 20.5f));
}
// Rate mis-match is high, and we have over-shooting.
@@ -249,16 +248,16 @@ TEST_F(QmSelectTest, DownActionHighRateMMOvershoot) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(3, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStressedEncoding, qm_resolution_->GetEncoderState());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 4.0f / 3.0f, 4.0f / 3.0f,
- 1.0f, 480, 360, 30.0f));
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 4.0f / 3.0f, 4.0f / 3.0f, 1.0f,
+ 480, 360, 30.0f));
qm_resolution_->ResetDownSamplingState();
// Low motion, high spatial
UpdateQmContentData(kTemporalLow, kSpatialHigh, kSpatialHigh, kSpatialHigh);
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 640, 480,
- 20.5f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 640, 480, 20.5f));
}
// Rate mis-match is high, target rate is below max for down-sampling,
@@ -288,16 +287,16 @@ TEST_F(QmSelectTest, NoActionHighRateMMUndershoot) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(3, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kEasyEncoding, qm_resolution_->GetEncoderState());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 640, 480,
- 30.0f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 640, 480, 30.0f));
qm_resolution_->ResetDownSamplingState();
// Low motion, high spatial
UpdateQmContentData(kTemporalLow, kSpatialHigh, kSpatialHigh, kSpatialHigh);
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 640, 480,
- 30.0f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 640, 480, 30.0f));
}
// Buffer is underflowing, and target rate is below max for down-sampling,
@@ -332,16 +331,16 @@ TEST_F(QmSelectTest, DownActionBufferUnderflow) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(3, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStressedEncoding, qm_resolution_->GetEncoderState());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 4.0f / 3.0f, 4.0f / 3.0f,
- 1.0f, 480, 360, 30.0f));
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 4.0f / 3.0f, 4.0f / 3.0f, 1.0f,
+ 480, 360, 30.0f));
qm_resolution_->ResetDownSamplingState();
// Low motion, high spatial
UpdateQmContentData(kTemporalLow, kSpatialHigh, kSpatialHigh, kSpatialHigh);
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 640, 480,
- 20.5f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 640, 480, 20.5f));
}
// Target rate is below max for down-sampling, but buffer level is stable,
@@ -376,16 +375,16 @@ TEST_F(QmSelectTest, NoActionBufferStable) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(3, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 640, 480,
- 30.0f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 640, 480, 30.0f));
qm_resolution_->ResetDownSamplingState();
// Low motion, high spatial
UpdateQmContentData(kTemporalLow, kSpatialHigh, kSpatialHigh, kSpatialHigh);
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 640, 480,
- 30.0f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 640, 480, 30.0f));
}
// Very low rate, but no spatial down-sampling below some size (QCIF).
@@ -414,8 +413,8 @@ TEST_F(QmSelectTest, LimitDownSpatialAction) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(3, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 176, 144,
- 30.0f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 176, 144, 30.0f));
}
// Very low rate, but no frame reduction below some frame_rate (8fps).
@@ -445,8 +444,8 @@ TEST_F(QmSelectTest, LimitDownTemporalAction) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(2, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 640, 480,
- 8.0f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 640, 480, 8.0f));
}
// Two stages: spatial down-sample and then back up spatially,
@@ -468,7 +467,7 @@ TEST_F(QmSelectTest, 2StageDownSpatialUpSpatial) {
int incoming_frame_rate[] = {30, 30, 30};
uint8_t fraction_lost[] = {10, 10, 10};
UpdateQmRateData(target_rate, encoder_sent_rate, incoming_frame_rate,
- fraction_lost, 3);
+ fraction_lost, 3);
// Update content: motion level, and 3 spatial prediction errors.
// High motion, low spatial.
@@ -476,8 +475,8 @@ TEST_F(QmSelectTest, 2StageDownSpatialUpSpatial) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(3, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240,
- 30.0f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240, 30.0f));
// Reset and go up in rate: expected to go back up, in 2 stages of 3/4.
qm_resolution_->ResetRates();
@@ -493,8 +492,8 @@ TEST_F(QmSelectTest, 2StageDownSpatialUpSpatial) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
float scale = (4.0f / 3.0f) / 2.0f;
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, scale, scale, 1.0f, 480, 360,
- 30.0f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, scale, scale, 1.0f, 480, 360, 30.0f));
qm_resolution_->UpdateCodecParameters(30.0f, 480, 360);
EXPECT_EQ(4, qm_resolution_->GetImageType(480, 360));
@@ -522,7 +521,7 @@ TEST_F(QmSelectTest, 2StageDownSpatialUpSpatialUndershoot) {
int incoming_frame_rate[] = {30, 30, 30};
uint8_t fraction_lost[] = {10, 10, 10};
UpdateQmRateData(target_rate, encoder_sent_rate, incoming_frame_rate,
- fraction_lost, 3);
+ fraction_lost, 3);
// Update content: motion level, and 3 spatial prediction errors.
// High motion, low spatial.
@@ -530,8 +529,8 @@ TEST_F(QmSelectTest, 2StageDownSpatialUpSpatialUndershoot) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(3, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240,
- 30.0f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240, 30.0f));
// Reset rates and simulate under-shooting scenario.: expect to go back up.
// Goes up spatially in two stages for 1/2x1/2 down-sampling.
@@ -548,8 +547,8 @@ TEST_F(QmSelectTest, 2StageDownSpatialUpSpatialUndershoot) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(kEasyEncoding, qm_resolution_->GetEncoderState());
float scale = (4.0f / 3.0f) / 2.0f;
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, scale, scale, 1.0f, 480, 360,
- 30.0f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, scale, scale, 1.0f, 480, 360, 30.0f));
qm_resolution_->UpdateCodecParameters(30.0f, 480, 360);
EXPECT_EQ(4, qm_resolution_->GetImageType(480, 360));
@@ -577,7 +576,7 @@ TEST_F(QmSelectTest, 2StageDownSpatialNoActionUp) {
int incoming_frame_rate[] = {30, 30, 30};
uint8_t fraction_lost[] = {10, 10, 10};
UpdateQmRateData(target_rate, encoder_sent_rate, incoming_frame_rate,
- fraction_lost, 3);
+ fraction_lost, 3);
// Update content: motion level, and 3 spatial prediction errors.
// High motion, low spatial.
@@ -585,8 +584,8 @@ TEST_F(QmSelectTest, 2StageDownSpatialNoActionUp) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(3, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240,
- 30.0f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240, 30.0f));
// Reset and simulate large rate mis-match: expect no action to go back up.
qm_resolution_->ResetRates();
@@ -601,8 +600,8 @@ TEST_F(QmSelectTest, 2StageDownSpatialNoActionUp) {
fraction_lost2, 5);
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(kStressedEncoding, qm_resolution_->GetEncoderState());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 320, 240,
- 30.0f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 320, 240, 30.0f));
}
// Two stages: temporally down-sample and then back up temporally,
@@ -632,8 +631,8 @@ TEST_F(QmSelectTest, 2StatgeDownTemporalUpTemporal) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 2.0f, 640, 480,
- 15.5f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 2.0f, 640, 480, 15.5f));
// Reset rates and go up in rate: expect to go back up.
qm_resolution_->ResetRates();
@@ -646,8 +645,8 @@ TEST_F(QmSelectTest, 2StatgeDownTemporalUpTemporal) {
fraction_lost2, 5);
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 0.5f, 640, 480,
- 30.0f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 0.5f, 640, 480, 30.0f));
}
// Two stages: temporal down-sample and then back up temporally, since encoder
@@ -669,7 +668,7 @@ TEST_F(QmSelectTest, 2StatgeDownTemporalUpTemporalUndershoot) {
int incoming_frame_rate[] = {30, 30, 30};
uint8_t fraction_lost[] = {10, 10, 10};
UpdateQmRateData(target_rate, encoder_sent_rate, incoming_frame_rate,
- fraction_lost, 3);
+ fraction_lost, 3);
// Update content: motion level, and 3 spatial prediction errors.
// Low motion, high spatial.
@@ -677,8 +676,8 @@ TEST_F(QmSelectTest, 2StatgeDownTemporalUpTemporalUndershoot) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 2.0f, 640, 480,
- 15.5f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 2.0f, 640, 480, 15.5f));
// Reset rates and simulate under-shooting scenario.: expect to go back up.
qm_resolution_->ResetRates();
@@ -691,8 +690,8 @@ TEST_F(QmSelectTest, 2StatgeDownTemporalUpTemporalUndershoot) {
fraction_lost2, 5);
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(kEasyEncoding, qm_resolution_->GetEncoderState());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 0.5f, 640, 480,
- 30.0f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 0.5f, 640, 480, 30.0f));
}
// Two stages: temporal down-sample and then no action to go up,
@@ -736,8 +735,8 @@ TEST_F(QmSelectTest, 2StageDownTemporalNoActionUp) {
fraction_lost2, 5);
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(kStressedEncoding, qm_resolution_->GetEncoderState());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 640, 480,
- 15.0f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 640, 480, 15.0f));
}
// 3 stages: spatial down-sample, followed by temporal down-sample,
// and then go up to full state, as encoding rate has increased.
@@ -766,8 +765,8 @@ TEST_F(QmSelectTest, 3StageDownSpatialTemporlaUpSpatialTemporal) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(3, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240,
- 30.0f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240, 30.0f));
// Change content data: expect temporal down-sample.
qm_resolution_->UpdateCodecParameters(30.0f, 320, 240);
@@ -780,7 +779,7 @@ TEST_F(QmSelectTest, 3StageDownSpatialTemporlaUpSpatialTemporal) {
int incoming_frame_rate2[] = {30, 30, 30, 30, 30};
uint8_t fraction_lost2[] = {10, 10, 10, 10, 10};
UpdateQmRateData(target_rate2, encoder_sent_rate2, incoming_frame_rate2,
- fraction_lost2, 5);
+ fraction_lost2, 5);
// Update content: motion level, and 3 spatial prediction errors.
// Low motion, high spatial.
@@ -788,8 +787,8 @@ TEST_F(QmSelectTest, 3StageDownSpatialTemporlaUpSpatialTemporal) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 320, 240,
- 20.5f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 320, 240, 20.5f));
// Reset rates and go high up in rate: expect to go back up both spatial
// and temporally. The 1/2x1/2 spatial is undone in two stages.
@@ -806,8 +805,8 @@ TEST_F(QmSelectTest, 3StageDownSpatialTemporlaUpSpatialTemporal) {
EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
float scale = (4.0f / 3.0f) / 2.0f;
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, scale, scale, 2.0f / 3.0f,
- 480, 360, 30.0f));
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, scale, scale, 2.0f / 3.0f, 480,
+ 360, 30.0f));
qm_resolution_->UpdateCodecParameters(30.0f, 480, 360);
EXPECT_EQ(4, qm_resolution_->GetImageType(480, 360));
@@ -842,8 +841,8 @@ TEST_F(QmSelectTest, NoActionTooMuchDownSampling) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(3, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 640, 360,
- 30.0f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 640, 360, 30.0f));
// Reset and lower rates to get another spatial action (3/4x3/4).
// Lower the frame rate for spatial to be selected again.
@@ -865,8 +864,8 @@ TEST_F(QmSelectTest, NoActionTooMuchDownSampling) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(5, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 4.0f / 3.0f, 4.0f / 3.0f,
- 1.0f, 480, 270, 10.0f));
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 4.0f / 3.0f, 4.0f / 3.0f, 1.0f,
+ 480, 270, 10.0f));
// Reset and go to very low rate: no action should be taken,
// we went down too much already.
@@ -883,8 +882,8 @@ TEST_F(QmSelectTest, NoActionTooMuchDownSampling) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(5, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 480, 270,
- 10.0f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 480, 270, 10.0f));
}
// Multiple down-sampling stages and then undo all of them.
@@ -917,8 +916,8 @@ TEST_F(QmSelectTest, MultipleStagesCheckActionHistory1) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(6, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 4.0f / 3.0f, 4.0f / 3.0f,
- 1.0f, 480, 360, 30.0f));
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 4.0f / 3.0f, 4.0f / 3.0f, 1.0f,
+ 480, 360, 30.0f));
// Go down 2/3 temporal.
qm_resolution_->UpdateCodecParameters(30.0f, 480, 360);
EXPECT_EQ(4, qm_resolution_->GetImageType(480, 360));
@@ -936,8 +935,8 @@ TEST_F(QmSelectTest, MultipleStagesCheckActionHistory1) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 480, 360,
- 20.5f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 480, 360, 20.5f));
// Go down 3/4x3/4 spatial:
qm_resolution_->UpdateCodecParameters(20.0f, 480, 360);
@@ -947,7 +946,7 @@ TEST_F(QmSelectTest, MultipleStagesCheckActionHistory1) {
int incoming_frame_rate3[] = {20, 20, 20, 20, 20};
uint8_t fraction_lost3[] = {10, 10, 10, 10, 10};
UpdateQmRateData(target_rate3, encoder_sent_rate3, incoming_frame_rate3,
- fraction_lost3, 5);
+ fraction_lost3, 5);
// Update content: motion level, and 3 spatial prediction errors.
// High motion, low spatial.
@@ -957,8 +956,8 @@ TEST_F(QmSelectTest, MultipleStagesCheckActionHistory1) {
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
// The two spatial actions of 3/4x3/4 are converted to 1/2x1/2,
// so scale factor is 2.0.
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240,
- 20.0f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240, 20.0f));
// Reset rates and go high up in rate: expect to go up:
// 1/2x1x2 spatial and 1/2 temporally.
@@ -1018,8 +1017,8 @@ TEST_F(QmSelectTest, MultipleStagesCheckActionHistory2) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(6, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240,
- 30.0f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240, 30.0f));
// Go down 2/3 temporal.
qm_resolution_->UpdateCodecParameters(30.0f, 320, 240);
@@ -1039,8 +1038,8 @@ TEST_F(QmSelectTest, MultipleStagesCheckActionHistory2) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(7, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 320, 240,
- 20.5f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 320, 240, 20.5f));
// Go up 2/3 temporally.
qm_resolution_->UpdateCodecParameters(20.0f, 320, 240);
@@ -1076,8 +1075,8 @@ TEST_F(QmSelectTest, MultipleStagesCheckActionHistory2) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 320, 240,
- 20.5f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 320, 240, 20.5f));
// Go up spatial and temporal. Spatial undoing is done in 2 stages.
qm_resolution_->UpdateCodecParameters(20.5f, 320, 240);
@@ -1092,8 +1091,8 @@ TEST_F(QmSelectTest, MultipleStagesCheckActionHistory2) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
float scale = (4.0f / 3.0f) / 2.0f;
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, scale, scale, 2.0f / 3.0f,
- 480, 360, 30.0f));
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, scale, scale, 2.0f / 3.0f, 480,
+ 360, 30.0f));
qm_resolution_->UpdateCodecParameters(30.0f, 480, 360);
EXPECT_EQ(4, qm_resolution_->GetImageType(480, 360));
@@ -1131,8 +1130,8 @@ TEST_F(QmSelectTest, MultipleStagesCheckActionHistory3) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(6, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 4.0f / 3.0f, 4.0f / 3.0f,
- 1.0f, 480, 360, 30.0f));
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 4.0f / 3.0f, 4.0f / 3.0f, 1.0f,
+ 480, 360, 30.0f));
// Go down 2/3 temporal.
qm_resolution_->UpdateCodecParameters(30.0f, 480, 360);
@@ -1151,8 +1150,8 @@ TEST_F(QmSelectTest, MultipleStagesCheckActionHistory3) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 480, 360,
- 20.5f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 480, 360, 20.5f));
// Go up 2/3 temporal.
qm_resolution_->UpdateCodecParameters(20.5f, 480, 360);
@@ -1184,8 +1183,8 @@ TEST_F(QmSelectTest, MultipleStagesCheckActionHistory3) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 3.0f / 4.0f, 3.0f / 4.0f,
- 1.0f, 640, 480, 30.0f));
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 3.0f / 4.0f, 3.0f / 4.0f, 1.0f,
+ 640, 480, 30.0f));
}
// Two stages of 3/4x3/4 converted to one stage of 1/2x1/2.
@@ -1215,8 +1214,8 @@ TEST_F(QmSelectTest, ConvertThreeQuartersToOneHalf) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(6, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 4.0f / 3.0f, 4.0f / 3.0f,
- 1.0f, 480, 360, 30.0f));
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 4.0f / 3.0f, 4.0f / 3.0f, 1.0f,
+ 480, 360, 30.0f));
// Set rates to go down another 3/4 spatial. Should be converted ton 1/2.
qm_resolution_->UpdateCodecParameters(30.0f, 480, 360);
@@ -1235,8 +1234,8 @@ TEST_F(QmSelectTest, ConvertThreeQuartersToOneHalf) {
EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
EXPECT_EQ(6, qm_resolution_->ComputeContentClass());
EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
- EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240,
- 30.0f));
+ EXPECT_TRUE(
+ IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240, 30.0f));
}
void QmSelectTest::InitQmNativeData(float initial_bit_rate,
@@ -1244,11 +1243,9 @@ void QmSelectTest::InitQmNativeData(float initial_bit_rate,
int native_width,
int native_height,
int num_layers) {
- EXPECT_EQ(0, qm_resolution_->Initialize(initial_bit_rate,
- user_frame_rate,
- native_width,
- native_height,
- num_layers));
+ EXPECT_EQ(
+ 0, qm_resolution_->Initialize(initial_bit_rate, user_frame_rate,
+ native_width, native_height, num_layers));
}
void QmSelectTest::UpdateQmContentData(float motion_metric,
@@ -1281,8 +1278,7 @@ void QmSelectTest::UpdateQmRateData(int* target_rate,
float encoder_sent_rate_update = encoder_sent_rate[i];
float incoming_frame_rate_update = incoming_frame_rate[i];
uint8_t fraction_lost_update = fraction_lost[i];
- qm_resolution_->UpdateRates(target_rate_update,
- encoder_sent_rate_update,
+ qm_resolution_->UpdateRates(target_rate_update, encoder_sent_rate_update,
incoming_frame_rate_update,
fraction_lost_update);
}
diff --git a/webrtc/modules/video_coding/main/source/receiver.cc b/webrtc/modules/video_coding/receiver.cc
index 0707a9c3cd..fa2a2dca29 100644
--- a/webrtc/modules/video_coding/main/source/receiver.cc
+++ b/webrtc/modules/video_coding/receiver.cc
@@ -8,18 +8,20 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/video_coding/main/source/receiver.h"
+#include "webrtc/modules/video_coding/receiver.h"
#include <assert.h>
#include <cstdlib>
+#include <utility>
+#include <vector>
+#include "webrtc/base/logging.h"
#include "webrtc/base/trace_event.h"
-#include "webrtc/modules/video_coding/main/source/encoded_frame.h"
-#include "webrtc/modules/video_coding/main/source/internal_defines.h"
-#include "webrtc/modules/video_coding/main/source/media_opt_util.h"
+#include "webrtc/modules/video_coding/encoded_frame.h"
+#include "webrtc/modules/video_coding/internal_defines.h"
+#include "webrtc/modules/video_coding/media_opt_util.h"
#include "webrtc/system_wrappers/include/clock.h"
-#include "webrtc/system_wrappers/include/logging.h"
namespace webrtc {
@@ -40,9 +42,9 @@ VCMReceiver::VCMReceiver(VCMTiming* timing,
rtc::scoped_ptr<EventWrapper> jitter_buffer_event)
: crit_sect_(CriticalSectionWrapper::CreateCriticalSection()),
clock_(clock),
- jitter_buffer_(clock_, jitter_buffer_event.Pass()),
+ jitter_buffer_(clock_, std::move(jitter_buffer_event)),
timing_(timing),
- render_wait_event_(receiver_event.Pass()),
+ render_wait_event_(std::move(receiver_event)),
max_video_delay_ms_(kMaxVideoDelayMs) {
Reset();
}
@@ -71,8 +73,8 @@ int32_t VCMReceiver::InsertPacket(const VCMPacket& packet,
// Insert the packet into the jitter buffer. The packet can either be empty or
// contain media at this point.
bool retransmitted = false;
- const VCMFrameBufferEnum ret = jitter_buffer_.InsertPacket(packet,
- &retransmitted);
+ const VCMFrameBufferEnum ret =
+ jitter_buffer_.InsertPacket(packet, &retransmitted);
if (ret == kOldPacket) {
return VCM_OK;
} else if (ret == kFlushIndicator) {
@@ -95,13 +97,13 @@ void VCMReceiver::TriggerDecoderShutdown() {
}
VCMEncodedFrame* VCMReceiver::FrameForDecoding(uint16_t max_wait_time_ms,
- int64_t& next_render_time_ms,
- bool render_timing) {
+ int64_t* next_render_time_ms,
+ bool prefer_late_decoding) {
const int64_t start_time_ms = clock_->TimeInMilliseconds();
uint32_t frame_timestamp = 0;
// Exhaust wait time to get a complete frame for decoding.
- bool found_frame = jitter_buffer_.NextCompleteTimestamp(
- max_wait_time_ms, &frame_timestamp);
+ bool found_frame =
+ jitter_buffer_.NextCompleteTimestamp(max_wait_time_ms, &frame_timestamp);
if (!found_frame)
found_frame = jitter_buffer_.NextMaybeIncompleteTimestamp(&frame_timestamp);
@@ -113,14 +115,14 @@ VCMEncodedFrame* VCMReceiver::FrameForDecoding(uint16_t max_wait_time_ms,
timing_->SetJitterDelay(jitter_buffer_.EstimatedJitterMs());
const int64_t now_ms = clock_->TimeInMilliseconds();
timing_->UpdateCurrentDelay(frame_timestamp);
- next_render_time_ms = timing_->RenderTimeMs(frame_timestamp, now_ms);
+ *next_render_time_ms = timing_->RenderTimeMs(frame_timestamp, now_ms);
// Check render timing.
bool timing_error = false;
// Assume that render timing errors are due to changes in the video stream.
- if (next_render_time_ms < 0) {
+ if (*next_render_time_ms < 0) {
timing_error = true;
- } else if (std::abs(next_render_time_ms - now_ms) > max_video_delay_ms_) {
- int frame_delay = static_cast<int>(std::abs(next_render_time_ms - now_ms));
+ } else if (std::abs(*next_render_time_ms - now_ms) > max_video_delay_ms_) {
+ int frame_delay = static_cast<int>(std::abs(*next_render_time_ms - now_ms));
LOG(LS_WARNING) << "A frame about to be decoded is out of the configured "
<< "delay bounds (" << frame_delay << " > "
<< max_video_delay_ms_
@@ -140,14 +142,15 @@ VCMEncodedFrame* VCMReceiver::FrameForDecoding(uint16_t max_wait_time_ms,
return NULL;
}
- if (!render_timing) {
+ if (prefer_late_decoding) {
// Decode frame as close as possible to the render timestamp.
- const int32_t available_wait_time = max_wait_time_ms -
+ const int32_t available_wait_time =
+ max_wait_time_ms -
static_cast<int32_t>(clock_->TimeInMilliseconds() - start_time_ms);
- uint16_t new_max_wait_time = static_cast<uint16_t>(
- VCM_MAX(available_wait_time, 0));
+ uint16_t new_max_wait_time =
+ static_cast<uint16_t>(VCM_MAX(available_wait_time, 0));
uint32_t wait_time_ms = timing_->MaxWaitingTime(
- next_render_time_ms, clock_->TimeInMilliseconds());
+ *next_render_time_ms, clock_->TimeInMilliseconds());
if (new_max_wait_time < wait_time_ms) {
// We're not allowed to wait until the frame is supposed to be rendered,
// waiting as long as we're allowed to avoid busy looping, and then return
@@ -164,9 +167,9 @@ VCMEncodedFrame* VCMReceiver::FrameForDecoding(uint16_t max_wait_time_ms,
if (frame == NULL) {
return NULL;
}
- frame->SetRenderTime(next_render_time_ms);
- TRACE_EVENT_ASYNC_STEP1("webrtc", "Video", frame->TimeStamp(),
- "SetRenderTS", "render_time", next_render_time_ms);
+ frame->SetRenderTime(*next_render_time_ms);
+ TRACE_EVENT_ASYNC_STEP1("webrtc", "Video", frame->TimeStamp(), "SetRenderTS",
+ "render_time", *next_render_time_ms);
if (!frame->Complete()) {
// Update stats for incomplete frames.
bool retransmitted = false;
@@ -186,8 +189,7 @@ void VCMReceiver::ReleaseFrame(VCMEncodedFrame* frame) {
jitter_buffer_.ReleaseFrame(frame);
}
-void VCMReceiver::ReceiveStatistics(uint32_t* bitrate,
- uint32_t* framerate) {
+void VCMReceiver::ReceiveStatistics(uint32_t* bitrate, uint32_t* framerate) {
assert(bitrate);
assert(framerate);
jitter_buffer_.IncomingRateStatistics(framerate, bitrate);
@@ -209,8 +211,7 @@ void VCMReceiver::SetNackMode(VCMNackMode nackMode,
void VCMReceiver::SetNackSettings(size_t max_nack_list_size,
int max_packet_age_to_nack,
int max_incomplete_time_ms) {
- jitter_buffer_.SetNackSettings(max_nack_list_size,
- max_packet_age_to_nack,
+ jitter_buffer_.SetNackSettings(max_nack_list_size, max_packet_age_to_nack,
max_incomplete_time_ms);
}
diff --git a/webrtc/modules/video_coding/main/source/receiver.h b/webrtc/modules/video_coding/receiver.h
index e2515d438f..ff0eef8a6a 100644
--- a/webrtc/modules/video_coding/main/source/receiver.h
+++ b/webrtc/modules/video_coding/receiver.h
@@ -8,15 +8,17 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_RECEIVER_H_
-#define WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_RECEIVER_H_
+#ifndef WEBRTC_MODULES_VIDEO_CODING_RECEIVER_H_
+#define WEBRTC_MODULES_VIDEO_CODING_RECEIVER_H_
-#include "webrtc/modules/video_coding/main/source/jitter_buffer.h"
-#include "webrtc/modules/video_coding/main/source/packet.h"
-#include "webrtc/modules/video_coding/main/source/timing.h"
+#include <vector>
+
+#include "webrtc/modules/video_coding/jitter_buffer.h"
+#include "webrtc/modules/video_coding/packet.h"
+#include "webrtc/modules/video_coding/timing.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
-#include "webrtc/modules/video_coding/main/interface/video_coding.h"
-#include "webrtc/modules/video_coding/main/interface/video_coding_defines.h"
+#include "webrtc/modules/video_coding/include/video_coding.h"
+#include "webrtc/modules/video_coding/include/video_coding_defines.h"
namespace webrtc {
@@ -25,9 +27,7 @@ class VCMEncodedFrame;
class VCMReceiver {
public:
- VCMReceiver(VCMTiming* timing,
- Clock* clock,
- EventFactory* event_factory);
+ VCMReceiver(VCMTiming* timing, Clock* clock, EventFactory* event_factory);
// Using this constructor, you can specify a different event factory for the
// jitter buffer. Useful for unit tests when you want to simulate incoming
@@ -46,8 +46,8 @@ class VCMReceiver {
uint16_t frame_width,
uint16_t frame_height);
VCMEncodedFrame* FrameForDecoding(uint16_t max_wait_time_ms,
- int64_t& next_render_time_ms,
- bool render_timing = true);
+ int64_t* next_render_time_ms,
+ bool prefer_late_decoding);
void ReleaseFrame(VCMEncodedFrame* frame);
void ReceiveStatistics(uint32_t* bitrate, uint32_t* framerate);
uint32_t DiscardedPackets() const;
@@ -89,4 +89,4 @@ class VCMReceiver {
} // namespace webrtc
-#endif // WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_RECEIVER_H_
+#endif // WEBRTC_MODULES_VIDEO_CODING_RECEIVER_H_
diff --git a/webrtc/modules/video_coding/main/source/receiver_unittest.cc b/webrtc/modules/video_coding/receiver_unittest.cc
index 359b241e72..1f3a144bad 100644
--- a/webrtc/modules/video_coding/main/source/receiver_unittest.cc
+++ b/webrtc/modules/video_coding/receiver_unittest.cc
@@ -11,14 +11,16 @@
#include <list>
#include <queue>
+#include <vector>
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/base/checks.h"
-#include "webrtc/modules/video_coding/main/source/packet.h"
-#include "webrtc/modules/video_coding/main/source/receiver.h"
-#include "webrtc/modules/video_coding/main/source/test/stream_generator.h"
-#include "webrtc/modules/video_coding/main/source/timing.h"
-#include "webrtc/modules/video_coding/main/test/test_util.h"
+#include "webrtc/modules/video_coding/encoded_frame.h"
+#include "webrtc/modules/video_coding/packet.h"
+#include "webrtc/modules/video_coding/receiver.h"
+#include "webrtc/modules/video_coding/test/stream_generator.h"
+#include "webrtc/modules/video_coding/timing.h"
+#include "webrtc/modules/video_coding/test/test_util.h"
#include "webrtc/system_wrappers/include/clock.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
@@ -33,14 +35,11 @@ class TestVCMReceiver : public ::testing::Test {
: clock_(new SimulatedClock(0)),
timing_(clock_.get()),
receiver_(&timing_, clock_.get(), &event_factory_) {
-
- stream_generator_.reset(new
- StreamGenerator(0, clock_->TimeInMilliseconds()));
+ stream_generator_.reset(
+ new StreamGenerator(0, clock_->TimeInMilliseconds()));
}
- virtual void SetUp() {
- receiver_.Reset();
- }
+ virtual void SetUp() { receiver_.Reset(); }
int32_t InsertPacket(int index) {
VCMPacket packet;
@@ -78,7 +77,7 @@ class TestVCMReceiver : public ::testing::Test {
bool DecodeNextFrame() {
int64_t render_time_ms = 0;
VCMEncodedFrame* frame =
- receiver_.FrameForDecoding(0, render_time_ms, false);
+ receiver_.FrameForDecoding(0, &render_time_ms, false);
if (!frame)
return false;
receiver_.ReleaseFrame(frame);
@@ -115,7 +114,7 @@ TEST_F(TestVCMReceiver, RenderBufferSize_SkipToKeyFrame) {
EXPECT_GE(InsertFrame(kVideoFrameDelta, true), kNoError);
}
EXPECT_EQ((kNumOfFrames - 1) * kDefaultFramePeriodMs,
- receiver_.RenderBufferSizeMs());
+ receiver_.RenderBufferSizeMs());
}
TEST_F(TestVCMReceiver, RenderBufferSize_NotAllComplete) {
@@ -131,7 +130,7 @@ TEST_F(TestVCMReceiver, RenderBufferSize_NotAllComplete) {
EXPECT_GE(InsertFrame(kVideoFrameDelta, true), kNoError);
}
EXPECT_EQ((num_of_frames - 1) * kDefaultFramePeriodMs,
- receiver_.RenderBufferSizeMs());
+ receiver_.RenderBufferSizeMs());
}
TEST_F(TestVCMReceiver, RenderBufferSize_NoKeyFrame) {
@@ -141,7 +140,8 @@ TEST_F(TestVCMReceiver, RenderBufferSize_NoKeyFrame) {
EXPECT_GE(InsertFrame(kVideoFrameDelta, true), kNoError);
}
int64_t next_render_time_ms = 0;
- VCMEncodedFrame* frame = receiver_.FrameForDecoding(10, next_render_time_ms);
+ VCMEncodedFrame* frame =
+ receiver_.FrameForDecoding(10, &next_render_time_ms, false);
EXPECT_TRUE(frame == NULL);
receiver_.ReleaseFrame(frame);
EXPECT_GE(InsertFrame(kVideoFrameDelta, false), kNoError);
@@ -159,7 +159,7 @@ TEST_F(TestVCMReceiver, NonDecodableDuration_Empty) {
const int kMaxNonDecodableDuration = 500;
const int kMinDelayMs = 500;
receiver_.SetNackSettings(kMaxNackListSize, kMaxPacketAgeToNack,
- kMaxNonDecodableDuration);
+ kMaxNonDecodableDuration);
EXPECT_GE(InsertFrame(kVideoFrameKey, true), kNoError);
// Advance time until it's time to decode the key frame.
clock_->AdvanceTimeMilliseconds(kMinDelayMs);
@@ -176,7 +176,7 @@ TEST_F(TestVCMReceiver, NonDecodableDuration_NoKeyFrame) {
const int kMaxPacketAgeToNack = 1000;
const int kMaxNonDecodableDuration = 500;
receiver_.SetNackSettings(kMaxNackListSize, kMaxPacketAgeToNack,
- kMaxNonDecodableDuration);
+ kMaxNonDecodableDuration);
const int kNumFrames = kDefaultFrameRate * kMaxNonDecodableDuration / 1000;
for (int i = 0; i < kNumFrames; ++i) {
EXPECT_GE(InsertFrame(kVideoFrameDelta, true), kNoError);
@@ -192,24 +192,23 @@ TEST_F(TestVCMReceiver, NonDecodableDuration_OneIncomplete) {
const size_t kMaxNackListSize = 1000;
const int kMaxPacketAgeToNack = 1000;
const int kMaxNonDecodableDuration = 500;
- const int kMaxNonDecodableDurationFrames = (kDefaultFrameRate *
- kMaxNonDecodableDuration + 500) / 1000;
+ const int kMaxNonDecodableDurationFrames =
+ (kDefaultFrameRate * kMaxNonDecodableDuration + 500) / 1000;
const int kMinDelayMs = 500;
receiver_.SetNackSettings(kMaxNackListSize, kMaxPacketAgeToNack,
- kMaxNonDecodableDuration);
+ kMaxNonDecodableDuration);
receiver_.SetMinReceiverDelay(kMinDelayMs);
int64_t key_frame_inserted = clock_->TimeInMilliseconds();
EXPECT_GE(InsertFrame(kVideoFrameKey, true), kNoError);
// Insert an incomplete frame.
EXPECT_GE(InsertFrame(kVideoFrameDelta, false), kNoError);
// Insert enough frames to have too long non-decodable sequence.
- for (int i = 0; i < kMaxNonDecodableDurationFrames;
- ++i) {
+ for (int i = 0; i < kMaxNonDecodableDurationFrames; ++i) {
EXPECT_GE(InsertFrame(kVideoFrameDelta, true), kNoError);
}
// Advance time until it's time to decode the key frame.
clock_->AdvanceTimeMilliseconds(kMinDelayMs - clock_->TimeInMilliseconds() -
- key_frame_inserted);
+ key_frame_inserted);
EXPECT_TRUE(DecodeNextFrame());
// Make sure we get a key frame request.
bool request_key_frame = false;
@@ -223,11 +222,11 @@ TEST_F(TestVCMReceiver, NonDecodableDuration_NoTrigger) {
const size_t kMaxNackListSize = 1000;
const int kMaxPacketAgeToNack = 1000;
const int kMaxNonDecodableDuration = 500;
- const int kMaxNonDecodableDurationFrames = (kDefaultFrameRate *
- kMaxNonDecodableDuration + 500) / 1000;
+ const int kMaxNonDecodableDurationFrames =
+ (kDefaultFrameRate * kMaxNonDecodableDuration + 500) / 1000;
const int kMinDelayMs = 500;
receiver_.SetNackSettings(kMaxNackListSize, kMaxPacketAgeToNack,
- kMaxNonDecodableDuration);
+ kMaxNonDecodableDuration);
receiver_.SetMinReceiverDelay(kMinDelayMs);
int64_t key_frame_inserted = clock_->TimeInMilliseconds();
EXPECT_GE(InsertFrame(kVideoFrameKey, true), kNoError);
@@ -235,13 +234,12 @@ TEST_F(TestVCMReceiver, NonDecodableDuration_NoTrigger) {
EXPECT_GE(InsertFrame(kVideoFrameDelta, false), kNoError);
// Insert all but one frame to not trigger a key frame request due to
// too long duration of non-decodable frames.
- for (int i = 0; i < kMaxNonDecodableDurationFrames - 1;
- ++i) {
+ for (int i = 0; i < kMaxNonDecodableDurationFrames - 1; ++i) {
EXPECT_GE(InsertFrame(kVideoFrameDelta, true), kNoError);
}
// Advance time until it's time to decode the key frame.
clock_->AdvanceTimeMilliseconds(kMinDelayMs - clock_->TimeInMilliseconds() -
- key_frame_inserted);
+ key_frame_inserted);
EXPECT_TRUE(DecodeNextFrame());
// Make sure we don't get a key frame request since we haven't generated
// enough frames.
@@ -256,25 +254,24 @@ TEST_F(TestVCMReceiver, NonDecodableDuration_NoTrigger2) {
const size_t kMaxNackListSize = 1000;
const int kMaxPacketAgeToNack = 1000;
const int kMaxNonDecodableDuration = 500;
- const int kMaxNonDecodableDurationFrames = (kDefaultFrameRate *
- kMaxNonDecodableDuration + 500) / 1000;
+ const int kMaxNonDecodableDurationFrames =
+ (kDefaultFrameRate * kMaxNonDecodableDuration + 500) / 1000;
const int kMinDelayMs = 500;
receiver_.SetNackSettings(kMaxNackListSize, kMaxPacketAgeToNack,
- kMaxNonDecodableDuration);
+ kMaxNonDecodableDuration);
receiver_.SetMinReceiverDelay(kMinDelayMs);
int64_t key_frame_inserted = clock_->TimeInMilliseconds();
EXPECT_GE(InsertFrame(kVideoFrameKey, true), kNoError);
// Insert enough frames to have too long non-decodable sequence, except that
// we don't have any losses.
- for (int i = 0; i < kMaxNonDecodableDurationFrames;
- ++i) {
+ for (int i = 0; i < kMaxNonDecodableDurationFrames; ++i) {
EXPECT_GE(InsertFrame(kVideoFrameDelta, true), kNoError);
}
// Insert an incomplete frame.
EXPECT_GE(InsertFrame(kVideoFrameDelta, false), kNoError);
// Advance time until it's time to decode the key frame.
clock_->AdvanceTimeMilliseconds(kMinDelayMs - clock_->TimeInMilliseconds() -
- key_frame_inserted);
+ key_frame_inserted);
EXPECT_TRUE(DecodeNextFrame());
// Make sure we don't get a key frame request since the non-decodable duration
// is only one frame.
@@ -289,25 +286,24 @@ TEST_F(TestVCMReceiver, NonDecodableDuration_KeyFrameAfterIncompleteFrames) {
const size_t kMaxNackListSize = 1000;
const int kMaxPacketAgeToNack = 1000;
const int kMaxNonDecodableDuration = 500;
- const int kMaxNonDecodableDurationFrames = (kDefaultFrameRate *
- kMaxNonDecodableDuration + 500) / 1000;
+ const int kMaxNonDecodableDurationFrames =
+ (kDefaultFrameRate * kMaxNonDecodableDuration + 500) / 1000;
const int kMinDelayMs = 500;
receiver_.SetNackSettings(kMaxNackListSize, kMaxPacketAgeToNack,
- kMaxNonDecodableDuration);
+ kMaxNonDecodableDuration);
receiver_.SetMinReceiverDelay(kMinDelayMs);
int64_t key_frame_inserted = clock_->TimeInMilliseconds();
EXPECT_GE(InsertFrame(kVideoFrameKey, true), kNoError);
// Insert an incomplete frame.
EXPECT_GE(InsertFrame(kVideoFrameDelta, false), kNoError);
// Insert enough frames to have too long non-decodable sequence.
- for (int i = 0; i < kMaxNonDecodableDurationFrames;
- ++i) {
+ for (int i = 0; i < kMaxNonDecodableDurationFrames; ++i) {
EXPECT_GE(InsertFrame(kVideoFrameDelta, true), kNoError);
}
EXPECT_GE(InsertFrame(kVideoFrameKey, true), kNoError);
// Advance time until it's time to decode the key frame.
clock_->AdvanceTimeMilliseconds(kMinDelayMs - clock_->TimeInMilliseconds() -
- key_frame_inserted);
+ key_frame_inserted);
EXPECT_TRUE(DecodeNextFrame());
// Make sure we don't get a key frame request since we have a key frame
// in the list.
@@ -338,7 +334,7 @@ class SimulatedClockWithFrames : public SimulatedClock {
// Return true if some frame arrives between now and now+|milliseconds|.
bool AdvanceTimeMilliseconds(int64_t milliseconds, bool stop_on_frame) {
return AdvanceTimeMicroseconds(milliseconds * 1000, stop_on_frame);
- };
+ }
bool AdvanceTimeMicroseconds(int64_t microseconds, bool stop_on_frame) {
int64_t start_time = TimeInMicroseconds();
@@ -362,7 +358,7 @@ class SimulatedClockWithFrames : public SimulatedClock {
SimulatedClock::AdvanceTimeMicroseconds(end_time - TimeInMicroseconds());
}
return frame_injected;
- };
+ }
// Input timestamps are in unit Milliseconds.
// And |arrive_timestamps| must be positive and in increasing order.
@@ -429,7 +425,7 @@ class FrameInjectEvent : public EventWrapper {
bool Set() override { return true; }
- EventTypeWrapper Wait(unsigned long max_time) override {
+ EventTypeWrapper Wait(unsigned long max_time) override { // NOLINT
if (clock_->AdvanceTimeMilliseconds(max_time, stop_on_frame_) &&
stop_on_frame_) {
return EventTypeWrapper::kEventSignaled;
@@ -445,7 +441,6 @@ class FrameInjectEvent : public EventWrapper {
class VCMReceiverTimingTest : public ::testing::Test {
protected:
-
VCMReceiverTimingTest()
: clock_(&stream_generator_, &receiver_),
@@ -458,7 +453,6 @@ class VCMReceiverTimingTest : public ::testing::Test {
rtc::scoped_ptr<EventWrapper>(
new FrameInjectEvent(&clock_, true))) {}
-
virtual void SetUp() { receiver_.Reset(); }
SimulatedClockWithFrames clock_;
@@ -504,7 +498,7 @@ TEST_F(VCMReceiverTimingTest, FrameForDecoding) {
while (num_frames_return < kNumFrames) {
int64_t start_time = clock_.TimeInMilliseconds();
VCMEncodedFrame* frame =
- receiver_.FrameForDecoding(kMaxWaitTime, next_render_time, false);
+ receiver_.FrameForDecoding(kMaxWaitTime, &next_render_time, false);
int64_t end_time = clock_.TimeInMilliseconds();
// In any case the FrameForDecoding should not wait longer than
@@ -523,4 +517,59 @@ TEST_F(VCMReceiverTimingTest, FrameForDecoding) {
}
}
+// Test whether VCMReceiver::FrameForDecoding handles parameter
+// |prefer_late_decoding| and |max_wait_time_ms| correctly:
+// 1. The function execution should never take more than |max_wait_time_ms|.
+// 2. If the function exit before now + |max_wait_time_ms|, a frame must be
+// returned and the end time must be equal to the render timestamp - delay
+// for decoding and rendering.
+TEST_F(VCMReceiverTimingTest, FrameForDecodingPreferLateDecoding) {
+ const size_t kNumFrames = 100;
+ const int kFramePeriod = 40;
+
+ int64_t arrive_timestamps[kNumFrames];
+ int64_t render_timestamps[kNumFrames];
+ int64_t next_render_time;
+
+ int render_delay_ms;
+ int max_decode_ms;
+ int dummy;
+ timing_.GetTimings(&dummy, &max_decode_ms, &dummy, &dummy, &dummy, &dummy,
+ &render_delay_ms);
+
+ // Construct test samples.
+ // render_timestamps are the timestamps stored in the Frame;
+ // arrive_timestamps controls when the Frame packet got received.
+ for (size_t i = 0; i < kNumFrames; i++) {
+ // Preset frame rate to 25Hz.
+ // But we add a reasonable deviation to arrive_timestamps to mimic Internet
+ // fluctuation.
+ arrive_timestamps[i] =
+ (i + 1) * kFramePeriod + (i % 10) * ((i % 2) ? 1 : -1);
+ render_timestamps[i] = (i + 1) * kFramePeriod;
+ }
+
+ clock_.SetFrames(arrive_timestamps, render_timestamps, kNumFrames);
+
+ // Record how many frames we finally get out of the receiver.
+ size_t num_frames_return = 0;
+ const int64_t kMaxWaitTime = 30;
+ bool prefer_late_decoding = true;
+ while (num_frames_return < kNumFrames) {
+ int64_t start_time = clock_.TimeInMilliseconds();
+
+ VCMEncodedFrame* frame = receiver_.FrameForDecoding(
+ kMaxWaitTime, &next_render_time, prefer_late_decoding);
+ int64_t end_time = clock_.TimeInMilliseconds();
+ if (frame) {
+ EXPECT_EQ(frame->RenderTimeMs() - max_decode_ms - render_delay_ms,
+ end_time);
+ receiver_.ReleaseFrame(frame);
+ ++num_frames_return;
+ } else {
+ EXPECT_EQ(kMaxWaitTime, end_time - start_time);
+ }
+ }
+}
+
} // namespace webrtc
diff --git a/webrtc/modules/video_coding/rtt_filter.cc b/webrtc/modules/video_coding/rtt_filter.cc
new file mode 100644
index 0000000000..742f70f1c1
--- /dev/null
+++ b/webrtc/modules/video_coding/rtt_filter.cc
@@ -0,0 +1,165 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/video_coding/rtt_filter.h"
+
+#include <math.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "webrtc/modules/video_coding/internal_defines.h"
+
+namespace webrtc {
+
+VCMRttFilter::VCMRttFilter()
+ : _filtFactMax(35),
+ _jumpStdDevs(2.5),
+ _driftStdDevs(3.5),
+ _detectThreshold(kMaxDriftJumpCount) {
+ Reset();
+}
+
+VCMRttFilter& VCMRttFilter::operator=(const VCMRttFilter& rhs) {
+ if (this != &rhs) {
+ _gotNonZeroUpdate = rhs._gotNonZeroUpdate;
+ _avgRtt = rhs._avgRtt;
+ _varRtt = rhs._varRtt;
+ _maxRtt = rhs._maxRtt;
+ _filtFactCount = rhs._filtFactCount;
+ _jumpCount = rhs._jumpCount;
+ _driftCount = rhs._driftCount;
+ memcpy(_jumpBuf, rhs._jumpBuf, sizeof(_jumpBuf));
+ memcpy(_driftBuf, rhs._driftBuf, sizeof(_driftBuf));
+ }
+ return *this;
+}
+
+void VCMRttFilter::Reset() {
+ _gotNonZeroUpdate = false;
+ _avgRtt = 0;
+ _varRtt = 0;
+ _maxRtt = 0;
+ _filtFactCount = 1;
+ _jumpCount = 0;
+ _driftCount = 0;
+ memset(_jumpBuf, 0, kMaxDriftJumpCount);
+ memset(_driftBuf, 0, kMaxDriftJumpCount);
+}
+
+void VCMRttFilter::Update(int64_t rttMs) {
+ if (!_gotNonZeroUpdate) {
+ if (rttMs == 0) {
+ return;
+ }
+ _gotNonZeroUpdate = true;
+ }
+
+ // Sanity check
+ if (rttMs > 3000) {
+ rttMs = 3000;
+ }
+
+ double filtFactor = 0;
+ if (_filtFactCount > 1) {
+ filtFactor = static_cast<double>(_filtFactCount - 1) / _filtFactCount;
+ }
+ _filtFactCount++;
+ if (_filtFactCount > _filtFactMax) {
+ // This prevents filtFactor from going above
+ // (_filtFactMax - 1) / _filtFactMax,
+ // e.g., _filtFactMax = 50 => filtFactor = 49/50 = 0.98
+ _filtFactCount = _filtFactMax;
+ }
+ double oldAvg = _avgRtt;
+ double oldVar = _varRtt;
+ _avgRtt = filtFactor * _avgRtt + (1 - filtFactor) * rttMs;
+ _varRtt = filtFactor * _varRtt +
+ (1 - filtFactor) * (rttMs - _avgRtt) * (rttMs - _avgRtt);
+ _maxRtt = VCM_MAX(rttMs, _maxRtt);
+ if (!JumpDetection(rttMs) || !DriftDetection(rttMs)) {
+ // In some cases we don't want to update the statistics
+ _avgRtt = oldAvg;
+ _varRtt = oldVar;
+ }
+}
+
+bool VCMRttFilter::JumpDetection(int64_t rttMs) {
+ double diffFromAvg = _avgRtt - rttMs;
+ if (fabs(diffFromAvg) > _jumpStdDevs * sqrt(_varRtt)) {
+ int diffSign = (diffFromAvg >= 0) ? 1 : -1;
+ int jumpCountSign = (_jumpCount >= 0) ? 1 : -1;
+ if (diffSign != jumpCountSign) {
+ // Since the signs differ the samples currently
+ // in the buffer is useless as they represent a
+ // jump in a different direction.
+ _jumpCount = 0;
+ }
+ if (abs(_jumpCount) < kMaxDriftJumpCount) {
+ // Update the buffer used for the short time
+ // statistics.
+ // The sign of the diff is used for updating the counter since
+ // we want to use the same buffer for keeping track of when
+ // the RTT jumps down and up.
+ _jumpBuf[abs(_jumpCount)] = rttMs;
+ _jumpCount += diffSign;
+ }
+ if (abs(_jumpCount) >= _detectThreshold) {
+ // Detected an RTT jump
+ ShortRttFilter(_jumpBuf, abs(_jumpCount));
+ _filtFactCount = _detectThreshold + 1;
+ _jumpCount = 0;
+ } else {
+ return false;
+ }
+ } else {
+ _jumpCount = 0;
+ }
+ return true;
+}
+
+bool VCMRttFilter::DriftDetection(int64_t rttMs) {
+ if (_maxRtt - _avgRtt > _driftStdDevs * sqrt(_varRtt)) {
+ if (_driftCount < kMaxDriftJumpCount) {
+ // Update the buffer used for the short time
+ // statistics.
+ _driftBuf[_driftCount] = rttMs;
+ _driftCount++;
+ }
+ if (_driftCount >= _detectThreshold) {
+ // Detected an RTT drift
+ ShortRttFilter(_driftBuf, _driftCount);
+ _filtFactCount = _detectThreshold + 1;
+ _driftCount = 0;
+ }
+ } else {
+ _driftCount = 0;
+ }
+ return true;
+}
+
+void VCMRttFilter::ShortRttFilter(int64_t* buf, uint32_t length) {
+ if (length == 0) {
+ return;
+ }
+ _maxRtt = 0;
+ _avgRtt = 0;
+ for (uint32_t i = 0; i < length; i++) {
+ if (buf[i] > _maxRtt) {
+ _maxRtt = buf[i];
+ }
+ _avgRtt += buf[i];
+ }
+ _avgRtt = _avgRtt / static_cast<double>(length);
+}
+
+int64_t VCMRttFilter::RttMs() const {
+ return static_cast<int64_t>(_maxRtt + 0.5);
+}
+} // namespace webrtc
diff --git a/webrtc/modules/video_coding/rtt_filter.h b/webrtc/modules/video_coding/rtt_filter.h
new file mode 100644
index 0000000000..f5de532cfc
--- /dev/null
+++ b/webrtc/modules/video_coding/rtt_filter.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_RTT_FILTER_H_
+#define WEBRTC_MODULES_VIDEO_CODING_RTT_FILTER_H_
+
+#include "webrtc/typedefs.h"
+
+namespace webrtc {
+
+class VCMRttFilter {
+ public:
+ VCMRttFilter();
+
+ VCMRttFilter& operator=(const VCMRttFilter& rhs);
+
+ // Resets the filter.
+ void Reset();
+ // Updates the filter with a new sample.
+ void Update(int64_t rttMs);
+ // A getter function for the current RTT level in ms.
+ int64_t RttMs() const;
+
+ private:
+ // The size of the drift and jump memory buffers
+ // and thus also the detection threshold for these
+ // detectors in number of samples.
+ enum { kMaxDriftJumpCount = 5 };
+ // Detects RTT jumps by comparing the difference between
+ // samples and average to the standard deviation.
+ // Returns true if the long time statistics should be updated
+ // and false otherwise
+ bool JumpDetection(int64_t rttMs);
+ // Detects RTT drifts by comparing the difference between
+ // max and average to the standard deviation.
+ // Returns true if the long time statistics should be updated
+ // and false otherwise
+ bool DriftDetection(int64_t rttMs);
+ // Computes the short time average and maximum of the vector buf.
+ void ShortRttFilter(int64_t* buf, uint32_t length);
+
+ bool _gotNonZeroUpdate;
+ double _avgRtt;
+ double _varRtt;
+ int64_t _maxRtt;
+ uint32_t _filtFactCount;
+ const uint32_t _filtFactMax;
+ const double _jumpStdDevs;
+ const double _driftStdDevs;
+ int32_t _jumpCount;
+ int32_t _driftCount;
+ const int32_t _detectThreshold;
+ int64_t _jumpBuf[kMaxDriftJumpCount];
+ int64_t _driftBuf[kMaxDriftJumpCount];
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_CODING_RTT_FILTER_H_
diff --git a/webrtc/modules/video_coding/main/source/session_info.cc b/webrtc/modules/video_coding/session_info.cc
index 9a1bc54e52..8701098639 100644
--- a/webrtc/modules/video_coding/main/source/session_info.cc
+++ b/webrtc/modules/video_coding/session_info.cc
@@ -8,10 +8,10 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/video_coding/main/source/session_info.h"
+#include "webrtc/modules/video_coding/session_info.h"
-#include "webrtc/modules/video_coding/main/source/packet.h"
-#include "webrtc/system_wrappers/include/logging.h"
+#include "webrtc/base/logging.h"
+#include "webrtc/modules/video_coding/packet.h"
namespace webrtc {
@@ -32,8 +32,7 @@ VCMSessionInfo::VCMSessionInfo()
empty_seq_num_low_(-1),
empty_seq_num_high_(-1),
first_packet_seq_num_(-1),
- last_packet_seq_num_(-1) {
-}
+ last_packet_seq_num_(-1) {}
void VCMSessionInfo::UpdateDataPointers(const uint8_t* old_base_ptr,
const uint8_t* new_base_ptr) {
@@ -88,8 +87,8 @@ bool VCMSessionInfo::LayerSync() const {
if (packets_.front().codecSpecificHeader.codec == kRtpVideoVp8) {
return packets_.front().codecSpecificHeader.codecHeader.VP8.layerSync;
} else if (packets_.front().codecSpecificHeader.codec == kRtpVideoVp9) {
- return
- packets_.front().codecSpecificHeader.codecHeader.VP9.temporal_up_switch;
+ return packets_.front()
+ .codecSpecificHeader.codecHeader.VP9.temporal_up_switch;
} else {
return false;
}
@@ -126,7 +125,7 @@ void VCMSessionInfo::SetGofInfo(const GofInfoVP9& gof_info, size_t idx) {
gof_info.temporal_up_switch[idx];
packets_.front().codecSpecificHeader.codecHeader.VP9.num_ref_pics =
gof_info.num_ref_pics[idx];
- for (size_t i = 0; i < gof_info.num_ref_pics[idx]; ++i) {
+ for (uint8_t i = 0; i < gof_info.num_ref_pics[idx]; ++i) {
packets_.front().codecSpecificHeader.codecHeader.VP9.pid_diff[i] =
gof_info.pid_diff[idx][i];
}
@@ -193,9 +192,7 @@ size_t VCMSessionInfo::InsertBuffer(uint8_t* frame_buffer,
while (nalu_ptr < packet_buffer + packet.sizeBytes) {
size_t length = BufferToUWord16(nalu_ptr);
nalu_ptr += kLengthFieldLength;
- frame_buffer_ptr += Insert(nalu_ptr,
- length,
- packet.insertStartCode,
+ frame_buffer_ptr += Insert(nalu_ptr, length, packet.insertStartCode,
const_cast<uint8_t*>(frame_buffer_ptr));
nalu_ptr += length;
}
@@ -203,14 +200,12 @@ size_t VCMSessionInfo::InsertBuffer(uint8_t* frame_buffer,
return packet.sizeBytes;
}
ShiftSubsequentPackets(
- packet_it,
- packet.sizeBytes +
- (packet.insertStartCode ? kH264StartCodeLengthBytes : 0));
-
- packet.sizeBytes = Insert(packet_buffer,
- packet.sizeBytes,
- packet.insertStartCode,
- const_cast<uint8_t*>(packet.dataPtr));
+ packet_it, packet.sizeBytes +
+ (packet.insertStartCode ? kH264StartCodeLengthBytes : 0));
+
+ packet.sizeBytes =
+ Insert(packet_buffer, packet.sizeBytes, packet.insertStartCode,
+ const_cast<uint8_t*>(packet.dataPtr));
return packet.sizeBytes;
}
@@ -223,8 +218,7 @@ size_t VCMSessionInfo::Insert(const uint8_t* buffer,
memcpy(frame_buffer, startCode, kH264StartCodeLengthBytes);
}
memcpy(frame_buffer + (insert_start_code ? kH264StartCodeLengthBytes : 0),
- buffer,
- length);
+ buffer, length);
length += (insert_start_code ? kH264StartCodeLengthBytes : 0);
return length;
@@ -276,13 +270,12 @@ void VCMSessionInfo::UpdateDecodableSession(const FrameData& frame_data) {
// thresholds.
const float kLowPacketPercentageThreshold = 0.2f;
const float kHighPacketPercentageThreshold = 0.8f;
- if (frame_data.rtt_ms < kRttThreshold
- || frame_type_ == kVideoFrameKey
- || !HaveFirstPacket()
- || (NumPackets() <= kHighPacketPercentageThreshold
- * frame_data.rolling_average_packets_per_frame
- && NumPackets() > kLowPacketPercentageThreshold
- * frame_data.rolling_average_packets_per_frame))
+ if (frame_data.rtt_ms < kRttThreshold || frame_type_ == kVideoFrameKey ||
+ !HaveFirstPacket() ||
+ (NumPackets() <= kHighPacketPercentageThreshold *
+ frame_data.rolling_average_packets_per_frame &&
+ NumPackets() > kLowPacketPercentageThreshold *
+ frame_data.rolling_average_packets_per_frame))
return;
decodable_ = true;
@@ -308,7 +301,7 @@ VCMSessionInfo::PacketIterator VCMSessionInfo::FindNaluEnd(
// Find the end of the NAL unit.
for (; packet_it != packets_.end(); ++packet_it) {
if (((*packet_it).completeNALU == kNaluComplete &&
- (*packet_it).sizeBytes > 0) ||
+ (*packet_it).sizeBytes > 0) ||
// Found next NALU.
(*packet_it).completeNALU == kNaluStart)
return --packet_it;
@@ -348,7 +341,7 @@ size_t VCMSessionInfo::BuildVP8FragmentationHeader(
memset(fragmentation->fragmentationLength, 0,
kMaxVP8Partitions * sizeof(size_t));
if (packets_.empty())
- return new_length;
+ return new_length;
PacketIterator it = FindNextPartitionBeginning(packets_.begin());
while (it != packets_.end()) {
const int partition_id =
@@ -371,7 +364,7 @@ size_t VCMSessionInfo::BuildVP8FragmentationHeader(
// Set all empty fragments to start where the previous fragment ends,
// and have zero length.
if (fragmentation->fragmentationLength[0] == 0)
- fragmentation->fragmentationOffset[0] = 0;
+ fragmentation->fragmentationOffset[0] = 0;
for (int i = 1; i < fragmentation->fragmentationVectorSize; ++i) {
if (fragmentation->fragmentationLength[i] == 0)
fragmentation->fragmentationOffset[i] =
@@ -379,7 +372,7 @@ size_t VCMSessionInfo::BuildVP8FragmentationHeader(
fragmentation->fragmentationLength[i - 1];
assert(i == 0 ||
fragmentation->fragmentationOffset[i] >=
- fragmentation->fragmentationOffset[i - 1]);
+ fragmentation->fragmentationOffset[i - 1]);
}
assert(new_length <= frame_buffer_length);
return new_length;
@@ -424,8 +417,8 @@ bool VCMSessionInfo::InSequence(const PacketIterator& packet_it,
// If the two iterators are pointing to the same packet they are considered
// to be in sequence.
return (packet_it == prev_packet_it ||
- (static_cast<uint16_t>((*prev_packet_it).seqNum + 1) ==
- (*packet_it).seqNum));
+ (static_cast<uint16_t>((*prev_packet_it).seqNum + 1) ==
+ (*packet_it).seqNum));
}
size_t VCMSessionInfo::MakeDecodable() {
@@ -435,8 +428,7 @@ size_t VCMSessionInfo::MakeDecodable() {
}
PacketIterator it = packets_.begin();
// Make sure we remove the first NAL unit if it's not decodable.
- if ((*it).completeNALU == kNaluIncomplete ||
- (*it).completeNALU == kNaluEnd) {
+ if ((*it).completeNALU == kNaluIncomplete || (*it).completeNALU == kNaluEnd) {
PacketIterator nalu_end = FindNaluEnd(it);
return_length += DeletePacketData(it, nalu_end);
it = nalu_end;
@@ -445,7 +437,7 @@ size_t VCMSessionInfo::MakeDecodable() {
// Take care of the rest of the NAL units.
for (; it != packets_.end(); ++it) {
bool start_of_nalu = ((*it).completeNALU == kNaluStart ||
- (*it).completeNALU == kNaluComplete);
+ (*it).completeNALU == kNaluComplete);
if (!start_of_nalu && !InSequence(it, prev_it)) {
// Found a sequence number gap due to packet loss.
PacketIterator nalu_end = FindNaluEnd(it);
@@ -463,18 +455,15 @@ void VCMSessionInfo::SetNotDecodableIfIncomplete() {
decodable_ = false;
}
-bool
-VCMSessionInfo::HaveFirstPacket() const {
+bool VCMSessionInfo::HaveFirstPacket() const {
return !packets_.empty() && (first_packet_seq_num_ != -1);
}
-bool
-VCMSessionInfo::HaveLastPacket() const {
+bool VCMSessionInfo::HaveLastPacket() const {
return !packets_.empty() && (last_packet_seq_num_ != -1);
}
-bool
-VCMSessionInfo::session_nack() const {
+bool VCMSessionInfo::session_nack() const {
return session_nack_;
}
@@ -502,8 +491,8 @@ int VCMSessionInfo::InsertPacket(const VCMPacket& packet,
break;
// Check for duplicate packets.
- if (rit != packets_.rend() &&
- (*rit).seqNum == packet.seqNum && (*rit).sizeBytes > 0)
+ if (rit != packets_.rend() && (*rit).seqNum == packet.seqNum &&
+ (*rit).sizeBytes > 0)
return -2;
if (packet.codec == kVideoCodecH264) {
@@ -572,8 +561,8 @@ void VCMSessionInfo::InformOfEmptyPacket(uint16_t seq_num) {
empty_seq_num_high_ = seq_num;
else
empty_seq_num_high_ = LatestSequenceNumber(seq_num, empty_seq_num_high_);
- if (empty_seq_num_low_ == -1 || IsNewerSequenceNumber(empty_seq_num_low_,
- seq_num))
+ if (empty_seq_num_low_ == -1 ||
+ IsNewerSequenceNumber(empty_seq_num_low_, seq_num))
empty_seq_num_low_ = seq_num;
}
diff --git a/webrtc/modules/video_coding/main/source/session_info.h b/webrtc/modules/video_coding/session_info.h
index 88071e19d5..e9ff25166d 100644
--- a/webrtc/modules/video_coding/main/source/session_info.h
+++ b/webrtc/modules/video_coding/session_info.h
@@ -8,14 +8,14 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_SESSION_INFO_H_
-#define WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_SESSION_INFO_H_
+#ifndef WEBRTC_MODULES_VIDEO_CODING_SESSION_INFO_H_
+#define WEBRTC_MODULES_VIDEO_CODING_SESSION_INFO_H_
#include <list>
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/modules/video_coding/main/interface/video_coding.h"
-#include "webrtc/modules/video_coding/main/source/packet.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/video_coding/include/video_coding.h"
+#include "webrtc/modules/video_coding/packet.h"
#include "webrtc/typedefs.h"
namespace webrtc {
@@ -116,8 +116,7 @@ class VCMSessionInfo {
PacketIterator FindPartitionEnd(PacketIterator it) const;
static bool InSequence(const PacketIterator& it,
const PacketIterator& prev_it);
- size_t InsertBuffer(uint8_t* frame_buffer,
- PacketIterator packetIterator);
+ size_t InsertBuffer(uint8_t* frame_buffer, PacketIterator packetIterator);
size_t Insert(const uint8_t* buffer,
size_t length,
bool insert_start_code,
@@ -126,8 +125,7 @@ class VCMSessionInfo {
PacketIterator FindNaluEnd(PacketIterator packet_iter) const;
// Deletes the data of all packets between |start| and |end|, inclusively.
// Note that this function doesn't delete the actual packets.
- size_t DeletePacketData(PacketIterator start,
- PacketIterator end);
+ size_t DeletePacketData(PacketIterator start, PacketIterator end);
void UpdateCompleteSession();
// When enabled, determine if session is decodable, i.e. incomplete but
@@ -169,4 +167,4 @@ class VCMSessionInfo {
} // namespace webrtc
-#endif // WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_SESSION_INFO_H_
+#endif // WEBRTC_MODULES_VIDEO_CODING_SESSION_INFO_H_
diff --git a/webrtc/modules/video_coding/main/source/session_info_unittest.cc b/webrtc/modules/video_coding/session_info_unittest.cc
index 58c352d3fc..4019d63a5f 100644
--- a/webrtc/modules/video_coding/main/source/session_info_unittest.cc
+++ b/webrtc/modules/video_coding/session_info_unittest.cc
@@ -11,9 +11,9 @@
#include <string.h>
#include "testing/gtest/include/gtest/gtest.h"
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/modules/video_coding/main/source/packet.h"
-#include "webrtc/modules/video_coding/main/source/session_info.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/video_coding/packet.h"
+#include "webrtc/modules/video_coding/session_info.h"
namespace webrtc {
@@ -81,7 +81,7 @@ class TestVP8Partitions : public TestSessionInfo {
fragmentation_.fragmentationLength[partition_id]);
for (int i = 0; i < packets_expected; ++i) {
size_t packet_index = fragmentation_.fragmentationOffset[partition_id] +
- i * packet_buffer_size();
+ i * packet_buffer_size();
if (packet_index + packet_buffer_size() > frame_buffer_size())
return false;
VerifyPacket(frame_buffer_ + packet_index, start_value + i);
@@ -122,8 +122,7 @@ class TestNackList : public TestSessionInfo {
memset(seq_num_list_, 0, sizeof(seq_num_list_));
}
- void BuildSeqNumList(uint16_t low,
- uint16_t high) {
+ void BuildSeqNumList(uint16_t low, uint16_t high) {
size_t i = 0;
while (low != high + 1) {
EXPECT_LT(i, kMaxSeqNumListLength);
@@ -173,14 +172,11 @@ TEST_F(TestSessionInfo, TestSimpleAPIs) {
// To make things more difficult we will make sure to have a wrap here.
packet_.isFirstPacket = false;
packet_.markerBit = true;
- packet_.seqNum = 2;
+ packet_.seqNum = 2;
packet_.sizeBytes = 0;
packet_.frameType = kEmptyFrame;
- EXPECT_EQ(0,
- session_.InsertPacket(packet_,
- frame_buffer_,
- kNoErrors,
- frame_data));
+ EXPECT_EQ(
+ 0, session_.InsertPacket(packet_, frame_buffer_, kNoErrors, frame_data));
EXPECT_EQ(packet_.seqNum, session_.HighSequenceNumber());
}
@@ -198,9 +194,8 @@ TEST_F(TestSessionInfo, NormalOperation) {
packet_.seqNum += 1;
FillPacket(i);
ASSERT_EQ(packet_buffer_size(),
- static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
- kNoErrors,
- frame_data)));
+ static_cast<size_t>(session_.InsertPacket(
+ packet_, frame_buffer_, kNoErrors, frame_data)));
}
packet_.seqNum += 1;
@@ -223,9 +218,8 @@ TEST_F(TestSessionInfo, ErrorsEqualDecodableState) {
packet_.markerBit = false;
FillPacket(3);
EXPECT_EQ(packet_buffer_size(),
- static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
- kWithErrors,
- frame_data)));
+ static_cast<size_t>(session_.InsertPacket(
+ packet_, frame_buffer_, kWithErrors, frame_data)));
EXPECT_TRUE(session_.decodable());
}
@@ -237,18 +231,16 @@ TEST_F(TestSessionInfo, SelectiveDecodableState) {
frame_data.rolling_average_packets_per_frame = 11;
frame_data.rtt_ms = 150;
EXPECT_EQ(packet_buffer_size(),
- static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
- kSelectiveErrors,
- frame_data)));
+ static_cast<size_t>(session_.InsertPacket(
+ packet_, frame_buffer_, kSelectiveErrors, frame_data)));
EXPECT_FALSE(session_.decodable());
packet_.seqNum -= 1;
FillPacket(0);
packet_.isFirstPacket = true;
EXPECT_EQ(packet_buffer_size(),
- static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
- kSelectiveErrors,
- frame_data)));
+ static_cast<size_t>(session_.InsertPacket(
+ packet_, frame_buffer_, kSelectiveErrors, frame_data)));
EXPECT_TRUE(session_.decodable());
packet_.isFirstPacket = false;
@@ -256,19 +248,17 @@ TEST_F(TestSessionInfo, SelectiveDecodableState) {
for (int i = 2; i < 8; ++i) {
packet_.seqNum += 1;
FillPacket(i);
- EXPECT_EQ(packet_buffer_size(),
- static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
- kSelectiveErrors,
- frame_data)));
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(
+ packet_, frame_buffer_, kSelectiveErrors, frame_data)));
EXPECT_TRUE(session_.decodable());
}
packet_.seqNum += 1;
FillPacket(8);
EXPECT_EQ(packet_buffer_size(),
- static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
- kSelectiveErrors,
- frame_data)));
+ static_cast<size_t>(session_.InsertPacket(
+ packet_, frame_buffer_, kSelectiveErrors, frame_data)));
EXPECT_TRUE(session_.decodable());
}
@@ -285,18 +275,14 @@ TEST_F(TestSessionInfo, OutOfBoundsPackets1PacketFrame) {
packet_.isFirstPacket = true;
packet_.markerBit = true;
FillPacket(1);
- EXPECT_EQ(-3, session_.InsertPacket(packet_,
- frame_buffer_,
- kNoErrors,
- frame_data));
+ EXPECT_EQ(
+ -3, session_.InsertPacket(packet_, frame_buffer_, kNoErrors, frame_data));
packet_.seqNum = 0x0000;
packet_.isFirstPacket = false;
packet_.markerBit = false;
FillPacket(1);
- EXPECT_EQ(-3, session_.InsertPacket(packet_,
- frame_buffer_,
- kNoErrors,
- frame_data));
+ EXPECT_EQ(
+ -3, session_.InsertPacket(packet_, frame_buffer_, kNoErrors, frame_data));
}
TEST_F(TestSessionInfo, SetMarkerBitOnce) {
@@ -311,10 +297,8 @@ TEST_F(TestSessionInfo, SetMarkerBitOnce) {
packet_.isFirstPacket = true;
packet_.markerBit = true;
FillPacket(1);
- EXPECT_EQ(-3, session_.InsertPacket(packet_,
- frame_buffer_,
- kNoErrors,
- frame_data));
+ EXPECT_EQ(
+ -3, session_.InsertPacket(packet_, frame_buffer_, kNoErrors, frame_data));
}
TEST_F(TestSessionInfo, OutOfBoundsPacketsBase) {
@@ -331,10 +315,8 @@ TEST_F(TestSessionInfo, OutOfBoundsPacketsBase) {
packet_.isFirstPacket = true;
packet_.markerBit = true;
FillPacket(1);
- EXPECT_EQ(-3, session_.InsertPacket(packet_,
- frame_buffer_,
- kNoErrors,
- frame_data));
+ EXPECT_EQ(
+ -3, session_.InsertPacket(packet_, frame_buffer_, kNoErrors, frame_data));
packet_.seqNum = 0x0006;
packet_.isFirstPacket = true;
packet_.markerBit = true;
@@ -346,10 +328,8 @@ TEST_F(TestSessionInfo, OutOfBoundsPacketsBase) {
packet_.isFirstPacket = false;
packet_.markerBit = true;
FillPacket(1);
- EXPECT_EQ(-3, session_.InsertPacket(packet_,
- frame_buffer_,
- kNoErrors,
- frame_data));
+ EXPECT_EQ(
+ -3, session_.InsertPacket(packet_, frame_buffer_, kNoErrors, frame_data));
}
TEST_F(TestSessionInfo, OutOfBoundsPacketsWrap) {
@@ -379,20 +359,14 @@ TEST_F(TestSessionInfo, OutOfBoundsPacketsWrap) {
packet_.isFirstPacket = false;
packet_.markerBit = false;
FillPacket(1);
- EXPECT_EQ(-3,
- session_.InsertPacket(packet_,
- frame_buffer_,
- kNoErrors,
- frame_data));
+ EXPECT_EQ(
+ -3, session_.InsertPacket(packet_, frame_buffer_, kNoErrors, frame_data));
packet_.seqNum = 0x0006;
packet_.isFirstPacket = false;
packet_.markerBit = false;
FillPacket(1);
- EXPECT_EQ(-3,
- session_.InsertPacket(packet_,
- frame_buffer_,
- kNoErrors,
- frame_data));
+ EXPECT_EQ(
+ -3, session_.InsertPacket(packet_, frame_buffer_, kNoErrors, frame_data));
}
TEST_F(TestSessionInfo, OutOfBoundsOutOfOrder) {
@@ -417,10 +391,8 @@ TEST_F(TestSessionInfo, OutOfBoundsOutOfOrder) {
packet_.isFirstPacket = false;
packet_.markerBit = false;
FillPacket(1);
- EXPECT_EQ(-3, session_.InsertPacket(packet_,
- frame_buffer_,
- kNoErrors,
- frame_data));
+ EXPECT_EQ(
+ -3, session_.InsertPacket(packet_, frame_buffer_, kNoErrors, frame_data));
packet_.seqNum = 0x0010;
packet_.isFirstPacket = false;
packet_.markerBit = false;
@@ -440,10 +412,8 @@ TEST_F(TestSessionInfo, OutOfBoundsOutOfOrder) {
packet_.isFirstPacket = false;
packet_.markerBit = false;
FillPacket(1);
- EXPECT_EQ(-3, session_.InsertPacket(packet_,
- frame_buffer_,
- kNoErrors,
- frame_data));
+ EXPECT_EQ(
+ -3, session_.InsertPacket(packet_, frame_buffer_, kNoErrors, frame_data));
}
TEST_F(TestVP8Partitions, TwoPartitionsOneLoss) {
@@ -455,8 +425,8 @@ TEST_F(TestVP8Partitions, TwoPartitionsOneLoss) {
packet_header_.header.markerBit = false;
packet_header_.header.sequenceNumber = 0;
FillPacket(0);
- VCMPacket* packet = new VCMPacket(packet_buffer_, packet_buffer_size(),
- packet_header_);
+ VCMPacket* packet =
+ new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
kNoErrors, frame_data)));
@@ -505,8 +475,8 @@ TEST_F(TestVP8Partitions, TwoPartitionsOneLoss2) {
packet_header_.header.markerBit = false;
packet_header_.header.sequenceNumber = 1;
FillPacket(1);
- VCMPacket* packet = new VCMPacket(packet_buffer_, packet_buffer_size(),
- packet_header_);
+ VCMPacket* packet =
+ new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
kNoErrors, frame_data)));
@@ -567,8 +537,8 @@ TEST_F(TestVP8Partitions, TwoPartitionsNoLossWrap) {
packet_header_.header.markerBit = false;
packet_header_.header.sequenceNumber = 0xfffd;
FillPacket(0);
- VCMPacket* packet = new VCMPacket(packet_buffer_, packet_buffer_size(),
- packet_header_);
+ VCMPacket* packet =
+ new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
kNoErrors, frame_data)));
@@ -629,8 +599,8 @@ TEST_F(TestVP8Partitions, TwoPartitionsLossWrap) {
packet_header_.header.markerBit = false;
packet_header_.header.sequenceNumber = 0xfffd;
FillPacket(0);
- VCMPacket* packet = new VCMPacket(packet_buffer_, packet_buffer_size(),
- packet_header_);
+ VCMPacket* packet =
+ new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
kNoErrors, frame_data)));
@@ -682,7 +652,6 @@ TEST_F(TestVP8Partitions, TwoPartitionsLossWrap) {
EXPECT_TRUE(VerifyPartition(1, 1, 2));
}
-
TEST_F(TestVP8Partitions, ThreePartitionsOneMissing) {
// Partition 1 |Partition 2 | Partition 3
// [ 1 ] [ 2 ] | | [ 5 ] | [ 6 ]
@@ -692,8 +661,8 @@ TEST_F(TestVP8Partitions, ThreePartitionsOneMissing) {
packet_header_.header.markerBit = false;
packet_header_.header.sequenceNumber = 1;
FillPacket(1);
- VCMPacket* packet = new VCMPacket(packet_buffer_, packet_buffer_size(),
- packet_header_);
+ VCMPacket* packet =
+ new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
kNoErrors, frame_data)));
@@ -754,8 +723,8 @@ TEST_F(TestVP8Partitions, ThreePartitionsLossInSecond) {
packet_header_.header.markerBit = false;
packet_header_.header.sequenceNumber = 1;
FillPacket(1);
- VCMPacket* packet = new VCMPacket(packet_buffer_, packet_buffer_size(),
- packet_header_);
+ VCMPacket* packet =
+ new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
kNoErrors, frame_data)));
@@ -767,8 +736,7 @@ TEST_F(TestVP8Partitions, ThreePartitionsLossInSecond) {
packet_header_.header.markerBit = false;
packet_header_.header.sequenceNumber += 1;
FillPacket(2);
- packet = new VCMPacket(packet_buffer_, packet_buffer_size(),
- packet_header_);
+ packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
kNoErrors, frame_data)));
@@ -841,8 +809,8 @@ TEST_F(TestVP8Partitions, AggregationOverTwoPackets) {
packet_header_.header.markerBit = false;
packet_header_.header.sequenceNumber = 0;
FillPacket(0);
- VCMPacket* packet = new VCMPacket(packet_buffer_, packet_buffer_size(),
- packet_header_);
+ VCMPacket* packet =
+ new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
EXPECT_EQ(packet_buffer_size(),
static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
kNoErrors, frame_data)));
@@ -892,10 +860,8 @@ TEST_F(TestNalUnits, OnlyReceivedEmptyPacket) {
packet_.sizeBytes = 0;
packet_.seqNum = 0;
packet_.markerBit = false;
- EXPECT_EQ(0, session_.InsertPacket(packet_,
- frame_buffer_,
- kNoErrors,
- frame_data));
+ EXPECT_EQ(
+ 0, session_.InsertPacket(packet_, frame_buffer_, kNoErrors, frame_data));
EXPECT_EQ(0U, session_.MakeDecodable());
EXPECT_EQ(0U, session_.SessionLength());
diff --git a/webrtc/modules/video_coding/main/test/plotJitterEstimate.m b/webrtc/modules/video_coding/test/plotJitterEstimate.m
index d6185f55da..d6185f55da 100644
--- a/webrtc/modules/video_coding/main/test/plotJitterEstimate.m
+++ b/webrtc/modules/video_coding/test/plotJitterEstimate.m
diff --git a/webrtc/modules/video_coding/main/test/plotReceiveTrace.m b/webrtc/modules/video_coding/test/plotReceiveTrace.m
index 4d262aa165..4d262aa165 100644
--- a/webrtc/modules/video_coding/main/test/plotReceiveTrace.m
+++ b/webrtc/modules/video_coding/test/plotReceiveTrace.m
diff --git a/webrtc/modules/video_coding/main/test/plotTimingTest.m b/webrtc/modules/video_coding/test/plotTimingTest.m
index 52a6f303cd..52a6f303cd 100644
--- a/webrtc/modules/video_coding/main/test/plotTimingTest.m
+++ b/webrtc/modules/video_coding/test/plotTimingTest.m
diff --git a/webrtc/modules/video_coding/main/test/receiver_tests.h b/webrtc/modules/video_coding/test/receiver_tests.h
index 6d7b7beeb5..d6bac07392 100644
--- a/webrtc/modules/video_coding/main/test/receiver_tests.h
+++ b/webrtc/modules/video_coding/test/receiver_tests.h
@@ -11,20 +11,20 @@
#ifndef WEBRTC_MODULES_VIDEO_CODING_TEST_RECEIVER_TESTS_H_
#define WEBRTC_MODULES_VIDEO_CODING_TEST_RECEIVER_TESTS_H_
-#include "webrtc/common_types.h"
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp.h"
-#include "webrtc/modules/video_coding/main/interface/video_coding.h"
-#include "webrtc/modules/video_coding/main/test/test_util.h"
-#include "webrtc/modules/video_coding/main/test/video_source.h"
-#include "webrtc/typedefs.h"
-
#include <stdio.h>
#include <string>
+#include "webrtc/common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp.h"
+#include "webrtc/modules/video_coding/include/video_coding.h"
+#include "webrtc/modules/video_coding/test/test_util.h"
+#include "webrtc/modules/video_coding/test/video_source.h"
+#include "webrtc/typedefs.h"
+
class RtpDataCallback : public webrtc::NullRtpData {
public:
- RtpDataCallback(webrtc::VideoCodingModule* vcm) : vcm_(vcm) {}
+ explicit RtpDataCallback(webrtc::VideoCodingModule* vcm) : vcm_(vcm) {}
virtual ~RtpDataCallback() {}
int32_t OnReceivedPayloadData(
@@ -40,4 +40,4 @@ class RtpDataCallback : public webrtc::NullRtpData {
int RtpPlay(const CmdArgs& args);
-#endif // WEBRTC_MODULES_VIDEO_CODING_TEST_RECEIVER_TESTS_H_
+#endif // WEBRTC_MODULES_VIDEO_CODING_TEST_RECEIVER_TESTS_H_
diff --git a/webrtc/modules/video_coding/main/test/release_test.h b/webrtc/modules/video_coding/test/release_test.h
index e90dcaef01..ab9b2159d9 100644
--- a/webrtc/modules/video_coding/main/test/release_test.h
+++ b/webrtc/modules/video_coding/test/release_test.h
@@ -8,10 +8,10 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef RELEASE_TEST_H
-#define RELEASE_TEST_H
+#ifndef WEBRTC_MODULES_VIDEO_CODING_TEST_RELEASE_TEST_H_
+#define WEBRTC_MODULES_VIDEO_CODING_TEST_RELEASE_TEST_H_
int ReleaseTest();
int ReleaseTestPart2();
-#endif
+#endif // WEBRTC_MODULES_VIDEO_CODING_TEST_RELEASE_TEST_H_
diff --git a/webrtc/modules/video_coding/main/test/rtp_player.cc b/webrtc/modules/video_coding/test/rtp_player.cc
index 6717cf227d..9b6490618c 100644
--- a/webrtc/modules/video_coding/main/test/rtp_player.cc
+++ b/webrtc/modules/video_coding/test/rtp_player.cc
@@ -8,27 +8,27 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/video_coding/main/test/rtp_player.h"
+#include "webrtc/modules/video_coding/test/rtp_player.h"
#include <stdio.h>
#include <map>
#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_header_parser.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_payload_registry.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_receiver.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp.h"
-#include "webrtc/modules/video_coding/main/source/internal_defines.h"
-#include "webrtc/modules/video_coding/main/test/test_util.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_header_parser.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_payload_registry.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_receiver.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp.h"
+#include "webrtc/modules/video_coding/internal_defines.h"
+#include "webrtc/modules/video_coding/test/test_util.h"
#include "webrtc/system_wrappers/include/clock.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
#include "webrtc/test/rtp_file_reader.h"
#if 1
-# define DEBUG_LOG1(text, arg)
+#define DEBUG_LOG1(text, arg)
#else
-# define DEBUG_LOG1(text, arg) (printf(text "\n", arg))
+#define DEBUG_LOG1(text, arg) (printf(text "\n", arg))
#endif
namespace webrtc {
@@ -41,7 +41,9 @@ enum {
class RawRtpPacket {
public:
- RawRtpPacket(const uint8_t* data, size_t length, uint32_t ssrc,
+ RawRtpPacket(const uint8_t* data,
+ size_t length,
+ uint32_t ssrc,
uint16_t seq_num)
: data_(new uint8_t[length]),
length_(length),
@@ -140,7 +142,7 @@ class LostPackets {
CriticalSectionScoped cs(crit_sect_.get());
int count = 0;
for (ConstRtpPacketIterator it = packets_.begin(); it != packets_.end();
- ++it) {
+ ++it) {
if ((*it)->resend_time_ms() >= 0) {
count++;
}
@@ -164,7 +166,7 @@ class LostPackets {
printf("Packets still lost: %zd\n", packets_.size());
printf("Sequence numbers:\n");
for (ConstRtpPacketIterator it = packets_.begin(); it != packets_.end();
- ++it) {
+ ++it) {
printf("%u, ", (*it)->seq_num());
}
printf("\n");
@@ -231,17 +233,14 @@ class SsrcHandlers {
kDefaultTransmissionTimeOffsetExtensionId);
for (PayloadTypesIterator it = payload_types_.begin();
- it != payload_types_.end(); ++it) {
+ it != payload_types_.end(); ++it) {
VideoCodec codec;
memset(&codec, 0, sizeof(codec));
- strncpy(codec.plName, it->name().c_str(), sizeof(codec.plName)-1);
+ strncpy(codec.plName, it->name().c_str(), sizeof(codec.plName) - 1);
codec.plType = it->payload_type();
codec.codecType = it->codec_type();
- if (handler->rtp_module_->RegisterReceivePayload(codec.plName,
- codec.plType,
- 90000,
- 0,
- codec.maxBitrate) < 0) {
+ if (handler->rtp_module_->RegisterReceivePayload(
+ codec.plName, codec.plType, 90000, 0, codec.maxBitrate) < 0) {
return -1;
}
}
@@ -267,7 +266,8 @@ class SsrcHandlers {
private:
class Handler : public RtpStreamInterface {
public:
- Handler(uint32_t ssrc, const PayloadTypes& payload_types,
+ Handler(uint32_t ssrc,
+ const PayloadTypes& payload_types,
LostPackets* lost_packets)
: rtp_header_parser_(RtpHeaderParser::Create()),
rtp_payload_registry_(new RTPPayloadRegistry(
@@ -290,9 +290,7 @@ class SsrcHandlers {
}
virtual uint32_t ssrc() const { return ssrc_; }
- virtual const PayloadTypes& payload_types() const {
- return payload_types_;
- }
+ virtual const PayloadTypes& payload_types() const { return payload_types_; }
rtc::scoped_ptr<RtpHeaderParser> rtp_header_parser_;
rtc::scoped_ptr<RTPPayloadRegistry> rtp_payload_registry_;
@@ -351,8 +349,7 @@ class RtpPlayerImpl : public RtpPlayerInterface {
virtual int NextPacket(int64_t time_now) {
// Send any packets ready to be resent.
for (RawRtpPacket* packet = lost_packets_.NextPacketToResend(time_now);
- packet != NULL;
- packet = lost_packets_.NextPacketToResend(time_now)) {
+ packet != NULL; packet = lost_packets_.NextPacketToResend(time_now)) {
int ret = SendPacket(packet->data(), packet->length());
if (ret > 0) {
printf("Resend: %08x:%u\n", packet->ssrc(), packet->seq_num());
@@ -392,8 +389,7 @@ class RtpPlayerImpl : public RtpPlayerInterface {
if (!packet_source_->NextPacket(&next_packet_)) {
end_of_file_ = true;
return 0;
- }
- else if (next_packet_.length == 0) {
+ } else if (next_packet_.length == 0) {
return 0;
}
}
@@ -406,7 +402,7 @@ class RtpPlayerImpl : public RtpPlayerInterface {
virtual uint32_t TimeUntilNextPacket() const {
int64_t time_left = (next_rtp_time_ - first_packet_rtp_time_) -
- (clock_->TimeInMilliseconds() - first_packet_time_ms_);
+ (clock_->TimeInMilliseconds() - first_packet_time_ms_);
if (time_left < 0) {
return 0;
}
@@ -438,7 +434,7 @@ class RtpPlayerImpl : public RtpPlayerInterface {
if (no_loss_startup_ > 0) {
no_loss_startup_--;
- } else if ((rand() + 1.0)/(RAND_MAX + 1.0) < loss_rate_) {
+ } else if ((rand() + 1.0) / (RAND_MAX + 1.0) < loss_rate_) { // NOLINT
uint16_t seq_num = header.sequenceNumber;
lost_packets_.AddPacket(new RawRtpPacket(data, length, ssrc, seq_num));
DEBUG_LOG1("Dropped packet: %d!", header.header.sequenceNumber);
@@ -470,9 +466,12 @@ class RtpPlayerImpl : public RtpPlayerInterface {
};
RtpPlayerInterface* Create(const std::string& input_filename,
- PayloadSinkFactoryInterface* payload_sink_factory, Clock* clock,
- const PayloadTypes& payload_types, float loss_rate, int64_t rtt_ms,
- bool reordering) {
+ PayloadSinkFactoryInterface* payload_sink_factory,
+ Clock* clock,
+ const PayloadTypes& payload_types,
+ float loss_rate,
+ int64_t rtt_ms,
+ bool reordering) {
rtc::scoped_ptr<test::RtpFileReader> packet_source(
test::RtpFileReader::Create(test::RtpFileReader::kRtpDump,
input_filename));
diff --git a/webrtc/modules/video_coding/main/test/rtp_player.h b/webrtc/modules/video_coding/test/rtp_player.h
index 7459231416..e50fb9ac70 100644
--- a/webrtc/modules/video_coding/main/test/rtp_player.h
+++ b/webrtc/modules/video_coding/test/rtp_player.h
@@ -14,8 +14,8 @@
#include <string>
#include <vector>
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
-#include "webrtc/modules/video_coding/main/interface/video_coding_defines.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "webrtc/modules/video_coding/include/video_coding_defines.h"
namespace webrtc {
class Clock;
@@ -24,12 +24,12 @@ namespace rtpplayer {
class PayloadCodecTuple {
public:
- PayloadCodecTuple(uint8_t payload_type, const std::string& codec_name,
+ PayloadCodecTuple(uint8_t payload_type,
+ const std::string& codec_name,
VideoCodecType codec_type)
: name_(codec_name),
payload_type_(payload_type),
- codec_type_(codec_type) {
- }
+ codec_type_(codec_type) {}
const std::string& name() const { return name_; }
uint8_t payload_type() const { return payload_type_; }
@@ -87,11 +87,14 @@ class RtpPlayerInterface {
};
RtpPlayerInterface* Create(const std::string& inputFilename,
- PayloadSinkFactoryInterface* payloadSinkFactory, Clock* clock,
- const PayloadTypes& payload_types, float lossRate, int64_t rttMs,
- bool reordering);
+ PayloadSinkFactoryInterface* payloadSinkFactory,
+ Clock* clock,
+ const PayloadTypes& payload_types,
+ float lossRate,
+ int64_t rttMs,
+ bool reordering);
} // namespace rtpplayer
} // namespace webrtc
-#endif // WEBRTC_MODULES_VIDEO_CODING_TEST_RTP_PLAYER_H_
+#endif // WEBRTC_MODULES_VIDEO_CODING_TEST_RTP_PLAYER_H_
diff --git a/webrtc/modules/video_coding/main/source/test/stream_generator.cc b/webrtc/modules/video_coding/test/stream_generator.cc
index b365d96dc0..167d55faff 100644
--- a/webrtc/modules/video_coding/main/source/test/stream_generator.cc
+++ b/webrtc/modules/video_coding/test/stream_generator.cc
@@ -8,22 +8,21 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/video_coding/main/source/test/stream_generator.h"
+#include "webrtc/modules/video_coding/test/stream_generator.h"
#include <string.h>
#include <list>
#include "testing/gtest/include/gtest/gtest.h"
-#include "webrtc/modules/video_coding/main/source/packet.h"
-#include "webrtc/modules/video_coding/main/test/test_util.h"
+#include "webrtc/modules/video_coding/packet.h"
+#include "webrtc/modules/video_coding/test/test_util.h"
#include "webrtc/system_wrappers/include/clock.h"
namespace webrtc {
StreamGenerator::StreamGenerator(uint16_t start_seq_num, int64_t current_time)
- : packets_(), sequence_number_(start_seq_num), start_time_(current_time) {
-}
+ : packets_(), sequence_number_(start_seq_num), start_time_(current_time) {}
void StreamGenerator::Init(uint16_t start_seq_num, int64_t current_time) {
packets_.clear();
@@ -41,8 +40,8 @@ void StreamGenerator::GenerateFrame(FrameType type,
const int packet_size =
(kFrameSize + num_media_packets / 2) / num_media_packets;
bool marker_bit = (i == num_media_packets - 1);
- packets_.push_back(GeneratePacket(
- sequence_number_, timestamp, packet_size, (i == 0), marker_bit, type));
+ packets_.push_back(GeneratePacket(sequence_number_, timestamp, packet_size,
+ (i == 0), marker_bit, type));
++sequence_number_;
}
for (int i = 0; i < num_empty_packets; ++i) {
@@ -104,7 +103,9 @@ bool StreamGenerator::NextPacket(VCMPacket* packet) {
return true;
}
-void StreamGenerator::DropLastPacket() { packets_.pop_back(); }
+void StreamGenerator::DropLastPacket() {
+ packets_.pop_back();
+}
uint16_t StreamGenerator::NextSequenceNumber() const {
if (packets_.empty())
@@ -112,7 +113,9 @@ uint16_t StreamGenerator::NextSequenceNumber() const {
return packets_.front().seqNum;
}
-int StreamGenerator::PacketsRemaining() const { return packets_.size(); }
+int StreamGenerator::PacketsRemaining() const {
+ return packets_.size();
+}
std::list<VCMPacket>::iterator StreamGenerator::GetPacketIterator(int index) {
std::list<VCMPacket>::iterator it = packets_.begin();
diff --git a/webrtc/modules/video_coding/main/source/test/stream_generator.h b/webrtc/modules/video_coding/test/stream_generator.h
index 7902d16706..36b26db92e 100644
--- a/webrtc/modules/video_coding/main/source/test/stream_generator.h
+++ b/webrtc/modules/video_coding/test/stream_generator.h
@@ -7,13 +7,13 @@
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_TEST_STREAM_GENERATOR_H_
-#define WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_TEST_STREAM_GENERATOR_H_
+#ifndef WEBRTC_MODULES_VIDEO_CODING_TEST_STREAM_GENERATOR_H_
+#define WEBRTC_MODULES_VIDEO_CODING_TEST_STREAM_GENERATOR_H_
#include <list>
-#include "webrtc/modules/video_coding/main/source/packet.h"
-#include "webrtc/modules/video_coding/main/test/test_util.h"
+#include "webrtc/modules/video_coding/packet.h"
+#include "webrtc/modules/video_coding/test/test_util.h"
#include "webrtc/typedefs.h"
namespace webrtc {
@@ -69,4 +69,4 @@ class StreamGenerator {
} // namespace webrtc
-#endif // WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_TEST_STREAM_GENERATOR_H_
+#endif // WEBRTC_MODULES_VIDEO_CODING_TEST_STREAM_GENERATOR_H_
diff --git a/webrtc/modules/video_coding/main/test/subfigure.m b/webrtc/modules/video_coding/test/subfigure.m
index eadfcb69bd..eadfcb69bd 100644
--- a/webrtc/modules/video_coding/main/test/subfigure.m
+++ b/webrtc/modules/video_coding/test/subfigure.m
diff --git a/webrtc/modules/video_coding/main/test/test_util.cc b/webrtc/modules/video_coding/test/test_util.cc
index cd858da288..7ff663e395 100644
--- a/webrtc/modules/video_coding/main/test/test_util.cc
+++ b/webrtc/modules/video_coding/test/test_util.cc
@@ -8,7 +8,7 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/video_coding/main/test/test_util.h"
+#include "webrtc/modules/video_coding/test/test_util.h"
#include <assert.h>
#include <math.h>
@@ -17,7 +17,7 @@
#include <sstream>
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
-#include "webrtc/modules/video_coding/main/source/internal_defines.h"
+#include "webrtc/modules/video_coding/internal_defines.h"
#include "webrtc/test/testsupport/fileutils.h"
CmdArgs::CmdArgs()
@@ -28,12 +28,12 @@ CmdArgs::CmdArgs()
rtt(0),
inputFile(webrtc::test::ProjectRootPath() + "/resources/foreman_cif.yuv"),
outputFile(webrtc::test::OutputPath() +
- "video_coding_test_output_352x288.yuv") {
-}
+ "video_coding_test_output_352x288.yuv") {}
namespace {
-void SplitFilename(const std::string& filename, std::string* basename,
+void SplitFilename(const std::string& filename,
+ std::string* basename,
std::string* extension) {
assert(basename);
assert(extension);
@@ -41,7 +41,7 @@ void SplitFilename(const std::string& filename, std::string* basename,
std::string::size_type idx;
idx = filename.rfind('.');
- if(idx != std::string::npos) {
+ if (idx != std::string::npos) {
*basename = filename.substr(0, idx);
*extension = filename.substr(idx + 1);
} else {
@@ -50,21 +50,24 @@ void SplitFilename(const std::string& filename, std::string* basename,
}
}
-std::string AppendWidthHeightCount(const std::string& filename, int width,
- int height, int count) {
+std::string AppendWidthHeightCount(const std::string& filename,
+ int width,
+ int height,
+ int count) {
std::string basename;
std::string extension;
SplitFilename(filename, &basename, &extension);
std::stringstream ss;
- ss << basename << "_" << count << "." << width << "_" << height << "." <<
- extension;
+ ss << basename << "_" << count << "." << width << "_" << height << "."
+ << extension;
return ss.str();
}
} // namespace
FileOutputFrameReceiver::FileOutputFrameReceiver(
- const std::string& base_out_filename, uint32_t ssrc)
+ const std::string& base_out_filename,
+ uint32_t ssrc)
: out_filename_(),
out_file_(NULL),
timing_file_(NULL),
@@ -80,8 +83,8 @@ FileOutputFrameReceiver::FileOutputFrameReceiver(
SplitFilename(base_out_filename, &basename, &extension);
}
std::stringstream ss;
- ss << basename << "_" << std::hex << std::setw(8) << std::setfill('0') <<
- ssrc << "." << extension;
+ ss << basename << "_" << std::hex << std::setw(8) << std::setfill('0') << ssrc
+ << "." << extension;
out_filename_ = ss.str();
}
@@ -113,8 +116,8 @@ int32_t FileOutputFrameReceiver::FrameToRender(
printf("New size: %dx%d\n", video_frame.width(), video_frame.height());
width_ = video_frame.width();
height_ = video_frame.height();
- std::string filename_with_width_height = AppendWidthHeightCount(
- out_filename_, width_, height_, count_);
+ std::string filename_with_width_height =
+ AppendWidthHeightCount(out_filename_, width_, height_, count_);
++count_;
out_file_ = fopen(filename_with_width_height.c_str(), "wb");
if (out_file_ == NULL) {
@@ -122,7 +125,7 @@ int32_t FileOutputFrameReceiver::FrameToRender(
}
}
fprintf(timing_file_, "%u, %u\n", video_frame.timestamp(),
- webrtc::MaskWord64ToUWord32(video_frame.render_time_ms()));
+ webrtc::MaskWord64ToUWord32(video_frame.render_time_ms()));
if (PrintVideoFrame(video_frame, out_file_) < 0) {
return -1;
}
@@ -130,7 +133,7 @@ int32_t FileOutputFrameReceiver::FrameToRender(
}
webrtc::RtpVideoCodecTypes ConvertCodecType(const char* plname) {
- if (strncmp(plname,"VP8" , 3) == 0) {
+ if (strncmp(plname, "VP8", 3) == 0) {
return webrtc::kRtpVideoVp8;
} else {
// Default value.
diff --git a/webrtc/modules/video_coding/main/test/test_util.h b/webrtc/modules/video_coding/test/test_util.h
index 27f66fe011..45b88b9b50 100644
--- a/webrtc/modules/video_coding/main/test/test_util.h
+++ b/webrtc/modules/video_coding/test/test_util.h
@@ -18,8 +18,8 @@
#include <string>
#include "webrtc/base/constructormagic.h"
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/modules/video_coding/main/interface/video_coding.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/video_coding/include/video_coding.h"
#include "webrtc/system_wrappers/include/event_wrapper.h"
enum { kMaxNackListSize = 250 };
@@ -33,11 +33,13 @@ class NullEvent : public webrtc::EventWrapper {
virtual bool Reset() { return true; }
- virtual webrtc::EventTypeWrapper Wait(unsigned long max_time) {
+ virtual webrtc::EventTypeWrapper Wait(unsigned long max_time) { // NOLINT
return webrtc::kEventTimeout;
}
- virtual bool StartTimer(bool periodic, unsigned long time) { return true; }
+ virtual bool StartTimer(bool periodic, unsigned long time) { // NOLINT
+ return true;
+ }
virtual bool StopTimer() { return true; }
};
@@ -46,9 +48,7 @@ class NullEventFactory : public webrtc::EventFactory {
public:
virtual ~NullEventFactory() {}
- virtual webrtc::EventWrapper* CreateEvent() {
- return new NullEvent;
- }
+ virtual webrtc::EventWrapper* CreateEvent() { return new NullEvent; }
};
class FileOutputFrameReceiver : public webrtc::VCMReceiveCallback {
@@ -57,7 +57,7 @@ class FileOutputFrameReceiver : public webrtc::VCMReceiveCallback {
virtual ~FileOutputFrameReceiver();
// VCMReceiveCallback
- virtual int32_t FrameToRender(webrtc::VideoFrame& video_frame);
+ virtual int32_t FrameToRender(webrtc::VideoFrame& video_frame); // NOLINT
private:
std::string out_filename_;
@@ -83,4 +83,4 @@ class CmdArgs {
std::string outputFile;
};
-#endif
+#endif // WEBRTC_MODULES_VIDEO_CODING_TEST_TEST_UTIL_H_
diff --git a/webrtc/modules/video_coding/main/test/tester_main.cc b/webrtc/modules/video_coding/test/tester_main.cc
index 2885f00bd5..33ca82007d 100644
--- a/webrtc/modules/video_coding/main/test/tester_main.cc
+++ b/webrtc/modules/video_coding/test/tester_main.cc
@@ -8,25 +8,27 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-
#include <stdlib.h>
#include <string.h>
#include "gflags/gflags.h"
-#include "webrtc/modules/video_coding/main/interface/video_coding.h"
-#include "webrtc/modules/video_coding/main/test/receiver_tests.h"
+#include "webrtc/modules/video_coding/include/video_coding.h"
+#include "webrtc/modules/video_coding/test/receiver_tests.h"
#include "webrtc/test/testsupport/fileutils.h"
DEFINE_string(codec, "VP8", "Codec to use (VP8 or I420).");
DEFINE_int32(width, 352, "Width in pixels of the frames in the input file.");
DEFINE_int32(height, 288, "Height in pixels of the frames in the input file.");
DEFINE_int32(rtt, 0, "RTT (round-trip time), in milliseconds.");
-DEFINE_string(input_filename, webrtc::test::ProjectRootPath() +
- "/resources/foreman_cif.yuv", "Input file.");
-DEFINE_string(output_filename, webrtc::test::OutputPath() +
- "video_coding_test_output_352x288.yuv", "Output file.");
+DEFINE_string(input_filename,
+ webrtc::test::ProjectRootPath() + "/resources/foreman_cif.yuv",
+ "Input file.");
+DEFINE_string(output_filename,
+ webrtc::test::OutputPath() +
+ "video_coding_test_output_352x288.yuv",
+ "Output file.");
-using namespace webrtc;
+namespace webrtc {
/*
* Build with EVENT_DEBUG defined
@@ -36,36 +38,37 @@ using namespace webrtc;
int vcmMacrosTests = 0;
int vcmMacrosErrors = 0;
-int ParseArguments(CmdArgs& args) {
- args.width = FLAGS_width;
- args.height = FLAGS_height;
- if (args.width < 1 || args.height < 1) {
+int ParseArguments(CmdArgs* args) {
+ args->width = FLAGS_width;
+ args->height = FLAGS_height;
+ if (args->width < 1 || args->height < 1) {
return -1;
}
- args.codecName = FLAGS_codec;
- if (args.codecName == "VP8") {
- args.codecType = kVideoCodecVP8;
- } else if (args.codecName == "VP9") {
- args.codecType = kVideoCodecVP9;
- } else if (args.codecName == "I420") {
- args.codecType = kVideoCodecI420;
+ args->codecName = FLAGS_codec;
+ if (args->codecName == "VP8") {
+ args->codecType = kVideoCodecVP8;
+ } else if (args->codecName == "VP9") {
+ args->codecType = kVideoCodecVP9;
+ } else if (args->codecName == "I420") {
+ args->codecType = kVideoCodecI420;
} else {
- printf("Invalid codec: %s\n", args.codecName.c_str());
+ printf("Invalid codec: %s\n", args->codecName.c_str());
return -1;
}
- args.inputFile = FLAGS_input_filename;
- args.outputFile = FLAGS_output_filename;
- args.rtt = FLAGS_rtt;
+ args->inputFile = FLAGS_input_filename;
+ args->outputFile = FLAGS_output_filename;
+ args->rtt = FLAGS_rtt;
return 0;
}
+} // namespace webrtc
-int main(int argc, char **argv) {
+int main(int argc, char** argv) {
// Initialize WebRTC fileutils.h so paths to resources can be resolved.
webrtc::test::SetExecutablePath(argv[0]);
google::ParseCommandLineFlags(&argc, &argv, true);
CmdArgs args;
- if (ParseArguments(args) != 0) {
+ if (webrtc::ParseArguments(&args) != 0) {
printf("Unable to parse input arguments\n");
return -1;
}
diff --git a/webrtc/modules/video_coding/main/test/vcm_payload_sink_factory.cc b/webrtc/modules/video_coding/test/vcm_payload_sink_factory.cc
index 2d874cd1bd..c9ec372f41 100644
--- a/webrtc/modules/video_coding/main/test/vcm_payload_sink_factory.cc
+++ b/webrtc/modules/video_coding/test/vcm_payload_sink_factory.cc
@@ -8,23 +8,22 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/video_coding/main/test/vcm_payload_sink_factory.h"
+#include "webrtc/modules/video_coding/test/vcm_payload_sink_factory.h"
#include <assert.h>
#include <algorithm>
-#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp.h"
-#include "webrtc/modules/video_coding/main/test/test_util.h"
+#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp.h"
+#include "webrtc/modules/video_coding/test/test_util.h"
#include "webrtc/system_wrappers/include/clock.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
namespace webrtc {
namespace rtpplayer {
-class VcmPayloadSinkFactory::VcmPayloadSink
- : public PayloadSinkInterface,
- public VCMPacketRequestCallback {
+class VcmPayloadSinkFactory::VcmPayloadSink : public PayloadSinkInterface,
+ public VCMPacketRequestCallback {
public:
VcmPayloadSink(VcmPayloadSinkFactory* factory,
RtpStreamInterface* stream,
@@ -43,9 +42,7 @@ class VcmPayloadSinkFactory::VcmPayloadSink
vcm_->RegisterReceiveCallback(frame_receiver_.get());
}
- virtual ~VcmPayloadSink() {
- factory_->Remove(this);
- }
+ virtual ~VcmPayloadSink() { factory_->Remove(this); }
// PayloadSinkInterface
int32_t OnReceivedPayloadData(const uint8_t* payload_data,
@@ -136,14 +133,11 @@ PayloadSinkInterface* VcmPayloadSinkFactory::Create(
}
const PayloadTypes& plt = stream->payload_types();
- for (PayloadTypesIterator it = plt.begin(); it != plt.end();
- ++it) {
+ for (PayloadTypesIterator it = plt.begin(); it != plt.end(); ++it) {
if (it->codec_type() != kVideoCodecULPFEC &&
it->codec_type() != kVideoCodecRED) {
VideoCodec codec;
- if (VideoCodingModule::Codec(it->codec_type(), &codec) < 0) {
- return NULL;
- }
+ VideoCodingModule::Codec(it->codec_type(), &codec);
codec.plType = it->payload_type();
if (vcm->RegisterReceiveCodec(&codec, 1) < 0) {
return NULL;
diff --git a/webrtc/modules/video_coding/main/test/vcm_payload_sink_factory.h b/webrtc/modules/video_coding/test/vcm_payload_sink_factory.h
index ec94bdc382..dae53b0c08 100644
--- a/webrtc/modules/video_coding/main/test/vcm_payload_sink_factory.h
+++ b/webrtc/modules/video_coding/test/vcm_payload_sink_factory.h
@@ -8,13 +8,16 @@
* be found in the AUTHORS file in the root of the source tree.
*/
+#ifndef WEBRTC_MODULES_VIDEO_CODING_TEST_VCM_PAYLOAD_SINK_FACTORY_H_
+#define WEBRTC_MODULES_VIDEO_CODING_TEST_VCM_PAYLOAD_SINK_FACTORY_H_
+
#include <string>
#include <vector>
#include "webrtc/base/constructormagic.h"
#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/modules/video_coding/main/interface/video_coding_defines.h"
-#include "webrtc/modules/video_coding/main/test/rtp_player.h"
+#include "webrtc/modules/video_coding/include/video_coding_defines.h"
+#include "webrtc/modules/video_coding/test/rtp_player.h"
class NullEventFactory;
@@ -26,9 +29,11 @@ namespace rtpplayer {
class VcmPayloadSinkFactory : public PayloadSinkFactoryInterface {
public:
VcmPayloadSinkFactory(const std::string& base_out_filename,
- Clock* clock, bool protection_enabled,
+ Clock* clock,
+ bool protection_enabled,
VCMVideoProtection protection_method,
- int64_t rtt_ms, uint32_t render_delay_ms,
+ int64_t rtt_ms,
+ uint32_t render_delay_ms,
uint32_t min_playout_delay_ms);
virtual ~VcmPayloadSinkFactory();
@@ -61,3 +66,5 @@ class VcmPayloadSinkFactory : public PayloadSinkFactoryInterface {
};
} // namespace rtpplayer
} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_CODING_TEST_VCM_PAYLOAD_SINK_FACTORY_H_
diff --git a/webrtc/modules/video_coding/main/test/video_rtp_play.cc b/webrtc/modules/video_coding/test/video_rtp_play.cc
index 8460601bf5..cb092e381e 100644
--- a/webrtc/modules/video_coding/main/test/video_rtp_play.cc
+++ b/webrtc/modules/video_coding/test/video_rtp_play.cc
@@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/video_coding/main/test/receiver_tests.h"
-#include "webrtc/modules/video_coding/main/test/vcm_payload_sink_factory.h"
+#include "webrtc/modules/video_coding/test/receiver_tests.h"
+#include "webrtc/modules/video_coding/test/vcm_payload_sink_factory.h"
#include "webrtc/system_wrappers/include/trace.h"
#include "webrtc/test/testsupport/fileutils.h"
@@ -48,9 +48,9 @@ int RtpPlay(const CmdArgs& args) {
output_file = webrtc::test::OutputPath() + "RtpPlay_decoded.yuv";
webrtc::SimulatedClock clock(0);
- webrtc::rtpplayer::VcmPayloadSinkFactory factory(output_file, &clock,
- kConfigProtectionEnabled, kConfigProtectionMethod, kConfigRttMs,
- kConfigRenderDelayMs, kConfigMinPlayoutDelayMs);
+ webrtc::rtpplayer::VcmPayloadSinkFactory factory(
+ output_file, &clock, kConfigProtectionEnabled, kConfigProtectionMethod,
+ kConfigRttMs, kConfigRenderDelayMs, kConfigMinPlayoutDelayMs);
rtc::scoped_ptr<webrtc::rtpplayer::RtpPlayerInterface> rtp_player(
webrtc::rtpplayer::Create(args.inputFile, &factory, &clock, payload_types,
kConfigLossRate, kConfigRttMs,
@@ -63,7 +63,7 @@ int RtpPlay(const CmdArgs& args) {
while ((ret = rtp_player->NextPacket(clock.TimeInMilliseconds())) == 0) {
ret = factory.DecodeAndProcessAll(true);
if (ret < 0 || (kConfigMaxRuntimeMs > -1 &&
- clock.TimeInMilliseconds() >= kConfigMaxRuntimeMs)) {
+ clock.TimeInMilliseconds() >= kConfigMaxRuntimeMs)) {
break;
}
clock.AdvanceTimeMilliseconds(1);
diff --git a/webrtc/modules/video_coding/test/video_source.h b/webrtc/modules/video_coding/test/video_source.h
new file mode 100644
index 0000000000..19d7f50b26
--- /dev/null
+++ b/webrtc/modules/video_coding/test/video_source.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_TEST_VIDEO_SOURCE_H_
+#define WEBRTC_MODULES_VIDEO_CODING_TEST_VIDEO_SOURCE_H_
+
+#include <string>
+
+#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
+#include "webrtc/typedefs.h"
+
+enum VideoSize {
+ kUndefined,
+ kSQCIF, // 128*96 = 12 288
+ kQQVGA, // 160*120 = 19 200
+ kQCIF, // 176*144 = 25 344
+ kCGA, // 320*200 = 64 000
+ kQVGA, // 320*240 = 76 800
+ kSIF, // 352*240 = 84 480
+ kWQVGA, // 400*240 = 96 000
+ kCIF, // 352*288 = 101 376
+ kW288p, // 512*288 = 147 456 (WCIF)
+ k448p, // 576*448 = 281 088
+ kVGA, // 640*480 = 307 200
+ k432p, // 720*432 = 311 040
+ kW432p, // 768*432 = 331 776
+ k4SIF, // 704*480 = 337 920
+ kW448p, // 768*448 = 344 064
+ kNTSC, // 720*480 = 345 600
+ kFW448p, // 800*448 = 358 400
+ kWVGA, // 800*480 = 384 000
+ k4CIF, // 704*576 = 405 504
+ kSVGA, // 800*600 = 480 000
+ kW544p, // 960*544 = 522 240
+ kW576p, // 1024*576 = 589 824 (W4CIF)
+ kHD, // 960*720 = 691 200
+ kXGA, // 1024*768 = 786 432
+ kWHD, // 1280*720 = 921 600
+ kFullHD, // 1440*1080 = 1 555 200
+ kWFullHD, // 1920*1080 = 2 073 600
+
+ kNumberOfVideoSizes
+};
+
+class VideoSource {
+ public:
+ VideoSource();
+ VideoSource(std::string fileName,
+ VideoSize size,
+ float frameRate,
+ webrtc::VideoType type = webrtc::kI420);
+ VideoSource(std::string fileName,
+ uint16_t width,
+ uint16_t height,
+ float frameRate = 30,
+ webrtc::VideoType type = webrtc::kI420);
+
+ std::string GetFileName() const { return _fileName; }
+ uint16_t GetWidth() const { return _width; }
+ uint16_t GetHeight() const { return _height; }
+ webrtc::VideoType GetType() const { return _type; }
+ float GetFrameRate() const { return _frameRate; }
+ int GetWidthHeight(VideoSize size);
+
+ // Returns the filename with the path (including the leading slash) removed.
+ std::string GetName() const;
+
+ size_t GetFrameLength() const;
+
+ private:
+ std::string _fileName;
+ uint16_t _width;
+ uint16_t _height;
+ webrtc::VideoType _type;
+ float _frameRate;
+};
+
+#endif // WEBRTC_MODULES_VIDEO_CODING_TEST_VIDEO_SOURCE_H_
diff --git a/webrtc/modules/video_coding/main/source/timestamp_map.cc b/webrtc/modules/video_coding/timestamp_map.cc
index c68a5af7ba..97d2777658 100644
--- a/webrtc/modules/video_coding/main/source/timestamp_map.cc
+++ b/webrtc/modules/video_coding/timestamp_map.cc
@@ -11,8 +11,8 @@
#include <assert.h>
#include <stdlib.h>
-#include "webrtc/modules/interface/module_common_types.h"
-#include "webrtc/modules/video_coding/main/source/timestamp_map.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/video_coding/timestamp_map.h"
namespace webrtc {
@@ -20,11 +20,9 @@ VCMTimestampMap::VCMTimestampMap(size_t capacity)
: ring_buffer_(new TimestampDataTuple[capacity]),
capacity_(capacity),
next_add_idx_(0),
- next_pop_idx_(0) {
-}
+ next_pop_idx_(0) {}
-VCMTimestampMap::~VCMTimestampMap() {
-}
+VCMTimestampMap::~VCMTimestampMap() {}
void VCMTimestampMap::Add(uint32_t timestamp, VCMFrameInformation* data) {
ring_buffer_[next_add_idx_].timestamp = timestamp;
@@ -62,4 +60,4 @@ VCMFrameInformation* VCMTimestampMap::Pop(uint32_t timestamp) {
bool VCMTimestampMap::IsEmpty() const {
return (next_add_idx_ == next_pop_idx_);
}
-}
+} // namespace webrtc
diff --git a/webrtc/modules/video_coding/main/source/timestamp_map.h b/webrtc/modules/video_coding/timestamp_map.h
index 3d6f1bca0f..435d05895c 100644
--- a/webrtc/modules/video_coding/main/source/timestamp_map.h
+++ b/webrtc/modules/video_coding/timestamp_map.h
@@ -44,4 +44,4 @@ class VCMTimestampMap {
} // namespace webrtc
-#endif // WEBRTC_MODULES_VIDEO_CODING_TIMESTAMP_MAP_H_
+#endif // WEBRTC_MODULES_VIDEO_CODING_TIMESTAMP_MAP_H_
diff --git a/webrtc/modules/video_coding/main/source/timing.cc b/webrtc/modules/video_coding/timing.cc
index 8d59135876..08dc307524 100644
--- a/webrtc/modules/video_coding/main/source/timing.cc
+++ b/webrtc/modules/video_coding/timing.cc
@@ -8,19 +8,19 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/video_coding/main/source/timing.h"
+#include "webrtc/modules/video_coding/timing.h"
-#include "webrtc/modules/video_coding/main/source/internal_defines.h"
-#include "webrtc/modules/video_coding/main/source/jitter_buffer_common.h"
+#include <algorithm>
+
+#include "webrtc/modules/video_coding/internal_defines.h"
+#include "webrtc/modules/video_coding/jitter_buffer_common.h"
#include "webrtc/system_wrappers/include/clock.h"
#include "webrtc/system_wrappers/include/metrics.h"
#include "webrtc/system_wrappers/include/timestamp_extrapolator.h"
-
namespace webrtc {
-VCMTiming::VCMTiming(Clock* clock,
- VCMTiming* master_timing)
+VCMTiming::VCMTiming(Clock* clock, VCMTiming* master_timing)
: crit_sect_(CriticalSectionWrapper::CreateCriticalSection()),
clock_(clock),
master_(false),
@@ -62,14 +62,16 @@ void VCMTiming::UpdateHistograms() const {
if (elapsed_sec < metrics::kMinRunTimeInSeconds) {
return;
}
- RTC_HISTOGRAM_COUNTS_100("WebRTC.Video.DecodedFramesPerSecond",
+ RTC_HISTOGRAM_COUNTS_SPARSE_100(
+ "WebRTC.Video.DecodedFramesPerSecond",
static_cast<int>((num_decoded_frames_ / elapsed_sec) + 0.5f));
- RTC_HISTOGRAM_PERCENTAGE("WebRTC.Video.DelayedFramesToRenderer",
+ RTC_HISTOGRAM_PERCENTAGE_SPARSE(
+ "WebRTC.Video.DelayedFramesToRenderer",
num_delayed_decoded_frames_ * 100 / num_decoded_frames_);
if (num_delayed_decoded_frames_ > 0) {
- RTC_HISTOGRAM_COUNTS_1000(
+ RTC_HISTOGRAM_COUNTS_SPARSE_1000(
"WebRTC.Video.DelayedFramesToRenderer_AvgDelayInMs",
- sum_missed_render_deadline_ms_ / num_delayed_decoded_frames_);
+ sum_missed_render_deadline_ms_ / num_delayed_decoded_frames_);
}
}
@@ -118,8 +120,8 @@ void VCMTiming::UpdateCurrentDelay(uint32_t frame_timestamp) {
// Not initialized, set current delay to target.
current_delay_ms_ = target_delay_ms;
} else if (target_delay_ms != current_delay_ms_) {
- int64_t delay_diff_ms = static_cast<int64_t>(target_delay_ms) -
- current_delay_ms_;
+ int64_t delay_diff_ms =
+ static_cast<int64_t>(target_delay_ms) - current_delay_ms_;
// Never change the delay with more than 100 ms every second. If we're
// changing the delay in too large steps we will get noticeable freezes. By
// limiting the change we can increase the delay in smaller steps, which
@@ -128,11 +130,13 @@ void VCMTiming::UpdateCurrentDelay(uint32_t frame_timestamp) {
int64_t max_change_ms = 0;
if (frame_timestamp < 0x0000ffff && prev_frame_timestamp_ > 0xffff0000) {
// wrap
- max_change_ms = kDelayMaxChangeMsPerS * (frame_timestamp +
- (static_cast<int64_t>(1) << 32) - prev_frame_timestamp_) / 90000;
+ max_change_ms = kDelayMaxChangeMsPerS *
+ (frame_timestamp + (static_cast<int64_t>(1) << 32) -
+ prev_frame_timestamp_) /
+ 90000;
} else {
max_change_ms = kDelayMaxChangeMsPerS *
- (frame_timestamp - prev_frame_timestamp_) / 90000;
+ (frame_timestamp - prev_frame_timestamp_) / 90000;
}
if (max_change_ms <= 0) {
// Any changes less than 1 ms are truncated and
@@ -153,7 +157,7 @@ void VCMTiming::UpdateCurrentDelay(int64_t render_time_ms,
CriticalSectionScoped cs(crit_sect_);
uint32_t target_delay_ms = TargetDelayInternal();
int64_t delayed_ms = actual_decode_time_ms -
- (render_time_ms - MaxDecodeTimeMs() - render_delay_ms_);
+ (render_time_ms - MaxDecodeTimeMs() - render_delay_ms_);
if (delayed_ms < 0) {
return;
}
@@ -165,13 +169,13 @@ void VCMTiming::UpdateCurrentDelay(int64_t render_time_ms,
}
int32_t VCMTiming::StopDecodeTimer(uint32_t time_stamp,
- int64_t start_time_ms,
+ int32_t decode_time_ms,
int64_t now_ms,
int64_t render_time_ms) {
CriticalSectionScoped cs(crit_sect_);
- int32_t time_diff_ms = codec_timer_.StopTimer(start_time_ms, now_ms);
- assert(time_diff_ms >= 0);
- last_decode_ms_ = time_diff_ms;
+ codec_timer_.MaxFilter(decode_time_ms, now_ms);
+ assert(decode_time_ms >= 0);
+ last_decode_ms_ = decode_time_ms;
// Update stats.
++num_decoded_frames_;
@@ -191,8 +195,8 @@ void VCMTiming::IncomingTimestamp(uint32_t time_stamp, int64_t now_ms) {
ts_extrapolator_->Update(now_ms, time_stamp);
}
-int64_t VCMTiming::RenderTimeMs(uint32_t frame_timestamp, int64_t now_ms)
- const {
+int64_t VCMTiming::RenderTimeMs(uint32_t frame_timestamp,
+ int64_t now_ms) const {
CriticalSectionScoped cs(crit_sect_);
const int64_t render_time_ms = RenderTimeMsInternal(frame_timestamp, now_ms);
return render_time_ms;
@@ -201,7 +205,7 @@ int64_t VCMTiming::RenderTimeMs(uint32_t frame_timestamp, int64_t now_ms)
int64_t VCMTiming::RenderTimeMsInternal(uint32_t frame_timestamp,
int64_t now_ms) const {
int64_t estimated_complete_time_ms =
- ts_extrapolator_->ExtrapolateLocalTime(frame_timestamp);
+ ts_extrapolator_->ExtrapolateLocalTime(frame_timestamp);
if (estimated_complete_time_ms == -1) {
estimated_complete_time_ms = now_ms;
}
@@ -212,19 +216,19 @@ int64_t VCMTiming::RenderTimeMsInternal(uint32_t frame_timestamp,
}
// Must be called from inside a critical section.
-int32_t VCMTiming::MaxDecodeTimeMs(FrameType frame_type /*= kVideoFrameDelta*/)
- const {
+int32_t VCMTiming::MaxDecodeTimeMs(
+ FrameType frame_type /*= kVideoFrameDelta*/) const {
const int32_t decode_time_ms = codec_timer_.RequiredDecodeTimeMs(frame_type);
assert(decode_time_ms >= 0);
return decode_time_ms;
}
-uint32_t VCMTiming::MaxWaitingTime(int64_t render_time_ms, int64_t now_ms)
- const {
+uint32_t VCMTiming::MaxWaitingTime(int64_t render_time_ms,
+ int64_t now_ms) const {
CriticalSectionScoped cs(crit_sect_);
- const int64_t max_wait_time_ms = render_time_ms - now_ms -
- MaxDecodeTimeMs() - render_delay_ms_;
+ const int64_t max_wait_time_ms =
+ render_time_ms - now_ms - MaxDecodeTimeMs() - render_delay_ms_;
if (max_wait_time_ms < 0) {
return 0;
@@ -232,8 +236,8 @@ uint32_t VCMTiming::MaxWaitingTime(int64_t render_time_ms, int64_t now_ms)
return static_cast<uint32_t>(max_wait_time_ms);
}
-bool VCMTiming::EnoughTimeToDecode(uint32_t available_processing_time_ms)
- const {
+bool VCMTiming::EnoughTimeToDecode(
+ uint32_t available_processing_time_ms) const {
CriticalSectionScoped cs(crit_sect_);
int32_t max_decode_time_ms = MaxDecodeTimeMs();
if (max_decode_time_ms < 0) {
@@ -246,7 +250,8 @@ bool VCMTiming::EnoughTimeToDecode(uint32_t available_processing_time_ms)
max_decode_time_ms = 1;
}
return static_cast<int32_t>(available_processing_time_ms) -
- max_decode_time_ms > 0;
+ max_decode_time_ms >
+ 0;
}
uint32_t VCMTiming::TargetVideoDelay() const {
@@ -256,7 +261,7 @@ uint32_t VCMTiming::TargetVideoDelay() const {
uint32_t VCMTiming::TargetDelayInternal() const {
return std::max(min_playout_delay_ms_,
- jitter_delay_ms_ + MaxDecodeTimeMs() + render_delay_ms_);
+ jitter_delay_ms_ + MaxDecodeTimeMs() + render_delay_ms_);
}
void VCMTiming::GetTimings(int* decode_ms,
diff --git a/webrtc/modules/video_coding/main/source/timing.h b/webrtc/modules/video_coding/timing.h
index d3b8fa673f..a4d0cf4543 100644
--- a/webrtc/modules/video_coding/main/source/timing.h
+++ b/webrtc/modules/video_coding/timing.h
@@ -8,11 +8,11 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_TIMING_H_
-#define WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_TIMING_H_
+#ifndef WEBRTC_MODULES_VIDEO_CODING_TIMING_H_
+#define WEBRTC_MODULES_VIDEO_CODING_TIMING_H_
#include "webrtc/base/thread_annotations.h"
-#include "webrtc/modules/video_coding/main/source/codec_timer.h"
+#include "webrtc/modules/video_coding/codec_timer.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
#include "webrtc/typedefs.h"
@@ -25,8 +25,7 @@ class VCMTiming {
public:
// The primary timing component should be passed
// if this is the dual timing component.
- VCMTiming(Clock* clock,
- VCMTiming* master_timing = NULL);
+ explicit VCMTiming(Clock* clock, VCMTiming* master_timing = NULL);
~VCMTiming();
// Resets the timing to the initial state.
@@ -58,7 +57,7 @@ class VCMTiming {
// Stops the decoder timer, should be called when the decoder returns a frame
// or when the decoded frame callback is called.
int32_t StopDecodeTimer(uint32_t time_stamp,
- int64_t start_time_ms,
+ int32_t decode_time_ms,
int64_t now_ms,
int64_t render_time_ms);
@@ -124,4 +123,4 @@ class VCMTiming {
};
} // namespace webrtc
-#endif // WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_TIMING_H_
+#endif // WEBRTC_MODULES_VIDEO_CODING_TIMING_H_
diff --git a/webrtc/modules/video_coding/main/source/timing_unittest.cc b/webrtc/modules/video_coding/timing_unittest.cc
index 694a600c2a..2e8df83683 100644
--- a/webrtc/modules/video_coding/main/source/timing_unittest.cc
+++ b/webrtc/modules/video_coding/timing_unittest.cc
@@ -14,10 +14,10 @@
#include "testing/gtest/include/gtest/gtest.h"
-#include "webrtc/modules/video_coding/main/interface/video_coding.h"
-#include "webrtc/modules/video_coding/main/source/internal_defines.h"
-#include "webrtc/modules/video_coding/main/source/timing.h"
-#include "webrtc/modules/video_coding/main/test/test_util.h"
+#include "webrtc/modules/video_coding/include/video_coding.h"
+#include "webrtc/modules/video_coding/internal_defines.h"
+#include "webrtc/modules/video_coding/timing.h"
+#include "webrtc/modules/video_coding/test/test_util.h"
#include "webrtc/system_wrappers/include/clock.h"
#include "webrtc/system_wrappers/include/trace.h"
#include "webrtc/test/testsupport/fileutils.h"
@@ -55,8 +55,9 @@ TEST(ReceiverTiming, Tests) {
clock.AdvanceTimeMilliseconds(1000);
timing.SetJitterDelay(jitterDelayMs);
timing.UpdateCurrentDelay(timeStamp);
- waitTime = timing.MaxWaitingTime(timing.RenderTimeMs(
- timeStamp, clock.TimeInMilliseconds()), clock.TimeInMilliseconds());
+ waitTime = timing.MaxWaitingTime(
+ timing.RenderTimeMs(timeStamp, clock.TimeInMilliseconds()),
+ clock.TimeInMilliseconds());
// Since we gradually increase the delay we only get 100 ms every second.
EXPECT_EQ(jitterDelayMs - 10, waitTime);
@@ -85,9 +86,10 @@ TEST(ReceiverTiming, Tests) {
for (int i = 0; i < 10; i++) {
int64_t startTimeMs = clock.TimeInMilliseconds();
clock.AdvanceTimeMilliseconds(10);
- timing.StopDecodeTimer(timeStamp, startTimeMs,
- clock.TimeInMilliseconds(), timing.RenderTimeMs(
- timeStamp, clock.TimeInMilliseconds()));
+ timing.StopDecodeTimer(
+ timeStamp, clock.TimeInMilliseconds() - startTimeMs,
+ clock.TimeInMilliseconds(),
+ timing.RenderTimeMs(timeStamp, clock.TimeInMilliseconds()));
timeStamp += 90000 / 25;
clock.AdvanceTimeMilliseconds(1000 / 25 - 10);
timing.IncomingTimestamp(timeStamp, clock.TimeInMilliseconds());
@@ -105,7 +107,7 @@ TEST(ReceiverTiming, Tests) {
uint32_t minTotalDelayMs = 200;
timing.set_min_playout_delay(minTotalDelayMs);
clock.AdvanceTimeMilliseconds(5000);
- timeStamp += 5*90000;
+ timeStamp += 5 * 90000;
timing.UpdateCurrentDelay(timeStamp);
const int kRenderDelayMs = 10;
timing.set_render_delay(kRenderDelayMs);
@@ -121,7 +123,7 @@ TEST(ReceiverTiming, Tests) {
// Reset playout delay.
timing.set_min_playout_delay(0);
clock.AdvanceTimeMilliseconds(5000);
- timeStamp += 5*90000;
+ timeStamp += 5 * 90000;
timing.UpdateCurrentDelay(timeStamp);
}
@@ -135,8 +137,8 @@ TEST(ReceiverTiming, WrapAround) {
timing.IncomingTimestamp(timestamp, clock.TimeInMilliseconds());
clock.AdvanceTimeMilliseconds(1000 / kFramerate);
timestamp += 90000 / kFramerate;
- int64_t render_time = timing.RenderTimeMs(0xFFFFFFFFu,
- clock.TimeInMilliseconds());
+ int64_t render_time =
+ timing.RenderTimeMs(0xFFFFFFFFu, clock.TimeInMilliseconds());
EXPECT_EQ(3 * 1000 / kFramerate, render_time);
render_time = timing.RenderTimeMs(89u, // One second later in 90 kHz.
clock.TimeInMilliseconds());
diff --git a/webrtc/modules/video_coding/utility/frame_dropper.cc b/webrtc/modules/video_coding/utility/frame_dropper.cc
index 5262c5b88a..a0aa67be4e 100644
--- a/webrtc/modules/video_coding/utility/frame_dropper.cc
+++ b/webrtc/modules/video_coding/utility/frame_dropper.cc
@@ -8,12 +8,11 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/video_coding/utility/include/frame_dropper.h"
+#include "webrtc/modules/video_coding/utility/frame_dropper.h"
#include "webrtc/system_wrappers/include/trace.h"
-namespace webrtc
-{
+namespace webrtc {
const float kDefaultKeyFrameSizeAvgKBits = 0.9f;
const float kDefaultKeyFrameRatio = 0.99f;
@@ -22,339 +21,266 @@ const float kDefaultDropRatioMax = 0.96f;
const float kDefaultMaxTimeToDropFrames = 4.0f; // In seconds.
FrameDropper::FrameDropper()
-:
-_keyFrameSizeAvgKbits(kDefaultKeyFrameSizeAvgKBits),
-_keyFrameRatio(kDefaultKeyFrameRatio),
-_dropRatio(kDefaultDropRatioAlpha, kDefaultDropRatioMax),
-_enabled(true),
-_max_time_drops(kDefaultMaxTimeToDropFrames)
-{
- Reset();
+ : _keyFrameSizeAvgKbits(kDefaultKeyFrameSizeAvgKBits),
+ _keyFrameRatio(kDefaultKeyFrameRatio),
+ _dropRatio(kDefaultDropRatioAlpha, kDefaultDropRatioMax),
+ _enabled(true),
+ _max_time_drops(kDefaultMaxTimeToDropFrames) {
+ Reset();
}
FrameDropper::FrameDropper(float max_time_drops)
-:
-_keyFrameSizeAvgKbits(kDefaultKeyFrameSizeAvgKBits),
-_keyFrameRatio(kDefaultKeyFrameRatio),
-_dropRatio(kDefaultDropRatioAlpha, kDefaultDropRatioMax),
-_enabled(true),
-_max_time_drops(max_time_drops)
-{
- Reset();
+ : _keyFrameSizeAvgKbits(kDefaultKeyFrameSizeAvgKBits),
+ _keyFrameRatio(kDefaultKeyFrameRatio),
+ _dropRatio(kDefaultDropRatioAlpha, kDefaultDropRatioMax),
+ _enabled(true),
+ _max_time_drops(max_time_drops) {
+ Reset();
}
-void
-FrameDropper::Reset()
-{
- _keyFrameRatio.Reset(0.99f);
- _keyFrameRatio.Apply(1.0f, 1.0f/300.0f); // 1 key frame every 10th second in 30 fps
- _keyFrameSizeAvgKbits.Reset(0.9f);
- _keyFrameCount = 0;
- _accumulator = 0.0f;
- _accumulatorMax = 150.0f; // assume 300 kb/s and 0.5 s window
- _targetBitRate = 300.0f;
- _incoming_frame_rate = 30;
- _keyFrameSpreadFrames = 0.5f * _incoming_frame_rate;
- _dropNext = false;
- _dropRatio.Reset(0.9f);
- _dropRatio.Apply(0.0f, 0.0f); // Initialize to 0
- _dropCount = 0;
- _windowSize = 0.5f;
- _wasBelowMax = true;
- _fastMode = false; // start with normal (non-aggressive) mode
- // Cap for the encoder buffer level/accumulator, in secs.
- _cap_buffer_size = 3.0f;
- // Cap on maximum amount of dropped frames between kept frames, in secs.
- _max_time_drops = 4.0f;
+void FrameDropper::Reset() {
+ _keyFrameRatio.Reset(0.99f);
+ _keyFrameRatio.Apply(
+ 1.0f, 1.0f / 300.0f); // 1 key frame every 10th second in 30 fps
+ _keyFrameSizeAvgKbits.Reset(0.9f);
+ _keyFrameCount = 0;
+ _accumulator = 0.0f;
+ _accumulatorMax = 150.0f; // assume 300 kb/s and 0.5 s window
+ _targetBitRate = 300.0f;
+ _incoming_frame_rate = 30;
+ _keyFrameSpreadFrames = 0.5f * _incoming_frame_rate;
+ _dropNext = false;
+ _dropRatio.Reset(0.9f);
+ _dropRatio.Apply(0.0f, 0.0f); // Initialize to 0
+ _dropCount = 0;
+ _windowSize = 0.5f;
+ _wasBelowMax = true;
+ _fastMode = false; // start with normal (non-aggressive) mode
+ // Cap for the encoder buffer level/accumulator, in secs.
+ _cap_buffer_size = 3.0f;
+ // Cap on maximum amount of dropped frames between kept frames, in secs.
+ _max_time_drops = 4.0f;
}
-void
-FrameDropper::Enable(bool enable)
-{
- _enabled = enable;
+void FrameDropper::Enable(bool enable) {
+ _enabled = enable;
}
-void
-FrameDropper::Fill(size_t frameSizeBytes, bool deltaFrame)
-{
- if (!_enabled)
- {
- return;
- }
- float frameSizeKbits = 8.0f * static_cast<float>(frameSizeBytes) / 1000.0f;
- if (!deltaFrame && !_fastMode) // fast mode does not treat key-frames any different
- {
- _keyFrameSizeAvgKbits.Apply(1, frameSizeKbits);
- _keyFrameRatio.Apply(1.0, 1.0);
- if (frameSizeKbits > _keyFrameSizeAvgKbits.filtered())
- {
- // Remove the average key frame size since we
- // compensate for key frames when adding delta
- // frames.
- frameSizeKbits -= _keyFrameSizeAvgKbits.filtered();
- }
- else
- {
- // Shouldn't be negative, so zero is the lower bound.
- frameSizeKbits = 0;
- }
- if (_keyFrameRatio.filtered() > 1e-5 &&
- 1 / _keyFrameRatio.filtered() < _keyFrameSpreadFrames)
- {
- // We are sending key frames more often than our upper bound for
- // how much we allow the key frame compensation to be spread
- // out in time. Therefor we must use the key frame ratio rather
- // than keyFrameSpreadFrames.
- _keyFrameCount =
- static_cast<int32_t>(1 / _keyFrameRatio.filtered() + 0.5);
- }
- else
- {
- // Compensate for the key frame the following frames
- _keyFrameCount = static_cast<int32_t>(_keyFrameSpreadFrames + 0.5);
- }
+void FrameDropper::Fill(size_t frameSizeBytes, bool deltaFrame) {
+ if (!_enabled) {
+ return;
+ }
+ float frameSizeKbits = 8.0f * static_cast<float>(frameSizeBytes) / 1000.0f;
+ if (!deltaFrame &&
+ !_fastMode) { // fast mode does not treat key-frames any different
+ _keyFrameSizeAvgKbits.Apply(1, frameSizeKbits);
+ _keyFrameRatio.Apply(1.0, 1.0);
+ if (frameSizeKbits > _keyFrameSizeAvgKbits.filtered()) {
+ // Remove the average key frame size since we
+ // compensate for key frames when adding delta
+ // frames.
+ frameSizeKbits -= _keyFrameSizeAvgKbits.filtered();
+ } else {
+ // Shouldn't be negative, so zero is the lower bound.
+ frameSizeKbits = 0;
}
- else
- {
- // Decrease the keyFrameRatio
- _keyFrameRatio.Apply(1.0, 0.0);
+ if (_keyFrameRatio.filtered() > 1e-5 &&
+ 1 / _keyFrameRatio.filtered() < _keyFrameSpreadFrames) {
+ // We are sending key frames more often than our upper bound for
+ // how much we allow the key frame compensation to be spread
+      // out in time. Therefore we must use the key frame ratio rather
+ // than keyFrameSpreadFrames.
+ _keyFrameCount =
+ static_cast<int32_t>(1 / _keyFrameRatio.filtered() + 0.5);
+ } else {
+ // Compensate for the key frame the following frames
+ _keyFrameCount = static_cast<int32_t>(_keyFrameSpreadFrames + 0.5);
}
- // Change the level of the accumulator (bucket)
- _accumulator += frameSizeKbits;
- CapAccumulator();
+ } else {
+ // Decrease the keyFrameRatio
+ _keyFrameRatio.Apply(1.0, 0.0);
+ }
+ // Change the level of the accumulator (bucket)
+ _accumulator += frameSizeKbits;
+ CapAccumulator();
}
-void
-FrameDropper::Leak(uint32_t inputFrameRate)
-{
- if (!_enabled)
- {
- return;
- }
- if (inputFrameRate < 1)
- {
- return;
- }
- if (_targetBitRate < 0.0f)
- {
- return;
- }
- _keyFrameSpreadFrames = 0.5f * inputFrameRate;
- // T is the expected bits per frame (target). If all frames were the same size,
- // we would get T bits per frame. Notice that T is also weighted to be able to
- // force a lower frame rate if wanted.
- float T = _targetBitRate / inputFrameRate;
- if (_keyFrameCount > 0)
- {
- // Perform the key frame compensation
- if (_keyFrameRatio.filtered() > 0 &&
- 1 / _keyFrameRatio.filtered() < _keyFrameSpreadFrames)
- {
- T -= _keyFrameSizeAvgKbits.filtered() * _keyFrameRatio.filtered();
- }
- else
- {
- T -= _keyFrameSizeAvgKbits.filtered() / _keyFrameSpreadFrames;
- }
- _keyFrameCount--;
- }
- _accumulator -= T;
- if (_accumulator < 0.0f)
- {
- _accumulator = 0.0f;
+void FrameDropper::Leak(uint32_t inputFrameRate) {
+ if (!_enabled) {
+ return;
+ }
+ if (inputFrameRate < 1) {
+ return;
+ }
+ if (_targetBitRate < 0.0f) {
+ return;
+ }
+ _keyFrameSpreadFrames = 0.5f * inputFrameRate;
+  // T is the expected bits per frame (target).
+  // If all frames were the same size, we would get T bits per frame.
+  // Notice that T is also weighted to be able to force
+  // a lower frame rate if wanted.
+ float T = _targetBitRate / inputFrameRate;
+ if (_keyFrameCount > 0) {
+ // Perform the key frame compensation
+ if (_keyFrameRatio.filtered() > 0 &&
+ 1 / _keyFrameRatio.filtered() < _keyFrameSpreadFrames) {
+ T -= _keyFrameSizeAvgKbits.filtered() * _keyFrameRatio.filtered();
+ } else {
+ T -= _keyFrameSizeAvgKbits.filtered() / _keyFrameSpreadFrames;
}
- UpdateRatio();
+ _keyFrameCount--;
+ }
+ _accumulator -= T;
+ if (_accumulator < 0.0f) {
+ _accumulator = 0.0f;
+ }
+ UpdateRatio();
}
-void
-FrameDropper::UpdateNack(uint32_t nackBytes)
-{
- if (!_enabled)
- {
- return;
- }
- _accumulator += static_cast<float>(nackBytes) * 8.0f / 1000.0f;
+void FrameDropper::UpdateNack(uint32_t nackBytes) {
+ if (!_enabled) {
+ return;
+ }
+ _accumulator += static_cast<float>(nackBytes) * 8.0f / 1000.0f;
}
-void
-FrameDropper::FillBucket(float inKbits, float outKbits)
-{
- _accumulator += (inKbits - outKbits);
+void FrameDropper::FillBucket(float inKbits, float outKbits) {
+ _accumulator += (inKbits - outKbits);
}
-void
-FrameDropper::UpdateRatio()
-{
- if (_accumulator > 1.3f * _accumulatorMax)
- {
- // Too far above accumulator max, react faster
- _dropRatio.UpdateBase(0.8f);
+void FrameDropper::UpdateRatio() {
+ if (_accumulator > 1.3f * _accumulatorMax) {
+ // Too far above accumulator max, react faster
+ _dropRatio.UpdateBase(0.8f);
+ } else {
+ // Go back to normal reaction
+ _dropRatio.UpdateBase(0.9f);
+ }
+ if (_accumulator > _accumulatorMax) {
+ // We are above accumulator max, and should ideally
+ // drop a frame. Increase the dropRatio and drop
+ // the frame later.
+ if (_wasBelowMax) {
+ _dropNext = true;
}
- else
- {
- // Go back to normal reaction
- _dropRatio.UpdateBase(0.9f);
+ if (_fastMode) {
+ // always drop in aggressive mode
+ _dropNext = true;
}
- if (_accumulator > _accumulatorMax)
- {
- // We are above accumulator max, and should ideally
- // drop a frame. Increase the dropRatio and drop
- // the frame later.
- if (_wasBelowMax)
- {
- _dropNext = true;
- }
- if (_fastMode)
- {
- // always drop in aggressive mode
- _dropNext = true;
- }
- _dropRatio.Apply(1.0f, 1.0f);
- _dropRatio.UpdateBase(0.9f);
- }
- else
- {
- _dropRatio.Apply(1.0f, 0.0f);
- }
- _wasBelowMax = _accumulator < _accumulatorMax;
+ _dropRatio.Apply(1.0f, 1.0f);
+ _dropRatio.UpdateBase(0.9f);
+ } else {
+ _dropRatio.Apply(1.0f, 0.0f);
+ }
+ _wasBelowMax = _accumulator < _accumulatorMax;
}
-// This function signals when to drop frames to the caller. It makes use of the dropRatio
+// This function signals when to drop frames to the caller.
+// It makes use of the dropRatio
// to smooth out the drops over time.
-bool
-FrameDropper::DropFrame()
-{
- if (!_enabled)
- {
- return false;
+bool FrameDropper::DropFrame() {
+ if (!_enabled) {
+ return false;
+ }
+ if (_dropNext) {
+ _dropNext = false;
+ _dropCount = 0;
+ }
+
+ if (_dropRatio.filtered() >= 0.5f) { // Drops per keep
+ // limit is the number of frames we should drop between each kept frame
+ // to keep our drop ratio. limit is positive in this case.
+ float denom = 1.0f - _dropRatio.filtered();
+ if (denom < 1e-5) {
+ denom = 1e-5f;
+ }
+ int32_t limit = static_cast<int32_t>(1.0f / denom - 1.0f + 0.5f);
+ // Put a bound on the max amount of dropped frames between each kept
+ // frame, in terms of frame rate and window size (secs).
+ int max_limit = static_cast<int>(_incoming_frame_rate * _max_time_drops);
+ if (limit > max_limit) {
+ limit = max_limit;
}
- if (_dropNext)
- {
- _dropNext = false;
+ if (_dropCount < 0) {
+ // Reset the _dropCount since it was negative and should be positive.
+ if (_dropRatio.filtered() > 0.4f) {
+ _dropCount = -_dropCount;
+ } else {
_dropCount = 0;
+ }
}
-
- if (_dropRatio.filtered() >= 0.5f) // Drops per keep
- {
- // limit is the number of frames we should drop between each kept frame
- // to keep our drop ratio. limit is positive in this case.
- float denom = 1.0f - _dropRatio.filtered();
- if (denom < 1e-5)
- {
- denom = (float)1e-5;
- }
- int32_t limit = static_cast<int32_t>(1.0f / denom - 1.0f + 0.5f);
- // Put a bound on the max amount of dropped frames between each kept
- // frame, in terms of frame rate and window size (secs).
- int max_limit = static_cast<int>(_incoming_frame_rate *
- _max_time_drops);
- if (limit > max_limit) {
- limit = max_limit;
- }
- if (_dropCount < 0)
- {
- // Reset the _dropCount since it was negative and should be positive.
- if (_dropRatio.filtered() > 0.4f)
- {
- _dropCount = -_dropCount;
- }
- else
- {
- _dropCount = 0;
- }
- }
- if (_dropCount < limit)
- {
- // As long we are below the limit we should drop frames.
- _dropCount++;
- return true;
- }
- else
- {
- // Only when we reset _dropCount a frame should be kept.
- _dropCount = 0;
- return false;
- }
+ if (_dropCount < limit) {
+ // As long we are below the limit we should drop frames.
+ _dropCount++;
+ return true;
+ } else {
+ // Only when we reset _dropCount a frame should be kept.
+ _dropCount = 0;
+ return false;
}
- else if (_dropRatio.filtered() > 0.0f &&
- _dropRatio.filtered() < 0.5f) // Keeps per drop
- {
- // limit is the number of frames we should keep between each drop
- // in order to keep the drop ratio. limit is negative in this case,
- // and the _dropCount is also negative.
- float denom = _dropRatio.filtered();
- if (denom < 1e-5)
- {
- denom = (float)1e-5;
- }
- int32_t limit = -static_cast<int32_t>(1.0f / denom - 1.0f + 0.5f);
- if (_dropCount > 0)
- {
- // Reset the _dropCount since we have a positive
- // _dropCount, and it should be negative.
- if (_dropRatio.filtered() < 0.6f)
- {
- _dropCount = -_dropCount;
- }
- else
- {
- _dropCount = 0;
- }
- }
- if (_dropCount > limit)
- {
- if (_dropCount == 0)
- {
- // Drop frames when we reset _dropCount.
- _dropCount--;
- return true;
- }
- else
- {
- // Keep frames as long as we haven't reached limit.
- _dropCount--;
- return false;
- }
- }
- else
- {
- _dropCount = 0;
- return false;
- }
+ } else if (_dropRatio.filtered() > 0.0f &&
+ _dropRatio.filtered() < 0.5f) { // Keeps per drop
+ // limit is the number of frames we should keep between each drop
+ // in order to keep the drop ratio. limit is negative in this case,
+ // and the _dropCount is also negative.
+ float denom = _dropRatio.filtered();
+ if (denom < 1e-5) {
+ denom = 1e-5f;
}
- _dropCount = 0;
- return false;
+ int32_t limit = -static_cast<int32_t>(1.0f / denom - 1.0f + 0.5f);
+ if (_dropCount > 0) {
+ // Reset the _dropCount since we have a positive
+ // _dropCount, and it should be negative.
+ if (_dropRatio.filtered() < 0.6f) {
+ _dropCount = -_dropCount;
+ } else {
+ _dropCount = 0;
+ }
+ }
+ if (_dropCount > limit) {
+ if (_dropCount == 0) {
+ // Drop frames when we reset _dropCount.
+ _dropCount--;
+ return true;
+ } else {
+ // Keep frames as long as we haven't reached limit.
+ _dropCount--;
+ return false;
+ }
+ } else {
+ _dropCount = 0;
+ return false;
+ }
+ }
+ _dropCount = 0;
+ return false;
- // A simpler version, unfiltered and quicker
- //bool dropNext = _dropNext;
- //_dropNext = false;
- //return dropNext;
+ // A simpler version, unfiltered and quicker
+ // bool dropNext = _dropNext;
+ // _dropNext = false;
+ // return dropNext;
}
-void
-FrameDropper::SetRates(float bitRate, float incoming_frame_rate)
-{
- // Bit rate of -1 means infinite bandwidth.
- _accumulatorMax = bitRate * _windowSize; // bitRate * windowSize (in seconds)
- if (_targetBitRate > 0.0f && bitRate < _targetBitRate && _accumulator > _accumulatorMax)
- {
- // Rescale the accumulator level if the accumulator max decreases
- _accumulator = bitRate / _targetBitRate * _accumulator;
- }
- _targetBitRate = bitRate;
- CapAccumulator();
- _incoming_frame_rate = incoming_frame_rate;
+void FrameDropper::SetRates(float bitRate, float incoming_frame_rate) {
+ // Bit rate of -1 means infinite bandwidth.
+ _accumulatorMax = bitRate * _windowSize; // bitRate * windowSize (in seconds)
+ if (_targetBitRate > 0.0f && bitRate < _targetBitRate &&
+ _accumulator > _accumulatorMax) {
+ // Rescale the accumulator level if the accumulator max decreases
+ _accumulator = bitRate / _targetBitRate * _accumulator;
+ }
+ _targetBitRate = bitRate;
+ CapAccumulator();
+ _incoming_frame_rate = incoming_frame_rate;
}
-float
-FrameDropper::ActualFrameRate(uint32_t inputFrameRate) const
-{
- if (!_enabled)
- {
- return static_cast<float>(inputFrameRate);
- }
- return inputFrameRate * (1.0f - _dropRatio.filtered());
+float FrameDropper::ActualFrameRate(uint32_t inputFrameRate) const {
+ if (!_enabled) {
+ return static_cast<float>(inputFrameRate);
+ }
+ return inputFrameRate * (1.0f - _dropRatio.filtered());
}
// Put a cap on the accumulator, i.e., don't let it grow beyond some level.
@@ -366,5 +292,4 @@ void FrameDropper::CapAccumulator() {
_accumulator = max_accumulator;
}
}
-
-}
+} // namespace webrtc
diff --git a/webrtc/modules/video_coding/utility/frame_dropper.h b/webrtc/modules/video_coding/utility/frame_dropper.h
new file mode 100644
index 0000000000..7ec85ea880
--- /dev/null
+++ b/webrtc/modules/video_coding/utility/frame_dropper.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_UTILITY_FRAME_DROPPER_H_
+#define WEBRTC_MODULES_VIDEO_CODING_UTILITY_FRAME_DROPPER_H_
+
+#include <cstddef>
+
+#include "webrtc/base/exp_filter.h"
+#include "webrtc/typedefs.h"
+
+namespace webrtc {
+
+// The Frame Dropper implements a variant of the leaky bucket algorithm
+// for keeping track of when to drop frames to avoid bit rate
+// over use when the encoder can't keep its bit rate.
+class FrameDropper {
+ public:
+ FrameDropper();
+ explicit FrameDropper(float max_time_drops);
+ virtual ~FrameDropper() {}
+
+ // Resets the FrameDropper to its initial state.
+ // This means that the frameRateWeight is set to its
+ // default value as well.
+ virtual void Reset();
+
+ virtual void Enable(bool enable);
+ // Answers the question if it's time to drop a frame
+ // if we want to reach a given frame rate. Must be
+ // called for every frame.
+ //
+ // Return value : True if we should drop the current frame
+ virtual bool DropFrame();
+ // Updates the FrameDropper with the size of the latest encoded
+ // frame. The FrameDropper calculates a new drop ratio (can be
+ // seen as the probability to drop a frame) and updates its
+ // internal statistics.
+ //
+ // Input:
+ // - frameSizeBytes : The size of the latest frame
+ // returned from the encoder.
+ // - deltaFrame : True if the encoder returned
+ // a key frame.
+ virtual void Fill(size_t frameSizeBytes, bool deltaFrame);
+
+ virtual void Leak(uint32_t inputFrameRate);
+
+ void UpdateNack(uint32_t nackBytes);
+
+ // Sets the target bit rate and the frame rate produced by
+ // the camera.
+ //
+ // Input:
+ // - bitRate : The target bit rate
+ virtual void SetRates(float bitRate, float incoming_frame_rate);
+
+ // Return value : The current average frame rate produced
+ // if the DropFrame() function is used as
+ // instruction of when to drop frames.
+ virtual float ActualFrameRate(uint32_t inputFrameRate) const;
+
+ private:
+ void FillBucket(float inKbits, float outKbits);
+ void UpdateRatio();
+ void CapAccumulator();
+
+ rtc::ExpFilter _keyFrameSizeAvgKbits;
+ rtc::ExpFilter _keyFrameRatio;
+ float _keyFrameSpreadFrames;
+ int32_t _keyFrameCount;
+ float _accumulator;
+ float _accumulatorMax;
+ float _targetBitRate;
+ bool _dropNext;
+ rtc::ExpFilter _dropRatio;
+ int32_t _dropCount;
+ float _windowSize;
+ float _incoming_frame_rate;
+ bool _wasBelowMax;
+ bool _enabled;
+ bool _fastMode;
+ float _cap_buffer_size;
+ float _max_time_drops;
+}; // end of VCMFrameDropper class
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_CODING_UTILITY_FRAME_DROPPER_H_
diff --git a/webrtc/modules/video_coding/utility/include/frame_dropper.h b/webrtc/modules/video_coding/utility/include/frame_dropper.h
deleted file mode 100644
index 2b78a7264f..0000000000
--- a/webrtc/modules/video_coding/utility/include/frame_dropper.h
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef WEBRTC_MODULES_VIDEO_CODING_UTILITY_INCLUDE_FRAME_DROPPER_H_
-#define WEBRTC_MODULES_VIDEO_CODING_UTILITY_INCLUDE_FRAME_DROPPER_H_
-
-#include <cstddef>
-
-#include "webrtc/base/exp_filter.h"
-#include "webrtc/typedefs.h"
-
-namespace webrtc
-{
-
-// The Frame Dropper implements a variant of the leaky bucket algorithm
-// for keeping track of when to drop frames to avoid bit rate
-// over use when the encoder can't keep its bit rate.
-class FrameDropper
-{
-public:
- FrameDropper();
- explicit FrameDropper(float max_time_drops);
- virtual ~FrameDropper() {}
-
- // Resets the FrameDropper to its initial state.
- // This means that the frameRateWeight is set to its
- // default value as well.
- virtual void Reset();
-
- virtual void Enable(bool enable);
- // Answers the question if it's time to drop a frame
- // if we want to reach a given frame rate. Must be
- // called for every frame.
- //
- // Return value : True if we should drop the current frame
- virtual bool DropFrame();
- // Updates the FrameDropper with the size of the latest encoded
- // frame. The FrameDropper calculates a new drop ratio (can be
- // seen as the probability to drop a frame) and updates its
- // internal statistics.
- //
- // Input:
- // - frameSizeBytes : The size of the latest frame
- // returned from the encoder.
- // - deltaFrame : True if the encoder returned
- // a key frame.
- virtual void Fill(size_t frameSizeBytes, bool deltaFrame);
-
- virtual void Leak(uint32_t inputFrameRate);
-
- void UpdateNack(uint32_t nackBytes);
-
- // Sets the target bit rate and the frame rate produced by
- // the camera.
- //
- // Input:
- // - bitRate : The target bit rate
- virtual void SetRates(float bitRate, float incoming_frame_rate);
-
- // Return value : The current average frame rate produced
- // if the DropFrame() function is used as
- // instruction of when to drop frames.
- virtual float ActualFrameRate(uint32_t inputFrameRate) const;
-
-private:
- void FillBucket(float inKbits, float outKbits);
- void UpdateRatio();
- void CapAccumulator();
-
- rtc::ExpFilter _keyFrameSizeAvgKbits;
- rtc::ExpFilter _keyFrameRatio;
- float _keyFrameSpreadFrames;
- int32_t _keyFrameCount;
- float _accumulator;
- float _accumulatorMax;
- float _targetBitRate;
- bool _dropNext;
- rtc::ExpFilter _dropRatio;
- int32_t _dropCount;
- float _windowSize;
- float _incoming_frame_rate;
- bool _wasBelowMax;
- bool _enabled;
- bool _fastMode;
- float _cap_buffer_size;
- float _max_time_drops;
-}; // end of VCMFrameDropper class
-
-} // namespace webrtc
-
-#endif // WEBRTC_MODULES_VIDEO_CODING_UTILITY_INCLUDE_FRAME_DROPPER_H_
diff --git a/webrtc/modules/video_coding/utility/include/mock/mock_frame_dropper.h b/webrtc/modules/video_coding/utility/include/mock/mock_frame_dropper.h
deleted file mode 100644
index 1e31e5442a..0000000000
--- a/webrtc/modules/video_coding/utility/include/mock/mock_frame_dropper.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-#ifndef WEBRTC_MODULES_VIDEO_CODING_UTILITY_INCLUDE_MOCK_MOCK_FRAME_DROPPER_H_
-#define WEBRTC_MODULES_VIDEO_CODING_UTILITY_INCLUDE_MOCK_MOCK_FRAME_DROPPER_H_
-
-#include <string>
-
-#include "testing/gmock/include/gmock/gmock.h"
-#include "webrtc/modules/video_coding/utility/include/frame_dropper.h"
-#include "webrtc/typedefs.h"
-
-namespace webrtc {
-
-class MockFrameDropper : public FrameDropper {
- public:
- MOCK_METHOD0(Reset,
- void());
- MOCK_METHOD1(Enable,
- void(bool enable));
- MOCK_METHOD0(DropFrame,
- bool());
- MOCK_METHOD2(Fill,
- void(size_t frameSizeBytes, bool deltaFrame));
- MOCK_METHOD1(Leak,
- void(uint32_t inputFrameRate));
- MOCK_METHOD2(SetRates,
- void(float bitRate, float incoming_frame_rate));
- MOCK_CONST_METHOD1(ActualFrameRate,
- float(uint32_t inputFrameRate));
-};
-
-} // namespace webrtc
-
-#endif // WEBRTC_MODULES_VIDEO_CODING_UTILITY_INCLUDE_MOCK_MOCK_FRAME_DROPPER_H_
diff --git a/webrtc/modules/video_coding/utility/include/vp8_header_parser.h b/webrtc/modules/video_coding/utility/include/vp8_header_parser.h
deleted file mode 100644
index 88796ecd0e..0000000000
--- a/webrtc/modules/video_coding/utility/include/vp8_header_parser.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef WEBRTC_MODULES_VIDEO_CODING_UTILITY_VP8_PARSE_HEADER_H_
-#define WEBRTC_MODULES_VIDEO_CODING_UTILITY_VP8_PARSE_HEADER_H_
-
-namespace webrtc {
-
-namespace vp8 {
-
-enum {
- MB_FEATURE_TREE_PROBS = 3,
- NUM_MB_SEGMENTS = 4,
- NUM_REF_LF_DELTAS = 4,
- NUM_MODE_LF_DELTAS = 4,
-};
-
-typedef struct VP8BitReader VP8BitReader;
-struct VP8BitReader {
- // Boolean decoder.
- uint32_t value_; // Current value.
- uint32_t range_; // Current range minus 1. In [127, 254] interval.
- int bits_; // Number of valid bits left.
- // Read buffer.
- const uint8_t* buf_; // Next byte to be read.
- const uint8_t* buf_end_; // End of read buffer.
- int eof_; // True if input is exhausted.
-};
-
-const uint8_t kVP8Log2Range[128] = {
- 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4,
- 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 0
-};
-
-// range = ((range - 1) << kVP8Log2Range[range]) + 1
-const uint8_t kVP8NewRange[128] = {
- 127, 127, 191, 127, 159, 191, 223, 127,
- 143, 159, 175, 191, 207, 223, 239, 127,
- 135, 143, 151, 159, 167, 175, 183, 191,
- 199, 207, 215, 223, 231, 239, 247, 127,
- 131, 135, 139, 143, 147, 151, 155, 159,
- 163, 167, 171, 175, 179, 183, 187, 191,
- 195, 199, 203, 207, 211, 215, 219, 223,
- 227, 231, 235, 239, 243, 247, 251, 127,
- 129, 131, 133, 135, 137, 139, 141, 143,
- 145, 147, 149, 151, 153, 155, 157, 159,
- 161, 163, 165, 167, 169, 171, 173, 175,
- 177, 179, 181, 183, 185, 187, 189, 191,
- 193, 195, 197, 199, 201, 203, 205, 207,
- 209, 211, 213, 215, 217, 219, 221, 223,
- 225, 227, 229, 231, 233, 235, 237, 239,
- 241, 243, 245, 247, 249, 251, 253, 127
-};
-
-// Gets the QP, QP range: [0, 127].
-// Returns true on success, false otherwise.
-bool GetQp(const uint8_t* buf, size_t length, int* qp);
-
-} // namespace vp8
-
-} // namespace webrtc
-
-#endif // WEBRTC_MODULES_VIDEO_CODING_UTILITY_VP8_PARSE_HEADER_H_
diff --git a/webrtc/modules/video_coding/utility/mock/mock_frame_dropper.h b/webrtc/modules/video_coding/utility/mock/mock_frame_dropper.h
new file mode 100644
index 0000000000..b68a4b8d5d
--- /dev/null
+++ b/webrtc/modules/video_coding/utility/mock/mock_frame_dropper.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef WEBRTC_MODULES_VIDEO_CODING_UTILITY_MOCK_MOCK_FRAME_DROPPER_H_
+#define WEBRTC_MODULES_VIDEO_CODING_UTILITY_MOCK_MOCK_FRAME_DROPPER_H_
+
+#include <string>
+
+#include "testing/gmock/include/gmock/gmock.h"
+#include "webrtc/modules/video_coding/utility/frame_dropper.h"
+#include "webrtc/typedefs.h"
+
+namespace webrtc {
+
+class MockFrameDropper : public FrameDropper {
+ public:
+ MOCK_METHOD0(Reset, void());
+ MOCK_METHOD1(Enable, void(bool enable));
+ MOCK_METHOD0(DropFrame, bool());
+ MOCK_METHOD2(Fill, void(size_t frameSizeBytes, bool deltaFrame));
+ MOCK_METHOD1(Leak, void(uint32_t inputFrameRate));
+ MOCK_METHOD2(SetRates, void(float bitRate, float incoming_frame_rate));
+ MOCK_CONST_METHOD1(ActualFrameRate, float(uint32_t inputFrameRate));
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_CODING_UTILITY_MOCK_MOCK_FRAME_DROPPER_H_
diff --git a/webrtc/modules/video_coding/utility/include/moving_average.h b/webrtc/modules/video_coding/utility/moving_average.h
index 49c42c4ed4..494bfd51fb 100644
--- a/webrtc/modules/video_coding/utility/include/moving_average.h
+++ b/webrtc/modules/video_coding/utility/moving_average.h
@@ -16,7 +16,7 @@
#include "webrtc/typedefs.h"
namespace webrtc {
-template<class T>
+template <class T>
class MovingAverage {
public:
MovingAverage();
@@ -30,17 +30,17 @@ class MovingAverage {
std::list<T> samples_;
};
-template<class T>
-MovingAverage<T>::MovingAverage() : sum_(static_cast<T>(0)) {
-}
+template <class T>
+MovingAverage<T>::MovingAverage()
+ : sum_(static_cast<T>(0)) {}
-template<class T>
+template <class T>
void MovingAverage<T>::AddSample(T sample) {
samples_.push_back(sample);
sum_ += sample;
}
-template<class T>
+template <class T>
bool MovingAverage<T>::GetAverage(size_t num_samples, T* avg) {
if (num_samples > samples_.size())
return false;
@@ -55,17 +55,17 @@ bool MovingAverage<T>::GetAverage(size_t num_samples, T* avg) {
return true;
}
-template<class T>
+template <class T>
void MovingAverage<T>::Reset() {
sum_ = static_cast<T>(0);
samples_.clear();
}
-template<class T>
+template <class T>
int MovingAverage<T>::size() {
return samples_.size();
}
} // namespace webrtc
-#endif // WEBRTC_MODULES_VIDEO_CODING_MOVING_AVERAGE_SCALER_H_
+#endif // WEBRTC_MODULES_VIDEO_CODING_UTILITY_MOVING_AVERAGE_H_
diff --git a/webrtc/modules/video_coding/utility/qp_parser.cc b/webrtc/modules/video_coding/utility/qp_parser.cc
index 62ce31351e..0916cb0094 100644
--- a/webrtc/modules/video_coding/utility/qp_parser.cc
+++ b/webrtc/modules/video_coding/utility/qp_parser.cc
@@ -8,10 +8,10 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/video_coding/utility/include/qp_parser.h"
+#include "webrtc/modules/video_coding/utility/qp_parser.h"
#include "webrtc/common_types.h"
-#include "webrtc/modules/video_coding/utility/include/vp8_header_parser.h"
+#include "webrtc/modules/video_coding/utility/vp8_header_parser.h"
namespace webrtc {
diff --git a/webrtc/modules/video_coding/utility/include/qp_parser.h b/webrtc/modules/video_coding/utility/qp_parser.h
index 805b37b45c..0b644ef61c 100644
--- a/webrtc/modules/video_coding/utility/include/qp_parser.h
+++ b/webrtc/modules/video_coding/utility/qp_parser.h
@@ -11,7 +11,7 @@
#ifndef WEBRTC_MODULES_VIDEO_CODING_UTILITY_QP_PARSER_H_
#define WEBRTC_MODULES_VIDEO_CODING_UTILITY_QP_PARSER_H_
-#include "webrtc/modules/video_coding/main/source/encoded_frame.h"
+#include "webrtc/modules/video_coding/encoded_frame.h"
namespace webrtc {
diff --git a/webrtc/modules/video_coding/utility/quality_scaler.cc b/webrtc/modules/video_coding/utility/quality_scaler.cc
index ec7715230e..76bf9f5b03 100644
--- a/webrtc/modules/video_coding/utility/quality_scaler.cc
+++ b/webrtc/modules/video_coding/utility/quality_scaler.cc
@@ -7,7 +7,7 @@
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/video_coding/utility/include/quality_scaler.h"
+#include "webrtc/modules/video_coding/utility/quality_scaler.h"
namespace webrtc {
@@ -26,8 +26,7 @@ QualityScaler::QualityScaler()
downscale_shift_(0),
framerate_down_(false),
min_width_(kDefaultMinDownscaleDimension),
- min_height_(kDefaultMinDownscaleDimension) {
-}
+ min_height_(kDefaultMinDownscaleDimension) {}
void QualityScaler::Init(int low_qp_threshold,
int high_qp_threshold,
@@ -91,7 +90,7 @@ void QualityScaler::OnEncodeFrame(const VideoFrame& frame) {
AdjustScale(false);
}
} else if (average_qp_.GetAverage(num_samples_, &avg_qp) &&
- avg_qp <= low_qp_threshold_) {
+ avg_qp <= low_qp_threshold_) {
if (use_framerate_reduction_ && framerate_down_) {
target_framerate_ = -1;
framerate_down_ = false;
@@ -104,7 +103,7 @@ void QualityScaler::OnEncodeFrame(const VideoFrame& frame) {
assert(downscale_shift_ >= 0);
for (int shift = downscale_shift_;
shift > 0 && (res_.width / 2 >= min_width_) &&
- (res_.height / 2 >= min_height_);
+ (res_.height / 2 >= min_height_);
--shift) {
res_.width /= 2;
res_.height /= 2;
@@ -124,13 +123,8 @@ const VideoFrame& QualityScaler::GetScaledFrame(const VideoFrame& frame) {
if (res.width == frame.width())
return frame;
- scaler_.Set(frame.width(),
- frame.height(),
- res.width,
- res.height,
- kI420,
- kI420,
- kScaleBox);
+ scaler_.Set(frame.width(), frame.height(), res.width, res.height, kI420,
+ kI420, kScaleBox);
if (scaler_.Scale(frame, &scaled_frame_) != 0)
return frame;
diff --git a/webrtc/modules/video_coding/utility/include/quality_scaler.h b/webrtc/modules/video_coding/utility/quality_scaler.h
index 29a1496c05..a1233cca51 100644
--- a/webrtc/modules/video_coding/utility/include/quality_scaler.h
+++ b/webrtc/modules/video_coding/utility/quality_scaler.h
@@ -12,7 +12,7 @@
#define WEBRTC_MODULES_VIDEO_CODING_UTILITY_QUALITY_SCALER_H_
#include "webrtc/common_video/libyuv/include/scaler.h"
-#include "webrtc/modules/video_coding/utility/include/moving_average.h"
+#include "webrtc/modules/video_coding/utility/moving_average.h"
namespace webrtc {
class QualityScaler {
diff --git a/webrtc/modules/video_coding/utility/quality_scaler_unittest.cc b/webrtc/modules/video_coding/utility/quality_scaler_unittest.cc
index 2ce1107472..bad73a748c 100644
--- a/webrtc/modules/video_coding/utility/quality_scaler_unittest.cc
+++ b/webrtc/modules/video_coding/utility/quality_scaler_unittest.cc
@@ -8,7 +8,7 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/video_coding/utility/include/quality_scaler.h"
+#include "webrtc/modules/video_coding/utility/quality_scaler.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -33,6 +33,7 @@ class QualityScalerTest : public ::testing::Test {
int width;
int height;
};
+
protected:
enum ScaleDirection {
kKeepScaleAtHighQp,
@@ -43,8 +44,8 @@ class QualityScalerTest : public ::testing::Test {
enum BadQualityMetric { kDropFrame, kReportLowQP };
QualityScalerTest() {
- input_frame_.CreateEmptyFrame(
- kWidth, kHeight, kWidth, kHalfWidth, kHalfWidth);
+ input_frame_.CreateEmptyFrame(kWidth, kHeight, kWidth, kHalfWidth,
+ kHalfWidth);
qs_.Init(kMaxQp / QualityScaler::kDefaultLowQpDenominator, kHighQp, false);
qs_.ReportFramerate(kFramerate);
qs_.OnEncodeFrame(input_frame_);
@@ -97,7 +98,8 @@ class QualityScalerTest : public ::testing::Test {
int num_second,
int initial_framerate);
- void VerifyQualityAdaptation(int initial_framerate, int seconds,
+ void VerifyQualityAdaptation(int initial_framerate,
+ int seconds,
bool expect_spatial_resize,
bool expect_framerate_reduction);
@@ -183,8 +185,8 @@ TEST_F(QualityScalerTest, DoesNotDownscaleAfterHalfFramedrop) {
void QualityScalerTest::ContinuouslyDownscalesByHalfDimensionsAndBackUp() {
const int initial_min_dimension = input_frame_.width() < input_frame_.height()
- ? input_frame_.width()
- : input_frame_.height();
+ ? input_frame_.width()
+ : input_frame_.height();
int min_dimension = initial_min_dimension;
int current_shift = 0;
// Drop all frames to force-trigger downscaling.
@@ -229,14 +231,14 @@ TEST_F(QualityScalerTest,
const int kOddWidth = 517;
const int kHalfOddWidth = (kOddWidth + 1) / 2;
const int kOddHeight = 1239;
- input_frame_.CreateEmptyFrame(
- kOddWidth, kOddHeight, kOddWidth, kHalfOddWidth, kHalfOddWidth);
+ input_frame_.CreateEmptyFrame(kOddWidth, kOddHeight, kOddWidth, kHalfOddWidth,
+ kHalfOddWidth);
ContinuouslyDownscalesByHalfDimensionsAndBackUp();
}
void QualityScalerTest::DoesNotDownscaleFrameDimensions(int width, int height) {
- input_frame_.CreateEmptyFrame(
- width, height, width, (width + 1) / 2, (width + 1) / 2);
+ input_frame_.CreateEmptyFrame(width, height, width, (width + 1) / 2,
+ (width + 1) / 2);
for (int i = 0; i < kFramerate * kNumSeconds; ++i) {
qs_.ReportDroppedFrame();
@@ -259,7 +261,9 @@ TEST_F(QualityScalerTest, DoesNotDownscaleFrom1Px) {
}
QualityScalerTest::Resolution QualityScalerTest::TriggerResolutionChange(
- BadQualityMetric dropframe_lowqp, int num_second, int initial_framerate) {
+ BadQualityMetric dropframe_lowqp,
+ int num_second,
+ int initial_framerate) {
QualityScalerTest::Resolution res;
res.framerate = initial_framerate;
qs_.OnEncodeFrame(input_frame_);
@@ -288,7 +292,9 @@ QualityScalerTest::Resolution QualityScalerTest::TriggerResolutionChange(
}
void QualityScalerTest::VerifyQualityAdaptation(
- int initial_framerate, int seconds, bool expect_spatial_resize,
+ int initial_framerate,
+ int seconds,
+ bool expect_spatial_resize,
bool expect_framerate_reduction) {
const int kDisabledBadQpThreshold = kMaxQp + 1;
qs_.Init(kMaxQp / QualityScaler::kDefaultLowQpDenominator,
@@ -298,8 +304,8 @@ void QualityScalerTest::VerifyQualityAdaptation(
int init_height = qs_.GetScaledResolution().height;
// Test reducing framerate by dropping frame continuously.
- QualityScalerTest::Resolution res = TriggerResolutionChange(
- kDropFrame, seconds, initial_framerate);
+ QualityScalerTest::Resolution res =
+ TriggerResolutionChange(kDropFrame, seconds, initial_framerate);
if (expect_framerate_reduction) {
EXPECT_LT(res.framerate, initial_framerate);
diff --git a/webrtc/modules/video_coding/utility/video_coding_utility.gyp b/webrtc/modules/video_coding/utility/video_coding_utility.gyp
index f0764bb7bf..42cbb3d4e0 100644
--- a/webrtc/modules/video_coding/utility/video_coding_utility.gyp
+++ b/webrtc/modules/video_coding/utility/video_coding_utility.gyp
@@ -19,14 +19,14 @@
],
'sources': [
'frame_dropper.cc',
- 'include/frame_dropper.h',
- 'include/moving_average.h',
- 'include/qp_parser.h',
- 'include/quality_scaler.h',
- 'include/vp8_header_parser.h',
+ 'frame_dropper.h',
+ 'moving_average.h',
'qp_parser.cc',
+ 'qp_parser.h',
'quality_scaler.cc',
+ 'quality_scaler.h',
'vp8_header_parser.cc',
+ 'vp8_header_parser.h',
],
},
], # targets
diff --git a/webrtc/modules/video_coding/utility/vp8_header_parser.cc b/webrtc/modules/video_coding/utility/vp8_header_parser.cc
index dc5a0e5d15..631385d0f2 100644
--- a/webrtc/modules/video_coding/utility/vp8_header_parser.cc
+++ b/webrtc/modules/video_coding/utility/vp8_header_parser.cc
@@ -7,12 +7,9 @@
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
-#include <stdint.h>
-#include <stdio.h>
+#include "webrtc/modules/video_coding/utility/vp8_header_parser.h"
-#include "webrtc/modules/video_coding/utility/include/vp8_header_parser.h"
-
-#include "webrtc/system_wrappers/include/logging.h"
+#include "webrtc/base/logging.h"
namespace webrtc {
@@ -46,12 +43,12 @@ static void VP8LoadNewBytes(VP8BitReader* const br) {
const uint32_t in_bits = *(const uint32_t*)(br->buf_);
br->buf_ += BITS >> 3;
#if defined(WEBRTC_ARCH_BIG_ENDIAN)
- bits = static_cast<uint32_t>(in_bits);
- if (BITS != 8 * sizeof(uint32_t))
- bits >>= (8 * sizeof(uint32_t) - BITS);
+ bits = static_cast<uint32_t>(in_bits);
+ if (BITS != 8 * sizeof(uint32_t))
+ bits >>= (8 * sizeof(uint32_t) - BITS);
#else
- bits = BSwap32(in_bits);
- bits >>= 32 - BITS;
+ bits = BSwap32(in_bits);
+ bits >>= 32 - BITS;
#endif
br->value_ = bits | (br->value_ << BITS);
br->bits_ += BITS;
@@ -63,12 +60,12 @@ static void VP8LoadNewBytes(VP8BitReader* const br) {
static void VP8InitBitReader(VP8BitReader* const br,
const uint8_t* const start,
const uint8_t* const end) {
- br->range_ = 255 - 1;
- br->buf_ = start;
+ br->range_ = 255 - 1;
+ br->buf_ = start;
br->buf_end_ = end;
- br->value_ = 0;
- br->bits_ = -8; // To load the very first 8bits.
- br->eof_ = 0;
+ br->value_ = 0;
+ br->bits_ = -8; // To load the very first 8bits.
+ br->eof_ = 0;
VP8LoadNewBytes(br);
}
@@ -125,7 +122,7 @@ static void ParseSegmentHeader(VP8BitReader* br) {
int s;
VP8Get(br);
for (s = 0; s < NUM_MB_SEGMENTS; ++s) {
- VP8Get(br) ? VP8GetSignedValue(br, 7) : 0;
+ VP8Get(br) ? VP8GetSignedValue(br, 7) : 0;
}
for (s = 0; s < NUM_MB_SEGMENTS; ++s) {
VP8Get(br) ? VP8GetSignedValue(br, 6) : 0;
diff --git a/webrtc/modules/video_coding/utility/vp8_header_parser.h b/webrtc/modules/video_coding/utility/vp8_header_parser.h
new file mode 100644
index 0000000000..b0c684c578
--- /dev/null
+++ b/webrtc/modules/video_coding/utility/vp8_header_parser.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_UTILITY_VP8_HEADER_PARSER_H_
+#define WEBRTC_MODULES_VIDEO_CODING_UTILITY_VP8_HEADER_PARSER_H_
+
+#include <stdint.h>
+#include <stdio.h>
+
+namespace webrtc {
+
+namespace vp8 {
+
+enum {
+ MB_FEATURE_TREE_PROBS = 3,
+ NUM_MB_SEGMENTS = 4,
+ NUM_REF_LF_DELTAS = 4,
+ NUM_MODE_LF_DELTAS = 4,
+};
+
+typedef struct VP8BitReader VP8BitReader;
+struct VP8BitReader {
+ // Boolean decoder.
+ uint32_t value_; // Current value.
+ uint32_t range_; // Current range minus 1. In [127, 254] interval.
+ int bits_; // Number of valid bits left.
+ // Read buffer.
+ const uint8_t* buf_; // Next byte to be read.
+ const uint8_t* buf_end_; // End of read buffer.
+ int eof_; // True if input is exhausted.
+};
+
+const uint8_t kVP8Log2Range[128] = {
+ 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0};
+
+// range = ((range - 1) << kVP8Log2Range[range]) + 1
+const uint8_t kVP8NewRange[128] = {
+ 127, 127, 191, 127, 159, 191, 223, 127, 143, 159, 175, 191, 207, 223, 239,
+ 127, 135, 143, 151, 159, 167, 175, 183, 191, 199, 207, 215, 223, 231, 239,
+ 247, 127, 131, 135, 139, 143, 147, 151, 155, 159, 163, 167, 171, 175, 179,
+ 183, 187, 191, 195, 199, 203, 207, 211, 215, 219, 223, 227, 231, 235, 239,
+ 243, 247, 251, 127, 129, 131, 133, 135, 137, 139, 141, 143, 145, 147, 149,
+ 151, 153, 155, 157, 159, 161, 163, 165, 167, 169, 171, 173, 175, 177, 179,
+ 181, 183, 185, 187, 189, 191, 193, 195, 197, 199, 201, 203, 205, 207, 209,
+ 211, 213, 215, 217, 219, 221, 223, 225, 227, 229, 231, 233, 235, 237, 239,
+ 241, 243, 245, 247, 249, 251, 253, 127};
+
+// Gets the QP, QP range: [0, 127].
+// Returns true on success, false otherwise.
+bool GetQp(const uint8_t* buf, size_t length, int* qp);
+
+} // namespace vp8
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_CODING_UTILITY_VP8_HEADER_PARSER_H_
diff --git a/webrtc/modules/video_coding/video_coding.gypi b/webrtc/modules/video_coding/video_coding.gypi
index b292e0ae31..438d8f1c1f 100644
--- a/webrtc/modules/video_coding/video_coding.gypi
+++ b/webrtc/modules/video_coding/video_coding.gypi
@@ -22,61 +22,61 @@
],
'sources': [
# interfaces
- 'main/interface/video_coding.h',
- 'main/interface/video_coding_defines.h',
+ 'include/video_coding.h',
+ 'include/video_coding_defines.h',
# headers
- 'main/source/codec_database.h',
- 'main/source/codec_timer.h',
- 'main/source/content_metrics_processing.h',
- 'main/source/decoding_state.h',
- 'main/source/encoded_frame.h',
- 'main/source/fec_tables_xor.h',
- 'main/source/frame_buffer.h',
- 'main/source/generic_decoder.h',
- 'main/source/generic_encoder.h',
- 'main/source/inter_frame_delay.h',
- 'main/source/internal_defines.h',
- 'main/source/jitter_buffer.h',
- 'main/source/jitter_buffer_common.h',
- 'main/source/jitter_estimator.h',
- 'main/source/media_opt_util.h',
- 'main/source/media_optimization.h',
- 'main/source/nack_fec_tables.h',
- 'main/source/packet.h',
- 'main/source/qm_select_data.h',
- 'main/source/qm_select.h',
- 'main/source/receiver.h',
- 'main/source/rtt_filter.h',
- 'main/source/session_info.h',
- 'main/source/timestamp_map.h',
- 'main/source/timing.h',
- 'main/source/video_coding_impl.h',
+ 'codec_database.h',
+ 'codec_timer.h',
+ 'content_metrics_processing.h',
+ 'decoding_state.h',
+ 'encoded_frame.h',
+ 'fec_tables_xor.h',
+ 'frame_buffer.h',
+ 'generic_decoder.h',
+ 'generic_encoder.h',
+ 'inter_frame_delay.h',
+ 'internal_defines.h',
+ 'jitter_buffer.h',
+ 'jitter_buffer_common.h',
+ 'jitter_estimator.h',
+ 'media_opt_util.h',
+ 'media_optimization.h',
+ 'nack_fec_tables.h',
+ 'packet.h',
+ 'qm_select_data.h',
+ 'qm_select.h',
+ 'receiver.h',
+ 'rtt_filter.h',
+ 'session_info.h',
+ 'timestamp_map.h',
+ 'timing.h',
+ 'video_coding_impl.h',
# sources
- 'main/source/codec_database.cc',
- 'main/source/codec_timer.cc',
- 'main/source/content_metrics_processing.cc',
- 'main/source/decoding_state.cc',
- 'main/source/encoded_frame.cc',
- 'main/source/frame_buffer.cc',
- 'main/source/generic_decoder.cc',
- 'main/source/generic_encoder.cc',
- 'main/source/inter_frame_delay.cc',
- 'main/source/jitter_buffer.cc',
- 'main/source/jitter_estimator.cc',
- 'main/source/media_opt_util.cc',
- 'main/source/media_optimization.cc',
- 'main/source/packet.cc',
- 'main/source/qm_select.cc',
- 'main/source/receiver.cc',
- 'main/source/rtt_filter.cc',
- 'main/source/session_info.cc',
- 'main/source/timestamp_map.cc',
- 'main/source/timing.cc',
- 'main/source/video_coding_impl.cc',
- 'main/source/video_sender.cc',
- 'main/source/video_receiver.cc',
+ 'codec_database.cc',
+ 'codec_timer.cc',
+ 'content_metrics_processing.cc',
+ 'decoding_state.cc',
+ 'encoded_frame.cc',
+ 'frame_buffer.cc',
+ 'generic_decoder.cc',
+ 'generic_encoder.cc',
+ 'inter_frame_delay.cc',
+ 'jitter_buffer.cc',
+ 'jitter_estimator.cc',
+ 'media_opt_util.cc',
+ 'media_optimization.cc',
+ 'packet.cc',
+ 'qm_select.cc',
+ 'receiver.cc',
+ 'rtt_filter.cc',
+ 'session_info.cc',
+ 'timestamp_map.cc',
+ 'timing.cc',
+ 'video_coding_impl.cc',
+ 'video_sender.cc',
+ 'video_receiver.cc',
], # source
# TODO(jschuh): Bug 1348: fix size_t to int truncations.
'msvs_disabled_warnings': [ 4267, ],
diff --git a/webrtc/modules/video_coding/main/source/video_coding_impl.cc b/webrtc/modules/video_coding/video_coding_impl.cc
index b0a6754cbd..1e26a7e243 100644
--- a/webrtc/modules/video_coding/main/source/video_coding_impl.cc
+++ b/webrtc/modules/video_coding/video_coding_impl.cc
@@ -8,33 +8,33 @@
* be found in the AUTHORS file in the root of the source tree.
*/
+#include "webrtc/modules/video_coding/video_coding_impl.h"
+
+#include <algorithm>
+
#include "webrtc/common_types.h"
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
-#include "webrtc/modules/video_coding/main/source/encoded_frame.h"
-#include "webrtc/modules/video_coding/main/source/jitter_buffer.h"
-#include "webrtc/modules/video_coding/main/source/packet.h"
-#include "webrtc/modules/video_coding/main/source/video_coding_impl.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
+#include "webrtc/modules/video_coding/encoded_frame.h"
+#include "webrtc/modules/video_coding/jitter_buffer.h"
+#include "webrtc/modules/video_coding/packet.h"
#include "webrtc/system_wrappers/include/clock.h"
namespace webrtc {
namespace vcm {
-int64_t
-VCMProcessTimer::Period() const {
- return _periodMs;
+int64_t VCMProcessTimer::Period() const {
+ return _periodMs;
}
-int64_t
-VCMProcessTimer::TimeUntilProcess() const {
- const int64_t time_since_process = _clock->TimeInMilliseconds() - _latestMs;
- const int64_t time_until_process = _periodMs - time_since_process;
- return std::max<int64_t>(time_until_process, 0);
+int64_t VCMProcessTimer::TimeUntilProcess() const {
+ const int64_t time_since_process = _clock->TimeInMilliseconds() - _latestMs;
+ const int64_t time_until_process = _periodMs - time_since_process;
+ return std::max<int64_t>(time_until_process, 0);
}
-void
-VCMProcessTimer::Processed() {
- _latestMs = _clock->TimeInMilliseconds();
+void VCMProcessTimer::Processed() {
+ _latestMs = _clock->TimeInMilliseconds();
}
} // namespace vcm
@@ -59,8 +59,8 @@ class EncodedImageCallbackWrapper : public EncodedImageCallback {
const RTPFragmentationHeader* fragmentation) {
CriticalSectionScoped cs(cs_.get());
if (callback_)
- return callback_->Encoded(
- encoded_image, codec_specific_info, fragmentation);
+ return callback_->Encoded(encoded_image, codec_specific_info,
+ fragmentation);
return 0;
}
@@ -77,30 +77,26 @@ class VideoCodingModuleImpl : public VideoCodingModule {
VideoEncoderRateObserver* encoder_rate_observer,
VCMQMSettingsCallback* qm_settings_callback)
: VideoCodingModule(),
- sender_(new vcm::VideoSender(clock,
- &post_encode_callback_,
- encoder_rate_observer,
- qm_settings_callback)),
- receiver_(new vcm::VideoReceiver(clock, event_factory)),
+ sender_(clock,
+ &post_encode_callback_,
+ encoder_rate_observer,
+ qm_settings_callback),
+ receiver_(clock, event_factory),
own_event_factory_(owns_event_factory ? event_factory : NULL) {}
- virtual ~VideoCodingModuleImpl() {
- sender_.reset();
- receiver_.reset();
- own_event_factory_.reset();
- }
+ virtual ~VideoCodingModuleImpl() { own_event_factory_.reset(); }
int64_t TimeUntilNextProcess() override {
- int64_t sender_time = sender_->TimeUntilNextProcess();
- int64_t receiver_time = receiver_->TimeUntilNextProcess();
+ int64_t sender_time = sender_.TimeUntilNextProcess();
+ int64_t receiver_time = receiver_.TimeUntilNextProcess();
assert(sender_time >= 0);
assert(receiver_time >= 0);
return VCM_MIN(sender_time, receiver_time);
}
int32_t Process() override {
- int32_t sender_return = sender_->Process();
- int32_t receiver_return = receiver_->Process();
+ int32_t sender_return = sender_.Process();
+ int32_t receiver_return = receiver_.Process();
if (sender_return != VCM_OK)
return sender_return;
return receiver_return;
@@ -109,192 +105,176 @@ class VideoCodingModuleImpl : public VideoCodingModule {
int32_t RegisterSendCodec(const VideoCodec* sendCodec,
uint32_t numberOfCores,
uint32_t maxPayloadSize) override {
- return sender_->RegisterSendCodec(sendCodec, numberOfCores, maxPayloadSize);
- }
-
- const VideoCodec& GetSendCodec() const override {
- return sender_->GetSendCodec();
- }
-
- // DEPRECATED.
- int32_t SendCodec(VideoCodec* currentSendCodec) const override {
- return sender_->SendCodecBlocking(currentSendCodec);
- }
-
- // DEPRECATED.
- VideoCodecType SendCodec() const override {
- return sender_->SendCodecBlocking();
+ return sender_.RegisterSendCodec(sendCodec, numberOfCores, maxPayloadSize);
}
int32_t RegisterExternalEncoder(VideoEncoder* externalEncoder,
uint8_t payloadType,
bool internalSource) override {
- return sender_->RegisterExternalEncoder(
- externalEncoder, payloadType, internalSource);
+ sender_.RegisterExternalEncoder(externalEncoder, payloadType,
+ internalSource);
+ return 0;
}
int Bitrate(unsigned int* bitrate) const override {
- return sender_->Bitrate(bitrate);
+ return sender_.Bitrate(bitrate);
}
int FrameRate(unsigned int* framerate) const override {
- return sender_->FrameRate(framerate);
+ return sender_.FrameRate(framerate);
}
int32_t SetChannelParameters(uint32_t target_bitrate, // bits/s.
uint8_t lossRate,
int64_t rtt) override {
- return sender_->SetChannelParameters(target_bitrate, lossRate, rtt);
+ return sender_.SetChannelParameters(target_bitrate, lossRate, rtt);
}
int32_t RegisterTransportCallback(
VCMPacketizationCallback* transport) override {
- return sender_->RegisterTransportCallback(transport);
+ return sender_.RegisterTransportCallback(transport);
}
int32_t RegisterSendStatisticsCallback(
VCMSendStatisticsCallback* sendStats) override {
- return sender_->RegisterSendStatisticsCallback(sendStats);
+ return sender_.RegisterSendStatisticsCallback(sendStats);
}
int32_t RegisterProtectionCallback(
VCMProtectionCallback* protection) override {
- return sender_->RegisterProtectionCallback(protection);
+ return sender_.RegisterProtectionCallback(protection);
}
int32_t SetVideoProtection(VCMVideoProtection videoProtection,
bool enable) override {
// TODO(pbos): Remove enable from receive-side protection modes as well.
if (enable)
- sender_->SetVideoProtection(videoProtection);
- return receiver_->SetVideoProtection(videoProtection, enable);
+ sender_.SetVideoProtection(videoProtection);
+ return receiver_.SetVideoProtection(videoProtection, enable);
}
int32_t AddVideoFrame(const VideoFrame& videoFrame,
const VideoContentMetrics* contentMetrics,
const CodecSpecificInfo* codecSpecificInfo) override {
- return sender_->AddVideoFrame(
- videoFrame, contentMetrics, codecSpecificInfo);
+ return sender_.AddVideoFrame(videoFrame, contentMetrics, codecSpecificInfo);
}
int32_t IntraFrameRequest(int stream_index) override {
- return sender_->IntraFrameRequest(stream_index);
+ return sender_.IntraFrameRequest(stream_index);
}
int32_t EnableFrameDropper(bool enable) override {
- return sender_->EnableFrameDropper(enable);
+ return sender_.EnableFrameDropper(enable);
}
void SuspendBelowMinBitrate() override {
- return sender_->SuspendBelowMinBitrate();
+ return sender_.SuspendBelowMinBitrate();
}
- bool VideoSuspended() const override { return sender_->VideoSuspended(); }
+ bool VideoSuspended() const override { return sender_.VideoSuspended(); }
int32_t RegisterReceiveCodec(const VideoCodec* receiveCodec,
int32_t numberOfCores,
bool requireKeyFrame) override {
- return receiver_->RegisterReceiveCodec(
- receiveCodec, numberOfCores, requireKeyFrame);
+ return receiver_.RegisterReceiveCodec(receiveCodec, numberOfCores,
+ requireKeyFrame);
}
- int32_t RegisterExternalDecoder(VideoDecoder* externalDecoder,
- uint8_t payloadType,
- bool internalRenderTiming) override {
- return receiver_->RegisterExternalDecoder(
- externalDecoder, payloadType, internalRenderTiming);
+ void RegisterExternalDecoder(VideoDecoder* externalDecoder,
+ uint8_t payloadType) override {
+ receiver_.RegisterExternalDecoder(externalDecoder, payloadType);
}
int32_t RegisterReceiveCallback(
VCMReceiveCallback* receiveCallback) override {
- return receiver_->RegisterReceiveCallback(receiveCallback);
+ return receiver_.RegisterReceiveCallback(receiveCallback);
}
int32_t RegisterReceiveStatisticsCallback(
VCMReceiveStatisticsCallback* receiveStats) override {
- return receiver_->RegisterReceiveStatisticsCallback(receiveStats);
+ return receiver_.RegisterReceiveStatisticsCallback(receiveStats);
}
int32_t RegisterDecoderTimingCallback(
VCMDecoderTimingCallback* decoderTiming) override {
- return receiver_->RegisterDecoderTimingCallback(decoderTiming);
+ return receiver_.RegisterDecoderTimingCallback(decoderTiming);
}
int32_t RegisterFrameTypeCallback(
VCMFrameTypeCallback* frameTypeCallback) override {
- return receiver_->RegisterFrameTypeCallback(frameTypeCallback);
+ return receiver_.RegisterFrameTypeCallback(frameTypeCallback);
}
int32_t RegisterPacketRequestCallback(
VCMPacketRequestCallback* callback) override {
- return receiver_->RegisterPacketRequestCallback(callback);
+ return receiver_.RegisterPacketRequestCallback(callback);
}
int RegisterRenderBufferSizeCallback(
VCMRenderBufferSizeCallback* callback) override {
- return receiver_->RegisterRenderBufferSizeCallback(callback);
+ return receiver_.RegisterRenderBufferSizeCallback(callback);
}
int32_t Decode(uint16_t maxWaitTimeMs) override {
- return receiver_->Decode(maxWaitTimeMs);
+ return receiver_.Decode(maxWaitTimeMs);
}
- int32_t ResetDecoder() override { return receiver_->ResetDecoder(); }
+ int32_t ResetDecoder() override { return receiver_.ResetDecoder(); }
int32_t ReceiveCodec(VideoCodec* currentReceiveCodec) const override {
- return receiver_->ReceiveCodec(currentReceiveCodec);
+ return receiver_.ReceiveCodec(currentReceiveCodec);
}
VideoCodecType ReceiveCodec() const override {
- return receiver_->ReceiveCodec();
+ return receiver_.ReceiveCodec();
}
int32_t IncomingPacket(const uint8_t* incomingPayload,
size_t payloadLength,
const WebRtcRTPHeader& rtpInfo) override {
- return receiver_->IncomingPacket(incomingPayload, payloadLength, rtpInfo);
+ return receiver_.IncomingPacket(incomingPayload, payloadLength, rtpInfo);
}
int32_t SetMinimumPlayoutDelay(uint32_t minPlayoutDelayMs) override {
- return receiver_->SetMinimumPlayoutDelay(minPlayoutDelayMs);
+ return receiver_.SetMinimumPlayoutDelay(minPlayoutDelayMs);
}
int32_t SetRenderDelay(uint32_t timeMS) override {
- return receiver_->SetRenderDelay(timeMS);
+ return receiver_.SetRenderDelay(timeMS);
}
- int32_t Delay() const override { return receiver_->Delay(); }
+ int32_t Delay() const override { return receiver_.Delay(); }
uint32_t DiscardedPackets() const override {
- return receiver_->DiscardedPackets();
+ return receiver_.DiscardedPackets();
}
int SetReceiverRobustnessMode(ReceiverRobustness robustnessMode,
VCMDecodeErrorMode errorMode) override {
- return receiver_->SetReceiverRobustnessMode(robustnessMode, errorMode);
+ return receiver_.SetReceiverRobustnessMode(robustnessMode, errorMode);
}
void SetNackSettings(size_t max_nack_list_size,
int max_packet_age_to_nack,
int max_incomplete_time_ms) override {
- return receiver_->SetNackSettings(
- max_nack_list_size, max_packet_age_to_nack, max_incomplete_time_ms);
+ return receiver_.SetNackSettings(max_nack_list_size, max_packet_age_to_nack,
+ max_incomplete_time_ms);
}
void SetDecodeErrorMode(VCMDecodeErrorMode decode_error_mode) override {
- return receiver_->SetDecodeErrorMode(decode_error_mode);
+ return receiver_.SetDecodeErrorMode(decode_error_mode);
}
int SetMinReceiverDelay(int desired_delay_ms) override {
- return receiver_->SetMinReceiverDelay(desired_delay_ms);
+ return receiver_.SetMinReceiverDelay(desired_delay_ms);
}
int32_t SetReceiveChannelParameters(int64_t rtt) override {
- return receiver_->SetReceiveChannelParameters(rtt);
+ return receiver_.SetReceiveChannelParameters(rtt);
}
void RegisterPreDecodeImageCallback(EncodedImageCallback* observer) override {
- receiver_->RegisterPreDecodeImageCallback(observer);
+ receiver_.RegisterPreDecodeImageCallback(observer);
}
void RegisterPostEncodeImageCallback(
@@ -302,36 +282,18 @@ class VideoCodingModuleImpl : public VideoCodingModule {
post_encode_callback_.Register(observer);
}
- void TriggerDecoderShutdown() override {
- receiver_->TriggerDecoderShutdown();
- }
+ void TriggerDecoderShutdown() override { receiver_.TriggerDecoderShutdown(); }
private:
EncodedImageCallbackWrapper post_encode_callback_;
- // TODO(tommi): Change sender_ and receiver_ to be non pointers
- // (construction is 1 alloc instead of 3).
- rtc::scoped_ptr<vcm::VideoSender> sender_;
- rtc::scoped_ptr<vcm::VideoReceiver> receiver_;
+ vcm::VideoSender sender_;
+ vcm::VideoReceiver receiver_;
rtc::scoped_ptr<EventFactory> own_event_factory_;
};
} // namespace
-uint8_t VideoCodingModule::NumberOfCodecs() {
- return VCMCodecDataBase::NumberOfCodecs();
-}
-
-int32_t VideoCodingModule::Codec(uint8_t listId, VideoCodec* codec) {
- if (codec == NULL) {
- return VCM_PARAMETER_ERROR;
- }
- return VCMCodecDataBase::Codec(listId, codec) ? 0 : -1;
-}
-
-int32_t VideoCodingModule::Codec(VideoCodecType codecType, VideoCodec* codec) {
- if (codec == NULL) {
- return VCM_PARAMETER_ERROR;
- }
- return VCMCodecDataBase::Codec(codecType, codec) ? 0 : -1;
+void VideoCodingModule::Codec(VideoCodecType codecType, VideoCodec* codec) {
+ VCMCodecDataBase::Codec(codecType, codec);
}
VideoCodingModule* VideoCodingModule::Create(
@@ -342,9 +304,8 @@ VideoCodingModule* VideoCodingModule::Create(
encoder_rate_observer, qm_settings_callback);
}
-VideoCodingModule* VideoCodingModule::Create(
- Clock* clock,
- EventFactory* event_factory) {
+VideoCodingModule* VideoCodingModule::Create(Clock* clock,
+ EventFactory* event_factory) {
assert(clock);
assert(event_factory);
return new VideoCodingModuleImpl(clock, event_factory, false, nullptr,
diff --git a/webrtc/modules/video_coding/main/source/video_coding_impl.h b/webrtc/modules/video_coding/video_coding_impl.h
index 57f38dad13..f105fa9c18 100644
--- a/webrtc/modules/video_coding/main/source/video_coding_impl.h
+++ b/webrtc/modules/video_coding/video_coding_impl.h
@@ -11,21 +11,21 @@
#ifndef WEBRTC_MODULES_VIDEO_CODING_VIDEO_CODING_IMPL_H_
#define WEBRTC_MODULES_VIDEO_CODING_VIDEO_CODING_IMPL_H_
-#include "webrtc/modules/video_coding/main/interface/video_coding.h"
+#include "webrtc/modules/video_coding/include/video_coding.h"
#include <vector>
#include "webrtc/base/thread_annotations.h"
#include "webrtc/base/thread_checker.h"
-#include "webrtc/modules/video_coding/main/source/codec_database.h"
-#include "webrtc/modules/video_coding/main/source/frame_buffer.h"
-#include "webrtc/modules/video_coding/main/source/generic_decoder.h"
-#include "webrtc/modules/video_coding/main/source/generic_encoder.h"
-#include "webrtc/modules/video_coding/main/source/jitter_buffer.h"
-#include "webrtc/modules/video_coding/main/source/media_optimization.h"
-#include "webrtc/modules/video_coding/main/source/receiver.h"
-#include "webrtc/modules/video_coding/main/source/timing.h"
-#include "webrtc/modules/video_coding/utility/include/qp_parser.h"
+#include "webrtc/modules/video_coding/codec_database.h"
+#include "webrtc/modules/video_coding/frame_buffer.h"
+#include "webrtc/modules/video_coding/generic_decoder.h"
+#include "webrtc/modules/video_coding/generic_encoder.h"
+#include "webrtc/modules/video_coding/jitter_buffer.h"
+#include "webrtc/modules/video_coding/media_optimization.h"
+#include "webrtc/modules/video_coding/receiver.h"
+#include "webrtc/modules/video_coding/timing.h"
+#include "webrtc/modules/video_coding/utility/qp_parser.h"
#include "webrtc/system_wrappers/include/clock.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
@@ -67,24 +67,10 @@ class VideoSender {
int32_t RegisterSendCodec(const VideoCodec* sendCodec,
uint32_t numberOfCores,
uint32_t maxPayloadSize);
- // Non-blocking access to the currently active send codec configuration.
- // Must be called from the same thread as the VideoSender instance was
- // created on.
- const VideoCodec& GetSendCodec() const;
- // Get a copy of the currently configured send codec.
- // This method acquires a lock to copy the current configuration out,
- // so it can block and the returned information is not guaranteed to be
- // accurate upon return. Consider using GetSendCodec() instead and make
- // decisions on that thread with regards to the current codec.
- int32_t SendCodecBlocking(VideoCodec* currentSendCodec) const;
-
- // Same as SendCodecBlocking. Try to use GetSendCodec() instead.
- VideoCodecType SendCodecBlocking() const;
-
- int32_t RegisterExternalEncoder(VideoEncoder* externalEncoder,
- uint8_t payloadType,
- bool internalSource);
+ void RegisterExternalEncoder(VideoEncoder* externalEncoder,
+ uint8_t payloadType,
+ bool internalSource);
int Bitrate(unsigned int* bitrate) const;
int FrameRate(unsigned int* framerate) const;
@@ -150,9 +136,8 @@ class VideoReceiver {
int32_t numberOfCores,
bool requireKeyFrame);
- int32_t RegisterExternalDecoder(VideoDecoder* externalDecoder,
- uint8_t payloadType,
- bool internalRenderTiming);
+ void RegisterExternalDecoder(VideoDecoder* externalDecoder,
+ uint8_t payloadType);
int32_t RegisterReceiveCallback(VCMReceiveCallback* receiveCallback);
int32_t RegisterReceiveStatisticsCallback(
VCMReceiveStatisticsCallback* receiveStats);
diff --git a/webrtc/modules/video_coding/main/source/video_coding_robustness_unittest.cc b/webrtc/modules/video_coding/video_coding_robustness_unittest.cc
index ac6e16bd80..dd6565d505 100644
--- a/webrtc/modules/video_coding/main/source/video_coding_robustness_unittest.cc
+++ b/webrtc/modules/video_coding/video_coding_robustness_unittest.cc
@@ -10,10 +10,10 @@
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
-#include "webrtc/modules/video_coding/codecs/interface/mock/mock_video_codec_interface.h"
-#include "webrtc/modules/video_coding/main/interface/mock/mock_vcm_callbacks.h"
-#include "webrtc/modules/video_coding/main/interface/video_coding.h"
-#include "webrtc/modules/video_coding/main/test/test_util.h"
+#include "webrtc/modules/video_coding/include/mock/mock_video_codec_interface.h"
+#include "webrtc/modules/video_coding/include/mock/mock_vcm_callbacks.h"
+#include "webrtc/modules/video_coding/include/video_coding.h"
+#include "webrtc/modules/video_coding/test/test_util.h"
#include "webrtc/system_wrappers/include/clock.h"
namespace webrtc {
@@ -42,16 +42,12 @@ class VCMRobustnessTest : public ::testing::Test {
vcm_->SetNackSettings(kMaxNackListSize, kMaxPacketAgeToNack, 0);
ASSERT_EQ(0, vcm_->RegisterFrameTypeCallback(&frame_type_callback_));
ASSERT_EQ(0, vcm_->RegisterPacketRequestCallback(&request_callback_));
- ASSERT_EQ(VCM_OK, vcm_->Codec(kVideoCodecVP8, &video_codec_));
+ VideoCodingModule::Codec(kVideoCodecVP8, &video_codec_);
ASSERT_EQ(VCM_OK, vcm_->RegisterReceiveCodec(&video_codec_, 1));
- ASSERT_EQ(VCM_OK, vcm_->RegisterExternalDecoder(&decoder_,
- video_codec_.plType,
- true));
+ vcm_->RegisterExternalDecoder(&decoder_, video_codec_.plType);
}
- virtual void TearDown() {
- VideoCodingModule::Destroy(vcm_);
- }
+ virtual void TearDown() { VideoCodingModule::Destroy(vcm_); }
void InsertPacket(uint32_t timestamp,
uint16_t seq_no,
@@ -89,19 +85,17 @@ TEST_F(VCMRobustnessTest, TestHardNack) {
.With(Args<0, 1>(ElementsAre(6, 7)))
.Times(1);
for (int ts = 0; ts <= 6000; ts += 3000) {
- EXPECT_CALL(decoder_, Decode(AllOf(Field(&EncodedImage::_timeStamp, ts),
- Field(&EncodedImage::_length,
- kPayloadLen * 3),
- Field(&EncodedImage::_completeFrame,
- true)),
- false, _, _, _))
+ EXPECT_CALL(decoder_,
+ Decode(AllOf(Field(&EncodedImage::_timeStamp, ts),
+ Field(&EncodedImage::_length, kPayloadLen * 3),
+ Field(&EncodedImage::_completeFrame, true)),
+ false, _, _, _))
.Times(1)
.InSequence(s);
}
ASSERT_EQ(VCM_OK, vcm_->SetReceiverRobustnessMode(
- VideoCodingModule::kHardNack,
- kNoErrors));
+ VideoCodingModule::kHardNack, kNoErrors));
InsertPacket(0, 0, true, false, kVideoFrameKey);
InsertPacket(0, 1, false, false, kVideoFrameKey);
@@ -138,14 +132,11 @@ TEST_F(VCMRobustnessTest, TestHardNack) {
}
TEST_F(VCMRobustnessTest, TestHardNackNoneDecoded) {
- EXPECT_CALL(request_callback_, ResendPackets(_, _))
- .Times(0);
- EXPECT_CALL(frame_type_callback_, RequestKeyFrame())
- .Times(1);
+ EXPECT_CALL(request_callback_, ResendPackets(_, _)).Times(0);
+ EXPECT_CALL(frame_type_callback_, RequestKeyFrame()).Times(1);
ASSERT_EQ(VCM_OK, vcm_->SetReceiverRobustnessMode(
- VideoCodingModule::kHardNack,
- kNoErrors));
+ VideoCodingModule::kHardNack, kNoErrors));
InsertPacket(3000, 3, true, false, kVideoFrameDelta);
InsertPacket(3000, 4, false, false, kVideoFrameDelta);
@@ -168,46 +159,43 @@ TEST_F(VCMRobustnessTest, TestModeNoneWithErrors) {
.With(Args<0, 1>(ElementsAre(4)))
.Times(0);
- EXPECT_CALL(decoder_, Copy())
- .Times(0);
- EXPECT_CALL(decoderCopy_, Copy())
- .Times(0);
+ EXPECT_CALL(decoder_, Copy()).Times(0);
+ EXPECT_CALL(decoderCopy_, Copy()).Times(0);
// Decode operations
- EXPECT_CALL(decoder_, Decode(AllOf(Field(&EncodedImage::_timeStamp, 0),
- Field(&EncodedImage::_completeFrame,
- true)),
- false, _, _, _))
- .Times(1)
- .InSequence(s1);
- EXPECT_CALL(decoder_, Decode(AllOf(Field(&EncodedImage::_timeStamp, 3000),
- Field(&EncodedImage::_completeFrame,
- false)),
- false, _, _, _))
- .Times(1)
- .InSequence(s1);
- EXPECT_CALL(decoder_, Decode(AllOf(Field(&EncodedImage::_timeStamp, 6000),
- Field(&EncodedImage::_completeFrame,
- true)),
- false, _, _, _))
- .Times(1)
- .InSequence(s1);
- EXPECT_CALL(decoder_, Decode(AllOf(Field(&EncodedImage::_timeStamp, 9000),
- Field(&EncodedImage::_completeFrame,
- true)),
- false, _, _, _))
- .Times(1)
- .InSequence(s1);
-
- ASSERT_EQ(VCM_OK, vcm_->SetReceiverRobustnessMode(
- VideoCodingModule::kNone,
- kWithErrors));
+ EXPECT_CALL(decoder_,
+ Decode(AllOf(Field(&EncodedImage::_timeStamp, 0),
+ Field(&EncodedImage::_completeFrame, true)),
+ false, _, _, _))
+ .Times(1)
+ .InSequence(s1);
+ EXPECT_CALL(decoder_,
+ Decode(AllOf(Field(&EncodedImage::_timeStamp, 3000),
+ Field(&EncodedImage::_completeFrame, false)),
+ false, _, _, _))
+ .Times(1)
+ .InSequence(s1);
+ EXPECT_CALL(decoder_,
+ Decode(AllOf(Field(&EncodedImage::_timeStamp, 6000),
+ Field(&EncodedImage::_completeFrame, true)),
+ false, _, _, _))
+ .Times(1)
+ .InSequence(s1);
+ EXPECT_CALL(decoder_,
+ Decode(AllOf(Field(&EncodedImage::_timeStamp, 9000),
+ Field(&EncodedImage::_completeFrame, true)),
+ false, _, _, _))
+ .Times(1)
+ .InSequence(s1);
+
+ ASSERT_EQ(VCM_OK, vcm_->SetReceiverRobustnessMode(VideoCodingModule::kNone,
+ kWithErrors));
InsertPacket(0, 0, true, false, kVideoFrameKey);
InsertPacket(0, 1, false, false, kVideoFrameKey);
InsertPacket(0, 2, false, true, kVideoFrameKey);
- EXPECT_EQ(VCM_OK, vcm_->Decode(0)); // Decode timestamp 0.
- EXPECT_EQ(VCM_OK, vcm_->Process()); // Expect no NACK list.
+ EXPECT_EQ(VCM_OK, vcm_->Decode(33)); // Decode timestamp 0.
+ EXPECT_EQ(VCM_OK, vcm_->Process()); // Expect no NACK list.
clock_->AdvanceTimeMilliseconds(33);
InsertPacket(3000, 3, true, false, kVideoFrameDelta);
@@ -224,8 +212,8 @@ TEST_F(VCMRobustnessTest, TestModeNoneWithErrors) {
EXPECT_EQ(VCM_OK, vcm_->Process()); // Expect no NACK list.
clock_->AdvanceTimeMilliseconds(10);
- EXPECT_EQ(VCM_OK, vcm_->Decode(0)); // Decode timestamp 6000 complete.
- EXPECT_EQ(VCM_OK, vcm_->Process()); // Expect no NACK list.
+ EXPECT_EQ(VCM_OK, vcm_->Decode(23)); // Decode timestamp 6000 complete.
+ EXPECT_EQ(VCM_OK, vcm_->Process()); // Expect no NACK list.
clock_->AdvanceTimeMilliseconds(23);
InsertPacket(3000, 4, false, false, kVideoFrameDelta);
@@ -233,6 +221,6 @@ TEST_F(VCMRobustnessTest, TestModeNoneWithErrors) {
InsertPacket(9000, 9, true, false, kVideoFrameDelta);
InsertPacket(9000, 10, false, false, kVideoFrameDelta);
InsertPacket(9000, 11, false, true, kVideoFrameDelta);
- EXPECT_EQ(VCM_OK, vcm_->Decode(0)); // Decode timestamp 9000 complete.
+ EXPECT_EQ(VCM_OK, vcm_->Decode(33)); // Decode timestamp 9000 complete.
}
} // namespace webrtc
diff --git a/webrtc/modules/video_coding/video_coding_test.gypi b/webrtc/modules/video_coding/video_coding_test.gypi
index 5d720ebb63..fc2fec6c98 100644
--- a/webrtc/modules/video_coding/video_coding_test.gypi
+++ b/webrtc/modules/video_coding/video_coding_test.gypi
@@ -19,16 +19,16 @@
],
'sources': [
# headers
- 'main/test/receiver_tests.h',
- 'main/test/rtp_player.h',
- 'main/test/vcm_payload_sink_factory.h',
+ 'test/receiver_tests.h',
+ 'test/rtp_player.h',
+ 'test/vcm_payload_sink_factory.h',
# sources
- 'main/test/rtp_player.cc',
- 'main/test/test_util.cc',
- 'main/test/tester_main.cc',
- 'main/test/vcm_payload_sink_factory.cc',
- 'main/test/video_rtp_play.cc',
+ 'test/rtp_player.cc',
+ 'test/test_util.cc',
+ 'test/tester_main.cc',
+ 'test/vcm_payload_sink_factory.cc',
+ 'test/video_rtp_play.cc',
], # sources
},
],
diff --git a/webrtc/modules/video_coding/main/source/video_receiver.cc b/webrtc/modules/video_coding/video_receiver.cc
index 77c069cf2d..02c0da8f48 100644
--- a/webrtc/modules/video_coding/main/source/video_receiver.cc
+++ b/webrtc/modules/video_coding/video_receiver.cc
@@ -9,16 +9,16 @@
*/
#include "webrtc/base/checks.h"
+#include "webrtc/base/logging.h"
#include "webrtc/base/trace_event.h"
#include "webrtc/common_types.h"
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
-#include "webrtc/modules/video_coding/main/source/encoded_frame.h"
-#include "webrtc/modules/video_coding/main/source/jitter_buffer.h"
-#include "webrtc/modules/video_coding/main/source/packet.h"
-#include "webrtc/modules/video_coding/main/source/video_coding_impl.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
+#include "webrtc/modules/video_coding/encoded_frame.h"
+#include "webrtc/modules/video_coding/jitter_buffer.h"
+#include "webrtc/modules/video_coding/packet.h"
+#include "webrtc/modules/video_coding/video_coding_impl.h"
#include "webrtc/system_wrappers/include/clock.h"
-#include "webrtc/system_wrappers/include/logging.h"
// #define DEBUG_DECODER_BIT_STREAM
@@ -31,7 +31,7 @@ VideoReceiver::VideoReceiver(Clock* clock, EventFactory* event_factory)
_receiveCritSect(CriticalSectionWrapper::CreateCriticalSection()),
_timing(clock_),
_receiver(&_timing, clock_, event_factory),
- _decodedFrameCallback(_timing, clock_),
+ _decodedFrameCallback(&_timing, clock_),
_frameTypeCallback(NULL),
_receiveStatsCallback(NULL),
_decoderTimingCallback(NULL),
@@ -84,20 +84,12 @@ int32_t VideoReceiver::Process() {
int jitter_buffer_ms;
int min_playout_delay_ms;
int render_delay_ms;
- _timing.GetTimings(&decode_ms,
- &max_decode_ms,
- &current_delay_ms,
- &target_delay_ms,
- &jitter_buffer_ms,
- &min_playout_delay_ms,
- &render_delay_ms);
- _decoderTimingCallback->OnDecoderTiming(decode_ms,
- max_decode_ms,
- current_delay_ms,
- target_delay_ms,
- jitter_buffer_ms,
- min_playout_delay_ms,
- render_delay_ms);
+ _timing.GetTimings(&decode_ms, &max_decode_ms, &current_delay_ms,
+ &target_delay_ms, &jitter_buffer_ms,
+ &min_playout_delay_ms, &render_delay_ms);
+ _decoderTimingCallback->OnDecoderTiming(
+ decode_ms, max_decode_ms, current_delay_ms, target_delay_ms,
+ jitter_buffer_ms, min_playout_delay_ms, render_delay_ms);
}
// Size of render buffer.
@@ -235,21 +227,17 @@ int32_t VideoReceiver::RegisterDecoderTimingCallback(
return VCM_OK;
}
-// Register an externally defined decoder/render object.
-// Can be a decoder only or a decoder coupled with a renderer.
-int32_t VideoReceiver::RegisterExternalDecoder(VideoDecoder* externalDecoder,
- uint8_t payloadType,
- bool internalRenderTiming) {
+// Register an externally defined decoder object.
+void VideoReceiver::RegisterExternalDecoder(VideoDecoder* externalDecoder,
+ uint8_t payloadType) {
CriticalSectionScoped cs(_receiveCritSect);
if (externalDecoder == NULL) {
// Make sure the VCM updates the decoder next time it decodes.
_decoder = NULL;
- return _codecDataBase.DeregisterExternalDecoder(payloadType) ? 0 : -1;
+ RTC_CHECK(_codecDataBase.DeregisterExternalDecoder(payloadType));
+ return;
}
- return _codecDataBase.RegisterExternalDecoder(
- externalDecoder, payloadType, internalRenderTiming)
- ? 0
- : -1;
+ _codecDataBase.RegisterExternalDecoder(externalDecoder, payloadType);
}
// Register a frame type request callback.
@@ -282,52 +270,46 @@ void VideoReceiver::TriggerDecoderShutdown() {
// Should be called as often as possible to get the most out of the decoder.
int32_t VideoReceiver::Decode(uint16_t maxWaitTimeMs) {
int64_t nextRenderTimeMs;
- bool supports_render_scheduling;
+ bool prefer_late_decoding = false;
{
CriticalSectionScoped cs(_receiveCritSect);
- supports_render_scheduling = _codecDataBase.SupportsRenderScheduling();
+ prefer_late_decoding = _codecDataBase.PrefersLateDecoding();
}
VCMEncodedFrame* frame = _receiver.FrameForDecoding(
- maxWaitTimeMs, nextRenderTimeMs, supports_render_scheduling);
+ maxWaitTimeMs, &nextRenderTimeMs, prefer_late_decoding);
- if (frame == NULL) {
+ if (!frame)
return VCM_FRAME_NOT_READY;
- } else {
- CriticalSectionScoped cs(_receiveCritSect);
- // If this frame was too late, we should adjust the delay accordingly
- _timing.UpdateCurrentDelay(frame->RenderTimeMs(),
- clock_->TimeInMilliseconds());
+ CriticalSectionScoped cs(_receiveCritSect);
- if (pre_decode_image_callback_) {
- EncodedImage encoded_image(frame->EncodedImage());
- int qp = -1;
- if (qp_parser_.GetQp(*frame, &qp)) {
- encoded_image.qp_ = qp;
- }
- pre_decode_image_callback_->Encoded(
- encoded_image, frame->CodecSpecific(), NULL);
+ // If this frame was too late, we should adjust the delay accordingly
+ _timing.UpdateCurrentDelay(frame->RenderTimeMs(),
+ clock_->TimeInMilliseconds());
+
+ if (pre_decode_image_callback_) {
+ EncodedImage encoded_image(frame->EncodedImage());
+ int qp = -1;
+ if (qp_parser_.GetQp(*frame, &qp)) {
+ encoded_image.qp_ = qp;
}
+ pre_decode_image_callback_->Encoded(encoded_image, frame->CodecSpecific(),
+ NULL);
+ }
#ifdef DEBUG_DECODER_BIT_STREAM
- if (_bitStreamBeforeDecoder != NULL) {
- // Write bit stream to file for debugging purposes
- if (fwrite(
- frame->Buffer(), 1, frame->Length(), _bitStreamBeforeDecoder) !=
- frame->Length()) {
- return -1;
- }
- }
-#endif
- const int32_t ret = Decode(*frame);
- _receiver.ReleaseFrame(frame);
- frame = NULL;
- if (ret != VCM_OK) {
- return ret;
+ if (_bitStreamBeforeDecoder != NULL) {
+ // Write bit stream to file for debugging purposes
+ if (fwrite(frame->Buffer(), 1, frame->Length(), _bitStreamBeforeDecoder) !=
+ frame->Length()) {
+ return -1;
}
}
- return VCM_OK;
+#endif
+ const int32_t ret = Decode(*frame);
+ _receiver.ReleaseFrame(frame);
+ return ret;
}
int32_t VideoReceiver::RequestSliceLossIndication(
@@ -363,21 +345,10 @@ int32_t VideoReceiver::RequestKeyFrame() {
// Must be called from inside the receive side critical section.
int32_t VideoReceiver::Decode(const VCMEncodedFrame& frame) {
- TRACE_EVENT_ASYNC_STEP1("webrtc",
- "Video",
- frame.TimeStamp(),
- "Decode",
- "type",
- frame.FrameType());
+ TRACE_EVENT_ASYNC_STEP1("webrtc", "Video", frame.TimeStamp(), "Decode",
+ "type", frame.FrameType());
// Change decoder if payload type has changed
- const bool renderTimingBefore = _codecDataBase.SupportsRenderScheduling();
- _decoder =
- _codecDataBase.GetDecoder(frame.PayloadType(), &_decodedFrameCallback);
- if (renderTimingBefore != _codecDataBase.SupportsRenderScheduling()) {
- // Make sure we reset the decode time estimate since it will
- // be zero for codecs without render timing.
- _timing.ResetDecodeTime();
- }
+ _decoder = _codecDataBase.GetDecoder(frame, &_decodedFrameCallback);
if (_decoder == NULL) {
return VCM_NO_CODEC_REGISTERED;
}
@@ -436,8 +407,8 @@ int32_t VideoReceiver::RegisterReceiveCodec(const VideoCodec* receiveCodec,
if (receiveCodec == NULL) {
return VCM_PARAMETER_ERROR;
}
- if (!_codecDataBase.RegisterReceiveCodec(
- receiveCodec, numberOfCores, requireKeyFrame)) {
+ if (!_codecDataBase.RegisterReceiveCodec(receiveCodec, numberOfCores,
+ requireKeyFrame)) {
return -1;
}
return 0;
@@ -463,9 +434,7 @@ int32_t VideoReceiver::IncomingPacket(const uint8_t* incomingPayload,
size_t payloadLength,
const WebRtcRTPHeader& rtpInfo) {
if (rtpInfo.frameType == kVideoFrameKey) {
- TRACE_EVENT1("webrtc",
- "VCM::PacketKeyFrame",
- "seqnum",
+ TRACE_EVENT1("webrtc", "VCM::PacketKeyFrame", "seqnum",
rtpInfo.header.sequenceNumber);
}
if (incomingPayload == NULL) {
@@ -504,7 +473,9 @@ int32_t VideoReceiver::SetRenderDelay(uint32_t timeMS) {
}
// Current video delay
-int32_t VideoReceiver::Delay() const { return _timing.TargetVideoDelay(); }
+int32_t VideoReceiver::Delay() const {
+ return _timing.TargetVideoDelay();
+}
uint32_t VideoReceiver::DiscardedPackets() const {
return _receiver.DiscardedPackets();
@@ -560,8 +531,8 @@ void VideoReceiver::SetNackSettings(size_t max_nack_list_size,
CriticalSectionScoped process_cs(process_crit_sect_.get());
max_nack_list_size_ = max_nack_list_size;
}
- _receiver.SetNackSettings(
- max_nack_list_size, max_packet_age_to_nack, max_incomplete_time_ms);
+ _receiver.SetNackSettings(max_nack_list_size, max_packet_age_to_nack,
+ max_incomplete_time_ms);
}
int VideoReceiver::SetMinReceiverDelay(int desired_delay_ms) {
diff --git a/webrtc/modules/video_coding/main/source/video_receiver_unittest.cc b/webrtc/modules/video_coding/video_receiver_unittest.cc
index 75ea29a1ec..820ce9ae2d 100644
--- a/webrtc/modules/video_coding/main/source/video_receiver_unittest.cc
+++ b/webrtc/modules/video_coding/video_receiver_unittest.cc
@@ -12,11 +12,11 @@
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/modules/video_coding/codecs/interface/mock/mock_video_codec_interface.h"
-#include "webrtc/modules/video_coding/main/interface/mock/mock_vcm_callbacks.h"
-#include "webrtc/modules/video_coding/main/interface/video_coding.h"
-#include "webrtc/modules/video_coding/main/source/video_coding_impl.h"
-#include "webrtc/modules/video_coding/main/test/test_util.h"
+#include "webrtc/modules/video_coding/include/mock/mock_video_codec_interface.h"
+#include "webrtc/modules/video_coding/include/mock/mock_vcm_callbacks.h"
+#include "webrtc/modules/video_coding/include/video_coding.h"
+#include "webrtc/modules/video_coding/video_coding_impl.h"
+#include "webrtc/modules/video_coding/test/test_util.h"
#include "webrtc/system_wrappers/include/clock.h"
using ::testing::_;
@@ -34,14 +34,12 @@ class TestVideoReceiver : public ::testing::Test {
virtual void SetUp() {
receiver_.reset(new VideoReceiver(&clock_, &event_factory_));
- EXPECT_EQ(0, receiver_->RegisterExternalDecoder(&decoder_,
- kUnusedPayloadType, true));
+ receiver_->RegisterExternalDecoder(&decoder_, kUnusedPayloadType);
const size_t kMaxNackListSize = 250;
const int kMaxPacketAgeToNack = 450;
receiver_->SetNackSettings(kMaxNackListSize, kMaxPacketAgeToNack, 0);
- memset(&settings_, 0, sizeof(settings_));
- EXPECT_EQ(0, VideoCodingModule::Codec(kVideoCodecVP8, &settings_));
+ VideoCodingModule::Codec(kVideoCodecVP8, &settings_);
settings_.plType = kUnusedPayloadType; // Use the mocked encoder.
EXPECT_EQ(0, receiver_->RegisterReceiveCodec(&settings_, 1, true));
}
@@ -56,7 +54,7 @@ class TestVideoReceiver : public ::testing::Test {
}
EXPECT_EQ(0, receiver_->Process());
EXPECT_CALL(decoder_, Decode(_, _, _, _, _)).Times(0);
- EXPECT_EQ(VCM_FRAME_NOT_READY, receiver_->Decode(0));
+ EXPECT_EQ(VCM_FRAME_NOT_READY, receiver_->Decode(100));
}
void InsertAndVerifyDecodableFrame(const uint8_t* payload,
@@ -68,7 +66,7 @@ class TestVideoReceiver : public ::testing::Test {
EXPECT_CALL(packet_request_callback_, ResendPackets(_, _)).Times(0);
EXPECT_EQ(0, receiver_->Process());
EXPECT_CALL(decoder_, Decode(_, _, _, _, _)).Times(1);
- EXPECT_EQ(0, receiver_->Decode(0));
+ EXPECT_EQ(0, receiver_->Decode(100));
}
SimulatedClock clock_;
diff --git a/webrtc/modules/video_coding/main/source/video_sender.cc b/webrtc/modules/video_coding/video_sender.cc
index 98230b1e9e..ac901f95b9 100644
--- a/webrtc/modules/video_coding/main/source/video_sender.cc
+++ b/webrtc/modules/video_coding/video_sender.cc
@@ -8,18 +8,18 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/common_types.h"
#include <algorithm> // std::max
#include "webrtc/base/checks.h"
+#include "webrtc/base/logging.h"
+#include "webrtc/common_types.h"
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
-#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
-#include "webrtc/modules/video_coding/main/source/encoded_frame.h"
-#include "webrtc/modules/video_coding/main/source/video_coding_impl.h"
-#include "webrtc/modules/video_coding/utility/include/quality_scaler.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
+#include "webrtc/modules/video_coding/encoded_frame.h"
+#include "webrtc/modules/video_coding/utility/quality_scaler.h"
+#include "webrtc/modules/video_coding/video_coding_impl.h"
#include "webrtc/system_wrappers/include/clock.h"
-#include "webrtc/system_wrappers/include/logging.h"
namespace webrtc {
namespace vcm {
@@ -126,57 +126,34 @@ int32_t VideoSender::RegisterSendCodec(const VideoCodec* sendCodec,
_nextFrameTypes.resize(VCM_MAX(sendCodec->numberOfSimulcastStreams, 1),
kVideoFrameDelta);
- _mediaOpt.SetEncodingData(sendCodec->codecType,
- sendCodec->maxBitrate * 1000,
- sendCodec->startBitrate * 1000,
- sendCodec->width,
- sendCodec->height,
- sendCodec->maxFramerate,
- numLayers,
- maxPayloadSize);
+ _mediaOpt.SetEncodingData(sendCodec->codecType, sendCodec->maxBitrate * 1000,
+ sendCodec->startBitrate * 1000, sendCodec->width,
+ sendCodec->height, sendCodec->maxFramerate,
+ numLayers, maxPayloadSize);
return VCM_OK;
}
-const VideoCodec& VideoSender::GetSendCodec() const {
- RTC_DCHECK(main_thread_.CalledOnValidThread());
- return current_codec_;
-}
-
-int32_t VideoSender::SendCodecBlocking(VideoCodec* currentSendCodec) const {
- rtc::CritScope lock(&send_crit_);
- if (currentSendCodec == nullptr) {
- return VCM_PARAMETER_ERROR;
- }
- return _codecDataBase.SendCodec(currentSendCodec) ? 0 : -1;
-}
-
-VideoCodecType VideoSender::SendCodecBlocking() const {
- rtc::CritScope lock(&send_crit_);
- return _codecDataBase.SendCodec();
-}
-
// Register an external decoder object.
// This can not be used together with external decoder callbacks.
-int32_t VideoSender::RegisterExternalEncoder(VideoEncoder* externalEncoder,
- uint8_t payloadType,
- bool internalSource /*= false*/) {
+void VideoSender::RegisterExternalEncoder(VideoEncoder* externalEncoder,
+ uint8_t payloadType,
+ bool internalSource /*= false*/) {
RTC_DCHECK(main_thread_.CalledOnValidThread());
rtc::CritScope lock(&send_crit_);
if (externalEncoder == nullptr) {
bool wasSendCodec = false;
- const bool ret =
- _codecDataBase.DeregisterExternalEncoder(payloadType, &wasSendCodec);
+ RTC_CHECK(
+ _codecDataBase.DeregisterExternalEncoder(payloadType, &wasSendCodec));
if (wasSendCodec) {
// Make sure the VCM doesn't use the de-registered codec
_encoder = nullptr;
}
- return ret ? 0 : -1;
+ return;
}
- _codecDataBase.RegisterExternalEncoder(
- externalEncoder, payloadType, internalSource);
- return 0;
+ _codecDataBase.RegisterExternalEncoder(externalEncoder, payloadType,
+ internalSource);
}
// Get encode bitrate
@@ -369,7 +346,6 @@ void VideoSender::SuspendBelowMinBitrate() {
}
bool VideoSender::VideoSuspended() const {
- rtc::CritScope lock(&send_crit_);
return _mediaOpt.IsVideoSuspended();
}
} // namespace vcm
diff --git a/webrtc/modules/video_coding/main/source/video_sender_unittest.cc b/webrtc/modules/video_coding/video_sender_unittest.cc
index e9c8bd79b6..741c7b7a60 100644
--- a/webrtc/modules/video_coding/main/source/video_sender_unittest.cc
+++ b/webrtc/modules/video_coding/video_sender_unittest.cc
@@ -13,18 +13,17 @@
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/common.h"
-#include "webrtc/modules/video_coding/codecs/interface/mock/mock_video_codec_interface.h"
+#include "webrtc/modules/video_coding/include/mock/mock_video_codec_interface.h"
#include "webrtc/modules/video_coding/codecs/vp8/include/vp8.h"
#include "webrtc/modules/video_coding/codecs/vp8/include/vp8_common_types.h"
#include "webrtc/modules/video_coding/codecs/vp8/temporal_layers.h"
-#include "webrtc/modules/video_coding/main/interface/mock/mock_vcm_callbacks.h"
-#include "webrtc/modules/video_coding/main/interface/video_coding.h"
-#include "webrtc/modules/video_coding/main/source/video_coding_impl.h"
-#include "webrtc/modules/video_coding/main/test/test_util.h"
+#include "webrtc/modules/video_coding/include/mock/mock_vcm_callbacks.h"
+#include "webrtc/modules/video_coding/include/video_coding.h"
+#include "webrtc/modules/video_coding/video_coding_impl.h"
+#include "webrtc/modules/video_coding/test/test_util.h"
#include "webrtc/system_wrappers/include/clock.h"
#include "webrtc/test/frame_generator.h"
#include "webrtc/test/testsupport/fileutils.h"
-#include "webrtc/test/testsupport/gtest_disable.h"
using ::testing::_;
using ::testing::AllOf;
@@ -41,9 +40,7 @@ using webrtc::test::FrameGenerator;
namespace webrtc {
namespace vcm {
namespace {
-enum {
- kMaxNumberOfTemporalLayers = 3
-};
+enum { kMaxNumberOfTemporalLayers = 3 };
struct Vp8StreamInfo {
float framerate_fps[kMaxNumberOfTemporalLayers];
@@ -87,7 +84,7 @@ class EmptyFrameGenerator : public FrameGenerator {
class PacketizationCallback : public VCMPacketizationCallback {
public:
- PacketizationCallback(Clock* clock)
+ explicit PacketizationCallback(Clock* clock)
: clock_(clock), start_time_ms_(clock_->TimeInMilliseconds()) {}
virtual ~PacketizationCallback() {}
@@ -207,22 +204,15 @@ class TestVideoSenderWithMockEncoder : public TestVideoSender {
void SetUp() override {
TestVideoSender::SetUp();
- EXPECT_EQ(
- 0,
- sender_->RegisterExternalEncoder(&encoder_, kUnusedPayloadType, false));
- memset(&settings_, 0, sizeof(settings_));
- EXPECT_EQ(0, VideoCodingModule::Codec(kVideoCodecVP8, &settings_));
+ sender_->RegisterExternalEncoder(&encoder_, kUnusedPayloadType, false);
+ VideoCodingModule::Codec(kVideoCodecVP8, &settings_);
settings_.numberOfSimulcastStreams = kNumberOfStreams;
- ConfigureStream(kDefaultWidth / 4,
- kDefaultHeight / 4,
- 100,
+ ConfigureStream(kDefaultWidth / 4, kDefaultHeight / 4, 100,
&settings_.simulcastStream[0]);
- ConfigureStream(kDefaultWidth / 2,
- kDefaultHeight / 2,
- 500,
+ ConfigureStream(kDefaultWidth / 2, kDefaultHeight / 2, 500,
&settings_.simulcastStream[1]);
- ConfigureStream(
- kDefaultWidth, kDefaultHeight, 1200, &settings_.simulcastStream[2]);
+ ConfigureStream(kDefaultWidth, kDefaultHeight, 1200,
+ &settings_.simulcastStream[2]);
settings_.plType = kUnusedPayloadType; // Use the mocked encoder.
generator_.reset(
new EmptyFrameGenerator(settings_.width, settings_.height));
@@ -246,12 +236,11 @@ class TestVideoSenderWithMockEncoder : public TestVideoSender {
assert(stream < kNumberOfStreams);
std::vector<FrameType> frame_types(kNumberOfStreams, kVideoFrameDelta);
frame_types[stream] = kVideoFrameKey;
- EXPECT_CALL(
- encoder_,
- Encode(_,
- _,
- Pointee(ElementsAreArray(&frame_types[0], frame_types.size()))))
- .Times(1).WillRepeatedly(Return(0));
+ EXPECT_CALL(encoder_,
+ Encode(_, _, Pointee(ElementsAreArray(&frame_types[0],
+ frame_types.size()))))
+ .Times(1)
+ .WillRepeatedly(Return(0));
}
static void ConfigureStream(int width,
@@ -300,11 +289,9 @@ TEST_F(TestVideoSenderWithMockEncoder, TestIntraRequests) {
TEST_F(TestVideoSenderWithMockEncoder, TestIntraRequestsInternalCapture) {
// De-register current external encoder.
- EXPECT_EQ(0,
- sender_->RegisterExternalEncoder(NULL, kUnusedPayloadType, false));
+ sender_->RegisterExternalEncoder(nullptr, kUnusedPayloadType, false);
// Register encoder with internal capture.
- EXPECT_EQ(
- 0, sender_->RegisterExternalEncoder(&encoder_, kUnusedPayloadType, true));
+ sender_->RegisterExternalEncoder(&encoder_, kUnusedPayloadType, true);
EXPECT_EQ(0, sender_->RegisterSendCodec(&settings_, 1, 1200));
ExpectIntraRequest(0);
EXPECT_EQ(0, sender_->IntraFrameRequest(0));
@@ -384,8 +371,7 @@ class TestVideoSenderWithVp8 : public TestVideoSender {
codec_.startBitrate = codec_bitrate_kbps_;
codec_.maxBitrate = codec_bitrate_kbps_;
encoder_.reset(VP8Encoder::Create());
- ASSERT_EQ(0, sender_->RegisterExternalEncoder(encoder_.get(), codec_.plType,
- false));
+ sender_->RegisterExternalEncoder(encoder_.get(), codec_.plType, false);
EXPECT_EQ(0, sender_->RegisterSendCodec(&codec_, 1, 1200));
}
@@ -393,8 +379,7 @@ class TestVideoSenderWithVp8 : public TestVideoSender {
int height,
int temporal_layers) {
VideoCodec codec;
- memset(&codec, 0, sizeof(codec));
- EXPECT_EQ(0, VideoCodingModule::Codec(kVideoCodecVP8, &codec));
+ VideoCodingModule::Codec(kVideoCodecVP8, &codec);
codec.width = width;
codec.height = height;
codec.codecSpecific.VP8.numberOfTemporalLayers = temporal_layers;
@@ -436,8 +421,12 @@ class TestVideoSenderWithVp8 : public TestVideoSender {
int available_bitrate_kbps_;
};
-TEST_F(TestVideoSenderWithVp8,
- DISABLED_ON_IOS(DISABLED_ON_ANDROID(FixedTemporalLayersStrategy))) {
+#if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS)
+#define MAYBE_FixedTemporalLayersStrategy DISABLED_FixedTemporalLayersStrategy
+#else
+#define MAYBE_FixedTemporalLayersStrategy FixedTemporalLayersStrategy
+#endif
+TEST_F(TestVideoSenderWithVp8, MAYBE_FixedTemporalLayersStrategy) {
const int low_b = codec_bitrate_kbps_ * kVp8LayerRateAlloction[2][0];
const int mid_b = codec_bitrate_kbps_ * kVp8LayerRateAlloction[2][1];
const int high_b = codec_bitrate_kbps_ * kVp8LayerRateAlloction[2][2];
@@ -451,8 +440,13 @@ TEST_F(TestVideoSenderWithVp8,
}
}
-TEST_F(TestVideoSenderWithVp8,
- DISABLED_ON_IOS(DISABLED_ON_ANDROID(RealTimeTemporalLayersStrategy))) {
+#if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS)
+#define MAYBE_RealTimeTemporalLayersStrategy \
+ DISABLED_RealTimeTemporalLayersStrategy
+#else
+#define MAYBE_RealTimeTemporalLayersStrategy RealTimeTemporalLayersStrategy
+#endif
+TEST_F(TestVideoSenderWithVp8, MAYBE_RealTimeTemporalLayersStrategy) {
Config extra_options;
extra_options.Set<TemporalLayers::Factory>(
new RealTimeTemporalLayersFactory());