author    Ben Murdoch <benm@google.com>    2014-07-20 18:25:52 -0700
committer Ben Murdoch <benm@google.com>    2014-07-20 18:25:52 -0700
commit    116680a4aac90f2aa7413d9095a592090648e557 (patch)
tree      f7c6fed0e63d6a2804243d4a31a752dca39fb076 /media
parent    1f14a4515e04c9ffc9bac4dd1e2f68611626b800 (diff)
download  chromium_org-116680a4aac90f2aa7413d9095a592090648e557.tar.gz
Merge from Chromium at DEPS revision 284076

This commit was generated by merge_to_master.py.

Change-Id: I9a279485b02fe7ceddcd32d992a714ff132e99ae
Diffstat (limited to 'media')
-rw-r--r--media/BUILD.gn1448
-rw-r--r--media/OWNERS1
-rw-r--r--media/audio/audio_input_volume_unittest.cc2
-rw-r--r--media/audio/audio_manager.h7
-rw-r--r--media/audio/audio_output_controller.cc2
-rw-r--r--media/audio/fake_audio_input_stream.cc66
-rw-r--r--media/audio/fake_audio_input_stream.h6
-rw-r--r--media/audio/mac/audio_low_latency_input_mac.cc111
-rw-r--r--media/audio/mac/audio_low_latency_input_mac.h28
-rw-r--r--media/audio/mac/audio_manager_mac.cc13
-rw-r--r--media/audio/pulse/audio_manager_pulse.cc7
-rw-r--r--media/audio/sounds/audio_stream_handler.cc31
-rw-r--r--media/audio/win/audio_low_latency_input_win.cc24
-rw-r--r--media/audio/win/audio_low_latency_input_win.h6
-rw-r--r--media/audio/win/audio_low_latency_output_win.cc18
-rw-r--r--media/audio/win/core_audio_util_win.cc15
-rw-r--r--media/audio/win/core_audio_util_win.h14
-rw-r--r--media/audio/win/core_audio_util_win_unittest.cc16
-rw-r--r--media/base/android/OWNERS3
-rw-r--r--media/base/android/java/src/org/chromium/media/VideoCaptureAndroid.java2
-rw-r--r--media/base/android/media_codec_bridge.cc3
-rw-r--r--media/base/android/media_drm_bridge.cc7
-rw-r--r--media/base/android/media_player_manager.h6
-rw-r--r--media/base/android/media_source_player.cc31
-rw-r--r--media/base/android/media_source_player.h9
-rw-r--r--media/base/android/media_source_player_unittest.cc5
-rw-r--r--media/base/audio_block_fifo.cc83
-rw-r--r--media/base/audio_block_fifo.h70
-rw-r--r--media/base/audio_block_fifo_unittest.cc149
-rw-r--r--media/base/audio_bus.cc11
-rw-r--r--media/base/audio_bus.h22
-rw-r--r--media/base/audio_capturer_source.h2
-rw-r--r--media/base/audio_decoder.h15
-rw-r--r--media/base/audio_hash.cc4
-rw-r--r--media/base/audio_hash.h3
-rw-r--r--media/base/audio_renderer.h24
-rw-r--r--media/base/audio_renderer_mixer_input.cc34
-rw-r--r--media/base/audio_renderer_mixer_input.h2
-rw-r--r--media/base/audio_renderer_mixer_input_unittest.cc10
-rw-r--r--media/base/audio_renderer_mixer_unittest.cc4
-rw-r--r--media/base/audio_splicer.cc16
-rw-r--r--media/base/audio_splicer.h6
-rw-r--r--media/base/audio_splicer_unittest.cc38
-rw-r--r--media/base/audio_video_metadata_extractor.cc5
-rw-r--r--media/base/buffering_state.h3
-rw-r--r--media/base/clock.cc130
-rw-r--r--media/base/clock.h124
-rw-r--r--media/base/clock_unittest.cc253
-rw-r--r--media/base/container_names.cc6
-rw-r--r--media/base/decoder_buffer.h4
-rw-r--r--media/base/demuxer_stream.h3
-rw-r--r--media/base/fake_text_track_stream.cc4
-rw-r--r--media/base/fake_text_track_stream.h1
-rw-r--r--media/base/mock_filters.cc4
-rw-r--r--media/base/mock_filters.h31
-rw-r--r--media/base/pipeline.cc292
-rw-r--r--media/base/pipeline.h80
-rw-r--r--media/base/pipeline_unittest.cc346
-rw-r--r--media/base/stream_parser_buffer.cc7
-rw-r--r--media/base/text_renderer.cc3
-rw-r--r--media/base/text_renderer.h5
-rw-r--r--media/base/text_renderer_unittest.cc6
-rw-r--r--media/base/time_delta_interpolator.cc79
-rw-r--r--media/base/time_delta_interpolator.h82
-rw-r--r--media/base/time_delta_interpolator_unittest.cc199
-rw-r--r--media/base/video_decoder.h17
-rw-r--r--media/base/video_frame.cc97
-rw-r--r--media/base/video_frame.h7
-rw-r--r--media/base/video_frame_unittest.cc13
-rw-r--r--media/base/video_renderer.h27
-rw-r--r--media/base/video_rotation.h22
-rw-r--r--media/base/video_util.cc14
-rw-r--r--media/base/video_util.h7
-rw-r--r--media/cast/DEPS1
-rw-r--r--media/cast/README61
-rw-r--r--media/cast/cast.gyp161
-rw-r--r--media/cast/cast_config.cc24
-rw-r--r--media/cast/cast_config.h73
-rw-r--r--media/cast/cast_defines.h10
-rw-r--r--media/cast/cast_receiver.h8
-rw-r--r--media/cast/cast_sender.h6
-rw-r--r--media/cast/cast_sender_impl.cc7
-rw-r--r--media/cast/cast_sender_impl.h10
-rw-r--r--media/cast/cast_testing.gypi130
-rw-r--r--media/cast/common/clock_drift_smoother.cc (renamed from media/cast/base/clock_drift_smoother.cc)2
-rw-r--r--media/cast/common/clock_drift_smoother.h (renamed from media/cast/base/clock_drift_smoother.h)6
-rw-r--r--media/cast/common/transport_encryption_handler.cc (renamed from media/cast/transport/utility/transport_encryption_handler.cc)42
-rw-r--r--media/cast/common/transport_encryption_handler.h (renamed from media/cast/transport/utility/transport_encryption_handler.h)15
-rw-r--r--media/cast/logging/proto/BUILD.gn22
-rw-r--r--media/cast/net/DEPS16
-rw-r--r--media/cast/net/cast_transport_config.cc (renamed from media/cast/transport/cast_transport_config.cc)26
-rw-r--r--media/cast/net/cast_transport_config.h (renamed from media/cast/transport/cast_transport_config.h)70
-rw-r--r--media/cast/net/cast_transport_defines.h (renamed from media/cast/transport/cast_transport_defines.h)30
-rw-r--r--media/cast/net/cast_transport_sender.h (renamed from media/cast/transport/cast_transport_sender.h)20
-rw-r--r--media/cast/net/cast_transport_sender_impl.cc (renamed from media/cast/transport/cast_transport_sender_impl.cc)37
-rw-r--r--media/cast/net/cast_transport_sender_impl.h (renamed from media/cast/transport/cast_transport_sender_impl.h)27
-rw-r--r--media/cast/net/cast_transport_sender_impl_unittest.cc (renamed from media/cast/transport/cast_transport_sender_impl_unittest.cc)12
-rw-r--r--media/cast/net/frame_id_wrap_helper_test.cc (renamed from media/cast/transport/frame_id_wrap_helper_test.cc)6
-rw-r--r--media/cast/net/pacing/mock_paced_packet_sender.cc (renamed from media/cast/transport/pacing/mock_paced_packet_sender.cc)6
-rw-r--r--media/cast/net/pacing/mock_paced_packet_sender.h (renamed from media/cast/transport/pacing/mock_paced_packet_sender.h)12
-rw-r--r--media/cast/net/pacing/paced_sender.cc (renamed from media/cast/transport/pacing/paced_sender.cc)60
-rw-r--r--media/cast/net/pacing/paced_sender.h (renamed from media/cast/transport/pacing/paced_sender.h)36
-rw-r--r--media/cast/net/pacing/paced_sender_unittest.cc (renamed from media/cast/transport/pacing/paced_sender_unittest.cc)77
-rw-r--r--media/cast/net/rtcp/mock_rtcp_receiver_feedback.cc (renamed from media/cast/rtcp/mock_rtcp_receiver_feedback.cc)4
-rw-r--r--media/cast/net/rtcp/mock_rtcp_receiver_feedback.h (renamed from media/cast/rtcp/mock_rtcp_receiver_feedback.h)10
-rw-r--r--media/cast/net/rtcp/mock_rtcp_sender_feedback.cc (renamed from media/cast/rtcp/mock_rtcp_sender_feedback.cc)4
-rw-r--r--media/cast/net/rtcp/mock_rtcp_sender_feedback.h (renamed from media/cast/rtcp/mock_rtcp_sender_feedback.h)4
-rw-r--r--media/cast/net/rtcp/receiver_rtcp_event_subscriber.cc (renamed from media/cast/rtcp/receiver_rtcp_event_subscriber.cc)2
-rw-r--r--media/cast/net/rtcp/receiver_rtcp_event_subscriber.h (renamed from media/cast/rtcp/receiver_rtcp_event_subscriber.h)2
-rw-r--r--media/cast/net/rtcp/receiver_rtcp_event_subscriber_unittest.cc (renamed from media/cast/rtcp/receiver_rtcp_event_subscriber_unittest.cc)2
-rw-r--r--media/cast/net/rtcp/rtcp.cc (renamed from media/cast/rtcp/rtcp.cc)74
-rw-r--r--media/cast/net/rtcp/rtcp.h (renamed from media/cast/rtcp/rtcp.h)24
-rw-r--r--media/cast/net/rtcp/rtcp_builder.cc (renamed from media/cast/transport/rtcp/rtcp_builder.cc)10
-rw-r--r--media/cast/net/rtcp/rtcp_builder.h (renamed from media/cast/transport/rtcp/rtcp_builder.h)14
-rw-r--r--media/cast/net/rtcp/rtcp_builder_unittest.cc (renamed from media/cast/transport/rtcp/rtcp_builder_unittest.cc)10
-rw-r--r--media/cast/net/rtcp/rtcp_defines.cc (renamed from media/cast/rtcp/rtcp_defines.cc)2
-rw-r--r--media/cast/net/rtcp/rtcp_defines.h (renamed from media/cast/rtcp/rtcp_defines.h)4
-rw-r--r--media/cast/net/rtcp/rtcp_receiver.cc (renamed from media/cast/rtcp/rtcp_receiver.cc)12
-rw-r--r--media/cast/net/rtcp/rtcp_receiver.h (renamed from media/cast/rtcp/rtcp_receiver.h)14
-rw-r--r--media/cast/net/rtcp/rtcp_receiver_unittest.cc (renamed from media/cast/rtcp/rtcp_receiver_unittest.cc)29
-rw-r--r--media/cast/net/rtcp/rtcp_sender.cc (renamed from media/cast/rtcp/rtcp_sender.cc)76
-rw-r--r--media/cast/net/rtcp/rtcp_sender.h (renamed from media/cast/rtcp/rtcp_sender.h)26
-rw-r--r--media/cast/net/rtcp/rtcp_sender_unittest.cc (renamed from media/cast/rtcp/rtcp_sender_unittest.cc)75
-rw-r--r--media/cast/net/rtcp/rtcp_unittest.cc (renamed from media/cast/rtcp/rtcp_unittest.cc)53
-rw-r--r--media/cast/net/rtcp/rtcp_utility.cc (renamed from media/cast/rtcp/rtcp_utility.cc)30
-rw-r--r--media/cast/net/rtcp/rtcp_utility.h (renamed from media/cast/rtcp/rtcp_utility.h)4
-rw-r--r--media/cast/net/rtcp/test_rtcp_packet_builder.cc (renamed from media/cast/rtcp/test_rtcp_packet_builder.cc)26
-rw-r--r--media/cast/net/rtcp/test_rtcp_packet_builder.h (renamed from media/cast/rtcp/test_rtcp_packet_builder.h)9
-rw-r--r--media/cast/net/rtp/cast_message_builder.cc (renamed from media/cast/framer/cast_message_builder.cc)4
-rw-r--r--media/cast/net/rtp/cast_message_builder.h (renamed from media/cast/framer/cast_message_builder.h)8
-rw-r--r--media/cast/net/rtp/cast_message_builder_unittest.cc (renamed from media/cast/framer/cast_message_builder_unittest.cc)8
-rw-r--r--media/cast/net/rtp/frame_buffer.cc (renamed from media/cast/framer/frame_buffer.cc)12
-rw-r--r--media/cast/net/rtp/frame_buffer.h (renamed from media/cast/framer/frame_buffer.h)6
-rw-r--r--media/cast/net/rtp/frame_buffer_unittest.cc (renamed from media/cast/framer/frame_buffer_unittest.cc)15
-rw-r--r--media/cast/net/rtp/frame_id_map.cc (renamed from media/cast/framer/frame_id_map.cc)6
-rw-r--r--media/cast/net/rtp/frame_id_map.h (renamed from media/cast/framer/frame_id_map.h)6
-rw-r--r--media/cast/net/rtp/framer.cc (renamed from media/cast/framer/framer.cc)6
-rw-r--r--media/cast/net/rtp/framer.h (renamed from media/cast/framer/framer.h)14
-rw-r--r--media/cast/net/rtp/framer_unittest.cc (renamed from media/cast/framer/framer_unittest.cc)69
-rw-r--r--media/cast/net/rtp/mock_rtp_feedback.h (renamed from media/cast/rtp_receiver/rtp_parser/include/mock/mock_rtp_feedback.h)4
-rw-r--r--media/cast/net/rtp/mock_rtp_payload_feedback.cc (renamed from media/cast/rtp_receiver/mock_rtp_payload_feedback.cc)4
-rw-r--r--media/cast/net/rtp/mock_rtp_payload_feedback.h (renamed from media/cast/rtp_receiver/mock_rtp_payload_feedback.h)4
-rw-r--r--media/cast/net/rtp/packet_storage.cc (renamed from media/cast/transport/rtp_sender/packet_storage/packet_storage.cc)7
-rw-r--r--media/cast/net/rtp/packet_storage.h (renamed from media/cast/transport/rtp_sender/packet_storage/packet_storage.h)16
-rw-r--r--media/cast/net/rtp/packet_storage_unittest.cc (renamed from media/cast/transport/rtp_sender/packet_storage/packet_storage_unittest.cc)6
-rw-r--r--media/cast/net/rtp/receiver_stats.cc (renamed from media/cast/rtp_receiver/receiver_stats.cc)6
-rw-r--r--media/cast/net/rtp/receiver_stats.h (renamed from media/cast/rtp_receiver/receiver_stats.h)6
-rw-r--r--media/cast/net/rtp/receiver_stats_unittest.cc (renamed from media/cast/rtp_receiver/receiver_stats_unittest.cc)6
-rw-r--r--media/cast/net/rtp/rtp_header_parser.cc (renamed from media/cast/transport/rtp_sender/rtp_packetizer/test/rtp_header_parser.cc)6
-rw-r--r--media/cast/net/rtp/rtp_header_parser.h (renamed from media/cast/transport/rtp_sender/rtp_packetizer/test/rtp_header_parser.h)16
-rw-r--r--media/cast/net/rtp/rtp_packet_builder.cc (renamed from media/cast/rtp_receiver/rtp_parser/test/rtp_packet_builder.cc)4
-rw-r--r--media/cast/net/rtp/rtp_packet_builder.h (renamed from media/cast/rtp_receiver/rtp_parser/test/rtp_packet_builder.h)4
-rw-r--r--media/cast/net/rtp/rtp_packetizer.cc (renamed from media/cast/transport/rtp_sender/rtp_packetizer/rtp_packetizer.cc)15
-rw-r--r--media/cast/net/rtp/rtp_packetizer.h (renamed from media/cast/transport/rtp_sender/rtp_packetizer/rtp_packetizer.h)22
-rw-r--r--media/cast/net/rtp/rtp_packetizer_unittest.cc (renamed from media/cast/transport/rtp_sender/rtp_packetizer/rtp_packetizer_unittest.cc)14
-rw-r--r--media/cast/net/rtp/rtp_parser.cc (renamed from media/cast/rtp_receiver/rtp_parser/rtp_parser.cc)4
-rw-r--r--media/cast/net/rtp/rtp_parser.h (renamed from media/cast/rtp_receiver/rtp_parser/rtp_parser.h)14
-rw-r--r--media/cast/net/rtp/rtp_parser_unittest.cc (renamed from media/cast/rtp_receiver/rtp_parser/rtp_parser_unittest.cc)12
-rw-r--r--media/cast/net/rtp/rtp_receiver_defines.cc (renamed from media/cast/rtp_receiver/rtp_receiver_defines.cc)2
-rw-r--r--media/cast/net/rtp/rtp_receiver_defines.h (renamed from media/cast/rtp_receiver/rtp_receiver_defines.h)4
-rw-r--r--media/cast/net/rtp/rtp_sender.cc (renamed from media/cast/transport/rtp_sender/rtp_sender.cc)35
-rw-r--r--media/cast/net/rtp/rtp_sender.h (renamed from media/cast/transport/rtp_sender/rtp_sender.h)33
-rw-r--r--media/cast/net/udp_transport.cc (renamed from media/cast/transport/transport/udp_transport.cc)4
-rw-r--r--media/cast/net/udp_transport.h (renamed from media/cast/transport/transport/udp_transport.h)12
-rw-r--r--media/cast/net/udp_transport_unittest.cc (renamed from media/cast/transport/transport/udp_transport_unittest.cc)10
-rw-r--r--media/cast/receiver/audio_decoder.cc18
-rw-r--r--media/cast/receiver/audio_decoder.h6
-rw-r--r--media/cast/receiver/audio_decoder_unittest.cc33
-rw-r--r--media/cast/receiver/cast_receiver_impl.cc14
-rw-r--r--media/cast/receiver/cast_receiver_impl.h16
-rw-r--r--media/cast/receiver/frame_receiver.cc8
-rw-r--r--media/cast/receiver/frame_receiver.h20
-rw-r--r--media/cast/receiver/frame_receiver_unittest.cc8
-rw-r--r--media/cast/receiver/video_decoder.cc27
-rw-r--r--media/cast/receiver/video_decoder.h6
-rw-r--r--media/cast/receiver/video_decoder_unittest.cc14
-rw-r--r--media/cast/sender/audio_encoder.cc (renamed from media/cast/audio_sender/audio_encoder.cc)39
-rw-r--r--media/cast/sender/audio_encoder.h (renamed from media/cast/audio_sender/audio_encoder.h)16
-rw-r--r--media/cast/sender/audio_encoder_unittest.cc (renamed from media/cast/audio_sender/audio_encoder_unittest.cc)47
-rw-r--r--media/cast/sender/audio_sender.cc (renamed from media/cast/audio_sender/audio_sender.cc)59
-rw-r--r--media/cast/sender/audio_sender.h (renamed from media/cast/audio_sender/audio_sender.h)18
-rw-r--r--media/cast/sender/audio_sender_unittest.cc (renamed from media/cast/audio_sender/audio_sender_unittest.cc)28
-rw-r--r--media/cast/sender/congestion_control.cc (renamed from media/cast/congestion_control/congestion_control.cc)7
-rw-r--r--media/cast/sender/congestion_control.h (renamed from media/cast/congestion_control/congestion_control.h)6
-rw-r--r--media/cast/sender/congestion_control_unittest.cc (renamed from media/cast/congestion_control/congestion_control_unittest.cc)5
-rw-r--r--media/cast/sender/external_video_encoder.cc (renamed from media/cast/video_sender/external_video_encoder.cc)24
-rw-r--r--media/cast/sender/external_video_encoder.h (renamed from media/cast/video_sender/external_video_encoder.h)8
-rw-r--r--media/cast/sender/external_video_encoder_unittest.cc (renamed from media/cast/video_sender/external_video_encoder_unittest.cc)14
-rw-r--r--media/cast/sender/fake_software_video_encoder.cc (renamed from media/cast/video_sender/fake_software_video_encoder.cc)14
-rw-r--r--media/cast/sender/fake_software_video_encoder.h (renamed from media/cast/video_sender/fake_software_video_encoder.h)10
-rw-r--r--media/cast/sender/rtp_timestamp_helper.cc (renamed from media/cast/rtp_timestamp_helper.cc)2
-rw-r--r--media/cast/sender/rtp_timestamp_helper.h (renamed from media/cast/rtp_timestamp_helper.h)6
-rw-r--r--media/cast/sender/software_video_encoder.h (renamed from media/cast/video_sender/software_video_encoder.h)10
-rw-r--r--media/cast/sender/video_encoder.h (renamed from media/cast/video_sender/video_encoder.h)10
-rw-r--r--media/cast/sender/video_encoder_impl.cc (renamed from media/cast/video_sender/video_encoder_impl.cc)19
-rw-r--r--media/cast/sender/video_encoder_impl.h (renamed from media/cast/video_sender/video_encoder_impl.h)13
-rw-r--r--media/cast/sender/video_encoder_impl_unittest.cc (renamed from media/cast/video_sender/video_encoder_impl_unittest.cc)14
-rw-r--r--media/cast/sender/video_sender.cc (renamed from media/cast/video_sender/video_sender.cc)35
-rw-r--r--media/cast/sender/video_sender.h (renamed from media/cast/video_sender/video_sender.h)25
-rw-r--r--media/cast/sender/video_sender_unittest.cc (renamed from media/cast/video_sender/video_sender_unittest.cc)34
-rw-r--r--media/cast/sender/vp8_encoder.cc (renamed from media/cast/video_sender/codecs/vp8/vp8_encoder.cc)16
-rw-r--r--media/cast/sender/vp8_encoder.h (renamed from media/cast/video_sender/codecs/vp8/vp8_encoder.h)12
-rw-r--r--media/cast/test/cast_benchmarks.cc147
-rw-r--r--media/cast/test/end2end_unittest.cc123
-rw-r--r--media/cast/test/fake_media_source.cc594
-rw-r--r--media/cast/test/fake_media_source.h152
-rw-r--r--media/cast/test/loopback_transport.cc68
-rw-r--r--media/cast/test/loopback_transport.h55
-rw-r--r--media/cast/test/proto/BUILD.gn14
-rw-r--r--media/cast/test/proto/network_simulation_model.proto27
-rw-r--r--media/cast/test/receiver.cc2
-rw-r--r--media/cast/test/sender.cc708
-rw-r--r--media/cast/test/simulator.cc445
-rw-r--r--media/cast/test/utility/default_config.cc43
-rw-r--r--media/cast/test/utility/default_config.h9
-rw-r--r--media/cast/test/utility/in_process_receiver.cc10
-rw-r--r--media/cast/test/utility/in_process_receiver.h10
-rw-r--r--media/cast/test/utility/udp_proxy.cc238
-rw-r--r--media/cast/test/utility/udp_proxy.h64
-rw-r--r--media/cast/test/utility/udp_proxy_main.cc2
-rw-r--r--media/cdm/ppapi/cdm_adapter.cc77
-rw-r--r--media/cdm/ppapi/cdm_adapter.h18
-rw-r--r--media/cdm/ppapi/cdm_helpers.cc58
-rw-r--r--media/cdm/ppapi/cdm_helpers.h51
-rw-r--r--media/ffmpeg/ffmpeg_common.cc12
-rw-r--r--media/ffmpeg/ffmpeg_common.h6
-rw-r--r--media/ffmpeg/ffmpeg_common_unittest.cc65
-rw-r--r--media/filters/audio_clock.cc20
-rw-r--r--media/filters/audio_clock.h6
-rw-r--r--media/filters/audio_clock_unittest.cc50
-rw-r--r--media/filters/audio_decoder_selector_unittest.cc61
-rw-r--r--media/filters/audio_decoder_unittest.cc22
-rw-r--r--media/filters/audio_file_reader.cc24
-rw-r--r--media/filters/audio_file_reader.h1
-rw-r--r--media/filters/audio_file_reader_unittest.cc4
-rw-r--r--media/filters/audio_renderer_impl.cc215
-rw-r--r--media/filters/audio_renderer_impl.h79
-rw-r--r--media/filters/audio_renderer_impl_unittest.cc436
-rw-r--r--media/filters/chunk_demuxer.cc17
-rw-r--r--media/filters/chunk_demuxer.h1
-rw-r--r--media/filters/chunk_demuxer_unittest.cc394
-rw-r--r--media/filters/decoder_selector.cc42
-rw-r--r--media/filters/decoder_selector.h7
-rw-r--r--media/filters/decoder_stream.cc121
-rw-r--r--media/filters/decoder_stream.h17
-rw-r--r--media/filters/decrypting_audio_decoder.cc21
-rw-r--r--media/filters/decrypting_audio_decoder.h4
-rw-r--r--media/filters/decrypting_audio_decoder_unittest.cc6
-rw-r--r--media/filters/decrypting_demuxer_stream.cc53
-rw-r--r--media/filters/decrypting_demuxer_stream.h13
-rw-r--r--media/filters/decrypting_demuxer_stream_unittest.cc79
-rw-r--r--media/filters/decrypting_video_decoder.cc23
-rw-r--r--media/filters/decrypting_video_decoder.h5
-rw-r--r--media/filters/decrypting_video_decoder_unittest.cc72
-rw-r--r--media/filters/fake_demuxer_stream.cc4
-rw-r--r--media/filters/fake_demuxer_stream.h1
-rw-r--r--media/filters/fake_video_decoder.cc36
-rw-r--r--media/filters/fake_video_decoder.h1
-rw-r--r--media/filters/fake_video_decoder_unittest.cc26
-rw-r--r--media/filters/ffmpeg_audio_decoder.cc20
-rw-r--r--media/filters/ffmpeg_audio_decoder.h1
-rw-r--r--media/filters/ffmpeg_demuxer.cc135
-rw-r--r--media/filters/ffmpeg_demuxer.h19
-rw-r--r--media/filters/ffmpeg_demuxer_unittest.cc114
-rw-r--r--media/filters/ffmpeg_video_decoder.cc15
-rw-r--r--media/filters/ffmpeg_video_decoder.h1
-rw-r--r--media/filters/ffmpeg_video_decoder_unittest.cc24
-rw-r--r--media/filters/frame_processor.cc359
-rw-r--r--media/filters/frame_processor.h139
-rw-r--r--media/filters/frame_processor_base.cc214
-rw-r--r--media/filters/frame_processor_base.h234
-rw-r--r--media/filters/gpu_video_decoder.cc19
-rw-r--r--media/filters/gpu_video_decoder.h1
-rw-r--r--media/filters/h264_bitstream_buffer.cc152
-rw-r--r--media/filters/h264_bitstream_buffer.h120
-rw-r--r--media/filters/h264_bitstream_buffer_unittest.cc56
-rw-r--r--media/filters/h264_parser.cc6
-rw-r--r--media/filters/h264_parser.h39
-rw-r--r--media/filters/opus_audio_decoder.cc9
-rw-r--r--media/filters/opus_audio_decoder.h1
-rw-r--r--media/filters/pipeline_integration_test.cc78
-rw-r--r--media/filters/pipeline_integration_test_base.cc29
-rw-r--r--media/filters/pipeline_integration_test_base.h2
-rw-r--r--media/filters/skcanvas_video_renderer.cc10
-rw-r--r--media/filters/skcanvas_video_renderer_unittest.cc10
-rw-r--r--media/filters/source_buffer_platform.cc14
-rw-r--r--media/filters/source_buffer_platform.h18
-rw-r--r--media/filters/source_buffer_stream.cc17
-rw-r--r--media/filters/source_buffer_stream_unittest.cc226
-rw-r--r--media/filters/video_decoder_selector_unittest.cc61
-rw-r--r--media/filters/video_frame_stream_unittest.cc94
-rw-r--r--media/filters/video_renderer_impl.cc249
-rw-r--r--media/filters/video_renderer_impl.h74
-rw-r--r--media/filters/video_renderer_impl_unittest.cc248
-rw-r--r--media/filters/vpx_video_decoder.cc8
-rw-r--r--media/filters/vpx_video_decoder.h1
-rw-r--r--media/formats/mp2t/es_adapter_video.cc190
-rw-r--r--media/formats/mp2t/es_adapter_video.h98
-rw-r--r--media/formats/mp2t/es_adapter_video_unittest.cc148
-rw-r--r--media/formats/mp2t/es_parser_adts.cc145
-rw-r--r--media/formats/mp2t/es_parser_adts.h21
-rw-r--r--media/formats/mp2t/es_parser_h264.cc19
-rw-r--r--media/formats/mp2t/es_parser_h264.h7
-rw-r--r--media/formats/mp2t/mp2t_stream_parser.cc106
-rw-r--r--media/formats/mp2t/mp2t_stream_parser.h12
-rw-r--r--media/formats/mp2t/mp2t_stream_parser_unittest.cc128
-rw-r--r--media/formats/mp2t/ts_packet.cc7
-rw-r--r--media/formats/mp4/mp4_stream_parser.cc9
-rw-r--r--media/formats/mp4/mp4_stream_parser.h8
-rw-r--r--media/formats/mp4/track_run_iterator.cc8
-rw-r--r--media/formats/webm/webm_audio_client.cc5
-rw-r--r--media/formats/webm/webm_video_client.cc8
-rw-r--r--media/media.gyp45
-rw-r--r--media/media.target.darwin-arm.mk32
-rw-r--r--media/media.target.darwin-arm64.mk32
-rw-r--r--media/media.target.darwin-mips.mk32
-rw-r--r--media/media.target.darwin-x86.mk32
-rw-r--r--media/media.target.darwin-x86_64.mk32
-rw-r--r--media/media.target.linux-arm.mk32
-rw-r--r--media/media.target.linux-arm64.mk32
-rw-r--r--media/media.target.linux-mips.mk32
-rw-r--r--media/media.target.linux-x86.mk32
-rw-r--r--media/media.target.linux-x86_64.mk32
-rw-r--r--media/media_android_imageformat_list.target.darwin-arm.mk10
-rw-r--r--media/media_android_imageformat_list.target.darwin-arm64.mk10
-rw-r--r--media/media_android_imageformat_list.target.darwin-mips.mk10
-rw-r--r--media/media_android_imageformat_list.target.darwin-x86.mk10
-rw-r--r--media/media_android_imageformat_list.target.darwin-x86_64.mk10
-rw-r--r--media/media_android_imageformat_list.target.linux-arm.mk10
-rw-r--r--media/media_android_imageformat_list.target.linux-arm64.mk10
-rw-r--r--media/media_android_imageformat_list.target.linux-mips.mk10
-rw-r--r--media/media_android_imageformat_list.target.linux-x86.mk10
-rw-r--r--media/media_android_imageformat_list.target.linux-x86_64.mk10
-rw-r--r--media/media_android_jni_headers.target.darwin-arm.mk10
-rw-r--r--media/media_android_jni_headers.target.darwin-arm64.mk10
-rw-r--r--media/media_android_jni_headers.target.darwin-mips.mk10
-rw-r--r--media/media_android_jni_headers.target.darwin-x86.mk10
-rw-r--r--media/media_android_jni_headers.target.darwin-x86_64.mk10
-rw-r--r--media/media_android_jni_headers.target.linux-arm.mk10
-rw-r--r--media/media_android_jni_headers.target.linux-arm64.mk10
-rw-r--r--media/media_android_jni_headers.target.linux-mips.mk10
-rw-r--r--media/media_android_jni_headers.target.linux-x86.mk10
-rw-r--r--media/media_android_jni_headers.target.linux-x86_64.mk10
-rw-r--r--media/media_asm.target.darwin-x86.mk10
-rw-r--r--media/media_asm.target.darwin-x86_64.mk10
-rw-r--r--media/media_asm.target.linux-x86.mk10
-rw-r--r--media/media_asm.target.linux-x86_64.mk10
-rw-r--r--media/media_mmx.target.darwin-x86.mk10
-rw-r--r--media/media_mmx.target.darwin-x86_64.mk10
-rw-r--r--media/media_mmx.target.linux-x86.mk10
-rw-r--r--media/media_mmx.target.linux-x86_64.mk10
-rw-r--r--media/media_sse2.target.darwin-x86.mk10
-rw-r--r--media/media_sse2.target.darwin-x86_64.mk10
-rw-r--r--media/media_sse2.target.linux-x86.mk10
-rw-r--r--media/media_sse2.target.linux-x86_64.mk10
-rw-r--r--media/player_android.target.darwin-arm.mk10
-rw-r--r--media/player_android.target.darwin-arm64.mk10
-rw-r--r--media/player_android.target.darwin-mips.mk10
-rw-r--r--media/player_android.target.darwin-x86.mk10
-rw-r--r--media/player_android.target.darwin-x86_64.mk10
-rw-r--r--media/player_android.target.linux-arm.mk10
-rw-r--r--media/player_android.target.linux-arm64.mk10
-rw-r--r--media/player_android.target.linux-mips.mk10
-rw-r--r--media/player_android.target.linux-x86.mk10
-rw-r--r--media/player_android.target.linux-x86_64.mk10
-rw-r--r--media/shared_memory_support.target.darwin-arm.mk10
-rw-r--r--media/shared_memory_support.target.darwin-arm64.mk10
-rw-r--r--media/shared_memory_support.target.darwin-mips.mk10
-rw-r--r--media/shared_memory_support.target.darwin-x86.mk10
-rw-r--r--media/shared_memory_support.target.darwin-x86_64.mk10
-rw-r--r--media/shared_memory_support.target.linux-arm.mk10
-rw-r--r--media/shared_memory_support.target.linux-arm64.mk10
-rw-r--r--media/shared_memory_support.target.linux-mips.mk10
-rw-r--r--media/shared_memory_support.target.linux-x86.mk10
-rw-r--r--media/shared_memory_support.target.linux-x86_64.mk10
-rw-r--r--media/test/data/README19
-rw-r--r--media/test/data/audio-start-time-only.webmbin0 -> 65536 bytes
-rw-r--r--media/test/data/bear-320x240-av_enc-a.webmbin0 -> 220572 bytes
-rw-r--r--media/test/data/bear-320x240-av_enc-v.webmbin0 -> 219816 bytes
-rw-r--r--media/test/data/bear-320x240-v-vp9_enc-v.webmbin0 -> 68099 bytes
-rw-r--r--media/test/data/bear-320x240-v_enc-v.webmbin0 -> 196175 bytes
-rw-r--r--media/test/data/bear-a_enc-a.webmbin0 -> 25422 bytes
-rw-r--r--media/test/data/bear-opus.webmbin0 -> 25717 bytes
-rw-r--r--media/test/data/bear-vp9-odd-dimensions.webmbin0 -> 31548 bytes
-rw-r--r--media/test/data/bear.flacbin46751 -> 17837 bytes
-rw-r--r--media/test/data/bear.mp4bin0 -> 41099 bytes
-rw-r--r--media/test/data/bear.webmbin0 -> 58199 bytes
-rw-r--r--media/test/data/bear_192kHz.wavbin0 -> 820418 bytes
-rw-r--r--media/test/data/bear_3kHz.wavbin0 -> 12900 bytes
-rw-r--r--media/test/data/bear_alaw.wavbin0 -> 8591 bytes
-rw-r--r--media/test/data/bear_divx_mp3.avibin0 -> 86242 bytes
-rw-r--r--media/test/data/bear_gsm_ms.wavbin0 -> 1815 bytes
-rw-r--r--media/test/data/bear_h264_aac.3gpbin0 -> 35206 bytes
-rw-r--r--media/test/data/bear_mpeg4_amrnb.3gpbin0 -> 76962 bytes
-rw-r--r--media/test/data/bear_mpeg4_mp3.avibin0 -> 86242 bytes
-rw-r--r--media/test/data/bear_mpeg4asp_mp3.avibin0 -> 72688 bytes
-rw-r--r--media/test/data/bear_mulaw.wavbin0 -> 8591 bytes
-rw-r--r--media/test/data/bear_pcm.wavbin0 -> 188460 bytes
-rw-r--r--media/test/data/bear_pcm_s16be.movbin0 -> 189077 bytes
-rw-r--r--media/test/data/bear_pcm_s24be.movbin0 -> 283285 bytes
-rw-r--r--media/test/data/bear_rotate_0.mp4bin0 -> 67279 bytes
-rw-r--r--media/test/data/bear_rotate_180.mp4bin0 -> 63080 bytes
-rw-r--r--media/test/data/bear_rotate_270.mp4bin0 -> 63080 bytes
-rw-r--r--media/test/data/bear_rotate_90.mp4bin0 -> 63080 bytes
-rw-r--r--media/test/data/bear_silent.mp4bin0 -> 30451 bytes
-rw-r--r--media/test/data/bear_silent.ogvbin0 -> 46247 bytes
-rw-r--r--media/test/data/bear_silent.webmbin0 -> 45723 bytes
-rw-r--r--media/test/data/blackwhite.pngbin0 -> 1025 bytes
-rw-r--r--media/test/data/blackwhite_yuv420p.avibin0 -> 7834 bytes
-rw-r--r--media/test/data/blackwhite_yuv420p.mp4bin0 -> 2070 bytes
-rw-r--r--media/test/data/blackwhite_yuv420p.ogvbin0 -> 4852 bytes
-rw-r--r--media/test/data/blackwhite_yuv420p.webmbin0 -> 1746 bytes
-rw-r--r--media/test/data/blackwhite_yuv422p.mp4bin0 -> 2070 bytes
-rw-r--r--media/test/data/blackwhite_yuv422p.ogvbin0 -> 4875 bytes
-rw-r--r--media/test/data/blackwhite_yuv444p.mp4bin0 -> 2095 bytes
-rw-r--r--media/test/data/blackwhite_yuv444p.ogvbin0 -> 4921 bytes
-rw-r--r--media/test/data/blackwhite_yuv444p.webmbin0 -> 949 bytes
-rw-r--r--media/test/data/blackwhite_yuvj420p.mp4bin0 -> 2107 bytes
-rw-r--r--media/test/data/frame_size_change-av_enc-v.webmbin0 -> 1187773 bytes
-rw-r--r--media/test/data/sfx-opus-441.webmbin0 -> 3559 bytes
-rw-r--r--media/test/data/sync2.ogvbin0 -> 157839 bytes
-rw-r--r--media/test/data/tulip2.webmbin0 -> 4389024 bytes
-rw-r--r--media/tools/player_x11/player_x11.cc4
-rw-r--r--media/video/capture/fake_video_capture_device.cc8
-rw-r--r--media/video/capture/fake_video_capture_device_factory.cc6
-rw-r--r--media/video/capture/file_video_capture_device_factory.cc5
-rw-r--r--media/video/capture/linux/video_capture_device_factory_linux.cc4
-rw-r--r--media/video/capture/linux/video_capture_device_linux.cc5
-rw-r--r--media/video/capture/mac/avfoundation_glue.h1
-rw-r--r--media/video/capture/mac/video_capture_device_avfoundation_mac.mm8
-rw-r--r--media/video/capture/mac/video_capture_device_factory_mac.mm37
-rw-r--r--media/video/capture/mac/video_capture_device_mac.h25
-rw-r--r--media/video/capture/mac/video_capture_device_mac.mm26
-rw-r--r--media/video/capture/mac/video_capture_device_qtkit_mac.mm12
-rw-r--r--media/video/capture/video_capture_device.cc33
-rw-r--r--media/video/capture/video_capture_device.h29
-rw-r--r--media/video/capture/video_capture_types.cc4
-rw-r--r--media/video/capture/win/sink_filter_win.cc9
-rw-r--r--media/video/capture/win/sink_filter_win.h7
-rw-r--r--media/video/capture/win/video_capture_device_factory_win.cc195
-rw-r--r--media/video/capture/win/video_capture_device_mf_win.cc71
-rw-r--r--media/video/capture/win/video_capture_device_mf_win.h3
-rw-r--r--media/video/capture/win/video_capture_device_win.cc5
-rw-r--r--media/video_capture_android_jni_headers.target.darwin-arm.mk10
-rw-r--r--media/video_capture_android_jni_headers.target.darwin-arm64.mk10
-rw-r--r--media/video_capture_android_jni_headers.target.darwin-mips.mk10
-rw-r--r--media/video_capture_android_jni_headers.target.darwin-x86.mk10
-rw-r--r--media/video_capture_android_jni_headers.target.darwin-x86_64.mk10
-rw-r--r--media/video_capture_android_jni_headers.target.linux-arm.mk10
-rw-r--r--media/video_capture_android_jni_headers.target.linux-arm64.mk10
-rw-r--r--media/video_capture_android_jni_headers.target.linux-mips.mk10
-rw-r--r--media/video_capture_android_jni_headers.target.linux-x86.mk10
-rw-r--r--media/video_capture_android_jni_headers.target.linux-x86_64.mk10
453 files changed, 10599 insertions, 6360 deletions
diff --git a/media/BUILD.gn b/media/BUILD.gn
new file mode 100644
index 0000000000..37dc76f810
--- /dev/null
+++ b/media/BUILD.gn
@@ -0,0 +1,1448 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//build/config/android/config.gni")
+import("//build/config/arm.gni")
+import("//build/config/ui.gni")
+import("//build/config/linux/pkg_config.gni")
+
+# These variables need to be args.
+
+# Override to dynamically link the cras (ChromeOS audio) library.
+use_cras = false
+
+# Option e.g. for Linux distributions to link pulseaudio directly
+# (DT_NEEDED) instead of using dlopen. This helps with automated
+# detection of ABI mismatches and prevents silent errors.
+#
+# TODO(ajwong): Why is this prefixed "linux_"?
+linux_link_pulseaudio = false
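+
+# A minimal sketch (an assumption, not part of this commit) of how these could
+# be exposed as real GN build arguments once converted, using declare_args()
+# so they become overridable via `gn args`:
+#
+#   declare_args() {
+#     use_cras = false
+#     linux_link_pulseaudio = false
+#   }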
+
+# TODO(ajwong): Enable libvpx once that's converted.
+media_use_ffmpeg = true
+media_use_libvpx = false
+if (is_android) {
+ # Android doesn't use ffmpeg or libvpx.
+ media_use_ffmpeg = false
+ media_use_libvpx = false
+}
+
+# TODO(ajwong): how to disable embedded?
+# Original conditional: (OS=="linux" or OS=="freebsd" or OS=="solaris") and embedded!=1
+use_alsa = false
+use_pulseaudio = false
+if (is_posix && !is_android) {
+ use_alsa = true
+ if (!use_cras) {
+ use_pulseaudio = true
+ }
+}
+
+# TODO(ajwong): is_openbsd should be a platform define.
+is_openbsd = false
+
+# TODO(ajwong): This should be branding controlled?
+proprietary_codecs = false
+
+# TODO(ajwong): Where are these coming from?
+enable_mpeg2ts_stream_parser = false
+enable_browser_cdms = is_android
+
+# Common configuration for targets in the media directory.
+# NOT for exporting.
+config("media_config") {
+ defines = [ "MEDIA_IMPLEMENTATION" ]
+ if (cpu_arch == "arm" && arm_use_neon) {
+ defines += [ "USE_NEON" ]
+ }
+ if (!media_use_libvpx) {
+ defines += [ "MEDIA_DISABLE_LIBVPX" ]
+ }
+ if (use_pulseaudio) {
+ defines += [ "USE_PULSEAUDIO" ]
+ if (linux_link_pulseaudio) {
+ defines += [ "DLOPEN_PULSEAUDIO" ]
+ }
+ }
+ if (use_cras) {
+    defines += [ "USE_CRAS" ]
+ }
+ if (enable_mpeg2ts_stream_parser) {
+ defines += [ "ENABLE_MPEG2TS_STREAM_PARSER" ]
+ }
+}
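+
+# Usage note: targets in this file apply the config internally, as the "media"
+# component below does with:
+#
+#   configs += [ ":media_config" ]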
+
+if (is_win) {
+ config("media_dependent_config") {
+    ldflags = [
+ "/DELAYLOAD:mf.dll",
+ "/DELAYLOAD:mfplat.dll",
+ "/DELAYLOAD:mfreadwrite.dll",
+ ]
+ }
+}
+
+if (!linux_link_pulseaudio) {
+ # When libpulse is not directly linked, use stubs to allow for dlopening of
+ # the binary.
+ action("pulse_generate_stubs") {
+ extra_header = "audio/pulse/pulse_stub_header.fragment"
+
+ script = "../tools/generate_stubs/generate_stubs.py"
+ sources = [ "audio/pulse/pulse.sigs" ]
+ source_prereqs = [ extra_header ]
+ stubs_filename_root = "pulse_stubs"
+
+ # TODO(ajwong): these need to be included in the pulse build.
+ outputs = [
+ "$target_gen_dir/audio/pulse/$stubs_filename_root.cc",
+ "$target_gen_dir/audio/pulse/$stubs_filename_root.h",
+ ]
+ args = [
+ "-i", rebase_path("$target_gen_dir/audio/pulse", root_build_dir),
+ "-o", rebase_path("$target_gen_dir/audio/pulse", root_build_dir),
+ "-t", "posix_stubs",
+ "-e", rebase_path(extra_header, root_build_dir),
+ "-s", stubs_filename_root,
+ "-p", "media/audio/pulse",
+ ]
+
+ args += rebase_path(sources, root_build_dir)
+ }
+}
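+
+# Usage note: when linux_link_pulseaudio is false, the "media" component below
+# consumes the generated stubs roughly as follows (see its use_pulseaudio
+# section for the actual wiring):
+#
+#   libs += [ "dl" ]
+#   deps += [ ":pulse_generate_stubs" ]
+#   sources += get_target_outputs(":pulse_generate_stubs")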
+
+component("media") {
+ sources = [
+ "audio/agc_audio_stream.h",
+ "audio/audio_buffers_state.cc",
+ "audio/audio_buffers_state.h",
+ "audio/audio_device_name.cc",
+ "audio/audio_device_name.h",
+ "audio/audio_device_thread.cc",
+ "audio/audio_device_thread.h",
+ "audio/audio_input_controller.cc",
+ "audio/audio_input_controller.h",
+ "audio/audio_input_device.cc",
+ "audio/audio_input_device.h",
+ "audio/audio_input_ipc.cc",
+ "audio/audio_input_ipc.h",
+ "audio/audio_io.h",
+ "audio/audio_manager.cc",
+ "audio/audio_manager.h",
+ "audio/audio_manager_base.cc",
+ "audio/audio_manager_base.h",
+ "audio/audio_output_controller.cc",
+ "audio/audio_output_controller.h",
+ "audio/audio_output_device.cc",
+ "audio/audio_output_device.h",
+ "audio/audio_output_dispatcher.cc",
+ "audio/audio_output_dispatcher.h",
+ "audio/audio_output_dispatcher_impl.cc",
+ "audio/audio_output_dispatcher_impl.h",
+ "audio/audio_output_ipc.cc",
+ "audio/audio_output_ipc.h",
+ "audio/audio_output_proxy.cc",
+ "audio/audio_output_proxy.h",
+ "audio/audio_output_resampler.cc",
+ "audio/audio_output_resampler.h",
+ "audio/audio_power_monitor.cc",
+ "audio/audio_power_monitor.h",
+ "audio/audio_source_diverter.h",
+ "audio/clockless_audio_sink.cc",
+ "audio/clockless_audio_sink.h",
+ "audio/fake_audio_consumer.cc",
+ "audio/fake_audio_consumer.h",
+ "audio/fake_audio_input_stream.cc",
+ "audio/fake_audio_input_stream.h",
+ "audio/fake_audio_log_factory.h",
+ "audio/fake_audio_log_factory.cc",
+ "audio/fake_audio_manager.cc",
+ "audio/fake_audio_manager.h",
+ "audio/fake_audio_output_stream.cc",
+ "audio/fake_audio_output_stream.h",
+ "audio/linux/audio_manager_linux.cc",
+ "audio/mac/audio_auhal_mac.cc",
+ "audio/mac/audio_auhal_mac.h",
+ "audio/mac/audio_device_listener_mac.cc",
+ "audio/mac/audio_device_listener_mac.h",
+ "audio/mac/audio_input_mac.cc",
+ "audio/mac/audio_input_mac.h",
+ "audio/mac/audio_low_latency_input_mac.cc",
+ "audio/mac/audio_low_latency_input_mac.h",
+ "audio/mac/audio_manager_mac.cc",
+ "audio/mac/audio_manager_mac.h",
+ "audio/null_audio_sink.cc",
+ "audio/null_audio_sink.h",
+ "audio/sample_rates.cc",
+ "audio/sample_rates.h",
+ "audio/scoped_task_runner_observer.cc",
+ "audio/scoped_task_runner_observer.h",
+ "audio/simple_sources.cc",
+ "audio/simple_sources.h",
+ "audio/sounds/audio_stream_handler.cc",
+ "audio/sounds/audio_stream_handler.h",
+ "audio/sounds/sounds_manager.cc",
+ "audio/sounds/sounds_manager.h",
+ "audio/sounds/wav_audio_handler.cc",
+ "audio/sounds/wav_audio_handler.h",
+ "audio/virtual_audio_input_stream.cc",
+ "audio/virtual_audio_input_stream.h",
+ "audio/virtual_audio_output_stream.cc",
+ "audio/virtual_audio_output_stream.h",
+ "audio/win/audio_device_listener_win.cc",
+ "audio/win/audio_device_listener_win.h",
+ "audio/win/audio_low_latency_input_win.cc",
+ "audio/win/audio_low_latency_input_win.h",
+ "audio/win/audio_low_latency_output_win.cc",
+ "audio/win/audio_low_latency_output_win.h",
+ "audio/win/audio_manager_win.cc",
+ "audio/win/audio_manager_win.h",
+ "audio/win/avrt_wrapper_win.cc",
+ "audio/win/avrt_wrapper_win.h",
+ "audio/win/core_audio_util_win.cc",
+ "audio/win/core_audio_util_win.h",
+ "audio/win/device_enumeration_win.cc",
+ "audio/win/device_enumeration_win.h",
+ "audio/win/wavein_input_win.cc",
+ "audio/win/wavein_input_win.h",
+ "audio/win/waveout_output_win.cc",
+ "audio/win/waveout_output_win.h",
+ "base/audio_buffer.cc",
+ "base/audio_buffer.h",
+ "base/audio_buffer_queue.cc",
+ "base/audio_buffer_queue.h",
+ "base/audio_capturer_source.h",
+ "base/audio_buffer_converter.cc",
+ "base/audio_buffer_converter.h",
+ "base/audio_converter.cc",
+ "base/audio_converter.h",
+ "base/audio_decoder.cc",
+ "base/audio_decoder.h",
+ "base/audio_decoder_config.cc",
+ "base/audio_decoder_config.h",
+ "base/audio_discard_helper.cc",
+ "base/audio_discard_helper.h",
+ "base/audio_fifo.cc",
+ "base/audio_fifo.h",
+ "base/audio_hardware_config.cc",
+ "base/audio_hardware_config.h",
+ "base/audio_hash.cc",
+ "base/audio_hash.h",
+ "base/audio_pull_fifo.cc",
+ "base/audio_pull_fifo.h",
+ "base/audio_renderer.cc",
+ "base/audio_renderer.h",
+ "base/audio_renderer_mixer.cc",
+ "base/audio_renderer_mixer.h",
+ "base/audio_renderer_mixer_input.cc",
+ "base/audio_renderer_mixer_input.h",
+ "base/audio_renderer_sink.h",
+ "base/audio_splicer.cc",
+ "base/audio_splicer.h",
+ "base/audio_timestamp_helper.cc",
+ "base/audio_timestamp_helper.h",
+ "base/bind_to_current_loop.h",
+ "base/bit_reader.cc",
+ "base/bit_reader.h",
+ "base/bit_reader_core.cc",
+ "base/bit_reader_core.h",
+ "base/bitstream_buffer.h",
+ "base/buffering_state.h",
+ "base/buffers.h",
+ "base/byte_queue.cc",
+ "base/byte_queue.h",
+ "base/cdm_promise.cc",
+ "base/cdm_promise.h",
+ "base/channel_mixer.cc",
+ "base/channel_mixer.h",
+ "base/clock.h",
+ "base/data_buffer.cc",
+ "base/data_buffer.h",
+ "base/data_source.cc",
+ "base/data_source.h",
+ "base/decoder_buffer.cc",
+ "base/decoder_buffer.h",
+ "base/decoder_buffer_queue.cc",
+ "base/decoder_buffer_queue.h",
+ "base/decrypt_config.cc",
+ "base/decrypt_config.h",
+ "base/decryptor.cc",
+ "base/decryptor.h",
+ "base/demuxer.cc",
+ "base/demuxer.h",
+ "base/demuxer_stream.cc",
+ "base/demuxer_stream.h",
+ "base/djb2.cc",
+ "base/djb2.h",
+ "base/filter_collection.cc",
+ "base/filter_collection.h",
+ "base/media.cc",
+ "base/media.h",
+ "base/media_keys.cc",
+ "base/media_keys.h",
+ "base/media_log.cc",
+ "base/media_log.h",
+ "base/media_log_event.h",
+ "base/media_switches.cc",
+ "base/media_switches.h",
+ "base/media_win.cc",
+ "base/multi_channel_resampler.cc",
+ "base/multi_channel_resampler.h",
+ "base/pipeline.cc",
+ "base/pipeline.h",
+ "base/pipeline_status.h",
+ "base/player_tracker.cc",
+ "base/player_tracker.h",
+ "base/ranges.cc",
+ "base/ranges.h",
+ "base/sample_format.cc",
+ "base/sample_format.h",
+ "base/scoped_histogram_timer.h",
+ "base/seekable_buffer.cc",
+ "base/seekable_buffer.h",
+ "base/serial_runner.cc",
+ "base/serial_runner.h",
+ "base/simd/convert_rgb_to_yuv.h",
+ "base/simd/convert_rgb_to_yuv_c.cc",
+ "base/simd/convert_yuv_to_rgb.h",
+ "base/simd/convert_yuv_to_rgb_c.cc",
+ "base/simd/filter_yuv.h",
+ "base/simd/filter_yuv_c.cc",
+ "base/simd/yuv_to_rgb_table.cc",
+ "base/simd/yuv_to_rgb_table.h",
+ "base/sinc_resampler.cc",
+ "base/sinc_resampler.h",
+ "base/stream_parser.cc",
+ "base/stream_parser.h",
+ "base/stream_parser_buffer.cc",
+ "base/stream_parser_buffer.h",
+ "base/text_cue.cc",
+ "base/text_cue.h",
+ "base/text_ranges.cc",
+ "base/text_ranges.h",
+ "base/text_renderer.cc",
+ "base/text_renderer.h",
+ "base/text_track.h",
+ "base/text_track_config.cc",
+ "base/text_track_config.h",
+ "base/time_delta_interpolator.cc",
+ "base/time_delta_interpolator.h",
+ "base/user_input_monitor.cc",
+ "base/user_input_monitor.h",
+ "base/user_input_monitor_mac.cc",
+ "base/user_input_monitor_win.cc",
+ "base/video_decoder.cc",
+ "base/video_decoder.h",
+ "base/video_decoder_config.cc",
+ "base/video_decoder_config.h",
+ "base/video_frame.cc",
+ "base/video_frame.h",
+ "base/video_frame_pool.cc",
+ "base/video_frame_pool.h",
+ "base/video_renderer.cc",
+ "base/video_renderer.h",
+ "base/video_rotation.h",
+ "base/video_util.cc",
+ "base/video_util.h",
+ "base/yuv_convert.cc",
+ "base/yuv_convert.h",
+ "cdm/aes_decryptor.cc",
+ "cdm/aes_decryptor.h",
+ "cdm/json_web_key.cc",
+ "cdm/json_web_key.h",
+ "cdm/key_system_names.cc",
+ "cdm/key_system_names.h",
+ "cdm/player_tracker_impl.cc",
+ "cdm/player_tracker_impl.h",
+ "ffmpeg/ffmpeg_deleters.h",
+ "filters/audio_clock.cc",
+ "filters/audio_clock.h",
+ "filters/audio_renderer_algorithm.cc",
+ "filters/audio_renderer_algorithm.h",
+ "filters/audio_renderer_impl.cc",
+ "filters/audio_renderer_impl.h",
+ "filters/chunk_demuxer.cc",
+ "filters/chunk_demuxer.h",
+ "filters/decoder_selector.cc",
+ "filters/decoder_selector.h",
+ "filters/decoder_stream.cc",
+ "filters/decoder_stream.h",
+ "filters/decoder_stream_traits.cc",
+ "filters/decoder_stream_traits.h",
+ "filters/decrypting_audio_decoder.cc",
+ "filters/decrypting_audio_decoder.h",
+ "filters/decrypting_demuxer_stream.cc",
+ "filters/decrypting_demuxer_stream.h",
+ "filters/decrypting_video_decoder.cc",
+ "filters/decrypting_video_decoder.h",
+ "filters/file_data_source.cc",
+ "filters/file_data_source.h",
+ "filters/frame_processor.cc",
+ "filters/frame_processor.h",
+ "filters/gpu_video_accelerator_factories.cc",
+ "filters/gpu_video_accelerator_factories.h",
+ "filters/gpu_video_decoder.cc",
+ "filters/gpu_video_decoder.h",
+ "filters/h264_bit_reader.cc",
+ "filters/h264_bit_reader.h",
+ "filters/h264_parser.cc",
+ "filters/h264_parser.h",
+ "filters/skcanvas_video_renderer.cc",
+ "filters/skcanvas_video_renderer.h",
+ "filters/source_buffer_platform.cc",
+ "filters/source_buffer_platform.h",
+ "filters/source_buffer_stream.cc",
+ "filters/source_buffer_stream.h",
+ "filters/stream_parser_factory.cc",
+ "filters/stream_parser_factory.h",
+ "filters/video_frame_scheduler.h",
+ "filters/video_frame_scheduler_impl.cc",
+ "filters/video_frame_scheduler_impl.h",
+ "filters/video_frame_scheduler_proxy.cc",
+ "filters/video_frame_scheduler_proxy.h",
+ "filters/video_renderer_impl.cc",
+ "filters/video_renderer_impl.h",
+ "filters/webvtt_util.h",
+ "filters/wsola_internals.cc",
+ "filters/wsola_internals.h",
+ "midi/midi_manager.cc",
+ "midi/midi_manager.h",
+ "midi/midi_manager_mac.cc",
+ "midi/midi_manager_mac.h",
+ "midi/midi_manager_usb.cc",
+ "midi/midi_manager_usb.h",
+ "midi/midi_manager_win.cc",
+ "midi/midi_manager_win.h",
+ "midi/midi_message_queue.cc",
+ "midi/midi_message_queue.h",
+ "midi/midi_message_util.cc",
+ "midi/midi_message_util.h",
+ "midi/midi_port_info.cc",
+ "midi/midi_port_info.h",
+ "midi/usb_midi_descriptor_parser.cc",
+ "midi/usb_midi_descriptor_parser.h",
+ "midi/usb_midi_device.h",
+ "midi/usb_midi_input_stream.cc",
+ "midi/usb_midi_input_stream.h",
+ "midi/usb_midi_jack.h",
+ "midi/usb_midi_output_stream.cc",
+ "midi/usb_midi_output_stream.h",
+ "video/capture/fake_video_capture_device.cc",
+ "video/capture/fake_video_capture_device.h",
+ "video/capture/fake_video_capture_device_factory.h",
+ "video/capture/fake_video_capture_device_factory.cc",
+ "video/capture/file_video_capture_device.cc",
+ "video/capture/file_video_capture_device.h",
+ "video/capture/file_video_capture_device_factory.h",
+ "video/capture/file_video_capture_device_factory.cc",
+ "video/capture/linux/video_capture_device_factory_linux.cc",
+ "video/capture/linux/video_capture_device_factory_linux.h",
+ "video/capture/linux/video_capture_device_linux.cc",
+ "video/capture/linux/video_capture_device_linux.h",
+ "video/capture/linux/video_capture_device_chromeos.cc",
+ "video/capture/linux/video_capture_device_chromeos.h",
+ "video/capture/mac/avfoundation_glue.h",
+ "video/capture/mac/avfoundation_glue.mm",
+ "video/capture/mac/coremedia_glue.h",
+ "video/capture/mac/coremedia_glue.mm",
+ "video/capture/mac/platform_video_capturing_mac.h",
+ "video/capture/mac/video_capture_device_avfoundation_mac.h",
+ "video/capture/mac/video_capture_device_avfoundation_mac.mm",
+ "video/capture/mac/video_capture_device_factory_mac.h",
+ "video/capture/mac/video_capture_device_factory_mac.mm",
+ "video/capture/mac/video_capture_device_mac.h",
+ "video/capture/mac/video_capture_device_mac.mm",
+ "video/capture/mac/video_capture_device_qtkit_mac.h",
+ "video/capture/mac/video_capture_device_qtkit_mac.mm",
+ "video/capture/video_capture_device.cc",
+ "video/capture/video_capture_device.h",
+ "video/capture/video_capture_device_factory.cc",
+ "video/capture/video_capture_device_factory.h",
+ "video/capture/video_capture_types.cc",
+ "video/capture/video_capture_types.h",
+ "video/capture/win/capability_list_win.cc",
+ "video/capture/win/capability_list_win.h",
+ "video/capture/win/filter_base_win.cc",
+ "video/capture/win/filter_base_win.h",
+ "video/capture/win/pin_base_win.cc",
+ "video/capture/win/pin_base_win.h",
+ "video/capture/win/sink_filter_observer_win.h",
+ "video/capture/win/sink_filter_win.cc",
+ "video/capture/win/sink_filter_win.h",
+ "video/capture/win/sink_input_pin_win.cc",
+ "video/capture/win/sink_input_pin_win.h",
+ "video/capture/win/video_capture_device_factory_win.cc",
+ "video/capture/win/video_capture_device_factory_win.h",
+ "video/capture/win/video_capture_device_mf_win.cc",
+ "video/capture/win/video_capture_device_mf_win.h",
+ "video/capture/win/video_capture_device_win.cc",
+ "video/capture/win/video_capture_device_win.h",
+ "video/picture.cc",
+ "video/picture.h",
+ "video/video_decode_accelerator.cc",
+ "video/video_decode_accelerator.h",
+ "video/video_encode_accelerator.cc",
+ "video/video_encode_accelerator.h",
+ "formats/common/offset_byte_queue.cc",
+ "formats/common/offset_byte_queue.h",
+ "formats/webm/webm_audio_client.cc",
+ "formats/webm/webm_audio_client.h",
+ "formats/webm/webm_cluster_parser.cc",
+ "formats/webm/webm_cluster_parser.h",
+ "formats/webm/webm_constants.cc",
+ "formats/webm/webm_constants.h",
+ "formats/webm/webm_content_encodings.cc",
+ "formats/webm/webm_content_encodings.h",
+ "formats/webm/webm_content_encodings_client.cc",
+ "formats/webm/webm_content_encodings_client.h",
+ "formats/webm/webm_crypto_helpers.cc",
+ "formats/webm/webm_crypto_helpers.h",
+ "formats/webm/webm_info_parser.cc",
+ "formats/webm/webm_info_parser.h",
+ "formats/webm/webm_parser.cc",
+ "formats/webm/webm_parser.h",
+ "formats/webm/webm_stream_parser.cc",
+ "formats/webm/webm_stream_parser.h",
+ "formats/webm/webm_tracks_parser.cc",
+ "formats/webm/webm_tracks_parser.h",
+ "formats/webm/webm_video_client.cc",
+ "formats/webm/webm_video_client.h",
+ "formats/webm/webm_webvtt_parser.cc",
+ "formats/webm/webm_webvtt_parser.h",
+ ]
+
+ configs += [ ":media_config", ]
+
+ libs = []
+ defines = []
+ deps = []
+
+ include_dirs = [ "." ]
+ if (media_use_ffmpeg) {
+ deps += [ "//third_party/ffmpeg" ]
+ sources += [
+ "base/audio_video_metadata_extractor.cc",
+ "base/audio_video_metadata_extractor.h",
+ "base/container_names.cc",
+ "base/container_names.h",
+ "base/media_file_checker.cc",
+ "base/media_file_checker.h",
+ "base/media_posix.cc",
+ "ffmpeg/ffmpeg_common.cc",
+ "ffmpeg/ffmpeg_common.h",
+ "filters/audio_file_reader.cc",
+ "filters/audio_file_reader.h",
+ "filters/blocking_url_protocol.cc",
+ "filters/blocking_url_protocol.h",
+ "filters/ffmpeg_audio_decoder.cc",
+ "filters/ffmpeg_audio_decoder.h",
+ "filters/ffmpeg_demuxer.cc",
+ "filters/ffmpeg_demuxer.h",
+ "filters/ffmpeg_glue.cc",
+ "filters/ffmpeg_glue.h",
+ "filters/ffmpeg_video_decoder.cc",
+ "filters/ffmpeg_video_decoder.h",
+ "filters/in_memory_url_protocol.cc",
+ "filters/in_memory_url_protocol.h",
+ ]
+ if (proprietary_codecs) {
+ sources += [
+ "filters/ffmpeg_h264_to_annex_b_bitstream_converter.cc",
+ "filters/ffmpeg_h264_to_annex_b_bitstream_converter.h",
+ ]
+ }
+ }
+
+ if (cpu_arch == "arm" && arm_use_neon) {
+ defines += [ "USE_NEON" ]
+ }
+
+ if (media_use_libvpx) {
+ sources += [
+ "filters/vpx_video_decoder.cc",
+ "filters/vpx_video_decoder.h",
+ ]
+ deps += [ "//third_party/libvpx" ]
+ }
+
+ if (enable_browser_cdms) {
+ sources += [
+ "base/browser_cdm.cc",
+ "base/browser_cdm.h",
+ "base/browser_cdm_factory.h",
+ ]
+ }
+
+ if (!is_android) {
+ sources += [
+ "filters/opus_audio_decoder.cc",
+ "filters/opus_audio_decoder.h",
+ ]
+ } else {
+ sources += [
+ "audio/android/audio_manager_android.cc",
+ "audio/android/audio_manager_android.h",
+ "audio/android/audio_record_input.cc",
+ "audio/android/audio_record_input.h",
+ "audio/android/opensles_input.cc",
+ "audio/android/opensles_input.h",
+ "audio/android/opensles_output.cc",
+ "audio/android/opensles_output.h",
+ "audio/android/opensles_wrapper.cc",
+ "base/android/demuxer_android.h",
+ "base/android/demuxer_stream_player_params.cc",
+ "base/android/demuxer_stream_player_params.h",
+ "base/android/media_player_manager.h",
+ "base/android/media_resource_getter.cc",
+ "base/android/media_resource_getter.h",
+ "base/media_stub.cc",
+ "midi/midi_manager_android.cc",
+ "midi/usb_midi_device_android.cc",
+ "midi/usb_midi_device_android.h",
+ "midi/usb_midi_device_factory_android.cc",
+ "midi/usb_midi_device_factory_android.h",
+ "video/capture/android/video_capture_device_android.cc",
+ "video/capture/android/video_capture_device_android.h",
+ "video/capture/android/video_capture_device_factory_android.cc",
+ "video/capture/android/video_capture_device_factory_android.h",
+ ]
+ defines += [ "DISABLE_USER_INPUT_MONITOR" ]
+ deps += [
+ ":media_android_jni_headers",
+ ":player_android",
+ ":video_capture_android_jni_headers",
+ ]
+ if (!is_android_webview_build) {
+ deps += [ ":media_java" ]
+ }
+ }
+
+ if (is_chromeos) {
+ # A simple WebM encoder for animated avatars on ChromeOS.
+ sources += [
+ "formats/webm/chromeos/ebml_writer.cc",
+ "formats/webm/chromeos/ebml_writer.h",
+ "formats/webm/chromeos/webm_encoder.cc",
+ "formats/webm/chromeos/webm_encoder.h",
+ ]
+ deps += [
+ "//third_party/libvpx",
+ "//third_party/libyuv"
+ ]
+ # For VaapiVideoEncodeAccelerator.
+ if (cpu_arch != "arm" && use_x11) {
+ sources += [
+ "filters/h264_bitstream_buffer.cc",
+ "filters/h264_bitstream_buffer.h",
+ ]
+ }
+ }
+
+ if (!is_ios) {
+ deps += [ "//third_party/libyuv" ]
+ }
+
+ if (use_alsa) {
+ libs += [ "asound" ]
+ defines += [ "USE_ALSA" ]
+ sources += [
+ "audio/alsa/alsa_input.cc",
+ "audio/alsa/alsa_input.h",
+ "audio/alsa/alsa_output.cc",
+ "audio/alsa/alsa_output.h",
+ "audio/alsa/alsa_util.cc",
+ "audio/alsa/alsa_util.h",
+ "audio/alsa/alsa_wrapper.cc",
+ "audio/alsa/alsa_wrapper.h",
+ "audio/alsa/audio_manager_alsa.cc",
+ "audio/alsa/audio_manager_alsa.h",
+ "midi/midi_manager_alsa.cc",
+ "midi/midi_manager_alsa.h",
+ ]
+ }
+
+ if (is_openbsd) {
+ sources += [
+ "audio/openbsd/audio_manager_openbsd.cc",
+ "audio/openbsd/audio_manager_openbsd.h",
+ ]
+ }
+
+ if (is_linux) {
+ if (use_x11) {
+ configs += [
+ "//build/config/linux:x11",
+ "//build/config/linux:xext",
+      # TODO(ajwong): Why does xext get a separate config in
+      # //build/config/linux/BUILD.gn?
+      # "//build/config/linux:xdamage",
+      # "//build/config/linux:xfixes",
+      # "//build/config/linux:xtst",
+ ]
+ sources += [
+ "base/user_input_monitor_linux.cc"
+ ]
+ } else {
+ defines += [ "DISABLE_USER_INPUT_MONITOR" ]
+ }
+
+ if (use_cras) {
+ pkg_config("libcras") {
+ packages = [ "libcras" ]
+ }
+      configs += [ ":libcras" ]
+ sources += [
+ "audio/cras/audio_manager_cras.cc",
+ "audio/cras/audio_manager_cras.h",
+ "audio/cras/cras_input.cc",
+ "audio/cras/cras_input.h",
+ "audio/cras/cras_unified.cc",
+ "audio/cras/cras_unified.h",
+ ]
+ }
+
+ }
+
+ if (use_ozone) {
+ platform_list_txt_file = "$target_gen_dir/ui/ozone/platform_list.txt"
+ constructor_list_cc_file = "$target_gen_dir/media/ozone/constructor_list.cc"
+
+ # Used for the generated listing header (ui/ozone/platform_list.h)
+ include_dirs += [ target_gen_dir ]
+
+ sources += [
+ constructor_list_cc_file,
+ "ozone/media_ozone_platform.cc",
+ "ozone/media_ozone_platform.h",
+ ]
+
+ deps += [ "//ui/ozone/ozone" ]
+
+ action("generate_constructor_list") {
+ # Ozone platform objects are auto-generated using similar
+ # patterns for naming and classes constructors. Here we build the
+ # object MediaOzonePlatform.
+ script = "../ui/ozone/generate_constructor_list.py"
+ sources = [ platform_list_txt_file ]
+ outputs = [ constructor_list_cc_file ]
+      args = [
+ "--platform_list=$platform_list_txt_file",
+ "--output_cc=$constructor_list_cc_file",
+ "--namespace=media",
+ "--typename=MediaOzonePlatform",
+ "--include=\"media/ozone/media_ozone_platform.h\""
+ ]
+ }
+ }
+
+ if (use_pulseaudio) {
+ if (linux_link_pulseaudio) {
+ pkg_config("libpulse") {
+ packages = [ "libpulse" ]
+ }
+ configs += [ ":libpulse" ]
+ } else {
+ # TODO(ajwong): Technically, this dl should go in the action.
+ libs += [ "dl" ]
+ deps += [ ":pulse_generate_stubs" ]
+ sources += get_target_outputs(":pulse_generate_stubs")
+ }
+ sources += [
+ "audio/pulse/audio_manager_pulse.cc",
+ "audio/pulse/audio_manager_pulse.h",
+ "audio/pulse/pulse_input.cc",
+ "audio/pulse/pulse_input.h",
+ "audio/pulse/pulse_output.cc",
+ "audio/pulse/pulse_output.h",
+ "audio/pulse/pulse_util.cc",
+ "audio/pulse/pulse_util.h",
+ ]
+ }
+
+ if (is_mac) {
+ libs += [
+ "AudioToolbox.framework",
+ "AudioUnit.framework",
+ "CoreAudio.framework",
+ "CoreMIDI.framework",
+ "CoreVideo.framework",
+ "OpenGL.framework",
+ "QTKit.framework",
+ ]
+ }
+
+ if (is_win) {
+ libs += [
+ "mf.lib",
+ "mfplat.lib",
+ "mfreadwrite.lib",
+ "mfuuid.lib",
+ ]
+    cflags = [
+      "/wd4267"  # TODO(wolenetz): Fix size_t to int truncation in win64. See
+ # http://crbug.com/171009
+ ]
+ configs += [ ":media_dependent_config" ]
+ all_dependent_configs = [ ":media_dependent_config" ]
+ }
+
+ if (proprietary_codecs) {
+ sources += [
+ "formats/mp2t/es_adapter_video.cc",
+ "formats/mp2t/es_adapter_video.h",
+ "formats/mp2t/es_parser.h",
+ "formats/mp2t/es_parser_adts.cc",
+ "formats/mp2t/es_parser_adts.h",
+ "formats/mp2t/es_parser_h264.cc",
+ "formats/mp2t/es_parser_h264.h",
+ "formats/mp2t/mp2t_common.h",
+ "formats/mp2t/mp2t_stream_parser.cc",
+ "formats/mp2t/mp2t_stream_parser.h",
+ "formats/mp2t/ts_packet.cc",
+ "formats/mp2t/ts_packet.h",
+ "formats/mp2t/ts_section.h",
+ "formats/mp2t/ts_section_pat.cc",
+ "formats/mp2t/ts_section_pat.h",
+ "formats/mp2t/ts_section_pes.cc",
+ "formats/mp2t/ts_section_pes.h",
+ "formats/mp2t/ts_section_pmt.cc",
+ "formats/mp2t/ts_section_pmt.h",
+ "formats/mp2t/ts_section_psi.cc",
+ "formats/mp2t/ts_section_psi.h",
+ "formats/mp4/aac.cc",
+ "formats/mp4/aac.h",
+ "formats/mp4/avc.cc",
+ "formats/mp4/avc.h",
+ "formats/mp4/box_definitions.cc",
+ "formats/mp4/box_definitions.h",
+ "formats/mp4/box_reader.cc",
+ "formats/mp4/box_reader.h",
+ "formats/mp4/cenc.cc",
+ "formats/mp4/cenc.h",
+ "formats/mp4/es_descriptor.cc",
+ "formats/mp4/es_descriptor.h",
+ "formats/mp4/mp4_stream_parser.cc",
+ "formats/mp4/mp4_stream_parser.h",
+ "formats/mp4/sample_to_group_iterator.cc",
+ "formats/mp4/sample_to_group_iterator.h",
+ "formats/mp4/track_run_iterator.cc",
+ "formats/mp4/track_run_iterator.h",
+ "formats/mpeg/adts_constants.cc",
+ "formats/mpeg/adts_constants.h",
+ "formats/mpeg/adts_stream_parser.cc",
+ "formats/mpeg/adts_stream_parser.h",
+ "formats/mpeg/mp3_stream_parser.cc",
+ "formats/mpeg/mp3_stream_parser.h",
+ "formats/mpeg/mpeg_audio_stream_parser_base.cc",
+ "formats/mpeg/mpeg_audio_stream_parser_base.h",
+ ]
+ }
+
+ if (cpu_arch == "x86" || cpu_arch == "x64") {
+ sources += [ "base/simd/convert_yuv_to_rgb_x86.cc" ]
+ deps += [
+ ":media_yasm",
+ ":media_mmx",
+ ":media_sse2",
+ ]
+ }
+
+ if (is_linux || is_win) {
+ sources += [
+ "base/keyboard_event_counter.cc",
+ "base/keyboard_event_counter.h",
+ ]
+ }
+
+ deps += [
+ ":shared_memory_support",
+ "//base",
+ "//base:i18n",
+ "//base/third_party/dynamic_annotations",
+ "//crypto",
+ "//crypto:platform", # TODO(ajwong): This used to be provided by crypto.gyp via export_dependent_settings
+ "//gpu/command_buffer/common",
+ "//skia",
+ "//third_party/opus",
+ "//ui/events:events_base",
+ "//ui/gfx",
+ "//ui/gfx/geometry",
+ "//url",
+ ]
+}
+
+test("media_unittests") {
+ sources = [
+ "audio/android/audio_android_unittest.cc",
+ "audio/audio_input_controller_unittest.cc",
+ "audio/audio_input_unittest.cc",
+ "audio/audio_manager_unittest.cc",
+ "audio/audio_output_controller_unittest.cc",
+ "audio/audio_output_device_unittest.cc",
+ "audio/audio_output_proxy_unittest.cc",
+ "audio/audio_parameters_unittest.cc",
+ "audio/audio_power_monitor_unittest.cc",
+ "audio/fake_audio_consumer_unittest.cc",
+ "audio/mac/audio_auhal_mac_unittest.cc",
+ "audio/mac/audio_device_listener_mac_unittest.cc",
+ "audio/mac/audio_low_latency_input_mac_unittest.cc",
+ "audio/simple_sources_unittest.cc",
+ "audio/sounds/audio_stream_handler_unittest.cc",
+ "audio/sounds/sounds_manager_unittest.cc",
+ "audio/sounds/test_data.cc",
+ "audio/sounds/test_data.h",
+ "audio/sounds/wav_audio_handler_unittest.cc",
+ "audio/virtual_audio_input_stream_unittest.cc",
+ "audio/virtual_audio_output_stream_unittest.cc",
+ "audio/win/audio_device_listener_win_unittest.cc",
+ "audio/win/audio_low_latency_input_win_unittest.cc",
+ "audio/win/audio_low_latency_output_win_unittest.cc",
+ "audio/win/audio_output_win_unittest.cc",
+ "audio/win/core_audio_util_win_unittest.cc",
+ "base/android/media_codec_bridge_unittest.cc",
+ "base/android/media_drm_bridge_unittest.cc",
+ "base/android/media_source_player_unittest.cc",
+ "base/audio_buffer_converter_unittest.cc",
+ "base/audio_buffer_unittest.cc",
+ "base/audio_buffer_queue_unittest.cc",
+ "base/audio_bus_unittest.cc",
+ "base/audio_converter_unittest.cc",
+ "base/audio_discard_helper_unittest.cc",
+ "base/audio_fifo_unittest.cc",
+ "base/audio_hardware_config_unittest.cc",
+ "base/audio_hash_unittest.cc",
+ "base/audio_pull_fifo_unittest.cc",
+ "base/audio_renderer_mixer_input_unittest.cc",
+ "base/audio_renderer_mixer_unittest.cc",
+ "base/audio_splicer_unittest.cc",
+ "base/audio_timestamp_helper_unittest.cc",
+ "base/bind_to_current_loop_unittest.cc",
+ "base/bit_reader_unittest.cc",
+ "base/callback_holder.h",
+ "base/callback_holder_unittest.cc",
+ "base/channel_mixer_unittest.cc",
+ "base/data_buffer_unittest.cc",
+ "base/decoder_buffer_queue_unittest.cc",
+ "base/decoder_buffer_unittest.cc",
+ "base/djb2_unittest.cc",
+ "base/gmock_callback_support_unittest.cc",
+ "base/multi_channel_resampler_unittest.cc",
+ "base/pipeline_unittest.cc",
+ "base/ranges_unittest.cc",
+ "base/run_all_unittests.cc",
+ "base/scoped_histogram_timer_unittest.cc",
+ "base/serial_runner_unittest.cc",
+ "base/seekable_buffer_unittest.cc",
+ "base/sinc_resampler_unittest.cc",
+ "base/stream_parser_unittest.cc",
+ "base/text_ranges_unittest.cc",
+ "base/text_renderer_unittest.cc",
+ "base/user_input_monitor_unittest.cc",
+ "base/vector_math_testing.h",
+ "base/vector_math_unittest.cc",
+ "base/video_frame_unittest.cc",
+ "base/video_frame_pool_unittest.cc",
+ "base/video_util_unittest.cc",
+ "base/yuv_convert_unittest.cc",
+ "cdm/aes_decryptor_unittest.cc",
+ "cdm/json_web_key_unittest.cc",
+ "filters/audio_clock_unittest.cc",
+ "filters/audio_decoder_selector_unittest.cc",
+ "filters/audio_renderer_algorithm_unittest.cc",
+ "filters/audio_renderer_impl_unittest.cc",
+ "filters/chunk_demuxer_unittest.cc",
+ "filters/decrypting_audio_decoder_unittest.cc",
+ "filters/decrypting_demuxer_stream_unittest.cc",
+ "filters/decrypting_video_decoder_unittest.cc",
+ "filters/fake_demuxer_stream.cc",
+ "filters/fake_demuxer_stream.h",
+ "filters/fake_demuxer_stream_unittest.cc",
+ "filters/fake_video_decoder.cc",
+ "filters/fake_video_decoder.h",
+ "filters/fake_video_decoder_unittest.cc",
+ "filters/file_data_source_unittest.cc",
+ "filters/frame_processor_unittest.cc",
+ "filters/h264_bit_reader_unittest.cc",
+ "filters/h264_parser_unittest.cc",
+ "filters/skcanvas_video_renderer_unittest.cc",
+ "filters/source_buffer_stream_unittest.cc",
+ "filters/video_decoder_selector_unittest.cc",
+ "filters/video_frame_scheduler_impl_unittest.cc",
+ "filters/video_frame_scheduler_unittest.cc",
+ "filters/video_frame_stream_unittest.cc",
+ "filters/video_renderer_impl_unittest.cc",
+ "midi/midi_manager_unittest.cc",
+ "midi/midi_manager_usb_unittest.cc",
+ "midi/midi_message_queue_unittest.cc",
+ "midi/midi_message_util_unittest.cc",
+ "midi/usb_midi_descriptor_parser_unittest.cc",
+ "midi/usb_midi_input_stream_unittest.cc",
+ "midi/usb_midi_output_stream_unittest.cc",
+ "video/capture/fake_video_capture_device_unittest.cc",
+ "video/capture/video_capture_device_unittest.cc",
+ "formats/common/offset_byte_queue_unittest.cc",
+ "formats/webm/cluster_builder.cc",
+ "formats/webm/cluster_builder.h",
+ "formats/webm/tracks_builder.cc",
+ "formats/webm/tracks_builder.h",
+ "formats/webm/webm_cluster_parser_unittest.cc",
+ "formats/webm/webm_content_encodings_client_unittest.cc",
+ "formats/webm/webm_parser_unittest.cc",
+ "formats/webm/webm_tracks_parser_unittest.cc",
+ "formats/webm/webm_webvtt_parser_unittest.cc",
+ ]
+
+ if (media_use_ffmpeg) {
+ sources += [
+ "base/audio_video_metadata_extractor_unittest.cc",
+ "base/media_file_checker_unittest.cc",
+ ]
+ }
+
+ if (!is_android) {
+ sources += [
+ "audio/audio_input_volume_unittest.cc",
+ "base/container_names_unittest.cc",
+ "ffmpeg/ffmpeg_common_unittest.cc",
+ "filters/audio_decoder_unittest.cc",
+ "filters/audio_file_reader_unittest.cc",
+ "filters/blocking_url_protocol_unittest.cc",
+ "filters/ffmpeg_demuxer_unittest.cc",
+ "filters/ffmpeg_glue_unittest.cc",
+ "filters/ffmpeg_video_decoder_unittest.cc",
+ "filters/in_memory_url_protocol_unittest.cc",
+ "filters/pipeline_integration_test.cc",
+ "filters/pipeline_integration_test_base.cc",
+ ]
+ } else {
+# TODO(ajwong): Blocked on android.
+# deps += [
+# ":player_android",
+# "//testing/android:native_test_native_code"
+# ]
+ }
+
+ if (is_linux && use_cras) {
+ sources += [
+ "audio/cras/cras_input_unittest.cc",
+ "audio/cras/cras_unified_unittest.cc",
+ ]
+ }
+
+ if (cpu_arch != "arm" && is_chromeos && use_x11) {
+ sources += [ "filters/h264_bitstream_buffer_unittest.cc" ]
+ }
+
+ if (use_alsa) {
+ sources += [
+ "audio/alsa/alsa_output_unittest.cc",
+ "audio/audio_low_latency_input_output_unittest.cc",
+ ]
+ }
+
+ if (cpu_arch == "x86" || cpu_arch == "x64") {
+ sources += [ "base/simd/convert_rgb_to_yuv_unittest.cc" ]
+ }
+
+ if (proprietary_codecs) {
+ sources += [
+ "filters/ffmpeg_h264_to_annex_b_bitstream_converter_unittest.cc",
+ "filters/h264_to_annex_b_bitstream_converter_unittest.cc",
+ "formats/common/stream_parser_test_base.cc",
+ "formats/common/stream_parser_test_base.h",
+ "formats/mp2t/es_adapter_video_unittest.cc",
+ "formats/mp2t/es_parser_h264_unittest.cc",
+ "formats/mp2t/mp2t_stream_parser_unittest.cc",
+ "formats/mp4/aac_unittest.cc",
+ "formats/mp4/avc_unittest.cc",
+ "formats/mp4/box_reader_unittest.cc",
+ "formats/mp4/es_descriptor_unittest.cc",
+ "formats/mp4/mp4_stream_parser_unittest.cc",
+ "formats/mp4/sample_to_group_iterator_unittest.cc",
+ "formats/mp4/track_run_iterator_unittest.cc",
+ "formats/mpeg/adts_stream_parser_unittest.cc",
+ "formats/mpeg/mp3_stream_parser_unittest.cc",
+ ]
+ }
+
+ if (is_win && cpu_arch == "x64") {
+    cflags += [
+      "/wd4267" # TODO(wolenetz): Fix size_t to int truncation in win64. See
+                # http://crbug.com/171009
+    ]
+ }
+
+ if (is_mac) {
+ sources += [
+ "video/capture/mac/video_capture_device_factory_mac_unittest.mm"
+ ]
+ }
+
+# include_dirs += [
+# # Needed by media_drm_bridge.cc.
+# target_gen_dir,
+# ],
+
+ configs += [ ":media_config" ]
+
+# TODO(ajwong): This was in the original gyp, but it seems silly.
+# ['os_posix==1 and OS!="mac"', {
+# 'conditions': [
+# ['use_allocator!="none"', {
+# 'dependencies': [
+# '../base/allocator/allocator.gyp:allocator',
+# ],
+# }],
+# ],
+# }],
+ deps = [
+ ":media",
+ ":media_test_support",
+ "//base/test:test_support",
+ "//skia", # Direct dependency required to inherit config.
+ "//testing/gmock",
+ "//testing/gtest",
+ # TODO(dalecurtis): Port the rest of Widevine stuff.
+ "//third_party/widevine/cdm:version_h",
+ "//ui/base",
+ "//ui/gfx:gfx_test_support",
+ ]
+ if (media_use_ffmpeg) {
+ deps += [
+ "//third_party/ffmpeg", # Direct dependency required to inherit config.
+ ]
+ }
+}
+
+test("media_perftests") {
+ sources = [
+ "base/audio_bus_perftest.cc",
+ "base/audio_converter_perftest.cc",
+ "base/run_all_perftests.cc",
+ "base/sinc_resampler_perftest.cc",
+ "base/vector_math_perftest.cc",
+ "base/yuv_convert_perftest.cc",
+ ]
+ if (media_use_ffmpeg) {
+ sources += [
+ "base/demuxer_perftest.cc",
+ "filters/pipeline_integration_perftest.cc",
+ "filters/pipeline_integration_test_base.cc",
+ ]
+ }
+ configs += [ ":media_config" ]
+ deps = [
+ ":media",
+ ":media_test_support",
+ ":shared_memory_support",
+ "//base/test:test_support",
+ "//testing/gmock",
+ "//testing/gtest",
+ "//testing/perf",
+ "//third_party/widevine/cdm:version_h",
+ "//ui/gl",
+ "//ui/gfx:gfx_test_support",
+ ]
+ if (media_use_ffmpeg) {
+ deps += [
+ "//third_party/ffmpeg", # Direct dependency required to inherit config.
+ ]
+ }
+}
+
+if (cpu_arch == "x86" || cpu_arch == "x64") {
+ source_set("media_mmx") {
+ sources = [ "base/simd/filter_yuv_mmx.cc" ]
+ configs += [ ":media_config" ]
+ cflags = [ "-mmmx" ]
+ }
+
+ source_set("media_sse2") {
+ sources = [
+ "base/simd/convert_rgb_to_yuv_sse2.cc",
+ "base/simd/convert_rgb_to_yuv_ssse3.cc",
+ "base/simd/filter_yuv_sse2.cc",
+ ]
+ configs += [ ":media_config" ]
+ cflags = [ "-msse2" ]
+ }
+
+ import("//third_party/yasm/yasm_assemble.gni")
+ yasm_assemble("media_yasm") {
+ sources = [
+ "base/simd/convert_rgb_to_yuv_ssse3.asm",
+ "base/simd/convert_yuv_to_rgb_mmx.asm",
+ "base/simd/convert_yuv_to_rgb_sse.asm",
+ "base/simd/convert_yuva_to_argb_mmx.asm",
+ "base/simd/empty_register_state_mmx.asm",
+ "base/simd/linear_scale_yuv_to_rgb_mmx.asm",
+ "base/simd/linear_scale_yuv_to_rgb_sse.asm",
+ "base/simd/scale_yuv_to_rgb_mmx.asm",
+ "base/simd/scale_yuv_to_rgb_sse.asm",
+ ]
+
+ # TODO(ajwong): Only export if shared_library build...
+ yasm_flags = [
+ "-DCHROMIUM",
+ "-DEXPORT_SYMBOLS",
+ # In addition to the same path as source asm, let yasm %include
+ # search path be relative to src/ per Chromium policy.
+ "-I", rebase_path("..", root_build_dir),
+ ]
+
+ inputs = [
+ "//third_party/x86inc/x86inc.asm",
+ "base/simd/convert_rgb_to_yuv_ssse3.inc",
+ "base/simd/convert_yuv_to_rgb_mmx.inc",
+ "base/simd/convert_yuva_to_argb_mmx.inc",
+ "base/simd/linear_scale_yuv_to_rgb_mmx.inc",
+ "base/simd/media_export.asm",
+ "base/simd/scale_yuv_to_rgb_mmx.inc",
+ ]
+
+ if (cpu_arch == "x86") {
+ yasm_flags += [ "-DARCH_X86_32" ]
+ } else if (cpu_arch == "x64") {
+ yasm_flags += [ "-DARCH_X86_64" ]
+ sources += [
+ "base/simd/linear_scale_yuv_to_rgb_mmx_x64.asm",
+ "base/simd/scale_yuv_to_rgb_sse2_x64.asm",
+ ]
+ }
+
+ if (is_mac) {
+ yasm_flags += [
+ "-DPREFIX",
+ "-DMACHO",
+ ]
+ } else {
+ if (is_posix) {
+ yasm_flags += [ "-DELF" ]
+ if (cpu_arch == "x64") {
+ # TODO(ajwong): Why isn't this true in mac?
+ yasm_flags += [ "-DPIC" ]
+ }
+ }
+ }
+ }
+}
+
+source_set("media_test_support") {
+ sources = [
+ "audio/mock_audio_manager.cc",
+ "audio/mock_audio_manager.h",
+ "audio/mock_audio_source_callback.cc",
+ "audio/mock_audio_source_callback.h",
+ "audio/test_audio_input_controller_factory.cc",
+ "audio/test_audio_input_controller_factory.h",
+ "base/fake_audio_render_callback.cc",
+ "base/fake_audio_render_callback.h",
+ "base/fake_audio_renderer_sink.cc",
+ "base/fake_audio_renderer_sink.h",
+ "base/fake_text_track_stream.cc",
+ "base/fake_text_track_stream.h",
+ "base/gmock_callback_support.h",
+ "base/mock_audio_renderer_sink.cc",
+ "base/mock_audio_renderer_sink.h",
+ "base/mock_demuxer_host.cc",
+ "base/mock_demuxer_host.h",
+ "base/mock_filters.cc",
+ "base/mock_filters.h",
+ "base/test_data_util.cc",
+ "base/test_data_util.h",
+ "base/test_helpers.cc",
+ "base/test_helpers.h",
+ "filters/clockless_video_frame_scheduler.cc",
+ "filters/clockless_video_frame_scheduler.h",
+ "filters/mock_gpu_video_accelerator_factories.cc",
+ "filters/mock_gpu_video_accelerator_factories.h",
+ "filters/test_video_frame_scheduler.cc",
+ "filters/test_video_frame_scheduler.h",
+ "video/mock_video_decode_accelerator.cc",
+ "video/mock_video_decode_accelerator.h",
+ ]
+ deps = [
+ ":media",
+ ":shared_memory_support",
+ "//base",
+ "//skia",
+ "//testing/gmock",
+ "//testing/gtest",
+ ]
+}
+
+component("shared_memory_support") {
+ sources = [
+ "audio/audio_parameters.cc",
+ "audio/audio_parameters.h",
+ "base/audio_bus.cc",
+ "base/audio_bus.h",
+ "base/channel_layout.cc",
+ "base/channel_layout.h",
+ "base/limits.h",
+ "base/media_export.h",
+ "base/vector_math.cc",
+ "base/vector_math.h",
+ ]
+ configs += [ ":media_config" ]
+ deps = [ "//base" ]
+}
+
+if (media_use_ffmpeg) {
+ test("ffmpeg_unittests") {
+ sources = [ "ffmpeg/ffmpeg_unittest.cc" ]
+
+ deps = [
+ ":media",
+ ":media_test_support",
+ "//base",
+ "//base:i18n",
+ "//base/test:test_support",
+ "//testing/gtest",
+ "//third_party/ffmpeg",
+ "//ui/gfx:gfx_test_support",
+ ]
+ }
+
+ test("ffmpeg_regression_tests") {
+ sources = [
+ "base/run_all_unittests.cc",
+ "ffmpeg/ffmpeg_regression_tests.cc",
+ "filters/pipeline_integration_test_base.cc",
+ ]
+ configs += [ ":media_config" ]
+ deps = [
+ ":media",
+ ":media_test_support",
+ "//base/test:test_support",
+ "//testing/gmock",
+ "//testing/gtest",
+ "//third_party/ffmpeg",
+ "//ui/gfx/geometry",
+ "//ui/gfx:gfx_test_support",
+ ]
+ # TODO(ajwong): This was in the original gyp, but it seems silly.
+ # ['os_posix==1 and OS!="mac"', {
+ # 'conditions': [
+ # ['use_allocator!="none"', {
+ # 'dependencies': [
+ # '../base/allocator/allocator.gyp:allocator',
+ # ],
+ # }],
+ # ],
+ # }],
+ }
+}
+
+if (use_x11) {
+ executable("player_x11") {
+ sources = [
+ "tools/player_x11/data_source_logger.cc",
+ "tools/player_x11/data_source_logger.h",
+ "tools/player_x11/gl_video_renderer.cc",
+ "tools/player_x11/gl_video_renderer.h",
+ "tools/player_x11/player_x11.cc",
+ "tools/player_x11/x11_video_renderer.cc",
+ "tools/player_x11/x11_video_renderer.h",
+ ]
+ configs += [
+ ":media_config",
+ "//build/config/linux:x11",
+ "//build/config/linux:xext",
+# TODO(ajwong): Why does xext get a separate thing in //build/config/linux:BUILD.gn
+ # "//build/config/linux:xrender",
+ ]
+ deps = [
+ ":media",
+ ":shared_memory_support",
+ "//base",
+ "//ui/gl",
+ "//ui/gfx",
+ "//ui/gfx/geometry",
+ ]
+ }
+}
+
+if (is_android) {
+ import("//build/config/android/rules.gni")
+ source_set("player_android") {
+ configs += [ ":media_config" ]
+ sources = [
+ "base/android/audio_decoder_job.cc",
+ "base/android/audio_decoder_job.h",
+ "base/android/browser_cdm_factory_android.cc",
+ "base/android/media_codec_bridge.cc",
+ "base/android/media_codec_bridge.h",
+ "base/android/media_decoder_job.cc",
+ "base/android/media_decoder_job.h",
+ "base/android/media_drm_bridge.cc",
+ "base/android/media_drm_bridge.h",
+ "base/android/media_jni_registrar.cc",
+ "base/android/media_jni_registrar.h",
+ "base/android/media_player_android.cc",
+ "base/android/media_player_android.h",
+ "base/android/media_player_bridge.cc",
+ "base/android/media_player_bridge.h",
+ "base/android/media_player_listener.cc",
+ "base/android/media_player_listener.h",
+ "base/android/media_source_player.cc",
+ "base/android/media_source_player.h",
+ "base/android/video_decoder_job.cc",
+ "base/android/video_decoder_job.h",
+ "base/android/webaudio_media_codec_bridge.cc",
+ "base/android/webaudio_media_codec_bridge.h",
+ "base/android/webaudio_media_codec_info.h",
+ ]
+
+ deps = [
+ ":media_android_jni_headers",
+ "//base",
+ "//third_party/widevine/cdm:version_h",
+ "//ui/gl",
+ "//url"
+ ]
+ }
+
+ generate_jni("media_android_jni_headers") {
+ sources = [
+ "base/android/java/src/org/chromium/media/AudioManagerAndroid.java",
+ "base/android/java/src/org/chromium/media/AudioRecordInput.java",
+ "base/android/java/src/org/chromium/media/MediaCodecBridge.java",
+ "base/android/java/src/org/chromium/media/MediaDrmBridge.java",
+ "base/android/java/src/org/chromium/media/MediaPlayerBridge.java",
+ "base/android/java/src/org/chromium/media/MediaPlayerListener.java",
+ "base/android/java/src/org/chromium/media/UsbMidiDeviceAndroid.java",
+ "base/android/java/src/org/chromium/media/UsbMidiDeviceFactoryAndroid.java",
+ "base/android/java/src/org/chromium/media/WebAudioMediaCodecBridge.java",
+ ]
+ jni_package = "media"
+ }
+
+ generate_jni("video_capture_android_jni_headers") {
+ sources = [
+ "base/android/java/src/org/chromium/media/VideoCapture.java",
+ "base/android/java/src/org/chromium/media/VideoCaptureFactory.java",
+ ]
+ jni_package = "media"
+ }
+
+ android_library("media_java") {
+ srcjar_deps = [
+ ":media_android_imageformat_list",
+ ]
+ java_files = []
+ }
+
+ java_cpp_template("media_android_imageformat_list") {
+ sources = [
+ "base/android/java/src/org/chromium/media/ImageFormat.template",
+ ]
+ inputs = [
+ "video/capture/android/imageformat_list.h"
+ ]
+ package_name = "org/chromium/media"
+ }
+
+ # TODO(dalecurtis): Finish media_unittests_apk and media_perftests_apk.
+}
diff --git a/media/OWNERS b/media/OWNERS
index b5706fc4ee..d813f89c1e 100644
--- a/media/OWNERS
+++ b/media/OWNERS
@@ -4,6 +4,7 @@ ddorwin@chromium.org
scherkus@chromium.org
shadi@chromium.org
vrk@chromium.org
+wolenetz@chromium.org
xhwang@chromium.org
per-file *.isolate=csharp@chromium.org
diff --git a/media/audio/audio_input_volume_unittest.cc b/media/audio/audio_input_volume_unittest.cc
index e89d106f7e..5cfec55361 100644
--- a/media/audio/audio_input_volume_unittest.cc
+++ b/media/audio/audio_input_volume_unittest.cc
@@ -149,7 +149,7 @@ TEST_F(AudioInputVolumeTest, MAYBE_InputVolumeTest) {
double current_volume = ais->GetVolume();
EXPECT_EQ(max_volume, current_volume);
- // Set the volume to the mininum level (=0).
+ // Set the volume to the minimum level (=0).
double new_volume = 0.0;
ais->SetVolume(new_volume);
#if defined(OS_LINUX)
diff --git a/media/audio/audio_manager.h b/media/audio/audio_manager.h
index ca385706c0..915308ef77 100644
--- a/media/audio/audio_manager.h
+++ b/media/audio/audio_manager.h
@@ -67,14 +67,14 @@ class MEDIA_EXPORT AudioManager {
// recording.
//
// Not threadsafe; in production this should only be called from the
- // Audio IO thread (see GetTaskRunner()).
+ // Audio worker thread (see GetWorkerTaskRunner()).
virtual void GetAudioInputDeviceNames(AudioDeviceNames* device_names) = 0;
// Appends a list of available output devices to |device_names|,
// which must initially be empty.
//
// Not threadsafe; in production this should only be called from the
- // Audio IO thread (see GetTaskRunner()).
+ // Audio worker thread (see GetWorkerTaskRunner()).
virtual void GetAudioOutputDeviceNames(AudioDeviceNames* device_names) = 0;
// Factory for all the supported stream formats. |params| defines parameters
@@ -167,7 +167,8 @@ class MEDIA_EXPORT AudioManager {
// If the hardware has only an input device (e.g. a webcam), the return value
// will be empty (which the caller can then interpret to be the default output
// device). Implementations that don't yet support this feature, must return
- // an empty string.
+ // an empty string. Must be called on the audio worker thread (see
+ // GetWorkerTaskRunner()).
virtual std::string GetAssociatedOutputDeviceID(
const std::string& input_device_id) = 0;
diff --git a/media/audio/audio_output_controller.cc b/media/audio/audio_output_controller.cc
index 232b77d727..fb01254568 100644
--- a/media/audio/audio_output_controller.cc
+++ b/media/audio/audio_output_controller.cc
@@ -223,7 +223,7 @@ void AudioOutputController::DoPause() {
// Let the renderer know we've stopped. Necessary to let PPAPI clients know
// audio has been shutdown. TODO(dalecurtis): This stinks. PPAPI should have
// a better way to know when it should exit PPB_Audio_Shared::Run().
- sync_reader_->UpdatePendingBytes(-1);
+ sync_reader_->UpdatePendingBytes(kuint32max);
handler_->OnPaused();
}
diff --git a/media/audio/fake_audio_input_stream.cc b/media/audio/fake_audio_input_stream.cc
index 384adcb411..74ac579c1a 100644
--- a/media/audio/fake_audio_input_stream.cc
+++ b/media/audio/fake_audio_input_stream.cc
@@ -26,11 +26,31 @@ const int kAutomaticBeepIntervalInMs = 500;
// Automatic beep will be triggered every |kAutomaticBeepIntervalInMs| unless
// users explicitly call BeepOnce(), which will disable the automatic beep.
-struct BeepContext {
- BeepContext() : beep_once(false), automatic(true) {}
- base::Lock beep_lock;
- bool beep_once;
- bool automatic;
+class BeepContext {
+ public:
+ BeepContext() : beep_once_(false), automatic_beep_(true) {}
+
+ void SetBeepOnce(bool enable) {
+ base::AutoLock auto_lock(lock_);
+ beep_once_ = enable;
+
+    // Disable the automatic beep if the user explicitly sets |beep_once_| to
+    // true.
+ if (enable)
+ automatic_beep_ = false;
+ }
+ bool beep_once() const {
+ base::AutoLock auto_lock(lock_);
+ return beep_once_;
+ }
+ bool automatic_beep() const {
+ base::AutoLock auto_lock(lock_);
+ return automatic_beep_;
+ }
+
+ private:
+ mutable base::Lock lock_;
+ bool beep_once_;
+ bool automatic_beep_;
};
static base::LazyInstance<BeepContext> g_beep_context =
@@ -52,7 +72,7 @@ FakeAudioInputStream::FakeAudioInputStream(AudioManagerBase* manager,
params.frames_per_buffer()) /
8),
params_(params),
- thread_("FakeAudioRecordingThread"),
+ task_runner_(manager->GetTaskRunner()),
callback_interval_(base::TimeDelta::FromMilliseconds(
(params.frames_per_buffer() * 1000) / params.sample_rate())),
beep_duration_in_buffers_(kBeepDurationMilliseconds *
@@ -62,12 +82,15 @@ FakeAudioInputStream::FakeAudioInputStream(AudioManagerBase* manager,
beep_generated_in_buffers_(0),
beep_period_in_frames_(params.sample_rate() / kBeepFrequency),
frames_elapsed_(0),
- audio_bus_(AudioBus::Create(params)) {
+ audio_bus_(AudioBus::Create(params)),
+ weak_factory_(this) {
+ DCHECK(audio_manager_->GetTaskRunner()->BelongsToCurrentThread());
}
FakeAudioInputStream::~FakeAudioInputStream() {}
bool FakeAudioInputStream::Open() {
+ DCHECK(audio_manager_->GetTaskRunner()->BelongsToCurrentThread());
buffer_.reset(new uint8[buffer_size_]);
memset(buffer_.get(), 0, buffer_size_);
audio_bus_->Zero();
@@ -75,14 +98,13 @@ bool FakeAudioInputStream::Open() {
}
void FakeAudioInputStream::Start(AudioInputCallback* callback) {
- DCHECK(!thread_.IsRunning());
+ DCHECK(audio_manager_->GetTaskRunner()->BelongsToCurrentThread());
DCHECK(!callback_);
callback_ = callback;
last_callback_time_ = TimeTicks::Now();
- thread_.Start();
- thread_.message_loop()->PostDelayedTask(
+ task_runner_->PostDelayedTask(
FROM_HERE,
- base::Bind(&FakeAudioInputStream::DoCallback, base::Unretained(this)),
+ base::Bind(&FakeAudioInputStream::DoCallback, weak_factory_.GetWeakPtr()),
callback_interval_);
}
@@ -108,8 +130,7 @@ void FakeAudioInputStream::DoCallback() {
bool should_beep = false;
{
BeepContext* beep_context = g_beep_context.Pointer();
- base::AutoLock auto_lock(beep_context->beep_lock);
- if (beep_context->automatic) {
+ if (beep_context->automatic_beep()) {
base::TimeDelta delta = interval_from_last_beep_ -
TimeDelta::FromMilliseconds(kAutomaticBeepIntervalInMs);
if (delta > base::TimeDelta()) {
@@ -117,8 +138,8 @@ void FakeAudioInputStream::DoCallback() {
interval_from_last_beep_ = delta;
}
} else {
- should_beep = beep_context->beep_once;
- beep_context->beep_once = false;
+ should_beep = beep_context->beep_once();
+ beep_context->SetBeepOnce(false);
}
}
@@ -151,29 +172,34 @@ void FakeAudioInputStream::DoCallback() {
callback_->OnData(this, audio_bus_.get(), buffer_size_, 1.0);
frames_elapsed_ += params_.frames_per_buffer();
- thread_.message_loop()->PostDelayedTask(
+ task_runner_->PostDelayedTask(
FROM_HERE,
- base::Bind(&FakeAudioInputStream::DoCallback, base::Unretained(this)),
+ base::Bind(&FakeAudioInputStream::DoCallback, weak_factory_.GetWeakPtr()),
next_callback_time);
}
void FakeAudioInputStream::Stop() {
- thread_.Stop();
+ DCHECK(audio_manager_->GetTaskRunner()->BelongsToCurrentThread());
+ weak_factory_.InvalidateWeakPtrs();
callback_ = NULL;
}
void FakeAudioInputStream::Close() {
+ DCHECK(audio_manager_->GetTaskRunner()->BelongsToCurrentThread());
audio_manager_->ReleaseInputStream(this);
}
double FakeAudioInputStream::GetMaxVolume() {
+ DCHECK(audio_manager_->GetTaskRunner()->BelongsToCurrentThread());
return 1.0;
}
void FakeAudioInputStream::SetVolume(double volume) {
+ DCHECK(audio_manager_->GetTaskRunner()->BelongsToCurrentThread());
}
double FakeAudioInputStream::GetVolume() {
+ DCHECK(audio_manager_->GetTaskRunner()->BelongsToCurrentThread());
return 1.0;
}
@@ -186,9 +212,7 @@ bool FakeAudioInputStream::GetAutomaticGainControl() {
// static
void FakeAudioInputStream::BeepOnce() {
BeepContext* beep_context = g_beep_context.Pointer();
- base::AutoLock auto_lock(beep_context->beep_lock);
- beep_context->beep_once = true;
- beep_context->automatic = false;
+ beep_context->SetBeepOnce(true);
}
} // namespace media
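
The BeepContext rewrite above turns a struct whose lock had to be taken by every caller into a class that hides its base::Lock behind accessors, so the beep flags can only be read or changed consistently. A minimal standalone sketch of the same pattern, using std::mutex in place of base::Lock (the names here are illustrative, not part of the patch):

#include <mutex>

// One-shot trigger that disables its automatic mode once a caller asks for an
// explicit trigger, mirroring BeepContext::SetBeepOnce() above.
class OneShotTrigger {
 public:
  OneShotTrigger() : fire_once_(false), automatic_(true) {}

  void SetFireOnce(bool enable) {
    std::lock_guard<std::mutex> lock(lock_);
    fire_once_ = enable;
    if (enable)
      automatic_ = false;  // An explicit trigger wins over the automatic mode.
  }

  bool fire_once() const {
    std::lock_guard<std::mutex> lock(lock_);
    return fire_once_;
  }

  bool automatic() const {
    std::lock_guard<std::mutex> lock(lock_);
    return automatic_;
  }

 private:
  mutable std::mutex lock_;  // Guards both flags, as base::Lock does above.
  bool fire_once_;
  bool automatic_;
};

Keeping the lock private means callers can no longer forget to take it before touching the flags, which is the main point of the change.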
diff --git a/media/audio/fake_audio_input_stream.h b/media/audio/fake_audio_input_stream.h
index e6c625e6b3..4c3c24c555 100644
--- a/media/audio/fake_audio_input_stream.h
+++ b/media/audio/fake_audio_input_stream.h
@@ -61,7 +61,7 @@ class MEDIA_EXPORT FakeAudioInputStream
scoped_ptr<uint8[]> buffer_;
int buffer_size_;
AudioParameters params_;
- base::Thread thread_;
+ const scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
base::TimeTicks last_callback_time_;
base::TimeDelta callback_interval_;
base::TimeDelta interval_from_last_beep_;
@@ -71,6 +71,10 @@ class MEDIA_EXPORT FakeAudioInputStream
int frames_elapsed_;
scoped_ptr<media::AudioBus> audio_bus_;
+ // Allows us to run tasks on the FakeAudioInputStream instance which are
+ // bound by its lifetime.
+ base::WeakPtrFactory<FakeAudioInputStream> weak_factory_;
+
DISALLOW_COPY_AND_ASSIGN(FakeAudioInputStream);
};
diff --git a/media/audio/mac/audio_low_latency_input_mac.cc b/media/audio/mac/audio_low_latency_input_mac.cc
index d7a3430f6d..f0f6629185 100644
--- a/media/audio/mac/audio_low_latency_input_mac.cc
+++ b/media/audio/mac/audio_low_latency_input_mac.cc
@@ -11,6 +11,7 @@
#include "base/mac/mac_logging.h"
#include "media/audio/mac/audio_manager_mac.h"
#include "media/base/audio_bus.h"
+#include "media/base/audio_fifo.h"
#include "media/base/data_buffer.h"
namespace media {
@@ -34,17 +35,17 @@ static std::ostream& operator<<(std::ostream& os,
AUAudioInputStream::AUAudioInputStream(AudioManagerMac* manager,
const AudioParameters& input_params,
- const AudioParameters& output_params,
AudioDeviceID audio_device_id)
: manager_(manager),
+ number_of_frames_(input_params.frames_per_buffer()),
sink_(NULL),
audio_unit_(0),
input_device_id_(audio_device_id),
started_(false),
hardware_latency_frames_(0),
- fifo_delay_bytes_(0),
number_of_channels_in_frame_(0),
- audio_bus_(media::AudioBus::Create(input_params)) {
+ audio_bus_(media::AudioBus::Create(input_params)),
+ audio_wrapper_(media::AudioBus::Create(input_params)) {
DCHECK(manager_);
// Set up the desired (output) format specified by the client.
@@ -62,12 +63,6 @@ AUAudioInputStream::AUAudioInputStream(AudioManagerMac* manager,
DVLOG(1) << "Desired ouput format: " << format_;
- // Set number of sample frames per callback used by the internal audio layer.
- // An internal FIFO is then utilized to adapt the internal size to the size
- // requested by the client.
- number_of_frames_ = output_params.frames_per_buffer();
- DVLOG(1) << "Size of data buffer in frames : " << number_of_frames_;
-
// Derive size (in bytes) of the buffers that we will render to.
UInt32 data_byte_size = number_of_frames_ * format_.mBytesPerFrame;
DVLOG(1) << "Size of data buffer in bytes : " << data_byte_size;
@@ -82,38 +77,6 @@ AUAudioInputStream::AUAudioInputStream(AudioManagerMac* manager,
audio_buffer->mNumberChannels = input_params.channels();
audio_buffer->mDataByteSize = data_byte_size;
audio_buffer->mData = audio_data_buffer_.get();
-
- // Set up an internal FIFO buffer that will accumulate recorded audio frames
- // until a requested size is ready to be sent to the client.
- // It is not possible to ask for less than |kAudioFramesPerCallback| number of
- // audio frames.
- size_t requested_size_frames =
- input_params.GetBytesPerBuffer() / format_.mBytesPerPacket;
- if (requested_size_frames < number_of_frames_) {
- // For devices that only support a low sample rate like 8kHz, we adjust the
- // buffer size to match number_of_frames_. The value of number_of_frames_
- // in this case has not been calculated based on hardware settings but
- // rather our hardcoded defaults (see ChooseBufferSize).
- requested_size_frames = number_of_frames_;
- }
-
- requested_size_bytes_ = requested_size_frames * format_.mBytesPerFrame;
- DVLOG(1) << "Requested buffer size in bytes : " << requested_size_bytes_;
- DVLOG_IF(0, requested_size_frames > number_of_frames_) << "FIFO is used";
-
- const int number_of_bytes = number_of_frames_ * format_.mBytesPerFrame;
- fifo_delay_bytes_ = requested_size_bytes_ - number_of_bytes;
-
- // Allocate some extra memory to avoid memory reallocations.
- // Ensure that the size is an even multiple of |number_of_frames_ and
- // larger than |requested_size_frames|.
- // Example: number_of_frames_=128, requested_size_frames=480 =>
- // allocated space equals 4*128=512 audio frames
- const int max_forward_capacity = number_of_bytes *
- ((requested_size_frames / number_of_frames_) + 1);
- fifo_.reset(new media::SeekableBuffer(0, max_forward_capacity));
-
- data_ = new media::DataBuffer(requested_size_bytes_);
}
AUAudioInputStream::~AUAudioInputStream() {}
@@ -132,20 +95,20 @@ bool AUAudioInputStream::Open() {
// Start by obtaining an AudioOuputUnit using an AUHAL component description.
- Component comp;
- ComponentDescription desc;
-
// Description for the Audio Unit we want to use (AUHAL in this case).
- desc.componentType = kAudioUnitType_Output;
- desc.componentSubType = kAudioUnitSubType_HALOutput;
- desc.componentManufacturer = kAudioUnitManufacturer_Apple;
- desc.componentFlags = 0;
- desc.componentFlagsMask = 0;
- comp = FindNextComponent(0, &desc);
+ AudioComponentDescription desc = {
+ kAudioUnitType_Output,
+ kAudioUnitSubType_HALOutput,
+ kAudioUnitManufacturer_Apple,
+ 0,
+ 0
+ };
+
+ AudioComponent comp = AudioComponentFindNext(0, &desc);
DCHECK(comp);
// Get access to the service provided by the specified Audio Unit.
- OSStatus result = OpenAComponent(comp, &audio_unit_);
+ OSStatus result = AudioComponentInstanceNew(comp, &audio_unit_);
if (result) {
HandleError(result);
return false;
@@ -527,27 +490,45 @@ OSStatus AUAudioInputStream::Provide(UInt32 number_of_frames,
uint8* audio_data = reinterpret_cast<uint8*>(buffer.mData);
uint32 capture_delay_bytes = static_cast<uint32>
((capture_latency_frames + 0.5) * format_.mBytesPerFrame);
- // Account for the extra delay added by the FIFO.
- capture_delay_bytes += fifo_delay_bytes_;
DCHECK(audio_data);
if (!audio_data)
return kAudioUnitErr_InvalidElement;
- // Accumulate captured audio in FIFO until we can match the output size
- // requested by the client.
- fifo_->Append(audio_data, buffer.mDataByteSize);
+ if (number_of_frames != number_of_frames_) {
+ // Create a FIFO on the fly to handle any discrepancies in callback rates.
+ if (!fifo_) {
+ VLOG(1) << "Audio frame size changed from " << number_of_frames_ << " to "
+ << number_of_frames << "; adding FIFO to compensate.";
+ fifo_.reset(new AudioFifo(
+ format_.mChannelsPerFrame, number_of_frames_ + number_of_frames));
+ }
+
+ if (audio_wrapper_->frames() != static_cast<int>(number_of_frames)) {
+ audio_wrapper_ = media::AudioBus::Create(format_.mChannelsPerFrame,
+ number_of_frames);
+ }
+ }
+
+ // Copy captured (and interleaved) data into deinterleaved audio bus.
+ audio_wrapper_->FromInterleaved(
+ audio_data, audio_wrapper_->frames(), format_.mBitsPerChannel / 8);
- // Deliver recorded data to the client as soon as the FIFO contains a
- // sufficient amount.
- if (fifo_->forward_bytes() >= requested_size_bytes_) {
- // Read from FIFO into temporary data buffer.
- fifo_->Read(data_->writable_data(), requested_size_bytes_);
+ // When FIFO does not kick in, data will be directly passed to the callback.
+ if (!fifo_) {
+ CHECK_EQ(audio_wrapper_->frames(), static_cast<int>(number_of_frames_));
+ sink_->OnData(
+ this, audio_wrapper_.get(), capture_delay_bytes, normalized_volume);
+ return noErr;
+ }
- // Copy captured (and interleaved) data into deinterleaved audio bus.
- audio_bus_->FromInterleaved(
- data_->data(), audio_bus_->frames(), format_.mBitsPerChannel / 8);
+  // Compensate for the audio delay caused by the FIFO.
+ capture_delay_bytes += fifo_->frames() * format_.mBytesPerFrame;
+ fifo_->Push(audio_wrapper_.get());
+ if (fifo_->frames() >= static_cast<int>(number_of_frames_)) {
+ // Consume the audio from the FIFO.
+ fifo_->Consume(audio_bus_.get(), 0, audio_bus_->frames());
+ DCHECK(fifo_->frames() < static_cast<int>(number_of_frames_));
- // Deliver data packet, delay estimation and volume level to the user.
sink_->OnData(
this, audio_bus_.get(), capture_delay_bytes, normalized_volume);
}
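
The Provide() hunk above drops the old byte-oriented SeekableBuffer and instead creates a media::AudioFifo only when CoreAudio starts delivering a callback size different from the requested |number_of_frames_|; frames still sitting in the FIFO are added to the reported capture delay. A rough single-channel, plain-C++ sketch of that adaptation idea (illustrative types and names, not the Chromium media API):

#include <cstddef>
#include <deque>
#include <functional>
#include <vector>

// Buffers incoming audio whenever the producer's callback size differs from
// the block size the consumer asked for, and emits fixed-size blocks.
class FrameAdapter {
 public:
  using Sink = std::function<void(const float*, size_t)>;

  explicit FrameAdapter(size_t frames_per_block)
      : frames_per_block_(frames_per_block) {}

  void Push(const float* data, size_t frames, const Sink& sink) {
    if (frames == frames_per_block_ && fifo_.empty()) {
      sink(data, frames);  // Fast path: sizes already match, no buffering.
      return;
    }
    fifo_.insert(fifo_.end(), data, data + frames);
    while (fifo_.size() >= frames_per_block_) {
      std::vector<float> block(fifo_.begin(),
                               fifo_.begin() + frames_per_block_);
      fifo_.erase(fifo_.begin(), fifo_.begin() + frames_per_block_);
      sink(block.data(), block.size());
    }
  }

  // Frames still waiting in the FIFO; the patch adds the equivalent amount
  // (in bytes) to the capture delay it reports to the client.
  size_t buffered_frames() const { return fifo_.size(); }

 private:
  const size_t frames_per_block_;
  std::deque<float> fifo_;
};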
diff --git a/media/audio/mac/audio_low_latency_input_mac.h b/media/audio/mac/audio_low_latency_input_mac.h
index 7726227eae..059cf1f808 100644
--- a/media/audio/mac/audio_low_latency_input_mac.h
+++ b/media/audio/mac/audio_low_latency_input_mac.h
@@ -45,11 +45,11 @@
#include "media/audio/agc_audio_stream.h"
#include "media/audio/audio_io.h"
#include "media/audio/audio_parameters.h"
-#include "media/base/seekable_buffer.h"
namespace media {
class AudioBus;
+class AudioFifo;
class AudioManagerMac;
class DataBuffer;
@@ -59,7 +59,6 @@ class AUAudioInputStream : public AgcAudioStream<AudioInputStream> {
// the audio manager who is creating this object.
AUAudioInputStream(AudioManagerMac* manager,
const AudioParameters& input_params,
- const AudioParameters& output_params,
AudioDeviceID audio_device_id);
// The dtor is typically called by the AudioManager only and it is usually
// triggered by calling AudioInputStream::Close().
@@ -115,7 +114,7 @@ class AUAudioInputStream : public AgcAudioStream<AudioInputStream> {
AudioManagerMac* manager_;
// Contains the desired number of audio frames in each callback.
- size_t number_of_frames_;
+ const size_t number_of_frames_;
// Pointer to the object that will receive the recorded audio samples.
AudioInputCallback* sink_;
@@ -145,31 +144,24 @@ class AUAudioInputStream : public AgcAudioStream<AudioInputStream> {
// Fixed capture hardware latency in frames.
double hardware_latency_frames_;
- // Delay due to the FIFO in bytes.
- int fifo_delay_bytes_;
-
// The number of channels in each frame of audio data, which is used
// when querying the volume of each channel.
int number_of_channels_in_frame_;
- // Accumulates recorded data packets until the requested size has been stored.
- scoped_ptr<media::SeekableBuffer> fifo_;
-
- // Intermediate storage of data from the FIFO before sending it to the
- // client using the OnData() callback.
- scoped_refptr<media::DataBuffer> data_;
-
- // The client requests that the recorded data shall be delivered using
- // OnData() callbacks where each callback contains this amount of bytes.
- int requested_size_bytes_;
+  // Dynamically allocated FIFO used to accumulate recorded data when
+  // CoreAudio delivers a different frame size than the one requested.
+ scoped_ptr<media::AudioFifo> fifo_;
// Used to defer Start() to workaround http://crbug.com/160920.
base::CancelableClosure deferred_start_cb_;
- // Extra audio bus used for storage of deinterleaved data for the OnData
- // callback.
+ // Audio bus used for storage of deinterleaved data for the OnData callback.
scoped_ptr<media::AudioBus> audio_bus_;
+ // Audio bus used to convert interleaved data to deinterleaved data before
+ // storing data to FIFO or delivering data via OnData callback.
+ scoped_ptr<media::AudioBus> audio_wrapper_;
+
DISALLOW_COPY_AND_ASSIGN(AUAudioInputStream);
};
diff --git a/media/audio/mac/audio_manager_mac.cc b/media/audio/mac/audio_manager_mac.cc
index 970720679a..eabce32381 100644
--- a/media/audio/mac/audio_manager_mac.cc
+++ b/media/audio/mac/audio_manager_mac.cc
@@ -625,18 +625,7 @@ AudioInputStream* AudioManagerMac::MakeLowLatencyInputStream(
AudioDeviceID audio_device_id = GetAudioDeviceIdByUId(true, device_id);
AudioInputStream* stream = NULL;
if (audio_device_id != kAudioObjectUnknown) {
- // AUAudioInputStream needs to be fed the preferred audio output parameters
- // of the matching device so that the buffer size of both input and output
- // can be matched. See constructor of AUAudioInputStream for more.
- const std::string associated_output_device(
- GetAssociatedOutputDeviceID(device_id));
- const AudioParameters output_params =
- GetPreferredOutputStreamParameters(
- associated_output_device.empty() ?
- AudioManagerBase::kDefaultDeviceId : associated_output_device,
- params);
- stream = new AUAudioInputStream(this, params, output_params,
- audio_device_id);
+ stream = new AUAudioInputStream(this, params, audio_device_id);
input_streams_.push_back(stream);
}
diff --git a/media/audio/pulse/audio_manager_pulse.cc b/media/audio/pulse/audio_manager_pulse.cc
index 412f2a421b..03ff17dac7 100644
--- a/media/audio/pulse/audio_manager_pulse.cc
+++ b/media/audio/pulse/audio_manager_pulse.cc
@@ -42,8 +42,10 @@ static const int kMaximumOutputBufferSize = 8192;
// Default input buffer size.
static const int kDefaultInputBufferSize = 1024;
+#if defined(DLOPEN_PULSEAUDIO)
static const base::FilePath::CharType kPulseLib[] =
FILE_PATH_LITERAL("libpulse.so.0");
+#endif
// static
AudioManager* AudioManagerPulse::Create(AudioLogFactory* audio_log_factory) {
@@ -175,7 +177,7 @@ AudioParameters AudioManagerPulse::GetPreferredOutputStreamParameters(
int buffer_size = kMinimumOutputBufferSize;
int bits_per_sample = 16;
int input_channels = 0;
- int sample_rate;
+ int sample_rate = GetNativeSampleRate();
if (input_params.IsValid()) {
bits_per_sample = input_params.bits_per_sample();
channel_layout = input_params.channel_layout();
@@ -183,9 +185,6 @@ AudioParameters AudioManagerPulse::GetPreferredOutputStreamParameters(
buffer_size =
std::min(kMaximumOutputBufferSize,
std::max(buffer_size, input_params.frames_per_buffer()));
- sample_rate = input_params.sample_rate();
- } else {
- sample_rate = GetNativeSampleRate();
}
int user_buffer_size = GetUserBufferSize();
diff --git a/media/audio/sounds/audio_stream_handler.cc b/media/audio/sounds/audio_stream_handler.cc
index 2a08b29d71..645fcb366a 100644
--- a/media/audio/sounds/audio_stream_handler.cc
+++ b/media/audio/sounds/audio_stream_handler.cc
@@ -37,12 +37,11 @@ class AudioStreamHandler::AudioStreamContainer
: public AudioOutputStream::AudioSourceCallback {
public:
AudioStreamContainer(const WavAudioHandler& wav_audio)
- : stream_(NULL),
- wav_audio_(wav_audio),
+ : started_(false),
+ stream_(NULL),
cursor_(0),
- started_(false),
- delayed_stop_posted_(false) {
- }
+ delayed_stop_posted_(false),
+ wav_audio_(wav_audio) {}
virtual ~AudioStreamContainer() {
DCHECK(AudioManager::Get()->GetTaskRunner()->BelongsToCurrentThread());
@@ -58,8 +57,8 @@ class AudioStreamHandler::AudioStreamContainer
p.sample_rate(),
p.bits_per_sample(),
kDefaultFrameCount);
- stream_ = AudioManager::Get()->MakeAudioOutputStreamProxy(
- params, std::string());
+ stream_ = AudioManager::Get()->MakeAudioOutputStreamProxy(params,
+ std::string());
if (!stream_ || !stream_->Open()) {
LOG(ERROR) << "Failed to open an output stream.";
return;
@@ -71,8 +70,8 @@ class AudioStreamHandler::AudioStreamContainer
base::AutoLock al(state_lock_);
delayed_stop_posted_ = false;
- stop_closure_.Reset(base::Bind(
- &AudioStreamContainer::StopStream, base::Unretained(this)));
+ stop_closure_.Reset(base::Bind(&AudioStreamContainer::StopStream,
+ base::Unretained(this)));
if (started_) {
if (wav_audio_.AtEnd(cursor_))
@@ -81,9 +80,9 @@ class AudioStreamHandler::AudioStreamContainer
}
cursor_ = 0;
- started_ = true;
}
+ started_ = true;
if (g_audio_source_for_testing)
stream_->Start(g_audio_source_for_testing);
else
@@ -99,6 +98,7 @@ class AudioStreamHandler::AudioStreamContainer
if (stream_)
stream_->Close();
stream_ = NULL;
+ stop_closure_.Cancel();
}
private:
@@ -131,24 +131,25 @@ class AudioStreamHandler::AudioStreamContainer
void StopStream() {
DCHECK(AudioManager::Get()->GetTaskRunner()->BelongsToCurrentThread());
- base::AutoLock al(state_lock_);
-
if (stream_ && started_) {
+ // Do not hold the |state_lock_| while stopping the output stream.
stream_->Stop();
if (g_observer_for_testing)
g_observer_for_testing->OnStop(cursor_);
}
+
started_ = false;
}
+ // Must only be accessed on the AudioManager::GetTaskRunner() thread.
+ bool started_;
AudioOutputStream* stream_;
- const WavAudioHandler wav_audio_;
-
+ // All variables below must be accessed under |state_lock_| when |started_|.
base::Lock state_lock_;
size_t cursor_;
- bool started_;
bool delayed_stop_posted_;
+ const WavAudioHandler wav_audio_;
base::CancelableClosure stop_closure_;
DISALLOW_COPY_AND_ASSIGN(AudioStreamContainer);
diff --git a/media/audio/win/audio_low_latency_input_win.cc b/media/audio/win/audio_low_latency_input_win.cc
index c43ed22977..d29d1b42c6 100644
--- a/media/audio/win/audio_low_latency_input_win.cc
+++ b/media/audio/win/audio_low_latency_input_win.cc
@@ -9,6 +9,7 @@
#include "base/strings/utf_string_conversions.h"
#include "media/audio/win/audio_manager_win.h"
#include "media/audio/win/avrt_wrapper_win.h"
+#include "media/audio/win/core_audio_util_win.h"
#include "media/base/audio_bus.h"
using base::win::ScopedComPtr;
@@ -97,7 +98,9 @@ WASAPIAudioInputStream::WASAPIAudioInputStream(AudioManagerWin* manager,
}
}
-WASAPIAudioInputStream::~WASAPIAudioInputStream() {}
+WASAPIAudioInputStream::~WASAPIAudioInputStream() {
+ DCHECK(CalledOnValidThread());
+}
bool WASAPIAudioInputStream::Open() {
DCHECK(CalledOnValidThread());
@@ -512,6 +515,11 @@ HRESULT WASAPIAudioInputStream::SetCaptureDevice() {
base::WideToUTF8(static_cast<WCHAR*>(communications_id))) {
DLOG(WARNING) << "Ducking has been requested for a non-default device."
"Not supported.";
+ // We can't honor the requested effect flag, so turn it off and
+ // continue. We'll check this flag later to see if we've actually
+ // opened up the communications device, so it's important that it
+ // reflects the active state.
+ effects_ &= ~AudioParameters::DUCKING;
endpoint_device_.Release(); // Fall back on code below.
}
}
@@ -639,12 +647,14 @@ HRESULT WASAPIAudioInputStream::InitializeAudioEngine() {
// buffer is never smaller than the minimum buffer size needed to ensure
// that glitches do not occur between the periodic processing passes.
// This setting should lead to lowest possible latency.
- HRESULT hr = audio_client_->Initialize(AUDCLNT_SHAREMODE_SHARED,
- flags,
- 0, // hnsBufferDuration
- 0,
- &format_,
- NULL);
+ HRESULT hr = audio_client_->Initialize(
+ AUDCLNT_SHAREMODE_SHARED,
+ flags,
+ 0, // hnsBufferDuration
+ 0,
+ &format_,
+ (effects_ & AudioParameters::DUCKING) ? &kCommunicationsSessionId : NULL);
+
if (FAILED(hr))
return hr;
diff --git a/media/audio/win/audio_low_latency_input_win.h b/media/audio/win/audio_low_latency_input_win.h
index a33a582c97..91fdde1594 100644
--- a/media/audio/win/audio_low_latency_input_win.h
+++ b/media/audio/win/audio_low_latency_input_win.h
@@ -157,8 +157,10 @@ class MEDIA_EXPORT WASAPIAudioInputStream
// Length of the audio endpoint buffer.
uint32 endpoint_buffer_size_frames_;
- // A copy of the supplied AudioParameter's |effects|.
- const int effects_;
+ // A copy of the supplied AudioParameter's |effects|. If ducking was
+ // specified (desired device=communications) but we ended up not being
+ // able to open the communications device, this flag will be cleared.
+ int effects_;
// Contains the unique name of the selected endpoint device.
// Note that AudioManagerBase::kDefaultDeviceId represents the default
diff --git a/media/audio/win/audio_low_latency_output_win.cc b/media/audio/win/audio_low_latency_output_win.cc
index 6aad434f6e..15cabbaff6 100644
--- a/media/audio/win/audio_low_latency_output_win.cc
+++ b/media/audio/win/audio_low_latency_output_win.cc
@@ -71,6 +71,7 @@ WASAPIAudioOutputStream::WASAPIAudioOutputStream(AudioManagerWin* manager,
source_(NULL),
audio_bus_(AudioBus::Create(params)) {
DCHECK(manager_);
+
VLOG(1) << "WASAPIAudioOutputStream::WASAPIAudioOutputStream()";
VLOG_IF(1, share_mode_ == AUDCLNT_SHAREMODE_EXCLUSIVE)
<< "Core Audio (WASAPI) EXCLUSIVE MODE is enabled.";
@@ -120,7 +121,9 @@ WASAPIAudioOutputStream::WASAPIAudioOutputStream(AudioManagerWin* manager,
DCHECK(stop_render_event_.IsValid());
}
-WASAPIAudioOutputStream::~WASAPIAudioOutputStream() {}
+WASAPIAudioOutputStream::~WASAPIAudioOutputStream() {
+ DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_);
+}
bool WASAPIAudioOutputStream::Open() {
VLOG(1) << "WASAPIAudioOutputStream::Open()";
@@ -128,11 +131,19 @@ bool WASAPIAudioOutputStream::Open() {
if (opened_)
return true;
+ DCHECK(!audio_client_);
+ DCHECK(!audio_render_client_);
+
+ // Will be set to true if we ended up opening the default communications
+ // device.
+ bool communications_device = false;
+
// Create an IAudioClient interface for the default rendering IMMDevice.
ScopedComPtr<IAudioClient> audio_client;
if (device_id_.empty() ||
CoreAudioUtil::DeviceIsDefault(eRender, device_role_, device_id_)) {
audio_client = CoreAudioUtil::CreateDefaultClient(eRender, device_role_);
+ communications_device = (device_role_ == eCommunications);
} else {
ScopedComPtr<IMMDevice> device(CoreAudioUtil::CreateDevice(device_id_));
DLOG_IF(ERROR, !device) << "Failed to open device: " << device_id_;
@@ -157,7 +168,8 @@ bool WASAPIAudioOutputStream::Open() {
// mode and using event-driven buffer handling.
hr = CoreAudioUtil::SharedModeInitialize(
audio_client, &format_, audio_samples_render_event_.Get(),
- &endpoint_buffer_size_frames_);
+ &endpoint_buffer_size_frames_,
+ communications_device ? &kCommunicationsSessionId : NULL);
if (FAILED(hr))
return false;
@@ -198,7 +210,7 @@ bool WASAPIAudioOutputStream::Open() {
if (!audio_render_client)
return false;
- // Store valid COM interfaces.
+ // Store valid COM interfaces.
audio_client_ = audio_client;
audio_render_client_ = audio_render_client;
diff --git a/media/audio/win/core_audio_util_win.cc b/media/audio/win/core_audio_util_win.cc
index 71e8d717f6..29e955efce 100644
--- a/media/audio/win/core_audio_util_win.cc
+++ b/media/audio/win/core_audio_util_win.cc
@@ -23,6 +23,12 @@ using base::win::ScopedHandle;
namespace media {
+// See header file for documentation.
+// {BE39AF4F-087C-423F-9303-234EC1E5B8EE}
+const GUID kCommunicationsSessionId = {
+ 0xbe39af4f, 0x87c, 0x423f, { 0x93, 0x3, 0x23, 0x4e, 0xc1, 0xe5, 0xb8, 0xee }
+};
+
enum { KSAUDIO_SPEAKER_UNSUPPORTED = 0 };
// Converts Microsoft's channel configuration to ChannelLayout.
@@ -731,10 +737,9 @@ ChannelConfig CoreAudioUtil::GetChannelConfig(const std::string& device_id,
return static_cast<ChannelConfig>(format.dwChannelMask);
}
-HRESULT CoreAudioUtil::SharedModeInitialize(IAudioClient* client,
- const WAVEFORMATPCMEX* format,
- HANDLE event_handle,
- uint32* endpoint_buffer_size) {
+HRESULT CoreAudioUtil::SharedModeInitialize(
+ IAudioClient* client, const WAVEFORMATPCMEX* format, HANDLE event_handle,
+ uint32* endpoint_buffer_size, const GUID* session_guid) {
DCHECK(IsSupported());
// Use default flags (i.e, dont set AUDCLNT_STREAMFLAGS_NOPERSIST) to
@@ -760,7 +765,7 @@ HRESULT CoreAudioUtil::SharedModeInitialize(IAudioClient* client,
0,
0,
reinterpret_cast<const WAVEFORMATEX*>(format),
- NULL);
+ session_guid);
if (FAILED(hr)) {
DVLOG(1) << "IAudioClient::Initialize: " << std::hex << hr;
return hr;
diff --git a/media/audio/win/core_audio_util_win.h b/media/audio/win/core_audio_util_win.h
index 8727f97b51..4e935318d3 100644
--- a/media/audio/win/core_audio_util_win.h
+++ b/media/audio/win/core_audio_util_win.h
@@ -200,10 +200,15 @@ class MEDIA_EXPORT CoreAudioUtil {
// If a valid event is provided in |event_handle|, the client will be
// initialized for event-driven buffer handling. If |event_handle| is set to
// NULL, event-driven buffer handling is not utilized.
+  // This function will initialize the audio client as part of the default
+  // audio session if NULL is passed for |session_guid|; otherwise the client
+  // will be associated with the specified session.
static HRESULT SharedModeInitialize(IAudioClient* client,
const WAVEFORMATPCMEX* format,
HANDLE event_handle,
- uint32* endpoint_buffer_size);
+ uint32* endpoint_buffer_size,
+ const GUID* session_guid);
+
// TODO(henrika): add ExclusiveModeInitialize(...)
// Create an IAudioRenderClient client for an existing IAudioClient given by
@@ -230,6 +235,13 @@ class MEDIA_EXPORT CoreAudioUtil {
DISALLOW_COPY_AND_ASSIGN(CoreAudioUtil);
};
+// The special audio session identifier we use when opening up the default
+// communications device. It has the effect that a separate volume control is
+// shown in the system's volume mixer, which makes it easier to control
+// ducking and to observe its behavior visually.
+// Use with |SharedModeInitialize|.
+extern const GUID kCommunicationsSessionId;
+
} // namespace media
#endif // MEDIA_AUDIO_WIN_CORE_AUDIO_UTIL_WIN_H_
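
With the extra |session_guid| parameter, callers of SharedModeInitialize() now choose the audio session explicitly: the unit tests below pass NULL to stay in the default session, while the output stream passes &kCommunicationsSessionId once it has opened the default communications device. A small Windows-only illustration of that choice (the helper name is made up for this example; the GUID value is copied from the core_audio_util_win.cc hunk above):

#include <windows.h>

#include <cstdio>

// {BE39AF4F-087C-423F-9303-234EC1E5B8EE}, as defined in the patch.
static const GUID kCommunicationsSessionId = {
    0xbe39af4f, 0x87c, 0x423f,
    {0x93, 0x3, 0x23, 0x4e, 0xc1, 0xe5, 0xb8, 0xee}};

// Mirrors the decision in WASAPIAudioOutputStream::Open(): only the default
// communications device gets the dedicated session (and with it a separate
// volume control in the system mixer).
const GUID* SessionGuidFor(bool opened_default_communications_device) {
  return opened_default_communications_device ? &kCommunicationsSessionId
                                              : NULL;
}

int main() {
  std::printf("default session GUID pointer: %p\n",
              static_cast<const void*>(SessionGuidFor(false)));
  std::printf("communications session GUID pointer: %p\n",
              static_cast<const void*>(SessionGuidFor(true)));
  return 0;
}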
diff --git a/media/audio/win/core_audio_util_win_unittest.cc b/media/audio/win/core_audio_util_win_unittest.cc
index f18878cb06..cc8271ecc2 100644
--- a/media/audio/win/core_audio_util_win_unittest.cc
+++ b/media/audio/win/core_audio_util_win_unittest.cc
@@ -346,13 +346,13 @@ TEST_F(CoreAudioUtilWinTest, SharedModeInitialize) {
// Perform a shared-mode initialization without event-driven buffer handling.
uint32 endpoint_buffer_size = 0;
HRESULT hr = CoreAudioUtil::SharedModeInitialize(client, &format, NULL,
- &endpoint_buffer_size);
+ &endpoint_buffer_size, NULL);
EXPECT_TRUE(SUCCEEDED(hr));
EXPECT_GT(endpoint_buffer_size, 0u);
// It is only possible to create a client once.
hr = CoreAudioUtil::SharedModeInitialize(client, &format, NULL,
- &endpoint_buffer_size);
+ &endpoint_buffer_size, NULL);
EXPECT_FALSE(SUCCEEDED(hr));
EXPECT_EQ(hr, AUDCLNT_E_ALREADY_INITIALIZED);
@@ -360,7 +360,7 @@ TEST_F(CoreAudioUtilWinTest, SharedModeInitialize) {
client = CoreAudioUtil::CreateDefaultClient(eRender, eConsole);
EXPECT_TRUE(client);
hr = CoreAudioUtil::SharedModeInitialize(client, &format, NULL,
- &endpoint_buffer_size);
+ &endpoint_buffer_size, NULL);
EXPECT_TRUE(SUCCEEDED(hr));
EXPECT_GT(endpoint_buffer_size, 0u);
@@ -373,7 +373,7 @@ TEST_F(CoreAudioUtilWinTest, SharedModeInitialize) {
EXPECT_FALSE(CoreAudioUtil::IsFormatSupported(
client, AUDCLNT_SHAREMODE_SHARED, &format));
hr = CoreAudioUtil::SharedModeInitialize(client, &format, NULL,
- &endpoint_buffer_size);
+ &endpoint_buffer_size, NULL);
EXPECT_TRUE(FAILED(hr));
EXPECT_EQ(hr, E_INVALIDARG);
@@ -389,7 +389,7 @@ TEST_F(CoreAudioUtilWinTest, SharedModeInitialize) {
EXPECT_TRUE(CoreAudioUtil::IsFormatSupported(
client, AUDCLNT_SHAREMODE_SHARED, &format));
hr = CoreAudioUtil::SharedModeInitialize(client, &format, event_handle.Get(),
- &endpoint_buffer_size);
+ &endpoint_buffer_size, NULL);
EXPECT_TRUE(SUCCEEDED(hr));
EXPECT_GT(endpoint_buffer_size, 0u);
}
@@ -420,7 +420,7 @@ TEST_F(CoreAudioUtilWinTest, CreateRenderAndCaptureClients) {
// Do a proper initialization and verify that it works this time.
CoreAudioUtil::SharedModeInitialize(client, &format, NULL,
- &endpoint_buffer_size);
+ &endpoint_buffer_size, NULL);
render_client = CoreAudioUtil::CreateRenderClient(client);
EXPECT_TRUE(render_client);
EXPECT_GT(endpoint_buffer_size, 0u);
@@ -432,7 +432,7 @@ TEST_F(CoreAudioUtilWinTest, CreateRenderAndCaptureClients) {
// Do a proper initialization and verify that it works this time.
CoreAudioUtil::SharedModeInitialize(client, &format, NULL,
- &endpoint_buffer_size);
+ &endpoint_buffer_size, NULL);
capture_client = CoreAudioUtil::CreateCaptureClient(client);
EXPECT_TRUE(capture_client);
EXPECT_GT(endpoint_buffer_size, 0u);
@@ -454,7 +454,7 @@ TEST_F(CoreAudioUtilWinTest, FillRenderEndpointBufferWithSilence) {
EXPECT_TRUE(SUCCEEDED(CoreAudioUtil::GetSharedModeMixFormat(client,
&format)));
CoreAudioUtil::SharedModeInitialize(client, &format, NULL,
- &endpoint_buffer_size);
+ &endpoint_buffer_size, NULL);
EXPECT_GT(endpoint_buffer_size, 0u);
ScopedComPtr<IAudioRenderClient> render_client(
diff --git a/media/base/android/OWNERS b/media/base/android/OWNERS
index 4f6695a84b..a26a30770f 100644
--- a/media/base/android/OWNERS
+++ b/media/base/android/OWNERS
@@ -1,6 +1,3 @@
# Preferred reviewers.
qinmin@chromium.org
wjia@chromium.org
-
-# JNI or last resource.
-bulach@chromium.org
diff --git a/media/base/android/java/src/org/chromium/media/VideoCaptureAndroid.java b/media/base/android/java/src/org/chromium/media/VideoCaptureAndroid.java
index 1fb2f76044..0bcd2aa8e4 100644
--- a/media/base/android/java/src/org/chromium/media/VideoCaptureAndroid.java
+++ b/media/base/android/java/src/org/chromium/media/VideoCaptureAndroid.java
@@ -133,7 +133,7 @@ public class VideoCaptureAndroid extends VideoCapture {
for (Camera.Size size : supportedSizes) {
formatList.add(new CaptureFormat(size.width,
size.height,
- (fpsRange[0] + 999) / 1000,
+ (fpsRange[1] + 999) / 1000,
pixelFormat));
}
}
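
For context on the one-character Java fix above: the camera API reports the supported preview fps range scaled by 1000, so the expression rounds the value up to whole frames per second, and the fix reads the range maximum (index 1) rather than the minimum (index 0). A quick check of the arithmetic (hypothetical helper, written in C++ for consistency with the other sketches in this review):

#include <cassert>

// Converts an fps value scaled by 1000 into whole frames per second,
// rounding up, exactly like (fpsRange[1] + 999) / 1000 in the Java code.
int ScaledFpsToWholeFps(int scaled_fps) {
  return (scaled_fps + 999) / 1000;
}

int main() {
  assert(ScaledFpsToWholeFps(30000) == 30);  // Exactly 30 fps.
  assert(ScaledFpsToWholeFps(29970) == 30);  // 29.97 fps rounds up to 30.
  assert(ScaledFpsToWholeFps(15000) == 15);
  return 0;
}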
diff --git a/media/base/android/media_codec_bridge.cc b/media/base/android/media_codec_bridge.cc
index 92b2791506..7ece5d558e 100644
--- a/media/base/android/media_codec_bridge.cc
+++ b/media/base/android/media_codec_bridge.cc
@@ -176,7 +176,8 @@ bool MediaCodecBridge::IsKnownUnaccelerated(const std::string& mime_type,
// It would be nice if MediaCodecInfo externalized some notion of
// HW-acceleration but it doesn't. Android Media guidance is that the
// prefix below is always used for SW decoders, so that's what we use.
- return StartsWithASCII(codecs_info[i].name, "OMX.google.", true);
+ if (!StartsWithASCII(codecs_info[i].name, "OMX.google.", true))
+ return false;
}
}
return true;
diff --git a/media/base/android/media_drm_bridge.cc b/media/base/android/media_drm_bridge.cc
index 197973588a..3cb5bace51 100644
--- a/media/base/android/media_drm_bridge.cc
+++ b/media/base/android/media_drm_bridge.cc
@@ -444,10 +444,6 @@ void MediaDrmBridge::UpdateSession(uint32 session_id,
base::android::ToJavaByteArray(env, response, response_length);
Java_MediaDrmBridge_updateSession(
env, j_media_drm_.obj(), session_id, j_response.obj());
-
- // TODO(xhwang/jrummell): Move this when usableKeyIds/keyschange are
- // implemented.
- player_tracker_.NotifyNewKey();
}
void MediaDrmBridge::ReleaseSession(uint32 session_id) {
@@ -522,6 +518,9 @@ void MediaDrmBridge::OnSessionReady(JNIEnv* env,
jint j_session_id) {
uint32 session_id = j_session_id;
session_ready_cb_.Run(session_id);
+ // TODO(xhwang/jrummell): Move this when usableKeyIds/keyschange are
+ // implemented.
+ player_tracker_.NotifyNewKey();
}
void MediaDrmBridge::OnSessionClosed(JNIEnv* env,
diff --git a/media/base/android/media_player_manager.h b/media/base/android/media_player_manager.h
index 50e6f06dee..0b79f187d6 100644
--- a/media/base/android/media_player_manager.h
+++ b/media/base/android/media_player_manager.h
@@ -65,6 +65,12 @@ class MEDIA_EXPORT MediaPlayerManager {
// Called by the player to get a hardware protected surface.
virtual void RequestFullScreen(int player_id) = 0;
+
+#if defined(VIDEO_HOLE)
+ // Returns true if a media player should use video-overlay for the embedded
+ // encrypted video.
+ virtual bool ShouldUseVideoOverlayForEmbeddedEncryptedVideo() = 0;
+#endif // defined(VIDEO_HOLE)
};
} // namespace media
diff --git a/media/base/android/media_source_player.cc b/media/base/android/media_source_player.cc
index 6eb5677b2c..f0fe738b06 100644
--- a/media/base/android/media_source_player.cc
+++ b/media/base/android/media_source_player.cc
@@ -38,7 +38,7 @@ MediaSourcePlayer::MediaSourcePlayer(
demuxer_(demuxer.Pass()),
pending_event_(NO_EVENT_PENDING),
playing_(false),
- clock_(&default_tick_clock_),
+ interpolator_(&default_tick_clock_),
doing_browser_seek_(false),
pending_seek_(false),
drm_bridge_(NULL),
@@ -62,7 +62,7 @@ MediaSourcePlayer::MediaSourcePlayer(
base::Bind(&MediaSourcePlayer::OnDemuxerConfigsChanged,
weak_factory_.GetWeakPtr())));
demuxer_->Initialize(this);
- clock_.SetMaxTime(base::TimeDelta());
+ interpolator_.SetUpperBound(base::TimeDelta());
weak_this_ = weak_factory_.GetWeakPtr();
}
@@ -90,7 +90,7 @@ void MediaSourcePlayer::ScheduleSeekEventAndStopDecoding(
pending_seek_ = false;
- clock_.SetTime(seek_time, seek_time);
+ interpolator_.SetBounds(seek_time, seek_time);
if (audio_decoder_job_->is_decoding())
audio_decoder_job_->StopDecode();
@@ -123,7 +123,13 @@ void MediaSourcePlayer::Start() {
playing_ = true;
- if (IsProtectedSurfaceRequired())
+ bool request_fullscreen = IsProtectedSurfaceRequired();
+#if defined(VIDEO_HOLE)
+  // Skip requesting fullscreen when hole-punching is used.
+ request_fullscreen = request_fullscreen &&
+ !manager()->ShouldUseVideoOverlayForEmbeddedEncryptedVideo();
+#endif // defined(VIDEO_HOLE)
+ if (request_fullscreen)
manager()->RequestFullScreen(player_id());
StartInternal();
@@ -173,7 +179,7 @@ void MediaSourcePlayer::SeekTo(base::TimeDelta timestamp) {
}
base::TimeDelta MediaSourcePlayer::GetCurrentTime() {
- return clock_.Elapsed();
+ return std::min(interpolator_.GetInterpolatedTime(), duration_);
}
base::TimeDelta MediaSourcePlayer::GetDuration() {
@@ -237,7 +243,6 @@ void MediaSourcePlayer::OnDemuxerConfigsAvailable(
DVLOG(1) << __FUNCTION__;
DCHECK(!HasAudio() && !HasVideo());
duration_ = configs.duration;
- clock_.SetDuration(duration_);
audio_decoder_job_->SetDemuxerConfigs(configs);
video_decoder_job_->SetDemuxerConfigs(configs);
@@ -257,7 +262,6 @@ void MediaSourcePlayer::OnDemuxerDataAvailable(const DemuxerData& data) {
void MediaSourcePlayer::OnDemuxerDurationChanged(base::TimeDelta duration) {
duration_ = duration;
- clock_.SetDuration(duration_);
}
void MediaSourcePlayer::OnMediaCryptoReady() {
@@ -330,7 +334,7 @@ void MediaSourcePlayer::OnDemuxerSeekDone(
DCHECK(seek_time >= GetCurrentTime());
DVLOG(1) << __FUNCTION__ << " : setting clock to actual browser seek time: "
<< seek_time.InSecondsF();
- clock_.SetTime(seek_time, seek_time);
+ interpolator_.SetBounds(seek_time, seek_time);
audio_decoder_job_->SetBaseTimestamp(seek_time);
} else {
DCHECK(actual_browser_seek_time == kNoTimestamp());
@@ -356,7 +360,8 @@ void MediaSourcePlayer::OnDemuxerSeekDone(
void MediaSourcePlayer::UpdateTimestamps(
base::TimeDelta current_presentation_timestamp,
base::TimeDelta max_presentation_timestamp) {
- clock_.SetTime(current_presentation_timestamp, max_presentation_timestamp);
+ interpolator_.SetBounds(current_presentation_timestamp,
+ max_presentation_timestamp);
manager()->OnTimeUpdate(player_id(), GetCurrentTime());
}
@@ -491,7 +496,7 @@ void MediaSourcePlayer::MediaDecoderCallback(
if (!playing_) {
if (is_clock_manager)
- clock_.Pause();
+ interpolator_.StopInterpolating();
return;
}
@@ -573,7 +578,7 @@ void MediaSourcePlayer::PlaybackCompleted(bool is_audio) {
if (AudioFinished() && VideoFinished()) {
playing_ = false;
- clock_.Pause();
+ interpolator_.StopInterpolating();
start_time_ticks_ = base::TimeTicks();
manager()->OnPlaybackComplete(player_id());
}
@@ -672,8 +677,8 @@ void MediaSourcePlayer::OnPrefetchDone() {
start_time_ticks_ = base::TimeTicks::Now();
start_presentation_timestamp_ = GetCurrentTime();
- if (!clock_.IsPlaying())
- clock_.Play();
+ if (!interpolator_.interpolating())
+ interpolator_.StartInterpolating();
if (!AudioFinished())
DecodeMoreAudio();
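The substitutions above map the old Clock API onto TimeDeltaInterpolator one-for-one: SetBounds() stands in for SetTime()/SetMaxTime(), StartInterpolating()/StopInterpolating() replace Play()/Pause(), and GetCurrentTime() now clamps the interpolated value against |duration_| itself because the interpolator no longer knows the duration. A minimal sketch of the same call sequence in isolation, assuming the Chromium base and media targets and the interpolator's default playback rate of 1.0 (timestamps are invented for illustration):

    #include "base/test/simple_test_tick_clock.h"
    #include "base/time/time.h"
    #include "media/base/time_delta_interpolator.h"

    int main() {
      base::SimpleTestTickClock tick_clock;
      media::TimeDeltaInterpolator interpolator(&tick_clock);

      // A demuxer time update: current and max presentation timestamps, as in
      // MediaSourcePlayer::UpdateTimestamps().
      interpolator.SetBounds(base::TimeDelta::FromMilliseconds(1000),
                             base::TimeDelta::FromMilliseconds(1200));
      interpolator.StartInterpolating();

      // 100ms of wall-clock time passes; the interpolated media time advances
      // with it but is capped by the upper bound set above.
      tick_clock.Advance(base::TimeDelta::FromMilliseconds(100));
      base::TimeDelta now = interpolator.GetInterpolatedTime();

      // Pausing freezes the reported time, as MediaSourcePlayer does when
      // playback stops or completes.
      interpolator.StopInterpolating();
      return now < base::TimeDelta() ? 1 : 0;
    }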
diff --git a/media/base/android/media_source_player.h b/media/base/android/media_source_player.h
index 689c41eb54..5c0df7e25a 100644
--- a/media/base/android/media_source_player.h
+++ b/media/base/android/media_source_player.h
@@ -23,8 +23,8 @@
#include "media/base/android/media_decoder_job.h"
#include "media/base/android/media_drm_bridge.h"
#include "media/base/android/media_player_android.h"
-#include "media/base/clock.h"
#include "media/base/media_export.h"
+#include "media/base/time_delta_interpolator.h"
namespace media {
@@ -200,11 +200,12 @@ class MEDIA_EXPORT MediaSourcePlayer : public MediaPlayerAndroid,
base::TimeDelta duration_;
bool playing_;
- // base::TickClock used by |clock_|.
+ // base::TickClock used by |interpolator_|.
base::DefaultTickClock default_tick_clock_;
- // Reference clock. Keeps track of current playback time.
- Clock clock_;
+ // Tracks the most recent media time update and provides interpolated values
+ // as playback progresses.
+ TimeDeltaInterpolator interpolator_;
// Timestamps for providing simple A/V sync. When start decoding an audio
// chunk, we record its presentation timestamp and the current system time.
diff --git a/media/base/android/media_source_player_unittest.cc b/media/base/android/media_source_player_unittest.cc
index b3eaeafdb1..604c195da3 100644
--- a/media/base/android/media_source_player_unittest.cc
+++ b/media/base/android/media_source_player_unittest.cc
@@ -74,6 +74,11 @@ class MockMediaPlayerManager : public MediaPlayerManager {
virtual MediaPlayerAndroid* GetFullscreenPlayer() OVERRIDE { return NULL; }
virtual MediaPlayerAndroid* GetPlayer(int player_id) OVERRIDE { return NULL; }
virtual void RequestFullScreen(int player_id) OVERRIDE {}
+#if defined(VIDEO_HOLE)
+ virtual bool ShouldUseVideoOverlayForEmbeddedEncryptedVideo() OVERRIDE {
+ return false;
+ }
+#endif // defined(VIDEO_HOLE)
bool playback_completed() const {
return playback_completed_;
diff --git a/media/base/audio_block_fifo.cc b/media/base/audio_block_fifo.cc
new file mode 100644
index 0000000000..b8cecfa164
--- /dev/null
+++ b/media/base/audio_block_fifo.cc
@@ -0,0 +1,83 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/audio_block_fifo.h"
+
+#include "base/logging.h"
+
+namespace media {
+
+AudioBlockFifo::AudioBlockFifo(int channels, int frames, int blocks)
+ : block_frames_(frames),
+ write_block_(0),
+ read_block_(0),
+ available_blocks_(0),
+ write_pos_(0) {
+ // Create |blocks| of audio buses and push them to the containers.
+ audio_blocks_.reserve(blocks);
+ for (int i = 0; i < blocks; ++i) {
+ scoped_ptr<AudioBus> audio_bus = AudioBus::Create(channels, frames);
+ audio_blocks_.push_back(audio_bus.release());
+ }
+}
+
+AudioBlockFifo::~AudioBlockFifo() {}
+
+void AudioBlockFifo::Push(const void* source,
+ int frames,
+ int bytes_per_sample) {
+ DCHECK(source);
+ DCHECK_GT(frames, 0);
+ DCHECK_GT(bytes_per_sample, 0);
+ DCHECK_LT(available_blocks_, static_cast<int>(audio_blocks_.size()));
+
+ const uint8* source_ptr = static_cast<const uint8*>(source);
+ int frames_to_push = frames;
+ while (frames_to_push) {
+ // Get the current write block.
+ AudioBus* current_block = audio_blocks_[write_block_];
+
+ // Figure out what segment sizes we need when adding the new content to
+ // the FIFO.
+ const int push_frames =
+ std::min(block_frames_ - write_pos_, frames_to_push);
+
+    // Deinterleave the content into the FIFO and update |write_pos_|.
+ current_block->FromInterleaved(source_ptr, push_frames, bytes_per_sample);
+ write_pos_ = (write_pos_ + push_frames) % block_frames_;
+ if (!write_pos_) {
+ // The current block is completely filled, increment |write_block_| and
+ // |available_blocks_|.
+ write_block_ = (write_block_ + 1) % audio_blocks_.size();
+ ++available_blocks_;
+ }
+
+ source_ptr += push_frames * bytes_per_sample * current_block->channels();
+ frames_to_push -= push_frames;
+ }
+}
+
+const AudioBus* AudioBlockFifo::Consume() {
+ DCHECK(available_blocks_);
+ AudioBus* audio_bus = audio_blocks_[read_block_];
+ read_block_ = (read_block_ + 1) % audio_blocks_.size();
+ --available_blocks_;
+ return audio_bus;
+}
+
+void AudioBlockFifo::Clear() {
+ write_pos_ = 0;
+ write_block_ = 0;
+ read_block_ = 0;
+ available_blocks_ = 0;
+}
+
+int AudioBlockFifo::GetUnfilledFrames() const {
+ const int unfilled_frames =
+ (audio_blocks_.size() - available_blocks_) * block_frames_ - write_pos_;
+ DCHECK_GE(unfilled_frames, 0);
+ return unfilled_frames;
+}
+
+} // namespace media
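As a worked example of GetUnfilledFrames(): with 4 blocks of 128 frames each, one block already available for consumption (available_blocks_ == 1) and write_pos_ at 32, the unfilled count is (4 - 1) * 128 - 32 = 352 frames.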
diff --git a/media/base/audio_block_fifo.h b/media/base/audio_block_fifo.h
new file mode 100644
index 0000000000..d3d5a8ac24
--- /dev/null
+++ b/media/base/audio_block_fifo.h
@@ -0,0 +1,70 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_AUDIO_BLOCK_FIFO_H_
+#define MEDIA_BASE_AUDIO_BLOCK_FIFO_H_
+
+#include "base/memory/scoped_vector.h"
+#include "media/base/audio_bus.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+// First-in first-out container for AudioBus elements.
+// The FIFO is composed of blocks of AudioBus elements. It accepts interleaved
+// data as input, deinterleaves it into the FIFO, and only allows consuming a
+// whole block of AudioBus elements at a time.
+// This class is thread-unsafe.
+class MEDIA_EXPORT AudioBlockFifo {
+ public:
+  // Creates a new AudioBlockFifo and allocates |blocks| blocks of memory;
+  // each block can store |channels| channels of |frames| frames of data.
+ AudioBlockFifo(int channels, int frames, int blocks);
+ virtual ~AudioBlockFifo();
+
+ // Pushes interleaved audio data from |source| to the FIFO.
+  // The method will deinterleave the data into an audio bus.
+ // Push() will crash if the allocated space is insufficient.
+ void Push(const void* source, int frames, int bytes_per_sample);
+
+ // Consumes a block of audio from the FIFO. Returns an AudioBus which
+ // contains the consumed audio data to avoid copying.
+ // Consume() will crash if the FIFO does not contain a block of data.
+ const AudioBus* Consume();
+
+ // Empties the FIFO without deallocating any memory.
+ void Clear();
+
+  // Number of available blocks of memory ready to be consumed from the FIFO.
+ int available_blocks() const { return available_blocks_; }
+
+ // Number of unfilled frames in the whole FIFO.
+ int GetUnfilledFrames() const;
+
+ private:
+ // The actual FIFO is a vector of audio buses.
+ ScopedVector<AudioBus> audio_blocks_;
+
+ // Maximum number of frames of data one block of memory can contain.
+ // This value is set by |frames| in the constructor.
+ const int block_frames_;
+
+  // Used to keep track of which block of memory is currently being written.
+ int write_block_;
+
+  // Used to keep track of which block of memory is to be consumed next.
+ int read_block_;
+
+ // Number of available blocks of memory to be consumed.
+ int available_blocks_;
+
+  // Current write position within the block currently being written.
+ int write_pos_;
+
+ DISALLOW_COPY_AND_ASSIGN(AudioBlockFifo);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_AUDIO_BLOCK_FIFO_H_
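A minimal usage sketch of the class declared above, pushing interleaved 16-bit stereo samples and consuming whole deinterleaved blocks; the buffer sizes are arbitrary, and this assumes the media target is available to link against:

    #include <vector>

    #include "base/basictypes.h"
    #include "media/base/audio_block_fifo.h"
    #include "media/base/audio_bus.h"

    int main() {
      const int kChannels = 2;
      const int kBlockFrames = 128;
      const int kBlocks = 4;
      media::AudioBlockFifo fifo(kChannels, kBlockFrames, kBlocks);

      // 96 frames of interleaved 16-bit stereo silence.
      std::vector<int16> interleaved(96 * kChannels, 0);
      fifo.Push(&interleaved[0], 96, sizeof(int16));

      // 96 frames is less than one 128-frame block, so nothing is consumable
      // yet; a second push completes the first block.
      if (fifo.available_blocks() == 0)
        fifo.Push(&interleaved[0], 96, sizeof(int16));

      while (fifo.available_blocks() > 0) {
        const media::AudioBus* block = fifo.Consume();  // One deinterleaved block.
        (void)block;  // |block| holds kChannels channels of kBlockFrames frames.
      }
      return 0;
    }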
diff --git a/media/base/audio_block_fifo_unittest.cc b/media/base/audio_block_fifo_unittest.cc
new file mode 100644
index 0000000000..8e8b5e0715
--- /dev/null
+++ b/media/base/audio_block_fifo_unittest.cc
@@ -0,0 +1,149 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/audio_block_fifo.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+class AudioBlockFifoTest : public testing::Test {
+ public:
+ AudioBlockFifoTest() {}
+ virtual ~AudioBlockFifoTest() {}
+
+ void PushAndVerify(AudioBlockFifo* fifo, int frames_to_push,
+ int channels, int block_frames, int max_frames) {
+ const int bytes_per_sample = 2;
+ const int data_byte_size = bytes_per_sample * channels * frames_to_push;
+ scoped_ptr<uint8[]> data(new uint8[data_byte_size]);
+ memset(data.get(), 0, data_byte_size);
+
+ for (int filled_frames = max_frames - fifo->GetUnfilledFrames();
+ filled_frames + frames_to_push <= max_frames;) {
+ fifo->Push(data.get(), frames_to_push, bytes_per_sample);
+ filled_frames += frames_to_push;
+ EXPECT_EQ(max_frames - filled_frames, fifo->GetUnfilledFrames());
+ EXPECT_EQ(static_cast<int>(filled_frames / block_frames),
+ fifo->available_blocks());
+ }
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(AudioBlockFifoTest);
+};
+
+// Verify that construction works as intended.
+TEST_F(AudioBlockFifoTest, Construct) {
+ const int channels = 6;
+ const int frames = 128;
+ const int blocks = 4;
+ AudioBlockFifo fifo(channels, frames, blocks);
+ EXPECT_EQ(0, fifo.available_blocks());
+ EXPECT_EQ(frames * blocks, fifo.GetUnfilledFrames());
+}
+
+// Pushes audio bus objects to/from a FIFO up to different degrees.
+TEST_F(AudioBlockFifoTest, Push) {
+ const int channels = 2;
+ const int frames = 128;
+ const int blocks = 2;
+ AudioBlockFifo fifo(channels, frames, blocks);
+
+ // Push frames / 2 of data until FIFO is full.
+ PushAndVerify(&fifo, frames / 2, channels, frames, frames * blocks);
+ fifo.Clear();
+
+ // Push frames of data until FIFO is full.
+ PushAndVerify(&fifo, frames, channels, frames, frames * blocks);
+ fifo.Clear();
+
+ // Push 1.5 * frames of data.
+ PushAndVerify(&fifo, frames * 1.5, channels, frames, frames * blocks);
+ fifo.Clear();
+}
+
+// Perform a sequence of Push/Consume calls to different degrees, and verify
+// things are correct.
+TEST_F(AudioBlockFifoTest, PushAndConsume) {
+ const int channels = 2;
+ const int frames = 441;
+ const int blocks = 4;
+ AudioBlockFifo fifo(channels, frames, blocks);
+ PushAndVerify(&fifo, frames, channels, frames, frames * blocks);
+ EXPECT_TRUE(fifo.GetUnfilledFrames() == 0);
+ EXPECT_TRUE(fifo.available_blocks() == blocks);
+
+ // Consume 1 block of data.
+ const AudioBus* bus = fifo.Consume();
+ EXPECT_TRUE(channels == bus->channels());
+ EXPECT_TRUE(frames == bus->frames());
+ EXPECT_TRUE(fifo.available_blocks() == (blocks - 1));
+ EXPECT_TRUE(fifo.GetUnfilledFrames() == frames);
+
+ // Fill it up again.
+ PushAndVerify(&fifo, frames, channels, frames, frames * blocks);
+ EXPECT_TRUE(fifo.GetUnfilledFrames() == 0);
+ EXPECT_TRUE(fifo.available_blocks() == blocks);
+
+ // Consume all blocks of data.
+ for (int i = 1; i <= blocks; ++i) {
+ const AudioBus* bus = fifo.Consume();
+ EXPECT_TRUE(channels == bus->channels());
+ EXPECT_TRUE(frames == bus->frames());
+ EXPECT_TRUE(fifo.GetUnfilledFrames() == frames * i);
+ EXPECT_TRUE(fifo.available_blocks() == (blocks - i));
+ }
+ EXPECT_TRUE(fifo.GetUnfilledFrames() == frames * blocks);
+ EXPECT_TRUE(fifo.available_blocks() == 0);
+
+ fifo.Clear();
+ int new_push_frames = 128;
+  // Change the input frame count and try to fill up the FIFO.
+ PushAndVerify(&fifo, new_push_frames, channels, frames,
+ frames * blocks);
+ EXPECT_TRUE(fifo.GetUnfilledFrames() != 0);
+  EXPECT_TRUE(fifo.available_blocks() == blocks - 1);
+
+ // Consume all the existing filled blocks of data.
+ while (fifo.available_blocks()) {
+ const AudioBus* bus = fifo.Consume();
+ EXPECT_TRUE(channels == bus->channels());
+ EXPECT_TRUE(frames == bus->frames());
+ }
+
+  // Since one block of the FIFO has not been completely filled up, there should
+ // be remaining frames.
+ const int number_of_push =
+ static_cast<int>(frames * blocks / new_push_frames);
+ const int remain_frames = frames * blocks - fifo.GetUnfilledFrames();
+ EXPECT_EQ(number_of_push * new_push_frames - frames * (blocks - 1),
+ remain_frames);
+
+ // Completely fill up the buffer again.
+ new_push_frames = frames * blocks - remain_frames;
+ PushAndVerify(&fifo, new_push_frames, channels, frames,
+ frames * blocks);
+ EXPECT_TRUE(fifo.GetUnfilledFrames() == 0);
+ EXPECT_TRUE(fifo.available_blocks() == blocks);
+}
+
+// Perform a sequence of Push/Consume calls to a 1 block FIFO.
+TEST_F(AudioBlockFifoTest, PushAndConsumeOneBlockFifo) {
+ static const int channels = 2;
+ static const int frames = 441;
+ static const int blocks = 1;
+ AudioBlockFifo fifo(channels, frames, blocks);
+ PushAndVerify(&fifo, frames, channels, frames, frames * blocks);
+ EXPECT_TRUE(fifo.GetUnfilledFrames() == 0);
+ EXPECT_TRUE(fifo.available_blocks() == blocks);
+
+ // Consume 1 block of data.
+ const AudioBus* bus = fifo.Consume();
+ EXPECT_TRUE(channels == bus->channels());
+ EXPECT_TRUE(frames == bus->frames());
+ EXPECT_TRUE(fifo.available_blocks() == 0);
+ EXPECT_TRUE(fifo.GetUnfilledFrames() == frames);
+}
+
+} // namespace media
diff --git a/media/base/audio_bus.cc b/media/base/audio_bus.cc
index e34c748939..c6236b642b 100644
--- a/media/base/audio_bus.cc
+++ b/media/base/audio_bus.cc
@@ -340,4 +340,15 @@ void AudioBus::SwapChannels(int a, int b) {
std::swap(channel_data_[a], channel_data_[b]);
}
+scoped_refptr<AudioBusRefCounted> AudioBusRefCounted::Create(
+ int channels, int frames) {
+ return scoped_refptr<AudioBusRefCounted>(
+ new AudioBusRefCounted(channels, frames));
+}
+
+AudioBusRefCounted::AudioBusRefCounted(int channels, int frames)
+ : AudioBus(channels, frames) {}
+
+AudioBusRefCounted::~AudioBusRefCounted() {}
+
} // namespace media
diff --git a/media/base/audio_bus.h b/media/base/audio_bus.h
index c5b161f023..c78486e5e0 100644
--- a/media/base/audio_bus.h
+++ b/media/base/audio_bus.h
@@ -8,6 +8,7 @@
#include <vector>
#include "base/memory/aligned_memory.h"
+#include "base/memory/ref_counted.h"
#include "base/memory/scoped_ptr.h"
#include "media/base/media_export.h"
@@ -110,12 +111,13 @@ class MEDIA_EXPORT AudioBus {
virtual ~AudioBus();
- private:
+ protected:
AudioBus(int channels, int frames);
AudioBus(int channels, int frames, float* data);
AudioBus(int frames, const std::vector<float*>& channel_data);
explicit AudioBus(int channels);
+ private:
// Helper method for building |channel_data_| from a block of memory. |data|
// must be at least BlockSize() bytes in size.
void BuildChannelData(int channels, int aligned_frame, float* data);
@@ -132,6 +134,24 @@ class MEDIA_EXPORT AudioBus {
DISALLOW_COPY_AND_ASSIGN(AudioBus);
};
+// RefCounted version of AudioBus. This is not meant for general use. Only use
+// this when your lifetime requirements make it impossible to use an
+// AudioBus scoped_ptr.
+class MEDIA_EXPORT AudioBusRefCounted
+ : public media::AudioBus,
+ public base::RefCountedThreadSafe<AudioBusRefCounted> {
+ public:
+ static scoped_refptr<AudioBusRefCounted> Create(int channels, int frames);
+
+ private:
+ friend class base::RefCountedThreadSafe<AudioBusRefCounted>;
+
+ AudioBusRefCounted(int channels, int frames);
+ virtual ~AudioBusRefCounted();
+
+ DISALLOW_COPY_AND_ASSIGN(AudioBusRefCounted);
+};
+
} // namespace media
#endif // MEDIA_BASE_AUDIO_BUS_H_
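A short sketch of the ref-counted variant added above: the only public entry point is Create(), and the bus is shared via scoped_refptr rather than owned through a scoped_ptr (the helper name is illustrative, and this assumes AudioBus::Zero() from the existing interface):

    #include "base/memory/ref_counted.h"
    #include "media/base/audio_bus.h"

    // Hypothetical consumer; the reference count keeps the allocation alive for
    // as long as any holder needs it.
    static void FillWithSilence(
        const scoped_refptr<media::AudioBusRefCounted>& bus) {
      bus->Zero();
    }

    int main() {
      scoped_refptr<media::AudioBusRefCounted> bus =
          media::AudioBusRefCounted::Create(2, 441);
      FillWithSilence(bus);
      return bus->frames() == 441 ? 0 : 1;
    }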
diff --git a/media/base/audio_capturer_source.h b/media/base/audio_capturer_source.h
index b584f8a48d..621c3921ab 100644
--- a/media/base/audio_capturer_source.h
+++ b/media/base/audio_capturer_source.h
@@ -24,7 +24,7 @@ class AudioCapturerSource
class CaptureCallback {
public:
// Callback to deliver the captured data from the OS.
- virtual void Capture(AudioBus* audio_source,
+ virtual void Capture(const AudioBus* audio_source,
int audio_delay_milliseconds,
double volume,
bool key_pressed) = 0;
diff --git a/media/base/audio_decoder.h b/media/base/audio_decoder.h
index 0118b5e4fd..779e5ef502 100644
--- a/media/base/audio_decoder.h
+++ b/media/base/audio_decoder.h
@@ -25,7 +25,7 @@ class MEDIA_EXPORT AudioDecoder {
// match, break them into a decoder_status.h.
enum Status {
kOk, // We're all good.
- kAborted, // We aborted as a result of Stop() or Reset().
+ kAborted, // We aborted as a result of Reset() or destruction.
kDecodeError, // A decoding error occurred.
kDecryptError // Decrypting error happened.
};
@@ -40,6 +40,11 @@ class MEDIA_EXPORT AudioDecoder {
typedef base::Callback<void(Status)> DecodeCB;
AudioDecoder();
+
+ // Fires any pending callbacks, stops and destroys the decoder.
+ // Note: Since this is a destructor, |this| will be destroyed after this call.
+  // Make sure the callbacks fired from this call don't post any tasks that
+ // depends on |this|.
virtual ~AudioDecoder();
// Initializes an AudioDecoder with the given DemuxerStream, executing the
@@ -68,14 +73,6 @@ class MEDIA_EXPORT AudioDecoder {
// aborted before |closure| is called.
virtual void Reset(const base::Closure& closure) = 0;
- // Stops decoder, fires any pending callbacks and sets the decoder to an
- // uninitialized state. An AudioDecoder cannot be re-initialized after it has
- // been stopped. DecodeCB and OutputCB may still be called for older buffers
- // if they were scheduled before this method is called.
- // Note that if Initialize() is pending or has finished successfully, Stop()
- // must be called before destructing the decoder.
- virtual void Stop() = 0;
-
private:
DISALLOW_COPY_AND_ASSIGN(AudioDecoder);
};
diff --git a/media/base/audio_hash.cc b/media/base/audio_hash.cc
index 28f16418b6..7d705820c2 100644
--- a/media/base/audio_hash.cc
+++ b/media/base/audio_hash.cc
@@ -16,7 +16,6 @@ namespace media {
AudioHash::AudioHash()
: audio_hash_(),
sample_count_(0) {
- COMPILE_ASSERT(arraysize(audio_hash_) == kHashBuckets, audio_hash_size_error);
}
AudioHash::~AudioHash() {}
@@ -27,7 +26,8 @@ void AudioHash::Update(const AudioBus* audio_bus, int frames) {
const float* channel = audio_bus->channel(ch);
for (uint32 i = 0; i < static_cast<uint32>(frames); ++i) {
const uint32 kSampleIndex = sample_count_ + i;
- const uint32 kHashIndex = (kSampleIndex * (ch + 1)) % kHashBuckets;
+ const uint32 kHashIndex =
+ (kSampleIndex * (ch + 1)) % arraysize(audio_hash_);
// Mix in a sine wave with the result so we ensure that sequences of empty
// buffers don't result in an empty hash.
diff --git a/media/base/audio_hash.h b/media/base/audio_hash.h
index 91d6edf904..ec733832f7 100644
--- a/media/base/audio_hash.h
+++ b/media/base/audio_hash.h
@@ -44,8 +44,7 @@ class MEDIA_EXPORT AudioHash {
// Storage for the audio hash. The number of buckets controls the importance
// of position in the hash. A higher number reduces the chance of false
// positives related to incorrect sample position. Value chosen by dice roll.
- enum { kHashBuckets = 6 };
- float audio_hash_[kHashBuckets];
+ float audio_hash_[6];
// The total number of samples processed per channel. Uses a uint32 instead
// of size_t so overflows on 64-bit and 32-bit machines are equivalent.
diff --git a/media/base/audio_renderer.h b/media/base/audio_renderer.h
index 2d7d3e18a2..48ab6846b7 100644
--- a/media/base/audio_renderer.h
+++ b/media/base/audio_renderer.h
@@ -8,6 +8,7 @@
#include "base/callback.h"
#include "base/memory/ref_counted.h"
#include "base/time/time.h"
+#include "media/base/buffering_state.h"
#include "media/base/media_export.h"
#include "media/base/pipeline_status.h"
@@ -29,21 +30,19 @@ class MEDIA_EXPORT AudioRenderer {
//
// |statistics_cb| is executed periodically with audio rendering stats.
//
- // |underflow_cb| is executed when the renderer runs out of data to pass to
- // the audio card during playback. ResumeAfterUnderflow() must be called
- // to resume playback. Pause(), Preroll(), or Stop() cancels the underflow
- // condition.
- //
// |time_cb| is executed whenever time has advanced by way of audio rendering.
//
+ // |buffering_state_cb| is executed when audio rendering has either run out of
+ // data or has enough data to continue playback.
+ //
// |ended_cb| is executed when audio rendering has reached the end of stream.
//
// |error_cb| is executed if an error was encountered.
virtual void Initialize(DemuxerStream* stream,
const PipelineStatusCB& init_cb,
const StatisticsCB& statistics_cb,
- const base::Closure& underflow_cb,
const TimeCB& time_cb,
+ const BufferingStateCB& buffering_state_cb,
const base::Closure& ended_cb,
const PipelineStatusCB& error_cb) = 0;
@@ -56,14 +55,16 @@ class MEDIA_EXPORT AudioRenderer {
virtual void StopRendering() = 0;
// Discard any audio data, executing |callback| when completed.
+ //
+ // Clients should expect |buffering_state_cb| to be called with
+ // BUFFERING_HAVE_NOTHING while flushing is in progress.
virtual void Flush(const base::Closure& callback) = 0;
- // Start prerolling audio data for samples starting at |time|, executing
- // |callback| when completed.
+ // Starts playback by reading from |stream| and decoding and rendering audio.
+ // |timestamp| is the media timestamp playback should start rendering from.
//
// Only valid to call after a successful Initialize() or Flush().
- virtual void Preroll(base::TimeDelta time,
- const PipelineStatusCB& callback) = 0;
+ virtual void StartPlayingFrom(base::TimeDelta timestamp) = 0;
// Stop all operations in preparation for being deleted, executing |callback|
// when complete.
@@ -75,9 +76,6 @@ class MEDIA_EXPORT AudioRenderer {
// Sets the output volume.
virtual void SetVolume(float volume) = 0;
- // Resumes playback after underflow occurs.
- virtual void ResumeAfterUnderflow() = 0;
-
private:
DISALLOW_COPY_AND_ASSIGN(AudioRenderer);
};
diff --git a/media/base/audio_renderer_mixer_input.cc b/media/base/audio_renderer_mixer_input.cc
index 07e249a9e5..ab9f0a7eca 100644
--- a/media/base/audio_renderer_mixer_input.cc
+++ b/media/base/audio_renderer_mixer_input.cc
@@ -24,20 +24,17 @@ AudioRendererMixerInput::AudioRendererMixerInput(
}
AudioRendererMixerInput::~AudioRendererMixerInput() {
- // Mixer is no longer safe to use after |remove_mixer_cb_| has been called.
- if (initialized_) {
- mixer_->RemoveErrorCallback(error_cb_);
- remove_mixer_cb_.Run(params_);
- }
+ DCHECK(!playing_);
+ DCHECK(!mixer_);
}
void AudioRendererMixerInput::Initialize(
const AudioParameters& params,
AudioRendererSink::RenderCallback* callback) {
+ DCHECK(callback);
DCHECK(!initialized_);
+
params_ = params;
- mixer_ = get_mixer_cb_.Run(params_);
- mixer_->AddErrorCallback(error_cb_);
callback_ = callback;
initialized_ = true;
}
@@ -45,20 +42,34 @@ void AudioRendererMixerInput::Initialize(
void AudioRendererMixerInput::Start() {
DCHECK(initialized_);
DCHECK(!playing_);
+ DCHECK(!mixer_);
+ mixer_ = get_mixer_cb_.Run(params_);
+
+ // Note: OnRenderError() may be called immediately after this call returns.
+ mixer_->AddErrorCallback(error_cb_);
}
void AudioRendererMixerInput::Stop() {
  // Stop() may be called at any time; if Pause() hasn't been called we need to
// remove our mixer input before shutdown.
- if (!playing_)
- return;
+ if (playing_) {
+ mixer_->RemoveMixerInput(this);
+ playing_ = false;
+ }
- mixer_->RemoveMixerInput(this);
- playing_ = false;
+ if (mixer_) {
+ // TODO(dalecurtis): This is required so that |callback_| isn't called after
+ // Stop() by an error event since it may outlive this ref-counted object. We
+ // should instead have sane ownership semantics: http://crbug.com/151051
+ mixer_->RemoveErrorCallback(error_cb_);
+ remove_mixer_cb_.Run(params_);
+ mixer_ = NULL;
+ }
}
void AudioRendererMixerInput::Play() {
DCHECK(initialized_);
+ DCHECK(mixer_);
if (playing_)
return;
@@ -69,6 +80,7 @@ void AudioRendererMixerInput::Play() {
void AudioRendererMixerInput::Pause() {
DCHECK(initialized_);
+ DCHECK(mixer_);
if (!playing_)
return;
diff --git a/media/base/audio_renderer_mixer_input.h b/media/base/audio_renderer_mixer_input.h
index 6b026cf9c2..c7e24c6fbb 100644
--- a/media/base/audio_renderer_mixer_input.h
+++ b/media/base/audio_renderer_mixer_input.h
@@ -68,7 +68,7 @@ class MEDIA_EXPORT AudioRendererMixerInput
AudioRendererSink::RenderCallback* callback_;
// Error callback for handing to AudioRendererMixer.
- base::Closure error_cb_;
+ const base::Closure error_cb_;
DISALLOW_COPY_AND_ASSIGN(AudioRendererMixerInput);
};
diff --git a/media/base/audio_renderer_mixer_input_unittest.cc b/media/base/audio_renderer_mixer_input_unittest.cc
index 9a019db571..be03867027 100644
--- a/media/base/audio_renderer_mixer_input_unittest.cc
+++ b/media/base/audio_renderer_mixer_input_unittest.cc
@@ -28,7 +28,6 @@ class AudioRendererMixerInputTest : public testing::Test {
CreateMixerInput();
fake_callback_.reset(new FakeAudioRenderCallback(0));
mixer_input_->Initialize(audio_parameters_, fake_callback_.get());
- EXPECT_CALL(*this, RemoveMixer(testing::_));
audio_bus_ = AudioBus::Create(audio_parameters_);
}
@@ -49,6 +48,7 @@ class AudioRendererMixerInputTest : public testing::Test {
mixer_.reset(new AudioRendererMixer(
audio_parameters_, audio_parameters_, sink));
}
+ EXPECT_CALL(*this, RemoveMixer(testing::_));
return mixer_.get();
}
@@ -109,4 +109,12 @@ TEST_F(AudioRendererMixerInputTest, StopBeforeInitializeOrStart) {
mixer_input_->Stop();
}
+// Test that Start() can be called after Stop().
+// TODO(dalecurtis): We shouldn't allow this. See http://crbug.com/151051
+TEST_F(AudioRendererMixerInputTest, StartAfterStop) {
+ mixer_input_->Stop();
+ mixer_input_->Start();
+ mixer_input_->Stop();
+}
+
} // namespace media
diff --git a/media/base/audio_renderer_mixer_unittest.cc b/media/base/audio_renderer_mixer_unittest.cc
index 857df2629e..cb58a03855 100644
--- a/media/base/audio_renderer_mixer_unittest.cc
+++ b/media/base/audio_renderer_mixer_unittest.cc
@@ -396,8 +396,10 @@ TEST_P(AudioRendererMixerBehavioralTest, OnRenderError) {
TEST_P(AudioRendererMixerBehavioralTest, OnRenderErrorPausedInput) {
InitializeInputs(kMixerInputs);
- for (size_t i = 0; i < mixer_inputs_.size(); ++i)
+ for (size_t i = 0; i < mixer_inputs_.size(); ++i) {
+ mixer_inputs_[i]->Start();
EXPECT_CALL(*fake_callbacks_[i], OnRenderError()).Times(1);
+ }
  // Fire the error before attaching any inputs. Ensure an error is received
// even if the input is not connected.
diff --git a/media/base/audio_splicer.cc b/media/base/audio_splicer.cc
index 2fb4180ff9..7fafc8bbba 100644
--- a/media/base/audio_splicer.cc
+++ b/media/base/audio_splicer.cc
@@ -16,12 +16,6 @@
namespace media {
-// Largest gap or overlap allowed by this class. Anything
-// larger than this will trigger an error.
-// This is an arbitrary value, but the initial selection of 50ms
-// roughly represents the duration of 2 compressed AAC or MP3 frames.
-static const int kMaxTimeDeltaInMilliseconds = 50;
-
// Minimum gap size needed before the splicer will take action to
// fill a gap. This avoids periodically inserting and then dropping samples
// when the buffer timestamps are slightly off because of timestamp rounding
@@ -143,7 +137,8 @@ bool AudioStreamSanitizer::AddInput(const scoped_refptr<AudioBuffer>& input) {
output_timestamp_helper_.GetTimestamp();
const base::TimeDelta delta = timestamp - expected_timestamp;
- if (std::abs(delta.InMilliseconds()) > kMaxTimeDeltaInMilliseconds) {
+ if (std::abs(delta.InMilliseconds()) >
+ AudioSplicer::kMaxTimeDeltaInMilliseconds) {
DVLOG(1) << "Timestamp delta too large: " << delta.InMicroseconds() << "us";
return false;
}
@@ -310,7 +305,12 @@ bool AudioSplicer::AddInput(const scoped_refptr<AudioBuffer>& input) {
if (pre_splice_sanitizer_->GetFrameCount() <=
output_ts_helper.GetFramesToTarget(splice_timestamp_)) {
CHECK(pre_splice_sanitizer_->DrainInto(output_sanitizer_.get()));
- CHECK(post_splice_sanitizer_->DrainInto(output_sanitizer_.get()));
+
+ // If the file contains incorrectly muxed timestamps, there may be huge gaps
+ // between the demuxed and decoded timestamps.
+ if (!post_splice_sanitizer_->DrainInto(output_sanitizer_.get()))
+ return false;
+
reset_splice_timestamps();
return true;
}
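The CHECK that used to fire when the post-splice buffers could not be drained is now a soft failure, and the gap test compares against the constant that moves into the public header below. A minimal standalone sketch of that tolerance check, with invented timestamps:

    #include <cassert>
    #include <cstdlib>

    // Mirrors AudioSplicer::kMaxTimeDeltaInMilliseconds from audio_splicer.h.
    static const int kMaxTimeDeltaInMilliseconds = 50;

    // Returns false (an error) when the gap or overlap between the expected and
    // actual buffer timestamps exceeds what the splicer tolerates.
    static bool TimestampWithinTolerance(int expected_ms, int actual_ms) {
      return std::abs(actual_ms - expected_ms) <= kMaxTimeDeltaInMilliseconds;
    }

    int main() {
      assert(TimestampWithinTolerance(1000, 1040));   // 40ms gap: accepted.
      assert(!TimestampWithinTolerance(1000, 1060));  // 60ms gap: rejected.
      return 0;
    }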
diff --git a/media/base/audio_splicer.h b/media/base/audio_splicer.h
index 389607ae2c..0db5d08dd7 100644
--- a/media/base/audio_splicer.h
+++ b/media/base/audio_splicer.h
@@ -27,6 +27,12 @@ class MEDIA_EXPORT AudioSplicer {
enum {
// The number of ms to crossfade before trimming when buffers overlap.
kCrossfadeDurationInMilliseconds = 5,
+
+ // Largest gap or overlap allowed between buffers. Anything larger than
+ // this will trigger an error. This is an arbitrary value, but the initial
+ // selection of 50ms roughly represents the duration of 2 compressed AAC or
+ // MP3 frames.
+ kMaxTimeDeltaInMilliseconds = 50,
};
// Resets the splicer state by clearing the output buffers queue and resetting
diff --git a/media/base/audio_splicer_unittest.cc b/media/base/audio_splicer_unittest.cc
index 2e46b9fb12..e6de2c62ed 100644
--- a/media/base/audio_splicer_unittest.cc
+++ b/media/base/audio_splicer_unittest.cc
@@ -682,4 +682,42 @@ TEST_F(AudioSplicerTest, IncorrectlyMarkedSpliceWithGap) {
EXPECT_FALSE(splicer_.HasNextBuffer());
}
+// Test behavior when a splice frame is incorrectly marked and there is a gap
+// between the pre-splice and post-splice buffers that is too large to recover
+// from.
+// +--------+
+// |11111111|
+// +--------+
+// +------+
+// |222222|
+// +------+
+// Results in an error and not a crash.
+TEST_F(AudioSplicerTest, IncorrectlyMarkedSpliceWithBadGap) {
+ const int kBufferSize =
+ input_timestamp_helper_.GetFramesToTarget(max_crossfade_duration()) * 2;
+ const int kGapSize = kBufferSize +
+ input_timestamp_helper_.GetFramesToTarget(
+ base::TimeDelta::FromMilliseconds(
+ AudioSplicer::kMaxTimeDeltaInMilliseconds + 1));
+
+ scoped_refptr<AudioBuffer> first_buffer =
+ GetNextInputBuffer(1.0f, kBufferSize);
+ scoped_refptr<AudioBuffer> gap_buffer =
+ GetNextInputBuffer(0.0f, kGapSize);
+ splicer_.SetSpliceTimestamp(input_timestamp_helper_.GetTimestamp());
+ scoped_refptr<AudioBuffer> second_buffer =
+ GetNextInputBuffer(0.0f, kBufferSize);
+
+ // The splicer should pass through the first buffer since it's not part of the
+ // splice.
+ EXPECT_TRUE(AddInput(first_buffer));
+ VerifyNextBuffer(first_buffer);
+
+ // Do not add |gap_buffer|.
+
+ // |second_buffer| will complete the supposed splice.
+ splicer_.SetSpliceTimestamp(kNoTimestamp());
+ EXPECT_FALSE(AddInput(second_buffer));
+}
+
} // namespace media
diff --git a/media/base/audio_video_metadata_extractor.cc b/media/base/audio_video_metadata_extractor.cc
index 7a8cf766ab..fd666f6c91 100644
--- a/media/base/audio_video_metadata_extractor.cc
+++ b/media/base/audio_video_metadata_extractor.cc
@@ -236,8 +236,9 @@ void AudioVideoMetadataExtractor::ExtractDictionary(
if (!metadata)
return;
- AVDictionaryEntry* tag = NULL;
- while ((tag = av_dict_get(metadata, "", tag, AV_DICT_IGNORE_SUFFIX))) {
+ for (AVDictionaryEntry* tag =
+ av_dict_get(metadata, "", NULL, AV_DICT_IGNORE_SUFFIX);
+ tag; tag = av_dict_get(metadata, "", tag, AV_DICT_IGNORE_SUFFIX)) {
if (raw_tags->find(tag->key) == raw_tags->end())
(*raw_tags)[tag->key] = tag->value;
diff --git a/media/base/buffering_state.h b/media/base/buffering_state.h
index 3140505847..935922c9b4 100644
--- a/media/base/buffering_state.h
+++ b/media/base/buffering_state.h
@@ -21,6 +21,9 @@ enum BufferingState {
BUFFERING_HAVE_ENOUGH,
};
+// Used to indicate changes in buffering state.
+typedef base::Callback<void(BufferingState)> BufferingStateCB;
+
} // namespace media
#endif // MEDIA_BASE_BUFFERING_STATE_H_
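The new callback type lets the renderers report transitions between the states declared above, replacing the old dedicated underflow callback. A minimal sketch of a client reacting to it, using C++11 and std::function in place of base::Callback purely to keep the example self-contained (the handler behavior is illustrative):

    #include <functional>
    #include <iostream>

    // Mirrors media::BufferingState from buffering_state.h.
    enum BufferingState {
      BUFFERING_HAVE_NOTHING,
      BUFFERING_HAVE_ENOUGH,
    };

    typedef std::function<void(BufferingState)> BufferingStateCB;

    int main() {
      // A client would hand a callback like this to AudioRenderer::Initialize()
      // or Pipeline::Start() where the underflow callback used to go.
      BufferingStateCB buffering_state_cb = [](BufferingState state) {
        std::cout << (state == BUFFERING_HAVE_ENOUGH ? "resume playback"
                                                     : "stall and rebuffer")
                  << std::endl;
      };

      buffering_state_cb(BUFFERING_HAVE_NOTHING);  // Ran out of data.
      buffering_state_cb(BUFFERING_HAVE_ENOUGH);   // Enough data to continue.
      return 0;
    }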
diff --git a/media/base/clock.cc b/media/base/clock.cc
deleted file mode 100644
index 3dc49e9e79..0000000000
--- a/media/base/clock.cc
+++ /dev/null
@@ -1,130 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/clock.h"
-
-#include <algorithm>
-
-#include "base/logging.h"
-#include "base/time/tick_clock.h"
-#include "media/base/buffers.h"
-
-namespace media {
-
-Clock::Clock(base::TickClock* clock)
- : clock_(clock),
- playing_(false),
- underflow_(false),
- playback_rate_(1.0f),
- max_time_(kNoTimestamp()),
- duration_(kNoTimestamp()) {
- DCHECK(clock_);
-}
-
-Clock::~Clock() {}
-
-bool Clock::IsPlaying() const {
- return playing_;
-}
-
-base::TimeDelta Clock::Play() {
- DCHECK(!playing_);
- UpdateReferencePoints();
- playing_ = true;
- return media_time_;
-}
-
-base::TimeDelta Clock::Pause() {
- DCHECK(playing_);
- UpdateReferencePoints();
- playing_ = false;
- return media_time_;
-}
-
-void Clock::SetPlaybackRate(float playback_rate) {
- UpdateReferencePoints();
- playback_rate_ = playback_rate;
-}
-
-void Clock::SetTime(base::TimeDelta current_time, base::TimeDelta max_time) {
- DCHECK(current_time <= max_time);
- DCHECK(current_time != kNoTimestamp());
-
- UpdateReferencePoints(current_time);
- max_time_ = ClampToValidTimeRange(max_time);
- underflow_ = false;
-}
-
-base::TimeDelta Clock::Elapsed() {
- if (duration_ == kNoTimestamp())
- return base::TimeDelta();
-
- // The clock is not advancing, so return the last recorded time.
- if (!playing_ || underflow_)
- return media_time_;
-
- base::TimeDelta elapsed = EstimatedElapsedTime();
- if (max_time_ != kNoTimestamp() && elapsed > max_time_) {
- UpdateReferencePoints(max_time_);
- underflow_ = true;
- elapsed = max_time_;
- }
-
- return elapsed;
-}
-
-void Clock::SetMaxTime(base::TimeDelta max_time) {
- DCHECK(max_time != kNoTimestamp());
-
- UpdateReferencePoints();
- max_time_ = ClampToValidTimeRange(max_time);
-
- underflow_ = media_time_ > max_time_;
- if (underflow_)
- media_time_ = max_time_;
-}
-
-void Clock::SetDuration(base::TimeDelta duration) {
- DCHECK(duration > base::TimeDelta());
- duration_ = duration;
-
- media_time_ = ClampToValidTimeRange(media_time_);
- if (max_time_ != kNoTimestamp())
- max_time_ = ClampToValidTimeRange(max_time_);
-}
-
-base::TimeDelta Clock::ElapsedViaProvidedTime(
- const base::TimeTicks& time) const {
- // TODO(scherkus): floating point badness scaling time by playback rate.
- int64 now_us = (time - reference_).InMicroseconds();
- now_us = static_cast<int64>(now_us * playback_rate_);
- return media_time_ + base::TimeDelta::FromMicroseconds(now_us);
-}
-
-base::TimeDelta Clock::ClampToValidTimeRange(base::TimeDelta time) const {
- if (duration_ == kNoTimestamp())
- return base::TimeDelta();
- return std::max(std::min(time, duration_), base::TimeDelta());
-}
-
-base::TimeDelta Clock::Duration() const {
- if (duration_ == kNoTimestamp())
- return base::TimeDelta();
- return duration_;
-}
-
-void Clock::UpdateReferencePoints() {
- UpdateReferencePoints(Elapsed());
-}
-
-void Clock::UpdateReferencePoints(base::TimeDelta current_time) {
- media_time_ = ClampToValidTimeRange(current_time);
- reference_ = clock_->NowTicks();
-}
-
-base::TimeDelta Clock::EstimatedElapsedTime() {
- return ClampToValidTimeRange(ElapsedViaProvidedTime(clock_->NowTicks()));
-}
-
-} // namespace media
diff --git a/media/base/clock.h b/media/base/clock.h
deleted file mode 100644
index fbd7ca125d..0000000000
--- a/media/base/clock.h
+++ /dev/null
@@ -1,124 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_BASE_CLOCK_H_
-#define MEDIA_BASE_CLOCK_H_
-
-#include "base/basictypes.h"
-#include "base/time/time.h"
-#include "media/base/media_export.h"
-
-namespace base {
-class TickClock;
-} // namespace base
-
-namespace media {
-
-// A clock represents a single source of time to allow audio and video streams
-// to synchronize with each other. Clock essentially tracks the media time with
-// respect to some other source of time, whether that may be the monotonic
-// system clock or updates via SetTime(). Clock uses linear interpolation to
-// calculate the current media time since the last time SetTime() was called.
-//
-// Clocks start off paused with a playback rate of 1.0f and a media time of 0.
-//
-// Clock is not thread-safe and must be externally locked.
-//
-// TODO(scherkus): Clock will some day be responsible for executing callbacks
-// given a media time. This will be used primarily by video renderers. For now
-// we'll keep using a poll-and-sleep solution.
-//
-// TODO(miu): Rename media::Clock to avoid confusion (and tripping up the media
-// PRESUBMIT script on future changes).
-class MEDIA_EXPORT Clock {
- public:
- explicit Clock(base::TickClock* clock);
- ~Clock();
-
- // Returns true if the clock is running.
- bool IsPlaying() const;
-
- // Starts the clock and returns the current media time, which will increase
- // with respect to the current playback rate.
- base::TimeDelta Play();
-
- // Stops the clock and returns the current media time, which will remain
- // constant until Play() is called.
- base::TimeDelta Pause();
-
- // Sets a new playback rate. The rate at which the media time will increase
- // will now change.
- void SetPlaybackRate(float playback_rate);
-
- // Forcefully sets the media time to |current_time|. The second parameter is
- // the |max_time| that the clock should progress after a call to Play(). This
- // value is often the time of the end of the last frame buffered and decoded.
- //
- // These values are clamped to the duration of the video, which is initially
- // set to 0 (before SetDuration() is called).
- void SetTime(base::TimeDelta current_time, base::TimeDelta max_time);
-
- // Sets the |max_time| to be returned by a call to Elapsed().
- void SetMaxTime(base::TimeDelta max_time);
-
- // Returns the current elapsed media time. Returns 0 if SetDuration() has
- // never been called.
- base::TimeDelta Elapsed();
-
- // Sets the duration of the video. Clock expects the duration will be set
- // exactly once.
- void SetDuration(base::TimeDelta duration);
-
- // Returns the duration of the clock, or 0 if not set.
- base::TimeDelta Duration() const;
-
- private:
- // Updates the reference points based on the current calculated time.
- void UpdateReferencePoints();
-
- // Updates the reference points based on the given |current_time|.
- void UpdateReferencePoints(base::TimeDelta current_time);
-
- // Returns the time elapsed based on the current reference points, ignoring
- // the |max_time_| cap.
- base::TimeDelta EstimatedElapsedTime();
-
- // Translates |time| into the current media time, based on the perspective of
- // the monotonically-increasing system clock.
- base::TimeDelta ElapsedViaProvidedTime(const base::TimeTicks& time) const;
-
- base::TimeDelta ClampToValidTimeRange(base::TimeDelta time) const;
-
- base::TickClock* const clock_;
-
- // Whether the clock is running.
- bool playing_;
-
- // Whether the clock is stalled because it has reached the |max_time_|
- // allowed.
- bool underflow_;
-
- // The monotonic system clock time when this Clock last started playing or had
- // its time set via SetTime().
- base::TimeTicks reference_;
-
- // Current accumulated amount of media time. The remaining portion must be
- // calculated by comparing the system time to the reference time.
- base::TimeDelta media_time_;
-
- // Current playback rate.
- float playback_rate_;
-
- // The maximum time that can be returned by calls to Elapsed().
- base::TimeDelta max_time_;
-
- // Duration of the media.
- base::TimeDelta duration_;
-
- DISALLOW_COPY_AND_ASSIGN(Clock);
-};
-
-} // namespace media
-
-#endif // MEDIA_BASE_CLOCK_H_
diff --git a/media/base/clock_unittest.cc b/media/base/clock_unittest.cc
deleted file mode 100644
index 3bf05996c6..0000000000
--- a/media/base/clock_unittest.cc
+++ /dev/null
@@ -1,253 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/compiler_specific.h"
-#include "base/logging.h"
-#include "base/test/simple_test_tick_clock.h"
-#include "base/time/clock.h"
-#include "media/base/clock.h"
-#include "testing/gmock/include/gmock/gmock.h"
-
-using ::testing::InSequence;
-using ::testing::Return;
-using ::testing::StrictMock;
-
-namespace base {
-
-// Provide a stream output operator so we can use EXPECT_EQ(...) with TimeDelta.
-//
-// TODO(scherkus): move this into the testing package.
-static std::ostream& operator<<(std::ostream& stream, const TimeDelta& time) {
- return (stream << time.ToInternalValue());
-}
-
-} // namespace
-
-namespace media {
-
-static const int kDurationInSeconds = 120;
-
-class ClockTest : public ::testing::Test {
- public:
- ClockTest() : clock_(&test_tick_clock_) {
- SetDuration();
- }
-
- protected:
- void SetDuration() {
- const base::TimeDelta kDuration =
- base::TimeDelta::FromSeconds(kDurationInSeconds);
- clock_.SetDuration(kDuration);
- EXPECT_EQ(kDuration, clock_.Duration());
- }
-
- void AdvanceSystemTime(base::TimeDelta delta) {
- test_tick_clock_.Advance(delta);
- }
-
- base::SimpleTestTickClock test_tick_clock_;
- Clock clock_;
- base::TimeDelta time_elapsed_;
-};
-
-TEST_F(ClockTest, Created) {
- const base::TimeDelta kExpected = base::TimeDelta::FromSeconds(0);
- EXPECT_EQ(kExpected, clock_.Elapsed());
-}
-
-TEST_F(ClockTest, Play_NormalSpeed) {
- const base::TimeDelta kZero;
- const base::TimeDelta kTimeToAdvance = base::TimeDelta::FromSeconds(2);
-
- EXPECT_EQ(kZero, clock_.Play());
- AdvanceSystemTime(kTimeToAdvance);
- EXPECT_EQ(kTimeToAdvance, clock_.Elapsed());
-}
-
-TEST_F(ClockTest, Play_DoubleSpeed) {
- const base::TimeDelta kZero;
- const base::TimeDelta kTimeToAdvance = base::TimeDelta::FromSeconds(5);
-
- clock_.SetPlaybackRate(2.0f);
- EXPECT_EQ(kZero, clock_.Play());
- AdvanceSystemTime(kTimeToAdvance);
- EXPECT_EQ(2 * kTimeToAdvance, clock_.Elapsed());
-}
-
-TEST_F(ClockTest, Play_HalfSpeed) {
- const base::TimeDelta kZero;
- const base::TimeDelta kTimeToAdvance = base::TimeDelta::FromSeconds(4);
-
- clock_.SetPlaybackRate(0.5f);
- EXPECT_EQ(kZero, clock_.Play());
- AdvanceSystemTime(kTimeToAdvance);
- EXPECT_EQ(kTimeToAdvance / 2, clock_.Elapsed());
-}
-
-TEST_F(ClockTest, Play_ZeroSpeed) {
- // We'll play for 2 seconds at normal speed, 4 seconds at zero speed, and 8
- // seconds at normal speed.
- const base::TimeDelta kZero;
- const base::TimeDelta kPlayDuration1 = base::TimeDelta::FromSeconds(2);
- const base::TimeDelta kPlayDuration2 = base::TimeDelta::FromSeconds(4);
- const base::TimeDelta kPlayDuration3 = base::TimeDelta::FromSeconds(8);
- const base::TimeDelta kExpected = kPlayDuration1 + kPlayDuration3;
-
- EXPECT_EQ(kZero, clock_.Play());
-
- AdvanceSystemTime(kPlayDuration1);
- clock_.SetPlaybackRate(0.0f);
- AdvanceSystemTime(kPlayDuration2);
- clock_.SetPlaybackRate(1.0f);
- AdvanceSystemTime(kPlayDuration3);
-
- EXPECT_EQ(kExpected, clock_.Elapsed());
-}
-
-TEST_F(ClockTest, Play_MultiSpeed) {
- // We'll play for 2 seconds at half speed, 4 seconds at normal speed, and 8
- // seconds at double speed.
- const base::TimeDelta kZero;
- const base::TimeDelta kPlayDuration1 = base::TimeDelta::FromSeconds(2);
- const base::TimeDelta kPlayDuration2 = base::TimeDelta::FromSeconds(4);
- const base::TimeDelta kPlayDuration3 = base::TimeDelta::FromSeconds(8);
- const base::TimeDelta kExpected =
- kPlayDuration1 / 2 + kPlayDuration2 + 2 * kPlayDuration3;
-
- clock_.SetPlaybackRate(0.5f);
- EXPECT_EQ(kZero, clock_.Play());
- AdvanceSystemTime(kPlayDuration1);
-
- clock_.SetPlaybackRate(1.0f);
- AdvanceSystemTime(kPlayDuration2);
-
- clock_.SetPlaybackRate(2.0f);
- AdvanceSystemTime(kPlayDuration3);
- EXPECT_EQ(kExpected, clock_.Elapsed());
-}
-
-TEST_F(ClockTest, Pause) {
- const base::TimeDelta kZero;
- const base::TimeDelta kPlayDuration = base::TimeDelta::FromSeconds(4);
- const base::TimeDelta kPauseDuration = base::TimeDelta::FromSeconds(20);
- const base::TimeDelta kExpectedFirstPause = kPlayDuration;
- const base::TimeDelta kExpectedSecondPause = 2 * kPlayDuration;
-
- // Play for 4 seconds.
- EXPECT_EQ(kZero, clock_.Play());
- AdvanceSystemTime(kPlayDuration);
-
- // Pause for 20 seconds.
- EXPECT_EQ(kExpectedFirstPause, clock_.Pause());
- EXPECT_EQ(kExpectedFirstPause, clock_.Elapsed());
- AdvanceSystemTime(kPauseDuration);
- EXPECT_EQ(kExpectedFirstPause, clock_.Elapsed());
-
- // Play again for 4 more seconds.
- EXPECT_EQ(kExpectedFirstPause, clock_.Play());
- AdvanceSystemTime(kPlayDuration);
- EXPECT_EQ(kExpectedSecondPause, clock_.Pause());
- EXPECT_EQ(kExpectedSecondPause, clock_.Elapsed());
-}
-
-TEST_F(ClockTest, SetTime_Paused) {
- const base::TimeDelta kFirstTime = base::TimeDelta::FromSeconds(4);
- const base::TimeDelta kSecondTime = base::TimeDelta::FromSeconds(16);
-
- clock_.SetTime(kFirstTime, clock_.Duration());
- EXPECT_EQ(kFirstTime, clock_.Elapsed());
- clock_.SetTime(kSecondTime, clock_.Duration());
- EXPECT_EQ(kSecondTime, clock_.Elapsed());
-}
-
-TEST_F(ClockTest, SetTime_Playing) {
- // We'll play for 4 seconds, then set the time to 12, then play for 4 more
- // seconds.
- const base::TimeDelta kZero;
- const base::TimeDelta kPlayDuration = base::TimeDelta::FromSeconds(4);
- const base::TimeDelta kUpdatedTime = base::TimeDelta::FromSeconds(12);
- const base::TimeDelta kExpected = kUpdatedTime + kPlayDuration;
-
- EXPECT_EQ(kZero, clock_.Play());
- AdvanceSystemTime(kPlayDuration);
-
- clock_.SetTime(kUpdatedTime, clock_.Duration());
- AdvanceSystemTime(kPlayDuration);
- EXPECT_EQ(kExpected, clock_.Elapsed());
-}
-
-TEST_F(ClockTest, CapAtMediaDuration_Paused) {
- const base::TimeDelta kDuration =
- base::TimeDelta::FromSeconds(kDurationInSeconds);
- const base::TimeDelta kTimeOverDuration =
- base::TimeDelta::FromSeconds(kDurationInSeconds + 4);
-
- // Elapsed time should always be capped at the duration of the media.
- clock_.SetTime(kTimeOverDuration, kTimeOverDuration);
- EXPECT_EQ(kDuration, clock_.Elapsed());
-}
-
-TEST_F(ClockTest, CapAtMediaDuration_Playing) {
- const base::TimeDelta kZero;
- const base::TimeDelta kDuration =
- base::TimeDelta::FromSeconds(kDurationInSeconds);
- const base::TimeDelta kTimeOverDuration =
- base::TimeDelta::FromSeconds(kDurationInSeconds + 4);
-
- // Play for twice as long as the duration of the media.
- EXPECT_EQ(kZero, clock_.Play());
- AdvanceSystemTime(2 * kDuration);
- EXPECT_EQ(kDuration, clock_.Elapsed());
-
- // Manually set the time past the duration.
- clock_.SetTime(kTimeOverDuration, kTimeOverDuration);
- EXPECT_EQ(kDuration, clock_.Elapsed());
-}
-
-TEST_F(ClockTest, SetMaxTime) {
- const base::TimeDelta kZero;
- const base::TimeDelta kTimeInterval = base::TimeDelta::FromSeconds(4);
- const base::TimeDelta kMaxTime = base::TimeDelta::FromSeconds(6);
-
- EXPECT_EQ(kZero, clock_.Play());
- clock_.SetMaxTime(kMaxTime);
- AdvanceSystemTime(kTimeInterval);
- EXPECT_EQ(kTimeInterval, clock_.Elapsed());
-
- AdvanceSystemTime(kTimeInterval);
- EXPECT_EQ(kMaxTime, clock_.Elapsed());
-
- AdvanceSystemTime(kTimeInterval);
- EXPECT_EQ(kMaxTime, clock_.Elapsed());
-}
-
-TEST_F(ClockTest, SetMaxTime_MultipleTimes) {
- const base::TimeDelta kZero;
- const base::TimeDelta kTimeInterval = base::TimeDelta::FromSeconds(4);
- const base::TimeDelta kMaxTime1 = base::TimeDelta::FromSeconds(6);
- const base::TimeDelta kMaxTime2 = base::TimeDelta::FromSeconds(12);
-
- EXPECT_EQ(kZero, clock_.Play());
- clock_.SetMaxTime(clock_.Duration());
- AdvanceSystemTime(kTimeInterval);
- EXPECT_EQ(kTimeInterval, clock_.Elapsed());
-
- clock_.SetMaxTime(kMaxTime1);
- AdvanceSystemTime(kTimeInterval);
- EXPECT_EQ(kMaxTime1, clock_.Elapsed());
-
- AdvanceSystemTime(kTimeInterval);
- EXPECT_EQ(kMaxTime1, clock_.Elapsed());
-
- clock_.SetMaxTime(kMaxTime2);
- EXPECT_EQ(kMaxTime1, clock_.Elapsed());
-
- AdvanceSystemTime(kTimeInterval);
- EXPECT_EQ(kMaxTime1 + kTimeInterval, clock_.Elapsed());
-
- AdvanceSystemTime(kTimeInterval);
- EXPECT_EQ(kMaxTime2, clock_.Elapsed());
-}
-
-} // namespace media
diff --git a/media/base/container_names.cc b/media/base/container_names.cc
index f062929d54..0f629f8a64 100644
--- a/media/base/container_names.cc
+++ b/media/base/container_names.cc
@@ -16,8 +16,10 @@ namespace media {
namespace container_names {
#define TAG(a, b, c, d) \
- ((static_cast<uint8>(a) << 24) | (static_cast<uint8>(b) << 16) | \
- (static_cast<uint8>(c) << 8) | (static_cast<uint8>(d)))
+ ((static_cast<uint32>(static_cast<uint8>(a)) << 24) | \
+ (static_cast<uint32>(static_cast<uint8>(b)) << 16) | \
+ (static_cast<uint32>(static_cast<uint8>(c)) << 8) | \
+ (static_cast<uint32>(static_cast<uint8>(d))))
#define RCHECK(x) \
do { \
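The added casts widen each byte to uint32 before shifting, so a high byte (>= 0x80) is neither sign-extended nor shifted into the sign bit of a plain int. A standalone sketch of the same four-character-code packing:

    #include <cassert>
    #include <stdint.h>

    #define TAG(a, b, c, d)                                     \
      ((static_cast<uint32_t>(static_cast<uint8_t>(a)) << 24) | \
       (static_cast<uint32_t>(static_cast<uint8_t>(b)) << 16) | \
       (static_cast<uint32_t>(static_cast<uint8_t>(c)) << 8) |  \
       (static_cast<uint32_t>(static_cast<uint8_t>(d))))

    int main() {
      // The 'ftyp' tag from an MP4/ISO-BMFF container.
      assert(TAG('f', 't', 'y', 'p') == 0x66747970u);
      // A 0xFF high byte stays confined to the top eight bits.
      assert(TAG('\xff', 0, 0, 0) == 0xFF000000u);
      return 0;
    }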
diff --git a/media/base/decoder_buffer.h b/media/base/decoder_buffer.h
index 27de88f469..c17aa213ab 100644
--- a/media/base/decoder_buffer.h
+++ b/media/base/decoder_buffer.h
@@ -14,6 +14,7 @@
#include "base/memory/scoped_ptr.h"
#include "base/time/time.h"
#include "build/build_config.h"
+#include "media/base/buffers.h"
#include "media/base/decrypt_config.h"
#include "media/base/media_export.h"
@@ -77,6 +78,9 @@ class MEDIA_EXPORT DecoderBuffer
void set_duration(base::TimeDelta duration) {
DCHECK(!end_of_stream());
+ DCHECK(duration == kNoTimestamp() ||
+ (duration >= base::TimeDelta() && duration != kInfiniteDuration()))
+ << duration.InSecondsF();
duration_ = duration;
}
diff --git a/media/base/demuxer_stream.h b/media/base/demuxer_stream.h
index 87f53e7307..1207e18720 100644
--- a/media/base/demuxer_stream.h
+++ b/media/base/demuxer_stream.h
@@ -8,6 +8,7 @@
#include "base/callback.h"
#include "base/memory/ref_counted.h"
#include "media/base/media_export.h"
+#include "media/base/video_rotation.h"
namespace media {
@@ -80,6 +81,8 @@ class MEDIA_EXPORT DemuxerStream {
// on this.
virtual bool SupportsConfigChanges() = 0;
+ virtual VideoRotation video_rotation() = 0;
+
protected:
// Only allow concrete implementations to get deleted.
virtual ~DemuxerStream();
diff --git a/media/base/fake_text_track_stream.cc b/media/base/fake_text_track_stream.cc
index 2e9a1e17c9..f18e5403b4 100644
--- a/media/base/fake_text_track_stream.cc
+++ b/media/base/fake_text_track_stream.cc
@@ -38,6 +38,10 @@ DemuxerStream::Type FakeTextTrackStream::type() {
bool FakeTextTrackStream::SupportsConfigChanges() { return false; }
+VideoRotation FakeTextTrackStream::video_rotation() {
+ return VIDEO_ROTATION_0;
+}
+
void FakeTextTrackStream::SatisfyPendingRead(
const base::TimeDelta& start,
const base::TimeDelta& duration,
diff --git a/media/base/fake_text_track_stream.h b/media/base/fake_text_track_stream.h
index db7a3e10df..27baeec6d2 100644
--- a/media/base/fake_text_track_stream.h
+++ b/media/base/fake_text_track_stream.h
@@ -24,6 +24,7 @@ class FakeTextTrackStream : public DemuxerStream {
virtual Type type() OVERRIDE;
MOCK_METHOD0(EnableBitstreamConverter, void());
virtual bool SupportsConfigChanges();
+ virtual VideoRotation video_rotation() OVERRIDE;
void SatisfyPendingRead(const base::TimeDelta& start,
const base::TimeDelta& duration,
diff --git a/media/base/mock_filters.cc b/media/base/mock_filters.cc
index e4faf70b3e..42ec9bb7e1 100644
--- a/media/base/mock_filters.cc
+++ b/media/base/mock_filters.cc
@@ -48,6 +48,10 @@ void MockDemuxerStream::set_video_decoder_config(
video_decoder_config_ = config;
}
+VideoRotation MockDemuxerStream::video_rotation() {
+ return VIDEO_ROTATION_0;
+}
+
MockVideoDecoder::MockVideoDecoder() {
EXPECT_CALL(*this, HasAlpha()).WillRepeatedly(Return(false));
}
diff --git a/media/base/mock_filters.h b/media/base/mock_filters.h
index 9c5353312f..28d4d090b4 100644
--- a/media/base/mock_filters.h
+++ b/media/base/mock_filters.h
@@ -61,6 +61,8 @@ class MockDemuxerStream : public DemuxerStream {
void set_audio_decoder_config(const AudioDecoderConfig& config);
void set_video_decoder_config(const VideoDecoderConfig& config);
+ virtual VideoRotation video_rotation() OVERRIDE;
+
private:
DemuxerStream::Type type_;
AudioDecoderConfig audio_decoder_config_;
@@ -82,7 +84,6 @@ class MockVideoDecoder : public VideoDecoder {
MOCK_METHOD2(Decode, void(const scoped_refptr<DecoderBuffer>& buffer,
const DecodeCB&));
MOCK_METHOD1(Reset, void(const base::Closure&));
- MOCK_METHOD0(Stop, void());
MOCK_CONST_METHOD0(HasAlpha, bool());
private:
@@ -103,7 +104,6 @@ class MockAudioDecoder : public AudioDecoder {
void(const scoped_refptr<DecoderBuffer>& buffer,
const DecodeCB&));
MOCK_METHOD1(Reset, void(const base::Closure&));
- MOCK_METHOD0(Stop, void());
private:
DISALLOW_COPY_AND_ASSIGN(MockAudioDecoder);
@@ -115,20 +115,19 @@ class MockVideoRenderer : public VideoRenderer {
virtual ~MockVideoRenderer();
// VideoRenderer implementation.
- MOCK_METHOD9(Initialize, void(DemuxerStream* stream,
- bool low_delay,
- const PipelineStatusCB& init_cb,
- const StatisticsCB& statistics_cb,
- const TimeCB& time_cb,
- const base::Closure& ended_cb,
- const PipelineStatusCB& error_cb,
- const TimeDeltaCB& get_time_cb,
- const TimeDeltaCB& get_duration_cb));
- MOCK_METHOD1(Play, void(const base::Closure& callback));
+ MOCK_METHOD10(Initialize, void(DemuxerStream* stream,
+ bool low_delay,
+ const PipelineStatusCB& init_cb,
+ const StatisticsCB& statistics_cb,
+ const TimeCB& time_cb,
+ const BufferingStateCB& buffering_state_cb,
+ const base::Closure& ended_cb,
+ const PipelineStatusCB& error_cb,
+ const TimeDeltaCB& get_time_cb,
+ const TimeDeltaCB& get_duration_cb));
MOCK_METHOD1(Flush, void(const base::Closure& callback));
- MOCK_METHOD2(Preroll, void(base::TimeDelta time, const PipelineStatusCB& cb));
+ MOCK_METHOD1(StartPlayingFrom, void(base::TimeDelta timestamp));
MOCK_METHOD1(Stop, void(const base::Closure& callback));
- MOCK_METHOD1(SetPlaybackRate, void(float playback_rate));
private:
DISALLOW_COPY_AND_ASSIGN(MockVideoRenderer);
@@ -143,8 +142,8 @@ class MockAudioRenderer : public AudioRenderer {
MOCK_METHOD7(Initialize, void(DemuxerStream* stream,
const PipelineStatusCB& init_cb,
const StatisticsCB& statistics_cb,
- const base::Closure& underflow_cb,
const TimeCB& time_cb,
+ const BufferingStateCB& buffering_state_cb,
const base::Closure& ended_cb,
const PipelineStatusCB& error_cb));
MOCK_METHOD0(StartRendering, void());
@@ -152,7 +151,7 @@ class MockAudioRenderer : public AudioRenderer {
MOCK_METHOD1(Flush, void(const base::Closure& callback));
MOCK_METHOD1(Stop, void(const base::Closure& callback));
MOCK_METHOD1(SetPlaybackRate, void(float playback_rate));
- MOCK_METHOD2(Preroll, void(base::TimeDelta time, const PipelineStatusCB& cb));
+ MOCK_METHOD1(StartPlayingFrom, void(base::TimeDelta timestamp));
MOCK_METHOD1(SetVolume, void(float volume));
MOCK_METHOD0(ResumeAfterUnderflow, void());
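
The mock signature changes above track the new renderer contract in this patch: Preroll()/Play() are gone, renderers are started with StartPlayingFrom() and report readiness through a BufferingStateCB. The following is a minimal sketch of a renderer written against that contract; SketchRenderer and its single-callback Initialize() are hypothetical simplifications, not code from this patch.

    #include "base/time/time.h"
    #include "media/base/buffering_state.h"

    namespace media {

    // Hypothetical renderer illustrating the buffering-driven start flow.
    class SketchRenderer {
     public:
      void Initialize(const BufferingStateCB& buffering_state_cb) {
        buffering_state_cb_ = buffering_state_cb;
      }

      // Replaces the old Preroll(time, cb) + Play(cb) pair: the renderer queues
      // data starting at |timestamp| and reports HAVE_ENOUGH once prerolled.
      void StartPlayingFrom(base::TimeDelta timestamp) {
        start_timestamp_ = timestamp;
        buffering_state_cb_.Run(BUFFERING_HAVE_ENOUGH);
      }

      // On starvation the renderer reports HAVE_NOTHING instead of calling a
      // dedicated underflow callback.
      void OnStarved() { buffering_state_cb_.Run(BUFFERING_HAVE_NOTHING); }

     private:
      BufferingStateCB buffering_state_cb_;
      base::TimeDelta start_timestamp_;
    };

    }  // namespace media
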
diff --git a/media/base/pipeline.cc b/media/base/pipeline.cc
index 2884c38993..ca53be0ceb 100644
--- a/media/base/pipeline.cc
+++ b/media/base/pipeline.cc
@@ -19,11 +19,11 @@
#include "base/synchronization/condition_variable.h"
#include "media/base/audio_decoder.h"
#include "media/base/audio_renderer.h"
-#include "media/base/clock.h"
#include "media/base/filter_collection.h"
#include "media/base/media_log.h"
#include "media/base/text_renderer.h"
#include "media/base/text_track_config.h"
+#include "media/base/time_delta_interpolator.h"
#include "media/base/video_decoder.h"
#include "media/base/video_decoder_config.h"
#include "media/base/video_renderer.h"
@@ -41,8 +41,8 @@ Pipeline::Pipeline(
did_loading_progress_(false),
volume_(1.0f),
playback_rate_(0.0f),
- clock_(new Clock(&default_tick_clock_)),
- clock_state_(CLOCK_PAUSED),
+ interpolator_(new TimeDeltaInterpolator(&default_tick_clock_)),
+ interpolation_state_(INTERPOLATION_STOPPED),
status_(PIPELINE_OK),
state_(kCreated),
audio_ended_(false),
@@ -51,10 +51,11 @@ Pipeline::Pipeline(
audio_buffering_state_(BUFFERING_HAVE_NOTHING),
video_buffering_state_(BUFFERING_HAVE_NOTHING),
demuxer_(NULL),
- creation_time_(default_tick_clock_.NowTicks()) {
+ underflow_disabled_for_testing_(false) {
media_log_->AddEvent(media_log_->CreatePipelineStateChangedEvent(kCreated));
media_log_->AddEvent(
media_log_->CreateEvent(MediaLogEvent::PIPELINE_CREATED));
+ interpolator_->SetBounds(base::TimeDelta(), base::TimeDelta());
}
Pipeline::~Pipeline() {
@@ -73,13 +74,13 @@ void Pipeline::Start(scoped_ptr<FilterCollection> collection,
const PipelineStatusCB& error_cb,
const PipelineStatusCB& seek_cb,
const PipelineMetadataCB& metadata_cb,
- const base::Closure& preroll_completed_cb,
+ const BufferingStateCB& buffering_state_cb,
const base::Closure& duration_change_cb) {
DCHECK(!ended_cb.is_null());
DCHECK(!error_cb.is_null());
DCHECK(!seek_cb.is_null());
DCHECK(!metadata_cb.is_null());
- DCHECK(!preroll_completed_cb.is_null());
+ DCHECK(!buffering_state_cb.is_null());
base::AutoLock auto_lock(lock_);
CHECK(!running_) << "Media pipeline is already running";
@@ -90,7 +91,7 @@ void Pipeline::Start(scoped_ptr<FilterCollection> collection,
error_cb_ = error_cb;
seek_cb_ = seek_cb;
metadata_cb_ = metadata_cb;
- preroll_completed_cb_ = preroll_completed_cb;
+ buffering_state_cb_ = buffering_state_cb;
duration_change_cb_ = duration_change_cb;
task_runner_->PostTask(
@@ -156,7 +157,7 @@ void Pipeline::SetVolume(float volume) {
TimeDelta Pipeline::GetMediaTime() const {
base::AutoLock auto_lock(lock_);
- return clock_->Elapsed();
+ return std::min(interpolator_->GetInterpolatedTime(), duration_);
}
Ranges<TimeDelta> Pipeline::GetBufferedTimeRanges() const {
@@ -166,7 +167,7 @@ Ranges<TimeDelta> Pipeline::GetBufferedTimeRanges() const {
TimeDelta Pipeline::GetMediaDuration() const {
base::AutoLock auto_lock(lock_);
- return clock_->Duration();
+ return duration_;
}
bool Pipeline::DidLoadingProgress() {
@@ -181,8 +182,9 @@ PipelineStatistics Pipeline::GetStatistics() const {
return statistics_;
}
-void Pipeline::SetClockForTesting(Clock* clock) {
- clock_.reset(clock);
+void Pipeline::SetTimeDeltaInterpolatorForTesting(
+ TimeDeltaInterpolator* interpolator) {
+ interpolator_.reset(interpolator);
}
void Pipeline::SetErrorForTesting(PipelineStatus status) {
@@ -190,13 +192,6 @@ void Pipeline::SetErrorForTesting(PipelineStatus status) {
}
void Pipeline::SetState(State next_state) {
- if (state_ != kPlaying && next_state == kPlaying &&
- !creation_time_.is_null()) {
- UMA_HISTOGRAM_TIMES("Media.TimeToPipelineStarted",
- default_tick_clock_.NowTicks() - creation_time_);
- creation_time_ = base::TimeTicks();
- }
-
DVLOG(1) << GetStateString(state_) << " -> " << GetStateString(next_state);
state_ = next_state;
@@ -211,7 +206,6 @@ const char* Pipeline::GetStateString(State state) {
RETURN_STRING(kInitDemuxer);
RETURN_STRING(kInitAudioRenderer);
RETURN_STRING(kInitVideoRenderer);
- RETURN_STRING(kInitPrerolling);
RETURN_STRING(kSeeking);
RETURN_STRING(kPlaying);
RETURN_STRING(kStopping);
@@ -239,17 +233,14 @@ Pipeline::State Pipeline::GetNextState() const {
return kInitAudioRenderer;
if (demuxer_->GetStream(DemuxerStream::VIDEO))
return kInitVideoRenderer;
- return kInitPrerolling;
+ return kPlaying;
case kInitAudioRenderer:
if (demuxer_->GetStream(DemuxerStream::VIDEO))
return kInitVideoRenderer;
- return kInitPrerolling;
+ return kPlaying;
case kInitVideoRenderer:
- return kInitPrerolling;
-
- case kInitPrerolling:
return kPlaying;
case kSeeking:
@@ -297,15 +288,15 @@ void Pipeline::OnAudioTimeUpdate(TimeDelta time, TimeDelta max_time) {
DCHECK_LE(time.InMicroseconds(), max_time.InMicroseconds());
base::AutoLock auto_lock(lock_);
- if (clock_state_ == CLOCK_WAITING_FOR_AUDIO_TIME_UPDATE &&
- time < clock_->Elapsed()) {
+ if (interpolation_state_ == INTERPOLATION_WAITING_FOR_AUDIO_TIME_UPDATE &&
+ time < interpolator_->GetInterpolatedTime()) {
return;
}
if (state_ == kSeeking)
return;
- clock_->SetTime(time, max_time);
+ interpolator_->SetBounds(time, max_time);
StartClockIfWaitingForTimeUpdate_Locked();
}
@@ -319,8 +310,8 @@ void Pipeline::OnVideoTimeUpdate(TimeDelta max_time) {
return;
base::AutoLock auto_lock(lock_);
- DCHECK_NE(clock_state_, CLOCK_WAITING_FOR_AUDIO_TIME_UPDATE);
- clock_->SetMaxTime(max_time);
+ DCHECK_NE(interpolation_state_, INTERPOLATION_WAITING_FOR_AUDIO_TIME_UPDATE);
+ interpolator_->SetUpperBound(max_time);
}
void Pipeline::SetDuration(TimeDelta duration) {
@@ -331,7 +322,7 @@ void Pipeline::SetDuration(TimeDelta duration) {
UMA_HISTOGRAM_LONG_TIMES("Media.Duration", duration);
base::AutoLock auto_lock(lock_);
- clock_->SetDuration(duration);
+ duration_ = duration;
if (!duration_change_cb_.is_null())
duration_change_cb_.Run();
}
@@ -360,8 +351,7 @@ void Pipeline::StateTransitionTask(PipelineStatus status) {
// Guard against accidentally clearing |pending_callbacks_| for states that
// use it as well as states that should not be using it.
- DCHECK_EQ(pending_callbacks_.get() != NULL,
- (state_ == kInitPrerolling || state_ == kSeeking));
+ DCHECK_EQ(pending_callbacks_.get() != NULL, state_ == kSeeking);
pending_callbacks_.reset();
@@ -380,49 +370,47 @@ void Pipeline::StateTransitionTask(PipelineStatus status) {
case kInitVideoRenderer:
return InitializeVideoRenderer(done_cb);
- case kInitPrerolling:
- filter_collection_.reset();
- {
- base::AutoLock l(lock_);
- // We do not want to start the clock running. We only want to set the
- // base media time so our timestamp calculations will be correct.
- clock_->SetTime(base::TimeDelta(), base::TimeDelta());
- }
- if (!audio_renderer_ && !video_renderer_) {
- done_cb.Run(PIPELINE_ERROR_COULD_NOT_RENDER);
- return;
+ case kPlaying:
+ // Finish initial start sequence the first time we enter the playing
+ // state.
+ if (filter_collection_) {
+ filter_collection_.reset();
+ if (!audio_renderer_ && !video_renderer_) {
+ ErrorChangedTask(PIPELINE_ERROR_COULD_NOT_RENDER);
+ return;
+ }
+
+ {
+ PipelineMetadata metadata;
+ metadata.has_audio = audio_renderer_;
+ metadata.has_video = video_renderer_;
+ metadata.timeline_offset = demuxer_->GetTimelineOffset();
+ DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::VIDEO);
+ if (stream) {
+ metadata.natural_size =
+ stream->video_decoder_config().natural_size();
+ metadata.video_rotation = stream->video_rotation();
+ }
+ metadata_cb_.Run(metadata);
+ }
}
+ base::ResetAndReturn(&seek_cb_).Run(PIPELINE_OK);
+
{
- PipelineMetadata metadata;
- metadata.has_audio = audio_renderer_;
- metadata.has_video = video_renderer_;
- metadata.timeline_offset = demuxer_->GetTimelineOffset();
- DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::VIDEO);
- if (stream)
- metadata.natural_size = stream->video_decoder_config().natural_size();
- metadata_cb_.Run(metadata);
+ base::AutoLock auto_lock(lock_);
+ interpolator_->SetBounds(start_timestamp_, start_timestamp_);
}
- return DoInitialPreroll(done_cb);
+ if (audio_renderer_)
+ audio_renderer_->StartPlayingFrom(start_timestamp_);
+ if (video_renderer_)
+ video_renderer_->StartPlayingFrom(start_timestamp_);
+ if (text_renderer_)
+ text_renderer_->StartPlaying();
- case kPlaying:
PlaybackRateChangedTask(GetPlaybackRate());
VolumeChangedTask(GetVolume());
-
- // We enter this state from either kInitPrerolling or kSeeking. As of now
- // both those states call Preroll(), which means by time we enter this
- // state we've already buffered enough data. Forcefully update the
- // buffering state, which start the clock and renderers and transition
- // into kPlaying state.
- //
- // TODO(scherkus): Remove after renderers are taught to fire buffering
- // state callbacks http://crbug.com/144683
- DCHECK(WaitingForEnoughData());
- if (audio_renderer_)
- BufferingStateChanged(&audio_buffering_state_, BUFFERING_HAVE_ENOUGH);
- if (video_renderer_)
- BufferingStateChanged(&video_buffering_state_, BUFFERING_HAVE_ENOUGH);
return;
case kStopping:
@@ -440,38 +428,14 @@ void Pipeline::StateTransitionTask(PipelineStatus status) {
//
// That being said, deleting the renderers while keeping |pending_callbacks_|
// running on the media thread would result in crashes.
-void Pipeline::DoInitialPreroll(const PipelineStatusCB& done_cb) {
- DCHECK(task_runner_->BelongsToCurrentThread());
- DCHECK(!pending_callbacks_.get());
- SerialRunner::Queue bound_fns;
- const base::TimeDelta seek_timestamp = base::TimeDelta();
-
- // Preroll renderers.
- if (audio_renderer_) {
- bound_fns.Push(base::Bind(
- &AudioRenderer::Preroll, base::Unretained(audio_renderer_.get()),
- seek_timestamp));
- }
-
- if (video_renderer_) {
- bound_fns.Push(base::Bind(
- &VideoRenderer::Preroll, base::Unretained(video_renderer_.get()),
- seek_timestamp));
-
- // TODO(scherkus): Remove after VideoRenderer is taught to fire buffering
- // state callbacks http://crbug.com/144683
- bound_fns.Push(base::Bind(&VideoRenderer::Play,
- base::Unretained(video_renderer_.get())));
- }
-
- if (text_renderer_) {
- bound_fns.Push(base::Bind(
- &TextRenderer::Play, base::Unretained(text_renderer_.get())));
- }
-
- pending_callbacks_ = SerialRunner::Run(bound_fns, done_cb);
+#if DCHECK_IS_ON
+static void VerifyBufferingStates(BufferingState* audio_buffering_state,
+ BufferingState* video_buffering_state) {
+ DCHECK_EQ(*audio_buffering_state, BUFFERING_HAVE_NOTHING);
+ DCHECK_EQ(*video_buffering_state, BUFFERING_HAVE_NOTHING);
}
+#endif
void Pipeline::DoSeek(
base::TimeDelta seek_timestamp,
@@ -479,6 +443,10 @@ void Pipeline::DoSeek(
DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK(!pending_callbacks_.get());
SerialRunner::Queue bound_fns;
+ {
+ base::AutoLock auto_lock(lock_);
+ PauseClockAndStopRendering_Locked();
+ }
// Pause.
if (text_renderer_) {
@@ -490,25 +458,20 @@ void Pipeline::DoSeek(
if (audio_renderer_) {
bound_fns.Push(base::Bind(
&AudioRenderer::Flush, base::Unretained(audio_renderer_.get())));
-
- // TODO(scherkus): Remove after AudioRenderer is taught to fire buffering
- // state callbacks http://crbug.com/144683
- bound_fns.Push(base::Bind(&Pipeline::BufferingStateChanged,
- base::Unretained(this),
- &audio_buffering_state_,
- BUFFERING_HAVE_NOTHING));
}
+
if (video_renderer_) {
bound_fns.Push(base::Bind(
&VideoRenderer::Flush, base::Unretained(video_renderer_.get())));
-
- // TODO(scherkus): Remove after VideoRenderer is taught to fire buffering
- // state callbacks http://crbug.com/144683
- bound_fns.Push(base::Bind(&Pipeline::BufferingStateChanged,
- base::Unretained(this),
- &video_buffering_state_,
- BUFFERING_HAVE_NOTHING));
}
+
+#if DCHECK_IS_ON
+ // Verify renderers reset their buffering states.
+ bound_fns.Push(base::Bind(&VerifyBufferingStates,
+ &audio_buffering_state_,
+ &video_buffering_state_));
+#endif
+
if (text_renderer_) {
bound_fns.Push(base::Bind(
&TextRenderer::Flush, base::Unretained(text_renderer_.get())));
@@ -518,29 +481,6 @@ void Pipeline::DoSeek(
bound_fns.Push(base::Bind(
&Demuxer::Seek, base::Unretained(demuxer_), seek_timestamp));
- // Preroll renderers.
- if (audio_renderer_) {
- bound_fns.Push(base::Bind(
- &AudioRenderer::Preroll, base::Unretained(audio_renderer_.get()),
- seek_timestamp));
- }
-
- if (video_renderer_) {
- bound_fns.Push(base::Bind(
- &VideoRenderer::Preroll, base::Unretained(video_renderer_.get()),
- seek_timestamp));
-
- // TODO(scherkus): Remove after renderers are taught to fire buffering
- // state callbacks http://crbug.com/144683
- bound_fns.Push(base::Bind(&VideoRenderer::Play,
- base::Unretained(video_renderer_.get())));
- }
-
- if (text_renderer_) {
- bound_fns.Push(base::Bind(
- &TextRenderer::Play, base::Unretained(text_renderer_.get())));
- }
-
pending_callbacks_ = SerialRunner::Run(bound_fns, done_cb);
}
@@ -704,13 +644,11 @@ void Pipeline::PlaybackRateChangedTask(float playback_rate) {
{
base::AutoLock auto_lock(lock_);
- clock_->SetPlaybackRate(playback_rate);
+ interpolator_->SetPlaybackRate(playback_rate);
}
if (audio_renderer_)
audio_renderer_->SetPlaybackRate(playback_rate_);
- if (video_renderer_)
- video_renderer_->SetPlaybackRate(playback_rate_);
}
void Pipeline::VolumeChangedTask(float volume) {
@@ -747,13 +685,8 @@ void Pipeline::SeekTask(TimeDelta time, const PipelineStatusCB& seek_cb) {
audio_ended_ = false;
video_ended_ = false;
text_ended_ = false;
+ start_timestamp_ = time;
- // Kick off seeking!
- {
- base::AutoLock auto_lock(lock_);
- PauseClockAndStopRendering_Locked();
- clock_->SetTime(time, time);
- }
DoSeek(time, base::Bind(
&Pipeline::OnStateTransition, base::Unretained(this)));
}
@@ -770,7 +703,7 @@ void Pipeline::DoAudioRendererEnded() {
// Start clock since there is no more audio to trigger clock updates.
{
base::AutoLock auto_lock(lock_);
- clock_->SetMaxTime(clock_->Duration());
+ interpolator_->SetUpperBound(duration_);
StartClockIfWaitingForTimeUpdate_Locked();
}
@@ -816,7 +749,7 @@ void Pipeline::RunEndedCallbackIfNeeded() {
{
base::AutoLock auto_lock(lock_);
PauseClockAndStopRendering_Locked();
- clock_->SetTime(clock_->Duration(), clock_->Duration());
+ interpolator_->SetBounds(duration_, duration_);
}
DCHECK_EQ(status_, PIPELINE_OK);
@@ -851,8 +784,9 @@ void Pipeline::InitializeAudioRenderer(const PipelineStatusCB& done_cb) {
demuxer_->GetStream(DemuxerStream::AUDIO),
done_cb,
base::Bind(&Pipeline::OnUpdateStatistics, base::Unretained(this)),
- base::Bind(&Pipeline::OnAudioUnderflow, base::Unretained(this)),
base::Bind(&Pipeline::OnAudioTimeUpdate, base::Unretained(this)),
+ base::Bind(&Pipeline::BufferingStateChanged, base::Unretained(this),
+ &audio_buffering_state_),
base::Bind(&Pipeline::OnAudioRendererEnded, base::Unretained(this)),
base::Bind(&Pipeline::SetError, base::Unretained(this)));
}
@@ -867,26 +801,14 @@ void Pipeline::InitializeVideoRenderer(const PipelineStatusCB& done_cb) {
done_cb,
base::Bind(&Pipeline::OnUpdateStatistics, base::Unretained(this)),
base::Bind(&Pipeline::OnVideoTimeUpdate, base::Unretained(this)),
+ base::Bind(&Pipeline::BufferingStateChanged, base::Unretained(this),
+ &video_buffering_state_),
base::Bind(&Pipeline::OnVideoRendererEnded, base::Unretained(this)),
base::Bind(&Pipeline::SetError, base::Unretained(this)),
base::Bind(&Pipeline::GetMediaTime, base::Unretained(this)),
base::Bind(&Pipeline::GetMediaDuration, base::Unretained(this)));
}
-void Pipeline::OnAudioUnderflow() {
- if (!task_runner_->BelongsToCurrentThread()) {
- task_runner_->PostTask(FROM_HERE, base::Bind(
- &Pipeline::OnAudioUnderflow, base::Unretained(this)));
- return;
- }
-
- if (state_ != kPlaying)
- return;
-
- if (audio_renderer_)
- audio_renderer_->ResumeAfterUnderflow();
-}
-
void Pipeline::BufferingStateChanged(BufferingState* buffering_state,
BufferingState new_buffering_state) {
DVLOG(1) << __FUNCTION__ << "(" << *buffering_state << ", "
@@ -894,17 +816,29 @@ void Pipeline::BufferingStateChanged(BufferingState* buffering_state,
<< (buffering_state == &audio_buffering_state_ ? "audio" : "video");
DCHECK(task_runner_->BelongsToCurrentThread());
bool was_waiting_for_enough_data = WaitingForEnoughData();
+
*buffering_state = new_buffering_state;
+ // Disable underflow by ignoring updates indicating that renderers have run
+ // out of data after we have started the clock.
+ if (state_ == kPlaying && underflow_disabled_for_testing_ &&
+ interpolation_state_ != INTERPOLATION_STOPPED) {
+ return;
+ }
+
// Renderer underflowed.
if (!was_waiting_for_enough_data && WaitingForEnoughData()) {
- StartWaitingForEnoughData();
+ PausePlayback();
+
+ // TODO(scherkus): Fire BUFFERING_HAVE_NOTHING callback to alert clients of
+ // underflow state http://crbug.com/144683
return;
}
// Renderer prerolled.
if (was_waiting_for_enough_data && !WaitingForEnoughData()) {
StartPlayback();
+ buffering_state_cb_.Run(BUFFERING_HAVE_ENOUGH);
return;
}
}
@@ -920,10 +854,11 @@ bool Pipeline::WaitingForEnoughData() const {
return false;
}
-void Pipeline::StartWaitingForEnoughData() {
+void Pipeline::PausePlayback() {
DVLOG(1) << __FUNCTION__;
DCHECK_EQ(state_, kPlaying);
DCHECK(WaitingForEnoughData());
+ DCHECK(task_runner_->BelongsToCurrentThread());
base::AutoLock auto_lock(lock_);
PauseClockAndStopRendering_Locked();
@@ -932,54 +867,51 @@ void Pipeline::StartWaitingForEnoughData() {
void Pipeline::StartPlayback() {
DVLOG(1) << __FUNCTION__;
DCHECK_EQ(state_, kPlaying);
- DCHECK_EQ(clock_state_, CLOCK_PAUSED);
+ DCHECK_EQ(interpolation_state_, INTERPOLATION_STOPPED);
DCHECK(!WaitingForEnoughData());
+ DCHECK(task_runner_->BelongsToCurrentThread());
if (audio_renderer_) {
// We use the audio stream to update the clock. So if there is such a
// stream, we pause the clock until we receive a valid timestamp.
base::AutoLock auto_lock(lock_);
- clock_state_ = CLOCK_WAITING_FOR_AUDIO_TIME_UPDATE;
+ interpolation_state_ = INTERPOLATION_WAITING_FOR_AUDIO_TIME_UPDATE;
audio_renderer_->StartRendering();
} else {
base::AutoLock auto_lock(lock_);
- clock_state_ = CLOCK_PLAYING;
- clock_->SetMaxTime(clock_->Duration());
- clock_->Play();
+ interpolation_state_ = INTERPOLATION_STARTED;
+ interpolator_->SetUpperBound(duration_);
+ interpolator_->StartInterpolating();
}
-
- preroll_completed_cb_.Run();
- if (!seek_cb_.is_null())
- base::ResetAndReturn(&seek_cb_).Run(PIPELINE_OK);
}
void Pipeline::PauseClockAndStopRendering_Locked() {
lock_.AssertAcquired();
- switch (clock_state_) {
- case CLOCK_PAUSED:
+ switch (interpolation_state_) {
+ case INTERPOLATION_STOPPED:
return;
- case CLOCK_WAITING_FOR_AUDIO_TIME_UPDATE:
+ case INTERPOLATION_WAITING_FOR_AUDIO_TIME_UPDATE:
audio_renderer_->StopRendering();
break;
- case CLOCK_PLAYING:
+ case INTERPOLATION_STARTED:
if (audio_renderer_)
audio_renderer_->StopRendering();
- clock_->Pause();
+ interpolator_->StopInterpolating();
break;
}
- clock_state_ = CLOCK_PAUSED;
+ interpolation_state_ = INTERPOLATION_STOPPED;
}
void Pipeline::StartClockIfWaitingForTimeUpdate_Locked() {
lock_.AssertAcquired();
- if (clock_state_ != CLOCK_WAITING_FOR_AUDIO_TIME_UPDATE)
+ if (interpolation_state_ != INTERPOLATION_WAITING_FOR_AUDIO_TIME_UPDATE)
return;
- clock_state_ = CLOCK_PLAYING;
- clock_->Play();
+ interpolation_state_ = INTERPOLATION_STARTED;
+ interpolator_->StartInterpolating();
}
} // namespace media
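
With preroll removed, the pipeline code above gates playback purely on the per-renderer buffering states. The helper below is a compact restatement of the WaitingForEnoughData() check visible in the diff; the free-function form and parameter names are illustrative only.

    #include "media/base/buffering_state.h"

    namespace {

    // Playback may start only once every renderer that exists has reported
    // BUFFERING_HAVE_ENOUGH; a later drop back to BUFFERING_HAVE_NOTHING by
    // any of them pauses playback again.
    bool StillWaitingForEnoughData(bool has_audio,
                                   media::BufferingState audio_state,
                                   bool has_video,
                                   media::BufferingState video_state) {
      if (has_audio && audio_state != media::BUFFERING_HAVE_ENOUGH)
        return true;
      if (has_video && video_state != media::BUFFERING_HAVE_ENOUGH)
        return true;
      return false;
    }

    }  // namespace

As the diff shows, the transition to "enough" is also what now runs the pending seek callback and the client-facing buffering_state_cb_, replacing the old preroll_completed_cb_.
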
diff --git a/media/base/pipeline.h b/media/base/pipeline.h
index b40cd3c98c..6a408da23f 100644
--- a/media/base/pipeline.h
+++ b/media/base/pipeline.h
@@ -19,6 +19,7 @@
#include "media/base/pipeline_status.h"
#include "media/base/ranges.h"
#include "media/base/serial_runner.h"
+#include "media/base/video_rotation.h"
#include "ui/gfx/size.h"
namespace base {
@@ -28,20 +29,22 @@ class TimeDelta;
namespace media {
-class Clock;
class FilterCollection;
class MediaLog;
class TextRenderer;
class TextTrackConfig;
+class TimeDeltaInterpolator;
class VideoRenderer;
// Metadata describing a pipeline once it has been initialized.
struct PipelineMetadata {
- PipelineMetadata() : has_audio(false), has_video(false) {}
+ PipelineMetadata()
+ : has_audio(false), has_video(false), video_rotation(VIDEO_ROTATION_0) {}
bool has_audio;
bool has_video;
gfx::Size natural_size;
+ VideoRotation video_rotation;
base::Time timeline_offset;
};
@@ -59,10 +62,7 @@ typedef base::Callback<void(PipelineMetadata)> PipelineMetadataCB;
// [ InitXXX (for each filter) ] [ Stopping ]
// | |
// V V
-// [ InitPrerolling ] [ Stopped ]
-// |
-// V
-// [ Playing ] <-- [ Seeking ]
+// [ Playing ] <-- [ Seeking ] [ Stopped ]
// | ^
// `---------------'
// Seek()
@@ -85,7 +85,7 @@ class MEDIA_EXPORT Pipeline : public DemuxerHost {
virtual ~Pipeline();
// Build a pipeline using the given filter collection to construct a filter
- // chain, executing |seek_cb| when the initial seek/preroll has completed.
+ // chain, executing |seek_cb| when the initial seek has completed.
//
// |filter_collection| must be a complete collection containing a demuxer,
// audio/video decoders, and audio/video renderers. Failing to do so will
@@ -99,9 +99,8 @@ class MEDIA_EXPORT Pipeline : public DemuxerHost {
// |metadata_cb| will be executed when the content duration, container video
// size, start time, and whether the content has audio and/or
// video in supported formats are known.
- // |preroll_completed_cb| will be executed when all renderers have buffered
- // enough data to satisfy preroll and are ready to
- // start playback.
+ // |buffering_state_cb| will be executed whenever there are changes in the
+ // overall buffering state of the pipeline.
// |duration_change_cb| optional callback that will be executed whenever the
// presentation duration changes.
// It is an error to call this method after the pipeline has already started.
@@ -110,7 +109,7 @@ class MEDIA_EXPORT Pipeline : public DemuxerHost {
const PipelineStatusCB& error_cb,
const PipelineStatusCB& seek_cb,
const PipelineMetadataCB& metadata_cb,
- const base::Closure& preroll_completed_cb,
+ const BufferingStateCB& buffering_state_cb,
const base::Closure& duration_change_cb);
// Asynchronously stops the pipeline, executing |stop_cb| when the pipeline
@@ -177,7 +176,10 @@ class MEDIA_EXPORT Pipeline : public DemuxerHost {
// Gets the current pipeline statistics.
PipelineStatistics GetStatistics() const;
- void SetClockForTesting(Clock* clock);
+ void set_underflow_disabled_for_testing(bool disabled) {
+ underflow_disabled_for_testing_ = disabled;
+ }
+ void SetTimeDeltaInterpolatorForTesting(TimeDeltaInterpolator* interpolator);
void SetErrorForTesting(PipelineStatus status);
private:
@@ -192,7 +194,6 @@ class MEDIA_EXPORT Pipeline : public DemuxerHost {
kInitDemuxer,
kInitAudioRenderer,
kInitVideoRenderer,
- kInitPrerolling,
kSeeking,
kPlaying,
kStopping,
@@ -292,16 +293,8 @@ class MEDIA_EXPORT Pipeline : public DemuxerHost {
void OnStateTransition(PipelineStatus status);
void StateTransitionTask(PipelineStatus status);
- // Initiates an asynchronous preroll call sequence executing |done_cb|
- // with the final status when completed.
- void DoInitialPreroll(const PipelineStatusCB& done_cb);
-
// Initiates an asynchronous pause-flush-seek-preroll call sequence
// executing |done_cb| with the final status when completed.
- //
- // TODO(scherkus): Prerolling should be separate from seeking so we can report
- // finer grained ready states (HAVE_CURRENT_DATA vs. HAVE_FUTURE_DATA)
- // indepentent from seeking.
void DoSeek(base::TimeDelta seek_timestamp, const PipelineStatusCB& done_cb);
// Initiates an asynchronous pause-flush-stop call sequence executing
@@ -309,8 +302,6 @@ class MEDIA_EXPORT Pipeline : public DemuxerHost {
void DoStop(const PipelineStatusCB& done_cb);
void OnStopCompleted(PipelineStatus status);
- void OnAudioUnderflow();
-
// Collection of callback methods and helpers for tracking changes in
// buffering state and transition from paused/underflow states and playing
// states.
@@ -319,11 +310,11 @@ class MEDIA_EXPORT Pipeline : public DemuxerHost {
// - A waiting to non-waiting transition indicates preroll has completed
// and StartPlayback() should be called
// - A non-waiting to waiting transition indicates underflow has occurred
- // and StartWaitingForEnoughData() should be called
+ // and PausePlayback() should be called
void BufferingStateChanged(BufferingState* buffering_state,
BufferingState new_buffering_state);
bool WaitingForEnoughData() const;
- void StartWaitingForEnoughData();
+ void PausePlayback();
void StartPlayback();
void PauseClockAndStopRendering_Locked();
@@ -358,26 +349,28 @@ class MEDIA_EXPORT Pipeline : public DemuxerHost {
// the filters.
float playback_rate_;
- // base::TickClock used by |clock_|.
+ // Current duration as reported by |demuxer_|.
+ base::TimeDelta duration_;
+
+ // base::TickClock used by |interpolator_|.
base::DefaultTickClock default_tick_clock_;
- // Reference clock. Keeps track of current playback time. Uses system
- // clock and linear interpolation, but can have its time manually set
- // by filters.
- scoped_ptr<Clock> clock_;
+ // Tracks the most recent media time update and provides interpolated values
+ // as playback progresses.
+ scoped_ptr<TimeDeltaInterpolator> interpolator_;
- enum ClockState {
- // Audio (if present) is not rendering. Clock isn't playing.
- CLOCK_PAUSED,
+ enum InterpolationState {
+ // Audio (if present) is not rendering. Time isn't being interpolated.
+ INTERPOLATION_STOPPED,
- // Audio (if present) is rendering. Clock isn't playing.
- CLOCK_WAITING_FOR_AUDIO_TIME_UPDATE,
+ // Audio (if present) is rendering. Time isn't being interpolated.
+ INTERPOLATION_WAITING_FOR_AUDIO_TIME_UPDATE,
- // Audio (if present) is rendering. Clock is playing.
- CLOCK_PLAYING,
+ // Audio (if present) is rendering. Time is being interpolated.
+ INTERPOLATION_STARTED,
};
- ClockState clock_state_;
+ InterpolationState interpolation_state_;
// Status of the pipeline. Initialized to PIPELINE_OK which indicates that
// the pipeline is operating correctly. Any other value indicates that the
@@ -391,6 +384,9 @@ class MEDIA_EXPORT Pipeline : public DemuxerHost {
// Member that tracks the current state.
State state_;
+ // The timestamp to start playback from after starting/seeking has completed.
+ base::TimeDelta start_timestamp_;
+
// Whether we've received the audio/video/text ended events.
bool audio_ended_;
bool video_ended_;
@@ -409,7 +405,7 @@ class MEDIA_EXPORT Pipeline : public DemuxerHost {
base::Closure ended_cb_;
PipelineStatusCB error_cb_;
PipelineMetadataCB metadata_cb_;
- base::Closure preroll_completed_cb_;
+ BufferingStateCB buffering_state_cb_;
base::Closure duration_change_cb_;
// Contains the demuxer and renderers to use when initializing.
@@ -426,12 +422,10 @@ class MEDIA_EXPORT Pipeline : public DemuxerHost {
PipelineStatistics statistics_;
- // Time of pipeline creation; is non-zero only until the pipeline first
- // reaches "kStarted", at which point it is used & zeroed out.
- base::TimeTicks creation_time_;
-
scoped_ptr<SerialRunner> pending_callbacks_;
+ bool underflow_disabled_for_testing_;
+
base::ThreadChecker thread_checker_;
DISALLOW_COPY_AND_ASSIGN(Pipeline);
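
How the pipeline consumes the new interpolator can be read from the .cc changes above; the two helpers below restate that usage outside the class for orientation. The helper names are invented and locking is omitted.

    #include <algorithm>

    #include "base/time/time.h"
    #include "media/base/time_delta_interpolator.h"

    // Mirrors Pipeline::GetMediaTime(): never report a time past the duration
    // the demuxer announced.
    base::TimeDelta CurrentMediaTime(media::TimeDeltaInterpolator* interpolator,
                                     base::TimeDelta duration) {
      return std::min(interpolator->GetInterpolatedTime(), duration);
    }

    // Mirrors Pipeline::OnAudioTimeUpdate(): the audio clock supplies both the
    // current media time and how far ahead interpolation may run.
    void OnAudioTimeUpdate(media::TimeDeltaInterpolator* interpolator,
                           base::TimeDelta time,
                           base::TimeDelta max_time) {
      interpolator->SetBounds(time, max_time);
    }
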
diff --git a/media/base/pipeline_unittest.cc b/media/base/pipeline_unittest.cc
index 05ffc8c391..2685b8eacd 100644
--- a/media/base/pipeline_unittest.cc
+++ b/media/base/pipeline_unittest.cc
@@ -10,7 +10,6 @@
#include "base/test/simple_test_tick_clock.h"
#include "base/threading/simple_thread.h"
#include "base/time/clock.h"
-#include "media/base/clock.h"
#include "media/base/fake_text_track_stream.h"
#include "media/base/gmock_callback_support.h"
#include "media/base/media_log.h"
@@ -19,6 +18,7 @@
#include "media/base/test_helpers.h"
#include "media/base/text_renderer.h"
#include "media/base/text_track_config.h"
+#include "media/base/time_delta_interpolator.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "ui/gfx/size.h"
@@ -51,6 +51,10 @@ ACTION_P2(SetError, pipeline, status) {
pipeline->SetErrorForTesting(status);
}
+ACTION_P2(SetBufferingState, cb, buffering_state) {
+ cb->Run(buffering_state);
+}
+
// Used for setting expectations on pipeline callbacks. Using a StrictMock
// also lets us test for missing callbacks.
class CallbackHelper {
@@ -64,7 +68,7 @@ class CallbackHelper {
MOCK_METHOD0(OnEnded, void());
MOCK_METHOD1(OnError, void(PipelineStatus));
MOCK_METHOD1(OnMetadata, void(PipelineMetadata));
- MOCK_METHOD0(OnPrerollCompleted, void());
+ MOCK_METHOD1(OnBufferingStateChange, void(BufferingState));
MOCK_METHOD0(OnDurationChange, void());
private:
@@ -101,8 +105,8 @@ class PipelineTest : public ::testing::Test {
scoped_ptr<TextRenderer> text_renderer(text_renderer_);
filter_collection_->SetTextRenderer(text_renderer.Pass());
- // InitializeDemuxer() adds overriding expectations for expected non-NULL
- // streams.
+ // SetDemuxerExpectations() adds overriding expectations for expected
+ // non-NULL streams.
DemuxerStream* null_pointer = NULL;
EXPECT_CALL(*demuxer_, GetStream(_))
.WillRepeatedly(Return(null_pointer));
@@ -137,8 +141,8 @@ class PipelineTest : public ::testing::Test {
protected:
// Sets up expectations to allow the demuxer to initialize.
typedef std::vector<MockDemuxerStream*> MockDemuxerStreamVector;
- void InitializeDemuxer(MockDemuxerStreamVector* streams,
- const base::TimeDelta& duration) {
+ void SetDemuxerExpectations(MockDemuxerStreamVector* streams,
+ const base::TimeDelta& duration) {
EXPECT_CALL(callbacks_, OnDurationChange());
EXPECT_CALL(*demuxer_, Initialize(_, _, _))
.WillOnce(DoAll(SetDemuxerProperties(duration),
@@ -152,9 +156,9 @@ class PipelineTest : public ::testing::Test {
}
}
- void InitializeDemuxer(MockDemuxerStreamVector* streams) {
+ void SetDemuxerExpectations(MockDemuxerStreamVector* streams) {
// Initialize with a default non-zero duration.
- InitializeDemuxer(streams, base::TimeDelta::FromSeconds(10));
+ SetDemuxerExpectations(streams, base::TimeDelta::FromSeconds(10));
}
scoped_ptr<StrictMock<MockDemuxerStream> > CreateStream(
@@ -165,22 +169,19 @@ class PipelineTest : public ::testing::Test {
}
// Sets up expectations to allow the video renderer to initialize.
- void InitializeVideoRenderer(DemuxerStream* stream) {
- EXPECT_CALL(*video_renderer_, Initialize(stream, _, _, _, _, _, _, _, _))
- .WillOnce(RunCallback<2>(PIPELINE_OK));
- EXPECT_CALL(*video_renderer_, SetPlaybackRate(0.0f));
-
- // Startup sequence.
- EXPECT_CALL(*video_renderer_, Preroll(base::TimeDelta(), _))
- .WillOnce(RunCallback<1>(PIPELINE_OK));
- EXPECT_CALL(*video_renderer_, Play(_))
- .WillOnce(RunClosure<0>());
+ void SetVideoRendererExpectations(DemuxerStream* stream) {
+ EXPECT_CALL(*video_renderer_, Initialize(stream, _, _, _, _, _, _, _, _, _))
+ .WillOnce(DoAll(SaveArg<5>(&video_buffering_state_cb_),
+ SaveArg<6>(&video_ended_cb_),
+ RunCallback<2>(PIPELINE_OK)));
}
// Sets up expectations to allow the audio renderer to initialize.
- void InitializeAudioRenderer(DemuxerStream* stream) {
+ void SetAudioRendererExpectations(DemuxerStream* stream) {
EXPECT_CALL(*audio_renderer_, Initialize(stream, _, _, _, _, _, _))
- .WillOnce(DoAll(SaveArg<4>(&audio_time_cb_),
+ .WillOnce(DoAll(SaveArg<3>(&audio_time_cb_),
+ SaveArg<4>(&audio_buffering_state_cb_),
+ SaveArg<5>(&audio_ended_cb_),
RunCallback<1>(PIPELINE_OK)));
}
@@ -193,7 +194,7 @@ class PipelineTest : public ::testing::Test {
// Sets up expectations on the callback and initializes the pipeline. Called
// after tests have set expectations any filters they wish to use.
- void InitializePipeline(PipelineStatus start_status) {
+ void StartPipeline(PipelineStatus start_status) {
EXPECT_CALL(callbacks_, OnStart(start_status));
if (start_status == PIPELINE_OK) {
@@ -202,13 +203,19 @@ class PipelineTest : public ::testing::Test {
if (audio_stream_) {
EXPECT_CALL(*audio_renderer_, SetPlaybackRate(0.0f));
EXPECT_CALL(*audio_renderer_, SetVolume(1.0f));
-
- // Startup sequence.
- EXPECT_CALL(*audio_renderer_, Preroll(base::TimeDelta(), _))
- .WillOnce(RunCallback<1>(PIPELINE_OK));
+ EXPECT_CALL(*audio_renderer_, StartPlayingFrom(base::TimeDelta()))
+ .WillOnce(SetBufferingState(&audio_buffering_state_cb_,
+ BUFFERING_HAVE_ENOUGH));
EXPECT_CALL(*audio_renderer_, StartRendering());
}
- EXPECT_CALL(callbacks_, OnPrerollCompleted());
+
+ if (video_stream_) {
+ EXPECT_CALL(*video_renderer_, StartPlayingFrom(base::TimeDelta()))
+ .WillOnce(SetBufferingState(&video_buffering_state_cb_,
+ BUFFERING_HAVE_ENOUGH));
+ }
+
+ EXPECT_CALL(callbacks_, OnBufferingStateChange(BUFFERING_HAVE_ENOUGH));
}
pipeline_->Start(
@@ -217,7 +224,7 @@ class PipelineTest : public ::testing::Test {
base::Bind(&CallbackHelper::OnError, base::Unretained(&callbacks_)),
base::Bind(&CallbackHelper::OnStart, base::Unretained(&callbacks_)),
base::Bind(&CallbackHelper::OnMetadata, base::Unretained(&callbacks_)),
- base::Bind(&CallbackHelper::OnPrerollCompleted,
+ base::Bind(&CallbackHelper::OnBufferingStateChange,
base::Unretained(&callbacks_)),
base::Bind(&CallbackHelper::OnDurationChange,
base::Unretained(&callbacks_)));
@@ -251,17 +258,21 @@ class PipelineTest : public ::testing::Test {
return text_stream_.get();
}
- void ExpectSeek(const base::TimeDelta& seek_time) {
+ void ExpectSeek(const base::TimeDelta& seek_time, bool underflowed) {
// Every filter should receive a call to Seek().
EXPECT_CALL(*demuxer_, Seek(seek_time, _))
.WillOnce(RunCallback<1>(PIPELINE_OK));
if (audio_stream_) {
- EXPECT_CALL(*audio_renderer_, StopRendering());
+ if (!underflowed)
+ EXPECT_CALL(*audio_renderer_, StopRendering());
EXPECT_CALL(*audio_renderer_, Flush(_))
- .WillOnce(RunClosure<0>());
- EXPECT_CALL(*audio_renderer_, Preroll(seek_time, _))
- .WillOnce(RunCallback<1>(PIPELINE_OK));
+ .WillOnce(DoAll(SetBufferingState(&audio_buffering_state_cb_,
+ BUFFERING_HAVE_NOTHING),
+ RunClosure<0>()));
+ EXPECT_CALL(*audio_renderer_, StartPlayingFrom(seek_time))
+ .WillOnce(SetBufferingState(&audio_buffering_state_cb_,
+ BUFFERING_HAVE_ENOUGH));
EXPECT_CALL(*audio_renderer_, SetPlaybackRate(_));
EXPECT_CALL(*audio_renderer_, SetVolume(_));
EXPECT_CALL(*audio_renderer_, StartRendering());
@@ -269,18 +280,17 @@ class PipelineTest : public ::testing::Test {
if (video_stream_) {
EXPECT_CALL(*video_renderer_, Flush(_))
- .WillOnce(RunClosure<0>());
- EXPECT_CALL(*video_renderer_, Preroll(seek_time, _))
- .WillOnce(RunCallback<1>(PIPELINE_OK));
- EXPECT_CALL(*video_renderer_, SetPlaybackRate(_));
- EXPECT_CALL(*video_renderer_, Play(_))
- .WillOnce(RunClosure<0>());
+ .WillOnce(DoAll(SetBufferingState(&video_buffering_state_cb_,
+ BUFFERING_HAVE_NOTHING),
+ RunClosure<0>()));
+ EXPECT_CALL(*video_renderer_, StartPlayingFrom(seek_time))
+ .WillOnce(SetBufferingState(&video_buffering_state_cb_,
+ BUFFERING_HAVE_ENOUGH));
}
- EXPECT_CALL(callbacks_, OnPrerollCompleted());
-
- // We expect a successful seek callback.
+ // We expect a successful seek callback followed by a buffering update.
EXPECT_CALL(callbacks_, OnSeek(PIPELINE_OK));
+ EXPECT_CALL(callbacks_, OnBufferingStateChange(BUFFERING_HAVE_ENOUGH));
}
void DoSeek(const base::TimeDelta& seek_time) {
@@ -330,6 +340,10 @@ class PipelineTest : public ::testing::Test {
scoped_ptr<StrictMock<MockDemuxerStream> > video_stream_;
scoped_ptr<FakeTextTrackStream> text_stream_;
AudioRenderer::TimeCB audio_time_cb_;
+ BufferingStateCB audio_buffering_state_cb_;
+ BufferingStateCB video_buffering_state_cb_;
+ base::Closure audio_ended_cb_;
+ base::Closure video_ended_cb_;
VideoDecoderConfig video_decoder_config_;
PipelineMetadata metadata_;
@@ -376,7 +390,7 @@ TEST_F(PipelineTest, NeverInitializes) {
base::Bind(&CallbackHelper::OnError, base::Unretained(&callbacks_)),
base::Bind(&CallbackHelper::OnStart, base::Unretained(&callbacks_)),
base::Bind(&CallbackHelper::OnMetadata, base::Unretained(&callbacks_)),
- base::Bind(&CallbackHelper::OnPrerollCompleted,
+ base::Bind(&CallbackHelper::OnBufferingStateChange,
base::Unretained(&callbacks_)),
base::Bind(&CallbackHelper::OnDurationChange,
base::Unretained(&callbacks_)));
@@ -396,7 +410,7 @@ TEST_F(PipelineTest, URLNotFound) {
EXPECT_CALL(*demuxer_, Stop(_))
.WillOnce(RunClosure<0>());
- InitializePipeline(PIPELINE_ERROR_URL_NOT_FOUND);
+ StartPipeline(PIPELINE_ERROR_URL_NOT_FOUND);
}
TEST_F(PipelineTest, NoStreams) {
@@ -405,7 +419,7 @@ TEST_F(PipelineTest, NoStreams) {
EXPECT_CALL(*demuxer_, Stop(_))
.WillOnce(RunClosure<0>());
- InitializePipeline(PIPELINE_ERROR_COULD_NOT_RENDER);
+ StartPipeline(PIPELINE_ERROR_COULD_NOT_RENDER);
}
TEST_F(PipelineTest, AudioStream) {
@@ -413,10 +427,10 @@ TEST_F(PipelineTest, AudioStream) {
MockDemuxerStreamVector streams;
streams.push_back(audio_stream());
- InitializeDemuxer(&streams);
- InitializeAudioRenderer(audio_stream());
+ SetDemuxerExpectations(&streams);
+ SetAudioRendererExpectations(audio_stream());
- InitializePipeline(PIPELINE_OK);
+ StartPipeline(PIPELINE_OK);
EXPECT_TRUE(metadata_.has_audio);
EXPECT_FALSE(metadata_.has_video);
}
@@ -426,10 +440,10 @@ TEST_F(PipelineTest, VideoStream) {
MockDemuxerStreamVector streams;
streams.push_back(video_stream());
- InitializeDemuxer(&streams);
- InitializeVideoRenderer(video_stream());
+ SetDemuxerExpectations(&streams);
+ SetVideoRendererExpectations(video_stream());
- InitializePipeline(PIPELINE_OK);
+ StartPipeline(PIPELINE_OK);
EXPECT_FALSE(metadata_.has_audio);
EXPECT_TRUE(metadata_.has_video);
}
@@ -441,11 +455,11 @@ TEST_F(PipelineTest, AudioVideoStream) {
streams.push_back(audio_stream());
streams.push_back(video_stream());
- InitializeDemuxer(&streams);
- InitializeAudioRenderer(audio_stream());
- InitializeVideoRenderer(video_stream());
+ SetDemuxerExpectations(&streams);
+ SetAudioRendererExpectations(audio_stream());
+ SetVideoRendererExpectations(video_stream());
- InitializePipeline(PIPELINE_OK);
+ StartPipeline(PIPELINE_OK);
EXPECT_TRUE(metadata_.has_audio);
EXPECT_TRUE(metadata_.has_video);
}
@@ -456,10 +470,10 @@ TEST_F(PipelineTest, VideoTextStream) {
MockDemuxerStreamVector streams;
streams.push_back(video_stream());
- InitializeDemuxer(&streams);
- InitializeVideoRenderer(video_stream());
+ SetDemuxerExpectations(&streams);
+ SetVideoRendererExpectations(video_stream());
- InitializePipeline(PIPELINE_OK);
+ StartPipeline(PIPELINE_OK);
EXPECT_FALSE(metadata_.has_audio);
EXPECT_TRUE(metadata_.has_video);
@@ -475,11 +489,11 @@ TEST_F(PipelineTest, VideoAudioTextStream) {
streams.push_back(video_stream());
streams.push_back(audio_stream());
- InitializeDemuxer(&streams);
- InitializeVideoRenderer(video_stream());
- InitializeAudioRenderer(audio_stream());
+ SetDemuxerExpectations(&streams);
+ SetVideoRendererExpectations(video_stream());
+ SetAudioRendererExpectations(audio_stream());
- InitializePipeline(PIPELINE_OK);
+ StartPipeline(PIPELINE_OK);
EXPECT_TRUE(metadata_.has_audio);
EXPECT_TRUE(metadata_.has_video);
@@ -495,19 +509,18 @@ TEST_F(PipelineTest, Seek) {
streams.push_back(audio_stream());
streams.push_back(video_stream());
- InitializeDemuxer(&streams, base::TimeDelta::FromSeconds(3000));
- InitializeAudioRenderer(audio_stream());
- InitializeVideoRenderer(video_stream());
+ SetDemuxerExpectations(&streams, base::TimeDelta::FromSeconds(3000));
+ SetAudioRendererExpectations(audio_stream());
+ SetVideoRendererExpectations(video_stream());
// Initialize then seek!
- InitializePipeline(PIPELINE_OK);
+ StartPipeline(PIPELINE_OK);
- AddTextStream();
message_loop_.RunUntilIdle();
// Every filter should receive a call to Seek().
base::TimeDelta expected = base::TimeDelta::FromSeconds(2000);
- ExpectSeek(expected);
+ ExpectSeek(expected, false);
DoSeek(expected);
}
@@ -516,15 +529,15 @@ TEST_F(PipelineTest, SetVolume) {
MockDemuxerStreamVector streams;
streams.push_back(audio_stream());
- InitializeDemuxer(&streams);
- InitializeAudioRenderer(audio_stream());
+ SetDemuxerExpectations(&streams);
+ SetAudioRendererExpectations(audio_stream());
// The audio renderer should receive a call to SetVolume().
float expected = 0.5f;
EXPECT_CALL(*audio_renderer_, SetVolume(expected));
// Initialize then set volume!
- InitializePipeline(PIPELINE_OK);
+ StartPipeline(PIPELINE_OK);
pipeline_->SetVolume(expected);
}
@@ -534,10 +547,10 @@ TEST_F(PipelineTest, Properties) {
streams.push_back(video_stream());
const base::TimeDelta kDuration = base::TimeDelta::FromSeconds(100);
- InitializeDemuxer(&streams, kDuration);
- InitializeVideoRenderer(video_stream());
+ SetDemuxerExpectations(&streams, kDuration);
+ SetVideoRendererExpectations(video_stream());
- InitializePipeline(PIPELINE_OK);
+ StartPipeline(PIPELINE_OK);
EXPECT_EQ(kDuration.ToInternalValue(),
pipeline_->GetMediaDuration().ToInternalValue());
EXPECT_FALSE(pipeline_->DidLoadingProgress());
@@ -549,10 +562,10 @@ TEST_F(PipelineTest, GetBufferedTimeRanges) {
streams.push_back(video_stream());
const base::TimeDelta kDuration = base::TimeDelta::FromSeconds(100);
- InitializeDemuxer(&streams, kDuration);
- InitializeVideoRenderer(video_stream());
+ SetDemuxerExpectations(&streams, kDuration);
+ SetVideoRendererExpectations(video_stream());
- InitializePipeline(PIPELINE_OK);
+ StartPipeline(PIPELINE_OK);
EXPECT_EQ(0u, pipeline_->GetBufferedTimeRanges().size());
@@ -565,7 +578,7 @@ TEST_F(PipelineTest, GetBufferedTimeRanges) {
EXPECT_EQ(kDuration / 8, pipeline_->GetBufferedTimeRanges().end(0));
base::TimeDelta kSeekTime = kDuration / 2;
- ExpectSeek(kSeekTime);
+ ExpectSeek(kSeekTime, false);
DoSeek(kSeekTime);
EXPECT_FALSE(pipeline_->DidLoadingProgress());
@@ -579,18 +592,18 @@ TEST_F(PipelineTest, EndedCallback) {
streams.push_back(audio_stream());
streams.push_back(video_stream());
- InitializeDemuxer(&streams);
- InitializeAudioRenderer(audio_stream());
- InitializeVideoRenderer(video_stream());
- InitializePipeline(PIPELINE_OK);
+ SetDemuxerExpectations(&streams);
+ SetAudioRendererExpectations(audio_stream());
+ SetVideoRendererExpectations(video_stream());
+ StartPipeline(PIPELINE_OK);
AddTextStream();
// The ended callback shouldn't run until all renderers have ended.
- pipeline_->OnAudioRendererEnded();
+ audio_ended_cb_.Run();
message_loop_.RunUntilIdle();
- pipeline_->OnVideoRendererEnded();
+ video_ended_cb_.Run();
message_loop_.RunUntilIdle();
EXPECT_CALL(*audio_renderer_, StopRendering());
@@ -608,19 +621,18 @@ TEST_F(PipelineTest, AudioStreamShorterThanVideo) {
streams.push_back(audio_stream());
streams.push_back(video_stream());
- // Replace the clock so we can simulate wall clock time advancing w/o using
- // Sleep().
- pipeline_->SetClockForTesting(new Clock(&test_tick_clock_));
+ // Replace what's used for interpolating to simulate wall clock time.
+ pipeline_->SetTimeDeltaInterpolatorForTesting(
+ new TimeDeltaInterpolator(&test_tick_clock_));
- InitializeDemuxer(&streams, duration);
- InitializeAudioRenderer(audio_stream());
- InitializeVideoRenderer(video_stream());
- InitializePipeline(PIPELINE_OK);
+ SetDemuxerExpectations(&streams, duration);
+ SetAudioRendererExpectations(audio_stream());
+ SetVideoRendererExpectations(video_stream());
+ StartPipeline(PIPELINE_OK);
EXPECT_EQ(0, pipeline_->GetMediaTime().ToInternalValue());
float playback_rate = 1.0f;
- EXPECT_CALL(*video_renderer_, SetPlaybackRate(playback_rate));
EXPECT_CALL(*audio_renderer_, SetPlaybackRate(playback_rate));
pipeline_->SetPlaybackRate(playback_rate);
message_loop_.RunUntilIdle();
@@ -634,7 +646,7 @@ TEST_F(PipelineTest, AudioStreamShorterThanVideo) {
EXPECT_EQ(pipeline_->GetMediaTime().ToInternalValue(), start_time);
// Signal end of audio stream.
- pipeline_->OnAudioRendererEnded();
+ audio_ended_cb_.Run();
message_loop_.RunUntilIdle();
// Verify that the clock advances.
@@ -645,7 +657,7 @@ TEST_F(PipelineTest, AudioStreamShorterThanVideo) {
// Signal end of video stream and make sure OnEnded() callback occurs.
EXPECT_CALL(*audio_renderer_, StopRendering());
EXPECT_CALL(callbacks_, OnEnded());
- pipeline_->OnVideoRendererEnded();
+ video_ended_cb_.Run();
}
TEST_F(PipelineTest, ErrorDuringSeek) {
@@ -653,9 +665,9 @@ TEST_F(PipelineTest, ErrorDuringSeek) {
MockDemuxerStreamVector streams;
streams.push_back(audio_stream());
- InitializeDemuxer(&streams);
- InitializeAudioRenderer(audio_stream());
- InitializePipeline(PIPELINE_OK);
+ SetDemuxerExpectations(&streams);
+ SetAudioRendererExpectations(audio_stream());
+ StartPipeline(PIPELINE_OK);
float playback_rate = 1.0f;
EXPECT_CALL(*audio_renderer_, SetPlaybackRate(playback_rate));
@@ -667,7 +679,9 @@ TEST_F(PipelineTest, ErrorDuringSeek) {
// Preroll() isn't called as the demuxer errors out first.
EXPECT_CALL(*audio_renderer_, StopRendering());
EXPECT_CALL(*audio_renderer_, Flush(_))
- .WillOnce(RunClosure<0>());
+ .WillOnce(DoAll(SetBufferingState(&audio_buffering_state_cb_,
+ BUFFERING_HAVE_NOTHING),
+ RunClosure<0>()));
EXPECT_CALL(*audio_renderer_, Stop(_))
.WillOnce(RunClosure<0>());
@@ -706,9 +720,9 @@ TEST_F(PipelineTest, NoMessageDuringTearDownFromError) {
MockDemuxerStreamVector streams;
streams.push_back(audio_stream());
- InitializeDemuxer(&streams);
- InitializeAudioRenderer(audio_stream());
- InitializePipeline(PIPELINE_OK);
+ SetDemuxerExpectations(&streams);
+ SetAudioRendererExpectations(audio_stream());
+ StartPipeline(PIPELINE_OK);
// Trigger additional requests on the pipeline during tear down from error.
base::Callback<void(PipelineStatus)> cb = base::Bind(
@@ -721,7 +735,9 @@ TEST_F(PipelineTest, NoMessageDuringTearDownFromError) {
// Seek() isn't called as the demuxer errors out first.
EXPECT_CALL(*audio_renderer_, StopRendering());
EXPECT_CALL(*audio_renderer_, Flush(_))
- .WillOnce(RunClosure<0>());
+ .WillOnce(DoAll(SetBufferingState(&audio_buffering_state_cb_,
+ BUFFERING_HAVE_NOTHING),
+ RunClosure<0>()));
EXPECT_CALL(*audio_renderer_, Stop(_))
.WillOnce(RunClosure<0>());
@@ -748,9 +764,9 @@ TEST_F(PipelineTest, AudioTimeUpdateDuringSeek) {
MockDemuxerStreamVector streams;
streams.push_back(audio_stream());
- InitializeDemuxer(&streams);
- InitializeAudioRenderer(audio_stream());
- InitializePipeline(PIPELINE_OK);
+ SetDemuxerExpectations(&streams);
+ SetAudioRendererExpectations(audio_stream());
+ StartPipeline(PIPELINE_OK);
float playback_rate = 1.0f;
EXPECT_CALL(*audio_renderer_, SetPlaybackRate(playback_rate));
@@ -774,15 +790,18 @@ TEST_F(PipelineTest, AudioTimeUpdateDuringSeek) {
EXPECT_CALL(*audio_renderer_, StopRendering());
EXPECT_CALL(*audio_renderer_, Flush(_))
- .WillOnce(RunClosure<0>());
- EXPECT_CALL(*audio_renderer_, Preroll(seek_time, _))
- .WillOnce(RunCallback<1>(PIPELINE_OK));
+ .WillOnce(DoAll(SetBufferingState(&audio_buffering_state_cb_,
+ BUFFERING_HAVE_NOTHING),
+ RunClosure<0>()));
+ EXPECT_CALL(*audio_renderer_, StartPlayingFrom(seek_time))
+ .WillOnce(SetBufferingState(&audio_buffering_state_cb_,
+ BUFFERING_HAVE_ENOUGH));
EXPECT_CALL(*audio_renderer_, SetPlaybackRate(_));
EXPECT_CALL(*audio_renderer_, SetVolume(_));
EXPECT_CALL(*audio_renderer_, StartRendering());
- EXPECT_CALL(callbacks_, OnPrerollCompleted());
EXPECT_CALL(callbacks_, OnSeek(PIPELINE_OK));
+ EXPECT_CALL(callbacks_, OnBufferingStateChange(BUFFERING_HAVE_ENOUGH));
DoSeek(seek_time);
EXPECT_EQ(pipeline_->GetMediaTime(), seek_time);
@@ -803,9 +822,9 @@ TEST_F(PipelineTest, DeleteAfterStop) {
CreateAudioStream();
MockDemuxerStreamVector streams;
streams.push_back(audio_stream());
- InitializeDemuxer(&streams);
- InitializeAudioRenderer(audio_stream());
- InitializePipeline(PIPELINE_OK);
+ SetDemuxerExpectations(&streams);
+ SetAudioRendererExpectations(audio_stream());
+ StartPipeline(PIPELINE_OK);
ExpectStop();
@@ -814,6 +833,28 @@ TEST_F(PipelineTest, DeleteAfterStop) {
message_loop_.RunUntilIdle();
}
+TEST_F(PipelineTest, Underflow) {
+ CreateAudioStream();
+ CreateVideoStream();
+ MockDemuxerStreamVector streams;
+ streams.push_back(audio_stream());
+ streams.push_back(video_stream());
+
+ SetDemuxerExpectations(&streams);
+ SetAudioRendererExpectations(audio_stream());
+ SetVideoRendererExpectations(video_stream());
+ StartPipeline(PIPELINE_OK);
+
+ // Simulate underflow.
+ EXPECT_CALL(*audio_renderer_, StopRendering());
+ audio_buffering_state_cb_.Run(BUFFERING_HAVE_NOTHING);
+
+ // Seek while underflowed. We shouldn't call StopRendering() again.
+ base::TimeDelta expected = base::TimeDelta::FromSeconds(5);
+ ExpectSeek(expected, true);
+ DoSeek(expected);
+}
+
class PipelineTeardownTest : public PipelineTest {
public:
enum TeardownState {
@@ -822,7 +863,6 @@ class PipelineTeardownTest : public PipelineTest {
kInitVideoRenderer,
kFlushing,
kSeeking,
- kPrerolling,
kPlaying,
};
@@ -845,7 +885,6 @@ class PipelineTeardownTest : public PipelineTest {
case kFlushing:
case kSeeking:
- case kPrerolling:
DoInitialize(state, stop_or_error);
DoSeek(state, stop_or_error);
break;
@@ -872,7 +911,7 @@ class PipelineTeardownTest : public PipelineTest {
base::Bind(&CallbackHelper::OnError, base::Unretained(&callbacks_)),
base::Bind(&CallbackHelper::OnStart, base::Unretained(&callbacks_)),
base::Bind(&CallbackHelper::OnMetadata, base::Unretained(&callbacks_)),
- base::Bind(&CallbackHelper::OnPrerollCompleted,
+ base::Bind(&CallbackHelper::OnBufferingStateChange,
base::Unretained(&callbacks_)),
base::Bind(&CallbackHelper::OnDurationChange,
base::Unretained(&callbacks_)));
@@ -906,7 +945,7 @@ class PipelineTeardownTest : public PipelineTest {
MockDemuxerStreamVector streams;
streams.push_back(audio_stream());
streams.push_back(video_stream());
- InitializeDemuxer(&streams, base::TimeDelta::FromSeconds(3000));
+ SetDemuxerExpectations(&streams, base::TimeDelta::FromSeconds(3000));
if (state == kInitAudioRenderer) {
if (stop_or_error == kStop) {
@@ -926,17 +965,18 @@ class PipelineTeardownTest : public PipelineTest {
}
EXPECT_CALL(*audio_renderer_, Initialize(_, _, _, _, _, _, _))
- .WillOnce(RunCallback<1>(PIPELINE_OK));
+ .WillOnce(DoAll(SaveArg<4>(&audio_buffering_state_cb_),
+ RunCallback<1>(PIPELINE_OK)));
if (state == kInitVideoRenderer) {
if (stop_or_error == kStop) {
- EXPECT_CALL(*video_renderer_, Initialize(_, _, _, _, _, _, _, _, _))
+ EXPECT_CALL(*video_renderer_, Initialize(_, _, _, _, _, _, _, _, _, _))
.WillOnce(DoAll(Stop(pipeline_.get(), stop_cb),
RunCallback<2>(PIPELINE_OK)));
EXPECT_CALL(callbacks_, OnStop());
} else {
status = PIPELINE_ERROR_INITIALIZATION_FAILED;
- EXPECT_CALL(*video_renderer_, Initialize(_, _, _, _, _, _, _, _, _))
+ EXPECT_CALL(*video_renderer_, Initialize(_, _, _, _, _, _, _, _, _, _))
.WillOnce(RunCallback<2>(status));
}
@@ -946,27 +986,26 @@ class PipelineTeardownTest : public PipelineTest {
return status;
}
- EXPECT_CALL(*video_renderer_, Initialize(_, _, _, _, _, _, _, _, _))
- .WillOnce(RunCallback<2>(PIPELINE_OK));
+ EXPECT_CALL(*video_renderer_, Initialize(_, _, _, _, _, _, _, _, _, _))
+ .WillOnce(DoAll(SaveArg<5>(&video_buffering_state_cb_),
+ RunCallback<2>(PIPELINE_OK)));
EXPECT_CALL(callbacks_, OnMetadata(_));
// If we get here it's a successful initialization.
- EXPECT_CALL(*audio_renderer_, Preroll(base::TimeDelta(), _))
- .WillOnce(RunCallback<1>(PIPELINE_OK));
- EXPECT_CALL(*video_renderer_, Preroll(base::TimeDelta(), _))
- .WillOnce(RunCallback<1>(PIPELINE_OK));
+ EXPECT_CALL(*audio_renderer_, StartPlayingFrom(base::TimeDelta()))
+ .WillOnce(SetBufferingState(&audio_buffering_state_cb_,
+ BUFFERING_HAVE_ENOUGH));
+ EXPECT_CALL(*video_renderer_, StartPlayingFrom(base::TimeDelta()))
+ .WillOnce(SetBufferingState(&video_buffering_state_cb_,
+ BUFFERING_HAVE_ENOUGH));
EXPECT_CALL(*audio_renderer_, SetPlaybackRate(0.0f));
- EXPECT_CALL(*video_renderer_, SetPlaybackRate(0.0f));
EXPECT_CALL(*audio_renderer_, SetVolume(1.0f));
-
EXPECT_CALL(*audio_renderer_, StartRendering());
- EXPECT_CALL(*video_renderer_, Play(_))
- .WillOnce(RunClosure<0>());
if (status == PIPELINE_OK)
- EXPECT_CALL(callbacks_, OnPrerollCompleted());
+ EXPECT_CALL(callbacks_, OnBufferingStateChange(BUFFERING_HAVE_ENOUGH));
return status;
}
@@ -1000,18 +1039,30 @@ class PipelineTeardownTest : public PipelineTest {
if (state == kFlushing) {
if (stop_or_error == kStop) {
EXPECT_CALL(*audio_renderer_, Flush(_))
- .WillOnce(DoAll(Stop(pipeline_.get(), stop_cb), RunClosure<0>()));
+ .WillOnce(DoAll(Stop(pipeline_.get(), stop_cb),
+ SetBufferingState(&audio_buffering_state_cb_,
+ BUFFERING_HAVE_NOTHING),
+ RunClosure<0>()));
} else {
status = PIPELINE_ERROR_READ;
EXPECT_CALL(*audio_renderer_, Flush(_)).WillOnce(
- DoAll(SetError(pipeline_.get(), status), RunClosure<0>()));
+ DoAll(SetError(pipeline_.get(), status),
+ SetBufferingState(&audio_buffering_state_cb_,
+ BUFFERING_HAVE_NOTHING),
+ RunClosure<0>()));
}
return status;
}
- EXPECT_CALL(*audio_renderer_, Flush(_)).WillOnce(RunClosure<0>());
- EXPECT_CALL(*video_renderer_, Flush(_)).WillOnce(RunClosure<0>());
+ EXPECT_CALL(*audio_renderer_, Flush(_))
+ .WillOnce(DoAll(SetBufferingState(&audio_buffering_state_cb_,
+ BUFFERING_HAVE_NOTHING),
+ RunClosure<0>()));
+ EXPECT_CALL(*video_renderer_, Flush(_))
+ .WillOnce(DoAll(SetBufferingState(&video_buffering_state_cb_,
+ BUFFERING_HAVE_NOTHING),
+ RunClosure<0>()));
if (state == kSeeking) {
if (stop_or_error == kStop) {
@@ -1027,33 +1078,6 @@ class PipelineTeardownTest : public PipelineTest {
return status;
}
- EXPECT_CALL(*demuxer_, Seek(_, _))
- .WillOnce(RunCallback<1>(PIPELINE_OK));
-
- if (state == kPrerolling) {
- if (stop_or_error == kStop) {
- EXPECT_CALL(*audio_renderer_, Preroll(_, _))
- .WillOnce(DoAll(Stop(pipeline_.get(), stop_cb),
- RunCallback<1>(PIPELINE_OK)));
- } else {
- status = PIPELINE_ERROR_READ;
- EXPECT_CALL(*audio_renderer_, Preroll(_, _))
- .WillOnce(RunCallback<1>(status));
- }
-
- return status;
- }
-
- EXPECT_CALL(*audio_renderer_, Preroll(_, _))
- .WillOnce(RunCallback<1>(PIPELINE_OK));
- EXPECT_CALL(*video_renderer_, Preroll(_, _))
- .WillOnce(RunCallback<1>(PIPELINE_OK));
-
- // Playback rate and volume are updated prior to starting.
- EXPECT_CALL(*audio_renderer_, SetPlaybackRate(0.0f));
- EXPECT_CALL(*video_renderer_, SetPlaybackRate(0.0f));
- EXPECT_CALL(*audio_renderer_, SetVolume(1.0f));
-
NOTREACHED() << "State not supported: " << state;
return status;
}
@@ -1101,7 +1125,6 @@ INSTANTIATE_TEARDOWN_TEST(Stop, InitAudioRenderer);
INSTANTIATE_TEARDOWN_TEST(Stop, InitVideoRenderer);
INSTANTIATE_TEARDOWN_TEST(Stop, Flushing);
INSTANTIATE_TEARDOWN_TEST(Stop, Seeking);
-INSTANTIATE_TEARDOWN_TEST(Stop, Prerolling);
INSTANTIATE_TEARDOWN_TEST(Stop, Playing);
INSTANTIATE_TEARDOWN_TEST(Error, InitDemuxer);
@@ -1109,7 +1132,6 @@ INSTANTIATE_TEARDOWN_TEST(Error, InitAudioRenderer);
INSTANTIATE_TEARDOWN_TEST(Error, InitVideoRenderer);
INSTANTIATE_TEARDOWN_TEST(Error, Flushing);
INSTANTIATE_TEARDOWN_TEST(Error, Seeking);
-INSTANTIATE_TEARDOWN_TEST(Error, Prerolling);
INSTANTIATE_TEARDOWN_TEST(Error, Playing);
INSTANTIATE_TEARDOWN_TEST(ErrorAndStop, Playing);
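
Throughout the updated tests the pattern is to capture the BufferingStateCB handed to a renderer with SaveArg<>, then fire it from the test body (directly or via the SetBufferingState action) to simulate preroll completing or an underflow. Below is a self-contained sketch of that capture-then-fire pattern; MockSink and the test name are hypothetical stand-ins for the real renderer mocks.

    #include "base/bind.h"
    #include "media/base/buffering_state.h"
    #include "testing/gmock/include/gmock/gmock.h"
    #include "testing/gtest/include/gtest/gtest.h"

    namespace media {
    namespace {

    class MockSink {
     public:
      MOCK_METHOD1(Initialize, void(const BufferingStateCB& buffering_state_cb));
    };

    void RecordHaveEnough(bool* have_enough, BufferingState state) {
      *have_enough = (state == BUFFERING_HAVE_ENOUGH);
    }

    TEST(BufferingCallbackSketch, CaptureThenFire) {
      using ::testing::_;
      using ::testing::SaveArg;

      MockSink sink;
      BufferingStateCB captured_cb;
      EXPECT_CALL(sink, Initialize(_)).WillOnce(SaveArg<0>(&captured_cb));

      bool have_enough = false;
      sink.Initialize(base::Bind(&RecordHaveEnough, &have_enough));

      // The test stands in for the renderer and reports that preroll finished.
      captured_cb.Run(BUFFERING_HAVE_ENOUGH);
      EXPECT_TRUE(have_enough);
    }

    }  // namespace
    }  // namespace media
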
diff --git a/media/base/stream_parser_buffer.cc b/media/base/stream_parser_buffer.cc
index e9d64272c4..ae826594d5 100644
--- a/media/base/stream_parser_buffer.cc
+++ b/media/base/stream_parser_buffer.cc
@@ -106,6 +106,11 @@ void StreamParserBuffer::SetConfigId(int config_id) {
void StreamParserBuffer::ConvertToSpliceBuffer(
const BufferQueue& pre_splice_buffers) {
DCHECK(splice_buffers_.empty());
+ DCHECK(duration() > base::TimeDelta())
+ << "Only buffers with a valid duration can convert to a splice buffer."
+ << " pts " << timestamp().InSecondsF()
+ << " dts " << GetDecodeTimestamp().InSecondsF()
+ << " dur " << duration().InSecondsF();
DCHECK(!end_of_stream());
// Make a copy of this first, before making any changes.
@@ -139,6 +144,8 @@ void StreamParserBuffer::ConvertToSpliceBuffer(
// The splice duration is the duration of all buffers before the splice plus
// the highest ending timestamp after the splice point.
+ DCHECK(overlapping_buffer->duration() > base::TimeDelta());
+ DCHECK(pre_splice_buffers.back()->duration() > base::TimeDelta());
set_duration(
std::max(overlapping_buffer->timestamp() + overlapping_buffer->duration(),
pre_splice_buffers.back()->timestamp() +
diff --git a/media/base/text_renderer.cc b/media/base/text_renderer.cc
index 6f88ef71a6..3a04348c24 100644
--- a/media/base/text_renderer.cc
+++ b/media/base/text_renderer.cc
@@ -46,7 +46,7 @@ void TextRenderer::Initialize(const base::Closure& ended_cb) {
state_ = kPaused;
}
-void TextRenderer::Play(const base::Closure& callback) {
+void TextRenderer::StartPlaying() {
DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK_EQ(state_, kPaused) << "state_ " << state_;
@@ -62,7 +62,6 @@ void TextRenderer::Play(const base::Closure& callback) {
}
state_ = kPlaying;
- callback.Run();
}
void TextRenderer::Pause(const base::Closure& callback) {
diff --git a/media/base/text_renderer.h b/media/base/text_renderer.h
index ce10912556..7e719dbac9 100644
--- a/media/base/text_renderer.h
+++ b/media/base/text_renderer.h
@@ -44,9 +44,8 @@ class MEDIA_EXPORT TextRenderer {
// end of stream, following a play request.
void Initialize(const base::Closure& ended_cb);
- // Start text track cue decoding and rendering, executing |callback| when
- // playback is underway.
- void Play(const base::Closure& callback);
+ // Start text track cue decoding and rendering.
+ void StartPlaying();
// Temporarily suspend decoding and rendering, executing |callback| when
// playback has been suspended.
diff --git a/media/base/text_renderer_unittest.cc b/media/base/text_renderer_unittest.cc
index 77e8c47182..2c31c921a0 100644
--- a/media/base/text_renderer_unittest.cc
+++ b/media/base/text_renderer_unittest.cc
@@ -184,10 +184,7 @@ class TextRendererTest : public testing::Test {
}
void Play() {
- EXPECT_CALL(*this, OnPlay());
- text_renderer_->Play(base::Bind(&TextRendererTest::OnPlay,
- base::Unretained(this)));
- message_loop_.RunUntilIdle();
+ text_renderer_->StartPlaying();
}
void Pause() {
@@ -215,7 +212,6 @@ class TextRendererTest : public testing::Test {
MOCK_METHOD0(OnEnd, void());
MOCK_METHOD0(OnStop, void());
- MOCK_METHOD0(OnPlay, void());
MOCK_METHOD0(OnPause, void());
MOCK_METHOD0(OnFlush, void());
diff --git a/media/base/time_delta_interpolator.cc b/media/base/time_delta_interpolator.cc
new file mode 100644
index 0000000000..11ba1cd085
--- /dev/null
+++ b/media/base/time_delta_interpolator.cc
@@ -0,0 +1,79 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/time_delta_interpolator.h"
+
+#include <algorithm>
+
+#include "base/logging.h"
+#include "base/time/tick_clock.h"
+#include "media/base/buffers.h"
+
+namespace media {
+
+TimeDeltaInterpolator::TimeDeltaInterpolator(base::TickClock* tick_clock)
+ : tick_clock_(tick_clock),
+ interpolating_(false),
+ upper_bound_(kNoTimestamp()),
+ playback_rate_(1.0f) {
+ DCHECK(tick_clock_);
+}
+
+TimeDeltaInterpolator::~TimeDeltaInterpolator() {
+}
+
+base::TimeDelta TimeDeltaInterpolator::StartInterpolating() {
+ DCHECK(!interpolating_);
+ reference_ = tick_clock_->NowTicks();
+ interpolating_ = true;
+ return lower_bound_;
+}
+
+base::TimeDelta TimeDeltaInterpolator::StopInterpolating() {
+ DCHECK(interpolating_);
+ lower_bound_ = GetInterpolatedTime();
+ interpolating_ = false;
+ return lower_bound_;
+}
+
+void TimeDeltaInterpolator::SetPlaybackRate(float playback_rate) {
+ lower_bound_ = GetInterpolatedTime();
+ reference_ = tick_clock_->NowTicks();
+ playback_rate_ = playback_rate;
+}
+
+void TimeDeltaInterpolator::SetBounds(base::TimeDelta lower_bound,
+ base::TimeDelta upper_bound) {
+ DCHECK(lower_bound <= upper_bound);
+ DCHECK(lower_bound != kNoTimestamp());
+
+ lower_bound_ = std::max(base::TimeDelta(), lower_bound);
+ upper_bound_ = std::max(base::TimeDelta(), upper_bound);
+ reference_ = tick_clock_->NowTicks();
+}
+
+void TimeDeltaInterpolator::SetUpperBound(base::TimeDelta upper_bound) {
+ DCHECK(upper_bound != kNoTimestamp());
+
+ lower_bound_ = GetInterpolatedTime();
+ reference_ = tick_clock_->NowTicks();
+ upper_bound_ = upper_bound;
+}
+
+base::TimeDelta TimeDeltaInterpolator::GetInterpolatedTime() {
+ if (!interpolating_)
+ return lower_bound_;
+
+ int64 now_us = (tick_clock_->NowTicks() - reference_).InMicroseconds();
+ now_us = static_cast<int64>(now_us * playback_rate_);
+ base::TimeDelta interpolated_time =
+ lower_bound_ + base::TimeDelta::FromMicroseconds(now_us);
+
+ if (upper_bound_ == kNoTimestamp())
+ return interpolated_time;
+
+ return std::min(interpolated_time, upper_bound_);
+}
+
+} // namespace media
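
The interpolation above reduces to a single formula: the returned value is lower_bound_ plus the wall-clock time elapsed since reference_, scaled by playback_rate_ and clamped to upper_bound_. A minimal standalone sketch of that arithmetic, using plain C++ and std::chrono rather than the Chromium base:: types (names below are illustrative only, not the media::TimeDeltaInterpolator API):

// Standalone model of the interpolation math in GetInterpolatedTime() above.
// Illustrative sketch only; not the Chromium class.
#include <algorithm>
#include <chrono>
#include <cstdint>
#include <iostream>

using Clock = std::chrono::steady_clock;
using Micros = std::chrono::microseconds;

struct InterpolatorModel {
  Micros lower_bound{0};
  Micros upper_bound{Micros::max()};   // stands in for kNoTimestamp()
  Clock::time_point reference;
  double playback_rate = 1.0;

  Micros InterpolatedTime(Clock::time_point now) const {
    const auto elapsed = std::chrono::duration_cast<Micros>(now - reference);
    const Micros scaled(static_cast<int64_t>(elapsed.count() * playback_rate));
    return std::min(lower_bound + scaled, upper_bound);
  }
};

int main() {
  InterpolatorModel m;
  m.playback_rate = 2.0;                            // double speed
  m.reference = Clock::now();
  const auto later = m.reference + std::chrono::seconds(5);
  // 5 s of wall clock at 2x rate -> ~10 s of media time.
  std::cout << m.InterpolatedTime(later).count() << " us\n";
  return 0;
}

The unit tests added below exercise exactly this behavior (e.g. StartInterpolating_DoubleSpeed and SetUpperBound).
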
diff --git a/media/base/time_delta_interpolator.h b/media/base/time_delta_interpolator.h
new file mode 100644
index 0000000000..af7535da6f
--- /dev/null
+++ b/media/base/time_delta_interpolator.h
@@ -0,0 +1,82 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_TIME_DELTA_INTERPOLATOR_H_
+#define MEDIA_BASE_TIME_DELTA_INTERPOLATOR_H_
+
+#include "base/basictypes.h"
+#include "base/time/time.h"
+#include "media/base/media_export.h"
+
+namespace base {
+class TickClock;
+} // namespace base
+
+namespace media {
+
+// Interpolates between two TimeDeltas based on the passage of wall clock time
+// and the current playback rate.
+//
+// TimeDeltaInterpolator is not thread-safe and must be externally locked.
+class MEDIA_EXPORT TimeDeltaInterpolator {
+ public:
+ // Constructs an interpolator initialized to zero with a rate of 1.0.
+ //
+ // |tick_clock| is used for sampling wall clock time for interpolating.
+ explicit TimeDeltaInterpolator(base::TickClock* tick_clock);
+ ~TimeDeltaInterpolator();
+
+ bool interpolating() { return interpolating_; }
+
+ // Starts returning interpolated TimeDelta values.
+ //
+ // |tick_clock| will be queried for a new reference time value.
+ base::TimeDelta StartInterpolating();
+
+ // Stops returning interpolated TimeDelta values.
+ //
+ // |tick_clock| will be queried for a new reference time value.
+ base::TimeDelta StopInterpolating();
+
+ // Sets a new rate at which to interpolate.
+ //
+ // |tick_clock| will be queried for a new reference time value.
+ void SetPlaybackRate(float playback_rate);
+
+ // Sets the two timestamps to interpolate between at |playback_rate_|.
+ // |upper_bound| must be greater or equal to |lower_bound|.
+ //
+ // |upper_bound| is typically the media timestamp of the last audio frame
+ // buffered by the audio hardware.
+ void SetBounds(base::TimeDelta lower_bound, base::TimeDelta upper_bound);
+
+ // Sets the upper bound used for interpolation. Note that if |upper_bound| is
+ // less than the current lower bound (see SetBounds()), then all future calls
+ // to GetInterpolatedTime() will return |upper_bound|.
+ void SetUpperBound(base::TimeDelta upper_bound);
+
+ // Computes an interpolated time based on the bounds set via SetBounds(), the
+ // playback rate, and the wall clock time elapsed since the last update.
+ base::TimeDelta GetInterpolatedTime();
+
+ private:
+ base::TickClock* const tick_clock_;
+
+ bool interpolating_;
+
+ // The range of time to interpolate between.
+ base::TimeDelta lower_bound_;
+ base::TimeDelta upper_bound_;
+
+ // The monotonic system clock time used for interpolating between
+ // |lower_bound_| and |upper_bound_|.
+ base::TimeTicks reference_;
+
+ float playback_rate_;
+
+ DISALLOW_COPY_AND_ASSIGN(TimeDeltaInterpolator);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_TIME_DELTA_INTERPOLATOR_H_
diff --git a/media/base/time_delta_interpolator_unittest.cc b/media/base/time_delta_interpolator_unittest.cc
new file mode 100644
index 0000000000..04242f122c
--- /dev/null
+++ b/media/base/time_delta_interpolator_unittest.cc
@@ -0,0 +1,199 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/logging.h"
+#include "base/test/simple_test_tick_clock.h"
+#include "media/base/time_delta_interpolator.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+class TimeDeltaInterpolatorTest : public ::testing::Test {
+ public:
+ TimeDeltaInterpolatorTest() : interpolator_(&test_tick_clock_) {}
+
+ protected:
+ void AdvanceSystemTime(base::TimeDelta delta) {
+ test_tick_clock_.Advance(delta);
+ }
+
+ base::SimpleTestTickClock test_tick_clock_;
+ TimeDeltaInterpolator interpolator_;
+};
+
+TEST_F(TimeDeltaInterpolatorTest, Created) {
+ const base::TimeDelta kExpected = base::TimeDelta::FromSeconds(0);
+ EXPECT_EQ(kExpected, interpolator_.GetInterpolatedTime());
+}
+
+TEST_F(TimeDeltaInterpolatorTest, StartInterpolating_NormalSpeed) {
+ const base::TimeDelta kZero;
+ const base::TimeDelta kTimeToAdvance = base::TimeDelta::FromSeconds(2);
+
+ EXPECT_EQ(kZero, interpolator_.StartInterpolating());
+ AdvanceSystemTime(kTimeToAdvance);
+ EXPECT_EQ(kTimeToAdvance, interpolator_.GetInterpolatedTime());
+}
+
+TEST_F(TimeDeltaInterpolatorTest, StartInterpolating_DoubleSpeed) {
+ const base::TimeDelta kZero;
+ const base::TimeDelta kTimeToAdvance = base::TimeDelta::FromSeconds(5);
+
+ interpolator_.SetPlaybackRate(2.0f);
+ EXPECT_EQ(kZero, interpolator_.StartInterpolating());
+ AdvanceSystemTime(kTimeToAdvance);
+ EXPECT_EQ(2 * kTimeToAdvance, interpolator_.GetInterpolatedTime());
+}
+
+TEST_F(TimeDeltaInterpolatorTest, StartInterpolating_HalfSpeed) {
+ const base::TimeDelta kZero;
+ const base::TimeDelta kTimeToAdvance = base::TimeDelta::FromSeconds(4);
+
+ interpolator_.SetPlaybackRate(0.5f);
+ EXPECT_EQ(kZero, interpolator_.StartInterpolating());
+ AdvanceSystemTime(kTimeToAdvance);
+ EXPECT_EQ(kTimeToAdvance / 2, interpolator_.GetInterpolatedTime());
+}
+
+TEST_F(TimeDeltaInterpolatorTest, StartInterpolating_ZeroSpeed) {
+ // We'll play for 2 seconds at normal speed, 4 seconds at zero speed, and 8
+ // seconds at normal speed.
+ const base::TimeDelta kZero;
+ const base::TimeDelta kPlayDuration1 = base::TimeDelta::FromSeconds(2);
+ const base::TimeDelta kPlayDuration2 = base::TimeDelta::FromSeconds(4);
+ const base::TimeDelta kPlayDuration3 = base::TimeDelta::FromSeconds(8);
+ const base::TimeDelta kExpected = kPlayDuration1 + kPlayDuration3;
+
+ EXPECT_EQ(kZero, interpolator_.StartInterpolating());
+
+ AdvanceSystemTime(kPlayDuration1);
+ interpolator_.SetPlaybackRate(0.0f);
+ AdvanceSystemTime(kPlayDuration2);
+ interpolator_.SetPlaybackRate(1.0f);
+ AdvanceSystemTime(kPlayDuration3);
+
+ EXPECT_EQ(kExpected, interpolator_.GetInterpolatedTime());
+}
+
+TEST_F(TimeDeltaInterpolatorTest, StartInterpolating_MultiSpeed) {
+ // We'll play for 2 seconds at half speed, 4 seconds at normal speed, and 8
+ // seconds at double speed.
+ const base::TimeDelta kZero;
+ const base::TimeDelta kPlayDuration1 = base::TimeDelta::FromSeconds(2);
+ const base::TimeDelta kPlayDuration2 = base::TimeDelta::FromSeconds(4);
+ const base::TimeDelta kPlayDuration3 = base::TimeDelta::FromSeconds(8);
+ const base::TimeDelta kExpected =
+ kPlayDuration1 / 2 + kPlayDuration2 + 2 * kPlayDuration3;
+
+ interpolator_.SetPlaybackRate(0.5f);
+ EXPECT_EQ(kZero, interpolator_.StartInterpolating());
+ AdvanceSystemTime(kPlayDuration1);
+
+ interpolator_.SetPlaybackRate(1.0f);
+ AdvanceSystemTime(kPlayDuration2);
+
+ interpolator_.SetPlaybackRate(2.0f);
+ AdvanceSystemTime(kPlayDuration3);
+ EXPECT_EQ(kExpected, interpolator_.GetInterpolatedTime());
+}
+
+TEST_F(TimeDeltaInterpolatorTest, StopInterpolating) {
+ const base::TimeDelta kZero;
+ const base::TimeDelta kPlayDuration = base::TimeDelta::FromSeconds(4);
+ const base::TimeDelta kPauseDuration = base::TimeDelta::FromSeconds(20);
+ const base::TimeDelta kExpectedFirstPause = kPlayDuration;
+ const base::TimeDelta kExpectedSecondPause = 2 * kPlayDuration;
+
+ // Play for 4 seconds.
+ EXPECT_EQ(kZero, interpolator_.StartInterpolating());
+ AdvanceSystemTime(kPlayDuration);
+
+ // Pause for 20 seconds.
+ EXPECT_EQ(kExpectedFirstPause, interpolator_.StopInterpolating());
+ EXPECT_EQ(kExpectedFirstPause, interpolator_.GetInterpolatedTime());
+ AdvanceSystemTime(kPauseDuration);
+ EXPECT_EQ(kExpectedFirstPause, interpolator_.GetInterpolatedTime());
+
+ // Play again for 4 more seconds.
+ EXPECT_EQ(kExpectedFirstPause, interpolator_.StartInterpolating());
+ AdvanceSystemTime(kPlayDuration);
+ EXPECT_EQ(kExpectedSecondPause, interpolator_.StopInterpolating());
+ EXPECT_EQ(kExpectedSecondPause, interpolator_.GetInterpolatedTime());
+}
+
+TEST_F(TimeDeltaInterpolatorTest, SetBounds_Stopped) {
+ const base::TimeDelta kFirstTime = base::TimeDelta::FromSeconds(4);
+ const base::TimeDelta kSecondTime = base::TimeDelta::FromSeconds(16);
+ const base::TimeDelta kArbitraryMaxTime = base::TimeDelta::FromSeconds(100);
+
+ interpolator_.SetBounds(kFirstTime, kArbitraryMaxTime);
+ EXPECT_EQ(kFirstTime, interpolator_.GetInterpolatedTime());
+ interpolator_.SetBounds(kSecondTime, kArbitraryMaxTime);
+ EXPECT_EQ(kSecondTime, interpolator_.GetInterpolatedTime());
+}
+
+TEST_F(TimeDeltaInterpolatorTest, SetBounds_Started) {
+ // We'll play for 4 seconds, then set the time to 12, then play for 4 more
+ // seconds.
+ const base::TimeDelta kZero;
+ const base::TimeDelta kPlayDuration = base::TimeDelta::FromSeconds(4);
+ const base::TimeDelta kUpdatedTime = base::TimeDelta::FromSeconds(12);
+ const base::TimeDelta kArbitraryMaxTime = base::TimeDelta::FromSeconds(100);
+ const base::TimeDelta kExpected = kUpdatedTime + kPlayDuration;
+
+ EXPECT_EQ(kZero, interpolator_.StartInterpolating());
+ AdvanceSystemTime(kPlayDuration);
+
+ interpolator_.SetBounds(kUpdatedTime, kArbitraryMaxTime);
+ AdvanceSystemTime(kPlayDuration);
+ EXPECT_EQ(kExpected, interpolator_.GetInterpolatedTime());
+}
+
+TEST_F(TimeDeltaInterpolatorTest, SetUpperBound) {
+ const base::TimeDelta kZero;
+ const base::TimeDelta kTimeInterval = base::TimeDelta::FromSeconds(4);
+ const base::TimeDelta kMaxTime = base::TimeDelta::FromSeconds(6);
+
+ EXPECT_EQ(kZero, interpolator_.StartInterpolating());
+ interpolator_.SetUpperBound(kMaxTime);
+ AdvanceSystemTime(kTimeInterval);
+ EXPECT_EQ(kTimeInterval, interpolator_.GetInterpolatedTime());
+
+ AdvanceSystemTime(kTimeInterval);
+ EXPECT_EQ(kMaxTime, interpolator_.GetInterpolatedTime());
+
+ AdvanceSystemTime(kTimeInterval);
+ EXPECT_EQ(kMaxTime, interpolator_.GetInterpolatedTime());
+}
+
+TEST_F(TimeDeltaInterpolatorTest, SetUpperBound_MultipleTimes) {
+ const base::TimeDelta kZero;
+ const base::TimeDelta kTimeInterval = base::TimeDelta::FromSeconds(4);
+ const base::TimeDelta kMaxTime0 = base::TimeDelta::FromSeconds(120);
+ const base::TimeDelta kMaxTime1 = base::TimeDelta::FromSeconds(6);
+ const base::TimeDelta kMaxTime2 = base::TimeDelta::FromSeconds(12);
+
+ EXPECT_EQ(kZero, interpolator_.StartInterpolating());
+ interpolator_.SetUpperBound(kMaxTime0);
+ AdvanceSystemTime(kTimeInterval);
+ EXPECT_EQ(kTimeInterval, interpolator_.GetInterpolatedTime());
+
+ interpolator_.SetUpperBound(kMaxTime1);
+ AdvanceSystemTime(kTimeInterval);
+ EXPECT_EQ(kMaxTime1, interpolator_.GetInterpolatedTime());
+
+ AdvanceSystemTime(kTimeInterval);
+ EXPECT_EQ(kMaxTime1, interpolator_.GetInterpolatedTime());
+
+ interpolator_.SetUpperBound(kMaxTime2);
+ EXPECT_EQ(kMaxTime1, interpolator_.GetInterpolatedTime());
+
+ AdvanceSystemTime(kTimeInterval);
+ EXPECT_EQ(kMaxTime1 + kTimeInterval, interpolator_.GetInterpolatedTime());
+
+ AdvanceSystemTime(kTimeInterval);
+ EXPECT_EQ(kMaxTime2, interpolator_.GetInterpolatedTime());
+}
+
+} // namespace media
diff --git a/media/base/video_decoder.h b/media/base/video_decoder.h
index e7d7ad6287..edca238e78 100644
--- a/media/base/video_decoder.h
+++ b/media/base/video_decoder.h
@@ -39,6 +39,11 @@ class MEDIA_EXPORT VideoDecoder {
typedef base::Callback<void(Status status)> DecodeCB;
VideoDecoder();
+
+ // Fires any pending callbacks, stops and destroys the decoder.
+ // Note: Since this is a destructor, |this| will be destroyed after this call.
+ // Make sure the callbacks fired from this call don't post any tasks that
+ // depend on |this|.
virtual ~VideoDecoder();
// Initializes a VideoDecoder with the given |config|, executing the
@@ -48,9 +53,8 @@ class MEDIA_EXPORT VideoDecoder {
// Note:
// 1) The VideoDecoder will be reinitialized if it was initialized before.
// Upon reinitialization, all internal buffered frames will be dropped.
- // 2) This method should not be called during pending decode, reset or stop.
- // 3) No VideoDecoder calls except for Stop() should be made before
- // |status_cb| is executed.
+ // 2) This method should not be called during pending decode or reset.
+ // 3) No VideoDecoder calls should be made before |status_cb| is executed.
virtual void Initialize(const VideoDecoderConfig& config,
bool low_delay,
const PipelineStatusCB& status_cb,
@@ -83,13 +87,6 @@ class MEDIA_EXPORT VideoDecoder {
// Note: No VideoDecoder calls should be made before |closure| is executed.
virtual void Reset(const base::Closure& closure) = 0;
- // Stops decoder, fires any pending callbacks and sets the decoder to an
- // uninitialized state. A VideoDecoder cannot be re-initialized after it has
- // been stopped.
- // Note that if Initialize() is pending or has finished successfully, Stop()
- // must be called before destructing the decoder.
- virtual void Stop() = 0;
-
// Returns true if the decoder needs bitstream conversion before decoding.
virtual bool NeedsBitstreamConversion() const;
diff --git a/media/base/video_frame.cc b/media/base/video_frame.cc
index 272d41dc4c..f6b49e483d 100644
--- a/media/base/video_frame.cc
+++ b/media/base/video_frame.cc
@@ -24,39 +24,46 @@ static inline size_t RoundUp(size_t value, size_t alignment) {
return ((value + (alignment - 1)) & ~(alignment - 1));
}
-// static
-scoped_refptr<VideoFrame> VideoFrame::CreateFrame(
- VideoFrame::Format format,
- const gfx::Size& coded_size,
- const gfx::Rect& visible_rect,
- const gfx::Size& natural_size,
- base::TimeDelta timestamp) {
- // Since we're creating a new YUV frame (and allocating memory for it
- // ourselves), we can pad the requested |coded_size| if necessary if the
- // request does not line up on sample boundaries.
+// Rounds up |coded_size| if necessary for |format|.
+static gfx::Size AdjustCodedSize(VideoFrame::Format format,
+ const gfx::Size& coded_size) {
gfx::Size new_coded_size(coded_size);
switch (format) {
- case VideoFrame::YV24:
- break;
case VideoFrame::YV12:
case VideoFrame::YV12A:
case VideoFrame::I420:
case VideoFrame::YV12J:
- new_coded_size.set_height((new_coded_size.height() + 1) / 2 * 2);
+ new_coded_size.set_height(RoundUp(new_coded_size.height(), 2));
// Fallthrough.
case VideoFrame::YV16:
- new_coded_size.set_width((new_coded_size.width() + 1) / 2 * 2);
+ new_coded_size.set_width(RoundUp(new_coded_size.width(), 2));
break;
- case VideoFrame::UNKNOWN:
- case VideoFrame::NV12:
+ default:
+ break;
+ }
+ return new_coded_size;
+}
+
+// static
+scoped_refptr<VideoFrame> VideoFrame::CreateFrame(
+ VideoFrame::Format format,
+ const gfx::Size& coded_size,
+ const gfx::Rect& visible_rect,
+ const gfx::Size& natural_size,
+ base::TimeDelta timestamp) {
+ DCHECK(format != VideoFrame::UNKNOWN &&
+ format != VideoFrame::NV12 &&
+ format != VideoFrame::NATIVE_TEXTURE);
#if defined(VIDEO_HOLE)
- case VideoFrame::HOLE:
+ DCHECK(format != VideoFrame::HOLE);
#endif // defined(VIDEO_HOLE)
- case VideoFrame::NATIVE_TEXTURE:
- LOG(FATAL) << "Only YUV formats supported: " << format;
- return NULL;
- }
+
+ // Since we're creating a new YUV frame (and allocating memory for it
+ // ourselves), we can pad the requested |coded_size| if necessary when the
+ // request does not line up on sample boundaries.
+ gfx::Size new_coded_size = AdjustCodedSize(format, coded_size);
DCHECK(IsValidConfig(format, new_coded_size, visible_rect, natural_size));
+
scoped_refptr<VideoFrame> frame(
new VideoFrame(format,
new_coded_size,
@@ -191,28 +198,30 @@ scoped_refptr<VideoFrame> VideoFrame::WrapExternalPackedMemory(
base::SharedMemoryHandle handle,
base::TimeDelta timestamp,
const base::Closure& no_longer_needed_cb) {
- if (!IsValidConfig(format, coded_size, visible_rect, natural_size))
+ gfx::Size new_coded_size = AdjustCodedSize(format, coded_size);
+
+ if (!IsValidConfig(format, new_coded_size, visible_rect, natural_size))
return NULL;
- if (data_size < AllocationSize(format, coded_size))
+ if (data_size < AllocationSize(format, new_coded_size))
return NULL;
switch (format) {
case VideoFrame::I420: {
scoped_refptr<VideoFrame> frame(
new VideoFrame(format,
- coded_size,
+ new_coded_size,
visible_rect,
natural_size,
scoped_ptr<gpu::MailboxHolder>(),
timestamp,
false));
frame->shared_memory_handle_ = handle;
- frame->strides_[kYPlane] = coded_size.width();
- frame->strides_[kUPlane] = coded_size.width() / 2;
- frame->strides_[kVPlane] = coded_size.width() / 2;
+ frame->strides_[kYPlane] = new_coded_size.width();
+ frame->strides_[kUPlane] = new_coded_size.width() / 2;
+ frame->strides_[kVPlane] = new_coded_size.width() / 2;
frame->data_[kYPlane] = data;
- frame->data_[kUPlane] = data + coded_size.GetArea();
- frame->data_[kVPlane] = data + (coded_size.GetArea() * 5 / 4);
+ frame->data_[kUPlane] = data + new_coded_size.GetArea();
+ frame->data_[kVPlane] = data + (new_coded_size.GetArea() * 5 / 4);
frame->no_longer_needed_cb_ = no_longer_needed_cb;
return frame;
}
@@ -283,12 +292,12 @@ scoped_refptr<VideoFrame> VideoFrame::WrapExternalYuvData(
uint8* v_data,
base::TimeDelta timestamp,
const base::Closure& no_longer_needed_cb) {
- if (!IsValidConfig(format, coded_size, visible_rect, natural_size))
- return NULL;
+ gfx::Size new_coded_size = AdjustCodedSize(format, coded_size);
+ CHECK(IsValidConfig(format, new_coded_size, visible_rect, natural_size));
scoped_refptr<VideoFrame> frame(
new VideoFrame(format,
- coded_size,
+ new_coded_size,
visible_rect,
natural_size,
scoped_ptr<gpu::MailboxHolder>(),
@@ -312,7 +321,7 @@ scoped_refptr<VideoFrame> VideoFrame::WrapVideoFrame(
const base::Closure& no_longer_needed_cb) {
// NATIVE_TEXTURE frames need mailbox info propagated, and there's no support
// for that here yet, see http://crbug/362521.
- CHECK(frame->format() != NATIVE_TEXTURE);
+ CHECK_NE(frame->format(), NATIVE_TEXTURE);
DCHECK(frame->visible_rect().Contains(visible_rect));
scoped_refptr<VideoFrame> wrapped_frame(
@@ -363,6 +372,19 @@ scoped_refptr<VideoFrame> VideoFrame::CreateBlackFrame(const gfx::Size& size) {
return CreateColorFrame(size, kBlackY, kBlackUV, kBlackUV, kZero);
}
+// static
+scoped_refptr<VideoFrame> VideoFrame::CreateTransparentFrame(
+ const gfx::Size& size) {
+ const uint8 kBlackY = 0x00;
+ const uint8 kBlackUV = 0x00;
+ const uint8 kTransparentA = 0x00;
+ const base::TimeDelta kZero;
+ scoped_refptr<VideoFrame> frame = VideoFrame::CreateFrame(
+ VideoFrame::YV12A, size, gfx::Rect(size), size, kZero);
+ FillYUVA(frame, kBlackY, kBlackUV, kBlackUV, kTransparentA);
+ return frame;
+}
+
#if defined(VIDEO_HOLE)
// This block and other blocks wrapped around #if defined(VIDEO_HOLE) are not
// maintained by the general compositor team. Please contact the following
@@ -609,10 +631,13 @@ void VideoFrame::AllocateYUV() {
// overreads by one line in some cases, see libavcodec/utils.c:
// avcodec_align_dimensions2() and libavcodec/x86/h264_chromamc.asm:
// put_h264_chroma_mc4_ssse3().
+ const size_t data_size =
+ y_bytes + (uv_bytes * 2 + uv_stride) + a_bytes + kFrameSizePadding;
uint8* data = reinterpret_cast<uint8*>(
- base::AlignedAlloc(
- y_bytes + (uv_bytes * 2 + uv_stride) + a_bytes + kFrameSizePadding,
- kFrameAddressAlignment));
+ base::AlignedAlloc(data_size, kFrameAddressAlignment));
+ // FFmpeg expects the initial allocation to be zero-initialized. Failure to
+ // do so can lead to uninitialized value usage. See http://crbug.com/390941
+ memset(data, 0, data_size);
no_longer_needed_cb_ = base::Bind(&ReleaseData, data);
COMPILE_ASSERT(0 == VideoFrame::kYPlane, y_plane_data_must_be_index_0);
data_[VideoFrame::kYPlane] = data;
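
As a side note on the stride and offset arithmetic used by WrapExternalPackedMemory() above: for I420, the Y stride equals the (possibly rounded-up) width, the U and V strides are half of it, the U plane starts right after the Y plane, and the V plane starts at 5/4 of the Y-plane size. A small standalone sketch of that layout math, assuming a tightly packed I420 buffer (plain C++, not the VideoFrame API):

// Illustrative I420 layout arithmetic; values mirror the strides and plane
// offsets assigned in WrapExternalPackedMemory() above.
#include <cstddef>
#include <cstdio>

// Same rounding trick the file uses: round |value| up to a multiple of
// |alignment| (alignment must be a power of two).
static size_t RoundUp(size_t value, size_t alignment) {
  return (value + (alignment - 1)) & ~(alignment - 1);
}

int main() {
  // Odd dimensions are padded to even ones for YV12/I420, as in
  // AdjustCodedSize().
  const size_t width = RoundUp(63, 2);     // -> 64
  const size_t height = RoundUp(47, 2);    // -> 48

  const size_t y_stride = width;           // strides_[kYPlane]
  const size_t uv_stride = width / 2;      // strides_[kUPlane], strides_[kVPlane]
  const size_t y_size = width * height;    // coded_size.GetArea()
  const size_t u_offset = y_size;          // data_[kUPlane] = data + area
  const size_t v_offset = y_size * 5 / 4;  // data_[kVPlane] = data + area * 5/4
  const size_t total = y_size * 3 / 2;     // Y plus two chroma planes of Y/4 each

  std::printf("y_stride=%zu uv_stride=%zu u_offset=%zu v_offset=%zu total=%zu\n",
              y_stride, uv_stride, u_offset, v_offset, total);
  return 0;
}
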
diff --git a/media/base/video_frame.h b/media/base/video_frame.h
index 0696a554c8..2445dd1b40 100644
--- a/media/base/video_frame.h
+++ b/media/base/video_frame.h
@@ -114,6 +114,7 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
// and plane count as given by |format|. The shared memory handle of the
// backing allocation, if present, can be passed in with |handle|. When the
// frame is destroyed, |no_longer_needed_cb.Run()| will be called.
+ // Returns NULL on failure.
static scoped_refptr<VideoFrame> WrapExternalPackedMemory(
Format format,
const gfx::Size& coded_size,
@@ -136,6 +137,7 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
// directly to a hardware device and/or to another process, or can also be
// mapped via mmap() for CPU access.
// When the frame is destroyed, |no_longer_needed_cb.Run()| will be called.
+ // Returns NULL on failure.
static scoped_refptr<VideoFrame> WrapExternalDmabufs(
Format format,
const gfx::Size& coded_size,
@@ -187,6 +189,11 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
// equivalent of RGB(0,0,0).
static scoped_refptr<VideoFrame> CreateBlackFrame(const gfx::Size& size);
+ // Allocates a YV12A frame of the given |size| and sets its data to the YUVA
+ // equivalent of RGBA(0,0,0,0).
+ static scoped_refptr<VideoFrame> CreateTransparentFrame(
+ const gfx::Size& size);
+
#if defined(VIDEO_HOLE)
// Allocates a hole frame.
static scoped_refptr<VideoFrame> CreateHoleFrame(const gfx::Size& size);
diff --git a/media/base/video_frame_unittest.cc b/media/base/video_frame_unittest.cc
index 618d68f458..199e1bf96e 100644
--- a/media/base/video_frame_unittest.cc
+++ b/media/base/video_frame_unittest.cc
@@ -308,4 +308,17 @@ TEST(VideoFrame, TextureNoLongerNeededCallbackAfterTakingAndReleasingMailbox) {
EXPECT_EQ(release_sync_points, called_sync_points);
}
+TEST(VideoFrame, ZeroInitialized) {
+ const int kWidth = 64;
+ const int kHeight = 48;
+ const base::TimeDelta kTimestamp = base::TimeDelta::FromMicroseconds(1337);
+
+ gfx::Size size(kWidth, kHeight);
+ scoped_refptr<media::VideoFrame> frame = VideoFrame::CreateFrame(
+ media::VideoFrame::YV12, size, gfx::Rect(size), size, kTimestamp);
+
+ for (size_t i = 0; i < VideoFrame::NumPlanes(frame->format()); ++i)
+ EXPECT_EQ(0, frame->data(i)[0]);
+}
+
} // namespace media
diff --git a/media/base/video_renderer.h b/media/base/video_renderer.h
index b4154a0410..2e36c7f56d 100644
--- a/media/base/video_renderer.h
+++ b/media/base/video_renderer.h
@@ -8,6 +8,7 @@
#include "base/callback.h"
#include "base/memory/ref_counted.h"
#include "base/time/time.h"
+#include "media/base/buffering_state.h"
#include "media/base/media_export.h"
#include "media/base/pipeline_status.h"
@@ -36,6 +37,9 @@ class MEDIA_EXPORT VideoRenderer {
//
// |time_cb| is executed whenever time has advanced by way of video rendering.
//
+ // |buffering_state_cb| is executed when video rendering has either run out of
+ // data or has enough data to continue playback.
+ //
// |ended_cb| is executed when video rendering has reached the end of stream.
//
// |error_cb| is executed if an error was encountered.
@@ -48,36 +52,29 @@ class MEDIA_EXPORT VideoRenderer {
const PipelineStatusCB& init_cb,
const StatisticsCB& statistics_cb,
const TimeCB& time_cb,
+ const BufferingStateCB& buffering_state_cb,
const base::Closure& ended_cb,
const PipelineStatusCB& error_cb,
const TimeDeltaCB& get_time_cb,
const TimeDeltaCB& get_duration_cb) = 0;
- // Start audio decoding and rendering at the current playback rate, executing
- // |callback| when playback is underway.
- virtual void Play(const base::Closure& callback) = 0;
-
// Discard any video data and stop reading from |stream|, executing |callback|
// when completed.
+ //
+ // Clients should expect |buffering_state_cb| to be called with
+ // BUFFERING_HAVE_NOTHING while flushing is in progress.
virtual void Flush(const base::Closure& callback) = 0;
- // Start prerolling video data. If |time| equals kNoTimestamp() then all
- // samples delivered to the renderer are used to complete preroll. If |time|
- // does not equal kNoTimestamp(), then any samples delivered to the renderer
- // with timestamps less than |time| are silently dropped and not used to
- // satisfy preroll. |callback| is executed when preroll has completed.
+ // Starts playback by reading from |stream| and decoding and rendering video.
+ // |timestamp| is the media timestamp playback should start rendering from.
//
- // Only valid to call after a successful Initialize(), Pause(), or Flush().
- virtual void Preroll(base::TimeDelta time,
- const PipelineStatusCB& callback) = 0;
+ // Only valid to call after a successful Initialize() or Flush().
+ virtual void StartPlayingFrom(base::TimeDelta timestamp) = 0;
// Stop all operations in preparation for being deleted, executing |callback|
// when complete.
virtual void Stop(const base::Closure& callback) = 0;
- // Updates the current playback rate.
- virtual void SetPlaybackRate(float playback_rate) = 0;
-
private:
DISALLOW_COPY_AND_ASSIGN(VideoRenderer);
};
diff --git a/media/base/video_rotation.h b/media/base/video_rotation.h
new file mode 100644
index 0000000000..ad8e57bc43
--- /dev/null
+++ b/media/base/video_rotation.h
@@ -0,0 +1,22 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_VIDEO_ROTATION_H_
+#define MEDIA_BASE_VIDEO_ROTATION_H_
+
+namespace media {
+
+// Enumeration representing the rotation of MP4 video, which can only be
+// rotated in 90 degree increments.
+enum VideoRotation {
+ VIDEO_ROTATION_0 = 0,
+ VIDEO_ROTATION_90,
+ VIDEO_ROTATION_180,
+ VIDEO_ROTATION_270,
+ VIDEO_ROTATION_MAX = VIDEO_ROTATION_270
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_VIDEO_ROTATION_H_
diff --git a/media/base/video_util.cc b/media/base/video_util.cc
index 09f37b8716..d7946743ea 100644
--- a/media/base/video_util.cc
+++ b/media/base/video_util.cc
@@ -96,6 +96,20 @@ void FillYUV(VideoFrame* frame, uint8 y, uint8 u, uint8 v) {
}
}
+void FillYUVA(VideoFrame* frame, uint8 y, uint8 u, uint8 v, uint8 a) {
+ // Fill Y, U and V planes.
+ FillYUV(frame, y, u, v);
+
+ // Fill the A plane.
+ uint8* a_plane = frame->data(VideoFrame::kAPlane);
+ int a_rows = frame->rows(VideoFrame::kAPlane);
+ int a_row_bytes = frame->row_bytes(VideoFrame::kAPlane);
+ for (int i = 0; i < a_rows; ++i) {
+ memset(a_plane, a, a_row_bytes);
+ a_plane += frame->stride(VideoFrame::kAPlane);
+ }
+}
+
static void LetterboxPlane(VideoFrame* frame,
int plane,
const gfx::Rect& view_area,
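
The row-by-row fill in FillYUVA() above matters because a plane's stride can be larger than its row_bytes when rows are padded; a single memset over rows * row_bytes is only correct when the two are equal. A minimal standalone sketch of that per-plane fill pattern (plain C++ with made-up sizes, not the VideoFrame accessors):

// Illustrative per-plane fill, mirroring the loop in FillYUVA() above:
// write |row_bytes| per row, then advance by |stride| to skip any padding.
#include <cstdint>
#include <cstring>
#include <vector>

void FillPlane(uint8_t* plane, int rows, int row_bytes, int stride,
               uint8_t value) {
  for (int i = 0; i < rows; ++i) {
    std::memset(plane, value, static_cast<size_t>(row_bytes));
    plane += stride;
  }
}

int main() {
  const int rows = 48, row_bytes = 64, stride = 96;  // 32 padding bytes per row
  std::vector<uint8_t> plane(static_cast<size_t>(rows) * stride, 0xFF);
  // Fill the visible bytes of each row with 0x00 (e.g. a transparent A plane),
  // leaving the padding bytes untouched.
  FillPlane(plane.data(), rows, row_bytes, stride, 0x00);
  return 0;
}
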
diff --git a/media/base/video_util.h b/media/base/video_util.h
index 5788015dab..702e620dc7 100644
--- a/media/base/video_util.h
+++ b/media/base/video_util.h
@@ -44,6 +44,13 @@ MEDIA_EXPORT void CopyPlane(size_t plane, const uint8* source, int stride,
// Fills |frame| containing YUV data to the given color values.
MEDIA_EXPORT void FillYUV(VideoFrame* frame, uint8 y, uint8 u, uint8 v);
+// Fills |frame| containing YUVA data with the given color values.
+MEDIA_EXPORT void FillYUVA(VideoFrame* frame,
+ uint8 y,
+ uint8 u,
+ uint8 v,
+ uint8 a);
+
// Creates a border in |frame| such that all pixels outside of
// |view_area| are black. The size and position of |view_area|
// must be even to align correctly with the color planes.
diff --git a/media/cast/DEPS b/media/cast/DEPS
index abee2864b3..e44afe05ab 100644
--- a/media/cast/DEPS
+++ b/media/cast/DEPS
@@ -3,6 +3,7 @@ include_rules = [
"+media",
"+net",
"+third_party/libyuv",
+ "+third_party/mt19937ar",
"+third_party/zlib",
"+ui/gfx",
]
diff --git a/media/cast/README b/media/cast/README
deleted file mode 100644
index 0930c1e012..0000000000
--- a/media/cast/README
+++ /dev/null
@@ -1,61 +0,0 @@
-This directory contains a RTP/RTCP library used for the Cast mirroring
-protocol. This library is specifically built for low latency purposes and
-enables Chrome to send real-time video and audio streams.
-
-CONTENTS
-
-cast/
- Build rules and top level source files and headers.
-
-cast/audio_receiver/
- Module for receiving and decodes audio RTP stream.
-
-cast/audio_sender/
- Module for encoding and sending audio RTP stream.
-
-cast/congestion_control/
- Bandwidth estimation and network congestion handling.
-
-cast/transport/pacing/
- Module for rate limiting data outflow.
-
-cast/rtcp/
- Module for handling RTCP messages.
-
-cast/rtp_common/
- Module for common code used for RTP messages.
-
-cast/rtp_receiver/
- Module for reciving RTP messages.
-
-cast/transport/rtp_sender/
- Module for sending RTP messages.
-
-cast/test/
- Module for test applications.
-
-cast/video_receiver/
- Module for receiving and decodes video RTP stream.
-
-cast/video_sender/
- Module for encoding and sending video RTP stream.
-
-DEPENDENCIES
-
-Content of this directory should only depend on:
-
-base/
- Provides base libraries and platform independent layer.
-
-net/
- Provides network capabilities.
-
-third_party/libvpx
- Provides video encoder.
-
-third_party/opus
- Provides audio encoder.
-
-OWNERS
-
-See OWNERS for ownership.
diff --git a/media/cast/cast.gyp b/media/cast/cast.gyp
index 5de8796079..fb1ee64e6c 100644
--- a/media/cast/cast.gyp
+++ b/media/cast/cast.gyp
@@ -22,6 +22,7 @@
'dependencies': [
'cast_logging_proto',
'<(DEPTH)/base/base.gyp:base',
+ '<(DEPTH)/crypto/crypto.gyp:crypto',
'<(DEPTH)/net/net.gyp:net',
],
'export_dependent_settings': [
@@ -33,8 +34,10 @@
'cast_defines.h',
'cast_environment.cc',
'cast_environment.h',
- 'base/clock_drift_smoother.cc',
- 'base/clock_drift_smoother.h',
+ 'common/clock_drift_smoother.cc',
+ 'common/clock_drift_smoother.h',
+ 'common/transport_encryption_handler.cc',
+ 'common/transport_encryption_handler.h',
'logging/encoding_event_subscriber.cc',
'logging/encoding_event_subscriber.h',
'logging/log_deserializer.cc',
@@ -57,15 +60,10 @@
'logging/simple_event_subscriber.h',
'logging/stats_event_subscriber.cc',
'logging/stats_event_subscriber.h',
- 'rtp_timestamp_helper.cc',
- 'rtp_timestamp_helper.h',
- 'transport/cast_transport_config.cc',
- 'transport/cast_transport_config.h',
- 'transport/cast_transport_defines.h',
- 'transport/cast_transport_sender.h',
], # source
},
{
+ # GN version: //media/cast/logging/proto
'target_name': 'cast_logging_proto',
'type': 'static_library',
'include_dirs': [
@@ -89,8 +87,7 @@
],
'dependencies': [
'cast_base',
- 'cast_rtcp',
- 'cast_transport',
+ 'cast_net',
'<(DEPTH)/base/base.gyp:base',
'<(DEPTH)/media/media.gyp:media',
'<(DEPTH)/media/media.gyp:shared_memory_support',
@@ -100,14 +97,6 @@
],
'sources': [
'cast_receiver.h',
- 'framer/cast_message_builder.cc',
- 'framer/cast_message_builder.h',
- 'framer/frame_buffer.cc',
- 'framer/frame_buffer.h',
- 'framer/frame_id_map.cc',
- 'framer/frame_id_map.h',
- 'framer/framer.cc',
- 'framer/framer.h',
'receiver/audio_decoder.cc',
'receiver/audio_decoder.h',
'receiver/cast_receiver_impl.cc',
@@ -116,39 +105,20 @@
'receiver/frame_receiver.h',
'receiver/video_decoder.cc',
'receiver/video_decoder.h',
- 'rtp_receiver/receiver_stats.cc',
- 'rtp_receiver/receiver_stats.h',
- 'rtp_receiver/rtp_receiver_defines.cc',
- 'rtp_receiver/rtp_receiver_defines.h',
- 'rtp_receiver/rtp_parser/rtp_parser.cc',
- 'rtp_receiver/rtp_parser/rtp_parser.h',
- ], # source
- },
- {
- 'target_name': 'cast_rtcp',
- 'type': 'static_library',
- 'include_dirs': [
- '<(DEPTH)/',
- ],
- 'dependencies': [
- 'cast_base',
- 'cast_transport',
- '<(DEPTH)/base/base.gyp:base',
- '<(DEPTH)/net/net.gyp:net',
- ],
- 'sources': [
- 'rtcp/rtcp_defines.cc',
- 'rtcp/rtcp_defines.h',
- 'rtcp/rtcp.h',
- 'rtcp/rtcp.cc',
- 'rtcp/rtcp_receiver.cc',
- 'rtcp/rtcp_receiver.h',
- 'rtcp/rtcp_sender.cc',
- 'rtcp/rtcp_sender.h',
- 'rtcp/rtcp_utility.cc',
- 'rtcp/rtcp_utility.h',
- 'rtcp/receiver_rtcp_event_subscriber.cc',
- 'rtcp/receiver_rtcp_event_subscriber.cc',
+ 'net/rtp/cast_message_builder.cc',
+ 'net/rtp/cast_message_builder.h',
+ 'net/rtp/frame_buffer.cc',
+ 'net/rtp/frame_buffer.h',
+ 'net/rtp/frame_id_map.cc',
+ 'net/rtp/frame_id_map.h',
+ 'net/rtp/framer.cc',
+ 'net/rtp/framer.h',
+ 'net/rtp/receiver_stats.cc',
+ 'net/rtp/receiver_stats.h',
+ 'net/rtp/rtp_parser.cc',
+ 'net/rtp/rtp_parser.h',
+ 'net/rtp/rtp_receiver_defines.cc',
+ 'net/rtp/rtp_receiver_defines.h',
], # source
},
{
@@ -159,39 +129,40 @@
],
'dependencies': [
'cast_base',
- 'cast_rtcp',
- 'cast_transport',
+ 'cast_net',
'<(DEPTH)/media/media.gyp:media',
'<(DEPTH)/media/media.gyp:shared_memory_support',
'<(DEPTH)/third_party/opus/opus.gyp:opus',
'<(DEPTH)/third_party/libvpx/libvpx.gyp:libvpx',
], # dependencies
'sources': [
- 'audio_sender/audio_encoder.h',
- 'audio_sender/audio_encoder.cc',
- 'audio_sender/audio_sender.h',
- 'audio_sender/audio_sender.cc',
'cast_sender.h',
'cast_sender_impl.cc',
'cast_sender_impl.h',
- 'congestion_control/congestion_control.h',
- 'congestion_control/congestion_control.cc',
- 'video_sender/codecs/vp8/vp8_encoder.cc',
- 'video_sender/codecs/vp8/vp8_encoder.h',
- 'video_sender/external_video_encoder.h',
- 'video_sender/external_video_encoder.cc',
- 'video_sender/fake_software_video_encoder.h',
- 'video_sender/fake_software_video_encoder.cc',
- 'video_sender/software_video_encoder.h',
- 'video_sender/video_encoder.h',
- 'video_sender/video_encoder_impl.h',
- 'video_sender/video_encoder_impl.cc',
- 'video_sender/video_sender.h',
- 'video_sender/video_sender.cc',
+ 'sender/audio_encoder.h',
+ 'sender/audio_encoder.cc',
+ 'sender/audio_sender.h',
+ 'sender/audio_sender.cc',
+ 'sender/congestion_control.h',
+ 'sender/congestion_control.cc',
+ 'sender/external_video_encoder.h',
+ 'sender/external_video_encoder.cc',
+ 'sender/fake_software_video_encoder.h',
+ 'sender/fake_software_video_encoder.cc',
+ 'sender/rtp_timestamp_helper.cc',
+ 'sender/rtp_timestamp_helper.h',
+ 'sender/software_video_encoder.h',
+ 'sender/video_encoder.h',
+ 'sender/video_encoder_impl.h',
+ 'sender/video_encoder_impl.cc',
+ 'sender/video_sender.h',
+ 'sender/video_sender.cc',
+ 'sender/vp8_encoder.cc',
+ 'sender/vp8_encoder.h',
], # source
},
{
- 'target_name': 'cast_transport',
+ 'target_name': 'cast_net',
'type': 'static_library',
'include_dirs': [
'<(DEPTH)/',
@@ -199,26 +170,38 @@
'dependencies': [
'cast_base',
'<(DEPTH)/base/base.gyp:base',
- '<(DEPTH)/crypto/crypto.gyp:crypto',
'<(DEPTH)/net/net.gyp:net',
],
'sources': [
- 'transport/cast_transport_sender_impl.cc',
- 'transport/cast_transport_sender_impl.h',
- 'transport/pacing/paced_sender.cc',
- 'transport/pacing/paced_sender.h',
- 'transport/rtcp/rtcp_builder.cc',
- 'transport/rtcp/rtcp_builder.h',
- 'transport/rtp_sender/packet_storage/packet_storage.cc',
- 'transport/rtp_sender/packet_storage/packet_storage.h',
- 'transport/rtp_sender/rtp_packetizer/rtp_packetizer.cc',
- 'transport/rtp_sender/rtp_packetizer/rtp_packetizer.h',
- 'transport/rtp_sender/rtp_sender.cc',
- 'transport/rtp_sender/rtp_sender.h',
- 'transport/transport/udp_transport.cc',
- 'transport/transport/udp_transport.h',
- 'transport/utility/transport_encryption_handler.cc',
- 'transport/utility/transport_encryption_handler.h',
+ 'net/cast_transport_config.cc',
+ 'net/cast_transport_config.h',
+ 'net/cast_transport_defines.h',
+ 'net/cast_transport_sender.h',
+ 'net/cast_transport_sender_impl.cc',
+ 'net/cast_transport_sender_impl.h',
+ 'net/pacing/paced_sender.cc',
+ 'net/pacing/paced_sender.h',
+ 'net/rtcp/receiver_rtcp_event_subscriber.cc',
+ 'net/rtcp/rtcp_builder.cc',
+ 'net/rtcp/rtcp_builder.h',
+ 'net/rtcp/rtcp_defines.cc',
+ 'net/rtcp/rtcp_defines.h',
+ 'net/rtcp/rtcp.h',
+ 'net/rtcp/rtcp.cc',
+ 'net/rtcp/rtcp_receiver.cc',
+ 'net/rtcp/rtcp_receiver.h',
+ 'net/rtcp/rtcp_sender.cc',
+ 'net/rtcp/rtcp_sender.h',
+ 'net/rtcp/rtcp_utility.cc',
+ 'net/rtcp/rtcp_utility.h',
+ 'net/rtp/packet_storage.cc',
+ 'net/rtp/packet_storage.h',
+ 'net/rtp/rtp_packetizer.cc',
+ 'net/rtp/rtp_packetizer.h',
+ 'net/rtp/rtp_sender.cc',
+ 'net/rtp/rtp_sender.h',
+ 'net/udp_transport.cc',
+ 'net/udp_transport.h',
], # source
},
],
diff --git a/media/cast/cast_config.cc b/media/cast/cast_config.cc
index 0e7953af01..f74121632c 100644
--- a/media/cast/cast_config.cc
+++ b/media/cast/cast_config.cc
@@ -22,9 +22,13 @@ namespace cast {
// these classes to centralize the logic?
VideoSenderConfig::VideoSenderConfig()
- : incoming_feedback_ssrc(0),
+ : ssrc(0),
+ incoming_feedback_ssrc(0),
rtcp_interval(kDefaultRtcpIntervalMs),
rtcp_mode(kRtcpReducedSize),
+ target_playout_delay(
+ base::TimeDelta::FromMilliseconds(kDefaultRtpMaxDelayMs)),
+ rtp_payload_type(0),
use_external_encoder(false),
width(0),
height(0),
@@ -36,17 +40,26 @@ VideoSenderConfig::VideoSenderConfig()
min_qp(kDefaultMinQp),
max_frame_rate(kDefaultMaxFrameRate),
max_number_of_video_buffers_used(kDefaultNumberOfVideoBuffers),
- codec(transport::kVp8),
+ codec(CODEC_VIDEO_VP8),
number_of_encode_threads(1) {}
+VideoSenderConfig::~VideoSenderConfig() {}
+
AudioSenderConfig::AudioSenderConfig()
- : incoming_feedback_ssrc(0),
+ : ssrc(0),
+ incoming_feedback_ssrc(0),
rtcp_interval(kDefaultRtcpIntervalMs),
rtcp_mode(kRtcpReducedSize),
+ target_playout_delay(
+ base::TimeDelta::FromMilliseconds(kDefaultRtpMaxDelayMs)),
+ rtp_payload_type(0),
use_external_encoder(false),
frequency(0),
channels(0),
- bitrate(0) {}
+ bitrate(0),
+ codec(CODEC_AUDIO_OPUS) {}
+
+AudioSenderConfig::~AudioSenderConfig() {}
FrameReceiverConfig::FrameReceiverConfig()
: feedback_ssrc(0),
@@ -57,7 +70,8 @@ FrameReceiverConfig::FrameReceiverConfig()
rtp_payload_type(0),
frequency(0),
channels(0),
- max_frame_rate(0) {}
+ max_frame_rate(0),
+ codec(CODEC_UNKNOWN) {}
FrameReceiverConfig::~FrameReceiverConfig() {}
diff --git a/media/cast/cast_config.h b/media/cast/cast_config.h
index ea25d6b6cf..c2e797b63a 100644
--- a/media/cast/cast_config.h
+++ b/media/cast/cast_config.h
@@ -14,51 +14,79 @@
#include "base/memory/ref_counted.h"
#include "base/memory/shared_memory.h"
#include "base/single_thread_task_runner.h"
+#include "base/time/time.h"
#include "media/cast/cast_defines.h"
-#include "media/cast/transport/cast_transport_config.h"
+#include "media/cast/net/cast_transport_config.h"
namespace media {
class VideoEncodeAccelerator;
namespace cast {
-enum RtcpMode {
- kRtcpCompound, // Compound RTCP mode is described by RFC 4585.
- kRtcpReducedSize, // Reduced-size RTCP mode is described by RFC 5506.
-};
-
// TODO(miu): Merge AudioSenderConfig and VideoSenderConfig and make their
// naming/documentation consistent with FrameReceiverConfig.
struct AudioSenderConfig {
AudioSenderConfig();
+ ~AudioSenderConfig();
+
+ // Identifier referring to the sender, used by the receiver.
+ uint32 ssrc;
- // The sender ssrc is in rtp_config.ssrc.
+ // The receiver's SSRC identifier.
uint32 incoming_feedback_ssrc;
int rtcp_interval;
std::string rtcp_c_name;
RtcpMode rtcp_mode;
- transport::RtpConfig rtp_config;
+ // The total amount of time between a frame's capture/recording on the sender
+ // and its playback on the receiver (i.e., shown to a user). This is fixed as
+ // a value large enough to give the system sufficient time to encode,
+ // transmit/retransmit, receive, decode, and render; given its run-time
+ // environment (sender/receiver hardware performance, network conditions,
+ // etc.).
+ base::TimeDelta target_playout_delay;
+
+ // RTP payload type enum: Specifies the type/encoding of frame data.
+ int rtp_payload_type;
bool use_external_encoder;
int frequency;
int channels;
int bitrate; // Set to <= 0 for "auto variable bitrate" (libopus knows best).
- transport::AudioCodec codec;
+ Codec codec;
+
+ // The AES crypto key and initialization vector. Each of these strings
+ // contains the data in binary form, of size kAesKeySize. If they are empty
+ // strings, crypto is not being used.
+ std::string aes_key;
+ std::string aes_iv_mask;
};
struct VideoSenderConfig {
VideoSenderConfig();
+ ~VideoSenderConfig();
- // The sender ssrc is in rtp_config.ssrc.
- uint32 incoming_feedback_ssrc;
+ // Identifier referring to the sender, used by the receiver.
+ uint32 ssrc;
+
+ // The receiver's SSRC identifier.
+ uint32 incoming_feedback_ssrc; // TODO(miu): Rename to receiver_ssrc.
int rtcp_interval;
std::string rtcp_c_name;
RtcpMode rtcp_mode;
- transport::RtpConfig rtp_config;
+ // The total amount of time between a frame's capture/recording on the sender
+ // and its playback on the receiver (i.e., shown to a user). This is fixed as
+ // a value large enough to give the system sufficient time to encode,
+ // transmit/retransmit, receive, decode, and render; given its run-time
+ // environment (sender/receiver hardware performance, network conditions,
+ // etc.).
+ base::TimeDelta target_playout_delay;
+
+ // RTP payload type enum: Specifies the type/encoding of frame data.
+ int rtp_payload_type;
bool use_external_encoder;
int width; // Incoming frames will be scaled to this size.
@@ -72,8 +100,14 @@ struct VideoSenderConfig {
int min_qp;
int max_frame_rate;
int max_number_of_video_buffers_used; // Max value depend on codec.
- transport::VideoCodec codec;
+ Codec codec;
int number_of_encode_threads;
+
+ // The AES crypto key and initialization vector. Each of these strings
+ // contains the data in binary form, of size kAesKeySize. If they are empty
+ // strings, crypto is not being used.
+ std::string aes_key;
+ std::string aes_iv_mask;
};
// TODO(miu): Naming and minor type changes are badly needed in a later CL.
@@ -127,11 +161,7 @@ struct FrameReceiverConfig {
// Codec used for the compression of signal data.
// TODO(miu): Merge the AudioCodec and VideoCodec enums into one so this union
// is not necessary.
- union MergedCodecPlaceholder {
- transport::AudioCodec audio;
- transport::VideoCodec video;
- MergedCodecPlaceholder() : audio(transport::kUnknownAudioCodec) {}
- } codec;
+ Codec codec;
// The AES crypto key and initialization vector. Each of these strings
// contains the data in binary form, of size kAesKeySize. If they are empty
@@ -140,9 +170,10 @@ struct FrameReceiverConfig {
std::string aes_iv_mask;
};
-// import from media::cast::transport
-typedef transport::Packet Packet;
-typedef transport::PacketList PacketList;
+// Import from media::cast.
+
+typedef Packet Packet;
+typedef PacketList PacketList;
typedef base::Callback<void(CastInitializationStatus)>
CastInitializationCallback;
diff --git a/media/cast/cast_defines.h b/media/cast/cast_defines.h
index 64b20c96da..07be3b3f9e 100644
--- a/media/cast/cast_defines.h
+++ b/media/cast/cast_defines.h
@@ -14,7 +14,7 @@
#include "base/compiler_specific.h"
#include "base/logging.h"
#include "base/time/time.h"
-#include "media/cast/transport/cast_transport_config.h"
+#include "media/cast/net/cast_transport_config.h"
namespace media {
namespace cast {
@@ -29,7 +29,6 @@ const uint32 kStartFrameId = UINT32_C(0xffffffff);
// frames.
const int kMaxUnackedFrames = 255;
-const size_t kMaxIpPacketSize = 1500;
const int kStartRttMs = 20;
const int64 kCastMessageUpdateIntervalMs = 33;
const int64 kNackRepeatIntervalMs = 30;
@@ -193,13 +192,6 @@ inline base::TimeDelta RtpDeltaToTimeDelta(int64 rtp_delta, int rtp_timebase) {
return rtp_delta * base::TimeDelta::FromSeconds(1) / rtp_timebase;
}
-inline uint32 GetVideoRtpTimestamp(const base::TimeTicks& time_ticks) {
- base::TimeTicks zero_time;
- base::TimeDelta recorded_delta = time_ticks - zero_time;
- // Timestamp is in 90 KHz for video.
- return static_cast<uint32>(recorded_delta.InMilliseconds() * 90);
-}
-
} // namespace cast
} // namespace media
diff --git a/media/cast/cast_receiver.h b/media/cast/cast_receiver.h
index a9d3edeb78..f57942231d 100644
--- a/media/cast/cast_receiver.h
+++ b/media/cast/cast_receiver.h
@@ -22,9 +22,7 @@ class VideoFrame;
namespace cast {
-namespace transport {
class PacketSender;
-}
// The following callbacks are used to deliver decoded audio/video frame data,
// the frame's corresponding play-out time, and a continuity flag.
@@ -44,7 +42,7 @@ typedef base::Callback<void(const scoped_refptr<media::VideoFrame>& video_frame,
// should examine the |frame_id| field to determine whether any frames have been
// dropped (i.e., frame_id should be incrementing by one each time). Note: A
// NULL pointer can be returned on error.
-typedef base::Callback<void(scoped_ptr<transport::EncodedFrame>)>
+typedef base::Callback<void(scoped_ptr<EncodedFrame>)>
ReceiveEncodedFrameCallback;
class CastReceiver {
@@ -53,13 +51,13 @@ class CastReceiver {
scoped_refptr<CastEnvironment> cast_environment,
const FrameReceiverConfig& audio_config,
const FrameReceiverConfig& video_config,
- transport::PacketSender* const packet_sender);
+ PacketSender* const packet_sender);
// All received RTP and RTCP packets for the call should be sent to this
// PacketReceiver. Can be called from any thread.
// TODO(hubbe): Replace with:
// virtual void ReceivePacket(scoped_ptr<Packet> packet) = 0;
- virtual transport::PacketReceiverCallback packet_receiver() = 0;
+ virtual PacketReceiverCallback packet_receiver() = 0;
// Polling interface to get audio and video frames from the CastReceiver. The
// the RequestDecodedXXXXXFrame() methods utilize internal software-based
diff --git a/media/cast/cast_sender.h b/media/cast/cast_sender.h
index eb3327ff3d..abe0a01713 100644
--- a/media/cast/cast_sender.h
+++ b/media/cast/cast_sender.h
@@ -19,7 +19,7 @@
#include "media/base/audio_bus.h"
#include "media/cast/cast_config.h"
#include "media/cast/cast_environment.h"
-#include "media/cast/transport/cast_transport_sender.h"
+#include "media/cast/net/cast_transport_sender.h"
namespace media {
class VideoFrame;
@@ -63,7 +63,7 @@ class CastSender {
public:
static scoped_ptr<CastSender> Create(
scoped_refptr<CastEnvironment> cast_environment,
- transport::CastTransportSender* const transport_sender);
+ CastTransportSender* const transport_sender);
virtual ~CastSender() {}
@@ -75,7 +75,7 @@ class CastSender {
// All RTCP packets for the session should be inserted to this object.
// This function and the callback must be called on the main thread.
- virtual transport::PacketReceiverCallback packet_receiver() = 0;
+ virtual PacketReceiverCallback packet_receiver() = 0;
// Initialize the audio stack. Must be called in order to send audio frames.
// Status of the initialization will be returned on cast_initialization_cb.
diff --git a/media/cast/cast_sender_impl.cc b/media/cast/cast_sender_impl.cc
index 361e4d8dc1..a120d3a930 100644
--- a/media/cast/cast_sender_impl.cc
+++ b/media/cast/cast_sender_impl.cc
@@ -1,6 +1,7 @@
// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+
#include "media/cast/cast_sender_impl.h"
#include "base/bind.h"
@@ -75,7 +76,7 @@ class LocalAudioFrameInput : public AudioFrameInput {
scoped_ptr<CastSender> CastSender::Create(
scoped_refptr<CastEnvironment> cast_environment,
- transport::CastTransportSender* const transport_sender) {
+ CastTransportSender* const transport_sender) {
CHECK(cast_environment);
return scoped_ptr<CastSender>(
new CastSenderImpl(cast_environment, transport_sender));
@@ -83,7 +84,7 @@ scoped_ptr<CastSender> CastSender::Create(
CastSenderImpl::CastSenderImpl(
scoped_refptr<CastEnvironment> cast_environment,
- transport::CastTransportSender* const transport_sender)
+ CastTransportSender* const transport_sender)
: cast_environment_(cast_environment),
transport_sender_(transport_sender),
weak_factory_(this) {
@@ -211,7 +212,7 @@ scoped_refptr<VideoFrameInput> CastSenderImpl::video_frame_input() {
return video_frame_input_;
}
-transport::PacketReceiverCallback CastSenderImpl::packet_receiver() {
+PacketReceiverCallback CastSenderImpl::packet_receiver() {
return base::Bind(&CastSenderImpl::ReceivedPacket,
weak_factory_.GetWeakPtr());
}
diff --git a/media/cast/cast_sender_impl.h b/media/cast/cast_sender_impl.h
index d09a869712..bc320f7b54 100644
--- a/media/cast/cast_sender_impl.h
+++ b/media/cast/cast_sender_impl.h
@@ -6,12 +6,12 @@
#include "base/memory/ref_counted.h"
#include "base/memory/scoped_ptr.h"
-#include "media/cast/audio_sender/audio_sender.h"
#include "media/cast/cast_config.h"
#include "media/cast/cast_defines.h"
#include "media/cast/cast_environment.h"
#include "media/cast/cast_sender.h"
-#include "media/cast/video_sender/video_sender.h"
+#include "media/cast/sender/audio_sender.h"
+#include "media/cast/sender/video_sender.h"
namespace media {
class VideoFrame;
@@ -25,7 +25,7 @@ class VideoSender;
class CastSenderImpl : public CastSender {
public:
CastSenderImpl(scoped_refptr<CastEnvironment> cast_environment,
- transport::CastTransportSender* const transport_sender);
+ CastTransportSender* const transport_sender);
virtual void InitializeAudio(
const AudioSenderConfig& audio_config,
@@ -42,7 +42,7 @@ class CastSenderImpl : public CastSender {
virtual scoped_refptr<AudioFrameInput> audio_frame_input() OVERRIDE;
virtual scoped_refptr<VideoFrameInput> video_frame_input() OVERRIDE;
- virtual transport::PacketReceiverCallback packet_receiver() OVERRIDE;
+ virtual PacketReceiverCallback packet_receiver() OVERRIDE;
private:
void ReceivedPacket(scoped_ptr<Packet> packet);
@@ -55,7 +55,7 @@ class CastSenderImpl : public CastSender {
scoped_refptr<CastEnvironment> cast_environment_;
// The transport sender is owned by the owner of the CastSender, and should be
// valid throughout the lifetime of the CastSender.
- transport::CastTransportSender* const transport_sender_;
+ CastTransportSender* const transport_sender_;
uint32 ssrc_of_audio_sender_;
uint32 ssrc_of_video_sender_;
diff --git a/media/cast/cast_testing.gypi b/media/cast/cast_testing.gypi
index aef0fbd8c3..97e831e42b 100644
--- a/media/cast/cast_testing.gypi
+++ b/media/cast/cast_testing.gypi
@@ -11,19 +11,25 @@
'<(DEPTH)/',
],
'dependencies': [
+ 'cast_net',
'cast_receiver',
- 'cast_transport',
'<(DEPTH)/testing/gtest.gyp:gtest',
+ '<(DEPTH)/third_party/ffmpeg/ffmpeg.gyp:ffmpeg',
'<(DEPTH)/third_party/libyuv/libyuv.gyp:libyuv',
+ '<(DEPTH)/third_party/mt19937ar/mt19937ar.gyp:mt19937ar',
'<(DEPTH)/ui/gfx/gfx.gyp:gfx_geometry',
],
'sources': [
+ 'test/fake_media_source.cc',
+ 'test/fake_media_source.h',
'test/fake_single_thread_task_runner.cc',
'test/fake_single_thread_task_runner.h',
'test/skewed_single_thread_task_runner.cc',
'test/skewed_single_thread_task_runner.h',
'test/skewed_tick_clock.cc',
'test/skewed_tick_clock.h',
+ 'test/loopback_transport.cc',
+ 'test/loopback_transport.h',
'test/utility/audio_utility.cc',
'test/utility/audio_utility.h',
'test/utility/barcode.cc',
@@ -52,14 +58,13 @@
],
'dependencies': [
'cast_base',
+ 'cast_net',
'cast_receiver',
- 'cast_rtcp',
'cast_sender',
'cast_test_utility',
# Not a true dependency. This is here to make sure the CQ can verify
# the tools compile correctly.
'cast_tools',
- 'cast_transport',
'<(DEPTH)/base/base.gyp:test_support_base',
'<(DEPTH)/net/net.gyp:net',
'<(DEPTH)/testing/gmock.gyp:gmock',
@@ -67,12 +72,6 @@
],
'sources': [
'<(DEPTH)/media/base/run_all_unittests.cc',
- 'audio_sender/audio_encoder_unittest.cc',
- 'audio_sender/audio_sender_unittest.cc',
- 'congestion_control/congestion_control_unittest.cc',
- 'framer/cast_message_builder_unittest.cc',
- 'framer/frame_buffer_unittest.cc',
- 'framer/framer_unittest.cc',
'logging/encoding_event_subscriber_unittest.cc',
'logging/serialize_deserialize_test.cc',
'logging/logging_impl_unittest.cc',
@@ -80,26 +79,44 @@
'logging/receiver_time_offset_estimator_impl_unittest.cc',
'logging/simple_event_subscriber_unittest.cc',
'logging/stats_event_subscriber_unittest.cc',
+ 'net/cast_transport_sender_impl_unittest.cc',
+ 'net/pacing/mock_paced_packet_sender.cc',
+ 'net/pacing/mock_paced_packet_sender.h',
+ 'net/pacing/paced_sender_unittest.cc',
+ 'net/rtcp/mock_rtcp_receiver_feedback.cc',
+ 'net/rtcp/mock_rtcp_receiver_feedback.h',
+ 'net/rtcp/mock_rtcp_sender_feedback.cc',
+ 'net/rtcp/mock_rtcp_sender_feedback.h',
+ 'net/rtcp/rtcp_receiver_unittest.cc',
+ 'net/rtcp/rtcp_sender_unittest.cc',
+ 'net/rtcp/rtcp_unittest.cc',
+ 'net/rtcp/receiver_rtcp_event_subscriber_unittest.cc',
+# TODO(miu): The following two are test utility modules. Rename/move the files.
+ 'net/rtcp/test_rtcp_packet_builder.cc',
+ 'net/rtcp/test_rtcp_packet_builder.h',
+ 'net/rtp/cast_message_builder_unittest.cc',
+ 'net/rtp/frame_buffer_unittest.cc',
+ 'net/rtp/framer_unittest.cc',
+ 'net/rtp/mock_rtp_payload_feedback.cc',
+ 'net/rtp/mock_rtp_payload_feedback.h',
+ 'net/rtp/packet_storage_unittest.cc',
+ 'net/rtp/receiver_stats_unittest.cc',
+ 'net/rtp/rtp_header_parser.cc',
+ 'net/rtp/rtp_header_parser.h',
+ 'net/rtp/rtp_packet_builder.cc',
+ 'net/rtp/rtp_parser_unittest.cc',
+ 'net/rtp/rtp_packetizer_unittest.cc',
+ 'net/rtp/rtp_receiver_defines.h',
+ 'net/udp_transport_unittest.cc',
'receiver/audio_decoder_unittest.cc',
'receiver/frame_receiver_unittest.cc',
'receiver/video_decoder_unittest.cc',
- 'rtcp/mock_rtcp_receiver_feedback.cc',
- 'rtcp/mock_rtcp_receiver_feedback.h',
- 'rtcp/mock_rtcp_sender_feedback.cc',
- 'rtcp/mock_rtcp_sender_feedback.h',
- 'rtcp/rtcp_receiver_unittest.cc',
- 'rtcp/rtcp_sender_unittest.cc',
- 'rtcp/rtcp_unittest.cc',
- 'rtcp/receiver_rtcp_event_subscriber_unittest.cc',
-# TODO(miu): The following two are test utility modules. Rename/move the files.
- 'rtcp/test_rtcp_packet_builder.cc',
- 'rtcp/test_rtcp_packet_builder.h',
- 'rtp_receiver/rtp_receiver_defines.h',
- 'rtp_receiver/mock_rtp_payload_feedback.cc',
- 'rtp_receiver/mock_rtp_payload_feedback.h',
- 'rtp_receiver/receiver_stats_unittest.cc',
- 'rtp_receiver/rtp_parser/test/rtp_packet_builder.cc',
- 'rtp_receiver/rtp_parser/rtp_parser_unittest.cc',
+ 'sender/audio_encoder_unittest.cc',
+ 'sender/audio_sender_unittest.cc',
+ 'sender/congestion_control_unittest.cc',
+ 'sender/external_video_encoder_unittest.cc',
+ 'sender/video_encoder_impl_unittest.cc',
+ 'sender/video_sender_unittest.cc',
'test/end2end_unittest.cc',
'test/fake_receiver_time_offset_estimator.cc',
'test/fake_receiver_time_offset_estimator.h',
@@ -109,18 +126,6 @@
'test/fake_video_encode_accelerator.h',
'test/utility/audio_utility_unittest.cc',
'test/utility/barcode_unittest.cc',
- 'transport/cast_transport_sender_impl_unittest.cc',
- 'transport/pacing/mock_paced_packet_sender.cc',
- 'transport/pacing/mock_paced_packet_sender.h',
- 'transport/pacing/paced_sender_unittest.cc',
- 'transport/rtp_sender/packet_storage/packet_storage_unittest.cc',
- 'transport/rtp_sender/rtp_packetizer/rtp_packetizer_unittest.cc',
- 'transport/rtp_sender/rtp_packetizer/test/rtp_header_parser.cc',
- 'transport/rtp_sender/rtp_packetizer/test/rtp_header_parser.h',
- 'transport/transport/udp_transport_unittest.cc',
- 'video_sender/external_video_encoder_unittest.cc',
- 'video_sender/video_encoder_impl_unittest.cc',
- 'video_sender/video_sender_unittest.cc',
], # source
},
{
@@ -131,11 +136,10 @@
],
'dependencies': [
'cast_base',
+ 'cast_net',
'cast_receiver',
- 'cast_rtcp',
'cast_sender',
'cast_test_utility',
- 'cast_transport',
'<(DEPTH)/base/base.gyp:test_support_base',
'<(DEPTH)/net/net.gyp:net',
'<(DEPTH)/testing/gtest.gyp:gtest',
@@ -167,6 +171,7 @@
'dependencies': [
'cast_receiver_app',
'cast_sender_app',
+ 'cast_simulator',
'udp_proxy',
],
},
@@ -178,9 +183,9 @@
],
'dependencies': [
'cast_base',
+ 'cast_net',
'cast_receiver',
'cast_test_utility',
- 'cast_transport',
'<(DEPTH)/net/net.gyp:net_test_support',
'<(DEPTH)/media/media.gyp:media',
'<(DEPTH)/testing/gtest.gyp:gtest',
@@ -211,9 +216,9 @@
],
'dependencies': [
'cast_base',
+ 'cast_net',
'cast_sender',
'cast_test_utility',
- 'cast_transport',
'<(DEPTH)/net/net.gyp:net_test_support',
'<(DEPTH)/media/media.gyp:media',
'<(DEPTH)/testing/gtest.gyp:gtest',
@@ -226,6 +231,45 @@
],
},
{
+ 'target_name': 'cast_simulator',
+ 'type': 'executable',
+ 'include_dirs': [
+ '<(DEPTH)/',
+ ],
+ 'dependencies': [
+ 'cast_base',
+ 'cast_net',
+ 'cast_network_model_proto',
+ 'cast_sender',
+ 'cast_test_utility',
+ '<(DEPTH)/net/net.gyp:net_test_support',
+ '<(DEPTH)/media/media.gyp:media',
+ '<(DEPTH)/testing/gtest.gyp:gtest',
+ '<(DEPTH)/third_party/ffmpeg/ffmpeg.gyp:ffmpeg',
+ '<(DEPTH)/third_party/opus/opus.gyp:opus',
+ '<(DEPTH)/ui/gfx/gfx.gyp:gfx_geometry',
+ ],
+ 'sources': [
+ '<(DEPTH)/media/cast/test/simulator.cc',
+ ],
+ },
+ {
+ # GN version: //media/cast/test/proto
+ 'target_name': 'cast_network_model_proto',
+ 'type': 'static_library',
+ 'include_dirs': [
+ '<(DEPTH)/',
+ ],
+ 'sources': [
+ 'test/proto/network_simulation_model.proto',
+ ],
+ 'variables': {
+ 'proto_in_dir': 'test/proto',
+ 'proto_out_dir': 'media/cast/test/proto',
+ },
+ 'includes': ['../../build/protoc.gypi'],
+ },
+ {
'target_name': 'generate_barcode_video',
'type': 'executable',
'include_dirs': [
@@ -248,8 +292,8 @@
],
'dependencies': [
'cast_base',
+ 'cast_net',
'cast_test_utility',
- 'cast_transport',
'<(DEPTH)/base/base.gyp:base',
'<(DEPTH)/media/media.gyp:media',
],
diff --git a/media/cast/base/clock_drift_smoother.cc b/media/cast/common/clock_drift_smoother.cc
index ca0380533e..aff9a396de 100644
--- a/media/cast/base/clock_drift_smoother.cc
+++ b/media/cast/common/clock_drift_smoother.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/cast/base/clock_drift_smoother.h"
+#include "media/cast/common/clock_drift_smoother.h"
#include "base/logging.h"
diff --git a/media/cast/base/clock_drift_smoother.h b/media/cast/common/clock_drift_smoother.h
index 67de4cb51a..0511da9f3d 100644
--- a/media/cast/base/clock_drift_smoother.h
+++ b/media/cast/common/clock_drift_smoother.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_CAST_BASE_CLOCK_DRIFT_SMOOTHER_H_
-#define MEDIA_CAST_BASE_CLOCK_DRIFT_SMOOTHER_H_
+#ifndef MEDIA_CAST_COMMON_CLOCK_DRIFT_SMOOTHER_H_
+#define MEDIA_CAST_COMMON_CLOCK_DRIFT_SMOOTHER_H_
#include "base/time/time.h"
@@ -49,4 +49,4 @@ class ClockDriftSmoother {
} // namespace cast
} // namespace media
-#endif // MEDIA_CAST_BASE_CLOCK_DRIFT_SMOOTHER_H_
+#endif // MEDIA_CAST_COMMON_CLOCK_DRIFT_SMOOTHER_H_
diff --git a/media/cast/transport/utility/transport_encryption_handler.cc b/media/cast/common/transport_encryption_handler.cc
index 89db2cf95b..54a43e8b52 100644
--- a/media/cast/transport/utility/transport_encryption_handler.cc
+++ b/media/cast/common/transport_encryption_handler.cc
@@ -2,32 +2,55 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/cast/transport/utility/transport_encryption_handler.h"
+#include "media/cast/common/transport_encryption_handler.h"
#include "base/logging.h"
#include "crypto/encryptor.h"
#include "crypto/symmetric_key.h"
-#include "media/cast/transport/cast_transport_defines.h"
+#include "media/cast/net/cast_transport_defines.h"
+
+namespace {
+
+// Crypto.
+const size_t kAesBlockSize = 16;
+const size_t kAesKeySize = 16;
+
+std::string GetAesNonce(uint32 frame_id, const std::string& iv_mask) {
+ std::string aes_nonce(kAesBlockSize, 0);
+
+ // Serializing frame_id in big-endian order (aes_nonce[8] is the most
+ // significant byte of frame_id).
+ aes_nonce[11] = frame_id & 0xff;
+ aes_nonce[10] = (frame_id >> 8) & 0xff;
+ aes_nonce[9] = (frame_id >> 16) & 0xff;
+ aes_nonce[8] = (frame_id >> 24) & 0xff;
+
+ for (size_t i = 0; i < kAesBlockSize; ++i) {
+ aes_nonce[i] ^= iv_mask[i];
+ }
+ return aes_nonce;
+}
+
+} // namespace
namespace media {
namespace cast {
-namespace transport {
TransportEncryptionHandler::TransportEncryptionHandler()
- : key_(), encryptor_(), iv_mask_(), initialized_(false) {}
+ : key_(), encryptor_(), iv_mask_(), is_activated_(false) {}
TransportEncryptionHandler::~TransportEncryptionHandler() {}
bool TransportEncryptionHandler::Initialize(std::string aes_key,
std::string aes_iv_mask) {
- initialized_ = false;
+ is_activated_ = false;
if (aes_iv_mask.size() == kAesKeySize && aes_key.size() == kAesKeySize) {
iv_mask_ = aes_iv_mask;
key_.reset(
crypto::SymmetricKey::Import(crypto::SymmetricKey::AES, aes_key));
encryptor_.reset(new crypto::Encryptor());
encryptor_->Init(key_.get(), crypto::Encryptor::CTR, std::string());
- initialized_ = true;
+ is_activated_ = true;
} else if (aes_iv_mask.size() != 0 || aes_key.size() != 0) {
DCHECK_EQ(aes_iv_mask.size(), 0u)
<< "Invalid Crypto configuration: aes_iv_mask.size";
@@ -41,7 +64,7 @@ bool TransportEncryptionHandler::Initialize(std::string aes_key,
bool TransportEncryptionHandler::Encrypt(uint32 frame_id,
const base::StringPiece& data,
std::string* encrypted_data) {
- if (!initialized_)
+ if (!is_activated_)
return false;
if (!encryptor_->SetCounter(GetAesNonce(frame_id, iv_mask_))) {
NOTREACHED() << "Failed to set counter";
@@ -57,10 +80,10 @@ bool TransportEncryptionHandler::Encrypt(uint32 frame_id,
bool TransportEncryptionHandler::Decrypt(uint32 frame_id,
const base::StringPiece& ciphertext,
std::string* plaintext) {
- if (!initialized_) {
+ if (!is_activated_) {
return false;
}
- if (!encryptor_->SetCounter(transport::GetAesNonce(frame_id, iv_mask_))) {
+ if (!encryptor_->SetCounter(GetAesNonce(frame_id, iv_mask_))) {
NOTREACHED() << "Failed to set counter";
return false;
}
@@ -71,6 +94,5 @@ bool TransportEncryptionHandler::Decrypt(uint32 frame_id,
return true;
}
-} // namespace transport
} // namespace cast
} // namespace media
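
Note on the hunk above: GetAesNonce() now lives in an anonymous namespace next to its only caller, TransportEncryptionHandler. As a quick reference, here is a self-contained sketch of the same CTR-nonce construction (plain C++, standard library only; the wrapping main() and the sample IV mask are illustrative, not part of the patch):

// Self-contained sketch of the nonce construction moved in the hunk above.
// std::string mirrors the Chromium code; main() is illustrative only.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <string>

namespace {

const size_t kAesBlockSize = 16;

std::string GetAesNonce(uint32_t frame_id, const std::string& iv_mask) {
  std::string aes_nonce(kAesBlockSize, 0);
  // frame_id is serialized big-endian into bytes 8..11 (byte 8 is the MSB).
  aes_nonce[11] = frame_id & 0xff;
  aes_nonce[10] = (frame_id >> 8) & 0xff;
  aes_nonce[9] = (frame_id >> 16) & 0xff;
  aes_nonce[8] = (frame_id >> 24) & 0xff;
  // Every byte is then XORed with the per-session IV mask.
  for (size_t i = 0; i < kAesBlockSize; ++i)
    aes_nonce[i] ^= iv_mask[i];
  return aes_nonce;
}

}  // namespace

int main() {
  const std::string iv_mask(kAesBlockSize, '\x5a');  // Illustrative mask value.
  const std::string nonce = GetAesNonce(0x01020304u, iv_mask);
  for (size_t i = 0; i < nonce.size(); ++i)
    std::printf("%02x", static_cast<unsigned int>(static_cast<unsigned char>(nonce[i])));
  std::printf("\n");
  return 0;
}
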
diff --git a/media/cast/transport/utility/transport_encryption_handler.h b/media/cast/common/transport_encryption_handler.h
index 06d0e3f34d..d4798dc78b 100644
--- a/media/cast/transport/utility/transport_encryption_handler.h
+++ b/media/cast/common/transport_encryption_handler.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_CAST_TRANSPORT_TRANSPORT_UTILITY_ENCRYPTION_HANDLER_H_
-#define MEDIA_CAST_TRANSPORT_TRANSPORT_UTILITY_ENCRYPTION_HANDLER_H_
+#ifndef MEDIA_CAST_COMMON_TRANSPORT_ENCRYPTION_HANDLER_H_
+#define MEDIA_CAST_COMMON_TRANSPORT_ENCRYPTION_HANDLER_H_
// Helper class to handle encryption for the Cast Transport library.
#include <string>
@@ -20,7 +20,6 @@ class SymmetricKey;
namespace media {
namespace cast {
-namespace transport {
class TransportEncryptionHandler : public base::NonThreadSafe {
public:
@@ -37,22 +36,18 @@ class TransportEncryptionHandler : public base::NonThreadSafe {
const base::StringPiece& ciphertext,
std::string* plaintext);
- // TODO(miu): This naming is very misleading. It should be called
- // is_activated() since Initialize() without keys (i.e., cypto is disabled)
- // may have succeeded.
- bool initialized() const { return initialized_; }
+ bool is_activated() const { return is_activated_; }
private:
scoped_ptr<crypto::SymmetricKey> key_;
scoped_ptr<crypto::Encryptor> encryptor_;
std::string iv_mask_;
- bool initialized_;
+ bool is_activated_;
DISALLOW_COPY_AND_ASSIGN(TransportEncryptionHandler);
};
-} // namespace transport
} // namespace cast
} // namespace media
-#endif // MEDIA_CAST_TRANSPORT_TRANSPORT_UTILITY_ENCRYPTION_HANDLER_H_
+#endif // MEDIA_CAST_COMMON_TRANSPORT_ENCRYPTION_HANDLER_H_
diff --git a/media/cast/logging/proto/BUILD.gn b/media/cast/logging/proto/BUILD.gn
new file mode 100644
index 0000000000..c9fb89bbae
--- /dev/null
+++ b/media/cast/logging/proto/BUILD.gn
@@ -0,0 +1,22 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//third_party/protobuf/proto_library.gni")
+
+# GYP version: media/cast/cast.gyp:cast_logging_proto
+source_set("proto") {
+ sources = [
+ "proto_utils.cc",
+ ]
+ deps = [
+ ":cast_logging_proto",
+ ]
+}
+
+proto_library("cast_logging_proto") {
+ visibility = ":proto"
+ sources = [
+ "raw_events.proto",
+ ]
+}
diff --git a/media/cast/net/DEPS b/media/cast/net/DEPS
new file mode 100644
index 0000000000..3fa74fa027
--- /dev/null
+++ b/media/cast/net/DEPS
@@ -0,0 +1,16 @@
+include_rules = [
+ "-media/cast",
+ "+media/cast/cast_config.h",
+ "+media/cast/cast_defines.h",
+ "+media/cast/cast_environment.h",
+ "+media/cast/common",
+ "+media/cast/logging",
+ "+media/cast/net",
+ "+net",
+]
+
+specific_include_rules = {
+ ".*unittest.cc": [
+ "+media/cast/test",
+ ],
+}
diff --git a/media/cast/transport/cast_transport_config.cc b/media/cast/net/cast_transport_config.cc
index 16e9034713..ae8ec60551 100644
--- a/media/cast/transport/cast_transport_config.cc
+++ b/media/cast/net/cast_transport_config.cc
@@ -2,37 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/cast/transport/cast_transport_config.h"
+#include "media/cast/net/cast_transport_config.h"
namespace media {
namespace cast {
-namespace transport {
-
-namespace {
-const int kDefaultRtpMaxDelayMs = 100;
-} // namespace
-
-RtpConfig::RtpConfig()
- : ssrc(0),
- max_delay_ms(kDefaultRtpMaxDelayMs),
- payload_type(0) {}
-
-RtpConfig::~RtpConfig() {}
CastTransportRtpConfig::CastTransportRtpConfig()
- : max_outstanding_frames(-1) {}
+ : ssrc(0), rtp_payload_type(0), stored_frames(0) {}
CastTransportRtpConfig::~CastTransportRtpConfig() {}
-CastTransportAudioConfig::CastTransportAudioConfig()
- : codec(kOpus), frequency(0), channels(0) {}
-
-CastTransportAudioConfig::~CastTransportAudioConfig() {}
-
-CastTransportVideoConfig::CastTransportVideoConfig() : codec(kVp8) {}
-
-CastTransportVideoConfig::~CastTransportVideoConfig() {}
-
EncodedFrame::EncodedFrame()
: dependency(UNKNOWN_DEPENDENCY),
frame_id(0),
@@ -77,6 +56,5 @@ SendRtcpFromRtpSenderData::SendRtcpFromRtpSenderData()
: packet_type_flags(0), sending_ssrc(0) {}
SendRtcpFromRtpSenderData::~SendRtcpFromRtpSenderData() {}
-} // namespace transport
} // namespace cast
} // namespace media
diff --git a/media/cast/transport/cast_transport_config.h b/media/cast/net/cast_transport_config.h
index 96b771acb9..db87b8c9fc 100644
--- a/media/cast/transport/cast_transport_config.h
+++ b/media/cast/net/cast_transport_config.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_CAST_TRANSPORT_CAST_TRANSPORT_CONFIG_H_
-#define MEDIA_CAST_TRANSPORT_CAST_TRANSPORT_CONFIG_H_
+#ifndef MEDIA_CAST_NET_CAST_TRANSPORT_CONFIG_H_
+#define MEDIA_CAST_NET_CAST_TRANSPORT_CONFIG_H_
#include <string>
#include <vector>
@@ -13,66 +13,45 @@
#include "base/memory/linked_ptr.h"
#include "base/memory/ref_counted.h"
#include "base/stl_util.h"
-#include "media/cast/transport/cast_transport_defines.h"
-#include "net/base/ip_endpoint.h"
+#include "media/cast/net/cast_transport_defines.h"
namespace media {
namespace cast {
-namespace transport {
enum RtcpMode {
kRtcpCompound, // Compound RTCP mode is described by RFC 4585.
kRtcpReducedSize, // Reduced-size RTCP mode is described by RFC 5506.
};
-enum VideoCodec {
- kUnknownVideoCodec,
- kFakeSoftwareVideo,
- kVp8,
- kH264,
- kVideoCodecLast = kH264
-};
-
-enum AudioCodec {
- kUnknownAudioCodec,
- kOpus,
- kPcm16,
- kAudioCodecLast = kPcm16
-};
-
-struct RtpConfig {
- RtpConfig();
- ~RtpConfig();
- uint32 ssrc;
- int max_delay_ms;
- int payload_type;
- std::string aes_key; // Binary string of size kAesKeySize.
- std::string aes_iv_mask; // Binary string of size kAesBlockSize.
+enum Codec {
+ CODEC_UNKNOWN,
+ CODEC_AUDIO_OPUS,
+ CODEC_AUDIO_PCM16,
+ CODEC_VIDEO_FAKE,
+ CODEC_VIDEO_VP8,
+ CODEC_VIDEO_H264,
+ CODEC_LAST = CODEC_VIDEO_H264
};
struct CastTransportRtpConfig {
CastTransportRtpConfig();
~CastTransportRtpConfig();
- RtpConfig config;
- int max_outstanding_frames;
-};
-struct CastTransportAudioConfig {
- CastTransportAudioConfig();
- ~CastTransportAudioConfig();
+  // Identifier referring to this sender.
+ uint32 ssrc;
- CastTransportRtpConfig rtp;
- AudioCodec codec;
- int frequency;
- int channels;
-};
+ // RTP payload type enum: Specifies the type/encoding of frame data.
+ int rtp_payload_type;
-struct CastTransportVideoConfig {
- CastTransportVideoConfig();
- ~CastTransportVideoConfig();
+  // The number of most-recent frames that must be stored in the transport
+  // layer to facilitate retransmissions.
+ int stored_frames;
- CastTransportRtpConfig rtp;
- VideoCodec codec;
+ // The AES crypto key and initialization vector. Each of these strings
+ // contains the data in binary form, of size kAesKeySize. If they are empty
+ // strings, crypto is not being used.
+ std::string aes_key;
+ std::string aes_iv_mask;
};
// A combination of metadata and data for one encoded frame. This can contain
@@ -214,8 +193,7 @@ inline bool operator==(RtcpSenderInfo lhs, RtcpSenderInfo rhs) {
lhs.send_octet_count == rhs.send_octet_count;
}
-} // namespace transport
} // namespace cast
} // namespace media
-#endif // MEDIA_CAST_TRANSPORT_CAST_TRANSPORT_CONFIG_H_
+#endif // MEDIA_CAST_NET_CAST_TRANSPORT_CONFIG_H_
diff --git a/media/cast/transport/cast_transport_defines.h b/media/cast/net/cast_transport_defines.h
index a34f7c539a..b2020f33af 100644
--- a/media/cast/transport/cast_transport_defines.h
+++ b/media/cast/net/cast_transport_defines.h
@@ -1,9 +1,9 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_CAST_TRANSPORT_CAST_TRANSPORT_DEFINES_H_
-#define MEDIA_CAST_TRANSPORT_CAST_TRANSPORT_DEFINES_H_
+#ifndef MEDIA_CAST_NET_CAST_TRANSPORT_DEFINES_H_
+#define MEDIA_CAST_NET_CAST_TRANSPORT_DEFINES_H_
#include <stdint.h>
@@ -16,7 +16,6 @@
namespace media {
namespace cast {
-namespace transport {
// TODO(mikhal): Implement and add more types.
enum CastTransportStatus {
@@ -35,26 +34,6 @@ typedef std::set<uint16> PacketIdSet;
// Each uint8 represents one cast frame.
typedef std::map<uint8, PacketIdSet> MissingFramesAndPacketsMap;
-// Crypto.
-const size_t kAesBlockSize = 16;
-const size_t kAesKeySize = 16;
-
-inline std::string GetAesNonce(uint32 frame_id, const std::string& iv_mask) {
- std::string aes_nonce(kAesBlockSize, 0);
-
- // Serializing frame_id in big-endian order (aes_nonce[8] is the most
- // significant byte of frame_id).
- aes_nonce[11] = frame_id & 0xff;
- aes_nonce[10] = (frame_id >> 8) & 0xff;
- aes_nonce[9] = (frame_id >> 16) & 0xff;
- aes_nonce[8] = (frame_id >> 24) & 0xff;
-
- for (size_t i = 0; i < kAesBlockSize; ++i) {
- aes_nonce[i] ^= iv_mask[i];
- }
- return aes_nonce;
-}
-
// Rtcp defines.
enum RtcpPacketFields {
@@ -162,8 +141,7 @@ inline uint32 GetVideoRtpTimestamp(const base::TimeTicks& time_ticks) {
return static_cast<uint32>(recorded_delta.InMilliseconds() * 90);
}
-} // namespace transport
} // namespace cast
} // namespace media
-#endif // MEDIA_CAST_TRANSPORT_CAST_TRANSPORT_DEFINES_H_
+#endif // MEDIA_CAST_NET_CAST_TRANSPORT_DEFINES_H_
diff --git a/media/cast/transport/cast_transport_sender.h b/media/cast/net/cast_transport_sender.h
index e88f2f4f09..3f8a119655 100644
--- a/media/cast/transport/cast_transport_sender.h
+++ b/media/cast/net/cast_transport_sender.h
@@ -1,4 +1,4 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -20,8 +20,8 @@
// This also works when the CastSender acts as a receiver for the RTCP packets
// due to the weak pointers in the ReceivedPacket method in cast_sender_impl.cc.
-#ifndef MEDIA_CAST_TRANSPORT_CAST_TRANSPORT_SENDER_H_
-#define MEDIA_CAST_TRANSPORT_CAST_TRANSPORT_SENDER_H_
+#ifndef MEDIA_CAST_NET_CAST_TRANSPORT_SENDER_H_
+#define MEDIA_CAST_NET_CAST_TRANSPORT_SENDER_H_
#include "base/basictypes.h"
#include "base/callback.h"
@@ -29,8 +29,9 @@
#include "base/threading/non_thread_safe.h"
#include "base/time/tick_clock.h"
#include "media/cast/logging/logging_defines.h"
-#include "media/cast/transport/cast_transport_config.h"
-#include "media/cast/transport/cast_transport_defines.h"
+#include "media/cast/net/cast_transport_config.h"
+#include "media/cast/net/cast_transport_defines.h"
+#include "net/base/ip_endpoint.h"
namespace net {
class NetLog;
@@ -38,7 +39,6 @@ class NetLog;
namespace media {
namespace cast {
-namespace transport {
// Following the initialization of either audio or video an initialization
// status will be sent via this callback.
@@ -65,9 +65,8 @@ class CastTransportSender : public base::NonThreadSafe {
// Audio/Video initialization.
// Encoded frames cannot be transmitted until the relevant initialize method
// is called. Usually called by CastSender.
- virtual void InitializeAudio(const CastTransportAudioConfig& config) = 0;
-
- virtual void InitializeVideo(const CastTransportVideoConfig& config) = 0;
+ virtual void InitializeAudio(const CastTransportRtpConfig& config) = 0;
+ virtual void InitializeVideo(const CastTransportRtpConfig& config) = 0;
// Sets the Cast packet receiver. Should be called after creation on the
// Cast sender. Packets won't be received until this function is called.
@@ -106,8 +105,7 @@ class CastTransportSender : public base::NonThreadSafe {
base::TimeDelta dedupe_window) = 0;
};
-} // namespace transport
} // namespace cast
} // namespace media
-#endif // MEDIA_CAST_TRANSPORT_CAST_TRANSPORT_SENDER_H_
+#endif // MEDIA_CAST_NET_CAST_TRANSPORT_SENDER_H_
diff --git a/media/cast/transport/cast_transport_sender_impl.cc b/media/cast/net/cast_transport_sender_impl.cc
index 6fd848f27b..973f34155f 100644
--- a/media/cast/transport/cast_transport_sender_impl.cc
+++ b/media/cast/net/cast_transport_sender_impl.cc
@@ -2,16 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/cast/transport/cast_transport_sender_impl.h"
+#include "media/cast/net/cast_transport_sender_impl.h"
#include "base/single_thread_task_runner.h"
-#include "media/cast/transport/cast_transport_config.h"
-#include "media/cast/transport/cast_transport_defines.h"
+#include "media/cast/net/cast_transport_config.h"
+#include "media/cast/net/cast_transport_defines.h"
+#include "media/cast/net/udp_transport.h"
#include "net/base/net_util.h"
namespace media {
namespace cast {
-namespace transport {
scoped_ptr<CastTransportSender> CastTransportSender::Create(
net::NetLog* net_log,
@@ -80,18 +80,18 @@ CastTransportSenderImpl::~CastTransportSenderImpl() {
}
void CastTransportSenderImpl::InitializeAudio(
- const CastTransportAudioConfig& config) {
- LOG_IF(WARNING, config.rtp.config.aes_key.empty() ||
- config.rtp.config.aes_iv_mask.empty())
+ const CastTransportRtpConfig& config) {
+ LOG_IF(WARNING, config.aes_key.empty() || config.aes_iv_mask.empty())
<< "Unsafe to send audio with encryption DISABLED.";
- if (!audio_encryptor_.Initialize(config.rtp.config.aes_key,
- config.rtp.config.aes_iv_mask)) {
+ if (!audio_encryptor_.Initialize(config.aes_key, config.aes_iv_mask)) {
status_callback_.Run(TRANSPORT_AUDIO_UNINITIALIZED);
return;
}
audio_sender_.reset(new RtpSender(clock_, transport_task_runner_, &pacer_));
- if (audio_sender_->InitializeAudio(config)) {
- pacer_.RegisterAudioSsrc(config.rtp.config.ssrc);
+ if (audio_sender_->Initialize(config)) {
+ // Audio packets have a higher priority.
+ pacer_.RegisterAudioSsrc(config.ssrc);
+ pacer_.RegisterPrioritySsrc(config.ssrc);
status_callback_.Run(TRANSPORT_AUDIO_INITIALIZED);
} else {
audio_sender_.reset();
@@ -100,18 +100,16 @@ void CastTransportSenderImpl::InitializeAudio(
}
void CastTransportSenderImpl::InitializeVideo(
- const CastTransportVideoConfig& config) {
- LOG_IF(WARNING, config.rtp.config.aes_key.empty() ||
- config.rtp.config.aes_iv_mask.empty())
+ const CastTransportRtpConfig& config) {
+ LOG_IF(WARNING, config.aes_key.empty() || config.aes_iv_mask.empty())
<< "Unsafe to send video with encryption DISABLED.";
- if (!video_encryptor_.Initialize(config.rtp.config.aes_key,
- config.rtp.config.aes_iv_mask)) {
+ if (!video_encryptor_.Initialize(config.aes_key, config.aes_iv_mask)) {
status_callback_.Run(TRANSPORT_VIDEO_UNINITIALIZED);
return;
}
video_sender_.reset(new RtpSender(clock_, transport_task_runner_, &pacer_));
- if (video_sender_->InitializeVideo(config)) {
- pacer_.RegisterVideoSsrc(config.rtp.config.ssrc);
+ if (video_sender_->Initialize(config)) {
+ pacer_.RegisterVideoSsrc(config.ssrc);
status_callback_.Run(TRANSPORT_VIDEO_INITIALIZED);
} else {
video_sender_.reset();
@@ -128,7 +126,7 @@ namespace {
void EncryptAndSendFrame(const EncodedFrame& frame,
TransportEncryptionHandler* encryptor,
RtpSender* sender) {
- if (encryptor->initialized()) {
+ if (encryptor->is_activated()) {
EncodedFrame encrypted_frame;
frame.CopyMetadataTo(&encrypted_frame);
if (encryptor->Encrypt(frame.frame_id, frame.data, &encrypted_frame.data)) {
@@ -207,6 +205,5 @@ void CastTransportSenderImpl::SendRawEvents() {
raw_events_callback_.Run(packet_events);
}
-} // namespace transport
} // namespace cast
} // namespace media
diff --git a/media/cast/transport/cast_transport_sender_impl.h b/media/cast/net/cast_transport_sender_impl.h
index 035ef844b6..5d34fdeb01 100644
--- a/media/cast/transport/cast_transport_sender_impl.h
+++ b/media/cast/net/cast_transport_sender_impl.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_CAST_TRANSPORT_CAST_TRANSPORT_IMPL_H_
-#define MEDIA_CAST_TRANSPORT_CAST_TRANSPORT_IMPL_H_
+#ifndef MEDIA_CAST_NET_CAST_TRANSPORT_IMPL_H_
+#define MEDIA_CAST_NET_CAST_TRANSPORT_IMPL_H_
#include "base/callback.h"
#include "base/memory/ref_counted.h"
@@ -11,18 +11,19 @@
#include "base/time/tick_clock.h"
#include "base/time/time.h"
#include "base/timer/timer.h"
+#include "media/cast/common/transport_encryption_handler.h"
#include "media/cast/logging/logging_defines.h"
#include "media/cast/logging/simple_event_subscriber.h"
-#include "media/cast/transport/cast_transport_config.h"
-#include "media/cast/transport/cast_transport_sender.h"
-#include "media/cast/transport/pacing/paced_sender.h"
-#include "media/cast/transport/rtcp/rtcp_builder.h"
-#include "media/cast/transport/rtp_sender/rtp_sender.h"
-#include "media/cast/transport/utility/transport_encryption_handler.h"
+#include "media/cast/net/cast_transport_config.h"
+#include "media/cast/net/cast_transport_sender.h"
+#include "media/cast/net/pacing/paced_sender.h"
+#include "media/cast/net/rtcp/rtcp_builder.h"
+#include "media/cast/net/rtp/rtp_sender.h"
namespace media {
namespace cast {
-namespace transport {
+
+class UdpTransport;
class CastTransportSenderImpl : public CastTransportSender {
public:
@@ -46,9 +47,8 @@ class CastTransportSenderImpl : public CastTransportSender {
virtual ~CastTransportSenderImpl();
- virtual void InitializeAudio(const CastTransportAudioConfig& config) OVERRIDE;
-
- virtual void InitializeVideo(const CastTransportVideoConfig& config) OVERRIDE;
+ virtual void InitializeAudio(const CastTransportRtpConfig& config) OVERRIDE;
+ virtual void InitializeVideo(const CastTransportRtpConfig& config) OVERRIDE;
// CastTransportSender implementation.
virtual void SetPacketReceiver(const PacketReceiverCallback& packet_receiver)
@@ -103,8 +103,7 @@ class CastTransportSenderImpl : public CastTransportSender {
DISALLOW_COPY_AND_ASSIGN(CastTransportSenderImpl);
};
-} // namespace transport
} // namespace cast
} // namespace media
-#endif // MEDIA_CAST_TRANSPORT_CAST_TRANSPORT_IMPL_H_
+#endif // MEDIA_CAST_NET_CAST_TRANSPORT_IMPL_H_
diff --git a/media/cast/transport/cast_transport_sender_impl_unittest.cc b/media/cast/net/cast_transport_sender_impl_unittest.cc
index 67eb39a47a..b1d29b9ef9 100644
--- a/media/cast/transport/cast_transport_sender_impl_unittest.cc
+++ b/media/cast/net/cast_transport_sender_impl_unittest.cc
@@ -12,19 +12,18 @@
#include "base/run_loop.h"
#include "base/test/simple_test_tick_clock.h"
#include "media/cast/cast_config.h"
-#include "media/cast/rtcp/rtcp.h"
+#include "media/cast/net/cast_transport_config.h"
+#include "media/cast/net/cast_transport_sender_impl.h"
+#include "media/cast/net/rtcp/rtcp.h"
#include "media/cast/test/fake_single_thread_task_runner.h"
-#include "media/cast/transport/cast_transport_config.h"
-#include "media/cast/transport/cast_transport_sender_impl.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace media {
namespace cast {
-namespace transport {
static const int64 kStartMillisecond = INT64_C(12345678900000);
-class FakePacketSender : public transport::PacketSender {
+class FakePacketSender : public PacketSender {
public:
FakePacketSender() {}
@@ -78,7 +77,7 @@ class CastTransportSenderImplTest : public ::testing::Test {
}
}
- static void UpdateCastTransportStatus(transport::CastTransportStatus status) {
+ static void UpdateCastTransportStatus(CastTransportStatus status) {
}
base::SimpleTestTickClock testing_clock_;
@@ -108,6 +107,5 @@ TEST_F(CastTransportSenderImplTest, InitWithLogging) {
EXPECT_GT(num_times_callback_called_, 1);
}
-} // namespace transport
} // namespace cast
} // namespace media
diff --git a/media/cast/transport/frame_id_wrap_helper_test.cc b/media/cast/net/frame_id_wrap_helper_test.cc
index 3a2060d3aa..92a8443533 100644
--- a/media/cast/transport/frame_id_wrap_helper_test.cc
+++ b/media/cast/net/frame_id_wrap_helper_test.cc
@@ -1,13 +1,12 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <gtest/gtest.h>
-#include "media/cast/transport/cast_transport_defines.h"
+#include "media/cast/net/cast_transport_defines.h"
namespace media {
namespace cast {
-namespace transport {
class FrameIdWrapHelperTest : public ::testing::Test {
protected:
@@ -47,6 +46,5 @@ TEST_F(FrameIdWrapHelperTest, OutOfOrder) {
EXPECT_EQ(257u, new_frame_id);
}
-} // namespace transport
} // namespace cast
} // namespace media
diff --git a/media/cast/transport/pacing/mock_paced_packet_sender.cc b/media/cast/net/pacing/mock_paced_packet_sender.cc
index 5e325f0233..3219ba2ba4 100644
--- a/media/cast/transport/pacing/mock_paced_packet_sender.cc
+++ b/media/cast/net/pacing/mock_paced_packet_sender.cc
@@ -1,17 +1,15 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/cast/transport/pacing/mock_paced_packet_sender.h"
+#include "media/cast/net/pacing/mock_paced_packet_sender.h"
namespace media {
namespace cast {
-namespace transport {
MockPacedPacketSender::MockPacedPacketSender() {}
MockPacedPacketSender::~MockPacedPacketSender() {}
-} // namespace transport
} // namespace cast
} // namespace media
diff --git a/media/cast/transport/pacing/mock_paced_packet_sender.h b/media/cast/net/pacing/mock_paced_packet_sender.h
index 20b7647035..2f7f1b2650 100644
--- a/media/cast/transport/pacing/mock_paced_packet_sender.h
+++ b/media/cast/net/pacing/mock_paced_packet_sender.h
@@ -1,16 +1,15 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_CAST_TRANSPORT_PACING_MOCK_PACED_PACKET_SENDER_H_
-#define MEDIA_CAST_TRANSPORT_PACING_MOCK_PACED_PACKET_SENDER_H_
+#ifndef MEDIA_CAST_NET_PACING_MOCK_PACED_PACKET_SENDER_H_
+#define MEDIA_CAST_NET_PACING_MOCK_PACED_PACKET_SENDER_H_
-#include "media/cast/transport/pacing/paced_sender.h"
+#include "media/cast/net/pacing/paced_sender.h"
#include "testing/gmock/include/gmock/gmock.h"
namespace media {
namespace cast {
-namespace transport {
class MockPacedPacketSender : public PacedPacketSender {
public:
@@ -24,8 +23,7 @@ class MockPacedPacketSender : public PacedPacketSender {
MOCK_METHOD1(CancelSendingPacket, void(const PacketKey& packet_key));
};
-} // namespace transport
} // namespace cast
} // namespace media
-#endif // MEDIA_CAST_TRANSPORT_PACING_MOCK_PACED_PACKET_SENDER_H_
+#endif // MEDIA_CAST_NET_PACING_MOCK_PACED_PACKET_SENDER_H_
diff --git a/media/cast/transport/pacing/paced_sender.cc b/media/cast/net/pacing/paced_sender.cc
index 20cbde85be..89a69272dc 100644
--- a/media/cast/transport/pacing/paced_sender.cc
+++ b/media/cast/net/pacing/paced_sender.cc
@@ -1,16 +1,16 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/cast/transport/pacing/paced_sender.h"
+#include "media/cast/net/pacing/paced_sender.h"
#include "base/big_endian.h"
#include "base/bind.h"
#include "base/message_loop/message_loop.h"
+#include "media/cast/logging/logging_impl.h"
namespace media {
namespace cast {
-namespace transport {
namespace {
@@ -60,13 +60,24 @@ void PacedSender::RegisterVideoSsrc(uint32 video_ssrc) {
video_ssrc_ = video_ssrc;
}
+void PacedSender::RegisterPrioritySsrc(uint32 ssrc) {
+ priority_ssrcs_.push_back(ssrc);
+}
+
bool PacedSender::SendPackets(const SendPacketVector& packets) {
if (packets.empty()) {
return true;
}
+ const bool high_priority = IsHighPriority(packets.begin()->first);
for (size_t i = 0; i < packets.size(); i++) {
- packet_list_[packets[i].first] =
- make_pair(PacketType_Normal, packets[i].second);
+ DCHECK(IsHighPriority(packets[i].first) == high_priority);
+ if (high_priority) {
+ priority_packet_list_[packets[i].first] =
+ make_pair(PacketType_Normal, packets[i].second);
+ } else {
+ packet_list_[packets[i].first] =
+ make_pair(PacketType_Normal, packets[i].second);
+ }
}
if (state_ == State_Unblocked) {
SendStoredPackets();
@@ -79,6 +90,7 @@ bool PacedSender::ResendPackets(const SendPacketVector& packets,
if (packets.empty()) {
return true;
}
+ const bool high_priority = IsHighPriority(packets.begin()->first);
base::TimeTicks now = clock_->NowTicks();
for (size_t i = 0; i < packets.size(); i++) {
std::map<PacketKey, base::TimeTicks>::const_iterator j =
@@ -89,8 +101,14 @@ bool PacedSender::ResendPackets(const SendPacketVector& packets,
continue;
}
- packet_list_[packets[i].first] =
- make_pair(PacketType_Resend, packets[i].second);
+ DCHECK(IsHighPriority(packets[i].first) == high_priority);
+ if (high_priority) {
+ priority_packet_list_[packets[i].first] =
+ make_pair(PacketType_Resend, packets[i].second);
+ } else {
+ packet_list_[packets[i].first] =
+ make_pair(PacketType_Resend, packets[i].second);
+ }
}
if (state_ == State_Unblocked) {
SendStoredPackets();
@@ -100,7 +118,8 @@ bool PacedSender::ResendPackets(const SendPacketVector& packets,
bool PacedSender::SendRtcpPacket(uint32 ssrc, PacketRef packet) {
if (state_ == State_TransportBlocked) {
- packet_list_[PacedPacketSender::MakePacketKey(base::TimeTicks(), ssrc, 0)] =
+ priority_packet_list_[
+ PacedPacketSender::MakePacketKey(base::TimeTicks(), ssrc, 0)] =
make_pair(PacketType_RTCP, packet);
} else {
// We pass the RTCP packets straight through.
@@ -110,33 +129,39 @@ bool PacedSender::SendRtcpPacket(uint32 ssrc, PacketRef packet) {
weak_factory_.GetWeakPtr()))) {
state_ = State_TransportBlocked;
}
-
}
return true;
}
void PacedSender::CancelSendingPacket(const PacketKey& packet_key) {
packet_list_.erase(packet_key);
+ priority_packet_list_.erase(packet_key);
}
-PacketRef PacedSender::GetNextPacket(PacketType* packet_type,
+PacketRef PacedSender::PopNextPacket(PacketType* packet_type,
PacketKey* packet_key) {
- std::map<PacketKey, std::pair<PacketType, PacketRef> >::iterator i;
- i = packet_list_.begin();
- DCHECK(i != packet_list_.end());
+ PacketList* list = !priority_packet_list_.empty() ?
+ &priority_packet_list_ : &packet_list_;
+ DCHECK(!list->empty());
+ PacketList::iterator i = list->begin();
*packet_type = i->second.first;
*packet_key = i->first;
PacketRef ret = i->second.second;
- packet_list_.erase(i);
+ list->erase(i);
return ret;
}
+bool PacedSender::IsHighPriority(const PacketKey& packet_key) const {
+ return std::find(priority_ssrcs_.begin(), priority_ssrcs_.end(),
+ packet_key.second.first) != priority_ssrcs_.end();
+}
+
bool PacedSender::empty() const {
- return packet_list_.empty();
+ return packet_list_.empty() && priority_packet_list_.empty();
}
size_t PacedSender::size() const {
- return packet_list_.size();
+ return packet_list_.size() + priority_packet_list_.size();
}
// This function can be called from three places:
@@ -199,7 +224,7 @@ void PacedSender::SendStoredPackets() {
}
PacketType packet_type;
PacketKey packet_key;
- PacketRef packet = GetNextPacket(&packet_type, &packet_key);
+ PacketRef packet = PopNextPacket(&packet_type, &packet_key);
sent_time_[packet_key] = now;
sent_time_buffer_[packet_key] = now;
@@ -255,6 +280,5 @@ void PacedSender::LogPacketEvent(const Packet& packet, CastLoggingEvent event) {
packet);
}
-} // namespace transport
} // namespace cast
} // namespace media
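
Note on the PacedSender changes above: PopNextPacket() now drains a separate priority map (queued RTCP packets and packets whose SSRC was registered via RegisterPrioritySsrc()) before the normal map. A minimal sketch of that two-queue selection, with plain std::map keys and string payloads standing in for PacketKey/PacketRef (illustrative, not the Chromium API):

// Sketch of the "priority list first" selection added in paced_sender.cc.
// Keys and payloads are simplified stand-ins for PacketKey/PacketRef.
#include <cassert>
#include <iostream>
#include <map>
#include <string>

class TinyPacedQueue {
 public:
  void Push(int key, const std::string& packet, bool high_priority) {
    (high_priority ? priority_packets_ : packets_)[key] = packet;
  }

  // Mirrors PopNextPacket(): always drain the priority list first.
  std::string PopNext() {
    std::map<int, std::string>* list =
        !priority_packets_.empty() ? &priority_packets_ : &packets_;
    assert(!list->empty());
    auto it = list->begin();
    std::string packet = it->second;
    list->erase(it);
    return packet;
  }

  bool empty() const { return packets_.empty() && priority_packets_.empty(); }

 private:
  std::map<int, std::string> packets_;
  std::map<int, std::string> priority_packets_;
};

int main() {
  TinyPacedQueue queue;
  queue.Push(1, "video", /*high_priority=*/false);
  queue.Push(2, "audio", /*high_priority=*/true);  // e.g. a registered SSRC.
  queue.Push(3, "rtcp", /*high_priority=*/true);   // queued while blocked.
  while (!queue.empty())
    std::cout << queue.PopNext() << "\n";  // Prints: audio, rtcp, video.
  return 0;
}
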
diff --git a/media/cast/transport/pacing/paced_sender.h b/media/cast/net/pacing/paced_sender.h
index 9fc0c8b8b8..094e5299e1 100644
--- a/media/cast/transport/pacing/paced_sender.h
+++ b/media/cast/net/pacing/paced_sender.h
@@ -1,11 +1,11 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_CAST_TRANSPORT_PACING_PACED_SENDER_H_
-#define MEDIA_CAST_TRANSPORT_PACING_PACED_SENDER_H_
+#ifndef MEDIA_CAST_NET_PACING_PACED_SENDER_H_
+#define MEDIA_CAST_NET_PACING_PACED_SENDER_H_
-#include <list>
+#include <map>
#include <vector>
#include "base/basictypes.h"
@@ -16,16 +16,14 @@
#include "base/time/default_tick_clock.h"
#include "base/time/tick_clock.h"
#include "base/time/time.h"
-#include "media/cast/transport/cast_transport_config.h"
-#include "media/cast/transport/transport/udp_transport.h"
+#include "media/cast/logging/logging_defines.h"
+#include "media/cast/net/cast_transport_config.h"
namespace media {
namespace cast {
class LoggingImpl;
-namespace transport {
-
// Use std::pair for free comparison operators.
// { capture_time, ssrc, packet_id }
// The PacketKey is designed to meet two criteria:
@@ -71,6 +69,12 @@ class PacedSender : public PacedPacketSender,
void RegisterAudioSsrc(uint32 audio_ssrc);
void RegisterVideoSsrc(uint32 video_ssrc);
+  // Register an SSRC that has a higher priority for sending. Multiple SSRCs
+  // can be registered.
+  // Note that it is not expected to register many SSRCs with this method,
+  // because IsHighPriority() is determined in linear time.
+ void RegisterPrioritySsrc(uint32 ssrc);
+
// PacedPacketSender implementation.
virtual bool SendPackets(const SendPacketVector& packets) OVERRIDE;
virtual bool ResendPackets(const SendPacketVector& packets,
@@ -110,16 +114,25 @@ class PacedSender : public PacedPacketSender,
// Returns the next packet to send. RTCP packets have highest priority,
// resend packets have second highest priority and then comes everything
// else.
- PacketRef GetNextPacket(PacketType* packet_type,
+ PacketRef PopNextPacket(PacketType* packet_type,
PacketKey* packet_key);
+ // Returns true if the packet should have a higher priority.
+ bool IsHighPriority(const PacketKey& packet_key) const;
+
base::TickClock* const clock_; // Not owned by this class.
LoggingImpl* const logging_; // Not owned by this class.
PacketSender* transport_; // Not owned by this class.
scoped_refptr<base::SingleThreadTaskRunner> transport_task_runner_;
uint32 audio_ssrc_;
uint32 video_ssrc_;
- std::map<PacketKey, std::pair<PacketType, PacketRef> > packet_list_;
+
+  // Set of SSRCs that have higher priority. This is a vector instead of a
+  // set because it typically holds only a few entries (most likely 1).
+ std::vector<uint32> priority_ssrcs_;
+ typedef std::map<PacketKey, std::pair<PacketType, PacketRef> > PacketList;
+ PacketList packet_list_;
+ PacketList priority_packet_list_;
std::map<PacketKey, base::TimeTicks> sent_time_;
std::map<PacketKey, base::TimeTicks> sent_time_buffer_;
@@ -140,8 +153,7 @@ class PacedSender : public PacedPacketSender,
DISALLOW_COPY_AND_ASSIGN(PacedSender);
};
-} // namespace transport
} // namespace cast
} // namespace media
-#endif // MEDIA_CAST_TRANSPORT_PACING_PACED_SENDER_H_
+#endif // MEDIA_CAST_NET_PACING_PACED_SENDER_H_
diff --git a/media/cast/transport/pacing/paced_sender_unittest.cc b/media/cast/net/pacing/paced_sender_unittest.cc
index 5e24fca4b5..43e7603385 100644
--- a/media/cast/transport/pacing/paced_sender_unittest.cc
+++ b/media/cast/net/pacing/paced_sender_unittest.cc
@@ -1,4 +1,4 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -6,23 +6,24 @@
#include "base/big_endian.h"
#include "base/test/simple_test_tick_clock.h"
+#include "media/cast/logging/logging_impl.h"
#include "media/cast/logging/simple_event_subscriber.h"
+#include "media/cast/net/pacing/paced_sender.h"
#include "media/cast/test/fake_single_thread_task_runner.h"
-#include "media/cast/transport/pacing/paced_sender.h"
#include "testing/gmock/include/gmock/gmock.h"
+using testing::_;
+
namespace media {
namespace cast {
-namespace transport {
-
-using testing::_;
+namespace {
static const uint8 kValue = 123;
-static const size_t kSize1 = 100;
-static const size_t kSize2 = 101;
-static const size_t kSize3 = 102;
-static const size_t kSize4 = 103;
-static const size_t kNackSize = 104;
+static const size_t kSize1 = 101;
+static const size_t kSize2 = 102;
+static const size_t kSize3 = 103;
+static const size_t kSize4 = 104;
+static const size_t kNackSize = 105;
static const int64 kStartMillisecond = INT64_C(12345678900000);
static const uint32 kVideoSsrc = 0x1234;
static const uint32 kAudioSsrc = 0x5678;
@@ -68,7 +69,7 @@ class PacedSenderTest : public ::testing::Test {
logging_.RemoveRawEventSubscriber(&subscriber_);
}
- static void UpdateCastTransportStatus(transport::CastTransportStatus status) {
+ static void UpdateCastTransportStatus(CastTransportStatus status) {
NOTREACHED();
}
@@ -124,6 +125,8 @@ class PacedSenderTest : public ::testing::Test {
DISALLOW_COPY_AND_ASSIGN(PacedSenderTest);
};
+} // namespace
+
TEST_F(PacedSenderTest, PassThroughRtcp) {
mock_transport_.AddExpectedSize(kSize1, 2);
SendPacketVector packets = CreateSendPacketVector(kSize1, 1, true);
@@ -346,6 +349,56 @@ TEST_F(PacedSenderTest, PaceWith60fps) {
EXPECT_TRUE(RunUntilEmpty(5));
}
-} // namespace transport
+TEST_F(PacedSenderTest, SendPriority) {
+ // Actual order to the network is:
+ // 1. Video packets x 10.
+ // 2. RTCP packet x 1.
+ // 3. Audio packet x 1.
+ // 4. Video retransmission packet x 10.
+ // 5. Video packet x 10.
+ mock_transport_.AddExpectedSize(kSize2, 10); // Normal video packets.
+ mock_transport_.AddExpectedSize(kSize3, 1); // RTCP packet.
+ mock_transport_.AddExpectedSize(kSize1, 1); // Audio packet.
+ mock_transport_.AddExpectedSize(kSize4, 10); // Resend video packets.
+ mock_transport_.AddExpectedSize(kSize2, 10); // Normal video packets.
+
+ paced_sender_->RegisterPrioritySsrc(kAudioSsrc);
+
+ // Retransmission packets with the earlier timestamp.
+ SendPacketVector resend_packets =
+ CreateSendPacketVector(kSize4, 10, false);
+ testing_clock_.Advance(base::TimeDelta::FromMilliseconds(10));
+
+ // Send 20 normal video packets. Only 10 will be sent in this
+ // call, the rest will be sitting in the queue waiting for pacing.
+ EXPECT_TRUE(paced_sender_->SendPackets(
+ CreateSendPacketVector(kSize2, 20, false)));
+
+ testing_clock_.Advance(base::TimeDelta::FromMilliseconds(10));
+
+ // Send normal audio packet. This is queued and will be sent
+ // earlier than video packets.
+ EXPECT_TRUE(paced_sender_->SendPackets(
+ CreateSendPacketVector(kSize1, 1, true)));
+
+ // Send RTCP packet. This is queued and will be sent first.
+ EXPECT_TRUE(paced_sender_->SendRtcpPacket(
+ kVideoSsrc,
+ new base::RefCountedData<Packet>(Packet(kSize3, kValue))));
+
+ // Resend video packets. This is queued and will be sent
+ // earlier than normal video packets.
+ EXPECT_TRUE(paced_sender_->ResendPackets(
+ resend_packets, base::TimeDelta()));
+
+ // Roll the clock. Queued packets will be sent in this order:
+ // 1. RTCP packet x 1.
+ // 2. Audio packet x 1.
+ // 3. Video retransmission packet x 10.
+ // 4. Video packet x 10.
+ task_runner_->RunTasks();
+ EXPECT_TRUE(RunUntilEmpty(4));
+}
+
} // namespace cast
} // namespace media
diff --git a/media/cast/rtcp/mock_rtcp_receiver_feedback.cc b/media/cast/net/rtcp/mock_rtcp_receiver_feedback.cc
index 9ff2d48f03..0ea6606f82 100644
--- a/media/cast/rtcp/mock_rtcp_receiver_feedback.cc
+++ b/media/cast/net/rtcp/mock_rtcp_receiver_feedback.cc
@@ -1,8 +1,8 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/cast/rtcp/mock_rtcp_receiver_feedback.h"
+#include "media/cast/net/rtcp/mock_rtcp_receiver_feedback.h"
namespace media {
namespace cast {
diff --git a/media/cast/rtcp/mock_rtcp_receiver_feedback.h b/media/cast/net/rtcp/mock_rtcp_receiver_feedback.h
index 56fe1ca699..38bf0eec28 100644
--- a/media/cast/rtcp/mock_rtcp_receiver_feedback.h
+++ b/media/cast/net/rtcp/mock_rtcp_receiver_feedback.h
@@ -1,4 +1,4 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -7,9 +7,9 @@
#include <vector>
-#include "media/cast/rtcp/rtcp_defines.h"
-#include "media/cast/rtcp/rtcp_receiver.h"
-#include "media/cast/transport/cast_transport_defines.h"
+#include "media/cast/net/cast_transport_defines.h"
+#include "media/cast/net/rtcp/rtcp_defines.h"
+#include "media/cast/net/rtcp/rtcp_receiver.h"
#include "testing/gmock/include/gmock/gmock.h"
namespace media {
@@ -21,7 +21,7 @@ class MockRtcpReceiverFeedback : public RtcpReceiverFeedback {
virtual ~MockRtcpReceiverFeedback();
MOCK_METHOD1(OnReceivedSenderReport,
- void(const transport::RtcpSenderInfo& remote_sender_info));
+ void(const RtcpSenderInfo& remote_sender_info));
MOCK_METHOD1(OnReceiverReferenceTimeReport,
void(const RtcpReceiverReferenceTimeReport& remote_time_report));
diff --git a/media/cast/rtcp/mock_rtcp_sender_feedback.cc b/media/cast/net/rtcp/mock_rtcp_sender_feedback.cc
index e44e0bfdef..2c51c7448d 100644
--- a/media/cast/rtcp/mock_rtcp_sender_feedback.cc
+++ b/media/cast/net/rtcp/mock_rtcp_sender_feedback.cc
@@ -1,8 +1,8 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/cast/rtcp/mock_rtcp_sender_feedback.h"
+#include "media/cast/net/rtcp/mock_rtcp_sender_feedback.h"
namespace media {
namespace cast {
diff --git a/media/cast/rtcp/mock_rtcp_sender_feedback.h b/media/cast/net/rtcp/mock_rtcp_sender_feedback.h
index 40547e6283..a6af0aaa3e 100644
--- a/media/cast/rtcp/mock_rtcp_sender_feedback.h
+++ b/media/cast/net/rtcp/mock_rtcp_sender_feedback.h
@@ -1,4 +1,4 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -7,7 +7,7 @@
#include <vector>
-#include "media/cast/rtcp/rtcp_receiver.h"
+#include "media/cast/net/rtcp/rtcp_receiver.h"
#include "testing/gmock/include/gmock/gmock.h"
namespace media {
diff --git a/media/cast/rtcp/receiver_rtcp_event_subscriber.cc b/media/cast/net/rtcp/receiver_rtcp_event_subscriber.cc
index 9a9c0aeeb7..a751ff94d3 100644
--- a/media/cast/rtcp/receiver_rtcp_event_subscriber.cc
+++ b/media/cast/net/rtcp/receiver_rtcp_event_subscriber.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/cast/rtcp/receiver_rtcp_event_subscriber.h"
+#include "media/cast/net/rtcp/receiver_rtcp_event_subscriber.h"
#include <utility>
diff --git a/media/cast/rtcp/receiver_rtcp_event_subscriber.h b/media/cast/net/rtcp/receiver_rtcp_event_subscriber.h
index 84af7cbaf3..7e80ffebc6 100644
--- a/media/cast/rtcp/receiver_rtcp_event_subscriber.h
+++ b/media/cast/net/rtcp/receiver_rtcp_event_subscriber.h
@@ -10,7 +10,7 @@
#include "base/threading/thread_checker.h"
#include "media/cast/logging/logging_defines.h"
#include "media/cast/logging/raw_event_subscriber.h"
-#include "media/cast/rtcp/rtcp_defines.h"
+#include "media/cast/net/rtcp/rtcp_defines.h"
namespace media {
namespace cast {
diff --git a/media/cast/rtcp/receiver_rtcp_event_subscriber_unittest.cc b/media/cast/net/rtcp/receiver_rtcp_event_subscriber_unittest.cc
index e0d0f17216..8d975592f2 100644
--- a/media/cast/rtcp/receiver_rtcp_event_subscriber_unittest.cc
+++ b/media/cast/net/rtcp/receiver_rtcp_event_subscriber_unittest.cc
@@ -8,7 +8,7 @@
#include "base/time/tick_clock.h"
#include "media/cast/cast_environment.h"
#include "media/cast/logging/logging_defines.h"
-#include "media/cast/rtcp/receiver_rtcp_event_subscriber.h"
+#include "media/cast/net/rtcp/receiver_rtcp_event_subscriber.h"
#include "media/cast/test/fake_single_thread_task_runner.h"
#include "testing/gtest/include/gtest/gtest.h"
diff --git a/media/cast/rtcp/rtcp.cc b/media/cast/net/rtcp/rtcp.cc
index 480b2ac399..c0e6c19387 100644
--- a/media/cast/rtcp/rtcp.cc
+++ b/media/cast/net/rtcp/rtcp.cc
@@ -1,25 +1,26 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/cast/rtcp/rtcp.h"
+#include "media/cast/net/rtcp/rtcp.h"
#include "base/big_endian.h"
-#include "base/rand_util.h"
#include "media/cast/cast_config.h"
#include "media/cast/cast_defines.h"
#include "media/cast/cast_environment.h"
-#include "media/cast/rtcp/rtcp_defines.h"
-#include "media/cast/rtcp/rtcp_receiver.h"
-#include "media/cast/rtcp/rtcp_sender.h"
-#include "media/cast/rtcp/rtcp_utility.h"
-#include "media/cast/transport/cast_transport_defines.h"
+#include "media/cast/net/cast_transport_defines.h"
+#include "media/cast/net/rtcp/rtcp_defines.h"
+#include "media/cast/net/rtcp/rtcp_receiver.h"
+#include "media/cast/net/rtcp/rtcp_sender.h"
+#include "media/cast/net/rtcp/rtcp_utility.h"
+
+using base::TimeDelta;
namespace media {
namespace cast {
-static const int kMaxRttMs = 10000; // 10 seconds.
-static const int kMaxDelay = 2000;
+static const int32 kMaxRttMs = 10000; // 10 seconds.
+static const int32 kMaxDelayMs = 2000; // 2 seconds.
class LocalRtcpRttFeedback : public RtcpRttFeedback {
public:
@@ -43,7 +44,7 @@ class LocalRtcpReceiverFeedback : public RtcpReceiverFeedback {
: rtcp_(rtcp), cast_environment_(cast_environment) {}
virtual void OnReceivedSenderReport(
- const transport::RtcpSenderInfo& remote_sender_info) OVERRIDE {
+ const RtcpSenderInfo& remote_sender_info) OVERRIDE {
rtcp_->OnReceivedNtp(remote_sender_info.ntp_seconds,
remote_sender_info.ntp_fraction);
if (remote_sender_info.send_packet_count != 0) {
@@ -75,8 +76,8 @@ class LocalRtcpReceiverFeedback : public RtcpReceiverFeedback {
Rtcp::Rtcp(scoped_refptr<CastEnvironment> cast_environment,
RtcpSenderFeedback* sender_feedback,
- transport::CastTransportSender* const transport_sender,
- transport::PacedPacketSender* paced_packet_sender,
+ CastTransportSender* const transport_sender,
+ PacedPacketSender* paced_packet_sender,
RtpReceiverStatistics* rtp_receiver_statistics, RtcpMode rtcp_mode,
const base::TimeDelta& rtcp_interval, uint32 local_ssrc,
uint32 remote_ssrc, const std::string& c_name,
@@ -98,7 +99,7 @@ Rtcp::Rtcp(scoped_refptr<CastEnvironment> cast_environment,
local_clock_ahead_by_(ClockDriftSmoother::GetDefaultTimeConstant()),
lip_sync_rtp_timestamp_(0),
lip_sync_ntp_timestamp_(0),
- min_rtt_(base::TimeDelta::FromMilliseconds(kMaxRttMs)),
+ min_rtt_(TimeDelta::FromMilliseconds(kMaxRttMs)),
number_of_rtt_in_avg_(0) {
rtcp_receiver_.reset(new RtcpReceiver(cast_environment, sender_feedback,
receiver_feedback_.get(),
@@ -114,8 +115,8 @@ bool Rtcp::IsRtcpPacket(const uint8* packet, size_t length) {
if (length < kMinLengthOfRtcp) return false;
uint8 packet_type = packet[1];
- if (packet_type >= transport::kPacketTypeLow &&
- packet_type <= transport::kPacketTypeHigh) {
+ if (packet_type >= kPacketTypeLow &&
+ packet_type <= kPacketTypeHigh) {
return true;
}
return false;
@@ -156,23 +157,23 @@ void Rtcp::SendRtcpFromRtpReceiver(
uint32 packet_type_flags = 0;
base::TimeTicks now = cast_environment_->Clock()->NowTicks();
- transport::RtcpReportBlock report_block;
+ RtcpReportBlock report_block;
RtcpReceiverReferenceTimeReport rrtr;
// Attach our NTP to all RTCP packets; with this information a "smart" sender
// can make decisions based on how old the RTCP message is.
- packet_type_flags |= transport::kRtcpRrtr;
+ packet_type_flags |= kRtcpRrtr;
ConvertTimeTicksToNtp(now, &rrtr.ntp_seconds, &rrtr.ntp_fraction);
SaveLastSentNtpTime(now, rrtr.ntp_seconds, rrtr.ntp_fraction);
if (cast_message) {
- packet_type_flags |= transport::kRtcpCast;
+ packet_type_flags |= kRtcpCast;
}
if (rtcp_events) {
- packet_type_flags |= transport::kRtcpReceiverLog;
+ packet_type_flags |= kRtcpReceiverLog;
}
if (rtcp_mode_ == kRtcpCompound || now >= next_time_to_send_rtcp_) {
- packet_type_flags |= transport::kRtcpRr;
+ packet_type_flags |= kRtcpRr;
report_block.remote_ssrc = 0; // Not needed to set send side.
report_block.media_ssrc = remote_ssrc_; // SSRC of the RTP packet sender.
@@ -201,13 +202,13 @@ void Rtcp::SendRtcpFromRtpReceiver(
&rrtr,
cast_message,
rtcp_events,
- target_delay_ms_);
+ target_delay_);
}
void Rtcp::SendRtcpFromRtpSender(base::TimeTicks current_time,
uint32 current_time_as_rtp_timestamp) {
DCHECK(transport_sender_);
- uint32 packet_type_flags = transport::kRtcpSr;
+ uint32 packet_type_flags = kRtcpSr;
uint32 current_ntp_seconds = 0;
uint32 current_ntp_fractions = 0;
ConvertTimeTicksToNtp(current_time, &current_ntp_seconds,
@@ -215,9 +216,9 @@ void Rtcp::SendRtcpFromRtpSender(base::TimeTicks current_time,
SaveLastSentNtpTime(current_time, current_ntp_seconds,
current_ntp_fractions);
- transport::RtcpDlrrReportBlock dlrr;
+ RtcpDlrrReportBlock dlrr;
if (!time_last_report_received_.is_null()) {
- packet_type_flags |= transport::kRtcpDlrr;
+ packet_type_flags |= kRtcpDlrr;
dlrr.last_rr = last_report_truncated_ntp_;
uint32 delay_seconds = 0;
uint32 delay_fraction = 0;
@@ -302,8 +303,8 @@ void Rtcp::SetCastReceiverEventHistorySize(size_t size) {
}
void Rtcp::SetTargetDelay(base::TimeDelta target_delay) {
- DCHECK(target_delay.InMilliseconds() < kMaxDelay);
- target_delay_ms_ = static_cast<uint16>(target_delay.InMilliseconds());
+ DCHECK(target_delay < TimeDelta::FromMilliseconds(kMaxDelayMs));
+ target_delay_ = target_delay;
}
void Rtcp::OnReceivedDelaySinceLastReport(uint32 receivers_ssrc,
@@ -331,7 +332,7 @@ void Rtcp::SaveLastSentNtpTime(const base::TimeTicks& now,
last_reports_sent_map_[last_report] = now;
last_reports_sent_queue_.push(std::make_pair(last_report, now));
- base::TimeTicks timeout = now - base::TimeDelta::FromMilliseconds(kMaxRttMs);
+ base::TimeTicks timeout = now - TimeDelta::FromMilliseconds(kMaxRttMs);
// Cleanup old statistics older than |timeout|.
while (!last_reports_sent_queue_.empty()) {
@@ -358,11 +359,12 @@ void Rtcp::UpdateRtt(const base::TimeDelta& sender_delay,
// TODO(miu): Replace "average for all time" with an EWMA, or suitable
// "average over recent past" mechanism.
if (number_of_rtt_in_avg_ != 0) {
- const double ac = static_cast<double>(number_of_rtt_in_avg_);
- avg_rtt_ms_ = ((ac / (ac + 1.0)) * avg_rtt_ms_) +
- ((1.0 / (ac + 1.0)) * rtt.InMillisecondsF());
+    // Integer math equivalent of (ac/(ac+1.0))*avg_rtt_ + (1.0/(ac+1.0))*rtt.
+ // (TimeDelta only supports math with other TimeDeltas and int64s.)
+ avg_rtt_ = (avg_rtt_ * number_of_rtt_in_avg_ + rtt) /
+ (number_of_rtt_in_avg_ + 1);
} else {
- avg_rtt_ms_ = rtt.InMillisecondsF();
+ avg_rtt_ = rtt;
}
number_of_rtt_in_avg_++;
}
@@ -377,19 +379,15 @@ bool Rtcp::Rtt(base::TimeDelta* rtt, base::TimeDelta* avg_rtt,
if (number_of_rtt_in_avg_ == 0) return false;
*rtt = rtt_;
- *avg_rtt = base::TimeDelta::FromMillisecondsD(avg_rtt_ms_);
+ *avg_rtt = avg_rtt_;
*min_rtt = min_rtt_;
*max_rtt = max_rtt_;
return true;
}
void Rtcp::UpdateNextTimeToSendRtcp() {
- int random = base::RandInt(0, 999);
- base::TimeDelta time_to_next =
- (rtcp_interval_ / 2) + (rtcp_interval_ * random / 1000);
-
base::TimeTicks now = cast_environment_->Clock()->NowTicks();
- next_time_to_send_rtcp_ = now + time_to_next;
+ next_time_to_send_rtcp_ = now + rtcp_interval_;
}
void Rtcp::OnReceivedReceiverLog(const RtcpReceiverLogMessage& receiver_log) {
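
Note on the UpdateRtt() hunk above: the running RTT average is now kept entirely in TimeDelta integer math, i.e. avg_{n+1} = (avg_n * n + rtt) / (n + 1). A small sketch with std::chrono::microseconds standing in for base::TimeDelta (illustrative only):

// Sketch of the incremental RTT average from the rtcp.cc hunk above,
// using std::chrono::microseconds in place of base::TimeDelta.
#include <chrono>
#include <iostream>

class RttAverager {
 public:
  void Update(std::chrono::microseconds rtt) {
    if (count_ != 0) {
      // Integer equivalent of (n/(n+1.0))*avg + (1.0/(n+1.0))*rtt.
      avg_ = (avg_ * count_ + rtt) / (count_ + 1);
    } else {
      avg_ = rtt;
    }
    ++count_;
  }

  std::chrono::microseconds average() const { return avg_; }

 private:
  std::chrono::microseconds avg_{0};
  int count_ = 0;
};

int main() {
  RttAverager averager;
  averager.Update(std::chrono::microseconds(30000));  // 30 ms
  averager.Update(std::chrono::microseconds(50000));  // 50 ms
  averager.Update(std::chrono::microseconds(40000));  // 40 ms
  std::cout << averager.average().count() << " us\n";  // Prints 40000 us.
  return 0;
}
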
diff --git a/media/cast/rtcp/rtcp.h b/media/cast/net/rtcp/rtcp.h
index 9d0184f903..2bf0367185 100644
--- a/media/cast/rtcp/rtcp.h
+++ b/media/cast/net/rtcp/rtcp.h
@@ -1,4 +1,4 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -13,15 +13,15 @@
#include "base/memory/scoped_ptr.h"
#include "base/time/tick_clock.h"
#include "base/time/time.h"
-#include "media/cast/base/clock_drift_smoother.h"
#include "media/cast/cast_config.h"
#include "media/cast/cast_defines.h"
#include "media/cast/cast_environment.h"
-#include "media/cast/rtcp/receiver_rtcp_event_subscriber.h"
-#include "media/cast/rtcp/rtcp_defines.h"
-#include "media/cast/transport/cast_transport_defines.h"
-#include "media/cast/transport/cast_transport_sender.h"
-#include "media/cast/transport/pacing/paced_sender.h"
+#include "media/cast/common/clock_drift_smoother.h"
+#include "media/cast/net/cast_transport_defines.h"
+#include "media/cast/net/cast_transport_sender.h"
+#include "media/cast/net/pacing/paced_sender.h"
+#include "media/cast/net/rtcp/receiver_rtcp_event_subscriber.h"
+#include "media/cast/net/rtcp/rtcp_defines.h"
namespace media {
namespace cast {
@@ -60,8 +60,8 @@ class Rtcp {
// be used by the Cast receivers and test applications.
Rtcp(scoped_refptr<CastEnvironment> cast_environment,
RtcpSenderFeedback* sender_feedback,
- transport::CastTransportSender* const transport_sender, // Send-side.
- transport::PacedPacketSender* paced_packet_sender, // Receive side.
+ CastTransportSender* const transport_sender, // Send-side.
+ PacedPacketSender* paced_packet_sender, // Receive side.
RtpReceiverStatistics* rtp_receiver_statistics,
RtcpMode rtcp_mode,
const base::TimeDelta& rtcp_interval,
@@ -150,7 +150,7 @@ class Rtcp {
uint32 last_ntp_fraction);
scoped_refptr<CastEnvironment> cast_environment_;
- transport::CastTransportSender* const transport_sender_;
+ CastTransportSender* const transport_sender_;
const base::TimeDelta rtcp_interval_;
const RtcpMode rtcp_mode_;
const uint32 local_ssrc_;
@@ -194,8 +194,8 @@ class Rtcp {
base::TimeDelta min_rtt_;
base::TimeDelta max_rtt_;
int number_of_rtt_in_avg_;
- double avg_rtt_ms_;
- uint16 target_delay_ms_;
+ base::TimeDelta avg_rtt_;
+ base::TimeDelta target_delay_;
DISALLOW_COPY_AND_ASSIGN(Rtcp);
};
diff --git a/media/cast/transport/rtcp/rtcp_builder.cc b/media/cast/net/rtcp/rtcp_builder.cc
index b8875fc96b..a5673a33f2 100644
--- a/media/cast/transport/rtcp/rtcp_builder.cc
+++ b/media/cast/net/rtcp/rtcp_builder.cc
@@ -1,8 +1,8 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/cast/transport/rtcp/rtcp_builder.h"
+#include "media/cast/net/rtcp/rtcp_builder.h"
#include <algorithm>
#include <string>
@@ -10,12 +10,11 @@
#include "base/big_endian.h"
#include "base/logging.h"
-#include "media/cast/transport/cast_transport_defines.h"
-#include "media/cast/transport/pacing/paced_sender.h"
+#include "media/cast/net/cast_transport_defines.h"
+#include "media/cast/net/pacing/paced_sender.h"
namespace media {
namespace cast {
-namespace transport {
RtcpBuilder::RtcpBuilder(PacedSender* const outgoing_transport)
: transport_(outgoing_transport),
@@ -192,6 +191,5 @@ bool RtcpBuilder::BuildDlrrRb(const RtcpDlrrReportBlock& dlrr,
return true;
}
-} // namespace transport
} // namespace cast
} // namespace media
diff --git a/media/cast/transport/rtcp/rtcp_builder.h b/media/cast/net/rtcp/rtcp_builder.h
index f095ae9ee5..1a9b169be1 100644
--- a/media/cast/transport/rtcp/rtcp_builder.h
+++ b/media/cast/net/rtcp/rtcp_builder.h
@@ -1,20 +1,19 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_CAST_TRANSPORT_RTCP_RTCP_BUILDER_H_
-#define MEDIA_CAST_TRANSPORT_RTCP_RTCP_BUILDER_H_
+#ifndef MEDIA_CAST_NET_RTCP_RTCP_BUILDER_H_
+#define MEDIA_CAST_NET_RTCP_RTCP_BUILDER_H_
#include <list>
#include <string>
#include <vector>
-#include "media/cast/transport/cast_transport_defines.h"
-#include "media/cast/transport/pacing/paced_sender.h"
+#include "media/cast/net/cast_transport_defines.h"
+#include "media/cast/net/pacing/paced_sender.h"
namespace media {
namespace cast {
-namespace transport {
class RtcpBuilder {
public:
@@ -42,8 +41,7 @@ class RtcpBuilder {
DISALLOW_COPY_AND_ASSIGN(RtcpBuilder);
};
-} // namespace transport
} // namespace cast
} // namespace media
-#endif // MEDIA_CAST_TRANSPORT_RTCP_RTCP_BUILDER_H_
+#endif // MEDIA_CAST_NET_RTCP_RTCP_BUILDER_H_
diff --git a/media/cast/transport/rtcp/rtcp_builder_unittest.cc b/media/cast/net/rtcp/rtcp_builder_unittest.cc
index 0322612f27..1c471f5176 100644
--- a/media/cast/transport/rtcp/rtcp_builder_unittest.cc
+++ b/media/cast/net/rtcp/rtcp_builder_unittest.cc
@@ -1,4 +1,4 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -6,11 +6,11 @@
#include "base/test/simple_test_tick_clock.h"
#include "media/cast/cast_defines.h"
#include "media/cast/cast_environment.h"
-#include "media/cast/rtcp/rtcp_utility.h"
-#include "media/cast/rtcp/test_rtcp_packet_builder.h"
+#include "media/cast/net/pacing/paced_sender.h"
+#include "media/cast/net/rtcp/rtcp_builder.h"
+#include "media/cast/net/rtcp/rtcp_utility.h"
+#include "media/cast/net/rtcp/test_rtcp_packet_builder.h"
#include "media/cast/test/fake_single_thread_task_runner.h"
-#include "media/cast/transport/pacing/paced_sender.h"
-#include "media/cast/transport/rtcp/rtcp_builder.h"
#include "testing/gmock/include/gmock/gmock.h"
namespace media {
diff --git a/media/cast/rtcp/rtcp_defines.cc b/media/cast/net/rtcp/rtcp_defines.cc
index 214100d4d9..63799a92df 100644
--- a/media/cast/rtcp/rtcp_defines.cc
+++ b/media/cast/net/rtcp/rtcp_defines.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/cast/rtcp/rtcp_defines.h"
+#include "media/cast/net/rtcp/rtcp_defines.h"
#include "media/cast/logging/logging_defines.h"
diff --git a/media/cast/rtcp/rtcp_defines.h b/media/cast/net/rtcp/rtcp_defines.h
index 31795648c6..28f69e1835 100644
--- a/media/cast/rtcp/rtcp_defines.h
+++ b/media/cast/net/rtcp/rtcp_defines.h
@@ -1,4 +1,4 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -11,7 +11,7 @@
#include "media/cast/cast_config.h"
#include "media/cast/cast_defines.h"
#include "media/cast/logging/logging_defines.h"
-#include "media/cast/transport/cast_transport_defines.h"
+#include "media/cast/net/cast_transport_defines.h"
namespace media {
namespace cast {
diff --git a/media/cast/rtcp/rtcp_receiver.cc b/media/cast/net/rtcp/rtcp_receiver.cc
index 3be8e921c4..99fd9178ae 100644
--- a/media/cast/rtcp/rtcp_receiver.cc
+++ b/media/cast/net/rtcp/rtcp_receiver.cc
@@ -1,12 +1,12 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/cast/rtcp/rtcp_receiver.h"
+#include "media/cast/net/rtcp/rtcp_receiver.h"
#include "base/logging.h"
-#include "media/cast/rtcp/rtcp_utility.h"
-#include "media/cast/transport/cast_transport_defines.h"
+#include "media/cast/net/cast_transport_defines.h"
+#include "media/cast/net/rtcp/rtcp_utility.h"
namespace {
@@ -135,7 +135,7 @@ void RtcpReceiver::HandleSenderReport(RtcpParser* rtcp_parser) {
VLOG(2) << "Cast RTCP received SR from SSRC " << remote_ssrc;
if (remote_ssrc_ == remote_ssrc) {
- transport::RtcpSenderInfo remote_sender_info;
+ RtcpSenderInfo remote_sender_info;
remote_sender_info.ntp_seconds =
rtcp_field.sender_report.ntp_most_significant;
remote_sender_info.ntp_fraction =
@@ -193,7 +193,7 @@ void RtcpReceiver::HandleReportBlock(const RtcpField* rtcp_field,
}
VLOG(2) << "Cast RTCP received RB from SSRC " << remote_ssrc;
- transport::RtcpReportBlock report_block;
+ RtcpReportBlock report_block;
report_block.remote_ssrc = remote_ssrc;
report_block.media_ssrc = rb.ssrc;
report_block.fraction_lost = rb.fraction_lost;
diff --git a/media/cast/rtcp/rtcp_receiver.h b/media/cast/net/rtcp/rtcp_receiver.h
index d3cef9e57b..aea45846a2 100644
--- a/media/cast/rtcp/rtcp_receiver.h
+++ b/media/cast/net/rtcp/rtcp_receiver.h
@@ -1,4 +1,4 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -8,10 +8,10 @@
#include <queue>
#include "base/containers/hash_tables.h"
-#include "media/cast/rtcp/rtcp.h"
-#include "media/cast/rtcp/rtcp_defines.h"
-#include "media/cast/rtcp/rtcp_utility.h"
-#include "media/cast/transport/cast_transport_defines.h"
+#include "media/cast/net/cast_transport_defines.h"
+#include "media/cast/net/rtcp/rtcp.h"
+#include "media/cast/net/rtcp/rtcp_defines.h"
+#include "media/cast/net/rtcp/rtcp_utility.h"
namespace media {
namespace cast {
@@ -19,7 +19,7 @@ namespace cast {
class RtcpReceiverFeedback {
public:
virtual void OnReceivedSenderReport(
- const transport::RtcpSenderInfo& remote_sender_info) = 0;
+ const RtcpSenderInfo& remote_sender_info) = 0;
virtual void OnReceiverReferenceTimeReport(
const RtcpReceiverReferenceTimeReport& remote_time_report) = 0;
@@ -116,7 +116,7 @@ class RtcpReceiver {
RtcpRttFeedback* const rtt_feedback_;
scoped_refptr<CastEnvironment> cast_environment_;
- transport::FrameIdWrapHelper ack_frame_id_wrap_helper_;
+ FrameIdWrapHelper ack_frame_id_wrap_helper_;
// Maintains a history of receiver events.
size_t receiver_event_history_size_;
diff --git a/media/cast/rtcp/rtcp_receiver_unittest.cc b/media/cast/net/rtcp/rtcp_receiver_unittest.cc
index 51026d1554..f4010cb36b 100644
--- a/media/cast/rtcp/rtcp_receiver_unittest.cc
+++ b/media/cast/net/rtcp/rtcp_receiver_unittest.cc
@@ -1,17 +1,17 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/memory/scoped_ptr.h"
#include "base/test/simple_test_tick_clock.h"
#include "media/cast/cast_environment.h"
-#include "media/cast/rtcp/mock_rtcp_receiver_feedback.h"
-#include "media/cast/rtcp/mock_rtcp_sender_feedback.h"
-#include "media/cast/rtcp/rtcp_receiver.h"
-#include "media/cast/rtcp/rtcp_utility.h"
-#include "media/cast/rtcp/test_rtcp_packet_builder.h"
+#include "media/cast/net/cast_transport_defines.h"
+#include "media/cast/net/rtcp/mock_rtcp_receiver_feedback.h"
+#include "media/cast/net/rtcp/mock_rtcp_sender_feedback.h"
+#include "media/cast/net/rtcp/rtcp_receiver.h"
+#include "media/cast/net/rtcp/rtcp_utility.h"
+#include "media/cast/net/rtcp/test_rtcp_packet_builder.h"
#include "media/cast/test/fake_single_thread_task_runner.h"
-#include "media/cast/transport/cast_transport_defines.h"
#include "testing/gmock/include/gmock/gmock.h"
namespace media {
@@ -22,7 +22,8 @@ using testing::_;
static const uint32 kSenderSsrc = 0x10203;
static const uint32 kSourceSsrc = 0x40506;
static const uint32 kUnknownSsrc = 0xDEAD;
-static const uint16 kTargetDelayMs = 100;
+static const base::TimeDelta kTargetDelay =
+ base::TimeDelta::FromMilliseconds(100);
static const std::string kCName("test@10.1.1.1");
namespace {
@@ -72,7 +73,7 @@ class RtcpReceiverCastLogVerification : public RtcpReceiverFeedback {
called_on_received_receiver_log_(false) {}
virtual void OnReceivedSenderReport(
- const transport::RtcpSenderInfo& remote_sender_info) OVERRIDE{};
+ const RtcpSenderInfo& remote_sender_info) OVERRIDE{};
virtual void OnReceiverReferenceTimeReport(
const RtcpReceiverReferenceTimeReport& remote_time_report) OVERRIDE{};
@@ -188,8 +189,8 @@ class RtcpReceiverTest : public ::testing::Test {
MockRtcpRttFeedback mock_rtt_feedback_;
MockRtcpSenderFeedback mock_sender_feedback_;
scoped_ptr<RtcpReceiver> rtcp_receiver_;
- transport::RtcpSenderInfo expected_sender_info_;
- transport::RtcpReportBlock expected_report_block_;
+ RtcpSenderInfo expected_sender_info_;
+ RtcpReportBlock expected_report_block_;
RtcpReceiverReferenceTimeReport expected_receiver_reference_report_;
DISALLOW_COPY_AND_ASSIGN(RtcpReceiverTest);
@@ -374,7 +375,7 @@ TEST_F(RtcpReceiverTest, InjectReceiverReportPacketWithCastFeedback) {
TestRtcpPacketBuilder p1;
p1.AddRr(kSenderSsrc, 1);
p1.AddRb(kUnknownSsrc);
- p1.AddCast(kSenderSsrc, kUnknownSsrc, kTargetDelayMs);
+ p1.AddCast(kSenderSsrc, kUnknownSsrc, kTargetDelay);
// Expected to be ignored since the source ssrc does not match our
// local ssrc.
@@ -391,7 +392,7 @@ TEST_F(RtcpReceiverTest, InjectReceiverReportPacketWithCastFeedback) {
TestRtcpPacketBuilder p2;
p2.AddRr(kSenderSsrc, 1);
p2.AddRb(kSourceSsrc);
- p2.AddCast(kSenderSsrc, kSourceSsrc, kTargetDelayMs);
+ p2.AddCast(kSenderSsrc, kSourceSsrc, kTargetDelay);
// Expected to pass through since the sender ssrc matches our local ssrc.
InjectRtcpPacket(p2.Data(), p2.Length());
@@ -415,7 +416,7 @@ TEST_F(RtcpReceiverTest, InjectReceiverReportPacketWithCastVerification) {
TestRtcpPacketBuilder p;
p.AddRr(kSenderSsrc, 1);
p.AddRb(kSourceSsrc);
- p.AddCast(kSenderSsrc, kSourceSsrc, kTargetDelayMs);
+ p.AddCast(kSenderSsrc, kSourceSsrc, kTargetDelay);
// Expected to pass through since the sender ssrc matches our local ssrc.
RtcpParser rtcp_parser(p.Data(), p.Length());
diff --git a/media/cast/rtcp/rtcp_sender.cc b/media/cast/net/rtcp/rtcp_sender.cc
index bf7d30c84c..a44f87ebb0 100644
--- a/media/cast/rtcp/rtcp_sender.cc
+++ b/media/cast/net/rtcp/rtcp_sender.cc
@@ -1,8 +1,8 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/cast/rtcp/rtcp_sender.h"
+#include "media/cast/net/rtcp/rtcp_sender.h"
#include <stdint.h>
@@ -12,10 +12,10 @@
#include "base/big_endian.h"
#include "base/logging.h"
#include "media/cast/cast_environment.h"
-#include "media/cast/rtcp/rtcp_defines.h"
-#include "media/cast/rtcp/rtcp_utility.h"
-#include "media/cast/transport/cast_transport_defines.h"
-#include "media/cast/transport/pacing/paced_sender.h"
+#include "media/cast/net/cast_transport_defines.h"
+#include "media/cast/net/pacing/paced_sender.h"
+#include "media/cast/net/rtcp/rtcp_defines.h"
+#include "media/cast/net/rtcp/rtcp_utility.h"
namespace media {
namespace cast {
@@ -148,7 +148,7 @@ class NackStringBuilder {
// TODO(mikhal): This is only used by the receiver. Consider renaming.
RtcpSender::RtcpSender(scoped_refptr<CastEnvironment> cast_environment,
- transport::PacedPacketSender* outgoing_transport,
+ PacedPacketSender* outgoing_transport,
uint32 sending_ssrc,
const std::string& c_name)
: ssrc_(sending_ssrc),
@@ -162,44 +162,44 @@ RtcpSender::~RtcpSender() {}
void RtcpSender::SendRtcpFromRtpReceiver(
uint32 packet_type_flags,
- const transport::RtcpReportBlock* report_block,
+ const RtcpReportBlock* report_block,
const RtcpReceiverReferenceTimeReport* rrtr,
const RtcpCastMessage* cast_message,
const ReceiverRtcpEventSubscriber::RtcpEventMultiMap* rtcp_events,
- uint16 target_delay_ms) {
- if (packet_type_flags & transport::kRtcpSr ||
- packet_type_flags & transport::kRtcpDlrr ||
- packet_type_flags & transport::kRtcpSenderLog) {
+ base::TimeDelta target_delay) {
+ if (packet_type_flags & kRtcpSr ||
+ packet_type_flags & kRtcpDlrr ||
+ packet_type_flags & kRtcpSenderLog) {
NOTREACHED() << "Invalid argument";
}
- if (packet_type_flags & transport::kRtcpPli ||
- packet_type_flags & transport::kRtcpRpsi ||
- packet_type_flags & transport::kRtcpRemb ||
- packet_type_flags & transport::kRtcpNack) {
+ if (packet_type_flags & kRtcpPli ||
+ packet_type_flags & kRtcpRpsi ||
+ packet_type_flags & kRtcpRemb ||
+ packet_type_flags & kRtcpNack) {
// Implement these for webrtc interop.
NOTIMPLEMENTED();
}
- transport::PacketRef packet(new base::RefCountedData<Packet>);
+ PacketRef packet(new base::RefCountedData<Packet>);
packet->data.reserve(kMaxIpPacketSize);
- if (packet_type_flags & transport::kRtcpRr) {
+ if (packet_type_flags & kRtcpRr) {
BuildRR(report_block, &packet->data);
if (!c_name_.empty()) {
BuildSdec(&packet->data);
}
}
- if (packet_type_flags & transport::kRtcpBye) {
+ if (packet_type_flags & kRtcpBye) {
BuildBye(&packet->data);
}
- if (packet_type_flags & transport::kRtcpRrtr) {
+ if (packet_type_flags & kRtcpRrtr) {
DCHECK(rrtr) << "Invalid argument";
BuildRrtr(rrtr, &packet->data);
}
- if (packet_type_flags & transport::kRtcpCast) {
+ if (packet_type_flags & kRtcpCast) {
DCHECK(cast_message) << "Invalid argument";
- BuildCast(cast_message, target_delay_ms, &packet->data);
+ BuildCast(cast_message, target_delay, &packet->data);
}
- if (packet_type_flags & transport::kRtcpReceiverLog) {
+ if (packet_type_flags & kRtcpReceiverLog) {
DCHECK(rtcp_events) << "Invalid argument";
BuildReceiverLog(*rtcp_events, &packet->data);
}
@@ -210,7 +210,7 @@ void RtcpSender::SendRtcpFromRtpReceiver(
transport_->SendRtcpPacket(ssrc_, packet);
}
-void RtcpSender::BuildRR(const transport::RtcpReportBlock* report_block,
+void RtcpSender::BuildRR(const RtcpReportBlock* report_block,
Packet* packet) const {
size_t start_size = packet->size();
DCHECK_LT(start_size + 32, kMaxIpPacketSize) << "Not enough buffer space";
@@ -223,7 +223,7 @@ void RtcpSender::BuildRR(const transport::RtcpReportBlock* report_block,
base::BigEndianWriter big_endian_writer(
reinterpret_cast<char*>(&((*packet)[start_size])), 8);
big_endian_writer.WriteU8(0x80 + (report_block ? 1 : 0));
- big_endian_writer.WriteU8(transport::kPacketTypeReceiverReport);
+ big_endian_writer.WriteU8(kPacketTypeReceiverReport);
big_endian_writer.WriteU16(number_of_rows);
big_endian_writer.WriteU32(ssrc_);
@@ -232,7 +232,7 @@ void RtcpSender::BuildRR(const transport::RtcpReportBlock* report_block,
}
}
-void RtcpSender::AddReportBlocks(const transport::RtcpReportBlock& report_block,
+void RtcpSender::AddReportBlocks(const RtcpReportBlock& report_block,
Packet* packet) const {
size_t start_size = packet->size();
DCHECK_LT(start_size + 24, kMaxIpPacketSize) << "Not enough buffer space";
@@ -276,7 +276,7 @@ void RtcpSender::BuildSdec(Packet* packet) const {
reinterpret_cast<char*>(&((*packet)[start_size])), 10);
// We always need to add one SDES CNAME.
big_endian_writer.WriteU8(0x80 + 1);
- big_endian_writer.WriteU8(transport::kPacketTypeSdes);
+ big_endian_writer.WriteU8(kPacketTypeSdes);
// Handle SDES length later on.
uint32 sdes_length_position = static_cast<uint32>(start_size) + 3;
@@ -319,7 +319,7 @@ void RtcpSender::BuildPli(uint32 remote_ssrc, Packet* packet) const {
reinterpret_cast<char*>(&((*packet)[start_size])), 12);
uint8 FMT = 1; // Picture loss indicator.
big_endian_writer.WriteU8(0x80 + FMT);
- big_endian_writer.WriteU8(transport::kPacketTypePayloadSpecific);
+ big_endian_writer.WriteU8(kPacketTypePayloadSpecific);
big_endian_writer.WriteU16(2); // Used fixed length of 2.
big_endian_writer.WriteU32(ssrc_); // Add our own SSRC.
big_endian_writer.WriteU32(remote_ssrc); // Add the remote SSRC.
@@ -346,7 +346,7 @@ void RtcpSender::BuildRpsi(const RtcpRpsiMessage* rpsi, Packet* packet) const {
reinterpret_cast<char*>(&((*packet)[start_size])), 24);
uint8 FMT = 3; // Reference Picture Selection Indication.
big_endian_writer.WriteU8(0x80 + FMT);
- big_endian_writer.WriteU8(transport::kPacketTypePayloadSpecific);
+ big_endian_writer.WriteU8(kPacketTypePayloadSpecific);
// Calculate length.
uint32 bits_required = 7;
@@ -404,7 +404,7 @@ void RtcpSender::BuildRemb(const RtcpRembMessage* remb, Packet* packet) const {
// Add application layer feedback.
uint8 FMT = 15;
big_endian_writer.WriteU8(0x80 + FMT);
- big_endian_writer.WriteU8(transport::kPacketTypePayloadSpecific);
+ big_endian_writer.WriteU8(kPacketTypePayloadSpecific);
big_endian_writer.WriteU8(0);
big_endian_writer.WriteU8(static_cast<uint8>(remb->remb_ssrcs.size() + 4));
big_endian_writer.WriteU32(ssrc_); // Add our own SSRC.
@@ -442,7 +442,7 @@ void RtcpSender::BuildNack(const RtcpNackMessage* nack, Packet* packet) const {
uint8 FMT = 1;
big_endian_writer.WriteU8(0x80 + FMT);
- big_endian_writer.WriteU8(transport::kPacketTypeGenericRtpFeedback);
+ big_endian_writer.WriteU8(kPacketTypeGenericRtpFeedback);
big_endian_writer.WriteU8(0);
size_t nack_size_pos = start_size + 3;
big_endian_writer.WriteU8(3);
@@ -498,7 +498,7 @@ void RtcpSender::BuildBye(Packet* packet) const {
base::BigEndianWriter big_endian_writer(
reinterpret_cast<char*>(&((*packet)[start_size])), 8);
big_endian_writer.WriteU8(0x80 + 1);
- big_endian_writer.WriteU8(transport::kPacketTypeBye);
+ big_endian_writer.WriteU8(kPacketTypeBye);
big_endian_writer.WriteU16(1); // Length.
big_endian_writer.WriteU32(ssrc_); // Add our own SSRC.
}
@@ -516,7 +516,7 @@ void RtcpSender::BuildRrtr(const RtcpReceiverReferenceTimeReport* rrtr,
reinterpret_cast<char*>(&((*packet)[start_size])), 20);
big_endian_writer.WriteU8(0x80);
- big_endian_writer.WriteU8(transport::kPacketTypeXr);
+ big_endian_writer.WriteU8(kPacketTypeXr);
big_endian_writer.WriteU16(4); // Length.
big_endian_writer.WriteU32(ssrc_); // Add our own SSRC.
big_endian_writer.WriteU8(4); // Add block type.
@@ -529,7 +529,7 @@ void RtcpSender::BuildRrtr(const RtcpReceiverReferenceTimeReport* rrtr,
}
void RtcpSender::BuildCast(const RtcpCastMessage* cast,
- uint16 target_delay_ms,
+ base::TimeDelta target_delay,
Packet* packet) const {
size_t start_size = packet->size();
DCHECK_LT(start_size + 20, kMaxIpPacketSize) << "Not enough buffer space";
@@ -542,7 +542,7 @@ void RtcpSender::BuildCast(const RtcpCastMessage* cast,
reinterpret_cast<char*>(&((*packet)[start_size])), 20);
uint8 FMT = 15; // Application layer feedback.
big_endian_writer.WriteU8(0x80 + FMT);
- big_endian_writer.WriteU8(transport::kPacketTypePayloadSpecific);
+ big_endian_writer.WriteU8(kPacketTypePayloadSpecific);
big_endian_writer.WriteU8(0);
size_t cast_size_pos = start_size + 3; // Save length position.
big_endian_writer.WriteU8(4);
@@ -552,7 +552,9 @@ void RtcpSender::BuildCast(const RtcpCastMessage* cast,
big_endian_writer.WriteU8(static_cast<uint8>(cast->ack_frame_id_));
size_t cast_loss_field_pos = start_size + 17; // Save loss field position.
big_endian_writer.WriteU8(0); // Overwritten with number_of_loss_fields.
- big_endian_writer.WriteU16(target_delay_ms);
+ DCHECK_LE(target_delay.InMilliseconds(),
+ std::numeric_limits<uint16_t>::max());
+ big_endian_writer.WriteU16(target_delay.InMilliseconds());
size_t number_of_loss_fields = 0;
size_t max_number_of_loss_fields = std::min<size_t>(
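The wire format still carries the target delay as a 16-bit millisecond field, so the TimeDelta has to be narrowed at serialization time; the DCHECK_LE guards that narrowing in debug builds only. A minimal sketch of the narrowing, using std::chrono::milliseconds in place of base::TimeDelta:

// Sketch: narrowing a millisecond delay to the 16-bit field written by
// BuildCast. std::chrono::milliseconds stands in for base::TimeDelta.
#include <cassert>
#include <chrono>
#include <cstdint>
#include <limits>

uint16_t TargetDelayToWire(std::chrono::milliseconds target_delay) {
  assert(target_delay.count() >= 0);
  assert(target_delay.count() <= std::numeric_limits<uint16_t>::max());
  return static_cast<uint16_t>(target_delay.count());  // e.g. 100 ms -> 100.
}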
@@ -641,7 +643,7 @@ void RtcpSender::BuildReceiverLog(
base::BigEndianWriter big_endian_writer(
reinterpret_cast<char*>(&((*packet)[packet_start_size])), rtcp_log_size);
big_endian_writer.WriteU8(0x80 + kReceiverLogSubtype);
- big_endian_writer.WriteU8(transport::kPacketTypeApplicationDefined);
+ big_endian_writer.WriteU8(kPacketTypeApplicationDefined);
big_endian_writer.WriteU16(static_cast<uint16>(
2 + 2 * number_of_frames + total_number_of_messages_to_send));
big_endian_writer.WriteU32(ssrc_); // Add our own SSRC.
diff --git a/media/cast/rtcp/rtcp_sender.h b/media/cast/net/rtcp/rtcp_sender.h
index f09a4fb0e5..fe5af0dcd3 100644
--- a/media/cast/rtcp/rtcp_sender.h
+++ b/media/cast/net/rtcp/rtcp_sender.h
@@ -1,4 +1,4 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -11,11 +11,11 @@
#include "media/cast/cast_config.h"
#include "media/cast/cast_defines.h"
-#include "media/cast/rtcp/receiver_rtcp_event_subscriber.h"
-#include "media/cast/rtcp/rtcp.h"
-#include "media/cast/rtcp/rtcp_defines.h"
-#include "media/cast/transport/cast_transport_defines.h"
-#include "media/cast/transport/rtcp/rtcp_builder.h"
+#include "media/cast/net/cast_transport_defines.h"
+#include "media/cast/net/rtcp/receiver_rtcp_event_subscriber.h"
+#include "media/cast/net/rtcp/rtcp.h"
+#include "media/cast/net/rtcp/rtcp_builder.h"
+#include "media/cast/net/rtcp/rtcp_defines.h"
namespace media {
namespace cast {
@@ -44,7 +44,7 @@ COMPILE_ASSERT(kSecondRedundancyOffset >
class RtcpSender {
public:
RtcpSender(scoped_refptr<CastEnvironment> cast_environment,
- transport::PacedPacketSender* outgoing_transport,
+ PacedPacketSender* outgoing_transport,
uint32 sending_ssrc,
const std::string& c_name);
@@ -52,17 +52,17 @@ class RtcpSender {
void SendRtcpFromRtpReceiver(
uint32 packet_type_flags,
- const transport::RtcpReportBlock* report_block,
+ const RtcpReportBlock* report_block,
const RtcpReceiverReferenceTimeReport* rrtr,
const RtcpCastMessage* cast_message,
const ReceiverRtcpEventSubscriber::RtcpEventMultiMap* rtcp_events,
- uint16 target_delay_ms);
+ base::TimeDelta target_delay);
private:
- void BuildRR(const transport::RtcpReportBlock* report_block,
+ void BuildRR(const RtcpReportBlock* report_block,
Packet* packet) const;
- void AddReportBlocks(const transport::RtcpReportBlock& report_block,
+ void AddReportBlocks(const RtcpReportBlock& report_block,
Packet* packet) const;
void BuildSdec(Packet* packet) const;
@@ -81,7 +81,7 @@ class RtcpSender {
Packet* packet) const;
void BuildCast(const RtcpCastMessage* cast_message,
- uint16 target_delay_ms,
+ base::TimeDelta target_delay,
Packet* packet) const;
void BuildReceiverLog(
@@ -114,7 +114,7 @@ class RtcpSender {
const std::string c_name_;
// Not owned by this class.
- transport::PacedPacketSender* const transport_;
+ PacedPacketSender* const transport_;
scoped_refptr<CastEnvironment> cast_environment_;
std::deque<RtcpReceiverLogMessage> rtcp_events_history_;
diff --git a/media/cast/rtcp/rtcp_sender_unittest.cc b/media/cast/net/rtcp/rtcp_sender_unittest.cc
index 0b0c7d3ab8..c0c321e2c8 100644
--- a/media/cast/rtcp/rtcp_sender_unittest.cc
+++ b/media/cast/net/rtcp/rtcp_sender_unittest.cc
@@ -1,4 +1,4 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -6,13 +6,13 @@
#include "base/test/simple_test_tick_clock.h"
#include "media/cast/cast_defines.h"
#include "media/cast/cast_environment.h"
-#include "media/cast/rtcp/receiver_rtcp_event_subscriber.h"
-#include "media/cast/rtcp/rtcp_sender.h"
-#include "media/cast/rtcp/rtcp_utility.h"
-#include "media/cast/rtcp/test_rtcp_packet_builder.h"
+#include "media/cast/net/cast_transport_defines.h"
+#include "media/cast/net/pacing/paced_sender.h"
+#include "media/cast/net/rtcp/receiver_rtcp_event_subscriber.h"
+#include "media/cast/net/rtcp/rtcp_sender.h"
+#include "media/cast/net/rtcp/rtcp_utility.h"
+#include "media/cast/net/rtcp/test_rtcp_packet_builder.h"
#include "media/cast/test/fake_single_thread_task_runner.h"
-#include "media/cast/transport/cast_transport_defines.h"
-#include "media/cast/transport/pacing/paced_sender.h"
#include "testing/gmock/include/gmock/gmock.h"
namespace media {
@@ -21,11 +21,12 @@ namespace cast {
namespace {
static const uint32 kSendingSsrc = 0x12345678;
static const uint32 kMediaSsrc = 0x87654321;
-static const int16 kDefaultDelay = 100;
+static const base::TimeDelta kDefaultDelay =
+ base::TimeDelta::FromMilliseconds(100);
static const std::string kCName("test@10.1.1.1");
-transport::RtcpReportBlock GetReportBlock() {
- transport::RtcpReportBlock report_block;
+RtcpReportBlock GetReportBlock() {
+ RtcpReportBlock report_block;
// Initialize remote_ssrc to a "clearly illegal" value.
report_block.remote_ssrc = 0xDEAD;
report_block.media_ssrc = kMediaSsrc; // SSRC of the RTP packet sender.
@@ -40,12 +41,12 @@ transport::RtcpReportBlock GetReportBlock() {
} // namespace
-class TestRtcpTransport : public transport::PacedPacketSender {
+class TestRtcpTransport : public PacedPacketSender {
public:
TestRtcpTransport() : packet_count_(0) {}
virtual bool SendRtcpPacket(uint32 ssrc,
- transport::PacketRef packet) OVERRIDE {
+ PacketRef packet) OVERRIDE {
EXPECT_EQ(expected_packet_.size(), packet->data.size());
EXPECT_EQ(0, memcmp(expected_packet_.data(),
packet->data.data(),
@@ -55,17 +56,17 @@ class TestRtcpTransport : public transport::PacedPacketSender {
}
virtual bool SendPackets(
- const transport::SendPacketVector& packets) OVERRIDE {
+ const SendPacketVector& packets) OVERRIDE {
return false;
}
virtual bool ResendPackets(
- const transport::SendPacketVector& packets,
+ const SendPacketVector& packets,
base::TimeDelta dedupe_window) OVERRIDE {
return false;
}
virtual void CancelSendingPacket(
- const transport::PacketKey& packet_key) OVERRIDE {
+ const PacketKey& packet_key) OVERRIDE {
}
void SetExpectedRtcpPacket(scoped_ptr<Packet> packet) {
@@ -113,7 +114,7 @@ TEST_F(RtcpSenderTest, RtcpReceiverReport) {
test_transport_.SetExpectedRtcpPacket(p1.GetPacket());
rtcp_sender_->SendRtcpFromRtpReceiver(
- transport::kRtcpRr, NULL, NULL, NULL, NULL, kDefaultDelay);
+ kRtcpRr, NULL, NULL, NULL, NULL, kDefaultDelay);
EXPECT_EQ(1, test_transport_.packet_count());
@@ -124,10 +125,10 @@ TEST_F(RtcpSenderTest, RtcpReceiverReport) {
p2.AddSdesCname(kSendingSsrc, kCName);
test_transport_.SetExpectedRtcpPacket(p2.GetPacket().Pass());
- transport::RtcpReportBlock report_block = GetReportBlock();
+ RtcpReportBlock report_block = GetReportBlock();
rtcp_sender_->SendRtcpFromRtpReceiver(
- transport::kRtcpRr, &report_block, NULL, NULL, NULL, kDefaultDelay);
+ kRtcpRr, &report_block, NULL, NULL, NULL, kDefaultDelay);
EXPECT_EQ(2, test_transport_.packet_count());
}
@@ -142,14 +143,14 @@ TEST_F(RtcpSenderTest, RtcpReceiverReportWithRrtr) {
p.AddXrRrtrBlock();
test_transport_.SetExpectedRtcpPacket(p.GetPacket().Pass());
- transport::RtcpReportBlock report_block = GetReportBlock();
+ RtcpReportBlock report_block = GetReportBlock();
RtcpReceiverReferenceTimeReport rrtr;
rrtr.ntp_seconds = kNtpHigh;
rrtr.ntp_fraction = kNtpLow;
rtcp_sender_->SendRtcpFromRtpReceiver(
- transport::kRtcpRr | transport::kRtcpRrtr,
+ kRtcpRr | kRtcpRrtr,
&report_block,
&rrtr,
NULL,
@@ -168,7 +169,7 @@ TEST_F(RtcpSenderTest, RtcpReceiverReportWithCast) {
p.AddCast(kSendingSsrc, kMediaSsrc, kDefaultDelay);
test_transport_.SetExpectedRtcpPacket(p.GetPacket().Pass());
- transport::RtcpReportBlock report_block = GetReportBlock();
+ RtcpReportBlock report_block = GetReportBlock();
RtcpCastMessage cast_message(kMediaSsrc);
cast_message.ack_frame_id_ = kAckFrameId;
@@ -182,7 +183,7 @@ TEST_F(RtcpSenderTest, RtcpReceiverReportWithCast) {
missing_packets;
rtcp_sender_->SendRtcpFromRtpReceiver(
- transport::kRtcpRr | transport::kRtcpCast,
+ kRtcpRr | kRtcpCast,
&report_block,
NULL,
&cast_message,
@@ -202,7 +203,7 @@ TEST_F(RtcpSenderTest, RtcpReceiverReportWithRrtraAndCastMessage) {
p.AddCast(kSendingSsrc, kMediaSsrc, kDefaultDelay);
test_transport_.SetExpectedRtcpPacket(p.GetPacket().Pass());
- transport::RtcpReportBlock report_block = GetReportBlock();
+ RtcpReportBlock report_block = GetReportBlock();
RtcpReceiverReferenceTimeReport rrtr;
rrtr.ntp_seconds = kNtpHigh;
@@ -220,7 +221,7 @@ TEST_F(RtcpSenderTest, RtcpReceiverReportWithRrtraAndCastMessage) {
missing_packets;
rtcp_sender_->SendRtcpFromRtpReceiver(
- transport::kRtcpRr | transport::kRtcpRrtr | transport::kRtcpCast,
+ kRtcpRr | kRtcpRrtr | kRtcpCast,
&report_block,
&rrtr,
&cast_message,
@@ -243,7 +244,7 @@ TEST_F(RtcpSenderTest, RtcpReceiverReportWithRrtrCastMessageAndLog) {
p.AddCast(kSendingSsrc, kMediaSsrc, kDefaultDelay);
test_transport_.SetExpectedRtcpPacket(p.GetPacket().Pass());
- transport::RtcpReportBlock report_block = GetReportBlock();
+ RtcpReportBlock report_block = GetReportBlock();
RtcpReceiverReferenceTimeReport rrtr;
rrtr.ntp_seconds = kNtpHigh;
@@ -264,8 +265,8 @@ TEST_F(RtcpSenderTest, RtcpReceiverReportWithRrtrCastMessageAndLog) {
ReceiverRtcpEventSubscriber::RtcpEventMultiMap rtcp_events;
rtcp_sender_->SendRtcpFromRtpReceiver(
- transport::kRtcpRr | transport::kRtcpRrtr | transport::kRtcpCast |
- transport::kRtcpReceiverLog,
+ kRtcpRr | kRtcpRrtr | kRtcpCast |
+ kRtcpReceiverLog,
&report_block,
&rrtr,
&cast_message,
@@ -301,8 +302,8 @@ TEST_F(RtcpSenderTest, RtcpReceiverReportWithRrtrCastMessageAndLog) {
EXPECT_EQ(2u, rtcp_events.size());
rtcp_sender_->SendRtcpFromRtpReceiver(
- transport::kRtcpRr | transport::kRtcpRrtr | transport::kRtcpCast |
- transport::kRtcpReceiverLog,
+ kRtcpRr | kRtcpRrtr | kRtcpCast |
+ kRtcpReceiverLog,
&report_block,
&rrtr,
&cast_message,
@@ -321,7 +322,7 @@ TEST_F(RtcpSenderTest, RtcpReceiverReportWithOversizedFrameLog) {
p.AddRb(kMediaSsrc);
p.AddSdesCname(kSendingSsrc, kCName);
- transport::RtcpReportBlock report_block = GetReportBlock();
+ RtcpReportBlock report_block = GetReportBlock();
base::SimpleTestTickClock testing_clock;
testing_clock.Advance(base::TimeDelta::FromMilliseconds(kTimeBaseMs));
@@ -370,7 +371,7 @@ TEST_F(RtcpSenderTest, RtcpReceiverReportWithOversizedFrameLog) {
event_subscriber.GetRtcpEventsAndReset(&rtcp_events);
rtcp_sender_->SendRtcpFromRtpReceiver(
- transport::kRtcpRr | transport::kRtcpReceiverLog,
+ kRtcpRr | kRtcpReceiverLog,
&report_block,
NULL,
NULL,
@@ -389,7 +390,7 @@ TEST_F(RtcpSenderTest, RtcpReceiverReportWithTooManyLogFrames) {
p.AddRb(kMediaSsrc);
p.AddSdesCname(kSendingSsrc, kCName);
- transport::RtcpReportBlock report_block = GetReportBlock();
+ RtcpReportBlock report_block = GetReportBlock();
base::SimpleTestTickClock testing_clock;
testing_clock.Advance(base::TimeDelta::FromMilliseconds(kTimeBaseMs));
@@ -427,7 +428,7 @@ TEST_F(RtcpSenderTest, RtcpReceiverReportWithTooManyLogFrames) {
event_subscriber.GetRtcpEventsAndReset(&rtcp_events);
rtcp_sender_->SendRtcpFromRtpReceiver(
- transport::kRtcpRr | transport::kRtcpReceiverLog,
+ kRtcpRr | kRtcpReceiverLog,
&report_block,
NULL,
NULL,
@@ -445,7 +446,7 @@ TEST_F(RtcpSenderTest, RtcpReceiverReportWithOldLogFrames) {
p.AddRb(kMediaSsrc);
p.AddSdesCname(kSendingSsrc, kCName);
- transport::RtcpReportBlock report_block = GetReportBlock();
+ RtcpReportBlock report_block = GetReportBlock();
base::SimpleTestTickClock testing_clock;
testing_clock.Advance(base::TimeDelta::FromMilliseconds(kTimeBaseMs));
@@ -478,7 +479,7 @@ TEST_F(RtcpSenderTest, RtcpReceiverReportWithOldLogFrames) {
event_subscriber.GetRtcpEventsAndReset(&rtcp_events);
rtcp_sender_->SendRtcpFromRtpReceiver(
- transport::kRtcpRr | transport::kRtcpReceiverLog,
+ kRtcpRr | kRtcpReceiverLog,
&report_block,
NULL,
NULL,
@@ -492,7 +493,7 @@ TEST_F(RtcpSenderTest, RtcpReceiverReportRedundancy) {
uint32 time_base_ms = 12345678;
int kTimeBetweenEventsMs = 10;
- transport::RtcpReportBlock report_block = GetReportBlock();
+ RtcpReportBlock report_block = GetReportBlock();
base::SimpleTestTickClock testing_clock;
testing_clock.Advance(base::TimeDelta::FromMilliseconds(time_base_ms));
@@ -537,7 +538,7 @@ TEST_F(RtcpSenderTest, RtcpReceiverReportRedundancy) {
event_subscriber.GetRtcpEventsAndReset(&rtcp_events);
rtcp_sender_->SendRtcpFromRtpReceiver(
- transport::kRtcpRr | transport::kRtcpReceiverLog,
+ kRtcpRr | kRtcpReceiverLog,
&report_block,
NULL,
NULL,
diff --git a/media/cast/rtcp/rtcp_unittest.cc b/media/cast/net/rtcp/rtcp_unittest.cc
index 095e6d24df..25d05b2239 100644
--- a/media/cast/rtcp/rtcp_unittest.cc
+++ b/media/cast/net/rtcp/rtcp_unittest.cc
@@ -1,4 +1,4 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -7,14 +7,14 @@
#include "base/test/simple_test_tick_clock.h"
#include "media/cast/cast_defines.h"
#include "media/cast/cast_environment.h"
-#include "media/cast/rtcp/mock_rtcp_receiver_feedback.h"
-#include "media/cast/rtcp/mock_rtcp_sender_feedback.h"
-#include "media/cast/rtcp/rtcp.h"
-#include "media/cast/rtcp/test_rtcp_packet_builder.h"
+#include "media/cast/net/cast_transport_config.h"
+#include "media/cast/net/cast_transport_sender_impl.h"
+#include "media/cast/net/pacing/paced_sender.h"
+#include "media/cast/net/rtcp/mock_rtcp_receiver_feedback.h"
+#include "media/cast/net/rtcp/mock_rtcp_sender_feedback.h"
+#include "media/cast/net/rtcp/rtcp.h"
+#include "media/cast/net/rtcp/test_rtcp_packet_builder.h"
#include "media/cast/test/fake_single_thread_task_runner.h"
-#include "media/cast/transport/cast_transport_config.h"
-#include "media/cast/transport/cast_transport_sender_impl.h"
-#include "media/cast/transport/pacing/paced_sender.h"
#include "testing/gmock/include/gmock/gmock.h"
namespace media {
@@ -29,7 +29,7 @@ static const uint32 kRtcpIntervalMs = 500;
static const int64 kAddedDelay = 123;
static const int64 kAddedShortDelay = 100;
-class RtcpTestPacketSender : public transport::PacketSender {
+class RtcpTestPacketSender : public PacketSender {
public:
explicit RtcpTestPacketSender(base::SimpleTestTickClock* testing_clock)
: drop_packets_(false),
@@ -45,7 +45,7 @@ class RtcpTestPacketSender : public transport::PacketSender {
void set_drop_packets(bool drop_packets) { drop_packets_ = drop_packets; }
// A single packet implies an RTCP packet.
- virtual bool SendPacket(transport::PacketRef packet,
+ virtual bool SendPacket(PacketRef packet,
const base::Closure& cb) OVERRIDE {
if (short_delay_) {
testing_clock_->Advance(
@@ -69,7 +69,7 @@ class RtcpTestPacketSender : public transport::PacketSender {
DISALLOW_COPY_AND_ASSIGN(RtcpTestPacketSender);
};
-class LocalRtcpTransport : public transport::PacedPacketSender {
+class LocalRtcpTransport : public PacedPacketSender {
public:
LocalRtcpTransport(scoped_refptr<CastEnvironment> cast_environment,
base::SimpleTestTickClock* testing_clock)
@@ -84,7 +84,7 @@ class LocalRtcpTransport : public transport::PacedPacketSender {
void set_drop_packets(bool drop_packets) { drop_packets_ = drop_packets; }
virtual bool SendRtcpPacket(uint32 ssrc,
- transport::PacketRef packet) OVERRIDE {
+ PacketRef packet) OVERRIDE {
if (short_delay_) {
testing_clock_->Advance(
base::TimeDelta::FromMilliseconds(kAddedShortDelay));
@@ -99,18 +99,18 @@ class LocalRtcpTransport : public transport::PacedPacketSender {
}
virtual bool SendPackets(
- const transport::SendPacketVector& packets) OVERRIDE {
+ const SendPacketVector& packets) OVERRIDE {
return false;
}
virtual bool ResendPackets(
- const transport::SendPacketVector& packets,
+ const SendPacketVector& packets,
base::TimeDelta dedupe_window) OVERRIDE {
return false;
}
virtual void CancelSendingPacket(
- const transport::PacketKey& packet_key) OVERRIDE {
+ const PacketKey& packet_key) OVERRIDE {
}
private:
@@ -127,8 +127,8 @@ class RtcpPeer : public Rtcp {
public:
RtcpPeer(scoped_refptr<CastEnvironment> cast_environment,
RtcpSenderFeedback* sender_feedback,
- transport::CastTransportSender* const transport_sender,
- transport::PacedPacketSender* paced_packet_sender,
+ CastTransportSender* const transport_sender,
+ PacedPacketSender* paced_packet_sender,
RtpReceiverStatistics* rtp_receiver_statistics,
RtcpMode rtcp_mode,
const base::TimeDelta& rtcp_interval,
@@ -165,27 +165,28 @@ class RtcpTest : public ::testing::Test {
receiver_to_sender_(cast_environment_, testing_clock_) {
testing_clock_->Advance(base::TimeTicks::Now() - base::TimeTicks());
net::IPEndPoint dummy_endpoint;
- transport_sender_.reset(new transport::CastTransportSenderImpl(
+ transport_sender_.reset(new CastTransportSenderImpl(
NULL,
testing_clock_,
dummy_endpoint,
base::Bind(&UpdateCastTransportStatus),
- transport::BulkRawEventsCallback(),
+ BulkRawEventsCallback(),
base::TimeDelta(),
task_runner_,
&sender_to_receiver_));
- transport::CastTransportAudioConfig config;
- config.rtp.config.ssrc = kSenderSsrc;
- config.rtp.max_outstanding_frames = 1;
+ CastTransportRtpConfig config;
+ config.ssrc = kSenderSsrc;
+ config.rtp_payload_type = 127;
+ config.stored_frames = 1;
transport_sender_->InitializeAudio(config);
EXPECT_CALL(mock_sender_feedback_, OnReceivedCastFeedback(_)).Times(0);
}
virtual ~RtcpTest() {}
- static void UpdateCastTransportStatus(transport::CastTransportStatus status) {
- bool result = (status == transport::TRANSPORT_AUDIO_INITIALIZED ||
- status == transport::TRANSPORT_VIDEO_INITIALIZED);
+ static void UpdateCastTransportStatus(CastTransportStatus status) {
+ bool result = (status == TRANSPORT_AUDIO_INITIALIZED ||
+ status == TRANSPORT_VIDEO_INITIALIZED);
EXPECT_TRUE(result);
}
@@ -201,7 +202,7 @@ class RtcpTest : public ::testing::Test {
scoped_refptr<test::FakeSingleThreadTaskRunner> task_runner_;
scoped_refptr<CastEnvironment> cast_environment_;
RtcpTestPacketSender sender_to_receiver_;
- scoped_ptr<transport::CastTransportSenderImpl> transport_sender_;
+ scoped_ptr<CastTransportSenderImpl> transport_sender_;
LocalRtcpTransport receiver_to_sender_;
MockRtcpSenderFeedback mock_sender_feedback_;
diff --git a/media/cast/rtcp/rtcp_utility.cc b/media/cast/net/rtcp/rtcp_utility.cc
index e29f82e9cf..91fccc1968 100644
--- a/media/cast/rtcp/rtcp_utility.cc
+++ b/media/cast/net/rtcp/rtcp_utility.cc
@@ -1,12 +1,12 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/cast/rtcp/rtcp_utility.h"
+#include "media/cast/net/rtcp/rtcp_utility.h"
#include "base/big_endian.h"
#include "base/logging.h"
-#include "media/cast/transport/cast_transport_defines.h"
+#include "media/cast/net/cast_transport_defines.h"
namespace media {
namespace cast {
@@ -106,44 +106,44 @@ void RtcpParser::IterateTopLevel() {
return; // Bad block!
switch (header.PT) {
- case transport::kPacketTypeSenderReport:
+ case kPacketTypeSenderReport:
// number of Report blocks
number_of_blocks_ = header.IC;
ParseSR();
return;
- case transport::kPacketTypeReceiverReport:
+ case kPacketTypeReceiverReport:
// number of Report blocks
number_of_blocks_ = header.IC;
ParseRR();
return;
- case transport::kPacketTypeSdes:
+ case kPacketTypeSdes:
// number of Sdes blocks
number_of_blocks_ = header.IC;
if (!ParseSdes()) {
break; // Nothing supported found, continue to next block!
}
return;
- case transport::kPacketTypeBye:
+ case kPacketTypeBye:
number_of_blocks_ = header.IC;
if (!ParseBye()) {
// Nothing supported found, continue to next block!
break;
}
return;
- case transport::kPacketTypeApplicationDefined:
+ case kPacketTypeApplicationDefined:
if (!ParseApplicationDefined(header.IC)) {
// Nothing supported found, continue to next block!
break;
}
return;
- case transport::kPacketTypeGenericRtpFeedback: // Fall through!
- case transport::kPacketTypePayloadSpecific:
+ case kPacketTypeGenericRtpFeedback: // Fall through!
+ case kPacketTypePayloadSpecific:
if (!ParseFeedBackCommon(header)) {
// Nothing supported found, continue to next block!
break;
}
return;
- case transport::kPacketTypeXr:
+ case kPacketTypeXr:
if (!ParseExtendedReport()) {
break; // Nothing supported found, continue to next block!
}
@@ -609,8 +609,8 @@ bool RtcpParser::ParseCastReceiverLogEventItem() {
}
bool RtcpParser::ParseFeedBackCommon(const RtcpCommonHeader& header) {
- DCHECK((header.PT == transport::kPacketTypeGenericRtpFeedback) ||
- (header.PT == transport::kPacketTypePayloadSpecific))
+ DCHECK((header.PT == kPacketTypeGenericRtpFeedback) ||
+ (header.PT == kPacketTypePayloadSpecific))
<< "Invalid state";
ptrdiff_t length = rtcp_block_end_ - rtcp_data_;
@@ -630,7 +630,7 @@ bool RtcpParser::ParseFeedBackCommon(const RtcpCommonHeader& header) {
rtcp_data_ += 12;
- if (header.PT == transport::kPacketTypeGenericRtpFeedback) {
+ if (header.PT == kPacketTypeGenericRtpFeedback) {
// Transport layer feedback
switch (header.IC) {
case 1:
@@ -663,7 +663,7 @@ bool RtcpParser::ParseFeedBackCommon(const RtcpCommonHeader& header) {
EndCurrentBlock();
return false;
- } else if (header.PT == transport::kPacketTypePayloadSpecific) {
+ } else if (header.PT == kPacketTypePayloadSpecific) {
// Payload specific feedback
switch (header.IC) {
case 1:
diff --git a/media/cast/rtcp/rtcp_utility.h b/media/cast/net/rtcp/rtcp_utility.h
index 34f3f25a88..bf6717071a 100644
--- a/media/cast/rtcp/rtcp_utility.h
+++ b/media/cast/net/rtcp/rtcp_utility.h
@@ -1,4 +1,4 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -8,7 +8,7 @@
#include "media/cast/cast_config.h"
#include "media/cast/cast_defines.h"
#include "media/cast/logging/logging_defines.h"
-#include "media/cast/rtcp/rtcp_defines.h"
+#include "media/cast/net/rtcp/rtcp_defines.h"
namespace media {
namespace cast {
diff --git a/media/cast/rtcp/test_rtcp_packet_builder.cc b/media/cast/net/rtcp/test_rtcp_packet_builder.cc
index 8d0809d928..b6d7cf208e 100644
--- a/media/cast/rtcp/test_rtcp_packet_builder.cc
+++ b/media/cast/net/rtcp/test_rtcp_packet_builder.cc
@@ -1,11 +1,11 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/cast/rtcp/test_rtcp_packet_builder.h"
+#include "media/cast/net/rtcp/test_rtcp_packet_builder.h"
#include "base/logging.h"
-#include "media/cast/rtcp/rtcp_utility.h"
+#include "media/cast/net/rtcp/rtcp_utility.h"
namespace media {
namespace cast {
@@ -65,21 +65,7 @@ void TestRtcpPacketBuilder::AddSdesCname(uint32 sender_ssrc,
for (size_t i = 0; i < c_name.size(); ++i) {
big_endian_writer_.WriteU8(c_name.c_str()[i]);
}
- int padding;
- switch (c_name.size() % 4) {
- case 0:
- padding = 2;
- break;
- case 1:
- padding = 1;
- break;
- case 2:
- padding = 4;
- break;
- case 3:
- padding = 3;
- break;
- }
+ const int padding = 4 - ((c_name.size() + 2) % 4);
for (int j = 0; j < padding; ++j) {
big_endian_writer_.WriteU8(0);
}
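The closed-form padding reproduces the removed switch exactly: the SDES item carries two header octets (type and length) ahead of the CNAME, and the chunk must be padded with null octets to a 32-bit boundary while always ending in at least one null terminator, which is why a CNAME length of 2 mod 4 gets four padding bytes rather than zero. A quick standalone check of the equivalence:

// Verifies that 4 - ((size + 2) % 4) matches the padding table from the
// removed switch (0 -> 2, 1 -> 1, 2 -> 4, 3 -> 3) for every residue.
#include <cassert>
#include <cstddef>

int PaddingFor(size_t c_name_size) {
  return 4 - static_cast<int>((c_name_size + 2) % 4);
}

int main() {
  const int expected[4] = {2, 1, 4, 3};
  for (size_t size = 0; size < 16; ++size)
    assert(PaddingFor(size) == expected[size % 4]);
  return 0;
}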
@@ -191,7 +177,7 @@ void TestRtcpPacketBuilder::AddRemb(uint32 sender_ssrc, uint32 media_ssrc) {
void TestRtcpPacketBuilder::AddCast(uint32 sender_ssrc,
uint32 media_ssrc,
- uint16 target_delay_ms) {
+ base::TimeDelta target_delay) {
AddRtcpHeader(206, 15);
big_endian_writer_.WriteU32(sender_ssrc);
big_endian_writer_.WriteU32(media_ssrc);
@@ -201,7 +187,7 @@ void TestRtcpPacketBuilder::AddCast(uint32 sender_ssrc,
big_endian_writer_.WriteU8('T');
big_endian_writer_.WriteU8(kAckFrameId);
big_endian_writer_.WriteU8(3); // Loss fields.
- big_endian_writer_.WriteU16(target_delay_ms);
+ big_endian_writer_.WriteU16(target_delay.InMilliseconds());
big_endian_writer_.WriteU8(kLostFrameId);
big_endian_writer_.WriteU16(kRtcpCastAllPacketsLost);
big_endian_writer_.WriteU8(0); // Lost packet id mask.
diff --git a/media/cast/rtcp/test_rtcp_packet_builder.h b/media/cast/net/rtcp/test_rtcp_packet_builder.h
index d4266670ab..153ced1fd6 100644
--- a/media/cast/rtcp/test_rtcp_packet_builder.h
+++ b/media/cast/net/rtcp/test_rtcp_packet_builder.h
@@ -1,4 +1,4 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -9,7 +9,8 @@
#include "base/big_endian.h"
#include "media/cast/cast_config.h"
-#include "media/cast/rtcp/rtcp_defines.h"
+#include "media/cast/net/cast_transport_defines.h"
+#include "media/cast/net/rtcp/rtcp_defines.h"
namespace media {
namespace cast {
@@ -79,7 +80,9 @@ class TestRtcpPacketBuilder {
void AddPli(uint32 sender_ssrc, uint32 media_ssrc);
void AddRpsi(uint32 sender_ssrc, uint32 media_ssrc);
void AddRemb(uint32 sender_ssrc, uint32 media_ssrc);
- void AddCast(uint32 sender_ssrc, uint32 media_ssrc, uint16 target_delay_ms);
+ void AddCast(uint32 sender_ssrc,
+ uint32 media_ssrc,
+ base::TimeDelta target_delay);
void AddReceiverLog(uint32 sender_ssrc);
void AddReceiverFrameLog(uint32 rtp_timestamp,
int num_events,
diff --git a/media/cast/framer/cast_message_builder.cc b/media/cast/net/rtp/cast_message_builder.cc
index f3473f9690..bb36613d2c 100644
--- a/media/cast/framer/cast_message_builder.cc
+++ b/media/cast/net/rtp/cast_message_builder.cc
@@ -1,8 +1,8 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/cast/framer/cast_message_builder.h"
+#include "media/cast/net/rtp/cast_message_builder.h"
#include "media/cast/cast_defines.h"
diff --git a/media/cast/framer/cast_message_builder.h b/media/cast/net/rtp/cast_message_builder.h
index 9db88d4a99..0136fed4a0 100644
--- a/media/cast/framer/cast_message_builder.h
+++ b/media/cast/net/rtp/cast_message_builder.h
@@ -1,4 +1,4 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -10,9 +10,9 @@
#include <deque>
#include <map>
-#include "media/cast/framer/frame_id_map.h"
-#include "media/cast/rtcp/rtcp.h"
-#include "media/cast/rtp_receiver/rtp_receiver_defines.h"
+#include "media/cast/net/rtcp/rtcp.h"
+#include "media/cast/net/rtp/frame_id_map.h"
+#include "media/cast/net/rtp/rtp_receiver_defines.h"
namespace media {
namespace cast {
diff --git a/media/cast/framer/cast_message_builder_unittest.cc b/media/cast/net/rtp/cast_message_builder_unittest.cc
index ef75162a08..b0e30c44cd 100644
--- a/media/cast/framer/cast_message_builder_unittest.cc
+++ b/media/cast/net/rtp/cast_message_builder_unittest.cc
@@ -1,4 +1,4 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -6,9 +6,9 @@
#include "base/memory/scoped_ptr.h"
#include "base/test/simple_test_tick_clock.h"
-#include "media/cast/framer/cast_message_builder.h"
-#include "media/cast/rtcp/rtcp.h"
-#include "media/cast/rtp_receiver/rtp_receiver_defines.h"
+#include "media/cast/net/rtcp/rtcp.h"
+#include "media/cast/net/rtp/cast_message_builder.h"
+#include "media/cast/net/rtp/rtp_receiver_defines.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace media {
diff --git a/media/cast/framer/frame_buffer.cc b/media/cast/net/rtp/frame_buffer.cc
index 0b6fa8332c..a419ab6a46 100644
--- a/media/cast/framer/frame_buffer.cc
+++ b/media/cast/net/rtp/frame_buffer.cc
@@ -1,8 +1,8 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/cast/framer/frame_buffer.h"
+#include "media/cast/net/rtp/frame_buffer.h"
#include "base/logging.h"
@@ -59,17 +59,17 @@ bool FrameBuffer::Complete() const {
return num_packets_received_ - 1 == max_packet_id_;
}
-bool FrameBuffer::AssembleEncodedFrame(transport::EncodedFrame* frame) const {
+bool FrameBuffer::AssembleEncodedFrame(EncodedFrame* frame) const {
if (!Complete())
return false;
// Frame is complete -> construct.
if (is_key_frame_)
- frame->dependency = transport::EncodedFrame::KEY;
+ frame->dependency = EncodedFrame::KEY;
else if (frame_id_ == last_referenced_frame_id_)
- frame->dependency = transport::EncodedFrame::INDEPENDENT;
+ frame->dependency = EncodedFrame::INDEPENDENT;
else
- frame->dependency = transport::EncodedFrame::DEPENDENT;
+ frame->dependency = EncodedFrame::DEPENDENT;
frame->frame_id = frame_id_;
frame->referenced_frame_id = last_referenced_frame_id_;
frame->rtp_timestamp = rtp_timestamp_;
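Apart from dropping the transport:: qualifier, the classification is unchanged: key frames become EncodedFrame::KEY, a frame that only references itself becomes INDEPENDENT, and anything else is DEPENDENT on an earlier frame. A minimal sketch of that decision, with plain types standing in for the Cast classes:

// Sketch of the dependency choice made in FrameBuffer::AssembleEncodedFrame.
#include <cstdint>

enum class Dependency { KEY, INDEPENDENT, DEPENDENT };

Dependency Classify(bool is_key_frame,
                    uint32_t frame_id,
                    uint32_t last_referenced_frame_id) {
  if (is_key_frame)
    return Dependency::KEY;
  if (frame_id == last_referenced_frame_id)
    return Dependency::INDEPENDENT;  // References only itself.
  return Dependency::DEPENDENT;      // References an earlier frame.
}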
diff --git a/media/cast/framer/frame_buffer.h b/media/cast/net/rtp/frame_buffer.h
index d4d5dedbbd..1309a8785a 100644
--- a/media/cast/framer/frame_buffer.h
+++ b/media/cast/net/rtp/frame_buffer.h
@@ -1,4 +1,4 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -9,7 +9,7 @@
#include <vector>
#include "media/cast/cast_config.h"
-#include "media/cast/rtp_receiver/rtp_receiver_defines.h"
+#include "media/cast/net/rtp/rtp_receiver_defines.h"
namespace media {
namespace cast {
@@ -29,7 +29,7 @@ class FrameBuffer {
// and also copies the data from all packets into the data field in |frame|.
// Returns true if the frame was complete; false if incomplete and |frame|
// remains unchanged.
- bool AssembleEncodedFrame(transport::EncodedFrame* frame) const;
+ bool AssembleEncodedFrame(EncodedFrame* frame) const;
bool is_key_frame() const { return is_key_frame_; }
diff --git a/media/cast/framer/frame_buffer_unittest.cc b/media/cast/net/rtp/frame_buffer_unittest.cc
index d6844f3e95..69ce93dbbb 100644
--- a/media/cast/framer/frame_buffer_unittest.cc
+++ b/media/cast/net/rtp/frame_buffer_unittest.cc
@@ -1,8 +1,9 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/cast/framer/frame_buffer.h"
+#include "media/cast/net/rtp/frame_buffer.h"
+#include "media/cast/net/cast_transport_defines.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace media {
@@ -29,9 +30,9 @@ TEST_F(FrameBufferTest, OnePacketInsertSanity) {
rtp_header_.frame_id = 5;
rtp_header_.reference_frame_id = 5;
buffer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
- transport::EncodedFrame frame;
+ EncodedFrame frame;
EXPECT_TRUE(buffer_.AssembleEncodedFrame(&frame));
- EXPECT_EQ(transport::EncodedFrame::KEY, frame.dependency);
+ EXPECT_EQ(EncodedFrame::KEY, frame.dependency);
EXPECT_EQ(5u, frame.frame_id);
EXPECT_EQ(5u, frame.referenced_frame_id);
EXPECT_EQ(3000u, frame.rtp_timestamp);
@@ -39,7 +40,7 @@ TEST_F(FrameBufferTest, OnePacketInsertSanity) {
TEST_F(FrameBufferTest, EmptyBuffer) {
EXPECT_FALSE(buffer_.Complete());
- transport::EncodedFrame frame;
+ EncodedFrame frame;
EXPECT_FALSE(buffer_.AssembleEncodedFrame(&frame));
}
@@ -47,7 +48,7 @@ TEST_F(FrameBufferTest, DefaultOnePacketFrame) {
buffer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
EXPECT_TRUE(buffer_.Complete());
EXPECT_FALSE(buffer_.is_key_frame());
- transport::EncodedFrame frame;
+ EncodedFrame frame;
EXPECT_TRUE(buffer_.AssembleEncodedFrame(&frame));
EXPECT_EQ(payload_.size(), frame.data.size());
}
@@ -63,7 +64,7 @@ TEST_F(FrameBufferTest, MultiplePacketFrame) {
++rtp_header_.packet_id;
EXPECT_TRUE(buffer_.Complete());
EXPECT_TRUE(buffer_.is_key_frame());
- transport::EncodedFrame frame;
+ EncodedFrame frame;
EXPECT_TRUE(buffer_.AssembleEncodedFrame(&frame));
EXPECT_EQ(3 * payload_.size(), frame.data.size());
}
diff --git a/media/cast/framer/frame_id_map.cc b/media/cast/net/rtp/frame_id_map.cc
index b4389fd532..f0b433c2fb 100644
--- a/media/cast/framer/frame_id_map.cc
+++ b/media/cast/net/rtp/frame_id_map.cc
@@ -1,11 +1,11 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/cast/framer/frame_id_map.h"
+#include "media/cast/net/rtp/frame_id_map.h"
#include "base/logging.h"
-#include "media/cast/rtp_receiver/rtp_receiver_defines.h"
+#include "media/cast/net/rtp/rtp_receiver_defines.h"
namespace media {
namespace cast {
diff --git a/media/cast/framer/frame_id_map.h b/media/cast/net/rtp/frame_id_map.h
index 66e306f671..9c1b674ff4 100644
--- a/media/cast/framer/frame_id_map.h
+++ b/media/cast/net/rtp/frame_id_map.h
@@ -1,4 +1,4 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -11,8 +11,8 @@
#include "base/memory/linked_ptr.h"
#include "base/memory/scoped_ptr.h"
#include "media/cast/cast_config.h"
-#include "media/cast/rtcp/rtcp_defines.h"
-#include "media/cast/rtp_receiver/rtp_receiver_defines.h"
+#include "media/cast/net/rtcp/rtcp_defines.h"
+#include "media/cast/net/rtp/rtp_receiver_defines.h"
namespace media {
namespace cast {
diff --git a/media/cast/framer/framer.cc b/media/cast/net/rtp/framer.cc
index de4451a3b4..c94dc0c712 100644
--- a/media/cast/framer/framer.cc
+++ b/media/cast/net/rtp/framer.cc
@@ -1,8 +1,8 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/cast/framer/framer.h"
+#include "media/cast/net/rtp/framer.h"
#include "base/logging.h"
@@ -62,7 +62,7 @@ bool Framer::InsertPacket(const uint8* payload_data,
}
// This does not release the frame.
-bool Framer::GetEncodedFrame(transport::EncodedFrame* frame,
+bool Framer::GetEncodedFrame(EncodedFrame* frame,
bool* next_frame,
bool* have_multiple_decodable_frames) {
*have_multiple_decodable_frames = frame_id_map_.HaveMultipleDecodableFrames();
diff --git a/media/cast/framer/framer.h b/media/cast/net/rtp/framer.h
index 0b7249eff3..cf70ef191e 100644
--- a/media/cast/framer/framer.h
+++ b/media/cast/net/rtp/framer.h
@@ -1,4 +1,4 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -12,11 +12,11 @@
#include "base/memory/scoped_ptr.h"
#include "base/time/tick_clock.h"
#include "base/time/time.h"
-#include "media/cast/framer/cast_message_builder.h"
-#include "media/cast/framer/frame_buffer.h"
-#include "media/cast/framer/frame_id_map.h"
-#include "media/cast/rtcp/rtcp.h"
-#include "media/cast/rtp_receiver/rtp_receiver_defines.h"
+#include "media/cast/net/rtcp/rtcp.h"
+#include "media/cast/net/rtp/cast_message_builder.h"
+#include "media/cast/net/rtp/frame_buffer.h"
+#include "media/cast/net/rtp/frame_id_map.h"
+#include "media/cast/net/rtp/rtp_receiver_defines.h"
namespace media {
namespace cast {
@@ -45,7 +45,7 @@ class Framer {
// |next_frame| will be set to true if the returned frame is the very
// next frame. |have_multiple_complete_frames| will be set to true
// if there are more decodable frames available.
- bool GetEncodedFrame(transport::EncodedFrame* video_frame,
+ bool GetEncodedFrame(EncodedFrame* video_frame,
bool* next_frame,
bool* have_multiple_complete_frames);
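
Because GetEncodedFrame() deliberately does not release the frame (see the comment in framer.cc above), a typical consumer drains the Framer roughly as sketched below. This is a hedged sketch based only on the signatures in this hunk and the unit tests that follow; |framer| is a placeholder instance.

EncodedFrame frame;
bool is_next_frame = false;
bool have_multiple = false;
while (framer.GetEncodedFrame(&frame, &is_next_frame, &have_multiple)) {
  // ... hand |frame| to the decoder ...
  framer.ReleaseFrame(frame.frame_id);  // GetEncodedFrame() never releases.
  if (!have_multiple)
    break;  // Nothing else is decodable right now.
}
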
diff --git a/media/cast/framer/framer_unittest.cc b/media/cast/net/rtp/framer_unittest.cc
index ad53ef06ee..95e60d438d 100644
--- a/media/cast/framer/framer_unittest.cc
+++ b/media/cast/net/rtp/framer_unittest.cc
@@ -1,10 +1,11 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/test/simple_test_tick_clock.h"
-#include "media/cast/framer/framer.h"
-#include "media/cast/rtp_receiver/mock_rtp_payload_feedback.h"
+#include "media/cast/net/cast_transport_defines.h"
+#include "media/cast/net/rtp/framer.h"
+#include "media/cast/net/rtp/mock_rtp_payload_feedback.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace media {
@@ -33,14 +34,14 @@ class FramerTest : public ::testing::Test {
};
TEST_F(FramerTest, EmptyState) {
- transport::EncodedFrame frame;
+ EncodedFrame frame;
bool next_frame = false;
bool multiple = false;
EXPECT_FALSE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
}
TEST_F(FramerTest, AlwaysStartWithKey) {
- transport::EncodedFrame frame;
+ EncodedFrame frame;
bool next_frame = false;
bool complete = false;
bool multiple = false;
@@ -60,14 +61,14 @@ TEST_F(FramerTest, AlwaysStartWithKey) {
EXPECT_TRUE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
EXPECT_TRUE(next_frame);
EXPECT_TRUE(multiple);
- EXPECT_EQ(transport::EncodedFrame::KEY, frame.dependency);
+ EXPECT_EQ(EncodedFrame::KEY, frame.dependency);
EXPECT_EQ(1u, frame.frame_id);
EXPECT_EQ(1u, frame.referenced_frame_id);
framer_.ReleaseFrame(frame.frame_id);
}
TEST_F(FramerTest, CompleteFrame) {
- transport::EncodedFrame frame;
+ EncodedFrame frame;
bool next_frame = false;
bool complete = false;
bool multiple = false;
@@ -81,7 +82,7 @@ TEST_F(FramerTest, CompleteFrame) {
EXPECT_TRUE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
EXPECT_TRUE(next_frame);
EXPECT_FALSE(multiple);
- EXPECT_EQ(transport::EncodedFrame::KEY, frame.dependency);
+ EXPECT_EQ(EncodedFrame::KEY, frame.dependency);
EXPECT_EQ(0u, frame.frame_id);
EXPECT_EQ(0u, frame.referenced_frame_id);
framer_.ReleaseFrame(frame.frame_id);
@@ -107,7 +108,7 @@ TEST_F(FramerTest, CompleteFrame) {
}
TEST_F(FramerTest, DuplicatePackets) {
- transport::EncodedFrame frame;
+ EncodedFrame frame;
bool next_frame = false;
bool complete = false;
bool multiple = false;
@@ -139,7 +140,7 @@ TEST_F(FramerTest, DuplicatePackets) {
EXPECT_TRUE(complete);
EXPECT_FALSE(duplicate);
EXPECT_TRUE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
- EXPECT_EQ(transport::EncodedFrame::KEY, frame.dependency);
+ EXPECT_EQ(EncodedFrame::KEY, frame.dependency);
EXPECT_FALSE(multiple);
EXPECT_EQ(0u, frame.referenced_frame_id);
@@ -150,7 +151,7 @@ TEST_F(FramerTest, DuplicatePackets) {
EXPECT_FALSE(complete);
EXPECT_TRUE(duplicate);
EXPECT_TRUE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
- EXPECT_EQ(transport::EncodedFrame::KEY, frame.dependency);
+ EXPECT_EQ(EncodedFrame::KEY, frame.dependency);
EXPECT_EQ(0u, frame.frame_id);
EXPECT_FALSE(multiple);
EXPECT_EQ(0u, frame.referenced_frame_id);
@@ -184,7 +185,7 @@ TEST_F(FramerTest, DuplicatePackets) {
EXPECT_TRUE(complete);
EXPECT_FALSE(duplicate);
EXPECT_TRUE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
- EXPECT_EQ(transport::EncodedFrame::DEPENDENT, frame.dependency);
+ EXPECT_EQ(EncodedFrame::DEPENDENT, frame.dependency);
EXPECT_EQ(1u, frame.frame_id);
EXPECT_EQ(0u, frame.referenced_frame_id);
EXPECT_FALSE(multiple);
@@ -196,14 +197,14 @@ TEST_F(FramerTest, DuplicatePackets) {
EXPECT_FALSE(complete);
EXPECT_TRUE(duplicate);
EXPECT_TRUE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
- EXPECT_EQ(transport::EncodedFrame::DEPENDENT, frame.dependency);
+ EXPECT_EQ(EncodedFrame::DEPENDENT, frame.dependency);
EXPECT_EQ(1u, frame.frame_id);
EXPECT_EQ(0u, frame.referenced_frame_id);
EXPECT_FALSE(multiple);
}
TEST_F(FramerTest, ContinuousSequence) {
- transport::EncodedFrame frame;
+ EncodedFrame frame;
bool next_frame = false;
bool complete = false;
bool multiple = false;
@@ -217,7 +218,7 @@ TEST_F(FramerTest, ContinuousSequence) {
EXPECT_TRUE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
EXPECT_TRUE(next_frame);
EXPECT_FALSE(multiple);
- EXPECT_EQ(transport::EncodedFrame::KEY, frame.dependency);
+ EXPECT_EQ(EncodedFrame::KEY, frame.dependency);
EXPECT_EQ(0u, frame.frame_id);
EXPECT_EQ(0u, frame.referenced_frame_id);
framer_.ReleaseFrame(frame.frame_id);
@@ -234,7 +235,7 @@ TEST_F(FramerTest, ContinuousSequence) {
TEST_F(FramerTest, Wrap) {
// Insert key frame, frame_id = 255 (will jump to that)
- transport::EncodedFrame frame;
+ EncodedFrame frame;
bool next_frame = false;
bool multiple = true;
bool duplicate = false;
@@ -248,7 +249,7 @@ TEST_F(FramerTest, Wrap) {
EXPECT_TRUE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
EXPECT_TRUE(next_frame);
EXPECT_FALSE(multiple);
- EXPECT_EQ(transport::EncodedFrame::KEY, frame.dependency);
+ EXPECT_EQ(EncodedFrame::KEY, frame.dependency);
EXPECT_EQ(255u, frame.frame_id);
EXPECT_EQ(255u, frame.referenced_frame_id);
framer_.ReleaseFrame(frame.frame_id);
@@ -261,14 +262,14 @@ TEST_F(FramerTest, Wrap) {
EXPECT_TRUE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
EXPECT_TRUE(next_frame);
EXPECT_FALSE(multiple);
- EXPECT_EQ(transport::EncodedFrame::DEPENDENT, frame.dependency);
+ EXPECT_EQ(EncodedFrame::DEPENDENT, frame.dependency);
EXPECT_EQ(256u, frame.frame_id);
EXPECT_EQ(255u, frame.referenced_frame_id);
framer_.ReleaseFrame(frame.frame_id);
}
TEST_F(FramerTest, Reset) {
- transport::EncodedFrame frame;
+ EncodedFrame frame;
bool next_frame = false;
bool complete = false;
bool multiple = true;
@@ -284,7 +285,7 @@ TEST_F(FramerTest, Reset) {
}
TEST_F(FramerTest, RequireKeyAfterReset) {
- transport::EncodedFrame frame;
+ EncodedFrame frame;
bool next_frame = false;
bool multiple = false;
bool duplicate = false;
@@ -308,7 +309,7 @@ TEST_F(FramerTest, RequireKeyAfterReset) {
}
TEST_F(FramerTest, BasicNonLastReferenceId) {
- transport::EncodedFrame frame;
+ EncodedFrame frame;
bool next_frame = false;
bool multiple = false;
bool duplicate = false;
@@ -335,7 +336,7 @@ TEST_F(FramerTest, BasicNonLastReferenceId) {
TEST_F(FramerTest, InOrderReferenceFrameSelection) {
// Create pattern: 0, 1, 4, 5.
- transport::EncodedFrame frame;
+ EncodedFrame frame;
bool next_frame = false;
bool multiple = false;
bool duplicate = false;
@@ -360,7 +361,7 @@ TEST_F(FramerTest, InOrderReferenceFrameSelection) {
framer_.InsertPacket(
payload_.data(), payload_.size(), rtp_header_, &duplicate);
EXPECT_TRUE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
- EXPECT_EQ(transport::EncodedFrame::KEY, frame.dependency);
+ EXPECT_EQ(EncodedFrame::KEY, frame.dependency);
EXPECT_EQ(0u, frame.frame_id);
EXPECT_EQ(0u, frame.referenced_frame_id);
EXPECT_FALSE(multiple);
@@ -368,14 +369,14 @@ TEST_F(FramerTest, InOrderReferenceFrameSelection) {
EXPECT_TRUE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
EXPECT_TRUE(next_frame);
EXPECT_TRUE(multiple);
- EXPECT_EQ(transport::EncodedFrame::DEPENDENT, frame.dependency);
+ EXPECT_EQ(EncodedFrame::DEPENDENT, frame.dependency);
EXPECT_EQ(1u, frame.frame_id);
EXPECT_EQ(0u, frame.referenced_frame_id);
framer_.ReleaseFrame(frame.frame_id);
EXPECT_TRUE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
EXPECT_FALSE(next_frame);
EXPECT_FALSE(multiple);
- EXPECT_EQ(transport::EncodedFrame::DEPENDENT, frame.dependency);
+ EXPECT_EQ(EncodedFrame::DEPENDENT, frame.dependency);
EXPECT_EQ(4u, frame.frame_id);
EXPECT_EQ(0u, frame.referenced_frame_id);
framer_.ReleaseFrame(frame.frame_id);
@@ -394,14 +395,14 @@ TEST_F(FramerTest, InOrderReferenceFrameSelection) {
EXPECT_TRUE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
EXPECT_TRUE(next_frame);
EXPECT_FALSE(multiple);
- EXPECT_EQ(transport::EncodedFrame::DEPENDENT, frame.dependency);
+ EXPECT_EQ(EncodedFrame::DEPENDENT, frame.dependency);
EXPECT_EQ(5u, frame.frame_id);
EXPECT_EQ(4u, frame.referenced_frame_id);
}
TEST_F(FramerTest, AudioWrap) {
// All audio frames are marked as key frames.
- transport::EncodedFrame frame;
+ EncodedFrame frame;
bool next_frame = false;
bool multiple = false;
bool duplicate = false;
@@ -415,7 +416,7 @@ TEST_F(FramerTest, AudioWrap) {
EXPECT_TRUE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
EXPECT_TRUE(next_frame);
EXPECT_FALSE(multiple);
- EXPECT_EQ(transport::EncodedFrame::KEY, frame.dependency);
+ EXPECT_EQ(EncodedFrame::KEY, frame.dependency);
EXPECT_EQ(254u, frame.frame_id);
EXPECT_EQ(254u, frame.referenced_frame_id);
framer_.ReleaseFrame(frame.frame_id);
@@ -434,7 +435,7 @@ TEST_F(FramerTest, AudioWrap) {
EXPECT_TRUE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
EXPECT_TRUE(next_frame);
EXPECT_TRUE(multiple);
- EXPECT_EQ(transport::EncodedFrame::KEY, frame.dependency);
+ EXPECT_EQ(EncodedFrame::KEY, frame.dependency);
EXPECT_EQ(255u, frame.frame_id);
EXPECT_EQ(255u, frame.referenced_frame_id);
framer_.ReleaseFrame(frame.frame_id);
@@ -442,7 +443,7 @@ TEST_F(FramerTest, AudioWrap) {
EXPECT_TRUE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
EXPECT_TRUE(next_frame);
EXPECT_FALSE(multiple);
- EXPECT_EQ(transport::EncodedFrame::KEY, frame.dependency);
+ EXPECT_EQ(EncodedFrame::KEY, frame.dependency);
EXPECT_EQ(256u, frame.frame_id);
EXPECT_EQ(256u, frame.referenced_frame_id);
framer_.ReleaseFrame(frame.frame_id);
@@ -450,7 +451,7 @@ TEST_F(FramerTest, AudioWrap) {
TEST_F(FramerTest, AudioWrapWithMissingFrame) {
// All audio frames are marked as key frames.
- transport::EncodedFrame frame;
+ EncodedFrame frame;
bool next_frame = false;
bool multiple = true;
bool duplicate = false;
@@ -464,7 +465,7 @@ TEST_F(FramerTest, AudioWrapWithMissingFrame) {
EXPECT_TRUE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
EXPECT_TRUE(next_frame);
EXPECT_FALSE(multiple);
- EXPECT_EQ(transport::EncodedFrame::KEY, frame.dependency);
+ EXPECT_EQ(EncodedFrame::KEY, frame.dependency);
EXPECT_EQ(253u, frame.frame_id);
EXPECT_EQ(253u, frame.referenced_frame_id);
framer_.ReleaseFrame(frame.frame_id);
@@ -483,14 +484,14 @@ TEST_F(FramerTest, AudioWrapWithMissingFrame) {
EXPECT_TRUE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
EXPECT_FALSE(next_frame);
EXPECT_TRUE(multiple);
- EXPECT_EQ(transport::EncodedFrame::KEY, frame.dependency);
+ EXPECT_EQ(EncodedFrame::KEY, frame.dependency);
EXPECT_EQ(255u, frame.frame_id);
EXPECT_EQ(255u, frame.referenced_frame_id);
framer_.ReleaseFrame(frame.frame_id);
EXPECT_TRUE(framer_.GetEncodedFrame(&frame, &next_frame, &multiple));
EXPECT_TRUE(next_frame);
EXPECT_FALSE(multiple);
- EXPECT_EQ(transport::EncodedFrame::KEY, frame.dependency);
+ EXPECT_EQ(EncodedFrame::KEY, frame.dependency);
EXPECT_EQ(256u, frame.frame_id);
EXPECT_EQ(256u, frame.referenced_frame_id);
framer_.ReleaseFrame(frame.frame_id);
diff --git a/media/cast/rtp_receiver/rtp_parser/include/mock/mock_rtp_feedback.h b/media/cast/net/rtp/mock_rtp_feedback.h
index f5edf7c43f..44624ffc71 100644
--- a/media/cast/rtp_receiver/rtp_parser/include/mock/mock_rtp_feedback.h
+++ b/media/cast/net/rtp/mock_rtp_feedback.h
@@ -1,11 +1,11 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef MEDIA_CAST_RTP_INCLUDE_MOCK_RTP_FEEDBACK_H_
#define MEDIA_CAST_RTP_INCLUDE_MOCK_RTP_FEEDBACK_H_
-#include "media/cast/rtp_receiver/rtp_parser/rtp_feedback.h"
+#include "media/cast/net/rtp/rtp_parser/rtp_feedback.h"
#include "testing/gmock/include/gmock/gmock.h"
namespace media {
diff --git a/media/cast/rtp_receiver/mock_rtp_payload_feedback.cc b/media/cast/net/rtp/mock_rtp_payload_feedback.cc
index 02b6c0be45..cde8e3880a 100644
--- a/media/cast/rtp_receiver/mock_rtp_payload_feedback.cc
+++ b/media/cast/net/rtp/mock_rtp_payload_feedback.cc
@@ -1,8 +1,8 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/cast/rtp_receiver/mock_rtp_payload_feedback.h"
+#include "media/cast/net/rtp/mock_rtp_payload_feedback.h"
namespace media {
namespace cast {
diff --git a/media/cast/rtp_receiver/mock_rtp_payload_feedback.h b/media/cast/net/rtp/mock_rtp_payload_feedback.h
index 14e48673bd..f46d318476 100644
--- a/media/cast/rtp_receiver/mock_rtp_payload_feedback.h
+++ b/media/cast/net/rtp/mock_rtp_payload_feedback.h
@@ -1,11 +1,11 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef MEDIA_CAST_RTP_COMMON_MOCK_RTP_PAYLOAD_FEEDBACK_H_
#define MEDIA_CAST_RTP_COMMON_MOCK_RTP_PAYLOAD_FEEDBACK_H_
-#include "media/cast/rtp_receiver/rtp_receiver_defines.h"
+#include "media/cast/net/rtp/rtp_receiver_defines.h"
#include "testing/gmock/include/gmock/gmock.h"
namespace media {
diff --git a/media/cast/transport/rtp_sender/packet_storage/packet_storage.cc b/media/cast/net/rtp/packet_storage.cc
index a748baa27a..59ac9ce44e 100644
--- a/media/cast/transport/rtp_sender/packet_storage/packet_storage.cc
+++ b/media/cast/net/rtp/packet_storage.cc
@@ -1,16 +1,16 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/cast/transport/rtp_sender/packet_storage/packet_storage.h"
+#include "media/cast/net/rtp/packet_storage.h"
#include <string>
#include "base/logging.h"
+#include "media/cast/cast_defines.h"
namespace media {
namespace cast {
-namespace transport {
PacketStorage::PacketStorage(size_t stored_frames)
: max_stored_frames_(stored_frames),
@@ -60,6 +60,5 @@ const SendPacketVector* PacketStorage::GetFrame8(uint8 frame_id_8bits) const {
return &(frames_[index_8bits]);
}
-} // namespace transport
} // namespace cast
} // namespace media
diff --git a/media/cast/transport/rtp_sender/packet_storage/packet_storage.h b/media/cast/net/rtp/packet_storage.h
index 037ead1edf..9330a6a0ae 100644
--- a/media/cast/transport/rtp_sender/packet_storage/packet_storage.h
+++ b/media/cast/net/rtp/packet_storage.h
@@ -1,9 +1,9 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_CAST_TRANSPORT_RTP_SENDER_PACKET_STORAGE_PACKET_STORAGE_H_
-#define MEDIA_CAST_TRANSPORT_RTP_SENDER_PACKET_STORAGE_PACKET_STORAGE_H_
+#ifndef MEDIA_CAST_NET_RTP_SENDER_PACKET_STORAGE_PACKET_STORAGE_H_
+#define MEDIA_CAST_NET_RTP_SENDER_PACKET_STORAGE_PACKET_STORAGE_H_
#include <deque>
#include <list>
@@ -15,13 +15,12 @@
#include "base/memory/scoped_ptr.h"
#include "base/time/tick_clock.h"
#include "base/time/time.h"
-#include "media/cast/transport/cast_transport_config.h"
-#include "media/cast/transport/cast_transport_defines.h"
-#include "media/cast/transport/pacing/paced_sender.h"
+#include "media/cast/net/cast_transport_config.h"
+#include "media/cast/net/cast_transport_defines.h"
+#include "media/cast/net/pacing/paced_sender.h"
namespace media {
namespace cast {
-namespace transport {
// Stores a list of frames. Each frame consists of a list of packets.
typedef std::deque<SendPacketVector> FrameQueue;
@@ -55,8 +54,7 @@ class PacketStorage {
DISALLOW_COPY_AND_ASSIGN(PacketStorage);
};
-} // namespace transport
} // namespace cast
} // namespace media
-#endif // MEDIA_CAST_TRANSPORT_RTP_SENDER_PACKET_STORAGE_PACKET_STORAGE_H_
+#endif // MEDIA_CAST_NET_RTP_SENDER_PACKET_STORAGE_PACKET_STORAGE_H_
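
With the transport namespace removed, the class is now reached as media::cast::PacketStorage. A short, hedged sketch of its use, grounded in the constructor, IsValid() and GetFrame8() calls visible in this diff; |stored_frames| and |frame_id| are placeholders.

// #include "media/cast/net/rtp/packet_storage.h"  (new path from this commit)
scoped_ptr<PacketStorage> storage(new PacketStorage(stored_frames));
if (!storage->IsValid()) {
  // Reject the configuration, exactly as RtpSender::Initialize() does later
  // in this diff.
}
// Frames are addressed by the low 8 bits of the frame id:
const SendPacketVector* packets =
    storage->GetFrame8(static_cast<uint8>(frame_id & 0xff));
// |packets| points into |storage|'s internal frame queue (see GetFrame8()).
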
diff --git a/media/cast/transport/rtp_sender/packet_storage/packet_storage_unittest.cc b/media/cast/net/rtp/packet_storage_unittest.cc
index 298942c80a..5e9393da5d 100644
--- a/media/cast/transport/rtp_sender/packet_storage/packet_storage_unittest.cc
+++ b/media/cast/net/rtp/packet_storage_unittest.cc
@@ -1,8 +1,8 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/cast/transport/rtp_sender/packet_storage/packet_storage.h"
+#include "media/cast/net/rtp/packet_storage.h"
#include <stdint.h>
@@ -14,7 +14,6 @@
namespace media {
namespace cast {
-namespace transport {
static size_t kStoredFrames = 10;
@@ -110,6 +109,5 @@ TEST(PacketStorageTest, GetFrameTooOld) {
}
}
-} // namespace transport
} // namespace cast
} // namespace media
diff --git a/media/cast/rtp_receiver/receiver_stats.cc b/media/cast/net/rtp/receiver_stats.cc
index 7eff86763f..416cdd8ea5 100644
--- a/media/cast/rtp_receiver/receiver_stats.cc
+++ b/media/cast/net/rtp/receiver_stats.cc
@@ -1,11 +1,11 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/cast/rtp_receiver/receiver_stats.h"
+#include "media/cast/net/rtp/receiver_stats.h"
#include "base/logging.h"
-#include "media/cast/rtp_receiver/rtp_receiver_defines.h"
+#include "media/cast/net/rtp/rtp_receiver_defines.h"
namespace media {
namespace cast {
diff --git a/media/cast/rtp_receiver/receiver_stats.h b/media/cast/net/rtp/receiver_stats.h
index 05a067f787..9de6b22c20 100644
--- a/media/cast/rtp_receiver/receiver_stats.h
+++ b/media/cast/net/rtp/receiver_stats.h
@@ -1,4 +1,4 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -7,8 +7,8 @@
#include "base/time/tick_clock.h"
#include "base/time/time.h"
-#include "media/cast/rtcp/rtcp.h"
-#include "media/cast/rtp_receiver/rtp_receiver_defines.h"
+#include "media/cast/net/rtcp/rtcp.h"
+#include "media/cast/net/rtp/rtp_receiver_defines.h"
namespace media {
namespace cast {
diff --git a/media/cast/rtp_receiver/receiver_stats_unittest.cc b/media/cast/net/rtp/receiver_stats_unittest.cc
index 98059cdde7..caedf84c06 100644
--- a/media/cast/rtp_receiver/receiver_stats_unittest.cc
+++ b/media/cast/net/rtp/receiver_stats_unittest.cc
@@ -1,4 +1,4 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -8,8 +8,8 @@
#include "base/test/simple_test_tick_clock.h"
#include "base/time/time.h"
-#include "media/cast/rtp_receiver/receiver_stats.h"
-#include "media/cast/rtp_receiver/rtp_receiver_defines.h"
+#include "media/cast/net/rtp/receiver_stats.h"
+#include "media/cast/net/rtp/rtp_receiver_defines.h"
namespace media {
namespace cast {
diff --git a/media/cast/transport/rtp_sender/rtp_packetizer/test/rtp_header_parser.cc b/media/cast/net/rtp/rtp_header_parser.cc
index ddadcb2cf2..2d70f9827d 100644
--- a/media/cast/transport/rtp_sender/rtp_packetizer/test/rtp_header_parser.cc
+++ b/media/cast/net/rtp/rtp_header_parser.cc
@@ -1,8 +1,8 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/cast/transport/rtp_sender/rtp_packetizer/test/rtp_header_parser.h"
+#include "media/cast/net/rtp/rtp_header_parser.h"
#include <cstddef>
@@ -10,7 +10,6 @@
namespace media {
namespace cast {
-namespace transport {
static const uint8 kCastKeyFrameBitMask = 0x80;
static const uint8 kCastReferenceFrameIdBitMask = 0x40;
@@ -100,6 +99,5 @@ bool RtpHeaderParser::ParseCast(RtpCastTestHeader* parsed_packet) const {
return true;
}
-} // namespace transport
} // namespace cast
} // namespace media
diff --git a/media/cast/transport/rtp_sender/rtp_packetizer/test/rtp_header_parser.h b/media/cast/net/rtp/rtp_header_parser.h
index bb6e0fae54..fd235c2aa5 100644
--- a/media/cast/transport/rtp_sender/rtp_packetizer/test/rtp_header_parser.h
+++ b/media/cast/net/rtp/rtp_header_parser.h
@@ -1,17 +1,16 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Utility parser for rtp packetizer unittests
-#ifndef MEDIA_CAST_TRANSPORT_RTP_SENDER_RTP_PACKETIZER_TEST_RTP_HEADER_PARSER_H_
-#define MEDIA_CAST_TRANSPORT_RTP_SENDER_RTP_PACKETIZER_TEST_RTP_HEADER_PARSER_H_
+#ifndef MEDIA_CAST_NET_RTP_RTP_HEADER_PARSER_H_
+#define MEDIA_CAST_NET_RTP_RTP_HEADER_PARSER_H_
#include "base/basictypes.h"
-#include "media/cast/transport/cast_transport_defines.h"
+#include "media/cast/net/cast_transport_defines.h"
namespace media {
namespace cast {
-namespace transport {
// TODO(miu): Kill this and use RtpCastHeader instead.
struct RtpCastTestHeader {
@@ -51,14 +50,13 @@ class RtpHeaderParser {
const uint8* const rtp_data_begin_;
size_t length_;
- mutable transport::FrameIdWrapHelper frame_id_wrap_helper_;
- mutable transport::FrameIdWrapHelper reference_frame_id_wrap_helper_;
+ mutable FrameIdWrapHelper frame_id_wrap_helper_;
+ mutable FrameIdWrapHelper reference_frame_id_wrap_helper_;
DISALLOW_COPY_AND_ASSIGN(RtpHeaderParser);
};
-} // namespace transport
} // namespace cast
} // namespace media
-#endif // MEDIA_CAST_TRANSPORT_RTP_SENDER_RTP_PACKETIZER_TEST_RTP_HEADER_PARSER_H_
+#endif // MEDIA_CAST_NET_RTP_RTP_HEADER_PARSER_H_
diff --git a/media/cast/rtp_receiver/rtp_parser/test/rtp_packet_builder.cc b/media/cast/net/rtp/rtp_packet_builder.cc
index b8ab3baa29..2f279f4e96 100644
--- a/media/cast/rtp_receiver/rtp_parser/test/rtp_packet_builder.cc
+++ b/media/cast/net/rtp/rtp_packet_builder.cc
@@ -1,8 +1,8 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/cast/rtp_receiver/rtp_parser/test/rtp_packet_builder.h"
+#include "media/cast/net/rtp/rtp_packet_builder.h"
#include "base/big_endian.h"
#include "base/logging.h"
diff --git a/media/cast/rtp_receiver/rtp_parser/test/rtp_packet_builder.h b/media/cast/net/rtp/rtp_packet_builder.h
index 7337798e31..3d6579720b 100644
--- a/media/cast/rtp_receiver/rtp_parser/test/rtp_packet_builder.h
+++ b/media/cast/net/rtp/rtp_packet_builder.h
@@ -1,4 +1,4 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -7,7 +7,7 @@
#ifndef MEDIA_CAST_RTP_RECEIVER_RTP_PARSER_TEST_RTP_PACKET_BUILDER_H_
#define MEDIA_CAST_RTP_RECEIVER_RTP_PARSER_TEST_RTP_PACKET_BUILDER_H_
-#include "media/cast/rtp_receiver/rtp_receiver_defines.h"
+#include "media/cast/net/rtp/rtp_receiver_defines.h"
namespace media {
namespace cast {
diff --git a/media/cast/transport/rtp_sender/rtp_packetizer/rtp_packetizer.cc b/media/cast/net/rtp/rtp_packetizer.cc
index d40f99f144..dcfcc8bba4 100644
--- a/media/cast/transport/rtp_sender/rtp_packetizer/rtp_packetizer.cc
+++ b/media/cast/net/rtp/rtp_packetizer.cc
@@ -1,16 +1,15 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/cast/transport/rtp_sender/rtp_packetizer/rtp_packetizer.h"
+#include "media/cast/net/rtp/rtp_packetizer.h"
#include "base/big_endian.h"
#include "base/logging.h"
-#include "media/cast/transport/pacing/paced_sender.h"
+#include "media/cast/net/pacing/paced_sender.h"
namespace media {
namespace cast {
-namespace transport {
static const uint16 kCommonRtpHeaderLength = 12;
static const uint16 kCastRtpHeaderLength = 7;
@@ -19,13 +18,10 @@ static const uint8 kCastReferenceFrameIdBitMask = 0x40;
static const uint8 kRtpMarkerBitMask = 0x80;
RtpPacketizerConfig::RtpPacketizerConfig()
- : audio(false),
- payload_type(-1),
+ : payload_type(-1),
max_payload_length(kMaxIpPacketSize - 28), // Default is IP-v4/UDP.
sequence_number(0),
- frequency(8000),
- ssrc(0),
- channels(0) {}
+ ssrc(0) {}
RtpPacketizerConfig::~RtpPacketizerConfig() {}
@@ -132,6 +128,5 @@ void RtpPacketizer::BuildCommonRTPheader(Packet* packet,
++sequence_number_;
}
-} // namespace transport
} // namespace cast
} // namespace media
diff --git a/media/cast/transport/rtp_sender/rtp_packetizer/rtp_packetizer.h b/media/cast/net/rtp/rtp_packetizer.h
index ebdbf01018..034a74a2a5 100644
--- a/media/cast/transport/rtp_sender/rtp_packetizer/rtp_packetizer.h
+++ b/media/cast/net/rtp/rtp_packetizer.h
@@ -1,16 +1,16 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_CAST_TRANSPORT_RTP_SENDER_RTP_PACKETIZER_RTP_PACKETIZER_H_
-#define MEDIA_CAST_TRANSPORT_RTP_SENDER_RTP_PACKETIZER_RTP_PACKETIZER_H_
+#ifndef MEDIA_CAST_NET_RTP_RTP_PACKETIZER_H_
+#define MEDIA_CAST_NET_RTP_RTP_PACKETIZER_H_
#include <cmath>
#include <list>
#include <map>
#include "base/time/time.h"
-#include "media/cast/transport/rtp_sender/packet_storage/packet_storage.h"
+#include "media/cast/net/rtp/packet_storage.h"
namespace base {
class TickClock;
@@ -19,8 +19,6 @@ class TickClock;
namespace media {
namespace cast {
-namespace transport {
-
class PacedSender;
struct RtpPacketizerConfig {
@@ -28,21 +26,12 @@ struct RtpPacketizerConfig {
~RtpPacketizerConfig();
// General.
- bool audio;
int payload_type;
uint16 max_payload_length;
uint16 sequence_number;
- int frequency;
// SSRC.
unsigned int ssrc;
-
- // Video.
- VideoCodec video_codec;
-
- // Audio.
- uint8 channels;
- AudioCodec audio_codec;
};
// This object is only called from the main cast thread.
@@ -79,8 +68,7 @@ class RtpPacketizer {
size_t send_octet_count_;
};
-} // namespace transport
} // namespace cast
} // namespace media
-#endif // MEDIA_CAST_TRANSPORT_RTP_SENDER_RTP_PACKETIZER_RTP_PACKETIZER_H_
+#endif // MEDIA_CAST_NET_RTP_RTP_PACKETIZER_H_
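
After this change RtpPacketizerConfig carries only transport-level fields; the audio, frequency, channels and codec members are gone. Below is a hedged sketch of filling in what remains and constructing a packetizer, using the constructor call that appears in rtp_sender.cc later in this diff; |packetizer|, |transport| and |storage| are placeholders, and the literal values are borrowed from the unit test that follows.

RtpPacketizerConfig config;          // audio/codec fields no longer exist
config.ssrc = 0x12345;               // placeholder (kSsrc in the test below)
config.payload_type = 127;           // placeholder (kPayload)
config.sequence_number = 33;         // placeholder (kSeqNum)
// max_payload_length keeps its default of kMaxIpPacketSize - 28 (IPv4/UDP).
packetizer.reset(new RtpPacketizer(transport, storage.get(), config));
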
diff --git a/media/cast/transport/rtp_sender/rtp_packetizer/rtp_packetizer_unittest.cc b/media/cast/net/rtp/rtp_packetizer_unittest.cc
index 64def4ce7f..1e39e5924f 100644
--- a/media/cast/transport/rtp_sender/rtp_packetizer/rtp_packetizer_unittest.cc
+++ b/media/cast/net/rtp/rtp_packetizer_unittest.cc
@@ -1,23 +1,23 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/cast/transport/rtp_sender/rtp_packetizer/rtp_packetizer.h"
+#include "media/cast/net/rtp/rtp_packetizer.h"
#include <stdint.h>
#include "base/memory/scoped_ptr.h"
#include "base/test/simple_test_tick_clock.h"
+#include "media/cast/logging/logging_impl.h"
#include "media/cast/logging/simple_event_subscriber.h"
+#include "media/cast/net/pacing/paced_sender.h"
+#include "media/cast/net/rtp/packet_storage.h"
+#include "media/cast/net/rtp/rtp_header_parser.h"
#include "media/cast/test/fake_single_thread_task_runner.h"
-#include "media/cast/transport/pacing/paced_sender.h"
-#include "media/cast/transport/rtp_sender/packet_storage/packet_storage.h"
-#include "media/cast/transport/rtp_sender/rtp_packetizer/test/rtp_header_parser.h"
#include "testing/gmock/include/gmock/gmock.h"
namespace media {
namespace cast {
-namespace transport {
namespace {
static const int kPayload = 127;
@@ -26,7 +26,6 @@ static const uint16 kSeqNum = 33;
static const int kMaxPacketLength = 1500;
static const int kSsrc = 0x12345;
static const unsigned int kFrameSize = 5000;
-static const uint32 kStartFrameId = UINT32_C(0xffffffff);
}
class TestRtpPacketTransport : public PacketSender {
@@ -170,6 +169,5 @@ TEST_F(RtpPacketizerTest, Stats) {
EXPECT_EQ(expected_num_of_packets, transport_->number_of_packets_received());
}
-} // namespace transport
} // namespace cast
} // namespace media
diff --git a/media/cast/rtp_receiver/rtp_parser/rtp_parser.cc b/media/cast/net/rtp/rtp_parser.cc
index f44e82dac2..bed1d7c8d8 100644
--- a/media/cast/rtp_receiver/rtp_parser/rtp_parser.cc
+++ b/media/cast/net/rtp/rtp_parser.cc
@@ -1,8 +1,8 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/cast/rtp_receiver/rtp_parser/rtp_parser.h"
+#include "media/cast/net/rtp/rtp_parser.h"
#include "base/big_endian.h"
#include "base/logging.h"
diff --git a/media/cast/rtp_receiver/rtp_parser/rtp_parser.h b/media/cast/net/rtp/rtp_parser.h
index 35118cf144..64586d27c9 100644
--- a/media/cast/rtp_receiver/rtp_parser/rtp_parser.h
+++ b/media/cast/net/rtp/rtp_parser.h
@@ -1,12 +1,12 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_CAST_RTP_RECEIVER_RTP_PARSER_RTP_PARSER_H_
-#define MEDIA_CAST_RTP_RECEIVER_RTP_PARSER_RTP_PARSER_H_
+#ifndef MEDIA_CAST_NET_RTP_RTP_PARSER_H_
+#define MEDIA_CAST_NET_RTP_RTP_PARSER_H_
-#include "media/cast/rtp_receiver/rtp_receiver_defines.h"
-#include "media/cast/transport/cast_transport_defines.h"
+#include "media/cast/net/cast_transport_defines.h"
+#include "media/cast/net/rtp/rtp_receiver_defines.h"
namespace media {
namespace cast {
@@ -36,7 +36,7 @@ class RtpParser {
private:
const uint32 expected_sender_ssrc_;
const uint8 expected_payload_type_;
- transport::FrameIdWrapHelper frame_id_wrap_helper_;
+ FrameIdWrapHelper frame_id_wrap_helper_;
DISALLOW_COPY_AND_ASSIGN(RtpParser);
};
@@ -44,4 +44,4 @@ class RtpParser {
} // namespace cast
} // namespace media
-#endif // MEDIA_CAST_RTP_RECEIVER_RTP_PARSER_RTP_PARSER_H_
+#endif // MEDIA_CAST_NET_RTP_RTP_PARSER_H_
diff --git a/media/cast/rtp_receiver/rtp_parser/rtp_parser_unittest.cc b/media/cast/net/rtp/rtp_parser_unittest.cc
index 47c79139ff..e5c08fe60e 100644
--- a/media/cast/rtp_receiver/rtp_parser/rtp_parser_unittest.cc
+++ b/media/cast/net/rtp/rtp_parser_unittest.cc
@@ -1,12 +1,12 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/memory/scoped_ptr.h"
#include "base/rand_util.h"
-#include "media/cast/rtp_receiver/rtp_parser/rtp_parser.h"
-#include "media/cast/rtp_receiver/rtp_parser/test/rtp_packet_builder.h"
-#include "media/cast/rtp_receiver/rtp_receiver_defines.h"
+#include "media/cast/net/rtp/rtp_packet_builder.h"
+#include "media/cast/net/rtp/rtp_parser.h"
+#include "media/cast/net/rtp/rtp_receiver_defines.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace media {
@@ -39,7 +39,7 @@ class RtpParserTest : public ::testing::Test {
void ExpectParsesPacket() {
RtpCastHeader parsed_header;
const uint8* payload = NULL;
- size_t payload_size = -1;
+ size_t payload_size = static_cast<size_t>(-1);
EXPECT_TRUE(rtp_parser_.ParsePacket(
packet_, kPacketLength, &parsed_header, &payload, &payload_size));
@@ -63,7 +63,7 @@ class RtpParserTest : public ::testing::Test {
void ExpectDoesNotParsePacket() {
RtpCastHeader parsed_header;
const uint8* payload = NULL;
- size_t payload_size = -1;
+ size_t payload_size = static_cast<size_t>(-1);
EXPECT_FALSE(rtp_parser_.ParsePacket(
packet_, kPacketLength, &parsed_header, &payload, &payload_size));
}
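
ParsePacket() keeps its shape under the new include path; the fragment below simply mirrors the call in the test above. |rtp_parser|, |packet| and |packet_length| are placeholders.

RtpCastHeader header;
const uint8* payload = NULL;
size_t payload_size = 0;
if (rtp_parser.ParsePacket(packet, packet_length,
                           &header, &payload, &payload_size)) {
  // |payload| and |payload_size| describe the Cast payload inside |packet|.
}
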
diff --git a/media/cast/rtp_receiver/rtp_receiver_defines.cc b/media/cast/net/rtp/rtp_receiver_defines.cc
index e42b2b733c..9b20b5f904 100644
--- a/media/cast/rtp_receiver/rtp_receiver_defines.cc
+++ b/media/cast/net/rtp/rtp_receiver_defines.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/cast/rtp_receiver/rtp_receiver_defines.h"
+#include "media/cast/net/rtp/rtp_receiver_defines.h"
namespace media {
namespace cast {
diff --git a/media/cast/rtp_receiver/rtp_receiver_defines.h b/media/cast/net/rtp/rtp_receiver_defines.h
index d907436f48..86fbd2296f 100644
--- a/media/cast/rtp_receiver/rtp_receiver_defines.h
+++ b/media/cast/net/rtp/rtp_receiver_defines.h
@@ -1,4 +1,4 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -7,7 +7,7 @@
#include "base/basictypes.h"
#include "media/cast/cast_config.h"
-#include "media/cast/rtcp/rtcp_defines.h"
+#include "media/cast/net/rtcp/rtcp_defines.h"
namespace media {
namespace cast {
diff --git a/media/cast/transport/rtp_sender/rtp_sender.cc b/media/cast/net/rtp/rtp_sender.cc
index b807b34757..0f88c444ec 100644
--- a/media/cast/transport/rtp_sender/rtp_sender.cc
+++ b/media/cast/net/rtp/rtp_sender.cc
@@ -1,18 +1,17 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/cast/transport/rtp_sender/rtp_sender.h"
+#include "media/cast/net/rtp/rtp_sender.h"
#include "base/big_endian.h"
#include "base/logging.h"
#include "base/rand_util.h"
-#include "media/cast/transport/cast_transport_defines.h"
-#include "media/cast/transport/pacing/paced_sender.h"
+#include "media/cast/net/cast_transport_defines.h"
+#include "media/cast/net/pacing/paced_sender.h"
namespace media {
namespace cast {
-namespace transport {
namespace {
@@ -41,30 +40,13 @@ RtpSender::RtpSender(
RtpSender::~RtpSender() {}
-bool RtpSender::InitializeAudio(const CastTransportAudioConfig& config) {
- storage_.reset(new PacketStorage(config.rtp.max_outstanding_frames));
+bool RtpSender::Initialize(const CastTransportRtpConfig& config) {
+ storage_.reset(new PacketStorage(config.stored_frames));
if (!storage_->IsValid()) {
return false;
}
- config_.audio = true;
- config_.ssrc = config.rtp.config.ssrc;
- config_.payload_type = config.rtp.config.payload_type;
- config_.frequency = config.frequency;
- config_.audio_codec = config.codec;
- packetizer_.reset(new RtpPacketizer(transport_, storage_.get(), config_));
- return true;
-}
-
-bool RtpSender::InitializeVideo(const CastTransportVideoConfig& config) {
- storage_.reset(new PacketStorage(config.rtp.max_outstanding_frames));
- if (!storage_->IsValid()) {
- return false;
- }
- config_.audio = false;
- config_.ssrc = config.rtp.config.ssrc;
- config_.payload_type = config.rtp.config.payload_type;
- config_.frequency = kVideoFrequency;
- config_.video_codec = config.codec;
+ config_.ssrc = config.ssrc;
+ config_.payload_type = config.rtp_payload_type;
packetizer_.reset(new RtpPacketizer(transport_, storage_.get(), config_));
return true;
}
@@ -145,6 +127,5 @@ void RtpSender::UpdateSequenceNumber(Packet* packet) {
big_endian_writer.WriteU16(packetizer_->NextSequenceNumber());
}
-} // namespace transport
} // namespace cast
} // namespace media
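
The two codec-specific entry points collapse into a single Initialize(). A hedged sketch of the new call, assuming a CastTransportRtpConfig that exposes the three fields this function reads (stored_frames, ssrc, rtp_payload_type); the values are placeholders.

CastTransportRtpConfig rtp_config;
rtp_config.stored_frames = 10;       // placeholder
rtp_config.ssrc = 0x12345;           // placeholder
rtp_config.rtp_payload_type = 127;   // placeholder
if (!rtp_sender.Initialize(rtp_config)) {
  // Invalid configuration, e.g. an unsupported stored_frames value.
}
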
diff --git a/media/cast/transport/rtp_sender/rtp_sender.h b/media/cast/net/rtp/rtp_sender.h
index e65326abf1..4dd7966a68 100644
--- a/media/cast/transport/rtp_sender/rtp_sender.h
+++ b/media/cast/net/rtp/rtp_sender.h
@@ -1,32 +1,30 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This file contains the interface to the cast RTP sender.
-#ifndef MEDIA_CAST_TRANSPORT_RTP_SENDER_RTP_SENDER_H_
-#define MEDIA_CAST_TRANSPORT_RTP_SENDER_RTP_SENDER_H_
+#ifndef MEDIA_CAST_NET_RTP_RTP_SENDER_H_
+#define MEDIA_CAST_NET_RTP_RTP_SENDER_H_
#include <map>
#include <set>
#include "base/memory/scoped_ptr.h"
+#include "base/memory/weak_ptr.h"
#include "base/time/tick_clock.h"
#include "base/time/time.h"
-#include "base/memory/weak_ptr.h"
#include "media/cast/cast_config.h"
#include "media/cast/cast_environment.h"
-#include "media/cast/transport/cast_transport_defines.h"
-#include "media/cast/transport/cast_transport_sender.h"
-#include "media/cast/transport/pacing/paced_sender.h"
-#include "media/cast/transport/rtp_sender/packet_storage/packet_storage.h"
-#include "media/cast/transport/rtp_sender/rtp_packetizer/rtp_packetizer.h"
+#include "media/cast/net/cast_transport_defines.h"
+#include "media/cast/net/cast_transport_sender.h"
+#include "media/cast/net/pacing/paced_sender.h"
+#include "media/cast/net/rtp/packet_storage.h"
+#include "media/cast/net/rtp/rtp_packetizer.h"
namespace media {
namespace cast {
-namespace transport {
-
// This object is only called from the main cast thread.
// This class handles splitting encoded audio and video frames into packets and
// adding an RTP header to each packet. The sent packets are stored until they are
@@ -40,13 +38,9 @@ class RtpSender {
~RtpSender();
- // Initialize audio stack. Audio must be initialized prior to sending encoded
- // audio frames. Returns false if configuration is invalid.
- bool InitializeAudio(const CastTransportAudioConfig& config);
-
- // Initialize video stack. Video must be initialized prior to sending encoded
- // video frames. Returns false if configuration is invalid.
- bool InitializeVideo(const CastTransportVideoConfig& config);
+ // This must be called before sending any frames. Returns false if
+ // configuration is invalid.
+ bool Initialize(const CastTransportRtpConfig& config);
void SendFrame(const EncodedFrame& frame);
@@ -78,8 +72,7 @@ class RtpSender {
DISALLOW_COPY_AND_ASSIGN(RtpSender);
};
-} // namespace transport
} // namespace cast
} // namespace media
-#endif // MEDIA_CAST_TRANSPORT_RTP_SENDER_RTP_SENDER_H_
+#endif // MEDIA_CAST_NET_RTP_RTP_SENDER_H_
diff --git a/media/cast/transport/transport/udp_transport.cc b/media/cast/net/udp_transport.cc
index 9669b17d43..ae0593d3ac 100644
--- a/media/cast/transport/transport/udp_transport.cc
+++ b/media/cast/net/udp_transport.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/cast/transport/transport/udp_transport.h"
+#include "media/cast/net/udp_transport.h"
#include <algorithm>
#include <string>
@@ -19,7 +19,6 @@
namespace media {
namespace cast {
-namespace transport {
namespace {
const int kMaxPacketSize = 1500;
@@ -237,6 +236,5 @@ void UdpTransport::OnSent(const scoped_refptr<net::IOBuffer>& buf,
}
}
-} // namespace transport
} // namespace cast
} // namespace media
diff --git a/media/cast/transport/transport/udp_transport.h b/media/cast/net/udp_transport.h
index 1a568501d5..951e9c12d3 100644
--- a/media/cast/transport/transport/udp_transport.h
+++ b/media/cast/net/udp_transport.h
@@ -2,15 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_CAST_TRANSPORT_TRANSPORT_UDP_TRANSPORT_H_
-#define MEDIA_CAST_TRANSPORT_TRANSPORT_UDP_TRANSPORT_H_
+#ifndef MEDIA_CAST_NET_UDP_TRANSPORT_H_
+#define MEDIA_CAST_NET_UDP_TRANSPORT_H_
#include "base/memory/ref_counted.h"
#include "base/memory/scoped_ptr.h"
#include "base/memory/weak_ptr.h"
#include "media/cast/cast_environment.h"
-#include "media/cast/transport/cast_transport_config.h"
-#include "media/cast/transport/cast_transport_sender.h"
+#include "media/cast/net/cast_transport_config.h"
+#include "media/cast/net/cast_transport_sender.h"
#include "net/base/ip_endpoint.h"
#include "net/base/net_util.h"
#include "net/udp/udp_socket.h"
@@ -23,7 +23,6 @@ class NetLog;
namespace media {
namespace cast {
-namespace transport {
// This class implements UDP transport mechanism for Cast.
class UdpTransport : public PacketSender {
@@ -90,8 +89,7 @@ class UdpTransport : public PacketSender {
DISALLOW_COPY_AND_ASSIGN(UdpTransport);
};
-} // namespace transport
} // namespace cast
} // namespace media
-#endif // MEDIA_CAST_TRANSPORT_TRANSPORT_UDP_TRANSPORT_H_
+#endif // MEDIA_CAST_NET_UDP_TRANSPORT_H_
diff --git a/media/cast/transport/transport/udp_transport_unittest.cc b/media/cast/net/udp_transport_unittest.cc
index 26879492f0..7ae938b4ca 100644
--- a/media/cast/transport/transport/udp_transport_unittest.cc
+++ b/media/cast/net/udp_transport_unittest.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/cast/transport/transport/udp_transport.h"
+#include "media/cast/net/udp_transport.h"
#include <algorithm>
#include <string>
@@ -12,14 +12,13 @@
#include "base/callback.h"
#include "base/message_loop/message_loop.h"
#include "base/run_loop.h"
+#include "media/cast/net/cast_transport_config.h"
#include "media/cast/test/utility/net_utility.h"
-#include "media/cast/transport/cast_transport_config.h"
#include "net/base/net_util.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace media {
namespace cast {
-namespace transport {
class MockPacketReceiver {
public:
@@ -33,7 +32,7 @@ class MockPacketReceiver {
}
std::string packet() const { return packet_; }
- transport::PacketReceiverCallback packet_receiver() {
+ PacketReceiverCallback packet_receiver() {
return base::Bind(&MockPacketReceiver::ReceivedPacket,
base::Unretained(this));
}
@@ -50,7 +49,7 @@ void SendPacket(UdpTransport* transport, Packet packet) {
transport->SendPacket(new base::RefCountedData<Packet>(packet), cb);
}
-static void UpdateCastTransportStatus(transport::CastTransportStatus status) {
+static void UpdateCastTransportStatus(CastTransportStatus status) {
NOTREACHED();
}
@@ -95,6 +94,5 @@ TEST(UdpTransport, SendAndReceive) {
std::equal(packet.begin(), packet.end(), receiver2.packet().begin()));
}
-} // namespace transport
} // namespace cast
} // namespace media
diff --git a/media/cast/receiver/audio_decoder.cc b/media/cast/receiver/audio_decoder.cc
index a4d1896835..dac0a5e9a1 100644
--- a/media/cast/receiver/audio_decoder.cc
+++ b/media/cast/receiver/audio_decoder.cc
@@ -23,7 +23,7 @@ class AudioDecoder::ImplBase
: public base::RefCountedThreadSafe<AudioDecoder::ImplBase> {
public:
ImplBase(const scoped_refptr<CastEnvironment>& cast_environment,
- transport::AudioCodec codec,
+ Codec codec,
int num_channels,
int sampling_rate)
: cast_environment_(cast_environment),
@@ -39,7 +39,7 @@ class AudioDecoder::ImplBase
return cast_initialization_status_;
}
- void DecodeFrame(scoped_ptr<transport::EncodedFrame> encoded_frame,
+ void DecodeFrame(scoped_ptr<EncodedFrame> encoded_frame,
const DecodeFrameCallback& callback) {
DCHECK_EQ(cast_initialization_status_, STATUS_AUDIO_INITIALIZED);
@@ -77,7 +77,7 @@ class AudioDecoder::ImplBase
virtual scoped_ptr<AudioBus> Decode(uint8* data, int len) = 0;
const scoped_refptr<CastEnvironment> cast_environment_;
- const transport::AudioCodec codec_;
+ const Codec codec_;
const int num_channels_;
// Subclass' ctor is expected to set this to STATUS_AUDIO_INITIALIZED.
@@ -96,7 +96,7 @@ class AudioDecoder::OpusImpl : public AudioDecoder::ImplBase {
int num_channels,
int sampling_rate)
: ImplBase(cast_environment,
- transport::kOpus,
+ CODEC_AUDIO_OPUS,
num_channels,
sampling_rate),
decoder_memory_(new uint8[opus_decoder_get_size(num_channels)]),
@@ -166,7 +166,7 @@ class AudioDecoder::Pcm16Impl : public AudioDecoder::ImplBase {
int num_channels,
int sampling_rate)
: ImplBase(cast_environment,
- transport::kPcm16,
+ CODEC_AUDIO_PCM16,
num_channels,
sampling_rate) {
if (ImplBase::cast_initialization_status_ != STATUS_AUDIO_UNINITIALIZED)
@@ -202,13 +202,13 @@ AudioDecoder::AudioDecoder(
const scoped_refptr<CastEnvironment>& cast_environment,
int channels,
int sampling_rate,
- transport::AudioCodec codec)
+ Codec codec)
: cast_environment_(cast_environment) {
switch (codec) {
- case transport::kOpus:
+ case CODEC_AUDIO_OPUS:
impl_ = new OpusImpl(cast_environment, channels, sampling_rate);
break;
- case transport::kPcm16:
+ case CODEC_AUDIO_PCM16:
impl_ = new Pcm16Impl(cast_environment, channels, sampling_rate);
break;
default:
@@ -226,7 +226,7 @@ CastInitializationStatus AudioDecoder::InitializationResult() const {
}
void AudioDecoder::DecodeFrame(
- scoped_ptr<transport::EncodedFrame> encoded_frame,
+ scoped_ptr<EncodedFrame> encoded_frame,
const DecodeFrameCallback& callback) {
DCHECK(encoded_frame.get());
DCHECK(!callback.is_null());
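
Codec selection now goes through the unified Codec enum instead of transport::AudioCodec. A hedged sketch of constructing a decoder and handing it one frame, based only on the constructor, InitializationResult() and DecodeFrame() signatures in this file; |cast_environment| and |done_cb| are placeholders, and the .Pass() handoff is an assumption inferred from the scoped_ptr parameter.

AudioDecoder decoder(cast_environment, 2 /* channels */, 48000, CODEC_AUDIO_OPUS);
if (decoder.InitializationResult() == STATUS_AUDIO_INITIALIZED) {
  scoped_ptr<EncodedFrame> frame(new EncodedFrame());
  frame->dependency = EncodedFrame::KEY;
  // frame->frame_id, frame->referenced_frame_id and frame->data are filled in
  // by the receiver before decoding.
  decoder.DecodeFrame(frame.Pass(), done_cb);  // done_cb: DecodeFrameCallback
}
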
diff --git a/media/cast/receiver/audio_decoder.h b/media/cast/receiver/audio_decoder.h
index c66735e4e6..0b13eae6a8 100644
--- a/media/cast/receiver/audio_decoder.h
+++ b/media/cast/receiver/audio_decoder.h
@@ -10,7 +10,7 @@
#include "media/base/audio_bus.h"
#include "media/cast/cast_config.h"
#include "media/cast/cast_environment.h"
-#include "media/cast/transport/cast_transport_config.h"
+#include "media/cast/net/cast_transport_config.h"
namespace media {
namespace cast {
@@ -29,7 +29,7 @@ class AudioDecoder {
AudioDecoder(const scoped_refptr<CastEnvironment>& cast_environment,
int channels,
int sampling_rate,
- transport::AudioCodec codec);
+ Codec codec);
virtual ~AudioDecoder();
// Returns STATUS_AUDIO_INITIALIZED if the decoder was successfully
@@ -44,7 +44,7 @@ class AudioDecoder {
// monotonically-increasing by 1 for each successive call to this method.
// When it is not, the decoder will assume one or more frames have been
// dropped (e.g., due to packet loss), and will perform recovery actions.
- void DecodeFrame(scoped_ptr<transport::EncodedFrame> encoded_frame,
+ void DecodeFrame(scoped_ptr<EncodedFrame> encoded_frame,
const DecodeFrameCallback& callback);
private:
diff --git a/media/cast/receiver/audio_decoder_unittest.cc b/media/cast/receiver/audio_decoder_unittest.cc
index 6985a69423..576e9dbce5 100644
--- a/media/cast/receiver/audio_decoder_unittest.cc
+++ b/media/cast/receiver/audio_decoder_unittest.cc
@@ -20,11 +20,11 @@ namespace cast {
namespace {
struct TestScenario {
- transport::AudioCodec codec;
+ Codec codec;
int num_channels;
int sampling_rate;
- TestScenario(transport::AudioCodec c, int n, int s)
+ TestScenario(Codec c, int n, int s)
: codec(c), num_channels(n), sampling_rate(s) {}
};
} // namespace
@@ -51,7 +51,7 @@ class AudioDecoderTest : public ::testing::TestWithParam<TestScenario> {
last_frame_id_ = 0;
seen_a_decoded_frame_ = false;
- if (GetParam().codec == transport::kOpus) {
+ if (GetParam().codec == CODEC_AUDIO_OPUS) {
opus_encoder_memory_.reset(
new uint8[opus_encoder_get_size(GetParam().num_channels)]);
OpusEncoder* const opus_encoder =
@@ -73,9 +73,9 @@ class AudioDecoderTest : public ::testing::TestWithParam<TestScenario> {
void FeedMoreAudio(const base::TimeDelta& duration,
int num_dropped_frames) {
// Prepare a simulated EncodedFrame to feed into the AudioDecoder.
- scoped_ptr<transport::EncodedFrame> encoded_frame(
- new transport::EncodedFrame());
- encoded_frame->dependency = transport::EncodedFrame::KEY;
+ scoped_ptr<EncodedFrame> encoded_frame(
+ new EncodedFrame());
+ encoded_frame->dependency = EncodedFrame::KEY;
encoded_frame->frame_id = last_frame_id_ + 1 + num_dropped_frames;
encoded_frame->referenced_frame_id = encoded_frame->frame_id;
last_frame_id_ = encoded_frame->frame_id;
@@ -88,13 +88,13 @@ class AudioDecoderTest : public ::testing::TestWithParam<TestScenario> {
std::vector<int16> interleaved(num_elements);
audio_bus->ToInterleaved(
audio_bus->frames(), sizeof(int16), &interleaved.front());
- if (GetParam().codec == transport::kPcm16) {
+ if (GetParam().codec == CODEC_AUDIO_PCM16) {
encoded_frame->data.resize(num_elements * sizeof(int16));
int16* const pcm_data =
reinterpret_cast<int16*>(encoded_frame->mutable_bytes());
for (size_t i = 0; i < interleaved.size(); ++i)
pcm_data[i] = static_cast<int16>(base::HostToNet16(interleaved[i]));
- } else if (GetParam().codec == transport::kOpus) {
+ } else if (GetParam().codec == CODEC_AUDIO_OPUS) {
OpusEncoder* const opus_encoder =
reinterpret_cast<OpusEncoder*>(opus_encoder_memory_.get());
const int kOpusEncodeBufferSize = 4000;
@@ -154,7 +154,7 @@ class AudioDecoderTest : public ::testing::TestWithParam<TestScenario> {
// first frame seen at the start (and immediately after dropped packet
// recovery) because it introduces a tiny, significant delay.
bool examine_signal = true;
- if (GetParam().codec == transport::kOpus) {
+ if (GetParam().codec == CODEC_AUDIO_OPUS) {
examine_signal = seen_a_decoded_frame_ && should_be_continuous;
seen_a_decoded_frame_ = true;
}
@@ -229,13 +229,14 @@ TEST_P(AudioDecoderTest, RecoversFromDroppedFrames) {
WaitForAllAudioToBeDecoded();
}
-INSTANTIATE_TEST_CASE_P(AudioDecoderTestScenarios,
- AudioDecoderTest,
- ::testing::Values(
- TestScenario(transport::kPcm16, 1, 8000),
- TestScenario(transport::kPcm16, 2, 48000),
- TestScenario(transport::kOpus, 1, 8000),
- TestScenario(transport::kOpus, 2, 48000)));
+INSTANTIATE_TEST_CASE_P(
+ AudioDecoderTestScenarios,
+ AudioDecoderTest,
+ ::testing::Values(
+ TestScenario(CODEC_AUDIO_PCM16, 1, 8000),
+ TestScenario(CODEC_AUDIO_PCM16, 2, 48000),
+ TestScenario(CODEC_AUDIO_OPUS, 1, 8000),
+ TestScenario(CODEC_AUDIO_OPUS, 2, 48000)));
} // namespace cast
} // namespace media
diff --git a/media/cast/receiver/cast_receiver_impl.cc b/media/cast/receiver/cast_receiver_impl.cc
index 7cff354c14..36669b9e62 100644
--- a/media/cast/receiver/cast_receiver_impl.cc
+++ b/media/cast/receiver/cast_receiver_impl.cc
@@ -20,7 +20,7 @@ scoped_ptr<CastReceiver> CastReceiver::Create(
scoped_refptr<CastEnvironment> cast_environment,
const FrameReceiverConfig& audio_config,
const FrameReceiverConfig& video_config,
- transport::PacketSender* const packet_sender) {
+ PacketSender* const packet_sender) {
return scoped_ptr<CastReceiver>(new CastReceiverImpl(
cast_environment, audio_config, video_config, packet_sender));
}
@@ -29,7 +29,7 @@ CastReceiverImpl::CastReceiverImpl(
scoped_refptr<CastEnvironment> cast_environment,
const FrameReceiverConfig& audio_config,
const FrameReceiverConfig& video_config,
- transport::PacketSender* const packet_sender)
+ PacketSender* const packet_sender)
: cast_environment_(cast_environment),
pacer_(cast_environment->Clock(),
cast_environment->Logging(),
@@ -41,8 +41,8 @@ CastReceiverImpl::CastReceiverImpl(
ssrc_of_video_sender_(video_config.incoming_ssrc),
num_audio_channels_(audio_config.channels),
audio_sampling_rate_(audio_config.frequency),
- audio_codec_(audio_config.codec.audio),
- video_codec_(video_config.codec.video) {}
+ audio_codec_(audio_config.codec),
+ video_codec_(video_config.codec) {}
CastReceiverImpl::~CastReceiverImpl() {}
@@ -76,7 +76,7 @@ void CastReceiverImpl::DispatchReceivedPacket(scoped_ptr<Packet> packet) {
base::Passed(&packet)));
}
-transport::PacketReceiverCallback CastReceiverImpl::packet_receiver() {
+PacketReceiverCallback CastReceiverImpl::packet_receiver() {
return base::Bind(&CastReceiverImpl::DispatchReceivedPacket,
// TODO(miu): This code structure is dangerous, since the
// callback could be stored and then invoked after
@@ -122,7 +122,7 @@ void CastReceiverImpl::RequestEncodedVideoFrame(
void CastReceiverImpl::DecodeEncodedAudioFrame(
const AudioFrameDecodedCallback& callback,
- scoped_ptr<transport::EncodedFrame> encoded_frame) {
+ scoped_ptr<EncodedFrame> encoded_frame) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
if (!encoded_frame) {
callback.Run(make_scoped_ptr<AudioBus>(NULL), base::TimeTicks(), false);
@@ -150,7 +150,7 @@ void CastReceiverImpl::DecodeEncodedAudioFrame(
void CastReceiverImpl::DecodeEncodedVideoFrame(
const VideoFrameDecodedCallback& callback,
- scoped_ptr<transport::EncodedFrame> encoded_frame) {
+ scoped_ptr<EncodedFrame> encoded_frame) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
if (!encoded_frame) {
callback.Run(
diff --git a/media/cast/receiver/cast_receiver_impl.h b/media/cast/receiver/cast_receiver_impl.h
index c0dd5f38d1..3443561257 100644
--- a/media/cast/receiver/cast_receiver_impl.h
+++ b/media/cast/receiver/cast_receiver_impl.h
@@ -10,8 +10,8 @@
#include "media/cast/cast_config.h"
#include "media/cast/cast_environment.h"
#include "media/cast/cast_receiver.h"
+#include "media/cast/net/pacing/paced_sender.h"
#include "media/cast/receiver/frame_receiver.h"
-#include "media/cast/transport/pacing/paced_sender.h"
namespace media {
namespace cast {
@@ -27,12 +27,12 @@ class CastReceiverImpl : public CastReceiver {
CastReceiverImpl(scoped_refptr<CastEnvironment> cast_environment,
const FrameReceiverConfig& audio_config,
const FrameReceiverConfig& video_config,
- transport::PacketSender* const packet_sender);
+ PacketSender* const packet_sender);
virtual ~CastReceiverImpl();
// CastReceiver implementation.
- virtual transport::PacketReceiverCallback packet_receiver() OVERRIDE;
+ virtual PacketReceiverCallback packet_receiver() OVERRIDE;
virtual void RequestDecodedAudioFrame(
const AudioFrameDecodedCallback& callback) OVERRIDE;
virtual void RequestEncodedAudioFrame(
@@ -51,13 +51,13 @@ class CastReceiverImpl : public CastReceiver {
// uses this as a callback for RequestEncodedAudioFrame().
void DecodeEncodedAudioFrame(
const AudioFrameDecodedCallback& callback,
- scoped_ptr<transport::EncodedFrame> encoded_frame);
+ scoped_ptr<EncodedFrame> encoded_frame);
// Feeds an EncodedFrame into |video_decoder_|. RequestDecodedVideoFrame()
// uses this as a callback for RequestEncodedVideoFrame().
void DecodeEncodedVideoFrame(
const VideoFrameDecodedCallback& callback,
- scoped_ptr<transport::EncodedFrame> encoded_frame);
+ scoped_ptr<EncodedFrame> encoded_frame);
// Receives an AudioBus from |audio_decoder_|, logs the event, and passes the
// data on by running the given |callback|. This method is static to ensure
@@ -88,7 +88,7 @@ class CastReceiverImpl : public CastReceiver {
bool is_continuous);
const scoped_refptr<CastEnvironment> cast_environment_;
- transport::PacedSender pacer_;
+ PacedSender pacer_;
FrameReceiver audio_receiver_;
FrameReceiver video_receiver_;
@@ -102,8 +102,8 @@ class CastReceiverImpl : public CastReceiver {
// the internal software-based decoders.
const int num_audio_channels_;
const int audio_sampling_rate_;
- const transport::AudioCodec audio_codec_;
- const transport::VideoCodec video_codec_;
+ const Codec audio_codec_;
+ const Codec video_codec_;
// Created on-demand to decode frames from |audio_receiver_| into AudioBuses
// for playback.
diff --git a/media/cast/receiver/frame_receiver.cc b/media/cast/receiver/frame_receiver.cc
index e189cc99a7..951e958b80 100644
--- a/media/cast/receiver/frame_receiver.cc
+++ b/media/cast/receiver/frame_receiver.cc
@@ -23,7 +23,7 @@ FrameReceiver::FrameReceiver(
const scoped_refptr<CastEnvironment>& cast_environment,
const FrameReceiverConfig& config,
EventMediaType event_media_type,
- transport::PacedPacketSender* const packet_sender)
+ PacedPacketSender* const packet_sender)
: cast_environment_(cast_environment),
packet_parser_(config.incoming_ssrc, config.rtp_payload_type),
stats_(cast_environment->Clock()),
@@ -193,8 +193,8 @@ void FrameReceiver::EmitAvailableEncodedFrames() {
// Attempt to peek at the next completed frame from the |framer_|.
// TODO(miu): We should only be peeking at the metadata, and not copying the
// payload yet! Or, at least, peek using a StringPiece instead of a copy.
- scoped_ptr<transport::EncodedFrame> encoded_frame(
- new transport::EncodedFrame());
+ scoped_ptr<EncodedFrame> encoded_frame(
+ new EncodedFrame());
bool is_consecutively_next_frame = false;
bool have_multiple_complete_frames = false;
if (!framer_.GetEncodedFrame(encoded_frame.get(),
@@ -239,7 +239,7 @@ void FrameReceiver::EmitAvailableEncodedFrames() {
}
// Decrypt the payload data in the frame, if crypto is being used.
- if (decryptor_.initialized()) {
+ if (decryptor_.is_activated()) {
std::string decrypted_data;
if (!decryptor_.Decrypt(encoded_frame->frame_id,
encoded_frame->data,
diff --git a/media/cast/receiver/frame_receiver.h b/media/cast/receiver/frame_receiver.h
index ac14ab1e0f..f8e4481db2 100644
--- a/media/cast/receiver/frame_receiver.h
+++ b/media/cast/receiver/frame_receiver.h
@@ -9,17 +9,17 @@
#include "base/memory/scoped_ptr.h"
#include "base/memory/weak_ptr.h"
#include "base/time/time.h"
-#include "media/cast/base/clock_drift_smoother.h"
#include "media/cast/cast_config.h"
#include "media/cast/cast_receiver.h"
-#include "media/cast/framer/framer.h"
+#include "media/cast/common/clock_drift_smoother.h"
+#include "media/cast/common/transport_encryption_handler.h"
#include "media/cast/logging/logging_defines.h"
-#include "media/cast/rtcp/receiver_rtcp_event_subscriber.h"
-#include "media/cast/rtcp/rtcp.h"
-#include "media/cast/rtp_receiver/receiver_stats.h"
-#include "media/cast/rtp_receiver/rtp_parser/rtp_parser.h"
-#include "media/cast/rtp_receiver/rtp_receiver_defines.h"
-#include "media/cast/transport/utility/transport_encryption_handler.h"
+#include "media/cast/net/rtcp/receiver_rtcp_event_subscriber.h"
+#include "media/cast/net/rtcp/rtcp.h"
+#include "media/cast/net/rtp/framer.h"
+#include "media/cast/net/rtp/receiver_stats.h"
+#include "media/cast/net/rtp/rtp_parser.h"
+#include "media/cast/net/rtp/rtp_receiver_defines.h"
namespace media {
namespace cast {
@@ -50,7 +50,7 @@ class FrameReceiver : public RtpPayloadFeedback,
FrameReceiver(const scoped_refptr<CastEnvironment>& cast_environment,
const FrameReceiverConfig& config,
EventMediaType event_media_type,
- transport::PacedPacketSender* const packet_sender);
+ PacedPacketSender* const packet_sender);
virtual ~FrameReceiver();
@@ -152,7 +152,7 @@ class FrameReceiver : public RtpPayloadFeedback,
Rtcp rtcp_;
// Decrypts encrypted frames.
- transport::TransportEncryptionHandler decryptor_;
+ TransportEncryptionHandler decryptor_;
// Outstanding callbacks to run to deliver on client requests for frames.
std::list<ReceiveEncodedFrameCallback> frame_request_queue_;
diff --git a/media/cast/receiver/frame_receiver_unittest.cc b/media/cast/receiver/frame_receiver_unittest.cc
index 4d8273e132..121d12e812 100644
--- a/media/cast/receiver/frame_receiver_unittest.cc
+++ b/media/cast/receiver/frame_receiver_unittest.cc
@@ -12,11 +12,11 @@
#include "media/cast/cast_defines.h"
#include "media/cast/cast_environment.h"
#include "media/cast/logging/simple_event_subscriber.h"
+#include "media/cast/net/pacing/mock_paced_packet_sender.h"
+#include "media/cast/net/rtcp/test_rtcp_packet_builder.h"
#include "media/cast/receiver/frame_receiver.h"
-#include "media/cast/rtcp/test_rtcp_packet_builder.h"
#include "media/cast/test/fake_single_thread_task_runner.h"
#include "media/cast/test/utility/default_config.h"
-#include "media/cast/transport/pacing/mock_paced_packet_sender.h"
#include "testing/gmock/include/gmock/gmock.h"
using ::testing::_;
@@ -41,7 +41,7 @@ class FakeFrameClient {
std::make_pair(expected_frame_id, expected_playout_time));
}
- void DeliverEncodedFrame(scoped_ptr<transport::EncodedFrame> frame) {
+ void DeliverEncodedFrame(scoped_ptr<EncodedFrame> frame) {
SCOPED_TRACE(::testing::Message() << "num_called_ is " << num_called_);
ASSERT_FALSE(!frame)
<< "If at shutdown: There were unsatisfied requests enqueued.";
@@ -136,7 +136,7 @@ class FrameReceiverTest : public ::testing::Test {
RtpCastHeader rtp_header_;
base::SimpleTestTickClock* testing_clock_; // Owned by CastEnvironment.
base::TimeTicks start_time_;
- transport::MockPacedPacketSender mock_transport_;
+ MockPacedPacketSender mock_transport_;
scoped_refptr<test::FakeSingleThreadTaskRunner> task_runner_;
scoped_refptr<CastEnvironment> cast_environment_;
FakeFrameClient frame_client_;
diff --git a/media/cast/receiver/video_decoder.cc b/media/cast/receiver/video_decoder.cc
index 6db3fd35f3..f4de9c3b31 100644
--- a/media/cast/receiver/video_decoder.cc
+++ b/media/cast/receiver/video_decoder.cc
@@ -30,7 +30,7 @@ class VideoDecoder::ImplBase
: public base::RefCountedThreadSafe<VideoDecoder::ImplBase> {
public:
ImplBase(const scoped_refptr<CastEnvironment>& cast_environment,
- transport::VideoCodec codec)
+ Codec codec)
: cast_environment_(cast_environment),
codec_(codec),
cast_initialization_status_(STATUS_VIDEO_UNINITIALIZED),
@@ -40,7 +40,7 @@ class VideoDecoder::ImplBase
return cast_initialization_status_;
}
- void DecodeFrame(scoped_ptr<transport::EncodedFrame> encoded_frame,
+ void DecodeFrame(scoped_ptr<EncodedFrame> encoded_frame,
const DecodeFrameCallback& callback) {
DCHECK_EQ(cast_initialization_status_, STATUS_VIDEO_INITIALIZED);
@@ -77,7 +77,7 @@ class VideoDecoder::ImplBase
virtual scoped_refptr<VideoFrame> Decode(uint8* data, int len) = 0;
const scoped_refptr<CastEnvironment> cast_environment_;
- const transport::VideoCodec codec_;
+ const Codec codec_;
// Subclass' ctor is expected to set this to STATUS_VIDEO_INITIALIZED.
CastInitializationStatus cast_initialization_status_;
@@ -92,7 +92,7 @@ class VideoDecoder::ImplBase
class VideoDecoder::Vp8Impl : public VideoDecoder::ImplBase {
public:
explicit Vp8Impl(const scoped_refptr<CastEnvironment>& cast_environment)
- : ImplBase(cast_environment, transport::kVp8) {
+ : ImplBase(cast_environment, CODEC_VIDEO_VP8) {
if (ImplBase::cast_initialization_status_ != STATUS_VIDEO_UNINITIALIZED)
return;
@@ -173,7 +173,7 @@ class VideoDecoder::Vp8Impl : public VideoDecoder::ImplBase {
class VideoDecoder::FakeImpl : public VideoDecoder::ImplBase {
public:
explicit FakeImpl(const scoped_refptr<CastEnvironment>& cast_environment)
- : ImplBase(cast_environment, transport::kFakeSoftwareVideo),
+ : ImplBase(cast_environment, CODEC_VIDEO_FAKE),
last_decoded_id_(-1) {
if (ImplBase::cast_initialization_status_ != STATUS_VIDEO_UNINITIALIZED)
return;
@@ -184,9 +184,14 @@ class VideoDecoder::FakeImpl : public VideoDecoder::ImplBase {
virtual ~FakeImpl() {}
virtual scoped_refptr<VideoFrame> Decode(uint8* data, int len) OVERRIDE {
+ // Make sure this is a JSON string.
+ if (!len || data[0] != '{')
+ return NULL;
base::JSONReader reader;
scoped_ptr<base::Value> values(
- reader.Read(base::StringPiece(reinterpret_cast<char*>(data))));
+ reader.Read(base::StringPiece(reinterpret_cast<char*>(data), len)));
+ if (!values)
+ return NULL;
base::DictionaryValue* dict = NULL;
values->GetAsDictionary(&dict);
@@ -209,18 +214,18 @@ class VideoDecoder::FakeImpl : public VideoDecoder::ImplBase {
VideoDecoder::VideoDecoder(
const scoped_refptr<CastEnvironment>& cast_environment,
- transport::VideoCodec codec)
+ Codec codec)
: cast_environment_(cast_environment) {
switch (codec) {
#ifndef OFFICIAL_BUILD
- case transport::kFakeSoftwareVideo:
+ case CODEC_VIDEO_FAKE:
impl_ = new FakeImpl(cast_environment);
break;
#endif
- case transport::kVp8:
+ case CODEC_VIDEO_VP8:
impl_ = new Vp8Impl(cast_environment);
break;
- case transport::kH264:
+ case CODEC_VIDEO_H264:
// TODO(miu): Need implementation.
NOTIMPLEMENTED();
break;
@@ -239,7 +244,7 @@ CastInitializationStatus VideoDecoder::InitializationResult() const {
}
void VideoDecoder::DecodeFrame(
- scoped_ptr<transport::EncodedFrame> encoded_frame,
+ scoped_ptr<EncodedFrame> encoded_frame,
const DecodeFrameCallback& callback) {
DCHECK(encoded_frame.get());
DCHECK(!callback.is_null());
diff --git a/media/cast/receiver/video_decoder.h b/media/cast/receiver/video_decoder.h
index 66dc36bb2a..f3d8ca202e 100644
--- a/media/cast/receiver/video_decoder.h
+++ b/media/cast/receiver/video_decoder.h
@@ -10,7 +10,7 @@
#include "base/memory/scoped_ptr.h"
#include "media/base/video_frame.h"
#include "media/cast/cast_config.h"
-#include "media/cast/transport/cast_transport_config.h"
+#include "media/cast/net/cast_transport_config.h"
namespace media {
namespace cast {
@@ -28,7 +28,7 @@ class VideoDecoder {
bool is_continuous)> DecodeFrameCallback;
VideoDecoder(const scoped_refptr<CastEnvironment>& cast_environment,
- transport::VideoCodec codec);
+ Codec codec);
virtual ~VideoDecoder();
// Returns STATUS_VIDEO_INITIALIZED if the decoder was successfully
@@ -43,7 +43,7 @@ class VideoDecoder {
// monotonically-increasing by 1 for each successive call to this method.
// When it is not, the decoder will assume one or more frames have been
// dropped (e.g., due to packet loss), and will perform recovery actions.
- void DecodeFrame(scoped_ptr<transport::EncodedFrame> encoded_frame,
+ void DecodeFrame(scoped_ptr<EncodedFrame> encoded_frame,
const DecodeFrameCallback& callback);
private:
diff --git a/media/cast/receiver/video_decoder_unittest.cc b/media/cast/receiver/video_decoder_unittest.cc
index 1d16534b96..c7c39ad2d0 100644
--- a/media/cast/receiver/video_decoder_unittest.cc
+++ b/media/cast/receiver/video_decoder_unittest.cc
@@ -11,9 +11,9 @@
#include "base/time/time.h"
#include "media/cast/cast_config.h"
#include "media/cast/receiver/video_decoder.h"
+#include "media/cast/sender/vp8_encoder.h"
#include "media/cast/test/utility/standalone_cast_environment.h"
#include "media/cast/test/utility/video_utility.h"
-#include "media/cast/video_sender/codecs/vp8/vp8_encoder.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace media {
@@ -35,8 +35,7 @@ VideoSenderConfig GetVideoSenderConfigForTest() {
} // namespace
-class VideoDecoderTest
- : public ::testing::TestWithParam<transport::VideoCodec> {
+class VideoDecoderTest : public ::testing::TestWithParam<Codec> {
public:
VideoDecoderTest()
: cast_environment_(new StandaloneCastEnvironment()),
@@ -74,9 +73,10 @@ class VideoDecoderTest
PopulateVideoFrame(video_frame, 0);
// Encode |frame| into |encoded_frame->data|.
- scoped_ptr<transport::EncodedFrame> encoded_frame(
- new transport::EncodedFrame());
- CHECK_EQ(transport::kVp8, GetParam()); // Only support VP8 test currently.
+ scoped_ptr<EncodedFrame> encoded_frame(
+ new EncodedFrame());
+ // This test currently supports only VP8.
+ CHECK_EQ(CODEC_VIDEO_VP8, GetParam());
vp8_encoder_.Encode(video_frame, encoded_frame.get());
encoded_frame->frame_id = last_frame_id_ + 1 + num_dropped_frames;
last_frame_id_ = encoded_frame->frame_id;
@@ -177,7 +177,7 @@ TEST_P(VideoDecoderTest, RecoversFromDroppedFrames) {
INSTANTIATE_TEST_CASE_P(VideoDecoderTestScenarios,
VideoDecoderTest,
- ::testing::Values(transport::kVp8));
+ ::testing::Values(CODEC_VIDEO_VP8));
} // namespace cast
} // namespace media
diff --git a/media/cast/audio_sender/audio_encoder.cc b/media/cast/sender/audio_encoder.cc
index 8860c7dd2d..83bd594fb6 100644
--- a/media/cast/audio_sender/audio_encoder.cc
+++ b/media/cast/sender/audio_encoder.cc
@@ -1,8 +1,8 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/cast/audio_sender/audio_encoder.h"
+#include "media/cast/sender/audio_encoder.h"
#include <algorithm>
@@ -44,7 +44,7 @@ class AudioEncoder::ImplBase
: public base::RefCountedThreadSafe<AudioEncoder::ImplBase> {
public:
ImplBase(const scoped_refptr<CastEnvironment>& cast_environment,
- transport::AudioCodec codec,
+ Codec codec,
int num_channels,
int sampling_rate,
const FrameEncodedCallback& callback)
@@ -117,9 +117,9 @@ class AudioEncoder::ImplBase
if (buffer_fill_end_ < samples_per_frame_)
break;
- scoped_ptr<transport::EncodedFrame> audio_frame(
- new transport::EncodedFrame());
- audio_frame->dependency = transport::EncodedFrame::KEY;
+ scoped_ptr<EncodedFrame> audio_frame(
+ new EncodedFrame());
+ audio_frame->dependency = EncodedFrame::KEY;
audio_frame->frame_id = frame_id_;
audio_frame->referenced_frame_id = frame_id_;
audio_frame->rtp_timestamp = frame_rtp_timestamp_;
@@ -151,7 +151,7 @@ class AudioEncoder::ImplBase
virtual bool EncodeFromFilledBuffer(std::string* out) = 0;
const scoped_refptr<CastEnvironment> cast_environment_;
- const transport::AudioCodec codec_;
+ const Codec codec_;
const int num_channels_;
const int samples_per_frame_;
const FrameEncodedCallback callback_;
@@ -193,7 +193,7 @@ class AudioEncoder::OpusImpl : public AudioEncoder::ImplBase {
int bitrate,
const FrameEncodedCallback& callback)
: ImplBase(cast_environment,
- transport::kOpus,
+ CODEC_AUDIO_OPUS,
num_channels,
sampling_rate,
callback),
@@ -283,7 +283,7 @@ class AudioEncoder::Pcm16Impl : public AudioEncoder::ImplBase {
int sampling_rate,
const FrameEncodedCallback& callback)
: ImplBase(cast_environment,
- transport::kPcm16,
+ CODEC_AUDIO_PCM16,
num_channels,
sampling_rate,
callback),
@@ -326,24 +326,27 @@ class AudioEncoder::Pcm16Impl : public AudioEncoder::ImplBase {
AudioEncoder::AudioEncoder(
const scoped_refptr<CastEnvironment>& cast_environment,
- const AudioSenderConfig& audio_config,
+ int num_channels,
+ int sampling_rate,
+ int bitrate,
+ Codec codec,
const FrameEncodedCallback& frame_encoded_callback)
: cast_environment_(cast_environment) {
// Note: It doesn't matter which thread constructs AudioEncoder, just so long
// as all calls to InsertAudio() are by the same thread.
insert_thread_checker_.DetachFromThread();
- switch (audio_config.codec) {
- case transport::kOpus:
+ switch (codec) {
+ case CODEC_AUDIO_OPUS:
impl_ = new OpusImpl(cast_environment,
- audio_config.channels,
- audio_config.frequency,
- audio_config.bitrate,
+ num_channels,
+ sampling_rate,
+ bitrate,
frame_encoded_callback);
break;
- case transport::kPcm16:
+ case CODEC_AUDIO_PCM16:
impl_ = new Pcm16Impl(cast_environment,
- audio_config.channels,
- audio_config.frequency,
+ num_channels,
+ sampling_rate,
frame_encoded_callback);
break;
default:
diff --git a/media/cast/audio_sender/audio_encoder.h b/media/cast/sender/audio_encoder.h
index 2297672b74..5f080c6cb6 100644
--- a/media/cast/audio_sender/audio_encoder.h
+++ b/media/cast/sender/audio_encoder.h
@@ -1,15 +1,14 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_CAST_AUDIO_SENDER_AUDIO_ENCODER_H_
-#define MEDIA_CAST_AUDIO_SENDER_AUDIO_ENCODER_H_
+#ifndef MEDIA_CAST_SENDER_AUDIO_ENCODER_H_
+#define MEDIA_CAST_SENDER_AUDIO_ENCODER_H_
#include "base/memory/ref_counted.h"
#include "base/memory/scoped_ptr.h"
#include "base/threading/thread_checker.h"
#include "media/base/audio_bus.h"
-#include "media/cast/cast_config.h"
#include "media/cast/cast_environment.h"
namespace base {
@@ -21,11 +20,14 @@ namespace cast {
class AudioEncoder {
public:
- typedef base::Callback<void(scoped_ptr<transport::EncodedFrame>)>
+ typedef base::Callback<void(scoped_ptr<EncodedFrame>)>
FrameEncodedCallback;
AudioEncoder(const scoped_refptr<CastEnvironment>& cast_environment,
- const AudioSenderConfig& audio_config,
+ int num_channels,
+ int sampling_rate,
+ int bitrate,
+ Codec codec,
const FrameEncodedCallback& frame_encoded_callback);
virtual ~AudioEncoder();
@@ -51,4 +53,4 @@ class AudioEncoder {
} // namespace cast
} // namespace media
-#endif // MEDIA_CAST_AUDIO_SENDER_AUDIO_ENCODER_H_
+#endif // MEDIA_CAST_SENDER_AUDIO_ENCODER_H_
diff --git a/media/cast/audio_sender/audio_encoder_unittest.cc b/media/cast/sender/audio_encoder_unittest.cc
index b521099243..0764148a4c 100644
--- a/media/cast/audio_sender/audio_encoder_unittest.cc
+++ b/media/cast/sender/audio_encoder_unittest.cc
@@ -1,4 +1,4 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -12,9 +12,8 @@
#include "base/memory/scoped_ptr.h"
#include "media/base/audio_bus.h"
#include "media/base/media.h"
-#include "media/cast/audio_sender/audio_encoder.h"
-#include "media/cast/cast_config.h"
#include "media/cast/cast_environment.h"
+#include "media/cast/sender/audio_encoder.h"
#include "media/cast/test/fake_single_thread_task_runner.h"
#include "media/cast/test/utility/audio_utility.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -22,13 +21,13 @@
namespace media {
namespace cast {
-static const int64 kStartMillisecond = INT64_C(12345678900000);
+static const int kNumChannels = 2;
namespace {
class TestEncodedAudioFrameReceiver {
public:
- explicit TestEncodedAudioFrameReceiver(transport::AudioCodec codec)
+ explicit TestEncodedAudioFrameReceiver(Codec codec)
: codec_(codec), frames_received_(0), rtp_lower_bound_(0) {}
virtual ~TestEncodedAudioFrameReceiver() {}
@@ -40,8 +39,8 @@ class TestEncodedAudioFrameReceiver {
upper_bound_ = upper_bound;
}
- void FrameEncoded(scoped_ptr<transport::EncodedFrame> encoded_frame) {
- EXPECT_EQ(encoded_frame->dependency, transport::EncodedFrame::KEY);
+ void FrameEncoded(scoped_ptr<EncodedFrame> encoded_frame) {
+ EXPECT_EQ(encoded_frame->dependency, EncodedFrame::KEY);
EXPECT_EQ(static_cast<uint8>(frames_received_ & 0xff),
encoded_frame->frame_id);
EXPECT_EQ(encoded_frame->frame_id, encoded_frame->referenced_frame_id);
@@ -62,7 +61,7 @@ class TestEncodedAudioFrameReceiver {
}
private:
- const transport::AudioCodec codec_;
+ const Codec codec_;
int frames_received_;
uint32 rtp_lower_bound_;
base::TimeTicks lower_bound_;
@@ -96,8 +95,7 @@ class AudioEncoderTest : public ::testing::TestWithParam<TestScenario> {
AudioEncoderTest() {
InitializeMediaLibraryForTesting();
testing_clock_ = new base::SimpleTestTickClock();
- testing_clock_->Advance(
- base::TimeDelta::FromMilliseconds(kStartMillisecond));
+ testing_clock_->Advance(base::TimeTicks::Now() - base::TimeTicks());
}
virtual void SetUp() {
@@ -111,7 +109,7 @@ class AudioEncoderTest : public ::testing::TestWithParam<TestScenario> {
virtual ~AudioEncoderTest() {}
- void RunTestForCodec(transport::AudioCodec codec) {
+ void RunTestForCodec(Codec codec) {
const TestScenario& scenario = GetParam();
SCOPED_TRACE(::testing::Message() << "Durations: " << scenario.ToString());
@@ -144,18 +142,10 @@ class AudioEncoderTest : public ::testing::TestWithParam<TestScenario> {
}
private:
- void CreateObjectsForCodec(transport::AudioCodec codec) {
- AudioSenderConfig audio_config;
- audio_config.codec = codec;
- audio_config.use_external_encoder = false;
- audio_config.frequency = kDefaultAudioSamplingRate;
- audio_config.channels = 2;
- audio_config.bitrate = kDefaultAudioEncoderBitrate;
- audio_config.rtp_config.payload_type = 127;
-
+ void CreateObjectsForCodec(Codec codec) {
audio_bus_factory_.reset(
- new TestAudioBusFactory(audio_config.channels,
- audio_config.frequency,
+ new TestAudioBusFactory(kNumChannels,
+ kDefaultAudioSamplingRate,
TestAudioBusFactory::kMiddleANoteFreq,
0.5f));
@@ -163,7 +153,10 @@ class AudioEncoderTest : public ::testing::TestWithParam<TestScenario> {
audio_encoder_.reset(new AudioEncoder(
cast_environment_,
- audio_config,
+ kNumChannels,
+ kDefaultAudioSamplingRate,
+ kDefaultAudioEncoderBitrate,
+ codec,
base::Bind(&TestEncodedAudioFrameReceiver::FrameEncoded,
base::Unretained(receiver_.get()))));
}
@@ -178,9 +171,13 @@ class AudioEncoderTest : public ::testing::TestWithParam<TestScenario> {
DISALLOW_COPY_AND_ASSIGN(AudioEncoderTest);
};
-TEST_P(AudioEncoderTest, EncodeOpus) { RunTestForCodec(transport::kOpus); }
+TEST_P(AudioEncoderTest, EncodeOpus) {
+ RunTestForCodec(CODEC_AUDIO_OPUS);
+}
-TEST_P(AudioEncoderTest, EncodePcm16) { RunTestForCodec(transport::kPcm16); }
+TEST_P(AudioEncoderTest, EncodePcm16) {
+ RunTestForCodec(CODEC_AUDIO_PCM16);
+}
static const int64 kOneCall_3Millis[] = {3};
static const int64 kOneCall_10Millis[] = {10};
diff --git a/media/cast/audio_sender/audio_sender.cc b/media/cast/sender/audio_sender.cc
index 878f3456c8..7c7c696125 100644
--- a/media/cast/audio_sender/audio_sender.cc
+++ b/media/cast/sender/audio_sender.cc
@@ -1,19 +1,20 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/cast/audio_sender/audio_sender.h"
+#include "media/cast/sender/audio_sender.h"
#include "base/bind.h"
#include "base/logging.h"
#include "base/message_loop/message_loop.h"
-#include "media/cast/audio_sender/audio_encoder.h"
#include "media/cast/cast_defines.h"
-#include "media/cast/rtcp/rtcp_defines.h"
-#include "media/cast/transport/cast_transport_config.h"
+#include "media/cast/net/cast_transport_config.h"
+#include "media/cast/net/rtcp/rtcp_defines.h"
+#include "media/cast/sender/audio_encoder.h"
namespace media {
namespace cast {
+namespace {
const int kNumAggressiveReportsSentAtStart = 100;
const int kMinSchedulingDelayMs = 1;
@@ -23,18 +24,25 @@ const int kMinSchedulingDelayMs = 1;
// well.
const int kAudioFrameRate = 100;
+// Helper function to compute the maximum number of unacked audio frames to send.
+int GetMaxUnackedFrames(base::TimeDelta target_delay) {
+ // As long as it doesn't go over |kMaxUnackedFrames|, it is okay to send more
+ // audio data than the target delay would suggest. Audio packets are tiny and
+ // the receiver can drop any one of them.
+ // We send up to three times the target delay's worth of audio frames.
+ int frames =
+ 1 + 3 * target_delay * kAudioFrameRate / base::TimeDelta::FromSeconds(1);
+ return std::min(kMaxUnackedFrames, frames);
+}
+} // namespace
+
AudioSender::AudioSender(scoped_refptr<CastEnvironment> cast_environment,
const AudioSenderConfig& audio_config,
- transport::CastTransportSender* const transport_sender)
+ CastTransportSender* const transport_sender)
: cast_environment_(cast_environment),
- target_playout_delay_(base::TimeDelta::FromMilliseconds(
- audio_config.rtp_config.max_delay_ms)),
+ target_playout_delay_(audio_config.target_playout_delay),
transport_sender_(transport_sender),
- max_unacked_frames_(
- std::min(kMaxUnackedFrames,
- 1 + static_cast<int>(target_playout_delay_ *
- kAudioFrameRate /
- base::TimeDelta::FromSeconds(1)))),
+ max_unacked_frames_(GetMaxUnackedFrames(target_playout_delay_)),
configured_encoder_bitrate_(audio_config.bitrate),
rtcp_(cast_environment,
this,
@@ -43,7 +51,7 @@ AudioSender::AudioSender(scoped_refptr<CastEnvironment> cast_environment,
NULL,
audio_config.rtcp_mode,
base::TimeDelta::FromMilliseconds(audio_config.rtcp_interval),
- audio_config.rtp_config.ssrc,
+ audio_config.ssrc,
audio_config.incoming_feedback_ssrc,
audio_config.rtcp_c_name,
AUDIO_EVENT),
@@ -60,7 +68,10 @@ AudioSender::AudioSender(scoped_refptr<CastEnvironment> cast_environment,
if (!audio_config.use_external_encoder) {
audio_encoder_.reset(
new AudioEncoder(cast_environment,
- audio_config,
+ audio_config.channels,
+ audio_config.frequency,
+ audio_config.bitrate,
+ audio_config.codec,
base::Bind(&AudioSender::SendEncodedAudioFrame,
weak_factory_.GetWeakPtr())));
cast_initialization_status_ = audio_encoder_->InitializationResult();
@@ -69,12 +80,14 @@ AudioSender::AudioSender(scoped_refptr<CastEnvironment> cast_environment,
cast_initialization_status_ = STATUS_AUDIO_UNINITIALIZED;
}
- media::cast::transport::CastTransportAudioConfig transport_config;
- transport_config.codec = audio_config.codec;
- transport_config.rtp.config = audio_config.rtp_config;
- transport_config.frequency = audio_config.frequency;
- transport_config.channels = audio_config.channels;
- transport_config.rtp.max_outstanding_frames = max_unacked_frames_;
+ media::cast::CastTransportRtpConfig transport_config;
+ transport_config.ssrc = audio_config.ssrc;
+ transport_config.rtp_payload_type = audio_config.rtp_payload_type;
+ // TODO(miu): AudioSender needs to be like VideoSender in providing an upper
+ // limit on the number of in-flight frames.
+ transport_config.stored_frames = max_unacked_frames_;
+ transport_config.aes_key = audio_config.aes_key;
+ transport_config.aes_iv_mask = audio_config.aes_iv_mask;
transport_sender_->InitializeAudio(transport_config);
rtcp_.SetCastReceiverEventHistorySize(kReceiverRtcpEventHistorySize);
@@ -102,7 +115,7 @@ void AudioSender::InsertAudio(scoped_ptr<AudioBus> audio_bus,
}
void AudioSender::SendEncodedAudioFrame(
- scoped_ptr<transport::EncodedFrame> encoded_frame) {
+ scoped_ptr<EncodedFrame> encoded_frame) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
const uint32 frame_id = encoded_frame->frame_id;
@@ -121,7 +134,7 @@ void AudioSender::SendEncodedAudioFrame(
cast_environment_->Logging()->InsertEncodedFrameEvent(
last_send_time_, FRAME_ENCODED, AUDIO_EVENT, encoded_frame->rtp_timestamp,
frame_id, static_cast<int>(encoded_frame->data.size()),
- encoded_frame->dependency == transport::EncodedFrame::KEY,
+ encoded_frame->dependency == EncodedFrame::KEY,
configured_encoder_bitrate_);
// Only use lowest 8 bits as key.
frame_id_to_rtp_timestamp_[frame_id & 0xff] = encoded_frame->rtp_timestamp;
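
Editor's note: the GetMaxUnackedFrames() helper added in audio_sender.cc above caps the
audio frames in flight at roughly three times the target playout delay. A minimal
standalone sketch of the same arithmetic, using plain integers instead of
base::TimeDelta; the kMaxUnackedFrames value below is a placeholder for illustration,
the real constant is defined elsewhere in the cast code:

  #include <algorithm>
  #include <iostream>

  int main() {
    const int kAudioFrameRate = 100;    // frames per second, as in audio_sender.cc
    const int kMaxUnackedFrames = 120;  // placeholder value for illustration only
    const int target_delay_ms = 100;    // example: a 100 ms target playout delay
    // 1 + 3 * (0.1 s) * (100 frames/s) = 31 frames of headroom for a 100 ms delay.
    const int frames = 1 + 3 * target_delay_ms * kAudioFrameRate / 1000;
    std::cout << std::min(kMaxUnackedFrames, frames) << std::endl;  // prints 31
  }
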
diff --git a/media/cast/audio_sender/audio_sender.h b/media/cast/sender/audio_sender.h
index 80cf8a4e9e..efaa2b3d03 100644
--- a/media/cast/audio_sender/audio_sender.h
+++ b/media/cast/sender/audio_sender.h
@@ -1,9 +1,9 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_CAST_AUDIO_SENDER_H_
-#define MEDIA_CAST_AUDIO_SENDER_H_
+#ifndef MEDIA_CAST_SENDER_AUDIO_SENDER_H_
+#define MEDIA_CAST_SENDER_AUDIO_SENDER_H_
#include "base/callback.h"
#include "base/memory/ref_counted.h"
@@ -16,8 +16,8 @@
#include "media/cast/cast_config.h"
#include "media/cast/cast_environment.h"
#include "media/cast/logging/logging_defines.h"
-#include "media/cast/rtcp/rtcp.h"
-#include "media/cast/rtp_timestamp_helper.h"
+#include "media/cast/net/rtcp/rtcp.h"
+#include "media/cast/sender/rtp_timestamp_helper.h"
namespace media {
namespace cast {
@@ -36,7 +36,7 @@ class AudioSender : public RtcpSenderFeedback,
public:
AudioSender(scoped_refptr<CastEnvironment> cast_environment,
const AudioSenderConfig& audio_config,
- transport::CastTransportSender* const transport_sender);
+ CastTransportSender* const transport_sender);
virtual ~AudioSender();
@@ -82,7 +82,7 @@ class AudioSender : public RtcpSenderFeedback,
bool AreTooManyFramesInFlight() const;
// Called by the |audio_encoder_| with the next EncodedFrame to send.
- void SendEncodedAudioFrame(scoped_ptr<transport::EncodedFrame> audio_frame);
+ void SendEncodedAudioFrame(scoped_ptr<EncodedFrame> audio_frame);
const scoped_refptr<CastEnvironment> cast_environment_;
@@ -99,7 +99,7 @@ class AudioSender : public RtcpSenderFeedback,
// process to the browser process over IPC, with the browser process being
// responsible for "packetizing" the frames and pushing packets into the
// network layer.
- transport::CastTransportSender* const transport_sender_;
+ CastTransportSender* const transport_sender_;
// Maximum number of outstanding frames before the encoding and sending of
// new frames shall halt.
@@ -159,4 +159,4 @@ class AudioSender : public RtcpSenderFeedback,
} // namespace cast
} // namespace media
-#endif // MEDIA_CAST_AUDIO_SENDER_H_
+#endif // MEDIA_CAST_SENDER_AUDIO_SENDER_H_
diff --git a/media/cast/audio_sender/audio_sender_unittest.cc b/media/cast/sender/audio_sender_unittest.cc
index 51edd49602..ab923811aa 100644
--- a/media/cast/audio_sender/audio_sender_unittest.cc
+++ b/media/cast/sender/audio_sender_unittest.cc
@@ -1,4 +1,4 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -9,24 +9,24 @@
#include "base/memory/scoped_ptr.h"
#include "base/test/simple_test_tick_clock.h"
#include "media/base/media.h"
-#include "media/cast/audio_sender/audio_sender.h"
#include "media/cast/cast_config.h"
#include "media/cast/cast_environment.h"
-#include "media/cast/rtcp/rtcp.h"
+#include "media/cast/net/cast_transport_config.h"
+#include "media/cast/net/cast_transport_sender_impl.h"
+#include "media/cast/net/rtcp/rtcp.h"
+#include "media/cast/sender/audio_sender.h"
#include "media/cast/test/fake_single_thread_task_runner.h"
#include "media/cast/test/utility/audio_utility.h"
-#include "media/cast/transport/cast_transport_config.h"
-#include "media/cast/transport/cast_transport_sender_impl.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace media {
namespace cast {
-class TestPacketSender : public transport::PacketSender {
+class TestPacketSender : public PacketSender {
public:
TestPacketSender() : number_of_rtp_packets_(0), number_of_rtcp_packets_(0) {}
- virtual bool SendPacket(transport::PacketRef packet,
+ virtual bool SendPacket(PacketRef packet,
const base::Closure& cb) OVERRIDE {
if (Rtcp::IsRtcpPacket(&packet->data[0], packet->data.size())) {
++number_of_rtcp_packets_;
@@ -65,21 +65,21 @@ class AudioSenderTest : public ::testing::Test {
task_runner_,
task_runner_,
task_runner_);
- audio_config_.codec = transport::kOpus;
+ audio_config_.codec = CODEC_AUDIO_OPUS;
audio_config_.use_external_encoder = false;
audio_config_.frequency = kDefaultAudioSamplingRate;
audio_config_.channels = 2;
audio_config_.bitrate = kDefaultAudioEncoderBitrate;
- audio_config_.rtp_config.payload_type = 127;
+ audio_config_.rtp_payload_type = 127;
net::IPEndPoint dummy_endpoint;
- transport_sender_.reset(new transport::CastTransportSenderImpl(
+ transport_sender_.reset(new CastTransportSenderImpl(
NULL,
testing_clock_,
dummy_endpoint,
base::Bind(&UpdateCastTransportStatus),
- transport::BulkRawEventsCallback(),
+ BulkRawEventsCallback(),
base::TimeDelta(),
task_runner_,
&transport_));
@@ -90,13 +90,13 @@ class AudioSenderTest : public ::testing::Test {
virtual ~AudioSenderTest() {}
- static void UpdateCastTransportStatus(transport::CastTransportStatus status) {
- EXPECT_EQ(transport::TRANSPORT_AUDIO_INITIALIZED, status);
+ static void UpdateCastTransportStatus(CastTransportStatus status) {
+ EXPECT_EQ(TRANSPORT_AUDIO_INITIALIZED, status);
}
base::SimpleTestTickClock* testing_clock_; // Owned by CastEnvironment.
TestPacketSender transport_;
- scoped_ptr<transport::CastTransportSenderImpl> transport_sender_;
+ scoped_ptr<CastTransportSenderImpl> transport_sender_;
scoped_refptr<test::FakeSingleThreadTaskRunner> task_runner_;
scoped_ptr<AudioSender> audio_sender_;
scoped_refptr<CastEnvironment> cast_environment_;
diff --git a/media/cast/congestion_control/congestion_control.cc b/media/cast/sender/congestion_control.cc
index d24e0ac3d0..70fcfe9dc9 100644
--- a/media/cast/congestion_control/congestion_control.cc
+++ b/media/cast/sender/congestion_control.cc
@@ -1,4 +1,4 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -13,7 +13,7 @@
// If we estimate that our virtual buffer is mostly empty, we try to use
// more bandwidth than our recent usage, otherwise we use less.
-#include "media/cast/congestion_control/congestion_control.h"
+#include "media/cast/sender/congestion_control.h"
#include "base/logging.h"
#include "media/cast/cast_config.h"
@@ -59,8 +59,7 @@ CongestionControl::CongestionControl(base::TickClock* clock,
CongestionControl::~CongestionControl() {}
void CongestionControl::UpdateRtt(base::TimeDelta rtt) {
- rtt_ = base::TimeDelta::FromSecondsD(
- (rtt_.InSecondsF() * 7 + rtt.InSecondsF()) / 8);
+ rtt_ = (7 * rtt_ + rtt) / 8;
}
// Calculate how much "dead air" there is between two frames.
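
Editor's note: the UpdateRtt() change above swaps floating-point smoothing for integer
TimeDelta arithmetic; both forms compute the same exponentially weighted moving average
that keeps 7/8 of the previous estimate and blends in 1/8 of each new sample. A
standalone sketch of that filter using std::chrono rather than Chromium's
base::TimeDelta:

  #include <chrono>
  #include <iostream>

  int main() {
    using std::chrono::milliseconds;
    milliseconds smoothed_rtt(100);  // prior estimate
    const milliseconds samples[] = {milliseconds(60), milliseconds(180), milliseconds(90)};
    for (const milliseconds& sample : samples) {
      // Same update as CongestionControl::UpdateRtt(): rtt_ = (7 * rtt_ + rtt) / 8.
      smoothed_rtt = (7 * smoothed_rtt + sample) / 8;
      std::cout << smoothed_rtt.count() << " ms" << std::endl;
    }
  }
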
diff --git a/media/cast/congestion_control/congestion_control.h b/media/cast/sender/congestion_control.h
index 54622ab114..8537037c71 100644
--- a/media/cast/congestion_control/congestion_control.h
+++ b/media/cast/sender/congestion_control.h
@@ -1,4 +1,4 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -54,7 +54,7 @@ class CongestionControl {
// Get the FrameStats for a given |frame_id|.
// Note: Older FrameStats will be removed automatically.
FrameStats* GetFrameStats(uint32 frame_id);
- // Calculata safe bitrate. This is based on how much we've been
+ // Calculate a safe bitrate. This is based on how much we've been
// sending in the past.
double CalculateSafeBitrate();
@@ -63,7 +63,7 @@ class CongestionControl {
base::TimeTicks EstimatedAckTime(uint32 frame_id, double bitrate);
// Calculate when we start sending the data for a given frame.
// This is done by calculating when we were done sending the previous
- // frame, but obvoiusly can't be less than |sent_time| (if known).
+ // frame, but obviously can't be less than |sent_time| (if known).
base::TimeTicks EstimatedSendingTime(uint32 frame_id, double bitrate);
base::TickClock* const clock_; // Not owned by this class.
diff --git a/media/cast/congestion_control/congestion_control_unittest.cc b/media/cast/sender/congestion_control_unittest.cc
index 5745eab21d..afdce0031f 100644
--- a/media/cast/congestion_control/congestion_control_unittest.cc
+++ b/media/cast/sender/congestion_control_unittest.cc
@@ -1,12 +1,13 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <stdint.h>
+#include "base/bind.h"
#include "base/test/simple_test_tick_clock.h"
#include "media/cast/cast_defines.h"
-#include "media/cast/congestion_control/congestion_control.h"
+#include "media/cast/sender/congestion_control.h"
#include "media/cast/test/fake_single_thread_task_runner.h"
#include "testing/gtest/include/gtest/gtest.h"
diff --git a/media/cast/video_sender/external_video_encoder.cc b/media/cast/sender/external_video_encoder.cc
index ca30bcd47a..e3abecd407 100644
--- a/media/cast/video_sender/external_video_encoder.cc
+++ b/media/cast/sender/external_video_encoder.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/cast/video_sender/external_video_encoder.h"
+#include "media/cast/sender/external_video_encoder.h"
#include "base/bind.h"
#include "base/logging.h"
@@ -13,7 +13,7 @@
#include "media/base/video_util.h"
#include "media/cast/cast_defines.h"
#include "media/cast/logging/logging_defines.h"
-#include "media/cast/transport/cast_transport_config.h"
+#include "media/cast/net/cast_transport_config.h"
#include "media/video/video_encode_accelerator.h"
namespace media {
@@ -99,20 +99,19 @@ class LocalVideoEncodeAcceleratorClient
VideoCodecProfile output_profile = media::VIDEO_CODEC_PROFILE_UNKNOWN;
switch (video_config.codec) {
- case transport::kVp8:
+ case CODEC_VIDEO_VP8:
output_profile = media::VP8PROFILE_MAIN;
break;
- case transport::kH264:
+ case CODEC_VIDEO_H264:
output_profile = media::H264PROFILE_MAIN;
break;
- case transport::kFakeSoftwareVideo:
+ case CODEC_VIDEO_FAKE:
NOTREACHED() << "Fake software video encoder cannot be external";
break;
- case transport::kUnknownVideoCodec:
- NOTREACHED() << "Video codec not specified";
+ default:
+ NOTREACHED() << "Video codec not specified or not supported";
break;
}
- codec_ = video_config.codec;
max_frame_rate_ = video_config.max_frame_rate;
if (!video_encode_accelerator_->Initialize(
@@ -221,10 +220,10 @@ class LocalVideoEncodeAcceleratorClient
stream_header_.append(static_cast<const char*>(output_buffer->memory()),
payload_size);
} else if (!encoded_frame_data_storage_.empty()) {
- scoped_ptr<transport::EncodedFrame> encoded_frame(
- new transport::EncodedFrame());
- encoded_frame->dependency = key_frame ? transport::EncodedFrame::KEY :
- transport::EncodedFrame::DEPENDENT;
+ scoped_ptr<EncodedFrame> encoded_frame(
+ new EncodedFrame());
+ encoded_frame->dependency = key_frame ? EncodedFrame::KEY :
+ EncodedFrame::DEPENDENT;
encoded_frame->frame_id = ++last_encoded_frame_id_;
if (key_frame)
encoded_frame->referenced_frame_id = encoded_frame->frame_id;
@@ -313,7 +312,6 @@ class LocalVideoEncodeAcceleratorClient
const CreateVideoEncodeMemoryCallback create_video_encode_memory_cb_;
const base::WeakPtr<ExternalVideoEncoder> weak_owner_;
int max_frame_rate_;
- transport::VideoCodec codec_;
uint32 last_encoded_frame_id_;
bool key_frame_encountered_;
std::string stream_header_;
diff --git a/media/cast/video_sender/external_video_encoder.h b/media/cast/sender/external_video_encoder.h
index 29fe0c5fcd..84de7f08f4 100644
--- a/media/cast/video_sender/external_video_encoder.h
+++ b/media/cast/sender/external_video_encoder.h
@@ -2,14 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_CAST_VIDEO_SENDER_EXTERNAL_VIDEO_ENCODER_H_
-#define MEDIA_CAST_VIDEO_SENDER_EXTERNAL_VIDEO_ENCODER_H_
+#ifndef MEDIA_CAST_SENDER_EXTERNAL_VIDEO_ENCODER_H_
+#define MEDIA_CAST_SENDER_EXTERNAL_VIDEO_ENCODER_H_
#include "base/memory/scoped_ptr.h"
#include "base/memory/weak_ptr.h"
#include "media/cast/cast_config.h"
#include "media/cast/cast_environment.h"
-#include "media/cast/video_sender/video_encoder.h"
+#include "media/cast/sender/video_encoder.h"
#include "media/video/video_encode_accelerator.h"
namespace media {
@@ -83,4 +83,4 @@ class ExternalVideoEncoder : public VideoEncoder {
} // namespace cast
} // namespace media
-#endif // MEDIA_CAST_VIDEO_SENDER_EXTERNAL_VIDEO_ENCODER_H_
+#endif // MEDIA_CAST_SENDER_EXTERNAL_VIDEO_ENCODER_H_
diff --git a/media/cast/video_sender/external_video_encoder_unittest.cc b/media/cast/sender/external_video_encoder_unittest.cc
index 853258ce30..0b446f866e 100644
--- a/media/cast/video_sender/external_video_encoder_unittest.cc
+++ b/media/cast/sender/external_video_encoder_unittest.cc
@@ -10,10 +10,10 @@
#include "media/base/video_frame.h"
#include "media/cast/cast_defines.h"
#include "media/cast/cast_environment.h"
+#include "media/cast/sender/external_video_encoder.h"
#include "media/cast/test/fake_single_thread_task_runner.h"
#include "media/cast/test/fake_video_encode_accelerator.h"
#include "media/cast/test/utility/video_utility.h"
-#include "media/cast/video_sender/external_video_encoder.h"
#include "testing/gmock/include/gmock/gmock.h"
namespace media {
@@ -54,11 +54,11 @@ class TestVideoEncoderCallback
}
void DeliverEncodedVideoFrame(
- scoped_ptr<transport::EncodedFrame> encoded_frame) {
+ scoped_ptr<EncodedFrame> encoded_frame) {
if (expected_frame_id_ == expected_last_referenced_frame_id_) {
- EXPECT_EQ(transport::EncodedFrame::KEY, encoded_frame->dependency);
+ EXPECT_EQ(EncodedFrame::KEY, encoded_frame->dependency);
} else {
- EXPECT_EQ(transport::EncodedFrame::DEPENDENT,
+ EXPECT_EQ(EncodedFrame::DEPENDENT,
encoded_frame->dependency);
}
EXPECT_EQ(expected_frame_id_, encoded_frame->frame_id);
@@ -86,9 +86,9 @@ class ExternalVideoEncoderTest : public ::testing::Test {
protected:
ExternalVideoEncoderTest()
: test_video_encoder_callback_(new TestVideoEncoderCallback()) {
- video_config_.rtp_config.ssrc = 1;
+ video_config_.ssrc = 1;
video_config_.incoming_feedback_ssrc = 2;
- video_config_.rtp_config.payload_type = 127;
+ video_config_.rtp_payload_type = 127;
video_config_.use_external_encoder = true;
video_config_.width = 320;
video_config_.height = 240;
@@ -99,7 +99,7 @@ class ExternalVideoEncoderTest : public ::testing::Test {
video_config_.min_qp = 0;
video_config_.max_frame_rate = 30;
video_config_.max_number_of_video_buffers_used = 3;
- video_config_.codec = transport::kVp8;
+ video_config_.codec = CODEC_VIDEO_VP8;
gfx::Size size(video_config_.width, video_config_.height);
video_frame_ = media::VideoFrame::CreateFrame(
VideoFrame::I420, size, gfx::Rect(size), size, base::TimeDelta());
diff --git a/media/cast/video_sender/fake_software_video_encoder.cc b/media/cast/sender/fake_software_video_encoder.cc
index 7c5c952641..bd96f78c89 100644
--- a/media/cast/video_sender/fake_software_video_encoder.cc
+++ b/media/cast/sender/fake_software_video_encoder.cc
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/cast/video_sender/fake_software_video_encoder.h"
+#include "media/cast/sender/fake_software_video_encoder.h"
#include "base/json/json_writer.h"
#include "base/values.h"
-#include "media/cast/transport/cast_transport_config.h"
+#include "media/cast/net/cast_transport_config.h"
#ifndef OFFICIAL_BUILD
@@ -28,26 +28,26 @@ void FakeSoftwareVideoEncoder::Initialize() {}
bool FakeSoftwareVideoEncoder::Encode(
const scoped_refptr<media::VideoFrame>& video_frame,
- transport::EncodedFrame* encoded_image) {
+ EncodedFrame* encoded_image) {
encoded_image->frame_id = frame_id_++;
if (next_frame_is_key_) {
- encoded_image->dependency = transport::EncodedFrame::KEY;
+ encoded_image->dependency = EncodedFrame::KEY;
encoded_image->referenced_frame_id = encoded_image->frame_id;
next_frame_is_key_ = false;
} else {
- encoded_image->dependency = transport::EncodedFrame::DEPENDENT;
+ encoded_image->dependency = EncodedFrame::DEPENDENT;
encoded_image->referenced_frame_id = encoded_image->frame_id - 1;
}
base::DictionaryValue values;
values.SetBoolean("key",
- encoded_image->dependency == transport::EncodedFrame::KEY);
+ encoded_image->dependency == EncodedFrame::KEY);
values.SetInteger("ref", encoded_image->referenced_frame_id);
values.SetInteger("id", encoded_image->frame_id);
values.SetInteger("size", frame_size_);
base::JSONWriter::Write(&values, &encoded_image->data);
encoded_image->data.resize(
- std::max<size_t>(encoded_image->data.size(), frame_size_));
+ std::max<size_t>(encoded_image->data.size(), frame_size_), ' ');
return true;
}
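
Editor's note: FakeSoftwareVideoEncoder above writes a JSON description of each frame
and pads it to |frame_size_|; the part worth illustrating is the dependency chain, where
a key frame references its own frame_id and every other frame references the previous
one. A standalone sketch of that referencing pattern (MiniFrame is an illustrative
stand-in, not the real media::cast::EncodedFrame):

  #include <cstdint>
  #include <iostream>

  struct MiniFrame {
    enum Dependency { KEY, DEPENDENT };
    Dependency dependency;
    uint32_t frame_id;
    uint32_t referenced_frame_id;
  };

  int main() {
    uint32_t next_id = 0;
    bool next_frame_is_key = true;
    for (int i = 0; i < 4; ++i) {
      MiniFrame frame;
      frame.frame_id = next_id++;
      if (next_frame_is_key) {
        frame.dependency = MiniFrame::KEY;
        frame.referenced_frame_id = frame.frame_id;  // a key frame stands alone
        next_frame_is_key = false;
      } else {
        frame.dependency = MiniFrame::DEPENDENT;
        frame.referenced_frame_id = frame.frame_id - 1;  // depends on the prior frame
      }
      std::cout << frame.frame_id << " -> " << frame.referenced_frame_id << std::endl;
    }
  }
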
diff --git a/media/cast/video_sender/fake_software_video_encoder.h b/media/cast/sender/fake_software_video_encoder.h
index 0eb88ddfe1..5491ae0cc8 100644
--- a/media/cast/video_sender/fake_software_video_encoder.h
+++ b/media/cast/sender/fake_software_video_encoder.h
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_CAST_VIDEO_SENDER_FAKE_SOFTWARE_VIDEO_ENCODER_H_
-#define MEDIA_CAST_VIDEO_SENDER_FAKE_SOFTWARE_VIDEO_ENCODER_H_
+#ifndef MEDIA_CAST_SENDER_FAKE_SOFTWARE_VIDEO_ENCODER_H_
+#define MEDIA_CAST_SENDER_FAKE_SOFTWARE_VIDEO_ENCODER_H_
#include "media/cast/cast_config.h"
-#include "media/cast/video_sender/software_video_encoder.h"
+#include "media/cast/sender/software_video_encoder.h"
namespace media {
namespace cast {
@@ -19,7 +19,7 @@ class FakeSoftwareVideoEncoder : public SoftwareVideoEncoder {
// SoftwareVideoEncoder implementations.
virtual void Initialize() OVERRIDE;
virtual bool Encode(const scoped_refptr<media::VideoFrame>& video_frame,
- transport::EncodedFrame* encoded_image) OVERRIDE;
+ EncodedFrame* encoded_image) OVERRIDE;
virtual void UpdateRates(uint32 new_bitrate) OVERRIDE;
virtual void GenerateKeyFrame() OVERRIDE;
virtual void LatestFrameIdToReference(uint32 frame_id) OVERRIDE;
@@ -35,4 +35,4 @@ class FakeSoftwareVideoEncoder : public SoftwareVideoEncoder {
} // namespace cast
} // namespace media
-#endif // MEDIA_CAST_VIDEO_SENDER_FAKE_SOFTWARE_VIDEO_ENCODER_H_
+#endif // MEDIA_CAST_SENDER_FAKE_SOFTWARE_VIDEO_ENCODER_H_
diff --git a/media/cast/rtp_timestamp_helper.cc b/media/cast/sender/rtp_timestamp_helper.cc
index 3349e7b33f..ea0c35c66f 100644
--- a/media/cast/rtp_timestamp_helper.cc
+++ b/media/cast/sender/rtp_timestamp_helper.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/cast/rtp_timestamp_helper.h"
+#include "media/cast/sender/rtp_timestamp_helper.h"
namespace media {
namespace cast {
diff --git a/media/cast/rtp_timestamp_helper.h b/media/cast/sender/rtp_timestamp_helper.h
index b9c650c506..8f56681dac 100644
--- a/media/cast/rtp_timestamp_helper.h
+++ b/media/cast/sender/rtp_timestamp_helper.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_CAST_RTP_TIMESTAMP_HELPER_H_
-#define MEDIA_CAST_RTP_TIMESTAMP_HELPER_H_
+#ifndef MEDIA_CAST_SENDER_RTP_TIMESTAMP_HELPER_H_
+#define MEDIA_CAST_SENDER_RTP_TIMESTAMP_HELPER_H_
#include "base/basictypes.h"
#include "base/time/time.h"
@@ -38,4 +38,4 @@ class RtpTimestampHelper {
} // namespace cast
} // namespace media
-#endif // MEDIA_CAST_CAST_DEFINES_H_
+#endif // MEDIA_CAST_SENDER_RTP_TIMESTAMP_HELPER_H_
diff --git a/media/cast/video_sender/software_video_encoder.h b/media/cast/sender/software_video_encoder.h
index f1bf6f6331..16c8cd3473 100644
--- a/media/cast/video_sender/software_video_encoder.h
+++ b/media/cast/sender/software_video_encoder.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_CAST_VIDEO_SENDER_SOFTWARE_VIDEO_ENCODER_H_
-#define MEDIA_CAST_VIDEO_SENDER_SOFTWARE_VIDEO_ENCODER_H_
+#ifndef MEDIA_CAST_SENDER_SOFTWARE_VIDEO_ENCODER_H_
+#define MEDIA_CAST_SENDER_SOFTWARE_VIDEO_ENCODER_H_
#include "base/basictypes.h"
#include "base/memory/ref_counted.h"
@@ -14,9 +14,7 @@ class VideoFrame;
namespace media {
namespace cast {
-namespace transport {
struct EncodedFrame;
-} // namespace transport
class SoftwareVideoEncoder {
public:
@@ -28,7 +26,7 @@ class SoftwareVideoEncoder {
// Encode a raw image (as a part of a video stream).
virtual bool Encode(const scoped_refptr<media::VideoFrame>& video_frame,
- transport::EncodedFrame* encoded_image) = 0;
+ EncodedFrame* encoded_image) = 0;
// Update the encoder with a new target bit rate.
virtual void UpdateRates(uint32 new_bitrate) = 0;
@@ -43,4 +41,4 @@ class SoftwareVideoEncoder {
} // namespace cast
} // namespace media
-#endif // MEDIA_CAST_VIDEO_SENDER_SOFTWARE_VIDEO_ENCODER_H_
+#endif // MEDIA_CAST_SENDER_SOFTWARE_VIDEO_ENCODER_H_
diff --git a/media/cast/video_sender/video_encoder.h b/media/cast/sender/video_encoder.h
index c7b1049ce6..d788c7b2aa 100644
--- a/media/cast/video_sender/video_encoder.h
+++ b/media/cast/sender/video_encoder.h
@@ -1,9 +1,9 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_CAST_VIDEO_SENDER_VIDEO_ENCODER_H_
-#define MEDIA_CAST_VIDEO_SENDER_VIDEO_ENCODER_H_
+#ifndef MEDIA_CAST_SENDER_VIDEO_ENCODER_H_
+#define MEDIA_CAST_SENDER_VIDEO_ENCODER_H_
#include "base/callback.h"
#include "base/memory/ref_counted.h"
@@ -20,7 +20,7 @@ namespace cast {
// All these functions are called from the main cast thread.
class VideoEncoder {
public:
- typedef base::Callback<void(scoped_ptr<transport::EncodedFrame>)>
+ typedef base::Callback<void(scoped_ptr<EncodedFrame>)>
FrameEncodedCallback;
virtual ~VideoEncoder() {}
@@ -48,4 +48,4 @@ class VideoEncoder {
} // namespace cast
} // namespace media
-#endif // MEDIA_CAST_VIDEO_SENDER_VIDEO_ENCODER_H_
+#endif // MEDIA_CAST_SENDER_VIDEO_ENCODER_H_
diff --git a/media/cast/video_sender/video_encoder_impl.cc b/media/cast/sender/video_encoder_impl.cc
index b90ef0f07e..d21649784e 100644
--- a/media/cast/video_sender/video_encoder_impl.cc
+++ b/media/cast/sender/video_encoder_impl.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/cast/video_sender/video_encoder_impl.h"
+#include "media/cast/sender/video_encoder_impl.h"
#include "base/bind.h"
#include "base/bind_helpers.h"
@@ -11,8 +11,8 @@
#include "base/message_loop/message_loop.h"
#include "media/base/video_frame.h"
#include "media/cast/cast_defines.h"
-#include "media/cast/video_sender/codecs/vp8/vp8_encoder.h"
-#include "media/cast/video_sender/fake_software_video_encoder.h"
+#include "media/cast/sender/fake_software_video_encoder.h"
+#include "media/cast/sender/vp8_encoder.h"
namespace media {
namespace cast {
@@ -43,8 +43,8 @@ void EncodeVideoFrameOnEncoderThread(
dynamic_config.latest_frame_id_to_reference);
encoder->UpdateRates(dynamic_config.bit_rate);
- scoped_ptr<transport::EncodedFrame> encoded_frame(
- new transport::EncodedFrame());
+ scoped_ptr<EncodedFrame> encoded_frame(
+ new EncodedFrame());
if (!encoder->Encode(video_frame, encoded_frame.get())) {
VLOG(1) << "Encoding failed";
return;
@@ -53,7 +53,7 @@ void EncodeVideoFrameOnEncoderThread(
VLOG(1) << "Encoding resulted in an empty frame";
return;
}
- encoded_frame->rtp_timestamp = transport::GetVideoRtpTimestamp(capture_time);
+ encoded_frame->rtp_timestamp = GetVideoRtpTimestamp(capture_time);
encoded_frame->reference_time = capture_time;
environment->PostTask(
@@ -68,9 +68,8 @@ VideoEncoderImpl::VideoEncoderImpl(
scoped_refptr<CastEnvironment> cast_environment,
const VideoSenderConfig& video_config,
int max_unacked_frames)
- : video_config_(video_config),
- cast_environment_(cast_environment) {
- if (video_config.codec == transport::kVp8) {
+ : cast_environment_(cast_environment) {
+ if (video_config.codec == CODEC_VIDEO_VP8) {
encoder_.reset(new Vp8Encoder(video_config, max_unacked_frames));
cast_environment_->PostTask(CastEnvironment::VIDEO,
FROM_HERE,
@@ -78,7 +77,7 @@ VideoEncoderImpl::VideoEncoderImpl(
cast_environment,
encoder_.get()));
#ifndef OFFICIAL_BUILD
- } else if (video_config.codec == transport::kFakeSoftwareVideo) {
+ } else if (video_config.codec == CODEC_VIDEO_FAKE) {
encoder_.reset(new FakeSoftwareVideoEncoder(video_config));
#endif
} else {
diff --git a/media/cast/video_sender/video_encoder_impl.h b/media/cast/sender/video_encoder_impl.h
index b34b440c93..54a380265c 100644
--- a/media/cast/video_sender/video_encoder_impl.h
+++ b/media/cast/sender/video_encoder_impl.h
@@ -2,14 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_CAST_VIDEO_SENDER_VIDEO_ENCODER_IMPL_H_
-#define MEDIA_CAST_VIDEO_SENDER_VIDEO_ENCODER_IMPL_H_
+#ifndef MEDIA_CAST_SENDER_VIDEO_ENCODER_IMPL_H_
+#define MEDIA_CAST_SENDER_VIDEO_ENCODER_IMPL_H_
#include "base/memory/scoped_ptr.h"
#include "media/cast/cast_config.h"
#include "media/cast/cast_environment.h"
-#include "media/cast/video_sender/software_video_encoder.h"
-#include "media/cast/video_sender/video_encoder.h"
+#include "media/cast/sender/software_video_encoder.h"
+#include "media/cast/sender/video_encoder.h"
namespace media {
class VideoFrame;
@@ -26,7 +26,7 @@ class VideoEncoderImpl : public VideoEncoder {
int bit_rate;
};
- typedef base::Callback<void(scoped_ptr<transport::EncodedFrame>)>
+ typedef base::Callback<void(scoped_ptr<EncodedFrame>)>
FrameEncodedCallback;
VideoEncoderImpl(scoped_refptr<CastEnvironment> cast_environment,
@@ -53,7 +53,6 @@ class VideoEncoderImpl : public VideoEncoder {
virtual void LatestFrameIdToReference(uint32 frame_id) OVERRIDE;
private:
- const VideoSenderConfig video_config_;
scoped_refptr<CastEnvironment> cast_environment_;
CodecDynamicConfig dynamic_config_;
@@ -69,4 +68,4 @@ class VideoEncoderImpl : public VideoEncoder {
} // namespace cast
} // namespace media
-#endif // MEDIA_CAST_VIDEO_SENDER_VIDEO_ENCODER_IMPL_H_
+#endif // MEDIA_CAST_SENDER_VIDEO_ENCODER_IMPL_H_
diff --git a/media/cast/video_sender/video_encoder_impl_unittest.cc b/media/cast/sender/video_encoder_impl_unittest.cc
index a60812304f..190ca2aacc 100644
--- a/media/cast/video_sender/video_encoder_impl_unittest.cc
+++ b/media/cast/sender/video_encoder_impl_unittest.cc
@@ -10,9 +10,9 @@
#include "media/base/video_frame.h"
#include "media/cast/cast_defines.h"
#include "media/cast/cast_environment.h"
+#include "media/cast/sender/video_encoder_impl.h"
#include "media/cast/test/fake_single_thread_task_runner.h"
#include "media/cast/test/utility/video_utility.h"
-#include "media/cast/video_sender/video_encoder_impl.h"
#include "testing/gmock/include/gmock/gmock.h"
namespace media {
@@ -35,11 +35,11 @@ class TestVideoEncoderCallback
}
void DeliverEncodedVideoFrame(
- scoped_ptr<transport::EncodedFrame> encoded_frame) {
+ scoped_ptr<EncodedFrame> encoded_frame) {
if (expected_frame_id_ == expected_last_referenced_frame_id_) {
- EXPECT_EQ(transport::EncodedFrame::KEY, encoded_frame->dependency);
+ EXPECT_EQ(EncodedFrame::KEY, encoded_frame->dependency);
} else {
- EXPECT_EQ(transport::EncodedFrame::DEPENDENT,
+ EXPECT_EQ(EncodedFrame::DEPENDENT,
encoded_frame->dependency);
}
EXPECT_EQ(expected_frame_id_, encoded_frame->frame_id);
@@ -66,9 +66,9 @@ class VideoEncoderImplTest : public ::testing::Test {
protected:
VideoEncoderImplTest()
: test_video_encoder_callback_(new TestVideoEncoderCallback()) {
- video_config_.rtp_config.ssrc = 1;
+ video_config_.ssrc = 1;
video_config_.incoming_feedback_ssrc = 2;
- video_config_.rtp_config.payload_type = 127;
+ video_config_.rtp_payload_type = 127;
video_config_.use_external_encoder = false;
video_config_.width = 320;
video_config_.height = 240;
@@ -79,7 +79,7 @@ class VideoEncoderImplTest : public ::testing::Test {
video_config_.min_qp = 0;
video_config_.max_frame_rate = 30;
video_config_.max_number_of_video_buffers_used = 3;
- video_config_.codec = transport::kVp8;
+ video_config_.codec = CODEC_VIDEO_VP8;
gfx::Size size(video_config_.width, video_config_.height);
video_frame_ = media::VideoFrame::CreateFrame(
VideoFrame::I420, size, gfx::Rect(size), size, base::TimeDelta());
diff --git a/media/cast/video_sender/video_sender.cc b/media/cast/sender/video_sender.cc
index cf050b7f10..0d826903fd 100644
--- a/media/cast/video_sender/video_sender.cc
+++ b/media/cast/sender/video_sender.cc
@@ -1,8 +1,8 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/cast/video_sender/video_sender.h"
+#include "media/cast/sender/video_sender.h"
#include <algorithm>
#include <cstring>
@@ -12,10 +12,10 @@
#include "base/logging.h"
#include "base/message_loop/message_loop.h"
#include "media/cast/cast_defines.h"
-#include "media/cast/rtcp/rtcp_defines.h"
-#include "media/cast/transport/cast_transport_config.h"
-#include "media/cast/video_sender/external_video_encoder.h"
-#include "media/cast/video_sender/video_encoder_impl.h"
+#include "media/cast/net/cast_transport_config.h"
+#include "media/cast/net/rtcp/rtcp_defines.h"
+#include "media/cast/sender/external_video_encoder.h"
+#include "media/cast/sender/video_encoder_impl.h"
namespace media {
namespace cast {
@@ -28,10 +28,9 @@ VideoSender::VideoSender(
const VideoSenderConfig& video_config,
const CreateVideoEncodeAcceleratorCallback& create_vea_cb,
const CreateVideoEncodeMemoryCallback& create_video_encode_mem_cb,
- transport::CastTransportSender* const transport_sender)
+ CastTransportSender* const transport_sender)
: cast_environment_(cast_environment),
- target_playout_delay_(base::TimeDelta::FromMilliseconds(
- video_config.rtp_config.max_delay_ms)),
+ target_playout_delay_(video_config.target_playout_delay),
transport_sender_(transport_sender),
max_unacked_frames_(
std::min(kMaxUnackedFrames,
@@ -45,7 +44,7 @@ VideoSender::VideoSender(
NULL,
video_config.rtcp_mode,
base::TimeDelta::FromMilliseconds(video_config.rtcp_interval),
- video_config.rtp_config.ssrc,
+ video_config.ssrc,
video_config.incoming_feedback_ssrc,
video_config.rtcp_c_name,
VIDEO_EVENT),
@@ -75,10 +74,12 @@ VideoSender::VideoSender(
}
cast_initialization_status_ = STATUS_VIDEO_INITIALIZED;
- media::cast::transport::CastTransportVideoConfig transport_config;
- transport_config.codec = video_config.codec;
- transport_config.rtp.config = video_config.rtp_config;
- transport_config.rtp.max_outstanding_frames = max_unacked_frames_;
+ media::cast::CastTransportRtpConfig transport_config;
+ transport_config.ssrc = video_config.ssrc;
+ transport_config.rtp_payload_type = video_config.rtp_payload_type;
+ transport_config.stored_frames = max_unacked_frames_;
+ transport_config.aes_key = video_config.aes_key;
+ transport_config.aes_iv_mask = video_config.aes_iv_mask;
transport_sender_->InitializeVideo(transport_config);
rtcp_.SetCastReceiverEventHistorySize(kReceiverRtcpEventHistorySize);
@@ -140,7 +141,7 @@ void VideoSender::InsertRawVideoFrame(
void VideoSender::SendEncodedVideoFrame(
int requested_bitrate_before_encode,
- scoped_ptr<transport::EncodedFrame> encoded_frame) {
+ scoped_ptr<EncodedFrame> encoded_frame) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
DCHECK_GT(frames_in_encoder_, 0);
@@ -159,13 +160,13 @@ void VideoSender::SendEncodedVideoFrame(
ScheduleNextResendCheck();
}
- VLOG_IF(1, encoded_frame->dependency == transport::EncodedFrame::KEY)
+ VLOG_IF(1, encoded_frame->dependency == EncodedFrame::KEY)
<< "Send encoded key frame; frame_id: " << frame_id;
cast_environment_->Logging()->InsertEncodedFrameEvent(
last_send_time_, FRAME_ENCODED, VIDEO_EVENT, encoded_frame->rtp_timestamp,
frame_id, static_cast<int>(encoded_frame->data.size()),
- encoded_frame->dependency == transport::EncodedFrame::KEY,
+ encoded_frame->dependency == EncodedFrame::KEY,
requested_bitrate_before_encode);
// Only use lowest 8 bits as key.
frame_id_to_rtp_timestamp_[frame_id & 0xff] = encoded_frame->rtp_timestamp;
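
For the sender itself, the key change above is that CastTransportVideoConfig is gone: the sender now fills a single CastTransportRtpConfig before calling InitializeVideo(). The sketch below restates that initialization using only the fields shown in the hunk; |stored_frames| stands in for the sender's max_unacked_frames_ value and the free function is illustrative, not part of the patch.

    // Sketch: flattened transport initialization, per the hunk above.
    #include "media/cast/net/cast_transport_config.h"
    #include "media/cast/net/cast_transport_sender.h"

    void InitializeVideoTransport(
        media::cast::CastTransportSender* transport_sender,
        const media::cast::VideoSenderConfig& video_config,
        int stored_frames) {
      media::cast::CastTransportRtpConfig transport_config;
      transport_config.ssrc = video_config.ssrc;
      transport_config.rtp_payload_type = video_config.rtp_payload_type;
      transport_config.stored_frames = stored_frames;        // max unacked frames
      transport_config.aes_key = video_config.aes_key;        // crypto moves here too
      transport_config.aes_iv_mask = video_config.aes_iv_mask;
      transport_sender->InitializeVideo(transport_config);
    }
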
diff --git a/media/cast/video_sender/video_sender.h b/media/cast/sender/video_sender.h
index cf8d27511c..6587572806 100644
--- a/media/cast/video_sender/video_sender.h
+++ b/media/cast/sender/video_sender.h
@@ -1,9 +1,9 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_CAST_VIDEO_SENDER_VIDEO_SENDER_H_
-#define MEDIA_CAST_VIDEO_SENDER_VIDEO_SENDER_H_
+#ifndef MEDIA_CAST_SENDER_VIDEO_SENDER_H_
+#define MEDIA_CAST_SENDER_VIDEO_SENDER_H_
#include "base/callback.h"
#include "base/memory/ref_counted.h"
@@ -14,10 +14,10 @@
#include "base/time/time.h"
#include "media/cast/cast_config.h"
#include "media/cast/cast_environment.h"
-#include "media/cast/congestion_control/congestion_control.h"
#include "media/cast/logging/logging_defines.h"
-#include "media/cast/rtcp/rtcp.h"
-#include "media/cast/rtp_timestamp_helper.h"
+#include "media/cast/net/rtcp/rtcp.h"
+#include "media/cast/sender/congestion_control.h"
+#include "media/cast/sender/rtp_timestamp_helper.h"
namespace media {
@@ -25,13 +25,10 @@ class VideoFrame;
namespace cast {
+class CastTransportSender;
class LocalVideoEncoderCallback;
class VideoEncoder;
-namespace transport {
-class CastTransportSender;
-}
-
// Not thread safe. Only called from the main cast thread.
// This class owns all objects related to sending video, objects that create RTP
// packets, congestion control, video encoder, parsing and sending of
@@ -46,7 +43,7 @@ class VideoSender : public RtcpSenderFeedback,
const VideoSenderConfig& video_config,
const CreateVideoEncodeAcceleratorCallback& create_vea_cb,
const CreateVideoEncodeMemoryCallback& create_video_encode_mem_cb,
- transport::CastTransportSender* const transport_sender);
+ CastTransportSender* const transport_sender);
virtual ~VideoSender();
@@ -93,7 +90,7 @@ class VideoSender : public RtcpSenderFeedback,
// Called by the |video_encoder_| with the next EncodeFrame to send.
void SendEncodedVideoFrame(int requested_bitrate_before_encode,
- scoped_ptr<transport::EncodedFrame> encoded_frame);
+ scoped_ptr<EncodedFrame> encoded_frame);
const scoped_refptr<CastEnvironment> cast_environment_;
@@ -110,7 +107,7 @@ class VideoSender : public RtcpSenderFeedback,
// process to the browser process over IPC, with the browser process being
// responsible for "packetizing" the frames and pushing packets into the
// network layer.
- transport::CastTransportSender* const transport_sender_;
+ CastTransportSender* const transport_sender_;
// Maximum number of outstanding frames before the encoding and sending of
// new frames shall halt.
@@ -179,4 +176,4 @@ class VideoSender : public RtcpSenderFeedback,
} // namespace cast
} // namespace media
-#endif // MEDIA_CAST_VIDEO_SENDER_VIDEO_SENDER_H_
+#endif // MEDIA_CAST_SENDER_VIDEO_SENDER_H_
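
The header hunk above collapses the nested transport namespace: CastTransportSender is now forward-declared directly in media::cast. A before/after of just that declaration, restating what the hunk shows:

    // Before the merge: nested namespace.
    namespace media { namespace cast { namespace transport {
    class CastTransportSender;
    }  // namespace transport
    }  // namespace cast
    }  // namespace media

    // After the merge: declared alongside the other cast classes.
    namespace media { namespace cast {
    class CastTransportSender;
    }  // namespace cast
    }  // namespace media
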
diff --git a/media/cast/video_sender/video_sender_unittest.cc b/media/cast/sender/video_sender_unittest.cc
index 49fae46c73..bccc248680 100644
--- a/media/cast/video_sender/video_sender_unittest.cc
+++ b/media/cast/sender/video_sender_unittest.cc
@@ -1,4 +1,4 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -12,14 +12,14 @@
#include "media/base/video_frame.h"
#include "media/cast/cast_environment.h"
#include "media/cast/logging/simple_event_subscriber.h"
+#include "media/cast/net/cast_transport_config.h"
+#include "media/cast/net/cast_transport_sender_impl.h"
+#include "media/cast/net/pacing/paced_sender.h"
+#include "media/cast/sender/video_sender.h"
#include "media/cast/test/fake_single_thread_task_runner.h"
#include "media/cast/test/fake_video_encode_accelerator.h"
#include "media/cast/test/utility/default_config.h"
#include "media/cast/test/utility/video_utility.h"
-#include "media/cast/transport/cast_transport_config.h"
-#include "media/cast/transport/cast_transport_sender_impl.h"
-#include "media/cast/transport/pacing/paced_sender.h"
-#include "media/cast/video_sender/video_sender.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -51,7 +51,7 @@ void CreateSharedMemory(
callback.Run(shm.Pass());
}
-class TestPacketSender : public transport::PacketSender {
+class TestPacketSender : public PacketSender {
public:
TestPacketSender()
: number_of_rtp_packets_(0),
@@ -59,7 +59,7 @@ class TestPacketSender : public transport::PacketSender {
paused_(false) {}
// A singular packet implies an RTCP packet.

- virtual bool SendPacket(transport::PacketRef packet,
+ virtual bool SendPacket(PacketRef packet,
const base::Closure& cb) OVERRIDE {
if (paused_) {
stored_packet_ = packet;
@@ -97,7 +97,7 @@ class TestPacketSender : public transport::PacketSender {
int number_of_rtcp_packets_;
bool paused_;
base::Closure callback_;
- transport::PacketRef stored_packet_;
+ PacketRef stored_packet_;
DISALLOW_COPY_AND_ASSIGN(TestPacketSender);
};
@@ -109,7 +109,7 @@ class PeerVideoSender : public VideoSender {
const VideoSenderConfig& video_config,
const CreateVideoEncodeAcceleratorCallback& create_vea_cb,
const CreateVideoEncodeMemoryCallback& create_video_encode_mem_cb,
- transport::CastTransportSender* const transport_sender)
+ CastTransportSender* const transport_sender)
: VideoSender(cast_environment,
video_config,
create_vea_cb,
@@ -132,12 +132,12 @@ class VideoSenderTest : public ::testing::Test {
task_runner_);
last_pixel_value_ = kPixelValue;
net::IPEndPoint dummy_endpoint;
- transport_sender_.reset(new transport::CastTransportSenderImpl(
+ transport_sender_.reset(new CastTransportSenderImpl(
NULL,
testing_clock_,
dummy_endpoint,
base::Bind(&UpdateCastTransportStatus),
- transport::BulkRawEventsCallback(),
+ BulkRawEventsCallback(),
base::TimeDelta(),
task_runner_,
&transport_));
@@ -150,16 +150,16 @@ class VideoSenderTest : public ::testing::Test {
task_runner_->RunTasks();
}
- static void UpdateCastTransportStatus(transport::CastTransportStatus status) {
- EXPECT_EQ(transport::TRANSPORT_VIDEO_INITIALIZED, status);
+ static void UpdateCastTransportStatus(CastTransportStatus status) {
+ EXPECT_EQ(TRANSPORT_VIDEO_INITIALIZED, status);
}
void InitEncoder(bool external) {
VideoSenderConfig video_config;
- video_config.rtp_config.ssrc = 1;
+ video_config.ssrc = 1;
video_config.incoming_feedback_ssrc = 2;
video_config.rtcp_c_name = "video_test@10.1.1.1";
- video_config.rtp_config.payload_type = 127;
+ video_config.rtp_payload_type = 127;
video_config.use_external_encoder = external;
video_config.width = kWidth;
video_config.height = kHeight;
@@ -170,7 +170,7 @@ class VideoSenderTest : public ::testing::Test {
video_config.min_qp = 0;
video_config.max_frame_rate = 30;
video_config.max_number_of_video_buffers_used = 1;
- video_config.codec = transport::kVp8;
+ video_config.codec = CODEC_VIDEO_VP8;
if (external) {
scoped_ptr<VideoEncodeAccelerator> fake_vea(
@@ -218,7 +218,7 @@ class VideoSenderTest : public ::testing::Test {
base::SimpleTestTickClock* testing_clock_; // Owned by CastEnvironment.
TestPacketSender transport_;
- scoped_ptr<transport::CastTransportSenderImpl> transport_sender_;
+ scoped_ptr<CastTransportSenderImpl> transport_sender_;
scoped_refptr<test::FakeSingleThreadTaskRunner> task_runner_;
scoped_ptr<PeerVideoSender> video_sender_;
scoped_refptr<CastEnvironment> cast_environment_;
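
In the unit test above the transport is now a plain CastTransportSenderImpl. The sketch below mirrors that fixture wiring with the constructor arguments exactly as they appear in the hunk; fixture members and the static status callback are assumed to exist as shown there.

    // Sketch: test fixture wiring, following the call site in the hunk above.
    #include "media/cast/net/cast_transport_sender_impl.h"

    // |testing_clock_|, |dummy_endpoint|, |task_runner_| and |transport_| are
    // the fixture members used above; UpdateCastTransportStatus() is the
    // static helper that expects TRANSPORT_VIDEO_INITIALIZED.
    transport_sender_.reset(new media::cast::CastTransportSenderImpl(
        NULL,
        testing_clock_,
        dummy_endpoint,
        base::Bind(&UpdateCastTransportStatus),
        media::cast::BulkRawEventsCallback(),  // default-constructed: no raw-event logging
        base::TimeDelta(),                     // zero interval
        task_runner_,
        &transport_));
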
diff --git a/media/cast/video_sender/codecs/vp8/vp8_encoder.cc b/media/cast/sender/vp8_encoder.cc
index c7374babd1..b43b5c881e 100644
--- a/media/cast/video_sender/codecs/vp8/vp8_encoder.cc
+++ b/media/cast/sender/vp8_encoder.cc
@@ -1,17 +1,15 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// TODO (pwestin): add a link to the design document describing the generic
-// protocol and the VP8 specific details.
-#include "media/cast/video_sender/codecs/vp8/vp8_encoder.h"
+
+#include "media/cast/sender/vp8_encoder.h"
#include <vector>
#include "base/logging.h"
#include "media/base/video_frame.h"
#include "media/cast/cast_defines.h"
-#include "media/cast/transport/cast_transport_config.h"
+#include "media/cast/net/cast_transport_config.h"
#include "third_party/libvpx/source/libvpx/vpx/vp8cx.h"
namespace media {
@@ -135,7 +133,7 @@ void Vp8Encoder::InitEncode(int number_of_encode_threads) {
}
bool Vp8Encoder::Encode(const scoped_refptr<media::VideoFrame>& video_frame,
- transport::EncodedFrame* encoded_image) {
+ EncodedFrame* encoded_image) {
DCHECK(thread_checker_.CalledOnValidThread());
// Image in vpx_image_t format.
// Input image is const. VP8's raw image is not defined as const.
@@ -216,10 +214,10 @@ bool Vp8Encoder::Encode(const scoped_refptr<media::VideoFrame>& video_frame,
// Populate the encoded frame.
encoded_image->frame_id = ++last_encoded_frame_id_;
if (is_key_frame) {
- encoded_image->dependency = transport::EncodedFrame::KEY;
+ encoded_image->dependency = EncodedFrame::KEY;
encoded_image->referenced_frame_id = encoded_image->frame_id;
} else {
- encoded_image->dependency = transport::EncodedFrame::DEPENDENT;
+ encoded_image->dependency = EncodedFrame::DEPENDENT;
encoded_image->referenced_frame_id = latest_frame_id_to_reference;
}
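
The encoder hunk above keeps the same key/delta bookkeeping and only drops the transport:: prefix. A compact restatement of how the dependency fields of an EncodedFrame are filled, using only the members visible in the hunk; the helper function is illustrative.

    // Sketch: dependency metadata on an EncodedFrame (see hunk above).
    #include "media/cast/net/cast_transport_config.h"

    void SetDependency(media::cast::EncodedFrame* frame,
                       bool is_key_frame,
                       uint32 latest_frame_id_to_reference) {
      if (is_key_frame) {
        frame->dependency = media::cast::EncodedFrame::KEY;
        frame->referenced_frame_id = frame->frame_id;  // key frames reference themselves
      } else {
        frame->dependency = media::cast::EncodedFrame::DEPENDENT;
        frame->referenced_frame_id = latest_frame_id_to_reference;
      }
    }
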
diff --git a/media/cast/video_sender/codecs/vp8/vp8_encoder.h b/media/cast/sender/vp8_encoder.h
index 2421cf1511..0437dbc2f5 100644
--- a/media/cast/video_sender/codecs/vp8/vp8_encoder.h
+++ b/media/cast/sender/vp8_encoder.h
@@ -1,16 +1,16 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_CAST_VIDEO_SENDER_CODECS_VP8_VP8_ENCODER_H_
-#define MEDIA_CAST_VIDEO_SENDER_CODECS_VP8_VP8_ENCODER_H_
+#ifndef MEDIA_CAST_SENDER_CODECS_VP8_VP8_ENCODER_H_
+#define MEDIA_CAST_SENDER_CODECS_VP8_VP8_ENCODER_H_
#include "base/basictypes.h"
#include "base/memory/scoped_ptr.h"
#include "base/threading/thread_checker.h"
#include "base/time/time.h"
#include "media/cast/cast_config.h"
-#include "media/cast/video_sender/software_video_encoder.h"
+#include "media/cast/sender/software_video_encoder.h"
#include "third_party/libvpx/source/libvpx/vpx/vpx_encoder.h"
namespace media {
@@ -37,7 +37,7 @@ class Vp8Encoder : public SoftwareVideoEncoder {
// Encode a raw image (as a part of a video stream).
virtual bool Encode(const scoped_refptr<media::VideoFrame>& video_frame,
- transport::EncodedFrame* encoded_image) OVERRIDE;
+ EncodedFrame* encoded_image) OVERRIDE;
// Update the encoder with a new target bit rate.
virtual void UpdateRates(uint32 new_bitrate) OVERRIDE;
@@ -100,4 +100,4 @@ class Vp8Encoder : public SoftwareVideoEncoder {
} // namespace cast
} // namespace media
-#endif // MEDIA_CAST_VIDEO_SENDER_CODECS_VP8_VP8_ENCODER_H_
+#endif // MEDIA_CAST_SENDER_CODECS_VP8_VP8_ENCODER_H_
diff --git a/media/cast/test/cast_benchmarks.cc b/media/cast/test/cast_benchmarks.cc
index 66257626bd..a2483facff 100644
--- a/media/cast/test/cast_benchmarks.cc
+++ b/media/cast/test/cast_benchmarks.cc
@@ -45,7 +45,12 @@
#include "media/cast/cast_receiver.h"
#include "media/cast/cast_sender.h"
#include "media/cast/logging/simple_event_subscriber.h"
+#include "media/cast/net/cast_transport_config.h"
+#include "media/cast/net/cast_transport_defines.h"
+#include "media/cast/net/cast_transport_sender.h"
+#include "media/cast/net/cast_transport_sender_impl.h"
#include "media/cast/test/fake_single_thread_task_runner.h"
+#include "media/cast/test/loopback_transport.h"
#include "media/cast/test/skewed_single_thread_task_runner.h"
#include "media/cast/test/skewed_tick_clock.h"
#include "media/cast/test/utility/audio_utility.h"
@@ -53,10 +58,6 @@
#include "media/cast/test/utility/test_util.h"
#include "media/cast/test/utility/udp_proxy.h"
#include "media/cast/test/utility/video_utility.h"
-#include "media/cast/transport/cast_transport_config.h"
-#include "media/cast/transport/cast_transport_defines.h"
-#include "media/cast/transport/cast_transport_sender.h"
-#include "media/cast/transport/cast_transport_sender_impl.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace media {
@@ -68,15 +69,15 @@ static const int64 kStartMillisecond = INT64_C(1245);
static const int kAudioChannels = 2;
static const int kVideoHdWidth = 1280;
static const int kVideoHdHeight = 720;
-static const int kTargetDelay = 300;
+static const int kTargetPlayoutDelayMs = 300;
// The tests are commonly implemented with the |kFrameTimerMs| RunTask function;
// a normal video is 30 fps, hence the 33 ms between frames.
static const int kFrameTimerMs = 33;
-void UpdateCastTransportStatus(transport::CastTransportStatus status) {
- bool result = (status == transport::TRANSPORT_AUDIO_INITIALIZED ||
- status == transport::TRANSPORT_VIDEO_INITIALIZED);
+void UpdateCastTransportStatus(CastTransportStatus status) {
+ bool result = (status == TRANSPORT_AUDIO_INITIALIZED ||
+ status == TRANSPORT_VIDEO_INITIALIZED);
EXPECT_TRUE(result);
}
@@ -93,68 +94,9 @@ void IgnoreRawEvents(const std::vector<PacketEvent>& packet_events) {
} // namespace
-// Shim that turns forwards packets from a test::PacketPipe to a
-// PacketReceiverCallback.
-class LoopBackPacketPipe : public test::PacketPipe {
- public:
- LoopBackPacketPipe(const transport::PacketReceiverCallback& packet_receiver)
- : packet_receiver_(packet_receiver) {}
-
- virtual ~LoopBackPacketPipe() {}
-
- // PacketPipe implementations.
- virtual void Send(scoped_ptr<transport::Packet> packet) OVERRIDE {
- packet_receiver_.Run(packet.Pass());
- }
-
- private:
- transport::PacketReceiverCallback packet_receiver_;
-};
-
-// Class that sends the packet direct from sender into the receiver with the
-// ability to drop packets between the two.
-// TODO(hubbe): Break this out and share code with end2end_unittest.cc
-class LoopBackTransport : public transport::PacketSender {
- public:
- explicit LoopBackTransport(scoped_refptr<CastEnvironment> cast_environment)
- : cast_environment_(cast_environment) {}
-
- void SetPacketReceiver(
- const transport::PacketReceiverCallback& packet_receiver,
- const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
- base::TickClock* clock) {
- scoped_ptr<test::PacketPipe> loopback_pipe(
- new LoopBackPacketPipe(packet_receiver));
- if (packet_pipe_) {
- packet_pipe_->AppendToPipe(loopback_pipe.Pass());
- } else {
- packet_pipe_ = loopback_pipe.Pass();
- }
- packet_pipe_->InitOnIOThread(task_runner, clock);
- }
-
- virtual bool SendPacket(transport::PacketRef packet,
- const base::Closure& cb) OVERRIDE {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- scoped_ptr<Packet> packet_copy(new Packet(packet->data));
- packet_pipe_->Send(packet_copy.Pass());
- return true;
- }
-
- void SetPacketPipe(scoped_ptr<test::PacketPipe> pipe) {
- // Append the loopback pipe to the end.
- pipe->AppendToPipe(packet_pipe_.Pass());
- packet_pipe_ = pipe.Pass();
- }
-
- private:
- scoped_refptr<CastEnvironment> cast_environment_;
- scoped_ptr<test::PacketPipe> packet_pipe_;
-};
-
// Wraps a CastTransportSender and records some statistics about
// the data that goes through it.
-class CastTransportSenderWrapper : public transport::CastTransportSender {
+class CastTransportSenderWrapper : public CastTransportSender {
public:
// Takes ownership of |transport|.
void Init(CastTransportSender* transport,
@@ -166,28 +108,28 @@ class CastTransportSenderWrapper : public transport::CastTransportSender {
}
virtual void InitializeAudio(
- const transport::CastTransportAudioConfig& config) OVERRIDE {
+ const CastTransportRtpConfig& config) OVERRIDE {
transport_->InitializeAudio(config);
}
virtual void InitializeVideo(
- const transport::CastTransportVideoConfig& config) OVERRIDE {
+ const CastTransportRtpConfig& config) OVERRIDE {
transport_->InitializeVideo(config);
}
virtual void SetPacketReceiver(
- const transport::PacketReceiverCallback& packet_receiver) OVERRIDE {
+ const PacketReceiverCallback& packet_receiver) OVERRIDE {
transport_->SetPacketReceiver(packet_receiver);
}
virtual void InsertCodedAudioFrame(
- const transport::EncodedFrame& audio_frame) OVERRIDE {
+ const EncodedFrame& audio_frame) OVERRIDE {
*encoded_audio_bytes_ += audio_frame.data.size();
transport_->InsertCodedAudioFrame(audio_frame);
}
virtual void InsertCodedVideoFrame(
- const transport::EncodedFrame& video_frame) OVERRIDE {
+ const EncodedFrame& video_frame) OVERRIDE {
*encoded_video_bytes_ += video_frame.data.size();
transport_->InsertCodedVideoFrame(video_frame);
}
@@ -196,7 +138,7 @@ class CastTransportSenderWrapper : public transport::CastTransportSender {
uint32 ntp_seconds,
uint32 ntp_fraction,
uint32 rtp_timestamp,
- const transport::RtcpDlrrReportBlock& dlrr,
+ const RtcpDlrrReportBlock& dlrr,
uint32 sending_ssrc,
const std::string& c_name) OVERRIDE {
transport_->SendRtcpFromRtpSender(packet_type_flags,
@@ -219,7 +161,7 @@ class CastTransportSenderWrapper : public transport::CastTransportSender {
}
private:
- scoped_ptr<transport::CastTransportSender> transport_;
+ scoped_ptr<CastTransportSender> transport_;
uint64* encoded_video_bytes_;
uint64* encoded_audio_bytes_;
};
@@ -278,34 +220,37 @@ class RunOneBenchmark {
base::TimeDelta::FromMilliseconds(kStartMillisecond));
}
- void Configure(transport::VideoCodec video_codec,
- transport::AudioCodec audio_codec,
+ void Configure(Codec video_codec,
+ Codec audio_codec,
int audio_sampling_frequency,
int max_number_of_video_buffers_used) {
- audio_sender_config_.rtp_config.ssrc = 1;
+ audio_sender_config_.ssrc = 1;
audio_sender_config_.incoming_feedback_ssrc = 2;
- audio_sender_config_.rtp_config.payload_type = 96;
+ audio_sender_config_.target_playout_delay =
+ base::TimeDelta::FromMilliseconds(kTargetPlayoutDelayMs);
+ audio_sender_config_.rtp_payload_type = 96;
audio_sender_config_.use_external_encoder = false;
audio_sender_config_.frequency = audio_sampling_frequency;
audio_sender_config_.channels = kAudioChannels;
audio_sender_config_.bitrate = kDefaultAudioEncoderBitrate;
audio_sender_config_.codec = audio_codec;
- audio_sender_config_.rtp_config.max_delay_ms = kTargetDelay;
audio_receiver_config_.feedback_ssrc =
audio_sender_config_.incoming_feedback_ssrc;
- audio_receiver_config_.incoming_ssrc = audio_sender_config_.rtp_config.ssrc;
+ audio_receiver_config_.incoming_ssrc = audio_sender_config_.ssrc;
audio_receiver_config_.rtp_payload_type =
- audio_sender_config_.rtp_config.payload_type;
+ audio_sender_config_.rtp_payload_type;
audio_receiver_config_.frequency = audio_sender_config_.frequency;
audio_receiver_config_.channels = kAudioChannels;
audio_receiver_config_.max_frame_rate = 100;
- audio_receiver_config_.codec.audio = audio_sender_config_.codec;
- audio_receiver_config_.rtp_max_delay_ms = kTargetDelay;
+ audio_receiver_config_.codec = audio_sender_config_.codec;
+ audio_receiver_config_.rtp_max_delay_ms = kTargetPlayoutDelayMs;
- video_sender_config_.rtp_config.ssrc = 3;
+ video_sender_config_.ssrc = 3;
video_sender_config_.incoming_feedback_ssrc = 4;
- video_sender_config_.rtp_config.payload_type = 97;
+ video_sender_config_.target_playout_delay =
+ base::TimeDelta::FromMilliseconds(kTargetPlayoutDelayMs);
+ video_sender_config_.rtp_payload_type = 97;
video_sender_config_.use_external_encoder = false;
video_sender_config_.width = kVideoHdWidth;
video_sender_config_.height = kVideoHdHeight;
@@ -324,18 +269,17 @@ class RunOneBenchmark {
video_sender_config_.max_number_of_video_buffers_used =
max_number_of_video_buffers_used;
video_sender_config_.codec = video_codec;
- video_sender_config_.rtp_config.max_delay_ms = kTargetDelay;
video_receiver_config_.feedback_ssrc =
video_sender_config_.incoming_feedback_ssrc;
- video_receiver_config_.incoming_ssrc = video_sender_config_.rtp_config.ssrc;
+ video_receiver_config_.incoming_ssrc = video_sender_config_.ssrc;
video_receiver_config_.rtp_payload_type =
- video_sender_config_.rtp_config.payload_type;
- video_receiver_config_.codec.video = video_sender_config_.codec;
+ video_sender_config_.rtp_payload_type;
+ video_receiver_config_.codec = video_sender_config_.codec;
video_receiver_config_.frequency = kVideoFrequency;
video_receiver_config_.channels = 1;
video_receiver_config_.max_frame_rate = 100;
- video_receiver_config_.rtp_max_delay_ms = kTargetDelay;
+ video_receiver_config_.rtp_max_delay_ms = kTargetPlayoutDelayMs;
}
void SetSenderClockSkew(double skew, base::TimeDelta offset) {
@@ -348,13 +292,13 @@ class RunOneBenchmark {
task_runner_receiver_->SetSkew(1.0 / skew);
}
- void Create() {
+ void Create(const MeasuringPoint& p) {
cast_receiver_ = CastReceiver::Create(cast_environment_receiver_,
audio_receiver_config_,
video_receiver_config_,
&receiver_to_sender_);
net::IPEndPoint dummy_endpoint;
- transport_sender_.Init(new transport::CastTransportSenderImpl(
+ transport_sender_.Init(new CastTransportSenderImpl(
NULL,
testing_clock_sender_,
dummy_endpoint,
@@ -377,10 +321,12 @@ class RunOneBenchmark {
CreateDefaultVideoEncodeAcceleratorCallback(),
CreateDefaultVideoEncodeMemoryCallback());
- receiver_to_sender_.SetPacketReceiver(
- cast_sender_->packet_receiver(), task_runner_, &testing_clock_);
- sender_to_receiver_.SetPacketReceiver(
- cast_receiver_->packet_receiver(), task_runner_, &testing_clock_);
+ receiver_to_sender_.Initialize(
+ CreateSimplePipe(p).Pass(), cast_sender_->packet_receiver(),
+ task_runner_, &testing_clock_);
+ sender_to_receiver_.Initialize(
+ CreateSimplePipe(p).Pass(), cast_receiver_->packet_receiver(),
+ task_runner_, &testing_clock_);
}
virtual ~RunOneBenchmark() {
@@ -436,10 +382,9 @@ class RunOneBenchmark {
void Run(const MeasuringPoint& p) {
available_bitrate_ = p.bitrate;
- Configure(transport::kFakeSoftwareVideo, transport::kPcm16, 32000, 1);
- receiver_to_sender_.SetPacketPipe(CreateSimplePipe(p).Pass());
- sender_to_receiver_.SetPacketPipe(CreateSimplePipe(p).Pass());
- Create();
+ Configure(
+ CODEC_VIDEO_FAKE, CODEC_AUDIO_PCM16, 32000, 1);
+ Create(p);
StartBasicPlayer();
for (int frame = 0; frame < 1000; frame++) {
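
Two things change in the benchmark above: the local LoopBackTransport/LoopBackPacketPipe shim is removed in favor of the shared media/cast/test/loopback_transport.h, and the simulated packet pipe is now handed to each transport through Initialize() inside Create(p). The sketch below restates that wiring with the argument order taken from the call site; the comments are interpretation of the names, not text from the patch.

    // Sketch: loopback wiring in Create(p), following the hunk above.
    #include "media/cast/test/loopback_transport.h"

    // Each direction gets its own simulated pipe; packets leaving one end are
    // delivered to the other end's packet receiver on |task_runner_|.
    receiver_to_sender_.Initialize(
        CreateSimplePipe(p).Pass(), cast_sender_->packet_receiver(),
        task_runner_, &testing_clock_);
    sender_to_receiver_.Initialize(
        CreateSimplePipe(p).Pass(), cast_receiver_->packet_receiver(),
        task_runner_, &testing_clock_);
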
diff --git a/media/cast/test/end2end_unittest.cc b/media/cast/test/end2end_unittest.cc
index 4a0d820214..a1d4a3ab34 100644
--- a/media/cast/test/end2end_unittest.cc
+++ b/media/cast/test/end2end_unittest.cc
@@ -29,6 +29,10 @@
#include "media/cast/cast_receiver.h"
#include "media/cast/cast_sender.h"
#include "media/cast/logging/simple_event_subscriber.h"
+#include "media/cast/net/cast_transport_config.h"
+#include "media/cast/net/cast_transport_defines.h"
+#include "media/cast/net/cast_transport_sender.h"
+#include "media/cast/net/cast_transport_sender_impl.h"
#include "media/cast/test/fake_single_thread_task_runner.h"
#include "media/cast/test/skewed_single_thread_task_runner.h"
#include "media/cast/test/skewed_tick_clock.h"
@@ -36,10 +40,6 @@
#include "media/cast/test/utility/default_config.h"
#include "media/cast/test/utility/udp_proxy.h"
#include "media/cast/test/utility/video_utility.h"
-#include "media/cast/transport/cast_transport_config.h"
-#include "media/cast/transport/cast_transport_defines.h"
-#include "media/cast/transport/cast_transport_sender.h"
-#include "media/cast/transport/cast_transport_sender_impl.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace media {
@@ -97,9 +97,9 @@ std::string ConvertFromBase16String(const std::string base_16) {
return compressed;
}
-void UpdateCastTransportStatus(transport::CastTransportStatus status) {
- bool result = (status == transport::TRANSPORT_AUDIO_INITIALIZED ||
- status == transport::TRANSPORT_VIDEO_INITIALIZED);
+void UpdateCastTransportStatus(CastTransportStatus status) {
+ bool result = (status == TRANSPORT_AUDIO_INITIALIZED ||
+ status == TRANSPORT_VIDEO_INITIALIZED);
EXPECT_TRUE(result);
}
@@ -168,23 +168,23 @@ std::map<uint16, LoggingEventCounts> GetEventCountForPacketEvents(
// PacketReceiverCallback.
class LoopBackPacketPipe : public test::PacketPipe {
public:
- LoopBackPacketPipe(const transport::PacketReceiverCallback& packet_receiver)
+ LoopBackPacketPipe(const PacketReceiverCallback& packet_receiver)
: packet_receiver_(packet_receiver) {}
virtual ~LoopBackPacketPipe() {}
// PacketPipe implementations.
- virtual void Send(scoped_ptr<transport::Packet> packet) OVERRIDE {
+ virtual void Send(scoped_ptr<Packet> packet) OVERRIDE {
packet_receiver_.Run(packet.Pass());
}
private:
- transport::PacketReceiverCallback packet_receiver_;
+ PacketReceiverCallback packet_receiver_;
};
// Class that sends the packet direct from sender into the receiver with the
// ability to drop packets between the two.
-class LoopBackTransport : public transport::PacketSender {
+class LoopBackTransport : public PacketSender {
public:
explicit LoopBackTransport(scoped_refptr<CastEnvironment> cast_environment)
: send_packets_(true),
@@ -192,7 +192,7 @@ class LoopBackTransport : public transport::PacketSender {
cast_environment_(cast_environment) {}
void SetPacketReceiver(
- const transport::PacketReceiverCallback& packet_receiver,
+ const PacketReceiverCallback& packet_receiver,
const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
base::TickClock* clock) {
scoped_ptr<test::PacketPipe> loopback_pipe(
@@ -205,7 +205,7 @@ class LoopBackTransport : public transport::PacketSender {
packet_pipe_->InitOnIOThread(task_runner, clock);
}
- virtual bool SendPacket(transport::PacketRef packet,
+ virtual bool SendPacket(PacketRef packet,
const base::Closure& cb) OVERRIDE {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
if (!send_packets_)
@@ -307,7 +307,7 @@ class TestReceiverAudioCallback
EXPECT_TRUE(is_continuous);
}
- void CheckCodedAudioFrame(scoped_ptr<transport::EncodedFrame> audio_frame) {
+ void CheckCodedAudioFrame(scoped_ptr<EncodedFrame> audio_frame) {
ASSERT_TRUE(!!audio_frame);
ASSERT_FALSE(expected_frames_.empty());
const ExpectedAudioFrame& expected_audio_frame =
@@ -458,14 +458,15 @@ class End2EndTest : public ::testing::Test {
&event_subscriber_sender_);
}
- void Configure(transport::VideoCodec video_codec,
- transport::AudioCodec audio_codec,
+ void Configure(Codec video_codec,
+ Codec audio_codec,
int audio_sampling_frequency,
int max_number_of_video_buffers_used) {
- audio_sender_config_.rtp_config.ssrc = 1;
- audio_sender_config_.rtp_config.max_delay_ms = kTargetPlayoutDelayMs;
+ audio_sender_config_.ssrc = 1;
audio_sender_config_.incoming_feedback_ssrc = 2;
- audio_sender_config_.rtp_config.payload_type = 96;
+ audio_sender_config_.target_playout_delay =
+ base::TimeDelta::FromMilliseconds(kTargetPlayoutDelayMs);
+ audio_sender_config_.rtp_payload_type = 96;
audio_sender_config_.use_external_encoder = false;
audio_sender_config_.frequency = audio_sampling_frequency;
audio_sender_config_.channels = kAudioChannels;
@@ -474,22 +475,23 @@ class End2EndTest : public ::testing::Test {
audio_receiver_config_.feedback_ssrc =
audio_sender_config_.incoming_feedback_ssrc;
- audio_receiver_config_.incoming_ssrc = audio_sender_config_.rtp_config.ssrc;
+ audio_receiver_config_.incoming_ssrc = audio_sender_config_.ssrc;
audio_receiver_config_.rtp_max_delay_ms = kTargetPlayoutDelayMs;
audio_receiver_config_.rtp_payload_type =
- audio_sender_config_.rtp_config.payload_type;
+ audio_sender_config_.rtp_payload_type;
audio_receiver_config_.frequency = audio_sender_config_.frequency;
audio_receiver_config_.channels = kAudioChannels;
audio_receiver_config_.max_frame_rate = 100;
- audio_receiver_config_.codec.audio = audio_sender_config_.codec;
+ audio_receiver_config_.codec = audio_sender_config_.codec;
test_receiver_audio_callback_->SetExpectedSamplingFrequency(
audio_receiver_config_.frequency);
- video_sender_config_.rtp_config.ssrc = 3;
- video_sender_config_.rtp_config.max_delay_ms = kTargetPlayoutDelayMs;
+ video_sender_config_.ssrc = 3;
video_sender_config_.incoming_feedback_ssrc = 4;
- video_sender_config_.rtp_config.payload_type = 97;
+ video_sender_config_.target_playout_delay =
+ base::TimeDelta::FromMilliseconds(kTargetPlayoutDelayMs);
+ video_sender_config_.rtp_payload_type = 97;
video_sender_config_.use_external_encoder = false;
video_sender_config_.width = kVideoHdWidth;
video_sender_config_.height = kVideoHdHeight;
@@ -505,14 +507,14 @@ class End2EndTest : public ::testing::Test {
video_receiver_config_.feedback_ssrc =
video_sender_config_.incoming_feedback_ssrc;
- video_receiver_config_.incoming_ssrc = video_sender_config_.rtp_config.ssrc;
+ video_receiver_config_.incoming_ssrc = video_sender_config_.ssrc;
video_receiver_config_.rtp_max_delay_ms = kTargetPlayoutDelayMs;
video_receiver_config_.rtp_payload_type =
- video_sender_config_.rtp_config.payload_type;
+ video_sender_config_.rtp_payload_type;
video_receiver_config_.frequency = kVideoFrequency;
video_receiver_config_.channels = 1;
video_receiver_config_.max_frame_rate = video_sender_config_.max_frame_rate;
- video_receiver_config_.codec.video = video_sender_config_.codec;
+ video_receiver_config_.codec = video_sender_config_.codec;
}
void SetReceiverSkew(double skew, base::TimeDelta offset) {
@@ -580,7 +582,7 @@ class End2EndTest : public ::testing::Test {
&receiver_to_sender_);
net::IPEndPoint dummy_endpoint;
- transport_sender_.reset(new transport::CastTransportSenderImpl(
+ transport_sender_.reset(new CastTransportSenderImpl(
NULL,
testing_clock_sender_,
dummy_endpoint,
@@ -760,7 +762,7 @@ class End2EndTest : public ::testing::Test {
LoopBackTransport receiver_to_sender_;
LoopBackTransport sender_to_receiver_;
- scoped_ptr<transport::CastTransportSenderImpl> transport_sender_;
+ scoped_ptr<CastTransportSenderImpl> transport_sender_;
scoped_ptr<CastReceiver> cast_receiver_;
scoped_ptr<CastSender> cast_sender_;
@@ -782,7 +784,7 @@ class End2EndTest : public ::testing::Test {
};
TEST_F(End2EndTest, LoopNoLossPcm16) {
- Configure(transport::kVp8, transport::kPcm16, 32000, 1);
+ Configure(CODEC_VIDEO_VP8, CODEC_AUDIO_PCM16, 32000, 1);
// Reduce video resolution to allow processing multiple frames within a
// reasonable time frame.
video_sender_config_.width = kVideoQcifWidth;
@@ -836,7 +838,7 @@ TEST_F(End2EndTest, LoopNoLossPcm16) {
// This tests our external decoder interface for Audio.
// Audio test without packet loss using raw PCM 16 audio "codec";
TEST_F(End2EndTest, LoopNoLossPcm16ExternalDecoder) {
- Configure(transport::kVp8, transport::kPcm16, 32000, 1);
+ Configure(CODEC_VIDEO_VP8, CODEC_AUDIO_PCM16, 32000, 1);
Create();
const int kNumIterations = 10;
@@ -854,7 +856,8 @@ TEST_F(End2EndTest, LoopNoLossPcm16ExternalDecoder) {
// This tests our Opus audio codec without video.
TEST_F(End2EndTest, LoopNoLossOpus) {
- Configure(transport::kVp8, transport::kOpus, kDefaultAudioSamplingRate, 1);
+ Configure(CODEC_VIDEO_VP8, CODEC_AUDIO_OPUS,
+ kDefaultAudioSamplingRate, 1);
Create();
const int kNumIterations = 300;
@@ -880,7 +883,8 @@ TEST_F(End2EndTest, LoopNoLossOpus) {
// in audio_receiver.cc for likely cause(s) of this bug.
// http://crbug.com/356942
TEST_F(End2EndTest, DISABLED_StartSenderBeforeReceiver) {
- Configure(transport::kVp8, transport::kPcm16, kDefaultAudioSamplingRate, 1);
+ Configure(CODEC_VIDEO_VP8, CODEC_AUDIO_PCM16,
+ kDefaultAudioSamplingRate, 1);
Create();
int video_start = kVideoStart;
@@ -968,8 +972,10 @@ TEST_F(End2EndTest, DISABLED_StartSenderBeforeReceiver) {
// This tests a network glitch lasting for 10 video frames.
// Flaky. See crbug.com/351596.
TEST_F(End2EndTest, DISABLED_GlitchWith3Buffers) {
- Configure(transport::kVp8, transport::kOpus, kDefaultAudioSamplingRate, 3);
- video_sender_config_.rtp_config.max_delay_ms = 67;
+ Configure(CODEC_VIDEO_VP8, CODEC_AUDIO_OPUS,
+ kDefaultAudioSamplingRate, 3);
+ video_sender_config_.target_playout_delay =
+ base::TimeDelta::FromMilliseconds(67);
video_receiver_config_.rtp_max_delay_ms = 67;
Create();
@@ -1031,8 +1037,10 @@ TEST_F(End2EndTest, DISABLED_GlitchWith3Buffers) {
// Disabled due to flakiness and crashiness. http://crbug.com/360951
TEST_F(End2EndTest, DISABLED_DropEveryOtherFrame3Buffers) {
- Configure(transport::kVp8, transport::kOpus, kDefaultAudioSamplingRate, 3);
- video_sender_config_.rtp_config.max_delay_ms = 67;
+ Configure(CODEC_VIDEO_VP8, CODEC_AUDIO_OPUS,
+ kDefaultAudioSamplingRate, 3);
+ video_sender_config_.target_playout_delay =
+ base::TimeDelta::FromMilliseconds(67);
video_receiver_config_.rtp_max_delay_ms = 67;
Create();
sender_to_receiver_.DropAllPacketsBelongingToOddFrames();
@@ -1069,17 +1077,17 @@ TEST_F(End2EndTest, DISABLED_DropEveryOtherFrame3Buffers) {
}
TEST_F(End2EndTest, CryptoVideo) {
- Configure(transport::kVp8, transport::kPcm16, 32000, 1);
+ Configure(CODEC_VIDEO_VP8, CODEC_AUDIO_PCM16, 32000, 1);
- video_sender_config_.rtp_config.aes_iv_mask =
+ video_sender_config_.aes_iv_mask =
ConvertFromBase16String("1234567890abcdeffedcba0987654321");
- video_sender_config_.rtp_config.aes_key =
+ video_sender_config_.aes_key =
ConvertFromBase16String("deadbeefcafeb0b0b0b0cafedeadbeef");
video_receiver_config_.aes_iv_mask =
- video_sender_config_.rtp_config.aes_iv_mask;
+ video_sender_config_.aes_iv_mask;
video_receiver_config_.aes_key =
- video_sender_config_.rtp_config.aes_key;
+ video_sender_config_.aes_key;
Create();
@@ -1107,17 +1115,17 @@ TEST_F(End2EndTest, CryptoVideo) {
}
TEST_F(End2EndTest, CryptoAudio) {
- Configure(transport::kVp8, transport::kPcm16, 32000, 1);
+ Configure(CODEC_VIDEO_VP8, CODEC_AUDIO_PCM16, 32000, 1);
- audio_sender_config_.rtp_config.aes_iv_mask =
+ audio_sender_config_.aes_iv_mask =
ConvertFromBase16String("abcdeffedcba12345678900987654321");
- audio_sender_config_.rtp_config.aes_key =
+ audio_sender_config_.aes_key =
ConvertFromBase16String("deadbeefcafecafedeadbeefb0b0b0b0");
audio_receiver_config_.aes_iv_mask =
- audio_sender_config_.rtp_config.aes_iv_mask;
+ audio_sender_config_.aes_iv_mask;
audio_receiver_config_.aes_key =
- audio_sender_config_.rtp_config.aes_key;
+ audio_sender_config_.aes_key;
Create();
@@ -1136,7 +1144,7 @@ TEST_F(End2EndTest, CryptoAudio) {
// Video test without packet loss - tests the logging aspects of the end2end,
// but is basically equivalent to LoopNoLossPcm16.
TEST_F(End2EndTest, VideoLogging) {
- Configure(transport::kVp8, transport::kPcm16, 32000, 1);
+ Configure(CODEC_VIDEO_VP8, CODEC_AUDIO_PCM16, 32000, 1);
Create();
int video_start = kVideoStart;
@@ -1260,7 +1268,7 @@ TEST_F(End2EndTest, VideoLogging) {
// Audio test without packet loss - tests the logging aspects of the end2end,
// but is basically equivalent to LoopNoLossPcm16.
TEST_F(End2EndTest, AudioLogging) {
- Configure(transport::kVp8, transport::kPcm16, 32000, 1);
+ Configure(CODEC_VIDEO_VP8, CODEC_AUDIO_PCM16, 32000, 1);
Create();
int audio_diff = kFrameTimerMs;
@@ -1341,7 +1349,8 @@ TEST_F(End2EndTest, AudioLogging) {
}
TEST_F(End2EndTest, BasicFakeSoftwareVideo) {
- Configure(transport::kFakeSoftwareVideo, transport::kPcm16, 32000, 1);
+ Configure(CODEC_VIDEO_FAKE, CODEC_AUDIO_PCM16, 32000,
+ 1);
Create();
StartBasicPlayer();
SetReceiverSkew(1.0, base::TimeDelta::FromMilliseconds(1));
@@ -1362,7 +1371,8 @@ TEST_F(End2EndTest, BasicFakeSoftwareVideo) {
}
TEST_F(End2EndTest, ReceiverClockFast) {
- Configure(transport::kFakeSoftwareVideo, transport::kPcm16, 32000, 1);
+ Configure(CODEC_VIDEO_FAKE, CODEC_AUDIO_PCM16, 32000,
+ 1);
Create();
StartBasicPlayer();
SetReceiverSkew(2.0, base::TimeDelta::FromMicroseconds(1234567));
@@ -1377,7 +1387,8 @@ TEST_F(End2EndTest, ReceiverClockFast) {
}
TEST_F(End2EndTest, ReceiverClockSlow) {
- Configure(transport::kFakeSoftwareVideo, transport::kPcm16, 32000, 1);
+ Configure(CODEC_VIDEO_FAKE, CODEC_AUDIO_PCM16, 32000,
+ 1);
Create();
StartBasicPlayer();
SetReceiverSkew(0.5, base::TimeDelta::FromMicroseconds(-765432));
@@ -1392,7 +1403,8 @@ TEST_F(End2EndTest, ReceiverClockSlow) {
}
TEST_F(End2EndTest, SmoothPlayoutWithFivePercentClockRateSkew) {
- Configure(transport::kFakeSoftwareVideo, transport::kPcm16, 32000, 1);
+ Configure(CODEC_VIDEO_FAKE, CODEC_AUDIO_PCM16, 32000,
+ 1);
Create();
StartBasicPlayer();
SetReceiverSkew(1.05, base::TimeDelta::FromMilliseconds(-42));
@@ -1413,7 +1425,8 @@ TEST_F(End2EndTest, SmoothPlayoutWithFivePercentClockRateSkew) {
}
TEST_F(End2EndTest, EvilNetwork) {
- Configure(transport::kFakeSoftwareVideo, transport::kPcm16, 32000, 1);
+ Configure(CODEC_VIDEO_FAKE, CODEC_AUDIO_PCM16, 32000,
+ 1);
receiver_to_sender_.SetPacketPipe(test::EvilNetwork().Pass());
sender_to_receiver_.SetPacketPipe(test::EvilNetwork().Pass());
Create();
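
In the end-to-end tests above, the AES parameters move off rtp_config and sit directly on the sender configs, mirrored onto the receiver configs; they are also what VideoSender copies into CastTransportRtpConfig in the video_sender.cc hunk earlier. A sketch of the CryptoVideo setup with the flattened fields; ConvertFromBase16String is the helper defined near the top of this test file.

    // Sketch: crypto setup after the flattening (values from CryptoVideo above).
    video_sender_config_.aes_iv_mask =
        ConvertFromBase16String("1234567890abcdeffedcba0987654321");
    video_sender_config_.aes_key =
        ConvertFromBase16String("deadbeefcafeb0b0b0b0cafedeadbeef");

    // The receiver must use the same key material.
    video_receiver_config_.aes_iv_mask = video_sender_config_.aes_iv_mask;
    video_receiver_config_.aes_key = video_sender_config_.aes_key;
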
diff --git a/media/cast/test/fake_media_source.cc b/media/cast/test/fake_media_source.cc
new file mode 100644
index 0000000000..07baebe6e3
--- /dev/null
+++ b/media/cast/test/fake_media_source.cc
@@ -0,0 +1,594 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/test/fake_media_source.h"
+
+#include "base/files/memory_mapped_file.h"
+#include "base/files/scoped_file.h"
+#include "base/logging.h"
+#include "base/strings/string_number_conversions.h"
+#include "media/audio/audio_parameters.h"
+#include "media/base/audio_buffer.h"
+#include "media/base/audio_bus.h"
+#include "media/base/audio_fifo.h"
+#include "media/base/audio_timestamp_helper.h"
+#include "media/base/media.h"
+#include "media/base/multi_channel_resampler.h"
+#include "media/base/video_frame.h"
+#include "media/base/video_util.h"
+#include "media/cast/cast_sender.h"
+#include "media/cast/test/utility/audio_utility.h"
+#include "media/cast/test/utility/video_utility.h"
+#include "media/ffmpeg/ffmpeg_common.h"
+#include "media/ffmpeg/ffmpeg_deleters.h"
+#include "media/filters/audio_renderer_algorithm.h"
+#include "media/filters/ffmpeg_demuxer.h"
+#include "media/filters/ffmpeg_glue.h"
+#include "media/filters/in_memory_url_protocol.h"
+#include "ui/gfx/size.h"
+
+namespace {
+
+static const int kAudioChannels = 2;
+static const int kAudioSamplingFrequency = 48000;
+static const int kSoundFrequency = 1234; // Frequency of sinusoid wave.
+static const float kSoundVolume = 0.5f;
+static const int kAudioFrameMs = 10; // Each audio frame is exactly 10ms.
+static const int kAudioPacketsPerSecond = 1000 / kAudioFrameMs;
+
+void AVFreeFrame(AVFrame* frame) {
+ av_frame_free(&frame);
+}
+
+} // namespace
+
+namespace media {
+namespace cast {
+
+FakeMediaSource::FakeMediaSource(
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner,
+ base::TickClock* clock,
+ const VideoSenderConfig& video_config)
+ : task_runner_(task_runner),
+ video_config_(video_config),
+ synthetic_count_(0),
+ clock_(clock),
+ audio_frame_count_(0),
+ video_frame_count_(0),
+ weak_factory_(this),
+ av_format_context_(NULL),
+ audio_stream_index_(-1),
+ playback_rate_(1.0),
+ video_stream_index_(-1),
+ video_frame_rate_numerator_(video_config.max_frame_rate),
+ video_frame_rate_denominator_(1),
+ video_first_pts_(0),
+ video_first_pts_set_(false) {
+ audio_bus_factory_.reset(new TestAudioBusFactory(kAudioChannels,
+ kAudioSamplingFrequency,
+ kSoundFrequency,
+ kSoundVolume));
+}
+
+FakeMediaSource::~FakeMediaSource() {
+}
+
+void FakeMediaSource::SetSourceFile(const base::FilePath& video_file,
+ int override_fps) {
+ DCHECK(!video_file.empty());
+
+ if (override_fps) {
+ video_config_.max_frame_rate = override_fps;
+ video_frame_rate_numerator_ = override_fps;
+ }
+
+ LOG(INFO) << "Source: " << video_file.value();
+ if (!file_data_.Initialize(video_file)) {
+ LOG(ERROR) << "Cannot load file.";
+ return;
+ }
+ protocol_.reset(
+ new InMemoryUrlProtocol(file_data_.data(), file_data_.length(), false));
+ glue_.reset(new FFmpegGlue(protocol_.get()));
+
+ if (!glue_->OpenContext()) {
+ LOG(ERROR) << "Cannot open file.";
+ return;
+ }
+
+ // AVFormatContext is owned by the glue.
+ av_format_context_ = glue_->format_context();
+ if (avformat_find_stream_info(av_format_context_, NULL) < 0) {
+ LOG(ERROR) << "Cannot find stream information.";
+ return;
+ }
+
+ // Prepare FFmpeg decoders.
+ for (unsigned int i = 0; i < av_format_context_->nb_streams; ++i) {
+ AVStream* av_stream = av_format_context_->streams[i];
+ AVCodecContext* av_codec_context = av_stream->codec;
+ AVCodec* av_codec = avcodec_find_decoder(av_codec_context->codec_id);
+
+ if (!av_codec) {
+ LOG(ERROR) << "Cannot find decoder for the codec: "
+ << av_codec_context->codec_id;
+ continue;
+ }
+
+ // Number of threads for decoding.
+ av_codec_context->thread_count = 2;
+ av_codec_context->error_concealment = FF_EC_GUESS_MVS | FF_EC_DEBLOCK;
+ av_codec_context->request_sample_fmt = AV_SAMPLE_FMT_S16;
+
+ if (avcodec_open2(av_codec_context, av_codec, NULL) < 0) {
+ LOG(ERROR) << "Cannot open AVCodecContext for the codec: "
+ << av_codec_context->codec_id;
+ return;
+ }
+
+ if (av_codec->type == AVMEDIA_TYPE_AUDIO) {
+ if (av_codec_context->sample_fmt == AV_SAMPLE_FMT_S16P) {
+ LOG(ERROR) << "Audio format not supported.";
+ continue;
+ }
+ ChannelLayout layout = ChannelLayoutToChromeChannelLayout(
+ av_codec_context->channel_layout,
+ av_codec_context->channels);
+ if (layout == CHANNEL_LAYOUT_UNSUPPORTED) {
+ LOG(ERROR) << "Unsupported audio channels layout.";
+ continue;
+ }
+ if (audio_stream_index_ != -1) {
+ LOG(WARNING) << "Found multiple audio streams.";
+ }
+ audio_stream_index_ = static_cast<int>(i);
+ audio_params_.Reset(
+ AudioParameters::AUDIO_PCM_LINEAR,
+ layout,
+ av_codec_context->channels,
+ av_codec_context->channels,
+ av_codec_context->sample_rate,
+ 8 * av_get_bytes_per_sample(av_codec_context->sample_fmt),
+ av_codec_context->sample_rate / kAudioPacketsPerSecond);
+ LOG(INFO) << "Source file has audio.";
+ } else if (av_codec->type == AVMEDIA_TYPE_VIDEO) {
+ VideoFrame::Format format =
+ PixelFormatToVideoFormat(av_codec_context->pix_fmt);
+ if (format != VideoFrame::YV12) {
+ LOG(ERROR) << "Cannot handle non YV12 video format: " << format;
+ continue;
+ }
+ if (video_stream_index_ != -1) {
+ LOG(WARNING) << "Found multiple video streams.";
+ }
+ video_stream_index_ = static_cast<int>(i);
+ if (!override_fps) {
+ video_frame_rate_numerator_ = av_stream->r_frame_rate.num;
+ video_frame_rate_denominator_ = av_stream->r_frame_rate.den;
+ // Max frame rate is rounded up.
+ video_config_.max_frame_rate =
+ video_frame_rate_denominator_ +
+ video_frame_rate_numerator_ - 1;
+ video_config_.max_frame_rate /= video_frame_rate_denominator_;
+ } else {
+ // If video is played back at an overridden rate, the audio needs to match.
+ playback_rate_ = 1.0 * override_fps *
+ av_stream->r_frame_rate.den / av_stream->r_frame_rate.num;
+ }
+ LOG(INFO) << "Source file has video.";
+ } else {
+ LOG(ERROR) << "Unknown stream type; ignore.";
+ }
+ }
+
+ Rewind();
+}
+
+void FakeMediaSource::Start(scoped_refptr<AudioFrameInput> audio_frame_input,
+ scoped_refptr<VideoFrameInput> video_frame_input) {
+ audio_frame_input_ = audio_frame_input;
+ video_frame_input_ = video_frame_input;
+
+ LOG(INFO) << "Max Frame rate: " << video_config_.max_frame_rate;
+ LOG(INFO) << "Real Frame rate: "
+ << video_frame_rate_numerator_ << "/"
+ << video_frame_rate_denominator_ << " fps.";
+ LOG(INFO) << "Audio playback rate: " << playback_rate_;
+
+ if (!is_transcoding_audio() && !is_transcoding_video()) {
+ // Send fake patterns.
+ task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(
+ &FakeMediaSource::SendNextFakeFrame,
+ base::Unretained(this)));
+ return;
+ }
+
+ // Send transcoding streams.
+ audio_algo_.Initialize(playback_rate_, audio_params_);
+ audio_algo_.FlushBuffers();
+ audio_fifo_input_bus_ =
+ AudioBus::Create(
+ audio_params_.channels(), audio_params_.frames_per_buffer());
+ // Audio FIFO can carry all data from AudioRendererAlgorithm.
+ audio_fifo_.reset(
+ new AudioFifo(audio_params_.channels(),
+ audio_algo_.QueueCapacity()));
+ audio_resampler_.reset(new media::MultiChannelResampler(
+ audio_params_.channels(),
+ static_cast<double>(audio_params_.sample_rate()) /
+ kAudioSamplingFrequency,
+ audio_params_.frames_per_buffer(),
+ base::Bind(&FakeMediaSource::ProvideData, base::Unretained(this))));
+ task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(
+ &FakeMediaSource::SendNextFrame,
+ base::Unretained(this)));
+}
+
+void FakeMediaSource::SendNextFakeFrame() {
+ gfx::Size size(video_config_.width, video_config_.height);
+ scoped_refptr<VideoFrame> video_frame =
+ VideoFrame::CreateBlackFrame(size);
+ PopulateVideoFrame(video_frame, synthetic_count_);
+ ++synthetic_count_;
+
+ base::TimeTicks now = clock_->NowTicks();
+ if (start_time_.is_null())
+ start_time_ = now;
+
+ base::TimeDelta video_time = VideoFrameTime(++video_frame_count_);
+ video_frame->set_timestamp(video_time);
+ video_frame_input_->InsertRawVideoFrame(video_frame,
+ start_time_ + video_time);
+
+ // Send just enough audio data to match next video frame's time.
+ base::TimeDelta audio_time = AudioFrameTime(audio_frame_count_);
+ while (audio_time < video_time) {
+ if (is_transcoding_audio()) {
+ Decode(true);
+ CHECK(!audio_bus_queue_.empty()) << "No audio decoded.";
+ scoped_ptr<AudioBus> bus(audio_bus_queue_.front());
+ audio_bus_queue_.pop();
+ audio_frame_input_->InsertAudio(
+ bus.Pass(), start_time_ + audio_time);
+ } else {
+ audio_frame_input_->InsertAudio(
+ audio_bus_factory_->NextAudioBus(
+ base::TimeDelta::FromMilliseconds(kAudioFrameMs)),
+ start_time_ + audio_time);
+ }
+ audio_time = AudioFrameTime(++audio_frame_count_);
+ }
+
+ // This is the time since the stream started.
+ const base::TimeDelta elapsed_time = now - start_time_;
+
+ // Handle the case when frame generation cannot keep up.
+ // Move the time ahead to match the next frame.
+ while (video_time < elapsed_time) {
+ LOG(WARNING) << "Skipping one frame.";
+ video_time = VideoFrameTime(++video_frame_count_);
+ }
+
+ task_runner_->PostDelayedTask(
+ FROM_HERE,
+ base::Bind(&FakeMediaSource::SendNextFakeFrame,
+ weak_factory_.GetWeakPtr()),
+ video_time - elapsed_time);
+}
+
+bool FakeMediaSource::SendNextTranscodedVideo(base::TimeDelta elapsed_time) {
+ if (!is_transcoding_video())
+ return false;
+
+ Decode(false);
+ if (video_frame_queue_.empty())
+ return false;
+
+ scoped_refptr<VideoFrame> decoded_frame =
+ video_frame_queue_.front();
+ if (elapsed_time < decoded_frame->timestamp())
+ return false;
+
+ gfx::Size size(video_config_.width, video_config_.height);
+ scoped_refptr<VideoFrame> video_frame =
+ VideoFrame::CreateBlackFrame(size);
+ video_frame_queue_.pop();
+ media::CopyPlane(VideoFrame::kYPlane,
+ decoded_frame->data(VideoFrame::kYPlane),
+ decoded_frame->stride(VideoFrame::kYPlane),
+ decoded_frame->rows(VideoFrame::kYPlane),
+ video_frame);
+ media::CopyPlane(VideoFrame::kUPlane,
+ decoded_frame->data(VideoFrame::kUPlane),
+ decoded_frame->stride(VideoFrame::kUPlane),
+ decoded_frame->rows(VideoFrame::kUPlane),
+ video_frame);
+ media::CopyPlane(VideoFrame::kVPlane,
+ decoded_frame->data(VideoFrame::kVPlane),
+ decoded_frame->stride(VideoFrame::kVPlane),
+ decoded_frame->rows(VideoFrame::kVPlane),
+ video_frame);
+
+ base::TimeDelta video_time;
+ // Use the timestamp from the file if we're transcoding.
+ video_time = ScaleTimestamp(decoded_frame->timestamp());
+ video_frame_input_->InsertRawVideoFrame(
+ video_frame, start_time_ + video_time);
+
+ // Make sure queue is not empty.
+ Decode(false);
+ return true;
+}
+
+bool FakeMediaSource::SendNextTranscodedAudio(base::TimeDelta elapsed_time) {
+ if (!is_transcoding_audio())
+ return false;
+
+ Decode(true);
+ if (audio_bus_queue_.empty())
+ return false;
+
+ base::TimeDelta audio_time = audio_sent_ts_->GetTimestamp();
+ if (elapsed_time < audio_time)
+ return false;
+ scoped_ptr<AudioBus> bus(audio_bus_queue_.front());
+ audio_bus_queue_.pop();
+ audio_sent_ts_->AddFrames(bus->frames());
+ audio_frame_input_->InsertAudio(
+ bus.Pass(), start_time_ + audio_time);
+
+ // Make sure queue is not empty.
+ Decode(true);
+ return true;
+}
+
+void FakeMediaSource::SendNextFrame() {
+ if (start_time_.is_null())
+ start_time_ = clock_->NowTicks();
+
+ // Send as much as possible. Audio is sent according to
+ // system time.
+ while (SendNextTranscodedAudio(clock_->NowTicks() - start_time_));
+
+ // Video is sync'ed to audio.
+ while (SendNextTranscodedVideo(audio_sent_ts_->GetTimestamp()));
+
+ if (audio_bus_queue_.empty() && video_frame_queue_.empty()) {
+ // Both queues being empty can only mean that we have reached
+ // the end of the stream.
+ LOG(INFO) << "Rewind.";
+ Rewind();
+ start_time_ = base::TimeTicks();
+ audio_sent_ts_.reset();
+ video_first_pts_set_ = false;
+ }
+
+ // Schedule the next send.
+ task_runner_->PostDelayedTask(
+ FROM_HERE,
+ base::Bind(
+ &FakeMediaSource::SendNextFrame,
+ base::Unretained(this)),
+ base::TimeDelta::FromMilliseconds(kAudioFrameMs));
+}
+
+base::TimeDelta FakeMediaSource::VideoFrameTime(int frame_number) {
+ return frame_number * base::TimeDelta::FromSeconds(1) *
+ video_frame_rate_denominator_ / video_frame_rate_numerator_;
+}
+
+base::TimeDelta FakeMediaSource::ScaleTimestamp(base::TimeDelta timestamp) {
+ return base::TimeDelta::FromMicroseconds(
+ timestamp.InMicroseconds() / playback_rate_);
+}
+
+base::TimeDelta FakeMediaSource::AudioFrameTime(int frame_number) {
+ return frame_number * base::TimeDelta::FromMilliseconds(kAudioFrameMs);
+}
+
+void FakeMediaSource::Rewind() {
+ CHECK(av_seek_frame(av_format_context_, -1, 0, AVSEEK_FLAG_BACKWARD) >= 0)
+ << "Failed to rewind to the beginning.";
+}
+
+ScopedAVPacket FakeMediaSource::DemuxOnePacket(bool* audio) {
+ ScopedAVPacket packet(new AVPacket());
+ if (av_read_frame(av_format_context_, packet.get()) < 0) {
+ LOG(ERROR) << "Failed to read one AVPacket.";
+ packet.reset();
+ return packet.Pass();
+ }
+
+ int stream_index = static_cast<int>(packet->stream_index);
+ if (stream_index == audio_stream_index_) {
+ *audio = true;
+ } else if (stream_index == video_stream_index_) {
+ *audio = false;
+ } else {
+ // Ignore unknown packet.
+ LOG(INFO) << "Unknown packet.";
+ packet.reset();
+ }
+ return packet.Pass();
+}
+
+void FakeMediaSource::DecodeAudio(ScopedAVPacket packet) {
+ // Audio.
+ AVFrame* avframe = av_frame_alloc();
+
+ // Make a shallow copy of packet so we can slide packet.data as frames are
+ // decoded from the packet; otherwise av_free_packet() will corrupt memory.
+ AVPacket packet_temp = *packet.get();
+
+ do {
+ int frame_decoded = 0;
+ int result = avcodec_decode_audio4(
+ av_audio_context(), avframe, &frame_decoded, &packet_temp);
+ CHECK(result >= 0) << "Failed to decode audio.";
+ packet_temp.size -= result;
+ packet_temp.data += result;
+ if (!frame_decoded)
+ continue;
+
+ int frames_read = avframe->nb_samples;
+ if (frames_read < 0)
+ break;
+
+ if (!audio_sent_ts_) {
+ // Initialize the base time to the first packet in the file.
+ // The helper uses the frequency we send to the receiver, not the
+ // frequency of the source file, because we increment the frame
+ // count by the number of samples we sent.
+ audio_sent_ts_.reset(
+ new AudioTimestampHelper(kAudioSamplingFrequency));
+ // For some files this is an invalid value.
+ base::TimeDelta base_ts;
+ audio_sent_ts_->SetBaseTimestamp(base_ts);
+ }
+
+ scoped_refptr<AudioBuffer> buffer =
+ AudioBuffer::CopyFrom(
+ AVSampleFormatToSampleFormat(
+ av_audio_context()->sample_fmt),
+ ChannelLayoutToChromeChannelLayout(
+ av_audio_context()->channel_layout,
+ av_audio_context()->channels),
+ av_audio_context()->channels,
+ av_audio_context()->sample_rate,
+ frames_read,
+ &avframe->data[0],
+ // Note: Not all files have correct values for pkt_pts.
+ base::TimeDelta::FromMilliseconds(avframe->pkt_pts));
+ audio_algo_.EnqueueBuffer(buffer);
+ av_frame_unref(avframe);
+ } while (packet_temp.size > 0);
+ av_frame_free(&avframe);
+
+ const int frames_needed_to_scale =
+ playback_rate_ * av_audio_context()->sample_rate /
+ kAudioPacketsPerSecond;
+ while (frames_needed_to_scale <= audio_algo_.frames_buffered()) {
+ if (!audio_algo_.FillBuffer(audio_fifo_input_bus_.get(),
+ audio_fifo_input_bus_->frames())) {
+ // Nothing can be scaled. Decode some more.
+ return;
+ }
+
+ // Prevent overflow of audio data in the FIFO.
+ if (audio_fifo_input_bus_->frames() + audio_fifo_->frames()
+ <= audio_fifo_->max_frames()) {
+ audio_fifo_->Push(audio_fifo_input_bus_.get());
+ } else {
+ LOG(WARNING) << "Audio FIFO full; dropping samples.";
+ }
+
+ // Make sure there's enough data to resample audio.
+ if (audio_fifo_->frames() <
+ 2 * audio_params_.sample_rate() / kAudioPacketsPerSecond) {
+ continue;
+ }
+
+ scoped_ptr<media::AudioBus> resampled_bus(
+ media::AudioBus::Create(
+ audio_params_.channels(),
+ kAudioSamplingFrequency / kAudioPacketsPerSecond));
+ audio_resampler_->Resample(resampled_bus->frames(),
+ resampled_bus.get());
+ audio_bus_queue_.push(resampled_bus.release());
+ }
+}
+
+void FakeMediaSource::DecodeVideo(ScopedAVPacket packet) {
+ // Video.
+ int got_picture;
+ AVFrame* avframe = av_frame_alloc();
+ // Tell the decoder to reorder for us.
+ avframe->reordered_opaque =
+ av_video_context()->reordered_opaque = packet->pts;
+ CHECK(avcodec_decode_video2(
+ av_video_context(), avframe, &got_picture, packet.get()) >= 0)
+ << "Video decode error.";
+ if (!got_picture) {
+ av_frame_free(&avframe);
+ return;
+ }
+ gfx::Size size(av_video_context()->width, av_video_context()->height);
+ if (!video_first_pts_set_ ||
+ avframe->reordered_opaque < video_first_pts_) {
+ video_first_pts_set_ = true;
+ video_first_pts_ = avframe->reordered_opaque;
+ }
+ int64 pts = avframe->reordered_opaque - video_first_pts_;
+ video_frame_queue_.push(
+ VideoFrame::WrapExternalYuvData(
+ media::VideoFrame::YV12,
+ size,
+ gfx::Rect(size),
+ size,
+ avframe->linesize[0],
+ avframe->linesize[1],
+ avframe->linesize[2],
+ avframe->data[0],
+ avframe->data[1],
+ avframe->data[2],
+ base::TimeDelta::FromMilliseconds(pts),
+ base::Bind(&AVFreeFrame, avframe)));
+}
+
+void FakeMediaSource::Decode(bool decode_audio) {
+ // Read the stream until one video frame can be decoded.
+ while (true) {
+ if (decode_audio && !audio_bus_queue_.empty())
+ return;
+ if (!decode_audio && !video_frame_queue_.empty())
+ return;
+
+ bool audio_packet = false;
+ ScopedAVPacket packet = DemuxOnePacket(&audio_packet);
+ if (!packet) {
+ LOG(INFO) << "End of stream.";
+ return;
+ }
+
+ if (audio_packet)
+ DecodeAudio(packet.Pass());
+ else
+ DecodeVideo(packet.Pass());
+ }
+}
+
+void FakeMediaSource::ProvideData(int frame_delay,
+ media::AudioBus* output_bus) {
+ if (audio_fifo_->frames() >= output_bus->frames()) {
+ audio_fifo_->Consume(output_bus, 0, output_bus->frames());
+ } else {
+ LOG(WARNING) << "Not enough audio data for resampling.";
+ output_bus->Zero();
+ }
+}
+
+AVStream* FakeMediaSource::av_audio_stream() {
+ return av_format_context_->streams[audio_stream_index_];
+}
+
+AVStream* FakeMediaSource::av_video_stream() {
+ return av_format_context_->streams[video_stream_index_];
+}
+
+AVCodecContext* FakeMediaSource::av_audio_context() {
+ return av_audio_stream()->codec;
+}
+
+AVCodecContext* FakeMediaSource::av_video_context() {
+ return av_video_stream()->codec;
+}
+
+} // namespace cast
+} // namespace media
diff --git a/media/cast/test/fake_media_source.h b/media/cast/test/fake_media_source.h
new file mode 100644
index 0000000000..b18d44ea2a
--- /dev/null
+++ b/media/cast/test/fake_media_source.h
@@ -0,0 +1,152 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// A fake media source that feeds video and audio frames to a cast sender.
+// It can either transcode a WebM file using FFmpeg or generate a synthetic
+// animation together with a fixed-frequency audio tone.
+
+#ifndef MEDIA_CAST_TEST_FAKE_MEDIA_SOURCE_H_
+#define MEDIA_CAST_TEST_FAKE_MEDIA_SOURCE_H_
+
+#include <queue>
+
+#include "base/files/file_path.h"
+#include "base/files/memory_mapped_file.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/weak_ptr.h"
+#include "base/single_thread_task_runner.h"
+#include "base/time/tick_clock.h"
+#include "media/audio/audio_parameters.h"
+#include "media/cast/cast_config.h"
+#include "media/filters/audio_renderer_algorithm.h"
+#include "media/filters/ffmpeg_demuxer.h"
+
+struct AVCodecContext;
+struct AVFormatContext;
+
+namespace media {
+
+class AudioBus;
+class AudioFifo;
+class AudioTimestampHelper;
+class FFmpegGlue;
+class InMemoryUrlProtocol;
+class MultiChannelResampler;
+
+namespace cast {
+
+class AudioFrameInput;
+class VideoFrameInput;
+class TestAudioBusFactory;
+
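+// A minimal usage sketch (illustrative only; |cast_sender| stands for an
+// already-initialized CastSender):
+//
+//   FakeMediaSource source(task_runner, clock, video_config);
+//   source.SetSourceFile(path, 0);  // 0 keeps the file's own frame rate.
+//   source.Start(cast_sender->audio_frame_input(),
+//                cast_sender->video_frame_input());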
+class FakeMediaSource {
+ public:
+  // |task_runner| is used to schedule decoding tasks.
+ // |clock| is used by this source but is not owned.
+ // |video_config| is the desired video config.
+ FakeMediaSource(scoped_refptr<base::SingleThreadTaskRunner> task_runner,
+ base::TickClock* clock,
+ const VideoSenderConfig& video_config);
+ ~FakeMediaSource();
+
+ // Transcode this file as the source of video and audio frames.
+  // If |override_fps| is non-zero then the file is played back at that rate.
+ void SetSourceFile(const base::FilePath& video_file, int override_fps);
+
+ void Start(scoped_refptr<AudioFrameInput> audio_frame_input,
+ scoped_refptr<VideoFrameInput> video_frame_input);
+
+ const VideoSenderConfig& get_video_config() const { return video_config_; }
+
+ private:
+ bool is_transcoding_audio() const { return audio_stream_index_ >= 0; }
+ bool is_transcoding_video() const { return video_stream_index_ >= 0; }
+
+ void SendNextFrame();
+ void SendNextFakeFrame();
+
+ // Return true if a frame was sent.
+ bool SendNextTranscodedVideo(base::TimeDelta elapsed_time);
+
+ // Return true if a frame was sent.
+ bool SendNextTranscodedAudio(base::TimeDelta elapsed_time);
+
+ // Helper methods to compute timestamps for the frame number specified.
+ base::TimeDelta VideoFrameTime(int frame_number);
+
+ base::TimeDelta ScaleTimestamp(base::TimeDelta timestamp);
+
+ base::TimeDelta AudioFrameTime(int frame_number);
+
+ // Go to the beginning of the stream.
+ void Rewind();
+
+ // Call FFmpeg to fetch one packet.
+ ScopedAVPacket DemuxOnePacket(bool* audio);
+
+ void DecodeAudio(ScopedAVPacket packet);
+ void DecodeVideo(ScopedAVPacket packet);
+ void Decode(bool decode_audio);
+
+ void ProvideData(int frame_delay, media::AudioBus* output_bus);
+
+ AVStream* av_audio_stream();
+ AVStream* av_video_stream();
+ AVCodecContext* av_audio_context();
+ AVCodecContext* av_video_context();
+
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
+ VideoSenderConfig video_config_;
+ scoped_refptr<AudioFrameInput> audio_frame_input_;
+ scoped_refptr<VideoFrameInput> video_frame_input_;
+ uint8 synthetic_count_;
+ base::TickClock* const clock_; // Not owned by this class.
+
+ // Time when the stream starts.
+ base::TimeTicks start_time_;
+
+ // The following three members are used only for fake frames.
+ int audio_frame_count_; // Each audio frame is exactly 10ms.
+ int video_frame_count_;
+ scoped_ptr<TestAudioBusFactory> audio_bus_factory_;
+
+ // NOTE: Weak pointers must be invalidated before all other member variables.
+ base::WeakPtrFactory<FakeMediaSource> weak_factory_;
+
+ base::MemoryMappedFile file_data_;
+ scoped_ptr<InMemoryUrlProtocol> protocol_;
+ scoped_ptr<FFmpegGlue> glue_;
+ AVFormatContext* av_format_context_;
+
+ int audio_stream_index_;
+ AudioParameters audio_params_;
+ double playback_rate_;
+
+ int video_stream_index_;
+ int video_frame_rate_numerator_;
+ int video_frame_rate_denominator_;
+
+ // These are used for audio resampling.
+ scoped_ptr<media::MultiChannelResampler> audio_resampler_;
+ scoped_ptr<media::AudioFifo> audio_fifo_;
+ scoped_ptr<media::AudioBus> audio_fifo_input_bus_;
+ media::AudioRendererAlgorithm audio_algo_;
+
+ // Track the timestamp of audio sent to the receiver.
+ scoped_ptr<media::AudioTimestampHelper> audio_sent_ts_;
+
+ std::queue<scoped_refptr<VideoFrame> > video_frame_queue_;
+ int64 video_first_pts_;
+ bool video_first_pts_set_;
+
+ std::queue<AudioBus*> audio_bus_queue_;
+
+ DISALLOW_COPY_AND_ASSIGN(FakeMediaSource);
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_TEST_FAKE_MEDIA_SOURCE_H_
diff --git a/media/cast/test/loopback_transport.cc b/media/cast/test/loopback_transport.cc
new file mode 100644
index 0000000000..3b72a7ef5a
--- /dev/null
+++ b/media/cast/test/loopback_transport.cc
@@ -0,0 +1,68 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/test/loopback_transport.h"
+
+#include "base/single_thread_task_runner.h"
+#include "base/time/tick_clock.h"
+#include "media/cast/test/utility/udp_proxy.h"
+
+namespace media {
+namespace cast {
+namespace {
+
+// Shim that forwards packets from a test::PacketPipe to a
+// PacketReceiverCallback.
+class LoopBackPacketPipe : public test::PacketPipe {
+ public:
+ LoopBackPacketPipe(
+ const PacketReceiverCallback& packet_receiver)
+ : packet_receiver_(packet_receiver) {}
+
+ virtual ~LoopBackPacketPipe() {}
+
+ // PacketPipe implementations.
+ virtual void Send(scoped_ptr<Packet> packet) OVERRIDE {
+ packet_receiver_.Run(packet.Pass());
+ }
+
+ private:
+ PacketReceiverCallback packet_receiver_;
+
+ DISALLOW_COPY_AND_ASSIGN(LoopBackPacketPipe);
+};
+
+} // namespace
+
+LoopBackTransport::LoopBackTransport(
+ scoped_refptr<CastEnvironment> cast_environment)
+ : cast_environment_(cast_environment) {
+}
+
+LoopBackTransport::~LoopBackTransport() {
+}
+
+bool LoopBackTransport::SendPacket(PacketRef packet,
+ const base::Closure& cb) {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ scoped_ptr<Packet> packet_copy(new Packet(packet->data));
+ packet_pipe_->Send(packet_copy.Pass());
+ return true;
+}
+
+void LoopBackTransport::Initialize(
+ scoped_ptr<test::PacketPipe> pipe,
+ const PacketReceiverCallback& packet_receiver,
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
+ base::TickClock* clock) {
+ scoped_ptr<test::PacketPipe> loopback_pipe(
+ new LoopBackPacketPipe(packet_receiver));
+ // Append the loopback pipe to the end.
+ pipe->AppendToPipe(loopback_pipe.Pass());
+ packet_pipe_ = pipe.Pass();
+ packet_pipe_->InitOnIOThread(task_runner, clock);
+}
+
+} // namespace cast
+} // namespace media
diff --git a/media/cast/test/loopback_transport.h b/media/cast/test/loopback_transport.h
new file mode 100644
index 0000000000..8942caa4b9
--- /dev/null
+++ b/media/cast/test/loopback_transport.h
@@ -0,0 +1,55 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_TEST_LOOPBACK_TRANSPORT_H_
+#define MEDIA_CAST_TEST_LOOPBACK_TRANSPORT_H_
+
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "media/cast/cast_environment.h"
+#include "media/cast/net/cast_transport_config.h"
+
+namespace base {
+class SingleThreadTaskRunner;
+class TickClock;
+} // namespace base
+
+namespace media {
+namespace cast {
+
+namespace test {
+class PacketPipe;
+} // namespace test
+
+// Class that sends packets to a receiver through a stack of PacketPipes.
+class LoopBackTransport : public PacketSender {
+ public:
+ explicit LoopBackTransport(
+ scoped_refptr<CastEnvironment> cast_environment);
+ virtual ~LoopBackTransport();
+
+ virtual bool SendPacket(PacketRef packet,
+ const base::Closure& cb) OVERRIDE;
+
+  // Initialize this loopback transport.
+ // Establish a flow of packets from |pipe| to |packet_receiver|.
+ // The data flow looks like:
+ // SendPacket() -> |pipe| -> Fake loopback pipe -> |packet_receiver|.
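+  //
+  // For example, simulator.cc wires a sender-to-receiver link roughly like
+  // this (names are illustrative):
+  //   LoopBackTransport sender_to_receiver(sender_env);
+  //   sender_to_receiver.Initialize(ipp.NewBuffer(128 * 1024),
+  //                                 cast_receiver->packet_receiver(),
+  //                                 task_runner, clock);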
+ void Initialize(
+ scoped_ptr<test::PacketPipe> pipe,
+ const PacketReceiverCallback& packet_receiver,
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
+ base::TickClock* clock);
+
+ private:
+ const scoped_refptr<CastEnvironment> cast_environment_;
+ scoped_ptr<test::PacketPipe> packet_pipe_;
+
+ DISALLOW_COPY_AND_ASSIGN(LoopBackTransport);
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_TEST_LOOPBACK_TRANSPORT_H_
diff --git a/media/cast/test/proto/BUILD.gn b/media/cast/test/proto/BUILD.gn
new file mode 100644
index 0000000000..7ac71d2e7f
--- /dev/null
+++ b/media/cast/test/proto/BUILD.gn
@@ -0,0 +1,14 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//third_party/protobuf/proto_library.gni")
+
+# GYP version: media/cast/cast.gyp:cast_logging_proto
+proto_library("cast_network_simulation_proto") {
+ visibility = ":proto"
+ sources = [
+ "network_simulation_model.proto",
+ ]
+}
+
diff --git a/media/cast/test/proto/network_simulation_model.proto b/media/cast/test/proto/network_simulation_model.proto
new file mode 100644
index 0000000000..902712f7fa
--- /dev/null
+++ b/media/cast/test/proto/network_simulation_model.proto
@@ -0,0 +1,27 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Contains parameters for a network simulation model.
+
+syntax = "proto2";
+
+option optimize_for = LITE_RUNTIME;
+
+package media.cast.proto;
+
+message NetworkSimulationModel {
+ optional NetworkSimulationModelType type = 1;
+ optional IPPModel ipp = 2;
+}
+
+enum NetworkSimulationModelType {
+ INTERRUPTED_POISSON_PROCESS = 1;
+}
+
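+// Parameters of the interrupted Poisson process that drives the simulated
+// network (see test::InterruptedPoissonProcess in the test utilities).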
+message IPPModel {
+ optional double coef_burstiness = 1;
+ optional double coef_variance = 2;
+ repeated double average_rate = 3;
+}
+
diff --git a/media/cast/test/receiver.cc b/media/cast/test/receiver.cc
index 9861d3834e..e87055420f 100644
--- a/media/cast/test/receiver.cc
+++ b/media/cast/test/receiver.cc
@@ -33,13 +33,13 @@
#include "media/cast/cast_environment.h"
#include "media/cast/cast_receiver.h"
#include "media/cast/logging/logging_defines.h"
+#include "media/cast/net/udp_transport.h"
#include "media/cast/test/utility/audio_utility.h"
#include "media/cast/test/utility/barcode.h"
#include "media/cast/test/utility/default_config.h"
#include "media/cast/test/utility/in_process_receiver.h"
#include "media/cast/test/utility/input_builder.h"
#include "media/cast/test/utility/standalone_cast_environment.h"
-#include "media/cast/transport/transport/udp_transport.h"
#include "net/base/net_util.h"
#if defined(OS_LINUX)
diff --git a/media/cast/test/sender.cc b/media/cast/test/sender.cc
index e457e2c9fe..6f9074f17e 100644
--- a/media/cast/test/sender.cc
+++ b/media/cast/test/sender.cc
@@ -10,10 +10,7 @@
#include "base/at_exit.h"
#include "base/base_paths.h"
#include "base/command_line.h"
-#include "base/file_util.h"
#include "base/files/file_path.h"
-#include "base/files/memory_mapped_file.h"
-#include "base/files/scoped_file.h"
#include "base/json/json_writer.h"
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
@@ -22,15 +19,8 @@
#include "base/threading/thread.h"
#include "base/time/default_tick_clock.h"
#include "base/values.h"
-#include "media/audio/audio_parameters.h"
-#include "media/base/audio_buffer.h"
-#include "media/base/audio_bus.h"
-#include "media/base/audio_fifo.h"
-#include "media/base/audio_timestamp_helper.h"
#include "media/base/media.h"
-#include "media/base/multi_channel_resampler.h"
#include "media/base/video_frame.h"
-#include "media/base/video_util.h"
#include "media/cast/cast_config.h"
#include "media/cast/cast_environment.h"
#include "media/cast/cast_sender.h"
@@ -40,28 +30,16 @@
#include "media/cast/logging/proto/raw_events.pb.h"
#include "media/cast/logging/receiver_time_offset_estimator_impl.h"
#include "media/cast/logging/stats_event_subscriber.h"
-#include "media/cast/test/utility/audio_utility.h"
+#include "media/cast/net/cast_transport_defines.h"
+#include "media/cast/net/cast_transport_sender.h"
+#include "media/cast/net/udp_transport.h"
+#include "media/cast/test/fake_media_source.h"
#include "media/cast/test/utility/default_config.h"
#include "media/cast/test/utility/input_builder.h"
-#include "media/cast/test/utility/video_utility.h"
-#include "media/cast/transport/cast_transport_defines.h"
-#include "media/cast/transport/cast_transport_sender.h"
-#include "media/cast/transport/transport/udp_transport.h"
-#include "media/ffmpeg/ffmpeg_common.h"
-#include "media/ffmpeg/ffmpeg_deleters.h"
-#include "media/filters/audio_renderer_algorithm.h"
-#include "media/filters/ffmpeg_demuxer.h"
-#include "media/filters/ffmpeg_glue.h"
-#include "media/filters/in_memory_url_protocol.h"
-#include "ui/gfx/size.h"
namespace {
static const int kAudioChannels = 2;
static const int kAudioSamplingFrequency = 48000;
-static const int kSoundFrequency = 1234; // Frequency of sinusoid wave.
-static const float kSoundVolume = 0.5f;
-static const int kAudioFrameMs = 10; // Each audio frame is exactly 10ms.
-static const int kAudioPacketsPerSecond = 1000 / kAudioFrameMs;
// The max allowed size of serialized log.
const int kMaxSerializedLogBytes = 10 * 1000 * 1000;
@@ -79,36 +57,32 @@ const int kMaxSerializedLogBytes = 10 * 1000 * 1000;
//
// --fps=xx
// Override framerate of the video stream.
-
const char kSwitchAddress[] = "address";
const char kSwitchPort[] = "port";
const char kSwitchSourceFile[] = "source-file";
const char kSwitchFps[] = "fps";
-} // namespace
-
-namespace media {
-namespace cast {
-
-AudioSenderConfig GetAudioSenderConfig() {
- AudioSenderConfig audio_config;
+media::cast::AudioSenderConfig GetAudioSenderConfig() {
+ media::cast::AudioSenderConfig audio_config;
audio_config.rtcp_c_name = "audio_sender@a.b.c.d";
audio_config.use_external_encoder = false;
audio_config.frequency = kAudioSamplingFrequency;
audio_config.channels = kAudioChannels;
- audio_config.bitrate = 64000;
- audio_config.codec = transport::kOpus;
- audio_config.rtp_config.ssrc = 1;
+ audio_config.bitrate = 0; // Use Opus auto-VBR mode.
+ audio_config.codec = media::cast::CODEC_AUDIO_OPUS;
+ audio_config.ssrc = 1;
audio_config.incoming_feedback_ssrc = 2;
- audio_config.rtp_config.payload_type = 127;
- audio_config.rtp_config.max_delay_ms = 300;
+ audio_config.rtp_payload_type = 127;
+ // TODO(miu): The default in cast_defines.h is 100. Should this be 100, and
+ // should receiver.cc's config also be 100?
+ audio_config.target_playout_delay = base::TimeDelta::FromMilliseconds(300);
return audio_config;
}
-VideoSenderConfig GetVideoSenderConfig() {
- VideoSenderConfig video_config;
+media::cast::VideoSenderConfig GetVideoSenderConfig() {
+ media::cast::VideoSenderConfig video_config;
video_config.rtcp_c_name = "video_sender@a.b.c.d";
video_config.use_external_encoder = false;
@@ -124,7 +98,7 @@ VideoSenderConfig GetVideoSenderConfig() {
video_config.start_bitrate = video_config.min_bitrate;
// Codec.
- video_config.codec = transport::kVp8;
+ video_config.codec = media::cast::CODEC_VIDEO_VP8;
video_config.max_number_of_video_buffers_used = 1;
video_config.number_of_encode_threads = 2;
@@ -133,620 +107,17 @@ VideoSenderConfig GetVideoSenderConfig() {
video_config.max_qp = 40;
// SSRCs and payload type. Don't change them.
- video_config.rtp_config.ssrc = 11;
+ video_config.ssrc = 11;
video_config.incoming_feedback_ssrc = 12;
- video_config.rtp_config.payload_type = 96;
- video_config.rtp_config.max_delay_ms = 300;
+ video_config.rtp_payload_type = 96;
+ // TODO(miu): The default in cast_defines.h is 100. Should this be 100, and
+ // should receiver.cc's config also be 100?
+ video_config.target_playout_delay = base::TimeDelta::FromMilliseconds(300);
return video_config;
}
-void AVFreeFrame(AVFrame* frame) { av_frame_free(&frame); }
-
-class SendProcess {
- public:
- SendProcess(scoped_refptr<base::SingleThreadTaskRunner> thread_proxy,
- base::TickClock* clock,
- const VideoSenderConfig& video_config)
- : test_app_thread_proxy_(thread_proxy),
- video_config_(video_config),
- synthetic_count_(0),
- clock_(clock),
- audio_frame_count_(0),
- video_frame_count_(0),
- weak_factory_(this),
- av_format_context_(NULL),
- audio_stream_index_(-1),
- playback_rate_(1.0),
- video_stream_index_(-1),
- video_frame_rate_numerator_(video_config.max_frame_rate),
- video_frame_rate_denominator_(1),
- video_first_pts_(0),
- video_first_pts_set_(false) {
- audio_bus_factory_.reset(new TestAudioBusFactory(kAudioChannels,
- kAudioSamplingFrequency,
- kSoundFrequency,
- kSoundVolume));
- const CommandLine* cmd = CommandLine::ForCurrentProcess();
- int override_fps = 0;
- if (base::StringToInt(cmd->GetSwitchValueASCII(kSwitchFps),
- &override_fps)) {
- video_config_.max_frame_rate = override_fps;
- video_frame_rate_numerator_ = override_fps;
- }
-
- // Load source file and prepare FFmpeg demuxer.
- base::FilePath source_path = cmd->GetSwitchValuePath(kSwitchSourceFile);
- if (source_path.empty())
- return;
-
- LOG(INFO) << "Source: " << source_path.value();
- if (!file_data_.Initialize(source_path)) {
- LOG(ERROR) << "Cannot load file.";
- return;
- }
- protocol_.reset(
- new InMemoryUrlProtocol(file_data_.data(), file_data_.length(), false));
- glue_.reset(new FFmpegGlue(protocol_.get()));
-
- if (!glue_->OpenContext()) {
- LOG(ERROR) << "Cannot open file.";
- return;
- }
-
- // AVFormatContext is owned by the glue.
- av_format_context_ = glue_->format_context();
- if (avformat_find_stream_info(av_format_context_, NULL) < 0) {
- LOG(ERROR) << "Cannot find stream information.";
- return;
- }
-
- // Prepare FFmpeg decoders.
- for (unsigned int i = 0; i < av_format_context_->nb_streams; ++i) {
- AVStream* av_stream = av_format_context_->streams[i];
- AVCodecContext* av_codec_context = av_stream->codec;
- AVCodec* av_codec = avcodec_find_decoder(av_codec_context->codec_id);
-
- if (!av_codec) {
- LOG(ERROR) << "Cannot find decoder for the codec: "
- << av_codec_context->codec_id;
- continue;
- }
-
- // Number of threads for decoding.
- av_codec_context->thread_count = 2;
- av_codec_context->error_concealment = FF_EC_GUESS_MVS | FF_EC_DEBLOCK;
- av_codec_context->request_sample_fmt = AV_SAMPLE_FMT_S16;
-
- if (avcodec_open2(av_codec_context, av_codec, NULL) < 0) {
- LOG(ERROR) << "Cannot open AVCodecContext for the codec: "
- << av_codec_context->codec_id;
- return;
- }
-
- if (av_codec->type == AVMEDIA_TYPE_AUDIO) {
- if (av_codec_context->sample_fmt == AV_SAMPLE_FMT_S16P) {
- LOG(ERROR) << "Audio format not supported.";
- continue;
- }
- ChannelLayout layout = ChannelLayoutToChromeChannelLayout(
- av_codec_context->channel_layout,
- av_codec_context->channels);
- if (layout == CHANNEL_LAYOUT_UNSUPPORTED) {
- LOG(ERROR) << "Unsupported audio channels layout.";
- continue;
- }
- if (audio_stream_index_ != -1) {
- LOG(WARNING) << "Found multiple audio streams.";
- }
- audio_stream_index_ = static_cast<int>(i);
- audio_params_.Reset(
- AudioParameters::AUDIO_PCM_LINEAR,
- layout,
- av_codec_context->channels,
- av_codec_context->channels,
- av_codec_context->sample_rate,
- 8 * av_get_bytes_per_sample(av_codec_context->sample_fmt),
- av_codec_context->sample_rate / kAudioPacketsPerSecond);
- LOG(INFO) << "Source file has audio.";
- } else if (av_codec->type == AVMEDIA_TYPE_VIDEO) {
- VideoFrame::Format format =
- PixelFormatToVideoFormat(av_codec_context->pix_fmt);
- if (format != VideoFrame::YV12) {
- LOG(ERROR) << "Cannot handle non YV12 video format: " << format;
- continue;
- }
- if (video_stream_index_ != -1) {
- LOG(WARNING) << "Found multiple video streams.";
- }
- video_stream_index_ = static_cast<int>(i);
- if (!override_fps) {
- video_frame_rate_numerator_ = av_stream->r_frame_rate.num;
- video_frame_rate_denominator_ = av_stream->r_frame_rate.den;
- // Max frame rate is rounded up.
- video_config_.max_frame_rate =
- video_frame_rate_denominator_ +
- video_frame_rate_numerator_ - 1;
- video_config_.max_frame_rate /= video_frame_rate_denominator_;
- } else {
- // If video is played at a manual speed audio needs to match.
- playback_rate_ = 1.0 * override_fps *
- av_stream->r_frame_rate.den / av_stream->r_frame_rate.num;
- }
- LOG(INFO) << "Source file has video.";
- } else {
- LOG(ERROR) << "Unknown stream type; ignore.";
- }
- }
-
- Rewind();
- }
-
- ~SendProcess() {
- }
-
- void Start(scoped_refptr<AudioFrameInput> audio_frame_input,
- scoped_refptr<VideoFrameInput> video_frame_input) {
- audio_frame_input_ = audio_frame_input;
- video_frame_input_ = video_frame_input;
-
- LOG(INFO) << "Max Frame rate: " << video_config_.max_frame_rate;
- LOG(INFO) << "Real Frame rate: "
- << video_frame_rate_numerator_ << "/"
- << video_frame_rate_denominator_ << " fps.";
- LOG(INFO) << "Audio playback rate: " << playback_rate_;
-
- if (!is_transcoding_audio() && !is_transcoding_video()) {
- // Send fake patterns.
- test_app_thread_proxy_->PostTask(
- FROM_HERE,
- base::Bind(
- &SendProcess::SendNextFakeFrame,
- base::Unretained(this)));
- return;
- }
-
- // Send transcoding streams.
- audio_algo_.Initialize(playback_rate_, audio_params_);
- audio_algo_.FlushBuffers();
- audio_fifo_input_bus_ =
- AudioBus::Create(
- audio_params_.channels(), audio_params_.frames_per_buffer());
- // Audio FIFO can carry all data fron AudioRendererAlgorithm.
- audio_fifo_.reset(
- new AudioFifo(audio_params_.channels(),
- audio_algo_.QueueCapacity()));
- audio_resampler_.reset(new media::MultiChannelResampler(
- audio_params_.channels(),
- static_cast<double>(audio_params_.sample_rate()) /
- kAudioSamplingFrequency,
- audio_params_.frames_per_buffer(),
- base::Bind(&SendProcess::ProvideData, base::Unretained(this))));
- test_app_thread_proxy_->PostTask(
- FROM_HERE,
- base::Bind(
- &SendProcess::SendNextFrame,
- base::Unretained(this)));
- }
-
- void SendNextFakeFrame() {
- gfx::Size size(video_config_.width, video_config_.height);
- scoped_refptr<VideoFrame> video_frame =
- VideoFrame::CreateBlackFrame(size);
- PopulateVideoFrame(video_frame, synthetic_count_);
- ++synthetic_count_;
-
- base::TimeTicks now = clock_->NowTicks();
- if (start_time_.is_null())
- start_time_ = now;
-
- base::TimeDelta video_time = VideoFrameTime(video_frame_count_);
- video_frame->set_timestamp(video_time);
- video_frame_input_->InsertRawVideoFrame(video_frame,
- start_time_ + video_time);
-
- // Send just enough audio data to match next video frame's time.
- base::TimeDelta audio_time = AudioFrameTime(audio_frame_count_);
- while (audio_time < video_time) {
- if (is_transcoding_audio()) {
- Decode(true);
- CHECK(!audio_bus_queue_.empty()) << "No audio decoded.";
- scoped_ptr<AudioBus> bus(audio_bus_queue_.front());
- audio_bus_queue_.pop();
- audio_frame_input_->InsertAudio(
- bus.Pass(), start_time_ + audio_time);
- } else {
- audio_frame_input_->InsertAudio(
- audio_bus_factory_->NextAudioBus(
- base::TimeDelta::FromMilliseconds(kAudioFrameMs)),
- start_time_ + audio_time);
- }
- audio_time = AudioFrameTime(++audio_frame_count_);
- }
-
- // This is the time since the stream started.
- const base::TimeDelta elapsed_time = now - start_time_;
-
- // Handle the case when frame generation cannot keep up.
- // Move the time ahead to match the next frame.
- while (video_time < elapsed_time) {
- LOG(WARNING) << "Skipping one frame.";
- video_time = VideoFrameTime(++video_frame_count_);
- }
-
- test_app_thread_proxy_->PostDelayedTask(
- FROM_HERE,
- base::Bind(&SendProcess::SendNextFakeFrame,
- weak_factory_.GetWeakPtr()),
- video_time - elapsed_time);
- }
-
- // Return true if a frame was sent.
- bool SendNextTranscodedVideo(base::TimeDelta elapsed_time) {
- if (!is_transcoding_video())
- return false;
-
- Decode(false);
- if (video_frame_queue_.empty())
- return false;
-
- scoped_refptr<VideoFrame> decoded_frame =
- video_frame_queue_.front();
- if (elapsed_time < decoded_frame->timestamp())
- return false;
-
- gfx::Size size(video_config_.width, video_config_.height);
- scoped_refptr<VideoFrame> video_frame =
- VideoFrame::CreateBlackFrame(size);
- video_frame_queue_.pop();
- media::CopyPlane(VideoFrame::kYPlane,
- decoded_frame->data(VideoFrame::kYPlane),
- decoded_frame->stride(VideoFrame::kYPlane),
- decoded_frame->rows(VideoFrame::kYPlane),
- video_frame);
- media::CopyPlane(VideoFrame::kUPlane,
- decoded_frame->data(VideoFrame::kUPlane),
- decoded_frame->stride(VideoFrame::kUPlane),
- decoded_frame->rows(VideoFrame::kUPlane),
- video_frame);
- media::CopyPlane(VideoFrame::kVPlane,
- decoded_frame->data(VideoFrame::kVPlane),
- decoded_frame->stride(VideoFrame::kVPlane),
- decoded_frame->rows(VideoFrame::kVPlane),
- video_frame);
-
- base::TimeDelta video_time;
- // Use the timestamp from the file if we're transcoding.
- video_time = ScaleTimestamp(decoded_frame->timestamp());
- video_frame_input_->InsertRawVideoFrame(
- video_frame, start_time_ + video_time);
-
- // Make sure queue is not empty.
- Decode(false);
- return true;
- }
-
- // Return true if a frame was sent.
- bool SendNextTranscodedAudio(base::TimeDelta elapsed_time) {
- if (!is_transcoding_audio())
- return false;
-
- Decode(true);
- if (audio_bus_queue_.empty())
- return false;
-
- base::TimeDelta audio_time = audio_sent_ts_->GetTimestamp();
- if (elapsed_time < audio_time)
- return false;
- scoped_ptr<AudioBus> bus(audio_bus_queue_.front());
- audio_bus_queue_.pop();
- audio_sent_ts_->AddFrames(bus->frames());
- audio_frame_input_->InsertAudio(
- bus.Pass(), start_time_ + audio_time);
-
- // Make sure queue is not empty.
- Decode(true);
- return true;
- }
-
- void SendNextFrame() {
- if (start_time_.is_null())
- start_time_ = clock_->NowTicks();
- if (start_time_.is_null())
- start_time_ = clock_->NowTicks();
-
- // Send as much as possible. Audio is sent according to
- // system time.
- while (SendNextTranscodedAudio(clock_->NowTicks() - start_time_));
-
- // Video is sync'ed to audio.
- while (SendNextTranscodedVideo(audio_sent_ts_->GetTimestamp()));
-
- if (audio_bus_queue_.empty() && video_frame_queue_.empty()) {
- // Both queues are empty can only mean that we have reached
- // the end of the stream.
- LOG(INFO) << "Rewind.";
- Rewind();
- start_time_ = base::TimeTicks();
- audio_sent_ts_.reset();
- video_first_pts_set_ = false;
- }
-
- // Send next send.
- test_app_thread_proxy_->PostDelayedTask(
- FROM_HERE,
- base::Bind(
- &SendProcess::SendNextFrame,
- base::Unretained(this)),
- base::TimeDelta::FromMilliseconds(kAudioFrameMs));
- }
-
- const VideoSenderConfig& get_video_config() const { return video_config_; }
-
- private:
- bool is_transcoding_audio() { return audio_stream_index_ >= 0; }
- bool is_transcoding_video() { return video_stream_index_ >= 0; }
-
- // Helper methods to compute timestamps for the frame number specified.
- base::TimeDelta VideoFrameTime(int frame_number) {
- return frame_number * base::TimeDelta::FromSeconds(1) *
- video_frame_rate_denominator_ / video_frame_rate_numerator_;
- }
-
- base::TimeDelta ScaleTimestamp(base::TimeDelta timestamp) {
- return base::TimeDelta::FromMicroseconds(
- timestamp.InMicroseconds() / playback_rate_);
- }
-
- base::TimeDelta AudioFrameTime(int frame_number) {
- return frame_number * base::TimeDelta::FromMilliseconds(kAudioFrameMs);
- }
-
- // Go to the beginning of the stream.
- void Rewind() {
- CHECK(av_seek_frame(av_format_context_, -1, 0, AVSEEK_FLAG_BACKWARD) >= 0)
- << "Failed to rewind to the beginning.";
- }
-
- // Call FFmpeg to fetch one packet.
- ScopedAVPacket DemuxOnePacket(bool* audio) {
- ScopedAVPacket packet(new AVPacket());
- if (av_read_frame(av_format_context_, packet.get()) < 0) {
- LOG(ERROR) << "Failed to read one AVPacket.";
- packet.reset();
- return packet.Pass();
- }
-
- int stream_index = static_cast<int>(packet->stream_index);
- if (stream_index == audio_stream_index_) {
- *audio = true;
- } else if (stream_index == video_stream_index_) {
- *audio = false;
- } else {
- // Ignore unknown packet.
- LOG(INFO) << "Unknown packet.";
- packet.reset();
- }
- return packet.Pass();
- }
-
- void DecodeAudio(ScopedAVPacket packet) {
- // Audio.
- AVFrame* avframe = av_frame_alloc();
-
- // Make a shallow copy of packet so we can slide packet.data as frames are
- // decoded from the packet; otherwise av_free_packet() will corrupt memory.
- AVPacket packet_temp = *packet.get();
-
- do {
- int frame_decoded = 0;
- int result = avcodec_decode_audio4(
- av_audio_context(), avframe, &frame_decoded, &packet_temp);
- CHECK(result >= 0) << "Failed to decode audio.";
- packet_temp.size -= result;
- packet_temp.data += result;
- if (!frame_decoded)
- continue;
-
- int frames_read = avframe->nb_samples;
- if (frames_read < 0)
- break;
-
- if (!audio_sent_ts_) {
- // Initialize the base time to the first packet in the file.
- // This is set to the frequency we send to the receiver.
- // Not the frequency of the source file. This is because we
- // increment the frame count by samples we sent.
- audio_sent_ts_.reset(
- new AudioTimestampHelper(kAudioSamplingFrequency));
- // For some files this is an invalid value.
- base::TimeDelta base_ts;
- audio_sent_ts_->SetBaseTimestamp(base_ts);
- }
-
- scoped_refptr<AudioBuffer> buffer =
- AudioBuffer::CopyFrom(
- AVSampleFormatToSampleFormat(
- av_audio_context()->sample_fmt),
- ChannelLayoutToChromeChannelLayout(
- av_audio_context()->channel_layout,
- av_audio_context()->channels),
- av_audio_context()->channels,
- av_audio_context()->sample_rate,
- frames_read,
- &avframe->data[0],
- // Note: Not all files have correct values for pkt_pts.
- base::TimeDelta::FromMilliseconds(avframe->pkt_pts));
- audio_algo_.EnqueueBuffer(buffer);
- av_frame_unref(avframe);
- } while (packet_temp.size > 0);
- av_frame_free(&avframe);
-
- const int frames_needed_to_scale =
- playback_rate_ * av_audio_context()->sample_rate /
- kAudioPacketsPerSecond;
- while (frames_needed_to_scale <= audio_algo_.frames_buffered()) {
- if (!audio_algo_.FillBuffer(audio_fifo_input_bus_.get(),
- audio_fifo_input_bus_->frames())) {
- // Nothing can be scaled. Decode some more.
- return;
- }
-
- // Prevent overflow of audio data in the FIFO.
- if (audio_fifo_input_bus_->frames() + audio_fifo_->frames()
- <= audio_fifo_->max_frames()) {
- audio_fifo_->Push(audio_fifo_input_bus_.get());
- } else {
- LOG(WARNING) << "Audio FIFO full; dropping samples.";
- }
-
- // Make sure there's enough data to resample audio.
- if (audio_fifo_->frames() <
- 2 * audio_params_.sample_rate() / kAudioPacketsPerSecond) {
- continue;
- }
-
- scoped_ptr<media::AudioBus> resampled_bus(
- media::AudioBus::Create(
- audio_params_.channels(),
- kAudioSamplingFrequency / kAudioPacketsPerSecond));
- audio_resampler_->Resample(resampled_bus->frames(),
- resampled_bus.get());
- audio_bus_queue_.push(resampled_bus.release());
- }
- }
-
- void DecodeVideo(ScopedAVPacket packet) {
- // Video.
- int got_picture;
- AVFrame* avframe = av_frame_alloc();
- // Tell the decoder to reorder for us.
- avframe->reordered_opaque =
- av_video_context()->reordered_opaque = packet->pts;
- CHECK(avcodec_decode_video2(
- av_video_context(), avframe, &got_picture, packet.get()) >= 0)
- << "Video decode error.";
- if (!got_picture) {
- av_frame_free(&avframe);
- return;
- }
- gfx::Size size(av_video_context()->width, av_video_context()->height);
- if (!video_first_pts_set_ ||
- avframe->reordered_opaque < video_first_pts_) {
- video_first_pts_set_ = true;
- video_first_pts_ = avframe->reordered_opaque;
- }
- int64 pts = avframe->reordered_opaque - video_first_pts_;
- video_frame_queue_.push(
- VideoFrame::WrapExternalYuvData(
- media::VideoFrame::YV12,
- size,
- gfx::Rect(size),
- size,
- avframe->linesize[0],
- avframe->linesize[1],
- avframe->linesize[2],
- avframe->data[0],
- avframe->data[1],
- avframe->data[2],
- base::TimeDelta::FromMilliseconds(pts),
- base::Bind(&AVFreeFrame, avframe)));
- }
-
- void Decode(bool decode_audio) {
- // Read the stream until one video frame can be decoded.
- while (true) {
- if (decode_audio && !audio_bus_queue_.empty())
- return;
- if (!decode_audio && !video_frame_queue_.empty())
- return;
-
- bool audio_packet = false;
- ScopedAVPacket packet = DemuxOnePacket(&audio_packet);
- if (!packet) {
- LOG(INFO) << "End of stream.";
- return;
- }
-
- if (audio_packet)
- DecodeAudio(packet.Pass());
- else
- DecodeVideo(packet.Pass());
- }
- }
-
- void ProvideData(int frame_delay, media::AudioBus* output_bus) {
- if (audio_fifo_->frames() >= output_bus->frames()) {
- audio_fifo_->Consume(output_bus, 0, output_bus->frames());
- } else {
- LOG(WARNING) << "Not enough audio data for resampling.";
- output_bus->Zero();
- }
- }
-
- AVStream* av_audio_stream() {
- return av_format_context_->streams[audio_stream_index_];
- }
- AVStream* av_video_stream() {
- return av_format_context_->streams[video_stream_index_];
- }
- AVCodecContext* av_audio_context() { return av_audio_stream()->codec; }
- AVCodecContext* av_video_context() { return av_video_stream()->codec; }
-
- scoped_refptr<base::SingleThreadTaskRunner> test_app_thread_proxy_;
- VideoSenderConfig video_config_;
- scoped_refptr<AudioFrameInput> audio_frame_input_;
- scoped_refptr<VideoFrameInput> video_frame_input_;
- uint8 synthetic_count_;
- base::TickClock* const clock_; // Not owned by this class.
-
- // Time when the stream starts.
- base::TimeTicks start_time_;
-
- // The following three members are used only for fake frames.
- int audio_frame_count_; // Each audio frame is exactly 10ms.
- int video_frame_count_;
- scoped_ptr<TestAudioBusFactory> audio_bus_factory_;
-
- // NOTE: Weak pointers must be invalidated before all other member variables.
- base::WeakPtrFactory<SendProcess> weak_factory_;
-
- base::MemoryMappedFile file_data_;
- scoped_ptr<InMemoryUrlProtocol> protocol_;
- scoped_ptr<FFmpegGlue> glue_;
- AVFormatContext* av_format_context_;
-
- int audio_stream_index_;
- AudioParameters audio_params_;
- double playback_rate_;
-
- int video_stream_index_;
- int video_frame_rate_numerator_;
- int video_frame_rate_denominator_;
-
- // These are used for audio resampling.
- scoped_ptr<media::MultiChannelResampler> audio_resampler_;
- scoped_ptr<media::AudioFifo> audio_fifo_;
- scoped_ptr<media::AudioBus> audio_fifo_input_bus_;
- media::AudioRendererAlgorithm audio_algo_;
-
- // Track the timestamp of audio sent to the receiver.
- scoped_ptr<media::AudioTimestampHelper> audio_sent_ts_;
-
- std::queue<scoped_refptr<VideoFrame> > video_frame_queue_;
- int64 video_first_pts_;
- bool video_first_pts_set_;
-
- std::queue<AudioBus*> audio_bus_queue_;
-
- DISALLOW_COPY_AND_ASSIGN(SendProcess);
-};
-
-} // namespace cast
-} // namespace media
-
-namespace {
void UpdateCastTransportStatus(
- media::cast::transport::CastTransportStatus status) {
+ media::cast::CastTransportStatus status) {
VLOG(1) << "Transport status: " << status;
}
@@ -902,10 +273,8 @@ int main(int argc, char** argv) {
LOG(INFO) << "Sending to " << remote_ip_address << ":" << remote_port
<< ".";
- media::cast::AudioSenderConfig audio_config =
- media::cast::GetAudioSenderConfig();
- media::cast::VideoSenderConfig video_config =
- media::cast::GetVideoSenderConfig();
+ media::cast::AudioSenderConfig audio_config = GetAudioSenderConfig();
+ media::cast::VideoSenderConfig video_config = GetVideoSenderConfig();
// Running transport on the main thread.
// Setting up transport config.
@@ -922,14 +291,25 @@ int main(int argc, char** argv) {
video_thread.message_loop_proxy()));
// SendProcess initialization.
- scoped_ptr<media::cast::SendProcess> send_process(
- new media::cast::SendProcess(test_thread.message_loop_proxy(),
- cast_environment->Clock(),
- video_config));
+ scoped_ptr<media::cast::FakeMediaSource> fake_media_source(
+ new media::cast::FakeMediaSource(test_thread.message_loop_proxy(),
+ cast_environment->Clock(),
+ video_config));
+
+ int override_fps = 0;
+ if (!base::StringToInt(cmd->GetSwitchValueASCII(kSwitchFps),
+                         &override_fps)) {
+ override_fps = 0;
+ }
+ base::FilePath source_path = cmd->GetSwitchValuePath(kSwitchSourceFile);
+ if (!source_path.empty()) {
+ LOG(INFO) << "Source: " << source_path.value();
+ fake_media_source->SetSourceFile(source_path, override_fps);
+ }
// CastTransportSender initialization.
- scoped_ptr<media::cast::transport::CastTransportSender> transport_sender =
- media::cast::transport::CastTransportSender::Create(
+ scoped_ptr<media::cast::CastTransportSender> transport_sender =
+ media::cast::CastTransportSender::Create(
NULL, // net log.
cast_environment->Clock(),
remote_endpoint,
@@ -942,7 +322,7 @@ int main(int argc, char** argv) {
scoped_ptr<media::cast::CastSender> cast_sender =
media::cast::CastSender::Create(cast_environment, transport_sender.get());
cast_sender->InitializeVideo(
- send_process->get_video_config(),
+ fake_media_source->get_video_config(),
base::Bind(&InitializationResult),
media::cast::CreateDefaultVideoEncodeAcceleratorCallback(),
media::cast::CreateDefaultVideoEncodeMemoryCallback());
@@ -1014,8 +394,8 @@ int main(int argc, char** argv) {
base::Passed(&offset_estimator)),
base::TimeDelta::FromSeconds(logging_duration_seconds));
- send_process->Start(cast_sender->audio_frame_input(),
- cast_sender->video_frame_input());
+ fake_media_source->Start(cast_sender->audio_frame_input(),
+ cast_sender->video_frame_input());
io_message_loop.Run();
return 0;
diff --git a/media/cast/test/simulator.cc b/media/cast/test/simulator.cc
new file mode 100644
index 0000000000..43b3a02dc6
--- /dev/null
+++ b/media/cast/test/simulator.cc
@@ -0,0 +1,445 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Simulate end to end streaming.
+//
+// Input:
+// --source=
+// WebM used as the source of video and audio frames.
+// --output=
+//       File path for writing out the raw event log of the simulation session.
+// --sim-id=
+// Unique simulation ID.
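+//   --model=
+//       Path to a serialized NetworkSimulationModel proto describing the
+//       network. A default model is used if this is omitted or invalid.
+//   --lib-dir=
+//       Directory to load the FFmpeg media library from. Defaults to the
+//       module directory.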
+//
+// Output:
+// - Raw event log of the simulation session tagged with the unique test ID,
+// written out to the specified file path.
+
+#include "base/at_exit.h"
+#include "base/base_paths.h"
+#include "base/command_line.h"
+#include "base/file_util.h"
+#include "base/files/file_path.h"
+#include "base/files/memory_mapped_file.h"
+#include "base/files/scoped_file.h"
+#include "base/json/json_writer.h"
+#include "base/logging.h"
+#include "base/path_service.h"
+#include "base/test/simple_test_tick_clock.h"
+#include "base/thread_task_runner_handle.h"
+#include "base/time/tick_clock.h"
+#include "base/values.h"
+#include "media/base/audio_bus.h"
+#include "media/base/media.h"
+#include "media/base/video_frame.h"
+#include "media/cast/cast_config.h"
+#include "media/cast/cast_environment.h"
+#include "media/cast/cast_receiver.h"
+#include "media/cast/cast_sender.h"
+#include "media/cast/logging/encoding_event_subscriber.h"
+#include "media/cast/logging/log_serializer.h"
+#include "media/cast/logging/logging_defines.h"
+#include "media/cast/logging/proto/raw_events.pb.h"
+#include "media/cast/logging/raw_event_subscriber_bundle.h"
+#include "media/cast/logging/simple_event_subscriber.h"
+#include "media/cast/net/cast_transport_config.h"
+#include "media/cast/net/cast_transport_defines.h"
+#include "media/cast/net/cast_transport_sender.h"
+#include "media/cast/net/cast_transport_sender_impl.h"
+#include "media/cast/test/fake_media_source.h"
+#include "media/cast/test/fake_single_thread_task_runner.h"
+#include "media/cast/test/loopback_transport.h"
+#include "media/cast/test/proto/network_simulation_model.pb.h"
+#include "media/cast/test/skewed_tick_clock.h"
+#include "media/cast/test/utility/audio_utility.h"
+#include "media/cast/test/utility/default_config.h"
+#include "media/cast/test/utility/test_util.h"
+#include "media/cast/test/utility/udp_proxy.h"
+#include "media/cast/test/utility/video_utility.h"
+
+using media::cast::proto::IPPModel;
+using media::cast::proto::NetworkSimulationModel;
+using media::cast::proto::NetworkSimulationModelType;
+
+namespace media {
+namespace cast {
+namespace {
+const int kTargetDelay = 300;
+const char kSourcePath[] = "source";
+const char kModelPath[] = "model";
+const char kOutputPath[] = "output";
+const char kSimulationId[] = "sim-id";
+const char kLibDir[] = "lib-dir";
+
+void UpdateCastTransportStatus(CastTransportStatus status) {
+ LOG(INFO) << "Cast transport status: " << status;
+}
+
+void AudioInitializationStatus(CastInitializationStatus status) {
+ LOG(INFO) << "Audio status: " << status;
+}
+
+void VideoInitializationStatus(CastInitializationStatus status) {
+ LOG(INFO) << "Video status: " << status;
+}
+
+void LogTransportEvents(const scoped_refptr<CastEnvironment>& env,
+ const std::vector<PacketEvent>& packet_events) {
+ for (std::vector<media::cast::PacketEvent>::const_iterator it =
+ packet_events.begin();
+ it != packet_events.end();
+ ++it) {
+ env->Logging()->InsertPacketEvent(it->timestamp,
+ it->type,
+ it->media_type,
+ it->rtp_timestamp,
+ it->frame_id,
+ it->packet_id,
+ it->max_packet_id,
+ it->size);
+ }
+}
+
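+// The two callbacks below count the decoded frames delivered by the receiver
+// and immediately request the next one, so the receive side keeps running
+// for the entire simulation.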
+void GotVideoFrame(
+ int* counter,
+ CastReceiver* cast_receiver,
+ const scoped_refptr<media::VideoFrame>& video_frame,
+ const base::TimeTicks& render_time,
+ bool continuous) {
+ ++*counter;
+ cast_receiver->RequestDecodedVideoFrame(
+ base::Bind(&GotVideoFrame, counter, cast_receiver));
+}
+
+void GotAudioFrame(
+ int* counter,
+ CastReceiver* cast_receiver,
+ scoped_ptr<AudioBus> audio_bus,
+ const base::TimeTicks& playout_time,
+ bool is_continuous) {
+ ++*counter;
+ cast_receiver->RequestDecodedAudioFrame(
+ base::Bind(&GotAudioFrame, counter, cast_receiver));
+}
+
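+// Serializes the events collected by |subscriber|, tags them with
+// |extra_data| and appends the result to the file at |output_path|.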
+void AppendLog(EncodingEventSubscriber* subscriber,
+ const std::string& extra_data,
+ const base::FilePath& output_path) {
+ media::cast::proto::LogMetadata metadata;
+ metadata.set_extra_data(extra_data);
+
+ media::cast::FrameEventList frame_events;
+ media::cast::PacketEventList packet_events;
+ subscriber->GetEventsAndReset(
+ &metadata, &frame_events, &packet_events);
+ media::cast::proto::GeneralDescription* gen_desc =
+ metadata.mutable_general_description();
+ gen_desc->set_product("Cast Simulator");
+ gen_desc->set_product_version("0.1");
+
+ scoped_ptr<char[]> serialized_log(new char[media::cast::kMaxSerializedBytes]);
+ int output_bytes;
+ bool success = media::cast::SerializeEvents(metadata,
+ frame_events,
+ packet_events,
+ true,
+ media::cast::kMaxSerializedBytes,
+ serialized_log.get(),
+ &output_bytes);
+
+ if (!success) {
+ LOG(ERROR) << "Failed to serialize log.";
+ return;
+ }
+
+ if (AppendToFile(output_path, serialized_log.get(), output_bytes) == -1) {
+ LOG(ERROR) << "Failed to append to log.";
+ }
+}
+
+// Run simulation once.
+//
+// |output_path| is the path to write serialized log.
+// |extra_data| is extra tagging information to write to log.
+void RunSimulation(const base::FilePath& source_path,
+ const base::FilePath& output_path,
+ const std::string& extra_data,
+ const NetworkSimulationModel& model) {
+ // Fake clock. Make sure start time is non zero.
+ base::SimpleTestTickClock testing_clock;
+ testing_clock.Advance(base::TimeDelta::FromSeconds(1));
+
+ // Task runner.
+ scoped_refptr<test::FakeSingleThreadTaskRunner> task_runner =
+ new test::FakeSingleThreadTaskRunner(&testing_clock);
+ base::ThreadTaskRunnerHandle task_runner_handle(task_runner);
+
+ // CastEnvironments.
+ scoped_refptr<CastEnvironment> sender_env =
+ new CastEnvironment(
+ scoped_ptr<base::TickClock>(
+ new test::SkewedTickClock(&testing_clock)).Pass(),
+ task_runner,
+ task_runner,
+ task_runner);
+ scoped_refptr<CastEnvironment> receiver_env =
+ new CastEnvironment(
+ scoped_ptr<base::TickClock>(
+ new test::SkewedTickClock(&testing_clock)).Pass(),
+ task_runner,
+ task_runner,
+ task_runner);
+
+ // Event subscriber. Store at most 1 hour of events.
+ EncodingEventSubscriber audio_event_subscriber(AUDIO_EVENT,
+ 100 * 60 * 60);
+ EncodingEventSubscriber video_event_subscriber(VIDEO_EVENT,
+ 30 * 60 * 60);
+ sender_env->Logging()->AddRawEventSubscriber(&audio_event_subscriber);
+ sender_env->Logging()->AddRawEventSubscriber(&video_event_subscriber);
+
+ // Audio sender config.
+ AudioSenderConfig audio_sender_config = GetDefaultAudioSenderConfig();
+ audio_sender_config.target_playout_delay =
+ base::TimeDelta::FromMilliseconds(kTargetDelay);
+
+ // Audio receiver config.
+ FrameReceiverConfig audio_receiver_config =
+ GetDefaultAudioReceiverConfig();
+ audio_receiver_config.rtp_max_delay_ms =
+ audio_sender_config.target_playout_delay.InMilliseconds();
+
+ // Video sender config.
+ VideoSenderConfig video_sender_config = GetDefaultVideoSenderConfig();
+ video_sender_config.max_bitrate = 4000000;
+ video_sender_config.min_bitrate = 2000000;
+ video_sender_config.start_bitrate = 4000000;
+ video_sender_config.target_playout_delay =
+ base::TimeDelta::FromMilliseconds(kTargetDelay);
+
+ // Video receiver config.
+ FrameReceiverConfig video_receiver_config =
+ GetDefaultVideoReceiverConfig();
+ video_receiver_config.rtp_max_delay_ms =
+ video_sender_config.target_playout_delay.InMilliseconds();
+
+ // Loopback transport.
+ LoopBackTransport receiver_to_sender(receiver_env);
+ LoopBackTransport sender_to_receiver(sender_env);
+
+ // Cast receiver.
+ scoped_ptr<CastReceiver> cast_receiver(
+ CastReceiver::Create(receiver_env,
+ audio_receiver_config,
+ video_receiver_config,
+ &receiver_to_sender));
+
+ // Cast sender and transport sender.
+ scoped_ptr<CastTransportSender> transport_sender(
+ new CastTransportSenderImpl(
+ NULL,
+ &testing_clock,
+ net::IPEndPoint(),
+ base::Bind(&UpdateCastTransportStatus),
+ base::Bind(&LogTransportEvents, sender_env),
+ base::TimeDelta::FromSeconds(1),
+ task_runner,
+ &sender_to_receiver));
+ scoped_ptr<CastSender> cast_sender(
+ CastSender::Create(sender_env, transport_sender.get()));
+
+ // Build packet pipe.
+ if (model.type() != media::cast::proto::INTERRUPTED_POISSON_PROCESS) {
+ LOG(ERROR) << "Unknown model type " << model.type() << ".";
+ return;
+ }
+
+ const IPPModel& ipp_model = model.ipp();
+
+ std::vector<double> average_rates(ipp_model.average_rate_size());
+ std::copy(ipp_model.average_rate().begin(), ipp_model.average_rate().end(),
+ average_rates.begin());
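+  // The same process feeds both directions of the simulated link below.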
+ test::InterruptedPoissonProcess ipp(average_rates,
+ ipp_model.coef_burstiness(), ipp_model.coef_variance(), 0);
+
+ // Connect sender to receiver. This initializes the pipe.
+ receiver_to_sender.Initialize(
+ ipp.NewBuffer(128 * 1024), cast_sender->packet_receiver(), task_runner,
+ &testing_clock);
+ sender_to_receiver.Initialize(
+ ipp.NewBuffer(128 * 1024), cast_receiver->packet_receiver(), task_runner,
+ &testing_clock);
+
+ // Start receiver.
+ int audio_frame_count = 0;
+ int video_frame_count = 0;
+ cast_receiver->RequestDecodedVideoFrame(
+ base::Bind(&GotVideoFrame, &video_frame_count, cast_receiver.get()));
+ cast_receiver->RequestDecodedAudioFrame(
+ base::Bind(&GotAudioFrame, &audio_frame_count, cast_receiver.get()));
+
+ FakeMediaSource media_source(task_runner,
+ &testing_clock,
+ video_sender_config);
+
+  // Initialize the audio and video senders.
+ cast_sender->InitializeAudio(audio_sender_config,
+ base::Bind(&AudioInitializationStatus));
+ cast_sender->InitializeVideo(media_source.get_video_config(),
+ base::Bind(&VideoInitializationStatus),
+ CreateDefaultVideoEncodeAcceleratorCallback(),
+ CreateDefaultVideoEncodeMemoryCallback());
+
+ // Start sending.
+ if (!source_path.empty()) {
+ // 0 means using the FPS from the file.
+ media_source.SetSourceFile(source_path, 0);
+ }
+ media_source.Start(cast_sender->audio_frame_input(),
+ cast_sender->video_frame_input());
+
+ // Run for 3 minutes.
+ base::TimeDelta elapsed_time;
+ while (elapsed_time.InMinutes() < 3) {
+ // Each step is 100us.
+ base::TimeDelta step = base::TimeDelta::FromMicroseconds(100);
+ task_runner->Sleep(step);
+ elapsed_time += step;
+ }
+
+ LOG(INFO) << "Audio frame count: " << audio_frame_count;
+ LOG(INFO) << "Video frame count: " << video_frame_count;
+ LOG(INFO) << "Writing log: " << output_path.value();
+
+ // Truncate file and then write serialized log.
+ {
+ base::ScopedFILE file(base::OpenFile(output_path, "wb"));
+ if (!file.get()) {
+ LOG(INFO) << "Cannot write to log.";
+ return;
+ }
+ }
+ AppendLog(&video_event_subscriber, extra_data, output_path);
+ AppendLog(&audio_event_subscriber, extra_data, output_path);
+}
+
+NetworkSimulationModel DefaultModel() {
+ NetworkSimulationModel model;
+ model.set_type(cast::proto::INTERRUPTED_POISSON_PROCESS);
+ IPPModel* ipp = model.mutable_ipp();
+ ipp->set_coef_burstiness(0.609);
+ ipp->set_coef_variance(4.1);
+
+ ipp->add_average_rate(0.609);
+ ipp->add_average_rate(0.495);
+ ipp->add_average_rate(0.561);
+ ipp->add_average_rate(0.458);
+ ipp->add_average_rate(0.538);
+ ipp->add_average_rate(0.513);
+ ipp->add_average_rate(0.585);
+ ipp->add_average_rate(0.592);
+ ipp->add_average_rate(0.658);
+ ipp->add_average_rate(0.556);
+ ipp->add_average_rate(0.371);
+ ipp->add_average_rate(0.595);
+ ipp->add_average_rate(0.490);
+ ipp->add_average_rate(0.980);
+ ipp->add_average_rate(0.781);
+ ipp->add_average_rate(0.463);
+
+ return model;
+}
+
+bool IsModelValid(const NetworkSimulationModel& model) {
+ if (!model.has_type())
+ return false;
+ NetworkSimulationModelType type = model.type();
+ if (type == media::cast::proto::INTERRUPTED_POISSON_PROCESS) {
+ if (!model.has_ipp())
+ return false;
+ const IPPModel& ipp = model.ipp();
+ if (ipp.coef_burstiness() <= 0.0 || ipp.coef_variance() <= 0.0)
+ return false;
+ if (ipp.average_rate_size() == 0)
+ return false;
+ for (int i = 0; i < ipp.average_rate_size(); i++) {
+ if (ipp.average_rate(i) <= 0.0)
+ return false;
+ }
+ }
+
+ return true;
+}
+
+NetworkSimulationModel LoadModel(const base::FilePath& model_path) {
+ if (model_path.empty()) {
+ LOG(ERROR) << "Model path not set.";
+ return DefaultModel();
+ }
+ std::string model_str;
+ if (!base::ReadFileToString(model_path, &model_str)) {
+ LOG(ERROR) << "Failed to read model file.";
+ return DefaultModel();
+ }
+
+ NetworkSimulationModel model;
+ if (!model.ParseFromString(model_str)) {
+ LOG(ERROR) << "Failed to parse model.";
+ return DefaultModel();
+ }
+ if (!IsModelValid(model)) {
+ LOG(ERROR) << "Invalid model.";
+ return DefaultModel();
+ }
+
+ return model;
+}
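+
+// A sketch (not part of this tool) of how a model file consumed by
+// LoadModel() could be produced offline; any program that writes the
+// serialized proto bytes to disk will do:
+//
+//   media::cast::proto::NetworkSimulationModel model = DefaultModel();
+//   std::string serialized;
+//   model.SerializeToString(&serialized);
+//   base::WriteFile(model_path, serialized.data(),
+//                   static_cast<int>(serialized.size()));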
+
+} // namespace
+} // namespace cast
+} // namespace media
+
+int main(int argc, char** argv) {
+ base::AtExitManager at_exit;
+ CommandLine::Init(argc, argv);
+ InitLogging(logging::LoggingSettings());
+
+ const CommandLine* cmd = CommandLine::ForCurrentProcess();
+ base::FilePath media_path = cmd->GetSwitchValuePath(media::cast::kLibDir);
+ if (media_path.empty()) {
+ if (!PathService::Get(base::DIR_MODULE, &media_path)) {
+ LOG(ERROR) << "Failed to load FFmpeg.";
+ return 1;
+ }
+ }
+
+ if (!media::InitializeMediaLibrary(media_path)) {
+ LOG(ERROR) << "Failed to initialize FFmpeg.";
+ return 1;
+ }
+
+ base::FilePath source_path = cmd->GetSwitchValuePath(
+ media::cast::kSourcePath);
+ base::FilePath output_path = cmd->GetSwitchValuePath(
+ media::cast::kOutputPath);
+ if (output_path.empty()) {
+ base::GetTempDir(&output_path);
+ output_path = output_path.AppendASCII("sim-events.gz");
+ }
+ std::string sim_id = cmd->GetSwitchValueASCII(media::cast::kSimulationId);
+
+ NetworkSimulationModel model = media::cast::LoadModel(
+ cmd->GetSwitchValuePath(media::cast::kModelPath));
+
+ base::DictionaryValue values;
+ values.SetBoolean("sim", true);
+ values.SetString("sim-id", sim_id);
+
+ std::string extra_data;
+ base::JSONWriter::Write(&values, &extra_data);
+
+ // Run.
+ media::cast::RunSimulation(source_path, output_path, extra_data, model);
+ return 0;
+}
diff --git a/media/cast/test/utility/default_config.cc b/media/cast/test/utility/default_config.cc
index b5de5ed690..6743146b70 100644
--- a/media/cast/test/utility/default_config.cc
+++ b/media/cast/test/utility/default_config.cc
@@ -5,7 +5,7 @@
#include "media/cast/test/utility/default_config.h"
#include "base/bind.h"
-#include "media/cast/transport/cast_transport_config.h"
+#include "media/cast/net/cast_transport_config.h"
namespace {
@@ -35,7 +35,7 @@ FrameReceiverConfig GetDefaultAudioReceiverConfig() {
config.frequency = 48000;
config.channels = 2;
config.max_frame_rate = 100; // 10ms of signal per frame
- config.codec.audio = media::cast::transport::kOpus;
+ config.codec = media::cast::CODEC_AUDIO_OPUS;
return config;
}
@@ -49,7 +49,44 @@ FrameReceiverConfig GetDefaultVideoReceiverConfig() {
config.frequency = kVideoFrequency;
config.channels = 1;
config.max_frame_rate = kDefaultMaxFrameRate;
- config.codec.video = media::cast::transport::kVp8;
+ config.codec = media::cast::CODEC_VIDEO_VP8;
+ return config;
+}
+
+AudioSenderConfig GetDefaultAudioSenderConfig() {
+ FrameReceiverConfig recv_config = GetDefaultAudioReceiverConfig();
+ AudioSenderConfig config;
+ config.ssrc = recv_config.incoming_ssrc;
+ config.incoming_feedback_ssrc = recv_config.feedback_ssrc;
+ config.rtp_payload_type = recv_config.rtp_payload_type;
+ config.use_external_encoder = false;
+ config.frequency = recv_config.frequency;
+ config.channels = recv_config.channels;
+ config.bitrate = kDefaultAudioEncoderBitrate;
+ config.codec = recv_config.codec;
+ config.target_playout_delay =
+ base::TimeDelta::FromMilliseconds(kDefaultRtpMaxDelayMs);
+ return config;
+}
+
+VideoSenderConfig GetDefaultVideoSenderConfig() {
+ FrameReceiverConfig recv_config = GetDefaultVideoReceiverConfig();
+ VideoSenderConfig config;
+ config.ssrc = recv_config.incoming_ssrc;
+ config.incoming_feedback_ssrc = recv_config.feedback_ssrc;
+ config.rtp_payload_type = recv_config.rtp_payload_type;
+ config.use_external_encoder = false;
+ config.width = 1280;
+ config.height = 720;
+ config.max_bitrate = 4000000;
+ config.min_bitrate = 2000000;
+ config.start_bitrate = 4000000;
+ config.max_frame_rate = recv_config.max_frame_rate;
+ config.max_number_of_video_buffers_used = 1;
+ config.codec = recv_config.codec;
+ config.number_of_encode_threads = 2;
+ config.target_playout_delay =
+ base::TimeDelta::FromMilliseconds(kDefaultRtpMaxDelayMs);
return config;
}
diff --git a/media/cast/test/utility/default_config.h b/media/cast/test/utility/default_config.h
index eaa3c96415..2cc52a7a72 100644
--- a/media/cast/test/utility/default_config.h
+++ b/media/cast/test/utility/default_config.h
@@ -20,6 +20,15 @@ FrameReceiverConfig GetDefaultAudioReceiverConfig();
// name.
FrameReceiverConfig GetDefaultVideoReceiverConfig();
+// Returns an AudioSenderConfig initialized to default values. This means
+// 48 kHz, 2-channel Opus-coded audio. Default values for SSRCs and payload
+// type.
+AudioSenderConfig GetDefaultAudioSenderConfig();
+
+// Returns a VideoSenderConfig initialized to default values. This means
+// 30 Hz VP8-coded video. Default values for SSRCs and payload type.
+VideoSenderConfig GetDefaultVideoSenderConfig();
+
// Returns a callback that does nothing.
CreateVideoEncodeAcceleratorCallback
CreateDefaultVideoEncodeAcceleratorCallback();
diff --git a/media/cast/test/utility/in_process_receiver.cc b/media/cast/test/utility/in_process_receiver.cc
index cfcc1fcb46..6c739ecdb6 100644
--- a/media/cast/test/utility/in_process_receiver.cc
+++ b/media/cast/test/utility/in_process_receiver.cc
@@ -11,11 +11,11 @@
#include "media/cast/cast_config.h"
#include "media/cast/cast_environment.h"
#include "media/cast/cast_receiver.h"
-#include "media/cast/transport/cast_transport_config.h"
-#include "media/cast/transport/transport/udp_transport.h"
+#include "media/cast/net/cast_transport_config.h"
+#include "media/cast/net/udp_transport.h"
-using media::cast::transport::CastTransportStatus;
-using media::cast::transport::UdpTransport;
+using media::cast::CastTransportStatus;
+using media::cast::UdpTransport;
namespace media {
namespace cast {
@@ -67,7 +67,7 @@ void InProcessReceiver::StopOnMainThread(base::WaitableEvent* event) {
}
void InProcessReceiver::UpdateCastTransportStatus(CastTransportStatus status) {
- LOG_IF(ERROR, status == media::cast::transport::TRANSPORT_SOCKET_ERROR)
+ LOG_IF(ERROR, status == media::cast::TRANSPORT_SOCKET_ERROR)
<< "Transport socket error occurred. InProcessReceiver is likely dead.";
VLOG(1) << "CastTransportStatus is now " << status;
}
diff --git a/media/cast/test/utility/in_process_receiver.h b/media/cast/test/utility/in_process_receiver.h
index cf25da9cee..454dc937ae 100644
--- a/media/cast/test/utility/in_process_receiver.h
+++ b/media/cast/test/utility/in_process_receiver.h
@@ -10,7 +10,8 @@
#include "base/memory/weak_ptr.h"
#include "media/base/audio_bus.h"
#include "media/cast/cast_config.h"
-#include "media/cast/transport/cast_transport_config.h"
+#include "media/cast/net/cast_transport_config.h"
+#include "net/base/ip_endpoint.h"
namespace base {
class TimeTicks;
@@ -29,10 +30,7 @@ namespace cast {
class CastEnvironment;
class CastReceiver;
-
-namespace transport {
class UdpTransport;
-} // namespace transport
// Common base functionality for an in-process Cast receiver. This is meant to
// be subclassed with the OnAudioFrame() and OnVideoFrame() methods implemented,
@@ -85,7 +83,7 @@ class InProcessReceiver {
// Callback for the transport to notify of status changes. A default
// implementation is provided here that simply logs socket errors.
- virtual void UpdateCastTransportStatus(transport::CastTransportStatus status);
+ virtual void UpdateCastTransportStatus(CastTransportStatus status);
private:
friend class base::RefCountedThreadSafe<InProcessReceiver>;
@@ -108,7 +106,7 @@ class InProcessReceiver {
const FrameReceiverConfig audio_config_;
const FrameReceiverConfig video_config_;
- scoped_ptr<transport::UdpTransport> transport_;
+ scoped_ptr<UdpTransport> transport_;
scoped_ptr<CastReceiver> cast_receiver_;
// NOTE: Weak pointers must be invalidated before all other member variables.
diff --git a/media/cast/test/utility/udp_proxy.cc b/media/cast/test/utility/udp_proxy.cc
index 9fc3b4a44d..4714b7ed67 100644
--- a/media/cast/test/utility/udp_proxy.cc
+++ b/media/cast/test/utility/udp_proxy.cc
@@ -2,12 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <math.h>
#include <stdlib.h>
+#include <vector>
#include "media/cast/test/utility/udp_proxy.h"
#include "base/logging.h"
-#include "base/memory/linked_ptr.h"
#include "base/rand_util.h"
#include "base/synchronization/waitable_event.h"
#include "base/threading/thread.h"
@@ -55,10 +56,10 @@ class Buffer : public PacketPipe {
CHECK_GT(max_megabits_per_second, 0);
}
- virtual void Send(scoped_ptr<transport::Packet> packet) OVERRIDE {
+ virtual void Send(scoped_ptr<Packet> packet) OVERRIDE {
if (packet->size() + buffer_size_ <= max_buffer_size_) {
buffer_size_ += packet->size();
- buffer_.push_back(linked_ptr<transport::Packet>(packet.release()));
+ buffer_.push_back(linked_ptr<Packet>(packet.release()));
if (buffer_.size() == 1) {
Schedule();
}
@@ -78,7 +79,7 @@ class Buffer : public PacketPipe {
void ProcessBuffer() {
CHECK(!buffer_.empty());
- scoped_ptr<transport::Packet> packet(buffer_.front().release());
+ scoped_ptr<Packet> packet(buffer_.front().release());
buffer_size_ -= packet->size();
buffer_.pop_front();
pipe_->Send(packet.Pass());
@@ -87,7 +88,7 @@ class Buffer : public PacketPipe {
}
}
- std::deque<linked_ptr<transport::Packet> > buffer_;
+ std::deque<linked_ptr<Packet> > buffer_;
size_t buffer_size_;
size_t max_buffer_size_;
double max_megabits_per_second_; // megabits per second
@@ -103,7 +104,7 @@ class RandomDrop : public PacketPipe {
RandomDrop(double drop_fraction)
: drop_fraction_(static_cast<int>(drop_fraction * RAND_MAX)) {}
- virtual void Send(scoped_ptr<transport::Packet> packet) OVERRIDE {
+ virtual void Send(scoped_ptr<Packet> packet) OVERRIDE {
if (rand() > drop_fraction_) {
pipe_->Send(packet.Pass());
}
@@ -122,7 +123,7 @@ class SimpleDelayBase : public PacketPipe {
SimpleDelayBase() : weak_factory_(this) {}
virtual ~SimpleDelayBase() {}
- virtual void Send(scoped_ptr<transport::Packet> packet) OVERRIDE {
+ virtual void Send(scoped_ptr<Packet> packet) OVERRIDE {
double seconds = GetDelay();
task_runner_->PostDelayedTask(
FROM_HERE,
@@ -135,7 +136,7 @@ class SimpleDelayBase : public PacketPipe {
virtual double GetDelay() = 0;
private:
- virtual void SendInternal(scoped_ptr<transport::Packet> packet) {
+ virtual void SendInternal(scoped_ptr<Packet> packet) {
pipe_->Send(packet.Pass());
}
@@ -184,8 +185,8 @@ class RandomSortedDelay : public PacketPipe {
seconds_between_extra_delay_(seconds_between_extra_delay),
weak_factory_(this) {}
- virtual void Send(scoped_ptr<transport::Packet> packet) OVERRIDE {
- buffer_.push_back(linked_ptr<transport::Packet>(packet.release()));
+ virtual void Send(scoped_ptr<Packet> packet) OVERRIDE {
+ buffer_.push_back(linked_ptr<Packet>(packet.release()));
if (buffer_.size() == 1) {
Schedule();
}
@@ -238,7 +239,7 @@ class RandomSortedDelay : public PacketPipe {
void ProcessBuffer() {
CHECK(!buffer_.empty());
- scoped_ptr<transport::Packet> packet(buffer_.front().release());
+ scoped_ptr<Packet> packet(buffer_.front().release());
pipe_->Send(packet.Pass());
buffer_.pop_front();
if (!buffer_.empty()) {
@@ -247,7 +248,7 @@ class RandomSortedDelay : public PacketPipe {
}
base::TimeTicks block_until_;
- std::deque<linked_ptr<transport::Packet> > buffer_;
+ std::deque<linked_ptr<Packet> > buffer_;
double random_delay_;
double extra_delay_;
double seconds_between_extra_delay_;
@@ -279,7 +280,7 @@ class NetworkGlitchPipe : public PacketPipe {
Flip();
}
- virtual void Send(scoped_ptr<transport::Packet> packet) OVERRIDE {
+ virtual void Send(scoped_ptr<Packet> packet) OVERRIDE {
if (works_) {
pipe_->Send(packet.Pass());
}
@@ -310,13 +311,212 @@ scoped_ptr<PacketPipe> NewNetworkGlitchPipe(double average_work_time,
.Pass();
}
+
+// Internal buffer object for a client of the IPP model.
+class InterruptedPoissonProcess::InternalBuffer : public PacketPipe {
+ public:
+ InternalBuffer(base::WeakPtr<InterruptedPoissonProcess> ipp,
+ size_t size)
+ : ipp_(ipp),
+ stored_size_(0),
+ stored_limit_(size),
+ clock_(NULL),
+ weak_factory_(this) {
+ }
+
+ virtual void Send(scoped_ptr<Packet> packet) OVERRIDE {
+ // Drop if buffer is full.
+ if (stored_size_ >= stored_limit_)
+ return;
+ stored_size_ += packet->size();
+ buffer_.push_back(linked_ptr<Packet>(packet.release()));
+ buffer_time_.push_back(clock_->NowTicks());
+ DCHECK(buffer_.size() == buffer_time_.size());
+ }
+
+ virtual void InitOnIOThread(
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
+ base::TickClock* clock) OVERRIDE {
+ clock_ = clock;
+ if (ipp_)
+ ipp_->InitOnIOThread(task_runner, clock);
+ PacketPipe::InitOnIOThread(task_runner, clock);
+ }
+
+ void SendOnePacket() {
+ scoped_ptr<Packet> packet(buffer_.front().release());
+ stored_size_ -= packet->size();
+ buffer_.pop_front();
+ buffer_time_.pop_front();
+ pipe_->Send(packet.Pass());
+ DCHECK(buffer_.size() == buffer_time_.size());
+ }
+
+ bool Empty() const {
+ return buffer_.empty();
+ }
+
+ base::TimeTicks FirstPacketTime() const {
+ DCHECK(!buffer_time_.empty());
+ return buffer_time_.front();
+ }
+
+ base::WeakPtr<InternalBuffer> GetWeakPtr() {
+ return weak_factory_.GetWeakPtr();
+ }
+
+ private:
+ const base::WeakPtr<InterruptedPoissonProcess> ipp_;
+ size_t stored_size_;
+ const size_t stored_limit_;
+ std::deque<linked_ptr<Packet> > buffer_;
+ std::deque<base::TimeTicks> buffer_time_;
+ base::TickClock* clock_;
+ base::WeakPtrFactory<InternalBuffer> weak_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(InternalBuffer);
+};
+
+InterruptedPoissonProcess::InterruptedPoissonProcess(
+ const std::vector<double>& average_rates,
+ double coef_burstiness,
+ double coef_variance,
+ uint32 rand_seed)
+ : clock_(NULL),
+ average_rates_(average_rates),
+ coef_burstiness_(coef_burstiness),
+ coef_variance_(coef_variance),
+ rate_index_(0),
+ on_state_(true),
+ weak_factory_(this) {
+ mt_rand_.init_genrand(rand_seed);
+ DCHECK(!average_rates.empty());
+ ComputeRates();
+}
+
+InterruptedPoissonProcess::~InterruptedPoissonProcess() {
+}
+
+void InterruptedPoissonProcess::InitOnIOThread(
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
+ base::TickClock* clock) {
+ // Already initialized and started.
+ if (task_runner_ && clock_)
+ return;
+ task_runner_ = task_runner;
+ clock_ = clock;
+ UpdateRates();
+ SwitchOn();
+ SendPacket();
+}
+
+scoped_ptr<PacketPipe> InterruptedPoissonProcess::NewBuffer(size_t size) {
+ scoped_ptr<InternalBuffer> buffer(
+ new InternalBuffer(weak_factory_.GetWeakPtr(), size));
+ send_buffers_.push_back(buffer->GetWeakPtr());
+ return buffer.PassAs<PacketPipe>();
+}
+
+base::TimeDelta InterruptedPoissonProcess::NextEvent(double rate) {
+ // |rate| is an event rate per millisecond.
+ // The time until the next event is exponentially distributed with mean
+ // equal to the inverse of |rate|.
+ return base::TimeDelta::FromMillisecondsD(
+ fabs(-log(1.0 - RandDouble()) / rate));
+}
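
The sampling above is the standard inverse-transform construction for an
exponential inter-arrival time: with U uniform on [0, 1) and lambda = |rate|
(events per millisecond),

  P(T <= t) = P(-ln(1 - U) / lambda <= t) = P(U <= 1 - e^{-lambda t})
            = 1 - e^{-lambda t},

so T ~ Exp(lambda) with mean 1/lambda milliseconds, which is what the comment
in NextEvent() states.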
+
+double InterruptedPoissonProcess::RandDouble() {
+ // Generate a 64-bit random number with MT19937 and then convert it to
+ // a double in [0, 1).
+ uint64 rand = mt_rand_.genrand_int32();
+ rand <<= 32;
+ rand |= mt_rand_.genrand_int32();
+ return base::BitsToOpenEndedUnitInterval(rand);
+}
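
A standalone analogue of RandDouble() using only the standard library (a sketch,
not the Chromium base::BitsToOpenEndedUnitInterval() helper): draw 64 bits and
keep the top 53 so the result is an exactly representable double in [0, 1).

  #include <cstdint>
  #include <random>

  double RandDoubleSketch(std::mt19937_64* rng) {
    const uint64_t bits = (*rng)();
    // 2^53 == 9007199254740992; using 53 bits keeps every value representable.
    return (bits >> 11) * (1.0 / 9007199254740992.0);
  }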
+
+void InterruptedPoissonProcess::ComputeRates() {
+ double avg_rate = average_rates_[rate_index_];
+
+ send_rate_ = avg_rate / coef_burstiness_;
+ switch_off_rate_ =
+ 2 * avg_rate * (1 - coef_burstiness_) * (1 - coef_burstiness_) /
+ coef_burstiness_ / (coef_variance_ - 1);
+ switch_on_rate_ =
+ 2 * avg_rate * (1 - coef_burstiness_) / (coef_variance_ - 1);
+}
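
In formula form, with r the current average rate, b = coef_burstiness_ and
c = coef_variance_, ComputeRates() sets

  lambda_send = r / b
  lambda_off  = 2 r (1 - b)^2 / (b (c - 1))
  lambda_on   = 2 r (1 - b) / (c - 1)

where lambda_off (switch_off_rate_) governs how quickly an ON period ends,
since SwitchOn() schedules SwitchOff() after NextEvent(switch_off_rate_), and
lambda_on (switch_on_rate_) governs how quickly an OFF period ends.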
+
+void InterruptedPoissonProcess::UpdateRates() {
+ ComputeRates();
+
+ // Rates are updated once per second.
+ rate_index_ = (rate_index_ + 1) % average_rates_.size();
+ task_runner_->PostDelayedTask(
+ FROM_HERE,
+ base::Bind(&InterruptedPoissonProcess::UpdateRates,
+ weak_factory_.GetWeakPtr()),
+ base::TimeDelta::FromSeconds(1));
+}
+
+void InterruptedPoissonProcess::SwitchOff() {
+ on_state_ = false;
+ task_runner_->PostDelayedTask(
+ FROM_HERE,
+ base::Bind(&InterruptedPoissonProcess::SwitchOn,
+ weak_factory_.GetWeakPtr()),
+ NextEvent(switch_on_rate_));
+}
+
+void InterruptedPoissonProcess::SwitchOn() {
+ on_state_ = true;
+ task_runner_->PostDelayedTask(
+ FROM_HERE,
+ base::Bind(&InterruptedPoissonProcess::SwitchOff,
+ weak_factory_.GetWeakPtr()),
+ NextEvent(switch_off_rate_));
+}
+
+void InterruptedPoissonProcess::SendPacket() {
+ task_runner_->PostDelayedTask(
+ FROM_HERE,
+ base::Bind(&InterruptedPoissonProcess::SendPacket,
+ weak_factory_.GetWeakPtr()),
+ NextEvent(send_rate_));
+
+ // If in the OFF state, don't send.
+ if (!on_state_)
+ return;
+
+ // Find the earliest packet to send.
+ base::TimeTicks earliest_time;
+ for (size_t i = 0; i < send_buffers_.size(); ++i) {
+ if (!send_buffers_[i])
+ continue;
+ if (send_buffers_[i]->Empty())
+ continue;
+ if (earliest_time.is_null() ||
+ send_buffers_[i]->FirstPacketTime() < earliest_time)
+ earliest_time = send_buffers_[i]->FirstPacketTime();
+ }
+ for (size_t i = 0; i < send_buffers_.size(); ++i) {
+ if (!send_buffers_[i])
+ continue;
+ if (send_buffers_[i]->Empty())
+ continue;
+ if (send_buffers_[i]->FirstPacketTime() != earliest_time)
+ continue;
+ send_buffers_[i]->SendOnePacket();
+ break;
+ }
+}
+
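
The two loops in SendPacket() first find the minimum FirstPacketTime() and then
send one packet from the first non-empty buffer holding it. A roughly equivalent
single-pass form, shown only to clarify what is being computed:

  int best = -1;
  for (size_t i = 0; i < send_buffers_.size(); ++i) {
    if (!send_buffers_[i] || send_buffers_[i]->Empty())
      continue;
    if (best < 0 ||
        send_buffers_[i]->FirstPacketTime() <
            send_buffers_[best]->FirstPacketTime()) {
      best = static_cast<int>(i);
    }
  }
  if (best >= 0)
    send_buffers_[best]->SendOnePacket();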
class UDPProxyImpl;
class PacketSender : public PacketPipe {
public:
PacketSender(UDPProxyImpl* udp_proxy, const net::IPEndPoint* destination)
: udp_proxy_(udp_proxy), destination_(destination) {}
- virtual void Send(scoped_ptr<transport::Packet> packet) OVERRIDE;
+ virtual void Send(scoped_ptr<Packet> packet) OVERRIDE;
virtual void AppendToPipe(scoped_ptr<PacketPipe> pipe) OVERRIDE {
NOTREACHED();
}
@@ -426,7 +626,7 @@ class UDPProxyImpl : public UDPProxy {
proxy_thread_.Stop();
}
- void Send(scoped_ptr<transport::Packet> packet,
+ void Send(scoped_ptr<Packet> packet,
const net::IPEndPoint& destination) {
if (blocked_) {
LOG(ERROR) << "Cannot write packet right now: blocked";
@@ -522,7 +722,7 @@ class UDPProxyImpl : public UDPProxy {
void PollRead() {
while (true) {
- packet_.reset(new transport::Packet(kMaxPacketSize));
+ packet_.reset(new Packet(kMaxPacketSize));
scoped_refptr<net::IOBuffer> recv_buf =
new net::WrappedIOBuffer(reinterpret_cast<char*>(&packet_->front()));
int len = socket_->RecvFrom(
@@ -539,7 +739,7 @@ class UDPProxyImpl : public UDPProxy {
}
void AllowWrite(scoped_refptr<net::IOBuffer> buf,
- scoped_ptr<transport::Packet> packet,
+ scoped_ptr<Packet> packet,
int unused_len) {
DCHECK(blocked_);
blocked_ = false;
@@ -562,7 +762,7 @@ class UDPProxyImpl : public UDPProxy {
// For receiving.
net::IPEndPoint recv_address_;
- scoped_ptr<transport::Packet> packet_;
+ scoped_ptr<Packet> packet_;
// For sending.
bool blocked_;
@@ -570,7 +770,7 @@ class UDPProxyImpl : public UDPProxy {
base::WeakPtrFactory<UDPProxyImpl> weak_factory_;
};
-void PacketSender::Send(scoped_ptr<transport::Packet> packet) {
+void PacketSender::Send(scoped_ptr<Packet> packet) {
udp_proxy_->Send(packet.Pass(), *destination_);
}
diff --git a/media/cast/test/utility/udp_proxy.h b/media/cast/test/utility/udp_proxy.h
index b102573a94..ea50a2c86a 100644
--- a/media/cast/test/utility/udp_proxy.h
+++ b/media/cast/test/utility/udp_proxy.h
@@ -8,11 +8,14 @@
#include <vector>
#include "base/basictypes.h"
+#include "base/memory/linked_ptr.h"
#include "base/memory/ref_counted.h"
#include "base/memory/scoped_ptr.h"
+#include "base/memory/weak_ptr.h"
#include "base/single_thread_task_runner.h"
-#include "media/cast/transport/cast_transport_config.h"
+#include "media/cast/net/cast_transport_config.h"
#include "net/base/ip_endpoint.h"
+#include "third_party/mt19937ar/mt19937ar.h"
namespace net {
class NetLog;
@@ -30,7 +33,7 @@ class PacketPipe {
public:
PacketPipe();
virtual ~PacketPipe();
- virtual void Send(scoped_ptr<transport::Packet> packet) = 0;
+ virtual void Send(scoped_ptr<Packet> packet) = 0;
// Allows injection of fake test runner for testing.
virtual void InitOnIOThread(
const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
@@ -43,6 +46,63 @@ class PacketPipe {
base::TickClock* clock_;
};
+// Implements an Interrupted Poisson Process for packet delivery.
+// The process has two states, ON and OFF, and the rates of switching
+// between these two states are defined.
+// While in the ON state, packets are sent at a defined rate.
+// While in the OFF state, packets are not sent.
+// The rate above is the average rate of a Poisson distribution.
+class InterruptedPoissonProcess {
+ public:
+ InterruptedPoissonProcess(
+ const std::vector<double>& average_rates,
+ double coef_burstiness,
+ double coef_variance,
+ uint32 rand_seed);
+ ~InterruptedPoissonProcess();
+
+ scoped_ptr<PacketPipe> NewBuffer(size_t size);
+
+ private:
+ class InternalBuffer;
+
+ // |task_runner| runs tasks on the IO thread.
+ // |clock| is the system clock.
+ void InitOnIOThread(
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
+ base::TickClock* clock);
+
+ base::TimeDelta NextEvent(double rate);
+ double RandDouble();
+ void ComputeRates();
+ void UpdateRates();
+ void SwitchOff();
+ void SwitchOn();
+ void SendPacket();
+
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
+ base::TickClock* clock_;
+ const std::vector<double> average_rates_;
+ const double coef_burstiness_;
+ const double coef_variance_;
+ int rate_index_;
+
+ // The following rates are per millisecond.
+ double send_rate_;
+ double switch_off_rate_;
+ double switch_on_rate_;
+ bool on_state_;
+
+ std::vector<base::WeakPtr<InternalBuffer> > send_buffers_;
+
+ // Fast pseudo random number generator.
+ MersenneTwister mt_rand_;
+
+ base::WeakPtrFactory<InterruptedPoissonProcess> weak_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(InterruptedPoissonProcess);
+};
+
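
A hypothetical wiring of the class declared above (values and sizes are
illustrative only; the namespace and scoped_ptr handling follow the surrounding
code):

  std::vector<double> rates;   // Average packet rates per millisecond,
  rates.push_back(10.0);       // one entry consumed per second.
  rates.push_back(2.5);
  InterruptedPoissonProcess ipp(rates,
                                0.3 /* coef_burstiness */,
                                4.1 /* coef_variance */,
                                42 /* rand_seed */);
  scoped_ptr<PacketPipe> pipe = ipp.NewBuffer(64 * 1024 /* bytes */);
  // |pipe| is then appended to the proxy's pipe chain; the process itself is
  // initialized lazily the first time the buffer's InitOnIOThread() runs.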
// A UDPProxy will set up a UDP socket and bind to |local_port|.
// Packets sent to that port will be forwarded to |destination|.
// Packets sent from |destination| to |local_port| will be returned
diff --git a/media/cast/test/utility/udp_proxy_main.cc b/media/cast/test/utility/udp_proxy_main.cc
index 800f09dc70..9dd047abfd 100644
--- a/media/cast/test/utility/udp_proxy_main.cc
+++ b/media/cast/test/utility/udp_proxy_main.cc
@@ -68,7 +68,7 @@ ByteCounter out_pipe_output_counter;
class ByteCounterPipe : public media::cast::test::PacketPipe {
public:
ByteCounterPipe(ByteCounter* counter) : counter_(counter) {}
- virtual void Send(scoped_ptr<media::cast::transport::Packet> packet)
+ virtual void Send(scoped_ptr<media::cast::Packet> packet)
OVERRIDE {
counter_->Increment(packet->size());
pipe_->Send(packet.Pass());
diff --git a/media/cdm/ppapi/cdm_adapter.cc b/media/cdm/ppapi/cdm_adapter.cc
index 10feef7754..14aa23120b 100644
--- a/media/cdm/ppapi/cdm_adapter.cc
+++ b/media/cdm/ppapi/cdm_adapter.cc
@@ -255,7 +255,6 @@ CdmAdapter::CdmAdapter(PP_Instance instance, pp::Module* module)
#if defined(OS_CHROMEOS)
output_protection_(this),
platform_verification_(this),
- challenge_in_progress_(false),
output_link_mask_(0),
output_protection_mask_(0),
query_output_protection_in_progress_(false),
@@ -838,7 +837,7 @@ void CdmAdapter::DeliverBlock(int32_t result,
const LinkedDecryptedBlock& decrypted_block,
const PP_DecryptTrackingInfo& tracking_info) {
PP_DCHECK(result == PP_OK);
- PP_DecryptedBlockInfo decrypted_block_info;
+ PP_DecryptedBlockInfo decrypted_block_info = {};
decrypted_block_info.tracking_info = tracking_info;
decrypted_block_info.tracking_info.timestamp = decrypted_block->Timestamp();
decrypted_block_info.tracking_info.buffer_id = 0;
@@ -855,9 +854,10 @@ void CdmAdapter::DeliverBlock(int32_t result,
} else {
PpbBuffer* ppb_buffer =
static_cast<PpbBuffer*>(decrypted_block->DecryptedBuffer());
- buffer = ppb_buffer->buffer_dev();
decrypted_block_info.tracking_info.buffer_id = ppb_buffer->buffer_id();
decrypted_block_info.data_size = ppb_buffer->Size();
+
+ buffer = ppb_buffer->TakeBuffer();
}
}
@@ -893,7 +893,7 @@ void CdmAdapter::DeliverFrame(
const LinkedVideoFrame& video_frame,
const PP_DecryptTrackingInfo& tracking_info) {
PP_DCHECK(result == PP_OK);
- PP_DecryptedFrameInfo decrypted_frame_info;
+ PP_DecryptedFrameInfo decrypted_frame_info = {};
decrypted_frame_info.tracking_info.request_id = tracking_info.request_id;
decrypted_frame_info.tracking_info.buffer_id = 0;
decrypted_frame_info.result = CdmStatusToPpDecryptResult(status);
@@ -908,8 +908,6 @@ void CdmAdapter::DeliverFrame(
PpbBuffer* ppb_buffer =
static_cast<PpbBuffer*>(video_frame->FrameBuffer());
- buffer = ppb_buffer->buffer_dev();
-
decrypted_frame_info.tracking_info.timestamp = video_frame->Timestamp();
decrypted_frame_info.tracking_info.buffer_id = ppb_buffer->buffer_id();
decrypted_frame_info.format =
@@ -928,8 +926,11 @@ void CdmAdapter::DeliverFrame(
video_frame->Stride(cdm::VideoFrame::kUPlane);
decrypted_frame_info.strides[PP_DECRYPTEDFRAMEPLANES_V] =
video_frame->Stride(cdm::VideoFrame::kVPlane);
+
+ buffer = ppb_buffer->TakeBuffer();
}
}
+
pp::ContentDecryptor_Private::DeliverFrame(buffer, decrypted_frame_info);
}
@@ -939,7 +940,7 @@ void CdmAdapter::DeliverSamples(int32_t result,
const PP_DecryptTrackingInfo& tracking_info) {
PP_DCHECK(result == PP_OK);
- PP_DecryptedSampleInfo decrypted_sample_info;
+ PP_DecryptedSampleInfo decrypted_sample_info = {};
decrypted_sample_info.tracking_info = tracking_info;
decrypted_sample_info.tracking_info.timestamp = 0;
decrypted_sample_info.tracking_info.buffer_id = 0;
@@ -956,11 +957,13 @@ void CdmAdapter::DeliverSamples(int32_t result,
} else {
PpbBuffer* ppb_buffer =
static_cast<PpbBuffer*>(audio_frames->FrameBuffer());
- buffer = ppb_buffer->buffer_dev();
+
decrypted_sample_info.tracking_info.buffer_id = ppb_buffer->buffer_id();
decrypted_sample_info.data_size = ppb_buffer->Size();
decrypted_sample_info.format =
CdmAudioFormatToPpDecryptedSampleFormat(audio_frames->Format());
+
+ buffer = ppb_buffer->TakeBuffer();
}
}
@@ -1005,34 +1008,33 @@ void CdmAdapter::SendPlatformChallenge(
const char* service_id, uint32_t service_id_length,
const char* challenge, uint32_t challenge_length) {
#if defined(OS_CHROMEOS)
- PP_DCHECK(!challenge_in_progress_);
-
- // Ensure member variables set by the callback are in a clean state.
- signed_data_output_ = pp::Var();
- signed_data_signature_output_ = pp::Var();
- platform_key_certificate_output_ = pp::Var();
-
pp::VarArrayBuffer challenge_var(challenge_length);
uint8_t* var_data = static_cast<uint8_t*>(challenge_var.Map());
memcpy(var_data, challenge, challenge_length);
std::string service_id_str(service_id, service_id_length);
+
+ linked_ptr<PepperPlatformChallengeResponse> response(
+ new PepperPlatformChallengeResponse());
+
int32_t result = platform_verification_.ChallengePlatform(
- pp::Var(service_id_str), challenge_var, &signed_data_output_,
- &signed_data_signature_output_, &platform_key_certificate_output_,
- callback_factory_.NewCallback(&CdmAdapter::SendPlatformChallengeDone));
+ pp::Var(service_id_str),
+ challenge_var,
+ &response->signed_data,
+ &response->signed_data_signature,
+ &response->platform_key_certificate,
+ callback_factory_.NewCallback(&CdmAdapter::SendPlatformChallengeDone,
+ response));
challenge_var.Unmap();
- if (result == PP_OK_COMPLETIONPENDING) {
- challenge_in_progress_ = true;
+ if (result == PP_OK_COMPLETIONPENDING)
return;
- }
// Fall through on error and issue an empty OnPlatformChallengeResponse().
PP_DCHECK(result != PP_OK);
#endif
- cdm::PlatformChallengeResponse response = {};
- cdm_->OnPlatformChallengeResponse(response);
+ cdm::PlatformChallengeResponse platform_challenge_response = {};
+ cdm_->OnPlatformChallengeResponse(platform_challenge_response);
}
void CdmAdapter::EnableOutputProtection(uint32_t desired_protection_mask) {
@@ -1131,7 +1133,14 @@ void CdmAdapter::ReportOutputProtectionQueryResult() {
return;
}
- if ((output_protection_mask_ & external_links) == external_links) {
+ const uint32_t kProtectableLinks =
+ cdm::kLinkTypeHDMI | cdm::kLinkTypeDVI | cdm::kLinkTypeDisplayPort;
+ bool is_unprotectable_link_connected = external_links & ~kProtectableLinks;
+ bool is_hdcp_enabled_on_all_protectable_links =
+ output_protection_mask_ & cdm::kProtectionHDCP;
+
+ if (!is_unprotectable_link_connected &&
+ is_hdcp_enabled_on_all_protectable_links) {
ReportOutputProtectionUMA(
OUTPUT_PROTECTION_ALL_EXTERNAL_LINKS_PROTECTED);
uma_for_output_protection_positive_result_reported_ = true;
@@ -1143,29 +1152,29 @@ void CdmAdapter::ReportOutputProtectionQueryResult() {
// queries and success results.
}
-void CdmAdapter::SendPlatformChallengeDone(int32_t result) {
- challenge_in_progress_ = false;
-
+void CdmAdapter::SendPlatformChallengeDone(
+ int32_t result,
+ const linked_ptr<PepperPlatformChallengeResponse>& response) {
if (result != PP_OK) {
CDM_DLOG() << __FUNCTION__ << ": Platform challenge failed!";
- cdm::PlatformChallengeResponse response = {};
- cdm_->OnPlatformChallengeResponse(response);
+ cdm::PlatformChallengeResponse platform_challenge_response = {};
+ cdm_->OnPlatformChallengeResponse(platform_challenge_response);
return;
}
- pp::VarArrayBuffer signed_data_var(signed_data_output_);
- pp::VarArrayBuffer signed_data_signature_var(signed_data_signature_output_);
+ pp::VarArrayBuffer signed_data_var(response->signed_data);
+ pp::VarArrayBuffer signed_data_signature_var(response->signed_data_signature);
std::string platform_key_certificate_string =
- platform_key_certificate_output_.AsString();
+ response->platform_key_certificate.AsString();
- cdm::PlatformChallengeResponse response = {
+ cdm::PlatformChallengeResponse platform_challenge_response = {
static_cast<uint8_t*>(signed_data_var.Map()),
signed_data_var.ByteLength(),
static_cast<uint8_t*>(signed_data_signature_var.Map()),
signed_data_signature_var.ByteLength(),
reinterpret_cast<const uint8_t*>(platform_key_certificate_string.data()),
static_cast<uint32_t>(platform_key_certificate_string.length())};
- cdm_->OnPlatformChallengeResponse(response);
+ cdm_->OnPlatformChallengeResponse(platform_challenge_response);
signed_data_var.Unmap();
signed_data_signature_var.Unmap();
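
The refactor above replaces the member-variable challenge outputs with a
per-call response object owned by the completion callback. A standalone sketch
of the same ownership pattern in plain C++ (hypothetical names, not the PPAPI
types):

  #include <functional>
  #include <memory>
  #include <string>

  struct ChallengeResponse {
    std::string signed_data;
    std::string signature;
    std::string certificate;
  };

  // Each request gets its own heap-allocated response kept alive by the
  // callback that consumes it, so overlapping requests cannot clobber each
  // other's outputs the way shared member variables could.
  void StartChallenge(
      std::function<void(int result,
                         std::shared_ptr<ChallengeResponse>)> done) {
    auto response = std::make_shared<ChallengeResponse>();
    // ... start the asynchronous platform call, writing into *response,
    // then invoke |done| from its completion handler ...
    done(0, response);
  }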
diff --git a/media/cdm/ppapi/cdm_adapter.h b/media/cdm/ppapi/cdm_adapter.h
index cd65b181f0..0189c1fc35 100644
--- a/media/cdm/ppapi/cdm_adapter.h
+++ b/media/cdm/ppapi/cdm_adapter.h
@@ -245,21 +245,21 @@ class CdmAdapter : public pp::Instance,
void ReportOutputProtectionQuery();
void ReportOutputProtectionQueryResult();
- void SendPlatformChallengeDone(int32_t result);
+ struct PepperPlatformChallengeResponse {
+ pp::Var signed_data;
+ pp::Var signed_data_signature;
+ pp::Var platform_key_certificate;
+ };
+
+ void SendPlatformChallengeDone(
+ int32_t result,
+ const linked_ptr<PepperPlatformChallengeResponse>& response);
void EnableProtectionDone(int32_t result);
void QueryOutputProtectionStatusDone(int32_t result);
pp::OutputProtection_Private output_protection_;
pp::PlatformVerification platform_verification_;
- // Since PPAPI doesn't provide handlers for CompletionCallbacks with more than
- // one output we need to manage our own. These values are only read by
- // SendPlatformChallengeDone().
- pp::Var signed_data_output_;
- pp::Var signed_data_signature_output_;
- pp::Var platform_key_certificate_output_;
- bool challenge_in_progress_;
-
// Same as above, these are only read by QueryOutputProtectionStatusDone().
uint32_t output_link_mask_;
uint32_t output_protection_mask_;
diff --git a/media/cdm/ppapi/cdm_helpers.cc b/media/cdm/ppapi/cdm_helpers.cc
index 36b95021f8..62f93a009d 100644
--- a/media/cdm/ppapi/cdm_helpers.cc
+++ b/media/cdm/ppapi/cdm_helpers.cc
@@ -4,6 +4,7 @@
#include "media/cdm/ppapi/cdm_helpers.h"
+#include <algorithm>
#include <utility>
#include "base/basictypes.h"
@@ -20,6 +21,61 @@
namespace media {
+// static
+PpbBuffer* PpbBuffer::Create(const pp::Buffer_Dev& buffer,
+ uint32_t buffer_id,
+ PpbBufferAllocator* allocator) {
+ PP_DCHECK(buffer.data());
+ PP_DCHECK(buffer.size());
+ PP_DCHECK(buffer_id);
+ PP_DCHECK(allocator);
+ return new PpbBuffer(buffer, buffer_id, allocator);
+}
+
+void PpbBuffer::Destroy() {
+ delete this;
+}
+
+uint32_t PpbBuffer::Capacity() const {
+ return buffer_.size();
+}
+
+uint8_t* PpbBuffer::Data() {
+ return static_cast<uint8_t*>(buffer_.data());
+}
+
+void PpbBuffer::SetSize(uint32_t size) {
+ PP_DCHECK(size <= Capacity());
+ if (size > Capacity()) {
+ size_ = 0;
+ return;
+ }
+
+ size_ = size;
+}
+
+pp::Buffer_Dev PpbBuffer::TakeBuffer() {
+ PP_DCHECK(!buffer_.is_null());
+ pp::Buffer_Dev buffer;
+ std::swap(buffer, buffer_);
+ buffer_id_ = 0;
+ size_ = 0;
+ return buffer;
+}
+
+PpbBuffer::PpbBuffer(pp::Buffer_Dev buffer,
+ uint32_t buffer_id,
+ PpbBufferAllocator* allocator)
+ : buffer_(buffer), buffer_id_(buffer_id), size_(0), allocator_(allocator) {
+}
+
+PpbBuffer::~PpbBuffer() {
+ PP_DCHECK(!buffer_id_ == buffer_.is_null());
+ // If we still own |buffer_|, release it through the |allocator_|.
+ if (buffer_id_)
+ allocator_->Release(buffer_id_);
+}
+
cdm::Buffer* PpbBufferAllocator::Allocate(uint32_t capacity) {
PP_DCHECK(pp::Module::Get()->core()->IsMainThread());
@@ -46,7 +102,7 @@ cdm::Buffer* PpbBufferAllocator::Allocate(uint32_t capacity) {
allocated_buffers_.insert(std::make_pair(buffer_id, buffer));
- return PpbBuffer::Create(buffer, buffer_id);
+ return PpbBuffer::Create(buffer, buffer_id, this);
}
void PpbBufferAllocator::Release(uint32_t buffer_id) {
diff --git a/media/cdm/ppapi/cdm_helpers.h b/media/cdm/ppapi/cdm_helpers.h
index e033dd79bf..1ee579b8f0 100644
--- a/media/cdm/ppapi/cdm_helpers.h
+++ b/media/cdm/ppapi/cdm_helpers.h
@@ -20,6 +20,8 @@
namespace media {
+class PpbBufferAllocator;
+
// cdm::Buffer implementation that provides access to memory owned by a
// pp::Buffer_Dev.
// This class holds a reference to the Buffer_Dev throughout its lifetime.
@@ -27,48 +29,37 @@ namespace media {
// pp::Buffer_Dev and PPB_Buffer_Dev.
class PpbBuffer : public cdm::Buffer {
public:
- static PpbBuffer* Create(const pp::Buffer_Dev& buffer, uint32_t buffer_id) {
- PP_DCHECK(buffer.data());
- PP_DCHECK(buffer.size());
- PP_DCHECK(buffer_id);
- return new PpbBuffer(buffer, buffer_id);
- }
+ static PpbBuffer* Create(const pp::Buffer_Dev& buffer, uint32_t buffer_id,
+ PpbBufferAllocator* allocator);
// cdm::Buffer implementation.
- virtual void Destroy() OVERRIDE { delete this; }
-
- virtual uint32_t Capacity() const OVERRIDE { return buffer_.size(); }
-
- virtual uint8_t* Data() OVERRIDE {
- return static_cast<uint8_t*>(buffer_.data());
- }
-
- virtual void SetSize(uint32_t size) OVERRIDE {
- PP_DCHECK(size <= Capacity());
- if (size > Capacity()) {
- size_ = 0;
- return;
- }
-
- size_ = size;
- }
-
+ virtual void Destroy() OVERRIDE;
+ virtual uint32_t Capacity() const OVERRIDE;
+ virtual uint8_t* Data() OVERRIDE;
+ virtual void SetSize(uint32_t size) OVERRIDE;
virtual uint32_t Size() const OVERRIDE { return size_; }
- pp::Buffer_Dev buffer_dev() const { return buffer_; }
+ // Takes the |buffer_| from this class and returns it.
+ // Note: The caller must ensure |allocator->Release()| is called later so that
+ // the buffer can be reused by the allocator.
+ // Since pp::Buffer_Dev is ref-counted, the caller now holds one reference to
+ // the buffer and this class holds no reference. Note that other references
+ // may still exist. For example, PpbBufferAllocator always holds a reference
+ // to all allocated buffers.
+ pp::Buffer_Dev TakeBuffer();
uint32_t buffer_id() const { return buffer_id_; }
private:
- PpbBuffer(pp::Buffer_Dev buffer, uint32_t buffer_id)
- : buffer_(buffer),
- buffer_id_(buffer_id),
- size_(0) {}
- virtual ~PpbBuffer() {}
+ PpbBuffer(pp::Buffer_Dev buffer,
+ uint32_t buffer_id,
+ PpbBufferAllocator* allocator);
+ virtual ~PpbBuffer();
pp::Buffer_Dev buffer_;
uint32_t buffer_id_;
uint32_t size_;
+ PpbBufferAllocator* allocator_;
DISALLOW_COPY_AND_ASSIGN(PpbBuffer);
};
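
Restating the call order used by the DeliverBlock()/DeliverFrame()/
DeliverSamples() hunks earlier in this patch: read the id and size first, take
the buffer last.

  PpbBuffer* ppb_buffer =
      static_cast<PpbBuffer*>(decrypted_block->DecryptedBuffer());
  decrypted_block_info.tracking_info.buffer_id = ppb_buffer->buffer_id();
  decrypted_block_info.data_size = ppb_buffer->Size();
  // TakeBuffer() comes last: it clears buffer_id_ and size_ on the PpbBuffer
  // and hands the pp::Buffer_Dev reference to the caller.
  pp::Buffer_Dev buffer = ppb_buffer->TakeBuffer();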
diff --git a/media/ffmpeg/ffmpeg_common.cc b/media/ffmpeg/ffmpeg_common.cc
index d87aa82083..5f52fe95ba 100644
--- a/media/ffmpeg/ffmpeg_common.cc
+++ b/media/ffmpeg/ffmpeg_common.cc
@@ -285,11 +285,19 @@ void AVCodecContextToAudioDecoderConfig(
ChannelLayout channel_layout = ChannelLayoutToChromeChannelLayout(
codec_context->channel_layout, codec_context->channels);
+ int sample_rate = codec_context->sample_rate;
if (codec == kCodecOpus) {
// |codec_context->sample_fmt| is not set by FFmpeg because Opus decoding is
// not enabled in FFmpeg. It doesn't matter what value is set here, so long
// as it's valid, the true sample format is selected inside the decoder.
sample_format = kSampleFormatF32;
+
+ // Always use 48 kHz for Opus. Technically we should match the highest
+ // supported hardware sample rate among [8, 12, 16, 24, 48] kHz, but we
+ // don't know the hardware sample rate at this point and those rates are
+ // rarely used for output. See the "Input Sample Rate" section of the spec:
+ // http://tools.ietf.org/html/draft-terriberry-oggopus-01#page-11
+ sample_rate = 48000;
}
base::TimeDelta seek_preroll;
@@ -301,7 +309,7 @@ void AVCodecContextToAudioDecoderConfig(
config->Initialize(codec,
sample_format,
channel_layout,
- codec_context->sample_rate,
+ sample_rate,
codec_context->extradata,
codec_context->extradata_size,
is_encrypted,
@@ -396,7 +404,7 @@ void AVStreamToVideoDecoderConfig(
if (codec == kCodecVP9) {
// TODO(tomfinegan): libavcodec doesn't know about VP9.
format = VideoFrame::YV12;
- coded_size = natural_size;
+ coded_size = visible_rect.size();
}
// Pad out |coded_size| for subsampled YUV formats.
diff --git a/media/ffmpeg/ffmpeg_common.h b/media/ffmpeg/ffmpeg_common.h
index ef1a7b6a76..d4e85c80e6 100644
--- a/media/ffmpeg/ffmpeg_common.h
+++ b/media/ffmpeg/ffmpeg_common.h
@@ -33,11 +33,11 @@ MSVC_PUSH_DISABLE_WARNING(4244);
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavformat/avio.h>
-#include <libavutil/audioconvert.h>
#include <libavutil/avutil.h>
-#include <libavutil/mathematics.h>
-#include <libavutil/log.h>
#include <libavutil/imgutils.h>
+#include <libavutil/log.h>
+#include <libavutil/mathematics.h>
+#include <libavutil/opt.h>
MSVC_POP_WARNING();
} // extern "C"
diff --git a/media/ffmpeg/ffmpeg_common_unittest.cc b/media/ffmpeg/ffmpeg_common_unittest.cc
index 31397df7fa..6b168eee45 100644
--- a/media/ffmpeg/ffmpeg_common_unittest.cc
+++ b/media/ffmpeg/ffmpeg_common_unittest.cc
@@ -2,64 +2,37 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "base/files/file_path.h"
#include "base/logging.h"
-#include "base/path_service.h"
-#include "media/base/media.h"
#include "media/ffmpeg/ffmpeg_common.h"
+#include "media/filters/ffmpeg_glue.h"
#include "testing/gtest/include/gtest/gtest.h"
-using base::TimeDelta;
-
namespace media {
-static AVIndexEntry kIndexEntries[] = {
- // pos, timestamp, flags, size, min_distance
- { 0, 0, AVINDEX_KEYFRAME, 0, 0 },
- { 2000, 1000, AVINDEX_KEYFRAME, 0, 0 },
- { 3000, 2000, 0, 0, 0 },
- { 5000, 3000, AVINDEX_KEYFRAME, 0, 0 },
- { 6000, 4000, 0, 0, 0 },
- { 8000, 5000, AVINDEX_KEYFRAME, 0, 0 },
- { 9000, 6000, AVINDEX_KEYFRAME, 0, 0 },
- { 11500, 7000, AVINDEX_KEYFRAME, 0, 0 },
-};
-
-static const AVRational kTimeBase = { 1, 1000 };
-
class FFmpegCommonTest : public testing::Test {
public:
- FFmpegCommonTest();
- virtual ~FFmpegCommonTest();
-
- protected:
- AVStream stream_;
-
- DISALLOW_COPY_AND_ASSIGN(FFmpegCommonTest);
+ FFmpegCommonTest() { FFmpegGlue::InitializeFFmpeg(); }
+ virtual ~FFmpegCommonTest() {};
};
-static bool InitFFmpeg() {
- static bool initialized = false;
- if (initialized) {
- return true;
- }
- base::FilePath path;
- PathService::Get(base::DIR_MODULE, &path);
- return media::InitializeMediaLibrary(path);
-}
+TEST_F(FFmpegCommonTest, OpusAudioDecoderConfig) {
+ AVCodecContext context = {0};
+ context.codec_type = AVMEDIA_TYPE_AUDIO;
+ context.codec_id = AV_CODEC_ID_OPUS;
+ context.channel_layout = CHANNEL_LAYOUT_STEREO;
+ context.channels = 2;
+ context.sample_fmt = AV_SAMPLE_FMT_FLT;
-FFmpegCommonTest::FFmpegCommonTest() {
- CHECK(InitFFmpeg());
- stream_.time_base = kTimeBase;
- stream_.index_entries = kIndexEntries;
- stream_.index_entries_allocated_size = sizeof(kIndexEntries);
- stream_.nb_index_entries = arraysize(kIndexEntries);
-}
+ // During conversion this sample rate should be changed to 48kHz.
+ context.sample_rate = 44100;
-FFmpegCommonTest::~FFmpegCommonTest() {}
+ AudioDecoderConfig decoder_config;
+ AVCodecContextToAudioDecoderConfig(&context, false, &decoder_config, false);
+ EXPECT_EQ(48000, decoder_config.samples_per_second());
+}
TEST_F(FFmpegCommonTest, TimeBaseConversions) {
- int64 test_data[][5] = {
+ const int64 test_data[][5] = {
{1, 2, 1, 500000, 1 },
{1, 3, 1, 333333, 1 },
{1, 3, 2, 666667, 2 },
@@ -72,7 +45,8 @@ TEST_F(FFmpegCommonTest, TimeBaseConversions) {
time_base.num = static_cast<int>(test_data[i][0]);
time_base.den = static_cast<int>(test_data[i][1]);
- TimeDelta time_delta = ConvertFromTimeBase(time_base, test_data[i][2]);
+ base::TimeDelta time_delta =
+ ConvertFromTimeBase(time_base, test_data[i][2]);
EXPECT_EQ(time_delta.InMicroseconds(), test_data[i][3]);
EXPECT_EQ(ConvertToTimeBase(time_base, time_delta), test_data[i][4]);
@@ -150,5 +124,4 @@ TEST_F(FFmpegCommonTest, UTCDateToTime_Invalid) {
}
}
-
} // namespace media
diff --git a/media/filters/audio_clock.cc b/media/filters/audio_clock.cc
index 0454e85e8f..e315fa31e2 100644
--- a/media/filters/audio_clock.cc
+++ b/media/filters/audio_clock.cc
@@ -45,20 +45,32 @@ void AudioClock::WroteSilence(int frames, int delay_frames) {
PushBufferedAudio(frames, 0, kNoTimestamp());
}
-base::TimeDelta AudioClock::CurrentMediaTimestamp() const {
+base::TimeDelta AudioClock::CurrentMediaTimestamp(
+ base::TimeDelta time_since_writing) const {
+ int frames_to_skip =
+ static_cast<int>(time_since_writing.InSecondsF() * sample_rate_);
int silence_frames = 0;
for (size_t i = 0; i < buffered_audio_.size(); ++i) {
+ int frames = buffered_audio_[i].frames;
+ if (frames_to_skip > 0) {
+ if (frames <= frames_to_skip) {
+ frames_to_skip -= frames;
+ continue;
+ }
+ frames -= frames_to_skip;
+ frames_to_skip = 0;
+ }
+
// Account for silence ahead of the buffer closest to being played.
if (buffered_audio_[i].playback_rate == 0) {
- silence_frames += buffered_audio_[i].frames;
+ silence_frames += frames;
continue;
}
// Multiply by playback rate as frames represent time-scaled audio.
return buffered_audio_[i].endpoint_timestamp -
base::TimeDelta::FromMicroseconds(
- ((buffered_audio_[i].frames * buffered_audio_[i].playback_rate) +
- silence_frames) /
+ ((frames * buffered_audio_[i].playback_rate) + silence_frames) /
sample_rate_ * base::Time::kMicrosecondsPerSecond);
}
diff --git a/media/filters/audio_clock.h b/media/filters/audio_clock.h
index a0d8212f94..625da7d183 100644
--- a/media/filters/audio_clock.h
+++ b/media/filters/audio_clock.h
@@ -35,7 +35,11 @@ class MEDIA_EXPORT AudioClock {
// Calculates the current media timestamp taking silence and changes in
// playback rate into account.
- base::TimeDelta CurrentMediaTimestamp() const;
+ //
+ // Clients can provide |time_since_writing| to simulate the passage of time
+ // since last writing audio to get a more accurate current media timestamp.
+ base::TimeDelta CurrentMediaTimestamp(
+ base::TimeDelta time_since_writing) const;
// Returns the last endpoint timestamp provided to WroteAudio().
base::TimeDelta last_endpoint_timestamp() const {
diff --git a/media/filters/audio_clock_unittest.cc b/media/filters/audio_clock_unittest.cc
index a924a24e62..00179f9094 100644
--- a/media/filters/audio_clock_unittest.cc
+++ b/media/filters/audio_clock_unittest.cc
@@ -31,7 +31,12 @@ class AudioClockTest : public testing::Test {
}
int CurrentMediaTimestampInMilliseconds() {
- return clock_.CurrentMediaTimestamp().InMilliseconds();
+ return CurrentMediaTimestampSinceLastWritingInMilliseconds(0);
+ }
+
+ int CurrentMediaTimestampSinceLastWritingInMilliseconds(int milliseconds) {
+ return clock_.CurrentMediaTimestamp(base::TimeDelta::FromMilliseconds(
+ milliseconds)).InMilliseconds();
}
int LastEndpointTimestampInMilliseconds() {
@@ -47,7 +52,7 @@ class AudioClockTest : public testing::Test {
};
TEST_F(AudioClockTest, TimestampsStartAtNoTimestamp) {
- EXPECT_EQ(kNoTimestamp(), clock_.CurrentMediaTimestamp());
+ EXPECT_EQ(kNoTimestamp(), clock_.CurrentMediaTimestamp(base::TimeDelta()));
EXPECT_EQ(kNoTimestamp(), clock_.last_endpoint_timestamp());
}
@@ -174,4 +179,45 @@ TEST_F(AudioClockTest, ZeroDelay) {
EXPECT_EQ(3000, LastEndpointTimestampInMilliseconds());
}
+TEST_F(AudioClockTest, CurrentMediaTimestampSinceLastWriting) {
+ // Construct an audio clock with the following representation:
+ //
+ // +-------------------+----------------+------------------+----------------+
+ // | 10 frames silence | 10 frames @ 1x | 10 frames @ 0.5x | 10 frames @ 2x |
+ // +-------------------+----------------+------------------+----------------+
+ // Media timestamp: 0 1000 1500 3500
+ // Wall clock time: 2000 3000 4000 5000
+ WroteAudio(10, 40, 1.0);
+ WroteAudio(10, 40, 0.5);
+ WroteAudio(10, 40, 2.0);
+ EXPECT_EQ(-2000, CurrentMediaTimestampInMilliseconds());
+ EXPECT_EQ(3500, LastEndpointTimestampInMilliseconds());
+
+ // Simulate passing 2000ms of initial delay in the audio hardware.
+ EXPECT_EQ(-2000, CurrentMediaTimestampSinceLastWritingInMilliseconds(0));
+ EXPECT_EQ(-1500, CurrentMediaTimestampSinceLastWritingInMilliseconds(500));
+ EXPECT_EQ(-1000, CurrentMediaTimestampSinceLastWritingInMilliseconds(1000));
+ EXPECT_EQ(-500, CurrentMediaTimestampSinceLastWritingInMilliseconds(1500));
+ EXPECT_EQ(0, CurrentMediaTimestampSinceLastWritingInMilliseconds(2000));
+
+ // Now we should see the 1.0x buffer.
+ EXPECT_EQ(500, CurrentMediaTimestampSinceLastWritingInMilliseconds(2500));
+ EXPECT_EQ(1000, CurrentMediaTimestampSinceLastWritingInMilliseconds(3000));
+
+ // Now we should see the 0.5x buffer.
+ EXPECT_EQ(1250, CurrentMediaTimestampSinceLastWritingInMilliseconds(3500));
+ EXPECT_EQ(1500, CurrentMediaTimestampSinceLastWritingInMilliseconds(4000));
+
+ // Now we should see the 2.0x buffer.
+ EXPECT_EQ(2500, CurrentMediaTimestampSinceLastWritingInMilliseconds(4500));
+ EXPECT_EQ(3500, CurrentMediaTimestampSinceLastWritingInMilliseconds(5000));
+
+ // Times beyond the known length of the audio clock should return the last
+ // value we know of.
+ EXPECT_EQ(LastEndpointTimestampInMilliseconds(),
+ CurrentMediaTimestampSinceLastWritingInMilliseconds(5001));
+ EXPECT_EQ(LastEndpointTimestampInMilliseconds(),
+ CurrentMediaTimestampSinceLastWritingInMilliseconds(6000));
+}
+
} // namespace media
diff --git a/media/filters/audio_decoder_selector_unittest.cc b/media/filters/audio_decoder_selector_unittest.cc
index 56005822b1..eb0e8dbd3c 100644
--- a/media/filters/audio_decoder_selector_unittest.cc
+++ b/media/filters/audio_decoder_selector_unittest.cc
@@ -26,7 +26,7 @@ class AudioDecoderSelectorTest : public ::testing::Test {
public:
enum DecryptorCapability {
kNoDecryptor,
- // Used to test Abort() during DecryptingAudioDecoder::Initialize() and
+ // Used to test destruction during DecryptingAudioDecoder::Initialize() and
// DecryptingDemuxerStream::Initialize(). We don't need this for normal
// AudioDecoders since we use MockAudioDecoder.
kHoldSetDecryptor,
@@ -45,9 +45,6 @@ class AudioDecoderSelectorTest : public ::testing::Test {
}
~AudioDecoderSelectorTest() {
- if (selected_decoder_)
- selected_decoder_->Stop();
-
message_loop_.RunUntilIdle();
}
@@ -123,11 +120,11 @@ class AudioDecoderSelectorTest : public ::testing::Test {
message_loop_.RunUntilIdle();
}
- void SelectDecoderAndAbort() {
+ void SelectDecoderAndDestroy() {
SelectDecoder();
EXPECT_CALL(*this, OnDecoderSelected(IsNull(), IsNull()));
- decoder_selector_->Abort();
+ decoder_selector_.reset();
message_loop_.RunUntilIdle();
}
@@ -135,27 +132,27 @@ class AudioDecoderSelectorTest : public ::testing::Test {
NOTREACHED();
}
- // Fixture members.
- scoped_ptr<AudioDecoderSelector> decoder_selector_;
+ // Declare |decoder_selector_| after |demuxer_stream_| and |decryptor_| since
+ // |demuxer_stream_| and |decryptor_| should outlive |decoder_selector_|.
scoped_ptr<StrictMock<MockDemuxerStream> > demuxer_stream_;
+
// Use NiceMock since we don't care about most of calls on the decryptor, e.g.
// RegisterNewKeyCB().
scoped_ptr<NiceMock<MockDecryptor> > decryptor_;
+
+ scoped_ptr<AudioDecoderSelector> decoder_selector_;
+
StrictMock<MockAudioDecoder>* decoder_1_;
StrictMock<MockAudioDecoder>* decoder_2_;
ScopedVector<AudioDecoder> all_decoders_;
scoped_ptr<AudioDecoder> selected_decoder_;
+
base::MessageLoop message_loop_;
private:
DISALLOW_COPY_AND_ASSIGN(AudioDecoderSelectorTest);
};
-// Note:
-// In all the tests, Stop() is expected to be called on a decoder if a decoder:
-// - is pending initialization and DecoderSelector::Abort() is called, or
-// - has been successfully initialized.
-
// The stream is not encrypted but we have no clear decoder. No decoder can be
// selected.
TEST_F(AudioDecoderSelectorTest, ClearStream_NoDecryptor_NoClearDecoder) {
@@ -176,20 +173,18 @@ TEST_F(AudioDecoderSelectorTest, ClearStream_NoDecryptor_OneClearDecoder) {
EXPECT_CALL(*decoder_1_, Initialize(_, _, _))
.WillOnce(RunCallback<1>(PIPELINE_OK));
EXPECT_CALL(*this, OnDecoderSelected(decoder_1_, IsNull()));
- EXPECT_CALL(*decoder_1_, Stop());
SelectDecoder();
}
TEST_F(AudioDecoderSelectorTest,
- Abort_ClearStream_NoDecryptor_OneClearDecoder) {
+ Destroy_ClearStream_NoDecryptor_OneClearDecoder) {
UseClearStream();
InitializeDecoderSelector(kNoDecryptor, 1);
EXPECT_CALL(*decoder_1_, Initialize(_, _, _));
- EXPECT_CALL(*decoder_1_, Stop());
- SelectDecoderAndAbort();
+ SelectDecoderAndDestroy();
}
// The stream is not encrypted and we have multiple clear decoders. The first
@@ -203,22 +198,20 @@ TEST_F(AudioDecoderSelectorTest, ClearStream_NoDecryptor_MultipleClearDecoder) {
EXPECT_CALL(*decoder_2_, Initialize(_, _, _))
.WillOnce(RunCallback<1>(PIPELINE_OK));
EXPECT_CALL(*this, OnDecoderSelected(decoder_2_, IsNull()));
- EXPECT_CALL(*decoder_2_, Stop());
SelectDecoder();
}
TEST_F(AudioDecoderSelectorTest,
- Abort_ClearStream_NoDecryptor_MultipleClearDecoder) {
+ Destroy_ClearStream_NoDecryptor_MultipleClearDecoder) {
UseClearStream();
InitializeDecoderSelector(kNoDecryptor, 2);
EXPECT_CALL(*decoder_1_, Initialize(_, _, _))
.WillOnce(RunCallback<1>(DECODER_ERROR_NOT_SUPPORTED));
EXPECT_CALL(*decoder_2_, Initialize(_, _, _));
- EXPECT_CALL(*decoder_2_, Stop());
- SelectDecoderAndAbort();
+ SelectDecoderAndDestroy();
}
// There is a decryptor but the stream is not encrypted. The decoder will be
@@ -230,19 +223,17 @@ TEST_F(AudioDecoderSelectorTest, ClearStream_HasDecryptor) {
EXPECT_CALL(*decoder_1_, Initialize(_, _, _))
.WillOnce(RunCallback<1>(PIPELINE_OK));
EXPECT_CALL(*this, OnDecoderSelected(decoder_1_, IsNull()));
- EXPECT_CALL(*decoder_1_, Stop());
SelectDecoder();
}
-TEST_F(AudioDecoderSelectorTest, Abort_ClearStream_HasDecryptor) {
+TEST_F(AudioDecoderSelectorTest, Destroy_ClearStream_HasDecryptor) {
UseClearStream();
InitializeDecoderSelector(kDecryptOnly, 1);
EXPECT_CALL(*decoder_1_, Initialize(_, _, _));
- EXPECT_CALL(*decoder_1_, Stop());
- SelectDecoderAndAbort();
+ SelectDecoderAndDestroy();
}
// The stream is encrypted and there's no decryptor. No decoder can be selected.
@@ -267,11 +258,11 @@ TEST_F(AudioDecoderSelectorTest, EncryptedStream_DecryptOnly_NoClearDecoder) {
}
TEST_F(AudioDecoderSelectorTest,
- Abort_EncryptedStream_DecryptOnly_NoClearDecoder) {
+ Destroy_EncryptedStream_DecryptOnly_NoClearDecoder) {
UseEncryptedStream();
InitializeDecoderSelector(kHoldSetDecryptor, 0);
- SelectDecoderAndAbort();
+ SelectDecoderAndDestroy();
}
// Decryptor can do decryption-only and there's a decoder available. The decoder
@@ -283,20 +274,18 @@ TEST_F(AudioDecoderSelectorTest, EncryptedStream_DecryptOnly_OneClearDecoder) {
EXPECT_CALL(*decoder_1_, Initialize(_, _, _))
.WillOnce(RunCallback<1>(PIPELINE_OK));
EXPECT_CALL(*this, OnDecoderSelected(decoder_1_, NotNull()));
- EXPECT_CALL(*decoder_1_, Stop());
SelectDecoder();
}
TEST_F(AudioDecoderSelectorTest,
- Abort_EncryptedStream_DecryptOnly_OneClearDecoder) {
+ Destroy_EncryptedStream_DecryptOnly_OneClearDecoder) {
UseEncryptedStream();
InitializeDecoderSelector(kDecryptOnly, 1);
EXPECT_CALL(*decoder_1_, Initialize(_, _, _));
- EXPECT_CALL(*decoder_1_, Stop());
- SelectDecoderAndAbort();
+ SelectDecoderAndDestroy();
}
// Decryptor can only do decryption and there are multiple decoders available.
@@ -312,22 +301,20 @@ TEST_F(AudioDecoderSelectorTest,
EXPECT_CALL(*decoder_2_, Initialize(_, _, _))
.WillOnce(RunCallback<1>(PIPELINE_OK));
EXPECT_CALL(*this, OnDecoderSelected(decoder_2_, NotNull()));
- EXPECT_CALL(*decoder_2_, Stop());
SelectDecoder();
}
TEST_F(AudioDecoderSelectorTest,
- Abort_EncryptedStream_DecryptOnly_MultipleClearDecoder) {
+ Destroy_EncryptedStream_DecryptOnly_MultipleClearDecoder) {
UseEncryptedStream();
InitializeDecoderSelector(kDecryptOnly, 2);
EXPECT_CALL(*decoder_1_, Initialize(_, _, _))
.WillOnce(RunCallback<1>(DECODER_ERROR_NOT_SUPPORTED));
EXPECT_CALL(*decoder_2_, Initialize(_, _, _));
- EXPECT_CALL(*decoder_2_, Stop());
- SelectDecoderAndAbort();
+ SelectDecoderAndDestroy();
}
// Decryptor can do decryption and decoding. A DecryptingAudioDecoder will be
@@ -342,11 +329,11 @@ TEST_F(AudioDecoderSelectorTest, EncryptedStream_DecryptAndDecode) {
SelectDecoder();
}
-TEST_F(AudioDecoderSelectorTest, Abort_EncryptedStream_DecryptAndDecode) {
+TEST_F(AudioDecoderSelectorTest, Destroy_EncryptedStream_DecryptAndDecode) {
UseEncryptedStream();
InitializeDecoderSelector(kHoldSetDecryptor, 1);
- SelectDecoderAndAbort();
+ SelectDecoderAndDestroy();
}
} // namespace media
diff --git a/media/filters/audio_decoder_unittest.cc b/media/filters/audio_decoder_unittest.cc
index a3aeb2565a..23e21ef7d8 100644
--- a/media/filters/audio_decoder_unittest.cc
+++ b/media/filters/audio_decoder_unittest.cc
@@ -110,9 +110,6 @@ class AudioDecoderTest : public testing::TestWithParam<DecoderTestData> {
}
virtual ~AudioDecoderTest() {
- // Always issue a Stop() even if it's already been sent to avoid assertion
- // failures causing crashes.
- Stop();
EXPECT_FALSE(pending_decode_);
EXPECT_FALSE(pending_reset_);
}
@@ -209,10 +206,6 @@ class AudioDecoderTest : public testing::TestWithParam<DecoderTestData> {
ASSERT_FALSE(pending_reset_);
}
- void Stop() {
- decoder_->Stop();
- }
-
void Seek(base::TimeDelta seek_time) {
Reset();
decoded_audio_.clear();
@@ -319,10 +312,9 @@ class FFmpegAudioDecoderBehavioralTest : public AudioDecoderTest {};
TEST_P(AudioDecoderTest, Initialize) {
ASSERT_NO_FATAL_FAILURE(Initialize());
- Stop();
}
-// Verifies decode audio as well as the Decode() -> Reset() -> Stop() sequence.
+// Verifies audio decoding as well as the Decode() -> Reset() sequence.
TEST_P(AudioDecoderTest, ProduceAudioSamples) {
ASSERT_NO_FATAL_FAILURE(Initialize());
@@ -357,21 +349,17 @@ TEST_P(AudioDecoderTest, ProduceAudioSamples) {
// Seek back to the beginning. Calls Reset() on the decoder.
Seek(start_timestamp());
}
-
- Stop();
}
-TEST_P(AudioDecoderTest, DecodeStop) {
+TEST_P(AudioDecoderTest, Decode) {
ASSERT_NO_FATAL_FAILURE(Initialize());
Decode();
EXPECT_EQ(AudioDecoder::kOk, last_decode_status());
- Stop();
}
-TEST_P(AudioDecoderTest, ResetStop) {
+TEST_P(AudioDecoderTest, Reset) {
ASSERT_NO_FATAL_FAILURE(Initialize());
Reset();
- Stop();
}
TEST_P(AudioDecoderTest, NoTimestamp) {
@@ -380,7 +368,6 @@ TEST_P(AudioDecoderTest, NoTimestamp) {
buffer->set_timestamp(kNoTimestamp());
DecodeBuffer(buffer);
EXPECT_EQ(AudioDecoder::kDecodeError, last_decode_status());
- Stop();
}
TEST_P(OpusAudioDecoderBehavioralTest, InitializeWithNoCodecDelay) {
@@ -397,7 +384,6 @@ TEST_P(OpusAudioDecoderBehavioralTest, InitializeWithNoCodecDelay) {
base::TimeDelta::FromMilliseconds(80),
0);
InitializeDecoder(decoder_config);
- Stop();
}
TEST_P(OpusAudioDecoderBehavioralTest, InitializeWithBadCodecDelay) {
@@ -416,7 +402,6 @@ TEST_P(OpusAudioDecoderBehavioralTest, InitializeWithBadCodecDelay) {
// Use a different codec delay than in the extradata.
100);
InitializeDecoderWithStatus(decoder_config, DECODER_ERROR_NOT_SUPPORTED);
- Stop();
}
TEST_P(FFmpegAudioDecoderBehavioralTest, InitializeWithBadConfig) {
@@ -429,7 +414,6 @@ TEST_P(FFmpegAudioDecoderBehavioralTest, InitializeWithBadConfig) {
0,
false);
InitializeDecoderWithStatus(decoder_config, DECODER_ERROR_NOT_SUPPORTED);
- Stop();
}
const DecodedBufferExpectations kSfxOpusExpectations[] = {
diff --git a/media/filters/audio_file_reader.cc b/media/filters/audio_file_reader.cc
index cb8188e22b..3b14355cd4 100644
--- a/media/filters/audio_file_reader.cc
+++ b/media/filters/audio_file_reader.cc
@@ -20,6 +20,7 @@ AudioFileReader::AudioFileReader(FFmpegURLProtocol* protocol)
protocol_(protocol),
channels_(0),
sample_rate_(0),
+ end_padding_(0),
av_sample_format_(0) {
}
@@ -59,9 +60,20 @@ bool AudioFileReader::OpenDemuxer() {
return false;
const int result = avformat_find_stream_info(format_context, NULL);
- DLOG_IF(WARNING, result < 0)
- << "AudioFileReader::Open() : error in avformat_find_stream_info()";
- return result >= 0;
+ if (result < 0) {
+ DLOG(WARNING)
+ << "AudioFileReader::Open() : error in avformat_find_stream_info()";
+ return false;
+ }
+
+ // Attempt to extract end padding for mp3 files.
+ if (strcmp(format_context->iformat->name, "mp3") == 0 &&
+ (av_opt_get_int(format_context->priv_data, "end_pad", 0, &end_padding_) <
+ 0 ||
+ end_padding_ < 0)) {
+ end_padding_ = 0;
+ }
+ return true;
}
bool AudioFileReader::OpenDecoder() {
@@ -221,6 +233,12 @@ int AudioFileReader::Read(AudioBus* audio_bus) {
av_free_packet(&packet);
}
+ // If decoding completed successfully, try to strip end padding.
+ if (continue_decoding && end_padding_ <= current_frame) {
+ DCHECK_GE(end_padding_, 0);
+ current_frame -= end_padding_;
+ }
+
// Zero any remaining frames.
audio_bus->ZeroFramesPartial(
current_frame, audio_bus->frames() - current_frame);
diff --git a/media/filters/audio_file_reader.h b/media/filters/audio_file_reader.h
index c700b3288f..963baa7346 100644
--- a/media/filters/audio_file_reader.h
+++ b/media/filters/audio_file_reader.h
@@ -87,6 +87,7 @@ class MEDIA_EXPORT AudioFileReader {
FFmpegURLProtocol* protocol_;
int channels_;
int sample_rate_;
+ int64_t end_padding_;
// AVSampleFormat initially requested; not Chrome's SampleFormat.
int av_sample_format_;
diff --git a/media/filters/audio_file_reader_unittest.cc b/media/filters/audio_file_reader_unittest.cc
index 4ce2ab6ff4..ba264d684f 100644
--- a/media/filters/audio_file_reader_unittest.cc
+++ b/media/filters/audio_file_reader_unittest.cc
@@ -195,12 +195,12 @@ TEST_F(AudioFileReaderTest, WaveF32LE) {
#if defined(USE_PROPRIETARY_CODECS)
TEST_F(AudioFileReaderTest, MP3) {
RunTest("sfx.mp3",
- "3.05,2.87,3.00,3.32,3.58,4.08,",
+ "5.59,7.11,6.63,6.23,5.58,5.22,",
1,
44100,
base::TimeDelta::FromMicroseconds(313470),
13825,
- 12719);
+ 10496);
}
TEST_F(AudioFileReaderTest, CorruptMP3) {
diff --git a/media/filters/audio_renderer_impl.cc b/media/filters/audio_renderer_impl.cc
index d07826a243..bbe0d786b9 100644
--- a/media/filters/audio_renderer_impl.cc
+++ b/media/filters/audio_renderer_impl.cc
@@ -48,22 +48,21 @@ AudioRendererImpl::AudioRendererImpl(
AudioHardwareConfig* hardware_config)
: task_runner_(task_runner),
sink_(sink),
- audio_buffer_stream_(task_runner,
- decoders.Pass(),
- set_decryptor_ready_cb),
+ audio_buffer_stream_(new AudioBufferStream(task_runner,
+ decoders.Pass(),
+ set_decryptor_ready_cb)),
hardware_config_(hardware_config),
- now_cb_(base::Bind(&base::TimeTicks::Now)),
state_(kUninitialized),
+ buffering_state_(BUFFERING_HAVE_NOTHING),
rendering_(false),
sink_playing_(false),
pending_read_(false),
received_end_of_stream_(false),
rendered_end_of_stream_(false),
- preroll_aborted_(false),
weak_factory_(this) {
- audio_buffer_stream_.set_splice_observer(base::Bind(
+ audio_buffer_stream_->set_splice_observer(base::Bind(
&AudioRendererImpl::OnNewSpliceBuffer, weak_factory_.GetWeakPtr()));
- audio_buffer_stream_.set_config_change_observer(base::Bind(
+ audio_buffer_stream_->set_config_change_observer(base::Bind(
&AudioRendererImpl::OnConfigChange, weak_factory_.GetWeakPtr()));
}
@@ -92,13 +91,11 @@ void AudioRendererImpl::StartRendering() {
void AudioRendererImpl::StartRendering_Locked() {
DVLOG(1) << __FUNCTION__;
DCHECK(task_runner_->BelongsToCurrentThread());
- DCHECK(state_ == kPlaying || state_ == kRebuffering || state_ == kUnderflow)
- << "state_=" << state_;
+ DCHECK_EQ(state_, kPlaying);
DCHECK(!sink_playing_);
DCHECK_NE(algorithm_->playback_rate(), 0);
lock_.AssertAcquired();
- earliest_end_time_ = now_cb_.Run();
sink_playing_ = true;
base::AutoUnlock auto_unlock(lock_);
@@ -123,8 +120,7 @@ void AudioRendererImpl::StopRendering() {
void AudioRendererImpl::StopRendering_Locked() {
DCHECK(task_runner_->BelongsToCurrentThread());
- DCHECK(state_ == kPlaying || state_ == kRebuffering || state_ == kUnderflow)
- << "state_=" << state_;
+ DCHECK_EQ(state_, kPlaying);
DCHECK(sink_playing_);
lock_.AssertAcquired();
@@ -139,16 +135,14 @@ void AudioRendererImpl::Flush(const base::Closure& callback) {
DCHECK(task_runner_->BelongsToCurrentThread());
base::AutoLock auto_lock(lock_);
- DCHECK(state_ == kPlaying || state_ == kRebuffering || state_ == kUnderflow)
- << "state_=" << state_;
+ DCHECK_EQ(state_, kPlaying);
DCHECK(flush_cb_.is_null());
flush_cb_ = callback;
+ ChangeState_Locked(kFlushing);
- if (pending_read_) {
- ChangeState_Locked(kFlushing);
+ if (pending_read_)
return;
- }
ChangeState_Locked(kFlushed);
DoFlush_Locked();
@@ -161,8 +155,8 @@ void AudioRendererImpl::DoFlush_Locked() {
DCHECK(!pending_read_);
DCHECK_EQ(state_, kFlushed);
- audio_buffer_stream_.Reset(base::Bind(&AudioRendererImpl::ResetDecoderDone,
- weak_factory_.GetWeakPtr()));
+ audio_buffer_stream_->Reset(base::Bind(&AudioRendererImpl::ResetDecoderDone,
+ weak_factory_.GetWeakPtr()));
}
void AudioRendererImpl::ResetDecoderDone() {
@@ -178,15 +172,20 @@ void AudioRendererImpl::ResetDecoderDone() {
audio_clock_.reset(new AudioClock(audio_parameters_.sample_rate()));
received_end_of_stream_ = false;
rendered_end_of_stream_ = false;
- preroll_aborted_ = false;
- earliest_end_time_ = now_cb_.Run();
+ // Flush() may have been called while underflowed/not fully buffered.
+ if (buffering_state_ != BUFFERING_HAVE_NOTHING)
+ SetBufferingState_Locked(BUFFERING_HAVE_NOTHING);
+
splicer_->Reset();
if (buffer_converter_)
buffer_converter_->Reset();
algorithm_->FlushBuffers();
}
- base::ResetAndReturn(&flush_cb_).Run();
+
+ // Changes in buffering state are always posted. The flush callback must
+ // only be run after the buffering state has been set back to nothing.
+ task_runner_->PostTask(FROM_HERE, base::ResetAndReturn(&flush_cb_));
}
void AudioRendererImpl::Stop(const base::Closure& callback) {
@@ -207,7 +206,6 @@ void AudioRendererImpl::Stop(const base::Closure& callback) {
ChangeState_Locked(kStopped);
algorithm_.reset();
- underflow_cb_.Reset();
time_cb_.Reset();
flush_cb_.Reset();
}
@@ -217,23 +215,22 @@ void AudioRendererImpl::Stop(const base::Closure& callback) {
sink_ = NULL;
}
- audio_buffer_stream_.Stop(callback);
+ audio_buffer_stream_.reset();
+ task_runner_->PostTask(FROM_HERE, callback);
}
-void AudioRendererImpl::Preroll(base::TimeDelta time,
- const PipelineStatusCB& cb) {
- DVLOG(1) << __FUNCTION__ << "(" << time.InMicroseconds() << ")";
+void AudioRendererImpl::StartPlayingFrom(base::TimeDelta timestamp) {
+ DVLOG(1) << __FUNCTION__ << "(" << timestamp.InMicroseconds() << ")";
DCHECK(task_runner_->BelongsToCurrentThread());
base::AutoLock auto_lock(lock_);
DCHECK(!sink_playing_);
DCHECK_EQ(state_, kFlushed);
+ DCHECK_EQ(buffering_state_, BUFFERING_HAVE_NOTHING);
DCHECK(!pending_read_) << "Pending read must complete before seeking";
- DCHECK(preroll_cb_.is_null());
- ChangeState_Locked(kPrerolling);
- preroll_cb_ = cb;
- preroll_timestamp_ = time;
+ ChangeState_Locked(kPlaying);
+ start_timestamp_ = timestamp;
AttemptRead_Locked();
}
@@ -241,8 +238,8 @@ void AudioRendererImpl::Preroll(base::TimeDelta time,
void AudioRendererImpl::Initialize(DemuxerStream* stream,
const PipelineStatusCB& init_cb,
const StatisticsCB& statistics_cb,
- const base::Closure& underflow_cb,
const TimeCB& time_cb,
+ const BufferingStateCB& buffering_state_cb,
const base::Closure& ended_cb,
const PipelineStatusCB& error_cb) {
DCHECK(task_runner_->BelongsToCurrentThread());
@@ -250,8 +247,8 @@ void AudioRendererImpl::Initialize(DemuxerStream* stream,
DCHECK_EQ(stream->type(), DemuxerStream::AUDIO);
DCHECK(!init_cb.is_null());
DCHECK(!statistics_cb.is_null());
- DCHECK(!underflow_cb.is_null());
DCHECK(!time_cb.is_null());
+ DCHECK(!buffering_state_cb.is_null());
DCHECK(!ended_cb.is_null());
DCHECK(!error_cb.is_null());
DCHECK_EQ(kUninitialized, state_);
@@ -260,8 +257,8 @@ void AudioRendererImpl::Initialize(DemuxerStream* stream,
state_ = kInitializing;
init_cb_ = init_cb;
- underflow_cb_ = underflow_cb;
time_cb_ = time_cb;
+ buffering_state_cb_ = buffering_state_cb;
ended_cb_ = ended_cb;
error_cb_ = error_cb;
@@ -300,7 +297,7 @@ void AudioRendererImpl::Initialize(DemuxerStream* stream,
audio_clock_.reset(new AudioClock(audio_parameters_.sample_rate()));
- audio_buffer_stream_.Initialize(
+ audio_buffer_stream_->Initialize(
stream,
false,
statistics_cb,
@@ -357,23 +354,6 @@ void AudioRendererImpl::OnAudioBufferStreamInitialized(bool success) {
base::ResetAndReturn(&init_cb_).Run(PIPELINE_OK);
}
-void AudioRendererImpl::ResumeAfterUnderflow() {
- DCHECK(task_runner_->BelongsToCurrentThread());
- base::AutoLock auto_lock(lock_);
- if (state_ == kUnderflow) {
- // The "!preroll_aborted_" is a hack. If preroll is aborted, then we
- // shouldn't even reach the kUnderflow state to begin with. But for now
- // we're just making sure that the audio buffer capacity (i.e. the
- // number of bytes that need to be buffered for preroll to complete)
- // does not increase due to an aborted preroll.
- // TODO(vrk): Fix this bug correctly! (crbug.com/151352)
- if (!preroll_aborted_)
- algorithm_->IncreaseQueueCapacity();
-
- ChangeState_Locked(kRebuffering);
- }
-}
-
void AudioRendererImpl::SetVolume(float volume) {
DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK(sink_);
@@ -435,7 +415,7 @@ void AudioRendererImpl::DecodedAudioReady(
bool need_another_buffer = false;
while (splicer_->HasNextBuffer())
- need_another_buffer = HandleSplicerBuffer(splicer_->GetNextBuffer());
+ need_another_buffer = HandleSplicerBuffer_Locked(splicer_->GetNextBuffer());
if (!need_another_buffer && !CanRead_Locked())
return;
@@ -443,23 +423,18 @@ void AudioRendererImpl::DecodedAudioReady(
AttemptRead_Locked();
}
-bool AudioRendererImpl::HandleSplicerBuffer(
+bool AudioRendererImpl::HandleSplicerBuffer_Locked(
const scoped_refptr<AudioBuffer>& buffer) {
+ lock_.AssertAcquired();
if (buffer->end_of_stream()) {
received_end_of_stream_ = true;
-
- // Transition to kPlaying if we are currently handling an underflow since
- // no more data will be arriving.
- if (state_ == kUnderflow || state_ == kRebuffering)
- ChangeState_Locked(kPlaying);
} else {
- if (state_ == kPrerolling) {
- if (IsBeforePrerollTime(buffer))
+ if (state_ == kPlaying) {
+ if (IsBeforeStartTime(buffer))
return true;
- // Trim off any additional time before the preroll timestamp.
- const base::TimeDelta trim_time =
- preroll_timestamp_ - buffer->timestamp();
+ // Trim off any additional time before the start timestamp.
+ const base::TimeDelta trim_time = start_timestamp_ - buffer->timestamp();
if (trim_time > base::TimeDelta()) {
buffer->TrimStart(buffer->frame_count() *
(static_cast<double>(trim_time.InMicroseconds()) /
@@ -485,22 +460,13 @@ bool AudioRendererImpl::HandleSplicerBuffer(
DCHECK(!pending_read_);
return false;
- case kPrerolling:
- if (!buffer->end_of_stream() && !algorithm_->IsQueueFull())
- return true;
- ChangeState_Locked(kPlaying);
- base::ResetAndReturn(&preroll_cb_).Run(PIPELINE_OK);
- return false;
-
case kPlaying:
- case kUnderflow:
- return false;
-
- case kRebuffering:
- if (!algorithm_->IsQueueFull())
- return true;
- ChangeState_Locked(kPlaying);
- return false;
+ if (buffer->end_of_stream() || algorithm_->IsQueueFull()) {
+ if (buffering_state_ == BUFFERING_HAVE_NOTHING)
+ SetBufferingState_Locked(BUFFERING_HAVE_ENOUGH);
+ return false;
+ }
+ return true;
case kStopped:
return false;
@@ -521,8 +487,8 @@ void AudioRendererImpl::AttemptRead_Locked() {
return;
pending_read_ = true;
- audio_buffer_stream_.Read(base::Bind(&AudioRendererImpl::DecodedAudioReady,
- weak_factory_.GetWeakPtr()));
+ audio_buffer_stream_->Read(base::Bind(&AudioRendererImpl::DecodedAudioReady,
+ weak_factory_.GetWeakPtr()));
}
bool AudioRendererImpl::CanRead_Locked() {
@@ -531,15 +497,12 @@ bool AudioRendererImpl::CanRead_Locked() {
switch (state_) {
case kUninitialized:
case kInitializing:
- case kFlushed:
case kFlushing:
+ case kFlushed:
case kStopped:
return false;
- case kPrerolling:
case kPlaying:
- case kUnderflow:
- case kRebuffering:
break;
}
@@ -575,11 +538,11 @@ void AudioRendererImpl::SetPlaybackRate(float playback_rate) {
}
}
-bool AudioRendererImpl::IsBeforePrerollTime(
+bool AudioRendererImpl::IsBeforeStartTime(
const scoped_refptr<AudioBuffer>& buffer) {
- DCHECK_EQ(state_, kPrerolling);
+ DCHECK_EQ(state_, kPlaying);
return buffer && !buffer->end_of_stream() &&
- (buffer->timestamp() + buffer->duration()) < preroll_timestamp_;
+ (buffer->timestamp() + buffer->duration()) < start_timestamp_;
}
int AudioRendererImpl::Render(AudioBus* audio_bus,
@@ -591,7 +554,6 @@ int AudioRendererImpl::Render(AudioBus* audio_bus,
audio_parameters_.sample_rate());
int frames_written = 0;
base::Closure time_cb;
- base::Closure underflow_cb;
{
base::AutoLock auto_lock(lock_);
@@ -617,10 +579,7 @@ int AudioRendererImpl::Render(AudioBus* audio_bus,
// 1) Algorithm can not fill the audio callback buffer
// 2) We received an end of stream buffer
// 3) We haven't already signalled that we've ended
- // 4) Our estimated earliest end time has expired
- //
- // TODO(enal): we should replace (4) with a check that the browser has no
- // more audio data or at least use a delayed callback.
+ // 4) We've played all known audio data sent to hardware
//
// We use the following conditions to determine underflow:
// 1) Algorithm can not fill the audio callback buffer
@@ -629,7 +588,7 @@ int AudioRendererImpl::Render(AudioBus* audio_bus,
//
// Otherwise the buffer has data we can send to the device.
const base::TimeDelta media_timestamp_before_filling =
- audio_clock_->CurrentMediaTimestamp();
+ audio_clock_->CurrentMediaTimestamp(base::TimeDelta());
if (algorithm_->frames_buffered() > 0) {
frames_written = algorithm_->FillBuffer(audio_bus, requested_frames);
audio_clock_->WroteAudio(
@@ -638,19 +597,16 @@ int AudioRendererImpl::Render(AudioBus* audio_bus,
audio_clock_->WroteSilence(requested_frames - frames_written, delay_frames);
if (frames_written == 0) {
- const base::TimeTicks now = now_cb_.Run();
-
if (received_end_of_stream_ && !rendered_end_of_stream_ &&
- now >= earliest_end_time_) {
+ audio_clock_->CurrentMediaTimestamp(base::TimeDelta()) ==
+ audio_clock_->last_endpoint_timestamp()) {
rendered_end_of_stream_ = true;
ended_cb_.Run();
} else if (!received_end_of_stream_ && state_ == kPlaying) {
- ChangeState_Locked(kUnderflow);
- underflow_cb = underflow_cb_;
- } else {
- // We can't write any data this cycle. For example, we may have
- // sent all available data to the audio device while not reaching
- // |earliest_end_time_|.
+ if (buffering_state_ != BUFFERING_HAVE_NOTHING) {
+ algorithm_->IncreaseQueueCapacity();
+ SetBufferingState_Locked(BUFFERING_HAVE_NOTHING);
+ }
}
}
@@ -663,43 +619,22 @@ int AudioRendererImpl::Render(AudioBus* audio_bus,
// We only want to execute |time_cb_| if time has progressed and we haven't
// signaled end of stream yet.
if (media_timestamp_before_filling !=
- audio_clock_->CurrentMediaTimestamp() &&
+ audio_clock_->CurrentMediaTimestamp(base::TimeDelta()) &&
!rendered_end_of_stream_) {
- time_cb = base::Bind(time_cb_,
- audio_clock_->CurrentMediaTimestamp(),
- audio_clock_->last_endpoint_timestamp());
- }
-
- if (frames_written > 0) {
- UpdateEarliestEndTime_Locked(
- frames_written, playback_delay, now_cb_.Run());
+ time_cb =
+ base::Bind(time_cb_,
+ audio_clock_->CurrentMediaTimestamp(base::TimeDelta()),
+ audio_clock_->last_endpoint_timestamp());
}
}
if (!time_cb.is_null())
task_runner_->PostTask(FROM_HERE, time_cb);
- if (!underflow_cb.is_null())
- underflow_cb.Run();
-
DCHECK_LE(frames_written, requested_frames);
return frames_written;
}
-void AudioRendererImpl::UpdateEarliestEndTime_Locked(
- int frames_filled, const base::TimeDelta& playback_delay,
- const base::TimeTicks& time_now) {
- DCHECK_GT(frames_filled, 0);
-
- base::TimeDelta predicted_play_time = base::TimeDelta::FromMicroseconds(
- static_cast<float>(frames_filled) * base::Time::kMicrosecondsPerSecond /
- audio_parameters_.sample_rate());
-
- lock_.AssertAcquired();
- earliest_end_time_ = std::max(
- earliest_end_time_, time_now + playback_delay + predicted_play_time);
-}
-
void AudioRendererImpl::OnRenderError() {
// UMA data tells us this happens ~0.01% of the time. Trigger an error instead
// of trying to gracefully fall back to a fake sink. It's very likely
@@ -720,7 +655,6 @@ void AudioRendererImpl::HandleAbortedReadOrDecodeError(bool is_decode_error) {
return;
case kFlushing:
ChangeState_Locked(kFlushed);
-
if (status == PIPELINE_OK) {
DoFlush_Locked();
return;
@@ -729,16 +663,9 @@ void AudioRendererImpl::HandleAbortedReadOrDecodeError(bool is_decode_error) {
error_cb_.Run(status);
base::ResetAndReturn(&flush_cb_).Run();
return;
- case kPrerolling:
- // This is a signal for abort if it's not an error.
- preroll_aborted_ = !is_decode_error;
- ChangeState_Locked(kPlaying);
- base::ResetAndReturn(&preroll_cb_).Run(status);
- return;
+
case kFlushed:
case kPlaying:
- case kUnderflow:
- case kRebuffering:
case kStopped:
if (status != PIPELINE_OK)
error_cb_.Run(status);
@@ -768,4 +695,16 @@ void AudioRendererImpl::OnConfigChange() {
CHECK(splicer_->AddInput(buffer_converter_->GetNextBuffer()));
}
+void AudioRendererImpl::SetBufferingState_Locked(
+ BufferingState buffering_state) {
+ DVLOG(1) << __FUNCTION__ << " : " << buffering_state_ << " -> "
+ << buffering_state;
+ DCHECK_NE(buffering_state_, buffering_state);
+ lock_.AssertAcquired();
+ buffering_state_ = buffering_state;
+
+ task_runner_->PostTask(FROM_HERE,
+ base::Bind(buffering_state_cb_, buffering_state_));
+}
+
} // namespace media
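
For orientation, the net effect of the renderer changes above is that the explicit underflow/ResumeAfterUnderflow handshake is replaced by buffering-state notifications: the renderer reports BUFFERING_HAVE_NOTHING when it drains (growing its queue capacity itself in Render()) and BUFFERING_HAVE_ENOUGH once the queue refills. A minimal, self-contained sketch of a consumer of that contract; SketchPipeline and its wiring are illustrative assumptions, not Chromium code:

#include <iostream>

// Mirrors media::BufferingState for the purposes of this sketch.
enum BufferingState { BUFFERING_HAVE_NOTHING, BUFFERING_HAVE_ENOUGH };

// Hypothetical pipeline-side listener; the real pipeline passes an equivalent
// callback to AudioRendererImpl::Initialize() as |buffering_state_cb|.
class SketchPipeline {
 public:
  void OnBufferingStateChange(BufferingState state) {
    if (state == BUFFERING_HAVE_NOTHING) {
      std::cout << "renderer underflowed: stall playback, wait for data\n";
    } else {
      std::cout << "renderer has enough buffered: resume playback\n";
    }
  }
};

int main() {
  SketchPipeline pipeline;
  pipeline.OnBufferingStateChange(BUFFERING_HAVE_NOTHING);  // queue drained
  pipeline.OnBufferingStateChange(BUFFERING_HAVE_ENOUGH);   // queue refilled
}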
diff --git a/media/filters/audio_renderer_impl.h b/media/filters/audio_renderer_impl.h
index 7829366b3f..5f6ebdd347 100644
--- a/media/filters/audio_renderer_impl.h
+++ b/media/filters/audio_renderer_impl.h
@@ -22,6 +22,7 @@
#include <deque>
#include "base/gtest_prod_util.h"
+#include "base/memory/scoped_ptr.h"
#include "base/memory/weak_ptr.h"
#include "base/synchronization/lock.h"
#include "media/base/audio_decoder.h"
@@ -68,8 +69,8 @@ class MEDIA_EXPORT AudioRendererImpl
virtual void Initialize(DemuxerStream* stream,
const PipelineStatusCB& init_cb,
const StatisticsCB& statistics_cb,
- const base::Closure& underflow_cb,
const TimeCB& time_cb,
+ const BufferingStateCB& buffering_state_cb,
const base::Closure& ended_cb,
const PipelineStatusCB& error_cb) OVERRIDE;
virtual void StartRendering() OVERRIDE;
@@ -77,17 +78,9 @@ class MEDIA_EXPORT AudioRendererImpl
virtual void Flush(const base::Closure& callback) OVERRIDE;
virtual void Stop(const base::Closure& callback) OVERRIDE;
virtual void SetPlaybackRate(float rate) OVERRIDE;
- virtual void Preroll(base::TimeDelta time,
- const PipelineStatusCB& cb) OVERRIDE;
- virtual void ResumeAfterUnderflow() OVERRIDE;
+ virtual void StartPlayingFrom(base::TimeDelta timestamp) OVERRIDE;
virtual void SetVolume(float volume) OVERRIDE;
- // Allows injection of a custom time callback for non-realtime testing.
- typedef base::Callback<base::TimeTicks()> NowCB;
- void set_now_cb_for_testing(const NowCB& now_cb) {
- now_cb_ = now_cb;
- }
-
private:
friend class AudioRendererImplTest;
@@ -104,25 +97,17 @@ class MEDIA_EXPORT AudioRendererImpl
// |
// V Decoders reset
// kFlushed <------------------ kFlushing
- // | Preroll() ^
+ // | StartPlayingFrom() ^
// | |
- // V | Flush()
- // kPrerolling ----------------> kPlaying ---------.
- // Enough data buffered ^ | Not enough data
- // | | buffered
- // Enough data buffered | V
- // kRebuffering <--- kUnderflow
- // ResumeAfterUnderflow()
+ // | | Flush()
+ // `---------> kPlaying --------'
enum State {
kUninitialized,
kInitializing,
kFlushing,
kFlushed,
- kPrerolling,
kPlaying,
kStopped,
- kUnderflow,
- kRebuffering,
};
// Callback from the audio decoder delivering decoded audio samples.
@@ -131,17 +116,12 @@ class MEDIA_EXPORT AudioRendererImpl
// Handles buffers that come out of |splicer_|.
// Returns true if more buffers are needed.
- bool HandleSplicerBuffer(const scoped_refptr<AudioBuffer>& buffer);
+ bool HandleSplicerBuffer_Locked(const scoped_refptr<AudioBuffer>& buffer);
// Helper functions for AudioDecoder::Status values passed to
// DecodedAudioReady().
void HandleAbortedReadOrDecodeError(bool is_decode_error);
- // Estimate earliest time when current buffer can stop playing.
- void UpdateEarliestEndTime_Locked(int frames_filled,
- const base::TimeDelta& playback_delay,
- const base::TimeTicks& time_now);
-
void StartRendering_Locked();
void StopRendering_Locked();
@@ -177,10 +157,9 @@ class MEDIA_EXPORT AudioRendererImpl
bool CanRead_Locked();
void ChangeState_Locked(State new_state);
- // Returns true if the data in the buffer is all before
- // |preroll_timestamp_|. This can only return true while
- // in the kPrerolling state.
- bool IsBeforePrerollTime(const scoped_refptr<AudioBuffer>& buffer);
+ // Returns true if the data in the buffer is all before |start_timestamp_|.
+ // This can only return true while in the kPlaying state.
+ bool IsBeforeStartTime(const scoped_refptr<AudioBuffer>& buffer);
// Called upon AudioBufferStream initialization, or failure thereof (indicated
// by the value of |success|).
@@ -203,6 +182,9 @@ class MEDIA_EXPORT AudioRendererImpl
// Called by the AudioBufferStream when a config change occurs.
void OnConfigChange();
+ // Updates |buffering_state_| and fires |buffering_state_cb_|.
+ void SetBufferingState_Locked(BufferingState buffering_state);
+
scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
scoped_ptr<AudioSplicer> splicer_;
@@ -216,7 +198,7 @@ class MEDIA_EXPORT AudioRendererImpl
// may deadlock between |task_runner_| and the audio callback thread.
scoped_refptr<media::AudioRendererSink> sink_;
- AudioBufferStream audio_buffer_stream_;
+ scoped_ptr<AudioBufferStream> audio_buffer_stream_;
// Interface to the hardware audio params.
const AudioHardwareConfig* const hardware_config_;
@@ -226,20 +208,14 @@ class MEDIA_EXPORT AudioRendererImpl
// Callbacks provided during Initialize().
PipelineStatusCB init_cb_;
- base::Closure underflow_cb_;
TimeCB time_cb_;
+ BufferingStateCB buffering_state_cb_;
base::Closure ended_cb_;
PipelineStatusCB error_cb_;
// Callback provided to Flush().
base::Closure flush_cb_;
- // Callback provided to Preroll().
- PipelineStatusCB preroll_cb_;
-
- // Typically calls base::TimeTicks::Now() but can be overridden by a test.
- NowCB now_cb_;
-
// After Initialize() has completed, all variables below must be accessed
// under |lock_|. ------------------------------------------------------------
base::Lock lock_;
@@ -250,6 +226,8 @@ class MEDIA_EXPORT AudioRendererImpl
// Simple state tracking variable.
State state_;
+ BufferingState buffering_state_;
+
// Keep track of whether or not the sink is playing and whether we should be
// rendering.
bool rendering_;
@@ -264,28 +242,7 @@ class MEDIA_EXPORT AudioRendererImpl
scoped_ptr<AudioClock> audio_clock_;
- base::TimeDelta preroll_timestamp_;
-
- // We're supposed to know amount of audio data OS or hardware buffered, but
- // that is not always so -- on my Linux box
- // AudioBuffersState::hardware_delay_bytes never reaches 0.
- //
- // As a result we cannot use it to find when stream ends. If we just ignore
- // buffered data we will notify host that stream ended before it is actually
- // did so, I've seen it done ~140ms too early when playing ~150ms file.
- //
- // Instead of trying to invent OS-specific solution for each and every OS we
- // are supporting, use simple workaround: every time we fill the buffer we
- // remember when it should stop playing, and do not assume that buffer is
- // empty till that time. Workaround is not bulletproof, as we don't exactly
- // know when that particular data would start playing, but it is much better
- // than nothing.
- base::TimeTicks earliest_end_time_;
- size_t total_frames_filled_;
-
- // True if the renderer receives a buffer with kAborted status during preroll,
- // false otherwise. This flag is cleared on the next Preroll() call.
- bool preroll_aborted_;
+ base::TimeDelta start_timestamp_;
// End variables which must be accessed under |lock_|. ----------------------
diff --git a/media/filters/audio_renderer_impl_unittest.cc b/media/filters/audio_renderer_impl_unittest.cc
index ef00769db4..c6cb9523cf 100644
--- a/media/filters/audio_renderer_impl_unittest.cc
+++ b/media/filters/audio_renderer_impl_unittest.cc
@@ -4,17 +4,11 @@
#include "base/bind.h"
#include "base/callback_helpers.h"
-#include "base/gtest_prod_util.h"
-#include "base/memory/scoped_vector.h"
-#include "base/message_loop/message_loop.h"
#include "base/run_loop.h"
-#include "base/stl_util.h"
#include "base/strings/stringprintf.h"
-#include "media/base/audio_buffer.h"
#include "media/base/audio_buffer_converter.h"
#include "media/base/audio_hardware_config.h"
#include "media/base/audio_splicer.h"
-#include "media/base/audio_timestamp_helper.h"
#include "media/base/fake_audio_renderer_sink.h"
#include "media/base/gmock_callback_support.h"
#include "media/base/mock_filters.h"
@@ -22,12 +16,8 @@
#include "media/filters/audio_renderer_impl.h"
#include "testing/gtest/include/gtest/gtest.h"
-using ::base::Time;
-using ::base::TimeTicks;
using ::base::TimeDelta;
using ::testing::_;
-using ::testing::AnyNumber;
-using ::testing::Invoke;
using ::testing::Return;
using ::testing::SaveArg;
@@ -43,11 +33,6 @@ static int kSamplesPerSecond = 44100;
// Use a different output sample rate so the AudioBufferConverter is invoked.
static int kOutputSamplesPerSecond = 48000;
-// Constants for distinguishing between muted audio and playing audio when using
-// ConsumeBufferedData(). Must match the type needed by kSampleFormat.
-static float kMutedAudio = 0.0f;
-static float kPlayingAudio = 0.5f;
-
static const int kDataSize = 1024;
ACTION_P(EnterPendingDecoderInitStateAction, test) {
@@ -62,7 +47,8 @@ class AudioRendererImplTest : public ::testing::Test {
needs_stop_(true),
demuxer_stream_(DemuxerStream::AUDIO),
decoder_(new MockAudioDecoder()),
- last_time_update_(kNoTimestamp()) {
+ last_time_update_(kNoTimestamp()),
+ ended_(false) {
AudioDecoderConfig audio_config(kCodec,
kSampleFormat,
kChannelLayout,
@@ -98,10 +84,6 @@ class AudioRendererImplTest : public ::testing::Test {
decoders.Pass(),
SetDecryptorReadyCB(),
&hardware_config_));
-
- // Stub out time.
- renderer_->set_now_cb_for_testing(base::Bind(
- &AudioRendererImplTest::GetTime, base::Unretained(this)));
}
virtual ~AudioRendererImplTest() {
@@ -120,7 +102,7 @@ class AudioRendererImplTest : public ::testing::Test {
}
MOCK_METHOD1(OnStatistics, void(const PipelineStatistics&));
- MOCK_METHOD0(OnUnderflow, void());
+ MOCK_METHOD1(OnBufferingStateChange, void(BufferingState));
MOCK_METHOD1(OnError, void(PipelineStatus));
void OnAudioTimeCallback(TimeDelta current_time, TimeDelta max_time) {
@@ -134,11 +116,12 @@ class AudioRendererImplTest : public ::testing::Test {
pipeline_status_cb,
base::Bind(&AudioRendererImplTest::OnStatistics,
base::Unretained(this)),
- base::Bind(&AudioRendererImplTest::OnUnderflow,
- base::Unretained(this)),
base::Bind(&AudioRendererImplTest::OnAudioTimeCallback,
base::Unretained(this)),
- ended_event_.GetClosure(),
+ base::Bind(&AudioRendererImplTest::OnBufferingStateChange,
+ base::Unretained(this)),
+ base::Bind(&AudioRendererImplTest::OnEnded,
+ base::Unretained(this)),
base::Bind(&AudioRendererImplTest::OnError,
base::Unretained(this)));
}
@@ -147,7 +130,6 @@ class AudioRendererImplTest : public ::testing::Test {
EXPECT_CALL(*decoder_, Initialize(_, _, _))
.WillOnce(DoAll(SaveArg<2>(&output_cb_),
RunCallback<1>(PIPELINE_OK)));
- EXPECT_CALL(*decoder_, Stop());
InitializeWithStatus(PIPELINE_OK);
next_timestamp_.reset(new AudioTimestampHelper(
@@ -169,7 +151,6 @@ class AudioRendererImplTest : public ::testing::Test {
EXPECT_CALL(*decoder_, Initialize(_, _, _))
.WillOnce(DoAll(SaveArg<2>(&output_cb_),
RunCallback<1>(PIPELINE_OK)));
- EXPECT_CALL(*decoder_, Stop());
WaitableMessageLoopEvent event;
InitializeRenderer(event.GetPipelineStatusCB());
@@ -186,7 +167,6 @@ class AudioRendererImplTest : public ::testing::Test {
EXPECT_CALL(*decoder_, Initialize(_, _, _))
.WillOnce(DoAll(SaveArg<2>(&output_cb_),
EnterPendingDecoderInitStateAction(this)));
- EXPECT_CALL(*decoder_, Stop());
WaitableMessageLoopEvent event;
InitializeRenderer(event.GetPipelineStatusCB());
@@ -205,9 +185,11 @@ class AudioRendererImplTest : public ::testing::Test {
init_decoder_cb_ = cb;
}
- void Flush() {
+ void FlushDuringPendingRead() {
+ SCOPED_TRACE("FlushDuringPendingRead()");
WaitableMessageLoopEvent flush_event;
renderer_->Flush(flush_event.GetClosure());
+ SatisfyPendingRead(kDataSize);
flush_event.RunAndWait();
EXPECT_FALSE(IsReadPending());
@@ -224,11 +206,10 @@ class AudioRendererImplTest : public ::testing::Test {
next_timestamp_->SetBaseTimestamp(timestamp);
// Fill entire buffer to complete prerolling.
- WaitableMessageLoopEvent event;
- renderer_->Preroll(timestamp, event.GetPipelineStatusCB());
+ renderer_->StartPlayingFrom(timestamp);
WaitForPendingRead();
+ EXPECT_CALL(*this, OnBufferingStateChange(BUFFERING_HAVE_ENOUGH));
DeliverRemainingAudio();
- event.RunAndWaitForStatus(PIPELINE_OK);
}
void StartRendering() {
@@ -240,17 +221,6 @@ class AudioRendererImplTest : public ::testing::Test {
renderer_->StopRendering();
}
- void Seek() {
- StopRendering();
- Flush();
- Preroll();
- }
-
- void WaitForEnded() {
- SCOPED_TRACE("WaitForEnded()");
- ended_event_.RunAndWait();
- }
-
bool IsReadPending() const {
return !decode_cb_.is_null();
}
@@ -280,7 +250,7 @@ class AudioRendererImplTest : public ::testing::Test {
kChannelLayout,
kChannelCount,
kSamplesPerSecond,
- kPlayingAudio,
+ 1.0f,
0.0f,
size,
next_timestamp_->GetTimestamp());
@@ -318,46 +288,14 @@ class AudioRendererImplTest : public ::testing::Test {
}
// Attempts to consume |requested_frames| frames from |renderer_|'s internal
- // buffer, returning true if all |requested_frames| frames were consumed,
- // false if less than |requested_frames| frames were consumed.
- //
- // |muted| is optional and if passed will get set if the value of
- // the consumed data is muted audio.
- bool ConsumeBufferedData(int requested_frames, bool* muted) {
+ // buffer. Returns true if and only if all of |requested_frames| were
+ // consumed.
+ bool ConsumeBufferedData(int requested_frames) {
scoped_ptr<AudioBus> bus =
AudioBus::Create(kChannels, std::max(requested_frames, 1));
- int frames_read;
- if (!sink_->Render(bus.get(), 0, &frames_read)) {
- if (muted)
- *muted = true;
- return false;
- }
-
- if (muted)
- *muted = frames_read < 1 || bus->channel(0)[0] == kMutedAudio;
- return frames_read == requested_frames;
- }
-
- // Attempts to consume all data available from the renderer. Returns the
- // number of frames read. Since time is frozen, the audio delay will increase
- // as frames come in.
- int ConsumeAllBufferedData() {
int frames_read = 0;
- int total_frames_read = 0;
-
- scoped_ptr<AudioBus> bus = AudioBus::Create(kChannels, 1024);
-
- do {
- TimeDelta audio_delay = TimeDelta::FromMicroseconds(
- total_frames_read * Time::kMicrosecondsPerSecond /
- static_cast<float>(hardware_config_.GetOutputConfig().sample_rate()));
-
- frames_read = renderer_->Render(
- bus.get(), audio_delay.InMilliseconds());
- total_frames_read += frames_read;
- } while (frames_read > 0);
-
- return total_frames_read;
+ EXPECT_TRUE(sink_->Render(bus.get(), 0, &frames_read));
+ return frames_read == requested_frames;
}
int frames_buffered() {
@@ -377,57 +315,6 @@ class AudioRendererImplTest : public ::testing::Test {
return buffer_capacity() - frames_buffered();
}
- void CallResumeAfterUnderflow() {
- renderer_->ResumeAfterUnderflow();
- }
-
- TimeDelta CalculatePlayTime(int frames_filled) {
- return TimeDelta::FromMicroseconds(
- frames_filled * Time::kMicrosecondsPerSecond /
- renderer_->audio_parameters_.sample_rate());
- }
-
- void EndOfStreamTest(float playback_rate) {
- Initialize();
- Preroll();
- StartRendering();
- renderer_->SetPlaybackRate(playback_rate);
-
- // Drain internal buffer, we should have a pending read.
- int total_frames = frames_buffered();
- int frames_filled = ConsumeAllBufferedData();
- WaitForPendingRead();
-
- // Due to how the cross-fade algorithm works we won't get an exact match
- // between the ideal and expected number of frames consumed. In the faster
- // than normal playback case, more frames are created than should exist and
- // vice versa in the slower than normal playback case.
- const float kEpsilon = 0.20 * (total_frames / playback_rate);
- EXPECT_NEAR(frames_filled, total_frames / playback_rate, kEpsilon);
-
- // Figure out how long until the ended event should fire.
- TimeDelta audio_play_time = CalculatePlayTime(frames_filled);
- DVLOG(1) << "audio_play_time = " << audio_play_time.InSecondsF();
-
- // Fulfill the read with an end-of-stream packet. We shouldn't report ended
- // nor have a read until we drain the internal buffer.
- DeliverEndOfStream();
-
- // Advance time half way without an ended expectation.
- AdvanceTime(audio_play_time / 2);
- ConsumeBufferedData(frames_buffered(), NULL);
-
- // Advance time by other half and expect the ended event.
- AdvanceTime(audio_play_time / 2);
- ConsumeBufferedData(frames_buffered(), NULL);
- WaitForEnded();
- }
-
- void AdvanceTime(TimeDelta time) {
- base::AutoLock auto_lock(lock_);
- time_ += time;
- }
-
void force_config_change() {
renderer_->OnConfigChange();
}
@@ -444,6 +331,8 @@ class AudioRendererImplTest : public ::testing::Test {
return last_time_update_;
}
+ bool ended() const { return ended_; }
+
// Fixture members.
base::MessageLoop message_loop_;
scoped_ptr<AudioRendererImpl> renderer_;
@@ -455,11 +344,6 @@ class AudioRendererImplTest : public ::testing::Test {
bool needs_stop_;
private:
- TimeTicks GetTime() {
- base::AutoLock auto_lock(lock_);
- return time_;
- }
-
void DecodeDecoder(const scoped_refptr<DecoderBuffer>& buffer,
const AudioDecoder::DecodeCB& decode_cb) {
// We shouldn't ever call Read() after Stop():
@@ -505,27 +389,27 @@ class AudioRendererImplTest : public ::testing::Test {
message_loop_.RunUntilIdle();
}
+ void OnEnded() {
+ CHECK(!ended_);
+ ended_ = true;
+ }
+
MockDemuxerStream demuxer_stream_;
MockAudioDecoder* decoder_;
- // Used for stubbing out time in the audio callback thread.
- base::Lock lock_;
- TimeTicks time_;
-
// Used for satisfying reads.
AudioDecoder::OutputCB output_cb_;
AudioDecoder::DecodeCB decode_cb_;
base::Closure reset_cb_;
scoped_ptr<AudioTimestampHelper> next_timestamp_;
- WaitableMessageLoopEvent ended_event_;
-
// Run during DecodeDecoder() to unblock WaitForPendingRead().
base::Closure wait_for_pending_decode_cb_;
base::Closure stop_decoder_cb_;
PipelineStatusCB init_decoder_cb_;
base::TimeDelta last_time_update_;
+ bool ended_;
DISALLOW_COPY_AND_ASSIGN(AudioRendererImplTest);
};
@@ -550,242 +434,101 @@ TEST_F(AudioRendererImplTest, StartRendering) {
StartRendering();
// Drain internal buffer, we should have a pending read.
- EXPECT_TRUE(ConsumeBufferedData(frames_buffered(), NULL));
+ EXPECT_TRUE(ConsumeBufferedData(frames_buffered()));
WaitForPendingRead();
}
TEST_F(AudioRendererImplTest, EndOfStream) {
- EndOfStreamTest(1.0);
-}
-
-TEST_F(AudioRendererImplTest, EndOfStream_FasterPlaybackSpeed) {
- EndOfStreamTest(2.0);
-}
-
-TEST_F(AudioRendererImplTest, EndOfStream_SlowerPlaybackSpeed) {
- EndOfStreamTest(0.5);
-}
-
-TEST_F(AudioRendererImplTest, Underflow) {
Initialize();
Preroll();
-
- int initial_capacity = buffer_capacity();
-
StartRendering();
// Drain internal buffer, we should have a pending read.
- EXPECT_TRUE(ConsumeBufferedData(frames_buffered(), NULL));
+ EXPECT_TRUE(ConsumeBufferedData(frames_buffered()));
WaitForPendingRead();
- // Verify the next FillBuffer() call triggers the underflow callback
- // since the decoder hasn't delivered any data after it was drained.
- EXPECT_CALL(*this, OnUnderflow());
- EXPECT_FALSE(ConsumeBufferedData(kDataSize, NULL));
-
- renderer_->ResumeAfterUnderflow();
-
- // Verify after resuming that we're still not getting data.
- bool muted = false;
- EXPECT_EQ(0, frames_buffered());
- EXPECT_FALSE(ConsumeBufferedData(kDataSize, &muted));
- EXPECT_TRUE(muted);
-
- // Verify that the buffer capacity increased as a result of the underflow.
- EXPECT_GT(buffer_capacity(), initial_capacity);
+ // Forcefully trigger underflow.
+ EXPECT_FALSE(ConsumeBufferedData(1));
+ EXPECT_CALL(*this, OnBufferingStateChange(BUFFERING_HAVE_NOTHING));
- // Deliver data, we should get non-muted audio.
- DeliverRemainingAudio();
- EXPECT_TRUE(ConsumeBufferedData(kDataSize, &muted));
- EXPECT_FALSE(muted);
-}
-
-TEST_F(AudioRendererImplTest, Underflow_CapacityResetsAfterFlush) {
- Initialize();
- Preroll();
-
- int initial_capacity = buffer_capacity();
-
- StartRendering();
-
- // Drain internal buffer, we should have a pending read.
- EXPECT_TRUE(ConsumeBufferedData(frames_buffered(), NULL));
- WaitForPendingRead();
-
- // Verify the next FillBuffer() call triggers the underflow callback
- // since the decoder hasn't delivered any data after it was drained.
- EXPECT_CALL(*this, OnUnderflow());
- EXPECT_FALSE(ConsumeBufferedData(kDataSize, NULL));
-
- // Verify that the buffer capacity increased as a result of resuming after
- // underflow.
- EXPECT_EQ(buffer_capacity(), initial_capacity);
- renderer_->ResumeAfterUnderflow();
- EXPECT_GT(buffer_capacity(), initial_capacity);
-
- // Verify that the buffer capacity is restored to the |initial_capacity|.
+ // Fulfill the read with an end-of-stream buffer. Doing so should change our
+ // buffering state so playback resumes.
+ EXPECT_CALL(*this, OnBufferingStateChange(BUFFERING_HAVE_ENOUGH));
DeliverEndOfStream();
- Flush();
- EXPECT_EQ(buffer_capacity(), initial_capacity);
-}
-
-TEST_F(AudioRendererImplTest, Underflow_FlushWhileUnderflowed) {
- Initialize();
- Preroll();
- StartRendering();
-
- // Drain internal buffer, we should have a pending read.
- EXPECT_TRUE(ConsumeBufferedData(frames_buffered(), NULL));
- WaitForPendingRead();
- // Verify the next FillBuffer() call triggers the underflow callback
- // since the decoder hasn't delivered any data after it was drained.
- EXPECT_CALL(*this, OnUnderflow());
- EXPECT_FALSE(ConsumeBufferedData(kDataSize, NULL));
+ // Consume all remaining data. We shouldn't have signaled ended yet.
+ EXPECT_TRUE(ConsumeBufferedData(frames_buffered()));
+ EXPECT_FALSE(ended());
- // Verify that we can still Flush() before entering the rebuffering state.
- DeliverEndOfStream();
- Flush();
+ // Ended should trigger on next render call.
+ EXPECT_FALSE(ConsumeBufferedData(1));
+ EXPECT_TRUE(ended());
}
-TEST_F(AudioRendererImplTest, Underflow_EndOfStream) {
- Initialize();
- Preroll();
- StartRendering();
-
- // Figure out how long until the ended event should fire. Since
- // ConsumeBufferedData() doesn't provide audio delay information, the time
- // until the ended event fires is equivalent to the longest buffered section,
- // which is the initial frames_buffered() read.
- TimeDelta time_until_ended = CalculatePlayTime(frames_buffered());
-
- // Drain internal buffer, we should have a pending read.
- EXPECT_TRUE(ConsumeBufferedData(frames_buffered(), NULL));
- WaitForPendingRead();
-
- // Verify the next FillBuffer() call triggers the underflow callback
- // since the decoder hasn't delivered any data after it was drained.
- EXPECT_CALL(*this, OnUnderflow());
- EXPECT_FALSE(ConsumeBufferedData(kDataSize, NULL));
-
- // Deliver a little bit of data.
- SatisfyPendingRead(kDataSize);
- WaitForPendingRead();
-
- // Verify we're getting muted audio during underflow. Note: Since resampling
- // is active, the number of frames_buffered() won't always match kDataSize.
- bool muted = false;
- const int kInitialFramesBuffered = 1114;
- EXPECT_EQ(kInitialFramesBuffered, frames_buffered());
- EXPECT_FALSE(ConsumeBufferedData(kInitialFramesBuffered, &muted));
- EXPECT_TRUE(muted);
-
- // Now deliver end of stream, we should get our little bit of data back.
- DeliverEndOfStream();
- const int kNextFramesBuffered = 1408;
- EXPECT_EQ(kNextFramesBuffered, frames_buffered());
- EXPECT_TRUE(ConsumeBufferedData(kNextFramesBuffered, &muted));
- EXPECT_FALSE(muted);
-
- // Attempt to read to make sure we're truly at the end of stream.
- AdvanceTime(time_until_ended);
- EXPECT_FALSE(ConsumeBufferedData(kDataSize, &muted));
- EXPECT_TRUE(muted);
- WaitForEnded();
-}
-
-TEST_F(AudioRendererImplTest, Underflow_ResumeFromCallback) {
+TEST_F(AudioRendererImplTest, Underflow) {
Initialize();
Preroll();
StartRendering();
// Drain internal buffer, we should have a pending read.
- EXPECT_TRUE(ConsumeBufferedData(frames_buffered(), NULL));
+ EXPECT_TRUE(ConsumeBufferedData(frames_buffered()));
WaitForPendingRead();
- // Verify the next FillBuffer() call triggers the underflow callback
- // since the decoder hasn't delivered any data after it was drained.
- EXPECT_CALL(*this, OnUnderflow())
- .WillOnce(Invoke(this, &AudioRendererImplTest::CallResumeAfterUnderflow));
- EXPECT_FALSE(ConsumeBufferedData(kDataSize, NULL));
+ // Verify the next FillBuffer() call triggers a buffering state change
+ // update.
+ EXPECT_CALL(*this, OnBufferingStateChange(BUFFERING_HAVE_NOTHING));
+ EXPECT_FALSE(ConsumeBufferedData(kDataSize));
- // Verify after resuming that we're still not getting data.
- bool muted = false;
+ // Verify we're still not getting audio data.
EXPECT_EQ(0, frames_buffered());
- EXPECT_FALSE(ConsumeBufferedData(kDataSize, &muted));
- EXPECT_TRUE(muted);
+ EXPECT_FALSE(ConsumeBufferedData(kDataSize));
- // Deliver data, we should get non-muted audio.
+ // Deliver enough data to return to the BUFFERING_HAVE_ENOUGH state.
+ EXPECT_CALL(*this, OnBufferingStateChange(BUFFERING_HAVE_ENOUGH));
DeliverRemainingAudio();
- EXPECT_TRUE(ConsumeBufferedData(kDataSize, &muted));
- EXPECT_FALSE(muted);
+
+ // Verify we're getting audio data.
+ EXPECT_TRUE(ConsumeBufferedData(kDataSize));
}
-TEST_F(AudioRendererImplTest, Underflow_SetPlaybackRate) {
+TEST_F(AudioRendererImplTest, Underflow_CapacityResetsAfterFlush) {
Initialize();
Preroll();
StartRendering();
// Drain internal buffer, we should have a pending read.
- EXPECT_TRUE(ConsumeBufferedData(frames_buffered(), NULL));
+ EXPECT_TRUE(ConsumeBufferedData(frames_buffered()));
WaitForPendingRead();
- EXPECT_EQ(FakeAudioRendererSink::kPlaying, sink_->state());
-
// Verify the next FillBuffer() call triggers a buffering state change
// since the decoder hasn't delivered any data after it was drained.
- EXPECT_CALL(*this, OnUnderflow())
- .WillOnce(Invoke(this, &AudioRendererImplTest::CallResumeAfterUnderflow));
- EXPECT_FALSE(ConsumeBufferedData(kDataSize, NULL));
- EXPECT_EQ(0, frames_buffered());
-
- EXPECT_EQ(FakeAudioRendererSink::kPlaying, sink_->state());
-
- // Simulate playback being paused.
- renderer_->SetPlaybackRate(0);
-
- EXPECT_EQ(FakeAudioRendererSink::kPaused, sink_->state());
-
- // Deliver data to resolve the underflow.
- DeliverRemainingAudio();
-
- EXPECT_EQ(FakeAudioRendererSink::kPaused, sink_->state());
+ int initial_capacity = buffer_capacity();
+ EXPECT_CALL(*this, OnBufferingStateChange(BUFFERING_HAVE_NOTHING));
+ EXPECT_FALSE(ConsumeBufferedData(kDataSize));
- // Simulate playback being resumed.
- renderer_->SetPlaybackRate(1);
+ // Verify that the buffer capacity increased as a result of underflowing.
+ EXPECT_GT(buffer_capacity(), initial_capacity);
- EXPECT_EQ(FakeAudioRendererSink::kPlaying, sink_->state());
+ // Verify that the buffer capacity is restored to the |initial_capacity|.
+ FlushDuringPendingRead();
+ EXPECT_EQ(buffer_capacity(), initial_capacity);
}
-TEST_F(AudioRendererImplTest, Underflow_PausePlay) {
+TEST_F(AudioRendererImplTest, Underflow_Flush) {
Initialize();
Preroll();
StartRendering();
- // Drain internal buffer, we should have a pending read.
- EXPECT_TRUE(ConsumeBufferedData(frames_buffered(), NULL));
+ // Force underflow.
+ EXPECT_TRUE(ConsumeBufferedData(frames_buffered()));
WaitForPendingRead();
+ EXPECT_CALL(*this, OnBufferingStateChange(BUFFERING_HAVE_NOTHING));
+ EXPECT_FALSE(ConsumeBufferedData(kDataSize));
+ WaitForPendingRead();
+ StopRendering();
- EXPECT_EQ(FakeAudioRendererSink::kPlaying, sink_->state());
-
- // Verify the next FillBuffer() call triggers the underflow callback
- // since the decoder hasn't delivered any data after it was drained.
- EXPECT_CALL(*this, OnUnderflow())
- .WillOnce(Invoke(this, &AudioRendererImplTest::CallResumeAfterUnderflow));
- EXPECT_FALSE(ConsumeBufferedData(kDataSize, NULL));
- EXPECT_EQ(0, frames_buffered());
-
- EXPECT_EQ(FakeAudioRendererSink::kPlaying, sink_->state());
-
- // Simulate playback being paused, and then played again.
- renderer_->SetPlaybackRate(0.0);
- renderer_->SetPlaybackRate(1.0);
-
- // Deliver data to resolve the underflow.
- DeliverRemainingAudio();
-
- // We should have resumed playing now.
- EXPECT_EQ(FakeAudioRendererSink::kPlaying, sink_->state());
+ // We shouldn't expect another buffering state change when flushing.
+ FlushDuringPendingRead();
}
TEST_F(AudioRendererImplTest, PendingRead_Flush) {
@@ -795,22 +538,16 @@ TEST_F(AudioRendererImplTest, PendingRead_Flush) {
StartRendering();
// Partially drain internal buffer so we get a pending read.
- EXPECT_TRUE(ConsumeBufferedData(frames_buffered() / 2, NULL));
+ EXPECT_TRUE(ConsumeBufferedData(frames_buffered() / 2));
WaitForPendingRead();
StopRendering();
EXPECT_TRUE(IsReadPending());
- // Start flushing.
- WaitableMessageLoopEvent flush_event;
- renderer_->Flush(flush_event.GetClosure());
-
- SatisfyPendingRead(kDataSize);
-
- flush_event.RunAndWait();
-
- EXPECT_FALSE(IsReadPending());
+ // Flush and expect to be notified that we have nothing.
+ EXPECT_CALL(*this, OnBufferingStateChange(BUFFERING_HAVE_NOTHING));
+ FlushDuringPendingRead();
// Preroll again to a different timestamp and verify it completed normally.
Preroll(1000, PIPELINE_OK);
@@ -823,7 +560,7 @@ TEST_F(AudioRendererImplTest, PendingRead_Stop) {
StartRendering();
// Partially drain internal buffer so we get a pending read.
- EXPECT_TRUE(ConsumeBufferedData(frames_buffered() / 2, NULL));
+ EXPECT_TRUE(ConsumeBufferedData(frames_buffered() / 2));
WaitForPendingRead();
StopRendering();
@@ -848,7 +585,7 @@ TEST_F(AudioRendererImplTest, PendingFlush_Stop) {
StartRendering();
// Partially drain internal buffer so we get a pending read.
- EXPECT_TRUE(ConsumeBufferedData(frames_buffered() / 2, NULL));
+ EXPECT_TRUE(ConsumeBufferedData(frames_buffered() / 2));
WaitForPendingRead();
StopRendering();
@@ -859,6 +596,7 @@ TEST_F(AudioRendererImplTest, PendingFlush_Stop) {
WaitableMessageLoopEvent flush_event;
renderer_->Flush(flush_event.GetClosure());
+ EXPECT_CALL(*this, OnBufferingStateChange(BUFFERING_HAVE_NOTHING));
SatisfyPendingRead(kDataSize);
WaitableMessageLoopEvent event;
@@ -881,7 +619,7 @@ TEST_F(AudioRendererImplTest, ConfigChangeDrainsConverter) {
StartRendering();
// Drain internal buffer, we should have a pending read.
- EXPECT_TRUE(ConsumeBufferedData(frames_buffered(), NULL));
+ EXPECT_TRUE(ConsumeBufferedData(frames_buffered()));
WaitForPendingRead();
// Deliver a little bit of data. Use an odd data size to ensure there is data
@@ -907,7 +645,7 @@ TEST_F(AudioRendererImplTest, TimeUpdatesOnFirstBuffer) {
// Preroll() should have buffered some data; consume half of it now.
int frames_to_consume = frames_buffered() / 2;
- EXPECT_TRUE(ConsumeBufferedData(frames_to_consume, NULL));
+ EXPECT_TRUE(ConsumeBufferedData(frames_to_consume));
WaitForPendingRead();
base::RunLoop().RunUntilIdle();
@@ -920,7 +658,7 @@ TEST_F(AudioRendererImplTest, TimeUpdatesOnFirstBuffer) {
// The next time update should match the remaining frames_buffered(), but only
// after running the message loop.
frames_to_consume = frames_buffered();
- EXPECT_TRUE(ConsumeBufferedData(frames_to_consume, NULL));
+ EXPECT_TRUE(ConsumeBufferedData(frames_to_consume));
EXPECT_EQ(timestamp_helper.GetTimestamp(), last_time_update());
base::RunLoop().RunUntilIdle();
@@ -932,17 +670,17 @@ TEST_F(AudioRendererImplTest, ImmediateEndOfStream) {
Initialize();
{
SCOPED_TRACE("Preroll()");
- WaitableMessageLoopEvent event;
- renderer_->Preroll(base::TimeDelta(), event.GetPipelineStatusCB());
+ renderer_->StartPlayingFrom(base::TimeDelta());
WaitForPendingRead();
+ EXPECT_CALL(*this, OnBufferingStateChange(BUFFERING_HAVE_ENOUGH));
DeliverEndOfStream();
- event.RunAndWaitForStatus(PIPELINE_OK);
}
StartRendering();
// Read a single frame. We shouldn't be able to satisfy it.
- EXPECT_FALSE(ConsumeBufferedData(1, NULL));
- WaitForEnded();
+ EXPECT_FALSE(ended());
+ EXPECT_FALSE(ConsumeBufferedData(1));
+ EXPECT_TRUE(ended());
}
TEST_F(AudioRendererImplTest, OnRenderErrorCausesDecodeError) {
diff --git a/media/filters/chunk_demuxer.cc b/media/filters/chunk_demuxer.cc
index 3b9d7b95dd..441d796938 100644
--- a/media/filters/chunk_demuxer.cc
+++ b/media/filters/chunk_demuxer.cc
@@ -561,8 +561,7 @@ bool SourceState::OnNewConfigs(
return false;
}
- if (!frame_processor_->AddTrack(FrameProcessorBase::kAudioTrackId,
- audio_)) {
+ if (!frame_processor_->AddTrack(FrameProcessor::kAudioTrackId, audio_)) {
DVLOG(1) << "Failed to add audio track to frame processor.";
return false;
}
@@ -581,8 +580,7 @@ bool SourceState::OnNewConfigs(
return false;
}
- if (!frame_processor_->AddTrack(FrameProcessorBase::kVideoTrackId,
- video_)) {
+ if (!frame_processor_->AddTrack(FrameProcessor::kVideoTrackId, video_)) {
DVLOG(1) << "Failed to add video track to frame processor.";
return false;
}
@@ -954,6 +952,10 @@ TextTrackConfig ChunkDemuxerStream::text_track_config() {
return stream_->GetCurrentTextTrackConfig();
}
+VideoRotation ChunkDemuxerStream::video_rotation() {
+ return VIDEO_ROTATION_0;
+}
+
void ChunkDemuxerStream::ChangeState_Locked(State state) {
lock_.AssertAcquired();
DVLOG(1) << "ChunkDemuxerStream::ChangeState_Locked() : "
@@ -1306,9 +1308,16 @@ void ChunkDemuxer::Abort(const std::string& id,
base::AutoLock auto_lock(lock_);
DCHECK(!id.empty());
CHECK(IsValidId(id));
+ bool old_waiting_for_data = IsSeekWaitingForData_Locked();
source_state_map_[id]->Abort(append_window_start,
append_window_end,
timestamp_offset);
+ // Abort() may have emitted some buffers, so check whether a pending
+ // seek can now be completed.
+ if (old_waiting_for_data && !IsSeekWaitingForData_Locked() &&
+ !seek_cb_.is_null()) {
+ base::ResetAndReturn(&seek_cb_).Run(PIPELINE_OK);
+ }
}
void ChunkDemuxer::Remove(const std::string& id, TimeDelta start,
diff --git a/media/filters/chunk_demuxer.h b/media/filters/chunk_demuxer.h
index cd01b1ee90..2abeeeaef5 100644
--- a/media/filters/chunk_demuxer.h
+++ b/media/filters/chunk_demuxer.h
@@ -86,6 +86,7 @@ class MEDIA_EXPORT ChunkDemuxerStream : public DemuxerStream {
virtual AudioDecoderConfig audio_decoder_config() OVERRIDE;
virtual VideoDecoderConfig video_decoder_config() OVERRIDE;
virtual bool SupportsConfigChanges() OVERRIDE;
+ virtual VideoRotation video_rotation() OVERRIDE;
// Returns the text track configuration. It is an error to call this method
// if type() != TEXT.
diff --git a/media/filters/chunk_demuxer_unittest.cc b/media/filters/chunk_demuxer_unittest.cc
index 2326de2de6..0bb3ecc5e4 100644
--- a/media/filters/chunk_demuxer_unittest.cc
+++ b/media/filters/chunk_demuxer_unittest.cc
@@ -397,43 +397,170 @@ class ChunkDemuxerTest : public ::testing::Test {
timecode, end_timecode, track_number, block_duration));
}
- // |cluster_description| - A space delimited string of buffer info that
- // is used to construct a cluster. Each buffer info is a timestamp in
- // milliseconds and optionally followed by a 'K' to indicate that a buffer
- // should be marked as a keyframe. For example "0K 30 60" should constuct
- // a cluster with 3 blocks: a keyframe with timestamp 0 and 2 non-keyframes
- // at 30ms and 60ms.
- void AppendSingleStreamCluster(const std::string& source_id, int track_number,
- const std::string& cluster_description) {
+ struct BlockInfo {
+ BlockInfo()
+ : track_number(0),
+ timestamp_in_ms(0),
+ flags(0),
+ duration(0) {
+ }
+
+ BlockInfo(int tn, int ts, int f, int d)
+ : track_number(tn),
+ timestamp_in_ms(ts),
+ flags(f),
+ duration(d) {
+ }
+
+ int track_number;
+ int timestamp_in_ms;
+ int flags;
+ int duration;
+
+ bool operator< (const BlockInfo& rhs) const {
+ return timestamp_in_ms < rhs.timestamp_in_ms;
+ }
+ };
+
+ // |track_number| - The track number to place in each parsed BlockInfo.
+ // |block_descriptions| - A space delimited string of block info that
+ // is used to populate |blocks|. Each block info is a timestamp in
+ // milliseconds, optionally followed by a 'K' to indicate that a block
+ // should be marked as a keyframe. For example "0K 30 60" should populate
+ // |blocks| with 3 BlockInfo objects: a keyframe with timestamp 0 and 2
+ // non-keyframes at 30ms and 60ms.
+ void ParseBlockDescriptions(int track_number,
+ const std::string& block_descriptions,
+ std::vector<BlockInfo>* blocks) {
std::vector<std::string> timestamps;
- base::SplitString(cluster_description, ' ', &timestamps);
+ base::SplitString(block_descriptions, ' ', &timestamps);
- ClusterBuilder cb;
- std::vector<uint8> data(10);
for (size_t i = 0; i < timestamps.size(); ++i) {
std::string timestamp_str = timestamps[i];
- int block_flags = 0;
+ BlockInfo block_info;
+ block_info.track_number = track_number;
+ block_info.flags = 0;
+ block_info.duration = 0;
+
if (EndsWith(timestamp_str, "K", true)) {
- block_flags = kWebMFlagKeyframe;
+ block_info.flags = kWebMFlagKeyframe;
// Remove the "K" off of the token.
timestamp_str = timestamp_str.substr(0, timestamps[i].length() - 1);
}
- int timestamp_in_ms;
- CHECK(base::StringToInt(timestamp_str, &timestamp_in_ms));
-
- if (i == 0)
- cb.SetClusterTimecode(timestamp_in_ms);
+ CHECK(base::StringToInt(timestamp_str, &block_info.timestamp_in_ms));
if (track_number == kTextTrackNum ||
track_number == kAlternateTextTrackNum) {
- cb.AddBlockGroup(track_number, timestamp_in_ms, kTextBlockDuration,
- block_flags, &data[0], data.size());
+ block_info.duration = kTextBlockDuration;
+ ASSERT_EQ(kWebMFlagKeyframe, block_info.flags)
+ << "Text block with timestamp " << block_info.timestamp_in_ms
+ << " was not marked as a keyframe."
+ << " All text blocks must be keyframes";
+ }
+
+ blocks->push_back(block_info);
+ }
+ }
+
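
As a rough illustration of the block-description format documented for ParseBlockDescriptions(), here is a standalone re-implementation of the parsing rule; SketchBlock and ParseSketch are made-up names for this sketch, not the test helper itself:

#include <iostream>
#include <sstream>
#include <string>
#include <vector>

// Mirrors the documented format: each whitespace-separated token is a
// timestamp in milliseconds, with an optional trailing 'K' marking a keyframe.
struct SketchBlock { int timestamp_ms; bool keyframe; };

std::vector<SketchBlock> ParseSketch(const std::string& descriptions) {
  std::vector<SketchBlock> blocks;
  std::istringstream tokens(descriptions);
  std::string token;
  while (tokens >> token) {
    SketchBlock block;
    block.keyframe = !token.empty() && token.back() == 'K';
    if (block.keyframe)
      token.resize(token.size() - 1);  // Strip the 'K' suffix.
    block.timestamp_ms = std::stoi(token);
    blocks.push_back(block);
  }
  return blocks;
}

int main() {
  // "0K 30 60" -> keyframe at 0 ms, non-keyframes at 30 ms and 60 ms.
  for (const SketchBlock& b : ParseSketch("0K 30 60"))
    std::cout << b.timestamp_ms << (b.keyframe ? "K" : "") << "\n";
}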
+ scoped_ptr<Cluster> GenerateCluster(const std::vector<BlockInfo>& blocks,
+ bool unknown_size) {
+ DCHECK_GT(blocks.size(), 0u);
+ ClusterBuilder cb;
+
+ std::vector<uint8> data(10);
+ for (size_t i = 0; i < blocks.size(); ++i) {
+ if (i == 0)
+ cb.SetClusterTimecode(blocks[i].timestamp_in_ms);
+
+ if (blocks[i].duration) {
+ if (blocks[i].track_number == kVideoTrackNum) {
+ AddVideoBlockGroup(&cb,
+ blocks[i].track_number, blocks[i].timestamp_in_ms,
+ blocks[i].duration, blocks[i].flags);
+ } else {
+ cb.AddBlockGroup(blocks[i].track_number, blocks[i].timestamp_in_ms,
+ blocks[i].duration, blocks[i].flags,
+ &data[0], data.size());
+ }
} else {
- cb.AddSimpleBlock(track_number, timestamp_in_ms, block_flags,
+ cb.AddSimpleBlock(blocks[i].track_number, blocks[i].timestamp_in_ms,
+ blocks[i].flags,
&data[0], data.size());
}
}
- AppendCluster(source_id, cb.Finish());
+
+ return unknown_size ? cb.FinishWithUnknownSize() : cb.Finish();
+ }
+
+ scoped_ptr<Cluster> GenerateCluster(
+ std::priority_queue<BlockInfo> block_queue,
+ bool unknown_size) {
+ std::vector<BlockInfo> blocks(block_queue.size());
+ for (size_t i = block_queue.size() - 1; !block_queue.empty(); --i) {
+ blocks[i] = block_queue.top();
+ block_queue.pop();
+ }
+
+ return GenerateCluster(blocks, unknown_size);
+ }
+
+ // |block_descriptions| - The block descriptions used to construct the
+ // cluster. See the documentation for ParseBlockDescriptions() for details on
+ // the string format.
+ void AppendSingleStreamCluster(const std::string& source_id, int track_number,
+ const std::string& block_descriptions) {
+ std::vector<BlockInfo> blocks;
+ ParseBlockDescriptions(track_number, block_descriptions, &blocks);
+ AppendCluster(source_id, GenerateCluster(blocks, false));
+ }
+
+ struct MuxedStreamInfo {
+ MuxedStreamInfo()
+ : track_number(0),
+ block_descriptions("")
+ {}
+
+ MuxedStreamInfo(int track_num, const char* block_desc)
+ : track_number(track_num),
+ block_descriptions(block_desc) {
+ }
+
+ int track_number;
+ // The block descriptions passed to ParseBlockDescriptions().
+ // See the documentation for that method for details on the string format.
+ const char* block_descriptions;
+ };
+
+ void AppendMuxedCluster(const MuxedStreamInfo& msi_1,
+ const MuxedStreamInfo& msi_2) {
+ std::vector<MuxedStreamInfo> msi(2);
+ msi[0] = msi_1;
+ msi[1] = msi_2;
+ AppendMuxedCluster(msi);
+ }
+
+ void AppendMuxedCluster(const MuxedStreamInfo& msi_1,
+ const MuxedStreamInfo& msi_2,
+ const MuxedStreamInfo& msi_3) {
+ std::vector<MuxedStreamInfo> msi(3);
+ msi[0] = msi_1;
+ msi[1] = msi_2;
+ msi[2] = msi_3;
+ AppendMuxedCluster(msi);
+ }
+
+ void AppendMuxedCluster(const std::vector<MuxedStreamInfo>& msi) {
+ std::priority_queue<BlockInfo> block_queue;
+ for (size_t i = 0; i < msi.size(); ++i) {
+ std::vector<BlockInfo> track_blocks;
+ ParseBlockDescriptions(msi[i].track_number, msi[i].block_descriptions,
+ &track_blocks);
+
+ for (size_t j = 0; j < track_blocks.size(); ++j)
+ block_queue.push(track_blocks[j]);
+ }
+
+ AppendCluster(kSourceId, GenerateCluster(block_queue, false));
}
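
A small aside on why GenerateCluster(block_queue, ...) fills its output vector from the back: std::priority_queue is a max-heap, so popping yields descending timestamps. This standalone sketch (plain pairs instead of BlockInfo, names illustrative) shows the interleaving AppendMuxedCluster achieves for audio "0K 23K" and video "0K 30":

#include <iostream>
#include <queue>
#include <utility>
#include <vector>

int main() {
  // Blocks from two tracks as (timestamp_ms, track) pairs.
  std::vector<std::pair<int, char>> blocks = {
      {0, 'a'}, {23, 'a'}, {0, 'v'}, {30, 'v'}};

  // Popping the max-heap gives descending timestamps; filling the output
  // vector from the back restores ascending (muxed) order. Note the real
  // BlockInfo compares only timestamps, so ordering of equal timestamps is
  // unspecified there; this sketch breaks ties by track id.
  std::priority_queue<std::pair<int, char>> queue(blocks.begin(), blocks.end());
  std::vector<std::pair<int, char>> muxed(queue.size());
  for (size_t i = muxed.size(); i-- > 0;) {
    muxed[i] = queue.top();
    queue.pop();
  }

  for (const auto& block : muxed)
    std::cout << block.first << block.second << " ";  // prints: 0a 0v 23a 30v
  std::cout << "\n";
}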
void AppendData(const std::string& source_id,
@@ -678,17 +805,14 @@ class ChunkDemuxerTest : public ::testing::Test {
bool unknown_size) {
CHECK_GT(block_count, 0);
- int size = 10;
- scoped_ptr<uint8[]> data(new uint8[size]);
-
- ClusterBuilder cb;
- cb.SetClusterTimecode(std::min(first_audio_timecode, first_video_timecode));
+ std::priority_queue<BlockInfo> block_queue;
if (block_count == 1) {
- cb.AddBlockGroup(kAudioTrackNum, first_audio_timecode,
- kAudioBlockDuration, kWebMFlagKeyframe,
- data.get(), size);
- return cb.Finish();
+ block_queue.push(BlockInfo(kAudioTrackNum,
+ first_audio_timecode,
+ kWebMFlagKeyframe,
+ kAudioBlockDuration));
+ return GenerateCluster(block_queue, unknown_size);
}
int audio_timecode = first_audio_timecode;
@@ -699,33 +823,34 @@ class ChunkDemuxerTest : public ::testing::Test {
uint8 video_flag = kWebMFlagKeyframe;
for (int i = 0; i < block_count - 2; i++) {
if (audio_timecode <= video_timecode) {
- cb.AddSimpleBlock(kAudioTrackNum, audio_timecode, kWebMFlagKeyframe,
- data.get(), size);
+ block_queue.push(BlockInfo(kAudioTrackNum,
+ audio_timecode,
+ kWebMFlagKeyframe,
+ 0));
audio_timecode += kAudioBlockDuration;
continue;
}
- cb.AddSimpleBlock(kVideoTrackNum, video_timecode, video_flag, data.get(),
- size);
+ block_queue.push(BlockInfo(kVideoTrackNum,
+ video_timecode,
+ video_flag,
+ 0));
video_timecode += kVideoBlockDuration;
video_flag = 0;
}
// Make the last 2 blocks BlockGroups so that they don't get delayed by the
// block duration calculation logic.
- if (audio_timecode <= video_timecode) {
- cb.AddBlockGroup(kAudioTrackNum, audio_timecode, kAudioBlockDuration,
- kWebMFlagKeyframe, data.get(), size);
- AddVideoBlockGroup(&cb, kVideoTrackNum, video_timecode,
- kVideoBlockDuration, video_flag);
- } else {
- AddVideoBlockGroup(&cb, kVideoTrackNum, video_timecode,
- kVideoBlockDuration, video_flag);
- cb.AddBlockGroup(kAudioTrackNum, audio_timecode, kAudioBlockDuration,
- kWebMFlagKeyframe, data.get(), size);
- }
+ block_queue.push(BlockInfo(kAudioTrackNum,
+ audio_timecode,
+ kWebMFlagKeyframe,
+ kAudioBlockDuration));
+ block_queue.push(BlockInfo(kVideoTrackNum,
+ video_timecode,
+ video_flag,
+ kVideoBlockDuration));
- return unknown_size ? cb.FinishWithUnknownSize() : cb.Finish();
+ return GenerateCluster(block_queue, unknown_size);
}
scoped_ptr<Cluster> GenerateSingleStreamCluster(int timecode,
@@ -1198,9 +1323,10 @@ TEST_F(ChunkDemuxerTest, SingleTextTrackIdChange) {
ASSERT_TRUE(video_stream);
ASSERT_TRUE(text_stream);
- AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "0K 23K");
- AppendSingleStreamCluster(kSourceId, kVideoTrackNum, "0K 30");
- AppendSingleStreamCluster(kSourceId, kTextTrackNum, "10K");
+ AppendMuxedCluster(
+ MuxedStreamInfo(kAudioTrackNum, "0K 23K"),
+ MuxedStreamInfo(kVideoTrackNum, "0K 30"),
+ MuxedStreamInfo(kTextTrackNum, "10K"));
CheckExpectedRanges(kSourceId, "{ [0,46) }");
scoped_ptr<uint8[]> info_tracks;
@@ -1213,9 +1339,10 @@ TEST_F(ChunkDemuxerTest, SingleTextTrackIdChange) {
append_window_end_for_next_append_,
&timestamp_offset_map_[kSourceId]);
- AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "46K 69K");
- AppendSingleStreamCluster(kSourceId, kVideoTrackNum, "60K");
- AppendSingleStreamCluster(kSourceId, kAlternateTextTrackNum, "45K");
+ AppendMuxedCluster(
+ MuxedStreamInfo(kAudioTrackNum, "46K 69K"),
+ MuxedStreamInfo(kVideoTrackNum, "60K"),
+ MuxedStreamInfo(kAlternateTextTrackNum, "45K"));
CheckExpectedRanges(kSourceId, "{ [0,92) }");
CheckExpectedBuffers(audio_stream, "0 23 46 69");
@@ -1242,22 +1369,22 @@ TEST_F(ChunkDemuxerTest, InitSegmentSetsNeedRandomAccessPointFlag) {
DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
ASSERT_TRUE(audio_stream && video_stream && text_stream);
- AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "0 23K");
- AppendSingleStreamCluster(kSourceId, kVideoTrackNum, "0 30K");
- AppendSingleStreamCluster(kSourceId, kTextTrackNum, "0 40K");
- CheckExpectedRanges(kSourceId, "{ [30,46) }");
+ AppendMuxedCluster(
+ MuxedStreamInfo(kAudioTrackNum, "0 23K"),
+ MuxedStreamInfo(kVideoTrackNum, "0 30K"),
+ MuxedStreamInfo(kTextTrackNum, "25K 40K"));
+ CheckExpectedRanges(kSourceId, "{ [23,46) }");
AppendInitSegment(HAS_TEXT | HAS_AUDIO | HAS_VIDEO);
- AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "46 69K");
- AppendSingleStreamCluster(kSourceId, kVideoTrackNum, "60 90K");
- AppendSingleStreamCluster(kSourceId, kTextTrackNum, "80 90K");
- CheckExpectedRanges(kSourceId, "{ [30,92) }");
+ AppendMuxedCluster(
+ MuxedStreamInfo(kAudioTrackNum, "46 69K"),
+ MuxedStreamInfo(kVideoTrackNum, "60 90K"),
+ MuxedStreamInfo(kTextTrackNum, "80K 90K"));
+ CheckExpectedRanges(kSourceId, "{ [23,92) }");
CheckExpectedBuffers(audio_stream, "23 69");
CheckExpectedBuffers(video_stream, "30 90");
-
- // WebM parser marks all text buffers as keyframes.
- CheckExpectedBuffers(text_stream, "0 40 80 90");
+ CheckExpectedBuffers(text_stream, "25 40 80 90");
}
// Make sure that the demuxer reports an error if Shutdown()
@@ -1730,8 +1857,9 @@ TEST_F(ChunkDemuxerTest, EndOfStreamRangeChanges) {
.WillOnce(SaveArg<0>(&text_stream));
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));
- AppendSingleStreamCluster(kSourceId, kVideoTrackNum, "0K 33");
- AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "0K 23K");
+ AppendMuxedCluster(
+ MuxedStreamInfo(kVideoTrackNum, "0K 33"),
+ MuxedStreamInfo(kAudioTrackNum, "0K 23K"));
// Check expected ranges and verify that an empty text track does not
// affect the expected ranges.
@@ -1752,7 +1880,10 @@ TEST_F(ChunkDemuxerTest, EndOfStreamRangeChanges) {
// Add text track data and verify that the buffered ranges don't change
// since the intersection of all the tracks doesn't change.
EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(200)));
- AppendSingleStreamCluster(kSourceId, kTextTrackNum, "0K 100K");
+ AppendMuxedCluster(
+ MuxedStreamInfo(kVideoTrackNum, "0K 33"),
+ MuxedStreamInfo(kAudioTrackNum, "0K 23K"),
+ MuxedStreamInfo(kTextTrackNum, "0K 100K"));
CheckExpectedRanges(kSourceId, "{ [0,46) }");
// Mark end of stream and verify that text track data is reflected in
@@ -2369,22 +2500,26 @@ TEST_F(ChunkDemuxerTest, GetBufferedRanges_AudioVideoText) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));
// Append audio & video data
- AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "0K 23");
- AppendSingleStreamCluster(kSourceId, kVideoTrackNum, "0K 33");
+ AppendMuxedCluster(
+ MuxedStreamInfo(kAudioTrackNum, "0K 23"),
+ MuxedStreamInfo(kVideoTrackNum, "0K 33"));
// Verify that a text track with no cues does not result in an empty buffered
// range.
CheckExpectedRanges("{ [0,46) }");
// Add some text cues.
- AppendSingleStreamCluster(kSourceId, kTextTrackNum, "0K 100K");
+ AppendMuxedCluster(
+ MuxedStreamInfo(kAudioTrackNum, "100K 123"),
+ MuxedStreamInfo(kVideoTrackNum, "100K 133"),
+ MuxedStreamInfo(kTextTrackNum, "100K 200K"));
- // Verify that the new cues did not affect the buffered ranges.
- CheckExpectedRanges("{ [0,46) }");
+ // Verify that the text cues are not reflected in the buffered ranges.
+ CheckExpectedRanges("{ [0,46) [100,146) }");
- // Remove the buffered range.
+ // Remove the buffered ranges.
demuxer_->Remove(kSourceId, base::TimeDelta(),
- base::TimeDelta::FromMilliseconds(46));
+ base::TimeDelta::FromMilliseconds(250));
CheckExpectedRanges("{ }");
}
@@ -2394,8 +2529,9 @@ TEST_F(ChunkDemuxerTest, GetBufferedRanges_AudioVideoText) {
TEST_F(ChunkDemuxerTest, GetBufferedRanges_EndOfStream) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
- AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "0K 23K");
- AppendSingleStreamCluster(kSourceId, kVideoTrackNum, "0K 33");
+ AppendMuxedCluster(
+ MuxedStreamInfo(kAudioTrackNum, "0K 23K"),
+ MuxedStreamInfo(kVideoTrackNum, "0K 33"));
CheckExpectedRanges("{ [0,46) }");
@@ -2412,11 +2548,10 @@ TEST_F(ChunkDemuxerTest, GetBufferedRanges_EndOfStream) {
// Append and remove data so that the 2 streams' end ranges do not overlap.
- EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(246)));
EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(398)));
- AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "200K 223K");
- AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
- "200K 233 266 299 332K 365");
+ AppendMuxedCluster(
+ MuxedStreamInfo(kAudioTrackNum, "200K 223K"),
+ MuxedStreamInfo(kVideoTrackNum, "200K 233 266 299 332K 365"));
// At this point, the per-stream ranges are as follows:
// Audio: [0,46) [200,246)
@@ -2431,8 +2566,9 @@ TEST_F(ChunkDemuxerTest, GetBufferedRanges_EndOfStream) {
// Video: [0,66) [332,398)
CheckExpectedRanges("{ [0,46) }");
- AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "200K 223K");
- AppendSingleStreamCluster(kSourceId, kVideoTrackNum, "200K 233");
+ AppendMuxedCluster(
+ MuxedStreamInfo(kAudioTrackNum, "200K 223K"),
+ MuxedStreamInfo(kVideoTrackNum, "200K 233"));
// At this point, the per-stream ranges are as follows:
// Audio: [0,46) [200,246)
@@ -2886,6 +3022,45 @@ TEST_F(ChunkDemuxerTest, EmitBuffersDuringAbort) {
EXPECT_EQ(range_after_abort.start(0), range_before_abort.start(0));
EXPECT_GT(range_after_abort.end(0), range_before_abort.end(0));
}
+
+TEST_F(ChunkDemuxerTest, SeekCompleteDuringAbort) {
+ EXPECT_CALL(*this, DemuxerOpened());
+ demuxer_->Initialize(
+ &host_, CreateInitDoneCB(kInfiniteDuration(), PIPELINE_OK), true);
+ EXPECT_EQ(ChunkDemuxer::kOk, AddIdForMp2tSource(kSourceId));
+
+ // For info:
+ // DTS/PTS derived using dvbsnoop -s ts -if bear-1280x720.ts -tssubdecode
+ // Video: first PES:
+ // PTS: 126912 (0x0001efc0) [= 90 kHz-Timestamp: 0:00:01.4101]
+ // DTS: 123909 (0x0001e405) [= 90 kHz-Timestamp: 0:00:01.3767]
+ // Audio: first PES:
+ // PTS: 126000 (0x0001ec30) [= 90 kHz-Timestamp: 0:00:01.4000]
+ // DTS: 123910 (0x0001e406) [= 90 kHz-Timestamp: 0:00:01.3767]
+ // Video: last PES:
+ // PTS: 370155 (0x0005a5eb) [= 90 kHz-Timestamp: 0:00:04.1128]
+ // DTS: 367152 (0x00059a30) [= 90 kHz-Timestamp: 0:00:04.0794]
+ // Audio: last PES:
+ // PTS: 353788 (0x000565fc) [= 90 kHz-Timestamp: 0:00:03.9309]
+
+ scoped_refptr<DecoderBuffer> buffer = ReadTestDataFile("bear-1280x720.ts");
+ AppendData(kSourceId, buffer->data(), buffer->data_size());
+
+ // Confirm we're in the middle of parsing a media segment.
+ ASSERT_TRUE(demuxer_->IsParsingMediaSegment(kSourceId));
+
+ // Seek to a time corresponding to buffers that will be emitted during the
+ // abort.
+ Seek(base::TimeDelta::FromMilliseconds(4110));
+
+ // Aborting the MPEG-2 TS parser triggers the emission of the last video
+ // buffer, which is still pending in the stream parser.
+ demuxer_->Abort(kSourceId,
+ append_window_start_for_next_append_,
+ append_window_end_for_next_append_,
+ &timestamp_offset_map_[kSourceId]);
+}
+
#endif
#endif
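
As a cross-check on the dvbsnoop values above: MPEG-2 TS PTS/DTS tick at 90 kHz, so dividing by 90 yields milliseconds, which is why the test seeks to 4110 ms, just below the last video PTS (370155 / 90 ≈ 4112.8 ms). A trivial helper for that conversion (illustrative only, not part of the test):

#include <cstdint>

// Converts a 90 kHz MPEG-2 TS timestamp to milliseconds.
double TsTicksToMilliseconds(int64_t ticks) {
  return ticks / 90.0;  // e.g. 370155 / 90 ≈ 4112.8 ms
}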
@@ -3288,14 +3463,15 @@ TEST_F(ChunkDemuxerTest, AppendWindow_Text) {
// Append a cluster that starts before and ends after the append
// window.
- AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
- "0K 30 60 90 120K 150 180 210 240K 270 300 330K");
- AppendSingleStreamCluster(kSourceId, kTextTrackNum, "0K 100K 200K 300K");
+ AppendMuxedCluster(
+ MuxedStreamInfo(kVideoTrackNum,
+ "0K 30 60 90 120K 150 180 210 240K 270 300 330K"),
+ MuxedStreamInfo(kTextTrackNum, "0K 100K 200K 300K" ));
// Verify that text cues that start outside the window are not included
// in the buffer. Also verify that cues that extend beyond the
// window are not included.
- CheckExpectedRanges(kSourceId, "{ [120,270) }");
+ CheckExpectedRanges(kSourceId, "{ [100,270) }");
CheckExpectedBuffers(video_stream, "120 150 180 210 240");
CheckExpectedBuffers(text_stream, "100");
@@ -3303,10 +3479,11 @@ TEST_F(ChunkDemuxerTest, AppendWindow_Text) {
append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(650);
// Append more data and verify that a new range is created.
- AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
- "360 390 420K 450 480 510 540K 570 600 630K");
- AppendSingleStreamCluster(kSourceId, kTextTrackNum, "400K 500K 600K 700K");
- CheckExpectedRanges(kSourceId, "{ [120,270) [420,630) }");
+ AppendMuxedCluster(
+ MuxedStreamInfo(kVideoTrackNum,
+ "360 390 420K 450 480 510 540K 570 600 630K"),
+ MuxedStreamInfo(kTextTrackNum, "400K 500K 600K 700K" ));
+ CheckExpectedRanges(kSourceId, "{ [100,270) [400,630) }");
// Seek to the new range and verify that the expected buffers are returned.
Seek(base::TimeDelta::FromMilliseconds(420));
@@ -3331,11 +3508,10 @@ TEST_F(ChunkDemuxerTest, Remove_AudioVideoText) {
DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
- AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
- "0K 20K 40K 60K 80K 100K 120K 140K");
- AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
- "0K 30 60 90 120K 150 180");
- AppendSingleStreamCluster(kSourceId, kTextTrackNum, "0K 100K 200K");
+ AppendMuxedCluster(
+ MuxedStreamInfo(kAudioTrackNum, "0K 20K 40K 60K 80K 100K 120K 140K"),
+ MuxedStreamInfo(kVideoTrackNum, "0K 30 60 90 120K 150 180"),
+ MuxedStreamInfo(kTextTrackNum, "0K 100K 200K"));
CheckExpectedBuffers(audio_stream, "0 20 40 60 80 100 120 140");
CheckExpectedBuffers(video_stream, "0 30 60 90 120 150 180");
@@ -3350,11 +3526,10 @@ TEST_F(ChunkDemuxerTest, Remove_AudioVideoText) {
// Append new buffers that are clearly different than the original
// ones and verify that only the new buffers are returned.
- AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
- "1K 21K 41K 61K 81K 101K 121K 141K");
- AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
- "1K 31 61 91 121K 151 181");
- AppendSingleStreamCluster(kSourceId, kTextTrackNum, "1K 101K 201K");
+ AppendMuxedCluster(
+ MuxedStreamInfo(kAudioTrackNum, "1K 21K 41K 61K 81K 101K 121K 141K"),
+ MuxedStreamInfo(kVideoTrackNum, "1K 31 61 91 121K 151 181"),
+ MuxedStreamInfo(kTextTrackNum, "1K 101K 201K"));
Seek(base::TimeDelta());
CheckExpectedBuffers(audio_stream, "1 21 41 61 81 101 121 141");
@@ -3412,14 +3587,14 @@ TEST_F(ChunkDemuxerTest, SeekCompletesWithoutTextCues) {
bool text_read_done = false;
text_stream->Read(base::Bind(&OnReadDone,
- base::TimeDelta::FromMilliseconds(125),
+ base::TimeDelta::FromMilliseconds(225),
&text_read_done));
// Append audio & video data so the seek completes.
- AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
- "0K 20K 40K 60K 80K 100K 120K 140K 160K 180K");
- AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
- "0K 30 60 90 120K 150 180 210");
+ AppendMuxedCluster(
+ MuxedStreamInfo(kAudioTrackNum,
+ "0K 20K 40K 60K 80K 100K 120K 140K 160K 180K 200K"),
+ MuxedStreamInfo(kVideoTrackNum, "0K 30 60 90 120K 150 180 210"));
message_loop_.RunUntilIdle();
EXPECT_TRUE(seek_cb_was_called);
@@ -3433,14 +3608,17 @@ TEST_F(ChunkDemuxerTest, SeekCompletesWithoutTextCues) {
// Append text cues that start after the seek point and verify that
// they are returned by Read() calls.
- AppendSingleStreamCluster(kSourceId, kTextTrackNum, "125K 175K 225K");
+ AppendMuxedCluster(
+ MuxedStreamInfo(kAudioTrackNum, "220K 240K 260K 280K"),
+ MuxedStreamInfo(kVideoTrackNum, "240K 270 300 330"),
+ MuxedStreamInfo(kTextTrackNum, "225K 275K 325K"));
message_loop_.RunUntilIdle();
EXPECT_TRUE(text_read_done);
- // NOTE: we start at 175 here because the buffer at 125 was returned
+ // NOTE: we start at 275 here because the buffer at 225 was returned
// to the pending read initiated above.
- CheckExpectedBuffers(text_stream, "175 225");
+ CheckExpectedBuffers(text_stream, "275 325");
// Verify that audio & video streams continue to return expected values.
CheckExpectedBuffers(audio_stream, "160 180");
diff --git a/media/filters/decoder_selector.cc b/media/filters/decoder_selector.cc
index 9020597798..7214b338be 100644
--- a/media/filters/decoder_selector.cc
+++ b/media/filters/decoder_selector.cc
@@ -62,7 +62,13 @@ DecoderSelector<StreamType>::DecoderSelector(
template <DemuxerStream::Type StreamType>
DecoderSelector<StreamType>::~DecoderSelector() {
DVLOG(2) << __FUNCTION__;
- DCHECK(select_decoder_cb_.is_null());
+ DCHECK(task_runner_->BelongsToCurrentThread());
+
+ if (!select_decoder_cb_.is_null())
+ ReturnNullDecoder();
+
+ decoder_.reset();
+ decrypted_stream_.reset();
}
template <DemuxerStream::Type StreamType>
@@ -112,40 +118,6 @@ void DecoderSelector<StreamType>::SelectDecoder(
}
template <DemuxerStream::Type StreamType>
-void DecoderSelector<StreamType>::Abort() {
- DVLOG(2) << __FUNCTION__;
- DCHECK(task_runner_->BelongsToCurrentThread());
-
- // This could happen when SelectDecoder() was not called or when
- // |select_decoder_cb_| was already posted but not fired (e.g. in the
- // message loop queue).
- if (select_decoder_cb_.is_null())
- return;
-
- // We must be trying to initialize the |decoder_| or the
- // |decrypted_stream_|. Invalid all weak pointers so that all initialization
- // callbacks won't fire.
- weak_ptr_factory_.InvalidateWeakPtrs();
-
- if (decoder_) {
- // |decrypted_stream_| is either NULL or already initialized. We don't
- // need to Stop() |decrypted_stream_| in either case.
- decoder_->Stop();
- ReturnNullDecoder();
- return;
- }
-
- if (decrypted_stream_) {
- decrypted_stream_->Stop(
- base::Bind(&DecoderSelector<StreamType>::ReturnNullDecoder,
- weak_ptr_factory_.GetWeakPtr()));
- return;
- }
-
- NOTREACHED();
-}
-
-template <DemuxerStream::Type StreamType>
void DecoderSelector<StreamType>::DecryptingDecoderInitDone(
PipelineStatus status) {
DVLOG(2) << __FUNCTION__;
diff --git a/media/filters/decoder_selector.h b/media/filters/decoder_selector.h
index 662e8082ea..c50f9fba71 100644
--- a/media/filters/decoder_selector.h
+++ b/media/filters/decoder_selector.h
@@ -57,6 +57,9 @@ class MEDIA_EXPORT DecoderSelector {
const scoped_refptr<base::SingleThreadTaskRunner>& message_loop,
ScopedVector<Decoder> decoders,
const SetDecryptorReadyCB& set_decryptor_ready_cb);
+
+ // Aborts any pending decoder selection and, if |select_decoder_cb| is still
+ // pending, fires it immediately with a NULL decoder and a NULL stream.
~DecoderSelector();
// Initializes and selects a Decoder that can decode the |stream|.
@@ -67,10 +70,6 @@ class MEDIA_EXPORT DecoderSelector {
const SelectDecoderCB& select_decoder_cb,
const typename Decoder::OutputCB& output_cb);
- // Aborts pending Decoder selection and fires |select_decoder_cb| with
- // NULL and NULL immediately if it's pending.
- void Abort();
-
private:
void DecryptingDecoderInitDone(PipelineStatus status);
void DecryptingDemuxerStreamInitDone(PipelineStatus status);
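
As the new destructor comment above says, tearing down the selector now stands in for the removed Abort(): a still-pending |select_decoder_cb| is fired with NULL results. A standalone sketch of that pattern (my own illustration with placeholder types, not the Chromium class):

#include <functional>
#include <memory>
#include <utility>

class SelectorSketch {
 public:
  // The real callback carries a decoder and a decrypting stream; plain ints
  // stand in for both here.
  using SelectCB = std::function<void(std::unique_ptr<int> /*decoder*/,
                                      std::unique_ptr<int> /*stream*/)>;

  void Select(SelectCB cb) { pending_cb_ = std::move(cb); }

  // Destruction satisfies a pending selection with NULL results, mirroring
  // "fires |select_decoder_cb| with NULL and NULL" above.
  ~SelectorSketch() {
    if (pending_cb_)
      pending_cb_(nullptr, nullptr);
  }

 private:
  SelectCB pending_cb_;
};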
diff --git a/media/filters/decoder_stream.cc b/media/filters/decoder_stream.cc
index a912398f53..10916ea980 100644
--- a/media/filters/decoder_stream.cc
+++ b/media/filters/decoder_stream.cc
@@ -56,7 +56,25 @@ DecoderStream<StreamType>::DecoderStream(
template <DemuxerStream::Type StreamType>
DecoderStream<StreamType>::~DecoderStream() {
- DCHECK(state_ == STATE_UNINITIALIZED || state_ == STATE_STOPPED) << state_;
+ FUNCTION_DVLOG(2);
+ DCHECK(task_runner_->BelongsToCurrentThread());
+
+ decoder_selector_.reset();
+
+ if (!init_cb_.is_null()) {
+ task_runner_->PostTask(FROM_HERE,
+ base::Bind(base::ResetAndReturn(&init_cb_), false));
+ }
+ if (!read_cb_.is_null()) {
+ task_runner_->PostTask(FROM_HERE, base::Bind(
+ base::ResetAndReturn(&read_cb_), ABORTED, scoped_refptr<Output>()));
+ }
+ if (!reset_cb_.is_null())
+ task_runner_->PostTask(FROM_HERE, base::ResetAndReturn(&reset_cb_));
+
+ stream_ = NULL;
+ decoder_.reset();
+ decrypting_demuxer_stream_.reset();
}
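
The destructor above relies on base::ResetAndReturn() so that each member callback is cleared before the callback is posted; re-entrant calls into the object therefore see an empty callback rather than a stale one. A simplified stand-in for that idiom (an assumption expressed with std::function, not Chromium's implementation):

#include <functional>
#include <utility>

// Takes the current value of |cb| and leaves |cb| empty, so whoever runs the
// returned callback cannot observe or double-run the member copy.
std::function<void()> ResetAndReturnSketch(std::function<void()>* cb) {
  std::function<void()> taken = std::move(*cb);
  *cb = nullptr;
  return taken;
}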
template <DemuxerStream::Type StreamType>
@@ -89,13 +107,12 @@ template <DemuxerStream::Type StreamType>
void DecoderStream<StreamType>::Read(const ReadCB& read_cb) {
FUNCTION_DVLOG(2);
DCHECK(task_runner_->BelongsToCurrentThread());
- DCHECK(state_ != STATE_UNINITIALIZED && state_ != STATE_INITIALIZING &&
- state_ != STATE_STOPPED) << state_;
+ DCHECK(state_ != STATE_UNINITIALIZED && state_ != STATE_INITIALIZING)
+ << state_;
// No two reads in flight at any time.
DCHECK(read_cb_.is_null());
// No read during resetting or stopping process.
DCHECK(reset_cb_.is_null());
- DCHECK(stop_cb_.is_null());
if (state_ == STATE_ERROR) {
task_runner_->PostTask(
@@ -125,9 +142,8 @@ template <DemuxerStream::Type StreamType>
void DecoderStream<StreamType>::Reset(const base::Closure& closure) {
FUNCTION_DVLOG(2);
DCHECK(task_runner_->BelongsToCurrentThread());
- DCHECK(state_ != STATE_UNINITIALIZED && state_ != STATE_STOPPED) << state_;
+ DCHECK(state_ != STATE_UNINITIALIZED) << state_;
DCHECK(reset_cb_.is_null());
- DCHECK(stop_cb_.is_null());
reset_cb_ = closure;
@@ -160,52 +176,6 @@ void DecoderStream<StreamType>::Reset(const base::Closure& closure) {
}
template <DemuxerStream::Type StreamType>
-void DecoderStream<StreamType>::Stop(const base::Closure& closure) {
- FUNCTION_DVLOG(2);
- DCHECK(task_runner_->BelongsToCurrentThread());
- DCHECK_NE(state_, STATE_STOPPED) << state_;
- DCHECK(stop_cb_.is_null());
-
- stop_cb_ = closure;
-
- if (state_ == STATE_INITIALIZING) {
- decoder_selector_->Abort();
- return;
- }
-
- DCHECK(init_cb_.is_null());
-
- // All pending callbacks will be dropped.
- weak_factory_.InvalidateWeakPtrs();
-
- // Post callbacks to prevent reentrance into this object.
- if (!read_cb_.is_null()) {
- task_runner_->PostTask(FROM_HERE, base::Bind(
- base::ResetAndReturn(&read_cb_), ABORTED, scoped_refptr<Output>()));
- }
- if (!reset_cb_.is_null())
- task_runner_->PostTask(FROM_HERE, base::ResetAndReturn(&reset_cb_));
-
- if (decrypting_demuxer_stream_) {
- decrypting_demuxer_stream_->Stop(base::Bind(
- &DecoderStream<StreamType>::StopDecoder, weak_factory_.GetWeakPtr()));
- return;
- }
-
- // We may not have a |decoder_| if Stop() was called during initialization.
- if (decoder_) {
- StopDecoder();
- return;
- }
-
- state_ = STATE_STOPPED;
- stream_ = NULL;
- decoder_.reset();
- decrypting_demuxer_stream_.reset();
- task_runner_->PostTask(FROM_HERE, base::ResetAndReturn(&stop_cb_));
-}
-
-template <DemuxerStream::Type StreamType>
bool DecoderStream<StreamType>::CanReadWithoutStalling() const {
DCHECK(task_runner_->BelongsToCurrentThread());
return !ready_outputs_.empty() || decoder_->CanReadWithoutStalling();
@@ -258,19 +228,14 @@ void DecoderStream<StreamType>::OnDecoderSelected(
state_ = STATE_UNINITIALIZED;
StreamTraits::FinishInitialization(
base::ResetAndReturn(&init_cb_), selected_decoder.get(), stream_);
- } else {
- state_ = STATE_NORMAL;
- decoder_ = selected_decoder.Pass();
- decrypting_demuxer_stream_ = decrypting_demuxer_stream.Pass();
- StreamTraits::FinishInitialization(
- base::ResetAndReturn(&init_cb_), decoder_.get(), stream_);
- }
-
- // Stop() called during initialization.
- if (!stop_cb_.is_null()) {
- Stop(base::ResetAndReturn(&stop_cb_));
return;
}
+
+ state_ = STATE_NORMAL;
+ decoder_ = selected_decoder.Pass();
+ decrypting_demuxer_stream_ = decrypting_demuxer_stream.Pass();
+ StreamTraits::FinishInitialization(
+ base::ResetAndReturn(&init_cb_), decoder_.get(), stream_);
}
template <DemuxerStream::Type StreamType>
@@ -288,7 +253,6 @@ void DecoderStream<StreamType>::Decode(
DCHECK(state_ == STATE_NORMAL || state_ == STATE_FLUSHING_DECODER) << state_;
DCHECK_LT(pending_decode_requests_, GetMaxDecodeRequests());
DCHECK(reset_cb_.is_null());
- DCHECK(stop_cb_.is_null());
DCHECK(buffer);
int buffer_size = buffer->end_of_stream() ? 0 : buffer->data_size();
@@ -315,7 +279,6 @@ void DecoderStream<StreamType>::OnDecodeDone(int buffer_size,
DCHECK(state_ == STATE_NORMAL || state_ == STATE_FLUSHING_DECODER ||
state_ == STATE_PENDING_DEMUXER_READ || state_ == STATE_ERROR)
<< state_;
- DCHECK(stop_cb_.is_null());
DCHECK_GT(pending_decode_requests_, 0);
--pending_decode_requests_;
@@ -410,7 +373,6 @@ void DecoderStream<StreamType>::ReadFromDemuxerStream() {
DCHECK_EQ(state_, STATE_NORMAL) << state_;
DCHECK(CanDecodeMore());
DCHECK(reset_cb_.is_null());
- DCHECK(stop_cb_.is_null());
state_ = STATE_PENDING_DEMUXER_READ;
stream_->Read(base::Bind(&DecoderStream<StreamType>::OnBufferReady,
@@ -422,18 +384,16 @@ void DecoderStream<StreamType>::OnBufferReady(
DemuxerStream::Status status,
const scoped_refptr<DecoderBuffer>& buffer) {
FUNCTION_DVLOG(2) << ": " << status << ", "
- << buffer->AsHumanReadableString();
+ << (buffer ? buffer->AsHumanReadableString() : "NULL");
DCHECK(task_runner_->BelongsToCurrentThread());
- DCHECK(state_ == STATE_PENDING_DEMUXER_READ || state_ == STATE_ERROR ||
- state_ == STATE_STOPPED)
+ DCHECK(state_ == STATE_PENDING_DEMUXER_READ || state_ == STATE_ERROR)
<< state_;
DCHECK_EQ(buffer.get() != NULL, status == DemuxerStream::kOk) << status;
- DCHECK(stop_cb_.is_null());
// Decoding has been stopped (e.g due to an error).
if (state_ != STATE_PENDING_DEMUXER_READ) {
- DCHECK(state_ == STATE_ERROR || state_ == STATE_STOPPED);
+ DCHECK(state_ == STATE_ERROR);
DCHECK(read_cb_.is_null());
return;
}
@@ -514,7 +474,6 @@ void DecoderStream<StreamType>::OnDecoderReinitialized(PipelineStatus status) {
FUNCTION_DVLOG(2);
DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK_EQ(state_, STATE_REINITIALIZING_DECODER) << state_;
- DCHECK(stop_cb_.is_null());
// ReinitializeDecoder() can be called in two cases:
// 1, Flushing decoder finished (see OnDecodeOutputReady()).
@@ -562,7 +521,6 @@ void DecoderStream<StreamType>::OnDecoderReset() {
// before the reset callback is fired.
DCHECK(read_cb_.is_null());
DCHECK(!reset_cb_.is_null());
- DCHECK(stop_cb_.is_null());
if (state_ != STATE_FLUSHING_DECODER) {
state_ = STATE_NORMAL;
@@ -575,23 +533,6 @@ void DecoderStream<StreamType>::OnDecoderReset() {
ReinitializeDecoder();
}
-template <DemuxerStream::Type StreamType>
-void DecoderStream<StreamType>::StopDecoder() {
- FUNCTION_DVLOG(2);
- DCHECK(task_runner_->BelongsToCurrentThread());
- DCHECK(state_ != STATE_UNINITIALIZED && state_ != STATE_STOPPED) << state_;
- DCHECK(!stop_cb_.is_null());
-
- state_ = STATE_STOPPED;
- decoder_->Stop();
- stream_ = NULL;
- decoder_.reset();
- decrypting_demuxer_stream_.reset();
- // Post |stop_cb_| because pending |read_cb_| and/or |reset_cb_| are also
- // posted in Stop().
- task_runner_->PostTask(FROM_HERE, base::ResetAndReturn(&stop_cb_));
-}
-
template class DecoderStream<DemuxerStream::VIDEO>;
template class DecoderStream<DemuxerStream::AUDIO>;
diff --git a/media/filters/decoder_stream.h b/media/filters/decoder_stream.h
index 7cb78738da..c077a1830e 100644
--- a/media/filters/decoder_stream.h
+++ b/media/filters/decoder_stream.h
@@ -65,23 +65,16 @@ class MEDIA_EXPORT DecoderStream {
// Reads a decoded Output and returns it via the |read_cb|. Note that
// |read_cb| is always called asynchronously. This method should only be
// called after initialization has succeeded and must not be called during
- // any pending Reset() and/or Stop().
+ // a pending Reset().
void Read(const ReadCB& read_cb);
// Resets the decoder, flushes all decoded outputs and/or internal buffers,
// fires any existing pending read callback and calls |closure| on completion.
// Note that |closure| is always called asynchronously. This method should
// only be called after initialization has succeeded and must not be called
- // during any pending Reset() and/or Stop().
+ // during a pending Reset().
void Reset(const base::Closure& closure);
- // Stops the decoder, fires any existing pending read callback or reset
- // callback and calls |closure| on completion. Note that |closure| is always
- // called asynchronously. The DecoderStream cannot be used anymore after
- // it is stopped. This method can be called at any time but not during another
- // pending Stop().
- void Stop(const base::Closure& closure);
-
// Returns true if the decoder currently has the ability to decode and return
// an Output.
// TODO(rileya): Remove the need for this by refactoring Decoder queueing
@@ -117,12 +110,11 @@ class MEDIA_EXPORT DecoderStream {
enum State {
STATE_UNINITIALIZED,
STATE_INITIALIZING,
- STATE_NORMAL, // Includes idle, pending decoder decode/reset/stop.
+ STATE_NORMAL, // Includes idle, pending decoder decode/reset.
STATE_FLUSHING_DECODER,
STATE_PENDING_DEMUXER_READ,
STATE_REINITIALIZING_DECODER,
STATE_END_OF_STREAM, // End of stream reached; returns EOS on all reads.
- STATE_STOPPED,
STATE_ERROR
};
@@ -165,8 +157,6 @@ class MEDIA_EXPORT DecoderStream {
void ResetDecoder();
void OnDecoderReset();
- void StopDecoder();
-
scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
State state_;
@@ -176,7 +166,6 @@ class MEDIA_EXPORT DecoderStream {
ReadCB read_cb_;
base::Closure reset_cb_;
- base::Closure stop_cb_;
DemuxerStream* stream_;
bool low_delay_;
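
The revised comments above pin down the calling contract: at most one Read() in flight, no Read() while a Reset() is pending, and both callbacks always delivered asynchronously. A toy model of that discipline (illustrative only; std::function and an explicit task queue stand in for the Chromium callback and message-loop machinery):

#include <cassert>
#include <deque>
#include <functional>
#include <utility>

class ToyDecoderStream {
 public:
  void Read(std::function<void(int /*frame*/)> read_cb) {
    assert(!read_cb_ && "no two reads in flight");
    assert(!reset_cb_ && "no Read() during a pending Reset()");
    read_cb_ = std::move(read_cb);
    tasks_.push_back([this] {
      std::function<void(int)> cb;
      cb.swap(read_cb_);
      cb(42);  // deliver a pretend decoded frame, always asynchronously
    });
  }

  void Reset(std::function<void()> done_cb) {
    assert(!reset_cb_ && "no overlapping Reset() calls");
    reset_cb_ = std::move(done_cb);
    tasks_.push_back([this] {
      std::function<void()> cb;
      cb.swap(reset_cb_);
      cb();
    });
  }

  // Drains the fake message loop, like message_loop_.RunUntilIdle() in tests.
  void RunUntilIdle() {
    while (!tasks_.empty()) {
      std::function<void()> task = std::move(tasks_.front());
      tasks_.pop_front();
      task();
    }
  }

 private:
  std::deque<std::function<void()>> tasks_;
  std::function<void(int)> read_cb_;
  std::function<void()> reset_cb_;
};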
diff --git a/media/filters/decrypting_audio_decoder.cc b/media/filters/decrypting_audio_decoder.cc
index 136e171d73..79d8209330 100644
--- a/media/filters/decrypting_audio_decoder.cc
+++ b/media/filters/decrypting_audio_decoder.cc
@@ -144,13 +144,12 @@ void DecryptingAudioDecoder::Reset(const base::Closure& closure) {
DoReset();
}
-void DecryptingAudioDecoder::Stop() {
- DVLOG(2) << "Stop() - state: " << state_;
+DecryptingAudioDecoder::~DecryptingAudioDecoder() {
+ DVLOG(2) << __FUNCTION__;
DCHECK(task_runner_->BelongsToCurrentThread());
- // Invalidate all weak pointers so that pending callbacks won't be fired into
- // this object.
- weak_factory_.InvalidateWeakPtrs();
+ if (state_ == kUninitialized)
+ return;
if (decryptor_) {
decryptor_->DeinitializeDecoder(Decryptor::kAudio);
@@ -165,12 +164,6 @@ void DecryptingAudioDecoder::Stop() {
base::ResetAndReturn(&decode_cb_).Run(kAborted);
if (!reset_cb_.is_null())
base::ResetAndReturn(&reset_cb_).Run();
-
- state_ = kStopped;
-}
-
-DecryptingAudioDecoder::~DecryptingAudioDecoder() {
- DCHECK(state_ == kUninitialized || state_ == kStopped) << state_;
}
void DecryptingAudioDecoder::SetDecryptor(Decryptor* decryptor) {
@@ -184,8 +177,7 @@ void DecryptingAudioDecoder::SetDecryptor(Decryptor* decryptor) {
if (!decryptor) {
base::ResetAndReturn(&init_cb_).Run(DECODER_ERROR_NOT_SUPPORTED);
- // TODO(xhwang): Add kError state. See http://crbug.com/251503
- state_ = kStopped;
+ state_ = kError;
return;
}
@@ -212,7 +204,8 @@ void DecryptingAudioDecoder::FinishInitialization(bool success) {
if (!success) {
base::ResetAndReturn(&init_cb_).Run(DECODER_ERROR_NOT_SUPPORTED);
- state_ = kStopped;
+ decryptor_ = NULL;
+ state_ = kError;
return;
}
diff --git a/media/filters/decrypting_audio_decoder.h b/media/filters/decrypting_audio_decoder.h
index 6d1df7c3c9..70cbc56322 100644
--- a/media/filters/decrypting_audio_decoder.h
+++ b/media/filters/decrypting_audio_decoder.h
@@ -49,7 +49,6 @@ class MEDIA_EXPORT DecryptingAudioDecoder : public AudioDecoder {
virtual void Decode(const scoped_refptr<DecoderBuffer>& buffer,
const DecodeCB& decode_cb) OVERRIDE;
virtual void Reset(const base::Closure& closure) OVERRIDE;
- virtual void Stop() OVERRIDE;
private:
// For a detailed state diagram please see this link: http://goo.gl/8jAok
@@ -64,7 +63,7 @@ class MEDIA_EXPORT DecryptingAudioDecoder : public AudioDecoder {
kPendingDecode,
kWaitingForKey,
kDecodeFinished,
- kStopped,
+ kError
};
// Callback for DecryptorHost::RequestDecryptor().
@@ -101,7 +100,6 @@ class MEDIA_EXPORT DecryptingAudioDecoder : public AudioDecoder {
OutputCB output_cb_;
DecodeCB decode_cb_;
base::Closure reset_cb_;
- base::Closure stop_cb_;
// The current decoder configuration.
AudioDecoderConfig config_;
diff --git a/media/filters/decrypting_audio_decoder_unittest.cc b/media/filters/decrypting_audio_decoder_unittest.cc
index 8f187e1ae7..83d7f5f36b 100644
--- a/media/filters/decrypting_audio_decoder_unittest.cc
+++ b/media/filters/decrypting_audio_decoder_unittest.cc
@@ -79,7 +79,7 @@ class DecryptingAudioDecoderTest : public testing::Test {
virtual ~DecryptingAudioDecoderTest() {
EXPECT_CALL(*this, RequestDecryptorNotification(_))
.Times(testing::AnyNumber());
- Stop();
+ Destroy();
}
void InitializeAndExpectStatus(const AudioDecoderConfig& config,
@@ -234,12 +234,12 @@ class DecryptingAudioDecoderTest : public testing::Test {
message_loop_.RunUntilIdle();
}
- void Stop() {
+ void Destroy() {
EXPECT_CALL(*decryptor_, DeinitializeDecoder(Decryptor::kAudio))
.WillRepeatedly(InvokeWithoutArgs(
this, &DecryptingAudioDecoderTest::AbortAllPendingCBs));
- decoder_->Stop();
+ decoder_.reset();
message_loop_.RunUntilIdle();
}
diff --git a/media/filters/decrypting_demuxer_stream.cc b/media/filters/decrypting_demuxer_stream.cc
index 6a1de5f38c..4ec6c530b2 100644
--- a/media/filters/decrypting_demuxer_stream.cc
+++ b/media/filters/decrypting_demuxer_stream.cc
@@ -74,7 +74,6 @@ void DecryptingDemuxerStream::Reset(const base::Closure& closure) {
DVLOG(2) << __FUNCTION__ << " - state: " << state_;
DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK(state_ != kUninitialized) << state_;
- DCHECK(state_ != kStopped) << state_;
DCHECK(reset_cb_.is_null());
reset_cb_ = BindToCurrentLoop(closure);
@@ -110,36 +109,6 @@ void DecryptingDemuxerStream::Reset(const base::Closure& closure) {
DoReset();
}
-void DecryptingDemuxerStream::Stop(const base::Closure& closure) {
- DVLOG(2) << __FUNCTION__ << " - state: " << state_;
- DCHECK(task_runner_->BelongsToCurrentThread());
- DCHECK(state_ != kUninitialized) << state_;
-
- // Invalidate all weak pointers so that pending callbacks won't be fired into
- // this object.
- weak_factory_.InvalidateWeakPtrs();
-
- // At this point the render thread is likely paused (in WebMediaPlayerImpl's
- // Destroy()), so running |closure| can't wait for anything that requires the
- // render thread to process messages to complete (such as PPAPI methods).
- if (decryptor_) {
- decryptor_->CancelDecrypt(GetDecryptorStreamType());
- decryptor_ = NULL;
- }
- if (!set_decryptor_ready_cb_.is_null())
- base::ResetAndReturn(&set_decryptor_ready_cb_).Run(DecryptorReadyCB());
- if (!init_cb_.is_null())
- base::ResetAndReturn(&init_cb_).Run(PIPELINE_ERROR_ABORT);
- if (!read_cb_.is_null())
- base::ResetAndReturn(&read_cb_).Run(kAborted, NULL);
- if (!reset_cb_.is_null())
- base::ResetAndReturn(&reset_cb_).Run();
- pending_buffer_to_decrypt_ = NULL;
-
- state_ = kStopped;
- BindToCurrentLoop(closure).Run();
-}
-
AudioDecoderConfig DecryptingDemuxerStream::audio_decoder_config() {
DCHECK(state_ != kUninitialized && state_ != kDecryptorRequested) << state_;
CHECK_EQ(demuxer_stream_->type(), AUDIO);
@@ -165,8 +134,30 @@ bool DecryptingDemuxerStream::SupportsConfigChanges() {
return demuxer_stream_->SupportsConfigChanges();
}
+VideoRotation DecryptingDemuxerStream::video_rotation() {
+ return VIDEO_ROTATION_0;
+}
+
DecryptingDemuxerStream::~DecryptingDemuxerStream() {
DVLOG(2) << __FUNCTION__ << " : state_ = " << state_;
+ DCHECK(task_runner_->BelongsToCurrentThread());
+
+ if (state_ == kUninitialized)
+ return;
+
+ if (decryptor_) {
+ decryptor_->CancelDecrypt(GetDecryptorStreamType());
+ decryptor_ = NULL;
+ }
+ if (!set_decryptor_ready_cb_.is_null())
+ base::ResetAndReturn(&set_decryptor_ready_cb_).Run(DecryptorReadyCB());
+ if (!init_cb_.is_null())
+ base::ResetAndReturn(&init_cb_).Run(PIPELINE_ERROR_ABORT);
+ if (!read_cb_.is_null())
+ base::ResetAndReturn(&read_cb_).Run(kAborted, NULL);
+ if (!reset_cb_.is_null())
+ base::ResetAndReturn(&reset_cb_).Run();
+ pending_buffer_to_decrypt_ = NULL;
}
void DecryptingDemuxerStream::SetDecryptor(Decryptor* decryptor) {
diff --git a/media/filters/decrypting_demuxer_stream.h b/media/filters/decrypting_demuxer_stream.h
index ec9f4b4663..c65df176fa 100644
--- a/media/filters/decrypting_demuxer_stream.h
+++ b/media/filters/decrypting_demuxer_stream.h
@@ -31,6 +31,8 @@ class MEDIA_EXPORT DecryptingDemuxerStream : public DemuxerStream {
DecryptingDemuxerStream(
const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
const SetDecryptorReadyCB& set_decryptor_ready_cb);
+
+ // Cancels all pending operations immediately and fires all pending callbacks.
virtual ~DecryptingDemuxerStream();
void Initialize(DemuxerStream* stream,
@@ -42,13 +44,6 @@ class MEDIA_EXPORT DecryptingDemuxerStream : public DemuxerStream {
// kUninitialized if |this| hasn't been initialized, or to kIdle otherwise.
void Reset(const base::Closure& closure);
- // Cancels all pending operations immediately and fires all pending callbacks
- // and sets the state to kStopped. Does NOT wait for any pending operations.
- // Note: During the teardown process, media pipeline will be waiting on the
- // render main thread. If a Decryptor depends on the render main thread
- // (e.g. PpapiDecryptor), the pending DecryptCB would not be satisfied.
- void Stop(const base::Closure& closure);
-
// DemuxerStream implementation.
virtual void Read(const ReadCB& read_cb) OVERRIDE;
virtual AudioDecoderConfig audio_decoder_config() OVERRIDE;
@@ -56,6 +51,7 @@ class MEDIA_EXPORT DecryptingDemuxerStream : public DemuxerStream {
virtual Type type() OVERRIDE;
virtual void EnableBitstreamConverter() OVERRIDE;
virtual bool SupportsConfigChanges() OVERRIDE;
+ virtual VideoRotation video_rotation() OVERRIDE;
private:
// For a detailed state diagram please see this link: http://goo.gl/8jAok
@@ -68,8 +64,7 @@ class MEDIA_EXPORT DecryptingDemuxerStream : public DemuxerStream {
kIdle,
kPendingDemuxerRead,
kPendingDecrypt,
- kWaitingForKey,
- kStopped
+ kWaitingForKey
};
// Callback for DecryptorHost::RequestDecryptor().
diff --git a/media/filters/decrypting_demuxer_stream_unittest.cc b/media/filters/decrypting_demuxer_stream_unittest.cc
index 14485c0e4e..fec56248e0 100644
--- a/media/filters/decrypting_demuxer_stream_unittest.cc
+++ b/media/filters/decrypting_demuxer_stream_unittest.cc
@@ -89,6 +89,13 @@ class DecryptingDemuxerStreamTest : public testing::Test {
decrypted_buffer_(new DecoderBuffer(kFakeBufferSize)) {
}
+ virtual ~DecryptingDemuxerStreamTest() {
+ if (is_decryptor_set_)
+ EXPECT_CALL(*decryptor_, CancelDecrypt(_));
+ demuxer_stream_.reset();
+ message_loop_.RunUntilIdle();
+ }
+
void InitializeAudioAndExpectStatus(const AudioDecoderConfig& config,
PipelineStatus status) {
input_audio_stream_->set_audio_decoder_config(config);
@@ -237,15 +244,6 @@ class DecryptingDemuxerStreamTest : public testing::Test {
message_loop_.RunUntilIdle();
}
- // Stops the |demuxer_stream_| without satisfying/aborting any pending
- // operations.
- void Stop() {
- if (is_decryptor_set_)
- EXPECT_CALL(*decryptor_, CancelDecrypt(Decryptor::kAudio));
- demuxer_stream_->Stop(NewExpectedClosure());
- message_loop_.RunUntilIdle();
- }
-
MOCK_METHOD1(RequestDecryptorNotification, void(const DecryptorReadyCB&));
MOCK_METHOD2(BufferReady, void(DemuxerStream::Status,
@@ -379,8 +377,7 @@ TEST_F(DecryptingDemuxerStreamTest, KeyAdded_DruingPendingDecrypt) {
message_loop_.RunUntilIdle();
}
-// Test resetting when the DecryptingDemuxerStream is in kDecryptorRequested
-// state.
+// Test resetting in kDecryptorRequested state.
TEST_F(DecryptingDemuxerStreamTest, Reset_DuringDecryptorRequested) {
// One for decryptor request, one for canceling request during Reset().
EXPECT_CALL(*this, RequestDecryptorNotification(_))
@@ -392,22 +389,20 @@ TEST_F(DecryptingDemuxerStreamTest, Reset_DuringDecryptorRequested) {
Reset();
}
-// Test resetting when the DecryptingDemuxerStream is in kIdle state but has
-// not returned any buffer.
+// Test resetting in kIdle state before any buffer has been returned.
TEST_F(DecryptingDemuxerStreamTest, Reset_DuringIdleAfterInitialization) {
Initialize();
Reset();
}
-// Test resetting when the DecryptingDemuxerStream is in kIdle state after it
-// has returned one buffer.
+// Test resetting in kIdle state after having returned one buffer.
TEST_F(DecryptingDemuxerStreamTest, Reset_DuringIdleAfterReadOneBuffer) {
Initialize();
EnterNormalReadingState();
Reset();
}
-// Test resetting when DecryptingDemuxerStream is in kPendingDemuxerRead state.
+// Test resetting in kPendingDemuxerRead state.
TEST_F(DecryptingDemuxerStreamTest, Reset_DuringPendingDemuxerRead) {
Initialize();
EnterPendingReadState();
@@ -419,7 +414,7 @@ TEST_F(DecryptingDemuxerStreamTest, Reset_DuringPendingDemuxerRead) {
message_loop_.RunUntilIdle();
}
-// Test resetting when the DecryptingDemuxerStream is in kPendingDecrypt state.
+// Test resetting in kPendingDecrypt state.
TEST_F(DecryptingDemuxerStreamTest, Reset_DuringPendingDecrypt) {
Initialize();
EnterPendingDecryptState();
@@ -429,7 +424,7 @@ TEST_F(DecryptingDemuxerStreamTest, Reset_DuringPendingDecrypt) {
Reset();
}
-// Test resetting when the DecryptingDemuxerStream is in kWaitingForKey state.
+// Test resetting in kWaitingForKey state.
TEST_F(DecryptingDemuxerStreamTest, Reset_DuringWaitingForKey) {
Initialize();
EnterWaitingForKeyState();
@@ -439,7 +434,7 @@ TEST_F(DecryptingDemuxerStreamTest, Reset_DuringWaitingForKey) {
Reset();
}
-// Test resetting after the DecryptingDemuxerStream has been reset.
+// Test resetting after reset.
TEST_F(DecryptingDemuxerStreamTest, Reset_AfterReset) {
Initialize();
EnterNormalReadingState();
@@ -458,7 +453,7 @@ TEST_F(DecryptingDemuxerStreamTest, DemuxerRead_Aborted) {
ReadAndExpectBufferReadyWith(DemuxerStream::kAborted, NULL);
}
-// Test resetting when DecryptingDemuxerStream is waiting for an aborted read.
+// Test resetting when waiting for an aborted read.
TEST_F(DecryptingDemuxerStreamTest, Reset_DuringAbortedDemuxerRead) {
Initialize();
EnterPendingReadState();
@@ -487,8 +482,7 @@ TEST_F(DecryptingDemuxerStreamTest, DemuxerRead_ConfigChanged) {
ReadAndExpectBufferReadyWith(DemuxerStream::kConfigChanged, NULL);
}
-// Test resetting when DecryptingDemuxerStream is waiting for a config changed
-// read.
+// Test resetting when waiting for a config changed read.
TEST_F(DecryptingDemuxerStreamTest, Reset_DuringConfigChangedDemuxerRead) {
Initialize();
EnterPendingReadState();
@@ -501,9 +495,11 @@ TEST_F(DecryptingDemuxerStreamTest, Reset_DuringConfigChangedDemuxerRead) {
message_loop_.RunUntilIdle();
}
-// Test stopping when the DecryptingDemuxerStream is in kDecryptorRequested
-// state.
-TEST_F(DecryptingDemuxerStreamTest, Stop_DuringDecryptorRequested) {
+// The following tests cover destruction in various scenarios. The destruction
+// happens in DecryptingDemuxerStreamTest's destructor.
+
+// Test destruction in kDecryptorRequested state.
+TEST_F(DecryptingDemuxerStreamTest, Destroy_DuringDecryptorRequested) {
// One for decryptor request, one for canceling request during Reset().
EXPECT_CALL(*this, RequestDecryptorNotification(_))
.Times(2);
@@ -511,57 +507,48 @@ TEST_F(DecryptingDemuxerStreamTest, Stop_DuringDecryptorRequested) {
kCodecVorbis, kSampleFormatPlanarF32, CHANNEL_LAYOUT_STEREO, 44100,
NULL, 0, true);
InitializeAudioAndExpectStatus(input_config, PIPELINE_ERROR_ABORT);
- Stop();
}
-// Test stopping when the DecryptingDemuxerStream is in kIdle state but has
-// not returned any buffer.
-TEST_F(DecryptingDemuxerStreamTest, Stop_DuringIdleAfterInitialization) {
+// Test destruction in kIdle state before any buffer has been returned.
+TEST_F(DecryptingDemuxerStreamTest, Destroy_DuringIdleAfterInitialization) {
Initialize();
- Stop();
}
-// Test stopping when the DecryptingDemuxerStream is in kIdle state after it
-// has returned one buffer.
-TEST_F(DecryptingDemuxerStreamTest, Stop_DuringIdleAfterReadOneBuffer) {
+// Test destruction in kIdle state after having returned one buffer.
+TEST_F(DecryptingDemuxerStreamTest, Destroy_DuringIdleAfterReadOneBuffer) {
Initialize();
EnterNormalReadingState();
- Stop();
}
-// Test stopping when DecryptingDemuxerStream is in kPendingDemuxerRead state.
-TEST_F(DecryptingDemuxerStreamTest, Stop_DuringPendingDemuxerRead) {
+// Test destruction in kPendingDemuxerRead state.
+TEST_F(DecryptingDemuxerStreamTest, Destroy_DuringPendingDemuxerRead) {
Initialize();
EnterPendingReadState();
EXPECT_CALL(*this, BufferReady(DemuxerStream::kAborted, IsNull()));
- Stop();
}
-// Test stopping when the DecryptingDemuxerStream is in kPendingDecrypt state.
-TEST_F(DecryptingDemuxerStreamTest, Stop_DuringPendingDecrypt) {
+// Test destruction in kPendingDecrypt state.
+TEST_F(DecryptingDemuxerStreamTest, Destroy_DuringPendingDecrypt) {
Initialize();
EnterPendingDecryptState();
EXPECT_CALL(*this, BufferReady(DemuxerStream::kAborted, IsNull()));
- Stop();
}
-// Test stopping when the DecryptingDemuxerStream is in kWaitingForKey state.
-TEST_F(DecryptingDemuxerStreamTest, Stop_DuringWaitingForKey) {
+// Test destruction in kWaitingForKey state.
+TEST_F(DecryptingDemuxerStreamTest, Destroy_DuringWaitingForKey) {
Initialize();
EnterWaitingForKeyState();
EXPECT_CALL(*this, BufferReady(DemuxerStream::kAborted, IsNull()));
- Stop();
}
-// Test stopping after the DecryptingDemuxerStream has been reset.
-TEST_F(DecryptingDemuxerStreamTest, Stop_AfterReset) {
+// Test destruction after reset.
+TEST_F(DecryptingDemuxerStreamTest, Destroy_AfterReset) {
Initialize();
EnterNormalReadingState();
Reset();
- Stop();
}
} // namespace media
diff --git a/media/filters/decrypting_video_decoder.cc b/media/filters/decrypting_video_decoder.cc
index eb40625f8a..2651c5da74 100644
--- a/media/filters/decrypting_video_decoder.cc
+++ b/media/filters/decrypting_video_decoder.cc
@@ -125,18 +125,12 @@ void DecryptingVideoDecoder::Reset(const base::Closure& closure) {
DoReset();
}
-void DecryptingVideoDecoder::Stop() {
+DecryptingVideoDecoder::~DecryptingVideoDecoder() {
DCHECK(task_runner_->BelongsToCurrentThread());
- DVLOG(2) << "Stop() - state: " << state_;
- // Invalidate all weak pointers so that pending callbacks won't be fired into
- // this object.
- weak_factory_.InvalidateWeakPtrs();
+ if (state_ == kUninitialized)
+ return;
- // At this point the render thread is likely paused (in WebMediaPlayerImpl's
- // Destroy()), so running |closure| can't wait for anything that requires the
- // render thread to be processing messages to complete (such as PPAPI
- // callbacks).
if (decryptor_) {
decryptor_->DeinitializeDecoder(Decryptor::kVideo);
decryptor_ = NULL;
@@ -150,12 +144,6 @@ void DecryptingVideoDecoder::Stop() {
base::ResetAndReturn(&decode_cb_).Run(kAborted);
if (!reset_cb_.is_null())
base::ResetAndReturn(&reset_cb_).Run();
-
- state_ = kStopped;
-}
-
-DecryptingVideoDecoder::~DecryptingVideoDecoder() {
- DCHECK(state_ == kUninitialized || state_ == kStopped) << state_;
}
void DecryptingVideoDecoder::SetDecryptor(Decryptor* decryptor) {
@@ -168,7 +156,7 @@ void DecryptingVideoDecoder::SetDecryptor(Decryptor* decryptor) {
if (!decryptor) {
base::ResetAndReturn(&init_cb_).Run(DECODER_ERROR_NOT_SUPPORTED);
- state_ = kStopped;
+ state_ = kError;
return;
}
@@ -191,7 +179,8 @@ void DecryptingVideoDecoder::FinishInitialization(bool success) {
if (!success) {
base::ResetAndReturn(&init_cb_).Run(DECODER_ERROR_NOT_SUPPORTED);
- state_ = kStopped;
+ decryptor_ = NULL;
+ state_ = kError;
return;
}
diff --git a/media/filters/decrypting_video_decoder.h b/media/filters/decrypting_video_decoder.h
index ac4caf8695..a1f43edfea 100644
--- a/media/filters/decrypting_video_decoder.h
+++ b/media/filters/decrypting_video_decoder.h
@@ -39,7 +39,6 @@ class MEDIA_EXPORT DecryptingVideoDecoder : public VideoDecoder {
virtual void Decode(const scoped_refptr<DecoderBuffer>& buffer,
const DecodeCB& decode_cb) OVERRIDE;
virtual void Reset(const base::Closure& closure) OVERRIDE;
- virtual void Stop() OVERRIDE;
private:
// For a detailed state diagram please see this link: http://goo.gl/8jAok
@@ -53,7 +52,6 @@ class MEDIA_EXPORT DecryptingVideoDecoder : public VideoDecoder {
kPendingDecode,
kWaitingForKey,
kDecodeFinished,
- kStopped,
kError
};
@@ -77,9 +75,6 @@ class MEDIA_EXPORT DecryptingVideoDecoder : public VideoDecoder {
// Reset decoder and call |reset_cb_|.
void DoReset();
- // Free decoder resources and call |stop_cb_|.
- void DoStop();
-
scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
State state_;
diff --git a/media/filters/decrypting_video_decoder_unittest.cc b/media/filters/decrypting_video_decoder_unittest.cc
index 738753b518..ed956ea1d5 100644
--- a/media/filters/decrypting_video_decoder_unittest.cc
+++ b/media/filters/decrypting_video_decoder_unittest.cc
@@ -75,7 +75,7 @@ class DecryptingVideoDecoderTest : public testing::Test {
}
virtual ~DecryptingVideoDecoderTest() {
- Stop();
+ Destroy();
}
// Initializes the |decoder_| and expects |status|. Note the initialization
@@ -209,12 +209,12 @@ class DecryptingVideoDecoderTest : public testing::Test {
message_loop_.RunUntilIdle();
}
- void Stop() {
+ void Destroy() {
EXPECT_CALL(*decryptor_, DeinitializeDecoder(Decryptor::kVideo))
.WillRepeatedly(InvokeWithoutArgs(
this, &DecryptingVideoDecoderTest::AbortAllPendingCBs));
- decoder_->Stop();
+ decoder_.reset();
message_loop_.RunUntilIdle();
}
@@ -399,8 +399,8 @@ TEST_F(DecryptingVideoDecoderTest, Reset_AfterReset) {
Reset();
}
-// Test stopping when the decoder is in kDecryptorRequested state.
-TEST_F(DecryptingVideoDecoderTest, Stop_DuringDecryptorRequested) {
+// Test destruction when the decoder is in kDecryptorRequested state.
+TEST_F(DecryptingVideoDecoderTest, Destroy_DuringDecryptorRequested) {
DecryptorReadyCB decryptor_ready_cb;
EXPECT_CALL(*this, RequestDecryptorNotification(_))
.WillOnce(SaveArg<0>(&decryptor_ready_cb));
@@ -413,16 +413,16 @@ TEST_F(DecryptingVideoDecoderTest, Stop_DuringDecryptorRequested) {
// |decryptor_ready_cb| is saved but not called here.
EXPECT_FALSE(decryptor_ready_cb.is_null());
- // During stop, RequestDecryptorNotification() should be called with a NULL
- // callback to cancel the |decryptor_ready_cb|.
+ // During destruction, RequestDecryptorNotification() should be called with a
+ // NULL callback to cancel the |decryptor_ready_cb|.
EXPECT_CALL(*this, RequestDecryptorNotification(IsNullCallback()))
.WillOnce(ResetAndRunCallback(&decryptor_ready_cb,
reinterpret_cast<Decryptor*>(NULL)));
- Stop();
+ Destroy();
}
-// Test stopping when the decoder is in kPendingDecoderInit state.
-TEST_F(DecryptingVideoDecoderTest, Stop_DuringPendingDecoderInit) {
+// Test destruction when the decoder is in kPendingDecoderInit state.
+TEST_F(DecryptingVideoDecoderTest, Destroy_DuringPendingDecoderInit) {
EXPECT_CALL(*decryptor_, InitializeVideoDecoder(_, _))
.WillOnce(SaveArg<1>(&pending_init_cb_));
@@ -430,57 +430,57 @@ TEST_F(DecryptingVideoDecoderTest, Stop_DuringPendingDecoderInit) {
DECODER_ERROR_NOT_SUPPORTED);
EXPECT_FALSE(pending_init_cb_.is_null());
- Stop();
+ Destroy();
}
-// Test stopping when the decoder is in kIdle state but has not decoded any
+// Test destruction when the decoder is in kIdle state but has not decoded any
// frame.
-TEST_F(DecryptingVideoDecoderTest, Stop_DuringIdleAfterInitialization) {
+TEST_F(DecryptingVideoDecoderTest, Destroy_DuringIdleAfterInitialization) {
Initialize();
- Stop();
+ Destroy();
}
-// Test stopping when the decoder is in kIdle state after it has decoded one
+// Test destruction when the decoder is in kIdle state after it has decoded one
// frame.
-TEST_F(DecryptingVideoDecoderTest, Stop_DuringIdleAfterDecodedOneFrame) {
+TEST_F(DecryptingVideoDecoderTest, Destroy_DuringIdleAfterDecodedOneFrame) {
Initialize();
EnterNormalDecodingState();
- Stop();
+ Destroy();
}
-// Test stopping when the decoder is in kPendingDecode state.
-TEST_F(DecryptingVideoDecoderTest, Stop_DuringPendingDecode) {
+// Test destruction when the decoder is in kPendingDecode state.
+TEST_F(DecryptingVideoDecoderTest, Destroy_DuringPendingDecode) {
Initialize();
EnterPendingDecodeState();
EXPECT_CALL(*this, DecodeDone(VideoDecoder::kAborted));
- Stop();
+ Destroy();
}
-// Test stopping when the decoder is in kWaitingForKey state.
-TEST_F(DecryptingVideoDecoderTest, Stop_DuringWaitingForKey) {
+// Test destruction when the decoder is in kWaitingForKey state.
+TEST_F(DecryptingVideoDecoderTest, Destroy_DuringWaitingForKey) {
Initialize();
EnterWaitingForKeyState();
EXPECT_CALL(*this, DecodeDone(VideoDecoder::kAborted));
- Stop();
+ Destroy();
}
-// Test stopping when the decoder has hit end of stream and is in
+// Test destruction when the decoder has hit end of stream and is in
// kDecodeFinished state.
-TEST_F(DecryptingVideoDecoderTest, Stop_AfterDecodeFinished) {
+TEST_F(DecryptingVideoDecoderTest, Destroy_AfterDecodeFinished) {
Initialize();
EnterNormalDecodingState();
EnterEndOfStreamState();
- Stop();
+ Destroy();
}
-// Test stopping when there is a pending reset on the decoder.
+// Test destruction when there is a pending reset on the decoder.
// Reset is pending because it cannot complete when the video decode callback
// is pending.
-TEST_F(DecryptingVideoDecoderTest, Stop_DuringPendingReset) {
+TEST_F(DecryptingVideoDecoderTest, Destroy_DuringPendingReset) {
Initialize();
EnterPendingDecodeState();
@@ -488,23 +488,15 @@ TEST_F(DecryptingVideoDecoderTest, Stop_DuringPendingReset) {
EXPECT_CALL(*this, DecodeDone(VideoDecoder::kAborted));
decoder_->Reset(NewExpectedClosure());
- Stop();
+ Destroy();
}
-// Test stopping after the decoder has been reset.
-TEST_F(DecryptingVideoDecoderTest, Stop_AfterReset) {
+// Test destruction after the decoder has been reset.
+TEST_F(DecryptingVideoDecoderTest, Destroy_AfterReset) {
Initialize();
EnterNormalDecodingState();
Reset();
- Stop();
-}
-
-// Test stopping after the decoder has been stopped.
-TEST_F(DecryptingVideoDecoderTest, Stop_AfterStop) {
- Initialize();
- EnterNormalDecodingState();
- Stop();
- Stop();
+ Destroy();
}
} // namespace media
diff --git a/media/filters/fake_demuxer_stream.cc b/media/filters/fake_demuxer_stream.cc
index 78386e3b04..941778c576 100644
--- a/media/filters/fake_demuxer_stream.cc
+++ b/media/filters/fake_demuxer_stream.cc
@@ -97,6 +97,10 @@ bool FakeDemuxerStream::SupportsConfigChanges() {
return config_changes_;
}
+VideoRotation FakeDemuxerStream::video_rotation() {
+ return VIDEO_ROTATION_0;
+}
+
void FakeDemuxerStream::HoldNextRead() {
DCHECK(task_runner_->BelongsToCurrentThread());
read_to_hold_ = next_read_num_;
diff --git a/media/filters/fake_demuxer_stream.h b/media/filters/fake_demuxer_stream.h
index bacf0bddef..90efe6ef55 100644
--- a/media/filters/fake_demuxer_stream.h
+++ b/media/filters/fake_demuxer_stream.h
@@ -34,6 +34,7 @@ class FakeDemuxerStream : public DemuxerStream {
virtual Type type() OVERRIDE;
virtual void EnableBitstreamConverter() OVERRIDE;
virtual bool SupportsConfigChanges() OVERRIDE;
+ virtual VideoRotation video_rotation() OVERRIDE;
void Initialize();
diff --git a/media/filters/fake_video_decoder.cc b/media/filters/fake_video_decoder.cc
index 1df718227a..05dc410f87 100644
--- a/media/filters/fake_video_decoder.cc
+++ b/media/filters/fake_video_decoder.cc
@@ -25,7 +25,19 @@ FakeVideoDecoder::FakeVideoDecoder(int decoding_delay,
}
FakeVideoDecoder::~FakeVideoDecoder() {
- DCHECK_EQ(state_, STATE_UNINITIALIZED);
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ if (state_ == STATE_UNINITIALIZED)
+ return;
+
+ if (!init_cb_.IsNull())
+ SatisfyInit();
+ if (!held_decode_callbacks_.empty())
+ SatisfyDecode();
+ if (!reset_cb_.IsNull())
+ SatisfyReset();
+
+ decoded_frames_.clear();
}
void FakeVideoDecoder::Initialize(const VideoDecoderConfig& config,
@@ -64,10 +76,10 @@ void FakeVideoDecoder::Decode(const scoped_refptr<DecoderBuffer>& buffer,
max_parallel_decoding_requests_);
int buffer_size = buffer->end_of_stream() ? 0 : buffer->data_size();
- DecodeCB wrapped_decode_cb =
- BindToCurrentLoop(base::Bind(&FakeVideoDecoder::OnFrameDecoded,
- weak_factory_.GetWeakPtr(),
- buffer_size, decode_cb));
+ DecodeCB wrapped_decode_cb = base::Bind(&FakeVideoDecoder::OnFrameDecoded,
+ weak_factory_.GetWeakPtr(),
+ buffer_size,
+ BindToCurrentLoop(decode_cb));
if (state_ == STATE_ERROR) {
wrapped_decode_cb.Run(kDecodeError);
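
The rebinding above changes where the work happens: OnFrameDecoded() now runs synchronously wherever |wrapped_decode_cb| is invoked, and only the caller's |decode_cb| is bounced back to its message loop. A simplified stand-in for what BindToCurrentLoop() contributes (an assumption sketched with std::function, not the Chromium helper itself):

#include <functional>
#include <utility>

// Wraps |cb| so that running the wrapper never invokes |cb| inline; instead it
// hands |cb| to |post_task|, standing in for "post to the message loop that
// was current at bind time".
std::function<void()> BindToLoopSketch(
    std::function<void(std::function<void()>)> post_task,
    std::function<void()> cb) {
  return [post_task, cb]() { post_task(cb); };
}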
@@ -100,20 +112,6 @@ void FakeVideoDecoder::Reset(const base::Closure& closure) {
DoReset();
}
-void FakeVideoDecoder::Stop() {
- DCHECK(thread_checker_.CalledOnValidThread());
-
- if (!init_cb_.IsNull())
- SatisfyInit();
- if (!held_decode_callbacks_.empty())
- SatisfyDecode();
- if (!reset_cb_.IsNull())
- SatisfyReset();
-
- decoded_frames_.clear();
- state_ = STATE_UNINITIALIZED;
-}
-
void FakeVideoDecoder::HoldNextInit() {
DCHECK(thread_checker_.CalledOnValidThread());
init_cb_.HoldCallback();
diff --git a/media/filters/fake_video_decoder.h b/media/filters/fake_video_decoder.h
index 21cb2a1f79..5e476d8fae 100644
--- a/media/filters/fake_video_decoder.h
+++ b/media/filters/fake_video_decoder.h
@@ -43,7 +43,6 @@ class FakeVideoDecoder : public VideoDecoder {
virtual void Decode(const scoped_refptr<DecoderBuffer>& buffer,
const DecodeCB& decode_cb) OVERRIDE;
virtual void Reset(const base::Closure& closure) OVERRIDE;
- virtual void Stop() OVERRIDE;
virtual int GetMaxDecodeRequests() const OVERRIDE;
// Holds the next init/decode/reset callback from firing.
diff --git a/media/filters/fake_video_decoder_unittest.cc b/media/filters/fake_video_decoder_unittest.cc
index 2772b54ffb..3598a7a48e 100644
--- a/media/filters/fake_video_decoder_unittest.cc
+++ b/media/filters/fake_video_decoder_unittest.cc
@@ -39,7 +39,7 @@ class FakeVideoDecoderTest
is_reset_pending_(false) {}
virtual ~FakeVideoDecoderTest() {
- Stop();
+ Destroy();
}
void InitializeWithConfig(const VideoDecoderConfig& config) {
@@ -197,8 +197,8 @@ class FakeVideoDecoderTest
ExpectResetResult(OK);
}
- void Stop() {
- decoder_->Stop();
+ void Destroy() {
+ decoder_.reset();
message_loop_.RunUntilIdle();
// All pending callbacks must have been fired.
@@ -365,35 +365,35 @@ TEST_P(FakeVideoDecoderTest, Reset_PendingDuringPendingRead) {
SatisfyReset();
}
-TEST_P(FakeVideoDecoderTest, Stop) {
+TEST_P(FakeVideoDecoderTest, Destroy) {
Initialize();
ReadOneFrame();
ExpectReadResult(OK);
- Stop();
+ Destroy();
}
-TEST_P(FakeVideoDecoderTest, Stop_DuringPendingInitialization) {
+TEST_P(FakeVideoDecoderTest, Destroy_DuringPendingInitialization) {
EnterPendingInitState();
- Stop();
+ Destroy();
}
-TEST_P(FakeVideoDecoderTest, Stop_DuringPendingRead) {
+TEST_P(FakeVideoDecoderTest, Destroy_DuringPendingRead) {
Initialize();
EnterPendingReadState();
- Stop();
+ Destroy();
}
-TEST_P(FakeVideoDecoderTest, Stop_DuringPendingReset) {
+TEST_P(FakeVideoDecoderTest, Destroy_DuringPendingReset) {
Initialize();
EnterPendingResetState();
- Stop();
+ Destroy();
}
-TEST_P(FakeVideoDecoderTest, Stop_DuringPendingReadAndPendingReset) {
+TEST_P(FakeVideoDecoderTest, Destroy_DuringPendingReadAndPendingReset) {
Initialize();
EnterPendingReadState();
EnterPendingResetState();
- Stop();
+ Destroy();
}
} // namespace media
diff --git a/media/filters/ffmpeg_audio_decoder.cc b/media/filters/ffmpeg_audio_decoder.cc
index 28347acb52..b45b9401b5 100644
--- a/media/filters/ffmpeg_audio_decoder.cc
+++ b/media/filters/ffmpeg_audio_decoder.cc
@@ -135,9 +135,12 @@ FFmpegAudioDecoder::FFmpegAudioDecoder(
}
FFmpegAudioDecoder::~FFmpegAudioDecoder() {
- DCHECK_EQ(state_, kUninitialized);
- DCHECK(!codec_context_);
- DCHECK(!av_frame_);
+ DCHECK(task_runner_->BelongsToCurrentThread());
+
+ if (state_ != kUninitialized) {
+ ReleaseFFmpegResources();
+ ResetTimestampState();
+ }
}
void FFmpegAudioDecoder::Initialize(const AudioDecoderConfig& config,
@@ -192,17 +195,6 @@ void FFmpegAudioDecoder::Reset(const base::Closure& closure) {
task_runner_->PostTask(FROM_HERE, closure);
}
-void FFmpegAudioDecoder::Stop() {
- DCHECK(task_runner_->BelongsToCurrentThread());
-
- if (state_ == kUninitialized)
- return;
-
- ReleaseFFmpegResources();
- ResetTimestampState();
- state_ = kUninitialized;
-}
-
void FFmpegAudioDecoder::DecodeBuffer(
const scoped_refptr<DecoderBuffer>& buffer,
const DecodeCB& decode_cb) {
diff --git a/media/filters/ffmpeg_audio_decoder.h b/media/filters/ffmpeg_audio_decoder.h
index 39a408973d..680128c3fd 100644
--- a/media/filters/ffmpeg_audio_decoder.h
+++ b/media/filters/ffmpeg_audio_decoder.h
@@ -42,7 +42,6 @@ class MEDIA_EXPORT FFmpegAudioDecoder : public AudioDecoder {
virtual void Decode(const scoped_refptr<DecoderBuffer>& buffer,
const DecodeCB& decode_cb) OVERRIDE;
virtual void Reset(const base::Closure& closure) OVERRIDE;
- virtual void Stop() OVERRIDE;
private:
// There are four states the decoder can be in:
diff --git a/media/filters/ffmpeg_demuxer.cc b/media/filters/ffmpeg_demuxer.cc
index 92c438ea3a..a2264dcc8e 100644
--- a/media/filters/ffmpeg_demuxer.cc
+++ b/media/filters/ffmpeg_demuxer.cc
@@ -14,6 +14,7 @@
#include "base/memory/scoped_ptr.h"
#include "base/message_loop/message_loop_proxy.h"
#include "base/metrics/sparse_histogram.h"
+#include "base/strings/string_number_conversions.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/sys_byteorder.h"
@@ -84,21 +85,22 @@ static base::TimeDelta ExtractStartTime(AVStream* stream,
//
// FFmpegDemuxerStream
//
-FFmpegDemuxerStream::FFmpegDemuxerStream(
- FFmpegDemuxer* demuxer,
- AVStream* stream,
- bool discard_negative_timestamps)
+FFmpegDemuxerStream::FFmpegDemuxerStream(FFmpegDemuxer* demuxer,
+ AVStream* stream)
: demuxer_(demuxer),
task_runner_(base::MessageLoopProxy::current()),
stream_(stream),
type_(UNKNOWN),
end_of_stream_(false),
last_packet_timestamp_(kNoTimestamp()),
+ video_rotation_(VIDEO_ROTATION_0),
bitstream_converter_enabled_(false),
- discard_negative_timestamps_(discard_negative_timestamps) {
+ fixup_negative_ogg_timestamps_(false) {
DCHECK(demuxer_);
bool is_encrypted = false;
+ int rotation = 0;
+ AVDictionaryEntry* rotation_entry = NULL;
// Determine our media format.
switch (stream->codec->codec_type) {
@@ -111,6 +113,28 @@ FFmpegDemuxerStream::FFmpegDemuxerStream(
type_ = VIDEO;
AVStreamToVideoDecoderConfig(stream, &video_config_, true);
is_encrypted = video_config_.is_encrypted();
+
+ rotation_entry = av_dict_get(stream->metadata, "rotate", NULL, 0);
+ if (rotation_entry && rotation_entry->value && rotation_entry->value[0])
+ base::StringToInt(rotation_entry->value, &rotation);
+
+ switch (rotation) {
+ case 0:
+ break;
+ case 90:
+ video_rotation_ = VIDEO_ROTATION_90;
+ break;
+ case 180:
+ video_rotation_ = VIDEO_ROTATION_180;
+ break;
+ case 270:
+ video_rotation_ = VIDEO_ROTATION_270;
+ break;
+ default:
+ LOG(ERROR) << "Unsupported video rotation metadata: " << rotation;
+ break;
+ }
+
break;
case AVMEDIA_TYPE_SUBTITLE:
type_ = TEXT;
@@ -254,19 +278,37 @@ void FFmpegDemuxerStream::EnqueuePacket(ScopedAVPacket packet) {
buffer->set_decrypt_config(decrypt_config.Pass());
}
- buffer->set_duration(
- ConvertStreamTimestamp(stream_->time_base, packet->duration));
+ if (packet->duration >= 0) {
+ buffer->set_duration(
+ ConvertStreamTimestamp(stream_->time_base, packet->duration));
+ } else {
+ // TODO(wolenetz): Remove when FFmpeg stops returning negative durations.
+ // https://crbug.com/394418
+ DVLOG(1) << "FFmpeg returned a buffer with a negative duration! "
+ << packet->duration;
+ buffer->set_duration(kNoTimestamp());
+ }
// Note: If pts is AV_NOPTS_VALUE, stream_timestamp will be kNoTimestamp().
const base::TimeDelta stream_timestamp =
ConvertStreamTimestamp(stream_->time_base, packet->pts);
if (stream_timestamp != kNoTimestamp()) {
- buffer->set_timestamp(stream_timestamp - demuxer_->start_time());
+    // If this is an OGG file with negative timestamps, don't rebase any other
+    // stream types against the negative starting time.
+ base::TimeDelta start_time = demuxer_->start_time();
+ if (fixup_negative_ogg_timestamps_ && type() != AUDIO &&
+ start_time < base::TimeDelta()) {
+ DCHECK(stream_timestamp >= base::TimeDelta());
+ start_time = base::TimeDelta();
+ }
+
+ buffer->set_timestamp(stream_timestamp - start_time);
// If enabled, mark packets with negative timestamps for post-decode
// discard.
- if (discard_negative_timestamps_ && stream_timestamp < base::TimeDelta()) {
+ if (fixup_negative_ogg_timestamps_ &&
+ stream_timestamp < base::TimeDelta()) {
if (stream_timestamp + buffer->duration() < base::TimeDelta()) {
// Discard the entire packet if it's entirely before zero.
buffer->set_discard_padding(
@@ -372,6 +414,10 @@ VideoDecoderConfig FFmpegDemuxerStream::video_decoder_config() {
return video_config_;
}
+VideoRotation FFmpegDemuxerStream::video_rotation() {
+ return video_rotation_;
+}
+
FFmpegDemuxerStream::~FFmpegDemuxerStream() {
DCHECK(!demuxer_);
DCHECK(read_cb_.is_null());
@@ -502,12 +548,21 @@ void FFmpegDemuxer::Seek(base::TimeDelta time, const PipelineStatusCB& cb) {
// FFmpeg requires seeks to be adjusted according to the lowest starting time.
const base::TimeDelta seek_time = time + start_time_;
- // Choose the preferred stream if |seek_time| occurs after its starting time,
- // otherwise use the fallback stream.
+  // Choose the seeking stream based on whether it contains the seek time; if
+  // no match can be found, use the preferred stream.
+ //
+ // TODO(dalecurtis): Currently FFmpeg does not ensure that all streams in a
+ // given container will demux all packets after the seek point. Instead it
+ // only guarantees that all packets after the file position of the seek will
+ // be demuxed. It's an open question whether FFmpeg should fix this:
+ // http://lists.ffmpeg.org/pipermail/ffmpeg-devel/2014-June/159212.html
+ // Tracked by http://crbug.com/387996.
DCHECK(preferred_stream_for_seeking_.second != kNoTimestamp());
- const int stream_index = seek_time >= preferred_stream_for_seeking_.second
- ? preferred_stream_for_seeking_.first
- : fallback_stream_for_seeking_.first;
+ const int stream_index =
+ seek_time < preferred_stream_for_seeking_.second &&
+ seek_time >= fallback_stream_for_seeking_.second
+ ? fallback_stream_for_seeking_.first
+ : preferred_stream_for_seeking_.first;
DCHECK_NE(stream_index, -1);
const AVStream* seeking_stream =
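
The rewritten selection above reads as: use the fallback stream only when the seek target falls before the preferred (video) stream's start time but at or after the fallback's. A small sketch of that predicate with illustrative values; the helper name is hypothetical, and StreamSeekInfo is assumed to be std::pair<int, base::TimeDelta> based on the .first/.second usage in this hunk:

    typedef std::pair<int, base::TimeDelta> StreamSeekInfo;

    static int ChooseSeekStreamIndex(const StreamSeekInfo& preferred,
                                     const StreamSeekInfo& fallback,
                                     base::TimeDelta seek_time) {
      // Fall back only when the target lies in the window covered solely by
      // the fallback stream.
      if (seek_time < preferred.second && seek_time >= fallback.second)
        return fallback.first;
      return preferred.first;
    }

    // Example: preferred = {0, 20ms}, fallback = {1, -3ms}. Seeking to 5ms
    // picks stream 1 (only the fallback covers it); seeking to 25ms picks
    // stream 0.
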
@@ -722,7 +777,6 @@ void FFmpegDemuxer::OnFindStreamInfoDone(const PipelineStatusCB& status_cb,
AVStream* stream = format_context->streams[i];
const AVCodecContext* codec_context = stream->codec;
const AVMediaType codec_type = codec_context->codec_type;
- bool discard_negative_timestamps = false;
if (codec_type == AVMEDIA_TYPE_AUDIO) {
if (audio_stream)
@@ -737,13 +791,6 @@ void FFmpegDemuxer::OnFindStreamInfoDone(const PipelineStatusCB& status_cb,
if (!audio_config.IsValidConfig())
continue;
audio_stream = stream;
-
- // Enable post-decode frame dropping for packets with negative timestamps
- // as outlined in section A.2 in the Ogg Vorbis spec:
- // http://xiph.org/vorbis/doc/Vorbis_I_spec.html
- discard_negative_timestamps =
- audio_config.codec() == kCodecVorbis &&
- strcmp(glue_->format_context()->iformat->name, "ogg") == 0;
} else if (codec_type == AVMEDIA_TYPE_VIDEO) {
if (video_stream)
continue;
@@ -766,13 +813,21 @@ void FFmpegDemuxer::OnFindStreamInfoDone(const PipelineStatusCB& status_cb,
continue;
}
- streams_[i] =
- new FFmpegDemuxerStream(this, stream, discard_negative_timestamps);
+ streams_[i] = new FFmpegDemuxerStream(this, stream);
max_duration = std::max(max_duration, streams_[i]->duration());
const base::TimeDelta start_time =
ExtractStartTime(stream, start_time_estimates[i]);
- if (start_time == kNoTimestamp())
+ const bool has_start_time = start_time != kNoTimestamp();
+
+ // Always prefer the video stream for seeking. If none exists, we'll swap
+ // the fallback stream with the preferred stream below.
+ if (codec_type == AVMEDIA_TYPE_VIDEO) {
+ preferred_stream_for_seeking_ =
+ StreamSeekInfo(i, has_start_time ? start_time : base::TimeDelta());
+ }
+
+ if (!has_start_time)
continue;
if (start_time < start_time_) {
@@ -780,13 +835,8 @@ void FFmpegDemuxer::OnFindStreamInfoDone(const PipelineStatusCB& status_cb,
// Choose the stream with the lowest starting time as the fallback stream
// for seeking. Video should always be preferred.
- fallback_stream_for_seeking_ = std::make_pair(i, start_time);
+ fallback_stream_for_seeking_ = StreamSeekInfo(i, start_time);
}
-
- // Always prefer the video stream for seeking. If none exists, we'll swap
- // the fallback stream with the preferred stream below.
- if (codec_type == AVMEDIA_TYPE_VIDEO)
- preferred_stream_for_seeking_ = std::make_pair(i, start_time);
}
if (!audio_stream && !video_stream) {
@@ -809,12 +859,33 @@ void FFmpegDemuxer::OnFindStreamInfoDone(const PipelineStatusCB& status_cb,
max_duration = kInfiniteDuration();
}
+  // Ogg has some peculiarities around negative timestamps, so use this flag to
+  // set up the FFmpegDemuxerStreams appropriately.
+ //
+ // Post-decode frame dropping for packets with negative timestamps is outlined
+ // in section A.2 in the Ogg Vorbis spec:
+ // http://xiph.org/vorbis/doc/Vorbis_I_spec.html
+ if (strcmp(format_context->iformat->name, "ogg") == 0 && audio_stream &&
+ audio_stream->codec->codec_id == AV_CODEC_ID_VORBIS) {
+ for (size_t i = 0; i < streams_.size(); ++i) {
+ if (streams_[i])
+ streams_[i]->enable_negative_timestamp_fixups_for_ogg();
+ }
+
+    // Fix up the seeking information to avoid selecting the audio stream
+    // simply because it has a lower starting time.
+ if (fallback_stream_for_seeking_.first == audio_stream->index &&
+ fallback_stream_for_seeking_.second < base::TimeDelta()) {
+ fallback_stream_for_seeking_.second = base::TimeDelta();
+ }
+ }
+
// If no start time could be determined, default to zero and prefer the video
// stream over the audio stream for seeking. E.g., The WAV demuxer does not
// put timestamps on its frames.
if (start_time_ == kInfiniteDuration()) {
start_time_ = base::TimeDelta();
- preferred_stream_for_seeking_ = std::make_pair(
+ preferred_stream_for_seeking_ = StreamSeekInfo(
video_stream ? video_stream->index : audio_stream->index, start_time_);
} else if (!video_stream) {
// If no video stream exists, use the audio or text stream found above.
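
Combined with the EnqueuePacket() change earlier in this file, the Ogg fix-up means audio keeps its negative timestamps (for post-decode discard) while other stream types are no longer shifted by the negative start time. A condensed sketch of that rebasing decision, using the names from this patch; the free function itself is illustrative, not part of the patch:

    static base::TimeDelta RebaseTimestamp(base::TimeDelta stream_timestamp,
                                           base::TimeDelta demuxer_start_time,
                                           DemuxerStream::Type type,
                                           bool fixup_negative_ogg_timestamps) {
      base::TimeDelta start_time = demuxer_start_time;
      // Only audio is rebased against a negative Ogg start time; other stream
      // types keep their original, non-negative timestamps.
      if (fixup_negative_ogg_timestamps && type != DemuxerStream::AUDIO &&
          start_time < base::TimeDelta()) {
        start_time = base::TimeDelta();
      }
      return stream_timestamp - start_time;
    }

    // With bear.ogv's -15964us start time, the first audio packet rebases from
    // -15964us to 0us (and is marked for full discard), while the first video
    // packet stays at 0us rather than shifting to +15964us.
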
diff --git a/media/filters/ffmpeg_demuxer.h b/media/filters/ffmpeg_demuxer.h
index ca38f1b922..3df575fa94 100644
--- a/media/filters/ffmpeg_demuxer.h
+++ b/media/filters/ffmpeg_demuxer.h
@@ -58,12 +58,7 @@ class FFmpegDemuxerStream : public DemuxerStream {
public:
// Keeps a copy of |demuxer| and initializes itself using information inside
// |stream|. Both parameters must outlive |this|.
- // |discard_negative_timestamps| tells the DemuxerStream that all packets with
- // negative timestamps should be marked for post-decode discard. All decoded
- // data before time zero will be discarded.
- FFmpegDemuxerStream(FFmpegDemuxer* demuxer,
- AVStream* stream,
- bool discard_negative_timestamps);
+ FFmpegDemuxerStream(FFmpegDemuxer* demuxer, AVStream* stream);
virtual ~FFmpegDemuxerStream();
// Enqueues the given AVPacket. It is invalid to queue a |packet| after
@@ -82,6 +77,14 @@ class FFmpegDemuxerStream : public DemuxerStream {
base::TimeDelta duration() const { return duration_; }
+ // Enables fixes for ogg files with negative timestamps. For AUDIO streams,
+ // all packets with negative timestamps will be marked for post-decode
+ // discard. For all other stream types, if FFmpegDemuxer::start_time() is
+ // negative, it will not be used to shift timestamps during EnqueuePacket().
+ void enable_negative_timestamp_fixups_for_ogg() {
+ fixup_negative_ogg_timestamps_ = true;
+ }
+
// DemuxerStream implementation.
virtual Type type() OVERRIDE;
virtual void Read(const ReadCB& read_cb) OVERRIDE;
@@ -89,6 +92,7 @@ class FFmpegDemuxerStream : public DemuxerStream {
virtual bool SupportsConfigChanges() OVERRIDE;
virtual AudioDecoderConfig audio_decoder_config() OVERRIDE;
virtual VideoDecoderConfig video_decoder_config() OVERRIDE;
+ virtual VideoRotation video_rotation() OVERRIDE;
// Returns the range of buffered data in this stream.
Ranges<base::TimeDelta> GetBufferedRanges() const;
@@ -130,6 +134,7 @@ class FFmpegDemuxerStream : public DemuxerStream {
bool end_of_stream_;
base::TimeDelta last_packet_timestamp_;
Ranges<base::TimeDelta> buffered_ranges_;
+ VideoRotation video_rotation_;
DecoderBufferQueue buffer_queue_;
ReadCB read_cb_;
@@ -141,7 +146,7 @@ class FFmpegDemuxerStream : public DemuxerStream {
bool bitstream_converter_enabled_;
std::string encryption_key_id_;
- const bool discard_negative_timestamps_;
+ bool fixup_negative_ogg_timestamps_;
DISALLOW_COPY_AND_ASSIGN(FFmpegDemuxerStream);
};
diff --git a/media/filters/ffmpeg_demuxer_unittest.cc b/media/filters/ffmpeg_demuxer_unittest.cc
index 636b757354..bf72e29a17 100644
--- a/media/filters/ffmpeg_demuxer_unittest.cc
+++ b/media/filters/ffmpeg_demuxer_unittest.cc
@@ -193,6 +193,10 @@ class FFmpegDemuxerTest : public testing::Test {
return demuxer_->glue_->format_context();
}
+ int preferred_seeking_stream_index() const {
+ return demuxer_->preferred_stream_for_seeking_.first;
+ }
+
void ReadUntilEndOfStream(DemuxerStream* stream) {
bool got_eos_buffer = false;
const int kMaxBuffers = 170;
@@ -412,6 +416,12 @@ TEST_F(FFmpegDemuxerTest, Read_Text) {
message_loop_.Run();
}
+TEST_F(FFmpegDemuxerTest, SeekInitialized_NoVideoStartTime) {
+ CreateDemuxer("audio-start-time-only.webm");
+ InitializeDemuxer();
+ EXPECT_EQ(0, preferred_seeking_stream_index());
+}
+
TEST_F(FFmpegDemuxerTest, Read_VideoPositiveStartTime) {
const int64 kTimelineOffsetMs = 1352550896000LL;
@@ -479,30 +489,86 @@ TEST_F(FFmpegDemuxerTest, Read_AudioNoStartTime) {
}
}
-TEST_F(FFmpegDemuxerTest, Read_AudioNegativeStartTimeAndOggDiscard) {
+// TODO(dalecurtis): Test is disabled since FFmpeg does not currently guarantee
+// the order of demuxed packets in OGG containers. Re-enable once we decide to
+// either work around it or attempt a fix upstream. See http://crbug.com/387996.
+TEST_F(FFmpegDemuxerTest,
+ DISABLED_Read_AudioNegativeStartTimeAndOggDiscard_Bear) {
// Many ogg files have negative starting timestamps, so ensure demuxing and
// seeking work correctly with a negative start time.
CreateDemuxer("bear.ogv");
InitializeDemuxer();
+ // Attempt a read from the video stream and run the message loop until done.
+ DemuxerStream* video = demuxer_->GetStream(DemuxerStream::VIDEO);
+ DemuxerStream* audio = demuxer_->GetStream(DemuxerStream::AUDIO);
+
// Run the test twice with a seek in between.
for (int i = 0; i < 2; ++i) {
- demuxer_->GetStream(DemuxerStream::AUDIO)->Read(
+ audio->Read(
NewReadCBWithCheckedDiscard(FROM_HERE, 40, 0, kInfiniteDuration()));
message_loop_.Run();
- demuxer_->GetStream(DemuxerStream::AUDIO)->Read(
+ audio->Read(
NewReadCBWithCheckedDiscard(FROM_HERE, 41, 2903, kInfiniteDuration()));
message_loop_.Run();
- demuxer_->GetStream(DemuxerStream::AUDIO)->Read(NewReadCBWithCheckedDiscard(
+ audio->Read(NewReadCBWithCheckedDiscard(
FROM_HERE, 173, 5805, base::TimeDelta::FromMicroseconds(10159)));
message_loop_.Run();
- demuxer_->GetStream(DemuxerStream::AUDIO)
- ->Read(NewReadCB(FROM_HERE, 148, 18866));
+ audio->Read(NewReadCB(FROM_HERE, 148, 18866));
message_loop_.Run();
EXPECT_EQ(base::TimeDelta::FromMicroseconds(-15964),
demuxer_->start_time());
+ video->Read(NewReadCB(FROM_HERE, 5751, 0));
+ message_loop_.Run();
+
+ video->Read(NewReadCB(FROM_HERE, 846, 33367));
+ message_loop_.Run();
+
+ video->Read(NewReadCB(FROM_HERE, 1255, 66733));
+ message_loop_.Run();
+
+ // Seek back to the beginning and repeat the test.
+ WaitableMessageLoopEvent event;
+ demuxer_->Seek(base::TimeDelta(), event.GetPipelineStatusCB());
+ event.RunAndWaitForStatus(PIPELINE_OK);
+ }
+}
+
+// Same test as above, but using sync2.ogv, which has the video stream muxed
+// before the audio stream, so seeking based only on start time will fail since
+// FFmpeg is essentially just seeking based on file position.
+TEST_F(FFmpegDemuxerTest, Read_AudioNegativeStartTimeAndOggDiscard_Sync) {
+ // Many ogg files have negative starting timestamps, so ensure demuxing and
+ // seeking work correctly with a negative start time.
+ CreateDemuxer("sync2.ogv");
+ InitializeDemuxer();
+
+ // Attempt a read from the video stream and run the message loop until done.
+ DemuxerStream* video = demuxer_->GetStream(DemuxerStream::VIDEO);
+ DemuxerStream* audio = demuxer_->GetStream(DemuxerStream::AUDIO);
+
+ // Run the test twice with a seek in between.
+ for (int i = 0; i < 2; ++i) {
+ audio->Read(NewReadCBWithCheckedDiscard(
+ FROM_HERE, 1, 0, base::TimeDelta::FromMicroseconds(2902)));
+ message_loop_.Run();
+
+ audio->Read(NewReadCB(FROM_HERE, 1, 2902));
+ message_loop_.Run();
+ EXPECT_EQ(base::TimeDelta::FromMicroseconds(-2902),
+ demuxer_->start_time());
+
+ video->Read(NewReadCB(FROM_HERE, 9997, 0));
+ message_loop_.Run();
+
+ video->Read(NewReadCB(FROM_HERE, 16, 33241));
+ message_loop_.Run();
+
+ video->Read(NewReadCB(FROM_HERE, 631, 66482));
+ message_loop_.Run();
+
// Seek back to the beginning and repeat the test.
WaitableMessageLoopEvent event;
demuxer_->Seek(base::TimeDelta(), event.GetPipelineStatusCB());
@@ -850,6 +916,42 @@ TEST_F(FFmpegDemuxerTest, IsValidAnnexB) {
}
}
+TEST_F(FFmpegDemuxerTest, Rotate_Metadata_0) {
+ CreateDemuxer("bear_rotate_0.mp4");
+ InitializeDemuxer();
+
+ DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::VIDEO);
+ ASSERT_TRUE(stream);
+ ASSERT_EQ(VIDEO_ROTATION_0, stream->video_rotation());
+}
+
+TEST_F(FFmpegDemuxerTest, Rotate_Metadata_90) {
+ CreateDemuxer("bear_rotate_90.mp4");
+ InitializeDemuxer();
+
+ DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::VIDEO);
+ ASSERT_TRUE(stream);
+ ASSERT_EQ(VIDEO_ROTATION_90, stream->video_rotation());
+}
+
+TEST_F(FFmpegDemuxerTest, Rotate_Metadata_180) {
+ CreateDemuxer("bear_rotate_180.mp4");
+ InitializeDemuxer();
+
+ DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::VIDEO);
+ ASSERT_TRUE(stream);
+ ASSERT_EQ(VIDEO_ROTATION_180, stream->video_rotation());
+}
+
+TEST_F(FFmpegDemuxerTest, Rotate_Metadata_270) {
+ CreateDemuxer("bear_rotate_270.mp4");
+ InitializeDemuxer();
+
+ DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::VIDEO);
+ ASSERT_TRUE(stream);
+ ASSERT_EQ(VIDEO_ROTATION_270, stream->video_rotation());
+}
+
#endif
} // namespace media
diff --git a/media/filters/ffmpeg_video_decoder.cc b/media/filters/ffmpeg_video_decoder.cc
index 3436aa9955..b7da7f7a46 100644
--- a/media/filters/ffmpeg_video_decoder.cc
+++ b/media/filters/ffmpeg_video_decoder.cc
@@ -241,20 +241,11 @@ void FFmpegVideoDecoder::Reset(const base::Closure& closure) {
task_runner_->PostTask(FROM_HERE, closure);
}
-void FFmpegVideoDecoder::Stop() {
+FFmpegVideoDecoder::~FFmpegVideoDecoder() {
DCHECK(task_runner_->BelongsToCurrentThread());
- if (state_ == kUninitialized)
- return;
-
- ReleaseFFmpegResources();
- state_ = kUninitialized;
-}
-
-FFmpegVideoDecoder::~FFmpegVideoDecoder() {
- DCHECK_EQ(kUninitialized, state_);
- DCHECK(!codec_context_);
- DCHECK(!av_frame_);
+ if (state_ != kUninitialized)
+ ReleaseFFmpegResources();
}
bool FFmpegVideoDecoder::FFmpegDecode(
diff --git a/media/filters/ffmpeg_video_decoder.h b/media/filters/ffmpeg_video_decoder.h
index d7b35f1d37..529d69e70b 100644
--- a/media/filters/ffmpeg_video_decoder.h
+++ b/media/filters/ffmpeg_video_decoder.h
@@ -43,7 +43,6 @@ class MEDIA_EXPORT FFmpegVideoDecoder : public VideoDecoder {
virtual void Decode(const scoped_refptr<DecoderBuffer>& buffer,
const DecodeCB& decode_cb) OVERRIDE;
virtual void Reset(const base::Closure& closure) OVERRIDE;
- virtual void Stop() OVERRIDE;
// Callback called from within FFmpeg to allocate a buffer based on
// the dimensions of |codec_context|. See AVCodecContext.get_buffer2
diff --git a/media/filters/ffmpeg_video_decoder_unittest.cc b/media/filters/ffmpeg_video_decoder_unittest.cc
index 9ccdbc2865..c3fde2700b 100644
--- a/media/filters/ffmpeg_video_decoder_unittest.cc
+++ b/media/filters/ffmpeg_video_decoder_unittest.cc
@@ -61,7 +61,7 @@ class FFmpegVideoDecoderTest : public testing::Test {
}
virtual ~FFmpegVideoDecoderTest() {
- Stop();
+ Destroy();
}
void Initialize() {
@@ -89,8 +89,8 @@ class FFmpegVideoDecoderTest : public testing::Test {
message_loop_.RunUntilIdle();
}
- void Stop() {
- decoder_->Stop();
+ void Destroy() {
+ decoder_.reset();
message_loop_.RunUntilIdle();
}
@@ -421,25 +421,25 @@ TEST_F(FFmpegVideoDecoderTest, Reset_EndOfStream) {
Reset();
}
-// Test stopping when decoder has initialized but not decoded.
-TEST_F(FFmpegVideoDecoderTest, Stop_Initialized) {
+// Test destruction when decoder has initialized but not decoded.
+TEST_F(FFmpegVideoDecoderTest, Destroy_Initialized) {
Initialize();
- Stop();
+ Destroy();
}
-// Test stopping when decoder has decoded single frame.
-TEST_F(FFmpegVideoDecoderTest, Stop_Decoding) {
+// Test destruction when decoder has decoded single frame.
+TEST_F(FFmpegVideoDecoderTest, Destroy_Decoding) {
Initialize();
EnterDecodingState();
- Stop();
+ Destroy();
}
-// Test stopping when decoder has hit end of stream.
-TEST_F(FFmpegVideoDecoderTest, Stop_EndOfStream) {
+// Test destruction when decoder has hit end of stream.
+TEST_F(FFmpegVideoDecoderTest, Destroy_EndOfStream) {
Initialize();
EnterDecodingState();
EnterEndOfStreamState();
- Stop();
+ Destroy();
}
} // namespace media
diff --git a/media/filters/frame_processor.cc b/media/filters/frame_processor.cc
index 68f4c613d8..a10ed5017e 100644
--- a/media/filters/frame_processor.cc
+++ b/media/filters/frame_processor.cc
@@ -4,20 +4,167 @@
#include "media/filters/frame_processor.h"
+#include <cstdlib>
+
#include "base/stl_util.h"
#include "media/base/buffers.h"
#include "media/base/stream_parser_buffer.h"
namespace media {
+// Helper class to capture per-track details needed by a frame processor. Some
+// of this information may be duplicated in the short-term in the associated
+// ChunkDemuxerStream and SourceBufferStream for a track.
+// This parallels each of a SourceBuffer's Track Buffers in the MSE spec at
+// http://www.w3.org/TR/media-source/#track-buffers.
+class MseTrackBuffer {
+ public:
+ explicit MseTrackBuffer(ChunkDemuxerStream* stream);
+ ~MseTrackBuffer();
+
+ // Get/set |last_decode_timestamp_|.
+ base::TimeDelta last_decode_timestamp() const {
+ return last_decode_timestamp_;
+ }
+ void set_last_decode_timestamp(base::TimeDelta timestamp) {
+ last_decode_timestamp_ = timestamp;
+ }
+
+ // Get/set |last_frame_duration_|.
+ base::TimeDelta last_frame_duration() const {
+ return last_frame_duration_;
+ }
+ void set_last_frame_duration(base::TimeDelta duration) {
+ last_frame_duration_ = duration;
+ }
+
+ // Gets |highest_presentation_timestamp_|.
+ base::TimeDelta highest_presentation_timestamp() const {
+ return highest_presentation_timestamp_;
+ }
+
+ // Get/set |needs_random_access_point_|.
+ bool needs_random_access_point() const {
+ return needs_random_access_point_;
+ }
+ void set_needs_random_access_point(bool needs_random_access_point) {
+ needs_random_access_point_ = needs_random_access_point;
+ }
+
+ // Gets a pointer to this track's ChunkDemuxerStream.
+ ChunkDemuxerStream* stream() const { return stream_; }
+
+ // Unsets |last_decode_timestamp_|, unsets |last_frame_duration_|,
+ // unsets |highest_presentation_timestamp_|, and sets
+ // |needs_random_access_point_| to true.
+ void Reset();
+
+ // If |highest_presentation_timestamp_| is unset or |timestamp| is greater
+ // than |highest_presentation_timestamp_|, sets
+ // |highest_presentation_timestamp_| to |timestamp|. Note that bidirectional
+ // prediction between coded frames can cause |timestamp| to not be
+ // monotonically increasing even though the decode timestamps are
+ // monotonically increasing.
+ void SetHighestPresentationTimestampIfIncreased(base::TimeDelta timestamp);
+
+ // Adds |frame| to the end of |processed_frames_|.
+ void EnqueueProcessedFrame(const scoped_refptr<StreamParserBuffer>& frame);
+
+ // Appends |processed_frames_|, if not empty, to |stream_| and clears
+ // |processed_frames_|. Returns false if append failed, true otherwise.
+ // |processed_frames_| is cleared in both cases.
+ bool FlushProcessedFrames();
+
+ private:
+ // The decode timestamp of the last coded frame appended in the current coded
+ // frame group. Initially kNoTimestamp(), meaning "unset".
+ base::TimeDelta last_decode_timestamp_;
+
+ // The coded frame duration of the last coded frame appended in the current
+ // coded frame group. Initially kNoTimestamp(), meaning "unset".
+ base::TimeDelta last_frame_duration_;
+
+ // The highest presentation timestamp encountered in a coded frame appended
+ // in the current coded frame group. Initially kNoTimestamp(), meaning
+ // "unset".
+ base::TimeDelta highest_presentation_timestamp_;
+
+ // Keeps track of whether the track buffer is waiting for a random access
+ // point coded frame. Initially set to true to indicate that a random access
+ // point coded frame is needed before anything can be added to the track
+ // buffer.
+ bool needs_random_access_point_;
+
+ // Pointer to the stream associated with this track. The stream is not owned
+ // by |this|.
+ ChunkDemuxerStream* const stream_;
+
+ // Queue of processed frames that have not yet been appended to |stream_|.
+ // EnqueueProcessedFrame() adds to this queue, and FlushProcessedFrames()
+ // clears it.
+ StreamParser::BufferQueue processed_frames_;
+
+ DISALLOW_COPY_AND_ASSIGN(MseTrackBuffer);
+};
+
+MseTrackBuffer::MseTrackBuffer(ChunkDemuxerStream* stream)
+ : last_decode_timestamp_(kNoTimestamp()),
+ last_frame_duration_(kNoTimestamp()),
+ highest_presentation_timestamp_(kNoTimestamp()),
+ needs_random_access_point_(true),
+ stream_(stream) {
+ DCHECK(stream_);
+}
+
+MseTrackBuffer::~MseTrackBuffer() {
+ DVLOG(2) << __FUNCTION__ << "()";
+}
+
+void MseTrackBuffer::Reset() {
+ DVLOG(2) << __FUNCTION__ << "()";
+
+ last_decode_timestamp_ = kNoTimestamp();
+ last_frame_duration_ = kNoTimestamp();
+ highest_presentation_timestamp_ = kNoTimestamp();
+ needs_random_access_point_ = true;
+}
+
+void MseTrackBuffer::SetHighestPresentationTimestampIfIncreased(
+ base::TimeDelta timestamp) {
+ if (highest_presentation_timestamp_ == kNoTimestamp() ||
+ timestamp > highest_presentation_timestamp_) {
+ highest_presentation_timestamp_ = timestamp;
+ }
+}
+
+void MseTrackBuffer::EnqueueProcessedFrame(
+ const scoped_refptr<StreamParserBuffer>& frame) {
+ processed_frames_.push_back(frame);
+}
+
+bool MseTrackBuffer::FlushProcessedFrames() {
+ if (processed_frames_.empty())
+ return true;
+
+ bool result = stream_->Append(processed_frames_);
+ processed_frames_.clear();
+ DVLOG_IF(3, !result) << __FUNCTION__
+ << "(): Failure appending processed frames to stream";
+
+ return result;
+}
+
FrameProcessor::FrameProcessor(const UpdateDurationCB& update_duration_cb)
- : update_duration_cb_(update_duration_cb) {
+ : sequence_mode_(false),
+ group_start_timestamp_(kNoTimestamp()),
+ update_duration_cb_(update_duration_cb) {
DVLOG(2) << __FUNCTION__ << "()";
DCHECK(!update_duration_cb.is_null());
}
FrameProcessor::~FrameProcessor() {
- DVLOG(2) << __FUNCTION__;
+ DVLOG(2) << __FUNCTION__ << "()";
+ STLDeleteValues(&track_buffers_);
}
void FrameProcessor::SetSequenceMode(bool sequence_mode) {
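
The notable behavioral addition in MseTrackBuffer above is frame batching: processed frames are queued per track and appended to the ChunkDemuxerStream in a single Append() call. A tiny sketch of that pattern; the free function and |frames| vector are illustrative, not from the patch (assumes <vector> is available):

    void QueueAndFlush(
        MseTrackBuffer* track,
        const std::vector<scoped_refptr<StreamParserBuffer> >& frames) {
      // Frames accumulate per track...
      for (size_t i = 0; i < frames.size(); ++i)
        track->EnqueueProcessedFrame(frames[i]);

      // ...and are handed to the stream as one contiguous batch; a false
      // return is treated as a decode error by ProcessFrames().
      if (!track->FlushProcessedFrames()) {
        // Handle the append failure here.
      }
    }
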
@@ -63,10 +210,14 @@ bool FrameProcessor::ProcessFrames(
frames_itr != frames.end(); ++frames_itr) {
if (!ProcessFrame(*frames_itr, append_window_start, append_window_end,
timestamp_offset, new_media_segment)) {
+ FlushProcessedFrames();
return false;
}
}
+ if (!FlushProcessedFrames())
+ return false;
+
// 2. - 4. Are handled by the WebMediaPlayer / Pipeline / Media Element.
// Step 5:
@@ -75,6 +226,180 @@ bool FrameProcessor::ProcessFrames(
return true;
}
+void FrameProcessor::SetGroupStartTimestampIfInSequenceMode(
+ base::TimeDelta timestamp_offset) {
+ DVLOG(2) << __FUNCTION__ << "(" << timestamp_offset.InSecondsF() << ")";
+ DCHECK(kNoTimestamp() != timestamp_offset);
+ if (sequence_mode_)
+ group_start_timestamp_ = timestamp_offset;
+
+ // Changes to timestampOffset should invalidate the preroll buffer.
+ audio_preroll_buffer_ = NULL;
+}
+
+bool FrameProcessor::AddTrack(StreamParser::TrackId id,
+ ChunkDemuxerStream* stream) {
+ DVLOG(2) << __FUNCTION__ << "(): id=" << id;
+
+ MseTrackBuffer* existing_track = FindTrack(id);
+ DCHECK(!existing_track);
+ if (existing_track)
+ return false;
+
+ track_buffers_[id] = new MseTrackBuffer(stream);
+ return true;
+}
+
+bool FrameProcessor::UpdateTrack(StreamParser::TrackId old_id,
+ StreamParser::TrackId new_id) {
+ DVLOG(2) << __FUNCTION__ << "() : old_id=" << old_id << ", new_id=" << new_id;
+
+ if (old_id == new_id || !FindTrack(old_id) || FindTrack(new_id))
+ return false;
+
+ track_buffers_[new_id] = track_buffers_[old_id];
+ CHECK_EQ(1u, track_buffers_.erase(old_id));
+ return true;
+}
+
+void FrameProcessor::SetAllTrackBuffersNeedRandomAccessPoint() {
+ for (TrackBufferMap::iterator itr = track_buffers_.begin();
+ itr != track_buffers_.end();
+ ++itr) {
+ itr->second->set_needs_random_access_point(true);
+ }
+}
+
+void FrameProcessor::Reset() {
+ DVLOG(2) << __FUNCTION__ << "()";
+ for (TrackBufferMap::iterator itr = track_buffers_.begin();
+ itr != track_buffers_.end(); ++itr) {
+ itr->second->Reset();
+ }
+}
+
+void FrameProcessor::OnPossibleAudioConfigUpdate(
+ const AudioDecoderConfig& config) {
+ DCHECK(config.IsValidConfig());
+
+ // Always clear the preroll buffer when a config update is received.
+ audio_preroll_buffer_ = NULL;
+
+ if (config.Matches(current_audio_config_))
+ return;
+
+ current_audio_config_ = config;
+ sample_duration_ = base::TimeDelta::FromSecondsD(
+ 1.0 / current_audio_config_.samples_per_second());
+}
+
+MseTrackBuffer* FrameProcessor::FindTrack(StreamParser::TrackId id) {
+ TrackBufferMap::iterator itr = track_buffers_.find(id);
+ if (itr == track_buffers_.end())
+ return NULL;
+
+ return itr->second;
+}
+
+void FrameProcessor::NotifyNewMediaSegmentStarting(
+ base::TimeDelta segment_timestamp) {
+ DVLOG(2) << __FUNCTION__ << "(" << segment_timestamp.InSecondsF() << ")";
+
+ for (TrackBufferMap::iterator itr = track_buffers_.begin();
+ itr != track_buffers_.end();
+ ++itr) {
+ itr->second->stream()->OnNewMediaSegment(segment_timestamp);
+ }
+}
+
+bool FrameProcessor::FlushProcessedFrames() {
+ DVLOG(2) << __FUNCTION__ << "()";
+
+ bool result = true;
+ for (TrackBufferMap::iterator itr = track_buffers_.begin();
+ itr != track_buffers_.end();
+ ++itr) {
+ if (!itr->second->FlushProcessedFrames())
+ result = false;
+ }
+
+ return result;
+}
+
+bool FrameProcessor::HandlePartialAppendWindowTrimming(
+ base::TimeDelta append_window_start,
+ base::TimeDelta append_window_end,
+ const scoped_refptr<StreamParserBuffer>& buffer) {
+ DCHECK(buffer->duration() > base::TimeDelta());
+ DCHECK_EQ(DemuxerStream::AUDIO, buffer->type());
+
+ const base::TimeDelta frame_end_timestamp =
+ buffer->timestamp() + buffer->duration();
+
+ // Ignore any buffers which start after |append_window_start| or end after
+ // |append_window_end|. For simplicity, even those that start before
+ // |append_window_start|.
+ if (buffer->timestamp() > append_window_start ||
+ frame_end_timestamp > append_window_end) {
+ // TODO(dalecurtis): Partial append window trimming could also be done
+ // around |append_window_end|, but is not necessary since splice frames
+ // cover overlaps there.
+ return false;
+ }
+
+ // If the buffer is entirely before |append_window_start|, save it as preroll
+ // for the first buffer which overlaps |append_window_start|.
+ if (buffer->timestamp() < append_window_start &&
+ frame_end_timestamp <= append_window_start) {
+ audio_preroll_buffer_ = buffer;
+ return false;
+ }
+
+ // There's nothing to be done if we have no preroll and the buffer starts on
+ // the append window start.
+ if (buffer->timestamp() == append_window_start && !audio_preroll_buffer_)
+ return false;
+
+ // See if a partial discard can be done around |append_window_start|.
+ DCHECK(buffer->timestamp() <= append_window_start);
+ DCHECK(buffer->IsKeyframe());
+ DVLOG(1) << "Truncating buffer which overlaps append window start."
+ << " presentation_timestamp " << buffer->timestamp().InSecondsF()
+ << " append_window_start " << append_window_start.InSecondsF();
+
+ // If this isn't the first buffer discarded by the append window, try to use
+ // the last buffer discarded for preroll. This ensures that the partially
+ // trimmed buffer can be correctly decoded.
+ if (audio_preroll_buffer_) {
+ // We only want to use the preroll buffer if it directly precedes (less than
+ // one sample apart) the current buffer.
+ const int64 delta = std::abs((audio_preroll_buffer_->timestamp() +
+ audio_preroll_buffer_->duration() -
+ buffer->timestamp()).InMicroseconds());
+ if (delta < sample_duration_.InMicroseconds()) {
+ buffer->SetPrerollBuffer(audio_preroll_buffer_);
+ } else {
+ // TODO(dalecurtis): Add a MEDIA_LOG() for when this is dropped unused.
+ }
+ audio_preroll_buffer_ = NULL;
+ }
+
+ // Decrease the duration appropriately. We only need to shorten the buffer if
+ // it overlaps |append_window_start|.
+ if (buffer->timestamp() < append_window_start) {
+ buffer->set_discard_padding(std::make_pair(
+ append_window_start - buffer->timestamp(), base::TimeDelta()));
+ buffer->set_duration(frame_end_timestamp - append_window_start);
+ }
+
+ // Adjust the timestamp of this buffer forward to |append_window_start|. The
+ // timestamps are always set, even if |buffer|'s timestamp is already set to
+ // |append_window_start|, to ensure the preroll buffer is setup correctly.
+ buffer->set_timestamp(append_window_start);
+ buffer->SetDecodeTimestamp(append_window_start);
+ return true;
+}
+
bool FrameProcessor::ProcessFrame(
const scoped_refptr<StreamParserBuffer>& frame,
base::TimeDelta append_window_start,
@@ -104,7 +429,8 @@ bool FrameProcessor::ProcessFrame(
<< ", TrackID=" << frame->track_id()
<< ", PTS=" << presentation_timestamp.InSecondsF()
<< ", DTS=" << decode_timestamp.InSecondsF()
- << ", DUR=" << frame_duration.InSecondsF();
+ << ", DUR=" << frame_duration.InSecondsF()
+ << ", RAP=" << frame->IsKeyframe();
// Sanity check the timestamps.
if (presentation_timestamp == kNoTimestamp()) {
@@ -276,14 +602,6 @@ bool FrameProcessor::ProcessFrame(
frame_end_timestamp > append_window_end) {
track_buffer->set_needs_random_access_point(true);
DVLOG(3) << "Dropping frame that is outside append window.";
-
- if (!sequence_mode_) {
- // This also triggers a discontinuity so we need to treat the next
- // frames appended within the append window as if they were the
- // beginning of a new segment.
- *new_media_segment = true;
- }
-
return true;
}
@@ -325,6 +643,11 @@ bool FrameProcessor::ProcessFrame(
// If it is the first in a new media segment or following a discontinuity,
// notify all the track buffers' streams that a new segment is beginning.
if (*new_media_segment) {
+ // First, complete the append to track buffer streams of previous media
+ // segment's frames, if any.
+ if (!FlushProcessedFrames())
+ return false;
+
*new_media_segment = false;
NotifyNewMediaSegmentStarting(decode_timestamp);
}
@@ -333,16 +656,12 @@ bool FrameProcessor::ProcessFrame(
<< "PTS=" << presentation_timestamp.InSecondsF()
<< ", DTS=" << decode_timestamp.InSecondsF();
- // Steps 13-18:
- // TODO(wolenetz): Collect and emit more than one buffer at a time, if
- // possible. Also refactor SourceBufferStream to conform to spec GC timing.
+ // Steps 13-18: Note, we optimize by appending groups of contiguous
+  // processed frames for each track buffer at the end of ProcessFrames() or
+  // prior to NotifyNewMediaSegmentStarting().
+ // TODO(wolenetz): Refactor SourceBufferStream to conform to spec GC timing.
// See http://crbug.com/371197.
- StreamParser::BufferQueue buffer_to_append;
- buffer_to_append.push_back(frame);
- if (!track_buffer->stream()->Append(buffer_to_append)) {
- DVLOG(3) << __FUNCTION__ << ": Failure appending frame to stream";
- return false;
- }
+ track_buffer->EnqueueProcessedFrame(frame);
// 19. Set last decode timestamp for track buffer to decode timestamp.
track_buffer->set_last_decode_timestamp(decode_timestamp);
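
As a worked example of HandlePartialAppendWindowTrimming() above: a 20ms audio buffer at PTS 90ms, trimmed against append_window_start = 100ms, gets 10ms of front discard padding, a 10ms duration, and its PTS/DTS moved to 100ms. The trimming arithmetic transcribed as a standalone sketch (the function name is illustrative; the calls mirror the hunk above):

    void TrimToAppendWindowStart(const scoped_refptr<StreamParserBuffer>& buffer,
                                 base::TimeDelta append_window_start) {
      const base::TimeDelta frame_end = buffer->timestamp() + buffer->duration();
      // Mark everything before the window start for post-decode discard.
      buffer->set_discard_padding(std::make_pair(
          append_window_start - buffer->timestamp(), base::TimeDelta()));
      // Shorten the buffer to the portion inside the window and move it to the
      // window start so downstream timestamps stay contiguous.
      buffer->set_duration(frame_end - append_window_start);
      buffer->set_timestamp(append_window_start);
      buffer->SetDecodeTimestamp(append_window_start);
    }
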
diff --git a/media/filters/frame_processor.h b/media/filters/frame_processor.h
index fcfe737572..0067b789d5 100644
--- a/media/filters/frame_processor.h
+++ b/media/filters/frame_processor.h
@@ -5,34 +5,125 @@
#ifndef MEDIA_FILTERS_FRAME_PROCESSOR_H_
#define MEDIA_FILTERS_FRAME_PROCESSOR_H_
+#include <map>
+
#include "base/basictypes.h"
#include "base/callback_forward.h"
#include "base/time/time.h"
#include "media/base/media_export.h"
#include "media/base/stream_parser.h"
-#include "media/filters/frame_processor_base.h"
+#include "media/filters/chunk_demuxer.h"
namespace media {
+class MseTrackBuffer;
+
// Helper class that implements Media Source Extension's coded frame processing
// algorithm.
-class MEDIA_EXPORT FrameProcessor : public FrameProcessorBase {
+class MEDIA_EXPORT FrameProcessor {
public:
typedef base::Callback<void(base::TimeDelta)> UpdateDurationCB;
+
+ // TODO(wolenetz/acolwell): Ensure that all TrackIds are coherent and unique
+ // for each track buffer. For now, special track identifiers are used for each
+ // of audio and video here, and text TrackIds are assumed to be non-negative.
+ // See http://crbug.com/341581.
+ enum {
+ kAudioTrackId = -2,
+ kVideoTrackId = -3
+ };
+
explicit FrameProcessor(const UpdateDurationCB& update_duration_cb);
- virtual ~FrameProcessor();
-
- // FrameProcessorBase implementation
- virtual void SetSequenceMode(bool sequence_mode) OVERRIDE;
- virtual bool ProcessFrames(const StreamParser::BufferQueue& audio_buffers,
- const StreamParser::BufferQueue& video_buffers,
- const StreamParser::TextBufferQueueMap& text_map,
- base::TimeDelta append_window_start,
- base::TimeDelta append_window_end,
- bool* new_media_segment,
- base::TimeDelta* timestamp_offset) OVERRIDE;
+ ~FrameProcessor();
+
+ // Get/set the current append mode, which if true means "sequence" and if
+ // false means "segments".
+ // See http://www.w3.org/TR/media-source/#widl-SourceBuffer-mode.
+ bool sequence_mode() { return sequence_mode_; }
+ void SetSequenceMode(bool sequence_mode);
+
+ // Processes buffers in |audio_buffers|, |video_buffers|, and |text_map|.
+ // Returns true on success or false on failure which indicates decode error.
+ // |append_window_start| and |append_window_end| correspond to the MSE spec's
+ // similarly named source buffer attributes that are used in coded frame
+ // processing.
+ // |*new_media_segment| tracks whether the next buffers processed within the
+ // append window represent the start of a new media segment. This method may
+ // both use and update this flag.
+ // Uses |*timestamp_offset| according to the coded frame processing algorithm,
+ // including updating it as required in 'sequence' mode frame processing.
+ bool ProcessFrames(const StreamParser::BufferQueue& audio_buffers,
+ const StreamParser::BufferQueue& video_buffers,
+ const StreamParser::TextBufferQueueMap& text_map,
+ base::TimeDelta append_window_start,
+ base::TimeDelta append_window_end,
+ bool* new_media_segment,
+ base::TimeDelta* timestamp_offset);
+
+ // Signals the frame processor to update its group start timestamp to be
+ // |timestamp_offset| if it is in sequence append mode.
+ void SetGroupStartTimestampIfInSequenceMode(base::TimeDelta timestamp_offset);
+
+ // Adds a new track with unique track ID |id|.
+ // If |id| has previously been added, returns false to indicate error.
+ // Otherwise, returns true, indicating future ProcessFrames() will emit
+ // frames for the track |id| to |stream|.
+ bool AddTrack(StreamParser::TrackId id, ChunkDemuxerStream* stream);
+
+ // Updates the internal mapping of TrackId to track buffer for the track
+ // buffer formerly associated with |old_id| to be associated with |new_id|.
+ // Returns false to indicate failure due to either no existing track buffer
+ // for |old_id| or collision with previous track buffer already mapped to
+ // |new_id|. Otherwise returns true.
+ bool UpdateTrack(StreamParser::TrackId old_id, StreamParser::TrackId new_id);
+
+ // Sets the need random access point flag on all track buffers to true.
+ void SetAllTrackBuffersNeedRandomAccessPoint();
+
+ // Resets state for the coded frame processing algorithm as described in steps
+ // 2-5 of the MSE Reset Parser State algorithm described at
+ // http://www.w3.org/TR/media-source/#sourcebuffer-reset-parser-state
+ void Reset();
+
+ // Must be called when the audio config is updated. Used to manage when
+ // the preroll buffer is cleared and the allowed "fudge" factor between
+ // preroll buffers.
+ void OnPossibleAudioConfigUpdate(const AudioDecoderConfig& config);
private:
+ typedef std::map<StreamParser::TrackId, MseTrackBuffer*> TrackBufferMap;
+
+ // If |track_buffers_| contains |id|, returns a pointer to the associated
+ // MseTrackBuffer. Otherwise, returns NULL.
+ MseTrackBuffer* FindTrack(StreamParser::TrackId id);
+
+ // Signals all track buffers' streams that a new media segment is starting
+ // with timestamp |segment_timestamp|.
+ void NotifyNewMediaSegmentStarting(base::TimeDelta segment_timestamp);
+
+ // Helper that signals each track buffer to append any processed, but not yet
+ // appended, frames to its stream. Returns true on success, or false if one or
+ // more of the appends failed.
+ bool FlushProcessedFrames();
+
+ // Handles partial append window trimming of |buffer|. Returns true if the
+ // given |buffer| can be partially trimmed or have preroll added; otherwise,
+ // returns false.
+ //
+ // If |buffer| overlaps |append_window_start|, the portion of |buffer| before
+ // |append_window_start| will be marked for post-decode discard. Further, if
+ // |audio_preroll_buffer_| exists and abuts |buffer|, it will be set as
+ // preroll on |buffer| and |audio_preroll_buffer_| will be cleared. If the
+ // preroll buffer does not abut |buffer|, it will be discarded, but not used.
+ //
+ // If |buffer| lies entirely before |append_window_start|, and thus would
+ // normally be discarded, |audio_preroll_buffer_| will be set to |buffer| and
+ // the method will return false.
+ bool HandlePartialAppendWindowTrimming(
+ base::TimeDelta append_window_start,
+ base::TimeDelta append_window_end,
+ const scoped_refptr<StreamParserBuffer>& buffer);
+
// Helper that processes one frame with the coded frame processing algorithm.
// Returns false on error or true on success.
bool ProcessFrame(const scoped_refptr<StreamParserBuffer>& frame,
@@ -41,6 +132,28 @@ class MEDIA_EXPORT FrameProcessor : public FrameProcessorBase {
base::TimeDelta* timestamp_offset,
bool* new_media_segment);
+ // TrackId-indexed map of each track's stream.
+ TrackBufferMap track_buffers_;
+
+ // The last audio buffer seen by the frame processor that was removed because
+ // it was entirely before the start of the append window.
+ scoped_refptr<StreamParserBuffer> audio_preroll_buffer_;
+
+ // The AudioDecoderConfig associated with buffers handed to ProcessFrames().
+ AudioDecoderConfig current_audio_config_;
+ base::TimeDelta sample_duration_;
+
+ // The AppendMode of the associated SourceBuffer.
+ // See SetSequenceMode() for interpretation of |sequence_mode_|.
+ // Per http://www.w3.org/TR/media-source/#widl-SourceBuffer-mode:
+ // Controls how a sequence of media segments are handled. This is initially
+ // set to false ("segments").
+ bool sequence_mode_;
+
+ // Tracks the MSE coded frame processing variable of same name.
+ // Initially kNoTimestamp(), meaning "unset".
+ base::TimeDelta group_start_timestamp_;
+
// Tracks the MSE coded frame processing variable of same name. It stores the
// highest coded frame end timestamp across all coded frames in the current
// coded frame group. It is set to 0 when the SourceBuffer object is created
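
With FrameProcessorBase folded in (its files are deleted below), callers such as ChunkDemuxer's source buffer state use FrameProcessor directly. A minimal usage sketch of the public API declared above; the free functions, stream pointers, and buffer queues are illustrative rather than taken from the patch, and the usual base/bind.h and media includes are assumed:

    void OnDurationNeedsUpdate(base::TimeDelta new_duration) {
      // In ChunkDemuxer this would adjust the media duration; stubbed here.
    }

    bool AppendCodedFrames(ChunkDemuxerStream* audio_stream,
                           ChunkDemuxerStream* video_stream,
                           const StreamParser::BufferQueue& audio_buffers,
                           const StreamParser::BufferQueue& video_buffers) {
      FrameProcessor frame_processor(base::Bind(&OnDurationNeedsUpdate));
      frame_processor.AddTrack(FrameProcessor::kAudioTrackId, audio_stream);
      frame_processor.AddTrack(FrameProcessor::kVideoTrackId, video_stream);
      frame_processor.SetSequenceMode(false);  // "segments" append mode.

      bool new_media_segment = true;
      base::TimeDelta timestamp_offset;
      // Returns false on decode error, per the coded frame processing
      // algorithm.
      return frame_processor.ProcessFrames(
          audio_buffers, video_buffers, StreamParser::TextBufferQueueMap(),
          base::TimeDelta() /* append_window_start */,
          kInfiniteDuration() /* append_window_end */,
          &new_media_segment, &timestamp_offset);
    }
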
diff --git a/media/filters/frame_processor_base.cc b/media/filters/frame_processor_base.cc
deleted file mode 100644
index c0593fe4c0..0000000000
--- a/media/filters/frame_processor_base.cc
+++ /dev/null
@@ -1,214 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/filters/frame_processor_base.h"
-
-#include <cstdlib>
-
-#include "base/stl_util.h"
-#include "media/base/buffers.h"
-
-namespace media {
-
-MseTrackBuffer::MseTrackBuffer(ChunkDemuxerStream* stream)
- : last_decode_timestamp_(kNoTimestamp()),
- last_frame_duration_(kNoTimestamp()),
- highest_presentation_timestamp_(kNoTimestamp()),
- needs_random_access_point_(true),
- stream_(stream) {
- DCHECK(stream_);
-}
-
-MseTrackBuffer::~MseTrackBuffer() {
- DVLOG(2) << __FUNCTION__ << "()";
-}
-
-void MseTrackBuffer::Reset() {
- DVLOG(2) << __FUNCTION__ << "()";
-
- last_decode_timestamp_ = kNoTimestamp();
- last_frame_duration_ = kNoTimestamp();
- highest_presentation_timestamp_ = kNoTimestamp();
- needs_random_access_point_ = true;
-}
-
-void MseTrackBuffer::SetHighestPresentationTimestampIfIncreased(
- base::TimeDelta timestamp) {
- if (highest_presentation_timestamp_ == kNoTimestamp() ||
- timestamp > highest_presentation_timestamp_) {
- highest_presentation_timestamp_ = timestamp;
- }
-}
-
-FrameProcessorBase::FrameProcessorBase()
- : sequence_mode_(false),
- group_start_timestamp_(kNoTimestamp()) {}
-
-FrameProcessorBase::~FrameProcessorBase() {
- DVLOG(2) << __FUNCTION__ << "()";
-
- STLDeleteValues(&track_buffers_);
-}
-
-void FrameProcessorBase::SetGroupStartTimestampIfInSequenceMode(
- base::TimeDelta timestamp_offset) {
- DVLOG(2) << __FUNCTION__ << "(" << timestamp_offset.InSecondsF() << ")";
- DCHECK(kNoTimestamp() != timestamp_offset);
- if (sequence_mode_)
- group_start_timestamp_ = timestamp_offset;
-
- // Changes to timestampOffset should invalidate the preroll buffer.
- audio_preroll_buffer_ = NULL;
-}
-
-bool FrameProcessorBase::AddTrack(StreamParser::TrackId id,
- ChunkDemuxerStream* stream) {
- DVLOG(2) << __FUNCTION__ << "(): id=" << id;
-
- MseTrackBuffer* existing_track = FindTrack(id);
- DCHECK(!existing_track);
- if (existing_track)
- return false;
-
- track_buffers_[id] = new MseTrackBuffer(stream);
- return true;
-}
-
-bool FrameProcessorBase::UpdateTrack(StreamParser::TrackId old_id,
- StreamParser::TrackId new_id) {
- DVLOG(2) << __FUNCTION__ << "() : old_id=" << old_id << ", new_id=" << new_id;
-
- if (old_id == new_id || !FindTrack(old_id) || FindTrack(new_id))
- return false;
-
- track_buffers_[new_id] = track_buffers_[old_id];
- CHECK_EQ(1u, track_buffers_.erase(old_id));
- return true;
-}
-
-void FrameProcessorBase::SetAllTrackBuffersNeedRandomAccessPoint() {
- for (TrackBufferMap::iterator itr = track_buffers_.begin();
- itr != track_buffers_.end();
- ++itr) {
- itr->second->set_needs_random_access_point(true);
- }
-}
-
-void FrameProcessorBase::Reset() {
- DVLOG(2) << __FUNCTION__ << "()";
- for (TrackBufferMap::iterator itr = track_buffers_.begin();
- itr != track_buffers_.end(); ++itr) {
- itr->second->Reset();
- }
-}
-
-MseTrackBuffer* FrameProcessorBase::FindTrack(StreamParser::TrackId id) {
- TrackBufferMap::iterator itr = track_buffers_.find(id);
- if (itr == track_buffers_.end())
- return NULL;
-
- return itr->second;
-}
-
-void FrameProcessorBase::NotifyNewMediaSegmentStarting(
- base::TimeDelta segment_timestamp) {
- DVLOG(2) << __FUNCTION__ << "(" << segment_timestamp.InSecondsF() << ")";
-
- for (TrackBufferMap::iterator itr = track_buffers_.begin();
- itr != track_buffers_.end();
- ++itr) {
- itr->second->stream()->OnNewMediaSegment(segment_timestamp);
- }
-}
-
-bool FrameProcessorBase::HandlePartialAppendWindowTrimming(
- base::TimeDelta append_window_start,
- base::TimeDelta append_window_end,
- const scoped_refptr<StreamParserBuffer>& buffer) {
- DCHECK(buffer->duration() > base::TimeDelta());
- DCHECK_EQ(DemuxerStream::AUDIO, buffer->type());
-
- const base::TimeDelta frame_end_timestamp =
- buffer->timestamp() + buffer->duration();
-
- // Ignore any buffers which start after |append_window_start| or end after
- // |append_window_end|. For simplicity, even those that start before
- // |append_window_start|.
- if (buffer->timestamp() > append_window_start ||
- frame_end_timestamp > append_window_end) {
- // TODO(dalecurtis): Partial append window trimming could also be done
- // around |append_window_end|, but is not necessary since splice frames
- // cover overlaps there.
- return false;
- }
-
- // If the buffer is entirely before |append_window_start|, save it as preroll
- // for the first buffer which overlaps |append_window_start|.
- if (buffer->timestamp() < append_window_start &&
- frame_end_timestamp <= append_window_start) {
- audio_preroll_buffer_ = buffer;
- return false;
- }
-
- // There's nothing to be done if we have no preroll and the buffer starts on
- // the append window start.
- if (buffer->timestamp() == append_window_start && !audio_preroll_buffer_)
- return false;
-
- // See if a partial discard can be done around |append_window_start|.
- DCHECK(buffer->timestamp() <= append_window_start);
- DCHECK(buffer->IsKeyframe());
- DVLOG(1) << "Truncating buffer which overlaps append window start."
- << " presentation_timestamp " << buffer->timestamp().InSecondsF()
- << " append_window_start " << append_window_start.InSecondsF();
-
- // If this isn't the first buffer discarded by the append window, try to use
- // the last buffer discarded for preroll. This ensures that the partially
- // trimmed buffer can be correctly decoded.
- if (audio_preroll_buffer_) {
- // We only want to use the preroll buffer if it directly precedes (less than
- // one sample apart) the current buffer.
- const int64 delta = std::abs((audio_preroll_buffer_->timestamp() +
- audio_preroll_buffer_->duration() -
- buffer->timestamp()).InMicroseconds());
- if (delta < sample_duration_.InMicroseconds()) {
- buffer->SetPrerollBuffer(audio_preroll_buffer_);
- } else {
- // TODO(dalecurtis): Add a MEDIA_LOG() for when this is dropped unused.
- }
- audio_preroll_buffer_ = NULL;
- }
-
- // Decrease the duration appropriately. We only need to shorten the buffer if
- // it overlaps |append_window_start|.
- if (buffer->timestamp() < append_window_start) {
- buffer->set_discard_padding(std::make_pair(
- append_window_start - buffer->timestamp(), base::TimeDelta()));
- buffer->set_duration(frame_end_timestamp - append_window_start);
- }
-
- // Adjust the timestamp of this buffer forward to |append_window_start|. The
- // timestamps are always set, even if |buffer|'s timestamp is already set to
- // |append_window_start|, to ensure the preroll buffer is setup correctly.
- buffer->set_timestamp(append_window_start);
- buffer->SetDecodeTimestamp(append_window_start);
- return true;
-}
-
-void FrameProcessorBase::OnPossibleAudioConfigUpdate(
- const AudioDecoderConfig& config) {
- DCHECK(config.IsValidConfig());
-
- // Always clear the preroll buffer when a config update is received.
- audio_preroll_buffer_ = NULL;
-
- if (config.Matches(current_audio_config_))
- return;
-
- current_audio_config_ = config;
- sample_duration_ = base::TimeDelta::FromSecondsD(
- 1.0 / current_audio_config_.samples_per_second());
-}
-
-} // namespace media
diff --git a/media/filters/frame_processor_base.h b/media/filters/frame_processor_base.h
deleted file mode 100644
index 7947efb892..0000000000
--- a/media/filters/frame_processor_base.h
+++ /dev/null
@@ -1,234 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_FILTERS_FRAME_PROCESSOR_BASE_H_
-#define MEDIA_FILTERS_FRAME_PROCESSOR_BASE_H_
-
-#include <map>
-
-#include "base/basictypes.h"
-#include "base/time/time.h"
-#include "media/base/media_export.h"
-#include "media/base/stream_parser.h"
-#include "media/filters/chunk_demuxer.h"
-
-namespace media {
-
-// Helper class to capture per-track details needed by a frame processor. Some
-// of this information may be duplicated in the short-term in the associated
-// ChunkDemuxerStream and SourceBufferStream for a track.
-// This parallels the MSE spec each of a SourceBuffer's Track Buffers at
-// http://www.w3.org/TR/media-source/#track-buffers.
-class MseTrackBuffer {
- public:
- explicit MseTrackBuffer(ChunkDemuxerStream* stream);
- ~MseTrackBuffer();
-
- // Get/set |last_decode_timestamp_|.
- base::TimeDelta last_decode_timestamp() const {
- return last_decode_timestamp_;
- }
- void set_last_decode_timestamp(base::TimeDelta timestamp) {
- last_decode_timestamp_ = timestamp;
- }
-
- // Get/set |last_frame_duration_|.
- base::TimeDelta last_frame_duration() const {
- return last_frame_duration_;
- }
- void set_last_frame_duration(base::TimeDelta duration) {
- last_frame_duration_ = duration;
- }
-
- // Gets |highest_presentation_timestamp_|.
- base::TimeDelta highest_presentation_timestamp() const {
- return highest_presentation_timestamp_;
- }
-
- // Get/set |needs_random_access_point_|.
- bool needs_random_access_point() const {
- return needs_random_access_point_;
- }
- void set_needs_random_access_point(bool needs_random_access_point) {
- needs_random_access_point_ = needs_random_access_point;
- }
-
- // Gets a pointer to this track's ChunkDemuxerStream.
- ChunkDemuxerStream* stream() const { return stream_; }
-
- // Unsets |last_decode_timestamp_|, unsets |last_frame_duration_|,
- // unsets |highest_presentation_timestamp_|, and sets
- // |needs_random_access_point_| to true.
- void Reset();
-
- // If |highest_presentation_timestamp_| is unset or |timestamp| is greater
- // than |highest_presentation_timestamp_|, sets
- // |highest_presentation_timestamp_| to |timestamp|. Note that bidirectional
- // prediction between coded frames can cause |timestamp| to not be
- // monotonically increasing even though the decode timestamps are
- // monotonically increasing.
- void SetHighestPresentationTimestampIfIncreased(base::TimeDelta timestamp);
-
- private:
- // The decode timestamp of the last coded frame appended in the current coded
- // frame group. Initially kNoTimestamp(), meaning "unset".
- base::TimeDelta last_decode_timestamp_;
-
- // The coded frame duration of the last coded frame appended in the current
- // coded frame group. Initially kNoTimestamp(), meaning "unset".
- base::TimeDelta last_frame_duration_;
-
- // The highest presentation timestamp encountered in a coded frame appended
- // in the current coded frame group. Initially kNoTimestamp(), meaning
- // "unset".
- base::TimeDelta highest_presentation_timestamp_;
-
- // Keeps track of whether the track buffer is waiting for a random access
- // point coded frame. Initially set to true to indicate that a random access
- // point coded frame is needed before anything can be added to the track
- // buffer.
- bool needs_random_access_point_;
-
- // Pointer to the stream associated with this track. The stream is not owned
- // by |this|.
- ChunkDemuxerStream* const stream_;
-
- DISALLOW_COPY_AND_ASSIGN(MseTrackBuffer);
-};
-
-// Abstract interface for helper class implementation of Media Source
-// Extension's coded frame processing algorithm.
-// TODO(wolenetz): Once the new FrameProcessor implementation stabilizes, remove
-// LegacyFrameProcessor and fold this interface into FrameProcessor. See
-// http://crbug.com/249422.
-class MEDIA_EXPORT FrameProcessorBase {
- public:
- // TODO(wolenetz/acolwell): Ensure that all TrackIds are coherent and unique
- // for each track buffer. For now, special track identifiers are used for each
- // of audio and video here, and text TrackIds are assumed to be non-negative.
- // See http://crbug.com/341581.
- enum {
- kAudioTrackId = -2,
- kVideoTrackId = -3
- };
-
- virtual ~FrameProcessorBase();
-
- // Get/set the current append mode, which if true means "sequence" and if
- // false means "segments".
- // See http://www.w3.org/TR/media-source/#widl-SourceBuffer-mode.
- bool sequence_mode() { return sequence_mode_; }
- virtual void SetSequenceMode(bool sequence_mode) = 0;
-
- // Processes buffers in |audio_buffers|, |video_buffers|, and |text_map|.
- // Returns true on success or false on failure which indicates decode error.
- // |append_window_start| and |append_window_end| correspond to the MSE spec's
- // similarly named source buffer attributes that are used in coded frame
- // processing.
- // |*new_media_segment| tracks whether the next buffers processed within the
- // append window represent the start of a new media segment. This method may
- // both use and update this flag.
- // Uses |*timestamp_offset| according to the coded frame processing algorithm,
- // including updating it as required in 'sequence' mode frame processing.
- virtual bool ProcessFrames(const StreamParser::BufferQueue& audio_buffers,
- const StreamParser::BufferQueue& video_buffers,
- const StreamParser::TextBufferQueueMap& text_map,
- base::TimeDelta append_window_start,
- base::TimeDelta append_window_end,
- bool* new_media_segment,
- base::TimeDelta* timestamp_offset) = 0;
-
- // Signals the frame processor to update its group start timestamp to be
- // |timestamp_offset| if it is in sequence append mode.
- void SetGroupStartTimestampIfInSequenceMode(base::TimeDelta timestamp_offset);
-
- // Adds a new track with unique track ID |id|.
- // If |id| has previously been added, returns false to indicate error.
- // Otherwise, returns true, indicating future ProcessFrames() will emit
- // frames for the track |id| to |stream|.
- bool AddTrack(StreamParser::TrackId id, ChunkDemuxerStream* stream);
-
- // Updates the internal mapping of TrackId to track buffer for the track
- // buffer formerly associated with |old_id| to be associated with |new_id|.
- // Returns false to indicate failure due to either no existing track buffer
- // for |old_id| or collision with previous track buffer already mapped to
- // |new_id|. Otherwise returns true.
- bool UpdateTrack(StreamParser::TrackId old_id, StreamParser::TrackId new_id);
-
- // Sets the need random access point flag on all track buffers to true.
- void SetAllTrackBuffersNeedRandomAccessPoint();
-
- // Resets state for the coded frame processing algorithm as described in steps
- // 2-5 of the MSE Reset Parser State algorithm described at
- // http://www.w3.org/TR/media-source/#sourcebuffer-reset-parser-state
- void Reset();
-
- // Must be called when the audio config is updated. Used to manage when
- // the preroll buffer is cleared and the allowed "fudge" factor between
- // preroll buffers.
- void OnPossibleAudioConfigUpdate(const AudioDecoderConfig& config);
-
- protected:
- typedef std::map<StreamParser::TrackId, MseTrackBuffer*> TrackBufferMap;
-
- FrameProcessorBase();
-
- // If |track_buffers_| contains |id|, returns a pointer to the associated
- // MseTrackBuffer. Otherwise, returns NULL.
- MseTrackBuffer* FindTrack(StreamParser::TrackId id);
-
- // Signals all track buffers' streams that a new media segment is starting
- // with timestamp |segment_timestamp|.
- void NotifyNewMediaSegmentStarting(base::TimeDelta segment_timestamp);
-
- // Handles partial append window trimming of |buffer|. Returns true if the
- // given |buffer| can be partially trimmed or have preroll added; otherwise,
- // returns false.
- //
- // If |buffer| overlaps |append_window_start|, the portion of |buffer| before
- // |append_window_start| will be marked for post-decode discard. Further, if
- // |audio_preroll_buffer_| exists and abuts |buffer|, it will be set as
- // preroll on |buffer| and |audio_preroll_buffer_| will be cleared. If the
- // preroll buffer does not abut |buffer|, it will be discarded, but not used.
- //
- // If |buffer| lies entirely before |append_window_start|, and thus would
- // normally be discarded, |audio_preroll_buffer_| will be set to |buffer| and
- // the method will return false.
- bool HandlePartialAppendWindowTrimming(
- base::TimeDelta append_window_start,
- base::TimeDelta append_window_end,
- const scoped_refptr<StreamParserBuffer>& buffer);
-
- // The AppendMode of the associated SourceBuffer.
- // See SetSequenceMode() for interpretation of |sequence_mode_|.
- // Per http://www.w3.org/TR/media-source/#widl-SourceBuffer-mode:
- // Controls how a sequence of media segments are handled. This is initially
- // set to false ("segments").
- bool sequence_mode_;
-
- // TrackId-indexed map of each track's stream.
- TrackBufferMap track_buffers_;
-
- // Tracks the MSE coded frame processing variable of same name.
- // Initially kNoTimestamp(), meaning "unset".
- // Note: LegacyFrameProcessor does not use this member; it's here to reduce
- // short-term plumbing of SetGroupStartTimestampIfInSequenceMode() until
- // LegacyFrameProcessor is removed.
- base::TimeDelta group_start_timestamp_;
-
- private:
- // The last audio buffer seen by the frame processor that was removed because
- // it was entirely before the start of the append window.
- scoped_refptr<StreamParserBuffer> audio_preroll_buffer_;
-
- // The AudioDecoderConfig associated with buffers handed to ProcessFrames().
- AudioDecoderConfig current_audio_config_;
- base::TimeDelta sample_duration_;
-
- DISALLOW_COPY_AND_ASSIGN(FrameProcessorBase);
-};
-
-} // namespace media
-
-#endif // MEDIA_FILTERS_FRAME_PROCESSOR_BASE_H_
diff --git a/media/filters/gpu_video_decoder.cc b/media/filters/gpu_video_decoder.cc
index bc545b7d65..efabc95eca 100644
--- a/media/filters/gpu_video_decoder.cc
+++ b/media/filters/gpu_video_decoder.cc
@@ -98,15 +98,6 @@ void GpuVideoDecoder::Reset(const base::Closure& closure) {
vda_->Reset();
}
-void GpuVideoDecoder::Stop() {
- DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent();
- if (vda_)
- DestroyVDA();
- DCHECK(bitstream_buffers_in_decoder_.empty());
- if (!pending_reset_cb_.is_null())
- base::ResetAndReturn(&pending_reset_cb_).Run();
-}
-
static bool IsCodedSizeSupported(const gfx::Size& coded_size) {
#if defined(OS_WIN)
// Windows Media Foundation H.264 decoding does not support decoding videos
@@ -560,14 +551,20 @@ void GpuVideoDecoder::NotifyEndOfBitstreamBuffer(int32 id) {
GpuVideoDecoder::~GpuVideoDecoder() {
DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent();
- // Stop should have been already called.
- DCHECK(!vda_.get() && assigned_picture_buffers_.empty());
+ if (vda_)
+ DestroyVDA();
DCHECK(bitstream_buffers_in_decoder_.empty());
+ DCHECK(assigned_picture_buffers_.empty());
+
+ if (!pending_reset_cb_.is_null())
+ base::ResetAndReturn(&pending_reset_cb_).Run();
+
for (size_t i = 0; i < available_shm_segments_.size(); ++i) {
available_shm_segments_[i]->shm->Close();
delete available_shm_segments_[i];
}
available_shm_segments_.clear();
+
for (std::map<int32, PendingDecoderBuffer>::iterator it =
bitstream_buffers_in_decoder_.begin();
it != bitstream_buffers_in_decoder_.end(); ++it) {
diff --git a/media/filters/gpu_video_decoder.h b/media/filters/gpu_video_decoder.h
index e15200830b..bae5566d1e 100644
--- a/media/filters/gpu_video_decoder.h
+++ b/media/filters/gpu_video_decoder.h
@@ -49,7 +49,6 @@ class MEDIA_EXPORT GpuVideoDecoder
virtual void Decode(const scoped_refptr<DecoderBuffer>& buffer,
const DecodeCB& decode_cb) OVERRIDE;
virtual void Reset(const base::Closure& closure) OVERRIDE;
- virtual void Stop() OVERRIDE;
virtual bool NeedsBitstreamConversion() const OVERRIDE;
virtual bool CanReadWithoutStalling() const OVERRIDE;
virtual int GetMaxDecodeRequests() const OVERRIDE;
diff --git a/media/filters/h264_bitstream_buffer.cc b/media/filters/h264_bitstream_buffer.cc
new file mode 100644
index 0000000000..48463a5be1
--- /dev/null
+++ b/media/filters/h264_bitstream_buffer.cc
@@ -0,0 +1,152 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/filters/h264_bitstream_buffer.h"
+
+#include "base/sys_byteorder.h"
+
+namespace media {
+
+H264BitstreamBuffer::H264BitstreamBuffer() : data_(NULL) {
+ Reset();
+}
+
+H264BitstreamBuffer::~H264BitstreamBuffer() {
+ free(data_);
+ data_ = NULL;
+}
+
+void H264BitstreamBuffer::Reset() {
+ free(data_);
+ data_ = NULL;
+
+ capacity_ = 0;
+ pos_ = 0;
+ reg_ = 0;
+
+ Grow();
+
+ bits_left_in_reg_ = kRegBitSize;
+}
+
+void H264BitstreamBuffer::Grow() {
+ data_ = static_cast<uint8*>(realloc(data_, capacity_ + kGrowBytes));
+ CHECK(data_) << "Failed growing the buffer";
+ capacity_ += kGrowBytes;
+}
+
+void H264BitstreamBuffer::FlushReg() {
+ // Flush all bytes that have at least one bit cached, but not more
+ // (on Flush(), reg_ may not be full).
+ size_t bits_in_reg = kRegBitSize - bits_left_in_reg_;
+ if (bits_in_reg == 0)
+ return;
+
+ size_t bytes_in_reg = (bits_in_reg + 7) / 8;
+ reg_ <<= (kRegBitSize - bits_in_reg);
+
+ // Convert to MSB and append as such to the stream.
+ reg_ = base::HostToNet64(reg_);
+
+ // Make sure we have enough space. Grow() will CHECK() on allocation failure.
+ if (pos_ + bytes_in_reg > capacity_)
+ Grow();
+
+ memcpy(data_ + pos_, &reg_, bytes_in_reg);
+ pos_ += bytes_in_reg;
+
+ reg_ = 0;
+ bits_left_in_reg_ = kRegBitSize;
+}
+
+void H264BitstreamBuffer::AppendU64(size_t num_bits, uint64 val) {
+ CHECK_LE(num_bits, kRegBitSize);
+
+ while (num_bits > 0) {
+ if (bits_left_in_reg_ == 0)
+ FlushReg();
+
+ uint64 bits_to_write =
+ num_bits > bits_left_in_reg_ ? bits_left_in_reg_ : num_bits;
+ uint64 val_to_write = (val >> (num_bits - bits_to_write));
+ if (bits_to_write < 64)
+ val_to_write &= ((1ull << bits_to_write) - 1);
+ reg_ <<= bits_to_write;
+ reg_ |= val_to_write;
+ num_bits -= bits_to_write;
+ bits_left_in_reg_ -= bits_to_write;
+ }
+}
+
+void H264BitstreamBuffer::AppendBool(bool val) {
+ if (bits_left_in_reg_ == 0)
+ FlushReg();
+
+ reg_ <<= 1;
+ reg_ |= (static_cast<uint64>(val) & 1);
+ --bits_left_in_reg_;
+}
+
+void H264BitstreamBuffer::AppendSE(int val) {
+ if (val > 0)
+ AppendUE(val * 2 - 1);
+ else
+ AppendUE(-val * 2);
+}
+
+void H264BitstreamBuffer::AppendUE(unsigned int val) {
+ size_t num_zeros = 0;
+ unsigned int v = val + 1;
+
+ while (v > 1) {
+ v >>= 1;
+ ++num_zeros;
+ }
+
+ AppendBits(num_zeros, 0);
+ AppendBits(num_zeros + 1, val + 1);
+}
+
+#define DCHECK_FINISHED() \
+ DCHECK_EQ(bits_left_in_reg_, kRegBitSize) << "Pending bits not yet written " \
+ "to the buffer, call " \
+ "FinishNALU() first."
+
+void H264BitstreamBuffer::BeginNALU(H264NALU::Type nalu_type, int nal_ref_idc) {
+ DCHECK_FINISHED();
+
+ DCHECK_LE(nalu_type, H264NALU::kEOStream);
+ DCHECK_GE(nal_ref_idc, 0);
+ DCHECK_LE(nal_ref_idc, 3);
+
+ AppendBits(32, 0x00000001);
+ AppendBits(1, 0); // forbidden_zero_bit
+ AppendBits(2, nal_ref_idc);
+ AppendBits(5, nalu_type);
+}
+
+void H264BitstreamBuffer::FinishNALU() {
+ // RBSP stop one bit.
+ AppendBits(1, 1);
+
+ // Byte-alignment zero bits.
+ AppendBits(bits_left_in_reg_ % 8, 0);
+
+ if (bits_left_in_reg_ != kRegBitSize)
+ FlushReg();
+}
+
+size_t H264BitstreamBuffer::BytesInBuffer() {
+ DCHECK_FINISHED();
+ return pos_;
+}
+
+uint8* H264BitstreamBuffer::data() {
+ DCHECK(data_);
+ DCHECK_FINISHED();
+
+ return data_;
+}
+
+} // namespace media
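
A minimal sketch, not part of the patch above, illustrating the coding that AppendUE()/AppendSE() implement: unsigned Exp-Golomb ue(v) is N zero bits followed by (v + 1) written in N + 1 bits, with N = floor(log2(v + 1)); AppendSE(v) maps v > 0 to ue(2v - 1) and v <= 0 to ue(-2v). The ExpGolombBits() helper below is hypothetical and exists only for illustration.

// Standalone illustration of the Exp-Golomb bit patterns produced above.
#include <cassert>
#include <cstdint>
#include <string>

// Returns ue(v) as a string of '0'/'1' characters: N leading zeros followed
// by (v + 1) written in N + 1 bits, where N = floor(log2(v + 1)).
static std::string ExpGolombBits(unsigned int v) {
  uint64_t x = v + 1ull;
  int num_zeros = 0;
  while ((x >> (num_zeros + 1)) != 0)
    ++num_zeros;
  std::string bits(num_zeros, '0');
  for (int i = num_zeros; i >= 0; --i)
    bits.push_back(((x >> i) & 1) ? '1' : '0');
  return bits;
}

int main() {
  assert(ExpGolombBits(0) == "1");      // ue(0); what AppendUE(0) emits.
  assert(ExpGolombBits(3) == "00100");  // ue(3); what AppendUE(3) emits.
  // AppendSE(v): v > 0 -> ue(2v - 1), v <= 0 -> ue(-2v).
  assert(ExpGolombBits(2 * 1 - 1) == "010");  // se(1)
  assert(ExpGolombBits(-2 * -1) == "011");    // se(-1)
  return 0;
}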
diff --git a/media/filters/h264_bitstream_buffer.h b/media/filters/h264_bitstream_buffer.h
new file mode 100644
index 0000000000..4b0511d9bc
--- /dev/null
+++ b/media/filters/h264_bitstream_buffer.h
@@ -0,0 +1,120 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// This file contains an implementation of a H264BitstreamBuffer class for
+// constructing raw bitstream buffers containing NAL units in
+// H.264 Annex-B stream format.
+// See H.264 spec Annex B and chapter 7 for more details.
+
+#ifndef MEDIA_FILTERS_H264_BITSTREAM_BUFFER_H_
+#define MEDIA_FILTERS_H264_BITSTREAM_BUFFER_H_
+
+#include "base/gtest_prod_util.h"
+#include "base/numerics/safe_conversions.h"
+#include "media/base/media_export.h"
+#include "media/base/video_frame.h"
+#include "media/filters/h264_parser.h"
+
+namespace media {
+
+// Holds one or more NALUs as a raw bitstream buffer in H.264 Annex-B format.
+// Note that this class currently does NOT insert emulation prevention
+// three-byte sequences (spec 7.3.1).
+class MEDIA_EXPORT H264BitstreamBuffer {
+ public:
+ H264BitstreamBuffer();
+ ~H264BitstreamBuffer();
+
+ // Discard all data and reset the buffer for reuse.
+ void Reset();
+
+ // Append |num_bits| bits to the stream from |val|.
+ // |val| is interpreted in the host endianness.
+ template <typename T>
+ void AppendBits(size_t num_bits, T val) {
+ AppendU64(num_bits, static_cast<uint64>(val));
+ }
+
+ void AppendBits(size_t num_bits, bool val) {
+ DCHECK_EQ(num_bits, 1ul);
+ AppendBool(val);
+ }
+
+ // Append a one-bit bool/flag value |val| to the stream.
+ void AppendBool(bool val);
+
+ // Append a signed value in |val| in Exp-Golomb code.
+ void AppendSE(int val);
+
+ // Append an unsigned value in |val| in Exp-Golomb code.
+ void AppendUE(unsigned int val);
+
+ // Start a new NALU of type |nalu_type| and with given |nal_ref_idc|
+ // (see spec). Note that until FinishNALU() is called, some of the bits
+ // may not be flushed into the buffer and the data will not be correctly
+ // aligned with trailing bits.
+ void BeginNALU(H264NALU::Type nalu_type, int nal_ref_idc);
+
+ // Finish current NALU. This will flush any cached bits and correctly align
+ // the buffer with RBSP trailing bits. This MUST be called for the stream
+ // returned by data() to be correct.
+ void FinishNALU();
+
+ // Return number of full bytes in the stream. Note that FinishNALU() has to
+ // be called to flush cached bits, or the return value will not include them.
+ size_t BytesInBuffer();
+
+ // Return a pointer to the stream. FinishNALU() must be called before
+ // accessing the stream, otherwise some bits may still be cached and not
+ // in the buffer.
+ uint8* data();
+
+ private:
+ FRIEND_TEST_ALL_PREFIXES(H264BitstreamBufferAppendBitsTest,
+ AppendAndVerifyBits);
+
+ // Allocate additional memory (kGrowBytes bytes) for the buffer.
+ void Grow();
+
+ // Append |num_bits| bits from U64 value |val| (in host endianness).
+ void AppendU64(size_t num_bits, uint64 val);
+
+ // Flush any cached bits in the reg with byte granularity, i.e. enough
+ // bytes to flush all pending bits, but not more.
+ void FlushReg();
+
+ typedef uint64 RegType;
+ enum {
+ // Sizes of reg_.
+ kRegByteSize = sizeof(RegType),
+ kRegBitSize = kRegByteSize * 8,
+ // Number of bytes to grow the buffer by when we run out of
+ // previously-allocated memory for it.
+ kGrowBytes = 4096,
+ };
+
+ COMPILE_ASSERT(kGrowBytes >= kRegByteSize,
+ kGrowBytes_must_be_larger_than_kRegByteSize);
+
+ // Unused bits left in reg_.
+ size_t bits_left_in_reg_;
+
+ // Cache for appended bits. Bits are flushed to data_ with kRegByteSize
+ // granularity, i.e. when reg_ becomes full, or when an explicit FlushReg()
+ // is called.
+ RegType reg_;
+
+ // Current capacity of data_, in bytes.
+ size_t capacity_;
+
+ // Current byte offset in data_ (points to the start of unwritten bits).
+ size_t pos_;
+
+ // Buffer for stream data.
+ uint8* data_;
+};
+
+} // namespace media
+
+#endif // MEDIA_FILTERS_H264_BITSTREAM_BUFFER_H_
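
A minimal usage sketch for the class declared above, not part of the patch: the expected call sequence is BeginNALU(), a series of Append*() calls, then FinishNALU() before reading data()/BytesInBuffer(). The field values below are arbitrary and do not form a complete, valid SPS.

#include "media/filters/h264_bitstream_buffer.h"

void WriteExampleNALU(media::H264BitstreamBuffer* b) {
  // Emits the Annex-B start code 0x00000001 plus the one-byte NALU header.
  b->BeginNALU(media::H264NALU::kSPS, 1);

  b->AppendBits(8, 66);   // profile_idc (Baseline), fixed 8 bits.
  b->AppendBool(false);   // constraint_set0_flag, a single bit.
  b->AppendUE(0);         // seq_parameter_set_id, unsigned Exp-Golomb.
  b->AppendSE(-2);        // example signed Exp-Golomb field.

  // Flush cached bits and append the RBSP stop bit plus alignment zeros.
  b->FinishNALU();

  // Only after FinishNALU() do data()/BytesInBuffer() describe the stream.
  const uint8* stream = b->data();
  size_t size = b->BytesInBuffer();
  (void)stream;
  (void)size;
}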
diff --git a/media/filters/h264_bitstream_buffer_unittest.cc b/media/filters/h264_bitstream_buffer_unittest.cc
new file mode 100644
index 0000000000..53f33d3cbc
--- /dev/null
+++ b/media/filters/h264_bitstream_buffer_unittest.cc
@@ -0,0 +1,56 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/filters/h264_bitstream_buffer.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+namespace {
+const uint64 kTestPattern = 0xfedcba0987654321;
+}
+
+class H264BitstreamBufferAppendBitsTest
+ : public ::testing::TestWithParam<uint64> {};
+
+// TODO(posciak): More tests!
+
+TEST_P(H264BitstreamBufferAppendBitsTest, AppendAndVerifyBits) {
+ H264BitstreamBuffer b;
+ uint64 num_bits = GetParam();
+ // TODO(posciak): Tests for >64 bits.
+ ASSERT_LE(num_bits, 64u);
+ uint64 num_bytes = (num_bits + 7) / 8;
+
+ b.AppendBits(num_bits, kTestPattern);
+ b.FlushReg();
+
+ EXPECT_EQ(b.BytesInBuffer(), num_bytes);
+
+ uint8* ptr = b.data();
+ uint64 got = 0;
+ uint64 expected = kTestPattern;
+
+ if (num_bits < 64)
+ expected &= ((1ull << num_bits) - 1);
+
+ while (num_bits > 8) {
+ got |= (*ptr & 0xff);
+ num_bits -= 8;
+ got <<= (num_bits > 8 ? 8 : num_bits);
+ ptr++;
+ }
+ if (num_bits > 0) {
+ uint64 temp = (*ptr & 0xff);
+ temp >>= (8 - num_bits);
+ got |= temp;
+ }
+ EXPECT_EQ(got, expected) << std::hex << "0x" << got << " vs 0x" << expected;
+}
+
+INSTANTIATE_TEST_CASE_P(AppendNumBits,
+ H264BitstreamBufferAppendBitsTest,
+ ::testing::Range(static_cast<uint64>(1),
+ static_cast<uint64>(65)));
+} // namespace media
diff --git a/media/filters/h264_parser.cc b/media/filters/h264_parser.cc
index 4cdc695933..ee21ab82a1 100644
--- a/media/filters/h264_parser.cc
+++ b/media/filters/h264_parser.cc
@@ -106,10 +106,6 @@ H264SEIMessage::H264SEIMessage() {
} \
} while (0)
-enum AspectRatioIdc {
- kExtendedSar = 255,
-};
-
// ISO 14496 part 10
// VUI parameters: Table E-1 "Meaning of sample aspect ratio indicator"
static const int kTableSarWidth[] = {
@@ -608,7 +604,7 @@ H264Parser::Result H264Parser::ParseVUIParameters(H264SPS* sps) {
if (aspect_ratio_info_present_flag) {
int aspect_ratio_idc;
READ_BITS_OR_RETURN(8, &aspect_ratio_idc);
- if (aspect_ratio_idc == kExtendedSar) {
+ if (aspect_ratio_idc == H264SPS::kExtendedSar) {
READ_BITS_OR_RETURN(16, &sps->sar_width);
READ_BITS_OR_RETURN(16, &sps->sar_height);
} else {
diff --git a/media/filters/h264_parser.h b/media/filters/h264_parser.h
index 3a60dcea65..45020af613 100644
--- a/media/filters/h264_parser.h
+++ b/media/filters/h264_parser.h
@@ -63,6 +63,26 @@ enum {
struct MEDIA_EXPORT H264SPS {
H264SPS();
+ enum H264ProfileIDC {
+ kProfileIDCBaseline = 66,
+ kProfileIDCConstrainedBaseline = kProfileIDCBaseline,
+ kProfileIDCMain = 77,
+ kProfileIDCHigh = 100,
+ };
+
+ enum AspectRatioIdc {
+ kExtendedSar = 255,
+ };
+
+ enum {
+ // Constants for HRD parameters (spec ch. E.2.2).
+ kBitRateScaleConstantTerm = 6, // Equation E-37.
+ kCPBSizeScaleConstantTerm = 4, // Equation E-38.
+ kDefaultInitialCPBRemovalDelayLength = 24,
+ kDefaultDPBOutputDelayLength = 24,
+ kDefaultTimeOffsetLength = 24,
+ };
+
int profile_idc;
bool constraint_set0_flag;
bool constraint_set1_flag;
@@ -111,6 +131,25 @@ struct MEDIA_EXPORT H264SPS {
bool bitstream_restriction_flag;
int max_num_reorder_frames;
int max_dec_frame_buffering;
+ bool timing_info_present_flag;
+ int num_units_in_tick;
+ int time_scale;
+ bool fixed_frame_rate_flag;
+
+ // TODO(posciak): actually parse these instead of ParseAndIgnoreHRDParameters.
+ bool nal_hrd_parameters_present_flag;
+ int cpb_cnt_minus1;
+ int bit_rate_scale;
+ int cpb_size_scale;
+ int bit_rate_value_minus1[32];
+ int cpb_size_value_minus1[32];
+ bool cbr_flag[32];
+ int initial_cpb_removal_delay_length_minus_1;
+ int cpb_removal_delay_length_minus1;
+ int dpb_output_delay_length_minus1;
+ int time_offset_length;
+
+ bool low_delay_hrd_flag;
int chroma_array_type;
};
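
A sketch, not part of the patch above, of how the new HRD constants and fields are conventionally combined per H.264 equations E-37 and E-38; the helper names are hypothetical and int64_t stands in for Chromium's integer typedefs.

#include <stdint.h>
#include "media/filters/h264_parser.h"

int64_t HrdBitRate(const media::H264SPS& sps, int i) {
  // E-37: BitRate[i] = (bit_rate_value_minus1[i] + 1) * 2^(6 + bit_rate_scale)
  return static_cast<int64_t>(sps.bit_rate_value_minus1[i] + 1)
         << (media::H264SPS::kBitRateScaleConstantTerm + sps.bit_rate_scale);
}

int64_t HrdCpbSize(const media::H264SPS& sps, int i) {
  // E-38: CpbSize[i] = (cpb_size_value_minus1[i] + 1) * 2^(4 + cpb_size_scale)
  return static_cast<int64_t>(sps.cpb_size_value_minus1[i] + 1)
         << (media::H264SPS::kCPBSizeScaleConstantTerm + sps.cpb_size_scale);
}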
diff --git a/media/filters/opus_audio_decoder.cc b/media/filters/opus_audio_decoder.cc
index bf59adf691..62784ddf71 100644
--- a/media/filters/opus_audio_decoder.cc
+++ b/media/filters/opus_audio_decoder.cc
@@ -173,7 +173,8 @@ struct OpusExtraData {
channel_mapping(0),
num_streams(0),
num_coupled(0),
- gain_db(0) {
+ gain_db(0),
+ stream_map() {
memcpy(stream_map,
kDefaultOpusChannelLayout,
kMaxChannelsWithDefaultLayout);
@@ -281,7 +282,7 @@ void OpusAudioDecoder::Reset(const base::Closure& closure) {
task_runner_->PostTask(FROM_HERE, closure);
}
-void OpusAudioDecoder::Stop() {
+OpusAudioDecoder::~OpusAudioDecoder() {
DCHECK(task_runner_->BelongsToCurrentThread());
if (!opus_decoder_)
@@ -292,10 +293,6 @@ void OpusAudioDecoder::Stop() {
CloseDecoder();
}
-OpusAudioDecoder::~OpusAudioDecoder() {
- DCHECK(!opus_decoder_);
-}
-
void OpusAudioDecoder::DecodeBuffer(
const scoped_refptr<DecoderBuffer>& input,
const DecodeCB& decode_cb) {
diff --git a/media/filters/opus_audio_decoder.h b/media/filters/opus_audio_decoder.h
index 504701a52f..19ef04d0db 100644
--- a/media/filters/opus_audio_decoder.h
+++ b/media/filters/opus_audio_decoder.h
@@ -37,7 +37,6 @@ class MEDIA_EXPORT OpusAudioDecoder : public AudioDecoder {
virtual void Decode(const scoped_refptr<DecoderBuffer>& buffer,
const DecodeCB& decode_cb) OVERRIDE;
virtual void Reset(const base::Closure& closure) OVERRIDE;
- virtual void Stop() OVERRIDE;
private:
// Reads from the demuxer stream with corresponding callback method.
diff --git a/media/filters/pipeline_integration_test.cc b/media/filters/pipeline_integration_test.cc
index f991dc3997..2a1cf6b969 100644
--- a/media/filters/pipeline_integration_test.cc
+++ b/media/filters/pipeline_integration_test.cc
@@ -535,9 +535,11 @@ class PipelineIntegrationTest
public PipelineIntegrationTestBase {
public:
void StartPipelineWithMediaSource(MockMediaSource* source) {
- EXPECT_CALL(*this, OnMetadata(_)).Times(AtMost(1))
+ EXPECT_CALL(*this, OnMetadata(_))
+ .Times(AtMost(1))
.WillRepeatedly(SaveArg<0>(&metadata_));
- EXPECT_CALL(*this, OnPrerollCompleted()).Times(AtMost(1));
+ EXPECT_CALL(*this, OnBufferingStateChanged(BUFFERING_HAVE_ENOUGH))
+ .Times(AtMost(1));
pipeline_->Start(
CreateFilterCollection(source->GetDemuxer(), NULL),
base::Bind(&PipelineIntegrationTest::OnEnded, base::Unretained(this)),
@@ -545,7 +547,7 @@ class PipelineIntegrationTest
QuitOnStatusCB(PIPELINE_OK),
base::Bind(&PipelineIntegrationTest::OnMetadata,
base::Unretained(this)),
- base::Bind(&PipelineIntegrationTest::OnPrerollCompleted,
+ base::Bind(&PipelineIntegrationTest::OnBufferingStateChanged,
base::Unretained(this)),
base::Closure());
@@ -560,9 +562,11 @@ class PipelineIntegrationTest
void StartPipelineWithEncryptedMedia(
MockMediaSource* source,
FakeEncryptedMedia* encrypted_media) {
- EXPECT_CALL(*this, OnMetadata(_)).Times(AtMost(1))
+ EXPECT_CALL(*this, OnMetadata(_))
+ .Times(AtMost(1))
.WillRepeatedly(SaveArg<0>(&metadata_));
- EXPECT_CALL(*this, OnPrerollCompleted()).Times(AtMost(1));
+ EXPECT_CALL(*this, OnBufferingStateChanged(BUFFERING_HAVE_ENOUGH))
+ .Times(AtMost(1));
pipeline_->Start(
CreateFilterCollection(source->GetDemuxer(),
encrypted_media->decryptor()),
@@ -571,7 +575,7 @@ class PipelineIntegrationTest
QuitOnStatusCB(PIPELINE_OK),
base::Bind(&PipelineIntegrationTest::OnMetadata,
base::Unretained(this)),
- base::Bind(&PipelineIntegrationTest::OnPrerollCompleted,
+ base::Bind(&PipelineIntegrationTest::OnBufferingStateChanged,
base::Unretained(this)),
base::Closure());
@@ -1390,6 +1394,28 @@ TEST_F(PipelineIntegrationTest, DISABLED_SeekWhilePlaying) {
ASSERT_TRUE(WaitUntilOnEnded());
}
+#if defined(USE_PROPRIETARY_CODECS)
+TEST_F(PipelineIntegrationTest, Rotated_Metadata_0) {
+ ASSERT_TRUE(Start(GetTestDataFilePath("bear_rotate_0.mp4"), PIPELINE_OK));
+ ASSERT_EQ(VIDEO_ROTATION_0, metadata_.video_rotation);
+}
+
+TEST_F(PipelineIntegrationTest, Rotated_Metadata_90) {
+ ASSERT_TRUE(Start(GetTestDataFilePath("bear_rotate_90.mp4"), PIPELINE_OK));
+ ASSERT_EQ(VIDEO_ROTATION_90, metadata_.video_rotation);
+}
+
+TEST_F(PipelineIntegrationTest, Rotated_Metadata_180) {
+ ASSERT_TRUE(Start(GetTestDataFilePath("bear_rotate_180.mp4"), PIPELINE_OK));
+ ASSERT_EQ(VIDEO_ROTATION_180, metadata_.video_rotation);
+}
+
+TEST_F(PipelineIntegrationTest, Rotated_Metadata_270) {
+ ASSERT_TRUE(Start(GetTestDataFilePath("bear_rotate_270.mp4"), PIPELINE_OK));
+ ASSERT_EQ(VIDEO_ROTATION_270, metadata_.video_rotation);
+}
+#endif
+
// Verify audio decoder & renderer can handle aborted demuxer reads.
TEST_F(PipelineIntegrationTest, ChunkDemuxerAbortRead_AudioOnly) {
ASSERT_TRUE(TestSeekDuringRead("bear-320x240-audio-only.webm", kAudioOnlyWebM,
@@ -1451,6 +1477,14 @@ TEST_F(PipelineIntegrationTest, BasicPlayback_VP8A_Odd_WebM) {
EXPECT_EQ(last_video_frame_format_, VideoFrame::YV12A);
}
+// Verify that VP9 video with odd width/height can be played back.
+TEST_F(PipelineIntegrationTest, BasicPlayback_VP9_Odd_WebM) {
+ ASSERT_TRUE(Start(GetTestDataFilePath("bear-vp9-odd-dimensions.webm"),
+ PIPELINE_OK));
+ Play();
+ ASSERT_TRUE(WaitUntilOnEnded());
+}
+
// Verify that VP8 video with inband text track can be played back.
TEST_F(PipelineIntegrationTest,
BasicPlayback_VP8_WebVTT_WebM) {
@@ -1461,7 +1495,9 @@ TEST_F(PipelineIntegrationTest,
}
// Verify that VP9 video with 4:4:4 subsampling can be played back.
-TEST_F(PipelineIntegrationTest, P444_VP9_WebM) {
+// TODO(johannkoenig): Reenable after landing libvpx roll
+// http://www.crbug.com/392309
+TEST_F(PipelineIntegrationTest, DISABLED_P444_VP9_WebM) {
ASSERT_TRUE(Start(GetTestDataFilePath("bear-320x240-P444.webm"),
PIPELINE_OK));
Play();
@@ -1477,4 +1513,32 @@ TEST_F(PipelineIntegrationTest, BasicPlayback_OddVideoSize) {
ASSERT_TRUE(WaitUntilOnEnded());
}
+// Verify that OPUS audio in a webm which reports a 44.1kHz sample rate plays
+// correctly at 48kHz.
+TEST_F(PipelineIntegrationTest, BasicPlayback_Opus441kHz) {
+ ASSERT_TRUE(Start(GetTestDataFilePath("sfx-opus-441.webm"), PIPELINE_OK));
+ Play();
+ ASSERT_TRUE(WaitUntilOnEnded());
+ EXPECT_EQ(48000,
+ demuxer_->GetStream(DemuxerStream::AUDIO)
+ ->audio_decoder_config()
+ .samples_per_second());
+}
+
+// Same as above but using MediaSource.
+TEST_F(PipelineIntegrationTest, BasicPlayback_MediaSource_Opus441kHz) {
+ MockMediaSource source(
+ "sfx-opus-441.webm", kOpusAudioOnlyWebM, kAppendWholeFile);
+ StartPipelineWithMediaSource(&source);
+ source.EndOfStream();
+ Play();
+ ASSERT_TRUE(WaitUntilOnEnded());
+ source.Abort();
+ Stop();
+ EXPECT_EQ(48000,
+ demuxer_->GetStream(DemuxerStream::AUDIO)
+ ->audio_decoder_config()
+ .samples_per_second());
+}
+
} // namespace media
diff --git a/media/filters/pipeline_integration_test_base.cc b/media/filters/pipeline_integration_test_base.cc
index c179903ad3..54e7f58aea 100644
--- a/media/filters/pipeline_integration_test_base.cc
+++ b/media/filters/pipeline_integration_test_base.cc
@@ -6,8 +6,8 @@
#include "base/bind.h"
#include "base/memory/scoped_vector.h"
-#include "media/base/clock.h"
#include "media/base/media_log.h"
+#include "media/base/time_delta_interpolator.h"
#include "media/filters/audio_renderer_impl.h"
#include "media/filters/chunk_demuxer.h"
#include "media/filters/ffmpeg_audio_decoder.h"
@@ -37,6 +37,10 @@ PipelineIntegrationTestBase::PipelineIntegrationTestBase()
last_video_frame_format_(VideoFrame::UNKNOWN),
hardware_config_(AudioParameters(), AudioParameters()) {
base::MD5Init(&md5_context_);
+
+ // Prevent non-deterministic buffering state callbacks from firing (e.g., slow
+ // machine, valgrind).
+ pipeline_->set_underflow_disabled_for_testing(true);
}
PipelineIntegrationTestBase::~PipelineIntegrationTestBase() {
@@ -104,9 +108,11 @@ void PipelineIntegrationTestBase::OnError(PipelineStatus status) {
bool PipelineIntegrationTestBase::Start(const base::FilePath& file_path,
PipelineStatus expected_status) {
- EXPECT_CALL(*this, OnMetadata(_)).Times(AtMost(1))
+ EXPECT_CALL(*this, OnMetadata(_))
+ .Times(AtMost(1))
.WillRepeatedly(SaveArg<0>(&metadata_));
- EXPECT_CALL(*this, OnPrerollCompleted()).Times(AtMost(1));
+ EXPECT_CALL(*this, OnBufferingStateChanged(BUFFERING_HAVE_ENOUGH))
+ .Times(AtMost(1));
pipeline_->Start(
CreateFilterCollection(file_path, NULL),
base::Bind(&PipelineIntegrationTestBase::OnEnded, base::Unretained(this)),
@@ -114,7 +120,7 @@ bool PipelineIntegrationTestBase::Start(const base::FilePath& file_path,
QuitOnStatusCB(expected_status),
base::Bind(&PipelineIntegrationTestBase::OnMetadata,
base::Unretained(this)),
- base::Bind(&PipelineIntegrationTestBase::OnPrerollCompleted,
+ base::Bind(&PipelineIntegrationTestBase::OnBufferingStateChanged,
base::Unretained(this)),
base::Closure());
message_loop_.Run();
@@ -127,7 +133,8 @@ bool PipelineIntegrationTestBase::Start(const base::FilePath& file_path,
hashing_enabled_ = test_type == kHashed;
clockless_playback_ = test_type == kClockless;
if (clockless_playback_) {
- pipeline_->SetClockForTesting(new Clock(&dummy_clock_));
+ pipeline_->SetTimeDeltaInterpolatorForTesting(
+ new TimeDeltaInterpolator(&dummy_clock_));
}
return Start(file_path, expected_status);
}
@@ -138,9 +145,11 @@ bool PipelineIntegrationTestBase::Start(const base::FilePath& file_path) {
bool PipelineIntegrationTestBase::Start(const base::FilePath& file_path,
Decryptor* decryptor) {
- EXPECT_CALL(*this, OnMetadata(_)).Times(AtMost(1))
+ EXPECT_CALL(*this, OnMetadata(_))
+ .Times(AtMost(1))
.WillRepeatedly(SaveArg<0>(&metadata_));
- EXPECT_CALL(*this, OnPrerollCompleted()).Times(AtMost(1));
+ EXPECT_CALL(*this, OnBufferingStateChanged(BUFFERING_HAVE_ENOUGH))
+ .Times(AtMost(1));
pipeline_->Start(
CreateFilterCollection(file_path, decryptor),
base::Bind(&PipelineIntegrationTestBase::OnEnded, base::Unretained(this)),
@@ -149,7 +158,7 @@ bool PipelineIntegrationTestBase::Start(const base::FilePath& file_path,
base::Unretained(this)),
base::Bind(&PipelineIntegrationTestBase::OnMetadata,
base::Unretained(this)),
- base::Bind(&PipelineIntegrationTestBase::OnPrerollCompleted,
+ base::Bind(&PipelineIntegrationTestBase::OnBufferingStateChanged,
base::Unretained(this)),
base::Closure());
message_loop_.Run();
@@ -167,7 +176,7 @@ void PipelineIntegrationTestBase::Pause() {
bool PipelineIntegrationTestBase::Seek(base::TimeDelta seek_time) {
ended_ = false;
- EXPECT_CALL(*this, OnPrerollCompleted());
+ EXPECT_CALL(*this, OnBufferingStateChanged(BUFFERING_HAVE_ENOUGH));
pipeline_->Seek(seek_time, QuitOnStatusCB(PIPELINE_OK));
message_loop_.Run();
return (pipeline_status_ == PIPELINE_OK);
@@ -239,8 +248,10 @@ PipelineIntegrationTestBase::CreateFilterCollection(
collection->SetDemuxer(demuxer_.get());
ScopedVector<VideoDecoder> video_decoders;
+#if !defined(MEDIA_DISABLE_LIBVPX)
video_decoders.push_back(
new VpxVideoDecoder(message_loop_.message_loop_proxy()));
+#endif // !defined(MEDIA_DISABLE_LIBVPX)
video_decoders.push_back(
new FFmpegVideoDecoder(message_loop_.message_loop_proxy()));
diff --git a/media/filters/pipeline_integration_test_base.h b/media/filters/pipeline_integration_test_base.h
index 10cf2620a8..37f744c0e4 100644
--- a/media/filters/pipeline_integration_test_base.h
+++ b/media/filters/pipeline_integration_test_base.h
@@ -134,7 +134,7 @@ class PipelineIntegrationTestBase {
void OnVideoRendererPaint(const scoped_refptr<VideoFrame>& frame);
MOCK_METHOD1(OnMetadata, void(PipelineMetadata));
- MOCK_METHOD0(OnPrerollCompleted, void());
+ MOCK_METHOD1(OnBufferingStateChanged, void(BufferingState));
};
} // namespace media
diff --git a/media/filters/skcanvas_video_renderer.cc b/media/filters/skcanvas_video_renderer.cc
index 0f5dd0ecdb..062e317b71 100644
--- a/media/filters/skcanvas_video_renderer.cc
+++ b/media/filters/skcanvas_video_renderer.cc
@@ -56,10 +56,8 @@ static void ConvertVideoFrameToBitmap(
if (bitmap->isNull() ||
bitmap->width() != video_frame->visible_rect().width() ||
bitmap->height() != video_frame->visible_rect().height()) {
- bitmap->setConfig(SkBitmap::kARGB_8888_Config,
- video_frame->visible_rect().width(),
- video_frame->visible_rect().height());
- bitmap->allocPixels();
+ bitmap->allocN32Pixels(video_frame->visible_rect().width(),
+ video_frame->visible_rect().height());
bitmap->setIsVolatile(true);
}
@@ -215,6 +213,10 @@ void SkCanvasVideoRenderer::Paint(media::VideoFrame* video_frame,
last_frame_timestamp_ = video_frame->timestamp();
}
+ // Use SRC mode so we completely overwrite the buffer (in case we have alpha);
+ // this means we don't need the extra cost of clearing the buffer first.
+ paint.setXfermode(SkXfermode::Create(SkXfermode::kSrc_Mode));
+
// Paint using |last_frame_|.
paint.setFilterLevel(SkPaint::kLow_FilterLevel);
canvas->drawBitmapRect(last_frame_, NULL, dest, &paint);
diff --git a/media/filters/skcanvas_video_renderer_unittest.cc b/media/filters/skcanvas_video_renderer_unittest.cc
index dd01c70496..5db3a34d6c 100644
--- a/media/filters/skcanvas_video_renderer_unittest.cc
+++ b/media/filters/skcanvas_video_renderer_unittest.cc
@@ -214,6 +214,16 @@ TEST_F(SkCanvasVideoRendererTest, NoFrame) {
EXPECT_EQ(SK_ColorBLACK, GetColor(target_canvas()));
}
+TEST_F(SkCanvasVideoRendererTest, TransparentFrame) {
+ // Test that we don't blend with existing canvas contents.
+ FillCanvas(target_canvas(), SK_ColorRED);
+ Paint(VideoFrame::CreateTransparentFrame(gfx::Size(kWidth, kHeight)),
+ target_canvas(),
+ kNone);
+ EXPECT_EQ(static_cast<SkColor>(SK_ColorTRANSPARENT),
+ GetColor(target_canvas()));
+}
+
TEST_F(SkCanvasVideoRendererTest, Natural) {
Paint(natural_frame(), target_canvas(), kRed);
EXPECT_EQ(SK_ColorRED, GetColor(target_canvas()));
diff --git a/media/filters/source_buffer_platform.cc b/media/filters/source_buffer_platform.cc
new file mode 100644
index 0000000000..6457e8cca0
--- /dev/null
+++ b/media/filters/source_buffer_platform.cc
@@ -0,0 +1,14 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/filters/source_buffer_platform.h"
+
+namespace media {
+
+// 12MB: approximately 5 minutes of 320Kbps content.
+// 150MB: approximately 5 minutes of 4Mbps content.
+const int kSourceBufferAudioMemoryLimit = 12 * 1024 * 1024;
+const int kSourceBufferVideoMemoryLimit = 150 * 1024 * 1024;
+
+} // namespace media
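
As a quick check of the comment above (editorial note, not part of the patch): 320 kbit/s x 300 s = 96,000,000 bits = 12,000,000 bytes, roughly the 12 * 1024 * 1024 audio limit, and 4 Mbit/s x 300 s = 1,200,000,000 bits = 150,000,000 bytes, roughly the 150 * 1024 * 1024 video limit.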
diff --git a/media/filters/source_buffer_platform.h b/media/filters/source_buffer_platform.h
new file mode 100644
index 0000000000..b063f99a5e
--- /dev/null
+++ b/media/filters/source_buffer_platform.h
@@ -0,0 +1,18 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_FILTERS_SOURCE_BUFFER_PLATFORM_H_
+#define MEDIA_FILTERS_SOURCE_BUFFER_PLATFORM_H_
+
+#include "media/base/media_export.h"
+
+namespace media {
+
+// The maximum amount of data in bytes the stream will keep in memory.
+MEDIA_EXPORT extern const int kSourceBufferAudioMemoryLimit;
+MEDIA_EXPORT extern const int kSourceBufferVideoMemoryLimit;
+
+} // namespace media
+
+#endif // MEDIA_FILTERS_SOURCE_BUFFER_PLATFORM_H_
diff --git a/media/filters/source_buffer_stream.cc b/media/filters/source_buffer_stream.cc
index fe22b4a795..8bc65854c0 100644
--- a/media/filters/source_buffer_stream.cc
+++ b/media/filters/source_buffer_stream.cc
@@ -11,6 +11,7 @@
#include "base/debug/trace_event.h"
#include "base/logging.h"
#include "media/base/audio_splicer.h"
+#include "media/filters/source_buffer_platform.h"
namespace media {
@@ -333,11 +334,6 @@ static int kDefaultBufferDurationInMs = 125;
static base::TimeDelta kSeekToStartFudgeRoom() {
return base::TimeDelta::FromMilliseconds(1000);
}
-// The maximum amount of data in bytes the stream will keep in memory.
-// 12MB: approximately 5 minutes of 320Kbps content.
-// 150MB: approximately 5 minutes of 4Mbps content.
-static int kDefaultAudioMemoryLimit = 12 * 1024 * 1024;
-static int kDefaultVideoMemoryLimit = 150 * 1024 * 1024;
namespace media {
@@ -358,7 +354,7 @@ SourceBufferStream::SourceBufferStream(const AudioDecoderConfig& audio_config,
last_appended_buffer_is_keyframe_(false),
last_output_buffer_timestamp_(kNoTimestamp()),
max_interbuffer_distance_(kNoTimestamp()),
- memory_limit_(kDefaultAudioMemoryLimit),
+ memory_limit_(kSourceBufferAudioMemoryLimit),
config_change_pending_(false),
splice_buffers_index_(0),
pending_buffers_complete_(false),
@@ -384,7 +380,7 @@ SourceBufferStream::SourceBufferStream(const VideoDecoderConfig& video_config,
last_appended_buffer_is_keyframe_(false),
last_output_buffer_timestamp_(kNoTimestamp()),
max_interbuffer_distance_(kNoTimestamp()),
- memory_limit_(kDefaultVideoMemoryLimit),
+ memory_limit_(kSourceBufferVideoMemoryLimit),
config_change_pending_(false),
splice_buffers_index_(0),
pending_buffers_complete_(false),
@@ -411,7 +407,7 @@ SourceBufferStream::SourceBufferStream(const TextTrackConfig& text_config,
last_appended_buffer_is_keyframe_(false),
last_output_buffer_timestamp_(kNoTimestamp()),
max_interbuffer_distance_(kNoTimestamp()),
- memory_limit_(kDefaultAudioMemoryLimit),
+ memory_limit_(kSourceBufferAudioMemoryLimit),
config_change_pending_(false),
splice_buffers_index_(0),
pending_buffers_complete_(false),
@@ -721,6 +717,11 @@ bool SourceBufferStream::IsMonotonicallyIncreasing(
base::TimeDelta current_timestamp = (*itr)->GetDecodeTimestamp();
bool current_is_keyframe = (*itr)->IsKeyframe();
DCHECK(current_timestamp != kNoTimestamp());
+ DCHECK((*itr)->duration() >= base::TimeDelta())
+ << "Packet with invalid duration."
+ << " pts " << (*itr)->timestamp().InSecondsF()
+ << " dts " << (*itr)->GetDecodeTimestamp().InSecondsF()
+ << " dur " << (*itr)->duration().InSecondsF();
if (prev_timestamp != kNoTimestamp()) {
if (current_timestamp < prev_timestamp) {
diff --git a/media/filters/source_buffer_stream_unittest.cc b/media/filters/source_buffer_stream_unittest.cc
index 50efdacd4f..01e4fd2f92 100644
--- a/media/filters/source_buffer_stream_unittest.cc
+++ b/media/filters/source_buffer_stream_unittest.cc
@@ -31,8 +31,7 @@ static const int kDataSize = 1;
class SourceBufferStreamTest : public testing::Test {
protected:
- SourceBufferStreamTest()
- : accurate_durations_(false) {
+ SourceBufferStreamTest() {
video_config_ = TestVideoConfig::Normal();
SetStreamInfo(kDefaultFramesPerSecond, kDefaultKeyframesPerSecond);
stream_.reset(new SourceBufferStream(video_config_, log_cb(), true));
@@ -57,7 +56,6 @@ class SourceBufferStreamTest : public testing::Test {
void SetAudioStream() {
video_config_ = TestVideoConfig::Invalid();
- accurate_durations_ = true;
audio_config_.Initialize(kCodecVorbis,
kSampleFormatPlanarF32,
CHANNEL_LAYOUT_STEREO,
@@ -278,8 +276,7 @@ class SourceBufferStreamTest : public testing::Test {
break;
}
- if (timestamps[i] == "C")
- EXPECT_EQ(SourceBufferStream::kConfigChange, status);
+ EXPECT_EQ("C", timestamps[i]);
ss << "C";
continue;
@@ -289,7 +286,10 @@ class SourceBufferStreamTest : public testing::Test {
if (status != SourceBufferStream::kSuccess)
break;
- ss << buffer->GetDecodeTimestamp().InMilliseconds();
+ ss << buffer->timestamp().InMilliseconds();
+
+ if (buffer->GetDecodeTimestamp() != buffer->timestamp())
+ ss << "|" << buffer->GetDecodeTimestamp().InMilliseconds();
// Handle preroll buffers.
if (EndsWith(timestamps[i], "P", true)) {
@@ -407,8 +407,7 @@ class SourceBufferStreamTest : public testing::Test {
presentation_timestamp = timestamp - frame_duration_;
}
buffer->set_timestamp(presentation_timestamp);
- if (accurate_durations_)
- buffer->set_duration(frame_duration_);
+ buffer->set_duration(frame_duration_);
queue.push_back(buffer);
}
@@ -416,14 +415,40 @@ class SourceBufferStreamTest : public testing::Test {
EXPECT_EQ(expect_success, stream_->Append(queue));
}
+ void UpdateLastBufferDuration(base::TimeDelta current_dts,
+ BufferQueue* buffers) {
+ if (buffers->empty() || buffers->back()->duration() > base::TimeDelta())
+ return;
+
+ base::TimeDelta last_dts = buffers->back()->GetDecodeTimestamp();
+ DCHECK(current_dts >= last_dts);
+ buffers->back()->set_duration(current_dts - last_dts);
+ }
+
// StringToBufferQueue() allows for the generation of StreamParserBuffers from
// coded strings of timestamps separated by spaces. Supported syntax:
//
- // ##:
- // Generates a StreamParserBuffer with decode timestamp ##. E.g., "0 1 2 3".
+ // xx:
+ // Generates a StreamParserBuffer with decode and presentation timestamp xx.
+ // E.g., "0 1 2 3".
+ //
+ // pp|dd:
+ // Generates a StreamParserBuffer with presentation timestamp pp and decode
+ // timestamp dd. E.g., "0|0 3|1 1|2 2|3".
+ //
+ // ##Dzz:
+ // Specifies the duration for a buffer. ## represents one of the 2 timestamp
+ // formats above. zz specifies the duration of the buffer in milliseconds.
+ // If the duration isn't specified with this syntax then the buffer duration
+ // is determined by the difference between the decode timestamp in ## and
+ // the decode timestamp of the previous buffer in the string. If the string
+ // only contains 1 buffer then the duration must be explicitly specified with
+ // this format.
//
// ##K:
- // Indicates the buffer with timestamp ## reflects a keyframe. E.g., "0K 1".
+ // Indicates the buffer with timestamp ## reflects a keyframe. ##
+ // can be any of the 3 timestamp formats above.
+ // E.g., "0K 1|2K 2|4D2K".
//
// S(a# ... y# z#)
// Indicates a splice frame buffer should be created with timestamp z#. The
@@ -479,19 +504,40 @@ class SourceBufferStreamTest : public testing::Test {
timestamps[i] = timestamps[i].substr(0, timestamps[i].length() - 1);
}
- int time_in_ms;
- CHECK(base::StringToInt(timestamps[i], &time_in_ms));
+ int duration_in_ms = 0;
+ size_t duration_pos = timestamps[i].find('D');
+ if (duration_pos != std::string::npos) {
+ CHECK(base::StringToInt(timestamps[i].substr(duration_pos + 1),
+ &duration_in_ms));
+ timestamps[i] = timestamps[i].substr(0, duration_pos);
+ }
+
+ std::vector<std::string> buffer_timestamps;
+ base::SplitString(timestamps[i], '|', &buffer_timestamps);
+
+ if (buffer_timestamps.size() == 1)
+ buffer_timestamps.push_back(buffer_timestamps[0]);
+
+ CHECK_EQ(2u, buffer_timestamps.size());
+
+ int pts_in_ms = 0;
+ int dts_in_ms = 0;
+ CHECK(base::StringToInt(buffer_timestamps[0], &pts_in_ms));
+ CHECK(base::StringToInt(buffer_timestamps[1], &dts_in_ms));
// Create buffer. Buffer type and track ID are meaningless to these tests.
scoped_refptr<StreamParserBuffer> buffer =
StreamParserBuffer::CopyFrom(&kDataA, kDataSize, is_keyframe,
DemuxerStream::AUDIO, 0);
- base::TimeDelta timestamp =
- base::TimeDelta::FromMilliseconds(time_in_ms);
- buffer->set_timestamp(timestamp);
- if (accurate_durations_)
- buffer->set_duration(frame_duration_);
- buffer->SetDecodeTimestamp(timestamp);
+ buffer->set_timestamp(base::TimeDelta::FromMilliseconds(pts_in_ms));
+
+ if (dts_in_ms != pts_in_ms) {
+ buffer->SetDecodeTimestamp(
+ base::TimeDelta::FromMilliseconds(dts_in_ms));
+ }
+
+ if (duration_in_ms)
+ buffer->set_duration(base::TimeDelta::FromMilliseconds(duration_in_ms));
// Simulate preroll buffers by just generating another buffer and sticking
// it as the preroll.
@@ -504,13 +550,22 @@ class SourceBufferStreamTest : public testing::Test {
}
if (splice_frame) {
+ // Make sure that splice frames aren't used with content where decode
+ // and presentation timestamps can differ. (i.e., B-frames)
+ CHECK_EQ(buffer->GetDecodeTimestamp().InMicroseconds(),
+ buffer->timestamp().InMicroseconds());
if (!pre_splice_buffers.empty()) {
// Enforce strictly monotonically increasing timestamps.
CHECK_GT(
- timestamp.InMicroseconds(),
+ buffer->timestamp().InMicroseconds(),
+ pre_splice_buffers.back()->timestamp().InMicroseconds());
+ CHECK_GT(
+ buffer->GetDecodeTimestamp().InMicroseconds(),
pre_splice_buffers.back()->GetDecodeTimestamp().InMicroseconds());
}
buffer->SetConfigId(splice_config_id);
+ UpdateLastBufferDuration(buffer->GetDecodeTimestamp(),
+ &pre_splice_buffers);
pre_splice_buffers.push_back(buffer);
continue;
}
@@ -523,8 +578,18 @@ class SourceBufferStreamTest : public testing::Test {
pre_splice_buffers.clear();
}
+ UpdateLastBufferDuration(buffer->GetDecodeTimestamp(), &buffers);
buffers.push_back(buffer);
}
+
+ // If the last buffer doesn't have a duration, assume it is the
+ // same as the second to last buffer.
+ if (buffers.size() >= 2 &&
+ buffers.back()->duration() <= base::TimeDelta()) {
+ buffers.back()->set_duration(
+ buffers[buffers.size() - 2]->duration());
+ }
+
return buffers;
}
@@ -565,10 +630,6 @@ class SourceBufferStreamTest : public testing::Test {
int frames_per_second_;
int keyframes_per_second_;
base::TimeDelta frame_duration_;
- // TODO(dalecurtis): It's silly to have this, all tests should use accurate
- // durations instead. However, currently all tests are written with an
- // expectation of 0 duration, so it's an involved change.
- bool accurate_durations_;
DISALLOW_COPY_AND_ASSIGN(SourceBufferStreamTest);
};
@@ -691,7 +752,7 @@ TEST_F(SourceBufferStreamTest,
// Completely overlap the old buffers, with a segment that starts
// after the old segment start timestamp, but before the timestamp
// of the first buffer in the segment.
- NewSegmentAppend("20K 50K 80K 110K");
+ NewSegmentAppend("20K 50K 80K 110D10K");
// Verify that the buffered ranges are updated properly and we don't crash.
CheckExpectedRangesByTimestamp("{ [20,150) }");
@@ -779,7 +840,7 @@ TEST_F(SourceBufferStreamTest, End_Overlap_SingleBuffer) {
NewSegmentAppend("0K 30 60 90 120K 150");
CheckExpectedRangesByTimestamp("{ [0,180) }");
- NewSegmentAppend("0K");
+ NewSegmentAppend("0D30K");
CheckExpectedRangesByTimestamp("{ [0,30) [120,180) }");
CheckExpectedBuffers("0K");
@@ -1548,8 +1609,8 @@ TEST_F(SourceBufferStreamTest, Overlap_OneByOne_DeleteGroup) {
// Seek to 130ms.
SeekToTimestamp(base::TimeDelta::FromMilliseconds(130));
- // Overlap with a new segment from 0 to 120ms.
- NewSegmentAppendOneByOne("0K 120");
+ // Overlap with a new segment from 0 to 130ms.
+ NewSegmentAppendOneByOne("0K 120D10");
// Next buffer should still be 130ms.
CheckExpectedBuffers("130K");
@@ -1580,21 +1641,21 @@ TEST_F(SourceBufferStreamTest, Overlap_OneByOne_BetweenMediaSegments) {
// new : 0K 30 60 90 120K
// after: 0K 30 60 90 *120K* 130K
TEST_F(SourceBufferStreamTest, Overlap_OneByOne_TrackBuffer) {
- NewSegmentAppendOneByOne("10K 40 70 100K 125 130K");
+ NewSegmentAppendOneByOne("10K 40 70 100K 125 130D30K");
CheckExpectedRangesByTimestamp("{ [10,160) }");
// Seek to 70ms.
SeekToTimestamp(base::TimeDelta::FromMilliseconds(70));
CheckExpectedBuffers("10K 40");
- // Overlap with a new segment from 0 to 120ms.
- NewSegmentAppendOneByOne("0K 30 60 90 120K");
+ // Overlap with a new segment from 0 to 130ms.
+ NewSegmentAppendOneByOne("0K 30 60 90 120D10K");
CheckExpectedRangesByTimestamp("{ [0,160) }");
- // Should return frames 70ms and 100ms from the track buffer, then switch
+ // Should return frame 70ms from the track buffer, then switch
// to the new data at 120K, then switch back to the old data at 130K. The
// frame at 125ms that depended on keyframe 100ms should have been deleted.
- CheckExpectedBuffers("70 100K 120K 130K");
+ CheckExpectedBuffers("70 120K 130K");
// Check the final result: should not include data from the track buffer.
SeekToTimestamp(base::TimeDelta::FromMilliseconds(0));
@@ -1606,11 +1667,11 @@ TEST_F(SourceBufferStreamTest, Overlap_OneByOne_TrackBuffer) {
// old : 10K 40 *70* 100K 125 130K
// new : 0K 30 60 90 120K
// after: 0K 30 60 90 *120K* 130K
-// track: 70 100K
+// track: 70
// new : 110K 130
// after: 0K 30 60 90 *110K* 130
TEST_F(SourceBufferStreamTest, Overlap_OneByOne_TrackBuffer2) {
- NewSegmentAppendOneByOne("10K 40 70 100K 125 130K");
+ NewSegmentAppendOneByOne("10K 40 70 100K 125 130D30K");
CheckExpectedRangesByTimestamp("{ [10,160) }");
// Seek to 70ms.
@@ -1619,15 +1680,15 @@ TEST_F(SourceBufferStreamTest, Overlap_OneByOne_TrackBuffer2) {
// Overlap with a new segment from 0 to 120ms; 70ms and 100ms go in track
// buffer.
- NewSegmentAppendOneByOne("0K 30 60 90 120K");
+ NewSegmentAppendOneByOne("0K 30 60 90 120D10K");
CheckExpectedRangesByTimestamp("{ [0,160) }");
// Now overlap the keyframe at 120ms.
NewSegmentAppendOneByOne("110K 130");
- // Should expect buffers 70ms and 100ms from the track buffer. Then it should
+ // Should return frame 70ms from the track buffer. Then it should
// return the keyframe after the track buffer, which is at 110ms.
- CheckExpectedBuffers("70 100K 110K 130");
+ CheckExpectedBuffers("70 110K 130");
}
// Overlap the next keyframe after the end of the track buffer without a
@@ -1635,33 +1696,32 @@ TEST_F(SourceBufferStreamTest, Overlap_OneByOne_TrackBuffer2) {
// old : 10K 40 *70* 100K 125 130K
// new : 0K 30 60 90 120K
// after: 0K 30 60 90 *120K* 130K
-// track: 70 100K
+// track: 70
// new : 50K 80 110 140
// after: 0K 30 50K 80 110 140 * (waiting for keyframe)
-// track: 70 100K 120K 130K
+// track: 70
TEST_F(SourceBufferStreamTest, Overlap_OneByOne_TrackBuffer3) {
- NewSegmentAppendOneByOne("10K 40 70 100K 125 130K");
+ NewSegmentAppendOneByOne("10K 40 70 100K 125 130D30K");
CheckExpectedRangesByTimestamp("{ [10,160) }");
// Seek to 70ms.
SeekToTimestamp(base::TimeDelta::FromMilliseconds(70));
CheckExpectedBuffers("10K 40");
- // Overlap with a new segment from 0 to 120ms; 70ms and 100ms go in track
- // buffer.
- NewSegmentAppendOneByOne("0K 30 60 90 120K");
+ // Overlap with a new segment from 0 to 120ms; 70ms goes in track buffer.
+ NewSegmentAppendOneByOne("0K 30 60 90 120D10K");
CheckExpectedRangesByTimestamp("{ [0,160) }");
- // Now overlap the keyframe at 120ms. There's no keyframe after 70ms, so 120ms
- // and 130ms go into the track buffer.
+ // Now overlap the keyframes at 120ms and 130ms.
NewSegmentAppendOneByOne("50K 80 110 140");
+ CheckExpectedRangesByTimestamp("{ [0,170) }");
// Should have all the buffers from the track buffer, then stall.
- CheckExpectedBuffers("70 100K 120K 130K");
+ CheckExpectedBuffers("70");
CheckNoNextBuffer();
// Appending a keyframe should fulfill the read.
- AppendBuffersOneByOne("150K");
+ AppendBuffersOneByOne("150D30K");
CheckExpectedBuffers("150K");
CheckNoNextBuffer();
}
@@ -1671,12 +1731,12 @@ TEST_F(SourceBufferStreamTest, Overlap_OneByOne_TrackBuffer3) {
// old : 10K 40 *70* 100K 125 130K
// new : 0K 30 60 90 120K
// after: 0K 30 60 90 *120K* 130K
-// track: 70 100K
+// track: 70
// new : 80K 110 140
// after: 0K 30 60 *80K* 110 140
// track: 70
TEST_F(SourceBufferStreamTest, Overlap_OneByOne_TrackBuffer4) {
- NewSegmentAppendOneByOne("10K 40 70 100K 125 130K");
+ NewSegmentAppendOneByOne("10K 40 70 100K 125 130D30K");
CheckExpectedRangesByTimestamp("{ [10,160) }");
// Seek to 70ms.
@@ -1685,7 +1745,7 @@ TEST_F(SourceBufferStreamTest, Overlap_OneByOne_TrackBuffer4) {
// Overlap with a new segment from 0 to 120ms; 70ms and 100ms go in track
// buffer.
- NewSegmentAppendOneByOne("0K 30 60 90 120K");
+ NewSegmentAppendOneByOne("0K 30 60 90 120D10K");
CheckExpectedRangesByTimestamp("{ [0,160) }");
// Now append a keyframe at 80ms.
@@ -1701,7 +1761,7 @@ TEST_F(SourceBufferStreamTest, Overlap_OneByOne_TrackBuffer4) {
// old : 10K 40 *70* 100K
// new : 0K 30 60 90 120
// after: 0K 30 60 90 120 * (waiting for keyframe)
-// track: 70 100K
+// track: 70
// new : 80K 110 140
// after: 0K 30 60 *80K* 110 140
// track: 70
@@ -1713,13 +1773,12 @@ TEST_F(SourceBufferStreamTest, Overlap_OneByOne_TrackBuffer5) {
SeekToTimestamp(base::TimeDelta::FromMilliseconds(70));
CheckExpectedBuffers("10K 40");
- // Overlap with a new segment from 0 to 120ms; 70ms and 100ms go in track
+ // Overlap with a new segment from 0 to 120ms; 70ms goes in track
// buffer.
NewSegmentAppendOneByOne("0K 30 60 90 120");
CheckExpectedRangesByTimestamp("{ [0,150) }");
- // Now append a keyframe at 80ms. The buffer at 100ms should be deleted from
- // the track buffer.
+ // Now append a keyframe at 80ms.
NewSegmentAppendOneByOne("80K 110 140");
CheckExpectedBuffers("70 80K 110 140");
@@ -1731,13 +1790,13 @@ TEST_F(SourceBufferStreamTest, Overlap_OneByOne_TrackBuffer5) {
// old : 10K 40 *70* 100K 125 130K ... 200K 230
// new : 0K 30 60 90 120K
// after: 0K 30 60 90 *120K* 130K ... 200K 230
-// track: 70 100K
+// track: 70
// old : 0K 30 60 90 *120K* 130K ... 200K 230
// new : 260K 290
// after: 0K 30 60 90 *120K* 130K ... 200K 230 260K 290
-// track: 70 100K
+// track: 70
TEST_F(SourceBufferStreamTest, Overlap_OneByOne_TrackBuffer6) {
- NewSegmentAppendOneByOne("10K 40 70 100K 125 130K");
+ NewSegmentAppendOneByOne("10K 40 70 100K 125 130D30K");
NewSegmentAppendOneByOne("200K 230");
CheckExpectedRangesByTimestamp("{ [10,160) [200,260) }");
@@ -1746,7 +1805,7 @@ TEST_F(SourceBufferStreamTest, Overlap_OneByOne_TrackBuffer6) {
CheckExpectedBuffers("10K 40");
// Overlap with a new segment from 0 to 120ms.
- NewSegmentAppendOneByOne("0K 30 60 90 120K");
+ NewSegmentAppendOneByOne("0K 30 60 90 120D10K");
CheckExpectedRangesByTimestamp("{ [0,160) [200,260) }");
// Verify that 70 gets read out of the track buffer.
@@ -1756,7 +1815,7 @@ TEST_F(SourceBufferStreamTest, Overlap_OneByOne_TrackBuffer6) {
NewSegmentAppendOneByOne("260K 290");
CheckExpectedRangesByTimestamp("{ [0,160) [200,320) }");
- CheckExpectedBuffers("100K 120K 130K");
+ CheckExpectedBuffers("120K 130K");
CheckNoNextBuffer();
// Check the final result: should not include data from the track buffer.
@@ -2122,7 +2181,7 @@ TEST_F(SourceBufferStreamTest, GetNextBuffer_ExhaustThenStartOverlap2) {
CheckNoNextBuffer();
// Append a keyframe with the same timestamp as the last buffer output.
- NewSegmentAppend("120K");
+ NewSegmentAppend("120D30K");
CheckNoNextBuffer();
// Append the rest of the segment and make sure that buffers are returned
@@ -2550,7 +2609,7 @@ TEST_F(SourceBufferStreamTest, GarbageCollection_SaveAppendGOP) {
// Make sure you can continue appending data to this GOP; again, GC should not
// wipe out anything.
- AppendBuffers("120");
+ AppendBuffers("120D30");
CheckExpectedRangesByTimestamp("{ [0,150) }");
// Set memory limit to 100 and append a 2nd range after this without
@@ -2572,7 +2631,7 @@ TEST_F(SourceBufferStreamTest, GarbageCollection_SaveAppendGOP) {
CheckExpectedRangesByTimestamp("{ [290,380) [500,620) }");
// Continue appending to this GOP after GC.
- AppendBuffers("620");
+ AppendBuffers("620D30");
CheckExpectedRangesByTimestamp("{ [290,380) [500,650) }");
}
@@ -2591,7 +2650,7 @@ TEST_F(SourceBufferStreamTest, GarbageCollection_SaveAppendGOP_Middle) {
// This whole GOP should be saved, and should be able to continue appending
// data to it.
CheckExpectedRangesByTimestamp("{ [80,170) }");
- AppendBuffers("170");
+ AppendBuffers("170D30");
CheckExpectedRangesByTimestamp("{ [80,200) }");
// Set memory limit to 100 and append a 2nd range after this without
@@ -2614,7 +2673,7 @@ TEST_F(SourceBufferStreamTest, GarbageCollection_SaveAppendGOP_Middle) {
CheckExpectedRangesByTimestamp("{ [80,200) [500,620) }");
// Continue appending to this GOP after GC.
- AppendBuffers("620");
+ AppendBuffers("620D30");
CheckExpectedRangesByTimestamp("{ [80,200) [500,650) }");
}
@@ -3629,7 +3688,7 @@ TEST_F(SourceBufferStreamTest, Text_CompleteOverlap) {
CheckExpectedRangesByTimestamp("{ [3000,4500) }");
NewSegmentAppend("0K 501K 1001K 1501K 2001K 2501K "
"3001K 3501K 4001K 4501K 5001K");
- CheckExpectedRangesByTimestamp("{ [0,5502) }");
+ CheckExpectedRangesByTimestamp("{ [0,5501) }");
Seek(0);
CheckExpectedBuffers("0K 501K 1001K 1501K 2001K 2501K "
@@ -3641,7 +3700,7 @@ TEST_F(SourceBufferStreamTest, Text_OverlapAfter) {
NewSegmentAppend("0K 500K 1000K 1500K 2000K");
CheckExpectedRangesByTimestamp("{ [0,2500) }");
NewSegmentAppend("1499K 2001K 2501K 3001K");
- CheckExpectedRangesByTimestamp("{ [0,3503) }");
+ CheckExpectedRangesByTimestamp("{ [0,3501) }");
Seek(0);
CheckExpectedBuffers("0K 500K 1000K 1499K 2001K 2501K 3001K");
@@ -3652,22 +3711,22 @@ TEST_F(SourceBufferStreamTest, Text_OverlapBefore) {
NewSegmentAppend("1500K 2000K 2500K 3000K 3500K");
CheckExpectedRangesByTimestamp("{ [1500,4000) }");
NewSegmentAppend("0K 501K 1001K 1501K 2001K");
- CheckExpectedRangesByTimestamp("{ [0,4001) }");
+ CheckExpectedRangesByTimestamp("{ [0,4000) }");
Seek(0);
- CheckExpectedBuffers("0K 501K 1001K 1501K 2001K 2500K 3000K 3500K");
+ CheckExpectedBuffers("0K 501K 1001K 1501K 2001K 3000K 3500K");
}
TEST_F(SourceBufferStreamTest, SpliceFrame_Basic) {
Seek(0);
- NewSegmentAppend("0K S(3K 6 9 10) 15 20 S(25K 30 35) 40");
+ NewSegmentAppend("0K S(3K 6 9D3 10D5) 15 20 S(25K 30D5 35D5) 40");
CheckExpectedBuffers("0K 3K 6 9 C 10 15 20 25K 30 C 35 40");
CheckNoNextBuffer();
}
TEST_F(SourceBufferStreamTest, SpliceFrame_SeekClearsSplice) {
Seek(0);
- NewSegmentAppend("0K S(3K 6 9 10) 15K 20");
+ NewSegmentAppend("0K S(3K 6 9D3 10D5) 15K 20");
CheckExpectedBuffers("0K 3K 6");
SeekToTimestamp(base::TimeDelta::FromMilliseconds(15));
@@ -3677,7 +3736,7 @@ TEST_F(SourceBufferStreamTest, SpliceFrame_SeekClearsSplice) {
TEST_F(SourceBufferStreamTest, SpliceFrame_SeekClearsSpliceFromTrackBuffer) {
Seek(0);
- NewSegmentAppend("0K 2K S(3K 6 9 10) 15K 20");
+ NewSegmentAppend("0K 2K S(3K 6 9D3 10D5) 15K 20");
CheckExpectedBuffers("0K 2K");
// Overlap the existing segment.
@@ -3699,7 +3758,7 @@ TEST_F(SourceBufferStreamTest, SpliceFrame_ConfigChangeWithinSplice) {
Seek(0);
CheckVideoConfig(video_config_);
- NewSegmentAppend("0K S(3K 6C 9 10) 15");
+ NewSegmentAppend("0K S(3K 6C 9D3 10D5) 15");
CheckExpectedBuffers("0K 3K C");
CheckVideoConfig(new_config);
@@ -3712,7 +3771,7 @@ TEST_F(SourceBufferStreamTest, SpliceFrame_ConfigChangeWithinSplice) {
TEST_F(SourceBufferStreamTest, SpliceFrame_BasicFromTrackBuffer) {
Seek(0);
- NewSegmentAppend("0K 5K S(8K 9 10) 20");
+ NewSegmentAppend("0K 5K S(8K 9D1 10D10) 20");
CheckExpectedBuffers("0K 5K");
// Overlap the existing segment.
@@ -3732,7 +3791,7 @@ TEST_F(SourceBufferStreamTest,
Seek(0);
CheckVideoConfig(video_config_);
- NewSegmentAppend("0K 5K S(7K 8C 9 10) 20");
+ NewSegmentAppend("0K 5K S(7K 8C 9D1 10D10) 20");
CheckExpectedBuffers("0K 5K");
// Overlap the existing segment.
@@ -3777,7 +3836,7 @@ TEST_F(SourceBufferStreamTest, Audio_SpliceFrame_NoDoubleSplice) {
Seek(0);
// Create a splice before the first splice which would include it.
- NewSegmentAppend("9K");
+ NewSegmentAppend("9D2K");
// A splice on top of a splice should result in a discard of the original
// splice and no new splice frame being generated.
@@ -3803,7 +3862,7 @@ TEST_F(SourceBufferStreamTest, Audio_SpliceFrame_CorrectMediaSegmentStartTime) {
CheckExpectedRangesByTimestamp("{ [0,6) }");
NewSegmentAppend("6K 8K 10K");
CheckExpectedRangesByTimestamp("{ [0,12) }");
- NewSegmentAppend("1K 4K");
+ NewSegmentAppend("1K 4D2K");
CheckExpectedRangesByTimestamp("{ [0,12) }");
CheckExpectedBuffers("0K 2K 4K C 1K 4K 6K 8K 10K");
CheckNoNextBuffer();
@@ -3840,9 +3899,9 @@ TEST_F(SourceBufferStreamTest, Audio_SpliceFrame_NoTinySplices) {
// 2ms this results in an overlap of 1ms between the ranges. A splice frame
// should not be generated since it requires at least 2 frames, or 2ms in this
// case, of data to crossfade.
- NewSegmentAppend("0K");
+ NewSegmentAppend("0D2K");
CheckExpectedRangesByTimestamp("{ [0,2) }");
- NewSegmentAppend("1K");
+ NewSegmentAppend("1D2K");
CheckExpectedRangesByTimestamp("{ [0,3) }");
CheckExpectedBuffers("0K 1K");
CheckNoNextBuffer();
@@ -3864,6 +3923,15 @@ TEST_F(SourceBufferStreamTest, Audio_PrerollFrame) {
CheckNoNextBuffer();
}
+TEST_F(SourceBufferStreamTest, BFrames) {
+ Seek(0);
+ NewSegmentAppend("0K 120|30 30|60 60|90 90|120");
+ CheckExpectedRangesByTimestamp("{ [0,150) }");
+
+ CheckExpectedBuffers("0K 120|30 30|60 60|90 90|120");
+ CheckNoNextBuffer();
+}
+
// TODO(vrk): Add unit tests where keyframes are unaligned between streams.
// (crbug.com/133557)
diff --git a/media/filters/video_decoder_selector_unittest.cc b/media/filters/video_decoder_selector_unittest.cc
index 62b1a42145..57760b5e06 100644
--- a/media/filters/video_decoder_selector_unittest.cc
+++ b/media/filters/video_decoder_selector_unittest.cc
@@ -26,7 +26,7 @@ class VideoDecoderSelectorTest : public ::testing::Test {
public:
enum DecryptorCapability {
kNoDecryptor,
- // Used to test Abort() during DecryptingVideoDecoder::Initialize() and
+ // Used to test destruction during DecryptingVideoDecoder::Initialize() and
// DecryptingDemuxerStream::Initialize(). We don't need this for normal
// VideoDecoders since we use MockVideoDecoder.
kHoldSetDecryptor,
@@ -45,9 +45,6 @@ class VideoDecoderSelectorTest : public ::testing::Test {
}
~VideoDecoderSelectorTest() {
- if (selected_decoder_)
- selected_decoder_->Stop();
-
message_loop_.RunUntilIdle();
}
@@ -119,11 +116,11 @@ class VideoDecoderSelectorTest : public ::testing::Test {
message_loop_.RunUntilIdle();
}
- void SelectDecoderAndAbort() {
+ void SelectDecoderAndDestroy() {
SelectDecoder();
EXPECT_CALL(*this, OnDecoderSelected(IsNull(), IsNull()));
- decoder_selector_->Abort();
+ decoder_selector_.reset();
message_loop_.RunUntilIdle();
}
@@ -131,16 +128,19 @@ class VideoDecoderSelectorTest : public ::testing::Test {
NOTREACHED();
}
- // Fixture members.
- scoped_ptr<VideoDecoderSelector> decoder_selector_;
+ // Declare |decoder_selector_| after |demuxer_stream_| and |decryptor_| since
+ // |demuxer_stream_| and |decryptor_| should outlive |decoder_selector_|.
scoped_ptr<StrictMock<MockDemuxerStream> > demuxer_stream_;
+
// Use NiceMock since we don't care about most of calls on the decryptor, e.g.
// RegisterNewKeyCB().
scoped_ptr<NiceMock<MockDecryptor> > decryptor_;
+
+ scoped_ptr<VideoDecoderSelector> decoder_selector_;
+
StrictMock<MockVideoDecoder>* decoder_1_;
StrictMock<MockVideoDecoder>* decoder_2_;
ScopedVector<VideoDecoder> all_decoders_;
-
scoped_ptr<VideoDecoder> selected_decoder_;
base::MessageLoop message_loop_;
@@ -149,11 +149,6 @@ class VideoDecoderSelectorTest : public ::testing::Test {
DISALLOW_COPY_AND_ASSIGN(VideoDecoderSelectorTest);
};
-// Note:
-// In all the tests, Stop() is expected to be called on a decoder if a decoder:
-// - is pending initialization and DecoderSelector::Abort() is called, or
-// - has been successfully initialized.
-
// The stream is not encrypted but we have no clear decoder. No decoder can be
// selected.
TEST_F(VideoDecoderSelectorTest, ClearStream_NoDecryptor_NoClearDecoder) {
@@ -174,20 +169,18 @@ TEST_F(VideoDecoderSelectorTest, ClearStream_NoDecryptor_OneClearDecoder) {
EXPECT_CALL(*decoder_1_, Initialize(_, _, _, _))
.WillOnce(RunCallback<2>(PIPELINE_OK));
EXPECT_CALL(*this, OnDecoderSelected(decoder_1_, IsNull()));
- EXPECT_CALL(*decoder_1_, Stop());
SelectDecoder();
}
TEST_F(VideoDecoderSelectorTest,
- Abort_ClearStream_NoDecryptor_OneClearDecoder) {
+ Destroy_ClearStream_NoDecryptor_OneClearDecoder) {
UseClearStream();
InitializeDecoderSelector(kNoDecryptor, 1);
EXPECT_CALL(*decoder_1_, Initialize(_, _, _, _));
- EXPECT_CALL(*decoder_1_, Stop());
- SelectDecoderAndAbort();
+ SelectDecoderAndDestroy();
}
// The stream is not encrypted and we have multiple clear decoders. The first
@@ -201,22 +194,20 @@ TEST_F(VideoDecoderSelectorTest, ClearStream_NoDecryptor_MultipleClearDecoder) {
EXPECT_CALL(*decoder_2_, Initialize(_, _, _, _))
.WillOnce(RunCallback<2>(PIPELINE_OK));
EXPECT_CALL(*this, OnDecoderSelected(decoder_2_, IsNull()));
- EXPECT_CALL(*decoder_2_, Stop());
SelectDecoder();
}
TEST_F(VideoDecoderSelectorTest,
- Abort_ClearStream_NoDecryptor_MultipleClearDecoder) {
+ Destroy_ClearStream_NoDecryptor_MultipleClearDecoder) {
UseClearStream();
InitializeDecoderSelector(kNoDecryptor, 2);
EXPECT_CALL(*decoder_1_, Initialize(_, _, _, _))
.WillOnce(RunCallback<2>(DECODER_ERROR_NOT_SUPPORTED));
EXPECT_CALL(*decoder_2_, Initialize(_, _, _, _));
- EXPECT_CALL(*decoder_2_, Stop());
- SelectDecoderAndAbort();
+ SelectDecoderAndDestroy();
}
// There is a decryptor but the stream is not encrypted. The decoder will be
@@ -228,19 +219,17 @@ TEST_F(VideoDecoderSelectorTest, ClearStream_HasDecryptor) {
EXPECT_CALL(*decoder_1_, Initialize(_, _, _, _))
.WillOnce(RunCallback<2>(PIPELINE_OK));
EXPECT_CALL(*this, OnDecoderSelected(decoder_1_, IsNull()));
- EXPECT_CALL(*decoder_1_, Stop());
SelectDecoder();
}
-TEST_F(VideoDecoderSelectorTest, Abort_ClearStream_HasDecryptor) {
+TEST_F(VideoDecoderSelectorTest, Destroy_ClearStream_HasDecryptor) {
UseClearStream();
InitializeDecoderSelector(kDecryptOnly, 1);
EXPECT_CALL(*decoder_1_, Initialize(_, _, _, _));
- EXPECT_CALL(*decoder_1_, Stop());
- SelectDecoderAndAbort();
+ SelectDecoderAndDestroy();
}
// The stream is encrypted and there's no decryptor. No decoder can be selected.
@@ -265,11 +254,11 @@ TEST_F(VideoDecoderSelectorTest, EncryptedStream_DecryptOnly_NoClearDecoder) {
}
TEST_F(VideoDecoderSelectorTest,
- Abort_EncryptedStream_DecryptOnly_NoClearDecoder) {
+ Destroy_EncryptedStream_DecryptOnly_NoClearDecoder) {
UseEncryptedStream();
InitializeDecoderSelector(kHoldSetDecryptor, 0);
- SelectDecoderAndAbort();
+ SelectDecoderAndDestroy();
}
// Decryptor can do decryption-only and there's a decoder available. The decoder
@@ -281,20 +270,18 @@ TEST_F(VideoDecoderSelectorTest, EncryptedStream_DecryptOnly_OneClearDecoder) {
EXPECT_CALL(*decoder_1_, Initialize(_, _, _, _))
.WillOnce(RunCallback<2>(PIPELINE_OK));
EXPECT_CALL(*this, OnDecoderSelected(decoder_1_, NotNull()));
- EXPECT_CALL(*decoder_1_, Stop());
SelectDecoder();
}
TEST_F(VideoDecoderSelectorTest,
- Abort_EncryptedStream_DecryptOnly_OneClearDecoder) {
+ Destroy_EncryptedStream_DecryptOnly_OneClearDecoder) {
UseEncryptedStream();
InitializeDecoderSelector(kDecryptOnly, 1);
EXPECT_CALL(*decoder_1_, Initialize(_, _, _, _));
- EXPECT_CALL(*decoder_1_, Stop());
- SelectDecoderAndAbort();
+ SelectDecoderAndDestroy();
}
// Decryptor can only do decryption and there are multiple decoders available.
@@ -310,22 +297,20 @@ TEST_F(VideoDecoderSelectorTest,
EXPECT_CALL(*decoder_2_, Initialize(_, _, _, _))
.WillOnce(RunCallback<2>(PIPELINE_OK));
EXPECT_CALL(*this, OnDecoderSelected(decoder_2_, NotNull()));
- EXPECT_CALL(*decoder_2_, Stop());
SelectDecoder();
}
TEST_F(VideoDecoderSelectorTest,
- Abort_EncryptedStream_DecryptOnly_MultipleClearDecoder) {
+ Destroy_EncryptedStream_DecryptOnly_MultipleClearDecoder) {
UseEncryptedStream();
InitializeDecoderSelector(kDecryptOnly, 2);
EXPECT_CALL(*decoder_1_, Initialize(_, _, _, _))
.WillOnce(RunCallback<2>(DECODER_ERROR_NOT_SUPPORTED));
EXPECT_CALL(*decoder_2_, Initialize(_, _, _, _));
- EXPECT_CALL(*decoder_2_, Stop());
- SelectDecoderAndAbort();
+ SelectDecoderAndDestroy();
}
// Decryptor can do decryption and decoding. A DecryptingVideoDecoder will be
@@ -340,11 +325,11 @@ TEST_F(VideoDecoderSelectorTest, EncryptedStream_DecryptAndDecode) {
SelectDecoder();
}
-TEST_F(VideoDecoderSelectorTest, Abort_EncryptedStream_DecryptAndDecode) {
+TEST_F(VideoDecoderSelectorTest, Destroy_EncryptedStream_DecryptAndDecode) {
UseEncryptedStream();
InitializeDecoderSelector(kHoldSetDecryptor, 1);
- SelectDecoderAndAbort();
+ SelectDecoderAndDestroy();
}
} // namespace media
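
For reference, the fixture reordering above works because C++ destroys non-static data members in the reverse order of their declaration, so |decoder_selector_| is torn down before the |demuxer_stream_| and |decryptor_| it depends on. A minimal standalone sketch of that guarantee (the names here are illustrative, not part of the patch):

  #include <iostream>

  struct Logger {
    explicit Logger(const char* name) : name_(name) {
      std::cout << "ctor " << name_ << std::endl;
    }
    ~Logger() { std::cout << "dtor " << name_ << std::endl; }
    const char* name_;
  };

  struct Fixture {
    Fixture() : dependency("dependency"), consumer("consumer") {}
    Logger dependency;  // Declared first, destroyed last (the |demuxer_stream_|/|decryptor_| role).
    Logger consumer;    // Declared last, destroyed first (the |decoder_selector_| role).
  };

  int main() {
    Fixture f;
    // On scope exit: "dtor consumer" is printed before "dtor dependency".
  }
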
diff --git a/media/filters/video_frame_stream_unittest.cc b/media/filters/video_frame_stream_unittest.cc
index f2494042a1..492e7cf9ff 100644
--- a/media/filters/video_frame_stream_unittest.cc
+++ b/media/filters/video_frame_stream_unittest.cc
@@ -76,14 +76,19 @@ class VideoFrameStreamTest
}
~VideoFrameStreamTest() {
+ // Check that the pipeline statistics callback was fired correctly.
+ if (decoder_)
+ EXPECT_EQ(decoder_->total_bytes_decoded(), total_bytes_decoded_);
+
+ is_initialized_ = false;
+ decoder_ = NULL;
+ video_frame_stream_.reset();
+ message_loop_.RunUntilIdle();
+
DCHECK(!pending_initialize_);
DCHECK(!pending_read_);
DCHECK(!pending_reset_);
DCHECK(!pending_stop_);
-
- if (is_initialized_)
- Stop();
- EXPECT_FALSE(is_initialized_);
}
MOCK_METHOD1(OnNewSpliceBuffer, void(base::TimeDelta));
@@ -157,16 +162,6 @@ class VideoFrameStreamTest
pending_reset_ = false;
}
- void OnStopped() {
- DCHECK(!pending_initialize_);
- DCHECK(!pending_read_);
- DCHECK(!pending_reset_);
- DCHECK(pending_stop_);
- pending_stop_ = false;
- is_initialized_ = false;
- decoder_ = NULL;
- }
-
void ReadOneFrame() {
frame_read_ = NULL;
pending_read_ = true;
@@ -270,7 +265,7 @@ class VideoFrameStreamTest
break;
// These two cases are only interesting to test during
- // VideoFrameStream::Stop(). There's no need to satisfy a callback.
+ // VideoFrameStream destruction. There's no need to satisfy a callback.
case SET_DECRYPTOR:
case DECRYPTOR_NO_KEY:
NOTREACHED();
@@ -315,15 +310,6 @@ class VideoFrameStreamTest
SatisfyPendingCallback(DECODER_RESET);
}
- void Stop() {
- // Check that the pipeline statistics callback was fired correctly.
- EXPECT_EQ(decoder_->total_bytes_decoded(), total_bytes_decoded_);
- pending_stop_ = true;
- video_frame_stream_->Stop(base::Bind(&VideoFrameStreamTest::OnStopped,
- base::Unretained(this)));
- message_loop_.RunUntilIdle();
- }
-
base::MessageLoop message_loop_;
scoped_ptr<VideoFrameStream> video_frame_stream_;
@@ -577,118 +563,98 @@ TEST_P(VideoFrameStreamTest, Reset_DuringNoKeyRead) {
Reset();
}
-TEST_P(VideoFrameStreamTest, Stop_BeforeInitialization) {
- pending_stop_ = true;
- video_frame_stream_->Stop(
- base::Bind(&VideoFrameStreamTest::OnStopped, base::Unretained(this)));
- message_loop_.RunUntilIdle();
+// In the following Destroy_* tests, |video_frame_stream_| is destroyed in
+// VideoFrameStreamTest dtor.
+
+TEST_P(VideoFrameStreamTest, Destroy_BeforeInitialization) {
}
-TEST_P(VideoFrameStreamTest, Stop_DuringSetDecryptor) {
+TEST_P(VideoFrameStreamTest, Destroy_DuringSetDecryptor) {
if (!GetParam().is_encrypted) {
DVLOG(1) << "SetDecryptor test only runs when the stream is encrytped.";
return;
}
EnterPendingState(SET_DECRYPTOR);
- pending_stop_ = true;
- video_frame_stream_->Stop(
- base::Bind(&VideoFrameStreamTest::OnStopped, base::Unretained(this)));
- message_loop_.RunUntilIdle();
}
-TEST_P(VideoFrameStreamTest, Stop_DuringInitialization) {
+TEST_P(VideoFrameStreamTest, Destroy_DuringInitialization) {
EnterPendingState(DECODER_INIT);
- Stop();
}
-TEST_P(VideoFrameStreamTest, Stop_AfterInitialization) {
+TEST_P(VideoFrameStreamTest, Destroy_AfterInitialization) {
Initialize();
- Stop();
}
-TEST_P(VideoFrameStreamTest, Stop_DuringReinitialization) {
+TEST_P(VideoFrameStreamTest, Destroy_DuringReinitialization) {
Initialize();
EnterPendingState(DECODER_REINIT);
- Stop();
}
-TEST_P(VideoFrameStreamTest, Stop_AfterReinitialization) {
+TEST_P(VideoFrameStreamTest, Destroy_AfterReinitialization) {
Initialize();
EnterPendingState(DECODER_REINIT);
SatisfyPendingCallback(DECODER_REINIT);
- Stop();
}
-TEST_P(VideoFrameStreamTest, Stop_DuringDemuxerRead_Normal) {
+TEST_P(VideoFrameStreamTest, Destroy_DuringDemuxerRead_Normal) {
Initialize();
EnterPendingState(DEMUXER_READ_NORMAL);
- Stop();
}
-TEST_P(VideoFrameStreamTest, Stop_DuringDemuxerRead_ConfigChange) {
+TEST_P(VideoFrameStreamTest, Destroy_DuringDemuxerRead_ConfigChange) {
Initialize();
EnterPendingState(DEMUXER_READ_CONFIG_CHANGE);
- Stop();
}
-TEST_P(VideoFrameStreamTest, Stop_DuringNormalDecoderDecode) {
+TEST_P(VideoFrameStreamTest, Destroy_DuringNormalDecoderDecode) {
Initialize();
EnterPendingState(DECODER_DECODE);
- Stop();
}
-TEST_P(VideoFrameStreamTest, Stop_AfterNormalRead) {
+TEST_P(VideoFrameStreamTest, Destroy_AfterNormalRead) {
Initialize();
Read();
- Stop();
}
-TEST_P(VideoFrameStreamTest, Stop_AfterConfigChangeRead) {
+TEST_P(VideoFrameStreamTest, Destroy_AfterConfigChangeRead) {
Initialize();
EnterPendingState(DEMUXER_READ_CONFIG_CHANGE);
SatisfyPendingCallback(DEMUXER_READ_CONFIG_CHANGE);
- Stop();
}
-TEST_P(VideoFrameStreamTest, Stop_DuringNoKeyRead) {
+TEST_P(VideoFrameStreamTest, Destroy_DuringNoKeyRead) {
Initialize();
EnterPendingState(DECRYPTOR_NO_KEY);
- Stop();
}
-TEST_P(VideoFrameStreamTest, Stop_DuringReset) {
+TEST_P(VideoFrameStreamTest, Destroy_DuringReset) {
Initialize();
EnterPendingState(DECODER_RESET);
- Stop();
}
-TEST_P(VideoFrameStreamTest, Stop_AfterReset) {
+TEST_P(VideoFrameStreamTest, Destroy_AfterReset) {
Initialize();
Reset();
- Stop();
}
-TEST_P(VideoFrameStreamTest, Stop_DuringRead_DuringReset) {
+TEST_P(VideoFrameStreamTest, Destroy_DuringRead_DuringReset) {
Initialize();
EnterPendingState(DECODER_DECODE);
EnterPendingState(DECODER_RESET);
- Stop();
}
-TEST_P(VideoFrameStreamTest, Stop_AfterRead_DuringReset) {
+TEST_P(VideoFrameStreamTest, Destroy_AfterRead_DuringReset) {
Initialize();
EnterPendingState(DECODER_DECODE);
EnterPendingState(DECODER_RESET);
SatisfyPendingCallback(DECODER_DECODE);
- Stop();
}
-TEST_P(VideoFrameStreamTest, Stop_AfterRead_AfterReset) {
+TEST_P(VideoFrameStreamTest, Destroy_AfterRead_AfterReset) {
Initialize();
Read();
Reset();
- Stop();
}
TEST_P(VideoFrameStreamTest, DecoderErrorWhenReading) {
diff --git a/media/filters/video_renderer_impl.cc b/media/filters/video_renderer_impl.cc
index 736a91f2f2..a544bacc1f 100644
--- a/media/filters/video_renderer_impl.cc
+++ b/media/filters/video_renderer_impl.cc
@@ -25,7 +25,9 @@ VideoRendererImpl::VideoRendererImpl(
const PaintCB& paint_cb,
bool drop_frames)
: task_runner_(task_runner),
- video_frame_stream_(task_runner, decoders.Pass(), set_decryptor_ready_cb),
+ video_frame_stream_(new VideoFrameStream(task_runner,
+ decoders.Pass(),
+ set_decryptor_ready_cb)),
low_delay_(false),
received_end_of_stream_(false),
rendered_end_of_stream_(false),
@@ -34,7 +36,7 @@ VideoRendererImpl::VideoRendererImpl(
thread_(),
pending_read_(false),
drop_frames_(drop_frames),
- playback_rate_(0),
+ buffering_state_(BUFFERING_HAVE_NOTHING),
paint_cb_(paint_cb),
last_timestamp_(kNoTimestamp()),
frames_decoded_(0),
@@ -49,27 +51,24 @@ VideoRendererImpl::~VideoRendererImpl() {
CHECK(thread_.is_null());
}
-void VideoRendererImpl::Play(const base::Closure& callback) {
- DCHECK(task_runner_->BelongsToCurrentThread());
- base::AutoLock auto_lock(lock_);
- DCHECK_EQ(kPrerolled, state_);
- state_ = kPlaying;
- callback.Run();
-}
-
void VideoRendererImpl::Flush(const base::Closure& callback) {
DCHECK(task_runner_->BelongsToCurrentThread());
base::AutoLock auto_lock(lock_);
- DCHECK_NE(state_, kUninitialized);
+ DCHECK_EQ(state_, kPlaying);
flush_cb_ = callback;
state_ = kFlushing;
// This is necessary if the |video_frame_stream_| has already seen an end of
// stream and needs to drain it before flushing it.
ready_frames_.clear();
+ if (buffering_state_ != BUFFERING_HAVE_NOTHING) {
+ buffering_state_ = BUFFERING_HAVE_NOTHING;
+ buffering_state_cb_.Run(BUFFERING_HAVE_NOTHING);
+ }
received_end_of_stream_ = false;
rendered_end_of_stream_ = false;
- video_frame_stream_.Reset(
+
+ video_frame_stream_->Reset(
base::Bind(&VideoRendererImpl::OnVideoFrameStreamResetDone,
weak_factory_.GetWeakPtr()));
}
@@ -78,7 +77,7 @@ void VideoRendererImpl::Stop(const base::Closure& callback) {
DCHECK(task_runner_->BelongsToCurrentThread());
base::AutoLock auto_lock(lock_);
if (state_ == kUninitialized || state_ == kStopped) {
- callback.Run();
+ task_runner_->PostTask(FROM_HERE, callback);
return;
}
@@ -105,40 +104,20 @@ void VideoRendererImpl::Stop(const base::Closure& callback) {
base::PlatformThread::Join(thread_to_join);
}
- video_frame_stream_.Stop(callback);
+ video_frame_stream_.reset();
+ task_runner_->PostTask(FROM_HERE, callback);
}
-void VideoRendererImpl::SetPlaybackRate(float playback_rate) {
+void VideoRendererImpl::StartPlayingFrom(base::TimeDelta timestamp) {
DCHECK(task_runner_->BelongsToCurrentThread());
base::AutoLock auto_lock(lock_);
- playback_rate_ = playback_rate;
-}
-
-void VideoRendererImpl::Preroll(base::TimeDelta time,
- const PipelineStatusCB& cb) {
- DCHECK(task_runner_->BelongsToCurrentThread());
- base::AutoLock auto_lock(lock_);
- DCHECK(!cb.is_null());
- DCHECK(preroll_cb_.is_null());
- DCHECK(state_ == kFlushed || state_ == kPlaying) << "state_ " << state_;
-
- if (state_ == kFlushed) {
- DCHECK(time != kNoTimestamp());
- DCHECK(!pending_read_);
- DCHECK(ready_frames_.empty());
- } else {
- DCHECK(time == kNoTimestamp());
- }
-
- state_ = kPrerolling;
- preroll_cb_ = cb;
- preroll_timestamp_ = time;
-
- if (ShouldTransitionToPrerolled_Locked()) {
- TransitionToPrerolled_Locked();
- return;
- }
+ DCHECK_EQ(state_, kFlushed);
+ DCHECK(!pending_read_);
+ DCHECK(ready_frames_.empty());
+ DCHECK_EQ(buffering_state_, BUFFERING_HAVE_NOTHING);
+ state_ = kPlaying;
+ start_timestamp_ = timestamp;
AttemptRead_Locked();
}
@@ -147,6 +126,7 @@ void VideoRendererImpl::Initialize(DemuxerStream* stream,
const PipelineStatusCB& init_cb,
const StatisticsCB& statistics_cb,
const TimeCB& max_time_cb,
+ const BufferingStateCB& buffering_state_cb,
const base::Closure& ended_cb,
const PipelineStatusCB& error_cb,
const TimeDeltaCB& get_time_cb,
@@ -158,6 +138,7 @@ void VideoRendererImpl::Initialize(DemuxerStream* stream,
DCHECK(!init_cb.is_null());
DCHECK(!statistics_cb.is_null());
DCHECK(!max_time_cb.is_null());
+ DCHECK(!buffering_state_cb.is_null());
DCHECK(!ended_cb.is_null());
DCHECK(!get_time_cb.is_null());
DCHECK(!get_duration_cb.is_null());
@@ -168,13 +149,14 @@ void VideoRendererImpl::Initialize(DemuxerStream* stream,
init_cb_ = init_cb;
statistics_cb_ = statistics_cb;
max_time_cb_ = max_time_cb;
+ buffering_state_cb_ = buffering_state_cb;
ended_cb_ = ended_cb;
error_cb_ = error_cb;
get_time_cb_ = get_time_cb;
get_duration_cb_ = get_duration_cb;
state_ = kInitializing;
- video_frame_stream_.Initialize(
+ video_frame_stream_->Initialize(
stream,
low_delay,
statistics_cb,
@@ -235,53 +217,54 @@ void VideoRendererImpl::ThreadMain() {
return;
// Remain idle as long as we're not playing.
- if (state_ != kPlaying || playback_rate_ == 0) {
+ if (state_ != kPlaying || buffering_state_ != BUFFERING_HAVE_ENOUGH) {
UpdateStatsAndWait_Locked(kIdleTimeDelta);
continue;
}
// Remain idle until we have the next frame ready for rendering.
if (ready_frames_.empty()) {
- if (received_end_of_stream_ && !rendered_end_of_stream_) {
- rendered_end_of_stream_ = true;
- ended_cb_.Run();
+ if (received_end_of_stream_) {
+ if (!rendered_end_of_stream_) {
+ rendered_end_of_stream_ = true;
+ ended_cb_.Run();
+ }
+ } else {
+ buffering_state_ = BUFFERING_HAVE_NOTHING;
+ task_runner_->PostTask(
+ FROM_HERE, base::Bind(buffering_state_cb_, BUFFERING_HAVE_NOTHING));
}
UpdateStatsAndWait_Locked(kIdleTimeDelta);
continue;
}
- base::TimeDelta remaining_time =
- CalculateSleepDuration(ready_frames_.front(), playback_rate_);
+ base::TimeDelta now = get_time_cb_.Run();
+ base::TimeDelta target_paint_timestamp = ready_frames_.front()->timestamp();
+ base::TimeDelta latest_paint_timestamp;
- // Sleep up to a maximum of our idle time until we're within the time to
- // render the next frame.
- if (remaining_time.InMicroseconds() > 0) {
- remaining_time = std::min(remaining_time, kIdleTimeDelta);
- UpdateStatsAndWait_Locked(remaining_time);
- continue;
- }
-
- // Deadline is defined as the midpoint between this frame and the next
+ // Deadline is defined as the duration between this frame and the next
// frame, using the delta between this frame and the previous frame as the
// assumption for frame duration.
//
- // TODO(scherkus): An improvement over midpoint might be selecting the
- // minimum and/or maximum between the midpoint and some constants. As a
- // thought experiment, consider what would be better than the midpoint
- // for both the 1fps case and 120fps case.
- //
// TODO(scherkus): This can be vastly improved. Use a histogram to measure
// the accuracy of our frame timing code. http://crbug.com/149829
- if (drop_frames_ && last_timestamp_ != kNoTimestamp()) {
- base::TimeDelta now = get_time_cb_.Run();
- base::TimeDelta deadline = ready_frames_.front()->timestamp() +
- (ready_frames_.front()->timestamp() - last_timestamp_) / 2;
-
- if (now > deadline) {
- DropNextReadyFrame_Locked();
- continue;
- }
+ if (last_timestamp_ == kNoTimestamp()) {
+ latest_paint_timestamp = base::TimeDelta::Max();
+ } else {
+ base::TimeDelta duration = target_paint_timestamp - last_timestamp_;
+ latest_paint_timestamp = target_paint_timestamp + duration;
+ }
+
+ // Remain idle until we've reached our target paint window.
+ if (now < target_paint_timestamp) {
+ UpdateStatsAndWait_Locked(kIdleTimeDelta);
+ continue;
+ }
+
+ if (now > latest_paint_timestamp && drop_frames_) {
+ DropNextReadyFrame_Locked();
+ continue;
}
// Congratulations! You've made it past the video frame timing gauntlet.
@@ -338,12 +321,6 @@ void VideoRendererImpl::FrameReady(VideoFrameStream::Status status,
PipelineStatus error = PIPELINE_ERROR_DECODE;
if (status == VideoFrameStream::DECRYPT_ERROR)
error = PIPELINE_ERROR_DECRYPT;
-
- if (!preroll_cb_.is_null()) {
- base::ResetAndReturn(&preroll_cb_).Run(error);
- return;
- }
-
error_cb_.Run(error);
return;
}
@@ -353,38 +330,28 @@ void VideoRendererImpl::FrameReady(VideoFrameStream::Status status,
if (state_ == kStopped || state_ == kFlushing)
return;
- if (!frame.get()) {
- // Abort preroll early for a NULL frame because we won't get more frames.
- // A new preroll will be requested after this one completes so there is no
- // point trying to collect more frames.
- if (state_ == kPrerolling)
- TransitionToPrerolled_Locked();
+ DCHECK_EQ(state_, kPlaying);
+ // Can happen when demuxers are preparing for a new Seek().
+ if (!frame) {
+ DCHECK_EQ(status, VideoFrameStream::DEMUXER_READ_ABORTED);
return;
}
if (frame->end_of_stream()) {
DCHECK(!received_end_of_stream_);
received_end_of_stream_ = true;
- max_time_cb_.Run(get_duration_cb_.Run());
-
- if (state_ == kPrerolling)
- TransitionToPrerolled_Locked();
-
- return;
- }
-
- // Maintain the latest frame decoded so the correct frame is displayed after
- // prerolling has completed.
- if (state_ == kPrerolling && preroll_timestamp_ != kNoTimestamp() &&
- frame->timestamp() <= preroll_timestamp_) {
- ready_frames_.clear();
+ } else {
+ // Maintain the latest frame decoded so the correct frame is displayed after
+ // prerolling has completed.
+ if (frame->timestamp() <= start_timestamp_)
+ ready_frames_.clear();
+ AddReadyFrame_Locked(frame);
}
- AddReadyFrame_Locked(frame);
-
- if (ShouldTransitionToPrerolled_Locked())
- TransitionToPrerolled_Locked();
+ // Signal buffering state if we've met our conditions for having enough data.
+ if (buffering_state_ != BUFFERING_HAVE_ENOUGH && HaveEnoughData_Locked())
+ TransitionToHaveEnough_Locked();
// Always request more decoded video if we have capacity. This serves two
// purposes:
@@ -393,11 +360,39 @@ void VideoRendererImpl::FrameReady(VideoFrameStream::Status status,
AttemptRead_Locked();
}
-bool VideoRendererImpl::ShouldTransitionToPrerolled_Locked() {
- return state_ == kPrerolling &&
- (!video_frame_stream_.CanReadWithoutStalling() ||
- ready_frames_.size() >= static_cast<size_t>(limits::kMaxVideoFrames) ||
- (low_delay_ && ready_frames_.size() > 0));
+bool VideoRendererImpl::HaveEnoughData_Locked() {
+ DCHECK_EQ(state_, kPlaying);
+ return received_end_of_stream_ ||
+ !video_frame_stream_->CanReadWithoutStalling() ||
+ ready_frames_.size() >= static_cast<size_t>(limits::kMaxVideoFrames) ||
+ (low_delay_ && ready_frames_.size() > 0);
+}
+
+void VideoRendererImpl::TransitionToHaveEnough_Locked() {
+ DCHECK_EQ(buffering_state_, BUFFERING_HAVE_NOTHING);
+
+ if (received_end_of_stream_)
+ max_time_cb_.Run(get_duration_cb_.Run());
+
+ if (!ready_frames_.empty()) {
+ // Max time isn't reported while we're in a have nothing state as we could
+ // be discarding frames to find |start_timestamp_|.
+ if (!received_end_of_stream_) {
+ base::TimeDelta max_timestamp = ready_frames_[0]->timestamp();
+ for (size_t i = 1; i < ready_frames_.size(); ++i) {
+ if (ready_frames_[i]->timestamp() > max_timestamp)
+ max_timestamp = ready_frames_[i]->timestamp();
+ }
+ max_time_cb_.Run(max_timestamp);
+ }
+
+    // Because the clock might remain paused for an undetermined amount
+ // of time (e.g., seeking while paused), paint the first frame.
+ PaintNextReadyFrame_Locked();
+ }
+
+ buffering_state_ = BUFFERING_HAVE_ENOUGH;
+ buffering_state_cb_.Run(BUFFERING_HAVE_ENOUGH);
}
void VideoRendererImpl::AddReadyFrame_Locked(
@@ -419,7 +414,12 @@ void VideoRendererImpl::AddReadyFrame_Locked(
DCHECK_LE(ready_frames_.size(),
static_cast<size_t>(limits::kMaxVideoFrames));
- max_time_cb_.Run(frame->timestamp());
+ // FrameReady() may add frames but discard them when we're decoding frames to
+ // reach |start_timestamp_|. In this case we'll only want to update the max
+ // time when we know we've reached |start_timestamp_| and have buffered enough
+  // frames to begin playback.
+ if (buffering_state_ == BUFFERING_HAVE_ENOUGH)
+ max_time_cb_.Run(frame->timestamp());
// Avoid needlessly waking up |thread_| unless playing.
if (state_ == kPlaying)
@@ -441,12 +441,10 @@ void VideoRendererImpl::AttemptRead_Locked() {
}
switch (state_) {
- case kPrerolling:
- case kPrerolled:
case kPlaying:
pending_read_ = true;
- video_frame_stream_.Read(base::Bind(&VideoRendererImpl::FrameReady,
- weak_factory_.GetWeakPtr()));
+ video_frame_stream_->Read(base::Bind(&VideoRendererImpl::FrameReady,
+ weak_factory_.GetWeakPtr()));
return;
case kUninitialized:
@@ -468,46 +466,19 @@ void VideoRendererImpl::OnVideoFrameStreamResetDone() {
DCHECK(ready_frames_.empty());
DCHECK(!received_end_of_stream_);
DCHECK(!rendered_end_of_stream_);
+ DCHECK_EQ(buffering_state_, BUFFERING_HAVE_NOTHING);
state_ = kFlushed;
last_timestamp_ = kNoTimestamp();
base::ResetAndReturn(&flush_cb_).Run();
}
-base::TimeDelta VideoRendererImpl::CalculateSleepDuration(
- const scoped_refptr<VideoFrame>& next_frame,
- float playback_rate) {
- // Determine the current and next presentation timestamps.
- base::TimeDelta now = get_time_cb_.Run();
- base::TimeDelta next_pts = next_frame->timestamp();
-
- // Scale our sleep based on the playback rate.
- base::TimeDelta sleep = next_pts - now;
- return base::TimeDelta::FromMicroseconds(
- static_cast<int64>(sleep.InMicroseconds() / playback_rate));
-}
-
void VideoRendererImpl::DoStopOrError_Locked() {
lock_.AssertAcquired();
last_timestamp_ = kNoTimestamp();
ready_frames_.clear();
}
-void VideoRendererImpl::TransitionToPrerolled_Locked() {
- lock_.AssertAcquired();
- DCHECK_EQ(state_, kPrerolling);
-
- state_ = kPrerolled;
-
- // Because we might remain in the prerolled state for an undetermined amount
- // of time (e.g., we seeked while paused), we'll paint the first prerolled
- // frame.
- if (!ready_frames_.empty())
- PaintNextReadyFrame_Locked();
-
- base::ResetAndReturn(&preroll_cb_).Run(PIPELINE_OK);
-}
-
void VideoRendererImpl::UpdateStatsAndWait_Locked(
base::TimeDelta wait_duration) {
lock_.AssertAcquired();
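
The rewritten ThreadMain() above drops the playback-rate-scaled sleep and instead paints a frame once media time enters a window: the window opens at the frame's timestamp and closes one estimated frame duration later, the estimate being the delta from the previously painted frame. A rough sketch of that decision, with hypothetical helper types and integer-microsecond times standing in for base::TimeDelta:

  #include <cstdint>
  #include <limits>

  struct PaintDecision {
    bool wait;  // Not yet inside the paint window: keep sleeping.
    bool drop;  // Past the window (and dropping is allowed): drop the frame.
  };            // wait == drop == false means: paint the frame now.

  PaintDecision DecidePaint(int64_t now_us,
                            int64_t frame_ts_us,
                            int64_t last_painted_ts_us,  // < 0 if nothing painted yet.
                            bool drop_frames) {
    int64_t latest_paint_us = std::numeric_limits<int64_t>::max();
    if (last_painted_ts_us >= 0)
      latest_paint_us = frame_ts_us + (frame_ts_us - last_painted_ts_us);

    if (now_us < frame_ts_us)
      return {true, false};
    if (now_us > latest_paint_us && drop_frames)
      return {false, true};
    return {false, false};
  }

This mirrors the |last_timestamp_| / latest_paint_timestamp logic in the hunk above; when neither check fires, the loop falls through to painting the frame.
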
diff --git a/media/filters/video_renderer_impl.h b/media/filters/video_renderer_impl.h
index 4fef25d55c..06262dadf4 100644
--- a/media/filters/video_renderer_impl.h
+++ b/media/filters/video_renderer_impl.h
@@ -8,6 +8,7 @@
#include <deque>
#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
#include "base/memory/scoped_vector.h"
#include "base/memory/weak_ptr.h"
#include "base/synchronization/condition_variable.h"
@@ -61,16 +62,14 @@ class MEDIA_EXPORT VideoRendererImpl
const PipelineStatusCB& init_cb,
const StatisticsCB& statistics_cb,
const TimeCB& max_time_cb,
+ const BufferingStateCB& buffering_state_cb,
const base::Closure& ended_cb,
const PipelineStatusCB& error_cb,
const TimeDeltaCB& get_time_cb,
const TimeDeltaCB& get_duration_cb) OVERRIDE;
- virtual void Play(const base::Closure& callback) OVERRIDE;
virtual void Flush(const base::Closure& callback) OVERRIDE;
- virtual void Preroll(base::TimeDelta time,
- const PipelineStatusCB& cb) OVERRIDE;
+ virtual void StartPlayingFrom(base::TimeDelta timestamp) OVERRIDE;
virtual void Stop(const base::Closure& callback) OVERRIDE;
- virtual void SetPlaybackRate(float playback_rate) OVERRIDE;
// PlatformThread::Delegate implementation.
virtual void ThreadMain() OVERRIDE;
@@ -96,14 +95,6 @@ class MEDIA_EXPORT VideoRendererImpl
// Called when VideoFrameStream::Reset() completes.
void OnVideoFrameStreamResetDone();
- // Calculates the duration to sleep for based on |last_timestamp_|,
- // the next frame timestamp (may be NULL), and the provided playback rate.
- //
- // We don't use |playback_rate_| to avoid locking.
- base::TimeDelta CalculateSleepDuration(
- const scoped_refptr<VideoFrame>& next_frame,
- float playback_rate);
-
// Helper function that flushes the buffers when a Stop() or error occurs.
void DoStopOrError_Locked();
@@ -117,11 +108,10 @@ class MEDIA_EXPORT VideoRendererImpl
// A read is scheduled to replace the frame.
void DropNextReadyFrame_Locked();
- void TransitionToPrerolled_Locked();
-
- // Returns true of all conditions have been met to transition from
- // kPrerolling to kPrerolled.
- bool ShouldTransitionToPrerolled_Locked();
+ // Returns true if the renderer has enough data for playback purposes.
+ // Note that having enough data may be due to reaching end of stream.
+ bool HaveEnoughData_Locked();
+ void TransitionToHaveEnough_Locked();
// Runs |statistics_cb_| with |frames_decoded_| and |frames_dropped_|, resets
// them to 0, and then waits on |frame_available_| for up to the
@@ -134,7 +124,7 @@ class MEDIA_EXPORT VideoRendererImpl
base::Lock lock_;
// Provides video frames to VideoRendererImpl.
- VideoFrameStream video_frame_stream_;
+ scoped_ptr<VideoFrameStream> video_frame_stream_;
// Flag indicating low-delay mode.
bool low_delay_;
@@ -152,36 +142,28 @@ class MEDIA_EXPORT VideoRendererImpl
// always check |state_| to see if it was set to STOPPED after waking up!
base::ConditionVariable frame_available_;
- // State transition Diagram of this class:
- // [kUninitialized]
- // |
- // | Initialize()
- // [kInitializing]
- // |
- // V
- // +------[kFlushed]<---------------OnVideoFrameStreamResetDone()
- // | | Preroll() or upon ^
- // | V got first frame [kFlushing]
- // | [kPrerolling] ^
- // | | |
- // | V Got enough frames |
- // | [kPrerolled]--------------------------|
- // | | Flush() ^
- // | V Play() |
- // | [kPlaying]---------------------------|
- // | Flush() ^ Flush()
- // | |
- // +-----> [kStopped] [Any state other than]
- // [ kUninitialized ]
-
- // Simple state tracking variable.
+ // Important detail: being in kPlaying doesn't imply that video is being
+ // rendered. Rather, it means that the renderer is ready to go. The actual
+ // rendering of video is controlled by time advancing via |time_cb_|.
+ //
+ // kUninitialized
+ // | Initialize()
+ // |
+ // V
+ // kInitializing
+ // | Decoders initialized
+ // |
+ // V Decoders reset
+ // kFlushed <------------------ kFlushing
+ // | StartPlayingFrom() ^
+ // | |
+ // | | Flush()
+ // `---------> kPlaying --------'
enum State {
kUninitialized,
kInitializing,
- kPrerolled,
kFlushing,
kFlushed,
- kPrerolling,
kPlaying,
kStopped,
};
@@ -196,22 +178,22 @@ class MEDIA_EXPORT VideoRendererImpl
bool drop_frames_;
- float playback_rate_;
+ BufferingState buffering_state_;
// Playback operation callbacks.
base::Closure flush_cb_;
- PipelineStatusCB preroll_cb_;
// Event callbacks.
PipelineStatusCB init_cb_;
StatisticsCB statistics_cb_;
TimeCB max_time_cb_;
+ BufferingStateCB buffering_state_cb_;
base::Closure ended_cb_;
PipelineStatusCB error_cb_;
TimeDeltaCB get_time_cb_;
TimeDeltaCB get_duration_cb_;
- base::TimeDelta preroll_timestamp_;
+ base::TimeDelta start_timestamp_;
// Embedder callback for notifying a new frame is available for painting.
PaintCB paint_cb_;
diff --git a/media/filters/video_renderer_impl_unittest.cc b/media/filters/video_renderer_impl_unittest.cc
index 355d875174..90002eeaee 100644
--- a/media/filters/video_renderer_impl_unittest.cc
+++ b/media/filters/video_renderer_impl_unittest.cc
@@ -27,7 +27,6 @@
using ::testing::_;
using ::testing::AnyNumber;
using ::testing::AtLeast;
-using ::testing::InSequence;
using ::testing::Invoke;
using ::testing::NiceMock;
using ::testing::NotNull;
@@ -57,13 +56,12 @@ class VideoRendererImplTest : public ::testing::Test {
ScopedVector<VideoDecoder> decoders;
decoders.push_back(decoder_);
- renderer_.reset(
- new VideoRendererImpl(message_loop_.message_loop_proxy(),
- decoders.Pass(),
- media::SetDecryptorReadyCB(),
- base::Bind(&StrictMock<MockDisplayCB>::Display,
- base::Unretained(&mock_display_cb_)),
- true));
+ renderer_.reset(new VideoRendererImpl(
+ message_loop_.message_loop_proxy(),
+ decoders.Pass(),
+ media::SetDecryptorReadyCB(),
+ base::Bind(&StrictMock<MockCB>::Display, base::Unretained(&mock_cb_)),
+ true));
demuxer_stream_.set_video_decoder_config(TestVideoConfig::Normal());
@@ -71,8 +69,6 @@ class VideoRendererImplTest : public ::testing::Test {
EXPECT_CALL(demuxer_stream_, Read(_)).WillRepeatedly(
RunCallback<0>(DemuxerStream::kOk,
scoped_refptr<DecoderBuffer>(new DecoderBuffer(0))));
- EXPECT_CALL(*decoder_, Stop())
- .WillRepeatedly(Invoke(this, &VideoRendererImplTest::StopRequested));
EXPECT_CALL(statistics_cb_object_, OnStatistics(_))
.Times(AnyNumber());
EXPECT_CALL(*this, OnTimeUpdate(_))
@@ -96,11 +92,6 @@ class VideoRendererImplTest : public ::testing::Test {
EXPECT_CALL(*decoder_, Reset(_))
.WillRepeatedly(Invoke(this, &VideoRendererImplTest::FlushRequested));
- InSequence s;
-
- // Set playback rate before anything else happens.
- renderer_->SetPlaybackRate(1.0f);
-
// Initialize, we shouldn't have any reads.
InitializeRenderer(PIPELINE_OK, low_delay);
}
@@ -125,6 +116,8 @@ class VideoRendererImplTest : public ::testing::Test {
base::Unretained(&statistics_cb_object_)),
base::Bind(&VideoRendererImplTest::OnTimeUpdate,
base::Unretained(this)),
+ base::Bind(&StrictMock<MockCB>::BufferingStateChange,
+ base::Unretained(&mock_cb_)),
ended_event_.GetClosure(),
error_event_.GetPipelineStatusCB(),
base::Bind(&VideoRendererImplTest::GetTime, base::Unretained(this)),
@@ -132,20 +125,11 @@ class VideoRendererImplTest : public ::testing::Test {
base::Unretained(this)));
}
- void Play() {
- SCOPED_TRACE("Play()");
- WaitableMessageLoopEvent event;
- renderer_->Play(event.GetClosure());
- event.RunAndWait();
- }
-
- void Preroll(int timestamp_ms, PipelineStatus expected) {
- SCOPED_TRACE(base::StringPrintf("Preroll(%d, %d)", timestamp_ms, expected));
- WaitableMessageLoopEvent event;
- renderer_->Preroll(
- base::TimeDelta::FromMilliseconds(timestamp_ms),
- event.GetPipelineStatusCB());
- event.RunAndWaitForStatus(expected);
+ void StartPlayingFrom(int timestamp_ms) {
+ SCOPED_TRACE(base::StringPrintf("StartPlayingFrom(%d)", timestamp_ms));
+ renderer_->StartPlayingFrom(
+ base::TimeDelta::FromMilliseconds(timestamp_ms));
+ message_loop_.RunUntilIdle();
}
void Flush() {
@@ -163,7 +147,6 @@ class VideoRendererImplTest : public ::testing::Test {
}
void Shutdown() {
- Flush();
Stop();
}
@@ -290,12 +273,13 @@ class VideoRendererImplTest : public ::testing::Test {
NiceMock<MockDemuxerStream> demuxer_stream_;
MockStatisticsCB statistics_cb_object_;
- // Use StrictMock<T> to catch missing/extra display callbacks.
- class MockDisplayCB {
+ // Use StrictMock<T> to catch missing/extra callbacks.
+ class MockCB {
public:
MOCK_METHOD1(Display, void(const scoped_refptr<VideoFrame>&));
+ MOCK_METHOD1(BufferingStateChange, void(BufferingState));
};
- StrictMock<MockDisplayCB> mock_display_cb_;
+ StrictMock<MockCB> mock_cb_;
private:
base::TimeDelta GetTime() {
@@ -334,15 +318,6 @@ class VideoRendererImplTest : public ::testing::Test {
message_loop_.PostTask(FROM_HERE, callback);
}
- void StopRequested() {
- DCHECK_EQ(&message_loop_, base::MessageLoop::current());
- decode_results_.clear();
- if (!decode_cb_.is_null()) {
- QueueFrames("abort");
- SatisfyPendingRead();
- }
- }
-
base::MessageLoop message_loop_;
// Used to protect |time_|.
@@ -380,11 +355,12 @@ TEST_F(VideoRendererImplTest, Initialize) {
Shutdown();
}
-TEST_F(VideoRendererImplTest, InitializeAndPreroll) {
+TEST_F(VideoRendererImplTest, InitializeAndStartPlayingFrom) {
Initialize();
QueueFrames("0 10 20 30");
- EXPECT_CALL(mock_display_cb_, Display(HasTimestamp(0)));
- Preroll(0, PIPELINE_OK);
+ EXPECT_CALL(mock_cb_, Display(HasTimestamp(0)));
+ EXPECT_CALL(mock_cb_, BufferingStateChange(BUFFERING_HAVE_ENOUGH));
+ StartPlayingFrom(0);
Shutdown();
}
@@ -402,6 +378,11 @@ TEST_F(VideoRendererImplTest, StopWhileInitializing) {
TEST_F(VideoRendererImplTest, StopWhileFlushing) {
Initialize();
+ QueueFrames("0 10 20 30");
+ EXPECT_CALL(mock_cb_, Display(HasTimestamp(0)));
+ EXPECT_CALL(mock_cb_, BufferingStateChange(BUFFERING_HAVE_ENOUGH));
+ StartPlayingFrom(0);
+ EXPECT_CALL(mock_cb_, BufferingStateChange(BUFFERING_HAVE_NOTHING));
renderer_->Flush(base::Bind(&ExpectNotCalled, PIPELINE_OK));
Stop();
@@ -411,18 +392,28 @@ TEST_F(VideoRendererImplTest, StopWhileFlushing) {
TEST_F(VideoRendererImplTest, Play) {
Initialize();
QueueFrames("0 10 20 30");
- EXPECT_CALL(mock_display_cb_, Display(HasTimestamp(0)));
- Preroll(0, PIPELINE_OK);
- Play();
+ EXPECT_CALL(mock_cb_, Display(HasTimestamp(0)));
+ EXPECT_CALL(mock_cb_, BufferingStateChange(BUFFERING_HAVE_ENOUGH));
+ StartPlayingFrom(0);
+ Shutdown();
+}
+
+TEST_F(VideoRendererImplTest, FlushWithNothingBuffered) {
+ Initialize();
+ StartPlayingFrom(0);
+
+ // We shouldn't expect a buffering state change since we never reached
+ // BUFFERING_HAVE_ENOUGH.
+ Flush();
Shutdown();
}
TEST_F(VideoRendererImplTest, EndOfStream_ClipDuration) {
Initialize();
- QueueFrames("0 10 20 30");
- EXPECT_CALL(mock_display_cb_, Display(HasTimestamp(0)));
- Preroll(0, PIPELINE_OK);
- Play();
+ QueueFrames("0");
+ EXPECT_CALL(mock_cb_, Display(HasTimestamp(0)));
+ EXPECT_CALL(mock_cb_, BufferingStateChange(BUFFERING_HAVE_ENOUGH));
+ StartPlayingFrom(0);
// Next frame has timestamp way past duration. Its timestamp will be adjusted
// to match the duration of the video.
@@ -433,7 +424,7 @@ TEST_F(VideoRendererImplTest, EndOfStream_ClipDuration) {
// Queue the end of stream frame and wait for the last frame to be rendered.
SatisfyPendingReadWithEndOfStream();
- EXPECT_CALL(mock_display_cb_, Display(HasTimestamp(kVideoDurationInMs)));
+ EXPECT_CALL(mock_cb_, Display(HasTimestamp(kVideoDurationInMs)));
AdvanceTimeInMs(kVideoDurationInMs);
WaitForEnded();
@@ -443,9 +434,9 @@ TEST_F(VideoRendererImplTest, EndOfStream_ClipDuration) {
TEST_F(VideoRendererImplTest, DecodeError_Playing) {
Initialize();
QueueFrames("0 10 20 30");
- EXPECT_CALL(mock_display_cb_, Display(HasTimestamp(0)));
- Preroll(0, PIPELINE_OK);
- Play();
+ EXPECT_CALL(mock_cb_, Display(HasTimestamp(0)));
+ EXPECT_CALL(mock_cb_, BufferingStateChange(BUFFERING_HAVE_ENOUGH));
+ StartPlayingFrom(0);
QueueFrames("error");
SatisfyPendingRead();
@@ -453,54 +444,61 @@ TEST_F(VideoRendererImplTest, DecodeError_Playing) {
Shutdown();
}
-TEST_F(VideoRendererImplTest, DecodeError_DuringPreroll) {
+TEST_F(VideoRendererImplTest, DecodeError_DuringStartPlayingFrom) {
Initialize();
QueueFrames("error");
- Preroll(0, PIPELINE_ERROR_DECODE);
+ StartPlayingFrom(0);
Shutdown();
}
-TEST_F(VideoRendererImplTest, Preroll_Exact) {
+TEST_F(VideoRendererImplTest, StartPlayingFrom_Exact) {
Initialize();
QueueFrames("50 60 70 80 90");
- EXPECT_CALL(mock_display_cb_, Display(HasTimestamp(60)));
- Preroll(60, PIPELINE_OK);
+ EXPECT_CALL(mock_cb_, Display(HasTimestamp(60)));
+ EXPECT_CALL(mock_cb_, BufferingStateChange(BUFFERING_HAVE_ENOUGH));
+ StartPlayingFrom(60);
Shutdown();
}
-TEST_F(VideoRendererImplTest, Preroll_RightBefore) {
+TEST_F(VideoRendererImplTest, StartPlayingFrom_RightBefore) {
Initialize();
QueueFrames("50 60 70 80 90");
- EXPECT_CALL(mock_display_cb_, Display(HasTimestamp(50)));
- Preroll(59, PIPELINE_OK);
+ EXPECT_CALL(mock_cb_, Display(HasTimestamp(50)));
+ EXPECT_CALL(mock_cb_, BufferingStateChange(BUFFERING_HAVE_ENOUGH));
+ StartPlayingFrom(59);
Shutdown();
}
-TEST_F(VideoRendererImplTest, Preroll_RightAfter) {
+TEST_F(VideoRendererImplTest, StartPlayingFrom_RightAfter) {
Initialize();
QueueFrames("50 60 70 80 90");
- EXPECT_CALL(mock_display_cb_, Display(HasTimestamp(60)));
- Preroll(61, PIPELINE_OK);
+ EXPECT_CALL(mock_cb_, Display(HasTimestamp(60)));
+ EXPECT_CALL(mock_cb_, BufferingStateChange(BUFFERING_HAVE_ENOUGH));
+ StartPlayingFrom(61);
Shutdown();
}
-TEST_F(VideoRendererImplTest, Preroll_LowDelay) {
+TEST_F(VideoRendererImplTest, StartPlayingFrom_LowDelay) {
// In low-delay mode only one frame is required to finish preroll.
InitializeWithLowDelay(true);
QueueFrames("0");
- EXPECT_CALL(mock_display_cb_, Display(HasTimestamp(0)));
- Preroll(0, PIPELINE_OK);
- Play();
+  // Expect some HAVE_ENOUGH/HAVE_NOTHING transitions since only one frame is
+  // required to finish preroll.
+ EXPECT_CALL(mock_cb_, Display(HasTimestamp(0)));
+ EXPECT_CALL(mock_cb_, BufferingStateChange(BUFFERING_HAVE_ENOUGH))
+ .Times(AnyNumber());
+ EXPECT_CALL(mock_cb_, BufferingStateChange(BUFFERING_HAVE_NOTHING))
+ .Times(AnyNumber());
+ StartPlayingFrom(0);
QueueFrames("10");
SatisfyPendingRead();
WaitableMessageLoopEvent event;
- EXPECT_CALL(mock_display_cb_, Display(HasTimestamp(10)))
+ EXPECT_CALL(mock_cb_, Display(HasTimestamp(10)))
.WillOnce(RunClosure(event.GetClosure()));
AdvanceTimeInMs(10);
event.RunAndWait();
@@ -508,12 +506,12 @@ TEST_F(VideoRendererImplTest, Preroll_LowDelay) {
Shutdown();
}
-TEST_F(VideoRendererImplTest, PlayAfterPreroll) {
+TEST_F(VideoRendererImplTest, PlayAfterStartPlayingFrom) {
Initialize();
QueueFrames("0 10 20 30");
- EXPECT_CALL(mock_display_cb_, Display(HasTimestamp(0)));
- Preroll(0, PIPELINE_OK);
- Play();
+ EXPECT_CALL(mock_cb_, Display(HasTimestamp(0)));
+ EXPECT_CALL(mock_cb_, BufferingStateChange(BUFFERING_HAVE_ENOUGH));
+ StartPlayingFrom(0);
// Check that there is an outstanding Read() request.
EXPECT_TRUE(IsReadPending());
@@ -521,73 +519,13 @@ TEST_F(VideoRendererImplTest, PlayAfterPreroll) {
Shutdown();
}
-TEST_F(VideoRendererImplTest, Rebuffer) {
- Initialize();
- QueueFrames("0 10 20 30");
- EXPECT_CALL(mock_display_cb_, Display(HasTimestamp(0)));
- Preroll(0, PIPELINE_OK);
- Play();
-
- // Advance time past prerolled time drain the ready frame queue.
- AdvanceTimeInMs(50);
- WaitForPendingRead();
-
- // Simulate a Preroll/Play rebuffer sequence.
- WaitableMessageLoopEvent event;
- renderer_->Preroll(kNoTimestamp(),
- event.GetPipelineStatusCB());
-
- // Queue enough frames to satisfy preroll.
- QueueFrames("40 50 60 70");
- SatisfyPendingRead();
-
- // TODO(scherkus): We shouldn't display the next ready frame in a rebuffer
- // situation, see http://crbug.com/365516
- EXPECT_CALL(mock_display_cb_, Display(_)).Times(AtLeast(1));
-
- event.RunAndWaitForStatus(PIPELINE_OK);
-
- Play();
-
- Shutdown();
-}
-
-TEST_F(VideoRendererImplTest, Rebuffer_AlreadyHaveEnoughFrames) {
- Initialize();
- QueueFrames("0 10 20 30");
- EXPECT_CALL(mock_display_cb_, Display(HasTimestamp(0)));
- Preroll(0, PIPELINE_OK);
-
- // Queue an extra frame so that we'll have enough frames to satisfy
- // preroll even after the first frame is painted.
- QueueFrames("40");
- SatisfyPendingRead();
- Play();
-
- // Simulate a Preroll/Play rebuffer sequence.
- //
- // TODO(scherkus): We shouldn't display the next ready frame in a rebuffer
- // situation, see http://crbug.com/365516
- EXPECT_CALL(mock_display_cb_, Display(_)).Times(AtLeast(1));
-
- WaitableMessageLoopEvent event;
- renderer_->Preroll(kNoTimestamp(),
- event.GetPipelineStatusCB());
-
- event.RunAndWaitForStatus(PIPELINE_OK);
-
- Play();
-
- Shutdown();
-}
-
// Verify that a late decoder response doesn't break invariants in the renderer.
TEST_F(VideoRendererImplTest, StopDuringOutstandingRead) {
Initialize();
QueueFrames("0 10 20 30");
- EXPECT_CALL(mock_display_cb_, Display(HasTimestamp(0)));
- Preroll(0, PIPELINE_OK);
- Play();
+ EXPECT_CALL(mock_cb_, Display(HasTimestamp(0)));
+ EXPECT_CALL(mock_cb_, BufferingStateChange(BUFFERING_HAVE_ENOUGH));
+ StartPlayingFrom(0);
// Check that there is an outstanding Read() request.
EXPECT_TRUE(IsReadPending());
@@ -598,9 +536,39 @@ TEST_F(VideoRendererImplTest, StopDuringOutstandingRead) {
}
TEST_F(VideoRendererImplTest, VideoDecoder_InitFailure) {
- InSequence s;
InitializeRenderer(DECODER_ERROR_NOT_SUPPORTED, false);
Stop();
}
+TEST_F(VideoRendererImplTest, Underflow) {
+ Initialize();
+ QueueFrames("0 10 20 30");
+ EXPECT_CALL(mock_cb_, Display(HasTimestamp(0)));
+ EXPECT_CALL(mock_cb_, BufferingStateChange(BUFFERING_HAVE_ENOUGH));
+ StartPlayingFrom(0);
+
+ // Frames should be dropped and we should signal having nothing.
+ {
+ SCOPED_TRACE("Waiting for BUFFERING_HAVE_NOTHING");
+ WaitableMessageLoopEvent event;
+ EXPECT_CALL(mock_cb_, BufferingStateChange(BUFFERING_HAVE_NOTHING))
+ .WillOnce(RunClosure(event.GetClosure()));
+ AdvanceTimeInMs(100);
+ event.RunAndWait();
+ }
+
+ // Receiving end of stream should signal having enough.
+ {
+ SCOPED_TRACE("Waiting for BUFFERING_HAVE_ENOUGH");
+ WaitableMessageLoopEvent event;
+ EXPECT_CALL(mock_cb_, BufferingStateChange(BUFFERING_HAVE_ENOUGH))
+ .WillOnce(RunClosure(event.GetClosure()));
+ SatisfyPendingReadWithEndOfStream();
+ event.RunAndWait();
+ }
+
+ WaitForEnded();
+ Shutdown();
+}
+
} // namespace media
diff --git a/media/filters/vpx_video_decoder.cc b/media/filters/vpx_video_decoder.cc
index 20416f1480..9cadbca36c 100644
--- a/media/filters/vpx_video_decoder.cc
+++ b/media/filters/vpx_video_decoder.cc
@@ -209,7 +209,7 @@ VpxVideoDecoder::VpxVideoDecoder(
vpx_codec_alpha_(NULL) {}
VpxVideoDecoder::~VpxVideoDecoder() {
- DCHECK_EQ(kUninitialized, state_);
+ DCHECK(task_runner_->BelongsToCurrentThread());
CloseDecoder();
}
@@ -338,12 +338,6 @@ void VpxVideoDecoder::Reset(const base::Closure& closure) {
task_runner_->PostTask(FROM_HERE, closure);
}
-void VpxVideoDecoder::Stop() {
- DCHECK(task_runner_->BelongsToCurrentThread());
-
- state_ = kUninitialized;
-}
-
void VpxVideoDecoder::DecodeBuffer(const scoped_refptr<DecoderBuffer>& buffer) {
DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK_NE(state_, kUninitialized);
diff --git a/media/filters/vpx_video_decoder.h b/media/filters/vpx_video_decoder.h
index 22d119bea7..0e1a941632 100644
--- a/media/filters/vpx_video_decoder.h
+++ b/media/filters/vpx_video_decoder.h
@@ -39,7 +39,6 @@ class MEDIA_EXPORT VpxVideoDecoder : public VideoDecoder {
virtual void Decode(const scoped_refptr<DecoderBuffer>& buffer,
const DecodeCB& decode_cb) OVERRIDE;
virtual void Reset(const base::Closure& closure) OVERRIDE;
- virtual void Stop() OVERRIDE;
private:
enum DecoderState {
diff --git a/media/formats/mp2t/es_adapter_video.cc b/media/formats/mp2t/es_adapter_video.cc
new file mode 100644
index 0000000000..5e604a911e
--- /dev/null
+++ b/media/formats/mp2t/es_adapter_video.cc
@@ -0,0 +1,190 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/formats/mp2t/es_adapter_video.h"
+
+#include "media/base/buffers.h"
+#include "media/base/stream_parser_buffer.h"
+#include "media/base/video_decoder_config.h"
+#include "media/formats/mp2t/mp2t_common.h"
+
+namespace media {
+namespace mp2t {
+
+// Arbitrary decision about the frame duration when there is no previous
+// hint about what could be the frame duration.
+static const int kDefaultFrameDurationMs = 40;
+
+// To calculate the frame duration, we make an assumption
+// that the timestamp of the next frame in presentation order
+// is no further than 5 frames away in decode order.
+// TODO(damienv): the previous assumption should cover most of the practical
+// cases. However, the right way to calculate the frame duration would be
+// to emulate the H.264 DPB (decoded picture buffer) bumping process.
+static const size_t kHistorySize = 5;
+
+EsAdapterVideo::EsAdapterVideo(
+ const NewVideoConfigCB& new_video_config_cb,
+ const EmitBufferCB& emit_buffer_cb)
+ : new_video_config_cb_(new_video_config_cb),
+ emit_buffer_cb_(emit_buffer_cb),
+ has_valid_config_(false),
+ has_valid_frame_(false),
+ last_frame_duration_(
+ base::TimeDelta::FromMilliseconds(kDefaultFrameDurationMs)),
+ buffer_index_(0) {
+}
+
+EsAdapterVideo::~EsAdapterVideo() {
+}
+
+void EsAdapterVideo::Flush() {
+ ProcessPendingBuffers(true);
+}
+
+void EsAdapterVideo::Reset() {
+ has_valid_config_ = false;
+ has_valid_frame_ = false;
+
+ last_frame_duration_ =
+ base::TimeDelta::FromMilliseconds(kDefaultFrameDurationMs);
+
+ config_list_.clear();
+ buffer_index_ = 0;
+ buffer_list_.clear();
+ emitted_pts_.clear();
+
+ discarded_frames_min_pts_ = base::TimeDelta();
+ discarded_frames_dts_.clear();
+}
+
+void EsAdapterVideo::OnConfigChanged(
+ const VideoDecoderConfig& video_decoder_config) {
+ config_list_.push_back(
+ ConfigEntry(buffer_index_ + buffer_list_.size(), video_decoder_config));
+ has_valid_config_ = true;
+ ProcessPendingBuffers(false);
+}
+
+void EsAdapterVideo::OnNewBuffer(
+ const scoped_refptr<StreamParserBuffer>& stream_parser_buffer) {
+ // Discard the incoming frame:
+ // - if it is not associated with any config,
+ // - or if only non-key frames have been added to a new segment.
+ if (!has_valid_config_ ||
+ (!has_valid_frame_ && !stream_parser_buffer->IsKeyframe())) {
+ if (discarded_frames_dts_.empty() ||
+ discarded_frames_min_pts_ > stream_parser_buffer->timestamp()) {
+ discarded_frames_min_pts_ = stream_parser_buffer->timestamp();
+ }
+ discarded_frames_dts_.push_back(
+ stream_parser_buffer->GetDecodeTimestamp());
+ return;
+ }
+
+ has_valid_frame_ = true;
+
+ if (!discarded_frames_dts_.empty())
+ ReplaceDiscardedFrames(stream_parser_buffer);
+
+ buffer_list_.push_back(stream_parser_buffer);
+ ProcessPendingBuffers(false);
+}
+
+void EsAdapterVideo::ProcessPendingBuffers(bool flush) {
+ DCHECK(has_valid_config_);
+
+ while (!buffer_list_.empty() &&
+ (flush || buffer_list_.size() > kHistorySize)) {
+ // Signal a config change, just before emitting the corresponding frame.
+ if (!config_list_.empty() && config_list_.front().first == buffer_index_) {
+ new_video_config_cb_.Run(config_list_.front().second);
+ config_list_.pop_front();
+ }
+
+ scoped_refptr<StreamParserBuffer> buffer = buffer_list_.front();
+ buffer_list_.pop_front();
+ buffer_index_++;
+
+ if (buffer->duration() == kNoTimestamp()) {
+ base::TimeDelta next_frame_pts = GetNextFramePts(buffer->timestamp());
+ if (next_frame_pts == kNoTimestamp()) {
+ // This can happen when emitting the very last buffer
+        // or if the stream does not meet the assumption behind |kHistorySize|.
+ DVLOG(LOG_LEVEL_ES) << "Using last frame duration: "
+ << last_frame_duration_.InMilliseconds();
+ buffer->set_duration(last_frame_duration_);
+ } else {
+ base::TimeDelta duration = next_frame_pts - buffer->timestamp();
+ DVLOG(LOG_LEVEL_ES) << "Frame duration: " << duration.InMilliseconds();
+ buffer->set_duration(duration);
+ }
+ }
+
+ emitted_pts_.push_back(buffer->timestamp());
+ if (emitted_pts_.size() > kHistorySize)
+ emitted_pts_.pop_front();
+
+ last_frame_duration_ = buffer->duration();
+ emit_buffer_cb_.Run(buffer);
+ }
+}
+
+base::TimeDelta EsAdapterVideo::GetNextFramePts(base::TimeDelta current_pts) {
+ base::TimeDelta next_pts = kNoTimestamp();
+
+ // Consider the timestamps of future frames (in decode order).
+ // Note: the next frame is not enough when the GOP includes some B frames.
+ for (BufferQueue::const_iterator it = buffer_list_.begin();
+ it != buffer_list_.end(); ++it) {
+ if ((*it)->timestamp() < current_pts)
+ continue;
+ if (next_pts == kNoTimestamp() || next_pts > (*it)->timestamp())
+ next_pts = (*it)->timestamp();
+ }
+
+ // Consider the timestamps of previous frames (in decode order).
+ // In a simple GOP structure with B frames, the frame next to the last B
+ // frame (in presentation order) is located before in decode order.
+ for (std::list<base::TimeDelta>::const_iterator it = emitted_pts_.begin();
+ it != emitted_pts_.end(); ++it) {
+ if (*it < current_pts)
+ continue;
+ if (next_pts == kNoTimestamp() || next_pts > *it)
+ next_pts = *it;
+ }
+
+ return next_pts;
+}
+
+void EsAdapterVideo::ReplaceDiscardedFrames(
+ const scoped_refptr<StreamParserBuffer>& stream_parser_buffer) {
+ DCHECK(!discarded_frames_dts_.empty());
+ DCHECK(stream_parser_buffer->IsKeyframe());
+
+ // PTS is interpolated between the min PTS of discarded frames
+ // and the PTS of the first valid buffer.
+ base::TimeDelta pts = discarded_frames_min_pts_;
+ base::TimeDelta pts_delta =
+ (stream_parser_buffer->timestamp() - pts) / discarded_frames_dts_.size();
+
+ while (!discarded_frames_dts_.empty()) {
+ scoped_refptr<StreamParserBuffer> frame =
+ StreamParserBuffer::CopyFrom(
+ stream_parser_buffer->data(),
+ stream_parser_buffer->data_size(),
+ stream_parser_buffer->IsKeyframe(),
+ stream_parser_buffer->type(),
+ stream_parser_buffer->track_id());
+ frame->SetDecodeTimestamp(discarded_frames_dts_.front());
+ frame->set_timestamp(pts);
+ frame->set_duration(pts_delta);
+ buffer_list_.push_back(frame);
+ pts += pts_delta;
+ discarded_frames_dts_.pop_front();
+ }
+}
+
+} // namespace mp2t
+} // namespace media
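
As a side note on the duration logic added in ProcessPendingBuffers() and GetNextFramePts() above: a frame's duration is inferred as the distance to the closest PTS at or after its own, taken from the frames still queued (in decode order) and from recently emitted PTS values. A condensed sketch of that search, assuming plain integer millisecond timestamps instead of base::TimeDelta:

  #include <deque>
  #include <list>

  const int kNoPtsMs = -1;

  // Smallest PTS not earlier than |current_pts_ms| among the queued and the
  // recently emitted frames; kNoPtsMs if there is none.
  int GetNextFramePtsMs(int current_pts_ms,
                        const std::deque<int>& queued_pts_ms,
                        const std::list<int>& emitted_pts_ms) {
    int next_pts_ms = kNoPtsMs;
    for (std::deque<int>::const_iterator it = queued_pts_ms.begin();
         it != queued_pts_ms.end(); ++it) {
      if (*it >= current_pts_ms && (next_pts_ms == kNoPtsMs || *it < next_pts_ms))
        next_pts_ms = *it;
    }
    for (std::list<int>::const_iterator it = emitted_pts_ms.begin();
         it != emitted_pts_ms.end(); ++it) {
      if (*it >= current_pts_ms && (next_pts_ms == kNoPtsMs || *it < next_pts_ms))
        next_pts_ms = *it;
    }
    return next_pts_ms;  // Frame duration = next_pts_ms - current_pts_ms.
  }

With the FrameDurationComplexGop PTS sequence added in the unit test later in this patch (30, 120, 60, 90, ...), the closest PTS at or after 30 is 60, which is why every frame ends up with a 30 ms duration.
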
diff --git a/media/formats/mp2t/es_adapter_video.h b/media/formats/mp2t/es_adapter_video.h
new file mode 100644
index 0000000000..0739fc3c27
--- /dev/null
+++ b/media/formats/mp2t/es_adapter_video.h
@@ -0,0 +1,98 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_FORMATS_MP2T_ES_ADAPTER_VIDEO_H_
+#define MEDIA_FORMATS_MP2T_ES_ADAPTER_VIDEO_H_
+
+#include <deque>
+#include <list>
+#include <utility>
+
+#include "base/callback.h"
+#include "base/memory/ref_counted.h"
+#include "base/time/time.h"
+#include "media/base/media_export.h"
+
+namespace media {
+class StreamParserBuffer;
+class VideoDecoderConfig;
+
+namespace mp2t {
+
+// Some constraints of the MSE spec are not necessarily met by video streams
+// inside an Mpeg2 TS stream.
+// The goal of the ES adapter is to modify the incoming buffers to meet these
+// constraints, e.g.
+// - get the frame duration,
+// - replace the leading non-key frames by the first key frame to avoid
+// creating a hole in the video timeline.
+class MEDIA_EXPORT EsAdapterVideo {
+ public:
+ typedef base::Callback<void(const VideoDecoderConfig&)> NewVideoConfigCB;
+ typedef base::Callback<void(scoped_refptr<StreamParserBuffer>)> EmitBufferCB;
+
+ EsAdapterVideo(
+ const NewVideoConfigCB& new_video_config_cb,
+ const EmitBufferCB& emit_buffer_cb);
+ ~EsAdapterVideo();
+
+ // Force the emission of the pending video buffers.
+ void Flush();
+
+ // Reset the ES adapter to its initial state.
+ void Reset();
+
+ // Provide the configuration that applies to the upcoming video buffers.
+ void OnConfigChanged(const VideoDecoderConfig& video_decoder_config);
+
+ // Provide a new video buffer.
+ void OnNewBuffer(
+ const scoped_refptr<StreamParserBuffer>& stream_parser_buffer);
+
+ private:
+ typedef std::deque<scoped_refptr<StreamParserBuffer> > BufferQueue;
+ typedef std::pair<int64, VideoDecoderConfig> ConfigEntry;
+
+ void ProcessPendingBuffers(bool flush);
+
+ // Return the PTS of the frame that comes just after |current_pts| in
+ // presentation order. Return kNoTimestamp() if not found.
+ base::TimeDelta GetNextFramePts(base::TimeDelta current_pts);
+
+  // Replace the leading non-key frames by |stream_parser_buffer|
+ // (this one must be a key frame).
+ void ReplaceDiscardedFrames(
+ const scoped_refptr<StreamParserBuffer>& stream_parser_buffer);
+
+ NewVideoConfigCB new_video_config_cb_;
+ EmitBufferCB emit_buffer_cb_;
+
+ bool has_valid_config_;
+ bool has_valid_frame_;
+
+ // Duration of the last video frame.
+ base::TimeDelta last_frame_duration_;
+
+ // Association between a video config and a buffer index.
+ std::list<ConfigEntry> config_list_;
+
+ // Global index of the first buffer in |buffer_list_|.
+ int64 buffer_index_;
+
+  // List of buffers to be emitted and PTS of frames already emitted.
+ BufferQueue buffer_list_;
+ std::list<base::TimeDelta> emitted_pts_;
+
+ // - Minimum PTS of discarded frames.
+ // - DTS of discarded frames.
+ base::TimeDelta discarded_frames_min_pts_;
+ std::list<base::TimeDelta> discarded_frames_dts_;
+
+ DISALLOW_COPY_AND_ASSIGN(EsAdapterVideo);
+};
+
+} // namespace mp2t
+} // namespace media
+
+#endif // MEDIA_FORMATS_MP2T_ES_ADAPTER_VIDEO_H_
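
A hypothetical wiring of the adapter from an ES parser, sketched only to show the callback flow; the surrounding class and method names are illustrative and not part of this patch, while the includes and bind helpers are the existing Chromium ones already used by the unit test below:

  #include "base/bind.h"
  #include "base/memory/ref_counted.h"
  #include "media/base/stream_parser_buffer.h"
  #include "media/base/video_decoder_config.h"
  #include "media/formats/mp2t/es_adapter_video.h"

  class FakeVideoEsParser {
   public:
    FakeVideoEsParser()
        : es_adapter_(base::Bind(&FakeVideoEsParser::OnVideoConfig,
                                 base::Unretained(this)),
                      base::Bind(&FakeVideoEsParser::OnEmitBuffer,
                                 base::Unretained(this))) {}

    // Called once the stream configuration is known or changes.
    void OnConfig(const media::VideoDecoderConfig& config) {
      es_adapter_.OnConfigChanged(config);
    }

    // Called for each parsed access unit; durations and leading non-key frame
    // replacement are handled by the adapter before OnEmitBuffer() runs.
    void OnAccessUnit(const scoped_refptr<media::StreamParserBuffer>& buffer) {
      es_adapter_.OnNewBuffer(buffer);
    }

    void OnEndOfSegment() { es_adapter_.Flush(); }

   private:
    void OnVideoConfig(const media::VideoDecoderConfig& config) { /* forward */ }
    void OnEmitBuffer(scoped_refptr<media::StreamParserBuffer> buffer) { /* emit */ }

    media::mp2t::EsAdapterVideo es_adapter_;
  };
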
diff --git a/media/formats/mp2t/es_adapter_video_unittest.cc b/media/formats/mp2t/es_adapter_video_unittest.cc
new file mode 100644
index 0000000000..a5446cfd05
--- /dev/null
+++ b/media/formats/mp2t/es_adapter_video_unittest.cc
@@ -0,0 +1,148 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <sstream>
+#include <string>
+#include <vector>
+
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/strings/string_util.h"
+#include "base/time/time.h"
+#include "media/base/stream_parser_buffer.h"
+#include "media/base/video_decoder_config.h"
+#include "media/formats/mp2t/es_adapter_video.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+namespace mp2t {
+
+namespace {
+
+VideoDecoderConfig CreateFakeVideoConfig() {
+ gfx::Size coded_size(320, 240);
+ gfx::Rect visible_rect(0, 0, 320, 240);
+ gfx::Size natural_size(320, 240);
+ return VideoDecoderConfig(
+ kCodecH264,
+ H264PROFILE_MAIN,
+ VideoFrame::I420,
+ coded_size,
+ visible_rect,
+ natural_size,
+ NULL, 0, false);
+}
+
+StreamParserBuffer::BufferQueue
+GenerateFakeBuffers(const int* frame_pts_ms,
+ const bool* is_key_frame,
+ size_t frame_count) {
+ uint8 dummy_buffer[] = {0, 0, 0, 0};
+
+ StreamParserBuffer::BufferQueue buffers(frame_count);
+ for (size_t k = 0; k < frame_count; k++) {
+ buffers[k] = StreamParserBuffer::CopyFrom(
+ dummy_buffer, arraysize(dummy_buffer),
+ is_key_frame[k], DemuxerStream::VIDEO, 0);
+ buffers[k]->set_timestamp(
+ base::TimeDelta::FromMilliseconds(frame_pts_ms[k]));
+ }
+ return buffers;
+}
+
+}
+
+class EsAdapterVideoTest : public testing::Test {
+ public:
+ EsAdapterVideoTest();
+ virtual ~EsAdapterVideoTest() {}
+
+ protected:
+ // Feed the ES adapter with the buffers from |buffer_queue|.
+ // Return the durations computed by the ES adapter as well as
+ // whether each frame emitted by the adapter is a key frame.
+ std::string RunAdapterTest(const StreamParser::BufferQueue& buffer_queue);
+
+ private:
+ void OnNewConfig(const VideoDecoderConfig& video_config);
+ void OnNewBuffer(scoped_refptr<StreamParserBuffer> buffer);
+
+ EsAdapterVideo es_adapter_;
+
+ std::stringstream buffer_descriptors_;
+
+ DISALLOW_COPY_AND_ASSIGN(EsAdapterVideoTest);
+};
+
+EsAdapterVideoTest::EsAdapterVideoTest()
+ : es_adapter_(base::Bind(&EsAdapterVideoTest::OnNewConfig,
+ base::Unretained(this)),
+ base::Bind(&EsAdapterVideoTest::OnNewBuffer,
+ base::Unretained(this))) {
+}
+
+void EsAdapterVideoTest::OnNewConfig(const VideoDecoderConfig& video_config) {
+}
+
+void EsAdapterVideoTest::OnNewBuffer(
+ scoped_refptr<StreamParserBuffer> buffer) {
+ buffer_descriptors_ << "(" << buffer->duration().InMilliseconds() << ","
+ << (buffer->IsKeyframe() ? "Y" : "N") << ") ";
+}
+
+std::string EsAdapterVideoTest::RunAdapterTest(
+ const StreamParserBuffer::BufferQueue& buffer_queue) {
+  // Note: std::stringstream::clear() only resets the error flags; use str()
+  // to actually empty the accumulated descriptor string.
+  buffer_descriptors_.str(std::string());
+
+ es_adapter_.OnConfigChanged(CreateFakeVideoConfig());
+ for (StreamParserBuffer::BufferQueue::const_iterator it =
+ buffer_queue.begin(); it != buffer_queue.end(); ++it) {
+ es_adapter_.OnNewBuffer(*it);
+ }
+ es_adapter_.Flush();
+
+ std::string s = buffer_descriptors_.str();
+ base::TrimWhitespaceASCII(s, base::TRIM_ALL, &s);
+ return s;
+}
+
+TEST_F(EsAdapterVideoTest, FrameDurationSimpleGop) {
+ // PTS for a GOP without B frames - strictly increasing.
+ int pts_ms[] = {30, 31, 33, 36, 40, 45, 51, 58};
+ bool is_key_frame[] = {
+ true, false, false, false,
+ false, false, false, false };
+ StreamParserBuffer::BufferQueue buffer_queue =
+ GenerateFakeBuffers(pts_ms, is_key_frame, arraysize(pts_ms));
+
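+  // The expected durations below are the successive PTS deltas
+  // (31-30=1 ms, 33-31=2 ms, ..., 58-51=7 ms); the last frame has no next
+  // PTS, so its duration matches the previous frame (7 ms).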
+ EXPECT_EQ("(1,Y) (2,N) (3,N) (4,N) (5,N) (6,N) (7,N) (7,N)",
+ RunAdapterTest(buffer_queue));
+}
+
+TEST_F(EsAdapterVideoTest, FrameDurationComplexGop) {
+ // PTS for a GOP with B frames.
+ int pts_ms[] = {30, 120, 60, 90, 210, 150, 180, 300, 240, 270};
+ bool is_key_frame[] = {
+ true, false, false, false, false,
+ false, false, false, false, false };
+ StreamParserBuffer::BufferQueue buffer_queue =
+ GenerateFakeBuffers(pts_ms, is_key_frame, arraysize(pts_ms));
+
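+  // In presentation order the PTS are 30, 60, ..., 300 ms, i.e. evenly
+  // spaced 30 ms apart, so every emitted frame is expected to get a 30 ms
+  // duration.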
+ EXPECT_EQ("(30,Y) (30,N) (30,N) (30,N) (30,N) "
+ "(30,N) (30,N) (30,N) (30,N) (30,N)",
+ RunAdapterTest(buffer_queue));
+}
+
+TEST_F(EsAdapterVideoTest, LeadingNonKeyFrames) {
+ int pts_ms[] = {30, 40, 50, 120, 150, 180};
+ bool is_key_frame[] = {false, false, false, true, false, false};
+ StreamParserBuffer::BufferQueue buffer_queue =
+ GenerateFakeBuffers(pts_ms, is_key_frame, arraysize(pts_ms));
+
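+  // The three leading non-key frames are expected to be emitted as key
+  // frames, with PTS spread evenly between the minimum discarded PTS (30 ms)
+  // and the first key frame (120 ms), i.e. (120 - 30) / 3 = 30 ms apart,
+  // hence the uniform 30 ms durations below.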
+ EXPECT_EQ("(30,Y) (30,Y) (30,Y) (30,Y) (30,N) (30,N)",
+ RunAdapterTest(buffer_queue));
+}
+
+} // namespace mp2t
+} // namespace media
diff --git a/media/formats/mp2t/es_parser_adts.cc b/media/formats/mp2t/es_parser_adts.cc
index 84ddf785ae..433baabe5e 100644
--- a/media/formats/mp2t/es_parser_adts.cc
+++ b/media/formats/mp2t/es_parser_adts.cc
@@ -14,6 +14,7 @@
#include "media/base/buffers.h"
#include "media/base/channel_layout.h"
#include "media/base/stream_parser_buffer.h"
+#include "media/formats/common/offset_byte_queue.h"
#include "media/formats/mp2t/mp2t_common.h"
#include "media/formats/mpeg/adts_constants.h"
@@ -37,40 +38,36 @@ static size_t ExtractAdtsChannelConfig(const uint8* adts_header) {
// Return true if buf corresponds to an ADTS syncword.
// |buf| size must be at least 2.
static bool isAdtsSyncWord(const uint8* buf) {
+ // The first 12 bits must be 1.
+ // The layer field (2 bits) must be set to 0.
return (buf[0] == 0xff) && ((buf[1] & 0xf6) == 0xf0);
}
-// Look for an ADTS syncword.
-// |new_pos| returns
-// - either the byte position of the ADTS frame (if found)
-// - or the byte position of 1st byte that was not processed (if not found).
-// In every case, the returned value in |new_pos| is such that new_pos >= pos
-// |frame_sz| returns the size of the ADTS frame (if found).
-// Return whether a syncword was found.
-static bool LookForSyncWord(const uint8* raw_es, int raw_es_size,
- int pos,
- int* new_pos, int* frame_sz) {
- DCHECK_GE(pos, 0);
- DCHECK_LE(pos, raw_es_size);
-
- int max_offset = raw_es_size - kADTSHeaderMinSize;
- if (pos >= max_offset) {
- // Do not change the position if:
- // - max_offset < 0: not enough bytes to get a full header
- // Since pos >= 0, this is a subcase of the next condition.
- // - pos >= max_offset: might be the case after reading one full frame,
- // |pos| is then incremented by the frame size and might then point
- // to the end of the buffer.
- *new_pos = pos;
- return false;
- }
+namespace mp2t {
+
+struct EsParserAdts::AdtsFrame {
+ // Pointer to the ES data.
+ const uint8* data;
+
+  // Frame size.
+ int size;
+
+ // Frame offset in the ES queue.
+ int64 queue_offset;
+};
+
+bool EsParserAdts::LookForAdtsFrame(AdtsFrame* adts_frame) {
+ int es_size;
+ const uint8* es;
+ es_queue_->Peek(&es, &es_size);
- for (int offset = pos; offset < max_offset; offset++) {
- const uint8* cur_buf = &raw_es[offset];
+ int max_offset = es_size - kADTSHeaderMinSize;
+ if (max_offset <= 0)
+ return false;
+ for (int offset = 0; offset < max_offset; offset++) {
+ const uint8* cur_buf = &es[offset];
if (!isAdtsSyncWord(cur_buf))
- // The first 12 bits must be 1.
- // The layer field (2 bits) must be set to 0.
continue;
int frame_size = ExtractAdtsFrameSize(cur_buf);
@@ -79,24 +76,41 @@ static bool LookForSyncWord(const uint8* raw_es, int raw_es_size,
continue;
}
+ int remaining_size = es_size - offset;
+ if (remaining_size < frame_size) {
+ // Not a full frame: will resume when we have more data.
+ es_queue_->Pop(offset);
+ return false;
+ }
+
// Check whether there is another frame
// |size| apart from the current one.
- int remaining_size = raw_es_size - offset;
if (remaining_size >= frame_size + 2 &&
!isAdtsSyncWord(&cur_buf[frame_size])) {
continue;
}
- *new_pos = offset;
- *frame_sz = frame_size;
+ es_queue_->Pop(offset);
+ es_queue_->Peek(&adts_frame->data, &es_size);
+ adts_frame->queue_offset = es_queue_->head();
+ adts_frame->size = frame_size;
+ DVLOG(LOG_LEVEL_ES)
+ << "ADTS syncword @ pos=" << adts_frame->queue_offset
+ << " frame_size=" << adts_frame->size;
+ DVLOG(LOG_LEVEL_ES)
+ << "ADTS header: "
+ << base::HexEncode(adts_frame->data, kADTSHeaderMinSize);
return true;
}
- *new_pos = max_offset;
+ es_queue_->Pop(max_offset);
return false;
}
-namespace mp2t {
+void EsParserAdts::SkipAdtsFrame(const AdtsFrame& adts_frame) {
+ DCHECK_EQ(adts_frame.queue_offset, es_queue_->head());
+ es_queue_->Pop(adts_frame.size);
+}
EsParserAdts::EsParserAdts(
const NewAudioConfigCB& new_audio_config_cb,
@@ -104,7 +118,8 @@ EsParserAdts::EsParserAdts(
bool sbr_in_mimetype)
: new_audio_config_cb_(new_audio_config_cb),
emit_buffer_cb_(emit_buffer_cb),
- sbr_in_mimetype_(sbr_in_mimetype) {
+ sbr_in_mimetype_(sbr_in_mimetype),
+ es_queue_(new media::OffsetByteQueue()) {
}
EsParserAdts::~EsParserAdts() {
@@ -113,45 +128,25 @@ EsParserAdts::~EsParserAdts() {
bool EsParserAdts::Parse(const uint8* buf, int size,
base::TimeDelta pts,
base::TimeDelta dts) {
- int raw_es_size;
- const uint8* raw_es;
-
// The incoming PTS applies to the access unit that comes just after
// the beginning of |buf|.
- if (pts != kNoTimestamp()) {
- es_byte_queue_.Peek(&raw_es, &raw_es_size);
- pts_list_.push_back(EsPts(raw_es_size, pts));
- }
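+  // Note: |es_queue_->tail()| is the queue-wide offset at which |buf| is
+  // appended to |es_queue_| just below, i.e. the position of that access
+  // unit in the ES stream.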
+ if (pts != kNoTimestamp())
+ pts_list_.push_back(EsPts(es_queue_->tail(), pts));
// Copy the input data to the ES buffer.
- es_byte_queue_.Push(buf, size);
- es_byte_queue_.Peek(&raw_es, &raw_es_size);
-
- // Look for every ADTS frame in the ES buffer starting at offset = 0
- int es_position = 0;
- int frame_size;
- while (LookForSyncWord(raw_es, raw_es_size, es_position,
- &es_position, &frame_size)) {
- DVLOG(LOG_LEVEL_ES)
- << "ADTS syncword @ pos=" << es_position
- << " frame_size=" << frame_size;
- DVLOG(LOG_LEVEL_ES)
- << "ADTS header: "
- << base::HexEncode(&raw_es[es_position], kADTSHeaderMinSize);
-
- // Do not process the frame if this one is a partial frame.
- int remaining_size = raw_es_size - es_position;
- if (frame_size > remaining_size)
- break;
+ es_queue_->Push(buf, size);
+ // Look for every ADTS frame in the ES buffer.
+ AdtsFrame adts_frame;
+ while (LookForAdtsFrame(&adts_frame)) {
// Update the audio configuration if needed.
- DCHECK_GE(frame_size, kADTSHeaderMinSize);
- if (!UpdateAudioConfiguration(&raw_es[es_position]))
+ DCHECK_GE(adts_frame.size, kADTSHeaderMinSize);
+ if (!UpdateAudioConfiguration(adts_frame.data))
return false;
// Get the PTS & the duration of this access unit.
while (!pts_list_.empty() &&
- pts_list_.front().first <= es_position) {
+ pts_list_.front().first <= adts_frame.queue_offset) {
audio_timestamp_helper_->SetBaseTimestamp(pts_list_.front().second);
pts_list_.pop_front();
}
@@ -167,8 +162,8 @@ bool EsParserAdts::Parse(const uint8* buf, int size,
// type and allow multiple audio tracks. See https://crbug.com/341581.
scoped_refptr<StreamParserBuffer> stream_parser_buffer =
StreamParserBuffer::CopyFrom(
- &raw_es[es_position],
- frame_size,
+ adts_frame.data,
+ adts_frame.size,
is_key_frame,
DemuxerStream::AUDIO, 0);
stream_parser_buffer->SetDecodeTimestamp(current_pts);
@@ -180,12 +175,9 @@ bool EsParserAdts::Parse(const uint8* buf, int size,
audio_timestamp_helper_->AddFrames(kSamplesPerAACFrame);
// Skip the current frame.
- es_position += frame_size;
+ SkipAdtsFrame(adts_frame);
}
- // Discard all the bytes that have been processed.
- DiscardEs(es_position);
-
return true;
}
@@ -193,7 +185,7 @@ void EsParserAdts::Flush() {
}
void EsParserAdts::Reset() {
- es_byte_queue_.Reset();
+ es_queue_.reset(new media::OffsetByteQueue());
pts_list_.clear();
last_audio_decoder_config_ = AudioDecoderConfig();
}
@@ -272,19 +264,6 @@ bool EsParserAdts::UpdateAudioConfiguration(const uint8* adts_header) {
return true;
}
-void EsParserAdts::DiscardEs(int nbytes) {
- DCHECK_GE(nbytes, 0);
- if (nbytes <= 0)
- return;
-
- // Adjust the ES position of each PTS.
- for (EsPtsList::iterator it = pts_list_.begin(); it != pts_list_.end(); ++it)
- it->first -= nbytes;
-
- // Discard |nbytes| of ES.
- es_byte_queue_.Pop(nbytes);
-}
-
} // namespace mp2t
} // namespace media
diff --git a/media/formats/mp2t/es_parser_adts.h b/media/formats/mp2t/es_parser_adts.h
index e55eaf70e1..03c90110e7 100644
--- a/media/formats/mp2t/es_parser_adts.h
+++ b/media/formats/mp2t/es_parser_adts.h
@@ -13,12 +13,12 @@
#include "base/memory/scoped_ptr.h"
#include "base/time/time.h"
#include "media/base/audio_decoder_config.h"
-#include "media/base/byte_queue.h"
#include "media/formats/mp2t/es_parser.h"
namespace media {
class AudioTimestampHelper;
class BitReader;
+class OffsetByteQueue;
class StreamParserBuffer;
}
@@ -43,17 +43,26 @@ class EsParserAdts : public EsParser {
private:
// Used to link a PTS with a byte position in the ES stream.
- typedef std::pair<int, base::TimeDelta> EsPts;
+ typedef std::pair<int64, base::TimeDelta> EsPts;
typedef std::list<EsPts> EsPtsList;
+ struct AdtsFrame;
+
+ // Synchronize the stream on an ADTS syncword (consuming bytes from
+ // |es_queue_| if needed).
+  // Returns true when a full ADTS frame has been found: in that case,
+  // the |adts_frame| structure is filled in accordingly.
+ // Returns false otherwise (no ADTS syncword found or partial ADTS frame).
+ bool LookForAdtsFrame(AdtsFrame* adts_frame);
+
+ // Skip an ADTS frame in the ES queue.
+ void SkipAdtsFrame(const AdtsFrame& adts_frame);
+
// Signal any audio configuration change (if any).
// Return false if the current audio config is not
// a supported ADTS audio config.
bool UpdateAudioConfiguration(const uint8* adts_header);
- // Discard some bytes from the ES stream.
- void DiscardEs(int nbytes);
-
// Callbacks:
// - to signal a new audio configuration,
// - to send ES buffers.
@@ -65,7 +74,7 @@ class EsParserAdts : public EsParser {
bool sbr_in_mimetype_;
// Bytes of the ES stream that have not been emitted yet.
- ByteQueue es_byte_queue_;
+ scoped_ptr<media::OffsetByteQueue> es_queue_;
// List of PTS associated with a position in the ES stream.
EsPtsList pts_list_;
diff --git a/media/formats/mp2t/es_parser_h264.cc b/media/formats/mp2t/es_parser_h264.cc
index 691678ce81..f2c166cf8c 100644
--- a/media/formats/mp2t/es_parser_h264.cc
+++ b/media/formats/mp2t/es_parser_h264.cc
@@ -12,6 +12,7 @@
#include "media/base/video_frame.h"
#include "media/filters/h264_parser.h"
#include "media/formats/common/offset_byte_queue.h"
+#include "media/formats/mp2t/es_adapter_video.h"
#include "media/formats/mp2t/mp2t_common.h"
#include "ui/gfx/rect.h"
#include "ui/gfx/size.h"
@@ -26,12 +27,11 @@ const int kMinAUDSize = 4;
EsParserH264::EsParserH264(
const NewVideoConfigCB& new_video_config_cb,
const EmitBufferCB& emit_buffer_cb)
- : new_video_config_cb_(new_video_config_cb),
- emit_buffer_cb_(emit_buffer_cb),
- es_queue_(new media::OffsetByteQueue()),
- h264_parser_(new H264Parser()),
- current_access_unit_pos_(0),
- next_access_unit_pos_(0) {
+ : es_adapter_(new_video_config_cb, emit_buffer_cb),
+ es_queue_(new media::OffsetByteQueue()),
+ h264_parser_(new H264Parser()),
+ current_access_unit_pos_(0),
+ next_access_unit_pos_(0) {
}
EsParserH264::~EsParserH264() {
@@ -75,6 +75,8 @@ void EsParserH264::Flush() {
uint8 aud[] = { 0x00, 0x00, 0x01, 0x09 };
es_queue_->Push(aud, sizeof(aud));
ParseInternal();
+
+ es_adapter_.Flush();
}
void EsParserH264::Reset() {
@@ -85,6 +87,7 @@ void EsParserH264::Reset() {
next_access_unit_pos_ = 0;
timing_desc_list_.clear();
last_video_decoder_config_ = VideoDecoderConfig();
+ es_adapter_.Reset();
}
bool EsParserH264::FindAUD(int64* stream_pos) {
@@ -273,7 +276,7 @@ bool EsParserH264::EmitFrame(int64 access_unit_pos, int access_unit_size,
0);
stream_parser_buffer->SetDecodeTimestamp(current_timing_desc.dts);
stream_parser_buffer->set_timestamp(current_timing_desc.pts);
- emit_buffer_cb_.Run(stream_parser_buffer);
+ es_adapter_.OnNewBuffer(stream_parser_buffer);
return true;
}
@@ -321,7 +324,7 @@ bool EsParserH264::UpdateVideoDecoderConfig(const H264SPS* sps) {
DVLOG(1) << "SAR: width=" << sps->sar_width
<< " height=" << sps->sar_height;
last_video_decoder_config_ = video_decoder_config;
- new_video_config_cb_.Run(video_decoder_config);
+ es_adapter_.OnConfigChanged(video_decoder_config);
}
return true;
diff --git a/media/formats/mp2t/es_parser_h264.h b/media/formats/mp2t/es_parser_h264.h
index bf4f4cc1d9..674b2c650a 100644
--- a/media/formats/mp2t/es_parser_h264.h
+++ b/media/formats/mp2t/es_parser_h264.h
@@ -15,6 +15,7 @@
#include "base/time/time.h"
#include "media/base/media_export.h"
#include "media/base/video_decoder_config.h"
+#include "media/formats/mp2t/es_adapter_video.h"
#include "media/formats/mp2t/es_parser.h"
namespace media {
@@ -72,9 +73,7 @@ class MEDIA_EXPORT EsParserH264 : NON_EXPORTED_BASE(public EsParser) {
// Return true if successful.
bool UpdateVideoDecoderConfig(const H264SPS* sps);
- // Callbacks to pass the stream configuration and the frames.
- NewVideoConfigCB new_video_config_cb_;
- EmitBufferCB emit_buffer_cb_;
+ EsAdapterVideo es_adapter_;
// Bytes of the ES stream that have not been emitted yet.
scoped_ptr<media::OffsetByteQueue> es_queue_;
@@ -89,6 +88,8 @@ class MEDIA_EXPORT EsParserH264 : NON_EXPORTED_BASE(public EsParser) {
// Last video decoder config.
VideoDecoderConfig last_video_decoder_config_;
+
+ DISALLOW_COPY_AND_ASSIGN(EsParserH264);
};
} // namespace mp2t
diff --git a/media/formats/mp2t/mp2t_stream_parser.cc b/media/formats/mp2t/mp2t_stream_parser.cc
index 48497559d6..35c61d6bde 100644
--- a/media/formats/mp2t/mp2t_stream_parser.cc
+++ b/media/formats/mp2t/mp2t_stream_parser.cc
@@ -157,8 +157,7 @@ Mp2tStreamParser::Mp2tStreamParser(bool sbr_in_mimetype)
selected_audio_pid_(-1),
selected_video_pid_(-1),
is_initialized_(false),
- segment_started_(false),
- first_video_frame_in_segment_(true) {
+ segment_started_(false) {
}
Mp2tStreamParser::~Mp2tStreamParser() {
@@ -210,8 +209,6 @@ void Mp2tStreamParser::Flush() {
// Note: does not need to invoke |end_of_segment_cb_| since flushing the
// stream parser already involves the end of the current segment.
segment_started_ = false;
- first_video_frame_in_segment_ = true;
- discarded_frames_dts_.clear();
// Remove any bytes left in the TS buffer.
// (i.e. any partial TS packet => less than 188 bytes).
@@ -417,13 +414,20 @@ void Mp2tStreamParser::OnVideoConfigChanged(
DCHECK_EQ(pes_pid, selected_video_pid_);
DCHECK(video_decoder_config.IsValidConfig());
- // Create a new entry in |buffer_queue_chain_| with the updated configs.
- BufferQueueWithConfig buffer_queue_with_config(
- false,
- buffer_queue_chain_.empty()
- ? AudioDecoderConfig() : buffer_queue_chain_.back().audio_config,
- video_decoder_config);
- buffer_queue_chain_.push_back(buffer_queue_with_config);
+ if (!buffer_queue_chain_.empty() &&
+ !buffer_queue_chain_.back().video_config.IsValidConfig()) {
+    // No video has been received so far; reuse the existing video queue.
+ DCHECK(buffer_queue_chain_.back().video_queue.empty());
+ buffer_queue_chain_.back().video_config = video_decoder_config;
+ } else {
+ // Create a new entry in |buffer_queue_chain_| with the updated configs.
+ BufferQueueWithConfig buffer_queue_with_config(
+ false,
+ buffer_queue_chain_.empty()
+ ? AudioDecoderConfig() : buffer_queue_chain_.back().audio_config,
+ video_decoder_config);
+ buffer_queue_chain_.push_back(buffer_queue_with_config);
+ }
// Replace any non valid config with the 1st valid entry.
// This might happen if there was no available config before.
@@ -442,13 +446,20 @@ void Mp2tStreamParser::OnAudioConfigChanged(
DCHECK_EQ(pes_pid, selected_audio_pid_);
DCHECK(audio_decoder_config.IsValidConfig());
- // Create a new entry in |buffer_queue_chain_| with the updated configs.
- BufferQueueWithConfig buffer_queue_with_config(
- false,
- audio_decoder_config,
- buffer_queue_chain_.empty()
- ? VideoDecoderConfig() : buffer_queue_chain_.back().video_config);
- buffer_queue_chain_.push_back(buffer_queue_with_config);
+ if (!buffer_queue_chain_.empty() &&
+ !buffer_queue_chain_.back().audio_config.IsValidConfig()) {
+    // No audio has been received so far; reuse the existing audio queue.
+ DCHECK(buffer_queue_chain_.back().audio_queue.empty());
+ buffer_queue_chain_.back().audio_config = audio_decoder_config;
+ } else {
+ // Create a new entry in |buffer_queue_chain_| with the updated configs.
+ BufferQueueWithConfig buffer_queue_with_config(
+ false,
+ audio_decoder_config,
+ buffer_queue_chain_.empty()
+ ? VideoDecoderConfig() : buffer_queue_chain_.back().video_config);
+ buffer_queue_chain_.push_back(buffer_queue_with_config);
+ }
// Replace any non valid config with the 1st valid entry.
// This might happen if there was no available config before.
@@ -505,7 +516,9 @@ void Mp2tStreamParser::OnEmitAudioBuffer(
<< " dts="
<< stream_parser_buffer->GetDecodeTimestamp().InMilliseconds()
<< " pts="
- << stream_parser_buffer->timestamp().InMilliseconds();
+ << stream_parser_buffer->timestamp().InMilliseconds()
+ << " dur="
+ << stream_parser_buffer->duration().InMilliseconds();
stream_parser_buffer->set_timestamp(
stream_parser_buffer->timestamp() - time_offset_);
stream_parser_buffer->SetDecodeTimestamp(
@@ -513,7 +526,7 @@ void Mp2tStreamParser::OnEmitAudioBuffer(
// Ignore the incoming buffer if it is not associated with any config.
if (buffer_queue_chain_.empty()) {
- DVLOG(1) << "Ignoring audio buffer with no corresponding audio config";
+ NOTREACHED() << "Cannot provide buffers before configs";
return;
}
@@ -533,6 +546,8 @@ void Mp2tStreamParser::OnEmitVideoBuffer(
<< stream_parser_buffer->GetDecodeTimestamp().InMilliseconds()
<< " pts="
<< stream_parser_buffer->timestamp().InMilliseconds()
+ << " dur="
+ << stream_parser_buffer->duration().InMilliseconds()
<< " IsKeyframe="
<< stream_parser_buffer->IsKeyframe();
stream_parser_buffer->set_timestamp(
@@ -540,29 +555,12 @@ void Mp2tStreamParser::OnEmitVideoBuffer(
stream_parser_buffer->SetDecodeTimestamp(
stream_parser_buffer->GetDecodeTimestamp() - time_offset_);
- // Discard the incoming buffer:
- // - if it is not associated with any config,
- // - or if only non-key frames have been added to a new segment.
- if (buffer_queue_chain_.empty() ||
- (first_video_frame_in_segment_ && !stream_parser_buffer->IsKeyframe())) {
- DVLOG(1) << "Discard video buffer:"
- << " keyframe=" << stream_parser_buffer->IsKeyframe()
- << " dts="
- << stream_parser_buffer->GetDecodeTimestamp().InMilliseconds();
- if (discarded_frames_dts_.empty() ||
- discarded_frames_min_pts_ > stream_parser_buffer->timestamp()) {
- discarded_frames_min_pts_ = stream_parser_buffer->timestamp();
- }
- discarded_frames_dts_.push_back(
- stream_parser_buffer->GetDecodeTimestamp());
+ // Ignore the incoming buffer if it is not associated with any config.
+ if (buffer_queue_chain_.empty()) {
+ NOTREACHED() << "Cannot provide buffers before configs";
return;
}
- // Fill the gap created by frames that have been discarded.
- if (!discarded_frames_dts_.empty())
- FillVideoGap(stream_parser_buffer);
-
- first_video_frame_in_segment_ = false;
buffer_queue_chain_.back().video_queue.push_back(stream_parser_buffer);
}
@@ -630,33 +628,5 @@ bool Mp2tStreamParser::EmitRemainingBuffers() {
return true;
}
-void Mp2tStreamParser::FillVideoGap(
- const scoped_refptr<StreamParserBuffer>& stream_parser_buffer) {
- DCHECK(!buffer_queue_chain_.empty());
- DCHECK(!discarded_frames_dts_.empty());
- DCHECK(stream_parser_buffer->IsKeyframe());
-
- // PTS is interpolated between the min PTS of discarded frames
- // and the PTS of the first valid buffer.
- base::TimeDelta pts = discarded_frames_min_pts_;
- base::TimeDelta pts_delta =
- (stream_parser_buffer->timestamp() - pts) / discarded_frames_dts_.size();
-
- while (!discarded_frames_dts_.empty()) {
- scoped_refptr<StreamParserBuffer> frame =
- StreamParserBuffer::CopyFrom(
- stream_parser_buffer->data(),
- stream_parser_buffer->data_size(),
- stream_parser_buffer->IsKeyframe(),
- stream_parser_buffer->type(),
- stream_parser_buffer->track_id());
- frame->SetDecodeTimestamp(discarded_frames_dts_.front());
- frame->set_timestamp(pts);
- buffer_queue_chain_.back().video_queue.push_back(frame);
- pts += pts_delta;
- discarded_frames_dts_.pop_front();
- }
-}
-
} // namespace mp2t
} // namespace media
diff --git a/media/formats/mp2t/mp2t_stream_parser.h b/media/formats/mp2t/mp2t_stream_parser.h
index 61f344067e..e419f029ae 100644
--- a/media/formats/mp2t/mp2t_stream_parser.h
+++ b/media/formats/mp2t/mp2t_stream_parser.h
@@ -92,12 +92,6 @@ class MEDIA_EXPORT Mp2tStreamParser : public StreamParser {
scoped_refptr<StreamParserBuffer> stream_parser_buffer);
bool EmitRemainingBuffers();
- // At the beginning of a new segment, some video frames might be discarded.
- // This function fills the hole by duplicating the first valid key frame
- // given by |stream_parser_buffer|.
- void FillVideoGap(
- const scoped_refptr<StreamParserBuffer>& stream_parser_buffer);
-
// List of callbacks.
InitCB init_cb_;
NewConfigCB config_cb_;
@@ -121,11 +115,6 @@ class MEDIA_EXPORT Mp2tStreamParser : public StreamParser {
int selected_audio_pid_;
int selected_video_pid_;
- // DTS of discarded buffers.
- // Min PTS of discarded buffers.
- std::list<base::TimeDelta> discarded_frames_dts_;
- base::TimeDelta discarded_frames_min_pts_;
-
// Pending audio & video buffers.
std::list<BufferQueueWithConfig> buffer_queue_chain_;
@@ -134,7 +123,6 @@ class MEDIA_EXPORT Mp2tStreamParser : public StreamParser {
// Indicate whether a segment was started.
bool segment_started_;
- bool first_video_frame_in_segment_;
base::TimeDelta time_offset_;
DISALLOW_COPY_AND_ASSIGN(Mp2tStreamParser);
diff --git a/media/formats/mp2t/mp2t_stream_parser_unittest.cc b/media/formats/mp2t/mp2t_stream_parser_unittest.cc
index 1f32986285..f74baa6dcd 100644
--- a/media/formats/mp2t/mp2t_stream_parser_unittest.cc
+++ b/media/formats/mp2t/mp2t_stream_parser_unittest.cc
@@ -22,11 +22,38 @@
namespace media {
namespace mp2t {
+namespace {
+
+bool IsMonotonic(const StreamParser::BufferQueue& buffers) {
+ if (buffers.empty())
+ return true;
+
+  StreamParser::BufferQueue::const_iterator it1 = buffers.begin();
+  StreamParser::BufferQueue::const_iterator it2 = it1;
+  ++it2;
+ for ( ; it2 != buffers.end(); ++it1, ++it2) {
+ if ((*it2)->GetDecodeTimestamp() < (*it1)->GetDecodeTimestamp())
+ return false;
+ }
+ return true;
+}
+
+bool IsAlmostEqual(base::TimeDelta t0, base::TimeDelta t1) {
+ base::TimeDelta kMaxDeviation = base::TimeDelta::FromMilliseconds(5);
+ base::TimeDelta diff = t1 - t0;
+ return (diff >= -kMaxDeviation && diff <= kMaxDeviation);
+}
+
+} // namespace
+
class Mp2tStreamParserTest : public testing::Test {
public:
Mp2tStreamParserTest()
- : audio_frame_count_(0),
+ : segment_count_(0),
+ config_count_(0),
+ audio_frame_count_(0),
video_frame_count_(0),
+ audio_min_dts_(kNoTimestamp()),
+ audio_max_dts_(kNoTimestamp()),
video_min_dts_(kNoTimestamp()),
video_max_dts_(kNoTimestamp()) {
bool has_sbr = false;
@@ -35,11 +62,26 @@ class Mp2tStreamParserTest : public testing::Test {
protected:
scoped_ptr<Mp2tStreamParser> parser_;
+ int segment_count_;
+ int config_count_;
int audio_frame_count_;
int video_frame_count_;
+ base::TimeDelta audio_min_dts_;
+ base::TimeDelta audio_max_dts_;
base::TimeDelta video_min_dts_;
base::TimeDelta video_max_dts_;
+ void ResetStats() {
+ segment_count_ = 0;
+ config_count_ = 0;
+ audio_frame_count_ = 0;
+ video_frame_count_ = 0;
+ audio_min_dts_ = kNoTimestamp();
+ audio_max_dts_ = kNoTimestamp();
+ video_min_dts_ = kNoTimestamp();
+ video_max_dts_ = kNoTimestamp();
+ }
+
bool AppendData(const uint8* data, size_t length) {
return parser_->Parse(data, length);
}
@@ -70,6 +112,7 @@ class Mp2tStreamParserTest : public testing::Test {
DVLOG(1) << "OnNewConfig: audio=" << ac.IsValidConfig()
<< ", video=" << vc.IsValidConfig();
// Test streams have both audio and video, verify the configs are valid.
+ config_count_++;
EXPECT_TRUE(ac.IsValidConfig());
EXPECT_TRUE(vc.IsValidConfig());
return true;
@@ -90,43 +133,57 @@ class Mp2tStreamParserTest : public testing::Test {
bool OnNewBuffers(const StreamParser::BufferQueue& audio_buffers,
const StreamParser::BufferQueue& video_buffers,
const StreamParser::TextBufferQueueMap& text_map) {
+ EXPECT_GT(config_count_, 0);
DumpBuffers("audio_buffers", audio_buffers);
DumpBuffers("video_buffers", video_buffers);
- audio_frame_count_ += audio_buffers.size();
- video_frame_count_ += video_buffers.size();
// TODO(wolenetz/acolwell): Add text track support to more MSE parsers. See
// http://crbug.com/336926.
if (!text_map.empty())
return false;
- if (video_min_dts_ == kNoTimestamp() && !video_buffers.empty())
- video_min_dts_ = video_buffers.front()->GetDecodeTimestamp();
+ // Verify monotonicity.
+ if (!IsMonotonic(video_buffers))
+ return false;
+ if (!IsMonotonic(audio_buffers))
+ return false;
+
if (!video_buffers.empty()) {
- video_max_dts_ = video_buffers.back()->GetDecodeTimestamp();
- // Verify monotonicity.
- StreamParser::BufferQueue::const_iterator it1 = video_buffers.begin();
- StreamParser::BufferQueue::const_iterator it2 = ++it1;
- for ( ; it2 != video_buffers.end(); ++it1, ++it2) {
- if ((*it2)->GetDecodeTimestamp() < (*it1)->GetDecodeTimestamp())
- return false;
- }
+ base::TimeDelta first_dts = video_buffers.front()->GetDecodeTimestamp();
+ base::TimeDelta last_dts = video_buffers.back()->GetDecodeTimestamp();
+ if (video_max_dts_ != kNoTimestamp() && first_dts < video_max_dts_)
+ return false;
+ if (video_min_dts_ == kNoTimestamp())
+ video_min_dts_ = first_dts;
+ video_max_dts_ = last_dts;
+ }
+ if (!audio_buffers.empty()) {
+ base::TimeDelta first_dts = audio_buffers.front()->GetDecodeTimestamp();
+ base::TimeDelta last_dts = audio_buffers.back()->GetDecodeTimestamp();
+ if (audio_max_dts_ != kNoTimestamp() && first_dts < audio_max_dts_)
+ return false;
+ if (audio_min_dts_ == kNoTimestamp())
+ audio_min_dts_ = first_dts;
+ audio_max_dts_ = last_dts;
}
+ audio_frame_count_ += audio_buffers.size();
+ video_frame_count_ += video_buffers.size();
return true;
}
void OnKeyNeeded(const std::string& type,
const std::vector<uint8>& init_data) {
- DVLOG(1) << "OnKeyNeeded: " << init_data.size();
+ NOTREACHED() << "OnKeyNeeded not expected in the Mpeg2 TS parser";
}
void OnNewSegment() {
DVLOG(1) << "OnNewSegment";
+ segment_count_++;
}
void OnEndOfSegment() {
- DVLOG(1) << "OnEndOfSegment()";
+ NOTREACHED() << "OnEndOfSegment not expected in the Mpeg2 TS parser";
}
void InitializeParser() {
@@ -160,27 +217,38 @@ TEST_F(Mp2tStreamParserTest, UnalignedAppend17) {
// Test small, non-segment-aligned appends.
InitializeParser();
ParseMpeg2TsFile("bear-1280x720.ts", 17);
- EXPECT_EQ(video_frame_count_, 81);
parser_->Flush();
EXPECT_EQ(video_frame_count_, 82);
+ // This stream has no mid-stream configuration change.
+ EXPECT_EQ(config_count_, 1);
+ EXPECT_EQ(segment_count_, 1);
}
TEST_F(Mp2tStreamParserTest, UnalignedAppend512) {
// Test small, non-segment-aligned appends.
InitializeParser();
ParseMpeg2TsFile("bear-1280x720.ts", 512);
- EXPECT_EQ(video_frame_count_, 81);
parser_->Flush();
EXPECT_EQ(video_frame_count_, 82);
+ // This stream has no mid-stream configuration change.
+ EXPECT_EQ(config_count_, 1);
+ EXPECT_EQ(segment_count_, 1);
}
TEST_F(Mp2tStreamParserTest, AppendAfterFlush512) {
InitializeParser();
ParseMpeg2TsFile("bear-1280x720.ts", 512);
parser_->Flush();
+ EXPECT_EQ(video_frame_count_, 82);
+ EXPECT_EQ(config_count_, 1);
+ EXPECT_EQ(segment_count_, 1);
+ ResetStats();
ParseMpeg2TsFile("bear-1280x720.ts", 512);
parser_->Flush();
+ EXPECT_EQ(video_frame_count_, 82);
+ EXPECT_EQ(config_count_, 1);
+ EXPECT_EQ(segment_count_, 1);
}
TEST_F(Mp2tStreamParserTest, TimestampWrapAround) {
@@ -190,9 +258,29 @@ TEST_F(Mp2tStreamParserTest, TimestampWrapAround) {
// in the Mpeg2 TS stream.
InitializeParser();
ParseMpeg2TsFile("bear-1280x720_ptswraparound.ts", 512);
- EXPECT_EQ(video_frame_count_, 81);
- EXPECT_GE(video_min_dts_, base::TimeDelta::FromSeconds(95443 - 10));
- EXPECT_LE(video_max_dts_, base::TimeDelta::FromSeconds(95443 + 10));
+ parser_->Flush();
+ EXPECT_EQ(video_frame_count_, 82);
+
+ EXPECT_TRUE(IsAlmostEqual(video_min_dts_,
+ base::TimeDelta::FromSecondsD(95443.376)));
+ EXPECT_TRUE(IsAlmostEqual(video_max_dts_,
+ base::TimeDelta::FromSecondsD(95446.079)));
+
+ // Note: for audio, AdtsStreamParser considers only the PTS (which is then
+ // used as the DTS).
+ // TODO(damienv): most of the time, audio streams just have PTS. Here, only
+  // the first PES packet has a DTS; all the other PES packets have PTS only.
+ // Reconsider the expected value for |audio_min_dts_| if DTS are used as part
+ // of the ADTS stream parser.
+ //
+ // Note: the last pts for audio is 95445.931 but this PES packet includes
+ // 9 ADTS frames with 1 AAC frame in each ADTS frame.
+ // So the PTS of the last AAC frame is:
+ // 95445.931 + 8 * (1024 / 44100) = 95446.117
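+  // (1024 / 44100 is roughly 23.2 ms per AAC frame, so the 8 extra frames
+  // add roughly 185.8 ms to the PTS of the last PES packet.)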
+ EXPECT_TRUE(IsAlmostEqual(audio_min_dts_,
+ base::TimeDelta::FromSecondsD(95443.400)));
+ EXPECT_TRUE(IsAlmostEqual(audio_max_dts_,
+ base::TimeDelta::FromSecondsD(95446.117)));
}
} // namespace mp2t
diff --git a/media/formats/mp2t/ts_packet.cc b/media/formats/mp2t/ts_packet.cc
index 8463c11e33..e134aed9e6 100644
--- a/media/formats/mp2t/ts_packet.cc
+++ b/media/formats/mp2t/ts_packet.cc
@@ -203,7 +203,12 @@ bool TsPacket::ParseAdaptationField(BitReader* bit_reader,
for (int k = 0; k < adaptation_field_remaining_size; k++) {
int stuffing_byte;
RCHECK(bit_reader->ReadBits(8, &stuffing_byte));
- RCHECK(stuffing_byte == 0xff);
+      // Unfortunately, many streams in the wild do not fill the remainder
+      // of the adaptation field with the expected stuffing value:
+      // do not fail if that's the case.
+ DVLOG_IF(1, stuffing_byte != 0xff)
+ << "Stream not compliant: invalid stuffing byte "
+ << std::hex << stuffing_byte;
}
DVLOG(LOG_LEVEL_TS) << "random_access_indicator=" << random_access_indicator_;
diff --git a/media/formats/mp4/mp4_stream_parser.cc b/media/formats/mp4/mp4_stream_parser.cc
index f9ad71e415..0eb15d9f2c 100644
--- a/media/formats/mp4/mp4_stream_parser.cc
+++ b/media/formats/mp4/mp4_stream_parser.cc
@@ -567,14 +567,13 @@ bool MP4StreamParser::SendAndFlushSamples(BufferQueue* audio_buffers,
return success;
}
-bool MP4StreamParser::ReadAndDiscardMDATsUntil(const int64 offset) {
+bool MP4StreamParser::ReadAndDiscardMDATsUntil(int64 max_clear_offset) {
bool err = false;
- while (mdat_tail_ < offset) {
+ int64 upper_bound = std::min(max_clear_offset, queue_.tail());
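+  // Since |upper_bound| never exceeds |queue_.tail()|, the PeekAt() call
+  // below is expected to always return a non-empty range while the loop
+  // condition holds.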
+ while (mdat_tail_ < upper_bound) {
const uint8* buf = NULL;
int size = 0;
queue_.PeekAt(mdat_tail_, &buf, &size);
- if (size <= 0)
- return false;
FourCC type;
int box_sz;
@@ -588,7 +587,7 @@ bool MP4StreamParser::ReadAndDiscardMDATsUntil(const int64 offset) {
}
mdat_tail_ += box_sz;
}
- queue_.Trim(std::min(mdat_tail_, offset));
+ queue_.Trim(std::min(mdat_tail_, upper_bound));
return !err;
}
diff --git a/media/formats/mp4/mp4_stream_parser.h b/media/formats/mp4/mp4_stream_parser.h
index 4923ab9582..bf7bfa0128 100644
--- a/media/formats/mp4/mp4_stream_parser.h
+++ b/media/formats/mp4/mp4_stream_parser.h
@@ -57,10 +57,12 @@ class MEDIA_EXPORT MP4StreamParser : public StreamParser {
// To retain proper framing, each 'mdat' atom must be read; to limit memory
// usage, the atom's data needs to be discarded incrementally as frames are
// extracted from the stream. This function discards data from the stream up
- // to |offset|, updating the |mdat_tail_| value so that framing can be
- // retained after all 'mdat' information has been read.
+ // to |max_clear_offset|, updating the |mdat_tail_| value so that framing can
+ // be retained after all 'mdat' information has been read. |max_clear_offset|
+ // is the upper bound on what can be removed from |queue_|. Anything below
+ // this offset is no longer needed by the parser.
// Returns 'true' on success, 'false' if there was an error.
- bool ReadAndDiscardMDATsUntil(const int64 offset);
+ bool ReadAndDiscardMDATsUntil(int64 max_clear_offset);
void ChangeState(State new_state);
diff --git a/media/formats/mp4/track_run_iterator.cc b/media/formats/mp4/track_run_iterator.cc
index fefc768452..f3dc830101 100644
--- a/media/formats/mp4/track_run_iterator.cc
+++ b/media/formats/mp4/track_run_iterator.cc
@@ -90,8 +90,7 @@ static void PopulateSampleInfo(const TrackExtends& trex,
const int64 edit_list_offset,
const uint32 i,
SampleInfo* sample_info,
- const SampleDependsOn sdtp_sample_depends_on,
- bool is_sync_sample) {
+ const SampleDependsOn sdtp_sample_depends_on) {
if (i < trun.sample_sizes.size()) {
sample_info->size = trun.sample_sizes[i];
} else if (tfhd.default_sample_size > 0) {
@@ -237,8 +236,6 @@ bool TrackRunIterator::Init(const MovieFragment& moof) {
int64 run_start_dts = traf.decode_time.decode_time;
int sample_count_sum = 0;
- const SyncSample& sync_sample =
- trak->media.information.sample_table.sync_sample;
for (size_t j = 0; j < traf.runs.size(); j++) {
const TrackFragmentRun& trun = traf.runs[j];
TrackRunInfo tri;
@@ -300,8 +297,7 @@ bool TrackRunIterator::Init(const MovieFragment& moof) {
tri.samples.resize(trun.sample_count);
for (size_t k = 0; k < trun.sample_count; k++) {
PopulateSampleInfo(*trex, traf.header, trun, edit_list_offset,
- k, &tri.samples[k], traf.sdtp.sample_depends_on(k),
- sync_sample.IsSyncSample(k));
+ k, &tri.samples[k], traf.sdtp.sample_depends_on(k));
run_start_dts += tri.samples[k].duration;
if (!is_sample_to_group_valid) {
diff --git a/media/formats/webm/webm_audio_client.cc b/media/formats/webm/webm_audio_client.cc
index 6fe9a8434f..e0d382c256 100644
--- a/media/formats/webm/webm_audio_client.cc
+++ b/media/formats/webm/webm_audio_client.cc
@@ -58,6 +58,11 @@ bool WebMAudioClient::InitializeConfig(
if (output_samples_per_second_ > 0)
samples_per_second = output_samples_per_second_;
+ // Always use 48kHz for OPUS. See the "Input Sample Rate" section of the
+ // spec: http://tools.ietf.org/html/draft-terriberry-oggopus-01#page-11
+ if (audio_codec == kCodecOpus)
+ samples_per_second = 48000;
+
const uint8* extra_data = NULL;
size_t extra_data_size = 0;
if (codec_private.size() > 0) {
diff --git a/media/formats/webm/webm_video_client.cc b/media/formats/webm/webm_video_client.cc
index 5ea398ef06..bda78efafa 100644
--- a/media/formats/webm/webm_video_client.cc
+++ b/media/formats/webm/webm_video_client.cc
@@ -74,21 +74,19 @@ bool WebMVideoClient::InitializeConfig(
gfx::Rect visible_rect(crop_top_, crop_left_,
pixel_width_ - (crop_left_ + crop_right_),
pixel_height_ - (crop_top_ + crop_bottom_));
- gfx::Size natural_size = coded_size;
if (display_unit_ == 0) {
if (display_width_ <= 0)
- display_width_ = pixel_width_;
+ display_width_ = visible_rect.width();
if (display_height_ <= 0)
- display_height_ = pixel_height_;
- natural_size = gfx::Size(display_width_, display_height_);
+ display_height_ = visible_rect.height();
} else if (display_unit_ == 3) {
if (display_width_ <= 0 || display_height_ <= 0)
return false;
- natural_size = gfx::Size(display_width_, display_height_);
} else {
MEDIA_LOG(log_cb_) << "Unsupported display unit type " << display_unit_;
return false;
}
+ gfx::Size natural_size = gfx::Size(display_width_, display_height_);
const uint8* extra_data = NULL;
size_t extra_data_size = 0;
if (codec_private.size() > 0) {
diff --git a/media/media.gyp b/media/media.gyp
index 2c0fbc2565..f1e81dd180 100644
--- a/media/media.gyp
+++ b/media/media.gyp
@@ -22,7 +22,7 @@
'media_use_libvpx%': 1,
}],
# Enable ALSA and Pulse for runtime selection.
- ['(OS=="linux" or OS=="freebsd" or OS=="solaris") and embedded!=1', {
+ ['(OS=="linux" or OS=="freebsd" or OS=="solaris") and (embedded!=1 or (chromecast==1 and target_arch!="arm"))', {
# ALSA is always needed for Web MIDI even if the cras is enabled.
'use_alsa%': 1,
'conditions': [
@@ -206,13 +206,15 @@
'base/android/media_player_manager.h',
'base/android/media_resource_getter.cc',
'base/android/media_resource_getter.h',
+ 'base/audio_block_fifo.cc',
+ 'base/audio_block_fifo.h',
'base/audio_buffer.cc',
'base/audio_buffer.h',
'base/audio_buffer_queue.cc',
'base/audio_buffer_queue.h',
- 'base/audio_capturer_source.h',
'base/audio_buffer_converter.cc',
'base/audio_buffer_converter.h',
+ 'base/audio_capturer_source.h',
'base/audio_converter.cc',
'base/audio_converter.h',
'base/audio_decoder.cc',
@@ -256,8 +258,6 @@
'base/cdm_promise.h',
'base/channel_mixer.cc',
'base/channel_mixer.h',
- 'base/clock.cc',
- 'base/clock.h',
'base/container_names.cc',
'base/container_names.h',
'base/data_buffer.cc',
@@ -334,6 +334,8 @@
'base/text_track.h',
'base/text_track_config.cc',
'base/text_track_config.h',
+ 'base/time_delta_interpolator.cc',
+ 'base/time_delta_interpolator.h',
'base/user_input_monitor.cc',
'base/user_input_monitor.h',
'base/user_input_monitor_linux.cc',
@@ -349,6 +351,7 @@
'base/video_frame_pool.h',
'base/video_renderer.cc',
'base/video_renderer.h',
+ 'base/video_rotation.h',
'base/video_util.cc',
'base/video_util.h',
'base/yuv_convert.cc',
@@ -400,8 +403,6 @@
'filters/file_data_source.h',
'filters/frame_processor.cc',
'filters/frame_processor.h',
- 'filters/frame_processor_base.cc',
- 'filters/frame_processor_base.h',
'filters/gpu_video_accelerator_factories.cc',
'filters/gpu_video_accelerator_factories.h',
'filters/gpu_video_decoder.cc',
@@ -416,6 +417,8 @@
'filters/opus_audio_decoder.h',
'filters/skcanvas_video_renderer.cc',
'filters/skcanvas_video_renderer.h',
+ 'filters/source_buffer_platform.cc',
+ 'filters/source_buffer_platform.h',
'filters/source_buffer_stream.cc',
'filters/source_buffer_stream.h',
'filters/stream_parser_factory.cc',
@@ -657,6 +660,13 @@
'formats/webm/chromeos/webm_encoder.h',
],
}],
+ # For VaapiVideoEncodeAccelerator.
+ ['target_arch != "arm" and chromeos == 1 and use_x11 == 1', {
+ 'sources': [
+ 'filters/h264_bitstream_buffer.cc',
+ 'filters/h264_bitstream_buffer.h',
+ ],
+ }],
['OS!="ios"', {
'dependencies': [
'../third_party/libyuv/libyuv.gyp:libyuv',
@@ -767,6 +777,11 @@
],
},
]
+ }, {
+ 'sources!': [
+ 'ozone/media_ozone_platform.cc',
+ 'ozone/media_ozone_platform.h',
+ ]
}],
['OS!="linux"', {
'sources!': [
@@ -920,6 +935,8 @@
'filters/ffmpeg_h264_to_annex_b_bitstream_converter.h',
'filters/h264_to_annex_b_bitstream_converter.cc',
'filters/h264_to_annex_b_bitstream_converter.h',
+ 'formats/mp2t/es_adapter_video.cc',
+ 'formats/mp2t/es_adapter_video.h',
'formats/mp2t/es_parser.h',
'formats/mp2t/es_parser_adts.cc',
'formats/mp2t/es_parser_adts.h',
@@ -1010,6 +1027,7 @@
'../ui/base/ui_base.gyp:ui_base',
'../ui/gfx/gfx.gyp:gfx',
'../ui/gfx/gfx.gyp:gfx_geometry',
+ '../ui/gfx/gfx.gyp:gfx_test_support',
'../url/url.gyp:url_lib',
],
'sources': [
@@ -1045,6 +1063,7 @@
'base/android/media_codec_bridge_unittest.cc',
'base/android/media_drm_bridge_unittest.cc',
'base/android/media_source_player_unittest.cc',
+ 'base/audio_block_fifo_unittest.cc',
'base/audio_buffer_converter_unittest.cc',
'base/audio_buffer_unittest.cc',
'base/audio_buffer_queue_unittest.cc',
@@ -1065,7 +1084,6 @@
'base/callback_holder.h',
'base/callback_holder_unittest.cc',
'base/channel_mixer_unittest.cc',
- 'base/clock_unittest.cc',
'base/container_names_unittest.cc',
'base/data_buffer_unittest.cc',
'base/decoder_buffer_queue_unittest.cc',
@@ -1084,6 +1102,7 @@
'base/stream_parser_unittest.cc',
'base/text_ranges_unittest.cc',
'base/text_renderer_unittest.cc',
+ 'base/time_delta_interpolator_unittest.cc',
'base/user_input_monitor_unittest.cc',
'base/vector_math_testing.h',
'base/vector_math_unittest.cc',
@@ -1221,6 +1240,11 @@
}],
],
}],
+ ['target_arch != "arm" and chromeos == 1 and use_x11 == 1', {
+ 'sources': [
+ 'filters/h264_bitstream_buffer_unittest.cc',
+ ],
+ }],
['use_alsa==0', {
'sources!': [
'audio/alsa/alsa_output_unittest.cc',
@@ -1238,6 +1262,7 @@
'filters/h264_to_annex_b_bitstream_converter_unittest.cc',
'formats/common/stream_parser_test_base.cc',
'formats/common/stream_parser_test_base.h',
+ 'formats/mp2t/es_adapter_video_unittest.cc',
'formats/mp2t/es_parser_h264_unittest.cc',
'formats/mp2t/mp2t_stream_parser_unittest.cc',
'formats/mp4/aac_unittest.cc',
@@ -1278,6 +1303,7 @@
'../testing/perf/perf_test.gyp:perf_test',
'../ui/base/ui_base.gyp:ui_base',
'../ui/gfx/gfx.gyp:gfx',
+ '../ui/gfx/gfx.gyp:gfx_test_support',
'../ui/gfx/gfx.gyp:gfx_geometry',
'../ui/gl/gl.gyp:gl',
'media',
@@ -1583,10 +1609,6 @@
},
'includes': ['../build/apk_test.gypi'],
},
- ],
- }],
- ['OS=="android"', {
- 'targets': [
{
'target_name': 'media_android_jni_headers',
'type': 'none',
@@ -1700,7 +1722,6 @@
'../base/base.gyp:base',
'../base/base.gyp:base_i18n',
'../base/base.gyp:test_support_base',
- '../base/base.gyp:test_support_perf',
'../testing/gtest.gyp:gtest',
'../third_party/ffmpeg/ffmpeg.gyp:ffmpeg',
'media',
diff --git a/media/media.target.darwin-arm.mk b/media/media.target.darwin-arm.mk
index f1a8ce832b..030344464c 100644
--- a/media/media.target.darwin-arm.mk
+++ b/media/media.target.darwin-arm.mk
@@ -66,6 +66,7 @@ LOCAL_SRC_FILES := \
media/audio/virtual_audio_output_stream.cc \
media/base/android/demuxer_stream_player_params.cc \
media/base/android/media_resource_getter.cc \
+ media/base/audio_block_fifo.cc \
media/base/audio_buffer.cc \
media/base/audio_buffer_queue.cc \
media/base/audio_buffer_converter.cc \
@@ -87,7 +88,6 @@ LOCAL_SRC_FILES := \
media/base/byte_queue.cc \
media/base/cdm_promise.cc \
media/base/channel_mixer.cc \
- media/base/clock.cc \
media/base/data_buffer.cc \
media/base/data_source.cc \
media/base/decoder_buffer.cc \
@@ -120,6 +120,7 @@ LOCAL_SRC_FILES := \
media/base/text_ranges.cc \
media/base/text_renderer.cc \
media/base/text_track_config.cc \
+ media/base/time_delta_interpolator.cc \
media/base/user_input_monitor.cc \
media/base/video_decoder.cc \
media/base/video_decoder_config.cc \
@@ -144,12 +145,12 @@ LOCAL_SRC_FILES := \
media/filters/decrypting_video_decoder.cc \
media/filters/file_data_source.cc \
media/filters/frame_processor.cc \
- media/filters/frame_processor_base.cc \
media/filters/gpu_video_accelerator_factories.cc \
media/filters/gpu_video_decoder.cc \
media/filters/h264_bit_reader.cc \
media/filters/h264_parser.cc \
media/filters/skcanvas_video_renderer.cc \
+ media/filters/source_buffer_platform.cc \
media/filters/source_buffer_stream.cc \
media/filters/stream_parser_factory.cc \
media/filters/video_frame_scheduler_impl.cc \
@@ -195,6 +196,7 @@ LOCAL_SRC_FILES := \
media/base/browser_cdm.cc \
media/base/media_stub.cc \
media/filters/h264_to_annex_b_bitstream_converter.cc \
+ media/formats/mp2t/es_adapter_video.cc \
media/formats/mp2t/es_parser_adts.cc \
media/formats/mp2t/es_parser_h264.cc \
media/formats/mp2t/mp2t_stream_parser.cc \
@@ -254,9 +256,9 @@ MY_CFLAGS_Debug := \
-Wno-unused-but-set-variable \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
+ -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Debug := \
@@ -275,6 +277,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -292,14 +295,10 @@ MY_DEFS_Debug := \
'-DSK_ATTR_DEPRECATED=SK_NOTHING_ARG1' \
'-DGR_GL_IGNORE_ES3_MSAA=0' \
'-DSK_WILL_NEVER_DRAW_PERSPECTIVE_TEXT' \
- '-DSK_SUPPORT_LEGACY_GETTOPDEVICE' \
- '-DSK_SUPPORT_LEGACY_BITMAP_CONFIG' \
- '-DSK_SUPPORT_LEGACY_DEVICE_VIRTUAL_ISOPAQUE' \
- '-DSK_SUPPORT_LEGACY_N32_NAME' \
- '-DSK_SUPPORT_LEGACY_SETCONFIG' \
+ '-DSK_SUPPORT_LEGACY_PICTURE_CLONE' \
+ '-DSK_SUPPORT_LEGACY_GETDEVICE' \
'-DSK_IGNORE_ETC1_SUPPORT' \
'-DSK_IGNORE_GPU_DITHER' \
- '-DSK_SUPPORT_LEGACY_GETTOTALCLIP' \
'-DSK_BUILD_FOR_ANDROID' \
'-DSK_USE_POSIX_THREADS' \
'-DSK_DEFERRED_CANVAS_USES_FACTORIES=1' \
@@ -357,6 +356,9 @@ LOCAL_CPPFLAGS_Debug := \
-fvisibility-inlines-hidden \
-Wsign-compare \
-Wno-abi \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -418,6 +420,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -435,14 +438,10 @@ MY_DEFS_Release := \
'-DSK_ATTR_DEPRECATED=SK_NOTHING_ARG1' \
'-DGR_GL_IGNORE_ES3_MSAA=0' \
'-DSK_WILL_NEVER_DRAW_PERSPECTIVE_TEXT' \
- '-DSK_SUPPORT_LEGACY_GETTOPDEVICE' \
- '-DSK_SUPPORT_LEGACY_BITMAP_CONFIG' \
- '-DSK_SUPPORT_LEGACY_DEVICE_VIRTUAL_ISOPAQUE' \
- '-DSK_SUPPORT_LEGACY_N32_NAME' \
- '-DSK_SUPPORT_LEGACY_SETCONFIG' \
+ '-DSK_SUPPORT_LEGACY_PICTURE_CLONE' \
+ '-DSK_SUPPORT_LEGACY_GETDEVICE' \
'-DSK_IGNORE_ETC1_SUPPORT' \
'-DSK_IGNORE_GPU_DITHER' \
- '-DSK_SUPPORT_LEGACY_GETTOTALCLIP' \
'-DSK_BUILD_FOR_ANDROID' \
'-DSK_USE_POSIX_THREADS' \
'-DSK_DEFERRED_CANVAS_USES_FACTORIES=1' \
@@ -501,6 +500,9 @@ LOCAL_CPPFLAGS_Release := \
-fvisibility-inlines-hidden \
-Wsign-compare \
-Wno-abi \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
diff --git a/media/media.target.darwin-arm64.mk b/media/media.target.darwin-arm64.mk
index 4d294cc99b..4caf2ba04c 100644
--- a/media/media.target.darwin-arm64.mk
+++ b/media/media.target.darwin-arm64.mk
@@ -66,6 +66,7 @@ LOCAL_SRC_FILES := \
media/audio/virtual_audio_output_stream.cc \
media/base/android/demuxer_stream_player_params.cc \
media/base/android/media_resource_getter.cc \
+ media/base/audio_block_fifo.cc \
media/base/audio_buffer.cc \
media/base/audio_buffer_queue.cc \
media/base/audio_buffer_converter.cc \
@@ -87,7 +88,6 @@ LOCAL_SRC_FILES := \
media/base/byte_queue.cc \
media/base/cdm_promise.cc \
media/base/channel_mixer.cc \
- media/base/clock.cc \
media/base/data_buffer.cc \
media/base/data_source.cc \
media/base/decoder_buffer.cc \
@@ -120,6 +120,7 @@ LOCAL_SRC_FILES := \
media/base/text_ranges.cc \
media/base/text_renderer.cc \
media/base/text_track_config.cc \
+ media/base/time_delta_interpolator.cc \
media/base/user_input_monitor.cc \
media/base/video_decoder.cc \
media/base/video_decoder_config.cc \
@@ -144,12 +145,12 @@ LOCAL_SRC_FILES := \
media/filters/decrypting_video_decoder.cc \
media/filters/file_data_source.cc \
media/filters/frame_processor.cc \
- media/filters/frame_processor_base.cc \
media/filters/gpu_video_accelerator_factories.cc \
media/filters/gpu_video_decoder.cc \
media/filters/h264_bit_reader.cc \
media/filters/h264_parser.cc \
media/filters/skcanvas_video_renderer.cc \
+ media/filters/source_buffer_platform.cc \
media/filters/source_buffer_stream.cc \
media/filters/stream_parser_factory.cc \
media/filters/video_frame_scheduler_impl.cc \
@@ -195,6 +196,7 @@ LOCAL_SRC_FILES := \
media/base/browser_cdm.cc \
media/base/media_stub.cc \
media/filters/h264_to_annex_b_bitstream_converter.cc \
+ media/formats/mp2t/es_adapter_video.cc \
media/formats/mp2t/es_parser_adts.cc \
media/formats/mp2t/es_parser_h264.cc \
media/formats/mp2t/mp2t_stream_parser.cc \
@@ -244,7 +246,6 @@ MY_CFLAGS_Debug := \
-Wno-unused-but-set-variable \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
-funwind-tables
@@ -265,6 +266,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -282,14 +284,10 @@ MY_DEFS_Debug := \
'-DSK_ATTR_DEPRECATED=SK_NOTHING_ARG1' \
'-DGR_GL_IGNORE_ES3_MSAA=0' \
'-DSK_WILL_NEVER_DRAW_PERSPECTIVE_TEXT' \
- '-DSK_SUPPORT_LEGACY_GETTOPDEVICE' \
- '-DSK_SUPPORT_LEGACY_BITMAP_CONFIG' \
- '-DSK_SUPPORT_LEGACY_DEVICE_VIRTUAL_ISOPAQUE' \
- '-DSK_SUPPORT_LEGACY_N32_NAME' \
- '-DSK_SUPPORT_LEGACY_SETCONFIG' \
+ '-DSK_SUPPORT_LEGACY_PICTURE_CLONE' \
+ '-DSK_SUPPORT_LEGACY_GETDEVICE' \
'-DSK_IGNORE_ETC1_SUPPORT' \
'-DSK_IGNORE_GPU_DITHER' \
- '-DSK_SUPPORT_LEGACY_GETTOTALCLIP' \
'-DSK_BUILD_FOR_ANDROID' \
'-DSK_USE_POSIX_THREADS' \
'-DSK_DEFERRED_CANVAS_USES_FACTORIES=1' \
@@ -346,6 +344,9 @@ LOCAL_CPPFLAGS_Debug := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -378,7 +379,6 @@ MY_CFLAGS_Release := \
-fno-ident \
-fdata-sections \
-ffunction-sections \
- -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Release := \
@@ -397,6 +397,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -414,14 +415,10 @@ MY_DEFS_Release := \
'-DSK_ATTR_DEPRECATED=SK_NOTHING_ARG1' \
'-DGR_GL_IGNORE_ES3_MSAA=0' \
'-DSK_WILL_NEVER_DRAW_PERSPECTIVE_TEXT' \
- '-DSK_SUPPORT_LEGACY_GETTOPDEVICE' \
- '-DSK_SUPPORT_LEGACY_BITMAP_CONFIG' \
- '-DSK_SUPPORT_LEGACY_DEVICE_VIRTUAL_ISOPAQUE' \
- '-DSK_SUPPORT_LEGACY_N32_NAME' \
- '-DSK_SUPPORT_LEGACY_SETCONFIG' \
+ '-DSK_SUPPORT_LEGACY_PICTURE_CLONE' \
+ '-DSK_SUPPORT_LEGACY_GETDEVICE' \
'-DSK_IGNORE_ETC1_SUPPORT' \
'-DSK_IGNORE_GPU_DITHER' \
- '-DSK_SUPPORT_LEGACY_GETTOTALCLIP' \
'-DSK_BUILD_FOR_ANDROID' \
'-DSK_USE_POSIX_THREADS' \
'-DSK_DEFERRED_CANVAS_USES_FACTORIES=1' \
@@ -479,6 +476,9 @@ LOCAL_CPPFLAGS_Release := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
diff --git a/media/media.target.darwin-mips.mk b/media/media.target.darwin-mips.mk
index bf12f63c5a..b1b7311c47 100644
--- a/media/media.target.darwin-mips.mk
+++ b/media/media.target.darwin-mips.mk
@@ -66,6 +66,7 @@ LOCAL_SRC_FILES := \
media/audio/virtual_audio_output_stream.cc \
media/base/android/demuxer_stream_player_params.cc \
media/base/android/media_resource_getter.cc \
+ media/base/audio_block_fifo.cc \
media/base/audio_buffer.cc \
media/base/audio_buffer_queue.cc \
media/base/audio_buffer_converter.cc \
@@ -87,7 +88,6 @@ LOCAL_SRC_FILES := \
media/base/byte_queue.cc \
media/base/cdm_promise.cc \
media/base/channel_mixer.cc \
- media/base/clock.cc \
media/base/data_buffer.cc \
media/base/data_source.cc \
media/base/decoder_buffer.cc \
@@ -120,6 +120,7 @@ LOCAL_SRC_FILES := \
media/base/text_ranges.cc \
media/base/text_renderer.cc \
media/base/text_track_config.cc \
+ media/base/time_delta_interpolator.cc \
media/base/user_input_monitor.cc \
media/base/video_decoder.cc \
media/base/video_decoder_config.cc \
@@ -144,12 +145,12 @@ LOCAL_SRC_FILES := \
media/filters/decrypting_video_decoder.cc \
media/filters/file_data_source.cc \
media/filters/frame_processor.cc \
- media/filters/frame_processor_base.cc \
media/filters/gpu_video_accelerator_factories.cc \
media/filters/gpu_video_decoder.cc \
media/filters/h264_bit_reader.cc \
media/filters/h264_parser.cc \
media/filters/skcanvas_video_renderer.cc \
+ media/filters/source_buffer_platform.cc \
media/filters/source_buffer_stream.cc \
media/filters/stream_parser_factory.cc \
media/filters/video_frame_scheduler_impl.cc \
@@ -195,6 +196,7 @@ LOCAL_SRC_FILES := \
media/base/browser_cdm.cc \
media/base/media_stub.cc \
media/filters/h264_to_annex_b_bitstream_converter.cc \
+ media/formats/mp2t/es_adapter_video.cc \
media/formats/mp2t/es_parser_adts.cc \
media/formats/mp2t/es_parser_h264.cc \
media/formats/mp2t/mp2t_stream_parser.cc \
@@ -248,9 +250,9 @@ MY_CFLAGS_Debug := \
-Wno-unused-but-set-variable \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
+ -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Debug := \
@@ -269,6 +271,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -286,14 +289,10 @@ MY_DEFS_Debug := \
'-DSK_ATTR_DEPRECATED=SK_NOTHING_ARG1' \
'-DGR_GL_IGNORE_ES3_MSAA=0' \
'-DSK_WILL_NEVER_DRAW_PERSPECTIVE_TEXT' \
- '-DSK_SUPPORT_LEGACY_GETTOPDEVICE' \
- '-DSK_SUPPORT_LEGACY_BITMAP_CONFIG' \
- '-DSK_SUPPORT_LEGACY_DEVICE_VIRTUAL_ISOPAQUE' \
- '-DSK_SUPPORT_LEGACY_N32_NAME' \
- '-DSK_SUPPORT_LEGACY_SETCONFIG' \
+ '-DSK_SUPPORT_LEGACY_PICTURE_CLONE' \
+ '-DSK_SUPPORT_LEGACY_GETDEVICE' \
'-DSK_IGNORE_ETC1_SUPPORT' \
'-DSK_IGNORE_GPU_DITHER' \
- '-DSK_SUPPORT_LEGACY_GETTOTALCLIP' \
'-DSK_BUILD_FOR_ANDROID' \
'-DSK_USE_POSIX_THREADS' \
'-DSK_DEFERRED_CANVAS_USES_FACTORIES=1' \
@@ -351,6 +350,9 @@ LOCAL_CPPFLAGS_Debug := \
-fvisibility-inlines-hidden \
-Wsign-compare \
-Wno-uninitialized \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -406,6 +408,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -423,14 +426,10 @@ MY_DEFS_Release := \
'-DSK_ATTR_DEPRECATED=SK_NOTHING_ARG1' \
'-DGR_GL_IGNORE_ES3_MSAA=0' \
'-DSK_WILL_NEVER_DRAW_PERSPECTIVE_TEXT' \
- '-DSK_SUPPORT_LEGACY_GETTOPDEVICE' \
- '-DSK_SUPPORT_LEGACY_BITMAP_CONFIG' \
- '-DSK_SUPPORT_LEGACY_DEVICE_VIRTUAL_ISOPAQUE' \
- '-DSK_SUPPORT_LEGACY_N32_NAME' \
- '-DSK_SUPPORT_LEGACY_SETCONFIG' \
+ '-DSK_SUPPORT_LEGACY_PICTURE_CLONE' \
+ '-DSK_SUPPORT_LEGACY_GETDEVICE' \
'-DSK_IGNORE_ETC1_SUPPORT' \
'-DSK_IGNORE_GPU_DITHER' \
- '-DSK_SUPPORT_LEGACY_GETTOTALCLIP' \
'-DSK_BUILD_FOR_ANDROID' \
'-DSK_USE_POSIX_THREADS' \
'-DSK_DEFERRED_CANVAS_USES_FACTORIES=1' \
@@ -489,6 +488,9 @@ LOCAL_CPPFLAGS_Release := \
-fvisibility-inlines-hidden \
-Wsign-compare \
-Wno-uninitialized \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
diff --git a/media/media.target.darwin-x86.mk b/media/media.target.darwin-x86.mk
index 59091882bd..1560f2a310 100644
--- a/media/media.target.darwin-x86.mk
+++ b/media/media.target.darwin-x86.mk
@@ -66,6 +66,7 @@ LOCAL_SRC_FILES := \
media/audio/virtual_audio_output_stream.cc \
media/base/android/demuxer_stream_player_params.cc \
media/base/android/media_resource_getter.cc \
+ media/base/audio_block_fifo.cc \
media/base/audio_buffer.cc \
media/base/audio_buffer_queue.cc \
media/base/audio_buffer_converter.cc \
@@ -87,7 +88,6 @@ LOCAL_SRC_FILES := \
media/base/byte_queue.cc \
media/base/cdm_promise.cc \
media/base/channel_mixer.cc \
- media/base/clock.cc \
media/base/data_buffer.cc \
media/base/data_source.cc \
media/base/decoder_buffer.cc \
@@ -120,6 +120,7 @@ LOCAL_SRC_FILES := \
media/base/text_ranges.cc \
media/base/text_renderer.cc \
media/base/text_track_config.cc \
+ media/base/time_delta_interpolator.cc \
media/base/user_input_monitor.cc \
media/base/video_decoder.cc \
media/base/video_decoder_config.cc \
@@ -144,12 +145,12 @@ LOCAL_SRC_FILES := \
media/filters/decrypting_video_decoder.cc \
media/filters/file_data_source.cc \
media/filters/frame_processor.cc \
- media/filters/frame_processor_base.cc \
media/filters/gpu_video_accelerator_factories.cc \
media/filters/gpu_video_decoder.cc \
media/filters/h264_bit_reader.cc \
media/filters/h264_parser.cc \
media/filters/skcanvas_video_renderer.cc \
+ media/filters/source_buffer_platform.cc \
media/filters/source_buffer_stream.cc \
media/filters/stream_parser_factory.cc \
media/filters/video_frame_scheduler_impl.cc \
@@ -195,6 +196,7 @@ LOCAL_SRC_FILES := \
media/base/browser_cdm.cc \
media/base/media_stub.cc \
media/filters/h264_to_annex_b_bitstream_converter.cc \
+ media/formats/mp2t/es_adapter_video.cc \
media/formats/mp2t/es_parser_adts.cc \
media/formats/mp2t/es_parser_h264.cc \
media/formats/mp2t/mp2t_stream_parser.cc \
@@ -250,9 +252,9 @@ MY_CFLAGS_Debug := \
-fno-stack-protector \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
+ -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Debug := \
@@ -271,6 +273,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -288,14 +291,10 @@ MY_DEFS_Debug := \
'-DSK_ATTR_DEPRECATED=SK_NOTHING_ARG1' \
'-DGR_GL_IGNORE_ES3_MSAA=0' \
'-DSK_WILL_NEVER_DRAW_PERSPECTIVE_TEXT' \
- '-DSK_SUPPORT_LEGACY_GETTOPDEVICE' \
- '-DSK_SUPPORT_LEGACY_BITMAP_CONFIG' \
- '-DSK_SUPPORT_LEGACY_DEVICE_VIRTUAL_ISOPAQUE' \
- '-DSK_SUPPORT_LEGACY_N32_NAME' \
- '-DSK_SUPPORT_LEGACY_SETCONFIG' \
+ '-DSK_SUPPORT_LEGACY_PICTURE_CLONE' \
+ '-DSK_SUPPORT_LEGACY_GETDEVICE' \
'-DSK_IGNORE_ETC1_SUPPORT' \
'-DSK_IGNORE_GPU_DITHER' \
- '-DSK_SUPPORT_LEGACY_GETTOTALCLIP' \
'-DSK_BUILD_FOR_ANDROID' \
'-DSK_USE_POSIX_THREADS' \
'-DSK_DEFERRED_CANVAS_USES_FACTORIES=1' \
@@ -352,6 +351,9 @@ LOCAL_CPPFLAGS_Debug := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -408,6 +410,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -425,14 +428,10 @@ MY_DEFS_Release := \
'-DSK_ATTR_DEPRECATED=SK_NOTHING_ARG1' \
'-DGR_GL_IGNORE_ES3_MSAA=0' \
'-DSK_WILL_NEVER_DRAW_PERSPECTIVE_TEXT' \
- '-DSK_SUPPORT_LEGACY_GETTOPDEVICE' \
- '-DSK_SUPPORT_LEGACY_BITMAP_CONFIG' \
- '-DSK_SUPPORT_LEGACY_DEVICE_VIRTUAL_ISOPAQUE' \
- '-DSK_SUPPORT_LEGACY_N32_NAME' \
- '-DSK_SUPPORT_LEGACY_SETCONFIG' \
+ '-DSK_SUPPORT_LEGACY_PICTURE_CLONE' \
+ '-DSK_SUPPORT_LEGACY_GETDEVICE' \
'-DSK_IGNORE_ETC1_SUPPORT' \
'-DSK_IGNORE_GPU_DITHER' \
- '-DSK_SUPPORT_LEGACY_GETTOTALCLIP' \
'-DSK_BUILD_FOR_ANDROID' \
'-DSK_USE_POSIX_THREADS' \
'-DSK_DEFERRED_CANVAS_USES_FACTORIES=1' \
@@ -490,6 +489,9 @@ LOCAL_CPPFLAGS_Release := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
diff --git a/media/media.target.darwin-x86_64.mk b/media/media.target.darwin-x86_64.mk
index 72d4605b07..c1a1a77746 100644
--- a/media/media.target.darwin-x86_64.mk
+++ b/media/media.target.darwin-x86_64.mk
@@ -66,6 +66,7 @@ LOCAL_SRC_FILES := \
media/audio/virtual_audio_output_stream.cc \
media/base/android/demuxer_stream_player_params.cc \
media/base/android/media_resource_getter.cc \
+ media/base/audio_block_fifo.cc \
media/base/audio_buffer.cc \
media/base/audio_buffer_queue.cc \
media/base/audio_buffer_converter.cc \
@@ -87,7 +88,6 @@ LOCAL_SRC_FILES := \
media/base/byte_queue.cc \
media/base/cdm_promise.cc \
media/base/channel_mixer.cc \
- media/base/clock.cc \
media/base/data_buffer.cc \
media/base/data_source.cc \
media/base/decoder_buffer.cc \
@@ -120,6 +120,7 @@ LOCAL_SRC_FILES := \
media/base/text_ranges.cc \
media/base/text_renderer.cc \
media/base/text_track_config.cc \
+ media/base/time_delta_interpolator.cc \
media/base/user_input_monitor.cc \
media/base/video_decoder.cc \
media/base/video_decoder_config.cc \
@@ -144,12 +145,12 @@ LOCAL_SRC_FILES := \
media/filters/decrypting_video_decoder.cc \
media/filters/file_data_source.cc \
media/filters/frame_processor.cc \
- media/filters/frame_processor_base.cc \
media/filters/gpu_video_accelerator_factories.cc \
media/filters/gpu_video_decoder.cc \
media/filters/h264_bit_reader.cc \
media/filters/h264_parser.cc \
media/filters/skcanvas_video_renderer.cc \
+ media/filters/source_buffer_platform.cc \
media/filters/source_buffer_stream.cc \
media/filters/stream_parser_factory.cc \
media/filters/video_frame_scheduler_impl.cc \
@@ -195,6 +196,7 @@ LOCAL_SRC_FILES := \
media/base/browser_cdm.cc \
media/base/media_stub.cc \
media/filters/h264_to_annex_b_bitstream_converter.cc \
+ media/formats/mp2t/es_adapter_video.cc \
media/formats/mp2t/es_parser_adts.cc \
media/formats/mp2t/es_parser_h264.cc \
media/formats/mp2t/mp2t_stream_parser.cc \
@@ -249,9 +251,9 @@ MY_CFLAGS_Debug := \
-Wno-unused-but-set-variable \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
+ -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Debug := \
@@ -270,6 +272,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -287,14 +290,10 @@ MY_DEFS_Debug := \
'-DSK_ATTR_DEPRECATED=SK_NOTHING_ARG1' \
'-DGR_GL_IGNORE_ES3_MSAA=0' \
'-DSK_WILL_NEVER_DRAW_PERSPECTIVE_TEXT' \
- '-DSK_SUPPORT_LEGACY_GETTOPDEVICE' \
- '-DSK_SUPPORT_LEGACY_BITMAP_CONFIG' \
- '-DSK_SUPPORT_LEGACY_DEVICE_VIRTUAL_ISOPAQUE' \
- '-DSK_SUPPORT_LEGACY_N32_NAME' \
- '-DSK_SUPPORT_LEGACY_SETCONFIG' \
+ '-DSK_SUPPORT_LEGACY_PICTURE_CLONE' \
+ '-DSK_SUPPORT_LEGACY_GETDEVICE' \
'-DSK_IGNORE_ETC1_SUPPORT' \
'-DSK_IGNORE_GPU_DITHER' \
- '-DSK_SUPPORT_LEGACY_GETTOTALCLIP' \
'-DSK_BUILD_FOR_ANDROID' \
'-DSK_USE_POSIX_THREADS' \
'-DSK_DEFERRED_CANVAS_USES_FACTORIES=1' \
@@ -351,6 +350,9 @@ LOCAL_CPPFLAGS_Debug := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -406,6 +408,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -423,14 +426,10 @@ MY_DEFS_Release := \
'-DSK_ATTR_DEPRECATED=SK_NOTHING_ARG1' \
'-DGR_GL_IGNORE_ES3_MSAA=0' \
'-DSK_WILL_NEVER_DRAW_PERSPECTIVE_TEXT' \
- '-DSK_SUPPORT_LEGACY_GETTOPDEVICE' \
- '-DSK_SUPPORT_LEGACY_BITMAP_CONFIG' \
- '-DSK_SUPPORT_LEGACY_DEVICE_VIRTUAL_ISOPAQUE' \
- '-DSK_SUPPORT_LEGACY_N32_NAME' \
- '-DSK_SUPPORT_LEGACY_SETCONFIG' \
+ '-DSK_SUPPORT_LEGACY_PICTURE_CLONE' \
+ '-DSK_SUPPORT_LEGACY_GETDEVICE' \
'-DSK_IGNORE_ETC1_SUPPORT' \
'-DSK_IGNORE_GPU_DITHER' \
- '-DSK_SUPPORT_LEGACY_GETTOTALCLIP' \
'-DSK_BUILD_FOR_ANDROID' \
'-DSK_USE_POSIX_THREADS' \
'-DSK_DEFERRED_CANVAS_USES_FACTORIES=1' \
@@ -488,6 +487,9 @@ LOCAL_CPPFLAGS_Release := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
diff --git a/media/media.target.linux-arm.mk b/media/media.target.linux-arm.mk
index f1a8ce832b..030344464c 100644
--- a/media/media.target.linux-arm.mk
+++ b/media/media.target.linux-arm.mk
@@ -66,6 +66,7 @@ LOCAL_SRC_FILES := \
media/audio/virtual_audio_output_stream.cc \
media/base/android/demuxer_stream_player_params.cc \
media/base/android/media_resource_getter.cc \
+ media/base/audio_block_fifo.cc \
media/base/audio_buffer.cc \
media/base/audio_buffer_queue.cc \
media/base/audio_buffer_converter.cc \
@@ -87,7 +88,6 @@ LOCAL_SRC_FILES := \
media/base/byte_queue.cc \
media/base/cdm_promise.cc \
media/base/channel_mixer.cc \
- media/base/clock.cc \
media/base/data_buffer.cc \
media/base/data_source.cc \
media/base/decoder_buffer.cc \
@@ -120,6 +120,7 @@ LOCAL_SRC_FILES := \
media/base/text_ranges.cc \
media/base/text_renderer.cc \
media/base/text_track_config.cc \
+ media/base/time_delta_interpolator.cc \
media/base/user_input_monitor.cc \
media/base/video_decoder.cc \
media/base/video_decoder_config.cc \
@@ -144,12 +145,12 @@ LOCAL_SRC_FILES := \
media/filters/decrypting_video_decoder.cc \
media/filters/file_data_source.cc \
media/filters/frame_processor.cc \
- media/filters/frame_processor_base.cc \
media/filters/gpu_video_accelerator_factories.cc \
media/filters/gpu_video_decoder.cc \
media/filters/h264_bit_reader.cc \
media/filters/h264_parser.cc \
media/filters/skcanvas_video_renderer.cc \
+ media/filters/source_buffer_platform.cc \
media/filters/source_buffer_stream.cc \
media/filters/stream_parser_factory.cc \
media/filters/video_frame_scheduler_impl.cc \
@@ -195,6 +196,7 @@ LOCAL_SRC_FILES := \
media/base/browser_cdm.cc \
media/base/media_stub.cc \
media/filters/h264_to_annex_b_bitstream_converter.cc \
+ media/formats/mp2t/es_adapter_video.cc \
media/formats/mp2t/es_parser_adts.cc \
media/formats/mp2t/es_parser_h264.cc \
media/formats/mp2t/mp2t_stream_parser.cc \
@@ -254,9 +256,9 @@ MY_CFLAGS_Debug := \
-Wno-unused-but-set-variable \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
+ -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Debug := \
@@ -275,6 +277,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -292,14 +295,10 @@ MY_DEFS_Debug := \
'-DSK_ATTR_DEPRECATED=SK_NOTHING_ARG1' \
'-DGR_GL_IGNORE_ES3_MSAA=0' \
'-DSK_WILL_NEVER_DRAW_PERSPECTIVE_TEXT' \
- '-DSK_SUPPORT_LEGACY_GETTOPDEVICE' \
- '-DSK_SUPPORT_LEGACY_BITMAP_CONFIG' \
- '-DSK_SUPPORT_LEGACY_DEVICE_VIRTUAL_ISOPAQUE' \
- '-DSK_SUPPORT_LEGACY_N32_NAME' \
- '-DSK_SUPPORT_LEGACY_SETCONFIG' \
+ '-DSK_SUPPORT_LEGACY_PICTURE_CLONE' \
+ '-DSK_SUPPORT_LEGACY_GETDEVICE' \
'-DSK_IGNORE_ETC1_SUPPORT' \
'-DSK_IGNORE_GPU_DITHER' \
- '-DSK_SUPPORT_LEGACY_GETTOTALCLIP' \
'-DSK_BUILD_FOR_ANDROID' \
'-DSK_USE_POSIX_THREADS' \
'-DSK_DEFERRED_CANVAS_USES_FACTORIES=1' \
@@ -357,6 +356,9 @@ LOCAL_CPPFLAGS_Debug := \
-fvisibility-inlines-hidden \
-Wsign-compare \
-Wno-abi \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -418,6 +420,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -435,14 +438,10 @@ MY_DEFS_Release := \
'-DSK_ATTR_DEPRECATED=SK_NOTHING_ARG1' \
'-DGR_GL_IGNORE_ES3_MSAA=0' \
'-DSK_WILL_NEVER_DRAW_PERSPECTIVE_TEXT' \
- '-DSK_SUPPORT_LEGACY_GETTOPDEVICE' \
- '-DSK_SUPPORT_LEGACY_BITMAP_CONFIG' \
- '-DSK_SUPPORT_LEGACY_DEVICE_VIRTUAL_ISOPAQUE' \
- '-DSK_SUPPORT_LEGACY_N32_NAME' \
- '-DSK_SUPPORT_LEGACY_SETCONFIG' \
+ '-DSK_SUPPORT_LEGACY_PICTURE_CLONE' \
+ '-DSK_SUPPORT_LEGACY_GETDEVICE' \
'-DSK_IGNORE_ETC1_SUPPORT' \
'-DSK_IGNORE_GPU_DITHER' \
- '-DSK_SUPPORT_LEGACY_GETTOTALCLIP' \
'-DSK_BUILD_FOR_ANDROID' \
'-DSK_USE_POSIX_THREADS' \
'-DSK_DEFERRED_CANVAS_USES_FACTORIES=1' \
@@ -501,6 +500,9 @@ LOCAL_CPPFLAGS_Release := \
-fvisibility-inlines-hidden \
-Wsign-compare \
-Wno-abi \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
diff --git a/media/media.target.linux-arm64.mk b/media/media.target.linux-arm64.mk
index 4d294cc99b..4caf2ba04c 100644
--- a/media/media.target.linux-arm64.mk
+++ b/media/media.target.linux-arm64.mk
@@ -66,6 +66,7 @@ LOCAL_SRC_FILES := \
media/audio/virtual_audio_output_stream.cc \
media/base/android/demuxer_stream_player_params.cc \
media/base/android/media_resource_getter.cc \
+ media/base/audio_block_fifo.cc \
media/base/audio_buffer.cc \
media/base/audio_buffer_queue.cc \
media/base/audio_buffer_converter.cc \
@@ -87,7 +88,6 @@ LOCAL_SRC_FILES := \
media/base/byte_queue.cc \
media/base/cdm_promise.cc \
media/base/channel_mixer.cc \
- media/base/clock.cc \
media/base/data_buffer.cc \
media/base/data_source.cc \
media/base/decoder_buffer.cc \
@@ -120,6 +120,7 @@ LOCAL_SRC_FILES := \
media/base/text_ranges.cc \
media/base/text_renderer.cc \
media/base/text_track_config.cc \
+ media/base/time_delta_interpolator.cc \
media/base/user_input_monitor.cc \
media/base/video_decoder.cc \
media/base/video_decoder_config.cc \
@@ -144,12 +145,12 @@ LOCAL_SRC_FILES := \
media/filters/decrypting_video_decoder.cc \
media/filters/file_data_source.cc \
media/filters/frame_processor.cc \
- media/filters/frame_processor_base.cc \
media/filters/gpu_video_accelerator_factories.cc \
media/filters/gpu_video_decoder.cc \
media/filters/h264_bit_reader.cc \
media/filters/h264_parser.cc \
media/filters/skcanvas_video_renderer.cc \
+ media/filters/source_buffer_platform.cc \
media/filters/source_buffer_stream.cc \
media/filters/stream_parser_factory.cc \
media/filters/video_frame_scheduler_impl.cc \
@@ -195,6 +196,7 @@ LOCAL_SRC_FILES := \
media/base/browser_cdm.cc \
media/base/media_stub.cc \
media/filters/h264_to_annex_b_bitstream_converter.cc \
+ media/formats/mp2t/es_adapter_video.cc \
media/formats/mp2t/es_parser_adts.cc \
media/formats/mp2t/es_parser_h264.cc \
media/formats/mp2t/mp2t_stream_parser.cc \
@@ -244,7 +246,6 @@ MY_CFLAGS_Debug := \
-Wno-unused-but-set-variable \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
-funwind-tables
@@ -265,6 +266,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -282,14 +284,10 @@ MY_DEFS_Debug := \
'-DSK_ATTR_DEPRECATED=SK_NOTHING_ARG1' \
'-DGR_GL_IGNORE_ES3_MSAA=0' \
'-DSK_WILL_NEVER_DRAW_PERSPECTIVE_TEXT' \
- '-DSK_SUPPORT_LEGACY_GETTOPDEVICE' \
- '-DSK_SUPPORT_LEGACY_BITMAP_CONFIG' \
- '-DSK_SUPPORT_LEGACY_DEVICE_VIRTUAL_ISOPAQUE' \
- '-DSK_SUPPORT_LEGACY_N32_NAME' \
- '-DSK_SUPPORT_LEGACY_SETCONFIG' \
+ '-DSK_SUPPORT_LEGACY_PICTURE_CLONE' \
+ '-DSK_SUPPORT_LEGACY_GETDEVICE' \
'-DSK_IGNORE_ETC1_SUPPORT' \
'-DSK_IGNORE_GPU_DITHER' \
- '-DSK_SUPPORT_LEGACY_GETTOTALCLIP' \
'-DSK_BUILD_FOR_ANDROID' \
'-DSK_USE_POSIX_THREADS' \
'-DSK_DEFERRED_CANVAS_USES_FACTORIES=1' \
@@ -346,6 +344,9 @@ LOCAL_CPPFLAGS_Debug := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -378,7 +379,6 @@ MY_CFLAGS_Release := \
-fno-ident \
-fdata-sections \
-ffunction-sections \
- -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Release := \
@@ -397,6 +397,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -414,14 +415,10 @@ MY_DEFS_Release := \
'-DSK_ATTR_DEPRECATED=SK_NOTHING_ARG1' \
'-DGR_GL_IGNORE_ES3_MSAA=0' \
'-DSK_WILL_NEVER_DRAW_PERSPECTIVE_TEXT' \
- '-DSK_SUPPORT_LEGACY_GETTOPDEVICE' \
- '-DSK_SUPPORT_LEGACY_BITMAP_CONFIG' \
- '-DSK_SUPPORT_LEGACY_DEVICE_VIRTUAL_ISOPAQUE' \
- '-DSK_SUPPORT_LEGACY_N32_NAME' \
- '-DSK_SUPPORT_LEGACY_SETCONFIG' \
+ '-DSK_SUPPORT_LEGACY_PICTURE_CLONE' \
+ '-DSK_SUPPORT_LEGACY_GETDEVICE' \
'-DSK_IGNORE_ETC1_SUPPORT' \
'-DSK_IGNORE_GPU_DITHER' \
- '-DSK_SUPPORT_LEGACY_GETTOTALCLIP' \
'-DSK_BUILD_FOR_ANDROID' \
'-DSK_USE_POSIX_THREADS' \
'-DSK_DEFERRED_CANVAS_USES_FACTORIES=1' \
@@ -479,6 +476,9 @@ LOCAL_CPPFLAGS_Release := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
diff --git a/media/media.target.linux-mips.mk b/media/media.target.linux-mips.mk
index bf12f63c5a..b1b7311c47 100644
--- a/media/media.target.linux-mips.mk
+++ b/media/media.target.linux-mips.mk
@@ -66,6 +66,7 @@ LOCAL_SRC_FILES := \
media/audio/virtual_audio_output_stream.cc \
media/base/android/demuxer_stream_player_params.cc \
media/base/android/media_resource_getter.cc \
+ media/base/audio_block_fifo.cc \
media/base/audio_buffer.cc \
media/base/audio_buffer_queue.cc \
media/base/audio_buffer_converter.cc \
@@ -87,7 +88,6 @@ LOCAL_SRC_FILES := \
media/base/byte_queue.cc \
media/base/cdm_promise.cc \
media/base/channel_mixer.cc \
- media/base/clock.cc \
media/base/data_buffer.cc \
media/base/data_source.cc \
media/base/decoder_buffer.cc \
@@ -120,6 +120,7 @@ LOCAL_SRC_FILES := \
media/base/text_ranges.cc \
media/base/text_renderer.cc \
media/base/text_track_config.cc \
+ media/base/time_delta_interpolator.cc \
media/base/user_input_monitor.cc \
media/base/video_decoder.cc \
media/base/video_decoder_config.cc \
@@ -144,12 +145,12 @@ LOCAL_SRC_FILES := \
media/filters/decrypting_video_decoder.cc \
media/filters/file_data_source.cc \
media/filters/frame_processor.cc \
- media/filters/frame_processor_base.cc \
media/filters/gpu_video_accelerator_factories.cc \
media/filters/gpu_video_decoder.cc \
media/filters/h264_bit_reader.cc \
media/filters/h264_parser.cc \
media/filters/skcanvas_video_renderer.cc \
+ media/filters/source_buffer_platform.cc \
media/filters/source_buffer_stream.cc \
media/filters/stream_parser_factory.cc \
media/filters/video_frame_scheduler_impl.cc \
@@ -195,6 +196,7 @@ LOCAL_SRC_FILES := \
media/base/browser_cdm.cc \
media/base/media_stub.cc \
media/filters/h264_to_annex_b_bitstream_converter.cc \
+ media/formats/mp2t/es_adapter_video.cc \
media/formats/mp2t/es_parser_adts.cc \
media/formats/mp2t/es_parser_h264.cc \
media/formats/mp2t/mp2t_stream_parser.cc \
@@ -248,9 +250,9 @@ MY_CFLAGS_Debug := \
-Wno-unused-but-set-variable \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
+ -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Debug := \
@@ -269,6 +271,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -286,14 +289,10 @@ MY_DEFS_Debug := \
'-DSK_ATTR_DEPRECATED=SK_NOTHING_ARG1' \
'-DGR_GL_IGNORE_ES3_MSAA=0' \
'-DSK_WILL_NEVER_DRAW_PERSPECTIVE_TEXT' \
- '-DSK_SUPPORT_LEGACY_GETTOPDEVICE' \
- '-DSK_SUPPORT_LEGACY_BITMAP_CONFIG' \
- '-DSK_SUPPORT_LEGACY_DEVICE_VIRTUAL_ISOPAQUE' \
- '-DSK_SUPPORT_LEGACY_N32_NAME' \
- '-DSK_SUPPORT_LEGACY_SETCONFIG' \
+ '-DSK_SUPPORT_LEGACY_PICTURE_CLONE' \
+ '-DSK_SUPPORT_LEGACY_GETDEVICE' \
'-DSK_IGNORE_ETC1_SUPPORT' \
'-DSK_IGNORE_GPU_DITHER' \
- '-DSK_SUPPORT_LEGACY_GETTOTALCLIP' \
'-DSK_BUILD_FOR_ANDROID' \
'-DSK_USE_POSIX_THREADS' \
'-DSK_DEFERRED_CANVAS_USES_FACTORIES=1' \
@@ -351,6 +350,9 @@ LOCAL_CPPFLAGS_Debug := \
-fvisibility-inlines-hidden \
-Wsign-compare \
-Wno-uninitialized \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -406,6 +408,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -423,14 +426,10 @@ MY_DEFS_Release := \
'-DSK_ATTR_DEPRECATED=SK_NOTHING_ARG1' \
'-DGR_GL_IGNORE_ES3_MSAA=0' \
'-DSK_WILL_NEVER_DRAW_PERSPECTIVE_TEXT' \
- '-DSK_SUPPORT_LEGACY_GETTOPDEVICE' \
- '-DSK_SUPPORT_LEGACY_BITMAP_CONFIG' \
- '-DSK_SUPPORT_LEGACY_DEVICE_VIRTUAL_ISOPAQUE' \
- '-DSK_SUPPORT_LEGACY_N32_NAME' \
- '-DSK_SUPPORT_LEGACY_SETCONFIG' \
+ '-DSK_SUPPORT_LEGACY_PICTURE_CLONE' \
+ '-DSK_SUPPORT_LEGACY_GETDEVICE' \
'-DSK_IGNORE_ETC1_SUPPORT' \
'-DSK_IGNORE_GPU_DITHER' \
- '-DSK_SUPPORT_LEGACY_GETTOTALCLIP' \
'-DSK_BUILD_FOR_ANDROID' \
'-DSK_USE_POSIX_THREADS' \
'-DSK_DEFERRED_CANVAS_USES_FACTORIES=1' \
@@ -489,6 +488,9 @@ LOCAL_CPPFLAGS_Release := \
-fvisibility-inlines-hidden \
-Wsign-compare \
-Wno-uninitialized \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
diff --git a/media/media.target.linux-x86.mk b/media/media.target.linux-x86.mk
index 59091882bd..1560f2a310 100644
--- a/media/media.target.linux-x86.mk
+++ b/media/media.target.linux-x86.mk
@@ -66,6 +66,7 @@ LOCAL_SRC_FILES := \
media/audio/virtual_audio_output_stream.cc \
media/base/android/demuxer_stream_player_params.cc \
media/base/android/media_resource_getter.cc \
+ media/base/audio_block_fifo.cc \
media/base/audio_buffer.cc \
media/base/audio_buffer_queue.cc \
media/base/audio_buffer_converter.cc \
@@ -87,7 +88,6 @@ LOCAL_SRC_FILES := \
media/base/byte_queue.cc \
media/base/cdm_promise.cc \
media/base/channel_mixer.cc \
- media/base/clock.cc \
media/base/data_buffer.cc \
media/base/data_source.cc \
media/base/decoder_buffer.cc \
@@ -120,6 +120,7 @@ LOCAL_SRC_FILES := \
media/base/text_ranges.cc \
media/base/text_renderer.cc \
media/base/text_track_config.cc \
+ media/base/time_delta_interpolator.cc \
media/base/user_input_monitor.cc \
media/base/video_decoder.cc \
media/base/video_decoder_config.cc \
@@ -144,12 +145,12 @@ LOCAL_SRC_FILES := \
media/filters/decrypting_video_decoder.cc \
media/filters/file_data_source.cc \
media/filters/frame_processor.cc \
- media/filters/frame_processor_base.cc \
media/filters/gpu_video_accelerator_factories.cc \
media/filters/gpu_video_decoder.cc \
media/filters/h264_bit_reader.cc \
media/filters/h264_parser.cc \
media/filters/skcanvas_video_renderer.cc \
+ media/filters/source_buffer_platform.cc \
media/filters/source_buffer_stream.cc \
media/filters/stream_parser_factory.cc \
media/filters/video_frame_scheduler_impl.cc \
@@ -195,6 +196,7 @@ LOCAL_SRC_FILES := \
media/base/browser_cdm.cc \
media/base/media_stub.cc \
media/filters/h264_to_annex_b_bitstream_converter.cc \
+ media/formats/mp2t/es_adapter_video.cc \
media/formats/mp2t/es_parser_adts.cc \
media/formats/mp2t/es_parser_h264.cc \
media/formats/mp2t/mp2t_stream_parser.cc \
@@ -250,9 +252,9 @@ MY_CFLAGS_Debug := \
-fno-stack-protector \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
+ -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Debug := \
@@ -271,6 +273,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -288,14 +291,10 @@ MY_DEFS_Debug := \
'-DSK_ATTR_DEPRECATED=SK_NOTHING_ARG1' \
'-DGR_GL_IGNORE_ES3_MSAA=0' \
'-DSK_WILL_NEVER_DRAW_PERSPECTIVE_TEXT' \
- '-DSK_SUPPORT_LEGACY_GETTOPDEVICE' \
- '-DSK_SUPPORT_LEGACY_BITMAP_CONFIG' \
- '-DSK_SUPPORT_LEGACY_DEVICE_VIRTUAL_ISOPAQUE' \
- '-DSK_SUPPORT_LEGACY_N32_NAME' \
- '-DSK_SUPPORT_LEGACY_SETCONFIG' \
+ '-DSK_SUPPORT_LEGACY_PICTURE_CLONE' \
+ '-DSK_SUPPORT_LEGACY_GETDEVICE' \
'-DSK_IGNORE_ETC1_SUPPORT' \
'-DSK_IGNORE_GPU_DITHER' \
- '-DSK_SUPPORT_LEGACY_GETTOTALCLIP' \
'-DSK_BUILD_FOR_ANDROID' \
'-DSK_USE_POSIX_THREADS' \
'-DSK_DEFERRED_CANVAS_USES_FACTORIES=1' \
@@ -352,6 +351,9 @@ LOCAL_CPPFLAGS_Debug := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -408,6 +410,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -425,14 +428,10 @@ MY_DEFS_Release := \
'-DSK_ATTR_DEPRECATED=SK_NOTHING_ARG1' \
'-DGR_GL_IGNORE_ES3_MSAA=0' \
'-DSK_WILL_NEVER_DRAW_PERSPECTIVE_TEXT' \
- '-DSK_SUPPORT_LEGACY_GETTOPDEVICE' \
- '-DSK_SUPPORT_LEGACY_BITMAP_CONFIG' \
- '-DSK_SUPPORT_LEGACY_DEVICE_VIRTUAL_ISOPAQUE' \
- '-DSK_SUPPORT_LEGACY_N32_NAME' \
- '-DSK_SUPPORT_LEGACY_SETCONFIG' \
+ '-DSK_SUPPORT_LEGACY_PICTURE_CLONE' \
+ '-DSK_SUPPORT_LEGACY_GETDEVICE' \
'-DSK_IGNORE_ETC1_SUPPORT' \
'-DSK_IGNORE_GPU_DITHER' \
- '-DSK_SUPPORT_LEGACY_GETTOTALCLIP' \
'-DSK_BUILD_FOR_ANDROID' \
'-DSK_USE_POSIX_THREADS' \
'-DSK_DEFERRED_CANVAS_USES_FACTORIES=1' \
@@ -490,6 +489,9 @@ LOCAL_CPPFLAGS_Release := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
diff --git a/media/media.target.linux-x86_64.mk b/media/media.target.linux-x86_64.mk
index 72d4605b07..c1a1a77746 100644
--- a/media/media.target.linux-x86_64.mk
+++ b/media/media.target.linux-x86_64.mk
@@ -66,6 +66,7 @@ LOCAL_SRC_FILES := \
media/audio/virtual_audio_output_stream.cc \
media/base/android/demuxer_stream_player_params.cc \
media/base/android/media_resource_getter.cc \
+ media/base/audio_block_fifo.cc \
media/base/audio_buffer.cc \
media/base/audio_buffer_queue.cc \
media/base/audio_buffer_converter.cc \
@@ -87,7 +88,6 @@ LOCAL_SRC_FILES := \
media/base/byte_queue.cc \
media/base/cdm_promise.cc \
media/base/channel_mixer.cc \
- media/base/clock.cc \
media/base/data_buffer.cc \
media/base/data_source.cc \
media/base/decoder_buffer.cc \
@@ -120,6 +120,7 @@ LOCAL_SRC_FILES := \
media/base/text_ranges.cc \
media/base/text_renderer.cc \
media/base/text_track_config.cc \
+ media/base/time_delta_interpolator.cc \
media/base/user_input_monitor.cc \
media/base/video_decoder.cc \
media/base/video_decoder_config.cc \
@@ -144,12 +145,12 @@ LOCAL_SRC_FILES := \
media/filters/decrypting_video_decoder.cc \
media/filters/file_data_source.cc \
media/filters/frame_processor.cc \
- media/filters/frame_processor_base.cc \
media/filters/gpu_video_accelerator_factories.cc \
media/filters/gpu_video_decoder.cc \
media/filters/h264_bit_reader.cc \
media/filters/h264_parser.cc \
media/filters/skcanvas_video_renderer.cc \
+ media/filters/source_buffer_platform.cc \
media/filters/source_buffer_stream.cc \
media/filters/stream_parser_factory.cc \
media/filters/video_frame_scheduler_impl.cc \
@@ -195,6 +196,7 @@ LOCAL_SRC_FILES := \
media/base/browser_cdm.cc \
media/base/media_stub.cc \
media/filters/h264_to_annex_b_bitstream_converter.cc \
+ media/formats/mp2t/es_adapter_video.cc \
media/formats/mp2t/es_parser_adts.cc \
media/formats/mp2t/es_parser_h264.cc \
media/formats/mp2t/mp2t_stream_parser.cc \
@@ -249,9 +251,9 @@ MY_CFLAGS_Debug := \
-Wno-unused-but-set-variable \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
+ -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Debug := \
@@ -270,6 +272,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -287,14 +290,10 @@ MY_DEFS_Debug := \
'-DSK_ATTR_DEPRECATED=SK_NOTHING_ARG1' \
'-DGR_GL_IGNORE_ES3_MSAA=0' \
'-DSK_WILL_NEVER_DRAW_PERSPECTIVE_TEXT' \
- '-DSK_SUPPORT_LEGACY_GETTOPDEVICE' \
- '-DSK_SUPPORT_LEGACY_BITMAP_CONFIG' \
- '-DSK_SUPPORT_LEGACY_DEVICE_VIRTUAL_ISOPAQUE' \
- '-DSK_SUPPORT_LEGACY_N32_NAME' \
- '-DSK_SUPPORT_LEGACY_SETCONFIG' \
+ '-DSK_SUPPORT_LEGACY_PICTURE_CLONE' \
+ '-DSK_SUPPORT_LEGACY_GETDEVICE' \
'-DSK_IGNORE_ETC1_SUPPORT' \
'-DSK_IGNORE_GPU_DITHER' \
- '-DSK_SUPPORT_LEGACY_GETTOTALCLIP' \
'-DSK_BUILD_FOR_ANDROID' \
'-DSK_USE_POSIX_THREADS' \
'-DSK_DEFERRED_CANVAS_USES_FACTORIES=1' \
@@ -351,6 +350,9 @@ LOCAL_CPPFLAGS_Debug := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -406,6 +408,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -423,14 +426,10 @@ MY_DEFS_Release := \
'-DSK_ATTR_DEPRECATED=SK_NOTHING_ARG1' \
'-DGR_GL_IGNORE_ES3_MSAA=0' \
'-DSK_WILL_NEVER_DRAW_PERSPECTIVE_TEXT' \
- '-DSK_SUPPORT_LEGACY_GETTOPDEVICE' \
- '-DSK_SUPPORT_LEGACY_BITMAP_CONFIG' \
- '-DSK_SUPPORT_LEGACY_DEVICE_VIRTUAL_ISOPAQUE' \
- '-DSK_SUPPORT_LEGACY_N32_NAME' \
- '-DSK_SUPPORT_LEGACY_SETCONFIG' \
+ '-DSK_SUPPORT_LEGACY_PICTURE_CLONE' \
+ '-DSK_SUPPORT_LEGACY_GETDEVICE' \
'-DSK_IGNORE_ETC1_SUPPORT' \
'-DSK_IGNORE_GPU_DITHER' \
- '-DSK_SUPPORT_LEGACY_GETTOTALCLIP' \
'-DSK_BUILD_FOR_ANDROID' \
'-DSK_USE_POSIX_THREADS' \
'-DSK_DEFERRED_CANVAS_USES_FACTORIES=1' \
@@ -488,6 +487,9 @@ LOCAL_CPPFLAGS_Release := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
diff --git a/media/media_android_imageformat_list.target.darwin-arm.mk b/media/media_android_imageformat_list.target.darwin-arm.mk
index 104d9248cb..f305403bbe 100644
--- a/media/media_android_imageformat_list.target.darwin-arm.mk
+++ b/media/media_android_imageformat_list.target.darwin-arm.mk
@@ -76,9 +76,9 @@ MY_CFLAGS_Debug := \
-Wno-unused-but-set-variable \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
+ -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Debug := \
@@ -97,6 +97,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -134,6 +135,9 @@ LOCAL_CPPFLAGS_Debug := \
-fvisibility-inlines-hidden \
-Wsign-compare \
-Wno-abi \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -195,6 +199,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -233,6 +238,9 @@ LOCAL_CPPFLAGS_Release := \
-fvisibility-inlines-hidden \
-Wsign-compare \
-Wno-abi \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
diff --git a/media/media_android_imageformat_list.target.darwin-arm64.mk b/media/media_android_imageformat_list.target.darwin-arm64.mk
index c0d86f76c9..38eaeb6a96 100644
--- a/media/media_android_imageformat_list.target.darwin-arm64.mk
+++ b/media/media_android_imageformat_list.target.darwin-arm64.mk
@@ -66,7 +66,6 @@ MY_CFLAGS_Debug := \
-Wno-unused-but-set-variable \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
-funwind-tables
@@ -87,6 +86,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -123,6 +123,9 @@ LOCAL_CPPFLAGS_Debug := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -155,7 +158,6 @@ MY_CFLAGS_Release := \
-fno-ident \
-fdata-sections \
-ffunction-sections \
- -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Release := \
@@ -174,6 +176,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -211,6 +214,9 @@ LOCAL_CPPFLAGS_Release := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
diff --git a/media/media_android_imageformat_list.target.darwin-mips.mk b/media/media_android_imageformat_list.target.darwin-mips.mk
index 997819d550..abce12f135 100644
--- a/media/media_android_imageformat_list.target.darwin-mips.mk
+++ b/media/media_android_imageformat_list.target.darwin-mips.mk
@@ -70,9 +70,9 @@ MY_CFLAGS_Debug := \
-Wno-unused-but-set-variable \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
+ -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Debug := \
@@ -91,6 +91,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -128,6 +129,9 @@ LOCAL_CPPFLAGS_Debug := \
-fvisibility-inlines-hidden \
-Wsign-compare \
-Wno-uninitialized \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -183,6 +187,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -221,6 +226,9 @@ LOCAL_CPPFLAGS_Release := \
-fvisibility-inlines-hidden \
-Wsign-compare \
-Wno-uninitialized \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
diff --git a/media/media_android_imageformat_list.target.darwin-x86.mk b/media/media_android_imageformat_list.target.darwin-x86.mk
index 8ab00637e3..12fc31ad08 100644
--- a/media/media_android_imageformat_list.target.darwin-x86.mk
+++ b/media/media_android_imageformat_list.target.darwin-x86.mk
@@ -71,9 +71,9 @@ MY_CFLAGS_Debug := \
-fno-stack-protector \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
+ -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Debug := \
@@ -92,6 +92,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -128,6 +129,9 @@ LOCAL_CPPFLAGS_Debug := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -184,6 +188,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -221,6 +226,9 @@ LOCAL_CPPFLAGS_Release := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
diff --git a/media/media_android_imageformat_list.target.darwin-x86_64.mk b/media/media_android_imageformat_list.target.darwin-x86_64.mk
index d6b7da2689..ffd42b32fb 100644
--- a/media/media_android_imageformat_list.target.darwin-x86_64.mk
+++ b/media/media_android_imageformat_list.target.darwin-x86_64.mk
@@ -70,9 +70,9 @@ MY_CFLAGS_Debug := \
-Wno-unused-but-set-variable \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
+ -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Debug := \
@@ -91,6 +91,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -127,6 +128,9 @@ LOCAL_CPPFLAGS_Debug := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -182,6 +186,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -219,6 +224,9 @@ LOCAL_CPPFLAGS_Release := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
diff --git a/media/media_android_imageformat_list.target.linux-arm.mk b/media/media_android_imageformat_list.target.linux-arm.mk
index 104d9248cb..f305403bbe 100644
--- a/media/media_android_imageformat_list.target.linux-arm.mk
+++ b/media/media_android_imageformat_list.target.linux-arm.mk
@@ -76,9 +76,9 @@ MY_CFLAGS_Debug := \
-Wno-unused-but-set-variable \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
+ -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Debug := \
@@ -97,6 +97,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -134,6 +135,9 @@ LOCAL_CPPFLAGS_Debug := \
-fvisibility-inlines-hidden \
-Wsign-compare \
-Wno-abi \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -195,6 +199,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -233,6 +238,9 @@ LOCAL_CPPFLAGS_Release := \
-fvisibility-inlines-hidden \
-Wsign-compare \
-Wno-abi \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
diff --git a/media/media_android_imageformat_list.target.linux-arm64.mk b/media/media_android_imageformat_list.target.linux-arm64.mk
index c0d86f76c9..38eaeb6a96 100644
--- a/media/media_android_imageformat_list.target.linux-arm64.mk
+++ b/media/media_android_imageformat_list.target.linux-arm64.mk
@@ -66,7 +66,6 @@ MY_CFLAGS_Debug := \
-Wno-unused-but-set-variable \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
-funwind-tables
@@ -87,6 +86,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -123,6 +123,9 @@ LOCAL_CPPFLAGS_Debug := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -155,7 +158,6 @@ MY_CFLAGS_Release := \
-fno-ident \
-fdata-sections \
-ffunction-sections \
- -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Release := \
@@ -174,6 +176,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -211,6 +214,9 @@ LOCAL_CPPFLAGS_Release := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
diff --git a/media/media_android_imageformat_list.target.linux-mips.mk b/media/media_android_imageformat_list.target.linux-mips.mk
index 997819d550..abce12f135 100644
--- a/media/media_android_imageformat_list.target.linux-mips.mk
+++ b/media/media_android_imageformat_list.target.linux-mips.mk
@@ -70,9 +70,9 @@ MY_CFLAGS_Debug := \
-Wno-unused-but-set-variable \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
+ -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Debug := \
@@ -91,6 +91,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -128,6 +129,9 @@ LOCAL_CPPFLAGS_Debug := \
-fvisibility-inlines-hidden \
-Wsign-compare \
-Wno-uninitialized \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -183,6 +187,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -221,6 +226,9 @@ LOCAL_CPPFLAGS_Release := \
-fvisibility-inlines-hidden \
-Wsign-compare \
-Wno-uninitialized \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
diff --git a/media/media_android_imageformat_list.target.linux-x86.mk b/media/media_android_imageformat_list.target.linux-x86.mk
index 8ab00637e3..12fc31ad08 100644
--- a/media/media_android_imageformat_list.target.linux-x86.mk
+++ b/media/media_android_imageformat_list.target.linux-x86.mk
@@ -71,9 +71,9 @@ MY_CFLAGS_Debug := \
-fno-stack-protector \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
+ -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Debug := \
@@ -92,6 +92,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -128,6 +129,9 @@ LOCAL_CPPFLAGS_Debug := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -184,6 +188,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -221,6 +226,9 @@ LOCAL_CPPFLAGS_Release := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
diff --git a/media/media_android_imageformat_list.target.linux-x86_64.mk b/media/media_android_imageformat_list.target.linux-x86_64.mk
index d6b7da2689..ffd42b32fb 100644
--- a/media/media_android_imageformat_list.target.linux-x86_64.mk
+++ b/media/media_android_imageformat_list.target.linux-x86_64.mk
@@ -70,9 +70,9 @@ MY_CFLAGS_Debug := \
-Wno-unused-but-set-variable \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
+ -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Debug := \
@@ -91,6 +91,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -127,6 +128,9 @@ LOCAL_CPPFLAGS_Debug := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -182,6 +186,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -219,6 +224,9 @@ LOCAL_CPPFLAGS_Release := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
diff --git a/media/media_android_jni_headers.target.darwin-arm.mk b/media/media_android_jni_headers.target.darwin-arm.mk
index 8872809e76..1aff91040f 100644
--- a/media/media_android_jni_headers.target.darwin-arm.mk
+++ b/media/media_android_jni_headers.target.darwin-arm.mk
@@ -165,9 +165,9 @@ MY_CFLAGS_Debug := \
-Wno-unused-but-set-variable \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
+ -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Debug := \
@@ -186,6 +186,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -223,6 +224,9 @@ LOCAL_CPPFLAGS_Debug := \
-fvisibility-inlines-hidden \
-Wsign-compare \
-Wno-abi \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -284,6 +288,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -322,6 +327,9 @@ LOCAL_CPPFLAGS_Release := \
-fvisibility-inlines-hidden \
-Wsign-compare \
-Wno-abi \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
diff --git a/media/media_android_jni_headers.target.darwin-arm64.mk b/media/media_android_jni_headers.target.darwin-arm64.mk
index 1e9c4f0520..8cf0cbb1be 100644
--- a/media/media_android_jni_headers.target.darwin-arm64.mk
+++ b/media/media_android_jni_headers.target.darwin-arm64.mk
@@ -155,7 +155,6 @@ MY_CFLAGS_Debug := \
-Wno-unused-but-set-variable \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
-funwind-tables
@@ -176,6 +175,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -212,6 +212,9 @@ LOCAL_CPPFLAGS_Debug := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -244,7 +247,6 @@ MY_CFLAGS_Release := \
-fno-ident \
-fdata-sections \
-ffunction-sections \
- -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Release := \
@@ -263,6 +265,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -300,6 +303,9 @@ LOCAL_CPPFLAGS_Release := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
diff --git a/media/media_android_jni_headers.target.darwin-mips.mk b/media/media_android_jni_headers.target.darwin-mips.mk
index 90d6e51bf8..e18a93c746 100644
--- a/media/media_android_jni_headers.target.darwin-mips.mk
+++ b/media/media_android_jni_headers.target.darwin-mips.mk
@@ -159,9 +159,9 @@ MY_CFLAGS_Debug := \
-Wno-unused-but-set-variable \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
+ -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Debug := \
@@ -180,6 +180,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -217,6 +218,9 @@ LOCAL_CPPFLAGS_Debug := \
-fvisibility-inlines-hidden \
-Wsign-compare \
-Wno-uninitialized \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -272,6 +276,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -310,6 +315,9 @@ LOCAL_CPPFLAGS_Release := \
-fvisibility-inlines-hidden \
-Wsign-compare \
-Wno-uninitialized \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
diff --git a/media/media_android_jni_headers.target.darwin-x86.mk b/media/media_android_jni_headers.target.darwin-x86.mk
index e130b129e3..5238f652ba 100644
--- a/media/media_android_jni_headers.target.darwin-x86.mk
+++ b/media/media_android_jni_headers.target.darwin-x86.mk
@@ -160,9 +160,9 @@ MY_CFLAGS_Debug := \
-fno-stack-protector \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
+ -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Debug := \
@@ -181,6 +181,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -217,6 +218,9 @@ LOCAL_CPPFLAGS_Debug := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -273,6 +277,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -310,6 +315,9 @@ LOCAL_CPPFLAGS_Release := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
diff --git a/media/media_android_jni_headers.target.darwin-x86_64.mk b/media/media_android_jni_headers.target.darwin-x86_64.mk
index 02ffbe0206..dce1897509 100644
--- a/media/media_android_jni_headers.target.darwin-x86_64.mk
+++ b/media/media_android_jni_headers.target.darwin-x86_64.mk
@@ -159,9 +159,9 @@ MY_CFLAGS_Debug := \
-Wno-unused-but-set-variable \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
+ -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Debug := \
@@ -180,6 +180,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -216,6 +217,9 @@ LOCAL_CPPFLAGS_Debug := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -271,6 +275,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -308,6 +313,9 @@ LOCAL_CPPFLAGS_Release := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
diff --git a/media/media_android_jni_headers.target.linux-arm.mk b/media/media_android_jni_headers.target.linux-arm.mk
index 8872809e76..1aff91040f 100644
--- a/media/media_android_jni_headers.target.linux-arm.mk
+++ b/media/media_android_jni_headers.target.linux-arm.mk
@@ -165,9 +165,9 @@ MY_CFLAGS_Debug := \
-Wno-unused-but-set-variable \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
+ -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Debug := \
@@ -186,6 +186,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -223,6 +224,9 @@ LOCAL_CPPFLAGS_Debug := \
-fvisibility-inlines-hidden \
-Wsign-compare \
-Wno-abi \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -284,6 +288,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -322,6 +327,9 @@ LOCAL_CPPFLAGS_Release := \
-fvisibility-inlines-hidden \
-Wsign-compare \
-Wno-abi \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
diff --git a/media/media_android_jni_headers.target.linux-arm64.mk b/media/media_android_jni_headers.target.linux-arm64.mk
index 1e9c4f0520..8cf0cbb1be 100644
--- a/media/media_android_jni_headers.target.linux-arm64.mk
+++ b/media/media_android_jni_headers.target.linux-arm64.mk
@@ -155,7 +155,6 @@ MY_CFLAGS_Debug := \
-Wno-unused-but-set-variable \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
-funwind-tables
@@ -176,6 +175,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -212,6 +212,9 @@ LOCAL_CPPFLAGS_Debug := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -244,7 +247,6 @@ MY_CFLAGS_Release := \
-fno-ident \
-fdata-sections \
-ffunction-sections \
- -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Release := \
@@ -263,6 +265,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -300,6 +303,9 @@ LOCAL_CPPFLAGS_Release := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
diff --git a/media/media_android_jni_headers.target.linux-mips.mk b/media/media_android_jni_headers.target.linux-mips.mk
index 90d6e51bf8..e18a93c746 100644
--- a/media/media_android_jni_headers.target.linux-mips.mk
+++ b/media/media_android_jni_headers.target.linux-mips.mk
@@ -159,9 +159,9 @@ MY_CFLAGS_Debug := \
-Wno-unused-but-set-variable \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
+ -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Debug := \
@@ -180,6 +180,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -217,6 +218,9 @@ LOCAL_CPPFLAGS_Debug := \
-fvisibility-inlines-hidden \
-Wsign-compare \
-Wno-uninitialized \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -272,6 +276,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -310,6 +315,9 @@ LOCAL_CPPFLAGS_Release := \
-fvisibility-inlines-hidden \
-Wsign-compare \
-Wno-uninitialized \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
diff --git a/media/media_android_jni_headers.target.linux-x86.mk b/media/media_android_jni_headers.target.linux-x86.mk
index e130b129e3..5238f652ba 100644
--- a/media/media_android_jni_headers.target.linux-x86.mk
+++ b/media/media_android_jni_headers.target.linux-x86.mk
@@ -160,9 +160,9 @@ MY_CFLAGS_Debug := \
-fno-stack-protector \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
+ -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Debug := \
@@ -181,6 +181,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -217,6 +218,9 @@ LOCAL_CPPFLAGS_Debug := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -273,6 +277,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -310,6 +315,9 @@ LOCAL_CPPFLAGS_Release := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
diff --git a/media/media_android_jni_headers.target.linux-x86_64.mk b/media/media_android_jni_headers.target.linux-x86_64.mk
index 02ffbe0206..dce1897509 100644
--- a/media/media_android_jni_headers.target.linux-x86_64.mk
+++ b/media/media_android_jni_headers.target.linux-x86_64.mk
@@ -159,9 +159,9 @@ MY_CFLAGS_Debug := \
-Wno-unused-but-set-variable \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
+ -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Debug := \
@@ -180,6 +180,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -216,6 +217,9 @@ LOCAL_CPPFLAGS_Debug := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -271,6 +275,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -308,6 +313,9 @@ LOCAL_CPPFLAGS_Release := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
diff --git a/media/media_asm.target.darwin-x86.mk b/media/media_asm.target.darwin-x86.mk
index dcda20fd53..5d0897e36d 100644
--- a/media/media_asm.target.darwin-x86.mk
+++ b/media/media_asm.target.darwin-x86.mk
@@ -160,9 +160,9 @@ MY_CFLAGS_Debug := \
-fno-stack-protector \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
+ -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Debug := \
@@ -181,6 +181,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -217,6 +218,9 @@ LOCAL_CPPFLAGS_Debug := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -273,6 +277,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -310,6 +315,9 @@ LOCAL_CPPFLAGS_Release := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
diff --git a/media/media_asm.target.darwin-x86_64.mk b/media/media_asm.target.darwin-x86_64.mk
index ad04768022..35b66ba090 100644
--- a/media/media_asm.target.darwin-x86_64.mk
+++ b/media/media_asm.target.darwin-x86_64.mk
@@ -181,9 +181,9 @@ MY_CFLAGS_Debug := \
-Wno-unused-but-set-variable \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
+ -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Debug := \
@@ -202,6 +202,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -238,6 +239,9 @@ LOCAL_CPPFLAGS_Debug := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -293,6 +297,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -330,6 +335,9 @@ LOCAL_CPPFLAGS_Release := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
diff --git a/media/media_asm.target.linux-x86.mk b/media/media_asm.target.linux-x86.mk
index dcda20fd53..5d0897e36d 100644
--- a/media/media_asm.target.linux-x86.mk
+++ b/media/media_asm.target.linux-x86.mk
@@ -160,9 +160,9 @@ MY_CFLAGS_Debug := \
-fno-stack-protector \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
+ -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Debug := \
@@ -181,6 +181,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -217,6 +218,9 @@ LOCAL_CPPFLAGS_Debug := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -273,6 +277,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -310,6 +315,9 @@ LOCAL_CPPFLAGS_Release := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
diff --git a/media/media_asm.target.linux-x86_64.mk b/media/media_asm.target.linux-x86_64.mk
index ad04768022..35b66ba090 100644
--- a/media/media_asm.target.linux-x86_64.mk
+++ b/media/media_asm.target.linux-x86_64.mk
@@ -181,9 +181,9 @@ MY_CFLAGS_Debug := \
-Wno-unused-but-set-variable \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
+ -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Debug := \
@@ -202,6 +202,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -238,6 +239,9 @@ LOCAL_CPPFLAGS_Debug := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -293,6 +297,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -330,6 +335,9 @@ LOCAL_CPPFLAGS_Release := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
diff --git a/media/media_mmx.target.darwin-x86.mk b/media/media_mmx.target.darwin-x86.mk
index 84d2ac2574..2d6ec027ce 100644
--- a/media/media_mmx.target.darwin-x86.mk
+++ b/media/media_mmx.target.darwin-x86.mk
@@ -59,9 +59,9 @@ MY_CFLAGS_Debug := \
-fno-stack-protector \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
+ -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Debug := \
@@ -80,6 +80,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -118,6 +119,9 @@ LOCAL_CPPFLAGS_Debug := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -175,6 +179,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -214,6 +219,9 @@ LOCAL_CPPFLAGS_Release := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
diff --git a/media/media_mmx.target.darwin-x86_64.mk b/media/media_mmx.target.darwin-x86_64.mk
index 54966716aa..eac0d0e15f 100644
--- a/media/media_mmx.target.darwin-x86_64.mk
+++ b/media/media_mmx.target.darwin-x86_64.mk
@@ -58,9 +58,9 @@ MY_CFLAGS_Debug := \
-Wno-unused-but-set-variable \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
+ -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Debug := \
@@ -79,6 +79,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -117,6 +118,9 @@ LOCAL_CPPFLAGS_Debug := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -173,6 +177,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -212,6 +217,9 @@ LOCAL_CPPFLAGS_Release := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
diff --git a/media/media_mmx.target.linux-x86.mk b/media/media_mmx.target.linux-x86.mk
index 84d2ac2574..2d6ec027ce 100644
--- a/media/media_mmx.target.linux-x86.mk
+++ b/media/media_mmx.target.linux-x86.mk
@@ -59,9 +59,9 @@ MY_CFLAGS_Debug := \
-fno-stack-protector \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
+ -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Debug := \
@@ -80,6 +80,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -118,6 +119,9 @@ LOCAL_CPPFLAGS_Debug := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -175,6 +179,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -214,6 +219,9 @@ LOCAL_CPPFLAGS_Release := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
diff --git a/media/media_mmx.target.linux-x86_64.mk b/media/media_mmx.target.linux-x86_64.mk
index 54966716aa..eac0d0e15f 100644
--- a/media/media_mmx.target.linux-x86_64.mk
+++ b/media/media_mmx.target.linux-x86_64.mk
@@ -58,9 +58,9 @@ MY_CFLAGS_Debug := \
-Wno-unused-but-set-variable \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
+ -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Debug := \
@@ -79,6 +79,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -117,6 +118,9 @@ LOCAL_CPPFLAGS_Debug := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -173,6 +177,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -212,6 +217,9 @@ LOCAL_CPPFLAGS_Release := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
diff --git a/media/media_sse2.target.darwin-x86.mk b/media/media_sse2.target.darwin-x86.mk
index 71a3ad00f9..5c5c4e86f0 100644
--- a/media/media_sse2.target.darwin-x86.mk
+++ b/media/media_sse2.target.darwin-x86.mk
@@ -61,9 +61,9 @@ MY_CFLAGS_Debug := \
-fno-stack-protector \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
+ -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Debug := \
@@ -82,6 +82,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -120,6 +121,9 @@ LOCAL_CPPFLAGS_Debug := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -177,6 +181,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -216,6 +221,9 @@ LOCAL_CPPFLAGS_Release := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
diff --git a/media/media_sse2.target.darwin-x86_64.mk b/media/media_sse2.target.darwin-x86_64.mk
index a2c14f5e0b..e2fb9522b2 100644
--- a/media/media_sse2.target.darwin-x86_64.mk
+++ b/media/media_sse2.target.darwin-x86_64.mk
@@ -60,9 +60,9 @@ MY_CFLAGS_Debug := \
-Wno-unused-but-set-variable \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
+ -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Debug := \
@@ -81,6 +81,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -119,6 +120,9 @@ LOCAL_CPPFLAGS_Debug := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -175,6 +179,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -214,6 +219,9 @@ LOCAL_CPPFLAGS_Release := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
diff --git a/media/media_sse2.target.linux-x86.mk b/media/media_sse2.target.linux-x86.mk
index 71a3ad00f9..5c5c4e86f0 100644
--- a/media/media_sse2.target.linux-x86.mk
+++ b/media/media_sse2.target.linux-x86.mk
@@ -61,9 +61,9 @@ MY_CFLAGS_Debug := \
-fno-stack-protector \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
+ -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Debug := \
@@ -82,6 +82,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -120,6 +121,9 @@ LOCAL_CPPFLAGS_Debug := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -177,6 +181,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -216,6 +221,9 @@ LOCAL_CPPFLAGS_Release := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
diff --git a/media/media_sse2.target.linux-x86_64.mk b/media/media_sse2.target.linux-x86_64.mk
index a2c14f5e0b..e2fb9522b2 100644
--- a/media/media_sse2.target.linux-x86_64.mk
+++ b/media/media_sse2.target.linux-x86_64.mk
@@ -60,9 +60,9 @@ MY_CFLAGS_Debug := \
-Wno-unused-but-set-variable \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
+ -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Debug := \
@@ -81,6 +81,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -119,6 +120,9 @@ LOCAL_CPPFLAGS_Debug := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -175,6 +179,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -214,6 +219,9 @@ LOCAL_CPPFLAGS_Release := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
diff --git a/media/player_android.target.darwin-arm.mk b/media/player_android.target.darwin-arm.mk
index 556aa726d7..746384a093 100644
--- a/media/player_android.target.darwin-arm.mk
+++ b/media/player_android.target.darwin-arm.mk
@@ -77,9 +77,9 @@ MY_CFLAGS_Debug := \
-Wno-unused-but-set-variable \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
+ -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Debug := \
@@ -98,6 +98,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -147,6 +148,9 @@ LOCAL_CPPFLAGS_Debug := \
-fvisibility-inlines-hidden \
-Wsign-compare \
-Wno-abi \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -208,6 +212,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -258,6 +263,9 @@ LOCAL_CPPFLAGS_Release := \
-fvisibility-inlines-hidden \
-Wsign-compare \
-Wno-abi \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
diff --git a/media/player_android.target.darwin-arm64.mk b/media/player_android.target.darwin-arm64.mk
index 211a6c0931..157e45bb5a 100644
--- a/media/player_android.target.darwin-arm64.mk
+++ b/media/player_android.target.darwin-arm64.mk
@@ -67,7 +67,6 @@ MY_CFLAGS_Debug := \
-Wno-unused-but-set-variable \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
-funwind-tables
@@ -88,6 +87,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -136,6 +136,9 @@ LOCAL_CPPFLAGS_Debug := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -168,7 +171,6 @@ MY_CFLAGS_Release := \
-fno-ident \
-fdata-sections \
-ffunction-sections \
- -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Release := \
@@ -187,6 +189,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -236,6 +239,9 @@ LOCAL_CPPFLAGS_Release := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
diff --git a/media/player_android.target.darwin-mips.mk b/media/player_android.target.darwin-mips.mk
index b32fc0b739..8062947e9b 100644
--- a/media/player_android.target.darwin-mips.mk
+++ b/media/player_android.target.darwin-mips.mk
@@ -71,9 +71,9 @@ MY_CFLAGS_Debug := \
-Wno-unused-but-set-variable \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
+ -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Debug := \
@@ -92,6 +92,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -141,6 +142,9 @@ LOCAL_CPPFLAGS_Debug := \
-fvisibility-inlines-hidden \
-Wsign-compare \
-Wno-uninitialized \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -196,6 +200,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -246,6 +251,9 @@ LOCAL_CPPFLAGS_Release := \
-fvisibility-inlines-hidden \
-Wsign-compare \
-Wno-uninitialized \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
diff --git a/media/player_android.target.darwin-x86.mk b/media/player_android.target.darwin-x86.mk
index 5c37d43813..ab1a7c36d8 100644
--- a/media/player_android.target.darwin-x86.mk
+++ b/media/player_android.target.darwin-x86.mk
@@ -72,9 +72,9 @@ MY_CFLAGS_Debug := \
-fno-stack-protector \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
+ -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Debug := \
@@ -93,6 +93,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -141,6 +142,9 @@ LOCAL_CPPFLAGS_Debug := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -197,6 +201,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -246,6 +251,9 @@ LOCAL_CPPFLAGS_Release := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
diff --git a/media/player_android.target.darwin-x86_64.mk b/media/player_android.target.darwin-x86_64.mk
index 37185d35eb..f30752e541 100644
--- a/media/player_android.target.darwin-x86_64.mk
+++ b/media/player_android.target.darwin-x86_64.mk
@@ -71,9 +71,9 @@ MY_CFLAGS_Debug := \
-Wno-unused-but-set-variable \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
+ -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Debug := \
@@ -92,6 +92,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -140,6 +141,9 @@ LOCAL_CPPFLAGS_Debug := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -195,6 +199,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -244,6 +249,9 @@ LOCAL_CPPFLAGS_Release := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
diff --git a/media/player_android.target.linux-arm.mk b/media/player_android.target.linux-arm.mk
index 556aa726d7..746384a093 100644
--- a/media/player_android.target.linux-arm.mk
+++ b/media/player_android.target.linux-arm.mk
@@ -77,9 +77,9 @@ MY_CFLAGS_Debug := \
-Wno-unused-but-set-variable \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
+ -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Debug := \
@@ -98,6 +98,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -147,6 +148,9 @@ LOCAL_CPPFLAGS_Debug := \
-fvisibility-inlines-hidden \
-Wsign-compare \
-Wno-abi \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -208,6 +212,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -258,6 +263,9 @@ LOCAL_CPPFLAGS_Release := \
-fvisibility-inlines-hidden \
-Wsign-compare \
-Wno-abi \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
diff --git a/media/player_android.target.linux-arm64.mk b/media/player_android.target.linux-arm64.mk
index 211a6c0931..157e45bb5a 100644
--- a/media/player_android.target.linux-arm64.mk
+++ b/media/player_android.target.linux-arm64.mk
@@ -67,7 +67,6 @@ MY_CFLAGS_Debug := \
-Wno-unused-but-set-variable \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
-funwind-tables
@@ -88,6 +87,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -136,6 +136,9 @@ LOCAL_CPPFLAGS_Debug := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -168,7 +171,6 @@ MY_CFLAGS_Release := \
-fno-ident \
-fdata-sections \
-ffunction-sections \
- -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Release := \
@@ -187,6 +189,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -236,6 +239,9 @@ LOCAL_CPPFLAGS_Release := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
diff --git a/media/player_android.target.linux-mips.mk b/media/player_android.target.linux-mips.mk
index b32fc0b739..8062947e9b 100644
--- a/media/player_android.target.linux-mips.mk
+++ b/media/player_android.target.linux-mips.mk
@@ -71,9 +71,9 @@ MY_CFLAGS_Debug := \
-Wno-unused-but-set-variable \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
+ -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Debug := \
@@ -92,6 +92,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -141,6 +142,9 @@ LOCAL_CPPFLAGS_Debug := \
-fvisibility-inlines-hidden \
-Wsign-compare \
-Wno-uninitialized \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -196,6 +200,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -246,6 +251,9 @@ LOCAL_CPPFLAGS_Release := \
-fvisibility-inlines-hidden \
-Wsign-compare \
-Wno-uninitialized \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
diff --git a/media/player_android.target.linux-x86.mk b/media/player_android.target.linux-x86.mk
index 5c37d43813..ab1a7c36d8 100644
--- a/media/player_android.target.linux-x86.mk
+++ b/media/player_android.target.linux-x86.mk
@@ -72,9 +72,9 @@ MY_CFLAGS_Debug := \
-fno-stack-protector \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
+ -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Debug := \
@@ -93,6 +93,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -141,6 +142,9 @@ LOCAL_CPPFLAGS_Debug := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -197,6 +201,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -246,6 +251,9 @@ LOCAL_CPPFLAGS_Release := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
diff --git a/media/player_android.target.linux-x86_64.mk b/media/player_android.target.linux-x86_64.mk
index 37185d35eb..f30752e541 100644
--- a/media/player_android.target.linux-x86_64.mk
+++ b/media/player_android.target.linux-x86_64.mk
@@ -71,9 +71,9 @@ MY_CFLAGS_Debug := \
-Wno-unused-but-set-variable \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
+ -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Debug := \
@@ -92,6 +92,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -140,6 +141,9 @@ LOCAL_CPPFLAGS_Debug := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -195,6 +199,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -244,6 +249,9 @@ LOCAL_CPPFLAGS_Release := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
diff --git a/media/shared_memory_support.target.darwin-arm.mk b/media/shared_memory_support.target.darwin-arm.mk
index 72750537e8..897f6a92f2 100644
--- a/media/shared_memory_support.target.darwin-arm.mk
+++ b/media/shared_memory_support.target.darwin-arm.mk
@@ -66,9 +66,9 @@ MY_CFLAGS_Debug := \
-Wno-unused-but-set-variable \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
+ -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Debug := \
@@ -87,6 +87,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -127,6 +128,9 @@ LOCAL_CPPFLAGS_Debug := \
-fvisibility-inlines-hidden \
-Wsign-compare \
-Wno-abi \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -188,6 +192,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -229,6 +234,9 @@ LOCAL_CPPFLAGS_Release := \
-fvisibility-inlines-hidden \
-Wsign-compare \
-Wno-abi \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
diff --git a/media/shared_memory_support.target.darwin-arm64.mk b/media/shared_memory_support.target.darwin-arm64.mk
index 20aad89a41..a563ab38ab 100644
--- a/media/shared_memory_support.target.darwin-arm64.mk
+++ b/media/shared_memory_support.target.darwin-arm64.mk
@@ -56,7 +56,6 @@ MY_CFLAGS_Debug := \
-Wno-unused-but-set-variable \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
-funwind-tables
@@ -77,6 +76,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -116,6 +116,9 @@ LOCAL_CPPFLAGS_Debug := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -148,7 +151,6 @@ MY_CFLAGS_Release := \
-fno-ident \
-fdata-sections \
-ffunction-sections \
- -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Release := \
@@ -167,6 +169,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -207,6 +210,9 @@ LOCAL_CPPFLAGS_Release := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
diff --git a/media/shared_memory_support.target.darwin-mips.mk b/media/shared_memory_support.target.darwin-mips.mk
index 1c25eee6ac..35791ce899 100644
--- a/media/shared_memory_support.target.darwin-mips.mk
+++ b/media/shared_memory_support.target.darwin-mips.mk
@@ -60,9 +60,9 @@ MY_CFLAGS_Debug := \
-Wno-unused-but-set-variable \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
+ -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Debug := \
@@ -81,6 +81,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -121,6 +122,9 @@ LOCAL_CPPFLAGS_Debug := \
-fvisibility-inlines-hidden \
-Wsign-compare \
-Wno-uninitialized \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -176,6 +180,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -217,6 +222,9 @@ LOCAL_CPPFLAGS_Release := \
-fvisibility-inlines-hidden \
-Wsign-compare \
-Wno-uninitialized \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
diff --git a/media/shared_memory_support.target.darwin-x86.mk b/media/shared_memory_support.target.darwin-x86.mk
index 327df5363f..a7b2124d3d 100644
--- a/media/shared_memory_support.target.darwin-x86.mk
+++ b/media/shared_memory_support.target.darwin-x86.mk
@@ -61,9 +61,9 @@ MY_CFLAGS_Debug := \
-fno-stack-protector \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
+ -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Debug := \
@@ -82,6 +82,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -121,6 +122,9 @@ LOCAL_CPPFLAGS_Debug := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -177,6 +181,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -217,6 +222,9 @@ LOCAL_CPPFLAGS_Release := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
diff --git a/media/shared_memory_support.target.darwin-x86_64.mk b/media/shared_memory_support.target.darwin-x86_64.mk
index d067e9c272..7f82cbdaaa 100644
--- a/media/shared_memory_support.target.darwin-x86_64.mk
+++ b/media/shared_memory_support.target.darwin-x86_64.mk
@@ -60,9 +60,9 @@ MY_CFLAGS_Debug := \
-Wno-unused-but-set-variable \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
+ -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Debug := \
@@ -81,6 +81,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -120,6 +121,9 @@ LOCAL_CPPFLAGS_Debug := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -175,6 +179,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -215,6 +220,9 @@ LOCAL_CPPFLAGS_Release := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
diff --git a/media/shared_memory_support.target.linux-arm.mk b/media/shared_memory_support.target.linux-arm.mk
index 72750537e8..897f6a92f2 100644
--- a/media/shared_memory_support.target.linux-arm.mk
+++ b/media/shared_memory_support.target.linux-arm.mk
@@ -66,9 +66,9 @@ MY_CFLAGS_Debug := \
-Wno-unused-but-set-variable \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
+ -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Debug := \
@@ -87,6 +87,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -127,6 +128,9 @@ LOCAL_CPPFLAGS_Debug := \
-fvisibility-inlines-hidden \
-Wsign-compare \
-Wno-abi \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -188,6 +192,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -229,6 +234,9 @@ LOCAL_CPPFLAGS_Release := \
-fvisibility-inlines-hidden \
-Wsign-compare \
-Wno-abi \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
diff --git a/media/shared_memory_support.target.linux-arm64.mk b/media/shared_memory_support.target.linux-arm64.mk
index 20aad89a41..a563ab38ab 100644
--- a/media/shared_memory_support.target.linux-arm64.mk
+++ b/media/shared_memory_support.target.linux-arm64.mk
@@ -56,7 +56,6 @@ MY_CFLAGS_Debug := \
-Wno-unused-but-set-variable \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
-funwind-tables
@@ -77,6 +76,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -116,6 +116,9 @@ LOCAL_CPPFLAGS_Debug := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -148,7 +151,6 @@ MY_CFLAGS_Release := \
-fno-ident \
-fdata-sections \
-ffunction-sections \
- -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Release := \
@@ -167,6 +169,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -207,6 +210,9 @@ LOCAL_CPPFLAGS_Release := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
diff --git a/media/shared_memory_support.target.linux-mips.mk b/media/shared_memory_support.target.linux-mips.mk
index 1c25eee6ac..35791ce899 100644
--- a/media/shared_memory_support.target.linux-mips.mk
+++ b/media/shared_memory_support.target.linux-mips.mk
@@ -60,9 +60,9 @@ MY_CFLAGS_Debug := \
-Wno-unused-but-set-variable \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
+ -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Debug := \
@@ -81,6 +81,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -121,6 +122,9 @@ LOCAL_CPPFLAGS_Debug := \
-fvisibility-inlines-hidden \
-Wsign-compare \
-Wno-uninitialized \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -176,6 +180,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -217,6 +222,9 @@ LOCAL_CPPFLAGS_Release := \
-fvisibility-inlines-hidden \
-Wsign-compare \
-Wno-uninitialized \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
diff --git a/media/shared_memory_support.target.linux-x86.mk b/media/shared_memory_support.target.linux-x86.mk
index 327df5363f..a7b2124d3d 100644
--- a/media/shared_memory_support.target.linux-x86.mk
+++ b/media/shared_memory_support.target.linux-x86.mk
@@ -61,9 +61,9 @@ MY_CFLAGS_Debug := \
-fno-stack-protector \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
+ -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Debug := \
@@ -82,6 +82,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -121,6 +122,9 @@ LOCAL_CPPFLAGS_Debug := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -177,6 +181,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -217,6 +222,9 @@ LOCAL_CPPFLAGS_Release := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
diff --git a/media/shared_memory_support.target.linux-x86_64.mk b/media/shared_memory_support.target.linux-x86_64.mk
index d067e9c272..7f82cbdaaa 100644
--- a/media/shared_memory_support.target.linux-x86_64.mk
+++ b/media/shared_memory_support.target.linux-x86_64.mk
@@ -60,9 +60,9 @@ MY_CFLAGS_Debug := \
-Wno-unused-but-set-variable \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
+ -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Debug := \
@@ -81,6 +81,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -120,6 +121,9 @@ LOCAL_CPPFLAGS_Debug := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -175,6 +179,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -215,6 +220,9 @@ LOCAL_CPPFLAGS_Release := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
diff --git a/media/test/data/README b/media/test/data/README
index a7c3ce2149..6968d13d62 100644
--- a/media/test/data/README
+++ b/media/test/data/README
@@ -3,19 +3,21 @@
// found in the LICENSE file.
bear-320x240.webm - WebM encode of bear.1280x720.mp4 resized to 320x240.
+bear-320x240-video-only.webm - The video track of bear-320x240.webm.
+bear-320x240-audio-only.webm - The audio track of bear-320x240.webm.
bear-vp9.webm - VP9 video only WebM file.
bear-vp9-opus.webm - VP9 Video with Opus Audio.
bear-vp8-webvtt.webm - WebM VP8 video with WebVTT subtitle track.
bear-vp8a.webm - WebM VP8 video with alpha channel.
bear-vp8a-odd-dimensions.webm - WebM VP8 video with alpha channel and odd dimensions.
bear-opus.webm - Opus Audio only WebM file.
-bear-opus-end-trimming.webm - File to test end trimming. It has one byte
- artificially added so that there is maximum
- padding at the end. It is an Opus Audio only WebM
+bear-opus-end-trimming.webm - File to test end trimming. It has one byte
+ artificially added so that there is maximum
+ padding at the end. It is an Opus Audio only WebM
file.
-no_streams.webm - Header, Info, & Tracks element from bear-320x240.webm slightly corrupted so it looks
+no_streams.webm - Header, Info, & Tracks element from bear-320x240.webm slightly corrupted so it looks
like there are no tracks.
-nonzero-start-time.webm - Has the same headers as bear-320x240.webm but the first cluster of this file
+nonzero-start-time.webm - Has the same headers as bear-320x240.webm but the first cluster of this file
is the second cluster of bear-320x240.webm. This creates the situation where
the media data doesn't start at time 0.
bear-320x240-live.webm - bear-320x240.webm remuxed w/o a duration and using clusters with unknown sizes.
@@ -47,10 +49,17 @@ bear-320x240-av_enc-av.webm - bear-320x240.webm with audio & video encrypted usi
bear-320x240-av_enc-av_clear-1s.webm - Same as bear-320x240-av_enc-av.webm but with no frames in the first second encrypted.
bear-320x240-av_enc-av_clear-all.webm - Same as bear-320x240-av_enc-av.webm but with no frames encrypted.
bear-640x360-av_enc-av.webm - bear-640x360.webm with audio & video encrypted using key ID [1] and key [2].
+bear-320x240-av_enc-v.webm - bear-320x240.webm with video track encrypted using key ID [1] and key [2].
+bear-320x240-av_enc-a.webm - bear-320x240.webm with audio track encrypted using key ID [1] and key [2].
+bear-320x240-v_enc-v.webm - bear-320x240-video-only.webm encrypted using key ID [1] and key [2].
+bear-320x240-v-vp9_enc-v.webm - bear-vp9.webm VP9 video only encrypted using key ID [1] and key [2].
bear-640x360-a_frag-cenc.mp4 - A fragmented MP4 version of the audio track of bear-640x360.mp4 encrypted (ISO CENC) using key ID [1] and key [2].
bear-640x360-a_frag-cenc-key_rotation.mp4 - A fragmented MP4 version of the audio track of bear-640x360.mp4 encrypted (ISO CENC) using key ID [1] and key [2] with key rotation [3].
bear-640x360-v_frag-cenc.mp4 - A fragmented MP4 version of the video track of bear-640x360.mp4 encrypted (ISO CENC) using key ID [1] and key [2].
bear-640x360-v_frag-cenc-key_rotation.mp4 - A fragmented MP4 version of the video track of bear-640x360.mp4 encrypted (ISO CENC) using key ID [1] and key [2] with key rotation [3].
+bear-a_enc-a.webm - bear-320x240-audio-only.webm encrypted using key ID [1] and key [2].
+frame_size_change-av_enc-v.webm - third_party/WebKit/LayoutTests/media/resources/frame_size_change.webm encrypted using key ID [1] and key [2].
+
[1] 30313233343536373839303132333435
[2] ebdd62f16814d27b68ef122afce4ae3c
diff --git a/media/test/data/audio-start-time-only.webm b/media/test/data/audio-start-time-only.webm
new file mode 100644
index 0000000000..f3088c85bf
--- /dev/null
+++ b/media/test/data/audio-start-time-only.webm
Binary files differ
diff --git a/media/test/data/bear-320x240-av_enc-a.webm b/media/test/data/bear-320x240-av_enc-a.webm
new file mode 100644
index 0000000000..d228e537e2
--- /dev/null
+++ b/media/test/data/bear-320x240-av_enc-a.webm
Binary files differ
diff --git a/media/test/data/bear-320x240-av_enc-v.webm b/media/test/data/bear-320x240-av_enc-v.webm
new file mode 100644
index 0000000000..0aa6c63e88
--- /dev/null
+++ b/media/test/data/bear-320x240-av_enc-v.webm
Binary files differ
diff --git a/media/test/data/bear-320x240-v-vp9_enc-v.webm b/media/test/data/bear-320x240-v-vp9_enc-v.webm
new file mode 100644
index 0000000000..c8f00293ae
--- /dev/null
+++ b/media/test/data/bear-320x240-v-vp9_enc-v.webm
Binary files differ
diff --git a/media/test/data/bear-320x240-v_enc-v.webm b/media/test/data/bear-320x240-v_enc-v.webm
new file mode 100644
index 0000000000..f12f9ac29c
--- /dev/null
+++ b/media/test/data/bear-320x240-v_enc-v.webm
Binary files differ
diff --git a/media/test/data/bear-a_enc-a.webm b/media/test/data/bear-a_enc-a.webm
new file mode 100644
index 0000000000..4d87b8eb3b
--- /dev/null
+++ b/media/test/data/bear-a_enc-a.webm
Binary files differ
diff --git a/media/test/data/bear-opus.webm b/media/test/data/bear-opus.webm
new file mode 100644
index 0000000000..c198148814
--- /dev/null
+++ b/media/test/data/bear-opus.webm
Binary files differ
diff --git a/media/test/data/bear-vp9-odd-dimensions.webm b/media/test/data/bear-vp9-odd-dimensions.webm
new file mode 100644
index 0000000000..4d65a906fa
--- /dev/null
+++ b/media/test/data/bear-vp9-odd-dimensions.webm
Binary files differ
diff --git a/media/test/data/bear.flac b/media/test/data/bear.flac
index 6b0286c186..1db13f9193 100644
--- a/media/test/data/bear.flac
+++ b/media/test/data/bear.flac
Binary files differ
diff --git a/media/test/data/bear.mp4 b/media/test/data/bear.mp4
new file mode 100644
index 0000000000..3763b59336
--- /dev/null
+++ b/media/test/data/bear.mp4
Binary files differ
diff --git a/media/test/data/bear.webm b/media/test/data/bear.webm
new file mode 100644
index 0000000000..422df3f9d3
--- /dev/null
+++ b/media/test/data/bear.webm
Binary files differ
diff --git a/media/test/data/bear_192kHz.wav b/media/test/data/bear_192kHz.wav
new file mode 100644
index 0000000000..20cbb4a954
--- /dev/null
+++ b/media/test/data/bear_192kHz.wav
Binary files differ
diff --git a/media/test/data/bear_3kHz.wav b/media/test/data/bear_3kHz.wav
new file mode 100644
index 0000000000..d8d798b6b8
--- /dev/null
+++ b/media/test/data/bear_3kHz.wav
Binary files differ
diff --git a/media/test/data/bear_alaw.wav b/media/test/data/bear_alaw.wav
new file mode 100644
index 0000000000..ef0d3dc915
--- /dev/null
+++ b/media/test/data/bear_alaw.wav
Binary files differ
diff --git a/media/test/data/bear_divx_mp3.avi b/media/test/data/bear_divx_mp3.avi
new file mode 100644
index 0000000000..c6e1a313aa
--- /dev/null
+++ b/media/test/data/bear_divx_mp3.avi
Binary files differ
diff --git a/media/test/data/bear_gsm_ms.wav b/media/test/data/bear_gsm_ms.wav
new file mode 100644
index 0000000000..138e90b114
--- /dev/null
+++ b/media/test/data/bear_gsm_ms.wav
Binary files differ
diff --git a/media/test/data/bear_h264_aac.3gp b/media/test/data/bear_h264_aac.3gp
new file mode 100644
index 0000000000..761166b474
--- /dev/null
+++ b/media/test/data/bear_h264_aac.3gp
Binary files differ
diff --git a/media/test/data/bear_mpeg4_amrnb.3gp b/media/test/data/bear_mpeg4_amrnb.3gp
new file mode 100644
index 0000000000..ced825c49a
--- /dev/null
+++ b/media/test/data/bear_mpeg4_amrnb.3gp
Binary files differ
diff --git a/media/test/data/bear_mpeg4_mp3.avi b/media/test/data/bear_mpeg4_mp3.avi
new file mode 100644
index 0000000000..5c5be3d16d
--- /dev/null
+++ b/media/test/data/bear_mpeg4_mp3.avi
Binary files differ
diff --git a/media/test/data/bear_mpeg4asp_mp3.avi b/media/test/data/bear_mpeg4asp_mp3.avi
new file mode 100644
index 0000000000..3131e2d3da
--- /dev/null
+++ b/media/test/data/bear_mpeg4asp_mp3.avi
Binary files differ
diff --git a/media/test/data/bear_mulaw.wav b/media/test/data/bear_mulaw.wav
new file mode 100644
index 0000000000..3fd3607be5
--- /dev/null
+++ b/media/test/data/bear_mulaw.wav
Binary files differ
diff --git a/media/test/data/bear_pcm.wav b/media/test/data/bear_pcm.wav
new file mode 100644
index 0000000000..1870eed0c5
--- /dev/null
+++ b/media/test/data/bear_pcm.wav
Binary files differ
diff --git a/media/test/data/bear_pcm_s16be.mov b/media/test/data/bear_pcm_s16be.mov
new file mode 100644
index 0000000000..7076dd88ae
--- /dev/null
+++ b/media/test/data/bear_pcm_s16be.mov
Binary files differ
diff --git a/media/test/data/bear_pcm_s24be.mov b/media/test/data/bear_pcm_s24be.mov
new file mode 100644
index 0000000000..b82be8df90
--- /dev/null
+++ b/media/test/data/bear_pcm_s24be.mov
Binary files differ
diff --git a/media/test/data/bear_rotate_0.mp4 b/media/test/data/bear_rotate_0.mp4
new file mode 100644
index 0000000000..e3fb262c1b
--- /dev/null
+++ b/media/test/data/bear_rotate_0.mp4
Binary files differ
diff --git a/media/test/data/bear_rotate_180.mp4 b/media/test/data/bear_rotate_180.mp4
new file mode 100644
index 0000000000..ec16d2a6e1
--- /dev/null
+++ b/media/test/data/bear_rotate_180.mp4
Binary files differ
diff --git a/media/test/data/bear_rotate_270.mp4 b/media/test/data/bear_rotate_270.mp4
new file mode 100644
index 0000000000..8b585b3182
--- /dev/null
+++ b/media/test/data/bear_rotate_270.mp4
Binary files differ
diff --git a/media/test/data/bear_rotate_90.mp4 b/media/test/data/bear_rotate_90.mp4
new file mode 100644
index 0000000000..c9216545a0
--- /dev/null
+++ b/media/test/data/bear_rotate_90.mp4
Binary files differ
diff --git a/media/test/data/bear_silent.mp4 b/media/test/data/bear_silent.mp4
new file mode 100644
index 0000000000..aaf0a9f1cb
--- /dev/null
+++ b/media/test/data/bear_silent.mp4
Binary files differ
diff --git a/media/test/data/bear_silent.ogv b/media/test/data/bear_silent.ogv
new file mode 100644
index 0000000000..923331831f
--- /dev/null
+++ b/media/test/data/bear_silent.ogv
Binary files differ
diff --git a/media/test/data/bear_silent.webm b/media/test/data/bear_silent.webm
new file mode 100644
index 0000000000..85bf7d8848
--- /dev/null
+++ b/media/test/data/bear_silent.webm
Binary files differ
diff --git a/media/test/data/blackwhite.png b/media/test/data/blackwhite.png
new file mode 100644
index 0000000000..e7d08ace48
--- /dev/null
+++ b/media/test/data/blackwhite.png
Binary files differ
diff --git a/media/test/data/blackwhite_yuv420p.avi b/media/test/data/blackwhite_yuv420p.avi
new file mode 100644
index 0000000000..14c6f8be18
--- /dev/null
+++ b/media/test/data/blackwhite_yuv420p.avi
Binary files differ
diff --git a/media/test/data/blackwhite_yuv420p.mp4 b/media/test/data/blackwhite_yuv420p.mp4
new file mode 100644
index 0000000000..aa6a49dfef
--- /dev/null
+++ b/media/test/data/blackwhite_yuv420p.mp4
Binary files differ
diff --git a/media/test/data/blackwhite_yuv420p.ogv b/media/test/data/blackwhite_yuv420p.ogv
new file mode 100644
index 0000000000..e2fae13f15
--- /dev/null
+++ b/media/test/data/blackwhite_yuv420p.ogv
Binary files differ
diff --git a/media/test/data/blackwhite_yuv420p.webm b/media/test/data/blackwhite_yuv420p.webm
new file mode 100644
index 0000000000..9713cbb301
--- /dev/null
+++ b/media/test/data/blackwhite_yuv420p.webm
Binary files differ
diff --git a/media/test/data/blackwhite_yuv422p.mp4 b/media/test/data/blackwhite_yuv422p.mp4
new file mode 100644
index 0000000000..5804d284fc
--- /dev/null
+++ b/media/test/data/blackwhite_yuv422p.mp4
Binary files differ
diff --git a/media/test/data/blackwhite_yuv422p.ogv b/media/test/data/blackwhite_yuv422p.ogv
new file mode 100644
index 0000000000..73a4eac0ef
--- /dev/null
+++ b/media/test/data/blackwhite_yuv422p.ogv
Binary files differ
diff --git a/media/test/data/blackwhite_yuv444p.mp4 b/media/test/data/blackwhite_yuv444p.mp4
new file mode 100644
index 0000000000..28dfb5b818
--- /dev/null
+++ b/media/test/data/blackwhite_yuv444p.mp4
Binary files differ
diff --git a/media/test/data/blackwhite_yuv444p.ogv b/media/test/data/blackwhite_yuv444p.ogv
new file mode 100644
index 0000000000..ba8ee17a14
--- /dev/null
+++ b/media/test/data/blackwhite_yuv444p.ogv
Binary files differ
diff --git a/media/test/data/blackwhite_yuv444p.webm b/media/test/data/blackwhite_yuv444p.webm
new file mode 100644
index 0000000000..4d86c5a0b9
--- /dev/null
+++ b/media/test/data/blackwhite_yuv444p.webm
Binary files differ
diff --git a/media/test/data/blackwhite_yuvj420p.mp4 b/media/test/data/blackwhite_yuvj420p.mp4
new file mode 100644
index 0000000000..c250d6cf46
--- /dev/null
+++ b/media/test/data/blackwhite_yuvj420p.mp4
Binary files differ
diff --git a/media/test/data/frame_size_change-av_enc-v.webm b/media/test/data/frame_size_change-av_enc-v.webm
new file mode 100644
index 0000000000..200be551b3
--- /dev/null
+++ b/media/test/data/frame_size_change-av_enc-v.webm
Binary files differ
diff --git a/media/test/data/sfx-opus-441.webm b/media/test/data/sfx-opus-441.webm
new file mode 100644
index 0000000000..dfeabce929
--- /dev/null
+++ b/media/test/data/sfx-opus-441.webm
Binary files differ
diff --git a/media/test/data/sync2.ogv b/media/test/data/sync2.ogv
new file mode 100644
index 0000000000..6b328a24de
--- /dev/null
+++ b/media/test/data/sync2.ogv
Binary files differ
diff --git a/media/test/data/tulip2.webm b/media/test/data/tulip2.webm
new file mode 100644
index 0000000000..a2774f607d
--- /dev/null
+++ b/media/test/data/tulip2.webm
Binary files differ
diff --git a/media/tools/player_x11/player_x11.cc b/media/tools/player_x11/player_x11.cc
index b691743d90..94aac8aa61 100644
--- a/media/tools/player_x11/player_x11.cc
+++ b/media/tools/player_x11/player_x11.cc
@@ -86,6 +86,8 @@ static void OnStatus(media::PipelineStatus status) {}
static void OnMetadata(media::PipelineMetadata metadata) {}
+static void OnBufferingStateChanged(media::BufferingState buffering_state) {}
+
static void NeedKey(const std::string& type,
const std::vector<uint8>& init_data) {
std::cout << "File is encrypted." << std::endl;
@@ -146,7 +148,7 @@ void InitPipeline(
pipeline->Start(
collection.Pass(), base::Bind(&DoNothing), base::Bind(&OnStatus),
base::Bind(&SaveStatusAndSignal, &event, &status),
- base::Bind(&OnMetadata), base::Bind(&DoNothing),
+ base::Bind(&OnMetadata), base::Bind(&OnBufferingStateChanged),
base::Bind(&DoNothing));
// Wait until the pipeline is fully initialized.
diff --git a/media/video/capture/fake_video_capture_device.cc b/media/video/capture/fake_video_capture_device.cc
index 302396a22b..6f4fd75174 100644
--- a/media/video/capture/fake_video_capture_device.cc
+++ b/media/video/capture/fake_video_capture_device.cc
@@ -104,12 +104,10 @@ void FakeVideoCaptureDevice::OnCaptureTask() {
VideoFrame::AllocationSize(VideoFrame::I420, capture_format_.frame_size);
memset(fake_frame_.get(), 0, frame_size);
+ SkImageInfo info = SkImageInfo::MakeA8(capture_format_.frame_size.width(),
+ capture_format_.frame_size.height());
SkBitmap bitmap;
- bitmap.setConfig(SkBitmap::kA8_Config,
- capture_format_.frame_size.width(),
- capture_format_.frame_size.height(),
- capture_format_.frame_size.width()),
- bitmap.setPixels(fake_frame_.get());
+ bitmap.installPixels(info, fake_frame_.get(), info.width());
SkCanvas canvas(bitmap);
// Draw a sweeping circle to show an animation.
diff --git a/media/video/capture/fake_video_capture_device_factory.cc b/media/video/capture/fake_video_capture_device_factory.cc
index 5183dc0e9f..31ad67458e 100644
--- a/media/video/capture/fake_video_capture_device_factory.cc
+++ b/media/video/capture/fake_video_capture_device_factory.cc
@@ -29,8 +29,14 @@ void FakeVideoCaptureDeviceFactory::GetDeviceNames(
DCHECK(thread_checker_.CalledOnValidThread());
DCHECK(device_names->empty());
for (int n = 0; n < number_of_devices_; ++n) {
+#if !defined(OS_MACOSX)
VideoCaptureDevice::Name name(base::StringPrintf("fake_device_%d", n),
base::StringPrintf("/dev/video%d", n));
+#else
+ VideoCaptureDevice::Name name(base::StringPrintf("fake_device_%d", n),
+ base::StringPrintf("/dev/video%d", n),
+ VideoCaptureDevice::Name::AVFOUNDATION);
+#endif
device_names->push_back(name);
}
}
diff --git a/media/video/capture/file_video_capture_device_factory.cc b/media/video/capture/file_video_capture_device_factory.cc
index ae7845064e..059ae2dedc 100644
--- a/media/video/capture/file_video_capture_device_factory.cc
+++ b/media/video/capture/file_video_capture_device_factory.cc
@@ -45,6 +45,11 @@ void FileVideoCaptureDeviceFactory::GetDeviceNames(
device_names->push_back(VideoCaptureDevice::Name(
base::SysWideToUTF8(command_line_file_path.value()),
kFileVideoCaptureDeviceName));
+#elif defined(OS_MACOSX)
+ device_names->push_back(VideoCaptureDevice::Name(
+ command_line_file_path.value(),
+ kFileVideoCaptureDeviceName,
+ VideoCaptureDevice::Name::AVFOUNDATION));
#else
device_names->push_back(VideoCaptureDevice::Name(
command_line_file_path.value(),
diff --git a/media/video/capture/linux/video_capture_device_factory_linux.cc b/media/video/capture/linux/video_capture_device_factory_linux.cc
index c8821eebbd..303e14af26 100644
--- a/media/video/capture/linux/video_capture_device_factory_linux.cc
+++ b/media/video/capture/linux/video_capture_device_factory_linux.cc
@@ -175,6 +175,10 @@ void VideoCaptureDeviceFactoryLinux::GetDeviceSupportedFormats(
break;
}
supported_formats->push_back(supported_format);
+ DVLOG(1) << device.name()
+ << " resolution: " << supported_format.frame_size.ToString()
+ << ", fps: " << supported_format.frame_rate
+ << ", pixel format: " << supported_format.pixel_format;
++frame_interval.index;
}
++frame_size.index;
diff --git a/media/video/capture/linux/video_capture_device_linux.cc b/media/video/capture/linux/video_capture_device_linux.cc
index 11151ac728..c15f2f1cfe 100644
--- a/media/video/capture/linux/video_capture_device_linux.cc
+++ b/media/video/capture/linux/video_capture_device_linux.cc
@@ -34,7 +34,7 @@ enum { kContinuousTimeoutLimit = 10 };
// Time to wait in milliseconds before v4l2_thread_ reschedules OnCaptureTask
// if an event is triggered (select) but no video frame is read.
enum { kCaptureSelectWaitMs = 10 };
-// MJPEG is prefered if the width or height is larger than this.
+// MJPEG is preferred if the width or height is larger than this.
enum { kMjpegWidth = 640 };
enum { kMjpegHeight = 480 };
// Typical framerate, in fps
@@ -256,7 +256,8 @@ void VideoCaptureDeviceLinux::OnAllocateAndStart(int width,
video_fmt.fmt.pix.pixelformat = *best;
if (HANDLE_EINTR(ioctl(device_fd_.get(), VIDIOC_S_FMT, &video_fmt)) < 0) {
- SetErrorState("Failed to set camera format");
+ SetErrorState(
+ base::StringPrintf("Failed to set camera format: %s", strerror(errno)));
return;
}
diff --git a/media/video/capture/mac/avfoundation_glue.h b/media/video/capture/mac/avfoundation_glue.h
index ac679c2bf1..1d6e177733 100644
--- a/media/video/capture/mac/avfoundation_glue.h
+++ b/media/video/capture/mac/avfoundation_glue.h
@@ -62,6 +62,7 @@ MEDIA_EXPORT
- (NSString*)localizedName;
- (BOOL)isSuspended;
- (NSArray*)formats;
+- (int32_t)transportType;
@end
diff --git a/media/video/capture/mac/video_capture_device_avfoundation_mac.mm b/media/video/capture/mac/video_capture_device_avfoundation_mac.mm
index 2412aac91d..399ad7d938 100644
--- a/media/video/capture/mac/video_capture_device_avfoundation_mac.mm
+++ b/media/video/capture/mac/video_capture_device_avfoundation_mac.mm
@@ -23,7 +23,11 @@
if (([device hasMediaType:AVFoundationGlue::AVMediaTypeVideo()] ||
[device hasMediaType:AVFoundationGlue::AVMediaTypeMuxed()]) &&
![device isSuspended]) {
- [deviceNames setObject:[device localizedName]
+ DeviceNameAndTransportType* nameAndTransportType =
+ [[[DeviceNameAndTransportType alloc]
+ initWithName:[device localizedName]
+ transportType:[device transportType]] autorelease];
+ [deviceNames setObject:nameAndTransportType
forKey:[device uniqueID]];
}
}
@@ -74,7 +78,7 @@
[format videoSupportedFrameRateRanges]) {
media::VideoCaptureFormat format(
gfx::Size(dimensions.width, dimensions.height),
- static_cast<int>(frameRate.maxFrameRate),
+ frameRate.maxFrameRate,
pixelFormat);
formats->push_back(format);
DVLOG(2) << name.name() << " resolution: "
diff --git a/media/video/capture/mac/video_capture_device_factory_mac.mm b/media/video/capture/mac/video_capture_device_factory_mac.mm
index d58a25c658..af97fd21e0 100644
--- a/media/video/capture/mac/video_capture_device_factory_mac.mm
+++ b/media/video/capture/mac/video_capture_device_factory_mac.mm
@@ -4,8 +4,11 @@
#include "media/video/capture/mac/video_capture_device_factory_mac.h"
+#import <IOKit/audio/IOAudioTypes.h>
+
#include "base/bind.h"
#include "base/location.h"
+#include "base/strings/string_util.h"
#include "base/task_runner_util.h"
#import "media/video/capture/mac/avfoundation_glue.h"
#include "media/video/capture/mac/video_capture_device_mac.h"
@@ -15,15 +18,12 @@
namespace media {
// Some devices are not correctly supported in AVFoundation, f.i. Blackmagic,
-// see http://crbug.com/347371. The devices are identified by USB Vendor ID and
-// by a characteristic substring of the name, usually the vendor's name.
+// see http://crbug.com/347371. The devices are identified by a characteristic
+// trailing substring of uniqueId and by (part of) the vendor's name.
const struct NameAndVid {
- const char* vid;
+ const char* unique_id_signature;
const char* name;
-} kBlacklistedCameras[] = { { "a82c", "Blackmagic" } };
-
-// In device identifiers, the USB VID and PID are stored in 4 bytes each.
-const size_t kVidPidSize = 4;
+} kBlacklistedCameras[] = { { "-01FDA82C8A9C", "Blackmagic" } };
static scoped_ptr<media::VideoCaptureDevice::Names>
EnumerateDevicesUsingQTKit() {
@@ -34,7 +34,7 @@ EnumerateDevicesUsingQTKit() {
[VideoCaptureDeviceQTKit getDeviceNames:capture_devices];
for (NSString* key in capture_devices) {
VideoCaptureDevice::Name name(
- [[capture_devices valueForKey:key] UTF8String],
+ [[[capture_devices valueForKey:key] deviceName] UTF8String],
[key UTF8String], VideoCaptureDevice::Name::QTKIT);
device_names->push_back(name);
}
@@ -102,19 +102,24 @@ void VideoCaptureDeviceFactoryMac::GetDeviceNames(
bool is_any_device_blacklisted = false;
DVLOG(1) << "Enumerating video capture devices using AVFoundation";
capture_devices = [VideoCaptureDeviceAVFoundation deviceNames];
- std::string device_vid;
// Enumerate all devices found by AVFoundation, translate the info for each
// to class Name and add it to |device_names|.
for (NSString* key in capture_devices) {
+ int transport_type = [[capture_devices valueForKey:key] transportType];
+ // Transport types are defined for Audio devices and reused for video.
+ VideoCaptureDevice::Name::TransportType device_transport_type =
+ (transport_type == kIOAudioDeviceTransportTypeBuiltIn ||
+ transport_type == kIOAudioDeviceTransportTypeUSB)
+ ? VideoCaptureDevice::Name::USB_OR_BUILT_IN
+ : VideoCaptureDevice::Name::OTHER_TRANSPORT;
VideoCaptureDevice::Name name(
- [[capture_devices valueForKey:key] UTF8String],
- [key UTF8String], VideoCaptureDevice::Name::AVFOUNDATION);
+ [[[capture_devices valueForKey:key] deviceName] UTF8String],
+ [key UTF8String], VideoCaptureDevice::Name::AVFOUNDATION,
+ device_transport_type);
device_names->push_back(name);
- // Extract the device's Vendor ID and compare to all blacklisted ones.
- device_vid = name.GetModel().substr(0, kVidPidSize);
for (size_t i = 0; i < arraysize(kBlacklistedCameras); ++i) {
- is_any_device_blacklisted |=
- !strcasecmp(device_vid.c_str(), kBlacklistedCameras[i].vid);
+ is_any_device_blacklisted |= EndsWith(name.id(),
+ kBlacklistedCameras[i].unique_id_signature, false);
if (is_any_device_blacklisted)
break;
}
@@ -126,7 +131,7 @@ void VideoCaptureDeviceFactoryMac::GetDeviceNames(
if (is_any_device_blacklisted) {
capture_devices = [VideoCaptureDeviceQTKit deviceNames];
for (NSString* key in capture_devices) {
- NSString* device_name = [capture_devices valueForKey:key];
+ NSString* device_name = [[capture_devices valueForKey:key] deviceName];
for (size_t i = 0; i < arraysize(kBlacklistedCameras); ++i) {
if ([device_name rangeOfString:@(kBlacklistedCameras[i].name)
options:NSCaseInsensitiveSearch].length != 0) {
diff --git a/media/video/capture/mac/video_capture_device_mac.h b/media/video/capture/mac/video_capture_device_mac.h
index 36dc015e37..60da1396d5 100644
--- a/media/video/capture/mac/video_capture_device_mac.h
+++ b/media/video/capture/mac/video_capture_device_mac.h
@@ -10,9 +10,12 @@
#ifndef MEDIA_VIDEO_CAPTURE_MAC_VIDEO_CAPTURE_DEVICE_MAC_H_
#define MEDIA_VIDEO_CAPTURE_MAC_VIDEO_CAPTURE_DEVICE_MAC_H_
+#import <Foundation/Foundation.h>
+
#include <string>
#include "base/compiler_specific.h"
+#include "base/mac/scoped_nsobject.h"
#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
#include "media/video/capture/video_capture_device.h"
@@ -24,8 +27,30 @@ namespace base {
class SingleThreadTaskRunner;
}
+// Small class to bundle device name and connection type into a dictionary.
+MEDIA_EXPORT
+@interface DeviceNameAndTransportType : NSObject {
+ @private
+ base::scoped_nsobject<NSString> deviceName_;
+ // The transport type of the device (USB, PCI, etc), values are defined in
+ // <IOKit/audio/IOAudioTypes.h> as kIOAudioDeviceTransportType*.
+ int32_t transportType_;
+}
+
+- (id)initWithName:(NSString*)name transportType:(int32_t)transportType;
+
+- (NSString*)deviceName;
+- (int32_t)transportType;
+@end
+
namespace media {
+enum {
+ // Unknown transport type, addition to the kIOAudioDeviceTransportType*
+ // family for QTKit devices where this attribute isn't published.
+ kIOAudioDeviceTransportTypeUnknown = 'unkn'
+};
+
// Called by VideoCaptureManager to open, close and start, stop Mac video
// capture devices.
class VideoCaptureDeviceMac : public VideoCaptureDevice {
diff --git a/media/video/capture/mac/video_capture_device_mac.mm b/media/video/capture/mac/video_capture_device_mac.mm
index 60278b7615..b13496c805 100644
--- a/media/video/capture/mac/video_capture_device_mac.mm
+++ b/media/video/capture/mac/video_capture_device_mac.mm
@@ -21,6 +21,26 @@
#import "media/video/capture/mac/video_capture_device_avfoundation_mac.h"
#import "media/video/capture/mac/video_capture_device_qtkit_mac.h"
+@implementation DeviceNameAndTransportType
+
+- (id)initWithName:(NSString*)deviceName transportType:(int32_t)transportType {
+ if (self = [super init]) {
+ deviceName_.reset([deviceName copy]);
+ transportType_ = transportType;
+ }
+ return self;
+}
+
+- (NSString*)deviceName {
+ return deviceName_;
+}
+
+- (int32_t)transportType {
+ return transportType_;
+}
+
+@end // @implementation DeviceNameAndTransportType
+
namespace media {
const int kMinFrameRate = 1;
@@ -307,10 +327,12 @@ static void SetAntiFlickerInUsbDevice(const int vendor_id,
}
const std::string VideoCaptureDevice::Name::GetModel() const {
+ // Skip the AVFoundation's not USB nor built-in devices.
+ if (capture_api_type() == AVFOUNDATION && transport_type() != USB_OR_BUILT_IN)
+ return "";
// Both PID and VID are 4 characters.
- if (unique_id_.size() < 2 * kVidPidSize) {
+ if (unique_id_.size() < 2 * kVidPidSize)
return "";
- }
// The last characters of device id is a concatenation of VID and then PID.
const size_t vid_location = unique_id_.size() - 2 * kVidPidSize;
diff --git a/media/video/capture/mac/video_capture_device_qtkit_mac.mm b/media/video/capture/mac/video_capture_device_qtkit_mac.mm
index c884c723df..599cfac59e 100644
--- a/media/video/capture/mac/video_capture_device_qtkit_mac.mm
+++ b/media/video/capture/mac/video_capture_device_qtkit_mac.mm
@@ -29,9 +29,15 @@
});
for (QTCaptureDevice* device in captureDevices) {
- if (![[device attributeForKey:QTCaptureDeviceSuspendedAttribute] boolValue])
- [deviceNames setObject:[device localizedDisplayName]
- forKey:[device uniqueID]];
+ if ([[device attributeForKey:QTCaptureDeviceSuspendedAttribute] boolValue])
+ continue;
+ DeviceNameAndTransportType* nameAndTransportType =
+ [[[DeviceNameAndTransportType alloc]
+ initWithName:[device localizedDisplayName]
+ transportType:media::kIOAudioDeviceTransportTypeUnknown]
+ autorelease];
+ [deviceNames setObject:nameAndTransportType
+ forKey:[device uniqueID]];
}
}
diff --git a/media/video/capture/video_capture_device.cc b/media/video/capture/video_capture_device.cc
index 2efff7de02..2d8420791f 100644
--- a/media/video/capture/video_capture_device.cc
+++ b/media/video/capture/video_capture_device.cc
@@ -19,6 +19,39 @@ const std::string VideoCaptureDevice::Name::GetNameAndModel() const {
return device_name_ + suffix;
}
+VideoCaptureDevice::Name::Name() {}
+
+VideoCaptureDevice::Name::Name(const std::string& name, const std::string& id)
+ : device_name_(name), unique_id_(id) {}
+
+#if defined(OS_WIN)
+VideoCaptureDevice::Name::Name(const std::string& name,
+ const std::string& id,
+ const CaptureApiType api_type)
+ : device_name_(name), unique_id_(id), capture_api_class_(api_type) {}
+#endif
+
+#if defined(OS_MACOSX)
+VideoCaptureDevice::Name::Name(const std::string& name,
+ const std::string& id,
+ const CaptureApiType api_type)
+ : device_name_(name),
+ unique_id_(id),
+ capture_api_class_(api_type),
+ transport_type_(OTHER_TRANSPORT) {}
+
+VideoCaptureDevice::Name::Name(const std::string& name,
+ const std::string& id,
+ const CaptureApiType api_type,
+ const TransportType transport_type)
+ : device_name_(name),
+ unique_id_(id),
+ capture_api_class_(api_type),
+ transport_type_(transport_type) {}
+#endif
+
+VideoCaptureDevice::Name::~Name() {}
+
VideoCaptureDevice::~VideoCaptureDevice() {}
int VideoCaptureDevice::GetPowerLineFrequencyForLocation() const {
diff --git a/media/video/capture/video_capture_device.h b/media/video/capture/video_capture_device.h
index afb4c3567a..3f953f8896 100644
--- a/media/video/capture/video_capture_device.h
+++ b/media/video/capture/video_capture_device.h
@@ -38,9 +38,8 @@ class MEDIA_EXPORT VideoCaptureDevice {
// VideoCaptureDevice::Create.
class MEDIA_EXPORT Name {
public:
- Name() {}
- Name(const std::string& name, const std::string& id)
- : device_name_(name), unique_id_(id) {}
+ Name();
+ Name(const std::string& name, const std::string& id);
#if defined(OS_WIN)
// Windows targets Capture Api type: it can only be set on construction.
@@ -57,14 +56,24 @@ class MEDIA_EXPORT VideoCaptureDevice {
QTKIT,
API_TYPE_UNKNOWN
};
+ // For AVFoundation Api, identify devices that are built-in or USB.
+ enum TransportType {
+ USB_OR_BUILT_IN,
+ OTHER_TRANSPORT
+ };
#endif
#if defined(OS_WIN) || defined(OS_MACOSX)
Name(const std::string& name,
const std::string& id,
- const CaptureApiType api_type)
- : device_name_(name), unique_id_(id), capture_api_class_(api_type) {}
+ const CaptureApiType api_type);
+#endif
+#if defined(OS_MACOSX)
+ Name(const std::string& name,
+ const std::string& id,
+ const CaptureApiType api_type,
+ const TransportType transport_type);
#endif
- ~Name() {}
+ ~Name();
// Friendly name of a device
const std::string& name() const { return device_name_; }
@@ -95,6 +104,11 @@ class MEDIA_EXPORT VideoCaptureDevice {
CaptureApiType capture_api_type() const {
return capture_api_class_.capture_api_type();
}
+#endif
+#if defined(OS_MACOSX)
+ TransportType transport_type() const {
+ return transport_type_;
+ }
#endif // if defined(OS_WIN)
private:
@@ -118,6 +132,9 @@ class MEDIA_EXPORT VideoCaptureDevice {
CaptureApiClass capture_api_class_;
#endif
+#if defined(OS_MACOSX)
+ TransportType transport_type_;
+#endif
// Allow generated copy constructor and assignment.
};
diff --git a/media/video/capture/video_capture_types.cc b/media/video/capture/video_capture_types.cc
index 7dcc7a02e7..fa78f42748 100644
--- a/media/video/capture/video_capture_types.cc
+++ b/media/video/capture/video_capture_types.cc
@@ -9,7 +9,7 @@
namespace media {
VideoCaptureFormat::VideoCaptureFormat()
- : frame_rate(0), pixel_format(PIXEL_FORMAT_UNKNOWN) {}
+ : frame_rate(0.0f), pixel_format(PIXEL_FORMAT_UNKNOWN) {}
VideoCaptureFormat::VideoCaptureFormat(const gfx::Size& frame_size,
float frame_rate,
@@ -23,7 +23,7 @@ bool VideoCaptureFormat::IsValid() const {
(frame_size.height() < media::limits::kMaxDimension) &&
(frame_size.GetArea() >= 0) &&
(frame_size.GetArea() < media::limits::kMaxCanvas) &&
- (frame_rate > 0) &&
+ (frame_rate >= 0.0f) &&
(frame_rate < media::limits::kMaxFramesPerSecond) &&
(pixel_format >= PIXEL_FORMAT_UNKNOWN) &&
(pixel_format < PIXEL_FORMAT_MAX);
diff --git a/media/video/capture/win/sink_filter_win.cc b/media/video/capture/win/sink_filter_win.cc
index e3bb0a5856..8e9c941f6e 100644
--- a/media/video/capture/win/sink_filter_win.cc
+++ b/media/video/capture/win/sink_filter_win.cc
@@ -7,6 +7,8 @@
#include "base/logging.h"
#include "media/video/capture/win/sink_input_pin_win.h"
+namespace media {
+
// Define GUID for I420. This is the color format we would like to support but
// it is not defined in the DirectShow SDK.
// http://msdn.microsoft.com/en-us/library/dd757532.aspx
@@ -15,7 +17,12 @@ GUID kMediaSubTypeI420 = {
0x30323449, 0x0000, 0x0010, { 0x80, 0x00, 0x00, 0xAA, 0x00, 0x38, 0x9B, 0x71}
};
-namespace media {
+// UYVY synonym with BT709 color components, used in HD video. This variation
+// might appear in non-USB capture cards and it's implemented as a normal YUV
+// pixel format with the characters HDYC encoded in the first array word.
+GUID kMediaSubTypeHDYC = {
+ 0x43594448, 0x0000, 0x0010, { 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+};
SinkFilterObserver::~SinkFilterObserver() {}
diff --git a/media/video/capture/win/sink_filter_win.h b/media/video/capture/win/sink_filter_win.h
index e454f0b984..7265de578e 100644
--- a/media/video/capture/win/sink_filter_win.h
+++ b/media/video/capture/win/sink_filter_win.h
@@ -16,13 +16,18 @@
#include "media/video/capture/win/filter_base_win.h"
#include "media/video/capture/win/sink_filter_observer_win.h"
+namespace media {
+
// Define GUID for I420. This is the color format we would like to support but
// it is not defined in the DirectShow SDK.
// http://msdn.microsoft.com/en-us/library/dd757532.aspx
// 30323449-0000-0010-8000-00AA00389B71.
extern GUID kMediaSubTypeI420;
-namespace media {
+// UYVY synonym with BT709 color components, used in HD video. This variation
+// might appear in non-USB capture cards and it's implemented as a normal YUV
+// pixel format with the characters HDYC encoded in the first array word.
+extern GUID kMediaSubTypeHDYC;
class SinkInputPin;
diff --git a/media/video/capture/win/video_capture_device_factory_win.cc b/media/video/capture/win/video_capture_device_factory_win.cc
index 920126df09..75ee59d3bf 100644
--- a/media/video/capture/win/video_capture_device_factory_win.cc
+++ b/media/video/capture/win/video_capture_device_factory_win.cc
@@ -167,23 +167,26 @@ static void GetDeviceNamesMediaFoundation(
if (!EnumerateVideoDevicesMediaFoundation(&devices, &count))
return;
- HRESULT hr;
for (UINT32 i = 0; i < count; ++i) {
- UINT32 name_size, id_size;
- ScopedCoMem<wchar_t> name, id;
- if (SUCCEEDED(hr = devices[i]->GetAllocatedString(
- MF_DEVSOURCE_ATTRIBUTE_FRIENDLY_NAME, &name, &name_size)) &&
- SUCCEEDED(hr = devices[i]->GetAllocatedString(
- MF_DEVSOURCE_ATTRIBUTE_SOURCE_TYPE_VIDCAP_SYMBOLIC_LINK, &id,
- &id_size))) {
- std::wstring name_w(name, name_size), id_w(id, id_size);
- VideoCaptureDevice::Name device(base::SysWideToUTF8(name_w),
- base::SysWideToUTF8(id_w),
- VideoCaptureDevice::Name::MEDIA_FOUNDATION);
- device_names->push_back(device);
- } else {
- DLOG(WARNING) << "GetAllocatedString failed: " << std::hex << hr;
+ ScopedCoMem<wchar_t> name;
+ UINT32 name_size;
+ HRESULT hr = devices[i]->GetAllocatedString(
+ MF_DEVSOURCE_ATTRIBUTE_FRIENDLY_NAME, &name, &name_size);
+ if (SUCCEEDED(hr)) {
+ ScopedCoMem<wchar_t> id;
+ UINT32 id_size;
+ hr = devices[i]->GetAllocatedString(
+ MF_DEVSOURCE_ATTRIBUTE_SOURCE_TYPE_VIDCAP_SYMBOLIC_LINK, &id,
+ &id_size);
+ if (SUCCEEDED(hr)) {
+ device_names->push_back(VideoCaptureDevice::Name(
+ base::SysWideToUTF8(std::wstring(name, name_size)),
+ base::SysWideToUTF8(std::wstring(id, id_size)),
+ VideoCaptureDevice::Name::MEDIA_FOUNDATION));
+ }
}
+ if (FAILED(hr))
+ DLOG(WARNING) << "GetAllocatedString failed: " << std::hex << hr;
devices[i]->Release();
}
}
@@ -206,95 +209,72 @@ static void GetDeviceSupportedFormatsDirectShow(
if (hr != S_OK)
return;
- // Walk the capture devices. No need to check for "google camera adapter",
- // since this is already skipped in the enumeration of GetDeviceNames().
- ScopedComPtr<IMoniker> moniker;
- int index = 0;
- ScopedVariant device_id;
- while (enum_moniker->Next(1, moniker.Receive(), NULL) == S_OK) {
- ScopedComPtr<IPropertyBag> prop_bag;
- hr = moniker->BindToStorage(0, 0, IID_IPropertyBag, prop_bag.ReceiveVoid());
- if (FAILED(hr)) {
- moniker.Release();
- continue;
- }
-
- device_id.Reset();
- hr = prop_bag->Read(L"DevicePath", device_id.Receive(), 0);
- if (FAILED(hr)) {
- DVLOG(1) << "Couldn't read a device's DevicePath.";
- return;
- }
- if (device.id() == base::SysWideToUTF8(V_BSTR(&device_id)))
- break;
- moniker.Release();
+ // Walk the capture devices. No need to check for device presence again, that
+ // is caught in GetDeviceFilter(). "google camera adapter" and old VFW devices
+ // are already skipped in the previous GetDeviceNames() enumeration.
+ base::win::ScopedComPtr<IBaseFilter> capture_filter;
+ hr = VideoCaptureDeviceWin::GetDeviceFilter(device,
+ capture_filter.Receive());
+ if (!capture_filter) {
+ DVLOG(2) << "Failed to create capture filter.";
+ return;
}
- if (moniker.get()) {
- base::win::ScopedComPtr<IBaseFilter> capture_filter;
- hr = VideoCaptureDeviceWin::GetDeviceFilter(device,
- capture_filter.Receive());
- if (!capture_filter) {
- DVLOG(2) << "Failed to create capture filter.";
- return;
- }
+ base::win::ScopedComPtr<IPin> output_capture_pin(
+ VideoCaptureDeviceWin::GetPin(capture_filter,
+ PINDIR_OUTPUT,
+ PIN_CATEGORY_CAPTURE));
+ if (!output_capture_pin) {
+ DVLOG(2) << "Failed to get capture output pin";
+ return;
+ }
- base::win::ScopedComPtr<IPin> output_capture_pin(
- VideoCaptureDeviceWin::GetPin(capture_filter,
- PINDIR_OUTPUT,
- PIN_CATEGORY_CAPTURE));
- if (!output_capture_pin) {
- DVLOG(2) << "Failed to get capture output pin";
- return;
- }
+ ScopedComPtr<IAMStreamConfig> stream_config;
+ hr = output_capture_pin.QueryInterface(stream_config.Receive());
+ if (FAILED(hr)) {
+ DVLOG(2) << "Failed to get IAMStreamConfig interface from "
+ "capture device";
+ return;
+ }
- ScopedComPtr<IAMStreamConfig> stream_config;
- hr = output_capture_pin.QueryInterface(stream_config.Receive());
- if (FAILED(hr)) {
- DVLOG(2) << "Failed to get IAMStreamConfig interface from "
- "capture device";
- return;
- }
+ int count = 0, size = 0;
+ hr = stream_config->GetNumberOfCapabilities(&count, &size);
+ if (FAILED(hr)) {
+ DVLOG(2) << "Failed to GetNumberOfCapabilities";
+ return;
+ }
- int count = 0, size = 0;
- hr = stream_config->GetNumberOfCapabilities(&count, &size);
- if (FAILED(hr)) {
- DVLOG(2) << "Failed to GetNumberOfCapabilities";
+ scoped_ptr<BYTE[]> caps(new BYTE[size]);
+ for (int i = 0; i < count; ++i) {
+ VideoCaptureDeviceWin::ScopedMediaType media_type;
+ hr = stream_config->GetStreamCaps(i, media_type.Receive(), caps.get());
+ // GetStreamCaps() may return S_FALSE, so don't use FAILED() or SUCCEED()
+ // macros here since they'll trigger incorrectly.
+ if (hr != S_OK) {
+ DVLOG(2) << "Failed to GetStreamCaps";
return;
}
- scoped_ptr<BYTE[]> caps(new BYTE[size]);
- for (int i = 0; i < count; ++i) {
- VideoCaptureDeviceWin::ScopedMediaType media_type;
- hr = stream_config->GetStreamCaps(i, media_type.Receive(), caps.get());
- // GetStreamCaps() may return S_FALSE, so don't use FAILED() or SUCCEED()
- // macros here since they'll trigger incorrectly.
- if (hr != S_OK) {
- DVLOG(2) << "Failed to GetStreamCaps";
- return;
- }
-
- if (media_type->majortype == MEDIATYPE_Video &&
- media_type->formattype == FORMAT_VideoInfo) {
- VideoCaptureFormat format;
- format.pixel_format =
- VideoCaptureDeviceWin::TranslateMediaSubtypeToPixelFormat(
- media_type->subtype);
- if (format.pixel_format == PIXEL_FORMAT_UNKNOWN)
- continue;
- VIDEOINFOHEADER* h =
- reinterpret_cast<VIDEOINFOHEADER*>(media_type->pbFormat);
- format.frame_size.SetSize(h->bmiHeader.biWidth,
- h->bmiHeader.biHeight);
- // Trust the frame rate from the VIDEOINFOHEADER.
- format.frame_rate = (h->AvgTimePerFrame > 0) ?
- static_cast<int>(kSecondsToReferenceTime / h->AvgTimePerFrame) :
- 0;
- formats->push_back(format);
- DVLOG(1) << device.name() << " resolution: "
- << format.frame_size.ToString() << ", fps: " << format.frame_rate
- << ", pixel format: " << format.pixel_format;
- }
+ if (media_type->majortype == MEDIATYPE_Video &&
+ media_type->formattype == FORMAT_VideoInfo) {
+ VideoCaptureFormat format;
+ format.pixel_format =
+ VideoCaptureDeviceWin::TranslateMediaSubtypeToPixelFormat(
+ media_type->subtype);
+ if (format.pixel_format == PIXEL_FORMAT_UNKNOWN)
+ continue;
+ VIDEOINFOHEADER* h =
+ reinterpret_cast<VIDEOINFOHEADER*>(media_type->pbFormat);
+ format.frame_size.SetSize(h->bmiHeader.biWidth,
+ h->bmiHeader.biHeight);
+ // Trust the frame rate from the VIDEOINFOHEADER.
+ format.frame_rate = (h->AvgTimePerFrame > 0) ?
+ kSecondsToReferenceTime / static_cast<float>(h->AvgTimePerFrame) :
+ 0.0f;
+ formats->push_back(format);
+ DVLOG(1) << device.name() << " resolution: "
+ << format.frame_size.ToString() << ", fps: " << format.frame_rate
+ << ", pixel format: " << format.pixel_format;
}
}
}
@@ -309,18 +289,21 @@ static void GetDeviceSupportedFormatsMediaFoundation(
return;
}
- HRESULT hr;
base::win::ScopedComPtr<IMFSourceReader> reader;
- if (FAILED(hr = MFCreateSourceReaderFromMediaSource(source, NULL,
- reader.Receive()))) {
+ HRESULT hr =
+ MFCreateSourceReaderFromMediaSource(source, NULL, reader.Receive());
+ if (FAILED(hr)) {
DLOG(ERROR) << "MFCreateSourceReaderFromMediaSource: " << std::hex << hr;
return;
}
DWORD stream_index = 0;
ScopedComPtr<IMFMediaType> type;
- while (SUCCEEDED(hr = reader->GetNativeMediaType(
- MF_SOURCE_READER_FIRST_VIDEO_STREAM, stream_index, type.Receive()))) {
+ for (hr = reader->GetNativeMediaType(kFirstVideoStream, stream_index,
+ type.Receive());
+ SUCCEEDED(hr);
+ hr = reader->GetNativeMediaType(kFirstVideoStream, stream_index,
+ type.Receive())) {
UINT32 width, height;
hr = MFGetAttributeSize(type, MF_MT_FRAME_SIZE, &width, &height);
if (FAILED(hr)) {
@@ -336,7 +319,8 @@ static void GetDeviceSupportedFormatsMediaFoundation(
DLOG(ERROR) << "MFGetAttributeSize: " << std::hex << hr;
return;
}
- capture_format.frame_rate = denominator ? numerator / denominator : 0;
+ capture_format.frame_rate = denominator
+ ? static_cast<float>(numerator) / denominator : 0.0f;
GUID type_guid;
hr = type->GetGUID(MF_MT_SUBTYPE, &type_guid);
@@ -402,14 +386,13 @@ scoped_ptr<VideoCaptureDevice> VideoCaptureDeviceFactoryWin::Create(
}
if (!static_cast<VideoCaptureDeviceMFWin*>(device.get())->Init(source))
device.reset();
- } else if (device_name.capture_api_type() ==
- VideoCaptureDevice::Name::DIRECT_SHOW) {
+ } else {
+ DCHECK_EQ(device_name.capture_api_type(),
+ VideoCaptureDevice::Name::DIRECT_SHOW);
device.reset(new VideoCaptureDeviceWin(device_name));
DVLOG(1) << " DirectShow Device: " << device_name.name();
if (!static_cast<VideoCaptureDeviceWin*>(device.get())->Init())
device.reset();
- } else {
- NOTREACHED() << " Couldn't recognize VideoCaptureDevice type";
}
return device.Pass();
}
diff --git a/media/video/capture/win/video_capture_device_mf_win.cc b/media/video/capture/win/video_capture_device_mf_win.cc
index 07c7612f03..de1f6eb2df 100644
--- a/media/video/capture/win/video_capture_device_mf_win.cc
+++ b/media/video/capture/win/video_capture_device_mf_win.cc
@@ -71,8 +71,11 @@ HRESULT FillCapabilities(IMFSourceReader* source,
DWORD stream_index = 0;
ScopedComPtr<IMFMediaType> type;
HRESULT hr;
- while (SUCCEEDED(hr = source->GetNativeMediaType(
- MF_SOURCE_READER_FIRST_VIDEO_STREAM, stream_index, type.Receive()))) {
+ for (hr = source->GetNativeMediaType(kFirstVideoStream, stream_index,
+ type.Receive());
+ SUCCEEDED(hr);
+ hr = source->GetNativeMediaType(kFirstVideoStream, stream_index,
+ type.Receive())) {
VideoCaptureCapabilityWin capability(stream_index++);
if (FillCapabilitiesFromType(type, &capability))
capabilities->Add(capability);
@@ -194,12 +197,12 @@ const std::string VideoCaptureDevice::Name::GetModel() const {
const size_t vid_location = unique_id_.find(kVidPrefix);
if (vid_location == std::string::npos ||
vid_location + vid_prefix_size + kVidPidSize > unique_id_.size()) {
- return "";
+ return std::string();
}
const size_t pid_location = unique_id_.find(kPidPrefix);
if (pid_location == std::string::npos ||
pid_location + pid_prefix_size + kVidPidSize > unique_id_.size()) {
- return "";
+ return std::string();
}
std::string id_vendor =
unique_id_.substr(vid_location + vid_prefix_size, kVidPidSize);
@@ -245,34 +248,34 @@ void VideoCaptureDeviceMFWin::AllocateAndStart(
CapabilityList capabilities;
HRESULT hr = S_OK;
- if (!reader_ || FAILED(hr = FillCapabilities(reader_, &capabilities))) {
- OnError(hr);
- return;
- }
-
- VideoCaptureCapabilityWin found_capability =
- capabilities.GetBestMatchedFormat(
- params.requested_format.frame_size.width(),
- params.requested_format.frame_size.height(),
- params.requested_format.frame_rate);
-
- ScopedComPtr<IMFMediaType> type;
- if (FAILED(hr = reader_->GetNativeMediaType(
- MF_SOURCE_READER_FIRST_VIDEO_STREAM, found_capability.stream_index,
- type.Receive())) ||
- FAILED(hr = reader_->SetCurrentMediaType(
- MF_SOURCE_READER_FIRST_VIDEO_STREAM, NULL, type))) {
- OnError(hr);
- return;
+ if (reader_) {
+ hr = FillCapabilities(reader_, &capabilities);
+ if (SUCCEEDED(hr)) {
+ VideoCaptureCapabilityWin found_capability =
+ capabilities.GetBestMatchedFormat(
+ params.requested_format.frame_size.width(),
+ params.requested_format.frame_size.height(),
+ params.requested_format.frame_rate);
+
+ ScopedComPtr<IMFMediaType> type;
+ hr = reader_->GetNativeMediaType(
+ kFirstVideoStream, found_capability.stream_index, type.Receive());
+ if (SUCCEEDED(hr)) {
+ hr = reader_->SetCurrentMediaType(kFirstVideoStream, NULL, type);
+ if (SUCCEEDED(hr)) {
+ hr = reader_->ReadSample(kFirstVideoStream, 0, NULL, NULL, NULL,
+ NULL);
+ if (SUCCEEDED(hr)) {
+ capture_format_ = found_capability.supported_format;
+ capture_ = true;
+ return;
+ }
+ }
+ }
+ }
}
- if (FAILED(hr = reader_->ReadSample(MF_SOURCE_READER_FIRST_VIDEO_STREAM, 0,
- NULL, NULL, NULL, NULL))) {
- OnError(hr);
- return;
- }
- capture_format_ = found_capability.supported_format;
- capture_ = true;
+ OnError(hr);
}
void VideoCaptureDeviceMFWin::StopAndDeAllocate() {
@@ -285,8 +288,8 @@ void VideoCaptureDeviceMFWin::StopAndDeAllocate() {
if (capture_) {
capture_ = false;
callback_->SetSignalOnFlush(&flushed);
- HRESULT hr = reader_->Flush(MF_SOURCE_READER_ALL_STREAMS);
- wait = SUCCEEDED(hr);
+ wait = SUCCEEDED(reader_->Flush(
+ static_cast<DWORD>(MF_SOURCE_READER_ALL_STREAMS)));
if (!wait) {
callback_->SetSignalOnFlush(NULL);
}
@@ -315,8 +318,8 @@ void VideoCaptureDeviceMFWin::OnIncomingCapturedData(
}
if (capture_) {
- HRESULT hr = reader_->ReadSample(MF_SOURCE_READER_FIRST_VIDEO_STREAM, 0,
- NULL, NULL, NULL, NULL);
+ HRESULT hr =
+ reader_->ReadSample(kFirstVideoStream, 0, NULL, NULL, NULL, NULL);
if (FAILED(hr)) {
// If running the *VideoCap* unit tests on repeat, this can sometimes
// fail with HRESULT_FROM_WINHRESULT_FROM_WIN32(ERROR_INVALID_FUNCTION).
diff --git a/media/video/capture/win/video_capture_device_mf_win.h b/media/video/capture/win/video_capture_device_mf_win.h
index 476a455a2a..0d2c207da8 100644
--- a/media/video/capture/win/video_capture_device_mf_win.h
+++ b/media/video/capture/win/video_capture_device_mf_win.h
@@ -26,6 +26,9 @@ namespace media {
class MFReaderCallback;
+const DWORD kFirstVideoStream =
+ static_cast<DWORD>(MF_SOURCE_READER_FIRST_VIDEO_STREAM);
+
class MEDIA_EXPORT VideoCaptureDeviceMFWin
: public base::NonThreadSafe,
public VideoCaptureDevice {
diff --git a/media/video/capture/win/video_capture_device_win.cc b/media/video/capture/win/video_capture_device_win.cc
index b533de9e07..e47c5cbf05 100644
--- a/media/video/capture/win/video_capture_device_win.cc
+++ b/media/video/capture/win/video_capture_device_win.cc
@@ -142,6 +142,7 @@ VideoPixelFormat VideoCaptureDeviceWin::TranslateMediaSubtypeToPixelFormat(
{ MEDIASUBTYPE_MJPG, PIXEL_FORMAT_MJPEG },
{ MEDIASUBTYPE_UYVY, PIXEL_FORMAT_UYVY },
{ MEDIASUBTYPE_ARGB32, PIXEL_FORMAT_ARGB },
+ { kMediaSubTypeHDYC, PIXEL_FORMAT_UYVY },
};
for (size_t i = 0; i < ARRAYSIZE_UNSAFE(pixel_formats); ++i) {
if (sub_type == pixel_formats[i].sub_type)
@@ -355,6 +356,10 @@ void VideoCaptureDeviceWin::AllocateAndStart(
// Connect the MJPEG filter to the Capture filter.
hr += graph_builder_->ConnectDirect(output_mjpg_pin_, input_sink_pin_,
NULL);
+ } else if (media_type->subtype == kMediaSubTypeHDYC) {
+ // HDYC pixel format, used by the DeckLink capture card, needs an AVI
+ // decompressor filter after source, let |graph_builder_| add it.
+ hr = graph_builder_->Connect(output_capture_pin_, input_sink_pin_);
} else {
hr = graph_builder_->ConnectDirect(output_capture_pin_, input_sink_pin_,
NULL);
diff --git a/media/video_capture_android_jni_headers.target.darwin-arm.mk b/media/video_capture_android_jni_headers.target.darwin-arm.mk
index 9e9d2a3036..12856bb901 100644
--- a/media/video_capture_android_jni_headers.target.darwin-arm.mk
+++ b/media/video_capture_android_jni_headers.target.darwin-arm.mk
@@ -88,9 +88,9 @@ MY_CFLAGS_Debug := \
-Wno-unused-but-set-variable \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
+ -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Debug := \
@@ -109,6 +109,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -146,6 +147,9 @@ LOCAL_CPPFLAGS_Debug := \
-fvisibility-inlines-hidden \
-Wsign-compare \
-Wno-abi \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -207,6 +211,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -245,6 +250,9 @@ LOCAL_CPPFLAGS_Release := \
-fvisibility-inlines-hidden \
-Wsign-compare \
-Wno-abi \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
diff --git a/media/video_capture_android_jni_headers.target.darwin-arm64.mk b/media/video_capture_android_jni_headers.target.darwin-arm64.mk
index c000fd55e4..9356049848 100644
--- a/media/video_capture_android_jni_headers.target.darwin-arm64.mk
+++ b/media/video_capture_android_jni_headers.target.darwin-arm64.mk
@@ -78,7 +78,6 @@ MY_CFLAGS_Debug := \
-Wno-unused-but-set-variable \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
-funwind-tables
@@ -99,6 +98,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -135,6 +135,9 @@ LOCAL_CPPFLAGS_Debug := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -167,7 +170,6 @@ MY_CFLAGS_Release := \
-fno-ident \
-fdata-sections \
-ffunction-sections \
- -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Release := \
@@ -186,6 +188,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -223,6 +226,9 @@ LOCAL_CPPFLAGS_Release := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
diff --git a/media/video_capture_android_jni_headers.target.darwin-mips.mk b/media/video_capture_android_jni_headers.target.darwin-mips.mk
index 13584be2b5..b509b90921 100644
--- a/media/video_capture_android_jni_headers.target.darwin-mips.mk
+++ b/media/video_capture_android_jni_headers.target.darwin-mips.mk
@@ -82,9 +82,9 @@ MY_CFLAGS_Debug := \
-Wno-unused-but-set-variable \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
+ -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Debug := \
@@ -103,6 +103,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -140,6 +141,9 @@ LOCAL_CPPFLAGS_Debug := \
-fvisibility-inlines-hidden \
-Wsign-compare \
-Wno-uninitialized \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -195,6 +199,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -233,6 +238,9 @@ LOCAL_CPPFLAGS_Release := \
-fvisibility-inlines-hidden \
-Wsign-compare \
-Wno-uninitialized \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
diff --git a/media/video_capture_android_jni_headers.target.darwin-x86.mk b/media/video_capture_android_jni_headers.target.darwin-x86.mk
index 2314cabe50..dfdd79d0c8 100644
--- a/media/video_capture_android_jni_headers.target.darwin-x86.mk
+++ b/media/video_capture_android_jni_headers.target.darwin-x86.mk
@@ -83,9 +83,9 @@ MY_CFLAGS_Debug := \
-fno-stack-protector \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
+ -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Debug := \
@@ -104,6 +104,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -140,6 +141,9 @@ LOCAL_CPPFLAGS_Debug := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -196,6 +200,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -233,6 +238,9 @@ LOCAL_CPPFLAGS_Release := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
diff --git a/media/video_capture_android_jni_headers.target.darwin-x86_64.mk b/media/video_capture_android_jni_headers.target.darwin-x86_64.mk
index 6d4d6c963b..9e04f1da57 100644
--- a/media/video_capture_android_jni_headers.target.darwin-x86_64.mk
+++ b/media/video_capture_android_jni_headers.target.darwin-x86_64.mk
@@ -82,9 +82,9 @@ MY_CFLAGS_Debug := \
-Wno-unused-but-set-variable \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
+ -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Debug := \
@@ -103,6 +103,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -139,6 +140,9 @@ LOCAL_CPPFLAGS_Debug := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -194,6 +198,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -231,6 +236,9 @@ LOCAL_CPPFLAGS_Release := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
diff --git a/media/video_capture_android_jni_headers.target.linux-arm.mk b/media/video_capture_android_jni_headers.target.linux-arm.mk
index 9e9d2a3036..12856bb901 100644
--- a/media/video_capture_android_jni_headers.target.linux-arm.mk
+++ b/media/video_capture_android_jni_headers.target.linux-arm.mk
@@ -88,9 +88,9 @@ MY_CFLAGS_Debug := \
-Wno-unused-but-set-variable \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
+ -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Debug := \
@@ -109,6 +109,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -146,6 +147,9 @@ LOCAL_CPPFLAGS_Debug := \
-fvisibility-inlines-hidden \
-Wsign-compare \
-Wno-abi \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -207,6 +211,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -245,6 +250,9 @@ LOCAL_CPPFLAGS_Release := \
-fvisibility-inlines-hidden \
-Wsign-compare \
-Wno-abi \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
diff --git a/media/video_capture_android_jni_headers.target.linux-arm64.mk b/media/video_capture_android_jni_headers.target.linux-arm64.mk
index c000fd55e4..9356049848 100644
--- a/media/video_capture_android_jni_headers.target.linux-arm64.mk
+++ b/media/video_capture_android_jni_headers.target.linux-arm64.mk
@@ -78,7 +78,6 @@ MY_CFLAGS_Debug := \
-Wno-unused-but-set-variable \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
-funwind-tables
@@ -99,6 +98,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -135,6 +135,9 @@ LOCAL_CPPFLAGS_Debug := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -167,7 +170,6 @@ MY_CFLAGS_Release := \
-fno-ident \
-fdata-sections \
-ffunction-sections \
- -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Release := \
@@ -186,6 +188,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -223,6 +226,9 @@ LOCAL_CPPFLAGS_Release := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
diff --git a/media/video_capture_android_jni_headers.target.linux-mips.mk b/media/video_capture_android_jni_headers.target.linux-mips.mk
index 13584be2b5..b509b90921 100644
--- a/media/video_capture_android_jni_headers.target.linux-mips.mk
+++ b/media/video_capture_android_jni_headers.target.linux-mips.mk
@@ -82,9 +82,9 @@ MY_CFLAGS_Debug := \
-Wno-unused-but-set-variable \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
+ -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Debug := \
@@ -103,6 +103,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -140,6 +141,9 @@ LOCAL_CPPFLAGS_Debug := \
-fvisibility-inlines-hidden \
-Wsign-compare \
-Wno-uninitialized \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -195,6 +199,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -233,6 +238,9 @@ LOCAL_CPPFLAGS_Release := \
-fvisibility-inlines-hidden \
-Wsign-compare \
-Wno-uninitialized \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
diff --git a/media/video_capture_android_jni_headers.target.linux-x86.mk b/media/video_capture_android_jni_headers.target.linux-x86.mk
index 2314cabe50..dfdd79d0c8 100644
--- a/media/video_capture_android_jni_headers.target.linux-x86.mk
+++ b/media/video_capture_android_jni_headers.target.linux-x86.mk
@@ -83,9 +83,9 @@ MY_CFLAGS_Debug := \
-fno-stack-protector \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
+ -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Debug := \
@@ -104,6 +104,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -140,6 +141,9 @@ LOCAL_CPPFLAGS_Debug := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -196,6 +200,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -233,6 +238,9 @@ LOCAL_CPPFLAGS_Release := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
diff --git a/media/video_capture_android_jni_headers.target.linux-x86_64.mk b/media/video_capture_android_jni_headers.target.linux-x86_64.mk
index 6d4d6c963b..9e04f1da57 100644
--- a/media/video_capture_android_jni_headers.target.linux-x86_64.mk
+++ b/media/video_capture_android_jni_headers.target.linux-x86_64.mk
@@ -82,9 +82,9 @@ MY_CFLAGS_Debug := \
-Wno-unused-but-set-variable \
-Os \
-g \
- -fomit-frame-pointer \
-fdata-sections \
-ffunction-sections \
+ -fomit-frame-pointer \
-funwind-tables
MY_DEFS_Debug := \
@@ -103,6 +103,7 @@ MY_DEFS_Debug := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -139,6 +140,9 @@ LOCAL_CPPFLAGS_Debug := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo
@@ -194,6 +198,7 @@ MY_DEFS_Release := \
'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
'-DENABLE_EGLIMAGE=1' \
'-DCLD_VERSION=1' \
+ '-DCLD_DATA_FROM_STATIC' \
'-DENABLE_PRINTING=1' \
'-DENABLE_MANAGED_USERS=1' \
'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
@@ -231,6 +236,9 @@ LOCAL_CPPFLAGS_Release := \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare \
+ -std=gnu++11 \
+ -Wno-narrowing \
+ -Wno-literal-suffix \
-Wno-non-virtual-dtor \
-Wno-sign-promo