Diffstat (limited to 'webrtc/modules/video_coding/main')
-rw-r--r--  webrtc/modules/video_coding/main/interface/mock/mock_vcm_callbacks.h  35
-rw-r--r--  webrtc/modules/video_coding/main/interface/video_coding.h  544
-rw-r--r--  webrtc/modules/video_coding/main/interface/video_coding_defines.h  201
-rw-r--r--  webrtc/modules/video_coding/main/source/OWNERS  5
-rw-r--r--  webrtc/modules/video_coding/main/source/codec_database.cc  687
-rw-r--r--  webrtc/modules/video_coding/main/source/codec_database.h  184
-rw-r--r--  webrtc/modules/video_coding/main/source/codec_timer.cc  136
-rw-r--r--  webrtc/modules/video_coding/main/source/codec_timer.h  62
-rw-r--r--  webrtc/modules/video_coding/main/source/content_metrics_processing.cc  125
-rw-r--r--  webrtc/modules/video_coding/main/source/content_metrics_processing.h  76
-rw-r--r--  webrtc/modules/video_coding/main/source/decoding_state.cc  223
-rw-r--r--  webrtc/modules/video_coding/main/source/decoding_state.h  70
-rw-r--r--  webrtc/modules/video_coding/main/source/decoding_state_unittest.cc  449
-rw-r--r--  webrtc/modules/video_coding/main/source/encoded_frame.cc  229
-rw-r--r--  webrtc/modules/video_coding/main/source/encoded_frame.h  127
-rw-r--r--  webrtc/modules/video_coding/main/source/fec_tables_xor.h  6481
-rw-r--r--  webrtc/modules/video_coding/main/source/frame_buffer.cc  297
-rw-r--r--  webrtc/modules/video_coding/main/source/frame_buffer.h  92
-rw-r--r--  webrtc/modules/video_coding/main/source/generic_decoder.cc  198
-rw-r--r--  webrtc/modules/video_coding/main/source/generic_decoder.h  112
-rw-r--r--  webrtc/modules/video_coding/main/source/generic_encoder.cc  298
-rw-r--r--  webrtc/modules/video_coding/main/source/generic_encoder.h  142
-rw-r--r--  webrtc/modules/video_coding/main/source/inter_frame_delay.cc  114
-rw-r--r--  webrtc/modules/video_coding/main/source/inter_frame_delay.h  66
-rw-r--r--  webrtc/modules/video_coding/main/source/internal_defines.h  68
-rw-r--r--  webrtc/modules/video_coding/main/source/jitter_buffer.cc  1339
-rw-r--r--  webrtc/modules/video_coding/main/source/jitter_buffer.h  396
-rw-r--r--  webrtc/modules/video_coding/main/source/jitter_buffer_common.h  72
-rw-r--r--  webrtc/modules/video_coding/main/source/jitter_buffer_unittest.cc  2575
-rw-r--r--  webrtc/modules/video_coding/main/source/jitter_estimator.cc  482
-rw-r--r--  webrtc/modules/video_coding/main/source/jitter_estimator.h  165
-rw-r--r--  webrtc/modules/video_coding/main/source/jitter_estimator_tests.cc  160
-rw-r--r--  webrtc/modules/video_coding/main/source/media_opt_util.cc  774
-rw-r--r--  webrtc/modules/video_coding/main/source/media_opt_util.h  364
-rw-r--r--  webrtc/modules/video_coding/main/source/media_optimization.cc  648
-rw-r--r--  webrtc/modules/video_coding/main/source/media_optimization.h  180
-rw-r--r--  webrtc/modules/video_coding/main/source/media_optimization_unittest.cc  155
-rw-r--r--  webrtc/modules/video_coding/main/source/nack_fec_tables.h  126
-rw-r--r--  webrtc/modules/video_coding/main/source/packet.cc  154
-rw-r--r--  webrtc/modules/video_coding/main/source/packet.h  59
-rw-r--r--  webrtc/modules/video_coding/main/source/qm_select.cc  958
-rw-r--r--  webrtc/modules/video_coding/main/source/qm_select.h  373
-rw-r--r--  webrtc/modules/video_coding/main/source/qm_select_data.h  227
-rw-r--r--  webrtc/modules/video_coding/main/source/qm_select_unittest.cc  1311
-rw-r--r--  webrtc/modules/video_coding/main/source/receiver.cc  268
-rw-r--r--  webrtc/modules/video_coding/main/source/receiver.h  92
-rw-r--r--  webrtc/modules/video_coding/main/source/receiver_unittest.cc  526
-rw-r--r--  webrtc/modules/video_coding/main/source/rtt_filter.cc  202
-rw-r--r--  webrtc/modules/video_coding/main/source/rtt_filter.h  68
-rw-r--r--  webrtc/modules/video_coding/main/source/session_info.cc  580
-rw-r--r--  webrtc/modules/video_coding/main/source/session_info.h  172
-rw-r--r--  webrtc/modules/video_coding/main/source/session_info_unittest.cc  1064
-rw-r--r--  webrtc/modules/video_coding/main/source/test/stream_generator.cc  127
-rw-r--r--  webrtc/modules/video_coding/main/source/test/stream_generator.h  72
-rw-r--r--  webrtc/modules/video_coding/main/source/timestamp_map.cc  65
-rw-r--r--  webrtc/modules/video_coding/main/source/timestamp_map.h  47
-rw-r--r--  webrtc/modules/video_coding/main/source/timing.cc  279
-rw-r--r--  webrtc/modules/video_coding/main/source/timing.h  127
-rw-r--r--  webrtc/modules/video_coding/main/source/timing_unittest.cc  147
-rw-r--r--  webrtc/modules/video_coding/main/source/video_coding_impl.cc  359
-rw-r--r--  webrtc/modules/video_coding/main/source/video_coding_impl.h  237
-rw-r--r--  webrtc/modules/video_coding/main/source/video_coding_robustness_unittest.cc  238
-rw-r--r--  webrtc/modules/video_coding/main/source/video_receiver.cc  578
-rw-r--r--  webrtc/modules/video_coding/main/source/video_receiver_unittest.cc  211
-rw-r--r--  webrtc/modules/video_coding/main/source/video_sender.cc  376
-rw-r--r--  webrtc/modules/video_coding/main/source/video_sender_unittest.cc  494
-rw-r--r--  webrtc/modules/video_coding/main/test/plotJitterEstimate.m  52
-rw-r--r--  webrtc/modules/video_coding/main/test/plotReceiveTrace.m  213
-rw-r--r--  webrtc/modules/video_coding/main/test/plotTimingTest.m  62
-rw-r--r--  webrtc/modules/video_coding/main/test/receiver_tests.h  43
-rw-r--r--  webrtc/modules/video_coding/main/test/release_test.h  17
-rw-r--r--  webrtc/modules/video_coding/main/test/rtp_player.cc  493
-rw-r--r--  webrtc/modules/video_coding/main/test/rtp_player.h  97
-rw-r--r--  webrtc/modules/video_coding/main/test/subfigure.m  30
-rw-r--r--  webrtc/modules/video_coding/main/test/test_util.cc  139
-rw-r--r--  webrtc/modules/video_coding/main/test/test_util.h  86
-rw-r--r--  webrtc/modules/video_coding/main/test/tester_main.cc  75
-rw-r--r--  webrtc/modules/video_coding/main/test/vcm_payload_sink_factory.cc  210
-rw-r--r--  webrtc/modules/video_coding/main/test/vcm_payload_sink_factory.h  63
-rw-r--r--  webrtc/modules/video_coding/main/test/video_rtp_play.cc  88
-rw-r--r--  webrtc/modules/video_coding/main/test/video_source.h  82
81 files changed, 29618 insertions, 0 deletions
diff --git a/webrtc/modules/video_coding/main/interface/mock/mock_vcm_callbacks.h b/webrtc/modules/video_coding/main/interface/mock/mock_vcm_callbacks.h
new file mode 100644
index 0000000000..302d4a3a13
--- /dev/null
+++ b/webrtc/modules/video_coding/main/interface/mock/mock_vcm_callbacks.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_MAIN_INTERFACE_MOCK_MOCK_VCM_CALLBACKS_H_
+#define WEBRTC_MODULES_VIDEO_CODING_MAIN_INTERFACE_MOCK_MOCK_VCM_CALLBACKS_H_
+
+#include "testing/gmock/include/gmock/gmock.h"
+#include "webrtc/modules/video_coding/main/interface/video_coding_defines.h"
+#include "webrtc/typedefs.h"
+
+namespace webrtc {
+
+class MockVCMFrameTypeCallback : public VCMFrameTypeCallback {
+ public:
+ MOCK_METHOD0(RequestKeyFrame, int32_t());
+ MOCK_METHOD1(SliceLossIndicationRequest,
+ int32_t(const uint64_t pictureId));
+};
+
+class MockPacketRequestCallback : public VCMPacketRequestCallback {
+ public:
+ MOCK_METHOD2(ResendPackets, int32_t(const uint16_t* sequenceNumbers,
+ uint16_t length));
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_CODING_MAIN_INTERFACE_MOCK_MOCK_VCM_CALLBACKS_H_
diff --git a/webrtc/modules/video_coding/main/interface/video_coding.h b/webrtc/modules/video_coding/main/interface/video_coding.h
new file mode 100644
index 0000000000..67f7b635cb
--- /dev/null
+++ b/webrtc/modules/video_coding/main/interface/video_coding.h
@@ -0,0 +1,544 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_INTERFACE_VIDEO_CODING_H_
+#define WEBRTC_MODULES_INTERFACE_VIDEO_CODING_H_
+
+#if defined(WEBRTC_WIN)
+// Workaround on Windows: some Windows headers define CreateEvent as a macro
+// expanding to either CreateEventW or CreateEventA. We use the same name, so
+// if a Windows header is included elsewhere, our declaration and definition
+// of CreateEvent() can stop matching and cause compilation errors. For
+// consistency, we include the main Windows header here.
+#include <windows.h>
+#endif
+
+#include "webrtc/modules/interface/module.h"
+#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/video_coding/main/interface/video_coding_defines.h"
+#include "webrtc/system_wrappers/include/event_wrapper.h"
+#include "webrtc/video_frame.h"
+
+namespace webrtc
+{
+
+class Clock;
+class EncodedImageCallback;
+class VideoEncoder;
+class VideoDecoder;
+struct CodecSpecificInfo;
+
+class EventFactory {
+ public:
+ virtual ~EventFactory() {}
+
+ virtual EventWrapper* CreateEvent() = 0;
+};
+
+class EventFactoryImpl : public EventFactory {
+ public:
+ virtual ~EventFactoryImpl() {}
+
+ virtual EventWrapper* CreateEvent() {
+ return EventWrapper::Create();
+ }
+};
+
+// Used to indicate which decode with errors mode should be used.
+enum VCMDecodeErrorMode {
+ kNoErrors, // Never decode with errors. Video will freeze
+ // if nack is disabled.
+ kSelectiveErrors, // Frames that are determined decodable in
+ // VCMSessionInfo may be decoded with missing
+ // packets. As not all incomplete frames will be
+ // decodable, video will freeze if nack is disabled.
+ kWithErrors // Release frames as needed. Errors may be
+ // introduced as some encoded frames may not be
+ // complete.
+};
+
+class VideoCodingModule : public Module
+{
+public:
+ enum SenderNackMode {
+ kNackNone,
+ kNackAll,
+ kNackSelective
+ };
+
+ enum ReceiverRobustness {
+ kNone,
+ kHardNack,
+ kSoftNack,
+ kReferenceSelection
+ };
+
+ static VideoCodingModule* Create(
+ Clock* clock,
+ VideoEncoderRateObserver* encoder_rate_observer,
+ VCMQMSettingsCallback* qm_settings_callback);
+
+ static VideoCodingModule* Create(Clock* clock, EventFactory* event_factory);
+
+ static void Destroy(VideoCodingModule* module);
+
+ // Get number of supported codecs
+ //
+ // Return value : Number of supported codecs
+ static uint8_t NumberOfCodecs();
+
+  // Get supported codec settings using a codec id
+ //
+ // Input:
+ // - listId : Id or index of the codec to look up
+ // - codec : Memory where the codec settings will be stored
+ //
+ // Return value : VCM_OK, on success
+ // VCM_PARAMETER_ERROR if codec not supported or id too high
+ static int32_t Codec(const uint8_t listId, VideoCodec* codec);
+
+ // Get supported codec settings using codec type
+ //
+ // Input:
+ // - codecType : The codec type to get settings for
+ // - codec : Memory where the codec settings will be stored
+ //
+ // Return value : VCM_OK, on success
+ // VCM_PARAMETER_ERROR if codec not supported
+ static int32_t Codec(VideoCodecType codecType, VideoCodec* codec);
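+
+  // Illustrative sketch (editor's example, not part of the original API
+  // docs): enumerating the default settings of all supported codecs.
+  //
+  //   VideoCodec codec;
+  //   for (uint8_t i = 0; i < VideoCodingModule::NumberOfCodecs(); ++i) {
+  //     if (VideoCodingModule::Codec(i, &codec) == VCM_OK)
+  //       printf("codec %u: %s\n", i, codec.plName);
+  //   }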
+
+ /*
+ * Sender
+ */
+
+ // Registers a codec to be used for encoding. Calling this
+ // API multiple times overwrites any previously registered codecs.
+ //
+ // NOTE: Must be called on the thread that constructed the VCM instance.
+ //
+ // Input:
+ // - sendCodec : Settings for the codec to be registered.
+ // - numberOfCores : The number of cores the codec is allowed
+ // to use.
+ // - maxPayloadSize : The maximum size each payload is allowed
+ // to have. Usually MTU - overhead.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t RegisterSendCodec(const VideoCodec* sendCodec,
+ uint32_t numberOfCores,
+ uint32_t maxPayloadSize) = 0;
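+
+  // Illustrative sender setup (editor's sketch; the single core and the
+  // 1200-byte payload size are assumptions, error handling omitted):
+  //
+  //   VideoCodec send_codec;
+  //   VideoCodingModule::Codec(kVideoCodecVP8, &send_codec);
+  //   vcm->RegisterSendCodec(&send_codec, 1 /* cores */, 1200 /* bytes */);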
+
+ // Get the current send codec in use.
+ //
+ // If a codec has not been set yet, the |id| property of the return value
+ // will be 0 and |name| empty.
+ //
+ // NOTE: This method intentionally does not hold locks and minimizes data
+ // copying. It must be called on the thread where the VCM was constructed.
+ virtual const VideoCodec& GetSendCodec() const = 0;
+
+ // DEPRECATED: Use GetSendCodec() instead.
+ //
+ // API to get the current send codec in use.
+ //
+ // Input:
+ // - currentSendCodec : Address where the sendCodec will be written.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ //
+ // NOTE: The returned codec information is not guaranteed to be current when
+ // the call returns. This method acquires a lock that is aligned with
+ // video encoding, so it should be assumed to be allowed to block for
+ // several milliseconds.
+ virtual int32_t SendCodec(VideoCodec* currentSendCodec) const = 0;
+
+ // DEPRECATED: Use GetSendCodec() instead.
+ //
+ // API to get the current send codec type
+ //
+ // Return value : Codec type, on success.
+ // kVideoCodecUnknown, on error or if no send codec is set
+ // NOTE: Same notes apply as for SendCodec() above.
+ virtual VideoCodecType SendCodec() const = 0;
+
+ // Register an external encoder object. This can not be used together with
+ // external decoder callbacks.
+ //
+ // Input:
+ // - externalEncoder : Encoder object to be used for encoding frames inserted
+ // with the AddVideoFrame API.
+  //      - payloadType : The payload type to which this encoder is bound.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t RegisterExternalEncoder(VideoEncoder* externalEncoder,
+ uint8_t payloadType,
+ bool internalSource = false) = 0;
+
+ // API to get currently configured encoder target bitrate in bits/s.
+ //
+ // Return value : 0, on success.
+ // < 0, on error.
+ virtual int Bitrate(unsigned int* bitrate) const = 0;
+
+ // API to get currently configured encoder target frame rate.
+ //
+ // Return value : 0, on success.
+ // < 0, on error.
+ virtual int FrameRate(unsigned int* framerate) const = 0;
+
+ // Sets the parameters describing the send channel. These parameters are inputs to the
+  // Media Optimization inside the VCM and also specify the target bit rate for the
+ // encoder. Bit rate used by NACK should already be compensated for by the user.
+ //
+ // Input:
+ // - target_bitrate : The target bitrate for VCM in bits/s.
+ // - lossRate : Fractions of lost packets the past second.
+ // (loss rate in percent = 100 * packetLoss / 255)
+ // - rtt : Current round-trip time in ms.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t SetChannelParameters(uint32_t target_bitrate,
+ uint8_t lossRate,
+ int64_t rtt) = 0;
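+
+  // Worked example (editor's sketch; the bitrate and RTT values below are
+  // assumptions): a measured 10% packet loss maps to a lossRate of about 26,
+  // since 100 * 26 / 255 ~= 10.2.
+  //
+  //   vcm->SetChannelParameters(500000 /* 500 kbps */, 26, 100 /* rtt ms */);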
+
+ // Sets the parameters describing the receive channel. These parameters are inputs to the
+ // Media Optimization inside the VCM.
+ //
+ // Input:
+  //      - rtt : Current round-trip time in ms; in a conference
+  //              scenario, the RTT to the receiver with the most
+  //              available bandwidth.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t SetReceiveChannelParameters(int64_t rtt) = 0;
+
+ // Register a transport callback which will be called to deliver the encoded data and
+ // side information.
+ //
+ // Input:
+ // - transport : The callback object to register.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t RegisterTransportCallback(VCMPacketizationCallback* transport) = 0;
+
+  // Register a video output information callback which will be called to deliver information
+ // about the video stream produced by the encoder, for instance the average frame rate and
+ // bit rate.
+ //
+ // Input:
+  //      - sendStats : The callback object to register.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t RegisterSendStatisticsCallback(
+ VCMSendStatisticsCallback* sendStats) = 0;
+
+ // Register a video protection callback which will be called to deliver
+ // the requested FEC rate and NACK status (on/off).
+ //
+ // Input:
+ // - protection : The callback object to register.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t RegisterProtectionCallback(VCMProtectionCallback* protection) = 0;
+
+ // Enable or disable a video protection method.
+ //
+ // Input:
+ // - videoProtection : The method to enable or disable.
+ // - enable : True if the method should be enabled, false if
+ // it should be disabled.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t SetVideoProtection(VCMVideoProtection videoProtection,
+ bool enable) = 0;
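+
+  // E.g. (editor's sketch): enable combined NACK/FEC protection.
+  //
+  //   vcm->SetVideoProtection(kProtectionNackFEC, true);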
+
+ // Add one raw video frame to the encoder. This function does all the necessary
+ // processing, then decides what frame type to encode, or if the frame should be
+ // dropped. If the frame should be encoded it passes the frame to the encoder
+ // before it returns.
+ //
+ // Input:
+ // - videoFrame : Video frame to encode.
+  //      - contentMetrics : Optional content metrics for the frame.
+  //      - codecSpecificInfo : Extra codec information, e.g., pre-parsed in-band signaling.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t AddVideoFrame(
+ const VideoFrame& videoFrame,
+ const VideoContentMetrics* contentMetrics = NULL,
+ const CodecSpecificInfo* codecSpecificInfo = NULL) = 0;
+
+  // Requests that the next encoded frame be an intra frame (key frame).
+  //
+  // Input:
+  //      - stream_index : Index of the simulcast stream for which an intra
+  //                       frame is requested.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t IntraFrameRequest(int stream_index) = 0;
+
+  // Enables or disables the frame dropper, which drops frames when the encoder
+  // over-uses its bit rate. This API is designed to be used when the encoded frames
+ // are supposed to be stored to an AVI file, or when the I420 codec is used and the
+ // target bit rate shouldn't affect the frame rate.
+ //
+ // Input:
+ // - enable : True to enable the setting, false to disable it.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t EnableFrameDropper(bool enable) = 0;
+
+
+ /*
+ * Receiver
+ */
+
+ // Register possible receive codecs, can be called multiple times for different codecs.
+ // The module will automatically switch between registered codecs depending on the
+ // payload type of incoming frames. The actual decoder will be created when needed.
+ //
+ // Input:
+ // - receiveCodec : Settings for the codec to be registered.
+ // - numberOfCores : Number of CPU cores that the decoder is allowed to use.
+ // - requireKeyFrame : Set this to true if you don't want any delta frames
+ // to be decoded until the first key frame has been decoded.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t RegisterReceiveCodec(const VideoCodec* receiveCodec,
+ int32_t numberOfCores,
+ bool requireKeyFrame = false) = 0;
+
+ // Register an externally defined decoder/renderer object. Can be a decoder only or a
+  // decoder coupled with a renderer. Note that RegisterReceiveCodec must also be
+  // called for this decoder to be used for decoding incoming streams.
+ //
+ // Input:
+ // - externalDecoder : The external decoder/renderer object.
+ // - payloadType : The payload type which this decoder should be
+ // registered to.
+ // - internalRenderTiming : True if the internal renderer (if any) of the decoder
+  //                               object is able to render frames at a given time in ms.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t RegisterExternalDecoder(VideoDecoder* externalDecoder,
+ uint8_t payloadType,
+ bool internalRenderTiming) = 0;
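+
+  // Illustrative call order (editor's sketch; payload type 100 is an
+  // assumption): the codec must also be registered with RegisterReceiveCodec
+  // before the external decoder is used.
+  //
+  //   vcm->RegisterExternalDecoder(my_decoder, 100, false);
+  //   vcm->RegisterReceiveCodec(&receive_codec, 1);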
+
+ // Register a receive callback. Will be called whenever there is a new frame ready
+ // for rendering.
+ //
+ // Input:
+ // - receiveCallback : The callback object to be used by the module when a
+ // frame is ready for rendering.
+ // De-register with a NULL pointer.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t RegisterReceiveCallback(VCMReceiveCallback* receiveCallback) = 0;
+
+ // Register a receive statistics callback which will be called to deliver information
+ // about the video stream received by the receiving side of the VCM, for instance the
+ // average frame rate and bit rate.
+ //
+ // Input:
+ // - receiveStats : The callback object to register.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t RegisterReceiveStatisticsCallback(
+ VCMReceiveStatisticsCallback* receiveStats) = 0;
+
+ // Register a decoder timing callback which will be called to deliver
+ // information about the timing of the decoder in the receiving side of the
+ // VCM, for instance the current and maximum frame decode latency.
+ //
+ // Input:
+ // - decoderTiming : The callback object to register.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t RegisterDecoderTimingCallback(
+ VCMDecoderTimingCallback* decoderTiming) = 0;
+
+ // Register a frame type request callback. This callback will be called when the
+ // module needs to request specific frame types from the send side.
+ //
+ // Input:
+ // - frameTypeCallback : The callback object to be used by the module when
+ // requesting a specific type of frame from the send side.
+ // De-register with a NULL pointer.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t RegisterFrameTypeCallback(
+ VCMFrameTypeCallback* frameTypeCallback) = 0;
+
+ // Registers a callback which is called whenever the receive side of the VCM
+ // encounters holes in the packet sequence and needs packets to be retransmitted.
+ //
+ // Input:
+ // - callback : The callback to be registered in the VCM.
+ //
+ // Return value : VCM_OK, on success.
+ // <0, on error.
+ virtual int32_t RegisterPacketRequestCallback(
+ VCMPacketRequestCallback* callback) = 0;
+
+ // Waits for the next frame in the jitter buffer to become complete
+ // (waits no longer than maxWaitTimeMs), then passes it to the decoder for decoding.
+ // Should be called as often as possible to get the most out of the decoder.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t Decode(uint16_t maxWaitTimeMs = 200) = 0;
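+
+  // Typical decode loop on a dedicated thread (editor's sketch; the 50 ms
+  // wait is an assumption):
+  //
+  //   while (decoding) {
+  //     vcm->Decode(50);  // Blocks up to 50 ms waiting for a complete frame.
+  //   }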
+
+ // Registers a callback which conveys the size of the render buffer.
+ virtual int RegisterRenderBufferSizeCallback(
+ VCMRenderBufferSizeCallback* callback) = 0;
+
+ // Reset the decoder state to the initial state.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t ResetDecoder() = 0;
+
+ // API to get the codec which is currently used for decoding by the module.
+ //
+ // Input:
+  //      - currentReceiveCodec : Address where the current receive codec will be written.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t ReceiveCodec(VideoCodec* currentReceiveCodec) const = 0;
+
+ // API to get the codec type currently used for decoding by the module.
+ //
+  // Return value      : Codec type, on success.
+ // kVideoCodecUnknown, on error or if no receive codec is registered
+ virtual VideoCodecType ReceiveCodec() const = 0;
+
+ // Insert a parsed packet into the receiver side of the module. Will be placed in the
+ // jitter buffer waiting for the frame to become complete. Returns as soon as the packet
+ // has been placed in the jitter buffer.
+ //
+ // Input:
+ // - incomingPayload : Payload of the packet.
+ // - payloadLength : Length of the payload.
+ // - rtpInfo : The parsed header.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t IncomingPacket(const uint8_t* incomingPayload,
+ size_t payloadLength,
+ const WebRtcRTPHeader& rtpInfo) = 0;
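+
+  // Illustrative receive path (editor's sketch; ParseRtpHeader and the
+  // packet fields are hypothetical, not part of this API):
+  //
+  //   WebRtcRTPHeader rtp_info = ParseRtpHeader(packet);
+  //   vcm->IncomingPacket(packet.payload, packet.payload_length, rtp_info);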
+
+  // Set the minimum playout delay (used for lip-sync). This is the minimum delay
+  // required to sync with audio. Not included in VideoCodingModule::Delay().
+  // Defaults to 0 ms.
+  //
+  // Input:
+  //      - minPlayoutDelayMs : Minimum playout delay in ms.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t SetMinimumPlayoutDelay(uint32_t minPlayoutDelayMs) = 0;
+
+ // Set the time required by the renderer to render a frame.
+ //
+ // Input:
+ // - timeMS : The time in ms required by the renderer to render a frame.
+ //
+ // Return value : VCM_OK, on success.
+ // < 0, on error.
+ virtual int32_t SetRenderDelay(uint32_t timeMS) = 0;
+
+ // The total delay desired by the VCM. Can be less than the minimum
+ // delay set with SetMinimumPlayoutDelay.
+ //
+ // Return value : Total delay in ms, on success.
+ // < 0, on error.
+ virtual int32_t Delay() const = 0;
+
+ // Returns the number of packets discarded by the jitter buffer due to being
+ // too late. This can include duplicated packets which arrived after the
+  // frame was sent to the decoder. Packets which were prematurely NACKed may
+  // therefore also be counted.
+ virtual uint32_t DiscardedPackets() const = 0;
+
+
+ // Robustness APIs
+
+ // Set the receiver robustness mode. The mode decides how the receiver
+ // responds to losses in the stream. The type of counter-measure (soft or
+ // hard NACK, dual decoder, RPS, etc.) is selected through the
+ // robustnessMode parameter. The errorMode parameter decides if it is
+ // allowed to display frames corrupted by losses. Note that not all
+ // combinations of the two parameters are feasible. An error will be
+ // returned for invalid combinations.
+ // Input:
+ // - robustnessMode : selected robustness mode.
+ // - errorMode : selected error mode.
+ //
+ // Return value : VCM_OK, on success;
+ // < 0, on error.
+ virtual int SetReceiverRobustnessMode(ReceiverRobustness robustnessMode,
+ VCMDecodeErrorMode errorMode) = 0;
+
+ // Set the decode error mode. The mode decides which errors (if any) are
+ // allowed in decodable frames. Note that setting decode_error_mode to
+ // anything other than kWithErrors without enabling nack will cause
+ // long-term freezes (resulting from frequent key frame requests) if
+ // packet loss occurs.
+ virtual void SetDecodeErrorMode(VCMDecodeErrorMode decode_error_mode) = 0;
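+
+  // E.g. (editor's sketch): allow decoding of frames judged decodable even
+  // when some packets are missing, which is reasonable when NACK is enabled.
+  //
+  //   vcm->SetDecodeErrorMode(kSelectiveErrors);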
+
+ // Sets the maximum number of sequence numbers that we are allowed to NACK
+ // and the oldest sequence number that we will consider to NACK. If a
+ // sequence number older than |max_packet_age_to_nack| is missing
+ // a key frame will be requested. A key frame will also be requested if the
+ // time of incomplete or non-continuous frames in the jitter buffer is above
+ // |max_incomplete_time_ms|.
+ virtual void SetNackSettings(size_t max_nack_list_size,
+ int max_packet_age_to_nack,
+ int max_incomplete_time_ms) = 0;
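+
+  // Example values (editor's assumptions, not recommended defaults): NACK at
+  // most 250 packets, request a key frame for gaps older than 450 packets or
+  // for incomplete frames stalled for more than one second.
+  //
+  //   vcm->SetNackSettings(250, 450, 1000);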
+
+ // Setting a desired delay to the VCM receiver. Video rendering will be
+ // delayed by at least desired_delay_ms.
+ virtual int SetMinReceiverDelay(int desired_delay_ms) = 0;
+
+ // Lets the sender suspend video when the rate drops below
+ // |threshold_bps|, and turns back on when the rate goes back up above
+ // |threshold_bps| + |window_bps|.
+ virtual void SuspendBelowMinBitrate() = 0;
+
+ // Returns true if SuspendBelowMinBitrate is engaged and the video has been
+ // suspended due to bandwidth limitations; otherwise false.
+ virtual bool VideoSuspended() const = 0;
+
+ virtual void RegisterPreDecodeImageCallback(
+ EncodedImageCallback* observer) = 0;
+ virtual void RegisterPostEncodeImageCallback(
+ EncodedImageCallback* post_encode_callback) = 0;
+ // Releases pending decode calls, permitting faster thread shutdown.
+ virtual void TriggerDecoderShutdown() = 0;
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_INTERFACE_VIDEO_CODING_H_
diff --git a/webrtc/modules/video_coding/main/interface/video_coding_defines.h b/webrtc/modules/video_coding/main/interface/video_coding_defines.h
new file mode 100644
index 0000000000..fd38d64415
--- /dev/null
+++ b/webrtc/modules/video_coding/main/interface/video_coding_defines.h
@@ -0,0 +1,201 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_INTERFACE_VIDEO_CODING_DEFINES_H_
+#define WEBRTC_MODULES_INTERFACE_VIDEO_CODING_DEFINES_H_
+
+#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/typedefs.h"
+#include "webrtc/video_frame.h"
+
+namespace webrtc {
+
+// Error codes
+#define VCM_FRAME_NOT_READY 3
+#define VCM_REQUEST_SLI 2
+#define VCM_MISSING_CALLBACK 1
+#define VCM_OK 0
+#define VCM_GENERAL_ERROR -1
+#define VCM_LEVEL_EXCEEDED -2
+#define VCM_MEMORY -3
+#define VCM_PARAMETER_ERROR -4
+#define VCM_UNKNOWN_PAYLOAD -5
+#define VCM_CODEC_ERROR -6
+#define VCM_UNINITIALIZED -7
+#define VCM_NO_CODEC_REGISTERED -8
+#define VCM_JITTER_BUFFER_ERROR -9
+#define VCM_OLD_PACKET_ERROR -10
+#define VCM_NO_FRAME_DECODED -11
+#define VCM_ERROR_REQUEST_SLI -12
+#define VCM_NOT_IMPLEMENTED -20
+
+enum { kDefaultStartBitrateKbps = 300 };
+
+enum VCMVideoProtection {
+ kProtectionNone,
+ kProtectionNack,
+ kProtectionFEC,
+ kProtectionNackFEC,
+};
+
+enum VCMTemporalDecimation {
+ kBitrateOverUseDecimation,
+};
+
+struct VCMFrameCount {
+ uint32_t numKeyFrames;
+ uint32_t numDeltaFrames;
+};
+
+// Callback class used for sending data ready to be packetized
+class VCMPacketizationCallback {
+ public:
+ virtual int32_t SendData(uint8_t payloadType,
+ const EncodedImage& encoded_image,
+ const RTPFragmentationHeader& fragmentationHeader,
+ const RTPVideoHeader* rtpVideoHdr) = 0;
+
+ protected:
+ virtual ~VCMPacketizationCallback() {
+ }
+};
+
+// Callback class used for passing decoded frames which are ready to be rendered.
+class VCMReceiveCallback {
+ public:
+ virtual int32_t FrameToRender(VideoFrame& videoFrame) = 0;
+ virtual int32_t ReceivedDecodedReferenceFrame(
+ const uint64_t pictureId) {
+ return -1;
+ }
+ // Called when the current receive codec changes.
+ virtual void OnIncomingPayloadType(int payload_type) {}
+
+ protected:
+ virtual ~VCMReceiveCallback() {
+ }
+};
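+
+// Illustrative sketch of a minimal receive callback (editor's example; the
+// class name and the DrawFrame sink are assumptions):
+//
+//   class Renderer : public VCMReceiveCallback {
+//    public:
+//     virtual int32_t FrameToRender(VideoFrame& videoFrame) {
+//       DrawFrame(videoFrame);  // Hand the frame to the rendering sink.
+//       return VCM_OK;
+//     }
+//   };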
+
+// Callback class used for informing the user of the bit rate and frame rate produced by the
+// encoder.
+class VCMSendStatisticsCallback {
+ public:
+ virtual int32_t SendStatistics(const uint32_t bitRate,
+ const uint32_t frameRate) = 0;
+
+ protected:
+ virtual ~VCMSendStatisticsCallback() {
+ }
+};
+
+// Callback class used for informing the user of the incoming bit rate and frame rate.
+class VCMReceiveStatisticsCallback {
+ public:
+ virtual void OnReceiveRatesUpdated(uint32_t bitRate, uint32_t frameRate) = 0;
+ virtual void OnDiscardedPacketsUpdated(int discarded_packets) = 0;
+ virtual void OnFrameCountsUpdated(const FrameCounts& frame_counts) = 0;
+
+ protected:
+ virtual ~VCMReceiveStatisticsCallback() {
+ }
+};
+
+// Callback class used for informing the user of decode timing info.
+class VCMDecoderTimingCallback {
+ public:
+ virtual void OnDecoderTiming(int decode_ms,
+ int max_decode_ms,
+ int current_delay_ms,
+ int target_delay_ms,
+ int jitter_buffer_ms,
+ int min_playout_delay_ms,
+ int render_delay_ms) = 0;
+
+ protected:
+ virtual ~VCMDecoderTimingCallback() {}
+};
+
+// Callback class used for telling the user how to configure the FEC;
+// the rates sent during the last second are returned to the VCM.
+class VCMProtectionCallback {
+ public:
+ virtual int ProtectionRequest(const FecProtectionParams* delta_params,
+ const FecProtectionParams* key_params,
+ uint32_t* sent_video_rate_bps,
+ uint32_t* sent_nack_rate_bps,
+ uint32_t* sent_fec_rate_bps) = 0;
+
+ protected:
+ virtual ~VCMProtectionCallback() {
+ }
+};
+
+class VideoEncoderRateObserver {
+ public:
+ virtual ~VideoEncoderRateObserver() {}
+ virtual void OnSetRates(uint32_t bitrate_bps, int framerate) = 0;
+};
+
+// Callback class used for telling the user which frame type is needed to continue decoding.
+// Typically a key frame when the stream has been corrupted in some way.
+class VCMFrameTypeCallback {
+ public:
+ virtual int32_t RequestKeyFrame() = 0;
+ virtual int32_t SliceLossIndicationRequest(
+ const uint64_t pictureId) {
+ return -1;
+ }
+
+ protected:
+ virtual ~VCMFrameTypeCallback() {
+ }
+};
+
+// Callback class used for telling the user about which packet sequence numbers are currently
+// missing and need to be resent.
+class VCMPacketRequestCallback {
+ public:
+ virtual int32_t ResendPackets(const uint16_t* sequenceNumbers,
+ uint16_t length) = 0;
+
+ protected:
+ virtual ~VCMPacketRequestCallback() {
+ }
+};
+
+// Callback used to inform the user of the desired resolution,
+// as selected by Media Optimization (Quality Modes).
+class VCMQMSettingsCallback {
+ public:
+ virtual int32_t SetVideoQMSettings(const uint32_t frameRate,
+ const uint32_t width,
+ const uint32_t height) = 0;
+
+ virtual void SetTargetFramerate(int frame_rate) = 0;
+
+ protected:
+ virtual ~VCMQMSettingsCallback() {
+ }
+};
+
+// Callback class used for telling the user about the size (in time) of the
+// render buffer, that is the size in time of the complete continuous frames.
+class VCMRenderBufferSizeCallback {
+ public:
+ virtual void RenderBufferSizeMs(int buffer_size_ms) = 0;
+
+ protected:
+ virtual ~VCMRenderBufferSizeCallback() {
+ }
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_INTERFACE_VIDEO_CODING_DEFINES_H_
diff --git a/webrtc/modules/video_coding/main/source/OWNERS b/webrtc/modules/video_coding/main/source/OWNERS
new file mode 100644
index 0000000000..3ee6b4bf5f
--- /dev/null
+++ b/webrtc/modules/video_coding/main/source/OWNERS
@@ -0,0 +1,5 @@
+
+# These are for the common case of adding or renaming files. If you're doing
+# structural changes, please get a review from a reviewer in this file.
+per-file *.gyp=*
+per-file *.gypi=*
diff --git a/webrtc/modules/video_coding/main/source/codec_database.cc b/webrtc/modules/video_coding/main/source/codec_database.cc
new file mode 100644
index 0000000000..bfdc609e3c
--- /dev/null
+++ b/webrtc/modules/video_coding/main/source/codec_database.cc
@@ -0,0 +1,687 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/video_coding/main/source/codec_database.h"
+
+#include <assert.h>
+
+#include "webrtc/base/checks.h"
+#include "webrtc/engine_configurations.h"
+#ifdef VIDEOCODEC_H264
+#include "webrtc/modules/video_coding/codecs/h264/include/h264.h"
+#endif
+#ifdef VIDEOCODEC_I420
+#include "webrtc/modules/video_coding/codecs/i420/include/i420.h"
+#endif
+#ifdef VIDEOCODEC_VP8
+#include "webrtc/modules/video_coding/codecs/vp8/include/vp8.h"
+#endif
+#ifdef VIDEOCODEC_VP9
+#include "webrtc/modules/video_coding/codecs/vp9/include/vp9.h"
+#endif
+#include "webrtc/modules/video_coding/main/source/internal_defines.h"
+#include "webrtc/system_wrappers/include/logging.h"
+
+namespace {
+const size_t kDefaultPayloadSize = 1440;
+const uint8_t kDefaultPayloadType = 100;
+}
+
+namespace webrtc {
+
+VideoCodecVP8 VideoEncoder::GetDefaultVp8Settings() {
+ VideoCodecVP8 vp8_settings;
+ memset(&vp8_settings, 0, sizeof(vp8_settings));
+
+ vp8_settings.resilience = kResilientStream;
+ vp8_settings.numberOfTemporalLayers = 1;
+ vp8_settings.denoisingOn = true;
+ vp8_settings.errorConcealmentOn = false;
+ vp8_settings.automaticResizeOn = false;
+ vp8_settings.frameDroppingOn = true;
+ vp8_settings.keyFrameInterval = 3000;
+
+ return vp8_settings;
+}
+
+VideoCodecVP9 VideoEncoder::GetDefaultVp9Settings() {
+ VideoCodecVP9 vp9_settings;
+ memset(&vp9_settings, 0, sizeof(vp9_settings));
+
+ vp9_settings.resilience = 1;
+ vp9_settings.numberOfTemporalLayers = 1;
+ vp9_settings.denoisingOn = false;
+ vp9_settings.frameDroppingOn = true;
+ vp9_settings.keyFrameInterval = 3000;
+ vp9_settings.adaptiveQpMode = true;
+ vp9_settings.automaticResizeOn = true;
+ vp9_settings.numberOfSpatialLayers = 1;
+ vp9_settings.flexibleMode = false;
+ return vp9_settings;
+}
+
+VideoCodecH264 VideoEncoder::GetDefaultH264Settings() {
+ VideoCodecH264 h264_settings;
+ memset(&h264_settings, 0, sizeof(h264_settings));
+
+ h264_settings.profile = kProfileBase;
+ h264_settings.frameDroppingOn = true;
+ h264_settings.keyFrameInterval = 3000;
+ h264_settings.spsData = NULL;
+ h264_settings.spsLen = 0;
+ h264_settings.ppsData = NULL;
+ h264_settings.ppsLen = 0;
+
+ return h264_settings;
+}
+
+VCMDecoderMapItem::VCMDecoderMapItem(VideoCodec* settings,
+ int number_of_cores,
+ bool require_key_frame)
+ : settings(settings),
+ number_of_cores(number_of_cores),
+ require_key_frame(require_key_frame) {
+ assert(number_of_cores >= 0);
+}
+
+VCMExtDecoderMapItem::VCMExtDecoderMapItem(
+ VideoDecoder* external_decoder_instance,
+ uint8_t payload_type,
+ bool internal_render_timing)
+ : payload_type(payload_type),
+ external_decoder_instance(external_decoder_instance),
+ internal_render_timing(internal_render_timing) {
+}
+
+VCMCodecDataBase::VCMCodecDataBase(
+ VideoEncoderRateObserver* encoder_rate_observer,
+ VCMEncodedFrameCallback* encoded_frame_callback)
+ : number_of_cores_(0),
+ max_payload_size_(kDefaultPayloadSize),
+ periodic_key_frames_(false),
+ pending_encoder_reset_(true),
+ send_codec_(),
+ receive_codec_(),
+ encoder_payload_type_(0),
+ external_encoder_(NULL),
+ internal_source_(false),
+ encoder_rate_observer_(encoder_rate_observer),
+ encoded_frame_callback_(encoded_frame_callback),
+ ptr_decoder_(NULL),
+ dec_map_(),
+ dec_external_map_() {}
+
+VCMCodecDataBase::~VCMCodecDataBase() {
+ ResetSender();
+ ResetReceiver();
+}
+
+int VCMCodecDataBase::NumberOfCodecs() {
+ return VCM_NUM_VIDEO_CODECS_AVAILABLE;
+}
+
+bool VCMCodecDataBase::Codec(int list_id,
+ VideoCodec* settings) {
+ if (!settings) {
+ return false;
+ }
+ if (list_id >= VCM_NUM_VIDEO_CODECS_AVAILABLE) {
+ return false;
+ }
+ memset(settings, 0, sizeof(VideoCodec));
+ switch (list_id) {
+#ifdef VIDEOCODEC_VP8
+ case VCM_VP8_IDX: {
+ strncpy(settings->plName, "VP8", 4);
+ settings->codecType = kVideoCodecVP8;
+ // 96 to 127 dynamic payload types for video codecs.
+ settings->plType = kDefaultPayloadType;
+ settings->startBitrate = kDefaultStartBitrateKbps;
+ settings->minBitrate = VCM_MIN_BITRATE;
+ settings->maxBitrate = 0;
+ settings->maxFramerate = VCM_DEFAULT_FRAME_RATE;
+ settings->width = VCM_DEFAULT_CODEC_WIDTH;
+ settings->height = VCM_DEFAULT_CODEC_HEIGHT;
+ settings->numberOfSimulcastStreams = 0;
+ settings->qpMax = 56;
+ settings->codecSpecific.VP8 = VideoEncoder::GetDefaultVp8Settings();
+ return true;
+ }
+#endif
+#ifdef VIDEOCODEC_VP9
+ case VCM_VP9_IDX: {
+ strncpy(settings->plName, "VP9", 4);
+ settings->codecType = kVideoCodecVP9;
+ // 96 to 127 dynamic payload types for video codecs.
+ settings->plType = kDefaultPayloadType;
+ settings->startBitrate = 100;
+ settings->minBitrate = VCM_MIN_BITRATE;
+ settings->maxBitrate = 0;
+ settings->maxFramerate = VCM_DEFAULT_FRAME_RATE;
+ settings->width = VCM_DEFAULT_CODEC_WIDTH;
+ settings->height = VCM_DEFAULT_CODEC_HEIGHT;
+ settings->numberOfSimulcastStreams = 0;
+ settings->qpMax = 56;
+ settings->codecSpecific.VP9 = VideoEncoder::GetDefaultVp9Settings();
+ return true;
+ }
+#endif
+#ifdef VIDEOCODEC_H264
+ case VCM_H264_IDX: {
+ strncpy(settings->plName, "H264", 5);
+ settings->codecType = kVideoCodecH264;
+ // 96 to 127 dynamic payload types for video codecs.
+ settings->plType = kDefaultPayloadType;
+ settings->startBitrate = kDefaultStartBitrateKbps;
+ settings->minBitrate = VCM_MIN_BITRATE;
+ settings->maxBitrate = 0;
+ settings->maxFramerate = VCM_DEFAULT_FRAME_RATE;
+ settings->width = VCM_DEFAULT_CODEC_WIDTH;
+ settings->height = VCM_DEFAULT_CODEC_HEIGHT;
+ settings->numberOfSimulcastStreams = 0;
+ settings->qpMax = 56;
+ settings->codecSpecific.H264 = VideoEncoder::GetDefaultH264Settings();
+ return true;
+ }
+#endif
+#ifdef VIDEOCODEC_I420
+ case VCM_I420_IDX: {
+ strncpy(settings->plName, "I420", 5);
+ settings->codecType = kVideoCodecI420;
+ // 96 to 127 dynamic payload types for video codecs.
+ settings->plType = kDefaultPayloadType;
+ // Bitrate needed for this size and framerate.
+ settings->startBitrate = 3 * VCM_DEFAULT_CODEC_WIDTH *
+ VCM_DEFAULT_CODEC_HEIGHT * 8 *
+ VCM_DEFAULT_FRAME_RATE / 1000 / 2;
+ settings->maxBitrate = settings->startBitrate;
+ settings->maxFramerate = VCM_DEFAULT_FRAME_RATE;
+ settings->width = VCM_DEFAULT_CODEC_WIDTH;
+ settings->height = VCM_DEFAULT_CODEC_HEIGHT;
+ settings->minBitrate = VCM_MIN_BITRATE;
+ settings->numberOfSimulcastStreams = 0;
+ return true;
+ }
+#endif
+ default: {
+ return false;
+ }
+ }
+}
+
+bool VCMCodecDataBase::Codec(VideoCodecType codec_type,
+ VideoCodec* settings) {
+ for (int i = 0; i < VCMCodecDataBase::NumberOfCodecs(); i++) {
+ const bool ret = VCMCodecDataBase::Codec(i, settings);
+ if (!ret) {
+ return false;
+ }
+ if (codec_type == settings->codecType) {
+ return true;
+ }
+ }
+ return false;
+}
+
+void VCMCodecDataBase::ResetSender() {
+ DeleteEncoder();
+ periodic_key_frames_ = false;
+}
+
+// Assuming at most one registered encoder; since only one is used, there is no need for more.
+bool VCMCodecDataBase::SetSendCodec(const VideoCodec* send_codec,
+ int number_of_cores,
+ size_t max_payload_size) {
+ RTC_DCHECK(send_codec);
+ if (max_payload_size == 0) {
+ max_payload_size = kDefaultPayloadSize;
+ }
+ RTC_DCHECK_GE(number_of_cores, 1);
+ RTC_DCHECK_GE(send_codec->plType, 1);
+ // Make sure the start bit rate is sane...
+ RTC_DCHECK_LE(send_codec->startBitrate, 1000000u);
+ RTC_DCHECK(send_codec->codecType != kVideoCodecUnknown);
+ bool reset_required = pending_encoder_reset_;
+ if (number_of_cores_ != number_of_cores) {
+ number_of_cores_ = number_of_cores;
+ reset_required = true;
+ }
+ if (max_payload_size_ != max_payload_size) {
+ max_payload_size_ = max_payload_size;
+ reset_required = true;
+ }
+
+ VideoCodec new_send_codec;
+ memcpy(&new_send_codec, send_codec, sizeof(new_send_codec));
+
+ if (new_send_codec.maxBitrate == 0) {
+ // max is one bit per pixel
+ new_send_codec.maxBitrate = (static_cast<int>(send_codec->height) *
+ static_cast<int>(send_codec->width) *
+ static_cast<int>(send_codec->maxFramerate)) / 1000;
+ if (send_codec->startBitrate > new_send_codec.maxBitrate) {
+ // But if the user tries to set a higher start bit rate we will
+ // increase the max accordingly.
+ new_send_codec.maxBitrate = send_codec->startBitrate;
+ }
+ }
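+  // E.g. (editor's note): for 640x480 at 30 fps the one-bit-per-pixel cap
+  // above is 640 * 480 * 30 / 1000 = 9216 kbps.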
+
+ if (new_send_codec.startBitrate > new_send_codec.maxBitrate)
+ new_send_codec.startBitrate = new_send_codec.maxBitrate;
+
+ if (!reset_required) {
+ reset_required = RequiresEncoderReset(new_send_codec);
+ }
+
+ memcpy(&send_codec_, &new_send_codec, sizeof(send_codec_));
+
+ if (!reset_required) {
+ encoded_frame_callback_->SetPayloadType(send_codec_.plType);
+ return true;
+ }
+
+ // If encoder exists, will destroy it and create new one.
+ DeleteEncoder();
+ RTC_DCHECK_EQ(encoder_payload_type_, send_codec_.plType)
+ << "Encoder not registered for payload type " << send_codec_.plType;
+ ptr_encoder_.reset(
+ new VCMGenericEncoder(external_encoder_, encoder_rate_observer_,
+ encoded_frame_callback_, internal_source_));
+ encoded_frame_callback_->SetPayloadType(send_codec_.plType);
+ encoded_frame_callback_->SetInternalSource(internal_source_);
+ if (ptr_encoder_->InitEncode(&send_codec_, number_of_cores_,
+ max_payload_size_) < 0) {
+ LOG(LS_ERROR) << "Failed to initialize video encoder.";
+ DeleteEncoder();
+ return false;
+ }
+
+ // Intentionally don't check return value since the encoder registration
+ // shouldn't fail because the codec doesn't support changing the periodic key
+ // frame setting.
+ ptr_encoder_->SetPeriodicKeyFrames(periodic_key_frames_);
+
+ pending_encoder_reset_ = false;
+
+ return true;
+}
+
+bool VCMCodecDataBase::SendCodec(VideoCodec* current_send_codec) const {
+ if (!ptr_encoder_) {
+ return false;
+ }
+ memcpy(current_send_codec, &send_codec_, sizeof(VideoCodec));
+ return true;
+}
+
+VideoCodecType VCMCodecDataBase::SendCodec() const {
+ if (!ptr_encoder_) {
+ return kVideoCodecUnknown;
+ }
+ return send_codec_.codecType;
+}
+
+bool VCMCodecDataBase::DeregisterExternalEncoder(
+ uint8_t payload_type, bool* was_send_codec) {
+ assert(was_send_codec);
+ *was_send_codec = false;
+ if (encoder_payload_type_ != payload_type) {
+ return false;
+ }
+ if (send_codec_.plType == payload_type) {
+ // De-register as send codec if needed.
+ DeleteEncoder();
+ memset(&send_codec_, 0, sizeof(VideoCodec));
+ *was_send_codec = true;
+ }
+ encoder_payload_type_ = 0;
+ external_encoder_ = NULL;
+ internal_source_ = false;
+ return true;
+}
+
+void VCMCodecDataBase::RegisterExternalEncoder(
+ VideoEncoder* external_encoder,
+ uint8_t payload_type,
+ bool internal_source) {
+ // Since only one encoder can be used at a given time, only one external
+ // encoder can be registered/used.
+ external_encoder_ = external_encoder;
+ encoder_payload_type_ = payload_type;
+ internal_source_ = internal_source;
+ pending_encoder_reset_ = true;
+}
+
+bool VCMCodecDataBase::RequiresEncoderReset(const VideoCodec& new_send_codec) {
+ if (ptr_encoder_ == NULL) {
+ return true;
+ }
+
+ // Does not check startBitrate or maxFramerate
+ if (new_send_codec.codecType != send_codec_.codecType ||
+ strcmp(new_send_codec.plName, send_codec_.plName) != 0 ||
+ new_send_codec.plType != send_codec_.plType ||
+ new_send_codec.width != send_codec_.width ||
+ new_send_codec.height != send_codec_.height ||
+ new_send_codec.maxBitrate != send_codec_.maxBitrate ||
+ new_send_codec.minBitrate != send_codec_.minBitrate ||
+ new_send_codec.qpMax != send_codec_.qpMax ||
+ new_send_codec.numberOfSimulcastStreams !=
+ send_codec_.numberOfSimulcastStreams ||
+ new_send_codec.mode != send_codec_.mode ||
+ new_send_codec.extra_options != send_codec_.extra_options) {
+ return true;
+ }
+
+ switch (new_send_codec.codecType) {
+ case kVideoCodecVP8:
+ if (memcmp(&new_send_codec.codecSpecific.VP8,
+ &send_codec_.codecSpecific.VP8,
+ sizeof(new_send_codec.codecSpecific.VP8)) != 0) {
+ return true;
+ }
+ break;
+ case kVideoCodecVP9:
+ if (memcmp(&new_send_codec.codecSpecific.VP9,
+ &send_codec_.codecSpecific.VP9,
+ sizeof(new_send_codec.codecSpecific.VP9)) != 0) {
+ return true;
+ }
+ break;
+ case kVideoCodecH264:
+ if (memcmp(&new_send_codec.codecSpecific.H264,
+ &send_codec_.codecSpecific.H264,
+ sizeof(new_send_codec.codecSpecific.H264)) != 0) {
+ return true;
+ }
+ break;
+ case kVideoCodecGeneric:
+ break;
+ // Known codecs without payload-specifics
+ case kVideoCodecI420:
+ case kVideoCodecRED:
+ case kVideoCodecULPFEC:
+ break;
+ // Unknown codec type, reset just to be sure.
+ case kVideoCodecUnknown:
+ return true;
+ }
+
+ if (new_send_codec.numberOfSimulcastStreams > 0) {
+ for (unsigned char i = 0; i < new_send_codec.numberOfSimulcastStreams;
+ ++i) {
+ if (memcmp(&new_send_codec.simulcastStream[i],
+ &send_codec_.simulcastStream[i],
+ sizeof(new_send_codec.simulcastStream[i])) !=
+ 0) {
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+VCMGenericEncoder* VCMCodecDataBase::GetEncoder() {
+ return ptr_encoder_.get();
+}
+
+bool VCMCodecDataBase::SetPeriodicKeyFrames(bool enable) {
+ periodic_key_frames_ = enable;
+ if (ptr_encoder_) {
+ return (ptr_encoder_->SetPeriodicKeyFrames(periodic_key_frames_) == 0);
+ }
+ return true;
+}
+
+void VCMCodecDataBase::ResetReceiver() {
+ ReleaseDecoder(ptr_decoder_);
+ ptr_decoder_ = NULL;
+ memset(&receive_codec_, 0, sizeof(VideoCodec));
+ while (!dec_map_.empty()) {
+ DecoderMap::iterator it = dec_map_.begin();
+ delete (*it).second;
+ dec_map_.erase(it);
+ }
+ while (!dec_external_map_.empty()) {
+ ExternalDecoderMap::iterator external_it = dec_external_map_.begin();
+ delete (*external_it).second;
+ dec_external_map_.erase(external_it);
+ }
+}
+
+bool VCMCodecDataBase::DeregisterExternalDecoder(uint8_t payload_type) {
+ ExternalDecoderMap::iterator it = dec_external_map_.find(payload_type);
+ if (it == dec_external_map_.end()) {
+ // Not found
+ return false;
+ }
+ // We can't use payload_type to check if the decoder is currently in use,
+ // because payload type may be out of date (e.g. before we decode the first
+ // frame after RegisterReceiveCodec)
+ if (ptr_decoder_ != NULL &&
+ &ptr_decoder_->_decoder == (*it).second->external_decoder_instance) {
+ // Release it if it was registered and in use.
+ ReleaseDecoder(ptr_decoder_);
+ ptr_decoder_ = NULL;
+ }
+ DeregisterReceiveCodec(payload_type);
+ delete (*it).second;
+ dec_external_map_.erase(it);
+ return true;
+}
+
+// Add the external decoder object to the list of external decoders.
+// Won't be registered as a receive codec until RegisterReceiveCodec is called.
+bool VCMCodecDataBase::RegisterExternalDecoder(
+ VideoDecoder* external_decoder,
+ uint8_t payload_type,
+ bool internal_render_timing) {
+  // If the payload type is already registered, erase the old entry and insert the new one.
+ VCMExtDecoderMapItem* ext_decoder = new VCMExtDecoderMapItem(
+ external_decoder, payload_type, internal_render_timing);
+ if (!ext_decoder) {
+ return false;
+ }
+ DeregisterExternalDecoder(payload_type);
+ dec_external_map_[payload_type] = ext_decoder;
+ return true;
+}
+
+bool VCMCodecDataBase::DecoderRegistered() const {
+ return !dec_map_.empty();
+}
+
+bool VCMCodecDataBase::RegisterReceiveCodec(
+ const VideoCodec* receive_codec,
+ int number_of_cores,
+ bool require_key_frame) {
+ if (number_of_cores < 0) {
+ return false;
+ }
+  // If the payload type is already registered, erase the old entry and insert the new one.
+ DeregisterReceiveCodec(receive_codec->plType);
+ if (receive_codec->codecType == kVideoCodecUnknown) {
+ return false;
+ }
+ VideoCodec* new_receive_codec = new VideoCodec(*receive_codec);
+ dec_map_[receive_codec->plType] = new VCMDecoderMapItem(new_receive_codec,
+ number_of_cores,
+ require_key_frame);
+ return true;
+}
+
+bool VCMCodecDataBase::DeregisterReceiveCodec(
+ uint8_t payload_type) {
+ DecoderMap::iterator it = dec_map_.find(payload_type);
+ if (it == dec_map_.end()) {
+ return false;
+ }
+ VCMDecoderMapItem* dec_item = (*it).second;
+ delete dec_item;
+ dec_map_.erase(it);
+ if (receive_codec_.plType == payload_type) {
+ // This codec is currently in use.
+ memset(&receive_codec_, 0, sizeof(VideoCodec));
+ }
+ return true;
+}
+
+bool VCMCodecDataBase::ReceiveCodec(VideoCodec* current_receive_codec) const {
+ assert(current_receive_codec);
+ if (!ptr_decoder_) {
+ return false;
+ }
+ memcpy(current_receive_codec, &receive_codec_, sizeof(VideoCodec));
+ return true;
+}
+
+VideoCodecType VCMCodecDataBase::ReceiveCodec() const {
+ if (!ptr_decoder_) {
+ return kVideoCodecUnknown;
+ }
+ return receive_codec_.codecType;
+}
+
+VCMGenericDecoder* VCMCodecDataBase::GetDecoder(
+ uint8_t payload_type, VCMDecodedFrameCallback* decoded_frame_callback) {
+ if (payload_type == receive_codec_.plType || payload_type == 0) {
+ return ptr_decoder_;
+ }
+  // Check for an existing decoder; if one exists, delete it.
+ if (ptr_decoder_) {
+ ReleaseDecoder(ptr_decoder_);
+ ptr_decoder_ = NULL;
+ memset(&receive_codec_, 0, sizeof(VideoCodec));
+ }
+ ptr_decoder_ = CreateAndInitDecoder(payload_type, &receive_codec_);
+ if (!ptr_decoder_) {
+ return NULL;
+ }
+ VCMReceiveCallback* callback = decoded_frame_callback->UserReceiveCallback();
+ if (callback) callback->OnIncomingPayloadType(receive_codec_.plType);
+ if (ptr_decoder_->RegisterDecodeCompleteCallback(decoded_frame_callback)
+ < 0) {
+ ReleaseDecoder(ptr_decoder_);
+ ptr_decoder_ = NULL;
+ memset(&receive_codec_, 0, sizeof(VideoCodec));
+ return NULL;
+ }
+ return ptr_decoder_;
+}
+
+void VCMCodecDataBase::ReleaseDecoder(VCMGenericDecoder* decoder) const {
+ if (decoder) {
+ assert(&decoder->_decoder);
+ decoder->Release();
+ if (!decoder->External()) {
+ delete &decoder->_decoder;
+ }
+ delete decoder;
+ }
+}
+
+bool VCMCodecDataBase::SupportsRenderScheduling() const {
+ const VCMExtDecoderMapItem* ext_item = FindExternalDecoderItem(
+ receive_codec_.plType);
+ if (ext_item == nullptr)
+ return true;
+ return ext_item->internal_render_timing;
+}
+
+bool VCMCodecDataBase::MatchesCurrentResolution(int width, int height) const {
+ return send_codec_.width == width && send_codec_.height == height;
+}
+
+VCMGenericDecoder* VCMCodecDataBase::CreateAndInitDecoder(
+ uint8_t payload_type,
+ VideoCodec* new_codec) const {
+ assert(new_codec);
+ const VCMDecoderMapItem* decoder_item = FindDecoderItem(payload_type);
+ if (!decoder_item) {
+ LOG(LS_ERROR) << "Can't find a decoder associated with payload type: "
+ << static_cast<int>(payload_type);
+ return NULL;
+ }
+ VCMGenericDecoder* ptr_decoder = NULL;
+ const VCMExtDecoderMapItem* external_dec_item =
+ FindExternalDecoderItem(payload_type);
+ if (external_dec_item) {
+ // External codec.
+ ptr_decoder = new VCMGenericDecoder(
+ *external_dec_item->external_decoder_instance, true);
+ } else {
+ // Create decoder.
+ ptr_decoder = CreateDecoder(decoder_item->settings->codecType);
+ }
+ if (!ptr_decoder)
+ return NULL;
+
+ if (ptr_decoder->InitDecode(decoder_item->settings.get(),
+ decoder_item->number_of_cores) < 0) {
+ ReleaseDecoder(ptr_decoder);
+ return NULL;
+ }
+ memcpy(new_codec, decoder_item->settings.get(), sizeof(VideoCodec));
+ return ptr_decoder;
+}
+
+void VCMCodecDataBase::DeleteEncoder() {
+ if (!ptr_encoder_)
+ return;
+ ptr_encoder_->Release();
+ ptr_encoder_.reset();
+}
+
+VCMGenericDecoder* VCMCodecDataBase::CreateDecoder(VideoCodecType type) const {
+ switch (type) {
+#ifdef VIDEOCODEC_VP8
+ case kVideoCodecVP8:
+ return new VCMGenericDecoder(*(VP8Decoder::Create()));
+#endif
+#ifdef VIDEOCODEC_VP9
+ case kVideoCodecVP9:
+ return new VCMGenericDecoder(*(VP9Decoder::Create()));
+#endif
+#ifdef VIDEOCODEC_I420
+ case kVideoCodecI420:
+ return new VCMGenericDecoder(*(new I420Decoder));
+#endif
+#ifdef VIDEOCODEC_H264
+ case kVideoCodecH264:
+ if (H264Decoder::IsSupported()) {
+ return new VCMGenericDecoder(*(H264Decoder::Create()));
+ }
+ break;
+#endif
+ default:
+ break;
+ }
+ LOG(LS_WARNING) << "No internal decoder of this type exists.";
+ return NULL;
+}
+
+const VCMDecoderMapItem* VCMCodecDataBase::FindDecoderItem(
+ uint8_t payload_type) const {
+ DecoderMap::const_iterator it = dec_map_.find(payload_type);
+ if (it != dec_map_.end()) {
+ return (*it).second;
+ }
+ return NULL;
+}
+
+const VCMExtDecoderMapItem* VCMCodecDataBase::FindExternalDecoderItem(
+ uint8_t payload_type) const {
+ ExternalDecoderMap::const_iterator it = dec_external_map_.find(payload_type);
+ if (it != dec_external_map_.end()) {
+ return (*it).second;
+ }
+ return NULL;
+}
+} // namespace webrtc
diff --git a/webrtc/modules/video_coding/main/source/codec_database.h b/webrtc/modules/video_coding/main/source/codec_database.h
new file mode 100644
index 0000000000..93aa9c3ba8
--- /dev/null
+++ b/webrtc/modules/video_coding/main/source/codec_database.h
@@ -0,0 +1,184 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_CODEC_DATABASE_H_
+#define WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_CODEC_DATABASE_H_
+
+#include <map>
+
+#include "webrtc/base/scoped_ptr.h"
+#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "webrtc/modules/video_coding/main/interface/video_coding.h"
+#include "webrtc/modules/video_coding/main/source/generic_decoder.h"
+#include "webrtc/modules/video_coding/main/source/generic_encoder.h"
+#include "webrtc/typedefs.h"
+
+namespace webrtc {
+
+struct VCMDecoderMapItem {
+ public:
+ VCMDecoderMapItem(VideoCodec* settings,
+ int number_of_cores,
+ bool require_key_frame);
+
+ rtc::scoped_ptr<VideoCodec> settings;
+ int number_of_cores;
+ bool require_key_frame;
+};
+
+struct VCMExtDecoderMapItem {
+ public:
+ VCMExtDecoderMapItem(VideoDecoder* external_decoder_instance,
+ uint8_t payload_type,
+ bool internal_render_timing);
+
+ uint8_t payload_type;
+ VideoDecoder* external_decoder_instance;
+ bool internal_render_timing;
+};
+
+class VCMCodecDataBase {
+ public:
+ VCMCodecDataBase(VideoEncoderRateObserver* encoder_rate_observer,
+ VCMEncodedFrameCallback* encoded_frame_callback);
+ ~VCMCodecDataBase();
+
+ // Sender Side
+  // Returns the number of supported codecs.
+ static int NumberOfCodecs();
+
+ // Returns the default settings for the codec with id |list_id|.
+ static bool Codec(int list_id, VideoCodec* settings);
+
+ // Returns the default settings for the codec with type |codec_type|.
+ static bool Codec(VideoCodecType codec_type, VideoCodec* settings);
+
+ void ResetSender();
+
+  // Sets the sender side codec and initializes the encoder given the
+  // VideoCodec struct.
+ // Returns true if the codec was successfully registered, false otherwise.
+ bool SetSendCodec(const VideoCodec* send_codec,
+ int number_of_cores,
+ size_t max_payload_size);
+
+ // Gets the current send codec. Relevant for internal codecs only.
+ // Returns true if there is a send codec, false otherwise.
+ bool SendCodec(VideoCodec* current_send_codec) const;
+
+ // Gets current send side codec type. Relevant for internal codecs only.
+ // Returns kVideoCodecUnknown if there is no send codec.
+ VideoCodecType SendCodec() const;
+
+ // Registers and initializes an external encoder object.
+ // |internal_source| should be set to true if the codec has an internal
+ // video source and doesn't need the user to provide it with frames via
+ // the Encode() method.
+ void RegisterExternalEncoder(VideoEncoder* external_encoder,
+ uint8_t payload_type,
+ bool internal_source);
+
+ // Deregisters an external encoder. Returns true if the encoder was
+ // found and deregistered, false otherwise. |was_send_codec| is set to true
+ // if the external encoder was the send codec before being deregistered.
+ bool DeregisterExternalEncoder(uint8_t payload_type, bool* was_send_codec);
+
+ VCMGenericEncoder* GetEncoder();
+
+ bool SetPeriodicKeyFrames(bool enable);
+
+ // Receiver Side
+ void ResetReceiver();
+
+ // Deregisters an external decoder object specified by |payload_type|.
+ bool DeregisterExternalDecoder(uint8_t payload_type);
+
+ // Registers an external decoder object to the payload type |payload_type|.
+ // |internal_render_timing| is set to true if the |external_decoder| has
+  // built-in rendering which is able to obey the render timestamps of the
+ // encoded frames.
+ bool RegisterExternalDecoder(VideoDecoder* external_decoder,
+ uint8_t payload_type,
+ bool internal_render_timing);
+
+ bool DecoderRegistered() const;
+
+ bool RegisterReceiveCodec(const VideoCodec* receive_codec,
+ int number_of_cores,
+ bool require_key_frame);
+
+ bool DeregisterReceiveCodec(uint8_t payload_type);
+
+ // Get current receive side codec. Relevant for internal codecs only.
+ bool ReceiveCodec(VideoCodec* current_receive_codec) const;
+
+ // Get current receive side codec type. Relevant for internal codecs only.
+ VideoCodecType ReceiveCodec() const;
+
+  // Returns a decoder specified by |payload_type|. The decoded frame callback
+  // of the decoder is set to |decoded_frame_callback|. If no such decoder
+  // already exists an instance will be created and initialized.
+  // NULL is returned if no decoder with the specified payload type was found
+  // and the function failed to create one.
+ VCMGenericDecoder* GetDecoder(
+ uint8_t payload_type, VCMDecodedFrameCallback* decoded_frame_callback);
+
+  // Deletes the memory of the decoder instance |decoder|. Used to release
+  // decoders created by CreateAndInitDecoder().
+ void ReleaseDecoder(VCMGenericDecoder* decoder) const;
+
+ // Returns true if the currently active decoder supports render scheduling,
+ // that is, it is able to render frames according to the render timestamp of
+ // the encoded frames.
+ bool SupportsRenderScheduling() const;
+
+ bool MatchesCurrentResolution(int width, int height) const;
+
+ private:
+ typedef std::map<uint8_t, VCMDecoderMapItem*> DecoderMap;
+ typedef std::map<uint8_t, VCMExtDecoderMapItem*> ExternalDecoderMap;
+
+ VCMGenericDecoder* CreateAndInitDecoder(uint8_t payload_type,
+ VideoCodec* new_codec) const;
+
+ // Determines whether a new codec has to be created or not.
+ // Checks every setting apart from maxFramerate and startBitrate.
+ bool RequiresEncoderReset(const VideoCodec& send_codec);
+
+ void DeleteEncoder();
+
+  // Creates an internal decoder of the given codec type.
+ VCMGenericDecoder* CreateDecoder(VideoCodecType type) const;
+
+ const VCMDecoderMapItem* FindDecoderItem(uint8_t payload_type) const;
+
+ const VCMExtDecoderMapItem* FindExternalDecoderItem(
+ uint8_t payload_type) const;
+
+ int number_of_cores_;
+ size_t max_payload_size_;
+ bool periodic_key_frames_;
+ bool pending_encoder_reset_;
+ VideoCodec send_codec_;
+ VideoCodec receive_codec_;
+ uint8_t encoder_payload_type_;
+ VideoEncoder* external_encoder_;
+ bool internal_source_;
+ VideoEncoderRateObserver* const encoder_rate_observer_;
+ VCMEncodedFrameCallback* const encoded_frame_callback_;
+ rtc::scoped_ptr<VCMGenericEncoder> ptr_encoder_;
+ VCMGenericDecoder* ptr_decoder_;
+ DecoderMap dec_map_;
+ ExternalDecoderMap dec_external_map_;
+}; // VCMCodecDataBase
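+
+// Usage sketch (illustrative only; the observer and callback objects are
+// assumed to be owned and supplied by the caller):
+//   VCMCodecDataBase db(rate_observer, encoded_frame_callback);
+//   VideoCodec settings;
+//   VCMCodecDataBase::Codec(kVideoCodecVP8, &settings);
+//   db.SetSendCodec(&settings, 1 /* number_of_cores */,
+//                   1440 /* max_payload_size */);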
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_CODEC_DATABASE_H_
diff --git a/webrtc/modules/video_coding/main/source/codec_timer.cc b/webrtc/modules/video_coding/main/source/codec_timer.cc
new file mode 100644
index 0000000000..a462258813
--- /dev/null
+++ b/webrtc/modules/video_coding/main/source/codec_timer.cc
@@ -0,0 +1,136 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/video_coding/main/source/codec_timer.h"
+
+#include <assert.h>
+
+namespace webrtc
+{
+
+// The first kIgnoredSampleCount samples will be ignored.
+static const int32_t kIgnoredSampleCount = 5;
+
+VCMCodecTimer::VCMCodecTimer()
+:
+_filteredMax(0),
+_ignoredSampleCount(0),
+_shortMax(0),
+_history()
+{
+ Reset();
+}
+
+int32_t VCMCodecTimer::StopTimer(int64_t startTimeMs, int64_t nowMs)
+{
+ const int32_t timeDiff = static_cast<int32_t>(nowMs - startTimeMs);
+ MaxFilter(timeDiff, nowMs);
+ return timeDiff;
+}
+
+void VCMCodecTimer::Reset()
+{
+ _filteredMax = 0;
+ _ignoredSampleCount = 0;
+ _shortMax = 0;
+ for (int i=0; i < MAX_HISTORY_SIZE; i++)
+ {
+ _history[i].shortMax = 0;
+ _history[i].timeMs = -1;
+ }
+}
+
+// Update the max-value filter
+void VCMCodecTimer::MaxFilter(int32_t decodeTime, int64_t nowMs)
+{
+ if (_ignoredSampleCount >= kIgnoredSampleCount)
+ {
+ UpdateMaxHistory(decodeTime, nowMs);
+ ProcessHistory(nowMs);
+ }
+ else
+ {
+ _ignoredSampleCount++;
+ }
+}
+
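+// Samples that fall within the current SHORT_FILTER_MS window only update
+// _shortMax; once the window expires, the window maximum is pushed to the
+// front of _history and a new window is started. ProcessHistory() then takes
+// the largest per-window maximum still inside the
+// MAX_HISTORY_SIZE * SHORT_FILTER_MS window.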
+void
+VCMCodecTimer::UpdateMaxHistory(int32_t decodeTime, int64_t now)
+{
+ if (_history[0].timeMs >= 0 &&
+ now - _history[0].timeMs < SHORT_FILTER_MS)
+ {
+ if (decodeTime > _shortMax)
+ {
+ _shortMax = decodeTime;
+ }
+ }
+ else
+ {
+ // Only add a new value to the history once a second
+ if(_history[0].timeMs == -1)
+ {
+ // First, no shift
+ _shortMax = decodeTime;
+ }
+ else
+ {
+ // Shift
+ for(int i = (MAX_HISTORY_SIZE - 2); i >= 0 ; i--)
+ {
+ _history[i+1].shortMax = _history[i].shortMax;
+ _history[i+1].timeMs = _history[i].timeMs;
+ }
+ }
+ if (_shortMax == 0)
+ {
+ _shortMax = decodeTime;
+ }
+
+ _history[0].shortMax = _shortMax;
+ _history[0].timeMs = now;
+ _shortMax = 0;
+ }
+}
+
+void
+VCMCodecTimer::ProcessHistory(int64_t nowMs)
+{
+ _filteredMax = _shortMax;
+ if (_history[0].timeMs == -1)
+ {
+ return;
+ }
+ for (int i=0; i < MAX_HISTORY_SIZE; i++)
+ {
+ if (_history[i].timeMs == -1)
+ {
+ break;
+ }
+ if (nowMs - _history[i].timeMs > MAX_HISTORY_SIZE * SHORT_FILTER_MS)
+ {
+ // This sample (and all samples after this) is too old
+ break;
+ }
+ if (_history[i].shortMax > _filteredMax)
+ {
+            // This sample is the largest one so far in the history
+ _filteredMax = _history[i].shortMax;
+ }
+ }
+}
+
+// Get the maximum observed time within a time window
+int32_t VCMCodecTimer::RequiredDecodeTimeMs(FrameType /*frameType*/) const
+{
+ return _filteredMax;
+}
+
+}  // namespace webrtc
diff --git a/webrtc/modules/video_coding/main/source/codec_timer.h b/webrtc/modules/video_coding/main/source/codec_timer.h
new file mode 100644
index 0000000000..9268e8d817
--- /dev/null
+++ b/webrtc/modules/video_coding/main/source/codec_timer.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_CODEC_TIMER_H_
+#define WEBRTC_MODULES_VIDEO_CODING_CODEC_TIMER_H_
+
+#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/typedefs.h"
+
+namespace webrtc
+{
+
+// MAX_HISTORY_SIZE * SHORT_FILTER_MS defines the window size in milliseconds
+#define MAX_HISTORY_SIZE 10
+#define SHORT_FILTER_MS 1000
+
+class VCMShortMaxSample
+{
+public:
+    VCMShortMaxSample() : shortMax(0), timeMs(-1) {}
+
+ int32_t shortMax;
+ int64_t timeMs;
+};
+
+class VCMCodecTimer
+{
+public:
+ VCMCodecTimer();
+
+ // Updates and returns the max filtered decode time.
+ int32_t StopTimer(int64_t startTimeMs, int64_t nowMs);
+
+ // Empty the list of timers.
+ void Reset();
+
+ // Get the required decode time in ms.
+ int32_t RequiredDecodeTimeMs(FrameType frameType) const;
+
+private:
+ void UpdateMaxHistory(int32_t decodeTime, int64_t now);
+ void MaxFilter(int32_t newTime, int64_t nowMs);
+ void ProcessHistory(int64_t nowMs);
+
+ int32_t _filteredMax;
+ // The number of samples ignored so far.
+ int32_t _ignoredSampleCount;
+ int32_t _shortMax;
+ VCMShortMaxSample _history[MAX_HISTORY_SIZE];
+
+};
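+
+// Usage sketch (illustrative only; |clock| and |decoder| are assumptions, not
+// part of this interface):
+//   VCMCodecTimer timer;
+//   int64_t start_ms = clock->TimeInMilliseconds();
+//   decoder->Decode(frame);  // The decode call being timed.
+//   timer.StopTimer(start_ms, clock->TimeInMilliseconds());
+//   int32_t required_ms = timer.RequiredDecodeTimeMs(kVideoFrameDelta);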
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_CODING_CODEC_TIMER_H_
diff --git a/webrtc/modules/video_coding/main/source/content_metrics_processing.cc b/webrtc/modules/video_coding/main/source/content_metrics_processing.cc
new file mode 100644
index 0000000000..757ffb0e46
--- /dev/null
+++ b/webrtc/modules/video_coding/main/source/content_metrics_processing.cc
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/video_coding/main/source/content_metrics_processing.h"
+
+#include <math.h>
+
+#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/video_coding/main/interface/video_coding_defines.h"
+
+namespace webrtc {
+//////////////////////////////////
+/// VCMContentMetricsProcessing //
+//////////////////////////////////
+
+VCMContentMetricsProcessing::VCMContentMetricsProcessing()
+ : recursive_avg_factor_(1 / 150.0f), // matched to 30fps.
+ frame_cnt_uniform_avg_(0),
+ avg_motion_level_(0.0f),
+ avg_spatial_level_(0.0f) {
+ recursive_avg_ = new VideoContentMetrics();
+ uniform_avg_ = new VideoContentMetrics();
+}
+
+VCMContentMetricsProcessing::~VCMContentMetricsProcessing() {
+ delete recursive_avg_;
+ delete uniform_avg_;
+}
+
+int VCMContentMetricsProcessing::Reset() {
+ recursive_avg_->Reset();
+ uniform_avg_->Reset();
+ frame_cnt_uniform_avg_ = 0;
+ avg_motion_level_ = 0.0f;
+ avg_spatial_level_ = 0.0f;
+ return VCM_OK;
+}
+
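+// The recursive average is an exponential moving average,
+// avg = (1 - a) * avg + a * sample, with a = recursive_avg_factor_. Setting
+// a = 1000 / (frameRate * kQmMinIntervalMs) makes the effective memory of the
+// filter roughly one kQmMinIntervalMs interval's worth of frames.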
+void VCMContentMetricsProcessing::UpdateFrameRate(uint32_t frameRate) {
+ // Update factor for recursive averaging.
+  recursive_avg_factor_ = 1000.0f /
+      static_cast<float>(frameRate * kQmMinIntervalMs);
+}
+
+VideoContentMetrics* VCMContentMetricsProcessing::LongTermAvgData() {
+ return recursive_avg_;
+}
+
+VideoContentMetrics* VCMContentMetricsProcessing::ShortTermAvgData() {
+ if (frame_cnt_uniform_avg_ == 0) {
+ return NULL;
+ }
+ // Two metrics are used: motion and spatial level.
+ uniform_avg_->motion_magnitude = avg_motion_level_ /
+ static_cast<float>(frame_cnt_uniform_avg_);
+ uniform_avg_->spatial_pred_err = avg_spatial_level_ /
+ static_cast<float>(frame_cnt_uniform_avg_);
+ return uniform_avg_;
+}
+
+void VCMContentMetricsProcessing::ResetShortTermAvgData() {
+ // Reset.
+ avg_motion_level_ = 0.0f;
+ avg_spatial_level_ = 0.0f;
+ frame_cnt_uniform_avg_ = 0;
+}
+
+int VCMContentMetricsProcessing::UpdateContentData(
+ const VideoContentMetrics *contentMetrics) {
+ if (contentMetrics == NULL) {
+ return VCM_OK;
+ }
+ return ProcessContent(contentMetrics);
+}
+
+int VCMContentMetricsProcessing::ProcessContent(
+ const VideoContentMetrics *contentMetrics) {
+  // Update the recursive averaged metrics: average is over a longer window
+  // of time: over kQmMinIntervalMs ms.
+ UpdateRecursiveAvg(contentMetrics);
+ // Update the uniform averaged metrics: average is over shorter window
+ // of time: based on ~RTCP reports.
+ UpdateUniformAvg(contentMetrics);
+ return VCM_OK;
+}
+
+void VCMContentMetricsProcessing::UpdateUniformAvg(
+ const VideoContentMetrics *contentMetrics) {
+ // Update frame counter.
+ frame_cnt_uniform_avg_ += 1;
+ // Update averaged metrics: motion and spatial level are used.
+ avg_motion_level_ += contentMetrics->motion_magnitude;
+ avg_spatial_level_ += contentMetrics->spatial_pred_err;
+ return;
+}
+
+void VCMContentMetricsProcessing::UpdateRecursiveAvg(
+ const VideoContentMetrics *contentMetrics) {
+ // Spatial metrics: 2x2, 1x2(H), 2x1(V).
+ recursive_avg_->spatial_pred_err = (1 - recursive_avg_factor_) *
+ recursive_avg_->spatial_pred_err +
+ recursive_avg_factor_ * contentMetrics->spatial_pred_err;
+
+ recursive_avg_->spatial_pred_err_h = (1 - recursive_avg_factor_) *
+ recursive_avg_->spatial_pred_err_h +
+ recursive_avg_factor_ * contentMetrics->spatial_pred_err_h;
+
+ recursive_avg_->spatial_pred_err_v = (1 - recursive_avg_factor_) *
+ recursive_avg_->spatial_pred_err_v +
+ recursive_avg_factor_ * contentMetrics->spatial_pred_err_v;
+
+ // Motion metric: Derived from NFD (normalized frame difference).
+ recursive_avg_->motion_magnitude = (1 - recursive_avg_factor_) *
+ recursive_avg_->motion_magnitude +
+ recursive_avg_factor_ * contentMetrics->motion_magnitude;
+}
+}  // namespace webrtc
diff --git a/webrtc/modules/video_coding/main/source/content_metrics_processing.h b/webrtc/modules/video_coding/main/source/content_metrics_processing.h
new file mode 100644
index 0000000000..3517f757d4
--- /dev/null
+++ b/webrtc/modules/video_coding/main/source/content_metrics_processing.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_CONTENT_METRICS_PROCESSING_H_
+#define WEBRTC_MODULES_VIDEO_CODING_CONTENT_METRICS_PROCESSING_H_
+
+#include "webrtc/typedefs.h"
+
+namespace webrtc {
+
+struct VideoContentMetrics;
+
+// QM interval time (in ms)
+enum {
+ kQmMinIntervalMs = 10000
+};
+
+// Flag for NFD metric vs motion metric
+enum {
+ kNfdMetric = 1
+};
+
+/**********************************/
+/* Content Metrics Processing */
+/**********************************/
+class VCMContentMetricsProcessing {
+ public:
+ VCMContentMetricsProcessing();
+ ~VCMContentMetricsProcessing();
+
+ // Update class with latest metrics.
+ int UpdateContentData(const VideoContentMetrics *contentMetrics);
+
+ // Reset the short-term averaged content data.
+ void ResetShortTermAvgData();
+
+ // Initialize.
+ int Reset();
+
+ // Inform class of current frame rate.
+ void UpdateFrameRate(uint32_t frameRate);
+
+ // Returns the long-term averaged content data: recursive average over longer
+ // time scale.
+ VideoContentMetrics* LongTermAvgData();
+
+ // Returns the short-term averaged content data: uniform average over
+  // shorter time scale.
+ VideoContentMetrics* ShortTermAvgData();
+
+ private:
+ // Compute working average.
+ int ProcessContent(const VideoContentMetrics *contentMetrics);
+
+ // Update the recursive averaged metrics: longer time average (~5/10 secs).
+ void UpdateRecursiveAvg(const VideoContentMetrics *contentMetrics);
+
+ // Update the uniform averaged metrics: shorter time average (~RTCP report).
+ void UpdateUniformAvg(const VideoContentMetrics *contentMetrics);
+
+ VideoContentMetrics* recursive_avg_;
+ VideoContentMetrics* uniform_avg_;
+ float recursive_avg_factor_;
+ uint32_t frame_cnt_uniform_avg_;
+ float avg_motion_level_;
+ float avg_spatial_level_;
+};
+} // namespace webrtc
+#endif // WEBRTC_MODULES_VIDEO_CODING_CONTENT_METRICS_PROCESSING_H_
diff --git a/webrtc/modules/video_coding/main/source/decoding_state.cc b/webrtc/modules/video_coding/main/source/decoding_state.cc
new file mode 100644
index 0000000000..cc92f1c83f
--- /dev/null
+++ b/webrtc/modules/video_coding/main/source/decoding_state.cc
@@ -0,0 +1,223 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/video_coding/main/source/decoding_state.h"
+
+#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/video_coding/main/source/frame_buffer.h"
+#include "webrtc/modules/video_coding/main/source/jitter_buffer_common.h"
+#include "webrtc/modules/video_coding/main/source/packet.h"
+
+namespace webrtc {
+
+VCMDecodingState::VCMDecodingState()
+ : sequence_num_(0),
+ time_stamp_(0),
+ picture_id_(kNoPictureId),
+ temporal_id_(kNoTemporalIdx),
+ tl0_pic_id_(kNoTl0PicIdx),
+ full_sync_(true),
+ in_initial_state_(true) {}
+
+VCMDecodingState::~VCMDecodingState() {}
+
+void VCMDecodingState::Reset() {
+  // TODO(mikhal): Verify - we don't always want to reset the sync.
+ sequence_num_ = 0;
+ time_stamp_ = 0;
+ picture_id_ = kNoPictureId;
+ temporal_id_ = kNoTemporalIdx;
+ tl0_pic_id_ = kNoTl0PicIdx;
+ full_sync_ = true;
+ in_initial_state_ = true;
+}
+
+uint32_t VCMDecodingState::time_stamp() const {
+ return time_stamp_;
+}
+
+uint16_t VCMDecodingState::sequence_num() const {
+ return sequence_num_;
+}
+
+bool VCMDecodingState::IsOldFrame(const VCMFrameBuffer* frame) const {
+ assert(frame != NULL);
+ if (in_initial_state_)
+ return false;
+ return !IsNewerTimestamp(frame->TimeStamp(), time_stamp_);
+}
+
+bool VCMDecodingState::IsOldPacket(const VCMPacket* packet) const {
+ assert(packet != NULL);
+ if (in_initial_state_)
+ return false;
+ return !IsNewerTimestamp(packet->timestamp, time_stamp_);
+}
+
+void VCMDecodingState::SetState(const VCMFrameBuffer* frame) {
+ assert(frame != NULL && frame->GetHighSeqNum() >= 0);
+ UpdateSyncState(frame);
+ sequence_num_ = static_cast<uint16_t>(frame->GetHighSeqNum());
+ time_stamp_ = frame->TimeStamp();
+ picture_id_ = frame->PictureId();
+ temporal_id_ = frame->TemporalId();
+ tl0_pic_id_ = frame->Tl0PicId();
+ in_initial_state_ = false;
+}
+
+void VCMDecodingState::CopyFrom(const VCMDecodingState& state) {
+ sequence_num_ = state.sequence_num_;
+ time_stamp_ = state.time_stamp_;
+ picture_id_ = state.picture_id_;
+ temporal_id_ = state.temporal_id_;
+ tl0_pic_id_ = state.tl0_pic_id_;
+ full_sync_ = state.full_sync_;
+ in_initial_state_ = state.in_initial_state_;
+}
+
+bool VCMDecodingState::UpdateEmptyFrame(const VCMFrameBuffer* frame) {
+ bool empty_packet = frame->GetHighSeqNum() == frame->GetLowSeqNum();
+ if (in_initial_state_ && empty_packet) {
+ // Drop empty packets as long as we are in the initial state.
+ return true;
+ }
+ if ((empty_packet && ContinuousSeqNum(frame->GetHighSeqNum())) ||
+ ContinuousFrame(frame)) {
+ // Continuous empty packets or continuous frames can be dropped if we
+ // advance the sequence number.
+ sequence_num_ = frame->GetHighSeqNum();
+ time_stamp_ = frame->TimeStamp();
+ return true;
+ }
+ return false;
+}
+
+void VCMDecodingState::UpdateOldPacket(const VCMPacket* packet) {
+ assert(packet != NULL);
+ if (packet->timestamp == time_stamp_) {
+ // Late packet belonging to the last decoded frame - make sure we update the
+ // last decoded sequence number.
+ sequence_num_ = LatestSequenceNumber(packet->seqNum, sequence_num_);
+ }
+}
+
+void VCMDecodingState::SetSeqNum(uint16_t new_seq_num) {
+ sequence_num_ = new_seq_num;
+}
+
+bool VCMDecodingState::in_initial_state() const {
+ return in_initial_state_;
+}
+
+bool VCMDecodingState::full_sync() const {
+ return full_sync_;
+}
+
+void VCMDecodingState::UpdateSyncState(const VCMFrameBuffer* frame) {
+ if (in_initial_state_)
+ return;
+ if (frame->TemporalId() == kNoTemporalIdx ||
+ frame->Tl0PicId() == kNoTl0PicIdx) {
+ full_sync_ = true;
+ } else if (frame->FrameType() == kVideoFrameKey || frame->LayerSync()) {
+ full_sync_ = true;
+ } else if (full_sync_) {
+ // Verify that we are still in sync.
+ // Sync will be broken if continuity is true for layers but not for the
+ // other methods (PictureId and SeqNum).
+ if (UsingPictureId(frame)) {
+ // First check for a valid tl0PicId.
+ if (frame->Tl0PicId() - tl0_pic_id_ > 1) {
+ full_sync_ = false;
+ } else {
+ full_sync_ = ContinuousPictureId(frame->PictureId());
+ }
+ } else {
+ full_sync_ = ContinuousSeqNum(static_cast<uint16_t>(
+ frame->GetLowSeqNum()));
+ }
+ }
+}
+
+bool VCMDecodingState::ContinuousFrame(const VCMFrameBuffer* frame) const {
+ // Check continuity based on the following hierarchy:
+ // - Temporal layers (stop here if out of sync).
+ // - Picture Id when available.
+ // - Sequence numbers.
+ // Return true when in initial state.
+ // Note that when a method is not applicable it will return false.
+ assert(frame != NULL);
+ // A key frame is always considered continuous as it doesn't refer to any
+ // frames and therefore won't introduce any errors even if prior frames are
+ // missing.
+ if (frame->FrameType() == kVideoFrameKey)
+ return true;
+ // When in the initial state we always require a key frame to start decoding.
+ if (in_initial_state_)
+ return false;
+ if (ContinuousLayer(frame->TemporalId(), frame->Tl0PicId()))
+ return true;
+ // tl0picId is either not used, or should remain unchanged.
+ if (frame->Tl0PicId() != tl0_pic_id_)
+ return false;
+ // Base layers are not continuous or temporal layers are inactive.
+ // In the presence of temporal layers, check for Picture ID/sequence number
+ // continuity if sync can be restored by this frame.
+ if (!full_sync_ && !frame->LayerSync())
+ return false;
+ if (UsingPictureId(frame)) {
+ return ContinuousPictureId(frame->PictureId());
+ } else {
+ return ContinuousSeqNum(static_cast<uint16_t>(frame->GetLowSeqNum()));
+ }
+}
+
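+// Picture ids are 7 or 15 bits on the wire, so a numerically smaller id is
+// treated as a wrap. A stored id >= 0x80 can only come from the 15-bit
+// representation, which selects the wrap modulus below.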
+bool VCMDecodingState::ContinuousPictureId(int picture_id) const {
+ int next_picture_id = picture_id_ + 1;
+ if (picture_id < picture_id_) {
+ // Wrap
+ if (picture_id_ >= 0x80) {
+ // 15 bits used for picture id
+ return ((next_picture_id & 0x7FFF) == picture_id);
+ } else {
+ // 7 bits used for picture id
+ return ((next_picture_id & 0x7F) == picture_id);
+ }
+ }
+ // No wrap
+ return (next_picture_id == picture_id);
+}
+
+bool VCMDecodingState::ContinuousSeqNum(uint16_t seq_num) const {
+ return seq_num == static_cast<uint16_t>(sequence_num_ + 1);
+}
+
+bool VCMDecodingState::ContinuousLayer(int temporal_id,
+ int tl0_pic_id) const {
+ // First, check if applicable.
+ if (temporal_id == kNoTemporalIdx || tl0_pic_id == kNoTl0PicIdx)
+ return false;
+ // If this is the first frame to use temporal layers, make sure we start
+ // from base.
+ else if (tl0_pic_id_ == kNoTl0PicIdx && temporal_id_ == kNoTemporalIdx &&
+ temporal_id == 0)
+ return true;
+
+ // Current implementation: Look for base layer continuity.
+ if (temporal_id != 0)
+ return false;
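+  // tl0PicIdx is an 8-bit index; the uint8_t cast makes the +1 comparison
+  // wrap correctly from 255 back to 0.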
+ return (static_cast<uint8_t>(tl0_pic_id_ + 1) == tl0_pic_id);
+}
+
+bool VCMDecodingState::UsingPictureId(const VCMFrameBuffer* frame) const {
+ return (frame->PictureId() != kNoPictureId && picture_id_ != kNoPictureId);
+}
+
+} // namespace webrtc
diff --git a/webrtc/modules/video_coding/main/source/decoding_state.h b/webrtc/modules/video_coding/main/source/decoding_state.h
new file mode 100644
index 0000000000..99ee335195
--- /dev/null
+++ b/webrtc/modules/video_coding/main/source/decoding_state.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_DECODING_STATE_H_
+#define WEBRTC_MODULES_VIDEO_CODING_DECODING_STATE_H_
+
+#include "webrtc/typedefs.h"
+
+namespace webrtc {
+
+// Forward declarations
+class VCMFrameBuffer;
+class VCMPacket;
+
+class VCMDecodingState {
+ public:
+ VCMDecodingState();
+ ~VCMDecodingState();
+ // Check for old frame
+ bool IsOldFrame(const VCMFrameBuffer* frame) const;
+ // Check for old packet
+ bool IsOldPacket(const VCMPacket* packet) const;
+ // Check for frame continuity based on current decoded state. Use best method
+ // possible, i.e. temporal info, picture ID or sequence number.
+ bool ContinuousFrame(const VCMFrameBuffer* frame) const;
+ void SetState(const VCMFrameBuffer* frame);
+ void CopyFrom(const VCMDecodingState& state);
+ bool UpdateEmptyFrame(const VCMFrameBuffer* frame);
+ // Update the sequence number if the timestamp matches current state and the
+ // sequence number is higher than the current one. This accounts for packets
+ // arriving late.
+ void UpdateOldPacket(const VCMPacket* packet);
+ void SetSeqNum(uint16_t new_seq_num);
+ void Reset();
+ uint32_t time_stamp() const;
+ uint16_t sequence_num() const;
+  // Returns true if in the initial state.
+  bool in_initial_state() const;
+  // Returns true when sync is on - decode all layers.
+ bool full_sync() const;
+
+ private:
+ void UpdateSyncState(const VCMFrameBuffer* frame);
+ // Designated continuity functions
+ bool ContinuousPictureId(int picture_id) const;
+ bool ContinuousSeqNum(uint16_t seq_num) const;
+ bool ContinuousLayer(int temporal_id, int tl0_pic_id) const;
+ bool UsingPictureId(const VCMFrameBuffer* frame) const;
+
+ // Keep state of last decoded frame.
+ // TODO(mikhal/stefan): create designated classes to handle these types.
+ uint16_t sequence_num_;
+ uint32_t time_stamp_;
+ int picture_id_;
+ int temporal_id_;
+ int tl0_pic_id_;
+ bool full_sync_; // Sync flag when temporal layers are used.
+ bool in_initial_state_;
+};
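+
+// Usage sketch (illustrative only; the jitter buffer is the expected caller):
+//   VCMDecodingState state;
+//   if (!state.IsOldFrame(frame) && state.ContinuousFrame(frame)) {
+//     // ... hand |frame| to the decoder ...
+//     state.SetState(frame);
+//   }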
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_CODING_DECODING_STATE_H_
diff --git a/webrtc/modules/video_coding/main/source/decoding_state_unittest.cc b/webrtc/modules/video_coding/main/source/decoding_state_unittest.cc
new file mode 100644
index 0000000000..feae701a65
--- /dev/null
+++ b/webrtc/modules/video_coding/main/source/decoding_state_unittest.cc
@@ -0,0 +1,449 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <string.h>
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/video_coding/main/source/decoding_state.h"
+#include "webrtc/modules/video_coding/main/source/frame_buffer.h"
+#include "webrtc/modules/video_coding/main/source/jitter_buffer_common.h"
+#include "webrtc/modules/video_coding/main/source/packet.h"
+
+namespace webrtc {
+
+TEST(TestDecodingState, Sanity) {
+ VCMDecodingState dec_state;
+ dec_state.Reset();
+ EXPECT_TRUE(dec_state.in_initial_state());
+ EXPECT_TRUE(dec_state.full_sync());
+}
+
+TEST(TestDecodingState, FrameContinuity) {
+ VCMDecodingState dec_state;
+  // Check that the continuity decision is based on the correct method.
+ VCMFrameBuffer frame;
+ VCMFrameBuffer frame_key;
+ VCMPacket packet;
+ packet.isFirstPacket = true;
+ packet.timestamp = 1;
+ packet.seqNum = 0xffff;
+ packet.frameType = kVideoFrameDelta;
+ packet.codecSpecificHeader.codec = kRtpVideoVp8;
+ packet.codecSpecificHeader.codecHeader.VP8.pictureId = 0x007F;
+ FrameData frame_data;
+ frame_data.rtt_ms = 0;
+ frame_data.rolling_average_packets_per_frame = -1;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ // Always start with a key frame.
+ dec_state.Reset();
+ EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
+ packet.frameType = kVideoFrameKey;
+ EXPECT_LE(0, frame_key.InsertPacket(packet, 0, kNoErrors, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame_key));
+ dec_state.SetState(&frame);
+ frame.Reset();
+ packet.frameType = kVideoFrameDelta;
+ // Use pictureId
+ packet.isFirstPacket = false;
+ packet.codecSpecificHeader.codecHeader.VP8.pictureId = 0x0002;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
+ frame.Reset();
+ packet.codecSpecificHeader.codecHeader.VP8.pictureId = 0;
+ packet.seqNum = 10;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+
+ // Use sequence numbers.
+ packet.codecSpecificHeader.codecHeader.VP8.pictureId = kNoPictureId;
+ frame.Reset();
+ packet.seqNum = dec_state.sequence_num() - 1u;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
+ frame.Reset();
+ packet.seqNum = dec_state.sequence_num() + 1u;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ // Insert another packet to this frame
+ packet.seqNum++;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ // Verify wrap.
+ EXPECT_LE(dec_state.sequence_num(), 0xffff);
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+
+ // Insert packet with temporal info.
+ dec_state.Reset();
+ frame.Reset();
+ packet.codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 0;
+ packet.codecSpecificHeader.codecHeader.VP8.temporalIdx = 0;
+ packet.codecSpecificHeader.codecHeader.VP8.pictureId = 0;
+ packet.seqNum = 1;
+ packet.timestamp = 1;
+ EXPECT_TRUE(dec_state.full_sync());
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ dec_state.SetState(&frame);
+ EXPECT_TRUE(dec_state.full_sync());
+ frame.Reset();
+ // 1 layer up - still good.
+ packet.codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 0;
+ packet.codecSpecificHeader.codecHeader.VP8.temporalIdx = 1;
+ packet.codecSpecificHeader.codecHeader.VP8.pictureId = 1;
+ packet.seqNum = 2;
+ packet.timestamp = 2;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+ EXPECT_TRUE(dec_state.full_sync());
+ frame.Reset();
+ // Lost non-base layer packet => should update sync parameter.
+ packet.codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 0;
+ packet.codecSpecificHeader.codecHeader.VP8.temporalIdx = 3;
+ packet.codecSpecificHeader.codecHeader.VP8.pictureId = 3;
+ packet.seqNum = 4;
+ packet.timestamp = 4;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
+ // Now insert the next non-base layer (belonging to a next tl0PicId).
+ frame.Reset();
+ packet.codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 1;
+ packet.codecSpecificHeader.codecHeader.VP8.temporalIdx = 2;
+ packet.codecSpecificHeader.codecHeader.VP8.pictureId = 4;
+ packet.seqNum = 5;
+ packet.timestamp = 5;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ // Checking continuity and not updating the state - this should not trigger
+ // an update of sync state.
+ EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
+ EXPECT_TRUE(dec_state.full_sync());
+ // Next base layer (dropped interim non-base layers) - should update sync.
+ frame.Reset();
+ packet.codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 1;
+ packet.codecSpecificHeader.codecHeader.VP8.temporalIdx = 0;
+ packet.codecSpecificHeader.codecHeader.VP8.pictureId = 5;
+ packet.seqNum = 6;
+ packet.timestamp = 6;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+ EXPECT_FALSE(dec_state.full_sync());
+
+ // Check wrap for temporal layers.
+ frame.Reset();
+ packet.codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 0x00FF;
+ packet.codecSpecificHeader.codecHeader.VP8.temporalIdx = 0;
+ packet.codecSpecificHeader.codecHeader.VP8.pictureId = 6;
+ packet.seqNum = 7;
+ packet.timestamp = 7;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ dec_state.SetState(&frame);
+ EXPECT_FALSE(dec_state.full_sync());
+ frame.Reset();
+ packet.codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 0x0000;
+ packet.codecSpecificHeader.codecHeader.VP8.temporalIdx = 0;
+ packet.codecSpecificHeader.codecHeader.VP8.pictureId = 7;
+ packet.seqNum = 8;
+ packet.timestamp = 8;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+  // After the state is updated, the same frame is no longer continuous.
+ dec_state.SetState(&frame);
+ EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
+}
+
+TEST(TestDecodingState, UpdateOldPacket) {
+ VCMDecodingState dec_state;
+ // Update only if zero size and newer than previous.
+  // Should only update if the timestamps match.
+ VCMFrameBuffer frame;
+ VCMPacket packet;
+ packet.timestamp = 1;
+ packet.seqNum = 1;
+ packet.frameType = kVideoFrameDelta;
+ FrameData frame_data;
+ frame_data.rtt_ms = 0;
+ frame_data.rolling_average_packets_per_frame = -1;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ dec_state.SetState(&frame);
+ EXPECT_EQ(dec_state.sequence_num(), 1);
+ // Insert an empty packet that does not belong to the same frame.
+ // => Sequence num should be the same.
+ packet.timestamp = 2;
+ dec_state.UpdateOldPacket(&packet);
+ EXPECT_EQ(dec_state.sequence_num(), 1);
+ // Now insert empty packet belonging to the same frame.
+ packet.timestamp = 1;
+ packet.seqNum = 2;
+ packet.frameType = kEmptyFrame;
+ packet.sizeBytes = 0;
+ dec_state.UpdateOldPacket(&packet);
+ EXPECT_EQ(dec_state.sequence_num(), 2);
+ // Now insert delta packet belonging to the same frame.
+ packet.timestamp = 1;
+ packet.seqNum = 3;
+ packet.frameType = kVideoFrameDelta;
+ packet.sizeBytes = 1400;
+ dec_state.UpdateOldPacket(&packet);
+ EXPECT_EQ(dec_state.sequence_num(), 3);
+ // Insert a packet belonging to an older timestamp - should not update the
+ // sequence number.
+ packet.timestamp = 0;
+ packet.seqNum = 4;
+ packet.frameType = kEmptyFrame;
+ packet.sizeBytes = 0;
+ dec_state.UpdateOldPacket(&packet);
+ EXPECT_EQ(dec_state.sequence_num(), 3);
+}
+
+TEST(TestDecodingState, MultiLayerBehavior) {
+ // Identify sync/non-sync when more than one layer.
+ VCMDecodingState dec_state;
+ // Identify packets belonging to old frames/packets.
+ // Set state for current frames.
+ // tl0PicIdx 0, temporal id 0.
+ VCMFrameBuffer frame;
+ VCMPacket packet;
+ packet.frameType = kVideoFrameDelta;
+ packet.codecSpecificHeader.codec = kRtpVideoVp8;
+ packet.timestamp = 0;
+ packet.seqNum = 0;
+ packet.codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 0;
+ packet.codecSpecificHeader.codecHeader.VP8.temporalIdx = 0;
+ packet.codecSpecificHeader.codecHeader.VP8.pictureId = 0;
+ FrameData frame_data;
+ frame_data.rtt_ms = 0;
+ frame_data.rolling_average_packets_per_frame = -1;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ dec_state.SetState(&frame);
+ // tl0PicIdx 0, temporal id 1.
+ frame.Reset();
+ packet.timestamp = 1;
+ packet.seqNum = 1;
+ packet.codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 0;
+ packet.codecSpecificHeader.codecHeader.VP8.temporalIdx = 1;
+ packet.codecSpecificHeader.codecHeader.VP8.pictureId = 1;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+ EXPECT_TRUE(dec_state.full_sync());
+ // Lost tl0PicIdx 0, temporal id 2.
+ // Insert tl0PicIdx 0, temporal id 3.
+ frame.Reset();
+ packet.timestamp = 3;
+ packet.seqNum = 3;
+ packet.codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 0;
+ packet.codecSpecificHeader.codecHeader.VP8.temporalIdx = 3;
+ packet.codecSpecificHeader.codecHeader.VP8.pictureId = 3;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+ EXPECT_FALSE(dec_state.full_sync());
+ // Insert next base layer
+ frame.Reset();
+ packet.timestamp = 4;
+ packet.seqNum = 4;
+ packet.codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 1;
+ packet.codecSpecificHeader.codecHeader.VP8.temporalIdx = 0;
+ packet.codecSpecificHeader.codecHeader.VP8.pictureId = 4;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+ EXPECT_FALSE(dec_state.full_sync());
+ // Insert key frame - should update sync value.
+ // A key frame is always a base layer.
+ frame.Reset();
+ packet.frameType = kVideoFrameKey;
+ packet.isFirstPacket = 1;
+ packet.timestamp = 5;
+ packet.seqNum = 5;
+ packet.codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 2;
+ packet.codecSpecificHeader.codecHeader.VP8.temporalIdx = 0;
+ packet.codecSpecificHeader.codecHeader.VP8.pictureId = 5;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+ EXPECT_TRUE(dec_state.full_sync());
+  // After sync, a continuous PictureId is required
+  // (a continuous base layer is not enough).
+ frame.Reset();
+ packet.frameType = kVideoFrameDelta;
+ packet.timestamp = 6;
+ packet.seqNum = 6;
+ packet.codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 3;
+ packet.codecSpecificHeader.codecHeader.VP8.temporalIdx = 0;
+ packet.codecSpecificHeader.codecHeader.VP8.pictureId = 6;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ EXPECT_TRUE(dec_state.full_sync());
+ frame.Reset();
+ packet.frameType = kVideoFrameDelta;
+ packet.isFirstPacket = 1;
+ packet.timestamp = 8;
+ packet.seqNum = 8;
+ packet.codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 4;
+ packet.codecSpecificHeader.codecHeader.VP8.temporalIdx = 0;
+ packet.codecSpecificHeader.codecHeader.VP8.pictureId = 8;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
+ EXPECT_TRUE(dec_state.full_sync());
+ dec_state.SetState(&frame);
+ EXPECT_FALSE(dec_state.full_sync());
+
+ // Insert a non-ref frame - should update sync value.
+ frame.Reset();
+ packet.frameType = kVideoFrameDelta;
+ packet.isFirstPacket = 1;
+ packet.timestamp = 9;
+ packet.seqNum = 9;
+ packet.codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 4;
+ packet.codecSpecificHeader.codecHeader.VP8.temporalIdx = 2;
+ packet.codecSpecificHeader.codecHeader.VP8.pictureId = 9;
+ packet.codecSpecificHeader.codecHeader.VP8.layerSync = true;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ dec_state.SetState(&frame);
+ EXPECT_TRUE(dec_state.full_sync());
+
+ // The following test will verify the sync flag behavior after a loss.
+  // Create the following pattern: update the base layer, insert a layer-2
+  // packet (sync flag on), lose the next packet, then insert a layer-1 packet
+  // (sync flag on) and check continuity and the sync flag.
+ // Base layer.
+ frame.Reset();
+ dec_state.Reset();
+ packet.frameType = kVideoFrameDelta;
+ packet.isFirstPacket = 1;
+ packet.markerBit = 1;
+ packet.timestamp = 0;
+ packet.seqNum = 0;
+ packet.codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 0;
+ packet.codecSpecificHeader.codecHeader.VP8.temporalIdx = 0;
+ packet.codecSpecificHeader.codecHeader.VP8.pictureId = 0;
+ packet.codecSpecificHeader.codecHeader.VP8.layerSync = false;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ dec_state.SetState(&frame);
+ EXPECT_TRUE(dec_state.full_sync());
+ // Layer 2 - 2 packets (insert one, lose one).
+ frame.Reset();
+ packet.frameType = kVideoFrameDelta;
+ packet.isFirstPacket = 1;
+ packet.markerBit = 0;
+ packet.timestamp = 1;
+ packet.seqNum = 1;
+ packet.codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 0;
+ packet.codecSpecificHeader.codecHeader.VP8.temporalIdx = 2;
+ packet.codecSpecificHeader.codecHeader.VP8.pictureId = 1;
+ packet.codecSpecificHeader.codecHeader.VP8.layerSync = true;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ // Layer 1
+ frame.Reset();
+ packet.frameType = kVideoFrameDelta;
+ packet.isFirstPacket = 1;
+ packet.markerBit = 1;
+ packet.timestamp = 2;
+ packet.seqNum = 3;
+ packet.codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 0;
+ packet.codecSpecificHeader.codecHeader.VP8.temporalIdx = 1;
+ packet.codecSpecificHeader.codecHeader.VP8.pictureId = 2;
+ packet.codecSpecificHeader.codecHeader.VP8.layerSync = true;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
+ EXPECT_TRUE(dec_state.full_sync());
+}
+
+TEST(TestDecodingState, DiscontinuousPicIdContinuousSeqNum) {
+ VCMDecodingState dec_state;
+ VCMFrameBuffer frame;
+ VCMPacket packet;
+ frame.Reset();
+ packet.frameType = kVideoFrameKey;
+ packet.codecSpecificHeader.codec = kRtpVideoVp8;
+ packet.timestamp = 0;
+ packet.seqNum = 0;
+ packet.codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 0;
+ packet.codecSpecificHeader.codecHeader.VP8.temporalIdx = 0;
+ packet.codecSpecificHeader.codecHeader.VP8.pictureId = 0;
+ FrameData frame_data;
+ frame_data.rtt_ms = 0;
+ frame_data.rolling_average_packets_per_frame = -1;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ dec_state.SetState(&frame);
+ EXPECT_TRUE(dec_state.full_sync());
+
+  // Continuous sequence number but discontinuous picture id. This implies a
+  // loss, so we have to fall back to only decoding the base layer.
+ frame.Reset();
+ packet.frameType = kVideoFrameDelta;
+ packet.timestamp += 3000;
+ ++packet.seqNum;
+ packet.codecSpecificHeader.codecHeader.VP8.temporalIdx = 1;
+ packet.codecSpecificHeader.codecHeader.VP8.pictureId = 2;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
+ dec_state.SetState(&frame);
+ EXPECT_FALSE(dec_state.full_sync());
+}
+
+TEST(TestDecodingState, OldInput) {
+ VCMDecodingState dec_state;
+ // Identify packets belonging to old frames/packets.
+ // Set state for current frames.
+ VCMFrameBuffer frame;
+ VCMPacket packet;
+ packet.timestamp = 10;
+ packet.seqNum = 1;
+ FrameData frame_data;
+ frame_data.rtt_ms = 0;
+ frame_data.rolling_average_packets_per_frame = -1;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ dec_state.SetState(&frame);
+ packet.timestamp = 9;
+ EXPECT_TRUE(dec_state.IsOldPacket(&packet));
+ // Check for old frame
+ frame.Reset();
+ frame.InsertPacket(packet, 0, kNoErrors, frame_data);
+ EXPECT_TRUE(dec_state.IsOldFrame(&frame));
+}
+
+TEST(TestDecodingState, PictureIdRepeat) {
+ VCMDecodingState dec_state;
+ VCMFrameBuffer frame;
+ VCMPacket packet;
+ packet.frameType = kVideoFrameDelta;
+ packet.codecSpecificHeader.codec = kRtpVideoVp8;
+ packet.timestamp = 0;
+ packet.seqNum = 0;
+ packet.codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 0;
+ packet.codecSpecificHeader.codecHeader.VP8.temporalIdx = 0;
+ packet.codecSpecificHeader.codecHeader.VP8.pictureId = 0;
+ FrameData frame_data;
+ frame_data.rtt_ms = 0;
+ frame_data.rolling_average_packets_per_frame = -1;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ dec_state.SetState(&frame);
+ // tl0PicIdx 0, temporal id 1.
+ frame.Reset();
+ ++packet.timestamp;
+ ++packet.seqNum;
+ packet.codecSpecificHeader.codecHeader.VP8.temporalIdx++;
+ packet.codecSpecificHeader.codecHeader.VP8.pictureId++;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ EXPECT_TRUE(dec_state.ContinuousFrame(&frame));
+ frame.Reset();
+  // Only the pictureId repeats here; tl0PicIdx stays continuous while
+  // temporalIdx has a gap.
+ packet.codecSpecificHeader.codecHeader.VP8.tl0PicIdx += 3;
+ packet.codecSpecificHeader.codecHeader.VP8.temporalIdx++;
+ packet.codecSpecificHeader.codecHeader.VP8.tl0PicIdx = 1;
+ EXPECT_LE(0, frame.InsertPacket(packet, 0, kNoErrors, frame_data));
+ EXPECT_FALSE(dec_state.ContinuousFrame(&frame));
+}
+
+} // namespace webrtc
diff --git a/webrtc/modules/video_coding/main/source/encoded_frame.cc b/webrtc/modules/video_coding/main/source/encoded_frame.cc
new file mode 100644
index 0000000000..d86704d632
--- /dev/null
+++ b/webrtc/modules/video_coding/main/source/encoded_frame.cc
@@ -0,0 +1,229 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/video_coding/main/interface/video_coding_defines.h"
+#include "webrtc/modules/video_coding/main/source/encoded_frame.h"
+#include "webrtc/modules/video_coding/main/source/generic_encoder.h"
+#include "webrtc/modules/video_coding/main/source/jitter_buffer_common.h"
+
+namespace webrtc {
+
+VCMEncodedFrame::VCMEncodedFrame()
+ : webrtc::EncodedImage(),
+ _renderTimeMs(-1),
+ _payloadType(0),
+ _missingFrame(false),
+ _codec(kVideoCodecUnknown),
+ _fragmentation(),
+ _rotation(kVideoRotation_0),
+ _rotation_set(false) {
+ _codecSpecificInfo.codecType = kVideoCodecUnknown;
+}
+
+VCMEncodedFrame::VCMEncodedFrame(const webrtc::EncodedImage& rhs)
+ : webrtc::EncodedImage(rhs),
+ _renderTimeMs(-1),
+ _payloadType(0),
+ _missingFrame(false),
+ _codec(kVideoCodecUnknown),
+ _fragmentation(),
+ _rotation(kVideoRotation_0),
+ _rotation_set(false) {
+ _codecSpecificInfo.codecType = kVideoCodecUnknown;
+ _buffer = NULL;
+ _size = 0;
+ _length = 0;
+ if (rhs._buffer != NULL)
+ {
+ VerifyAndAllocate(rhs._length);
+    memcpy(_buffer, rhs._buffer, rhs._length);
+    // Restore the payload length cleared above, mirroring the copy
+    // constructor below.
+    _length = rhs._length;
+ }
+}
+
+VCMEncodedFrame::VCMEncodedFrame(const VCMEncodedFrame& rhs)
+ : webrtc::EncodedImage(rhs),
+ _renderTimeMs(rhs._renderTimeMs),
+ _payloadType(rhs._payloadType),
+ _missingFrame(rhs._missingFrame),
+ _codecSpecificInfo(rhs._codecSpecificInfo),
+ _codec(rhs._codec),
+ _fragmentation(),
+ _rotation(rhs._rotation),
+ _rotation_set(rhs._rotation_set) {
+ _buffer = NULL;
+ _size = 0;
+ _length = 0;
+ if (rhs._buffer != NULL)
+ {
+ VerifyAndAllocate(rhs._length);
+ memcpy(_buffer, rhs._buffer, rhs._length);
+ _length = rhs._length;
+ }
+ _fragmentation.CopyFrom(rhs._fragmentation);
+}
+
+VCMEncodedFrame::~VCMEncodedFrame()
+{
+ Free();
+}
+
+void VCMEncodedFrame::Free()
+{
+ Reset();
+ if (_buffer != NULL)
+ {
+ delete [] _buffer;
+ _buffer = NULL;
+ }
+}
+
+void VCMEncodedFrame::Reset()
+{
+ _renderTimeMs = -1;
+ _timeStamp = 0;
+ _payloadType = 0;
+ _frameType = kVideoFrameDelta;
+ _encodedWidth = 0;
+ _encodedHeight = 0;
+ _completeFrame = false;
+ _missingFrame = false;
+ _length = 0;
+ _codecSpecificInfo.codecType = kVideoCodecUnknown;
+ _codec = kVideoCodecUnknown;
+ _rotation = kVideoRotation_0;
+ _rotation_set = false;
+}
+
+void VCMEncodedFrame::CopyCodecSpecific(const RTPVideoHeader* header)
+{
+ if (header) {
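+    // _codecSpecificInfo.codecType doubles as a first-packet sentinel: it
+    // stays kVideoCodecUnknown until the first packet of a frame initializes
+    // the codec-specific defaults below.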
+ switch (header->codec) {
+ case kRtpVideoVp8: {
+ if (_codecSpecificInfo.codecType != kVideoCodecVP8) {
+ // This is the first packet for this frame.
+ _codecSpecificInfo.codecSpecific.VP8.pictureId = -1;
+ _codecSpecificInfo.codecSpecific.VP8.temporalIdx = 0;
+ _codecSpecificInfo.codecSpecific.VP8.layerSync = false;
+ _codecSpecificInfo.codecSpecific.VP8.keyIdx = -1;
+ _codecSpecificInfo.codecType = kVideoCodecVP8;
+ }
+ _codecSpecificInfo.codecSpecific.VP8.nonReference =
+ header->codecHeader.VP8.nonReference;
+ if (header->codecHeader.VP8.pictureId != kNoPictureId) {
+ _codecSpecificInfo.codecSpecific.VP8.pictureId =
+ header->codecHeader.VP8.pictureId;
+ }
+ if (header->codecHeader.VP8.temporalIdx != kNoTemporalIdx) {
+ _codecSpecificInfo.codecSpecific.VP8.temporalIdx =
+ header->codecHeader.VP8.temporalIdx;
+ _codecSpecificInfo.codecSpecific.VP8.layerSync =
+ header->codecHeader.VP8.layerSync;
+ }
+ if (header->codecHeader.VP8.keyIdx != kNoKeyIdx) {
+ _codecSpecificInfo.codecSpecific.VP8.keyIdx =
+ header->codecHeader.VP8.keyIdx;
+ }
+ break;
+ }
+ case kRtpVideoVp9: {
+ if (_codecSpecificInfo.codecType != kVideoCodecVP9) {
+ // This is the first packet for this frame.
+ _codecSpecificInfo.codecSpecific.VP9.picture_id = -1;
+ _codecSpecificInfo.codecSpecific.VP9.temporal_idx = 0;
+ _codecSpecificInfo.codecSpecific.VP9.spatial_idx = 0;
+ _codecSpecificInfo.codecSpecific.VP9.gof_idx = 0;
+ _codecSpecificInfo.codecSpecific.VP9.inter_layer_predicted = false;
+ _codecSpecificInfo.codecSpecific.VP9.tl0_pic_idx = -1;
+ _codecSpecificInfo.codecType = kVideoCodecVP9;
+ }
+ _codecSpecificInfo.codecSpecific.VP9.inter_pic_predicted =
+ header->codecHeader.VP9.inter_pic_predicted;
+ _codecSpecificInfo.codecSpecific.VP9.flexible_mode =
+ header->codecHeader.VP9.flexible_mode;
+ _codecSpecificInfo.codecSpecific.VP9.ss_data_available =
+ header->codecHeader.VP9.ss_data_available;
+ if (header->codecHeader.VP9.picture_id != kNoPictureId) {
+ _codecSpecificInfo.codecSpecific.VP9.picture_id =
+ header->codecHeader.VP9.picture_id;
+ }
+ if (header->codecHeader.VP9.tl0_pic_idx != kNoTl0PicIdx) {
+ _codecSpecificInfo.codecSpecific.VP9.tl0_pic_idx =
+ header->codecHeader.VP9.tl0_pic_idx;
+ }
+ if (header->codecHeader.VP9.temporal_idx != kNoTemporalIdx) {
+ _codecSpecificInfo.codecSpecific.VP9.temporal_idx =
+ header->codecHeader.VP9.temporal_idx;
+ _codecSpecificInfo.codecSpecific.VP9.temporal_up_switch =
+ header->codecHeader.VP9.temporal_up_switch;
+ }
+ if (header->codecHeader.VP9.spatial_idx != kNoSpatialIdx) {
+ _codecSpecificInfo.codecSpecific.VP9.spatial_idx =
+ header->codecHeader.VP9.spatial_idx;
+ _codecSpecificInfo.codecSpecific.VP9.inter_layer_predicted =
+ header->codecHeader.VP9.inter_layer_predicted;
+ }
+ if (header->codecHeader.VP9.gof_idx != kNoGofIdx) {
+ _codecSpecificInfo.codecSpecific.VP9.gof_idx =
+ header->codecHeader.VP9.gof_idx;
+ }
+ if (header->codecHeader.VP9.ss_data_available) {
+ _codecSpecificInfo.codecSpecific.VP9.num_spatial_layers =
+ header->codecHeader.VP9.num_spatial_layers;
+ _codecSpecificInfo.codecSpecific.VP9
+ .spatial_layer_resolution_present =
+ header->codecHeader.VP9.spatial_layer_resolution_present;
+ if (header->codecHeader.VP9.spatial_layer_resolution_present) {
+ for (size_t i = 0; i < header->codecHeader.VP9.num_spatial_layers;
+ ++i) {
+ _codecSpecificInfo.codecSpecific.VP9.width[i] =
+ header->codecHeader.VP9.width[i];
+ _codecSpecificInfo.codecSpecific.VP9.height[i] =
+ header->codecHeader.VP9.height[i];
+ }
+ }
+ _codecSpecificInfo.codecSpecific.VP9.gof.CopyGofInfoVP9(
+ header->codecHeader.VP9.gof);
+ }
+ break;
+ }
+ case kRtpVideoH264: {
+ _codecSpecificInfo.codecType = kVideoCodecH264;
+ break;
+ }
+ default: {
+ _codecSpecificInfo.codecType = kVideoCodecUnknown;
+ break;
+ }
+ }
+ }
+}
+
+const RTPFragmentationHeader* VCMEncodedFrame::FragmentationHeader() const {
+ return &_fragmentation;
+}
+
+void VCMEncodedFrame::VerifyAndAllocate(size_t minimumSize)
+{
+ if(minimumSize > _size)
+ {
+ // create buffer of sufficient size
+ uint8_t* newBuffer = new uint8_t[minimumSize];
+ if(_buffer)
+ {
+ // copy old data
+ memcpy(newBuffer, _buffer, _size);
+ delete [] _buffer;
+ }
+ _buffer = newBuffer;
+ _size = minimumSize;
+ }
+}
+
+} // namespace webrtc
diff --git a/webrtc/modules/video_coding/main/source/encoded_frame.h b/webrtc/modules/video_coding/main/source/encoded_frame.h
new file mode 100644
index 0000000000..608578c35d
--- /dev/null
+++ b/webrtc/modules/video_coding/main/source/encoded_frame.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_ENCODED_FRAME_H_
+#define WEBRTC_MODULES_VIDEO_CODING_ENCODED_FRAME_H_
+
+#include <vector>
+
+#include "webrtc/common_types.h"
+#include "webrtc/common_video/interface/video_image.h"
+#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "webrtc/modules/video_coding/main/interface/video_coding_defines.h"
+
+namespace webrtc
+{
+
+class VCMEncodedFrame : protected EncodedImage
+{
+public:
+ VCMEncodedFrame();
+ VCMEncodedFrame(const webrtc::EncodedImage& rhs);
+ VCMEncodedFrame(const VCMEncodedFrame& rhs);
+
+ ~VCMEncodedFrame();
+ /**
+    *   Deletes the frame buffer and resets members to zero
+ */
+ void Free();
+ /**
+ * Set render time in milliseconds
+ */
+ void SetRenderTime(const int64_t renderTimeMs) {_renderTimeMs = renderTimeMs;}
+
+ /**
+ * Set the encoded frame size
+ */
+ void SetEncodedSize(uint32_t width, uint32_t height)
+ { _encodedWidth = width; _encodedHeight = height; }
+ /**
+ * Get the encoded image
+ */
+ const webrtc::EncodedImage& EncodedImage() const
+ { return static_cast<const webrtc::EncodedImage&>(*this); }
+ /**
+ * Get pointer to frame buffer
+ */
+ const uint8_t* Buffer() const {return _buffer;}
+ /**
+ * Get frame length
+ */
+ size_t Length() const {return _length;}
+ /**
+ * Get frame timestamp (90kHz)
+ */
+ uint32_t TimeStamp() const {return _timeStamp;}
+ /**
+ * Get render time in milliseconds
+ */
+ int64_t RenderTimeMs() const {return _renderTimeMs;}
+ /**
+ * Get frame type
+ */
+ webrtc::FrameType FrameType() const { return _frameType; }
+ /**
+ * Get frame rotation
+ */
+ VideoRotation rotation() const { return _rotation; }
+ /**
+ * True if this frame is complete, false otherwise
+ */
+ bool Complete() const { return _completeFrame; }
+ /**
+ * True if there's a frame missing before this frame
+ */
+ bool MissingFrame() const { return _missingFrame; }
+ /**
+ * Payload type of the encoded payload
+ */
+ uint8_t PayloadType() const { return _payloadType; }
+ /**
+ * Get codec specific info.
+ * The returned pointer is only valid as long as the VCMEncodedFrame
+ * is valid. Also, VCMEncodedFrame owns the pointer and will delete
+ * the object.
+ */
+ const CodecSpecificInfo* CodecSpecific() const {return &_codecSpecificInfo;}
+
+ const RTPFragmentationHeader* FragmentationHeader() const;
+
+protected:
+ /**
+ * Verifies that current allocated buffer size is larger than or equal to the input size.
+ * If the current buffer size is smaller, a new allocation is made and the old buffer data
+ * is copied to the new buffer.
+ * Buffer size is updated to minimumSize.
+ */
+ void VerifyAndAllocate(size_t minimumSize);
+
+ void Reset();
+
+ void CopyCodecSpecific(const RTPVideoHeader* header);
+
+ int64_t _renderTimeMs;
+ uint8_t _payloadType;
+ bool _missingFrame;
+ CodecSpecificInfo _codecSpecificInfo;
+ webrtc::VideoCodecType _codec;
+ RTPFragmentationHeader _fragmentation;
+ VideoRotation _rotation;
+
+ // Video rotation is only set along with the last packet for each frame
+  // (same as marker bit). This |_rotation_set| is only for debugging purposes,
+  // to ensure we don't set it twice for a frame.
+ bool _rotation_set;
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_CODING_ENCODED_FRAME_H_
diff --git a/webrtc/modules/video_coding/main/source/fec_tables_xor.h b/webrtc/modules/video_coding/main/source/fec_tables_xor.h
new file mode 100644
index 0000000000..28c67b4565
--- /dev/null
+++ b/webrtc/modules/video_coding/main/source/fec_tables_xor.h
@@ -0,0 +1,6481 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_SOURCE_FEC_TABLES_XOR_H_
+#define WEBRTC_MODULES_VIDEO_CODING_SOURCE_FEC_TABLES_XOR_H_
+
+// This is a private header for media_opt_util.cc.
+// It should not be included by other files.
+
+namespace webrtc {
+
+// Table of protection factors (code rates) for delta frames, for the XOR FEC.
+// Input is the packet loss and an effective rate (bits/frame).
+// Output is the array kCodeRateXORTable[k], where k = rate_i * 129 + loss_j;
+// loss_j = 0, 1, ..., 128, and rate_i = 0, 1, ..., 49 (the table holds
+// 50 rate rows of 129 loss entries each; 50 * 129 = 6450).
+static const int kSizeCodeRateXORTable = 6450;
+static const unsigned char kCodeRateXORTable[kSizeCodeRateXORTable] = {
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+11,
+11,
+11,
+11,
+11,
+11,
+11,
+11,
+11,
+11,
+11,
+11,
+11,
+11,
+11,
+39,
+39,
+39,
+39,
+39,
+39,
+39,
+39,
+39,
+39,
+39,
+39,
+39,
+39,
+39,
+39,
+39,
+39,
+39,
+39,
+39,
+39,
+39,
+39,
+39,
+39,
+39,
+39,
+39,
+39,
+39,
+39,
+39,
+39,
+39,
+39,
+39,
+39,
+39,
+39,
+39,
+39,
+39,
+39,
+39,
+39,
+39,
+39,
+51,
+51,
+51,
+51,
+51,
+51,
+51,
+51,
+51,
+51,
+51,
+51,
+51,
+51,
+51,
+51,
+51,
+51,
+51,
+51,
+51,
+51,
+51,
+51,
+51,
+51,
+51,
+51,
+51,
+51,
+51,
+51,
+51,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+8,
+8,
+8,
+8,
+8,
+8,
+8,
+8,
+8,
+8,
+8,
+8,
+8,
+8,
+8,
+30,
+30,
+30,
+30,
+30,
+30,
+30,
+30,
+30,
+30,
+30,
+30,
+30,
+30,
+30,
+56,
+56,
+56,
+56,
+56,
+56,
+56,
+65,
+65,
+65,
+65,
+65,
+65,
+65,
+65,
+65,
+65,
+65,
+65,
+65,
+65,
+65,
+65,
+65,
+65,
+65,
+65,
+65,
+65,
+65,
+65,
+65,
+65,
+65,
+65,
+65,
+65,
+65,
+65,
+65,
+65,
+65,
+65,
+65,
+65,
+65,
+65,
+65,
+87,
+87,
+87,
+87,
+87,
+87,
+87,
+87,
+87,
+87,
+87,
+87,
+87,
+87,
+87,
+87,
+87,
+87,
+87,
+87,
+87,
+87,
+87,
+87,
+78,
+78,
+78,
+78,
+78,
+78,
+78,
+78,
+78,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+6,
+6,
+6,
+23,
+23,
+23,
+23,
+23,
+23,
+23,
+23,
+23,
+23,
+23,
+23,
+23,
+23,
+23,
+44,
+44,
+44,
+44,
+44,
+44,
+50,
+50,
+50,
+50,
+50,
+50,
+50,
+50,
+50,
+68,
+68,
+68,
+68,
+68,
+68,
+68,
+85,
+85,
+85,
+85,
+85,
+85,
+85,
+85,
+85,
+85,
+85,
+85,
+85,
+85,
+85,
+85,
+85,
+85,
+85,
+85,
+85,
+85,
+85,
+85,
+85,
+85,
+85,
+85,
+85,
+85,
+85,
+85,
+85,
+85,
+85,
+85,
+85,
+85,
+85,
+85,
+85,
+105,
+105,
+105,
+105,
+105,
+105,
+105,
+105,
+105,
+105,
+105,
+105,
+105,
+105,
+105,
+105,
+105,
+105,
+105,
+105,
+105,
+105,
+105,
+105,
+88,
+88,
+88,
+88,
+88,
+88,
+88,
+88,
+88,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+5,
+5,
+5,
+5,
+5,
+5,
+19,
+19,
+19,
+36,
+41,
+41,
+41,
+41,
+41,
+41,
+41,
+41,
+41,
+41,
+41,
+41,
+41,
+41,
+55,
+55,
+55,
+55,
+55,
+55,
+69,
+69,
+69,
+69,
+69,
+69,
+69,
+69,
+69,
+75,
+75,
+80,
+80,
+80,
+80,
+80,
+97,
+97,
+97,
+97,
+97,
+97,
+97,
+97,
+97,
+97,
+102,
+102,
+102,
+102,
+102,
+102,
+102,
+102,
+102,
+102,
+102,
+102,
+102,
+102,
+102,
+102,
+102,
+102,
+102,
+102,
+102,
+102,
+102,
+102,
+102,
+102,
+102,
+102,
+102,
+102,
+102,
+116,
+116,
+116,
+116,
+116,
+116,
+116,
+116,
+116,
+116,
+116,
+116,
+116,
+116,
+116,
+116,
+116,
+116,
+116,
+116,
+116,
+116,
+116,
+116,
+100,
+100,
+100,
+100,
+100,
+100,
+100,
+100,
+100,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+4,
+16,
+16,
+16,
+16,
+16,
+16,
+30,
+35,
+35,
+47,
+58,
+58,
+58,
+58,
+58,
+58,
+58,
+58,
+58,
+58,
+58,
+58,
+58,
+58,
+63,
+63,
+63,
+63,
+63,
+63,
+77,
+77,
+77,
+77,
+77,
+77,
+77,
+82,
+82,
+82,
+82,
+94,
+94,
+94,
+94,
+94,
+105,
+105,
+105,
+105,
+110,
+110,
+110,
+110,
+110,
+110,
+122,
+122,
+122,
+122,
+122,
+122,
+122,
+122,
+122,
+122,
+122,
+122,
+122,
+122,
+122,
+122,
+122,
+122,
+122,
+122,
+122,
+122,
+122,
+122,
+122,
+122,
+122,
+122,
+122,
+122,
+122,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+115,
+115,
+115,
+115,
+115,
+115,
+115,
+115,
+115,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+4,
+14,
+27,
+27,
+27,
+27,
+27,
+31,
+41,
+52,
+52,
+56,
+69,
+69,
+69,
+69,
+69,
+69,
+69,
+69,
+69,
+69,
+69,
+69,
+69,
+69,
+69,
+69,
+69,
+69,
+69,
+69,
+79,
+79,
+79,
+79,
+83,
+83,
+83,
+94,
+94,
+94,
+94,
+106,
+106,
+106,
+106,
+106,
+115,
+115,
+115,
+115,
+125,
+125,
+125,
+125,
+125,
+125,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+0,
+0,
+0,
+0,
+3,
+3,
+3,
+17,
+28,
+38,
+38,
+38,
+38,
+38,
+47,
+51,
+63,
+63,
+63,
+72,
+72,
+72,
+72,
+72,
+72,
+72,
+76,
+76,
+76,
+76,
+80,
+80,
+80,
+80,
+80,
+80,
+80,
+80,
+80,
+84,
+84,
+84,
+84,
+93,
+93,
+93,
+105,
+105,
+105,
+105,
+114,
+114,
+114,
+114,
+114,
+124,
+124,
+124,
+124,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+0,
+0,
+0,
+0,
+12,
+12,
+12,
+35,
+43,
+47,
+47,
+47,
+47,
+47,
+58,
+58,
+66,
+66,
+66,
+70,
+70,
+70,
+70,
+70,
+73,
+73,
+82,
+82,
+82,
+86,
+94,
+94,
+94,
+94,
+94,
+94,
+94,
+94,
+94,
+94,
+94,
+94,
+94,
+105,
+105,
+105,
+114,
+114,
+114,
+114,
+117,
+117,
+117,
+117,
+117,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+0,
+0,
+0,
+0,
+24,
+24,
+24,
+49,
+53,
+53,
+53,
+53,
+53,
+53,
+61,
+61,
+64,
+64,
+64,
+64,
+70,
+70,
+70,
+70,
+78,
+78,
+88,
+88,
+88,
+96,
+106,
+106,
+106,
+106,
+106,
+106,
+106,
+106,
+106,
+106,
+112,
+112,
+112,
+120,
+120,
+120,
+124,
+124,
+124,
+124,
+124,
+124,
+124,
+124,
+124,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+0,
+0,
+0,
+5,
+36,
+36,
+36,
+55,
+55,
+55,
+55,
+55,
+55,
+55,
+58,
+58,
+58,
+58,
+58,
+64,
+78,
+78,
+78,
+78,
+87,
+87,
+94,
+94,
+94,
+103,
+110,
+110,
+110,
+110,
+110,
+110,
+110,
+110,
+116,
+116,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+0,
+0,
+0,
+18,
+43,
+43,
+43,
+53,
+53,
+53,
+53,
+53,
+53,
+53,
+53,
+58,
+58,
+58,
+58,
+71,
+87,
+87,
+87,
+87,
+94,
+94,
+97,
+97,
+97,
+109,
+111,
+111,
+111,
+111,
+111,
+111,
+111,
+111,
+125,
+125,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+0,
+0,
+0,
+31,
+46,
+46,
+46,
+48,
+48,
+48,
+48,
+48,
+48,
+48,
+48,
+66,
+66,
+66,
+66,
+80,
+93,
+93,
+93,
+93,
+95,
+95,
+95,
+95,
+100,
+115,
+115,
+115,
+115,
+115,
+115,
+115,
+115,
+115,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+0,
+0,
+4,
+40,
+45,
+45,
+45,
+45,
+45,
+45,
+45,
+45,
+49,
+49,
+49,
+74,
+74,
+74,
+74,
+86,
+90,
+90,
+90,
+90,
+95,
+95,
+95,
+95,
+106,
+120,
+120,
+120,
+120,
+120,
+120,
+120,
+120,
+120,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+0,
+0,
+14,
+42,
+42,
+42,
+42,
+42,
+42,
+42,
+42,
+46,
+56,
+56,
+56,
+80,
+80,
+80,
+80,
+84,
+84,
+84,
+84,
+88,
+99,
+99,
+99,
+99,
+111,
+122,
+122,
+122,
+122,
+122,
+122,
+122,
+122,
+122,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+0,
+0,
+26,
+40,
+40,
+40,
+40,
+40,
+40,
+40,
+40,
+54,
+66,
+66,
+66,
+80,
+80,
+80,
+80,
+80,
+80,
+80,
+84,
+94,
+106,
+106,
+106,
+106,
+116,
+120,
+120,
+120,
+120,
+120,
+120,
+120,
+120,
+124,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+0,
+3,
+34,
+38,
+38,
+38,
+38,
+38,
+42,
+42,
+42,
+63,
+72,
+72,
+76,
+80,
+80,
+80,
+80,
+80,
+80,
+80,
+89,
+101,
+114,
+114,
+114,
+114,
+118,
+118,
+118,
+118,
+118,
+118,
+118,
+118,
+118,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+0,
+12,
+36,
+36,
+36,
+36,
+36,
+36,
+49,
+49,
+49,
+69,
+73,
+76,
+86,
+86,
+86,
+86,
+86,
+86,
+86,
+86,
+97,
+109,
+122,
+122,
+122,
+122,
+122,
+122,
+122,
+122,
+122,
+122,
+122,
+122,
+122,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+0,
+22,
+34,
+34,
+34,
+34,
+38,
+38,
+57,
+57,
+57,
+69,
+73,
+82,
+92,
+92,
+92,
+92,
+92,
+92,
+96,
+96,
+104,
+117,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+0,
+29,
+33,
+33,
+33,
+33,
+44,
+44,
+62,
+62,
+62,
+69,
+77,
+87,
+95,
+95,
+95,
+95,
+95,
+95,
+107,
+107,
+110,
+120,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+0,
+31,
+31,
+31,
+31,
+31,
+51,
+51,
+62,
+65,
+65,
+73,
+83,
+91,
+94,
+94,
+94,
+94,
+97,
+97,
+114,
+114,
+114,
+122,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+0,
+29,
+29,
+29,
+29,
+29,
+56,
+56,
+59,
+70,
+70,
+79,
+86,
+89,
+89,
+89,
+89,
+89,
+100,
+100,
+116,
+116,
+116,
+122,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+0,
+28,
+28,
+28,
+28,
+28,
+57,
+57,
+57,
+76,
+76,
+83,
+86,
+86,
+86,
+86,
+86,
+89,
+104,
+104,
+114,
+114,
+114,
+124,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+0,
+27,
+27,
+27,
+27,
+30,
+55,
+55,
+55,
+80,
+80,
+83,
+86,
+86,
+86,
+86,
+86,
+93,
+108,
+108,
+111,
+111,
+111,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+0,
+26,
+26,
+26,
+26,
+36,
+53,
+53,
+53,
+80,
+80,
+80,
+90,
+90,
+90,
+90,
+90,
+98,
+107,
+107,
+107,
+107,
+107,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+0,
+26,
+26,
+26,
+28,
+42,
+52,
+54,
+54,
+78,
+78,
+78,
+95,
+95,
+95,
+97,
+97,
+104,
+106,
+106,
+106,
+106,
+106,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+0,
+24,
+24,
+24,
+33,
+47,
+49,
+58,
+58,
+74,
+74,
+74,
+97,
+97,
+97,
+106,
+106,
+108,
+108,
+108,
+108,
+108,
+108,
+124,
+124,
+124,
+124,
+124,
+124,
+124,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+0,
+24,
+24,
+24,
+39,
+48,
+50,
+63,
+63,
+72,
+74,
+74,
+96,
+96,
+96,
+109,
+111,
+111,
+111,
+111,
+111,
+111,
+111,
+119,
+119,
+122,
+122,
+122,
+122,
+122,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+0,
+23,
+23,
+23,
+43,
+46,
+54,
+66,
+66,
+69,
+77,
+77,
+92,
+92,
+92,
+105,
+113,
+113,
+113,
+113,
+113,
+113,
+113,
+115,
+117,
+123,
+123,
+123,
+123,
+123,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+0,
+22,
+22,
+22,
+44,
+44,
+59,
+67,
+67,
+67,
+81,
+81,
+89,
+89,
+89,
+97,
+112,
+112,
+112,
+112,
+112,
+112,
+112,
+112,
+119,
+126,
+126,
+126,
+126,
+126,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+0,
+21,
+21,
+24,
+43,
+45,
+63,
+65,
+65,
+67,
+85,
+85,
+87,
+87,
+87,
+91,
+109,
+109,
+109,
+111,
+111,
+111,
+111,
+111,
+123,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+0,
+21,
+21,
+28,
+42,
+50,
+63,
+63,
+66,
+71,
+85,
+85,
+85,
+85,
+87,
+92,
+106,
+106,
+108,
+114,
+114,
+114,
+114,
+114,
+125,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+0,
+20,
+20,
+34,
+41,
+54,
+62,
+62,
+69,
+75,
+82,
+82,
+82,
+82,
+92,
+98,
+105,
+105,
+110,
+117,
+117,
+117,
+117,
+117,
+124,
+124,
+126,
+126,
+126,
+126,
+126,
+126,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+0,
+20,
+20,
+38,
+40,
+58,
+60,
+60,
+73,
+78,
+80,
+80,
+80,
+80,
+100,
+105,
+107,
+107,
+113,
+118,
+118,
+118,
+118,
+118,
+120,
+120,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+0,
+19,
+21,
+38,
+40,
+58,
+58,
+60,
+75,
+77,
+77,
+77,
+81,
+81,
+107,
+109,
+109,
+109,
+114,
+116,
+116,
+116,
+116,
+116,
+116,
+116,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+0,
+18,
+25,
+37,
+44,
+56,
+56,
+63,
+75,
+75,
+75,
+75,
+88,
+88,
+111,
+111,
+111,
+111,
+112,
+112,
+112,
+112,
+112,
+112,
+112,
+114,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+0,
+18,
+30,
+36,
+48,
+55,
+55,
+67,
+73,
+73,
+73,
+73,
+97,
+97,
+110,
+110,
+110,
+110,
+110,
+110,
+110,
+110,
+110,
+110,
+110,
+116,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+0,
+18,
+34,
+36,
+52,
+55,
+55,
+70,
+72,
+73,
+73,
+73,
+102,
+104,
+108,
+108,
+108,
+108,
+109,
+109,
+109,
+109,
+109,
+109,
+109,
+119,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+0,
+17,
+35,
+35,
+52,
+59,
+59,
+70,
+70,
+76,
+76,
+76,
+99,
+105,
+105,
+105,
+105,
+105,
+111,
+111,
+111,
+111,
+111,
+111,
+111,
+121,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+0,
+17,
+34,
+36,
+51,
+61,
+62,
+70,
+70,
+80,
+80,
+80,
+93,
+103,
+103,
+103,
+103,
+103,
+112,
+112,
+112,
+112,
+112,
+116,
+118,
+124,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+0,
+16,
+33,
+39,
+50,
+59,
+65,
+72,
+72,
+82,
+82,
+82,
+91,
+100,
+100,
+100,
+100,
+100,
+109,
+109,
+109,
+109,
+109,
+121,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+0,
+16,
+32,
+43,
+48,
+54,
+66,
+75,
+75,
+81,
+83,
+83,
+92,
+97,
+97,
+97,
+99,
+99,
+105,
+105,
+105,
+105,
+105,
+123,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+0,
+15,
+31,
+46,
+47,
+49,
+69,
+77,
+77,
+81,
+85,
+85,
+93,
+95,
+95,
+95,
+100,
+100,
+102,
+102,
+102,
+102,
+102,
+120,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+0,
+15,
+30,
+46,
+48,
+48,
+70,
+75,
+79,
+82,
+87,
+87,
+92,
+94,
+94,
+94,
+103,
+103,
+103,
+103,
+103,
+104,
+104,
+115,
+120,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+0,
+15,
+30,
+45,
+50,
+50,
+68,
+70,
+80,
+85,
+89,
+89,
+90,
+95,
+95,
+95,
+104,
+104,
+104,
+104,
+104,
+109,
+109,
+112,
+114,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+0,
+14,
+29,
+44,
+54,
+54,
+64,
+64,
+83,
+87,
+88,
+88,
+88,
+98,
+98,
+98,
+103,
+103,
+103,
+103,
+103,
+113,
+113,
+113,
+113,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+0,
+14,
+29,
+43,
+56,
+56,
+61,
+61,
+84,
+85,
+88,
+88,
+88,
+100,
+100,
+100,
+102,
+102,
+102,
+102,
+102,
+113,
+116,
+116,
+116,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+0,
+14,
+28,
+42,
+57,
+57,
+62,
+62,
+80,
+80,
+91,
+91,
+91,
+100,
+100,
+100,
+100,
+100,
+100,
+100,
+100,
+109,
+119,
+119,
+119,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+0,
+14,
+28,
+42,
+56,
+56,
+65,
+66,
+76,
+76,
+92,
+92,
+92,
+97,
+97,
+97,
+101,
+101,
+101,
+101,
+101,
+106,
+121,
+121,
+121,
+126,
+126,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+0,
+13,
+27,
+41,
+55,
+55,
+67,
+72,
+74,
+74,
+90,
+90,
+90,
+91,
+91,
+91,
+105,
+105,
+105,
+105,
+105,
+107,
+122,
+122,
+122,
+123,
+123,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+0,
+13,
+27,
+40,
+54,
+54,
+67,
+76,
+76,
+76,
+85,
+85,
+85,
+85,
+85,
+85,
+112,
+112,
+112,
+112,
+112,
+112,
+121,
+121,
+121,
+121,
+121,
+126,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+127,
+};
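+
+// Editor's illustrative sketch, not part of the original file: given the
+// indexing scheme documented above the table (50 rate rows of 129 loss
+// entries each), a caller such as media_opt_util.cc would index it along
+// these lines. The helper name is hypothetical.
+static inline unsigned char CodeRateXOR(int rate_i, int loss_j) {
+  // k = rate_i * 129 + loss_j, with rate_i in [0, 49] and loss_j in [0, 128].
+  return kCodeRateXORTable[rate_i * 129 + loss_j];
+}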
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_CODING_SOURCE_FEC_TABLES_XOR_H_
diff --git a/webrtc/modules/video_coding/main/source/frame_buffer.cc b/webrtc/modules/video_coding/main/source/frame_buffer.cc
new file mode 100644
index 0000000000..5b6680ec61
--- /dev/null
+++ b/webrtc/modules/video_coding/main/source/frame_buffer.cc
@@ -0,0 +1,297 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/video_coding/main/source/frame_buffer.h"
+
+#include <assert.h>
+#include <string.h>
+
+#include "webrtc/base/checks.h"
+#include "webrtc/modules/video_coding/main/source/packet.h"
+#include "webrtc/system_wrappers/include/logging.h"
+
+namespace webrtc {
+
+VCMFrameBuffer::VCMFrameBuffer()
+    : _state(kStateEmpty),
+      _nackCount(0),
+      _latestPacketTimeMs(-1) {
+}
+
+VCMFrameBuffer::~VCMFrameBuffer() {
+}
+
+VCMFrameBuffer::VCMFrameBuffer(const VCMFrameBuffer& rhs)
+    : VCMEncodedFrame(rhs),
+      _state(rhs._state),
+      _sessionInfo(),
+      _nackCount(rhs._nackCount),
+      _latestPacketTimeMs(rhs._latestPacketTimeMs) {
+  _sessionInfo = rhs._sessionInfo;
+  _sessionInfo.UpdateDataPointers(rhs._buffer, _buffer);
+}
+
+webrtc::FrameType
+VCMFrameBuffer::FrameType() const {
+ return _sessionInfo.FrameType();
+}
+
+int32_t
+VCMFrameBuffer::GetLowSeqNum() const {
+ return _sessionInfo.LowSequenceNumber();
+}
+
+int32_t
+VCMFrameBuffer::GetHighSeqNum() const {
+ return _sessionInfo.HighSequenceNumber();
+}
+
+int VCMFrameBuffer::PictureId() const {
+ return _sessionInfo.PictureId();
+}
+
+int VCMFrameBuffer::TemporalId() const {
+ return _sessionInfo.TemporalId();
+}
+
+bool VCMFrameBuffer::LayerSync() const {
+ return _sessionInfo.LayerSync();
+}
+
+int VCMFrameBuffer::Tl0PicId() const {
+ return _sessionInfo.Tl0PicId();
+}
+
+bool VCMFrameBuffer::NonReference() const {
+ return _sessionInfo.NonReference();
+}
+
+void VCMFrameBuffer::SetGofInfo(const GofInfoVP9& gof_info, size_t idx) {
+ _sessionInfo.SetGofInfo(gof_info, idx);
+ // TODO(asapersson): Consider adding hdr->VP9.ref_picture_id for testing.
+ _codecSpecificInfo.codecSpecific.VP9.temporal_idx =
+ gof_info.temporal_idx[idx];
+ _codecSpecificInfo.codecSpecific.VP9.temporal_up_switch =
+ gof_info.temporal_up_switch[idx];
+}
+
+bool
+VCMFrameBuffer::IsSessionComplete() const {
+ return _sessionInfo.complete();
+}
+
+// Insert packet
+VCMFrameBufferEnum
+VCMFrameBuffer::InsertPacket(const VCMPacket& packet,
+ int64_t timeInMs,
+ VCMDecodeErrorMode decode_error_mode,
+ const FrameData& frame_data) {
+ assert(!(NULL == packet.dataPtr && packet.sizeBytes > 0));
+ if (packet.dataPtr != NULL) {
+ _payloadType = packet.payloadType;
+ }
+
+ if (kStateEmpty == _state) {
+    // First packet (empty and/or media) inserted into this frame:
+    // store some info and set some initial values.
+ _timeStamp = packet.timestamp;
+ // We only take the ntp timestamp of the first packet of a frame.
+ ntp_time_ms_ = packet.ntp_time_ms_;
+ _codec = packet.codec;
+ if (packet.frameType != kEmptyFrame) {
+ // first media packet
+ SetState(kStateIncomplete);
+ }
+ }
+
+ uint32_t requiredSizeBytes = Length() + packet.sizeBytes +
+ (packet.insertStartCode ? kH264StartCodeLengthBytes : 0);
+ if (requiredSizeBytes >= _size) {
+ const uint8_t* prevBuffer = _buffer;
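+    // The two terms below compute ceil(requiredSizeBytes /
+    // kBufferIncStepSizeBytes), so the buffer always grows in whole
+    // increments of kBufferIncStepSizeBytes.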
+    const uint32_t increments =
+        requiredSizeBytes / kBufferIncStepSizeBytes +
+        (requiredSizeBytes % kBufferIncStepSizeBytes > 0);
+    const uint32_t newSize = _size + increments * kBufferIncStepSizeBytes;
+ if (newSize > kMaxJBFrameSizeBytes) {
+ LOG(LS_ERROR) << "Failed to insert packet due to frame being too "
+ "big.";
+ return kSizeError;
+ }
+ VerifyAndAllocate(newSize);
+ _sessionInfo.UpdateDataPointers(prevBuffer, _buffer);
+ }
+
+ if (packet.width > 0 && packet.height > 0) {
+ _encodedWidth = packet.width;
+ _encodedHeight = packet.height;
+ }
+
+  // Don't copy payload-specific data for empty packets (e.g. padding packets).
+ if (packet.sizeBytes > 0)
+ CopyCodecSpecific(&packet.codecSpecificHeader);
+
+ int retVal = _sessionInfo.InsertPacket(packet, _buffer,
+ decode_error_mode,
+ frame_data);
+ if (retVal == -1) {
+ return kSizeError;
+ } else if (retVal == -2) {
+ return kDuplicatePacket;
+ } else if (retVal == -3) {
+ return kOutOfBoundsPacket;
+ }
+  // Update the length with the number of payload bytes just inserted.
+ _length = Length() + static_cast<uint32_t>(retVal);
+
+ _latestPacketTimeMs = timeInMs;
+
+ // http://www.etsi.org/deliver/etsi_ts/126100_126199/126114/12.07.00_60/
+ // ts_126114v120700p.pdf Section 7.4.5.
+ // The MTSI client shall add the payload bytes as defined in this clause
+ // onto the last RTP packet in each group of packets which make up a key
+ // frame (I-frame or IDR frame in H.264 (AVC), or an IRAP picture in H.265
+ // (HEVC)).
+ if (packet.markerBit) {
+ RTC_DCHECK(!_rotation_set);
+ _rotation = packet.codecSpecificHeader.rotation;
+ _rotation_set = true;
+ }
+
+ if (_sessionInfo.complete()) {
+ SetState(kStateComplete);
+ return kCompleteSession;
+ } else if (_sessionInfo.decodable()) {
+ SetState(kStateDecodable);
+ return kDecodableSession;
+ }
+ return kIncomplete;
+}
+
+int64_t
+VCMFrameBuffer::LatestPacketTimeMs() const {
+ return _latestPacketTimeMs;
+}
+
+void
+VCMFrameBuffer::IncrementNackCount() {
+ _nackCount++;
+}
+
+int16_t
+VCMFrameBuffer::GetNackCount() const {
+ return _nackCount;
+}
+
+bool
+VCMFrameBuffer::HaveFirstPacket() const {
+ return _sessionInfo.HaveFirstPacket();
+}
+
+bool
+VCMFrameBuffer::HaveLastPacket() const {
+ return _sessionInfo.HaveLastPacket();
+}
+
+int
+VCMFrameBuffer::NumPackets() const {
+ return _sessionInfo.NumPackets();
+}
+
+void
+VCMFrameBuffer::Reset() {
+ _length = 0;
+ _timeStamp = 0;
+ _sessionInfo.Reset();
+ _payloadType = 0;
+ _nackCount = 0;
+ _latestPacketTimeMs = -1;
+ _state = kStateEmpty;
+ VCMEncodedFrame::Reset();
+}
+
+// Set state of frame
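+// Allowed transitions, as enforced by the asserts below:
+//   kStateEmpty      -> kStateIncomplete | kStateDecodable | kStateComplete
+//   kStateIncomplete -> kStateDecodable | kStateComplete
+//   kStateDecodable  -> kStateComplete
+// kStateEmpty itself is reachable only through Reset().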
+void
+VCMFrameBuffer::SetState(VCMFrameBufferStateEnum state) {
+ if (_state == state) {
+ return;
+ }
+ switch (state) {
+    case kStateIncomplete:
+      // We can only reach this state from kStateEmpty.
+      assert(_state == kStateEmpty);
+
+      // Do nothing; we received a packet.
+      break;
+
+ case kStateComplete:
+ assert(_state == kStateEmpty ||
+ _state == kStateIncomplete ||
+ _state == kStateDecodable);
+
+ break;
+
+ case kStateEmpty:
+ // Should only be set to empty through Reset().
+ assert(false);
+ break;
+
+ case kStateDecodable:
+ assert(_state == kStateEmpty ||
+ _state == kStateIncomplete);
+ break;
+ }
+ _state = state;
+}
+
+// Get current state of frame
+VCMFrameBufferStateEnum
+VCMFrameBuffer::GetState() const {
+ return _state;
+}
+
+// Get current state and timestamp of frame
+VCMFrameBufferStateEnum
+VCMFrameBuffer::GetState(uint32_t& timeStamp) const {
+ timeStamp = TimeStamp();
+ return GetState();
+}
+
+bool
+VCMFrameBuffer::IsRetransmitted() const {
+ return _sessionInfo.session_nack();
+}
+
+void
+VCMFrameBuffer::PrepareForDecode(bool continuous) {
+#ifdef INDEPENDENT_PARTITIONS
+ if (_codec == kVideoCodecVP8) {
+ _length =
+ _sessionInfo.BuildVP8FragmentationHeader(_buffer, _length,
+ &_fragmentation);
+ } else {
+ size_t bytes_removed = _sessionInfo.MakeDecodable();
+ _length -= bytes_removed;
+ }
+#else
+ size_t bytes_removed = _sessionInfo.MakeDecodable();
+ _length -= bytes_removed;
+#endif
+ // Transfer frame information to EncodedFrame and create any codec
+ // specific information.
+ _frameType = _sessionInfo.FrameType();
+ _completeFrame = _sessionInfo.complete();
+ _missingFrame = !continuous;
+}
+
+} // namespace webrtc
diff --git a/webrtc/modules/video_coding/main/source/frame_buffer.h b/webrtc/modules/video_coding/main/source/frame_buffer.h
new file mode 100644
index 0000000000..ab4ff6574e
--- /dev/null
+++ b/webrtc/modules/video_coding/main/source/frame_buffer.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_FRAME_BUFFER_H_
+#define WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_FRAME_BUFFER_H_
+
+#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/video_coding/main/interface/video_coding.h"
+#include "webrtc/modules/video_coding/main/source/encoded_frame.h"
+#include "webrtc/modules/video_coding/main/source/jitter_buffer_common.h"
+#include "webrtc/modules/video_coding/main/source/session_info.h"
+#include "webrtc/typedefs.h"
+
+namespace webrtc {
+
+class VCMFrameBuffer : public VCMEncodedFrame {
+ public:
+ VCMFrameBuffer();
+ virtual ~VCMFrameBuffer();
+
+ VCMFrameBuffer(const VCMFrameBuffer& rhs);
+
+ virtual void Reset();
+
+ VCMFrameBufferEnum InsertPacket(const VCMPacket& packet,
+ int64_t timeInMs,
+ VCMDecodeErrorMode decode_error_mode,
+ const FrameData& frame_data);
+
+ // State
+ // Get current state of frame
+ VCMFrameBufferStateEnum GetState() const;
+ // Get current state and timestamp of frame
+ VCMFrameBufferStateEnum GetState(uint32_t& timeStamp) const;
+ void PrepareForDecode(bool continuous);
+
+ bool IsRetransmitted() const;
+ bool IsSessionComplete() const;
+ bool HaveFirstPacket() const;
+ bool HaveLastPacket() const;
+ int NumPackets() const;
+  // Makes sure the session contains a decodable stream.
+ void MakeSessionDecodable();
+
+ // Sequence numbers
+ // Get lowest packet sequence number in frame
+ int32_t GetLowSeqNum() const;
+ // Get highest packet sequence number in frame
+ int32_t GetHighSeqNum() const;
+
+ int PictureId() const;
+ int TemporalId() const;
+ bool LayerSync() const;
+ int Tl0PicId() const;
+ bool NonReference() const;
+
+ void SetGofInfo(const GofInfoVP9& gof_info, size_t idx);
+
+ // Increments a counter to keep track of the number of packets of this frame
+ // which were NACKed before they arrived.
+ void IncrementNackCount();
+ // Returns the number of packets of this frame which were NACKed before they
+ // arrived.
+ int16_t GetNackCount() const;
+
+ int64_t LatestPacketTimeMs() const;
+
+ webrtc::FrameType FrameType() const;
+ void SetPreviousFrameLoss();
+
+ // The number of packets discarded because the decoder can't make use of them.
+ int NotDecodablePackets() const;
+
+ private:
+ void SetState(VCMFrameBufferStateEnum state); // Set state of frame
+
+ VCMFrameBufferStateEnum _state; // Current state of the frame
+ VCMSessionInfo _sessionInfo;
+ uint16_t _nackCount;
+ int64_t _latestPacketTimeMs;
+};
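+
+// Editor's illustrative sketch of a call site (hypothetical; the real
+// callers live in the jitter buffer). The result values below are the ones
+// returned by InsertPacket() in frame_buffer.cc:
+//
+//   switch (frame->InsertPacket(packet, nowMs, decode_error_mode, data)) {
+//     case kCompleteSession:   // All packets received; ready for decode.
+//     case kDecodableSession:  // Usable despite losses; may be decoded.
+//       break;
+//     case kSizeError:         // Frame too large (or buffer error).
+//     case kDuplicatePacket:   // Packet was already inserted; ignored.
+//     case kOutOfBoundsPacket: // Packet doesn't belong to this frame.
+//       break;
+//     default:                 // kIncomplete: keep waiting for packets.
+//       break;
+//   }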
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_FRAME_BUFFER_H_
diff --git a/webrtc/modules/video_coding/main/source/generic_decoder.cc b/webrtc/modules/video_coding/main/source/generic_decoder.cc
new file mode 100644
index 0000000000..8b2d3974de
--- /dev/null
+++ b/webrtc/modules/video_coding/main/source/generic_decoder.cc
@@ -0,0 +1,198 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/video_coding/main/interface/video_coding.h"
+#include "webrtc/modules/video_coding/main/source/generic_decoder.h"
+#include "webrtc/modules/video_coding/main/source/internal_defines.h"
+#include "webrtc/system_wrappers/include/clock.h"
+#include "webrtc/system_wrappers/include/logging.h"
+
+namespace webrtc {
+
+VCMDecodedFrameCallback::VCMDecodedFrameCallback(VCMTiming& timing,
+                                                 Clock* clock)
+    : _critSect(CriticalSectionWrapper::CreateCriticalSection()),
+      _clock(clock),
+      _receiveCallback(NULL),
+      _timing(timing),
+      _timestampMap(kDecoderFrameMemoryLength),
+      _lastReceivedPictureID(0)
+{
+}
+
+VCMDecodedFrameCallback::~VCMDecodedFrameCallback()
+{
+ delete _critSect;
+}
+
+void VCMDecodedFrameCallback::SetUserReceiveCallback(
+ VCMReceiveCallback* receiveCallback)
+{
+ CriticalSectionScoped cs(_critSect);
+ _receiveCallback = receiveCallback;
+}
+
+VCMReceiveCallback* VCMDecodedFrameCallback::UserReceiveCallback()
+{
+ CriticalSectionScoped cs(_critSect);
+ return _receiveCallback;
+}
+
+int32_t VCMDecodedFrameCallback::Decoded(VideoFrame& decodedImage) {
+ // TODO(holmer): We should improve this so that we can handle multiple
+ // callbacks from one call to Decode().
+ VCMFrameInformation* frameInfo;
+ VCMReceiveCallback* callback;
+ {
+ CriticalSectionScoped cs(_critSect);
+ frameInfo = _timestampMap.Pop(decodedImage.timestamp());
+ callback = _receiveCallback;
+ }
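+  // |frameInfo| and |callback| were snapshotted under |_critSect| above so
+  // that the render callback below runs without holding the lock.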
+
+ if (frameInfo == NULL) {
+ LOG(LS_WARNING) << "Too many frames backed up in the decoder, dropping "
+ "this one.";
+ return WEBRTC_VIDEO_CODEC_OK;
+ }
+
+ _timing.StopDecodeTimer(
+ decodedImage.timestamp(),
+ frameInfo->decodeStartTimeMs,
+ _clock->TimeInMilliseconds(),
+ frameInfo->renderTimeMs);
+
+ if (callback != NULL)
+ {
+ decodedImage.set_render_time_ms(frameInfo->renderTimeMs);
+ decodedImage.set_rotation(frameInfo->rotation);
+ callback->FrameToRender(decodedImage);
+ }
+ return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int32_t
+VCMDecodedFrameCallback::ReceivedDecodedReferenceFrame(
+ const uint64_t pictureId)
+{
+ CriticalSectionScoped cs(_critSect);
+ if (_receiveCallback != NULL)
+ {
+ return _receiveCallback->ReceivedDecodedReferenceFrame(pictureId);
+ }
+ return -1;
+}
+
+int32_t
+VCMDecodedFrameCallback::ReceivedDecodedFrame(const uint64_t pictureId)
+{
+ _lastReceivedPictureID = pictureId;
+ return 0;
+}
+
+uint64_t VCMDecodedFrameCallback::LastReceivedPictureID() const
+{
+ return _lastReceivedPictureID;
+}
+
+void VCMDecodedFrameCallback::Map(uint32_t timestamp,
+ VCMFrameInformation* frameInfo) {
+ CriticalSectionScoped cs(_critSect);
+ _timestampMap.Add(timestamp, frameInfo);
+}
+
+int32_t VCMDecodedFrameCallback::Pop(uint32_t timestamp)
+{
+ CriticalSectionScoped cs(_critSect);
+ if (_timestampMap.Pop(timestamp) == NULL)
+ {
+ return VCM_GENERAL_ERROR;
+ }
+ return VCM_OK;
+}
+
+VCMGenericDecoder::VCMGenericDecoder(VideoDecoder& decoder, bool isExternal)
+    : _callback(NULL),
+      _frameInfos(),
+      _nextFrameInfoIdx(0),
+      _decoder(decoder),
+      _codecType(kVideoCodecUnknown),
+      _isExternal(isExternal),
+      _keyFrameDecoded(false)
+{
+}
+
+VCMGenericDecoder::~VCMGenericDecoder()
+{
+}
+
+int32_t VCMGenericDecoder::InitDecode(const VideoCodec* settings,
+ int32_t numberOfCores)
+{
+ _codecType = settings->codecType;
+
+ return _decoder.InitDecode(settings, numberOfCores);
+}
+
+int32_t VCMGenericDecoder::Decode(const VCMEncodedFrame& frame,
+ int64_t nowMs)
+{
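+    // Per-frame timing info is kept in a fixed ring of
+    // kDecoderFrameMemoryLength slots and registered in the timestamp map;
+    // VCMDecodedFrameCallback::Decoded() pops it again by timestamp.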
+ _frameInfos[_nextFrameInfoIdx].decodeStartTimeMs = nowMs;
+ _frameInfos[_nextFrameInfoIdx].renderTimeMs = frame.RenderTimeMs();
+ _frameInfos[_nextFrameInfoIdx].rotation = frame.rotation();
+ _callback->Map(frame.TimeStamp(), &_frameInfos[_nextFrameInfoIdx]);
+
+ _nextFrameInfoIdx = (_nextFrameInfoIdx + 1) % kDecoderFrameMemoryLength;
+ int32_t ret = _decoder.Decode(frame.EncodedImage(),
+ frame.MissingFrame(),
+ frame.FragmentationHeader(),
+ frame.CodecSpecific(),
+ frame.RenderTimeMs());
+
+ if (ret < WEBRTC_VIDEO_CODEC_OK)
+ {
+ LOG(LS_WARNING) << "Failed to decode frame with timestamp "
+ << frame.TimeStamp() << ", error code: " << ret;
+ _callback->Pop(frame.TimeStamp());
+ return ret;
+ }
+ else if (ret == WEBRTC_VIDEO_CODEC_NO_OUTPUT ||
+ ret == WEBRTC_VIDEO_CODEC_REQUEST_SLI)
+ {
+ // No output
+ _callback->Pop(frame.TimeStamp());
+ }
+ return ret;
+}
+
+int32_t
+VCMGenericDecoder::Release()
+{
+ return _decoder.Release();
+}
+
+int32_t VCMGenericDecoder::Reset()
+{
+ return _decoder.Reset();
+}
+
+int32_t VCMGenericDecoder::RegisterDecodeCompleteCallback(VCMDecodedFrameCallback* callback)
+{
+ _callback = callback;
+ return _decoder.RegisterDecodeCompleteCallback(callback);
+}
+
+bool VCMGenericDecoder::External() const
+{
+ return _isExternal;
+}
+
+} // namespace webrtc
diff --git a/webrtc/modules/video_coding/main/source/generic_decoder.h b/webrtc/modules/video_coding/main/source/generic_decoder.h
new file mode 100644
index 0000000000..09929e64f4
--- /dev/null
+++ b/webrtc/modules/video_coding/main/source/generic_decoder.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_GENERIC_DECODER_H_
+#define WEBRTC_MODULES_VIDEO_CODING_GENERIC_DECODER_H_
+
+#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "webrtc/modules/video_coding/main/source/encoded_frame.h"
+#include "webrtc/modules/video_coding/main/source/timestamp_map.h"
+#include "webrtc/modules/video_coding/main/source/timing.h"
+
+namespace webrtc
+{
+
+class VCMReceiveCallback;
+
+enum { kDecoderFrameMemoryLength = 10 };
+
+struct VCMFrameInformation
+{
+ int64_t renderTimeMs;
+ int64_t decodeStartTimeMs;
+ void* userData;
+ VideoRotation rotation;
+};
+
+class VCMDecodedFrameCallback : public DecodedImageCallback
+{
+public:
+ VCMDecodedFrameCallback(VCMTiming& timing, Clock* clock);
+ virtual ~VCMDecodedFrameCallback();
+ void SetUserReceiveCallback(VCMReceiveCallback* receiveCallback);
+ VCMReceiveCallback* UserReceiveCallback();
+
+ virtual int32_t Decoded(VideoFrame& decodedImage);
+ virtual int32_t ReceivedDecodedReferenceFrame(const uint64_t pictureId);
+ virtual int32_t ReceivedDecodedFrame(const uint64_t pictureId);
+
+ uint64_t LastReceivedPictureID() const;
+
+ void Map(uint32_t timestamp, VCMFrameInformation* frameInfo);
+ int32_t Pop(uint32_t timestamp);
+
+private:
+ // Protect |_receiveCallback| and |_timestampMap|.
+ CriticalSectionWrapper* _critSect;
+ Clock* _clock;
+ VCMReceiveCallback* _receiveCallback; // Guarded by |_critSect|.
+ VCMTiming& _timing;
+ VCMTimestampMap _timestampMap; // Guarded by |_critSect|.
+ uint64_t _lastReceivedPictureID;
+};
+
+
+class VCMGenericDecoder
+{
+ friend class VCMCodecDataBase;
+public:
+ VCMGenericDecoder(VideoDecoder& decoder, bool isExternal = false);
+ ~VCMGenericDecoder();
+
+ /**
+ * Initialize the decoder with the information from the VideoCodec
+ */
+ int32_t InitDecode(const VideoCodec* settings,
+ int32_t numberOfCores);
+
+ /**
+ * Decode to a raw I420 frame,
+ *
+ * inputVideoBuffer reference to encoded video frame
+ */
+ int32_t Decode(const VCMEncodedFrame& inputFrame, int64_t nowMs);
+
+ /**
+ * Free the decoder memory
+ */
+ int32_t Release();
+
+ /**
+ * Reset the decoder state, prepare for a new call
+ */
+ int32_t Reset();
+
+ /**
+ * Set decode callback. Deregistering while decoding is illegal.
+ */
+ int32_t RegisterDecodeCompleteCallback(VCMDecodedFrameCallback* callback);
+
+ bool External() const;
+
+private:
+ VCMDecodedFrameCallback* _callback;
+ VCMFrameInformation _frameInfos[kDecoderFrameMemoryLength];
+ uint32_t _nextFrameInfoIdx;
+ VideoDecoder& _decoder;
+ VideoCodecType _codecType;
+ bool _isExternal;
+ bool _keyFrameDecoded;
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_CODING_GENERIC_DECODER_H_
diff --git a/webrtc/modules/video_coding/main/source/generic_encoder.cc b/webrtc/modules/video_coding/main/source/generic_encoder.cc
new file mode 100644
index 0000000000..de196040f0
--- /dev/null
+++ b/webrtc/modules/video_coding/main/source/generic_encoder.cc
@@ -0,0 +1,298 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/base/checks.h"
+#include "webrtc/engine_configurations.h"
+#include "webrtc/modules/video_coding/main/source/encoded_frame.h"
+#include "webrtc/modules/video_coding/main/source/generic_encoder.h"
+#include "webrtc/modules/video_coding/main/source/media_optimization.h"
+#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
+#include "webrtc/system_wrappers/include/logging.h"
+
+namespace webrtc {
+namespace {
+// Map information from |info| into |rtp|. If |info| carries no codec-specific
+// information for a known codec type, |rtp| is left unmodified.
+void CopyCodecSpecific(const CodecSpecificInfo* info, RTPVideoHeader* rtp) {
+ RTC_DCHECK(info);
+ switch (info->codecType) {
+ case kVideoCodecVP8: {
+ rtp->codec = kRtpVideoVp8;
+ rtp->codecHeader.VP8.InitRTPVideoHeaderVP8();
+ rtp->codecHeader.VP8.pictureId = info->codecSpecific.VP8.pictureId;
+ rtp->codecHeader.VP8.nonReference =
+ info->codecSpecific.VP8.nonReference;
+ rtp->codecHeader.VP8.temporalIdx = info->codecSpecific.VP8.temporalIdx;
+ rtp->codecHeader.VP8.layerSync = info->codecSpecific.VP8.layerSync;
+ rtp->codecHeader.VP8.tl0PicIdx = info->codecSpecific.VP8.tl0PicIdx;
+ rtp->codecHeader.VP8.keyIdx = info->codecSpecific.VP8.keyIdx;
+ rtp->simulcastIdx = info->codecSpecific.VP8.simulcastIdx;
+ return;
+ }
+ case kVideoCodecVP9: {
+ rtp->codec = kRtpVideoVp9;
+ rtp->codecHeader.VP9.InitRTPVideoHeaderVP9();
+ rtp->codecHeader.VP9.inter_pic_predicted =
+ info->codecSpecific.VP9.inter_pic_predicted;
+ rtp->codecHeader.VP9.flexible_mode =
+ info->codecSpecific.VP9.flexible_mode;
+ rtp->codecHeader.VP9.ss_data_available =
+ info->codecSpecific.VP9.ss_data_available;
+ rtp->codecHeader.VP9.picture_id = info->codecSpecific.VP9.picture_id;
+ rtp->codecHeader.VP9.tl0_pic_idx = info->codecSpecific.VP9.tl0_pic_idx;
+ rtp->codecHeader.VP9.temporal_idx = info->codecSpecific.VP9.temporal_idx;
+ rtp->codecHeader.VP9.spatial_idx = info->codecSpecific.VP9.spatial_idx;
+ rtp->codecHeader.VP9.temporal_up_switch =
+ info->codecSpecific.VP9.temporal_up_switch;
+ rtp->codecHeader.VP9.inter_layer_predicted =
+ info->codecSpecific.VP9.inter_layer_predicted;
+ rtp->codecHeader.VP9.gof_idx = info->codecSpecific.VP9.gof_idx;
+
+ // Packetizer needs to know the number of spatial layers to correctly set
+ // the marker bit, even when the number won't be written in the packet.
+ rtp->codecHeader.VP9.num_spatial_layers =
+ info->codecSpecific.VP9.num_spatial_layers;
+ if (info->codecSpecific.VP9.ss_data_available) {
+ rtp->codecHeader.VP9.spatial_layer_resolution_present =
+ info->codecSpecific.VP9.spatial_layer_resolution_present;
+ if (info->codecSpecific.VP9.spatial_layer_resolution_present) {
+ for (size_t i = 0; i < info->codecSpecific.VP9.num_spatial_layers;
+ ++i) {
+ rtp->codecHeader.VP9.width[i] = info->codecSpecific.VP9.width[i];
+ rtp->codecHeader.VP9.height[i] = info->codecSpecific.VP9.height[i];
+ }
+ }
+ rtp->codecHeader.VP9.gof.CopyGofInfoVP9(info->codecSpecific.VP9.gof);
+ }
+ return;
+ }
+ case kVideoCodecH264:
+ rtp->codec = kRtpVideoH264;
+ return;
+ case kVideoCodecGeneric:
+ rtp->codec = kRtpVideoGeneric;
+ rtp->simulcastIdx = info->codecSpecific.generic.simulcast_idx;
+ return;
+ default:
+ return;
+ }
+}
+} // namespace
+
+//#define DEBUG_ENCODER_BIT_STREAM
+
+VCMGenericEncoder::VCMGenericEncoder(
+ VideoEncoder* encoder,
+ VideoEncoderRateObserver* rate_observer,
+ VCMEncodedFrameCallback* encoded_frame_callback,
+ bool internalSource)
+ : encoder_(encoder),
+ rate_observer_(rate_observer),
+ vcm_encoded_frame_callback_(encoded_frame_callback),
+ internal_source_(internalSource),
+ encoder_params_({0, 0, 0, 0}),
+ rotation_(kVideoRotation_0),
+ is_screenshare_(false) {}
+
+VCMGenericEncoder::~VCMGenericEncoder() {}
+
+int32_t VCMGenericEncoder::Release() {
+ return encoder_->Release();
+}
+
+int32_t VCMGenericEncoder::InitEncode(const VideoCodec* settings,
+ int32_t numberOfCores,
+ size_t maxPayloadSize) {
+ {
+ rtc::CritScope lock(&params_lock_);
+ encoder_params_.target_bitrate = settings->startBitrate * 1000;
+ encoder_params_.input_frame_rate = settings->maxFramerate;
+ }
+
+ is_screenshare_ = settings->mode == VideoCodecMode::kScreensharing;
+ if (encoder_->InitEncode(settings, numberOfCores, maxPayloadSize) != 0) {
+ LOG(LS_ERROR) << "Failed to initialize the encoder associated with "
+ "payload name: "
+ << settings->plName;
+ return -1;
+ }
+ encoder_->RegisterEncodeCompleteCallback(vcm_encoded_frame_callback_);
+ return 0;
+}
+
+int32_t VCMGenericEncoder::Encode(const VideoFrame& inputFrame,
+ const CodecSpecificInfo* codecSpecificInfo,
+ const std::vector<FrameType>& frameTypes) {
+ for (FrameType frame_type : frameTypes)
+ RTC_DCHECK(frame_type == kVideoFrameKey || frame_type == kVideoFrameDelta);
+
+ rotation_ = inputFrame.rotation();
+
+  // Keep track of the current frame rotation and apply it to the output of
+  // the encoder. This might not be exact, as the encoder could have a one
+  // frame delay, but it should be close enough.
+ // TODO(pbos): Map from timestamp, this is racy (even if rotation_ is locked
+ // properly, which it isn't). More than one frame may be in the pipeline.
+ vcm_encoded_frame_callback_->SetRotation(rotation_);
+
+ int32_t result = encoder_->Encode(inputFrame, codecSpecificInfo, &frameTypes);
+ if (is_screenshare_ &&
+ result == WEBRTC_VIDEO_CODEC_TARGET_BITRATE_OVERSHOOT) {
+ // Target bitrate exceeded, encoder state has been reset - try again.
+ return encoder_->Encode(inputFrame, codecSpecificInfo, &frameTypes);
+ }
+
+ return result;
+}
+
+void VCMGenericEncoder::SetEncoderParameters(const EncoderParameters& params) {
+ bool channel_parameters_have_changed;
+ bool rates_have_changed;
+ {
+ rtc::CritScope lock(&params_lock_);
+ channel_parameters_have_changed =
+ params.loss_rate != encoder_params_.loss_rate ||
+ params.rtt != encoder_params_.rtt;
+ rates_have_changed =
+ params.target_bitrate != encoder_params_.target_bitrate ||
+ params.input_frame_rate != encoder_params_.input_frame_rate;
+ encoder_params_ = params;
+ }
+ if (channel_parameters_have_changed)
+ encoder_->SetChannelParameters(params.loss_rate, params.rtt);
+ if (rates_have_changed) {
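+    // |target_bitrate| is in bits/s while the encoder API takes kbps;
+    // adding 500 before dividing rounds to the nearest kbps.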
+ uint32_t target_bitrate_kbps = (params.target_bitrate + 500) / 1000;
+ encoder_->SetRates(target_bitrate_kbps, params.input_frame_rate);
+ if (rate_observer_ != nullptr) {
+ rate_observer_->OnSetRates(params.target_bitrate,
+ params.input_frame_rate);
+ }
+ }
+}
+
+EncoderParameters VCMGenericEncoder::GetEncoderParameters() const {
+ rtc::CritScope lock(&params_lock_);
+ return encoder_params_;
+}
+
+int32_t
+VCMGenericEncoder::SetPeriodicKeyFrames(bool enable)
+{
+ return encoder_->SetPeriodicKeyFrames(enable);
+}
+
+int32_t VCMGenericEncoder::RequestFrame(
+ const std::vector<FrameType>& frame_types) {
+ VideoFrame image;
+ return encoder_->Encode(image, NULL, &frame_types);
+}
+
+bool
+VCMGenericEncoder::InternalSource() const
+{
+ return internal_source_;
+}
+
+void VCMGenericEncoder::OnDroppedFrame() {
+ encoder_->OnDroppedFrame();
+}
+
+bool VCMGenericEncoder::SupportsNativeHandle() const {
+ return encoder_->SupportsNativeHandle();
+}
+
+int VCMGenericEncoder::GetTargetFramerate() {
+ return encoder_->GetTargetFramerate();
+}
+
+ /***************************
+ * Callback Implementation
+ ***************************/
+VCMEncodedFrameCallback::VCMEncodedFrameCallback(
+ EncodedImageCallback* post_encode_callback)
+ : _sendCallback(),
+ _mediaOpt(NULL),
+ _payloadType(0),
+ _internalSource(false),
+ _rotation(kVideoRotation_0),
+ post_encode_callback_(post_encode_callback)
+#ifdef DEBUG_ENCODER_BIT_STREAM
+ ,
+ _bitStreamAfterEncoder(NULL)
+#endif
+{
+#ifdef DEBUG_ENCODER_BIT_STREAM
+ _bitStreamAfterEncoder = fopen("encoderBitStream.bit", "wb");
+#endif
+}
+
+VCMEncodedFrameCallback::~VCMEncodedFrameCallback()
+{
+#ifdef DEBUG_ENCODER_BIT_STREAM
+ fclose(_bitStreamAfterEncoder);
+#endif
+}
+
+int32_t
+VCMEncodedFrameCallback::SetTransportCallback(VCMPacketizationCallback* transport)
+{
+ _sendCallback = transport;
+ return VCM_OK;
+}
+
+int32_t VCMEncodedFrameCallback::Encoded(
+ const EncodedImage& encodedImage,
+ const CodecSpecificInfo* codecSpecificInfo,
+ const RTPFragmentationHeader* fragmentationHeader) {
+ RTC_DCHECK(encodedImage._frameType == kVideoFrameKey ||
+ encodedImage._frameType == kVideoFrameDelta);
+ post_encode_callback_->Encoded(encodedImage, NULL, NULL);
+
+ if (_sendCallback == NULL) {
+ return VCM_UNINITIALIZED;
+ }
+
+#ifdef DEBUG_ENCODER_BIT_STREAM
+ if (_bitStreamAfterEncoder != NULL) {
+ fwrite(encodedImage._buffer, 1, encodedImage._length,
+ _bitStreamAfterEncoder);
+ }
+#endif
+
+ RTPVideoHeader rtpVideoHeader;
+ memset(&rtpVideoHeader, 0, sizeof(RTPVideoHeader));
+ RTPVideoHeader* rtpVideoHeaderPtr = &rtpVideoHeader;
+ if (codecSpecificInfo) {
+ CopyCodecSpecific(codecSpecificInfo, rtpVideoHeaderPtr);
+ }
+ rtpVideoHeader.rotation = _rotation;
+
+ int32_t callbackReturn = _sendCallback->SendData(
+ _payloadType, encodedImage, *fragmentationHeader, rtpVideoHeaderPtr);
+ if (callbackReturn < 0) {
+ return callbackReturn;
+ }
+
+ if (_mediaOpt != NULL) {
+ _mediaOpt->UpdateWithEncodedData(encodedImage);
+ if (_internalSource)
+ return _mediaOpt->DropFrame(); // Signal to encoder to drop next frame.
+ }
+ return VCM_OK;
+}
+
+void
+VCMEncodedFrameCallback::SetMediaOpt(
+ media_optimization::MediaOptimization *mediaOpt)
+{
+ _mediaOpt = mediaOpt;
+}
+
+} // namespace webrtc
diff --git a/webrtc/modules/video_coding/main/source/generic_encoder.h b/webrtc/modules/video_coding/main/source/generic_encoder.h
new file mode 100644
index 0000000000..3a7132860f
--- /dev/null
+++ b/webrtc/modules/video_coding/main/source/generic_encoder.h
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_GENERIC_ENCODER_H_
+#define WEBRTC_MODULES_VIDEO_CODING_GENERIC_ENCODER_H_
+
+#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "webrtc/modules/video_coding/main/interface/video_coding_defines.h"
+
+#include <stdio.h>
+
+#include "webrtc/base/criticalsection.h"
+#include "webrtc/base/scoped_ptr.h"
+
+namespace webrtc {
+class CriticalSectionWrapper;
+
+namespace media_optimization {
+class MediaOptimization;
+} // namespace media_optimization
+
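+// Units, as used by VCMGenericEncoder below: |target_bitrate| is in bits/s,
+// |rtt| in milliseconds and |input_frame_rate| in frames/s. |loss_rate| is
+// forwarded unchanged to VideoEncoder::SetChannelParameters (the fraction
+// lost value on a 0-255 scale).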
+struct EncoderParameters {
+ uint32_t target_bitrate;
+ uint8_t loss_rate;
+ int64_t rtt;
+ uint32_t input_frame_rate;
+};
+
+/**********************************/
+/* VCMEncodedFrameCallback class  */
+/**********************************/
+class VCMEncodedFrameCallback : public EncodedImageCallback
+{
+public:
+ VCMEncodedFrameCallback(EncodedImageCallback* post_encode_callback);
+ virtual ~VCMEncodedFrameCallback();
+
+ /*
+ * Callback implementation - codec encode complete
+ */
+ int32_t Encoded(
+ const EncodedImage& encodedImage,
+ const CodecSpecificInfo* codecSpecificInfo = NULL,
+ const RTPFragmentationHeader* fragmentationHeader = NULL);
+ /*
+    * Set the transport (packetization) callback
+ */
+ int32_t SetTransportCallback(VCMPacketizationCallback* transport);
+ /**
+    * Set media optimization
+    */
+    void SetMediaOpt(media_optimization::MediaOptimization* mediaOpt);
+
+    void SetPayloadType(uint8_t payloadType) { _payloadType = payloadType; }
+    void SetInternalSource(bool internalSource) { _internalSource = internalSource; }
+
+ void SetRotation(VideoRotation rotation) { _rotation = rotation; }
+
+private:
+ VCMPacketizationCallback* _sendCallback;
+ media_optimization::MediaOptimization* _mediaOpt;
+ uint8_t _payloadType;
+ bool _internalSource;
+ VideoRotation _rotation;
+
+ EncodedImageCallback* post_encode_callback_;
+
+#ifdef DEBUG_ENCODER_BIT_STREAM
+ FILE* _bitStreamAfterEncoder;
+#endif
+}; // end of VCMEncodedFrameCallback class
+
+
+/******************************/
+/* VCMGenericEncoder class */
+/******************************/
+class VCMGenericEncoder
+{
+ friend class VCMCodecDataBase;
+public:
+ VCMGenericEncoder(VideoEncoder* encoder,
+ VideoEncoderRateObserver* rate_observer,
+ VCMEncodedFrameCallback* encoded_frame_callback,
+ bool internalSource);
+ ~VCMGenericEncoder();
+ /**
+ * Free encoder memory
+ */
+ int32_t Release();
+ /**
+ * Initialize the encoder with the information from the VideoCodec
+ */
+ int32_t InitEncode(const VideoCodec* settings,
+ int32_t numberOfCores,
+ size_t maxPayloadSize);
+ /**
+ * Encode raw image
+ * inputFrame : Frame containing raw image
+ * codecSpecificInfo : Specific codec data
+    * frameTypes : The requested frame types to encode
+ */
+ int32_t Encode(const VideoFrame& inputFrame,
+ const CodecSpecificInfo* codecSpecificInfo,
+ const std::vector<FrameType>& frameTypes);
+
+ void SetEncoderParameters(const EncoderParameters& params);
+ EncoderParameters GetEncoderParameters() const;
+
+ int32_t SetPeriodicKeyFrames(bool enable);
+
+ int32_t RequestFrame(const std::vector<FrameType>& frame_types);
+
+ bool InternalSource() const;
+
+ void OnDroppedFrame();
+
+ bool SupportsNativeHandle() const;
+
+ int GetTargetFramerate();
+
+private:
+ VideoEncoder* const encoder_;
+ VideoEncoderRateObserver* const rate_observer_;
+ VCMEncodedFrameCallback* const vcm_encoded_frame_callback_;
+ const bool internal_source_;
+ mutable rtc::CriticalSection params_lock_;
+ EncoderParameters encoder_params_ GUARDED_BY(params_lock_);
+ VideoRotation rotation_;
+ bool is_screenshare_;
+}; // end of VCMGenericEncoder class
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_CODING_GENERIC_ENCODER_H_
diff --git a/webrtc/modules/video_coding/main/source/inter_frame_delay.cc b/webrtc/modules/video_coding/main/source/inter_frame_delay.cc
new file mode 100644
index 0000000000..4786917e16
--- /dev/null
+++ b/webrtc/modules/video_coding/main/source/inter_frame_delay.cc
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/video_coding/main/source/inter_frame_delay.h"
+
+namespace webrtc {
+
+VCMInterFrameDelay::VCMInterFrameDelay(int64_t currentWallClock)
+{
+ Reset(currentWallClock);
+}
+
+// Resets the delay estimate
+void
+VCMInterFrameDelay::Reset(int64_t currentWallClock)
+{
+ _zeroWallClock = currentWallClock;
+ _wrapArounds = 0;
+ _prevWallClock = 0;
+ _prevTimestamp = 0;
+ _dTS = 0;
+}
+
+// Calculates the delay of a frame with the given timestamp.
+// This method is called when the frame is complete.
+bool
+VCMInterFrameDelay::CalculateDelay(uint32_t timestamp,
+ int64_t *delay,
+ int64_t currentWallClock)
+{
+ if (_prevWallClock == 0)
+ {
+ // First set of data, initialization, wait for next frame
+ _prevWallClock = currentWallClock;
+ _prevTimestamp = timestamp;
+ *delay = 0;
+ return true;
+ }
+
+ int32_t prevWrapArounds = _wrapArounds;
+ CheckForWrapArounds(timestamp);
+
+ // This will be -1 for backward wrap arounds and +1 for forward wrap arounds
+ int32_t wrapAroundsSincePrev = _wrapArounds - prevWrapArounds;
+
+    // Account for reordering in the jitter variance estimate in the future?
+ // Note that this also captures incomplete frames which are grabbed
+ // for decoding after a later frame has been complete, i.e. real
+ // packet losses.
+ if ((wrapAroundsSincePrev == 0 && timestamp < _prevTimestamp) || wrapAroundsSincePrev < 0)
+ {
+ *delay = 0;
+ return false;
+ }
+
+    // Compute the compensated timestamp difference, convert it to ms and
+    // round it to the closest integer.
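+    // Example: with the 90 kHz RTP video clock and no wrap around, a
+    // difference of 3000 ticks gives _dTS = 3000 / 90.0 + 0.5 -> 33 ms.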
+ _dTS = static_cast<int64_t>((timestamp + wrapAroundsSincePrev *
+ (static_cast<int64_t>(1)<<32) - _prevTimestamp) / 90.0 + 0.5);
+
+ // frameDelay is the difference of dT and dTS -- i.e. the difference of
+ // the wall clock time difference and the timestamp difference between
+ // two following frames.
+ *delay = static_cast<int64_t>(currentWallClock - _prevWallClock - _dTS);
+
+ _prevTimestamp = timestamp;
+ _prevWallClock = currentWallClock;
+
+ return true;
+}
+
+// Returns the current difference between incoming timestamps
+uint32_t VCMInterFrameDelay::CurrentTimeStampDiffMs() const
+{
+ if (_dTS < 0)
+ {
+ return 0;
+ }
+ return static_cast<uint32_t>(_dTS);
+}
+
+// Checks whether the timestamp clock has wrapped around since the last
+// timestamp, and keeps track of the number of wrap arounds since reset.
+void
+VCMInterFrameDelay::CheckForWrapArounds(uint32_t timestamp)
+{
+ if (timestamp < _prevTimestamp)
+ {
+        // The true difference is less than -2^31 if we have had a forward
+        // wrap around (e.g. timestamp = 1, _prevTimestamp = 2^32 - 1). Since
+        // it is cast to an int32_t, it then comes out positive.
+ if (static_cast<int32_t>(timestamp - _prevTimestamp) > 0)
+ {
+ // Forward wrap around
+ _wrapArounds++;
+ }
+ }
+    // The true difference is less than -2^31 if we have had a backward wrap
+    // around. Since it is cast to an int32_t, it then comes out positive.
+ else if (static_cast<int32_t>(_prevTimestamp - timestamp) > 0)
+ {
+ // Backward wrap around
+ _wrapArounds--;
+ }
+}
+
+} // namespace webrtc
diff --git a/webrtc/modules/video_coding/main/source/inter_frame_delay.h b/webrtc/modules/video_coding/main/source/inter_frame_delay.h
new file mode 100644
index 0000000000..58b326ae96
--- /dev/null
+++ b/webrtc/modules/video_coding/main/source/inter_frame_delay.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_INTER_FRAME_DELAY_H_
+#define WEBRTC_MODULES_VIDEO_CODING_INTER_FRAME_DELAY_H_
+
+#include "webrtc/typedefs.h"
+
+namespace webrtc
+{
+
+class VCMInterFrameDelay
+{
+public:
+ VCMInterFrameDelay(int64_t currentWallClock);
+
+    // Resets the estimate; |currentWallClock| becomes the new time base.
+ void Reset(int64_t currentWallClock);
+
+ // Calculates the delay of a frame with the given timestamp.
+ // This method is called when the frame is complete.
+ //
+ // Input:
+ // - timestamp : RTP timestamp of a received frame
+ // - *delay : Pointer to memory where the result should be stored
+    //          - currentWallClock  : The current wall clock time in
+    //                                milliseconds.
+ // Return value : true if OK, false when reordered timestamps
+ bool CalculateDelay(uint32_t timestamp,
+ int64_t *delay,
+ int64_t currentWallClock);
+
+ // Returns the current difference between incoming timestamps
+ //
+ // Return value : Wrap-around compensated difference between incoming
+ // timestamps.
+ uint32_t CurrentTimeStampDiffMs() const;
+
+private:
+ // Controls if the RTP timestamp counter has had a wrap around
+ // between the current and the previously received frame.
+ //
+ // Input:
+    //          - timestamp         : RTP timestamp of the current frame.
+ void CheckForWrapArounds(uint32_t timestamp);
+
+ int64_t _zeroWallClock; // Local timestamp of the first video packet received
+ int32_t _wrapArounds; // Number of wrapArounds detected
+ // The previous timestamp passed to the delay estimate
+ uint32_t _prevTimestamp;
+ // The previous wall clock timestamp used by the delay estimate
+ int64_t _prevWallClock;
+ // Wrap-around compensated difference between incoming timestamps
+ int64_t _dTS;
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_CODING_INTER_FRAME_DELAY_H_
diff --git a/webrtc/modules/video_coding/main/source/internal_defines.h b/webrtc/modules/video_coding/main/source/internal_defines.h
new file mode 100644
index 0000000000..adc940f20d
--- /dev/null
+++ b/webrtc/modules/video_coding/main/source/internal_defines.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_SOURCE_INTERNAL_DEFINES_H_
+#define WEBRTC_MODULES_VIDEO_CODING_SOURCE_INTERNAL_DEFINES_H_
+
+#include "webrtc/typedefs.h"
+
+namespace webrtc
+{
+
+#define MASK_32_BITS(x) (0xFFFFFFFF & (x))
+
+inline uint32_t MaskWord64ToUWord32(int64_t w64)
+{
+ return static_cast<uint32_t>(MASK_32_BITS(w64));
+}
+
+#define VCM_MAX(a, b) (((a) > (b)) ? (a) : (b))
+#define VCM_MIN(a, b) (((a) < (b)) ? (a) : (b))
+
+#define VCM_DEFAULT_CODEC_WIDTH 352
+#define VCM_DEFAULT_CODEC_HEIGHT 288
+#define VCM_DEFAULT_FRAME_RATE 30
+#define VCM_MIN_BITRATE 30
+#define VCM_FLUSH_INDICATOR 4
+
+// Helper macros for creating the static codec list
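+// Each enabled codec receives the next index after the previously enabled
+// one; a disabled codec aliases the previous index. For example, with only
+// VP8 and VP9 enabled: VCM_VP8_IDX == 0, VCM_VP9_IDX == 1 and
+// VCM_NUM_VIDEO_CODECS_AVAILABLE == 2.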
+#define VCM_NO_CODEC_IDX -1
+#ifdef VIDEOCODEC_VP8
+ #define VCM_VP8_IDX (VCM_NO_CODEC_IDX + 1)
+#else
+ #define VCM_VP8_IDX VCM_NO_CODEC_IDX
+#endif
+#ifdef VIDEOCODEC_VP9
+ #define VCM_VP9_IDX (VCM_VP8_IDX + 1)
+#else
+ #define VCM_VP9_IDX VCM_VP8_IDX
+#endif
+#ifdef VIDEOCODEC_H264
+ #define VCM_H264_IDX (VCM_VP9_IDX + 1)
+#else
+ #define VCM_H264_IDX VCM_VP9_IDX
+#endif
+#ifdef VIDEOCODEC_I420
+ #define VCM_I420_IDX (VCM_H264_IDX + 1)
+#else
+ #define VCM_I420_IDX VCM_H264_IDX
+#endif
+#define VCM_NUM_VIDEO_CODECS_AVAILABLE (VCM_I420_IDX + 1)
+
+#define VCM_NO_RECEIVER_ID 0
+
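+// Packs a module id and a receiver id into a single 32-bit id: the module id
+// occupies the upper 16 bits and the receiver id the lower 16.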
+inline int32_t VCMId(const int32_t vcmId, const int32_t receiverId = 0)
+{
+ return static_cast<int32_t>((vcmId << 16) + receiverId);
+}
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_CODING_SOURCE_INTERNAL_DEFINES_H_
diff --git a/webrtc/modules/video_coding/main/source/jitter_buffer.cc b/webrtc/modules/video_coding/main/source/jitter_buffer.cc
new file mode 100644
index 0000000000..bfdd7867d9
--- /dev/null
+++ b/webrtc/modules/video_coding/main/source/jitter_buffer.cc
@@ -0,0 +1,1339 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "webrtc/modules/video_coding/main/source/jitter_buffer.h"
+
+#include <assert.h>
+
+#include <algorithm>
+#include <utility>
+
+#include "webrtc/base/checks.h"
+#include "webrtc/base/trace_event.h"
+#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
+#include "webrtc/modules/video_coding/main/interface/video_coding.h"
+#include "webrtc/modules/video_coding/main/source/frame_buffer.h"
+#include "webrtc/modules/video_coding/main/source/inter_frame_delay.h"
+#include "webrtc/modules/video_coding/main/source/internal_defines.h"
+#include "webrtc/modules/video_coding/main/source/jitter_buffer_common.h"
+#include "webrtc/modules/video_coding/main/source/jitter_estimator.h"
+#include "webrtc/modules/video_coding/main/source/packet.h"
+#include "webrtc/system_wrappers/include/clock.h"
+#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
+#include "webrtc/system_wrappers/include/event_wrapper.h"
+#include "webrtc/system_wrappers/include/logging.h"
+#include "webrtc/system_wrappers/include/metrics.h"
+
+namespace webrtc {
+
+// Interval for cleaning up old SS (scalability structure) data.
+static const uint32_t kSsCleanupIntervalSec = 60;
+
+// Use this RTT if no value has been reported.
+static const int64_t kDefaultRtt = 200;
+
+typedef std::pair<uint32_t, VCMFrameBuffer*> FrameListPair;
+
+bool IsKeyFrame(FrameListPair pair) {
+ return pair.second->FrameType() == kVideoFrameKey;
+}
+
+bool HasNonEmptyState(FrameListPair pair) {
+ return pair.second->GetState() != kStateEmpty;
+}
+
+void FrameList::InsertFrame(VCMFrameBuffer* frame) {
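+  // Insert with an end() hint: frames normally arrive in (roughly) increasing
+  // timestamp order, so appending at the back is the common case.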
+ insert(rbegin().base(), FrameListPair(frame->TimeStamp(), frame));
+}
+
+VCMFrameBuffer* FrameList::PopFrame(uint32_t timestamp) {
+ FrameList::iterator it = find(timestamp);
+ if (it == end())
+ return NULL;
+ VCMFrameBuffer* frame = it->second;
+ erase(it);
+ return frame;
+}
+
+VCMFrameBuffer* FrameList::Front() const {
+ return begin()->second;
+}
+
+VCMFrameBuffer* FrameList::Back() const {
+ return rbegin()->second;
+}
+
+int FrameList::RecycleFramesUntilKeyFrame(FrameList::iterator* key_frame_it,
+ UnorderedFrameList* free_frames) {
+ int drop_count = 0;
+ FrameList::iterator it = begin();
+ while (!empty()) {
+    // Drop at least one frame.
+ it->second->Reset();
+ free_frames->push_back(it->second);
+ erase(it++);
+ ++drop_count;
+ if (it != end() && it->second->FrameType() == kVideoFrameKey) {
+ *key_frame_it = it;
+ return drop_count;
+ }
+ }
+ *key_frame_it = end();
+ return drop_count;
+}
+
+void FrameList::CleanUpOldOrEmptyFrames(VCMDecodingState* decoding_state,
+ UnorderedFrameList* free_frames) {
+ while (!empty()) {
+ VCMFrameBuffer* oldest_frame = Front();
+ bool remove_frame = false;
+ if (oldest_frame->GetState() == kStateEmpty && size() > 1) {
+ // This frame is empty, try to update the last decoded state and drop it
+ // if successful.
+ remove_frame = decoding_state->UpdateEmptyFrame(oldest_frame);
+ } else {
+ remove_frame = decoding_state->IsOldFrame(oldest_frame);
+ }
+ if (!remove_frame) {
+ break;
+ }
+ free_frames->push_back(oldest_frame);
+ TRACE_EVENT_INSTANT1("webrtc", "JB::OldOrEmptyFrameDropped", "timestamp",
+ oldest_frame->TimeStamp());
+ erase(begin());
+ }
+}
+
+void FrameList::Reset(UnorderedFrameList* free_frames) {
+ while (!empty()) {
+ begin()->second->Reset();
+ free_frames->push_back(begin()->second);
+ erase(begin());
+ }
+}
+
+bool Vp9SsMap::Insert(const VCMPacket& packet) {
+ if (!packet.codecSpecificHeader.codecHeader.VP9.ss_data_available)
+ return false;
+
+ ss_map_[packet.timestamp] = packet.codecSpecificHeader.codecHeader.VP9.gof;
+ return true;
+}
+
+void Vp9SsMap::Reset() {
+ ss_map_.clear();
+}
+
+bool Vp9SsMap::Find(uint32_t timestamp, SsMap::iterator* it_out) {
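+  // Linear scan: |it_out| ends at the most recent SS entry whose timestamp is
+  // not newer than |timestamp|.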
+ bool found = false;
+ for (SsMap::iterator it = ss_map_.begin(); it != ss_map_.end(); ++it) {
+ if (it->first == timestamp || IsNewerTimestamp(timestamp, it->first)) {
+ *it_out = it;
+ found = true;
+ }
+ }
+ return found;
+}
+
+void Vp9SsMap::RemoveOld(uint32_t timestamp) {
+ if (!TimeForCleanup(timestamp))
+ return;
+
+ SsMap::iterator it;
+ if (!Find(timestamp, &it))
+ return;
+
+ ss_map_.erase(ss_map_.begin(), it);
+ AdvanceFront(timestamp);
+}
+
+bool Vp9SsMap::TimeForCleanup(uint32_t timestamp) const {
+ if (ss_map_.empty() || !IsNewerTimestamp(timestamp, ss_map_.begin()->first))
+ return false;
+
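+  // RTP video timestamps tick at 90 kHz (kVideoPayloadTypeFrequency), so
+  // dividing the tick difference by the frequency yields seconds.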
+ uint32_t diff = timestamp - ss_map_.begin()->first;
+ return diff / kVideoPayloadTypeFrequency >= kSsCleanupIntervalSec;
+}
+
+void Vp9SsMap::AdvanceFront(uint32_t timestamp) {
+ RTC_DCHECK(!ss_map_.empty());
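+  // Re-key the oldest GOF entry to |timestamp| so that it still applies to
+  // frames at and after the cleanup point.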
+ GofInfoVP9 gof = ss_map_.begin()->second;
+ ss_map_.erase(ss_map_.begin());
+ ss_map_[timestamp] = gof;
+}
+
+bool Vp9SsMap::UpdatePacket(VCMPacket* packet) {
+ uint8_t gof_idx = packet->codecSpecificHeader.codecHeader.VP9.gof_idx;
+ if (gof_idx == kNoGofIdx)
+ return false; // No update needed.
+
+ SsMap::iterator it;
+ if (!Find(packet->timestamp, &it))
+ return false; // Corresponding SS not yet received.
+
+ if (gof_idx >= it->second.num_frames_in_gof)
+ return false; // Assume corresponding SS not yet received.
+
+ RTPVideoHeaderVP9* vp9 = &packet->codecSpecificHeader.codecHeader.VP9;
+ vp9->temporal_idx = it->second.temporal_idx[gof_idx];
+ vp9->temporal_up_switch = it->second.temporal_up_switch[gof_idx];
+
+ // TODO(asapersson): Set vp9.ref_picture_id[i] and add usage.
+ vp9->num_ref_pics = it->second.num_ref_pics[gof_idx];
+ for (size_t i = 0; i < it->second.num_ref_pics[gof_idx]; ++i) {
+ vp9->pid_diff[i] = it->second.pid_diff[gof_idx][i];
+ }
+ return true;
+}
+
+void Vp9SsMap::UpdateFrames(FrameList* frames) {
+ for (const auto& frame_it : *frames) {
+ uint8_t gof_idx =
+ frame_it.second->CodecSpecific()->codecSpecific.VP9.gof_idx;
+ if (gof_idx == kNoGofIdx) {
+ continue;
+ }
+ SsMap::iterator ss_it;
+ if (Find(frame_it.second->TimeStamp(), &ss_it)) {
+ if (gof_idx >= ss_it->second.num_frames_in_gof) {
+ continue; // Assume corresponding SS not yet received.
+ }
+ frame_it.second->SetGofInfo(ss_it->second, gof_idx);
+ }
+ }
+}
+
+VCMJitterBuffer::VCMJitterBuffer(Clock* clock,
+ rtc::scoped_ptr<EventWrapper> event)
+ : clock_(clock),
+ running_(false),
+ crit_sect_(CriticalSectionWrapper::CreateCriticalSection()),
+ frame_event_(event.Pass()),
+ max_number_of_frames_(kStartNumberOfFrames),
+ free_frames_(),
+ decodable_frames_(),
+ incomplete_frames_(),
+ last_decoded_state_(),
+ first_packet_since_reset_(true),
+ stats_callback_(NULL),
+ incoming_frame_rate_(0),
+ incoming_frame_count_(0),
+ time_last_incoming_frame_count_(0),
+ incoming_bit_count_(0),
+ incoming_bit_rate_(0),
+ num_consecutive_old_packets_(0),
+ num_packets_(0),
+ num_duplicated_packets_(0),
+ num_discarded_packets_(0),
+ time_first_packet_ms_(0),
+ jitter_estimate_(clock),
+ inter_frame_delay_(clock_->TimeInMilliseconds()),
+ rtt_ms_(kDefaultRtt),
+ nack_mode_(kNoNack),
+ low_rtt_nack_threshold_ms_(-1),
+ high_rtt_nack_threshold_ms_(-1),
+ missing_sequence_numbers_(SequenceNumberLessThan()),
+ max_nack_list_size_(0),
+ max_packet_age_to_nack_(0),
+ max_incomplete_time_ms_(0),
+ decode_error_mode_(kNoErrors),
+ average_packets_per_frame_(0.0f),
+ frame_counter_(0) {
+ for (int i = 0; i < kStartNumberOfFrames; i++)
+ free_frames_.push_back(new VCMFrameBuffer());
+}
+
+VCMJitterBuffer::~VCMJitterBuffer() {
+ Stop();
+ for (UnorderedFrameList::iterator it = free_frames_.begin();
+ it != free_frames_.end(); ++it) {
+ delete *it;
+ }
+ for (FrameList::iterator it = incomplete_frames_.begin();
+ it != incomplete_frames_.end(); ++it) {
+ delete it->second;
+ }
+ for (FrameList::iterator it = decodable_frames_.begin();
+ it != decodable_frames_.end(); ++it) {
+ delete it->second;
+ }
+ delete crit_sect_;
+}
+
+void VCMJitterBuffer::UpdateHistograms() {
+ if (num_packets_ <= 0 || !running_) {
+ return;
+ }
+ int64_t elapsed_sec =
+ (clock_->TimeInMilliseconds() - time_first_packet_ms_) / 1000;
+ if (elapsed_sec < metrics::kMinRunTimeInSeconds) {
+ return;
+ }
+
+ RTC_HISTOGRAM_PERCENTAGE("WebRTC.Video.DiscardedPacketsInPercent",
+ num_discarded_packets_ * 100 / num_packets_);
+ RTC_HISTOGRAM_PERCENTAGE("WebRTC.Video.DuplicatedPacketsInPercent",
+ num_duplicated_packets_ * 100 / num_packets_);
+
+ int total_frames =
+ receive_statistics_.key_frames + receive_statistics_.delta_frames;
+ if (total_frames > 0) {
+ RTC_HISTOGRAM_COUNTS_100("WebRTC.Video.CompleteFramesReceivedPerSecond",
+ static_cast<int>((total_frames / elapsed_sec) + 0.5f));
+ RTC_HISTOGRAM_COUNTS_1000(
+ "WebRTC.Video.KeyFramesReceivedInPermille",
+ static_cast<int>(
+ (receive_statistics_.key_frames * 1000.0f / total_frames) + 0.5f));
+ }
+}
+
+void VCMJitterBuffer::Start() {
+ CriticalSectionScoped cs(crit_sect_);
+ running_ = true;
+ incoming_frame_count_ = 0;
+ incoming_frame_rate_ = 0;
+ incoming_bit_count_ = 0;
+ incoming_bit_rate_ = 0;
+ time_last_incoming_frame_count_ = clock_->TimeInMilliseconds();
+ receive_statistics_ = FrameCounts();
+
+ num_consecutive_old_packets_ = 0;
+ num_packets_ = 0;
+ num_duplicated_packets_ = 0;
+ num_discarded_packets_ = 0;
+ time_first_packet_ms_ = 0;
+
+ // Start in a non-signaled state.
+ waiting_for_completion_.frame_size = 0;
+ waiting_for_completion_.timestamp = 0;
+ waiting_for_completion_.latest_packet_time = -1;
+ first_packet_since_reset_ = true;
+ rtt_ms_ = kDefaultRtt;
+ last_decoded_state_.Reset();
+ vp9_ss_map_.Reset();
+}
+
+void VCMJitterBuffer::Stop() {
+ crit_sect_->Enter();
+ UpdateHistograms();
+ running_ = false;
+ last_decoded_state_.Reset();
+ vp9_ss_map_.Reset();
+
+ // Make sure all frames are free and reset.
+ for (FrameList::iterator it = decodable_frames_.begin();
+ it != decodable_frames_.end(); ++it) {
+ free_frames_.push_back(it->second);
+ }
+ for (FrameList::iterator it = incomplete_frames_.begin();
+ it != incomplete_frames_.end(); ++it) {
+ free_frames_.push_back(it->second);
+ }
+ for (UnorderedFrameList::iterator it = free_frames_.begin();
+ it != free_frames_.end(); ++it) {
+ (*it)->Reset();
+ }
+ decodable_frames_.clear();
+ incomplete_frames_.clear();
+ crit_sect_->Leave();
+ // Make sure we wake up any threads waiting on these events.
+ frame_event_->Set();
+}
+
+bool VCMJitterBuffer::Running() const {
+ CriticalSectionScoped cs(crit_sect_);
+ return running_;
+}
+
+void VCMJitterBuffer::Flush() {
+ CriticalSectionScoped cs(crit_sect_);
+ decodable_frames_.Reset(&free_frames_);
+ incomplete_frames_.Reset(&free_frames_);
+ last_decoded_state_.Reset(); // TODO(mikhal): sync reset.
+ vp9_ss_map_.Reset();
+ num_consecutive_old_packets_ = 0;
+ // Also reset the jitter and delay estimates
+ jitter_estimate_.Reset();
+ inter_frame_delay_.Reset(clock_->TimeInMilliseconds());
+ waiting_for_completion_.frame_size = 0;
+ waiting_for_completion_.timestamp = 0;
+ waiting_for_completion_.latest_packet_time = -1;
+ first_packet_since_reset_ = true;
+ missing_sequence_numbers_.clear();
+}
+
+// Get received key and delta frames
+FrameCounts VCMJitterBuffer::FrameStatistics() const {
+ CriticalSectionScoped cs(crit_sect_);
+ return receive_statistics_;
+}
+
+int VCMJitterBuffer::num_packets() const {
+ CriticalSectionScoped cs(crit_sect_);
+ return num_packets_;
+}
+
+int VCMJitterBuffer::num_duplicated_packets() const {
+ CriticalSectionScoped cs(crit_sect_);
+ return num_duplicated_packets_;
+}
+
+int VCMJitterBuffer::num_discarded_packets() const {
+ CriticalSectionScoped cs(crit_sect_);
+ return num_discarded_packets_;
+}
+
+// Calculate framerate and bitrate.
+void VCMJitterBuffer::IncomingRateStatistics(unsigned int* framerate,
+ unsigned int* bitrate) {
+ assert(framerate);
+ assert(bitrate);
+ CriticalSectionScoped cs(crit_sect_);
+ const int64_t now = clock_->TimeInMilliseconds();
+ int64_t diff = now - time_last_incoming_frame_count_;
+ if (diff < 1000 && incoming_frame_rate_ > 0 && incoming_bit_rate_ > 0) {
+ // Make sure we report something even though less than
+ // 1 second has passed since last update.
+ *framerate = incoming_frame_rate_;
+ *bitrate = incoming_bit_rate_;
+ } else if (incoming_frame_count_ != 0) {
+ // We have received frame(s) since last call to this function
+
+ // Prepare calculations
+ if (diff <= 0) {
+ diff = 1;
+ }
+ // we add 0.5f for rounding
+ float rate = 0.5f + ((incoming_frame_count_ * 1000.0f) / diff);
+ if (rate < 1.0f) {
+ rate = 1.0f;
+ }
+
+ // Calculate frame rate
+ // Let r be rate.
+ // r(0) = 1000*framecount/delta_time.
+ // (I.e. frames per second since last calculation.)
+ // frame_rate = r(0)/2 + r(-1)/2
+ // (I.e. fr/s average this and the previous calculation.)
+ *framerate = (incoming_frame_rate_ + static_cast<unsigned int>(rate)) / 2;
+ incoming_frame_rate_ = static_cast<unsigned int>(rate);
+
+ // Calculate bit rate
+ if (incoming_bit_count_ == 0) {
+ *bitrate = 0;
+ } else {
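+      // |incoming_bit_count_| bits arrived over |diff| ms;
+      // 10 * (100 * bits / diff) == 1000 * bits / diff, i.e. bits per second.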
+ *bitrate = 10 * ((100 * incoming_bit_count_) /
+ static_cast<unsigned int>(diff));
+ }
+ incoming_bit_rate_ = *bitrate;
+
+ // Reset count
+ incoming_frame_count_ = 0;
+ incoming_bit_count_ = 0;
+ time_last_incoming_frame_count_ = now;
+
+ } else {
+ // No frames since last call
+ time_last_incoming_frame_count_ = clock_->TimeInMilliseconds();
+ *framerate = 0;
+ *bitrate = 0;
+ incoming_frame_rate_ = 0;
+ incoming_bit_rate_ = 0;
+ }
+}
+
+// Answers the question: Will the packet sequence be complete if the next
+// frame is grabbed for decoding right now? That is, have we lost a frame
+// between the last decoded frame and the next, or is the next frame missing
+// one or more packets?
+bool VCMJitterBuffer::CompleteSequenceWithNextFrame() {
+ CriticalSectionScoped cs(crit_sect_);
+ // Finding oldest frame ready for decoder, check sequence number and size
+ CleanUpOldOrEmptyFrames();
+ if (!decodable_frames_.empty()) {
+ if (decodable_frames_.Front()->GetState() == kStateComplete) {
+ return true;
+ }
+ } else if (incomplete_frames_.size() <= 1) {
+ // Frame not ready to be decoded.
+ return true;
+ }
+ return false;
+}
+
+// Returns immediately, or waits up to |max_wait_time_ms| ms for a complete
+// frame to become available; |max_wait_time_ms| is decided by the caller.
+bool VCMJitterBuffer::NextCompleteTimestamp(
+ uint32_t max_wait_time_ms, uint32_t* timestamp) {
+ crit_sect_->Enter();
+ if (!running_) {
+ crit_sect_->Leave();
+ return false;
+ }
+ CleanUpOldOrEmptyFrames();
+
+ if (decodable_frames_.empty() ||
+ decodable_frames_.Front()->GetState() != kStateComplete) {
+ const int64_t end_wait_time_ms = clock_->TimeInMilliseconds() +
+ max_wait_time_ms;
+ int64_t wait_time_ms = max_wait_time_ms;
+ while (wait_time_ms > 0) {
+ crit_sect_->Leave();
+ const EventTypeWrapper ret =
+ frame_event_->Wait(static_cast<uint32_t>(wait_time_ms));
+ crit_sect_->Enter();
+ if (ret == kEventSignaled) {
+ // Are we shutting down the jitter buffer?
+ if (!running_) {
+ crit_sect_->Leave();
+ return false;
+ }
+ // Finding oldest frame ready for decoder.
+ CleanUpOldOrEmptyFrames();
+ if (decodable_frames_.empty() ||
+ decodable_frames_.Front()->GetState() != kStateComplete) {
+ wait_time_ms = end_wait_time_ms - clock_->TimeInMilliseconds();
+ } else {
+ break;
+ }
+ } else {
+ break;
+ }
+ }
+ }
+ if (decodable_frames_.empty() ||
+ decodable_frames_.Front()->GetState() != kStateComplete) {
+ crit_sect_->Leave();
+ return false;
+ }
+ *timestamp = decodable_frames_.Front()->TimeStamp();
+ crit_sect_->Leave();
+ return true;
+}
+
+bool VCMJitterBuffer::NextMaybeIncompleteTimestamp(uint32_t* timestamp) {
+ CriticalSectionScoped cs(crit_sect_);
+ if (!running_) {
+ return false;
+ }
+ if (decode_error_mode_ == kNoErrors) {
+ // No point to continue, as we are not decoding with errors.
+ return false;
+ }
+
+ CleanUpOldOrEmptyFrames();
+
+ if (decodable_frames_.empty()) {
+ return false;
+ }
+ VCMFrameBuffer* oldest_frame = decodable_frames_.Front();
+ // If we have exactly one frame in the buffer, release it only if it is
+ // complete. We know decodable_frames_ is not empty due to the previous
+ // check.
+ if (decodable_frames_.size() == 1 && incomplete_frames_.empty()
+ && oldest_frame->GetState() != kStateComplete) {
+ return false;
+ }
+
+ *timestamp = oldest_frame->TimeStamp();
+ return true;
+}
+
+VCMEncodedFrame* VCMJitterBuffer::ExtractAndSetDecode(uint32_t timestamp) {
+ CriticalSectionScoped cs(crit_sect_);
+ if (!running_) {
+ return NULL;
+ }
+ // Extract the frame with the desired timestamp.
+ VCMFrameBuffer* frame = decodable_frames_.PopFrame(timestamp);
+ bool continuous = true;
+ if (!frame) {
+ frame = incomplete_frames_.PopFrame(timestamp);
+ if (frame)
+ continuous = last_decoded_state_.ContinuousFrame(frame);
+ else
+ return NULL;
+ }
+ TRACE_EVENT_ASYNC_STEP0("webrtc", "Video", timestamp, "Extract");
+ // Frame pulled out from jitter buffer, update the jitter estimate.
+ const bool retransmitted = (frame->GetNackCount() > 0);
+ if (retransmitted) {
+ jitter_estimate_.FrameNacked();
+ } else if (frame->Length() > 0) {
+ // Ignore retransmitted and empty frames.
+ if (waiting_for_completion_.latest_packet_time >= 0) {
+ UpdateJitterEstimate(waiting_for_completion_, true);
+ }
+ if (frame->GetState() == kStateComplete) {
+ UpdateJitterEstimate(*frame, false);
+ } else {
+ // Wait for this one to get complete.
+ waiting_for_completion_.frame_size = frame->Length();
+ waiting_for_completion_.latest_packet_time =
+ frame->LatestPacketTimeMs();
+ waiting_for_completion_.timestamp = frame->TimeStamp();
+ }
+ }
+
+ // The state must be changed to decoding before cleaning up zero sized
+ // frames to avoid empty frames being cleaned up and then given to the
+ // decoder. Propagates the missing_frame bit.
+ frame->PrepareForDecode(continuous);
+
+ // We have a frame - update the last decoded state and nack list.
+ last_decoded_state_.SetState(frame);
+ DropPacketsFromNackList(last_decoded_state_.sequence_num());
+
+  if (frame->IsSessionComplete())
+ UpdateAveragePacketsPerFrame(frame->NumPackets());
+
+ return frame;
+}
+
+// Release frame when done with decoding. Should never be used to release
+// frames from within the jitter buffer.
+void VCMJitterBuffer::ReleaseFrame(VCMEncodedFrame* frame) {
+ CriticalSectionScoped cs(crit_sect_);
+ VCMFrameBuffer* frame_buffer = static_cast<VCMFrameBuffer*>(frame);
+ if (frame_buffer) {
+ free_frames_.push_back(frame_buffer);
+ }
+}
+
+// Gets frame to use for this timestamp. If no match, get empty frame.
+VCMFrameBufferEnum VCMJitterBuffer::GetFrame(const VCMPacket& packet,
+ VCMFrameBuffer** frame,
+ FrameList** frame_list) {
+ *frame = incomplete_frames_.PopFrame(packet.timestamp);
+ if (*frame != NULL) {
+ *frame_list = &incomplete_frames_;
+ return kNoError;
+ }
+ *frame = decodable_frames_.PopFrame(packet.timestamp);
+ if (*frame != NULL) {
+ *frame_list = &decodable_frames_;
+ return kNoError;
+ }
+
+ *frame_list = NULL;
+ // No match, return empty frame.
+ *frame = GetEmptyFrame();
+ if (*frame == NULL) {
+ // No free frame! Try to reclaim some...
+ LOG(LS_WARNING) << "Unable to get empty frame; Recycling.";
+ bool found_key_frame = RecycleFramesUntilKeyFrame();
+ *frame = GetEmptyFrame();
+ assert(*frame);
+ if (!found_key_frame) {
+ free_frames_.push_back(*frame);
+ return kFlushIndicator;
+ }
+ }
+ (*frame)->Reset();
+ return kNoError;
+}
+
+int64_t VCMJitterBuffer::LastPacketTime(const VCMEncodedFrame* frame,
+ bool* retransmitted) const {
+ assert(retransmitted);
+ CriticalSectionScoped cs(crit_sect_);
+ const VCMFrameBuffer* frame_buffer =
+ static_cast<const VCMFrameBuffer*>(frame);
+ *retransmitted = (frame_buffer->GetNackCount() > 0);
+ return frame_buffer->LatestPacketTimeMs();
+}
+
+VCMFrameBufferEnum VCMJitterBuffer::InsertPacket(const VCMPacket& packet,
+ bool* retransmitted) {
+ CriticalSectionScoped cs(crit_sect_);
+
+ ++num_packets_;
+ if (num_packets_ == 1) {
+ time_first_packet_ms_ = clock_->TimeInMilliseconds();
+ }
+ // Does this packet belong to an old frame?
+ if (last_decoded_state_.IsOldPacket(&packet)) {
+ // Account only for media packets.
+ if (packet.sizeBytes > 0) {
+ num_discarded_packets_++;
+ num_consecutive_old_packets_++;
+ if (stats_callback_ != NULL)
+ stats_callback_->OnDiscardedPacketsUpdated(num_discarded_packets_);
+ }
+ // Update last decoded sequence number if the packet arrived late and
+ // belongs to a frame with a timestamp equal to the last decoded
+ // timestamp.
+ last_decoded_state_.UpdateOldPacket(&packet);
+ DropPacketsFromNackList(last_decoded_state_.sequence_num());
+
+ // Also see if this old packet made more incomplete frames continuous.
+ FindAndInsertContinuousFramesWithState(last_decoded_state_);
+
+ if (num_consecutive_old_packets_ > kMaxConsecutiveOldPackets) {
+ LOG(LS_WARNING)
+ << num_consecutive_old_packets_
+ << " consecutive old packets received. Flushing the jitter buffer.";
+ Flush();
+ return kFlushIndicator;
+ }
+ return kOldPacket;
+ }
+
+ num_consecutive_old_packets_ = 0;
+
+ if (packet.codec == kVideoCodecVP9) {
+ if (packet.codecSpecificHeader.codecHeader.VP9.flexible_mode) {
+ // TODO(asapersson): Add support for flexible mode.
+ return kGeneralError;
+ }
+ if (!packet.codecSpecificHeader.codecHeader.VP9.flexible_mode) {
+ if (vp9_ss_map_.Insert(packet))
+ vp9_ss_map_.UpdateFrames(&incomplete_frames_);
+
+ vp9_ss_map_.UpdatePacket(const_cast<VCMPacket*>(&packet));
+ }
+ if (!last_decoded_state_.in_initial_state())
+ vp9_ss_map_.RemoveOld(last_decoded_state_.time_stamp());
+ }
+
+ VCMFrameBuffer* frame;
+ FrameList* frame_list;
+ const VCMFrameBufferEnum error = GetFrame(packet, &frame, &frame_list);
+ if (error != kNoError)
+ return error;
+
+ int64_t now_ms = clock_->TimeInMilliseconds();
+ // We are keeping track of the first and latest seq numbers, and
+ // the number of wraps to be able to calculate how many packets we expect.
+ if (first_packet_since_reset_) {
+ // Now it's time to start estimating jitter
+ // reset the delay estimate.
+ inter_frame_delay_.Reset(now_ms);
+ }
+
+  // Empty packets may bias the jitter estimate (lacking a size component);
+  // therefore don't let empty packets trigger the following updates:
+ if (packet.frameType != kEmptyFrame) {
+ if (waiting_for_completion_.timestamp == packet.timestamp) {
+      // This can get bad if we have a lot of duplicate packets,
+      // as we will then count some packets multiple times.
+ waiting_for_completion_.frame_size += packet.sizeBytes;
+ waiting_for_completion_.latest_packet_time = now_ms;
+ } else if (waiting_for_completion_.latest_packet_time >= 0 &&
+ waiting_for_completion_.latest_packet_time + 2000 <= now_ms) {
+ // A packet should never be more than two seconds late
+ UpdateJitterEstimate(waiting_for_completion_, true);
+ waiting_for_completion_.latest_packet_time = -1;
+ waiting_for_completion_.frame_size = 0;
+ waiting_for_completion_.timestamp = 0;
+ }
+ }
+
+ VCMFrameBufferStateEnum previous_state = frame->GetState();
+ // Insert packet.
+ FrameData frame_data;
+ frame_data.rtt_ms = rtt_ms_;
+ frame_data.rolling_average_packets_per_frame = average_packets_per_frame_;
+ VCMFrameBufferEnum buffer_state =
+ frame->InsertPacket(packet, now_ms, decode_error_mode_, frame_data);
+
+ if (previous_state != kStateComplete) {
+ TRACE_EVENT_ASYNC_BEGIN1("webrtc", "Video", frame->TimeStamp(),
+ "timestamp", frame->TimeStamp());
+ }
+
+ if (buffer_state > 0) {
+ incoming_bit_count_ += packet.sizeBytes << 3;
+ if (first_packet_since_reset_) {
+ latest_received_sequence_number_ = packet.seqNum;
+ first_packet_since_reset_ = false;
+ } else {
+ if (IsPacketRetransmitted(packet)) {
+ frame->IncrementNackCount();
+ }
+ if (!UpdateNackList(packet.seqNum) &&
+ packet.frameType != kVideoFrameKey) {
+ buffer_state = kFlushIndicator;
+ }
+
+ latest_received_sequence_number_ = LatestSequenceNumber(
+ latest_received_sequence_number_, packet.seqNum);
+ }
+ }
+
+ // Is the frame already in the decodable list?
+ bool continuous = IsContinuous(*frame);
+ switch (buffer_state) {
+ case kGeneralError:
+ case kTimeStampError:
+ case kSizeError: {
+ free_frames_.push_back(frame);
+ break;
+ }
+ case kCompleteSession: {
+ if (previous_state != kStateDecodable &&
+ previous_state != kStateComplete) {
+ CountFrame(*frame);
+ if (continuous) {
+ // Signal that we have a complete session.
+ frame_event_->Set();
+ }
+ }
+ FALLTHROUGH();
+ }
+ // Note: There is no break here - continuing to kDecodableSession.
+ case kDecodableSession: {
+ *retransmitted = (frame->GetNackCount() > 0);
+ if (continuous) {
+ decodable_frames_.InsertFrame(frame);
+ FindAndInsertContinuousFrames(*frame);
+ } else {
+ incomplete_frames_.InsertFrame(frame);
+ }
+ break;
+ }
+ case kIncomplete: {
+ if (frame->GetState() == kStateEmpty &&
+ last_decoded_state_.UpdateEmptyFrame(frame)) {
+ free_frames_.push_back(frame);
+ return kNoError;
+ } else {
+ incomplete_frames_.InsertFrame(frame);
+ }
+ break;
+ }
+ case kNoError:
+ case kOutOfBoundsPacket:
+ case kDuplicatePacket: {
+ // Put back the frame where it came from.
+ if (frame_list != NULL) {
+ frame_list->InsertFrame(frame);
+ } else {
+ free_frames_.push_back(frame);
+ }
+ ++num_duplicated_packets_;
+ break;
+ }
+ case kFlushIndicator:
+ free_frames_.push_back(frame);
+ return kFlushIndicator;
+ default: assert(false);
+ }
+ return buffer_state;
+}
+
+bool VCMJitterBuffer::IsContinuousInState(const VCMFrameBuffer& frame,
+ const VCMDecodingState& decoding_state) const {
+ if (decode_error_mode_ == kWithErrors)
+ return true;
+  // Is this frame complete or decodable, and continuous?
+  // kStateDecodable will never be set when decode_error_mode_ is kNoErrors,
+ // as SessionInfo determines this state based on the error mode (and frame
+ // completeness).
+ return (frame.GetState() == kStateComplete ||
+ frame.GetState() == kStateDecodable) &&
+ decoding_state.ContinuousFrame(&frame);
+}
+
+bool VCMJitterBuffer::IsContinuous(const VCMFrameBuffer& frame) const {
+ if (IsContinuousInState(frame, last_decoded_state_)) {
+ return true;
+ }
+ VCMDecodingState decoding_state;
+ decoding_state.CopyFrom(last_decoded_state_);
+ for (FrameList::const_iterator it = decodable_frames_.begin();
+ it != decodable_frames_.end(); ++it) {
+ VCMFrameBuffer* decodable_frame = it->second;
+ if (IsNewerTimestamp(decodable_frame->TimeStamp(), frame.TimeStamp())) {
+ break;
+ }
+ decoding_state.SetState(decodable_frame);
+ if (IsContinuousInState(frame, decoding_state)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+void VCMJitterBuffer::FindAndInsertContinuousFrames(
+ const VCMFrameBuffer& new_frame) {
+ VCMDecodingState decoding_state;
+ decoding_state.CopyFrom(last_decoded_state_);
+ decoding_state.SetState(&new_frame);
+ FindAndInsertContinuousFramesWithState(decoding_state);
+}
+
+void VCMJitterBuffer::FindAndInsertContinuousFramesWithState(
+ const VCMDecodingState& original_decoded_state) {
+ // Copy original_decoded_state so we can move the state forward with each
+ // decodable frame we find.
+ VCMDecodingState decoding_state;
+ decoding_state.CopyFrom(original_decoded_state);
+
+ // When temporal layers are available, we search for a complete or decodable
+ // frame until we hit one of the following:
+ // 1. Continuous base or sync layer.
+ // 2. The end of the list was reached.
+ for (FrameList::iterator it = incomplete_frames_.begin();
+ it != incomplete_frames_.end();) {
+ VCMFrameBuffer* frame = it->second;
+ if (IsNewerTimestamp(original_decoded_state.time_stamp(),
+ frame->TimeStamp())) {
+ ++it;
+ continue;
+ }
+ if (IsContinuousInState(*frame, decoding_state)) {
+ decodable_frames_.InsertFrame(frame);
+ incomplete_frames_.erase(it++);
+ decoding_state.SetState(frame);
+ } else if (frame->TemporalId() <= 0) {
+ break;
+ } else {
+ ++it;
+ }
+ }
+}
+
+uint32_t VCMJitterBuffer::EstimatedJitterMs() {
+ CriticalSectionScoped cs(crit_sect_);
+  // Compute the RTT multiplier for the estimate.
+  // low_rtt_nack_threshold_ms_ == -1 means no FEC.
+  double rtt_mult = 1.0;
+ if (low_rtt_nack_threshold_ms_ >= 0 &&
+ rtt_ms_ >= low_rtt_nack_threshold_ms_) {
+ // For RTTs above low_rtt_nack_threshold_ms_ we don't apply extra delay
+ // when waiting for retransmissions.
+ rtt_mult = 0.0f;
+ }
+ return jitter_estimate_.GetJitterEstimate(rtt_mult);
+}
+
+void VCMJitterBuffer::UpdateRtt(int64_t rtt_ms) {
+ CriticalSectionScoped cs(crit_sect_);
+ rtt_ms_ = rtt_ms;
+ jitter_estimate_.UpdateRtt(rtt_ms);
+}
+
+void VCMJitterBuffer::SetNackMode(VCMNackMode mode,
+ int64_t low_rtt_nack_threshold_ms,
+ int64_t high_rtt_nack_threshold_ms) {
+ CriticalSectionScoped cs(crit_sect_);
+ nack_mode_ = mode;
+ if (mode == kNoNack) {
+ missing_sequence_numbers_.clear();
+ }
+ assert(low_rtt_nack_threshold_ms >= -1 && high_rtt_nack_threshold_ms >= -1);
+ assert(high_rtt_nack_threshold_ms == -1 ||
+ low_rtt_nack_threshold_ms <= high_rtt_nack_threshold_ms);
+ assert(low_rtt_nack_threshold_ms > -1 || high_rtt_nack_threshold_ms == -1);
+ low_rtt_nack_threshold_ms_ = low_rtt_nack_threshold_ms;
+ high_rtt_nack_threshold_ms_ = high_rtt_nack_threshold_ms;
+ // Don't set a high start rtt if high_rtt_nack_threshold_ms_ is used, to not
+ // disable NACK in |kNack| mode.
+ if (rtt_ms_ == kDefaultRtt && high_rtt_nack_threshold_ms_ != -1) {
+ rtt_ms_ = 0;
+ }
+ if (!WaitForRetransmissions()) {
+ jitter_estimate_.ResetNackCount();
+ }
+}
+
+void VCMJitterBuffer::SetNackSettings(size_t max_nack_list_size,
+ int max_packet_age_to_nack,
+ int max_incomplete_time_ms) {
+ CriticalSectionScoped cs(crit_sect_);
+ assert(max_packet_age_to_nack >= 0);
+  assert(max_incomplete_time_ms >= 0);
+ max_nack_list_size_ = max_nack_list_size;
+ max_packet_age_to_nack_ = max_packet_age_to_nack;
+ max_incomplete_time_ms_ = max_incomplete_time_ms;
+}
+
+VCMNackMode VCMJitterBuffer::nack_mode() const {
+ CriticalSectionScoped cs(crit_sect_);
+ return nack_mode_;
+}
+
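+// Returns the span of the non-continuous or incomplete tail of the buffer in
+// 90 kHz RTP ticks; the caller compares it against 90 * max_incomplete_time_ms_.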
+int VCMJitterBuffer::NonContinuousOrIncompleteDuration() {
+ if (incomplete_frames_.empty()) {
+ return 0;
+ }
+ uint32_t start_timestamp = incomplete_frames_.Front()->TimeStamp();
+ if (!decodable_frames_.empty()) {
+ start_timestamp = decodable_frames_.Back()->TimeStamp();
+ }
+ return incomplete_frames_.Back()->TimeStamp() - start_timestamp;
+}
+
+uint16_t VCMJitterBuffer::EstimatedLowSequenceNumber(
+ const VCMFrameBuffer& frame) const {
+ assert(frame.GetLowSeqNum() >= 0);
+ if (frame.HaveFirstPacket())
+ return frame.GetLowSeqNum();
+
+ // This estimate is not accurate if more than one packet with lower sequence
+ // number is lost.
+ return frame.GetLowSeqNum() - 1;
+}
+
+std::vector<uint16_t> VCMJitterBuffer::GetNackList(bool* request_key_frame) {
+ CriticalSectionScoped cs(crit_sect_);
+ *request_key_frame = false;
+ if (nack_mode_ == kNoNack) {
+ return std::vector<uint16_t>();
+ }
+ if (last_decoded_state_.in_initial_state()) {
+ VCMFrameBuffer* next_frame = NextFrame();
+ const bool first_frame_is_key = next_frame &&
+ next_frame->FrameType() == kVideoFrameKey &&
+ next_frame->HaveFirstPacket();
+ if (!first_frame_is_key) {
+ bool have_non_empty_frame = decodable_frames_.end() != find_if(
+ decodable_frames_.begin(), decodable_frames_.end(),
+ HasNonEmptyState);
+ if (!have_non_empty_frame) {
+ have_non_empty_frame = incomplete_frames_.end() != find_if(
+ incomplete_frames_.begin(), incomplete_frames_.end(),
+ HasNonEmptyState);
+ }
+ bool found_key_frame = RecycleFramesUntilKeyFrame();
+ if (!found_key_frame) {
+ *request_key_frame = have_non_empty_frame;
+ return std::vector<uint16_t>();
+ }
+ }
+ }
+ if (TooLargeNackList()) {
+ *request_key_frame = !HandleTooLargeNackList();
+ }
+ if (max_incomplete_time_ms_ > 0) {
+ int non_continuous_incomplete_duration =
+ NonContinuousOrIncompleteDuration();
+ if (non_continuous_incomplete_duration > 90 * max_incomplete_time_ms_) {
+ LOG_F(LS_WARNING) << "Too long non-decodable duration: "
+ << non_continuous_incomplete_duration << " > "
+ << 90 * max_incomplete_time_ms_;
+ FrameList::reverse_iterator rit = find_if(incomplete_frames_.rbegin(),
+ incomplete_frames_.rend(), IsKeyFrame);
+ if (rit == incomplete_frames_.rend()) {
+ // Request a key frame if we don't have one already.
+ *request_key_frame = true;
+ return std::vector<uint16_t>();
+ } else {
+ // Skip to the last key frame. If it's incomplete we will start
+ // NACKing it.
+ // Note that the estimated low sequence number is correct for VP8
+ // streams because only the first packet of a key frame is marked.
+ last_decoded_state_.Reset();
+ DropPacketsFromNackList(EstimatedLowSequenceNumber(*rit->second));
+ }
+ }
+ }
+ std::vector<uint16_t> nack_list(missing_sequence_numbers_.begin(),
+ missing_sequence_numbers_.end());
+ return nack_list;
+}
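+
+// Note: the |90 * max_incomplete_time_ms_| comparison above converts
+// milliseconds into RTP ticks: video RTP timestamps run at 90 kHz, i.e.
+// 90 ticks per millisecond, and NonContinuousOrIncompleteDuration() returns
+// a timestamp (tick) difference. A sketch of the conversion (hypothetical
+// helper, not part of this file):
+//
+//   inline uint32_t MsToRtp90kHzTicks(int ms) {
+//     return static_cast<uint32_t>(ms) * 90;  // 90000 ticks/s / 1000 ms/s.
+//   }
+//   // e.g. MsToRtp90kHzTicks(1000) == 90000: one second of RTP time.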
+
+void VCMJitterBuffer::SetDecodeErrorMode(VCMDecodeErrorMode error_mode) {
+ CriticalSectionScoped cs(crit_sect_);
+ decode_error_mode_ = error_mode;
+}
+
+VCMFrameBuffer* VCMJitterBuffer::NextFrame() const {
+ if (!decodable_frames_.empty())
+ return decodable_frames_.Front();
+ if (!incomplete_frames_.empty())
+ return incomplete_frames_.Front();
+ return NULL;
+}
+
+bool VCMJitterBuffer::UpdateNackList(uint16_t sequence_number) {
+ if (nack_mode_ == kNoNack) {
+ return true;
+ }
+ // Make sure we don't add packets which are already too old to be decoded.
+ if (!last_decoded_state_.in_initial_state()) {
+ latest_received_sequence_number_ = LatestSequenceNumber(
+ latest_received_sequence_number_,
+ last_decoded_state_.sequence_num());
+ }
+ if (IsNewerSequenceNumber(sequence_number,
+ latest_received_sequence_number_)) {
+ // Push any missing sequence numbers to the NACK list.
+ for (uint16_t i = latest_received_sequence_number_ + 1;
+ IsNewerSequenceNumber(sequence_number, i); ++i) {
+ missing_sequence_numbers_.insert(missing_sequence_numbers_.end(), i);
+ TRACE_EVENT_INSTANT1(TRACE_DISABLED_BY_DEFAULT("webrtc_rtp"), "AddNack",
+ "seqnum", i);
+ }
+ if (TooLargeNackList() && !HandleTooLargeNackList()) {
+ LOG(LS_WARNING) << "Requesting key frame due to too large NACK list.";
+ return false;
+ }
+ if (MissingTooOldPacket(sequence_number) &&
+ !HandleTooOldPackets(sequence_number)) {
+ LOG(LS_WARNING) << "Requesting key frame due to missing too old packets";
+ return false;
+ }
+ } else {
+ missing_sequence_numbers_.erase(sequence_number);
+ TRACE_EVENT_INSTANT1(TRACE_DISABLED_BY_DEFAULT("webrtc_rtp"), "RemoveNack",
+ "seqnum", sequence_number);
+ }
+ return true;
+}
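+
+// Note: the NACK-filling loop above survives the 16-bit sequence number wrap
+// only because IsNewerSequenceNumber() compares modulo 2^16. A sketch of
+// that rule (mirrors the webrtc helper; shown purely for illustration):
+//
+//   inline bool IsNewerSeq(uint16_t value, uint16_t prev) {
+//     // "Newer" means less than half the 16-bit range ahead of |prev|.
+//     return value != prev && static_cast<uint16_t>(value - prev) < 0x8000;
+//   }
+//   // With latest_received_sequence_number_ == 65534 and an incoming
+//   // sequence_number == 2, the loop inserts 65535, 0 and 1, then stops.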
+
+bool VCMJitterBuffer::TooLargeNackList() const {
+ return missing_sequence_numbers_.size() > max_nack_list_size_;
+}
+
+bool VCMJitterBuffer::HandleTooLargeNackList() {
+ // Recycle frames until the NACK list is small enough. It is likely cheaper to
+ // request a key frame than to retransmit this many missing packets.
+ LOG_F(LS_WARNING) << "NACK list has grown too large: "
+ << missing_sequence_numbers_.size() << " > "
+ << max_nack_list_size_;
+ bool key_frame_found = false;
+ while (TooLargeNackList()) {
+ key_frame_found = RecycleFramesUntilKeyFrame();
+ }
+ return key_frame_found;
+}
+
+bool VCMJitterBuffer::MissingTooOldPacket(
+ uint16_t latest_sequence_number) const {
+ if (missing_sequence_numbers_.empty()) {
+ return false;
+ }
+ const uint16_t age_of_oldest_missing_packet = latest_sequence_number -
+ *missing_sequence_numbers_.begin();
+ // Recycle frames if the NACK list contains too old sequence numbers as
+ // the packets may have already been dropped by the sender.
+ return age_of_oldest_missing_packet > max_packet_age_to_nack_;
+}
+
+bool VCMJitterBuffer::HandleTooOldPackets(uint16_t latest_sequence_number) {
+ bool key_frame_found = false;
+ const uint16_t age_of_oldest_missing_packet = latest_sequence_number -
+ *missing_sequence_numbers_.begin();
+ LOG_F(LS_WARNING) << "NACK list contains too old sequence numbers: "
+ << age_of_oldest_missing_packet << " > "
+ << max_packet_age_to_nack_;
+ while (MissingTooOldPacket(latest_sequence_number)) {
+ key_frame_found = RecycleFramesUntilKeyFrame();
+ }
+ return key_frame_found;
+}
+
+void VCMJitterBuffer::DropPacketsFromNackList(
+ uint16_t last_decoded_sequence_number) {
+  // Erase all sequence numbers from the NACK list that we no longer need.
+ missing_sequence_numbers_.erase(missing_sequence_numbers_.begin(),
+ missing_sequence_numbers_.upper_bound(
+ last_decoded_sequence_number));
+}
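+
+// Note: |missing_sequence_numbers_| is ordered by the wraparound-aware
+// SequenceNumberLessThan comparator, so upper_bound() finds the first
+// missing number strictly newer than |last_decoded_sequence_number| even
+// across a wrap. Sketch with hypothetical values:
+//
+//   // Stream order across the wrap: 65534 < 65535 < 3.
+//   std::set<uint16_t, SequenceNumberLessThan> s = {65534, 65535, 3};
+//   s.erase(s.begin(), s.upper_bound(0));  // Removes 65534 and 65535 only.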
+
+int64_t VCMJitterBuffer::LastDecodedTimestamp() const {
+ CriticalSectionScoped cs(crit_sect_);
+ return last_decoded_state_.time_stamp();
+}
+
+void VCMJitterBuffer::RenderBufferSize(uint32_t* timestamp_start,
+ uint32_t* timestamp_end) {
+ CriticalSectionScoped cs(crit_sect_);
+ CleanUpOldOrEmptyFrames();
+ *timestamp_start = 0;
+ *timestamp_end = 0;
+ if (decodable_frames_.empty()) {
+ return;
+ }
+ *timestamp_start = decodable_frames_.Front()->TimeStamp();
+ *timestamp_end = decodable_frames_.Back()->TimeStamp();
+}
+
+void VCMJitterBuffer::RegisterStatsCallback(
+ VCMReceiveStatisticsCallback* callback) {
+ CriticalSectionScoped cs(crit_sect_);
+ stats_callback_ = callback;
+}
+
+VCMFrameBuffer* VCMJitterBuffer::GetEmptyFrame() {
+ if (free_frames_.empty()) {
+ if (!TryToIncreaseJitterBufferSize()) {
+ return NULL;
+ }
+ }
+ VCMFrameBuffer* frame = free_frames_.front();
+ free_frames_.pop_front();
+ return frame;
+}
+
+bool VCMJitterBuffer::TryToIncreaseJitterBufferSize() {
+ if (max_number_of_frames_ >= kMaxNumberOfFrames)
+ return false;
+ free_frames_.push_back(new VCMFrameBuffer());
+ ++max_number_of_frames_;
+ TRACE_COUNTER1("webrtc", "JBMaxFrames", max_number_of_frames_);
+ return true;
+}
+
+// Recycle oldest frames up to a key frame, used if jitter buffer is completely
+// full.
+bool VCMJitterBuffer::RecycleFramesUntilKeyFrame() {
+ // First release incomplete frames, and only release decodable frames if there
+ // are no incomplete ones.
+ FrameList::iterator key_frame_it;
+ bool key_frame_found = false;
+ int dropped_frames = 0;
+ dropped_frames += incomplete_frames_.RecycleFramesUntilKeyFrame(
+ &key_frame_it, &free_frames_);
+ key_frame_found = key_frame_it != incomplete_frames_.end();
+ if (dropped_frames == 0) {
+ dropped_frames += decodable_frames_.RecycleFramesUntilKeyFrame(
+ &key_frame_it, &free_frames_);
+ key_frame_found = key_frame_it != decodable_frames_.end();
+ }
+ TRACE_EVENT_INSTANT0("webrtc", "JB::RecycleFramesUntilKeyFrame");
+ if (key_frame_found) {
+ LOG(LS_INFO) << "Found key frame while dropping frames.";
+ // Reset last decoded state to make sure the next frame decoded is a key
+ // frame, and start NACKing from here.
+ last_decoded_state_.Reset();
+ DropPacketsFromNackList(EstimatedLowSequenceNumber(*key_frame_it->second));
+ } else if (decodable_frames_.empty()) {
+ // All frames dropped. Reset the decoding state and clear missing sequence
+ // numbers as we're starting fresh.
+ last_decoded_state_.Reset();
+ missing_sequence_numbers_.clear();
+ }
+ return key_frame_found;
+}
+
+// Must be called under the critical section |crit_sect_|.
+void VCMJitterBuffer::CountFrame(const VCMFrameBuffer& frame) {
+ incoming_frame_count_++;
+
+ if (frame.FrameType() == kVideoFrameKey) {
+ TRACE_EVENT_ASYNC_STEP0("webrtc", "Video",
+ frame.TimeStamp(), "KeyComplete");
+ } else {
+ TRACE_EVENT_ASYNC_STEP0("webrtc", "Video",
+ frame.TimeStamp(), "DeltaComplete");
+ }
+
+  // Update receive statistics. All layers are counted, so with layered
+  // streams the sum of key and delta frames may differ from the frame count.
+ if (frame.IsSessionComplete()) {
+ if (frame.FrameType() == kVideoFrameKey) {
+ ++receive_statistics_.key_frames;
+ } else {
+ ++receive_statistics_.delta_frames;
+ }
+ if (stats_callback_ != NULL)
+ stats_callback_->OnFrameCountsUpdated(receive_statistics_);
+ }
+}
+
+void VCMJitterBuffer::UpdateAveragePacketsPerFrame(int current_number_packets) {
+ if (frame_counter_ > kFastConvergeThreshold) {
+ average_packets_per_frame_ = average_packets_per_frame_
+ * (1 - kNormalConvergeMultiplier)
+ + current_number_packets * kNormalConvergeMultiplier;
+ } else if (frame_counter_ > 0) {
+ average_packets_per_frame_ = average_packets_per_frame_
+ * (1 - kFastConvergeMultiplier)
+ + current_number_packets * kFastConvergeMultiplier;
+ frame_counter_++;
+ } else {
+ average_packets_per_frame_ = current_number_packets;
+ frame_counter_++;
+ }
+}
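+
+// Note: the update above is an exponentially weighted moving average,
+// avg <- (1 - a) * avg + a * n, where a is kFastConvergeMultiplier (0.4) for
+// the first kFastConvergeThreshold frames and kNormalConvergeMultiplier
+// (0.2) afterwards. Worked example with hypothetical packet counts:
+//
+//   frame 1: n = 10  ->  avg seeded to 10.0
+//   frame 2: n = 20  ->  avg = 0.6 * 10.0 + 0.4 * 20 = 14.0  (fast mode)
+//   later:   n = 20  ->  avg = 0.8 * 14.0 + 0.2 * 20 = 15.2  (normal mode)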
+
+// Must be called under the critical section |crit_sect_|.
+void VCMJitterBuffer::CleanUpOldOrEmptyFrames() {
+ decodable_frames_.CleanUpOldOrEmptyFrames(&last_decoded_state_,
+ &free_frames_);
+ incomplete_frames_.CleanUpOldOrEmptyFrames(&last_decoded_state_,
+ &free_frames_);
+ if (!last_decoded_state_.in_initial_state()) {
+ DropPacketsFromNackList(last_decoded_state_.sequence_num());
+ }
+}
+
+// Must be called from within |crit_sect_|.
+bool VCMJitterBuffer::IsPacketRetransmitted(const VCMPacket& packet) const {
+ return missing_sequence_numbers_.find(packet.seqNum) !=
+ missing_sequence_numbers_.end();
+}
+
+// Must be called under the critical section |crit_sect_|. Should never be
+// called with retransmitted frames; they must be filtered out before this
+// function is called.
+void VCMJitterBuffer::UpdateJitterEstimate(const VCMJitterSample& sample,
+ bool incomplete_frame) {
+ if (sample.latest_packet_time == -1) {
+ return;
+ }
+ UpdateJitterEstimate(sample.latest_packet_time, sample.timestamp,
+ sample.frame_size, incomplete_frame);
+}
+
+// Must be called under the critical section |crit_sect_|. Should never be
+// called with retransmitted frames; they must be filtered out before this
+// function is called.
+void VCMJitterBuffer::UpdateJitterEstimate(const VCMFrameBuffer& frame,
+ bool incomplete_frame) {
+ if (frame.LatestPacketTimeMs() == -1) {
+ return;
+ }
+ // No retransmitted frames should be a part of the jitter
+ // estimate.
+ UpdateJitterEstimate(frame.LatestPacketTimeMs(), frame.TimeStamp(),
+ frame.Length(), incomplete_frame);
+}
+
+// Must be called under the critical section |crit_sect_|. Should never be
+// called with retransmitted frames; they must be filtered out before this
+// function is called.
+void VCMJitterBuffer::UpdateJitterEstimate(
+ int64_t latest_packet_time_ms,
+ uint32_t timestamp,
+ unsigned int frame_size,
+ bool incomplete_frame) {
+ if (latest_packet_time_ms == -1) {
+ return;
+ }
+ int64_t frame_delay;
+ bool not_reordered = inter_frame_delay_.CalculateDelay(timestamp,
+ &frame_delay,
+ latest_packet_time_ms);
+ // Filter out frames which have been reordered in time by the network
+ if (not_reordered) {
+ // Update the jitter estimate with the new samples
+ jitter_estimate_.UpdateEstimate(frame_delay, frame_size, incomplete_frame);
+ }
+}
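+
+// Note: conceptually, the delay sample produced by
+// VCMInterFrameDelay::CalculateDelay() is the receive-time spacing of two
+// frames minus their RTP-timestamp spacing converted at 90 ticks/ms:
+//
+//   d(i) = (t_recv(i) - t_recv(i-1)) - (ts(i) - ts(i-1)) / 90
+//
+// e.g. frames 33 ms apart in RTP time (2970 ticks) that arrive 40 ms apart
+// yield a sample of about 40 - 33 = 7 ms. This is a simplified reading; the
+// actual math lives in inter_frame_delay.cc.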
+
+bool VCMJitterBuffer::WaitForRetransmissions() {
+ if (nack_mode_ == kNoNack) {
+ // NACK disabled -> don't wait for retransmissions.
+ return false;
+ }
+  // If the RTT is at or above |high_rtt_nack_threshold_ms_|, don't wait for
+  // retransmissions.
+ if (high_rtt_nack_threshold_ms_ >= 0 &&
+ rtt_ms_ >= high_rtt_nack_threshold_ms_) {
+ return false;
+ }
+ return true;
+}
+} // namespace webrtc
diff --git a/webrtc/modules/video_coding/main/source/jitter_buffer.h b/webrtc/modules/video_coding/main/source/jitter_buffer.h
new file mode 100644
index 0000000000..f4a3638f7d
--- /dev/null
+++ b/webrtc/modules/video_coding/main/source/jitter_buffer.h
@@ -0,0 +1,396 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_JITTER_BUFFER_H_
+#define WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_JITTER_BUFFER_H_
+
+#include <list>
+#include <map>
+#include <set>
+#include <vector>
+
+#include "webrtc/base/constructormagic.h"
+#include "webrtc/base/thread_annotations.h"
+#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/video_coding/main/interface/video_coding.h"
+#include "webrtc/modules/video_coding/main/interface/video_coding_defines.h"
+#include "webrtc/modules/video_coding/main/source/decoding_state.h"
+#include "webrtc/modules/video_coding/main/source/inter_frame_delay.h"
+#include "webrtc/modules/video_coding/main/source/jitter_buffer_common.h"
+#include "webrtc/modules/video_coding/main/source/jitter_estimator.h"
+#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
+#include "webrtc/typedefs.h"
+
+namespace webrtc {
+
+enum VCMNackMode {
+ kNack,
+ kNoNack
+};
+
+// forward declarations
+class Clock;
+class EventFactory;
+class EventWrapper;
+class VCMFrameBuffer;
+class VCMPacket;
+class VCMEncodedFrame;
+
+typedef std::list<VCMFrameBuffer*> UnorderedFrameList;
+
+struct VCMJitterSample {
+ VCMJitterSample() : timestamp(0), frame_size(0), latest_packet_time(-1) {}
+ uint32_t timestamp;
+ uint32_t frame_size;
+ int64_t latest_packet_time;
+};
+
+class TimestampLessThan {
+ public:
+ bool operator() (uint32_t timestamp1,
+ uint32_t timestamp2) const {
+ return IsNewerTimestamp(timestamp2, timestamp1);
+ }
+};
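+
+// Note: because the comparator delegates to IsNewerTimestamp(), containers
+// keyed with it iterate in stream order rather than numeric order, which
+// keeps FrameList (below) correct across 32-bit timestamp wrap. Sketch with
+// hypothetical values:
+//
+//   std::map<uint32_t, int, TimestampLessThan> m;
+//   m[0xFFFFFF00u] = 1;  // Pre-wrap frame.
+//   m[0x64u] = 2;        // Post-wrap frame.
+//   // m.begin()->second == 1: the pre-wrap frame still comes first.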
+
+class FrameList
+ : public std::map<uint32_t, VCMFrameBuffer*, TimestampLessThan> {
+ public:
+ void InsertFrame(VCMFrameBuffer* frame);
+ VCMFrameBuffer* PopFrame(uint32_t timestamp);
+ VCMFrameBuffer* Front() const;
+ VCMFrameBuffer* Back() const;
+ int RecycleFramesUntilKeyFrame(FrameList::iterator* key_frame_it,
+ UnorderedFrameList* free_frames);
+ void CleanUpOldOrEmptyFrames(VCMDecodingState* decoding_state,
+ UnorderedFrameList* free_frames);
+ void Reset(UnorderedFrameList* free_frames);
+};
+
+class Vp9SsMap {
+ public:
+ typedef std::map<uint32_t, GofInfoVP9, TimestampLessThan> SsMap;
+ bool Insert(const VCMPacket& packet);
+ void Reset();
+
+ // Removes SS data that are older than |timestamp|.
+ // The |timestamp| should be an old timestamp, i.e. packets with older
+ // timestamps should no longer be inserted.
+ void RemoveOld(uint32_t timestamp);
+
+ bool UpdatePacket(VCMPacket* packet);
+ void UpdateFrames(FrameList* frames);
+
+ // Public for testing.
+  // Looks up the SS data corresponding to |timestamp| and returns it via
+  // |it|; returns false if no such data exists.
+ bool Find(uint32_t timestamp, SsMap::iterator* it);
+
+ private:
+ // These two functions are called by RemoveOld.
+  // Checks if it is time to do a cleanup (done every kSsCleanupIntervalSec).
+ bool TimeForCleanup(uint32_t timestamp) const;
+
+  // Advances the oldest SS data to handle timestamp wrap in cases where SS
+  // data are received very seldom (e.g. only once at the beginning of the
+  // stream, with the next arriving only after IsNewerTimestamp ceases to
+  // hold).
+ void AdvanceFront(uint32_t timestamp);
+
+ SsMap ss_map_;
+};
+
+class VCMJitterBuffer {
+ public:
+ VCMJitterBuffer(Clock* clock, rtc::scoped_ptr<EventWrapper> event);
+
+ ~VCMJitterBuffer();
+
+  // Initializes and starts the jitter buffer.
+ void Start();
+
+ // Signals all internal events and stops the jitter buffer.
+ void Stop();
+
+ // Returns true if the jitter buffer is running.
+ bool Running() const;
+
+ // Empty the jitter buffer of all its data.
+ void Flush();
+
+ // Get the number of received frames, by type, since the jitter buffer
+ // was started.
+ FrameCounts FrameStatistics() const;
+
+ // The number of packets discarded by the jitter buffer because the decoder
+ // won't be able to decode them.
+ int num_not_decodable_packets() const;
+
+ // Gets number of packets received.
+ int num_packets() const;
+
+ // Gets number of duplicated packets received.
+ int num_duplicated_packets() const;
+
+ // Gets number of packets discarded by the jitter buffer.
+ int num_discarded_packets() const;
+
+  // Statistics: calculates frame and bit rates.
+ void IncomingRateStatistics(unsigned int* framerate,
+ unsigned int* bitrate);
+
+  // Checks whether the packet sequence will be complete if the next frame is
+  // grabbed for decoding, i.e. that no frame was lost between the last
+  // decoded frame and the next one, and that the next frame is not missing
+  // any packets.
+ bool CompleteSequenceWithNextFrame();
+
+  // Waits up to |max_wait_time_ms| ms for a complete frame to arrive.
+  // Returns true if such a frame is found, in which case its timestamp is
+  // written to |timestamp|; otherwise returns false.
+ bool NextCompleteTimestamp(uint32_t max_wait_time_ms, uint32_t* timestamp);
+
+  // Locates a frame for decoding (even an incomplete one) without delay.
+  // Returns true if such a frame is found, in which case its timestamp is
+  // written to |timestamp|; otherwise returns false.
+ bool NextMaybeIncompleteTimestamp(uint32_t* timestamp);
+
+  // Extracts the frame corresponding to |timestamp|.
+  // The frame is set to the decoding state.
+ VCMEncodedFrame* ExtractAndSetDecode(uint32_t timestamp);
+
+  // Releases a frame returned from the jitter buffer; should be called when
+  // done with decoding.
+ void ReleaseFrame(VCMEncodedFrame* frame);
+
+  // Returns the time in ms when the latest packet was inserted into the
+  // frame. |retransmitted| is set to true if any packet belonging to the
+  // frame has been retransmitted.
+ int64_t LastPacketTime(const VCMEncodedFrame* frame,
+ bool* retransmitted) const;
+
+  // Inserts a packet into the frame it belongs to (fetched internally via
+  // GetFrame()). If the return value is <= 0, that frame is invalidated and
+  // any pointer to it must be dropped after this function returns.
+ VCMFrameBufferEnum InsertPacket(const VCMPacket& packet,
+ bool* retransmitted);
+
+ // Returns the estimated jitter in milliseconds.
+ uint32_t EstimatedJitterMs();
+
+ // Updates the round-trip time estimate.
+ void UpdateRtt(int64_t rtt_ms);
+
+  // Sets the NACK mode. |high_rtt_nack_threshold_ms| is an RTT threshold in
+  // ms above which NACK is disabled when the mode is |kNack|; -1 means NACK
+  // is always enabled in |kNack| mode.
+  // |low_rtt_nack_threshold_ms| is an RTT threshold in ms below which we
+  // expect to rely on NACK only, and therefore use larger buffers to leave
+  // time to wait for retransmissions.
+ void SetNackMode(VCMNackMode mode, int64_t low_rtt_nack_threshold_ms,
+ int64_t high_rtt_nack_threshold_ms);
+
+ void SetNackSettings(size_t max_nack_list_size,
+ int max_packet_age_to_nack,
+ int max_incomplete_time_ms);
+
+ // Returns the current NACK mode.
+ VCMNackMode nack_mode() const;
+
+ // Returns a list of the sequence numbers currently missing.
+ std::vector<uint16_t> GetNackList(bool* request_key_frame);
+
+  // Sets the decode error mode. Should not be changed in the middle of a
+  // session. Changes will not influence frames already in the buffer.
+  void SetDecodeErrorMode(VCMDecodeErrorMode error_mode);
+ int64_t LastDecodedTimestamp() const;
+  VCMDecodeErrorMode decode_error_mode() const { return decode_error_mode_; }
+
+ // Used to compute time of complete continuous frames. Returns the timestamps
+ // corresponding to the start and end of the continuous complete buffer.
+ void RenderBufferSize(uint32_t* timestamp_start, uint32_t* timestamp_end);
+
+ void RegisterStatsCallback(VCMReceiveStatisticsCallback* callback);
+
+ private:
+ class SequenceNumberLessThan {
+ public:
+ bool operator() (const uint16_t& sequence_number1,
+ const uint16_t& sequence_number2) const {
+ return IsNewerSequenceNumber(sequence_number2, sequence_number1);
+ }
+ };
+ typedef std::set<uint16_t, SequenceNumberLessThan> SequenceNumberSet;
+
+  // Gets the frame assigned to the timestamp of the packet. May recycle
+  // existing frames if no free frames are available. Returns an error code on
+  // failure, or kNoError on success. |frame_list| is set to the list the
+  // frame was in, or NULL if it was not in a FrameList (i.e. a new frame).
+ VCMFrameBufferEnum GetFrame(const VCMPacket& packet,
+ VCMFrameBuffer** frame,
+ FrameList** frame_list)
+ EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+
+ // Returns true if |frame| is continuous in |decoding_state|, not taking
+ // decodable frames into account.
+ bool IsContinuousInState(const VCMFrameBuffer& frame,
+ const VCMDecodingState& decoding_state) const
+ EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+ // Returns true if |frame| is continuous in the |last_decoded_state_|, taking
+ // all decodable frames into account.
+ bool IsContinuous(const VCMFrameBuffer& frame) const
+ EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+ // Looks for frames in |incomplete_frames_| which are continuous in the
+ // provided |decoded_state|. Starts the search from the timestamp of
+ // |decoded_state|.
+ void FindAndInsertContinuousFramesWithState(
+ const VCMDecodingState& decoded_state)
+ EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+ // Looks for frames in |incomplete_frames_| which are continuous in
+ // |last_decoded_state_| taking all decodable frames into account. Starts
+ // the search from |new_frame|.
+ void FindAndInsertContinuousFrames(const VCMFrameBuffer& new_frame)
+ EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+ VCMFrameBuffer* NextFrame() const EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+  // Returns true if the NACK list was updated to cover sequence numbers up to
+  // |sequence_number|. If false, a key frame is needed to get into a state
+  // where we can continue decoding.
+ bool UpdateNackList(uint16_t sequence_number)
+ EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+ bool TooLargeNackList() const;
+  // Returns true if the NACK list was reduced without problems. If false, a
+  // key frame is needed to get into a state where we can continue decoding.
+ bool HandleTooLargeNackList() EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+ bool MissingTooOldPacket(uint16_t latest_sequence_number) const
+ EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+  // Returns true if the too-old packets were successfully removed from the
+  // NACK list. If false, a key frame is needed to get into a state where we
+  // can continue decoding.
+ bool HandleTooOldPackets(uint16_t latest_sequence_number)
+ EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+ // Drops all packets in the NACK list up until |last_decoded_sequence_number|.
+ void DropPacketsFromNackList(uint16_t last_decoded_sequence_number);
+
+ void ReleaseFrameIfNotDecoding(VCMFrameBuffer* frame);
+
+  // Gets an empty frame, creating a new frame if necessary (i.e. increasing
+  // the jitter buffer size).
+ VCMFrameBuffer* GetEmptyFrame() EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+
+ // Attempts to increase the size of the jitter buffer. Returns true on
+ // success, false otherwise.
+ bool TryToIncreaseJitterBufferSize() EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+
+ // Recycles oldest frames until a key frame is found. Used if jitter buffer is
+ // completely full. Returns true if a key frame was found.
+ bool RecycleFramesUntilKeyFrame() EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+
+ // Updates the frame statistics.
+ // Counts only complete frames, so decodable incomplete frames will not be
+ // counted.
+ void CountFrame(const VCMFrameBuffer& frame)
+ EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+
+ // Update rolling average of packets per frame.
+  void UpdateAveragePacketsPerFrame(int current_number_packets);
+
+  // Cleans the frame lists of old or empty frames.
+  // Should only be called prior to actual use of a frame.
+ void CleanUpOldOrEmptyFrames() EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+
+ // Returns true if |packet| is likely to have been retransmitted.
+ bool IsPacketRetransmitted(const VCMPacket& packet) const;
+
+ // The following three functions update the jitter estimate with the
+ // payload size, receive time and RTP timestamp of a frame.
+ void UpdateJitterEstimate(const VCMJitterSample& sample,
+ bool incomplete_frame);
+ void UpdateJitterEstimate(const VCMFrameBuffer& frame, bool incomplete_frame);
+ void UpdateJitterEstimate(int64_t latest_packet_time_ms,
+ uint32_t timestamp,
+ unsigned int frame_size,
+ bool incomplete_frame);
+
+ // Returns true if we should wait for retransmissions, false otherwise.
+ bool WaitForRetransmissions();
+
+ int NonContinuousOrIncompleteDuration() EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+
+ uint16_t EstimatedLowSequenceNumber(const VCMFrameBuffer& frame) const;
+
+ void UpdateHistograms() EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+
+ Clock* clock_;
+  // Whether the jitter buffer is running (has been started) or not.
+ bool running_;
+ CriticalSectionWrapper* crit_sect_;
+ // Event to signal when we have a frame ready for decoder.
+ rtc::scoped_ptr<EventWrapper> frame_event_;
+ // Number of allocated frames.
+ int max_number_of_frames_;
+ UnorderedFrameList free_frames_ GUARDED_BY(crit_sect_);
+ FrameList decodable_frames_ GUARDED_BY(crit_sect_);
+ FrameList incomplete_frames_ GUARDED_BY(crit_sect_);
+ VCMDecodingState last_decoded_state_ GUARDED_BY(crit_sect_);
+ bool first_packet_since_reset_;
+ // Contains scalability structure data for VP9.
+ Vp9SsMap vp9_ss_map_ GUARDED_BY(crit_sect_);
+
+ // Statistics.
+ VCMReceiveStatisticsCallback* stats_callback_ GUARDED_BY(crit_sect_);
+ // Frame counts for each type (key, delta, ...)
+ FrameCounts receive_statistics_;
+ // Latest calculated frame rates of incoming stream.
+ unsigned int incoming_frame_rate_;
+ unsigned int incoming_frame_count_;
+ int64_t time_last_incoming_frame_count_;
+ unsigned int incoming_bit_count_;
+ unsigned int incoming_bit_rate_;
+ // Number of frames in a row that have been too old.
+ int num_consecutive_old_frames_;
+ // Number of packets in a row that have been too old.
+ int num_consecutive_old_packets_;
+ // Number of packets received.
+ int num_packets_ GUARDED_BY(crit_sect_);
+ // Number of duplicated packets received.
+ int num_duplicated_packets_ GUARDED_BY(crit_sect_);
+ // Number of packets discarded by the jitter buffer.
+ int num_discarded_packets_ GUARDED_BY(crit_sect_);
+  // Time when the first packet was received.
+ int64_t time_first_packet_ms_ GUARDED_BY(crit_sect_);
+
+ // Jitter estimation.
+ // Filter for estimating jitter.
+ VCMJitterEstimator jitter_estimate_;
+ // Calculates network delays used for jitter calculations.
+ VCMInterFrameDelay inter_frame_delay_;
+ VCMJitterSample waiting_for_completion_;
+ int64_t rtt_ms_;
+
+ // NACK and retransmissions.
+ VCMNackMode nack_mode_;
+ int64_t low_rtt_nack_threshold_ms_;
+ int64_t high_rtt_nack_threshold_ms_;
+ // Holds the internal NACK list (the missing sequence numbers).
+ SequenceNumberSet missing_sequence_numbers_;
+ uint16_t latest_received_sequence_number_;
+ size_t max_nack_list_size_;
+ int max_packet_age_to_nack_; // Measured in sequence numbers.
+ int max_incomplete_time_ms_;
+
+ VCMDecodeErrorMode decode_error_mode_;
+ // Estimated rolling average of packets per frame
+ float average_packets_per_frame_;
+ // average_packets_per_frame converges fast if we have fewer than this many
+ // frames.
+ int frame_counter_;
+ RTC_DISALLOW_COPY_AND_ASSIGN(VCMJitterBuffer);
+};
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_JITTER_BUFFER_H_
diff --git a/webrtc/modules/video_coding/main/source/jitter_buffer_common.h b/webrtc/modules/video_coding/main/source/jitter_buffer_common.h
new file mode 100644
index 0000000000..97af78087a
--- /dev/null
+++ b/webrtc/modules/video_coding/main/source/jitter_buffer_common.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_JITTER_BUFFER_COMMON_H_
+#define WEBRTC_MODULES_VIDEO_CODING_JITTER_BUFFER_COMMON_H_
+
+#include "webrtc/typedefs.h"
+
+namespace webrtc {
+
+// Used to estimate rolling average of packets per frame.
+static const float kFastConvergeMultiplier = 0.4f;
+static const float kNormalConvergeMultiplier = 0.2f;
+
+enum { kMaxNumberOfFrames = 300 };
+enum { kStartNumberOfFrames = 6 };
+enum { kMaxVideoDelayMs = 10000 };
+enum { kPacketsPerFrameMultiplier = 5 };
+enum { kFastConvergeThreshold = 5 };
+
+enum VCMJitterBufferEnum {
+ kMaxConsecutiveOldFrames = 60,
+ kMaxConsecutiveOldPackets = 300,
+ // TODO(sprang): Reduce this limit once codecs don't sometimes wildly
+ // overshoot bitrate target.
+ kMaxPacketsInSession = 1400, // Allows ~2MB frames.
+ kBufferIncStepSizeBytes = 30000, // >20 packets.
+  kMaxJBFrameSizeBytes = 4000000  // Sanity: don't go above 4 MB.
+};
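+
+// Note: a quick check of the "~2MB frames" figure above, assuming MTU-sized
+// packets of roughly 1500 bytes (the payload size is an assumption):
+// 1400 packets/frame * ~1500 bytes/packet ~= 2.1 MB, well under the
+// kMaxJBFrameSizeBytes (4 MB) sanity cap.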
+
+enum VCMFrameBufferEnum {
+ kOutOfBoundsPacket = -7,
+ kNotInitialized = -6,
+ kOldPacket = -5,
+ kGeneralError = -4,
+ kFlushIndicator = -3, // Indicator that a flush has occurred.
+ kTimeStampError = -2,
+ kSizeError = -1,
+ kNoError = 0,
+ kIncomplete = 1, // Frame incomplete.
+  kCompleteSession = 3, // At least one layer in the frame is complete.
+  kDecodableSession = 4, // Frame incomplete, but ready to be decoded.
+ kDuplicatePacket = 5 // We're receiving a duplicate packet.
+};
+
+enum VCMFrameBufferStateEnum {
+  kStateEmpty,  // Frame popped by the RTP receiver.
+  kStateIncomplete,  // Frame that has one or more packets stored.
+  kStateComplete,  // Frame that has all of its packets.
+  kStateDecodable  // Hybrid mode: the frame can be decoded.
+};
+
+enum { kH264StartCodeLengthBytes = 4 };
+
+// Used to indicate whether a received packet contains a complete NALU (or
+// equivalent).
+enum VCMNaluCompleteness {
+  kNaluUnset = 0, // Packet has not been filled.
+  kNaluComplete = 1, // Packet can be decoded as is.
+  kNaluStart, // Packet contains the beginning of a NALU.
+  kNaluIncomplete, // Packet is neither the beginning nor the end of a NALU.
+  kNaluEnd, // Packet is the end of a NALU.
+};
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_CODING_JITTER_BUFFER_COMMON_H_
diff --git a/webrtc/modules/video_coding/main/source/jitter_buffer_unittest.cc b/webrtc/modules/video_coding/main/source/jitter_buffer_unittest.cc
new file mode 100644
index 0000000000..d6c6d4985b
--- /dev/null
+++ b/webrtc/modules/video_coding/main/source/jitter_buffer_unittest.cc
@@ -0,0 +1,2575 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <string.h>
+
+#include <list>
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/modules/video_coding/main/source/frame_buffer.h"
+#include "webrtc/modules/video_coding/main/source/jitter_buffer.h"
+#include "webrtc/modules/video_coding/main/source/media_opt_util.h"
+#include "webrtc/modules/video_coding/main/source/packet.h"
+#include "webrtc/modules/video_coding/main/source/test/stream_generator.h"
+#include "webrtc/modules/video_coding/main/test/test_util.h"
+#include "webrtc/system_wrappers/include/clock.h"
+#include "webrtc/system_wrappers/include/metrics.h"
+#include "webrtc/test/histogram.h"
+
+namespace webrtc {
+
+namespace {
+ const uint32_t kProcessIntervalSec = 60;
+} // namespace
+
+class Vp9SsMapTest : public ::testing::Test {
+ protected:
+ Vp9SsMapTest()
+ : packet_(data_, 1400, 1234, 1, true) {}
+
+ virtual void SetUp() {
+ packet_.isFirstPacket = true;
+ packet_.markerBit = true;
+ packet_.frameType = kVideoFrameKey;
+ packet_.codec = kVideoCodecVP9;
+ packet_.codecSpecificHeader.codec = kRtpVideoVp9;
+ packet_.codecSpecificHeader.codecHeader.VP9.flexible_mode = false;
+ packet_.codecSpecificHeader.codecHeader.VP9.gof_idx = 0;
+ packet_.codecSpecificHeader.codecHeader.VP9.temporal_idx = kNoTemporalIdx;
+ packet_.codecSpecificHeader.codecHeader.VP9.temporal_up_switch = false;
+ packet_.codecSpecificHeader.codecHeader.VP9.ss_data_available = true;
+ packet_.codecSpecificHeader.codecHeader.VP9.gof.SetGofInfoVP9(
+ kTemporalStructureMode3); // kTemporalStructureMode3: 0-2-1-2..
+ }
+
+ Vp9SsMap map_;
+ uint8_t data_[1500];
+ VCMPacket packet_;
+};
+
+TEST_F(Vp9SsMapTest, Insert) {
+ EXPECT_TRUE(map_.Insert(packet_));
+}
+
+TEST_F(Vp9SsMapTest, Insert_NoSsData) {
+ packet_.codecSpecificHeader.codecHeader.VP9.ss_data_available = false;
+ EXPECT_FALSE(map_.Insert(packet_));
+}
+
+TEST_F(Vp9SsMapTest, Find) {
+ EXPECT_TRUE(map_.Insert(packet_));
+ Vp9SsMap::SsMap::iterator it;
+ EXPECT_TRUE(map_.Find(packet_.timestamp, &it));
+ EXPECT_EQ(packet_.timestamp, it->first);
+}
+
+TEST_F(Vp9SsMapTest, Find_WithWrap) {
+ const uint32_t kSsTimestamp1 = 0xFFFFFFFF;
+ const uint32_t kSsTimestamp2 = 100;
+ packet_.timestamp = kSsTimestamp1;
+ EXPECT_TRUE(map_.Insert(packet_));
+ packet_.timestamp = kSsTimestamp2;
+ EXPECT_TRUE(map_.Insert(packet_));
+ Vp9SsMap::SsMap::iterator it;
+ EXPECT_FALSE(map_.Find(kSsTimestamp1 - 1, &it));
+ EXPECT_TRUE(map_.Find(kSsTimestamp1, &it));
+ EXPECT_EQ(kSsTimestamp1, it->first);
+ EXPECT_TRUE(map_.Find(0, &it));
+ EXPECT_EQ(kSsTimestamp1, it->first);
+ EXPECT_TRUE(map_.Find(kSsTimestamp2 - 1, &it));
+ EXPECT_EQ(kSsTimestamp1, it->first);
+ EXPECT_TRUE(map_.Find(kSsTimestamp2, &it));
+ EXPECT_EQ(kSsTimestamp2, it->first);
+ EXPECT_TRUE(map_.Find(kSsTimestamp2 + 1, &it));
+ EXPECT_EQ(kSsTimestamp2, it->first);
+}
+
+TEST_F(Vp9SsMapTest, Reset) {
+ EXPECT_TRUE(map_.Insert(packet_));
+ Vp9SsMap::SsMap::iterator it;
+ EXPECT_TRUE(map_.Find(packet_.timestamp, &it));
+ EXPECT_EQ(packet_.timestamp, it->first);
+
+ map_.Reset();
+ EXPECT_FALSE(map_.Find(packet_.timestamp, &it));
+}
+
+TEST_F(Vp9SsMapTest, RemoveOld) {
+ Vp9SsMap::SsMap::iterator it;
+ const uint32_t kSsTimestamp1 = 10000;
+ packet_.timestamp = kSsTimestamp1;
+ EXPECT_TRUE(map_.Insert(packet_));
+
+ const uint32_t kTimestamp = kSsTimestamp1 + kProcessIntervalSec * 90000;
+ map_.RemoveOld(kTimestamp - 1); // Interval not passed.
+  EXPECT_TRUE(map_.Find(kSsTimestamp1, &it));  // Should not have been removed.
+
+ map_.RemoveOld(kTimestamp);
+ EXPECT_FALSE(map_.Find(kSsTimestamp1, &it));
+ EXPECT_TRUE(map_.Find(kTimestamp, &it));
+ EXPECT_EQ(kTimestamp, it->first);
+}
+
+TEST_F(Vp9SsMapTest, RemoveOld_WithWrap) {
+ Vp9SsMap::SsMap::iterator it;
+ const uint32_t kSsTimestamp1 = 0xFFFFFFFF - kProcessIntervalSec * 90000;
+ const uint32_t kSsTimestamp2 = 10;
+ const uint32_t kSsTimestamp3 = 1000;
+ packet_.timestamp = kSsTimestamp1;
+ EXPECT_TRUE(map_.Insert(packet_));
+ packet_.timestamp = kSsTimestamp2;
+ EXPECT_TRUE(map_.Insert(packet_));
+ packet_.timestamp = kSsTimestamp3;
+ EXPECT_TRUE(map_.Insert(packet_));
+
+ map_.RemoveOld(kSsTimestamp3);
+ EXPECT_FALSE(map_.Find(kSsTimestamp1, &it));
+ EXPECT_FALSE(map_.Find(kSsTimestamp2, &it));
+ EXPECT_TRUE(map_.Find(kSsTimestamp3, &it));
+}
+
+TEST_F(Vp9SsMapTest, UpdatePacket_NoSsData) {
+ packet_.codecSpecificHeader.codecHeader.VP9.gof_idx = 0;
+ EXPECT_FALSE(map_.UpdatePacket(&packet_));
+}
+
+TEST_F(Vp9SsMapTest, UpdatePacket_NoGofIdx) {
+ EXPECT_TRUE(map_.Insert(packet_));
+ packet_.codecSpecificHeader.codecHeader.VP9.gof_idx = kNoGofIdx;
+ EXPECT_FALSE(map_.UpdatePacket(&packet_));
+}
+
+TEST_F(Vp9SsMapTest, UpdatePacket_InvalidGofIdx) {
+ EXPECT_TRUE(map_.Insert(packet_));
+ packet_.codecSpecificHeader.codecHeader.VP9.gof_idx = 4;
+ EXPECT_FALSE(map_.UpdatePacket(&packet_));
+}
+
+TEST_F(Vp9SsMapTest, UpdatePacket) {
+ EXPECT_TRUE(map_.Insert(packet_)); // kTemporalStructureMode3: 0-2-1-2..
+
+ packet_.codecSpecificHeader.codecHeader.VP9.gof_idx = 0;
+ EXPECT_TRUE(map_.UpdatePacket(&packet_));
+ EXPECT_EQ(0, packet_.codecSpecificHeader.codecHeader.VP9.temporal_idx);
+ EXPECT_FALSE(packet_.codecSpecificHeader.codecHeader.VP9.temporal_up_switch);
+ EXPECT_EQ(1U, packet_.codecSpecificHeader.codecHeader.VP9.num_ref_pics);
+ EXPECT_EQ(4, packet_.codecSpecificHeader.codecHeader.VP9.pid_diff[0]);
+
+ packet_.codecSpecificHeader.codecHeader.VP9.gof_idx = 1;
+ EXPECT_TRUE(map_.UpdatePacket(&packet_));
+ EXPECT_EQ(2, packet_.codecSpecificHeader.codecHeader.VP9.temporal_idx);
+ EXPECT_TRUE(packet_.codecSpecificHeader.codecHeader.VP9.temporal_up_switch);
+ EXPECT_EQ(1U, packet_.codecSpecificHeader.codecHeader.VP9.num_ref_pics);
+ EXPECT_EQ(1, packet_.codecSpecificHeader.codecHeader.VP9.pid_diff[0]);
+
+ packet_.codecSpecificHeader.codecHeader.VP9.gof_idx = 2;
+ EXPECT_TRUE(map_.UpdatePacket(&packet_));
+ EXPECT_EQ(1, packet_.codecSpecificHeader.codecHeader.VP9.temporal_idx);
+ EXPECT_TRUE(packet_.codecSpecificHeader.codecHeader.VP9.temporal_up_switch);
+ EXPECT_EQ(1U, packet_.codecSpecificHeader.codecHeader.VP9.num_ref_pics);
+ EXPECT_EQ(2, packet_.codecSpecificHeader.codecHeader.VP9.pid_diff[0]);
+
+ packet_.codecSpecificHeader.codecHeader.VP9.gof_idx = 3;
+ EXPECT_TRUE(map_.UpdatePacket(&packet_));
+ EXPECT_EQ(2, packet_.codecSpecificHeader.codecHeader.VP9.temporal_idx);
+ EXPECT_FALSE(packet_.codecSpecificHeader.codecHeader.VP9.temporal_up_switch);
+ EXPECT_EQ(2U, packet_.codecSpecificHeader.codecHeader.VP9.num_ref_pics);
+ EXPECT_EQ(1, packet_.codecSpecificHeader.codecHeader.VP9.pid_diff[0]);
+ EXPECT_EQ(2, packet_.codecSpecificHeader.codecHeader.VP9.pid_diff[1]);
+}
+
+class TestBasicJitterBuffer : public ::testing::Test {
+ protected:
+ virtual void SetUp() {
+ clock_.reset(new SimulatedClock(0));
+ jitter_buffer_.reset(new VCMJitterBuffer(
+ clock_.get(),
+ rtc::scoped_ptr<EventWrapper>(event_factory_.CreateEvent())));
+ jitter_buffer_->Start();
+ seq_num_ = 1234;
+ timestamp_ = 0;
+ size_ = 1400;
+ // Data vector - 0, 0, 0x80, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0x80, 3....
+ data_[0] = 0;
+ data_[1] = 0;
+ data_[2] = 0x80;
+ int count = 3;
+ for (unsigned int i = 3; i < sizeof(data_) - 3; ++i) {
+ data_[i] = count;
+ count++;
+ if (count == 10) {
+ data_[i + 1] = 0;
+ data_[i + 2] = 0;
+ data_[i + 3] = 0x80;
+ count = 3;
+ i += 3;
+ }
+ }
+ packet_.reset(new VCMPacket(data_, size_, seq_num_, timestamp_, true));
+ }
+
+ VCMEncodedFrame* DecodeCompleteFrame() {
+ uint32_t timestamp = 0;
+ bool found_frame = jitter_buffer_->NextCompleteTimestamp(10, &timestamp);
+ if (!found_frame)
+ return NULL;
+ VCMEncodedFrame* frame = jitter_buffer_->ExtractAndSetDecode(timestamp);
+ return frame;
+ }
+
+ VCMEncodedFrame* DecodeIncompleteFrame() {
+ uint32_t timestamp = 0;
+ bool found_frame = jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp);
+ if (!found_frame)
+ return NULL;
+ VCMEncodedFrame* frame = jitter_buffer_->ExtractAndSetDecode(timestamp);
+ return frame;
+ }
+
+ void CheckOutFrame(VCMEncodedFrame* frame_out,
+ unsigned int size,
+ bool startCode) {
+ ASSERT_TRUE(frame_out);
+
+ const uint8_t* outData = frame_out->Buffer();
+ unsigned int i = 0;
+
+ if (startCode) {
+ EXPECT_EQ(0, outData[0]);
+ EXPECT_EQ(0, outData[1]);
+ EXPECT_EQ(0, outData[2]);
+ EXPECT_EQ(1, outData[3]);
+ i += 4;
+ }
+
+ EXPECT_EQ(size, frame_out->Length());
+ int count = 3;
+ for (; i < size; i++) {
+ if (outData[i] == 0 && outData[i + 1] == 0 && outData[i + 2] == 0x80) {
+ i += 2;
+ } else if (startCode && outData[i] == 0 && outData[i + 1] == 0) {
+ EXPECT_EQ(0, outData[0]);
+ EXPECT_EQ(0, outData[1]);
+ EXPECT_EQ(0, outData[2]);
+ EXPECT_EQ(1, outData[3]);
+ i += 3;
+ } else {
+ EXPECT_EQ(count, outData[i]);
+ count++;
+ if (count == 10) {
+ count = 3;
+ }
+ }
+ }
+ }
+
+ uint16_t seq_num_;
+ uint32_t timestamp_;
+ int size_;
+ uint8_t data_[1500];
+ rtc::scoped_ptr<VCMPacket> packet_;
+ rtc::scoped_ptr<SimulatedClock> clock_;
+ NullEventFactory event_factory_;
+ rtc::scoped_ptr<VCMJitterBuffer> jitter_buffer_;
+};
+
+
+ protected:
+ enum { kDataBufferSize = 10 };
+
+ virtual void SetUp() {
+ clock_.reset(new SimulatedClock(0));
+ max_nack_list_size_ = 150;
+ oldest_packet_to_nack_ = 250;
+ jitter_buffer_ = new VCMJitterBuffer(
+ clock_.get(),
+ rtc::scoped_ptr<EventWrapper>(event_factory_.CreateEvent()));
+ stream_generator_ = new StreamGenerator(0, clock_->TimeInMilliseconds());
+ jitter_buffer_->Start();
+ jitter_buffer_->SetNackSettings(max_nack_list_size_,
+ oldest_packet_to_nack_, 0);
+ memset(data_buffer_, 0, kDataBufferSize);
+ }
+
+ virtual void TearDown() {
+ jitter_buffer_->Stop();
+ delete stream_generator_;
+ delete jitter_buffer_;
+ }
+
+ VCMFrameBufferEnum InsertPacketAndPop(int index) {
+ VCMPacket packet;
+ packet.dataPtr = data_buffer_;
+ bool packet_available = stream_generator_->PopPacket(&packet, index);
+ EXPECT_TRUE(packet_available);
+ if (!packet_available)
+ return kGeneralError; // Return here to avoid crashes below.
+ bool retransmitted = false;
+ return jitter_buffer_->InsertPacket(packet, &retransmitted);
+ }
+
+ VCMFrameBufferEnum InsertPacket(int index) {
+ VCMPacket packet;
+ packet.dataPtr = data_buffer_;
+ bool packet_available = stream_generator_->GetPacket(&packet, index);
+ EXPECT_TRUE(packet_available);
+ if (!packet_available)
+ return kGeneralError; // Return here to avoid crashes below.
+ bool retransmitted = false;
+ return jitter_buffer_->InsertPacket(packet, &retransmitted);
+ }
+
+ VCMFrameBufferEnum InsertFrame(FrameType frame_type) {
+ stream_generator_->GenerateFrame(
+ frame_type, (frame_type != kEmptyFrame) ? 1 : 0,
+ (frame_type == kEmptyFrame) ? 1 : 0, clock_->TimeInMilliseconds());
+ VCMFrameBufferEnum ret = InsertPacketAndPop(0);
+ clock_->AdvanceTimeMilliseconds(kDefaultFramePeriodMs);
+ return ret;
+ }
+
+ VCMFrameBufferEnum InsertFrames(int num_frames, FrameType frame_type) {
+ VCMFrameBufferEnum ret_for_all = kNoError;
+ for (int i = 0; i < num_frames; ++i) {
+ VCMFrameBufferEnum ret = InsertFrame(frame_type);
+ if (ret < kNoError) {
+ ret_for_all = ret;
+ } else if (ret_for_all >= kNoError) {
+ ret_for_all = ret;
+ }
+ }
+ return ret_for_all;
+ }
+
+ void DropFrame(int num_packets) {
+ stream_generator_->GenerateFrame(kVideoFrameDelta, num_packets, 0,
+ clock_->TimeInMilliseconds());
+ for (int i = 0; i < num_packets; ++i)
+ stream_generator_->DropLastPacket();
+ clock_->AdvanceTimeMilliseconds(kDefaultFramePeriodMs);
+ }
+
+ bool DecodeCompleteFrame() {
+ uint32_t timestamp = 0;
+ bool found_frame = jitter_buffer_->NextCompleteTimestamp(0, &timestamp);
+ if (!found_frame)
+ return false;
+
+ VCMEncodedFrame* frame = jitter_buffer_->ExtractAndSetDecode(timestamp);
+ bool ret = (frame != NULL);
+ jitter_buffer_->ReleaseFrame(frame);
+ return ret;
+ }
+
+ bool DecodeIncompleteFrame() {
+ uint32_t timestamp = 0;
+ bool found_frame = jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp);
+ if (!found_frame)
+ return false;
+ VCMEncodedFrame* frame = jitter_buffer_->ExtractAndSetDecode(timestamp);
+ bool ret = (frame != NULL);
+ jitter_buffer_->ReleaseFrame(frame);
+ return ret;
+ }
+
+ VCMJitterBuffer* jitter_buffer_;
+ StreamGenerator* stream_generator_;
+ rtc::scoped_ptr<SimulatedClock> clock_;
+ NullEventFactory event_factory_;
+ size_t max_nack_list_size_;
+ int oldest_packet_to_nack_;
+ uint8_t data_buffer_[kDataBufferSize];
+};
+
+class TestJitterBufferNack : public TestRunningJitterBuffer {
+ protected:
+ virtual void SetUp() {
+ TestRunningJitterBuffer::SetUp();
+ jitter_buffer_->SetNackMode(kNack, -1, -1);
+ }
+
+ virtual void TearDown() {
+ TestRunningJitterBuffer::TearDown();
+ }
+};
+
+TEST_F(TestBasicJitterBuffer, StopRunning) {
+ jitter_buffer_->Stop();
+ EXPECT_TRUE(NULL == DecodeCompleteFrame());
+ EXPECT_TRUE(NULL == DecodeIncompleteFrame());
+ jitter_buffer_->Start();
+ // Allow selective errors.
+ jitter_buffer_->SetDecodeErrorMode(kSelectiveErrors);
+
+ // No packets inserted.
+ EXPECT_TRUE(NULL == DecodeCompleteFrame());
+ EXPECT_TRUE(NULL == DecodeIncompleteFrame());
+
+ // Allow decoding with errors.
+ jitter_buffer_->SetDecodeErrorMode(kWithErrors);
+
+ // No packets inserted.
+ EXPECT_TRUE(NULL == DecodeCompleteFrame());
+ EXPECT_TRUE(NULL == DecodeIncompleteFrame());
+}
+
+TEST_F(TestBasicJitterBuffer, SinglePacketFrame) {
+ // Always start with a complete key frame when not allowing errors.
+ jitter_buffer_->SetDecodeErrorMode(kNoErrors);
+ packet_->frameType = kVideoFrameKey;
+ packet_->isFirstPacket = true;
+ packet_->markerBit = true;
+ packet_->timestamp += 123 * 90;
+
+ // Insert the packet to the jitter buffer and get a frame.
+ bool retransmitted = false;
+ EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+ CheckOutFrame(frame_out, size_, false);
+ EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
+ jitter_buffer_->ReleaseFrame(frame_out);
+}
+
+TEST_F(TestBasicJitterBuffer, VerifyHistogramStats) {
+ test::ClearHistograms();
+ // Always start with a complete key frame when not allowing errors.
+ jitter_buffer_->SetDecodeErrorMode(kNoErrors);
+ packet_->frameType = kVideoFrameKey;
+ packet_->isFirstPacket = true;
+ packet_->markerBit = true;
+ packet_->timestamp += 123 * 90;
+
+ // Insert single packet frame to the jitter buffer and get a frame.
+ bool retransmitted = false;
+ EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+ CheckOutFrame(frame_out, size_, false);
+ EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
+ jitter_buffer_->ReleaseFrame(frame_out);
+
+ // Verify that histograms are updated when the jitter buffer is stopped.
+ clock_->AdvanceTimeMilliseconds(metrics::kMinRunTimeInSeconds * 1000);
+ jitter_buffer_->Stop();
+ EXPECT_EQ(0, test::LastHistogramSample(
+ "WebRTC.Video.DiscardedPacketsInPercent"));
+ EXPECT_EQ(0, test::LastHistogramSample(
+ "WebRTC.Video.DuplicatedPacketsInPercent"));
+ EXPECT_NE(-1, test::LastHistogramSample(
+ "WebRTC.Video.CompleteFramesReceivedPerSecond"));
+ EXPECT_EQ(1000, test::LastHistogramSample(
+ "WebRTC.Video.KeyFramesReceivedInPermille"));
+
+ // Verify that histograms are not updated if stop is called again.
+ jitter_buffer_->Stop();
+ EXPECT_EQ(1, test::NumHistogramSamples(
+ "WebRTC.Video.DiscardedPacketsInPercent"));
+ EXPECT_EQ(1, test::NumHistogramSamples(
+ "WebRTC.Video.DuplicatedPacketsInPercent"));
+ EXPECT_EQ(1, test::NumHistogramSamples(
+ "WebRTC.Video.CompleteFramesReceivedPerSecond"));
+ EXPECT_EQ(1, test::NumHistogramSamples(
+ "WebRTC.Video.KeyFramesReceivedInPermille"));
+}
+
+TEST_F(TestBasicJitterBuffer, DualPacketFrame) {
+ packet_->frameType = kVideoFrameKey;
+ packet_->isFirstPacket = true;
+ packet_->markerBit = false;
+
+ bool retransmitted = false;
+ EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+ // Should not be complete.
+ EXPECT_TRUE(frame_out == NULL);
+
+ ++seq_num_;
+ packet_->isFirstPacket = false;
+ packet_->markerBit = true;
+ packet_->seqNum = seq_num_;
+
+ EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+
+ frame_out = DecodeCompleteFrame();
+ CheckOutFrame(frame_out, 2 * size_, false);
+
+ EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
+ jitter_buffer_->ReleaseFrame(frame_out);
+}
+
+TEST_F(TestBasicJitterBuffer, 100PacketKeyFrame) {
+ packet_->frameType = kVideoFrameKey;
+ packet_->isFirstPacket = true;
+ packet_->markerBit = false;
+
+ bool retransmitted = false;
+ EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+
+ // Frame should not be complete.
+ EXPECT_TRUE(frame_out == NULL);
+
+ // Insert 98 frames.
+ int loop = 0;
+ do {
+ seq_num_++;
+ packet_->isFirstPacket = false;
+ packet_->markerBit = false;
+ packet_->seqNum = seq_num_;
+
+ EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+ loop++;
+ } while (loop < 98);
+
+ // Insert last packet.
+ ++seq_num_;
+ packet_->isFirstPacket = false;
+ packet_->markerBit = true;
+ packet_->seqNum = seq_num_;
+
+ EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+
+ frame_out = DecodeCompleteFrame();
+
+ CheckOutFrame(frame_out, 100 * size_, false);
+ EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
+ jitter_buffer_->ReleaseFrame(frame_out);
+}
+
+TEST_F(TestBasicJitterBuffer, 100PacketDeltaFrame) {
+ // Always start with a complete key frame.
+ packet_->frameType = kVideoFrameKey;
+ packet_->isFirstPacket = true;
+ packet_->markerBit = true;
+
+ bool retransmitted = false;
+ EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+ EXPECT_FALSE(frame_out == NULL);
+ jitter_buffer_->ReleaseFrame(frame_out);
+
+ ++seq_num_;
+ packet_->seqNum = seq_num_;
+ packet_->markerBit = false;
+ packet_->frameType = kVideoFrameDelta;
+ packet_->timestamp += 33 * 90;
+
+ EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+
+ frame_out = DecodeCompleteFrame();
+
+ // Frame should not be complete.
+ EXPECT_TRUE(frame_out == NULL);
+
+ packet_->isFirstPacket = false;
+ // Insert 98 frames.
+ int loop = 0;
+ do {
+ ++seq_num_;
+ packet_->seqNum = seq_num_;
+
+ // Insert a packet into a frame.
+ EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+ loop++;
+ } while (loop < 98);
+
+ // Insert the last packet.
+ ++seq_num_;
+ packet_->isFirstPacket = false;
+ packet_->markerBit = true;
+ packet_->seqNum = seq_num_;
+
+ EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+
+ frame_out = DecodeCompleteFrame();
+
+ CheckOutFrame(frame_out, 100 * size_, false);
+ EXPECT_EQ(kVideoFrameDelta, frame_out->FrameType());
+ jitter_buffer_->ReleaseFrame(frame_out);
+}
+
+TEST_F(TestBasicJitterBuffer, PacketReorderingReverseOrder) {
+ // Insert the "first" packet last.
+ seq_num_ += 100;
+ packet_->frameType = kVideoFrameKey;
+ packet_->isFirstPacket = false;
+ packet_->markerBit = true;
+ packet_->seqNum = seq_num_;
+ packet_->timestamp = timestamp_;
+
+ bool retransmitted = false;
+ EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+
+ EXPECT_TRUE(frame_out == NULL);
+
+ // Insert 98 packets.
+ int loop = 0;
+ do {
+ seq_num_--;
+ packet_->isFirstPacket = false;
+ packet_->markerBit = false;
+ packet_->seqNum = seq_num_;
+
+ EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+ loop++;
+ } while (loop < 98);
+
+ // Insert the last packet.
+ seq_num_--;
+ packet_->isFirstPacket = true;
+ packet_->markerBit = false;
+ packet_->seqNum = seq_num_;
+
+ EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+
+  frame_out = DecodeCompleteFrame();
+
+ CheckOutFrame(frame_out, 100 * size_, false);
+
+ EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
+ jitter_buffer_->ReleaseFrame(frame_out);
+}
+
+TEST_F(TestBasicJitterBuffer, FrameReordering2Frames2PacketsEach) {
+ packet_->frameType = kVideoFrameDelta;
+ packet_->isFirstPacket = true;
+ packet_->markerBit = false;
+
+ bool retransmitted = false;
+ EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+
+ EXPECT_TRUE(frame_out == NULL);
+
+ seq_num_++;
+ packet_->isFirstPacket = false;
+ packet_->markerBit = true;
+ packet_->seqNum = seq_num_;
+
+ EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+
+  // Check that we fail to get a frame since the sequence numbers are not
+  // continuous.
+ frame_out = DecodeCompleteFrame();
+ EXPECT_TRUE(frame_out == NULL);
+
+ seq_num_ -= 3;
+ timestamp_ -= 33*90;
+ packet_->frameType = kVideoFrameKey;
+ packet_->isFirstPacket = true;
+ packet_->markerBit = false;
+ packet_->seqNum = seq_num_;
+ packet_->timestamp = timestamp_;
+
+ EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+
+ frame_out = DecodeCompleteFrame();
+
+ // It should not be complete.
+ EXPECT_TRUE(frame_out == NULL);
+
+ seq_num_++;
+ packet_->isFirstPacket = false;
+ packet_->markerBit = true;
+ packet_->seqNum = seq_num_;
+
+ EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+
+ frame_out = DecodeCompleteFrame();
+ CheckOutFrame(frame_out, 2 * size_, false);
+ EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
+ jitter_buffer_->ReleaseFrame(frame_out);
+
+ frame_out = DecodeCompleteFrame();
+ CheckOutFrame(frame_out, 2 * size_, false);
+ EXPECT_EQ(kVideoFrameDelta, frame_out->FrameType());
+ jitter_buffer_->ReleaseFrame(frame_out);
+}
+
+TEST_F(TestBasicJitterBuffer, TestReorderingWithPadding) {
+ packet_->frameType = kVideoFrameKey;
+ packet_->isFirstPacket = true;
+ packet_->markerBit = true;
+
+ // Send in an initial good packet/frame (Frame A) to start things off.
+ bool retransmitted = false;
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+ EXPECT_TRUE(frame_out != NULL);
+ jitter_buffer_->ReleaseFrame(frame_out);
+
+ // Now send in a complete delta frame (Frame C), but with a sequence number
+ // gap. No pic index either, so no temporal scalability cheating :)
+ packet_->frameType = kVideoFrameDelta;
+ // Leave a gap of 2 sequence numbers and two frames.
+ packet_->seqNum = seq_num_ + 3;
+ packet_->timestamp = timestamp_ + (66 * 90);
+ // Still isFirst = marker = true.
+ // Session should be complete (frame is complete), but there's nothing to
+ // decode yet.
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+ frame_out = DecodeCompleteFrame();
+ EXPECT_TRUE(frame_out == NULL);
+
+ // Now send in a complete delta frame (Frame B) that is continuous from A, but
+ // doesn't fill the full gap to C. The rest of the gap is going to be padding.
+ packet_->seqNum = seq_num_ + 1;
+ packet_->timestamp = timestamp_ + (33 * 90);
+ // Still isFirst = marker = true.
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+ frame_out = DecodeCompleteFrame();
+ EXPECT_TRUE(frame_out != NULL);
+ jitter_buffer_->ReleaseFrame(frame_out);
+
+ // But Frame C isn't continuous yet.
+ frame_out = DecodeCompleteFrame();
+ EXPECT_TRUE(frame_out == NULL);
+
+ // Add in the padding. These are empty packets (data length is 0) with no
+ // marker bit and matching the timestamp of Frame B.
+ VCMPacket empty_packet(data_, 0, seq_num_ + 2, timestamp_ + (33 * 90), false);
+ EXPECT_EQ(kOldPacket,
+ jitter_buffer_->InsertPacket(empty_packet, &retransmitted));
+ empty_packet.seqNum += 1;
+ EXPECT_EQ(kOldPacket,
+ jitter_buffer_->InsertPacket(empty_packet, &retransmitted));
+
+ // But now Frame C should be ready!
+ frame_out = DecodeCompleteFrame();
+ EXPECT_TRUE(frame_out != NULL);
+ jitter_buffer_->ReleaseFrame(frame_out);
+}
+
+TEST_F(TestBasicJitterBuffer, DuplicatePackets) {
+ packet_->frameType = kVideoFrameKey;
+ packet_->isFirstPacket = true;
+ packet_->markerBit = false;
+ packet_->seqNum = seq_num_;
+ packet_->timestamp = timestamp_;
+ EXPECT_EQ(0, jitter_buffer_->num_packets());
+ EXPECT_EQ(0, jitter_buffer_->num_duplicated_packets());
+
+ bool retransmitted = false;
+ EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+
+ EXPECT_TRUE(frame_out == NULL);
+ EXPECT_EQ(1, jitter_buffer_->num_packets());
+ EXPECT_EQ(0, jitter_buffer_->num_duplicated_packets());
+
+ // Insert a packet into a frame.
+ EXPECT_EQ(kDuplicatePacket, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+ EXPECT_EQ(2, jitter_buffer_->num_packets());
+ EXPECT_EQ(1, jitter_buffer_->num_duplicated_packets());
+
+ seq_num_++;
+ packet_->seqNum = seq_num_;
+ packet_->markerBit = true;
+ packet_->isFirstPacket = false;
+
+ EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+
+ frame_out = DecodeCompleteFrame();
+ ASSERT_TRUE(frame_out != NULL);
+ CheckOutFrame(frame_out, 2 * size_, false);
+
+ EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
+ EXPECT_EQ(3, jitter_buffer_->num_packets());
+ EXPECT_EQ(1, jitter_buffer_->num_duplicated_packets());
+ jitter_buffer_->ReleaseFrame(frame_out);
+}
+
+TEST_F(TestBasicJitterBuffer, DuplicatePreviousDeltaFramePacket) {
+ packet_->frameType = kVideoFrameKey;
+ packet_->isFirstPacket = true;
+ packet_->markerBit = true;
+ packet_->seqNum = seq_num_;
+ packet_->timestamp = timestamp_;
+ jitter_buffer_->SetDecodeErrorMode(kNoErrors);
+ EXPECT_EQ(0, jitter_buffer_->num_packets());
+ EXPECT_EQ(0, jitter_buffer_->num_duplicated_packets());
+
+ bool retransmitted = false;
+ // Insert first complete frame.
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+ ASSERT_TRUE(frame_out != NULL);
+ CheckOutFrame(frame_out, size_, false);
+ EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
+ jitter_buffer_->ReleaseFrame(frame_out);
+
+ // Insert 3 delta frames.
+ for (uint16_t i = 1; i <= 3; ++i) {
+ packet_->seqNum = seq_num_ + i;
+ packet_->timestamp = timestamp_ + (i * 33) * 90;
+ packet_->frameType = kVideoFrameDelta;
+ EXPECT_EQ(kCompleteSession,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+ EXPECT_EQ(i + 1, jitter_buffer_->num_packets());
+ EXPECT_EQ(0, jitter_buffer_->num_duplicated_packets());
+ }
+
+ // Retransmit second delta frame.
+ packet_->seqNum = seq_num_ + 2;
+ packet_->timestamp = timestamp_ + 66 * 90;
+
+ EXPECT_EQ(kDuplicatePacket,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ EXPECT_EQ(5, jitter_buffer_->num_packets());
+ EXPECT_EQ(1, jitter_buffer_->num_duplicated_packets());
+
+ // Should be able to decode 3 delta frames, key frame already decoded.
+ for (size_t i = 0; i < 3; ++i) {
+ frame_out = DecodeCompleteFrame();
+ ASSERT_TRUE(frame_out != NULL);
+ CheckOutFrame(frame_out, size_, false);
+ EXPECT_EQ(kVideoFrameDelta, frame_out->FrameType());
+ jitter_buffer_->ReleaseFrame(frame_out);
+ }
+}
+
+TEST_F(TestBasicJitterBuffer, TestSkipForwardVp9) {
+  // Verify that the JB skips forward to the next base-layer frame.
+ // -------------------------------------------------
+ // | 65485 | 65486 | 65487 | 65488 | 65489 | ...
+ // | pid:5 | pid:6 | pid:7 | pid:8 | pid:9 | ...
+ // | tid:0 | tid:2 | tid:1 | tid:2 | tid:0 | ...
+ // | ss | x | x | x | |
+ // -------------------------------------------------
+ // |<----------tl0idx:200--------->|<---tl0idx:201---
+
+ bool re = false;
+ packet_->codec = kVideoCodecVP9;
+ packet_->codecSpecificHeader.codec = kRtpVideoVp9;
+ packet_->isFirstPacket = true;
+ packet_->markerBit = true;
+ packet_->codecSpecificHeader.codecHeader.VP9.flexible_mode = false;
+ packet_->codecSpecificHeader.codecHeader.VP9.spatial_idx = 0;
+ packet_->codecSpecificHeader.codecHeader.VP9.beginning_of_frame = true;
+ packet_->codecSpecificHeader.codecHeader.VP9.end_of_frame = true;
+ packet_->codecSpecificHeader.codecHeader.VP9.temporal_idx = kNoTemporalIdx;
+ packet_->codecSpecificHeader.codecHeader.VP9.temporal_up_switch = false;
+
+ packet_->seqNum = 65485;
+ packet_->timestamp = 1000;
+ packet_->frameType = kVideoFrameKey;
+ packet_->codecSpecificHeader.codecHeader.VP9.picture_id = 5;
+ packet_->codecSpecificHeader.codecHeader.VP9.tl0_pic_idx = 200;
+ packet_->codecSpecificHeader.codecHeader.VP9.gof_idx = 0;
+ packet_->codecSpecificHeader.codecHeader.VP9.ss_data_available = true;
+ packet_->codecSpecificHeader.codecHeader.VP9.gof.SetGofInfoVP9(
+ kTemporalStructureMode3); // kTemporalStructureMode3: 0-2-1-2..
+ EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_, &re));
+
+ // Insert next temporal layer 0.
+ packet_->seqNum = 65489;
+ packet_->timestamp = 13000;
+ packet_->frameType = kVideoFrameDelta;
+ packet_->codecSpecificHeader.codecHeader.VP9.picture_id = 9;
+ packet_->codecSpecificHeader.codecHeader.VP9.tl0_pic_idx = 201;
+ packet_->codecSpecificHeader.codecHeader.VP9.gof_idx = 0;
+ packet_->codecSpecificHeader.codecHeader.VP9.ss_data_available = false;
+ EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_, &re));
+
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+ EXPECT_EQ(1000U, frame_out->TimeStamp());
+ EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
+ jitter_buffer_->ReleaseFrame(frame_out);
+
+ frame_out = DecodeCompleteFrame();
+ EXPECT_EQ(13000U, frame_out->TimeStamp());
+ EXPECT_EQ(kVideoFrameDelta, frame_out->FrameType());
+ jitter_buffer_->ReleaseFrame(frame_out);
+}
+
+TEST_F(TestBasicJitterBuffer, ReorderedVp9SsData_3TlLayers) {
+  // Verify that frames are updated with SS data when the SS packet is
+  // reordered.
+ // --------------------------------
+ // | 65486 | 65487 | 65485 |...
+ // | pid:6 | pid:7 | pid:5 |...
+ // | tid:2 | tid:1 | tid:0 |...
+ // | | | ss |
+ // --------------------------------
+ // |<--------tl0idx:200--------->|
+
+ bool re = false;
+ packet_->codec = kVideoCodecVP9;
+ packet_->codecSpecificHeader.codec = kRtpVideoVp9;
+ packet_->isFirstPacket = true;
+ packet_->markerBit = true;
+ packet_->codecSpecificHeader.codecHeader.VP9.flexible_mode = false;
+ packet_->codecSpecificHeader.codecHeader.VP9.spatial_idx = 0;
+ packet_->codecSpecificHeader.codecHeader.VP9.beginning_of_frame = true;
+ packet_->codecSpecificHeader.codecHeader.VP9.end_of_frame = true;
+ packet_->codecSpecificHeader.codecHeader.VP9.temporal_idx = kNoTemporalIdx;
+ packet_->codecSpecificHeader.codecHeader.VP9.temporal_up_switch = false;
+ packet_->codecSpecificHeader.codecHeader.VP9.tl0_pic_idx = 200;
+
+ packet_->seqNum = 65486;
+ packet_->timestamp = 6000;
+ packet_->frameType = kVideoFrameDelta;
+ packet_->codecSpecificHeader.codecHeader.VP9.picture_id = 6;
+ packet_->codecSpecificHeader.codecHeader.VP9.gof_idx = 1;
+ EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_, &re));
+
+ packet_->seqNum = 65487;
+ packet_->timestamp = 9000;
+ packet_->frameType = kVideoFrameDelta;
+ packet_->codecSpecificHeader.codecHeader.VP9.picture_id = 7;
+ packet_->codecSpecificHeader.codecHeader.VP9.gof_idx = 2;
+ EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_, &re));
+
+ // Insert first frame with SS data.
+ packet_->seqNum = 65485;
+ packet_->timestamp = 3000;
+ packet_->frameType = kVideoFrameKey;
+ packet_->width = 352;
+ packet_->height = 288;
+ packet_->codecSpecificHeader.codecHeader.VP9.picture_id = 5;
+ packet_->codecSpecificHeader.codecHeader.VP9.gof_idx = 0;
+ packet_->codecSpecificHeader.codecHeader.VP9.ss_data_available = true;
+ packet_->codecSpecificHeader.codecHeader.VP9.gof.SetGofInfoVP9(
+ kTemporalStructureMode3); // kTemporalStructureMode3: 0-2-1-2..
+ EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_, &re));
+
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+ EXPECT_EQ(3000U, frame_out->TimeStamp());
+ EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
+ EXPECT_EQ(0, frame_out->CodecSpecific()->codecSpecific.VP9.temporal_idx);
+ EXPECT_FALSE(
+ frame_out->CodecSpecific()->codecSpecific.VP9.temporal_up_switch);
+ jitter_buffer_->ReleaseFrame(frame_out);
+
+ frame_out = DecodeCompleteFrame();
+ EXPECT_EQ(6000U, frame_out->TimeStamp());
+ EXPECT_EQ(kVideoFrameDelta, frame_out->FrameType());
+ EXPECT_EQ(2, frame_out->CodecSpecific()->codecSpecific.VP9.temporal_idx);
+ EXPECT_TRUE(frame_out->CodecSpecific()->codecSpecific.VP9.temporal_up_switch);
+ jitter_buffer_->ReleaseFrame(frame_out);
+
+ frame_out = DecodeCompleteFrame();
+ EXPECT_EQ(9000U, frame_out->TimeStamp());
+ EXPECT_EQ(kVideoFrameDelta, frame_out->FrameType());
+ EXPECT_EQ(1, frame_out->CodecSpecific()->codecSpecific.VP9.temporal_idx);
+ EXPECT_TRUE(frame_out->CodecSpecific()->codecSpecific.VP9.temporal_up_switch);
+ jitter_buffer_->ReleaseFrame(frame_out);
+}
+
+TEST_F(TestBasicJitterBuffer, ReorderedVp9SsData_2Tl2SLayers) {
+  // Verify that frames are updated with SS data when the SS packet is
+  // reordered.
+ // -----------------------------------------
+ // | 65486 | 65487 | 65485 | 65484 |...
+ // | pid:6 | pid:6 | pid:5 | pid:5 |...
+ // | tid:1 | tid:1 | tid:0 | tid:0 |...
+ // | sid:0 | sid:1 | sid:1 | sid:0 |...
+ // | t:6000 | t:6000 | t:3000 | t:3000 |
+ // | | | | ss |
+ // -----------------------------------------
+ // |<-----------tl0idx:200------------>|
+
+ bool re = false;
+ packet_->codec = kVideoCodecVP9;
+ packet_->codecSpecificHeader.codec = kRtpVideoVp9;
+ packet_->codecSpecificHeader.codecHeader.VP9.flexible_mode = false;
+ packet_->codecSpecificHeader.codecHeader.VP9.beginning_of_frame = true;
+ packet_->codecSpecificHeader.codecHeader.VP9.end_of_frame = true;
+ packet_->codecSpecificHeader.codecHeader.VP9.temporal_idx = kNoTemporalIdx;
+ packet_->codecSpecificHeader.codecHeader.VP9.temporal_up_switch = false;
+ packet_->codecSpecificHeader.codecHeader.VP9.tl0_pic_idx = 200;
+
+ packet_->isFirstPacket = true;
+ packet_->markerBit = false;
+ packet_->seqNum = 65486;
+ packet_->timestamp = 6000;
+ packet_->frameType = kVideoFrameDelta;
+ packet_->codecSpecificHeader.codecHeader.VP9.spatial_idx = 0;
+ packet_->codecSpecificHeader.codecHeader.VP9.picture_id = 6;
+ packet_->codecSpecificHeader.codecHeader.VP9.gof_idx = 1;
+ EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_, &re));
+
+ packet_->isFirstPacket = false;
+ packet_->markerBit = true;
+ packet_->seqNum = 65487;
+ packet_->frameType = kVideoFrameDelta;
+ packet_->codecSpecificHeader.codecHeader.VP9.spatial_idx = 1;
+ packet_->codecSpecificHeader.codecHeader.VP9.picture_id = 6;
+ packet_->codecSpecificHeader.codecHeader.VP9.gof_idx = 1;
+ EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_, &re));
+
+ packet_->isFirstPacket = false;
+ packet_->markerBit = true;
+ packet_->seqNum = 65485;
+ packet_->timestamp = 3000;
+ packet_->frameType = kVideoFrameKey;
+ packet_->codecSpecificHeader.codecHeader.VP9.spatial_idx = 1;
+ packet_->codecSpecificHeader.codecHeader.VP9.picture_id = 5;
+ packet_->codecSpecificHeader.codecHeader.VP9.gof_idx = 0;
+ EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_, &re));
+
+ // Insert first frame with SS data.
+ packet_->isFirstPacket = true;
+ packet_->markerBit = false;
+ packet_->seqNum = 65484;
+ packet_->frameType = kVideoFrameKey;
+ packet_->width = 352;
+ packet_->height = 288;
+ packet_->codecSpecificHeader.codecHeader.VP9.spatial_idx = 0;
+ packet_->codecSpecificHeader.codecHeader.VP9.picture_id = 5;
+ packet_->codecSpecificHeader.codecHeader.VP9.gof_idx = 0;
+ packet_->codecSpecificHeader.codecHeader.VP9.ss_data_available = true;
+ packet_->codecSpecificHeader.codecHeader.VP9.gof.SetGofInfoVP9(
+      kTemporalStructureMode2);  // kTemporalStructureMode2: 0-1-0-1..
+ EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_, &re));
+
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+ EXPECT_EQ(3000U, frame_out->TimeStamp());
+ EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
+ EXPECT_EQ(0, frame_out->CodecSpecific()->codecSpecific.VP9.temporal_idx);
+ EXPECT_FALSE(
+ frame_out->CodecSpecific()->codecSpecific.VP9.temporal_up_switch);
+ jitter_buffer_->ReleaseFrame(frame_out);
+
+ frame_out = DecodeCompleteFrame();
+ EXPECT_EQ(6000U, frame_out->TimeStamp());
+ EXPECT_EQ(kVideoFrameDelta, frame_out->FrameType());
+ EXPECT_EQ(1, frame_out->CodecSpecific()->codecSpecific.VP9.temporal_idx);
+ EXPECT_TRUE(frame_out->CodecSpecific()->codecSpecific.VP9.temporal_up_switch);
+ jitter_buffer_->ReleaseFrame(frame_out);
+}
+
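+// Verify that when insertStartCode is set, a 4-byte start code is accounted
+// for in front of each packet when the frame is assembled.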
+TEST_F(TestBasicJitterBuffer, H264InsertStartCode) {
+ packet_->frameType = kVideoFrameKey;
+ packet_->isFirstPacket = true;
+ packet_->markerBit = false;
+ packet_->seqNum = seq_num_;
+ packet_->timestamp = timestamp_;
+ packet_->insertStartCode = true;
+
+ bool retransmitted = false;
+ EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+
+ // Frame should not be complete.
+ EXPECT_TRUE(frame_out == NULL);
+
+ seq_num_++;
+ packet_->isFirstPacket = false;
+ packet_->markerBit = true;
+ packet_->seqNum = seq_num_;
+
+ EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+
+ frame_out = DecodeCompleteFrame();
+ CheckOutFrame(frame_out, size_ * 2 + 4 * 2, true);
+ EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
+ jitter_buffer_->ReleaseFrame(frame_out);
+}
+
+// Test threshold conditions of decodable state.
+TEST_F(TestBasicJitterBuffer, PacketLossWithSelectiveErrorsThresholdCheck) {
+ jitter_buffer_->SetDecodeErrorMode(kSelectiveErrors);
+  // Always start with a key frame. Use 10 packets to test the decodable-state
+  // boundaries.
+ packet_->frameType = kVideoFrameKey;
+ packet_->isFirstPacket = true;
+ packet_->markerBit = false;
+ packet_->seqNum = seq_num_;
+ packet_->timestamp = timestamp_;
+
+ bool retransmitted = false;
+ EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+ uint32_t timestamp = 0;
+ EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
+ EXPECT_FALSE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
+
+ packet_->isFirstPacket = false;
+ for (int i = 1; i < 9; ++i) {
+ packet_->seqNum++;
+ EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+ EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
+ EXPECT_FALSE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
+ }
+
+  // Last packet.
+ packet_->markerBit = true;
+ packet_->seqNum++;
+
+ EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+ CheckOutFrame(frame_out, 10 * size_, false);
+ EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
+ jitter_buffer_->ReleaseFrame(frame_out);
+
+  // An incomplete frame can only be decoded once a subsequent frame has begun
+  // to arrive. Insert a packet belonging to a distant frame for this purpose.
+ packet_->frameType = kVideoFrameDelta;
+ packet_->isFirstPacket = true;
+ packet_->markerBit = false;
+ packet_->seqNum += 100;
+ packet_->timestamp += 33 * 90 * 8;
+
+ EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+ EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
+ EXPECT_FALSE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
+
+  // Insert the second frame.
+ packet_->seqNum -= 99;
+ packet_->timestamp -= 33 * 90 * 7;
+
+ EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+ EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
+ EXPECT_TRUE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
+
+ packet_->isFirstPacket = false;
+ for (int i = 1; i < 8; ++i) {
+ packet_->seqNum++;
+ EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+ EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
+ EXPECT_TRUE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
+ }
+
+ packet_->seqNum++;
+ EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+ EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
+ EXPECT_TRUE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
+
+ frame_out = DecodeIncompleteFrame();
+  ASSERT_TRUE(frame_out != NULL);
+ CheckOutFrame(frame_out, 9 * size_, false);
+ EXPECT_EQ(kVideoFrameDelta, frame_out->FrameType());
+ jitter_buffer_->ReleaseFrame(frame_out);
+
+ packet_->markerBit = true;
+ packet_->seqNum++;
+ EXPECT_EQ(kOldPacket, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+}
+
+// Make sure first packet is present before a frame can be decoded.
+TEST_F(TestBasicJitterBuffer, PacketLossWithSelectiveErrorsIncompleteKey) {
+ jitter_buffer_->SetDecodeErrorMode(kSelectiveErrors);
+ // Always start with a key frame.
+ packet_->frameType = kVideoFrameKey;
+ packet_->isFirstPacket = true;
+ packet_->markerBit = true;
+ packet_->seqNum = seq_num_;
+ packet_->timestamp = timestamp_;
+
+ bool retransmitted = false;
+ EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+ CheckOutFrame(frame_out, size_, false);
+ EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
+ jitter_buffer_->ReleaseFrame(frame_out);
+
+  // An incomplete frame can only be decoded once a subsequent frame has begun
+  // to arrive. Insert a packet belonging to a distant frame for this purpose.
+ packet_->frameType = kVideoFrameDelta;
+ packet_->isFirstPacket = false;
+ packet_->markerBit = false;
+ packet_->seqNum += 100;
+ packet_->timestamp += 33*90*8;
+ EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+ uint32_t timestamp;
+ EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
+ EXPECT_FALSE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
+
+ // Insert second frame - an incomplete key frame.
+ packet_->frameType = kVideoFrameKey;
+ packet_->isFirstPacket = true;
+ packet_->seqNum -= 99;
+ packet_->timestamp -= 33*90*7;
+
+ EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+ EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
+ EXPECT_FALSE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
+
+ // Insert a few more packets. Make sure we're waiting for the key frame to be
+ // complete.
+ packet_->isFirstPacket = false;
+ for (int i = 1; i < 5; ++i) {
+ packet_->seqNum++;
+ EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+ EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
+ EXPECT_FALSE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
+ }
+
+ // Complete key frame.
+ packet_->markerBit = true;
+ packet_->seqNum++;
+ EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+ frame_out = DecodeCompleteFrame();
+ CheckOutFrame(frame_out, 6 * size_, false);
+ EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
+ jitter_buffer_->ReleaseFrame(frame_out);
+}
+
+// Make sure first packet is present before a frame can be decoded.
+TEST_F(TestBasicJitterBuffer, PacketLossWithSelectiveErrorsMissingFirstPacket) {
+ jitter_buffer_->SetDecodeErrorMode(kSelectiveErrors);
+ // Always start with a key frame.
+ packet_->frameType = kVideoFrameKey;
+ packet_->isFirstPacket = true;
+ packet_->markerBit = true;
+ packet_->seqNum = seq_num_;
+ packet_->timestamp = timestamp_;
+
+ bool retransmitted = false;
+ EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+ CheckOutFrame(frame_out, size_, false);
+ EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
+ jitter_buffer_->ReleaseFrame(frame_out);
+
+  // An incomplete frame can only be decoded once a subsequent frame has begun
+  // to arrive. Insert a packet belonging to a distant frame for this purpose.
+ packet_->frameType = kVideoFrameDelta;
+ packet_->isFirstPacket = false;
+ packet_->markerBit = false;
+ packet_->seqNum += 100;
+ packet_->timestamp += 33*90*8;
+ EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+ uint32_t timestamp;
+ EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
+ EXPECT_FALSE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
+
+ // Insert second frame with the first packet missing. Make sure we're waiting
+ // for the key frame to be complete.
+ packet_->seqNum -= 98;
+ packet_->timestamp -= 33*90*7;
+
+ EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+ EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
+ EXPECT_FALSE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
+
+ for (int i = 0; i < 5; ++i) {
+ packet_->seqNum++;
+ EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+ EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
+ EXPECT_FALSE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
+ }
+
+ // Add first packet. Frame should now be decodable, but incomplete.
+ packet_->isFirstPacket = true;
+ packet_->seqNum -= 6;
+ EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+ EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &timestamp));
+ EXPECT_TRUE(jitter_buffer_->NextMaybeIncompleteTimestamp(&timestamp));
+
+ frame_out = DecodeIncompleteFrame();
+ CheckOutFrame(frame_out, 7 * size_, false);
+ EXPECT_EQ(kVideoFrameDelta, frame_out->FrameType());
+ jitter_buffer_->ReleaseFrame(frame_out);
+}
+
+TEST_F(TestBasicJitterBuffer, DiscontinuousStreamWhenDecodingWithErrors) {
+ // Will use one packet per frame.
+ jitter_buffer_->SetDecodeErrorMode(kWithErrors);
+ packet_->frameType = kVideoFrameKey;
+ packet_->isFirstPacket = true;
+ packet_->markerBit = true;
+ packet_->seqNum = seq_num_;
+ packet_->timestamp = timestamp_;
+ bool retransmitted = false;
+ EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+ uint32_t next_timestamp;
+ EXPECT_TRUE(jitter_buffer_->NextCompleteTimestamp(0, &next_timestamp));
+ EXPECT_EQ(packet_->timestamp, next_timestamp);
+ VCMEncodedFrame* frame = jitter_buffer_->ExtractAndSetDecode(next_timestamp);
+ EXPECT_TRUE(frame != NULL);
+ jitter_buffer_->ReleaseFrame(frame);
+
+ // Drop a complete frame.
+ timestamp_ += 2 * 33 * 90;
+ seq_num_ += 2;
+ packet_->frameType = kVideoFrameDelta;
+ packet_->isFirstPacket = true;
+ packet_->markerBit = false;
+ packet_->seqNum = seq_num_;
+ packet_->timestamp = timestamp_;
+ EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+ // Insert a packet (so the previous one will be released).
+ timestamp_ += 33 * 90;
+ seq_num_ += 2;
+ packet_->frameType = kVideoFrameDelta;
+ packet_->isFirstPacket = true;
+ packet_->markerBit = false;
+ packet_->seqNum = seq_num_;
+ packet_->timestamp = timestamp_;
+ EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+ EXPECT_FALSE(jitter_buffer_->NextCompleteTimestamp(0, &next_timestamp));
+ EXPECT_TRUE(jitter_buffer_->NextMaybeIncompleteTimestamp(&next_timestamp));
+ EXPECT_EQ(packet_->timestamp - 33 * 90, next_timestamp);
+}
+
+TEST_F(TestBasicJitterBuffer, PacketLoss) {
+  // Verify the missing-packets and the not-decodable-packets statistics.
+  // Insert 10 frames consisting of 4 packets each and remove one packet from
+  // all of them. The last packet of each frame is an empty (non-media) packet.
+
+  // Select a start seqNum which triggers a difficult wrap situation.
+  // The JB will only output (incomplete) frames if the next one has started
+  // to arrive. Start by inserting one frame (key).
+ jitter_buffer_->SetDecodeErrorMode(kWithErrors);
+ seq_num_ = 0xffff - 4;
+ seq_num_++;
+ packet_->frameType = kVideoFrameKey;
+ packet_->isFirstPacket = true;
+ packet_->markerBit = false;
+ packet_->seqNum = seq_num_;
+ packet_->timestamp = timestamp_;
+ packet_->completeNALU = kNaluStart;
+
+ bool retransmitted = false;
+ EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+ for (int i = 0; i < 11; ++i) {
+ webrtc::FrameType frametype = kVideoFrameDelta;
+ seq_num_++;
+ timestamp_ += 33*90;
+ packet_->frameType = frametype;
+ packet_->isFirstPacket = true;
+ packet_->markerBit = false;
+ packet_->seqNum = seq_num_;
+ packet_->timestamp = timestamp_;
+ packet_->completeNALU = kNaluStart;
+
+ EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+
+ // Should not be complete.
+ EXPECT_TRUE(frame_out == NULL);
+
+ seq_num_ += 2;
+ packet_->isFirstPacket = false;
+ packet_->markerBit = true;
+ packet_->seqNum = seq_num_;
+ packet_->completeNALU = kNaluEnd;
+
+ EXPECT_EQ(jitter_buffer_->InsertPacket(*packet_, &retransmitted),
+ kDecodableSession);
+
+ // Insert an empty (non-media) packet.
+ seq_num_++;
+ packet_->isFirstPacket = false;
+ packet_->markerBit = false;
+ packet_->seqNum = seq_num_;
+ packet_->completeNALU = kNaluEnd;
+ packet_->frameType = kEmptyFrame;
+
+ EXPECT_EQ(jitter_buffer_->InsertPacket(*packet_, &retransmitted),
+ kDecodableSession);
+ frame_out = DecodeIncompleteFrame();
+
+ // One of the packets has been discarded by the jitter buffer.
+ // Last frame can't be extracted yet.
+ if (i < 10) {
+ CheckOutFrame(frame_out, size_, false);
+
+ if (i == 0) {
+ EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
+ } else {
+ EXPECT_EQ(frametype, frame_out->FrameType());
+ }
+ EXPECT_FALSE(frame_out->Complete());
+ EXPECT_FALSE(frame_out->MissingFrame());
+ }
+
+ jitter_buffer_->ReleaseFrame(frame_out);
+ }
+
+  // Insert 3 old packets and verify that we have 3 discarded packets.
+  // Match the timestamp to the latest actually decoded timestamp.
+ timestamp_ -= 33 * 90;
+ packet_->timestamp = timestamp_ - 1000;
+
+ EXPECT_EQ(kOldPacket, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+
+ packet_->timestamp = timestamp_ - 500;
+
+ EXPECT_EQ(kOldPacket, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+
+ packet_->timestamp = timestamp_ - 100;
+
+ EXPECT_EQ(kOldPacket, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+
+ EXPECT_EQ(3, jitter_buffer_->num_discarded_packets());
+
+ jitter_buffer_->Flush();
+
+ // This statistic shouldn't be reset by a flush.
+ EXPECT_EQ(3, jitter_buffer_->num_discarded_packets());
+}
+
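+// Verify that a 100-packet frame is assembled correctly when the sequence
+// numbers wrap around 0xffff.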
+TEST_F(TestBasicJitterBuffer, DeltaFrame100PacketsWithSeqNumWrap) {
+ seq_num_ = 0xfff0;
+ packet_->frameType = kVideoFrameKey;
+ packet_->isFirstPacket = true;
+ packet_->markerBit = false;
+ packet_->seqNum = seq_num_;
+ packet_->timestamp = timestamp_;
+
+ bool retransmitted = false;
+ EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+
+ EXPECT_TRUE(frame_out == NULL);
+
+ int loop = 0;
+ do {
+ seq_num_++;
+ packet_->isFirstPacket = false;
+ packet_->markerBit = false;
+ packet_->seqNum = seq_num_;
+
+ EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+
+ frame_out = DecodeCompleteFrame();
+
+ EXPECT_TRUE(frame_out == NULL);
+
+ loop++;
+ } while (loop < 98);
+
+ seq_num_++;
+ packet_->isFirstPacket = false;
+ packet_->markerBit = true;
+ packet_->seqNum = seq_num_;
+
+ EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+
+ frame_out = DecodeCompleteFrame();
+
+ CheckOutFrame(frame_out, 100 * size_, false);
+
+ EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
+ jitter_buffer_->ReleaseFrame(frame_out);
+}
+
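+// Verify that a frame is assembled even when its packets arrive in reverse
+// order across a negative sequence number wrap.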
+TEST_F(TestBasicJitterBuffer, PacketReorderingReverseWithNegSeqNumWrap) {
+  // Insert the frame's last packet (the highest seqNum) first.
+ seq_num_ = 10;
+ packet_->frameType = kVideoFrameKey;
+ packet_->isFirstPacket = false;
+ packet_->markerBit = true;
+ packet_->seqNum = seq_num_;
+
+ bool retransmitted = false;
+ EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+
+ // Should not be complete.
+ EXPECT_TRUE(frame_out == NULL);
+
+  // Insert 98 packets in reverse order.
+ int loop = 0;
+ do {
+ seq_num_--;
+ packet_->isFirstPacket = false;
+ packet_->markerBit = false;
+ packet_->seqNum = seq_num_;
+
+ EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+
+ frame_out = DecodeCompleteFrame();
+
+ EXPECT_TRUE(frame_out == NULL);
+
+ loop++;
+ } while (loop < 98);
+
+  // Insert the first packet of the frame last.
+ seq_num_--;
+ packet_->isFirstPacket = true;
+ packet_->markerBit = false;
+ packet_->seqNum = seq_num_;
+
+ EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+
+ frame_out = DecodeCompleteFrame();
+ CheckOutFrame(frame_out, 100 * size_, false);
+ EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
+ jitter_buffer_->ReleaseFrame(frame_out);
+}
+
+TEST_F(TestBasicJitterBuffer, TestInsertOldFrame) {
+ // ------- -------
+ // | 2 | | 1 |
+ // ------- -------
+ // t = 3000 t = 2000
+ seq_num_ = 2;
+ timestamp_ = 3000;
+ packet_->frameType = kVideoFrameKey;
+ packet_->isFirstPacket = true;
+ packet_->markerBit = true;
+ packet_->timestamp = timestamp_;
+ packet_->seqNum = seq_num_;
+
+ bool retransmitted = false;
+ EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+ EXPECT_EQ(3000u, frame_out->TimeStamp());
+ CheckOutFrame(frame_out, size_, false);
+ EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
+ jitter_buffer_->ReleaseFrame(frame_out);
+
+ seq_num_--;
+ timestamp_ = 2000;
+ packet_->frameType = kVideoFrameDelta;
+ packet_->isFirstPacket = true;
+ packet_->markerBit = true;
+ packet_->seqNum = seq_num_;
+ packet_->timestamp = timestamp_;
+
+ EXPECT_EQ(kOldPacket, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+}
+
+TEST_F(TestBasicJitterBuffer, TestInsertOldFrameWithSeqNumWrap) {
+ // ------- -------
+ // | 2 | | 1 |
+ // ------- -------
+ // t = 3000 t = 0xffffff00
+
+ seq_num_ = 2;
+ timestamp_ = 3000;
+ packet_->frameType = kVideoFrameKey;
+ packet_->isFirstPacket = true;
+ packet_->markerBit = true;
+ packet_->seqNum = seq_num_;
+ packet_->timestamp = timestamp_;
+
+ bool retransmitted = false;
+ EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+ EXPECT_EQ(timestamp_, frame_out->TimeStamp());
+
+ CheckOutFrame(frame_out, size_, false);
+
+ EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
+
+ jitter_buffer_->ReleaseFrame(frame_out);
+
+ seq_num_--;
+ timestamp_ = 0xffffff00;
+ packet_->frameType = kVideoFrameDelta;
+ packet_->isFirstPacket = true;
+ packet_->markerBit = true;
+ packet_->seqNum = seq_num_;
+ packet_->timestamp = timestamp_;
+
+
+ // This timestamp is old.
+ EXPECT_EQ(kOldPacket, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+}
+
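+// Verify that frames are assembled and decoded in order across an RTP
+// timestamp wrap.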
+TEST_F(TestBasicJitterBuffer, TimestampWrap) {
+ // --------------- ---------------
+ // | 1 | 2 | | 3 | 4 |
+ // --------------- ---------------
+ // t = 0xffffff00 t = 33*90
+
+ timestamp_ = 0xffffff00;
+ packet_->frameType = kVideoFrameKey;
+ packet_->isFirstPacket = true;
+ packet_->markerBit = false;
+ packet_->seqNum = seq_num_;
+ packet_->timestamp = timestamp_;
+
+ bool retransmitted = false;
+ EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+ EXPECT_TRUE(frame_out == NULL);
+
+ seq_num_++;
+ packet_->isFirstPacket = false;
+ packet_->markerBit = true;
+ packet_->seqNum = seq_num_;
+
+ EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+
+ frame_out = DecodeCompleteFrame();
+ CheckOutFrame(frame_out, 2 * size_, false);
+ jitter_buffer_->ReleaseFrame(frame_out);
+
+ seq_num_++;
+ timestamp_ += 33*90;
+ packet_->frameType = kVideoFrameDelta;
+ packet_->isFirstPacket = true;
+ packet_->markerBit = false;
+ packet_->seqNum = seq_num_;
+ packet_->timestamp = timestamp_;
+
+ EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+
+ frame_out = DecodeCompleteFrame();
+ EXPECT_TRUE(frame_out == NULL);
+
+ seq_num_++;
+ packet_->isFirstPacket = false;
+ packet_->markerBit = true;
+ packet_->seqNum = seq_num_;
+
+ EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+
+ frame_out = DecodeCompleteFrame();
+ CheckOutFrame(frame_out, 2 * size_, false);
+ EXPECT_EQ(kVideoFrameDelta, frame_out->FrameType());
+ jitter_buffer_->ReleaseFrame(frame_out);
+}
+
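+// Verify that two single-packet frames on either side of a timestamp wrap
+// are decoded in the correct order.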
+TEST_F(TestBasicJitterBuffer, 2FrameWithTimestampWrap) {
+ // ------- -------
+ // | 1 | | 2 |
+ // ------- -------
+ // t = 0xffffff00 t = 2700
+
+ timestamp_ = 0xffffff00;
+ packet_->frameType = kVideoFrameKey;
+ packet_->isFirstPacket = true;
+ packet_->markerBit = true;
+ packet_->timestamp = timestamp_;
+
+ bool retransmitted = false;
+ // Insert first frame (session will be complete).
+ EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+
+ // Insert next frame.
+ seq_num_++;
+ timestamp_ = 2700;
+ packet_->frameType = kVideoFrameDelta;
+ packet_->isFirstPacket = true;
+ packet_->markerBit = true;
+ packet_->seqNum = seq_num_;
+ packet_->timestamp = timestamp_;
+
+ EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+ EXPECT_EQ(0xffffff00, frame_out->TimeStamp());
+ CheckOutFrame(frame_out, size_, false);
+ EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
+ jitter_buffer_->ReleaseFrame(frame_out);
+
+ VCMEncodedFrame* frame_out2 = DecodeCompleteFrame();
+ EXPECT_EQ(2700u, frame_out2->TimeStamp());
+ CheckOutFrame(frame_out2, size_, false);
+ EXPECT_EQ(kVideoFrameDelta, frame_out2->FrameType());
+ jitter_buffer_->ReleaseFrame(frame_out2);
+}
+
+TEST_F(TestBasicJitterBuffer, Insert2FramesReOrderedWithTimestampWrap) {
+ // ------- -------
+ // | 2 | | 1 |
+ // ------- -------
+ // t = 2700 t = 0xffffff00
+
+ seq_num_ = 2;
+ timestamp_ = 2700;
+ packet_->frameType = kVideoFrameDelta;
+ packet_->isFirstPacket = true;
+ packet_->markerBit = true;
+ packet_->seqNum = seq_num_;
+ packet_->timestamp = timestamp_;
+
+ bool retransmitted = false;
+ EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+
+ // Insert second frame
+ seq_num_--;
+ timestamp_ = 0xffffff00;
+ packet_->frameType = kVideoFrameKey;
+ packet_->isFirstPacket = true;
+ packet_->markerBit = true;
+ packet_->seqNum = seq_num_;
+ packet_->timestamp = timestamp_;
+
+ EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+ EXPECT_EQ(0xffffff00, frame_out->TimeStamp());
+ CheckOutFrame(frame_out, size_, false);
+ EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
+ jitter_buffer_->ReleaseFrame(frame_out);
+
+ VCMEncodedFrame* frame_out2 = DecodeCompleteFrame();
+ EXPECT_EQ(2700u, frame_out2->TimeStamp());
+ CheckOutFrame(frame_out2, size_, false);
+ EXPECT_EQ(kVideoFrameDelta, frame_out2->FrameType());
+ jitter_buffer_->ReleaseFrame(frame_out2);
+}
+
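+// Verify that inserting more packets than kMaxPacketsInSession into a single
+// frame returns kSizeError and the frame cannot be decoded.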
+TEST_F(TestBasicJitterBuffer, DeltaFrameWithMoreThanMaxNumberOfPackets) {
+ int loop = 0;
+ bool retransmitted = false;
+  // Insert kMaxPacketsInSession packets into the same frame.
+ do {
+ seq_num_++;
+ packet_->isFirstPacket = false;
+ packet_->markerBit = false;
+ packet_->seqNum = seq_num_;
+
+    EXPECT_EQ(kIncomplete,
+              jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ loop++;
+ } while (loop < kMaxPacketsInSession);
+
+ // Max number of packets inserted.
+ // Insert one more packet.
+ seq_num_++;
+ packet_->isFirstPacket = false;
+ packet_->markerBit = true;
+ packet_->seqNum = seq_num_;
+
+ // Insert the packet -> frame recycled.
+ EXPECT_EQ(kSizeError, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+ EXPECT_TRUE(NULL == DecodeCompleteFrame());
+}
+
+TEST_F(TestBasicJitterBuffer, ExceedNumOfFrameWithSeqNumWrap) {
+  // Fill the JB with more than the max number of frames (50 delta frames +
+  // 51 key frames) with a wrap in seq_num_.
+ //
+ // --------------------------------------------------------------
+ // | 65485 | 65486 | 65487 | .... | 65535 | 0 | 1 | 2 | .....| 50 |
+ // --------------------------------------------------------------
+ // |<-----------delta frames------------->|<------key frames----->|
+
+ int loop = 0;
+ seq_num_ = 65485;
+ uint32_t first_key_frame_timestamp = 0;
+ bool retransmitted = false;
+  // Insert kMaxNumberOfFrames frames.
+ do {
+ timestamp_ += 33*90;
+ seq_num_++;
+ packet_->isFirstPacket = true;
+ packet_->markerBit = true;
+ packet_->seqNum = seq_num_;
+ packet_->timestamp = timestamp_;
+
+ if (loop == 50) {
+ first_key_frame_timestamp = packet_->timestamp;
+ packet_->frameType = kVideoFrameKey;
+ }
+
+ // Insert frame.
+ EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+
+ loop++;
+ } while (loop < kMaxNumberOfFrames);
+
+ // Max number of frames inserted.
+
+ // Insert one more frame.
+ timestamp_ += 33*90;
+ seq_num_++;
+ packet_->isFirstPacket = true;
+ packet_->markerBit = true;
+ packet_->seqNum = seq_num_;
+ packet_->timestamp = timestamp_;
+
+  // Now there is no free frame; frames will be recycled until the first key
+  // frame.
+ EXPECT_EQ(kFlushIndicator,
+ jitter_buffer_->InsertPacket(*packet_, &retransmitted));
+
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+ EXPECT_EQ(first_key_frame_timestamp, frame_out->TimeStamp());
+ CheckOutFrame(frame_out, size_, false);
+ EXPECT_EQ(kVideoFrameKey, frame_out->FrameType());
+ jitter_buffer_->ReleaseFrame(frame_out);
+}
+
+TEST_F(TestBasicJitterBuffer, EmptyLastFrame) {
+ jitter_buffer_->SetDecodeErrorMode(kWithErrors);
+ seq_num_ = 3;
+  // Insert one empty packet per frame; the JB should never return the last
+  // inserted timestamp. Empty frames are only returned once subsequent frames
+  // have arrived.
+ int maxSize = 1000;
+ bool retransmitted = false;
+ for (int i = 0; i < maxSize + 10; i++) {
+ timestamp_ += 33 * 90;
+ seq_num_++;
+ packet_->isFirstPacket = false;
+ packet_->markerBit = false;
+ packet_->seqNum = seq_num_;
+ packet_->timestamp = timestamp_;
+ packet_->frameType = kEmptyFrame;
+
+ EXPECT_EQ(kNoError, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+ VCMEncodedFrame* testFrame = DecodeIncompleteFrame();
+ // Timestamp should never be the last TS inserted.
+ if (testFrame != NULL) {
+ EXPECT_TRUE(testFrame->TimeStamp() < timestamp_);
+ jitter_buffer_->ReleaseFrame(testFrame);
+ }
+ }
+}
+
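+// Verify which parts of an H264 frame are decodable (with errors allowed)
+// when packets within a NALU are lost or reordered.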
+TEST_F(TestBasicJitterBuffer, H264IncompleteNalu) {
+ jitter_buffer_->SetNackMode(kNoNack, -1, -1);
+ jitter_buffer_->SetDecodeErrorMode(kWithErrors);
+ ++seq_num_;
+ timestamp_ += 33 * 90;
+ int insertedLength = 0;
+ packet_->seqNum = seq_num_;
+ packet_->timestamp = timestamp_;
+ packet_->frameType = kVideoFrameKey;
+ packet_->isFirstPacket = true;
+ packet_->completeNALU = kNaluStart;
+ packet_->markerBit = false;
+ bool retransmitted = false;
+
+ EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+
+ seq_num_ += 2; // Skip one packet.
+ packet_->seqNum = seq_num_;
+ packet_->frameType = kVideoFrameKey;
+ packet_->isFirstPacket = false;
+ packet_->completeNALU = kNaluIncomplete;
+ packet_->markerBit = false;
+
+ EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+
+ seq_num_++;
+ packet_->seqNum = seq_num_;
+ packet_->frameType = kVideoFrameKey;
+ packet_->isFirstPacket = false;
+ packet_->completeNALU = kNaluEnd;
+ packet_->markerBit = false;
+
+ EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+
+ seq_num_++;
+ packet_->seqNum = seq_num_;
+ packet_->completeNALU = kNaluComplete;
+ packet_->markerBit = true; // Last packet.
+ EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+  // The JB will only output (incomplete) frames if a packet belonging to a
+  // subsequent frame was already inserted. Insert one packet of a subsequent
+  // frame. Place a high timestamp so the JB always has a next frame
+  // (otherwise, for every inserted frame we would need to take care of the
+  // next frame as well).
+ packet_->seqNum = 1;
+ packet_->timestamp = timestamp_ + 33 * 90 * 10;
+ packet_->frameType = kVideoFrameDelta;
+ packet_->isFirstPacket = false;
+ packet_->completeNALU = kNaluStart;
+ packet_->markerBit = false;
+
+ EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+
+ VCMEncodedFrame* frame_out = DecodeIncompleteFrame();
+
+  // Within a NALU we can decode everything up to the first lost packet.
+  // Thus we can decode the first packet of the first NALU and the second
+  // NALU, which consists of a single packet.
+ CheckOutFrame(frame_out, packet_->sizeBytes * 2, false);
+ jitter_buffer_->ReleaseFrame(frame_out);
+
+ // Test reordered start frame + 1 lost.
+ seq_num_ += 2; // Re-order 1 frame.
+ timestamp_ += 33*90;
+ insertedLength = 0;
+
+ packet_->seqNum = seq_num_;
+ packet_->timestamp = timestamp_;
+ packet_->frameType = kVideoFrameKey;
+ packet_->isFirstPacket = false;
+ packet_->completeNALU = kNaluEnd;
+ packet_->markerBit = false;
+ EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+ insertedLength += packet_->sizeBytes; // This packet should be decoded.
+ seq_num_--;
+ packet_->seqNum = seq_num_;
+ packet_->timestamp = timestamp_;
+ packet_->frameType = kVideoFrameKey;
+ packet_->isFirstPacket = true;
+ packet_->completeNALU = kNaluStart;
+ packet_->markerBit = false;
+
+ EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+ insertedLength += packet_->sizeBytes; // This packet should be decoded.
+
+ seq_num_ += 3; // One packet drop.
+ packet_->seqNum = seq_num_;
+ packet_->timestamp = timestamp_;
+ packet_->frameType = kVideoFrameKey;
+ packet_->isFirstPacket = false;
+ packet_->completeNALU = kNaluComplete;
+ packet_->markerBit = false;
+ EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+ insertedLength += packet_->sizeBytes; // This packet should be decoded.
+ seq_num_++;
+ packet_->seqNum = seq_num_;
+ packet_->timestamp = timestamp_;
+ packet_->frameType = kVideoFrameKey;
+ packet_->isFirstPacket = false;
+ packet_->completeNALU = kNaluStart;
+ packet_->markerBit = false;
+ EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+  // This packet should be decoded since it's the beginning of a NALU.
+ insertedLength += packet_->sizeBytes;
+
+ seq_num_ += 2;
+ packet_->seqNum = seq_num_;
+ packet_->timestamp = timestamp_;
+ packet_->frameType = kVideoFrameKey;
+ packet_->isFirstPacket = false;
+ packet_->completeNALU = kNaluEnd;
+ packet_->markerBit = true;
+ EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+  // This packet should not be decoded because it is an incomplete NALU and it
+  // is the last packet of the frame.
+ frame_out = DecodeIncompleteFrame();
+  // Only the last NALU is complete.
+ CheckOutFrame(frame_out, insertedLength, false);
+ jitter_buffer_->ReleaseFrame(frame_out);
+
+  // Test inserting an empty packet.
+ seq_num_++;
+ timestamp_ += 33 * 90;
+ VCMPacket emptypacket(data_, 0, seq_num_, timestamp_, true);
+ emptypacket.seqNum = seq_num_;
+ emptypacket.timestamp = timestamp_;
+ emptypacket.frameType = kVideoFrameKey;
+ emptypacket.isFirstPacket = true;
+ emptypacket.completeNALU = kNaluComplete;
+ emptypacket.markerBit = true;
+ EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(emptypacket,
+ &retransmitted));
+
+ // Will be sent to the decoder, as a packet belonging to a subsequent frame
+ // has arrived.
+ frame_out = DecodeIncompleteFrame();
+ EXPECT_TRUE(frame_out != NULL);
+ jitter_buffer_->ReleaseFrame(frame_out);
+
+ // Test that a frame can include an empty packet.
+ seq_num_++;
+ timestamp_ += 33 * 90;
+
+ packet_->seqNum = seq_num_;
+ packet_->timestamp = timestamp_;
+ packet_->frameType = kVideoFrameKey;
+ packet_->isFirstPacket = true;
+ packet_->completeNALU = kNaluComplete;
+ packet_->markerBit = false;
+
+ EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+
+ seq_num_++;
+ emptypacket.seqNum = seq_num_;
+ emptypacket.timestamp = timestamp_;
+ emptypacket.frameType = kVideoFrameKey;
+ emptypacket.isFirstPacket = true;
+ emptypacket.completeNALU = kNaluComplete;
+ emptypacket.markerBit = true;
+ EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(emptypacket,
+ &retransmitted));
+
+ frame_out = DecodeCompleteFrame();
+  // The empty packet should not add any data to the frame.
+ CheckOutFrame(frame_out, packet_->sizeBytes, false);
+ jitter_buffer_->ReleaseFrame(frame_out);
+}
+
+TEST_F(TestBasicJitterBuffer, NextFrameWhenIncomplete) {
+  // Test that we cannot get incomplete frames from the JB if we haven't
+ // received the marker bit, unless we have received a packet from a later
+ // timestamp.
+ jitter_buffer_->SetDecodeErrorMode(kWithErrors);
+ // Start with a complete key frame - insert and decode.
+ packet_->frameType = kVideoFrameKey;
+ packet_->isFirstPacket = true;
+ packet_->markerBit = true;
+ bool retransmitted = false;
+
+ EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+ VCMEncodedFrame* frame_out = DecodeCompleteFrame();
+ EXPECT_TRUE(frame_out != NULL);
+ jitter_buffer_->ReleaseFrame(frame_out);
+
+ packet_->seqNum += 2;
+ packet_->timestamp += 33 * 90;
+ packet_->frameType = kVideoFrameDelta;
+ packet_->isFirstPacket = false;
+ packet_->markerBit = false;
+
+
+ EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+
+ frame_out = DecodeIncompleteFrame();
+ EXPECT_TRUE(frame_out == NULL);
+
+ packet_->seqNum += 2;
+ packet_->timestamp += 33 * 90;
+ packet_->isFirstPacket = true;
+
+ EXPECT_EQ(kDecodableSession, jitter_buffer_->InsertPacket(*packet_,
+ &retransmitted));
+
+ frame_out = DecodeIncompleteFrame();
+ CheckOutFrame(frame_out, packet_->sizeBytes, false);
+ jitter_buffer_->ReleaseFrame(frame_out);
+}
+
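+// Verify the behavior when the jitter buffer is filled with delta frames
+// that can't be decoded: inserting one more frame should flag a flush.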
+TEST_F(TestRunningJitterBuffer, Full) {
+ // Insert a key frame and decode it.
+ EXPECT_GE(InsertFrame(kVideoFrameKey), kNoError);
+ EXPECT_TRUE(DecodeCompleteFrame());
+ DropFrame(1);
+ // Fill the jitter buffer.
+ EXPECT_GE(InsertFrames(kMaxNumberOfFrames, kVideoFrameDelta), kNoError);
+ // Make sure we can't decode these frames.
+ EXPECT_FALSE(DecodeCompleteFrame());
+ // This frame will make the jitter buffer recycle frames until a key frame.
+ // Since none is found it will have to wait until the next key frame before
+ // decoding.
+ EXPECT_EQ(kFlushIndicator, InsertFrame(kVideoFrameDelta));
+ EXPECT_FALSE(DecodeCompleteFrame());
+}
+
+TEST_F(TestRunningJitterBuffer, EmptyPackets) {
+ // Make sure a frame can get complete even though empty packets are missing.
+ stream_generator_->GenerateFrame(kVideoFrameKey, 3, 3,
+ clock_->TimeInMilliseconds());
+ bool request_key_frame = false;
+ // Insert empty packet.
+ EXPECT_EQ(kNoError, InsertPacketAndPop(4));
+ EXPECT_FALSE(request_key_frame);
+ // Insert 3 media packets.
+ EXPECT_EQ(kIncomplete, InsertPacketAndPop(0));
+ EXPECT_FALSE(request_key_frame);
+ EXPECT_EQ(kIncomplete, InsertPacketAndPop(0));
+ EXPECT_FALSE(request_key_frame);
+ EXPECT_EQ(kCompleteSession, InsertPacketAndPop(0));
+ EXPECT_FALSE(request_key_frame);
+ // Insert empty packet.
+ EXPECT_EQ(kCompleteSession, InsertPacketAndPop(0));
+ EXPECT_FALSE(request_key_frame);
+}
+
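+// Verify the frame count, incoming framerate and incoming bitrate statistics.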
+TEST_F(TestRunningJitterBuffer, StatisticsTest) {
+ FrameCounts frame_stats(jitter_buffer_->FrameStatistics());
+ EXPECT_EQ(0, frame_stats.delta_frames);
+ EXPECT_EQ(0, frame_stats.key_frames);
+
+ uint32_t framerate = 0;
+ uint32_t bitrate = 0;
+ jitter_buffer_->IncomingRateStatistics(&framerate, &bitrate);
+ EXPECT_EQ(0u, framerate);
+ EXPECT_EQ(0u, bitrate);
+
+ // Insert a couple of key and delta frames.
+ InsertFrame(kVideoFrameKey);
+ InsertFrame(kVideoFrameDelta);
+ InsertFrame(kVideoFrameDelta);
+ InsertFrame(kVideoFrameKey);
+ InsertFrame(kVideoFrameDelta);
+  // Decode some of them to make sure the statistics don't depend on frames
+  // being decoded.
+ EXPECT_TRUE(DecodeCompleteFrame());
+ EXPECT_TRUE(DecodeCompleteFrame());
+ frame_stats = jitter_buffer_->FrameStatistics();
+ EXPECT_EQ(3, frame_stats.delta_frames);
+ EXPECT_EQ(2, frame_stats.key_frames);
+
+ // Insert 20 more frames to get estimates of bitrate and framerate over
+ // 1 second.
+ for (int i = 0; i < 20; ++i) {
+ InsertFrame(kVideoFrameDelta);
+ }
+ jitter_buffer_->IncomingRateStatistics(&framerate, &bitrate);
+ // TODO(holmer): The current implementation returns the average of the last
+ // two framerate calculations, which is why it takes two calls to reach the
+ // actual framerate. This should be fixed.
+ EXPECT_EQ(kDefaultFrameRate / 2u, framerate);
+ EXPECT_EQ(kDefaultBitrateKbps, bitrate);
+ // Insert 25 more frames to get estimates of bitrate and framerate over
+ // 2 seconds.
+ for (int i = 0; i < 25; ++i) {
+ InsertFrame(kVideoFrameDelta);
+ }
+ jitter_buffer_->IncomingRateStatistics(&framerate, &bitrate);
+ EXPECT_EQ(kDefaultFrameRate, framerate);
+ EXPECT_EQ(kDefaultBitrateKbps, bitrate);
+}
+
+TEST_F(TestRunningJitterBuffer, SkipToKeyFrame) {
+ // Insert delta frames.
+ EXPECT_GE(InsertFrames(5, kVideoFrameDelta), kNoError);
+ // Can't decode without a key frame.
+ EXPECT_FALSE(DecodeCompleteFrame());
+ InsertFrame(kVideoFrameKey);
+ // Skip to the next key frame.
+ EXPECT_TRUE(DecodeCompleteFrame());
+}
+
+TEST_F(TestRunningJitterBuffer, DontSkipToKeyFrameIfDecodable) {
+ InsertFrame(kVideoFrameKey);
+ EXPECT_TRUE(DecodeCompleteFrame());
+ const int kNumDeltaFrames = 5;
+ EXPECT_GE(InsertFrames(kNumDeltaFrames, kVideoFrameDelta), kNoError);
+ InsertFrame(kVideoFrameKey);
+ for (int i = 0; i < kNumDeltaFrames + 1; ++i) {
+ EXPECT_TRUE(DecodeCompleteFrame());
+ }
+}
+
+TEST_F(TestRunningJitterBuffer, KeyDeltaKeyDelta) {
+ InsertFrame(kVideoFrameKey);
+ EXPECT_TRUE(DecodeCompleteFrame());
+ const int kNumDeltaFrames = 5;
+ EXPECT_GE(InsertFrames(kNumDeltaFrames, kVideoFrameDelta), kNoError);
+ InsertFrame(kVideoFrameKey);
+ EXPECT_GE(InsertFrames(kNumDeltaFrames, kVideoFrameDelta), kNoError);
+ InsertFrame(kVideoFrameKey);
+ for (int i = 0; i < 2 * (kNumDeltaFrames + 1); ++i) {
+ EXPECT_TRUE(DecodeCompleteFrame());
+ }
+}
+
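+// Verify that a complete but non-continuous frame is not decoded until the
+// gap before it has been filled.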
+TEST_F(TestRunningJitterBuffer, TwoPacketsNonContinuous) {
+ InsertFrame(kVideoFrameKey);
+ EXPECT_TRUE(DecodeCompleteFrame());
+ stream_generator_->GenerateFrame(kVideoFrameDelta, 1, 0,
+ clock_->TimeInMilliseconds());
+ clock_->AdvanceTimeMilliseconds(kDefaultFramePeriodMs);
+ stream_generator_->GenerateFrame(kVideoFrameDelta, 2, 0,
+ clock_->TimeInMilliseconds());
+ EXPECT_EQ(kIncomplete, InsertPacketAndPop(1));
+ EXPECT_EQ(kCompleteSession, InsertPacketAndPop(1));
+ EXPECT_FALSE(DecodeCompleteFrame());
+ EXPECT_EQ(kCompleteSession, InsertPacketAndPop(0));
+ EXPECT_TRUE(DecodeCompleteFrame());
+ EXPECT_TRUE(DecodeCompleteFrame());
+}
+
+TEST_F(TestJitterBufferNack, EmptyPackets) {
+  // Make sure empty packets don't clog the jitter buffer.
+ jitter_buffer_->SetNackMode(kNack, media_optimization::kLowRttNackMs, -1);
+ EXPECT_GE(InsertFrames(kMaxNumberOfFrames, kEmptyFrame), kNoError);
+ InsertFrame(kVideoFrameKey);
+ EXPECT_TRUE(DecodeCompleteFrame());
+}
+
+TEST_F(TestJitterBufferNack, NackTooOldPackets) {
+ // Insert a key frame and decode it.
+ EXPECT_GE(InsertFrame(kVideoFrameKey), kNoError);
+ EXPECT_TRUE(DecodeCompleteFrame());
+
+  // Drop one frame and insert |oldest_packet_to_nack_| + 1 frames to trigger
+  // NACKing a packet that is too old.
+ DropFrame(1);
+ // Insert a frame which should trigger a recycle until the next key frame.
+ EXPECT_EQ(kFlushIndicator, InsertFrames(oldest_packet_to_nack_ + 1,
+ kVideoFrameDelta));
+ EXPECT_FALSE(DecodeCompleteFrame());
+
+ bool request_key_frame = false;
+ std::vector<uint16_t> nack_list =
+ jitter_buffer_->GetNackList(&request_key_frame);
+ // No key frame will be requested since the jitter buffer is empty.
+ EXPECT_FALSE(request_key_frame);
+ EXPECT_EQ(0u, nack_list.size());
+
+ EXPECT_GE(InsertFrame(kVideoFrameDelta), kNoError);
+ // Waiting for a key frame.
+ EXPECT_FALSE(DecodeCompleteFrame());
+ EXPECT_FALSE(DecodeIncompleteFrame());
+
+ // The next complete continuous frame isn't a key frame, but we're waiting
+ // for one.
+ EXPECT_FALSE(DecodeCompleteFrame());
+ EXPECT_GE(InsertFrame(kVideoFrameKey), kNoError);
+ // Skipping ahead to the key frame.
+ EXPECT_TRUE(DecodeCompleteFrame());
+}
+
+TEST_F(TestJitterBufferNack, NackLargeJitterBuffer) {
+ // Insert a key frame and decode it.
+ EXPECT_GE(InsertFrame(kVideoFrameKey), kNoError);
+ EXPECT_TRUE(DecodeCompleteFrame());
+
+  // Insert a large number of delta frames without dropping any packets.
+ EXPECT_GE(InsertFrames(oldest_packet_to_nack_, kVideoFrameDelta), kNoError);
+
+ bool request_key_frame = false;
+ std::vector<uint16_t> nack_list =
+ jitter_buffer_->GetNackList(&request_key_frame);
+ // Verify that the jitter buffer does not request a key frame.
+ EXPECT_FALSE(request_key_frame);
+ // Verify that no packets are NACKed.
+ EXPECT_EQ(0u, nack_list.size());
+ // Verify that we can decode the next frame.
+ EXPECT_TRUE(DecodeCompleteFrame());
+}
+
+TEST_F(TestJitterBufferNack, NackListFull) {
+ // Insert a key frame and decode it.
+ EXPECT_GE(InsertFrame(kVideoFrameKey), kNoError);
+ EXPECT_TRUE(DecodeCompleteFrame());
+
+  // Drop |max_nack_list_size_| + 1 frames to overflow the NACK list.
+ DropFrame(max_nack_list_size_ + 1);
+ // Insert a frame which should trigger a recycle until the next key frame.
+ EXPECT_EQ(kFlushIndicator, InsertFrame(kVideoFrameDelta));
+ EXPECT_FALSE(DecodeCompleteFrame());
+
+ bool request_key_frame = false;
+ jitter_buffer_->GetNackList(&request_key_frame);
+ // The jitter buffer is empty, so we won't request key frames until we get a
+ // packet.
+ EXPECT_FALSE(request_key_frame);
+
+ EXPECT_GE(InsertFrame(kVideoFrameDelta), kNoError);
+  // Now that there is a packet in the jitter buffer, a key frame will be
+  // requested since the inserted frame is not a key frame.
+  jitter_buffer_->GetNackList(&request_key_frame);
+  EXPECT_TRUE(request_key_frame);
+ // The next complete continuous frame isn't a key frame, but we're waiting
+ // for one.
+ EXPECT_FALSE(DecodeCompleteFrame());
+ EXPECT_FALSE(DecodeIncompleteFrame());
+ EXPECT_GE(InsertFrame(kVideoFrameKey), kNoError);
+ // Skipping ahead to the key frame.
+ EXPECT_TRUE(DecodeCompleteFrame());
+}
+
+TEST_F(TestJitterBufferNack, NoNackListReturnedBeforeFirstDecode) {
+ DropFrame(10);
+ // Insert a frame and try to generate a NACK list. Shouldn't get one.
+ EXPECT_GE(InsertFrame(kVideoFrameDelta), kNoError);
+ bool request_key_frame = false;
+ std::vector<uint16_t> nack_list =
+ jitter_buffer_->GetNackList(&request_key_frame);
+ // No list generated, and a key frame request is signaled.
+ EXPECT_EQ(0u, nack_list.size());
+ EXPECT_TRUE(request_key_frame);
+}
+
+TEST_F(TestJitterBufferNack, NackListBuiltBeforeFirstDecode) {
+ stream_generator_->Init(0, clock_->TimeInMilliseconds());
+ InsertFrame(kVideoFrameKey);
+ stream_generator_->GenerateFrame(kVideoFrameDelta, 2, 0,
+ clock_->TimeInMilliseconds());
+ stream_generator_->NextPacket(NULL); // Drop packet.
+ EXPECT_EQ(kIncomplete, InsertPacketAndPop(0));
+ EXPECT_TRUE(DecodeCompleteFrame());
+ bool extended = false;
+ std::vector<uint16_t> nack_list = jitter_buffer_->GetNackList(&extended);
+ EXPECT_EQ(1u, nack_list.size());
+}
+
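+// Verify that the |retransmitted| flag is set when an inserted packet was
+// previously NACKed.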
+TEST_F(TestJitterBufferNack, VerifyRetransmittedFlag) {
+ stream_generator_->Init(0, clock_->TimeInMilliseconds());
+ stream_generator_->GenerateFrame(kVideoFrameKey, 3, 0,
+ clock_->TimeInMilliseconds());
+ VCMPacket packet;
+ stream_generator_->PopPacket(&packet, 0);
+ bool retransmitted = false;
+ EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(packet, &retransmitted));
+ EXPECT_FALSE(retransmitted);
+ // Drop second packet.
+ stream_generator_->PopPacket(&packet, 1);
+ EXPECT_EQ(kIncomplete, jitter_buffer_->InsertPacket(packet, &retransmitted));
+ EXPECT_FALSE(retransmitted);
+ EXPECT_FALSE(DecodeCompleteFrame());
+ bool extended = false;
+ std::vector<uint16_t> nack_list = jitter_buffer_->GetNackList(&extended);
+ EXPECT_EQ(1u, nack_list.size());
+ stream_generator_->PopPacket(&packet, 0);
+ EXPECT_EQ(packet.seqNum, nack_list[0]);
+ EXPECT_EQ(kCompleteSession, jitter_buffer_->InsertPacket(packet,
+ &retransmitted));
+ EXPECT_TRUE(retransmitted);
+ EXPECT_TRUE(DecodeCompleteFrame());
+}
+
+TEST_F(TestJitterBufferNack, UseNackToRecoverFirstKeyFrame) {
+ stream_generator_->Init(0, clock_->TimeInMilliseconds());
+ stream_generator_->GenerateFrame(kVideoFrameKey, 3, 0,
+ clock_->TimeInMilliseconds());
+ EXPECT_EQ(kIncomplete, InsertPacketAndPop(0));
+ // Drop second packet.
+ EXPECT_EQ(kIncomplete, InsertPacketAndPop(1));
+ EXPECT_FALSE(DecodeCompleteFrame());
+ bool extended = false;
+ std::vector<uint16_t> nack_list = jitter_buffer_->GetNackList(&extended);
+ EXPECT_EQ(1u, nack_list.size());
+ VCMPacket packet;
+ stream_generator_->GetPacket(&packet, 0);
+ EXPECT_EQ(packet.seqNum, nack_list[0]);
+}
+
+TEST_F(TestJitterBufferNack, UseNackToRecoverFirstKeyFrameSecondInQueue) {
+ VCMPacket packet;
+ stream_generator_->Init(0, clock_->TimeInMilliseconds());
+ // First frame is delta.
+ stream_generator_->GenerateFrame(kVideoFrameDelta, 3, 0,
+ clock_->TimeInMilliseconds());
+ EXPECT_EQ(kIncomplete, InsertPacketAndPop(0));
+ // Drop second packet in frame.
+ ASSERT_TRUE(stream_generator_->PopPacket(&packet, 0));
+ EXPECT_EQ(kIncomplete, InsertPacketAndPop(0));
+ // Second frame is key.
+ stream_generator_->GenerateFrame(kVideoFrameKey, 3, 0,
+ clock_->TimeInMilliseconds() + 10);
+ EXPECT_EQ(kIncomplete, InsertPacketAndPop(0));
+ // Drop second packet in frame.
+ EXPECT_EQ(kIncomplete, InsertPacketAndPop(1));
+ EXPECT_FALSE(DecodeCompleteFrame());
+ bool extended = false;
+ std::vector<uint16_t> nack_list = jitter_buffer_->GetNackList(&extended);
+ EXPECT_EQ(1u, nack_list.size());
+ stream_generator_->GetPacket(&packet, 0);
+ EXPECT_EQ(packet.seqNum, nack_list[0]);
+}
+
+TEST_F(TestJitterBufferNack, NormalOperation) {
+ EXPECT_EQ(kNack, jitter_buffer_->nack_mode());
+ jitter_buffer_->SetDecodeErrorMode(kWithErrors);
+
+ EXPECT_GE(InsertFrame(kVideoFrameKey), kNoError);
+ EXPECT_TRUE(DecodeIncompleteFrame());
+
+ // ----------------------------------------------------------------
+ // | 1 | 2 | .. | 8 | 9 | x | 11 | 12 | .. | 19 | x | 21 | .. | 100 |
+ // ----------------------------------------------------------------
+ stream_generator_->GenerateFrame(kVideoFrameKey, 100, 0,
+ clock_->TimeInMilliseconds());
+ clock_->AdvanceTimeMilliseconds(kDefaultFramePeriodMs);
+ EXPECT_EQ(kDecodableSession, InsertPacketAndPop(0));
+ // Verify that the frame is incomplete.
+ EXPECT_FALSE(DecodeCompleteFrame());
+ while (stream_generator_->PacketsRemaining() > 1) {
+ if (stream_generator_->NextSequenceNumber() % 10 != 0) {
+ EXPECT_EQ(kDecodableSession, InsertPacketAndPop(0));
+ } else {
+ stream_generator_->NextPacket(NULL); // Drop packet
+ }
+ }
+ EXPECT_EQ(kDecodableSession, InsertPacketAndPop(0));
+ EXPECT_EQ(0, stream_generator_->PacketsRemaining());
+ EXPECT_FALSE(DecodeCompleteFrame());
+ EXPECT_FALSE(DecodeIncompleteFrame());
+ bool request_key_frame = false;
+ std::vector<uint16_t> nack_list =
+ jitter_buffer_->GetNackList(&request_key_frame);
+ // Verify the NACK list.
+ const size_t kExpectedNackSize = 9;
+ ASSERT_EQ(kExpectedNackSize, nack_list.size());
+ for (size_t i = 0; i < nack_list.size(); ++i)
+ EXPECT_EQ((1 + i) * 10, nack_list[i]);
+}
+
+TEST_F(TestJitterBufferNack, NormalOperationWrap) {
+ bool request_key_frame = false;
+ // ------- ------------------------------------------------------------
+ // | 65532 | | 65533 | 65534 | 65535 | x | 1 | .. | 9 | x | 11 |.....| 96 |
+ // ------- ------------------------------------------------------------
+ stream_generator_->Init(65532, clock_->TimeInMilliseconds());
+ InsertFrame(kVideoFrameKey);
+ EXPECT_FALSE(request_key_frame);
+ EXPECT_TRUE(DecodeCompleteFrame());
+ stream_generator_->GenerateFrame(kVideoFrameDelta, 100, 0,
+ clock_->TimeInMilliseconds());
+ EXPECT_EQ(kIncomplete, InsertPacketAndPop(0));
+ while (stream_generator_->PacketsRemaining() > 1) {
+ if (stream_generator_->NextSequenceNumber() % 10 != 0) {
+ EXPECT_EQ(kIncomplete, InsertPacketAndPop(0));
+ EXPECT_FALSE(request_key_frame);
+ } else {
+ stream_generator_->NextPacket(NULL); // Drop packet
+ }
+ }
+ EXPECT_EQ(kIncomplete, InsertPacketAndPop(0));
+ EXPECT_FALSE(request_key_frame);
+ EXPECT_EQ(0, stream_generator_->PacketsRemaining());
+ EXPECT_FALSE(DecodeCompleteFrame());
+ EXPECT_FALSE(DecodeCompleteFrame());
+ bool extended = false;
+ std::vector<uint16_t> nack_list = jitter_buffer_->GetNackList(&extended);
+ // Verify the NACK list.
+ const size_t kExpectedNackSize = 10;
+ ASSERT_EQ(kExpectedNackSize, nack_list.size());
+ for (size_t i = 0; i < nack_list.size(); ++i)
+ EXPECT_EQ(i * 10, nack_list[i]);
+}
+
+TEST_F(TestJitterBufferNack, NormalOperationWrap2) {
+ bool request_key_frame = false;
+ // -----------------------------------
+ // | 65532 | 65533 | 65534 | x | 0 | 1 |
+ // -----------------------------------
+ stream_generator_->Init(65532, clock_->TimeInMilliseconds());
+ InsertFrame(kVideoFrameKey);
+ EXPECT_FALSE(request_key_frame);
+ EXPECT_TRUE(DecodeCompleteFrame());
+ stream_generator_->GenerateFrame(kVideoFrameDelta, 1, 0,
+ clock_->TimeInMilliseconds());
+ clock_->AdvanceTimeMilliseconds(kDefaultFramePeriodMs);
+ for (int i = 0; i < 5; ++i) {
+ if (stream_generator_->NextSequenceNumber() != 65535) {
+ EXPECT_EQ(kCompleteSession, InsertPacketAndPop(0));
+ EXPECT_FALSE(request_key_frame);
+ } else {
+ stream_generator_->NextPacket(NULL); // Drop packet
+ }
+ stream_generator_->GenerateFrame(kVideoFrameDelta, 1, 0,
+ clock_->TimeInMilliseconds());
+ clock_->AdvanceTimeMilliseconds(kDefaultFramePeriodMs);
+ }
+ EXPECT_EQ(kCompleteSession, InsertPacketAndPop(0));
+ EXPECT_FALSE(request_key_frame);
+ bool extended = false;
+ std::vector<uint16_t> nack_list = jitter_buffer_->GetNackList(&extended);
+ // Verify the NACK list.
+ ASSERT_EQ(1u, nack_list.size());
+ EXPECT_EQ(65535, nack_list[0]);
+}
+
+TEST_F(TestJitterBufferNack, ResetByFutureKeyFrameDoesntError) {
+ stream_generator_->Init(0, clock_->TimeInMilliseconds());
+ InsertFrame(kVideoFrameKey);
+ EXPECT_TRUE(DecodeCompleteFrame());
+ bool extended = false;
+ std::vector<uint16_t> nack_list = jitter_buffer_->GetNackList(&extended);
+ EXPECT_EQ(0u, nack_list.size());
+
+  // A far-into-the-future video frame could be caused by resetting the encoder
+  // or otherwise restarting. This should not cause an error when the packet is
+  // a keyframe, even if all of the nack list needs to be flushed.
+ stream_generator_->Init(10000, clock_->TimeInMilliseconds());
+ clock_->AdvanceTimeMilliseconds(kDefaultFramePeriodMs);
+ InsertFrame(kVideoFrameKey);
+ EXPECT_TRUE(DecodeCompleteFrame());
+ nack_list = jitter_buffer_->GetNackList(&extended);
+ EXPECT_EQ(0u, nack_list.size());
+
+ // Stream should be decodable from this point.
+ clock_->AdvanceTimeMilliseconds(kDefaultFramePeriodMs);
+ InsertFrame(kVideoFrameDelta);
+ EXPECT_TRUE(DecodeCompleteFrame());
+ nack_list = jitter_buffer_->GetNackList(&extended);
+ EXPECT_EQ(0u, nack_list.size());
+}
+
+} // namespace webrtc
diff --git a/webrtc/modules/video_coding/main/source/jitter_estimator.cc b/webrtc/modules/video_coding/main/source/jitter_estimator.cc
new file mode 100644
index 0000000000..5894c88d72
--- /dev/null
+++ b/webrtc/modules/video_coding/main/source/jitter_estimator.cc
@@ -0,0 +1,482 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/video_coding/main/source/internal_defines.h"
+#include "webrtc/modules/video_coding/main/source/jitter_estimator.h"
+#include "webrtc/modules/video_coding/main/source/rtt_filter.h"
+#include "webrtc/system_wrappers/include/clock.h"
+#include "webrtc/system_wrappers/include/field_trial.h"
+
+#include <assert.h>
+#include <math.h>
+#include <stdlib.h>
+#include <string.h>
+
+namespace webrtc {
+
+enum { kStartupDelaySamples = 30 };
+enum { kFsAccuStartupSamples = 5 };
+enum { kMaxFramerateEstimate = 200 };
+
+VCMJitterEstimator::VCMJitterEstimator(const Clock* clock,
+ int32_t vcmId,
+ int32_t receiverId)
+ : _vcmId(vcmId),
+ _receiverId(receiverId),
+ _phi(0.97),
+ _psi(0.9999),
+ _alphaCountMax(400),
+ _thetaLow(0.000001),
+ _nackLimit(3),
+ _numStdDevDelayOutlier(15),
+ _numStdDevFrameSizeOutlier(3),
+ _noiseStdDevs(2.33), // ~Less than 1% chance
+ // (look up in normal distribution table)...
+ _noiseStdDevOffset(30.0), // ...of getting 30 ms freezes
+ _rttFilter(),
+ fps_counter_(30), // TODO(sprang): Use an estimator with limit based on
+ // time, rather than number of samples.
+ low_rate_experiment_(kInit),
+ clock_(clock) {
+ Reset();
+}
+
+VCMJitterEstimator::~VCMJitterEstimator() {
+}
+
+VCMJitterEstimator&
+VCMJitterEstimator::operator=(const VCMJitterEstimator& rhs)
+{
+ if (this != &rhs)
+ {
+ memcpy(_thetaCov, rhs._thetaCov, sizeof(_thetaCov));
+ memcpy(_Qcov, rhs._Qcov, sizeof(_Qcov));
+
+ _vcmId = rhs._vcmId;
+ _receiverId = rhs._receiverId;
+ _avgFrameSize = rhs._avgFrameSize;
+ _varFrameSize = rhs._varFrameSize;
+ _maxFrameSize = rhs._maxFrameSize;
+ _fsSum = rhs._fsSum;
+ _fsCount = rhs._fsCount;
+ _lastUpdateT = rhs._lastUpdateT;
+ _prevEstimate = rhs._prevEstimate;
+ _prevFrameSize = rhs._prevFrameSize;
+ _avgNoise = rhs._avgNoise;
+ _alphaCount = rhs._alphaCount;
+ _filterJitterEstimate = rhs._filterJitterEstimate;
+ _startupCount = rhs._startupCount;
+ _latestNackTimestamp = rhs._latestNackTimestamp;
+ _nackCount = rhs._nackCount;
+ _rttFilter = rhs._rttFilter;
+ }
+ return *this;
+}
+
+// Resets the JitterEstimate
+void
+VCMJitterEstimator::Reset()
+{
+    _theta[0] = 1/(512e3/8); // Slope seeded as 1/C for a nominal channel
+                             // capacity C of 512 kbps (64000 bytes/s).
+    _theta[1] = 0;
+ _varNoise = 4.0;
+
+ _thetaCov[0][0] = 1e-4;
+ _thetaCov[1][1] = 1e2;
+ _thetaCov[0][1] = _thetaCov[1][0] = 0;
+ _Qcov[0][0] = 2.5e-10;
+ _Qcov[1][1] = 1e-10;
+ _Qcov[0][1] = _Qcov[1][0] = 0;
+ _avgFrameSize = 500;
+ _maxFrameSize = 500;
+ _varFrameSize = 100;
+ _lastUpdateT = -1;
+ _prevEstimate = -1.0;
+ _prevFrameSize = 0;
+ _avgNoise = 0.0;
+ _alphaCount = 1;
+ _filterJitterEstimate = 0.0;
+ _latestNackTimestamp = 0;
+ _nackCount = 0;
+ _fsSum = 0;
+ _fsCount = 0;
+ _startupCount = 0;
+ _rttFilter.Reset();
+ fps_counter_.Reset();
+}
+
+void
+VCMJitterEstimator::ResetNackCount()
+{
+ _nackCount = 0;
+}
+
+// Updates the estimates with the new measurements
+void
+VCMJitterEstimator::UpdateEstimate(int64_t frameDelayMS, uint32_t frameSizeBytes,
+ bool incompleteFrame /* = false */)
+{
+ if (frameSizeBytes == 0)
+ {
+ return;
+ }
+ int deltaFS = frameSizeBytes - _prevFrameSize;
+ if (_fsCount < kFsAccuStartupSamples)
+ {
+ _fsSum += frameSizeBytes;
+ _fsCount++;
+ }
+ else if (_fsCount == kFsAccuStartupSamples)
+ {
+        // Seed the frame size filter with the average of the first
+        // kFsAccuStartupSamples frames.
+ _avgFrameSize = static_cast<double>(_fsSum) /
+ static_cast<double>(_fsCount);
+ _fsCount++;
+ }
+ if (!incompleteFrame || frameSizeBytes > _avgFrameSize)
+ {
+ double avgFrameSize = _phi * _avgFrameSize +
+ (1 - _phi) * frameSizeBytes;
+ if (frameSizeBytes < _avgFrameSize + 2 * sqrt(_varFrameSize))
+ {
+ // Only update the average frame size if this sample wasn't a
+ // key frame
+ _avgFrameSize = avgFrameSize;
+ }
+ // Update the variance anyway since we want to capture cases where we only get
+ // key frames.
+ _varFrameSize = VCM_MAX(_phi * _varFrameSize + (1 - _phi) *
+ (frameSizeBytes - avgFrameSize) *
+ (frameSizeBytes - avgFrameSize), 1.0);
+ }
+
+ // Update max frameSize estimate
+ _maxFrameSize = VCM_MAX(_psi * _maxFrameSize, static_cast<double>(frameSizeBytes));
+
+ if (_prevFrameSize == 0)
+ {
+ _prevFrameSize = frameSizeBytes;
+ return;
+ }
+ _prevFrameSize = frameSizeBytes;
+
+ // Only update the Kalman filter if the sample is not considered
+ // an extreme outlier. Even if it is an extreme outlier from a
+ // delay point of view, if the frame size also is large the
+ // deviation is probably due to an incorrect line slope.
+ double deviation = DeviationFromExpectedDelay(frameDelayMS, deltaFS);
+
+ if (fabs(deviation) < _numStdDevDelayOutlier * sqrt(_varNoise) ||
+ frameSizeBytes > _avgFrameSize + _numStdDevFrameSizeOutlier * sqrt(_varFrameSize))
+ {
+ // Update the variance of the deviation from the
+ // line given by the Kalman filter
+ EstimateRandomJitter(deviation, incompleteFrame);
+        // Prevent updating with frames which have been congested by a large
+        // frame, and therefore arrive almost at the same time as that frame.
+        // This can occur when we receive a large frame (key frame) which
+        // has been delayed. The next frame is of normal size (delta frame),
+        // and thus deltaFS will be << 0. This removes all frame samples
+        // which arrive right after a key frame.
+ if ((!incompleteFrame || deviation >= 0.0) &&
+ static_cast<double>(deltaFS) > - 0.25 * _maxFrameSize)
+ {
+ // Update the Kalman filter with the new data
+ KalmanEstimateChannel(frameDelayMS, deltaFS);
+ }
+ }
+ else
+ {
+ int nStdDev = (deviation >= 0) ? _numStdDevDelayOutlier : -_numStdDevDelayOutlier;
+ EstimateRandomJitter(nStdDev * sqrt(_varNoise), incompleteFrame);
+ }
+ // Post process the total estimated jitter
+ if (_startupCount >= kStartupDelaySamples)
+ {
+ PostProcessEstimate();
+ }
+ else
+ {
+ _startupCount++;
+ }
+}
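+
+// Illustrative driving sequence (the receive loop and variable names below
+// are hypothetical; only the VCMJitterEstimator calls mirror this file's
+// API):
+//
+//   VCMJitterEstimator estimator(clock);
+//   // For each received frame, feed the delay delta vs. the previous frame:
+//   estimator.UpdateEstimate(frame_delay_ms, frame_size_bytes);
+//   // When sizing the jitter buffer delay, query the estimate:
+//   int jitter_delay_ms = estimator.GetJitterEstimate(rtt_multiplier);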
+
+// Updates the nack/packet ratio
+void
+VCMJitterEstimator::FrameNacked()
+{
+    // Wait until _nackLimit retransmissions have been received,
+    // then always add ~1 RTT delay.
+    // TODO(holmer): Should we ever remove the additional delay if the
+    // packet losses seem to have stopped? We could for instance scale
+ // the number of RTTs to add with the amount of retransmissions in a given
+ // time interval, or similar.
+ if (_nackCount < _nackLimit)
+ {
+ _nackCount++;
+ }
+}
+
+// Updates Kalman estimate of the channel
+// The caller is expected to sanity check the inputs.
+void
+VCMJitterEstimator::KalmanEstimateChannel(int64_t frameDelayMS,
+ int32_t deltaFSBytes)
+{
+ double Mh[2];
+ double hMh_sigma;
+ double kalmanGain[2];
+ double measureRes;
+ double t00, t01;
+
+ // Kalman filtering
+
+ // Prediction
+ // M = M + Q
+ _thetaCov[0][0] += _Qcov[0][0];
+ _thetaCov[0][1] += _Qcov[0][1];
+ _thetaCov[1][0] += _Qcov[1][0];
+ _thetaCov[1][1] += _Qcov[1][1];
+
+ // Kalman gain
+ // K = M*h'/(sigma2n + h*M*h') = M*h'/(1 + h*M*h')
+ // h = [dFS 1]
+ // Mh = M*h'
+ // hMh_sigma = h*M*h' + R
+ Mh[0] = _thetaCov[0][0] * deltaFSBytes + _thetaCov[0][1];
+ Mh[1] = _thetaCov[1][0] * deltaFSBytes + _thetaCov[1][1];
+ // sigma weights measurements with a small deltaFS as noisy and
+ // measurements with large deltaFS as good
+ if (_maxFrameSize < 1.0)
+ {
+ return;
+ }
+ double sigma = (300.0 * exp(-fabs(static_cast<double>(deltaFSBytes)) /
+ (1e0 * _maxFrameSize)) + 1) * sqrt(_varNoise);
+ if (sigma < 1.0)
+ {
+ sigma = 1.0;
+ }
+ hMh_sigma = deltaFSBytes * Mh[0] + Mh[1] + sigma;
+ if ((hMh_sigma < 1e-9 && hMh_sigma >= 0) || (hMh_sigma > -1e-9 && hMh_sigma <= 0))
+ {
+        // The innovation variance must never be (near) zero; bail out
+        // rather than divide by it.
+        assert(false);
+ return;
+ }
+ kalmanGain[0] = Mh[0] / hMh_sigma;
+ kalmanGain[1] = Mh[1] / hMh_sigma;
+
+ // Correction
+ // theta = theta + K*(dT - h*theta)
+ measureRes = frameDelayMS - (deltaFSBytes * _theta[0] + _theta[1]);
+ _theta[0] += kalmanGain[0] * measureRes;
+ _theta[1] += kalmanGain[1] * measureRes;
+
+ if (_theta[0] < _thetaLow)
+ {
+ _theta[0] = _thetaLow;
+ }
+
+ // M = (I - K*h)*M
+ t00 = _thetaCov[0][0];
+ t01 = _thetaCov[0][1];
+ _thetaCov[0][0] = (1 - kalmanGain[0] * deltaFSBytes) * t00 -
+ kalmanGain[0] * _thetaCov[1][0];
+ _thetaCov[0][1] = (1 - kalmanGain[0] * deltaFSBytes) * t01 -
+ kalmanGain[0] * _thetaCov[1][1];
+ _thetaCov[1][0] = _thetaCov[1][0] * (1 - kalmanGain[1]) -
+ kalmanGain[1] * deltaFSBytes * t00;
+ _thetaCov[1][1] = _thetaCov[1][1] * (1 - kalmanGain[1]) -
+ kalmanGain[1] * deltaFSBytes * t01;
+
+ // Covariance matrix, must be positive semi-definite
+ assert(_thetaCov[0][0] + _thetaCov[1][1] >= 0 &&
+ _thetaCov[0][0] * _thetaCov[1][1] - _thetaCov[0][1] * _thetaCov[1][0] >= 0 &&
+ _thetaCov[0][0] >= 0);
+}
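+
+// The filter above tracks the scalar measurement model
+//   frameDelayMS = _theta[0] * deltaFSBytes + _theta[1] + noise,
+// where the slope approximates the inverse channel capacity (ms/byte) and
+// the offset the constant queuing delay. With illustrative numbers,
+// _theta = [0.001, 2.0], a frame that is 5000 bytes larger than the previous
+// one predicts a delay delta of 0.001 * 5000 + 2.0 = 7 ms; a measured delta
+// of 10 ms leaves a 3 ms residual that is fed back through the Kalman gain.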
+
+// Calculate difference in delay between a sample and the
+// expected delay estimated by the Kalman filter
+double
+VCMJitterEstimator::DeviationFromExpectedDelay(int64_t frameDelayMS,
+ int32_t deltaFSBytes) const
+{
+ return frameDelayMS - (_theta[0] * deltaFSBytes + _theta[1]);
+}
+
+// Estimates the random jitter by calculating the variance of the
+// sample distance from the line given by theta.
+void VCMJitterEstimator::EstimateRandomJitter(double d_dT,
+ bool incompleteFrame) {
+ uint64_t now = clock_->TimeInMicroseconds();
+ if (_lastUpdateT != -1) {
+ fps_counter_.AddSample(now - _lastUpdateT);
+ }
+ _lastUpdateT = now;
+
+ if (_alphaCount == 0) {
+ assert(false);
+ return;
+ }
+ double alpha =
+ static_cast<double>(_alphaCount - 1) / static_cast<double>(_alphaCount);
+ _alphaCount++;
+ if (_alphaCount > _alphaCountMax)
+ _alphaCount = _alphaCountMax;
+
+ if (LowRateExperimentEnabled()) {
+    // To avoid a low frame rate stream reacting more slowly to changes,
+    // scale the alpha weight relative to a 30 fps stream.
+ double fps = GetFrameRate();
+ if (fps > 0.0) {
+ double rate_scale = 30.0 / fps;
+ // At startup, there can be a lot of noise in the fps estimate.
+ // Interpolate rate_scale linearly, from 1.0 at sample #1, to 30.0 / fps
+ // at sample #kStartupDelaySamples.
+ if (_alphaCount < kStartupDelaySamples) {
+ rate_scale =
+ (_alphaCount * rate_scale + (kStartupDelaySamples - _alphaCount)) /
+ kStartupDelaySamples;
+ }
+ alpha = pow(alpha, rate_scale);
+ }
+ }
+
+ double avgNoise = alpha * _avgNoise + (1 - alpha) * d_dT;
+ double varNoise =
+ alpha * _varNoise + (1 - alpha) * (d_dT - _avgNoise) * (d_dT - _avgNoise);
+ if (!incompleteFrame || varNoise > _varNoise) {
+ _avgNoise = avgNoise;
+ _varNoise = varNoise;
+ }
+ if (_varNoise < 1.0) {
+    // The variance should never be zero, since then we might get
+    // stuck and consider all samples as outliers.
+ _varNoise = 1.0;
+ }
+}
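+
+// Illustrative effect of the experiment scaling above: at a steady 10 fps,
+// rate_scale is 30 / 10 = 3, so alpha is replaced by alpha^3 and the noise
+// filter updates roughly three times as fast as for a 30 fps stream. During
+// the first kStartupDelaySamples samples the scale is blended linearly from
+// ~1.0 towards 3 to dampen noisy early fps estimates.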
+
+double
+VCMJitterEstimator::NoiseThreshold() const
+{
+ double noiseThreshold = _noiseStdDevs * sqrt(_varNoise) - _noiseStdDevOffset;
+ if (noiseThreshold < 1.0)
+ {
+ noiseThreshold = 1.0;
+ }
+ return noiseThreshold;
+}
+
+// Calculates the current jitter estimate from the filtered estimates
+double
+VCMJitterEstimator::CalculateEstimate()
+{
+ double ret = _theta[0] * (_maxFrameSize - _avgFrameSize) + NoiseThreshold();
+
+    // A very low (or negative) estimate is ignored.
+ if (ret < 1.0) {
+ if (_prevEstimate <= 0.01)
+ {
+ ret = 1.0;
+ }
+ else
+ {
+ ret = _prevEstimate;
+ }
+ }
+ if (ret > 10000.0) // Sanity
+ {
+ ret = 10000.0;
+ }
+ _prevEstimate = ret;
+ return ret;
+}
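+
+// Worked example with illustrative values: for _theta[0] = 0.001 ms/byte,
+// _maxFrameSize = 25000 bytes, _avgFrameSize = 5000 bytes and a noise
+// threshold of 5 ms, the estimate is 0.001 * (25000 - 5000) + 5 = 25 ms,
+// i.e. the time to absorb a max-size frame plus the random-jitter margin.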
+
+void
+VCMJitterEstimator::PostProcessEstimate()
+{
+ _filterJitterEstimate = CalculateEstimate();
+}
+
+void
+VCMJitterEstimator::UpdateRtt(int64_t rttMs)
+{
+ _rttFilter.Update(rttMs);
+}
+
+void
+VCMJitterEstimator::UpdateMaxFrameSize(uint32_t frameSizeBytes)
+{
+ if (_maxFrameSize < frameSizeBytes)
+ {
+ _maxFrameSize = frameSizeBytes;
+ }
+}
+
+// Returns the current filtered estimate if available,
+// otherwise tries to calculate an estimate.
+int VCMJitterEstimator::GetJitterEstimate(double rttMultiplier) {
+ double jitterMS = CalculateEstimate() + OPERATING_SYSTEM_JITTER;
+ if (_filterJitterEstimate > jitterMS)
+ jitterMS = _filterJitterEstimate;
+ if (_nackCount >= _nackLimit)
+ jitterMS += _rttFilter.RttMs() * rttMultiplier;
+
+ if (LowRateExperimentEnabled()) {
+ static const double kJitterScaleLowThreshold = 5.0;
+ static const double kJitterScaleHighThreshold = 10.0;
+ double fps = GetFrameRate();
+ // Ignore jitter for very low fps streams.
+ if (fps < kJitterScaleLowThreshold) {
+ if (fps == 0.0) {
+ return jitterMS;
+ }
+ return 0;
+ }
+
+ // Semi-low frame rate; scale by factor linearly interpolated from 0.0 at
+ // kJitterScaleLowThreshold to 1.0 at kJitterScaleHighThreshold.
+ if (fps < kJitterScaleHighThreshold) {
+ jitterMS =
+ (1.0 / (kJitterScaleHighThreshold - kJitterScaleLowThreshold)) *
+ (fps - kJitterScaleLowThreshold) * jitterMS;
+ }
+ }
+
+  return static_cast<int>(jitterMS + 0.5);
+}
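+
+// Illustrative effect of the low-rate scaling above: at fps = 7.5 the
+// interpolation factor is (7.5 - 5.0) / (10.0 - 5.0) = 0.5, so a 20 ms
+// estimate is reported as 10 ms. Below 5 fps the jitter delay is dropped
+// entirely, unless fps is 0 (unknown), in which case it is kept as is.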
+
+bool VCMJitterEstimator::LowRateExperimentEnabled() {
+ if (low_rate_experiment_ == kInit) {
+ std::string group =
+ webrtc::field_trial::FindFullName("WebRTC-ReducedJitterDelay");
+ if (group == "Disabled") {
+ low_rate_experiment_ = kDisabled;
+ } else {
+ low_rate_experiment_ = kEnabled;
+ }
+ }
+  return low_rate_experiment_ == kEnabled;
+}
+
+double VCMJitterEstimator::GetFrameRate() const {
+ if (fps_counter_.count() == 0)
+ return 0;
+
+ double fps = 1000000.0 / fps_counter_.ComputeMean();
+ // Sanity check.
+ assert(fps >= 0.0);
+ if (fps > kMaxFramerateEstimate) {
+ fps = kMaxFramerateEstimate;
+ }
+ return fps;
+}
+
+}  // namespace webrtc
diff --git a/webrtc/modules/video_coding/main/source/jitter_estimator.h b/webrtc/modules/video_coding/main/source/jitter_estimator.h
new file mode 100644
index 0000000000..46ed67ba1d
--- /dev/null
+++ b/webrtc/modules/video_coding/main/source/jitter_estimator.h
@@ -0,0 +1,165 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_JITTER_ESTIMATOR_H_
+#define WEBRTC_MODULES_VIDEO_CODING_JITTER_ESTIMATOR_H_
+
+#include "webrtc/base/rollingaccumulator.h"
+#include "webrtc/modules/video_coding/main/source/rtt_filter.h"
+#include "webrtc/typedefs.h"
+
+namespace webrtc
+{
+
+class Clock;
+
+class VCMJitterEstimator
+{
+public:
+ VCMJitterEstimator(const Clock* clock,
+ int32_t vcmId = 0,
+ int32_t receiverId = 0);
+ virtual ~VCMJitterEstimator();
+ VCMJitterEstimator& operator=(const VCMJitterEstimator& rhs);
+
+ // Resets the estimate to the initial state
+ void Reset();
+ void ResetNackCount();
+
+ // Updates the jitter estimate with the new data.
+ //
+ // Input:
+ // - frameDelay : Delay-delta calculated by UTILDelayEstimate in milliseconds
+ // - frameSize : Frame size of the current frame.
+ // - incompleteFrame : Flags if the frame is used to update the estimate before it
+ // was complete. Default is false.
+ void UpdateEstimate(int64_t frameDelayMS,
+ uint32_t frameSizeBytes,
+ bool incompleteFrame = false);
+
+    // Returns the current jitter estimate in milliseconds and also adds
+    // an RTT dependent term in case of retransmission.
+ // Input:
+ // - rttMultiplier : RTT param multiplier (when applicable).
+ //
+ // Return value : Jitter estimate in milliseconds
+ int GetJitterEstimate(double rttMultiplier);
+
+ // Updates the nack counter.
+ void FrameNacked();
+
+ // Updates the RTT filter.
+ //
+ // Input:
+ // - rttMs : RTT in ms
+ void UpdateRtt(int64_t rttMs);
+
+ void UpdateMaxFrameSize(uint32_t frameSizeBytes);
+
+ // A constant describing the delay from the jitter buffer
+ // to the delay on the receiving side which is not accounted
+ // for by the jitter buffer nor the decoding delay estimate.
+ static const uint32_t OPERATING_SYSTEM_JITTER = 10;
+
+protected:
+ // These are protected for better testing possibilities
+ double _theta[2]; // Estimated line parameters (slope, offset)
+ double _varNoise; // Variance of the time-deviation from the line
+
+ virtual bool LowRateExperimentEnabled();
+
+private:
+ // Updates the Kalman filter for the line describing
+ // the frame size dependent jitter.
+ //
+ // Input:
+ // - frameDelayMS : Delay-delta calculated by UTILDelayEstimate in milliseconds
+ // - deltaFSBytes : Frame size delta, i.e.
+ // : frame size at time T minus frame size at time T-1
+ void KalmanEstimateChannel(int64_t frameDelayMS, int32_t deltaFSBytes);
+
+ // Updates the random jitter estimate, i.e. the variance
+ // of the time deviations from the line given by the Kalman filter.
+ //
+ // Input:
+    //          - d_dT            : The deviation from the Kalman estimate.
+    //          - incompleteFrame : True if the frame used to update the
+    //                              estimate was incomplete.
+ void EstimateRandomJitter(double d_dT, bool incompleteFrame);
+
+ double NoiseThreshold() const;
+
+ // Calculates the current jitter estimate.
+ //
+ // Return value : The current jitter estimate in milliseconds
+ double CalculateEstimate();
+
+ // Post process the calculated estimate
+ void PostProcessEstimate();
+
+ // Calculates the difference in delay between a sample and the
+ // expected delay estimated by the Kalman filter.
+ //
+ // Input:
+ // - frameDelayMS : Delay-delta calculated by UTILDelayEstimate in milliseconds
+ // - deltaFS : Frame size delta, i.e. frame size at time
+ // T minus frame size at time T-1
+ //
+ // Return value : The difference in milliseconds
+ double DeviationFromExpectedDelay(int64_t frameDelayMS,
+ int32_t deltaFSBytes) const;
+
+ double GetFrameRate() const;
+
+ // Constants, filter parameters
+ int32_t _vcmId;
+ int32_t _receiverId;
+ const double _phi;
+ const double _psi;
+ const uint32_t _alphaCountMax;
+ const double _thetaLow;
+ const uint32_t _nackLimit;
+ const int32_t _numStdDevDelayOutlier;
+ const int32_t _numStdDevFrameSizeOutlier;
+ const double _noiseStdDevs;
+ const double _noiseStdDevOffset;
+
+ double _thetaCov[2][2]; // Estimate covariance
+ double _Qcov[2][2]; // Process noise covariance
+ double _avgFrameSize; // Average frame size
+ double _varFrameSize; // Frame size variance
+ double _maxFrameSize; // Largest frame size received (descending
+ // with a factor _psi)
+ uint32_t _fsSum;
+ uint32_t _fsCount;
+
+ int64_t _lastUpdateT;
+ double _prevEstimate; // The previously returned jitter estimate
+ uint32_t _prevFrameSize; // Frame size of the previous frame
+ double _avgNoise; // Average of the random jitter
+ uint32_t _alphaCount;
+ double _filterJitterEstimate; // The filtered sum of jitter estimates
+
+ uint32_t _startupCount;
+
+ int64_t _latestNackTimestamp; // Timestamp in ms when the latest nack was seen
+ uint32_t _nackCount; // Keeps track of the number of nacks received,
+ // but never goes above _nackLimit
+ VCMRttFilter _rttFilter;
+
+ rtc::RollingAccumulator<uint64_t> fps_counter_;
+ enum ExperimentFlag { kInit, kEnabled, kDisabled };
+ ExperimentFlag low_rate_experiment_;
+ const Clock* clock_;
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_CODING_JITTER_ESTIMATOR_H_
diff --git a/webrtc/modules/video_coding/main/source/jitter_estimator_tests.cc b/webrtc/modules/video_coding/main/source/jitter_estimator_tests.cc
new file mode 100644
index 0000000000..c69c4bcdad
--- /dev/null
+++ b/webrtc/modules/video_coding/main/source/jitter_estimator_tests.cc
@@ -0,0 +1,160 @@
+/* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/video_coding/main/source/jitter_estimator.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/system_wrappers/include/clock.h"
+
+namespace webrtc {
+
+class TestEstimator : public VCMJitterEstimator {
+ public:
+ explicit TestEstimator(bool exp_enabled)
+ : VCMJitterEstimator(&fake_clock_, 0, 0),
+ fake_clock_(0),
+ exp_enabled_(exp_enabled) {}
+
+ virtual bool LowRateExperimentEnabled() { return exp_enabled_; }
+
+ void AdvanceClock(int64_t microseconds) {
+ fake_clock_.AdvanceTimeMicroseconds(microseconds);
+ }
+
+ private:
+ SimulatedClock fake_clock_;
+ const bool exp_enabled_;
+};
+
+class TestVCMJitterEstimator : public ::testing::Test {
+ protected:
+ TestVCMJitterEstimator()
+ : regular_estimator_(false), low_rate_estimator_(true) {}
+
+ virtual void SetUp() { regular_estimator_.Reset(); }
+
+ TestEstimator regular_estimator_;
+ TestEstimator low_rate_estimator_;
+};
+
+// Generates some simple test data in the form of a sawtooth wave.
+class ValueGenerator {
+ public:
+  explicit ValueGenerator(int32_t amplitude)
+      : amplitude_(amplitude), counter_(0) {}
+ virtual ~ValueGenerator() {}
+
+ int64_t Delay() { return ((counter_ % 11) - 5) * amplitude_; }
+
+ uint32_t FrameSize() { return 1000 + Delay(); }
+
+ void Advance() { ++counter_; }
+
+ private:
+ const int32_t amplitude_;
+ int64_t counter_;
+};
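+
+// With amplitude 10, Delay() cycles through the sawtooth
+// -50, -40, ..., 0, ..., 40, 50 (period 11), and FrameSize() returns
+// 1000 + Delay(), so frame sizes co-vary with the injected delays.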
+
+// 5 fps; the low-rate experiment should disable the jitter delay altogether.
+TEST_F(TestVCMJitterEstimator, TestLowRate) {
+ ValueGenerator gen(10);
+ uint64_t time_delta = 1000000 / 5;
+ for (int i = 0; i < 60; ++i) {
+ regular_estimator_.UpdateEstimate(gen.Delay(), gen.FrameSize());
+ regular_estimator_.AdvanceClock(time_delta);
+ low_rate_estimator_.UpdateEstimate(gen.Delay(), gen.FrameSize());
+ low_rate_estimator_.AdvanceClock(time_delta);
+ EXPECT_GT(regular_estimator_.GetJitterEstimate(0), 0);
+ if (i > 2)
+ EXPECT_EQ(low_rate_estimator_.GetJitterEstimate(0), 0);
+ gen.Advance();
+ }
+}
+
+// 8 fps; the steady-state estimate should fall in the interval interpolated
+// between 0 and the value of the previous method.
+TEST_F(TestVCMJitterEstimator, TestMidRate) {
+ ValueGenerator gen(10);
+ uint64_t time_delta = 1000000 / 8;
+ for (int i = 0; i < 60; ++i) {
+ regular_estimator_.UpdateEstimate(gen.Delay(), gen.FrameSize());
+ regular_estimator_.AdvanceClock(time_delta);
+ low_rate_estimator_.UpdateEstimate(gen.Delay(), gen.FrameSize());
+ low_rate_estimator_.AdvanceClock(time_delta);
+ EXPECT_GT(regular_estimator_.GetJitterEstimate(0), 0);
+ EXPECT_GT(low_rate_estimator_.GetJitterEstimate(0), 0);
+ EXPECT_GE(regular_estimator_.GetJitterEstimate(0),
+ low_rate_estimator_.GetJitterEstimate(0));
+ gen.Advance();
+ }
+}
+
+// 30 fps; the steady-state estimate should be the same as with the previous
+// method.
+TEST_F(TestVCMJitterEstimator, TestHighRate) {
+ ValueGenerator gen(10);
+ uint64_t time_delta = 1000000 / 30;
+ for (int i = 0; i < 60; ++i) {
+ regular_estimator_.UpdateEstimate(gen.Delay(), gen.FrameSize());
+ regular_estimator_.AdvanceClock(time_delta);
+ low_rate_estimator_.UpdateEstimate(gen.Delay(), gen.FrameSize());
+ low_rate_estimator_.AdvanceClock(time_delta);
+ EXPECT_EQ(regular_estimator_.GetJitterEstimate(0),
+ low_rate_estimator_.GetJitterEstimate(0));
+ gen.Advance();
+ }
+}
+
+// 10 fps, high jitter then low jitter. The low-rate estimator should
+// converge faster to the low-noise estimate.
+TEST_F(TestVCMJitterEstimator, TestConvergence) {
+ // Reach a steady state with high noise.
+ ValueGenerator gen(50);
+ uint64_t time_delta = 1000000 / 10;
+ for (int i = 0; i < 100; ++i) {
+ regular_estimator_.UpdateEstimate(gen.Delay(), gen.FrameSize());
+ regular_estimator_.AdvanceClock(time_delta * 2);
+ low_rate_estimator_.UpdateEstimate(gen.Delay(), gen.FrameSize());
+ low_rate_estimator_.AdvanceClock(time_delta * 2);
+ gen.Advance();
+ }
+
+ int threshold = regular_estimator_.GetJitterEstimate(0) / 2;
+
+ // New generator with zero noise.
+ ValueGenerator low_gen(0);
+ int regular_iterations = 0;
+ int low_rate_iterations = 0;
+ for (int i = 0; i < 500; ++i) {
+ if (regular_iterations == 0) {
+ regular_estimator_.UpdateEstimate(low_gen.Delay(), low_gen.FrameSize());
+ regular_estimator_.AdvanceClock(time_delta);
+ if (regular_estimator_.GetJitterEstimate(0) < threshold) {
+ regular_iterations = i;
+ }
+ }
+
+ if (low_rate_iterations == 0) {
+ low_rate_estimator_.UpdateEstimate(low_gen.Delay(), low_gen.FrameSize());
+ low_rate_estimator_.AdvanceClock(time_delta);
+ if (low_rate_estimator_.GetJitterEstimate(0) < threshold) {
+ low_rate_iterations = i;
+ }
+ }
+
+ if (regular_iterations != 0 && low_rate_iterations != 0) {
+ break;
+ }
+
+    low_gen.Advance();
+ }
+
+ EXPECT_NE(regular_iterations, 0);
+ EXPECT_NE(low_rate_iterations, 0);
+ EXPECT_LE(low_rate_iterations, regular_iterations);
+}
+}  // namespace webrtc
diff --git a/webrtc/modules/video_coding/main/source/media_opt_util.cc b/webrtc/modules/video_coding/main/source/media_opt_util.cc
new file mode 100644
index 0000000000..51decbed97
--- /dev/null
+++ b/webrtc/modules/video_coding/main/source/media_opt_util.cc
@@ -0,0 +1,774 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/video_coding/main/source/media_opt_util.h"
+
+#include <algorithm>
+#include <float.h>
+#include <limits.h>
+#include <math.h>
+
+#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/video_coding/codecs/vp8/include/vp8_common_types.h"
+#include "webrtc/modules/video_coding/main/interface/video_coding_defines.h"
+#include "webrtc/modules/video_coding/main/source/fec_tables_xor.h"
+#include "webrtc/modules/video_coding/main/source/nack_fec_tables.h"
+
+namespace webrtc {
+// Max value of loss rates in off-line model
+static const int kPacketLossMax = 129;
+
+namespace media_optimization {
+
+VCMProtectionMethod::VCMProtectionMethod()
+ : _effectivePacketLoss(0),
+ _protectionFactorK(0),
+ _protectionFactorD(0),
+ _scaleProtKey(2.0f),
+ _maxPayloadSize(1460),
+ _qmRobustness(new VCMQmRobustness()),
+ _useUepProtectionK(false),
+ _useUepProtectionD(true),
+ _corrFecCost(1.0),
+ _type(kNone) {
+}
+
+VCMProtectionMethod::~VCMProtectionMethod()
+{
+ delete _qmRobustness;
+}
+
+void
+VCMProtectionMethod::UpdateContentMetrics(const
+ VideoContentMetrics* contentMetrics)
+{
+ _qmRobustness->UpdateContent(contentMetrics);
+}
+
+VCMNackFecMethod::VCMNackFecMethod(int64_t lowRttNackThresholdMs,
+ int64_t highRttNackThresholdMs)
+ : VCMFecMethod(),
+ _lowRttNackMs(lowRttNackThresholdMs),
+ _highRttNackMs(highRttNackThresholdMs),
+ _maxFramesFec(1) {
+ assert(lowRttNackThresholdMs >= -1 && highRttNackThresholdMs >= -1);
+ assert(highRttNackThresholdMs == -1 ||
+ lowRttNackThresholdMs <= highRttNackThresholdMs);
+ assert(lowRttNackThresholdMs > -1 || highRttNackThresholdMs == -1);
+ _type = kNackFec;
+}
+
+VCMNackFecMethod::~VCMNackFecMethod()
+{
+ //
+}
+
+bool
+VCMNackFecMethod::ProtectionFactor(const VCMProtectionParameters* parameters)
+{
+    // Hybrid Nack FEC has three operational modes:
+    // 1. Low RTT (below kLowRttNackMs) - Nack only: Set FEC rate
+    //    (_protectionFactorD) to zero. A _lowRttNackMs of -1 means FEC is
+    //    never used.
+    // 2. High RTT (above _highRttNackMs) - FEC Only: Keep FEC factors.
+    //    A _highRttNackMs of -1 means NACK is always allowed.
+    // 3. Medium RTT values - Hybrid mode: We will only nack the
+    //    residual following the decoding of the FEC (refer to JB logic). FEC
+    //    delta protection factor will be adjusted based on the RTT.
+
+ // Otherwise: we count on FEC; if the RTT is below a threshold, then we
+ // nack the residual, based on a decision made in the JB.
+
+ // Compute the protection factors
+ VCMFecMethod::ProtectionFactor(parameters);
+ if (_lowRttNackMs == -1 || parameters->rtt < _lowRttNackMs)
+ {
+ _protectionFactorD = 0;
+ VCMFecMethod::UpdateProtectionFactorD(_protectionFactorD);
+ }
+
+ // When in Hybrid mode (RTT range), adjust FEC rates based on the
+ // RTT (NACK effectiveness) - adjustment factor is in the range [0,1].
+ else if (_highRttNackMs == -1 || parameters->rtt < _highRttNackMs)
+ {
+ // TODO(mikhal): Disabling adjustment temporarily.
+ // uint16_t rttIndex = (uint16_t) parameters->rtt;
+ float adjustRtt = 1.0f;// (float)VCMNackFecTable[rttIndex] / 100.0f;
+
+ // Adjust FEC with NACK on (for delta frame only)
+ // table depends on RTT relative to rttMax (NACK Threshold)
+ _protectionFactorD = static_cast<uint8_t>
+ (adjustRtt *
+ static_cast<float>(_protectionFactorD));
+ // update FEC rates after applying adjustment
+ VCMFecMethod::UpdateProtectionFactorD(_protectionFactorD);
+ }
+
+ return true;
+}
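+
+// Illustrative mode selection (with the defaults used by SetMethod(),
+// lowRttNackThresholdMs = kLowRttNackMs = 20 and highRttNackThresholdMs =
+// -1): an RTT of 10 ms picks mode 1 and forces the delta FEC rate to zero,
+// while an RTT of 100 ms picks the hybrid mode and keeps the FEC factors,
+// leaving the residual loss to NACK in the jitter buffer.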
+
+int VCMNackFecMethod::ComputeMaxFramesFec(
+ const VCMProtectionParameters* parameters) {
+ if (parameters->numLayers > 2) {
+ // For more than 2 temporal layers we will only have FEC on the base layer,
+ // and the base layers will be pretty far apart. Therefore we force one
+ // frame FEC.
+ return 1;
+ }
+ // We set the max number of frames to base the FEC on so that on average
+ // we will have complete frames in one RTT. Note that this is an upper
+ // bound, and that the actual number of frames used for FEC is decided by the
+ // RTP module based on the actual number of packets and the protection factor.
+ float base_layer_framerate = parameters->frameRate /
+ static_cast<float>(1 << (parameters->numLayers - 1));
+ int max_frames_fec = std::max(static_cast<int>(
+ 2.0f * base_layer_framerate * parameters->rtt /
+ 1000.0f + 0.5f), 1);
+ // |kUpperLimitFramesFec| is the upper limit on how many frames we
+ // allow any FEC to be based on.
+ if (max_frames_fec > kUpperLimitFramesFec) {
+ max_frames_fec = kUpperLimitFramesFec;
+ }
+ return max_frames_fec;
+}
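+
+// Illustrative example: at 30 fps with two temporal layers the base layer
+// runs at 15 fps; with an RTT of 100 ms this yields
+// max(int(2 * 15 * 100 / 1000 + 0.5), 1) = 3 frames, well below the
+// kUpperLimitFramesFec cap of 6.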
+
+int VCMNackFecMethod::MaxFramesFec() const {
+ return _maxFramesFec;
+}
+
+bool VCMNackFecMethod::BitRateTooLowForFec(
+ const VCMProtectionParameters* parameters) {
+ // Bitrate below which we turn off FEC, regardless of reported packet loss.
+  // The condition should depend on resolution and content. For now, use a
+  // threshold on bytes per frame, with some adjustment for the spatial
+  // resolution.
+ // The condition for turning off FEC is also based on other factors,
+ // such as |_numLayers|, |_maxFramesFec|, and |_rtt|.
+ int estimate_bytes_per_frame = 1000 * BitsPerFrame(parameters) / 8;
+ int max_bytes_per_frame = kMaxBytesPerFrameForFec;
+ int num_pixels = parameters->codecWidth * parameters->codecHeight;
+ if (num_pixels <= 352 * 288) {
+ max_bytes_per_frame = kMaxBytesPerFrameForFecLow;
+ } else if (num_pixels > 640 * 480) {
+ max_bytes_per_frame = kMaxBytesPerFrameForFecHigh;
+ }
+ // TODO (marpan): add condition based on maximum frames used for FEC,
+ // and expand condition based on frame size.
+ // Max round trip time threshold in ms.
+ const int64_t kMaxRttTurnOffFec = 200;
+ if (estimate_bytes_per_frame < max_bytes_per_frame &&
+ parameters->numLayers < 3 &&
+ parameters->rtt < kMaxRttTurnOffFec) {
+ return true;
+ }
+ return false;
+}
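+
+// Illustrative example: a 320x240 stream falls in the <= 352x288 bucket, so
+// the low threshold kMaxBytesPerFrameForFecLow = 400 applies; FEC is then
+// turned off when the estimated frame size stays below 400 bytes, fewer
+// than three layers are in use, and the RTT is below 200 ms.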
+
+bool
+VCMNackFecMethod::EffectivePacketLoss(const VCMProtectionParameters* parameters)
+{
+ // Set the effective packet loss for encoder (based on FEC code).
+ // Compute the effective packet loss and residual packet loss due to FEC.
+ VCMFecMethod::EffectivePacketLoss(parameters);
+ return true;
+}
+
+bool
+VCMNackFecMethod::UpdateParameters(const VCMProtectionParameters* parameters)
+{
+ ProtectionFactor(parameters);
+ EffectivePacketLoss(parameters);
+ _maxFramesFec = ComputeMaxFramesFec(parameters);
+ if (BitRateTooLowForFec(parameters)) {
+ _protectionFactorK = 0;
+ _protectionFactorD = 0;
+ }
+
+  // Protection/FEC rates obtained above are defined relative to the total
+  // number of packets (total rate: source + FEC). FEC in the RTP module
+  // assumes the protection factor is defined relative to the source number
+  // of packets, so we convert the factor to reduce the mismatch between
+  // mediaOpt's rate and the actual one.
+ _protectionFactorK = VCMFecMethod::ConvertFECRate(_protectionFactorK);
+ _protectionFactorD = VCMFecMethod::ConvertFECRate(_protectionFactorD);
+
+ return true;
+}
+
+VCMNackMethod::VCMNackMethod():
+VCMProtectionMethod()
+{
+ _type = kNack;
+}
+
+VCMNackMethod::~VCMNackMethod()
+{
+ //
+}
+
+bool
+VCMNackMethod::EffectivePacketLoss(const VCMProtectionParameters* parameter)
+{
+ // Effective Packet Loss, NA in current version.
+ _effectivePacketLoss = 0;
+ return true;
+}
+
+bool
+VCMNackMethod::UpdateParameters(const VCMProtectionParameters* parameters)
+{
+ // Compute the effective packet loss
+ EffectivePacketLoss(parameters);
+
+ // nackCost = (bitRate - nackCost) * (lossPr)
+ return true;
+}
+
+VCMFecMethod::VCMFecMethod():
+VCMProtectionMethod()
+{
+ _type = kFec;
+}
+VCMFecMethod::~VCMFecMethod()
+{
+ //
+}
+
+uint8_t
+VCMFecMethod::BoostCodeRateKey(uint8_t packetFrameDelta,
+ uint8_t packetFrameKey) const
+{
+ uint8_t boostRateKey = 2;
+ // Default: ratio scales the FEC protection up for I frames
+ uint8_t ratio = 1;
+
+ if (packetFrameDelta > 0)
+ {
+        ratio = (uint8_t) (packetFrameKey / packetFrameDelta);
+ }
+ ratio = VCM_MAX(boostRateKey, ratio);
+
+ return ratio;
+}
+
+uint8_t
+VCMFecMethod::ConvertFECRate(uint8_t codeRateRTP) const
+{
+ return static_cast<uint8_t> (VCM_MIN(255,(0.5 + 255.0 * codeRateRTP /
+ (float)(255 - codeRateRTP))));
+}
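+
+// Example: a factor of 85 (85/255, i.e. one third of the *total* packets
+// are FEC) converts to 0.5 + 255 * 85 / (255 - 85) = 128, i.e. ~50% of the
+// *source* packets, which describes the same stream of one FEC packet per
+// two source packets.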
+
+// Update FEC with protectionFactorD
+void
+VCMFecMethod::UpdateProtectionFactorD(uint8_t protectionFactorD)
+{
+ _protectionFactorD = protectionFactorD;
+}
+
+// Update FEC with protectionFactorK
+void
+VCMFecMethod::UpdateProtectionFactorK(uint8_t protectionFactorK)
+{
+ _protectionFactorK = protectionFactorK;
+}
+
+bool
+VCMFecMethod::ProtectionFactor(const VCMProtectionParameters* parameters)
+{
+ // FEC PROTECTION SETTINGS: varies with packet loss and bitrate
+
+ // No protection if (filtered) packetLoss is 0
+ uint8_t packetLoss = (uint8_t) (255 * parameters->lossPr);
+ if (packetLoss == 0)
+ {
+ _protectionFactorK = 0;
+ _protectionFactorD = 0;
+ return true;
+ }
+
+ // Parameters for FEC setting:
+    // first partition size, thresholds, table parameters, spatial
+    // resolution factor.
+
+ // First partition protection: ~ 20%
+ uint8_t firstPartitionProt = (uint8_t) (255 * 0.20);
+
+ // Minimum protection level needed to generate one FEC packet for one
+ // source packet/frame (in RTP sender)
+ uint8_t minProtLevelFec = 85;
+
+    // Thresholds on packetLoss and bitRate/frameRate (= average #packets),
+ // above which we allocate protection to cover at least first partition.
+ uint8_t lossThr = 0;
+ uint8_t packetNumThr = 1;
+
+ // Parameters for range of rate index of table.
+ const uint8_t ratePar1 = 5;
+ const uint8_t ratePar2 = 49;
+
+ // Spatial resolution size, relative to a reference size.
+ float spatialSizeToRef = static_cast<float>
+ (parameters->codecWidth * parameters->codecHeight) /
+ (static_cast<float>(704 * 576));
+ // resolnFac: This parameter will generally increase/decrease the FEC rate
+ // (for fixed bitRate and packetLoss) based on system size.
+ // Use a smaller exponent (< 1) to control/soften system size effect.
+ const float resolnFac = 1.0 / powf(spatialSizeToRef, 0.3f);
+
+ const int bitRatePerFrame = BitsPerFrame(parameters);
+
+
+ // Average number of packets per frame (source and fec):
+ const uint8_t avgTotPackets = 1 + (uint8_t)
+ ((float) bitRatePerFrame * 1000.0
+ / (float) (8.0 * _maxPayloadSize) + 0.5);
+
+ // FEC rate parameters: for P and I frame
+ uint8_t codeRateDelta = 0;
+ uint8_t codeRateKey = 0;
+
+ // Get index for table: the FEC protection depends on an effective rate.
+ // The range on the rate index corresponds to rates (bps)
+ // from ~200k to ~8000k, for 30fps
+ const uint16_t effRateFecTable = static_cast<uint16_t>
+ (resolnFac * bitRatePerFrame);
+ uint8_t rateIndexTable =
+ (uint8_t) VCM_MAX(VCM_MIN((effRateFecTable - ratePar1) /
+ ratePar1, ratePar2), 0);
+
+    // Restrict the packet loss range to 50%:
+    // the current tables are defined only up to 50% loss.
+ if (packetLoss >= kPacketLossMax)
+ {
+ packetLoss = kPacketLossMax - 1;
+ }
+ uint16_t indexTable = rateIndexTable * kPacketLossMax + packetLoss;
+
+ // Check on table index
+ assert(indexTable < kSizeCodeRateXORTable);
+
+ // Protection factor for P frame
+ codeRateDelta = kCodeRateXORTable[indexTable];
+
+ if (packetLoss > lossThr && avgTotPackets > packetNumThr)
+ {
+ // Set a minimum based on first partition size.
+ if (codeRateDelta < firstPartitionProt)
+ {
+ codeRateDelta = firstPartitionProt;
+ }
+ }
+
+ // Check limit on amount of protection for P frame; 50% is max.
+ if (codeRateDelta >= kPacketLossMax)
+ {
+ codeRateDelta = kPacketLossMax - 1;
+ }
+
+ float adjustFec = 1.0f;
+ // Avoid additional adjustments when layers are active.
+    // TODO(mikhal/marco): Update adjustment based on layer info.
+ if (parameters->numLayers == 1)
+ {
+ adjustFec = _qmRobustness->AdjustFecFactor(codeRateDelta,
+ parameters->bitRate,
+ parameters->frameRate,
+ parameters->rtt,
+ packetLoss);
+ }
+
+ codeRateDelta = static_cast<uint8_t>(codeRateDelta * adjustFec);
+
+ // For Key frame:
+ // Effectively at a higher rate, so we scale/boost the rate
+ // The boost factor may depend on several factors: ratio of packet
+ // number of I to P frames, how much protection placed on P frames, etc.
+ const uint8_t packetFrameDelta = (uint8_t)
+ (0.5 + parameters->packetsPerFrame);
+ const uint8_t packetFrameKey = (uint8_t)
+ (0.5 + parameters->packetsPerFrameKey);
+ const uint8_t boostKey = BoostCodeRateKey(packetFrameDelta,
+ packetFrameKey);
+
+ rateIndexTable = (uint8_t) VCM_MAX(VCM_MIN(
+ 1 + (boostKey * effRateFecTable - ratePar1) /
+ ratePar1,ratePar2),0);
+ uint16_t indexTableKey = rateIndexTable * kPacketLossMax + packetLoss;
+
+    indexTableKey = VCM_MIN(indexTableKey, kSizeCodeRateXORTable - 1);
+
+ // Check on table index
+ assert(indexTableKey < kSizeCodeRateXORTable);
+
+ // Protection factor for I frame
+ codeRateKey = kCodeRateXORTable[indexTableKey];
+
+ // Boosting for Key frame.
+ int boostKeyProt = _scaleProtKey * codeRateDelta;
+ if (boostKeyProt >= kPacketLossMax)
+ {
+ boostKeyProt = kPacketLossMax - 1;
+ }
+
+ // Make sure I frame protection is at least larger than P frame protection,
+ // and at least as high as filtered packet loss.
+ codeRateKey = static_cast<uint8_t> (VCM_MAX(packetLoss,
+ VCM_MAX(boostKeyProt, codeRateKey)));
+
+ // Check limit on amount of protection for I frame: 50% is max.
+ if (codeRateKey >= kPacketLossMax)
+ {
+ codeRateKey = kPacketLossMax - 1;
+ }
+
+ _protectionFactorK = codeRateKey;
+ _protectionFactorD = codeRateDelta;
+
+    // Generally there is a rate mismatch between the FEC cost estimated
+ // in mediaOpt and the actual FEC cost sent out in RTP module.
+ // This is more significant at low rates (small # of source packets), where
+ // the granularity of the FEC decreases. In this case, non-zero protection
+ // in mediaOpt may generate 0 FEC packets in RTP sender (since actual #FEC
+ // is based on rounding off protectionFactor on actual source packet number).
+    // The correction factor (_corrFecCost) attempts to correct this, at
+    // least for cases of low rates (small #packets) and low protection
+    // levels.
+
+ float numPacketsFl = 1.0f + ((float) bitRatePerFrame * 1000.0
+ / (float) (8.0 * _maxPayloadSize) + 0.5);
+
+ const float estNumFecGen = 0.5f + static_cast<float> (_protectionFactorD *
+ numPacketsFl / 255.0f);
+
+
+    // We reduce the cost factor (which will reduce the overhead for the FEC
+    // and hybrid methods), not the protectionFactor.
+ _corrFecCost = 1.0f;
+ if (estNumFecGen < 1.1f && _protectionFactorD < minProtLevelFec)
+ {
+ _corrFecCost = 0.5f;
+ }
+ if (estNumFecGen < 0.9f && _protectionFactorD < minProtLevelFec)
+ {
+ _corrFecCost = 0.0f;
+ }
+
+ // TODO (marpan): Set the UEP protection on/off for Key and Delta frames
+ _useUepProtectionK = _qmRobustness->SetUepProtection(codeRateKey,
+ parameters->bitRate,
+ packetLoss,
+ 0);
+
+ _useUepProtectionD = _qmRobustness->SetUepProtection(codeRateDelta,
+ parameters->bitRate,
+ packetLoss,
+ 1);
+
+ // DONE WITH FEC PROTECTION SETTINGS
+ return true;
+}
+
+int VCMFecMethod::BitsPerFrame(const VCMProtectionParameters* parameters) {
+ // When temporal layers are available FEC will only be applied on the base
+ // layer.
+ const float bitRateRatio =
+ kVp8LayerRateAlloction[parameters->numLayers - 1][0];
+ float frameRateRatio = powf(1 / 2.0, parameters->numLayers - 1);
+ float bitRate = parameters->bitRate * bitRateRatio;
+ float frameRate = parameters->frameRate * frameRateRatio;
+
+ // TODO(mikhal): Update factor following testing.
+ float adjustmentFactor = 1;
+
+ // Average bits per frame (units of kbits)
+ return static_cast<int>(adjustmentFactor * bitRate / frameRate);
+}
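+
+// Illustrative example (assuming the base layer is allotted 60% of the rate
+// for two layers in kVp8LayerRateAlloction): at 1200 kbps and 30 fps with
+// numLayers = 2, the base layer carries 1200 * 0.6 = 720 kbps at 15 fps,
+// i.e. 720 / 15 = 48 kbits per frame.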
+
+bool
+VCMFecMethod::EffectivePacketLoss(const VCMProtectionParameters* parameters)
+{
+    // The effective packet loss to the encoder is based on RPL (residual
+    // packet loss). This is a soft setting based on the degree of FEC
+    // protection:
+    //   RPL = received/input packet loss - average_FEC_recovery.
+    // Note: the received/input packet loss may be filtered based on
+    // FilteredLoss.
+
+ // Effective Packet Loss, NA in current version.
+ _effectivePacketLoss = 0;
+
+ return true;
+}
+
+bool
+VCMFecMethod::UpdateParameters(const VCMProtectionParameters* parameters)
+{
+ // Compute the protection factor
+ ProtectionFactor(parameters);
+
+ // Compute the effective packet loss
+ EffectivePacketLoss(parameters);
+
+    // The protection/FEC rates obtained above are defined relative to the
+    // total number of packets (total rate: source + FEC). FEC in the RTP
+    // module assumes the protection factor is defined relative to the source
+    // number of packets, so we convert the factor to reduce the mismatch
+    // between mediaOpt's suggested rate and the actual rate.
+ _protectionFactorK = ConvertFECRate(_protectionFactorK);
+ _protectionFactorD = ConvertFECRate(_protectionFactorD);
+
+ return true;
+}
+
+VCMLossProtectionLogic::VCMLossProtectionLogic(int64_t nowMs):
+_currentParameters(),
+_rtt(0),
+_lossPr(0.0f),
+_bitRate(0.0f),
+_frameRate(0.0f),
+_keyFrameSize(0.0f),
+_fecRateKey(0),
+_fecRateDelta(0),
+_lastPrUpdateT(0),
+_lossPr255(0.9999f),
+_lossPrHistory(),
+_shortMaxLossPr255(0),
+_packetsPerFrame(0.9999f),
+_packetsPerFrameKey(0.9999f),
+_codecWidth(0),
+_codecHeight(0),
+_numLayers(1)
+{
+ Reset(nowMs);
+}
+
+VCMLossProtectionLogic::~VCMLossProtectionLogic()
+{
+ Release();
+}
+
+void VCMLossProtectionLogic::SetMethod(
+ enum VCMProtectionMethodEnum newMethodType) {
+ if (_selectedMethod && _selectedMethod->Type() == newMethodType)
+ return;
+
+ switch(newMethodType) {
+ case kNack:
+ _selectedMethod.reset(new VCMNackMethod());
+ break;
+ case kFec:
+ _selectedMethod.reset(new VCMFecMethod());
+ break;
+ case kNackFec:
+ _selectedMethod.reset(new VCMNackFecMethod(kLowRttNackMs, -1));
+ break;
+ case kNone:
+ _selectedMethod.reset();
+ break;
+ }
+ UpdateMethod();
+}
+
+void
+VCMLossProtectionLogic::UpdateRtt(int64_t rtt)
+{
+ _rtt = rtt;
+}
+
+void
+VCMLossProtectionLogic::UpdateMaxLossHistory(uint8_t lossPr255,
+ int64_t now)
+{
+ if (_lossPrHistory[0].timeMs >= 0 &&
+ now - _lossPrHistory[0].timeMs < kLossPrShortFilterWinMs)
+ {
+ if (lossPr255 > _shortMaxLossPr255)
+ {
+ _shortMaxLossPr255 = lossPr255;
+ }
+ }
+ else
+ {
+ // Only add a new value to the history once a second
+ if (_lossPrHistory[0].timeMs == -1)
+ {
+ // First, no shift
+ _shortMaxLossPr255 = lossPr255;
+ }
+ else
+ {
+ // Shift
+ for (int32_t i = (kLossPrHistorySize - 2); i >= 0; i--)
+ {
+ _lossPrHistory[i + 1].lossPr255 = _lossPrHistory[i].lossPr255;
+ _lossPrHistory[i + 1].timeMs = _lossPrHistory[i].timeMs;
+ }
+ }
+ if (_shortMaxLossPr255 == 0)
+ {
+ _shortMaxLossPr255 = lossPr255;
+ }
+
+ _lossPrHistory[0].lossPr255 = _shortMaxLossPr255;
+ _lossPrHistory[0].timeMs = now;
+ _shortMaxLossPr255 = 0;
+ }
+}
+
+uint8_t
+VCMLossProtectionLogic::MaxFilteredLossPr(int64_t nowMs) const
+{
+ uint8_t maxFound = _shortMaxLossPr255;
+ if (_lossPrHistory[0].timeMs == -1)
+ {
+ return maxFound;
+ }
+ for (int32_t i = 0; i < kLossPrHistorySize; i++)
+ {
+ if (_lossPrHistory[i].timeMs == -1)
+ {
+ break;
+ }
+ if (nowMs - _lossPrHistory[i].timeMs >
+ kLossPrHistorySize * kLossPrShortFilterWinMs)
+ {
+ // This sample (and all samples after this) is too old
+ break;
+ }
+ if (_lossPrHistory[i].lossPr255 > maxFound)
+ {
+ // This sample is the largest one this far into the history
+ maxFound = _lossPrHistory[i].lossPr255;
+ }
+ }
+ return maxFound;
+}
+
+uint8_t VCMLossProtectionLogic::FilteredLoss(
+ int64_t nowMs,
+ FilterPacketLossMode filter_mode,
+ uint8_t lossPr255) {
+
+ // Update the max window filter.
+ UpdateMaxLossHistory(lossPr255, nowMs);
+
+ // Update the recursive average filter.
+ _lossPr255.Apply(static_cast<float> (nowMs - _lastPrUpdateT),
+ static_cast<float> (lossPr255));
+ _lastPrUpdateT = nowMs;
+
+ // Filtered loss: default is received loss (no filtering).
+ uint8_t filtered_loss = lossPr255;
+
+ switch (filter_mode) {
+ case kNoFilter:
+ break;
+ case kAvgFilter:
+ filtered_loss = static_cast<uint8_t>(_lossPr255.filtered() + 0.5);
+ break;
+ case kMaxFilter:
+ filtered_loss = MaxFilteredLossPr(nowMs);
+ break;
+ }
+
+ return filtered_loss;
+}
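+
+// Illustrative example: given loss reports of 10%, 40% and 10% within the
+// max-filter window, kNoFilter returns the latest report (10%), kAvgFilter
+// the exponentially smoothed average, and kMaxFilter the windowed maximum
+// (40%) over the last kLossPrHistorySize seconds.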
+
+void
+VCMLossProtectionLogic::UpdateFilteredLossPr(uint8_t packetLossEnc)
+{
+ _lossPr = (float) packetLossEnc / (float) 255.0;
+}
+
+void
+VCMLossProtectionLogic::UpdateBitRate(float bitRate)
+{
+ _bitRate = bitRate;
+}
+
+void
+VCMLossProtectionLogic::UpdatePacketsPerFrame(float nPackets, int64_t nowMs)
+{
+ _packetsPerFrame.Apply(static_cast<float>(nowMs - _lastPacketPerFrameUpdateT),
+ nPackets);
+ _lastPacketPerFrameUpdateT = nowMs;
+}
+
+void
+VCMLossProtectionLogic::UpdatePacketsPerFrameKey(float nPackets, int64_t nowMs)
+{
+ _packetsPerFrameKey.Apply(static_cast<float>(nowMs -
+ _lastPacketPerFrameUpdateTKey), nPackets);
+ _lastPacketPerFrameUpdateTKey = nowMs;
+}
+
+void
+VCMLossProtectionLogic::UpdateKeyFrameSize(float keyFrameSize)
+{
+ _keyFrameSize = keyFrameSize;
+}
+
+void
+VCMLossProtectionLogic::UpdateFrameSize(uint16_t width,
+ uint16_t height)
+{
+ _codecWidth = width;
+ _codecHeight = height;
+}
+
+void VCMLossProtectionLogic::UpdateNumLayers(int numLayers) {
+ _numLayers = (numLayers == 0) ? 1 : numLayers;
+}
+
+bool
+VCMLossProtectionLogic::UpdateMethod()
+{
+ if (!_selectedMethod)
+ return false;
+ _currentParameters.rtt = _rtt;
+ _currentParameters.lossPr = _lossPr;
+ _currentParameters.bitRate = _bitRate;
+ _currentParameters.frameRate = _frameRate; // rename actual frame rate?
+ _currentParameters.keyFrameSize = _keyFrameSize;
+ _currentParameters.fecRateDelta = _fecRateDelta;
+ _currentParameters.fecRateKey = _fecRateKey;
+ _currentParameters.packetsPerFrame = _packetsPerFrame.filtered();
+ _currentParameters.packetsPerFrameKey = _packetsPerFrameKey.filtered();
+ _currentParameters.codecWidth = _codecWidth;
+ _currentParameters.codecHeight = _codecHeight;
+ _currentParameters.numLayers = _numLayers;
+ return _selectedMethod->UpdateParameters(&_currentParameters);
+}
+
+VCMProtectionMethod*
+VCMLossProtectionLogic::SelectedMethod() const
+{
+ return _selectedMethod.get();
+}
+
+VCMProtectionMethodEnum VCMLossProtectionLogic::SelectedType() const {
+ return _selectedMethod ? _selectedMethod->Type() : kNone;
+}
+
+void
+VCMLossProtectionLogic::Reset(int64_t nowMs)
+{
+ _lastPrUpdateT = nowMs;
+ _lastPacketPerFrameUpdateT = nowMs;
+ _lastPacketPerFrameUpdateTKey = nowMs;
+ _lossPr255.Reset(0.9999f);
+ _packetsPerFrame.Reset(0.9999f);
+ _fecRateDelta = _fecRateKey = 0;
+ for (int32_t i = 0; i < kLossPrHistorySize; i++)
+ {
+ _lossPrHistory[i].lossPr255 = 0;
+ _lossPrHistory[i].timeMs = -1;
+ }
+ _shortMaxLossPr255 = 0;
+ Release();
+}
+
+void VCMLossProtectionLogic::Release() {
+ _selectedMethod.reset();
+}
+
+} // namespace media_optimization
+} // namespace webrtc
diff --git a/webrtc/modules/video_coding/main/source/media_opt_util.h b/webrtc/modules/video_coding/main/source/media_opt_util.h
new file mode 100644
index 0000000000..2085bbcde9
--- /dev/null
+++ b/webrtc/modules/video_coding/main/source/media_opt_util.h
@@ -0,0 +1,364 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_MEDIA_OPT_UTIL_H_
+#define WEBRTC_MODULES_VIDEO_CODING_MEDIA_OPT_UTIL_H_
+
+#include <math.h>
+#include <stdlib.h>
+
+#include "webrtc/base/exp_filter.h"
+#include "webrtc/base/scoped_ptr.h"
+#include "webrtc/modules/video_coding/main/source/internal_defines.h"
+#include "webrtc/modules/video_coding/main/source/qm_select.h"
+#include "webrtc/system_wrappers/include/trace.h"
+#include "webrtc/typedefs.h"
+
+namespace webrtc {
+namespace media_optimization {
+
+// Number of time periods used for (max) window filter for packet loss
+// TODO (marpan): set reasonable window size for filtered packet loss,
+// adjustment should be based on logged/real data of loss stats/correlation.
+enum { kLossPrHistorySize = 10 };
+
+// 1000 ms, total filter length is (kLossPrHistorySize * 1000) ms
+enum { kLossPrShortFilterWinMs = 1000 };
+
+// The type of filter used on the received packet loss reports.
+enum FilterPacketLossMode {
+ kNoFilter, // No filtering on received loss.
+ kAvgFilter, // Recursive average filter.
+ kMaxFilter // Max-window filter, over the time interval of:
+ // (kLossPrHistorySize * kLossPrShortFilterWinMs) ms.
+};
+
+// Thresholds for hybrid NACK/FEC
+// common to media optimization and the jitter buffer.
+const int64_t kLowRttNackMs = 20;
+
+struct VCMProtectionParameters
+{
+ VCMProtectionParameters() : rtt(0), lossPr(0.0f), bitRate(0.0f),
+ packetsPerFrame(0.0f), packetsPerFrameKey(0.0f), frameRate(0.0f),
+ keyFrameSize(0.0f), fecRateDelta(0), fecRateKey(0),
+ codecWidth(0), codecHeight(0),
+ numLayers(1)
+ {}
+
+ int64_t rtt;
+ float lossPr;
+ float bitRate;
+ float packetsPerFrame;
+ float packetsPerFrameKey;
+ float frameRate;
+ float keyFrameSize;
+ uint8_t fecRateDelta;
+ uint8_t fecRateKey;
+ uint16_t codecWidth;
+ uint16_t codecHeight;
+ int numLayers;
+};
+
+
+/******************************/
+/* VCMProtectionMethod class */
+/******************************/
+
+enum VCMProtectionMethodEnum
+{
+ kNack,
+ kFec,
+ kNackFec,
+ kNone
+};
+
+class VCMLossProbabilitySample
+{
+public:
+    VCMLossProbabilitySample() : lossPr255(0), timeMs(-1) {}
+
+ uint8_t lossPr255;
+ int64_t timeMs;
+};
+
+
+class VCMProtectionMethod
+{
+public:
+ VCMProtectionMethod();
+ virtual ~VCMProtectionMethod();
+
+ // Updates the efficiency of the method using the parameters provided
+ //
+ // Input:
+ // - parameters : Parameters used to calculate efficiency
+ //
+ // Return value : True if this method is recommended in
+ // the given conditions.
+ virtual bool UpdateParameters(const VCMProtectionParameters* parameters) = 0;
+
+ // Returns the protection type
+ //
+ // Return value : The protection type
+ enum VCMProtectionMethodEnum Type() const { return _type; }
+
+ // Returns the effective packet loss for ER, required by this protection method
+ //
+ // Return value : Required effective packet loss
+ virtual uint8_t RequiredPacketLossER() { return _effectivePacketLoss; }
+
+ // Extracts the FEC protection factor for Key frame, required by this protection method
+ //
+ // Return value : Required protectionFactor for Key frame
+ virtual uint8_t RequiredProtectionFactorK() { return _protectionFactorK; }
+
+ // Extracts the FEC protection factor for Delta frame, required by this protection method
+ //
+ // Return value : Required protectionFactor for delta frame
+ virtual uint8_t RequiredProtectionFactorD() { return _protectionFactorD; }
+
+ // Extracts whether the FEC Unequal protection (UEP) is used for Key frame.
+ //
+ // Return value : Required Unequal protection on/off state.
+ virtual bool RequiredUepProtectionK() { return _useUepProtectionK; }
+
+    // Extracts whether the FEC Unequal protection (UEP) is used for Delta frame.
+ //
+ // Return value : Required Unequal protection on/off state.
+ virtual bool RequiredUepProtectionD() { return _useUepProtectionD; }
+
+ virtual int MaxFramesFec() const { return 1; }
+
+ // Updates content metrics
+ void UpdateContentMetrics(const VideoContentMetrics* contentMetrics);
+
+protected:
+
+ uint8_t _effectivePacketLoss;
+ uint8_t _protectionFactorK;
+ uint8_t _protectionFactorD;
+ // Estimation of residual loss after the FEC
+ float _scaleProtKey;
+ int32_t _maxPayloadSize;
+
+ VCMQmRobustness* _qmRobustness;
+ bool _useUepProtectionK;
+ bool _useUepProtectionD;
+ float _corrFecCost;
+ enum VCMProtectionMethodEnum _type;
+};
+
+class VCMNackMethod : public VCMProtectionMethod
+{
+public:
+ VCMNackMethod();
+ virtual ~VCMNackMethod();
+ virtual bool UpdateParameters(const VCMProtectionParameters* parameters);
+ // Get the effective packet loss
+ bool EffectivePacketLoss(const VCMProtectionParameters* parameter);
+};
+
+class VCMFecMethod : public VCMProtectionMethod
+{
+public:
+ VCMFecMethod();
+ virtual ~VCMFecMethod();
+ virtual bool UpdateParameters(const VCMProtectionParameters* parameters);
+ // Get the effective packet loss for ER
+ bool EffectivePacketLoss(const VCMProtectionParameters* parameters);
+ // Get the FEC protection factors
+ bool ProtectionFactor(const VCMProtectionParameters* parameters);
+ // Get the boost for key frame protection
+ uint8_t BoostCodeRateKey(uint8_t packetFrameDelta,
+ uint8_t packetFrameKey) const;
+    // Convert the rates: defined relative to the total number of packets or
+    // the number of source packets.
+ uint8_t ConvertFECRate(uint8_t codeRate) const;
+ // Get the average effective recovery from FEC: for random loss model
+ float AvgRecoveryFEC(const VCMProtectionParameters* parameters) const;
+ // Update FEC with protectionFactorD
+ void UpdateProtectionFactorD(uint8_t protectionFactorD);
+ // Update FEC with protectionFactorK
+ void UpdateProtectionFactorK(uint8_t protectionFactorK);
+ // Compute the bits per frame. Account for temporal layers when applicable.
+ int BitsPerFrame(const VCMProtectionParameters* parameters);
+
+protected:
+ enum { kUpperLimitFramesFec = 6 };
+ // Thresholds values for the bytes/frame and round trip time, below which we
+ // may turn off FEC, depending on |_numLayers| and |_maxFramesFec|.
+ // Max bytes/frame for VGA, corresponds to ~140k at 25fps.
+ enum { kMaxBytesPerFrameForFec = 700 };
+ // Max bytes/frame for CIF and lower: corresponds to ~80k at 25fps.
+ enum { kMaxBytesPerFrameForFecLow = 400 };
+ // Max bytes/frame for frame size larger than VGA, ~200k at 25fps.
+ enum { kMaxBytesPerFrameForFecHigh = 1000 };
+};
+
+
+class VCMNackFecMethod : public VCMFecMethod
+{
+public:
+ VCMNackFecMethod(int64_t lowRttNackThresholdMs,
+ int64_t highRttNackThresholdMs);
+ virtual ~VCMNackFecMethod();
+ virtual bool UpdateParameters(const VCMProtectionParameters* parameters);
+ // Get the effective packet loss for ER
+ bool EffectivePacketLoss(const VCMProtectionParameters* parameters);
+ // Get the protection factors
+ bool ProtectionFactor(const VCMProtectionParameters* parameters);
+ // Get the max number of frames the FEC is allowed to be based on.
+ int MaxFramesFec() const;
+ // Turn off the FEC based on low bitrate and other factors.
+ bool BitRateTooLowForFec(const VCMProtectionParameters* parameters);
+private:
+ int ComputeMaxFramesFec(const VCMProtectionParameters* parameters);
+
+ int64_t _lowRttNackMs;
+ int64_t _highRttNackMs;
+ int _maxFramesFec;
+};
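+
+// Construction sketch (hypothetical values; a negative high-RTT threshold is
+// assumed here to mean "no upper NACK cutoff" -- see the .cc file for the
+// actual semantics):
+//   VCMNackFecMethod hybrid(kLowRttNackMs, -1);
+//   hybrid.UpdateParameters(&params);  // Recomputes FEC factors for this RTT.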
+
+class VCMLossProtectionLogic
+{
+public:
+ VCMLossProtectionLogic(int64_t nowMs);
+ ~VCMLossProtectionLogic();
+
+ // Set the protection method to be used
+ //
+ // Input:
+ // - newMethodType : New requested protection method type. If one
+ // is already set, it will be deleted and replaced
+ void SetMethod(VCMProtectionMethodEnum newMethodType);
+
+ // Update the round-trip time
+ //
+ // Input:
+    //          - rtt                   : Round-trip time in milliseconds.
+ void UpdateRtt(int64_t rtt);
+
+ // Update the filtered packet loss.
+ //
+ // Input:
+ // - packetLossEnc : The reported packet loss filtered
+ // (max window or average)
+ void UpdateFilteredLossPr(uint8_t packetLossEnc);
+
+ // Update the current target bit rate.
+ //
+ // Input:
+ // - bitRate : The current target bit rate in kbits/s
+ void UpdateBitRate(float bitRate);
+
+ // Update the number of packets per frame estimate, for delta frames
+ //
+ // Input:
+ // - nPackets : Number of packets in the latest sent frame.
+ void UpdatePacketsPerFrame(float nPackets, int64_t nowMs);
+
+ // Update the number of packets per frame estimate, for key frames
+ //
+ // Input:
+    //          - nPackets              : Number of packets in the latest sent frame.
+ void UpdatePacketsPerFrameKey(float nPackets, int64_t nowMs);
+
+ // Update the keyFrameSize estimate
+ //
+ // Input:
+ // - keyFrameSize : The size of the latest sent key frame.
+ void UpdateKeyFrameSize(float keyFrameSize);
+
+ // Update the frame rate
+ //
+ // Input:
+ // - frameRate : The current target frame rate.
+ void UpdateFrameRate(float frameRate) { _frameRate = frameRate; }
+
+ // Update the frame size
+ //
+ // Input:
+ // - width : The codec frame width.
+ // - height : The codec frame height.
+ void UpdateFrameSize(uint16_t width, uint16_t height);
+
+ // Update the number of active layers
+ //
+ // Input:
+ // - numLayers : Number of layers used.
+ void UpdateNumLayers(int numLayers);
+
+ // The amount of packet loss to cover for with FEC.
+ //
+ // Input:
+ // - fecRateKey : Packet loss to cover for with FEC when
+ // sending key frames.
+ // - fecRateDelta : Packet loss to cover for with FEC when
+ // sending delta frames.
+    void UpdateFECRates(uint8_t fecRateKey, uint8_t fecRateDelta) {
+        _fecRateKey = fecRateKey;
+        _fecRateDelta = fecRateDelta;
+    }
+
+ // Update the protection methods with the current VCMProtectionParameters
+ // and set the requested protection settings.
+ // Return value : Returns true on update
+ bool UpdateMethod();
+
+ // Returns the method currently selected.
+ //
+ // Return value : The protection method currently selected.
+ VCMProtectionMethod* SelectedMethod() const;
+
+ // Return the protection type of the currently selected method
+ VCMProtectionMethodEnum SelectedType() const;
+
+ // Updates the filtered loss for the average and max window packet loss,
+ // and returns the filtered loss probability in the interval [0, 255].
+ // The returned filtered loss value depends on the parameter |filter_mode|.
+ // The input parameter |lossPr255| is the received packet loss.
+    //
+ // Return value : The filtered loss probability
+ uint8_t FilteredLoss(int64_t nowMs, FilterPacketLossMode filter_mode,
+ uint8_t lossPr255);
+
+ void Reset(int64_t nowMs);
+
+ void Release();
+
+private:
+    // Updates the max window loss history with the latest filtered loss.
+ void UpdateMaxLossHistory(uint8_t lossPr255, int64_t now);
+ uint8_t MaxFilteredLossPr(int64_t nowMs) const;
+ rtc::scoped_ptr<VCMProtectionMethod> _selectedMethod;
+ VCMProtectionParameters _currentParameters;
+ int64_t _rtt;
+ float _lossPr;
+ float _bitRate;
+ float _frameRate;
+ float _keyFrameSize;
+ uint8_t _fecRateKey;
+ uint8_t _fecRateDelta;
+ int64_t _lastPrUpdateT;
+ int64_t _lastPacketPerFrameUpdateT;
+ int64_t _lastPacketPerFrameUpdateTKey;
+ rtc::ExpFilter _lossPr255;
+ VCMLossProbabilitySample _lossPrHistory[kLossPrHistorySize];
+ uint8_t _shortMaxLossPr255;
+ rtc::ExpFilter _packetsPerFrame;
+ rtc::ExpFilter _packetsPerFrameKey;
+ uint16_t _codecWidth;
+ uint16_t _codecHeight;
+ int _numLayers;
+};
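+
+// Typical update sequence (illustrative; time and rate values assumed):
+//   VCMLossProtectionLogic logic(now_ms);
+//   logic.SetMethod(kNackFec);
+//   logic.UpdateRtt(rtt_ms);
+//   logic.UpdateBitRate(500.0f);  // kbit/s
+//   uint8_t loss = logic.FilteredLoss(now_ms, kMaxFilter, received_loss_255);
+//   logic.UpdateFilteredLossPr(loss);
+//   logic.UpdateMethod();  // Pushes the current parameters into the method.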
+
+} // namespace media_optimization
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_CODING_MEDIA_OPT_UTIL_H_
diff --git a/webrtc/modules/video_coding/main/source/media_optimization.cc b/webrtc/modules/video_coding/main/source/media_optimization.cc
new file mode 100644
index 0000000000..cc73d3803d
--- /dev/null
+++ b/webrtc/modules/video_coding/main/source/media_optimization.cc
@@ -0,0 +1,648 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/video_coding/main/source/media_optimization.h"
+
+#include "webrtc/modules/video_coding/main/source/content_metrics_processing.h"
+#include "webrtc/modules/video_coding/main/source/qm_select.h"
+#include "webrtc/modules/video_coding/utility/include/frame_dropper.h"
+#include "webrtc/system_wrappers/include/clock.h"
+#include "webrtc/system_wrappers/include/logging.h"
+
+namespace webrtc {
+namespace media_optimization {
+namespace {
+void UpdateProtectionCallback(
+ VCMProtectionMethod* selected_method,
+ uint32_t* video_rate_bps,
+ uint32_t* nack_overhead_rate_bps,
+ uint32_t* fec_overhead_rate_bps,
+ VCMProtectionCallback* video_protection_callback) {
+ FecProtectionParams delta_fec_params;
+ FecProtectionParams key_fec_params;
+ // Get the FEC code rate for Key frames (set to 0 when NA).
+ key_fec_params.fec_rate = selected_method->RequiredProtectionFactorK();
+
+ // Get the FEC code rate for Delta frames (set to 0 when NA).
+ delta_fec_params.fec_rate = selected_method->RequiredProtectionFactorD();
+
+ // Get the FEC-UEP protection status for Key frames: UEP on/off.
+ key_fec_params.use_uep_protection = selected_method->RequiredUepProtectionK();
+
+ // Get the FEC-UEP protection status for Delta frames: UEP on/off.
+ delta_fec_params.use_uep_protection =
+ selected_method->RequiredUepProtectionD();
+
+ // The RTP module currently requires the same |max_fec_frames| for both
+ // key and delta frames.
+ delta_fec_params.max_fec_frames = selected_method->MaxFramesFec();
+ key_fec_params.max_fec_frames = selected_method->MaxFramesFec();
+
+ // Set the FEC packet mask type. |kFecMaskBursty| is more effective for
+ // consecutive losses and little/no packet re-ordering. As we currently
+ // do not have feedback data on the degree of correlated losses and packet
+ // re-ordering, we keep default setting to |kFecMaskRandom| for now.
+ delta_fec_params.fec_mask_type = kFecMaskRandom;
+ key_fec_params.fec_mask_type = kFecMaskRandom;
+
+ // TODO(Marco): Pass FEC protection values per layer.
+ video_protection_callback->ProtectionRequest(&delta_fec_params,
+ &key_fec_params,
+ video_rate_bps,
+ nack_overhead_rate_bps,
+ fec_overhead_rate_bps);
+}
+} // namespace
+
+struct MediaOptimization::EncodedFrameSample {
+ EncodedFrameSample(size_t size_bytes,
+ uint32_t timestamp,
+ int64_t time_complete_ms)
+ : size_bytes(size_bytes),
+ timestamp(timestamp),
+ time_complete_ms(time_complete_ms) {}
+
+ size_t size_bytes;
+ uint32_t timestamp;
+ int64_t time_complete_ms;
+};
+
+MediaOptimization::MediaOptimization(Clock* clock)
+ : crit_sect_(CriticalSectionWrapper::CreateCriticalSection()),
+ clock_(clock),
+ max_bit_rate_(0),
+ send_codec_type_(kVideoCodecUnknown),
+ codec_width_(0),
+ codec_height_(0),
+ user_frame_rate_(0),
+ frame_dropper_(new FrameDropper),
+ loss_prot_logic_(
+ new VCMLossProtectionLogic(clock_->TimeInMilliseconds())),
+ fraction_lost_(0),
+ send_statistics_zero_encode_(0),
+ max_payload_size_(1460),
+ video_target_bitrate_(0),
+ incoming_frame_rate_(0),
+ enable_qm_(false),
+ encoded_frame_samples_(),
+ avg_sent_bit_rate_bps_(0),
+ avg_sent_framerate_(0),
+ key_frame_cnt_(0),
+ delta_frame_cnt_(0),
+ content_(new VCMContentMetricsProcessing()),
+ qm_resolution_(new VCMQmResolution()),
+ last_qm_update_time_(0),
+ last_change_time_(0),
+ num_layers_(0),
+ suspension_enabled_(false),
+ video_suspended_(false),
+ suspension_threshold_bps_(0),
+ suspension_window_bps_(0) {
+ memset(send_statistics_, 0, sizeof(send_statistics_));
+ memset(incoming_frame_times_, -1, sizeof(incoming_frame_times_));
+}
+
+MediaOptimization::~MediaOptimization(void) {
+ loss_prot_logic_->Release();
+}
+
+void MediaOptimization::Reset() {
+ CriticalSectionScoped lock(crit_sect_.get());
+ SetEncodingDataInternal(
+ kVideoCodecUnknown, 0, 0, 0, 0, 0, 0, max_payload_size_);
+ memset(incoming_frame_times_, -1, sizeof(incoming_frame_times_));
+ incoming_frame_rate_ = 0.0;
+ frame_dropper_->Reset();
+ loss_prot_logic_->Reset(clock_->TimeInMilliseconds());
+ frame_dropper_->SetRates(0, 0);
+ content_->Reset();
+ qm_resolution_->Reset();
+ loss_prot_logic_->UpdateFrameRate(incoming_frame_rate_);
+ send_statistics_zero_encode_ = 0;
+ video_target_bitrate_ = 0;
+ codec_width_ = 0;
+ codec_height_ = 0;
+ user_frame_rate_ = 0;
+ key_frame_cnt_ = 0;
+ delta_frame_cnt_ = 0;
+ last_qm_update_time_ = 0;
+ last_change_time_ = 0;
+ encoded_frame_samples_.clear();
+ avg_sent_bit_rate_bps_ = 0;
+ num_layers_ = 1;
+}
+
+void MediaOptimization::SetEncodingData(VideoCodecType send_codec_type,
+ int32_t max_bit_rate,
+ uint32_t target_bitrate,
+ uint16_t width,
+ uint16_t height,
+ uint32_t frame_rate,
+ int num_layers,
+ int32_t mtu) {
+ CriticalSectionScoped lock(crit_sect_.get());
+ SetEncodingDataInternal(send_codec_type,
+ max_bit_rate,
+ frame_rate,
+ target_bitrate,
+ width,
+ height,
+ num_layers,
+ mtu);
+}
+
+void MediaOptimization::SetEncodingDataInternal(VideoCodecType send_codec_type,
+ int32_t max_bit_rate,
+ uint32_t frame_rate,
+ uint32_t target_bitrate,
+ uint16_t width,
+ uint16_t height,
+ int num_layers,
+ int32_t mtu) {
+ // Everything codec specific should be reset here since this means the codec
+ // has changed. If native dimension values have changed, then either user
+ // initiated change, or QM initiated change. Will be able to determine only
+ // after the processing of the first frame.
+ last_change_time_ = clock_->TimeInMilliseconds();
+ content_->Reset();
+ content_->UpdateFrameRate(frame_rate);
+
+ max_bit_rate_ = max_bit_rate;
+ send_codec_type_ = send_codec_type;
+ video_target_bitrate_ = target_bitrate;
+ float target_bitrate_kbps = static_cast<float>(target_bitrate) / 1000.0f;
+ loss_prot_logic_->UpdateBitRate(target_bitrate_kbps);
+ loss_prot_logic_->UpdateFrameRate(static_cast<float>(frame_rate));
+ loss_prot_logic_->UpdateFrameSize(width, height);
+ loss_prot_logic_->UpdateNumLayers(num_layers);
+ frame_dropper_->Reset();
+ frame_dropper_->SetRates(target_bitrate_kbps, static_cast<float>(frame_rate));
+ user_frame_rate_ = static_cast<float>(frame_rate);
+ codec_width_ = width;
+ codec_height_ = height;
+  // |num_layers| may be zero for non-layered codecs; clamp to at least 1.
+  num_layers_ = (num_layers <= 1) ? 1 : num_layers;
+ max_payload_size_ = mtu;
+ qm_resolution_->Initialize(target_bitrate_kbps,
+ user_frame_rate_,
+ codec_width_,
+ codec_height_,
+ num_layers_);
+}
+
+uint32_t MediaOptimization::SetTargetRates(
+ uint32_t target_bitrate,
+ uint8_t fraction_lost,
+ int64_t round_trip_time_ms,
+ VCMProtectionCallback* protection_callback,
+ VCMQMSettingsCallback* qmsettings_callback) {
+ CriticalSectionScoped lock(crit_sect_.get());
+ VCMProtectionMethod* selected_method = loss_prot_logic_->SelectedMethod();
+ float target_bitrate_kbps = static_cast<float>(target_bitrate) / 1000.0f;
+ loss_prot_logic_->UpdateBitRate(target_bitrate_kbps);
+ loss_prot_logic_->UpdateRtt(round_trip_time_ms);
+
+ // Get frame rate for encoder: this is the actual/sent frame rate.
+ float actual_frame_rate = SentFrameRateInternal();
+
+ // Sanity check.
+ if (actual_frame_rate < 1.0) {
+ actual_frame_rate = 1.0;
+ }
+
+ // Update frame rate for the loss protection logic class: frame rate should
+ // be the actual/sent rate.
+ loss_prot_logic_->UpdateFrameRate(actual_frame_rate);
+
+ fraction_lost_ = fraction_lost;
+
+ // Returns the filtered packet loss, used for the protection setting.
+ // The filtered loss may be the received loss (no filter), or some
+ // filtered value (average or max window filter).
+ // Use max window filter for now.
+ FilterPacketLossMode filter_mode = kMaxFilter;
+ uint8_t packet_loss_enc = loss_prot_logic_->FilteredLoss(
+ clock_->TimeInMilliseconds(), filter_mode, fraction_lost);
+
+ // For now use the filtered loss for computing the robustness settings.
+ loss_prot_logic_->UpdateFilteredLossPr(packet_loss_enc);
+
+ // Rate cost of the protection methods.
+ float protection_overhead_rate = 0.0f;
+
+ // Update protection settings, when applicable.
+ float sent_video_rate_kbps = 0.0f;
+ if (loss_prot_logic_->SelectedType() != kNone) {
+ // Update protection method with content metrics.
+ selected_method->UpdateContentMetrics(content_->ShortTermAvgData());
+
+ // Update method will compute the robustness settings for the given
+ // protection method and the overhead cost
+ // the protection method is set by the user via SetVideoProtection.
+ loss_prot_logic_->UpdateMethod();
+
+ // Update protection callback with protection settings.
+ uint32_t sent_video_rate_bps = 0;
+ uint32_t sent_nack_rate_bps = 0;
+ uint32_t sent_fec_rate_bps = 0;
+ // Get the bit cost of protection method, based on the amount of
+ // overhead data actually transmitted (including headers) the last
+ // second.
+ if (protection_callback) {
+ UpdateProtectionCallback(selected_method,
+ &sent_video_rate_bps,
+ &sent_nack_rate_bps,
+ &sent_fec_rate_bps,
+ protection_callback);
+ }
+ uint32_t sent_total_rate_bps =
+ sent_video_rate_bps + sent_nack_rate_bps + sent_fec_rate_bps;
+ // Estimate the overhead costs of the next second as staying the same
+ // wrt the source bitrate.
+ if (sent_total_rate_bps > 0) {
+ protection_overhead_rate =
+ static_cast<float>(sent_nack_rate_bps + sent_fec_rate_bps) /
+ sent_total_rate_bps;
+ }
+ // Cap the overhead estimate to 50%.
+    if (protection_overhead_rate > 0.5f) {
+      protection_overhead_rate = 0.5f;
+    }
+
+ // Get the effective packet loss for encoder ER when applicable. Should be
+ // passed to encoder via fraction_lost.
+ packet_loss_enc = selected_method->RequiredPacketLossER();
+ sent_video_rate_kbps = static_cast<float>(sent_video_rate_bps) / 1000.0f;
+ }
+
+ // Source coding rate: total rate - protection overhead.
+ video_target_bitrate_ = target_bitrate * (1.0 - protection_overhead_rate);
+
+ // Cap target video bitrate to codec maximum.
+ if (max_bit_rate_ > 0 && video_target_bitrate_ > max_bit_rate_) {
+ video_target_bitrate_ = max_bit_rate_;
+ }
+
+ // Update encoding rates following protection settings.
+ float target_video_bitrate_kbps =
+ static_cast<float>(video_target_bitrate_) / 1000.0f;
+ frame_dropper_->SetRates(target_video_bitrate_kbps, incoming_frame_rate_);
+
+ if (enable_qm_ && qmsettings_callback) {
+ // Update QM with rates.
+ qm_resolution_->UpdateRates(target_video_bitrate_kbps,
+ sent_video_rate_kbps,
+ incoming_frame_rate_,
+ fraction_lost_);
+ // Check for QM selection.
+ bool select_qm = CheckStatusForQMchange();
+ if (select_qm) {
+ SelectQuality(qmsettings_callback);
+ }
+ // Reset the short-term averaged content data.
+ content_->ResetShortTermAvgData();
+ }
+
+ CheckSuspendConditions();
+
+ return video_target_bitrate_;
+}
+
+void MediaOptimization::SetProtectionMethod(VCMProtectionMethodEnum method) {
+ CriticalSectionScoped lock(crit_sect_.get());
+ loss_prot_logic_->SetMethod(method);
+}
+
+uint32_t MediaOptimization::InputFrameRate() {
+ CriticalSectionScoped lock(crit_sect_.get());
+ return InputFrameRateInternal();
+}
+
+uint32_t MediaOptimization::InputFrameRateInternal() {
+ ProcessIncomingFrameRate(clock_->TimeInMilliseconds());
+  return static_cast<uint32_t>(incoming_frame_rate_ + 0.5f);
+}
+
+uint32_t MediaOptimization::SentFrameRate() {
+ CriticalSectionScoped lock(crit_sect_.get());
+ return SentFrameRateInternal();
+}
+
+uint32_t MediaOptimization::SentFrameRateInternal() {
+ PurgeOldFrameSamples(clock_->TimeInMilliseconds());
+ UpdateSentFramerate();
+ return avg_sent_framerate_;
+}
+
+uint32_t MediaOptimization::SentBitRate() {
+ CriticalSectionScoped lock(crit_sect_.get());
+ const int64_t now_ms = clock_->TimeInMilliseconds();
+ PurgeOldFrameSamples(now_ms);
+ UpdateSentBitrate(now_ms);
+ return avg_sent_bit_rate_bps_;
+}
+
+int32_t MediaOptimization::UpdateWithEncodedData(
+ const EncodedImage& encoded_image) {
+ size_t encoded_length = encoded_image._length;
+ uint32_t timestamp = encoded_image._timeStamp;
+ CriticalSectionScoped lock(crit_sect_.get());
+ const int64_t now_ms = clock_->TimeInMilliseconds();
+ PurgeOldFrameSamples(now_ms);
+ if (encoded_frame_samples_.size() > 0 &&
+ encoded_frame_samples_.back().timestamp == timestamp) {
+ // Frames having the same timestamp are generated from the same input
+ // frame. We don't want to double count them, but only increment the
+ // size_bytes.
+ encoded_frame_samples_.back().size_bytes += encoded_length;
+ encoded_frame_samples_.back().time_complete_ms = now_ms;
+ } else {
+ encoded_frame_samples_.push_back(
+ EncodedFrameSample(encoded_length, timestamp, now_ms));
+ }
+ UpdateSentBitrate(now_ms);
+ UpdateSentFramerate();
+ if (encoded_length > 0) {
+ const bool delta_frame = encoded_image._frameType != kVideoFrameKey;
+
+ frame_dropper_->Fill(encoded_length, delta_frame);
+    if (max_payload_size_ > 0) {
+ const float min_packets_per_frame =
+ encoded_length / static_cast<float>(max_payload_size_);
+ if (delta_frame) {
+ loss_prot_logic_->UpdatePacketsPerFrame(min_packets_per_frame,
+ clock_->TimeInMilliseconds());
+ } else {
+ loss_prot_logic_->UpdatePacketsPerFrameKey(
+ min_packets_per_frame, clock_->TimeInMilliseconds());
+ }
+
+ if (enable_qm_) {
+ // Update quality select with encoded length.
+ qm_resolution_->UpdateEncodedSize(encoded_length);
+ }
+ }
+    if (!delta_frame) {
+ loss_prot_logic_->UpdateKeyFrameSize(static_cast<float>(encoded_length));
+ }
+
+ // Updating counters.
+ if (delta_frame) {
+ delta_frame_cnt_++;
+ } else {
+ key_frame_cnt_++;
+ }
+ }
+
+ return VCM_OK;
+}
+
+void MediaOptimization::EnableQM(bool enable) {
+ CriticalSectionScoped lock(crit_sect_.get());
+ enable_qm_ = enable;
+}
+
+void MediaOptimization::EnableFrameDropper(bool enable) {
+ CriticalSectionScoped lock(crit_sect_.get());
+ frame_dropper_->Enable(enable);
+}
+
+void MediaOptimization::SuspendBelowMinBitrate(int threshold_bps,
+ int window_bps) {
+ CriticalSectionScoped lock(crit_sect_.get());
+ assert(threshold_bps > 0 && window_bps >= 0);
+ suspension_threshold_bps_ = threshold_bps;
+ suspension_window_bps_ = window_bps;
+ suspension_enabled_ = true;
+ video_suspended_ = false;
+}
+
+bool MediaOptimization::IsVideoSuspended() const {
+ CriticalSectionScoped lock(crit_sect_.get());
+ return video_suspended_;
+}
+
+bool MediaOptimization::DropFrame() {
+ CriticalSectionScoped lock(crit_sect_.get());
+ UpdateIncomingFrameRate();
+ // Leak appropriate number of bytes.
+ frame_dropper_->Leak((uint32_t)(InputFrameRateInternal() + 0.5f));
+ if (video_suspended_) {
+ return true; // Drop all frames when muted.
+ }
+ return frame_dropper_->DropFrame();
+}
+
+void MediaOptimization::UpdateContentData(
+ const VideoContentMetrics* content_metrics) {
+ CriticalSectionScoped lock(crit_sect_.get());
+ // Updating content metrics.
+ if (content_metrics == NULL) {
+ // Disable QM if metrics are NULL.
+ enable_qm_ = false;
+ qm_resolution_->Reset();
+ } else {
+ content_->UpdateContentData(content_metrics);
+ }
+}
+
+void MediaOptimization::UpdateIncomingFrameRate() {
+ int64_t now = clock_->TimeInMilliseconds();
+  // |incoming_frame_times_| is initialized to -1, so treat any non-positive
+  // entry as unset.
+  if (incoming_frame_times_[0] <= 0) {
+    // No shifting if this is the first time.
+  } else {
+    // Shift all times one step.
+    for (int32_t i = (kFrameCountHistorySize - 2); i >= 0; i--) {
+      incoming_frame_times_[i + 1] = incoming_frame_times_[i];
+    }
+  }
+ incoming_frame_times_[0] = now;
+ ProcessIncomingFrameRate(now);
+}
+
+int32_t MediaOptimization::SelectQuality(
+ VCMQMSettingsCallback* video_qmsettings_callback) {
+ // Reset quantities for QM select.
+ qm_resolution_->ResetQM();
+
+  // Update QM with long-term averaged content metrics.
+ qm_resolution_->UpdateContent(content_->LongTermAvgData());
+
+ // Select quality mode.
+ VCMResolutionScale* qm = NULL;
+ int32_t ret = qm_resolution_->SelectResolution(&qm);
+ if (ret < 0) {
+ return ret;
+ }
+
+ // Check for updates to spatial/temporal modes.
+ QMUpdate(qm, video_qmsettings_callback);
+
+ // Reset all the rate and related frame counters quantities.
+ qm_resolution_->ResetRates();
+
+ // Reset counters.
+ last_qm_update_time_ = clock_->TimeInMilliseconds();
+
+ // Reset content metrics.
+ content_->Reset();
+
+ return VCM_OK;
+}
+
+void MediaOptimization::PurgeOldFrameSamples(int64_t now_ms) {
+ while (!encoded_frame_samples_.empty()) {
+ if (now_ms - encoded_frame_samples_.front().time_complete_ms >
+ kBitrateAverageWinMs) {
+ encoded_frame_samples_.pop_front();
+ } else {
+ break;
+ }
+ }
+}
+
+void MediaOptimization::UpdateSentBitrate(int64_t now_ms) {
+ if (encoded_frame_samples_.empty()) {
+ avg_sent_bit_rate_bps_ = 0;
+ return;
+ }
+ size_t framesize_sum = 0;
+ for (FrameSampleList::iterator it = encoded_frame_samples_.begin();
+ it != encoded_frame_samples_.end();
+ ++it) {
+ framesize_sum += it->size_bytes;
+ }
+ float denom = static_cast<float>(
+ now_ms - encoded_frame_samples_.front().time_complete_ms);
+ if (denom >= 1.0f) {
+ avg_sent_bit_rate_bps_ =
+ static_cast<uint32_t>(framesize_sum * 8.0f * 1000.0f / denom + 0.5f);
+ } else {
+ avg_sent_bit_rate_bps_ = framesize_sum * 8;
+ }
+}
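+
+// Worked example for UpdateSentBitrate (numbers assumed): 30 samples of 1500
+// bytes whose completion times span 1000 ms give framesize_sum = 45000 and
+// denom = 1000, i.e. 45000 * 8 * 1000 / 1000 = 360000 bps (360 kbps).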
+
+void MediaOptimization::UpdateSentFramerate() {
+ if (encoded_frame_samples_.size() <= 1) {
+ avg_sent_framerate_ = encoded_frame_samples_.size();
+ return;
+ }
+ int denom = encoded_frame_samples_.back().timestamp -
+ encoded_frame_samples_.front().timestamp;
+ if (denom > 0) {
+ avg_sent_framerate_ =
+ (90000 * (encoded_frame_samples_.size() - 1) + denom / 2) / denom;
+ } else {
+ avg_sent_framerate_ = encoded_frame_samples_.size();
+ }
+}
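+
+// Worked example for UpdateSentFramerate: with 4 samples and a first-to-last
+// timestamp span of 9000 ticks (100 ms at the 90 kHz RTP clock), the estimate
+// is (90000 * 3 + 4500) / 9000 = 30 fps, rounded to the nearest integer.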
+
+bool MediaOptimization::QMUpdate(
+ VCMResolutionScale* qm,
+ VCMQMSettingsCallback* video_qmsettings_callback) {
+ // Check for no change.
+ if (!qm->change_resolution_spatial && !qm->change_resolution_temporal) {
+ return false;
+ }
+
+ // Check for change in frame rate.
+ if (qm->change_resolution_temporal) {
+ incoming_frame_rate_ = qm->frame_rate;
+ // Reset frame rate estimate.
+ memset(incoming_frame_times_, -1, sizeof(incoming_frame_times_));
+ }
+
+ // Check for change in frame size.
+ if (qm->change_resolution_spatial) {
+ codec_width_ = qm->codec_width;
+ codec_height_ = qm->codec_height;
+ }
+
+ LOG(LS_INFO) << "Media optimizer requests the video resolution to be changed "
+ "to " << qm->codec_width << "x" << qm->codec_height << "@"
+ << qm->frame_rate;
+
+ // Update VPM with new target frame rate and frame size.
+ // Note: use |qm->frame_rate| instead of |_incoming_frame_rate| for updating
+ // target frame rate in VPM frame dropper. The quantity |_incoming_frame_rate|
+ // will vary/fluctuate, and since we don't want to change the state of the
+ // VPM frame dropper, unless a temporal action was selected, we use the
+ // quantity |qm->frame_rate| for updating.
+ video_qmsettings_callback->SetVideoQMSettings(
+ qm->frame_rate, codec_width_, codec_height_);
+ content_->UpdateFrameRate(qm->frame_rate);
+ qm_resolution_->UpdateCodecParameters(
+ qm->frame_rate, codec_width_, codec_height_);
+ return true;
+}
+
+// Check timing constraints and look for significant change in:
+// (1) scene content,
+// (2) target bit rate.
+bool MediaOptimization::CheckStatusForQMchange() {
+ bool status = true;
+
+  // Check that we do not call QMSelect too often, and that we have waited
+  // some time (to sample the metrics) since the last_change_time event, i.e.
+  // the time when the user changed the size/rate/frame rate via
+  // SetEncodingData.
+ int64_t now = clock_->TimeInMilliseconds();
+ if ((now - last_qm_update_time_) < kQmMinIntervalMs ||
+ (now - last_change_time_) < kQmMinIntervalMs) {
+ status = false;
+ }
+
+ return status;
+}
+
+// Allowing VCM to keep track of incoming frame rate.
+void MediaOptimization::ProcessIncomingFrameRate(int64_t now) {
+ int32_t num = 0;
+ int32_t nr_of_frames = 0;
+ for (num = 1; num < (kFrameCountHistorySize - 1); ++num) {
+ if (incoming_frame_times_[num] <= 0 ||
+ // don't use data older than 2 s
+ now - incoming_frame_times_[num] > kFrameHistoryWinMs) {
+ break;
+ } else {
+ nr_of_frames++;
+ }
+ }
+ if (num > 1) {
+ const int64_t diff =
+ incoming_frame_times_[0] - incoming_frame_times_[num - 1];
+ incoming_frame_rate_ = 0.0; // No frame rate estimate available.
+ if (diff > 0) {
+ incoming_frame_rate_ = nr_of_frames * 1000.0f / static_cast<float>(diff);
+ }
+ }
+}
+
+void MediaOptimization::CheckSuspendConditions() {
+ // Check conditions for SuspendBelowMinBitrate. |video_target_bitrate_| is in
+ // bps.
+ if (suspension_enabled_) {
+ if (!video_suspended_) {
+ // Check if we just went below the threshold.
+ if (video_target_bitrate_ < suspension_threshold_bps_) {
+ video_suspended_ = true;
+ }
+ } else {
+ // Video is already suspended. Check if we just went over the threshold
+ // with a margin.
+ if (video_target_bitrate_ >
+ suspension_threshold_bps_ + suspension_window_bps_) {
+ video_suspended_ = false;
+ }
+ }
+ }
+}
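+
+// Example of the suspension hysteresis (thresholds assumed): with a 50 kbps
+// threshold and a 10 kbps window, video is suspended once the target drops
+// below 50 kbps and resumes only when it exceeds 60 kbps, which prevents
+// rapid on/off toggling around the threshold.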
+
+} // namespace media_optimization
+} // namespace webrtc
diff --git a/webrtc/modules/video_coding/main/source/media_optimization.h b/webrtc/modules/video_coding/main/source/media_optimization.h
new file mode 100644
index 0000000000..c4feeff743
--- /dev/null
+++ b/webrtc/modules/video_coding/main/source/media_optimization.h
@@ -0,0 +1,180 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_MEDIA_OPTIMIZATION_H_
+#define WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_MEDIA_OPTIMIZATION_H_
+
+#include <list>
+
+#include "webrtc/base/scoped_ptr.h"
+#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/video_coding/main/interface/video_coding.h"
+#include "webrtc/modules/video_coding/main/source/media_opt_util.h"
+#include "webrtc/modules/video_coding/main/source/qm_select.h"
+#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
+
+namespace webrtc {
+
+// Forward declarations.
+class Clock;
+class FrameDropper;
+class VCMContentMetricsProcessing;
+
+namespace media_optimization {
+
+class MediaOptimization {
+ public:
+ explicit MediaOptimization(Clock* clock);
+ ~MediaOptimization();
+
+ // TODO(andresp): Can Reset and SetEncodingData be done at construction time
+ // only?
+ void Reset();
+
+ // Informs media optimization of initial encoding state.
+ void SetEncodingData(VideoCodecType send_codec_type,
+ int32_t max_bit_rate,
+ uint32_t bit_rate,
+ uint16_t width,
+ uint16_t height,
+ uint32_t frame_rate,
+ int num_temporal_layers,
+ int32_t mtu);
+
+ // Sets target rates for the encoder given the channel parameters.
+ // Inputs: target bitrate - the encoder target bitrate in bits/s.
+ // fraction_lost - packet loss rate in % in the network.
+ // round_trip_time_ms - round trip time in milliseconds.
+ // TODO(andresp): Find if the callbacks can be triggered only after releasing
+ // an internal critical section.
+ uint32_t SetTargetRates(uint32_t target_bitrate,
+ uint8_t fraction_lost,
+ int64_t round_trip_time_ms,
+ VCMProtectionCallback* protection_callback,
+ VCMQMSettingsCallback* qmsettings_callback);
+
+ void SetProtectionMethod(VCMProtectionMethodEnum method);
+ void EnableQM(bool enable);
+ void EnableFrameDropper(bool enable);
+
+ // Lets the sender suspend video when the rate drops below
+ // |threshold_bps|, and turns back on when the rate goes back up above
+ // |threshold_bps| + |window_bps|.
+ void SuspendBelowMinBitrate(int threshold_bps, int window_bps);
+ bool IsVideoSuspended() const;
+
+ bool DropFrame();
+
+ void UpdateContentData(const VideoContentMetrics* content_metrics);
+
+ // Informs Media Optimization of encoded output.
+ int32_t UpdateWithEncodedData(const EncodedImage& encoded_image);
+
+ // InputFrameRate 0 = no frame rate estimate available.
+ uint32_t InputFrameRate();
+ uint32_t SentFrameRate();
+ uint32_t SentBitRate();
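+
+  // Minimal driving loop (hypothetical caller; codec settings assumed):
+  //   MediaOptimization opt(clock);
+  //   opt.SetEncodingData(kVideoCodecVP8, 0, 500000, 640, 480, 30, 1, 1200);
+  //   opt.SetTargetRates(500000, 0, 100, protection_cb, qm_cb);
+  //   if (!opt.DropFrame())
+  //     opt.UpdateWithEncodedData(encoded_image);
+  //   uint32_t sent_bps = opt.SentBitRate();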
+
+ private:
+ enum {
+ kFrameCountHistorySize = 90
+ };
+ enum {
+ kFrameHistoryWinMs = 2000
+ };
+ enum {
+ kBitrateAverageWinMs = 1000
+ };
+
+ struct EncodedFrameSample;
+ typedef std::list<EncodedFrameSample> FrameSampleList;
+
+ void UpdateIncomingFrameRate() EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+ void PurgeOldFrameSamples(int64_t now_ms)
+ EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+ void UpdateSentBitrate(int64_t now_ms) EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+ void UpdateSentFramerate() EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+
+ // Computes new Quality Mode.
+ int32_t SelectQuality(VCMQMSettingsCallback* qmsettings_callback)
+ EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+
+ // Verifies if QM settings differ from default, i.e. if an update is required.
+ // Computes actual values, as will be sent to the encoder.
+ bool QMUpdate(VCMResolutionScale* qm,
+ VCMQMSettingsCallback* qmsettings_callback)
+ EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+
+ // Checks if we should make a QM change. Return true if yes, false otherwise.
+ bool CheckStatusForQMchange() EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+
+ void ProcessIncomingFrameRate(int64_t now)
+ EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+
+ // Checks conditions for suspending the video. The method compares
+ // |video_target_bitrate_| with the threshold values for suspension, and
+ // changes the state of |video_suspended_| accordingly.
+ void CheckSuspendConditions() EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+
+ void SetEncodingDataInternal(VideoCodecType send_codec_type,
+ int32_t max_bit_rate,
+ uint32_t frame_rate,
+ uint32_t bit_rate,
+ uint16_t width,
+ uint16_t height,
+ int num_temporal_layers,
+ int32_t mtu)
+ EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+
+ uint32_t InputFrameRateInternal() EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+
+ uint32_t SentFrameRateInternal() EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+
+ // Protect all members.
+ rtc::scoped_ptr<CriticalSectionWrapper> crit_sect_;
+
+ Clock* clock_ GUARDED_BY(crit_sect_);
+ int32_t max_bit_rate_ GUARDED_BY(crit_sect_);
+ VideoCodecType send_codec_type_ GUARDED_BY(crit_sect_);
+ uint16_t codec_width_ GUARDED_BY(crit_sect_);
+ uint16_t codec_height_ GUARDED_BY(crit_sect_);
+ float user_frame_rate_ GUARDED_BY(crit_sect_);
+ rtc::scoped_ptr<FrameDropper> frame_dropper_ GUARDED_BY(crit_sect_);
+ rtc::scoped_ptr<VCMLossProtectionLogic> loss_prot_logic_
+ GUARDED_BY(crit_sect_);
+ uint8_t fraction_lost_ GUARDED_BY(crit_sect_);
+ uint32_t send_statistics_[4] GUARDED_BY(crit_sect_);
+ uint32_t send_statistics_zero_encode_ GUARDED_BY(crit_sect_);
+ int32_t max_payload_size_ GUARDED_BY(crit_sect_);
+ int video_target_bitrate_ GUARDED_BY(crit_sect_);
+ float incoming_frame_rate_ GUARDED_BY(crit_sect_);
+ int64_t incoming_frame_times_[kFrameCountHistorySize] GUARDED_BY(crit_sect_);
+ bool enable_qm_ GUARDED_BY(crit_sect_);
+ std::list<EncodedFrameSample> encoded_frame_samples_ GUARDED_BY(crit_sect_);
+ uint32_t avg_sent_bit_rate_bps_ GUARDED_BY(crit_sect_);
+ uint32_t avg_sent_framerate_ GUARDED_BY(crit_sect_);
+ uint32_t key_frame_cnt_ GUARDED_BY(crit_sect_);
+ uint32_t delta_frame_cnt_ GUARDED_BY(crit_sect_);
+ rtc::scoped_ptr<VCMContentMetricsProcessing> content_ GUARDED_BY(crit_sect_);
+ rtc::scoped_ptr<VCMQmResolution> qm_resolution_ GUARDED_BY(crit_sect_);
+ int64_t last_qm_update_time_ GUARDED_BY(crit_sect_);
+ int64_t last_change_time_ GUARDED_BY(crit_sect_); // Content/user triggered.
+ int num_layers_ GUARDED_BY(crit_sect_);
+ bool suspension_enabled_ GUARDED_BY(crit_sect_);
+ bool video_suspended_ GUARDED_BY(crit_sect_);
+ int suspension_threshold_bps_ GUARDED_BY(crit_sect_);
+ int suspension_window_bps_ GUARDED_BY(crit_sect_);
+};
+} // namespace media_optimization
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_MEDIA_OPTIMIZATION_H_
diff --git a/webrtc/modules/video_coding/main/source/media_optimization_unittest.cc b/webrtc/modules/video_coding/main/source/media_optimization_unittest.cc
new file mode 100644
index 0000000000..be528d9932
--- /dev/null
+++ b/webrtc/modules/video_coding/main/source/media_optimization_unittest.cc
@@ -0,0 +1,155 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/modules/video_coding/main/source/media_optimization.h"
+#include "webrtc/system_wrappers/include/clock.h"
+
+namespace webrtc {
+namespace media_optimization {
+
+class TestMediaOptimization : public ::testing::Test {
+ protected:
+ enum {
+ kSampleRate = 90000 // RTP timestamps per second.
+ };
+
+  // Note: simulated clock starts at 1 second, since parts of webrtc use 0 as
+ // a special case (e.g. frame rate in media optimization).
+ TestMediaOptimization()
+ : clock_(1000),
+ media_opt_(&clock_),
+ frame_time_ms_(33),
+ next_timestamp_(0) {}
+
+ // This method mimics what happens in VideoSender::AddVideoFrame.
+  void AddFrameAndAdvanceTime(uint32_t bitrate_kbps, bool expect_frame_drop) {
+    bool frame_dropped = media_opt_.DropFrame();
+    EXPECT_EQ(expect_frame_drop, frame_dropped);
+    if (!frame_dropped) {
+      // Callers pass the rate in kbps: bytes = kbps * 1000 * ms / (8 * 1000).
+      size_t bytes_per_frame =
+          bitrate_kbps * 1000 * frame_time_ms_ / (8 * 1000);
+ EncodedImage encoded_image;
+ encoded_image._length = bytes_per_frame;
+ encoded_image._timeStamp = next_timestamp_;
+ encoded_image._frameType = kVideoFrameKey;
+ ASSERT_EQ(VCM_OK, media_opt_.UpdateWithEncodedData(encoded_image));
+ }
+ next_timestamp_ += frame_time_ms_ * kSampleRate / 1000;
+ clock_.AdvanceTimeMilliseconds(frame_time_ms_);
+ }
+
+ SimulatedClock clock_;
+ MediaOptimization media_opt_;
+ int frame_time_ms_;
+ uint32_t next_timestamp_;
+};
+
+
+TEST_F(TestMediaOptimization, VerifyMuting) {
+ // Enable video suspension with these limits.
+ // Suspend the video when the rate is below 50 kbps and resume when it gets
+ // above 50 + 10 kbps again.
+ const uint32_t kThresholdBps = 50000;
+ const uint32_t kWindowBps = 10000;
+ media_opt_.SuspendBelowMinBitrate(kThresholdBps, kWindowBps);
+
+ // The video should not be suspended from the start.
+ EXPECT_FALSE(media_opt_.IsVideoSuspended());
+
+ uint32_t target_bitrate_kbps = 100;
+ media_opt_.SetTargetRates(target_bitrate_kbps * 1000,
+ 0, // Lossrate.
+ 100, // RTT in ms.
+ nullptr, nullptr);
+ media_opt_.EnableFrameDropper(true);
+ for (int time = 0; time < 2000; time += frame_time_ms_) {
+ ASSERT_NO_FATAL_FAILURE(AddFrameAndAdvanceTime(target_bitrate_kbps, false));
+ }
+
+ // Set the target rate below the limit for muting.
+ media_opt_.SetTargetRates(kThresholdBps - 1000,
+ 0, // Lossrate.
+ 100, // RTT in ms.
+ nullptr, nullptr);
+ // Expect the muter to engage immediately and stay muted.
+ // Test during 2 seconds.
+ for (int time = 0; time < 2000; time += frame_time_ms_) {
+ EXPECT_TRUE(media_opt_.IsVideoSuspended());
+ ASSERT_NO_FATAL_FAILURE(AddFrameAndAdvanceTime(target_bitrate_kbps, true));
+ }
+
+ // Set the target above the limit for muting, but not above the
+ // limit + window.
+ media_opt_.SetTargetRates(kThresholdBps + 1000,
+ 0, // Lossrate.
+ 100, // RTT in ms.
+ nullptr, nullptr);
+ // Expect the muter to stay muted.
+ // Test during 2 seconds.
+ for (int time = 0; time < 2000; time += frame_time_ms_) {
+ EXPECT_TRUE(media_opt_.IsVideoSuspended());
+ ASSERT_NO_FATAL_FAILURE(AddFrameAndAdvanceTime(target_bitrate_kbps, true));
+ }
+
+ // Set the target above limit + window.
+ media_opt_.SetTargetRates(kThresholdBps + kWindowBps + 1000,
+ 0, // Lossrate.
+ 100, // RTT in ms.
+ nullptr, nullptr);
+ // Expect the muter to disengage immediately.
+ // Test during 2 seconds.
+ for (int time = 0; time < 2000; time += frame_time_ms_) {
+ EXPECT_FALSE(media_opt_.IsVideoSuspended());
+ ASSERT_NO_FATAL_FAILURE(
+ AddFrameAndAdvanceTime((kThresholdBps + kWindowBps) / 1000, false));
+ }
+}
+
+TEST_F(TestMediaOptimization, ProtectsUsingFecBitrateAboveCodecMax) {
+ static const int kCodecBitrateBps = 100000;
+ static const int kMaxBitrateBps = 130000;
+
+ class ProtectionCallback : public VCMProtectionCallback {
+ int ProtectionRequest(const FecProtectionParams* delta_params,
+ const FecProtectionParams* key_params,
+ uint32_t* sent_video_rate_bps,
+ uint32_t* sent_nack_rate_bps,
+ uint32_t* sent_fec_rate_bps) override {
+ *sent_video_rate_bps = kCodecBitrateBps;
+ *sent_nack_rate_bps = 0;
+ *sent_fec_rate_bps = fec_rate_bps_;
+ return 0;
+ }
+
+ public:
+ uint32_t fec_rate_bps_;
+ } protection_callback;
+
+ media_opt_.SetProtectionMethod(kFec);
+ media_opt_.SetEncodingData(kVideoCodecVP8, kCodecBitrateBps, kCodecBitrateBps,
+ 640, 480, 30, 1, 1000);
+
+ // Using 10% of codec bitrate for FEC, should still be able to use all of it.
+ protection_callback.fec_rate_bps_ = kCodecBitrateBps / 10;
+ uint32_t target_bitrate = media_opt_.SetTargetRates(
+ kMaxBitrateBps, 0, 0, &protection_callback, nullptr);
+
+ EXPECT_EQ(kCodecBitrateBps, static_cast<int>(target_bitrate));
+
+  // Using as much FEC rate as codec bitrate, the new target rate should be
+  // shared equally between the two, but total only half of max (since that
+  // ceiling is hit).
+ protection_callback.fec_rate_bps_ = kCodecBitrateBps;
+ target_bitrate = media_opt_.SetTargetRates(kMaxBitrateBps, 128, 100,
+ &protection_callback, nullptr);
+ EXPECT_EQ(kMaxBitrateBps / 2, static_cast<int>(target_bitrate));
+}
+
+} // namespace media_optimization
+} // namespace webrtc
diff --git a/webrtc/modules/video_coding/main/source/nack_fec_tables.h b/webrtc/modules/video_coding/main/source/nack_fec_tables.h
new file mode 100644
index 0000000000..b82bb1b4ba
--- /dev/null
+++ b/webrtc/modules/video_coding/main/source/nack_fec_tables.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_SOURCE_NACK_FEC_TABLES_H_
+#define WEBRTC_MODULES_VIDEO_CODING_SOURCE_NACK_FEC_TABLES_H_
+
+namespace webrtc
+{
+
+// Table for adjusting FEC rate for NACK/FEC protection method
+// Table values are built as a sigmoid function, ranging from 0 to 100, based on
+// the HybridNackTH values defined in media_opt_util.h.
+const uint16_t VCMNackFecTable[100] = {
+      0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
+      0,   1,   1,   1,   1,   1,   2,   2,   2,   3,
+      3,   4,   5,   6,   7,   9,  10,  12,  15,  18,
+     21,  24,  28,  32,  37,  41,  46,  51,  56,  61,
+     66,  70,  74,  78,  81,  84,  86,  89,  90,  92,
+     93,  95,  95,  96,  97,  97,  98,  98,  99,  99,
+     99,  99,  99,  99, 100, 100, 100, 100, 100, 100,
+    100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
+    100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
+    100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
+};
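+
+// Reading the table (the index derivation lives in media_opt_util.cc; the
+// entry is assumed to act as a percentage weight on the FEC rate): e.g.
+// VCMNackFecTable[40] == 66, so roughly two thirds of the full FEC rate
+// would be kept at that operating point.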
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_CODING_SOURCE_NACK_FEC_TABLES_H_
diff --git a/webrtc/modules/video_coding/main/source/packet.cc b/webrtc/modules/video_coding/main/source/packet.cc
new file mode 100644
index 0000000000..fd5a6abb8c
--- /dev/null
+++ b/webrtc/modules/video_coding/main/source/packet.cc
@@ -0,0 +1,154 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/video_coding/main/source/packet.h"
+
+#include <assert.h>
+
+namespace webrtc {
+
+VCMPacket::VCMPacket()
+ : payloadType(0),
+ timestamp(0),
+ ntp_time_ms_(0),
+ seqNum(0),
+ dataPtr(NULL),
+ sizeBytes(0),
+ markerBit(false),
+ frameType(kEmptyFrame),
+ codec(kVideoCodecUnknown),
+ isFirstPacket(false),
+ completeNALU(kNaluUnset),
+ insertStartCode(false),
+ width(0),
+ height(0),
+ codecSpecificHeader() {}
+
+VCMPacket::VCMPacket(const uint8_t* ptr,
+ const size_t size,
+ const WebRtcRTPHeader& rtpHeader) :
+ payloadType(rtpHeader.header.payloadType),
+ timestamp(rtpHeader.header.timestamp),
+ ntp_time_ms_(rtpHeader.ntp_time_ms),
+ seqNum(rtpHeader.header.sequenceNumber),
+ dataPtr(ptr),
+ sizeBytes(size),
+ markerBit(rtpHeader.header.markerBit),
+
+ frameType(rtpHeader.frameType),
+ codec(kVideoCodecUnknown),
+ isFirstPacket(rtpHeader.type.Video.isFirstPacket),
+ completeNALU(kNaluComplete),
+ insertStartCode(false),
+ width(rtpHeader.type.Video.width),
+ height(rtpHeader.type.Video.height),
+ codecSpecificHeader(rtpHeader.type.Video)
+{
+ CopyCodecSpecifics(rtpHeader.type.Video);
+}
+
+VCMPacket::VCMPacket(const uint8_t* ptr,
+ size_t size,
+ uint16_t seq,
+ uint32_t ts,
+ bool mBit) :
+ payloadType(0),
+ timestamp(ts),
+ ntp_time_ms_(0),
+ seqNum(seq),
+ dataPtr(ptr),
+ sizeBytes(size),
+ markerBit(mBit),
+
+ frameType(kVideoFrameDelta),
+ codec(kVideoCodecUnknown),
+ isFirstPacket(false),
+ completeNALU(kNaluComplete),
+ insertStartCode(false),
+ width(0),
+ height(0),
+ codecSpecificHeader()
+{}
+
+void VCMPacket::Reset() {
+ payloadType = 0;
+ timestamp = 0;
+ ntp_time_ms_ = 0;
+ seqNum = 0;
+ dataPtr = NULL;
+ sizeBytes = 0;
+ markerBit = false;
+ frameType = kEmptyFrame;
+ codec = kVideoCodecUnknown;
+ isFirstPacket = false;
+ completeNALU = kNaluUnset;
+ insertStartCode = false;
+ width = 0;
+ height = 0;
+ memset(&codecSpecificHeader, 0, sizeof(RTPVideoHeader));
+}
+
+void VCMPacket::CopyCodecSpecifics(const RTPVideoHeader& videoHeader) {
+ if (markerBit) {
+ codecSpecificHeader.rotation = videoHeader.rotation;
+ }
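+  // The NALU-completeness mapping used in every branch below is:
+  //   isFirstPacket && markerBit -> kNaluComplete (single-packet frame)
+  //   isFirstPacket only         -> kNaluStart
+  //   markerBit only             -> kNaluEnd
+  //   neither                    -> kNaluIncomplete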
+ switch (videoHeader.codec) {
+ case kRtpVideoVp8:
+      // Handle all packets within a frame as depending on the previous packet.
+ // TODO(holmer): This should be changed to make fragments independent
+ // when the VP8 RTP receiver supports fragments.
+ if (isFirstPacket && markerBit)
+ completeNALU = kNaluComplete;
+ else if (isFirstPacket)
+ completeNALU = kNaluStart;
+ else if (markerBit)
+ completeNALU = kNaluEnd;
+ else
+ completeNALU = kNaluIncomplete;
+
+ codec = kVideoCodecVP8;
+ return;
+ case kRtpVideoVp9:
+ if (isFirstPacket && markerBit)
+ completeNALU = kNaluComplete;
+ else if (isFirstPacket)
+ completeNALU = kNaluStart;
+ else if (markerBit)
+ completeNALU = kNaluEnd;
+ else
+ completeNALU = kNaluIncomplete;
+
+ codec = kVideoCodecVP9;
+ return;
+ case kRtpVideoH264:
+ isFirstPacket = videoHeader.isFirstPacket;
+ if (isFirstPacket)
+ insertStartCode = true;
+
+ if (isFirstPacket && markerBit) {
+ completeNALU = kNaluComplete;
+ } else if (isFirstPacket) {
+ completeNALU = kNaluStart;
+ } else if (markerBit) {
+ completeNALU = kNaluEnd;
+ } else {
+ completeNALU = kNaluIncomplete;
+ }
+ codec = kVideoCodecH264;
+ return;
+ case kRtpVideoGeneric:
+ case kRtpVideoNone:
+ codec = kVideoCodecUnknown;
+ return;
+ }
+}
+
+} // namespace webrtc
diff --git a/webrtc/modules/video_coding/main/source/packet.h b/webrtc/modules/video_coding/main/source/packet.h
new file mode 100644
index 0000000000..80bf532502
--- /dev/null
+++ b/webrtc/modules/video_coding/main/source/packet.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_PACKET_H_
+#define WEBRTC_MODULES_VIDEO_CODING_PACKET_H_
+
+#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/video_coding/main/source/jitter_buffer_common.h"
+#include "webrtc/typedefs.h"
+
+namespace webrtc {
+
+class VCMPacket {
+public:
+ VCMPacket();
+ VCMPacket(const uint8_t* ptr,
+ const size_t size,
+ const WebRtcRTPHeader& rtpHeader);
+ VCMPacket(const uint8_t* ptr,
+ size_t size,
+ uint16_t seqNum,
+ uint32_t timestamp,
+ bool markerBit);
+
+ void Reset();
+
+ uint8_t payloadType;
+ uint32_t timestamp;
+ // NTP time of the capture time in local timebase in milliseconds.
+ int64_t ntp_time_ms_;
+ uint16_t seqNum;
+ const uint8_t* dataPtr;
+ size_t sizeBytes;
+ bool markerBit;
+
+ FrameType frameType;
+ VideoCodecType codec;
+
+ bool isFirstPacket; // Is this first packet in a frame.
+ VCMNaluCompleteness completeNALU; // Default is kNaluIncomplete.
+ bool insertStartCode; // True if a start code should be inserted before this
+ // packet.
+ int width;
+ int height;
+ RTPVideoHeader codecSpecificHeader;
+
+protected:
+ void CopyCodecSpecifics(const RTPVideoHeader& videoHeader);
+};
+
+} // namespace webrtc
+#endif // WEBRTC_MODULES_VIDEO_CODING_PACKET_H_
diff --git a/webrtc/modules/video_coding/main/source/qm_select.cc b/webrtc/modules/video_coding/main/source/qm_select.cc
new file mode 100644
index 0000000000..e86d0755c0
--- /dev/null
+++ b/webrtc/modules/video_coding/main/source/qm_select.cc
@@ -0,0 +1,958 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/video_coding/main/source/qm_select.h"
+
+#include <math.h>
+
+#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/video_coding/main/interface/video_coding_defines.h"
+#include "webrtc/modules/video_coding/main/source/internal_defines.h"
+#include "webrtc/modules/video_coding/main/source/qm_select_data.h"
+#include "webrtc/system_wrappers/include/trace.h"
+
+namespace webrtc {
+
+// QM-METHOD class
+
+VCMQmMethod::VCMQmMethod()
+ : content_metrics_(NULL),
+ width_(0),
+ height_(0),
+ user_frame_rate_(0.0f),
+ native_width_(0),
+ native_height_(0),
+ native_frame_rate_(0.0f),
+ image_type_(kVGA),
+ framerate_level_(kFrameRateHigh),
+ init_(false) {
+ ResetQM();
+}
+
+VCMQmMethod::~VCMQmMethod() {
+}
+
+void VCMQmMethod::ResetQM() {
+ aspect_ratio_ = 1.0f;
+ motion_.Reset();
+ spatial_.Reset();
+ content_class_ = 0;
+}
+
+uint8_t VCMQmMethod::ComputeContentClass() {
+ ComputeMotionNFD();
+ ComputeSpatial();
+ return content_class_ = 3 * motion_.level + spatial_.level;
+}
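+
+// With three motion levels and three spatial levels this yields one of nine
+// content classes; the class is assumed to index the content-dependent
+// adjustment tables in qm_select_data.h.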
+
+void VCMQmMethod::UpdateContent(const VideoContentMetrics* contentMetrics) {
+ content_metrics_ = contentMetrics;
+}
+
+void VCMQmMethod::ComputeMotionNFD() {
+ if (content_metrics_) {
+ motion_.value = content_metrics_->motion_magnitude;
+ }
+ // Determine motion level.
+ if (motion_.value < kLowMotionNfd) {
+ motion_.level = kLow;
+ } else if (motion_.value > kHighMotionNfd) {
+ motion_.level = kHigh;
+ } else {
+ motion_.level = kDefault;
+ }
+}
+
+void VCMQmMethod::ComputeSpatial() {
+ float spatial_err = 0.0;
+ float spatial_err_h = 0.0;
+ float spatial_err_v = 0.0;
+ if (content_metrics_) {
+ spatial_err = content_metrics_->spatial_pred_err;
+ spatial_err_h = content_metrics_->spatial_pred_err_h;
+ spatial_err_v = content_metrics_->spatial_pred_err_v;
+ }
+ // Spatial measure: take average of 3 prediction errors.
+ spatial_.value = (spatial_err + spatial_err_h + spatial_err_v) / 3.0f;
+
+ // Reduce thresholds for large scenes/higher pixel correlation.
+  float scale2 = image_type_ > kVGA ? kScaleTexture : 1.0f;
+
+ if (spatial_.value > scale2 * kHighTexture) {
+ spatial_.level = kHigh;
+ } else if (spatial_.value < scale2 * kLowTexture) {
+ spatial_.level = kLow;
+ } else {
+ spatial_.level = kDefault;
+ }
+}
+
+ImageType VCMQmMethod::GetImageType(uint16_t width,
+ uint16_t height) {
+ // Get the image type for the encoder frame size.
+ uint32_t image_size = width * height;
+ if (image_size == kSizeOfImageType[kQCIF]) {
+ return kQCIF;
+ } else if (image_size == kSizeOfImageType[kHCIF]) {
+ return kHCIF;
+ } else if (image_size == kSizeOfImageType[kQVGA]) {
+ return kQVGA;
+ } else if (image_size == kSizeOfImageType[kCIF]) {
+ return kCIF;
+ } else if (image_size == kSizeOfImageType[kHVGA]) {
+ return kHVGA;
+ } else if (image_size == kSizeOfImageType[kVGA]) {
+ return kVGA;
+ } else if (image_size == kSizeOfImageType[kQFULLHD]) {
+ return kQFULLHD;
+ } else if (image_size == kSizeOfImageType[kWHD]) {
+ return kWHD;
+ } else if (image_size == kSizeOfImageType[kFULLHD]) {
+ return kFULLHD;
+ } else {
+    // No exact match; find the closest one.
+ return FindClosestImageType(width, height);
+ }
+}
+
+ImageType VCMQmMethod::FindClosestImageType(uint16_t width, uint16_t height) {
+ float size = static_cast<float>(width * height);
+ float min = size;
+ int isel = 0;
+ for (int i = 0; i < kNumImageTypes; ++i) {
+ float dist = fabs(size - kSizeOfImageType[i]);
+ if (dist < min) {
+ min = dist;
+ isel = i;
+ }
+ }
+ return static_cast<ImageType>(isel);
+}
+
+FrameRateLevelClass VCMQmMethod::FrameRateLevel(float avg_framerate) {
+ if (avg_framerate <= kLowFrameRate) {
+ return kFrameRateLow;
+ } else if (avg_framerate <= kMiddleFrameRate) {
+ return kFrameRateMiddle1;
+ } else if (avg_framerate <= kHighFrameRate) {
+ return kFrameRateMiddle2;
+ } else {
+ return kFrameRateHigh;
+ }
+}
+
+// RESOLUTION CLASS
+
+VCMQmResolution::VCMQmResolution()
+ : qm_(new VCMResolutionScale()) {
+ Reset();
+}
+
+VCMQmResolution::~VCMQmResolution() {
+ delete qm_;
+}
+
+void VCMQmResolution::ResetRates() {
+ sum_target_rate_ = 0.0f;
+ sum_incoming_framerate_ = 0.0f;
+ sum_rate_MM_ = 0.0f;
+ sum_rate_MM_sgn_ = 0.0f;
+ sum_packet_loss_ = 0.0f;
+ buffer_level_ = kInitBufferLevel * target_bitrate_;
+ frame_cnt_ = 0;
+ frame_cnt_delta_ = 0;
+ low_buffer_cnt_ = 0;
+ update_rate_cnt_ = 0;
+}
+
+void VCMQmResolution::ResetDownSamplingState() {
+ state_dec_factor_spatial_ = 1.0;
+ state_dec_factor_temporal_ = 1.0;
+ for (int i = 0; i < kDownActionHistorySize; i++) {
+ down_action_history_[i].spatial = kNoChangeSpatial;
+ down_action_history_[i].temporal = kNoChangeTemporal;
+ }
+}
+
+void VCMQmResolution::Reset() {
+ target_bitrate_ = 0.0f;
+ incoming_framerate_ = 0.0f;
+ buffer_level_ = 0.0f;
+ per_frame_bandwidth_ = 0.0f;
+ avg_target_rate_ = 0.0f;
+ avg_incoming_framerate_ = 0.0f;
+ avg_ratio_buffer_low_ = 0.0f;
+ avg_rate_mismatch_ = 0.0f;
+ avg_rate_mismatch_sgn_ = 0.0f;
+ avg_packet_loss_ = 0.0f;
+ encoder_state_ = kStableEncoding;
+ num_layers_ = 1;
+ ResetRates();
+ ResetDownSamplingState();
+ ResetQM();
+}
+
+EncoderState VCMQmResolution::GetEncoderState() {
+ return encoder_state_;
+}
+
+// Initialize state after re-initializing the encoder,
+// i.e., after SetEncodingData() in mediaOpt.
+int VCMQmResolution::Initialize(float bitrate,
+ float user_framerate,
+ uint16_t width,
+ uint16_t height,
+ int num_layers) {
+ if (user_framerate == 0.0f || width == 0 || height == 0) {
+ return VCM_PARAMETER_ERROR;
+ }
+ Reset();
+ target_bitrate_ = bitrate;
+ incoming_framerate_ = user_framerate;
+ UpdateCodecParameters(user_framerate, width, height);
+ native_width_ = width;
+ native_height_ = height;
+ native_frame_rate_ = user_framerate;
+ num_layers_ = num_layers;
+ // Initial buffer level.
+ buffer_level_ = kInitBufferLevel * target_bitrate_;
+ // Per-frame bandwidth.
+ per_frame_bandwidth_ = target_bitrate_ / user_framerate;
+ init_ = true;
+ return VCM_OK;
+}
+
+void VCMQmResolution::UpdateCodecParameters(float frame_rate, uint16_t width,
+ uint16_t height) {
+ width_ = width;
+ height_ = height;
+ // |user_frame_rate_| is the target frame rate for the VPM frame dropper.
+ user_frame_rate_ = frame_rate;
+ image_type_ = GetImageType(width, height);
+}
+
+// Update rate data after every encoded frame.
+void VCMQmResolution::UpdateEncodedSize(size_t encoded_size) {
+ frame_cnt_++;
+ // Convert to Kbps.
+ float encoded_size_kbits = 8.0f * static_cast<float>(encoded_size) / 1000.0f;
+
+ // Update the buffer level:
+ // Note this is not the actual encoder buffer level.
+ // |buffer_level_| is reset to an initial value after SelectResolution is
+ // called, and does not account for frame dropping by encoder or VCM.
+ buffer_level_ += per_frame_bandwidth_ - encoded_size_kbits;
+
+ // Counter for occurrences of low buffer level:
+ // low/negative values means encoder is likely dropping frames.
+ if (buffer_level_ <= kPercBufferThr * kInitBufferLevel * target_bitrate_) {
+ low_buffer_cnt_++;
+ }
+}
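+
+// Worked example for the buffer model above (illustrative numbers): with
+// target_bitrate_ = 300 kbps at 30 fps, per_frame_bandwidth_ = 10 kbits and
+// the initial buffer level is kInitBufferLevel * 300 = 150 kbits. A 50 kbit
+// encoded frame moves the level by (10 - 50) = -40 kbits, and any level at
+// or below kPercBufferThr * 150 = 15 kbits increments |low_buffer_cnt_|.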
+
+// Update various quantities after SetTargetRates in MediaOpt.
+void VCMQmResolution::UpdateRates(float target_bitrate,
+ float encoder_sent_rate,
+ float incoming_framerate,
+ uint8_t packet_loss) {
+ // Sum the target bitrate: this is the encoder rate from previous update
+ // (~1sec), i.e., before the update for the next ~1sec.
+ sum_target_rate_ += target_bitrate_;
+ update_rate_cnt_++;
+
+ // Sum the received (from RTCP reports) packet loss rates.
+ sum_packet_loss_ += static_cast<float>(packet_loss / 255.0);
+
+ // Sum the sequence rate mismatch:
+ // Mismatch here is based on the difference between the target rate
+ // used (in previous ~1sec) and the average actual encoding rate measured
+ // at previous ~1sec.
+ float diff = target_bitrate_ - encoder_sent_rate;
+ if (target_bitrate_ > 0.0)
+ sum_rate_MM_ += fabs(diff) / target_bitrate_;
+ int sgn_diff = diff > 0 ? 1 : (diff < 0 ? -1 : 0);
+ // Used to check for consistent under-shooting (+) or over-shooting (-) of
+ // the target rate.
+ sum_rate_MM_sgn_ += sgn_diff;
+
+ // Update with the current new target and frame rate:
+ // these values are ones the encoder will use for the current/next ~1sec.
+ target_bitrate_ = target_bitrate;
+ incoming_framerate_ = incoming_framerate;
+ sum_incoming_framerate_ += incoming_framerate_;
+ // Update the per_frame_bandwidth:
+ // this is the per_frame_bw for the current/next ~1sec.
+ per_frame_bandwidth_ = 0.0f;
+ if (incoming_framerate_ > 0.0f) {
+ per_frame_bandwidth_ = target_bitrate_ / incoming_framerate_;
+ }
+}
+
+// Select the resolution factors: frame size and frame rate change (qm scales).
+// Selection is for going down in resolution, or for going back up
+// (if a previous down-sampling action was taken).
+
+// In the current version the following constraints are imposed:
+// 1) We only allow for one action, either down or up, at a given time.
+// 2) The possible down-sampling actions are: spatial by 1/2x1/2, 3/4x3/4;
+// temporal/frame rate reduction by 1/2 and 2/3.
+// 3) The action for going back up is the reverse of last (spatial or temporal)
+// down-sampling action. The list of down-sampling actions from the
+// Initialize() state are kept in |down_action_history_|.
+// 4) The total amount of down-sampling (spatial and/or temporal) from the
+// Initialize() state (native resolution) is limited by various factors.
+int VCMQmResolution::SelectResolution(VCMResolutionScale** qm) {
+ if (!init_) {
+ return VCM_UNINITIALIZED;
+ }
+ if (content_metrics_ == NULL) {
+ Reset();
+ *qm = qm_;
+ return VCM_OK;
+ }
+
+ // Check conditions on down-sampling state.
+ assert(state_dec_factor_spatial_ >= 1.0f);
+ assert(state_dec_factor_temporal_ >= 1.0f);
+ assert(state_dec_factor_spatial_ <= kMaxSpatialDown);
+ assert(state_dec_factor_temporal_ <= kMaxTempDown);
+ assert(state_dec_factor_temporal_ * state_dec_factor_spatial_ <=
+ kMaxTotalDown);
+
+ // Compute content class for selection.
+ content_class_ = ComputeContentClass();
+ // Compute various rate quantities for selection.
+ ComputeRatesForSelection();
+
+ // Get the encoder state.
+ ComputeEncoderState();
+
+ // Default settings: no action.
+ SetDefaultAction();
+ *qm = qm_;
+
+ // Check for going back up in resolution, if we have had some down-sampling
+ // relative to native state in Initialize().
+ if (down_action_history_[0].spatial != kNoChangeSpatial ||
+ down_action_history_[0].temporal != kNoChangeTemporal) {
+ if (GoingUpResolution()) {
+ *qm = qm_;
+ return VCM_OK;
+ }
+ }
+
+ // Check for going down in resolution.
+ if (GoingDownResolution()) {
+ *qm = qm_;
+ return VCM_OK;
+ }
+ return VCM_OK;
+}
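+
+// Illustrative call sequence (a sketch mirroring the unit tests in
+// qm_select_unittest.cc; not part of this file's interface):
+//   VCMQmResolution qm;
+//   qm.Initialize(300.0f, 30.0f, 640, 480, 1);
+//   qm.UpdateCodecParameters(30.0f, 640, 480);
+//   qm.UpdateRates(300.0f, 450.0f, 30.0f, 10); // Every ~1 sec.
+//   qm.UpdateContent(metrics); // |metrics| is a VideoContentMetrics*.
+//   VCMResolutionScale* scale = NULL;
+//   qm.SelectResolution(&scale); // Returns VCM_OK on success.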
+
+void VCMQmResolution::SetDefaultAction() {
+ qm_->codec_width = width_;
+ qm_->codec_height = height_;
+ qm_->frame_rate = user_frame_rate_;
+ qm_->change_resolution_spatial = false;
+ qm_->change_resolution_temporal = false;
+ qm_->spatial_width_fact = 1.0f;
+ qm_->spatial_height_fact = 1.0f;
+ qm_->temporal_fact = 1.0f;
+ action_.spatial = kNoChangeSpatial;
+ action_.temporal = kNoChangeTemporal;
+}
+
+void VCMQmResolution::ComputeRatesForSelection() {
+ avg_target_rate_ = 0.0f;
+ avg_incoming_framerate_ = 0.0f;
+ avg_ratio_buffer_low_ = 0.0f;
+ avg_rate_mismatch_ = 0.0f;
+ avg_rate_mismatch_sgn_ = 0.0f;
+ avg_packet_loss_ = 0.0f;
+ if (frame_cnt_ > 0) {
+ avg_ratio_buffer_low_ = static_cast<float>(low_buffer_cnt_) /
+ static_cast<float>(frame_cnt_);
+ }
+ if (update_rate_cnt_ > 0) {
+ avg_rate_mismatch_ = static_cast<float>(sum_rate_MM_) /
+ static_cast<float>(update_rate_cnt_);
+ avg_rate_mismatch_sgn_ = static_cast<float>(sum_rate_MM_sgn_) /
+ static_cast<float>(update_rate_cnt_);
+ avg_target_rate_ = static_cast<float>(sum_target_rate_) /
+ static_cast<float>(update_rate_cnt_);
+ avg_incoming_framerate_ = static_cast<float>(sum_incoming_framerate_) /
+ static_cast<float>(update_rate_cnt_);
+ avg_packet_loss_ = static_cast<float>(sum_packet_loss_) /
+ static_cast<float>(update_rate_cnt_);
+ }
+ // For selection we may want to weight some quantities more heavily
+ // with the current (i.e., next ~1sec) rate values.
+ avg_target_rate_ = kWeightRate * avg_target_rate_ +
+ (1.0 - kWeightRate) * target_bitrate_;
+ avg_incoming_framerate_ = kWeightRate * avg_incoming_framerate_ +
+ (1.0 - kWeightRate) * incoming_framerate_;
+ // Use base layer frame rate for temporal layers: this will favor spatial.
+ assert(num_layers_ > 0);
+ framerate_level_ = FrameRateLevel(
+ avg_incoming_framerate_ / static_cast<float>(1 << (num_layers_ - 1)));
+}
+
+void VCMQmResolution::ComputeEncoderState() {
+ // Default.
+ encoder_state_ = kStableEncoding;
+
+ // Assign stressed state if:
+ // 1) occurrences of low buffer levels is high, or
+ // 2) rate mis-match is high, and consistent over-shooting by encoder.
+ if ((avg_ratio_buffer_low_ > kMaxBufferLow) ||
+ ((avg_rate_mismatch_ > kMaxRateMisMatch) &&
+ (avg_rate_mismatch_sgn_ < -kRateOverShoot))) {
+ encoder_state_ = kStressedEncoding;
+ }
+ // Assign easy state if:
+ // 1) rate mis-match is high, and
+ // 2) consistent under-shooting by encoder.
+ if ((avg_rate_mismatch_ > kMaxRateMisMatch) &&
+ (avg_rate_mismatch_sgn_ > kRateUnderShoot)) {
+ encoder_state_ = kEasyEncoding;
+ }
+}
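+
+// Example (with the numbers used in the unit tests): if every ~1 sec update
+// has target rate 300 kbps but encoder sent rate 900 kbps, then
+// avg_rate_mismatch_ = |300 - 900| / 300 = 2.0 > kMaxRateMisMatch, and
+// avg_rate_mismatch_sgn_ = -1 < -kRateOverShoot (consistent over-shooting),
+// so the state is kStressedEncoding.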
+
+bool VCMQmResolution::GoingUpResolution() {
+ // For going up, we check for undoing the previous down-sampling action.
+
+ float fac_width = kFactorWidthSpatial[down_action_history_[0].spatial];
+ float fac_height = kFactorHeightSpatial[down_action_history_[0].spatial];
+ float fac_temp = kFactorTemporal[down_action_history_[0].temporal];
+ // For going up spatially, we undo at most one 3/4x3/4 stage at a time.
+ // So if the last spatial action was 1/2x1/2, it is undone in 2 stages.
+ // Modify fac_width/fac_height for this case.
+ if (down_action_history_[0].spatial == kOneQuarterSpatialUniform) {
+ fac_width = kFactorWidthSpatial[kOneQuarterSpatialUniform] /
+ kFactorWidthSpatial[kOneHalfSpatialUniform];
+ fac_height = kFactorHeightSpatial[kOneQuarterSpatialUniform] /
+ kFactorHeightSpatial[kOneHalfSpatialUniform];
+ }
+
+ // Check if we should go up both spatially and temporally.
+ if (down_action_history_[0].spatial != kNoChangeSpatial &&
+ down_action_history_[0].temporal != kNoChangeTemporal) {
+ if (ConditionForGoingUp(fac_width, fac_height, fac_temp,
+ kTransRateScaleUpSpatialTemp)) {
+ action_.spatial = down_action_history_[0].spatial;
+ action_.temporal = down_action_history_[0].temporal;
+ UpdateDownsamplingState(kUpResolution);
+ return true;
+ }
+ }
+ // Check if we should go up either spatially or temporally.
+ bool selected_up_spatial = false;
+ bool selected_up_temporal = false;
+ if (down_action_history_[0].spatial != kNoChangeSpatial) {
+ selected_up_spatial = ConditionForGoingUp(fac_width, fac_height, 1.0f,
+ kTransRateScaleUpSpatial);
+ }
+ if (down_action_history_[0].temporal != kNoChangeTemporal) {
+ selected_up_temporal = ConditionForGoingUp(1.0f, 1.0f, fac_temp,
+ kTransRateScaleUpTemp);
+ }
+ if (selected_up_spatial && !selected_up_temporal) {
+ action_.spatial = down_action_history_[0].spatial;
+ action_.temporal = kNoChangeTemporal;
+ UpdateDownsamplingState(kUpResolution);
+ return true;
+ } else if (!selected_up_spatial && selected_up_temporal) {
+ action_.spatial = kNoChangeSpatial;
+ action_.temporal = down_action_history_[0].temporal;
+ UpdateDownsamplingState(kUpResolution);
+ return true;
+ } else if (selected_up_spatial && selected_up_temporal) {
+ PickSpatialOrTemporal();
+ UpdateDownsamplingState(kUpResolution);
+ return true;
+ }
+ return false;
+}
+
+bool VCMQmResolution::ConditionForGoingUp(float fac_width,
+ float fac_height,
+ float fac_temp,
+ float scale_fac) {
+ float estimated_transition_rate_up = GetTransitionRate(fac_width, fac_height,
+ fac_temp, scale_fac);
+ // Go back up if:
+ // 1) target rate is above threshold and current encoder state is stable, or
+ // 2) encoder state is easy (encoder is significantly under-shooting target).
+ if (((avg_target_rate_ > estimated_transition_rate_up) &&
+ (encoder_state_ == kStableEncoding)) ||
+ (encoder_state_ == kEasyEncoding)) {
+ return true;
+ } else {
+ return false;
+ }
+}
+
+bool VCMQmResolution::GoingDownResolution() {
+ float estimated_transition_rate_down =
+ GetTransitionRate(1.0f, 1.0f, 1.0f, 1.0f);
+ float max_rate = kFrameRateFac[framerate_level_] * kMaxRateQm[image_type_];
+ // Resolution reduction if:
+ // (1) target rate is below transition rate, or
+ // (2) encoder is in stressed state and target rate below a max threshold.
+ if ((avg_target_rate_ < estimated_transition_rate_down) ||
+ (encoder_state_ == kStressedEncoding && avg_target_rate_ < max_rate)) {
+ // Get the down-sampling action: based on content class, and how low
+ // average target rate is relative to transition rate.
+ uint8_t spatial_fact =
+ kSpatialAction[content_class_ +
+ 9 * RateClass(estimated_transition_rate_down)];
+ uint8_t temp_fact =
+ kTemporalAction[content_class_ +
+ 9 * RateClass(estimated_transition_rate_down)];
+
+ switch (spatial_fact) {
+ case 4: {
+ action_.spatial = kOneQuarterSpatialUniform;
+ break;
+ }
+ case 2: {
+ action_.spatial = kOneHalfSpatialUniform;
+ break;
+ }
+ case 1: {
+ action_.spatial = kNoChangeSpatial;
+ break;
+ }
+ default: {
+ assert(false);
+ }
+ }
+ switch (temp_fact) {
+ case 3: {
+ action_.temporal = kTwoThirdsTemporal;
+ break;
+ }
+ case 2: {
+ action_.temporal = kOneHalfTemporal;
+ break;
+ }
+ case 1: {
+ action_.temporal = kNoChangeTemporal;
+ break;
+ }
+ default: {
+ assert(false);
+ }
+ }
+ // Only allow for one action (spatial or temporal) at a given time.
+ assert(action_.temporal == kNoChangeTemporal ||
+ action_.spatial == kNoChangeSpatial);
+
+ // Adjust cases not captured in tables, mainly based on frame rate, and
+ // also check for odd frame sizes.
+ AdjustAction();
+
+ // Update down-sampling state.
+ if (action_.spatial != kNoChangeSpatial ||
+ action_.temporal != kNoChangeTemporal) {
+ UpdateDownsamplingState(kDownResolution);
+ return true;
+ }
+ }
+ return false;
+}
+
+float VCMQmResolution::GetTransitionRate(float fac_width,
+ float fac_height,
+ float fac_temp,
+ float scale_fac) {
+ ImageType image_type = GetImageType(
+ static_cast<uint16_t>(fac_width * width_),
+ static_cast<uint16_t>(fac_height * height_));
+
+ FrameRateLevelClass framerate_level =
+ FrameRateLevel(fac_temp * avg_incoming_framerate_);
+ // If we are checking for going up temporally, and this is the last
+ // temporal action, then use native frame rate.
+ if (down_action_history_[1].temporal == kNoChangeTemporal &&
+ fac_temp > 1.0f) {
+ framerate_level = FrameRateLevel(native_frame_rate_);
+ }
+
+ // The maximum allowed rate below which down-sampling is allowed:
+ // Nominal values based on image format (frame size and frame rate).
+ float max_rate = kFrameRateFac[framerate_level] * kMaxRateQm[image_type];
+
+ uint8_t image_class = image_type > kVGA ? 1 : 0;
+ uint8_t table_index = image_class * 9 + content_class_;
+ // Scale factor for the down-sampling transition threshold:
+ // based on the content class and the image size.
+ float scale_trans_rate = kScaleTransRateQm[table_index];
+ // Threshold bitrate for the resolution action.
+ return static_cast<float>(scale_fac * scale_trans_rate * max_rate);
+}
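+
+// Worked example (illustrative): for VGA at the high frame rate level with
+// content class (motion = L, spatial = L), max_rate = 1.0 * 400 = 400 kbps
+// and kScaleTransRateQm gives 0.40, so with scale_fac = 1.0 the transition
+// rate is 0.40 * 400 = 160 kbps; below this, down-sampling is considered.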
+
+void VCMQmResolution::UpdateDownsamplingState(UpDownAction up_down) {
+ if (up_down == kUpResolution) {
+ qm_->spatial_width_fact = 1.0f / kFactorWidthSpatial[action_.spatial];
+ qm_->spatial_height_fact = 1.0f / kFactorHeightSpatial[action_.spatial];
+ // If last spatial action was 1/2x1/2, we undo it in two steps, so the
+ // spatial scale factor in this first step is modified as (4.0/3.0 / 2.0).
+ if (action_.spatial == kOneQuarterSpatialUniform) {
+ qm_->spatial_width_fact =
+ 1.0f * kFactorWidthSpatial[kOneHalfSpatialUniform] /
+ kFactorWidthSpatial[kOneQuarterSpatialUniform];
+ qm_->spatial_height_fact =
+ 1.0f * kFactorHeightSpatial[kOneHalfSpatialUniform] /
+ kFactorHeightSpatial[kOneQuarterSpatialUniform];
+ }
+ qm_->temporal_fact = 1.0f / kFactorTemporal[action_.temporal];
+ RemoveLastDownAction();
+ } else if (up_down == kDownResolution) {
+ ConstrainAmountOfDownSampling();
+ ConvertSpatialFractionalToWhole();
+ qm_->spatial_width_fact = kFactorWidthSpatial[action_.spatial];
+ qm_->spatial_height_fact = kFactorHeightSpatial[action_.spatial];
+ qm_->temporal_fact = kFactorTemporal[action_.temporal];
+ InsertLatestDownAction();
+ } else {
+ // This function should only be called if either the Up or Down action
+ // has been selected.
+ assert(false);
+ }
+ UpdateCodecResolution();
+ state_dec_factor_spatial_ = state_dec_factor_spatial_ *
+ qm_->spatial_width_fact * qm_->spatial_height_fact;
+ state_dec_factor_temporal_ = state_dec_factor_temporal_ * qm_->temporal_fact;
+}
+
+void VCMQmResolution::UpdateCodecResolution() {
+ if (action_.spatial != kNoChangeSpatial) {
+ qm_->change_resolution_spatial = true;
+ qm_->codec_width = static_cast<uint16_t>(width_ /
+ qm_->spatial_width_fact + 0.5f);
+ qm_->codec_height = static_cast<uint16_t>(height_ /
+ qm_->spatial_height_fact + 0.5f);
+ // Size should not exceed native sizes.
+ assert(qm_->codec_width <= native_width_);
+ assert(qm_->codec_height <= native_height_);
+ // New sizes should be multiple of 2, otherwise spatial should not have
+ // been selected.
+ assert(qm_->codec_width % 2 == 0);
+ assert(qm_->codec_height % 2 == 0);
+ }
+ if (action_.temporal != kNoChangeTemporal) {
+ qm_->change_resolution_temporal = true;
+ // Update the frame rate based on the average incoming frame rate.
+ qm_->frame_rate = avg_incoming_framerate_ / qm_->temporal_fact + 0.5f;
+ if (down_action_history_[0].temporal == kNoChangeTemporal) {
+ // When we undo the last temporal-down action, make sure we go back up
+ // to the native frame rate. Since the incoming frame rate may
+ // fluctuate over time, |avg_incoming_framerate_| scaled back up may
+ // be smaller than |native_frame_rate_|.
+ qm_->frame_rate = native_frame_rate_;
+ }
+ }
+}
+
+uint8_t VCMQmResolution::RateClass(float transition_rate) {
+ return avg_target_rate_ < (kFacLowRate * transition_rate) ? 0:
+ (avg_target_rate_ >= transition_rate ? 2 : 1);
+}
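+
+// Example: with a transition rate of 160 kbps and kFacLowRate = 0.5, an
+// average target rate below 80 kbps maps to rate class 0, 80-160 kbps to
+// class 1, and 160 kbps or above to class 2.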
+
+// TODO(marpan): Would be better to capture these frame rate adjustments by
+// extending the table data (qm_select_data.h).
+void VCMQmResolution::AdjustAction() {
+ // If the spatial level is default state (neither low or high), motion level
+ // is not high, and spatial action was selected, switch to 2/3 frame rate
+ // reduction if the average incoming frame rate is high.
+ if (spatial_.level == kDefault && motion_.level != kHigh &&
+ action_.spatial != kNoChangeSpatial &&
+ framerate_level_ == kFrameRateHigh) {
+ action_.spatial = kNoChangeSpatial;
+ action_.temporal = kTwoThirdsTemporal;
+ }
+ // If both motion and spatial level are low, and temporal down action was
+ // selected, switch to spatial 3/4x3/4 if the frame rate is not above the
+ // lower middle level (|kFrameRateMiddle1|).
+ if (motion_.level == kLow && spatial_.level == kLow &&
+ framerate_level_ <= kFrameRateMiddle1 &&
+ action_.temporal != kNoChangeTemporal) {
+ action_.spatial = kOneHalfSpatialUniform;
+ action_.temporal = kNoChangeTemporal;
+ }
+ // If spatial action is selected, and there has been too much spatial
+ // reduction already (i.e., 1/4), then switch to temporal action if the
+ // average frame rate is not low.
+ if (action_.spatial != kNoChangeSpatial &&
+ down_action_history_[0].spatial == kOneQuarterSpatialUniform &&
+ framerate_level_ != kFrameRateLow) {
+ action_.spatial = kNoChangeSpatial;
+ action_.temporal = kTwoThirdsTemporal;
+ }
+ // Never use temporal action if number of temporal layers is above 2.
+ if (num_layers_ > 2) {
+ if (action_.temporal != kNoChangeTemporal) {
+ action_.spatial = kOneHalfSpatialUniform;
+ }
+ action_.temporal = kNoChangeTemporal;
+ }
+ // If spatial action was selected, we need to make sure the frame sizes
+ // are multiples of two. Otherwise switch to 2/3 temporal.
+ if (action_.spatial != kNoChangeSpatial &&
+ !EvenFrameSize()) {
+ action_.spatial = kNoChangeSpatial;
+ // Only one action (spatial or temporal) is allowed at a given time; the
+ // rejected spatial action is replaced by 2/3 temporal.
+ action_.temporal = kTwoThirdsTemporal;
+ }
+}
+
+void VCMQmResolution::ConvertSpatialFractionalToWhole() {
+ // If 3/4 spatial is selected, check if there has been another 3/4,
+ // and if so, combine them into 1/2. 1/2 scaling is more efficient than 9/16.
+ // Note we define 3/4x3/4 spatial as kOneHalfSpatialUniform.
+ if (action_.spatial == kOneHalfSpatialUniform) {
+ bool found = false;
+ int isel = kDownActionHistorySize;
+ for (int i = 0; i < kDownActionHistorySize; ++i) {
+ if (down_action_history_[i].spatial == kOneHalfSpatialUniform) {
+ isel = i;
+ found = true;
+ break;
+ }
+ }
+ if (found) {
+ action_.spatial = kOneQuarterSpatialUniform;
+ state_dec_factor_spatial_ = state_dec_factor_spatial_ /
+ (kFactorWidthSpatial[kOneHalfSpatialUniform] *
+ kFactorHeightSpatial[kOneHalfSpatialUniform]);
+ // Check if switching to 1/2x1/2 (=1/4) spatial is allowed.
+ ConstrainAmountOfDownSampling();
+ if (action_.spatial == kNoChangeSpatial) {
+ // Not allowed. Go back to 3/4x3/4 spatial.
+ action_.spatial = kOneHalfSpatialUniform;
+ state_dec_factor_spatial_ = state_dec_factor_spatial_ *
+ kFactorWidthSpatial[kOneHalfSpatialUniform] *
+ kFactorHeightSpatial[kOneHalfSpatialUniform];
+ } else {
+ // Switching is allowed. Remove 3/4x3/4 from the history, and update
+ // the frame size.
+ for (int i = isel; i < kDownActionHistorySize - 1; ++i) {
+ down_action_history_[i].spatial =
+ down_action_history_[i + 1].spatial;
+ }
+ width_ = width_ * kFactorWidthSpatial[kOneHalfSpatialUniform];
+ height_ = height_ * kFactorHeightSpatial[kOneHalfSpatialUniform];
+ }
+ }
+ }
+}
+
+// Returns false if the new frame sizes, under the current spatial action,
+// are not multiples of two.
+bool VCMQmResolution::EvenFrameSize() {
+ if (action_.spatial == kOneHalfSpatialUniform) {
+ if ((width_ * 3 / 4) % 2 != 0 || (height_ * 3 / 4) % 2 != 0) {
+ return false;
+ }
+ } else if (action_.spatial == kOneQuarterSpatialUniform) {
+ if ((width_ * 1 / 2) % 2 != 0 || (height_ * 1 / 2) % 2 != 0) {
+ return false;
+ }
+ }
+ return true;
+}
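+
+// Example: 640x480 under 3/4x3/4 gives 480x360 (both even), so a spatial
+// action is kept; 180x120 under 3/4x3/4 would give 135x90 (odd), so
+// AdjustAction() falls back to 2/3 temporal instead.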
+
+void VCMQmResolution::InsertLatestDownAction() {
+ if (action_.spatial != kNoChangeSpatial) {
+ for (int i = kDownActionHistorySize - 1; i > 0; --i) {
+ down_action_history_[i].spatial = down_action_history_[i - 1].spatial;
+ }
+ down_action_history_[0].spatial = action_.spatial;
+ }
+ if (action_.temporal != kNoChangeTemporal) {
+ for (int i = kDownActionHistorySize - 1; i > 0; --i) {
+ down_action_history_[i].temporal = down_action_history_[i - 1].temporal;
+ }
+ down_action_history_[0].temporal = action_.temporal;
+ }
+}
+
+void VCMQmResolution::RemoveLastDownAction() {
+ if (action_.spatial != kNoChangeSpatial) {
+ // If the last spatial action was 1/2x1/2 we replace it with 3/4x3/4.
+ if (action_.spatial == kOneQuarterSpatialUniform) {
+ down_action_history_[0].spatial = kOneHalfSpatialUniform;
+ } else {
+ for (int i = 0; i < kDownActionHistorySize - 1; ++i) {
+ down_action_history_[i].spatial = down_action_history_[i + 1].spatial;
+ }
+ down_action_history_[kDownActionHistorySize - 1].spatial =
+ kNoChangeSpatial;
+ }
+ }
+ if (action_.temporal != kNoChangeTemporal) {
+ for (int i = 0; i < kDownActionHistorySize - 1; ++i) {
+ down_action_history_[i].temporal = down_action_history_[i + 1].temporal;
+ }
+ down_action_history_[kDownActionHistorySize - 1].temporal =
+ kNoChangeTemporal;
+ }
+}
+
+void VCMQmResolution::ConstrainAmountOfDownSampling() {
+ // Sanity checks on down-sampling selection:
+ // override the settings for too small image size and/or frame rate.
+ // Also check the limit on current down-sampling states.
+
+ float spatial_width_fact = kFactorWidthSpatial[action_.spatial];
+ float spatial_height_fact = kFactorHeightSpatial[action_.spatial];
+ float temporal_fact = kFactorTemporal[action_.temporal];
+ float new_dec_factor_spatial = state_dec_factor_spatial_ *
+ spatial_width_fact * spatial_height_fact;
+ float new_dec_factor_temp = state_dec_factor_temporal_ * temporal_fact;
+
+ // No spatial sampling if current frame size is too small, or if the
+ // amount of spatial down-sampling is above maximum spatial down-action.
+ if ((width_ * height_) <= kMinImageSize ||
+ new_dec_factor_spatial > kMaxSpatialDown) {
+ action_.spatial = kNoChangeSpatial;
+ new_dec_factor_spatial = state_dec_factor_spatial_;
+ }
+ // No frame rate reduction if average frame rate is below some point, or if
+ // the amount of temporal down-sampling is above maximum temporal down-action.
+ if (avg_incoming_framerate_ <= kMinFrameRate ||
+ new_dec_factor_temp > kMaxTempDown) {
+ action_.temporal = kNoChangeTemporal;
+ new_dec_factor_temp = state_dec_factor_temporal_;
+ }
+ // Check if the total (spatial-temporal) down-action is above maximum allowed,
+ // if so, disallow the current selected down-action.
+ if (new_dec_factor_spatial * new_dec_factor_temp > kMaxTotalDown) {
+ if (action_.spatial != kNoChangeSpatial) {
+ action_.spatial = kNoChangeSpatial;
+ } else if (action_.temporal != kNoChangeTemporal) {
+ action_.temporal = kNoChangeTemporal;
+ } else {
+ // We only allow for one action (spatial or temporal) at a given time, so
+ // either spatial or temporal action is selected when this function is
+ // called. If the selected action is disallowed from one of the above
+ // 2 prior conditions (on spatial & temporal max down-action), then this
+ // condition "total down-action > |kMaxTotalDown|" would not be entered.
+ assert(false);
+ }
+ }
+}
+
+void VCMQmResolution::PickSpatialOrTemporal() {
+ // Pick the one that has had the most down-sampling thus far.
+ if (state_dec_factor_spatial_ > state_dec_factor_temporal_) {
+ action_.spatial = down_action_history_[0].spatial;
+ action_.temporal = kNoChangeTemporal;
+ } else {
+ action_.spatial = kNoChangeSpatial;
+ action_.temporal = down_action_history_[0].temporal;
+ }
+}
+
+// TODO(marpan): Update when we allow for directional spatial down-sampling.
+void VCMQmResolution::SelectSpatialDirectionMode(float transition_rate) {
+ // Default is 4/3x4/3
+ // For bit rates well below transitional rate, we select 2x2.
+ if (avg_target_rate_ < transition_rate * kRateRedSpatial2X2) {
+ qm_->spatial_width_fact = 2.0f;
+ qm_->spatial_height_fact = 2.0f;
+ }
+ // Otherwise check prediction errors and aspect ratio.
+ float spatial_err = 0.0f;
+ float spatial_err_h = 0.0f;
+ float spatial_err_v = 0.0f;
+ if (content_metrics_) {
+ spatial_err = content_metrics_->spatial_pred_err;
+ spatial_err_h = content_metrics_->spatial_pred_err_h;
+ spatial_err_v = content_metrics_->spatial_pred_err_v;
+ }
+
+ // Favor 1x2 if aspect_ratio is 16:9.
+ if (aspect_ratio_ >= 16.0f / 9.0f) {
+ // Check if 1x2 has lowest prediction error.
+ if (spatial_err_h < spatial_err && spatial_err_h < spatial_err_v) {
+ qm_->spatial_width_fact = 2.0f;
+ qm_->spatial_height_fact = 1.0f;
+ }
+ }
+ // Check for 4/3x4/3 selection: favor 2x2 over 1x2 and 2x1.
+ if (spatial_err < spatial_err_h * (1.0f + kSpatialErr2x2VsHoriz) &&
+ spatial_err < spatial_err_v * (1.0f + kSpatialErr2X2VsVert)) {
+ qm_->spatial_width_fact = 4.0f / 3.0f;
+ qm_->spatial_height_fact = 4.0f / 3.0f;
+ }
+ // Check for 2x1 selection.
+ if (spatial_err_v < spatial_err_h * (1.0f - kSpatialErrVertVsHoriz) &&
+ spatial_err_v < spatial_err * (1.0f - kSpatialErr2X2VsVert)) {
+ qm_->spatial_width_fact = 1.0f;
+ qm_->spatial_height_fact = 2.0f;
+ }
+}
+
+// ROBUSTNESS CLASS
+
+VCMQmRobustness::VCMQmRobustness() {
+ Reset();
+}
+
+VCMQmRobustness::~VCMQmRobustness() {
+}
+
+void VCMQmRobustness::Reset() {
+ prev_total_rate_ = 0.0f;
+ prev_rtt_time_ = 0;
+ prev_packet_loss_ = 0;
+ prev_code_rate_delta_ = 0;
+ ResetQM();
+}
+
+// Adjust the FEC rate based on the content and the network state
+// (packet loss rate, total rate/bandwidth, round trip time).
+// Note that packetLoss here is the filtered loss value.
+float VCMQmRobustness::AdjustFecFactor(uint8_t code_rate_delta,
+ float total_rate,
+ float framerate,
+ int64_t rtt_time,
+ uint8_t packet_loss) {
+ // Default: no adjustment
+ float adjust_fec = 1.0f;
+ if (content_metrics_ == NULL) {
+ return adjust_fec;
+ }
+ // Compute class state of the content.
+ ComputeMotionNFD();
+ ComputeSpatial();
+
+ // TODO(marpan): Set FEC adjustment factor.
+
+ // Keep track of previous values of network state:
+ // adjustment may be also based on pattern of changes in network state.
+ prev_total_rate_ = total_rate;
+ prev_rtt_time_ = rtt_time;
+ prev_packet_loss_ = packet_loss;
+ prev_code_rate_delta_ = code_rate_delta;
+ return adjust_fec;
+}
+
+// Set the UEP (unequal-protection across packets) on/off for the FEC.
+bool VCMQmRobustness::SetUepProtection(uint8_t code_rate_delta,
+ float total_rate,
+ uint8_t packet_loss,
+ bool frame_type) {
+ // Default.
+ return false;
+}
+} // namespace webrtc
diff --git a/webrtc/modules/video_coding/main/source/qm_select.h b/webrtc/modules/video_coding/main/source/qm_select.h
new file mode 100644
index 0000000000..079e7f8879
--- /dev/null
+++ b/webrtc/modules/video_coding/main/source/qm_select.h
@@ -0,0 +1,373 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_QM_SELECT_H_
+#define WEBRTC_MODULES_VIDEO_CODING_QM_SELECT_H_
+
+#include "webrtc/common_types.h"
+#include "webrtc/typedefs.h"
+
+/******************************************************/
+/* Quality Modes: Resolution and Robustness settings */
+/******************************************************/
+
+namespace webrtc {
+struct VideoContentMetrics;
+
+struct VCMResolutionScale {
+ VCMResolutionScale()
+ : codec_width(640),
+ codec_height(480),
+ frame_rate(30.0f),
+ spatial_width_fact(1.0f),
+ spatial_height_fact(1.0f),
+ temporal_fact(1.0f),
+ change_resolution_spatial(false),
+ change_resolution_temporal(false) {
+ }
+ uint16_t codec_width;
+ uint16_t codec_height;
+ float frame_rate;
+ float spatial_width_fact;
+ float spatial_height_fact;
+ float temporal_fact;
+ bool change_resolution_spatial;
+ bool change_resolution_temporal;
+};
+
+enum ImageType {
+ kQCIF = 0, // 176x144
+ kHCIF, // 264x216 = half (~3/4x3/4) CIF.
+ kQVGA, // 320x240 = quarter VGA.
+ kCIF, // 352x288
+ kHVGA, // 480x360 = half (~3/4x3/4) VGA.
+ kVGA, // 640x480
+ kQFULLHD, // 960x540 = quarter FULLHD, and half (~3/4x3/4) WHD.
+ kWHD, // 1280x720
+ kFULLHD, // 1920x1080
+ kNumImageTypes
+};
+
+const uint32_t kSizeOfImageType[kNumImageTypes] =
+{ 25344, 57024, 76800, 101376, 172800, 307200, 518400, 921600, 2073600 };
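+// For example, 640x480 (307200 pixels) maps to kVGA (index 5); sizes with no
+// exact match are mapped to the nearest entry by FindClosestImageType().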
+
+enum FrameRateLevelClass {
+ kFrameRateLow,
+ kFrameRateMiddle1,
+ kFrameRateMiddle2,
+ kFrameRateHigh
+};
+
+enum ContentLevelClass {
+ kLow,
+ kHigh,
+ kDefault
+};
+
+struct VCMContFeature {
+ VCMContFeature()
+ : value(0.0f),
+ level(kDefault) {
+ }
+ void Reset() {
+ value = 0.0f;
+ level = kDefault;
+ }
+ float value;
+ ContentLevelClass level;
+};
+
+enum UpDownAction {
+ kUpResolution,
+ kDownResolution
+};
+
+enum SpatialAction {
+ kNoChangeSpatial,
+ kOneHalfSpatialUniform, // 3/4 x 3/4: 9/16 ~1/2 pixel reduction.
+ kOneQuarterSpatialUniform, // 1/2 x 1/2: 1/4 pixel reduction.
+ kNumModesSpatial
+};
+
+enum TemporalAction {
+ kNoChangeTemporal,
+ kTwoThirdsTemporal, // Reduce frame rate to 2/3.
+ kOneHalfTemporal, // Reduce frame rate to 1/2.
+ kNumModesTemporal
+};
+
+struct ResolutionAction {
+ ResolutionAction()
+ : spatial(kNoChangeSpatial),
+ temporal(kNoChangeTemporal) {
+ }
+ SpatialAction spatial;
+ TemporalAction temporal;
+};
+
+// Down-sampling factors for spatial (width and height), and temporal.
+const float kFactorWidthSpatial[kNumModesSpatial] =
+ { 1.0f, 4.0f / 3.0f, 2.0f };
+
+const float kFactorHeightSpatial[kNumModesSpatial] =
+ { 1.0f, 4.0f / 3.0f, 2.0f };
+
+const float kFactorTemporal[kNumModesTemporal] =
+ { 1.0f, 1.5f, 2.0f };
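+// For example, kTwoThirdsTemporal divides the frame rate by 1.5 (30 fps is
+// encoded at 20 fps), and kOneHalfTemporal divides it by 2.0 (30 -> 15 fps).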
+
+enum EncoderState {
+ kStableEncoding, // Low rate mis-match, stable buffer levels.
+ kStressedEncoding, // Significant over-shooting of target rate,
+ // buffer under-flow, etc.
+ kEasyEncoding // Significant under-shooting of target rate.
+};
+
+// QmMethod class: main class for resolution and robustness settings
+
+class VCMQmMethod {
+ public:
+ VCMQmMethod();
+ virtual ~VCMQmMethod();
+
+ // Reset values
+ void ResetQM();
+ virtual void Reset() = 0;
+
+ // Compute content class.
+ uint8_t ComputeContentClass();
+
+ // Update with the content metrics.
+ void UpdateContent(const VideoContentMetrics* content_metrics);
+
+ // Compute spatial texture magnitude and level.
+ // Spatial texture is a spatial prediction error measure.
+ void ComputeSpatial();
+
+ // Compute motion magnitude and level for NFD metric.
+ // NFD is normalized frame difference (normalized by spatial variance).
+ void ComputeMotionNFD();
+
+ // Get the imageType (CIF, VGA, HD, etc) for the system width/height.
+ ImageType GetImageType(uint16_t width, uint16_t height);
+
+ // Return the closest image type.
+ ImageType FindClosestImageType(uint16_t width, uint16_t height);
+
+ // Get the frame rate level.
+ FrameRateLevelClass FrameRateLevel(float frame_rate);
+
+ protected:
+ // Content Data.
+ const VideoContentMetrics* content_metrics_;
+
+ // Encoder frame sizes and native frame sizes.
+ uint16_t width_;
+ uint16_t height_;
+ float user_frame_rate_;
+ uint16_t native_width_;
+ uint16_t native_height_;
+ float native_frame_rate_;
+ float aspect_ratio_;
+ // Image type and frame rate level, for the current encoder resolution.
+ ImageType image_type_;
+ FrameRateLevelClass framerate_level_;
+ // Content class data.
+ VCMContFeature motion_;
+ VCMContFeature spatial_;
+ uint8_t content_class_;
+ bool init_;
+};
+
+// Resolution settings class
+
+class VCMQmResolution : public VCMQmMethod {
+ public:
+ VCMQmResolution();
+ virtual ~VCMQmResolution();
+
+ // Reset all quantities.
+ virtual void Reset();
+
+ // Reset rate quantities and counters after every SelectResolution() call.
+ void ResetRates();
+
+ // Reset down-sampling state.
+ void ResetDownSamplingState();
+
+ // Get the encoder state.
+ EncoderState GetEncoderState();
+
+ // Initialize after SetEncodingData in media_opt.
+ int Initialize(float bitrate,
+ float user_framerate,
+ uint16_t width,
+ uint16_t height,
+ int num_layers);
+
+ // Update the encoder frame size.
+ void UpdateCodecParameters(float frame_rate, uint16_t width, uint16_t height);
+
+ // Update with actual bit rate (size of the latest encoded frame)
+ // and frame type, after every encoded frame.
+ void UpdateEncodedSize(size_t encoded_size);
+
+ // Update with new target bitrate, actual encoder sent rate, frame_rate,
+ // loss rate: every ~1 sec from SetTargetRates in media_opt.
+ void UpdateRates(float target_bitrate,
+ float encoder_sent_rate,
+ float incoming_framerate,
+ uint8_t packet_loss);
+
+ // Extract ST (spatio-temporal) resolution action.
+ // Inputs: qm: Reference to the quality modes pointer.
+ // Output: the spatial and/or temporal scale change.
+ int SelectResolution(VCMResolutionScale** qm);
+
+ private:
+ // Set the default resolution action.
+ void SetDefaultAction();
+
+ // Compute rates for the selection of down-sampling action.
+ void ComputeRatesForSelection();
+
+ // Compute the encoder state.
+ void ComputeEncoderState();
+
+ // Return true if the action is to go back up in resolution.
+ bool GoingUpResolution();
+
+ // Return true if the action is to go down in resolution.
+ bool GoingDownResolution();
+
+ // Check the condition for going up in resolution by the scale factors:
+ // |fac_width|, |fac_height|, |fac_temp|.
+ // |scale_fac| is a scale factor for the transition rate.
+ bool ConditionForGoingUp(float fac_width,
+ float fac_height,
+ float fac_temp,
+ float scale_fac);
+
+ // Get the bitrate threshold for the resolution action.
+ // The case |fac_width| = |fac_height| = |fac_temp| = 1 is for the
+ // down-sampling action.
+ // |scale_fac| is a scale factor for the transition rate.
+ float GetTransitionRate(float fac_width,
+ float fac_height,
+ float fac_temp,
+ float scale_fac);
+
+ // Update the down-sampling state.
+ void UpdateDownsamplingState(UpDownAction up_down);
+
+ // Update the codec frame size and frame rate.
+ void UpdateCodecResolution();
+
+ // Return a state based on the average target rate relative to the
+ // transition rate.
+ uint8_t RateClass(float transition_rate);
+
+ // Adjust the action selected from the table.
+ void AdjustAction();
+
+ // Convert 2 stages of 3/4 (= 9/16) spatial decimation to 1/2.
+ void ConvertSpatialFractionalToWhole();
+
+ // Returns true if the new frame sizes, under the selected spatial action,
+ // are of even size.
+ bool EvenFrameSize();
+
+ // Insert latest down-sampling action into the history list.
+ void InsertLatestDownAction();
+
+ // Remove the last (first element) down-sampling action from the list.
+ void RemoveLastDownAction();
+
+ // Check constraints on the amount of down-sampling allowed.
+ void ConstrainAmountOfDownSampling();
+
+ // For going up in resolution: pick spatial or temporal action,
+ // if both actions were separately selected.
+ void PickSpatialOrTemporal();
+
+ // Select the directional (1x2 or 2x1) spatial down-sampling action.
+ void SelectSpatialDirectionMode(float transition_rate);
+
+ enum { kDownActionHistorySize = 10 };
+
+ VCMResolutionScale* qm_;
+ // Encoder rate control parameters.
+ float target_bitrate_;
+ float incoming_framerate_;
+ float per_frame_bandwidth_;
+ float buffer_level_;
+
+ // Data accumulated every ~1sec from MediaOpt.
+ float sum_target_rate_;
+ float sum_incoming_framerate_;
+ float sum_rate_MM_;
+ float sum_rate_MM_sgn_;
+ float sum_packet_loss_;
+ // Counters.
+ uint32_t frame_cnt_;
+ uint32_t frame_cnt_delta_;
+ uint32_t update_rate_cnt_;
+ uint32_t low_buffer_cnt_;
+
+ // Resolution state parameters.
+ float state_dec_factor_spatial_;
+ float state_dec_factor_temporal_;
+
+ // Quantities used for selection.
+ float avg_target_rate_;
+ float avg_incoming_framerate_;
+ float avg_ratio_buffer_low_;
+ float avg_rate_mismatch_;
+ float avg_rate_mismatch_sgn_;
+ float avg_packet_loss_;
+ EncoderState encoder_state_;
+ ResolutionAction action_;
+ // Short history of the down-sampling actions from the Initialize() state.
+ // This is needed for going up in resolution. Since the total amount of
+ // down-sampling is constrained (|kMaxTotalDown|) and the smallest step is
+ // 3/4x3/4, the list need not be long.
+ ResolutionAction down_action_history_[kDownActionHistorySize];
+ int num_layers_;
+};
+
+// Robustness settings class.
+
+class VCMQmRobustness : public VCMQmMethod {
+ public:
+ VCMQmRobustness();
+ ~VCMQmRobustness();
+
+ virtual void Reset();
+
+ // Adjust FEC rate based on content: every ~1 sec from SetTargetRates.
+ // Returns an adjustment factor.
+ float AdjustFecFactor(uint8_t code_rate_delta,
+ float total_rate,
+ float framerate,
+ int64_t rtt_time,
+ uint8_t packet_loss);
+
+ // Set the UEP protection on/off.
+ bool SetUepProtection(uint8_t code_rate_delta,
+ float total_rate,
+ uint8_t packet_loss,
+ bool frame_type);
+
+ private:
+ // Previous state of network parameters.
+ float prev_total_rate_;
+ int64_t prev_rtt_time_;
+ uint8_t prev_packet_loss_;
+ uint8_t prev_code_rate_delta_;
+};
+} // namespace webrtc
+#endif // WEBRTC_MODULES_VIDEO_CODING_QM_SELECT_H_
diff --git a/webrtc/modules/video_coding/main/source/qm_select_data.h b/webrtc/modules/video_coding/main/source/qm_select_data.h
new file mode 100644
index 0000000000..dc6bce4811
--- /dev/null
+++ b/webrtc/modules/video_coding/main/source/qm_select_data.h
@@ -0,0 +1,227 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_SOURCE_QM_SELECT_DATA_H_
+#define WEBRTC_MODULES_VIDEO_CODING_SOURCE_QM_SELECT_DATA_H_
+
+/***************************************************************
+ * QMSelectData.h
+ * This file includes parameters for content-aware media optimization.
+****************************************************************/
+
+#include "webrtc/typedefs.h"
+
+namespace webrtc {
+//
+// PARAMETERS FOR RESOLUTION ADAPTATION
+//
+
+// Initial level of buffer in secs.
+const float kInitBufferLevel = 0.5f;
+
+// Fraction of the initial (max) buffer level below which the buffer is
+// considered too low, i.e., at risk of underflow.
+const float kPercBufferThr = 0.10f;
+
+// Threshold on the occurrences of low buffer levels.
+const float kMaxBufferLow = 0.30f;
+
+// Threshold on rate mismatch.
+const float kMaxRateMisMatch = 0.5f;
+
+// Threshold on amount of under/over encoder shooting.
+const float kRateOverShoot = 0.75f;
+const float kRateUnderShoot = 0.75f;
+
+// Factor to favor weighting the average rates with the current/last data.
+const float kWeightRate = 0.70f;
+
+// Factor for transitional rate for going back up in resolution.
+const float kTransRateScaleUpSpatial = 1.25f;
+const float kTransRateScaleUpTemp = 1.25f;
+const float kTransRateScaleUpSpatialTemp = 1.25f;
+
+// Threshold on packet loss rate, above which favor resolution reduction.
+const float kPacketLossThr = 0.1f;
+
+// Factor for reducing transitional bitrate under packet loss.
+const float kPacketLossRateFac = 1.0f;
+
+// Maximum possible transitional rate for down-sampling:
+// (units in kbps), for 30fps.
+const uint16_t kMaxRateQm[9] = {
+ 0, // kQCIF
+ 50, // kHCIF
+ 125, // kQVGA
+ 200, // kCIF
+ 280, // kHVGA
+ 400, // kVGA
+ 700, // kQFULLHD
+ 1000, // kWHD
+ 1500 // kFULLHD
+};
+
+// Frame rate scale for maximum transition rate.
+const float kFrameRateFac[4] = {
+ 0.5f, // Low
+ 0.7f, // Middle level 1
+ 0.85f, // Middle level 2
+ 1.0f, // High
+};
+
+// Scale for the transitional rate: based on content class,
+// motion = L/H/D, spatial = L/H/D (low, high, default levels).
+const float kScaleTransRateQm[18] = {
+ // VGA and lower
+ 0.40f, // L, L
+ 0.50f, // L, H
+ 0.40f, // L, D
+ 0.60f, // H, L
+ 0.60f, // H, H
+ 0.60f, // H, D
+ 0.50f, // D, L
+ 0.50f, // D, H
+ 0.50f, // D, D
+
+ // over VGA
+ 0.40f, // L, L
+ 0.50f, // L, H
+ 0.40f, // L, D
+ 0.60f, // H, L
+ 0.60f, // H, H
+ 0.60f, // H, D
+ 0.50f, // D, L
+ 0.50f, // D, H
+ 0.50f, // D, D
+};
+
+// Threshold on the target rate relative to transitional rate.
+const float kFacLowRate = 0.5f;
+
+// Action for down-sampling:
+// motion = L/H/D, spatial = L/H/D (low, high, default levels);
+// rate class = 0/1/2: target rate state relative to the transition rate.
+const uint8_t kSpatialAction[27] = {
+// rateClass = 0:
+ 1, // L, L
+ 1, // L, H
+ 1, // L, D
+ 4, // H, L
+ 1, // H, H
+ 4, // H, D
+ 4, // D, L
+ 1, // D, H
+ 2, // D, D
+
+// rateClass = 1:
+ 1, // L, L
+ 1, // L, H
+ 1, // L, D
+ 2, // H, L
+ 1, // H, H
+ 2, // H, D
+ 2, // D, L
+ 1, // D, H
+ 2, // D, D
+
+// rateClass = 2:
+ 1, // L, L
+ 1, // L, H
+ 1, // L, D
+ 2, // H, L
+ 1, // H, H
+ 2, // H, D
+ 2, // D, L
+ 1, // D, H
+ 2, // D, D
+};
+
+const uint8_t kTemporalAction[27] = {
+// rateClass = 0:
+ 3, // L, L
+ 2, // L, H
+ 2, // L, D
+ 1, // H, L
+ 3, // H, H
+ 1, // H, D
+ 1, // D, L
+ 2, // D, H
+ 1, // D, D
+
+// rateClass = 1:
+ 3, // L, L
+ 3, // L, H
+ 3, // L, D
+ 1, // H, L
+ 3, // H, H
+ 1, // H, D
+ 1, // D, L
+ 3, // D, H
+ 1, // D, D
+
+// rateClass = 2:
+ 1, // L, L
+ 3, // L, H
+ 3, // L, D
+ 1, // H, L
+ 3, // H, H
+ 1, // H, D
+ 1, // D, L
+ 3, // D, H
+ 1, // D, D
+};
+
+// Control the total amount of down-sampling allowed.
+const float kMaxSpatialDown = 8.0f;
+const float kMaxTempDown = 3.0f;
+const float kMaxTotalDown = 9.0f;
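+// For example, two successive 1/2x1/2 spatial actions would give a spatial
+// factor of (2*2) * (2*2) = 16 > kMaxSpatialDown, so the second one is
+// rejected by ConstrainAmountOfDownSampling().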
+
+// Minimum image size for a spatial down-sampling.
+const int kMinImageSize = 176 * 144;
+
+// Minimum frame rate for temporal down-sampling:
+// no frame rate reduction if incomingFrameRate <= MIN_FRAME_RATE.
+const int kMinFrameRate = 8;
+
+//
+// PARAMETERS FOR FEC ADJUSTMENT: TODO (marpan)
+//
+
+//
+// PARAMETERS FOR SETTING LOW/HIGH STATES OF CONTENT METRICS:
+//
+
+// Thresholds for frame rate:
+const int kLowFrameRate = 10;
+const int kMiddleFrameRate = 15;
+const int kHighFrameRate = 25;
+
+// Thresholds for motion: motion level is from NFD.
+const float kHighMotionNfd = 0.075f;
+const float kLowMotionNfd = 0.03f;
+
+// Thresholds for spatial prediction error:
+// this is applied on the average of (2x2,1x2,2x1).
+const float kHighTexture = 0.035f;
+const float kLowTexture = 0.020f;
+
+// Used to reduce thresholds for larger/HD scenes: correction factor since
+// higher correlation in HD scenes means lower spatial prediction error.
+const float kScaleTexture = 0.9f;
+
+// Percentage reduction in transitional bitrate for 2x2 selected over 1x2/2x1.
+const float kRateRedSpatial2X2 = 0.6f;
+
+const float kSpatialErr2x2VsHoriz = 0.1f; // percentage to favor 2x2 over H
+const float kSpatialErr2X2VsVert = 0.1f; // percentage to favor 2x2 over V
+const float kSpatialErrVertVsHoriz = 0.1f; // percentage to favor H over V
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_CODING_SOURCE_QM_SELECT_DATA_H_
diff --git a/webrtc/modules/video_coding/main/source/qm_select_unittest.cc b/webrtc/modules/video_coding/main/source/qm_select_unittest.cc
new file mode 100644
index 0000000000..6abc0d3099
--- /dev/null
+++ b/webrtc/modules/video_coding/main/source/qm_select_unittest.cc
@@ -0,0 +1,1311 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * This file includes unit tests for the VCMQmResolution class.
+ * In particular, for the selection of spatial and/or temporal down-sampling.
+ */
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/video_coding/main/source/qm_select.h"
+
+namespace webrtc {
+
+// Representative values of content metrics for the low/high/medium (default)
+// states, based on the parameter settings in qm_select_data.h.
+const float kSpatialLow = 0.01f;
+const float kSpatialMedium = 0.03f;
+const float kSpatialHigh = 0.1f;
+const float kTemporalLow = 0.01f;
+const float kTemporalMedium = 0.06f;
+const float kTemporalHigh = 0.1f;
+
+class QmSelectTest : public ::testing::Test {
+ protected:
+ QmSelectTest()
+ : qm_resolution_(new VCMQmResolution()),
+ content_metrics_(new VideoContentMetrics()),
+ qm_scale_(NULL) {
+ }
+ VCMQmResolution* qm_resolution_;
+ VideoContentMetrics* content_metrics_;
+ VCMResolutionScale* qm_scale_;
+
+ void InitQmNativeData(float initial_bit_rate,
+ int user_frame_rate,
+ int native_width,
+ int native_height,
+ int num_layers);
+
+ void UpdateQmEncodedFrame(size_t* encoded_size, size_t num_updates);
+
+ void UpdateQmRateData(int* target_rate,
+ int* encoder_sent_rate,
+ int* incoming_frame_rate,
+ uint8_t* fraction_lost,
+ int num_updates);
+
+ void UpdateQmContentData(float motion_metric,
+ float spatial_metric,
+ float spatial_metric_horiz,
+ float spatial_metric_vert);
+
+ bool IsSelectedActionCorrect(VCMResolutionScale* qm_scale,
+ float fac_width,
+ float fac_height,
+ float fac_temp,
+ uint16_t new_width,
+ uint16_t new_height,
+ float new_frame_rate);
+
+ void TearDown() {
+ delete qm_resolution_;
+ delete content_metrics_;
+ }
+};
+
+TEST_F(QmSelectTest, HandleInputs) {
+ // Expect parameter error. Initialize with invalid inputs.
+ EXPECT_EQ(-4, qm_resolution_->Initialize(1000, 0, 640, 480, 1));
+ EXPECT_EQ(-4, qm_resolution_->Initialize(1000, 30, 640, 0, 1));
+ EXPECT_EQ(-4, qm_resolution_->Initialize(1000, 30, 0, 480, 1));
+
+ // Expect uninitialized error: no valid initialization before selection.
+ EXPECT_EQ(-7, qm_resolution_->SelectResolution(&qm_scale_));
+
+ VideoContentMetrics* content_metrics = NULL;
+ EXPECT_EQ(0, qm_resolution_->Initialize(1000, 30, 640, 480, 1));
+ qm_resolution_->UpdateContent(content_metrics);
+ // Content metrics are NULL: Expect success and no down-sampling action.
+ EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0, 1.0, 1.0, 640, 480,
+ 30.0f));
+}
+
+// TODO(marpan): Add a test for number of temporal layers > 1.
+
+// No down-sampling action at high rates.
+TEST_F(QmSelectTest, NoActionHighRate) {
+ // Initialize with bitrate, frame rate, native system width/height, and
+ // number of temporal layers.
+ InitQmNativeData(800, 30, 640, 480, 1);
+
+ // Update with encoder frame size.
+ uint16_t codec_width = 640;
+ uint16_t codec_height = 480;
+ qm_resolution_->UpdateCodecParameters(30.0f, codec_width, codec_height);
+ EXPECT_EQ(5, qm_resolution_->GetImageType(codec_width, codec_height));
+
+ // Update rates for a sequence of intervals.
+ int target_rate[] = {800, 800, 800};
+ int encoder_sent_rate[] = {800, 800, 800};
+ int incoming_frame_rate[] = {30, 30, 30};
+ uint8_t fraction_lost[] = {10, 10, 10};
+ UpdateQmRateData(target_rate, encoder_sent_rate, incoming_frame_rate,
+ fraction_lost, 3);
+
+ // Update content: motion level, and 3 spatial prediction errors.
+ UpdateQmContentData(kTemporalLow, kSpatialLow, kSpatialLow, kSpatialLow);
+ EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
+ EXPECT_EQ(0, qm_resolution_->ComputeContentClass());
+ EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 640, 480,
+ 30.0f));
+}
+
+// Rate is well below transition, down-sampling action is taken,
+// depending on the content state.
+TEST_F(QmSelectTest, DownActionLowRate) {
+ // Initialize with bitrate, frame rate, native system width/height, and
+ // number of temporal layers.
+ InitQmNativeData(50, 30, 640, 480, 1);
+
+ // Update with encoder frame size.
+ uint16_t codec_width = 640;
+ uint16_t codec_height = 480;
+ qm_resolution_->UpdateCodecParameters(30.0f, codec_width, codec_height);
+ EXPECT_EQ(5, qm_resolution_->GetImageType(codec_width, codec_height));
+
+ // Update rates for a sequence of intervals.
+ int target_rate[] = {50, 50, 50};
+ int encoder_sent_rate[] = {50, 50, 50};
+ int incoming_frame_rate[] = {30, 30, 30};
+ uint8_t fraction_lost[] = {10, 10, 10};
+ UpdateQmRateData(target_rate, encoder_sent_rate, incoming_frame_rate,
+ fraction_lost, 3);
+
+ // Update content: motion level, and 3 spatial prediction errors.
+ // High motion, low spatial: 2x2 spatial expected.
+ UpdateQmContentData(kTemporalHigh, kSpatialLow, kSpatialLow, kSpatialLow);
+ EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
+ EXPECT_EQ(3, qm_resolution_->ComputeContentClass());
+ EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240,
+ 30.0f));
+
+ qm_resolution_->ResetDownSamplingState();
+ // Low motion, low spatial: 2/3 temporal is expected.
+ UpdateQmContentData(kTemporalLow, kSpatialLow, kSpatialLow, kSpatialLow);
+ EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
+ EXPECT_EQ(0, qm_resolution_->ComputeContentClass());
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 640, 480,
+ 20.5f));
+
+ qm_resolution_->ResetDownSamplingState();
+ // Medium motion, low spatial: 2x2 spatial expected.
+ UpdateQmContentData(kTemporalMedium, kSpatialLow, kSpatialLow, kSpatialLow);
+ EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
+ EXPECT_EQ(6, qm_resolution_->ComputeContentClass());
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240,
+ 30.0f));
+
+ qm_resolution_->ResetDownSamplingState();
+ // High motion, high spatial: 2/3 temporal expected.
+ UpdateQmContentData(kTemporalHigh, kSpatialHigh, kSpatialHigh, kSpatialHigh);
+ EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
+ EXPECT_EQ(4, qm_resolution_->ComputeContentClass());
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 640, 480,
+ 20.5f));
+
+ qm_resolution_->ResetDownSamplingState();
+ // Low motion, high spatial: 1/2 temporal expected.
+ UpdateQmContentData(kTemporalLow, kSpatialHigh, kSpatialHigh, kSpatialHigh);
+ EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
+ EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 2.0f, 640, 480,
+ 15.5f));
+
+ qm_resolution_->ResetDownSamplingState();
+ // Medium motion, high spatial: 1/2 temporal expected.
+ UpdateQmContentData(kTemporalMedium, kSpatialHigh, kSpatialHigh,
+ kSpatialHigh);
+ EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
+ EXPECT_EQ(7, qm_resolution_->ComputeContentClass());
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 2.0f, 640, 480,
+ 15.5f));
+
+ qm_resolution_->ResetDownSamplingState();
+ // High motion, medium spatial: 2x2 spatial expected.
+ UpdateQmContentData(kTemporalHigh, kSpatialMedium, kSpatialMedium,
+ kSpatialMedium);
+ EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
+ EXPECT_EQ(5, qm_resolution_->ComputeContentClass());
+ // Spatial action only: the target frame rate stays at 30 fps.
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240,
+ 30.0f));
+
+ qm_resolution_->ResetDownSamplingState();
+ // Low motion, medium spatial: high frame rate, so 1/2 temporal expected.
+ UpdateQmContentData(kTemporalLow, kSpatialMedium, kSpatialMedium,
+ kSpatialMedium);
+ EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
+ EXPECT_EQ(2, qm_resolution_->ComputeContentClass());
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 2.0f, 640, 480,
+ 15.5f));
+
+ qm_resolution_->ResetDownSamplingState();
+ // Medium motion, medium spatial: high frame rate, so 2/3 temporal expected.
+ UpdateQmContentData(kTemporalMedium, kSpatialMedium, kSpatialMedium,
+ kSpatialMedium);
+ EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
+ EXPECT_EQ(8, qm_resolution_->ComputeContentClass());
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 640, 480,
+ 20.5f));
+}
+
+// Rate mis-match is high, and we have over-shooting.
+// since target rate is below max for down-sampling, down-sampling is selected.
+TEST_F(QmSelectTest, DownActionHighRateMMOvershoot) {
+ // Initialize with bitrate, frame rate, native system width/height, and
+ // number of temporal layers.
+ InitQmNativeData(300, 30, 640, 480, 1);
+
+ // Update with encoder frame size.
+ uint16_t codec_width = 640;
+ uint16_t codec_height = 480;
+ qm_resolution_->UpdateCodecParameters(30.0f, codec_width, codec_height);
+ EXPECT_EQ(5, qm_resolution_->GetImageType(codec_width, codec_height));
+
+ // Update rates for a sequence of intervals.
+ int target_rate[] = {300, 300, 300};
+ int encoder_sent_rate[] = {900, 900, 900};
+ int incoming_frame_rate[] = {30, 30, 30};
+ uint8_t fraction_lost[] = {10, 10, 10};
+ UpdateQmRateData(target_rate, encoder_sent_rate, incoming_frame_rate,
+ fraction_lost, 3);
+
+ // Update content: motion level, and 3 spatial prediction errors.
+ // High motion, low spatial.
+ UpdateQmContentData(kTemporalHigh, kSpatialLow, kSpatialLow, kSpatialLow);
+ EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
+ EXPECT_EQ(3, qm_resolution_->ComputeContentClass());
+ EXPECT_EQ(kStressedEncoding, qm_resolution_->GetEncoderState());
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 4.0f / 3.0f, 4.0f / 3.0f,
+ 1.0f, 480, 360, 30.0f));
+
+ qm_resolution_->ResetDownSamplingState();
+ // Low motion, high spatial
+ UpdateQmContentData(kTemporalLow, kSpatialHigh, kSpatialHigh, kSpatialHigh);
+ EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
+ EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 640, 480,
+ 20.5f));
+}
+
+// Rate mis-match is high, target rate is below max for down-sampling,
+// but since we have consistent under-shooting, no down-sampling action.
+TEST_F(QmSelectTest, NoActionHighRateMMUndershoot) {
+ // Initialize with bitrate, frame rate, native system width/height, and
+ // number of temporal layers.
+ InitQmNativeData(300, 30, 640, 480, 1);
+
+ // Update with encoder frame size.
+ uint16_t codec_width = 640;
+ uint16_t codec_height = 480;
+ qm_resolution_->UpdateCodecParameters(30.0f, codec_width, codec_height);
+ EXPECT_EQ(5, qm_resolution_->GetImageType(codec_width, codec_height));
+
+ // Update rates for a sequence of intervals.
+ int target_rate[] = {300, 300, 300};
+ int encoder_sent_rate[] = {100, 100, 100};
+ int incoming_frame_rate[] = {30, 30, 30};
+ uint8_t fraction_lost[] = {10, 10, 10};
+ UpdateQmRateData(target_rate, encoder_sent_rate, incoming_frame_rate,
+ fraction_lost, 3);
+
+ // Update content: motion level, and 3 spatial prediction errors.
+ // High motion, low spatial.
+ UpdateQmContentData(kTemporalHigh, kSpatialLow, kSpatialLow, kSpatialLow);
+ EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
+ EXPECT_EQ(3, qm_resolution_->ComputeContentClass());
+ EXPECT_EQ(kEasyEncoding, qm_resolution_->GetEncoderState());
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 640, 480,
+ 30.0f));
+
+ qm_resolution_->ResetDownSamplingState();
+ // Low motion, high spatial
+ UpdateQmContentData(kTemporalLow, kSpatialHigh, kSpatialHigh, kSpatialHigh);
+ EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
+ EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 640, 480,
+ 30.0f));
+}
+
+// Buffer is underflowing, and target rate is below max for down-sampling,
+// so action is taken.
+TEST_F(QmSelectTest, DownActionBufferUnderflow) {
+ // Initialize with bitrate, frame rate, native system width/height, and
+ // number of temporal layers.
+ InitQmNativeData(300, 30, 640, 480, 1);
+
+ // Update with encoder frame size.
+ uint16_t codec_width = 640;
+ uint16_t codec_height = 480;
+ qm_resolution_->UpdateCodecParameters(30.0f, codec_width, codec_height);
+ EXPECT_EQ(5, qm_resolution_->GetImageType(codec_width, codec_height));
+
+ // Update with encoded size over a number of frames.
+ // Per-frame bandwidth = 10 = 300/30: simulate (decoder) buffer underflow.
+ size_t encoded_size[] = {200, 100, 50, 30, 60, 40, 20, 30, 20, 40};
+ UpdateQmEncodedFrame(encoded_size, GTEST_ARRAY_SIZE_(encoded_size));
+
+ // Update rates for a sequence of intervals.
+ int target_rate[] = {300, 300, 300};
+ int encoder_sent_rate[] = {450, 450, 450};
+ int incoming_frame_rate[] = {30, 30, 30};
+ uint8_t fraction_lost[] = {10, 10, 10};
+ UpdateQmRateData(target_rate, encoder_sent_rate, incoming_frame_rate,
+ fraction_lost, 3);
+
+ // Update content: motion level, and 3 spatial prediction errors.
+ // High motion, low spatial.
+ UpdateQmContentData(kTemporalHigh, kSpatialLow, kSpatialLow, kSpatialLow);
+ EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
+ EXPECT_EQ(3, qm_resolution_->ComputeContentClass());
+ EXPECT_EQ(kStressedEncoding, qm_resolution_->GetEncoderState());
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 4.0f / 3.0f, 4.0f / 3.0f,
+ 1.0f, 480, 360, 30.0f));
+
+ qm_resolution_->ResetDownSamplingState();
+ // Low motion, high spatial
+ UpdateQmContentData(kTemporalLow, kSpatialHigh, kSpatialHigh, kSpatialHigh);
+ EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
+ EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 640, 480,
+ 20.5f));
+}
+
+// Target rate is below max for down-sampling, but buffer level is stable,
+// so no action is taken.
+TEST_F(QmSelectTest, NoActionBufferStable) {
+ // Initialize with bitrate, frame rate, native system width/height, and
+ // number of temporal layers.
+ InitQmNativeData(350, 30, 640, 480, 1);
+
+ // Update with encoder frame size.
+ uint16_t codec_width = 640;
+ uint16_t codec_height = 480;
+ qm_resolution_->UpdateCodecParameters(30.0f, codec_width, codec_height);
+ EXPECT_EQ(5, qm_resolution_->GetImageType(codec_width, codec_height));
+
+ // Update with encoded size over a number of frames.
+  // Per-frame bandwidth = 450/30 = 15: simulate stable (decoder) buffer levels.
+ size_t encoded_size[] = {40, 10, 10, 16, 18, 20, 17, 20, 16, 15};
+ UpdateQmEncodedFrame(encoded_size, GTEST_ARRAY_SIZE_(encoded_size));
+
+ // Update rates for a sequence of intervals.
+ int target_rate[] = {350, 350, 350};
+ int encoder_sent_rate[] = {350, 450, 450};
+ int incoming_frame_rate[] = {30, 30, 30};
+ uint8_t fraction_lost[] = {10, 10, 10};
+ UpdateQmRateData(target_rate, encoder_sent_rate, incoming_frame_rate,
+ fraction_lost, 3);
+
+ // Update content: motion level, and 3 spatial prediction errors.
+ // High motion, low spatial.
+ UpdateQmContentData(kTemporalHigh, kSpatialLow, kSpatialLow, kSpatialLow);
+ EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
+ EXPECT_EQ(3, qm_resolution_->ComputeContentClass());
+ EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 640, 480,
+ 30.0f));
+
+ qm_resolution_->ResetDownSamplingState();
+ // Low motion, high spatial
+ UpdateQmContentData(kTemporalLow, kSpatialHigh, kSpatialHigh, kSpatialHigh);
+ EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
+ EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 640, 480,
+ 30.0f));
+}
+
+// Very low rate, but no spatial down-sampling below some size (QCIF).
+TEST_F(QmSelectTest, LimitDownSpatialAction) {
+ // Initialize with bitrate, frame rate, native system width/height, and
+ // number of temporal layers.
+ InitQmNativeData(10, 30, 176, 144, 1);
+
+ // Update with encoder frame size.
+ uint16_t codec_width = 176;
+ uint16_t codec_height = 144;
+ qm_resolution_->UpdateCodecParameters(30.0f, codec_width, codec_height);
+ EXPECT_EQ(0, qm_resolution_->GetImageType(codec_width, codec_height));
+
+ // Update rates for a sequence of intervals.
+ int target_rate[] = {10, 10, 10};
+ int encoder_sent_rate[] = {10, 10, 10};
+ int incoming_frame_rate[] = {30, 30, 30};
+ uint8_t fraction_lost[] = {10, 10, 10};
+ UpdateQmRateData(target_rate, encoder_sent_rate, incoming_frame_rate,
+ fraction_lost, 3);
+
+ // Update content: motion level, and 3 spatial prediction errors.
+ // High motion, low spatial.
+ UpdateQmContentData(kTemporalHigh, kSpatialLow, kSpatialLow, kSpatialLow);
+ EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
+ EXPECT_EQ(3, qm_resolution_->ComputeContentClass());
+ EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 176, 144,
+ 30.0f));
+}
+
+// Very low rate, but no frame reduction below some frame_rate (8fps).
+TEST_F(QmSelectTest, LimitDownTemporalAction) {
+ // Initialize with bitrate, frame rate, native system width/height, and
+ // number of temporal layers.
+ InitQmNativeData(10, 8, 640, 480, 1);
+
+ // Update with encoder frame size.
+ uint16_t codec_width = 640;
+ uint16_t codec_height = 480;
+ qm_resolution_->UpdateCodecParameters(8.0f, codec_width, codec_height);
+ EXPECT_EQ(5, qm_resolution_->GetImageType(codec_width, codec_height));
+
+ // Update rates for a sequence of intervals.
+ int target_rate[] = {10, 10, 10};
+ int encoder_sent_rate[] = {10, 10, 10};
+ int incoming_frame_rate[] = {8, 8, 8};
+ uint8_t fraction_lost[] = {10, 10, 10};
+ UpdateQmRateData(target_rate, encoder_sent_rate, incoming_frame_rate,
+ fraction_lost, 3);
+
+ // Update content: motion level, and 3 spatial prediction errors.
+ // Low motion, medium spatial.
+ UpdateQmContentData(kTemporalLow, kSpatialMedium, kSpatialMedium,
+ kSpatialMedium);
+ EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
+ EXPECT_EQ(2, qm_resolution_->ComputeContentClass());
+ EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 640, 480,
+ 8.0f));
+}
+
+// Two stages: spatial down-sample and then back up spatially,
+// as rate has increased.
+TEST_F(QmSelectTest, 2StageDownSpatialUpSpatial) {
+ // Initialize with bitrate, frame rate, native system width/height, and
+ // number of temporal layers.
+ InitQmNativeData(50, 30, 640, 480, 1);
+
+ // Update with encoder frame size.
+ uint16_t codec_width = 640;
+ uint16_t codec_height = 480;
+ qm_resolution_->UpdateCodecParameters(30.0f, codec_width, codec_height);
+ EXPECT_EQ(5, qm_resolution_->GetImageType(codec_width, codec_height));
+
+ // Update rates for a sequence of intervals.
+ int target_rate[] = {50, 50, 50};
+ int encoder_sent_rate[] = {50, 50, 50};
+ int incoming_frame_rate[] = {30, 30, 30};
+ uint8_t fraction_lost[] = {10, 10, 10};
+ UpdateQmRateData(target_rate, encoder_sent_rate, incoming_frame_rate,
+ fraction_lost, 3);
+
+ // Update content: motion level, and 3 spatial prediction errors.
+ // High motion, low spatial.
+ UpdateQmContentData(kTemporalHigh, kSpatialLow, kSpatialLow, kSpatialLow);
+ EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
+ EXPECT_EQ(3, qm_resolution_->ComputeContentClass());
+ EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240,
+ 30.0f));
+
+  // Reset and go up in rate: expect to go back up, in 2 stages of 3/4.
+ qm_resolution_->ResetRates();
+ qm_resolution_->UpdateCodecParameters(30.0f, 320, 240);
+ EXPECT_EQ(2, qm_resolution_->GetImageType(320, 240));
+ // Update rates for a sequence of intervals.
+ int target_rate2[] = {400, 400, 400, 400, 400};
+ int encoder_sent_rate2[] = {400, 400, 400, 400, 400};
+ int incoming_frame_rate2[] = {30, 30, 30, 30, 30};
+ uint8_t fraction_lost2[] = {10, 10, 10, 10, 10};
+ UpdateQmRateData(target_rate2, encoder_sent_rate2, incoming_frame_rate2,
+ fraction_lost2, 5);
+ EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
+ EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
+ float scale = (4.0f / 3.0f) / 2.0f;
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, scale, scale, 1.0f, 480, 360,
+ 30.0f));
+
+ qm_resolution_->UpdateCodecParameters(30.0f, 480, 360);
+ EXPECT_EQ(4, qm_resolution_->GetImageType(480, 360));
+ EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 3.0f / 4.0f, 3.0f / 4.0f, 1.0f,
+ 640, 480, 30.0f));
+}
+
+// Two stages: spatial down-sample and then back up spatially, since encoder
+// is under-shooting target even though rate has not increased much.
+TEST_F(QmSelectTest, 2StageDownSpatialUpSpatialUndershoot) {
+ // Initialize with bitrate, frame rate, native system width/height, and
+ // number of temporal layers.
+ InitQmNativeData(50, 30, 640, 480, 1);
+
+ // Update with encoder frame size.
+ uint16_t codec_width = 640;
+ uint16_t codec_height = 480;
+ qm_resolution_->UpdateCodecParameters(30.0f, codec_width, codec_height);
+ EXPECT_EQ(5, qm_resolution_->GetImageType(codec_width, codec_height));
+
+ // Update rates for a sequence of intervals.
+ int target_rate[] = {50, 50, 50};
+ int encoder_sent_rate[] = {50, 50, 50};
+ int incoming_frame_rate[] = {30, 30, 30};
+ uint8_t fraction_lost[] = {10, 10, 10};
+ UpdateQmRateData(target_rate, encoder_sent_rate, incoming_frame_rate,
+ fraction_lost, 3);
+
+ // Update content: motion level, and 3 spatial prediction errors.
+ // High motion, low spatial.
+ UpdateQmContentData(kTemporalHigh, kSpatialLow, kSpatialLow, kSpatialLow);
+ EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
+ EXPECT_EQ(3, qm_resolution_->ComputeContentClass());
+ EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240,
+ 30.0f));
+
+  // Reset rates and simulate under-shooting scenario: expect to go back up.
+ // Goes up spatially in two stages for 1/2x1/2 down-sampling.
+ qm_resolution_->ResetRates();
+ qm_resolution_->UpdateCodecParameters(30.0f, 320, 240);
+ EXPECT_EQ(2, qm_resolution_->GetImageType(320, 240));
+ // Update rates for a sequence of intervals.
+ int target_rate2[] = {200, 200, 200, 200, 200};
+ int encoder_sent_rate2[] = {50, 50, 50, 50, 50};
+ int incoming_frame_rate2[] = {30, 30, 30, 30, 30};
+ uint8_t fraction_lost2[] = {10, 10, 10, 10, 10};
+ UpdateQmRateData(target_rate2, encoder_sent_rate2, incoming_frame_rate2,
+ fraction_lost2, 5);
+ EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
+ EXPECT_EQ(kEasyEncoding, qm_resolution_->GetEncoderState());
+ float scale = (4.0f / 3.0f) / 2.0f;
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, scale, scale, 1.0f, 480, 360,
+ 30.0f));
+
+ qm_resolution_->UpdateCodecParameters(30.0f, 480, 360);
+ EXPECT_EQ(4, qm_resolution_->GetImageType(480, 360));
+ EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 3.0f / 4.0f, 3.0f / 4.0f, 1.0f,
+ 640, 480, 30.0f));
+}
+
+// Two stages: spatial down-sample and then no action to go up,
+// as encoding rate mis-match is too high.
+TEST_F(QmSelectTest, 2StageDownSpatialNoActionUp) {
+ // Initialize with bitrate, frame rate, native system width/height, and
+ // number of temporal layers.
+ InitQmNativeData(50, 30, 640, 480, 1);
+
+ // Update with encoder frame size.
+ uint16_t codec_width = 640;
+ uint16_t codec_height = 480;
+ qm_resolution_->UpdateCodecParameters(30.0f, codec_width, codec_height);
+ EXPECT_EQ(5, qm_resolution_->GetImageType(codec_width, codec_height));
+
+ // Update rates for a sequence of intervals.
+ int target_rate[] = {50, 50, 50};
+ int encoder_sent_rate[] = {50, 50, 50};
+ int incoming_frame_rate[] = {30, 30, 30};
+ uint8_t fraction_lost[] = {10, 10, 10};
+ UpdateQmRateData(target_rate, encoder_sent_rate, incoming_frame_rate,
+ fraction_lost, 3);
+
+ // Update content: motion level, and 3 spatial prediction errors.
+ // High motion, low spatial.
+ UpdateQmContentData(kTemporalHigh, kSpatialLow, kSpatialLow, kSpatialLow);
+ EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
+ EXPECT_EQ(3, qm_resolution_->ComputeContentClass());
+ EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240,
+ 30.0f));
+
+ // Reset and simulate large rate mis-match: expect no action to go back up.
+ qm_resolution_->ResetRates();
+ qm_resolution_->UpdateCodecParameters(30.0f, 320, 240);
+ EXPECT_EQ(2, qm_resolution_->GetImageType(320, 240));
+ // Update rates for a sequence of intervals.
+ int target_rate2[] = {400, 400, 400, 400, 400};
+ int encoder_sent_rate2[] = {1000, 1000, 1000, 1000, 1000};
+ int incoming_frame_rate2[] = {30, 30, 30, 30, 30};
+ uint8_t fraction_lost2[] = {10, 10, 10, 10, 10};
+ UpdateQmRateData(target_rate2, encoder_sent_rate2, incoming_frame_rate2,
+ fraction_lost2, 5);
+ EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
+ EXPECT_EQ(kStressedEncoding, qm_resolution_->GetEncoderState());
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 320, 240,
+ 30.0f));
+}
+
+// Two stages: temporally down-sample and then back up temporally,
+// as rate has increased.
+TEST_F(QmSelectTest, 2StageDownTemporalUpTemporal) {
+ // Initialize with bitrate, frame rate, native system width/height, and
+ // number of temporal layers.
+ InitQmNativeData(50, 30, 640, 480, 1);
+
+ // Update with encoder frame size.
+ uint16_t codec_width = 640;
+ uint16_t codec_height = 480;
+ qm_resolution_->UpdateCodecParameters(30.0f, codec_width, codec_height);
+ EXPECT_EQ(5, qm_resolution_->GetImageType(codec_width, codec_height));
+
+ // Update rates for a sequence of intervals.
+ int target_rate[] = {50, 50, 50};
+ int encoder_sent_rate[] = {50, 50, 50};
+ int incoming_frame_rate[] = {30, 30, 30};
+ uint8_t fraction_lost[] = {10, 10, 10};
+ UpdateQmRateData(target_rate, encoder_sent_rate, incoming_frame_rate,
+ fraction_lost, 3);
+
+ // Update content: motion level, and 3 spatial prediction errors.
+ // Low motion, high spatial.
+ UpdateQmContentData(kTemporalLow, kSpatialHigh, kSpatialHigh, kSpatialHigh);
+ EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
+ EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
+ EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 2.0f, 640, 480,
+ 15.5f));
+
+ // Reset rates and go up in rate: expect to go back up.
+ qm_resolution_->ResetRates();
+ // Update rates for a sequence of intervals.
+ int target_rate2[] = {400, 400, 400, 400, 400};
+ int encoder_sent_rate2[] = {400, 400, 400, 400, 400};
+ int incoming_frame_rate2[] = {15, 15, 15, 15, 15};
+ uint8_t fraction_lost2[] = {10, 10, 10, 10, 10};
+ UpdateQmRateData(target_rate2, encoder_sent_rate2, incoming_frame_rate2,
+ fraction_lost2, 5);
+ EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
+ EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 0.5f, 640, 480,
+ 30.0f));
+}
+
+// Two stages: temporal down-sample and then back up temporally, since encoder
+// is under-shooting target even though rate has not increased much.
+TEST_F(QmSelectTest, 2StageDownTemporalUpTemporalUndershoot) {
+ // Initialize with bitrate, frame rate, native system width/height, and
+ // number of temporal layers.
+ InitQmNativeData(50, 30, 640, 480, 1);
+
+ // Update with encoder frame size.
+ uint16_t codec_width = 640;
+ uint16_t codec_height = 480;
+ qm_resolution_->UpdateCodecParameters(30.0f, codec_width, codec_height);
+ EXPECT_EQ(5, qm_resolution_->GetImageType(codec_width, codec_height));
+
+ // Update rates for a sequence of intervals.
+ int target_rate[] = {50, 50, 50};
+ int encoder_sent_rate[] = {50, 50, 50};
+ int incoming_frame_rate[] = {30, 30, 30};
+ uint8_t fraction_lost[] = {10, 10, 10};
+ UpdateQmRateData(target_rate, encoder_sent_rate, incoming_frame_rate,
+ fraction_lost, 3);
+
+ // Update content: motion level, and 3 spatial prediction errors.
+ // Low motion, high spatial.
+ UpdateQmContentData(kTemporalLow, kSpatialHigh, kSpatialHigh, kSpatialHigh);
+ EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
+ EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
+ EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 2.0f, 640, 480,
+ 15.5f));
+
+  // Reset rates and simulate under-shooting scenario: expect to go back up.
+ qm_resolution_->ResetRates();
+ // Update rates for a sequence of intervals.
+ int target_rate2[] = {150, 150, 150, 150, 150};
+ int encoder_sent_rate2[] = {50, 50, 50, 50, 50};
+ int incoming_frame_rate2[] = {15, 15, 15, 15, 15};
+ uint8_t fraction_lost2[] = {10, 10, 10, 10, 10};
+ UpdateQmRateData(target_rate2, encoder_sent_rate2, incoming_frame_rate2,
+ fraction_lost2, 5);
+ EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
+ EXPECT_EQ(kEasyEncoding, qm_resolution_->GetEncoderState());
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 0.5f, 640, 480,
+ 30.0f));
+}
+
+// Two stages: temporal down-sample and then no action to go up,
+// as encoding rate mis-match is too high.
+TEST_F(QmSelectTest, 2StageDownTemporalNoActionUp) {
+ // Initialize with bitrate, frame rate, native system width/height, and
+ // number of temporal layers.
+ InitQmNativeData(50, 30, 640, 480, 1);
+
+ // Update with encoder frame size.
+ uint16_t codec_width = 640;
+ uint16_t codec_height = 480;
+ qm_resolution_->UpdateCodecParameters(30.0f, codec_width, codec_height);
+ EXPECT_EQ(5, qm_resolution_->GetImageType(codec_width, codec_height));
+
+ // Update rates for a sequence of intervals.
+ int target_rate[] = {50, 50, 50};
+ int encoder_sent_rate[] = {50, 50, 50};
+ int incoming_frame_rate[] = {30, 30, 30};
+ uint8_t fraction_lost[] = {10, 10, 10};
+ UpdateQmRateData(target_rate, encoder_sent_rate, incoming_frame_rate,
+ fraction_lost, 3);
+
+ // Update content: motion level, and 3 spatial prediction errors.
+ // Low motion, high spatial.
+ UpdateQmContentData(kTemporalLow, kSpatialHigh, kSpatialHigh, kSpatialHigh);
+ EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
+ EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
+ EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
+  EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 2.0f, 640, 480,
+                                      15.5f));
+
+ // Reset and simulate large rate mis-match: expect no action to go back up.
+ qm_resolution_->UpdateCodecParameters(15.0f, codec_width, codec_height);
+ qm_resolution_->ResetRates();
+ // Update rates for a sequence of intervals.
+ int target_rate2[] = {600, 600, 600, 600, 600};
+ int encoder_sent_rate2[] = {1000, 1000, 1000, 1000, 1000};
+ int incoming_frame_rate2[] = {15, 15, 15, 15, 15};
+ uint8_t fraction_lost2[] = {10, 10, 10, 10, 10};
+ UpdateQmRateData(target_rate2, encoder_sent_rate2, incoming_frame_rate2,
+ fraction_lost2, 5);
+ EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
+ EXPECT_EQ(kStressedEncoding, qm_resolution_->GetEncoderState());
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 640, 480,
+ 15.0f));
+}
+
+// 3 stages: spatial down-sample, followed by temporal down-sample,
+// and then go up to full state, as encoding rate has increased.
+TEST_F(QmSelectTest, 3StageDownSpatialTemporalUpSpatialTemporal) {
+ // Initialize with bitrate, frame rate, native system width/height, and
+ // number of temporal layers.
+ InitQmNativeData(80, 30, 640, 480, 1);
+
+ // Update with encoder frame size.
+ uint16_t codec_width = 640;
+ uint16_t codec_height = 480;
+ qm_resolution_->UpdateCodecParameters(30.0f, codec_width, codec_height);
+ EXPECT_EQ(5, qm_resolution_->GetImageType(codec_width, codec_height));
+
+ // Update rates for a sequence of intervals.
+ int target_rate[] = {80, 80, 80};
+ int encoder_sent_rate[] = {80, 80, 80};
+ int incoming_frame_rate[] = {30, 30, 30};
+ uint8_t fraction_lost[] = {10, 10, 10};
+ UpdateQmRateData(target_rate, encoder_sent_rate, incoming_frame_rate,
+ fraction_lost, 3);
+
+ // Update content: motion level, and 3 spatial prediction errors.
+ // High motion, low spatial.
+ UpdateQmContentData(kTemporalHigh, kSpatialLow, kSpatialLow, kSpatialLow);
+ EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
+ EXPECT_EQ(3, qm_resolution_->ComputeContentClass());
+ EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240,
+ 30.0f));
+
+ // Change content data: expect temporal down-sample.
+ qm_resolution_->UpdateCodecParameters(30.0f, 320, 240);
+ EXPECT_EQ(2, qm_resolution_->GetImageType(320, 240));
+
+ // Reset rates and go lower in rate.
+ qm_resolution_->ResetRates();
+ int target_rate2[] = {40, 40, 40, 40, 40};
+ int encoder_sent_rate2[] = {40, 40, 40, 40, 40};
+ int incoming_frame_rate2[] = {30, 30, 30, 30, 30};
+ uint8_t fraction_lost2[] = {10, 10, 10, 10, 10};
+ UpdateQmRateData(target_rate2, encoder_sent_rate2, incoming_frame_rate2,
+ fraction_lost2, 5);
+
+ // Update content: motion level, and 3 spatial prediction errors.
+ // Low motion, high spatial.
+ UpdateQmContentData(kTemporalLow, kSpatialHigh, kSpatialHigh, kSpatialHigh);
+ EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
+ EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
+ EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 320, 240,
+ 20.5f));
+
+  // Reset rates and go high up in rate: expect to go back up both spatially
+  // and temporally. The 1/2x1/2 spatial is undone in two stages.
+ qm_resolution_->ResetRates();
+ // Update rates for a sequence of intervals.
+ int target_rate3[] = {1000, 1000, 1000, 1000, 1000};
+ int encoder_sent_rate3[] = {1000, 1000, 1000, 1000, 1000};
+ int incoming_frame_rate3[] = {20, 20, 20, 20, 20};
+ uint8_t fraction_lost3[] = {10, 10, 10, 10, 10};
+ UpdateQmRateData(target_rate3, encoder_sent_rate3, incoming_frame_rate3,
+ fraction_lost3, 5);
+
+ EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
+ EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
+ EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
+ float scale = (4.0f / 3.0f) / 2.0f;
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, scale, scale, 2.0f / 3.0f,
+ 480, 360, 30.0f));
+
+ qm_resolution_->UpdateCodecParameters(30.0f, 480, 360);
+ EXPECT_EQ(4, qm_resolution_->GetImageType(480, 360));
+ EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 3.0f / 4.0f, 3.0f / 4.0f, 1.0f,
+ 640, 480, 30.0f));
+}
+
+// No down-sampling below some total amount.
+TEST_F(QmSelectTest, NoActionTooMuchDownSampling) {
+ // Initialize with bitrate, frame rate, native system width/height, and
+ // number of temporal layers.
+ InitQmNativeData(150, 30, 1280, 720, 1);
+
+ // Update with encoder frame size.
+ uint16_t codec_width = 1280;
+ uint16_t codec_height = 720;
+ qm_resolution_->UpdateCodecParameters(30.0f, codec_width, codec_height);
+ EXPECT_EQ(7, qm_resolution_->GetImageType(codec_width, codec_height));
+
+ // Update rates for a sequence of intervals.
+ int target_rate[] = {150, 150, 150};
+ int encoder_sent_rate[] = {150, 150, 150};
+ int incoming_frame_rate[] = {30, 30, 30};
+ uint8_t fraction_lost[] = {10, 10, 10};
+ UpdateQmRateData(target_rate, encoder_sent_rate, incoming_frame_rate,
+ fraction_lost, 3);
+
+ // Update content: motion level, and 3 spatial prediction errors.
+ // High motion, low spatial.
+ UpdateQmContentData(kTemporalHigh, kSpatialLow, kSpatialLow, kSpatialLow);
+ EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
+ EXPECT_EQ(3, qm_resolution_->ComputeContentClass());
+ EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 640, 360,
+ 30.0f));
+
+ // Reset and lower rates to get another spatial action (3/4x3/4).
+ // Lower the frame rate for spatial to be selected again.
+ qm_resolution_->ResetRates();
+ qm_resolution_->UpdateCodecParameters(10.0f, 640, 360);
+ EXPECT_EQ(4, qm_resolution_->GetImageType(640, 360));
+ // Update rates for a sequence of intervals.
+ int target_rate2[] = {70, 70, 70, 70, 70};
+ int encoder_sent_rate2[] = {70, 70, 70, 70, 70};
+ int incoming_frame_rate2[] = {10, 10, 10, 10, 10};
+ uint8_t fraction_lost2[] = {10, 10, 10, 10, 10};
+ UpdateQmRateData(target_rate2, encoder_sent_rate2, incoming_frame_rate2,
+ fraction_lost2, 5);
+
+ // Update content: motion level, and 3 spatial prediction errors.
+ // High motion, medium spatial.
+ UpdateQmContentData(kTemporalHigh, kSpatialMedium, kSpatialMedium,
+ kSpatialMedium);
+ EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
+ EXPECT_EQ(5, qm_resolution_->ComputeContentClass());
+ EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 4.0f / 3.0f, 4.0f / 3.0f,
+ 1.0f, 480, 270, 10.0f));
+
+ // Reset and go to very low rate: no action should be taken,
+ // we went down too much already.
+ qm_resolution_->ResetRates();
+ qm_resolution_->UpdateCodecParameters(10.0f, 480, 270);
+ EXPECT_EQ(3, qm_resolution_->GetImageType(480, 270));
+ // Update rates for a sequence of intervals.
+ int target_rate3[] = {10, 10, 10, 10, 10};
+ int encoder_sent_rate3[] = {10, 10, 10, 10, 10};
+ int incoming_frame_rate3[] = {10, 10, 10, 10, 10};
+ uint8_t fraction_lost3[] = {10, 10, 10, 10, 10};
+ UpdateQmRateData(target_rate3, encoder_sent_rate3, incoming_frame_rate3,
+ fraction_lost3, 5);
+ EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
+ EXPECT_EQ(5, qm_resolution_->ComputeContentClass());
+ EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.0f, 480, 270,
+ 10.0f));
+}
+
+// Multiple down-sampling stages and then undo all of them.
+// Spatial down-sample 3/4x3/4, followed by temporal down-sample 2/3,
+// followed by spatial 3/4x3/4. Then go up to full state,
+// as encoding rate has increased.
+TEST_F(QmSelectTest, MultipleStagesCheckActionHistory1) {
+ // Initialize with bitrate, frame rate, native system width/height, and
+ // number of temporal layers.
+ InitQmNativeData(150, 30, 640, 480, 1);
+
+ // Update with encoder frame size.
+ uint16_t codec_width = 640;
+ uint16_t codec_height = 480;
+ qm_resolution_->UpdateCodecParameters(30.0f, codec_width, codec_height);
+ EXPECT_EQ(5, qm_resolution_->GetImageType(codec_width, codec_height));
+
+ // Go down spatial 3/4x3/4.
+ // Update rates for a sequence of intervals.
+ int target_rate[] = {150, 150, 150};
+ int encoder_sent_rate[] = {150, 150, 150};
+ int incoming_frame_rate[] = {30, 30, 30};
+ uint8_t fraction_lost[] = {10, 10, 10};
+ UpdateQmRateData(target_rate, encoder_sent_rate, incoming_frame_rate,
+ fraction_lost, 3);
+
+ // Update content: motion level, and 3 spatial prediction errors.
+ // Medium motion, low spatial.
+ UpdateQmContentData(kTemporalMedium, kSpatialLow, kSpatialLow, kSpatialLow);
+ EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
+ EXPECT_EQ(6, qm_resolution_->ComputeContentClass());
+ EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 4.0f / 3.0f, 4.0f / 3.0f,
+ 1.0f, 480, 360, 30.0f));
+ // Go down 2/3 temporal.
+ qm_resolution_->UpdateCodecParameters(30.0f, 480, 360);
+ EXPECT_EQ(4, qm_resolution_->GetImageType(480, 360));
+ qm_resolution_->ResetRates();
+ int target_rate2[] = {100, 100, 100, 100, 100};
+ int encoder_sent_rate2[] = {100, 100, 100, 100, 100};
+ int incoming_frame_rate2[] = {30, 30, 30, 30, 30};
+ uint8_t fraction_lost2[] = {10, 10, 10, 10, 10};
+ UpdateQmRateData(target_rate2, encoder_sent_rate2, incoming_frame_rate2,
+ fraction_lost2, 5);
+
+ // Update content: motion level, and 3 spatial prediction errors.
+ // Low motion, high spatial.
+ UpdateQmContentData(kTemporalLow, kSpatialHigh, kSpatialHigh, kSpatialHigh);
+ EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
+ EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
+ EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 480, 360,
+ 20.5f));
+
+ // Go down 3/4x3/4 spatial:
+ qm_resolution_->UpdateCodecParameters(20.0f, 480, 360);
+ qm_resolution_->ResetRates();
+ int target_rate3[] = {80, 80, 80, 80, 80};
+ int encoder_sent_rate3[] = {80, 80, 80, 80, 80};
+ int incoming_frame_rate3[] = {20, 20, 20, 20, 20};
+ uint8_t fraction_lost3[] = {10, 10, 10, 10, 10};
+ UpdateQmRateData(target_rate3, encoder_sent_rate3, incoming_frame_rate3,
+ fraction_lost3, 5);
+
+ // Update content: motion level, and 3 spatial prediction errors.
+ // High motion, low spatial.
+ UpdateQmContentData(kTemporalHigh, kSpatialLow, kSpatialLow, kSpatialLow);
+ EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
+ EXPECT_EQ(3, qm_resolution_->ComputeContentClass());
+ EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
+ // The two spatial actions of 3/4x3/4 are converted to 1/2x1/2,
+ // so scale factor is 2.0.
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240,
+ 20.0f));
+
+  // Reset rates and go high up in rate: expect to go back up, 1/2x1/2
+  // spatially and 1/2 temporally. The spatial undoing is done in 2 stages.
+ qm_resolution_->UpdateCodecParameters(15.0f, 320, 240);
+ EXPECT_EQ(2, qm_resolution_->GetImageType(320, 240));
+ qm_resolution_->ResetRates();
+ // Update rates for a sequence of intervals.
+ int target_rate4[] = {1000, 1000, 1000, 1000, 1000};
+ int encoder_sent_rate4[] = {1000, 1000, 1000, 1000, 1000};
+ int incoming_frame_rate4[] = {15, 15, 15, 15, 15};
+ uint8_t fraction_lost4[] = {10, 10, 10, 10, 10};
+ UpdateQmRateData(target_rate4, encoder_sent_rate4, incoming_frame_rate4,
+ fraction_lost4, 5);
+
+ EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
+ EXPECT_EQ(3, qm_resolution_->ComputeContentClass());
+ EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
+ float scale = (4.0f / 3.0f) / 2.0f;
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, scale, scale, 2.0f / 3.0f, 480,
+ 360, 30.0f));
+
+ qm_resolution_->UpdateCodecParameters(30.0f, 480, 360);
+ EXPECT_EQ(4, qm_resolution_->GetImageType(480, 360));
+ EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 3.0f / 4.0f, 3.0f / 4.0f, 1.0f,
+ 640, 480, 30.0f));
+}
+
+// Multiple down-sampling and up-sample stages, with partial undoing.
+// Spatial down-sample 1/2x1/2, followed by temporal down-sample 2/3, undo the
+// temporal, then another temporal, and then undo both spatial and temporal.
+TEST_F(QmSelectTest, MultipleStagesCheckActionHistory2) {
+ // Initialize with bitrate, frame rate, native system width/height, and
+ // number of temporal layers.
+ InitQmNativeData(80, 30, 640, 480, 1);
+
+ // Update with encoder frame size.
+ uint16_t codec_width = 640;
+ uint16_t codec_height = 480;
+ qm_resolution_->UpdateCodecParameters(30.0f, codec_width, codec_height);
+ EXPECT_EQ(5, qm_resolution_->GetImageType(codec_width, codec_height));
+
+ // Go down 1/2x1/2 spatial.
+ // Update rates for a sequence of intervals.
+ int target_rate[] = {80, 80, 80};
+ int encoder_sent_rate[] = {80, 80, 80};
+ int incoming_frame_rate[] = {30, 30, 30};
+ uint8_t fraction_lost[] = {10, 10, 10};
+ UpdateQmRateData(target_rate, encoder_sent_rate, incoming_frame_rate,
+ fraction_lost, 3);
+
+ // Update content: motion level, and 3 spatial prediction errors.
+ // Medium motion, low spatial.
+ UpdateQmContentData(kTemporalMedium, kSpatialLow, kSpatialLow, kSpatialLow);
+ EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
+ EXPECT_EQ(6, qm_resolution_->ComputeContentClass());
+ EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240,
+ 30.0f));
+
+ // Go down 2/3 temporal.
+ qm_resolution_->UpdateCodecParameters(30.0f, 320, 240);
+ EXPECT_EQ(2, qm_resolution_->GetImageType(320, 240));
+ qm_resolution_->ResetRates();
+ int target_rate2[] = {40, 40, 40, 40, 40};
+ int encoder_sent_rate2[] = {40, 40, 40, 40, 40};
+ int incoming_frame_rate2[] = {30, 30, 30, 30, 30};
+ uint8_t fraction_lost2[] = {10, 10, 10, 10, 10};
+ UpdateQmRateData(target_rate2, encoder_sent_rate2, incoming_frame_rate2,
+ fraction_lost2, 5);
+
+ // Update content: motion level, and 3 spatial prediction errors.
+ // Medium motion, high spatial.
+ UpdateQmContentData(kTemporalMedium, kSpatialHigh, kSpatialHigh,
+ kSpatialHigh);
+ EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
+ EXPECT_EQ(7, qm_resolution_->ComputeContentClass());
+ EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 320, 240,
+ 20.5f));
+
+ // Go up 2/3 temporally.
+ qm_resolution_->UpdateCodecParameters(20.0f, 320, 240);
+ qm_resolution_->ResetRates();
+ // Update rates for a sequence of intervals.
+ int target_rate3[] = {150, 150, 150, 150, 150};
+ int encoder_sent_rate3[] = {150, 150, 150, 150, 150};
+ int incoming_frame_rate3[] = {20, 20, 20, 20, 20};
+ uint8_t fraction_lost3[] = {10, 10, 10, 10, 10};
+ UpdateQmRateData(target_rate3, encoder_sent_rate3, incoming_frame_rate3,
+ fraction_lost3, 5);
+
+ EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
+ EXPECT_EQ(7, qm_resolution_->ComputeContentClass());
+ EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 2.0f / 3.0f, 320,
+ 240, 30.0f));
+
+ // Go down 2/3 temporal.
+ qm_resolution_->UpdateCodecParameters(30.0f, 320, 240);
+ EXPECT_EQ(2, qm_resolution_->GetImageType(320, 240));
+ qm_resolution_->ResetRates();
+ int target_rate4[] = {40, 40, 40, 40, 40};
+ int encoder_sent_rate4[] = {40, 40, 40, 40, 40};
+ int incoming_frame_rate4[] = {30, 30, 30, 30, 30};
+ uint8_t fraction_lost4[] = {10, 10, 10, 10, 10};
+ UpdateQmRateData(target_rate4, encoder_sent_rate4, incoming_frame_rate4,
+ fraction_lost4, 5);
+
+ // Update content: motion level, and 3 spatial prediction errors.
+ // Low motion, high spatial.
+ UpdateQmContentData(kTemporalLow, kSpatialHigh, kSpatialHigh, kSpatialHigh);
+ EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
+ EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
+ EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 320, 240,
+ 20.5f));
+
+ // Go up spatial and temporal. Spatial undoing is done in 2 stages.
+ qm_resolution_->UpdateCodecParameters(20.5f, 320, 240);
+ qm_resolution_->ResetRates();
+ // Update rates for a sequence of intervals.
+ int target_rate5[] = {1000, 1000, 1000, 1000, 1000};
+ int encoder_sent_rate5[] = {1000, 1000, 1000, 1000, 1000};
+ int incoming_frame_rate5[] = {20, 20, 20, 20, 20};
+ uint8_t fraction_lost5[] = {10, 10, 10, 10, 10};
+ UpdateQmRateData(target_rate5, encoder_sent_rate5, incoming_frame_rate5,
+ fraction_lost5, 5);
+
+ EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
+ float scale = (4.0f / 3.0f) / 2.0f;
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, scale, scale, 2.0f / 3.0f,
+ 480, 360, 30.0f));
+
+ qm_resolution_->UpdateCodecParameters(30.0f, 480, 360);
+ EXPECT_EQ(4, qm_resolution_->GetImageType(480, 360));
+ EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 3.0f / 4.0f, 3.0f / 4.0f, 1.0f,
+ 640, 480, 30.0f));
+}
+
+// Multiple down-sampling and up-sample stages, with partial undoing.
+// Spatial down-sample 3/4x3/4, followed by temporal down-sample 2/3,
+// undo the temporal 2/3, and then undo the spatial.
+TEST_F(QmSelectTest, MultipleStagesCheckActionHistory3) {
+ // Initialize with bitrate, frame rate, native system width/height, and
+ // number of temporal layers.
+ InitQmNativeData(100, 30, 640, 480, 1);
+
+ // Update with encoder frame size.
+ uint16_t codec_width = 640;
+ uint16_t codec_height = 480;
+ qm_resolution_->UpdateCodecParameters(30.0f, codec_width, codec_height);
+ EXPECT_EQ(5, qm_resolution_->GetImageType(codec_width, codec_height));
+
+ // Go down 3/4x3/4 spatial.
+ // Update rates for a sequence of intervals.
+ int target_rate[] = {100, 100, 100};
+ int encoder_sent_rate[] = {100, 100, 100};
+ int incoming_frame_rate[] = {30, 30, 30};
+ uint8_t fraction_lost[] = {10, 10, 10};
+ UpdateQmRateData(target_rate, encoder_sent_rate, incoming_frame_rate,
+ fraction_lost, 3);
+
+ // Update content: motion level, and 3 spatial prediction errors.
+ // Medium motion, low spatial.
+ UpdateQmContentData(kTemporalMedium, kSpatialLow, kSpatialLow, kSpatialLow);
+ EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
+ EXPECT_EQ(6, qm_resolution_->ComputeContentClass());
+ EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 4.0f / 3.0f, 4.0f / 3.0f,
+ 1.0f, 480, 360, 30.0f));
+
+ // Go down 2/3 temporal.
+ qm_resolution_->UpdateCodecParameters(30.0f, 480, 360);
+ EXPECT_EQ(4, qm_resolution_->GetImageType(480, 360));
+ qm_resolution_->ResetRates();
+ int target_rate2[] = {100, 100, 100, 100, 100};
+ int encoder_sent_rate2[] = {100, 100, 100, 100, 100};
+ int incoming_frame_rate2[] = {30, 30, 30, 30, 30};
+ uint8_t fraction_lost2[] = {10, 10, 10, 10, 10};
+ UpdateQmRateData(target_rate2, encoder_sent_rate2, incoming_frame_rate2,
+ fraction_lost2, 5);
+
+ // Update content: motion level, and 3 spatial prediction errors.
+ // Low motion, high spatial.
+ UpdateQmContentData(kTemporalLow, kSpatialHigh, kSpatialHigh, kSpatialHigh);
+ EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
+ EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
+ EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 1.5f, 480, 360,
+ 20.5f));
+
+ // Go up 2/3 temporal.
+ qm_resolution_->UpdateCodecParameters(20.5f, 480, 360);
+ qm_resolution_->ResetRates();
+ // Update rates for a sequence of intervals.
+ int target_rate3[] = {250, 250, 250, 250, 250};
+ int encoder_sent_rate3[] = {250, 250, 250, 250, 250};
+ int incoming_frame_rate3[] = {20, 20, 20, 20, 120};
+ uint8_t fraction_lost3[] = {10, 10, 10, 10, 10};
+ UpdateQmRateData(target_rate3, encoder_sent_rate3, incoming_frame_rate3,
+ fraction_lost3, 5);
+
+ EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
+ EXPECT_EQ(1, qm_resolution_->ComputeContentClass());
+ EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 1.0f, 1.0f, 2.0f / 3.0f, 480,
+ 360, 30.0f));
+
+ // Go up spatial.
+ qm_resolution_->UpdateCodecParameters(30.0f, 480, 360);
+ EXPECT_EQ(4, qm_resolution_->GetImageType(480, 360));
+ qm_resolution_->ResetRates();
+ int target_rate4[] = {500, 500, 500, 500, 500};
+ int encoder_sent_rate4[] = {500, 500, 500, 500, 500};
+ int incoming_frame_rate4[] = {30, 30, 30, 30, 30};
+ uint8_t fraction_lost4[] = {30, 30, 30, 30, 30};
+ UpdateQmRateData(target_rate4, encoder_sent_rate4, incoming_frame_rate4,
+ fraction_lost4, 5);
+
+ EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
+ EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 3.0f / 4.0f, 3.0f / 4.0f,
+ 1.0f, 640, 480, 30.0f));
+}
+
+// Two stages of 3/4x3/4 converted to one stage of 1/2x1/2.
+TEST_F(QmSelectTest, ConvertThreeQuartersToOneHalf) {
+ // Initialize with bitrate, frame rate, native system width/height, and
+ // number of temporal layers.
+ InitQmNativeData(150, 30, 640, 480, 1);
+
+ // Update with encoder frame size.
+ uint16_t codec_width = 640;
+ uint16_t codec_height = 480;
+ qm_resolution_->UpdateCodecParameters(30.0f, codec_width, codec_height);
+ EXPECT_EQ(5, qm_resolution_->GetImageType(codec_width, codec_height));
+
+ // Go down 3/4x3/4 spatial.
+ // Update rates for a sequence of intervals.
+ int target_rate[] = {150, 150, 150};
+ int encoder_sent_rate[] = {150, 150, 150};
+ int incoming_frame_rate[] = {30, 30, 30};
+ uint8_t fraction_lost[] = {10, 10, 10};
+ UpdateQmRateData(target_rate, encoder_sent_rate, incoming_frame_rate,
+ fraction_lost, 3);
+
+ // Update content: motion level, and 3 spatial prediction errors.
+ // Medium motion, low spatial.
+ UpdateQmContentData(kTemporalMedium, kSpatialLow, kSpatialLow, kSpatialLow);
+ EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
+ EXPECT_EQ(6, qm_resolution_->ComputeContentClass());
+ EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 4.0f / 3.0f, 4.0f / 3.0f,
+ 1.0f, 480, 360, 30.0f));
+
+  // Set rates to go down another 3/4 spatial. Should be converted to 1/2.
+ qm_resolution_->UpdateCodecParameters(30.0f, 480, 360);
+ EXPECT_EQ(4, qm_resolution_->GetImageType(480, 360));
+ qm_resolution_->ResetRates();
+ int target_rate2[] = {100, 100, 100, 100, 100};
+ int encoder_sent_rate2[] = {100, 100, 100, 100, 100};
+ int incoming_frame_rate2[] = {30, 30, 30, 30, 30};
+ uint8_t fraction_lost2[] = {10, 10, 10, 10, 10};
+ UpdateQmRateData(target_rate2, encoder_sent_rate2, incoming_frame_rate2,
+ fraction_lost2, 5);
+
+ // Update content: motion level, and 3 spatial prediction errors.
+ // Medium motion, low spatial.
+ UpdateQmContentData(kTemporalMedium, kSpatialLow, kSpatialLow, kSpatialLow);
+ EXPECT_EQ(0, qm_resolution_->SelectResolution(&qm_scale_));
+ EXPECT_EQ(6, qm_resolution_->ComputeContentClass());
+ EXPECT_EQ(kStableEncoding, qm_resolution_->GetEncoderState());
+ EXPECT_TRUE(IsSelectedActionCorrect(qm_scale_, 2.0f, 2.0f, 1.0f, 320, 240,
+ 30.0f));
+}
+
+void QmSelectTest::InitQmNativeData(float initial_bit_rate,
+ int user_frame_rate,
+ int native_width,
+ int native_height,
+ int num_layers) {
+ EXPECT_EQ(0, qm_resolution_->Initialize(initial_bit_rate,
+ user_frame_rate,
+ native_width,
+ native_height,
+ num_layers));
+}
+
+void QmSelectTest::UpdateQmContentData(float motion_metric,
+ float spatial_metric,
+ float spatial_metric_horiz,
+ float spatial_metric_vert) {
+ content_metrics_->motion_magnitude = motion_metric;
+ content_metrics_->spatial_pred_err = spatial_metric;
+ content_metrics_->spatial_pred_err_h = spatial_metric_horiz;
+ content_metrics_->spatial_pred_err_v = spatial_metric_vert;
+ qm_resolution_->UpdateContent(content_metrics_);
+}
+
+void QmSelectTest::UpdateQmEncodedFrame(size_t* encoded_size,
+ size_t num_updates) {
+ for (size_t i = 0; i < num_updates; ++i) {
+    // Convert from encoded size in kbits to bytes: 1000 * kbits / 8.
+ size_t encoded_size_update = 1000 * encoded_size[i] / 8;
+ qm_resolution_->UpdateEncodedSize(encoded_size_update);
+ }
+}
+
+void QmSelectTest::UpdateQmRateData(int* target_rate,
+ int* encoder_sent_rate,
+ int* incoming_frame_rate,
+ uint8_t* fraction_lost,
+ int num_updates) {
+ for (int i = 0; i < num_updates; ++i) {
+ float target_rate_update = target_rate[i];
+ float encoder_sent_rate_update = encoder_sent_rate[i];
+ float incoming_frame_rate_update = incoming_frame_rate[i];
+ uint8_t fraction_lost_update = fraction_lost[i];
+ qm_resolution_->UpdateRates(target_rate_update,
+ encoder_sent_rate_update,
+ incoming_frame_rate_update,
+ fraction_lost_update);
+ }
+}
+
+// Checks whether the selected action from the QmResolution class matches
+// the expected scales from |fac_width|, |fac_height| and |fac_temp|.
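+// For example, a spatial factor of 4/3 maps 640x480 to 480x360, and a factor
+// of 2.0 maps it to 320x240, as exercised in the tests above.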
+bool QmSelectTest::IsSelectedActionCorrect(VCMResolutionScale* qm_scale,
+ float fac_width,
+ float fac_height,
+ float fac_temp,
+ uint16_t new_width,
+ uint16_t new_height,
+ float new_frame_rate) {
+  return qm_scale->spatial_width_fact == fac_width &&
+         qm_scale->spatial_height_fact == fac_height &&
+         qm_scale->temporal_fact == fac_temp &&
+         qm_scale->codec_width == new_width &&
+         qm_scale->codec_height == new_height &&
+         qm_scale->frame_rate == new_frame_rate;
}
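+
+// For reference, the call sequence these tests exercise (a sketch based only
+// on the helper calls above, not a production setup):
+//   qm_resolution_->Initialize(bit_rate, frame_rate, width, height, layers);
+//   qm_resolution_->UpdateCodecParameters(frame_rate, width, height);
+//   qm_resolution_->UpdateRates(target, sent, frame_rate, loss);  // Per interval.
+//   qm_resolution_->UpdateContent(content_metrics_);
+//   qm_resolution_->SelectResolution(&qm_scale_);  // Returns 0 on success.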
+} // namespace webrtc
diff --git a/webrtc/modules/video_coding/main/source/receiver.cc b/webrtc/modules/video_coding/main/source/receiver.cc
new file mode 100644
index 0000000000..0707a9c3cd
--- /dev/null
+++ b/webrtc/modules/video_coding/main/source/receiver.cc
@@ -0,0 +1,268 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/video_coding/main/source/receiver.h"
+
+#include <assert.h>
+
+#include <cstdlib>
+
+#include "webrtc/base/trace_event.h"
+#include "webrtc/modules/video_coding/main/source/encoded_frame.h"
+#include "webrtc/modules/video_coding/main/source/internal_defines.h"
+#include "webrtc/modules/video_coding/main/source/media_opt_util.h"
+#include "webrtc/system_wrappers/include/clock.h"
+#include "webrtc/system_wrappers/include/logging.h"
+
+namespace webrtc {
+
+enum { kMaxReceiverDelayMs = 10000 };
+
+VCMReceiver::VCMReceiver(VCMTiming* timing,
+ Clock* clock,
+ EventFactory* event_factory)
+ : VCMReceiver(timing,
+ clock,
+ rtc::scoped_ptr<EventWrapper>(event_factory->CreateEvent()),
+ rtc::scoped_ptr<EventWrapper>(event_factory->CreateEvent())) {
+}
+
+VCMReceiver::VCMReceiver(VCMTiming* timing,
+ Clock* clock,
+ rtc::scoped_ptr<EventWrapper> receiver_event,
+ rtc::scoped_ptr<EventWrapper> jitter_buffer_event)
+ : crit_sect_(CriticalSectionWrapper::CreateCriticalSection()),
+ clock_(clock),
+ jitter_buffer_(clock_, jitter_buffer_event.Pass()),
+ timing_(timing),
+ render_wait_event_(receiver_event.Pass()),
+ max_video_delay_ms_(kMaxVideoDelayMs) {
+ Reset();
+}
+
+VCMReceiver::~VCMReceiver() {
+ render_wait_event_->Set();
+ delete crit_sect_;
+}
+
+void VCMReceiver::Reset() {
+ CriticalSectionScoped cs(crit_sect_);
+ if (!jitter_buffer_.Running()) {
+ jitter_buffer_.Start();
+ } else {
+ jitter_buffer_.Flush();
+ }
+}
+
+void VCMReceiver::UpdateRtt(int64_t rtt) {
+ jitter_buffer_.UpdateRtt(rtt);
+}
+
+int32_t VCMReceiver::InsertPacket(const VCMPacket& packet,
+ uint16_t frame_width,
+ uint16_t frame_height) {
+ // Insert the packet into the jitter buffer. The packet can either be empty or
+ // contain media at this point.
+ bool retransmitted = false;
+ const VCMFrameBufferEnum ret = jitter_buffer_.InsertPacket(packet,
+ &retransmitted);
+ if (ret == kOldPacket) {
+ return VCM_OK;
+ } else if (ret == kFlushIndicator) {
+ return VCM_FLUSH_INDICATOR;
+ } else if (ret < 0) {
+ return VCM_JITTER_BUFFER_ERROR;
+ }
+ if (ret == kCompleteSession && !retransmitted) {
+ // We don't want to include timestamps which have suffered from
+ // retransmission here, since we compensate with extra retransmission
+ // delay within the jitter estimate.
+ timing_->IncomingTimestamp(packet.timestamp, clock_->TimeInMilliseconds());
+ }
+ return VCM_OK;
+}
+
+void VCMReceiver::TriggerDecoderShutdown() {
+ jitter_buffer_.Stop();
+ render_wait_event_->Set();
+}
+
+VCMEncodedFrame* VCMReceiver::FrameForDecoding(uint16_t max_wait_time_ms,
+ int64_t& next_render_time_ms,
+ bool render_timing) {
+ const int64_t start_time_ms = clock_->TimeInMilliseconds();
+ uint32_t frame_timestamp = 0;
+ // Exhaust wait time to get a complete frame for decoding.
+ bool found_frame = jitter_buffer_.NextCompleteTimestamp(
+ max_wait_time_ms, &frame_timestamp);
+
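+  // If no complete frame arrived within the wait budget, fall back to a frame
+  // that may be incomplete.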
+ if (!found_frame)
+ found_frame = jitter_buffer_.NextMaybeIncompleteTimestamp(&frame_timestamp);
+
+ if (!found_frame)
+ return NULL;
+
+  // We have a frame: set timing and render timestamp.
+ timing_->SetJitterDelay(jitter_buffer_.EstimatedJitterMs());
+ const int64_t now_ms = clock_->TimeInMilliseconds();
+ timing_->UpdateCurrentDelay(frame_timestamp);
+ next_render_time_ms = timing_->RenderTimeMs(frame_timestamp, now_ms);
+ // Check render timing.
+ bool timing_error = false;
+ // Assume that render timing errors are due to changes in the video stream.
+ if (next_render_time_ms < 0) {
+ timing_error = true;
+ } else if (std::abs(next_render_time_ms - now_ms) > max_video_delay_ms_) {
+ int frame_delay = static_cast<int>(std::abs(next_render_time_ms - now_ms));
+ LOG(LS_WARNING) << "A frame about to be decoded is out of the configured "
+ << "delay bounds (" << frame_delay << " > "
+ << max_video_delay_ms_
+ << "). Resetting the video jitter buffer.";
+ timing_error = true;
+ } else if (static_cast<int>(timing_->TargetVideoDelay()) >
+ max_video_delay_ms_) {
+ LOG(LS_WARNING) << "The video target delay has grown larger than "
+ << max_video_delay_ms_ << " ms. Resetting jitter buffer.";
+ timing_error = true;
+ }
+
+ if (timing_error) {
+ // Timing error => reset timing and flush the jitter buffer.
+ jitter_buffer_.Flush();
+ timing_->Reset();
+ return NULL;
+ }
+
+ if (!render_timing) {
+ // Decode frame as close as possible to the render timestamp.
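+    // The remaining wait budget is the caller's |max_wait_time_ms| minus the
+    // time already spent waiting for a frame above, clamped at zero.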
+ const int32_t available_wait_time = max_wait_time_ms -
+ static_cast<int32_t>(clock_->TimeInMilliseconds() - start_time_ms);
+ uint16_t new_max_wait_time = static_cast<uint16_t>(
+ VCM_MAX(available_wait_time, 0));
+ uint32_t wait_time_ms = timing_->MaxWaitingTime(
+ next_render_time_ms, clock_->TimeInMilliseconds());
+ if (new_max_wait_time < wait_time_ms) {
+      // We're not allowed to wait until the frame is supposed to be rendered;
+      // wait as long as we're allowed (to avoid busy looping) and then return
+      // NULL. The next call to this function might return the frame.
+ render_wait_event_->Wait(new_max_wait_time);
+ return NULL;
+ }
+ // Wait until it's time to render.
+ render_wait_event_->Wait(wait_time_ms);
+ }
+
+ // Extract the frame from the jitter buffer and set the render time.
+ VCMEncodedFrame* frame = jitter_buffer_.ExtractAndSetDecode(frame_timestamp);
+ if (frame == NULL) {
+ return NULL;
+ }
+ frame->SetRenderTime(next_render_time_ms);
+ TRACE_EVENT_ASYNC_STEP1("webrtc", "Video", frame->TimeStamp(),
+ "SetRenderTS", "render_time", next_render_time_ms);
+ if (!frame->Complete()) {
+ // Update stats for incomplete frames.
+ bool retransmitted = false;
+ const int64_t last_packet_time_ms =
+ jitter_buffer_.LastPacketTime(frame, &retransmitted);
+ if (last_packet_time_ms >= 0 && !retransmitted) {
+ // We don't want to include timestamps which have suffered from
+ // retransmission here, since we compensate with extra retransmission
+ // delay within the jitter estimate.
+ timing_->IncomingTimestamp(frame_timestamp, last_packet_time_ms);
+ }
+ }
+ return frame;
+}
+
+void VCMReceiver::ReleaseFrame(VCMEncodedFrame* frame) {
+ jitter_buffer_.ReleaseFrame(frame);
+}
+
+void VCMReceiver::ReceiveStatistics(uint32_t* bitrate,
+ uint32_t* framerate) {
+ assert(bitrate);
+ assert(framerate);
+ jitter_buffer_.IncomingRateStatistics(framerate, bitrate);
+}
+
+uint32_t VCMReceiver::DiscardedPackets() const {
+ return jitter_buffer_.num_discarded_packets();
+}
+
+void VCMReceiver::SetNackMode(VCMNackMode nackMode,
+ int64_t low_rtt_nack_threshold_ms,
+ int64_t high_rtt_nack_threshold_ms) {
+ CriticalSectionScoped cs(crit_sect_);
+ // Default to always having NACK enabled in hybrid mode.
+ jitter_buffer_.SetNackMode(nackMode, low_rtt_nack_threshold_ms,
+ high_rtt_nack_threshold_ms);
+}
+
+void VCMReceiver::SetNackSettings(size_t max_nack_list_size,
+ int max_packet_age_to_nack,
+ int max_incomplete_time_ms) {
+ jitter_buffer_.SetNackSettings(max_nack_list_size,
+ max_packet_age_to_nack,
+ max_incomplete_time_ms);
+}
+
+VCMNackMode VCMReceiver::NackMode() const {
+ CriticalSectionScoped cs(crit_sect_);
+ return jitter_buffer_.nack_mode();
+}
+
+std::vector<uint16_t> VCMReceiver::NackList(bool* request_key_frame) {
+ return jitter_buffer_.GetNackList(request_key_frame);
+}
+
+void VCMReceiver::SetDecodeErrorMode(VCMDecodeErrorMode decode_error_mode) {
+ jitter_buffer_.SetDecodeErrorMode(decode_error_mode);
+}
+
+VCMDecodeErrorMode VCMReceiver::DecodeErrorMode() const {
+ return jitter_buffer_.decode_error_mode();
+}
+
+int VCMReceiver::SetMinReceiverDelay(int desired_delay_ms) {
+ CriticalSectionScoped cs(crit_sect_);
+ if (desired_delay_ms < 0 || desired_delay_ms > kMaxReceiverDelayMs) {
+ return -1;
+ }
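+  // Extend the timing-error bound by the requested delay so the added playout
+  // delay is not treated as a timing error in FrameForDecoding().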
+ max_video_delay_ms_ = desired_delay_ms + kMaxVideoDelayMs;
+ // Initializing timing to the desired delay.
+ timing_->set_min_playout_delay(desired_delay_ms);
+ return 0;
+}
+
+int VCMReceiver::RenderBufferSizeMs() {
+ uint32_t timestamp_start = 0u;
+ uint32_t timestamp_end = 0u;
+ // Render timestamps are computed just prior to decoding. Therefore this is
+ // only an estimate based on frames' timestamps and current timing state.
+ jitter_buffer_.RenderBufferSize(&timestamp_start, &timestamp_end);
+ if (timestamp_start == timestamp_end) {
+ return 0;
+ }
+ // Update timing.
+ const int64_t now_ms = clock_->TimeInMilliseconds();
+ timing_->SetJitterDelay(jitter_buffer_.EstimatedJitterMs());
+ // Get render timestamps.
+ uint32_t render_start = timing_->RenderTimeMs(timestamp_start, now_ms);
+ uint32_t render_end = timing_->RenderTimeMs(timestamp_end, now_ms);
+ return render_end - render_start;
+}
+
+void VCMReceiver::RegisterStatsCallback(
+ VCMReceiveStatisticsCallback* callback) {
+ jitter_buffer_.RegisterStatsCallback(callback);
+}
+
+} // namespace webrtc
diff --git a/webrtc/modules/video_coding/main/source/receiver.h b/webrtc/modules/video_coding/main/source/receiver.h
new file mode 100644
index 0000000000..e2515d438f
--- /dev/null
+++ b/webrtc/modules/video_coding/main/source/receiver.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_RECEIVER_H_
+#define WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_RECEIVER_H_
+
+#include "webrtc/modules/video_coding/main/source/jitter_buffer.h"
+#include "webrtc/modules/video_coding/main/source/packet.h"
+#include "webrtc/modules/video_coding/main/source/timing.h"
+#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
+#include "webrtc/modules/video_coding/main/interface/video_coding.h"
+#include "webrtc/modules/video_coding/main/interface/video_coding_defines.h"
+
+namespace webrtc {
+
+class Clock;
+class VCMEncodedFrame;
+
+class VCMReceiver {
+ public:
+ VCMReceiver(VCMTiming* timing,
+ Clock* clock,
+ EventFactory* event_factory);
+
+ // Using this constructor, you can specify a different event factory for the
+ // jitter buffer. Useful for unit tests when you want to simulate incoming
+ // packets, in which case the jitter buffer's wait event is different from
+ // that of VCMReceiver itself.
+ VCMReceiver(VCMTiming* timing,
+ Clock* clock,
+ rtc::scoped_ptr<EventWrapper> receiver_event,
+ rtc::scoped_ptr<EventWrapper> jitter_buffer_event);
+
+ ~VCMReceiver();
+
+ void Reset();
+ void UpdateRtt(int64_t rtt);
+ int32_t InsertPacket(const VCMPacket& packet,
+ uint16_t frame_width,
+ uint16_t frame_height);
+ VCMEncodedFrame* FrameForDecoding(uint16_t max_wait_time_ms,
+ int64_t& next_render_time_ms,
+ bool render_timing = true);
+ void ReleaseFrame(VCMEncodedFrame* frame);
+ void ReceiveStatistics(uint32_t* bitrate, uint32_t* framerate);
+ uint32_t DiscardedPackets() const;
+
+ // NACK.
+ void SetNackMode(VCMNackMode nackMode,
+ int64_t low_rtt_nack_threshold_ms,
+ int64_t high_rtt_nack_threshold_ms);
+ void SetNackSettings(size_t max_nack_list_size,
+ int max_packet_age_to_nack,
+ int max_incomplete_time_ms);
+ VCMNackMode NackMode() const;
+ std::vector<uint16_t> NackList(bool* request_key_frame);
+
+ // Receiver video delay.
+ int SetMinReceiverDelay(int desired_delay_ms);
+
+ // Decoding with errors.
+ void SetDecodeErrorMode(VCMDecodeErrorMode decode_error_mode);
+ VCMDecodeErrorMode DecodeErrorMode() const;
+
+ // Returns size in time (milliseconds) of complete continuous frames in the
+ // jitter buffer. The render time is estimated based on the render delay at
+ // the time this function is called.
+ int RenderBufferSizeMs();
+
+ void RegisterStatsCallback(VCMReceiveStatisticsCallback* callback);
+
+ void TriggerDecoderShutdown();
+
+ private:
+ CriticalSectionWrapper* crit_sect_;
+ Clock* const clock_;
+ VCMJitterBuffer jitter_buffer_;
+ VCMTiming* timing_;
+ rtc::scoped_ptr<EventWrapper> render_wait_event_;
+ int max_video_delay_ms_;
+};
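+
+// Typical usage, as a sketch (assumes a VCMTiming, Clock and EventFactory are
+// available; error handling omitted):
+//   VCMReceiver receiver(&timing, clock, &event_factory);
+//   receiver.InsertPacket(packet, width, height);
+//   int64_t render_time_ms = 0;
+//   VCMEncodedFrame* frame =
+//       receiver.FrameForDecoding(max_wait_time_ms, render_time_ms);
+//   if (frame != NULL) {
+//     // Decode |frame|, then return it to the jitter buffer.
+//     receiver.ReleaseFrame(frame);
+//   }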
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_RECEIVER_H_
diff --git a/webrtc/modules/video_coding/main/source/receiver_unittest.cc b/webrtc/modules/video_coding/main/source/receiver_unittest.cc
new file mode 100644
index 0000000000..359b241e72
--- /dev/null
+++ b/webrtc/modules/video_coding/main/source/receiver_unittest.cc
@@ -0,0 +1,526 @@
+/* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <string.h>
+
+#include <list>
+#include <queue>
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/base/checks.h"
+#include "webrtc/modules/video_coding/main/source/packet.h"
+#include "webrtc/modules/video_coding/main/source/receiver.h"
+#include "webrtc/modules/video_coding/main/source/test/stream_generator.h"
+#include "webrtc/modules/video_coding/main/source/timing.h"
+#include "webrtc/modules/video_coding/main/test/test_util.h"
+#include "webrtc/system_wrappers/include/clock.h"
+#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
+
+namespace webrtc {
+
+class TestVCMReceiver : public ::testing::Test {
+ protected:
+ enum { kWidth = 640 };
+ enum { kHeight = 480 };
+
+  TestVCMReceiver()
+      : clock_(new SimulatedClock(0)),
+        timing_(clock_.get()),
+        receiver_(&timing_, clock_.get(), &event_factory_) {
+    stream_generator_.reset(
+        new StreamGenerator(0, clock_->TimeInMilliseconds()));
+  }
+
+ virtual void SetUp() {
+ receiver_.Reset();
+ }
+
+ int32_t InsertPacket(int index) {
+ VCMPacket packet;
+ bool packet_available = stream_generator_->GetPacket(&packet, index);
+ EXPECT_TRUE(packet_available);
+ if (!packet_available)
+ return kGeneralError; // Return here to avoid crashes below.
+ return receiver_.InsertPacket(packet, kWidth, kHeight);
+ }
+
+ int32_t InsertPacketAndPop(int index) {
+ VCMPacket packet;
+ bool packet_available = stream_generator_->PopPacket(&packet, index);
+ EXPECT_TRUE(packet_available);
+ if (!packet_available)
+ return kGeneralError; // Return here to avoid crashes below.
+ return receiver_.InsertPacket(packet, kWidth, kHeight);
+ }
+
+ int32_t InsertFrame(FrameType frame_type, bool complete) {
+ int num_of_packets = complete ? 1 : 2;
+ stream_generator_->GenerateFrame(
+ frame_type, (frame_type != kEmptyFrame) ? num_of_packets : 0,
+ (frame_type == kEmptyFrame) ? 1 : 0, clock_->TimeInMilliseconds());
+ int32_t ret = InsertPacketAndPop(0);
+ if (!complete) {
+ // Drop the second packet.
+ VCMPacket packet;
+ stream_generator_->PopPacket(&packet, 0);
+ }
+ clock_->AdvanceTimeMilliseconds(kDefaultFramePeriodMs);
+ return ret;
+ }
+
+ bool DecodeNextFrame() {
+ int64_t render_time_ms = 0;
+ VCMEncodedFrame* frame =
+ receiver_.FrameForDecoding(0, render_time_ms, false);
+ if (!frame)
+ return false;
+ receiver_.ReleaseFrame(frame);
+ return true;
+ }
+
+ rtc::scoped_ptr<SimulatedClock> clock_;
+ VCMTiming timing_;
+ NullEventFactory event_factory_;
+ VCMReceiver receiver_;
+ rtc::scoped_ptr<StreamGenerator> stream_generator_;
+};
+
+TEST_F(TestVCMReceiver, RenderBufferSize_AllComplete) {
+ EXPECT_EQ(0, receiver_.RenderBufferSizeMs());
+ EXPECT_GE(InsertFrame(kVideoFrameKey, true), kNoError);
+ int num_of_frames = 10;
+ for (int i = 0; i < num_of_frames; ++i) {
+ EXPECT_GE(InsertFrame(kVideoFrameDelta, true), kNoError);
+ }
+ EXPECT_EQ(num_of_frames * kDefaultFramePeriodMs,
+ receiver_.RenderBufferSizeMs());
+}
+
+TEST_F(TestVCMReceiver, RenderBufferSize_SkipToKeyFrame) {
+ EXPECT_EQ(0, receiver_.RenderBufferSizeMs());
+ const int kNumOfNonDecodableFrames = 2;
+ for (int i = 0; i < kNumOfNonDecodableFrames; ++i) {
+ EXPECT_GE(InsertFrame(kVideoFrameDelta, true), kNoError);
+ }
+ const int kNumOfFrames = 10;
+ EXPECT_GE(InsertFrame(kVideoFrameKey, true), kNoError);
+ for (int i = 0; i < kNumOfFrames - 1; ++i) {
+ EXPECT_GE(InsertFrame(kVideoFrameDelta, true), kNoError);
+ }
+ EXPECT_EQ((kNumOfFrames - 1) * kDefaultFramePeriodMs,
+ receiver_.RenderBufferSizeMs());
+}
+
+TEST_F(TestVCMReceiver, RenderBufferSize_NotAllComplete) {
+ EXPECT_EQ(0, receiver_.RenderBufferSizeMs());
+ EXPECT_GE(InsertFrame(kVideoFrameKey, true), kNoError);
+ int num_of_frames = 10;
+ for (int i = 0; i < num_of_frames; ++i) {
+ EXPECT_GE(InsertFrame(kVideoFrameDelta, true), kNoError);
+ }
+ num_of_frames++;
+ EXPECT_GE(InsertFrame(kVideoFrameDelta, false), kNoError);
+ for (int i = 0; i < num_of_frames; ++i) {
+ EXPECT_GE(InsertFrame(kVideoFrameDelta, true), kNoError);
+ }
+ EXPECT_EQ((num_of_frames - 1) * kDefaultFramePeriodMs,
+ receiver_.RenderBufferSizeMs());
+}
+
+TEST_F(TestVCMReceiver, RenderBufferSize_NoKeyFrame) {
+ EXPECT_EQ(0, receiver_.RenderBufferSizeMs());
+ int num_of_frames = 10;
+ for (int i = 0; i < num_of_frames; ++i) {
+ EXPECT_GE(InsertFrame(kVideoFrameDelta, true), kNoError);
+ }
+ int64_t next_render_time_ms = 0;
+ VCMEncodedFrame* frame = receiver_.FrameForDecoding(10, next_render_time_ms);
+ EXPECT_TRUE(frame == NULL);
+ receiver_.ReleaseFrame(frame);
+ EXPECT_GE(InsertFrame(kVideoFrameDelta, false), kNoError);
+ for (int i = 0; i < num_of_frames; ++i) {
+ EXPECT_GE(InsertFrame(kVideoFrameDelta, true), kNoError);
+ }
+ EXPECT_EQ(0, receiver_.RenderBufferSizeMs());
+}
+
+TEST_F(TestVCMReceiver, NonDecodableDuration_Empty) {
+  // Enable NACK, with no RTT thresholds for disabling retransmission delay.
+ receiver_.SetNackMode(kNack, -1, -1);
+ const size_t kMaxNackListSize = 1000;
+ const int kMaxPacketAgeToNack = 1000;
+ const int kMaxNonDecodableDuration = 500;
+ const int kMinDelayMs = 500;
+ receiver_.SetNackSettings(kMaxNackListSize, kMaxPacketAgeToNack,
+ kMaxNonDecodableDuration);
+ EXPECT_GE(InsertFrame(kVideoFrameKey, true), kNoError);
+ // Advance time until it's time to decode the key frame.
+ clock_->AdvanceTimeMilliseconds(kMinDelayMs);
+ EXPECT_TRUE(DecodeNextFrame());
+ bool request_key_frame = false;
+ std::vector<uint16_t> nack_list = receiver_.NackList(&request_key_frame);
+ EXPECT_FALSE(request_key_frame);
+}
+
+TEST_F(TestVCMReceiver, NonDecodableDuration_NoKeyFrame) {
+  // Enable NACK, with no RTT thresholds for disabling retransmission delay.
+ receiver_.SetNackMode(kNack, -1, -1);
+ const size_t kMaxNackListSize = 1000;
+ const int kMaxPacketAgeToNack = 1000;
+ const int kMaxNonDecodableDuration = 500;
+ receiver_.SetNackSettings(kMaxNackListSize, kMaxPacketAgeToNack,
+ kMaxNonDecodableDuration);
+ const int kNumFrames = kDefaultFrameRate * kMaxNonDecodableDuration / 1000;
+ for (int i = 0; i < kNumFrames; ++i) {
+ EXPECT_GE(InsertFrame(kVideoFrameDelta, true), kNoError);
+ }
+ bool request_key_frame = false;
+ std::vector<uint16_t> nack_list = receiver_.NackList(&request_key_frame);
+ EXPECT_TRUE(request_key_frame);
+}
+
+TEST_F(TestVCMReceiver, NonDecodableDuration_OneIncomplete) {
+  // Enable NACK, with no RTT thresholds for disabling retransmission delay.
+ receiver_.SetNackMode(kNack, -1, -1);
+ const size_t kMaxNackListSize = 1000;
+ const int kMaxPacketAgeToNack = 1000;
+ const int kMaxNonDecodableDuration = 500;
+ const int kMaxNonDecodableDurationFrames = (kDefaultFrameRate *
+ kMaxNonDecodableDuration + 500) / 1000;
+ const int kMinDelayMs = 500;
+ receiver_.SetNackSettings(kMaxNackListSize, kMaxPacketAgeToNack,
+ kMaxNonDecodableDuration);
+ receiver_.SetMinReceiverDelay(kMinDelayMs);
+ int64_t key_frame_inserted = clock_->TimeInMilliseconds();
+ EXPECT_GE(InsertFrame(kVideoFrameKey, true), kNoError);
+ // Insert an incomplete frame.
+ EXPECT_GE(InsertFrame(kVideoFrameDelta, false), kNoError);
+ // Insert enough frames to have too long non-decodable sequence.
+  for (int i = 0; i < kMaxNonDecodableDurationFrames; ++i) {
+ EXPECT_GE(InsertFrame(kVideoFrameDelta, true), kNoError);
+ }
+ // Advance time until it's time to decode the key frame.
+ clock_->AdvanceTimeMilliseconds(kMinDelayMs - clock_->TimeInMilliseconds() -
+ key_frame_inserted);
+ EXPECT_TRUE(DecodeNextFrame());
+ // Make sure we get a key frame request.
+ bool request_key_frame = false;
+ std::vector<uint16_t> nack_list = receiver_.NackList(&request_key_frame);
+ EXPECT_TRUE(request_key_frame);
+}
+
+TEST_F(TestVCMReceiver, NonDecodableDuration_NoTrigger) {
+  // Enable NACK, with no RTT thresholds for disabling retransmission delay.
+ receiver_.SetNackMode(kNack, -1, -1);
+ const size_t kMaxNackListSize = 1000;
+ const int kMaxPacketAgeToNack = 1000;
+ const int kMaxNonDecodableDuration = 500;
+ const int kMaxNonDecodableDurationFrames = (kDefaultFrameRate *
+ kMaxNonDecodableDuration + 500) / 1000;
+ const int kMinDelayMs = 500;
+ receiver_.SetNackSettings(kMaxNackListSize, kMaxPacketAgeToNack,
+ kMaxNonDecodableDuration);
+ receiver_.SetMinReceiverDelay(kMinDelayMs);
+ int64_t key_frame_inserted = clock_->TimeInMilliseconds();
+ EXPECT_GE(InsertFrame(kVideoFrameKey, true), kNoError);
+ // Insert an incomplete frame.
+ EXPECT_GE(InsertFrame(kVideoFrameDelta, false), kNoError);
+ // Insert all but one frame to not trigger a key frame request due to
+ // too long duration of non-decodable frames.
+  for (int i = 0; i < kMaxNonDecodableDurationFrames - 1; ++i) {
+ EXPECT_GE(InsertFrame(kVideoFrameDelta, true), kNoError);
+ }
+ // Advance time until it's time to decode the key frame.
+ clock_->AdvanceTimeMilliseconds(kMinDelayMs - clock_->TimeInMilliseconds() -
+ key_frame_inserted);
+ EXPECT_TRUE(DecodeNextFrame());
+ // Make sure we don't get a key frame request since we haven't generated
+ // enough frames.
+ bool request_key_frame = false;
+ std::vector<uint16_t> nack_list = receiver_.NackList(&request_key_frame);
+ EXPECT_FALSE(request_key_frame);
+}
+
+TEST_F(TestVCMReceiver, NonDecodableDuration_NoTrigger2) {
+  // Enable NACK, with no RTT thresholds for disabling retransmission delay.
+ receiver_.SetNackMode(kNack, -1, -1);
+ const size_t kMaxNackListSize = 1000;
+ const int kMaxPacketAgeToNack = 1000;
+ const int kMaxNonDecodableDuration = 500;
+ const int kMaxNonDecodableDurationFrames = (kDefaultFrameRate *
+ kMaxNonDecodableDuration + 500) / 1000;
+ const int kMinDelayMs = 500;
+ receiver_.SetNackSettings(kMaxNackListSize, kMaxPacketAgeToNack,
+ kMaxNonDecodableDuration);
+ receiver_.SetMinReceiverDelay(kMinDelayMs);
+ int64_t key_frame_inserted = clock_->TimeInMilliseconds();
+ EXPECT_GE(InsertFrame(kVideoFrameKey, true), kNoError);
+ // Insert enough frames to have too long non-decodable sequence, except that
+ // we don't have any losses.
+  for (int i = 0; i < kMaxNonDecodableDurationFrames; ++i) {
+ EXPECT_GE(InsertFrame(kVideoFrameDelta, true), kNoError);
+ }
+ // Insert an incomplete frame.
+ EXPECT_GE(InsertFrame(kVideoFrameDelta, false), kNoError);
+ // Advance time until it's time to decode the key frame.
+ clock_->AdvanceTimeMilliseconds(kMinDelayMs - clock_->TimeInMilliseconds() -
+ key_frame_inserted);
+ EXPECT_TRUE(DecodeNextFrame());
+  // Make sure we don't get a key frame request since the non-decodable
+  // duration is only one frame.
+ bool request_key_frame = false;
+ std::vector<uint16_t> nack_list = receiver_.NackList(&request_key_frame);
+ EXPECT_FALSE(request_key_frame);
+}
+
+TEST_F(TestVCMReceiver, NonDecodableDuration_KeyFrameAfterIncompleteFrames) {
+  // Enable NACK, with no RTT thresholds for disabling retransmission delay.
+ receiver_.SetNackMode(kNack, -1, -1);
+ const size_t kMaxNackListSize = 1000;
+ const int kMaxPacketAgeToNack = 1000;
+ const int kMaxNonDecodableDuration = 500;
+ const int kMaxNonDecodableDurationFrames = (kDefaultFrameRate *
+ kMaxNonDecodableDuration + 500) / 1000;
+ const int kMinDelayMs = 500;
+ receiver_.SetNackSettings(kMaxNackListSize, kMaxPacketAgeToNack,
+ kMaxNonDecodableDuration);
+ receiver_.SetMinReceiverDelay(kMinDelayMs);
+ int64_t key_frame_inserted = clock_->TimeInMilliseconds();
+ EXPECT_GE(InsertFrame(kVideoFrameKey, true), kNoError);
+ // Insert an incomplete frame.
+ EXPECT_GE(InsertFrame(kVideoFrameDelta, false), kNoError);
+ // Insert enough frames to have too long non-decodable sequence.
+  for (int i = 0; i < kMaxNonDecodableDurationFrames; ++i) {
+ EXPECT_GE(InsertFrame(kVideoFrameDelta, true), kNoError);
+ }
+ EXPECT_GE(InsertFrame(kVideoFrameKey, true), kNoError);
+ // Advance time until it's time to decode the key frame.
+ clock_->AdvanceTimeMilliseconds(kMinDelayMs - clock_->TimeInMilliseconds() -
+ key_frame_inserted);
+ EXPECT_TRUE(DecodeNextFrame());
+ // Make sure we don't get a key frame request since we have a key frame
+ // in the list.
+ bool request_key_frame = false;
+ std::vector<uint16_t> nack_list = receiver_.NackList(&request_key_frame);
+ EXPECT_FALSE(request_key_frame);
+}
+
+// A simulated clock that, as time elapses, inserts frames into the jitter
+// buffer according to preset arrival times.
+class SimulatedClockWithFrames : public SimulatedClock {
+ public:
+ SimulatedClockWithFrames(StreamGenerator* stream_generator,
+ VCMReceiver* receiver)
+ : SimulatedClock(0),
+ stream_generator_(stream_generator),
+ receiver_(receiver) {}
+ virtual ~SimulatedClockWithFrames() {}
+
+  // If |stop_on_frame| is true and the next frame arrives between now and
+  // now + |milliseconds|, the clock is advanced to the arrival time of that
+  // frame. Otherwise, the clock is advanced by |milliseconds|.
+  //
+  // In both cases, a frame is inserted into the jitter buffer at the
+  // instant when the clock time equals timestamps_.front().arrive_time.
+  //
+  // Returns true if a frame arrives between now and now + |milliseconds|.
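+  // For example (illustrative): with frames scheduled to arrive at
+  // t = 10 ms and t = 25 ms, AdvanceTimeMilliseconds(30, true) advances
+  // the clock to 10 ms, inserts that frame, and returns true; with
+  // |stop_on_frame| == false the same call advances straight to
+  // t = 30 ms and inserts both frames along the way.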
+ bool AdvanceTimeMilliseconds(int64_t milliseconds, bool stop_on_frame) {
+ return AdvanceTimeMicroseconds(milliseconds * 1000, stop_on_frame);
+  }
+
+ bool AdvanceTimeMicroseconds(int64_t microseconds, bool stop_on_frame) {
+ int64_t start_time = TimeInMicroseconds();
+ int64_t end_time = start_time + microseconds;
+ bool frame_injected = false;
+ while (!timestamps_.empty() &&
+ timestamps_.front().arrive_time <= end_time) {
+ RTC_DCHECK(timestamps_.front().arrive_time >= start_time);
+
+ SimulatedClock::AdvanceTimeMicroseconds(timestamps_.front().arrive_time -
+ TimeInMicroseconds());
+ GenerateAndInsertFrame((timestamps_.front().render_time + 500) / 1000);
+ timestamps_.pop();
+ frame_injected = true;
+
+ if (stop_on_frame)
+ return frame_injected;
+ }
+
+ if (TimeInMicroseconds() < end_time) {
+ SimulatedClock::AdvanceTimeMicroseconds(end_time - TimeInMicroseconds());
+ }
+ return frame_injected;
+  }
+
+  // Input timestamps are in milliseconds, and |arrive_timestamps| must be
+  // positive and in increasing order.
+  // |arrive_timestamps| determines when frames are inserted into the
+  // jitter buffer, while |render_timestamps| are the timestamps carried on
+  // the frames.
+ void SetFrames(const int64_t* arrive_timestamps,
+ const int64_t* render_timestamps,
+ size_t size) {
+ int64_t previous_arrive_timestamp = 0;
+ for (size_t i = 0; i < size; i++) {
+ RTC_CHECK(arrive_timestamps[i] >= previous_arrive_timestamp);
+ timestamps_.push(TimestampPair(arrive_timestamps[i] * 1000,
+ render_timestamps[i] * 1000));
+ previous_arrive_timestamp = arrive_timestamps[i];
+ }
+ }
+
+ private:
+ struct TimestampPair {
+ TimestampPair(int64_t arrive_timestamp, int64_t render_timestamp)
+ : arrive_time(arrive_timestamp), render_time(render_timestamp) {}
+
+ int64_t arrive_time;
+ int64_t render_time;
+ };
+
+ void GenerateAndInsertFrame(int64_t render_timestamp_ms) {
+ VCMPacket packet;
+ stream_generator_->GenerateFrame(FrameType::kVideoFrameKey,
+ 1, // media packets
+ 0, // empty packets
+ render_timestamp_ms);
+
+ bool packet_available = stream_generator_->PopPacket(&packet, 0);
+ EXPECT_TRUE(packet_available);
+ if (!packet_available)
+ return; // Return here to avoid crashes below.
+ receiver_->InsertPacket(packet, 640, 480);
+ }
+
+ std::queue<TimestampPair> timestamps_;
+ StreamGenerator* stream_generator_;
+ VCMReceiver* receiver_;
+};
+
+// Used together with SimulatedClockWithFrames. A Wait() call does one of
+// the following:
+// 1. If |stop_on_frame| is true, the clock is advanced to the exact instant
+// the first frame arrives and the frame is inserted into the jitter buffer,
+// or the clock is advanced to now + |max_time| if no frame arrives in the
+// window.
+// 2. If |stop_on_frame| is false, the clock is advanced to now + |max_time|,
+// and all frames arriving between now and now + |max_time| are inserted
+// into the jitter buffer.
+//
+// This is used to simulate the jitter buffer receiving packets from the
+// internet as time elapses.
+
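+// For example (illustrative): Wait(100) on an event constructed with
+// |stop_on_frame| == true returns kEventSignaled as soon as the next
+// scheduled frame arrives (after inserting it into the jitter buffer),
+// and kEventTimeout if no frame arrives within 100 ms.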
+class FrameInjectEvent : public EventWrapper {
+ public:
+ FrameInjectEvent(SimulatedClockWithFrames* clock, bool stop_on_frame)
+ : clock_(clock), stop_on_frame_(stop_on_frame) {}
+
+ bool Set() override { return true; }
+
+ EventTypeWrapper Wait(unsigned long max_time) override {
+ if (clock_->AdvanceTimeMilliseconds(max_time, stop_on_frame_) &&
+ stop_on_frame_) {
+ return EventTypeWrapper::kEventSignaled;
+ } else {
+ return EventTypeWrapper::kEventTimeout;
+ }
+ }
+
+ private:
+ SimulatedClockWithFrames* clock_;
+ bool stop_on_frame_;
+};
+
+class VCMReceiverTimingTest : public ::testing::Test {
+ protected:
+  VCMReceiverTimingTest()
+      : clock_(&stream_generator_, &receiver_),
+        stream_generator_(0, clock_.TimeInMilliseconds()),
+        timing_(&clock_),
+        receiver_(
+            &timing_,
+            &clock_,
+            rtc::scoped_ptr<EventWrapper>(new FrameInjectEvent(&clock_, false)),
+            rtc::scoped_ptr<EventWrapper>(
+                new FrameInjectEvent(&clock_, true))) {}
+
+  virtual void SetUp() { receiver_.Reset(); }
+
+ SimulatedClockWithFrames clock_;
+ StreamGenerator stream_generator_;
+ VCMTiming timing_;
+ VCMReceiver receiver_;
+};
+
+// Tests that VCMReceiver::FrameForDecoding handles the parameter
+// |max_wait_time_ms| correctly:
+// 1. The call should never take more than |max_wait_time_ms| to return.
+// 2. If the call returns before now + |max_wait_time_ms|, a frame must be
+// returned.
+TEST_F(VCMReceiverTimingTest, FrameForDecoding) {
+ const size_t kNumFrames = 100;
+ const int kFramePeriod = 40;
+ int64_t arrive_timestamps[kNumFrames];
+ int64_t render_timestamps[kNumFrames];
+ int64_t next_render_time;
+
+  // Construct test samples.
+  // |render_timestamps| are the timestamps stored on the frames;
+  // |arrive_timestamps| control when the frame packets are received.
+  for (size_t i = 0; i < kNumFrames; i++) {
+    // Preset the frame rate to 25 Hz, but add a reasonable deviation to
+    // |arrive_timestamps| to mimic network jitter.
+ arrive_timestamps[i] =
+ (i + 1) * kFramePeriod + (i % 10) * ((i % 2) ? 1 : -1);
+ render_timestamps[i] = (i + 1) * kFramePeriod;
+ }
+
+ clock_.SetFrames(arrive_timestamps, render_timestamps, kNumFrames);
+
+ // Record how many frames we finally get out of the receiver.
+ size_t num_frames_return = 0;
+
+ const int64_t kMaxWaitTime = 30;
+
+  // Ideally, we should get all frames that we fed in via SetFrames().
+  // In the case that FrameForDecoding drops frames by mistake, we rely on
+  // the build bot to kill the test for running too long.
+ while (num_frames_return < kNumFrames) {
+ int64_t start_time = clock_.TimeInMilliseconds();
+ VCMEncodedFrame* frame =
+ receiver_.FrameForDecoding(kMaxWaitTime, next_render_time, false);
+ int64_t end_time = clock_.TimeInMilliseconds();
+
+    // In any case, FrameForDecoding should not wait longer than
+    // kMaxWaitTime.
+    // If we did not get a frame, the call must have waited for exactly
+    // kMaxWaitTime. (With the samples constructed above we are sure there
+    // is no timing error, so the only case in which it returns NULL is
+    // when it runs out of time.)
+ if (frame) {
+ receiver_.ReleaseFrame(frame);
+ ++num_frames_return;
+ EXPECT_GE(kMaxWaitTime, end_time - start_time);
+ } else {
+ EXPECT_EQ(kMaxWaitTime, end_time - start_time);
+ }
+ }
+}
+
+} // namespace webrtc
diff --git a/webrtc/modules/video_coding/main/source/rtt_filter.cc b/webrtc/modules/video_coding/main/source/rtt_filter.cc
new file mode 100644
index 0000000000..5742e8fa89
--- /dev/null
+++ b/webrtc/modules/video_coding/main/source/rtt_filter.cc
@@ -0,0 +1,202 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/video_coding/main/source/internal_defines.h"
+#include "webrtc/modules/video_coding/main/source/rtt_filter.h"
+
+#include <math.h>
+#include <stdlib.h>
+#include <string.h>
+
+namespace webrtc {
+
+VCMRttFilter::VCMRttFilter()
+ : _filtFactMax(35),
+ _jumpStdDevs(2.5),
+ _driftStdDevs(3.5),
+ _detectThreshold(kMaxDriftJumpCount) {
+ Reset();
+}
+
+VCMRttFilter&
+VCMRttFilter::operator=(const VCMRttFilter& rhs)
+{
+ if (this != &rhs)
+ {
+ _gotNonZeroUpdate = rhs._gotNonZeroUpdate;
+ _avgRtt = rhs._avgRtt;
+ _varRtt = rhs._varRtt;
+ _maxRtt = rhs._maxRtt;
+ _filtFactCount = rhs._filtFactCount;
+ _jumpCount = rhs._jumpCount;
+ _driftCount = rhs._driftCount;
+ memcpy(_jumpBuf, rhs._jumpBuf, sizeof(_jumpBuf));
+ memcpy(_driftBuf, rhs._driftBuf, sizeof(_driftBuf));
+ }
+ return *this;
+}
+
+void
+VCMRttFilter::Reset()
+{
+ _gotNonZeroUpdate = false;
+ _avgRtt = 0;
+ _varRtt = 0;
+ _maxRtt = 0;
+ _filtFactCount = 1;
+ _jumpCount = 0;
+ _driftCount = 0;
+    memset(_jumpBuf, 0, sizeof(_jumpBuf));
+    memset(_driftBuf, 0, sizeof(_driftBuf));
+}
+
+void
+VCMRttFilter::Update(int64_t rttMs)
+{
+ if (!_gotNonZeroUpdate)
+ {
+ if (rttMs == 0)
+ {
+ return;
+ }
+ _gotNonZeroUpdate = true;
+ }
+
+ // Sanity check
+ if (rttMs > 3000)
+ {
+ rttMs = 3000;
+ }
+
+ double filtFactor = 0;
+ if (_filtFactCount > 1)
+ {
+ filtFactor = static_cast<double>(_filtFactCount - 1) / _filtFactCount;
+ }
+ _filtFactCount++;
+ if (_filtFactCount > _filtFactMax)
+ {
+ // This prevents filtFactor from going above
+ // (_filtFactMax - 1) / _filtFactMax,
+ // e.g., _filtFactMax = 50 => filtFactor = 49/50 = 0.98
+ _filtFactCount = _filtFactMax;
+ }
+ double oldAvg = _avgRtt;
+ double oldVar = _varRtt;
+ _avgRtt = filtFactor * _avgRtt + (1 - filtFactor) * rttMs;
+ _varRtt = filtFactor * _varRtt + (1 - filtFactor) *
+ (rttMs - _avgRtt) * (rttMs - _avgRtt);
+ _maxRtt = VCM_MAX(rttMs, _maxRtt);
+ if (!JumpDetection(rttMs) || !DriftDetection(rttMs))
+ {
+ // In some cases we don't want to update the statistics
+ _avgRtt = oldAvg;
+ _varRtt = oldVar;
+ }
+}
+
+bool
+VCMRttFilter::JumpDetection(int64_t rttMs)
+{
+ double diffFromAvg = _avgRtt - rttMs;
+ if (fabs(diffFromAvg) > _jumpStdDevs * sqrt(_varRtt))
+ {
+ int diffSign = (diffFromAvg >= 0) ? 1 : -1;
+ int jumpCountSign = (_jumpCount >= 0) ? 1 : -1;
+ if (diffSign != jumpCountSign)
+ {
+            // Since the signs differ, the samples currently
+            // in the buffer are useless, as they represent a
+            // jump in a different direction.
+ _jumpCount = 0;
+ }
+ if (abs(_jumpCount) < kMaxDriftJumpCount)
+ {
+ // Update the buffer used for the short time
+ // statistics.
+ // The sign of the diff is used for updating the counter since
+ // we want to use the same buffer for keeping track of when
+ // the RTT jumps down and up.
+ _jumpBuf[abs(_jumpCount)] = rttMs;
+ _jumpCount += diffSign;
+ }
+ if (abs(_jumpCount) >= _detectThreshold)
+ {
+ // Detected an RTT jump
+ ShortRttFilter(_jumpBuf, abs(_jumpCount));
+ _filtFactCount = _detectThreshold + 1;
+ _jumpCount = 0;
+ }
+ else
+ {
+ return false;
+ }
+ }
+ else
+ {
+ _jumpCount = 0;
+ }
+ return true;
+}
+
+bool
+VCMRttFilter::DriftDetection(int64_t rttMs)
+{
+ if (_maxRtt - _avgRtt > _driftStdDevs * sqrt(_varRtt))
+ {
+ if (_driftCount < kMaxDriftJumpCount)
+ {
+ // Update the buffer used for the short time
+ // statistics.
+ _driftBuf[_driftCount] = rttMs;
+ _driftCount++;
+ }
+ if (_driftCount >= _detectThreshold)
+ {
+ // Detected an RTT drift
+ ShortRttFilter(_driftBuf, _driftCount);
+ _filtFactCount = _detectThreshold + 1;
+ _driftCount = 0;
+ }
+ }
+ else
+ {
+ _driftCount = 0;
+ }
+ return true;
+}
+
+void
+VCMRttFilter::ShortRttFilter(int64_t* buf, uint32_t length)
+{
+ if (length == 0)
+ {
+ return;
+ }
+ _maxRtt = 0;
+ _avgRtt = 0;
+    for (uint32_t i = 0; i < length; i++)
+ {
+ if (buf[i] > _maxRtt)
+ {
+ _maxRtt = buf[i];
+ }
+ _avgRtt += buf[i];
+ }
+ _avgRtt = _avgRtt / static_cast<double>(length);
+}
+
+int64_t
+VCMRttFilter::RttMs() const
+{
+ return static_cast<int64_t>(_maxRtt + 0.5);
+}
+
+}  // namespace webrtc
diff --git a/webrtc/modules/video_coding/main/source/rtt_filter.h b/webrtc/modules/video_coding/main/source/rtt_filter.h
new file mode 100644
index 0000000000..9e14a1ab39
--- /dev/null
+++ b/webrtc/modules/video_coding/main/source/rtt_filter.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_RTT_FILTER_H_
+#define WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_RTT_FILTER_H_
+
+#include "webrtc/typedefs.h"
+
+namespace webrtc
+{
+
+class VCMRttFilter
+{
+public:
+ VCMRttFilter();
+
+ VCMRttFilter& operator=(const VCMRttFilter& rhs);
+
+ // Resets the filter.
+ void Reset();
+ // Updates the filter with a new sample.
+ void Update(int64_t rttMs);
+    // Returns the current RTT estimate in ms.
+ int64_t RttMs() const;
+
+private:
+ // The size of the drift and jump memory buffers
+ // and thus also the detection threshold for these
+ // detectors in number of samples.
+ enum { kMaxDriftJumpCount = 5 };
+ // Detects RTT jumps by comparing the difference between
+ // samples and average to the standard deviation.
+ // Returns true if the long time statistics should be updated
+ // and false otherwise
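+    // For example (illustrative): with _avgRtt == 100 ms and
+    // _varRtt == 100 (a standard deviation of 10 ms), a sample of 130 ms
+    // deviates by 30 ms, which exceeds _jumpStdDevs * 10 ms == 25 ms and
+    // counts towards a jump; kMaxDriftJumpCount such samples in the same
+    // direction trigger the short-time filter.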
+ bool JumpDetection(int64_t rttMs);
+ // Detects RTT drifts by comparing the difference between
+ // max and average to the standard deviation.
+ // Returns true if the long time statistics should be updated
+ // and false otherwise
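+    // For example (illustrative): with _avgRtt == 100 ms, _varRtt == 100
+    // and _maxRtt == 140 ms, the gap of 40 ms exceeds
+    // _driftStdDevs * 10 ms == 35 ms, so samples accumulate towards a
+    // detected drift.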
+ bool DriftDetection(int64_t rttMs);
+ // Computes the short time average and maximum of the vector buf.
+ void ShortRttFilter(int64_t* buf, uint32_t length);
+
+ bool _gotNonZeroUpdate;
+ double _avgRtt;
+ double _varRtt;
+ int64_t _maxRtt;
+ uint32_t _filtFactCount;
+ const uint32_t _filtFactMax;
+ const double _jumpStdDevs;
+ const double _driftStdDevs;
+ int32_t _jumpCount;
+ int32_t _driftCount;
+ const int32_t _detectThreshold;
+ int64_t _jumpBuf[kMaxDriftJumpCount];
+ int64_t _driftBuf[kMaxDriftJumpCount];
+};
+
+} // namespace webrtc
+
+#endif  // WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_RTT_FILTER_H_
diff --git a/webrtc/modules/video_coding/main/source/session_info.cc b/webrtc/modules/video_coding/main/source/session_info.cc
new file mode 100644
index 0000000000..9a1bc54e52
--- /dev/null
+++ b/webrtc/modules/video_coding/main/source/session_info.cc
@@ -0,0 +1,580 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/video_coding/main/source/session_info.h"
+
+#include "webrtc/modules/video_coding/main/source/packet.h"
+#include "webrtc/system_wrappers/include/logging.h"
+
+namespace webrtc {
+
+namespace {
+
+uint16_t BufferToUWord16(const uint8_t* dataBuffer) {
+ return (dataBuffer[0] << 8) | dataBuffer[1];
+}
+
+} // namespace
+
+VCMSessionInfo::VCMSessionInfo()
+ : session_nack_(false),
+ complete_(false),
+ decodable_(false),
+ frame_type_(kVideoFrameDelta),
+ packets_(),
+ empty_seq_num_low_(-1),
+ empty_seq_num_high_(-1),
+ first_packet_seq_num_(-1),
+ last_packet_seq_num_(-1) {
+}
+
+void VCMSessionInfo::UpdateDataPointers(const uint8_t* old_base_ptr,
+ const uint8_t* new_base_ptr) {
+  for (PacketIterator it = packets_.begin(); it != packets_.end(); ++it) {
+    if ((*it).dataPtr != NULL) {
+      assert(old_base_ptr != NULL && new_base_ptr != NULL);
+      (*it).dataPtr = new_base_ptr + ((*it).dataPtr - old_base_ptr);
+    }
+  }
+}
+
+int VCMSessionInfo::LowSequenceNumber() const {
+ if (packets_.empty())
+ return empty_seq_num_low_;
+ return packets_.front().seqNum;
+}
+
+int VCMSessionInfo::HighSequenceNumber() const {
+ if (packets_.empty())
+ return empty_seq_num_high_;
+ if (empty_seq_num_high_ == -1)
+ return packets_.back().seqNum;
+ return LatestSequenceNumber(packets_.back().seqNum, empty_seq_num_high_);
+}
+
+int VCMSessionInfo::PictureId() const {
+ if (packets_.empty())
+ return kNoPictureId;
+ if (packets_.front().codecSpecificHeader.codec == kRtpVideoVp8) {
+ return packets_.front().codecSpecificHeader.codecHeader.VP8.pictureId;
+ } else if (packets_.front().codecSpecificHeader.codec == kRtpVideoVp9) {
+ return packets_.front().codecSpecificHeader.codecHeader.VP9.picture_id;
+ } else {
+ return kNoPictureId;
+ }
+}
+
+int VCMSessionInfo::TemporalId() const {
+ if (packets_.empty())
+ return kNoTemporalIdx;
+ if (packets_.front().codecSpecificHeader.codec == kRtpVideoVp8) {
+ return packets_.front().codecSpecificHeader.codecHeader.VP8.temporalIdx;
+ } else if (packets_.front().codecSpecificHeader.codec == kRtpVideoVp9) {
+ return packets_.front().codecSpecificHeader.codecHeader.VP9.temporal_idx;
+ } else {
+ return kNoTemporalIdx;
+ }
+}
+
+bool VCMSessionInfo::LayerSync() const {
+ if (packets_.empty())
+ return false;
+ if (packets_.front().codecSpecificHeader.codec == kRtpVideoVp8) {
+ return packets_.front().codecSpecificHeader.codecHeader.VP8.layerSync;
+ } else if (packets_.front().codecSpecificHeader.codec == kRtpVideoVp9) {
+ return
+ packets_.front().codecSpecificHeader.codecHeader.VP9.temporal_up_switch;
+ } else {
+ return false;
+ }
+}
+
+int VCMSessionInfo::Tl0PicId() const {
+ if (packets_.empty())
+ return kNoTl0PicIdx;
+ if (packets_.front().codecSpecificHeader.codec == kRtpVideoVp8) {
+ return packets_.front().codecSpecificHeader.codecHeader.VP8.tl0PicIdx;
+ } else if (packets_.front().codecSpecificHeader.codec == kRtpVideoVp9) {
+ return packets_.front().codecSpecificHeader.codecHeader.VP9.tl0_pic_idx;
+ } else {
+ return kNoTl0PicIdx;
+ }
+}
+
+bool VCMSessionInfo::NonReference() const {
+ if (packets_.empty() ||
+ packets_.front().codecSpecificHeader.codec != kRtpVideoVp8)
+ return false;
+ return packets_.front().codecSpecificHeader.codecHeader.VP8.nonReference;
+}
+
+void VCMSessionInfo::SetGofInfo(const GofInfoVP9& gof_info, size_t idx) {
+ if (packets_.empty() ||
+ packets_.front().codecSpecificHeader.codec != kRtpVideoVp9 ||
+ packets_.front().codecSpecificHeader.codecHeader.VP9.flexible_mode) {
+ return;
+ }
+ packets_.front().codecSpecificHeader.codecHeader.VP9.temporal_idx =
+ gof_info.temporal_idx[idx];
+ packets_.front().codecSpecificHeader.codecHeader.VP9.temporal_up_switch =
+ gof_info.temporal_up_switch[idx];
+ packets_.front().codecSpecificHeader.codecHeader.VP9.num_ref_pics =
+ gof_info.num_ref_pics[idx];
+ for (size_t i = 0; i < gof_info.num_ref_pics[idx]; ++i) {
+ packets_.front().codecSpecificHeader.codecHeader.VP9.pid_diff[i] =
+ gof_info.pid_diff[idx][i];
+ }
+}
+
+void VCMSessionInfo::Reset() {
+ session_nack_ = false;
+ complete_ = false;
+ decodable_ = false;
+ frame_type_ = kVideoFrameDelta;
+ packets_.clear();
+ empty_seq_num_low_ = -1;
+ empty_seq_num_high_ = -1;
+ first_packet_seq_num_ = -1;
+ last_packet_seq_num_ = -1;
+}
+
+size_t VCMSessionInfo::SessionLength() const {
+ size_t length = 0;
+ for (PacketIteratorConst it = packets_.begin(); it != packets_.end(); ++it)
+ length += (*it).sizeBytes;
+ return length;
+}
+
+int VCMSessionInfo::NumPackets() const {
+ return packets_.size();
+}
+
+size_t VCMSessionInfo::InsertBuffer(uint8_t* frame_buffer,
+ PacketIterator packet_it) {
+ VCMPacket& packet = *packet_it;
+ PacketIterator it;
+
+ // Calculate the offset into the frame buffer for this packet.
+ size_t offset = 0;
+ for (it = packets_.begin(); it != packet_it; ++it)
+ offset += (*it).sizeBytes;
+
+  // Set the data pointer to point to the start of this packet in the
+  // frame buffer.
+ const uint8_t* packet_buffer = packet.dataPtr;
+ packet.dataPtr = frame_buffer + offset;
+
+ // We handle H.264 STAP-A packets in a special way as we need to remove the
+ // two length bytes between each NAL unit, and potentially add start codes.
+ // TODO(pbos): Remove H264 parsing from this step and use a fragmentation
+ // header supplied by the H264 depacketizer.
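+  // Illustrative STAP-A payload layout (per RFC 6184):
+  //   [STAP-A NAL header (1 byte)]
+  //   [NALU 1 size (2 bytes)][NALU 1] [NALU 2 size (2 bytes)][NALU 2] ...
+  // Below, each 2-byte size field is dropped and, when
+  // packet.insertStartCode is set, a 4-byte start code is written before
+  // each NAL unit instead.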
+ const size_t kH264NALHeaderLengthInBytes = 1;
+ const size_t kLengthFieldLength = 2;
+ if (packet.codecSpecificHeader.codec == kRtpVideoH264 &&
+ packet.codecSpecificHeader.codecHeader.H264.packetization_type ==
+ kH264StapA) {
+ size_t required_length = 0;
+ const uint8_t* nalu_ptr = packet_buffer + kH264NALHeaderLengthInBytes;
+ while (nalu_ptr < packet_buffer + packet.sizeBytes) {
+ size_t length = BufferToUWord16(nalu_ptr);
+ required_length +=
+ length + (packet.insertStartCode ? kH264StartCodeLengthBytes : 0);
+ nalu_ptr += kLengthFieldLength + length;
+ }
+ ShiftSubsequentPackets(packet_it, required_length);
+ nalu_ptr = packet_buffer + kH264NALHeaderLengthInBytes;
+ uint8_t* frame_buffer_ptr = frame_buffer + offset;
+ while (nalu_ptr < packet_buffer + packet.sizeBytes) {
+ size_t length = BufferToUWord16(nalu_ptr);
+ nalu_ptr += kLengthFieldLength;
+ frame_buffer_ptr += Insert(nalu_ptr,
+ length,
+ packet.insertStartCode,
+ const_cast<uint8_t*>(frame_buffer_ptr));
+ nalu_ptr += length;
+ }
+ packet.sizeBytes = required_length;
+ return packet.sizeBytes;
+ }
+ ShiftSubsequentPackets(
+ packet_it,
+ packet.sizeBytes +
+ (packet.insertStartCode ? kH264StartCodeLengthBytes : 0));
+
+ packet.sizeBytes = Insert(packet_buffer,
+ packet.sizeBytes,
+ packet.insertStartCode,
+ const_cast<uint8_t*>(packet.dataPtr));
+ return packet.sizeBytes;
+}
+
+size_t VCMSessionInfo::Insert(const uint8_t* buffer,
+ size_t length,
+ bool insert_start_code,
+ uint8_t* frame_buffer) {
+ if (insert_start_code) {
+ const unsigned char startCode[] = {0, 0, 0, 1};
+ memcpy(frame_buffer, startCode, kH264StartCodeLengthBytes);
+ }
+ memcpy(frame_buffer + (insert_start_code ? kH264StartCodeLengthBytes : 0),
+ buffer,
+ length);
+ length += (insert_start_code ? kH264StartCodeLengthBytes : 0);
+
+ return length;
+}
+
+void VCMSessionInfo::ShiftSubsequentPackets(PacketIterator it,
+ int steps_to_shift) {
+ ++it;
+ if (it == packets_.end())
+ return;
+ uint8_t* first_packet_ptr = const_cast<uint8_t*>((*it).dataPtr);
+ int shift_length = 0;
+ // Calculate the total move length and move the data pointers in advance.
+ for (; it != packets_.end(); ++it) {
+ shift_length += (*it).sizeBytes;
+ if ((*it).dataPtr != NULL)
+ (*it).dataPtr += steps_to_shift;
+ }
+ memmove(first_packet_ptr + steps_to_shift, first_packet_ptr, shift_length);
+}
+
+void VCMSessionInfo::UpdateCompleteSession() {
+ if (HaveFirstPacket() && HaveLastPacket()) {
+ // Do we have all the packets in this session?
+ bool complete_session = true;
+ PacketIterator it = packets_.begin();
+ PacketIterator prev_it = it;
+ ++it;
+ for (; it != packets_.end(); ++it) {
+ if (!InSequence(it, prev_it)) {
+ complete_session = false;
+ break;
+ }
+ prev_it = it;
+ }
+ complete_ = complete_session;
+ }
+}
+
+void VCMSessionInfo::UpdateDecodableSession(const FrameData& frame_data) {
+  // Irrelevant if the session is already complete or decodable.
+ if (complete_ || decodable_)
+ return;
+ // TODO(agalusza): Account for bursty loss.
+ // TODO(agalusza): Refine these values to better approximate optimal ones.
+ // Do not decode frames if the RTT is lower than this.
+ const int64_t kRttThreshold = 100;
+ // Do not decode frames if the number of packets is between these two
+ // thresholds.
+ const float kLowPacketPercentageThreshold = 0.2f;
+ const float kHighPacketPercentageThreshold = 0.8f;
+ if (frame_data.rtt_ms < kRttThreshold
+ || frame_type_ == kVideoFrameKey
+ || !HaveFirstPacket()
+ || (NumPackets() <= kHighPacketPercentageThreshold
+ * frame_data.rolling_average_packets_per_frame
+ && NumPackets() > kLowPacketPercentageThreshold
+ * frame_data.rolling_average_packets_per_frame))
+ return;
+
+ decodable_ = true;
+}
+
+bool VCMSessionInfo::complete() const {
+ return complete_;
+}
+
+bool VCMSessionInfo::decodable() const {
+ return decodable_;
+}
+
+// Find the end of the NAL unit which the packet pointed to by |packet_it|
+// belongs to. Returns an iterator to the last packet of the frame if the end
+// of the NAL unit wasn't found.
+VCMSessionInfo::PacketIterator VCMSessionInfo::FindNaluEnd(
+ PacketIterator packet_it) const {
+ if ((*packet_it).completeNALU == kNaluEnd ||
+ (*packet_it).completeNALU == kNaluComplete) {
+ return packet_it;
+ }
+ // Find the end of the NAL unit.
+ for (; packet_it != packets_.end(); ++packet_it) {
+ if (((*packet_it).completeNALU == kNaluComplete &&
+ (*packet_it).sizeBytes > 0) ||
+ // Found next NALU.
+ (*packet_it).completeNALU == kNaluStart)
+ return --packet_it;
+ if ((*packet_it).completeNALU == kNaluEnd)
+ return packet_it;
+ }
+ // The end wasn't found.
+ return --packet_it;
+}
+
+size_t VCMSessionInfo::DeletePacketData(PacketIterator start,
+ PacketIterator end) {
+ size_t bytes_to_delete = 0; // The number of bytes to delete.
+ PacketIterator packet_after_end = end;
+ ++packet_after_end;
+
+ // Get the number of bytes to delete.
+ // Clear the size of these packets.
+ for (PacketIterator it = start; it != packet_after_end; ++it) {
+ bytes_to_delete += (*it).sizeBytes;
+ (*it).sizeBytes = 0;
+ (*it).dataPtr = NULL;
+ }
+ if (bytes_to_delete > 0)
+ ShiftSubsequentPackets(end, -static_cast<int>(bytes_to_delete));
+ return bytes_to_delete;
+}
+
+size_t VCMSessionInfo::BuildVP8FragmentationHeader(
+ uint8_t* frame_buffer,
+ size_t frame_buffer_length,
+ RTPFragmentationHeader* fragmentation) {
+ size_t new_length = 0;
+ // Allocate space for max number of partitions
+ fragmentation->VerifyAndAllocateFragmentationHeader(kMaxVP8Partitions);
+ fragmentation->fragmentationVectorSize = 0;
+ memset(fragmentation->fragmentationLength, 0,
+ kMaxVP8Partitions * sizeof(size_t));
+ if (packets_.empty())
+ return new_length;
+ PacketIterator it = FindNextPartitionBeginning(packets_.begin());
+ while (it != packets_.end()) {
+ const int partition_id =
+ (*it).codecSpecificHeader.codecHeader.VP8.partitionId;
+ PacketIterator partition_end = FindPartitionEnd(it);
+ fragmentation->fragmentationOffset[partition_id] =
+ (*it).dataPtr - frame_buffer;
+ assert(fragmentation->fragmentationOffset[partition_id] <
+ frame_buffer_length);
+ fragmentation->fragmentationLength[partition_id] =
+ (*partition_end).dataPtr + (*partition_end).sizeBytes - (*it).dataPtr;
+ assert(fragmentation->fragmentationLength[partition_id] <=
+ frame_buffer_length);
+ new_length += fragmentation->fragmentationLength[partition_id];
+ ++partition_end;
+ it = FindNextPartitionBeginning(partition_end);
+ if (partition_id + 1 > fragmentation->fragmentationVectorSize)
+ fragmentation->fragmentationVectorSize = partition_id + 1;
+ }
+ // Set all empty fragments to start where the previous fragment ends,
+ // and have zero length.
+ if (fragmentation->fragmentationLength[0] == 0)
+ fragmentation->fragmentationOffset[0] = 0;
+ for (int i = 1; i < fragmentation->fragmentationVectorSize; ++i) {
+ if (fragmentation->fragmentationLength[i] == 0)
+ fragmentation->fragmentationOffset[i] =
+ fragmentation->fragmentationOffset[i - 1] +
+ fragmentation->fragmentationLength[i - 1];
+ assert(i == 0 ||
+ fragmentation->fragmentationOffset[i] >=
+ fragmentation->fragmentationOffset[i - 1]);
+ }
+ assert(new_length <= frame_buffer_length);
+ return new_length;
+}
+
+VCMSessionInfo::PacketIterator VCMSessionInfo::FindNextPartitionBeginning(
+ PacketIterator it) const {
+ while (it != packets_.end()) {
+ if ((*it).codecSpecificHeader.codecHeader.VP8.beginningOfPartition) {
+ return it;
+ }
+ ++it;
+ }
+ return it;
+}
+
+VCMSessionInfo::PacketIterator VCMSessionInfo::FindPartitionEnd(
+ PacketIterator it) const {
+ assert((*it).codec == kVideoCodecVP8);
+ PacketIterator prev_it = it;
+ const int partition_id =
+ (*it).codecSpecificHeader.codecHeader.VP8.partitionId;
+ while (it != packets_.end()) {
+ bool beginning =
+ (*it).codecSpecificHeader.codecHeader.VP8.beginningOfPartition;
+ int current_partition_id =
+ (*it).codecSpecificHeader.codecHeader.VP8.partitionId;
+ bool packet_loss_found = (!beginning && !InSequence(it, prev_it));
+ if (packet_loss_found ||
+ (beginning && current_partition_id != partition_id)) {
+ // Missing packet, the previous packet was the last in sequence.
+ return prev_it;
+ }
+ prev_it = it;
+ ++it;
+ }
+ return prev_it;
+}
+
+bool VCMSessionInfo::InSequence(const PacketIterator& packet_it,
+ const PacketIterator& prev_packet_it) {
+ // If the two iterators are pointing to the same packet they are considered
+ // to be in sequence.
+ return (packet_it == prev_packet_it ||
+ (static_cast<uint16_t>((*prev_packet_it).seqNum + 1) ==
+ (*packet_it).seqNum));
+}
+
+size_t VCMSessionInfo::MakeDecodable() {
+ size_t return_length = 0;
+ if (packets_.empty()) {
+ return 0;
+ }
+ PacketIterator it = packets_.begin();
+ // Make sure we remove the first NAL unit if it's not decodable.
+ if ((*it).completeNALU == kNaluIncomplete ||
+ (*it).completeNALU == kNaluEnd) {
+ PacketIterator nalu_end = FindNaluEnd(it);
+ return_length += DeletePacketData(it, nalu_end);
+ it = nalu_end;
+ }
+ PacketIterator prev_it = it;
+ // Take care of the rest of the NAL units.
+ for (; it != packets_.end(); ++it) {
+ bool start_of_nalu = ((*it).completeNALU == kNaluStart ||
+ (*it).completeNALU == kNaluComplete);
+ if (!start_of_nalu && !InSequence(it, prev_it)) {
+ // Found a sequence number gap due to packet loss.
+ PacketIterator nalu_end = FindNaluEnd(it);
+ return_length += DeletePacketData(it, nalu_end);
+ it = nalu_end;
+ }
+ prev_it = it;
+ }
+ return return_length;
+}
+
+void VCMSessionInfo::SetNotDecodableIfIncomplete() {
+ // We don't need to check for completeness first because the two are
+ // orthogonal. If complete_ is true, decodable_ is irrelevant.
+ decodable_ = false;
+}
+
+bool VCMSessionInfo::HaveFirstPacket() const {
+  return !packets_.empty() && (first_packet_seq_num_ != -1);
+}
+
+bool VCMSessionInfo::HaveLastPacket() const {
+  return !packets_.empty() && (last_packet_seq_num_ != -1);
+}
+
+bool VCMSessionInfo::session_nack() const {
+  return session_nack_;
+}
+
+int VCMSessionInfo::InsertPacket(const VCMPacket& packet,
+ uint8_t* frame_buffer,
+ VCMDecodeErrorMode decode_error_mode,
+ const FrameData& frame_data) {
+ if (packet.frameType == kEmptyFrame) {
+ // Update sequence number of an empty packet.
+ // Only media packets are inserted into the packet list.
+ InformOfEmptyPacket(packet.seqNum);
+ return 0;
+ }
+
+ if (packets_.size() == kMaxPacketsInSession) {
+ LOG(LS_ERROR) << "Max number of packets per frame has been reached.";
+ return -1;
+ }
+
+ // Find the position of this packet in the packet list in sequence number
+ // order and insert it. Loop over the list in reverse order.
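+  // For example (illustrative): with packets 3, 4 and 6 buffered,
+  // inserting 5 scans back past 6, stops at 4, and inserts before 6,
+  // yielding 3, 4, 5, 6.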
+ ReversePacketIterator rit = packets_.rbegin();
+ for (; rit != packets_.rend(); ++rit)
+ if (LatestSequenceNumber(packet.seqNum, (*rit).seqNum) == packet.seqNum)
+ break;
+
+ // Check for duplicate packets.
+ if (rit != packets_.rend() &&
+ (*rit).seqNum == packet.seqNum && (*rit).sizeBytes > 0)
+ return -2;
+
+ if (packet.codec == kVideoCodecH264) {
+ frame_type_ = packet.frameType;
+ if (packet.isFirstPacket &&
+ (first_packet_seq_num_ == -1 ||
+ IsNewerSequenceNumber(first_packet_seq_num_, packet.seqNum))) {
+ first_packet_seq_num_ = packet.seqNum;
+ }
+ if (packet.markerBit &&
+ (last_packet_seq_num_ == -1 ||
+ IsNewerSequenceNumber(packet.seqNum, last_packet_seq_num_))) {
+ last_packet_seq_num_ = packet.seqNum;
+ }
+ } else {
+    // Only insert media packets between the first and last packets (when
+    // available). The check is placed here to properly account for
+    // duplicate packets.
+    // Check whether this is the first packet (only valid for some codecs);
+    // it should only be set for one packet per session.
+ if (packet.isFirstPacket && first_packet_seq_num_ == -1) {
+ // The first packet in a frame signals the frame type.
+ frame_type_ = packet.frameType;
+ // Store the sequence number for the first packet.
+ first_packet_seq_num_ = static_cast<int>(packet.seqNum);
+ } else if (first_packet_seq_num_ != -1 &&
+ IsNewerSequenceNumber(first_packet_seq_num_, packet.seqNum)) {
+ LOG(LS_WARNING) << "Received packet with a sequence number which is out "
+ "of frame boundaries";
+ return -3;
+ } else if (frame_type_ == kEmptyFrame && packet.frameType != kEmptyFrame) {
+ // Update the frame type with the type of the first media packet.
+ // TODO(mikhal): Can this trigger?
+ frame_type_ = packet.frameType;
+ }
+
+ // Track the marker bit, should only be set for one packet per session.
+ if (packet.markerBit && last_packet_seq_num_ == -1) {
+ last_packet_seq_num_ = static_cast<int>(packet.seqNum);
+ } else if (last_packet_seq_num_ != -1 &&
+ IsNewerSequenceNumber(packet.seqNum, last_packet_seq_num_)) {
+ LOG(LS_WARNING) << "Received packet with a sequence number which is out "
+ "of frame boundaries";
+ return -3;
+ }
+ }
+
+ // The insert operation invalidates the iterator |rit|.
+ PacketIterator packet_list_it = packets_.insert(rit.base(), packet);
+
+ size_t returnLength = InsertBuffer(frame_buffer, packet_list_it);
+ UpdateCompleteSession();
+ if (decode_error_mode == kWithErrors)
+ decodable_ = true;
+ else if (decode_error_mode == kSelectiveErrors)
+ UpdateDecodableSession(frame_data);
+ return static_cast<int>(returnLength);
+}
+
+void VCMSessionInfo::InformOfEmptyPacket(uint16_t seq_num) {
+ // Empty packets may be FEC or filler packets. They are sequential and
+ // follow the data packets, therefore, we should only keep track of the high
+ // and low sequence numbers and may assume that the packets in between are
+ // empty packets belonging to the same frame (timestamp).
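+  // For example (illustrative): empty packets with sequence numbers 9, 10
+  // and 11 yield empty_seq_num_low_ == 9 and empty_seq_num_high_ == 11.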
+ if (empty_seq_num_high_ == -1)
+ empty_seq_num_high_ = seq_num;
+ else
+ empty_seq_num_high_ = LatestSequenceNumber(seq_num, empty_seq_num_high_);
+ if (empty_seq_num_low_ == -1 || IsNewerSequenceNumber(empty_seq_num_low_,
+ seq_num))
+ empty_seq_num_low_ = seq_num;
+}
+
+} // namespace webrtc
diff --git a/webrtc/modules/video_coding/main/source/session_info.h b/webrtc/modules/video_coding/main/source/session_info.h
new file mode 100644
index 0000000000..88071e19d5
--- /dev/null
+++ b/webrtc/modules/video_coding/main/source/session_info.h
@@ -0,0 +1,172 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_SESSION_INFO_H_
+#define WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_SESSION_INFO_H_
+
+#include <list>
+
+#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/video_coding/main/interface/video_coding.h"
+#include "webrtc/modules/video_coding/main/source/packet.h"
+#include "webrtc/typedefs.h"
+
+namespace webrtc {
+// Used to pass data from jitter buffer to session info.
+// This data is then used in determining whether a frame is decodable.
+struct FrameData {
+ int64_t rtt_ms;
+ float rolling_average_packets_per_frame;
+};
+
+class VCMSessionInfo {
+ public:
+ VCMSessionInfo();
+
+ void UpdateDataPointers(const uint8_t* old_base_ptr,
+ const uint8_t* new_base_ptr);
+ // NACK - Building the NACK lists.
+  // Build hard NACK list: Zero out all entries in the list up to and
+  // including the low sequence number.
+ int BuildHardNackList(int* seq_num_list,
+ int seq_num_list_length,
+ int nack_seq_nums_index);
+
+ // Build soft NACK list: Zero out only a subset of the packets, discard
+ // empty packets.
+ int BuildSoftNackList(int* seq_num_list,
+ int seq_num_list_length,
+ int nack_seq_nums_index,
+ int rtt_ms);
+ void Reset();
+ int InsertPacket(const VCMPacket& packet,
+ uint8_t* frame_buffer,
+ VCMDecodeErrorMode enable_decodable_state,
+ const FrameData& frame_data);
+ bool complete() const;
+ bool decodable() const;
+
+ // Builds fragmentation headers for VP8, each fragment being a decodable
+ // VP8 partition. Returns the total number of bytes which are decodable. Is
+ // used instead of MakeDecodable for VP8.
+ size_t BuildVP8FragmentationHeader(uint8_t* frame_buffer,
+ size_t frame_buffer_length,
+ RTPFragmentationHeader* fragmentation);
+
+  // Makes the frame decodable, i.e., only containing decodable NALUs. All
+  // non-decodable NALUs are deleted and packets are moved in memory to
+  // remove any empty space.
+ // Returns the number of bytes deleted from the session.
+ size_t MakeDecodable();
+
+ // Sets decodable_ to false.
+ // Used by the dual decoder. After the mode is changed to kNoErrors from
+  // kWithErrors or kSelectiveErrors, any states that have been marked
+ // decodable and are not complete are marked as non-decodable.
+ void SetNotDecodableIfIncomplete();
+
+ size_t SessionLength() const;
+ int NumPackets() const;
+ bool HaveFirstPacket() const;
+ bool HaveLastPacket() const;
+ bool session_nack() const;
+ webrtc::FrameType FrameType() const { return frame_type_; }
+ int LowSequenceNumber() const;
+
+ // Returns highest sequence number, media or empty.
+ int HighSequenceNumber() const;
+ int PictureId() const;
+ int TemporalId() const;
+ bool LayerSync() const;
+ int Tl0PicId() const;
+ bool NonReference() const;
+
+ void SetGofInfo(const GofInfoVP9& gof_info, size_t idx);
+
+ // The number of packets discarded because the decoder can't make use of
+ // them.
+ int packets_not_decodable() const;
+
+ private:
+ enum { kMaxVP8Partitions = 9 };
+
+ typedef std::list<VCMPacket> PacketList;
+ typedef PacketList::iterator PacketIterator;
+ typedef PacketList::const_iterator PacketIteratorConst;
+ typedef PacketList::reverse_iterator ReversePacketIterator;
+
+ void InformOfEmptyPacket(uint16_t seq_num);
+
+  // Finds the packet at the beginning of the next VP8 partition. If none
+  // is found, the returned iterator points to |packets_.end()|.
+  // |it| is expected to point to the last packet of the previous partition,
+  // or to the first packet of the frame.
+ PacketIterator FindNextPartitionBeginning(PacketIterator it) const;
+
+  // Returns an iterator pointing to the last packet of the partition
+  // pointed to by |it|.
+ PacketIterator FindPartitionEnd(PacketIterator it) const;
+ static bool InSequence(const PacketIterator& it,
+ const PacketIterator& prev_it);
+ size_t InsertBuffer(uint8_t* frame_buffer,
+ PacketIterator packetIterator);
+ size_t Insert(const uint8_t* buffer,
+ size_t length,
+ bool insert_start_code,
+ uint8_t* frame_buffer);
+ void ShiftSubsequentPackets(PacketIterator it, int steps_to_shift);
+ PacketIterator FindNaluEnd(PacketIterator packet_iter) const;
+ // Deletes the data of all packets between |start| and |end|, inclusively.
+ // Note that this function doesn't delete the actual packets.
+ size_t DeletePacketData(PacketIterator start,
+ PacketIterator end);
+ void UpdateCompleteSession();
+
+  // When enabled, determines whether the session is decodable, i.e.,
+  // incomplete but still sent to the decoder.
+  // Note: the definition assumes random loss.
+  // A frame is defined to be decodable when:
+  //  - The round trip time is higher than the threshold;
+  //  - It is not a key frame;
+  //  - It has the first packet: in VP8 the first packet contains all or
+  //    part of the first partition, which consists of the most relevant
+  //    information for decoding; and
+  //  - Either more than the upper threshold of the average number of
+  //    packets per frame is present, or fewer than the lower threshold is
+  //    present. The latter suggests a small frame, which is unlikely to
+  //    contain many motion vectors, so having the first packet will likely
+  //    suffice. Once we have more than the lower threshold of the frame,
+  //    we know that the frame is medium or large-sized.
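+  // For example (illustrative): with a rolling average of 10 packets per
+  // frame and thresholds 0.2/0.8, an incomplete delta frame that has its
+  // first packet and either more than 8 or at most 2 packets is marked
+  // decodable (given rtt_ms >= 100 ms); one with 3 to 8 packets is not.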
+ void UpdateDecodableSession(const FrameData& frame_data);
+
+ // If this session has been NACKed by the jitter buffer.
+ bool session_nack_;
+ bool complete_;
+ bool decodable_;
+ webrtc::FrameType frame_type_;
+ // Packets in this frame.
+ PacketList packets_;
+ int empty_seq_num_low_;
+ int empty_seq_num_high_;
+
+ // The following two variables correspond to the first and last media packets
+ // in a session defined by the first packet flag and the marker bit.
+ // They are not necessarily equal to the front and back packets, as packets
+ // may enter out of order.
+ // TODO(mikhal): Refactor the list to use a map.
+ int first_packet_seq_num_;
+ int last_packet_seq_num_;
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_SESSION_INFO_H_
diff --git a/webrtc/modules/video_coding/main/source/session_info_unittest.cc b/webrtc/modules/video_coding/main/source/session_info_unittest.cc
new file mode 100644
index 0000000000..58c352d3fc
--- /dev/null
+++ b/webrtc/modules/video_coding/main/source/session_info_unittest.cc
@@ -0,0 +1,1064 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <string.h>
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/video_coding/main/source/packet.h"
+#include "webrtc/modules/video_coding/main/source/session_info.h"
+
+namespace webrtc {
+
+class TestSessionInfo : public ::testing::Test {
+ protected:
+ virtual void SetUp() {
+ memset(packet_buffer_, 0, sizeof(packet_buffer_));
+ memset(frame_buffer_, 0, sizeof(frame_buffer_));
+ session_.Reset();
+ packet_.Reset();
+ packet_.frameType = kVideoFrameDelta;
+ packet_.sizeBytes = packet_buffer_size();
+ packet_.dataPtr = packet_buffer_;
+ packet_.seqNum = 0;
+ packet_.timestamp = 0;
+ frame_data.rtt_ms = 0;
+ frame_data.rolling_average_packets_per_frame = -1;
+ }
+
+ void FillPacket(uint8_t start_value) {
+ for (size_t i = 0; i < packet_buffer_size(); ++i)
+ packet_buffer_[i] = start_value + i;
+ }
+
+ void VerifyPacket(uint8_t* start_ptr, uint8_t start_value) {
+ for (size_t j = 0; j < packet_buffer_size(); ++j) {
+ ASSERT_EQ(start_value + j, start_ptr[j]);
+ }
+ }
+
+ size_t packet_buffer_size() const {
+ return sizeof(packet_buffer_) / sizeof(packet_buffer_[0]);
+ }
+ size_t frame_buffer_size() const {
+ return sizeof(frame_buffer_) / sizeof(frame_buffer_[0]);
+ }
+
+ enum { kPacketBufferSize = 10 };
+
+ uint8_t packet_buffer_[kPacketBufferSize];
+ uint8_t frame_buffer_[10 * kPacketBufferSize];
+
+ VCMSessionInfo session_;
+ VCMPacket packet_;
+ FrameData frame_data;
+};
+
+class TestVP8Partitions : public TestSessionInfo {
+ protected:
+ enum { kMaxVP8Partitions = 9 };
+
+ virtual void SetUp() {
+ TestSessionInfo::SetUp();
+ vp8_header_ = &packet_header_.type.Video.codecHeader.VP8;
+ packet_header_.frameType = kVideoFrameDelta;
+ packet_header_.type.Video.codec = kRtpVideoVp8;
+ vp8_header_->InitRTPVideoHeaderVP8();
+ fragmentation_.VerifyAndAllocateFragmentationHeader(kMaxVP8Partitions);
+ }
+
+ bool VerifyPartition(int partition_id,
+ int packets_expected,
+ int start_value) {
+ EXPECT_EQ(packets_expected * packet_buffer_size(),
+ fragmentation_.fragmentationLength[partition_id]);
+ for (int i = 0; i < packets_expected; ++i) {
+ size_t packet_index = fragmentation_.fragmentationOffset[partition_id] +
+ i * packet_buffer_size();
+ if (packet_index + packet_buffer_size() > frame_buffer_size())
+ return false;
+ VerifyPacket(frame_buffer_ + packet_index, start_value + i);
+ }
+ return true;
+ }
+
+ WebRtcRTPHeader packet_header_;
+ RTPVideoHeaderVP8* vp8_header_;
+ RTPFragmentationHeader fragmentation_;
+};
+
+class TestNalUnits : public TestSessionInfo {
+ protected:
+ virtual void SetUp() {
+ TestSessionInfo::SetUp();
+ packet_.codec = kVideoCodecVP8;
+ }
+
+ bool VerifyNalu(int offset, int packets_expected, int start_value) {
+ EXPECT_GE(session_.SessionLength(),
+ packets_expected * packet_buffer_size());
+ for (int i = 0; i < packets_expected; ++i) {
+ int packet_index = (offset + i) * packet_buffer_size();
+ VerifyPacket(frame_buffer_ + packet_index, start_value + i);
+ }
+ return true;
+ }
+};
+
+class TestNackList : public TestSessionInfo {
+ protected:
+ static const size_t kMaxSeqNumListLength = 30;
+
+ virtual void SetUp() {
+ TestSessionInfo::SetUp();
+ seq_num_list_length_ = 0;
+ memset(seq_num_list_, 0, sizeof(seq_num_list_));
+ }
+
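+  // Builds a list of all sequence numbers in the inclusive range
+  // [low, high], capped at kMaxSeqNumListLength entries.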
+ void BuildSeqNumList(uint16_t low,
+ uint16_t high) {
+ size_t i = 0;
+ while (low != high + 1) {
+ EXPECT_LT(i, kMaxSeqNumListLength);
+ if (i >= kMaxSeqNumListLength) {
+ seq_num_list_length_ = kMaxSeqNumListLength;
+ return;
+ }
+ seq_num_list_[i] = low;
+ low++;
+ i++;
+ }
+ seq_num_list_length_ = i;
+ }
+
+ void VerifyAll(int value) {
+ for (int i = 0; i < seq_num_list_length_; ++i)
+ EXPECT_EQ(seq_num_list_[i], value);
+ }
+
+ int seq_num_list_[kMaxSeqNumListLength];
+ int seq_num_list_length_;
+};
+
+TEST_F(TestSessionInfo, TestSimpleAPIs) {
+ packet_.isFirstPacket = true;
+ packet_.seqNum = 0xFFFE;
+ packet_.sizeBytes = packet_buffer_size();
+ packet_.frameType = kVideoFrameKey;
+ FillPacket(0);
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
+ kNoErrors, frame_data)));
+ EXPECT_FALSE(session_.HaveLastPacket());
+ EXPECT_EQ(kVideoFrameKey, session_.FrameType());
+
+ packet_.isFirstPacket = false;
+ packet_.markerBit = true;
+ packet_.seqNum += 1;
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
+ kNoErrors, frame_data)));
+ EXPECT_TRUE(session_.HaveLastPacket());
+ EXPECT_EQ(packet_.seqNum, session_.HighSequenceNumber());
+ EXPECT_EQ(0xFFFE, session_.LowSequenceNumber());
+
+  // Insert an empty packet, which will become the new high sequence number.
+  // To make things more difficult, we also make sure to have a wrap here.
+ packet_.isFirstPacket = false;
+ packet_.markerBit = true;
+ packet_.seqNum = 2;
+ packet_.sizeBytes = 0;
+ packet_.frameType = kEmptyFrame;
+ EXPECT_EQ(0,
+ session_.InsertPacket(packet_,
+ frame_buffer_,
+ kNoErrors,
+ frame_data));
+ EXPECT_EQ(packet_.seqNum, session_.HighSequenceNumber());
+}
+
+TEST_F(TestSessionInfo, NormalOperation) {
+ packet_.seqNum = 0xFFFF;
+ packet_.isFirstPacket = true;
+ packet_.markerBit = false;
+ FillPacket(0);
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
+ kNoErrors, frame_data)));
+
+ packet_.isFirstPacket = false;
+ for (int i = 1; i < 9; ++i) {
+ packet_.seqNum += 1;
+ FillPacket(i);
+ ASSERT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
+ kNoErrors,
+ frame_data)));
+ }
+
+ packet_.seqNum += 1;
+ packet_.markerBit = true;
+ FillPacket(9);
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
+ kNoErrors, frame_data)));
+
+ EXPECT_EQ(10 * packet_buffer_size(), session_.SessionLength());
+ for (int i = 0; i < 10; ++i) {
+ SCOPED_TRACE("Calling VerifyPacket");
+ VerifyPacket(frame_buffer_ + i * packet_buffer_size(), i);
+ }
+}
+
+TEST_F(TestSessionInfo, ErrorsEqualDecodableState) {
+ packet_.seqNum = 0xFFFF;
+ packet_.isFirstPacket = false;
+ packet_.markerBit = false;
+ FillPacket(3);
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
+ kWithErrors,
+ frame_data)));
+ EXPECT_TRUE(session_.decodable());
+}
+
+TEST_F(TestSessionInfo, SelectiveDecodableState) {
+ packet_.seqNum = 0xFFFF;
+ packet_.isFirstPacket = false;
+ packet_.markerBit = false;
+ FillPacket(1);
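+  // With a rolling average of ~11 packets per frame and a 150 ms RTT, a
+  // single mid-frame packet should not be enough to make the session
+  // decodable under kSelectiveErrors.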
+ frame_data.rolling_average_packets_per_frame = 11;
+ frame_data.rtt_ms = 150;
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
+ kSelectiveErrors,
+ frame_data)));
+ EXPECT_FALSE(session_.decodable());
+
+ packet_.seqNum -= 1;
+ FillPacket(0);
+ packet_.isFirstPacket = true;
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
+ kSelectiveErrors,
+ frame_data)));
+ EXPECT_TRUE(session_.decodable());
+
+ packet_.isFirstPacket = false;
+ packet_.seqNum += 1;
+ for (int i = 2; i < 8; ++i) {
+ packet_.seqNum += 1;
+ FillPacket(i);
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
+ kSelectiveErrors,
+ frame_data)));
+ EXPECT_TRUE(session_.decodable());
+ }
+
+ packet_.seqNum += 1;
+ FillPacket(8);
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
+ kSelectiveErrors,
+ frame_data)));
+ EXPECT_TRUE(session_.decodable());
+}
+
+TEST_F(TestSessionInfo, OutOfBoundsPackets1PacketFrame) {
+ packet_.seqNum = 0x0001;
+ packet_.isFirstPacket = true;
+ packet_.markerBit = true;
+ FillPacket(1);
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
+ kNoErrors, frame_data)));
+
+ packet_.seqNum = 0x0004;
+ packet_.isFirstPacket = true;
+ packet_.markerBit = true;
+ FillPacket(1);
+ EXPECT_EQ(-3, session_.InsertPacket(packet_,
+ frame_buffer_,
+ kNoErrors,
+ frame_data));
+ packet_.seqNum = 0x0000;
+ packet_.isFirstPacket = false;
+ packet_.markerBit = false;
+ FillPacket(1);
+ EXPECT_EQ(-3, session_.InsertPacket(packet_,
+ frame_buffer_,
+ kNoErrors,
+ frame_data));
+}
+
+TEST_F(TestSessionInfo, SetMarkerBitOnce) {
+ packet_.seqNum = 0x0005;
+ packet_.isFirstPacket = false;
+ packet_.markerBit = true;
+ FillPacket(1);
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
+ kNoErrors, frame_data)));
+ ++packet_.seqNum;
+ packet_.isFirstPacket = true;
+ packet_.markerBit = true;
+ FillPacket(1);
+ EXPECT_EQ(-3, session_.InsertPacket(packet_,
+ frame_buffer_,
+ kNoErrors,
+ frame_data));
+}
+
+TEST_F(TestSessionInfo, OutOfBoundsPacketsBase) {
+ // Allow packets in the range 5-6.
+ packet_.seqNum = 0x0005;
+ packet_.isFirstPacket = true;
+ packet_.markerBit = false;
+ FillPacket(1);
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
+ kNoErrors, frame_data)));
+  // Insert an older packet with the first-packet flag set.
+ packet_.seqNum = 0x0004;
+ packet_.isFirstPacket = true;
+ packet_.markerBit = true;
+ FillPacket(1);
+ EXPECT_EQ(-3, session_.InsertPacket(packet_,
+ frame_buffer_,
+ kNoErrors,
+ frame_data));
+ packet_.seqNum = 0x0006;
+ packet_.isFirstPacket = true;
+ packet_.markerBit = true;
+ FillPacket(1);
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
+ kNoErrors, frame_data)));
+ packet_.seqNum = 0x0008;
+ packet_.isFirstPacket = false;
+ packet_.markerBit = true;
+ FillPacket(1);
+ EXPECT_EQ(-3, session_.InsertPacket(packet_,
+ frame_buffer_,
+ kNoErrors,
+ frame_data));
+}
+
+TEST_F(TestSessionInfo, OutOfBoundsPacketsWrap) {
+ packet_.seqNum = 0xFFFE;
+ packet_.isFirstPacket = true;
+ packet_.markerBit = false;
+ FillPacket(1);
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
+ kNoErrors, frame_data)));
+
+ packet_.seqNum = 0x0004;
+ packet_.isFirstPacket = false;
+ packet_.markerBit = true;
+ FillPacket(1);
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
+ kNoErrors, frame_data)));
+ packet_.seqNum = 0x0002;
+ packet_.isFirstPacket = false;
+ packet_.markerBit = false;
+ FillPacket(1);
+ ASSERT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
+ kNoErrors, frame_data)));
+ packet_.seqNum = 0xFFF0;
+ packet_.isFirstPacket = false;
+ packet_.markerBit = false;
+ FillPacket(1);
+ EXPECT_EQ(-3,
+ session_.InsertPacket(packet_,
+ frame_buffer_,
+ kNoErrors,
+ frame_data));
+ packet_.seqNum = 0x0006;
+ packet_.isFirstPacket = false;
+ packet_.markerBit = false;
+ FillPacket(1);
+ EXPECT_EQ(-3,
+ session_.InsertPacket(packet_,
+ frame_buffer_,
+ kNoErrors,
+ frame_data));
+}
+
+TEST_F(TestSessionInfo, OutOfBoundsOutOfOrder) {
+ // Insert out of bound regular packets, and then the first and last packet.
+ // Verify that correct bounds are maintained.
+ packet_.seqNum = 0x0003;
+ packet_.isFirstPacket = false;
+ packet_.markerBit = false;
+ FillPacket(1);
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
+ kNoErrors, frame_data)));
+  // Insert a newer packet with the first-packet flag set.
+ packet_.seqNum = 0x0005;
+ packet_.isFirstPacket = true;
+ packet_.markerBit = false;
+ FillPacket(1);
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
+ kNoErrors, frame_data)));
+ packet_.seqNum = 0x0004;
+ packet_.isFirstPacket = false;
+ packet_.markerBit = false;
+ FillPacket(1);
+ EXPECT_EQ(-3, session_.InsertPacket(packet_,
+ frame_buffer_,
+ kNoErrors,
+ frame_data));
+ packet_.seqNum = 0x0010;
+ packet_.isFirstPacket = false;
+ packet_.markerBit = false;
+ FillPacket(1);
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
+ kNoErrors, frame_data)));
+ packet_.seqNum = 0x0008;
+ packet_.isFirstPacket = false;
+ packet_.markerBit = true;
+ FillPacket(1);
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
+ kNoErrors, frame_data)));
+
+ packet_.seqNum = 0x0009;
+ packet_.isFirstPacket = false;
+ packet_.markerBit = false;
+ FillPacket(1);
+ EXPECT_EQ(-3, session_.InsertPacket(packet_,
+ frame_buffer_,
+ kNoErrors,
+ frame_data));
+}
+
+TEST_F(TestVP8Partitions, TwoPartitionsOneLoss) {
+ // Partition 0 | Partition 1
+ // [ 0 ] [ 2 ] | [ 3 ]
+ packet_header_.type.Video.isFirstPacket = true;
+ vp8_header_->beginningOfPartition = true;
+ vp8_header_->partitionId = 0;
+ packet_header_.header.markerBit = false;
+ packet_header_.header.sequenceNumber = 0;
+ FillPacket(0);
+ VCMPacket* packet = new VCMPacket(packet_buffer_, packet_buffer_size(),
+ packet_header_);
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
+ kNoErrors, frame_data)));
+ delete packet;
+
+ packet_header_.type.Video.isFirstPacket = false;
+ vp8_header_->partitionId = 0;
+ vp8_header_->beginningOfPartition = false;
+ packet_header_.header.markerBit = false;
+ packet_header_.header.sequenceNumber += 2;
+ FillPacket(2);
+ packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
+ kNoErrors, frame_data)));
+ delete packet;
+
+ packet_header_.type.Video.isFirstPacket = false;
+ vp8_header_->partitionId = 1;
+ vp8_header_->beginningOfPartition = true;
+ packet_header_.header.markerBit = true;
+ packet_header_.header.sequenceNumber += 1;
+ FillPacket(3);
+ packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
+ kNoErrors, frame_data)));
+ delete packet;
+
+ // One packet should be removed (end of partition 0).
+ EXPECT_EQ(2 * packet_buffer_size(),
+ session_.BuildVP8FragmentationHeader(
+ frame_buffer_, frame_buffer_size(), &fragmentation_));
+ SCOPED_TRACE("Calling VerifyPartition");
+ EXPECT_TRUE(VerifyPartition(0, 1, 0));
+ SCOPED_TRACE("Calling VerifyPartition");
+ EXPECT_TRUE(VerifyPartition(1, 1, 3));
+}
+
+TEST_F(TestVP8Partitions, TwoPartitionsOneLoss2) {
+ // Partition 0 | Partition 1
+ // [ 1 ] [ 2 ] | [ 3 ] [ 5 ]
+ packet_header_.type.Video.isFirstPacket = true;
+ vp8_header_->beginningOfPartition = true;
+ vp8_header_->partitionId = 0;
+ packet_header_.header.markerBit = false;
+ packet_header_.header.sequenceNumber = 1;
+ FillPacket(1);
+ VCMPacket* packet = new VCMPacket(packet_buffer_, packet_buffer_size(),
+ packet_header_);
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
+ kNoErrors, frame_data)));
+ delete packet;
+
+ packet_header_.type.Video.isFirstPacket = false;
+ vp8_header_->partitionId = 0;
+ vp8_header_->beginningOfPartition = false;
+ packet_header_.header.markerBit = false;
+ packet_header_.header.sequenceNumber += 1;
+ FillPacket(2);
+ packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
+ kNoErrors, frame_data)));
+ delete packet;
+
+ packet_header_.type.Video.isFirstPacket = false;
+ vp8_header_->partitionId = 1;
+ vp8_header_->beginningOfPartition = true;
+ packet_header_.header.markerBit = false;
+ packet_header_.header.sequenceNumber += 1;
+ FillPacket(3);
+ packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
+ kNoErrors, frame_data)));
+ delete packet;
+
+ packet_header_.type.Video.isFirstPacket = false;
+ vp8_header_->partitionId = 1;
+ vp8_header_->beginningOfPartition = false;
+ packet_header_.header.markerBit = true;
+ packet_header_.header.sequenceNumber += 2;
+ FillPacket(5);
+ packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
+ kNoErrors, frame_data)));
+ delete packet;
+
+  // One packet should be removed (the end of partition 1); 3 are left.
+ EXPECT_EQ(3 * packet_buffer_size(),
+ session_.BuildVP8FragmentationHeader(
+ frame_buffer_, frame_buffer_size(), &fragmentation_));
+ SCOPED_TRACE("Calling VerifyPartition");
+ EXPECT_TRUE(VerifyPartition(0, 2, 1));
+ SCOPED_TRACE("Calling VerifyPartition");
+ EXPECT_TRUE(VerifyPartition(1, 1, 3));
+}
+
+TEST_F(TestVP8Partitions, TwoPartitionsNoLossWrap) {
+ // Partition 0 | Partition 1
+ // [ fffd ] [ fffe ] | [ ffff ] [ 0 ]
+ packet_header_.type.Video.isFirstPacket = true;
+ vp8_header_->beginningOfPartition = true;
+ vp8_header_->partitionId = 0;
+ packet_header_.header.markerBit = false;
+ packet_header_.header.sequenceNumber = 0xfffd;
+ FillPacket(0);
+ VCMPacket* packet = new VCMPacket(packet_buffer_, packet_buffer_size(),
+ packet_header_);
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
+ kNoErrors, frame_data)));
+ delete packet;
+
+ packet_header_.type.Video.isFirstPacket = false;
+ vp8_header_->partitionId = 0;
+ vp8_header_->beginningOfPartition = false;
+ packet_header_.header.markerBit = false;
+ packet_header_.header.sequenceNumber += 1;
+ FillPacket(1);
+ packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
+ kNoErrors, frame_data)));
+ delete packet;
+
+ packet_header_.type.Video.isFirstPacket = false;
+ vp8_header_->partitionId = 1;
+ vp8_header_->beginningOfPartition = true;
+ packet_header_.header.markerBit = false;
+ packet_header_.header.sequenceNumber += 1;
+ FillPacket(2);
+ packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
+ kNoErrors, frame_data)));
+ delete packet;
+
+ packet_header_.type.Video.isFirstPacket = false;
+ vp8_header_->partitionId = 1;
+ vp8_header_->beginningOfPartition = false;
+ packet_header_.header.markerBit = true;
+ packet_header_.header.sequenceNumber += 1;
+ FillPacket(3);
+ packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
+ kNoErrors, frame_data)));
+ delete packet;
+
+ // No packet should be removed.
+ EXPECT_EQ(4 * packet_buffer_size(),
+ session_.BuildVP8FragmentationHeader(
+ frame_buffer_, frame_buffer_size(), &fragmentation_));
+ SCOPED_TRACE("Calling VerifyPartition");
+ EXPECT_TRUE(VerifyPartition(0, 2, 0));
+ SCOPED_TRACE("Calling VerifyPartition");
+ EXPECT_TRUE(VerifyPartition(1, 2, 2));
+}
+
+TEST_F(TestVP8Partitions, TwoPartitionsLossWrap) {
+ // Partition 0 | Partition 1
+ // [ fffd ] [ fffe ] | [ ffff ] [ 1 ]
+ packet_header_.type.Video.isFirstPacket = true;
+ vp8_header_->beginningOfPartition = true;
+ vp8_header_->partitionId = 0;
+ packet_header_.header.markerBit = false;
+ packet_header_.header.sequenceNumber = 0xfffd;
+ FillPacket(0);
+ VCMPacket* packet = new VCMPacket(packet_buffer_, packet_buffer_size(),
+ packet_header_);
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
+ kNoErrors, frame_data)));
+ delete packet;
+
+ packet_header_.type.Video.isFirstPacket = false;
+ vp8_header_->partitionId = 0;
+ vp8_header_->beginningOfPartition = false;
+ packet_header_.header.markerBit = false;
+ packet_header_.header.sequenceNumber += 1;
+ FillPacket(1);
+ packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
+ kNoErrors, frame_data)));
+ delete packet;
+
+ packet_header_.type.Video.isFirstPacket = false;
+ vp8_header_->partitionId = 1;
+ vp8_header_->beginningOfPartition = true;
+ packet_header_.header.markerBit = false;
+ packet_header_.header.sequenceNumber += 1;
+ FillPacket(2);
+ packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
+ kNoErrors, frame_data)));
+ delete packet;
+
+ packet_header_.type.Video.isFirstPacket = false;
+ vp8_header_->partitionId = 1;
+ vp8_header_->beginningOfPartition = false;
+ packet_header_.header.markerBit = true;
+ packet_header_.header.sequenceNumber += 2;
+ FillPacket(3);
+ packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
+ kNoErrors, frame_data)));
+ delete packet;
+
+  // One packet should be removed from the last partition.
+ EXPECT_EQ(3 * packet_buffer_size(),
+ session_.BuildVP8FragmentationHeader(
+ frame_buffer_, frame_buffer_size(), &fragmentation_));
+ SCOPED_TRACE("Calling VerifyPartition");
+ EXPECT_TRUE(VerifyPartition(0, 2, 0));
+ SCOPED_TRACE("Calling VerifyPartition");
+ EXPECT_TRUE(VerifyPartition(1, 1, 2));
+}
+
+TEST_F(TestVP8Partitions, ThreePartitionsOneMissing) {
+  // Partition 0 | Partition 1 | Partition 2
+  // [ 1 ] [ 2 ] |             | [ 5 ] [ 6 ]
+ packet_header_.type.Video.isFirstPacket = true;
+ vp8_header_->beginningOfPartition = true;
+ vp8_header_->partitionId = 0;
+ packet_header_.header.markerBit = false;
+ packet_header_.header.sequenceNumber = 1;
+ FillPacket(1);
+ VCMPacket* packet = new VCMPacket(packet_buffer_, packet_buffer_size(),
+ packet_header_);
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
+ kNoErrors, frame_data)));
+ delete packet;
+
+ packet_header_.type.Video.isFirstPacket = false;
+ vp8_header_->partitionId = 0;
+ vp8_header_->beginningOfPartition = false;
+ packet_header_.header.markerBit = false;
+ packet_header_.header.sequenceNumber += 1;
+ FillPacket(2);
+ packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
+ kNoErrors, frame_data)));
+ delete packet;
+
+ packet_header_.type.Video.isFirstPacket = false;
+ vp8_header_->partitionId = 2;
+ vp8_header_->beginningOfPartition = true;
+ packet_header_.header.markerBit = false;
+ packet_header_.header.sequenceNumber += 3;
+ FillPacket(5);
+ packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
+ kNoErrors, frame_data)));
+ delete packet;
+
+ packet_header_.type.Video.isFirstPacket = false;
+ vp8_header_->partitionId = 2;
+ vp8_header_->beginningOfPartition = false;
+ packet_header_.header.markerBit = true;
+ packet_header_.header.sequenceNumber += 1;
+ FillPacket(6);
+ packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
+ kNoErrors, frame_data)));
+ delete packet;
+
+ // No packet should be removed.
+ EXPECT_EQ(4 * packet_buffer_size(),
+ session_.BuildVP8FragmentationHeader(
+ frame_buffer_, frame_buffer_size(), &fragmentation_));
+ SCOPED_TRACE("Calling VerifyPartition");
+ EXPECT_TRUE(VerifyPartition(0, 2, 1));
+ SCOPED_TRACE("Calling VerifyPartition");
+ EXPECT_TRUE(VerifyPartition(2, 2, 5));
+}
+
+TEST_F(TestVP8Partitions, ThreePartitionsLossInSecond) {
+  // Partition 0 | Partition 1 | Partition 2
+ // [ 1 ] [ 2 ] | [ 4 ] [ 5 ] | [ 6 ] [ 7 ]
+ packet_header_.type.Video.isFirstPacket = true;
+ vp8_header_->beginningOfPartition = true;
+ vp8_header_->partitionId = 0;
+ packet_header_.header.markerBit = false;
+ packet_header_.header.sequenceNumber = 1;
+ FillPacket(1);
+ VCMPacket* packet = new VCMPacket(packet_buffer_, packet_buffer_size(),
+ packet_header_);
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
+ kNoErrors, frame_data)));
+ delete packet;
+
+ packet_header_.type.Video.isFirstPacket = false;
+ vp8_header_->partitionId = 0;
+ vp8_header_->beginningOfPartition = false;
+ packet_header_.header.markerBit = false;
+ packet_header_.header.sequenceNumber += 1;
+ FillPacket(2);
+ packet = new VCMPacket(packet_buffer_, packet_buffer_size(),
+ packet_header_);
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
+ kNoErrors, frame_data)));
+ delete packet;
+
+ packet_header_.type.Video.isFirstPacket = false;
+ vp8_header_->partitionId = 1;
+ vp8_header_->beginningOfPartition = false;
+ packet_header_.header.markerBit = false;
+ packet_header_.header.sequenceNumber += 2;
+ FillPacket(4);
+ packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
+ kNoErrors, frame_data)));
+ delete packet;
+
+ packet_header_.type.Video.isFirstPacket = false;
+ vp8_header_->partitionId = 1;
+ vp8_header_->beginningOfPartition = false;
+ packet_header_.header.markerBit = false;
+ packet_header_.header.sequenceNumber += 1;
+ FillPacket(5);
+ packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
+ kNoErrors, frame_data)));
+ delete packet;
+
+ packet_header_.type.Video.isFirstPacket = false;
+ vp8_header_->partitionId = 2;
+ vp8_header_->beginningOfPartition = true;
+ packet_header_.header.markerBit = false;
+ packet_header_.header.sequenceNumber += 1;
+ FillPacket(6);
+ packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
+ kNoErrors, frame_data)));
+ delete packet;
+
+ packet_header_.type.Video.isFirstPacket = false;
+ vp8_header_->partitionId = 2;
+ vp8_header_->beginningOfPartition = false;
+ packet_header_.header.markerBit = true;
+ packet_header_.header.sequenceNumber += 1;
+ FillPacket(7);
+ packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
+ kNoErrors, frame_data)));
+ delete packet;
+
+  // 2 partitions are left; 2 packets were removed from the second partition.
+ EXPECT_EQ(4 * packet_buffer_size(),
+ session_.BuildVP8FragmentationHeader(
+ frame_buffer_, frame_buffer_size(), &fragmentation_));
+ SCOPED_TRACE("Calling VerifyPartition");
+ EXPECT_TRUE(VerifyPartition(0, 2, 1));
+ SCOPED_TRACE("Calling VerifyPartition");
+ EXPECT_TRUE(VerifyPartition(2, 2, 6));
+}
+
+TEST_F(TestVP8Partitions, AggregationOverTwoPackets) {
+ // Partition 0 | Partition 1 | Partition 2
+ // [ 0 | ] [ 1 ] | [ 2 ]
+ packet_header_.type.Video.isFirstPacket = true;
+ vp8_header_->beginningOfPartition = true;
+ vp8_header_->partitionId = 0;
+ packet_header_.header.markerBit = false;
+ packet_header_.header.sequenceNumber = 0;
+ FillPacket(0);
+ VCMPacket* packet = new VCMPacket(packet_buffer_, packet_buffer_size(),
+ packet_header_);
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
+ kNoErrors, frame_data)));
+ delete packet;
+
+ packet_header_.type.Video.isFirstPacket = false;
+ vp8_header_->partitionId = 1;
+ vp8_header_->beginningOfPartition = false;
+ packet_header_.header.markerBit = false;
+ packet_header_.header.sequenceNumber += 1;
+ FillPacket(1);
+ packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
+ kNoErrors, frame_data)));
+ delete packet;
+
+ packet_header_.type.Video.isFirstPacket = false;
+ vp8_header_->partitionId = 2;
+ vp8_header_->beginningOfPartition = true;
+ packet_header_.header.markerBit = true;
+ packet_header_.header.sequenceNumber += 1;
+ FillPacket(2);
+ packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
+ kNoErrors, frame_data)));
+ delete packet;
+
+ // No packets removed.
+ EXPECT_EQ(3 * packet_buffer_size(),
+ session_.BuildVP8FragmentationHeader(
+ frame_buffer_, frame_buffer_size(), &fragmentation_));
+ SCOPED_TRACE("Calling VerifyPartition");
+ EXPECT_TRUE(VerifyPartition(0, 2, 0));
+  // Partition 1 was aggregated into partition 0, so it is empty.
+ SCOPED_TRACE("Calling VerifyPartition");
+ EXPECT_TRUE(VerifyPartition(1, 0, 0));
+ SCOPED_TRACE("Calling VerifyPartition");
+ EXPECT_TRUE(VerifyPartition(2, 1, 2));
+}
+
+TEST_F(TestNalUnits, OnlyReceivedEmptyPacket) {
+ packet_.isFirstPacket = false;
+ packet_.completeNALU = kNaluComplete;
+ packet_.frameType = kEmptyFrame;
+ packet_.sizeBytes = 0;
+ packet_.seqNum = 0;
+ packet_.markerBit = false;
+ EXPECT_EQ(0, session_.InsertPacket(packet_,
+ frame_buffer_,
+ kNoErrors,
+ frame_data));
+
+ EXPECT_EQ(0U, session_.MakeDecodable());
+ EXPECT_EQ(0U, session_.SessionLength());
+}
+
+TEST_F(TestNalUnits, OneIsolatedNaluLoss) {
+ packet_.isFirstPacket = true;
+ packet_.completeNALU = kNaluComplete;
+ packet_.seqNum = 0;
+ packet_.markerBit = false;
+ FillPacket(0);
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
+ kNoErrors, frame_data)));
+
+ packet_.isFirstPacket = false;
+ packet_.completeNALU = kNaluComplete;
+ packet_.seqNum += 2;
+ packet_.markerBit = true;
+ FillPacket(2);
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
+ kNoErrors, frame_data)));
+
+ EXPECT_EQ(0U, session_.MakeDecodable());
+ EXPECT_EQ(2 * packet_buffer_size(), session_.SessionLength());
+ SCOPED_TRACE("Calling VerifyNalu");
+ EXPECT_TRUE(VerifyNalu(0, 1, 0));
+ SCOPED_TRACE("Calling VerifyNalu");
+ EXPECT_TRUE(VerifyNalu(1, 1, 2));
+}
+
+TEST_F(TestNalUnits, LossInMiddleOfNalu) {
+ packet_.isFirstPacket = true;
+ packet_.completeNALU = kNaluComplete;
+ packet_.seqNum = 0;
+ packet_.markerBit = false;
+ FillPacket(0);
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
+ kNoErrors, frame_data)));
+
+ packet_.isFirstPacket = false;
+ packet_.completeNALU = kNaluEnd;
+ packet_.seqNum += 2;
+ packet_.markerBit = true;
+ FillPacket(2);
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
+ kNoErrors, frame_data)));
+
+ EXPECT_EQ(packet_buffer_size(), session_.MakeDecodable());
+ EXPECT_EQ(packet_buffer_size(), session_.SessionLength());
+ SCOPED_TRACE("Calling VerifyNalu");
+ EXPECT_TRUE(VerifyNalu(0, 1, 0));
+}
+
+TEST_F(TestNalUnits, StartAndEndOfLastNalUnitLost) {
+ packet_.isFirstPacket = true;
+ packet_.completeNALU = kNaluComplete;
+ packet_.seqNum = 0;
+ packet_.markerBit = false;
+ FillPacket(0);
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
+ kNoErrors, frame_data)));
+
+ packet_.isFirstPacket = false;
+ packet_.completeNALU = kNaluIncomplete;
+ packet_.seqNum += 2;
+ packet_.markerBit = false;
+ FillPacket(1);
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
+ kNoErrors, frame_data)));
+
+ EXPECT_EQ(packet_buffer_size(), session_.MakeDecodable());
+ EXPECT_EQ(packet_buffer_size(), session_.SessionLength());
+ SCOPED_TRACE("Calling VerifyNalu");
+ EXPECT_TRUE(VerifyNalu(0, 1, 0));
+}
+
+TEST_F(TestNalUnits, ReorderWrapNoLoss) {
+ packet_.seqNum = 0xFFFF;
+ packet_.isFirstPacket = false;
+ packet_.completeNALU = kNaluIncomplete;
+ packet_.seqNum += 1;
+ packet_.markerBit = false;
+ FillPacket(1);
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
+ kNoErrors, frame_data)));
+
+ packet_.isFirstPacket = true;
+ packet_.completeNALU = kNaluComplete;
+ packet_.seqNum -= 1;
+ packet_.markerBit = false;
+ FillPacket(0);
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
+ kNoErrors, frame_data)));
+
+ packet_.isFirstPacket = false;
+ packet_.completeNALU = kNaluEnd;
+ packet_.seqNum += 2;
+ packet_.markerBit = true;
+ FillPacket(2);
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
+ kNoErrors, frame_data)));
+
+ EXPECT_EQ(0U, session_.MakeDecodable());
+ EXPECT_EQ(3 * packet_buffer_size(), session_.SessionLength());
+ SCOPED_TRACE("Calling VerifyNalu");
+ EXPECT_TRUE(VerifyNalu(0, 1, 0));
+}
+
+TEST_F(TestNalUnits, WrapLosses) {
+ packet_.seqNum = 0xFFFF;
+ packet_.isFirstPacket = false;
+ packet_.completeNALU = kNaluIncomplete;
+ packet_.markerBit = false;
+ FillPacket(1);
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
+ kNoErrors, frame_data)));
+
+ packet_.isFirstPacket = false;
+ packet_.completeNALU = kNaluEnd;
+ packet_.seqNum += 2;
+ packet_.markerBit = true;
+ FillPacket(2);
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
+ kNoErrors, frame_data)));
+
+ EXPECT_EQ(2 * packet_buffer_size(), session_.MakeDecodable());
+ EXPECT_EQ(0U, session_.SessionLength());
+}
+
+TEST_F(TestNalUnits, ReorderWrapLosses) {
+ packet_.seqNum = 0xFFFF;
+
+ packet_.isFirstPacket = false;
+ packet_.completeNALU = kNaluEnd;
+ packet_.seqNum += 2;
+ packet_.markerBit = true;
+ FillPacket(2);
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
+ kNoErrors, frame_data)));
+
+ packet_.seqNum -= 2;
+ packet_.isFirstPacket = false;
+ packet_.completeNALU = kNaluIncomplete;
+ packet_.markerBit = false;
+ FillPacket(1);
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
+ kNoErrors, frame_data)));
+
+ EXPECT_EQ(2 * packet_buffer_size(), session_.MakeDecodable());
+ EXPECT_EQ(0U, session_.SessionLength());
+}
+
+} // namespace webrtc
diff --git a/webrtc/modules/video_coding/main/source/test/stream_generator.cc b/webrtc/modules/video_coding/main/source/test/stream_generator.cc
new file mode 100644
index 0000000000..b365d96dc0
--- /dev/null
+++ b/webrtc/modules/video_coding/main/source/test/stream_generator.cc
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/video_coding/main/source/test/stream_generator.h"
+
+#include <string.h>
+
+#include <list>
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/modules/video_coding/main/source/packet.h"
+#include "webrtc/modules/video_coding/main/test/test_util.h"
+#include "webrtc/system_wrappers/include/clock.h"
+
+namespace webrtc {
+
+StreamGenerator::StreamGenerator(uint16_t start_seq_num, int64_t current_time)
+ : packets_(), sequence_number_(start_seq_num), start_time_(current_time) {
+}
+
+void StreamGenerator::Init(uint16_t start_seq_num, int64_t current_time) {
+ packets_.clear();
+ sequence_number_ = start_seq_num;
+ start_time_ = current_time;
+ memset(packet_buffer_, 0, sizeof(packet_buffer_));
+}
+
+void StreamGenerator::GenerateFrame(FrameType type,
+ int num_media_packets,
+ int num_empty_packets,
+ int64_t time_ms) {
+ uint32_t timestamp = 90 * (time_ms - start_time_);
+ for (int i = 0; i < num_media_packets; ++i) {
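+    // Split the frame evenly over the media packets; adding
+    // num_media_packets / 2 before dividing rounds to the nearest integer.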
+ const int packet_size =
+ (kFrameSize + num_media_packets / 2) / num_media_packets;
+ bool marker_bit = (i == num_media_packets - 1);
+ packets_.push_back(GeneratePacket(
+ sequence_number_, timestamp, packet_size, (i == 0), marker_bit, type));
+ ++sequence_number_;
+ }
+ for (int i = 0; i < num_empty_packets; ++i) {
+ packets_.push_back(GeneratePacket(sequence_number_, timestamp, 0, false,
+ false, kEmptyFrame));
+ ++sequence_number_;
+ }
+}
+
+VCMPacket StreamGenerator::GeneratePacket(uint16_t sequence_number,
+ uint32_t timestamp,
+ unsigned int size,
+ bool first_packet,
+ bool marker_bit,
+ FrameType type) {
+ EXPECT_LT(size, kMaxPacketSize);
+ VCMPacket packet;
+ packet.seqNum = sequence_number;
+ packet.timestamp = timestamp;
+ packet.frameType = type;
+ packet.isFirstPacket = first_packet;
+ packet.markerBit = marker_bit;
+ packet.sizeBytes = size;
+ packet.dataPtr = packet_buffer_;
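+  // Derive NALU completeness from the packet's position in the frame: the
+  // first packet starts a NALU, a marker-bit packet ends one, and anything
+  // in between is incomplete.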
+ if (packet.isFirstPacket)
+ packet.completeNALU = kNaluStart;
+ else if (packet.markerBit)
+ packet.completeNALU = kNaluEnd;
+ else
+ packet.completeNALU = kNaluIncomplete;
+ return packet;
+}
+
+bool StreamGenerator::PopPacket(VCMPacket* packet, int index) {
+ std::list<VCMPacket>::iterator it = GetPacketIterator(index);
+ if (it == packets_.end())
+ return false;
+ if (packet)
+ *packet = (*it);
+ packets_.erase(it);
+ return true;
+}
+
+bool StreamGenerator::GetPacket(VCMPacket* packet, int index) {
+ std::list<VCMPacket>::iterator it = GetPacketIterator(index);
+ if (it == packets_.end())
+ return false;
+ if (packet)
+ *packet = (*it);
+ return true;
+}
+
+bool StreamGenerator::NextPacket(VCMPacket* packet) {
+ if (packets_.empty())
+ return false;
+ if (packet != NULL)
+ *packet = packets_.front();
+ packets_.pop_front();
+ return true;
+}
+
+void StreamGenerator::DropLastPacket() { packets_.pop_back(); }
+
+uint16_t StreamGenerator::NextSequenceNumber() const {
+ if (packets_.empty())
+ return sequence_number_;
+ return packets_.front().seqNum;
+}
+
+int StreamGenerator::PacketsRemaining() const { return packets_.size(); }
+
+std::list<VCMPacket>::iterator StreamGenerator::GetPacketIterator(int index) {
+ std::list<VCMPacket>::iterator it = packets_.begin();
+ for (int i = 0; i < index; ++i) {
+ ++it;
+ if (it == packets_.end())
+ break;
+ }
+ return it;
+}
+
+} // namespace webrtc
diff --git a/webrtc/modules/video_coding/main/source/test/stream_generator.h b/webrtc/modules/video_coding/main/source/test/stream_generator.h
new file mode 100644
index 0000000000..7902d16706
--- /dev/null
+++ b/webrtc/modules/video_coding/main/source/test/stream_generator.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_TEST_STREAM_GENERATOR_H_
+#define WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_TEST_STREAM_GENERATOR_H_
+
+#include <list>
+
+#include "webrtc/base/constructormagic.h"
+#include "webrtc/modules/video_coding/main/source/packet.h"
+#include "webrtc/modules/video_coding/main/test/test_util.h"
+#include "webrtc/typedefs.h"
+
+namespace webrtc {
+
+const unsigned int kDefaultBitrateKbps = 1000;
+const unsigned int kDefaultFrameRate = 25;
+const unsigned int kMaxPacketSize = 1500;
+const unsigned int kFrameSize =
+ (kDefaultBitrateKbps + kDefaultFrameRate * 4) / (kDefaultFrameRate * 8);
+const int kDefaultFramePeriodMs = 1000 / kDefaultFrameRate;
+
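+// Generates a stream of test packets. A minimal usage sketch, assuming a
+// SimulatedClock |clock| from system_wrappers:
+//   StreamGenerator stream(0, clock.TimeInMilliseconds());
+//   stream.GenerateFrame(kVideoFrameKey, 3, 0, clock.TimeInMilliseconds());
+//   VCMPacket packet;
+//   while (stream.NextPacket(&packet)) { /* feed |packet| to the receiver */ }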
+class StreamGenerator {
+ public:
+ StreamGenerator(uint16_t start_seq_num, int64_t current_time);
+ void Init(uint16_t start_seq_num, int64_t current_time);
+
+  // |time_ms| denotes the timestamp to put on the frame, in milliseconds.
+  // GenerateFrame translates |time_ms| into a 90 kHz timestamp and puts it
+  // on the frame's packets.
+ void GenerateFrame(FrameType type,
+ int num_media_packets,
+ int num_empty_packets,
+ int64_t time_ms);
+
+ bool PopPacket(VCMPacket* packet, int index);
+ void DropLastPacket();
+
+ bool GetPacket(VCMPacket* packet, int index);
+
+ bool NextPacket(VCMPacket* packet);
+
+ uint16_t NextSequenceNumber() const;
+
+ int PacketsRemaining() const;
+
+ private:
+ VCMPacket GeneratePacket(uint16_t sequence_number,
+ uint32_t timestamp,
+ unsigned int size,
+ bool first_packet,
+ bool marker_bit,
+ FrameType type);
+
+ std::list<VCMPacket>::iterator GetPacketIterator(int index);
+
+ std::list<VCMPacket> packets_;
+ uint16_t sequence_number_;
+ int64_t start_time_;
+ uint8_t packet_buffer_[kMaxPacketSize];
+
+ RTC_DISALLOW_COPY_AND_ASSIGN(StreamGenerator);
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_TEST_STREAM_GENERATOR_H_
diff --git a/webrtc/modules/video_coding/main/source/timestamp_map.cc b/webrtc/modules/video_coding/main/source/timestamp_map.cc
new file mode 100644
index 0000000000..c68a5af7ba
--- /dev/null
+++ b/webrtc/modules/video_coding/main/source/timestamp_map.cc
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+#include <stdlib.h>
+
+#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/video_coding/main/source/timestamp_map.h"
+
+namespace webrtc {
+
+VCMTimestampMap::VCMTimestampMap(size_t capacity)
+ : ring_buffer_(new TimestampDataTuple[capacity]),
+ capacity_(capacity),
+ next_add_idx_(0),
+ next_pop_idx_(0) {
+}
+
+VCMTimestampMap::~VCMTimestampMap() {
+}
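+
+// Reset() is declared in timestamp_map.h but no definition was provided in
+// this file; the minimal definition below simply empties the ring buffer.
+void VCMTimestampMap::Reset() {
+  next_add_idx_ = 0;
+  next_pop_idx_ = 0;
+}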
+
+void VCMTimestampMap::Add(uint32_t timestamp, VCMFrameInformation* data) {
+ ring_buffer_[next_add_idx_].timestamp = timestamp;
+ ring_buffer_[next_add_idx_].data = data;
+ next_add_idx_ = (next_add_idx_ + 1) % capacity_;
+
+ if (next_add_idx_ == next_pop_idx_) {
+ // Circular list full; forget oldest entry.
+ next_pop_idx_ = (next_pop_idx_ + 1) % capacity_;
+ }
+}
+
+VCMFrameInformation* VCMTimestampMap::Pop(uint32_t timestamp) {
+ while (!IsEmpty()) {
+ if (ring_buffer_[next_pop_idx_].timestamp == timestamp) {
+ // Found start time for this timestamp.
+ VCMFrameInformation* data = ring_buffer_[next_pop_idx_].data;
+ ring_buffer_[next_pop_idx_].data = nullptr;
+ next_pop_idx_ = (next_pop_idx_ + 1) % capacity_;
+ return data;
+ } else if (IsNewerTimestamp(ring_buffer_[next_pop_idx_].timestamp,
+ timestamp)) {
+ // The timestamp we are looking for is not in the list.
+ return nullptr;
+ }
+
+ // Not in this position, check next (and forget this position).
+ next_pop_idx_ = (next_pop_idx_ + 1) % capacity_;
+ }
+
+ // Could not find matching timestamp in list.
+ return nullptr;
+}
+
+bool VCMTimestampMap::IsEmpty() const {
+ return (next_add_idx_ == next_pop_idx_);
+}
+
+}  // namespace webrtc
diff --git a/webrtc/modules/video_coding/main/source/timestamp_map.h b/webrtc/modules/video_coding/main/source/timestamp_map.h
new file mode 100644
index 0000000000..3d6f1bca0f
--- /dev/null
+++ b/webrtc/modules/video_coding/main/source/timestamp_map.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_TIMESTAMP_MAP_H_
+#define WEBRTC_MODULES_VIDEO_CODING_TIMESTAMP_MAP_H_
+
+#include "webrtc/base/scoped_ptr.h"
+#include "webrtc/typedefs.h"
+
+namespace webrtc {
+
+struct VCMFrameInformation;
+
+class VCMTimestampMap {
+ public:
+ explicit VCMTimestampMap(size_t capacity);
+ ~VCMTimestampMap();
+
+ // Empty the map.
+ void Reset();
+
+ void Add(uint32_t timestamp, VCMFrameInformation* data);
+ VCMFrameInformation* Pop(uint32_t timestamp);
+
+ private:
+ struct TimestampDataTuple {
+ uint32_t timestamp;
+ VCMFrameInformation* data;
+ };
+ bool IsEmpty() const;
+
+ rtc::scoped_ptr<TimestampDataTuple[]> ring_buffer_;
+ const size_t capacity_;
+ size_t next_add_idx_;
+ size_t next_pop_idx_;
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_CODING_TIMESTAMP_MAP_H_
diff --git a/webrtc/modules/video_coding/main/source/timing.cc b/webrtc/modules/video_coding/main/source/timing.cc
new file mode 100644
index 0000000000..8d59135876
--- /dev/null
+++ b/webrtc/modules/video_coding/main/source/timing.cc
@@ -0,0 +1,279 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/video_coding/main/source/timing.h"
+
+#include "webrtc/modules/video_coding/main/source/internal_defines.h"
+#include "webrtc/modules/video_coding/main/source/jitter_buffer_common.h"
+#include "webrtc/system_wrappers/include/clock.h"
+#include "webrtc/system_wrappers/include/metrics.h"
+#include "webrtc/system_wrappers/include/timestamp_extrapolator.h"
+
+namespace webrtc {
+
+VCMTiming::VCMTiming(Clock* clock,
+ VCMTiming* master_timing)
+ : crit_sect_(CriticalSectionWrapper::CreateCriticalSection()),
+ clock_(clock),
+ master_(false),
+ ts_extrapolator_(),
+ codec_timer_(),
+ render_delay_ms_(kDefaultRenderDelayMs),
+ min_playout_delay_ms_(0),
+ jitter_delay_ms_(0),
+ current_delay_ms_(0),
+ last_decode_ms_(0),
+ prev_frame_timestamp_(0),
+ num_decoded_frames_(0),
+ num_delayed_decoded_frames_(0),
+ first_decoded_frame_ms_(-1),
+ sum_missed_render_deadline_ms_(0) {
+ if (master_timing == NULL) {
+ master_ = true;
+ ts_extrapolator_ = new TimestampExtrapolator(clock_->TimeInMilliseconds());
+ } else {
+ ts_extrapolator_ = master_timing->ts_extrapolator_;
+ }
+}
+
+VCMTiming::~VCMTiming() {
+ UpdateHistograms();
+ if (master_) {
+ delete ts_extrapolator_;
+ }
+ delete crit_sect_;
+}
+
+void VCMTiming::UpdateHistograms() const {
+ CriticalSectionScoped cs(crit_sect_);
+ if (num_decoded_frames_ == 0) {
+ return;
+ }
+ int64_t elapsed_sec =
+ (clock_->TimeInMilliseconds() - first_decoded_frame_ms_) / 1000;
+ if (elapsed_sec < metrics::kMinRunTimeInSeconds) {
+ return;
+ }
+ RTC_HISTOGRAM_COUNTS_100("WebRTC.Video.DecodedFramesPerSecond",
+ static_cast<int>((num_decoded_frames_ / elapsed_sec) + 0.5f));
+ RTC_HISTOGRAM_PERCENTAGE("WebRTC.Video.DelayedFramesToRenderer",
+ num_delayed_decoded_frames_ * 100 / num_decoded_frames_);
+ if (num_delayed_decoded_frames_ > 0) {
+ RTC_HISTOGRAM_COUNTS_1000(
+ "WebRTC.Video.DelayedFramesToRenderer_AvgDelayInMs",
+ sum_missed_render_deadline_ms_ / num_delayed_decoded_frames_);
+ }
+}
+
+void VCMTiming::Reset() {
+ CriticalSectionScoped cs(crit_sect_);
+ ts_extrapolator_->Reset(clock_->TimeInMilliseconds());
+ codec_timer_.Reset();
+ render_delay_ms_ = kDefaultRenderDelayMs;
+ min_playout_delay_ms_ = 0;
+ jitter_delay_ms_ = 0;
+ current_delay_ms_ = 0;
+ prev_frame_timestamp_ = 0;
+}
+
+void VCMTiming::ResetDecodeTime() {
+ CriticalSectionScoped lock(crit_sect_);
+ codec_timer_.Reset();
+}
+
+void VCMTiming::set_render_delay(uint32_t render_delay_ms) {
+ CriticalSectionScoped cs(crit_sect_);
+ render_delay_ms_ = render_delay_ms;
+}
+
+void VCMTiming::set_min_playout_delay(uint32_t min_playout_delay_ms) {
+ CriticalSectionScoped cs(crit_sect_);
+ min_playout_delay_ms_ = min_playout_delay_ms;
+}
+
+void VCMTiming::SetJitterDelay(uint32_t jitter_delay_ms) {
+ CriticalSectionScoped cs(crit_sect_);
+ if (jitter_delay_ms != jitter_delay_ms_) {
+ jitter_delay_ms_ = jitter_delay_ms;
+ // When in initial state, set current delay to minimum delay.
+ if (current_delay_ms_ == 0) {
+ current_delay_ms_ = jitter_delay_ms_;
+ }
+ }
+}
+
+void VCMTiming::UpdateCurrentDelay(uint32_t frame_timestamp) {
+ CriticalSectionScoped cs(crit_sect_);
+ uint32_t target_delay_ms = TargetDelayInternal();
+
+ if (current_delay_ms_ == 0) {
+ // Not initialized, set current delay to target.
+ current_delay_ms_ = target_delay_ms;
+ } else if (target_delay_ms != current_delay_ms_) {
+ int64_t delay_diff_ms = static_cast<int64_t>(target_delay_ms) -
+ current_delay_ms_;
+    // Never change the delay by more than 100 ms every second. If we change
+    // the delay in too large steps we will get noticeable freezes. By
+    // limiting the change we can increase the delay in smaller steps, which
+    // will be experienced as the video being played in slow motion. When
+    // lowering the delay the video will be played at a faster pace.
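+    // Timestamps are in 90 kHz units, so the timestamp delta divided by
+    // 90000 is the elapsed media time in seconds, and multiplying it by
+    // kDelayMaxChangeMsPerS (100) caps the adjustment accordingly.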
+ int64_t max_change_ms = 0;
+ if (frame_timestamp < 0x0000ffff && prev_frame_timestamp_ > 0xffff0000) {
+      // The timestamp wrapped around; unwrap it before taking the delta.
+ max_change_ms = kDelayMaxChangeMsPerS * (frame_timestamp +
+ (static_cast<int64_t>(1) << 32) - prev_frame_timestamp_) / 90000;
+ } else {
+ max_change_ms = kDelayMaxChangeMsPerS *
+ (frame_timestamp - prev_frame_timestamp_) / 90000;
+ }
+ if (max_change_ms <= 0) {
+      // Any change of less than 1 ms is truncated here and postponed to a
+      // later frame. A negative change is due to reordering and is ignored.
+ return;
+ }
+ delay_diff_ms = std::max(delay_diff_ms, -max_change_ms);
+ delay_diff_ms = std::min(delay_diff_ms, max_change_ms);
+
+ current_delay_ms_ = current_delay_ms_ + static_cast<int32_t>(delay_diff_ms);
+ }
+ prev_frame_timestamp_ = frame_timestamp;
+}
+
+void VCMTiming::UpdateCurrentDelay(int64_t render_time_ms,
+ int64_t actual_decode_time_ms) {
+ CriticalSectionScoped cs(crit_sect_);
+ uint32_t target_delay_ms = TargetDelayInternal();
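+  // How much later the decode started than the latest start time that would
+  // still have met the render deadline (render time minus decode time and
+  // render delay).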
+ int64_t delayed_ms = actual_decode_time_ms -
+ (render_time_ms - MaxDecodeTimeMs() - render_delay_ms_);
+ if (delayed_ms < 0) {
+ return;
+ }
+ if (current_delay_ms_ + delayed_ms <= target_delay_ms) {
+ current_delay_ms_ += static_cast<uint32_t>(delayed_ms);
+ } else {
+ current_delay_ms_ = target_delay_ms;
+ }
+}
+
+int32_t VCMTiming::StopDecodeTimer(uint32_t time_stamp,
+ int64_t start_time_ms,
+ int64_t now_ms,
+ int64_t render_time_ms) {
+ CriticalSectionScoped cs(crit_sect_);
+ int32_t time_diff_ms = codec_timer_.StopTimer(start_time_ms, now_ms);
+ assert(time_diff_ms >= 0);
+ last_decode_ms_ = time_diff_ms;
+
+ // Update stats.
+ ++num_decoded_frames_;
+ if (num_decoded_frames_ == 1) {
+ first_decoded_frame_ms_ = now_ms;
+ }
+ int time_until_rendering_ms = render_time_ms - render_delay_ms_ - now_ms;
+ if (time_until_rendering_ms < 0) {
+ sum_missed_render_deadline_ms_ += -time_until_rendering_ms;
+ ++num_delayed_decoded_frames_;
+ }
+ return 0;
+}
+
+void VCMTiming::IncomingTimestamp(uint32_t time_stamp, int64_t now_ms) {
+ CriticalSectionScoped cs(crit_sect_);
+ ts_extrapolator_->Update(now_ms, time_stamp);
+}
+
+int64_t VCMTiming::RenderTimeMs(uint32_t frame_timestamp, int64_t now_ms)
+ const {
+ CriticalSectionScoped cs(crit_sect_);
+ const int64_t render_time_ms = RenderTimeMsInternal(frame_timestamp, now_ms);
+ return render_time_ms;
+}
+
+int64_t VCMTiming::RenderTimeMsInternal(uint32_t frame_timestamp,
+ int64_t now_ms) const {
+ int64_t estimated_complete_time_ms =
+ ts_extrapolator_->ExtrapolateLocalTime(frame_timestamp);
+ if (estimated_complete_time_ms == -1) {
+ estimated_complete_time_ms = now_ms;
+ }
+
+ // Make sure that we have at least the playout delay.
+ uint32_t actual_delay = std::max(current_delay_ms_, min_playout_delay_ms_);
+ return estimated_complete_time_ms + actual_delay;
+}
+
+// Must be called from inside a critical section.
+int32_t VCMTiming::MaxDecodeTimeMs(FrameType frame_type /*= kVideoFrameDelta*/)
+ const {
+ const int32_t decode_time_ms = codec_timer_.RequiredDecodeTimeMs(frame_type);
+ assert(decode_time_ms >= 0);
+ return decode_time_ms;
+}
+
+uint32_t VCMTiming::MaxWaitingTime(int64_t render_time_ms, int64_t now_ms)
+ const {
+ CriticalSectionScoped cs(crit_sect_);
+
+ const int64_t max_wait_time_ms = render_time_ms - now_ms -
+ MaxDecodeTimeMs() - render_delay_ms_;
+
+ if (max_wait_time_ms < 0) {
+ return 0;
+ }
+ return static_cast<uint32_t>(max_wait_time_ms);
+}
+
+bool VCMTiming::EnoughTimeToDecode(uint32_t available_processing_time_ms)
+ const {
+ CriticalSectionScoped cs(crit_sect_);
+ int32_t max_decode_time_ms = MaxDecodeTimeMs();
+ if (max_decode_time_ms < 0) {
+ // Haven't decoded any frames yet, try decoding one to get an estimate
+ // of the decode time.
+ return true;
+ } else if (max_decode_time_ms == 0) {
+    // Decode time is less than 1 ms; set it to 1 ms for now since we don't
+    // have any better precision. Count ticks later?
+ max_decode_time_ms = 1;
+ }
+ return static_cast<int32_t>(available_processing_time_ms) -
+ max_decode_time_ms > 0;
+}
+
+uint32_t VCMTiming::TargetVideoDelay() const {
+ CriticalSectionScoped cs(crit_sect_);
+ return TargetDelayInternal();
+}
+
+uint32_t VCMTiming::TargetDelayInternal() const {
+ return std::max(min_playout_delay_ms_,
+ jitter_delay_ms_ + MaxDecodeTimeMs() + render_delay_ms_);
+}
+
+void VCMTiming::GetTimings(int* decode_ms,
+ int* max_decode_ms,
+ int* current_delay_ms,
+ int* target_delay_ms,
+ int* jitter_buffer_ms,
+ int* min_playout_delay_ms,
+ int* render_delay_ms) const {
+ CriticalSectionScoped cs(crit_sect_);
+ *decode_ms = last_decode_ms_;
+ *max_decode_ms = MaxDecodeTimeMs();
+ *current_delay_ms = current_delay_ms_;
+ *target_delay_ms = TargetDelayInternal();
+ *jitter_buffer_ms = jitter_delay_ms_;
+ *min_playout_delay_ms = min_playout_delay_ms_;
+ *render_delay_ms = render_delay_ms_;
+}
+
+} // namespace webrtc
diff --git a/webrtc/modules/video_coding/main/source/timing.h b/webrtc/modules/video_coding/main/source/timing.h
new file mode 100644
index 0000000000..d3b8fa673f
--- /dev/null
+++ b/webrtc/modules/video_coding/main/source/timing.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_TIMING_H_
+#define WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_TIMING_H_
+
+#include "webrtc/base/thread_annotations.h"
+#include "webrtc/modules/video_coding/main/source/codec_timer.h"
+#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
+#include "webrtc/typedefs.h"
+
+namespace webrtc {
+
+class Clock;
+class TimestampExtrapolator;
+
+class VCMTiming {
+ public:
+ // The primary timing component should be passed
+ // if this is the dual timing component.
+ VCMTiming(Clock* clock,
+ VCMTiming* master_timing = NULL);
+ ~VCMTiming();
+
+ // Resets the timing to the initial state.
+ void Reset();
+ void ResetDecodeTime();
+
+ // Set the amount of time needed to render an image. Defaults to 10 ms.
+ void set_render_delay(uint32_t render_delay_ms);
+
+ // Set the minimum time the video must be delayed on the receiver to
+ // get the desired jitter buffer level.
+ void SetJitterDelay(uint32_t required_delay_ms);
+
+ // Set the minimum playout delay required to sync video with audio.
+ void set_min_playout_delay(uint32_t min_playout_delay);
+
+  // Increases or decreases the current delay to get closer to the target
+  // delay. Calculates how much media time has passed since the previous
+  // frame, based on the frame timestamps, and increases/decreases the delay
+  // in proportion to that difference.
+ void UpdateCurrentDelay(uint32_t frame_timestamp);
+
+  // Increases or decreases the current delay to get closer to the target
+  // delay. Given the actual decode time in ms and the render time in ms for
+  // a frame, this function calculates how late the frame is and increases
+  // the delay accordingly.
+ void UpdateCurrentDelay(int64_t render_time_ms,
+ int64_t actual_decode_time_ms);
+
+ // Stops the decoder timer, should be called when the decoder returns a frame
+ // or when the decoded frame callback is called.
+ int32_t StopDecodeTimer(uint32_t time_stamp,
+ int64_t start_time_ms,
+ int64_t now_ms,
+ int64_t render_time_ms);
+
+ // Used to report that a frame is passed to decoding. Updates the timestamp
+ // filter which is used to map between timestamps and receiver system time.
+  void IncomingTimestamp(uint32_t time_stamp, int64_t now_ms);
+ // Returns the receiver system time when the frame with timestamp
+ // frame_timestamp should be rendered, assuming that the system time currently
+ // is now_ms.
+ int64_t RenderTimeMs(uint32_t frame_timestamp, int64_t now_ms) const;
+
+ // Returns the maximum time in ms that we can wait for a frame to become
+ // complete before we must pass it to the decoder.
+ uint32_t MaxWaitingTime(int64_t render_time_ms, int64_t now_ms) const;
+
+ // Returns the current target delay which is required delay + decode time +
+ // render delay.
+ uint32_t TargetVideoDelay() const;
+
+ // Calculates whether or not there is enough time to decode a frame given a
+ // certain amount of processing time.
+ bool EnoughTimeToDecode(uint32_t available_processing_time_ms) const;
+
+ // Return current timing information.
+ void GetTimings(int* decode_ms,
+ int* max_decode_ms,
+ int* current_delay_ms,
+ int* target_delay_ms,
+ int* jitter_buffer_ms,
+ int* min_playout_delay_ms,
+ int* render_delay_ms) const;
+
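+  // kDefaultRenderDelayMs is the assumed time needed to render a frame;
+  // kDelayMaxChangeMsPerS caps how fast the current delay may change, in ms
+  // per second of media time.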
+ enum { kDefaultRenderDelayMs = 10 };
+ enum { kDelayMaxChangeMsPerS = 100 };
+
+ protected:
+ int32_t MaxDecodeTimeMs(FrameType frame_type = kVideoFrameDelta) const
+ EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+ int64_t RenderTimeMsInternal(uint32_t frame_timestamp, int64_t now_ms) const
+ EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+ uint32_t TargetDelayInternal() const EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+
+ private:
+ void UpdateHistograms() const;
+
+ CriticalSectionWrapper* crit_sect_;
+ Clock* const clock_;
+ bool master_ GUARDED_BY(crit_sect_);
+ TimestampExtrapolator* ts_extrapolator_ GUARDED_BY(crit_sect_);
+ VCMCodecTimer codec_timer_ GUARDED_BY(crit_sect_);
+ uint32_t render_delay_ms_ GUARDED_BY(crit_sect_);
+ uint32_t min_playout_delay_ms_ GUARDED_BY(crit_sect_);
+ uint32_t jitter_delay_ms_ GUARDED_BY(crit_sect_);
+ uint32_t current_delay_ms_ GUARDED_BY(crit_sect_);
+ int last_decode_ms_ GUARDED_BY(crit_sect_);
+ uint32_t prev_frame_timestamp_ GUARDED_BY(crit_sect_);
+
+ // Statistics.
+ size_t num_decoded_frames_ GUARDED_BY(crit_sect_);
+ size_t num_delayed_decoded_frames_ GUARDED_BY(crit_sect_);
+ int64_t first_decoded_frame_ms_ GUARDED_BY(crit_sect_);
+ uint64_t sum_missed_render_deadline_ms_ GUARDED_BY(crit_sect_);
+};
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_TIMING_H_
diff --git a/webrtc/modules/video_coding/main/source/timing_unittest.cc b/webrtc/modules/video_coding/main/source/timing_unittest.cc
new file mode 100644
index 0000000000..694a600c2a
--- /dev/null
+++ b/webrtc/modules/video_coding/main/source/timing_unittest.cc
@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <math.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+#include "webrtc/modules/video_coding/main/interface/video_coding.h"
+#include "webrtc/modules/video_coding/main/source/internal_defines.h"
+#include "webrtc/modules/video_coding/main/source/timing.h"
+#include "webrtc/modules/video_coding/main/test/test_util.h"
+#include "webrtc/system_wrappers/include/clock.h"
+#include "webrtc/system_wrappers/include/trace.h"
+#include "webrtc/test/testsupport/fileutils.h"
+
+namespace webrtc {
+
+TEST(ReceiverTiming, Tests) {
+ SimulatedClock clock(0);
+ VCMTiming timing(&clock);
+ uint32_t waitTime = 0;
+ uint32_t jitterDelayMs = 0;
+ uint32_t maxDecodeTimeMs = 0;
+ uint32_t timeStamp = 0;
+
+ timing.Reset();
+
+ timing.UpdateCurrentDelay(timeStamp);
+
+ timing.Reset();
+
+ timing.IncomingTimestamp(timeStamp, clock.TimeInMilliseconds());
+ jitterDelayMs = 20;
+ timing.SetJitterDelay(jitterDelayMs);
+ timing.UpdateCurrentDelay(timeStamp);
+ timing.set_render_delay(0);
+ waitTime = timing.MaxWaitingTime(
+ timing.RenderTimeMs(timeStamp, clock.TimeInMilliseconds()),
+ clock.TimeInMilliseconds());
+ // The first update initializes the render time. Since we have no decode
+ // delay, we get waitTime = renderTime - now - renderDelay = jitter delay.
+ EXPECT_EQ(jitterDelayMs, waitTime);
+
+ jitterDelayMs += VCMTiming::kDelayMaxChangeMsPerS + 10;
+ timeStamp += 90000;
+ clock.AdvanceTimeMilliseconds(1000);
+ timing.SetJitterDelay(jitterDelayMs);
+ timing.UpdateCurrentDelay(timeStamp);
+ waitTime = timing.MaxWaitingTime(timing.RenderTimeMs(
+ timeStamp, clock.TimeInMilliseconds()), clock.TimeInMilliseconds());
+ // Since we gradually increase the delay, we only get 100 ms every second.
+ EXPECT_EQ(jitterDelayMs - 10, waitTime);
+
+ timeStamp += 90000;
+ clock.AdvanceTimeMilliseconds(1000);
+ timing.UpdateCurrentDelay(timeStamp);
+ waitTime = timing.MaxWaitingTime(
+ timing.RenderTimeMs(timeStamp, clock.TimeInMilliseconds()),
+ clock.TimeInMilliseconds());
+ EXPECT_EQ(waitTime, jitterDelayMs);
+
+ // Insert 300 incoming frames without jitter, and verify that this gives the
+ // exact wait time.
+ for (int i = 0; i < 300; i++) {
+ clock.AdvanceTimeMilliseconds(1000 / 25);
+ timeStamp += 90000 / 25;
+ timing.IncomingTimestamp(timeStamp, clock.TimeInMilliseconds());
+ }
+ timing.UpdateCurrentDelay(timeStamp);
+ waitTime = timing.MaxWaitingTime(
+ timing.RenderTimeMs(timeStamp, clock.TimeInMilliseconds()),
+ clock.TimeInMilliseconds());
+ EXPECT_EQ(waitTime, jitterDelayMs);
+
+ // Add decode time estimates.
+ for (int i = 0; i < 10; i++) {
+ int64_t startTimeMs = clock.TimeInMilliseconds();
+ clock.AdvanceTimeMilliseconds(10);
+ timing.StopDecodeTimer(timeStamp, startTimeMs,
+ clock.TimeInMilliseconds(), timing.RenderTimeMs(
+ timeStamp, clock.TimeInMilliseconds()));
+ timeStamp += 90000 / 25;
+ clock.AdvanceTimeMilliseconds(1000 / 25 - 10);
+ timing.IncomingTimestamp(timeStamp, clock.TimeInMilliseconds());
+ }
+ maxDecodeTimeMs = 10;
+ timing.SetJitterDelay(jitterDelayMs);
+ clock.AdvanceTimeMilliseconds(1000);
+ timeStamp += 90000;
+ timing.UpdateCurrentDelay(timeStamp);
+ waitTime = timing.MaxWaitingTime(
+ timing.RenderTimeMs(timeStamp, clock.TimeInMilliseconds()),
+ clock.TimeInMilliseconds());
+ EXPECT_EQ(waitTime, jitterDelayMs);
+
+ uint32_t minTotalDelayMs = 200;
+ timing.set_min_playout_delay(minTotalDelayMs);
+ clock.AdvanceTimeMilliseconds(5000);
+ timeStamp += 5 * 90000;
+ timing.UpdateCurrentDelay(timeStamp);
+ const int kRenderDelayMs = 10;
+ timing.set_render_delay(kRenderDelayMs);
+ waitTime = timing.MaxWaitingTime(
+ timing.RenderTimeMs(timeStamp, clock.TimeInMilliseconds()),
+ clock.TimeInMilliseconds());
+ // With the 200 ms minimum total delay we should have exactly
+ // minTotalDelayMs - decode time (10 ms) - render delay (10 ms) left to wait.
+ EXPECT_EQ(waitTime, minTotalDelayMs - maxDecodeTimeMs - kRenderDelayMs);
+ // The total video delay should be equal to the min total delay.
+ EXPECT_EQ(minTotalDelayMs, timing.TargetVideoDelay());
+
+ // Reset playout delay.
+ timing.set_min_playout_delay(0);
+ clock.AdvanceTimeMilliseconds(5000);
+ timeStamp += 5 * 90000;
+ timing.UpdateCurrentDelay(timeStamp);
+}
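+
+// A hypothetical companion test sketching the ramp arithmetic above: with one
+// second between updates, the current delay may move by at most
+// kDelayMaxChangeMsPerS = 100 ms toward a target that rose by 110 ms, leaving
+// it 10 ms short.
+TEST(ReceiverTiming, DelayRampArithmeticSketch) {
+  const uint32_t targetIncreaseMs = VCMTiming::kDelayMaxChangeMsPerS + 10;
+  const int64_t elapsedMs = 1000;  // One second between updates.
+  const uint32_t maxChangeMs =
+      VCMTiming::kDelayMaxChangeMsPerS * elapsedMs / 1000;
+  EXPECT_EQ(100u, maxChangeMs);
+  EXPECT_EQ(10u, targetIncreaseMs - maxChangeMs);  // The residual gap.
+}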
+
+TEST(ReceiverTiming, WrapAround) {
+ const int kFramerate = 25;
+ SimulatedClock clock(0);
+ VCMTiming timing(&clock);
+ // Provoke a wrap-around. The fourth frame will have wrapped at 25 fps.
+ uint32_t timestamp = 0xFFFFFFFFu - 3 * 90000 / kFramerate;
+ for (int i = 0; i < 4; ++i) {
+ timing.IncomingTimestamp(timestamp, clock.TimeInMilliseconds());
+ clock.AdvanceTimeMilliseconds(1000 / kFramerate);
+ timestamp += 90000 / kFramerate;
+ int64_t render_time = timing.RenderTimeMs(0xFFFFFFFFu,
+ clock.TimeInMilliseconds());
+ EXPECT_EQ(3 * 1000 / kFramerate, render_time);
+ render_time = timing.RenderTimeMs(89u, // One ms later in 90 kHz.
+ clock.TimeInMilliseconds());
+ EXPECT_EQ(3 * 1000 / kFramerate + 1, render_time);
+ }
+}
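+
+// A hypothetical companion test sketching the modular arithmetic relied on
+// above: 90 kHz timestamps live modulo 2^32, so 90 ticks (one millisecond)
+// past 0xFFFFFFFF lands on 89.
+TEST(ReceiverTiming, WrapAroundArithmeticSketch) {
+  uint32_t timestamp = 0xFFFFFFFFu;
+  timestamp += 90;  // Unsigned wrap-around: (2^32 - 1 + 90) mod 2^32 = 89.
+  EXPECT_EQ(89u, timestamp);
+}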
+
+} // namespace webrtc
diff --git a/webrtc/modules/video_coding/main/source/video_coding_impl.cc b/webrtc/modules/video_coding/main/source/video_coding_impl.cc
new file mode 100644
index 0000000000..b0a6754cbd
--- /dev/null
+++ b/webrtc/modules/video_coding/main/source/video_coding_impl.cc
@@ -0,0 +1,359 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/common_types.h"
+#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
+#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "webrtc/modules/video_coding/main/source/encoded_frame.h"
+#include "webrtc/modules/video_coding/main/source/jitter_buffer.h"
+#include "webrtc/modules/video_coding/main/source/packet.h"
+#include "webrtc/modules/video_coding/main/source/video_coding_impl.h"
+#include "webrtc/system_wrappers/include/clock.h"
+
+namespace webrtc {
+namespace vcm {
+
+int64_t VCMProcessTimer::Period() const {
+ return _periodMs;
+}
+
+int64_t VCMProcessTimer::TimeUntilProcess() const {
+ const int64_t time_since_process = _clock->TimeInMilliseconds() - _latestMs;
+ const int64_t time_until_process = _periodMs - time_since_process;
+ return std::max<int64_t>(time_until_process, 0);
+}
+
+void VCMProcessTimer::Processed() {
+ _latestMs = _clock->TimeInMilliseconds();
+}
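+
+// A minimal sketch (hypothetical helper, not used by the module) of the
+// polling pattern these timers support: once Period() ms have elapsed since
+// the last Processed() call, TimeUntilProcess() returns 0.
+static void PollTimerSketch(VCMProcessTimer* timer) {
+  if (timer->TimeUntilProcess() == 0) {
+    // ... perform the periodic work here ...
+    timer->Processed();  // Restart the countdown from now.
+  }
+}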
+} // namespace vcm
+
+namespace {
+// This wrapper provides a way to modify the callback without having to expose
+// a register method all the way down to the function calling it.
+class EncodedImageCallbackWrapper : public EncodedImageCallback {
+ public:
+ EncodedImageCallbackWrapper()
+ : cs_(CriticalSectionWrapper::CreateCriticalSection()), callback_(NULL) {}
+
+ virtual ~EncodedImageCallbackWrapper() {}
+
+ void Register(EncodedImageCallback* callback) {
+ CriticalSectionScoped cs(cs_.get());
+ callback_ = callback;
+ }
+
+ // TODO(andresp): Change to void as return value is ignored.
+ virtual int32_t Encoded(const EncodedImage& encoded_image,
+ const CodecSpecificInfo* codec_specific_info,
+ const RTPFragmentationHeader* fragmentation) {
+ CriticalSectionScoped cs(cs_.get());
+ if (callback_)
+ return callback_->Encoded(
+ encoded_image, codec_specific_info, fragmentation);
+ return 0;
+ }
+
+ private:
+ rtc::scoped_ptr<CriticalSectionWrapper> cs_;
+ EncodedImageCallback* callback_ GUARDED_BY(cs_);
+};
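+
+// A sketch of the intended pattern (SomeSink is a hypothetical
+// EncodedImageCallback implementation): the wrapper is wired into the encoder
+// pipeline once, and the real sink can be attached or swapped later under the
+// wrapper's lock.
+//   EncodedImageCallbackWrapper wrapper;  // Handed to the pipeline.
+//   SomeSink sink;
+//   wrapper.Register(&sink);              // Safe at any point in time.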
+
+class VideoCodingModuleImpl : public VideoCodingModule {
+ public:
+ VideoCodingModuleImpl(Clock* clock,
+ EventFactory* event_factory,
+ bool owns_event_factory,
+ VideoEncoderRateObserver* encoder_rate_observer,
+ VCMQMSettingsCallback* qm_settings_callback)
+ : VideoCodingModule(),
+ sender_(new vcm::VideoSender(clock,
+ &post_encode_callback_,
+ encoder_rate_observer,
+ qm_settings_callback)),
+ receiver_(new vcm::VideoReceiver(clock, event_factory)),
+ own_event_factory_(owns_event_factory ? event_factory : NULL) {}
+
+ virtual ~VideoCodingModuleImpl() {
+ sender_.reset();
+ receiver_.reset();
+ own_event_factory_.reset();
+ }
+
+ int64_t TimeUntilNextProcess() override {
+ int64_t sender_time = sender_->TimeUntilNextProcess();
+ int64_t receiver_time = receiver_->TimeUntilNextProcess();
+ assert(sender_time >= 0);
+ assert(receiver_time >= 0);
+ return VCM_MIN(sender_time, receiver_time);
+ }
+
+ int32_t Process() override {
+ int32_t sender_return = sender_->Process();
+ int32_t receiver_return = receiver_->Process();
+ if (sender_return != VCM_OK)
+ return sender_return;
+ return receiver_return;
+ }
+
+ int32_t RegisterSendCodec(const VideoCodec* sendCodec,
+ uint32_t numberOfCores,
+ uint32_t maxPayloadSize) override {
+ return sender_->RegisterSendCodec(sendCodec, numberOfCores, maxPayloadSize);
+ }
+
+ const VideoCodec& GetSendCodec() const override {
+ return sender_->GetSendCodec();
+ }
+
+ // DEPRECATED.
+ int32_t SendCodec(VideoCodec* currentSendCodec) const override {
+ return sender_->SendCodecBlocking(currentSendCodec);
+ }
+
+ // DEPRECATED.
+ VideoCodecType SendCodec() const override {
+ return sender_->SendCodecBlocking();
+ }
+
+ int32_t RegisterExternalEncoder(VideoEncoder* externalEncoder,
+ uint8_t payloadType,
+ bool internalSource) override {
+ return sender_->RegisterExternalEncoder(
+ externalEncoder, payloadType, internalSource);
+ }
+
+ int Bitrate(unsigned int* bitrate) const override {
+ return sender_->Bitrate(bitrate);
+ }
+
+ int FrameRate(unsigned int* framerate) const override {
+ return sender_->FrameRate(framerate);
+ }
+
+ int32_t SetChannelParameters(uint32_t target_bitrate, // bits/s.
+ uint8_t lossRate,
+ int64_t rtt) override {
+ return sender_->SetChannelParameters(target_bitrate, lossRate, rtt);
+ }
+
+ int32_t RegisterTransportCallback(
+ VCMPacketizationCallback* transport) override {
+ return sender_->RegisterTransportCallback(transport);
+ }
+
+ int32_t RegisterSendStatisticsCallback(
+ VCMSendStatisticsCallback* sendStats) override {
+ return sender_->RegisterSendStatisticsCallback(sendStats);
+ }
+
+ int32_t RegisterProtectionCallback(
+ VCMProtectionCallback* protection) override {
+ return sender_->RegisterProtectionCallback(protection);
+ }
+
+ int32_t SetVideoProtection(VCMVideoProtection videoProtection,
+ bool enable) override {
+ // TODO(pbos): Remove enable from receive-side protection modes as well.
+ if (enable)
+ sender_->SetVideoProtection(videoProtection);
+ return receiver_->SetVideoProtection(videoProtection, enable);
+ }
+
+ int32_t AddVideoFrame(const VideoFrame& videoFrame,
+ const VideoContentMetrics* contentMetrics,
+ const CodecSpecificInfo* codecSpecificInfo) override {
+ return sender_->AddVideoFrame(
+ videoFrame, contentMetrics, codecSpecificInfo);
+ }
+
+ int32_t IntraFrameRequest(int stream_index) override {
+ return sender_->IntraFrameRequest(stream_index);
+ }
+
+ int32_t EnableFrameDropper(bool enable) override {
+ return sender_->EnableFrameDropper(enable);
+ }
+
+ void SuspendBelowMinBitrate() override {
+ return sender_->SuspendBelowMinBitrate();
+ }
+
+ bool VideoSuspended() const override { return sender_->VideoSuspended(); }
+
+ int32_t RegisterReceiveCodec(const VideoCodec* receiveCodec,
+ int32_t numberOfCores,
+ bool requireKeyFrame) override {
+ return receiver_->RegisterReceiveCodec(
+ receiveCodec, numberOfCores, requireKeyFrame);
+ }
+
+ int32_t RegisterExternalDecoder(VideoDecoder* externalDecoder,
+ uint8_t payloadType,
+ bool internalRenderTiming) override {
+ return receiver_->RegisterExternalDecoder(
+ externalDecoder, payloadType, internalRenderTiming);
+ }
+
+ int32_t RegisterReceiveCallback(
+ VCMReceiveCallback* receiveCallback) override {
+ return receiver_->RegisterReceiveCallback(receiveCallback);
+ }
+
+ int32_t RegisterReceiveStatisticsCallback(
+ VCMReceiveStatisticsCallback* receiveStats) override {
+ return receiver_->RegisterReceiveStatisticsCallback(receiveStats);
+ }
+
+ int32_t RegisterDecoderTimingCallback(
+ VCMDecoderTimingCallback* decoderTiming) override {
+ return receiver_->RegisterDecoderTimingCallback(decoderTiming);
+ }
+
+ int32_t RegisterFrameTypeCallback(
+ VCMFrameTypeCallback* frameTypeCallback) override {
+ return receiver_->RegisterFrameTypeCallback(frameTypeCallback);
+ }
+
+ int32_t RegisterPacketRequestCallback(
+ VCMPacketRequestCallback* callback) override {
+ return receiver_->RegisterPacketRequestCallback(callback);
+ }
+
+ int RegisterRenderBufferSizeCallback(
+ VCMRenderBufferSizeCallback* callback) override {
+ return receiver_->RegisterRenderBufferSizeCallback(callback);
+ }
+
+ int32_t Decode(uint16_t maxWaitTimeMs) override {
+ return receiver_->Decode(maxWaitTimeMs);
+ }
+
+ int32_t ResetDecoder() override { return receiver_->ResetDecoder(); }
+
+ int32_t ReceiveCodec(VideoCodec* currentReceiveCodec) const override {
+ return receiver_->ReceiveCodec(currentReceiveCodec);
+ }
+
+ VideoCodecType ReceiveCodec() const override {
+ return receiver_->ReceiveCodec();
+ }
+
+ int32_t IncomingPacket(const uint8_t* incomingPayload,
+ size_t payloadLength,
+ const WebRtcRTPHeader& rtpInfo) override {
+ return receiver_->IncomingPacket(incomingPayload, payloadLength, rtpInfo);
+ }
+
+ int32_t SetMinimumPlayoutDelay(uint32_t minPlayoutDelayMs) override {
+ return receiver_->SetMinimumPlayoutDelay(minPlayoutDelayMs);
+ }
+
+ int32_t SetRenderDelay(uint32_t timeMS) override {
+ return receiver_->SetRenderDelay(timeMS);
+ }
+
+ int32_t Delay() const override { return receiver_->Delay(); }
+
+ uint32_t DiscardedPackets() const override {
+ return receiver_->DiscardedPackets();
+ }
+
+ int SetReceiverRobustnessMode(ReceiverRobustness robustnessMode,
+ VCMDecodeErrorMode errorMode) override {
+ return receiver_->SetReceiverRobustnessMode(robustnessMode, errorMode);
+ }
+
+ void SetNackSettings(size_t max_nack_list_size,
+ int max_packet_age_to_nack,
+ int max_incomplete_time_ms) override {
+ return receiver_->SetNackSettings(
+ max_nack_list_size, max_packet_age_to_nack, max_incomplete_time_ms);
+ }
+
+ void SetDecodeErrorMode(VCMDecodeErrorMode decode_error_mode) override {
+ return receiver_->SetDecodeErrorMode(decode_error_mode);
+ }
+
+ int SetMinReceiverDelay(int desired_delay_ms) override {
+ return receiver_->SetMinReceiverDelay(desired_delay_ms);
+ }
+
+ int32_t SetReceiveChannelParameters(int64_t rtt) override {
+ return receiver_->SetReceiveChannelParameters(rtt);
+ }
+
+ void RegisterPreDecodeImageCallback(EncodedImageCallback* observer) override {
+ receiver_->RegisterPreDecodeImageCallback(observer);
+ }
+
+ void RegisterPostEncodeImageCallback(
+ EncodedImageCallback* observer) override {
+ post_encode_callback_.Register(observer);
+ }
+
+ void TriggerDecoderShutdown() override {
+ receiver_->TriggerDecoderShutdown();
+ }
+
+ private:
+ EncodedImageCallbackWrapper post_encode_callback_;
+ // TODO(tommi): Change sender_ and receiver_ to be non-pointers
+ // (construction would then be 1 alloc instead of 3).
+ rtc::scoped_ptr<vcm::VideoSender> sender_;
+ rtc::scoped_ptr<vcm::VideoReceiver> receiver_;
+ rtc::scoped_ptr<EventFactory> own_event_factory_;
+};
+} // namespace
+
+uint8_t VideoCodingModule::NumberOfCodecs() {
+ return VCMCodecDataBase::NumberOfCodecs();
+}
+
+int32_t VideoCodingModule::Codec(uint8_t listId, VideoCodec* codec) {
+ if (codec == NULL) {
+ return VCM_PARAMETER_ERROR;
+ }
+ return VCMCodecDataBase::Codec(listId, codec) ? 0 : -1;
+}
+
+int32_t VideoCodingModule::Codec(VideoCodecType codecType, VideoCodec* codec) {
+ if (codec == NULL) {
+ return VCM_PARAMETER_ERROR;
+ }
+ return VCMCodecDataBase::Codec(codecType, codec) ? 0 : -1;
+}
+
+VideoCodingModule* VideoCodingModule::Create(
+ Clock* clock,
+ VideoEncoderRateObserver* encoder_rate_observer,
+ VCMQMSettingsCallback* qm_settings_callback) {
+ return new VideoCodingModuleImpl(clock, new EventFactoryImpl, true,
+ encoder_rate_observer, qm_settings_callback);
+}
+
+VideoCodingModule* VideoCodingModule::Create(
+ Clock* clock,
+ EventFactory* event_factory) {
+ assert(clock);
+ assert(event_factory);
+ return new VideoCodingModuleImpl(clock, event_factory, false, nullptr,
+ nullptr);
+}
+
+void VideoCodingModule::Destroy(VideoCodingModule* module) {
+ if (module != NULL) {
+ delete static_cast<VideoCodingModuleImpl*>(module);
+ }
+}
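+
+// A minimal lifecycle sketch (hypothetical call site), assuming a
+// caller-owned Clock and EventFactory; this Create() overload does not take
+// ownership of the event factory.
+static void ModuleLifecycleSketch(Clock* clock, EventFactory* event_factory) {
+  VideoCodingModule* vcm = VideoCodingModule::Create(clock, event_factory);
+  VideoCodec codec;
+  if (VideoCodingModule::Codec(kVideoCodecVP8, &codec) == 0)
+    vcm->RegisterReceiveCodec(&codec, 1);
+  // A process thread would repeatedly sleep TimeUntilNextProcess() ms and
+  // then call Process().
+  VideoCodingModule::Destroy(vcm);
+}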
+} // namespace webrtc
diff --git a/webrtc/modules/video_coding/main/source/video_coding_impl.h b/webrtc/modules/video_coding/main/source/video_coding_impl.h
new file mode 100644
index 0000000000..57f38dad13
--- /dev/null
+++ b/webrtc/modules/video_coding/main/source/video_coding_impl.h
@@ -0,0 +1,237 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_VIDEO_CODING_IMPL_H_
+#define WEBRTC_MODULES_VIDEO_CODING_VIDEO_CODING_IMPL_H_
+
+#include "webrtc/modules/video_coding/main/interface/video_coding.h"
+
+#include <vector>
+
+#include "webrtc/base/thread_annotations.h"
+#include "webrtc/base/thread_checker.h"
+#include "webrtc/modules/video_coding/main/source/codec_database.h"
+#include "webrtc/modules/video_coding/main/source/frame_buffer.h"
+#include "webrtc/modules/video_coding/main/source/generic_decoder.h"
+#include "webrtc/modules/video_coding/main/source/generic_encoder.h"
+#include "webrtc/modules/video_coding/main/source/jitter_buffer.h"
+#include "webrtc/modules/video_coding/main/source/media_optimization.h"
+#include "webrtc/modules/video_coding/main/source/receiver.h"
+#include "webrtc/modules/video_coding/main/source/timing.h"
+#include "webrtc/modules/video_coding/utility/include/qp_parser.h"
+#include "webrtc/system_wrappers/include/clock.h"
+#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
+
+namespace webrtc {
+
+class EncodedFrameObserver;
+
+namespace vcm {
+
+class VCMProcessTimer {
+ public:
+ VCMProcessTimer(int64_t periodMs, Clock* clock)
+ : _clock(clock),
+ _periodMs(periodMs),
+ _latestMs(_clock->TimeInMilliseconds()) {}
+ int64_t Period() const;
+ int64_t TimeUntilProcess() const;
+ void Processed();
+
+ private:
+ Clock* _clock;
+ int64_t _periodMs;
+ int64_t _latestMs;
+};
+
+class VideoSender {
+ public:
+ typedef VideoCodingModule::SenderNackMode SenderNackMode;
+
+ VideoSender(Clock* clock,
+ EncodedImageCallback* post_encode_callback,
+ VideoEncoderRateObserver* encoder_rate_observer,
+ VCMQMSettingsCallback* qm_settings_callback);
+
+ ~VideoSender();
+
+ // Register the send codec to be used.
+ // This method must be called on the construction thread.
+ int32_t RegisterSendCodec(const VideoCodec* sendCodec,
+ uint32_t numberOfCores,
+ uint32_t maxPayloadSize);
+ // Non-blocking access to the currently active send codec configuration.
+ // Must be called from the same thread as the VideoSender instance was
+ // created on.
+ const VideoCodec& GetSendCodec() const;
+
+ // Get a copy of the currently configured send codec.
+ // This method acquires a lock to copy the current configuration out,
+ // so it can block and the returned information is not guaranteed to be
+ // accurate upon return. Consider using GetSendCodec() instead and make
+ // decisions on that thread with regards to the current codec.
+ int32_t SendCodecBlocking(VideoCodec* currentSendCodec) const;
+
+ // Same as SendCodecBlocking. Try to use GetSendCodec() instead.
+ VideoCodecType SendCodecBlocking() const;
+
+ int32_t RegisterExternalEncoder(VideoEncoder* externalEncoder,
+ uint8_t payloadType,
+ bool internalSource);
+
+ int Bitrate(unsigned int* bitrate) const;
+ int FrameRate(unsigned int* framerate) const;
+
+ int32_t SetChannelParameters(uint32_t target_bitrate, // bits/s.
+ uint8_t lossRate,
+ int64_t rtt);
+
+ int32_t RegisterTransportCallback(VCMPacketizationCallback* transport);
+ int32_t RegisterSendStatisticsCallback(VCMSendStatisticsCallback* sendStats);
+ int32_t RegisterProtectionCallback(VCMProtectionCallback* protection);
+ void SetVideoProtection(VCMVideoProtection videoProtection);
+
+ int32_t AddVideoFrame(const VideoFrame& videoFrame,
+ const VideoContentMetrics* _contentMetrics,
+ const CodecSpecificInfo* codecSpecificInfo);
+
+ int32_t IntraFrameRequest(int stream_index);
+ int32_t EnableFrameDropper(bool enable);
+
+ void SuspendBelowMinBitrate();
+ bool VideoSuspended() const;
+
+ int64_t TimeUntilNextProcess();
+ int32_t Process();
+
+ private:
+ void SetEncoderParameters(EncoderParameters params)
+ EXCLUSIVE_LOCKS_REQUIRED(send_crit_);
+
+ Clock* const clock_;
+
+ rtc::scoped_ptr<CriticalSectionWrapper> process_crit_sect_;
+ mutable rtc::CriticalSection send_crit_;
+ VCMGenericEncoder* _encoder;
+ VCMEncodedFrameCallback _encodedFrameCallback;
+ std::vector<FrameType> _nextFrameTypes;
+ media_optimization::MediaOptimization _mediaOpt;
+ VCMSendStatisticsCallback* _sendStatsCallback GUARDED_BY(process_crit_sect_);
+ VCMCodecDataBase _codecDataBase GUARDED_BY(send_crit_);
+ bool frame_dropper_enabled_ GUARDED_BY(send_crit_);
+ VCMProcessTimer _sendStatsTimer;
+
+ // Must be accessed on the construction thread of VideoSender.
+ VideoCodec current_codec_;
+ rtc::ThreadChecker main_thread_;
+
+ VCMQMSettingsCallback* const qm_settings_callback_;
+ VCMProtectionCallback* protection_callback_;
+
+ rtc::CriticalSection params_lock_;
+ EncoderParameters encoder_params_ GUARDED_BY(params_lock_);
+};
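+
+// A sketch of the threading contract for the codec accessors above
+// (hypothetical call sites): on the construction thread, prefer the cheap
+// reference accessor; from any other thread, pay for the locking copy.
+//   const VideoCodec& codec = sender.GetSendCodec();  // Construction thread.
+//   VideoCodec copy;
+//   sender.SendCodecBlocking(&copy);                  // Any thread; blocks.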
+
+class VideoReceiver {
+ public:
+ typedef VideoCodingModule::ReceiverRobustness ReceiverRobustness;
+
+ VideoReceiver(Clock* clock, EventFactory* event_factory);
+ ~VideoReceiver();
+
+ int32_t RegisterReceiveCodec(const VideoCodec* receiveCodec,
+ int32_t numberOfCores,
+ bool requireKeyFrame);
+
+ int32_t RegisterExternalDecoder(VideoDecoder* externalDecoder,
+ uint8_t payloadType,
+ bool internalRenderTiming);
+ int32_t RegisterReceiveCallback(VCMReceiveCallback* receiveCallback);
+ int32_t RegisterReceiveStatisticsCallback(
+ VCMReceiveStatisticsCallback* receiveStats);
+ int32_t RegisterDecoderTimingCallback(
+ VCMDecoderTimingCallback* decoderTiming);
+ int32_t RegisterFrameTypeCallback(VCMFrameTypeCallback* frameTypeCallback);
+ int32_t RegisterPacketRequestCallback(VCMPacketRequestCallback* callback);
+ int RegisterRenderBufferSizeCallback(VCMRenderBufferSizeCallback* callback);
+
+ int32_t Decode(uint16_t maxWaitTimeMs);
+ int32_t ResetDecoder();
+
+ int32_t ReceiveCodec(VideoCodec* currentReceiveCodec) const;
+ VideoCodecType ReceiveCodec() const;
+
+ int32_t IncomingPacket(const uint8_t* incomingPayload,
+ size_t payloadLength,
+ const WebRtcRTPHeader& rtpInfo);
+ int32_t SetMinimumPlayoutDelay(uint32_t minPlayoutDelayMs);
+ int32_t SetRenderDelay(uint32_t timeMS);
+ int32_t Delay() const;
+ uint32_t DiscardedPackets() const;
+
+ int SetReceiverRobustnessMode(ReceiverRobustness robustnessMode,
+ VCMDecodeErrorMode errorMode);
+ void SetNackSettings(size_t max_nack_list_size,
+ int max_packet_age_to_nack,
+ int max_incomplete_time_ms);
+
+ void SetDecodeErrorMode(VCMDecodeErrorMode decode_error_mode);
+ int SetMinReceiverDelay(int desired_delay_ms);
+
+ int32_t SetReceiveChannelParameters(int64_t rtt);
+ int32_t SetVideoProtection(VCMVideoProtection videoProtection, bool enable);
+
+ int64_t TimeUntilNextProcess();
+ int32_t Process();
+
+ void RegisterPreDecodeImageCallback(EncodedImageCallback* observer);
+ void TriggerDecoderShutdown();
+
+ protected:
+ int32_t Decode(const webrtc::VCMEncodedFrame& frame)
+ EXCLUSIVE_LOCKS_REQUIRED(_receiveCritSect);
+ int32_t RequestKeyFrame();
+ int32_t RequestSliceLossIndication(const uint64_t pictureID) const;
+
+ private:
+ Clock* const clock_;
+ rtc::scoped_ptr<CriticalSectionWrapper> process_crit_sect_;
+ CriticalSectionWrapper* _receiveCritSect;
+ VCMTiming _timing;
+ VCMReceiver _receiver;
+ VCMDecodedFrameCallback _decodedFrameCallback;
+ VCMFrameTypeCallback* _frameTypeCallback GUARDED_BY(process_crit_sect_);
+ VCMReceiveStatisticsCallback* _receiveStatsCallback
+ GUARDED_BY(process_crit_sect_);
+ VCMDecoderTimingCallback* _decoderTimingCallback
+ GUARDED_BY(process_crit_sect_);
+ VCMPacketRequestCallback* _packetRequestCallback
+ GUARDED_BY(process_crit_sect_);
+ VCMRenderBufferSizeCallback* render_buffer_callback_
+ GUARDED_BY(process_crit_sect_);
+ VCMGenericDecoder* _decoder;
+#ifdef DEBUG_DECODER_BIT_STREAM
+ FILE* _bitStreamBeforeDecoder;
+#endif
+ VCMFrameBuffer _frameFromFile;
+ bool _scheduleKeyRequest GUARDED_BY(process_crit_sect_);
+ size_t max_nack_list_size_ GUARDED_BY(process_crit_sect_);
+ EncodedImageCallback* pre_decode_image_callback_ GUARDED_BY(_receiveCritSect);
+
+ VCMCodecDataBase _codecDataBase GUARDED_BY(_receiveCritSect);
+ VCMProcessTimer _receiveStatsTimer;
+ VCMProcessTimer _retransmissionTimer;
+ VCMProcessTimer _keyRequestTimer;
+ QpParser qp_parser_;
+};
+
+} // namespace vcm
+} // namespace webrtc
+#endif // WEBRTC_MODULES_VIDEO_CODING_VIDEO_CODING_IMPL_H_
diff --git a/webrtc/modules/video_coding/main/source/video_coding_robustness_unittest.cc b/webrtc/modules/video_coding/main/source/video_coding_robustness_unittest.cc
new file mode 100644
index 0000000000..ac6e16bd80
--- /dev/null
+++ b/webrtc/modules/video_coding/main/source/video_coding_robustness_unittest.cc
@@ -0,0 +1,238 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/modules/video_coding/codecs/interface/mock/mock_video_codec_interface.h"
+#include "webrtc/modules/video_coding/main/interface/mock/mock_vcm_callbacks.h"
+#include "webrtc/modules/video_coding/main/interface/video_coding.h"
+#include "webrtc/modules/video_coding/main/test/test_util.h"
+#include "webrtc/system_wrappers/include/clock.h"
+
+namespace webrtc {
+
+using ::testing::Return;
+using ::testing::_;
+using ::testing::ElementsAre;
+using ::testing::AllOf;
+using ::testing::Args;
+using ::testing::Field;
+using ::testing::Pointee;
+using ::testing::NiceMock;
+using ::testing::Sequence;
+
+class VCMRobustnessTest : public ::testing::Test {
+ protected:
+ static const size_t kPayloadLen = 10;
+
+ virtual void SetUp() {
+ clock_.reset(new SimulatedClock(0));
+ ASSERT_TRUE(clock_.get() != NULL);
+ vcm_ = VideoCodingModule::Create(clock_.get(), &event_factory_);
+ ASSERT_TRUE(vcm_ != NULL);
+ const size_t kMaxNackListSize = 250;
+ const int kMaxPacketAgeToNack = 450;
+ vcm_->SetNackSettings(kMaxNackListSize, kMaxPacketAgeToNack, 0);
+ ASSERT_EQ(0, vcm_->RegisterFrameTypeCallback(&frame_type_callback_));
+ ASSERT_EQ(0, vcm_->RegisterPacketRequestCallback(&request_callback_));
+ ASSERT_EQ(VCM_OK, vcm_->Codec(kVideoCodecVP8, &video_codec_));
+ ASSERT_EQ(VCM_OK, vcm_->RegisterReceiveCodec(&video_codec_, 1));
+ ASSERT_EQ(VCM_OK, vcm_->RegisterExternalDecoder(&decoder_,
+ video_codec_.plType,
+ true));
+ }
+
+ virtual void TearDown() {
+ VideoCodingModule::Destroy(vcm_);
+ }
+
+ void InsertPacket(uint32_t timestamp,
+ uint16_t seq_no,
+ bool first,
+ bool marker_bit,
+ FrameType frame_type) {
+ const uint8_t payload[kPayloadLen] = {0};
+ WebRtcRTPHeader rtp_info;
+ memset(&rtp_info, 0, sizeof(rtp_info));
+ rtp_info.frameType = frame_type;
+ rtp_info.header.timestamp = timestamp;
+ rtp_info.header.sequenceNumber = seq_no;
+ rtp_info.header.markerBit = marker_bit;
+ rtp_info.header.payloadType = video_codec_.plType;
+ rtp_info.type.Video.codec = kRtpVideoVp8;
+ rtp_info.type.Video.codecHeader.VP8.InitRTPVideoHeaderVP8();
+ rtp_info.type.Video.isFirstPacket = first;
+
+ ASSERT_EQ(VCM_OK, vcm_->IncomingPacket(payload, kPayloadLen, rtp_info));
+ }
+
+ VideoCodingModule* vcm_;
+ VideoCodec video_codec_;
+ MockVCMFrameTypeCallback frame_type_callback_;
+ MockPacketRequestCallback request_callback_;
+ NiceMock<MockVideoDecoder> decoder_;
+ NiceMock<MockVideoDecoder> decoderCopy_;
+ rtc::scoped_ptr<SimulatedClock> clock_;
+ NullEventFactory event_factory_;
+};
+
+TEST_F(VCMRobustnessTest, TestHardNack) {
+ Sequence s;
+ EXPECT_CALL(request_callback_, ResendPackets(_, 2))
+ .With(Args<0, 1>(ElementsAre(6, 7)))
+ .Times(1);
+ for (int ts = 0; ts <= 6000; ts += 3000) {
+ EXPECT_CALL(decoder_, Decode(AllOf(Field(&EncodedImage::_timeStamp, ts),
+ Field(&EncodedImage::_length,
+ kPayloadLen * 3),
+ Field(&EncodedImage::_completeFrame,
+ true)),
+ false, _, _, _))
+ .Times(1)
+ .InSequence(s);
+ }
+
+ ASSERT_EQ(VCM_OK, vcm_->SetReceiverRobustnessMode(
+ VideoCodingModule::kHardNack,
+ kNoErrors));
+
+ InsertPacket(0, 0, true, false, kVideoFrameKey);
+ InsertPacket(0, 1, false, false, kVideoFrameKey);
+ InsertPacket(0, 2, false, true, kVideoFrameKey);
+ clock_->AdvanceTimeMilliseconds(1000 / 30);
+
+ InsertPacket(3000, 3, true, false, kVideoFrameDelta);
+ InsertPacket(3000, 4, false, false, kVideoFrameDelta);
+ InsertPacket(3000, 5, false, true, kVideoFrameDelta);
+ clock_->AdvanceTimeMilliseconds(1000 / 30);
+
+ ASSERT_EQ(VCM_OK, vcm_->Decode(0));
+ ASSERT_EQ(VCM_OK, vcm_->Decode(0));
+ ASSERT_EQ(VCM_FRAME_NOT_READY, vcm_->Decode(0));
+
+ clock_->AdvanceTimeMilliseconds(10);
+
+ ASSERT_EQ(VCM_OK, vcm_->Process());
+
+ ASSERT_EQ(VCM_FRAME_NOT_READY, vcm_->Decode(0));
+
+ InsertPacket(6000, 8, false, true, kVideoFrameDelta);
+ clock_->AdvanceTimeMilliseconds(10);
+ ASSERT_EQ(VCM_OK, vcm_->Process());
+
+ ASSERT_EQ(VCM_FRAME_NOT_READY, vcm_->Decode(0));
+
+ InsertPacket(6000, 6, true, false, kVideoFrameDelta);
+ InsertPacket(6000, 7, false, false, kVideoFrameDelta);
+ clock_->AdvanceTimeMilliseconds(10);
+ ASSERT_EQ(VCM_OK, vcm_->Process());
+
+ ASSERT_EQ(VCM_OK, vcm_->Decode(0));
+}
+
+TEST_F(VCMRobustnessTest, TestHardNackNoneDecoded) {
+ EXPECT_CALL(request_callback_, ResendPackets(_, _))
+ .Times(0);
+ EXPECT_CALL(frame_type_callback_, RequestKeyFrame())
+ .Times(1);
+
+ ASSERT_EQ(VCM_OK, vcm_->SetReceiverRobustnessMode(
+ VideoCodingModule::kHardNack,
+ kNoErrors));
+
+ InsertPacket(3000, 3, true, false, kVideoFrameDelta);
+ InsertPacket(3000, 4, false, false, kVideoFrameDelta);
+ InsertPacket(3000, 5, false, true, kVideoFrameDelta);
+
+ EXPECT_EQ(VCM_FRAME_NOT_READY, vcm_->Decode(0));
+ ASSERT_EQ(VCM_OK, vcm_->Process());
+
+ clock_->AdvanceTimeMilliseconds(10);
+
+ EXPECT_EQ(VCM_FRAME_NOT_READY, vcm_->Decode(0));
+ ASSERT_EQ(VCM_OK, vcm_->Process());
+}
+
+TEST_F(VCMRobustnessTest, TestModeNoneWithErrors) {
+ EXPECT_CALL(decoder_, InitDecode(_, _)).Times(1);
+ EXPECT_CALL(decoder_, Release()).Times(1);
+ Sequence s1;
+ EXPECT_CALL(request_callback_, ResendPackets(_, 1))
+ .With(Args<0, 1>(ElementsAre(4)))
+ .Times(0);
+
+ EXPECT_CALL(decoder_, Copy())
+ .Times(0);
+ EXPECT_CALL(decoderCopy_, Copy())
+ .Times(0);
+
+ // Decode operations
+ EXPECT_CALL(decoder_, Decode(AllOf(Field(&EncodedImage::_timeStamp, 0),
+ Field(&EncodedImage::_completeFrame,
+ true)),
+ false, _, _, _))
+ .Times(1)
+ .InSequence(s1);
+ EXPECT_CALL(decoder_, Decode(AllOf(Field(&EncodedImage::_timeStamp, 3000),
+ Field(&EncodedImage::_completeFrame,
+ false)),
+ false, _, _, _))
+ .Times(1)
+ .InSequence(s1);
+ EXPECT_CALL(decoder_, Decode(AllOf(Field(&EncodedImage::_timeStamp, 6000),
+ Field(&EncodedImage::_completeFrame,
+ true)),
+ false, _, _, _))
+ .Times(1)
+ .InSequence(s1);
+ EXPECT_CALL(decoder_, Decode(AllOf(Field(&EncodedImage::_timeStamp, 9000),
+ Field(&EncodedImage::_completeFrame,
+ true)),
+ false, _, _, _))
+ .Times(1)
+ .InSequence(s1);
+
+ ASSERT_EQ(VCM_OK, vcm_->SetReceiverRobustnessMode(
+ VideoCodingModule::kNone,
+ kWithErrors));
+
+ InsertPacket(0, 0, true, false, kVideoFrameKey);
+ InsertPacket(0, 1, false, false, kVideoFrameKey);
+ InsertPacket(0, 2, false, true, kVideoFrameKey);
+ EXPECT_EQ(VCM_OK, vcm_->Decode(0)); // Decode timestamp 0.
+ EXPECT_EQ(VCM_OK, vcm_->Process()); // Expect no NACK list.
+
+ clock_->AdvanceTimeMilliseconds(33);
+ InsertPacket(3000, 3, true, false, kVideoFrameDelta);
+ // Packet 4 missing
+ InsertPacket(3000, 5, false, true, kVideoFrameDelta);
+ EXPECT_EQ(VCM_FRAME_NOT_READY, vcm_->Decode(0));
+ EXPECT_EQ(VCM_OK, vcm_->Process()); // Expect no NACK list.
+
+ clock_->AdvanceTimeMilliseconds(33);
+ InsertPacket(6000, 6, true, false, kVideoFrameDelta);
+ InsertPacket(6000, 7, false, false, kVideoFrameDelta);
+ InsertPacket(6000, 8, false, true, kVideoFrameDelta);
+ EXPECT_EQ(VCM_OK, vcm_->Decode(0)); // Decode timestamp 3000 incomplete.
+ EXPECT_EQ(VCM_OK, vcm_->Process()); // Expect no NACK list.
+
+ clock_->AdvanceTimeMilliseconds(10);
+ EXPECT_EQ(VCM_OK, vcm_->Decode(0)); // Decode timestamp 6000 complete.
+ EXPECT_EQ(VCM_OK, vcm_->Process()); // Expect no NACK list.
+
+ clock_->AdvanceTimeMilliseconds(23);
+ InsertPacket(3000, 4, false, false, kVideoFrameDelta);
+
+ InsertPacket(9000, 9, true, false, kVideoFrameDelta);
+ InsertPacket(9000, 10, false, false, kVideoFrameDelta);
+ InsertPacket(9000, 11, false, true, kVideoFrameDelta);
+ EXPECT_EQ(VCM_OK, vcm_->Decode(0)); // Decode timestamp 9000 complete.
+}
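+
+// A sketch of the receiver configuration the tests above exercise
+// (hypothetical helper): hard NACK delivers only complete frames to the
+// decoder and turns sequence-number gaps into ResendPackets() calls.
+static void ConfigureHardNackSketch(VideoCodingModule* vcm) {
+  vcm->SetNackSettings(250, 450, 0);  // List size, packet age, incomplete ms.
+  vcm->SetReceiverRobustnessMode(VideoCodingModule::kHardNack, kNoErrors);
+}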
+} // namespace webrtc
diff --git a/webrtc/modules/video_coding/main/source/video_receiver.cc b/webrtc/modules/video_coding/main/source/video_receiver.cc
new file mode 100644
index 0000000000..77c069cf2d
--- /dev/null
+++ b/webrtc/modules/video_coding/main/source/video_receiver.cc
@@ -0,0 +1,578 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/base/checks.h"
+#include "webrtc/base/trace_event.h"
+#include "webrtc/common_types.h"
+#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
+#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "webrtc/modules/video_coding/main/source/encoded_frame.h"
+#include "webrtc/modules/video_coding/main/source/jitter_buffer.h"
+#include "webrtc/modules/video_coding/main/source/packet.h"
+#include "webrtc/modules/video_coding/main/source/video_coding_impl.h"
+#include "webrtc/system_wrappers/include/clock.h"
+#include "webrtc/system_wrappers/include/logging.h"
+
+// #define DEBUG_DECODER_BIT_STREAM
+
+namespace webrtc {
+namespace vcm {
+
+VideoReceiver::VideoReceiver(Clock* clock, EventFactory* event_factory)
+ : clock_(clock),
+ process_crit_sect_(CriticalSectionWrapper::CreateCriticalSection()),
+ _receiveCritSect(CriticalSectionWrapper::CreateCriticalSection()),
+ _timing(clock_),
+ _receiver(&_timing, clock_, event_factory),
+ _decodedFrameCallback(_timing, clock_),
+ _frameTypeCallback(NULL),
+ _receiveStatsCallback(NULL),
+ _decoderTimingCallback(NULL),
+ _packetRequestCallback(NULL),
+ render_buffer_callback_(NULL),
+ _decoder(NULL),
+#ifdef DEBUG_DECODER_BIT_STREAM
+ _bitStreamBeforeDecoder(NULL),
+#endif
+ _frameFromFile(),
+ _scheduleKeyRequest(false),
+ max_nack_list_size_(0),
+ pre_decode_image_callback_(NULL),
+ _codecDataBase(nullptr, nullptr),
+ _receiveStatsTimer(1000, clock_),
+ _retransmissionTimer(10, clock_),
+ _keyRequestTimer(500, clock_) {
+ assert(clock_);
+#ifdef DEBUG_DECODER_BIT_STREAM
+ _bitStreamBeforeDecoder = fopen("decoderBitStream.bit", "wb");
+#endif
+}
+
+VideoReceiver::~VideoReceiver() {
+ delete _receiveCritSect;
+#ifdef DEBUG_DECODER_BIT_STREAM
+ fclose(_bitStreamBeforeDecoder);
+#endif
+}
+
+int32_t VideoReceiver::Process() {
+ int32_t returnValue = VCM_OK;
+
+ // Receive-side statistics
+ if (_receiveStatsTimer.TimeUntilProcess() == 0) {
+ _receiveStatsTimer.Processed();
+ CriticalSectionScoped cs(process_crit_sect_.get());
+ if (_receiveStatsCallback != NULL) {
+ uint32_t bitRate;
+ uint32_t frameRate;
+ _receiver.ReceiveStatistics(&bitRate, &frameRate);
+ _receiveStatsCallback->OnReceiveRatesUpdated(bitRate, frameRate);
+ }
+
+ if (_decoderTimingCallback != NULL) {
+ int decode_ms;
+ int max_decode_ms;
+ int current_delay_ms;
+ int target_delay_ms;
+ int jitter_buffer_ms;
+ int min_playout_delay_ms;
+ int render_delay_ms;
+ _timing.GetTimings(&decode_ms,
+ &max_decode_ms,
+ &current_delay_ms,
+ &target_delay_ms,
+ &jitter_buffer_ms,
+ &min_playout_delay_ms,
+ &render_delay_ms);
+ _decoderTimingCallback->OnDecoderTiming(decode_ms,
+ max_decode_ms,
+ current_delay_ms,
+ target_delay_ms,
+ jitter_buffer_ms,
+ min_playout_delay_ms,
+ render_delay_ms);
+ }
+
+ // Size of render buffer.
+ if (render_buffer_callback_) {
+ int buffer_size_ms = _receiver.RenderBufferSizeMs();
+ render_buffer_callback_->RenderBufferSizeMs(buffer_size_ms);
+ }
+ }
+
+ // Key frame requests
+ if (_keyRequestTimer.TimeUntilProcess() == 0) {
+ _keyRequestTimer.Processed();
+ bool request_key_frame = false;
+ {
+ CriticalSectionScoped cs(process_crit_sect_.get());
+ request_key_frame = _scheduleKeyRequest && _frameTypeCallback != NULL;
+ }
+ if (request_key_frame) {
+ const int32_t ret = RequestKeyFrame();
+ if (ret != VCM_OK && returnValue == VCM_OK) {
+ returnValue = ret;
+ }
+ }
+ }
+
+ // Packet retransmission requests
+ // TODO(holmer): Add API for changing Process interval and make sure it's
+ // disabled when NACK is off.
+ if (_retransmissionTimer.TimeUntilProcess() == 0) {
+ _retransmissionTimer.Processed();
+ bool callback_registered = false;
+ uint16_t length;
+ {
+ CriticalSectionScoped cs(process_crit_sect_.get());
+ length = max_nack_list_size_;
+ callback_registered = _packetRequestCallback != NULL;
+ }
+ if (callback_registered && length > 0) {
+ // Collect sequence numbers from the default receiver.
+ bool request_key_frame = false;
+ std::vector<uint16_t> nackList = _receiver.NackList(&request_key_frame);
+ int32_t ret = VCM_OK;
+ if (request_key_frame) {
+ ret = RequestKeyFrame();
+ if (ret != VCM_OK && returnValue == VCM_OK) {
+ returnValue = ret;
+ }
+ }
+ if (ret == VCM_OK && !nackList.empty()) {
+ CriticalSectionScoped cs(process_crit_sect_.get());
+ if (_packetRequestCallback != NULL) {
+ _packetRequestCallback->ResendPackets(&nackList[0], nackList.size());
+ }
+ }
+ }
+ }
+
+ return returnValue;
+}
+
+int64_t VideoReceiver::TimeUntilNextProcess() {
+ int64_t timeUntilNextProcess = _receiveStatsTimer.TimeUntilProcess();
+ if (_receiver.NackMode() != kNoNack) {
+ // We need to call Process more often if we are relying on
+ // retransmissions.
+ timeUntilNextProcess =
+ VCM_MIN(timeUntilNextProcess, _retransmissionTimer.TimeUntilProcess());
+ }
+ timeUntilNextProcess =
+ VCM_MIN(timeUntilNextProcess, _keyRequestTimer.TimeUntilProcess());
+
+ return timeUntilNextProcess;
+}
+
+int32_t VideoReceiver::SetReceiveChannelParameters(int64_t rtt) {
+ CriticalSectionScoped receiveCs(_receiveCritSect);
+ _receiver.UpdateRtt(rtt);
+ return 0;
+}
+
+// Enable or disable a video protection method.
+// Note: This API should be deprecated, as it does not offer a distinction
+// between the protection method and decoding with or without errors. If such
+// behavior is desired, use SetReceiverRobustnessMode instead.
+int32_t VideoReceiver::SetVideoProtection(VCMVideoProtection videoProtection,
+ bool enable) {
+ // By default, do not decode with errors.
+ _receiver.SetDecodeErrorMode(kNoErrors);
+ switch (videoProtection) {
+ case kProtectionNack: {
+ RTC_DCHECK(enable);
+ _receiver.SetNackMode(kNack, -1, -1);
+ break;
+ }
+
+ case kProtectionNackFEC: {
+ CriticalSectionScoped cs(_receiveCritSect);
+ RTC_DCHECK(enable);
+ _receiver.SetNackMode(kNack, media_optimization::kLowRttNackMs, -1);
+ _receiver.SetDecodeErrorMode(kNoErrors);
+ break;
+ }
+ case kProtectionFEC:
+ case kProtectionNone:
+ // No receiver-side protection.
+ RTC_DCHECK(enable);
+ _receiver.SetNackMode(kNoNack, -1, -1);
+ _receiver.SetDecodeErrorMode(kWithErrors);
+ break;
+ }
+ return VCM_OK;
+}
+
+// Register a receive callback. Will be called whenever there is a new frame
+// ready for rendering.
+int32_t VideoReceiver::RegisterReceiveCallback(
+ VCMReceiveCallback* receiveCallback) {
+ CriticalSectionScoped cs(_receiveCritSect);
+ _decodedFrameCallback.SetUserReceiveCallback(receiveCallback);
+ return VCM_OK;
+}
+
+int32_t VideoReceiver::RegisterReceiveStatisticsCallback(
+ VCMReceiveStatisticsCallback* receiveStats) {
+ CriticalSectionScoped cs(process_crit_sect_.get());
+ _receiver.RegisterStatsCallback(receiveStats);
+ _receiveStatsCallback = receiveStats;
+ return VCM_OK;
+}
+
+int32_t VideoReceiver::RegisterDecoderTimingCallback(
+ VCMDecoderTimingCallback* decoderTiming) {
+ CriticalSectionScoped cs(process_crit_sect_.get());
+ _decoderTimingCallback = decoderTiming;
+ return VCM_OK;
+}
+
+// Register an externally defined decoder/render object.
+// Can be a decoder only or a decoder coupled with a renderer.
+int32_t VideoReceiver::RegisterExternalDecoder(VideoDecoder* externalDecoder,
+ uint8_t payloadType,
+ bool internalRenderTiming) {
+ CriticalSectionScoped cs(_receiveCritSect);
+ if (externalDecoder == NULL) {
+ // Make sure the VCM updates the decoder next time it decodes.
+ _decoder = NULL;
+ return _codecDataBase.DeregisterExternalDecoder(payloadType) ? 0 : -1;
+ }
+ return _codecDataBase.RegisterExternalDecoder(
+ externalDecoder, payloadType, internalRenderTiming)
+ ? 0
+ : -1;
+}
+
+// Register a frame type request callback.
+int32_t VideoReceiver::RegisterFrameTypeCallback(
+ VCMFrameTypeCallback* frameTypeCallback) {
+ CriticalSectionScoped cs(process_crit_sect_.get());
+ _frameTypeCallback = frameTypeCallback;
+ return VCM_OK;
+}
+
+int32_t VideoReceiver::RegisterPacketRequestCallback(
+ VCMPacketRequestCallback* callback) {
+ CriticalSectionScoped cs(process_crit_sect_.get());
+ _packetRequestCallback = callback;
+ return VCM_OK;
+}
+
+int VideoReceiver::RegisterRenderBufferSizeCallback(
+ VCMRenderBufferSizeCallback* callback) {
+ CriticalSectionScoped cs(process_crit_sect_.get());
+ render_buffer_callback_ = callback;
+ return VCM_OK;
+}
+
+void VideoReceiver::TriggerDecoderShutdown() {
+ _receiver.TriggerDecoderShutdown();
+}
+
+// Decodes the next frame, blocking.
+// Should be called as often as possible to get the most out of the decoder.
+int32_t VideoReceiver::Decode(uint16_t maxWaitTimeMs) {
+ int64_t nextRenderTimeMs;
+ bool supports_render_scheduling;
+ {
+ CriticalSectionScoped cs(_receiveCritSect);
+ supports_render_scheduling = _codecDataBase.SupportsRenderScheduling();
+ }
+
+ VCMEncodedFrame* frame = _receiver.FrameForDecoding(
+ maxWaitTimeMs, nextRenderTimeMs, supports_render_scheduling);
+
+ if (frame == NULL) {
+ return VCM_FRAME_NOT_READY;
+ } else {
+ CriticalSectionScoped cs(_receiveCritSect);
+
+ // If this frame was too late, we should adjust the delay accordingly.
+ _timing.UpdateCurrentDelay(frame->RenderTimeMs(),
+ clock_->TimeInMilliseconds());
+
+ if (pre_decode_image_callback_) {
+ EncodedImage encoded_image(frame->EncodedImage());
+ int qp = -1;
+ if (qp_parser_.GetQp(*frame, &qp)) {
+ encoded_image.qp_ = qp;
+ }
+ pre_decode_image_callback_->Encoded(
+ encoded_image, frame->CodecSpecific(), NULL);
+ }
+
+#ifdef DEBUG_DECODER_BIT_STREAM
+ if (_bitStreamBeforeDecoder != NULL) {
+ // Write bit stream to file for debugging purposes
+ if (fwrite(
+ frame->Buffer(), 1, frame->Length(), _bitStreamBeforeDecoder) !=
+ frame->Length()) {
+ return -1;
+ }
+ }
+#endif
+ const int32_t ret = Decode(*frame);
+ _receiver.ReleaseFrame(frame);
+ frame = NULL;
+ if (ret != VCM_OK) {
+ return ret;
+ }
+ }
+ return VCM_OK;
+}
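+
+// A sketch of the decode-thread loop this method is built for (hypothetical
+// helper): VCM_FRAME_NOT_READY is the normal idle result and simply means the
+// wait timed out before a decodable frame arrived.
+static void DecodeLoopSketch(VideoReceiver* receiver, volatile bool* running) {
+  while (*running) {
+    // Blocks for up to 50 ms waiting for a frame.
+    receiver->Decode(50);
+  }
+}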
+
+int32_t VideoReceiver::RequestSliceLossIndication(
+ const uint64_t pictureID) const {
+ TRACE_EVENT1("webrtc", "RequestSLI", "picture_id", pictureID);
+ CriticalSectionScoped cs(process_crit_sect_.get());
+ if (_frameTypeCallback != NULL) {
+ const int32_t ret =
+ _frameTypeCallback->SliceLossIndicationRequest(pictureID);
+ if (ret < 0) {
+ return ret;
+ }
+ } else {
+ return VCM_MISSING_CALLBACK;
+ }
+ return VCM_OK;
+}
+
+int32_t VideoReceiver::RequestKeyFrame() {
+ TRACE_EVENT0("webrtc", "RequestKeyFrame");
+ CriticalSectionScoped process_cs(process_crit_sect_.get());
+ if (_frameTypeCallback != NULL) {
+ const int32_t ret = _frameTypeCallback->RequestKeyFrame();
+ if (ret < 0) {
+ return ret;
+ }
+ _scheduleKeyRequest = false;
+ } else {
+ return VCM_MISSING_CALLBACK;
+ }
+ return VCM_OK;
+}
+
+// Must be called from inside the receive side critical section.
+int32_t VideoReceiver::Decode(const VCMEncodedFrame& frame) {
+ TRACE_EVENT_ASYNC_STEP1("webrtc",
+ "Video",
+ frame.TimeStamp(),
+ "Decode",
+ "type",
+ frame.FrameType());
+ // Change decoder if payload type has changed
+ const bool renderTimingBefore = _codecDataBase.SupportsRenderScheduling();
+ _decoder =
+ _codecDataBase.GetDecoder(frame.PayloadType(), &_decodedFrameCallback);
+ if (renderTimingBefore != _codecDataBase.SupportsRenderScheduling()) {
+ // Make sure we reset the decode time estimate since it will
+ // be zero for codecs without render timing.
+ _timing.ResetDecodeTime();
+ }
+ if (_decoder == NULL) {
+ return VCM_NO_CODEC_REGISTERED;
+ }
+ // Decode a frame
+ int32_t ret = _decoder->Decode(frame, clock_->TimeInMilliseconds());
+
+ // Check for failed decoding, run frame type request callback if needed.
+ bool request_key_frame = false;
+ if (ret < 0) {
+ if (ret == VCM_ERROR_REQUEST_SLI) {
+ return RequestSliceLossIndication(
+ _decodedFrameCallback.LastReceivedPictureID() + 1);
+ } else {
+ request_key_frame = true;
+ }
+ } else if (ret == VCM_REQUEST_SLI) {
+ ret = RequestSliceLossIndication(
+ _decodedFrameCallback.LastReceivedPictureID() + 1);
+ }
+ if (!frame.Complete() || frame.MissingFrame()) {
+ request_key_frame = true;
+ ret = VCM_OK;
+ }
+ if (request_key_frame) {
+ CriticalSectionScoped cs(process_crit_sect_.get());
+ _scheduleKeyRequest = true;
+ }
+ TRACE_EVENT_ASYNC_END0("webrtc", "Video", frame.TimeStamp());
+ return ret;
+}
+
+// Reset the decoder state
+int32_t VideoReceiver::ResetDecoder() {
+ bool reset_key_request = false;
+ {
+ CriticalSectionScoped cs(_receiveCritSect);
+ if (_decoder != NULL) {
+ _receiver.Reset();
+ _timing.Reset();
+ reset_key_request = true;
+ _decoder->Reset();
+ }
+ }
+ if (reset_key_request) {
+ CriticalSectionScoped cs(process_crit_sect_.get());
+ _scheduleKeyRequest = false;
+ }
+ return VCM_OK;
+}
+
+// Register possible receive codecs, can be called multiple times
+int32_t VideoReceiver::RegisterReceiveCodec(const VideoCodec* receiveCodec,
+ int32_t numberOfCores,
+ bool requireKeyFrame) {
+ CriticalSectionScoped cs(_receiveCritSect);
+ if (receiveCodec == NULL) {
+ return VCM_PARAMETER_ERROR;
+ }
+ if (!_codecDataBase.RegisterReceiveCodec(
+ receiveCodec, numberOfCores, requireKeyFrame)) {
+ return -1;
+ }
+ return 0;
+}
+
+// Get current received codec
+int32_t VideoReceiver::ReceiveCodec(VideoCodec* currentReceiveCodec) const {
+ CriticalSectionScoped cs(_receiveCritSect);
+ if (currentReceiveCodec == NULL) {
+ return VCM_PARAMETER_ERROR;
+ }
+ return _codecDataBase.ReceiveCodec(currentReceiveCodec) ? 0 : -1;
+}
+
+// Get current received codec
+VideoCodecType VideoReceiver::ReceiveCodec() const {
+ CriticalSectionScoped cs(_receiveCritSect);
+ return _codecDataBase.ReceiveCodec();
+}
+
+// Incoming packet from network parsed and ready for decode, non blocking.
+int32_t VideoReceiver::IncomingPacket(const uint8_t* incomingPayload,
+ size_t payloadLength,
+ const WebRtcRTPHeader& rtpInfo) {
+ if (rtpInfo.frameType == kVideoFrameKey) {
+ TRACE_EVENT1("webrtc",
+ "VCM::PacketKeyFrame",
+ "seqnum",
+ rtpInfo.header.sequenceNumber);
+ }
+ if (incomingPayload == NULL) {
+ // The jitter buffer doesn't handle non-zero payload lengths for packets
+ // without payload.
+ // TODO(holmer): We should fix this in the jitter buffer.
+ payloadLength = 0;
+ }
+ const VCMPacket packet(incomingPayload, payloadLength, rtpInfo);
+ int32_t ret = _receiver.InsertPacket(packet, rtpInfo.type.Video.width,
+ rtpInfo.type.Video.height);
+ // TODO(holmer): Investigate if this somehow should use the key frame
+ // request scheduling to throttle the requests.
+ if (ret == VCM_FLUSH_INDICATOR) {
+ RequestKeyFrame();
+ ResetDecoder();
+ } else if (ret < 0) {
+ return ret;
+ }
+ return VCM_OK;
+}
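+
+// A sketch of feeding a padding-only packet (hypothetical call site): per the
+// note above, the payload pointer may be NULL, in which case the length is
+// forced to zero before the packet reaches the jitter buffer.
+//   WebRtcRTPHeader rtp_header;  // Populated by the RTP receiver.
+//   receiver->IncomingPacket(NULL, 0, rtp_header);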
+
+// Minimum playout delay (used for lip-sync). This is the minimum delay
+// required to sync with audio. Not included in VideoCodingModule::Delay().
+// Defaults to 0 ms.
+int32_t VideoReceiver::SetMinimumPlayoutDelay(uint32_t minPlayoutDelayMs) {
+ _timing.set_min_playout_delay(minPlayoutDelayMs);
+ return VCM_OK;
+}
+
+// The estimated delay caused by rendering; defaults to
+// kDefaultRenderDelayMs (10 ms).
+int32_t VideoReceiver::SetRenderDelay(uint32_t timeMS) {
+ _timing.set_render_delay(timeMS);
+ return VCM_OK;
+}
+
+// Current video delay
+int32_t VideoReceiver::Delay() const { return _timing.TargetVideoDelay(); }
+
+uint32_t VideoReceiver::DiscardedPackets() const {
+ return _receiver.DiscardedPackets();
+}
+
+int VideoReceiver::SetReceiverRobustnessMode(
+ ReceiverRobustness robustnessMode,
+ VCMDecodeErrorMode decode_error_mode) {
+ CriticalSectionScoped cs(_receiveCritSect);
+ switch (robustnessMode) {
+ case VideoCodingModule::kNone:
+ _receiver.SetNackMode(kNoNack, -1, -1);
+ break;
+ case VideoCodingModule::kHardNack:
+ // Always wait for retransmissions (except when decoding with errors).
+ _receiver.SetNackMode(kNack, -1, -1);
+ break;
+ case VideoCodingModule::kSoftNack:
+#if 1
+ assert(false); // TODO(hlundin): Not completed.
+ return VCM_NOT_IMPLEMENTED;
+#else
+ // Enable hybrid NACK/FEC. Always wait for retransmissions and don't add
+ // extra delay when RTT is above kLowRttNackMs.
+ _receiver.SetNackMode(kNack, media_optimization::kLowRttNackMs, -1);
+ break;
+#endif
+ case VideoCodingModule::kReferenceSelection:
+#if 1
+ assert(false); // TODO(hlundin): Not completed.
+ return VCM_NOT_IMPLEMENTED;
+#else
+ if (decode_error_mode == kNoErrors) {
+ return VCM_PARAMETER_ERROR;
+ }
+ _receiver.SetNackMode(kNoNack, -1, -1);
+ break;
+#endif
+ }
+ _receiver.SetDecodeErrorMode(decode_error_mode);
+ return VCM_OK;
+}
+
+void VideoReceiver::SetDecodeErrorMode(VCMDecodeErrorMode decode_error_mode) {
+ CriticalSectionScoped cs(_receiveCritSect);
+ _receiver.SetDecodeErrorMode(decode_error_mode);
+}
+
+void VideoReceiver::SetNackSettings(size_t max_nack_list_size,
+ int max_packet_age_to_nack,
+ int max_incomplete_time_ms) {
+ if (max_nack_list_size != 0) {
+ CriticalSectionScoped process_cs(process_crit_sect_.get());
+ max_nack_list_size_ = max_nack_list_size;
+ }
+ _receiver.SetNackSettings(
+ max_nack_list_size, max_packet_age_to_nack, max_incomplete_time_ms);
+}
+
+int VideoReceiver::SetMinReceiverDelay(int desired_delay_ms) {
+ return _receiver.SetMinReceiverDelay(desired_delay_ms);
+}
+
+void VideoReceiver::RegisterPreDecodeImageCallback(
+ EncodedImageCallback* observer) {
+ CriticalSectionScoped cs(_receiveCritSect);
+ pre_decode_image_callback_ = observer;
+}
+
+} // namespace vcm
+} // namespace webrtc
diff --git a/webrtc/modules/video_coding/main/source/video_receiver_unittest.cc b/webrtc/modules/video_coding/main/source/video_receiver_unittest.cc
new file mode 100644
index 0000000000..75ea29a1ec
--- /dev/null
+++ b/webrtc/modules/video_coding/main/source/video_receiver_unittest.cc
@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <vector>
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/base/scoped_ptr.h"
+#include "webrtc/modules/video_coding/codecs/interface/mock/mock_video_codec_interface.h"
+#include "webrtc/modules/video_coding/main/interface/mock/mock_vcm_callbacks.h"
+#include "webrtc/modules/video_coding/main/interface/video_coding.h"
+#include "webrtc/modules/video_coding/main/source/video_coding_impl.h"
+#include "webrtc/modules/video_coding/main/test/test_util.h"
+#include "webrtc/system_wrappers/include/clock.h"
+
+using ::testing::_;
+using ::testing::NiceMock;
+
+namespace webrtc {
+namespace vcm {
+namespace {
+
+class TestVideoReceiver : public ::testing::Test {
+ protected:
+ static const int kUnusedPayloadType = 10;
+
+ TestVideoReceiver() : clock_(0) {}
+
+ virtual void SetUp() {
+ receiver_.reset(new VideoReceiver(&clock_, &event_factory_));
+ EXPECT_EQ(0, receiver_->RegisterExternalDecoder(&decoder_,
+ kUnusedPayloadType, true));
+ const size_t kMaxNackListSize = 250;
+ const int kMaxPacketAgeToNack = 450;
+ receiver_->SetNackSettings(kMaxNackListSize, kMaxPacketAgeToNack, 0);
+
+ memset(&settings_, 0, sizeof(settings_));
+ EXPECT_EQ(0, VideoCodingModule::Codec(kVideoCodecVP8, &settings_));
+ settings_.plType = kUnusedPayloadType; // Use the mocked decoder.
+ EXPECT_EQ(0, receiver_->RegisterReceiveCodec(&settings_, 1, true));
+ }
+
+ void InsertAndVerifyPaddingFrame(const uint8_t* payload,
+ WebRtcRTPHeader* header) {
+ ASSERT_TRUE(header != NULL);
+ for (int j = 0; j < 5; ++j) {
+ // Padding-only packets are passed to the VCM with payload size 0.
+ EXPECT_EQ(0, receiver_->IncomingPacket(payload, 0, *header));
+ ++header->header.sequenceNumber;
+ }
+ EXPECT_EQ(0, receiver_->Process());
+ EXPECT_CALL(decoder_, Decode(_, _, _, _, _)).Times(0);
+ EXPECT_EQ(VCM_FRAME_NOT_READY, receiver_->Decode(0));
+ }
+
+ void InsertAndVerifyDecodableFrame(const uint8_t* payload,
+ size_t length,
+ WebRtcRTPHeader* header) {
+ ASSERT_TRUE(header != NULL);
+ EXPECT_EQ(0, receiver_->IncomingPacket(payload, length, *header));
+ ++header->header.sequenceNumber;
+ EXPECT_CALL(packet_request_callback_, ResendPackets(_, _)).Times(0);
+ EXPECT_EQ(0, receiver_->Process());
+ EXPECT_CALL(decoder_, Decode(_, _, _, _, _)).Times(1);
+ EXPECT_EQ(0, receiver_->Decode(0));
+ }
+
+ SimulatedClock clock_;
+ NullEventFactory event_factory_;
+ VideoCodec settings_;
+ NiceMock<MockVideoDecoder> decoder_;
+ NiceMock<MockPacketRequestCallback> packet_request_callback_;
+
+ rtc::scoped_ptr<VideoReceiver> receiver_;
+};
+
+TEST_F(TestVideoReceiver, PaddingOnlyFrames) {
+ EXPECT_EQ(0, receiver_->SetVideoProtection(kProtectionNack, true));
+ EXPECT_EQ(
+ 0, receiver_->RegisterPacketRequestCallback(&packet_request_callback_));
+ const size_t kPaddingSize = 220;
+ const uint8_t payload[kPaddingSize] = {0};
+ WebRtcRTPHeader header;
+ memset(&header, 0, sizeof(header));
+ header.frameType = kEmptyFrame;
+ header.header.markerBit = false;
+ header.header.paddingLength = kPaddingSize;
+ header.header.payloadType = kUnusedPayloadType;
+ header.header.ssrc = 1;
+ header.header.headerLength = 12;
+ header.type.Video.codec = kRtpVideoVp8;
+ for (int i = 0; i < 10; ++i) {
+ EXPECT_CALL(packet_request_callback_, ResendPackets(_, _)).Times(0);
+ InsertAndVerifyPaddingFrame(payload, &header);
+ clock_.AdvanceTimeMilliseconds(33);
+ header.header.timestamp += 3000;
+ }
+}
+
+TEST_F(TestVideoReceiver, PaddingOnlyFramesWithLosses) {
+ EXPECT_EQ(0, receiver_->SetVideoProtection(kProtectionNack, true));
+ EXPECT_EQ(
+ 0, receiver_->RegisterPacketRequestCallback(&packet_request_callback_));
+ const size_t kFrameSize = 1200;
+ const size_t kPaddingSize = 220;
+ const uint8_t payload[kFrameSize] = {0};
+ WebRtcRTPHeader header;
+ memset(&header, 0, sizeof(header));
+ header.frameType = kEmptyFrame;
+ header.header.markerBit = false;
+ header.header.paddingLength = kPaddingSize;
+ header.header.payloadType = kUnusedPayloadType;
+ header.header.ssrc = 1;
+ header.header.headerLength = 12;
+ header.type.Video.codec = kRtpVideoVp8;
+ // Insert one video frame to get one frame decoded.
+ header.frameType = kVideoFrameKey;
+ header.type.Video.isFirstPacket = true;
+ header.header.markerBit = true;
+ InsertAndVerifyDecodableFrame(payload, kFrameSize, &header);
+ clock_.AdvanceTimeMilliseconds(33);
+ header.header.timestamp += 3000;
+
+ header.frameType = kEmptyFrame;
+ header.type.Video.isFirstPacket = false;
+ header.header.markerBit = false;
+ // Insert padding frames.
+ for (int i = 0; i < 10; ++i) {
+ // Lose one packet from the 6th frame.
+ if (i == 5) {
+ ++header.header.sequenceNumber;
+ }
+ // Lose the 4th frame.
+ if (i == 3) {
+ header.header.sequenceNumber += 5;
+ } else {
+ if (i > 3 && i < 5) {
+ EXPECT_CALL(packet_request_callback_, ResendPackets(_, 5)).Times(1);
+ } else if (i >= 5) {
+ EXPECT_CALL(packet_request_callback_, ResendPackets(_, 6)).Times(1);
+ } else {
+ EXPECT_CALL(packet_request_callback_, ResendPackets(_, _)).Times(0);
+ }
+ InsertAndVerifyPaddingFrame(payload, &header);
+ }
+ clock_.AdvanceTimeMilliseconds(33);
+ header.header.timestamp += 3000;
+ }
+}
+
+TEST_F(TestVideoReceiver, PaddingOnlyAndVideo) {
+ EXPECT_EQ(0, receiver_->SetVideoProtection(kProtectionNack, true));
+ EXPECT_EQ(
+ 0, receiver_->RegisterPacketRequestCallback(&packet_request_callback_));
+ const size_t kFrameSize = 1200;
+ const size_t kPaddingSize = 220;
+ const uint8_t payload[kFrameSize] = {0};
+ WebRtcRTPHeader header;
+ memset(&header, 0, sizeof(header));
+ header.frameType = kEmptyFrame;
+ header.type.Video.isFirstPacket = false;
+ header.header.markerBit = false;
+ header.header.paddingLength = kPaddingSize;
+ header.header.payloadType = kUnusedPayloadType;
+ header.header.ssrc = 1;
+ header.header.headerLength = 12;
+ header.type.Video.codec = kRtpVideoVp8;
+ header.type.Video.codecHeader.VP8.pictureId = -1;
+ header.type.Video.codecHeader.VP8.tl0PicIdx = -1;
+ for (int i = 0; i < 3; ++i) {
+ // Insert 2 video frames.
+ for (int j = 0; j < 2; ++j) {
+ if (i == 0 && j == 0) // First frame should be a key frame.
+ header.frameType = kVideoFrameKey;
+ else
+ header.frameType = kVideoFrameDelta;
+ header.type.Video.isFirstPacket = true;
+ header.header.markerBit = true;
+ InsertAndVerifyDecodableFrame(payload, kFrameSize, &header);
+ clock_.AdvanceTimeMilliseconds(33);
+ header.header.timestamp += 3000;
+ }
+
+    // Insert 2 padding-only frames.
+ header.frameType = kEmptyFrame;
+ header.type.Video.isFirstPacket = false;
+ header.header.markerBit = false;
+ for (int j = 0; j < 2; ++j) {
+      InsertAndVerifyPaddingFrame(payload, &header);
+ clock_.AdvanceTimeMilliseconds(33);
+ header.header.timestamp += 3000;
+ }
+ }
+}
+
+TEST_F(TestVideoReceiver, ReceiverDelay) {
+ EXPECT_EQ(0, receiver_->SetMinReceiverDelay(0));
+ EXPECT_EQ(0, receiver_->SetMinReceiverDelay(5000));
+ EXPECT_EQ(-1, receiver_->SetMinReceiverDelay(-100));
+ EXPECT_EQ(-1, receiver_->SetMinReceiverDelay(10010));
+}
+
+} // namespace
+} // namespace vcm
+} // namespace webrtc
diff --git a/webrtc/modules/video_coding/main/source/video_sender.cc b/webrtc/modules/video_coding/main/source/video_sender.cc
new file mode 100644
index 0000000000..98230b1e9e
--- /dev/null
+++ b/webrtc/modules/video_coding/main/source/video_sender.cc
@@ -0,0 +1,376 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/common_types.h"
+
+#include <algorithm> // std::max
+
+#include "webrtc/base/checks.h"
+#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
+#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "webrtc/modules/video_coding/main/source/encoded_frame.h"
+#include "webrtc/modules/video_coding/main/source/video_coding_impl.h"
+#include "webrtc/modules/video_coding/utility/include/quality_scaler.h"
+#include "webrtc/system_wrappers/include/clock.h"
+#include "webrtc/system_wrappers/include/logging.h"
+
+namespace webrtc {
+namespace vcm {
+
+VideoSender::VideoSender(Clock* clock,
+ EncodedImageCallback* post_encode_callback,
+ VideoEncoderRateObserver* encoder_rate_observer,
+ VCMQMSettingsCallback* qm_settings_callback)
+ : clock_(clock),
+ process_crit_sect_(CriticalSectionWrapper::CreateCriticalSection()),
+ _encoder(nullptr),
+ _encodedFrameCallback(post_encode_callback),
+ _nextFrameTypes(1, kVideoFrameDelta),
+ _mediaOpt(clock_),
+ _sendStatsCallback(nullptr),
+ _codecDataBase(encoder_rate_observer, &_encodedFrameCallback),
+ frame_dropper_enabled_(true),
+ _sendStatsTimer(1000, clock_),
+ current_codec_(),
+ qm_settings_callback_(qm_settings_callback),
+ protection_callback_(nullptr),
+ encoder_params_({0, 0, 0, 0}) {
+ // Allow VideoSender to be created on one thread but used on another, post
+ // construction. This is currently how this class is being used by at least
+ // one external project (diffractor).
+ _mediaOpt.EnableQM(qm_settings_callback_ != nullptr);
+ _mediaOpt.Reset();
+ main_thread_.DetachFromThread();
+}
+
+VideoSender::~VideoSender() {}
+
+int32_t VideoSender::Process() {
+ int32_t returnValue = VCM_OK;
+
+ if (_sendStatsTimer.TimeUntilProcess() == 0) {
+ _sendStatsTimer.Processed();
+ CriticalSectionScoped cs(process_crit_sect_.get());
+ if (_sendStatsCallback != nullptr) {
+ uint32_t bitRate = _mediaOpt.SentBitRate();
+ uint32_t frameRate = _mediaOpt.SentFrameRate();
+ _sendStatsCallback->SendStatistics(bitRate, frameRate);
+ }
+ }
+
+ {
+ rtc::CritScope cs(&params_lock_);
+ // Force an encoder parameters update, so that incoming frame rate is
+ // updated even if bandwidth hasn't changed.
+ encoder_params_.input_frame_rate = _mediaOpt.InputFrameRate();
+ }
+
+ return returnValue;
+}
+
+int64_t VideoSender::TimeUntilNextProcess() {
+ return _sendStatsTimer.TimeUntilProcess();
+}
+
+// Register the send codec to be used.
+int32_t VideoSender::RegisterSendCodec(const VideoCodec* sendCodec,
+ uint32_t numberOfCores,
+ uint32_t maxPayloadSize) {
+ RTC_DCHECK(main_thread_.CalledOnValidThread());
+ rtc::CritScope lock(&send_crit_);
+ if (sendCodec == nullptr) {
+ return VCM_PARAMETER_ERROR;
+ }
+
+ bool ret =
+ _codecDataBase.SetSendCodec(sendCodec, numberOfCores, maxPayloadSize);
+
+ // Update encoder regardless of result to make sure that we're not holding on
+ // to a deleted instance.
+ _encoder = _codecDataBase.GetEncoder();
+  // Cache the current codec here so it can be fetched from this thread
+  // without requiring the send_crit_ lock.
+ current_codec_ = *sendCodec;
+
+ if (!ret) {
+    LOG(LS_ERROR) << "Failed to initialize encoder with payload name '"
+                  << sendCodec->plName << "'.";
+ return VCM_CODEC_ERROR;
+ }
+
+ int numLayers;
+ if (sendCodec->codecType == kVideoCodecVP8) {
+ numLayers = sendCodec->codecSpecific.VP8.numberOfTemporalLayers;
+ } else if (sendCodec->codecType == kVideoCodecVP9) {
+ numLayers = sendCodec->codecSpecific.VP9.numberOfTemporalLayers;
+ } else {
+ numLayers = 1;
+ }
+
+  // If we have screensharing and temporal layers, disable the frame dropper.
+ bool disable_frame_dropper =
+ numLayers > 1 && sendCodec->mode == kScreensharing;
+ if (disable_frame_dropper) {
+ _mediaOpt.EnableFrameDropper(false);
+ } else if (frame_dropper_enabled_) {
+ _mediaOpt.EnableFrameDropper(true);
+ }
+ _nextFrameTypes.clear();
+ _nextFrameTypes.resize(VCM_MAX(sendCodec->numberOfSimulcastStreams, 1),
+ kVideoFrameDelta);
+
+ _mediaOpt.SetEncodingData(sendCodec->codecType,
+ sendCodec->maxBitrate * 1000,
+ sendCodec->startBitrate * 1000,
+ sendCodec->width,
+ sendCodec->height,
+ sendCodec->maxFramerate,
+ numLayers,
+ maxPayloadSize);
+ return VCM_OK;
+}
+
+const VideoCodec& VideoSender::GetSendCodec() const {
+ RTC_DCHECK(main_thread_.CalledOnValidThread());
+ return current_codec_;
+}
+
+int32_t VideoSender::SendCodecBlocking(VideoCodec* currentSendCodec) const {
+ rtc::CritScope lock(&send_crit_);
+ if (currentSendCodec == nullptr) {
+ return VCM_PARAMETER_ERROR;
+ }
+ return _codecDataBase.SendCodec(currentSendCodec) ? 0 : -1;
+}
+
+VideoCodecType VideoSender::SendCodecBlocking() const {
+ rtc::CritScope lock(&send_crit_);
+ return _codecDataBase.SendCodec();
+}
+
+// Register an external encoder object.
+// This cannot be used together with external encoder callbacks.
+int32_t VideoSender::RegisterExternalEncoder(VideoEncoder* externalEncoder,
+ uint8_t payloadType,
+ bool internalSource /*= false*/) {
+ RTC_DCHECK(main_thread_.CalledOnValidThread());
+
+ rtc::CritScope lock(&send_crit_);
+
+ if (externalEncoder == nullptr) {
+ bool wasSendCodec = false;
+ const bool ret =
+ _codecDataBase.DeregisterExternalEncoder(payloadType, &wasSendCodec);
+ if (wasSendCodec) {
+ // Make sure the VCM doesn't use the de-registered codec
+ _encoder = nullptr;
+ }
+ return ret ? 0 : -1;
+ }
+ _codecDataBase.RegisterExternalEncoder(
+ externalEncoder, payloadType, internalSource);
+ return 0;
+}
+
+// Get encode bitrate
+int VideoSender::Bitrate(unsigned int* bitrate) const {
+ RTC_DCHECK(main_thread_.CalledOnValidThread());
+ // Since we're running on the thread that's the only thread known to modify
+ // the value of _encoder, we don't need to grab the lock here.
+
+ if (!_encoder)
+ return VCM_UNINITIALIZED;
+ *bitrate = _encoder->GetEncoderParameters().target_bitrate;
+ return 0;
+}
+
+// Get encode frame rate
+int VideoSender::FrameRate(unsigned int* framerate) const {
+ RTC_DCHECK(main_thread_.CalledOnValidThread());
+ // Since we're running on the thread that's the only thread known to modify
+ // the value of _encoder, we don't need to grab the lock here.
+
+ if (!_encoder)
+ return VCM_UNINITIALIZED;
+
+ *framerate = _encoder->GetEncoderParameters().input_frame_rate;
+ return 0;
+}
+
+int32_t VideoSender::SetChannelParameters(uint32_t target_bitrate,
+ uint8_t lossRate,
+ int64_t rtt) {
+ uint32_t target_rate =
+ _mediaOpt.SetTargetRates(target_bitrate, lossRate, rtt,
+ protection_callback_, qm_settings_callback_);
+
+ uint32_t input_frame_rate = _mediaOpt.InputFrameRate();
+
+ rtc::CritScope cs(&params_lock_);
+ encoder_params_ = {target_rate, lossRate, rtt, input_frame_rate};
+
+ return VCM_OK;
+}
+
+void VideoSender::SetEncoderParameters(EncoderParameters params) {
+ if (params.target_bitrate == 0)
+ return;
+
+ if (params.input_frame_rate == 0) {
+ // No frame rate estimate available, use default.
+ params.input_frame_rate = current_codec_.maxFramerate;
+ }
+ if (_encoder != nullptr)
+ _encoder->SetEncoderParameters(params);
+}
+
+int32_t VideoSender::RegisterTransportCallback(
+ VCMPacketizationCallback* transport) {
+ rtc::CritScope lock(&send_crit_);
+ _encodedFrameCallback.SetMediaOpt(&_mediaOpt);
+ _encodedFrameCallback.SetTransportCallback(transport);
+ return VCM_OK;
+}
+
+// Register video output information callback which will be called to deliver
+// information about the video stream produced by the encoder, for instance the
+// average frame rate and bit rate.
+int32_t VideoSender::RegisterSendStatisticsCallback(
+ VCMSendStatisticsCallback* sendStats) {
+ CriticalSectionScoped cs(process_crit_sect_.get());
+ _sendStatsCallback = sendStats;
+ return VCM_OK;
+}
+
+// Register a video protection callback which will be called to deliver the
+// requested FEC rate and NACK status (on/off).
+// Note: this callback is assumed to only be registered once and before it is
+// used in this class.
+int32_t VideoSender::RegisterProtectionCallback(
+ VCMProtectionCallback* protection_callback) {
+ RTC_DCHECK(protection_callback == nullptr || protection_callback_ == nullptr);
+ protection_callback_ = protection_callback;
+ return VCM_OK;
+}
+
+// Enable or disable a video protection method.
+void VideoSender::SetVideoProtection(VCMVideoProtection videoProtection) {
+ rtc::CritScope lock(&send_crit_);
+ switch (videoProtection) {
+ case kProtectionNone:
+ _mediaOpt.SetProtectionMethod(media_optimization::kNone);
+ break;
+ case kProtectionNack:
+ _mediaOpt.SetProtectionMethod(media_optimization::kNack);
+ break;
+ case kProtectionNackFEC:
+ _mediaOpt.SetProtectionMethod(media_optimization::kNackFec);
+ break;
+ case kProtectionFEC:
+ _mediaOpt.SetProtectionMethod(media_optimization::kFec);
+ break;
+ }
+}
+
+// Add one raw video frame to the encoder, blocking.
+int32_t VideoSender::AddVideoFrame(const VideoFrame& videoFrame,
+ const VideoContentMetrics* contentMetrics,
+ const CodecSpecificInfo* codecSpecificInfo) {
+ EncoderParameters encoder_params;
+ {
+ rtc::CritScope lock(&params_lock_);
+ encoder_params = encoder_params_;
+ }
+ rtc::CritScope lock(&send_crit_);
+ if (_encoder == nullptr)
+ return VCM_UNINITIALIZED;
+ SetEncoderParameters(encoder_params);
+ // TODO(holmer): Add support for dropping frames per stream. Currently we
+ // only have one frame dropper for all streams.
+ if (_nextFrameTypes[0] == kEmptyFrame) {
+ return VCM_OK;
+ }
+ if (_mediaOpt.DropFrame()) {
+ _encoder->OnDroppedFrame();
+ return VCM_OK;
+ }
+ _mediaOpt.UpdateContentData(contentMetrics);
+ // TODO(pbos): Make sure setting send codec is synchronized with video
+ // processing so frame size always matches.
+ if (!_codecDataBase.MatchesCurrentResolution(videoFrame.width(),
+ videoFrame.height())) {
+ LOG(LS_ERROR) << "Incoming frame doesn't match set resolution. Dropping.";
+ return VCM_PARAMETER_ERROR;
+ }
+ VideoFrame converted_frame = videoFrame;
+ if (converted_frame.native_handle() && !_encoder->SupportsNativeHandle()) {
+ // This module only supports software encoding.
+ // TODO(pbos): Offload conversion from the encoder thread.
+ converted_frame = converted_frame.ConvertNativeToI420Frame();
+ RTC_CHECK(!converted_frame.IsZeroSize())
+ << "Frame conversion failed, won't be able to encode frame.";
+ }
+ int32_t ret =
+ _encoder->Encode(converted_frame, codecSpecificInfo, _nextFrameTypes);
+ if (ret < 0) {
+ LOG(LS_ERROR) << "Failed to encode frame. Error code: " << ret;
+ return ret;
+ }
+ for (size_t i = 0; i < _nextFrameTypes.size(); ++i) {
+ _nextFrameTypes[i] = kVideoFrameDelta; // Default frame type.
+ }
+ if (qm_settings_callback_)
+ qm_settings_callback_->SetTargetFramerate(_encoder->GetTargetFramerate());
+ return VCM_OK;
+}
+
+int32_t VideoSender::IntraFrameRequest(int stream_index) {
+ rtc::CritScope lock(&send_crit_);
+ if (stream_index < 0 ||
+ static_cast<unsigned int>(stream_index) >= _nextFrameTypes.size()) {
+ return -1;
+ }
+ _nextFrameTypes[stream_index] = kVideoFrameKey;
+ if (_encoder != nullptr && _encoder->InternalSource()) {
+    // Try to request the frame directly if we have an external encoder with
+    // an internal source, since AddVideoFrame will never be called.
+ if (_encoder->RequestFrame(_nextFrameTypes) == WEBRTC_VIDEO_CODEC_OK) {
+ _nextFrameTypes[stream_index] = kVideoFrameDelta;
+ }
+ }
+ return VCM_OK;
+}
+
+int32_t VideoSender::EnableFrameDropper(bool enable) {
+ rtc::CritScope lock(&send_crit_);
+ frame_dropper_enabled_ = enable;
+ _mediaOpt.EnableFrameDropper(enable);
+ return VCM_OK;
+}
+
+void VideoSender::SuspendBelowMinBitrate() {
+ RTC_DCHECK(main_thread_.CalledOnValidThread());
+ int threshold_bps;
+ if (current_codec_.numberOfSimulcastStreams == 0) {
+ threshold_bps = current_codec_.minBitrate * 1000;
+ } else {
+ threshold_bps = current_codec_.simulcastStream[0].minBitrate * 1000;
+ }
+ // Set the hysteresis window to be at 10% of the threshold, but at least
+ // 10 kbps.
+ int window_bps = std::max(threshold_bps / 10, 10000);
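+  // E.g. a 300 kbps threshold gives a 30 kbps window, while a 50 kbps
+  // threshold is clamped up to the 10 kbps minimum window.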
+ _mediaOpt.SuspendBelowMinBitrate(threshold_bps, window_bps);
+}
+
+bool VideoSender::VideoSuspended() const {
+ rtc::CritScope lock(&send_crit_);
+ return _mediaOpt.IsVideoSuspended();
+}
+} // namespace vcm
+} // namespace webrtc
diff --git a/webrtc/modules/video_coding/main/source/video_sender_unittest.cc b/webrtc/modules/video_coding/main/source/video_sender_unittest.cc
new file mode 100644
index 0000000000..e9c8bd79b6
--- /dev/null
+++ b/webrtc/modules/video_coding/main/source/video_sender_unittest.cc
@@ -0,0 +1,494 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <vector>
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/base/scoped_ptr.h"
+#include "webrtc/common.h"
+#include "webrtc/modules/video_coding/codecs/interface/mock/mock_video_codec_interface.h"
+#include "webrtc/modules/video_coding/codecs/vp8/include/vp8.h"
+#include "webrtc/modules/video_coding/codecs/vp8/include/vp8_common_types.h"
+#include "webrtc/modules/video_coding/codecs/vp8/temporal_layers.h"
+#include "webrtc/modules/video_coding/main/interface/mock/mock_vcm_callbacks.h"
+#include "webrtc/modules/video_coding/main/interface/video_coding.h"
+#include "webrtc/modules/video_coding/main/source/video_coding_impl.h"
+#include "webrtc/modules/video_coding/main/test/test_util.h"
+#include "webrtc/system_wrappers/include/clock.h"
+#include "webrtc/test/frame_generator.h"
+#include "webrtc/test/testsupport/fileutils.h"
+#include "webrtc/test/testsupport/gtest_disable.h"
+
+using ::testing::_;
+using ::testing::AllOf;
+using ::testing::ElementsAre;
+using ::testing::ElementsAreArray;
+using ::testing::Field;
+using ::testing::NiceMock;
+using ::testing::Pointee;
+using ::testing::Return;
+using ::testing::FloatEq;
+using std::vector;
+using webrtc::test::FrameGenerator;
+
+namespace webrtc {
+namespace vcm {
+namespace {
+enum {
+ kMaxNumberOfTemporalLayers = 3
+};
+
+struct Vp8StreamInfo {
+ float framerate_fps[kMaxNumberOfTemporalLayers];
+ int bitrate_kbps[kMaxNumberOfTemporalLayers];
+};
+
+MATCHER_P(MatchesVp8StreamInfo, expected, "") {
+ bool res = true;
+ for (int tl = 0; tl < kMaxNumberOfTemporalLayers; ++tl) {
+ if (fabs(expected.framerate_fps[tl] - arg.framerate_fps[tl]) > 0.5) {
+ *result_listener << " framerate_fps[" << tl
+ << "] = " << arg.framerate_fps[tl] << " (expected "
+ << expected.framerate_fps[tl] << ") ";
+ res = false;
+ }
+ if (abs(expected.bitrate_kbps[tl] - arg.bitrate_kbps[tl]) > 10) {
+ *result_listener << " bitrate_kbps[" << tl
+ << "] = " << arg.bitrate_kbps[tl] << " (expected "
+ << expected.bitrate_kbps[tl] << ") ";
+ res = false;
+ }
+ }
+ return res;
+}
+
+class EmptyFrameGenerator : public FrameGenerator {
+ public:
+ EmptyFrameGenerator(int width, int height) : width_(width), height_(height) {}
+ VideoFrame* NextFrame() override {
+ frame_.reset(new VideoFrame());
+ frame_->CreateEmptyFrame(width_, height_, width_, (width_ + 1) / 2,
+ (width_ + 1) / 2);
+ return frame_.get();
+ }
+
+ private:
+ const int width_;
+ const int height_;
+ rtc::scoped_ptr<VideoFrame> frame_;
+};
+
+class PacketizationCallback : public VCMPacketizationCallback {
+ public:
+ PacketizationCallback(Clock* clock)
+ : clock_(clock), start_time_ms_(clock_->TimeInMilliseconds()) {}
+
+ virtual ~PacketizationCallback() {}
+
+ int32_t SendData(uint8_t payload_type,
+ const EncodedImage& encoded_image,
+ const RTPFragmentationHeader& fragmentation_header,
+ const RTPVideoHeader* rtp_video_header) override {
+ assert(rtp_video_header);
+ frame_data_.push_back(FrameData(encoded_image._length, *rtp_video_header));
+ return 0;
+ }
+
+ void Reset() {
+ frame_data_.clear();
+ start_time_ms_ = clock_->TimeInMilliseconds();
+ }
+
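+  // Rates are computed over the elapsed measurement interval; e.g. 60 frames
+  // counted over 2000 ms yields 30 fps, and 250000 payload bytes over the
+  // same interval yields 1000 kbps.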
+ float FramerateFpsWithinTemporalLayer(int temporal_layer) {
+ return CountFramesWithinTemporalLayer(temporal_layer) *
+ (1000.0 / interval_ms());
+ }
+
+ float BitrateKbpsWithinTemporalLayer(int temporal_layer) {
+ return SumPayloadBytesWithinTemporalLayer(temporal_layer) * 8.0 /
+ interval_ms();
+ }
+
+ Vp8StreamInfo CalculateVp8StreamInfo() {
+ Vp8StreamInfo info;
+    for (int tl = 0; tl < kMaxNumberOfTemporalLayers; ++tl) {
+ info.framerate_fps[tl] = FramerateFpsWithinTemporalLayer(tl);
+ info.bitrate_kbps[tl] = BitrateKbpsWithinTemporalLayer(tl);
+ }
+ return info;
+ }
+
+ private:
+ struct FrameData {
+ FrameData() {}
+
+ FrameData(size_t payload_size, const RTPVideoHeader& rtp_video_header)
+ : payload_size(payload_size), rtp_video_header(rtp_video_header) {}
+
+ size_t payload_size;
+ RTPVideoHeader rtp_video_header;
+ };
+
+ int64_t interval_ms() {
+ int64_t diff = (clock_->TimeInMilliseconds() - start_time_ms_);
+ EXPECT_GT(diff, 0);
+ return diff;
+ }
+
+ int CountFramesWithinTemporalLayer(int temporal_layer) {
+ int frames = 0;
+ for (size_t i = 0; i < frame_data_.size(); ++i) {
+ EXPECT_EQ(kRtpVideoVp8, frame_data_[i].rtp_video_header.codec);
+ const uint8_t temporal_idx =
+ frame_data_[i].rtp_video_header.codecHeader.VP8.temporalIdx;
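+      // Temporal layers are cumulative: a frame tagged with layer k belongs
+      // to every layer at or above k, and untagged frames count toward all.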
+ if (temporal_idx <= temporal_layer || temporal_idx == kNoTemporalIdx)
+ frames++;
+ }
+ return frames;
+ }
+
+ size_t SumPayloadBytesWithinTemporalLayer(int temporal_layer) {
+ size_t payload_size = 0;
+ for (size_t i = 0; i < frame_data_.size(); ++i) {
+ EXPECT_EQ(kRtpVideoVp8, frame_data_[i].rtp_video_header.codec);
+ const uint8_t temporal_idx =
+ frame_data_[i].rtp_video_header.codecHeader.VP8.temporalIdx;
+ if (temporal_idx <= temporal_layer || temporal_idx == kNoTemporalIdx)
+ payload_size += frame_data_[i].payload_size;
+ }
+ return payload_size;
+ }
+
+ Clock* clock_;
+ int64_t start_time_ms_;
+ vector<FrameData> frame_data_;
+};
+
+class TestVideoSender : public ::testing::Test {
+ protected:
+  // Note: simulated clock starts at 1 second, since parts of webrtc use 0 as
+  // a special case (e.g. frame rate in media optimization).
+ TestVideoSender() : clock_(1000), packetization_callback_(&clock_) {}
+
+ void SetUp() override {
+ sender_.reset(
+ new VideoSender(&clock_, &post_encode_callback_, nullptr, nullptr));
+ EXPECT_EQ(0, sender_->RegisterTransportCallback(&packetization_callback_));
+ }
+
+ void AddFrame() {
+ assert(generator_.get());
+ sender_->AddVideoFrame(*generator_->NextFrame(), NULL, NULL);
+ }
+
+ SimulatedClock clock_;
+ PacketizationCallback packetization_callback_;
+ MockEncodedImageCallback post_encode_callback_;
+ // Used by subclassing tests, need to outlive sender_.
+ rtc::scoped_ptr<VideoEncoder> encoder_;
+ rtc::scoped_ptr<VideoSender> sender_;
+ rtc::scoped_ptr<FrameGenerator> generator_;
+};
+
+class TestVideoSenderWithMockEncoder : public TestVideoSender {
+ protected:
+ static const int kDefaultWidth = 1280;
+ static const int kDefaultHeight = 720;
+ static const int kNumberOfStreams = 3;
+ static const int kNumberOfLayers = 3;
+ static const int kUnusedPayloadType = 10;
+
+ void SetUp() override {
+ TestVideoSender::SetUp();
+ EXPECT_EQ(
+ 0,
+ sender_->RegisterExternalEncoder(&encoder_, kUnusedPayloadType, false));
+ memset(&settings_, 0, sizeof(settings_));
+ EXPECT_EQ(0, VideoCodingModule::Codec(kVideoCodecVP8, &settings_));
+ settings_.numberOfSimulcastStreams = kNumberOfStreams;
+ ConfigureStream(kDefaultWidth / 4,
+ kDefaultHeight / 4,
+ 100,
+ &settings_.simulcastStream[0]);
+ ConfigureStream(kDefaultWidth / 2,
+ kDefaultHeight / 2,
+ 500,
+ &settings_.simulcastStream[1]);
+ ConfigureStream(
+ kDefaultWidth, kDefaultHeight, 1200, &settings_.simulcastStream[2]);
+ settings_.plType = kUnusedPayloadType; // Use the mocked encoder.
+ generator_.reset(
+ new EmptyFrameGenerator(settings_.width, settings_.height));
+ EXPECT_EQ(0, sender_->RegisterSendCodec(&settings_, 1, 1200));
+ }
+
+ void TearDown() override { sender_.reset(); }
+
+ void ExpectIntraRequest(int stream) {
+ if (stream == -1) {
+ // No intra request expected.
+ EXPECT_CALL(
+ encoder_,
+ Encode(_, _, Pointee(ElementsAre(kVideoFrameDelta, kVideoFrameDelta,
+ kVideoFrameDelta))))
+ .Times(1)
+ .WillRepeatedly(Return(0));
+ return;
+ }
+ assert(stream >= 0);
+ assert(stream < kNumberOfStreams);
+ std::vector<FrameType> frame_types(kNumberOfStreams, kVideoFrameDelta);
+ frame_types[stream] = kVideoFrameKey;
+ EXPECT_CALL(
+ encoder_,
+ Encode(_,
+ _,
+ Pointee(ElementsAreArray(&frame_types[0], frame_types.size()))))
+ .Times(1).WillRepeatedly(Return(0));
+ }
+
+ static void ConfigureStream(int width,
+ int height,
+ int max_bitrate,
+ SimulcastStream* stream) {
+ assert(stream);
+ stream->width = width;
+ stream->height = height;
+ stream->maxBitrate = max_bitrate;
+ stream->numberOfTemporalLayers = kNumberOfLayers;
+ stream->qpMax = 45;
+ }
+
+ VideoCodec settings_;
+ NiceMock<MockVideoEncoder> encoder_;
+};
+
+TEST_F(TestVideoSenderWithMockEncoder, TestIntraRequests) {
+ EXPECT_EQ(0, sender_->IntraFrameRequest(0));
+ ExpectIntraRequest(0);
+ AddFrame();
+ ExpectIntraRequest(-1);
+ AddFrame();
+
+ EXPECT_EQ(0, sender_->IntraFrameRequest(1));
+ ExpectIntraRequest(1);
+ AddFrame();
+ ExpectIntraRequest(-1);
+ AddFrame();
+
+ EXPECT_EQ(0, sender_->IntraFrameRequest(2));
+ ExpectIntraRequest(2);
+ AddFrame();
+ ExpectIntraRequest(-1);
+ AddFrame();
+
+ EXPECT_EQ(-1, sender_->IntraFrameRequest(3));
+ ExpectIntraRequest(-1);
+ AddFrame();
+
+ EXPECT_EQ(-1, sender_->IntraFrameRequest(-1));
+ ExpectIntraRequest(-1);
+ AddFrame();
+}
+
+TEST_F(TestVideoSenderWithMockEncoder, TestIntraRequestsInternalCapture) {
+ // De-register current external encoder.
+ EXPECT_EQ(0,
+ sender_->RegisterExternalEncoder(NULL, kUnusedPayloadType, false));
+ // Register encoder with internal capture.
+ EXPECT_EQ(
+ 0, sender_->RegisterExternalEncoder(&encoder_, kUnusedPayloadType, true));
+ EXPECT_EQ(0, sender_->RegisterSendCodec(&settings_, 1, 1200));
+ ExpectIntraRequest(0);
+ EXPECT_EQ(0, sender_->IntraFrameRequest(0));
+ ExpectIntraRequest(1);
+ EXPECT_EQ(0, sender_->IntraFrameRequest(1));
+ ExpectIntraRequest(2);
+ EXPECT_EQ(0, sender_->IntraFrameRequest(2));
+ // No requests expected since these indices are out of bounds.
+ EXPECT_EQ(-1, sender_->IntraFrameRequest(3));
+ EXPECT_EQ(-1, sender_->IntraFrameRequest(-1));
+}
+
+TEST_F(TestVideoSenderWithMockEncoder, EncoderFramerateUpdatedViaProcess) {
+ sender_->SetChannelParameters(settings_.startBitrate * 1000, 0, 200);
+ const int64_t kRateStatsWindowMs = 2000;
+ const uint32_t kInputFps = 20;
+ int64_t start_time = clock_.TimeInMilliseconds();
+ while (clock_.TimeInMilliseconds() < start_time + kRateStatsWindowMs) {
+ AddFrame();
+ clock_.AdvanceTimeMilliseconds(1000 / kInputFps);
+ }
+ EXPECT_CALL(encoder_, SetRates(_, kInputFps)).Times(1).WillOnce(Return(0));
+ sender_->Process();
+ AddFrame();
+}
+
+TEST_F(TestVideoSenderWithMockEncoder,
+ NoRedundantSetChannelParameterOrSetRatesCalls) {
+ const uint8_t kLossRate = 4;
+ const uint8_t kRtt = 200;
+ const int64_t kRateStatsWindowMs = 2000;
+ const uint32_t kInputFps = 20;
+ int64_t start_time = clock_.TimeInMilliseconds();
+  // Expect an initial call to SetChannelParameters. Rates are initialized
+  // through InitEncode, so no additional call is expected until the framerate
+  // (or bitrate) changes.
+ EXPECT_CALL(encoder_, SetChannelParameters(kLossRate, kRtt))
+ .Times(1)
+ .WillOnce(Return(0));
+ sender_->SetChannelParameters(settings_.startBitrate * 1000, kLossRate, kRtt);
+ while (clock_.TimeInMilliseconds() < start_time + kRateStatsWindowMs) {
+ AddFrame();
+ clock_.AdvanceTimeMilliseconds(1000 / kInputFps);
+ }
+  // After Process(), the input framerate should be updated, but not the
+  // channel parameters, since they are the same as before.
+ EXPECT_CALL(encoder_, SetRates(_, kInputFps)).Times(1).WillOnce(Return(0));
+ sender_->Process();
+ AddFrame();
+  // A call to SetChannelParameters with a changed bitrate should trigger
+  // encoder SetRates, but not encoder SetChannelParameters (which is
+  // unchanged).
+ EXPECT_CALL(encoder_, SetRates(2 * settings_.startBitrate, kInputFps))
+ .Times(1)
+ .WillOnce(Return(0));
+ sender_->SetChannelParameters(2 * settings_.startBitrate * 1000, kLossRate,
+ kRtt);
+ AddFrame();
+}
+
+class TestVideoSenderWithVp8 : public TestVideoSender {
+ public:
+ TestVideoSenderWithVp8()
+ : codec_bitrate_kbps_(300), available_bitrate_kbps_(1000) {}
+
+ void SetUp() override {
+ TestVideoSender::SetUp();
+
+ const char* input_video = "foreman_cif";
+ const int width = 352;
+ const int height = 288;
+ generator_.reset(FrameGenerator::CreateFromYuvFile(
+ std::vector<std::string>(1, test::ResourcePath(input_video, "yuv")),
+ width, height, 1));
+
+ codec_ = MakeVp8VideoCodec(width, height, 3);
+ codec_.minBitrate = 10;
+ codec_.startBitrate = codec_bitrate_kbps_;
+ codec_.maxBitrate = codec_bitrate_kbps_;
+ encoder_.reset(VP8Encoder::Create());
+ ASSERT_EQ(0, sender_->RegisterExternalEncoder(encoder_.get(), codec_.plType,
+ false));
+ EXPECT_EQ(0, sender_->RegisterSendCodec(&codec_, 1, 1200));
+ }
+
+ static VideoCodec MakeVp8VideoCodec(int width,
+ int height,
+ int temporal_layers) {
+ VideoCodec codec;
+ memset(&codec, 0, sizeof(codec));
+ EXPECT_EQ(0, VideoCodingModule::Codec(kVideoCodecVP8, &codec));
+ codec.width = width;
+ codec.height = height;
+ codec.codecSpecific.VP8.numberOfTemporalLayers = temporal_layers;
+ return codec;
+ }
+
+ void InsertFrames(float framerate, float seconds) {
+ for (int i = 0; i < seconds * framerate; ++i) {
+ clock_.AdvanceTimeMilliseconds(1000.0f / framerate);
+ EXPECT_CALL(post_encode_callback_, Encoded(_, NULL, NULL))
+ .WillOnce(Return(0));
+ AddFrame();
+      // SetChannelParameters needs to be called frequently to propagate the
+      // framerate from the media optimization into the encoder.
+      // Note: SetChannelParameters fails if fewer than 2 frames are in the
+      // buffer, since it cannot calculate the framerate otherwise.
+ if (i != 0) {
+ EXPECT_EQ(VCM_OK, sender_->SetChannelParameters(
+ available_bitrate_kbps_ * 1000, 0, 200));
+ }
+ }
+ }
+
+ Vp8StreamInfo SimulateWithFramerate(float framerate) {
+ const float short_simulation_interval = 5.0;
+ const float long_simulation_interval = 10.0;
+    // It appears that this 5-second simulation is needed to allow the
+    // bitrate and framerate to stabilize.
+ InsertFrames(framerate, short_simulation_interval);
+ packetization_callback_.Reset();
+
+ InsertFrames(framerate, long_simulation_interval);
+ return packetization_callback_.CalculateVp8StreamInfo();
+ }
+
+ protected:
+ VideoCodec codec_;
+ int codec_bitrate_kbps_;
+ int available_bitrate_kbps_;
+};
+
+TEST_F(TestVideoSenderWithVp8,
+ DISABLED_ON_IOS(DISABLED_ON_ANDROID(FixedTemporalLayersStrategy))) {
+ const int low_b = codec_bitrate_kbps_ * kVp8LayerRateAlloction[2][0];
+ const int mid_b = codec_bitrate_kbps_ * kVp8LayerRateAlloction[2][1];
+ const int high_b = codec_bitrate_kbps_ * kVp8LayerRateAlloction[2][2];
+ {
+ Vp8StreamInfo expected = {{7.5, 15.0, 30.0}, {low_b, mid_b, high_b}};
+ EXPECT_THAT(SimulateWithFramerate(30.0), MatchesVp8StreamInfo(expected));
+ }
+ {
+ Vp8StreamInfo expected = {{3.75, 7.5, 15.0}, {low_b, mid_b, high_b}};
+ EXPECT_THAT(SimulateWithFramerate(15.0), MatchesVp8StreamInfo(expected));
+ }
+}
+
+TEST_F(TestVideoSenderWithVp8,
+ DISABLED_ON_IOS(DISABLED_ON_ANDROID(RealTimeTemporalLayersStrategy))) {
+ Config extra_options;
+ extra_options.Set<TemporalLayers::Factory>(
+ new RealTimeTemporalLayersFactory());
+ VideoCodec codec = MakeVp8VideoCodec(352, 288, 3);
+ codec.extra_options = &extra_options;
+ codec.minBitrate = 10;
+ codec.startBitrate = codec_bitrate_kbps_;
+ codec.maxBitrate = codec_bitrate_kbps_;
+ EXPECT_EQ(0, sender_->RegisterSendCodec(&codec, 1, 1200));
+
+ const int low_b = codec_bitrate_kbps_ * 0.4;
+ const int mid_b = codec_bitrate_kbps_ * 0.6;
+ const int high_b = codec_bitrate_kbps_;
+
+ {
+ Vp8StreamInfo expected = {{7.5, 15.0, 30.0}, {low_b, mid_b, high_b}};
+ EXPECT_THAT(SimulateWithFramerate(30.0), MatchesVp8StreamInfo(expected));
+ }
+ {
+ Vp8StreamInfo expected = {{5.0, 10.0, 20.0}, {low_b, mid_b, high_b}};
+ EXPECT_THAT(SimulateWithFramerate(20.0), MatchesVp8StreamInfo(expected));
+ }
+ {
+ Vp8StreamInfo expected = {{7.5, 15.0, 15.0}, {mid_b, high_b, high_b}};
+ EXPECT_THAT(SimulateWithFramerate(15.0), MatchesVp8StreamInfo(expected));
+ }
+ {
+ Vp8StreamInfo expected = {{5.0, 10.0, 10.0}, {mid_b, high_b, high_b}};
+ EXPECT_THAT(SimulateWithFramerate(10.0), MatchesVp8StreamInfo(expected));
+ }
+ {
+ // TODO(andresp): Find out why this fails with framerate = 7.5
+ Vp8StreamInfo expected = {{7.0, 7.0, 7.0}, {high_b, high_b, high_b}};
+ EXPECT_THAT(SimulateWithFramerate(7.0), MatchesVp8StreamInfo(expected));
+ }
+}
+} // namespace
+} // namespace vcm
+} // namespace webrtc
diff --git a/webrtc/modules/video_coding/main/test/plotJitterEstimate.m b/webrtc/modules/video_coding/main/test/plotJitterEstimate.m
new file mode 100644
index 0000000000..d6185f55da
--- /dev/null
+++ b/webrtc/modules/video_coding/main/test/plotJitterEstimate.m
@@ -0,0 +1,52 @@
+function plotJitterEstimate(filename)
+
+[timestamps, framedata, slopes, randJitters, framestats, timetable, filtjitter, rtt, rttStatsVec] = jitterBufferTraceParser(filename);
+
+x = 1:size(framestats, 1);
+%figure(2);
+subfigure(3, 2, 1);
+hold on;
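+% The estimate curve appears to follow the jitter estimator's model:
+% theta(1)*(maxFrameSize - avgFrameSize) + 3*sqrt(varNoise).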
+plot(x, slopes(x, 1).*(framestats(x, 1) - framestats(x, 2)) + 3*sqrt(randJitters(x,2)), 'b'); title('Estimate ms');
+plot(x, filtjitter, 'r');
+plot(x, slopes(x, 1).*(framestats(x, 1) - framestats(x, 2)), 'g');
+subfigure(3, 2, 2);
+%subplot(211);
+plot(x, slopes(x, 1)); title('Line slope');
+%subplot(212);
+%plot(x, slopes(x, 2)); title('Line offset');
+subfigure(3, 2, 3); hold on;
+plot(x, framestats); plot(x, framedata(x, 1)); title('frame size and average frame size');
+subfigure(3, 2, 4);
+plot(x, framedata(x, 2)); title('Delay');
+subfigure(3, 2, 5);
+hold on;
+plot(x, randJitters(x,1),'r');
+plot(x, randJitters(x,2)); title('Random jitter');
+
+subfigure(3, 2, 6);
+delays = framedata(:,2);
+dL = [0; framedata(2:end, 1) - framedata(1:end-1, 1)];
+hold on;
+plot(dL, delays, '.');
+s = [min(dL) max(dL)];
+plot(s, slopes(end, 1)*s + slopes(end, 2), 'g');
+plot(s, slopes(end, 1)*s + slopes(end, 2) + 3*sqrt(randJitters(end,2)), 'r');
+plot(s, slopes(end, 1)*s + slopes(end, 2) - 3*sqrt(randJitters(end,2)), 'r');
+title('theta(1)*x+theta(2), (dT-dTS)/dL');
+if sum(size(rttStatsVec)) > 0
+ figure; hold on;
+ rttNstdDevsDrift = 3.5;
+ rttNstdDevsJump = 2.5;
+ rttSamples = rttStatsVec(:, 1);
+ rttAvgs = rttStatsVec(:, 2);
+ rttStdDevs = sqrt(rttStatsVec(:, 3));
+ rttMax = rttStatsVec(:, 4);
+ plot(rttSamples, 'ko-');
+ plot(rttAvgs, 'g');
+ plot(rttAvgs + rttNstdDevsDrift*rttStdDevs, 'b--');
+ plot(rttAvgs + rttNstdDevsJump*rttStdDevs, 'b');
+ plot(rttAvgs - rttNstdDevsJump*rttStdDevs, 'b');
+ plot(rttMax, 'r');
+ %plot(driftRestarts*max(maxRtts), '.');
+ %plot(jumpRestarts*max(maxRtts), '.');
+end \ No newline at end of file
diff --git a/webrtc/modules/video_coding/main/test/plotReceiveTrace.m b/webrtc/modules/video_coding/main/test/plotReceiveTrace.m
new file mode 100644
index 0000000000..4d262aa165
--- /dev/null
+++ b/webrtc/modules/video_coding/main/test/plotReceiveTrace.m
@@ -0,0 +1,213 @@
+function [t, TS] = plotReceiveTrace(filename, flat)
+fid=fopen(filename);
+%DEBUG ; ( 8:32:33:375 | 0) VIDEO:1 ; 5260; First packet of frame 1869537938
+%DEBUG ; ( 8:32:33:375 | 0) VIDEO CODING:1 ; 5260; Decoding timestamp 1869534934
+%DEBUG ; ( 8:32:33:375 | 0) VIDEO:1 ; 5260; Render frame 1869534934 at 20772610
+%DEBUG ; ( 8:32:33:375 | 0) VIDEO CODING:-1 ; 5260; Frame decoded: timeStamp=1870511259 decTime=0 maxDecTime=0, at 19965
+%DEBUG ; ( 7:59:42:500 | 0) VIDEO:-1 ; 2500; Received complete frame timestamp 1870514263 frame type 1 frame size 7862 at time 19965, jitter estimate was 130
+%DEBUG ; ( 8: 5:51:774 | 0) VIDEO:-1 ; 3968; ExtrapolateLocalTime(1870967878)=24971 ms
+
+if nargin == 1
+ flat = 0;
+end
+line = fgetl(fid);
+estimatedArrivalTime = [];
+packetTime = [];
+firstPacketTime = [];
+decodeTime = [];
+decodeCompleteTime = [];
+renderTime = [];
+completeTime = [];
+while ischar(line)
+ if length(line) == 0
+ line = fgetl(fid);
+ continue;
+ end
+ % Parse the trace line header
+ [tempres, count] = sscanf(line, 'DEBUG ; (%u:%u:%u:%u |%*lu)%13c:');
+ if count < 5
+ line = fgetl(fid);
+ continue;
+ end
+ hr=tempres(1);
+ mn=tempres(2);
+ sec=tempres(3);
+ ms=tempres(4);
+ timeInMs=hr*60*60*1000 + mn*60*1000 + sec*1000 + ms;
+ label = tempres(5:end);
+ I = find(label ~= 32);
+ label = label(I(1):end); % remove white spaces
+    if ~strncmp(char(label), 'VIDEO', 5) && ~strncmp(char(label), 'VIDEO CODING', 12)
+ line = fgetl(fid);
+ continue;
+ end
+ message = line(72:end);
+
+ % Parse message
+ [p, count] = sscanf(message, 'ExtrapolateLocalTime(%lu)=%lu ms');
+ if count == 2
+ estimatedArrivalTime = [estimatedArrivalTime; p'];
+ line = fgetl(fid);
+ continue;
+ end
+
+ [p, count] = sscanf(message, 'Packet seqNo %u of frame %lu at %lu');
+ if count == 3
+ packetTime = [packetTime; p'];
+ line = fgetl(fid);
+ continue;
+ end
+
+ [p, count] = sscanf(message, 'First packet of frame %lu at %lu');
+ if count == 2
+ firstPacketTime = [firstPacketTime; p'];
+ line = fgetl(fid);
+ continue;
+ end
+
+ [p, count] = sscanf(message, 'Decoding timestamp %lu at %lu');
+ if count == 2
+ decodeTime = [decodeTime; p'];
+ line = fgetl(fid);
+ continue;
+ end
+
+ [p, count] = sscanf(message, 'Render frame %lu at %lu. Render delay %lu, required delay %lu, max decode time %lu, min total delay %lu');
+ if count == 6
+ renderTime = [renderTime; p'];
+ line = fgetl(fid);
+ continue;
+ end
+
+ [p, count] = sscanf(message, 'Frame decoded: timeStamp=%lu decTime=%d maxDecTime=%lu, at %lu');
+ if count == 4
+ decodeCompleteTime = [decodeCompleteTime; p'];
+ line = fgetl(fid);
+ continue;
+ end
+
+ [p, count] = sscanf(message, 'Received complete frame timestamp %lu frame type %u frame size %*u at time %lu, jitter estimate was %lu');
+ if count == 4
+ completeTime = [completeTime; p'];
+ line = fgetl(fid);
+ continue;
+ end
+
+ line = fgetl(fid);
+end
+fclose(fid);
+
+t = completeTime(:,3);
+TS = completeTime(:,1);
+
+figure;
+subplot(211);
+hold on;
+slope = 0;
+
+if sum(size(packetTime)) > 0
+ % Plot the time when each packet arrives
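+    % Video RTP timestamps tick at 90 kHz, so dividing the timestamp offset
+    % by 90 converts it to milliseconds.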
+ firstTimeStamp = packetTime(1,2);
+ x = (packetTime(:,2) - firstTimeStamp)/90;
+ if flat
+ slope = x;
+ end
+ firstTime = packetTime(1,3);
+ plot(x, packetTime(:,3) - firstTime - slope, 'b.');
+else
+ % Plot the time when the first packet of a frame arrives
+ firstTimeStamp = firstPacketTime(1,1);
+ x = (firstPacketTime(:,1) - firstTimeStamp)/90;
+ if flat
+ slope = x;
+ end
+ firstTime = firstPacketTime(1,2);
+ plot(x, firstPacketTime(:,2) - firstTime - slope, 'b.');
+end
+
+% Plot the frame complete time
+if prod(size(completeTime)) > 0
+ x = (completeTime(:,1) - firstTimeStamp)/90;
+ if flat
+ slope = x;
+ end
+ plot(x, completeTime(:,3) - firstTime - slope, 'ks');
+end
+
+% Plot the time the decode starts
+if prod(size(decodeTime)) > 0
+ x = (decodeTime(:,1) - firstTimeStamp)/90;
+ if flat
+ slope = x;
+ end
+ plot(x, decodeTime(:,2) - firstTime - slope, 'r.');
+end
+
+% Plot the decode complete time
+if prod(size(decodeCompleteTime)) > 0
+ x = (decodeCompleteTime(:,1) - firstTimeStamp)/90;
+ if flat
+ slope = x;
+ end
+ plot(x, decodeCompleteTime(:,4) - firstTime - slope, 'g.');
+end
+
+if prod(size(renderTime)) > 0
+ % Plot the wanted render time in ms
+ x = (renderTime(:,1) - firstTimeStamp)/90;
+ if flat
+ slope = x;
+ end
+ plot(x, renderTime(:,2) - firstTime - slope, 'c-');
+
+ % Plot the render time if there were no render delay or decoding delay.
+ x = (renderTime(:,1) - firstTimeStamp)/90;
+ if flat
+ slope = x;
+ end
+ plot(x, renderTime(:,2) - firstTime - slope - renderTime(:, 3) - renderTime(:, 5), 'c--');
+
+ % Plot the render time if there were no render delay.
+ x = (renderTime(:,1) - firstTimeStamp)/90;
+ if flat
+ slope = x;
+ end
+    plot(x, renderTime(:,2) - firstTime - slope - renderTime(:, 3), 'b-');
+end
+
+%plot(x, 90*x, 'r-');
+
+xlabel('RTP timestamp (in ms)');
+ylabel('Time (ms)');
+legend('Packet arrives', 'Frame complete', 'Decode', 'Decode complete', 'Time to render', 'Only jitter', 'Must decode');
+
+% subplot(312);
+% hold on;
+% completeTs = completeTime(:, 1);
+% arrivalTs = estimatedArrivalTime(:, 1);
+% [c, completeIdx, arrivalIdx] = intersect(completeTs, arrivalTs);
+% %plot(completeTs(completeIdx), completeTime(completeIdx, 3) - estimatedArrivalTime(arrivalIdx, 2));
+% timeUntilComplete = completeTime(completeIdx, 3) - estimatedArrivalTime(arrivalIdx, 2);
+% devFromAvgCompleteTime = timeUntilComplete - mean(timeUntilComplete);
+% plot(completeTs(completeIdx) - completeTs(completeIdx(1)), devFromAvgCompleteTime);
+% plot(completeTime(:, 1) - completeTime(1, 1), completeTime(:, 4), 'r');
+% plot(decodeCompleteTime(:, 1) - decodeCompleteTime(1, 1), decodeCompleteTime(:, 2), 'g');
+% plot(decodeCompleteTime(:, 1) - decodeCompleteTime(1, 1), decodeCompleteTime(:, 3), 'k');
+% xlabel('RTP timestamp');
+% ylabel('Time (ms)');
+% legend('Complete time - Estimated arrival time', 'Desired jitter buffer level', 'Actual decode time', 'Max decode time', 0);
+
+if prod(size(renderTime)) > 0
+ subplot(212);
+ hold on;
+ firstTime = renderTime(1, 1);
+ targetDelay = max(renderTime(:, 3) + renderTime(:, 4) + renderTime(:, 5), renderTime(:, 6));
+ plot(renderTime(:, 1) - firstTime, renderTime(:, 3), 'r-');
+ plot(renderTime(:, 1) - firstTime, renderTime(:, 4), 'b-');
+ plot(renderTime(:, 1) - firstTime, renderTime(:, 5), 'g-');
+ plot(renderTime(:, 1) - firstTime, renderTime(:, 6), 'k-');
+ plot(renderTime(:, 1) - firstTime, targetDelay, 'c-');
+ xlabel('RTP timestamp');
+ ylabel('Time (ms)');
+ legend('Render delay', 'Jitter delay', 'Decode delay', 'Extra delay', 'Min total delay');
+end \ No newline at end of file
diff --git a/webrtc/modules/video_coding/main/test/plotTimingTest.m b/webrtc/modules/video_coding/main/test/plotTimingTest.m
new file mode 100644
index 0000000000..52a6f303cd
--- /dev/null
+++ b/webrtc/modules/video_coding/main/test/plotTimingTest.m
@@ -0,0 +1,62 @@
+function plotTimingTest(filename)
+fid=fopen(filename);
+
+%DEBUG ; ( 9:53:33:859 | 0) VIDEO:-1 ; 7132; Stochastic test 1
+%DEBUG ; ( 9:53:33:859 | 0) VIDEO CODING:-1 ; 7132; Frame decoded: timeStamp=3000 decTime=10 at 10012
+%DEBUG ; ( 9:53:33:859 | 0) VIDEO:-1 ; 7132; timeStamp=3000 clock=10037 maxWaitTime=0
+%DEBUG ; ( 9:53:33:859 | 0) VIDEO:-1 ; 7132; timeStampMs=33 renderTime=54
+line = fgetl(fid);
+decTime = [];
+waitTime = [];
+renderTime = [];
+foundStart = 0;
+testName = 'Stochastic test 1';
+while ischar(line)
+ if length(line) == 0
+ line = fgetl(fid);
+ continue;
+ end
+ lineOrig = line;
+ line = line(72:end);
+ if ~foundStart
+ if strncmp(line, testName, length(testName))
+ foundStart = 1;
+ end
+ line = fgetl(fid);
+ continue;
+ end
+ [p, count] = sscanf(line, 'Frame decoded: timeStamp=%lu decTime=%d maxDecTime=%d, at %lu');
+ if count == 4
+ decTime = [decTime; p'];
+ line = fgetl(fid);
+ continue;
+ end
+ [p, count] = sscanf(line, 'timeStamp=%u clock=%u maxWaitTime=%u');
+ if count == 3
+ waitTime = [waitTime; p'];
+ line = fgetl(fid);
+ continue;
+ end
+ [p, count] = sscanf(line, 'timeStamp=%u renderTime=%u');
+ if count == 2
+ renderTime = [renderTime; p'];
+ line = fgetl(fid);
+ continue;
+ end
+ line = fgetl(fid);
+end
+fclose(fid);
+
+% Compensate for wrap arounds and start counting from zero.
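+% RTP timestamps are unsigned 32-bit values, so a negative first difference
+% indicates the counter wrapped.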
+timeStamps = waitTime(:, 1);
+tsDiff = diff(timeStamps);
+wrapIdx = find(tsDiff < 0);
+timeStamps(wrapIdx+1:end) = 2^32 + timeStamps(wrapIdx+1:end);
+timeStamps = timeStamps - timeStamps(1);
+
+figure;
+hold on;
+plot(timeStamps, decTime(:, 2), 'r');
+plot(timeStamps, waitTime(:, 3), 'g');
+plot(timeStamps(2:end), diff(renderTime(:, 2)), 'b');
+legend('Decode time', 'Max wait time', 'Render time diff'); \ No newline at end of file
diff --git a/webrtc/modules/video_coding/main/test/receiver_tests.h b/webrtc/modules/video_coding/main/test/receiver_tests.h
new file mode 100644
index 0000000000..6d7b7beeb5
--- /dev/null
+++ b/webrtc/modules/video_coding/main/test/receiver_tests.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_TEST_RECEIVER_TESTS_H_
+#define WEBRTC_MODULES_VIDEO_CODING_TEST_RECEIVER_TESTS_H_
+
+#include "webrtc/common_types.h"
+#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp.h"
+#include "webrtc/modules/video_coding/main/interface/video_coding.h"
+#include "webrtc/modules/video_coding/main/test/test_util.h"
+#include "webrtc/modules/video_coding/main/test/video_source.h"
+#include "webrtc/typedefs.h"
+
+#include <stdio.h>
+#include <string>
+
+class RtpDataCallback : public webrtc::NullRtpData {
+ public:
+ RtpDataCallback(webrtc::VideoCodingModule* vcm) : vcm_(vcm) {}
+ virtual ~RtpDataCallback() {}
+
+ int32_t OnReceivedPayloadData(
+ const uint8_t* payload_data,
+ const size_t payload_size,
+ const webrtc::WebRtcRTPHeader* rtp_header) override {
+ return vcm_->IncomingPacket(payload_data, payload_size, *rtp_header);
+ }
+
+ private:
+ webrtc::VideoCodingModule* vcm_;
+};
+
+int RtpPlay(const CmdArgs& args);
+
+#endif // WEBRTC_MODULES_VIDEO_CODING_TEST_RECEIVER_TESTS_H_
diff --git a/webrtc/modules/video_coding/main/test/release_test.h b/webrtc/modules/video_coding/main/test/release_test.h
new file mode 100644
index 0000000000..e90dcaef01
--- /dev/null
+++ b/webrtc/modules/video_coding/main/test/release_test.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RELEASE_TEST_H
+#define RELEASE_TEST_H
+
+int ReleaseTest();
+int ReleaseTestPart2();
+
+#endif
diff --git a/webrtc/modules/video_coding/main/test/rtp_player.cc b/webrtc/modules/video_coding/main/test/rtp_player.cc
new file mode 100644
index 0000000000..6717cf227d
--- /dev/null
+++ b/webrtc/modules/video_coding/main/test/rtp_player.cc
@@ -0,0 +1,493 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/video_coding/main/test/rtp_player.h"
+
+#include <stdio.h>
+
+#include <map>
+
+#include "webrtc/base/scoped_ptr.h"
+#include "webrtc/modules/rtp_rtcp/interface/rtp_header_parser.h"
+#include "webrtc/modules/rtp_rtcp/interface/rtp_payload_registry.h"
+#include "webrtc/modules/rtp_rtcp/interface/rtp_receiver.h"
+#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp.h"
+#include "webrtc/modules/video_coding/main/source/internal_defines.h"
+#include "webrtc/modules/video_coding/main/test/test_util.h"
+#include "webrtc/system_wrappers/include/clock.h"
+#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
+#include "webrtc/test/rtp_file_reader.h"
+
+#if 1
+# define DEBUG_LOG1(text, arg)
+#else
+# define DEBUG_LOG1(text, arg) (printf(text "\n", arg))
+#endif
+
+namespace webrtc {
+namespace rtpplayer {
+
+enum {
+ kMaxPacketBufferSize = 4096,
+ kDefaultTransmissionTimeOffsetExtensionId = 2
+};
+
+class RawRtpPacket {
+ public:
+ RawRtpPacket(const uint8_t* data, size_t length, uint32_t ssrc,
+ uint16_t seq_num)
+ : data_(new uint8_t[length]),
+ length_(length),
+ resend_time_ms_(-1),
+ ssrc_(ssrc),
+ seq_num_(seq_num) {
+ assert(data);
+ memcpy(data_.get(), data, length_);
+ }
+
+ const uint8_t* data() const { return data_.get(); }
+ size_t length() const { return length_; }
+ int64_t resend_time_ms() const { return resend_time_ms_; }
+ void set_resend_time_ms(int64_t timeMs) { resend_time_ms_ = timeMs; }
+ uint32_t ssrc() const { return ssrc_; }
+ uint16_t seq_num() const { return seq_num_; }
+
+ private:
+ rtc::scoped_ptr<uint8_t[]> data_;
+ size_t length_;
+ int64_t resend_time_ms_;
+ uint32_t ssrc_;
+ uint16_t seq_num_;
+
+ RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(RawRtpPacket);
+};
+
+class LostPackets {
+ public:
+ LostPackets(Clock* clock, int64_t rtt_ms)
+ : crit_sect_(CriticalSectionWrapper::CreateCriticalSection()),
+ debug_file_(fopen("PacketLossDebug.txt", "w")),
+ loss_count_(0),
+ packets_(),
+ clock_(clock),
+ rtt_ms_(rtt_ms) {
+ assert(clock);
+ }
+
+ ~LostPackets() {
+ if (debug_file_) {
+ fclose(debug_file_);
+ debug_file_ = NULL;
+ }
+ while (!packets_.empty()) {
+ delete packets_.back();
+ packets_.pop_back();
+ }
+ }
+
+ void AddPacket(RawRtpPacket* packet) {
+ assert(packet);
+ printf("Throw: %08x:%u\n", packet->ssrc(), packet->seq_num());
+ CriticalSectionScoped cs(crit_sect_.get());
+ if (debug_file_) {
+ fprintf(debug_file_, "%u Lost packet: %u\n", loss_count_,
+ packet->seq_num());
+ }
+ packets_.push_back(packet);
+ loss_count_++;
+ }
+
+  void SetResendTime(uint32_t ssrc, uint16_t resendSeqNum) {
+ int64_t resend_time_ms = clock_->TimeInMilliseconds() + rtt_ms_;
+ int64_t now_ms = clock_->TimeInMilliseconds();
+ CriticalSectionScoped cs(crit_sect_.get());
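+    // The +10 ms guard below prevents a duplicate NACK from rescheduling a
+    // resend that is still pending or only recently went out.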
+ for (RtpPacketIterator it = packets_.begin(); it != packets_.end(); ++it) {
+ RawRtpPacket* packet = *it;
+ if (ssrc == packet->ssrc() && resendSeqNum == packet->seq_num() &&
+ packet->resend_time_ms() + 10 < now_ms) {
+ if (debug_file_) {
+ fprintf(debug_file_, "Resend %u at %u\n", packet->seq_num(),
+ MaskWord64ToUWord32(resend_time_ms));
+ }
+ packet->set_resend_time_ms(resend_time_ms);
+ return;
+ }
+ }
+ // We may get here since the captured stream may itself be missing packets.
+ }
+
+ RawRtpPacket* NextPacketToResend(int64_t time_now) {
+ CriticalSectionScoped cs(crit_sect_.get());
+ for (RtpPacketIterator it = packets_.begin(); it != packets_.end(); ++it) {
+ RawRtpPacket* packet = *it;
+ if (time_now >= packet->resend_time_ms() &&
+ packet->resend_time_ms() != -1) {
+ packets_.erase(it);
+ return packet;
+ }
+ }
+ return NULL;
+ }
+
+ int NumberOfPacketsToResend() const {
+ CriticalSectionScoped cs(crit_sect_.get());
+ int count = 0;
+ for (ConstRtpPacketIterator it = packets_.begin(); it != packets_.end();
+ ++it) {
+ if ((*it)->resend_time_ms() >= 0) {
+ count++;
+ }
+ }
+ return count;
+ }
+
+ void LogPacketResent(RawRtpPacket* packet) {
+ int64_t now_ms = clock_->TimeInMilliseconds();
+ CriticalSectionScoped cs(crit_sect_.get());
+ if (debug_file_) {
+ fprintf(debug_file_, "Resent %u at %u\n", packet->seq_num(),
+ MaskWord64ToUWord32(now_ms));
+ }
+ }
+
+ void Print() const {
+ CriticalSectionScoped cs(crit_sect_.get());
+    printf("Lost packets: %d\n", loss_count_);
+    printf("Packets waiting to be resent: %d\n", NumberOfPacketsToResend());
+    printf("Packets still lost: %zu\n", packets_.size());
+ printf("Sequence numbers:\n");
+ for (ConstRtpPacketIterator it = packets_.begin(); it != packets_.end();
+ ++it) {
+ printf("%u, ", (*it)->seq_num());
+ }
+ printf("\n");
+ }
+
+ private:
+ typedef std::vector<RawRtpPacket*> RtpPacketList;
+ typedef RtpPacketList::iterator RtpPacketIterator;
+ typedef RtpPacketList::const_iterator ConstRtpPacketIterator;
+
+ rtc::scoped_ptr<CriticalSectionWrapper> crit_sect_;
+ FILE* debug_file_;
+ int loss_count_;
+ RtpPacketList packets_;
+ Clock* clock_;
+ int64_t rtt_ms_;
+
+ RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(LostPackets);
+};
+
+class SsrcHandlers {
+ public:
+ SsrcHandlers(PayloadSinkFactoryInterface* payload_sink_factory,
+ const PayloadTypes& payload_types)
+ : payload_sink_factory_(payload_sink_factory),
+ payload_types_(payload_types),
+ handlers_() {
+ assert(payload_sink_factory);
+ }
+
+ ~SsrcHandlers() {
+ while (!handlers_.empty()) {
+ delete handlers_.begin()->second;
+ handlers_.erase(handlers_.begin());
+ }
+ }
+
+ int RegisterSsrc(uint32_t ssrc, LostPackets* lost_packets, Clock* clock) {
+ if (handlers_.count(ssrc) > 0) {
+ return 0;
+ }
+ DEBUG_LOG1("Registering handler for ssrc=%08x", ssrc);
+
+ rtc::scoped_ptr<Handler> handler(
+ new Handler(ssrc, payload_types_, lost_packets));
+ handler->payload_sink_.reset(payload_sink_factory_->Create(handler.get()));
+ if (handler->payload_sink_.get() == NULL) {
+ return -1;
+ }
+
+ RtpRtcp::Configuration configuration;
+ configuration.clock = clock;
+ configuration.audio = false;
+ handler->rtp_module_.reset(RtpReceiver::CreateVideoReceiver(
+ configuration.clock, handler->payload_sink_.get(), NULL,
+ handler->rtp_payload_registry_.get()));
+ if (handler->rtp_module_.get() == NULL) {
+ return -1;
+ }
+
+ handler->rtp_module_->SetNACKStatus(kNackOff);
+ handler->rtp_header_parser_->RegisterRtpHeaderExtension(
+ kRtpExtensionTransmissionTimeOffset,
+ kDefaultTransmissionTimeOffsetExtensionId);
+
+ for (PayloadTypesIterator it = payload_types_.begin();
+ it != payload_types_.end(); ++it) {
+ VideoCodec codec;
+ memset(&codec, 0, sizeof(codec));
+ strncpy(codec.plName, it->name().c_str(), sizeof(codec.plName)-1);
+ codec.plType = it->payload_type();
+ codec.codecType = it->codec_type();
+ if (handler->rtp_module_->RegisterReceivePayload(codec.plName,
+ codec.plType,
+ 90000,
+ 0,
+ codec.maxBitrate) < 0) {
+ return -1;
+ }
+ }
+
+ handlers_[ssrc] = handler.release();
+ return 0;
+ }
+
+ void IncomingPacket(const uint8_t* data, size_t length) {
+ for (HandlerMapIt it = handlers_.begin(); it != handlers_.end(); ++it) {
+ if (!it->second->rtp_header_parser_->IsRtcp(data, length)) {
+ RTPHeader header;
+ it->second->rtp_header_parser_->Parse(data, length, &header);
+ PayloadUnion payload_specific;
+ it->second->rtp_payload_registry_->GetPayloadSpecifics(
+ header.payloadType, &payload_specific);
+ it->second->rtp_module_->IncomingRtpPacket(header, data, length,
+ payload_specific, true);
+ }
+ }
+ }
+
+ private:
+ class Handler : public RtpStreamInterface {
+ public:
+ Handler(uint32_t ssrc, const PayloadTypes& payload_types,
+ LostPackets* lost_packets)
+ : rtp_header_parser_(RtpHeaderParser::Create()),
+ rtp_payload_registry_(new RTPPayloadRegistry(
+ RTPPayloadStrategy::CreateStrategy(false))),
+ rtp_module_(),
+ payload_sink_(),
+ ssrc_(ssrc),
+ payload_types_(payload_types),
+ lost_packets_(lost_packets) {
+ assert(lost_packets);
+ }
+ virtual ~Handler() {}
+
+ virtual void ResendPackets(const uint16_t* sequence_numbers,
+ uint16_t length) {
+ assert(sequence_numbers);
+ for (uint16_t i = 0; i < length; i++) {
+ lost_packets_->SetResendTime(ssrc_, sequence_numbers[i]);
+ }
+ }
+
+ virtual uint32_t ssrc() const { return ssrc_; }
+ virtual const PayloadTypes& payload_types() const {
+ return payload_types_;
+ }
+
+ rtc::scoped_ptr<RtpHeaderParser> rtp_header_parser_;
+ rtc::scoped_ptr<RTPPayloadRegistry> rtp_payload_registry_;
+ rtc::scoped_ptr<RtpReceiver> rtp_module_;
+ rtc::scoped_ptr<PayloadSinkInterface> payload_sink_;
+
+ private:
+ uint32_t ssrc_;
+ const PayloadTypes& payload_types_;
+ LostPackets* lost_packets_;
+
+ RTC_DISALLOW_COPY_AND_ASSIGN(Handler);
+ };
+
+ typedef std::map<uint32_t, Handler*> HandlerMap;
+ typedef std::map<uint32_t, Handler*>::iterator HandlerMapIt;
+
+ PayloadSinkFactoryInterface* payload_sink_factory_;
+ PayloadTypes payload_types_;
+ HandlerMap handlers_;
+
+ RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(SsrcHandlers);
+};
+
+class RtpPlayerImpl : public RtpPlayerInterface {
+ public:
+ RtpPlayerImpl(PayloadSinkFactoryInterface* payload_sink_factory,
+ const PayloadTypes& payload_types,
+ Clock* clock,
+ rtc::scoped_ptr<test::RtpFileReader>* packet_source,
+ float loss_rate,
+ int64_t rtt_ms,
+ bool reordering)
+ : ssrc_handlers_(payload_sink_factory, payload_types),
+ clock_(clock),
+ next_rtp_time_(0),
+ first_packet_(true),
+ first_packet_rtp_time_(0),
+ first_packet_time_ms_(0),
+ loss_rate_(loss_rate),
+ lost_packets_(clock, rtt_ms),
+ resend_packet_count_(0),
+ no_loss_startup_(100),
+ end_of_file_(false),
+        reordering_(reordering),
+ reorder_buffer_() {
+ assert(clock);
+ assert(packet_source);
+ assert(packet_source->get());
+ packet_source_.swap(*packet_source);
+ srand(321);
+ }
+
+ virtual ~RtpPlayerImpl() {}
+
+ virtual int NextPacket(int64_t time_now) {
+ // Send any packets ready to be resent.
+ for (RawRtpPacket* packet = lost_packets_.NextPacketToResend(time_now);
+ packet != NULL;
+ packet = lost_packets_.NextPacketToResend(time_now)) {
+ int ret = SendPacket(packet->data(), packet->length());
+ if (ret > 0) {
+ printf("Resend: %08x:%u\n", packet->ssrc(), packet->seq_num());
+ lost_packets_.LogPacketResent(packet);
+ resend_packet_count_++;
+ }
+ delete packet;
+ if (ret < 0) {
+ return ret;
+ }
+ }
+
+ // Send any packets from packet source.
+ if (!end_of_file_ && (TimeUntilNextPacket() == 0 || first_packet_)) {
+ if (first_packet_) {
+ if (!packet_source_->NextPacket(&next_packet_))
+ return 0;
+ first_packet_rtp_time_ = next_packet_.time_ms;
+ first_packet_time_ms_ = clock_->TimeInMilliseconds();
+ first_packet_ = false;
+ }
+
+      if (reordering_ && reorder_buffer_.get() == NULL) {
+        // Hold this packet back and advance to the following one, so the
+        // pair is delivered in swapped order.
+        reorder_buffer_.reset(
+            new RawRtpPacket(next_packet_.data, next_packet_.length, 0, 0));
+        if (!packet_source_->NextPacket(&next_packet_)) {
+          end_of_file_ = true;
+        }
+        return 0;
+      }
+ int ret = SendPacket(next_packet_.data, next_packet_.length);
+ if (reorder_buffer_.get()) {
+ SendPacket(reorder_buffer_->data(), reorder_buffer_->length());
+ reorder_buffer_.reset(NULL);
+ }
+ if (ret < 0) {
+ return ret;
+ }
+
+ if (!packet_source_->NextPacket(&next_packet_)) {
+ end_of_file_ = true;
+ return 0;
+      } else if (next_packet_.length == 0) {
+        return 0;
+      }
+      // The packet read ahead becomes due once its capture-time offset,
+      // relative to the first packet, has elapsed on the local clock.
+      next_rtp_time_ = next_packet_.time_ms;
+ }
+
+ if (end_of_file_ && lost_packets_.NumberOfPacketsToResend() == 0) {
+ return 1;
+ }
+ return 0;
+ }
+
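+  // Time left until the packet read ahead from the file is due: its
+  // capture-time offset from the first packet, minus the wall-clock time
+  // elapsed since the first packet was sent.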
+ virtual uint32_t TimeUntilNextPacket() const {
+ int64_t time_left = (next_rtp_time_ - first_packet_rtp_time_) -
+ (clock_->TimeInMilliseconds() - first_packet_time_ms_);
+ if (time_left < 0) {
+ return 0;
+ }
+ return static_cast<uint32_t>(time_left);
+ }
+
+ virtual void Print() const {
+ printf("Resent packets: %u\n", resend_packet_count_);
+ lost_packets_.Print();
+ }
+
+ private:
+ int SendPacket(const uint8_t* data, size_t length) {
+ assert(data);
+ assert(length > 0);
+
+ rtc::scoped_ptr<RtpHeaderParser> rtp_header_parser(
+ RtpHeaderParser::Create());
+ if (!rtp_header_parser->IsRtcp(data, length)) {
+ RTPHeader header;
+ if (!rtp_header_parser->Parse(data, length, &header)) {
+ return -1;
+ }
+ uint32_t ssrc = header.ssrc;
+ if (ssrc_handlers_.RegisterSsrc(ssrc, &lost_packets_, clock_) < 0) {
+      DEBUG_LOG1("Unable to register ssrc: %u", ssrc);
+ return -1;
+ }
+
+      if (no_loss_startup_ > 0) {
+        no_loss_startup_--;
+      } else if ((rand() + 1.0) / (RAND_MAX + 1.0) < loss_rate_) {
+        // Drop the packet, but keep a copy so it can be served on resend
+        // requests (NACK).
+        uint16_t seq_num = header.sequenceNumber;
+        lost_packets_.AddPacket(new RawRtpPacket(data, length, ssrc, seq_num));
+        DEBUG_LOG1("Dropped packet: %d!", seq_num);
+        return 0;
+      }
+ }
+
+ ssrc_handlers_.IncomingPacket(data, length);
+ return 1;
+ }
+
+ SsrcHandlers ssrc_handlers_;
+ Clock* clock_;
+ rtc::scoped_ptr<test::RtpFileReader> packet_source_;
+ test::RtpPacket next_packet_;
+  int64_t next_rtp_time_;
+ bool first_packet_;
+ int64_t first_packet_rtp_time_;
+ int64_t first_packet_time_ms_;
+ float loss_rate_;
+ LostPackets lost_packets_;
+ uint32_t resend_packet_count_;
+ uint32_t no_loss_startup_;
+ bool end_of_file_;
+ bool reordering_;
+ rtc::scoped_ptr<RawRtpPacket> reorder_buffer_;
+
+ RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(RtpPlayerImpl);
+};
+
+RtpPlayerInterface* Create(const std::string& input_filename,
+ PayloadSinkFactoryInterface* payload_sink_factory, Clock* clock,
+ const PayloadTypes& payload_types, float loss_rate, int64_t rtt_ms,
+ bool reordering) {
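+  // Try reading the file as rtpdump first; fall back to pcap if that fails.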
+ rtc::scoped_ptr<test::RtpFileReader> packet_source(
+ test::RtpFileReader::Create(test::RtpFileReader::kRtpDump,
+ input_filename));
+ if (packet_source.get() == NULL) {
+ packet_source.reset(test::RtpFileReader::Create(test::RtpFileReader::kPcap,
+ input_filename));
+ if (packet_source.get() == NULL) {
+ return NULL;
+ }
+ }
+
+ rtc::scoped_ptr<RtpPlayerImpl> impl(
+ new RtpPlayerImpl(payload_sink_factory, payload_types, clock,
+ &packet_source, loss_rate, rtt_ms, reordering));
+ return impl.release();
+}
+} // namespace rtpplayer
+} // namespace webrtc
diff --git a/webrtc/modules/video_coding/main/test/rtp_player.h b/webrtc/modules/video_coding/main/test/rtp_player.h
new file mode 100644
index 0000000000..7459231416
--- /dev/null
+++ b/webrtc/modules/video_coding/main/test/rtp_player.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_TEST_RTP_PLAYER_H_
+#define WEBRTC_MODULES_VIDEO_CODING_TEST_RTP_PLAYER_H_
+
+#include <string>
+#include <vector>
+
+#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
+#include "webrtc/modules/video_coding/main/interface/video_coding_defines.h"
+
+namespace webrtc {
+class Clock;
+
+namespace rtpplayer {
+
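+// Binds an RTP payload type number to a codec name and VideoCodecType.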
+class PayloadCodecTuple {
+ public:
+ PayloadCodecTuple(uint8_t payload_type, const std::string& codec_name,
+ VideoCodecType codec_type)
+ : name_(codec_name),
+ payload_type_(payload_type),
+ codec_type_(codec_type) {
+ }
+
+ const std::string& name() const { return name_; }
+ uint8_t payload_type() const { return payload_type_; }
+ VideoCodecType codec_type() const { return codec_type_; }
+
+ private:
+ std::string name_;
+ uint8_t payload_type_;
+ VideoCodecType codec_type_;
+};
+
+typedef std::vector<PayloadCodecTuple> PayloadTypes;
+typedef std::vector<PayloadCodecTuple>::const_iterator PayloadTypesIterator;
+
+// Implemented by RtpPlayer and given to client as a means to retrieve
+// information about a specific RTP stream.
+class RtpStreamInterface {
+ public:
+ virtual ~RtpStreamInterface() {}
+
+ // Ask for missing packets to be resent.
+ virtual void ResendPackets(const uint16_t* sequence_numbers,
+ uint16_t length) = 0;
+
+ virtual uint32_t ssrc() const = 0;
+ virtual const PayloadTypes& payload_types() const = 0;
+};
+
+// Implemented by a sink. Wraps RtpData because its destructor is protected.
+class PayloadSinkInterface : public RtpData {
+ public:
+ virtual ~PayloadSinkInterface() {}
+};
+
+// Implemented to provide a sink for RTP data, such as hooking up a VCM to
+// the incoming RTP stream.
+class PayloadSinkFactoryInterface {
+ public:
+ virtual ~PayloadSinkFactoryInterface() {}
+
+  // Returns NULL if the sink could not be created. 'stream' is guaranteed
+  // to be valid for the lifetime of the returned sink. The returned object
+  // is owned by the caller (the RtpPlayer).
+ virtual PayloadSinkInterface* Create(RtpStreamInterface* stream) = 0;
+};
+
+// The client's view of an RtpPlayer.
+class RtpPlayerInterface {
+ public:
+ virtual ~RtpPlayerInterface() {}
+
+  virtual int NextPacket(int64_t time_now) = 0;
+ virtual uint32_t TimeUntilNextPacket() const = 0;
+ virtual void Print() const = 0;
+};
+
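+// Example usage (a sketch; "factory" is any PayloadSinkFactoryInterface
+// implementation, e.g. VcmPayloadSinkFactory, and "payload_types" describes
+// the payload types present in the file):
+//
+//   webrtc::SimulatedClock clock(0);
+//   rtc::scoped_ptr<RtpPlayerInterface> player(Create(
+//       "input.rtp", &factory, &clock, payload_types, 0.05f, 100, false));
+//   while (player->NextPacket(clock.TimeInMilliseconds()) == 0)
+//     clock.AdvanceTimeMilliseconds(1);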
+RtpPlayerInterface* Create(const std::string& input_filename,
+    PayloadSinkFactoryInterface* payload_sink_factory, Clock* clock,
+    const PayloadTypes& payload_types, float loss_rate, int64_t rtt_ms,
+    bool reordering);
+
+} // namespace rtpplayer
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_VIDEO_CODING_TEST_RTP_PLAYER_H_
diff --git a/webrtc/modules/video_coding/main/test/subfigure.m b/webrtc/modules/video_coding/main/test/subfigure.m
new file mode 100644
index 0000000000..eadfcb69bd
--- /dev/null
+++ b/webrtc/modules/video_coding/main/test/subfigure.m
@@ -0,0 +1,30 @@
+function H = subfigure(m, n, p)
+%
+% H = SUBFIGURE(m, n, p)
+%
+% Create a new figure window and adjust position and size such that it will
+% become the p-th tile in an m-by-n matrix of windows. (The interpretation of
+% m, n, and p is the same as for SUBPLOT.)
+%
+% Henrik Lundin, 2009-01-19
+%
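+%
+% Example, tiling four figure windows in a 2-by-2 grid:
+%   for p = 1:4, subfigure(2, 2, p); end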
+
+
+H = figure;
+
+[j, i] = ind2sub([n m], p);
+scrsz = get(0,'ScreenSize'); % get screen size
+%scrsz = [1, 1, 1600, 1200];
+
+taskbarSize = 58;
+windowbarSize = 68;
+windowBorder = 4;
+
+scrsz(2) = scrsz(2) + taskbarSize;
+scrsz(4) = scrsz(4) - taskbarSize;
+
+set(H, 'position', [(j-1)/n * scrsz(3) + scrsz(1) + windowBorder,...
+ (m-i)/m * scrsz(4) + scrsz(2) + windowBorder, ...
+ scrsz(3)/n - (windowBorder + windowBorder),...
+ scrsz(4)/m - (windowbarSize + windowBorder + windowBorder)]);
+
diff --git a/webrtc/modules/video_coding/main/test/test_util.cc b/webrtc/modules/video_coding/main/test/test_util.cc
new file mode 100644
index 0000000000..cd858da288
--- /dev/null
+++ b/webrtc/modules/video_coding/main/test/test_util.cc
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/video_coding/main/test/test_util.h"
+
+#include <assert.h>
+#include <math.h>
+#include <string.h>
+
+#include <iomanip>
+#include <sstream>
+
+#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
+#include "webrtc/modules/video_coding/main/source/internal_defines.h"
+#include "webrtc/test/testsupport/fileutils.h"
+
+CmdArgs::CmdArgs()
+ : codecName("VP8"),
+ codecType(webrtc::kVideoCodecVP8),
+ width(352),
+ height(288),
+ rtt(0),
+ inputFile(webrtc::test::ProjectRootPath() + "/resources/foreman_cif.yuv"),
+ outputFile(webrtc::test::OutputPath() +
+ "video_coding_test_output_352x288.yuv") {
+}
+
+namespace {
+
+void SplitFilename(const std::string& filename, std::string* basename,
+ std::string* extension) {
+ assert(basename);
+ assert(extension);
+
+ std::string::size_type idx;
+ idx = filename.rfind('.');
+
+  if (idx != std::string::npos) {
+ *basename = filename.substr(0, idx);
+ *extension = filename.substr(idx + 1);
+ } else {
+ *basename = filename;
+ *extension = "";
+ }
+}
+
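+// Builds "<basename>_<count>.<width>_<height>.<extension>", e.g.
+// AppendWidthHeightCount("out.yuv", 352, 288, 0) returns "out_0.352_288.yuv".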
+std::string AppendWidthHeightCount(const std::string& filename, int width,
+ int height, int count) {
+ std::string basename;
+ std::string extension;
+ SplitFilename(filename, &basename, &extension);
+ std::stringstream ss;
+ ss << basename << "_" << count << "." << width << "_" << height << "." <<
+ extension;
+ return ss.str();
+}
+
+} // namespace
+
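+// Writes decoded frames to "<basename>_<ssrc in hex>.<extension>"; the file
+// is reopened under a new name (see AppendWidthHeightCount above) whenever
+// the incoming frame size changes.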
+FileOutputFrameReceiver::FileOutputFrameReceiver(
+ const std::string& base_out_filename, uint32_t ssrc)
+ : out_filename_(),
+ out_file_(NULL),
+ timing_file_(NULL),
+ width_(0),
+ height_(0),
+ count_(0) {
+ std::string basename;
+ std::string extension;
+ if (base_out_filename.empty()) {
+ basename = webrtc::test::OutputPath() + "rtp_decoded";
+ extension = "yuv";
+ } else {
+ SplitFilename(base_out_filename, &basename, &extension);
+ }
+ std::stringstream ss;
+ ss << basename << "_" << std::hex << std::setw(8) << std::setfill('0') <<
+ ssrc << "." << extension;
+ out_filename_ = ss.str();
+}
+
+FileOutputFrameReceiver::~FileOutputFrameReceiver() {
+ if (timing_file_ != NULL) {
+ fclose(timing_file_);
+ }
+ if (out_file_ != NULL) {
+ fclose(out_file_);
+ }
+}
+
+int32_t FileOutputFrameReceiver::FrameToRender(
+ webrtc::VideoFrame& video_frame) {
+ if (timing_file_ == NULL) {
+ std::string basename;
+ std::string extension;
+ SplitFilename(out_filename_, &basename, &extension);
+ timing_file_ = fopen((basename + "_renderTiming.txt").c_str(), "w");
+ if (timing_file_ == NULL) {
+ return -1;
+ }
+ }
+ if (out_file_ == NULL || video_frame.width() != width_ ||
+ video_frame.height() != height_) {
+ if (out_file_) {
+ fclose(out_file_);
+ }
+ printf("New size: %dx%d\n", video_frame.width(), video_frame.height());
+ width_ = video_frame.width();
+ height_ = video_frame.height();
+ std::string filename_with_width_height = AppendWidthHeightCount(
+ out_filename_, width_, height_, count_);
+ ++count_;
+ out_file_ = fopen(filename_with_width_height.c_str(), "wb");
+ if (out_file_ == NULL) {
+ return -1;
+ }
+ }
+ fprintf(timing_file_, "%u, %u\n", video_frame.timestamp(),
+ webrtc::MaskWord64ToUWord32(video_frame.render_time_ms()));
+ if (PrintVideoFrame(video_frame, out_file_) < 0) {
+ return -1;
+ }
+ return 0;
+}
+
+webrtc::RtpVideoCodecTypes ConvertCodecType(const char* plname) {
+  if (strncmp(plname, "VP8", 3) == 0) {
+ return webrtc::kRtpVideoVp8;
+ } else {
+ // Default value.
+ return webrtc::kRtpVideoGeneric;
+ }
+}
diff --git a/webrtc/modules/video_coding/main/test/test_util.h b/webrtc/modules/video_coding/main/test/test_util.h
new file mode 100644
index 0000000000..27f66fe011
--- /dev/null
+++ b/webrtc/modules/video_coding/main/test/test_util.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_TEST_TEST_UTIL_H_
+#define WEBRTC_MODULES_VIDEO_CODING_TEST_TEST_UTIL_H_
+
+/*
+ * General declarations used throughout the VCM offline tests.
+ */
+
+#include <string>
+
+#include "webrtc/base/constructormagic.h"
+#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/video_coding/main/interface/video_coding.h"
+#include "webrtc/system_wrappers/include/event_wrapper.h"
+
+enum { kMaxNackListSize = 250 };
+enum { kMaxPacketAgeToNack = 450 };
+
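+// Event that never fires: Set() and the timer calls are no-ops and Wait()
+// reports a timeout immediately. Lets the offline tests drive the VCM from
+// a simulated clock instead of real timer events.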
+class NullEvent : public webrtc::EventWrapper {
+ public:
+ virtual ~NullEvent() {}
+
+ virtual bool Set() { return true; }
+
+ virtual bool Reset() { return true; }
+
+ virtual webrtc::EventTypeWrapper Wait(unsigned long max_time) {
+ return webrtc::kEventTimeout;
+ }
+
+ virtual bool StartTimer(bool periodic, unsigned long time) { return true; }
+
+ virtual bool StopTimer() { return true; }
+};
+
+class NullEventFactory : public webrtc::EventFactory {
+ public:
+ virtual ~NullEventFactory() {}
+
+ virtual webrtc::EventWrapper* CreateEvent() {
+ return new NullEvent;
+ }
+};
+
+class FileOutputFrameReceiver : public webrtc::VCMReceiveCallback {
+ public:
+ FileOutputFrameReceiver(const std::string& base_out_filename, uint32_t ssrc);
+ virtual ~FileOutputFrameReceiver();
+
+ // VCMReceiveCallback
+ virtual int32_t FrameToRender(webrtc::VideoFrame& video_frame);
+
+ private:
+ std::string out_filename_;
+ FILE* out_file_;
+ FILE* timing_file_;
+ int width_;
+ int height_;
+ int count_;
+
+ RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(FileOutputFrameReceiver);
+};
+
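+// Command-line options for the VCM offline tests (see tester_main.cc).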
+class CmdArgs {
+ public:
+ CmdArgs();
+
+ std::string codecName;
+ webrtc::VideoCodecType codecType;
+ int width;
+ int height;
+ int rtt;
+ std::string inputFile;
+ std::string outputFile;
+};
+
+#endif  // WEBRTC_MODULES_VIDEO_CODING_TEST_TEST_UTIL_H_
diff --git a/webrtc/modules/video_coding/main/test/tester_main.cc b/webrtc/modules/video_coding/main/test/tester_main.cc
new file mode 100644
index 0000000000..2885f00bd5
--- /dev/null
+++ b/webrtc/modules/video_coding/main/test/tester_main.cc
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "gflags/gflags.h"
+#include "webrtc/modules/video_coding/main/interface/video_coding.h"
+#include "webrtc/modules/video_coding/main/test/receiver_tests.h"
+#include "webrtc/test/testsupport/fileutils.h"
+
+DEFINE_string(codec, "VP8", "Codec to use (VP8, VP9 or I420).");
+DEFINE_int32(width, 352, "Width in pixels of the frames in the input file.");
+DEFINE_int32(height, 288, "Height in pixels of the frames in the input file.");
+DEFINE_int32(rtt, 0, "RTT (round-trip time), in milliseconds.");
+DEFINE_string(input_filename, webrtc::test::ProjectRootPath() +
+ "/resources/foreman_cif.yuv", "Input file.");
+DEFINE_string(output_filename, webrtc::test::OutputPath() +
+ "video_coding_test_output_352x288.yuv", "Output file.");
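+
+// Example invocation (the binary name depends on the build target):
+//   vcm_tester --codec=VP8 --width=352 --height=288 --rtt=100 \
+//       --input_filename=resources/foreman_cif.yuv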
+
+using namespace webrtc;
+
+/*
+ * Build with EVENT_DEBUG defined to run the tests with simulated events.
+ */
+
+int vcmMacrosTests = 0;
+int vcmMacrosErrors = 0;
+
+int ParseArguments(CmdArgs& args) {
+ args.width = FLAGS_width;
+ args.height = FLAGS_height;
+ if (args.width < 1 || args.height < 1) {
+ return -1;
+ }
+ args.codecName = FLAGS_codec;
+ if (args.codecName == "VP8") {
+ args.codecType = kVideoCodecVP8;
+ } else if (args.codecName == "VP9") {
+ args.codecType = kVideoCodecVP9;
+ } else if (args.codecName == "I420") {
+ args.codecType = kVideoCodecI420;
+ } else {
+ printf("Invalid codec: %s\n", args.codecName.c_str());
+ return -1;
+ }
+ args.inputFile = FLAGS_input_filename;
+ args.outputFile = FLAGS_output_filename;
+ args.rtt = FLAGS_rtt;
+ return 0;
+}
+
+int main(int argc, char **argv) {
+ // Initialize WebRTC fileutils.h so paths to resources can be resolved.
+ webrtc::test::SetExecutablePath(argv[0]);
+ google::ParseCommandLineFlags(&argc, &argv, true);
+
+ CmdArgs args;
+ if (ParseArguments(args) != 0) {
+ printf("Unable to parse input arguments\n");
+ return -1;
+ }
+
+ printf("Running video coding tests...\n");
+ return RtpPlay(args);
+}
diff --git a/webrtc/modules/video_coding/main/test/vcm_payload_sink_factory.cc b/webrtc/modules/video_coding/main/test/vcm_payload_sink_factory.cc
new file mode 100644
index 0000000000..2d874cd1bd
--- /dev/null
+++ b/webrtc/modules/video_coding/main/test/vcm_payload_sink_factory.cc
@@ -0,0 +1,210 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/video_coding/main/test/vcm_payload_sink_factory.h"
+
+#include <assert.h>
+
+#include <algorithm>
+
+#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp.h"
+#include "webrtc/modules/video_coding/main/test/test_util.h"
+#include "webrtc/system_wrappers/include/clock.h"
+#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
+
+namespace webrtc {
+namespace rtpplayer {
+
+class VcmPayloadSinkFactory::VcmPayloadSink
+ : public PayloadSinkInterface,
+ public VCMPacketRequestCallback {
+ public:
+ VcmPayloadSink(VcmPayloadSinkFactory* factory,
+ RtpStreamInterface* stream,
+ rtc::scoped_ptr<VideoCodingModule>* vcm,
+ rtc::scoped_ptr<FileOutputFrameReceiver>* frame_receiver)
+ : factory_(factory), stream_(stream), vcm_(), frame_receiver_() {
+ assert(factory);
+ assert(stream);
+ assert(vcm);
+ assert(vcm->get());
+ assert(frame_receiver);
+ assert(frame_receiver->get());
+ vcm_.swap(*vcm);
+ frame_receiver_.swap(*frame_receiver);
+ vcm_->RegisterPacketRequestCallback(this);
+ vcm_->RegisterReceiveCallback(frame_receiver_.get());
+ }
+
+ virtual ~VcmPayloadSink() {
+ factory_->Remove(this);
+ }
+
+ // PayloadSinkInterface
+ int32_t OnReceivedPayloadData(const uint8_t* payload_data,
+ const size_t payload_size,
+ const WebRtcRTPHeader* rtp_header) override {
+ return vcm_->IncomingPacket(payload_data, payload_size, *rtp_header);
+ }
+
+ bool OnRecoveredPacket(const uint8_t* packet, size_t packet_length) override {
+ // We currently don't handle FEC.
+ return true;
+ }
+
+ // VCMPacketRequestCallback
+ int32_t ResendPackets(const uint16_t* sequence_numbers,
+ uint16_t length) override {
+ stream_->ResendPackets(sequence_numbers, length);
+ return 0;
+ }
+
+ int DecodeAndProcess(bool should_decode, bool decode_dual_frame) {
+ if (should_decode) {
+ if (vcm_->Decode() < 0) {
+ return -1;
+ }
+ }
+ return Process() ? 0 : -1;
+ }
+
+ bool Process() {
+ if (vcm_->TimeUntilNextProcess() <= 0) {
+ if (vcm_->Process() < 0) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ bool Decode() {
+ vcm_->Decode(10000);
+ return true;
+ }
+
+ private:
+ VcmPayloadSinkFactory* factory_;
+ RtpStreamInterface* stream_;
+ rtc::scoped_ptr<VideoCodingModule> vcm_;
+ rtc::scoped_ptr<FileOutputFrameReceiver> frame_receiver_;
+
+ RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(VcmPayloadSink);
+};
+
+VcmPayloadSinkFactory::VcmPayloadSinkFactory(
+ const std::string& base_out_filename,
+ Clock* clock,
+ bool protection_enabled,
+ VCMVideoProtection protection_method,
+ int64_t rtt_ms,
+ uint32_t render_delay_ms,
+ uint32_t min_playout_delay_ms)
+ : base_out_filename_(base_out_filename),
+ clock_(clock),
+ protection_enabled_(protection_enabled),
+ protection_method_(protection_method),
+ rtt_ms_(rtt_ms),
+ render_delay_ms_(render_delay_ms),
+ min_playout_delay_ms_(min_playout_delay_ms),
+ null_event_factory_(new NullEventFactory()),
+ crit_sect_(CriticalSectionWrapper::CreateCriticalSection()),
+ sinks_() {
+ assert(clock);
+ assert(crit_sect_.get());
+}
+
+VcmPayloadSinkFactory::~VcmPayloadSinkFactory() {
+ assert(sinks_.empty());
+}
+
+PayloadSinkInterface* VcmPayloadSinkFactory::Create(
+ RtpStreamInterface* stream) {
+ assert(stream);
+ CriticalSectionScoped cs(crit_sect_.get());
+
+ rtc::scoped_ptr<VideoCodingModule> vcm(
+ VideoCodingModule::Create(clock_, null_event_factory_.get()));
+ if (vcm.get() == NULL) {
+ return NULL;
+ }
+
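+  // Register a receive codec for every media payload type; ULPFEC and RED
+  // are protection formats and have no decoder to register.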
+ const PayloadTypes& plt = stream->payload_types();
+  for (PayloadTypesIterator it = plt.begin(); it != plt.end(); ++it) {
+ if (it->codec_type() != kVideoCodecULPFEC &&
+ it->codec_type() != kVideoCodecRED) {
+ VideoCodec codec;
+ if (VideoCodingModule::Codec(it->codec_type(), &codec) < 0) {
+ return NULL;
+ }
+ codec.plType = it->payload_type();
+ if (vcm->RegisterReceiveCodec(&codec, 1) < 0) {
+ return NULL;
+ }
+ }
+ }
+
+ vcm->SetChannelParameters(0, 0, rtt_ms_);
+ vcm->SetVideoProtection(protection_method_, protection_enabled_);
+ vcm->SetRenderDelay(render_delay_ms_);
+ vcm->SetMinimumPlayoutDelay(min_playout_delay_ms_);
+ vcm->SetNackSettings(kMaxNackListSize, kMaxPacketAgeToNack, 0);
+
+ rtc::scoped_ptr<FileOutputFrameReceiver> frame_receiver(
+ new FileOutputFrameReceiver(base_out_filename_, stream->ssrc()));
+ rtc::scoped_ptr<VcmPayloadSink> sink(
+ new VcmPayloadSink(this, stream, &vcm, &frame_receiver));
+
+ sinks_.push_back(sink.get());
+ return sink.release();
+}
+
+int VcmPayloadSinkFactory::DecodeAndProcessAll(bool decode_dual_frame) {
+ CriticalSectionScoped cs(crit_sect_.get());
+ assert(clock_);
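+  // Attempt a decode only when the simulated clock is at a multiple of 5 ms.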
+ bool should_decode = (clock_->TimeInMilliseconds() % 5) == 0;
+ for (Sinks::iterator it = sinks_.begin(); it != sinks_.end(); ++it) {
+ if ((*it)->DecodeAndProcess(should_decode, decode_dual_frame) < 0) {
+ return -1;
+ }
+ }
+ return 0;
+}
+
+bool VcmPayloadSinkFactory::ProcessAll() {
+ CriticalSectionScoped cs(crit_sect_.get());
+ for (Sinks::iterator it = sinks_.begin(); it != sinks_.end(); ++it) {
+ if (!(*it)->Process()) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool VcmPayloadSinkFactory::DecodeAll() {
+ CriticalSectionScoped cs(crit_sect_.get());
+ for (Sinks::iterator it = sinks_.begin(); it != sinks_.end(); ++it) {
+ if (!(*it)->Decode()) {
+ return false;
+ }
+ }
+ return true;
+}
+
+void VcmPayloadSinkFactory::Remove(VcmPayloadSink* sink) {
+ assert(sink);
+ CriticalSectionScoped cs(crit_sect_.get());
+ Sinks::iterator it = std::find(sinks_.begin(), sinks_.end(), sink);
+ assert(it != sinks_.end());
+ sinks_.erase(it);
+}
+
+} // namespace rtpplayer
+} // namespace webrtc
diff --git a/webrtc/modules/video_coding/main/test/vcm_payload_sink_factory.h b/webrtc/modules/video_coding/main/test/vcm_payload_sink_factory.h
new file mode 100644
index 0000000000..ec94bdc382
--- /dev/null
+++ b/webrtc/modules/video_coding/main/test/vcm_payload_sink_factory.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_TEST_VCM_PAYLOAD_SINK_FACTORY_H_
+#define WEBRTC_MODULES_VIDEO_CODING_TEST_VCM_PAYLOAD_SINK_FACTORY_H_
+
+#include <string>
+#include <vector>
+
+#include "webrtc/base/constructormagic.h"
+#include "webrtc/base/scoped_ptr.h"
+#include "webrtc/modules/video_coding/main/interface/video_coding_defines.h"
+#include "webrtc/modules/video_coding/main/test/rtp_player.h"
+
+class NullEventFactory;
+
+namespace webrtc {
+class Clock;
+class CriticalSectionWrapper;
+
+namespace rtpplayer {
+class VcmPayloadSinkFactory : public PayloadSinkFactoryInterface {
+ public:
+ VcmPayloadSinkFactory(const std::string& base_out_filename,
+ Clock* clock, bool protection_enabled,
+ VCMVideoProtection protection_method,
+ int64_t rtt_ms, uint32_t render_delay_ms,
+ uint32_t min_playout_delay_ms);
+ virtual ~VcmPayloadSinkFactory();
+
+ // PayloadSinkFactoryInterface
+ virtual PayloadSinkInterface* Create(RtpStreamInterface* stream);
+
+ int DecodeAndProcessAll(bool decode_dual_frame);
+ bool ProcessAll();
+ bool DecodeAll();
+
+ private:
+ class VcmPayloadSink;
+ friend class VcmPayloadSink;
+ typedef std::vector<VcmPayloadSink*> Sinks;
+
+ void Remove(VcmPayloadSink* sink);
+
+ std::string base_out_filename_;
+ Clock* clock_;
+ bool protection_enabled_;
+ VCMVideoProtection protection_method_;
+ int64_t rtt_ms_;
+ uint32_t render_delay_ms_;
+ uint32_t min_playout_delay_ms_;
+ rtc::scoped_ptr<NullEventFactory> null_event_factory_;
+ rtc::scoped_ptr<CriticalSectionWrapper> crit_sect_;
+ Sinks sinks_;
+
+ RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(VcmPayloadSinkFactory);
+};
+} // namespace rtpplayer
+} // namespace webrtc
+
+#endif  // WEBRTC_MODULES_VIDEO_CODING_TEST_VCM_PAYLOAD_SINK_FACTORY_H_
diff --git a/webrtc/modules/video_coding/main/test/video_rtp_play.cc b/webrtc/modules/video_coding/main/test/video_rtp_play.cc
new file mode 100644
index 0000000000..8460601bf5
--- /dev/null
+++ b/webrtc/modules/video_coding/main/test/video_rtp_play.cc
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/video_coding/main/test/receiver_tests.h"
+#include "webrtc/modules/video_coding/main/test/vcm_payload_sink_factory.h"
+#include "webrtc/system_wrappers/include/trace.h"
+#include "webrtc/test/testsupport/fileutils.h"
+
+namespace {
+
+const bool kConfigProtectionEnabled = true;
+const webrtc::VCMVideoProtection kConfigProtectionMethod =
+ webrtc::kProtectionNack;
+const float kConfigLossRate = 0.0f;
+const bool kConfigReordering = false;
+const int64_t kConfigRttMs = 0;
+const uint32_t kConfigRenderDelayMs = 0;
+const uint32_t kConfigMinPlayoutDelayMs = 0;
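+// A negative runtime limit means the player runs until the file ends.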
+const int64_t kConfigMaxRuntimeMs = -1;
+const uint8_t kDefaultUlpFecPayloadType = 97;
+const uint8_t kDefaultRedPayloadType = 96;
+const uint8_t kDefaultVp8PayloadType = 100;
+} // namespace
+
+int RtpPlay(const CmdArgs& args) {
+ std::string trace_file = webrtc::test::OutputPath() + "receiverTestTrace.txt";
+ webrtc::Trace::CreateTrace();
+ webrtc::Trace::SetTraceFile(trace_file.c_str());
+ webrtc::Trace::set_level_filter(webrtc::kTraceAll);
+
+ webrtc::rtpplayer::PayloadTypes payload_types;
+ payload_types.push_back(webrtc::rtpplayer::PayloadCodecTuple(
+ kDefaultUlpFecPayloadType, "ULPFEC", webrtc::kVideoCodecULPFEC));
+ payload_types.push_back(webrtc::rtpplayer::PayloadCodecTuple(
+ kDefaultRedPayloadType, "RED", webrtc::kVideoCodecRED));
+ payload_types.push_back(webrtc::rtpplayer::PayloadCodecTuple(
+ kDefaultVp8PayloadType, "VP8", webrtc::kVideoCodecVP8));
+
+ std::string output_file = args.outputFile;
+ if (output_file.empty())
+ output_file = webrtc::test::OutputPath() + "RtpPlay_decoded.yuv";
+
+ webrtc::SimulatedClock clock(0);
+ webrtc::rtpplayer::VcmPayloadSinkFactory factory(output_file, &clock,
+ kConfigProtectionEnabled, kConfigProtectionMethod, kConfigRttMs,
+ kConfigRenderDelayMs, kConfigMinPlayoutDelayMs);
+ rtc::scoped_ptr<webrtc::rtpplayer::RtpPlayerInterface> rtp_player(
+ webrtc::rtpplayer::Create(args.inputFile, &factory, &clock, payload_types,
+ kConfigLossRate, kConfigRttMs,
+ kConfigReordering));
+ if (rtp_player.get() == NULL) {
+ return -1;
+ }
+
+ int ret = 0;
+ while ((ret = rtp_player->NextPacket(clock.TimeInMilliseconds())) == 0) {
+ ret = factory.DecodeAndProcessAll(true);
+ if (ret < 0 || (kConfigMaxRuntimeMs > -1 &&
+ clock.TimeInMilliseconds() >= kConfigMaxRuntimeMs)) {
+ break;
+ }
+ clock.AdvanceTimeMilliseconds(1);
+ }
+
+  rtp_player->Print();
+  webrtc::Trace::ReturnTrace();
+
+  switch (ret) {
+    case 1:
+      printf("Success\n");
+      return 0;
+    case -1:
+      printf("Failed\n");
+      return -1;
+    default:
+      printf("Timeout\n");
+      return -1;
+  }
+}
diff --git a/webrtc/modules/video_coding/main/test/video_source.h b/webrtc/modules/video_coding/main/test/video_source.h
new file mode 100644
index 0000000000..05deb4a39b
--- /dev/null
+++ b/webrtc/modules/video_coding/main/test/video_source.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_TEST_VIDEO_SOURCE_H_
+#define WEBRTC_MODULES_VIDEO_CODING_TEST_VIDEO_SOURCE_H_
+
+#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
+#include "webrtc/typedefs.h"
+
+#include <string>
+
+enum VideoSize
+{
+  kUndefined,
+  kSQCIF,    // 128*96 = 12 288
+  kQQVGA,    // 160*120 = 19 200
+  kQCIF,     // 176*144 = 25 344
+  kCGA,      // 320*200 = 64 000
+  kQVGA,     // 320*240 = 76 800
+  kSIF,      // 352*240 = 84 480
+  kWQVGA,    // 400*240 = 96 000
+  kCIF,      // 352*288 = 101 376
+  kW288p,    // 512*288 = 147 456 (WCIF)
+  k448p,     // 576*448 = 281 088
+  kVGA,      // 640*480 = 307 200
+  k432p,     // 720*432 = 311 040
+  kW432p,    // 768*432 = 331 776
+  k4SIF,     // 704*480 = 337 920
+  kW448p,    // 768*448 = 344 064
+  kNTSC,     // 720*480 = 345 600
+  kFW448p,   // 800*448 = 358 400
+  kWVGA,     // 800*480 = 384 000
+  k4CIF,     // 704*576 = 405 504
+  kSVGA,     // 800*600 = 480 000
+  kW544p,    // 960*544 = 522 240
+  kW576p,    // 1024*576 = 589 824 (W4CIF)
+  kHD,       // 960*720 = 691 200
+  kXGA,      // 1024*768 = 786 432
+  kWHD,      // 1280*720 = 921 600
+  kFullHD,   // 1440*1080 = 1 555 200
+  kWFullHD,  // 1920*1080 = 2 073 600
+
+  kNumberOfVideoSizes
+};
+
+
+class VideoSource
+{
+public:
+ VideoSource();
+    VideoSource(std::string fileName, VideoSize size, float frameRate,
+                webrtc::VideoType type = webrtc::kI420);
+ VideoSource(std::string fileName, uint16_t width, uint16_t height,
+ float frameRate = 30, webrtc::VideoType type = webrtc::kI420);
+
+ std::string GetFileName() const { return _fileName; }
+ uint16_t GetWidth() const { return _width; }
+ uint16_t GetHeight() const { return _height; }
+ webrtc::VideoType GetType() const { return _type; }
+ float GetFrameRate() const { return _frameRate; }
+    int GetWidthHeight(VideoSize size);
+
+ // Returns the filename with the path (including the leading slash) removed.
+ std::string GetName() const;
+
+ size_t GetFrameLength() const;
+
+private:
+ std::string _fileName;
+ uint16_t _width;
+ uint16_t _height;
+ webrtc::VideoType _type;
+ float _frameRate;
+};
+
+#endif // WEBRTC_MODULES_VIDEO_CODING_TEST_VIDEO_SOURCE_H_