aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorDanil Chapovalov <danilchap@webrtc.org>2020-07-21 15:01:50 +0200
committerCommit Bot <commit-bot@chromium.org>2020-07-21 14:37:08 +0000
commit31cb3abd3683270edcc18c8e61a9ab851dd99b17 (patch)
tree90ca17aec33537ca1635e728dad42b7ad9e6598e
parenta5d9c1a45c2bd748b7ec7b6456b7ddddfc46d2d6 (diff)
downloadwebrtc-31cb3abd3683270edcc18c8e61a9ab851dd99b17.tar.gz
Do not propagate RTPFragmentationHeader into rtp_rtcp
It is no longer needed by the rtp_rtcp module. Bug: webrtc:6471 Change-Id: I89a4374a50c54a02e9f20a5ce789eac308aaffeb Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/179523 Reviewed-by: Philip Eliasson <philipel@webrtc.org> Reviewed-by: Sebastian Jansson <srte@webrtc.org> Commit-Queue: Danil Chapovalov <danilchap@webrtc.org> Cr-Commit-Position: refs/heads/master@{#31773}
-rw-r--r--call/rtp_video_sender.cc3
-rw-r--r--modules/rtp_rtcp/source/nack_rtx_unittest.cc4
-rw-r--r--modules/rtp_rtcp/source/rtp_format.cc3
-rw-r--r--modules/rtp_rtcp/source/rtp_format.h4
-rw-r--r--modules/rtp_rtcp/source/rtp_format_vp8_test_helper.h7
-rw-r--r--modules/rtp_rtcp/source/rtp_rtcp_impl2_unittest.cc2
-rw-r--r--modules/rtp_rtcp/source/rtp_rtcp_impl_unittest.cc2
-rw-r--r--modules/rtp_rtcp/source/rtp_sender_unittest.cc29
-rw-r--r--modules/rtp_rtcp/source/rtp_sender_video.cc29
-rw-r--r--modules/rtp_rtcp/source/rtp_sender_video.h18
-rw-r--r--modules/rtp_rtcp/source/rtp_sender_video_frame_transformer_delegate.cc23
-rw-r--r--modules/rtp_rtcp/source/rtp_sender_video_frame_transformer_delegate.h1
-rw-r--r--modules/rtp_rtcp/source/rtp_sender_video_unittest.cc96
13 files changed, 97 insertions, 124 deletions
diff --git a/call/rtp_video_sender.cc b/call/rtp_video_sender.cc
index 3217b3ce20..fb6825e719 100644
--- a/call/rtp_video_sender.cc
+++ b/call/rtp_video_sender.cc
@@ -506,7 +506,7 @@ bool RtpVideoSender::IsActiveLocked() {
EncodedImageCallback::Result RtpVideoSender::OnEncodedImage(
const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific_info,
- const RTPFragmentationHeader* fragmentation) {
+ const RTPFragmentationHeader* /*fragmentation*/) {
fec_controller_->UpdateWithEncodedData(encoded_image.size(),
encoded_image._frameType);
MutexLock lock(&mutex_);
@@ -559,7 +559,6 @@ EncodedImageCallback::Result RtpVideoSender::OnEncodedImage(
bool send_result = rtp_streams_[stream_index].sender_video->SendEncodedImage(
rtp_config_.payload_type, codec_type_, rtp_timestamp, encoded_image,
- fragmentation,
params_[stream_index].GetRtpVideoHeader(
encoded_image, codec_specific_info, shared_frame_id_),
expected_retransmission_time_ms);
diff --git a/modules/rtp_rtcp/source/nack_rtx_unittest.cc b/modules/rtp_rtcp/source/nack_rtx_unittest.cc
index c30eb32a44..8afaf3ee61 100644
--- a/modules/rtp_rtcp/source/nack_rtx_unittest.cc
+++ b/modules/rtp_rtcp/source/nack_rtx_unittest.cc
@@ -211,7 +211,7 @@ class RtpRtcpRtxNackTest : public ::testing::Test {
video_header.frame_type = VideoFrameType::kVideoFrameDelta;
EXPECT_TRUE(rtp_sender_video_->SendVideo(
kPayloadType, VideoCodecType::kVideoCodecGeneric, timestamp,
- timestamp / 90, payload_data, nullptr, video_header, 0));
+ timestamp / 90, payload_data, video_header, 0));
// Min required delay until retransmit = 5 + RTT ms (RTT = 0).
fake_clock.AdvanceTimeMilliseconds(5);
int length = BuildNackList(nack_list);
@@ -261,7 +261,7 @@ TEST_F(RtpRtcpRtxNackTest, LongNackList) {
video_header.frame_type = VideoFrameType::kVideoFrameDelta;
EXPECT_TRUE(rtp_sender_video_->SendVideo(
kPayloadType, VideoCodecType::kVideoCodecGeneric, timestamp,
- timestamp / 90, payload_data, nullptr, video_header, 0));
+ timestamp / 90, payload_data, video_header, 0));
// Prepare next frame.
timestamp += 3000;
fake_clock.AdvanceTimeMilliseconds(33);
diff --git a/modules/rtp_rtcp/source/rtp_format.cc b/modules/rtp_rtcp/source/rtp_format.cc
index f6f4a48f04..7703a6bf0f 100644
--- a/modules/rtp_rtcp/source/rtp_format.cc
+++ b/modules/rtp_rtcp/source/rtp_format.cc
@@ -30,8 +30,7 @@ std::unique_ptr<RtpPacketizer> RtpPacketizer::Create(
rtc::ArrayView<const uint8_t> payload,
PayloadSizeLimits limits,
// Codec-specific details.
- const RTPVideoHeader& rtp_video_header,
- const RTPFragmentationHeader* /*fragmentation*/) {
+ const RTPVideoHeader& rtp_video_header) {
if (!type) {
// Use raw packetizer.
return std::make_unique<RtpPacketizerGeneric>(payload, limits);
diff --git a/modules/rtp_rtcp/source/rtp_format.h b/modules/rtp_rtcp/source/rtp_format.h
index dca8285b62..b593f29b1d 100644
--- a/modules/rtp_rtcp/source/rtp_format.h
+++ b/modules/rtp_rtcp/source/rtp_format.h
@@ -18,7 +18,6 @@
#include "absl/types/optional.h"
#include "api/array_view.h"
-#include "modules/include/module_common_types.h"
#include "modules/rtp_rtcp/source/rtp_video_header.h"
namespace webrtc {
@@ -41,8 +40,7 @@ class RtpPacketizer {
rtc::ArrayView<const uint8_t> payload,
PayloadSizeLimits limits,
// Codec-specific details.
- const RTPVideoHeader& rtp_video_header,
- const RTPFragmentationHeader* fragmentation);
+ const RTPVideoHeader& rtp_video_header);
virtual ~RtpPacketizer() = default;
diff --git a/modules/rtp_rtcp/source/rtp_format_vp8_test_helper.h b/modules/rtp_rtcp/source/rtp_format_vp8_test_helper.h
index 03d4e58576..916d6577f1 100644
--- a/modules/rtp_rtcp/source/rtp_format_vp8_test_helper.h
+++ b/modules/rtp_rtcp/source/rtp_format_vp8_test_helper.h
@@ -10,10 +10,9 @@
// This file contains the class RtpFormatVp8TestHelper. The class is
// responsible for setting up a fake VP8 bitstream according to the
-// RTPVideoHeaderVP8 header, and partition information. After initialization,
-// an RTPFragmentationHeader is provided so that the tester can create a
-// packetizer. The packetizer can then be provided to this helper class, which
-// will then extract all packets and compare to the expected outcome.
+// RTPVideoHeaderVP8 header. The packetizer can then be provided to this helper
+// class, which will then extract all packets and compare to the expected
+// outcome.
#ifndef MODULES_RTP_RTCP_SOURCE_RTP_FORMAT_VP8_TEST_HELPER_H_
#define MODULES_RTP_RTCP_SOURCE_RTP_FORMAT_VP8_TEST_HELPER_H_
diff --git a/modules/rtp_rtcp/source/rtp_rtcp_impl2_unittest.cc b/modules/rtp_rtcp/source/rtp_rtcp_impl2_unittest.cc
index 348a9f95e0..81c71aa58e 100644
--- a/modules/rtp_rtcp/source/rtp_rtcp_impl2_unittest.cc
+++ b/modules/rtp_rtcp/source/rtp_rtcp_impl2_unittest.cc
@@ -236,7 +236,7 @@ class RtpRtcpImpl2Test : public ::testing::Test {
const uint8_t payload[100] = {0};
EXPECT_TRUE(module->impl_->OnSendingRtpFrame(0, 0, codec_.plType, true));
EXPECT_TRUE(sender->SendVideo(codec_.plType, VideoCodecType::kVideoCodecVP8,
- 0, 0, payload, nullptr, rtp_video_header, 0));
+ 0, 0, payload, rtp_video_header, 0));
}
void IncomingRtcpNack(const RtpRtcpModule* module, uint16_t sequence_number) {
diff --git a/modules/rtp_rtcp/source/rtp_rtcp_impl_unittest.cc b/modules/rtp_rtcp/source/rtp_rtcp_impl_unittest.cc
index dd7b512ff2..aefa91e23e 100644
--- a/modules/rtp_rtcp/source/rtp_rtcp_impl_unittest.cc
+++ b/modules/rtp_rtcp/source/rtp_rtcp_impl_unittest.cc
@@ -225,7 +225,7 @@ class RtpRtcpImplTest : public ::testing::Test {
const uint8_t payload[100] = {0};
EXPECT_TRUE(module->impl_->OnSendingRtpFrame(0, 0, codec_.plType, true));
EXPECT_TRUE(sender->SendVideo(codec_.plType, VideoCodecType::kVideoCodecVP8,
- 0, 0, payload, nullptr, rtp_video_header, 0));
+ 0, 0, payload, rtp_video_header, 0));
}
void IncomingRtcpNack(const RtpRtcpModule* module, uint16_t sequence_number) {
diff --git a/modules/rtp_rtcp/source/rtp_sender_unittest.cc b/modules/rtp_rtcp/source/rtp_sender_unittest.cc
index 9146eb8fd8..c19fbe8863 100644
--- a/modules/rtp_rtcp/source/rtp_sender_unittest.cc
+++ b/modules/rtp_rtcp/source/rtp_sender_unittest.cc
@@ -769,7 +769,7 @@ TEST_P(RtpSenderTestWithoutPacer, OnSendSideDelayUpdated) {
video_header.frame_type = VideoFrameType::kVideoFrameKey;
EXPECT_TRUE(rtp_sender_video.SendVideo(
kPayloadType, kCodecType, capture_time_ms * kCaptureTimeMsToRtpTimestamp,
- capture_time_ms, kPayloadData, nullptr, video_header,
+ capture_time_ms, kPayloadData, video_header,
kDefaultExpectedRetransmissionTimeMs));
// Send another packet with 20 ms delay. The average, max and total should be
@@ -781,7 +781,7 @@ TEST_P(RtpSenderTestWithoutPacer, OnSendSideDelayUpdated) {
video_header.frame_type = VideoFrameType::kVideoFrameKey;
EXPECT_TRUE(rtp_sender_video.SendVideo(
kPayloadType, kCodecType, capture_time_ms * kCaptureTimeMsToRtpTimestamp,
- capture_time_ms, kPayloadData, nullptr, video_header,
+ capture_time_ms, kPayloadData, video_header,
kDefaultExpectedRetransmissionTimeMs));
// Send another packet at the same time, which replaces the last packet.
@@ -794,7 +794,7 @@ TEST_P(RtpSenderTestWithoutPacer, OnSendSideDelayUpdated) {
video_header.frame_type = VideoFrameType::kVideoFrameKey;
EXPECT_TRUE(rtp_sender_video.SendVideo(
kPayloadType, kCodecType, capture_time_ms * kCaptureTimeMsToRtpTimestamp,
- capture_time_ms, kPayloadData, nullptr, video_header,
+ capture_time_ms, kPayloadData, video_header,
kDefaultExpectedRetransmissionTimeMs));
// Send a packet 1 second later. The earlier packets should have timed
@@ -808,7 +808,7 @@ TEST_P(RtpSenderTestWithoutPacer, OnSendSideDelayUpdated) {
video_header.frame_type = VideoFrameType::kVideoFrameKey;
EXPECT_TRUE(rtp_sender_video.SendVideo(
kPayloadType, kCodecType, capture_time_ms * kCaptureTimeMsToRtpTimestamp,
- capture_time_ms, kPayloadData, nullptr, video_header,
+ capture_time_ms, kPayloadData, video_header,
kDefaultExpectedRetransmissionTimeMs));
}
@@ -1258,7 +1258,7 @@ TEST_P(RtpSenderTestWithoutPacer, SendGenericVideo) {
RTPVideoHeader video_header;
video_header.frame_type = VideoFrameType::kVideoFrameKey;
ASSERT_TRUE(rtp_sender_video.SendVideo(kPayloadType, kCodecType, 1234, 4321,
- payload, nullptr, video_header,
+ payload, video_header,
kDefaultExpectedRetransmissionTimeMs));
auto sent_payload = transport_.last_sent_packet().payload();
@@ -1274,7 +1274,7 @@ TEST_P(RtpSenderTestWithoutPacer, SendGenericVideo) {
video_header.frame_type = VideoFrameType::kVideoFrameDelta;
ASSERT_TRUE(rtp_sender_video.SendVideo(kPayloadType, kCodecType, 1234, 4321,
- payload, nullptr, video_header,
+ payload, video_header,
kDefaultExpectedRetransmissionTimeMs));
sent_payload = transport_.last_sent_packet().payload();
@@ -1299,7 +1299,7 @@ TEST_P(RtpSenderTestWithoutPacer, SendRawVideo) {
RTPVideoHeader video_header;
video_header.frame_type = VideoFrameType::kVideoFrameKey;
ASSERT_TRUE(rtp_sender_video.SendVideo(kPayloadType, absl::nullopt, 1234,
- 4321, payload, nullptr, video_header,
+ 4321, payload, video_header,
kDefaultExpectedRetransmissionTimeMs));
auto sent_payload = transport_.last_sent_packet().payload();
@@ -1391,8 +1391,7 @@ TEST_P(RtpSenderTest, SendFlexfecPackets) {
video_header.frame_type = VideoFrameType::kVideoFrameKey;
EXPECT_TRUE(rtp_sender_video.SendVideo(
kMediaPayloadType, kCodecType, kTimestamp, clock_->TimeInMilliseconds(),
- kPayloadData, nullptr, video_header,
- kDefaultExpectedRetransmissionTimeMs));
+ kPayloadData, video_header, kDefaultExpectedRetransmissionTimeMs));
ASSERT_TRUE(media_packet != nullptr);
ASSERT_TRUE(fec_packet != nullptr);
@@ -1467,8 +1466,7 @@ TEST_P(RtpSenderTestWithoutPacer, SendFlexfecPackets) {
video_header.frame_type = VideoFrameType::kVideoFrameKey;
EXPECT_TRUE(rtp_sender_video.SendVideo(
kMediaPayloadType, kCodecType, kTimestamp, clock_->TimeInMilliseconds(),
- kPayloadData, nullptr, video_header,
- kDefaultExpectedRetransmissionTimeMs));
+ kPayloadData, video_header, kDefaultExpectedRetransmissionTimeMs));
ASSERT_EQ(2, transport_.packets_sent());
const RtpPacketReceived& media_packet = transport_.sent_packets_[0];
@@ -1797,8 +1795,7 @@ TEST_P(RtpSenderTest, FecOverheadRate) {
video_header.frame_type = VideoFrameType::kVideoFrameKey;
EXPECT_TRUE(rtp_sender_video.SendVideo(
kMediaPayloadType, kCodecType, kTimestamp, clock_->TimeInMilliseconds(),
- kPayloadData, nullptr, video_header,
- kDefaultExpectedRetransmissionTimeMs));
+ kPayloadData, video_header, kDefaultExpectedRetransmissionTimeMs));
time_controller_.AdvanceTime(TimeDelta::Millis(kTimeBetweenPacketsMs));
}
@@ -1888,7 +1885,7 @@ TEST_P(RtpSenderTest, BitrateCallbacks) {
for (uint32_t i = 0; i < kNumPackets; ++i) {
video_header.frame_type = VideoFrameType::kVideoFrameKey;
ASSERT_TRUE(rtp_sender_video.SendVideo(
- kPayloadType, kCodecType, 1234, 4321, payload, nullptr, video_header,
+ kPayloadType, kCodecType, 1234, 4321, payload, video_header,
kDefaultExpectedRetransmissionTimeMs));
time_controller_.AdvanceTime(TimeDelta::Millis(kPacketInterval));
}
@@ -1925,7 +1922,7 @@ TEST_P(RtpSenderTestWithoutPacer, StreamDataCountersCallbacks) {
RTPVideoHeader video_header;
video_header.frame_type = VideoFrameType::kVideoFrameKey;
ASSERT_TRUE(rtp_sender_video.SendVideo(kPayloadType, kCodecType, 1234, 4321,
- payload, nullptr, video_header,
+ payload, video_header,
kDefaultExpectedRetransmissionTimeMs));
StreamDataCounters expected;
expected.transmitted.payload_bytes = 6;
@@ -1999,7 +1996,7 @@ TEST_P(RtpSenderTestWithoutPacer, StreamDataCountersCallbacksUlpfec) {
}
video_header.frame_type = VideoFrameType::kVideoFrameDelta;
ASSERT_TRUE(rtp_sender_video.SendVideo(kPayloadType, kCodecType, 1234, 4321,
- payload, nullptr, video_header,
+ payload, video_header,
kDefaultExpectedRetransmissionTimeMs));
expected.transmitted.payload_bytes = 28;
expected.transmitted.header_bytes = 24;
diff --git a/modules/rtp_rtcp/source/rtp_sender_video.cc b/modules/rtp_rtcp/source/rtp_sender_video.cc
index 3816a51b57..0f3e8b9966 100644
--- a/modules/rtp_rtcp/source/rtp_sender_video.cc
+++ b/modules/rtp_rtcp/source/rtp_sender_video.cc
@@ -183,7 +183,8 @@ void RTPSenderVideo::LogAndSendToNetwork(
packetized_payload_size += packet->payload_size();
}
}
- // AV1 packetizer may produce less packetized bytes than unpacketized.
+ // AV1 and H264 packetizers may produce less packetized bytes than
+ // unpacketized.
if (packetized_payload_size >= unpacketized_payload_size) {
packetization_overhead_bitrate_.Update(
packetized_payload_size - unpacketized_payload_size,
@@ -392,7 +393,6 @@ bool RTPSenderVideo::SendVideo(
uint32_t rtp_timestamp,
int64_t capture_time_ms,
rtc::ArrayView<const uint8_t> payload,
- const RTPFragmentationHeader* fragmentation,
RTPVideoHeader video_header,
absl::optional<int64_t> expected_retransmission_time_ms) {
#if RTC_TRACE_EVENTS_ENABLED
@@ -528,8 +528,8 @@ bool RTPSenderVideo::SendVideo(
"one is required since require_frame_encryptor is set";
}
- std::unique_ptr<RtpPacketizer> packetizer = RtpPacketizer::Create(
- codec_type, payload, limits, video_header, fragmentation);
+ std::unique_ptr<RtpPacketizer> packetizer =
+ RtpPacketizer::Create(codec_type, payload, limits, video_header);
// TODO(bugs.webrtc.org/10714): retransmission_settings_ should generally be
// replaced by expected_retransmission_time_ms.has_value(). For now, though,
@@ -541,16 +541,6 @@ bool RTPSenderVideo::SendVideo(
: false;
const size_t num_packets = packetizer->NumPackets();
- size_t unpacketized_payload_size;
- if (fragmentation && fragmentation->fragmentationVectorSize > 0) {
- unpacketized_payload_size = 0;
- for (uint16_t i = 0; i < fragmentation->fragmentationVectorSize; ++i) {
- unpacketized_payload_size += fragmentation->fragmentationLength[i];
- }
- } else {
- unpacketized_payload_size = payload.size();
- }
-
if (num_packets == 0)
return false;
@@ -643,7 +633,7 @@ bool RTPSenderVideo::SendVideo(
}
}
- LogAndSendToNetwork(std::move(rtp_packets), unpacketized_payload_size);
+ LogAndSendToNetwork(std::move(rtp_packets), payload.size());
// Update details about the last sent frame.
last_rotation_ = video_header.rotation;
@@ -678,18 +668,17 @@ bool RTPSenderVideo::SendEncodedImage(
absl::optional<VideoCodecType> codec_type,
uint32_t rtp_timestamp,
const EncodedImage& encoded_image,
- const RTPFragmentationHeader* fragmentation,
RTPVideoHeader video_header,
absl::optional<int64_t> expected_retransmission_time_ms) {
if (frame_transformer_delegate_) {
// The frame will be sent async once transformed.
return frame_transformer_delegate_->TransformFrame(
- payload_type, codec_type, rtp_timestamp, encoded_image, fragmentation,
- video_header, expected_retransmission_time_ms);
+ payload_type, codec_type, rtp_timestamp, encoded_image, video_header,
+ expected_retransmission_time_ms);
}
return SendVideo(payload_type, codec_type, rtp_timestamp,
- encoded_image.capture_time_ms_, encoded_image, fragmentation,
- video_header, expected_retransmission_time_ms);
+ encoded_image.capture_time_ms_, encoded_image, video_header,
+ expected_retransmission_time_ms);
}
uint32_t RTPSenderVideo::VideoBitrateSent() const {
diff --git a/modules/rtp_rtcp/source/rtp_sender_video.h b/modules/rtp_rtcp/source/rtp_sender_video.h
index ce7a6aa89e..57f8fcc7ac 100644
--- a/modules/rtp_rtcp/source/rtp_sender_video.h
+++ b/modules/rtp_rtcp/source/rtp_sender_video.h
@@ -24,7 +24,6 @@
#include "api/transport/rtp/dependency_descriptor.h"
#include "api/video/video_codec_type.h"
#include "api/video/video_frame_type.h"
-#include "modules/include/module_common_types.h"
#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "modules/rtp_rtcp/source/absolute_capture_time_sender.h"
#include "modules/rtp_rtcp/source/active_decode_targets_helper.h"
@@ -33,6 +32,7 @@
#include "modules/rtp_rtcp/source/rtp_sender_video_frame_transformer_delegate.h"
#include "modules/rtp_rtcp/source/rtp_video_header.h"
#include "modules/rtp_rtcp/source/video_fec_generator.h"
+#include "rtc_base/deprecation.h"
#include "rtc_base/one_time_event.h"
#include "rtc_base/race_checker.h"
#include "rtc_base/rate_statistics.h"
@@ -42,6 +42,7 @@
namespace webrtc {
+class RTPFragmentationHeader;
class FrameEncryptorInterface;
class RtpPacketizer;
class RtpPacketToSend;
@@ -90,6 +91,19 @@ class RTPSenderVideo {
virtual ~RTPSenderVideo();
+ RTC_DEPRECATED
+ bool SendVideo(int payload_type,
+ absl::optional<VideoCodecType> codec_type,
+ uint32_t rtp_timestamp,
+ int64_t capture_time_ms,
+ rtc::ArrayView<const uint8_t> payload,
+ const RTPFragmentationHeader* /*fragmentation*/,
+ RTPVideoHeader video_header,
+ absl::optional<int64_t> expected_retransmission_time_ms) {
+ return SendVideo(payload_type, codec_type, rtp_timestamp, capture_time_ms,
+ payload, video_header, expected_retransmission_time_ms);
+ }
+
// expected_retransmission_time_ms.has_value() -> retransmission allowed.
// Calls to this method is assumed to be externally serialized.
bool SendVideo(int payload_type,
@@ -97,7 +111,6 @@ class RTPSenderVideo {
uint32_t rtp_timestamp,
int64_t capture_time_ms,
rtc::ArrayView<const uint8_t> payload,
- const RTPFragmentationHeader* fragmentation,
RTPVideoHeader video_header,
absl::optional<int64_t> expected_retransmission_time_ms);
@@ -106,7 +119,6 @@ class RTPSenderVideo {
absl::optional<VideoCodecType> codec_type,
uint32_t rtp_timestamp,
const EncodedImage& encoded_image,
- const RTPFragmentationHeader* fragmentation,
RTPVideoHeader video_header,
absl::optional<int64_t> expected_retransmission_time_ms);
diff --git a/modules/rtp_rtcp/source/rtp_sender_video_frame_transformer_delegate.cc b/modules/rtp_rtcp/source/rtp_sender_video_frame_transformer_delegate.cc
index f902c23502..786e46777a 100644
--- a/modules/rtp_rtcp/source/rtp_sender_video_frame_transformer_delegate.cc
+++ b/modules/rtp_rtcp/source/rtp_sender_video_frame_transformer_delegate.cc
@@ -21,15 +21,6 @@
namespace webrtc {
namespace {
-std::unique_ptr<RTPFragmentationHeader> CreateFragmentationHeader(
- const RTPFragmentationHeader* fragmentation_header) {
- if (!fragmentation_header)
- return nullptr;
- auto ret = std::make_unique<RTPFragmentationHeader>();
- ret->CopyFrom(*fragmentation_header);
- return ret;
-}
-
class TransformableVideoSenderFrame : public TransformableVideoFrameInterface {
public:
TransformableVideoSenderFrame(
@@ -38,7 +29,6 @@ class TransformableVideoSenderFrame : public TransformableVideoFrameInterface {
int payload_type,
absl::optional<VideoCodecType> codec_type,
uint32_t rtp_timestamp,
- const RTPFragmentationHeader* fragmentation_header,
absl::optional<int64_t> expected_retransmission_time_ms,
uint32_t ssrc)
: encoded_data_(encoded_image.GetEncodedData()),
@@ -50,9 +40,7 @@ class TransformableVideoSenderFrame : public TransformableVideoFrameInterface {
timestamp_(rtp_timestamp),
capture_time_ms_(encoded_image.capture_time_ms_),
expected_retransmission_time_ms_(expected_retransmission_time_ms),
- ssrc_(ssrc),
- fragmentation_header_(CreateFragmentationHeader(fragmentation_header)) {
- }
+ ssrc_(ssrc) {}
~TransformableVideoSenderFrame() override = default;
@@ -83,10 +71,6 @@ class TransformableVideoSenderFrame : public TransformableVideoFrameInterface {
absl::optional<VideoCodecType> GetCodecType() const { return codec_type_; }
int64_t GetCaptureTimeMs() const { return capture_time_ms_; }
- RTPFragmentationHeader* GetFragmentationHeader() const {
- return fragmentation_header_.get();
- }
-
const absl::optional<int64_t>& GetExpectedRetransmissionTimeMs() const {
return expected_retransmission_time_ms_;
}
@@ -102,7 +86,6 @@ class TransformableVideoSenderFrame : public TransformableVideoFrameInterface {
const int64_t capture_time_ms_;
const absl::optional<int64_t> expected_retransmission_time_ms_;
const uint32_t ssrc_;
- const std::unique_ptr<RTPFragmentationHeader> fragmentation_header_;
};
} // namespace
@@ -126,7 +109,6 @@ bool RTPSenderVideoFrameTransformerDelegate::TransformFrame(
absl::optional<VideoCodecType> codec_type,
uint32_t rtp_timestamp,
const EncodedImage& encoded_image,
- const RTPFragmentationHeader* fragmentation,
RTPVideoHeader video_header,
absl::optional<int64_t> expected_retransmission_time_ms) {
if (!encoder_queue_) {
@@ -139,7 +121,7 @@ bool RTPSenderVideoFrameTransformerDelegate::TransformFrame(
}
frame_transformer_->Transform(std::make_unique<TransformableVideoSenderFrame>(
encoded_image, video_header, payload_type, codec_type, rtp_timestamp,
- fragmentation, expected_retransmission_time_ms, ssrc_));
+ expected_retransmission_time_ms, ssrc_));
return true;
}
@@ -172,7 +154,6 @@ void RTPSenderVideoFrameTransformerDelegate::SendVideo(
transformed_video_frame->GetTimestamp(),
transformed_video_frame->GetCaptureTimeMs(),
transformed_video_frame->GetData(),
- transformed_video_frame->GetFragmentationHeader(),
transformed_video_frame->GetHeader(),
transformed_video_frame->GetExpectedRetransmissionTimeMs());
}
diff --git a/modules/rtp_rtcp/source/rtp_sender_video_frame_transformer_delegate.h b/modules/rtp_rtcp/source/rtp_sender_video_frame_transformer_delegate.h
index 945b86927c..a14ce3a81e 100644
--- a/modules/rtp_rtcp/source/rtp_sender_video_frame_transformer_delegate.h
+++ b/modules/rtp_rtcp/source/rtp_sender_video_frame_transformer_delegate.h
@@ -40,7 +40,6 @@ class RTPSenderVideoFrameTransformerDelegate : public TransformedFrameCallback {
absl::optional<VideoCodecType> codec_type,
uint32_t rtp_timestamp,
const EncodedImage& encoded_image,
- const RTPFragmentationHeader* fragmentation,
RTPVideoHeader video_header,
absl::optional<int64_t> expected_retransmission_time_ms);
diff --git a/modules/rtp_rtcp/source/rtp_sender_video_unittest.cc b/modules/rtp_rtcp/source/rtp_sender_video_unittest.cc
index c53725e339..6a049ceb7a 100644
--- a/modules/rtp_rtcp/source/rtp_sender_video_unittest.cc
+++ b/modules/rtp_rtcp/source/rtp_sender_video_unittest.cc
@@ -204,8 +204,8 @@ TEST_P(RtpSenderVideoTest, KeyFrameHasCVO) {
RTPVideoHeader hdr;
hdr.rotation = kVideoRotation_0;
hdr.frame_type = VideoFrameType::kVideoFrameKey;
- rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, nullptr,
- hdr, kDefaultExpectedRetransmissionTimeMs);
+ rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+ kDefaultExpectedRetransmissionTimeMs);
VideoRotation rotation;
EXPECT_TRUE(
@@ -231,7 +231,7 @@ TEST_P(RtpSenderVideoTest, TimingFrameHasPacketizationTimstampSet) {
fake_clock_.AdvanceTimeMilliseconds(kPacketizationTimeMs);
hdr.frame_type = VideoFrameType::kVideoFrameKey;
rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp, kCaptureTimestamp,
- kFrame, nullptr, hdr,
+ kFrame, hdr,
kDefaultExpectedRetransmissionTimeMs);
VideoSendTiming timing;
EXPECT_TRUE(transport_.last_sent_packet().GetExtension<VideoTimingExtension>(
@@ -249,15 +249,15 @@ TEST_P(RtpSenderVideoTest, DeltaFrameHasCVOWhenChanged) {
RTPVideoHeader hdr;
hdr.rotation = kVideoRotation_90;
hdr.frame_type = VideoFrameType::kVideoFrameKey;
- EXPECT_TRUE(rtp_sender_video_.SendVideo(
- kPayload, kType, kTimestamp, 0, kFrame, nullptr, hdr,
- kDefaultExpectedRetransmissionTimeMs));
+ EXPECT_TRUE(
+ rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+ kDefaultExpectedRetransmissionTimeMs));
hdr.rotation = kVideoRotation_0;
hdr.frame_type = VideoFrameType::kVideoFrameDelta;
- EXPECT_TRUE(rtp_sender_video_.SendVideo(
- kPayload, kType, kTimestamp + 1, 0, kFrame, nullptr, hdr,
- kDefaultExpectedRetransmissionTimeMs));
+ EXPECT_TRUE(
+ rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp + 1, 0, kFrame,
+ hdr, kDefaultExpectedRetransmissionTimeMs));
VideoRotation rotation;
EXPECT_TRUE(
@@ -273,14 +273,14 @@ TEST_P(RtpSenderVideoTest, DeltaFrameHasCVOWhenNonZero) {
RTPVideoHeader hdr;
hdr.rotation = kVideoRotation_90;
hdr.frame_type = VideoFrameType::kVideoFrameKey;
- EXPECT_TRUE(rtp_sender_video_.SendVideo(
- kPayload, kType, kTimestamp, 0, kFrame, nullptr, hdr,
- kDefaultExpectedRetransmissionTimeMs));
+ EXPECT_TRUE(
+ rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+ kDefaultExpectedRetransmissionTimeMs));
hdr.frame_type = VideoFrameType::kVideoFrameDelta;
- EXPECT_TRUE(rtp_sender_video_.SendVideo(
- kPayload, kType, kTimestamp + 1, 0, kFrame, nullptr, hdr,
- kDefaultExpectedRetransmissionTimeMs));
+ EXPECT_TRUE(
+ rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp + 1, 0, kFrame,
+ hdr, kDefaultExpectedRetransmissionTimeMs));
VideoRotation rotation;
EXPECT_TRUE(
@@ -514,8 +514,8 @@ TEST_P(RtpSenderVideoTest, SendsDependencyDescriptorWhenVideoStructureIsSet) {
generic.decode_target_indications = {DecodeTargetIndication::kSwitch,
DecodeTargetIndication::kSwitch};
hdr.frame_type = VideoFrameType::kVideoFrameKey;
- rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, nullptr,
- hdr, kDefaultExpectedRetransmissionTimeMs);
+ rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+ kDefaultExpectedRetransmissionTimeMs);
ASSERT_EQ(transport_.packets_sent(), 1);
DependencyDescriptor descriptor_key;
@@ -540,8 +540,8 @@ TEST_P(RtpSenderVideoTest, SendsDependencyDescriptorWhenVideoStructureIsSet) {
generic.decode_target_indications = {DecodeTargetIndication::kNotPresent,
DecodeTargetIndication::kRequired};
hdr.frame_type = VideoFrameType::kVideoFrameDelta;
- rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, nullptr,
- hdr, kDefaultExpectedRetransmissionTimeMs);
+ rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+ kDefaultExpectedRetransmissionTimeMs);
EXPECT_EQ(transport_.packets_sent(), 2);
DependencyDescriptor descriptor_delta;
@@ -580,8 +580,8 @@ TEST_P(RtpSenderVideoTest, PropagatesChainDiffsIntoDependencyDescriptor) {
DecodeTargetIndication::kSwitch};
generic.chain_diffs = {2};
hdr.frame_type = VideoFrameType::kVideoFrameKey;
- rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, nullptr,
- hdr, kDefaultExpectedRetransmissionTimeMs);
+ rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+ kDefaultExpectedRetransmissionTimeMs);
ASSERT_EQ(transport_.packets_sent(), 1);
DependencyDescriptor descriptor_key;
@@ -615,8 +615,8 @@ TEST_P(RtpSenderVideoTest,
generic.active_decode_targets = 0b01;
generic.chain_diffs = {1};
hdr.frame_type = VideoFrameType::kVideoFrameKey;
- rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, nullptr,
- hdr, kDefaultExpectedRetransmissionTimeMs);
+ rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+ kDefaultExpectedRetransmissionTimeMs);
ASSERT_EQ(transport_.packets_sent(), 1);
DependencyDescriptor descriptor_key;
@@ -653,8 +653,8 @@ TEST_P(RtpSenderVideoTest,
DecodeTargetIndication::kSwitch};
hdr.frame_type = VideoFrameType::kVideoFrameKey;
rtp_sender_video_.SetVideoStructure(&video_structure1);
- rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, nullptr,
- hdr, kDefaultExpectedRetransmissionTimeMs);
+ rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+ kDefaultExpectedRetransmissionTimeMs);
// Parse 1st extension.
ASSERT_EQ(transport_.packets_sent(), 1);
DependencyDescriptor descriptor_key1;
@@ -669,8 +669,8 @@ TEST_P(RtpSenderVideoTest,
generic.decode_target_indications = {DecodeTargetIndication::kDiscardable,
DecodeTargetIndication::kNotPresent};
hdr.frame_type = VideoFrameType::kVideoFrameDelta;
- rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, nullptr,
- hdr, kDefaultExpectedRetransmissionTimeMs);
+ rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+ kDefaultExpectedRetransmissionTimeMs);
ASSERT_EQ(transport_.packets_sent(), 2);
RtpPacket delta_packet = transport_.last_sent_packet();
@@ -681,8 +681,8 @@ TEST_P(RtpSenderVideoTest,
DecodeTargetIndication::kSwitch};
hdr.frame_type = VideoFrameType::kVideoFrameKey;
rtp_sender_video_.SetVideoStructure(&video_structure2);
- rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, nullptr,
- hdr, kDefaultExpectedRetransmissionTimeMs);
+ rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+ kDefaultExpectedRetransmissionTimeMs);
// Parse the 2nd key frame.
ASSERT_EQ(transport_.packets_sent(), 3);
DependencyDescriptor descriptor_key2;
@@ -736,8 +736,8 @@ TEST_P(RtpSenderVideoTest,
EXPECT_CALL(*encryptor,
Encrypt(_, _, Not(IsEmpty()), ElementsAreArray(kFrame), _, _));
- rtp_sender_video.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, nullptr,
- hdr, kDefaultExpectedRetransmissionTimeMs);
+ rtp_sender_video.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+ kDefaultExpectedRetransmissionTimeMs);
// Double check packet with the dependency descriptor is sent.
ASSERT_EQ(transport_.packets_sent(), 1);
EXPECT_TRUE(transport_.last_sent_packet()
@@ -758,8 +758,8 @@ TEST_P(RtpSenderVideoTest, PopulateGenericFrameDescriptor) {
generic.dependencies.push_back(kFrameId - 1);
generic.dependencies.push_back(kFrameId - 500);
hdr.frame_type = VideoFrameType::kVideoFrameDelta;
- rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, nullptr,
- hdr, kDefaultExpectedRetransmissionTimeMs);
+ rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+ kDefaultExpectedRetransmissionTimeMs);
RtpGenericFrameDescriptor descriptor_wire;
EXPECT_EQ(1, transport_.packets_sent());
@@ -793,7 +793,7 @@ void RtpSenderVideoTest::
generic.frame_id = kFrameId;
hdr.frame_type = VideoFrameType::kVideoFrameDelta;
rtp_sender_video_.SendVideo(kPayload, VideoCodecType::kVideoCodecVP8,
- kTimestamp, 0, kFrame, nullptr, hdr,
+ kTimestamp, 0, kFrame, hdr,
kDefaultExpectedRetransmissionTimeMs);
ASSERT_EQ(transport_.packets_sent(), 1);
@@ -820,7 +820,7 @@ TEST_P(RtpSenderVideoTest, AbsoluteCaptureTime) {
RTPVideoHeader hdr;
hdr.frame_type = VideoFrameType::kVideoFrameKey;
rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp,
- kAbsoluteCaptureTimestampMs, kFrame, nullptr, hdr,
+ kAbsoluteCaptureTimestampMs, kFrame, hdr,
kDefaultExpectedRetransmissionTimeMs);
// It is expected that one and only one of the packets sent on this video
@@ -853,8 +853,8 @@ TEST_P(RtpSenderVideoTest, PopulatesPlayoutDelay) {
auto& vp8_header = hdr.video_type_header.emplace<RTPVideoHeaderVP8>();
vp8_header.temporalIdx = 0;
- rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, nullptr,
- hdr, kDefaultExpectedRetransmissionTimeMs);
+ rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+ kDefaultExpectedRetransmissionTimeMs);
EXPECT_FALSE(
transport_.last_sent_packet().HasExtension<PlayoutDelayLimits>());
@@ -862,8 +862,8 @@ TEST_P(RtpSenderVideoTest, PopulatesPlayoutDelay) {
hdr.playout_delay = kExpectedDelay;
hdr.frame_type = VideoFrameType::kVideoFrameDelta;
vp8_header.temporalIdx = 1;
- rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, nullptr,
- hdr, kDefaultExpectedRetransmissionTimeMs);
+ rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+ kDefaultExpectedRetransmissionTimeMs);
PlayoutDelay received_delay = PlayoutDelay::Noop();
ASSERT_TRUE(transport_.last_sent_packet().GetExtension<PlayoutDelayLimits>(
&received_delay));
@@ -873,23 +873,23 @@ TEST_P(RtpSenderVideoTest, PopulatesPlayoutDelay) {
// be populated since dilvery wasn't guaranteed on the last one.
hdr.playout_delay = PlayoutDelay::Noop(); // Inidcates "no change".
vp8_header.temporalIdx = 0;
- rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, nullptr,
- hdr, kDefaultExpectedRetransmissionTimeMs);
+ rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+ kDefaultExpectedRetransmissionTimeMs);
ASSERT_TRUE(transport_.last_sent_packet().GetExtension<PlayoutDelayLimits>(
&received_delay));
EXPECT_EQ(received_delay, kExpectedDelay);
// The next frame does not need the extensions since it's delivery has
// already been guaranteed.
- rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, nullptr,
- hdr, kDefaultExpectedRetransmissionTimeMs);
+ rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+ kDefaultExpectedRetransmissionTimeMs);
EXPECT_FALSE(
transport_.last_sent_packet().HasExtension<PlayoutDelayLimits>());
// Insert key-frame, we need to refresh the state here.
hdr.frame_type = VideoFrameType::kVideoFrameKey;
- rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, nullptr,
- hdr, kDefaultExpectedRetransmissionTimeMs);
+ rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr,
+ kDefaultExpectedRetransmissionTimeMs);
ASSERT_TRUE(transport_.last_sent_packet().GetExtension<PlayoutDelayLimits>(
&received_delay));
EXPECT_EQ(received_delay, kExpectedDelay);
@@ -975,7 +975,7 @@ TEST_F(RtpSenderVideoWithFrameTransformerTest,
EXPECT_CALL(*mock_frame_transformer, Transform);
rtp_sender_video->SendEncodedImage(kPayload, kType, kTimestamp,
- *encoded_image, nullptr, video_header,
+ *encoded_image, video_header,
kDefaultExpectedRetransmissionTimeMs);
}
@@ -1001,7 +1001,7 @@ TEST_F(RtpSenderVideoWithFrameTransformerTest, OnTransformedFrameSendsVideo) {
encoder_queue.SendTask(
[&] {
rtp_sender_video->SendEncodedImage(
- kPayload, kType, kTimestamp, *encoded_image, nullptr, video_header,
+ kPayload, kType, kTimestamp, *encoded_image, video_header,
kDefaultExpectedRetransmissionTimeMs);
},
RTC_FROM_HERE);
@@ -1047,7 +1047,7 @@ TEST_F(RtpSenderVideoWithFrameTransformerTest,
ElementsAre(DecodeTargetIndication::kSwitch));
});
rtp_sender_video->SendEncodedImage(kPayload, kType, kTimestamp,
- *encoded_image, nullptr, video_header,
+ *encoded_image, video_header,
kDefaultExpectedRetransmissionTimeMs);
}