aboutsummaryrefslogtreecommitdiff
path: root/test/fuzzers
diff options
context:
space:
mode:
Diffstat (limited to 'test/fuzzers')
-rw-r--r--test/fuzzers/BUILD.gn36
-rw-r--r--test/fuzzers/congestion_controller_feedback_fuzzer.cc6
-rw-r--r--test/fuzzers/dcsctp_socket_fuzzer.cc28
-rw-r--r--test/fuzzers/rtp_frame_reference_finder_fuzzer.cc9
-rw-r--r--test/fuzzers/rtp_header_parser_fuzzer.cc21
-rw-r--r--test/fuzzers/rtp_packet_fuzzer.cc9
-rw-r--r--test/fuzzers/sdp_integration_fuzzer.cc8
-rw-r--r--test/fuzzers/utils/BUILD.gn1
-rw-r--r--test/fuzzers/utils/rtp_replayer.cc37
-rw-r--r--test/fuzzers/vp9_encoder_references_fuzzer.cc498
10 files changed, 597 insertions, 56 deletions
diff --git a/test/fuzzers/BUILD.gn b/test/fuzzers/BUILD.gn
index 9395c7a8f1..23ad728dba 100644
--- a/test/fuzzers/BUILD.gn
+++ b/test/fuzzers/BUILD.gn
@@ -245,6 +245,7 @@ webrtc_fuzzer_test("congestion_controller_feedback_fuzzer") {
"../../modules/remote_bitrate_estimator",
"../../modules/rtp_rtcp:rtp_rtcp_format",
]
+ absl_deps = [ "//third_party/abseil-cpp/absl/functional:bind_front" ]
}
rtc_library("audio_decoder_fuzzer") {
@@ -604,6 +605,17 @@ webrtc_fuzzer_test("sctp_utils_fuzzer") {
]
}
+webrtc_fuzzer_test("dcsctp_socket_fuzzer") {
+ sources = [ "dcsctp_socket_fuzzer.cc" ]
+ deps = [
+ "../../net/dcsctp/fuzzers:dcsctp_fuzzers",
+ "../../net/dcsctp/public:socket",
+ "../../net/dcsctp/public:types",
+ "../../net/dcsctp/socket:dcsctp_socket",
+ "../../rtc_base:rtc_base_approved",
+ ]
+}
+
webrtc_fuzzer_test("rtp_header_parser_fuzzer") {
sources = [ "rtp_header_parser_fuzzer.cc" ]
deps = [ "../:rtp_test_utils" ]
@@ -623,6 +635,30 @@ webrtc_fuzzer_test("vp8_replay_fuzzer") {
seed_corpus = "corpora/rtpdump-corpus/vp8"
}
+if (rtc_build_libvpx) {
+ webrtc_fuzzer_test("vp9_encoder_references_fuzzer") {
+ sources = [ "vp9_encoder_references_fuzzer.cc" ]
+ deps = [
+ "..:test_support",
+ "../../api:array_view",
+ "../../api/transport:webrtc_key_value_config",
+ "../../api/video:video_frame",
+ "../../api/video_codecs:video_codecs_api",
+ "../../modules/video_coding:frame_dependencies_calculator",
+ "../../modules/video_coding:mock_libvpx_interface",
+ "../../modules/video_coding:webrtc_vp9",
+ "../../rtc_base:safe_compare",
+ rtc_libvpx_dir,
+ ]
+ absl_deps = [
+ "//third_party/abseil-cpp/absl/algorithm:container",
+ "//third_party/abseil-cpp/absl/base:core_headers",
+ "//third_party/abseil-cpp/absl/container:inlined_vector",
+ ]
+ defines = [ "RTC_ENABLE_VP9" ]
+ }
+}
+
webrtc_fuzzer_test("vp9_replay_fuzzer") {
sources = [ "vp9_replay_fuzzer.cc" ]
deps = [
diff --git a/test/fuzzers/congestion_controller_feedback_fuzzer.cc b/test/fuzzers/congestion_controller_feedback_fuzzer.cc
index 084c8c300a..06a73b0434 100644
--- a/test/fuzzers/congestion_controller_feedback_fuzzer.cc
+++ b/test/fuzzers/congestion_controller_feedback_fuzzer.cc
@@ -8,6 +8,7 @@
* be found in the AUTHORS file in the root of the source tree.
*/
+#include "absl/functional/bind_front.h"
#include "modules/congestion_controller/include/receive_side_congestion_controller.h"
#include "modules/pacing/packet_router.h"
#include "modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h"
@@ -21,7 +22,10 @@ void FuzzOneInput(const uint8_t* data, size_t size) {
return;
SimulatedClock clock(data[i++]);
PacketRouter packet_router;
- ReceiveSideCongestionController cc(&clock, &packet_router);
+ ReceiveSideCongestionController cc(
+ &clock,
+ absl::bind_front(&PacketRouter::SendCombinedRtcpPacket, &packet_router),
+ absl::bind_front(&PacketRouter::SendRemb, &packet_router), nullptr);
RemoteBitrateEstimator* rbe = cc.GetRemoteBitrateEstimator(true);
RTPHeader header;
header.ssrc = ByteReader<uint32_t>::ReadBigEndian(&data[i]);
diff --git a/test/fuzzers/dcsctp_socket_fuzzer.cc b/test/fuzzers/dcsctp_socket_fuzzer.cc
new file mode 100644
index 0000000000..390cbb7f6c
--- /dev/null
+++ b/test/fuzzers/dcsctp_socket_fuzzer.cc
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "net/dcsctp/fuzzers/dcsctp_fuzzers.h"
+#include "net/dcsctp/public/dcsctp_message.h"
+#include "net/dcsctp/public/dcsctp_options.h"
+#include "net/dcsctp/public/dcsctp_socket.h"
+#include "net/dcsctp/socket/dcsctp_socket.h"
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+void FuzzOneInput(const uint8_t* data, size_t size) {
+ dcsctp::dcsctp_fuzzers::FuzzerCallbacks cb;
+ dcsctp::DcSctpOptions options;
+ options.disable_checksum_verification = true;
+ dcsctp::DcSctpSocket socket("A", cb, nullptr, options);
+
+ dcsctp::dcsctp_fuzzers::FuzzSocket(socket, cb,
+ rtc::ArrayView<const uint8_t>(data, size));
+}
+} // namespace webrtc
diff --git a/test/fuzzers/rtp_frame_reference_finder_fuzzer.cc b/test/fuzzers/rtp_frame_reference_finder_fuzzer.cc
index aeeb5c03a4..fdb4aa5f3c 100644
--- a/test/fuzzers/rtp_frame_reference_finder_fuzzer.cc
+++ b/test/fuzzers/rtp_frame_reference_finder_fuzzer.cc
@@ -12,9 +12,7 @@
#include "api/rtp_packet_infos.h"
#include "modules/video_coding/frame_object.h"
-#include "modules/video_coding/packet_buffer.h"
#include "modules/video_coding/rtp_frame_reference_finder.h"
-#include "system_wrappers/include/clock.h"
namespace webrtc {
@@ -58,10 +56,6 @@ class DataReader {
size_t offset_ = 0;
};
-class NullCallback : public OnCompleteFrameCallback {
- void OnCompleteFrame(std::unique_ptr<EncodedFrame> frame) override {}
-};
-
absl::optional<RTPVideoHeader::GenericDescriptorInfo>
GenerateGenericFrameDependencies(DataReader* reader) {
absl::optional<RTPVideoHeader::GenericDescriptorInfo> result;
@@ -91,8 +85,7 @@ GenerateGenericFrameDependencies(DataReader* reader) {
void FuzzOneInput(const uint8_t* data, size_t size) {
DataReader reader(data, size);
- NullCallback cb;
- RtpFrameReferenceFinder reference_finder(&cb);
+ RtpFrameReferenceFinder reference_finder;
auto codec = static_cast<VideoCodecType>(reader.GetNum<uint8_t>() % 5);
diff --git a/test/fuzzers/rtp_header_parser_fuzzer.cc b/test/fuzzers/rtp_header_parser_fuzzer.cc
index d6af5ca3ce..cb5bea2456 100644
--- a/test/fuzzers/rtp_header_parser_fuzzer.cc
+++ b/test/fuzzers/rtp_header_parser_fuzzer.cc
@@ -22,27 +22,6 @@ namespace webrtc {
void FuzzOneInput(const uint8_t* data, size_t size) {
RtpHeaderParser::IsRtcp(data, size);
RtpHeaderParser::GetSsrc(data, size);
- RTPHeader rtp_header;
-
- std::unique_ptr<RtpHeaderParser> rtp_header_parser(
- RtpHeaderParser::CreateForTest());
-
- rtp_header_parser->Parse(data, size, &rtp_header);
- for (int i = 1; i < kRtpExtensionNumberOfExtensions; ++i) {
- if (size > 0 && i >= data[size - 1]) {
- RTPExtensionType add_extension = static_cast<RTPExtensionType>(i);
- rtp_header_parser->RegisterRtpHeaderExtension(add_extension, i);
- }
- }
- rtp_header_parser->Parse(data, size, &rtp_header);
-
- for (int i = 1; i < kRtpExtensionNumberOfExtensions; ++i) {
- if (size > 1 && i >= data[size - 2]) {
- RTPExtensionType remove_extension = static_cast<RTPExtensionType>(i);
- rtp_header_parser->DeregisterRtpHeaderExtension(remove_extension);
- }
- }
- rtp_header_parser->Parse(data, size, &rtp_header);
}
} // namespace webrtc
diff --git a/test/fuzzers/rtp_packet_fuzzer.cc b/test/fuzzers/rtp_packet_fuzzer.cc
index 9e8fd6f4c1..3f2fc5e668 100644
--- a/test/fuzzers/rtp_packet_fuzzer.cc
+++ b/test/fuzzers/rtp_packet_fuzzer.cc
@@ -9,6 +9,7 @@
*/
#include <bitset>
+#include <vector>
#include "absl/types/optional.h"
#include "modules/rtp_rtcp/include/rtp_header_extension_map.h"
@@ -76,6 +77,11 @@ void FuzzOneInput(const uint8_t* data, size_t size) {
uint8_t audio_level;
packet.GetExtension<AudioLevel>(&voice_activity, &audio_level);
break;
+ case kRtpExtensionCsrcAudioLevel: {
+ std::vector<uint8_t> audio_levels;
+ packet.GetExtension<CsrcAudioLevel>(&audio_levels);
+ break;
+ }
case kRtpExtensionAbsoluteSendTime:
uint32_t sendtime;
packet.GetExtension<AbsoluteSendTime>(&sendtime);
@@ -109,10 +115,11 @@ void FuzzOneInput(const uint8_t* data, size_t size) {
VideoContentType content_type;
packet.GetExtension<VideoContentTypeExtension>(&content_type);
break;
- case kRtpExtensionVideoTiming:
+ case kRtpExtensionVideoTiming: {
VideoSendTiming timing;
packet.GetExtension<VideoTimingExtension>(&timing);
break;
+ }
case kRtpExtensionRtpStreamId: {
std::string rsid;
packet.GetExtension<RtpStreamId>(&rsid);
diff --git a/test/fuzzers/sdp_integration_fuzzer.cc b/test/fuzzers/sdp_integration_fuzzer.cc
index dba09721bb..bc181f0573 100644
--- a/test/fuzzers/sdp_integration_fuzzer.cc
+++ b/test/fuzzers/sdp_integration_fuzzer.cc
@@ -34,8 +34,8 @@ void FuzzOneInput(const uint8_t* data, size_t size) {
// Note - we do not do test.ConnectFakeSignaling(); all signals
// generated are discarded.
- rtc::scoped_refptr<MockSetSessionDescriptionObserver> srd_observer(
- new rtc::RefCountedObject<MockSetSessionDescriptionObserver>());
+ auto srd_observer =
+ rtc::make_ref_counted<MockSetSessionDescriptionObserver>();
webrtc::SdpParseError error;
std::unique_ptr<webrtc::SessionDescriptionInterface> sdp(
@@ -47,8 +47,8 @@ void FuzzOneInput(const uint8_t* data, size_t size) {
EXPECT_TRUE_WAIT(srd_observer->called(), 100);
// If set-remote-description was successful, try to answer.
- rtc::scoped_refptr<MockSetSessionDescriptionObserver> sld_observer(
- new rtc::RefCountedObject<MockSetSessionDescriptionObserver>());
+ auto sld_observer =
+ rtc::make_ref_counted<MockSetSessionDescriptionObserver>();
if (srd_observer->result()) {
test.caller()->pc()->SetLocalDescription(sld_observer.get());
EXPECT_TRUE_WAIT(sld_observer->called(), 100);
diff --git a/test/fuzzers/utils/BUILD.gn b/test/fuzzers/utils/BUILD.gn
index 6249156058..3e0782f39d 100644
--- a/test/fuzzers/utils/BUILD.gn
+++ b/test/fuzzers/utils/BUILD.gn
@@ -24,6 +24,7 @@ rtc_library("rtp_replayer") {
"../../../call:call_interfaces",
"../../../common_video",
"../../../media:rtc_internal_video_codecs",
+ "../../../modules/rtp_rtcp:rtp_rtcp_format",
"../../../rtc_base:checks",
"../../../rtc_base:rtc_base_approved",
"../../../rtc_base:rtc_base_tests_utils",
diff --git a/test/fuzzers/utils/rtp_replayer.cc b/test/fuzzers/utils/rtp_replayer.cc
index a664adb31d..43b1fc2ea4 100644
--- a/test/fuzzers/utils/rtp_replayer.cc
+++ b/test/fuzzers/utils/rtp_replayer.cc
@@ -17,13 +17,13 @@
#include "api/task_queue/default_task_queue_factory.h"
#include "api/transport/field_trial_based_config.h"
+#include "modules/rtp_rtcp/source/rtp_packet.h"
#include "rtc_base/strings/json.h"
#include "system_wrappers/include/clock.h"
#include "test/call_config_utils.h"
#include "test/encoder_settings.h"
#include "test/fake_decoder.h"
#include "test/rtp_file_reader.h"
-#include "test/rtp_header_parser.h"
#include "test/run_loop.h"
namespace webrtc {
@@ -164,37 +164,32 @@ void RtpReplayer::ReplayPackets(rtc::FakeClock* clock,
std::min(deliver_in_ms, static_cast<int64_t>(100))));
}
+ rtc::CopyOnWriteBuffer packet_buffer(packet.data, packet.length);
++num_packets;
- switch (call->Receiver()->DeliverPacket(
- webrtc::MediaType::VIDEO,
- rtc::CopyOnWriteBuffer(packet.data, packet.length),
- /* packet_time_us */ -1)) {
+ switch (call->Receiver()->DeliverPacket(webrtc::MediaType::VIDEO,
+ packet_buffer,
+ /* packet_time_us */ -1)) {
case PacketReceiver::DELIVERY_OK:
break;
case PacketReceiver::DELIVERY_UNKNOWN_SSRC: {
- RTPHeader header;
- std::unique_ptr<RtpHeaderParser> parser(
- RtpHeaderParser::CreateForTest());
-
- parser->Parse(packet.data, packet.length, &header);
- if (unknown_packets[header.ssrc] == 0) {
- RTC_LOG(LS_ERROR) << "Unknown SSRC: " << header.ssrc;
+ webrtc::RtpPacket header;
+ header.Parse(packet_buffer);
+ if (unknown_packets[header.Ssrc()] == 0) {
+ RTC_LOG(LS_ERROR) << "Unknown SSRC: " << header.Ssrc();
}
- ++unknown_packets[header.ssrc];
+ ++unknown_packets[header.Ssrc()];
break;
}
case PacketReceiver::DELIVERY_PACKET_ERROR: {
RTC_LOG(LS_ERROR)
<< "Packet error, corrupt packets or incorrect setup?";
- RTPHeader header;
- std::unique_ptr<RtpHeaderParser> parser(
- RtpHeaderParser::CreateForTest());
- parser->Parse(packet.data, packet.length, &header);
+ webrtc::RtpPacket header;
+ header.Parse(packet_buffer);
RTC_LOG(LS_ERROR) << "Packet packet_length=" << packet.length
- << " payload_type=" << header.payloadType
- << " sequence_number=" << header.sequenceNumber
- << " time_stamp=" << header.timestamp
- << " ssrc=" << header.ssrc;
+ << " payload_type=" << header.PayloadType()
+ << " sequence_number=" << header.SequenceNumber()
+ << " time_stamp=" << header.Timestamp()
+ << " ssrc=" << header.Ssrc();
break;
}
}
diff --git a/test/fuzzers/vp9_encoder_references_fuzzer.cc b/test/fuzzers/vp9_encoder_references_fuzzer.cc
new file mode 100644
index 0000000000..9c793ae9aa
--- /dev/null
+++ b/test/fuzzers/vp9_encoder_references_fuzzer.cc
@@ -0,0 +1,498 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdint.h>
+
+#include "absl/algorithm/container.h"
+#include "absl/base/macros.h"
+#include "absl/container/inlined_vector.h"
+#include "api/array_view.h"
+#include "api/transport/webrtc_key_value_config.h"
+#include "api/video/video_frame.h"
+#include "api/video_codecs/video_codec.h"
+#include "api/video_codecs/video_encoder.h"
+#include "modules/video_coding/codecs/interface/mock_libvpx_interface.h"
+#include "modules/video_coding/codecs/vp9/libvpx_vp9_encoder.h"
+#include "modules/video_coding/frame_dependencies_calculator.h"
+#include "rtc_base/numerics/safe_compare.h"
+#include "test/fuzzers/fuzz_data_helper.h"
+#include "test/gmock.h"
+
+// Fuzzer simulates various svc configurations and libvpx encoder dropping
+// layer frames.
+// Validates vp9 encoder wrapper produces consistent frame references.
+namespace webrtc {
+namespace {
+
+using test::FuzzDataHelper;
+using ::testing::NiceMock;
+
+class FrameValidator : public EncodedImageCallback {
+ public:
+ ~FrameValidator() override = default;
+
+ Result OnEncodedImage(const EncodedImage& encoded_image,
+ const CodecSpecificInfo* codec_specific_info) override {
+ RTC_CHECK(codec_specific_info);
+ RTC_CHECK_EQ(codec_specific_info->codecType, kVideoCodecVP9);
+ if (codec_specific_info->codecSpecific.VP9.first_frame_in_picture) {
+ ++picture_id_;
+ }
+ int64_t frame_id = frame_id_++;
+ LayerFrame& layer_frame = frames_[frame_id % kMaxFrameHistorySize];
+ layer_frame.picture_id = picture_id_;
+ layer_frame.spatial_id = encoded_image.SpatialIndex().value_or(0);
+ layer_frame.frame_id = frame_id;
+ layer_frame.temporal_id =
+ codec_specific_info->codecSpecific.VP9.temporal_idx;
+ if (layer_frame.temporal_id == kNoTemporalIdx) {
+ layer_frame.temporal_id = 0;
+ }
+ layer_frame.vp9_non_ref_for_inter_layer_pred =
+ codec_specific_info->codecSpecific.VP9.non_ref_for_inter_layer_pred;
+ CheckVp9References(layer_frame, codec_specific_info->codecSpecific.VP9);
+
+ if (codec_specific_info->generic_frame_info.has_value()) {
+ absl::InlinedVector<int64_t, 5> frame_dependencies =
+ dependencies_calculator_.FromBuffersUsage(
+ frame_id,
+ codec_specific_info->generic_frame_info->encoder_buffers);
+
+ CheckGenericReferences(frame_dependencies,
+ *codec_specific_info->generic_frame_info);
+ CheckGenericAndCodecSpecificReferencesAreConsistent(
+ frame_dependencies, *codec_specific_info, layer_frame);
+ }
+
+ return Result(Result::OK);
+ }
+
+ private:
+  // With 4 spatial layers and patterns up to 8 pictures, it should be enough
+ // to keep 32 last frames to validate dependencies.
+ static constexpr size_t kMaxFrameHistorySize = 32;
+ struct LayerFrame {
+ int64_t frame_id;
+ int64_t picture_id;
+ int spatial_id;
+ int temporal_id;
+ bool vp9_non_ref_for_inter_layer_pred;
+ };
+
+ void CheckVp9References(const LayerFrame& layer_frame,
+ const CodecSpecificInfoVP9& vp9_info) {
+ if (layer_frame.frame_id == 0) {
+ RTC_CHECK(!vp9_info.inter_layer_predicted);
+ } else {
+ const LayerFrame& previous_frame = Frame(layer_frame.frame_id - 1);
+ if (vp9_info.inter_layer_predicted) {
+ RTC_CHECK(!previous_frame.vp9_non_ref_for_inter_layer_pred);
+ RTC_CHECK_EQ(layer_frame.picture_id, previous_frame.picture_id);
+ }
+ if (previous_frame.picture_id == layer_frame.picture_id) {
+ RTC_CHECK_GT(layer_frame.spatial_id, previous_frame.spatial_id);
+ // The check below would fail for temporal shift structures. Remove it
+ // or move it to !flexible_mode section when vp9 encoder starts
+ // supporting such structures.
+ RTC_CHECK_EQ(layer_frame.temporal_id, previous_frame.temporal_id);
+ }
+ }
+ if (!vp9_info.flexible_mode) {
+ if (vp9_info.gof.num_frames_in_gof > 0) {
+ gof_.CopyGofInfoVP9(vp9_info.gof);
+ }
+ RTC_CHECK_EQ(gof_.temporal_idx[vp9_info.gof_idx],
+ layer_frame.temporal_id);
+ }
+ }
+
+ void CheckGenericReferences(rtc::ArrayView<const int64_t> frame_dependencies,
+ const GenericFrameInfo& generic_info) const {
+ for (int64_t dependency_frame_id : frame_dependencies) {
+ RTC_CHECK_GE(dependency_frame_id, 0);
+ const LayerFrame& dependency = Frame(dependency_frame_id);
+ RTC_CHECK_GE(generic_info.spatial_id, dependency.spatial_id);
+ RTC_CHECK_GE(generic_info.temporal_id, dependency.temporal_id);
+ }
+ }
+
+ void CheckGenericAndCodecSpecificReferencesAreConsistent(
+ rtc::ArrayView<const int64_t> frame_dependencies,
+ const CodecSpecificInfo& info,
+ const LayerFrame& layer_frame) const {
+ const CodecSpecificInfoVP9& vp9_info = info.codecSpecific.VP9;
+ const GenericFrameInfo& generic_info = *info.generic_frame_info;
+
+ RTC_CHECK_EQ(generic_info.spatial_id, layer_frame.spatial_id);
+ RTC_CHECK_EQ(generic_info.temporal_id, layer_frame.temporal_id);
+ auto picture_id_diffs =
+ rtc::MakeArrayView(vp9_info.p_diff, vp9_info.num_ref_pics);
+ RTC_CHECK_EQ(
+ frame_dependencies.size(),
+ picture_id_diffs.size() + (vp9_info.inter_layer_predicted ? 1 : 0));
+ for (int64_t dependency_frame_id : frame_dependencies) {
+ RTC_CHECK_GE(dependency_frame_id, 0);
+ const LayerFrame& dependency = Frame(dependency_frame_id);
+ if (dependency.spatial_id != layer_frame.spatial_id) {
+ RTC_CHECK(vp9_info.inter_layer_predicted);
+ RTC_CHECK_EQ(layer_frame.picture_id, dependency.picture_id);
+ RTC_CHECK_GT(layer_frame.spatial_id, dependency.spatial_id);
+ } else {
+ RTC_CHECK(vp9_info.inter_pic_predicted);
+ RTC_CHECK_EQ(layer_frame.spatial_id, dependency.spatial_id);
+ RTC_CHECK(absl::c_linear_search(
+ picture_id_diffs, layer_frame.picture_id - dependency.picture_id));
+ }
+ }
+ }
+
+ const LayerFrame& Frame(int64_t frame_id) const {
+ auto& frame = frames_[frame_id % kMaxFrameHistorySize];
+ RTC_CHECK_EQ(frame.frame_id, frame_id);
+ return frame;
+ }
+
+ GofInfoVP9 gof_;
+ int64_t frame_id_ = 0;
+ int64_t picture_id_ = 1;
+ FrameDependenciesCalculator dependencies_calculator_;
+ LayerFrame frames_[kMaxFrameHistorySize];
+};
+
+class FieldTrials : public WebRtcKeyValueConfig {
+ public:
+ explicit FieldTrials(FuzzDataHelper& config)
+ : flags_(config.ReadOrDefaultValue<uint8_t>(0)) {}
+
+ ~FieldTrials() override = default;
+ std::string Lookup(absl::string_view key) const override {
+ static constexpr absl::string_view kBinaryFieldTrials[] = {
+ "WebRTC-Vp9DependencyDescriptor",
+ "WebRTC-Vp9ExternalRefCtrl",
+ "WebRTC-Vp9IssueKeyFrameOnLayerDeactivation",
+ };
+ for (size_t i = 0; i < ABSL_ARRAYSIZE(kBinaryFieldTrials); ++i) {
+ if (key == kBinaryFieldTrials[i]) {
+ return (flags_ & (1u << i)) ? "Enabled" : "Disabled";
+ }
+ }
+
+ // Ignore following field trials.
+ if (key == "WebRTC-CongestionWindow" ||
+ key == "WebRTC-UseBaseHeavyVP8TL3RateAllocation" ||
+ key == "WebRTC-SimulcastUpswitchHysteresisPercent" ||
+ key == "WebRTC-SimulcastScreenshareUpswitchHysteresisPercent" ||
+ key == "WebRTC-VideoRateControl" ||
+ key == "WebRTC-VP9-PerformanceFlags" ||
+ key == "WebRTC-VP9VariableFramerateScreenshare" ||
+ key == "WebRTC-VP9QualityScaler") {
+ return "";
+ }
+ // Crash when using unexpected field trial to decide if it should be fuzzed
+ // or have a constant value.
+ RTC_CHECK(false) << "Unfuzzed field trial " << key << "\n";
+ }
+
+ private:
+ const uint8_t flags_;
+};
+
+VideoCodec CodecSettings(FuzzDataHelper& rng) {
+ uint16_t config = rng.ReadOrDefaultValue<uint16_t>(0);
+  // Test up to 4 spatial and 4 temporal layers.
+ int num_spatial_layers = 1 + (config & 0b11);
+ int num_temporal_layers = 1 + ((config >> 2) & 0b11);
+
+ VideoCodec codec_settings = {};
+ codec_settings.codecType = kVideoCodecVP9;
+ codec_settings.maxFramerate = 30;
+ codec_settings.width = 320 << (num_spatial_layers - 1);
+ codec_settings.height = 180 << (num_spatial_layers - 1);
+ if (num_spatial_layers > 1) {
+ for (int sid = 0; sid < num_spatial_layers; ++sid) {
+ SpatialLayer& spatial_layer = codec_settings.spatialLayers[sid];
+ codec_settings.width = 320 << sid;
+ codec_settings.height = 180 << sid;
+ spatial_layer.maxFramerate = codec_settings.maxFramerate;
+ spatial_layer.numberOfTemporalLayers = num_temporal_layers;
+ }
+ }
+ codec_settings.VP9()->numberOfSpatialLayers = num_spatial_layers;
+ codec_settings.VP9()->numberOfTemporalLayers = num_temporal_layers;
+ int inter_layer_pred = (config >> 4) & 0b11;
+ // There are only 3 valid values.
+ codec_settings.VP9()->interLayerPred = static_cast<InterLayerPredMode>(
+ inter_layer_pred < 3 ? inter_layer_pred : 0);
+ codec_settings.VP9()->flexibleMode = (config & (1u << 6)) != 0;
+ codec_settings.VP9()->frameDroppingOn = (config & (1u << 7)) != 0;
+ codec_settings.mode = VideoCodecMode::kRealtimeVideo;
+ return codec_settings;
+}
+
+VideoEncoder::Settings EncoderSettings() {
+ return VideoEncoder::Settings(VideoEncoder::Capabilities(false),
+ /*number_of_cores=*/1,
+ /*max_payload_size=*/0);
+}
+
+struct LibvpxState {
+ LibvpxState() {
+ pkt.kind = VPX_CODEC_CX_FRAME_PKT;
+ pkt.data.frame.buf = pkt_buffer;
+ pkt.data.frame.sz = ABSL_ARRAYSIZE(pkt_buffer);
+ layer_id.spatial_layer_id = -1;
+ }
+
+ uint8_t pkt_buffer[1000] = {};
+ vpx_codec_enc_cfg_t config = {};
+ vpx_codec_priv_output_cx_pkt_cb_pair_t callback = {};
+ vpx_image_t img = {};
+ vpx_svc_ref_frame_config_t ref_config = {};
+ vpx_svc_layer_id_t layer_id = {};
+ vpx_svc_frame_drop_t frame_drop = {};
+ vpx_codec_cx_pkt pkt = {};
+};
+
+class StubLibvpx : public NiceMock<MockLibvpxInterface> {
+ public:
+ explicit StubLibvpx(LibvpxState* state) : state_(state) { RTC_CHECK(state_); }
+
+ vpx_codec_err_t codec_enc_config_default(vpx_codec_iface_t* iface,
+ vpx_codec_enc_cfg_t* cfg,
+ unsigned int usage) const override {
+ state_->config = *cfg;
+ return VPX_CODEC_OK;
+ }
+
+ vpx_codec_err_t codec_enc_init(vpx_codec_ctx_t* ctx,
+ vpx_codec_iface_t* iface,
+ const vpx_codec_enc_cfg_t* cfg,
+ vpx_codec_flags_t flags) const override {
+ RTC_CHECK(ctx);
+ ctx->err = VPX_CODEC_OK;
+ return VPX_CODEC_OK;
+ }
+
+ vpx_image_t* img_wrap(vpx_image_t* img,
+ vpx_img_fmt_t fmt,
+ unsigned int d_w,
+ unsigned int d_h,
+ unsigned int stride_align,
+ unsigned char* img_data) const override {
+ state_->img.fmt = fmt;
+ state_->img.d_w = d_w;
+ state_->img.d_h = d_h;
+ return &state_->img;
+ }
+
+ vpx_codec_err_t codec_encode(vpx_codec_ctx_t* ctx,
+ const vpx_image_t* img,
+ vpx_codec_pts_t pts,
+ uint64_t duration,
+ vpx_enc_frame_flags_t flags,
+ uint64_t deadline) const override {
+ if (flags & VPX_EFLAG_FORCE_KF) {
+ state_->pkt.data.frame.flags = VPX_FRAME_IS_KEY;
+ } else {
+ state_->pkt.data.frame.flags = 0;
+ }
+ state_->pkt.data.frame.duration = duration;
+ return VPX_CODEC_OK;
+ }
+
+ vpx_codec_err_t codec_control(vpx_codec_ctx_t* ctx,
+ vp8e_enc_control_id ctrl_id,
+ void* param) const override {
+ if (ctrl_id == VP9E_REGISTER_CX_CALLBACK) {
+ state_->callback =
+ *reinterpret_cast<vpx_codec_priv_output_cx_pkt_cb_pair_t*>(param);
+ }
+ return VPX_CODEC_OK;
+ }
+
+ vpx_codec_err_t codec_control(
+ vpx_codec_ctx_t* ctx,
+ vp8e_enc_control_id ctrl_id,
+ vpx_svc_ref_frame_config_t* param) const override {
+ switch (ctrl_id) {
+ case VP9E_SET_SVC_REF_FRAME_CONFIG:
+ state_->ref_config = *param;
+ break;
+ case VP9E_GET_SVC_REF_FRAME_CONFIG:
+ *param = state_->ref_config;
+ break;
+ default:
+ break;
+ }
+ return VPX_CODEC_OK;
+ }
+
+ vpx_codec_err_t codec_control(vpx_codec_ctx_t* ctx,
+ vp8e_enc_control_id ctrl_id,
+ vpx_svc_layer_id_t* param) const override {
+ switch (ctrl_id) {
+ case VP9E_SET_SVC_LAYER_ID:
+ state_->layer_id = *param;
+ break;
+ case VP9E_GET_SVC_LAYER_ID:
+ *param = state_->layer_id;
+ break;
+ default:
+ break;
+ }
+ return VPX_CODEC_OK;
+ }
+
+ vpx_codec_err_t codec_control(vpx_codec_ctx_t* ctx,
+ vp8e_enc_control_id ctrl_id,
+ vpx_svc_frame_drop_t* param) const override {
+ if (ctrl_id == VP9E_SET_SVC_FRAME_DROP_LAYER) {
+ state_->frame_drop = *param;
+ }
+ return VPX_CODEC_OK;
+ }
+
+ vpx_codec_err_t codec_enc_config_set(
+ vpx_codec_ctx_t* ctx,
+ const vpx_codec_enc_cfg_t* cfg) const override {
+ state_->config = *cfg;
+ return VPX_CODEC_OK;
+ }
+
+ private:
+ LibvpxState* const state_;
+};
+
+enum Actions {
+ kEncode,
+ kSetRates,
+};
+
+// When a layer frame is marked for drop, drops all layer frames from that
+// picture with larger spatial ids.
+constexpr bool DropAbove(uint8_t layers_mask, int sid) {
+ uint8_t full_mask = (uint8_t{1} << (sid + 1)) - 1;
+ return (layers_mask & full_mask) != full_mask;
+}
+// inline unittests
+static_assert(DropAbove(0b1011, /*sid=*/0) == false, "");
+static_assert(DropAbove(0b1011, /*sid=*/1) == false, "");
+static_assert(DropAbove(0b1011, /*sid=*/2) == true, "");
+static_assert(DropAbove(0b1011, /*sid=*/3) == true, "");
+
+// When a layer frame is marked for drop, drops all layer frames from that
+// picture with smaller spatial ids.
+constexpr bool DropBelow(uint8_t layers_mask, int sid, int num_layers) {
+ return (layers_mask >> sid) != (1 << (num_layers - sid)) - 1;
+}
+// inline unittests
+static_assert(DropBelow(0b1101, /*sid=*/0, 4) == true, "");
+static_assert(DropBelow(0b1101, /*sid=*/1, 4) == true, "");
+static_assert(DropBelow(0b1101, /*sid=*/2, 4) == false, "");
+static_assert(DropBelow(0b1101, /*sid=*/3, 4) == false, "");
+
+} // namespace
+
+void FuzzOneInput(const uint8_t* data, size_t size) {
+ FuzzDataHelper helper(rtc::MakeArrayView(data, size));
+
+ FrameValidator validator;
+ FieldTrials field_trials(helper);
+  // Set up the callbacks and state shared with the fake libvpx.
+ LibvpxState state;
+
+ // Initialize encoder
+ LibvpxVp9Encoder encoder(cricket::VideoCodec(),
+ std::make_unique<StubLibvpx>(&state), field_trials);
+ VideoCodec codec = CodecSettings(helper);
+ if (encoder.InitEncode(&codec, EncoderSettings()) != WEBRTC_VIDEO_CODEC_OK) {
+ return;
+ }
+ RTC_CHECK_EQ(encoder.RegisterEncodeCompleteCallback(&validator),
+ WEBRTC_VIDEO_CODEC_OK);
+ {
+ // Enable all the layers initially. Encoder doesn't support producing
+ // frames when no layers are enabled.
+ LibvpxVp9Encoder::RateControlParameters parameters;
+ parameters.framerate_fps = 30.0;
+ for (int sid = 0; sid < codec.VP9()->numberOfSpatialLayers; ++sid) {
+ for (int tid = 0; tid < codec.VP9()->numberOfTemporalLayers; ++tid) {
+ parameters.bitrate.SetBitrate(sid, tid, 100'000);
+ }
+ }
+ encoder.SetRates(parameters);
+ }
+
+ std::vector<VideoFrameType> frame_types(1);
+ VideoFrame fake_image = VideoFrame::Builder()
+ .set_video_frame_buffer(I420Buffer::Create(
+ int{codec.width}, int{codec.height}))
+ .build();
+
+ // Start producing frames at random.
+ while (helper.CanReadBytes(1)) {
+ uint8_t action = helper.Read<uint8_t>();
+ switch (action & 0b11) {
+ case kEncode: {
+ // bitmask of the action: SSSS-K00, where
+ // four S bit indicate which spatial layers should be produced,
+ // K bit indicates if frame should be a key frame.
+ frame_types[0] = (action & 0b100) ? VideoFrameType::kVideoFrameKey
+ : VideoFrameType::kVideoFrameDelta;
+ encoder.Encode(fake_image, &frame_types);
+ uint8_t encode_spatial_layers = (action >> 4);
+ for (size_t sid = 0; sid < state.config.ss_number_layers; ++sid) {
+ bool drop = true;
+ switch (state.frame_drop.framedrop_mode) {
+ case FULL_SUPERFRAME_DROP:
+ drop = encode_spatial_layers == 0;
+ break;
+ case LAYER_DROP:
+ drop = (encode_spatial_layers & (1 << sid)) == 0;
+ break;
+ case CONSTRAINED_LAYER_DROP:
+ drop = DropBelow(encode_spatial_layers, sid,
+ state.config.ss_number_layers);
+ break;
+ case CONSTRAINED_FROM_ABOVE_DROP:
+ drop = DropAbove(encode_spatial_layers, sid);
+ break;
+ }
+ if (!drop) {
+ state.layer_id.spatial_layer_id = sid;
+ state.callback.output_cx_pkt(&state.pkt, state.callback.user_priv);
+ }
+ }
+ } break;
+ case kSetRates: {
+ // bitmask of the action: (S3)(S1)(S0)01,
+ // where Sx is number of temporal layers to enable for spatial layer x
+        // In particular, Sx = 0 indicates spatial layer x should be disabled.
+ LibvpxVp9Encoder::RateControlParameters parameters;
+ parameters.framerate_fps = 30.0;
+ for (int sid = 0; sid < codec.VP9()->numberOfSpatialLayers; ++sid) {
+ int temporal_layers = (action >> ((1 + sid) * 2)) & 0b11;
+ for (int tid = 0; tid < temporal_layers; ++tid) {
+ parameters.bitrate.SetBitrate(sid, tid, 100'000);
+ }
+ }
+        // Ignore allocation that turns off all the layers. In such case
+ // it is up to upper-layer code not to call Encode.
+ if (parameters.bitrate.get_sum_bps() > 0) {
+ encoder.SetRates(parameters);
+ }
+ } break;
+ default:
+        // Unspecified values are noop.
+ break;
+ }
+ }
+}
+} // namespace webrtc