summaryrefslogtreecommitdiff
path: root/media/cast
diff options
context:
space:
mode:
authorTorne (Richard Coles) <torne@google.com>2014-10-27 14:56:43 +0000
committerTorne (Richard Coles) <torne@google.com>2014-10-27 14:56:43 +0000
commit0b0f963dd8b51fbabf01fa148e5d3eff645300e7 (patch)
tree1dd63e715432bebc2a3acba92204d7cdd12ee67b /media/cast
parent12144dc038ba97fbfd95d7c49cccde498c42de8f (diff)
downloadchromium_org-0b0f963dd8b51fbabf01fa148e5d3eff645300e7.tar.gz
Merge from Chromium at DEPS revision 614f7b807940
This commit was generated by merge_to_master.py. Change-Id: I89d093f14f8025ce1e505ce2a2755212c5c4fd2a
Diffstat (limited to 'media/cast')
-rw-r--r--media/cast/cast_defines.h8
-rw-r--r--media/cast/cast_sender_impl.cc13
-rw-r--r--media/cast/cast_sender_impl.h17
-rw-r--r--media/cast/common/clock_drift_smoother.cc5
-rw-r--r--media/cast/logging/encoding_event_subscriber.h6
-rw-r--r--media/cast/logging/log_serializer.cc6
-rw-r--r--media/cast/logging/proto/raw_events.proto2
-rw-r--r--media/cast/logging/receiver_time_offset_estimator.h2
-rw-r--r--media/cast/logging/receiver_time_offset_estimator_impl.h10
-rw-r--r--media/cast/logging/serialize_deserialize_test.cc2
-rw-r--r--media/cast/logging/simple_event_subscriber.h6
-rw-r--r--media/cast/logging/stats_event_subscriber.h6
-rw-r--r--media/cast/net/cast_transport_config.h1
-rw-r--r--media/cast/net/cast_transport_defines.h7
-rw-r--r--media/cast/net/cast_transport_sender_impl.cc50
-rw-r--r--media/cast/net/cast_transport_sender_impl.h34
-rw-r--r--media/cast/net/cast_transport_sender_impl_unittest.cc6
-rw-r--r--media/cast/net/pacing/paced_sender.h12
-rw-r--r--media/cast/net/pacing/paced_sender_unittest.cc6
-rw-r--r--media/cast/net/rtcp/receiver_rtcp_event_subscriber.h6
-rw-r--r--media/cast/net/rtcp/receiver_rtcp_event_subscriber_unittest.cc2
-rw-r--r--media/cast/net/rtcp/rtcp_unittest.cc23
-rw-r--r--media/cast/net/rtp/cast_message_builder_unittest.cc2
-rw-r--r--media/cast/net/rtp/receiver_stats.h10
-rw-r--r--media/cast/net/rtp/rtp_packetizer_unittest.cc17
-rw-r--r--media/cast/net/udp_transport.cc5
-rw-r--r--media/cast/net/udp_transport.h10
-rw-r--r--media/cast/net/udp_transport_unittest.cc2
-rw-r--r--media/cast/receiver/audio_decoder.cc10
-rw-r--r--media/cast/receiver/audio_decoder_unittest.cc2
-rw-r--r--media/cast/receiver/cast_receiver_impl.h20
-rw-r--r--media/cast/receiver/frame_receiver.h4
-rw-r--r--media/cast/receiver/video_decoder.cc8
-rw-r--r--media/cast/receiver/video_decoder_unittest.cc6
-rw-r--r--media/cast/sender/audio_encoder.cc490
-rw-r--r--media/cast/sender/audio_encoder.h2
-rw-r--r--media/cast/sender/audio_encoder_unittest.cc21
-rw-r--r--media/cast/sender/audio_sender.cc37
-rw-r--r--media/cast/sender/audio_sender.h8
-rw-r--r--media/cast/sender/audio_sender_unittest.cc7
-rw-r--r--media/cast/sender/congestion_control.cc80
-rw-r--r--media/cast/sender/congestion_control.h5
-rw-r--r--media/cast/sender/congestion_control_unittest.cc15
-rw-r--r--media/cast/sender/external_video_encoder.cc66
-rw-r--r--media/cast/sender/external_video_encoder.h26
-rw-r--r--media/cast/sender/external_video_encoder_unittest.cc45
-rw-r--r--media/cast/sender/fake_software_video_encoder.cc34
-rw-r--r--media/cast/sender/fake_software_video_encoder.h15
-rw-r--r--media/cast/sender/frame_sender.cc14
-rw-r--r--media/cast/sender/frame_sender.h4
-rw-r--r--media/cast/sender/software_video_encoder.h9
-rw-r--r--media/cast/sender/video_encoder.h14
-rw-r--r--media/cast/sender/video_encoder_impl.cc35
-rw-r--r--media/cast/sender/video_encoder_impl.h27
-rw-r--r--media/cast/sender/video_encoder_impl_unittest.cc57
-rw-r--r--media/cast/sender/video_sender.cc64
-rw-r--r--media/cast/sender/video_sender.h11
-rw-r--r--media/cast/sender/video_sender_unittest.cc46
-rw-r--r--media/cast/sender/vp8_encoder.cc269
-rw-r--r--media/cast/sender/vp8_encoder.h73
-rw-r--r--media/cast/test/cast_benchmarks.cc34
-rw-r--r--media/cast/test/end2end_unittest.cc75
-rw-r--r--media/cast/test/fake_media_source.cc96
-rw-r--r--media/cast/test/fake_media_source.h11
-rw-r--r--media/cast/test/fake_receiver_time_offset_estimator.h10
-rw-r--r--media/cast/test/fake_single_thread_task_runner.h17
-rw-r--r--media/cast/test/fake_video_encode_accelerator.h28
-rw-r--r--media/cast/test/loopback_transport.cc4
-rw-r--r--media/cast/test/loopback_transport.h7
-rw-r--r--media/cast/test/receiver.cc27
-rw-r--r--media/cast/test/simulator.cc62
-rw-r--r--media/cast/test/skewed_single_thread_task_runner.h17
-rw-r--r--media/cast/test/skewed_tick_clock.h2
-rw-r--r--media/cast/test/utility/audio_utility.cc2
-rw-r--r--media/cast/test/utility/in_process_receiver.cc1
-rw-r--r--media/cast/test/utility/standalone_cast_environment.h2
-rw-r--r--media/cast/test/utility/tap_proxy.cc8
-rw-r--r--media/cast/test/utility/udp_proxy.cc48
-rw-r--r--media/cast/test/utility/udp_proxy_main.cc3
79 files changed, 1391 insertions, 863 deletions
diff --git a/media/cast/cast_defines.h b/media/cast/cast_defines.h
index 76e252716e..661c095249 100644
--- a/media/cast/cast_defines.h
+++ b/media/cast/cast_defines.h
@@ -7,6 +7,7 @@
#include <stdint.h>
+#include <cmath>
#include <map>
#include <set>
@@ -178,9 +179,10 @@ inline void ConvertTimeTicksToNtp(const base::TimeTicks& time,
inline base::TimeTicks ConvertNtpToTimeTicks(uint32 ntp_seconds,
uint32 ntp_fractions) {
- int64 ntp_time_us =
- static_cast<int64>(ntp_seconds) * base::Time::kMicrosecondsPerSecond +
- static_cast<int64>(ntp_fractions) / kMagicFractionalUnit;
+ // We need to ceil() here because the calculation of |fractions| in
+ // ConvertTimeToFractions() effectively does a floor().
+ int64 ntp_time_us = ntp_seconds * base::Time::kMicrosecondsPerSecond +
+ static_cast<int64>(std::ceil(ntp_fractions / kMagicFractionalUnit));
base::TimeDelta elapsed_since_unix_epoch = base::TimeDelta::FromMicroseconds(
ntp_time_us -
diff --git a/media/cast/cast_sender_impl.cc b/media/cast/cast_sender_impl.cc
index 7b77517730..571e92ef54 100644
--- a/media/cast/cast_sender_impl.cc
+++ b/media/cast/cast_sender_impl.cc
@@ -21,9 +21,8 @@ class LocalVideoFrameInput : public VideoFrameInput {
base::WeakPtr<VideoSender> video_sender)
: cast_environment_(cast_environment), video_sender_(video_sender) {}
- virtual void InsertRawVideoFrame(
- const scoped_refptr<media::VideoFrame>& video_frame,
- const base::TimeTicks& capture_time) OVERRIDE {
+ void InsertRawVideoFrame(const scoped_refptr<media::VideoFrame>& video_frame,
+ const base::TimeTicks& capture_time) override {
cast_environment_->PostTask(CastEnvironment::MAIN,
FROM_HERE,
base::Bind(&VideoSender::InsertRawVideoFrame,
@@ -33,7 +32,7 @@ class LocalVideoFrameInput : public VideoFrameInput {
}
protected:
- virtual ~LocalVideoFrameInput() {}
+ ~LocalVideoFrameInput() override {}
private:
friend class base::RefCountedThreadSafe<LocalVideoFrameInput>;
@@ -52,8 +51,8 @@ class LocalAudioFrameInput : public AudioFrameInput {
base::WeakPtr<AudioSender> audio_sender)
: cast_environment_(cast_environment), audio_sender_(audio_sender) {}
- virtual void InsertAudio(scoped_ptr<AudioBus> audio_bus,
- const base::TimeTicks& recorded_time) OVERRIDE {
+ void InsertAudio(scoped_ptr<AudioBus> audio_bus,
+ const base::TimeTicks& recorded_time) override {
cast_environment_->PostTask(CastEnvironment::MAIN,
FROM_HERE,
base::Bind(&AudioSender::InsertAudio,
@@ -63,7 +62,7 @@ class LocalAudioFrameInput : public AudioFrameInput {
}
protected:
- virtual ~LocalAudioFrameInput() {}
+ ~LocalAudioFrameInput() override {}
private:
friend class base::RefCountedThreadSafe<LocalAudioFrameInput>;
diff --git a/media/cast/cast_sender_impl.h b/media/cast/cast_sender_impl.h
index e3f16fea2f..b76603e498 100644
--- a/media/cast/cast_sender_impl.h
+++ b/media/cast/cast_sender_impl.h
@@ -27,23 +27,22 @@ class CastSenderImpl : public CastSender {
CastSenderImpl(scoped_refptr<CastEnvironment> cast_environment,
CastTransportSender* const transport_sender);
- virtual void InitializeAudio(
+ void InitializeAudio(
const AudioSenderConfig& audio_config,
- const CastInitializationCallback& cast_initialization_cb) OVERRIDE;
- virtual void InitializeVideo(
+ const CastInitializationCallback& cast_initialization_cb) override;
+ void InitializeVideo(
const VideoSenderConfig& video_config,
const CastInitializationCallback& cast_initialization_cb,
const CreateVideoEncodeAcceleratorCallback& create_vea_cb,
const CreateVideoEncodeMemoryCallback& create_video_encode_mem_cb)
- OVERRIDE;
+ override;
- virtual void SetTargetPlayoutDelay(
- base::TimeDelta new_target_playout_delay) OVERRIDE;
+ void SetTargetPlayoutDelay(base::TimeDelta new_target_playout_delay) override;
- virtual ~CastSenderImpl();
+ ~CastSenderImpl() override;
- virtual scoped_refptr<AudioFrameInput> audio_frame_input() OVERRIDE;
- virtual scoped_refptr<VideoFrameInput> video_frame_input() OVERRIDE;
+ scoped_refptr<AudioFrameInput> audio_frame_input() override;
+ scoped_refptr<VideoFrameInput> video_frame_input() override;
private:
void ReceivedPacket(scoped_ptr<Packet> packet);
diff --git a/media/cast/common/clock_drift_smoother.cc b/media/cast/common/clock_drift_smoother.cc
index aff9a396de..df3ffdb77c 100644
--- a/media/cast/common/clock_drift_smoother.cc
+++ b/media/cast/common/clock_drift_smoother.cc
@@ -27,7 +27,7 @@ void ClockDriftSmoother::Reset(base::TimeTicks now,
base::TimeDelta measured_offset) {
DCHECK(!now.is_null());
last_update_time_ = now;
- estimate_us_ = measured_offset.InMicroseconds();
+ estimate_us_ = static_cast<double>(measured_offset.InMicroseconds());
}
void ClockDriftSmoother::Update(base::TimeTicks now,
@@ -39,7 +39,8 @@ void ClockDriftSmoother::Update(base::TimeTicks now,
// |now| is not monotonically non-decreasing.
NOTREACHED();
} else {
- const double elapsed_us = (now - last_update_time_).InMicroseconds();
+ const double elapsed_us =
+ static_cast<double>((now - last_update_time_).InMicroseconds());
last_update_time_ = now;
const double weight =
elapsed_us / (elapsed_us + time_constant_.InMicroseconds());
diff --git a/media/cast/logging/encoding_event_subscriber.h b/media/cast/logging/encoding_event_subscriber.h
index ca2cccb5f7..a67e5bca2b 100644
--- a/media/cast/logging/encoding_event_subscriber.h
+++ b/media/cast/logging/encoding_event_subscriber.h
@@ -49,11 +49,11 @@ class EncodingEventSubscriber : public RawEventSubscriber {
// timestamp).
EncodingEventSubscriber(EventMediaType event_media_type, size_t max_frames);
- virtual ~EncodingEventSubscriber();
+ ~EncodingEventSubscriber() override;
// RawReventSubscriber implementations.
- virtual void OnReceiveFrameEvent(const FrameEvent& frame_event) OVERRIDE;
- virtual void OnReceivePacketEvent(const PacketEvent& packet_event) OVERRIDE;
+ void OnReceiveFrameEvent(const FrameEvent& frame_event) override;
+ void OnReceivePacketEvent(const PacketEvent& packet_event) override;
// Assigns frame events and packet events received so far to |frame_events|
// and |packet_events| and resets the internal state.
diff --git a/media/cast/logging/log_serializer.cc b/media/cast/logging/log_serializer.cc
index afcf77013f..c5cb252d78 100644
--- a/media/cast/logging/log_serializer.cc
+++ b/media/cast/logging/log_serializer.cc
@@ -47,7 +47,7 @@ bool DoSerializeEvents(const LogMetadata& metadata,
int proto_size = metadata.ByteSize();
DCHECK(proto_size <= kMaxSerializedProtoBytes);
- if (!writer.WriteU16(proto_size))
+ if (!writer.WriteU16(static_cast<uint16>(proto_size)))
return false;
if (!metadata.SerializeToArray(writer.ptr(), writer.remaining()))
return false;
@@ -73,7 +73,7 @@ bool DoSerializeEvents(const LogMetadata& metadata,
DCHECK(proto_size <= kMaxSerializedProtoBytes);
// Write size of the proto, then write the proto.
- if (!writer.WriteU16(proto_size))
+ if (!writer.WriteU16(static_cast<uint16>(proto_size)))
return false;
if (!frame_event.SerializeToArray(writer.ptr(), writer.remaining()))
return false;
@@ -97,7 +97,7 @@ bool DoSerializeEvents(const LogMetadata& metadata,
DCHECK(proto_size <= kMaxSerializedProtoBytes);
// Write size of the proto, then write the proto.
- if (!writer.WriteU16(proto_size))
+ if (!writer.WriteU16(static_cast<uint16>(proto_size)))
return false;
if (!packet_event.SerializeToArray(writer.ptr(), writer.remaining()))
return false;
diff --git a/media/cast/logging/proto/raw_events.proto b/media/cast/logging/proto/raw_events.proto
index 1d2c537db8..e9e75bc219 100644
--- a/media/cast/logging/proto/raw_events.proto
+++ b/media/cast/logging/proto/raw_events.proto
@@ -120,7 +120,7 @@ message AggregatedFrameEvent {
optional int32 encoded_frame_size = 4;
// Only set if there is a frame playout event.
- optional int32 delay_millis = 5;
+ optional int64 delay_millis = 5;
// Only set if there is a video frame encoded event.
optional bool key_frame = 6;
diff --git a/media/cast/logging/receiver_time_offset_estimator.h b/media/cast/logging/receiver_time_offset_estimator.h
index 5880a8d5ac..6fb5f67769 100644
--- a/media/cast/logging/receiver_time_offset_estimator.h
+++ b/media/cast/logging/receiver_time_offset_estimator.h
@@ -24,7 +24,7 @@ namespace cast {
// timestamp.
class ReceiverTimeOffsetEstimator : public RawEventSubscriber {
public:
- virtual ~ReceiverTimeOffsetEstimator() {}
+ ~ReceiverTimeOffsetEstimator() override {}
// If bounds are known, assigns |lower_bound| and |upper_bound| with the
// lower bound and upper bound for the offset value, respectively.
diff --git a/media/cast/logging/receiver_time_offset_estimator_impl.h b/media/cast/logging/receiver_time_offset_estimator_impl.h
index 768ccbdb0f..bc5348b90e 100644
--- a/media/cast/logging/receiver_time_offset_estimator_impl.h
+++ b/media/cast/logging/receiver_time_offset_estimator_impl.h
@@ -36,15 +36,15 @@ class ReceiverTimeOffsetEstimatorImpl : public ReceiverTimeOffsetEstimator {
public:
ReceiverTimeOffsetEstimatorImpl();
- virtual ~ReceiverTimeOffsetEstimatorImpl();
+ ~ReceiverTimeOffsetEstimatorImpl() override;
// RawEventSubscriber implementations.
- virtual void OnReceiveFrameEvent(const FrameEvent& frame_event) OVERRIDE;
- virtual void OnReceivePacketEvent(const PacketEvent& packet_event) OVERRIDE;
+ void OnReceiveFrameEvent(const FrameEvent& frame_event) override;
+ void OnReceivePacketEvent(const PacketEvent& packet_event) override;
// ReceiverTimeOffsetEstimator implementation.
- virtual bool GetReceiverOffsetBounds(base::TimeDelta* lower_bound,
- base::TimeDelta* upper_bound) OVERRIDE;
+ bool GetReceiverOffsetBounds(base::TimeDelta* lower_bound,
+ base::TimeDelta* upper_bound) override;
private:
// This helper uses the difference between sent and recived event
diff --git a/media/cast/logging/serialize_deserialize_test.cc b/media/cast/logging/serialize_deserialize_test.cc
index 7e5aa7d3b5..8c8e2fe7bf 100644
--- a/media/cast/logging/serialize_deserialize_test.cc
+++ b/media/cast/logging/serialize_deserialize_test.cc
@@ -29,7 +29,7 @@ const media::cast::CastLoggingEvent kVideoPacketEvents[] = {
// The frame event fields cycle through these numbers.
const int kEncodedFrameSize[] = {512, 425, 399, 400, 237};
-const int kDelayMillis[] = {15, 4, 8, 42, 23, 16};
+const int64 kDelayMillis[] = {15, 4, 8, 42, 23, 16};
const int kMaxSerializedBytes = 10000;
diff --git a/media/cast/logging/simple_event_subscriber.h b/media/cast/logging/simple_event_subscriber.h
index adc4763f5f..176ab8c0d7 100644
--- a/media/cast/logging/simple_event_subscriber.h
+++ b/media/cast/logging/simple_event_subscriber.h
@@ -22,11 +22,11 @@ class SimpleEventSubscriber : public RawEventSubscriber {
public:
SimpleEventSubscriber();
- virtual ~SimpleEventSubscriber();
+ ~SimpleEventSubscriber() override;
// RawEventSubscriber implementations.
- virtual void OnReceiveFrameEvent(const FrameEvent& frame_event) OVERRIDE;
- virtual void OnReceivePacketEvent(const PacketEvent& packet_event) OVERRIDE;
+ void OnReceiveFrameEvent(const FrameEvent& frame_event) override;
+ void OnReceivePacketEvent(const PacketEvent& packet_event) override;
// Assigns frame events received so far to |frame_events| and clears them
// from this object.
diff --git a/media/cast/logging/stats_event_subscriber.h b/media/cast/logging/stats_event_subscriber.h
index 7662e60fd1..20394021bd 100644
--- a/media/cast/logging/stats_event_subscriber.h
+++ b/media/cast/logging/stats_event_subscriber.h
@@ -32,11 +32,11 @@ class StatsEventSubscriber : public RawEventSubscriber {
base::TickClock* clock,
ReceiverTimeOffsetEstimator* offset_estimator);
- virtual ~StatsEventSubscriber();
+ ~StatsEventSubscriber() override;
// RawReventSubscriber implementations.
- virtual void OnReceiveFrameEvent(const FrameEvent& frame_event) OVERRIDE;
- virtual void OnReceivePacketEvent(const PacketEvent& packet_event) OVERRIDE;
+ void OnReceiveFrameEvent(const FrameEvent& frame_event) override;
+ void OnReceivePacketEvent(const PacketEvent& packet_event) override;
// Returns stats as a DictionaryValue. The dictionary contains one entry -
// "audio" or "video" pointing to an inner dictionary.
diff --git a/media/cast/net/cast_transport_config.h b/media/cast/net/cast_transport_config.h
index c5da103acf..0f101d896e 100644
--- a/media/cast/net/cast_transport_config.h
+++ b/media/cast/net/cast_transport_config.h
@@ -22,6 +22,7 @@ enum Codec {
CODEC_UNKNOWN,
CODEC_AUDIO_OPUS,
CODEC_AUDIO_PCM16,
+ CODEC_AUDIO_AAC,
CODEC_VIDEO_FAKE,
CODEC_VIDEO_VP8,
CODEC_VIDEO_H264,
diff --git a/media/cast/net/cast_transport_defines.h b/media/cast/net/cast_transport_defines.h
index f7d681c7ad..c8b9fc110f 100644
--- a/media/cast/net/cast_transport_defines.h
+++ b/media/cast/net/cast_transport_defines.h
@@ -84,13 +84,6 @@ class FrameIdWrapHelper {
DISALLOW_COPY_AND_ASSIGN(FrameIdWrapHelper);
};
-inline uint32 GetVideoRtpTimestamp(const base::TimeTicks& time_ticks) {
- base::TimeTicks zero_time;
- base::TimeDelta recorded_delta = time_ticks - zero_time;
- // Timestamp is in 90 KHz for video.
- return static_cast<uint32>(recorded_delta.InMilliseconds() * 90);
-}
-
} // namespace cast
} // namespace media
diff --git a/media/cast/net/cast_transport_sender_impl.cc b/media/cast/net/cast_transport_sender_impl.cc
index 6c746f485b..390180f131 100644
--- a/media/cast/net/cast_transport_sender_impl.cc
+++ b/media/cast/net/cast_transport_sender_impl.cc
@@ -9,12 +9,22 @@
#include "media/cast/net/cast_transport_config.h"
#include "media/cast/net/cast_transport_defines.h"
#include "media/cast/net/udp_transport.h"
+#include "net/base/net_errors.h"
#include "net/base/net_util.h"
namespace media {
namespace cast {
namespace {
+
+// See header file for what these mean.
+const char kOptionPacerTargetBurstSize[] = "pacer_target_burst_size";
+const char kOptionPacerMaxBurstSize[] = "pacer_max_burst_size";
+const char kOptionSendBufferMinSize[] = "send_buffer_min_size";
+const char kOptionDscp[] = "DSCP";
+const char kOptionWifiDisableScan[] = "disable_wifi_scan";
+const char kOptionWifiMediaStreamingMode[] = "media_streaming_mode";
+
int LookupOptionWithDefault(const base::DictionaryValue& options,
const std::string& path,
int default_value) {
@@ -26,6 +36,17 @@ int LookupOptionWithDefault(const base::DictionaryValue& options,
}
};
+int32 GetTransportSendBufferSize(const base::DictionaryValue& options) {
+ // Socket send buffer size needs to be at least greater than one burst
+ // size.
+ int32 max_burst_size =
+ LookupOptionWithDefault(options, kOptionPacerMaxBurstSize,
+ kMaxBurstSize) * kMaxIpPacketSize;
+ int32 min_send_buffer_size =
+ LookupOptionWithDefault(options, kOptionSendBufferMinSize, 0);
+ return std::max(max_burst_size, min_send_buffer_size);
+}
+
} // namespace
scoped_ptr<CastTransportSender> CastTransportSender::Create(
@@ -66,17 +87,20 @@ CastTransportSenderImpl::CastTransportSenderImpl(
: clock_(clock),
status_callback_(status_callback),
transport_task_runner_(transport_task_runner),
- transport_(external_transport ? NULL
- : new UdpTransport(net_log,
- transport_task_runner,
- net::IPEndPoint(),
- remote_end_point,
- status_callback)),
- pacer_(LookupOptionWithDefault(*options.get(),
- "pacer_target_burst_size",
+ transport_(
+ external_transport ?
+ NULL :
+ new UdpTransport(net_log,
+ transport_task_runner,
+ net::IPEndPoint(),
+ remote_end_point,
+ GetTransportSendBufferSize(*options),
+ status_callback)),
+ pacer_(LookupOptionWithDefault(*options,
+ kOptionPacerTargetBurstSize,
kTargetBurstSize),
- LookupOptionWithDefault(*options.get(),
- "pacer_max_burst_size",
+ LookupOptionWithDefault(*options,
+ kOptionPacerMaxBurstSize,
kMaxBurstSize),
clock,
&logging_,
@@ -98,7 +122,7 @@ CastTransportSenderImpl::CastTransportSenderImpl(
raw_events_callback_interval);
}
if (transport_) {
- if (options->HasKey("DSCP")) {
+ if (options->HasKey(kOptionDscp)) {
// The default DSCP value for cast is AF41. Which gives it a higher
// priority over other traffic.
transport_->SetDscp(net::DSCP_AF41);
@@ -107,10 +131,10 @@ CastTransportSenderImpl::CastTransportSenderImpl(
base::Bind(&CastTransportSenderImpl::OnReceivedPacket,
weak_factory_.GetWeakPtr()));
int wifi_options = 0;
- if (options->HasKey("disable_wifi_scan")) {
+ if (options->HasKey(kOptionWifiDisableScan)) {
wifi_options |= net::WIFI_OPTIONS_DISABLE_SCAN;
}
- if (options->HasKey("media_streaming_mode")) {
+ if (options->HasKey(kOptionWifiMediaStreamingMode)) {
wifi_options |= net::WIFI_OPTIONS_MEDIA_STREAMING_MODE;
}
if (wifi_options) {
diff --git a/media/cast/net/cast_transport_sender_impl.h b/media/cast/net/cast_transport_sender_impl.h
index cff5561e28..f5c07c927f 100644
--- a/media/cast/net/cast_transport_sender_impl.h
+++ b/media/cast/net/cast_transport_sender_impl.h
@@ -61,6 +61,8 @@ class CastTransportSenderImpl : public CastTransportSender {
// per 10 ms ideally.
// "pacer_max_burst_size": int - specifies how many pakcets to send
// per 10 ms, max
+ // "send_buffer_min_size": int - specifies the minimum socket send buffer
+ // size
// "disable_wifi_scan" (value ignored) - disable wifi scans while streaming
// "media_streaming_mode" (value ignored) - turn media streaming mode on
// Note, these options may be ignored on some platforms.
@@ -75,28 +77,26 @@ class CastTransportSenderImpl : public CastTransportSender {
const scoped_refptr<base::SingleThreadTaskRunner>& transport_task_runner,
PacketSender* external_transport);
- virtual ~CastTransportSenderImpl();
+ ~CastTransportSenderImpl() override;
- virtual void InitializeAudio(const CastTransportRtpConfig& config,
- const RtcpCastMessageCallback& cast_message_cb,
- const RtcpRttCallback& rtt_cb) OVERRIDE;
- virtual void InitializeVideo(const CastTransportRtpConfig& config,
- const RtcpCastMessageCallback& cast_message_cb,
- const RtcpRttCallback& rtt_cb) OVERRIDE;
- virtual void InsertFrame(uint32 ssrc, const EncodedFrame& frame) OVERRIDE;
+ void InitializeAudio(const CastTransportRtpConfig& config,
+ const RtcpCastMessageCallback& cast_message_cb,
+ const RtcpRttCallback& rtt_cb) override;
+ void InitializeVideo(const CastTransportRtpConfig& config,
+ const RtcpCastMessageCallback& cast_message_cb,
+ const RtcpRttCallback& rtt_cb) override;
+ void InsertFrame(uint32 ssrc, const EncodedFrame& frame) override;
- virtual void SendSenderReport(
- uint32 ssrc,
- base::TimeTicks current_time,
- uint32 current_time_as_rtp_timestamp) OVERRIDE;
+ void SendSenderReport(uint32 ssrc,
+ base::TimeTicks current_time,
+ uint32 current_time_as_rtp_timestamp) override;
- virtual void CancelSendingFrames(
- uint32 ssrc,
- const std::vector<uint32>& frame_ids) OVERRIDE;
+ void CancelSendingFrames(uint32 ssrc,
+ const std::vector<uint32>& frame_ids) override;
- virtual void ResendFrameForKickstart(uint32 ssrc, uint32 frame_id) OVERRIDE;
+ void ResendFrameForKickstart(uint32 ssrc, uint32 frame_id) override;
- virtual PacketReceiverCallback PacketReceiverForTesting() OVERRIDE;
+ PacketReceiverCallback PacketReceiverForTesting() override;
private:
FRIEND_TEST_ALL_PREFIXES(CastTransportSenderImplTest, NacksCancelRetransmits);
diff --git a/media/cast/net/cast_transport_sender_impl_unittest.cc b/media/cast/net/cast_transport_sender_impl_unittest.cc
index fa9ec471cf..8330627412 100644
--- a/media/cast/net/cast_transport_sender_impl_unittest.cc
+++ b/media/cast/net/cast_transport_sender_impl_unittest.cc
@@ -31,7 +31,7 @@ class FakePacketSender : public PacketSender {
FakePacketSender()
: paused_(false), packets_sent_(0), bytes_sent_(0) {}
- virtual bool SendPacket(PacketRef packet, const base::Closure& cb) OVERRIDE {
+ bool SendPacket(PacketRef packet, const base::Closure& cb) override {
if (paused_) {
stored_packet_ = packet;
callback_ = cb;
@@ -42,9 +42,7 @@ class FakePacketSender : public PacketSender {
return true;
}
- virtual int64 GetBytesSent() OVERRIDE {
- return bytes_sent_;
- }
+ int64 GetBytesSent() override { return bytes_sent_; }
void SetPaused(bool paused) {
paused_ = paused;
diff --git a/media/cast/net/pacing/paced_sender.h b/media/cast/net/pacing/paced_sender.h
index 66cc6ec462..71349c00b4 100644
--- a/media/cast/net/pacing/paced_sender.h
+++ b/media/cast/net/pacing/paced_sender.h
@@ -89,7 +89,7 @@ class PacedSender : public PacedPacketSender,
PacketSender* external_transport,
const scoped_refptr<base::SingleThreadTaskRunner>& transport_task_runner);
- virtual ~PacedSender();
+ ~PacedSender() override;
// These must be called before non-RTCP packets are sent.
void RegisterAudioSsrc(uint32 audio_ssrc);
@@ -111,11 +111,11 @@ class PacedSender : public PacedPacketSender,
int64 GetLastByteSentForSsrc(uint32 ssrc);
// PacedPacketSender implementation.
- virtual bool SendPackets(const SendPacketVector& packets) OVERRIDE;
- virtual bool ResendPackets(const SendPacketVector& packets,
- const DedupInfo& dedup_info) OVERRIDE;
- virtual bool SendRtcpPacket(uint32 ssrc, PacketRef packet) OVERRIDE;
- virtual void CancelSendingPacket(const PacketKey& packet_key) OVERRIDE;
+ bool SendPackets(const SendPacketVector& packets) override;
+ bool ResendPackets(const SendPacketVector& packets,
+ const DedupInfo& dedup_info) override;
+ bool SendRtcpPacket(uint32 ssrc, PacketRef packet) override;
+ void CancelSendingPacket(const PacketKey& packet_key) override;
private:
// Actually sends the packets to the transport.
diff --git a/media/cast/net/pacing/paced_sender_unittest.cc b/media/cast/net/pacing/paced_sender_unittest.cc
index e1fa5586d1..4a8810df42 100644
--- a/media/cast/net/pacing/paced_sender_unittest.cc
+++ b/media/cast/net/pacing/paced_sender_unittest.cc
@@ -32,7 +32,7 @@ class TestPacketSender : public PacketSender {
public:
TestPacketSender() : bytes_sent_(0) {}
- virtual bool SendPacket(PacketRef packet, const base::Closure& cb) OVERRIDE {
+ bool SendPacket(PacketRef packet, const base::Closure& cb) override {
EXPECT_FALSE(expected_packet_size_.empty());
size_t expected_packet_size = expected_packet_size_.front();
expected_packet_size_.pop_front();
@@ -41,9 +41,7 @@ class TestPacketSender : public PacketSender {
return true;
}
- virtual int64 GetBytesSent() OVERRIDE {
- return bytes_sent_;
- }
+ int64 GetBytesSent() override { return bytes_sent_; }
void AddExpectedSize(int expected_packet_size, int repeat_count) {
for (int i = 0; i < repeat_count; ++i) {
diff --git a/media/cast/net/rtcp/receiver_rtcp_event_subscriber.h b/media/cast/net/rtcp/receiver_rtcp_event_subscriber.h
index 7e80ffebc6..c08733ca94 100644
--- a/media/cast/net/rtcp/receiver_rtcp_event_subscriber.h
+++ b/media/cast/net/rtcp/receiver_rtcp_event_subscriber.h
@@ -37,11 +37,11 @@ class ReceiverRtcpEventSubscriber : public RawEventSubscriber {
ReceiverRtcpEventSubscriber(const size_t max_size_to_retain,
EventMediaType type);
- virtual ~ReceiverRtcpEventSubscriber();
+ ~ReceiverRtcpEventSubscriber() override;
// RawEventSubscriber implementation.
- virtual void OnReceiveFrameEvent(const FrameEvent& frame_event) OVERRIDE;
- virtual void OnReceivePacketEvent(const PacketEvent& packet_event) OVERRIDE;
+ void OnReceiveFrameEvent(const FrameEvent& frame_event) override;
+ void OnReceivePacketEvent(const PacketEvent& packet_event) override;
// Assigns events collected to |rtcp_events| and clears them from this
// object.
diff --git a/media/cast/net/rtcp/receiver_rtcp_event_subscriber_unittest.cc b/media/cast/net/rtcp/receiver_rtcp_event_subscriber_unittest.cc
index 8d975592f2..422340cb92 100644
--- a/media/cast/net/rtcp/receiver_rtcp_event_subscriber_unittest.cc
+++ b/media/cast/net/rtcp/receiver_rtcp_event_subscriber_unittest.cc
@@ -35,7 +35,7 @@ class ReceiverRtcpEventSubscriberTest : public ::testing::Test {
virtual ~ReceiverRtcpEventSubscriberTest() {}
- virtual void TearDown() OVERRIDE {
+ virtual void TearDown() override {
if (event_subscriber_) {
cast_environment_->Logging()->RemoveRawEventSubscriber(
event_subscriber_.get());
diff --git a/media/cast/net/rtcp/rtcp_unittest.cc b/media/cast/net/rtcp/rtcp_unittest.cc
index 0b0d32785b..9363f7062a 100644
--- a/media/cast/net/rtcp/rtcp_unittest.cc
+++ b/media/cast/net/rtcp/rtcp_unittest.cc
@@ -32,23 +32,20 @@ class FakeRtcpTransport : public PacedPacketSender {
base::TimeDelta packet_delay() const { return packet_delay_; }
void set_packet_delay(base::TimeDelta delay) { packet_delay_ = delay; }
- virtual bool SendRtcpPacket(uint32 ssrc, PacketRef packet) OVERRIDE {
+ bool SendRtcpPacket(uint32 ssrc, PacketRef packet) override {
clock_->Advance(packet_delay_);
rtcp_->IncomingRtcpPacket(&packet->data[0], packet->data.size());
return true;
}
- virtual bool SendPackets(const SendPacketVector& packets) OVERRIDE {
- return false;
- }
+ bool SendPackets(const SendPacketVector& packets) override { return false; }
- virtual bool ResendPackets(
- const SendPacketVector& packets, const DedupInfo& dedup_info) OVERRIDE {
+ bool ResendPackets(const SendPacketVector& packets,
+ const DedupInfo& dedup_info) override {
return false;
}
- virtual void CancelSendingPacket(const PacketKey& packet_key) OVERRIDE {
- }
+ void CancelSendingPacket(const PacketKey& packet_key) override {}
private:
base::SimpleTestTickClock* const clock_;
@@ -61,12 +58,12 @@ class FakeRtcpTransport : public PacedPacketSender {
class FakeReceiverStats : public RtpReceiverStatistics {
public:
FakeReceiverStats() {}
- virtual ~FakeReceiverStats() {}
+ ~FakeReceiverStats() override {}
- virtual void GetStatistics(uint8* fraction_lost,
- uint32* cumulative_lost,
- uint32* extended_high_sequence_number,
- uint32* jitter) OVERRIDE {
+ void GetStatistics(uint8* fraction_lost,
+ uint32* cumulative_lost,
+ uint32* extended_high_sequence_number,
+ uint32* jitter) override {
*fraction_lost = 0;
*cumulative_lost = 0;
*extended_high_sequence_number = 0;
diff --git a/media/cast/net/rtp/cast_message_builder_unittest.cc b/media/cast/net/rtp/cast_message_builder_unittest.cc
index 02cfe1095f..708090024a 100644
--- a/media/cast/net/rtp/cast_message_builder_unittest.cc
+++ b/media/cast/net/rtp/cast_message_builder_unittest.cc
@@ -28,7 +28,7 @@ class NackFeedbackVerification : public RtpPayloadFeedback {
NackFeedbackVerification()
: triggered_(false), missing_packets_(), last_frame_acked_(0) {}
- virtual void CastFeedback(const RtcpCastMessage& cast_feedback) OVERRIDE {
+ void CastFeedback(const RtcpCastMessage& cast_feedback) override {
EXPECT_EQ(kSsrc, cast_feedback.media_ssrc);
last_frame_acked_ = cast_feedback.ack_frame_id;
diff --git a/media/cast/net/rtp/receiver_stats.h b/media/cast/net/rtp/receiver_stats.h
index 9de6b22c20..d428403dff 100644
--- a/media/cast/net/rtp/receiver_stats.h
+++ b/media/cast/net/rtp/receiver_stats.h
@@ -16,12 +16,12 @@ namespace cast {
class ReceiverStats : public RtpReceiverStatistics {
public:
explicit ReceiverStats(base::TickClock* clock);
- virtual ~ReceiverStats() OVERRIDE;
+ ~ReceiverStats() override;
- virtual void GetStatistics(uint8* fraction_lost,
- uint32* cumulative_lost, // 24 bits valid.
- uint32* extended_high_sequence_number,
- uint32* jitter) OVERRIDE;
+ void GetStatistics(uint8* fraction_lost,
+ uint32* cumulative_lost, // 24 bits valid.
+ uint32* extended_high_sequence_number,
+ uint32* jitter) override;
void UpdateStatistics(const RtpCastHeader& header);
private:
diff --git a/media/cast/net/rtp/rtp_packetizer_unittest.cc b/media/cast/net/rtp/rtp_packetizer_unittest.cc
index 6ac8ea17ee..aa1b4c59b2 100644
--- a/media/cast/net/rtp/rtp_packetizer_unittest.cc
+++ b/media/cast/net/rtp/rtp_packetizer_unittest.cc
@@ -37,7 +37,7 @@ class TestRtpPacketTransport : public PacketSender {
expected_number_of_packets_(0),
expected_packet_id_(0),
expected_frame_id_(0),
- expectd_rtp_timestamp_(0) {}
+ expected_rtp_timestamp_(0) {}
void VerifyRtpHeader(const RtpCastTestHeader& rtp_header) {
VerifyCommonRtpHeader(rtp_header);
@@ -47,7 +47,7 @@ class TestRtpPacketTransport : public PacketSender {
void VerifyCommonRtpHeader(const RtpCastTestHeader& rtp_header) {
EXPECT_EQ(kPayload, rtp_header.payload_type);
EXPECT_EQ(sequence_number_, rtp_header.sequence_number);
- EXPECT_EQ(expectd_rtp_timestamp_, rtp_header.rtp_timestamp);
+ EXPECT_EQ(expected_rtp_timestamp_, rtp_header.rtp_timestamp);
EXPECT_EQ(config_.ssrc, rtp_header.ssrc);
EXPECT_EQ(0, rtp_header.num_csrcs);
}
@@ -61,7 +61,7 @@ class TestRtpPacketTransport : public PacketSender {
EXPECT_EQ(expected_frame_id_ - 1u, rtp_header.reference_frame_id);
}
- virtual bool SendPacket(PacketRef packet, const base::Closure& cb) OVERRIDE {
+ bool SendPacket(PacketRef packet, const base::Closure& cb) override {
++packets_sent_;
RtpHeaderParser parser(&packet->data[0], packet->data.size());
RtpCastTestHeader rtp_header;
@@ -72,9 +72,7 @@ class TestRtpPacketTransport : public PacketSender {
return true;
}
- virtual int64 GetBytesSent() OVERRIDE {
- return 0;
- }
+ int64 GetBytesSent() override { return 0; }
size_t number_of_packets_received() const { return packets_sent_; }
@@ -83,7 +81,7 @@ class TestRtpPacketTransport : public PacketSender {
}
void set_rtp_timestamp(uint32 rtp_timestamp) {
- expectd_rtp_timestamp_ = rtp_timestamp;
+ expected_rtp_timestamp_ = rtp_timestamp;
}
RtpPacketizerConfig config_;
@@ -94,7 +92,7 @@ class TestRtpPacketTransport : public PacketSender {
// Assuming packets arrive in sequence.
int expected_packet_id_;
uint32 expected_frame_id_;
- uint32 expectd_rtp_timestamp_;
+ uint32 expected_rtp_timestamp_;
DISALLOW_COPY_AND_ASSIGN(TestRtpPacketTransport);
};
@@ -121,8 +119,7 @@ class RtpPacketizerTest : public ::testing::Test {
video_frame_.frame_id = 0;
video_frame_.referenced_frame_id = kStartFrameId;
video_frame_.data.assign(kFrameSize, 123);
- video_frame_.rtp_timestamp =
- GetVideoRtpTimestamp(testing_clock_.NowTicks());
+ video_frame_.rtp_timestamp = 0x0055aa11;
}
void RunTasks(int during_ms) {
diff --git a/media/cast/net/udp_transport.cc b/media/cast/net/udp_transport.cc
index fa9be469ed..43ef862840 100644
--- a/media/cast/net/udp_transport.cc
+++ b/media/cast/net/udp_transport.cc
@@ -42,6 +42,7 @@ UdpTransport::UdpTransport(
const scoped_refptr<base::SingleThreadTaskRunner>& io_thread_proxy,
const net::IPEndPoint& local_end_point,
const net::IPEndPoint& remote_end_point,
+ int32 send_buffer_size,
const CastTransportStatusCallback& status_callback)
: io_thread_proxy_(io_thread_proxy),
local_addr_(local_end_point),
@@ -54,6 +55,7 @@ UdpTransport::UdpTransport(
receive_pending_(false),
client_connected_(false),
next_dscp_value_(net::DSCP_NO_CHANGE),
+ send_buffer_size_(send_buffer_size),
status_callback_(status_callback),
bytes_sent_(0),
weak_factory_(this) {
@@ -85,6 +87,9 @@ void UdpTransport::StartReceiving(
} else {
NOTREACHED() << "Either local or remote address has to be defined.";
}
+ if (udp_socket_->SetSendBufferSize(send_buffer_size_) != net::OK) {
+ LOG(WARNING) << "Failed to set socket send buffer size.";
+ }
ScheduleReceiveNextPacket();
}
diff --git a/media/cast/net/udp_transport.h b/media/cast/net/udp_transport.h
index d88f2f3734..0ee634680f 100644
--- a/media/cast/net/udp_transport.h
+++ b/media/cast/net/udp_transport.h
@@ -35,13 +35,15 @@ class UdpTransport : public PacketSender {
// |remote_end_point| specifies the address and port to send packets
// to. If the value is 0.0.0.0:0 the end point is set to the source
// address of the first packet received.
+ // |send_buffer_size| specifies the size of the socket send buffer.
UdpTransport(
net::NetLog* net_log,
const scoped_refptr<base::SingleThreadTaskRunner>& io_thread_proxy,
const net::IPEndPoint& local_end_point,
const net::IPEndPoint& remote_end_point,
+ int32 send_buffer_size,
const CastTransportStatusCallback& status_callback);
- virtual ~UdpTransport();
+ ~UdpTransport() override;
// Start receiving packets. Packets are submitted to |packet_receiver|.
void StartReceiving(const PacketReceiverCallback& packet_receiver);
@@ -51,9 +53,8 @@ class UdpTransport : public PacketSender {
void SetDscp(net::DiffServCodePoint dscp);
// PacketSender implementations.
- virtual bool SendPacket(PacketRef packet,
- const base::Closure& cb) OVERRIDE;
- virtual int64 GetBytesSent() OVERRIDE;
+ bool SendPacket(PacketRef packet, const base::Closure& cb) override;
+ int64 GetBytesSent() override;
private:
// Requests and processes packets from |udp_socket_|. This method is called
@@ -82,6 +83,7 @@ class UdpTransport : public PacketSender {
scoped_refptr<net::WrappedIOBuffer> recv_buf_;
net::IPEndPoint recv_addr_;
PacketReceiverCallback packet_receiver_;
+ int32 send_buffer_size_;
const CastTransportStatusCallback status_callback_;
int bytes_sent_;
diff --git a/media/cast/net/udp_transport_unittest.cc b/media/cast/net/udp_transport_unittest.cc
index 7ae938b4ca..2bc9bab58a 100644
--- a/media/cast/net/udp_transport_unittest.cc
+++ b/media/cast/net/udp_transport_unittest.cc
@@ -65,11 +65,13 @@ TEST(UdpTransport, SendAndReceive) {
message_loop.message_loop_proxy(),
free_local_port1,
free_local_port2,
+ 65536,
base::Bind(&UpdateCastTransportStatus));
UdpTransport recv_transport(NULL,
message_loop.message_loop_proxy(),
free_local_port2,
net::IPEndPoint(empty_addr_number, 0),
+ 65536,
base::Bind(&UpdateCastTransportStatus));
Packet packet;
diff --git a/media/cast/receiver/audio_decoder.cc b/media/cast/receiver/audio_decoder.cc
index aeed137d14..5f4720e8fc 100644
--- a/media/cast/receiver/audio_decoder.cc
+++ b/media/cast/receiver/audio_decoder.cc
@@ -116,9 +116,9 @@ class AudioDecoder::OpusImpl : public AudioDecoder::ImplBase {
}
private:
- virtual ~OpusImpl() {}
+ ~OpusImpl() override {}
- virtual void RecoverBecauseFramesWereDropped() OVERRIDE {
+ void RecoverBecauseFramesWereDropped() override {
// Passing NULL for the input data notifies the decoder of frame loss.
const opus_int32 result =
opus_decode_float(
@@ -126,7 +126,7 @@ class AudioDecoder::OpusImpl : public AudioDecoder::ImplBase {
DCHECK_GE(result, 0);
}
- virtual scoped_ptr<AudioBus> Decode(uint8* data, int len) OVERRIDE {
+ scoped_ptr<AudioBus> Decode(uint8* data, int len) override {
scoped_ptr<AudioBus> audio_bus;
const opus_int32 num_samples_decoded = opus_decode_float(
opus_decoder_, data, len, buffer_.get(), max_samples_per_frame_, 0);
@@ -175,9 +175,9 @@ class AudioDecoder::Pcm16Impl : public AudioDecoder::ImplBase {
}
private:
- virtual ~Pcm16Impl() {}
+ ~Pcm16Impl() override {}
- virtual scoped_ptr<AudioBus> Decode(uint8* data, int len) OVERRIDE {
+ scoped_ptr<AudioBus> Decode(uint8* data, int len) override {
scoped_ptr<AudioBus> audio_bus;
const int num_samples = len / sizeof(int16) / num_channels_;
if (num_samples <= 0)
diff --git a/media/cast/receiver/audio_decoder_unittest.cc b/media/cast/receiver/audio_decoder_unittest.cc
index 62e4fbbf61..7f467156a9 100644
--- a/media/cast/receiver/audio_decoder_unittest.cc
+++ b/media/cast/receiver/audio_decoder_unittest.cc
@@ -41,7 +41,7 @@ class AudioDecoderTest : public ::testing::TestWithParam<TestScenario> {
}
protected:
- virtual void SetUp() OVERRIDE {
+ virtual void SetUp() override {
audio_decoder_.reset(new AudioDecoder(cast_environment_,
GetParam().num_channels,
GetParam().sampling_rate,
diff --git a/media/cast/receiver/cast_receiver_impl.h b/media/cast/receiver/cast_receiver_impl.h
index 3443561257..5431393d73 100644
--- a/media/cast/receiver/cast_receiver_impl.h
+++ b/media/cast/receiver/cast_receiver_impl.h
@@ -29,18 +29,18 @@ class CastReceiverImpl : public CastReceiver {
const FrameReceiverConfig& video_config,
PacketSender* const packet_sender);
- virtual ~CastReceiverImpl();
+ ~CastReceiverImpl() override;
// CastReceiver implementation.
- virtual PacketReceiverCallback packet_receiver() OVERRIDE;
- virtual void RequestDecodedAudioFrame(
- const AudioFrameDecodedCallback& callback) OVERRIDE;
- virtual void RequestEncodedAudioFrame(
- const ReceiveEncodedFrameCallback& callback) OVERRIDE;
- virtual void RequestDecodedVideoFrame(
- const VideoFrameDecodedCallback& callback) OVERRIDE;
- virtual void RequestEncodedVideoFrame(
- const ReceiveEncodedFrameCallback& callback) OVERRIDE;
+ PacketReceiverCallback packet_receiver() override;
+ void RequestDecodedAudioFrame(
+ const AudioFrameDecodedCallback& callback) override;
+ void RequestEncodedAudioFrame(
+ const ReceiveEncodedFrameCallback& callback) override;
+ void RequestDecodedVideoFrame(
+ const VideoFrameDecodedCallback& callback) override;
+ void RequestEncodedVideoFrame(
+ const ReceiveEncodedFrameCallback& callback) override;
private:
// Forwards |packet| to a specific RTP frame receiver, or drops it if SSRC
diff --git a/media/cast/receiver/frame_receiver.h b/media/cast/receiver/frame_receiver.h
index 67f5417145..4d673f34a7 100644
--- a/media/cast/receiver/frame_receiver.h
+++ b/media/cast/receiver/frame_receiver.h
@@ -52,7 +52,7 @@ class FrameReceiver : public RtpPayloadFeedback,
EventMediaType event_media_type,
PacedPacketSender* const packet_sender);
- virtual ~FrameReceiver();
+ ~FrameReceiver() override;
// Request an encoded frame.
//
@@ -76,7 +76,7 @@ class FrameReceiver : public RtpPayloadFeedback,
size_t payload_size);
// RtpPayloadFeedback implementation.
- virtual void CastFeedback(const RtcpCastMessage& cast_message) OVERRIDE;
+ void CastFeedback(const RtcpCastMessage& cast_message) override;
private:
// Processes ready-to-consume packets from |framer_|, decrypting each packet's
diff --git a/media/cast/receiver/video_decoder.cc b/media/cast/receiver/video_decoder.cc
index 2c7a9fddb8..e536c3e23b 100644
--- a/media/cast/receiver/video_decoder.cc
+++ b/media/cast/receiver/video_decoder.cc
@@ -114,12 +114,12 @@ class VideoDecoder::Vp8Impl : public VideoDecoder::ImplBase {
}
private:
- virtual ~Vp8Impl() {
+ ~Vp8Impl() override {
if (ImplBase::cast_initialization_status_ == STATUS_VIDEO_INITIALIZED)
CHECK_EQ(VPX_CODEC_OK, vpx_codec_destroy(&context_));
}
- virtual scoped_refptr<VideoFrame> Decode(uint8* data, int len) OVERRIDE {
+ scoped_refptr<VideoFrame> Decode(uint8* data, int len) override {
if (len <= 0 || vpx_codec_decode(&context_,
data,
static_cast<unsigned int>(len),
@@ -181,9 +181,9 @@ class VideoDecoder::FakeImpl : public VideoDecoder::ImplBase {
}
private:
- virtual ~FakeImpl() {}
+ ~FakeImpl() override {}
- virtual scoped_refptr<VideoFrame> Decode(uint8* data, int len) OVERRIDE {
+ scoped_refptr<VideoFrame> Decode(uint8* data, int len) override {
// Make sure this is a JSON string.
if (!len || data[0] != '{')
return NULL;
diff --git a/media/cast/receiver/video_decoder_unittest.cc b/media/cast/receiver/video_decoder_unittest.cc
index 95d92b8064..2184707efb 100644
--- a/media/cast/receiver/video_decoder_unittest.cc
+++ b/media/cast/receiver/video_decoder_unittest.cc
@@ -40,7 +40,7 @@ class VideoDecoderTest : public ::testing::TestWithParam<Codec> {
public:
VideoDecoderTest()
: cast_environment_(new StandaloneCastEnvironment()),
- vp8_encoder_(GetVideoSenderConfigForTest(), 0),
+ vp8_encoder_(GetVideoSenderConfigForTest()),
cond_(&lock_) {
vp8_encoder_.Initialize();
}
@@ -51,7 +51,7 @@ class VideoDecoderTest : public ::testing::TestWithParam<Codec> {
}
protected:
- virtual void SetUp() OVERRIDE {
+ virtual void SetUp() override {
video_decoder_.reset(new VideoDecoder(cast_environment_, GetParam()));
CHECK_EQ(STATUS_VIDEO_INITIALIZED, video_decoder_->InitializationResult());
@@ -83,7 +83,7 @@ class VideoDecoderTest : public ::testing::TestWithParam<Codec> {
new EncodedFrame());
// Test only supports VP8, currently.
CHECK_EQ(CODEC_VIDEO_VP8, GetParam());
- vp8_encoder_.Encode(video_frame, encoded_frame.get());
+ vp8_encoder_.Encode(video_frame, base::TimeTicks(), encoded_frame.get());
// Rewrite frame IDs for testing purposes.
encoded_frame->frame_id = last_frame_id_ + 1 + num_dropped_frames;
if (last_frame_id_ == 0)
diff --git a/media/cast/sender/audio_encoder.cc b/media/cast/sender/audio_encoder.cc
index f0c5f8555e..273151f280 100644
--- a/media/cast/sender/audio_encoder.cc
+++ b/media/cast/sender/audio_encoder.cc
@@ -5,6 +5,8 @@
#include "media/cast/sender/audio_encoder.h"
#include <algorithm>
+#include <limits>
+#include <string>
#include "base/bind.h"
#include "base/bind_helpers.h"
@@ -15,25 +17,25 @@
#include "media/base/audio_bus.h"
#include "media/cast/cast_defines.h"
#include "media/cast/cast_environment.h"
+
+#if !defined(OS_IOS)
#include "third_party/opus/src/include/opus.h"
+#endif
+
+#if defined(OS_MACOSX)
+#include <AudioToolbox/AudioToolbox.h>
+#endif
namespace media {
namespace cast {
namespace {
-// The fixed number of audio frames per second and, inversely, the duration of
-// one frame's worth of samples.
-const int kFramesPerSecond = 100;
-const int kFrameDurationMillis = 1000 / kFramesPerSecond; // No remainder!
-
-// Threshold used to decide whether audio being delivered to the encoder is
-// coming in too slow with respect to the capture timestamps.
-const int kUnderrunThresholdMillis = 3 * kFrameDurationMillis;
+const int kUnderrunSkipThreshold = 3;
+const int kDefaultFramesPerSecond = 100;
} // namespace
-
// Base class that handles the common problem of feeding one or more AudioBus'
// data into a buffer and then, once the buffer is full, encoding the signal and
// emitting an EncodedFrame via the FrameEncodedCallback.
@@ -47,13 +49,17 @@ class AudioEncoder::ImplBase
Codec codec,
int num_channels,
int sampling_rate,
+ int samples_per_frame,
const FrameEncodedCallback& callback)
: cast_environment_(cast_environment),
codec_(codec),
num_channels_(num_channels),
- samples_per_frame_(sampling_rate / kFramesPerSecond),
+ samples_per_frame_(samples_per_frame),
callback_(callback),
cast_initialization_status_(STATUS_AUDIO_UNINITIALIZED),
+ frame_duration_(base::TimeDelta::FromMicroseconds(
+ base::Time::kMicrosecondsPerSecond * samples_per_frame_ /
+ sampling_rate)),
buffer_fill_end_(0),
frame_id_(0),
frame_rtp_timestamp_(0),
@@ -61,7 +67,7 @@ class AudioEncoder::ImplBase
// Support for max sampling rate of 48KHz, 2 channels, 100 ms duration.
const int kMaxSamplesTimesChannelsPerFrame = 48 * 2 * 100;
if (num_channels_ <= 0 || samples_per_frame_ <= 0 ||
- sampling_rate % kFramesPerSecond != 0 ||
+ frame_duration_ == base::TimeDelta() ||
samples_per_frame_ * num_channels_ > kMaxSamplesTimesChannelsPerFrame) {
cast_initialization_status_ = STATUS_INVALID_AUDIO_CONFIGURATION;
}
@@ -75,6 +81,8 @@ class AudioEncoder::ImplBase
return samples_per_frame_;
}
+ base::TimeDelta frame_duration() const { return frame_duration_; }
+
void EncodeAudio(scoped_ptr<AudioBus> audio_bus,
const base::TimeTicks& recorded_time) {
DCHECK_EQ(cast_initialization_status_, STATUS_AUDIO_INITIALIZED);
@@ -86,20 +94,16 @@ class AudioEncoder::ImplBase
// frame's RTP timestamp by the estimated number of frames missed. On the
// other hand, don't attempt to resolve overruns: A receiver should
// gracefully deal with an excess of audio data.
- const base::TimeDelta frame_duration =
- base::TimeDelta::FromMilliseconds(kFrameDurationMillis);
base::TimeDelta buffer_fill_duration =
- buffer_fill_end_ * frame_duration / samples_per_frame_;
+ buffer_fill_end_ * frame_duration_ / samples_per_frame_;
if (!frame_capture_time_.is_null()) {
const base::TimeDelta amount_ahead_by =
recorded_time - (frame_capture_time_ + buffer_fill_duration);
- if (amount_ahead_by >
- base::TimeDelta::FromMilliseconds(kUnderrunThresholdMillis)) {
+ const int64 num_frames_missed = amount_ahead_by / frame_duration_;
+ if (num_frames_missed > kUnderrunSkipThreshold) {
samples_dropped_from_buffer_ += buffer_fill_end_;
buffer_fill_end_ = 0;
buffer_fill_duration = base::TimeDelta();
- const int64 num_frames_missed = amount_ahead_by /
- base::TimeDelta::FromMilliseconds(kFrameDurationMillis);
frame_rtp_timestamp_ +=
static_cast<uint32>(num_frames_missed * samples_per_frame_);
DVLOG(1) << "Skipping RTP timestamp ahead to account for "
@@ -145,7 +149,7 @@ class AudioEncoder::ImplBase
buffer_fill_end_ = 0;
++frame_id_;
frame_rtp_timestamp_ += samples_per_frame_;
- frame_capture_time_ += frame_duration;
+ frame_capture_time_ += frame_duration_;
}
}
@@ -168,6 +172,10 @@ class AudioEncoder::ImplBase
// Subclass' ctor is expected to set this to STATUS_AUDIO_INITIALIZED.
CastInitializationStatus cast_initialization_status_;
+ // The duration of one frame of encoded audio samples. Derived from
+ // |samples_per_frame_| and the sampling rate.
+ const base::TimeDelta frame_duration_;
+
private:
// In the case where a call to EncodeAudio() cannot completely fill the
// buffer, this points to the position at which to populate data in a later
@@ -198,6 +206,7 @@ class AudioEncoder::ImplBase
DISALLOW_COPY_AND_ASSIGN(ImplBase);
};
+#if !defined(OS_IOS)
class AudioEncoder::OpusImpl : public AudioEncoder::ImplBase {
public:
OpusImpl(const scoped_refptr<CastEnvironment>& cast_environment,
@@ -209,12 +218,16 @@ class AudioEncoder::OpusImpl : public AudioEncoder::ImplBase {
CODEC_AUDIO_OPUS,
num_channels,
sampling_rate,
+ sampling_rate / kDefaultFramesPerSecond, /* 10 ms frames */
callback),
encoder_memory_(new uint8[opus_encoder_get_size(num_channels)]),
opus_encoder_(reinterpret_cast<OpusEncoder*>(encoder_memory_.get())),
buffer_(new float[num_channels * samples_per_frame_]) {
- if (ImplBase::cast_initialization_status_ != STATUS_AUDIO_UNINITIALIZED)
+ if (ImplBase::cast_initialization_status_ != STATUS_AUDIO_UNINITIALIZED ||
+ sampling_rate % samples_per_frame_ != 0 ||
+ !IsValidFrameDuration(frame_duration_)) {
return;
+ }
if (opus_encoder_init(opus_encoder_,
sampling_rate,
num_channels,
@@ -237,12 +250,12 @@ class AudioEncoder::OpusImpl : public AudioEncoder::ImplBase {
}
private:
- virtual ~OpusImpl() {}
+ ~OpusImpl() override {}
- virtual void TransferSamplesIntoBuffer(const AudioBus* audio_bus,
- int source_offset,
- int buffer_fill_offset,
- int num_samples) OVERRIDE {
+ void TransferSamplesIntoBuffer(const AudioBus* audio_bus,
+ int source_offset,
+ int buffer_fill_offset,
+ int num_samples) override {
// Opus requires channel-interleaved samples in a single array.
for (int ch = 0; ch < audio_bus->channels(); ++ch) {
const float* src = audio_bus->channel(ch) + source_offset;
@@ -253,7 +266,7 @@ class AudioEncoder::OpusImpl : public AudioEncoder::ImplBase {
}
}
- virtual bool EncodeFromFilledBuffer(std::string* out) OVERRIDE {
+ bool EncodeFromFilledBuffer(std::string* out) override {
out->resize(kOpusMaxPayloadSize);
const opus_int32 result =
opus_encode_float(opus_encoder_,
@@ -274,6 +287,16 @@ class AudioEncoder::OpusImpl : public AudioEncoder::ImplBase {
}
}
+ static bool IsValidFrameDuration(base::TimeDelta duration) {
+ // See https://tools.ietf.org/html/rfc6716#section-2.1.4
+ return duration == base::TimeDelta::FromMicroseconds(2500) ||
+ duration == base::TimeDelta::FromMilliseconds(5) ||
+ duration == base::TimeDelta::FromMilliseconds(10) ||
+ duration == base::TimeDelta::FromMilliseconds(20) ||
+ duration == base::TimeDelta::FromMilliseconds(40) ||
+ duration == base::TimeDelta::FromMilliseconds(60);
+ }
+
const scoped_ptr<uint8[]> encoder_memory_;
OpusEncoder* const opus_encoder_;
const scoped_ptr<float[]> buffer_;
@@ -288,6 +311,388 @@ class AudioEncoder::OpusImpl : public AudioEncoder::ImplBase {
DISALLOW_COPY_AND_ASSIGN(OpusImpl);
};
+#endif
+
+#if defined(OS_MACOSX)
+class AudioEncoder::AppleAacImpl : public AudioEncoder::ImplBase {
+ // AAC-LC has two access unit sizes (960 and 1024). The Apple encoder only
+ // supports the latter.
+ static const int kAccessUnitSamples = 1024;
+
+ // Size of an ADTS header (w/o checksum). See
+ // http://wiki.multimedia.cx/index.php?title=ADTS
+ static const int kAdtsHeaderSize = 7;
+
+ public:
+ AppleAacImpl(const scoped_refptr<CastEnvironment>& cast_environment,
+ int num_channels,
+ int sampling_rate,
+ int bitrate,
+ const FrameEncodedCallback& callback)
+ : ImplBase(cast_environment,
+ CODEC_AUDIO_AAC,
+ num_channels,
+ sampling_rate,
+ kAccessUnitSamples,
+ callback),
+ input_buffer_(AudioBus::Create(num_channels, kAccessUnitSamples)),
+ input_bus_(AudioBus::CreateWrapper(num_channels)),
+ max_access_unit_size_(0),
+ output_buffer_(nullptr),
+ converter_(nullptr),
+ file_(nullptr),
+ num_access_units_(0),
+ can_resume_(true) {
+ if (ImplBase::cast_initialization_status_ != STATUS_AUDIO_UNINITIALIZED) {
+ return;
+ }
+ if (!Initialize(sampling_rate, bitrate)) {
+ ImplBase::cast_initialization_status_ =
+ STATUS_INVALID_AUDIO_CONFIGURATION;
+ return;
+ }
+ ImplBase::cast_initialization_status_ = STATUS_AUDIO_INITIALIZED;
+ }
+
+ private:
+ virtual ~AppleAacImpl() { Teardown(); }
+
+ // Destroys the existing audio converter and file, if any.
+ void Teardown() {
+ if (converter_) {
+ AudioConverterDispose(converter_);
+ converter_ = nullptr;
+ }
+ if (file_) {
+ AudioFileClose(file_);
+ file_ = nullptr;
+ }
+ }
+
+ // Initializes the audio converter and file. Calls Teardown to destroy any
+ // existing state. This is so that Initialize() may be called to setup another
+ // converter after a non-resumable interruption.
+ bool Initialize(int sampling_rate, int bitrate) {
+ // Teardown previous audio converter and file.
+ Teardown();
+
+ // Input data comes from AudioBus objects, which carry non-interleaved
+ // packed native-endian float samples. Note that in Core Audio, a frame is
+ // one sample across all channels at a given point in time. When describing
+ // a non-interleaved samples format, the "per frame" fields mean "per
+ // channel" or "per stream", with the exception of |mChannelsPerFrame|. For
+ // uncompressed formats, one packet contains one frame.
+ AudioStreamBasicDescription in_asbd;
+ in_asbd.mSampleRate = sampling_rate;
+ in_asbd.mFormatID = kAudioFormatLinearPCM;
+ in_asbd.mFormatFlags =
+ kAudioFormatFlagsNativeFloatPacked | kAudioFormatFlagIsNonInterleaved;
+ in_asbd.mChannelsPerFrame = num_channels_;
+ in_asbd.mBitsPerChannel = sizeof(float) * 8;
+ in_asbd.mFramesPerPacket = 1;
+ in_asbd.mBytesPerPacket = in_asbd.mBytesPerFrame = sizeof(float);
+ in_asbd.mReserved = 0;
+
+ // Request AAC-LC encoding, with no downmixing or downsampling.
+ AudioStreamBasicDescription out_asbd;
+ memset(&out_asbd, 0, sizeof(AudioStreamBasicDescription));
+ out_asbd.mSampleRate = sampling_rate;
+ out_asbd.mFormatID = kAudioFormatMPEG4AAC;
+ out_asbd.mChannelsPerFrame = num_channels_;
+ UInt32 prop_size = sizeof(out_asbd);
+ if (AudioFormatGetProperty(kAudioFormatProperty_FormatInfo,
+ 0,
+ nullptr,
+ &prop_size,
+ &out_asbd) != noErr) {
+ return false;
+ }
+
+ if (AudioConverterNew(&in_asbd, &out_asbd, &converter_) != noErr) {
+ return false;
+ }
+
+ // The converter will fully specify the output format and update the
+ // relevant fields of the structure, which we can now query.
+ prop_size = sizeof(out_asbd);
+ if (AudioConverterGetProperty(converter_,
+ kAudioConverterCurrentOutputStreamDescription,
+ &prop_size,
+ &out_asbd) != noErr) {
+ return false;
+ }
+
+ // If bitrate is <= 0, allow the encoder to pick a suitable value.
+ // Otherwise, set the bitrate (which can fail if the value is not suitable
+ // or compatible with the output sampling rate or channels).
+ if (bitrate > 0) {
+ prop_size = sizeof(int);
+ if (AudioConverterSetProperty(
+ converter_, kAudioConverterEncodeBitRate, prop_size, &bitrate) !=
+ noErr) {
+ return false;
+ }
+ }
+
+#if defined(OS_IOS)
+ // See the comment next to |can_resume_| for details on resumption. Some
+ // converters can return kAudioConverterErr_PropertyNotSupported, in which
+ // case resumption is implicitly supported. This is the only location where
+ // the implementation modifies |can_resume_|.
+ uint32_t can_resume;
+ prop_size = sizeof(can_resume);
+ OSStatus oserr = AudioConverterGetProperty(
+ converter_,
+ kAudioConverterPropertyCanResumeFromInterruption,
+ &prop_size,
+ &can_resume);
+ if (oserr == noErr) {
+ const_cast<bool&>(can_resume_) = can_resume != 0;
+ }
+#endif
+
+ // Figure out the maximum size of an access unit that the encoder can
+ // produce. |mBytesPerPacket| will be 0 for variable size configurations,
+ // in which case we must query the value.
+ uint32_t max_access_unit_size = out_asbd.mBytesPerPacket;
+ if (max_access_unit_size == 0) {
+ prop_size = sizeof(max_access_unit_size);
+ if (AudioConverterGetProperty(
+ converter_,
+ kAudioConverterPropertyMaximumOutputPacketSize,
+ &prop_size,
+ &max_access_unit_size) != noErr) {
+ return false;
+ }
+ }
+
+ // This is the only location where the implementation modifies
+ // |max_access_unit_size_|.
+ const_cast<uint32_t&>(max_access_unit_size_) = max_access_unit_size;
+
+ // Allocate a buffer to store one access unit. This is the only location
+ // where the implementation modifies |access_unit_buffer_|.
+ const_cast<scoped_ptr<uint8[]>&>(access_unit_buffer_)
+ .reset(new uint8[max_access_unit_size]);
+
+ // Initialize the converter ABL. Note that the buffer size has to be set
+ // before every encode operation, since the field is modified to indicate
+ // the size of the output data (on input it indicates the buffer capacity).
+ converter_abl_.mNumberBuffers = 1;
+ converter_abl_.mBuffers[0].mNumberChannels = num_channels_;
+ converter_abl_.mBuffers[0].mData = access_unit_buffer_.get();
+
+ // The "magic cookie" is an encoder state vector required for decoding and
+ // packetization. It is queried now from |converter_| then set on |file_|
+ // after initialization.
+ UInt32 cookie_size;
+ if (AudioConverterGetPropertyInfo(converter_,
+ kAudioConverterCompressionMagicCookie,
+ &cookie_size,
+ nullptr) != noErr) {
+ return false;
+ }
+ scoped_ptr<uint8[]> cookie_data(new uint8[cookie_size]);
+ if (AudioConverterGetProperty(converter_,
+ kAudioConverterCompressionMagicCookie,
+ &cookie_size,
+ cookie_data.get()) != noErr) {
+ return false;
+ }
+
+ if (AudioFileInitializeWithCallbacks(this,
+ nullptr,
+ &FileWriteCallback,
+ nullptr,
+ nullptr,
+ kAudioFileAAC_ADTSType,
+ &out_asbd,
+ 0,
+ &file_) != noErr) {
+ return false;
+ }
+
+ if (AudioFileSetProperty(file_,
+ kAudioFilePropertyMagicCookieData,
+ cookie_size,
+ cookie_data.get()) != noErr) {
+ return false;
+ }
+
+ // Initially the input bus points to the input buffer. See the comment on
+ // |input_bus_| for more on this optimization.
+ input_bus_->set_frames(kAccessUnitSamples);
+ for (int ch = 0; ch < input_buffer_->channels(); ++ch) {
+ input_bus_->SetChannelData(ch, input_buffer_->channel(ch));
+ }
+
+ return true;
+ }
+
+ void TransferSamplesIntoBuffer(const AudioBus* audio_bus,
+ int source_offset,
+ int buffer_fill_offset,
+ int num_samples) override {
+ DCHECK_EQ(audio_bus->channels(), input_buffer_->channels());
+
+ // See the comment on |input_bus_| for more on this optimization. Note that
+ // we cannot elide the copy if the source offset would result in an
+ // unaligned pointer.
+ if (num_samples == kAccessUnitSamples &&
+ source_offset * sizeof(float) % AudioBus::kChannelAlignment == 0) {
+ DCHECK_EQ(buffer_fill_offset, 0);
+ for (int ch = 0; ch < audio_bus->channels(); ++ch) {
+ auto samples = const_cast<float*>(audio_bus->channel(ch));
+ input_bus_->SetChannelData(ch, samples + source_offset);
+ }
+ return;
+ }
+
+ // Copy the samples into the input buffer.
+ DCHECK_EQ(input_bus_->channel(0), input_buffer_->channel(0));
+ audio_bus->CopyPartialFramesTo(
+ source_offset, num_samples, buffer_fill_offset, input_buffer_.get());
+ }
+
+ bool EncodeFromFilledBuffer(std::string* out) override {
+ // Reset the buffer size field to the buffer capacity.
+ converter_abl_.mBuffers[0].mDataByteSize = max_access_unit_size_;
+
+ // Encode the current input buffer. This is a synchronous call.
+ OSStatus oserr;
+ UInt32 io_num_packets = 1;
+ AudioStreamPacketDescription packet_description;
+ oserr = AudioConverterFillComplexBuffer(converter_,
+ &ConverterFillDataCallback,
+ this,
+ &io_num_packets,
+ &converter_abl_,
+ &packet_description);
+ if (oserr != noErr || io_num_packets == 0) {
+ return false;
+ }
+
+ // Reserve space in the output buffer to write the packet.
+ out->reserve(packet_description.mDataByteSize + kAdtsHeaderSize);
+
+ // Set the current output buffer and emit an ADTS-wrapped AAC access unit.
+ // This is a synchronous call. After it returns, reset the output buffer.
+ output_buffer_ = out;
+ oserr = AudioFileWritePackets(file_,
+ false,
+ converter_abl_.mBuffers[0].mDataByteSize,
+ &packet_description,
+ num_access_units_,
+ &io_num_packets,
+ converter_abl_.mBuffers[0].mData);
+ output_buffer_ = nullptr;
+ if (oserr != noErr || io_num_packets == 0) {
+ return false;
+ }
+ num_access_units_ += io_num_packets;
+ return true;
+ }
+
+ // The |AudioConverterFillComplexBuffer| input callback function. Configures
+ // the provided |AudioBufferList| to alias |input_bus_|. The implementation
+ // can only supply |kAccessUnitSamples| samples as a result of not copying
+ // samples or tracking read and write positions. Note that this function is
+ // called synchronously by |AudioConverterFillComplexBuffer|.
+ static OSStatus ConverterFillDataCallback(
+ AudioConverterRef in_converter,
+ UInt32* io_num_packets,
+ AudioBufferList* io_data,
+ AudioStreamPacketDescription** out_packet_desc,
+ void* in_encoder) {
+ DCHECK(in_encoder);
+ auto encoder = reinterpret_cast<AppleAacImpl*>(in_encoder);
+ auto input_buffer = encoder->input_buffer_.get();
+ auto input_bus = encoder->input_bus_.get();
+
+ DCHECK_EQ(static_cast<int>(*io_num_packets), kAccessUnitSamples);
+ DCHECK_EQ(io_data->mNumberBuffers,
+ static_cast<unsigned>(input_bus->channels()));
+ for (int i_buf = 0, end = io_data->mNumberBuffers; i_buf < end; ++i_buf) {
+ io_data->mBuffers[i_buf].mNumberChannels = 1;
+ io_data->mBuffers[i_buf].mDataByteSize = sizeof(float) * *io_num_packets;
+ io_data->mBuffers[i_buf].mData = input_bus->channel(i_buf);
+
+ // Reset the input bus back to the input buffer. See the comment on
+ // |input_bus_| for more on this optimization.
+ input_bus->SetChannelData(i_buf, input_buffer->channel(i_buf));
+ }
+ return noErr;
+ }
+
+ // The AudioFile write callback function. Appends the data to the encoder's
+ // current |output_buffer_|.
+ static OSStatus FileWriteCallback(void* in_encoder,
+ SInt64 in_position,
+ UInt32 in_size,
+ const void* in_buffer,
+ UInt32* out_size) {
+ DCHECK(in_encoder);
+ DCHECK(in_buffer);
+ auto encoder = reinterpret_cast<const AppleAacImpl*>(in_encoder);
+ auto buffer = reinterpret_cast<const std::string::value_type*>(in_buffer);
+
+ std::string* const output_buffer = encoder->output_buffer_;
+ DCHECK(output_buffer);
+
+ output_buffer->append(buffer, in_size);
+ *out_size = in_size;
+ return noErr;
+ }
+
+ // Buffer that holds one AAC access unit worth of samples. The input callback
+ // function provides samples from this buffer via |input_bus_| to the encoder.
+ const scoped_ptr<AudioBus> input_buffer_;
+
+ // Wrapper AudioBus used by the input callback function. Normally it wraps
+ // |input_buffer_|. However, as an optimization when the client submits a
+ // buffer containing exactly one access unit worth of samples, the bus is
+ // redirected to the client buffer temporarily. We know that the base
+ // implementation will call us right after to encode the buffer and thus we
+ // can eliminate the copy into |input_buffer_|.
+ const scoped_ptr<AudioBus> input_bus_;
+
+ // A buffer that holds one AAC access unit. Initialized in |Initialize| once
+ // the maximum access unit size is known.
+ const scoped_ptr<uint8[]> access_unit_buffer_;
+
+ // The maximum size of an access unit that the encoder can emit.
+ const uint32_t max_access_unit_size_;
+
+ // A temporary pointer to the current output buffer. Only non-null when
+ // writing an access unit. Accessed by the AudioFile write callback function.
+ std::string* output_buffer_;
+
+ // The |AudioConverter| is responsible for AAC encoding. This is a Core Audio
+ // object, not to be confused with |media::AudioConverter|.
+ AudioConverterRef converter_;
+
+ // The |AudioFile| is responsible for ADTS packetization.
+ AudioFileID file_;
+
+ // An |AudioBufferList| passed to the converter to store encoded samples.
+ AudioBufferList converter_abl_;
+
+ // The number of access units emitted so far by the encoder.
+ uint64_t num_access_units_;
+
+ // On iOS, audio codecs can be interrupted by other services (such as an
+ // audio alert or phone call). Depending on the underlying hardware and
+ // configuration, the codec may have to be thrown away and re-initialized
+ // after such an interruption. This flag tracks if we can resume or not from
+ // such an interruption. It is initialized to true, which is the only possible
+ // value on OS X and on most modern iOS hardware.
+ // TODO(jfroy): Implement encoder re-initialization after interruption.
+ // https://crbug.com/424787
+ const bool can_resume_;
+
+ DISALLOW_COPY_AND_ASSIGN(AppleAacImpl);
+};
+#endif // defined(OS_MACOSX)
class AudioEncoder::Pcm16Impl : public AudioEncoder::ImplBase {
public:
@@ -299,6 +704,7 @@ class AudioEncoder::Pcm16Impl : public AudioEncoder::ImplBase {
CODEC_AUDIO_PCM16,
num_channels,
sampling_rate,
+ sampling_rate / kDefaultFramesPerSecond, /* 10 ms frames */
callback),
buffer_(new int16[num_channels * samples_per_frame_]) {
if (ImplBase::cast_initialization_status_ != STATUS_AUDIO_UNINITIALIZED)
@@ -307,12 +713,12 @@ class AudioEncoder::Pcm16Impl : public AudioEncoder::ImplBase {
}
private:
- virtual ~Pcm16Impl() {}
+ ~Pcm16Impl() override {}
- virtual void TransferSamplesIntoBuffer(const AudioBus* audio_bus,
- int source_offset,
- int buffer_fill_offset,
- int num_samples) OVERRIDE {
+ void TransferSamplesIntoBuffer(const AudioBus* audio_bus,
+ int source_offset,
+ int buffer_fill_offset,
+ int num_samples) override {
audio_bus->ToInterleavedPartial(
source_offset,
num_samples,
@@ -320,7 +726,7 @@ class AudioEncoder::Pcm16Impl : public AudioEncoder::ImplBase {
buffer_.get() + buffer_fill_offset * num_channels_);
}
- virtual bool EncodeFromFilledBuffer(std::string* out) OVERRIDE {
+ bool EncodeFromFilledBuffer(std::string* out) override {
// Output 16-bit PCM integers in big-endian byte order.
out->resize(num_channels_ * samples_per_frame_ * sizeof(int16));
const int16* src = buffer_.get();
@@ -349,6 +755,7 @@ AudioEncoder::AudioEncoder(
// as all calls to InsertAudio() are by the same thread.
insert_thread_checker_.DetachFromThread();
switch (codec) {
+#if !defined(OS_IOS)
case CODEC_AUDIO_OPUS:
impl_ = new OpusImpl(cast_environment,
num_channels,
@@ -356,6 +763,16 @@ AudioEncoder::AudioEncoder(
bitrate,
frame_encoded_callback);
break;
+#endif
+#if defined(OS_MACOSX)
+ case CODEC_AUDIO_AAC:
+ impl_ = new AppleAacImpl(cast_environment,
+ num_channels,
+ sampling_rate,
+ bitrate,
+ frame_encoded_callback);
+ break;
+#endif // defined(OS_MACOSX)
case CODEC_AUDIO_PCM16:
impl_ = new Pcm16Impl(cast_environment,
num_channels,
@@ -387,6 +804,15 @@ int AudioEncoder::GetSamplesPerFrame() const {
return impl_->samples_per_frame();
}
+base::TimeDelta AudioEncoder::GetFrameDuration() const {
+ DCHECK(insert_thread_checker_.CalledOnValidThread());
+ if (InitializationResult() != STATUS_AUDIO_INITIALIZED) {
+ NOTREACHED();
+ return base::TimeDelta();
+ }
+ return impl_->frame_duration();
+}
+
void AudioEncoder::InsertAudio(scoped_ptr<AudioBus> audio_bus,
const base::TimeTicks& recorded_time) {
DCHECK(insert_thread_checker_.CalledOnValidThread());
diff --git a/media/cast/sender/audio_encoder.h b/media/cast/sender/audio_encoder.h
index e0a3d8a5ba..8c5bafad77 100644
--- a/media/cast/sender/audio_encoder.h
+++ b/media/cast/sender/audio_encoder.h
@@ -36,6 +36,7 @@ class AudioEncoder {
CastInitializationStatus InitializationResult() const;
int GetSamplesPerFrame() const;
+ base::TimeDelta GetFrameDuration() const;
void InsertAudio(scoped_ptr<AudioBus> audio_bus,
const base::TimeTicks& recorded_time);
@@ -44,6 +45,7 @@ class AudioEncoder {
class ImplBase;
class OpusImpl;
class Pcm16Impl;
+ class AppleAacImpl;
const scoped_refptr<CastEnvironment> cast_environment_;
scoped_refptr<ImplBase> impl_;
diff --git a/media/cast/sender/audio_encoder_unittest.cc b/media/cast/sender/audio_encoder_unittest.cc
index a33ed3bc0e..795ab7259f 100644
--- a/media/cast/sender/audio_encoder_unittest.cc
+++ b/media/cast/sender/audio_encoder_unittest.cc
@@ -39,6 +39,10 @@ class TestEncodedAudioFrameReceiver {
upper_bound_ = upper_bound;
}
+ void SetSamplesPerFrame(int samples_per_frame) {
+ samples_per_frame_ = samples_per_frame;
+ }
+
void FrameEncoded(scoped_ptr<EncodedFrame> encoded_frame,
int samples_skipped) {
EXPECT_EQ(encoded_frame->dependency, EncodedFrame::KEY);
@@ -49,9 +53,7 @@ class TestEncodedAudioFrameReceiver {
// of the fixed frame size.
EXPECT_LE(rtp_lower_bound_, encoded_frame->rtp_timestamp);
rtp_lower_bound_ = encoded_frame->rtp_timestamp;
- // Note: In audio_encoder.cc, 100 is the fixed audio frame rate.
- const int kSamplesPerFrame = kDefaultAudioSamplingRate / 100;
- EXPECT_EQ(0u, encoded_frame->rtp_timestamp % kSamplesPerFrame);
+ EXPECT_EQ(0u, encoded_frame->rtp_timestamp % samples_per_frame_);
EXPECT_TRUE(!encoded_frame->data.empty());
EXPECT_LE(lower_bound_, encoded_frame->reference_time);
@@ -65,6 +67,7 @@ class TestEncodedAudioFrameReceiver {
const Codec codec_;
int frames_received_;
uint32 rtp_lower_bound_;
+ int samples_per_frame_;
base::TimeTicks lower_bound_;
base::TimeTicks upper_bound_;
@@ -116,9 +119,7 @@ class AudioEncoderTest : public ::testing::TestWithParam<TestScenario> {
CreateObjectsForCodec(codec);
- // Note: In audio_encoder.cc, 10 ms is the fixed frame duration.
- const base::TimeDelta frame_duration =
- base::TimeDelta::FromMilliseconds(10);
+ const base::TimeDelta frame_duration = audio_encoder_->GetFrameDuration();
for (size_t i = 0; i < scenario.num_durations; ++i) {
const bool simulate_missing_data = scenario.durations_in_ms[i] < 0;
@@ -160,6 +161,8 @@ class AudioEncoderTest : public ::testing::TestWithParam<TestScenario> {
codec,
base::Bind(&TestEncodedAudioFrameReceiver::FrameEncoded,
base::Unretained(receiver_.get()))));
+
+ receiver_->SetSamplesPerFrame(audio_encoder_->GetSamplesPerFrame());
}
base::SimpleTestTickClock* testing_clock_; // Owned by CastEnvironment.
@@ -180,6 +183,12 @@ TEST_P(AudioEncoderTest, EncodePcm16) {
RunTestForCodec(CODEC_AUDIO_PCM16);
}
+#if defined(OS_MACOSX)
+TEST_P(AudioEncoderTest, EncodeAac) {
+ RunTestForCodec(CODEC_AUDIO_AAC);
+}
+#endif
+
static const int64 kOneCall_3Millis[] = {3};
static const int64 kOneCall_10Millis[] = {10};
static const int64 kOneCall_13Millis[] = {13};
diff --git a/media/cast/sender/audio_sender.cc b/media/cast/sender/audio_sender.cc
index 23fd6d1072..4748218c8e 100644
--- a/media/cast/sender/audio_sender.cc
+++ b/media/cast/sender/audio_sender.cc
@@ -13,34 +13,23 @@
namespace media {
namespace cast {
-namespace {
-
-// TODO(miu): This should be specified in AudioSenderConfig, but currently it is
-// fixed to 100 FPS (i.e., 10 ms per frame), and AudioEncoder assumes this as
-// well.
-const int kAudioFrameRate = 100;
-
-} // namespace
AudioSender::AudioSender(scoped_refptr<CastEnvironment> cast_environment,
const AudioSenderConfig& audio_config,
CastTransportSender* const transport_sender)
- : FrameSender(
- cast_environment,
- true,
- transport_sender,
- base::TimeDelta::FromMilliseconds(audio_config.rtcp_interval),
- audio_config.frequency,
- audio_config.ssrc,
- kAudioFrameRate,
- audio_config.min_playout_delay,
- audio_config.max_playout_delay,
- NewFixedCongestionControl(audio_config.bitrate)),
+ : FrameSender(cast_environment,
+ true,
+ transport_sender,
+ base::TimeDelta::FromMilliseconds(audio_config.rtcp_interval),
+ audio_config.frequency,
+ audio_config.ssrc,
+ 0, // |max_frame_rate_| is set after encoder initialization.
+ audio_config.min_playout_delay,
+ audio_config.max_playout_delay,
+ NewFixedCongestionControl(audio_config.bitrate)),
samples_in_encoder_(0),
weak_factory_(this) {
cast_initialization_status_ = STATUS_AUDIO_UNINITIALIZED;
- VLOG(1) << "max_unacked_frames " << max_unacked_frames_;
- DCHECK_GT(max_unacked_frames_, 0);
if (!audio_config.use_external_encoder) {
audio_encoder_.reset(
@@ -58,6 +47,12 @@ AudioSender::AudioSender(scoped_refptr<CastEnvironment> cast_environment,
cast_initialization_status_ = STATUS_AUDIO_UNINITIALIZED;
}
+ // The number of samples per encoded audio frame depends on the codec and its
+ // initialization parameters. Now that we have an encoder, we can calculate
+ // the maximum frame rate.
+ max_frame_rate_ =
+ audio_config.frequency / audio_encoder_->GetSamplesPerFrame();
+
media::cast::CastTransportRtpConfig transport_config;
transport_config.ssrc = audio_config.ssrc;
transport_config.feedback_ssrc = audio_config.incoming_feedback_ssrc;
diff --git a/media/cast/sender/audio_sender.h b/media/cast/sender/audio_sender.h
index 791cc8e6fd..d7f8c69432 100644
--- a/media/cast/sender/audio_sender.h
+++ b/media/cast/sender/audio_sender.h
@@ -35,7 +35,7 @@ class AudioSender : public FrameSender,
const AudioSenderConfig& audio_config,
CastTransportSender* const transport_sender);
- virtual ~AudioSender();
+ ~AudioSender() override;
CastInitializationStatus InitializationResult() const {
return cast_initialization_status_;
@@ -51,9 +51,9 @@ class AudioSender : public FrameSender,
const base::TimeTicks& recorded_time);
protected:
- virtual int GetNumberOfFramesInEncoder() const OVERRIDE;
- virtual base::TimeDelta GetInFlightMediaDuration() const OVERRIDE;
- virtual void OnAck(uint32 frame_id) OVERRIDE;
+ int GetNumberOfFramesInEncoder() const override;
+ base::TimeDelta GetInFlightMediaDuration() const override;
+ void OnAck(uint32 frame_id) override;
private:
// Called by the |audio_encoder_| with the next EncodedFrame to send.
diff --git a/media/cast/sender/audio_sender_unittest.cc b/media/cast/sender/audio_sender_unittest.cc
index b651c83ba6..af8ac3f319 100644
--- a/media/cast/sender/audio_sender_unittest.cc
+++ b/media/cast/sender/audio_sender_unittest.cc
@@ -25,8 +25,7 @@ class TestPacketSender : public PacketSender {
public:
TestPacketSender() : number_of_rtp_packets_(0), number_of_rtcp_packets_(0) {}
- virtual bool SendPacket(PacketRef packet,
- const base::Closure& cb) OVERRIDE {
+ bool SendPacket(PacketRef packet, const base::Closure& cb) override {
if (Rtcp::IsRtcpPacket(&packet->data[0], packet->data.size())) {
++number_of_rtcp_packets_;
} else {
@@ -41,9 +40,7 @@ class TestPacketSender : public PacketSender {
return true;
}
- virtual int64 GetBytesSent() OVERRIDE {
- return 0;
- }
+ int64 GetBytesSent() override { return 0; }
int number_of_rtp_packets() const { return number_of_rtp_packets_; }
diff --git a/media/cast/sender/congestion_control.cc b/media/cast/sender/congestion_control.cc
index 30e3be7d6e..d14f9b08f3 100644
--- a/media/cast/sender/congestion_control.cc
+++ b/media/cast/sender/congestion_control.cc
@@ -27,23 +27,25 @@ class AdaptiveCongestionControl : public CongestionControl {
AdaptiveCongestionControl(base::TickClock* clock,
uint32 max_bitrate_configured,
uint32 min_bitrate_configured,
- size_t max_unacked_frames);
+ double max_frame_rate);
- virtual ~AdaptiveCongestionControl() OVERRIDE;
+ ~AdaptiveCongestionControl() override;
- virtual void UpdateRtt(base::TimeDelta rtt) OVERRIDE;
+ void UpdateRtt(base::TimeDelta rtt) override;
+
+ void UpdateTargetPlayoutDelay(base::TimeDelta delay) override;
// Called when an encoded frame is sent to the transport.
- virtual void SendFrameToTransport(uint32 frame_id,
- size_t frame_size,
- base::TimeTicks when) OVERRIDE;
+ void SendFrameToTransport(uint32 frame_id,
+ size_t frame_size,
+ base::TimeTicks when) override;
// Called when we receive an ACK for a frame.
- virtual void AckFrame(uint32 frame_id, base::TimeTicks when) OVERRIDE;
+ void AckFrame(uint32 frame_id, base::TimeTicks when) override;
// Returns the bitrate we should use for the next frame.
- virtual uint32 GetBitrate(base::TimeTicks playout_time,
- base::TimeDelta playout_delay) OVERRIDE;
+ uint32 GetBitrate(base::TimeTicks playout_time,
+ base::TimeDelta playout_delay) override;
private:
struct FrameStats {
@@ -61,6 +63,8 @@ class AdaptiveCongestionControl : public CongestionControl {
// Get the FrameStats for a given |frame_id|.
// Note: Older FrameStats will be removed automatically.
FrameStats* GetFrameStats(uint32 frame_id);
+ // Discard old FrameStats.
+ void PruneFrameStats();
// Calculate a safe bitrate. This is based on how much we've been
// sending in the past.
double CalculateSafeBitrate();
@@ -76,6 +80,7 @@ class AdaptiveCongestionControl : public CongestionControl {
base::TickClock* const clock_; // Not owned by this class.
const uint32 max_bitrate_configured_;
const uint32 min_bitrate_configured_;
+ const double max_frame_rate_;
std::deque<FrameStats> frame_stats_;
uint32 last_frame_stats_;
uint32 last_acked_frame_;
@@ -91,24 +96,23 @@ class AdaptiveCongestionControl : public CongestionControl {
class FixedCongestionControl : public CongestionControl {
public:
FixedCongestionControl(uint32 bitrate) : bitrate_(bitrate) {}
- virtual ~FixedCongestionControl() OVERRIDE {}
+ ~FixedCongestionControl() override {}
- virtual void UpdateRtt(base::TimeDelta rtt) OVERRIDE {
- }
+ void UpdateRtt(base::TimeDelta rtt) override {}
+
+ void UpdateTargetPlayoutDelay(base::TimeDelta delay) override {}
// Called when an encoded frame is sent to the transport.
- virtual void SendFrameToTransport(uint32 frame_id,
- size_t frame_size,
- base::TimeTicks when) OVERRIDE {
- }
+ void SendFrameToTransport(uint32 frame_id,
+ size_t frame_size,
+ base::TimeTicks when) override {}
// Called when we receive an ACK for a frame.
- virtual void AckFrame(uint32 frame_id, base::TimeTicks when) OVERRIDE {
- }
+ void AckFrame(uint32 frame_id, base::TimeTicks when) override {}
// Returns the bitrate we should use for the next frame.
- virtual uint32 GetBitrate(base::TimeTicks playout_time,
- base::TimeDelta playout_delay) OVERRIDE {
+ uint32 GetBitrate(base::TimeTicks playout_time,
+ base::TimeDelta playout_delay) override {
return bitrate_;
}
@@ -122,11 +126,11 @@ CongestionControl* NewAdaptiveCongestionControl(
base::TickClock* clock,
uint32 max_bitrate_configured,
uint32 min_bitrate_configured,
- size_t max_unacked_frames) {
+ double max_frame_rate) {
return new AdaptiveCongestionControl(clock,
max_bitrate_configured,
min_bitrate_configured,
- max_unacked_frames);
+ max_frame_rate);
}
CongestionControl* NewFixedCongestionControl(uint32 bitrate) {
@@ -150,14 +154,15 @@ AdaptiveCongestionControl::AdaptiveCongestionControl(
base::TickClock* clock,
uint32 max_bitrate_configured,
uint32 min_bitrate_configured,
- size_t max_unacked_frames)
+ double max_frame_rate)
: clock_(clock),
max_bitrate_configured_(max_bitrate_configured),
min_bitrate_configured_(min_bitrate_configured),
+ max_frame_rate_(max_frame_rate),
last_frame_stats_(static_cast<uint32>(-1)),
last_acked_frame_(static_cast<uint32>(-1)),
last_encoded_frame_(static_cast<uint32>(-1)),
- history_size_(max_unacked_frames + kHistorySize),
+ history_size_(kHistorySize),
acked_bits_in_history_(0) {
DCHECK_GE(max_bitrate_configured, min_bitrate_configured) << "Invalid config";
frame_stats_.resize(2);
@@ -175,6 +180,17 @@ void AdaptiveCongestionControl::UpdateRtt(base::TimeDelta rtt) {
rtt_ = (7 * rtt_ + rtt) / 8;
}
+void AdaptiveCongestionControl::UpdateTargetPlayoutDelay(
+ base::TimeDelta delay) {
+ const int max_unacked_frames =
+ std::min(kMaxUnackedFrames,
+ 1 + static_cast<int>(delay * max_frame_rate_ /
+ base::TimeDelta::FromSeconds(1)));
+ DCHECK_GT(max_unacked_frames, 0);
+ history_size_ = max_unacked_frames + kHistorySize;
+ PruneFrameStats();
+}
+
// Calculate how much "dead air" there is between two frames.
base::TimeDelta AdaptiveCongestionControl::DeadTime(const FrameStats& a,
const FrameStats& b) {
@@ -205,7 +221,16 @@ AdaptiveCongestionControl::GetFrameStats(uint32 frame_id) {
last_frame_stats_ += offset;
offset = 0;
}
- while (frame_stats_.size() > history_size_) {
+ PruneFrameStats();
+ offset += frame_stats_.size() - 1;
+ if (offset < 0 || offset >= static_cast<int32>(frame_stats_.size())) {
+ return NULL;
+ }
+ return &frame_stats_[offset];
+}
+
+void AdaptiveCongestionControl::PruneFrameStats() {
+ while (frame_stats_.size() > history_size_) {
DCHECK_GT(frame_stats_.size(), 1UL);
DCHECK(!frame_stats_[0].ack_time.is_null());
acked_bits_in_history_ -= frame_stats_[0].frame_size;
@@ -215,11 +240,6 @@ AdaptiveCongestionControl::GetFrameStats(uint32 frame_id) {
DCHECK_GE(dead_time_in_history_.InSecondsF(), 0.0);
frame_stats_.pop_front();
}
- offset += frame_stats_.size() - 1;
- if (offset < 0 || offset >= static_cast<int32>(frame_stats_.size())) {
- return NULL;
- }
- return &frame_stats_[offset];
}
void AdaptiveCongestionControl::AckFrame(uint32 frame_id,
diff --git a/media/cast/sender/congestion_control.h b/media/cast/sender/congestion_control.h
index 5d1256f75a..8c3d764e20 100644
--- a/media/cast/sender/congestion_control.h
+++ b/media/cast/sender/congestion_control.h
@@ -22,6 +22,9 @@ class CongestionControl {
// Called with latest measured rtt value.
virtual void UpdateRtt(base::TimeDelta rtt) = 0;
+ // Called with an updated target playout delay value.
+ virtual void UpdateTargetPlayoutDelay(base::TimeDelta delay) = 0;
+
// Called when an encoded frame is sent to the transport.
virtual void SendFrameToTransport(uint32 frame_id,
size_t frame_size,
@@ -38,7 +41,7 @@ CongestionControl* NewAdaptiveCongestionControl(
base::TickClock* clock,
uint32 max_bitrate_configured,
uint32 min_bitrate_configured,
- size_t max_unacked_frames);
+ double max_frame_rate);
CongestionControl* NewFixedCongestionControl(uint32 bitrate);
diff --git a/media/cast/sender/congestion_control_unittest.cc b/media/cast/sender/congestion_control_unittest.cc
index ec68c02625..bf76a35c27 100644
--- a/media/cast/sender/congestion_control_unittest.cc
+++ b/media/cast/sender/congestion_control_unittest.cc
@@ -16,6 +16,8 @@ namespace cast {
static const uint32 kMaxBitrateConfigured = 5000000;
static const uint32 kMinBitrateConfigured = 500000;
+static const int64 kFrameDelayMs = 33;
+static const double kMaxFrameRate = 1000.0 / kFrameDelayMs;
static const int64 kStartMillisecond = INT64_C(12345678900000);
static const double kTargetEmptyBufferFraction = 0.9;
@@ -26,7 +28,13 @@ class CongestionControlTest : public ::testing::Test {
testing_clock_.Advance(
base::TimeDelta::FromMilliseconds(kStartMillisecond));
congestion_control_.reset(NewAdaptiveCongestionControl(
- &testing_clock_, kMaxBitrateConfigured, kMinBitrateConfigured, 10));
+ &testing_clock_, kMaxBitrateConfigured, kMinBitrateConfigured,
+ kMaxFrameRate));
+ const int max_unacked_frames = 10;
+ const base::TimeDelta target_playout_delay =
+ (max_unacked_frames - 1) * base::TimeDelta::FromSeconds(1) /
+ kMaxFrameRate;
+ congestion_control_->UpdateTargetPlayoutDelay(target_playout_delay);
}
void AckFrame(uint32 frame_id) {
@@ -60,17 +68,16 @@ class CongestionControlTest : public ::testing::Test {
};
TEST_F(CongestionControlTest, SimpleRun) {
- uint32 frame_delay = 33;
uint32 frame_size = 10000 * 8;
Run(500,
frame_size,
base::TimeDelta::FromMilliseconds(10),
- base::TimeDelta::FromMilliseconds(frame_delay),
+ base::TimeDelta::FromMilliseconds(kFrameDelayMs),
base::TimeDelta::FromMilliseconds(45));
// Empty the buffer.
task_runner_->Sleep(base::TimeDelta::FromMilliseconds(100));
- uint32 safe_bitrate = frame_size * 1000 / frame_delay;
+ uint32 safe_bitrate = frame_size * 1000 / kFrameDelayMs;
uint32 bitrate = congestion_control_->GetBitrate(
testing_clock_.NowTicks() + base::TimeDelta::FromMilliseconds(300),
base::TimeDelta::FromMilliseconds(300));
diff --git a/media/cast/sender/external_video_encoder.cc b/media/cast/sender/external_video_encoder.cc
index 5aca424d47..6dec102436 100644
--- a/media/cast/sender/external_video_encoder.cc
+++ b/media/cast/sender/external_video_encoder.cc
@@ -41,14 +41,17 @@ namespace media {
namespace cast {
// Container for the associated data of a video frame being processed.
-struct EncodedFrameReturnData {
- EncodedFrameReturnData(base::TimeTicks c_time,
- VideoEncoder::FrameEncodedCallback callback) {
- capture_time = c_time;
- frame_encoded_callback = callback;
- }
- base::TimeTicks capture_time;
- VideoEncoder::FrameEncodedCallback frame_encoded_callback;
+struct InProgressFrameEncode {
+ const RtpTimestamp rtp_timestamp;
+ const base::TimeTicks reference_time;
+ const VideoEncoder::FrameEncodedCallback frame_encoded_callback;
+
+ InProgressFrameEncode(RtpTimestamp rtp,
+ base::TimeTicks r_time,
+ VideoEncoder::FrameEncodedCallback callback)
+ : rtp_timestamp(rtp),
+ reference_time(r_time),
+ frame_encoded_callback(callback) {}
};
// The ExternalVideoEncoder class can be deleted directly by cast, while
@@ -154,21 +157,23 @@ class LocalVideoEncodeAcceleratorClient
void EncodeVideoFrame(
const scoped_refptr<media::VideoFrame>& video_frame,
- const base::TimeTicks& capture_time,
+ const base::TimeTicks& reference_time,
bool key_frame_requested,
const VideoEncoder::FrameEncodedCallback& frame_encoded_callback) {
DCHECK(encoder_task_runner_.get());
DCHECK(encoder_task_runner_->RunsTasksOnCurrentThread());
- encoded_frame_data_storage_.push_back(
- EncodedFrameReturnData(capture_time, frame_encoded_callback));
+ in_progress_frame_encodes_.push_back(InProgressFrameEncode(
+ TimeDeltaToRtpDelta(video_frame->timestamp(), kVideoFrequency),
+ reference_time,
+ frame_encoded_callback));
// BitstreamBufferReady will be called once the encoder is done.
video_encode_accelerator_->Encode(video_frame, key_frame_requested);
}
protected:
- virtual void NotifyError(VideoEncodeAccelerator::Error error) OVERRIDE {
+ void NotifyError(VideoEncodeAccelerator::Error error) override {
DCHECK(encoder_task_runner_.get());
DCHECK(encoder_task_runner_->RunsTasksOnCurrentThread());
VLOG(1) << "ExternalVideoEncoder NotifyError: " << error;
@@ -180,9 +185,9 @@ class LocalVideoEncodeAcceleratorClient
}
// Called to allocate the input and output buffers.
- virtual void RequireBitstreamBuffers(unsigned int input_count,
- const gfx::Size& input_coded_size,
- size_t output_buffer_size) OVERRIDE {
+ void RequireBitstreamBuffers(unsigned int input_count,
+ const gfx::Size& input_coded_size,
+ size_t output_buffer_size) override {
DCHECK(encoder_task_runner_.get());
DCHECK(encoder_task_runner_->RunsTasksOnCurrentThread());
DCHECK(video_encode_accelerator_);
@@ -197,9 +202,9 @@ class LocalVideoEncodeAcceleratorClient
// Encoder has encoded a frame and it's available in one of out output
// buffers.
- virtual void BitstreamBufferReady(int32 bitstream_buffer_id,
- size_t payload_size,
- bool key_frame) OVERRIDE {
+ void BitstreamBufferReady(int32 bitstream_buffer_id,
+ size_t payload_size,
+ bool key_frame) override {
DCHECK(encoder_task_runner_.get());
DCHECK(encoder_task_runner_->RunsTasksOnCurrentThread());
if (bitstream_buffer_id < 0 ||
@@ -226,9 +231,10 @@ class LocalVideoEncodeAcceleratorClient
// with the first key frame.
stream_header_.append(static_cast<const char*>(output_buffer->memory()),
payload_size);
- } else if (!encoded_frame_data_storage_.empty()) {
- scoped_ptr<EncodedFrame> encoded_frame(
- new EncodedFrame());
+ } else if (!in_progress_frame_encodes_.empty()) {
+ const InProgressFrameEncode& request = in_progress_frame_encodes_.front();
+
+ scoped_ptr<EncodedFrame> encoded_frame(new EncodedFrame());
encoded_frame->dependency = key_frame ? EncodedFrame::KEY :
EncodedFrame::DEPENDENT;
encoded_frame->frame_id = ++last_encoded_frame_id_;
@@ -236,10 +242,8 @@ class LocalVideoEncodeAcceleratorClient
encoded_frame->referenced_frame_id = encoded_frame->frame_id;
else
encoded_frame->referenced_frame_id = encoded_frame->frame_id - 1;
- encoded_frame->reference_time =
- encoded_frame_data_storage_.front().capture_time;
- encoded_frame->rtp_timestamp =
- GetVideoRtpTimestamp(encoded_frame->reference_time);
+ encoded_frame->rtp_timestamp = request.rtp_timestamp;
+ encoded_frame->reference_time = request.reference_time;
if (!stream_header_.empty()) {
encoded_frame->data = stream_header_;
stream_header_.clear();
@@ -259,10 +263,10 @@ class LocalVideoEncodeAcceleratorClient
cast_environment_->PostTask(
CastEnvironment::MAIN,
FROM_HERE,
- base::Bind(encoded_frame_data_storage_.front().frame_encoded_callback,
+ base::Bind(request.frame_encoded_callback,
base::Passed(&encoded_frame)));
- encoded_frame_data_storage_.pop_front();
+ in_progress_frame_encodes_.pop_front();
} else {
VLOG(1) << "BitstreamBufferReady(): no encoded frame data available";
}
@@ -357,7 +361,7 @@ class LocalVideoEncodeAcceleratorClient
friend class base::RefCountedThreadSafe<LocalVideoEncodeAcceleratorClient>;
- virtual ~LocalVideoEncodeAcceleratorClient() {
+ ~LocalVideoEncodeAcceleratorClient() override {
Destroy();
DCHECK(!video_encode_accelerator_);
}
@@ -376,7 +380,7 @@ class LocalVideoEncodeAcceleratorClient
ScopedVector<base::SharedMemory> output_buffers_;
// FIFO list.
- std::list<EncodedFrameReturnData> encoded_frame_data_storage_;
+ std::list<InProgressFrameEncode> in_progress_frame_encodes_;
DISALLOW_COPY_AND_ASSIGN(LocalVideoEncodeAcceleratorClient);
};
@@ -435,7 +439,7 @@ void ExternalVideoEncoder::OnCreateVideoEncodeAccelerator(
bool ExternalVideoEncoder::EncodeVideoFrame(
const scoped_refptr<media::VideoFrame>& video_frame,
- const base::TimeTicks& capture_time,
+ const base::TimeTicks& reference_time,
const FrameEncodedCallback& frame_encoded_callback) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
@@ -447,7 +451,7 @@ bool ExternalVideoEncoder::EncodeVideoFrame(
base::Bind(&LocalVideoEncodeAcceleratorClient::EncodeVideoFrame,
video_accelerator_client_,
video_frame,
- capture_time,
+ reference_time,
key_frame_requested_,
frame_encoded_callback));
diff --git a/media/cast/sender/external_video_encoder.h b/media/cast/sender/external_video_encoder.h
index 3a5f73b38e..90125557c0 100644
--- a/media/cast/sender/external_video_encoder.h
+++ b/media/cast/sender/external_video_encoder.h
@@ -32,24 +32,16 @@ class ExternalVideoEncoder : public VideoEncoder {
const CreateVideoEncodeAcceleratorCallback& create_vea_cb,
const CreateVideoEncodeMemoryCallback& create_video_encode_mem_cb);
- virtual ~ExternalVideoEncoder();
-
- // Called from the main cast thread. This function post the encode task to the
- // video encoder thread;
- // The video_frame must be valid until the closure callback is called.
- // The closure callback is called from the video encoder thread as soon as
- // the encoder is done with the frame; it does not mean that the encoded frame
- // has been sent out.
- // Once the encoded frame is ready the frame_encoded_callback is called.
- virtual bool EncodeVideoFrame(
- const scoped_refptr<media::VideoFrame>& video_frame,
- const base::TimeTicks& capture_time,
- const FrameEncodedCallback& frame_encoded_callback) OVERRIDE;
+ ~ExternalVideoEncoder() override;
- // The following functions are called from the main cast thread.
- virtual void SetBitRate(int new_bit_rate) OVERRIDE;
- virtual void GenerateKeyFrame() OVERRIDE;
- virtual void LatestFrameIdToReference(uint32 frame_id) OVERRIDE;
+ // VideoEncoder implementation.
+ bool EncodeVideoFrame(
+ const scoped_refptr<media::VideoFrame>& video_frame,
+ const base::TimeTicks& reference_time,
+ const FrameEncodedCallback& frame_encoded_callback) override;
+ void SetBitRate(int new_bit_rate) override;
+ void GenerateKeyFrame() override;
+ void LatestFrameIdToReference(uint32 frame_id) override;
// Called when video_accelerator_client_ has finished creating the VEA and
// is ready for use.
diff --git a/media/cast/sender/external_video_encoder_unittest.cc b/media/cast/sender/external_video_encoder_unittest.cc
index 2f6fa9e605..12a6b6a935 100644
--- a/media/cast/sender/external_video_encoder_unittest.cc
+++ b/media/cast/sender/external_video_encoder_unittest.cc
@@ -63,10 +63,12 @@ class TestVideoEncoderCallback
void SetExpectedResult(uint32 expected_frame_id,
uint32 expected_last_referenced_frame_id,
- const base::TimeTicks& expected_capture_time) {
+ uint32 expected_rtp_timestamp,
+ const base::TimeTicks& expected_reference_time) {
expected_frame_id_ = expected_frame_id;
expected_last_referenced_frame_id_ = expected_last_referenced_frame_id;
- expected_capture_time_ = expected_capture_time;
+ expected_rtp_timestamp_ = expected_rtp_timestamp;
+ expected_reference_time_ = expected_reference_time;
}
void DeliverEncodedVideoFrame(
@@ -80,7 +82,8 @@ class TestVideoEncoderCallback
EXPECT_EQ(expected_frame_id_, encoded_frame->frame_id);
EXPECT_EQ(expected_last_referenced_frame_id_,
encoded_frame->referenced_frame_id);
- EXPECT_EQ(expected_capture_time_, encoded_frame->reference_time);
+ EXPECT_EQ(expected_rtp_timestamp_, encoded_frame->rtp_timestamp);
+ EXPECT_EQ(expected_reference_time_, encoded_frame->reference_time);
}
protected:
@@ -92,7 +95,8 @@ class TestVideoEncoderCallback
bool expected_key_frame_;
uint32 expected_frame_id_;
uint32 expected_last_referenced_frame_id_;
- base::TimeTicks expected_capture_time_;
+ uint32 expected_rtp_timestamp_;
+ base::TimeTicks expected_reference_time_;
DISALLOW_COPY_AND_ASSIGN(TestVideoEncoderCallback);
};
@@ -122,6 +126,7 @@ class ExternalVideoEncoderTest : public ::testing::Test {
PopulateVideoFrame(video_frame_.get(), 123);
testing_clock_ = new base::SimpleTestTickClock();
+ testing_clock_->Advance(base::TimeTicks::Now() - base::TimeTicks());
task_runner_ = new test::FakeSingleThreadTaskRunner(testing_clock_);
cast_environment_ =
new CastEnvironment(scoped_ptr<base::TickClock>(testing_clock_).Pass(),
@@ -145,6 +150,12 @@ class ExternalVideoEncoderTest : public ::testing::Test {
virtual ~ExternalVideoEncoderTest() {}
+ void AdvanceClockAndVideoFrameTimestamp() {
+ testing_clock_->Advance(base::TimeDelta::FromMilliseconds(33));
+ video_frame_->set_timestamp(
+ video_frame_->timestamp() + base::TimeDelta::FromMilliseconds(33));
+ }
+
base::SimpleTestTickClock* testing_clock_; // Owned by CastEnvironment.
test::FakeVideoEncodeAccelerator* fake_vea_; // Owned by video_encoder_.
std::vector<uint32> stored_bitrates_;
@@ -165,19 +176,23 @@ TEST_F(ExternalVideoEncoderTest, EncodePattern30fpsRunningOutOfAck) {
base::Bind(&TestVideoEncoderCallback::DeliverEncodedVideoFrame,
test_video_encoder_callback_.get());
- base::TimeTicks capture_time;
- capture_time += base::TimeDelta::FromMilliseconds(33);
- test_video_encoder_callback_->SetExpectedResult(0, 0, capture_time);
+ test_video_encoder_callback_->SetExpectedResult(
+ 0, 0, TimeDeltaToRtpDelta(video_frame_->timestamp(), kVideoFrequency),
+ testing_clock_->NowTicks());
video_encoder_->SetBitRate(2000);
EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
- video_frame_, capture_time, frame_encoded_callback));
+ video_frame_, testing_clock_->NowTicks(), frame_encoded_callback));
task_runner_->RunTasks();
for (int i = 0; i < 6; ++i) {
- capture_time += base::TimeDelta::FromMilliseconds(33);
- test_video_encoder_callback_->SetExpectedResult(i + 1, i, capture_time);
+ AdvanceClockAndVideoFrameTimestamp();
+ test_video_encoder_callback_->SetExpectedResult(
+ i + 1,
+ i,
+ TimeDeltaToRtpDelta(video_frame_->timestamp(), kVideoFrequency),
+ testing_clock_->NowTicks());
EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
- video_frame_, capture_time, frame_encoded_callback));
+ video_frame_, testing_clock_->NowTicks(), frame_encoded_callback));
task_runner_->RunTasks();
}
// We need to run the task to cleanup the GPU instance.
@@ -199,11 +214,11 @@ TEST_F(ExternalVideoEncoderTest, StreamHeader) {
fake_vea_->SendDummyFrameForTesting(false);
// Verify the first returned bitstream buffer is still a key frame.
- base::TimeTicks capture_time;
- capture_time += base::TimeDelta::FromMilliseconds(33);
- test_video_encoder_callback_->SetExpectedResult(0, 0, capture_time);
+ test_video_encoder_callback_->SetExpectedResult(
+ 0, 0, TimeDeltaToRtpDelta(video_frame_->timestamp(), kVideoFrequency),
+ testing_clock_->NowTicks());
EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
- video_frame_, capture_time, frame_encoded_callback));
+ video_frame_, testing_clock_->NowTicks(), frame_encoded_callback));
task_runner_->RunTasks();
// We need to run the task to cleanup the GPU instance.
diff --git a/media/cast/sender/fake_software_video_encoder.cc b/media/cast/sender/fake_software_video_encoder.cc
index bd96f78c89..12b6b775ef 100644
--- a/media/cast/sender/fake_software_video_encoder.cc
+++ b/media/cast/sender/fake_software_video_encoder.cc
@@ -6,6 +6,7 @@
#include "base/json/json_writer.h"
#include "base/values.h"
+#include "media/base/video_frame.h"
#include "media/cast/net/cast_transport_config.h"
#ifndef OFFICIAL_BUILD
@@ -26,29 +27,34 @@ FakeSoftwareVideoEncoder::~FakeSoftwareVideoEncoder() {}
void FakeSoftwareVideoEncoder::Initialize() {}
-bool FakeSoftwareVideoEncoder::Encode(
+void FakeSoftwareVideoEncoder::Encode(
const scoped_refptr<media::VideoFrame>& video_frame,
- EncodedFrame* encoded_image) {
- encoded_image->frame_id = frame_id_++;
+ const base::TimeTicks& reference_time,
+ EncodedFrame* encoded_frame) {
+ DCHECK(encoded_frame);
+
+ encoded_frame->frame_id = frame_id_++;
if (next_frame_is_key_) {
- encoded_image->dependency = EncodedFrame::KEY;
- encoded_image->referenced_frame_id = encoded_image->frame_id;
+ encoded_frame->dependency = EncodedFrame::KEY;
+ encoded_frame->referenced_frame_id = encoded_frame->frame_id;
next_frame_is_key_ = false;
} else {
- encoded_image->dependency = EncodedFrame::DEPENDENT;
- encoded_image->referenced_frame_id = encoded_image->frame_id - 1;
+ encoded_frame->dependency = EncodedFrame::DEPENDENT;
+ encoded_frame->referenced_frame_id = encoded_frame->frame_id - 1;
}
+ encoded_frame->rtp_timestamp =
+ TimeDeltaToRtpDelta(video_frame->timestamp(), kVideoFrequency);
+ encoded_frame->reference_time = reference_time;
base::DictionaryValue values;
values.SetBoolean("key",
- encoded_image->dependency == EncodedFrame::KEY);
- values.SetInteger("ref", encoded_image->referenced_frame_id);
- values.SetInteger("id", encoded_image->frame_id);
+ encoded_frame->dependency == EncodedFrame::KEY);
+ values.SetInteger("ref", encoded_frame->referenced_frame_id);
+ values.SetInteger("id", encoded_frame->frame_id);
values.SetInteger("size", frame_size_);
- base::JSONWriter::Write(&values, &encoded_image->data);
- encoded_image->data.resize(
- std::max<size_t>(encoded_image->data.size(), frame_size_), ' ');
- return true;
+ base::JSONWriter::Write(&values, &encoded_frame->data);
+ encoded_frame->data.resize(
+ std::max<size_t>(encoded_frame->data.size(), frame_size_), ' ');
}
void FakeSoftwareVideoEncoder::UpdateRates(uint32 new_bitrate) {
diff --git a/media/cast/sender/fake_software_video_encoder.h b/media/cast/sender/fake_software_video_encoder.h
index 5491ae0cc8..cf5769c857 100644
--- a/media/cast/sender/fake_software_video_encoder.h
+++ b/media/cast/sender/fake_software_video_encoder.h
@@ -14,15 +14,16 @@ namespace cast {
class FakeSoftwareVideoEncoder : public SoftwareVideoEncoder {
public:
FakeSoftwareVideoEncoder(const VideoSenderConfig& video_config);
- virtual ~FakeSoftwareVideoEncoder();
+ ~FakeSoftwareVideoEncoder() override;
// SoftwareVideoEncoder implementations.
- virtual void Initialize() OVERRIDE;
- virtual bool Encode(const scoped_refptr<media::VideoFrame>& video_frame,
- EncodedFrame* encoded_image) OVERRIDE;
- virtual void UpdateRates(uint32 new_bitrate) OVERRIDE;
- virtual void GenerateKeyFrame() OVERRIDE;
- virtual void LatestFrameIdToReference(uint32 frame_id) OVERRIDE;
+ void Initialize() override;
+ void Encode(const scoped_refptr<media::VideoFrame>& video_frame,
+ const base::TimeTicks& reference_time,
+ EncodedFrame* encoded_frame) override;
+ void UpdateRates(uint32 new_bitrate) override;
+ void GenerateKeyFrame() override;
+ void LatestFrameIdToReference(uint32 frame_id) override;
private:
VideoSenderConfig video_config_;
diff --git a/media/cast/sender/frame_sender.cc b/media/cast/sender/frame_sender.cc
index 0b94f74a11..c5319a4fae 100644
--- a/media/cast/sender/frame_sender.cc
+++ b/media/cast/sender/frame_sender.cc
@@ -39,6 +39,7 @@ FrameSender::FrameSender(scoped_refptr<CastEnvironment> cast_environment,
min_playout_delay_(min_playout_delay == base::TimeDelta() ?
max_playout_delay : min_playout_delay),
max_playout_delay_(max_playout_delay),
+ send_target_playout_delay_(false),
max_frame_rate_(max_frame_rate),
num_aggressive_rtcp_reports_sent_(0),
last_sent_frame_id_(0),
@@ -106,17 +107,20 @@ void FrameSender::OnMeasuredRoundTripTime(base::TimeDelta rtt) {
void FrameSender::SetTargetPlayoutDelay(
base::TimeDelta new_target_playout_delay) {
+ if (send_target_playout_delay_ &&
+ target_playout_delay_ == new_target_playout_delay) {
+ return;
+ }
new_target_playout_delay = std::max(new_target_playout_delay,
min_playout_delay_);
new_target_playout_delay = std::min(new_target_playout_delay,
max_playout_delay_);
+ VLOG(2) << SENDER_SSRC << "Target playout delay changing from "
+ << target_playout_delay_.InMilliseconds() << " ms to "
+ << new_target_playout_delay.InMilliseconds() << " ms.";
target_playout_delay_ = new_target_playout_delay;
- max_unacked_frames_ =
- std::min(kMaxUnackedFrames,
- 1 + static_cast<int>(target_playout_delay_ *
- max_frame_rate_ /
- base::TimeDelta::FromSeconds(1)));
send_target_playout_delay_ = true;
+ congestion_control_->UpdateTargetPlayoutDelay(target_playout_delay_);
}
void FrameSender::ResendCheck() {
diff --git a/media/cast/sender/frame_sender.h b/media/cast/sender/frame_sender.h
index 0e8595d213..a3ef1e5511 100644
--- a/media/cast/sender/frame_sender.h
+++ b/media/cast/sender/frame_sender.h
@@ -125,10 +125,6 @@ class FrameSender {
// Max encoded frames generated per second.
double max_frame_rate_;
- // Maximum number of outstanding frames before the encoding and sending of
- // new frames shall halt.
- int max_unacked_frames_;
-
// Counts how many RTCP reports are being "aggressively" sent (i.e., one per
// frame) at the start of the session. Once a threshold is reached, RTCP
// reports are instead sent at the configured interval + random drift.
diff --git a/media/cast/sender/software_video_encoder.h b/media/cast/sender/software_video_encoder.h
index 16c8cd3473..e3136d5352 100644
--- a/media/cast/sender/software_video_encoder.h
+++ b/media/cast/sender/software_video_encoder.h
@@ -8,6 +8,10 @@
#include "base/basictypes.h"
#include "base/memory/ref_counted.h"
+namespace base {
+class TimeTicks;
+}
+
namespace media {
class VideoFrame;
}
@@ -25,8 +29,9 @@ class SoftwareVideoEncoder {
virtual void Initialize() = 0;
// Encode a raw image (as a part of a video stream).
- virtual bool Encode(const scoped_refptr<media::VideoFrame>& video_frame,
- EncodedFrame* encoded_image) = 0;
+ virtual void Encode(const scoped_refptr<media::VideoFrame>& video_frame,
+ const base::TimeTicks& reference_time,
+ EncodedFrame* encoded_frame) = 0;
// Update the encoder with a new target bit rate.
virtual void UpdateRates(uint32 new_bitrate) = 0;
diff --git a/media/cast/sender/video_encoder.h b/media/cast/sender/video_encoder.h
index d788c7b2aa..b3bdbe44f0 100644
--- a/media/cast/sender/video_encoder.h
+++ b/media/cast/sender/video_encoder.h
@@ -20,19 +20,17 @@ namespace cast {
// All these functions are called from the main cast thread.
class VideoEncoder {
public:
- typedef base::Callback<void(scoped_ptr<EncodedFrame>)>
- FrameEncodedCallback;
+ typedef base::Callback<void(scoped_ptr<EncodedFrame>)> FrameEncodedCallback;
virtual ~VideoEncoder() {}
- // The video_frame must be valid until the closure callback is called.
- // The closure callback is called from the video encoder thread as soon as
- // the encoder is done with the frame; it does not mean that the encoded frame
- // has been sent out.
- // Once the encoded frame is ready the frame_encoded_callback is called.
+ // If true is returned, the Encoder has accepted the request and will process
+ // it asynchronously, running |frame_encoded_callback| on the MAIN
+ // CastEnvironment thread with the result. If false is returned, nothing
+ // happens and the callback will not be run.
virtual bool EncodeVideoFrame(
const scoped_refptr<media::VideoFrame>& video_frame,
- const base::TimeTicks& capture_time,
+ const base::TimeTicks& reference_time,
const FrameEncodedCallback& frame_encoded_callback) = 0;
// Inform the encoder about the new target bit rate.
diff --git a/media/cast/sender/video_encoder_impl.cc b/media/cast/sender/video_encoder_impl.cc
index 4cbb769f0d..a54ddfc7ef 100644
--- a/media/cast/sender/video_encoder_impl.cc
+++ b/media/cast/sender/video_encoder_impl.cc
@@ -12,16 +12,13 @@
#include "media/base/video_frame.h"
#include "media/cast/cast_defines.h"
#include "media/cast/sender/fake_software_video_encoder.h"
-#if !defined(MEDIA_DISABLE_LIBVPX)
#include "media/cast/sender/vp8_encoder.h"
-#endif // !defined(MEDIA_DISABLE_LIBVPX)
namespace media {
namespace cast {
namespace {
-#if !defined(MEDIA_DISABLE_LIBVPX)
typedef base::Callback<void(Vp8Encoder*)> PassEncoderCallback;
void InitializeEncoderOnEncoderThread(
@@ -30,13 +27,12 @@ void InitializeEncoderOnEncoderThread(
DCHECK(environment->CurrentlyOn(CastEnvironment::VIDEO));
encoder->Initialize();
}
-#endif // !defined(MEDIA_DISABLE_LIBVPX)
void EncodeVideoFrameOnEncoderThread(
scoped_refptr<CastEnvironment> environment,
SoftwareVideoEncoder* encoder,
const scoped_refptr<media::VideoFrame>& video_frame,
- const base::TimeTicks& capture_time,
+ const base::TimeTicks& reference_time,
const VideoEncoderImpl::CodecDynamicConfig& dynamic_config,
const VideoEncoderImpl::FrameEncodedCallback& frame_encoded_callback) {
DCHECK(environment->CurrentlyOn(CastEnvironment::VIDEO));
@@ -47,41 +43,26 @@ void EncodeVideoFrameOnEncoderThread(
dynamic_config.latest_frame_id_to_reference);
encoder->UpdateRates(dynamic_config.bit_rate);
- scoped_ptr<EncodedFrame> encoded_frame(
- new EncodedFrame());
- if (!encoder->Encode(video_frame, encoded_frame.get())) {
- VLOG(1) << "Encoding failed";
- return;
- }
- if (encoded_frame->data.empty()) {
- VLOG(1) << "Encoding resulted in an empty frame";
- return;
- }
- encoded_frame->rtp_timestamp = GetVideoRtpTimestamp(capture_time);
- encoded_frame->reference_time = capture_time;
-
+ scoped_ptr<EncodedFrame> encoded_frame(new EncodedFrame());
+ encoder->Encode(video_frame, reference_time, encoded_frame.get());
environment->PostTask(
CastEnvironment::MAIN,
FROM_HERE,
- base::Bind(
- frame_encoded_callback, base::Passed(&encoded_frame)));
+ base::Bind(frame_encoded_callback, base::Passed(&encoded_frame)));
}
} // namespace
VideoEncoderImpl::VideoEncoderImpl(
scoped_refptr<CastEnvironment> cast_environment,
- const VideoSenderConfig& video_config,
- int max_unacked_frames)
+ const VideoSenderConfig& video_config)
: cast_environment_(cast_environment) {
if (video_config.codec == CODEC_VIDEO_VP8) {
-#if !defined(MEDIA_DISABLE_LIBVPX)
- encoder_.reset(new Vp8Encoder(video_config, max_unacked_frames));
+ encoder_.reset(new Vp8Encoder(video_config));
cast_environment_->PostTask(CastEnvironment::VIDEO,
FROM_HERE,
base::Bind(&InitializeEncoderOnEncoderThread,
cast_environment,
encoder_.get()));
-#endif // !defined(MEDIA_DISABLE_LIBVPX)
#ifndef OFFICIAL_BUILD
} else if (video_config.codec == CODEC_VIDEO_FAKE) {
encoder_.reset(new FakeSoftwareVideoEncoder(video_config));
@@ -108,7 +89,7 @@ VideoEncoderImpl::~VideoEncoderImpl() {
bool VideoEncoderImpl::EncodeVideoFrame(
const scoped_refptr<media::VideoFrame>& video_frame,
- const base::TimeTicks& capture_time,
+ const base::TimeTicks& reference_time,
const FrameEncodedCallback& frame_encoded_callback) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
cast_environment_->PostTask(CastEnvironment::VIDEO,
@@ -117,7 +98,7 @@ bool VideoEncoderImpl::EncodeVideoFrame(
cast_environment_,
encoder_.get(),
video_frame,
- capture_time,
+ reference_time,
dynamic_config_,
frame_encoded_callback));
diff --git a/media/cast/sender/video_encoder_impl.h b/media/cast/sender/video_encoder_impl.h
index 54a380265c..58a6769e21 100644
--- a/media/cast/sender/video_encoder_impl.h
+++ b/media/cast/sender/video_encoder_impl.h
@@ -30,27 +30,18 @@ class VideoEncoderImpl : public VideoEncoder {
FrameEncodedCallback;
VideoEncoderImpl(scoped_refptr<CastEnvironment> cast_environment,
- const VideoSenderConfig& video_config,
- int max_unacked_frames);
+ const VideoSenderConfig& video_config);
- virtual ~VideoEncoderImpl();
+ ~VideoEncoderImpl() override;
- // Called from the main cast thread. This function post the encode task to the
- // video encoder thread;
- // The video_frame must be valid until the closure callback is called.
- // The closure callback is called from the video encoder thread as soon as
- // the encoder is done with the frame; it does not mean that the encoded frame
- // has been sent out.
- // Once the encoded frame is ready the frame_encoded_callback is called.
- virtual bool EncodeVideoFrame(
+ // VideoEncoder implementation.
+ bool EncodeVideoFrame(
const scoped_refptr<media::VideoFrame>& video_frame,
- const base::TimeTicks& capture_time,
- const FrameEncodedCallback& frame_encoded_callback) OVERRIDE;
-
- // The following functions are called from the main cast thread.
- virtual void SetBitRate(int new_bit_rate) OVERRIDE;
- virtual void GenerateKeyFrame() OVERRIDE;
- virtual void LatestFrameIdToReference(uint32 frame_id) OVERRIDE;
+ const base::TimeTicks& reference_time,
+ const FrameEncodedCallback& frame_encoded_callback) override;
+ void SetBitRate(int new_bit_rate) override;
+ void GenerateKeyFrame() override;
+ void LatestFrameIdToReference(uint32 frame_id) override;
private:
scoped_refptr<CastEnvironment> cast_environment_;
diff --git a/media/cast/sender/video_encoder_impl_unittest.cc b/media/cast/sender/video_encoder_impl_unittest.cc
index 43f7366883..7c0047ff7f 100644
--- a/media/cast/sender/video_encoder_impl_unittest.cc
+++ b/media/cast/sender/video_encoder_impl_unittest.cc
@@ -35,10 +35,12 @@ class TestVideoEncoderCallback
void SetExpectedResult(uint32 expected_frame_id,
uint32 expected_last_referenced_frame_id,
- const base::TimeTicks& expected_capture_time) {
+ uint32 expected_rtp_timestamp,
+ const base::TimeTicks& expected_reference_time) {
expected_frame_id_ = expected_frame_id;
expected_last_referenced_frame_id_ = expected_last_referenced_frame_id;
- expected_capture_time_ = expected_capture_time;
+ expected_rtp_timestamp_ = expected_rtp_timestamp;
+ expected_reference_time_ = expected_reference_time;
}
void DeliverEncodedVideoFrame(
@@ -52,8 +54,8 @@ class TestVideoEncoderCallback
EXPECT_EQ(expected_last_referenced_frame_id_,
encoded_frame->referenced_frame_id)
<< "frame id: " << expected_frame_id_;
- EXPECT_LT(0u, encoded_frame->rtp_timestamp);
- EXPECT_EQ(expected_capture_time_, encoded_frame->reference_time);
+ EXPECT_EQ(expected_rtp_timestamp_, encoded_frame->rtp_timestamp);
+ EXPECT_EQ(expected_reference_time_, encoded_frame->reference_time);
EXPECT_FALSE(encoded_frame->data.empty());
++count_frames_delivered_;
}
@@ -67,7 +69,8 @@ class TestVideoEncoderCallback
uint32 expected_frame_id_;
uint32 expected_last_referenced_frame_id_;
- base::TimeTicks expected_capture_time_;
+ uint32 expected_rtp_timestamp_;
+ base::TimeTicks expected_reference_time_;
DISALLOW_COPY_AND_ASSIGN(TestVideoEncoderCallback);
};
@@ -86,7 +89,7 @@ class VideoEncoderImplTest : public ::testing::Test {
virtual ~VideoEncoderImplTest() {}
- virtual void SetUp() OVERRIDE {
+ virtual void SetUp() override {
testing_clock_ = new base::SimpleTestTickClock();
testing_clock_->Advance(base::TimeTicks::Now() - base::TimeTicks());
task_runner_ = new test::FakeSingleThreadTaskRunner(testing_clock_);
@@ -97,7 +100,7 @@ class VideoEncoderImplTest : public ::testing::Test {
task_runner_);
}
- virtual void TearDown() OVERRIDE {
+ virtual void TearDown() override {
video_encoder_.reset();
task_runner_->RunTasks();
}
@@ -105,9 +108,14 @@ class VideoEncoderImplTest : public ::testing::Test {
void CreateEncoder() {
test_video_encoder_callback_ = new TestVideoEncoderCallback(
video_config_.max_number_of_video_buffers_used != 1);
- video_encoder_.reset(new VideoEncoderImpl(
- cast_environment_, video_config_,
- 0 /* useless arg to be removed in later change */));
+ video_encoder_.reset(
+ new VideoEncoderImpl(cast_environment_, video_config_));
+ }
+
+ void AdvanceClockAndVideoFrameTimestamp() {
+ testing_clock_->Advance(base::TimeDelta::FromMilliseconds(33));
+ video_frame_->set_timestamp(
+ video_frame_->timestamp() + base::TimeDelta::FromMilliseconds(33));
}
base::SimpleTestTickClock* testing_clock_; // Owned by CastEnvironment.
@@ -132,15 +140,19 @@ TEST_F(VideoEncoderImplTest, GeneratesKeyFrameThenOnlyDeltaFrames) {
EXPECT_EQ(0, test_video_encoder_callback_->count_frames_delivered());
test_video_encoder_callback_->SetExpectedResult(
- 0, 0, testing_clock_->NowTicks());
+ 0, 0, TimeDeltaToRtpDelta(video_frame_->timestamp(), kVideoFrequency),
+ testing_clock_->NowTicks());
EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
video_frame_, testing_clock_->NowTicks(), frame_encoded_callback));
task_runner_->RunTasks();
for (uint32 frame_id = 1; frame_id < 10; ++frame_id) {
- testing_clock_->Advance(base::TimeDelta::FromMilliseconds(33));
+ AdvanceClockAndVideoFrameTimestamp();
test_video_encoder_callback_->SetExpectedResult(
- frame_id, frame_id - 1, testing_clock_->NowTicks());
+ frame_id,
+ frame_id - 1,
+ TimeDeltaToRtpDelta(video_frame_->timestamp(), kVideoFrequency),
+ testing_clock_->NowTicks());
EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
video_frame_, testing_clock_->NowTicks(), frame_encoded_callback));
task_runner_->RunTasks();
@@ -161,23 +173,26 @@ TEST_F(VideoEncoderImplTest,
EXPECT_EQ(0, test_video_encoder_callback_->count_frames_delivered());
test_video_encoder_callback_->SetExpectedResult(
- 0, 0, testing_clock_->NowTicks());
+ 0, 0, TimeDeltaToRtpDelta(video_frame_->timestamp(), kVideoFrequency),
+ testing_clock_->NowTicks());
EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
video_frame_, testing_clock_->NowTicks(), frame_encoded_callback));
task_runner_->RunTasks();
- testing_clock_->Advance(base::TimeDelta::FromMilliseconds(33));
+ AdvanceClockAndVideoFrameTimestamp();
video_encoder_->LatestFrameIdToReference(0);
test_video_encoder_callback_->SetExpectedResult(
- 1, 0, testing_clock_->NowTicks());
+ 1, 0, TimeDeltaToRtpDelta(video_frame_->timestamp(), kVideoFrequency),
+ testing_clock_->NowTicks());
EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
video_frame_, testing_clock_->NowTicks(), frame_encoded_callback));
task_runner_->RunTasks();
- testing_clock_->Advance(base::TimeDelta::FromMilliseconds(33));
+ AdvanceClockAndVideoFrameTimestamp();
video_encoder_->LatestFrameIdToReference(1);
test_video_encoder_callback_->SetExpectedResult(
- 2, 1, testing_clock_->NowTicks());
+ 2, 1, TimeDeltaToRtpDelta(video_frame_->timestamp(), kVideoFrequency),
+ testing_clock_->NowTicks());
EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
video_frame_, testing_clock_->NowTicks(), frame_encoded_callback));
task_runner_->RunTasks();
@@ -185,9 +200,11 @@ TEST_F(VideoEncoderImplTest,
video_encoder_->LatestFrameIdToReference(2);
for (uint32 frame_id = 3; frame_id < 10; ++frame_id) {
- testing_clock_->Advance(base::TimeDelta::FromMilliseconds(33));
+ AdvanceClockAndVideoFrameTimestamp();
test_video_encoder_callback_->SetExpectedResult(
- frame_id, 2, testing_clock_->NowTicks());
+ frame_id, 2,
+ TimeDeltaToRtpDelta(video_frame_->timestamp(), kVideoFrequency),
+ testing_clock_->NowTicks());
EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
video_frame_, testing_clock_->NowTicks(), frame_encoded_callback));
task_runner_->RunTasks();
diff --git a/media/cast/sender/video_sender.cc b/media/cast/sender/video_sender.cc
index 784e8c6a34..16b7159bef 100644
--- a/media/cast/sender/video_sender.cc
+++ b/media/cast/sender/video_sender.cc
@@ -56,18 +56,18 @@ VideoSender::VideoSender(
video_config.max_frame_rate,
video_config.min_playout_delay,
video_config.max_playout_delay,
- NewFixedCongestionControl(
- (video_config.min_bitrate + video_config.max_bitrate) / 2)),
+ video_config.use_external_encoder ?
+ NewFixedCongestionControl(
+ (video_config.min_bitrate + video_config.max_bitrate) / 2) :
+ NewAdaptiveCongestionControl(cast_environment->Clock(),
+ video_config.max_bitrate,
+ video_config.min_bitrate,
+ video_config.max_frame_rate)),
frames_in_encoder_(0),
last_bitrate_(0),
playout_delay_change_cb_(playout_delay_change_cb),
weak_factory_(this) {
cast_initialization_status_ = STATUS_VIDEO_UNINITIALIZED;
- VLOG(1) << "max_unacked_frames is " << max_unacked_frames_
- << " for target_playout_delay="
- << target_playout_delay_.InMilliseconds() << " ms"
- << " and max_frame_rate=" << video_config.max_frame_rate;
- DCHECK_GT(max_unacked_frames_, 0);
if (video_config.use_external_encoder) {
video_encoder_.reset(new ExternalVideoEncoder(
@@ -79,13 +79,7 @@ VideoSender::VideoSender(
create_video_encode_mem_cb));
} else {
// Software encoder is initialized immediately.
- congestion_control_.reset(
- NewAdaptiveCongestionControl(cast_environment->Clock(),
- video_config.max_bitrate,
- video_config.min_bitrate,
- max_unacked_frames_));
- video_encoder_.reset(new VideoEncoderImpl(
- cast_environment, video_config, max_unacked_frames_));
+ video_encoder_.reset(new VideoEncoderImpl(cast_environment, video_config));
cast_initialization_status_ = STATUS_VIDEO_INITIALIZED;
}
@@ -116,7 +110,7 @@ VideoSender::~VideoSender() {
void VideoSender::InsertRawVideoFrame(
const scoped_refptr<media::VideoFrame>& video_frame,
- const base::TimeTicks& capture_time) {
+ const base::TimeTicks& reference_time) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
if (cast_initialization_status_ != STATUS_VIDEO_INITIALIZED) {
NOTREACHED();
@@ -124,30 +118,35 @@ void VideoSender::InsertRawVideoFrame(
}
DCHECK(video_encoder_.get()) << "Invalid state";
- RtpTimestamp rtp_timestamp = GetVideoRtpTimestamp(capture_time);
+ const RtpTimestamp rtp_timestamp =
+ TimeDeltaToRtpDelta(video_frame->timestamp(), kVideoFrequency);
+ const base::TimeTicks insertion_time = cast_environment_->Clock()->NowTicks();
+ // TODO(miu): Plumb in capture timestamps. For now, make it look like capture
+ // took zero time by setting the BEGIN and END event to the same timestamp.
cast_environment_->Logging()->InsertFrameEvent(
- capture_time, FRAME_CAPTURE_BEGIN, VIDEO_EVENT,
- rtp_timestamp, kFrameIdUnknown);
+ insertion_time, FRAME_CAPTURE_BEGIN, VIDEO_EVENT, rtp_timestamp,
+ kFrameIdUnknown);
cast_environment_->Logging()->InsertFrameEvent(
- cast_environment_->Clock()->NowTicks(),
- FRAME_CAPTURE_END, VIDEO_EVENT,
- rtp_timestamp,
+ insertion_time, FRAME_CAPTURE_END, VIDEO_EVENT, rtp_timestamp,
kFrameIdUnknown);
// Used by chrome/browser/extension/api/cast_streaming/performance_test.cc
TRACE_EVENT_INSTANT2(
"cast_perf_test", "InsertRawVideoFrame",
TRACE_EVENT_SCOPE_THREAD,
- "timestamp", capture_time.ToInternalValue(),
+ "timestamp", reference_time.ToInternalValue(),
"rtp_timestamp", rtp_timestamp);
- // Drop the frame if its reference timestamp is not an increase over the last
- // frame's. This protects: 1) the duration calculations that assume
- // timestamps are monotonically non-decreasing, and 2) assumptions made deeper
- // in the implementation where each frame's RTP timestamp needs to be unique.
+ // Drop the frame if either its RTP or reference timestamp is not an increase
+ // over the last frame's. This protects: 1) the duration calculations that
+ // assume timestamps are monotonically non-decreasing, and 2) assumptions made
+ // deeper in the implementation where each frame's RTP timestamp needs to be
+ // unique.
if (!last_enqueued_frame_reference_time_.is_null() &&
- capture_time <= last_enqueued_frame_reference_time_) {
- VLOG(1) << "Dropping video frame: Reference time did not increase.";
+ (!IsNewerRtpTimestamp(rtp_timestamp,
+ last_enqueued_frame_rtp_timestamp_) ||
+ reference_time <= last_enqueued_frame_reference_time_)) {
+ VLOG(1) << "Dropping video frame: RTP or reference time did not increase.";
return;
}
@@ -157,7 +156,7 @@ void VideoSender::InsertRawVideoFrame(
// guess will be eliminated when |duration_in_encoder_| is updated in
// OnEncodedVideoFrame().
const base::TimeDelta duration_added_by_next_frame = frames_in_encoder_ > 0 ?
- capture_time - last_enqueued_frame_reference_time_ :
+ reference_time - last_enqueued_frame_reference_time_ :
base::TimeDelta::FromSecondsD(1.0 / max_frame_rate_);
if (ShouldDropNextFrame(duration_added_by_next_frame)) {
@@ -173,7 +172,7 @@ void VideoSender::InsertRawVideoFrame(
}
uint32 bitrate = congestion_control_->GetBitrate(
- capture_time + target_playout_delay_, target_playout_delay_);
+ reference_time + target_playout_delay_, target_playout_delay_);
if (bitrate != last_bitrate_) {
video_encoder_->SetBitRate(bitrate);
last_bitrate_ = bitrate;
@@ -181,13 +180,14 @@ void VideoSender::InsertRawVideoFrame(
if (video_encoder_->EncodeVideoFrame(
video_frame,
- capture_time,
+ reference_time,
base::Bind(&VideoSender::OnEncodedVideoFrame,
weak_factory_.GetWeakPtr(),
bitrate))) {
frames_in_encoder_++;
duration_in_encoder_ += duration_added_by_next_frame;
- last_enqueued_frame_reference_time_ = capture_time;
+ last_enqueued_frame_rtp_timestamp_ = rtp_timestamp;
+ last_enqueued_frame_reference_time_ = reference_time;
} else {
VLOG(1) << "Encoder rejected a frame. Skipping...";
}
diff --git a/media/cast/sender/video_sender.h b/media/cast/sender/video_sender.h
index e7658abeea..826099ecf4 100644
--- a/media/cast/sender/video_sender.h
+++ b/media/cast/sender/video_sender.h
@@ -45,7 +45,7 @@ class VideoSender : public FrameSender,
CastTransportSender* const transport_sender,
const PlayoutDelayChangeCB& playout_delay_change_cb);
- virtual ~VideoSender();
+ ~VideoSender() override;
// Note: It is not guaranteed that |video_frame| will actually be encoded and
// sent, if VideoSender detects too many frames in flight. Therefore, clients
@@ -54,12 +54,12 @@ class VideoSender : public FrameSender,
// Note: It is invalid to call this method if InitializationResult() returns
// anything but STATUS_VIDEO_INITIALIZED.
void InsertRawVideoFrame(const scoped_refptr<media::VideoFrame>& video_frame,
- const base::TimeTicks& capture_time);
+ const base::TimeTicks& reference_time);
protected:
- virtual int GetNumberOfFramesInEncoder() const OVERRIDE;
- virtual base::TimeDelta GetInFlightMediaDuration() const OVERRIDE;
- virtual void OnAck(uint32 frame_id) OVERRIDE;
+ int GetNumberOfFramesInEncoder() const override;
+ base::TimeDelta GetInFlightMediaDuration() const override;
+ void OnAck(uint32 frame_id) override;
private:
// Called when the encoder is initialized or has failed to initialize.
@@ -83,6 +83,7 @@ class VideoSender : public FrameSender,
base::TimeDelta duration_in_encoder_;
// The timestamp of the frame that was last enqueued in |video_encoder_|.
+ RtpTimestamp last_enqueued_frame_rtp_timestamp_;
base::TimeTicks last_enqueued_frame_reference_time_;
// Remember what we set the bitrate to before, no need to set it again if
diff --git a/media/cast/sender/video_sender_unittest.cc b/media/cast/sender/video_sender_unittest.cc
index be6594443c..2276e332b0 100644
--- a/media/cast/sender/video_sender_unittest.cc
+++ b/media/cast/sender/video_sender_unittest.cc
@@ -64,8 +64,7 @@ class TestPacketSender : public PacketSender {
paused_(false) {}
// A singular packet implies a RTCP packet.
- virtual bool SendPacket(PacketRef packet,
- const base::Closure& cb) OVERRIDE {
+ bool SendPacket(PacketRef packet, const base::Closure& cb) override {
if (paused_) {
stored_packet_ = packet;
callback_ = cb;
@@ -85,9 +84,7 @@ class TestPacketSender : public PacketSender {
return true;
}
- virtual int64 GetBytesSent() OVERRIDE {
- return 0;
- }
+ int64 GetBytesSent() override { return 0; }
int number_of_rtp_packets() const { return number_of_rtp_packets_; }
@@ -160,7 +157,7 @@ class VideoSenderTest : public ::testing::Test {
virtual ~VideoSenderTest() {}
- virtual void TearDown() OVERRIDE {
+ virtual void TearDown() override {
video_sender_.reset();
task_runner_->RunTasks();
}
@@ -221,19 +218,25 @@ class VideoSenderTest : public ::testing::Test {
}
scoped_refptr<media::VideoFrame> GetNewVideoFrame() {
+ if (first_frame_timestamp_.is_null())
+ first_frame_timestamp_ = testing_clock_->NowTicks();
gfx::Size size(kWidth, kHeight);
scoped_refptr<media::VideoFrame> video_frame =
media::VideoFrame::CreateFrame(
- VideoFrame::I420, size, gfx::Rect(size), size, base::TimeDelta());
+ VideoFrame::I420, size, gfx::Rect(size), size,
+ testing_clock_->NowTicks() - first_frame_timestamp_);
PopulateVideoFrame(video_frame.get(), last_pixel_value_++);
return video_frame;
}
scoped_refptr<media::VideoFrame> GetLargeNewVideoFrame() {
+ if (first_frame_timestamp_.is_null())
+ first_frame_timestamp_ = testing_clock_->NowTicks();
gfx::Size size(kWidth, kHeight);
scoped_refptr<media::VideoFrame> video_frame =
media::VideoFrame::CreateFrame(
- VideoFrame::I420, size, gfx::Rect(size), size, base::TimeDelta());
+ VideoFrame::I420, size, gfx::Rect(size), size,
+ testing_clock_->NowTicks() - first_frame_timestamp_);
PopulateVideoFrameWithNoise(video_frame.get());
return video_frame;
}
@@ -250,6 +253,7 @@ class VideoSenderTest : public ::testing::Test {
std::vector<uint32> stored_bitrates_;
scoped_refptr<CastEnvironment> cast_environment_;
int last_pixel_value_;
+ base::TimeTicks first_frame_timestamp_;
DISALLOW_COPY_AND_ASSIGN(VideoSenderTest);
};
@@ -258,8 +262,8 @@ TEST_F(VideoSenderTest, BuiltInEncoder) {
EXPECT_EQ(STATUS_VIDEO_INITIALIZED, InitEncoder(false, true));
scoped_refptr<media::VideoFrame> video_frame = GetNewVideoFrame();
- const base::TimeTicks capture_time = testing_clock_->NowTicks();
- video_sender_->InsertRawVideoFrame(video_frame, capture_time);
+ const base::TimeTicks reference_time = testing_clock_->NowTicks();
+ video_sender_->InsertRawVideoFrame(video_frame, reference_time);
task_runner_->RunTasks();
EXPECT_LE(1, transport_.number_of_rtp_packets());
@@ -271,12 +275,12 @@ TEST_F(VideoSenderTest, ExternalEncoder) {
scoped_refptr<media::VideoFrame> video_frame = GetNewVideoFrame();
- const base::TimeTicks capture_time = testing_clock_->NowTicks();
- video_sender_->InsertRawVideoFrame(video_frame, capture_time);
+ const base::TimeTicks reference_time = testing_clock_->NowTicks();
+ video_sender_->InsertRawVideoFrame(video_frame, reference_time);
task_runner_->RunTasks();
- video_sender_->InsertRawVideoFrame(video_frame, capture_time);
+ video_sender_->InsertRawVideoFrame(video_frame, reference_time);
task_runner_->RunTasks();
- video_sender_->InsertRawVideoFrame(video_frame, capture_time);
+ video_sender_->InsertRawVideoFrame(video_frame, reference_time);
task_runner_->RunTasks();
// Fixed bitrate is used for external encoder. Bitrate is only once
@@ -298,8 +302,8 @@ TEST_F(VideoSenderTest, RtcpTimer) {
scoped_refptr<media::VideoFrame> video_frame = GetNewVideoFrame();
- const base::TimeTicks capture_time = testing_clock_->NowTicks();
- video_sender_->InsertRawVideoFrame(video_frame, capture_time);
+ const base::TimeTicks reference_time = testing_clock_->NowTicks();
+ video_sender_->InsertRawVideoFrame(video_frame, reference_time);
// Make sure that we send at least one RTCP packet.
base::TimeDelta max_rtcp_timeout =
@@ -322,8 +326,8 @@ TEST_F(VideoSenderTest, ResendTimer) {
scoped_refptr<media::VideoFrame> video_frame = GetNewVideoFrame();
- const base::TimeTicks capture_time = testing_clock_->NowTicks();
- video_sender_->InsertRawVideoFrame(video_frame, capture_time);
+ const base::TimeTicks reference_time = testing_clock_->NowTicks();
+ video_sender_->InsertRawVideoFrame(video_frame, reference_time);
// ACK the key frame.
RtcpCastMessage cast_feedback(1);
@@ -332,7 +336,7 @@ TEST_F(VideoSenderTest, ResendTimer) {
video_sender_->OnReceivedCastFeedback(cast_feedback);
video_frame = GetNewVideoFrame();
- video_sender_->InsertRawVideoFrame(video_frame, capture_time);
+ video_sender_->InsertRawVideoFrame(video_frame, reference_time);
base::TimeDelta max_resend_timeout =
base::TimeDelta::FromMilliseconds(1 + kDefaultRtpMaxDelayMs);
@@ -354,8 +358,8 @@ TEST_F(VideoSenderTest, LogAckReceivedEvent) {
for (int i = 0; i < num_frames; i++) {
scoped_refptr<media::VideoFrame> video_frame = GetNewVideoFrame();
- const base::TimeTicks capture_time = testing_clock_->NowTicks();
- video_sender_->InsertRawVideoFrame(video_frame, capture_time);
+ const base::TimeTicks reference_time = testing_clock_->NowTicks();
+ video_sender_->InsertRawVideoFrame(video_frame, reference_time);
RunTasks(33);
}
diff --git a/media/cast/sender/vp8_encoder.cc b/media/cast/sender/vp8_encoder.cc
index 918b1f4054..bf430c1869 100644
--- a/media/cast/sender/vp8_encoder.cc
+++ b/media/cast/sender/vp8_encoder.cc
@@ -4,8 +4,6 @@
#include "media/cast/sender/vp8_encoder.h"
-#include <vector>
-
#include "base/logging.h"
#include "media/base/video_frame.h"
#include "media/cast/cast_defines.h"
@@ -15,20 +13,28 @@
namespace media {
namespace cast {
-static const uint32 kMinIntra = 300;
+namespace {
+
+// After a pause in the video stream, what is the maximum duration amount to
+// pass to the encoder for the next frame (in terms of 1/max_fps sized periods)?
+// This essentially controls the encoded size of the first frame that follows a
+// pause in the video stream.
+const int kRestartFramePeriods = 3;
-Vp8Encoder::Vp8Encoder(const VideoSenderConfig& video_config,
- int max_unacked_frames)
+} // namespace
+
+Vp8Encoder::Vp8Encoder(const VideoSenderConfig& video_config)
: cast_config_(video_config),
use_multiple_video_buffers_(
cast_config_.max_number_of_video_buffers_used ==
kNumberOfVp8VideoBuffers),
+ raw_image_(nullptr),
key_frame_requested_(true),
- first_frame_received_(false),
last_encoded_frame_id_(kStartFrameId),
last_acked_frame_id_(kStartFrameId),
- frame_id_to_reference_(kStartFrameId - 1),
undroppable_frames_(0) {
+ config_.g_timebase.den = 0; // Not initialized.
+
// VP8 have 3 buffers available for prediction, with
// max_number_of_video_buffers_used set to 1 we maximize the coding efficiency
// however in this mode we can not skip frames in the receiver to catch up
@@ -44,14 +50,15 @@ Vp8Encoder::Vp8Encoder(const VideoSenderConfig& video_config,
}
Vp8Encoder::~Vp8Encoder() {
- vpx_codec_destroy(encoder_.get());
+ DCHECK(thread_checker_.CalledOnValidThread());
+ if (is_initialized())
+ vpx_codec_destroy(&encoder_);
vpx_img_free(raw_image_);
}
void Vp8Encoder::Initialize() {
DCHECK(thread_checker_.CalledOnValidThread());
- config_.reset(new vpx_codec_enc_cfg_t());
- encoder_.reset(new vpx_codec_ctx_t());
+ DCHECK(!is_initialized());
// Creating a wrapper to the image - setting image data to NULL. Actual
// pointer will be set during encode. Setting align to 1, as it is
@@ -63,64 +70,77 @@ void Vp8Encoder::Initialize() {
buffer_state_[i].frame_id = kStartFrameId;
buffer_state_[i].state = kBufferStartState;
}
- InitEncode(cast_config_.number_of_encode_threads);
-}
-void Vp8Encoder::InitEncode(int number_of_encode_threads) {
- DCHECK(thread_checker_.CalledOnValidThread());
// Populate encoder configuration with default values.
- if (vpx_codec_enc_config_default(vpx_codec_vp8_cx(), config_.get(), 0)) {
- DCHECK(false) << "Invalid return value";
+ if (vpx_codec_enc_config_default(vpx_codec_vp8_cx(), &config_, 0)) {
+ NOTREACHED() << "Invalid return value";
+ config_.g_timebase.den = 0; // Do not call vpx_codec_destroy() in dtor.
+ return;
}
- config_->g_w = cast_config_.width;
- config_->g_h = cast_config_.height;
- config_->rc_target_bitrate = cast_config_.start_bitrate / 1000; // In kbit/s.
-
- // Setting the codec time base.
- config_->g_timebase.num = 1;
- config_->g_timebase.den = kVideoFrequency;
- config_->g_lag_in_frames = 0;
- config_->kf_mode = VPX_KF_DISABLED;
+
+ config_.g_threads = cast_config_.number_of_encode_threads;
+ config_.g_w = cast_config_.width;
+ config_.g_h = cast_config_.height;
+ // Set the timebase to match that of base::TimeDelta.
+ config_.g_timebase.num = 1;
+ config_.g_timebase.den = base::Time::kMicrosecondsPerSecond;
if (use_multiple_video_buffers_) {
// We must enable error resilience when we use multiple buffers, due to
// codec requirements.
- config_->g_error_resilient = 1;
+ config_.g_error_resilient = 1;
}
- config_->g_threads = number_of_encode_threads;
+ config_.g_pass = VPX_RC_ONE_PASS;
+ config_.g_lag_in_frames = 0; // Immediate data output for each frame.
// Rate control settings.
- // Never allow the encoder to drop frame internally.
- config_->rc_dropframe_thresh = 0;
- config_->rc_end_usage = VPX_CBR;
- config_->g_pass = VPX_RC_ONE_PASS;
- config_->rc_resize_allowed = 0;
- config_->rc_min_quantizer = cast_config_.min_qp;
- config_->rc_max_quantizer = cast_config_.max_qp;
- config_->rc_undershoot_pct = 100;
- config_->rc_overshoot_pct = 15;
- config_->rc_buf_initial_sz = 500;
- config_->rc_buf_optimal_sz = 600;
- config_->rc_buf_sz = 1000;
-
- // set the maximum target size of any key-frame.
- uint32 rc_max_intra_target = MaxIntraTarget(config_->rc_buf_optimal_sz);
+ config_.rc_dropframe_thresh = 0; // The encoder may not drop any frames.
+ config_.rc_resize_allowed = 0; // TODO(miu): Why not? Investigate this.
+ config_.rc_end_usage = VPX_CBR;
+ config_.rc_target_bitrate = cast_config_.start_bitrate / 1000; // In kbit/s.
+ config_.rc_min_quantizer = cast_config_.min_qp;
+ config_.rc_max_quantizer = cast_config_.max_qp;
+ // TODO(miu): Revisit these now that the encoder is being successfully
+ // micro-managed.
+ config_.rc_undershoot_pct = 100;
+ config_.rc_overshoot_pct = 15;
+ // TODO(miu): Document why these rc_buf_*_sz values were chosen and/or
+ // research for better values. Should they be computed from the target
+ // playout delay?
+ config_.rc_buf_initial_sz = 500;
+ config_.rc_buf_optimal_sz = 600;
+ config_.rc_buf_sz = 1000;
+
+ config_.kf_mode = VPX_KF_DISABLED;
+
vpx_codec_flags_t flags = 0;
- if (vpx_codec_enc_init(
- encoder_.get(), vpx_codec_vp8_cx(), config_.get(), flags)) {
- DCHECK(false) << "vpx_codec_enc_init() failed.";
- encoder_.reset();
+ if (vpx_codec_enc_init(&encoder_, vpx_codec_vp8_cx(), &config_, flags)) {
+ NOTREACHED() << "vpx_codec_enc_init() failed.";
+ config_.g_timebase.den = 0; // Do not call vpx_codec_destroy() in dtor.
return;
}
- vpx_codec_control(encoder_.get(), VP8E_SET_STATIC_THRESHOLD, 1);
- vpx_codec_control(encoder_.get(), VP8E_SET_NOISE_SENSITIVITY, 0);
- vpx_codec_control(encoder_.get(), VP8E_SET_CPUUSED, -6);
- vpx_codec_control(
- encoder_.get(), VP8E_SET_MAX_INTRA_BITRATE_PCT, rc_max_intra_target);
+
+ // Raise the threshold for considering macroblocks as static. The default is
+ // zero, so this setting makes the encoder less sensitive to motion. This
+ // lowers the probability of needing to utilize more CPU to search for motion
+ // vectors.
+ vpx_codec_control(&encoder_, VP8E_SET_STATIC_THRESHOLD, 1);
+
+ // Improve quality by enabling sets of codec features that utilize more CPU.
+ // The default is zero, with increasingly more CPU to be used as the value is
+ // more negative.
+ // TODO(miu): Document why this value was chosen and expected behaviors.
+ // Should this be dynamic w.r.t. hardware performance?
+ vpx_codec_control(&encoder_, VP8E_SET_CPUUSED, -6);
}
-bool Vp8Encoder::Encode(const scoped_refptr<media::VideoFrame>& video_frame,
- EncodedFrame* encoded_image) {
+void Vp8Encoder::Encode(const scoped_refptr<media::VideoFrame>& video_frame,
+ const base::TimeTicks& reference_time,
+ EncodedFrame* encoded_frame) {
DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(encoded_frame);
+
+ CHECK(is_initialized()); // No illegal reference to |config_| or |encoder_|.
+
// Image in vpx_image_t format.
// Input image is const. VP8's raw image is not defined as const.
raw_image_->planes[VPX_PLANE_Y] =
@@ -151,79 +171,83 @@ bool Vp8Encoder::Encode(const scoped_refptr<media::VideoFrame>& video_frame,
GetCodecUpdateFlags(buffer_to_update, &flags);
}
- // Note: The duration does not reflect the real time between frames. This is
- // done to keep the encoder happy.
- //
- // TODO(miu): This is a semi-hack. We should consider using
- // |video_frame->timestamp()| instead.
- uint32 duration = kVideoFrequency / cast_config_.max_frame_rate;
-
- // Note: Timestamp here is used for bitrate calculation. The absolute value
- // is not important.
- if (!first_frame_received_) {
- first_frame_received_ = true;
- first_frame_timestamp_ = video_frame->timestamp();
- }
-
- vpx_codec_pts_t timestamp =
- (video_frame->timestamp() - first_frame_timestamp_).InMicroseconds() *
- kVideoFrequency / base::Time::kMicrosecondsPerSecond;
-
- if (vpx_codec_encode(encoder_.get(),
- raw_image_,
- timestamp,
- duration,
- flags,
- VPX_DL_REALTIME) != VPX_CODEC_OK) {
- LOG(ERROR) << "Failed to encode for once.";
- return false;
- }
-
- // Get encoded frame.
+ // The frame duration given to the VP8 codec affects a number of important
+ // behaviors, including: per-frame bandwidth, CPU time spent encoding,
+ // temporal quality trade-offs, and key/golden/alt-ref frame generation
+ // intervals. Use the actual amount of time between the current and previous
+ // frames as a prediction for the next frame's duration, but bound the
+ // prediction to account for the fact that the frame rate can be highly
+ // variable, including long pauses in the video stream.
+ const base::TimeDelta minimum_frame_duration =
+ base::TimeDelta::FromSecondsD(1.0 / cast_config_.max_frame_rate);
+ const base::TimeDelta maximum_frame_duration =
+ base::TimeDelta::FromSecondsD(static_cast<double>(kRestartFramePeriods) /
+ cast_config_.max_frame_rate);
+ const base::TimeDelta last_frame_duration =
+ video_frame->timestamp() - last_frame_timestamp_;
+ const base::TimeDelta predicted_frame_duration =
+ std::max(minimum_frame_duration,
+ std::min(maximum_frame_duration, last_frame_duration));
+ last_frame_timestamp_ = video_frame->timestamp();
+
+ // Encode the frame. The presentation time stamp argument here is fixed to
+ // zero to force the encoder to base its single-frame bandwidth calculations
+ // entirely on |predicted_frame_duration| and the target bitrate setting being
+ // micro-managed via calls to UpdateRates().
+ CHECK_EQ(vpx_codec_encode(&encoder_,
+ raw_image_,
+ 0,
+ predicted_frame_duration.InMicroseconds(),
+ flags,
+ VPX_DL_REALTIME),
+ VPX_CODEC_OK)
+ << "BUG: Invalid arguments passed to vpx_codec_encode().";
+
+ // Pull data from the encoder, populating a new EncodedFrame.
+ encoded_frame->frame_id = ++last_encoded_frame_id_;
const vpx_codec_cx_pkt_t* pkt = NULL;
vpx_codec_iter_t iter = NULL;
- bool is_key_frame = false;
- while ((pkt = vpx_codec_get_cx_data(encoder_.get(), &iter)) != NULL) {
+ while ((pkt = vpx_codec_get_cx_data(&encoder_, &iter)) != NULL) {
if (pkt->kind != VPX_CODEC_CX_FRAME_PKT)
continue;
- encoded_image->data.assign(
+ if (pkt->data.frame.flags & VPX_FRAME_IS_KEY) {
+ // TODO(hubbe): Replace "dependency" with a "bool is_key_frame".
+ encoded_frame->dependency = EncodedFrame::KEY;
+ encoded_frame->referenced_frame_id = encoded_frame->frame_id;
+ } else {
+ encoded_frame->dependency = EncodedFrame::DEPENDENT;
+ // Frame dependencies could theoretically be relaxed by looking for the
+ // VPX_FRAME_IS_DROPPABLE flag, but in recent testing (Oct 2014), this
+ // flag never seems to be set.
+ encoded_frame->referenced_frame_id = latest_frame_id_to_reference;
+ }
+ encoded_frame->rtp_timestamp =
+ TimeDeltaToRtpDelta(video_frame->timestamp(), kVideoFrequency);
+ encoded_frame->reference_time = reference_time;
+ encoded_frame->data.assign(
static_cast<const uint8*>(pkt->data.frame.buf),
static_cast<const uint8*>(pkt->data.frame.buf) + pkt->data.frame.sz);
- is_key_frame = !!(pkt->data.frame.flags & VPX_FRAME_IS_KEY);
break; // Done, since all data is provided in one CX_FRAME_PKT packet.
}
- // Don't update frame_id for zero size frames.
- if (encoded_image->data.empty())
- return true;
-
- // Populate the encoded frame.
- encoded_image->frame_id = ++last_encoded_frame_id_;
- if (is_key_frame) {
- // TODO(Hubbe): Replace "dependency" with a "bool is_key_frame".
- encoded_image->dependency = EncodedFrame::KEY;
- encoded_image->referenced_frame_id = encoded_image->frame_id;
- } else {
- encoded_image->dependency = EncodedFrame::DEPENDENT;
- encoded_image->referenced_frame_id = latest_frame_id_to_reference;
- }
+ DCHECK(!encoded_frame->data.empty())
+ << "BUG: Encoder must provide data since lagged encoding is disabled.";
- DVLOG(1) << "VP8 encoded frame_id " << encoded_image->frame_id
- << ", sized:" << encoded_image->data.size();
+ DVLOG(2) << "VP8 encoded frame_id " << encoded_frame->frame_id
+ << ", sized:" << encoded_frame->data.size();
- if (is_key_frame) {
+ if (encoded_frame->dependency == EncodedFrame::KEY) {
key_frame_requested_ = false;
for (int i = 0; i < kNumberOfVp8VideoBuffers; ++i) {
buffer_state_[i].state = kBufferSent;
- buffer_state_[i].frame_id = encoded_image->frame_id;
+ buffer_state_[i].frame_id = encoded_frame->frame_id;
}
} else {
if (buffer_to_update != kNoBuffer) {
buffer_state_[buffer_to_update].state = kBufferSent;
- buffer_state_[buffer_to_update].frame_id = encoded_image->frame_id;
+ buffer_state_[buffer_to_update].frame_id = encoded_frame->frame_id;
}
}
- return true;
}
uint32 Vp8Encoder::GetCodecReferenceFlags(vpx_codec_flags_t* flags) {
@@ -370,16 +394,22 @@ void Vp8Encoder::GetCodecUpdateFlags(Vp8Buffers buffer_to_update,
void Vp8Encoder::UpdateRates(uint32 new_bitrate) {
DCHECK(thread_checker_.CalledOnValidThread());
+
+ if (!is_initialized())
+ return;
+
uint32 new_bitrate_kbit = new_bitrate / 1000;
- if (config_->rc_target_bitrate == new_bitrate_kbit)
+ if (config_.rc_target_bitrate == new_bitrate_kbit)
return;
- config_->rc_target_bitrate = new_bitrate_kbit;
+ config_.rc_target_bitrate = new_bitrate_kbit;
// Update encoder context.
- if (vpx_codec_enc_config_set(encoder_.get(), config_.get())) {
- DCHECK(false) << "Invalid return value";
+ if (vpx_codec_enc_config_set(&encoder_, &config_)) {
+ NOTREACHED() << "Invalid return value";
}
+
+ VLOG(1) << "VP8 new rc_target_bitrate: " << new_bitrate_kbit << " kbps";
}
void Vp8Encoder::LatestFrameIdToReference(uint32 frame_id) {
@@ -387,7 +417,7 @@ void Vp8Encoder::LatestFrameIdToReference(uint32 frame_id) {
if (!use_multiple_video_buffers_)
return;
- VLOG(1) << "VP8 ok to reference frame:" << static_cast<int>(frame_id);
+ VLOG(2) << "VP8 ok to reference frame:" << static_cast<int>(frame_id);
for (int i = 0; i < kNumberOfVp8VideoBuffers; ++i) {
if (frame_id == buffer_state_[i].frame_id) {
buffer_state_[i].state = kBufferAcked;
@@ -404,22 +434,5 @@ void Vp8Encoder::GenerateKeyFrame() {
key_frame_requested_ = true;
}
-// Calculate the max size of the key frame relative to a normal delta frame.
-uint32 Vp8Encoder::MaxIntraTarget(uint32 optimal_buffer_size_ms) const {
- // Set max to the optimal buffer level (normalized by target BR),
- // and scaled by a scale_parameter.
- // Max target size = scalePar * optimalBufferSize * targetBR[Kbps].
- // This values is presented in percentage of perFrameBw:
- // perFrameBw = targetBR[Kbps] * 1000 / frameRate.
- // The target in % is as follows:
-
- float scale_parameter = 0.5;
- uint32 target_pct = optimal_buffer_size_ms * scale_parameter *
- cast_config_.max_frame_rate / 10;
-
- // Don't go below 3 times the per frame bandwidth.
- return std::max(target_pct, kMinIntra);
-}
-
} // namespace cast
} // namespace media
diff --git a/media/cast/sender/vp8_encoder.h b/media/cast/sender/vp8_encoder.h
index abe5eebb88..387dbf27a4 100644
--- a/media/cast/sender/vp8_encoder.h
+++ b/media/cast/sender/vp8_encoder.h
@@ -8,7 +8,6 @@
#include "base/basictypes.h"
#include "base/memory/scoped_ptr.h"
#include "base/threading/thread_checker.h"
-#include "base/time/time.h"
#include "media/cast/cast_config.h"
#include "media/cast/sender/software_video_encoder.h"
#include "third_party/libvpx/source/libvpx/vpx/vpx_encoder.h"
@@ -17,37 +16,27 @@ namespace media {
class VideoFrame;
}
-// VPX forward declaration.
-typedef struct vpx_codec_ctx vpx_enc_ctx_t;
-
namespace media {
namespace cast {
-const int kNumberOfVp8VideoBuffers = 3;
-
class Vp8Encoder : public SoftwareVideoEncoder {
public:
- Vp8Encoder(const VideoSenderConfig& video_config, int max_unacked_frames);
-
- virtual ~Vp8Encoder();
-
- // Initialize the encoder before Encode() can be called. This method
- // must be called on the thread that Encode() is called.
- virtual void Initialize() OVERRIDE;
+ explicit Vp8Encoder(const VideoSenderConfig& video_config);
- // Encode a raw image (as a part of a video stream).
- virtual bool Encode(const scoped_refptr<media::VideoFrame>& video_frame,
- EncodedFrame* encoded_image) OVERRIDE;
+ ~Vp8Encoder() override;
- // Update the encoder with a new target bit rate.
- virtual void UpdateRates(uint32 new_bitrate) OVERRIDE;
-
- // Set the next frame to be a key frame.
- virtual void GenerateKeyFrame() OVERRIDE;
-
- virtual void LatestFrameIdToReference(uint32 frame_id) OVERRIDE;
+ // SoftwareVideoEncoder implementations.
+ void Initialize() override;
+ void Encode(const scoped_refptr<media::VideoFrame>& video_frame,
+ const base::TimeTicks& reference_time,
+ EncodedFrame* encoded_frame) override;
+ void UpdateRates(uint32 new_bitrate) override;
+ void GenerateKeyFrame() override;
+ void LatestFrameIdToReference(uint32 frame_id) override;
private:
+ enum { kNumberOfVp8VideoBuffers = 3 };
+
enum Vp8Buffers {
kAltRefBuffer = 0,
kGoldenBuffer = 1,
@@ -60,15 +49,17 @@ class Vp8Encoder : public SoftwareVideoEncoder {
kBufferSent,
kBufferAcked
};
+
struct BufferState {
uint32 frame_id;
Vp8BufferState state;
};
- void InitEncode(int number_of_cores);
-
- // Calculate the max target in % for a keyframe.
- uint32 MaxIntraTarget(uint32 optimal_buffer_size) const;
+ bool is_initialized() const {
+ // Initialize() sets the timebase denominator value to non-zero if the
+ // encoder is successfully initialized, and it is zero otherwise.
+ return config_.g_timebase.den != 0;
+ }
// Calculate which next Vp8 buffers to update with the next frame.
Vp8Buffers GetNextBufferToUpdate();
@@ -84,18 +75,32 @@ class Vp8Encoder : public SoftwareVideoEncoder {
const VideoSenderConfig cast_config_;
const bool use_multiple_video_buffers_;
- // VP8 internal objects.
- scoped_ptr<vpx_codec_enc_cfg_t> config_;
- scoped_ptr<vpx_enc_ctx_t> encoder_;
+ // VP8 internal objects. These are valid for use only while is_initialized()
+ // returns true.
+ vpx_codec_enc_cfg_t config_;
+ vpx_codec_ctx_t encoder_;
+
+ // Wrapper for access to YUV data planes in a media::VideoFrame.
vpx_image_t* raw_image_;
+ // Set to true to request the next frame emitted by Vp8Encoder be a key frame.
bool key_frame_requested_;
- bool first_frame_received_;
- base::TimeDelta first_frame_timestamp_;
+
+ // The |VideoFrame::timestamp()| of the last encoded frame. This is used to
+ // predict the duration of the next frame.
+ base::TimeDelta last_frame_timestamp_;
+
+ // The last encoded frame's ID.
uint32 last_encoded_frame_id_;
+
+ // Used to track which buffers are old enough to be re-used.
uint32 last_acked_frame_id_;
- uint32 frame_id_to_reference_;
- uint32 undroppable_frames_;
+
+ // Used by GetNextBufferToUpdate() to track how many consecutive times the
+ // newest buffer had to be overwritten.
+ int undroppable_frames_;
+
+ // Tracks the lifecycle and dependency state of each of the three buffers.
BufferState buffer_state_[kNumberOfVp8VideoBuffers];
// This is bound to the thread where Initialize() is called.
diff --git a/media/cast/test/cast_benchmarks.cc b/media/cast/test/cast_benchmarks.cc
index 880f4ada8d..bfa12706fd 100644
--- a/media/cast/test/cast_benchmarks.cc
+++ b/media/cast/test/cast_benchmarks.cc
@@ -108,24 +108,21 @@ class CastTransportSenderWrapper : public CastTransportSender {
encoded_audio_bytes_ = encoded_audio_bytes;
}
- virtual void InitializeAudio(
- const CastTransportRtpConfig& config,
- const RtcpCastMessageCallback& cast_message_cb,
- const RtcpRttCallback& rtt_cb) OVERRIDE {
+ void InitializeAudio(const CastTransportRtpConfig& config,
+ const RtcpCastMessageCallback& cast_message_cb,
+ const RtcpRttCallback& rtt_cb) override {
audio_ssrc_ = config.ssrc;
transport_->InitializeAudio(config, cast_message_cb, rtt_cb);
}
- virtual void InitializeVideo(
- const CastTransportRtpConfig& config,
- const RtcpCastMessageCallback& cast_message_cb,
- const RtcpRttCallback& rtt_cb) OVERRIDE {
+ void InitializeVideo(const CastTransportRtpConfig& config,
+ const RtcpCastMessageCallback& cast_message_cb,
+ const RtcpRttCallback& rtt_cb) override {
video_ssrc_ = config.ssrc;
transport_->InitializeVideo(config, cast_message_cb, rtt_cb);
}
- virtual void InsertFrame(uint32 ssrc,
- const EncodedFrame& frame) OVERRIDE {
+ void InsertFrame(uint32 ssrc, const EncodedFrame& frame) override {
if (ssrc == audio_ssrc_) {
*encoded_audio_bytes_ += frame.data.size();
} else if (ssrc == video_ssrc_) {
@@ -134,27 +131,24 @@ class CastTransportSenderWrapper : public CastTransportSender {
transport_->InsertFrame(ssrc, frame);
}
- virtual void SendSenderReport(
- uint32 ssrc,
- base::TimeTicks current_time,
- uint32 current_time_as_rtp_timestamp) OVERRIDE {
+ void SendSenderReport(uint32 ssrc,
+ base::TimeTicks current_time,
+ uint32 current_time_as_rtp_timestamp) override {
transport_->SendSenderReport(ssrc,
current_time,
current_time_as_rtp_timestamp);
}
- virtual void CancelSendingFrames(
- uint32 ssrc,
- const std::vector<uint32>& frame_ids) OVERRIDE {
+ void CancelSendingFrames(uint32 ssrc,
+ const std::vector<uint32>& frame_ids) override {
transport_->CancelSendingFrames(ssrc, frame_ids);
}
- virtual void ResendFrameForKickstart(uint32 ssrc,
- uint32 frame_id) OVERRIDE {
+ void ResendFrameForKickstart(uint32 ssrc, uint32 frame_id) override {
transport_->ResendFrameForKickstart(ssrc, frame_id);
}
- virtual PacketReceiverCallback PacketReceiverForTesting() OVERRIDE {
+ PacketReceiverCallback PacketReceiverForTesting() override {
return transport_->PacketReceiverForTesting();
}
diff --git a/media/cast/test/end2end_unittest.cc b/media/cast/test/end2end_unittest.cc
index a80912a900..7489927cd8 100644
--- a/media/cast/test/end2end_unittest.cc
+++ b/media/cast/test/end2end_unittest.cc
@@ -171,10 +171,10 @@ class LoopBackPacketPipe : public test::PacketPipe {
LoopBackPacketPipe(const PacketReceiverCallback& packet_receiver)
: packet_receiver_(packet_receiver) {}
- virtual ~LoopBackPacketPipe() {}
+ ~LoopBackPacketPipe() override {}
// PacketPipe implementations.
- virtual void Send(scoped_ptr<Packet> packet) OVERRIDE {
+ void Send(scoped_ptr<Packet> packet) override {
packet_receiver_.Run(packet.Pass());
}
@@ -206,8 +206,7 @@ class LoopBackTransport : public PacketSender {
packet_pipe_->InitOnIOThread(task_runner, clock);
}
- virtual bool SendPacket(PacketRef packet,
- const base::Closure& cb) OVERRIDE {
+ bool SendPacket(PacketRef packet, const base::Closure& cb) override {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
if (!send_packets_)
return true;
@@ -224,9 +223,7 @@ class LoopBackTransport : public PacketSender {
return true;
}
- virtual int64 GetBytesSent() OVERRIDE {
- return bytes_sent_;
- }
+ int64 GetBytesSent() override { return bytes_sent_; }
void SetSendPackets(bool send_packets) { send_packets_ = send_packets; }
@@ -547,16 +544,16 @@ class End2EndTest : public ::testing::Test {
for (int i = 0; i < count; ++i) {
scoped_ptr<AudioBus> audio_bus(audio_bus_factory_->NextAudioBus(
base::TimeDelta::FromMilliseconds(kAudioFrameDurationMs)));
- const base::TimeTicks capture_time =
+ const base::TimeTicks reference_time =
testing_clock_sender_->NowTicks() +
i * base::TimeDelta::FromMilliseconds(kAudioFrameDurationMs);
if (will_be_checked) {
test_receiver_audio_callback_->AddExpectedResult(
*audio_bus,
- capture_time +
+ reference_time +
base::TimeDelta::FromMilliseconds(kTargetPlayoutDelayMs));
}
- audio_frame_input_->InsertAudio(audio_bus.Pass(), capture_time);
+ audio_frame_input_->InsertAudio(audio_bus.Pass(), reference_time);
}
}
@@ -565,14 +562,14 @@ class End2EndTest : public ::testing::Test {
for (int i = 0; i < count; ++i) {
scoped_ptr<AudioBus> audio_bus(audio_bus_factory_->NextAudioBus(
base::TimeDelta::FromMilliseconds(kAudioFrameDurationMs)));
- const base::TimeTicks capture_time =
+ const base::TimeTicks reference_time =
testing_clock_sender_->NowTicks() +
i * base::TimeDelta::FromMilliseconds(kAudioFrameDurationMs);
test_receiver_audio_callback_->AddExpectedResult(
*audio_bus,
- capture_time + delay +
+ reference_time + delay +
base::TimeDelta::FromMilliseconds(kTargetPlayoutDelayMs));
- audio_frame_input_->InsertAudio(audio_bus.Pass(), capture_time);
+ audio_frame_input_->InsertAudio(audio_bus.Pass(), reference_time);
}
}
@@ -638,29 +635,38 @@ class End2EndTest : public ::testing::Test {
&event_subscriber_sender_);
}
- virtual void TearDown() OVERRIDE {
+ virtual void TearDown() override {
cast_sender_.reset();
cast_receiver_.reset();
task_runner_->RunTasks();
}
- void SendVideoFrame(int start_value, const base::TimeTicks& capture_time) {
+ void SendVideoFrame(int start_value, const base::TimeTicks& reference_time) {
if (start_time_.is_null())
- start_time_ = capture_time;
- base::TimeDelta time_diff = capture_time - start_time_;
+ start_time_ = reference_time;
+ // TODO(miu): Consider using a slightly skewed clock for the media timestamp
+ // since the video clock may not be the same as the reference clock.
+ const base::TimeDelta time_diff = reference_time - start_time_;
gfx::Size size(video_sender_config_.width, video_sender_config_.height);
EXPECT_TRUE(VideoFrame::IsValidConfig(
VideoFrame::I420, size, gfx::Rect(size), size));
scoped_refptr<media::VideoFrame> video_frame =
media::VideoFrame::CreateFrame(
- VideoFrame::I420, size, gfx::Rect(size), size, time_diff);
+ VideoFrame::I420, size, gfx::Rect(size), size,
+ time_diff);
PopulateVideoFrame(video_frame.get(), start_value);
- video_frame_input_->InsertRawVideoFrame(video_frame, capture_time);
+ video_frame_input_->InsertRawVideoFrame(video_frame, reference_time);
}
- void SendFakeVideoFrame(const base::TimeTicks& capture_time) {
- video_frame_input_->InsertRawVideoFrame(
- media::VideoFrame::CreateBlackFrame(gfx::Size(2, 2)), capture_time);
+ void SendFakeVideoFrame(const base::TimeTicks& reference_time) {
+ if (start_time_.is_null())
+ start_time_ = reference_time;
+ const scoped_refptr<media::VideoFrame> black_frame =
+ media::VideoFrame::CreateBlackFrame(gfx::Size(2, 2));
+ // TODO(miu): Consider using a slightly skewed clock for the media timestamp
+ // since the video clock may not be the same as the reference clock.
+ black_frame->set_timestamp(reference_time - start_time_);
+ video_frame_input_->InsertRawVideoFrame(black_frame, reference_time);
}
void RunTasks(int ms) {
@@ -1004,20 +1010,19 @@ TEST_F(End2EndTest, DropEveryOtherFrame3Buffers) {
sender_to_receiver_.DropAllPacketsBelongingToOddFrames();
int video_start = kVideoStart;
- base::TimeTicks capture_time;
+ base::TimeTicks reference_time;
int i = 0;
for (; i < 20; ++i) {
- capture_time = testing_clock_sender_->NowTicks();
- SendVideoFrame(video_start, capture_time);
+ reference_time = testing_clock_sender_->NowTicks();
+ SendVideoFrame(video_start, reference_time);
if (i % 2 == 0) {
test_receiver_video_callback_->AddExpectedResult(
video_start,
video_sender_config_.width,
video_sender_config_.height,
- capture_time +
- base::TimeDelta::FromMilliseconds(target_delay),
+ reference_time + base::TimeDelta::FromMilliseconds(target_delay),
i == 0);
// GetRawVideoFrame will not return the frame until we are close in
@@ -1051,14 +1056,15 @@ TEST_F(End2EndTest, CryptoVideo) {
int frames_counter = 0;
for (; frames_counter < 3; ++frames_counter) {
- const base::TimeTicks capture_time = testing_clock_sender_->NowTicks();
- SendVideoFrame(frames_counter, capture_time);
+ const base::TimeTicks reference_time = testing_clock_sender_->NowTicks();
+ SendVideoFrame(frames_counter, reference_time);
test_receiver_video_callback_->AddExpectedResult(
frames_counter,
video_sender_config_.width,
video_sender_config_.height,
- capture_time + base::TimeDelta::FromMilliseconds(kTargetPlayoutDelayMs),
+ reference_time +
+ base::TimeDelta::FromMilliseconds(kTargetPlayoutDelayMs),
true);
RunTasks(kFrameTimerMs);
@@ -1108,15 +1114,16 @@ TEST_F(End2EndTest, VideoLogging) {
int video_start = kVideoStart;
const int num_frames = 5;
for (int i = 0; i < num_frames; ++i) {
- base::TimeTicks capture_time = testing_clock_sender_->NowTicks();
+ base::TimeTicks reference_time = testing_clock_sender_->NowTicks();
test_receiver_video_callback_->AddExpectedResult(
video_start,
video_sender_config_.width,
video_sender_config_.height,
- capture_time + base::TimeDelta::FromMilliseconds(kTargetPlayoutDelayMs),
+ reference_time +
+ base::TimeDelta::FromMilliseconds(kTargetPlayoutDelayMs),
true);
- SendVideoFrame(video_start, capture_time);
+ SendVideoFrame(video_start, reference_time);
RunTasks(kFrameTimerMs);
cast_receiver_->RequestDecodedVideoFrame(
@@ -1489,7 +1496,7 @@ TEST_F(End2EndTest, TestSetPlayoutDelay) {
int64 delta = (video_ticks_[i].second -
video_ticks_[i-1].second).InMilliseconds();
if (delta > 100) {
- EXPECT_EQ(delta, kNewDelay - kTargetPlayoutDelayMs + kFrameTimerMs);
+ EXPECT_EQ(kNewDelay - kTargetPlayoutDelayMs + kFrameTimerMs, delta);
EXPECT_EQ(0u, jump);
jump = i;
}
diff --git a/media/cast/test/fake_media_source.cc b/media/cast/test/fake_media_source.cc
index 73b249336a..d687ef356f 100644
--- a/media/cast/test/fake_media_source.cc
+++ b/media/cast/test/fake_media_source.cc
@@ -41,6 +41,16 @@ void AVFreeFrame(AVFrame* frame) {
av_frame_free(&frame);
}
+base::TimeDelta PtsToTimeDelta(int64 pts, const AVRational& time_base) {
+ return pts * base::TimeDelta::FromSeconds(1) * time_base.num / time_base.den;
+}
+
+int64 TimeDeltaToPts(base::TimeDelta delta, const AVRational& time_base) {
+ return static_cast<int64>(
+ delta.InSecondsF() * time_base.den / time_base.num +
+ 0.5 /* rounding */);
+}
+
} // namespace
namespace media {
@@ -56,7 +66,6 @@ FakeMediaSource::FakeMediaSource(
clock_(clock),
audio_frame_count_(0),
video_frame_count_(0),
- weak_factory_(this),
av_format_context_(NULL),
audio_stream_index_(-1),
playback_rate_(1.0),
@@ -64,7 +73,8 @@ FakeMediaSource::FakeMediaSource(
video_frame_rate_numerator_(video_config.max_frame_rate),
video_frame_rate_denominator_(1),
video_first_pts_(0),
- video_first_pts_set_(false) {
+ video_first_pts_set_(false),
+ weak_factory_(this) {
audio_bus_factory_.reset(new TestAudioBusFactory(kAudioChannels,
kAudioSamplingFrequency,
kSoundFrequency,
@@ -78,11 +88,6 @@ void FakeMediaSource::SetSourceFile(const base::FilePath& video_file,
int override_fps) {
DCHECK(!video_file.empty());
- if (override_fps) {
- video_config_.max_frame_rate = override_fps;
- video_frame_rate_numerator_ = override_fps;
- }
-
LOG(INFO) << "Source: " << video_file.value();
if (!file_data_.Initialize(video_file)) {
LOG(ERROR) << "Cannot load file.";
@@ -162,18 +167,16 @@ void FakeMediaSource::SetSourceFile(const base::FilePath& video_file,
LOG(WARNING) << "Found multiple video streams.";
}
video_stream_index_ = static_cast<int>(i);
- if (!override_fps) {
- video_frame_rate_numerator_ = av_stream->r_frame_rate.num;
- video_frame_rate_denominator_ = av_stream->r_frame_rate.den;
- // Max frame rate is rounded up.
- video_config_.max_frame_rate =
- video_frame_rate_denominator_ +
- video_frame_rate_numerator_ - 1;
- video_config_.max_frame_rate /= video_frame_rate_denominator_;
- } else {
+ if (override_fps > 0) {
// If video is played at a manual speed audio needs to match.
playback_rate_ = 1.0 * override_fps *
- av_stream->r_frame_rate.den / av_stream->r_frame_rate.num;
+ av_stream->r_frame_rate.den / av_stream->r_frame_rate.num;
+ video_frame_rate_numerator_ = override_fps;
+ video_frame_rate_denominator_ = 1;
+ } else {
+ playback_rate_ = 1.0;
+ video_frame_rate_numerator_ = av_stream->r_frame_rate.num;
+ video_frame_rate_denominator_ = av_stream->r_frame_rate.den;
}
LOG(INFO) << "Source file has video.";
} else {
@@ -190,11 +193,14 @@ void FakeMediaSource::Start(scoped_refptr<AudioFrameInput> audio_frame_input,
video_frame_input_ = video_frame_input;
LOG(INFO) << "Max Frame rate: " << video_config_.max_frame_rate;
- LOG(INFO) << "Real Frame rate: "
+ LOG(INFO) << "Source Frame rate: "
<< video_frame_rate_numerator_ << "/"
<< video_frame_rate_denominator_ << " fps.";
LOG(INFO) << "Audio playback rate: " << playback_rate_;
+ if (start_time_.is_null())
+ start_time_ = clock_->NowTicks();
+
if (!is_transcoding_audio() && !is_transcoding_video()) {
// Send fake patterns.
task_runner_->PostTask(
@@ -235,9 +241,7 @@ void FakeMediaSource::SendNextFakeFrame() {
PopulateVideoFrame(video_frame.get(), synthetic_count_);
++synthetic_count_;
- base::TimeTicks now = clock_->NowTicks();
- if (start_time_.is_null())
- start_time_ = now;
+ const base::TimeTicks now = clock_->NowTicks();
base::TimeDelta video_time = VideoFrameTime(++video_frame_count_);
video_frame->set_timestamp(video_time);
@@ -263,7 +267,7 @@ void FakeMediaSource::SendNextFakeFrame() {
audio_time = AudioFrameTime(++audio_frame_count_);
}
- // This is the time since the stream started.
+ // This is the time since FakeMediaSource was started.
const base::TimeDelta elapsed_time = now - start_time_;
// Handle the case when frame generation cannot keep up.
@@ -313,11 +317,10 @@ bool FakeMediaSource::SendNextTranscodedVideo(base::TimeDelta elapsed_time) {
decoded_frame->rows(VideoFrame::kVPlane),
video_frame.get());
- base::TimeDelta video_time;
// Use the timestamp from the file if we're transcoding.
- video_time = ScaleTimestamp(decoded_frame->timestamp());
+ video_frame->set_timestamp(ScaleTimestamp(decoded_frame->timestamp()));
video_frame_input_->InsertRawVideoFrame(
- video_frame, start_time_ + video_time);
+ video_frame, start_time_ + video_frame->timestamp());
// Make sure queue is not empty.
Decode(false);
@@ -347,11 +350,6 @@ bool FakeMediaSource::SendNextTranscodedAudio(base::TimeDelta elapsed_time) {
}
void FakeMediaSource::SendNextFrame() {
- if (start_time_.is_null())
- start_time_ = clock_->NowTicks();
- if (start_time_.is_null())
- start_time_ = clock_->NowTicks();
-
// Send as much as possible. Audio is sent according to
// system time.
while (SendNextTranscodedAudio(clock_->NowTicks() - start_time_));
@@ -364,9 +362,6 @@ void FakeMediaSource::SendNextFrame() {
// the end of the stream.
LOG(INFO) << "Rewind.";
Rewind();
- start_time_ = base::TimeTicks();
- audio_sent_ts_.reset();
- video_first_pts_set_ = false;
}
// Send next send.
@@ -384,8 +379,7 @@ base::TimeDelta FakeMediaSource::VideoFrameTime(int frame_number) {
}
base::TimeDelta FakeMediaSource::ScaleTimestamp(base::TimeDelta timestamp) {
- return base::TimeDelta::FromMicroseconds(
- timestamp.InMicroseconds() / playback_rate_);
+ return base::TimeDelta::FromSecondsD(timestamp.InSecondsF() / playback_rate_);
}
base::TimeDelta FakeMediaSource::AudioFrameTime(int frame_number) {
@@ -400,7 +394,7 @@ void FakeMediaSource::Rewind() {
ScopedAVPacket FakeMediaSource::DemuxOnePacket(bool* audio) {
ScopedAVPacket packet(new AVPacket());
if (av_read_frame(av_format_context_, packet.get()) < 0) {
- LOG(ERROR) << "Failed to read one AVPacket.";
+ VLOG(1) << "Failed to read one AVPacket.";
packet.reset();
return packet.Pass();
}
@@ -463,8 +457,7 @@ void FakeMediaSource::DecodeAudio(ScopedAVPacket packet) {
av_audio_context()->sample_rate,
frames_read,
&avframe->data[0],
- // Note: Not all files have correct values for pkt_pts.
- base::TimeDelta::FromMilliseconds(avframe->pkt_pts));
+ PtsToTimeDelta(avframe->pkt_pts, av_audio_stream()->time_base));
audio_algo_.EnqueueBuffer(buffer);
av_frame_unref(avframe);
} while (packet_temp.size > 0);
@@ -509,9 +502,6 @@ void FakeMediaSource::DecodeVideo(ScopedAVPacket packet) {
// Video.
int got_picture;
AVFrame* avframe = av_frame_alloc();
- // Tell the decoder to reorder for us.
- avframe->reordered_opaque =
- av_video_context()->reordered_opaque = packet->pts;
CHECK(avcodec_decode_video2(
av_video_context(), avframe, &got_picture, packet.get()) >= 0)
<< "Video decode error.";
@@ -520,12 +510,23 @@ void FakeMediaSource::DecodeVideo(ScopedAVPacket packet) {
return;
}
gfx::Size size(av_video_context()->width, av_video_context()->height);
- if (!video_first_pts_set_ ||
- avframe->reordered_opaque < video_first_pts_) {
+
+ if (!video_first_pts_set_) {
+ video_first_pts_ = avframe->pkt_pts;
video_first_pts_set_ = true;
- video_first_pts_ = avframe->reordered_opaque;
}
- int64 pts = avframe->reordered_opaque - video_first_pts_;
+ const AVRational& time_base = av_video_stream()->time_base;
+ base::TimeDelta timestamp =
+ PtsToTimeDelta(avframe->pkt_pts - video_first_pts_, time_base);
+ if (timestamp < last_video_frame_timestamp_) {
+ // Stream has rewound. Rebase |video_first_pts_|.
+ const AVRational& frame_rate = av_video_stream()->r_frame_rate;
+ timestamp = last_video_frame_timestamp_ +
+ (base::TimeDelta::FromSeconds(1) * frame_rate.den / frame_rate.num);
+ const int64 adjustment_pts = TimeDeltaToPts(timestamp, time_base);
+ video_first_pts_ = avframe->pkt_pts - adjustment_pts;
+ }
+
video_frame_queue_.push(
VideoFrame::WrapExternalYuvData(
media::VideoFrame::YV12,
@@ -538,8 +539,9 @@ void FakeMediaSource::DecodeVideo(ScopedAVPacket packet) {
avframe->data[0],
avframe->data[1],
avframe->data[2],
- base::TimeDelta::FromMilliseconds(pts),
+ timestamp,
base::Bind(&AVFreeFrame, avframe)));
+ last_video_frame_timestamp_ = timestamp;
}
void FakeMediaSource::Decode(bool decode_audio) {
@@ -553,7 +555,7 @@ void FakeMediaSource::Decode(bool decode_audio) {
bool audio_packet = false;
ScopedAVPacket packet = DemuxOnePacket(&audio_packet);
if (!packet) {
- LOG(INFO) << "End of stream.";
+ VLOG(1) << "End of stream.";
return;
}
diff --git a/media/cast/test/fake_media_source.h b/media/cast/test/fake_media_source.h
index b18d44ea2a..4e6a4c3a5f 100644
--- a/media/cast/test/fake_media_source.h
+++ b/media/cast/test/fake_media_source.h
@@ -97,8 +97,8 @@ class FakeMediaSource {
AVCodecContext* av_audio_context();
AVCodecContext* av_video_context();
- scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
- VideoSenderConfig video_config_;
+ const scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
+ const VideoSenderConfig video_config_;
scoped_refptr<AudioFrameInput> audio_frame_input_;
scoped_refptr<VideoFrameInput> video_frame_input_;
uint8 synthetic_count_;
@@ -112,9 +112,6 @@ class FakeMediaSource {
int video_frame_count_;
scoped_ptr<TestAudioBusFactory> audio_bus_factory_;
- // NOTE: Weak pointers must be invalidated before all other member variables.
- base::WeakPtrFactory<FakeMediaSource> weak_factory_;
-
base::MemoryMappedFile file_data_;
scoped_ptr<InMemoryUrlProtocol> protocol_;
scoped_ptr<FFmpegGlue> glue_;
@@ -140,9 +137,13 @@ class FakeMediaSource {
std::queue<scoped_refptr<VideoFrame> > video_frame_queue_;
int64 video_first_pts_;
bool video_first_pts_set_;
+ base::TimeDelta last_video_frame_timestamp_;
std::queue<AudioBus*> audio_bus_queue_;
+ // NOTE: Weak pointers must be invalidated before all other member variables.
+ base::WeakPtrFactory<FakeMediaSource> weak_factory_;
+
DISALLOW_COPY_AND_ASSIGN(FakeMediaSource);
};
diff --git a/media/cast/test/fake_receiver_time_offset_estimator.h b/media/cast/test/fake_receiver_time_offset_estimator.h
index 2a4d3cd393..fc3a901b92 100644
--- a/media/cast/test/fake_receiver_time_offset_estimator.h
+++ b/media/cast/test/fake_receiver_time_offset_estimator.h
@@ -20,15 +20,15 @@ class FakeReceiverTimeOffsetEstimator : public ReceiverTimeOffsetEstimator {
public:
FakeReceiverTimeOffsetEstimator(base::TimeDelta offset);
- virtual ~FakeReceiverTimeOffsetEstimator();
+ ~FakeReceiverTimeOffsetEstimator() override;
// RawReventSubscriber implementations.
- virtual void OnReceiveFrameEvent(const FrameEvent& frame_event) OVERRIDE;
- virtual void OnReceivePacketEvent(const PacketEvent& packet_event) OVERRIDE;
+ void OnReceiveFrameEvent(const FrameEvent& frame_event) override;
+ void OnReceivePacketEvent(const PacketEvent& packet_event) override;
// ReceiverTimeOffsetEstimator
- virtual bool GetReceiverOffsetBounds(base::TimeDelta* lower_bound,
- base::TimeDelta* upper_bound) OVERRIDE;
+ bool GetReceiverOffsetBounds(base::TimeDelta* lower_bound,
+ base::TimeDelta* upper_bound) override;
private:
const base::TimeDelta offset_;
diff --git a/media/cast/test/fake_single_thread_task_runner.h b/media/cast/test/fake_single_thread_task_runner.h
index 779a897cfb..60aaae4774 100644
--- a/media/cast/test/fake_single_thread_task_runner.h
+++ b/media/cast/test/fake_single_thread_task_runner.h
@@ -28,20 +28,19 @@ class FakeSingleThreadTaskRunner : public base::SingleThreadTaskRunner {
void Sleep(base::TimeDelta t);
// base::SingleThreadTaskRunner implementation.
- virtual bool PostDelayedTask(const tracked_objects::Location& from_here,
- const base::Closure& task,
- base::TimeDelta delay) OVERRIDE;
+ bool PostDelayedTask(const tracked_objects::Location& from_here,
+ const base::Closure& task,
+ base::TimeDelta delay) override;
- virtual bool RunsTasksOnCurrentThread() const OVERRIDE;
+ bool RunsTasksOnCurrentThread() const override;
// This function is currently not used, and will return false.
- virtual bool PostNonNestableDelayedTask(
- const tracked_objects::Location& from_here,
- const base::Closure& task,
- base::TimeDelta delay) OVERRIDE;
+ bool PostNonNestableDelayedTask(const tracked_objects::Location& from_here,
+ const base::Closure& task,
+ base::TimeDelta delay) override;
protected:
- virtual ~FakeSingleThreadTaskRunner();
+ ~FakeSingleThreadTaskRunner() override;
private:
base::SimpleTestTickClock* const clock_;
diff --git a/media/cast/test/fake_video_encode_accelerator.h b/media/cast/test/fake_video_encode_accelerator.h
index 740b8aa9a7..65cc36c3ab 100644
--- a/media/cast/test/fake_video_encode_accelerator.h
+++ b/media/cast/test/fake_video_encode_accelerator.h
@@ -26,25 +26,25 @@ class FakeVideoEncodeAccelerator : public VideoEncodeAccelerator {
explicit FakeVideoEncodeAccelerator(
const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
std::vector<uint32>* stored_bitrates);
- virtual ~FakeVideoEncodeAccelerator();
+ ~FakeVideoEncodeAccelerator() override;
- virtual std::vector<VideoEncodeAccelerator::SupportedProfile>
- GetSupportedProfiles() OVERRIDE;
- virtual bool Initialize(media::VideoFrame::Format input_format,
- const gfx::Size& input_visible_size,
- VideoCodecProfile output_profile,
- uint32 initial_bitrate,
- Client* client) OVERRIDE;
+ std::vector<VideoEncodeAccelerator::SupportedProfile> GetSupportedProfiles()
+ override;
+ bool Initialize(media::VideoFrame::Format input_format,
+ const gfx::Size& input_visible_size,
+ VideoCodecProfile output_profile,
+ uint32 initial_bitrate,
+ Client* client) override;
- virtual void Encode(const scoped_refptr<VideoFrame>& frame,
- bool force_keyframe) OVERRIDE;
+ void Encode(const scoped_refptr<VideoFrame>& frame,
+ bool force_keyframe) override;
- virtual void UseOutputBitstreamBuffer(const BitstreamBuffer& buffer) OVERRIDE;
+ void UseOutputBitstreamBuffer(const BitstreamBuffer& buffer) override;
- virtual void RequestEncodingParametersChange(uint32 bitrate,
- uint32 framerate) OVERRIDE;
+ void RequestEncodingParametersChange(uint32 bitrate,
+ uint32 framerate) override;
- virtual void Destroy() OVERRIDE;
+ void Destroy() override;
void SendDummyFrameForTesting(bool key_frame);
void SetWillInitializationSucceed(bool will_initialization_succeed) {
diff --git a/media/cast/test/loopback_transport.cc b/media/cast/test/loopback_transport.cc
index bc19f016c7..2b32fe33e7 100644
--- a/media/cast/test/loopback_transport.cc
+++ b/media/cast/test/loopback_transport.cc
@@ -20,10 +20,10 @@ class LoopBackPacketPipe : public test::PacketPipe {
const PacketReceiverCallback& packet_receiver)
: packet_receiver_(packet_receiver) {}
- virtual ~LoopBackPacketPipe() {}
+ ~LoopBackPacketPipe() override {}
// PacketPipe implementations.
- virtual void Send(scoped_ptr<Packet> packet) OVERRIDE {
+ void Send(scoped_ptr<Packet> packet) override {
packet_receiver_.Run(packet.Pass());
}
diff --git a/media/cast/test/loopback_transport.h b/media/cast/test/loopback_transport.h
index cf29fbb789..2aca2f3469 100644
--- a/media/cast/test/loopback_transport.h
+++ b/media/cast/test/loopback_transport.h
@@ -27,12 +27,11 @@ class LoopBackTransport : public PacketSender {
public:
explicit LoopBackTransport(
scoped_refptr<CastEnvironment> cast_environment);
- virtual ~LoopBackTransport();
+ ~LoopBackTransport() override;
- virtual bool SendPacket(PacketRef packet,
- const base::Closure& cb) OVERRIDE;
+ bool SendPacket(PacketRef packet, const base::Closure& cb) override;
- virtual int64 GetBytesSent() OVERRIDE;
+ int64 GetBytesSent() override;
// Initiailize this loopback transport.
// Establish a flow of packets from |pipe| to |packet_receiver|.
diff --git a/media/cast/test/receiver.cc b/media/cast/test/receiver.cc
index 65b0e6059a..d608669d62 100644
--- a/media/cast/test/receiver.cc
+++ b/media/cast/test/receiver.cc
@@ -202,9 +202,9 @@ class NaivePlayer : public InProcessReceiver,
num_audio_frames_processed_(0),
currently_playing_audio_frame_start_(-1) {}
- virtual ~NaivePlayer() {}
+ ~NaivePlayer() override {}
- virtual void Start() OVERRIDE {
+ void Start() override {
AudioManager::Get()->GetTaskRunner()->PostTask(
FROM_HERE,
base::Bind(&NaivePlayer::StartAudioOutputOnAudioManagerThread,
@@ -214,7 +214,7 @@ class NaivePlayer : public InProcessReceiver,
InProcessReceiver::Start();
}
- virtual void Stop() OVERRIDE {
+ void Stop() override {
// First, stop audio output to the Chromium audio stack.
base::WaitableEvent done(false, false);
DCHECK(!AudioManager::Get()->GetTaskRunner()->BelongsToCurrentThread());
@@ -265,9 +265,9 @@ class NaivePlayer : public InProcessReceiver,
////////////////////////////////////////////////////////////////////
// InProcessReceiver overrides.
- virtual void OnVideoFrame(const scoped_refptr<VideoFrame>& video_frame,
- const base::TimeTicks& playout_time,
- bool is_continuous) OVERRIDE {
+ void OnVideoFrame(const scoped_refptr<VideoFrame>& video_frame,
+ const base::TimeTicks& playout_time,
+ bool is_continuous) override {
DCHECK(cast_env()->CurrentlyOn(CastEnvironment::MAIN));
LOG_IF(WARNING, !is_continuous)
<< "Video: Discontinuity in received frames.";
@@ -282,9 +282,9 @@ class NaivePlayer : public InProcessReceiver,
}
}
- virtual void OnAudioFrame(scoped_ptr<AudioBus> audio_frame,
- const base::TimeTicks& playout_time,
- bool is_continuous) OVERRIDE {
+ void OnAudioFrame(scoped_ptr<AudioBus> audio_frame,
+ const base::TimeTicks& playout_time,
+ bool is_continuous) override {
DCHECK(cast_env()->CurrentlyOn(CastEnvironment::MAIN));
LOG_IF(WARNING, !is_continuous)
<< "Audio: Discontinuity in received frames.";
@@ -316,8 +316,7 @@ class NaivePlayer : public InProcessReceiver,
////////////////////////////////////////////////////////////////////
// AudioSourceCallback implementation.
- virtual int OnMoreData(AudioBus* dest, AudioBuffersState buffers_state)
- OVERRIDE {
+ int OnMoreData(AudioBus* dest, uint32 total_bytes_delay) override {
// Note: This method is being invoked by a separate thread unknown to us
// (i.e., outside of CastEnvironment).
@@ -329,8 +328,8 @@ class NaivePlayer : public InProcessReceiver,
base::AutoLock auto_lock(audio_lock_);
// Prune the queue, skipping entries that are too old.
- // TODO(miu): Use |buffers_state| to account for audio buffering delays
- // upstream.
+ // TODO(miu): Use |total_bytes_delay| to account for audio buffering
+ // delays upstream.
const base::TimeTicks earliest_time_to_play =
cast_env()->Clock()->NowTicks() - max_frame_age_;
while (!audio_playout_queue_.empty() &&
@@ -377,7 +376,7 @@ class NaivePlayer : public InProcessReceiver,
return dest->frames();
}
- virtual void OnError(AudioOutputStream* stream) OVERRIDE {
+ void OnError(AudioOutputStream* stream) override {
LOG(ERROR) << "AudioOutputStream reports an error. "
<< "Playback is unlikely to continue.";
}
diff --git a/media/cast/test/simulator.cc b/media/cast/test/simulator.cc
index e3872a98d9..61084386c3 100644
--- a/media/cast/test/simulator.cc
+++ b/media/cast/test/simulator.cc
@@ -14,6 +14,14 @@
// --target-delay-ms=
// Target playout delay to configure (integer number of milliseconds).
// Optional; default is 400.
+// --max-frame-rate=
+// The maximum frame rate allowed at any time during the Cast session.
+// Optional; default is 30.
+// --source-frame-rate=
+// Overrides the playback rate; the source video will play faster/slower.
+// --run-time=
+// In seconds, how long the Cast session runs for.
+// Optional; default is 180.
//
// Output:
// - Raw event log of the simulation session tagged with the unique test ID,
@@ -75,16 +83,19 @@ const char kOutputPath[] = "output";
const char kSimulationId[] = "sim-id";
const char kLibDir[] = "lib-dir";
const char kTargetDelay[] = "target-delay-ms";
-
-base::TimeDelta GetTargetPlayoutDelay() {
- const std::string delay_str =
- CommandLine::ForCurrentProcess()->GetSwitchValueASCII(kTargetDelay);
- if (delay_str.empty())
- return base::TimeDelta::FromMilliseconds(400);
- int delay_ms;
- CHECK(base::StringToInt(delay_str, &delay_ms));
- CHECK_GT(delay_ms, 0);
- return base::TimeDelta::FromMilliseconds(delay_ms);
+const char kMaxFrameRate[] = "max-frame-rate";
+const char kSourceFrameRate[] = "source-frame-rate";
+const char kRunTime[] = "run-time";
+
+int GetIntegerSwitchValue(const char* switch_name, int default_value) {
+ const std::string as_str =
+ CommandLine::ForCurrentProcess()->GetSwitchValueASCII(switch_name);
+ if (as_str.empty())
+ return default_value;
+ int as_int;
+ CHECK(base::StringToInt(as_str, &as_int));
+ CHECK_GT(as_int, 0);
+ return as_int;
}
void UpdateCastTransportStatus(CastTransportStatus status) {
@@ -186,7 +197,7 @@ void AppendLogToFile(media::cast::proto::LogMetadata* metadata,
return;
}
- if (AppendToFile(output_path, serialized_log.get(), output_bytes) == -1) {
+ if (!AppendToFile(output_path, serialized_log.get(), output_bytes)) {
LOG(ERROR) << "Failed to append to log.";
}
}
@@ -234,7 +245,9 @@ void RunSimulation(const base::FilePath& source_path,
// Audio sender config.
AudioSenderConfig audio_sender_config = GetDefaultAudioSenderConfig();
- audio_sender_config.max_playout_delay = GetTargetPlayoutDelay();
+ audio_sender_config.min_playout_delay =
+ audio_sender_config.max_playout_delay = base::TimeDelta::FromMilliseconds(
+ GetIntegerSwitchValue(kTargetDelay, 400));
// Audio receiver config.
FrameReceiverConfig audio_receiver_config =
@@ -247,7 +260,10 @@ void RunSimulation(const base::FilePath& source_path,
video_sender_config.max_bitrate = 2500000;
video_sender_config.min_bitrate = 2000000;
video_sender_config.start_bitrate = 2000000;
- video_sender_config.max_playout_delay = GetTargetPlayoutDelay();
+ video_sender_config.min_playout_delay =
+ video_sender_config.max_playout_delay =
+ audio_sender_config.max_playout_delay;
+ video_sender_config.max_frame_rate = GetIntegerSwitchValue(kMaxFrameRate, 30);
// Video receiver config.
FrameReceiverConfig video_receiver_config =
@@ -329,14 +345,17 @@ void RunSimulation(const base::FilePath& source_path,
// Start sending.
if (!source_path.empty()) {
// 0 means using the FPS from the file.
- media_source.SetSourceFile(source_path, 0);
+ media_source.SetSourceFile(source_path,
+ GetIntegerSwitchValue(kSourceFrameRate, 0));
}
media_source.Start(cast_sender->audio_frame_input(),
cast_sender->video_frame_input());
// Run for 3 minutes.
base::TimeDelta elapsed_time;
- while (elapsed_time.InMinutes() < 3) {
+ const base::TimeDelta desired_run_time =
+ base::TimeDelta::FromSeconds(GetIntegerSwitchValue(kRunTime, 180));
+ while (elapsed_time < desired_run_time) {
// Each step is 100us.
base::TimeDelta step = base::TimeDelta::FromMicroseconds(100);
task_runner->Sleep(step);
@@ -388,10 +407,15 @@ void RunSimulation(const base::FilePath& source_path,
}
}
- double avg_encoded_bitrate =
- !encoded_video_frames ? 0 :
- 8.0 * encoded_size * video_sender_config.max_frame_rate /
- encoded_video_frames / 1000;
+ // Subtract fraction of dropped frames from |elapsed_time| before estimating
+ // the average encoded bitrate.
+ const base::TimeDelta elapsed_time_undropped =
+ total_video_frames <= 0 ? base::TimeDelta() :
+ (elapsed_time * (total_video_frames - dropped_video_frames) /
+ total_video_frames);
+ const double avg_encoded_bitrate =
+ elapsed_time_undropped <= base::TimeDelta() ? 0 :
+ 8.0 * encoded_size / elapsed_time_undropped.InSecondsF() / 1000;
double avg_target_bitrate =
!encoded_video_frames ? 0 : target_bitrate / encoded_video_frames / 1000;
diff --git a/media/cast/test/skewed_single_thread_task_runner.h b/media/cast/test/skewed_single_thread_task_runner.h
index 5ad2f8d8bb..1a4d0abe9c 100644
--- a/media/cast/test/skewed_single_thread_task_runner.h
+++ b/media/cast/test/skewed_single_thread_task_runner.h
@@ -29,20 +29,19 @@ class SkewedSingleThreadTaskRunner : public base::SingleThreadTaskRunner {
void SetSkew(double skew);
// base::SingleThreadTaskRunner implementation.
- virtual bool PostDelayedTask(const tracked_objects::Location& from_here,
- const base::Closure& task,
- base::TimeDelta delay) OVERRIDE;
+ bool PostDelayedTask(const tracked_objects::Location& from_here,
+ const base::Closure& task,
+ base::TimeDelta delay) override;
- virtual bool RunsTasksOnCurrentThread() const OVERRIDE;
+ bool RunsTasksOnCurrentThread() const override;
// This function is currently not used, and will return false.
- virtual bool PostNonNestableDelayedTask(
- const tracked_objects::Location& from_here,
- const base::Closure& task,
- base::TimeDelta delay) OVERRIDE;
+ bool PostNonNestableDelayedTask(const tracked_objects::Location& from_here,
+ const base::Closure& task,
+ base::TimeDelta delay) override;
protected:
- virtual ~SkewedSingleThreadTaskRunner();
+ ~SkewedSingleThreadTaskRunner() override;
private:
double skew_;
diff --git a/media/cast/test/skewed_tick_clock.h b/media/cast/test/skewed_tick_clock.h
index dcb538448c..a5539a34c7 100644
--- a/media/cast/test/skewed_tick_clock.h
+++ b/media/cast/test/skewed_tick_clock.h
@@ -25,7 +25,7 @@ class SkewedTickClock : public base::TickClock {
// jump forwards or backwards, only changing the offset will
// do that.
void SetSkew(double skew, base::TimeDelta offset);
- virtual base::TimeTicks NowTicks() OVERRIDE;
+ base::TimeTicks NowTicks() override;
private:
base::TimeTicks SkewTicks(base::TimeTicks now);
diff --git a/media/cast/test/utility/audio_utility.cc b/media/cast/test/utility/audio_utility.cc
index 8dde4dd9f8..094f141ab0 100644
--- a/media/cast/test/utility/audio_utility.cc
+++ b/media/cast/test/utility/audio_utility.cc
@@ -36,7 +36,7 @@ scoped_ptr<AudioBus> TestAudioBusFactory::NextAudioBus(
const int num_samples = static_cast<int>((sample_rate_ * duration) /
base::TimeDelta::FromSeconds(1));
scoped_ptr<AudioBus> bus(AudioBus::Create(num_channels_, num_samples));
- source_.OnMoreData(bus.get(), AudioBuffersState());
+ source_.OnMoreData(bus.get(), 0);
bus->Scale(volume_);
return bus.Pass();
}
diff --git a/media/cast/test/utility/in_process_receiver.cc b/media/cast/test/utility/in_process_receiver.cc
index c5666d7b96..fb41843551 100644
--- a/media/cast/test/utility/in_process_receiver.cc
+++ b/media/cast/test/utility/in_process_receiver.cc
@@ -81,6 +81,7 @@ void InProcessReceiver::StartOnMainThread() {
cast_environment_->GetTaskRunner(CastEnvironment::MAIN),
local_end_point_,
remote_end_point_,
+ 65536,
base::Bind(&InProcessReceiver::UpdateCastTransportStatus,
base::Unretained(this))));
cast_receiver_ = CastReceiver::Create(
diff --git a/media/cast/test/utility/standalone_cast_environment.h b/media/cast/test/utility/standalone_cast_environment.h
index 91240e1ce5..206de212f2 100644
--- a/media/cast/test/utility/standalone_cast_environment.h
+++ b/media/cast/test/utility/standalone_cast_environment.h
@@ -28,7 +28,7 @@ class StandaloneCastEnvironment : public CastEnvironment,
void Shutdown();
protected:
- virtual ~StandaloneCastEnvironment();
+ ~StandaloneCastEnvironment() override;
base::Thread main_thread_;
base::Thread audio_thread_;
diff --git a/media/cast/test/utility/tap_proxy.cc b/media/cast/test/utility/tap_proxy.cc
index 7827bf976f..0546874111 100644
--- a/media/cast/test/utility/tap_proxy.cc
+++ b/media/cast/test/utility/tap_proxy.cc
@@ -41,7 +41,7 @@ class SendToFDPipe : public PacketPipe {
public:
explicit SendToFDPipe(int fd) : fd_(fd) {
}
- virtual void Send(scoped_ptr<Packet> packet) OVERRIDE {
+ virtual void Send(scoped_ptr<Packet> packet) override {
while (1) {
int written = write(
fd_,
@@ -89,7 +89,7 @@ class QueueManager : public base::MessageLoopForIO::Watcher {
}
// MessageLoopForIO::Watcher methods
- virtual void OnFileCanReadWithoutBlocking(int fd) OVERRIDE {
+ virtual void OnFileCanReadWithoutBlocking(int fd) override {
scoped_ptr<Packet> packet(new Packet(kMaxPacketSize));
int nread = read(input_fd_,
reinterpret_cast<char*>(&packet->front()),
@@ -103,7 +103,7 @@ class QueueManager : public base::MessageLoopForIO::Watcher {
packet->resize(nread);
packet_pipe_->Send(packet.Pass());
}
- virtual void OnFileCanWriteWithoutBlocking(int fd) OVERRIDE {
+ virtual void OnFileCanWriteWithoutBlocking(int fd) override {
NOTREACHED();
}
@@ -173,7 +173,7 @@ class ByteCounterPipe : public media::cast::test::PacketPipe {
public:
ByteCounterPipe(ByteCounter* counter) : counter_(counter) {}
virtual void Send(scoped_ptr<media::cast::Packet> packet)
- OVERRIDE {
+ override {
counter_->Increment(packet->size());
pipe_->Send(packet.Pass());
}
diff --git a/media/cast/test/utility/udp_proxy.cc b/media/cast/test/utility/udp_proxy.cc
index 95640a364e..237c1e7cd8 100644
--- a/media/cast/test/utility/udp_proxy.cc
+++ b/media/cast/test/utility/udp_proxy.cc
@@ -56,7 +56,7 @@ class Buffer : public PacketPipe {
CHECK_GT(max_megabits_per_second, 0);
}
- virtual void Send(scoped_ptr<Packet> packet) OVERRIDE {
+ void Send(scoped_ptr<Packet> packet) override {
if (packet->size() + buffer_size_ <= max_buffer_size_) {
buffer_size_ += packet->size();
buffer_.push_back(linked_ptr<Packet>(packet.release()));
@@ -116,7 +116,7 @@ class RandomDrop : public PacketPipe {
RandomDrop(double drop_fraction)
: drop_fraction_(static_cast<int>(drop_fraction * RAND_MAX)) {}
- virtual void Send(scoped_ptr<Packet> packet) OVERRIDE {
+ void Send(scoped_ptr<Packet> packet) override {
if (rand() > drop_fraction_) {
pipe_->Send(packet.Pass());
}
@@ -133,9 +133,9 @@ scoped_ptr<PacketPipe> NewRandomDrop(double drop_fraction) {
class SimpleDelayBase : public PacketPipe {
public:
SimpleDelayBase() : weak_factory_(this) {}
- virtual ~SimpleDelayBase() {}
+ ~SimpleDelayBase() override {}
- virtual void Send(scoped_ptr<Packet> packet) OVERRIDE {
+ void Send(scoped_ptr<Packet> packet) override {
double seconds = GetDelay();
task_runner_->PostDelayedTask(
FROM_HERE,
@@ -158,9 +158,7 @@ class SimpleDelayBase : public PacketPipe {
class ConstantDelay : public SimpleDelayBase {
public:
ConstantDelay(double delay_seconds) : delay_seconds_(delay_seconds) {}
- virtual double GetDelay() OVERRIDE {
- return delay_seconds_;
- }
+ double GetDelay() override { return delay_seconds_; }
private:
double delay_seconds_;
@@ -174,9 +172,7 @@ class RandomUnsortedDelay : public SimpleDelayBase {
public:
RandomUnsortedDelay(double random_delay) : random_delay_(random_delay) {}
- virtual double GetDelay() OVERRIDE {
- return random_delay_ * base::RandDouble();
- }
+ double GetDelay() override { return random_delay_ * base::RandDouble(); }
private:
double random_delay_;
@@ -193,11 +189,11 @@ class DuplicateAndDelay : public RandomUnsortedDelay {
RandomUnsortedDelay(random_delay),
delay_min_(delay_min) {
}
- virtual void Send(scoped_ptr<Packet> packet) OVERRIDE {
+ void Send(scoped_ptr<Packet> packet) override {
pipe_->Send(scoped_ptr<Packet>(new Packet(*packet.get())));
RandomUnsortedDelay::Send(packet.Pass());
}
- virtual double GetDelay() OVERRIDE {
+ double GetDelay() override {
return RandomUnsortedDelay::GetDelay() + delay_min_;
}
private:
@@ -220,7 +216,7 @@ class RandomSortedDelay : public PacketPipe {
seconds_between_extra_delay_(seconds_between_extra_delay),
weak_factory_(this) {}
- virtual void Send(scoped_ptr<Packet> packet) OVERRIDE {
+ void Send(scoped_ptr<Packet> packet) override {
buffer_.push_back(linked_ptr<Packet>(packet.release()));
if (buffer_.size() == 1) {
next_send_ = std::max(
@@ -230,9 +226,9 @@ class RandomSortedDelay : public PacketPipe {
ProcessBuffer();
}
}
- virtual void InitOnIOThread(
+ void InitOnIOThread(
const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
- base::TickClock* clock) OVERRIDE {
+ base::TickClock* clock) override {
PacketPipe::InitOnIOThread(task_runner, clock);
// As we start the stream, assume that we are in a random
// place between two extra delays, thus multiplier = 1.0;
@@ -309,14 +305,14 @@ class NetworkGlitchPipe : public PacketPipe {
max_outage_time_(average_outage_time * 2),
weak_factory_(this) {}
- virtual void InitOnIOThread(
+ void InitOnIOThread(
const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
- base::TickClock* clock) OVERRIDE {
+ base::TickClock* clock) override {
PacketPipe::InitOnIOThread(task_runner, clock);
Flip();
}
- virtual void Send(scoped_ptr<Packet> packet) OVERRIDE {
+ void Send(scoped_ptr<Packet> packet) override {
if (works_) {
pipe_->Send(packet.Pass());
}
@@ -360,7 +356,7 @@ class InterruptedPoissonProcess::InternalBuffer : public PacketPipe {
weak_factory_(this) {
}
- virtual void Send(scoped_ptr<Packet> packet) OVERRIDE {
+ void Send(scoped_ptr<Packet> packet) override {
// Drop if buffer is full.
if (stored_size_ >= stored_limit_)
return;
@@ -370,9 +366,9 @@ class InterruptedPoissonProcess::InternalBuffer : public PacketPipe {
DCHECK(buffer_.size() == buffer_time_.size());
}
- virtual void InitOnIOThread(
+ void InitOnIOThread(
const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
- base::TickClock* clock) OVERRIDE {
+ base::TickClock* clock) override {
clock_ = clock;
if (ipp_)
ipp_->InitOnIOThread(task_runner, clock);
@@ -451,7 +447,7 @@ scoped_ptr<PacketPipe> InterruptedPoissonProcess::NewBuffer(size_t size) {
scoped_ptr<InternalBuffer> buffer(
new InternalBuffer(weak_factory_.GetWeakPtr(), size));
send_buffers_.push_back(buffer->GetWeakPtr());
- return buffer.PassAs<PacketPipe>();
+ return buffer.Pass();
}
base::TimeDelta InterruptedPoissonProcess::NextEvent(double rate) {
@@ -552,10 +548,8 @@ class PacketSender : public PacketPipe {
public:
PacketSender(UDPProxyImpl* udp_proxy, const net::IPEndPoint* destination)
: udp_proxy_(udp_proxy), destination_(destination) {}
- virtual void Send(scoped_ptr<Packet> packet) OVERRIDE;
- virtual void AppendToPipe(scoped_ptr<PacketPipe> pipe) OVERRIDE {
- NOTREACHED();
- }
+ void Send(scoped_ptr<Packet> packet) override;
+ void AppendToPipe(scoped_ptr<PacketPipe> pipe) override { NOTREACHED(); }
private:
UDPProxyImpl* udp_proxy_;
@@ -662,7 +656,7 @@ class UDPProxyImpl : public UDPProxy {
start_event.Wait();
}
- virtual ~UDPProxyImpl() {
+ ~UDPProxyImpl() override {
base::WaitableEvent stop_event(false, false);
proxy_thread_.message_loop_proxy()->PostTask(
FROM_HERE,
diff --git a/media/cast/test/utility/udp_proxy_main.cc b/media/cast/test/utility/udp_proxy_main.cc
index 9dd047abfd..212b5976e3 100644
--- a/media/cast/test/utility/udp_proxy_main.cc
+++ b/media/cast/test/utility/udp_proxy_main.cc
@@ -68,8 +68,7 @@ ByteCounter out_pipe_output_counter;
class ByteCounterPipe : public media::cast::test::PacketPipe {
public:
ByteCounterPipe(ByteCounter* counter) : counter_(counter) {}
- virtual void Send(scoped_ptr<media::cast::Packet> packet)
- OVERRIDE {
+ void Send(scoped_ptr<media::cast::Packet> packet) override {
counter_->Increment(packet->size());
pipe_->Send(packet.Pass());
}