author    Primiano Tucci <primiano@google.com>  2014-09-30 14:45:55 +0100
committer Primiano Tucci <primiano@google.com>  2014-09-30 14:45:55 +0100
commit   1320f92c476a1ad9d19dba2a48c72b75566198e9 (patch)
tree     ea7f149ccad687b22c18a72b729646568b2d54fb /media/filters
parent   39b78c562f50ad7d5551ee861121f899239525a2 (diff)
download chromium_org-1320f92c476a1ad9d19dba2a48c72b75566198e9.tar.gz
Merge from Chromium at DEPS revision 267aeeb8d85c
This commit was generated by merge_to_master.py.

Change-Id: Id3aac9713b301fae64408cdaee0888724eeb7c0e
Diffstat (limited to 'media/filters')
-rw-r--r--  media/filters/audio_clock.cc                          55
-rw-r--r--  media/filters/audio_clock.h                           70
-rw-r--r--  media/filters/audio_clock_unittest.cc                204
-rw-r--r--  media/filters/audio_renderer_algorithm.cc              4
-rw-r--r--  media/filters/audio_renderer_algorithm.h               4
-rw-r--r--  media/filters/audio_renderer_impl.cc                 110
-rw-r--r--  media/filters/audio_renderer_impl.h                   20
-rw-r--r--  media/filters/audio_renderer_impl_unittest.cc         54
-rw-r--r--  media/filters/chunk_demuxer.cc                       113
-rw-r--r--  media/filters/chunk_demuxer.h                         24
-rw-r--r--  media/filters/chunk_demuxer_unittest.cc              102
-rw-r--r--  media/filters/decoder_stream.cc                       29
-rw-r--r--  media/filters/decoder_stream.h                        11
-rw-r--r--  media/filters/decoder_stream_traits.cc                 4
-rw-r--r--  media/filters/decrypting_audio_decoder.cc              4
-rw-r--r--  media/filters/decrypting_audio_decoder.h               1
-rw-r--r--  media/filters/decrypting_video_decoder.cc              4
-rw-r--r--  media/filters/decrypting_video_decoder.h               1
-rw-r--r--  media/filters/fake_demuxer_stream.cc                   4
-rw-r--r--  media/filters/fake_demuxer_stream.h                    1
-rw-r--r--  media/filters/fake_video_decoder.cc                    6
-rw-r--r--  media/filters/fake_video_decoder.h                     1
-rw-r--r--  media/filters/fake_video_decoder_unittest.cc          10
-rw-r--r--  media/filters/ffmpeg_audio_decoder.cc                  6
-rw-r--r--  media/filters/ffmpeg_audio_decoder.h                   1
-rw-r--r--  media/filters/ffmpeg_demuxer.cc                       35
-rw-r--r--  media/filters/ffmpeg_demuxer.h                        10
-rw-r--r--  media/filters/ffmpeg_demuxer_unittest.cc              36
-rw-r--r--  media/filters/ffmpeg_video_decoder.cc                  6
-rw-r--r--  media/filters/ffmpeg_video_decoder.h                   1
-rw-r--r--  media/filters/frame_processor.cc                       8
-rw-r--r--  media/filters/frame_processor_unittest.cc             26
-rw-r--r--  media/filters/gpu_video_accelerator_factories.h        6
-rw-r--r--  media/filters/gpu_video_decoder.cc                    10
-rw-r--r--  media/filters/gpu_video_decoder.h                      6
-rw-r--r--  media/filters/mock_gpu_video_accelerator_factories.h   2
-rw-r--r--  media/filters/opus_audio_decoder.cc                    8
-rw-r--r--  media/filters/opus_audio_decoder.h                     1
-rw-r--r--  media/filters/pipeline_integration_perftest.cc        11
-rw-r--r--  media/filters/pipeline_integration_test.cc           105
-rw-r--r--  media/filters/pipeline_integration_test_base.cc       81
-rw-r--r--  media/filters/pipeline_integration_test_base.h        16
-rw-r--r--  media/filters/renderer_impl.cc                       505
-rw-r--r--  media/filters/renderer_impl.h                        163
-rw-r--r--  media/filters/renderer_impl_unittest.cc              434
-rw-r--r--  media/filters/skcanvas_video_renderer.cc             223
-rw-r--r--  media/filters/skcanvas_video_renderer.h                8
-rw-r--r--  media/filters/skcanvas_video_renderer_unittest.cc     79
-rw-r--r--  media/filters/source_buffer_range.cc                 591
-rw-r--r--  media/filters/source_buffer_range.h                  289
-rw-r--r--  media/filters/source_buffer_stream.cc                914
-rw-r--r--  media/filters/source_buffer_stream.h                   2
-rw-r--r--  media/filters/source_buffer_stream_unittest.cc         2
-rw-r--r--  media/filters/stream_parser_factory.cc                 4
-rw-r--r--  media/filters/video_frame_stream_unittest.cc          42
-rw-r--r--  media/filters/video_renderer_impl.cc                  62
-rw-r--r--  media/filters/video_renderer_impl.h                   16
-rw-r--r--  media/filters/video_renderer_impl_unittest.cc        117
-rw-r--r--  media/filters/vpx_video_decoder.cc                    12
-rw-r--r--  media/filters/vpx_video_decoder.h                      1
60 files changed, 3181 insertions(+), 1494 deletions(-)
diff --git a/media/filters/audio_clock.cc b/media/filters/audio_clock.cc
index eae807ece5..117d603820 100644
--- a/media/filters/audio_clock.cc
+++ b/media/filters/audio_clock.cc
@@ -18,8 +18,8 @@ AudioClock::AudioClock(base::TimeDelta start_timestamp, int sample_rate)
static_cast<double>(base::Time::kMicrosecondsPerSecond) /
sample_rate),
total_buffered_frames_(0),
- current_media_timestamp_(start_timestamp),
- audio_data_buffered_(0) {
+ front_timestamp_(start_timestamp),
+ back_timestamp_(start_timestamp) {
}
AudioClock::~AudioClock() {
@@ -35,7 +35,7 @@ void AudioClock::WroteAudio(int frames_written,
DCHECK_GE(playback_rate, 0);
// First write: initialize buffer with silence.
- if (start_timestamp_ == current_media_timestamp_ && buffered_.empty())
+ if (start_timestamp_ == front_timestamp_ && buffered_.empty())
PushBufferedAudioData(delay_frames, 0.0f);
// Move frames from |buffered_| into the computed timestamp based on
@@ -45,24 +45,24 @@ void AudioClock::WroteAudio(int frames_written,
// reallocations in cases where |buffered_| gets emptied.
int64_t frames_played =
std::max(INT64_C(0), total_buffered_frames_ - delay_frames);
- current_media_timestamp_ += ComputeBufferedMediaTime(frames_played);
+ front_timestamp_ += ComputeBufferedMediaTime(frames_played);
PushBufferedAudioData(frames_written, playback_rate);
PushBufferedAudioData(frames_requested - frames_written, 0.0f);
PopBufferedAudioData(frames_played);
+ back_timestamp_ += base::TimeDelta::FromMicroseconds(
+ frames_written * playback_rate * microseconds_per_frame_);
+
// Update cached values.
double scaled_frames = 0;
double scaled_frames_at_same_rate = 0;
bool found_silence = false;
- audio_data_buffered_ = false;
for (size_t i = 0; i < buffered_.size(); ++i) {
if (buffered_[i].playback_rate == 0) {
found_silence = true;
continue;
}
- audio_data_buffered_ = true;
-
// Any buffered silence breaks our contiguous stretch of audio data.
if (found_silence)
break;
@@ -80,15 +80,52 @@ void AudioClock::WroteAudio(int frames_written,
microseconds_per_frame_);
}
-base::TimeDelta AudioClock::CurrentMediaTimestampSinceWriting(
+base::TimeDelta AudioClock::TimestampSinceWriting(
base::TimeDelta time_since_writing) const {
int64_t frames_played_since_writing = std::min(
total_buffered_frames_,
static_cast<int64_t>(time_since_writing.InSecondsF() * sample_rate_));
- return current_media_timestamp_ +
+ return front_timestamp_ +
ComputeBufferedMediaTime(frames_played_since_writing);
}
+base::TimeDelta AudioClock::TimeUntilPlayback(base::TimeDelta timestamp) const {
+ DCHECK(timestamp >= front_timestamp_);
+ DCHECK(timestamp <= back_timestamp_);
+
+ int64_t frames_until_timestamp = 0;
+ double timestamp_us = timestamp.InMicroseconds();
+ double media_time_us = front_timestamp_.InMicroseconds();
+
+ for (size_t i = 0; i < buffered_.size(); ++i) {
+ // Leading silence is always accounted prior to anything else.
+ if (buffered_[i].playback_rate == 0) {
+ frames_until_timestamp += buffered_[i].frames;
+ continue;
+ }
+
+ // Calculate upper bound on media time for current block of buffered frames.
+ double delta_us = buffered_[i].frames * buffered_[i].playback_rate *
+ microseconds_per_frame_;
+ double max_media_time_us = media_time_us + delta_us;
+
+ // Determine amount of media time to convert to frames for current block. If
+ // target timestamp falls within current block, scale the amount of frames
+ // based on remaining amount of media time.
+ if (timestamp_us <= max_media_time_us) {
+ frames_until_timestamp +=
+ buffered_[i].frames * (timestamp_us - media_time_us) / delta_us;
+ break;
+ }
+
+ media_time_us = max_media_time_us;
+ frames_until_timestamp += buffered_[i].frames;
+ }
+
+ return base::TimeDelta::FromMicroseconds(frames_until_timestamp *
+ microseconds_per_frame_);
+}
+
AudioClock::AudioData::AudioData(int64_t frames, float playback_rate)
: frames(frames), playback_rate(playback_rate) {
}
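
[editor's note] The frame accounting that the new TimeUntilPlayback() performs can be modeled outside Chromium with only the standard library. This is a sketch of the loop above, not the shipped implementation; the Block struct and all names here are hypothetical:

#include <cstdint>
#include <vector>

struct Block {
  int64_t frames;        // Positive frame count for this buffered span.
  double playback_rate;  // 0 denotes silence.
};

// Returns wall-clock microseconds until media time |timestamp_us| is heard,
// given |front_us| as the media time of the first buffered frame.
int64_t TimeUntilPlaybackUs(const std::vector<Block>& buffered,
                            double front_us,
                            double timestamp_us,
                            double us_per_frame) {
  int64_t frames_until_timestamp = 0;
  double media_time_us = front_us;
  for (const Block& b : buffered) {
    if (b.playback_rate == 0) {
      // Silence plays out in full but does not advance media time.
      frames_until_timestamp += b.frames;
      continue;
    }
    const double delta_us = b.frames * b.playback_rate * us_per_frame;
    if (timestamp_us <= media_time_us + delta_us) {
      // Target falls inside this block: take a proportional frame count.
      frames_until_timestamp += static_cast<int64_t>(
          b.frames * (timestamp_us - media_time_us) / delta_us);
      break;
    }
    media_time_us += delta_us;
    frames_until_timestamp += b.frames;
  }
  return static_cast<int64_t>(frames_until_timestamp * us_per_frame);
}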
diff --git a/media/filters/audio_clock.h b/media/filters/audio_clock.h
index 1a27dee000..6472f11319 100644
--- a/media/filters/audio_clock.h
+++ b/media/filters/audio_clock.h
@@ -16,6 +16,36 @@ namespace media {
// estimating the amount of delay in wall clock time. Takes changes in playback
// rate into account to handle scenarios where multiple rates may be present in
// a playback pipeline with large delay.
+//
+//
+// USAGE
+//
+// Prior to starting audio playback, construct an AudioClock with an initial
+// media timestamp and a sample rate matching the sample rate the audio device
+// was opened at.
+//
+// Each time the audio rendering callback is executed, call WroteAudio() once
+// (and only once!) containing information on what was written:
+// 1) How many frames of audio data requested
+// 2) How many frames of audio data provided
+// 3) The playback rate of the audio data provided
+// 4) The current amount of delay
+//
+// After a call to WroteAudio(), clients can inspect the resulting media
+// timestamp. This can be used for UI purposes, synchronizing video, etc...
+//
+//
+// DETAILS
+//
+// Silence (whether caused by the initial audio delay or failing to write the
+// amount of requested frames due to underflow) is also modeled and will cause
+// the media timestamp to stop increasing until all known silence has been
+// played. AudioClock's model is initialized with silence during the first call
+// to WroteAudio() using the delay value.
+//
+// Playback rates are tracked for translating frame durations into media
+// durations. Since silence doesn't affect media timestamps, it also isn't
+// affected by playback rates.
class MEDIA_EXPORT AudioClock {
public:
AudioClock(base::TimeDelta start_timestamp, int sample_rate);
@@ -29,17 +59,37 @@ class MEDIA_EXPORT AudioClock {
int delay_frames,
float playback_rate);
- // Calculates the current media timestamp taking silence and changes in
- // playback rate into account.
- base::TimeDelta current_media_timestamp() const {
- return current_media_timestamp_;
- }
+ // Returns the bounds of media data currently buffered by the audio hardware,
+ // taking silence and changes in playback rate into account. Buffered audio
+ // structure and timestamps are updated with every call to WroteAudio().
+ //
+ // start_timestamp = 1000 ms sample_rate = 40 Hz
+ // +-----------------------+-----------------------+-----------------------+
+ // | 10 frames silence | 20 frames @ 1.0x | 20 frames @ 0.5x |
+ // | = 250 ms (wall) | = 500 ms (wall) | = 500 ms (wall) |
+ // | = 0 ms (media) | = 500 ms (media) | = 250 ms (media) |
+ // +-----------------------+-----------------------+-----------------------+
+ // ^ ^
+ // front_timestamp() is equal to back_timestamp() is equal to
+ // |start_timestamp| since no amount of media frames tracked
+ // media data has been played yet. by AudioClock, which would be
+ // 1000 + 500 + 250 = 1750 ms.
+ base::TimeDelta front_timestamp() const { return front_timestamp_; }
+ base::TimeDelta back_timestamp() const { return back_timestamp_; }
// Clients can provide |time_since_writing| to simulate the passage of time
// since last writing audio to get a more accurate current media timestamp.
- base::TimeDelta CurrentMediaTimestampSinceWriting(
+ //
+ // The value will be bounded between front_timestamp() and back_timestamp().
+ base::TimeDelta TimestampSinceWriting(
base::TimeDelta time_since_writing) const;
+ // Returns the amount of wall time until |timestamp| will be played by the
+ // audio hardware.
+ //
+ // |timestamp| must be within front_timestamp() and back_timestamp().
+ base::TimeDelta TimeUntilPlayback(base::TimeDelta timestamp) const;
+
// Returns the amount of contiguous media time buffered at the head of the
// audio hardware buffer. Silence introduced into the audio hardware buffer is
// treated as a break in media time.
@@ -53,10 +103,6 @@ class MEDIA_EXPORT AudioClock {
return contiguous_audio_data_buffered_at_same_rate_;
}
- // Returns true if there is any audio data buffered by the audio hardware,
- // even if there is silence mixed in.
- bool audio_data_buffered() const { return audio_data_buffered_; }
-
private:
// Even with a ridiculously high sample rate of 256kHz, using 64 bits will
permit tracking up to 416999965 days' worth of time (that's 1141 millennia).
@@ -81,10 +127,10 @@ class MEDIA_EXPORT AudioClock {
std::deque<AudioData> buffered_;
int64_t total_buffered_frames_;
- base::TimeDelta current_media_timestamp_;
+ base::TimeDelta front_timestamp_;
+ base::TimeDelta back_timestamp_;
// Cached results of last call to WroteAudio().
- bool audio_data_buffered_;
base::TimeDelta contiguous_audio_data_buffered_;
base::TimeDelta contiguous_audio_data_buffered_at_same_rate_;
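
[editor's note] A minimal usage sketch for the reworked API, following the USAGE comment above. The frame counts, delay, and playback rate are hypothetical, and compiling this requires a Chromium checkout for the base:: and media:: types:

#include "base/time/time.h"
#include "media/filters/audio_clock.h"

void RenderCallbackSketch(media::AudioClock* clock) {
  // Exactly one call per audio rendering callback: frames written, frames
  // requested, current hardware delay in frames, and the playback rate.
  clock->WroteAudio(/*frames_written=*/512, /*frames_requested=*/512,
                    /*delay_frames=*/1024, /*playback_rate=*/1.0f);

  // front_timestamp() is the media time now being heard; back_timestamp()
  // is the media time of the last frame handed to the hardware.
  base::TimeDelta heard = clock->front_timestamp();
  base::TimeDelta queued = clock->back_timestamp();
  (void)heard;
  (void)queued;
}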
diff --git a/media/filters/audio_clock_unittest.cc b/media/filters/audio_clock_unittest.cc
index 5f69fd8efe..fa7248b46a 100644
--- a/media/filters/audio_clock_unittest.cc
+++ b/media/filters/audio_clock_unittest.cc
@@ -24,18 +24,24 @@ class AudioClockTest : public testing::Test {
frames_written, frames_requested, delay_frames, playback_rate);
}
- int CurrentMediaTimestampInDays() {
- return clock_.current_media_timestamp().InDays();
+ int FrontTimestampInDays() { return clock_.front_timestamp().InDays(); }
+
+ int FrontTimestampInMilliseconds() {
+ return clock_.front_timestamp().InMilliseconds();
}
- int CurrentMediaTimestampInMilliseconds() {
- return clock_.current_media_timestamp().InMilliseconds();
+ int BackTimestampInMilliseconds() {
+ return clock_.back_timestamp().InMilliseconds();
}
- int CurrentMediaTimestampSinceLastWritingInMilliseconds(int milliseconds) {
- return clock_.CurrentMediaTimestampSinceWriting(
- base::TimeDelta::FromMilliseconds(milliseconds))
- .InMilliseconds();
+ int TimestampSinceLastWritingInMilliseconds(int milliseconds) {
+ return clock_.TimestampSinceWriting(base::TimeDelta::FromMilliseconds(
+ milliseconds)).InMilliseconds();
+ }
+
+ int TimeUntilPlaybackInMilliseconds(int timestamp_ms) {
+ return clock_.TimeUntilPlayback(base::TimeDelta::FromMilliseconds(
+ timestamp_ms)).InMilliseconds();
}
int ContiguousAudioDataBufferedInDays() {
@@ -58,21 +64,26 @@ class AudioClockTest : public testing::Test {
DISALLOW_COPY_AND_ASSIGN(AudioClockTest);
};
-TEST_F(AudioClockTest, CurrentMediaTimestampStartsAtStartTimestamp) {
+TEST_F(AudioClockTest, FrontTimestampStartsAtStartTimestamp) {
+ base::TimeDelta expected = base::TimeDelta::FromSeconds(123);
+ AudioClock clock(expected, sample_rate_);
+
+ EXPECT_EQ(expected, clock.front_timestamp());
+}
+
+TEST_F(AudioClockTest, BackTimestampStartsAtStartTimestamp) {
base::TimeDelta expected = base::TimeDelta::FromSeconds(123);
AudioClock clock(expected, sample_rate_);
- EXPECT_EQ(expected, clock.current_media_timestamp());
+ EXPECT_EQ(expected, clock.back_timestamp());
}
-TEST_F(AudioClockTest,
- CurrentMediaTimestampSinceWritingStartsAtStartTimestamp) {
+TEST_F(AudioClockTest, TimestampSinceWritingStartsAtStartTimestamp) {
base::TimeDelta expected = base::TimeDelta::FromSeconds(123);
AudioClock clock(expected, sample_rate_);
base::TimeDelta time_since_writing = base::TimeDelta::FromSeconds(456);
- EXPECT_EQ(expected,
- clock.CurrentMediaTimestampSinceWriting(time_since_writing));
+ EXPECT_EQ(expected, clock.TimestampSinceWriting(time_since_writing));
}
TEST_F(AudioClockTest, ContiguousAudioDataBufferedStartsAtZero) {
@@ -84,38 +95,38 @@ TEST_F(AudioClockTest, ContiguousAudioDataBufferedAtSameRateStartsAtZero) {
clock_.contiguous_audio_data_buffered_at_same_rate());
}
-TEST_F(AudioClockTest, AudioDataBufferedStartsAtFalse) {
- EXPECT_FALSE(clock_.audio_data_buffered());
-}
-
TEST_F(AudioClockTest, Playback) {
// The first time we write data we should still expect our start timestamp
// due to delay.
WroteAudio(10, 10, 20, 1.0);
- EXPECT_EQ(0, CurrentMediaTimestampInMilliseconds());
+ EXPECT_EQ(0, FrontTimestampInMilliseconds());
+ EXPECT_EQ(1000, BackTimestampInMilliseconds());
EXPECT_EQ(0, ContiguousAudioDataBufferedInMilliseconds());
EXPECT_EQ(0, ContiguousAudioDataBufferedAtSameRateInMilliseconds());
- EXPECT_TRUE(clock_.audio_data_buffered());
// The media time should remain at start timestamp as we write data.
WroteAudio(10, 10, 20, 1.0);
- EXPECT_EQ(0, CurrentMediaTimestampInMilliseconds());
+ EXPECT_EQ(0, FrontTimestampInMilliseconds());
+ EXPECT_EQ(2000, BackTimestampInMilliseconds());
EXPECT_EQ(0, ContiguousAudioDataBufferedInMilliseconds());
EXPECT_EQ(0, ContiguousAudioDataBufferedAtSameRateInMilliseconds());
WroteAudio(10, 10, 20, 1.0);
- EXPECT_EQ(0, CurrentMediaTimestampInMilliseconds());
+ EXPECT_EQ(0, FrontTimestampInMilliseconds());
+ EXPECT_EQ(3000, BackTimestampInMilliseconds());
EXPECT_EQ(3000, ContiguousAudioDataBufferedInMilliseconds());
EXPECT_EQ(3000, ContiguousAudioDataBufferedAtSameRateInMilliseconds());
// The media time should now start advancing now that the delay has been
// covered.
WroteAudio(10, 10, 20, 1.0);
- EXPECT_EQ(1000, CurrentMediaTimestampInMilliseconds());
+ EXPECT_EQ(1000, FrontTimestampInMilliseconds());
+ EXPECT_EQ(4000, BackTimestampInMilliseconds());
EXPECT_EQ(3000, ContiguousAudioDataBufferedInMilliseconds());
EXPECT_EQ(3000, ContiguousAudioDataBufferedAtSameRateInMilliseconds());
WroteAudio(10, 10, 20, 1.0);
- EXPECT_EQ(2000, CurrentMediaTimestampInMilliseconds());
+ EXPECT_EQ(2000, FrontTimestampInMilliseconds());
+ EXPECT_EQ(5000, BackTimestampInMilliseconds());
EXPECT_EQ(3000, ContiguousAudioDataBufferedInMilliseconds());
EXPECT_EQ(3000, ContiguousAudioDataBufferedAtSameRateInMilliseconds());
@@ -123,22 +134,26 @@ TEST_F(AudioClockTest, Playback) {
// - Current time will advance by one second until it hits rate change
// - Contiguous audio data will start shrinking immediately
WroteAudio(10, 10, 20, 0.5);
- EXPECT_EQ(3000, CurrentMediaTimestampInMilliseconds());
+ EXPECT_EQ(3000, FrontTimestampInMilliseconds());
+ EXPECT_EQ(5500, BackTimestampInMilliseconds());
EXPECT_EQ(2500, ContiguousAudioDataBufferedInMilliseconds());
EXPECT_EQ(2000, ContiguousAudioDataBufferedAtSameRateInMilliseconds());
WroteAudio(10, 10, 20, 0.5);
- EXPECT_EQ(4000, CurrentMediaTimestampInMilliseconds());
+ EXPECT_EQ(4000, FrontTimestampInMilliseconds());
+ EXPECT_EQ(6000, BackTimestampInMilliseconds());
EXPECT_EQ(2000, ContiguousAudioDataBufferedInMilliseconds());
EXPECT_EQ(1000, ContiguousAudioDataBufferedAtSameRateInMilliseconds());
WroteAudio(10, 10, 20, 0.5);
- EXPECT_EQ(5000, CurrentMediaTimestampInMilliseconds());
+ EXPECT_EQ(5000, FrontTimestampInMilliseconds());
+ EXPECT_EQ(6500, BackTimestampInMilliseconds());
EXPECT_EQ(1500, ContiguousAudioDataBufferedInMilliseconds());
EXPECT_EQ(1500, ContiguousAudioDataBufferedAtSameRateInMilliseconds());
WroteAudio(10, 10, 20, 0.5);
- EXPECT_EQ(5500, CurrentMediaTimestampInMilliseconds());
+ EXPECT_EQ(5500, FrontTimestampInMilliseconds());
+ EXPECT_EQ(7000, BackTimestampInMilliseconds());
EXPECT_EQ(1500, ContiguousAudioDataBufferedInMilliseconds());
EXPECT_EQ(1500, ContiguousAudioDataBufferedAtSameRateInMilliseconds());
@@ -146,22 +161,26 @@ TEST_F(AudioClockTest, Playback) {
// - Current time will advance by half a second until it hits rate change
// - Contiguous audio data will start growing immediately
WroteAudio(10, 10, 20, 2);
- EXPECT_EQ(6000, CurrentMediaTimestampInMilliseconds());
+ EXPECT_EQ(6000, FrontTimestampInMilliseconds());
+ EXPECT_EQ(9000, BackTimestampInMilliseconds());
EXPECT_EQ(3000, ContiguousAudioDataBufferedInMilliseconds());
EXPECT_EQ(1000, ContiguousAudioDataBufferedAtSameRateInMilliseconds());
WroteAudio(10, 10, 20, 2);
- EXPECT_EQ(6500, CurrentMediaTimestampInMilliseconds());
+ EXPECT_EQ(6500, FrontTimestampInMilliseconds());
+ EXPECT_EQ(11000, BackTimestampInMilliseconds());
EXPECT_EQ(4500, ContiguousAudioDataBufferedInMilliseconds());
EXPECT_EQ(500, ContiguousAudioDataBufferedAtSameRateInMilliseconds());
WroteAudio(10, 10, 20, 2);
- EXPECT_EQ(7000, CurrentMediaTimestampInMilliseconds());
+ EXPECT_EQ(7000, FrontTimestampInMilliseconds());
+ EXPECT_EQ(13000, BackTimestampInMilliseconds());
EXPECT_EQ(6000, ContiguousAudioDataBufferedInMilliseconds());
EXPECT_EQ(6000, ContiguousAudioDataBufferedAtSameRateInMilliseconds());
WroteAudio(10, 10, 20, 2);
- EXPECT_EQ(9000, CurrentMediaTimestampInMilliseconds());
+ EXPECT_EQ(9000, FrontTimestampInMilliseconds());
+ EXPECT_EQ(15000, BackTimestampInMilliseconds());
EXPECT_EQ(6000, ContiguousAudioDataBufferedInMilliseconds());
EXPECT_EQ(6000, ContiguousAudioDataBufferedAtSameRateInMilliseconds());
@@ -169,58 +188,64 @@ TEST_F(AudioClockTest, Playback) {
// - Current time will advance by half a second until it hits silence
// - Contiguous audio data will start shrinking towards zero
WroteAudio(0, 10, 20, 2);
- EXPECT_EQ(11000, CurrentMediaTimestampInMilliseconds());
+ EXPECT_EQ(11000, FrontTimestampInMilliseconds());
+ EXPECT_EQ(15000, BackTimestampInMilliseconds());
EXPECT_EQ(4000, ContiguousAudioDataBufferedInMilliseconds());
EXPECT_EQ(4000, ContiguousAudioDataBufferedAtSameRateInMilliseconds());
WroteAudio(0, 10, 20, 2);
- EXPECT_EQ(13000, CurrentMediaTimestampInMilliseconds());
+ EXPECT_EQ(13000, FrontTimestampInMilliseconds());
+ EXPECT_EQ(15000, BackTimestampInMilliseconds());
EXPECT_EQ(2000, ContiguousAudioDataBufferedInMilliseconds());
EXPECT_EQ(2000, ContiguousAudioDataBufferedAtSameRateInMilliseconds());
- EXPECT_TRUE(clock_.audio_data_buffered()); // Still audio data buffered.
WroteAudio(0, 10, 20, 2);
- EXPECT_EQ(15000, CurrentMediaTimestampInMilliseconds());
+ EXPECT_EQ(15000, FrontTimestampInMilliseconds());
+ EXPECT_EQ(15000, BackTimestampInMilliseconds());
EXPECT_EQ(0, ContiguousAudioDataBufferedInMilliseconds());
EXPECT_EQ(0, ContiguousAudioDataBufferedAtSameRateInMilliseconds());
- EXPECT_FALSE(clock_.audio_data_buffered()); // No more audio data buffered.
// At this point media time should stop increasing.
WroteAudio(0, 10, 20, 2);
- EXPECT_EQ(15000, CurrentMediaTimestampInMilliseconds());
+ EXPECT_EQ(15000, FrontTimestampInMilliseconds());
+ EXPECT_EQ(15000, BackTimestampInMilliseconds());
EXPECT_EQ(0, ContiguousAudioDataBufferedInMilliseconds());
EXPECT_EQ(0, ContiguousAudioDataBufferedAtSameRateInMilliseconds());
- EXPECT_FALSE(clock_.audio_data_buffered());
}
TEST_F(AudioClockTest, AlternatingAudioAndSilence) {
// Buffer #1: [0, 1000)
WroteAudio(10, 10, 20, 1.0);
- EXPECT_EQ(0, CurrentMediaTimestampInMilliseconds());
+ EXPECT_EQ(0, FrontTimestampInMilliseconds());
+ EXPECT_EQ(1000, BackTimestampInMilliseconds());
EXPECT_EQ(0, ContiguousAudioDataBufferedInMilliseconds());
// Buffer #2: 1000ms of silence
WroteAudio(0, 10, 20, 1.0);
- EXPECT_EQ(0, CurrentMediaTimestampInMilliseconds());
+ EXPECT_EQ(0, FrontTimestampInMilliseconds());
+ EXPECT_EQ(1000, BackTimestampInMilliseconds());
EXPECT_EQ(0, ContiguousAudioDataBufferedInMilliseconds());
// Buffer #3: [1000, 2000):
// - Buffer #1 is at front with 1000ms of contiguous audio data
WroteAudio(10, 10, 20, 1.0);
- EXPECT_EQ(0, CurrentMediaTimestampInMilliseconds());
+ EXPECT_EQ(0, FrontTimestampInMilliseconds());
+ EXPECT_EQ(2000, BackTimestampInMilliseconds());
EXPECT_EQ(1000, ContiguousAudioDataBufferedInMilliseconds());
// Buffer #4: 1000ms of silence
// - Buffer #1 has been played out
// - Buffer #2 of silence leaves us with 0ms of contiguous audio data
WroteAudio(0, 10, 20, 1.0);
- EXPECT_EQ(1000, CurrentMediaTimestampInMilliseconds());
+ EXPECT_EQ(1000, FrontTimestampInMilliseconds());
+ EXPECT_EQ(2000, BackTimestampInMilliseconds());
EXPECT_EQ(0, ContiguousAudioDataBufferedInMilliseconds());
// Buffer #5: [2000, 3000):
// - Buffer #3 is at front with 1000ms of contiguous audio data
WroteAudio(10, 10, 20, 1.0);
- EXPECT_EQ(1000, CurrentMediaTimestampInMilliseconds());
+ EXPECT_EQ(1000, FrontTimestampInMilliseconds());
+ EXPECT_EQ(3000, BackTimestampInMilliseconds());
EXPECT_EQ(1000, ContiguousAudioDataBufferedInMilliseconds());
}
@@ -228,65 +253,104 @@ TEST_F(AudioClockTest, ZeroDelay) {
// The first time we write data we should expect the first timestamp
// immediately.
WroteAudio(10, 10, 0, 1.0);
- EXPECT_EQ(0, CurrentMediaTimestampInMilliseconds());
+ EXPECT_EQ(0, FrontTimestampInMilliseconds());
+ EXPECT_EQ(1000, BackTimestampInMilliseconds());
EXPECT_EQ(1000, ContiguousAudioDataBufferedInMilliseconds());
// Ditto for all subsequent buffers.
WroteAudio(10, 10, 0, 1.0);
- EXPECT_EQ(1000, CurrentMediaTimestampInMilliseconds());
+ EXPECT_EQ(1000, FrontTimestampInMilliseconds());
+ EXPECT_EQ(2000, BackTimestampInMilliseconds());
EXPECT_EQ(1000, ContiguousAudioDataBufferedInMilliseconds());
WroteAudio(10, 10, 0, 1.0);
- EXPECT_EQ(2000, CurrentMediaTimestampInMilliseconds());
+ EXPECT_EQ(2000, FrontTimestampInMilliseconds());
+ EXPECT_EQ(3000, BackTimestampInMilliseconds());
EXPECT_EQ(1000, ContiguousAudioDataBufferedInMilliseconds());
// Ditto for silence.
WroteAudio(0, 10, 0, 1.0);
- EXPECT_EQ(3000, CurrentMediaTimestampInMilliseconds());
+ EXPECT_EQ(3000, FrontTimestampInMilliseconds());
+ EXPECT_EQ(3000, BackTimestampInMilliseconds());
EXPECT_EQ(0, ContiguousAudioDataBufferedInMilliseconds());
WroteAudio(0, 10, 0, 1.0);
- EXPECT_EQ(3000, CurrentMediaTimestampInMilliseconds());
+ EXPECT_EQ(3000, FrontTimestampInMilliseconds());
+ EXPECT_EQ(3000, BackTimestampInMilliseconds());
EXPECT_EQ(0, ContiguousAudioDataBufferedInMilliseconds());
}
-TEST_F(AudioClockTest, CurrentMediaTimestampSinceLastWriting) {
+TEST_F(AudioClockTest, TimestampSinceLastWriting) {
// Construct an audio clock with the following representation:
//
+ // |- existing delay -|------------ calls to WroteAudio() -----------------|
// +-------------------+----------------+------------------+----------------+
- // | 10 frames silence | 10 frames @ 1x | 10 frames @ 0.5x | 10 frames @ 2x |
+ // | 20 frames silence | 10 frames @ 1x | 10 frames @ 0.5x | 10 frames @ 2x |
// +-------------------+----------------+------------------+----------------+
// Media timestamp: 0 1000 1500 3500
// Wall clock time: 2000 3000 4000 5000
WroteAudio(10, 10, 40, 1.0);
WroteAudio(10, 10, 40, 0.5);
WroteAudio(10, 10, 40, 2.0);
- EXPECT_EQ(0, CurrentMediaTimestampInMilliseconds());
+ EXPECT_EQ(0, FrontTimestampInMilliseconds());
+ EXPECT_EQ(3500, BackTimestampInMilliseconds());
EXPECT_EQ(0, ContiguousAudioDataBufferedInMilliseconds());
// Simulate passing 2000ms of initial delay in the audio hardware.
- EXPECT_EQ(0, CurrentMediaTimestampSinceLastWritingInMilliseconds(0));
- EXPECT_EQ(0, CurrentMediaTimestampSinceLastWritingInMilliseconds(500));
- EXPECT_EQ(0, CurrentMediaTimestampSinceLastWritingInMilliseconds(1000));
- EXPECT_EQ(0, CurrentMediaTimestampSinceLastWritingInMilliseconds(1500));
- EXPECT_EQ(0, CurrentMediaTimestampSinceLastWritingInMilliseconds(2000));
+ EXPECT_EQ(0, TimestampSinceLastWritingInMilliseconds(0));
+ EXPECT_EQ(0, TimestampSinceLastWritingInMilliseconds(500));
+ EXPECT_EQ(0, TimestampSinceLastWritingInMilliseconds(1000));
+ EXPECT_EQ(0, TimestampSinceLastWritingInMilliseconds(1500));
+ EXPECT_EQ(0, TimestampSinceLastWritingInMilliseconds(2000));
// Now we should see the 1.0x buffer.
- EXPECT_EQ(500, CurrentMediaTimestampSinceLastWritingInMilliseconds(2500));
- EXPECT_EQ(1000, CurrentMediaTimestampSinceLastWritingInMilliseconds(3000));
+ EXPECT_EQ(500, TimestampSinceLastWritingInMilliseconds(2500));
+ EXPECT_EQ(1000, TimestampSinceLastWritingInMilliseconds(3000));
// Now we should see the 0.5x buffer.
- EXPECT_EQ(1250, CurrentMediaTimestampSinceLastWritingInMilliseconds(3500));
- EXPECT_EQ(1500, CurrentMediaTimestampSinceLastWritingInMilliseconds(4000));
+ EXPECT_EQ(1250, TimestampSinceLastWritingInMilliseconds(3500));
+ EXPECT_EQ(1500, TimestampSinceLastWritingInMilliseconds(4000));
// Now we should see the 2.0x buffer.
- EXPECT_EQ(2500, CurrentMediaTimestampSinceLastWritingInMilliseconds(4500));
- EXPECT_EQ(3500, CurrentMediaTimestampSinceLastWritingInMilliseconds(5000));
+ EXPECT_EQ(2500, TimestampSinceLastWritingInMilliseconds(4500));
+ EXPECT_EQ(3500, TimestampSinceLastWritingInMilliseconds(5000));
// Times beyond the known length of the audio clock should return the last
// media timestamp we know of.
- EXPECT_EQ(3500, CurrentMediaTimestampSinceLastWritingInMilliseconds(5001));
- EXPECT_EQ(3500, CurrentMediaTimestampSinceLastWritingInMilliseconds(6000));
+ EXPECT_EQ(3500, TimestampSinceLastWritingInMilliseconds(5001));
+ EXPECT_EQ(3500, TimestampSinceLastWritingInMilliseconds(6000));
+}
+
+TEST_F(AudioClockTest, TimeUntilPlayback) {
+ // Construct an audio clock with the following representation:
+ //
+ // existing
+ // |- delay -|------------------ calls to WroteAudio() ------------------|
+ // +------------+---------+------------+-----------+------------+-----------+
+ // | 20 silence | 10 @ 1x | 10 silence | 10 @ 0.5x | 10 silence | 10 @ 2.0x |
+ // +------------+---------+------------+-----------+------------+-----------+
+ // Media: 0 1000 1000 1500 1500 3500
+ // Wall: 2000 3000 4000 5000 6000 7000
+ WroteAudio(10, 10, 60, 1.0);
+ WroteAudio(0, 10, 60, 1.0);
+ WroteAudio(10, 10, 60, 0.5);
+ WroteAudio(0, 10, 60, 0.5);
+ WroteAudio(10, 10, 60, 2.0);
+ EXPECT_EQ(0, FrontTimestampInMilliseconds());
+ EXPECT_EQ(3500, BackTimestampInMilliseconds());
+ EXPECT_EQ(0, ContiguousAudioDataBufferedInMilliseconds());
+
+ // Media timestamp zero has to wait for silence to pass.
+ EXPECT_EQ(2000, TimeUntilPlaybackInMilliseconds(0));
+
+ // From then on out it's simply adding up the number of frames and taking
+ // silence into account.
+ EXPECT_EQ(2500, TimeUntilPlaybackInMilliseconds(500));
+ EXPECT_EQ(3000, TimeUntilPlaybackInMilliseconds(1000));
+ EXPECT_EQ(4500, TimeUntilPlaybackInMilliseconds(1250));
+ EXPECT_EQ(5000, TimeUntilPlaybackInMilliseconds(1500));
+ EXPECT_EQ(6500, TimeUntilPlaybackInMilliseconds(2500));
+ EXPECT_EQ(7000, TimeUntilPlaybackInMilliseconds(3500));
}
TEST_F(AudioClockTest, SupportsYearsWorthOfAudioData) {
@@ -298,25 +362,25 @@ TEST_F(AudioClockTest, SupportsYearsWorthOfAudioData) {
// Use zero delay to test calculation of current timestamp.
WroteAudio(huge_amount_of_frames, huge_amount_of_frames, 0, 1.0);
- EXPECT_EQ(0, CurrentMediaTimestampInDays());
+ EXPECT_EQ(0, FrontTimestampInDays());
EXPECT_EQ(2485, ContiguousAudioDataBufferedInDays());
WroteAudio(huge_amount_of_frames, huge_amount_of_frames, 0, 1.0);
- EXPECT_EQ(huge.InDays(), CurrentMediaTimestampInDays());
+ EXPECT_EQ(huge.InDays(), FrontTimestampInDays());
EXPECT_EQ(huge.InDays(), ContiguousAudioDataBufferedInDays());
WroteAudio(huge_amount_of_frames, huge_amount_of_frames, 0, 1.0);
- EXPECT_EQ((huge * 2).InDays(), CurrentMediaTimestampInDays());
+ EXPECT_EQ((huge * 2).InDays(), FrontTimestampInDays());
EXPECT_EQ(huge.InDays(), ContiguousAudioDataBufferedInDays());
WroteAudio(huge_amount_of_frames, huge_amount_of_frames, 0, 1.0);
- EXPECT_EQ((huge * 3).InDays(), CurrentMediaTimestampInDays());
+ EXPECT_EQ((huge * 3).InDays(), FrontTimestampInDays());
EXPECT_EQ(huge.InDays(), ContiguousAudioDataBufferedInDays());
// Use huge delay to test calculation of buffered data.
WroteAudio(
huge_amount_of_frames, huge_amount_of_frames, huge_amount_of_frames, 1.0);
- EXPECT_EQ((huge * 3).InDays(), CurrentMediaTimestampInDays());
+ EXPECT_EQ((huge * 3).InDays(), FrontTimestampInDays());
EXPECT_EQ((huge * 2).InDays(), ContiguousAudioDataBufferedInDays());
}
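
[editor's note] The millisecond expectations in these tests follow from the fixture's implied sample rate: each 10-frame write at 1.0x advances the back timestamp by 1000 ms, so sample_rate_ is evidently 10 Hz. A standalone sketch of that frame-to-millisecond mapping (the 10 Hz constant is inferred from the tests, not quoted from the fixture):

#include <cstdint>

constexpr int kSampleRate = 10;  // Inferred: 10 frames at 1.0x == 1000 ms.

constexpr int64_t FramesToMs(int64_t frames, double playback_rate) {
  // Media time advances at |playback_rate| times wall-clock time.
  return static_cast<int64_t>(frames * playback_rate * 1000 / kSampleRate);
}

static_assert(FramesToMs(10, 1.0) == 1000, "one buffer at 1x is one second");
static_assert(FramesToMs(10, 0.5) == 500, "one buffer at 0.5x is half that");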
diff --git a/media/filters/audio_renderer_algorithm.cc b/media/filters/audio_renderer_algorithm.cc
index fb5cf7428c..b604b9ee27 100644
--- a/media/filters/audio_renderer_algorithm.cc
+++ b/media/filters/audio_renderer_algorithm.cc
@@ -210,10 +210,6 @@ void AudioRendererAlgorithm::FlushBuffers() {
capacity_ = kStartingBufferSizeInFrames;
}
-base::TimeDelta AudioRendererAlgorithm::GetTime() {
- return audio_buffer_.current_time();
-}
-
void AudioRendererAlgorithm::EnqueueBuffer(
const scoped_refptr<AudioBuffer>& buffer_in) {
DCHECK(!buffer_in->end_of_stream());
diff --git a/media/filters/audio_renderer_algorithm.h b/media/filters/audio_renderer_algorithm.h
index b36eb0807e..68b18a54bb 100644
--- a/media/filters/audio_renderer_algorithm.h
+++ b/media/filters/audio_renderer_algorithm.h
@@ -51,10 +51,6 @@ class MEDIA_EXPORT AudioRendererAlgorithm {
// Clears |audio_buffer_|.
void FlushBuffers();
- // Returns the time of the next byte in our data or kNoTimestamp() if current
- // time is unknown.
- base::TimeDelta GetTime();
-
// Enqueues a buffer. It is called from the owner of the algorithm after a
// read completes.
void EnqueueBuffer(const scoped_refptr<AudioBuffer>& buffer_in);
diff --git a/media/filters/audio_renderer_impl.cc b/media/filters/audio_renderer_impl.cc
index fbf4c163d3..5a536998d4 100644
--- a/media/filters/audio_renderer_impl.cc
+++ b/media/filters/audio_renderer_impl.cc
@@ -45,13 +45,15 @@ AudioRendererImpl::AudioRendererImpl(
media::AudioRendererSink* sink,
ScopedVector<AudioDecoder> decoders,
const SetDecryptorReadyCB& set_decryptor_ready_cb,
- AudioHardwareConfig* hardware_config)
+ const AudioHardwareConfig& hardware_config,
+ const scoped_refptr<MediaLog>& media_log)
: task_runner_(task_runner),
expecting_config_changes_(false),
sink_(sink),
audio_buffer_stream_(new AudioBufferStream(task_runner,
decoders.Pass(),
- set_decryptor_ready_cb)),
+ set_decryptor_ready_cb,
+ media_log)),
hardware_config_(hardware_config),
playback_rate_(0),
state_(kUninitialized),
@@ -61,7 +63,6 @@ AudioRendererImpl::AudioRendererImpl(
pending_read_(false),
received_end_of_stream_(false),
rendered_end_of_stream_(false),
- last_timestamp_update_(kNoTimestamp()),
weak_factory_(this) {
audio_buffer_stream_->set_splice_observer(base::Bind(
&AudioRendererImpl::OnNewSpliceBuffer, weak_factory_.GetWeakPtr()));
@@ -148,18 +149,30 @@ void AudioRendererImpl::SetMediaTime(base::TimeDelta time) {
DCHECK_EQ(state_, kFlushed);
start_timestamp_ = time;
+ ended_timestamp_ = kInfiniteDuration();
+ last_render_ticks_ = base::TimeTicks();
audio_clock_.reset(new AudioClock(time, audio_parameters_.sample_rate()));
}
base::TimeDelta AudioRendererImpl::CurrentMediaTime() {
- DVLOG(1) << __FUNCTION__;
- DCHECK(task_runner_->BelongsToCurrentThread());
+ DVLOG(2) << __FUNCTION__;
+
+ // In practice the Render() method is called with a high enough frequency
+ // that returning only the front timestamp is good enough and also prevents
+ // returning values that go backwards in time.
+ base::AutoLock auto_lock(lock_);
+ return audio_clock_->front_timestamp();
+}
+
+base::TimeDelta AudioRendererImpl::CurrentMediaTimeForSyncingVideo() {
+ DVLOG(2) << __FUNCTION__;
- // TODO(scherkus): Finish implementing when ready to switch Pipeline to using
- // TimeSource http://crbug.com/370634
- NOTIMPLEMENTED();
+ base::AutoLock auto_lock(lock_);
+ if (last_render_ticks_.is_null())
+ return audio_clock_->front_timestamp();
- return base::TimeDelta();
+ return audio_clock_->TimestampSinceWriting(base::TimeTicks::Now() -
+ last_render_ticks_);
}
TimeSource* AudioRendererImpl::GetTimeSource() {
@@ -203,10 +216,8 @@ void AudioRendererImpl::ResetDecoderDone() {
DCHECK_EQ(state_, kFlushed);
DCHECK(!flush_cb_.is_null());
- audio_clock_.reset();
received_end_of_stream_ = false;
rendered_end_of_stream_ = false;
- last_timestamp_update_ = kNoTimestamp();
// Flush() may have been called while underflowed/not fully buffered.
if (buffering_state_ != BUFFERING_HAVE_NOTHING)
@@ -240,7 +251,6 @@ void AudioRendererImpl::StartPlaying() {
void AudioRendererImpl::Initialize(DemuxerStream* stream,
const PipelineStatusCB& init_cb,
const StatisticsCB& statistics_cb,
- const TimeCB& time_cb,
const BufferingStateCB& buffering_state_cb,
const base::Closure& ended_cb,
const PipelineStatusCB& error_cb) {
@@ -249,17 +259,18 @@ void AudioRendererImpl::Initialize(DemuxerStream* stream,
DCHECK_EQ(stream->type(), DemuxerStream::AUDIO);
DCHECK(!init_cb.is_null());
DCHECK(!statistics_cb.is_null());
- DCHECK(!time_cb.is_null());
DCHECK(!buffering_state_cb.is_null());
DCHECK(!ended_cb.is_null());
DCHECK(!error_cb.is_null());
DCHECK_EQ(kUninitialized, state_);
- DCHECK(sink_);
+ DCHECK(sink_.get());
state_ = kInitializing;
- init_cb_ = init_cb;
- time_cb_ = time_cb;
+ // Always post |init_cb_| because |this| could be destroyed if initialization
+ // failed.
+ init_cb_ = BindToCurrentLoop(init_cb);
+
buffering_state_cb_ = buffering_state_cb;
ended_cb_ = ended_cb;
error_cb_ = error_cb;
@@ -274,14 +285,13 @@ void AudioRendererImpl::Initialize(DemuxerStream* stream,
stream->audio_decoder_config().channel_layout(),
ChannelLayoutToChannelCount(
stream->audio_decoder_config().channel_layout()),
- 0,
stream->audio_decoder_config().samples_per_second(),
stream->audio_decoder_config().bits_per_channel(),
buffer_size);
buffer_converter_.reset();
} else {
// TODO(rileya): Support hardware config changes
- const AudioParameters& hw_params = hardware_config_->GetOutputConfig();
+ const AudioParameters& hw_params = hardware_config_.GetOutputConfig();
audio_parameters_.Reset(
hw_params.format(),
// Always use the source's channel layout and channel count to avoid
@@ -291,10 +301,9 @@ void AudioRendererImpl::Initialize(DemuxerStream* stream,
stream->audio_decoder_config().channel_layout(),
ChannelLayoutToChannelCount(
stream->audio_decoder_config().channel_layout()),
- hw_params.input_channels(),
hw_params.sample_rate(),
hw_params.bits_per_sample(),
- hardware_config_->GetHighLatencyBufferSize());
+ hardware_config_.GetHighLatencyBufferSize());
}
audio_clock_.reset(
@@ -348,13 +357,12 @@ void AudioRendererImpl::OnAudioBufferStreamInitialized(bool success) {
}
DCHECK(!sink_playing_);
-
base::ResetAndReturn(&init_cb_).Run(PIPELINE_OK);
}
void AudioRendererImpl::SetVolume(float volume) {
DCHECK(task_runner_->BelongsToCurrentThread());
- DCHECK(sink_);
+ DCHECK(sink_.get());
sink_->SetVolume(volume);
}
@@ -508,7 +516,7 @@ void AudioRendererImpl::SetPlaybackRate(float playback_rate) {
DVLOG(1) << __FUNCTION__ << "(" << playback_rate << ")";
DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK_GE(playback_rate, 0);
- DCHECK(sink_);
+ DCHECK(sink_.get());
base::AutoLock auto_lock(lock_);
@@ -535,7 +543,7 @@ void AudioRendererImpl::SetPlaybackRate(float playback_rate) {
bool AudioRendererImpl::IsBeforeStartTime(
const scoped_refptr<AudioBuffer>& buffer) {
DCHECK_EQ(state_, kPlaying);
- return buffer && !buffer->end_of_stream() &&
+ return buffer.get() && !buffer->end_of_stream() &&
(buffer->timestamp() + buffer->duration()) < start_timestamp_;
}
@@ -547,9 +555,9 @@ int AudioRendererImpl::Render(AudioBus* audio_bus,
const int delay_frames = static_cast<int>(playback_delay.InSecondsF() *
audio_parameters_.sample_rate());
int frames_written = 0;
- base::Closure time_cb;
{
base::AutoLock auto_lock(lock_);
+ last_render_ticks_ = base::TimeTicks::Now();
// Ensure Stop() hasn't destroyed our |algorithm_| on the pipeline thread.
if (!algorithm_) {
@@ -587,46 +595,46 @@ int AudioRendererImpl::Render(AudioBus* audio_bus,
frames_written =
algorithm_->FillBuffer(audio_bus, requested_frames, playback_rate_);
}
- audio_clock_->WroteAudio(
- frames_written, requested_frames, delay_frames, playback_rate_);
+ // Per the TimeSource API the media time should always increase even after
+ // we've rendered all known audio data. Doing so simplifies scenarios where
+ // we have other sources of media data that need to be scheduled after audio
+ // data has ended.
+ //
+ // That being said, we don't want to advance time when underflowed as we
+ // know more decoded frames will eventually arrive. If we did, we would
+ // throw things out of sync when said decoded frames arrive.
+ int frames_after_end_of_stream = 0;
if (frames_written == 0) {
- if (received_end_of_stream_ && !rendered_end_of_stream_ &&
- !audio_clock_->audio_data_buffered()) {
- rendered_end_of_stream_ = true;
- task_runner_->PostTask(FROM_HERE, ended_cb_);
- } else if (!received_end_of_stream_ && state_ == kPlaying) {
- if (buffering_state_ != BUFFERING_HAVE_NOTHING) {
- algorithm_->IncreaseQueueCapacity();
- SetBufferingState_Locked(BUFFERING_HAVE_NOTHING);
- }
+ if (received_end_of_stream_) {
+ if (ended_timestamp_ == kInfiniteDuration())
+ ended_timestamp_ = audio_clock_->back_timestamp();
+ frames_after_end_of_stream = requested_frames;
+ } else if (state_ == kPlaying &&
+ buffering_state_ != BUFFERING_HAVE_NOTHING) {
+ algorithm_->IncreaseQueueCapacity();
+ SetBufferingState_Locked(BUFFERING_HAVE_NOTHING);
}
}
+ audio_clock_->WroteAudio(frames_written + frames_after_end_of_stream,
+ requested_frames,
+ delay_frames,
+ playback_rate_);
+
if (CanRead_Locked()) {
task_runner_->PostTask(FROM_HERE,
base::Bind(&AudioRendererImpl::AttemptRead,
weak_factory_.GetWeakPtr()));
}
- // Firing |ended_cb_| means we no longer need to run |time_cb_|.
- if (!rendered_end_of_stream_ &&
- last_timestamp_update_ != audio_clock_->current_media_timestamp()) {
- // Since |max_time| uses linear interpolation, only provide an upper bound
- // that is for audio data at the same playback rate. Failing to do so can
- // make time jump backwards when the linear interpolated time advances
- // past buffered regions of audio at different rates.
- last_timestamp_update_ = audio_clock_->current_media_timestamp();
- base::TimeDelta max_time =
- last_timestamp_update_ +
- audio_clock_->contiguous_audio_data_buffered_at_same_rate();
- time_cb = base::Bind(time_cb_, last_timestamp_update_, max_time);
+ if (audio_clock_->front_timestamp() >= ended_timestamp_ &&
+ !rendered_end_of_stream_) {
+ rendered_end_of_stream_ = true;
+ task_runner_->PostTask(FROM_HERE, ended_cb_);
}
}
- if (!time_cb.is_null())
- task_runner_->PostTask(FROM_HERE, time_cb);
-
DCHECK_LE(frames_written, requested_frames);
return frames_written;
}
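
[editor's note] The end-of-stream handling introduced above can be summarized in a standalone model: once the stream has ended, requested-but-unwritten frames are reported to the clock as silence so media time keeps advancing, and the ended signal fires when the front timestamp reaches the final frame's timestamp. All names below are hypothetical stand-ins for the members in the diff:

#include <cstdint>
#include <limits>

struct EndOfStreamModel {
  int64_t ended_timestamp_us = std::numeric_limits<int64_t>::max();
  bool rendered_end_of_stream = false;

  // Returns how many extra frames to report to the clock as silence.
  int OnRender(int frames_written, int requested_frames,
               bool received_end_of_stream, int64_t back_timestamp_us) {
    if (frames_written != 0 || !received_end_of_stream)
      return 0;  // Underflow (more data coming) must not advance time.
    if (ended_timestamp_us == std::numeric_limits<int64_t>::max())
      ended_timestamp_us = back_timestamp_us;  // Pin the final media time.
    return requested_frames;  // Keep the clock ticking past end of stream.
  }

  // True exactly once, when playback reaches the pinned final timestamp.
  bool ShouldSignalEnded(int64_t front_timestamp_us) {
    if (front_timestamp_us >= ended_timestamp_us &&
        !rendered_end_of_stream) {
      rendered_end_of_stream = true;
      return true;
    }
    return false;
  }
};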
diff --git a/media/filters/audio_renderer_impl.h b/media/filters/audio_renderer_impl.h
index 99d6200cab..ac8c3adcda 100644
--- a/media/filters/audio_renderer_impl.h
+++ b/media/filters/audio_renderer_impl.h
@@ -29,6 +29,7 @@
#include "media/base/audio_renderer.h"
#include "media/base/audio_renderer_sink.h"
#include "media/base/decryptor.h"
+#include "media/base/media_log.h"
#include "media/base/time_source.h"
#include "media/filters/audio_renderer_algorithm.h"
#include "media/filters/decoder_stream.h"
@@ -64,7 +65,8 @@ class MEDIA_EXPORT AudioRendererImpl
AudioRendererSink* sink,
ScopedVector<AudioDecoder> decoders,
const SetDecryptorReadyCB& set_decryptor_ready_cb,
- AudioHardwareConfig* hardware_params);
+ const AudioHardwareConfig& hardware_params,
+ const scoped_refptr<MediaLog>& media_log);
virtual ~AudioRendererImpl();
// TimeSource implementation.
@@ -73,12 +75,12 @@ class MEDIA_EXPORT AudioRendererImpl
virtual void SetPlaybackRate(float rate) OVERRIDE;
virtual void SetMediaTime(base::TimeDelta time) OVERRIDE;
virtual base::TimeDelta CurrentMediaTime() OVERRIDE;
+ virtual base::TimeDelta CurrentMediaTimeForSyncingVideo() OVERRIDE;
// AudioRenderer implementation.
virtual void Initialize(DemuxerStream* stream,
const PipelineStatusCB& init_cb,
const StatisticsCB& statistics_cb,
- const TimeCB& time_cb,
const BufferingStateCB& buffering_state_cb,
const base::Closure& ended_cb,
const PipelineStatusCB& error_cb) OVERRIDE;
@@ -206,14 +208,13 @@ class MEDIA_EXPORT AudioRendererImpl
scoped_ptr<AudioBufferStream> audio_buffer_stream_;
// Interface to the hardware audio params.
- const AudioHardwareConfig* const hardware_config_;
+ const AudioHardwareConfig& hardware_config_;
// Cached copy of hardware params from |hardware_config_|.
AudioParameters audio_parameters_;
// Callbacks provided during Initialize().
PipelineStatusCB init_cb_;
- TimeCB time_cb_;
BufferingStateCB buffering_state_cb_;
base::Closure ended_cb_;
PipelineStatusCB error_cb_;
@@ -248,8 +249,17 @@ class MEDIA_EXPORT AudioRendererImpl
scoped_ptr<AudioClock> audio_clock_;
+ // The media timestamp to begin playback at after seeking. Set via
+ // SetMediaTime().
base::TimeDelta start_timestamp_;
- base::TimeDelta last_timestamp_update_;
+
+ // The media timestamp to signal end of audio playback. Determined during
+ // Render() when writing the final frames of decoded audio data.
+ base::TimeDelta ended_timestamp_;
+
+ // Set every Render() and used to provide an interpolated time value to
+ // CurrentMediaTimeForSyncingVideo().
+ base::TimeTicks last_render_ticks_;
// End variables which must be accessed under |lock_|. ----------------------
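
[editor's note] A sketch of the interpolation CurrentMediaTimeForSyncingVideo() performs with the members documented above; it assumes a Chromium checkout for the base:: and media:: types:

#include "base/time/time.h"
#include "media/filters/audio_clock.h"

base::TimeDelta InterpolatedMediaTime(const media::AudioClock& clock,
                                      base::TimeTicks last_render_ticks) {
  // Before the first Render() there is nothing to interpolate from.
  if (last_render_ticks.is_null())
    return clock.front_timestamp();

  // Result is bounded between front_timestamp() and back_timestamp().
  return clock.TimestampSinceWriting(base::TimeTicks::Now() -
                                     last_render_ticks);
}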
diff --git a/media/filters/audio_renderer_impl_unittest.cc b/media/filters/audio_renderer_impl_unittest.cc
index 98d5e7fa3d..288d27ccb6 100644
--- a/media/filters/audio_renderer_impl_unittest.cc
+++ b/media/filters/audio_renderer_impl_unittest.cc
@@ -61,8 +61,6 @@ class AudioRendererImplTest : public ::testing::Test {
: hardware_config_(AudioParameters(), AudioParameters()),
demuxer_stream_(DemuxerStream::AUDIO),
decoder_(new MockAudioDecoder()),
- last_time_update_(kNoTimestamp()),
- last_max_time_(kNoTimestamp()),
ended_(false) {
AudioDecoderConfig audio_config(kCodec,
kSampleFormat,
@@ -95,10 +93,11 @@ class AudioRendererImplTest : public ::testing::Test {
decoders.push_back(decoder_);
sink_ = new FakeAudioRendererSink();
renderer_.reset(new AudioRendererImpl(message_loop_.message_loop_proxy(),
- sink_,
+ sink_.get(),
decoders.Pass(),
SetDecryptorReadyCB(),
- &hardware_config_));
+ hardware_config_,
+ new MediaLog()));
}
virtual ~AudioRendererImplTest() {
@@ -115,20 +114,12 @@ class AudioRendererImplTest : public ::testing::Test {
MOCK_METHOD1(OnBufferingStateChange, void(BufferingState));
MOCK_METHOD1(OnError, void(PipelineStatus));
- void OnAudioTimeCallback(TimeDelta current_time, TimeDelta max_time) {
- CHECK(current_time <= max_time);
- last_time_update_ = current_time;
- last_max_time_ = max_time;
- }
-
void InitializeRenderer(const PipelineStatusCB& pipeline_status_cb) {
renderer_->Initialize(
&demuxer_stream_,
pipeline_status_cb,
base::Bind(&AudioRendererImplTest::OnStatistics,
base::Unretained(this)),
- base::Bind(&AudioRendererImplTest::OnAudioTimeCallback,
- base::Unretained(this)),
base::Bind(&AudioRendererImplTest::OnBufferingStateChange,
base::Unretained(this)),
base::Bind(&AudioRendererImplTest::OnEnded,
@@ -332,12 +323,10 @@ class AudioRendererImplTest : public ::testing::Test {
return renderer_->splicer_->HasNextBuffer();
}
- base::TimeDelta last_time_update() const {
- return last_time_update_;
+ base::TimeDelta CurrentMediaTime() {
+ return renderer_->CurrentMediaTime();
}
- base::TimeDelta last_max_time() const { return last_max_time_; }
-
bool ended() const { return ended_; }
// Fixture members.
@@ -379,7 +368,7 @@ class AudioRendererImplTest : public ::testing::Test {
void DeliverBuffer(AudioDecoder::Status status,
const scoped_refptr<AudioBuffer>& buffer) {
CHECK(!decode_cb_.is_null());
- if (buffer && !buffer->end_of_stream())
+ if (buffer.get() && !buffer->end_of_stream())
output_cb_.Run(buffer);
base::ResetAndReturn(&decode_cb_).Run(status);
@@ -407,8 +396,6 @@ class AudioRendererImplTest : public ::testing::Test {
base::Closure wait_for_pending_decode_cb_;
PipelineStatusCB init_decoder_cb_;
- base::TimeDelta last_time_update_;
- base::TimeDelta last_max_time_;
bool ended_;
DISALLOW_COPY_AND_ASSIGN(AudioRendererImplTest);
@@ -632,34 +619,27 @@ TEST_F(AudioRendererImplTest, TimeUpdatesOnFirstBuffer) {
StartTicking();
AudioTimestampHelper timestamp_helper(kOutputSamplesPerSecond);
- EXPECT_EQ(kNoTimestamp(), last_time_update());
- EXPECT_EQ(kNoTimestamp(), last_max_time());
+ timestamp_helper.SetBaseTimestamp(base::TimeDelta());
- // Preroll() should have buffered some data; consume half of it now.
+ // Time should be the starting timestamp as nothing's been consumed yet.
+ EXPECT_EQ(timestamp_helper.GetTimestamp(), CurrentMediaTime());
+
+ // Consume some audio data.
OutputFrames frames_to_consume(frames_buffered().value / 2);
EXPECT_TRUE(ConsumeBufferedData(frames_to_consume));
WaitForPendingRead();
- base::RunLoop().RunUntilIdle();
- // ConsumeBufferedData() uses an audio delay of zero, so ensure we received
- // a time update that's equal to |kFramesToConsume| from above.
- timestamp_helper.SetBaseTimestamp(base::TimeDelta());
- timestamp_helper.AddFrames(frames_to_consume.value);
- EXPECT_EQ(base::TimeDelta(), last_time_update());
- EXPECT_EQ(timestamp_helper.GetTimestamp(), last_max_time());
+ // Time shouldn't change just yet because we've only sent the initial audio
+ // data to the hardware.
+ EXPECT_EQ(timestamp_helper.GetTimestamp(), CurrentMediaTime());
- // The next time update should match the remaining frames_buffered(), but only
- // after running the message loop.
+ // Consume some more audio data.
frames_to_consume = frames_buffered();
EXPECT_TRUE(ConsumeBufferedData(frames_to_consume));
- EXPECT_EQ(base::TimeDelta(), last_time_update());
- EXPECT_EQ(timestamp_helper.GetTimestamp(), last_max_time());
- // Now the times should be updated.
- base::RunLoop().RunUntilIdle();
- EXPECT_EQ(timestamp_helper.GetTimestamp(), last_time_update());
+ // Time should now change since the audio hardware has called back.
timestamp_helper.AddFrames(frames_to_consume.value);
- EXPECT_EQ(timestamp_helper.GetTimestamp(), last_max_time());
+ EXPECT_EQ(timestamp_helper.GetTimestamp(), CurrentMediaTime());
}
TEST_F(AudioRendererImplTest, ImmediateEndOfStream) {
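
[editor's note] The rewritten test derives its expectations with AudioTimestampHelper, which accumulates an exact frame count and converts it back to media time. A sketch of that derivation (assumes a Chromium checkout; the zero base timestamp matches the test, the helper function is hypothetical):

#include "base/time/time.h"
#include "media/base/audio_timestamp_helper.h"

base::TimeDelta ExpectedTimeAfter(int frames, int sample_rate) {
  media::AudioTimestampHelper helper(sample_rate);
  helper.SetBaseTimestamp(base::TimeDelta());  // Playback starts at zero.
  helper.AddFrames(frames);                    // Frames consumed so far.
  return helper.GetTimestamp();  // frames / sample_rate, as a TimeDelta.
}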
diff --git a/media/filters/chunk_demuxer.cc b/media/filters/chunk_demuxer.cc
index ff4337d157..505d9c1f69 100644
--- a/media/filters/chunk_demuxer.cc
+++ b/media/filters/chunk_demuxer.cc
@@ -90,6 +90,8 @@ class SourceState {
typedef base::Callback<ChunkDemuxerStream*(
DemuxerStream::Type)> CreateDemuxerStreamCB;
+ typedef ChunkDemuxer::InitSegmentReceivedCB InitSegmentReceivedCB;
+
typedef base::Callback<void(
ChunkDemuxerStream*, const TextTrackConfig&)> NewTextTrackCB;
@@ -111,11 +113,14 @@ class SourceState {
// error occurred. |*timestamp_offset| is used and possibly updated by the
// append. |append_window_start| and |append_window_end| correspond to the MSE
// spec's similarly named source buffer attributes that are used in coded
- // frame processing.
- bool Append(const uint8* data, size_t length,
+ // frame processing. |init_segment_received_cb| is run for each new fully
+ // parsed initialization segment.
+ bool Append(const uint8* data,
+ size_t length,
TimeDelta append_window_start,
TimeDelta append_window_end,
- TimeDelta* timestamp_offset);
+ TimeDelta* timestamp_offset,
+ const InitSegmentReceivedCB& init_segment_received_cb);
// Aborts the current append sequence and resets the parser.
void Abort(TimeDelta append_window_start,
@@ -156,9 +161,10 @@ class SourceState {
void MarkEndOfStream();
void UnmarkEndOfStream();
void Shutdown();
- // Sets the memory limit on each stream. |memory_limit| is the
- // maximum number of bytes each stream is allowed to hold in its buffer.
- void SetMemoryLimitsForTesting(int memory_limit);
+ // Sets the memory limit on each stream of a specific type.
+ // |memory_limit| is the maximum number of bytes each stream of type |type|
+ // is allowed to hold in its buffer.
+ void SetMemoryLimits(DemuxerStream::Type type, int memory_limit);
bool IsSeekWaitingForData() const;
private:
@@ -231,6 +237,13 @@ class SourceState {
LogCB log_cb_;
StreamParser::InitCB init_cb_;
+ // During Append(), OnNewConfigs() will trigger the initialization segment
+ // received algorithm. This callback is only non-NULL during the lifetime of
+ // an Append() call. Note, the MSE spec explicitly disallows this algorithm
+ // during an Abort(), since Abort() is allowed only to emit coded frames, and
+ // only if the parser is PARSING_MEDIA_SEGMENT (not an INIT segment).
+ InitSegmentReceivedCB init_segment_received_cb_;
+
// Indicates that timestampOffset should be updated automatically during
// OnNewBuffers() based on the earliest end timestamp of the buffers provided.
// TODO(wolenetz): Refactor this function while integrating April 29, 2014
@@ -299,20 +312,27 @@ void SourceState::SetGroupStartTimestampIfInSequenceMode(
frame_processor_->SetGroupStartTimestampIfInSequenceMode(timestamp_offset);
}
-bool SourceState::Append(const uint8* data, size_t length,
- TimeDelta append_window_start,
- TimeDelta append_window_end,
- TimeDelta* timestamp_offset) {
+bool SourceState::Append(
+ const uint8* data,
+ size_t length,
+ TimeDelta append_window_start,
+ TimeDelta append_window_end,
+ TimeDelta* timestamp_offset,
+ const InitSegmentReceivedCB& init_segment_received_cb) {
DCHECK(timestamp_offset);
DCHECK(!timestamp_offset_during_append_);
+ DCHECK(!init_segment_received_cb.is_null());
+ DCHECK(init_segment_received_cb_.is_null());
append_window_start_during_append_ = append_window_start;
append_window_end_during_append_ = append_window_end;
timestamp_offset_during_append_ = timestamp_offset;
+ init_segment_received_cb_= init_segment_received_cb;
// TODO(wolenetz/acolwell): Curry and pass a NewBuffersCB here bound with
// append window and timestamp offset pointer. See http://crbug.com/351454.
bool err = stream_parser_->Parse(data, length);
timestamp_offset_during_append_ = NULL;
+ init_segment_received_cb_.Reset();
return err;
}
@@ -485,16 +505,26 @@ void SourceState::Shutdown() {
}
}
-void SourceState::SetMemoryLimitsForTesting(int memory_limit) {
- if (audio_)
- audio_->set_memory_limit_for_testing(memory_limit);
-
- if (video_)
- video_->set_memory_limit_for_testing(memory_limit);
-
- for (TextStreamMap::iterator itr = text_stream_map_.begin();
- itr != text_stream_map_.end(); ++itr) {
- itr->second->set_memory_limit_for_testing(memory_limit);
+void SourceState::SetMemoryLimits(DemuxerStream::Type type, int memory_limit) {
+ switch (type) {
+ case DemuxerStream::AUDIO:
+ if (audio_)
+ audio_->set_memory_limit(memory_limit);
+ break;
+ case DemuxerStream::VIDEO:
+ if (video_)
+ video_->set_memory_limit(memory_limit);
+ break;
+ case DemuxerStream::TEXT:
+ for (TextStreamMap::iterator itr = text_stream_map_.begin();
+ itr != text_stream_map_.end(); ++itr) {
+ itr->second->set_memory_limit(memory_limit);
+ }
+ break;
+ case DemuxerStream::UNKNOWN:
+ case DemuxerStream::NUM_TYPES:
+ NOTREACHED();
+ break;
}
}
@@ -523,6 +553,7 @@ bool SourceState::OnNewConfigs(
DVLOG(1) << "OnNewConfigs(" << allow_audio << ", " << allow_video
<< ", " << audio_config.IsValidConfig()
<< ", " << video_config.IsValidConfig() << ")";
+ DCHECK(!init_segment_received_cb_.is_null());
if (!audio_config.IsValidConfig() && !video_config.IsValidConfig()) {
DVLOG(1) << "OnNewConfigs() : Audio & video config are not valid!";
@@ -665,6 +696,9 @@ bool SourceState::OnNewConfigs(
frame_processor_->SetAllTrackBuffersNeedRandomAccessPoint();
DVLOG(1) << "OnNewConfigs() : " << (success ? "success" : "failed");
+ if (success)
+ init_segment_received_cb_.Run();
+
return success;
}
@@ -933,8 +967,6 @@ void ChunkDemuxerStream::Read(const ReadCB& read_cb) {
DemuxerStream::Type ChunkDemuxerStream::type() { return type_; }
-void ChunkDemuxerStream::EnableBitstreamConverter() {}
-
AudioDecoderConfig ChunkDemuxerStream::audio_decoder_config() {
CHECK_EQ(type_, AUDIO);
base::AutoLock auto_lock(lock_);
@@ -1057,10 +1089,9 @@ void ChunkDemuxer::Initialize(
base::ResetAndReturn(&open_cb_).Run();
}
-void ChunkDemuxer::Stop(const base::Closure& callback) {
+void ChunkDemuxer::Stop() {
DVLOG(1) << "Stop()";
Shutdown();
- callback.Run();
}
void ChunkDemuxer::Seek(TimeDelta time, const PipelineStatusCB& cb) {
@@ -1094,6 +1125,10 @@ void ChunkDemuxer::Seek(TimeDelta time, const PipelineStatusCB& cb) {
}
// Demuxer implementation.
+base::Time ChunkDemuxer::GetTimelineOffset() const {
+ return timeline_offset_;
+}
+
DemuxerStream* ChunkDemuxer::GetStream(DemuxerStream::Type type) {
DCHECK_NE(type, DemuxerStream::TEXT);
base::AutoLock auto_lock(lock_);
@@ -1106,8 +1141,8 @@ DemuxerStream* ChunkDemuxer::GetStream(DemuxerStream::Type type) {
return NULL;
}
-base::Time ChunkDemuxer::GetTimelineOffset() const {
- return timeline_offset_;
+TimeDelta ChunkDemuxer::GetStartTime() const {
+ return TimeDelta();
}
Demuxer::Liveness ChunkDemuxer::GetLiveness() const {
@@ -1230,15 +1265,19 @@ Ranges<TimeDelta> ChunkDemuxer::GetBufferedRanges(const std::string& id) const {
return itr->second->GetBufferedRanges(duration_, state_ == ENDED);
}
-void ChunkDemuxer::AppendData(const std::string& id,
- const uint8* data, size_t length,
- TimeDelta append_window_start,
- TimeDelta append_window_end,
- TimeDelta* timestamp_offset) {
+void ChunkDemuxer::AppendData(
+ const std::string& id,
+ const uint8* data,
+ size_t length,
+ TimeDelta append_window_start,
+ TimeDelta append_window_end,
+ TimeDelta* timestamp_offset,
+ const InitSegmentReceivedCB& init_segment_received_cb) {
DVLOG(1) << "AppendData(" << id << ", " << length << ")";
DCHECK(!id.empty());
DCHECK(timestamp_offset);
+ DCHECK(!init_segment_received_cb.is_null());
Ranges<TimeDelta> ranges;
@@ -1261,7 +1300,8 @@ void ChunkDemuxer::AppendData(const std::string& id,
if (!source_state_map_[id]->Append(data, length,
append_window_start,
append_window_end,
- timestamp_offset)) {
+ timestamp_offset,
+ init_segment_received_cb)) {
ReportError_Locked(DEMUXER_ERROR_COULD_NOT_OPEN);
return;
}
@@ -1272,7 +1312,8 @@ void ChunkDemuxer::AppendData(const std::string& id,
if (!source_state_map_[id]->Append(data, length,
append_window_start,
append_window_end,
- timestamp_offset)) {
+ timestamp_offset,
+ init_segment_received_cb)) {
ReportError_Locked(PIPELINE_ERROR_DECODE);
return;
}
@@ -1503,10 +1544,10 @@ void ChunkDemuxer::Shutdown() {
base::ResetAndReturn(&seek_cb_).Run(PIPELINE_ERROR_ABORT);
}
-void ChunkDemuxer::SetMemoryLimitsForTesting(int memory_limit) {
+void ChunkDemuxer::SetMemoryLimits(DemuxerStream::Type type, int memory_limit) {
for (SourceStateMap::iterator itr = source_state_map_.begin();
itr != source_state_map_.end(); ++itr) {
- itr->second->SetMemoryLimitsForTesting(memory_limit);
+ itr->second->SetMemoryLimits(type, memory_limit);
}
}
@@ -1605,7 +1646,7 @@ void ChunkDemuxer::OnSourceInitDone(
return;
}
- SeekAllSources(base::TimeDelta());
+ SeekAllSources(GetStartTime());
StartReturningData();
if (duration_ == kNoTimestamp())
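[Editor's note] The AppendData() change above threads a client-supplied InitSegmentReceivedCB through to SourceState::Append(). A minimal caller sketch, assuming a ChunkDemuxer* |demuxer| whose source |id| was already registered via AddId(); the helper names below are illustrative, not part of this patch:

    void OnInitSegmentReceived() {
      // Runs once per successfully parsed initialization segment.
      DVLOG(1) << "init segment received";
    }

    void AppendBytes(ChunkDemuxer* demuxer, const std::string& id,
                     const uint8* data, size_t length) {
      base::TimeDelta timestamp_offset;
      demuxer->AppendData(id, data, length,
                          base::TimeDelta(),    // append_window_start
                          kInfiniteDuration(),  // append_window_end
                          &timestamp_offset,
                          base::Bind(&OnInitSegmentReceived));
    }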
diff --git a/media/filters/chunk_demuxer.h b/media/filters/chunk_demuxer.h
index 476780bff2..4837dff49f 100644
--- a/media/filters/chunk_demuxer.h
+++ b/media/filters/chunk_demuxer.h
@@ -82,7 +82,6 @@ class MEDIA_EXPORT ChunkDemuxerStream : public DemuxerStream {
// DemuxerStream methods.
virtual void Read(const ReadCB& read_cb) OVERRIDE;
virtual Type type() OVERRIDE;
- virtual void EnableBitstreamConverter() OVERRIDE;
virtual AudioDecoderConfig audio_decoder_config() OVERRIDE;
virtual VideoDecoderConfig video_decoder_config() OVERRIDE;
virtual bool SupportsConfigChanges() OVERRIDE;
@@ -93,8 +92,8 @@ class MEDIA_EXPORT ChunkDemuxerStream : public DemuxerStream {
TextTrackConfig text_track_config();
// Sets the memory limit, in bytes, on the SourceBufferStream.
- void set_memory_limit_for_testing(int memory_limit) {
- stream_->set_memory_limit_for_testing(memory_limit);
+ void set_memory_limit(int memory_limit) {
+ stream_->set_memory_limit(memory_limit);
}
bool supports_partial_append_window_trimming() const {
@@ -138,6 +137,8 @@ class MEDIA_EXPORT ChunkDemuxer : public Demuxer {
kReachedIdLimit, // Reached ID limit. We can't handle any more IDs.
};
+ typedef base::Closure InitSegmentReceivedCB;
+
// |open_cb| Run when Initialize() is called to signal that the demuxer
  // is ready to receive media data via AppendData().
// |need_key_cb| Run when the demuxer determines that an encryption key is
@@ -159,10 +160,11 @@ class MEDIA_EXPORT ChunkDemuxer : public Demuxer {
virtual void Initialize(DemuxerHost* host,
const PipelineStatusCB& cb,
bool enable_text_tracks) OVERRIDE;
- virtual void Stop(const base::Closure& callback) OVERRIDE;
+ virtual void Stop() OVERRIDE;
virtual void Seek(base::TimeDelta time, const PipelineStatusCB& cb) OVERRIDE;
- virtual DemuxerStream* GetStream(DemuxerStream::Type type) OVERRIDE;
virtual base::Time GetTimelineOffset() const OVERRIDE;
+ virtual DemuxerStream* GetStream(DemuxerStream::Type type) OVERRIDE;
+ virtual base::TimeDelta GetStartTime() const OVERRIDE;
virtual Liveness GetLiveness() const OVERRIDE;
// Methods used by an external object to control this demuxer.
@@ -212,10 +214,13 @@ class MEDIA_EXPORT ChunkDemuxer : public Demuxer {
// |append_window_start| and |append_window_end| correspond to the MSE spec's
// similarly named source buffer attributes that are used in coded frame
// processing.
+  // |init_segment_received_cb| is run once for each successfully parsed
+  // initialization segment.
void AppendData(const std::string& id, const uint8* data, size_t length,
base::TimeDelta append_window_start,
base::TimeDelta append_window_end,
- base::TimeDelta* timestamp_offset);
+ base::TimeDelta* timestamp_offset,
+ const InitSegmentReceivedCB& init_segment_received_cb);
  // Aborts parsing the current segment and resets the parser to a state where
// it can accept a new segment.
@@ -263,9 +268,10 @@ class MEDIA_EXPORT ChunkDemuxer : public Demuxer {
void Shutdown();
- // Sets the memory limit on each stream. |memory_limit| is the
- // maximum number of bytes each stream is allowed to hold in its buffer.
- void SetMemoryLimitsForTesting(int memory_limit);
+ // Sets the memory limit on each stream of a specific type.
+ // |memory_limit| is the maximum number of bytes each stream of type |type|
+ // is allowed to hold in its buffer.
+ void SetMemoryLimits(DemuxerStream::Type type, int memory_limit);
// Returns the ranges representing the buffered data in the demuxer.
// TODO(wolenetz): Remove this method once MediaSourceDelegate no longer
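[Editor's note] With the per-type signature above, callers can bound audio and video buffering independently rather than applying one testing-only limit everywhere. A usage sketch with illustrative values:

    // Allow each audio stream up to 12 MB and each video stream up to
    // 150 MB of buffered data before garbage collection kicks in.
    demuxer->SetMemoryLimits(DemuxerStream::AUDIO, 12 * 1024 * 1024);
    demuxer->SetMemoryLimits(DemuxerStream::VIDEO, 150 * 1024 * 1024);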
diff --git a/media/filters/chunk_demuxer_unittest.cc b/media/filters/chunk_demuxer_unittest.cc
index 4ac5616b72..6271323cbb 100644
--- a/media/filters/chunk_demuxer_unittest.cc
+++ b/media/filters/chunk_demuxer_unittest.cc
@@ -162,6 +162,9 @@ class ChunkDemuxerTest : public ::testing::Test {
ChunkDemuxerTest()
: append_window_end_for_next_append_(kInfiniteDuration()) {
+ init_segment_received_cb_ =
+ base::Bind(&ChunkDemuxerTest::InitSegmentReceived,
+ base::Unretained(this));
CreateNewDemuxer();
}
@@ -573,7 +576,8 @@ class ChunkDemuxerTest : public ::testing::Test {
demuxer_->AppendData(source_id, data, length,
append_window_start_for_next_append_,
append_window_end_for_next_append_,
- &timestamp_offset_map_[source_id]);
+ &timestamp_offset_map_[source_id],
+ init_segment_received_cb_);
}
void AppendDataInPieces(const uint8* data, size_t length) {
@@ -664,6 +668,14 @@ class ChunkDemuxerTest : public ::testing::Test {
expected_duration = kDefaultDuration();
EXPECT_CALL(*this, DemuxerOpened());
+
+ // Add this expectation prior to CreateInitDoneCB() because InSequence
+ // tests require the init segment to be received before the duration is
+ // set. Also, only expect an init segment received callback if there is
+ // actually a track in the segment.
+ if (stream_flags != 0)
+ EXPECT_CALL(*this, InitSegmentReceived());
+
demuxer_->Initialize(
&host_, CreateInitDoneCB(expected_duration, expected_status), true);
@@ -696,7 +708,9 @@ class ChunkDemuxerTest : public ::testing::Test {
video_flags |= HAS_TEXT;
}
+ EXPECT_CALL(*this, InitSegmentReceived());
AppendInitSegmentWithSourceId(audio_id, audio_flags);
+ EXPECT_CALL(*this, InitSegmentReceived());
AppendInitSegmentWithSourceId(video_id, video_flags);
return true;
}
@@ -729,6 +743,9 @@ class ChunkDemuxerTest : public ::testing::Test {
EXPECT_CALL(*this, DemuxerOpened());
+ // Add this expectation prior to CreateInitDoneCB() because InSequence
+ // tests require the init segment to be received before the duration is set.
+ EXPECT_CALL(*this, InitSegmentReceived());
demuxer_->Initialize(
&host_, CreateInitDoneCB(base::TimeDelta::FromMilliseconds(2744),
PIPELINE_OK), true);
@@ -755,6 +772,7 @@ class ChunkDemuxerTest : public ::testing::Test {
// media/test/data/bear-320x240-manifest.js which were
// generated from media/test/data/bear-640x360.webm and
// media/test/data/bear-320x240.webm respectively.
+ EXPECT_CALL(*this, InitSegmentReceived());
AppendData(bear2->data(), 4340);
// Append a media segment that goes from [0.527000, 1.014000).
@@ -763,6 +781,7 @@ class ChunkDemuxerTest : public ::testing::Test {
// Append initialization segment for bear1 & fill gap with [779-1197)
// segment.
+ EXPECT_CALL(*this, InitSegmentReceived());
AppendData(bear1->data(), 4370);
AppendData(bear1->data() + 72737, 28183);
CheckExpectedRanges(kSourceId, "{ [0,2736) }");
@@ -962,8 +981,19 @@ class ChunkDemuxerTest : public ::testing::Test {
void CheckExpectedRanges(const std::string& id,
const std::string& expected) {
- Ranges<base::TimeDelta> r = demuxer_->GetBufferedRanges(id);
+ CheckExpectedRanges(demuxer_->GetBufferedRanges(id), expected);
+ }
+
+ void CheckExpectedRanges(DemuxerStream::Type type,
+ const std::string& expected) {
+ ChunkDemuxerStream* stream =
+ static_cast<ChunkDemuxerStream*>(demuxer_->GetStream(type));
+ CheckExpectedRanges(stream->GetBufferedRanges(kDefaultDuration()),
+ expected);
+ }
+ void CheckExpectedRanges(const Ranges<base::TimeDelta>& r,
+ const std::string& expected) {
std::stringstream ss;
ss << "{ ";
for (size_t i = 0; i < r.size(); ++i) {
@@ -1087,6 +1117,7 @@ class ChunkDemuxerTest : public ::testing::Test {
// Read a WebM file into memory and send the data to the demuxer.
scoped_refptr<DecoderBuffer> buffer = ReadTestDataFile(filename);
+ EXPECT_CALL(*this, InitSegmentReceived());
AppendDataInPieces(buffer->data(), buffer->data_size(), 512);
// Verify that the timestamps on the first few packets match what we
@@ -1131,6 +1162,8 @@ class ChunkDemuxerTest : public ::testing::Test {
NeedKeyMock(type, init_data_ptr, init_data.size());
}
+ MOCK_METHOD0(InitSegmentReceived, void(void));
+
void Seek(base::TimeDelta seek_time) {
demuxer_->StartWaitingForSeek(seek_time);
demuxer_->Seek(seek_time, NewExpectedStatusCB(PIPELINE_OK));
@@ -1155,6 +1188,7 @@ class ChunkDemuxerTest : public ::testing::Test {
MockDemuxerHost host_;
scoped_ptr<ChunkDemuxer> demuxer_;
+ ChunkDemuxer::InitSegmentReceivedCB init_segment_received_cb_;
base::TimeDelta append_window_start_for_next_append_;
base::TimeDelta append_window_end_for_next_append_;
@@ -1337,10 +1371,12 @@ TEST_F(ChunkDemuxerTest, SingleTextTrackIdChange) {
CreateInitSegmentWithAlternateTextTrackNum(HAS_TEXT | HAS_AUDIO | HAS_VIDEO,
false, false,
&info_tracks, &info_tracks_size);
+ EXPECT_CALL(*this, InitSegmentReceived());
demuxer_->AppendData(kSourceId, info_tracks.get(), info_tracks_size,
append_window_start_for_next_append_,
append_window_end_for_next_append_,
- &timestamp_offset_map_[kSourceId]);
+ &timestamp_offset_map_[kSourceId],
+ init_segment_received_cb_);
AppendMuxedCluster(
MuxedStreamInfo(kAudioTrackNum, "46K 69K"),
@@ -1378,6 +1414,7 @@ TEST_F(ChunkDemuxerTest, InitSegmentSetsNeedRandomAccessPointFlag) {
MuxedStreamInfo(kTextTrackNum, "25K 40K"));
CheckExpectedRanges(kSourceId, "{ [23,46) }");
+ EXPECT_CALL(*this, InitSegmentReceived());
AppendInitSegment(HAS_TEXT | HAS_AUDIO | HAS_VIDEO);
AppendMuxedCluster(
MuxedStreamInfo(kAudioTrackNum, "46K 69K"),
@@ -1401,6 +1438,7 @@ TEST_F(ChunkDemuxerTest, Shutdown_BeforeAllInitSegmentsAppended) {
EXPECT_EQ(AddId("audio", HAS_AUDIO), ChunkDemuxer::kOk);
EXPECT_EQ(AddId("video", HAS_VIDEO), ChunkDemuxer::kOk);
+ EXPECT_CALL(*this, InitSegmentReceived());
AppendInitSegmentWithSourceId("audio", HAS_AUDIO);
ShutdownDemuxer();
@@ -1418,6 +1456,7 @@ TEST_F(ChunkDemuxerTest, Shutdown_BeforeAllInitSegmentsAppendedText) {
EXPECT_CALL(host_, AddTextStream(_, _))
.Times(Exactly(1));
+ EXPECT_CALL(*this, InitSegmentReceived());
AppendInitSegmentWithSourceId("video_and_text", HAS_VIDEO | HAS_TEXT);
ShutdownDemuxer();
@@ -1526,7 +1565,8 @@ TEST_F(ChunkDemuxerTest, AppendDataBeforeInit) {
demuxer_->AppendData(kSourceId, info_tracks.get(), info_tracks_size,
append_window_start_for_next_append_,
append_window_end_for_next_append_,
- &timestamp_offset_map_[kSourceId]);
+ &timestamp_offset_map_[kSourceId],
+ init_segment_received_cb_);
}
// Make sure Read() callbacks are dispatched with the proper data.
@@ -1562,7 +1602,8 @@ TEST_F(ChunkDemuxerTest, OutOfOrderClusters) {
demuxer_->AppendData(kSourceId, cluster_c->data(), cluster_c->size(),
append_window_start_for_next_append_,
append_window_end_for_next_append_,
- &timestamp_offset_map_[kSourceId]);
+ &timestamp_offset_map_[kSourceId],
+ init_segment_received_cb_);
}
TEST_F(ChunkDemuxerTest, NonMonotonicButAboveClusterTimecode) {
@@ -1587,7 +1628,8 @@ TEST_F(ChunkDemuxerTest, NonMonotonicButAboveClusterTimecode) {
demuxer_->AppendData(kSourceId, cluster_b->data(), cluster_b->size(),
append_window_start_for_next_append_,
append_window_end_for_next_append_,
- &timestamp_offset_map_[kSourceId]);
+ &timestamp_offset_map_[kSourceId],
+ init_segment_received_cb_);
}
TEST_F(ChunkDemuxerTest, BackwardsAndBeforeClusterTimecode) {
@@ -1612,7 +1654,8 @@ TEST_F(ChunkDemuxerTest, BackwardsAndBeforeClusterTimecode) {
demuxer_->AppendData(kSourceId, cluster_b->data(), cluster_b->size(),
append_window_start_for_next_append_,
append_window_end_for_next_append_,
- &timestamp_offset_map_[kSourceId]);
+ &timestamp_offset_map_[kSourceId],
+ init_segment_received_cb_);
}
@@ -1923,6 +1966,7 @@ TEST_F(ChunkDemuxerTest, AppendingInPieces) {
memcpy(dst, cluster_b->data(), cluster_b->size());
dst += cluster_b->size();
+ EXPECT_CALL(*this, InitSegmentReceived());
AppendDataInPieces(buffer.get(), buffer_size);
GenerateExpectedReads(0, 9);
@@ -2081,7 +2125,8 @@ TEST_F(ChunkDemuxerTest, ParseErrorDuringInit) {
demuxer_->AppendData(kSourceId, &tmp, 1,
append_window_start_for_next_append_,
append_window_end_for_next_append_,
- &timestamp_offset_map_[kSourceId]);
+ &timestamp_offset_map_[kSourceId],
+ init_segment_received_cb_);
}
TEST_F(ChunkDemuxerTest, AVHeadersWithAudioOnlyType) {
@@ -2118,6 +2163,7 @@ TEST_F(ChunkDemuxerTest, MultipleHeaders) {
AppendCluster(kDefaultFirstCluster());
// Append another identical initialization segment.
+ EXPECT_CALL(*this, InitSegmentReceived());
AppendInitSegment(HAS_AUDIO | HAS_VIDEO);
AppendCluster(kDefaultSecondCluster());
@@ -2172,6 +2218,7 @@ TEST_F(ChunkDemuxerTest, AddIdFailures) {
// Adding an id with audio/video should fail because we already added audio.
ASSERT_EQ(AddId(), ChunkDemuxer::kReachedIdLimit);
+ EXPECT_CALL(*this, InitSegmentReceived());
AppendInitSegmentWithSourceId(audio_id, HAS_AUDIO);
// Adding an id after append should fail.
@@ -2404,6 +2451,7 @@ TEST_F(ChunkDemuxerTest, GetBufferedRanges_AudioIdOnly) {
&host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
ASSERT_EQ(AddId(kSourceId, HAS_AUDIO), ChunkDemuxer::kOk);
+ EXPECT_CALL(*this, InitSegmentReceived());
AppendInitSegment(HAS_AUDIO);
// Test a simple cluster.
@@ -2426,6 +2474,7 @@ TEST_F(ChunkDemuxerTest, GetBufferedRanges_VideoIdOnly) {
&host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
ASSERT_EQ(AddId(kSourceId, HAS_VIDEO), ChunkDemuxer::kOk);
+ EXPECT_CALL(*this, InitSegmentReceived());
AppendInitSegment(HAS_VIDEO);
// Test a simple cluster.
@@ -3004,6 +3053,7 @@ TEST_F(ChunkDemuxerTest, EmitBuffersDuringAbort) {
// PTS: 353788 (0x000565fc) [= 90 kHz-Timestamp: 0:00:03.9309]
scoped_refptr<DecoderBuffer> buffer = ReadTestDataFile("bear-1280x720.ts");
+ EXPECT_CALL(*this, InitSegmentReceived());
AppendData(kSourceId, buffer->data(), buffer->data_size());
// Confirm we're in the middle of parsing a media segment.
@@ -3047,6 +3097,7 @@ TEST_F(ChunkDemuxerTest, SeekCompleteDuringAbort) {
// PTS: 353788 (0x000565fc) [= 90 kHz-Timestamp: 0:00:03.9309]
scoped_refptr<DecoderBuffer> buffer = ReadTestDataFile("bear-1280x720.ts");
+ EXPECT_CALL(*this, InitSegmentReceived());
AppendData(kSourceId, buffer->data(), buffer->data_size());
// Confirm we're in the middle of parsing a media segment.
@@ -3269,10 +3320,40 @@ TEST_F(ChunkDemuxerTest, CanceledSeekDuringInitialPreroll) {
AppendCluster(seek_time.InMilliseconds(), 10);
}
+TEST_F(ChunkDemuxerTest, SetMemoryLimitType) {
+ ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
+
+ // Set different memory limits for audio and video.
+ demuxer_->SetMemoryLimits(DemuxerStream::AUDIO, 10 * kBlockSize);
+ demuxer_->SetMemoryLimits(DemuxerStream::VIDEO, 5 * kBlockSize);
+
+ base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(1000);
+
+ // Append data at the start that can be garbage collected:
+ AppendSingleStreamCluster(kSourceId, kAudioTrackNum, 0, 10);
+ AppendSingleStreamCluster(kSourceId, kVideoTrackNum, 0, 5);
+
+ CheckExpectedRanges(DemuxerStream::AUDIO, "{ [0,230) }");
+ CheckExpectedRanges(DemuxerStream::VIDEO, "{ [0,165) }");
+
+ // Seek so we can garbage collect the data appended above.
+ Seek(seek_time);
+
+ // Append data at seek_time.
+ AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
+ seek_time.InMilliseconds(), 10);
+ AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
+ seek_time.InMilliseconds(), 5);
+
+ // Verify that the old data, and nothing more, has been garbage collected.
+ CheckExpectedRanges(DemuxerStream::AUDIO, "{ [1000,1230) }");
+ CheckExpectedRanges(DemuxerStream::VIDEO, "{ [1000,1165) }");
+}
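[Editor's note] The expected ranges here follow from this unittest's fixed block durations, presumably 23 ms per audio block and 33 ms per video block: 10 audio blocks cover [0,230) and 5 video blocks cover [0,165), so the 10-block audio limit and 5-block video limit garbage-collect exactly the pre-seek cluster of each stream.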
+
TEST_F(ChunkDemuxerTest, GCDuringSeek) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
- demuxer_->SetMemoryLimitsForTesting(5 * kBlockSize);
+ demuxer_->SetMemoryLimits(DemuxerStream::AUDIO, 5 * kBlockSize);
base::TimeDelta seek_time1 = base::TimeDelta::FromMilliseconds(1000);
base::TimeDelta seek_time2 = base::TimeDelta::FromMilliseconds(500);
@@ -3406,6 +3487,7 @@ TEST_F(ChunkDemuxerTest, AppendWindow_WebMFile_AudioOnly) {
// partial append window trim must come from a previous Append() call.
scoped_refptr<DecoderBuffer> buffer =
ReadTestDataFile("bear-320x240-audio-only.webm");
+ EXPECT_CALL(*this, InitSegmentReceived());
AppendDataInPieces(buffer->data(), buffer->data_size(), 128);
DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::AUDIO);
@@ -3432,6 +3514,7 @@ TEST_F(ChunkDemuxerTest, AppendWindow_AudioConfigUpdateRemovesPreroll) {
// Read a WebM file into memory and append the data.
scoped_refptr<DecoderBuffer> buffer =
ReadTestDataFile("bear-320x240-audio-only.webm");
+ EXPECT_CALL(*this, InitSegmentReceived());
AppendDataInPieces(buffer->data(), buffer->data_size(), 512);
CheckExpectedRanges(kSourceId, "{ }");
@@ -3441,6 +3524,7 @@ TEST_F(ChunkDemuxerTest, AppendWindow_AudioConfigUpdateRemovesPreroll) {
  // Read in a second WebM with a different config and append the data.
scoped_refptr<DecoderBuffer> buffer2 =
ReadTestDataFile("bear-320x240-audio-only-48khz.webm");
+ EXPECT_CALL(*this, InitSegmentReceived());
EXPECT_CALL(host_, SetDuration(_)).Times(AnyNumber());
ASSERT_TRUE(SetTimestampOffset(kSourceId, duration_1));
AppendDataInPieces(buffer2->data(), buffer2->data_size(), 512);
diff --git a/media/filters/decoder_stream.cc b/media/filters/decoder_stream.cc
index 0e11c4ecbc..8f2deaf984 100644
--- a/media/filters/decoder_stream.cc
+++ b/media/filters/decoder_stream.cc
@@ -42,8 +42,10 @@ template <DemuxerStream::Type StreamType>
DecoderStream<StreamType>::DecoderStream(
const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
ScopedVector<Decoder> decoders,
- const SetDecryptorReadyCB& set_decryptor_ready_cb)
+ const SetDecryptorReadyCB& set_decryptor_ready_cb,
+ const scoped_refptr<MediaLog>& media_log)
: task_runner_(task_runner),
+ media_log_(media_log),
state_(STATE_UNINITIALIZED),
stream_(NULL),
low_delay_(false),
@@ -52,6 +54,7 @@ DecoderStream<StreamType>::DecoderStream(
decoders.Pass(),
set_decryptor_ready_cb)),
active_splice_(false),
+ decoding_eos_(false),
pending_decode_requests_(0),
weak_factory_(this) {}
@@ -207,7 +210,7 @@ bool DecoderStream<StreamType>::CanDecodeMore() const {
// empty.
int num_decodes =
static_cast<int>(ready_outputs_.size()) + pending_decode_requests_;
- return num_decodes < GetMaxDecodeRequests();
+ return !decoding_eos_ && num_decodes < GetMaxDecodeRequests();
}
template <DemuxerStream::Type StreamType>
@@ -235,6 +238,12 @@ void DecoderStream<StreamType>::OnDecoderSelected(
decoder_ = selected_decoder.Pass();
decrypting_demuxer_stream_ = decrypting_demuxer_stream.Pass();
+ const std::string stream_type = DecoderStreamTraits<StreamType>::ToString();
+ media_log_->SetBooleanProperty((stream_type + "_dds").c_str(),
+ decrypting_demuxer_stream_);
+ media_log_->SetStringProperty((stream_type + "_decoder").c_str(),
+ decoder_->GetDisplayName());
+
if (StreamTraits::NeedsBitstreamConversion(decoder_.get()))
stream_->EnableBitstreamConverter();
base::ResetAndReturn(&init_cb_).Run(true);
@@ -255,11 +264,15 @@ void DecoderStream<StreamType>::Decode(
DCHECK(state_ == STATE_NORMAL || state_ == STATE_FLUSHING_DECODER) << state_;
DCHECK_LT(pending_decode_requests_, GetMaxDecodeRequests());
DCHECK(reset_cb_.is_null());
- DCHECK(buffer);
+ DCHECK(buffer.get());
int buffer_size = buffer->end_of_stream() ? 0 : buffer->data_size();
TRACE_EVENT_ASYNC_BEGIN0("media", GetTraceString<StreamType>(), this);
+
+ if (buffer->end_of_stream())
+ decoding_eos_ = true;
+
++pending_decode_requests_;
decoder_->Decode(buffer,
base::Bind(&DecoderStream<StreamType>::OnDecodeDone,
@@ -287,6 +300,9 @@ void DecoderStream<StreamType>::OnDecodeDone(int buffer_size,
TRACE_EVENT_ASYNC_END0("media", GetTraceString<StreamType>(), this);
+ if (end_of_stream)
+ decoding_eos_ = false;
+
if (state_ == STATE_ERROR) {
DCHECK(read_cb_.is_null());
return;
@@ -339,7 +355,7 @@ template <DemuxerStream::Type StreamType>
void DecoderStream<StreamType>::OnDecodeOutputReady(
const scoped_refptr<Output>& output) {
FUNCTION_DVLOG(2) << ": " << output->timestamp().InMilliseconds() << " ms";
- DCHECK(output);
+ DCHECK(output.get());
DCHECK(!output->end_of_stream());
DCHECK(state_ == STATE_NORMAL || state_ == STATE_FLUSHING_DECODER ||
state_ == STATE_PENDING_DEMUXER_READ || state_ == STATE_ERROR)
@@ -386,7 +402,8 @@ void DecoderStream<StreamType>::OnBufferReady(
DemuxerStream::Status status,
const scoped_refptr<DecoderBuffer>& buffer) {
FUNCTION_DVLOG(2) << ": " << status << ", "
- << (buffer ? buffer->AsHumanReadableString() : "NULL");
+ << (buffer.get() ? buffer->AsHumanReadableString()
+ : "NULL");
DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK(state_ == STATE_PENDING_DEMUXER_READ || state_ == STATE_ERROR)
@@ -448,7 +465,7 @@ void DecoderStream<StreamType>::OnBufferReady(
Decode(buffer);
// Read more data if the decoder supports multiple parallel decoding requests.
- if (CanDecodeMore() && !buffer->end_of_stream())
+ if (CanDecodeMore())
ReadFromDemuxerStream();
}
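[Editor's note] A compact restatement of the EOS gating added across the three hunks above; this is the existing control flow distilled, not new code:

    // Decode(): an end-of-stream buffer is entering the decoder.
    if (buffer->end_of_stream())
      decoding_eos_ = true;  // Suppress further reads until the flush ends.

    // CanDecodeMore(): no new decodes or demuxer reads while flushing EOS,
    // which replaces the caller-side !buffer->end_of_stream() check removed
    // from OnBufferReady().
    return !decoding_eos_ && num_decodes < GetMaxDecodeRequests();

    // OnDecodeDone(): the EOS buffer has drained; resume normal operation.
    if (end_of_stream)
      decoding_eos_ = false;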
diff --git a/media/filters/decoder_stream.h b/media/filters/decoder_stream.h
index d6ee126634..28587c573a 100644
--- a/media/filters/decoder_stream.h
+++ b/media/filters/decoder_stream.h
@@ -15,6 +15,7 @@
#include "media/base/decryptor.h"
#include "media/base/demuxer_stream.h"
#include "media/base/media_export.h"
+#include "media/base/media_log.h"
#include "media/base/pipeline_status.h"
#include "media/filters/decoder_selector.h"
#include "media/filters/decoder_stream_traits.h"
@@ -52,7 +53,8 @@ class MEDIA_EXPORT DecoderStream {
DecoderStream(
const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
ScopedVector<Decoder> decoders,
- const SetDecryptorReadyCB& set_decryptor_ready_cb);
+ const SetDecryptorReadyCB& set_decryptor_ready_cb,
+ const scoped_refptr<MediaLog>& media_log);
virtual ~DecoderStream();
// Initializes the DecoderStream and returns the initialization result
@@ -159,6 +161,8 @@ class MEDIA_EXPORT DecoderStream {
scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
+ scoped_refptr<MediaLog> media_log_;
+
State state_;
StatisticsCB statistics_cb_;
@@ -183,6 +187,11 @@ class MEDIA_EXPORT DecoderStream {
// splice_timestamp() of kNoTimestamp() is encountered.
bool active_splice_;
+ // An end-of-stream buffer has been sent for decoding; no more buffers
+ // should be sent for decoding until it completes.
+ // TODO(sandersd): Turn this into a State. http://crbug.com/408316
+ bool decoding_eos_;
+
// Decoded buffers that haven't been read yet. Used when the decoder supports
// parallel decoding.
std::list<scoped_refptr<Output> > ready_outputs_;
diff --git a/media/filters/decoder_stream_traits.cc b/media/filters/decoder_stream_traits.cc
index 824b912825..1c654c3b08 100644
--- a/media/filters/decoder_stream_traits.cc
+++ b/media/filters/decoder_stream_traits.cc
@@ -15,7 +15,7 @@
namespace media {
std::string DecoderStreamTraits<DemuxerStream::AUDIO>::ToString() {
- return "Audio";
+ return "audio";
}
void DecoderStreamTraits<DemuxerStream::AUDIO>::Initialize(
@@ -47,7 +47,7 @@ scoped_refptr<DecoderStreamTraits<DemuxerStream::AUDIO>::OutputType>
}
std::string DecoderStreamTraits<DemuxerStream::VIDEO>::ToString() {
- return "Video";
+ return "video";
}
void DecoderStreamTraits<DemuxerStream::VIDEO>::Initialize(
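[Editor's note] Lowercasing ToString() matters because DecoderStream::OnDecoderSelected() (see the decoder_stream.cc hunk above) now builds MediaLog property keys from it. For an audio stream that selected FFmpegAudioDecoder with no DecryptingDemuxerStream, the resulting calls would be, schematically:

    media_log_->SetBooleanProperty("audio_dds", false);  // stream_type + "_dds"
    media_log_->SetStringProperty("audio_decoder",       // stream_type + "_decoder"
                                  "FFmpegAudioDecoder"); // decoder_->GetDisplayName()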
diff --git a/media/filters/decrypting_audio_decoder.cc b/media/filters/decrypting_audio_decoder.cc
index ee50e2edac..c5494f04f1 100644
--- a/media/filters/decrypting_audio_decoder.cc
+++ b/media/filters/decrypting_audio_decoder.cc
@@ -44,6 +44,10 @@ DecryptingAudioDecoder::DecryptingAudioDecoder(
key_added_while_decode_pending_(false),
weak_factory_(this) {}
+std::string DecryptingAudioDecoder::GetDisplayName() const {
+ return "DecryptingAudioDecoder";
+}
+
void DecryptingAudioDecoder::Initialize(const AudioDecoderConfig& config,
const PipelineStatusCB& status_cb,
const OutputCB& output_cb) {
diff --git a/media/filters/decrypting_audio_decoder.h b/media/filters/decrypting_audio_decoder.h
index 9ea211a1ab..5dd455ccda 100644
--- a/media/filters/decrypting_audio_decoder.h
+++ b/media/filters/decrypting_audio_decoder.h
@@ -43,6 +43,7 @@ class MEDIA_EXPORT DecryptingAudioDecoder : public AudioDecoder {
virtual ~DecryptingAudioDecoder();
// AudioDecoder implementation.
+ virtual std::string GetDisplayName() const OVERRIDE;
virtual void Initialize(const AudioDecoderConfig& config,
const PipelineStatusCB& status_cb,
const OutputCB& output_cb) OVERRIDE;
diff --git a/media/filters/decrypting_video_decoder.cc b/media/filters/decrypting_video_decoder.cc
index 421ec07fa8..4a14bad5dc 100644
--- a/media/filters/decrypting_video_decoder.cc
+++ b/media/filters/decrypting_video_decoder.cc
@@ -30,6 +30,10 @@ DecryptingVideoDecoder::DecryptingVideoDecoder(
trace_id_(0),
weak_factory_(this) {}
+std::string DecryptingVideoDecoder::GetDisplayName() const {
+ return "DecryptingVideoDecoder";
+}
+
void DecryptingVideoDecoder::Initialize(const VideoDecoderConfig& config,
bool /* low_delay */,
const PipelineStatusCB& status_cb,
diff --git a/media/filters/decrypting_video_decoder.h b/media/filters/decrypting_video_decoder.h
index 6550a2986b..c301e55ef5 100644
--- a/media/filters/decrypting_video_decoder.h
+++ b/media/filters/decrypting_video_decoder.h
@@ -32,6 +32,7 @@ class MEDIA_EXPORT DecryptingVideoDecoder : public VideoDecoder {
virtual ~DecryptingVideoDecoder();
// VideoDecoder implementation.
+ virtual std::string GetDisplayName() const OVERRIDE;
virtual void Initialize(const VideoDecoderConfig& config,
bool low_delay,
const PipelineStatusCB& status_cb,
diff --git a/media/filters/fake_demuxer_stream.cc b/media/filters/fake_demuxer_stream.cc
index 941778c576..6be6d0a2dc 100644
--- a/media/filters/fake_demuxer_stream.cc
+++ b/media/filters/fake_demuxer_stream.cc
@@ -89,10 +89,6 @@ DemuxerStream::Type FakeDemuxerStream::type() {
return VIDEO;
}
-void FakeDemuxerStream::EnableBitstreamConverter() {
- DCHECK(task_runner_->BelongsToCurrentThread());
-}
-
bool FakeDemuxerStream::SupportsConfigChanges() {
return config_changes_;
}
diff --git a/media/filters/fake_demuxer_stream.h b/media/filters/fake_demuxer_stream.h
index 90efe6ef55..2846ed9db7 100644
--- a/media/filters/fake_demuxer_stream.h
+++ b/media/filters/fake_demuxer_stream.h
@@ -32,7 +32,6 @@ class FakeDemuxerStream : public DemuxerStream {
virtual AudioDecoderConfig audio_decoder_config() OVERRIDE;
virtual VideoDecoderConfig video_decoder_config() OVERRIDE;
virtual Type type() OVERRIDE;
- virtual void EnableBitstreamConverter() OVERRIDE;
virtual bool SupportsConfigChanges() OVERRIDE;
virtual VideoRotation video_rotation() OVERRIDE;
diff --git a/media/filters/fake_video_decoder.cc b/media/filters/fake_video_decoder.cc
index 05dc410f87..f7f1abd854 100644
--- a/media/filters/fake_video_decoder.cc
+++ b/media/filters/fake_video_decoder.cc
@@ -40,6 +40,10 @@ FakeVideoDecoder::~FakeVideoDecoder() {
decoded_frames_.clear();
}
+std::string FakeVideoDecoder::GetDisplayName() const {
+ return "FakeVideoDecoder";
+}
+
void FakeVideoDecoder::Initialize(const VideoDecoderConfig& config,
bool low_delay,
const PipelineStatusCB& status_cb,
@@ -74,6 +78,7 @@ void FakeVideoDecoder::Decode(const scoped_refptr<DecoderBuffer>& buffer,
decoding_delay_ + held_decode_callbacks_.size());
DCHECK_LT(static_cast<int>(held_decode_callbacks_.size()),
max_parallel_decoding_requests_);
+ DCHECK_NE(state_, STATE_END_OF_STREAM);
int buffer_size = buffer->end_of_stream() ? 0 : buffer->data_size();
DecodeCB wrapped_decode_cb = base::Bind(&FakeVideoDecoder::OnFrameDecoded,
@@ -223,6 +228,7 @@ void FakeVideoDecoder::RunDecodeCallback(const DecodeCB& decode_cb) {
output_cb_.Run(decoded_frames_.front());
decoded_frames_.pop_front();
}
+ state_ = STATE_NORMAL;
} else if (!decoded_frames_.empty()) {
output_cb_.Run(decoded_frames_.front());
decoded_frames_.pop_front();
diff --git a/media/filters/fake_video_decoder.h b/media/filters/fake_video_decoder.h
index 5e476d8fae..e96dc16656 100644
--- a/media/filters/fake_video_decoder.h
+++ b/media/filters/fake_video_decoder.h
@@ -36,6 +36,7 @@ class FakeVideoDecoder : public VideoDecoder {
virtual ~FakeVideoDecoder();
// VideoDecoder implementation.
+ virtual std::string GetDisplayName() const OVERRIDE;
virtual void Initialize(const VideoDecoderConfig& config,
bool low_delay,
const PipelineStatusCB& status_cb,
diff --git a/media/filters/fake_video_decoder_unittest.cc b/media/filters/fake_video_decoder_unittest.cc
index 3598a7a48e..ec0d303150 100644
--- a/media/filters/fake_video_decoder_unittest.cc
+++ b/media/filters/fake_video_decoder_unittest.cc
@@ -92,17 +92,17 @@ class FakeVideoDecoderTest
case OK:
EXPECT_EQ(0, pending_decode_requests_);
ASSERT_EQ(VideoDecoder::kOk, last_decode_status_);
- ASSERT_TRUE(last_decoded_frame_);
+ ASSERT_TRUE(last_decoded_frame_.get());
break;
case NOT_ENOUGH_DATA:
EXPECT_EQ(0, pending_decode_requests_);
ASSERT_EQ(VideoDecoder::kOk, last_decode_status_);
- ASSERT_FALSE(last_decoded_frame_);
+ ASSERT_FALSE(last_decoded_frame_.get());
break;
case ABORTED:
EXPECT_EQ(0, pending_decode_requests_);
ASSERT_EQ(VideoDecoder::kAborted, last_decode_status_);
- EXPECT_FALSE(last_decoded_frame_);
+ EXPECT_FALSE(last_decoded_frame_.get());
break;
}
}
@@ -132,7 +132,7 @@ class FakeVideoDecoderTest
last_decoded_frame_ = NULL;
do {
Decode();
- } while (!last_decoded_frame_ && pending_decode_requests_ == 0);
+ } while (!last_decoded_frame_.get() && pending_decode_requests_ == 0);
}
void ReadAllFrames() {
@@ -274,7 +274,7 @@ TEST_P(FakeVideoDecoderTest, Read_Pending_NotEnoughData) {
SatisfyDecodeAndExpect(NOT_ENOUGH_DATA);
// Verify that FrameReady() hasn't been called.
- EXPECT_FALSE(last_decoded_frame_);
+ EXPECT_FALSE(last_decoded_frame_.get());
}
TEST_P(FakeVideoDecoderTest, Read_Pending_OK) {
diff --git a/media/filters/ffmpeg_audio_decoder.cc b/media/filters/ffmpeg_audio_decoder.cc
index b45b9401b5..ae4f3fb52e 100644
--- a/media/filters/ffmpeg_audio_decoder.cc
+++ b/media/filters/ffmpeg_audio_decoder.cc
@@ -143,6 +143,10 @@ FFmpegAudioDecoder::~FFmpegAudioDecoder() {
}
}
+std::string FFmpegAudioDecoder::GetDisplayName() const {
+ return "FFmpegAudioDecoder";
+}
+
void FFmpegAudioDecoder::Initialize(const AudioDecoderConfig& config,
const PipelineStatusCB& status_cb,
const OutputCB& output_cb) {
@@ -202,7 +206,7 @@ void FFmpegAudioDecoder::DecodeBuffer(
DCHECK_NE(state_, kUninitialized);
DCHECK_NE(state_, kDecodeFinished);
DCHECK_NE(state_, kError);
- DCHECK(buffer);
+ DCHECK(buffer.get());
// Make sure we are notified if http://crbug.com/49709 returns. Issue also
// occurs with some damaged files.
diff --git a/media/filters/ffmpeg_audio_decoder.h b/media/filters/ffmpeg_audio_decoder.h
index 680128c3fd..b13a0163a6 100644
--- a/media/filters/ffmpeg_audio_decoder.h
+++ b/media/filters/ffmpeg_audio_decoder.h
@@ -36,6 +36,7 @@ class MEDIA_EXPORT FFmpegAudioDecoder : public AudioDecoder {
virtual ~FFmpegAudioDecoder();
// AudioDecoder implementation.
+ virtual std::string GetDisplayName() const OVERRIDE;
virtual void Initialize(const AudioDecoderConfig& config,
const PipelineStatusCB& status_cb,
const OutputCB& output_cb) OVERRIDE;
diff --git a/media/filters/ffmpeg_demuxer.cc b/media/filters/ffmpeg_demuxer.cc
index 15b6a51fb1..2cc89130b1 100644
--- a/media/filters/ffmpeg_demuxer.cc
+++ b/media/filters/ffmpeg_demuxer.cc
@@ -305,6 +305,12 @@ void FFmpegDemuxerStream::EnqueuePacket(ScopedAVPacket packet) {
start_time = base::TimeDelta();
}
+ // Don't rebase timestamps for positive start times; the HTML Media Spec
+ // details this in section "4.8.10.6 Offsets into the media resource." We
+ // will still need to rebase timestamps before seeking with FFmpeg, though.
+ if (start_time > base::TimeDelta())
+ start_time = base::TimeDelta();
+
buffer->set_timestamp(stream_timestamp - start_time);
// If enabled, mark audio packets with negative timestamps for post-decode
@@ -555,10 +561,14 @@ FFmpegDemuxer::FFmpegDemuxer(
FFmpegDemuxer::~FFmpegDemuxer() {}
-void FFmpegDemuxer::Stop(const base::Closure& callback) {
+void FFmpegDemuxer::Stop() {
DCHECK(task_runner_->BelongsToCurrentThread());
- url_protocol_->Abort();
+
+ // The order of Stop() and Abort() is important here. If Abort() is called
+ // first, control may pass into FFmpeg, where it can destroy buffers that
+ // are still in the process of being fulfilled by the DataSource.
data_source_->Stop();
+ url_protocol_->Abort();
// This will block until all tasks complete. Note that after this returns it's
// possible for reply tasks (e.g., OnReadFrameDone()) to be queued on this
@@ -573,7 +583,6 @@ void FFmpegDemuxer::Stop(const base::Closure& callback) {
}
data_source_ = NULL;
- task_runner_->PostTask(FROM_HERE, callback);
}
void FFmpegDemuxer::Seek(base::TimeDelta time, const PipelineStatusCB& cb) {
@@ -585,7 +594,15 @@ void FFmpegDemuxer::Seek(base::TimeDelta time, const PipelineStatusCB& cb) {
// we know we're going to drop it on the floor.
// FFmpeg requires seeks to be adjusted according to the lowest starting time.
- const base::TimeDelta seek_time = time + start_time_;
+ // Since EnqueuePacket() rebased negative timestamps by the start time, we
+ // must correct the shift here.
+ //
+ // Additionally, to work around limitations in how we expose seekable ranges to
+ // Blink (http://crbug.com/137275), we also want to clamp seeks before the
+ // start time to the start time.
+ const base::TimeDelta seek_time =
+ start_time_ < base::TimeDelta() ? time + start_time_
+ : time < start_time_ ? start_time_ : time;
// Choose the seeking stream based on whether it contains the seek time, if no
// match can be found prefer the preferred stream.
@@ -649,6 +666,10 @@ void FFmpegDemuxer::Initialize(DemuxerHost* host,
status_cb));
}
+base::Time FFmpegDemuxer::GetTimelineOffset() const {
+ return timeline_offset_;
+}
+
DemuxerStream* FFmpegDemuxer::GetStream(DemuxerStream::Type type) {
DCHECK(task_runner_->BelongsToCurrentThread());
return GetFFmpegStream(type);
@@ -665,8 +686,8 @@ FFmpegDemuxerStream* FFmpegDemuxer::GetFFmpegStream(
return NULL;
}
-base::Time FFmpegDemuxer::GetTimelineOffset() const {
- return timeline_offset_;
+base::TimeDelta FFmpegDemuxer::GetStartTime() const {
+ return std::max(start_time_, base::TimeDelta());
}
Demuxer::Liveness FFmpegDemuxer::GetLiveness() const {
@@ -942,7 +963,7 @@ void FFmpegDemuxer::OnFindStreamInfoDone(const PipelineStatusCB& status_cb,
// Since we're shifting the externally visible start time to zero, we need to
// adjust the timeline offset to compensate.
- if (!timeline_offset_.is_null())
+ if (!timeline_offset_.is_null() && start_time_ < base::TimeDelta())
timeline_offset_ += start_time_;
if (max_duration == kInfiniteDuration() && !timeline_offset_.is_null()) {
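[Editor's note] The nested ternary computing seek_time above packs two cases into one expression. Spelled out under the same definitions (start_time_ is the lowest demuxed timestamp), it is equivalent to:

    base::TimeDelta seek_time;
    if (start_time_ < base::TimeDelta()) {
      // Negative timestamps were rebased by EnqueuePacket(); undo that shift
      // before handing the seek target to FFmpeg.
      seek_time = time + start_time_;
    } else {
      // Positive start times are not rebased, so clamp seeks that land before
      // the first packet to the start time (http://crbug.com/137275).
      seek_time = std::max(time, start_time_);
    }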
diff --git a/media/filters/ffmpeg_demuxer.h b/media/filters/ffmpeg_demuxer.h
index 1ec8cfe964..d2855e0d97 100644
--- a/media/filters/ffmpeg_demuxer.h
+++ b/media/filters/ffmpeg_demuxer.h
@@ -164,10 +164,11 @@ class MEDIA_EXPORT FFmpegDemuxer : public Demuxer {
virtual void Initialize(DemuxerHost* host,
const PipelineStatusCB& status_cb,
bool enable_text_tracks) OVERRIDE;
- virtual void Stop(const base::Closure& callback) OVERRIDE;
+ virtual void Stop() OVERRIDE;
virtual void Seek(base::TimeDelta time, const PipelineStatusCB& cb) OVERRIDE;
- virtual DemuxerStream* GetStream(DemuxerStream::Type type) OVERRIDE;
virtual base::Time GetTimelineOffset() const OVERRIDE;
+ virtual DemuxerStream* GetStream(DemuxerStream::Type type) OVERRIDE;
+ virtual base::TimeDelta GetStartTime() const OVERRIDE;
virtual Liveness GetLiveness() const OVERRIDE;
// Calls |need_key_cb_| with the initialization data encountered in the file.
@@ -179,8 +180,9 @@ class MEDIA_EXPORT FFmpegDemuxer : public Demuxer {
void NotifyCapacityAvailable();
void NotifyBufferingChanged();
- // The lowest demuxed timestamp. DemuxerStream's must use this to adjust
- // packet timestamps such that external clients see a zero-based timeline.
+ // The lowest demuxed timestamp. If negative, DemuxerStreams must use this to
+ // adjust packet timestamps such that external clients see a zero-based
+ // timeline.
base::TimeDelta start_time() const { return start_time_; }
private:
diff --git a/media/filters/ffmpeg_demuxer_unittest.cc b/media/filters/ffmpeg_demuxer_unittest.cc
index 76ccba2259..36e4824b82 100644
--- a/media/filters/ffmpeg_demuxer_unittest.cc
+++ b/media/filters/ffmpeg_demuxer_unittest.cc
@@ -67,11 +67,8 @@ class FFmpegDemuxerTest : public testing::Test {
FFmpegDemuxerTest() {}
virtual ~FFmpegDemuxerTest() {
- if (demuxer_) {
- WaitableMessageLoopEvent event;
- demuxer_->Stop(event.GetClosure());
- event.RunAndWait();
- }
+ if (demuxer_)
+ demuxer_->Stop();
}
void CreateDemuxer(const std::string& name) {
@@ -441,26 +438,17 @@ TEST_F(FFmpegDemuxerTest, Read_VideoPositiveStartTime) {
// Run the test twice with a seek in between.
for (int i = 0; i < 2; ++i) {
- // Check first buffer in video stream. It should have been adjusted such
- // that it starts 400ms after the first audio buffer.
- video->Read(
- NewReadCB(FROM_HERE,
- 5636,
- (video_start_time - audio_start_time).InMicroseconds()));
+ video->Read(NewReadCB(FROM_HERE, 5636, video_start_time.InMicroseconds()));
message_loop_.Run();
-
- // Since the audio buffer has a lower first timestamp, it should become
- // zero.
- audio->Read(NewReadCB(FROM_HERE, 165, 0));
+ audio->Read(NewReadCB(FROM_HERE, 165, audio_start_time.InMicroseconds()));
message_loop_.Run();
    // Verify that the start time is equal to the lowest timestamp (i.e. the
// audio).
EXPECT_EQ(audio_start_time, demuxer_->start_time());
- // Verify that the timeline offset has been adjusted by the start time.
- EXPECT_EQ(kTimelineOffsetMs + audio_start_time.InMilliseconds(),
- demuxer_->GetTimelineOffset().ToJavaTime());
+ // Verify that the timeline offset has not been adjusted by the start time.
+ EXPECT_EQ(kTimelineOffsetMs, demuxer_->GetTimelineOffset().ToJavaTime());
// Seek back to the beginning and repeat the test.
WaitableMessageLoopEvent event;
@@ -560,6 +548,10 @@ TEST_F(FFmpegDemuxerTest, Read_AudioNegativeStartTimeAndOggDiscard_Sync) {
EXPECT_EQ(base::TimeDelta::FromMicroseconds(-2902),
demuxer_->start_time());
+ // Though the internal start time may be below zero, the exposed media time
+ // must never be negative.
+ EXPECT_EQ(base::TimeDelta(), demuxer_->GetStartTime());
+
video->Read(NewReadCB(FROM_HERE, 9997, 0));
message_loop_.Run();
@@ -753,9 +745,7 @@ TEST_F(FFmpegDemuxerTest, Stop) {
DemuxerStream* audio = demuxer_->GetStream(DemuxerStream::AUDIO);
ASSERT_TRUE(audio);
- WaitableMessageLoopEvent event;
- demuxer_->Stop(event.GetClosure());
- event.RunAndWait();
+ demuxer_->Stop();
// Reads after being stopped are all EOS buffers.
StrictMock<MockReadCB> callback;
@@ -914,9 +904,7 @@ TEST_F(FFmpegDemuxerTest, IsValidAnnexB) {
stream->Read(base::Bind(&ValidateAnnexB, stream));
message_loop_.Run();
- WaitableMessageLoopEvent event;
- demuxer_->Stop(event.GetClosure());
- event.RunAndWait();
+ demuxer_->Stop();
demuxer_.reset();
data_source_.reset();
}
diff --git a/media/filters/ffmpeg_video_decoder.cc b/media/filters/ffmpeg_video_decoder.cc
index b7da7f7a46..93dcee2157 100644
--- a/media/filters/ffmpeg_video_decoder.cc
+++ b/media/filters/ffmpeg_video_decoder.cc
@@ -149,6 +149,10 @@ int FFmpegVideoDecoder::GetVideoBuffer(struct AVCodecContext* codec_context,
return 0;
}
+std::string FFmpegVideoDecoder::GetDisplayName() const {
+ return "FFmpegVideoDecoder";
+}
+
void FFmpegVideoDecoder::Initialize(const VideoDecoderConfig& config,
bool low_delay,
const PipelineStatusCB& status_cb,
@@ -177,7 +181,7 @@ void FFmpegVideoDecoder::Initialize(const VideoDecoderConfig& config,
void FFmpegVideoDecoder::Decode(const scoped_refptr<DecoderBuffer>& buffer,
const DecodeCB& decode_cb) {
DCHECK(task_runner_->BelongsToCurrentThread());
- DCHECK(buffer);
+ DCHECK(buffer.get());
DCHECK(!decode_cb.is_null());
CHECK_NE(state_, kUninitialized);
diff --git a/media/filters/ffmpeg_video_decoder.h b/media/filters/ffmpeg_video_decoder.h
index 529d69e70b..909a578a06 100644
--- a/media/filters/ffmpeg_video_decoder.h
+++ b/media/filters/ffmpeg_video_decoder.h
@@ -36,6 +36,7 @@ class MEDIA_EXPORT FFmpegVideoDecoder : public VideoDecoder {
void set_decode_nalus(bool decode_nalus) { decode_nalus_ = decode_nalus; }
// VideoDecoder implementation.
+ virtual std::string GetDisplayName() const OVERRIDE;
virtual void Initialize(const VideoDecoderConfig& config,
bool low_delay,
const PipelineStatusCB& status_cb,
diff --git a/media/filters/frame_processor.cc b/media/filters/frame_processor.cc
index 6d98e8b13a..ee49ba38bf 100644
--- a/media/filters/frame_processor.cc
+++ b/media/filters/frame_processor.cc
@@ -356,7 +356,7 @@ bool FrameProcessor::HandlePartialAppendWindowTrimming(
  // If we have a preroll buffer, see if we can attach it to the first buffer
// overlapping or after |append_window_start|.
- if (audio_preroll_buffer_) {
+ if (audio_preroll_buffer_.get()) {
// We only want to use the preroll buffer if it directly precedes (less
// than one sample apart) the current buffer.
const int64 delta = std::abs((audio_preroll_buffer_->timestamp() +
@@ -598,12 +598,6 @@ bool FrameProcessor::ProcessFrame(
HandlePartialAppendWindowTrimming(append_window_start,
append_window_end,
frame)) {
- // If |frame| was front-trimmed a discontinuity may exist, so treat the
- // next frames appended as if they were the beginning of a new media
- // segment.
- if (frame->timestamp() != presentation_timestamp && !sequence_mode_)
- *new_media_segment = true;
-
// |frame| has been partially trimmed or had preroll added. Though
// |frame|'s duration may have changed, do not update |frame_duration|
// here, so |track_buffer|'s last frame duration update uses original
diff --git a/media/filters/frame_processor_unittest.cc b/media/filters/frame_processor_unittest.cc
index 4daf5d0940..ff2f16c4a5 100644
--- a/media/filters/frame_processor_unittest.cc
+++ b/media/filters/frame_processor_unittest.cc
@@ -249,7 +249,7 @@ class FrameProcessorTest : public testing::TestWithParam<bool> {
private:
void StoreStatusAndBuffer(DemuxerStream::Status status,
const scoped_refptr<DecoderBuffer>& buffer) {
- if (status == DemuxerStream::kOk && buffer) {
+ if (status == DemuxerStream::kOk && buffer.get()) {
      DVLOG(3) << __FUNCTION__ << " status: " << status << " ts: "
<< buffer->timestamp().InSecondsF();
} else {
@@ -664,6 +664,30 @@ TEST_P(FrameProcessorTest, PartialAppendWindowFilterNoDiscontinuity) {
CheckReadsThenReadStalls(audio_.get(), "7:0 19");
}
+TEST_P(FrameProcessorTest, PartialAppendWindowFilterNoNewMediaSegment) {
+ // Tests that a new media segment is not forcibly signalled when an audio
+ // frame is partially front-trimmed; doing so would incorrectly introduce a
+ // discontinuity and could cause a non-keyframe video frame to be processed
+ // next after the discontinuity.
+ InSequence s;
+ AddTestTracks(HAS_AUDIO | HAS_VIDEO);
+ new_media_segment_ = true;
+ frame_processor_->SetSequenceMode(GetParam());
+ EXPECT_CALL(callbacks_, PossibleDurationIncrease(frame_duration_));
+ ProcessFrames("", "0K");
+ EXPECT_CALL(callbacks_, PossibleDurationIncrease(frame_duration_));
+ ProcessFrames("-5K", "");
+ EXPECT_CALL(callbacks_, PossibleDurationIncrease(frame_duration_ * 2));
+ ProcessFrames("", "10");
+
+ EXPECT_EQ(base::TimeDelta(), timestamp_offset_);
+ EXPECT_FALSE(new_media_segment_);
+ CheckExpectedRangesByTimestamp(audio_.get(), "{ [0,5) }");
+ CheckExpectedRangesByTimestamp(video_.get(), "{ [0,20) }");
+ CheckReadsThenReadStalls(audio_.get(), "0:-5");
+ CheckReadsThenReadStalls(video_.get(), "0 10");
+}
+
INSTANTIATE_TEST_CASE_P(SequenceMode, FrameProcessorTest, Values(true));
INSTANTIATE_TEST_CASE_P(SegmentsMode, FrameProcessorTest, Values(false));
diff --git a/media/filters/gpu_video_accelerator_factories.h b/media/filters/gpu_video_accelerator_factories.h
index a6859c78f2..6ed04c74f0 100644
--- a/media/filters/gpu_video_accelerator_factories.h
+++ b/media/filters/gpu_video_accelerator_factories.h
@@ -11,6 +11,7 @@
#include "base/memory/scoped_ptr.h"
#include "gpu/command_buffer/common/mailbox.h"
#include "media/base/media_export.h"
+#include "media/video/video_encode_accelerator.h"
class SkBitmap;
@@ -27,7 +28,6 @@ class Size;
namespace media {
class VideoDecodeAccelerator;
-class VideoEncodeAccelerator;
// Helper interface for specifying factories needed to instantiate a hardware
// video accelerator.
@@ -73,6 +73,10 @@ class MEDIA_EXPORT GpuVideoAcceleratorFactories
// Returns the task runner the video accelerator runs on.
virtual scoped_refptr<base::SingleThreadTaskRunner> GetTaskRunner() = 0;
+ // Returns the codec profiles supported by the video encode accelerator.
+ virtual std::vector<VideoEncodeAccelerator::SupportedProfile>
+ GetVideoEncodeAcceleratorSupportedProfiles() = 0;
+
protected:
friend class base::RefCountedThreadSafe<GpuVideoAcceleratorFactories>;
virtual ~GpuVideoAcceleratorFactories();
diff --git a/media/filters/gpu_video_decoder.cc b/media/filters/gpu_video_decoder.cc
index 3954e09406..5ca6c67d9b 100644
--- a/media/filters/gpu_video_decoder.cc
+++ b/media/filters/gpu_video_decoder.cc
@@ -18,7 +18,6 @@
#include "gpu/command_buffer/common/mailbox_holder.h"
#include "media/base/bind_to_current_loop.h"
#include "media/base/decoder_buffer.h"
-#include "media/base/media_log.h"
#include "media/base/media_switches.h"
#include "media/base/pipeline.h"
#include "media/base/pipeline_status.h"
@@ -61,12 +60,10 @@ GpuVideoDecoder::BufferData::BufferData(
GpuVideoDecoder::BufferData::~BufferData() {}
GpuVideoDecoder::GpuVideoDecoder(
- const scoped_refptr<GpuVideoAcceleratorFactories>& factories,
- const scoped_refptr<MediaLog>& media_log)
+ const scoped_refptr<GpuVideoAcceleratorFactories>& factories)
: needs_bitstream_conversion_(false),
factories_(factories),
state_(kNormal),
- media_log_(media_log),
decoder_texture_target_(0),
next_picture_buffer_id_(0),
next_bitstream_buffer_id_(0),
@@ -138,6 +135,10 @@ static void ReportGpuVideoDecoderInitializeStatusToUMAAndRunCB(
cb.Run(status);
}
+std::string GpuVideoDecoder::GetDisplayName() const {
+ return "GpuVideoDecoder";
+}
+
void GpuVideoDecoder::Initialize(const VideoDecoderConfig& config,
bool /* low_delay */,
const PipelineStatusCB& orig_status_cb,
@@ -187,7 +188,6 @@ void GpuVideoDecoder::Initialize(const VideoDecoderConfig& config,
}
DVLOG(3) << "GpuVideoDecoder::Initialize() succeeded.";
- media_log_->SetStringProperty("video_decoder", "gpu");
status_cb.Run(PIPELINE_OK);
}
diff --git a/media/filters/gpu_video_decoder.h b/media/filters/gpu_video_decoder.h
index 33076840fa..743c1278c6 100644
--- a/media/filters/gpu_video_decoder.h
+++ b/media/filters/gpu_video_decoder.h
@@ -38,10 +38,10 @@ class MEDIA_EXPORT GpuVideoDecoder
public VideoDecodeAccelerator::Client {
public:
explicit GpuVideoDecoder(
- const scoped_refptr<GpuVideoAcceleratorFactories>& factories,
- const scoped_refptr<MediaLog>& media_log);
+ const scoped_refptr<GpuVideoAcceleratorFactories>& factories);
// VideoDecoder implementation.
+ virtual std::string GetDisplayName() const OVERRIDE;
virtual void Initialize(const VideoDecoderConfig& config,
bool low_delay,
const PipelineStatusCB& status_cb,
@@ -152,8 +152,6 @@ class MEDIA_EXPORT GpuVideoDecoder
// steady-state of the decoder.
std::vector<SHMBuffer*> available_shm_segments_;
- scoped_refptr<MediaLog> media_log_;
-
std::map<int32, PendingDecoderBuffer> bitstream_buffers_in_decoder_;
PictureBufferMap assigned_picture_buffers_;
// PictureBuffers given to us by VDA via PictureReady, which we sent forward
diff --git a/media/filters/mock_gpu_video_accelerator_factories.h b/media/filters/mock_gpu_video_accelerator_factories.h
index fde3b08fea..bfa89a1eb7 100644
--- a/media/filters/mock_gpu_video_accelerator_factories.h
+++ b/media/filters/mock_gpu_video_accelerator_factories.h
@@ -45,6 +45,8 @@ class MockGpuVideoAcceleratorFactories : public GpuVideoAcceleratorFactories {
const SkBitmap& pixels));
MOCK_METHOD1(CreateSharedMemory, base::SharedMemory*(size_t size));
MOCK_METHOD0(GetTaskRunner, scoped_refptr<base::SingleThreadTaskRunner>());
+ MOCK_METHOD0(GetVideoEncodeAcceleratorSupportedProfiles,
+ std::vector<VideoEncodeAccelerator::SupportedProfile>());
virtual scoped_ptr<VideoDecodeAccelerator> CreateVideoDecodeAccelerator()
OVERRIDE;
diff --git a/media/filters/opus_audio_decoder.cc b/media/filters/opus_audio_decoder.cc
index 62784ddf71..51484eaa55 100644
--- a/media/filters/opus_audio_decoder.cc
+++ b/media/filters/opus_audio_decoder.cc
@@ -249,6 +249,10 @@ OpusAudioDecoder::OpusAudioDecoder(
opus_decoder_(NULL),
start_input_timestamp_(kNoTimestamp()) {}
+std::string OpusAudioDecoder::GetDisplayName() const {
+ return "OpusAudioDecoder";
+}
+
void OpusAudioDecoder::Initialize(const AudioDecoderConfig& config,
const PipelineStatusCB& status_cb,
const OutputCB& output_cb) {
@@ -298,7 +302,7 @@ void OpusAudioDecoder::DecodeBuffer(
const DecodeCB& decode_cb) {
DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK(!decode_cb.is_null());
- DCHECK(input);
+ DCHECK(input.get());
  // Libopus does not buffer output. Decoding is complete when an end-of-stream
// input buffer is received.
@@ -330,7 +334,7 @@ void OpusAudioDecoder::DecodeBuffer(
return;
}
- if (output_buffer) {
+ if (output_buffer.get()) {
output_cb_.Run(output_buffer);
}
diff --git a/media/filters/opus_audio_decoder.h b/media/filters/opus_audio_decoder.h
index 19ef04d0db..b69af92d9a 100644
--- a/media/filters/opus_audio_decoder.h
+++ b/media/filters/opus_audio_decoder.h
@@ -31,6 +31,7 @@ class MEDIA_EXPORT OpusAudioDecoder : public AudioDecoder {
virtual ~OpusAudioDecoder();
// AudioDecoder implementation.
+ virtual std::string GetDisplayName() const OVERRIDE;
virtual void Initialize(const AudioDecoderConfig& config,
const PipelineStatusCB& status_cb,
const OutputCB& output_cb) OVERRIDE;
diff --git a/media/filters/pipeline_integration_perftest.cc b/media/filters/pipeline_integration_perftest.cc
index aea9363c94..d84bd136a0 100644
--- a/media/filters/pipeline_integration_perftest.cc
+++ b/media/filters/pipeline_integration_perftest.cc
@@ -68,10 +68,7 @@ TEST(PipelineIntegrationPerfTest, AudioPlaybackBenchmark) {
}
TEST(PipelineIntegrationPerfTest, VP8PlaybackBenchmark) {
- RunVideoPlaybackBenchmark("bear-640x360.webm",
- "clockless_video_playback_vp8");
- RunVideoPlaybackBenchmark("bear-320x240.webm",
- "clockless_video_playback_vp8");
+ RunVideoPlaybackBenchmark("bear_silent.webm", "clockless_video_playback_vp8");
}
TEST(PipelineIntegrationPerfTest, VP9PlaybackBenchmark) {
@@ -79,13 +76,13 @@ TEST(PipelineIntegrationPerfTest, VP9PlaybackBenchmark) {
}
TEST(PipelineIntegrationPerfTest, TheoraPlaybackBenchmark) {
- RunVideoPlaybackBenchmark("bear.ogv", "clockless_video_playback_theora");
+ RunVideoPlaybackBenchmark("bear_silent.ogv",
+ "clockless_video_playback_theora");
}
#if defined(USE_PROPRIETARY_CODECS)
TEST(PipelineIntegrationPerfTest, MP4PlaybackBenchmark) {
- RunVideoPlaybackBenchmark("bear-1280x720.mp4",
- "clockless_video_playback_mp4");
+ RunVideoPlaybackBenchmark("bear_silent.mp4", "clockless_video_playback_mp4");
}
#endif
diff --git a/media/filters/pipeline_integration_test.cc b/media/filters/pipeline_integration_test.cc
index 3f24646533..d9904f611e 100644
--- a/media/filters/pipeline_integration_test.cc
+++ b/media/filters/pipeline_integration_test.cc
@@ -17,9 +17,12 @@
#include "media/cdm/aes_decryptor.h"
#include "media/cdm/json_web_key.h"
#include "media/filters/chunk_demuxer.h"
+#include "media/filters/renderer_impl.h"
+#include "testing/gmock/include/gmock/gmock.h"
using testing::_;
using testing::AnyNumber;
+using testing::AtLeast;
using testing::AtMost;
using testing::SaveArg;
@@ -118,10 +121,11 @@ class FakeEncryptedMedia {
const std::vector<uint8>& message,
const GURL& destination_url) = 0;
- virtual void OnSessionReady(const std::string& web_session_id) = 0;
-
virtual void OnSessionClosed(const std::string& web_session_id) = 0;
+ virtual void OnSessionKeysChange(const std::string& web_session_id,
+ bool has_additional_usable_key) = 0;
+
// Errors are not expected unless overridden.
virtual void OnSessionError(const std::string& web_session_id,
const std::string& error_name,
@@ -139,6 +143,8 @@ class FakeEncryptedMedia {
: decryptor_(base::Bind(&FakeEncryptedMedia::OnSessionMessage,
base::Unretained(this)),
base::Bind(&FakeEncryptedMedia::OnSessionClosed,
+ base::Unretained(this)),
+ base::Bind(&FakeEncryptedMedia::OnSessionKeysChange,
base::Unretained(this))),
app_(app) {}
@@ -153,14 +159,15 @@ class FakeEncryptedMedia {
app_->OnSessionMessage(web_session_id, message, destination_url);
}
- void OnSessionReady(const std::string& web_session_id) {
- app_->OnSessionReady(web_session_id);
- }
-
void OnSessionClosed(const std::string& web_session_id) {
app_->OnSessionClosed(web_session_id);
}
+ void OnSessionKeysChange(const std::string& web_session_id,
+ bool has_additional_usable_key) {
+ app_->OnSessionKeysChange(web_session_id, has_additional_usable_key);
+ }
+
void OnSessionError(const std::string& web_session_id,
const std::string& error_name,
uint32 system_code,
@@ -233,12 +240,14 @@ class KeyProvidingApp : public FakeEncryptedMedia::AppBase {
EXPECT_EQ(current_session_id_, web_session_id);
}
- virtual void OnSessionReady(const std::string& web_session_id) OVERRIDE {
+ virtual void OnSessionClosed(const std::string& web_session_id) OVERRIDE {
EXPECT_EQ(current_session_id_, web_session_id);
}
- virtual void OnSessionClosed(const std::string& web_session_id) OVERRIDE {
+ virtual void OnSessionKeysChange(const std::string& web_session_id,
+ bool has_additional_usable_key) OVERRIDE {
EXPECT_EQ(current_session_id_, web_session_id);
+ EXPECT_TRUE(has_additional_usable_key);
}
virtual void NeedKey(const std::string& type,
@@ -358,14 +367,15 @@ class NoResponseApp : public FakeEncryptedMedia::AppBase {
FAIL() << "Unexpected Message";
}
- virtual void OnSessionReady(const std::string& web_session_id) OVERRIDE {
+ virtual void OnSessionClosed(const std::string& web_session_id) OVERRIDE {
EXPECT_FALSE(web_session_id.empty());
- FAIL() << "Unexpected Ready";
+ FAIL() << "Unexpected Closed";
}
- virtual void OnSessionClosed(const std::string& web_session_id) OVERRIDE {
+ virtual void OnSessionKeysChange(const std::string& web_session_id,
+ bool has_additional_usable_key) OVERRIDE {
EXPECT_FALSE(web_session_id.empty());
- FAIL() << "Unexpected Closed";
+ EXPECT_TRUE(has_additional_usable_key);
}
virtual void NeedKey(const std::string& type,
@@ -431,7 +441,9 @@ class MockMediaSource {
chunk_demuxer_->AppendData(
kSourceId, file_data_->data() + current_position_, size,
- base::TimeDelta(), kInfiniteDuration(), &last_timestamp_offset_);
+ base::TimeDelta(), kInfiniteDuration(), &last_timestamp_offset_,
+ base::Bind(&MockMediaSource::InitSegmentReceived,
+ base::Unretained(this)));
current_position_ += size;
}
@@ -441,7 +453,9 @@ class MockMediaSource {
CHECK(!chunk_demuxer_->IsParsingMediaSegment(kSourceId));
chunk_demuxer_->AppendData(kSourceId, pData, size,
base::TimeDelta(), kInfiniteDuration(),
- &timestamp_offset);
+ &timestamp_offset,
+ base::Bind(&MockMediaSource::InitSegmentReceived,
+ base::Unretained(this)));
last_timestamp_offset_ = timestamp_offset;
}
@@ -456,7 +470,9 @@ class MockMediaSource {
size,
append_window_start,
append_window_end,
- &timestamp_offset);
+ &timestamp_offset,
+ base::Bind(&MockMediaSource::InitSegmentReceived,
+ base::Unretained(this)));
last_timestamp_offset_ = timestamp_offset;
}
@@ -518,6 +534,8 @@ class MockMediaSource {
return last_timestamp_offset_;
}
+ MOCK_METHOD0(InitSegmentReceived, void(void));
+
private:
base::FilePath file_path_;
scoped_refptr<DecoderBuffer> file_data_;
@@ -535,13 +553,16 @@ class PipelineIntegrationTest
public PipelineIntegrationTestBase {
public:
void StartPipelineWithMediaSource(MockMediaSource* source) {
+ EXPECT_CALL(*source, InitSegmentReceived()).Times(AtLeast(1));
EXPECT_CALL(*this, OnMetadata(_))
.Times(AtMost(1))
.WillRepeatedly(SaveArg<0>(&metadata_));
EXPECT_CALL(*this, OnBufferingStateChanged(BUFFERING_HAVE_ENOUGH))
.Times(AtMost(1));
+ demuxer_ = source->GetDemuxer().Pass();
pipeline_->Start(
- CreateFilterCollection(source->GetDemuxer(), NULL),
+ demuxer_.get(),
+ CreateRenderer(NULL),
base::Bind(&PipelineIntegrationTest::OnEnded, base::Unretained(this)),
base::Bind(&PipelineIntegrationTest::OnError, base::Unretained(this)),
QuitOnStatusCB(PIPELINE_OK),
@@ -549,8 +570,9 @@ class PipelineIntegrationTest
base::Unretained(this)),
base::Bind(&PipelineIntegrationTest::OnBufferingStateChanged,
base::Unretained(this)),
- base::Closure());
-
+ base::Closure(),
+ base::Bind(&PipelineIntegrationTest::OnAddTextTrack,
+ base::Unretained(this)));
message_loop_.Run();
}
@@ -562,14 +584,16 @@ class PipelineIntegrationTest
void StartPipelineWithEncryptedMedia(
MockMediaSource* source,
FakeEncryptedMedia* encrypted_media) {
+ EXPECT_CALL(*source, InitSegmentReceived()).Times(AtLeast(1));
EXPECT_CALL(*this, OnMetadata(_))
.Times(AtMost(1))
.WillRepeatedly(SaveArg<0>(&metadata_));
EXPECT_CALL(*this, OnBufferingStateChanged(BUFFERING_HAVE_ENOUGH))
.Times(AtMost(1));
+ demuxer_ = source->GetDemuxer().Pass();
pipeline_->Start(
- CreateFilterCollection(source->GetDemuxer(),
- encrypted_media->decryptor()),
+ demuxer_.get(),
+ CreateRenderer(encrypted_media->decryptor()),
base::Bind(&PipelineIntegrationTest::OnEnded, base::Unretained(this)),
base::Bind(&PipelineIntegrationTest::OnError, base::Unretained(this)),
QuitOnStatusCB(PIPELINE_OK),
@@ -577,7 +601,9 @@ class PipelineIntegrationTest
base::Unretained(this)),
base::Bind(&PipelineIntegrationTest::OnBufferingStateChanged,
base::Unretained(this)),
- base::Closure());
+ base::Closure(),
+ base::Bind(&PipelineIntegrationTest::OnAddTextTrack,
+ base::Unretained(this)));
source->set_need_key_cb(base::Bind(&FakeEncryptedMedia::NeedKey,
base::Unretained(encrypted_media)));
@@ -1486,8 +1512,8 @@ TEST_F(PipelineIntegrationTest, BasicPlayback_VP9_Odd_WebM) {
}
// Verify that VP8 video with an inband text track can be played back.
-TEST_F(PipelineIntegrationTest,
- BasicPlayback_VP8_WebVTT_WebM) {
+TEST_F(PipelineIntegrationTest, BasicPlayback_VP8_WebVTT_WebM) {
+ EXPECT_CALL(*this, OnAddTextTrack(_, _));
ASSERT_TRUE(Start(GetTestDataFilePath("bear-vp8-webvtt.webm"),
PIPELINE_OK));
Play();
@@ -1545,6 +1571,7 @@ TEST_F(PipelineIntegrationTest, BasicPlaybackChainedOgg) {
ASSERT_TRUE(Start(GetTestDataFilePath("double-sfx.ogg"), PIPELINE_OK));
Play();
ASSERT_TRUE(WaitUntilOnEnded());
+ ASSERT_EQ(base::TimeDelta(), demuxer_->GetStartTime());
}
// Ensures audio-video playback with missing or negative timestamps fails softly
@@ -1553,6 +1580,38 @@ TEST_F(PipelineIntegrationTest, BasicPlaybackChainedOggVideo) {
ASSERT_TRUE(Start(GetTestDataFilePath("double-bear.ogv"), PIPELINE_OK));
Play();
EXPECT_EQ(PIPELINE_ERROR_DECODE, WaitUntilEndedOrError());
+ ASSERT_EQ(base::TimeDelta(), demuxer_->GetStartTime());
+}
+
+// Tests that we signal ended even when audio runs longer than the video track.
+TEST_F(PipelineIntegrationTest, BasicPlaybackAudioLongerThanVideo) {
+ ASSERT_TRUE(Start(GetTestDataFilePath("bear_audio_longer_than_video.ogv"),
+ PIPELINE_OK));
+ // Audio track is 2000ms. Video track is 1001ms. Duration should be the
+ // higher of the two.
+ EXPECT_EQ(2000, pipeline_->GetMediaDuration().InMilliseconds());
+ Play();
+ ASSERT_TRUE(WaitUntilOnEnded());
+}
+
+// Tests that we signal ended even when audio runs shorter than the video track.
+TEST_F(PipelineIntegrationTest, BasicPlaybackAudioShorterThanVideo) {
+ ASSERT_TRUE(Start(GetTestDataFilePath("bear_audio_shorter_than_video.ogv"),
+ PIPELINE_OK));
+ // Audio track is 500ms. Video track is 1001ms. Duration should be the
+ // higher of the two.
+ EXPECT_EQ(1001, pipeline_->GetMediaDuration().InMilliseconds());
+ Play();
+ ASSERT_TRUE(WaitUntilOnEnded());
+}
+
+TEST_F(PipelineIntegrationTest, BasicPlaybackPositiveStartTime) {
+ ASSERT_TRUE(
+ Start(GetTestDataFilePath("nonzero-start-time.webm"), PIPELINE_OK));
+ Play();
+ ASSERT_TRUE(WaitUntilOnEnded());
+ ASSERT_EQ(base::TimeDelta::FromMicroseconds(396000),
+ demuxer_->GetStartTime());
}
} // namespace media
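
The MockMediaSource changes above follow a capture-and-verify pattern: a gmock method is handed to production code as a bound callback (base::Bind with base::Unretained), and StartPipelineWithMediaSource() sets EXPECT_CALL(*source, InitSegmentReceived()).Times(AtLeast(1)) so the test fails if ChunkDemuxer::AppendData() never reports an init segment. A minimal self-contained sketch of the same pattern, using std::function in place of base::Callback (all names below are hypothetical):

    #include <functional>
    #include "gmock/gmock.h"
    #include "gtest/gtest.h"

    // Stand-in for the production side (ChunkDemuxer::AppendData): it accepts
    // a callback and invokes it once an init segment is parsed.
    void AppendData(const std::function<void()>& init_segment_received) {
      init_segment_received();  // Parsing found an init segment.
    }

    class MockSource {
     public:
      MOCK_METHOD0(InitSegmentReceived, void());
    };

    TEST(CallbackExpectationSketch, FiresAtLeastOnce) {
      MockSource source;
      EXPECT_CALL(source, InitSegmentReceived()).Times(testing::AtLeast(1));
      // Equivalent of base::Bind(&MockSource::InitSegmentReceived,
      //                          base::Unretained(&source)).
      AppendData([&source] { source.InitSegmentReceived(); });
    }
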
diff --git a/media/filters/pipeline_integration_test_base.cc b/media/filters/pipeline_integration_test_base.cc
index a6352868fa..3a3c91fa8d 100644
--- a/media/filters/pipeline_integration_test_base.cc
+++ b/media/filters/pipeline_integration_test_base.cc
@@ -7,7 +7,6 @@
#include "base/bind.h"
#include "base/memory/scoped_vector.h"
#include "media/base/media_log.h"
-#include "media/base/time_delta_interpolator.h"
#include "media/filters/audio_renderer_impl.h"
#include "media/filters/chunk_demuxer.h"
#include "media/filters/ffmpeg_audio_decoder.h"
@@ -15,6 +14,7 @@
#include "media/filters/ffmpeg_video_decoder.h"
#include "media/filters/file_data_source.h"
#include "media/filters/opus_audio_decoder.h"
+#include "media/filters/renderer_impl.h"
#include "media/filters/vpx_video_decoder.h"
using ::testing::_;
@@ -37,10 +37,6 @@ PipelineIntegrationTestBase::PipelineIntegrationTestBase()
last_video_frame_format_(VideoFrame::UNKNOWN),
hardware_config_(AudioParameters(), AudioParameters()) {
base::MD5Init(&md5_context_);
-
- // Prevent non-deterministic buffering state callbacks from firing (e.g., slow
- // machine, valgrind).
- pipeline_->set_underflow_disabled_for_testing(true);
}
PipelineIntegrationTestBase::~PipelineIntegrationTestBase() {
@@ -113,8 +109,10 @@ bool PipelineIntegrationTestBase::Start(const base::FilePath& file_path,
.WillRepeatedly(SaveArg<0>(&metadata_));
EXPECT_CALL(*this, OnBufferingStateChanged(BUFFERING_HAVE_ENOUGH))
.Times(AtMost(1));
+ CreateDemuxer(file_path);
pipeline_->Start(
- CreateFilterCollection(file_path, NULL),
+ demuxer_.get(),
+ CreateRenderer(NULL),
base::Bind(&PipelineIntegrationTestBase::OnEnded, base::Unretained(this)),
base::Bind(&PipelineIntegrationTestBase::OnError, base::Unretained(this)),
QuitOnStatusCB(expected_status),
@@ -122,7 +120,9 @@ bool PipelineIntegrationTestBase::Start(const base::FilePath& file_path,
base::Unretained(this)),
base::Bind(&PipelineIntegrationTestBase::OnBufferingStateChanged,
base::Unretained(this)),
- base::Closure());
+ base::Closure(),
+ base::Bind(&PipelineIntegrationTestBase::OnAddTextTrack,
+ base::Unretained(this)));
message_loop_.Run();
return (pipeline_status_ == PIPELINE_OK);
}
@@ -132,10 +132,6 @@ bool PipelineIntegrationTestBase::Start(const base::FilePath& file_path,
kTestType test_type) {
hashing_enabled_ = test_type == kHashed;
clockless_playback_ = test_type == kClockless;
- if (clockless_playback_) {
- pipeline_->SetTimeDeltaInterpolatorForTesting(
- new TimeDeltaInterpolator(&dummy_clock_));
- }
return Start(file_path, expected_status);
}
@@ -150,8 +146,11 @@ bool PipelineIntegrationTestBase::Start(const base::FilePath& file_path,
.WillRepeatedly(SaveArg<0>(&metadata_));
EXPECT_CALL(*this, OnBufferingStateChanged(BUFFERING_HAVE_ENOUGH))
.Times(AtMost(1));
+
+ CreateDemuxer(file_path);
pipeline_->Start(
- CreateFilterCollection(file_path, decryptor),
+ demuxer_.get(),
+ CreateRenderer(decryptor),
base::Bind(&PipelineIntegrationTestBase::OnEnded, base::Unretained(this)),
base::Bind(&PipelineIntegrationTestBase::OnError, base::Unretained(this)),
base::Bind(&PipelineIntegrationTestBase::OnStatusCallback,
@@ -160,7 +159,9 @@ bool PipelineIntegrationTestBase::Start(const base::FilePath& file_path,
base::Unretained(this)),
base::Bind(&PipelineIntegrationTestBase::OnBufferingStateChanged,
base::Unretained(this)),
- base::Closure());
+ base::Closure(),
+ base::Bind(&PipelineIntegrationTestBase::OnAddTextTrack,
+ base::Unretained(this)));
message_loop_.Run();
return (pipeline_status_ == PIPELINE_OK);
}
@@ -219,10 +220,8 @@ bool PipelineIntegrationTestBase::WaitUntilCurrentTimeIsAfter(
return (pipeline_status_ == PIPELINE_OK);
}
-scoped_ptr<FilterCollection>
-PipelineIntegrationTestBase::CreateFilterCollection(
- const base::FilePath& file_path,
- Decryptor* decryptor) {
+void PipelineIntegrationTestBase::CreateDemuxer(
+ const base::FilePath& file_path) {
FileDataSource* file_data_source = new FileDataSource();
CHECK(file_data_source->Initialize(file_path)) << "Is " << file_path.value()
<< " missing?";
@@ -230,23 +229,15 @@ PipelineIntegrationTestBase::CreateFilterCollection(
Demuxer::NeedKeyCB need_key_cb = base::Bind(
&PipelineIntegrationTestBase::DemuxerNeedKeyCB, base::Unretained(this));
- scoped_ptr<Demuxer> demuxer(
- new FFmpegDemuxer(message_loop_.message_loop_proxy(),
- data_source_.get(),
- need_key_cb,
- new MediaLog()));
- return CreateFilterCollection(demuxer.Pass(), decryptor);
+ demuxer_ =
+ scoped_ptr<Demuxer>(new FFmpegDemuxer(message_loop_.message_loop_proxy(),
+ data_source_.get(),
+ need_key_cb,
+ new MediaLog()));
}
-scoped_ptr<FilterCollection>
-PipelineIntegrationTestBase::CreateFilterCollection(
- scoped_ptr<Demuxer> demuxer,
+scoped_ptr<Renderer> PipelineIntegrationTestBase::CreateRenderer(
Decryptor* decryptor) {
- demuxer_ = demuxer.Pass();
-
- scoped_ptr<FilterCollection> collection(new FilterCollection());
- collection->SetDemuxer(demuxer_.get());
-
ScopedVector<VideoDecoder> video_decoders;
#if !defined(MEDIA_DISABLE_LIBVPX)
video_decoders.push_back(
@@ -256,7 +247,7 @@ PipelineIntegrationTestBase::CreateFilterCollection(
new FFmpegVideoDecoder(message_loop_.message_loop_proxy()));
// Disable frame dropping if hashing is enabled.
- scoped_ptr<VideoRenderer> renderer(new VideoRendererImpl(
+ scoped_ptr<VideoRenderer> video_renderer(new VideoRendererImpl(
message_loop_.message_loop_proxy(),
video_decoders.Pass(),
base::Bind(&PipelineIntegrationTestBase::SetDecryptor,
@@ -264,8 +255,8 @@ PipelineIntegrationTestBase::CreateFilterCollection(
decryptor),
base::Bind(&PipelineIntegrationTestBase::OnVideoRendererPaint,
base::Unretained(this)),
- false));
- collection->SetVideoRenderer(renderer.Pass());
+ false,
+ new MediaLog()));
if (!clockless_playback_) {
audio_sink_ = new NullAudioSink(message_loop_.message_loop_proxy());
@@ -286,7 +277,7 @@ PipelineIntegrationTestBase::CreateFilterCollection(
512);
hardware_config_.UpdateOutputConfig(out_params);
- AudioRendererImpl* audio_renderer_impl = new AudioRendererImpl(
+ scoped_ptr<AudioRenderer> audio_renderer(new AudioRendererImpl(
message_loop_.message_loop_proxy(),
(clockless_playback_)
? static_cast<AudioRendererSink*>(clockless_audio_sink_.get())
@@ -295,13 +286,25 @@ PipelineIntegrationTestBase::CreateFilterCollection(
base::Bind(&PipelineIntegrationTestBase::SetDecryptor,
base::Unretained(this),
decryptor),
- &hardware_config_);
+ hardware_config_,
+ new MediaLog()));
if (hashing_enabled_)
audio_sink_->StartAudioHashForTesting();
- scoped_ptr<AudioRenderer> audio_renderer(audio_renderer_impl);
- collection->SetAudioRenderer(audio_renderer.Pass());
- return collection.Pass();
+ scoped_ptr<RendererImpl> renderer_impl(
+ new RendererImpl(message_loop_.message_loop_proxy(),
+ demuxer_.get(),
+ audio_renderer.Pass(),
+ video_renderer.Pass()));
+
+ // Prevent non-deterministic buffering state callbacks from firing (e.g., slow
+ // machine, valgrind).
+ renderer_impl->DisableUnderflowForTesting();
+
+ if (clockless_playback_)
+ renderer_impl->EnableClocklessVideoPlaybackForTesting();
+
+ return renderer_impl.PassAs<Renderer>();
}
void PipelineIntegrationTestBase::SetDecryptor(
diff --git a/media/filters/pipeline_integration_test_base.h b/media/filters/pipeline_integration_test_base.h
index f6b8d244d8..cfa9f30cd8 100644
--- a/media/filters/pipeline_integration_test_base.h
+++ b/media/filters/pipeline_integration_test_base.h
@@ -11,9 +11,10 @@
#include "media/audio/null_audio_sink.h"
#include "media/base/audio_hardware_config.h"
#include "media/base/demuxer.h"
-#include "media/base/filter_collection.h"
#include "media/base/media_keys.h"
#include "media/base/pipeline.h"
+#include "media/base/text_track.h"
+#include "media/base/text_track_config.h"
#include "media/base/video_frame.h"
#include "media/filters/video_renderer_impl.h"
#include "testing/gmock/include/gmock/gmock.h"
@@ -77,8 +78,6 @@ class PipelineIntegrationTestBase {
bool Seek(base::TimeDelta seek_time);
void Stop();
bool WaitUntilCurrentTimeIsAfter(const base::TimeDelta& wait_time);
- scoped_ptr<FilterCollection> CreateFilterCollection(
- const base::FilePath& file_path, Decryptor* decryptor);
// Returns the MD5 hash of all video frames seen. Should only be called once
// after playback completes. First time hashes should be generated with
@@ -126,8 +125,12 @@ class PipelineIntegrationTestBase {
void OnEnded();
void OnError(PipelineStatus status);
void QuitAfterCurrentTimeTask(const base::TimeDelta& quit_time);
- scoped_ptr<FilterCollection> CreateFilterCollection(
- scoped_ptr<Demuxer> demuxer, Decryptor* decryptor);
+
+ // Creates Demuxer and sets |demuxer_|.
+ void CreateDemuxer(const base::FilePath& file_path);
+
+ // Creates and returns a Renderer.
+ scoped_ptr<Renderer> CreateRenderer(Decryptor* decryptor);
void SetDecryptor(Decryptor* decryptor,
const DecryptorReadyCB& decryptor_ready_cb);
@@ -136,6 +139,9 @@ class PipelineIntegrationTestBase {
MOCK_METHOD1(OnMetadata, void(PipelineMetadata));
MOCK_METHOD1(OnBufferingStateChanged, void(BufferingState));
MOCK_METHOD1(DecryptorAttached, void(bool));
+ MOCK_METHOD2(OnAddTextTrack,
+ void(const TextTrackConfig& config,
+ const AddTextTrackDoneCB& done_cb));
};
} // namespace media
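
The header now splits the old CreateFilterCollection() into CreateDemuxer(), which populates |demuxer_|, and CreateRenderer(), which builds a Renderer that borrows the demuxer. A sketch of that ownership split under the same assumption the tests rely on, namely that the renderer never outlives the fixture-owned demuxer (types hypothetical):

    #include <memory>

    struct Demuxer {};
    struct Renderer { Demuxer* streams; };  // Borrowed, not owned.

    struct Fixture {
      std::unique_ptr<Demuxer> demuxer_;

      void CreateDemuxer() { demuxer_.reset(new Demuxer()); }

      std::unique_ptr<Renderer> CreateRenderer() {
        // The Renderer must not outlive |demuxer_|; the pipeline guarantees
        // this by stopping the renderer before the fixture is torn down.
        return std::unique_ptr<Renderer>(new Renderer{demuxer_.get()});
      }
    };

    int main() {
      Fixture f;
      f.CreateDemuxer();
      std::unique_ptr<Renderer> r = f.CreateRenderer();
      return r->streams == f.demuxer_.get() ? 0 : 1;
    }
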
diff --git a/media/filters/renderer_impl.cc b/media/filters/renderer_impl.cc
new file mode 100644
index 0000000000..e6253e7512
--- /dev/null
+++ b/media/filters/renderer_impl.cc
@@ -0,0 +1,505 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/filters/renderer_impl.h"
+
+#include "base/bind.h"
+#include "base/callback.h"
+#include "base/callback_helpers.h"
+#include "base/compiler_specific.h"
+#include "base/location.h"
+#include "base/single_thread_task_runner.h"
+#include "media/base/audio_renderer.h"
+#include "media/base/demuxer_stream_provider.h"
+#include "media/base/time_source.h"
+#include "media/base/video_renderer.h"
+#include "media/base/wall_clock_time_source.h"
+
+namespace media {
+
+RendererImpl::RendererImpl(
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
+ DemuxerStreamProvider* demuxer_stream_provider,
+ scoped_ptr<AudioRenderer> audio_renderer,
+ scoped_ptr<VideoRenderer> video_renderer)
+ : state_(STATE_UNINITIALIZED),
+ task_runner_(task_runner),
+ demuxer_stream_provider_(demuxer_stream_provider),
+ audio_renderer_(audio_renderer.Pass()),
+ video_renderer_(video_renderer.Pass()),
+ time_source_(NULL),
+ time_ticking_(false),
+ audio_buffering_state_(BUFFERING_HAVE_NOTHING),
+ video_buffering_state_(BUFFERING_HAVE_NOTHING),
+ audio_ended_(false),
+ video_ended_(false),
+ underflow_disabled_for_testing_(false),
+ clockless_video_playback_enabled_for_testing_(false),
+ weak_factory_(this),
+ weak_this_(weak_factory_.GetWeakPtr()) {
+ DVLOG(1) << __FUNCTION__;
+}
+
+RendererImpl::~RendererImpl() {
+ DVLOG(1) << __FUNCTION__;
+ DCHECK(task_runner_->BelongsToCurrentThread());
+
+ // Tear down in the opposite order of construction, as |video_renderer_| may
+ // still need |time_source_| (which |audio_renderer_| may provide) to be
+ // alive.
+ video_renderer_.reset();
+ audio_renderer_.reset();
+
+ FireAllPendingCallbacks();
+}
+
+void RendererImpl::Initialize(const base::Closure& init_cb,
+ const StatisticsCB& statistics_cb,
+ const base::Closure& ended_cb,
+ const PipelineStatusCB& error_cb,
+ const BufferingStateCB& buffering_state_cb) {
+ DVLOG(1) << __FUNCTION__;
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ DCHECK_EQ(state_, STATE_UNINITIALIZED) << state_;
+ DCHECK(!init_cb.is_null());
+ DCHECK(!statistics_cb.is_null());
+ DCHECK(!ended_cb.is_null());
+ DCHECK(!error_cb.is_null());
+ DCHECK(!buffering_state_cb.is_null());
+ DCHECK(demuxer_stream_provider_->GetStream(DemuxerStream::AUDIO) ||
+ demuxer_stream_provider_->GetStream(DemuxerStream::VIDEO));
+
+ statistics_cb_ = statistics_cb;
+ ended_cb_ = ended_cb;
+ error_cb_ = error_cb;
+ buffering_state_cb_ = buffering_state_cb;
+
+ init_cb_ = init_cb;
+ state_ = STATE_INITIALIZING;
+ InitializeAudioRenderer();
+}
+
+void RendererImpl::Flush(const base::Closure& flush_cb) {
+ DVLOG(1) << __FUNCTION__;
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ DCHECK_EQ(state_, STATE_PLAYING) << state_;
+ DCHECK(flush_cb_.is_null());
+
+ flush_cb_ = flush_cb;
+ state_ = STATE_FLUSHING;
+
+ if (time_ticking_)
+ PausePlayback();
+
+ FlushAudioRenderer();
+}
+
+void RendererImpl::StartPlayingFrom(base::TimeDelta time) {
+ DVLOG(1) << __FUNCTION__;
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ DCHECK_EQ(state_, STATE_PLAYING) << state_;
+
+ time_source_->SetMediaTime(time);
+
+ if (audio_renderer_)
+ audio_renderer_->StartPlaying();
+ if (video_renderer_)
+ video_renderer_->StartPlayingFrom(time);
+}
+
+void RendererImpl::SetPlaybackRate(float playback_rate) {
+ DVLOG(1) << __FUNCTION__ << "(" << playback_rate << ")";
+ DCHECK(task_runner_->BelongsToCurrentThread());
+
+ // Playback rate changes are only carried out while playing.
+ if (state_ != STATE_PLAYING)
+ return;
+
+ time_source_->SetPlaybackRate(playback_rate);
+}
+
+void RendererImpl::SetVolume(float volume) {
+ DVLOG(1) << __FUNCTION__;
+ DCHECK(task_runner_->BelongsToCurrentThread());
+
+ if (audio_renderer_)
+ audio_renderer_->SetVolume(volume);
+}
+
+base::TimeDelta RendererImpl::GetMediaTime() {
+ // No BelongsToCurrentThread() checking because this can be called from other
+ // threads.
+ return time_source_->CurrentMediaTime();
+}
+
+bool RendererImpl::HasAudio() {
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ return audio_renderer_ != NULL;
+}
+
+bool RendererImpl::HasVideo() {
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ return video_renderer_ != NULL;
+}
+
+void RendererImpl::SetCdm(MediaKeys* cdm) {
+ DVLOG(1) << __FUNCTION__;
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ // TODO(xhwang): Explore the possibility of moving CDM setting from
+ // WebMediaPlayerImpl to this class. See http://crbug.com/401264
+ NOTREACHED();
+}
+
+void RendererImpl::DisableUnderflowForTesting() {
+ DVLOG(1) << __FUNCTION__;
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ DCHECK_EQ(state_, STATE_UNINITIALIZED);
+
+ underflow_disabled_for_testing_ = true;
+}
+
+void RendererImpl::EnableClocklessVideoPlaybackForTesting() {
+ DVLOG(1) << __FUNCTION__;
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ DCHECK_EQ(state_, STATE_UNINITIALIZED);
+ DCHECK(underflow_disabled_for_testing_)
+ << "Underflow must be disabled for clockless video playback";
+
+ clockless_video_playback_enabled_for_testing_ = true;
+}
+
+base::TimeDelta RendererImpl::GetMediaTimeForSyncingVideo() {
+ // No BelongsToCurrentThread() checking because this can be called from other
+ // threads.
+ //
+ // TODO(scherkus): Currently called from VideoRendererImpl's internal thread,
+ // which should go away at some point http://crbug.com/110814
+ if (clockless_video_playback_enabled_for_testing_)
+ return base::TimeDelta::Max();
+
+ return time_source_->CurrentMediaTimeForSyncingVideo();
+}
+
+void RendererImpl::InitializeAudioRenderer() {
+ DVLOG(1) << __FUNCTION__;
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ DCHECK_EQ(state_, STATE_INITIALIZING) << state_;
+ DCHECK(!init_cb_.is_null());
+
+ PipelineStatusCB done_cb =
+ base::Bind(&RendererImpl::OnAudioRendererInitializeDone, weak_this_);
+
+ if (!demuxer_stream_provider_->GetStream(DemuxerStream::AUDIO)) {
+ audio_renderer_.reset();
+ task_runner_->PostTask(FROM_HERE, base::Bind(done_cb, PIPELINE_OK));
+ return;
+ }
+
+ audio_renderer_->Initialize(
+ demuxer_stream_provider_->GetStream(DemuxerStream::AUDIO),
+ done_cb,
+ base::Bind(&RendererImpl::OnUpdateStatistics, weak_this_),
+ base::Bind(&RendererImpl::OnBufferingStateChanged, weak_this_,
+ &audio_buffering_state_),
+ base::Bind(&RendererImpl::OnAudioRendererEnded, weak_this_),
+ base::Bind(&RendererImpl::OnError, weak_this_));
+}
+
+void RendererImpl::OnAudioRendererInitializeDone(PipelineStatus status) {
+ DVLOG(1) << __FUNCTION__ << ": " << status;
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ DCHECK_EQ(state_, STATE_INITIALIZING) << state_;
+ DCHECK(!init_cb_.is_null());
+
+ if (status != PIPELINE_OK) {
+ audio_renderer_.reset();
+ OnError(status);
+ return;
+ }
+
+ InitializeVideoRenderer();
+}
+
+void RendererImpl::InitializeVideoRenderer() {
+ DVLOG(1) << __FUNCTION__;
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ DCHECK_EQ(state_, STATE_INITIALIZING) << state_;
+ DCHECK(!init_cb_.is_null());
+
+ PipelineStatusCB done_cb =
+ base::Bind(&RendererImpl::OnVideoRendererInitializeDone, weak_this_);
+
+ if (!demuxer_stream_provider_->GetStream(DemuxerStream::VIDEO)) {
+ video_renderer_.reset();
+ task_runner_->PostTask(FROM_HERE, base::Bind(done_cb, PIPELINE_OK));
+ return;
+ }
+
+ video_renderer_->Initialize(
+ demuxer_stream_provider_->GetStream(DemuxerStream::VIDEO),
+ demuxer_stream_provider_->GetLiveness() ==
+ DemuxerStreamProvider::LIVENESS_LIVE,
+ done_cb,
+ base::Bind(&RendererImpl::OnUpdateStatistics, weak_this_),
+ base::Bind(&RendererImpl::OnBufferingStateChanged,
+ weak_this_,
+ &video_buffering_state_),
+ base::Bind(&RendererImpl::OnVideoRendererEnded, weak_this_),
+ base::Bind(&RendererImpl::OnError, weak_this_),
+ base::Bind(&RendererImpl::GetMediaTimeForSyncingVideo,
+ base::Unretained(this)));
+}
+
+void RendererImpl::OnVideoRendererInitializeDone(PipelineStatus status) {
+ DVLOG(1) << __FUNCTION__ << ": " << status;
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ DCHECK_EQ(state_, STATE_INITIALIZING) << state_;
+ DCHECK(!init_cb_.is_null());
+
+ if (status != PIPELINE_OK) {
+ audio_renderer_.reset();
+ video_renderer_.reset();
+ OnError(status);
+ return;
+ }
+
+ if (audio_renderer_) {
+ time_source_ = audio_renderer_->GetTimeSource();
+ } else {
+ wall_clock_time_source_.reset(new WallClockTimeSource());
+ time_source_ = wall_clock_time_source_.get();
+ }
+
+ state_ = STATE_PLAYING;
+ DCHECK(time_source_);
+ DCHECK(audio_renderer_ || video_renderer_);
+ base::ResetAndReturn(&init_cb_).Run();
+}
+
+void RendererImpl::FlushAudioRenderer() {
+ DVLOG(1) << __FUNCTION__;
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ DCHECK_EQ(state_, STATE_FLUSHING) << state_;
+ DCHECK(!flush_cb_.is_null());
+
+ if (!audio_renderer_) {
+ OnAudioRendererFlushDone();
+ return;
+ }
+
+ audio_renderer_->Flush(
+ base::Bind(&RendererImpl::OnAudioRendererFlushDone, weak_this_));
+}
+
+void RendererImpl::OnAudioRendererFlushDone() {
+ DVLOG(1) << __FUNCTION__;
+ DCHECK(task_runner_->BelongsToCurrentThread());
+
+ if (state_ == STATE_ERROR) {
+ DCHECK(flush_cb_.is_null());
+ return;
+ }
+
+ DCHECK_EQ(state_, STATE_FLUSHING) << state_;
+ DCHECK(!flush_cb_.is_null());
+
+ DCHECK_EQ(audio_buffering_state_, BUFFERING_HAVE_NOTHING);
+ audio_ended_ = false;
+ FlushVideoRenderer();
+}
+
+void RendererImpl::FlushVideoRenderer() {
+ DVLOG(1) << __FUNCTION__;
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ DCHECK_EQ(state_, STATE_FLUSHING) << state_;
+ DCHECK(!flush_cb_.is_null());
+
+ if (!video_renderer_) {
+ OnVideoRendererFlushDone();
+ return;
+ }
+
+ video_renderer_->Flush(
+ base::Bind(&RendererImpl::OnVideoRendererFlushDone, weak_this_));
+}
+
+void RendererImpl::OnVideoRendererFlushDone() {
+ DVLOG(1) << __FUNCTION__;
+ DCHECK(task_runner_->BelongsToCurrentThread());
+
+ if (state_ == STATE_ERROR) {
+ DCHECK(flush_cb_.is_null());
+ return;
+ }
+
+ DCHECK_EQ(state_, STATE_FLUSHING) << state_;
+ DCHECK(!flush_cb_.is_null());
+
+ DCHECK_EQ(video_buffering_state_, BUFFERING_HAVE_NOTHING);
+ video_ended_ = false;
+ state_ = STATE_PLAYING;
+ base::ResetAndReturn(&flush_cb_).Run();
+}
+
+void RendererImpl::OnUpdateStatistics(const PipelineStatistics& stats) {
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ statistics_cb_.Run(stats);
+}
+
+void RendererImpl::OnBufferingStateChanged(BufferingState* buffering_state,
+ BufferingState new_buffering_state) {
+ DVLOG(1) << __FUNCTION__ << "(" << *buffering_state << ", "
+ << new_buffering_state << ") "
+ << (buffering_state == &audio_buffering_state_ ? "audio" : "video");
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ bool was_waiting_for_enough_data = WaitingForEnoughData();
+
+ *buffering_state = new_buffering_state;
+
+ // Disable underflow by ignoring updates indicating that renderers have run
+ // out of data.
+ if (state_ == STATE_PLAYING && underflow_disabled_for_testing_ &&
+ time_ticking_) {
+ DVLOG(1) << "Update ignored because underflow is disabled for testing.";
+ return;
+ }
+
+ // Renderer underflowed.
+ if (!was_waiting_for_enough_data && WaitingForEnoughData()) {
+ PausePlayback();
+
+ // TODO(scherkus): Fire BUFFERING_HAVE_NOTHING callback to alert clients of
+ // underflow state http://crbug.com/144683
+ return;
+ }
+
+ // Renderer prerolled.
+ if (was_waiting_for_enough_data && !WaitingForEnoughData()) {
+ StartPlayback();
+ buffering_state_cb_.Run(BUFFERING_HAVE_ENOUGH);
+ return;
+ }
+}
+
+bool RendererImpl::WaitingForEnoughData() const {
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ if (state_ != STATE_PLAYING)
+ return false;
+ if (audio_renderer_ && audio_buffering_state_ != BUFFERING_HAVE_ENOUGH)
+ return true;
+ if (video_renderer_ && video_buffering_state_ != BUFFERING_HAVE_ENOUGH)
+ return true;
+ return false;
+}
+
+void RendererImpl::PausePlayback() {
+ DVLOG(1) << __FUNCTION__;
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ DCHECK(time_ticking_);
+ switch (state_) {
+ case STATE_PLAYING:
+ DCHECK(PlaybackHasEnded() || WaitingForEnoughData())
+ << "Playback should only pause due to ending or underflowing";
+ break;
+
+ case STATE_FLUSHING:
+ // It's OK to pause playback when flushing.
+ break;
+
+ case STATE_UNINITIALIZED:
+ case STATE_INITIALIZING:
+ case STATE_ERROR:
+ NOTREACHED() << "Invalid state: " << state_;
+ break;
+ }
+
+ time_ticking_ = false;
+ time_source_->StopTicking();
+}
+
+void RendererImpl::StartPlayback() {
+ DVLOG(1) << __FUNCTION__;
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ DCHECK_EQ(state_, STATE_PLAYING);
+ DCHECK(!time_ticking_);
+ DCHECK(!WaitingForEnoughData());
+
+ time_ticking_ = true;
+ time_source_->StartTicking();
+}
+
+void RendererImpl::OnAudioRendererEnded() {
+ DVLOG(1) << __FUNCTION__;
+ DCHECK(task_runner_->BelongsToCurrentThread());
+
+ if (state_ != STATE_PLAYING)
+ return;
+
+ DCHECK(!audio_ended_);
+ audio_ended_ = true;
+
+ RunEndedCallbackIfNeeded();
+}
+
+void RendererImpl::OnVideoRendererEnded() {
+ DVLOG(1) << __FUNCTION__;
+ DCHECK(task_runner_->BelongsToCurrentThread());
+
+ if (state_ != STATE_PLAYING)
+ return;
+
+ DCHECK(!video_ended_);
+ video_ended_ = true;
+
+ RunEndedCallbackIfNeeded();
+}
+
+bool RendererImpl::PlaybackHasEnded() const {
+ DVLOG(1) << __FUNCTION__;
+ DCHECK(task_runner_->BelongsToCurrentThread());
+
+ if (audio_renderer_ && !audio_ended_)
+ return false;
+
+ if (video_renderer_ && !video_ended_)
+ return false;
+
+ return true;
+}
+
+void RendererImpl::RunEndedCallbackIfNeeded() {
+ DVLOG(1) << __FUNCTION__;
+ DCHECK(task_runner_->BelongsToCurrentThread());
+
+ if (!PlaybackHasEnded())
+ return;
+
+ if (time_ticking_)
+ PausePlayback();
+
+ ended_cb_.Run();
+}
+
+void RendererImpl::OnError(PipelineStatus error) {
+ DVLOG(1) << __FUNCTION__ << "(" << error << ")";
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ DCHECK_NE(PIPELINE_OK, error) << "PIPELINE_OK isn't an error!";
+
+ state_ = STATE_ERROR;
+
+ // Pipeline will destroy |this| as the result of error.
+ base::ResetAndReturn(&error_cb_).Run(error);
+
+ FireAllPendingCallbacks();
+}
+
+void RendererImpl::FireAllPendingCallbacks() {
+ DCHECK(task_runner_->BelongsToCurrentThread());
+
+ if (!init_cb_.is_null())
+ base::ResetAndReturn(&init_cb_).Run();
+
+ if (!flush_cb_.is_null())
+ base::ResetAndReturn(&flush_cb_).Run();
+}
+
+} // namespace media
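
The heart of RendererImpl is the edge trigger in OnBufferingStateChanged(): it samples WaitingForEnoughData() before and after recording the new state and acts only on transitions, pausing on underflow and starting playback (plus reporting BUFFERING_HAVE_ENOUGH) once both renderers have prerolled. A self-contained sketch of just that transition logic, omitting the flushing and underflow-disabled-for-testing paths (names and the std::cout side effects are illustrative):

    #include <iostream>

    enum BufferingState { BUFFERING_HAVE_NOTHING, BUFFERING_HAVE_ENOUGH };

    struct BufferingEdgeTrigger {
      BufferingState audio = BUFFERING_HAVE_NOTHING;
      BufferingState video = BUFFERING_HAVE_NOTHING;

      bool WaitingForEnoughData() const {
        return audio != BUFFERING_HAVE_ENOUGH ||
               video != BUFFERING_HAVE_ENOUGH;
      }

      void OnBufferingStateChanged(BufferingState* state,
                                   BufferingState next) {
        const bool was_waiting = WaitingForEnoughData();
        *state = next;
        if (!was_waiting && WaitingForEnoughData())
          std::cout << "underflow -> PausePlayback()\n";
        else if (was_waiting && !WaitingForEnoughData())
          std::cout << "preroll complete -> StartPlayback() + HAVE_ENOUGH\n";
        // No edge: repeated HAVE_ENOUGH updates while playing are ignored.
      }
    };

    int main() {
      BufferingEdgeTrigger t;
      t.OnBufferingStateChanged(&t.audio, BUFFERING_HAVE_ENOUGH);   // waiting
      t.OnBufferingStateChanged(&t.video, BUFFERING_HAVE_ENOUGH);   // starts
      t.OnBufferingStateChanged(&t.audio, BUFFERING_HAVE_NOTHING);  // pauses
    }
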
diff --git a/media/filters/renderer_impl.h b/media/filters/renderer_impl.h
new file mode 100644
index 0000000000..c418233213
--- /dev/null
+++ b/media/filters/renderer_impl.h
@@ -0,0 +1,163 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_FILTERS_RENDERER_IMPL_H_
+#define MEDIA_FILTERS_RENDERER_IMPL_H_
+
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/weak_ptr.h"
+#include "base/synchronization/lock.h"
+#include "base/time/clock.h"
+#include "base/time/default_tick_clock.h"
+#include "base/time/time.h"
+#include "media/base/buffering_state.h"
+#include "media/base/media_export.h"
+#include "media/base/pipeline_status.h"
+#include "media/base/renderer.h"
+
+namespace base {
+class SingleThreadTaskRunner;
+}
+
+namespace media {
+
+class AudioRenderer;
+class DemuxerStreamProvider;
+class TimeSource;
+class VideoRenderer;
+class WallClockTimeSource;
+
+class MEDIA_EXPORT RendererImpl : public Renderer {
+ public:
+ // Renders audio/video streams in |demuxer_stream_provider| using
+ // the |audio_renderer| and |video_renderer| provided. All methods except
+ // GetMediaTime() run on the |task_runner|. GetMediaTime() runs on the
+ // render main thread because it's part of the JS sync API.
+ RendererImpl(const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
+ DemuxerStreamProvider* demuxer_stream_provider,
+ scoped_ptr<AudioRenderer> audio_renderer,
+ scoped_ptr<VideoRenderer> video_renderer);
+
+ virtual ~RendererImpl();
+
+ // Renderer implementation.
+ virtual void Initialize(const base::Closure& init_cb,
+ const StatisticsCB& statistics_cb,
+ const base::Closure& ended_cb,
+ const PipelineStatusCB& error_cb,
+ const BufferingStateCB& buffering_state_cb) OVERRIDE;
+ virtual void Flush(const base::Closure& flush_cb) OVERRIDE;
+ virtual void StartPlayingFrom(base::TimeDelta time) OVERRIDE;
+ virtual void SetPlaybackRate(float playback_rate) OVERRIDE;
+ virtual void SetVolume(float volume) OVERRIDE;
+ virtual base::TimeDelta GetMediaTime() OVERRIDE;
+ virtual bool HasAudio() OVERRIDE;
+ virtual bool HasVideo() OVERRIDE;
+ virtual void SetCdm(MediaKeys* cdm) OVERRIDE;
+
+ // Helper functions for testing purposes. Must be called before Initialize().
+ void DisableUnderflowForTesting();
+ void EnableClocklessVideoPlaybackForTesting();
+
+ private:
+ enum State {
+ STATE_UNINITIALIZED,
+ STATE_INITIALIZING,
+ STATE_FLUSHING,
+ STATE_PLAYING,
+ STATE_ERROR
+ };
+
+ base::TimeDelta GetMediaTimeForSyncingVideo();
+
+ // Helper functions and callbacks for Initialize().
+ void InitializeAudioRenderer();
+ void OnAudioRendererInitializeDone(PipelineStatus status);
+ void InitializeVideoRenderer();
+ void OnVideoRendererInitializeDone(PipelineStatus status);
+
+ // Helper functions and callbacks for Flush().
+ void FlushAudioRenderer();
+ void OnAudioRendererFlushDone();
+ void FlushVideoRenderer();
+ void OnVideoRendererFlushDone();
+
+ // Callback executed by filters to update statistics.
+ void OnUpdateStatistics(const PipelineStatistics& stats);
+
+ // Collection of callback methods and helpers for tracking changes in
+ // buffering state and transitions between the paused/underflow and playing
+ // states.
+ //
+ // While in STATE_PLAYING:
+ // - A waiting to non-waiting transition indicates preroll has completed
+ // and StartPlayback() should be called
+ // - A non-waiting to waiting transition indicates underflow has occurred
+ // and PausePlayback() should be called
+ void OnBufferingStateChanged(BufferingState* buffering_state,
+ BufferingState new_buffering_state);
+ bool WaitingForEnoughData() const;
+ void PausePlayback();
+ void StartPlayback();
+
+ // Callbacks executed when a renderer has ended.
+ void OnAudioRendererEnded();
+ void OnVideoRendererEnded();
+ bool PlaybackHasEnded() const;
+ void RunEndedCallbackIfNeeded();
+
+ // Callback executed when a runtime error happens.
+ void OnError(PipelineStatus error);
+
+ void FireAllPendingCallbacks();
+
+ State state_;
+
+ // Task runner used to execute pipeline tasks.
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
+
+ DemuxerStreamProvider* demuxer_stream_provider_;
+
+ // Permanent callbacks to notify various renderer states/stats.
+ StatisticsCB statistics_cb_;
+ base::Closure ended_cb_;
+ PipelineStatusCB error_cb_;
+ BufferingStateCB buffering_state_cb_;
+
+ // Temporary callback used for Initialize() and Flush().
+ base::Closure init_cb_;
+ base::Closure flush_cb_;
+
+ scoped_ptr<AudioRenderer> audio_renderer_;
+ scoped_ptr<VideoRenderer> video_renderer_;
+
+ // Renderer-provided time source used to control playback.
+ TimeSource* time_source_;
+ scoped_ptr<WallClockTimeSource> wall_clock_time_source_;
+ bool time_ticking_;
+
+ // The time to start playback from after starting/seeking has completed.
+ base::TimeDelta start_time_;
+
+ BufferingState audio_buffering_state_;
+ BufferingState video_buffering_state_;
+
+ // Whether we've received the audio/video ended events.
+ bool audio_ended_;
+ bool video_ended_;
+
+ bool underflow_disabled_for_testing_;
+ bool clockless_video_playback_enabled_for_testing_;
+
+ // NOTE: Weak pointers must be invalidated before all other member variables.
+ base::WeakPtrFactory<RendererImpl> weak_factory_;
+ base::WeakPtr<RendererImpl> weak_this_;
+
+ DISALLOW_COPY_AND_ASSIGN(RendererImpl);
+};
+
+} // namespace media
+
+#endif // MEDIA_FILTERS_RENDERER_IMPL_H_
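
The destructor comment in renderer_impl.cc encodes a lifetime subtlety visible in this header: |time_source_| may be a raw pointer obtained from |audio_renderer_|, and |video_renderer_| can still dereference it while tearing down, so the scoped_ptr members are reset explicitly in reverse dependency order rather than relying on declaration order. A minimal illustration of the hazard (types hypothetical):

    #include <memory>

    struct TimeSource { long ticks = 0; };

    struct AudioRenderer {
      TimeSource time_source;  // |time_source_| may point here.
    };

    struct VideoRenderer {
      explicit VideoRenderer(TimeSource* ts) : time_source(ts) {}
      ~VideoRenderer() { (void)time_source->ticks; }  // Reads during teardown.
      TimeSource* time_source;
    };

    struct Renderer {
      std::unique_ptr<AudioRenderer> audio;
      std::unique_ptr<VideoRenderer> video;

      ~Renderer() {
        // Explicit order, mirroring RendererImpl::~RendererImpl(): destroy
        // the dependent object before the object that owns its clock.
        video.reset();
        audio.reset();
      }
    };

    int main() {
      Renderer r;
      r.audio.reset(new AudioRenderer());
      r.video.reset(new VideoRenderer(&r.audio->time_source));
      return 0;  // ~Renderer() runs: the clock's user goes first.
    }
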
diff --git a/media/filters/renderer_impl_unittest.cc b/media/filters/renderer_impl_unittest.cc
new file mode 100644
index 0000000000..7f74d15496
--- /dev/null
+++ b/media/filters/renderer_impl_unittest.cc
@@ -0,0 +1,434 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <vector>
+
+#include "base/bind.h"
+#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "base/test/simple_test_tick_clock.h"
+#include "media/base/gmock_callback_support.h"
+#include "media/base/mock_filters.h"
+#include "media/base/test_helpers.h"
+#include "media/filters/renderer_impl.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using ::testing::_;
+using ::testing::DoAll;
+using ::testing::InSequence;
+using ::testing::Mock;
+using ::testing::Return;
+using ::testing::SaveArg;
+using ::testing::StrictMock;
+
+namespace media {
+
+const int64 kStartPlayingTimeInMs = 100;
+
+ACTION_P2(SetBufferingState, cb, buffering_state) {
+ cb->Run(buffering_state);
+}
+
+ACTION_P2(AudioError, cb, error) {
+ cb->Run(error);
+}
+
+class RendererImplTest : public ::testing::Test {
+ public:
+ // Used for setting expectations on pipeline callbacks. Using a StrictMock
+ // also lets us test for missing callbacks.
+ class CallbackHelper {
+ public:
+ CallbackHelper() {}
+ virtual ~CallbackHelper() {}
+
+ MOCK_METHOD0(OnInitialize, void());
+ MOCK_METHOD0(OnFlushed, void());
+ MOCK_METHOD0(OnEnded, void());
+ MOCK_METHOD1(OnError, void(PipelineStatus));
+ MOCK_METHOD1(OnUpdateStatistics, void(const PipelineStatistics&));
+ MOCK_METHOD1(OnBufferingStateChange, void(BufferingState));
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(CallbackHelper);
+ };
+
+ RendererImplTest()
+ : demuxer_(new StrictMock<MockDemuxer>()),
+ video_renderer_(new StrictMock<MockVideoRenderer>()),
+ audio_renderer_(new StrictMock<MockAudioRenderer>()),
+ renderer_impl_(
+ new RendererImpl(message_loop_.message_loop_proxy(),
+ demuxer_.get(),
+ scoped_ptr<AudioRenderer>(audio_renderer_),
+ scoped_ptr<VideoRenderer>(video_renderer_))) {
+ // CreateAudioStream() and CreateVideoStream() add overriding expectations
+ // for expected non-NULL streams.
+ DemuxerStream* null_pointer = NULL;
+ EXPECT_CALL(*demuxer_, GetStream(_))
+ .WillRepeatedly(Return(null_pointer));
+ EXPECT_CALL(*demuxer_, GetLiveness())
+ .WillRepeatedly(Return(Demuxer::LIVENESS_UNKNOWN));
+ }
+
+ virtual ~RendererImplTest() {
+ renderer_impl_.reset();
+ base::RunLoop().RunUntilIdle();
+ }
+
+ protected:
+ typedef std::vector<MockDemuxerStream*> MockDemuxerStreamVector;
+
+ scoped_ptr<StrictMock<MockDemuxerStream> > CreateStream(
+ DemuxerStream::Type type) {
+ scoped_ptr<StrictMock<MockDemuxerStream> > stream(
+ new StrictMock<MockDemuxerStream>(type));
+ return stream.Pass();
+ }
+
+ // Sets up expectations to allow the audio renderer to initialize.
+ void SetAudioRendererInitializeExpectations(PipelineStatus status) {
+ EXPECT_CALL(*audio_renderer_,
+ Initialize(audio_stream_.get(), _, _, _, _, _))
+ .WillOnce(DoAll(SaveArg<3>(&audio_buffering_state_cb_),
+ SaveArg<4>(&audio_ended_cb_),
+ SaveArg<5>(&audio_error_cb_),
+ RunCallback<1>(status)));
+ }
+
+ // Sets up expectations to allow the video renderer to initialize.
+ void SetVideoRendererInitializeExpectations(PipelineStatus status) {
+ EXPECT_CALL(*video_renderer_,
+ Initialize(video_stream_.get(), _, _, _, _, _, _, _))
+ .WillOnce(DoAll(SaveArg<4>(&video_buffering_state_cb_),
+ SaveArg<5>(&video_ended_cb_),
+ RunCallback<2>(status)));
+ }
+
+ void InitializeAndExpect(PipelineStatus start_status) {
+ if (start_status != PIPELINE_OK)
+ EXPECT_CALL(callbacks_, OnError(start_status));
+
+ EXPECT_CALL(callbacks_, OnInitialize());
+
+ if (start_status == PIPELINE_OK && audio_stream_) {
+ EXPECT_CALL(*audio_renderer_, GetTimeSource())
+ .WillOnce(Return(&time_source_));
+ }
+
+ renderer_impl_->Initialize(
+ base::Bind(&CallbackHelper::OnInitialize,
+ base::Unretained(&callbacks_)),
+ base::Bind(&CallbackHelper::OnUpdateStatistics,
+ base::Unretained(&callbacks_)),
+ base::Bind(&CallbackHelper::OnEnded, base::Unretained(&callbacks_)),
+ base::Bind(&CallbackHelper::OnError, base::Unretained(&callbacks_)),
+ base::Bind(&CallbackHelper::OnBufferingStateChange,
+ base::Unretained(&callbacks_)));
+ base::RunLoop().RunUntilIdle();
+ }
+
+ void CreateAudioStream() {
+ audio_stream_ = CreateStream(DemuxerStream::AUDIO);
+ streams_.push_back(audio_stream_.get());
+ EXPECT_CALL(*demuxer_, GetStream(DemuxerStream::AUDIO))
+ .WillRepeatedly(Return(audio_stream_.get()));
+ }
+
+ void CreateVideoStream() {
+ video_stream_ = CreateStream(DemuxerStream::VIDEO);
+ video_stream_->set_video_decoder_config(video_decoder_config_);
+ streams_.push_back(video_stream_.get());
+ EXPECT_CALL(*demuxer_, GetStream(DemuxerStream::VIDEO))
+ .WillRepeatedly(Return(video_stream_.get()));
+ }
+
+ void CreateAudioAndVideoStream() {
+ CreateAudioStream();
+ CreateVideoStream();
+ }
+
+ void InitializeWithAudio() {
+ CreateAudioStream();
+ SetAudioRendererInitializeExpectations(PIPELINE_OK);
+ InitializeAndExpect(PIPELINE_OK);
+ }
+
+ void InitializeWithVideo() {
+ CreateVideoStream();
+ SetVideoRendererInitializeExpectations(PIPELINE_OK);
+ InitializeAndExpect(PIPELINE_OK);
+ }
+
+ void InitializeWithAudioAndVideo() {
+ CreateAudioAndVideoStream();
+ SetAudioRendererInitializeExpectations(PIPELINE_OK);
+ SetVideoRendererInitializeExpectations(PIPELINE_OK);
+ InitializeAndExpect(PIPELINE_OK);
+ }
+
+ void Play() {
+ DCHECK(audio_stream_ || video_stream_);
+ EXPECT_CALL(callbacks_, OnBufferingStateChange(BUFFERING_HAVE_ENOUGH));
+
+ base::TimeDelta start_time(
+ base::TimeDelta::FromMilliseconds(kStartPlayingTimeInMs));
+
+ if (audio_stream_) {
+ EXPECT_CALL(time_source_, SetMediaTime(start_time));
+ EXPECT_CALL(time_source_, StartTicking());
+ EXPECT_CALL(*audio_renderer_, StartPlaying())
+ .WillOnce(SetBufferingState(&audio_buffering_state_cb_,
+ BUFFERING_HAVE_ENOUGH));
+ }
+
+ if (video_stream_) {
+ EXPECT_CALL(*video_renderer_, StartPlayingFrom(start_time))
+ .WillOnce(SetBufferingState(&video_buffering_state_cb_,
+ BUFFERING_HAVE_ENOUGH));
+ }
+
+ renderer_impl_->StartPlayingFrom(start_time);
+ base::RunLoop().RunUntilIdle();
+ }
+
+ void Flush(bool underflowed) {
+ if (audio_stream_) {
+ if (!underflowed)
+ EXPECT_CALL(time_source_, StopTicking());
+ EXPECT_CALL(*audio_renderer_, Flush(_))
+ .WillOnce(DoAll(SetBufferingState(&audio_buffering_state_cb_,
+ BUFFERING_HAVE_NOTHING),
+ RunClosure<0>()));
+ }
+
+ if (video_stream_) {
+ EXPECT_CALL(*video_renderer_, Flush(_))
+ .WillOnce(DoAll(SetBufferingState(&video_buffering_state_cb_,
+ BUFFERING_HAVE_NOTHING),
+ RunClosure<0>()));
+ }
+
+ EXPECT_CALL(callbacks_, OnFlushed());
+
+ renderer_impl_->Flush(
+ base::Bind(&CallbackHelper::OnFlushed, base::Unretained(&callbacks_)));
+ base::RunLoop().RunUntilIdle();
+ }
+
+ void SetPlaybackRate(float playback_rate) {
+ EXPECT_CALL(time_source_, SetPlaybackRate(playback_rate));
+ renderer_impl_->SetPlaybackRate(playback_rate);
+ base::RunLoop().RunUntilIdle();
+ }
+
+ int64 GetMediaTimeMs() {
+ return renderer_impl_->GetMediaTime().InMilliseconds();
+ }
+
+ bool IsMediaTimeAdvancing(float playback_rate) {
+ int64 start_time_ms = GetMediaTimeMs();
+ const int64 time_to_advance_ms = 100;
+
+ test_tick_clock_.Advance(
+ base::TimeDelta::FromMilliseconds(time_to_advance_ms));
+
+ if (GetMediaTimeMs() == start_time_ms + time_to_advance_ms * playback_rate)
+ return true;
+
+ DCHECK_EQ(start_time_ms, GetMediaTimeMs());
+ return false;
+ }
+
+ bool IsMediaTimeAdvancing() {
+ return IsMediaTimeAdvancing(1.0f);
+ }
+
+ // Fixture members.
+ base::MessageLoop message_loop_;
+ StrictMock<CallbackHelper> callbacks_;
+ base::SimpleTestTickClock test_tick_clock_;
+
+ scoped_ptr<StrictMock<MockDemuxer> > demuxer_;
+ StrictMock<MockVideoRenderer>* video_renderer_;
+ StrictMock<MockAudioRenderer>* audio_renderer_;
+ scoped_ptr<RendererImpl> renderer_impl_;
+
+ StrictMock<MockTimeSource> time_source_;
+ scoped_ptr<StrictMock<MockDemuxerStream> > audio_stream_;
+ scoped_ptr<StrictMock<MockDemuxerStream> > video_stream_;
+ MockDemuxerStreamVector streams_;
+ BufferingStateCB audio_buffering_state_cb_;
+ BufferingStateCB video_buffering_state_cb_;
+ base::Closure audio_ended_cb_;
+ base::Closure video_ended_cb_;
+ PipelineStatusCB audio_error_cb_;
+ VideoDecoderConfig video_decoder_config_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(RendererImplTest);
+};
+
+TEST_F(RendererImplTest, DestroyBeforeInitialize) {
+ // |renderer_impl_| will be destroyed in the dtor.
+}
+
+TEST_F(RendererImplTest, InitializeWithAudio) {
+ InitializeWithAudio();
+}
+
+TEST_F(RendererImplTest, InitializeWithVideo) {
+ InitializeWithVideo();
+}
+
+TEST_F(RendererImplTest, InitializeWithAudioVideo) {
+ InitializeWithAudioAndVideo();
+}
+
+TEST_F(RendererImplTest, InitializeWithAudio_Failed) {
+ CreateAudioStream();
+ SetAudioRendererInitializeExpectations(PIPELINE_ERROR_INITIALIZATION_FAILED);
+ InitializeAndExpect(PIPELINE_ERROR_INITIALIZATION_FAILED);
+}
+
+TEST_F(RendererImplTest, InitializeWithVideo_Failed) {
+ CreateVideoStream();
+ SetVideoRendererInitializeExpectations(PIPELINE_ERROR_INITIALIZATION_FAILED);
+ InitializeAndExpect(PIPELINE_ERROR_INITIALIZATION_FAILED);
+}
+
+TEST_F(RendererImplTest, InitializeWithAudioVideo_AudioRendererFailed) {
+ CreateAudioAndVideoStream();
+ SetAudioRendererInitializeExpectations(PIPELINE_ERROR_INITIALIZATION_FAILED);
+ // VideoRenderer::Initialize() should not be called.
+ InitializeAndExpect(PIPELINE_ERROR_INITIALIZATION_FAILED);
+}
+
+TEST_F(RendererImplTest, InitializeWithAudioVideo_VideoRendererFailed) {
+ CreateAudioAndVideoStream();
+ SetAudioRendererInitializeExpectations(PIPELINE_OK);
+ SetVideoRendererInitializeExpectations(PIPELINE_ERROR_INITIALIZATION_FAILED);
+ InitializeAndExpect(PIPELINE_ERROR_INITIALIZATION_FAILED);
+}
+
+TEST_F(RendererImplTest, StartPlayingFrom) {
+ InitializeWithAudioAndVideo();
+ Play();
+}
+
+TEST_F(RendererImplTest, FlushAfterInitialization) {
+ InitializeWithAudioAndVideo();
+ Flush(true);
+}
+
+TEST_F(RendererImplTest, FlushAfterPlay) {
+ InitializeWithAudioAndVideo();
+ Play();
+ Flush(false);
+}
+
+TEST_F(RendererImplTest, FlushAfterUnderflow) {
+ InitializeWithAudioAndVideo();
+ Play();
+
+ // Simulate underflow.
+ EXPECT_CALL(time_source_, StopTicking());
+ audio_buffering_state_cb_.Run(BUFFERING_HAVE_NOTHING);
+
+ // Flush while underflowed. We shouldn't call StopTicking() again.
+ Flush(true);
+}
+
+TEST_F(RendererImplTest, SetPlaybackRate) {
+ InitializeWithAudioAndVideo();
+ SetPlaybackRate(1.0f);
+ SetPlaybackRate(2.0f);
+}
+
+TEST_F(RendererImplTest, SetVolume) {
+ InitializeWithAudioAndVideo();
+ EXPECT_CALL(*audio_renderer_, SetVolume(2.0f));
+ renderer_impl_->SetVolume(2.0f);
+}
+
+TEST_F(RendererImplTest, AudioStreamEnded) {
+ InitializeWithAudio();
+ Play();
+
+ EXPECT_CALL(time_source_, StopTicking());
+ EXPECT_CALL(callbacks_, OnEnded());
+
+ audio_ended_cb_.Run();
+ base::RunLoop().RunUntilIdle();
+}
+
+TEST_F(RendererImplTest, VideoStreamEnded) {
+ InitializeWithVideo();
+ Play();
+
+ // The video-ended event won't affect |time_source_|.
+ EXPECT_CALL(callbacks_, OnEnded());
+
+ video_ended_cb_.Run();
+ base::RunLoop().RunUntilIdle();
+}
+
+TEST_F(RendererImplTest, AudioVideoStreamsEnded) {
+ InitializeWithAudioAndVideo();
+ Play();
+
+ // OnEnded() is called only when all streams have finished.
+ audio_ended_cb_.Run();
+ base::RunLoop().RunUntilIdle();
+
+ EXPECT_CALL(time_source_, StopTicking());
+ EXPECT_CALL(callbacks_, OnEnded());
+
+ video_ended_cb_.Run();
+ base::RunLoop().RunUntilIdle();
+}
+
+TEST_F(RendererImplTest, ErrorAfterInitialize) {
+ InitializeWithAudio();
+ EXPECT_CALL(callbacks_, OnError(PIPELINE_ERROR_DECODE));
+ audio_error_cb_.Run(PIPELINE_ERROR_DECODE);
+ base::RunLoop().RunUntilIdle();
+}
+
+TEST_F(RendererImplTest, ErrorDuringPlaying) {
+ InitializeWithAudio();
+ Play();
+
+ EXPECT_CALL(callbacks_, OnError(PIPELINE_ERROR_DECODE));
+ audio_error_cb_.Run(PIPELINE_ERROR_DECODE);
+ base::RunLoop().RunUntilIdle();
+}
+
+TEST_F(RendererImplTest, ErrorDuringFlush) {
+ InitializeWithAudio();
+ Play();
+
+ InSequence s;
+ EXPECT_CALL(time_source_, StopTicking());
+ EXPECT_CALL(*audio_renderer_, Flush(_)).WillOnce(DoAll(
+ AudioError(&audio_error_cb_, PIPELINE_ERROR_DECODE),
+ RunClosure<0>()));
+ EXPECT_CALL(callbacks_, OnError(PIPELINE_ERROR_DECODE));
+ EXPECT_CALL(callbacks_, OnFlushed());
+ renderer_impl_->Flush(
+ base::Bind(&CallbackHelper::OnFlushed, base::Unretained(&callbacks_)));
+ base::RunLoop().RunUntilIdle();
+}
+
+TEST_F(RendererImplTest, ErrorAfterFlush) {
+ InitializeWithAudio();
+ Play();
+ Flush(false);
+
+ EXPECT_CALL(callbacks_, OnError(PIPELINE_ERROR_DECODE));
+ audio_error_cb_.Run(PIPELINE_ERROR_DECODE);
+ base::RunLoop().RunUntilIdle();
+}
+
+} // namespace media
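
The fixture's central technique is capture-then-replay: the Initialize expectations use SaveArg to stash the callbacks RendererImpl hands its renderers (buffering, ended, error), and tests later invoke them directly to simulate asynchronous events without any real decoding. A compact gmock sketch of the same idea, with std::function standing in for base::Callback and hypothetical names:

    #include <functional>
    #include "gmock/gmock.h"
    #include "gtest/gtest.h"

    using ::testing::_;
    using ::testing::SaveArg;

    using ErrorCB = std::function<void(int)>;

    class MockRenderer {
     public:
      MOCK_METHOD1(Initialize, void(const ErrorCB& error_cb));
    };

    TEST(CaptureThenReplaySketch, SimulatesRuntimeError) {
      MockRenderer renderer;
      ErrorCB captured;
      EXPECT_CALL(renderer, Initialize(_)).WillOnce(SaveArg<0>(&captured));

      // Code under test registers its error handler...
      int last_error = 0;
      renderer.Initialize([&last_error](int e) { last_error = e; });

      // ...and the test replays the captured callback to simulate an
      // asynchronous decode error, as renderer_impl_unittest.cc does with
      // audio_error_cb_.
      captured(42);
      EXPECT_EQ(42, last_error);
    }
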
diff --git a/media/filters/skcanvas_video_renderer.cc b/media/filters/skcanvas_video_renderer.cc
index 003c1951a9..9bdd393e56 100644
--- a/media/filters/skcanvas_video_renderer.cc
+++ b/media/filters/skcanvas_video_renderer.cc
@@ -9,6 +9,7 @@
#include "media/base/yuv_convert.h"
#include "third_party/libyuv/include/libyuv.h"
#include "third_party/skia/include/core/SkCanvas.h"
+#include "third_party/skia/include/core/SkImageGenerator.h"
#include "ui/gfx/skbitmap_operations.h"
// Skia internal format depends on a platform. On Android it is ABGR, on others
@@ -28,24 +29,56 @@
namespace media {
static bool IsYUV(media::VideoFrame::Format format) {
- return format == media::VideoFrame::YV12 ||
- format == media::VideoFrame::YV16 ||
- format == media::VideoFrame::I420 ||
- format == media::VideoFrame::YV12A ||
- format == media::VideoFrame::YV12J ||
- format == media::VideoFrame::YV24;
+ switch (format) {
+ case VideoFrame::YV12:
+ case VideoFrame::YV16:
+ case VideoFrame::I420:
+ case VideoFrame::YV12A:
+ case VideoFrame::YV12J:
+ case VideoFrame::YV24:
+ case VideoFrame::NV12:
+ return true;
+ case VideoFrame::UNKNOWN:
+ case VideoFrame::NATIVE_TEXTURE:
+#if defined(VIDEO_HOLE)
+ case VideoFrame::HOLE:
+#endif // defined(VIDEO_HOLE)
+ return false;
+ }
+ NOTREACHED() << "Invalid videoframe format provided: " << format;
+ return false;
+}
+
+static bool IsJPEGColorSpace(media::VideoFrame::Format format) {
+ switch (format) {
+ case VideoFrame::YV12J:
+ return true;
+ case VideoFrame::YV12:
+ case VideoFrame::YV16:
+ case VideoFrame::I420:
+ case VideoFrame::YV12A:
+ case VideoFrame::YV24:
+ case VideoFrame::NV12:
+ case VideoFrame::UNKNOWN:
+ case VideoFrame::NATIVE_TEXTURE:
+#if defined(VIDEO_HOLE)
+ case VideoFrame::HOLE:
+#endif // defined(VIDEO_HOLE)
+ return false;
+ }
+ NOTREACHED() << "Invalid videoframe format provided: " << format;
+ return false;
}
static bool IsYUVOrNative(media::VideoFrame::Format format) {
return IsYUV(format) || format == media::VideoFrame::NATIVE_TEXTURE;
}
-// Converts a VideoFrame containing YUV data to a SkBitmap containing RGB data.
-//
-// |bitmap| will be (re)allocated to match the dimensions of |video_frame|.
-static void ConvertVideoFrameToBitmap(
+// Converts a |video_frame| to raw |rgb_pixels|.
+static void ConvertVideoFrameToRGBPixels(
const scoped_refptr<media::VideoFrame>& video_frame,
- SkBitmap* bitmap) {
+ void* rgb_pixels,
+ size_t row_bytes) {
DCHECK(IsYUVOrNative(video_frame->format()))
<< video_frame->format();
if (IsYUV(video_frame->format())) {
@@ -53,17 +86,6 @@ static void ConvertVideoFrameToBitmap(
video_frame->stride(media::VideoFrame::kVPlane));
}
- // Check if |bitmap| needs to be (re)allocated.
- if (bitmap->isNull() ||
- bitmap->width() != video_frame->visible_rect().width() ||
- bitmap->height() != video_frame->visible_rect().height()) {
- bitmap->allocN32Pixels(video_frame->visible_rect().width(),
- video_frame->visible_rect().height());
- bitmap->setIsVolatile(true);
- }
-
- bitmap->lockPixels();
-
size_t y_offset = 0;
size_t uv_offset = 0;
if (IsYUV(video_frame->format())) {
@@ -90,8 +112,8 @@ static void ConvertVideoFrameToBitmap(
video_frame->stride(media::VideoFrame::kUPlane),
video_frame->data(media::VideoFrame::kVPlane) + uv_offset,
video_frame->stride(media::VideoFrame::kVPlane),
- static_cast<uint8*>(bitmap->getPixels()),
- bitmap->rowBytes(),
+ static_cast<uint8*>(rgb_pixels),
+ row_bytes,
video_frame->visible_rect().width(),
video_frame->visible_rect().height());
break;
@@ -101,12 +123,12 @@ static void ConvertVideoFrameToBitmap(
video_frame->data(media::VideoFrame::kYPlane) + y_offset,
video_frame->data(media::VideoFrame::kUPlane) + uv_offset,
video_frame->data(media::VideoFrame::kVPlane) + uv_offset,
- static_cast<uint8*>(bitmap->getPixels()),
+ static_cast<uint8*>(rgb_pixels),
video_frame->visible_rect().width(),
video_frame->visible_rect().height(),
video_frame->stride(media::VideoFrame::kYPlane),
video_frame->stride(media::VideoFrame::kUPlane),
- bitmap->rowBytes(),
+ row_bytes,
media::YV12J);
break;
@@ -118,8 +140,8 @@ static void ConvertVideoFrameToBitmap(
video_frame->stride(media::VideoFrame::kUPlane),
video_frame->data(media::VideoFrame::kVPlane) + uv_offset,
video_frame->stride(media::VideoFrame::kVPlane),
- static_cast<uint8*>(bitmap->getPixels()),
- bitmap->rowBytes(),
+ static_cast<uint8*>(rgb_pixels),
+ row_bytes,
video_frame->visible_rect().width(),
video_frame->visible_rect().height());
break;
@@ -133,13 +155,13 @@ static void ConvertVideoFrameToBitmap(
video_frame->data(media::VideoFrame::kUPlane) + uv_offset,
video_frame->data(media::VideoFrame::kVPlane) + uv_offset,
video_frame->data(media::VideoFrame::kAPlane),
- static_cast<uint8*>(bitmap->getPixels()),
+ static_cast<uint8*>(rgb_pixels),
video_frame->visible_rect().width(),
video_frame->visible_rect().height(),
video_frame->stride(media::VideoFrame::kYPlane),
video_frame->stride(media::VideoFrame::kUPlane),
video_frame->stride(media::VideoFrame::kAPlane),
- bitmap->rowBytes(),
+ row_bytes,
media::YV12);
break;
@@ -151,42 +173,128 @@ static void ConvertVideoFrameToBitmap(
video_frame->stride(media::VideoFrame::kUPlane),
video_frame->data(media::VideoFrame::kVPlane) + uv_offset,
video_frame->stride(media::VideoFrame::kVPlane),
- static_cast<uint8*>(bitmap->getPixels()),
- bitmap->rowBytes(),
+ static_cast<uint8*>(rgb_pixels),
+ row_bytes,
video_frame->visible_rect().width(),
video_frame->visible_rect().height());
#if SK_R32_SHIFT == 0 && SK_G32_SHIFT == 8 && SK_B32_SHIFT == 16 && \
SK_A32_SHIFT == 24
- libyuv::ARGBToABGR(
- static_cast<uint8*>(bitmap->getPixels()),
- bitmap->rowBytes(),
- static_cast<uint8*>(bitmap->getPixels()),
- bitmap->rowBytes(),
- video_frame->visible_rect().width(),
- video_frame->visible_rect().height());
+ libyuv::ARGBToABGR(static_cast<uint8*>(rgb_pixels),
+ row_bytes,
+ static_cast<uint8*>(rgb_pixels),
+ row_bytes,
+ video_frame->visible_rect().width(),
+ video_frame->visible_rect().height());
#endif
break;
- case media::VideoFrame::NATIVE_TEXTURE:
+ case media::VideoFrame::NATIVE_TEXTURE: {
DCHECK_EQ(video_frame->format(), media::VideoFrame::NATIVE_TEXTURE);
- video_frame->ReadPixelsFromNativeTexture(*bitmap);
+ SkBitmap tmp;
+ tmp.installPixels(
+ SkImageInfo::MakeN32Premul(video_frame->visible_rect().width(),
+ video_frame->visible_rect().height()),
+ rgb_pixels,
+ row_bytes);
+ video_frame->ReadPixelsFromNativeTexture(tmp);
break;
-
+ }
default:
NOTREACHED();
break;
}
- bitmap->notifyPixelsChanged();
- bitmap->unlockPixels();
}
+// Generates an RGB image from a VideoFrame.
+class VideoImageGenerator : public SkImageGenerator {
+ public:
+ VideoImageGenerator(const scoped_refptr<VideoFrame>& frame) : frame_(frame) {}
+ virtual ~VideoImageGenerator() {}
+
+ void set_frame(const scoped_refptr<VideoFrame>& frame) { frame_ = frame; }
+
+ protected:
+ virtual bool onGetInfo(SkImageInfo* info) OVERRIDE {
+ info->fWidth = frame_->visible_rect().width();
+ info->fHeight = frame_->visible_rect().height();
+ info->fColorType = kN32_SkColorType;
+ info->fAlphaType = kPremul_SkAlphaType;
+ return true;
+ }
+
+ virtual bool onGetPixels(const SkImageInfo& info,
+ void* pixels,
+ size_t row_bytes,
+ SkPMColor ctable[],
+ int* ctable_count) OVERRIDE {
+ if (!frame_.get())
+ return false;
+ if (!pixels)
+ return true;
+ // If skia couldn't do the YUV conversion, we will.
+ ConvertVideoFrameToRGBPixels(frame_, pixels, row_bytes);
+ frame_ = NULL;
+ return true;
+ }
+
+ virtual bool onGetYUV8Planes(SkISize sizes[3],
+ void* planes[3],
+ size_t row_bytes[3],
+ SkYUVColorSpace* color_space) OVERRIDE {
+ if (!frame_.get() || !IsYUV(frame_->format()))
+ return false;
+
+ if (color_space) {
+ if (IsJPEGColorSpace(frame_->format()))
+ *color_space = kJPEG_SkYUVColorSpace;
+ else
+ *color_space = kRec601_SkYUVColorSpace;
+ }
+
+ for (int plane = VideoFrame::kYPlane; plane <= VideoFrame::kVPlane;
+ ++plane) {
+ if (sizes) {
+ gfx::Size size;
+ size =
+ VideoFrame::PlaneSize(frame_->format(),
+ plane,
+ gfx::Size(frame_->visible_rect().width(),
+ frame_->visible_rect().height()));
+ sizes[plane].set(size.width(), size.height());
+ }
+ if (row_bytes && planes) {
+ size_t offset;
+ int y_shift = (frame_->format() == media::VideoFrame::YV16) ? 0 : 1;
+ if (plane == media::VideoFrame::kYPlane) {
+ offset = (frame_->stride(media::VideoFrame::kYPlane) *
+ frame_->visible_rect().y()) +
+ frame_->visible_rect().x();
+ } else {
+ offset = (frame_->stride(media::VideoFrame::kUPlane) *
+ (frame_->visible_rect().y() >> y_shift)) +
+ (frame_->visible_rect().x() >> 1);
+ }
+ row_bytes[plane] = static_cast<size_t>(frame_->stride(plane));
+ planes[plane] = frame_->data(plane) + offset;
+ }
+ }
+ if (planes && row_bytes)
+ frame_ = NULL;
+ return true;
+ }
+
+ private:
+ scoped_refptr<VideoFrame> frame_;
+};
+
SkCanvasVideoRenderer::SkCanvasVideoRenderer()
- : last_frame_timestamp_(media::kNoTimestamp()) {
+ : generator_(NULL), last_frame_timestamp_(media::kNoTimestamp()) {
+ last_frame_.setIsVolatile(true);
}
SkCanvasVideoRenderer::~SkCanvasVideoRenderer() {}
-void SkCanvasVideoRenderer::Paint(media::VideoFrame* video_frame,
+void SkCanvasVideoRenderer::Paint(const scoped_refptr<VideoFrame>& video_frame,
SkCanvas* canvas,
const gfx::RectF& dest_rect,
uint8 alpha,
@@ -204,16 +312,24 @@ void SkCanvasVideoRenderer::Paint(media::VideoFrame* video_frame,
// Paint black rectangle if there isn't a frame available or the
// frame has an unexpected format.
- if (!video_frame || !IsYUVOrNative(video_frame->format())) {
+ if (!video_frame.get() || !IsYUVOrNative(video_frame->format())) {
canvas->drawRect(dest, paint);
+ canvas->flush();
return;
}
// Check if we should convert and update |last_frame_|.
if (last_frame_.isNull() ||
video_frame->timestamp() != last_frame_timestamp_) {
- ConvertVideoFrameToBitmap(video_frame, &last_frame_);
+ generator_ = new VideoImageGenerator(video_frame);
+ // Note: This takes ownership of |generator_|.
+ if (!SkInstallDiscardablePixelRef(generator_, &last_frame_)) {
+ NOTREACHED();
+ }
+
+ // TODO(rileya): Perform this rotation on the canvas, rather than allocating
+ // a new bitmap and copying.
switch (video_rotation) {
case VIDEO_ROTATION_0:
break;
@@ -231,7 +347,15 @@ void SkCanvasVideoRenderer::Paint(media::VideoFrame* video_frame,
break;
}
+ // We copied the frame into a new bitmap and threw out the old one, so we
+ // no longer have a |generator_| around. This should be removed when the
+ // above TODO is addressed.
+ if (video_rotation != VIDEO_ROTATION_0)
+ generator_ = NULL;
+
last_frame_timestamp_ = video_frame->timestamp();
+ } else if (generator_) {
+ generator_->set_frame(video_frame);
}
paint.setXfermodeMode(mode);
@@ -239,9 +363,10 @@ void SkCanvasVideoRenderer::Paint(media::VideoFrame* video_frame,
// Paint using |last_frame_|.
paint.setFilterLevel(SkPaint::kLow_FilterLevel);
canvas->drawBitmapRect(last_frame_, NULL, dest, &paint);
+ canvas->flush();
}
-void SkCanvasVideoRenderer::Copy(media::VideoFrame* video_frame,
+void SkCanvasVideoRenderer::Copy(const scoped_refptr<VideoFrame>& video_frame,
SkCanvas* canvas) {
Paint(video_frame,
canvas,
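
The plane-addressing arithmetic in onGetYUV8Planes() above handles a cropped visible rect: the Y plane is addressed at full resolution, while the chroma planes are subsampled by 2 horizontally and, except for YV16 (y_shift = 0), by 2 vertically. As a worked example, assuming a YV12 frame with a visible_rect origin of (8, 8), a Y stride of 32, and a chroma stride of 16: the Y offset is 32 * 8 + 8 = 264 bytes, and the U/V offset is 16 * (8 >> 1) + (8 >> 1) = 16 * 4 + 4 = 68 bytes.
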
diff --git a/media/filters/skcanvas_video_renderer.h b/media/filters/skcanvas_video_renderer.h
index 3d1f4da56f..a44607395d 100644
--- a/media/filters/skcanvas_video_renderer.h
+++ b/media/filters/skcanvas_video_renderer.h
@@ -5,6 +5,7 @@
#ifndef MEDIA_FILTERS_SKCANVAS_VIDEO_RENDERER_H_
#define MEDIA_FILTERS_SKCANVAS_VIDEO_RENDERER_H_
+#include "base/memory/ref_counted.h"
#include "base/time/time.h"
#include "media/base/media_export.h"
#include "media/base/video_rotation.h"
@@ -17,6 +18,7 @@ class SkCanvas;
namespace media {
class VideoFrame;
+class VideoImageGenerator;
// Handles rendering of VideoFrames to SkCanvases, doing any necessary YUV
// conversion and caching of resulting RGB bitmaps.
@@ -29,7 +31,7 @@ class MEDIA_EXPORT SkCanvasVideoRenderer {
// specified by |dest_rect|.
//
// Black will be painted on |canvas| if |video_frame| is null.
- void Paint(media::VideoFrame* video_frame,
+ void Paint(const scoped_refptr<VideoFrame>& video_frame,
SkCanvas* canvas,
const gfx::RectF& dest_rect,
uint8 alpha,
@@ -37,9 +39,11 @@ class MEDIA_EXPORT SkCanvasVideoRenderer {
VideoRotation video_rotation);
// Copy |video_frame| on |canvas|.
- void Copy(media::VideoFrame* video_frame, SkCanvas* canvas);
+ void Copy(const scoped_refptr<VideoFrame>&, SkCanvas* canvas);
private:
+ VideoImageGenerator* generator_;
+
// An RGB bitmap and corresponding timestamp of the previously converted
// video frame data.
SkBitmap last_frame_;
diff --git a/media/filters/skcanvas_video_renderer_unittest.cc b/media/filters/skcanvas_video_renderer_unittest.cc
index 358ce0ab1e..e11c2963fb 100644
--- a/media/filters/skcanvas_video_renderer_unittest.cc
+++ b/media/filters/skcanvas_video_renderer_unittest.cc
@@ -24,7 +24,7 @@ void FillCanvas(SkCanvas* canvas, SkColor color) {
// Helper for returning the color of a solid |canvas|.
SkColor GetColorAt(SkCanvas* canvas, int x, int y) {
SkBitmap bitmap;
- if (!bitmap.allocN32Pixels(1, 1))
+ if (!bitmap.tryAllocN32Pixels(1, 1))
return 0;
if (!canvas->readPixels(&bitmap, x, y))
return 0;
@@ -52,20 +52,22 @@ class SkCanvasVideoRendererTest : public testing::Test {
// Paints the |video_frame| to the |canvas| using |renderer_|, setting the
// color of |video_frame| to |color| first.
- void Paint(VideoFrame* video_frame, SkCanvas* canvas, Color color);
- void PaintRotated(VideoFrame* video_frame,
+ void Paint(const scoped_refptr<VideoFrame>& video_frame,
+ SkCanvas* canvas,
+ Color color);
+ void PaintRotated(const scoped_refptr<VideoFrame>& video_frame,
SkCanvas* canvas,
Color color,
SkXfermode::Mode mode,
VideoRotation video_rotation);
- void Copy(VideoFrame* video_frame, SkCanvas* canvas);
+ void Copy(const scoped_refptr<VideoFrame>& video_frame, SkCanvas* canvas);
// Getters for various frame sizes.
- VideoFrame* natural_frame() { return natural_frame_.get(); }
- VideoFrame* larger_frame() { return larger_frame_.get(); }
- VideoFrame* smaller_frame() { return smaller_frame_.get(); }
- VideoFrame* cropped_frame() { return cropped_frame_.get(); }
+ scoped_refptr<VideoFrame> natural_frame() { return natural_frame_; }
+ scoped_refptr<VideoFrame> larger_frame() { return larger_frame_; }
+ scoped_refptr<VideoFrame> smaller_frame() { return smaller_frame_; }
+ scoped_refptr<VideoFrame> cropped_frame() { return cropped_frame_; }
// Standard canvas.
SkCanvas* target_canvas() { return &target_canvas_; }
@@ -184,9 +186,9 @@ SkCanvasVideoRendererTest::SkCanvasVideoRendererTest()
21, 21, 21, 21, 107, 107, 107, 107,
};
- media::CopyYPlane(cropped_y_plane, 16, 16, cropped_frame());
- media::CopyUPlane(cropped_u_plane, 8, 8, cropped_frame());
- media::CopyVPlane(cropped_v_plane, 8, 8, cropped_frame());
+ media::CopyYPlane(cropped_y_plane, 16, 16, cropped_frame().get());
+ media::CopyUPlane(cropped_u_plane, 8, 8, cropped_frame().get());
+ media::CopyVPlane(cropped_v_plane, 8, 8, cropped_frame().get());
}
SkCanvasVideoRendererTest::~SkCanvasVideoRendererTest() {}
@@ -200,37 +202,40 @@ void SkCanvasVideoRendererTest::PaintWithoutFrame(SkCanvas* canvas) {
VIDEO_ROTATION_0);
}
-void SkCanvasVideoRendererTest::Paint(VideoFrame* video_frame,
- SkCanvas* canvas,
- Color color) {
+void SkCanvasVideoRendererTest::Paint(
+ const scoped_refptr<VideoFrame>& video_frame,
+ SkCanvas* canvas,
+ Color color) {
PaintRotated(
video_frame, canvas, color, SkXfermode::kSrcOver_Mode, VIDEO_ROTATION_0);
}
-void SkCanvasVideoRendererTest::PaintRotated(VideoFrame* video_frame,
- SkCanvas* canvas,
- Color color,
- SkXfermode::Mode mode,
- VideoRotation video_rotation) {
+void SkCanvasVideoRendererTest::PaintRotated(
+ const scoped_refptr<VideoFrame>& video_frame,
+ SkCanvas* canvas,
+ Color color,
+ SkXfermode::Mode mode,
+ VideoRotation video_rotation) {
switch (color) {
case kNone:
break;
case kRed:
- media::FillYUV(video_frame, 76, 84, 255);
+ media::FillYUV(video_frame.get(), 76, 84, 255);
break;
case kGreen:
- media::FillYUV(video_frame, 149, 43, 21);
+ media::FillYUV(video_frame.get(), 149, 43, 21);
break;
case kBlue:
- media::FillYUV(video_frame, 29, 255, 107);
+ media::FillYUV(video_frame.get(), 29, 255, 107);
break;
}
renderer_.Paint(
video_frame, canvas, kNaturalRect, 0xFF, mode, video_rotation);
}
-void SkCanvasVideoRendererTest::Copy(VideoFrame* video_frame,
- SkCanvas* canvas) {
+void SkCanvasVideoRendererTest::Copy(
+ const scoped_refptr<VideoFrame>& video_frame,
+ SkCanvas* canvas) {
renderer_.Copy(video_frame, canvas);
}
@@ -243,29 +248,31 @@ TEST_F(SkCanvasVideoRendererTest, NoFrame) {
TEST_F(SkCanvasVideoRendererTest, TransparentFrame) {
FillCanvas(target_canvas(), SK_ColorRED);
- PaintRotated(VideoFrame::CreateTransparentFrame(gfx::Size(kWidth, kHeight)),
- target_canvas(),
- kNone,
- SkXfermode::kSrcOver_Mode,
- VIDEO_ROTATION_0);
+ PaintRotated(
+ VideoFrame::CreateTransparentFrame(gfx::Size(kWidth, kHeight)).get(),
+ target_canvas(),
+ kNone,
+ SkXfermode::kSrcOver_Mode,
+ VIDEO_ROTATION_0);
EXPECT_EQ(static_cast<SkColor>(SK_ColorRED), GetColor(target_canvas()));
}
TEST_F(SkCanvasVideoRendererTest, TransparentFrameSrcMode) {
FillCanvas(target_canvas(), SK_ColorRED);
// SRC mode completely overwrites the buffer.
- PaintRotated(VideoFrame::CreateTransparentFrame(gfx::Size(kWidth, kHeight)),
- target_canvas(),
- kNone,
- SkXfermode::kSrc_Mode,
- VIDEO_ROTATION_0);
+ PaintRotated(
+ VideoFrame::CreateTransparentFrame(gfx::Size(kWidth, kHeight)).get(),
+ target_canvas(),
+ kNone,
+ SkXfermode::kSrc_Mode,
+ VIDEO_ROTATION_0);
EXPECT_EQ(static_cast<SkColor>(SK_ColorTRANSPARENT),
GetColor(target_canvas()));
}
TEST_F(SkCanvasVideoRendererTest, CopyTransparentFrame) {
FillCanvas(target_canvas(), SK_ColorRED);
- Copy(VideoFrame::CreateTransparentFrame(gfx::Size(kWidth, kHeight)),
+ Copy(VideoFrame::CreateTransparentFrame(gfx::Size(kWidth, kHeight)).get(),
target_canvas());
EXPECT_EQ(static_cast<SkColor>(SK_ColorTRANSPARENT),
GetColor(target_canvas()));
@@ -293,7 +300,7 @@ TEST_F(SkCanvasVideoRendererTest, Smaller) {
}
TEST_F(SkCanvasVideoRendererTest, NoTimestamp) {
- VideoFrame* video_frame = natural_frame();
+ VideoFrame* video_frame = natural_frame().get();
video_frame->set_timestamp(media::kNoTimestamp());
Paint(video_frame, target_canvas(), kRed);
EXPECT_EQ(SK_ColorRED, GetColor(target_canvas()));
diff --git a/media/filters/source_buffer_range.cc b/media/filters/source_buffer_range.cc
new file mode 100644
index 0000000000..c0f3c78d0d
--- /dev/null
+++ b/media/filters/source_buffer_range.cc
@@ -0,0 +1,591 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/filters/source_buffer_range.h"
+
+#include <algorithm>
+
+namespace media {
+
+// Comparison operators for std::upper_bound() and std::lower_bound().
+static bool CompareTimeDeltaToStreamParserBuffer(
+ const DecodeTimestamp& decode_timestamp,
+ const scoped_refptr<StreamParserBuffer>& buffer) {
+ return decode_timestamp < buffer->GetDecodeTimestamp();
+}
+static bool CompareStreamParserBufferToTimeDelta(
+ const scoped_refptr<StreamParserBuffer>& buffer,
+ const DecodeTimestamp& decode_timestamp) {
+ return buffer->GetDecodeTimestamp() < decode_timestamp;
+}
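
Two asymmetric comparators are needed because std::upper_bound calls compare(value, element) while std::lower_bound calls compare(element, value). A self-contained sketch of the same pattern with plain int timestamps:

    #include <algorithm>
    #include <cassert>
    #include <vector>

    struct Buffer { int decode_timestamp; };

    // upper_bound-style: (value, element).
    static bool TimeLessThanBuffer(int t, const Buffer& b) {
      return t < b.decode_timestamp;
    }
    // lower_bound-style: (element, value).
    static bool BufferLessThanTime(const Buffer& b, int t) {
      return b.decode_timestamp < t;
    }

    int main() {
      std::vector<Buffer> q = {{0}, {10}, {10}, {20}};
      // First buffer with timestamp strictly greater than 10:
      auto after = std::upper_bound(q.begin(), q.end(), 10, TimeLessThanBuffer);
      assert(after - q.begin() == 3);
      // First buffer with timestamp >= 10:
      auto at = std::lower_bound(q.begin(), q.end(), 10, BufferLessThanTime);
      assert(at - q.begin() == 1);
      return 0;
    }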
+
+bool SourceBufferRange::AllowSameTimestamp(
+ bool prev_is_keyframe, bool current_is_keyframe) {
+ return prev_is_keyframe || !current_is_keyframe;
+}
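
The predicate collapses a four-case truth table: equal decode timestamps are legal except when a non-keyframe is immediately followed by a keyframe. Checking all four cases, as a sketch:

    #include <cassert>

    // Mirrors SourceBufferRange::AllowSameTimestamp().
    static bool AllowSameTimestamp(bool prev_is_keyframe,
                                   bool current_is_keyframe) {
      return prev_is_keyframe || !current_is_keyframe;
    }

    int main() {
      assert(AllowSameTimestamp(true, true));    // e.g. two text cues.
      assert(AllowSameTimestamp(true, false));   // Keyframe then dependent frame.
      assert(AllowSameTimestamp(false, false));  // Run of dependent frames.
      assert(!AllowSameTimestamp(false, true));  // Non-keyframe then keyframe.
      return 0;
    }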
+
+SourceBufferRange::SourceBufferRange(
+ GapPolicy gap_policy, const BufferQueue& new_buffers,
+ DecodeTimestamp media_segment_start_time,
+ const InterbufferDistanceCB& interbuffer_distance_cb)
+ : gap_policy_(gap_policy),
+ keyframe_map_index_base_(0),
+ next_buffer_index_(-1),
+ media_segment_start_time_(media_segment_start_time),
+ interbuffer_distance_cb_(interbuffer_distance_cb),
+ size_in_bytes_(0) {
+ CHECK(!new_buffers.empty());
+ DCHECK(new_buffers.front()->IsKeyframe());
+ DCHECK(!interbuffer_distance_cb.is_null());
+ AppendBuffersToEnd(new_buffers);
+}
+
+SourceBufferRange::~SourceBufferRange() {}
+
+void SourceBufferRange::AppendBuffersToEnd(const BufferQueue& new_buffers) {
+ DCHECK(buffers_.empty() || CanAppendBuffersToEnd(new_buffers));
+ DCHECK(media_segment_start_time_ == kNoDecodeTimestamp() ||
+ media_segment_start_time_ <=
+ new_buffers.front()->GetDecodeTimestamp());
+ for (BufferQueue::const_iterator itr = new_buffers.begin();
+ itr != new_buffers.end();
+ ++itr) {
+ DCHECK((*itr)->GetDecodeTimestamp() != kNoDecodeTimestamp());
+ buffers_.push_back(*itr);
+ size_in_bytes_ += (*itr)->data_size();
+
+ if ((*itr)->IsKeyframe()) {
+ keyframe_map_.insert(
+ std::make_pair((*itr)->GetDecodeTimestamp(),
+ buffers_.size() - 1 + keyframe_map_index_base_));
+ }
+ }
+}
+
+void SourceBufferRange::Seek(DecodeTimestamp timestamp) {
+ DCHECK(CanSeekTo(timestamp));
+ DCHECK(!keyframe_map_.empty());
+
+ KeyframeMap::iterator result = GetFirstKeyframeBefore(timestamp);
+ next_buffer_index_ = result->second - keyframe_map_index_base_;
+ DCHECK_LT(next_buffer_index_, static_cast<int>(buffers_.size()));
+}
+
+void SourceBufferRange::SeekAheadTo(DecodeTimestamp timestamp) {
+ SeekAhead(timestamp, false);
+}
+
+void SourceBufferRange::SeekAheadPast(DecodeTimestamp timestamp) {
+ SeekAhead(timestamp, true);
+}
+
+void SourceBufferRange::SeekAhead(DecodeTimestamp timestamp,
+ bool skip_given_timestamp) {
+ DCHECK(!keyframe_map_.empty());
+
+ KeyframeMap::iterator result =
+ GetFirstKeyframeAt(timestamp, skip_given_timestamp);
+
+  // If there isn't a keyframe after |timestamp|, reset the next buffer
+  // position (leaving this range unseeked) to signal such.
+ if (result == keyframe_map_.end()) {
+ next_buffer_index_ = -1;
+ return;
+ }
+ next_buffer_index_ = result->second - keyframe_map_index_base_;
+ DCHECK_LT(next_buffer_index_, static_cast<int>(buffers_.size()));
+}
+
+void SourceBufferRange::SeekToStart() {
+ DCHECK(!buffers_.empty());
+ next_buffer_index_ = 0;
+}
+
+SourceBufferRange* SourceBufferRange::SplitRange(
+ DecodeTimestamp timestamp, bool is_exclusive) {
+ CHECK(!buffers_.empty());
+
+ // Find the first keyframe after |timestamp|. If |is_exclusive|, do not
+ // include keyframes at |timestamp|.
+ KeyframeMap::iterator new_beginning_keyframe =
+ GetFirstKeyframeAt(timestamp, is_exclusive);
+
+ // If there is no keyframe after |timestamp|, we can't split the range.
+ if (new_beginning_keyframe == keyframe_map_.end())
+ return NULL;
+
+ // Remove the data beginning at |keyframe_index| from |buffers_| and save it
+ // into |removed_buffers|.
+ int keyframe_index =
+ new_beginning_keyframe->second - keyframe_map_index_base_;
+ DCHECK_LT(keyframe_index, static_cast<int>(buffers_.size()));
+ BufferQueue::iterator starting_point = buffers_.begin() + keyframe_index;
+ BufferQueue removed_buffers(starting_point, buffers_.end());
+
+ DecodeTimestamp new_range_start_timestamp = kNoDecodeTimestamp();
+ if (GetStartTimestamp() < buffers_.front()->GetDecodeTimestamp() &&
+ timestamp < removed_buffers.front()->GetDecodeTimestamp()) {
+ // The split is in the gap between |media_segment_start_time_| and
+ // the first buffer of the new range so we should set the start
+ // time of the new range to |timestamp| so we preserve part of the
+ // gap in the new range.
+ new_range_start_timestamp = timestamp;
+ }
+
+ keyframe_map_.erase(new_beginning_keyframe, keyframe_map_.end());
+ FreeBufferRange(starting_point, buffers_.end());
+
+ // Create a new range with |removed_buffers|.
+ SourceBufferRange* split_range =
+ new SourceBufferRange(
+ gap_policy_, removed_buffers, new_range_start_timestamp,
+ interbuffer_distance_cb_);
+
+ // If the next buffer position is now in |split_range|, update the state of
+ // this range and |split_range| accordingly.
+ if (next_buffer_index_ >= static_cast<int>(buffers_.size())) {
+ split_range->next_buffer_index_ = next_buffer_index_ - keyframe_index;
+ ResetNextBufferPosition();
+ }
+
+ return split_range;
+}
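
Beneath the keyframe lookup, SplitRange() is a cut of the buffer queue at an index plus a re-basing of the playback cursor when it falls into the tail. A sketch of just that index bookkeeping with a plain vector:

    #include <cassert>
    #include <vector>

    struct SplitResult {
      std::vector<int> tail;  // Buffers moved into the new range.
      int tail_next_index;    // Re-based cursor, or -1 if it stayed behind.
    };

    // Split |buffers| at |keyframe_index|; |next_index| mirrors
    // |next_buffer_index_| (-1 when unseeked). Illustrative only.
    static SplitResult SplitAt(std::vector<int>* buffers, int keyframe_index,
                               int* next_index) {
      SplitResult result;
      result.tail.assign(buffers->begin() + keyframe_index, buffers->end());
      buffers->erase(buffers->begin() + keyframe_index, buffers->end());
      result.tail_next_index = -1;
      if (*next_index >= keyframe_index) {  // Cursor moved into the new range.
        result.tail_next_index = *next_index - keyframe_index;
        *next_index = -1;
      }
      return result;
    }

    int main() {
      std::vector<int> buffers = {0, 1, 2, 3, 4, 5};
      int next_index = 4;  // Next buffer to return is #4.
      SplitResult r = SplitAt(&buffers, 3, &next_index);
      assert(buffers.size() == 3 && r.tail.size() == 3);
      assert(next_index == -1 && r.tail_next_index == 1);  // 4 - 3 == 1.
      return 0;
    }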
+
+SourceBufferRange::BufferQueue::iterator SourceBufferRange::GetBufferItrAt(
+ DecodeTimestamp timestamp,
+ bool skip_given_timestamp) {
+ return skip_given_timestamp
+ ? std::upper_bound(buffers_.begin(),
+ buffers_.end(),
+ timestamp,
+ CompareTimeDeltaToStreamParserBuffer)
+ : std::lower_bound(buffers_.begin(),
+ buffers_.end(),
+ timestamp,
+ CompareStreamParserBufferToTimeDelta);
+}
+
+SourceBufferRange::KeyframeMap::iterator
+SourceBufferRange::GetFirstKeyframeAt(DecodeTimestamp timestamp,
+ bool skip_given_timestamp) {
+ return skip_given_timestamp ?
+ keyframe_map_.upper_bound(timestamp) :
+ keyframe_map_.lower_bound(timestamp);
+}
+
+SourceBufferRange::KeyframeMap::iterator
+SourceBufferRange::GetFirstKeyframeBefore(DecodeTimestamp timestamp) {
+ KeyframeMap::iterator result = keyframe_map_.lower_bound(timestamp);
+ // lower_bound() returns the first element >= |timestamp|, so we want the
+ // previous element if it did not return the element exactly equal to
+ // |timestamp|.
+ if (result != keyframe_map_.begin() &&
+ (result == keyframe_map_.end() || result->first != timestamp)) {
+ --result;
+ }
+ return result;
+}
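
The step-back after lower_bound() is the standard idiom for "greatest key <= t" on a std::map. A sketch with int keys; note that, as above, if |t| precedes the first key the first entry is returned as-is:

    #include <cassert>
    #include <map>

    // Greatest key <= |t|, assuming a non-empty map.
    static std::map<int, int>::iterator FirstKeyAtOrBefore(
        std::map<int, int>& m, int t) {
      auto it = m.lower_bound(t);  // First key >= t.
      if (it != m.begin() && (it == m.end() || it->first != t))
        --it;  // Step back unless we hit |t| exactly.
      return it;
    }

    int main() {
      std::map<int, int> keyframes = {{0, 0}, {30, 3}, {60, 6}};
      assert(FirstKeyAtOrBefore(keyframes, 45)->first == 30);  // Between keys.
      assert(FirstKeyAtOrBefore(keyframes, 30)->first == 30);  // Exact hit.
      assert(FirstKeyAtOrBefore(keyframes, 99)->first == 60);  // Past the end.
      return 0;
    }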
+
+void SourceBufferRange::DeleteAll(BufferQueue* removed_buffers) {
+ TruncateAt(buffers_.begin(), removed_buffers);
+}
+
+bool SourceBufferRange::TruncateAt(
+ DecodeTimestamp timestamp, BufferQueue* removed_buffers,
+ bool is_exclusive) {
+ // Find the place in |buffers_| where we will begin deleting data.
+ BufferQueue::iterator starting_point =
+ GetBufferItrAt(timestamp, is_exclusive);
+ return TruncateAt(starting_point, removed_buffers);
+}
+
+int SourceBufferRange::DeleteGOPFromFront(BufferQueue* deleted_buffers) {
+ DCHECK(!FirstGOPContainsNextBufferPosition());
+ DCHECK(deleted_buffers);
+
+ int buffers_deleted = 0;
+ int total_bytes_deleted = 0;
+
+ KeyframeMap::iterator front = keyframe_map_.begin();
+ DCHECK(front != keyframe_map_.end());
+
+ // Delete the keyframe at the start of |keyframe_map_|.
+ keyframe_map_.erase(front);
+
+ // Now we need to delete all the buffers that depend on the keyframe we've
+ // just deleted.
+ int end_index = keyframe_map_.size() > 0 ?
+ keyframe_map_.begin()->second - keyframe_map_index_base_ :
+ buffers_.size();
+
+ // Delete buffers from the beginning of the buffered range up until (but not
+ // including) the next keyframe.
+ for (int i = 0; i < end_index; i++) {
+ int bytes_deleted = buffers_.front()->data_size();
+ size_in_bytes_ -= bytes_deleted;
+ total_bytes_deleted += bytes_deleted;
+ deleted_buffers->push_back(buffers_.front());
+ buffers_.pop_front();
+ ++buffers_deleted;
+ }
+
+ // Update |keyframe_map_index_base_| to account for the deleted buffers.
+ keyframe_map_index_base_ += buffers_deleted;
+
+ if (next_buffer_index_ > -1) {
+ next_buffer_index_ -= buffers_deleted;
+ DCHECK_GE(next_buffer_index_, 0);
+ }
+
+ // Invalidate media segment start time if we've deleted the first buffer of
+ // the range.
+ if (buffers_deleted > 0)
+ media_segment_start_time_ = kNoDecodeTimestamp();
+
+ return total_bytes_deleted;
+}
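
The |keyframe_map_index_base_| bookkeeping above is what lets a front deletion avoid rewriting every surviving map entry: stored indices stay absolute and the base grows instead. A worked numeric sketch, assuming a hypothetical range with GOPs of 3, 2 and 4 buffers:

    #include <cassert>
    #include <map>

    int main() {
      // Keyframe timestamps -> absolute buffer indices (base starts at 0).
      // GOPs occupy buffers [0..2], [3..4], [5..8].
      std::map<int, int> keyframe_map = {{0, 0}, {100, 3}, {200, 5}};
      int index_base = 0;

      // Delete the first GOP (3 buffers), as DeleteGOPFromFront() does: drop
      // its map entry and bump the base instead of decrementing every survivor.
      keyframe_map.erase(keyframe_map.begin());
      index_base += 3;

      // The effective position in |buffers_| is still map value minus base.
      assert(keyframe_map.at(100) - index_base == 0);  // Now the front GOP.
      assert(keyframe_map.at(200) - index_base == 2);
      return 0;
    }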
+
+int SourceBufferRange::DeleteGOPFromBack(BufferQueue* deleted_buffers) {
+ DCHECK(!LastGOPContainsNextBufferPosition());
+ DCHECK(deleted_buffers);
+
+ // Remove the last GOP's keyframe from the |keyframe_map_|.
+ KeyframeMap::iterator back = keyframe_map_.end();
+ DCHECK_GT(keyframe_map_.size(), 0u);
+ --back;
+
+ // The index of the first buffer in the last GOP is equal to the new size of
+ // |buffers_| after that GOP is deleted.
+ size_t goal_size = back->second - keyframe_map_index_base_;
+ keyframe_map_.erase(back);
+
+ int total_bytes_deleted = 0;
+ while (buffers_.size() != goal_size) {
+ int bytes_deleted = buffers_.back()->data_size();
+ size_in_bytes_ -= bytes_deleted;
+ total_bytes_deleted += bytes_deleted;
+    // We're removing buffers from the back, so push each removed buffer to the
+    // front of |deleted_buffers| so that |deleted_buffers| stays in
+    // nondecreasing timestamp order.
+ deleted_buffers->push_front(buffers_.back());
+ buffers_.pop_back();
+ }
+
+ return total_bytes_deleted;
+}
+
+int SourceBufferRange::GetRemovalGOP(
+ DecodeTimestamp start_timestamp, DecodeTimestamp end_timestamp,
+ int total_bytes_to_free, DecodeTimestamp* removal_end_timestamp) {
+ int bytes_to_free = total_bytes_to_free;
+ int bytes_removed = 0;
+
+ KeyframeMap::iterator gop_itr = GetFirstKeyframeAt(start_timestamp, false);
+ if (gop_itr == keyframe_map_.end())
+ return 0;
+ int keyframe_index = gop_itr->second - keyframe_map_index_base_;
+ BufferQueue::iterator buffer_itr = buffers_.begin() + keyframe_index;
+ KeyframeMap::iterator gop_end = keyframe_map_.end();
+ if (end_timestamp < GetBufferedEndTimestamp())
+ gop_end = GetFirstKeyframeBefore(end_timestamp);
+
+ // Check if the removal range is within a GOP and skip the loop if so.
+ // [keyframe]...[start_timestamp]...[end_timestamp]...[keyframe]
+ KeyframeMap::iterator gop_itr_prev = gop_itr;
+ if (gop_itr_prev != keyframe_map_.begin() && --gop_itr_prev == gop_end)
+ gop_end = gop_itr;
+
+ while (gop_itr != gop_end && bytes_to_free > 0) {
+ ++gop_itr;
+
+ int gop_size = 0;
+ int next_gop_index = gop_itr == keyframe_map_.end() ?
+ buffers_.size() : gop_itr->second - keyframe_map_index_base_;
+ BufferQueue::iterator next_gop_start = buffers_.begin() + next_gop_index;
+ for (; buffer_itr != next_gop_start; ++buffer_itr)
+ gop_size += (*buffer_itr)->data_size();
+
+ bytes_removed += gop_size;
+ bytes_to_free -= gop_size;
+ }
+ if (bytes_removed > 0) {
+ *removal_end_timestamp = gop_itr == keyframe_map_.end() ?
+ GetBufferedEndTimestamp() : gop_itr->first;
+ }
+ return bytes_removed;
+}
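
At its core GetRemovalGOP() is a prefix sum over GOP byte sizes that stops once the target is covered, always on a GOP boundary. A stripped-down sketch over hypothetical GOP sizes:

    #include <cassert>
    #include <vector>

    // Accumulate whole GOPs until |bytes_to_free| is covered; like
    // GetRemovalGOP(), never stops mid-GOP, so it may overshoot the target.
    static int BytesRemovedForTarget(const std::vector<int>& gop_sizes,
                                     int bytes_to_free, size_t* gops_taken) {
      int removed = 0;
      *gops_taken = 0;
      while (*gops_taken < gop_sizes.size() && bytes_to_free > 0) {
        removed += gop_sizes[*gops_taken];
        bytes_to_free -= gop_sizes[*gops_taken];
        ++*gops_taken;
      }
      return removed;
    }

    int main() {
      const std::vector<int> gop_sizes = {400, 300, 500};
      size_t gops = 0;
      // Freeing 600 bytes requires two whole GOPs (700 bytes), not 1.5 GOPs.
      assert(BytesRemovedForTarget(gop_sizes, 600, &gops) == 700);
      assert(gops == 2);
      return 0;
    }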
+
+bool SourceBufferRange::FirstGOPContainsNextBufferPosition() const {
+ if (!HasNextBufferPosition())
+ return false;
+
+ // If there is only one GOP, it must contain the next buffer position.
+ if (keyframe_map_.size() == 1u)
+ return true;
+
+ KeyframeMap::const_iterator second_gop = keyframe_map_.begin();
+ ++second_gop;
+ return next_buffer_index_ < second_gop->second - keyframe_map_index_base_;
+}
+
+bool SourceBufferRange::LastGOPContainsNextBufferPosition() const {
+ if (!HasNextBufferPosition())
+ return false;
+
+ // If there is only one GOP, it must contain the next buffer position.
+ if (keyframe_map_.size() == 1u)
+ return true;
+
+ KeyframeMap::const_iterator last_gop = keyframe_map_.end();
+ --last_gop;
+ return last_gop->second - keyframe_map_index_base_ <= next_buffer_index_;
+}
+
+void SourceBufferRange::FreeBufferRange(
+ const BufferQueue::iterator& starting_point,
+ const BufferQueue::iterator& ending_point) {
+ for (BufferQueue::iterator itr = starting_point;
+ itr != ending_point; ++itr) {
+ size_in_bytes_ -= (*itr)->data_size();
+ DCHECK_GE(size_in_bytes_, 0);
+ }
+ buffers_.erase(starting_point, ending_point);
+}
+
+bool SourceBufferRange::TruncateAt(
+ const BufferQueue::iterator& starting_point, BufferQueue* removed_buffers) {
+ DCHECK(!removed_buffers || removed_buffers->empty());
+
+ // Return if we're not deleting anything.
+ if (starting_point == buffers_.end())
+ return buffers_.empty();
+
+ // Reset the next buffer index if we will be deleting the buffer that's next
+ // in sequence.
+ if (HasNextBufferPosition()) {
+ DecodeTimestamp next_buffer_timestamp = GetNextTimestamp();
+ if (next_buffer_timestamp == kNoDecodeTimestamp() ||
+ next_buffer_timestamp >= (*starting_point)->GetDecodeTimestamp()) {
+ if (HasNextBuffer() && removed_buffers) {
+ int starting_offset = starting_point - buffers_.begin();
+ int next_buffer_offset = next_buffer_index_ - starting_offset;
+ DCHECK_GE(next_buffer_offset, 0);
+ BufferQueue saved(starting_point + next_buffer_offset, buffers_.end());
+ removed_buffers->swap(saved);
+ }
+ ResetNextBufferPosition();
+ }
+ }
+
+ // Remove keyframes from |starting_point| onward.
+ KeyframeMap::iterator starting_point_keyframe =
+ keyframe_map_.lower_bound((*starting_point)->GetDecodeTimestamp());
+ keyframe_map_.erase(starting_point_keyframe, keyframe_map_.end());
+
+ // Remove everything from |starting_point| onward.
+ FreeBufferRange(starting_point, buffers_.end());
+ return buffers_.empty();
+}
+
+bool SourceBufferRange::GetNextBuffer(
+ scoped_refptr<StreamParserBuffer>* out_buffer) {
+ if (!HasNextBuffer())
+ return false;
+
+ *out_buffer = buffers_[next_buffer_index_];
+ next_buffer_index_++;
+ return true;
+}
+
+bool SourceBufferRange::HasNextBuffer() const {
+ return next_buffer_index_ >= 0 &&
+ next_buffer_index_ < static_cast<int>(buffers_.size());
+}
+
+int SourceBufferRange::GetNextConfigId() const {
+ DCHECK(HasNextBuffer());
+ // If the next buffer is an audio splice frame, the next effective config id
+ // comes from the first fade out preroll buffer.
+ return buffers_[next_buffer_index_]->GetSpliceBufferConfigId(0);
+}
+
+DecodeTimestamp SourceBufferRange::GetNextTimestamp() const {
+ DCHECK(!buffers_.empty());
+ DCHECK(HasNextBufferPosition());
+
+ if (next_buffer_index_ >= static_cast<int>(buffers_.size())) {
+ return kNoDecodeTimestamp();
+ }
+
+ return buffers_[next_buffer_index_]->GetDecodeTimestamp();
+}
+
+bool SourceBufferRange::HasNextBufferPosition() const {
+ return next_buffer_index_ >= 0;
+}
+
+void SourceBufferRange::ResetNextBufferPosition() {
+ next_buffer_index_ = -1;
+}
+
+void SourceBufferRange::AppendRangeToEnd(const SourceBufferRange& range,
+ bool transfer_current_position) {
+ DCHECK(CanAppendRangeToEnd(range));
+ DCHECK(!buffers_.empty());
+
+ if (transfer_current_position && range.next_buffer_index_ >= 0)
+ next_buffer_index_ = range.next_buffer_index_ + buffers_.size();
+
+ AppendBuffersToEnd(range.buffers_);
+}
+
+bool SourceBufferRange::CanAppendRangeToEnd(
+ const SourceBufferRange& range) const {
+ return CanAppendBuffersToEnd(range.buffers_);
+}
+
+bool SourceBufferRange::CanAppendBuffersToEnd(
+ const BufferQueue& buffers) const {
+ DCHECK(!buffers_.empty());
+ return IsNextInSequence(buffers.front()->GetDecodeTimestamp(),
+ buffers.front()->IsKeyframe());
+}
+
+bool SourceBufferRange::BelongsToRange(DecodeTimestamp timestamp) const {
+ DCHECK(!buffers_.empty());
+
+ return (IsNextInSequence(timestamp, false) ||
+ (GetStartTimestamp() <= timestamp && timestamp <= GetEndTimestamp()));
+}
+
+bool SourceBufferRange::CanSeekTo(DecodeTimestamp timestamp) const {
+ DecodeTimestamp start_timestamp =
+ std::max(DecodeTimestamp(), GetStartTimestamp() - GetFudgeRoom());
+ return !keyframe_map_.empty() && start_timestamp <= timestamp &&
+ timestamp < GetBufferedEndTimestamp();
+}
+
+bool SourceBufferRange::CompletelyOverlaps(
+ const SourceBufferRange& range) const {
+ return GetStartTimestamp() <= range.GetStartTimestamp() &&
+ GetEndTimestamp() >= range.GetEndTimestamp();
+}
+
+bool SourceBufferRange::EndOverlaps(const SourceBufferRange& range) const {
+ return range.GetStartTimestamp() <= GetEndTimestamp() &&
+ GetEndTimestamp() < range.GetEndTimestamp();
+}
+
+DecodeTimestamp SourceBufferRange::GetStartTimestamp() const {
+ DCHECK(!buffers_.empty());
+ DecodeTimestamp start_timestamp = media_segment_start_time_;
+ if (start_timestamp == kNoDecodeTimestamp())
+ start_timestamp = buffers_.front()->GetDecodeTimestamp();
+ return start_timestamp;
+}
+
+DecodeTimestamp SourceBufferRange::GetEndTimestamp() const {
+ DCHECK(!buffers_.empty());
+ return buffers_.back()->GetDecodeTimestamp();
+}
+
+DecodeTimestamp SourceBufferRange::GetBufferedEndTimestamp() const {
+ DCHECK(!buffers_.empty());
+ base::TimeDelta duration = buffers_.back()->duration();
+ if (duration == kNoTimestamp() || duration == base::TimeDelta())
+ duration = GetApproximateDuration();
+ return GetEndTimestamp() + duration;
+}
+
+DecodeTimestamp SourceBufferRange::NextKeyframeTimestamp(
+ DecodeTimestamp timestamp) {
+ DCHECK(!keyframe_map_.empty());
+
+ if (timestamp < GetStartTimestamp() || timestamp >= GetBufferedEndTimestamp())
+ return kNoDecodeTimestamp();
+
+ KeyframeMap::iterator itr = GetFirstKeyframeAt(timestamp, false);
+ if (itr == keyframe_map_.end())
+ return kNoDecodeTimestamp();
+
+ // If the timestamp is inside the gap between the start of the media
+ // segment and the first buffer, then just pretend there is a
+ // keyframe at the specified timestamp.
+ if (itr == keyframe_map_.begin() &&
+ timestamp > media_segment_start_time_ &&
+ timestamp < itr->first) {
+ return timestamp;
+ }
+
+ return itr->first;
+}
+
+DecodeTimestamp SourceBufferRange::KeyframeBeforeTimestamp(
+ DecodeTimestamp timestamp) {
+ DCHECK(!keyframe_map_.empty());
+
+ if (timestamp < GetStartTimestamp() || timestamp >= GetBufferedEndTimestamp())
+ return kNoDecodeTimestamp();
+
+ return GetFirstKeyframeBefore(timestamp)->first;
+}
+
+bool SourceBufferRange::IsNextInSequence(
+ DecodeTimestamp timestamp, bool is_keyframe) const {
+ DecodeTimestamp end = buffers_.back()->GetDecodeTimestamp();
+ if (end < timestamp &&
+ (gap_policy_ == ALLOW_GAPS ||
+ timestamp <= end + GetFudgeRoom())) {
+ return true;
+ }
+
+ return timestamp == end && AllowSameTimestamp(
+ buffers_.back()->IsKeyframe(), is_keyframe);
+}
+
+base::TimeDelta SourceBufferRange::GetFudgeRoom() const {
+  // Because we do not know exactly when the next timestamp will be, any
+  // buffer that starts within 2x the approximate duration of a buffer is
+  // considered within this range.
+ return 2 * GetApproximateDuration();
+}
+
+base::TimeDelta SourceBufferRange::GetApproximateDuration() const {
+ base::TimeDelta max_interbuffer_distance = interbuffer_distance_cb_.Run();
+ DCHECK(max_interbuffer_distance != kNoTimestamp());
+ return max_interbuffer_distance;
+}
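
Taken together, a buffer continues a range when it starts after the range's last decode timestamp but within the fudge room of twice the approximate buffer duration. A numeric sketch of that adjacency test (the same-timestamp and ALLOW_GAPS branches of IsNextInSequence() are omitted):

    #include <cassert>

    // A buffer at |timestamp_us| continues a range ending at |range_end_us|
    // if it starts within 2x the approximate buffer duration.
    static bool IsNextInSequence(long range_end_us, long timestamp_us,
                                 long approx_duration_us) {
      const long fudge_room_us = 2 * approx_duration_us;
      return range_end_us < timestamp_us &&
             timestamp_us <= range_end_us + fudge_room_us;
    }

    int main() {
      // Range ends at 1000ms; buffers are roughly 33ms apart (30fps).
      assert(IsNextInSequence(1000000, 1033000, 33000));   // Next frame: OK.
      assert(IsNextInSequence(1000000, 1066000, 33000));   // Exactly 2x: OK.
      assert(!IsNextInSequence(1000000, 1067000, 33000));  // Past fudge: gap.
      return 0;
    }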
+
+bool SourceBufferRange::GetBuffersInRange(DecodeTimestamp start,
+ DecodeTimestamp end,
+ BufferQueue* buffers) {
+ // Find the nearest buffer with a decode timestamp <= start.
+ const DecodeTimestamp first_timestamp = KeyframeBeforeTimestamp(start);
+ if (first_timestamp == kNoDecodeTimestamp())
+ return false;
+
+ // Find all buffers involved in the range.
+ const size_t previous_size = buffers->size();
+ for (BufferQueue::iterator it = GetBufferItrAt(first_timestamp, false);
+ it != buffers_.end();
+ ++it) {
+ const scoped_refptr<StreamParserBuffer>& buffer = *it;
+ // Buffers without duration are not supported, so bail if we encounter any.
+ if (buffer->duration() == kNoTimestamp() ||
+ buffer->duration() <= base::TimeDelta()) {
+ return false;
+ }
+ if (buffer->end_of_stream() ||
+ buffer->timestamp() >= end.ToPresentationTime()) {
+ break;
+ }
+
+ if (buffer->timestamp() + buffer->duration() <= start.ToPresentationTime())
+ continue;
+ buffers->push_back(buffer);
+ }
+ return previous_size < buffers->size();
+}
+
+} // namespace media
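
Once the keyframe lookup has picked a starting point, GetBuffersInRange() reduces to the classic half-open interval-overlap test: a buffer [ts, ts + dur) is collected iff it starts before |end| and ends after |start|. A sketch of that predicate:

    #include <cassert>

    // Half-open overlap test used (in spirit) by GetBuffersInRange():
    // buffer [ts, ts + dur) intersects query [start, end).
    static bool Overlaps(long ts, long dur, long start, long end) {
      return ts < end && ts + dur > start;
    }

    int main() {
      // Query window [100, 200).
      assert(!Overlaps(0, 100, 100, 200));   // Ends exactly at |start|: skipped.
      assert(Overlaps(50, 100, 100, 200));   // Straddles the window start.
      assert(Overlaps(150, 10, 100, 200));   // Fully inside.
      assert(!Overlaps(200, 50, 100, 200));  // Starts exactly at |end|: skipped.
      return 0;
    }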
diff --git a/media/filters/source_buffer_range.h b/media/filters/source_buffer_range.h
new file mode 100644
index 0000000000..1961e340b4
--- /dev/null
+++ b/media/filters/source_buffer_range.h
@@ -0,0 +1,289 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_FILTERS_SOURCE_BUFFER_RANGE_H_
+#define MEDIA_FILTERS_SOURCE_BUFFER_RANGE_H_
+
+#include <map>
+
+#include "base/callback.h"
+#include "base/memory/ref_counted.h"
+#include "media/base/stream_parser_buffer.h"
+
+namespace media {
+
+// Helper class representing a range of buffered data. All buffers in a
+// SourceBufferRange are ordered sequentially in decode timestamp order with no
+// gaps.
+class SourceBufferRange {
+ public:
+  // Returns the maximum distance in time between any two adjacent buffers
+  // seen in this stream. Used to estimate the duration of a buffer if its
+  // duration is not known.
+ typedef base::Callback<base::TimeDelta()> InterbufferDistanceCB;
+
+ typedef StreamParser::BufferQueue BufferQueue;
+
+ // Policy for handling large gaps between buffers. Continuous media like
+ // audio & video should use NO_GAPS_ALLOWED. Discontinuous media like
+ // timed text should use ALLOW_GAPS because large differences in timestamps
+ // are common and acceptable.
+ enum GapPolicy {
+ NO_GAPS_ALLOWED,
+ ALLOW_GAPS
+ };
+
+ // Buffers with the same timestamp are only allowed under certain conditions.
+ // More precisely, it is allowed in all situations except when the previous
+ // frame is not a key frame and the current is a key frame.
+ // Examples of situations where DTS of two consecutive frames can be equal:
+ // - Video: VP8 Alt-Ref frames.
+ // - Video: IPBPBP...: DTS for I frame and for P frame can be equal.
+ // - Text track cues that start at same time.
+ // Returns true if |prev_is_keyframe| and |current_is_keyframe| indicate a
+ // same timestamp situation that is allowed. False is returned otherwise.
+ static bool AllowSameTimestamp(bool prev_is_keyframe,
+ bool current_is_keyframe);
+
+ // Creates a source buffer range with |new_buffers|. |new_buffers| cannot be
+ // empty and the front of |new_buffers| must be a keyframe.
+ // |media_segment_start_time| refers to the starting timestamp for the media
+ // segment to which these buffers belong.
+ SourceBufferRange(GapPolicy gap_policy,
+ const BufferQueue& new_buffers,
+ DecodeTimestamp media_segment_start_time,
+ const InterbufferDistanceCB& interbuffer_distance_cb);
+
+ ~SourceBufferRange();
+
+ // Appends |buffers| to the end of the range and updates |keyframe_map_| as
+ // it encounters new keyframes. Assumes |buffers| belongs at the end of the
+ // range.
+ void AppendBuffersToEnd(const BufferQueue& buffers);
+ bool CanAppendBuffersToEnd(const BufferQueue& buffers) const;
+
+ // Appends the buffers from |range| into this range.
+ // The first buffer in |range| must come directly after the last buffer
+ // in this range.
+ // If |transfer_current_position| is true, |range|'s |next_buffer_index_|
+  // is transferred to this SourceBufferRange.
+ void AppendRangeToEnd(const SourceBufferRange& range,
+ bool transfer_current_position);
+ bool CanAppendRangeToEnd(const SourceBufferRange& range) const;
+
+ // Updates |next_buffer_index_| to point to the Buffer containing |timestamp|.
+ // Assumes |timestamp| is valid and in this range.
+ void Seek(DecodeTimestamp timestamp);
+
+ // Updates |next_buffer_index_| to point to next keyframe after or equal to
+ // |timestamp|.
+ void SeekAheadTo(DecodeTimestamp timestamp);
+
+ // Updates |next_buffer_index_| to point to next keyframe strictly after
+ // |timestamp|.
+ void SeekAheadPast(DecodeTimestamp timestamp);
+
+ // Seeks to the beginning of the range.
+ void SeekToStart();
+
+ // Finds the next keyframe from |buffers_| after |timestamp| (or at
+ // |timestamp| if |is_exclusive| is false) and creates and returns a new
+ // SourceBufferRange with the buffers from that keyframe onward.
+ // The buffers in the new SourceBufferRange are moved out of this range. If
+ // there is no keyframe after |timestamp|, SplitRange() returns null and this
+ // range is unmodified.
+ SourceBufferRange* SplitRange(DecodeTimestamp timestamp, bool is_exclusive);
+
+ // Deletes the buffers from this range starting at |timestamp|, exclusive if
+ // |is_exclusive| is true, inclusive otherwise.
+ // Resets |next_buffer_index_| if the buffer at |next_buffer_index_| was
+ // deleted, and deletes the |keyframe_map_| entries for the buffers that
+ // were removed.
+ // |deleted_buffers| contains the buffers that were deleted from this range,
+ // starting at the buffer that had been at |next_buffer_index_|.
+ // Returns true if everything in the range was deleted. Otherwise
+ // returns false.
+ bool TruncateAt(DecodeTimestamp timestamp,
+ BufferQueue* deleted_buffers, bool is_exclusive);
+ // Deletes all buffers in range.
+ void DeleteAll(BufferQueue* deleted_buffers);
+
+ // Deletes a GOP from the front or back of the range and moves these
+ // buffers into |deleted_buffers|. Returns the number of bytes deleted from
+ // the range (i.e. the size in bytes of |deleted_buffers|).
+ int DeleteGOPFromFront(BufferQueue* deleted_buffers);
+ int DeleteGOPFromBack(BufferQueue* deleted_buffers);
+
+  // Gets the range of GOPs needed to secure at least |bytes_to_free| bytes
+  // from [|start_timestamp|, |end_timestamp|).
+  // Returns the number of bytes that would be freed if the buffers in
+  // [|start_timestamp|, |end_removal_timestamp|) were removed.
+ // Will not update |end_removal_timestamp| if the returned size is 0.
+ int GetRemovalGOP(
+ DecodeTimestamp start_timestamp, DecodeTimestamp end_timestamp,
+ int bytes_to_free, DecodeTimestamp* end_removal_timestamp);
+
+ // Indicates whether the GOP at the beginning or end of the range contains the
+ // next buffer position.
+ bool FirstGOPContainsNextBufferPosition() const;
+ bool LastGOPContainsNextBufferPosition() const;
+
+ // Updates |out_buffer| with the next buffer in presentation order. Seek()
+ // must be called before calls to GetNextBuffer(), and buffers are returned
+ // in order from the last call to Seek(). Returns true if |out_buffer| is
+ // filled with a valid buffer, false if there is not enough data to fulfill
+ // the request.
+ bool GetNextBuffer(scoped_refptr<StreamParserBuffer>* out_buffer);
+ bool HasNextBuffer() const;
+
+ // Returns the config ID for the buffer that will be returned by
+ // GetNextBuffer().
+ int GetNextConfigId() const;
+
+ // Returns true if the range knows the position of the next buffer it should
+ // return, i.e. it has been Seek()ed. This does not necessarily mean that it
+ // has the next buffer yet.
+ bool HasNextBufferPosition() const;
+
+ // Resets this range to an "unseeked" state.
+ void ResetNextBufferPosition();
+
+ // Returns the timestamp of the next buffer that will be returned from
+  // GetNextBuffer(), or kNoDecodeTimestamp() if the timestamp is unknown.
+ DecodeTimestamp GetNextTimestamp() const;
+
+ // Returns the start timestamp of the range.
+ DecodeTimestamp GetStartTimestamp() const;
+
+ // Returns the timestamp of the last buffer in the range.
+ DecodeTimestamp GetEndTimestamp() const;
+
+ // Returns the timestamp for the end of the buffered region in this range.
+ // This is an approximation if the duration for the last buffer in the range
+ // is unset.
+ DecodeTimestamp GetBufferedEndTimestamp() const;
+
+ // Gets the timestamp for the keyframe that is after |timestamp|. If
+  // there isn't a keyframe in the range after |timestamp| then
+  // kNoDecodeTimestamp() is returned. If |timestamp| is in the "gap" between
+  // the value returned by
+ // GetStartTimestamp() and the timestamp on the first buffer in |buffers_|,
+ // then |timestamp| is returned.
+ DecodeTimestamp NextKeyframeTimestamp(DecodeTimestamp timestamp);
+
+ // Gets the timestamp for the closest keyframe that is <= |timestamp|. If
+ // there isn't a keyframe before |timestamp| or |timestamp| is outside
+  // this range, then kNoDecodeTimestamp() is returned.
+ DecodeTimestamp KeyframeBeforeTimestamp(DecodeTimestamp timestamp);
+
+ // Returns whether a buffer with a starting timestamp of |timestamp| would
+ // belong in this range. This includes a buffer that would be appended to
+ // the end of the range.
+ bool BelongsToRange(DecodeTimestamp timestamp) const;
+
+ // Returns true if the range has enough data to seek to the specified
+ // |timestamp|, false otherwise.
+ bool CanSeekTo(DecodeTimestamp timestamp) const;
+
+ // Returns true if this range's buffered timespan completely overlaps the
+ // buffered timespan of |range|.
+ bool CompletelyOverlaps(const SourceBufferRange& range) const;
+
+  // Returns true if the end of this range contains buffers that overlap with
+ // the beginning of |range|.
+ bool EndOverlaps(const SourceBufferRange& range) const;
+
+ // Returns true if |timestamp| is the timestamp of the next buffer in
+ // sequence after |buffers_.back()|, false otherwise.
+ bool IsNextInSequence(DecodeTimestamp timestamp, bool is_keyframe) const;
+
+  // Adds all buffers which overlap [start, end) to the end of |buffers|.
+  // Returns false if no such buffers exist in the range, true otherwise.
+ bool GetBuffersInRange(DecodeTimestamp start, DecodeTimestamp end,
+ BufferQueue* buffers);
+
+ int size_in_bytes() const { return size_in_bytes_; }
+
+ private:
+ typedef std::map<DecodeTimestamp, int> KeyframeMap;
+
+ // Seeks the range to the next keyframe after |timestamp|. If
+ // |skip_given_timestamp| is true, the seek will go to a keyframe with a
+ // timestamp strictly greater than |timestamp|.
+ void SeekAhead(DecodeTimestamp timestamp, bool skip_given_timestamp);
+
+ // Returns an iterator in |buffers_| pointing to the buffer at |timestamp|.
+ // If |skip_given_timestamp| is true, this returns the first buffer with
+ // timestamp greater than |timestamp|.
+ BufferQueue::iterator GetBufferItrAt(
+ DecodeTimestamp timestamp, bool skip_given_timestamp);
+
+ // Returns an iterator in |keyframe_map_| pointing to the next keyframe after
+ // |timestamp|. If |skip_given_timestamp| is true, this returns the first
+ // keyframe with a timestamp strictly greater than |timestamp|.
+ KeyframeMap::iterator GetFirstKeyframeAt(
+ DecodeTimestamp timestamp, bool skip_given_timestamp);
+
+ // Returns an iterator in |keyframe_map_| pointing to the first keyframe
+ // before or at |timestamp|.
+ KeyframeMap::iterator GetFirstKeyframeBefore(DecodeTimestamp timestamp);
+
+ // Helper method to delete buffers in |buffers_| starting at
+ // |starting_point|, an iterator in |buffers_|.
+ // Returns true if everything in the range was removed. Returns
+ // false if the range still contains buffers.
+ bool TruncateAt(const BufferQueue::iterator& starting_point,
+ BufferQueue* deleted_buffers);
+
+  // Frees the buffers in |buffers_| from [|starting_point|, |ending_point|)
+  // and updates the |size_in_bytes_| accordingly. Does not update
+  // |keyframe_map_|.
+ void FreeBufferRange(const BufferQueue::iterator& starting_point,
+ const BufferQueue::iterator& ending_point);
+
+ // Returns the distance in time estimating how far from the beginning or end
+  // of this range a buffer can be to still be considered in the range.
+ base::TimeDelta GetFudgeRoom() const;
+
+ // Returns the approximate duration of a buffer in this range.
+ base::TimeDelta GetApproximateDuration() const;
+
+ // Keeps track of whether gaps are allowed.
+ const GapPolicy gap_policy_;
+
+ // An ordered list of buffers in this range.
+ BufferQueue buffers_;
+
+  // Maps keyframe timestamps to their index positions in |buffers_|.
+ KeyframeMap keyframe_map_;
+
+ // Index base of all positions in |keyframe_map_|. In other words, the
+ // real position of entry |k| of |keyframe_map_| in the range is:
+ // keyframe_map_[k] - keyframe_map_index_base_
+ int keyframe_map_index_base_;
+
+ // Index into |buffers_| for the next buffer to be returned by
+ // GetNextBuffer(), set to -1 before Seek().
+ int next_buffer_index_;
+
+ // If the first buffer in this range is the beginning of a media segment,
+ // |media_segment_start_time_| is the time when the media segment begins.
+ // |media_segment_start_time_| may be <= the timestamp of the first buffer in
+  // |buffers_|. |media_segment_start_time_| is kNoDecodeTimestamp() if this
+  // range does not start at the beginning of a media segment, which can only
+  // happen after garbage collection or after an end overlap that results in a
+  // split range
+ // (we don't have a way of knowing the media segment timestamp for the new
+ // range).
+ DecodeTimestamp media_segment_start_time_;
+
+ // Called to get the largest interbuffer distance seen so far in the stream.
+ InterbufferDistanceCB interbuffer_distance_cb_;
+
+ // Stores the amount of memory taken up by the data in |buffers_|.
+ int size_in_bytes_;
+
+ DISALLOW_COPY_AND_ASSIGN(SourceBufferRange);
+};
+
+} // namespace media
+
+#endif // MEDIA_FILTERS_SOURCE_BUFFER_RANGE_H_
diff --git a/media/filters/source_buffer_stream.cc b/media/filters/source_buffer_stream.cc
index a0381d2ccf..5a2de0b49b 100644
--- a/media/filters/source_buffer_stream.cc
+++ b/media/filters/source_buffer_stream.cc
@@ -12,281 +12,10 @@
#include "base/logging.h"
#include "media/base/audio_splicer.h"
#include "media/filters/source_buffer_platform.h"
+#include "media/filters/source_buffer_range.h"
namespace media {
-typedef StreamParser::BufferQueue BufferQueue;
-
-// Buffers with the same timestamp are only allowed under certain conditions.
-// More precisely, it is allowed in all situations except when the previous
-// frame is not a key frame and the current is a key frame.
-// Examples of situations where DTS of two consecutive frames can be equal:
-// - Video: VP8 Alt-Ref frames.
-// - Video: IPBPBP...: DTS for I frame and for P frame can be equal.
-// - Text track cues that start at same time.
-// Returns true if |prev_is_keyframe| and |current_is_keyframe| indicate a
-// same timestamp situation that is allowed. False is returned otherwise.
-static bool AllowSameTimestamp(
- bool prev_is_keyframe, bool current_is_keyframe,
- SourceBufferStream::Type type) {
- return prev_is_keyframe || !current_is_keyframe;
-}
-
-// Returns the config ID of |buffer| if |buffer| has no splice buffers or
-// |index| is out of range. Otherwise returns the config ID for the fade out
-// preroll buffer at position |index|.
-static int GetConfigId(StreamParserBuffer* buffer, size_t index) {
- return index < buffer->splice_buffers().size()
- ? buffer->splice_buffers()[index]->GetConfigId()
- : buffer->GetConfigId();
-}
-
-// Helper class representing a range of buffered data. All buffers in a
-// SourceBufferRange are ordered sequentially in presentation order with no
-// gaps.
-class SourceBufferRange {
- public:
- // Returns the maximum distance in time between any buffer seen in this
- // stream. Used to estimate the duration of a buffer if its duration is not
- // known.
- typedef base::Callback<base::TimeDelta()> InterbufferDistanceCB;
-
- // Creates a source buffer range with |new_buffers|. |new_buffers| cannot be
- // empty and the front of |new_buffers| must be a keyframe.
- // |media_segment_start_time| refers to the starting timestamp for the media
- // segment to which these buffers belong.
- SourceBufferRange(SourceBufferStream::Type type,
- const BufferQueue& new_buffers,
- DecodeTimestamp media_segment_start_time,
- const InterbufferDistanceCB& interbuffer_distance_cb);
-
- // Appends |buffers| to the end of the range and updates |keyframe_map_| as
- // it encounters new keyframes. Assumes |buffers| belongs at the end of the
- // range.
- void AppendBuffersToEnd(const BufferQueue& buffers);
- bool CanAppendBuffersToEnd(const BufferQueue& buffers) const;
-
- // Appends the buffers from |range| into this range.
- // The first buffer in |range| must come directly after the last buffer
- // in this range.
- // If |transfer_current_position| is true, |range|'s |next_buffer_index_|
- // is transfered to this SourceBufferRange.
- void AppendRangeToEnd(const SourceBufferRange& range,
- bool transfer_current_position);
- bool CanAppendRangeToEnd(const SourceBufferRange& range) const;
-
- // Updates |next_buffer_index_| to point to the Buffer containing |timestamp|.
- // Assumes |timestamp| is valid and in this range.
- void Seek(DecodeTimestamp timestamp);
-
- // Updates |next_buffer_index_| to point to next keyframe after or equal to
- // |timestamp|.
- void SeekAheadTo(DecodeTimestamp timestamp);
-
- // Updates |next_buffer_index_| to point to next keyframe strictly after
- // |timestamp|.
- void SeekAheadPast(DecodeTimestamp timestamp);
-
- // Seeks to the beginning of the range.
- void SeekToStart();
-
- // Finds the next keyframe from |buffers_| after |timestamp| (or at
- // |timestamp| if |is_exclusive| is false) and creates and returns a new
- // SourceBufferRange with the buffers from that keyframe onward.
- // The buffers in the new SourceBufferRange are moved out of this range. If
- // there is no keyframe after |timestamp|, SplitRange() returns null and this
- // range is unmodified.
- SourceBufferRange* SplitRange(DecodeTimestamp timestamp, bool is_exclusive);
-
- // Deletes the buffers from this range starting at |timestamp|, exclusive if
- // |is_exclusive| is true, inclusive otherwise.
- // Resets |next_buffer_index_| if the buffer at |next_buffer_index_| was
- // deleted, and deletes the |keyframe_map_| entries for the buffers that
- // were removed.
- // |deleted_buffers| contains the buffers that were deleted from this range,
- // starting at the buffer that had been at |next_buffer_index_|.
- // Returns true if everything in the range was deleted. Otherwise
- // returns false.
- bool TruncateAt(DecodeTimestamp timestamp,
- BufferQueue* deleted_buffers, bool is_exclusive);
- // Deletes all buffers in range.
- void DeleteAll(BufferQueue* deleted_buffers);
-
- // Deletes a GOP from the front or back of the range and moves these
- // buffers into |deleted_buffers|. Returns the number of bytes deleted from
- // the range (i.e. the size in bytes of |deleted_buffers|).
- int DeleteGOPFromFront(BufferQueue* deleted_buffers);
- int DeleteGOPFromBack(BufferQueue* deleted_buffers);
-
- // Gets the range of GOP to secure at least |bytes_to_free| from
- // [|start_timestamp|, |end_timestamp|).
- // Returns the size of the buffers to secure if the buffers of
- // [|start_timestamp|, |end_removal_timestamp|) is removed.
- // Will not update |end_removal_timestamp| if the returned size is 0.
- int GetRemovalGOP(
- DecodeTimestamp start_timestamp, DecodeTimestamp end_timestamp,
- int bytes_to_free, DecodeTimestamp* end_removal_timestamp);
-
- // Indicates whether the GOP at the beginning or end of the range contains the
- // next buffer position.
- bool FirstGOPContainsNextBufferPosition() const;
- bool LastGOPContainsNextBufferPosition() const;
-
- // Updates |out_buffer| with the next buffer in presentation order. Seek()
- // must be called before calls to GetNextBuffer(), and buffers are returned
- // in order from the last call to Seek(). Returns true if |out_buffer| is
- // filled with a valid buffer, false if there is not enough data to fulfill
- // the request.
- bool GetNextBuffer(scoped_refptr<StreamParserBuffer>* out_buffer);
- bool HasNextBuffer() const;
-
- // Returns the config ID for the buffer that will be returned by
- // GetNextBuffer().
- int GetNextConfigId() const;
-
- // Returns true if the range knows the position of the next buffer it should
- // return, i.e. it has been Seek()ed. This does not necessarily mean that it
- // has the next buffer yet.
- bool HasNextBufferPosition() const;
-
- // Resets this range to an "unseeked" state.
- void ResetNextBufferPosition();
-
- // Returns the timestamp of the next buffer that will be returned from
- // GetNextBuffer(), or kNoTimestamp() if the timestamp is unknown.
- DecodeTimestamp GetNextTimestamp() const;
-
- // Returns the start timestamp of the range.
- DecodeTimestamp GetStartTimestamp() const;
-
- // Returns the timestamp of the last buffer in the range.
- DecodeTimestamp GetEndTimestamp() const;
-
- // Returns the timestamp for the end of the buffered region in this range.
- // This is an approximation if the duration for the last buffer in the range
- // is unset.
- DecodeTimestamp GetBufferedEndTimestamp() const;
-
- // Gets the timestamp for the keyframe that is after |timestamp|. If
- // there isn't a keyframe in the range after |timestamp| then kNoTimestamp()
- // is returned. If |timestamp| is in the "gap" between the value returned by
- // GetStartTimestamp() and the timestamp on the first buffer in |buffers_|,
- // then |timestamp| is returned.
- DecodeTimestamp NextKeyframeTimestamp(DecodeTimestamp timestamp);
-
- // Gets the timestamp for the closest keyframe that is <= |timestamp|. If
- // there isn't a keyframe before |timestamp| or |timestamp| is outside
- // this range, then kNoTimestamp() is returned.
- DecodeTimestamp KeyframeBeforeTimestamp(DecodeTimestamp timestamp);
-
- // Returns whether a buffer with a starting timestamp of |timestamp| would
- // belong in this range. This includes a buffer that would be appended to
- // the end of the range.
- bool BelongsToRange(DecodeTimestamp timestamp) const;
-
- // Returns true if the range has enough data to seek to the specified
- // |timestamp|, false otherwise.
- bool CanSeekTo(DecodeTimestamp timestamp) const;
-
- // Returns true if this range's buffered timespan completely overlaps the
- // buffered timespan of |range|.
- bool CompletelyOverlaps(const SourceBufferRange& range) const;
-
- // Returns true if the end of this range contains buffers that overlaps with
- // the beginning of |range|.
- bool EndOverlaps(const SourceBufferRange& range) const;
-
- // Returns true if |timestamp| is the timestamp of the next buffer in
- // sequence after |buffers_.back()|, false otherwise.
- bool IsNextInSequence(DecodeTimestamp timestamp, bool is_keyframe) const;
-
- // Adds all buffers which overlap [start, end) to the end of |buffers|. If
- // no buffers exist in the range returns false, true otherwise.
- bool GetBuffersInRange(DecodeTimestamp start, DecodeTimestamp end,
- BufferQueue* buffers);
-
- int size_in_bytes() const { return size_in_bytes_; }
-
- private:
- typedef std::map<DecodeTimestamp, int> KeyframeMap;
-
- // Seeks the range to the next keyframe after |timestamp|. If
- // |skip_given_timestamp| is true, the seek will go to a keyframe with a
- // timestamp strictly greater than |timestamp|.
- void SeekAhead(DecodeTimestamp timestamp, bool skip_given_timestamp);
-
- // Returns an iterator in |buffers_| pointing to the buffer at |timestamp|.
- // If |skip_given_timestamp| is true, this returns the first buffer with
- // timestamp greater than |timestamp|.
- BufferQueue::iterator GetBufferItrAt(
- DecodeTimestamp timestamp, bool skip_given_timestamp);
-
- // Returns an iterator in |keyframe_map_| pointing to the next keyframe after
- // |timestamp|. If |skip_given_timestamp| is true, this returns the first
- // keyframe with a timestamp strictly greater than |timestamp|.
- KeyframeMap::iterator GetFirstKeyframeAt(
- DecodeTimestamp timestamp, bool skip_given_timestamp);
-
- // Returns an iterator in |keyframe_map_| pointing to the first keyframe
- // before or at |timestamp|.
- KeyframeMap::iterator GetFirstKeyframeBefore(DecodeTimestamp timestamp);
-
- // Helper method to delete buffers in |buffers_| starting at
- // |starting_point|, an iterator in |buffers_|.
- // Returns true if everything in the range was removed. Returns
- // false if the range still contains buffers.
- bool TruncateAt(const BufferQueue::iterator& starting_point,
- BufferQueue* deleted_buffers);
-
- // Frees the buffers in |buffers_| from [|start_point|,|ending_point|) and
- // updates the |size_in_bytes_| accordingly. Does not update |keyframe_map_|.
- void FreeBufferRange(const BufferQueue::iterator& starting_point,
- const BufferQueue::iterator& ending_point);
-
- // Returns the distance in time estimating how far from the beginning or end
- // of this range a buffer can be to considered in the range.
- base::TimeDelta GetFudgeRoom() const;
-
- // Returns the approximate duration of a buffer in this range.
- base::TimeDelta GetApproximateDuration() const;
-
- // Type of this stream.
- const SourceBufferStream::Type type_;
-
- // An ordered list of buffers in this range.
- BufferQueue buffers_;
-
- // Maps keyframe timestamps to its index position in |buffers_|.
- KeyframeMap keyframe_map_;
-
- // Index base of all positions in |keyframe_map_|. In other words, the
- // real position of entry |k| of |keyframe_map_| in the range is:
- // keyframe_map_[k] - keyframe_map_index_base_
- int keyframe_map_index_base_;
-
- // Index into |buffers_| for the next buffer to be returned by
- // GetNextBuffer(), set to -1 before Seek().
- int next_buffer_index_;
-
- // If the first buffer in this range is the beginning of a media segment,
- // |media_segment_start_time_| is the time when the media segment begins.
- // |media_segment_start_time_| may be <= the timestamp of the first buffer in
- // |buffers_|. |media_segment_start_time_| is kNoTimestamp() if this range
- // does not start at the beginning of a media segment, which can only happen
- // garbage collection or after an end overlap that results in a split range
- // (we don't have a way of knowing the media segment timestamp for the new
- // range).
- DecodeTimestamp media_segment_start_time_;
-
- // Called to get the largest interbuffer distance seen so far in the stream.
- InterbufferDistanceCB interbuffer_distance_cb_;
-
- // Stores the amount of memory taken up by the data in |buffers_|.
- int size_in_bytes_;
-
- DISALLOW_COPY_AND_ASSIGN(SourceBufferRange);
-};
-
// Helper method that returns true if |ranges| is sorted in increasing order,
// false otherwise.
static bool IsRangeListSorted(
@@ -301,21 +30,14 @@ static bool IsRangeListSorted(
return true;
}
-// Comparison operators for std::upper_bound() and std::lower_bound().
-static bool CompareTimeDeltaToStreamParserBuffer(
- const DecodeTimestamp& decode_timestamp,
- const scoped_refptr<StreamParserBuffer>& buffer) {
- return decode_timestamp < buffer->GetDecodeTimestamp();
-}
-static bool CompareStreamParserBufferToTimeDelta(
- const scoped_refptr<StreamParserBuffer>& buffer,
- const DecodeTimestamp& decode_timestamp) {
- return buffer->GetDecodeTimestamp() < decode_timestamp;
-}
-
// Returns an estimate of how far from the beginning or end of a range a buffer
// can be to still be considered in the range, given the |approximate_duration|
// of a buffer in the stream.
+// TODO(wolenetz): Once all stream parsers emit accurate frame durations, use
+// logic like FrameProcessor (2*last_frame_duration + last_decode_timestamp)
+// instead of an overall maximum interbuffer delta for range discontinuity
+// detection, and adjust similarly for splice frame discontinuity detection.
+// See http://crbug.com/351489 and http://crbug.com/351166.
static base::TimeDelta ComputeFudgeRoom(base::TimeDelta approximate_duration) {
// Because we do not know exactly when the next timestamp will be, any buffer
// that starts within 2x the approximate duration of a buffer is considered
@@ -333,6 +55,20 @@ static base::TimeDelta kSeekToStartFudgeRoom() {
return base::TimeDelta::FromMilliseconds(1000);
}
+static SourceBufferRange::GapPolicy TypeToGapPolicy(
+ SourceBufferStream::Type type) {
+ switch (type) {
+ case SourceBufferStream::kAudio:
+ case SourceBufferStream::kVideo:
+ return SourceBufferRange::NO_GAPS_ALLOWED;
+ case SourceBufferStream::kText:
+ return SourceBufferRange::ALLOW_GAPS;
+ }
+
+ NOTREACHED();
+ return SourceBufferRange::NO_GAPS_ALLOWED;
+}
+
SourceBufferStream::SourceBufferStream(const AudioDecoderConfig& audio_config,
const LogCB& log_cb,
bool splice_frames_enabled)
@@ -418,6 +154,8 @@ SourceBufferStream::~SourceBufferStream() {
void SourceBufferStream::OnNewMediaSegment(
DecodeTimestamp media_segment_start_time) {
+ DVLOG(1) << __FUNCTION__ << "(" << media_segment_start_time.InSecondsF()
+ << ")";
DCHECK(!end_of_stream_);
media_segment_start_time_ = media_segment_start_time;
new_media_segment_ = true;
@@ -432,8 +170,11 @@ void SourceBufferStream::OnNewMediaSegment(
media_segment_start_time)) {
last_appended_buffer_timestamp_ = kNoDecodeTimestamp();
last_appended_buffer_is_keyframe_ = false;
+ DVLOG(3) << __FUNCTION__ << " next appended buffers will be in a new range";
} else if (last_range != ranges_.end()) {
DCHECK(last_range == range_for_next_append_);
+ DVLOG(3) << __FUNCTION__ << " next appended buffers will continue range "
+ << "unless intervening remove makes discontinuity";
}
}
@@ -524,7 +265,8 @@ bool SourceBufferStream::Append(const BufferQueue& buffers) {
range_for_next_append_ =
AddToRanges(new SourceBufferRange(
- GetType(), *buffers_for_new_range, new_range_start_time,
+ TypeToGapPolicy(GetType()),
+ *buffers_for_new_range, new_range_start_time,
base::Bind(&SourceBufferStream::GetMaxInterbufferDistance,
base::Unretained(this))));
last_appended_buffer_timestamp_ =
@@ -729,8 +471,8 @@ bool SourceBufferStream::IsMonotonicallyIncreasing(
}
if (current_timestamp == prev_timestamp &&
- !AllowSameTimestamp(prev_is_keyframe, current_is_keyframe,
- GetType())) {
+ !SourceBufferRange::AllowSameTimestamp(prev_is_keyframe,
+ current_is_keyframe)) {
MEDIA_LOG(log_cb_) << "Unexpected combination of buffers with the"
<< " same timestamp detected at "
<< current_timestamp.InSecondsF();
@@ -748,8 +490,8 @@ bool SourceBufferStream::IsNextTimestampValid(
DecodeTimestamp next_timestamp, bool next_is_keyframe) const {
return (last_appended_buffer_timestamp_ != next_timestamp) ||
new_media_segment_ ||
- AllowSameTimestamp(last_appended_buffer_is_keyframe_, next_is_keyframe,
- GetType());
+ SourceBufferRange::AllowSameTimestamp(last_appended_buffer_is_keyframe_,
+ next_is_keyframe);
}
@@ -771,8 +513,15 @@ void SourceBufferStream::UpdateMaxInterbufferDistance(
DecodeTimestamp current_timestamp = (*itr)->GetDecodeTimestamp();
DCHECK(current_timestamp != kNoDecodeTimestamp());
+ base::TimeDelta interbuffer_distance = (*itr)->duration();
+ DCHECK(interbuffer_distance >= base::TimeDelta());
+
if (prev_timestamp != kNoDecodeTimestamp()) {
- base::TimeDelta interbuffer_distance = current_timestamp - prev_timestamp;
+ interbuffer_distance =
+ std::max(current_timestamp - prev_timestamp, interbuffer_distance);
+ }
+
+ if (interbuffer_distance > base::TimeDelta()) {
if (max_interbuffer_distance_ == kNoTimestamp()) {
max_interbuffer_distance_ = interbuffer_distance;
} else {
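The change above seeds the distance estimate with the buffer's own duration, so a lone appended buffer (no previous timestamp) can still establish a maximum. A condensed sketch of the new rule, using a hypothetical free function purely for illustration:

    // Hypothetical distillation of the loop body above.
    static base::TimeDelta NewMaxDistance(base::TimeDelta current_max,
                                          base::TimeDelta buffer_duration,
                                          base::TimeDelta delta_from_prev) {
      base::TimeDelta d = buffer_duration;  // DCHECKed to be >= 0 above.
      if (delta_from_prev > base::TimeDelta())  // A previous buffer exists.
        d = std::max(d, delta_from_prev);
      if (d > base::TimeDelta())
        return current_max == kNoTimestamp() ? d : std::max(current_max, d);
      return current_max;
    }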
@@ -916,7 +665,8 @@ int SourceBufferStream::FreeBuffers(int total_bytes_to_free,
DCHECK(!new_range_for_append);
// Create a new range containing these buffers.
new_range_for_append = new SourceBufferRange(
- GetType(), buffers, kNoDecodeTimestamp(),
+ TypeToGapPolicy(GetType()),
+ buffers, kNoDecodeTimestamp(),
base::Bind(&SourceBufferStream::GetMaxInterbufferDistance,
base::Unretained(this)));
range_for_next_append_ = ranges_.end();
@@ -1011,7 +761,7 @@ void SourceBufferStream::PrepareRangesForNextAppend(
const bool is_exclusive =
new_buffers.front()->splice_buffers().empty() &&
prev_timestamp == next_timestamp &&
- AllowSameTimestamp(prev_is_keyframe, next_is_keyframe, GetType());
+ SourceBufferRange::AllowSameTimestamp(prev_is_keyframe, next_is_keyframe);
// Delete the buffers that |new_buffers| overlaps.
DecodeTimestamp start = new_buffers.front()->GetDecodeTimestamp();
@@ -1149,7 +899,7 @@ void SourceBufferStream::OnSetDuration(base::TimeDelta duration) {
SourceBufferStream::Status SourceBufferStream::GetNextBuffer(
scoped_refptr<StreamParserBuffer>* out_buffer) {
- if (!pending_buffer_) {
+ if (!pending_buffer_.get()) {
const SourceBufferStream::Status status = GetNextBufferInternal(out_buffer);
if (status != SourceBufferStream::kSuccess || !SetPendingBuffer(out_buffer))
return status;
@@ -1158,7 +908,7 @@ SourceBufferStream::Status SourceBufferStream::GetNextBuffer(
if (!pending_buffer_->splice_buffers().empty())
return HandleNextBufferWithSplice(out_buffer);
- DCHECK(pending_buffer_->preroll_buffer());
+ DCHECK(pending_buffer_->preroll_buffer().get());
return HandleNextBufferWithPreroll(out_buffer);
}
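Many hunks in this commit add nothing but .get(): they follow the Chromium-wide removal of scoped_refptr's implicit conversion to T*, after which null tests must go through the raw pointer. A minimal before/after sketch:

    scoped_refptr<StreamParserBuffer> pending_buffer;
    // Before: relied on the implicit conversion to a testable T*.
    //   if (!pending_buffer) { ... }
    // After: the raw pointer is fetched explicitly.
    if (!pending_buffer.get()) {
      // No buffer is pending.
    }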
@@ -1183,7 +933,7 @@ SourceBufferStream::Status SourceBufferStream::HandleNextBufferWithSplice(
splice_buffers[splice_buffers_index_]->splice_timestamp());
// No pre-splice buffers should have preroll.
- DCHECK(!splice_buffers[splice_buffers_index_]->preroll_buffer());
+ DCHECK(!splice_buffers[splice_buffers_index_]->preroll_buffer().get());
*out_buffer = splice_buffers[splice_buffers_index_++];
return SourceBufferStream::kSuccess;
@@ -1241,7 +991,7 @@ SourceBufferStream::Status SourceBufferStream::GetNextBufferInternal(
// If the next buffer is an audio splice frame, the next effective config id
// comes from the first splice buffer.
- if (GetConfigId(next_buffer, 0) != current_config_index_) {
+ if (next_buffer->GetSpliceBufferConfigId(0) != current_config_index_) {
config_change_pending_ = true;
DVLOG(1) << "Config change (track buffer config ID does not match).";
return kConfigChange;
@@ -1461,14 +1211,14 @@ bool SourceBufferStream::UpdateVideoConfig(const VideoDecoderConfig& config) {
void SourceBufferStream::CompleteConfigChange() {
config_change_pending_ = false;
- if (pending_buffer_) {
+ if (pending_buffer_.get()) {
current_config_index_ =
- GetConfigId(pending_buffer_, splice_buffers_index_);
+ pending_buffer_->GetSpliceBufferConfigId(splice_buffers_index_);
return;
}
if (!track_buffer_.empty()) {
- current_config_index_ = GetConfigId(track_buffer_.front(), 0);
+ current_config_index_ = track_buffer_.front()->GetSpliceBufferConfigId(0);
return;
}
@@ -1685,7 +1435,7 @@ void SourceBufferStream::GenerateSpliceFrame(const BufferQueue& new_buffers) {
return;
}
- if (pre_splice_buffers[i]->preroll_buffer()) {
+ if (pre_splice_buffers[i]->preroll_buffer().get()) {
DVLOG(1) << "Can't generate splice: overlapped buffers contain preroll.";
return;
}
@@ -1709,571 +1459,13 @@ void SourceBufferStream::GenerateSpliceFrame(const BufferQueue& new_buffers) {
new_buffers.front()->ConvertToSpliceBuffer(pre_splice_buffers);
}
-SourceBufferRange::SourceBufferRange(
- SourceBufferStream::Type type, const BufferQueue& new_buffers,
- DecodeTimestamp media_segment_start_time,
- const InterbufferDistanceCB& interbuffer_distance_cb)
- : type_(type),
- keyframe_map_index_base_(0),
- next_buffer_index_(-1),
- media_segment_start_time_(media_segment_start_time),
- interbuffer_distance_cb_(interbuffer_distance_cb),
- size_in_bytes_(0) {
- CHECK(!new_buffers.empty());
- DCHECK(new_buffers.front()->IsKeyframe());
- DCHECK(!interbuffer_distance_cb.is_null());
- AppendBuffersToEnd(new_buffers);
-}
-
-void SourceBufferRange::AppendBuffersToEnd(const BufferQueue& new_buffers) {
- DCHECK(buffers_.empty() || CanAppendBuffersToEnd(new_buffers));
- DCHECK(media_segment_start_time_ == kNoDecodeTimestamp() ||
- media_segment_start_time_ <=
- new_buffers.front()->GetDecodeTimestamp());
- for (BufferQueue::const_iterator itr = new_buffers.begin();
- itr != new_buffers.end();
- ++itr) {
- DCHECK((*itr)->GetDecodeTimestamp() != kNoDecodeTimestamp());
- buffers_.push_back(*itr);
- size_in_bytes_ += (*itr)->data_size();
-
- if ((*itr)->IsKeyframe()) {
- keyframe_map_.insert(
- std::make_pair((*itr)->GetDecodeTimestamp(),
- buffers_.size() - 1 + keyframe_map_index_base_));
- }
- }
-}
-
-void SourceBufferRange::Seek(DecodeTimestamp timestamp) {
- DCHECK(CanSeekTo(timestamp));
- DCHECK(!keyframe_map_.empty());
-
- KeyframeMap::iterator result = GetFirstKeyframeBefore(timestamp);
- next_buffer_index_ = result->second - keyframe_map_index_base_;
- DCHECK_LT(next_buffer_index_, static_cast<int>(buffers_.size()));
-}
-
-void SourceBufferRange::SeekAheadTo(DecodeTimestamp timestamp) {
- SeekAhead(timestamp, false);
-}
-
-void SourceBufferRange::SeekAheadPast(DecodeTimestamp timestamp) {
- SeekAhead(timestamp, true);
-}
-
-void SourceBufferRange::SeekAhead(DecodeTimestamp timestamp,
- bool skip_given_timestamp) {
- DCHECK(!keyframe_map_.empty());
-
- KeyframeMap::iterator result =
- GetFirstKeyframeAt(timestamp, skip_given_timestamp);
-
- // If there isn't a keyframe after |timestamp|, reset the next buffer
- // position so that no next buffer is available.
- if (result == keyframe_map_.end()) {
- next_buffer_index_ = -1;
- return;
- }
- next_buffer_index_ = result->second - keyframe_map_index_base_;
- DCHECK_LT(next_buffer_index_, static_cast<int>(buffers_.size()));
-}
-
-void SourceBufferRange::SeekToStart() {
- DCHECK(!buffers_.empty());
- next_buffer_index_ = 0;
-}
-
-SourceBufferRange* SourceBufferRange::SplitRange(
- DecodeTimestamp timestamp, bool is_exclusive) {
- CHECK(!buffers_.empty());
-
- // Find the first keyframe after |timestamp|. If |is_exclusive|, do not
- // include keyframes at |timestamp|.
- KeyframeMap::iterator new_beginning_keyframe =
- GetFirstKeyframeAt(timestamp, is_exclusive);
-
- // If there is no keyframe after |timestamp|, we can't split the range.
- if (new_beginning_keyframe == keyframe_map_.end())
- return NULL;
-
- // Remove the data beginning at |keyframe_index| from |buffers_| and save it
- // into |removed_buffers|.
- int keyframe_index =
- new_beginning_keyframe->second - keyframe_map_index_base_;
- DCHECK_LT(keyframe_index, static_cast<int>(buffers_.size()));
- BufferQueue::iterator starting_point = buffers_.begin() + keyframe_index;
- BufferQueue removed_buffers(starting_point, buffers_.end());
-
- DecodeTimestamp new_range_start_timestamp = kNoDecodeTimestamp();
- if (GetStartTimestamp() < buffers_.front()->GetDecodeTimestamp() &&
- timestamp < removed_buffers.front()->GetDecodeTimestamp()) {
- // The split is in the gap between |media_segment_start_time_| and
- // the first buffer of the new range so we should set the start
- // time of the new range to |timestamp| so we preserve part of the
- // gap in the new range.
- new_range_start_timestamp = timestamp;
- }
-
- keyframe_map_.erase(new_beginning_keyframe, keyframe_map_.end());
- FreeBufferRange(starting_point, buffers_.end());
-
- // Create a new range with |removed_buffers|.
- SourceBufferRange* split_range =
- new SourceBufferRange(
- type_, removed_buffers, new_range_start_timestamp,
- interbuffer_distance_cb_);
-
- // If the next buffer position is now in |split_range|, update the state of
- // this range and |split_range| accordingly.
- if (next_buffer_index_ >= static_cast<int>(buffers_.size())) {
- split_range->next_buffer_index_ = next_buffer_index_ - keyframe_index;
- ResetNextBufferPosition();
- }
-
- return split_range;
-}
-
-BufferQueue::iterator SourceBufferRange::GetBufferItrAt(
- DecodeTimestamp timestamp,
- bool skip_given_timestamp) {
- return skip_given_timestamp
- ? std::upper_bound(buffers_.begin(),
- buffers_.end(),
- timestamp,
- CompareTimeDeltaToStreamParserBuffer)
- : std::lower_bound(buffers_.begin(),
- buffers_.end(),
- timestamp,
- CompareStreamParserBufferToTimeDelta);
-}
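GetBufferItrAt() maps |skip_given_timestamp| onto the usual lower_bound/upper_bound pair; for example, given buffers at decode timestamps {10, 20, 30} ms:

    // skip_given_timestamp == false: lower_bound, first buffer with DTS >= 20.
    //   GetBufferItrAt(20 ms, false) -> iterator to the buffer at 20 ms
    // skip_given_timestamp == true: upper_bound, first buffer with DTS > 20.
    //   GetBufferItrAt(20 ms, true)  -> iterator to the buffer at 30 ms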
-
-SourceBufferRange::KeyframeMap::iterator
-SourceBufferRange::GetFirstKeyframeAt(DecodeTimestamp timestamp,
- bool skip_given_timestamp) {
- return skip_given_timestamp ?
- keyframe_map_.upper_bound(timestamp) :
- keyframe_map_.lower_bound(timestamp);
-}
-
-SourceBufferRange::KeyframeMap::iterator
-SourceBufferRange::GetFirstKeyframeBefore(DecodeTimestamp timestamp) {
- KeyframeMap::iterator result = keyframe_map_.lower_bound(timestamp);
- // lower_bound() returns the first element >= |timestamp|, so we want the
- // previous element if it did not return the element exactly equal to
- // |timestamp|.
- if (result != keyframe_map_.begin() &&
- (result == keyframe_map_.end() || result->first != timestamp)) {
- --result;
- }
- return result;
-}
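A worked example of the step-back logic, assuming keyframes at decode timestamps {0, 30, 60} ms:

    // GetFirstKeyframeBefore(45 ms): lower_bound lands on 60 ms; not an exact
    // match, so --result steps back to the keyframe at 30 ms.
    // GetFirstKeyframeBefore(30 ms): lower_bound is an exact match, so no
    // step back occurs and the keyframe at 30 ms is returned.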
-
-void SourceBufferRange::DeleteAll(BufferQueue* removed_buffers) {
- TruncateAt(buffers_.begin(), removed_buffers);
-}
-
-bool SourceBufferRange::TruncateAt(
- DecodeTimestamp timestamp, BufferQueue* removed_buffers,
- bool is_exclusive) {
- // Find the place in |buffers_| where we will begin deleting data.
- BufferQueue::iterator starting_point =
- GetBufferItrAt(timestamp, is_exclusive);
- return TruncateAt(starting_point, removed_buffers);
-}
-
-int SourceBufferRange::DeleteGOPFromFront(BufferQueue* deleted_buffers) {
- DCHECK(!FirstGOPContainsNextBufferPosition());
- DCHECK(deleted_buffers);
-
- int buffers_deleted = 0;
- int total_bytes_deleted = 0;
-
- KeyframeMap::iterator front = keyframe_map_.begin();
- DCHECK(front != keyframe_map_.end());
-
- // Delete the keyframe at the start of |keyframe_map_|.
- keyframe_map_.erase(front);
-
- // Now we need to delete all the buffers that depend on the keyframe we've
- // just deleted.
- int end_index = keyframe_map_.size() > 0 ?
- keyframe_map_.begin()->second - keyframe_map_index_base_ :
- buffers_.size();
-
- // Delete buffers from the beginning of the buffered range up until (but not
- // including) the next keyframe.
- for (int i = 0; i < end_index; i++) {
- int bytes_deleted = buffers_.front()->data_size();
- size_in_bytes_ -= bytes_deleted;
- total_bytes_deleted += bytes_deleted;
- deleted_buffers->push_back(buffers_.front());
- buffers_.pop_front();
- ++buffers_deleted;
- }
-
- // Update |keyframe_map_index_base_| to account for the deleted buffers.
- keyframe_map_index_base_ += buffers_deleted;
-
- if (next_buffer_index_ > -1) {
- next_buffer_index_ -= buffers_deleted;
- DCHECK_GE(next_buffer_index_, 0);
- }
-
- // Invalidate media segment start time if we've deleted the first buffer of
- // the range.
- if (buffers_deleted > 0)
- media_segment_start_time_ = kNoDecodeTimestamp();
-
- return total_bytes_deleted;
-}
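The |keyframe_map_index_base_| bookkeeping above lets a front deletion avoid rewriting every surviving map entry; a worked trace:

    // Before: buffers_ = [K0 b1 b2 K3 b4], keyframe_map_ = {0ms:0, 30ms:3},
    //         keyframe_map_index_base_ = 0.
    // DeleteGOPFromFront() removes K0, b1, b2 (buffers_deleted == 3):
    // After:  buffers_ = [K3 b4], keyframe_map_ = {30ms:3}, base = 3.
    // K3's buffer index is still map[30ms] - base == 0; no entries rewritten.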
-
-int SourceBufferRange::DeleteGOPFromBack(BufferQueue* deleted_buffers) {
- DCHECK(!LastGOPContainsNextBufferPosition());
- DCHECK(deleted_buffers);
-
- // Remove the last GOP's keyframe from the |keyframe_map_|.
- KeyframeMap::iterator back = keyframe_map_.end();
- DCHECK_GT(keyframe_map_.size(), 0u);
- --back;
-
- // The index of the first buffer in the last GOP is equal to the new size of
- // |buffers_| after that GOP is deleted.
- size_t goal_size = back->second - keyframe_map_index_base_;
- keyframe_map_.erase(back);
-
- int total_bytes_deleted = 0;
- while (buffers_.size() != goal_size) {
- int bytes_deleted = buffers_.back()->data_size();
- size_in_bytes_ -= bytes_deleted;
- total_bytes_deleted += bytes_deleted;
- // We're removing buffers from the back, so push each removed buffer to the
- // front of |deleted_buffers| so that |deleted_buffers| stays in
- // nondecreasing timestamp order.
- deleted_buffers->push_front(buffers_.back());
- buffers_.pop_back();
- }
-
- return total_bytes_deleted;
-}
-
-int SourceBufferRange::GetRemovalGOP(
- DecodeTimestamp start_timestamp, DecodeTimestamp end_timestamp,
- int total_bytes_to_free, DecodeTimestamp* removal_end_timestamp) {
- int bytes_to_free = total_bytes_to_free;
- int bytes_removed = 0;
-
- KeyframeMap::iterator gop_itr = GetFirstKeyframeAt(start_timestamp, false);
- if (gop_itr == keyframe_map_.end())
- return 0;
- int keyframe_index = gop_itr->second - keyframe_map_index_base_;
- BufferQueue::iterator buffer_itr = buffers_.begin() + keyframe_index;
- KeyframeMap::iterator gop_end = keyframe_map_.end();
- if (end_timestamp < GetBufferedEndTimestamp())
- gop_end = GetFirstKeyframeBefore(end_timestamp);
-
- // Check if the removal range is within a GOP and skip the loop if so.
- // [keyframe]...[start_timestamp]...[end_timestamp]...[keyframe]
- KeyframeMap::iterator gop_itr_prev = gop_itr;
- if (gop_itr_prev != keyframe_map_.begin() && --gop_itr_prev == gop_end)
- gop_end = gop_itr;
-
- while (gop_itr != gop_end && bytes_to_free > 0) {
- ++gop_itr;
-
- int gop_size = 0;
- int next_gop_index = gop_itr == keyframe_map_.end() ?
- buffers_.size() : gop_itr->second - keyframe_map_index_base_;
- BufferQueue::iterator next_gop_start = buffers_.begin() + next_gop_index;
- for (; buffer_itr != next_gop_start; ++buffer_itr)
- gop_size += (*buffer_itr)->data_size();
-
- bytes_removed += gop_size;
- bytes_to_free -= gop_size;
- }
- if (bytes_removed > 0) {
- *removal_end_timestamp = gop_itr == keyframe_map_.end() ?
- GetBufferedEndTimestamp() : gop_itr->first;
- }
- return bytes_removed;
-}
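GetRemovalGOP() frees whole GOPs until the byte goal is met, so it can overshoot the request; for example, with 100000-byte GOPs whose keyframes sit at decode timestamps {0, 1, 2, 3} s and the buffered range extending past 4 s:

    // GetRemovalGOP(0 s, 4 s, 250000, &end) walks GOP by GOP:
    //   after the GOP at 0 s: bytes_removed = 100000
    //   after the GOP at 1 s: bytes_removed = 200000
    //   after the GOP at 2 s: bytes_removed = 300000  (goal met, loop exits)
    // Returns 300000 with *end set to the next keyframe, 3 s.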
-
-bool SourceBufferRange::FirstGOPContainsNextBufferPosition() const {
- if (!HasNextBufferPosition())
- return false;
-
- // If there is only one GOP, it must contain the next buffer position.
- if (keyframe_map_.size() == 1u)
- return true;
-
- KeyframeMap::const_iterator second_gop = keyframe_map_.begin();
- ++second_gop;
- return next_buffer_index_ < second_gop->second - keyframe_map_index_base_;
-}
-
-bool SourceBufferRange::LastGOPContainsNextBufferPosition() const {
- if (!HasNextBufferPosition())
- return false;
-
- // If there is only one GOP, it must contain the next buffer position.
- if (keyframe_map_.size() == 1u)
- return true;
-
- KeyframeMap::const_iterator last_gop = keyframe_map_.end();
- --last_gop;
- return last_gop->second - keyframe_map_index_base_ <= next_buffer_index_;
-}
-
-void SourceBufferRange::FreeBufferRange(
- const BufferQueue::iterator& starting_point,
- const BufferQueue::iterator& ending_point) {
- for (BufferQueue::iterator itr = starting_point;
- itr != ending_point; ++itr) {
- size_in_bytes_ -= (*itr)->data_size();
- DCHECK_GE(size_in_bytes_, 0);
- }
- buffers_.erase(starting_point, ending_point);
-}
-
-bool SourceBufferRange::TruncateAt(
- const BufferQueue::iterator& starting_point, BufferQueue* removed_buffers) {
- DCHECK(!removed_buffers || removed_buffers->empty());
-
- // Return if we're not deleting anything.
- if (starting_point == buffers_.end())
- return buffers_.empty();
-
- // Reset the next buffer index if we will be deleting the buffer that's next
- // in sequence.
- if (HasNextBufferPosition()) {
- DecodeTimestamp next_buffer_timestamp = GetNextTimestamp();
- if (next_buffer_timestamp == kNoDecodeTimestamp() ||
- next_buffer_timestamp >= (*starting_point)->GetDecodeTimestamp()) {
- if (HasNextBuffer() && removed_buffers) {
- int starting_offset = starting_point - buffers_.begin();
- int next_buffer_offset = next_buffer_index_ - starting_offset;
- DCHECK_GE(next_buffer_offset, 0);
- BufferQueue saved(starting_point + next_buffer_offset, buffers_.end());
- removed_buffers->swap(saved);
- }
- ResetNextBufferPosition();
- }
- }
-
- // Remove keyframes from |starting_point| onward.
- KeyframeMap::iterator starting_point_keyframe =
- keyframe_map_.lower_bound((*starting_point)->GetDecodeTimestamp());
- keyframe_map_.erase(starting_point_keyframe, keyframe_map_.end());
-
- // Remove everything from |starting_point| onward.
- FreeBufferRange(starting_point, buffers_.end());
- return buffers_.empty();
-}
-
-bool SourceBufferRange::GetNextBuffer(
- scoped_refptr<StreamParserBuffer>* out_buffer) {
- if (!HasNextBuffer())
- return false;
-
- *out_buffer = buffers_[next_buffer_index_];
- next_buffer_index_++;
- return true;
-}
-
-bool SourceBufferRange::HasNextBuffer() const {
- return next_buffer_index_ >= 0 &&
- next_buffer_index_ < static_cast<int>(buffers_.size());
-}
-
-int SourceBufferRange::GetNextConfigId() const {
- DCHECK(HasNextBuffer());
- // If the next buffer is an audio splice frame, the next effective config id
- // comes from the first fade out preroll buffer.
- return GetConfigId(buffers_[next_buffer_index_], 0);
-}
-
-DecodeTimestamp SourceBufferRange::GetNextTimestamp() const {
- DCHECK(!buffers_.empty());
- DCHECK(HasNextBufferPosition());
-
- if (next_buffer_index_ >= static_cast<int>(buffers_.size())) {
- return kNoDecodeTimestamp();
- }
-
- return buffers_[next_buffer_index_]->GetDecodeTimestamp();
-}
-
-bool SourceBufferRange::HasNextBufferPosition() const {
- return next_buffer_index_ >= 0;
-}
-
-void SourceBufferRange::ResetNextBufferPosition() {
- next_buffer_index_ = -1;
-}
-
-void SourceBufferRange::AppendRangeToEnd(const SourceBufferRange& range,
- bool transfer_current_position) {
- DCHECK(CanAppendRangeToEnd(range));
- DCHECK(!buffers_.empty());
-
- if (transfer_current_position && range.next_buffer_index_ >= 0)
- next_buffer_index_ = range.next_buffer_index_ + buffers_.size();
-
- AppendBuffersToEnd(range.buffers_);
-}
-
-bool SourceBufferRange::CanAppendRangeToEnd(
- const SourceBufferRange& range) const {
- return CanAppendBuffersToEnd(range.buffers_);
-}
-
-bool SourceBufferRange::CanAppendBuffersToEnd(
- const BufferQueue& buffers) const {
- DCHECK(!buffers_.empty());
- return IsNextInSequence(buffers.front()->GetDecodeTimestamp(),
- buffers.front()->IsKeyframe());
-}
-
-bool SourceBufferRange::BelongsToRange(DecodeTimestamp timestamp) const {
- DCHECK(!buffers_.empty());
-
- return (IsNextInSequence(timestamp, false) ||
- (GetStartTimestamp() <= timestamp && timestamp <= GetEndTimestamp()));
-}
-
-bool SourceBufferRange::CanSeekTo(DecodeTimestamp timestamp) const {
- DecodeTimestamp start_timestamp =
- std::max(DecodeTimestamp(), GetStartTimestamp() - GetFudgeRoom());
- return !keyframe_map_.empty() && start_timestamp <= timestamp &&
- timestamp < GetBufferedEndTimestamp();
-}
-
-bool SourceBufferRange::CompletelyOverlaps(
- const SourceBufferRange& range) const {
- return GetStartTimestamp() <= range.GetStartTimestamp() &&
- GetEndTimestamp() >= range.GetEndTimestamp();
-}
-
-bool SourceBufferRange::EndOverlaps(const SourceBufferRange& range) const {
- return range.GetStartTimestamp() <= GetEndTimestamp() &&
- GetEndTimestamp() < range.GetEndTimestamp();
-}
-
-DecodeTimestamp SourceBufferRange::GetStartTimestamp() const {
- DCHECK(!buffers_.empty());
- DecodeTimestamp start_timestamp = media_segment_start_time_;
- if (start_timestamp == kNoDecodeTimestamp())
- start_timestamp = buffers_.front()->GetDecodeTimestamp();
- return start_timestamp;
-}
-
-DecodeTimestamp SourceBufferRange::GetEndTimestamp() const {
- DCHECK(!buffers_.empty());
- return buffers_.back()->GetDecodeTimestamp();
-}
-
-DecodeTimestamp SourceBufferRange::GetBufferedEndTimestamp() const {
- DCHECK(!buffers_.empty());
- base::TimeDelta duration = buffers_.back()->duration();
- if (duration == kNoTimestamp() || duration == base::TimeDelta())
- duration = GetApproximateDuration();
- return GetEndTimestamp() + duration;
-}
-
-DecodeTimestamp SourceBufferRange::NextKeyframeTimestamp(
- DecodeTimestamp timestamp) {
- DCHECK(!keyframe_map_.empty());
-
- if (timestamp < GetStartTimestamp() || timestamp >= GetBufferedEndTimestamp())
- return kNoDecodeTimestamp();
-
- KeyframeMap::iterator itr = GetFirstKeyframeAt(timestamp, false);
- if (itr == keyframe_map_.end())
- return kNoDecodeTimestamp();
-
- // If the timestamp is inside the gap between the start of the media
- // segment and the first buffer, then just pretend there is a
- // keyframe at the specified timestamp.
- if (itr == keyframe_map_.begin() &&
- timestamp > media_segment_start_time_ &&
- timestamp < itr->first) {
- return timestamp;
- }
-
- return itr->first;
-}
-
-DecodeTimestamp SourceBufferRange::KeyframeBeforeTimestamp(
- DecodeTimestamp timestamp) {
- DCHECK(!keyframe_map_.empty());
-
- if (timestamp < GetStartTimestamp() || timestamp >= GetBufferedEndTimestamp())
- return kNoDecodeTimestamp();
-
- return GetFirstKeyframeBefore(timestamp)->first;
-}
-
-bool SourceBufferRange::IsNextInSequence(
- DecodeTimestamp timestamp, bool is_keyframe) const {
- DecodeTimestamp end = buffers_.back()->GetDecodeTimestamp();
- if (end < timestamp &&
- (type_ == SourceBufferStream::kText ||
- timestamp <= end + GetFudgeRoom())) {
- return true;
- }
-
- return timestamp == end && AllowSameTimestamp(
- buffers_.back()->IsKeyframe(), is_keyframe, type_);
-}
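A short example of the adjacency test, assuming GetFudgeRoom() returns 40 ms and the range currently ends at DTS 100 ms:

    // DTS 120 ms: 100 < 120 and 120 <= 100 + 40 -> continues this range.
    // DTS 160 ms: 160 > 100 + 40                -> would start a new range.
    // DTS 100 ms: allowed only when AllowSameTimestamp() permits the
    //             keyframe combination.
    // Text streams (type_ == kText) skip the fudge-room bound entirely.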
-
-base::TimeDelta SourceBufferRange::GetFudgeRoom() const {
- return ComputeFudgeRoom(GetApproximateDuration());
-}
-
-base::TimeDelta SourceBufferRange::GetApproximateDuration() const {
- base::TimeDelta max_interbuffer_distance = interbuffer_distance_cb_.Run();
- DCHECK(max_interbuffer_distance != kNoTimestamp());
- return max_interbuffer_distance;
-}
-
-bool SourceBufferRange::GetBuffersInRange(DecodeTimestamp start,
- DecodeTimestamp end,
- BufferQueue* buffers) {
- // Find the nearest buffer with a decode timestamp <= start.
- const DecodeTimestamp first_timestamp = KeyframeBeforeTimestamp(start);
- if (first_timestamp == kNoDecodeTimestamp())
- return false;
-
- // Find all buffers involved in the range.
- const size_t previous_size = buffers->size();
- for (BufferQueue::iterator it = GetBufferItrAt(first_timestamp, false);
- it != buffers_.end();
- ++it) {
- const scoped_refptr<StreamParserBuffer>& buffer = *it;
- // Buffers without duration are not supported, so bail if we encounter any.
- if (buffer->duration() == kNoTimestamp() ||
- buffer->duration() <= base::TimeDelta()) {
- return false;
- }
- if (buffer->end_of_stream() ||
- buffer->timestamp() >= end.ToPresentationTime()) {
- break;
- }
-
- if (buffer->timestamp() + buffer->duration() <= start.ToPresentationTime())
- continue;
- buffers->push_back(buffer);
- }
- return previous_size < buffers->size();
-}
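GetBuffersInRange() collects buffers whose presentation intervals overlap [start, end); for example, with 10 ms buffers at PTS {0, 10, 20, 30} ms and a query of [15 ms, 25 ms):

    // buffer at 0 ms:  ends at 10 <= 15                     -> skipped
    // buffer at 10 ms: ends at 20 > 15 and starts before 25 -> kept
    // buffer at 20 ms: starts before 25                     -> kept
    // buffer at 30 ms: 30 >= 25                             -> loop breaks
    // Any buffer lacking a positive duration aborts the query with false.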
-
bool SourceBufferStream::SetPendingBuffer(
scoped_refptr<StreamParserBuffer>* out_buffer) {
- DCHECK(*out_buffer);
- DCHECK(!pending_buffer_);
+ DCHECK(out_buffer->get());
+ DCHECK(!pending_buffer_.get());
const bool have_splice_buffers = !(*out_buffer)->splice_buffers().empty();
- const bool have_preroll_buffer = !!(*out_buffer)->preroll_buffer();
+ const bool have_preroll_buffer = !!(*out_buffer)->preroll_buffer().get();
if (!have_splice_buffers && !have_preroll_buffer)
return false;
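Per the check above, buffers without splice or preroll data are returned directly and never become |pending_buffer_|; only decorated buffers are held back and drained across subsequent GetNextBuffer() calls. Schematically:

    // plain buffer:   SetPendingBuffer() returns false; caller emits it as-is.
    // splice buffer:  held as pending_buffer_; splice frames are then handed
    //                 out one at a time by HandleNextBufferWithSplice().
    // preroll buffer: held as pending_buffer_; the preroll is surfaced first
    //                 via HandleNextBufferWithPreroll().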
diff --git a/media/filters/source_buffer_stream.h b/media/filters/source_buffer_stream.h
index efbe31f8ae..b5bb855c1f 100644
--- a/media/filters/source_buffer_stream.h
+++ b/media/filters/source_buffer_stream.h
@@ -141,7 +141,7 @@ class MEDIA_EXPORT SourceBufferStream {
// yet.
base::TimeDelta GetMaxInterbufferDistance() const;
- void set_memory_limit_for_testing(int memory_limit) {
+ void set_memory_limit(int memory_limit) {
memory_limit_ = memory_limit;
}
diff --git a/media/filters/source_buffer_stream_unittest.cc b/media/filters/source_buffer_stream_unittest.cc
index a1df540e69..0fbd0425a5 100644
--- a/media/filters/source_buffer_stream_unittest.cc
+++ b/media/filters/source_buffer_stream_unittest.cc
@@ -38,7 +38,7 @@ class SourceBufferStreamTest : public testing::Test {
}
void SetMemoryLimit(int buffers_of_data) {
- stream_->set_memory_limit_for_testing(buffers_of_data * kDataSize);
+ stream_->set_memory_limit(buffers_of_data * kDataSize);
}
void SetStreamInfo(int frames_per_second, int keyframes_per_second) {
diff --git a/media/filters/stream_parser_factory.cc b/media/filters/stream_parser_factory.cc
index daeaea23b7..b47fa75373 100644
--- a/media/filters/stream_parser_factory.cc
+++ b/media/filters/stream_parser_factory.cc
@@ -11,7 +11,7 @@
#include "media/base/media_log.h"
#include "media/base/media_switches.h"
#include "media/formats/mpeg/adts_stream_parser.h"
-#include "media/formats/mpeg/mp3_stream_parser.h"
+#include "media/formats/mpeg/mpeg1_audio_stream_parser.h"
#include "media/formats/webm/webm_stream_parser.h"
#if defined(OS_ANDROID)
@@ -190,7 +190,7 @@ static const CodecInfo* kAudioMP3Codecs[] = {
static StreamParser* BuildMP3Parser(
const std::vector<std::string>& codecs, const LogCB& log_cb) {
- return new MP3StreamParser();
+ return new MPEG1AudioStreamParser();
}
static const CodecInfo kADTSCodecInfo = { NULL, CodecInfo::AUDIO, NULL,
diff --git a/media/filters/video_frame_stream_unittest.cc b/media/filters/video_frame_stream_unittest.cc
index 96848e42cd..3a19e558a8 100644
--- a/media/filters/video_frame_stream_unittest.cc
+++ b/media/filters/video_frame_stream_unittest.cc
@@ -82,7 +82,8 @@ class VideoFrameStreamTest
message_loop_.message_loop_proxy(),
decoders.Pass(),
base::Bind(&VideoFrameStreamTest::SetDecryptorReadyCallback,
- base::Unretained(this))));
+ base::Unretained(this)),
+ new MediaLog()));
// Decryptor can only decrypt (not decrypt-and-decode) so that
// DecryptingDemuxerStream will be used.
@@ -372,6 +373,7 @@ INSTANTIATE_TEST_CASE_P(
VideoFrameStreamTestParams(false, 0, 1),
VideoFrameStreamTestParams(false, 3, 1),
VideoFrameStreamTestParams(false, 7, 1)));
+
INSTANTIATE_TEST_CASE_P(
Encrypted,
VideoFrameStreamTest,
@@ -488,8 +490,44 @@ TEST_P(VideoFrameStreamTest, Read_BlockedDemuxerAndDecoder) {
EXPECT_FALSE(pending_read_);
}
-// No Reset() before initialization is successfully completed.
+TEST_P(VideoFrameStreamTest, Read_DuringEndOfStreamDecode) {
+ // Test applies only when the decoder allows multiple parallel requests, and
+ // they are not satisfied in a single batch.
+ if (GetParam().parallel_decoding == 1 || GetParam().decoding_delay != 0)
+ return;
+
+ Initialize();
+ decoder_->HoldDecode();
+
+ // Read all of the frames up to end of stream. Since parallel decoding is
+ // enabled, the end of stream buffer will be sent to the decoder immediately,
+ // but we don't satisfy it yet.
+ for (int configuration = 0; configuration < kNumConfigs; configuration++) {
+ for (int frame = 0; frame < kNumBuffersInOneConfig; frame++) {
+ ReadOneFrame();
+ while (pending_read_) {
+ decoder_->SatisfySingleDecode();
+ message_loop_.RunUntilIdle();
+ }
+ }
+ }
+
+ // Read() again. The callback must be delayed until the decode completes.
+ ReadOneFrame();
+ ASSERT_TRUE(pending_read_);
+
+ // Satisfy decoding of the end of stream buffer. The read should complete.
+ decoder_->SatisfySingleDecode();
+ message_loop_.RunUntilIdle();
+ ASSERT_FALSE(pending_read_);
+ EXPECT_EQ(last_read_status_, VideoFrameStream::OK);
+ // The read output should indicate end of stream.
+ ASSERT_TRUE(frame_read_.get());
+ EXPECT_TRUE(frame_read_->end_of_stream());
+}
+
+// No Reset() before initialization is successfully completed.
TEST_P(VideoFrameStreamTest, Reset_AfterInitialization) {
Initialize();
Reset();
diff --git a/media/filters/video_renderer_impl.cc b/media/filters/video_renderer_impl.cc
index 25099ca0f8..1bd3f270ab 100644
--- a/media/filters/video_renderer_impl.cc
+++ b/media/filters/video_renderer_impl.cc
@@ -11,6 +11,7 @@
#include "base/location.h"
#include "base/single_thread_task_runner.h"
#include "base/threading/platform_thread.h"
+#include "media/base/bind_to_current_loop.h"
#include "media/base/buffers.h"
#include "media/base/limits.h"
#include "media/base/pipeline.h"
@@ -23,11 +24,13 @@ VideoRendererImpl::VideoRendererImpl(
ScopedVector<VideoDecoder> decoders,
const SetDecryptorReadyCB& set_decryptor_ready_cb,
const PaintCB& paint_cb,
- bool drop_frames)
+ bool drop_frames,
+ const scoped_refptr<MediaLog>& media_log)
: task_runner_(task_runner),
video_frame_stream_(new VideoFrameStream(task_runner,
decoders.Pass(),
- set_decryptor_ready_cb)),
+ set_decryptor_ready_cb,
+ media_log)),
low_delay_(false),
received_end_of_stream_(false),
rendered_end_of_stream_(false),
@@ -67,6 +70,7 @@ VideoRendererImpl::~VideoRendererImpl() {
}
void VideoRendererImpl::Flush(const base::Closure& callback) {
+ DVLOG(1) << __FUNCTION__;
DCHECK(task_runner_->BelongsToCurrentThread());
base::AutoLock auto_lock(lock_);
DCHECK_EQ(state_, kPlaying);
@@ -88,7 +92,8 @@ void VideoRendererImpl::Flush(const base::Closure& callback) {
weak_factory_.GetWeakPtr()));
}
-void VideoRendererImpl::StartPlaying() {
+void VideoRendererImpl::StartPlayingFrom(base::TimeDelta timestamp) {
+ DVLOG(1) << __FUNCTION__ << "(" << timestamp.InMicroseconds() << ")";
DCHECK(task_runner_->BelongsToCurrentThread());
base::AutoLock auto_lock(lock_);
DCHECK_EQ(state_, kFlushed);
@@ -97,7 +102,7 @@ void VideoRendererImpl::StartPlaying() {
DCHECK_EQ(buffering_state_, BUFFERING_HAVE_NOTHING);
state_ = kPlaying;
- start_timestamp_ = get_time_cb_.Run();
+ start_timestamp_ = timestamp;
AttemptRead_Locked();
}
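With this change the renderer no longer derives its start point from |get_time_cb_| at play time; the seek target arrives as an explicit argument. A hypothetical call site (names illustrative, not from this patch):

    // After a seek completes, the pipeline hands the target straight in.
    video_renderer->StartPlayingFrom(seek_time);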
@@ -105,35 +110,32 @@ void VideoRendererImpl::Initialize(DemuxerStream* stream,
bool low_delay,
const PipelineStatusCB& init_cb,
const StatisticsCB& statistics_cb,
- const TimeCB& max_time_cb,
const BufferingStateCB& buffering_state_cb,
const base::Closure& ended_cb,
const PipelineStatusCB& error_cb,
- const TimeDeltaCB& get_time_cb,
- const TimeDeltaCB& get_duration_cb) {
+ const TimeDeltaCB& get_time_cb) {
DCHECK(task_runner_->BelongsToCurrentThread());
base::AutoLock auto_lock(lock_);
DCHECK(stream);
DCHECK_EQ(stream->type(), DemuxerStream::VIDEO);
DCHECK(!init_cb.is_null());
DCHECK(!statistics_cb.is_null());
- DCHECK(!max_time_cb.is_null());
DCHECK(!buffering_state_cb.is_null());
DCHECK(!ended_cb.is_null());
DCHECK(!get_time_cb.is_null());
- DCHECK(!get_duration_cb.is_null());
DCHECK_EQ(kUninitialized, state_);
low_delay_ = low_delay;
- init_cb_ = init_cb;
+ // Always post |init_cb_| because |this| could be destroyed if initialization
+ // failed.
+ init_cb_ = BindToCurrentLoop(init_cb);
+
statistics_cb_ = statistics_cb;
- max_time_cb_ = max_time_cb;
buffering_state_cb_ = buffering_state_cb;
ended_cb_ = ended_cb;
error_cb_ = error_cb;
get_time_cb_ = get_time_cb;
- get_duration_cb_ = get_duration_cb;
state_ = kInitializing;
video_frame_stream_->Initialize(
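BindToCurrentLoop() wraps a callback so that Run() posts back to the message loop the wrapper was created on rather than executing inline, which is what makes |init_cb_| safe to run even when a failed initialization destroys |this|. A small sketch of the behavior:

    // Created on thread A; Run() from anywhere posts to A's loop.
    PipelineStatusCB cb = BindToCurrentLoop(init_cb);
    cb.Run(PIPELINE_ERROR_INITIALIZATION_FAILED);  // enqueued, runs later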
@@ -292,6 +294,7 @@ void VideoRendererImpl::DropNextReadyFrame_Locked() {
void VideoRendererImpl::FrameReady(VideoFrameStream::Status status,
const scoped_refptr<VideoFrame>& frame) {
+ DCHECK(task_runner_->BelongsToCurrentThread());
base::AutoLock auto_lock(lock_);
DCHECK_NE(state_, kUninitialized);
DCHECK_NE(state_, kFlushed);
@@ -305,7 +308,7 @@ void VideoRendererImpl::FrameReady(VideoFrameStream::Status status,
PipelineStatus error = PIPELINE_ERROR_DECODE;
if (status == VideoFrameStream::DECRYPT_ERROR)
error = PIPELINE_ERROR_DECRYPT;
- error_cb_.Run(error);
+ task_runner_->PostTask(FROM_HERE, base::Bind(error_cb_, error));
return;
}
@@ -317,7 +320,7 @@ void VideoRendererImpl::FrameReady(VideoFrameStream::Status status,
DCHECK_EQ(state_, kPlaying);
// Can happen when demuxers are preparing for a new Seek().
- if (!frame) {
+ if (!frame.get()) {
DCHECK_EQ(status, VideoFrameStream::DEMUXER_READ_ABORTED);
return;
}
@@ -356,21 +359,7 @@ void VideoRendererImpl::TransitionToHaveEnough_Locked() {
DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK_EQ(buffering_state_, BUFFERING_HAVE_NOTHING);
- if (received_end_of_stream_)
- max_time_cb_.Run(get_duration_cb_.Run());
-
if (!ready_frames_.empty()) {
- // Max time isn't reported while we're in a have nothing state as we could
- // be discarding frames to find |start_timestamp_|.
- if (!received_end_of_stream_) {
- base::TimeDelta max_timestamp = ready_frames_[0]->timestamp();
- for (size_t i = 1; i < ready_frames_.size(); ++i) {
- if (ready_frames_[i]->timestamp() > max_timestamp)
- max_timestamp = ready_frames_[i]->timestamp();
- }
- max_time_cb_.Run(max_timestamp);
- }
-
// Because the clock might remain paused for an undetermined amount
// of time (e.g., seeking while paused), paint the first frame.
PaintNextReadyFrame_Locked();
@@ -386,27 +375,10 @@ void VideoRendererImpl::AddReadyFrame_Locked(
lock_.AssertAcquired();
DCHECK(!frame->end_of_stream());
- // Adjust the incoming frame if its rendering stop time is past the duration
- // of the video itself. This is typically the last frame of the video and
- // occurs if the container specifies a duration that isn't a multiple of the
- // frame rate. Another way for this to happen is for the container to state
- // a smaller duration than the largest packet timestamp.
- base::TimeDelta duration = get_duration_cb_.Run();
- if (frame->timestamp() > duration) {
- frame->set_timestamp(duration);
- }
-
ready_frames_.push_back(frame);
DCHECK_LE(ready_frames_.size(),
static_cast<size_t>(limits::kMaxVideoFrames));
- // FrameReady() may add frames but discard them when we're decoding frames to
- // reach |start_timestamp_|. In this case we'll only want to update the max
- // time when we know we've reached |start_timestamp_| and have buffered enough
- // frames to begin playback.
- if (buffering_state_ == BUFFERING_HAVE_ENOUGH)
- max_time_cb_.Run(frame->timestamp());
-
// Avoid needlessly waking up |thread_| unless playing.
if (state_ == kPlaying)
frame_available_.Signal();
diff --git a/media/filters/video_renderer_impl.h b/media/filters/video_renderer_impl.h
index 181beb57d5..a1f027dd41 100644
--- a/media/filters/video_renderer_impl.h
+++ b/media/filters/video_renderer_impl.h
@@ -16,6 +16,7 @@
#include "base/threading/platform_thread.h"
#include "media/base/decryptor.h"
#include "media/base/demuxer_stream.h"
+#include "media/base/media_log.h"
#include "media/base/pipeline_status.h"
#include "media/base/video_decoder.h"
#include "media/base/video_frame.h"
@@ -53,7 +54,8 @@ class MEDIA_EXPORT VideoRendererImpl
ScopedVector<VideoDecoder> decoders,
const SetDecryptorReadyCB& set_decryptor_ready_cb,
const PaintCB& paint_cb,
- bool drop_frames);
+ bool drop_frames,
+ const scoped_refptr<MediaLog>& media_log);
virtual ~VideoRendererImpl();
// VideoRenderer implementation.
@@ -61,14 +63,12 @@ class MEDIA_EXPORT VideoRendererImpl
bool low_delay,
const PipelineStatusCB& init_cb,
const StatisticsCB& statistics_cb,
- const TimeCB& max_time_cb,
const BufferingStateCB& buffering_state_cb,
const base::Closure& ended_cb,
const PipelineStatusCB& error_cb,
- const TimeDeltaCB& get_time_cb,
- const TimeDeltaCB& get_duration_cb) OVERRIDE;
+ const TimeDeltaCB& get_time_cb) OVERRIDE;
virtual void Flush(const base::Closure& callback) OVERRIDE;
- virtual void StartPlaying() OVERRIDE;
+ virtual void StartPlayingFrom(base::TimeDelta timestamp) OVERRIDE;
// PlatformThread::Delegate implementation.
virtual void ThreadMain() OVERRIDE;
@@ -140,7 +140,7 @@ class MEDIA_EXPORT VideoRendererImpl
// Important detail: being in kPlaying doesn't imply that video is being
// rendered. Rather, it means that the renderer is ready to go. The actual
- // rendering of video is controlled by time advancing via |time_cb_|.
+ // rendering of video is controlled by time advancing via |get_time_cb_|.
//
// kUninitialized
// | Initialize()
@@ -151,7 +151,7 @@ class MEDIA_EXPORT VideoRendererImpl
// |
// V Decoders reset
// kFlushed <------------------ kFlushing
- // | StartPlaying() ^
+ // | StartPlayingFrom() ^
// | |
// | | Flush()
// `---------> kPlaying --------'
@@ -181,12 +181,10 @@ class MEDIA_EXPORT VideoRendererImpl
// Event callbacks.
PipelineStatusCB init_cb_;
StatisticsCB statistics_cb_;
- TimeCB max_time_cb_;
BufferingStateCB buffering_state_cb_;
base::Closure ended_cb_;
PipelineStatusCB error_cb_;
TimeDeltaCB get_time_cb_;
- TimeDeltaCB get_duration_cb_;
base::TimeDelta start_timestamp_;
diff --git a/media/filters/video_renderer_impl_unittest.cc b/media/filters/video_renderer_impl_unittest.cc
index 4f44939640..7bda5104f9 100644
--- a/media/filters/video_renderer_impl_unittest.cc
+++ b/media/filters/video_renderer_impl_unittest.cc
@@ -43,10 +43,6 @@ MATCHER_P(HasTimestamp, ms, "") {
return arg->timestamp().InMilliseconds() == ms;
}
-// Arbitrary value. Has to be large enough to cover any timestamp value used
-// in tests and kTimeToDeclareHaveNothing.
-static const int kVideoDurationInMs = 10000;
-
class VideoRendererImplTest : public ::testing::Test {
public:
VideoRendererImplTest()
@@ -60,7 +56,8 @@ class VideoRendererImplTest : public ::testing::Test {
decoders.Pass(),
media::SetDecryptorReadyCB(),
base::Bind(&StrictMock<MockCB>::Display, base::Unretained(&mock_cb_)),
- true));
+ true,
+ new MediaLog()));
demuxer_stream_.set_video_decoder_config(TestVideoConfig::Normal());
@@ -68,17 +65,10 @@ class VideoRendererImplTest : public ::testing::Test {
EXPECT_CALL(demuxer_stream_, Read(_)).WillRepeatedly(
RunCallback<0>(DemuxerStream::kOk,
scoped_refptr<DecoderBuffer>(new DecoderBuffer(0))));
- EXPECT_CALL(statistics_cb_object_, OnStatistics(_))
- .Times(AnyNumber());
- EXPECT_CALL(*this, OnTimeUpdate(_))
- .Times(AnyNumber());
}
virtual ~VideoRendererImplTest() {}
- // Callbacks passed into Initialize().
- MOCK_METHOD1(OnTimeUpdate, void(base::TimeDelta));
-
void Initialize() {
InitializeWithLowDelay(false);
}
@@ -111,22 +101,19 @@ class VideoRendererImplTest : public ::testing::Test {
&demuxer_stream_,
low_delay,
status_cb,
- base::Bind(&MockStatisticsCB::OnStatistics,
- base::Unretained(&statistics_cb_object_)),
- base::Bind(&VideoRendererImplTest::OnTimeUpdate,
+ base::Bind(&VideoRendererImplTest::OnStatisticsUpdate,
base::Unretained(this)),
base::Bind(&StrictMock<MockCB>::BufferingStateChange,
base::Unretained(&mock_cb_)),
ended_event_.GetClosure(),
error_event_.GetPipelineStatusCB(),
- base::Bind(&VideoRendererImplTest::GetTime, base::Unretained(this)),
- base::Bind(&VideoRendererImplTest::GetDuration,
- base::Unretained(this)));
+ base::Bind(&VideoRendererImplTest::GetTime, base::Unretained(this)));
}
- void StartPlaying() {
- SCOPED_TRACE("StartPlaying()");
- renderer_->StartPlaying();
+ void StartPlayingFrom(int milliseconds) {
+ SCOPED_TRACE(base::StringPrintf("StartPlayingFrom(%d)", milliseconds));
+ renderer_->StartPlayingFrom(
+ base::TimeDelta::FromMilliseconds(milliseconds));
message_loop_.RunUntilIdle();
}
@@ -140,6 +127,7 @@ class VideoRendererImplTest : public ::testing::Test {
void Destroy() {
SCOPED_TRACE("Destroy()");
renderer_.reset();
+ message_loop_.RunUntilIdle();
}
// Parses a string representation of video frames and generates corresponding
@@ -223,7 +211,7 @@ class VideoRendererImplTest : public ::testing::Test {
// Post tasks for OutputCB and DecodeCB.
scoped_refptr<VideoFrame> frame = decode_results_.front().second;
- if (frame)
+ if (frame.get())
message_loop_.PostTask(FROM_HERE, base::Bind(output_cb_, frame));
message_loop_.PostTask(
FROM_HERE, base::Bind(base::ResetAndReturn(&decode_cb_),
@@ -255,7 +243,6 @@ class VideoRendererImplTest : public ::testing::Test {
DCHECK_EQ(&message_loop_, base::MessageLoop::current());
base::AutoLock l(lock_);
time_ += base::TimeDelta::FromMilliseconds(time_ms);
- DCHECK_LE(time_.InMicroseconds(), GetDuration().InMicroseconds());
}
protected:
@@ -263,7 +250,6 @@ class VideoRendererImplTest : public ::testing::Test {
scoped_ptr<VideoRendererImpl> renderer_;
MockVideoDecoder* decoder_; // Owned by |renderer_|.
NiceMock<MockDemuxerStream> demuxer_stream_;
- MockStatisticsCB statistics_cb_object_;
// Use StrictMock<T> to catch missing/extra callbacks.
class MockCB {
@@ -279,10 +265,6 @@ class VideoRendererImplTest : public ::testing::Test {
return time_;
}
- base::TimeDelta GetDuration() {
- return base::TimeDelta::FromMilliseconds(kVideoDurationInMs);
- }
-
void DecodeRequested(const scoped_refptr<DecoderBuffer>& buffer,
const VideoDecoder::DecodeCB& decode_cb) {
DCHECK_EQ(&message_loop_, base::MessageLoop::current());
@@ -310,6 +292,8 @@ class VideoRendererImplTest : public ::testing::Test {
message_loop_.PostTask(FROM_HERE, callback);
}
+ void OnStatisticsUpdate(const PipelineStatistics& stats) {}
+
base::MessageLoop message_loop_;
// Used to protect |time_|.
@@ -347,12 +331,12 @@ TEST_F(VideoRendererImplTest, Initialize) {
Destroy();
}
-TEST_F(VideoRendererImplTest, InitializeAndStartPlaying) {
+TEST_F(VideoRendererImplTest, InitializeAndStartPlayingFrom) {
Initialize();
QueueFrames("0 10 20 30");
EXPECT_CALL(mock_cb_, Display(HasTimestamp(0)));
EXPECT_CALL(mock_cb_, BufferingStateChange(BUFFERING_HAVE_ENOUGH));
- StartPlaying();
+ StartPlayingFrom(0);
Destroy();
}
@@ -366,7 +350,7 @@ TEST_F(VideoRendererImplTest, DestroyWhileFlushing) {
QueueFrames("0 10 20 30");
EXPECT_CALL(mock_cb_, Display(HasTimestamp(0)));
EXPECT_CALL(mock_cb_, BufferingStateChange(BUFFERING_HAVE_ENOUGH));
- StartPlaying();
+ StartPlayingFrom(0);
EXPECT_CALL(mock_cb_, BufferingStateChange(BUFFERING_HAVE_NOTHING));
renderer_->Flush(NewExpectedClosure());
Destroy();
@@ -377,13 +361,13 @@ TEST_F(VideoRendererImplTest, Play) {
QueueFrames("0 10 20 30");
EXPECT_CALL(mock_cb_, Display(HasTimestamp(0)));
EXPECT_CALL(mock_cb_, BufferingStateChange(BUFFERING_HAVE_ENOUGH));
- StartPlaying();
+ StartPlayingFrom(0);
Destroy();
}
TEST_F(VideoRendererImplTest, FlushWithNothingBuffered) {
Initialize();
- StartPlaying();
+ StartPlayingFrom(0);
// We shouldn't expect a buffering state change since we never reached
// BUFFERING_HAVE_ENOUGH.
@@ -391,35 +375,12 @@ TEST_F(VideoRendererImplTest, FlushWithNothingBuffered) {
Destroy();
}
-TEST_F(VideoRendererImplTest, EndOfStream_ClipDuration) {
- Initialize();
- QueueFrames("0");
- EXPECT_CALL(mock_cb_, Display(HasTimestamp(0)));
- EXPECT_CALL(mock_cb_, BufferingStateChange(BUFFERING_HAVE_ENOUGH));
- StartPlaying();
-
- // Next frame has timestamp way past duration. Its timestamp will be adjusted
- // to match the duration of the video.
- QueueFrames(base::IntToString(kVideoDurationInMs + 1000));
- SatisfyPendingRead();
- WaitForPendingRead();
-
- // Queue the end of stream frame and wait for the last frame to be rendered.
- SatisfyPendingReadWithEndOfStream();
-
- EXPECT_CALL(mock_cb_, Display(HasTimestamp(kVideoDurationInMs)));
- AdvanceTimeInMs(kVideoDurationInMs);
- WaitForEnded();
-
- Destroy();
-}
-
TEST_F(VideoRendererImplTest, DecodeError_Playing) {
Initialize();
QueueFrames("0 10 20 30");
EXPECT_CALL(mock_cb_, Display(HasTimestamp(0)));
EXPECT_CALL(mock_cb_, BufferingStateChange(BUFFERING_HAVE_ENOUGH));
- StartPlaying();
+ StartPlayingFrom(0);
QueueFrames("error");
SatisfyPendingRead();
@@ -427,47 +388,44 @@ TEST_F(VideoRendererImplTest, DecodeError_Playing) {
Destroy();
}
-TEST_F(VideoRendererImplTest, DecodeError_DuringStartPlaying) {
+TEST_F(VideoRendererImplTest, DecodeError_DuringStartPlayingFrom) {
Initialize();
QueueFrames("error");
- StartPlaying();
+ StartPlayingFrom(0);
Destroy();
}
-TEST_F(VideoRendererImplTest, StartPlaying_Exact) {
+TEST_F(VideoRendererImplTest, StartPlayingFrom_Exact) {
Initialize();
QueueFrames("50 60 70 80 90");
EXPECT_CALL(mock_cb_, Display(HasTimestamp(60)));
EXPECT_CALL(mock_cb_, BufferingStateChange(BUFFERING_HAVE_ENOUGH));
- AdvanceTimeInMs(60);
- StartPlaying();
+ StartPlayingFrom(60);
Destroy();
}
-TEST_F(VideoRendererImplTest, StartPlaying_RightBefore) {
+TEST_F(VideoRendererImplTest, StartPlayingFrom_RightBefore) {
Initialize();
QueueFrames("50 60 70 80 90");
EXPECT_CALL(mock_cb_, Display(HasTimestamp(50)));
EXPECT_CALL(mock_cb_, BufferingStateChange(BUFFERING_HAVE_ENOUGH));
- AdvanceTimeInMs(59);
- StartPlaying();
+ StartPlayingFrom(59);
Destroy();
}
-TEST_F(VideoRendererImplTest, StartPlaying_RightAfter) {
+TEST_F(VideoRendererImplTest, StartPlayingFrom_RightAfter) {
Initialize();
QueueFrames("50 60 70 80 90");
EXPECT_CALL(mock_cb_, Display(HasTimestamp(60)));
EXPECT_CALL(mock_cb_, BufferingStateChange(BUFFERING_HAVE_ENOUGH));
- AdvanceTimeInMs(61);
- StartPlaying();
+ StartPlayingFrom(61);
Destroy();
}
-TEST_F(VideoRendererImplTest, StartPlaying_LowDelay) {
+TEST_F(VideoRendererImplTest, StartPlayingFrom_LowDelay) {
// In low-delay mode only one frame is required to finish preroll.
InitializeWithLowDelay(true);
QueueFrames("0");
@@ -478,7 +436,7 @@ TEST_F(VideoRendererImplTest, StartPlaying_LowDelay) {
.Times(AnyNumber());
EXPECT_CALL(mock_cb_, BufferingStateChange(BUFFERING_HAVE_NOTHING))
.Times(AnyNumber());
- StartPlaying();
+ StartPlayingFrom(0);
QueueFrames("10");
SatisfyPendingRead();
@@ -492,26 +450,13 @@ TEST_F(VideoRendererImplTest, StartPlaying_LowDelay) {
Destroy();
}
-TEST_F(VideoRendererImplTest, PlayAfterStartPlaying) {
- Initialize();
- QueueFrames("0 10 20 30");
- EXPECT_CALL(mock_cb_, Display(HasTimestamp(0)));
- EXPECT_CALL(mock_cb_, BufferingStateChange(BUFFERING_HAVE_ENOUGH));
- StartPlaying();
-
- // Check that there is an outstanding Read() request.
- EXPECT_TRUE(IsReadPending());
-
- Destroy();
-}
-
// Verify that a late decoder response doesn't break invariants in the renderer.
TEST_F(VideoRendererImplTest, DestroyDuringOutstandingRead) {
Initialize();
QueueFrames("0 10 20 30");
EXPECT_CALL(mock_cb_, Display(HasTimestamp(0)));
EXPECT_CALL(mock_cb_, BufferingStateChange(BUFFERING_HAVE_ENOUGH));
- StartPlaying();
+ StartPlayingFrom(0);
// Check that there is an outstanding Read() request.
EXPECT_TRUE(IsReadPending());
@@ -529,7 +474,7 @@ TEST_F(VideoRendererImplTest, Underflow) {
QueueFrames("0 10 20 30");
EXPECT_CALL(mock_cb_, Display(HasTimestamp(0)));
EXPECT_CALL(mock_cb_, BufferingStateChange(BUFFERING_HAVE_ENOUGH));
- StartPlaying();
+ StartPlayingFrom(0);
// Advance time slightly. Frames should be dropped and we should NOT signal
// having nothing.
@@ -541,7 +486,7 @@ TEST_F(VideoRendererImplTest, Underflow) {
WaitableMessageLoopEvent event;
EXPECT_CALL(mock_cb_, BufferingStateChange(BUFFERING_HAVE_NOTHING))
.WillOnce(RunClosure(event.GetClosure()));
- AdvanceTimeInMs(3000); // Must match kTimeToDeclareHaveNothing.
+ AdvanceTimeInMs(3000); // Must match kTimeToDeclareHaveNothing.
event.RunAndWait();
}
diff --git a/media/filters/vpx_video_decoder.cc b/media/filters/vpx_video_decoder.cc
index ec902b888f..bf7cc3cdaa 100644
--- a/media/filters/vpx_video_decoder.cc
+++ b/media/filters/vpx_video_decoder.cc
@@ -202,6 +202,10 @@ VpxVideoDecoder::~VpxVideoDecoder() {
CloseDecoder();
}
+std::string VpxVideoDecoder::GetDisplayName() const {
+ return "VpxVideoDecoder";
+}
+
void VpxVideoDecoder::Initialize(const VideoDecoderConfig& config,
bool low_delay,
const PipelineStatusCB& status_cb,
@@ -267,7 +271,7 @@ bool VpxVideoDecoder::ConfigureDecoder(const VideoDecoderConfig& config) {
if (vpx_codec_set_frame_buffer_functions(vpx_codec_,
&MemoryPool::GetVP9FrameBuffer,
&MemoryPool::ReleaseVP9FrameBuffer,
- memory_pool_)) {
+ memory_pool_.get())) {
LOG(ERROR) << "Failed to configure external buffers.";
return false;
}
@@ -333,7 +337,7 @@ void VpxVideoDecoder::DecodeBuffer(const scoped_refptr<DecoderBuffer>& buffer) {
DCHECK_NE(state_, kDecodeFinished);
DCHECK_NE(state_, kError);
DCHECK(!decode_cb_.is_null());
- DCHECK(buffer);
+ DCHECK(buffer.get());
// Transition to kDecodeFinished on the first end of stream buffer.
if (state_ == kNormal && buffer->end_of_stream()) {
@@ -351,7 +355,7 @@ void VpxVideoDecoder::DecodeBuffer(const scoped_refptr<DecoderBuffer>& buffer) {
base::ResetAndReturn(&decode_cb_).Run(kOk);
- if (video_frame)
+ if (video_frame.get())
output_cb_.Run(video_frame);
}
@@ -449,7 +453,7 @@ void VpxVideoDecoder::CopyVpxImageTo(const vpx_image* vpx_image,
gfx::Size size(vpx_image->d_w, vpx_image->d_h);
- if (!vpx_codec_alpha_ && memory_pool_) {
+ if (!vpx_codec_alpha_ && memory_pool_.get()) {
*video_frame = VideoFrame::WrapExternalYuvData(
codec_format,
size, gfx::Rect(size), config_.natural_size(),
diff --git a/media/filters/vpx_video_decoder.h b/media/filters/vpx_video_decoder.h
index 0e1a941632..fdc5958572 100644
--- a/media/filters/vpx_video_decoder.h
+++ b/media/filters/vpx_video_decoder.h
@@ -32,6 +32,7 @@ class MEDIA_EXPORT VpxVideoDecoder : public VideoDecoder {
virtual ~VpxVideoDecoder();
// VideoDecoder implementation.
+ virtual std::string GetDisplayName() const OVERRIDE;
virtual void Initialize(const VideoDecoderConfig& config,
bool low_delay,
const PipelineStatusCB& status_cb,