// WebM Block bytes that represent a VP8 interframe.
const uint8 kVP8Interframe[] = { 0x11, 0x00, 0x00 };
+static const uint8 kCuesHeader[] = {
+ 0x1C, 0x53, 0xBB, 0x6B, // Cues ID
+ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // cues(size = 0)
+};
+
const int kTracksHeaderSize = sizeof(kTracksHeader);
const int kTracksSizeOffset = 4;
const int kVideoTrackNum = 1;
const int kAudioTrackNum = 2;
const int kTextTrackNum = 3;
+const int kAlternateTextTrackNum = 4;
const int kAudioBlockDuration = 23;
const int kVideoBlockDuration = 33;
// The data pointed by |buffer| should be at least 8 bytes long.
// |number| should be in the range 0 <= number < 0x00FFFFFFFFFFFFFF.
static void WriteInt64(uint8* buffer, int64 number) {
- DCHECK(number >= 0 && number < GG_LONGLONG(0x00FFFFFFFFFFFFFF));
+ DCHECK(number >= 0 && number < 0x00FFFFFFFFFFFFFFLL);
buffer[0] = 0x01;
int64 tmp = number;
for (int i = 7; i > 0; i--) {
static void LogFunc(const std::string& str) { DVLOG(1) << str; }
-class ChunkDemuxerTest : public testing::Test {
+class ChunkDemuxerTest : public ::testing::Test {
protected:
enum CodecsIndex {
AUDIO,
return GenerateCluster(46, 66, 5);
}
- ChunkDemuxerTest() {
+ ChunkDemuxerTest()
+ : append_window_end_for_next_append_(kInfiniteDuration()) {
+ init_segment_received_cb_ =
+ base::Bind(&ChunkDemuxerTest::InitSegmentReceived,
+ base::Unretained(this));
CreateNewDemuxer();
}
base::Bind(&ChunkDemuxerTest::DemuxerOpened, base::Unretained(this));
Demuxer::NeedKeyCB need_key_cb =
base::Bind(&ChunkDemuxerTest::DemuxerNeedKey, base::Unretained(this));
- demuxer_.reset(new ChunkDemuxer(open_cb, need_key_cb,
- base::Bind(&LogFunc)));
+ demuxer_.reset(
+ new ChunkDemuxer(open_cb, need_key_cb, base::Bind(&LogFunc), true));
}
virtual ~ChunkDemuxerTest() {
}
void CreateInitSegment(int stream_flags,
- bool is_audio_encrypted, bool is_video_encrypted,
+ bool is_audio_encrypted,
+ bool is_video_encrypted,
scoped_ptr<uint8[]>* buffer,
int* size) {
+ CreateInitSegmentInternal(
+ stream_flags, is_audio_encrypted, is_video_encrypted, buffer, false,
+ size);
+ }
+
+ // Like CreateInitSegment(), but writes kAlternateTextTrackNum (instead of
+ // kTextTrackNum) as the text track's TrackNum in the generated init segment.
+ // |stream_flags| must include HAS_TEXT (enforced by the DCHECK below).
+ void CreateInitSegmentWithAlternateTextTrackNum(int stream_flags,
+ bool is_audio_encrypted,
+ bool is_video_encrypted,
+ scoped_ptr<uint8[]>* buffer,
+ int* size) {
+ DCHECK(stream_flags & HAS_TEXT);
+ CreateInitSegmentInternal(
+ stream_flags, is_audio_encrypted, is_video_encrypted, buffer, true,
+ size);
+ }
+
+ void CreateInitSegmentInternal(int stream_flags,
+ bool is_audio_encrypted,
+ bool is_video_encrypted,
+ scoped_ptr<uint8[]>* buffer,
+ bool use_alternate_text_track_id,
+ int* size) {
bool has_audio = (stream_flags & HAS_AUDIO) != 0;
bool has_video = (stream_flags & HAS_VIDEO) != 0;
bool has_text = (stream_flags & HAS_TEXT) != 0;
//
// This is the track entry for a text track,
// TrackEntry [AE], size=30
- // TrackNum [D7], size=1, val=3
- // TrackUID [73] [C5], size=1, value=3
+ // TrackNum [D7], size=1, val=3 (or 4 if use_alternate_text_track_id)
+ // TrackUID [73] [C5], size=1, value=3 (must remain constant for same
+ // track, even if TrackNum changes)
// TrackType [83], size=1, val=0x11
// CodecId [86], size=18, val="D_WEBVTT/SUBTITLES"
- const char str[] = "\xAE\x9E\xD7\x81\x03\x73\xC5\x81\x03"
- "\x83\x81\x11\x86\x92"
- "D_WEBVTT/SUBTITLES";
+ char str[] = "\xAE\x9E\xD7\x81\x03\x73\xC5\x81\x03"
+ "\x83\x81\x11\x86\x92"
+ "D_WEBVTT/SUBTITLES";
+ DCHECK_EQ(str[4], kTextTrackNum);
+ if (use_alternate_text_track_id)
+ str[4] = kAlternateTextTrackNum;
+
const int len = strlen(str);
DCHECK_EQ(len, 32);
const uint8* const buf = reinterpret_cast<const uint8*>(str);
return demuxer_->AddId(source_id, type, codecs);
}
+ // Registers |source_id| with the demuxer as an MPEG-2 TS ("video/mp2t")
+ // source declaring AAC audio (mp4a.40.2) and H.264 video (avc1.640028).
+ ChunkDemuxer::Status AddIdForMp2tSource(const std::string& source_id) {
+ std::vector<std::string> codecs;
+ std::string type = "video/mp2t";
+ codecs.push_back("mp4a.40.2");
+ codecs.push_back("avc1.640028");
+ return demuxer_->AddId(source_id, type, codecs);
+ }
+
void AppendData(const uint8* data, size_t length) {
+ // Convenience overload that appends to the default source, kSourceId.
AppendData(kSourceId, data, length);
}
case kAudioTrackNum:
block_duration = kAudioBlockDuration;
break;
- case kTextTrackNum:
+ case kTextTrackNum: // Fall-through.
+ case kAlternateTextTrackNum:
block_duration = kTextBlockDuration;
break;
}
timecode, end_timecode, track_number, block_duration));
}
- // |cluster_description| - A space delimited string of buffer info that
- // is used to construct a cluster. Each buffer info is a timestamp in
- // milliseconds and optionally followed by a 'K' to indicate that a buffer
- // should be marked as a keyframe. For example "0K 30 60" should constuct
- // a cluster with 3 blocks: a keyframe with timestamp 0 and 2 non-keyframes
- // at 30ms and 60ms.
- void AppendSingleStreamCluster(const std::string& source_id, int track_number,
- const std::string& cluster_description) {
+ // Describes one block to write into a generated test cluster: the owning
+ // track, its timestamp, its WebM flags (e.g. kWebMFlagKeyframe), and an
+ // optional duration. A nonzero |duration| causes the block to be written
+ // as a BlockGroup rather than a SimpleBlock (see GenerateCluster()).
+ struct BlockInfo {
+ BlockInfo()
+ : track_number(0),
+ timestamp_in_ms(0),
+ flags(0),
+ duration(0) {
+ }
+
+ BlockInfo(int tn, int ts, int f, int d)
+ : track_number(tn),
+ timestamp_in_ms(ts),
+ flags(f),
+ duration(d) {
+ }
+
+ int track_number;
+ int timestamp_in_ms;
+ int flags;
+ int duration;
+
+ // Orders strictly by timestamp, so a std::priority_queue<BlockInfo>
+ // (a max-heap) pops the block with the largest timestamp first.
+ bool operator< (const BlockInfo& rhs) const {
+ return timestamp_in_ms < rhs.timestamp_in_ms;
+ }
+ };
+
+ // |track_number| - The track number to assign to each parsed block.
+ // |block_descriptions| - A space delimited string of block info that
+ // is used to populate |blocks|. Each block info has a timestamp in
+ // milliseconds and optionally followed by a 'K' to indicate that a block
+ // should be marked as a keyframe. For example "0K 30 60" should populate
+ // |blocks| with 3 BlockInfo objects: a keyframe with timestamp 0 and 2
+ // non-keyframes at 30ms and 60ms.
+ void ParseBlockDescriptions(int track_number,
+ const std::string& block_descriptions,
+ std::vector<BlockInfo>* blocks) {
std::vector<std::string> timestamps;
- base::SplitString(cluster_description, ' ', &timestamps);
+ base::SplitString(block_descriptions, ' ', &timestamps);
for (size_t i = 0; i < timestamps.size(); ++i) {
std::string timestamp_str = timestamps[i];
- int block_flags = 0;
+ BlockInfo block_info;
+ block_info.track_number = track_number;
+ block_info.flags = 0;
+ block_info.duration = 0;
+
if (EndsWith(timestamp_str, "K", true)) {
- block_flags = kWebMFlagKeyframe;
+ block_info.flags = kWebMFlagKeyframe;
// Remove the "K" off of the token.
timestamp_str = timestamp_str.substr(0, timestamps[i].length() - 1);
}
- int timestamp_in_ms;
- CHECK(base::StringToInt(timestamp_str, &timestamp_in_ms));
+ CHECK(base::StringToInt(timestamp_str, &block_info.timestamp_in_ms));
+
+ if (track_number == kTextTrackNum ||
+ track_number == kAlternateTextTrackNum) {
+ block_info.duration = kTextBlockDuration;
+ ASSERT_EQ(kWebMFlagKeyframe, block_info.flags)
+ << "Text block with timestamp " << block_info.timestamp_in_ms
+ << " was not marked as a keyframe."
+ << " All text blocks must be keyframes";
+ }
- if (i == 0)
- cb.SetClusterTimecode(timestamp_in_ms);
+ // All audio blocks generated by these tests must be keyframes.
+ if (track_number == kAudioTrackNum)
+ ASSERT_TRUE(block_info.flags & kWebMFlagKeyframe);
+
+ blocks->push_back(block_info);
+ }
+ }
- if (track_number == kTextTrackNum) {
- cb.AddBlockGroup(track_number, timestamp_in_ms, kTextBlockDuration,
- block_flags, &data[0], data.size());
+ // Builds a cluster from |blocks| (expected to already be in timestamp
+ // order). Blocks with a nonzero duration become BlockGroups (video via
+ // AddVideoBlockGroup(), others via AddBlockGroup()); blocks with zero
+ // duration become SimpleBlocks. When |unknown_size| is true, the cluster
+ // is emitted with an unknown-size header via FinishWithUnknownSize().
+ scoped_ptr<Cluster> GenerateCluster(const std::vector<BlockInfo>& blocks,
+ bool unknown_size) {
+ DCHECK_GT(blocks.size(), 0u);
+ ClusterBuilder cb;
+
+ std::vector<uint8> data(10);
+ for (size_t i = 0; i < blocks.size(); ++i) {
+ // The cluster timecode comes from the first block.
+ if (i == 0)
+ cb.SetClusterTimecode(blocks[i].timestamp_in_ms);
+
+ if (blocks[i].duration) {
+ if (blocks[i].track_number == kVideoTrackNum) {
+ AddVideoBlockGroup(&cb,
+ blocks[i].track_number, blocks[i].timestamp_in_ms,
+ blocks[i].duration, blocks[i].flags);
+ } else {
+ cb.AddBlockGroup(blocks[i].track_number, blocks[i].timestamp_in_ms,
+ blocks[i].duration, blocks[i].flags,
+ &data[0], data.size());
+ }
} else {
- cb.AddSimpleBlock(track_number, timestamp_in_ms, block_flags,
+ cb.AddSimpleBlock(blocks[i].track_number, blocks[i].timestamp_in_ms,
+ blocks[i].flags,
&data[0], data.size());
}
}
- AppendCluster(source_id, cb.Finish());
+
+ return unknown_size ? cb.FinishWithUnknownSize() : cb.Finish();
+ }
+
+ // Drains |block_queue| into an ascending-timestamp vector and builds a
+ // cluster from it. The queue is deliberately taken by value: popping
+ // mutates the local copy only.
+ scoped_ptr<Cluster> GenerateCluster(
+ std::priority_queue<BlockInfo> block_queue,
+ bool unknown_size) {
+ std::vector<BlockInfo> blocks(block_queue.size());
+ // The max-heap pops the largest timestamp first, so fill |blocks| from
+ // back to front to end up in ascending timestamp order.
+ for (size_t i = block_queue.size() - 1; !block_queue.empty(); --i) {
+ blocks[i] = block_queue.top();
+ block_queue.pop();
+ }
+
+ return GenerateCluster(blocks, unknown_size);
+ }
+
+ // |block_descriptions| - The block descriptions used to construct the
+ // cluster. See the documentation for ParseBlockDescriptions() for details on
+ // the string format.
+ void AppendSingleStreamCluster(const std::string& source_id, int track_number,
+ const std::string& block_descriptions) {
+ std::vector<BlockInfo> blocks;
+ ParseBlockDescriptions(track_number, block_descriptions, &blocks);
+ // Always emits a known-size cluster (unknown_size == false).
+ AppendCluster(source_id, GenerateCluster(blocks, false));
+ }
+
+ // Pairs a track number with the block descriptions to generate for that
+ // track when building a muxed (multi-track) cluster via
+ // AppendMuxedCluster().
+ struct MuxedStreamInfo {
+ MuxedStreamInfo()
+ : track_number(0),
+ block_descriptions("")
+ {}
+
+ MuxedStreamInfo(int track_num, const char* block_desc)
+ : track_number(track_num),
+ block_descriptions(block_desc) {
+ }
+
+ int track_number;
+ // The block description passed to ParseBlockDescriptions().
+ // See the documentation for that method for details on the string format.
+ const char* block_descriptions;
+ };
+
+ // Two-stream convenience overload of AppendMuxedCluster(), below.
+ void AppendMuxedCluster(const MuxedStreamInfo& msi_1,
+ const MuxedStreamInfo& msi_2) {
+ std::vector<MuxedStreamInfo> msi(2);
+ msi[0] = msi_1;
+ msi[1] = msi_2;
+ AppendMuxedCluster(msi);
+ }
+
+ // Three-stream convenience overload of AppendMuxedCluster(), below.
+ void AppendMuxedCluster(const MuxedStreamInfo& msi_1,
+ const MuxedStreamInfo& msi_2,
+ const MuxedStreamInfo& msi_3) {
+ std::vector<MuxedStreamInfo> msi(3);
+ msi[0] = msi_1;
+ msi[1] = msi_2;
+ msi[2] = msi_3;
+ AppendMuxedCluster(msi);
+ }
+
+ // Appends a single cluster to kSourceId containing the blocks of all
+ // streams described by |msi|, merged into timestamp order via the
+ // BlockInfo priority queue.
+ void AppendMuxedCluster(const std::vector<MuxedStreamInfo>& msi) {
+ std::priority_queue<BlockInfo> block_queue;
+ for (size_t i = 0; i < msi.size(); ++i) {
+ std::vector<BlockInfo> track_blocks;
+ ParseBlockDescriptions(msi[i].track_number, msi[i].block_descriptions,
+ &track_blocks);
+
+ for (size_t j = 0; j < track_blocks.size(); ++j)
+ block_queue.push(track_blocks[j]);
+ }
+
+ AppendCluster(kSourceId, GenerateCluster(block_queue, false));
}
void AppendData(const std::string& source_id,
const uint8* data, size_t length) {
EXPECT_CALL(host_, AddBufferedTimeRange(_, _)).Times(AnyNumber());
- demuxer_->AppendData(source_id, data, length);
+
+ demuxer_->AppendData(source_id, data, length,
+ append_window_start_for_next_append_,
+ append_window_end_for_next_append_,
+ ×tamp_offset_map_[source_id],
+ init_segment_received_cb_);
}
void AppendDataInPieces(const uint8* data, size_t length) {
expected_duration = kDefaultDuration();
EXPECT_CALL(*this, DemuxerOpened());
+
+ // Adding expectation prior to CreateInitDoneCB() here because InSequence
+ // tests require init segment received before duration set. Also, only
+ // expect an init segment received callback if there is actually a track in
+ // it.
+ if (stream_flags != 0)
+ EXPECT_CALL(*this, InitSegmentReceived());
+
demuxer_->Initialize(
&host_, CreateInitDoneCB(expected_duration, expected_status), true);
video_flags |= HAS_TEXT;
}
+ EXPECT_CALL(*this, InitSegmentReceived());
AppendInitSegmentWithSourceId(audio_id, audio_flags);
+ EXPECT_CALL(*this, InitSegmentReceived());
AppendInitSegmentWithSourceId(video_id, video_flags);
return true;
}
// bear-640x360.webm VideoDecoderConfig returns 640x360 for its natural_size()
// The resulting video stream returns data from each file for the following
// time ranges.
- // bear-320x240.webm : [0-501) [801-2737)
+ // bear-320x240.webm : [0-501) [801-2736)
// bear-640x360.webm : [527-793)
//
// bear-320x240.webm AudioDecoderConfig returns 3863 for its extra_data_size()
// bear-640x360.webm AudioDecoderConfig returns 3935 for its extra_data_size()
// The resulting audio stream returns data from each file for the following
// time ranges.
- // bear-320x240.webm : [0-524) [779-2737)
+ // bear-320x240.webm : [0-524) [779-2736)
// bear-640x360.webm : [527-759)
bool InitDemuxerWithConfigChangeData() {
scoped_refptr<DecoderBuffer> bear1 = ReadTestDataFile("bear-320x240.webm");
scoped_refptr<DecoderBuffer> bear2 = ReadTestDataFile("bear-640x360.webm");
EXPECT_CALL(*this, DemuxerOpened());
+
+ // Adding expectation prior to CreateInitDoneCB() here because InSequence
+ // tests require init segment received before duration set.
+ EXPECT_CALL(*this, InitSegmentReceived());
demuxer_->Initialize(
&host_, CreateInitDoneCB(base::TimeDelta::FromMilliseconds(2744),
PIPELINE_OK), true);
return false;
// Append the whole bear1 file.
+ // TODO(wolenetz/acolwell): Remove this extra SetDuration expectation once
+ // the files are fixed to have the correct duration in their init segments,
+ // and the CreateInitDoneCB() call, above, is fixed to used that duration.
+ // See http://crbug.com/354284.
+ EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(2746)));
AppendData(bear1->data(), bear1->data_size());
- CheckExpectedRanges(kSourceId, "{ [0,2737) }");
+ // Last audio frame has timestamp 2721 and duration 24 (estimated from max
+ // seen so far for audio track).
+ // Last video frame has timestamp 2703 and duration 33 (from TrackEntry
+ // DefaultDuration for video track).
+ CheckExpectedRanges(kSourceId, "{ [0,2736) }");
// Append initialization segment for bear2.
// Note: Offsets here and below are derived from
// media/test/data/bear-320x240-manifest.js which were
// generated from media/test/data/bear-640x360.webm and
// media/test/data/bear-320x240.webm respectively.
+ EXPECT_CALL(*this, InitSegmentReceived());
AppendData(bear2->data(), 4340);
// Append a media segment that goes from [0.527000, 1.014000).
AppendData(bear2->data() + 55290, 18785);
- CheckExpectedRanges(kSourceId, "{ [0,1028) [1201,2737) }");
+ CheckExpectedRanges(kSourceId, "{ [0,1027) [1201,2736) }");
// Append initialization segment for bear1 & fill gap with [779-1197)
// segment.
+ EXPECT_CALL(*this, InitSegmentReceived());
AppendData(bear1->data(), 4370);
AppendData(bear1->data() + 72737, 28183);
- CheckExpectedRanges(kSourceId, "{ [0,2737) }");
+ CheckExpectedRanges(kSourceId, "{ [0,2736) }");
MarkEndOfStream(PIPELINE_OK);
return true;
scoped_ptr<Cluster> GenerateCluster(int first_audio_timecode,
int first_video_timecode,
int block_count) {
+ // Delegates to the four-argument overload, emitting a known-size cluster.
+ return GenerateCluster(first_audio_timecode, first_video_timecode,
+ block_count, false);
+ }
+ // Generates a cluster of |block_count| interleaved audio/video blocks
+ // starting at the given timecodes. The last block of each stream is given
+ // a duration (BlockGroup); the rest are SimpleBlocks.
+ scoped_ptr<Cluster> GenerateCluster(int first_audio_timecode,
+ int first_video_timecode,
+ int block_count,
+ bool unknown_size) {
CHECK_GT(block_count, 0);
- int size = 10;
- scoped_ptr<uint8[]> data(new uint8[size]);
-
- ClusterBuilder cb;
- cb.SetClusterTimecode(std::min(first_audio_timecode, first_video_timecode));
+ // The queue interleaves audio and video blocks in timestamp order.
+ std::priority_queue<BlockInfo> block_queue;
if (block_count == 1) {
- cb.AddBlockGroup(kAudioTrackNum, first_audio_timecode,
- kAudioBlockDuration, kWebMFlagKeyframe,
- data.get(), size);
- return cb.Finish();
+ block_queue.push(BlockInfo(kAudioTrackNum,
+ first_audio_timecode,
+ kWebMFlagKeyframe,
+ kAudioBlockDuration));
+ return GenerateCluster(block_queue, unknown_size);
}
int audio_timecode = first_audio_timecode;
uint8 video_flag = kWebMFlagKeyframe;
for (int i = 0; i < block_count - 2; i++) {
if (audio_timecode <= video_timecode) {
- cb.AddSimpleBlock(kAudioTrackNum, audio_timecode, kWebMFlagKeyframe,
- data.get(), size);
+ block_queue.push(BlockInfo(kAudioTrackNum,
+ audio_timecode,
+ kWebMFlagKeyframe,
+ 0));
audio_timecode += kAudioBlockDuration;
continue;
}
- cb.AddSimpleBlock(kVideoTrackNum, video_timecode, video_flag, data.get(),
- size);
+ block_queue.push(BlockInfo(kVideoTrackNum,
+ video_timecode,
+ video_flag,
+ 0));
video_timecode += kVideoBlockDuration;
video_flag = 0;
}
// Make the last 2 blocks BlockGroups so that they don't get delayed by the
// block duration calculation logic.
- if (audio_timecode <= video_timecode) {
- cb.AddBlockGroup(kAudioTrackNum, audio_timecode, kAudioBlockDuration,
- kWebMFlagKeyframe, data.get(), size);
- AddVideoBlockGroup(&cb, kVideoTrackNum, video_timecode,
- kVideoBlockDuration, video_flag);
- } else {
- AddVideoBlockGroup(&cb, kVideoTrackNum, video_timecode,
- kVideoBlockDuration, video_flag);
- cb.AddBlockGroup(kAudioTrackNum, audio_timecode, kAudioBlockDuration,
- kWebMFlagKeyframe, data.get(), size);
- }
+ block_queue.push(BlockInfo(kAudioTrackNum,
+ audio_timecode,
+ kWebMFlagKeyframe,
+ kAudioBlockDuration));
+ block_queue.push(BlockInfo(kVideoTrackNum,
+ video_timecode,
+ video_flag,
+ kVideoBlockDuration));
- return cb.Finish();
+ return GenerateCluster(block_queue, unknown_size);
}
scoped_ptr<Cluster> GenerateSingleStreamCluster(int timecode,
cb.SetClusterTimecode(timecode);
// Create simple blocks for everything except the last block.
- for (int i = 0; timecode < (end_timecode - block_duration); i++) {
+ while (timecode < (end_timecode - block_duration)) {
cb.AddSimpleBlock(track_number, timecode, kWebMFlagKeyframe,
&data[0], data.size());
timecode += block_duration;
}
- // Make the last block a BlockGroup so that it doesn't get delayed by the
- // block duration calculation logic.
if (track_number == kVideoTrackNum) {
AddVideoBlockGroup(&cb, track_number, timecode, block_duration,
kWebMFlagKeyframe);
cb.AddBlockGroup(track_number, timecode, block_duration,
kWebMFlagKeyframe, &data[0], data.size());
}
+
return cb.Finish();
}
void CheckExpectedRanges(const std::string& id,
const std::string& expected) {
+ // Compares the demuxer's buffered ranges for |id| against |expected|.
- Ranges<base::TimeDelta> r = demuxer_->GetBufferedRanges(id);
+ CheckExpectedRanges(demuxer_->GetBufferedRanges(id), expected);
+ }
+
+ // Compares the buffered ranges of the single stream of |type| (evaluated
+ // against kDefaultDuration()) with |expected|.
+ void CheckExpectedRanges(DemuxerStream::Type type,
+ const std::string& expected) {
+ ChunkDemuxerStream* stream =
+ static_cast<ChunkDemuxerStream*>(demuxer_->GetStream(type));
+ CheckExpectedRanges(stream->GetBufferedRanges(kDefaultDuration()),
+ expected);
+ }
+ void CheckExpectedRanges(const Ranges<base::TimeDelta>& r,
+ const std::string& expected) {
std::stringstream ss;
ss << "{ ";
for (size_t i = 0; i < r.size(); ++i) {
base::SplitString(expected, ' ', ×tamps);
std::stringstream ss;
for (size_t i = 0; i < timestamps.size(); ++i) {
- DemuxerStream::Status status;
+ // Initialize status to kAborted since it's possible for Read() to return
+ // without calling StoreStatusAndBuffer() if it doesn't have any buffers
+ // left to return.
+ DemuxerStream::Status status = DemuxerStream::kAborted;
scoped_refptr<DecoderBuffer> buffer;
stream->Read(base::Bind(&ChunkDemuxerTest::StoreStatusAndBuffer,
base::Unretained(this), &status, &buffer));
if (i > 0)
ss << " ";
ss << buffer->timestamp().InMilliseconds();
+
+ // Handle preroll buffers.
+ if (EndsWith(timestamps[i], "P", true)) {
+ ASSERT_EQ(kInfiniteDuration(), buffer->discard_padding().first);
+ ASSERT_EQ(base::TimeDelta(), buffer->discard_padding().second);
+ ss << "P";
+ }
}
EXPECT_EQ(expected, ss.str());
}
// Read a WebM file into memory and send the data to the demuxer.
scoped_refptr<DecoderBuffer> buffer = ReadTestDataFile(filename);
+ EXPECT_CALL(*this, InitSegmentReceived());
AppendDataInPieces(buffer->data(), buffer->data_size(), 512);
// Verify that the timestamps on the first few packets match what we
NeedKeyMock(type, init_data_ptr, init_data.size());
}
+ MOCK_METHOD0(InitSegmentReceived, void(void));
+
void Seek(base::TimeDelta seek_time) {
demuxer_->StartWaitingForSeek(seek_time);
demuxer_->Seek(seek_time, NewExpectedStatusCB(PIPELINE_OK));
message_loop_.RunUntilIdle();
}
+ // Records the timestamp offset to use for the next AppendData() on |id|.
+ // Returns false (leaving the stored offset unchanged) if the demuxer is
+ // currently mid-way through parsing a media segment for |id|.
+ bool SetTimestampOffset(const std::string& id,
+ base::TimeDelta timestamp_offset) {
+ if (demuxer_->IsParsingMediaSegment(id))
+ return false;
+
+ timestamp_offset_map_[id] = timestamp_offset;
+ return true;
+ }
+
base::MessageLoop message_loop_;
MockDemuxerHost host_;
scoped_ptr<ChunkDemuxer> demuxer_;
+ ChunkDemuxer::InitSegmentReceivedCB init_segment_received_cb_;
+
+ base::TimeDelta append_window_start_for_next_append_;
+ base::TimeDelta append_window_end_for_next_append_;
+
+ // Map of source id to timestamp offset to use for the next AppendData()
+ // operation for that source id.
+ std::map<std::string, base::TimeDelta> timestamp_offset_map_;
private:
DISALLOW_COPY_AND_ASSIGN(ChunkDemuxerTest);
if (is_audio_encrypted || is_video_encrypted) {
int need_key_count = (is_audio_encrypted ? 1 : 0) +
(is_video_encrypted ? 1 : 0);
- EXPECT_CALL(*this, NeedKeyMock(kWebMEncryptInitDataType, NotNull(),
+ EXPECT_CALL(*this, NeedKeyMock(kWebMInitDataType, NotNull(),
DecryptConfig::kDecryptionKeySize))
.Times(Exactly(need_key_count));
}
EXPECT_EQ(kSampleFormatPlanarF32, config.sample_format());
EXPECT_EQ(is_audio_encrypted,
audio_stream->audio_decoder_config().is_encrypted());
+ EXPECT_TRUE(static_cast<ChunkDemuxerStream*>(audio_stream)
+ ->supports_partial_append_window_trimming());
} else {
EXPECT_FALSE(audio_stream);
}
EXPECT_TRUE(video_stream);
EXPECT_EQ(is_video_encrypted,
video_stream->video_decoder_config().is_encrypted());
+ EXPECT_FALSE(static_cast<ChunkDemuxerStream*>(video_stream)
+ ->supports_partial_append_window_trimming());
} else {
EXPECT_FALSE(video_stream);
}
ASSERT_TRUE(text_stream);
EXPECT_EQ(DemuxerStream::TEXT, text_stream->type());
EXPECT_EQ(kTextSubtitles, text_config.kind());
+ EXPECT_FALSE(static_cast<ChunkDemuxerStream*>(text_stream)
+ ->supports_partial_append_window_trimming());
DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
if (has_audio) {
EXPECT_EQ(kSampleFormatPlanarF32, config.sample_format());
EXPECT_EQ(is_audio_encrypted,
audio_stream->audio_decoder_config().is_encrypted());
+ EXPECT_TRUE(static_cast<ChunkDemuxerStream*>(audio_stream)
+ ->supports_partial_append_window_trimming());
} else {
EXPECT_FALSE(audio_stream);
}
EXPECT_TRUE(video_stream);
EXPECT_EQ(is_video_encrypted,
video_stream->video_decoder_config().is_encrypted());
+ EXPECT_FALSE(static_cast<ChunkDemuxerStream*>(video_stream)
+ ->supports_partial_append_window_trimming());
} else {
EXPECT_FALSE(video_stream);
}
}
}
+TEST_F(ChunkDemuxerTest, SingleTextTrackIdChange) {
+ // Test with 1 video stream, 1 audio, and 1 text stream. Send a second init
+ // segment in which the text track ID changes. Verify appended buffers before
+ // and after the second init segment map to the same underlying track buffers.
+ CreateNewDemuxer();
+ DemuxerStream* text_stream = NULL;
+ TextTrackConfig text_config;
+ EXPECT_CALL(host_, AddTextStream(_, _))
+ .WillOnce(DoAll(SaveArg<0>(&text_stream),
+ SaveArg<1>(&text_config)));
+ ASSERT_TRUE(InitDemuxerWithEncryptionInfo(
+ HAS_TEXT | HAS_AUDIO | HAS_VIDEO, false, false));
+ DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
+ DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
+ ASSERT_TRUE(audio_stream);
+ ASSERT_TRUE(video_stream);
+ ASSERT_TRUE(text_stream);
+
+ AppendMuxedCluster(
+ MuxedStreamInfo(kAudioTrackNum, "0K 23K"),
+ MuxedStreamInfo(kVideoTrackNum, "0K 30"),
+ MuxedStreamInfo(kTextTrackNum, "10K"));
+ CheckExpectedRanges(kSourceId, "{ [0,46) }");
+
+ // Append a second init segment whose text track uses
+ // kAlternateTextTrackNum but keeps the same TrackUID.
+ scoped_ptr<uint8[]> info_tracks;
+ int info_tracks_size = 0;
+ CreateInitSegmentWithAlternateTextTrackNum(HAS_TEXT | HAS_AUDIO | HAS_VIDEO,
+ false, false,
+ &info_tracks, &info_tracks_size);
+ EXPECT_CALL(*this, InitSegmentReceived());
+ demuxer_->AppendData(kSourceId, info_tracks.get(), info_tracks_size,
+ append_window_start_for_next_append_,
+ append_window_end_for_next_append_,
+ &timestamp_offset_map_[kSourceId],
+ init_segment_received_cb_);
+
+ AppendMuxedCluster(
+ MuxedStreamInfo(kAudioTrackNum, "46K 69K"),
+ MuxedStreamInfo(kVideoTrackNum, "60K"),
+ MuxedStreamInfo(kAlternateTextTrackNum, "45K"));
+
+ CheckExpectedRanges(kSourceId, "{ [0,92) }");
+ CheckExpectedBuffers(audio_stream, "0 23 46 69");
+ CheckExpectedBuffers(video_stream, "0 30 60");
+ CheckExpectedBuffers(text_stream, "10 45");
+
+ ShutdownDemuxer();
+}
+
+TEST_F(ChunkDemuxerTest, InitSegmentSetsNeedRandomAccessPointFlag) {
+ // Tests that non-keyframes following an init segment are allowed
+ // and dropped, as expected if the initialization segment received
+ // algorithm correctly sets the needs random access point flag to true for all
+ // track buffers. Note that the first initialization segment is insufficient
+ // to fully test this since needs random access point flag initializes to
+ // true.
+ CreateNewDemuxer();
+ DemuxerStream* text_stream = NULL;
+ EXPECT_CALL(host_, AddTextStream(_, _))
+ .WillOnce(SaveArg<0>(&text_stream));
+ ASSERT_TRUE(InitDemuxerWithEncryptionInfo(
+ HAS_TEXT | HAS_AUDIO | HAS_VIDEO, false, false));
+ DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
+ DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
+ ASSERT_TRUE(audio_stream && video_stream && text_stream);
+
+ // The video frame at 0 is a non-keyframe, so it should be dropped; the
+ // buffered range therefore starts at 23.
+ AppendMuxedCluster(
+ MuxedStreamInfo(kAudioTrackNum, "23K"),
+ MuxedStreamInfo(kVideoTrackNum, "0 30K"),
+ MuxedStreamInfo(kTextTrackNum, "25K 40K"));
+ CheckExpectedRanges(kSourceId, "{ [23,46) }");
+
+ EXPECT_CALL(*this, InitSegmentReceived());
+ AppendInitSegment(HAS_TEXT | HAS_AUDIO | HAS_VIDEO);
+ // Video frame at 60 follows the new init segment and is a non-keyframe,
+ // so it should likewise be dropped (see expected buffers below).
+ AppendMuxedCluster(
+ MuxedStreamInfo(kAudioTrackNum, "46K 69K"),
+ MuxedStreamInfo(kVideoTrackNum, "60 90K"),
+ MuxedStreamInfo(kTextTrackNum, "80K 90K"));
+ CheckExpectedRanges(kSourceId, "{ [23,92) }");
+
+ CheckExpectedBuffers(audio_stream, "23 46 69");
+ CheckExpectedBuffers(video_stream, "30 90");
+ CheckExpectedBuffers(text_stream, "25 40 80 90");
+}
+
// Make sure that the demuxer reports an error if Shutdown()
// is called before all the initialization segments are appended.
TEST_F(ChunkDemuxerTest, Shutdown_BeforeAllInitSegmentsAppended) {
EXPECT_EQ(AddId("audio", HAS_AUDIO), ChunkDemuxer::kOk);
EXPECT_EQ(AddId("video", HAS_VIDEO), ChunkDemuxer::kOk);
+ EXPECT_CALL(*this, InitSegmentReceived());
AppendInitSegmentWithSourceId("audio", HAS_AUDIO);
ShutdownDemuxer();
EXPECT_CALL(host_, AddTextStream(_, _))
.Times(Exactly(1));
+ EXPECT_CALL(*this, InitSegmentReceived());
AppendInitSegmentWithSourceId("video_and_text", HAS_VIDEO | HAS_TEXT);
ShutdownDemuxer();
ExpectRead(DemuxerStream::AUDIO, 0);
ExpectRead(DemuxerStream::VIDEO, 0);
ExpectRead(DemuxerStream::AUDIO, kAudioBlockDuration);
- // Note: We skip trying to read a video buffer here because computing
- // the duration for this block relies on successfully parsing the last block
- // in the cluster the cluster.
- ExpectRead(DemuxerStream::AUDIO, 2 * kAudioBlockDuration);
Seek(base::TimeDelta::FromSeconds(5));
int info_tracks_size = 0;
CreateInitSegment(HAS_AUDIO | HAS_VIDEO,
false, false, &info_tracks, &info_tracks_size);
-
- demuxer_->AppendData(kSourceId, info_tracks.get(), info_tracks_size);
+ demuxer_->AppendData(kSourceId, info_tracks.get(), info_tracks_size,
+ append_window_start_for_next_append_,
+ append_window_end_for_next_append_,
+ ×tamp_offset_map_[kSourceId],
+ init_segment_received_cb_);
}
// Make sure Read() callbacks are dispatched with the proper data.
// Verify that AppendData() can still accept more data.
scoped_ptr<Cluster> cluster_c(GenerateCluster(45, 2));
- demuxer_->AppendData(kSourceId, cluster_c->data(), cluster_c->size());
+ demuxer_->AppendData(kSourceId, cluster_c->data(), cluster_c->size(),
+ append_window_start_for_next_append_,
+ append_window_end_for_next_append_,
+ ×tamp_offset_map_[kSourceId],
+ init_segment_received_cb_);
}
TEST_F(ChunkDemuxerTest, NonMonotonicButAboveClusterTimecode) {
// Verify that AppendData() ignores data after the error.
scoped_ptr<Cluster> cluster_b(GenerateCluster(20, 2));
- demuxer_->AppendData(kSourceId, cluster_b->data(), cluster_b->size());
+ demuxer_->AppendData(kSourceId, cluster_b->data(), cluster_b->size(),
+ append_window_start_for_next_append_,
+ append_window_end_for_next_append_,
+ ×tamp_offset_map_[kSourceId],
+ init_segment_received_cb_);
}
TEST_F(ChunkDemuxerTest, BackwardsAndBeforeClusterTimecode) {
// Verify that AppendData() ignores data after the error.
scoped_ptr<Cluster> cluster_b(GenerateCluster(6, 2));
- demuxer_->AppendData(kSourceId, cluster_b->data(), cluster_b->size());
+ demuxer_->AppendData(kSourceId, cluster_b->data(), cluster_b->size(),
+ append_window_start_for_next_append_,
+ append_window_end_for_next_append_,
+ ×tamp_offset_map_[kSourceId],
+ init_segment_received_cb_);
}
.WillOnce(SaveArg<0>(&text_stream));
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));
- AppendSingleStreamCluster(kSourceId, kVideoTrackNum, "0K 30");
- AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "0K 23K");
+ AppendMuxedCluster(
+ MuxedStreamInfo(kVideoTrackNum, "0K 33"),
+ MuxedStreamInfo(kAudioTrackNum, "0K 23K"));
// Check expected ranges and verify that an empty text track does not
// affect the expected ranges.
CheckExpectedRanges(kSourceId, "{ [0,46) }");
- EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(60)));
+ EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(66)));
MarkEndOfStream(PIPELINE_OK);
// Check expected ranges and verify that an empty text track does not
// affect the expected ranges.
- CheckExpectedRanges(kSourceId, "{ [0,60) }");
+ CheckExpectedRanges(kSourceId, "{ [0,66) }");
// Unmark end of stream state and verify that the ranges return to
// their pre-"end of stream" values.
// Add text track data and verify that the buffered ranges don't change
// since the intersection of all the tracks doesn't change.
EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(200)));
- AppendSingleStreamCluster(kSourceId, kTextTrackNum, "0K 100K");
+ AppendMuxedCluster(
+ MuxedStreamInfo(kVideoTrackNum, "0K 33"),
+ MuxedStreamInfo(kAudioTrackNum, "0K 23K"),
+ MuxedStreamInfo(kTextTrackNum, "0K 100K"));
CheckExpectedRanges(kSourceId, "{ [0,46) }");
// Mark end of stream and verify that text track data is reflected in
memcpy(dst, cluster_b->data(), cluster_b->size());
dst += cluster_b->size();
+ EXPECT_CALL(*this, InitSegmentReceived());
AppendDataInPieces(buffer.get(), buffer_size);
GenerateExpectedReads(0, 9);
{kSkip, kSkip},
};
+ // TODO(wolenetz/acolwell): Remove this SetDuration expectation and update the
+ // ParseWebMFile() call's expected duration, below, once the file is fixed to
+ // have the correct duration in the init segment. See http://crbug.com/354284.
+ EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(2746)));
+
ASSERT_TRUE(ParseWebMFile("bear-320x240.webm", buffer_timestamps,
base::TimeDelta::FromMilliseconds(2744)));
}
{kSkip, kSkip},
};
+ // TODO(wolenetz/acolwell): Remove this SetDuration expectation and update the
+ // ParseWebMFile() call's expected duration, below, once the file is fixed to
+ // have the correct duration in the init segment. See http://crbug.com/354284.
+ EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(2746)));
+
ASSERT_TRUE(ParseWebMFile("bear-320x240-audio-only.webm", buffer_timestamps,
base::TimeDelta::FromMilliseconds(2744),
HAS_AUDIO));
{kSkip, kSkip},
};
+ // TODO(wolenetz/acolwell): Remove this SetDuration expectation and update the
+ // ParseWebMFile() call's expected duration, below, once the file is fixed to
+ // have the correct duration in the init segment. See http://crbug.com/354284.
+ EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(2736)));
+
ASSERT_TRUE(ParseWebMFile("bear-320x240-video-only.webm", buffer_timestamps,
base::TimeDelta::FromMilliseconds(2703),
HAS_VIDEO));
EXPECT_FALSE(audio_read_done);
EXPECT_FALSE(video_read_done);
- // Append data one byte at a time until the audio read completes.
+ // Append data one byte at a time until one or both reads complete.
int i = 0;
- for (; i < cluster->size() && !audio_read_done; ++i) {
+ for (; i < cluster->size() && !(audio_read_done || video_read_done); ++i) {
AppendData(cluster->data() + i, 1);
message_loop_.RunUntilIdle();
}
- EXPECT_TRUE(audio_read_done);
- EXPECT_FALSE(video_read_done);
+ EXPECT_TRUE(audio_read_done || video_read_done);
EXPECT_GT(i, 0);
EXPECT_LT(i, cluster->size());
- // Append data one byte at a time until the video read completes.
- for (; i < cluster->size() && !video_read_done; ++i) {
- AppendData(cluster->data() + i, 1);
- message_loop_.RunUntilIdle();
- }
-
- EXPECT_TRUE(video_read_done);
- EXPECT_LT(i, cluster->size());
-
audio_read_done = false;
video_read_done = false;
ReadAudio(base::Bind(&OnReadDone,
ASSERT_EQ(AddId(), ChunkDemuxer::kOk);
uint8 tmp = 0;
- demuxer_->AppendData(kSourceId, &tmp, 1);
+ demuxer_->AppendData(kSourceId, &tmp, 1,
+ append_window_start_for_next_append_,
+ append_window_end_for_next_append_,
+ ×tamp_offset_map_[kSourceId],
+ init_segment_received_cb_);
}
TEST_F(ChunkDemuxerTest, AVHeadersWithAudioOnlyType) {
AppendCluster(kDefaultFirstCluster());
// Append another identical initialization segment.
+ EXPECT_CALL(*this, InitSegmentReceived());
AppendInitSegment(HAS_AUDIO | HAS_VIDEO);
AppendCluster(kDefaultSecondCluster());
// Adding an id with audio/video should fail because we already added audio.
ASSERT_EQ(AddId(), ChunkDemuxer::kReachedIdLimit);
+ EXPECT_CALL(*this, InitSegmentReceived());
AppendInitSegmentWithSourceId(audio_id, HAS_AUDIO);
// Adding an id after append should fail.
&host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
ASSERT_EQ(AddId(kSourceId, HAS_AUDIO), ChunkDemuxer::kOk);
+ EXPECT_CALL(*this, InitSegmentReceived());
AppendInitSegment(HAS_AUDIO);
// Test a simple cluster.
&host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
ASSERT_EQ(AddId(kSourceId, HAS_VIDEO), ChunkDemuxer::kOk);
+ EXPECT_CALL(*this, InitSegmentReceived());
AppendInitSegment(HAS_VIDEO);
// Test a simple cluster.
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));
// Append audio & video data
- AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "0K 23");
- AppendSingleStreamCluster(kSourceId, kVideoTrackNum, "0K 33");
+ AppendMuxedCluster(
+ MuxedStreamInfo(kAudioTrackNum, "0K 23K"),
+ MuxedStreamInfo(kVideoTrackNum, "0K 33"));
// Verify that a text track with no cues does not result in an empty buffered
// range.
CheckExpectedRanges("{ [0,46) }");
// Add some text cues.
- AppendSingleStreamCluster(kSourceId, kTextTrackNum, "0K 100K");
+ AppendMuxedCluster(
+ MuxedStreamInfo(kAudioTrackNum, "100K 123K"),
+ MuxedStreamInfo(kVideoTrackNum, "100K 133"),
+ MuxedStreamInfo(kTextTrackNum, "100K 200K"));
- // Verify that the new cues did not affect the buffered ranges.
- CheckExpectedRanges("{ [0,46) }");
+ // Verify that the text cues are not reflected in the buffered ranges.
+ CheckExpectedRanges("{ [0,46) [100,146) }");
- // Remove the buffered range.
+ // Remove the buffered ranges.
demuxer_->Remove(kSourceId, base::TimeDelta(),
- base::TimeDelta::FromMilliseconds(46));
+ base::TimeDelta::FromMilliseconds(250));
CheckExpectedRanges("{ }");
}
TEST_F(ChunkDemuxerTest, GetBufferedRanges_EndOfStream) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
- AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "0K 23K");
- AppendSingleStreamCluster(kSourceId, kVideoTrackNum, "0K 33");
+ AppendMuxedCluster(
+ MuxedStreamInfo(kAudioTrackNum, "0K 23K"),
+ MuxedStreamInfo(kVideoTrackNum, "0K 33"));
CheckExpectedRanges("{ [0,46) }");
// Append and remove data so that the 2 streams' end ranges do not overlap.
- EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(246)));
- EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(366)));
- AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "200K 223K");
- AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
- "200K 233 266 299 300K 333");
+ EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(398)));
+ AppendMuxedCluster(
+ MuxedStreamInfo(kAudioTrackNum, "200K 223K"),
+ MuxedStreamInfo(kVideoTrackNum, "200K 233 266 299 332K 365"));
// At this point, the per-stream ranges are as follows:
// Audio: [0,46) [200,246)
- // Video: [0,66) [200,366)
+ // Video: [0,66) [200,398)
CheckExpectedRanges("{ [0,46) [200,246) }");
demuxer_->Remove(kSourceId, base::TimeDelta::FromMilliseconds(200),
// At this point, the per-stream ranges are as follows:
// Audio: [0,46)
- // Video: [0,66) [300,366)
+ // Video: [0,66) [332,398)
CheckExpectedRanges("{ [0,46) }");
- AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "200K 223K");
- AppendSingleStreamCluster(kSourceId, kVideoTrackNum, "200K 233");
+ AppendMuxedCluster(
+ MuxedStreamInfo(kAudioTrackNum, "200K 223K"),
+ MuxedStreamInfo(kVideoTrackNum, "200K 233"));
// At this point, the per-stream ranges are as follows:
// Audio: [0,46) [200,246)
- // Video: [0,66) [200,266) [300,366)
+ // Video: [0,66) [200,266) [332,398)
// NOTE: The last range on each stream do not overlap in time.
CheckExpectedRanges("{ [0,46) [200,246) }");
// NOTE: The last range on each stream gets extended to the highest
// end timestamp according to the spec. The last audio range gets extended
- // from [200,246) to [200,366) which is why the intersection results in the
+ // from [200,246) to [200,398) which is why the intersection results in the
// middle range getting larger AND the new range appearing.
- CheckExpectedRanges("{ [0,46) [200,266) [300,366) }");
+ CheckExpectedRanges("{ [0,46) [200,266) [332,398) }");
}
TEST_F(ChunkDemuxerTest, DifferentStreamTimecodes) {
ExpectRead(DemuxerStream::AUDIO, 0);
+ // The first config change seen is from a splice frame representing an overlap
+ // of buffers from config 1 by buffers from config 2.
ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
-
ASSERT_EQ(status, DemuxerStream::kConfigChanged);
EXPECT_EQ(last_timestamp.InMilliseconds(), 524);
EXPECT_EQ(audio_config_2.samples_per_second(), 44100);
EXPECT_EQ(audio_config_2.extra_data_size(), 3935u);
- ExpectRead(DemuxerStream::AUDIO, 527);
-
- // Read until the next config change.
+ // The next config change is from a splice frame representing an overlap of
+ // buffers from config 2 by buffers from config 1.
ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
ASSERT_EQ(status, DemuxerStream::kConfigChanged);
- EXPECT_EQ(last_timestamp.InMilliseconds(), 759);
-
- // Get the new config and verify that it matches the first one.
+ EXPECT_EQ(last_timestamp.InMilliseconds(), 782);
ASSERT_TRUE(audio_config_1.Matches(audio->audio_decoder_config()));
- ExpectRead(DemuxerStream::AUDIO, 779);
-
// Read until the end of the stream just to make sure there aren't any other
// config changes.
ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
ASSERT_EQ(status, DemuxerStream::kOk);
+ EXPECT_EQ(last_timestamp.InMilliseconds(), 2744);
}
TEST_F(ChunkDemuxerTest, ConfigChange_Seek) {
TEST_F(ChunkDemuxerTest, TimestampPositiveOffset) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
- ASSERT_TRUE(demuxer_->SetTimestampOffset(
- kSourceId, base::TimeDelta::FromSeconds(30)));
+ ASSERT_TRUE(SetTimestampOffset(kSourceId, base::TimeDelta::FromSeconds(30)));
AppendCluster(GenerateCluster(0, 2));
Seek(base::TimeDelta::FromMilliseconds(30000));
TEST_F(ChunkDemuxerTest, TimestampNegativeOffset) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
- ASSERT_TRUE(demuxer_->SetTimestampOffset(
- kSourceId, base::TimeDelta::FromSeconds(-1)));
+ ASSERT_TRUE(SetTimestampOffset(kSourceId, base::TimeDelta::FromSeconds(-1)));
AppendCluster(GenerateCluster(1000, 2));
GenerateExpectedReads(0, 2);
std::string video_id = "video1";
ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
- ASSERT_TRUE(demuxer_->SetTimestampOffset(
+ ASSERT_TRUE(SetTimestampOffset(
audio_id, base::TimeDelta::FromMilliseconds(-2500)));
- ASSERT_TRUE(demuxer_->SetTimestampOffset(
+ ASSERT_TRUE(SetTimestampOffset(
video_id, base::TimeDelta::FromMilliseconds(-2500)));
AppendCluster(audio_id, GenerateSingleStreamCluster(2500,
2500 + kAudioBlockDuration * 4, kAudioTrackNum, kAudioBlockDuration));
Seek(base::TimeDelta::FromMilliseconds(27300));
- ASSERT_TRUE(demuxer_->SetTimestampOffset(
+ ASSERT_TRUE(SetTimestampOffset(
audio_id, base::TimeDelta::FromMilliseconds(27300)));
- ASSERT_TRUE(demuxer_->SetTimestampOffset(
+ ASSERT_TRUE(SetTimestampOffset(
video_id, base::TimeDelta::FromMilliseconds(27300)));
AppendCluster(audio_id, GenerateSingleStreamCluster(
0, kAudioBlockDuration * 4, kAudioTrackNum, kAudioBlockDuration));
GenerateAudioStreamExpectedReads(27300, 4);
}
-TEST_F(ChunkDemuxerTest, TimestampOffsetMidMediaSegment) {
+TEST_F(ChunkDemuxerTest, IsParsingMediaSegmentMidMediaSegment) {
 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
 scoped_ptr<Cluster> cluster = GenerateCluster(0, 2);
 // Append only part of the cluster data.
 AppendData(cluster->data(), cluster->size() - 13);
- // Setting a timestamp should fail because we're in the middle of a cluster.
- ASSERT_FALSE(demuxer_->SetTimestampOffset(
- kSourceId, base::TimeDelta::FromSeconds(25)));
+ // Confirm we're in the middle of parsing a media segment.
+ ASSERT_TRUE(demuxer_->IsParsingMediaSegment(kSourceId));
+
+ // Flush the partially-appended media segment.
+ demuxer_->Abort(kSourceId,
+ append_window_start_for_next_append_,
+ append_window_end_for_next_append_,
+ &timestamp_offset_map_[kSourceId]);
+
+ // After Abort(), parsing should no longer be in the middle of a media
+ // segment.
+ ASSERT_FALSE(demuxer_->IsParsingMediaSegment(kSourceId));
+}
- demuxer_->Abort(kSourceId);
- // After Abort(), setting a timestamp should succeed since we're no longer
- // in the middle of a cluster
- ASSERT_TRUE(demuxer_->SetTimestampOffset(
- kSourceId, base::TimeDelta::FromSeconds(25)));
+#if defined(USE_PROPRIETARY_CODECS)
+#if defined(ENABLE_MPEG2TS_STREAM_PARSER)
+TEST_F(ChunkDemuxerTest, EmitBuffersDuringAbort) {
+ EXPECT_CALL(*this, DemuxerOpened());
+ demuxer_->Initialize(
+ &host_, CreateInitDoneCB(kInfiniteDuration(), PIPELINE_OK), true);
+ EXPECT_EQ(ChunkDemuxer::kOk, AddIdForMp2tSource(kSourceId));
+
+ // For info:
+ // DTS/PTS derived using dvbsnoop -s ts -if bear-1280x720.ts -tssubdecode
+ // Video: first PES:
+ // PTS: 126912 (0x0001efc0) [= 90 kHz-Timestamp: 0:00:01.4101]
+ // DTS: 123909 (0x0001e405) [= 90 kHz-Timestamp: 0:00:01.3767]
+ // Audio: first PES:
+ // PTS: 126000 (0x0001ec30) [= 90 kHz-Timestamp: 0:00:01.4000]
+ // DTS: 123910 (0x0001e406) [= 90 kHz-Timestamp: 0:00:01.3767]
+ // Video: last PES:
+ // PTS: 370155 (0x0005a5eb) [= 90 kHz-Timestamp: 0:00:04.1128]
+ // DTS: 367152 (0x00059a30) [= 90 kHz-Timestamp: 0:00:04.0794]
+ // Audio: last PES:
+ // PTS: 353788 (0x000565fc) [= 90 kHz-Timestamp: 0:00:03.9309]
+
+ scoped_refptr<DecoderBuffer> buffer = ReadTestDataFile("bear-1280x720.ts");
+ EXPECT_CALL(*this, InitSegmentReceived());
+ AppendData(kSourceId, buffer->data(), buffer->data_size());
+
+ // Confirm we're in the middle of parsing a media segment.
+ ASSERT_TRUE(demuxer_->IsParsingMediaSegment(kSourceId));
+
+ // Abort on the Mpeg2 TS parser triggers the emission of the last video
+ // buffer which is pending in the stream parser.
+ Ranges<base::TimeDelta> range_before_abort =
+ demuxer_->GetBufferedRanges(kSourceId);
+ demuxer_->Abort(kSourceId,
+ append_window_start_for_next_append_,
+ append_window_end_for_next_append_,
+ &timestamp_offset_map_[kSourceId]);
+ Ranges<base::TimeDelta> range_after_abort =
+ demuxer_->GetBufferedRanges(kSourceId);
+
+ // The emitted pending buffer must grow the single buffered range: same
+ // start, strictly larger end.
+ ASSERT_EQ(range_before_abort.size(), 1u);
+ ASSERT_EQ(range_after_abort.size(), 1u);
+ EXPECT_EQ(range_after_abort.start(0), range_before_abort.start(0));
+ EXPECT_GT(range_after_abort.end(0), range_before_abort.end(0));
+}
+
+TEST_F(ChunkDemuxerTest, SeekCompleteDuringAbort) {
+ EXPECT_CALL(*this, DemuxerOpened());
+ demuxer_->Initialize(
+ &host_, CreateInitDoneCB(kInfiniteDuration(), PIPELINE_OK), true);
+ EXPECT_EQ(ChunkDemuxer::kOk, AddIdForMp2tSource(kSourceId));
+
+ // For info:
+ // DTS/PTS derived using dvbsnoop -s ts -if bear-1280x720.ts -tssubdecode
+ // Video: first PES:
+ // PTS: 126912 (0x0001efc0) [= 90 kHz-Timestamp: 0:00:01.4101]
+ // DTS: 123909 (0x0001e405) [= 90 kHz-Timestamp: 0:00:01.3767]
+ // Audio: first PES:
+ // PTS: 126000 (0x0001ec30) [= 90 kHz-Timestamp: 0:00:01.4000]
+ // DTS: 123910 (0x0001e406) [= 90 kHz-Timestamp: 0:00:01.3767]
+ // Video: last PES:
+ // PTS: 370155 (0x0005a5eb) [= 90 kHz-Timestamp: 0:00:04.1128]
+ // DTS: 367152 (0x00059a30) [= 90 kHz-Timestamp: 0:00:04.0794]
+ // Audio: last PES:
+ // PTS: 353788 (0x000565fc) [= 90 kHz-Timestamp: 0:00:03.9309]
+
+ scoped_refptr<DecoderBuffer> buffer = ReadTestDataFile("bear-1280x720.ts");
+ EXPECT_CALL(*this, InitSegmentReceived());
+ AppendData(kSourceId, buffer->data(), buffer->data_size());
+
+ // Confirm we're in the middle of parsing a media segment.
+ ASSERT_TRUE(demuxer_->IsParsingMediaSegment(kSourceId));
+
+ // Seek to a time corresponding to buffers that will be emitted during the
+ // abort.
+ Seek(base::TimeDelta::FromMilliseconds(4110));
+
+ // Abort on the Mpeg2 TS parser triggers the emission of the last video
+ // buffer which is pending in the stream parser.
+ demuxer_->Abort(kSourceId,
+ append_window_start_for_next_append_,
+ append_window_end_for_next_append_,
+ &timestamp_offset_map_[kSourceId]);
+
+ // No assertions follow: the coverage is that the pending seek is completed
+ // by the buffers emitted during Abort() without misbehaving.
}
-TEST_F(ChunkDemuxerTest, WebMParsingMediaSegmentDetection) {
- // TODO(wolenetz): Also test 'unknown' sized clusters.
- // See http://crbug.com/335676.
+#endif
+#endif
+
+TEST_F(ChunkDemuxerTest, WebMIsParsingMediaSegmentDetection) {
const uint8 kBuffer[] = {
0x1F, 0x43, 0xB6, 0x75, 0x83, // CLUSTER (size = 3)
0xE7, 0x81, 0x01, // Cluster TIMECODE (value = 1)
+
+ 0x1F, 0x43, 0xB6, 0x75, 0xFF, // CLUSTER (size = unknown; really 3 due to:)
+ 0xE7, 0x81, 0x02, // Cluster TIMECODE (value = 2)
+ /* e.g. put some blocks here... */
+ 0x1A, 0x45, 0xDF, 0xA3, 0x8A, // EBMLHEADER (size = 10, not fully appended)
};
- // Setting timestamp offset or append mode is allowed only while not
- // parsing a media segment. This array indicates whether or not these
- // operations are allowed following each incrementally appended byte in
- // |kBuffer|.
+ // This array indicates expected return value of IsParsingMediaSegment()
+ // following each incrementally appended byte in |kBuffer|.
const bool kExpectedReturnValues[] = {
+ false, false, false, false, true,
+ true, true, false,
+
+ false, false, false, false, true,
+ true, true, true,
+
true, true, true, true, false,
- false, false, true,
};
COMPILE_ASSERT(arraysize(kBuffer) == arraysize(kExpectedReturnValues),
DVLOG(3) << "Appending and testing index " << i;
AppendData(kBuffer + i, 1);
bool expected_return_value = kExpectedReturnValues[i];
- EXPECT_EQ(expected_return_value, demuxer_->SetTimestampOffset(
- kSourceId, base::TimeDelta::FromSeconds(25)));
- EXPECT_EQ(expected_return_value, demuxer_->SetSequenceMode(
- kSourceId, true));
- EXPECT_EQ(expected_return_value, demuxer_->SetSequenceMode(
- kSourceId, false));
+ EXPECT_EQ(expected_return_value,
+ demuxer_->IsParsingMediaSegment(kSourceId));
}
}
-TEST_F(ChunkDemuxerTest, SetSequenceModeMidMediaSegment) {
- ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
-
- scoped_ptr<Cluster> cluster = GenerateCluster(0, 2);
- // Append only part of the cluster data.
- AppendData(cluster->data(), cluster->size() - 13);
-
- // Setting append mode should fail because we're in the middle of a cluster.
- ASSERT_FALSE(demuxer_->SetSequenceMode(kSourceId, true));
- ASSERT_FALSE(demuxer_->SetSequenceMode(kSourceId, false));
-
- demuxer_->Abort(kSourceId);
- // After Abort(), setting append mode should succeed since we're no longer
- // in the middle of a cluster.
- ASSERT_TRUE(demuxer_->SetSequenceMode(kSourceId, true));
- ASSERT_TRUE(demuxer_->SetSequenceMode(kSourceId, false));
-}
-
TEST_F(ChunkDemuxerTest, DurationChange) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
const int kStreamDuration = kDefaultDuration().InMilliseconds();
CheckExpectedRanges(kSourceId, "{ [201191,201224) }");
- // Add data at the currently set duration. The duration should not increase.
+ // Add data beginning at the currently set duration and expect a new duration
+ // to be signaled. Note that the last video block will have a higher end
+ // timestamp than the last audio block.
+ const int kNewStreamDurationVideo = kStreamDuration + kVideoBlockDuration;
+ EXPECT_CALL(host_, SetDuration(
+ base::TimeDelta::FromMilliseconds(kNewStreamDurationVideo)));
AppendCluster(GenerateCluster(kDefaultDuration().InMilliseconds(), 2));
- // Range should not be affected.
- CheckExpectedRanges(kSourceId, "{ [201191,201224) }");
+ CheckExpectedRanges(kSourceId, "{ [201191,201247) }");
- // Now add data past the duration and expect a new duration to be signalled.
- const int kNewStreamDuration = kStreamDuration + kAudioBlockDuration * 2;
+ // Add more data to the end of each media type. Note that the last audio block
+ // will have a higher end timestamp than the last video block.
+ const int kFinalStreamDuration = kStreamDuration + kAudioBlockDuration * 3;
EXPECT_CALL(host_, SetDuration(
- base::TimeDelta::FromMilliseconds(kNewStreamDuration)));
+ base::TimeDelta::FromMilliseconds(kFinalStreamDuration)));
AppendCluster(GenerateCluster(kStreamDuration + kAudioBlockDuration,
kStreamDuration + kVideoBlockDuration,
- 2));
+ 3));
- // See that the range has increased appropriately.
- CheckExpectedRanges(kSourceId, "{ [201191,201270) }");
+ // See that the range has increased appropriately (but not to the full
+ // duration of 201293, since there is not enough video appended for that).
+ CheckExpectedRanges(kSourceId, "{ [201191,201290) }");
}
TEST_F(ChunkDemuxerTest, DurationChangeTimestampOffset) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
-
- ASSERT_TRUE(demuxer_->SetTimestampOffset(kSourceId, kDefaultDuration()));
-
+ ASSERT_TRUE(SetTimestampOffset(kSourceId, kDefaultDuration()));
EXPECT_CALL(host_, SetDuration(
kDefaultDuration() + base::TimeDelta::FromMilliseconds(
- kAudioBlockDuration * 2)));
+ kVideoBlockDuration * 2)));
AppendCluster(GenerateCluster(0, 4));
}
message_loop_.RunUntilIdle();
}
-TEST_F(ChunkDemuxerTest, ReadAfterAudioDisabled) {
- ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
- AppendCluster(kDefaultFirstCluster());
-
- DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::AUDIO);
- ASSERT_TRUE(stream);
-
- // The stream should no longer be present.
- demuxer_->OnAudioRendererDisabled();
- ASSERT_FALSE(demuxer_->GetStream(DemuxerStream::AUDIO));
-
- // Normally this would return an audio buffer at timestamp zero, but
- // all reads should return EOS buffers when disabled.
- bool audio_read_done = false;
- stream->Read(base::Bind(&OnReadDone_EOSExpected, &audio_read_done));
- message_loop_.RunUntilIdle();
-
- EXPECT_TRUE(audio_read_done);
-}
-
-// Verifies that signalling end of stream while stalled at a gap
+// Verifies that signaling end of stream while stalled at a gap
// boundary does not trigger end of stream buffers to be returned.
TEST_F(ChunkDemuxerTest, EndOfStreamWhileWaitingForGapToBeFilled) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
AppendCluster(300, 10);
CheckExpectedRanges(kSourceId, "{ [0,132) [300,432) }");
-
GenerateExpectedReads(0, 10);
bool audio_read_done = false;
demuxer_->UnmarkEndOfStream();
- AppendCluster(138, 24);
+ AppendCluster(138, 22);
message_loop_.RunUntilIdle();
- CheckExpectedRanges(kSourceId, "{ [0,438) }");
+ CheckExpectedRanges(kSourceId, "{ [0,435) }");
// Verify that the reads have completed.
EXPECT_TRUE(audio_read_done);
EXPECT_TRUE(video_read_done);
// Read the rest of the buffers.
- GenerateExpectedReads(161, 171, 22);
+ GenerateExpectedReads(161, 171, 20);
// Verify that reads block because the append cleared the end of stream state.
audio_read_done = false;
EXPECT_FALSE(audio_read_done);
EXPECT_FALSE(video_read_done);
+ EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(437)));
MarkEndOfStream(PIPELINE_OK);
EXPECT_TRUE(audio_read_done);
AppendCluster(seek_time.InMilliseconds(), 10);
}
+TEST_F(ChunkDemuxerTest, SetMemoryLimitType) {
+ ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
+
+ // Set different memory limits for audio and video.
+ demuxer_->SetMemoryLimits(DemuxerStream::AUDIO, 10 * kBlockSize);
+ demuxer_->SetMemoryLimits(DemuxerStream::VIDEO, 5 * kBlockSize);
+
+ base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(1000);
+
+ // Append data at the start that can be garbage collected:
+ AppendSingleStreamCluster(kSourceId, kAudioTrackNum, 0, 10);
+ AppendSingleStreamCluster(kSourceId, kVideoTrackNum, 0, 5);
+
+ // 10 audio blocks * kAudioBlockDuration (23ms) => [0,230);
+ // 5 video blocks * kVideoBlockDuration (33ms) => [0,165).
+ CheckExpectedRanges(DemuxerStream::AUDIO, "{ [0,230) }");
+ CheckExpectedRanges(DemuxerStream::VIDEO, "{ [0,165) }");
+
+ // Seek so we can garbage collect the data appended above.
+ Seek(seek_time);
+
+ // Append data at seek_time.
+ AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
+ seek_time.InMilliseconds(), 10);
+ AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
+ seek_time.InMilliseconds(), 5);
+
+ // Verify that the old data, and nothing more, has been garbage collected.
+ CheckExpectedRanges(DemuxerStream::AUDIO, "{ [1000,1230) }");
+ CheckExpectedRanges(DemuxerStream::VIDEO, "{ [1000,1165) }");
+}
+
TEST_F(ChunkDemuxerTest, GCDuringSeek) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
- demuxer_->SetMemoryLimitsForTesting(5 * kBlockSize);
+ demuxer_->SetMemoryLimits(DemuxerStream::AUDIO, 5 * kBlockSize);
base::TimeDelta seek_time1 = base::TimeDelta::FromMilliseconds(1000);
base::TimeDelta seek_time2 = base::TimeDelta::FromMilliseconds(500);
CheckExpectedRanges(kSourceId, "{ [500,592) [792,815) }");
}
-TEST_F(ChunkDemuxerTest, RemoveBeforeInitSegment) {
- EXPECT_CALL(*this, DemuxerOpened());
- demuxer_->Initialize(
- &host_, CreateInitDoneCB(kNoTimestamp(), PIPELINE_OK), true);
-
- EXPECT_EQ(ChunkDemuxer::kOk, AddId(kSourceId, HAS_AUDIO | HAS_VIDEO));
-
- demuxer_->Remove(kSourceId, base::TimeDelta::FromMilliseconds(0),
- base::TimeDelta::FromMilliseconds(1));
-}
-
TEST_F(ChunkDemuxerTest, AppendWindow_Video) {
ASSERT_TRUE(InitDemuxer(HAS_VIDEO));
DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::VIDEO);
- // Set the append window to [20,280).
- demuxer_->SetAppendWindowStart(kSourceId,
- base::TimeDelta::FromMilliseconds(20));
- demuxer_->SetAppendWindowEnd(kSourceId,
- base::TimeDelta::FromMilliseconds(280));
+ // Set the append window to [50,280).
+ append_window_start_for_next_append_ = base::TimeDelta::FromMilliseconds(50);
+ append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(280);
// Append a cluster that starts before and ends after the append window.
AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
// Verify that GOPs that start outside the window are not included
// in the buffer. Also verify that buffers that start inside the
- // window and extend beyond the end of the window are included.
- CheckExpectedRanges(kSourceId, "{ [120,300) }");
- CheckExpectedBuffers(stream, "120 150 180 210 240 270");
+ // window and extend beyond the end of the window are not included.
+ CheckExpectedRanges(kSourceId, "{ [120,270) }");
+ CheckExpectedBuffers(stream, "120 150 180 210 240");
- // Extend the append window to [20,650).
- demuxer_->SetAppendWindowEnd(kSourceId,
- base::TimeDelta::FromMilliseconds(650));
+ // Extend the append window to [50,650).
+ append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(650);
// Append more data and verify that adding buffers start at the next
// keyframe.
AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
"360 390 420K 450 480 510 540K 570 600 630K");
- CheckExpectedRanges(kSourceId, "{ [120,300) [420,660) }");
+ CheckExpectedRanges(kSourceId, "{ [120,270) [420,630) }");
}
TEST_F(ChunkDemuxerTest, AppendWindow_Audio) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::AUDIO);
- // Set the append window to [20,280).
- demuxer_->SetAppendWindowStart(kSourceId,
- base::TimeDelta::FromMilliseconds(20));
- demuxer_->SetAppendWindowEnd(kSourceId,
- base::TimeDelta::FromMilliseconds(280));
+ // Set the append window to [50,280).
+ append_window_start_for_next_append_ = base::TimeDelta::FromMilliseconds(50);
+ append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(280);
// Append a cluster that starts before and ends after the append window.
AppendSingleStreamCluster(
kSourceId, kAudioTrackNum,
"0K 30K 60K 90K 120K 150K 180K 210K 240K 270K 300K 330K");
- // Verify that frames that start outside the window are not included
+ // Verify that frames that end outside the window are not included
// in the buffer. Also verify that buffers that start inside the
- // window and extend beyond the end of the window are included.
- CheckExpectedRanges(kSourceId, "{ [30,300) }");
- CheckExpectedBuffers(stream, "30 60 90 120 150 180 210 240 270");
+ // window and extend beyond the end of the window are not included.
+ //
+ // The first 50ms of the range should be truncated since it overlaps
+ // the start of the append window.
+ CheckExpectedRanges(kSourceId, "{ [50,280) }");
- // Extend the append window to [20,650).
- demuxer_->SetAppendWindowEnd(kSourceId,
- base::TimeDelta::FromMilliseconds(650));
+ // The "50P" buffer is the "0" buffer marked for complete discard. The next
+ // "50" buffer is the "30" buffer marked with 20ms of start discard.
+ CheckExpectedBuffers(stream, "50P 50 60 90 120 150 180 210 240");
+
+ // Extend the append window to [50,650).
+ append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(650);
// Append more data and verify that a new range is created.
AppendSingleStreamCluster(
kSourceId, kAudioTrackNum,
"360K 390K 420K 450K 480K 510K 540K 570K 600K 630K");
- CheckExpectedRanges(kSourceId, "{ [30,300) [360,660) }");
+ CheckExpectedRanges(kSourceId, "{ [50,280) [360,650) }");
+}
+
+TEST_F(ChunkDemuxerTest, AppendWindow_AudioOverlapStartAndEnd) {
+ ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
+
+ // Set the append window to [10,20).
+ append_window_start_for_next_append_ = base::TimeDelta::FromMilliseconds(10);
+ append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(20);
+
+ // Append a cluster that starts before and ends after the append window.
+ // The single "0K" block overlaps both window edges, so both its head and
+ // tail must be trimmed.
+ AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "0K");
+
+ // Verify the append is clipped to the append window.
+ CheckExpectedRanges(kSourceId, "{ [10,20) }");
+}
+
+TEST_F(ChunkDemuxerTest, AppendWindow_WebMFile_AudioOnly) {
+ EXPECT_CALL(*this, DemuxerOpened());
+ demuxer_->Initialize(
+ &host_,
+ CreateInitDoneCB(base::TimeDelta::FromMilliseconds(2744), PIPELINE_OK),
+ true);
+ ASSERT_EQ(ChunkDemuxer::kOk, AddId(kSourceId, HAS_AUDIO));
+
+ // Set the append window to [50,150).
+ append_window_start_for_next_append_ = base::TimeDelta::FromMilliseconds(50);
+ append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(150);
+
+ // Read a WebM file into memory and send the data to the demuxer. The chunk
+ // size has been chosen carefully to ensure the preroll buffer used by the
+ // partial append window trim must come from a previous Append() call.
+ scoped_refptr<DecoderBuffer> buffer =
+ ReadTestDataFile("bear-320x240-audio-only.webm");
+ EXPECT_CALL(*this, InitSegmentReceived());
+ AppendDataInPieces(buffer->data(), buffer->data_size(), 128);
+
+ DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::AUDIO);
+ // "50P" denotes the preroll buffer marked for complete discard by the
+ // partial append window trim described above.
+ CheckExpectedBuffers(stream, "50P 50 62 86 109 122 125 128");
+}
+
+TEST_F(ChunkDemuxerTest, AppendWindow_AudioConfigUpdateRemovesPreroll) {
+ EXPECT_CALL(*this, DemuxerOpened());
+ demuxer_->Initialize(
+ &host_,
+ CreateInitDoneCB(base::TimeDelta::FromMilliseconds(2744), PIPELINE_OK),
+ true);
+ ASSERT_EQ(ChunkDemuxer::kOk, AddId(kSourceId, HAS_AUDIO));
+
+ // Set the append window such that the first file is completely before the
+ // append window.
+ // TODO(wolenetz/acolwell): Update this duration once the files are fixed to
+ // have the correct duration in their init segments, and the
+ // CreateInitDoneCB() call, above, is fixed to used that duration. See
+ // http://crbug.com/354284.
+ const base::TimeDelta duration_1 = base::TimeDelta::FromMilliseconds(2746);
+ append_window_start_for_next_append_ = duration_1;
+
+ // Read a WebM file into memory and append the data.
+ scoped_refptr<DecoderBuffer> buffer =
+ ReadTestDataFile("bear-320x240-audio-only.webm");
+ EXPECT_CALL(*this, InitSegmentReceived());
+ AppendDataInPieces(buffer->data(), buffer->data_size(), 512);
+ // Nothing is buffered: the entire first file precedes the append window.
+ CheckExpectedRanges(kSourceId, "{ }");
+
+ DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::AUDIO);
+ AudioDecoderConfig config_1 = stream->audio_decoder_config();
+
+ // Read a second WebM with a different config in and append the data.
+ scoped_refptr<DecoderBuffer> buffer2 =
+ ReadTestDataFile("bear-320x240-audio-only-48khz.webm");
+ EXPECT_CALL(*this, InitSegmentReceived());
+ EXPECT_CALL(host_, SetDuration(_)).Times(AnyNumber());
+ ASSERT_TRUE(SetTimestampOffset(kSourceId, duration_1));
+ AppendDataInPieces(buffer2->data(), buffer2->data_size(), 512);
+ CheckExpectedRanges(kSourceId, "{ [2746,5519) }");
+
+ Seek(duration_1);
+ ExpectConfigChanged(DemuxerStream::AUDIO);
+ ASSERT_FALSE(config_1.Matches(stream->audio_decoder_config()));
+ CheckExpectedBuffers(stream, "2746 2767 2789 2810");
}
TEST_F(ChunkDemuxerTest, AppendWindow_Text) {
DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
// Set the append window to [20,280).
- demuxer_->SetAppendWindowStart(kSourceId,
- base::TimeDelta::FromMilliseconds(20));
- demuxer_->SetAppendWindowEnd(kSourceId,
- base::TimeDelta::FromMilliseconds(280));
+ append_window_start_for_next_append_ = base::TimeDelta::FromMilliseconds(20);
+ append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(280);
// Append a cluster that starts before and ends after the append
// window.
- AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
- "0K 30 60 90 120K 150 180 210 240K 270 300 330K");
- AppendSingleStreamCluster(kSourceId, kTextTrackNum, "0K 100K 200K 300K");
+ AppendMuxedCluster(
+ MuxedStreamInfo(kVideoTrackNum,
+ "0K 30 60 90 120K 150 180 210 240K 270 300 330K"),
+ MuxedStreamInfo(kTextTrackNum, "0K 100K 200K 300K" ));
// Verify that text cues that start outside the window are not included
// in the buffer. Also verify that cues that extend beyond the
- // window are included.
- CheckExpectedRanges(kSourceId, "{ [120,300) }");
- CheckExpectedBuffers(video_stream, "120 150 180 210 240 270");
- CheckExpectedBuffers(text_stream, "100 200");
+ // window are not included.
+ CheckExpectedRanges(kSourceId, "{ [100,270) }");
+ CheckExpectedBuffers(video_stream, "120 150 180 210 240");
+ CheckExpectedBuffers(text_stream, "100");
// Extend the append window to [20,650).
- demuxer_->SetAppendWindowEnd(kSourceId,
- base::TimeDelta::FromMilliseconds(650));
+ append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(650);
// Append more data and verify that a new range is created.
- AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
- "360 390 420K 450 480 510 540K 570 600 630K");
- AppendSingleStreamCluster(kSourceId, kTextTrackNum, "400K 500K 600K 700K");
- CheckExpectedRanges(kSourceId, "{ [120,300) [420,660) }");
+ AppendMuxedCluster(
+ MuxedStreamInfo(kVideoTrackNum,
+ "360 390 420K 450 480 510 540K 570 600 630K"),
+ MuxedStreamInfo(kTextTrackNum, "400K 500K 600K 700K" ));
+ CheckExpectedRanges(kSourceId, "{ [100,270) [400,630) }");
// Seek to the new range and verify that the expected buffers are returned.
Seek(base::TimeDelta::FromMilliseconds(420));
- CheckExpectedBuffers(video_stream, "420 450 480 510 540 570 600 630");
- CheckExpectedBuffers(text_stream, "400 500 600");
+ CheckExpectedBuffers(video_stream, "420 450 480 510 540 570 600");
+ CheckExpectedBuffers(text_stream, "400 500");
}
TEST_F(ChunkDemuxerTest, StartWaitingForSeekAfterParseError) {
DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
- AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
- "0K 20K 40K 60K 80K 100K 120K 140K");
- AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
- "0K 30 60 90 120K 150 180");
- AppendSingleStreamCluster(kSourceId, kTextTrackNum, "0K 100K 200K");
+ AppendMuxedCluster(
+ MuxedStreamInfo(kAudioTrackNum, "0K 20K 40K 60K 80K 100K 120K 140K"),
+ MuxedStreamInfo(kVideoTrackNum, "0K 30 60 90 120K 150 180"),
+ MuxedStreamInfo(kTextTrackNum, "0K 100K 200K"));
CheckExpectedBuffers(audio_stream, "0 20 40 60 80 100 120 140");
CheckExpectedBuffers(video_stream, "0 30 60 90 120 150 180");
// Append new buffers that are clearly different than the original
// ones and verify that only the new buffers are returned.
- AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
- "1K 21K 41K 61K 81K 101K 121K 141K");
- AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
- "1K 31 61 91 121K 151 181");
- AppendSingleStreamCluster(kSourceId, kTextTrackNum, "1K 101K 201K");
+ AppendMuxedCluster(
+ MuxedStreamInfo(kAudioTrackNum, "1K 21K 41K 61K 81K 101K 121K 141K"),
+ MuxedStreamInfo(kVideoTrackNum, "1K 31 61 91 121K 151 181"),
+ MuxedStreamInfo(kTextTrackNum, "1K 101K 201K"));
Seek(base::TimeDelta());
CheckExpectedBuffers(audio_stream, "1 21 41 61 81 101 121 141");
CheckExpectedBuffers(text_stream, "1 101 201");
}
+TEST_F(ChunkDemuxerTest, Remove_StartAtDuration) {
+ ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
+ DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
+
+ // Set the duration to something small so that the append that
+ // follows updates the duration to reflect the end of the appended data.
+ EXPECT_CALL(host_, SetDuration(
+ base::TimeDelta::FromMilliseconds(1)));
+ demuxer_->SetDuration(0.001);
+
+ EXPECT_CALL(host_, SetDuration(
+ base::TimeDelta::FromMilliseconds(160)));
+ AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
+ "0K 20K 40K 60K 80K 100K 120K 140K");
+
+ CheckExpectedRanges(kSourceId, "{ [0,160) }");
+ CheckExpectedBuffers(audio_stream, "0 20 40 60 80 100 120 140");
+
+ demuxer_->Remove(kSourceId,
+ base::TimeDelta::FromSecondsD(demuxer_->GetDuration()),
+ kInfiniteDuration());
+
+ Seek(base::TimeDelta());
+ CheckExpectedRanges(kSourceId, "{ [0,160) }");
+ CheckExpectedBuffers(audio_stream, "0 20 40 60 80 100 120 140");
+}
+
// Verifies that a Seek() will complete without text cues for
// the seek point and will return cues after the seek position
// when they are eventually appended.
bool text_read_done = false;
text_stream->Read(base::Bind(&OnReadDone,
- base::TimeDelta::FromMilliseconds(125),
+ base::TimeDelta::FromMilliseconds(225),
&text_read_done));
// Append audio & video data so the seek completes.
- AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
- "0K 20K 40K 60K 80K 100K 120K 140K 160K 180K");
- AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
- "0K 30 60 90 120K 150 180 210");
+ AppendMuxedCluster(
+ MuxedStreamInfo(kAudioTrackNum,
+ "0K 20K 40K 60K 80K 100K 120K 140K 160K 180K 200K"),
+ MuxedStreamInfo(kVideoTrackNum, "0K 30 60 90 120K 150 180 210"));
message_loop_.RunUntilIdle();
EXPECT_TRUE(seek_cb_was_called);
// Append text cues that start after the seek point and verify that
// they are returned by Read() calls.
- AppendSingleStreamCluster(kSourceId, kTextTrackNum, "125K 175K 225K");
+ AppendMuxedCluster(
+ MuxedStreamInfo(kAudioTrackNum, "220K 240K 260K 280K"),
+ MuxedStreamInfo(kVideoTrackNum, "240K 270 300 330"),
+ MuxedStreamInfo(kTextTrackNum, "225K 275K 325K"));
message_loop_.RunUntilIdle();
EXPECT_TRUE(text_read_done);
- // NOTE: we start at 175 here because the buffer at 125 was returned
+ // NOTE: we start at 275 here because the buffer at 225 was returned
// to the pending read initiated above.
- CheckExpectedBuffers(text_stream, "175 225");
+ CheckExpectedBuffers(text_stream, "275 325");
- // Verify that audio & video streams contiue to return expected values.
+ // Verify that audio & video streams continue to return expected values.
CheckExpectedBuffers(audio_stream, "160 180");
CheckExpectedBuffers(video_stream, "180 210");
}
+TEST_F(ChunkDemuxerTest, ClusterWithUnknownSize) {
+ ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
+
+ AppendCluster(GenerateCluster(0, 0, 4, true));
+ CheckExpectedRanges(kSourceId, "{ [0,46) }");
+
+ // A new cluster indicates end of the previous cluster with unknown size.
+ AppendCluster(GenerateCluster(46, 66, 5, true));
+ CheckExpectedRanges(kSourceId, "{ [0,115) }");
+}
+
+TEST_F(ChunkDemuxerTest, CuesBetweenClustersWithUnknownSize) {
+ ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
+
+ // Add two clusters separated by Cues in a single Append() call.
+ scoped_ptr<Cluster> cluster = GenerateCluster(0, 0, 4, true);
+ std::vector<uint8> data(cluster->data(), cluster->data() + cluster->size());
+ data.insert(data.end(), kCuesHeader, kCuesHeader + sizeof(kCuesHeader));
+ cluster = GenerateCluster(46, 66, 5, true);
+ data.insert(data.end(), cluster->data(), cluster->data() + cluster->size());
+ AppendData(&*data.begin(), data.size());
+
+ CheckExpectedRanges(kSourceId, "{ [0,115) }");
+}
+
+TEST_F(ChunkDemuxerTest, CuesBetweenClusters) {
+ ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
+
+ AppendCluster(GenerateCluster(0, 0, 4));
+ AppendData(kCuesHeader, sizeof(kCuesHeader));
+ AppendCluster(GenerateCluster(46, 66, 5));
+ CheckExpectedRanges(kSourceId, "{ [0,115) }");
+}
+
} // namespace media