1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
8 #include "base/message_loop/message_loop.h"
9 #include "base/strings/string_number_conversions.h"
10 #include "base/strings/string_split.h"
11 #include "base/strings/string_util.h"
12 #include "media/base/audio_decoder_config.h"
13 #include "media/base/decoder_buffer.h"
14 #include "media/base/decrypt_config.h"
15 #include "media/base/mock_demuxer_host.h"
16 #include "media/base/test_data_util.h"
17 #include "media/base/test_helpers.h"
18 #include "media/filters/chunk_demuxer.h"
19 #include "media/formats/webm/cluster_builder.h"
20 #include "media/formats/webm/webm_constants.h"
21 #include "media/formats/webm/webm_crypto_helpers.h"
22 #include "testing/gtest/include/gtest/gtest.h"
24 using ::testing::AnyNumber;
25 using ::testing::Exactly;
26 using ::testing::InSequence;
27 using ::testing::NotNull;
28 using ::testing::Return;
29 using ::testing::SaveArg;
30 using ::testing::SetArgumentPointee;
// WebM Tracks element header whose payload size is encoded as an 8-byte
// zero-padded vint so the real size can be patched in via WriteInt64().
35 const uint8 kTracksHeader[] = {
36 0x16, 0x54, 0xAE, 0x6B, // Tracks ID
37 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // tracks(size = 0)
40 // WebM Block bytes that represent a VP8 keyframe.
41 const uint8 kVP8Keyframe[] = {
42 0x010, 0x00, 0x00, 0x9d, 0x01, 0x2a, 0x00, 0x10, 0x00, 0x10, 0x00
45 // WebM Block bytes that represent a VP8 interframe.
46 const uint8 kVP8Interframe[] = { 0x11, 0x00, 0x00 };
// WebM Cues element header, also with an 8-byte size field.
48 static const uint8 kCuesHeader[] = {
49 0x1C, 0x53, 0xBB, 0x6B, // Cues ID
50 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // cues(size = 0)
53 const int kTracksHeaderSize = sizeof(kTracksHeader);
// Byte offset of the 8-byte size field inside kTracksHeader (after the ID).
54 const int kTracksSizeOffset = 4;
56 // The size of TrackEntry element in test file "webm_vorbis_track_entry" starts
57 // at index 1 and spans 8 bytes.
58 const int kAudioTrackSizeOffset = 1;
59 const int kAudioTrackSizeWidth = 8;
60 const int kAudioTrackEntryHeaderSize =
61 kAudioTrackSizeOffset + kAudioTrackSizeWidth;
63 // The size of TrackEntry element in test file "webm_vp8_track_entry" starts at
64 // index 1 and spans 8 bytes.
65 const int kVideoTrackSizeOffset = 1;
66 const int kVideoTrackSizeWidth = 8;
67 const int kVideoTrackEntryHeaderSize =
68 kVideoTrackSizeOffset + kVideoTrackSizeWidth;
// WebM track numbers used for the clusters generated by this test.
70 const int kVideoTrackNum = 1;
71 const int kAudioTrackNum = 2;
72 const int kTextTrackNum = 3;
73 const int kAlternateTextTrackNum = 4;
// Per-block durations in milliseconds assumed by the cluster helpers below.
75 const int kAudioBlockDuration = 23;
76 const int kVideoBlockDuration = 33;
77 const int kTextBlockDuration = 100;
// Payload size (bytes) used for generated simple blocks.
78 const int kBlockSize = 10;
// Default media-source id registered via AddId().
80 const char kSourceId[] = "SourceId";
// Expected buffered-range string / end timestamps after appending the
// default first and second clusters.
81 const char kDefaultFirstClusterRange[] = "{ [0,46) }";
82 const int kDefaultFirstClusterEndTimestamp = 66;
83 const int kDefaultSecondClusterEndTimestamp = 132;
// Duration that the test init segment ("webm_info_element") reports.
85 base::TimeDelta kDefaultDuration() {
86 return base::TimeDelta::FromMilliseconds(201224);
89 // Write an integer into buffer in the form of vint that spans 8 bytes.
90 // The data pointed by |buffer| should be at least 8 bytes long.
91 // |number| should be in the range 0 <= number < 0x00FFFFFFFFFFFFFF.
92 static void WriteInt64(uint8* buffer, int64 number) {
93 DCHECK(number >= 0 && number < 0x00FFFFFFFFFFFFFFLL);
// Bytes 7..1 receive the value; |tmp| is presumably |number| shifted per
// iteration — the lines declaring/shifting it are not visible here (TODO
// confirm against the full file).
96 for (int i = 7; i > 0; i--) {
97 buffer[i] = tmp & 0xff;
// Gmock matcher: buffer is non-null, not end-of-stream, and carries the
// given timestamp (in milliseconds).
102 MATCHER_P(HasTimestamp, timestamp_in_ms, "") {
103 return arg.get() && !arg->end_of_stream() &&
104 arg->timestamp().InMilliseconds() == timestamp_in_ms;
// Gmock matcher: buffer is a non-null end-of-stream buffer.
107 MATCHER(IsEndOfStream, "") { return arg.get() && arg->end_of_stream(); }
// Read callback expecting a successful read at |expected_time|.
109 static void OnReadDone(const base::TimeDelta& expected_time,
111 DemuxerStream::Status status,
112 const scoped_refptr<DecoderBuffer>& buffer) {
113 EXPECT_EQ(status, DemuxerStream::kOk);
114 EXPECT_EQ(expected_time, buffer->timestamp());
// Read callback expecting the read to be aborted with no buffer.
118 static void OnReadDone_AbortExpected(
119 bool* called, DemuxerStream::Status status,
120 const scoped_refptr<DecoderBuffer>& buffer) {
121 EXPECT_EQ(status, DemuxerStream::kAborted);
122 EXPECT_EQ(NULL, buffer.get());
// Read callback expecting a successful end-of-stream buffer.
126 static void OnReadDone_EOSExpected(bool* called,
127 DemuxerStream::Status status,
128 const scoped_refptr<DecoderBuffer>& buffer) {
129 EXPECT_EQ(status, DemuxerStream::kOk);
130 EXPECT_TRUE(buffer->end_of_stream());
// Seek callback expecting PIPELINE_OK.
134 static void OnSeekDone_OKExpected(bool* called, PipelineStatus status) {
135 EXPECT_EQ(status, PIPELINE_OK);
// Forwards ChunkDemuxer log messages to DVLOG.
139 static void LogFunc(const std::string& str) { DVLOG(1) << str; }
// Fixture exercising ChunkDemuxer against generated WebM clusters and the
// test data files in media/test/data.
141 class ChunkDemuxerTest : public ::testing::Test {
149 // Default cluster to append first for simple tests.
150 scoped_ptr<Cluster> kDefaultFirstCluster() {
151 return GenerateCluster(0, 4);
154 // Default cluster to append after kDefaultFirstCluster()
155 // has been appended. This cluster starts with blocks that
156 // have timestamps consistent with the end times of the blocks
157 // in kDefaultFirstCluster() so that these two clusters represent
158 // a continuous region.
159 scoped_ptr<Cluster> kDefaultSecondCluster() {
160 return GenerateCluster(46, 66, 5);
// Constructor fragment: by default appends are not clipped by an append
// window (end is infinite).
164 : append_window_end_for_next_append_(kInfiniteDuration()) {
// (Re)creates demuxer_ wired to this fixture's open/need-key callbacks;
// log output goes to LogFunc().
168 void CreateNewDemuxer() {
169 base::Closure open_cb =
170 base::Bind(&ChunkDemuxerTest::DemuxerOpened, base::Unretained(this));
171 Demuxer::NeedKeyCB need_key_cb =
172 base::Bind(&ChunkDemuxerTest::DemuxerNeedKey, base::Unretained(this));
174 new ChunkDemuxer(open_cb, need_key_cb, base::Bind(&LogFunc), true));
177 virtual ~ChunkDemuxerTest() {
// Builds a WebM init segment (EBML header + Info + Tracks) into |buffer|
// using the default text track number when HAS_TEXT is set.
181 void CreateInitSegment(int stream_flags,
182 bool is_audio_encrypted,
183 bool is_video_encrypted,
184 scoped_ptr<uint8[]>* buffer,
186 CreateInitSegmentInternal(
187 stream_flags, is_audio_encrypted, is_video_encrypted, buffer, false,
// Same as CreateInitSegment() but writes kAlternateTextTrackNum as the text
// track's TrackNumber. Requires HAS_TEXT in |stream_flags|.
191 void CreateInitSegmentWithAlternateTextTrackNum(int stream_flags,
192 bool is_audio_encrypted,
193 bool is_video_encrypted,
194 scoped_ptr<uint8[]>* buffer,
196 DCHECK(stream_flags & HAS_TEXT);
197 CreateInitSegmentInternal(
198 stream_flags, is_audio_encrypted, is_video_encrypted, buffer, true,
// Concatenates the test-data EBML header, Info element, a Tracks header
// (size patched afterwards) and the requested TrackEntry elements into a
// single heap buffer owned by |buffer|.
202 void CreateInitSegmentInternal(int stream_flags,
203 bool is_audio_encrypted,
204 bool is_video_encrypted,
205 scoped_ptr<uint8[]>* buffer,
206 bool use_alternate_text_track_id,
208 bool has_audio = (stream_flags & HAS_AUDIO) != 0;
209 bool has_video = (stream_flags & HAS_VIDEO) != 0;
210 bool has_text = (stream_flags & HAS_TEXT) != 0;
211 scoped_refptr<DecoderBuffer> ebml_header;
212 scoped_refptr<DecoderBuffer> info;
213 scoped_refptr<DecoderBuffer> audio_track_entry;
214 scoped_refptr<DecoderBuffer> video_track_entry;
215 scoped_refptr<DecoderBuffer> audio_content_encodings;
216 scoped_refptr<DecoderBuffer> video_content_encodings;
217 scoped_refptr<DecoderBuffer> text_track_entry;
219 ebml_header = ReadTestDataFile("webm_ebml_element");
221 info = ReadTestDataFile("webm_info_element");
// Accumulate the total payload size of the Tracks element as each
// TrackEntry (and optional ContentEncodings) is loaded.
223 int tracks_element_size = 0;
226 audio_track_entry = ReadTestDataFile("webm_vorbis_track_entry");
227 tracks_element_size += audio_track_entry->data_size();
228 if (is_audio_encrypted) {
229 audio_content_encodings = ReadTestDataFile("webm_content_encodings");
230 tracks_element_size += audio_content_encodings->data_size();
235 video_track_entry = ReadTestDataFile("webm_vp8_track_entry");
236 tracks_element_size += video_track_entry->data_size();
237 if (is_video_encrypted) {
238 video_content_encodings = ReadTestDataFile("webm_content_encodings");
239 tracks_element_size += video_content_encodings->data_size();
244 // TODO(matthewjheaney): create an abstraction to do
245 // this (http://crbug/321454).
246 // We need it to also handle the creation of multiple text tracks.
248 // This is the track entry for a text track,
249 // TrackEntry [AE], size=30
250 // TrackNum [D7], size=1, val=3 (or 4 if use_alternate_text_track_id)
251 // TrackUID [73] [C5], size=1, value=3 (must remain constant for same
252 // track, even if TrackNum changes)
253 // TrackType [83], size=1, val=0x11
254 // CodecId [86], size=18, val="D_WEBVTT/SUBTITLES"
255 char str[] = "\xAE\x9E\xD7\x81\x03\x73\xC5\x81\x03"
256 "\x83\x81\x11\x86\x92"
257 "D_WEBVTT/SUBTITLES";
// str[4] is the TrackNum payload byte documented above.
258 DCHECK_EQ(str[4], kTextTrackNum);
259 if (use_alternate_text_track_id)
260 str[4] = kAlternateTextTrackNum;
262 const int len = strlen(str);
264 const uint8* const buf = reinterpret_cast<const uint8*>(str);
265 text_track_entry = DecoderBuffer::CopyFrom(buf, len);
266 tracks_element_size += text_track_entry->data_size();
// Total output size: EBML header + Info + Tracks header + Tracks payload.
269 *size = ebml_header->data_size() + info->data_size() +
270 kTracksHeaderSize + tracks_element_size;
272 buffer->reset(new uint8[*size]);
274 uint8* buf = buffer->get();
275 memcpy(buf, ebml_header->data(), ebml_header->data_size());
276 buf += ebml_header->data_size();
278 memcpy(buf, info->data(), info->data_size());
279 buf += info->data_size();
// Copy the Tracks header and patch its 8-byte size field in place.
281 memcpy(buf, kTracksHeader, kTracksHeaderSize);
282 WriteInt64(buf + kTracksSizeOffset, tracks_element_size);
283 buf += kTracksHeaderSize;
285 // TODO(xhwang): Simplify this! Probably have test data files that contain
286 // ContentEncodings directly instead of trying to create one at run-time.
288 memcpy(buf, audio_track_entry->data(),
289 audio_track_entry->data_size());
290 if (is_audio_encrypted) {
// Append ContentEncodings after the TrackEntry and enlarge the
// TrackEntry's own size field to cover it.
291 memcpy(buf + audio_track_entry->data_size(),
292 audio_content_encodings->data(),
293 audio_content_encodings->data_size());
294 WriteInt64(buf + kAudioTrackSizeOffset,
295 audio_track_entry->data_size() +
296 audio_content_encodings->data_size() -
297 kAudioTrackEntryHeaderSize);
298 buf += audio_content_encodings->data_size();
300 buf += audio_track_entry->data_size();
304 memcpy(buf, video_track_entry->data(),
305 video_track_entry->data_size());
306 if (is_video_encrypted) {
// Same ContentEncodings splicing as the audio track above.
307 memcpy(buf + video_track_entry->data_size(),
308 video_content_encodings->data(),
309 video_content_encodings->data_size());
310 WriteInt64(buf + kVideoTrackSizeOffset,
311 video_track_entry->data_size() +
312 video_content_encodings->data_size() -
313 kVideoTrackEntryHeaderSize);
314 buf += video_content_encodings->data_size();
316 buf += video_track_entry->data_size();
320 memcpy(buf, text_track_entry->data(),
321 text_track_entry->data_size());
322 buf += text_track_entry->data_size();
// Registers the default source id with both audio and video streams.
326 ChunkDemuxer::Status AddId() {
327 return AddId(kSourceId, HAS_AUDIO | HAS_VIDEO);
// Registers |source_id| with codecs derived from |stream_flags|
// (vorbis for audio, vp8 for video).
330 ChunkDemuxer::Status AddId(const std::string& source_id, int stream_flags) {
331 bool has_audio = (stream_flags & HAS_AUDIO) != 0;
332 bool has_video = (stream_flags & HAS_VIDEO) != 0;
333 std::vector<std::string> codecs;
337 codecs.push_back("vorbis");
342 codecs.push_back("vp8");
// With no flags set, fall back to registering the default A/V source.
346 if (!has_audio && !has_video) {
347 return AddId(kSourceId, HAS_AUDIO | HAS_VIDEO);
350 return demuxer_->AddId(source_id, type, codecs);
// Registers |source_id| as an MPEG-2 TS source with AAC and H.264 codecs.
353 ChunkDemuxer::Status AddIdForMp2tSource(const std::string& source_id) {
354 std::vector<std::string> codecs;
355 std::string type = "video/mp2t";
356 codecs.push_back("mp4a.40.2");
357 codecs.push_back("avc1.640028");
358 return demuxer_->AddId(source_id, type, codecs);
// Appends raw bytes to the default source.
361 void AppendData(const uint8* data, size_t length) {
362 AppendData(kSourceId, data, length);
// Appends a generated cluster's bytes to |source_id|.
365 void AppendCluster(const std::string& source_id,
366 scoped_ptr<Cluster> cluster) {
367 AppendData(source_id, cluster->data(), cluster->size());
370 void AppendCluster(scoped_ptr<Cluster> cluster) {
371 AppendCluster(kSourceId, cluster.Pass());
372 void AppendCluster(int timecode, int block_count) {
375 AppendCluster(GenerateCluster(timecode, block_count));
// Appends a single-track cluster of |block_count| blocks starting at
// |timecode|, using the per-track default block duration.
378 void AppendSingleStreamCluster(const std::string& source_id, int track_number,
379 int timecode, int block_count) {
380 int block_duration = 0;
381 switch (track_number) {
383 block_duration = kVideoBlockDuration;
386 block_duration = kAudioBlockDuration;
388 case kTextTrackNum: // Fall-through.
389 case kAlternateTextTrackNum:
390 block_duration = kTextBlockDuration;
// A zero duration means |track_number| was not a known track.
393 ASSERT_NE(block_duration, 0);
394 int end_timecode = timecode + block_count * block_duration;
395 AppendCluster(source_id,
396 GenerateSingleStreamCluster(
397 timecode, end_timecode, track_number, block_duration));
// BlockInfo constructor fragment: track number, timestamp (ms), flags,
// duration (ms).
408 BlockInfo(int tn, int ts, int f, int d)
// Orders blocks by timestamp; used with std::priority_queue in
// GenerateCluster(block_queue, ...).
420 bool operator< (const BlockInfo& rhs) const {
421 return timestamp_in_ms < rhs.timestamp_in_ms;
425 // |track_number| - The track number to place in
426 // |block_descriptions| - A space delimited string of block info that
427 // is used to populate |blocks|. Each block info has a timestamp in
428 // milliseconds and optionally followed by a 'K' to indicate that a block
429 // should be marked as a keyframe. For example "0K 30 60" should populate
430 // |blocks| with 3 BlockInfo objects: a keyframe with timestamp 0 and 2
431 // non-keyframes at 30ms and 60ms.
// Parses a space-delimited |block_descriptions| string (see comment above)
// into BlockInfo entries for |track_number|, enforcing that text blocks
// carry the text duration and that text/audio blocks are keyframes.
// Fix: "&timestamps" had been mangled to "×tamps" by an HTML-entity
// unescape (`&times` -> ×); the address-of argument is restored.
432 void ParseBlockDescriptions(int track_number,
433 const std::string block_descriptions,
434 std::vector<BlockInfo>* blocks) {
435 std::vector<std::string> timestamps;
436 base::SplitString(block_descriptions, ' ', &timestamps);
438 for (size_t i = 0; i < timestamps.size(); ++i) {
439 std::string timestamp_str = timestamps[i];
440 BlockInfo block_info;
441 block_info.track_number = track_number;
442 block_info.flags = 0;
443 block_info.duration = 0;
// A trailing "K" marks the block as a keyframe.
445 if (EndsWith(timestamp_str, "K", true)) {
446 block_info.flags = kWebMFlagKeyframe;
447 // Remove the "K" off of the token.
448 timestamp_str = timestamp_str.substr(0, timestamps[i].length() - 1);
450 CHECK(base::StringToInt(timestamp_str, &block_info.timestamp_in_ms));
452 if (track_number == kTextTrackNum ||
453 track_number == kAlternateTextTrackNum) {
454 block_info.duration = kTextBlockDuration;
455 ASSERT_EQ(kWebMFlagKeyframe, block_info.flags)
456 << "Text block with timestamp " << block_info.timestamp_in_ms
457 << " was not marked as a keyframe."
458 << " All text blocks must be keyframes";
// Audio blocks in WebM are always keyframes.
461 if (track_number == kAudioTrackNum)
462 ASSERT_TRUE(block_info.flags & kWebMFlagKeyframe);
464 blocks->push_back(block_info);
// Builds a cluster from |blocks|: blocks with a duration become
// BlockGroups (real VP8 payloads for video), the rest SimpleBlocks.
468 scoped_ptr<Cluster> GenerateCluster(const std::vector<BlockInfo>& blocks,
470 DCHECK_GT(blocks.size(), 0u);
473 std::vector<uint8> data(10);
474 for (size_t i = 0; i < blocks.size(); ++i) {
476 cb.SetClusterTimecode(blocks[i].timestamp_in_ms);
478 if (blocks[i].duration) {
479 if (blocks[i].track_number == kVideoTrackNum) {
480 AddVideoBlockGroup(&cb,
481 blocks[i].track_number, blocks[i].timestamp_in_ms,
482 blocks[i].duration, blocks[i].flags);
484 cb.AddBlockGroup(blocks[i].track_number, blocks[i].timestamp_in_ms,
485 blocks[i].duration, blocks[i].flags,
486 &data[0], data.size());
489 cb.AddSimpleBlock(blocks[i].track_number, blocks[i].timestamp_in_ms,
491 &data[0], data.size());
495 return unknown_size ? cb.FinishWithUnknownSize() : cb.Finish();
// Drains |block_queue| (largest timestamp popped first) into a vector in
// ascending-timestamp order by filling it from the back.
498 scoped_ptr<Cluster> GenerateCluster(
499 std::priority_queue<BlockInfo> block_queue,
501 std::vector<BlockInfo> blocks(block_queue.size());
502 for (size_t i = block_queue.size() - 1; !block_queue.empty(); --i) {
503 blocks[i] = block_queue.top();
507 return GenerateCluster(blocks, unknown_size);
510 // |block_descriptions| - The block descriptions used to construct the
511 // cluster. See the documentation for ParseBlockDescriptions() for details on
512 // the string format.
513 void AppendSingleStreamCluster(const std::string& source_id, int track_number,
514 const std::string& block_descriptions) {
515 std::vector<BlockInfo> blocks;
516 ParseBlockDescriptions(track_number, block_descriptions, &blocks);
517 AppendCluster(source_id, GenerateCluster(blocks, false));
// Pairs a track number with a block-description string for muxed appends.
520 struct MuxedStreamInfo {
523 block_descriptions("")
526 MuxedStreamInfo(int track_num, const char* block_desc)
527 : track_number(track_num),
528 block_descriptions(block_desc) {
532 // The block description passed to ParseBlockDescriptions().
533 // See the documentation for that method for details on the string format.
534 const char* block_descriptions;
// Two-stream convenience overload.
537 void AppendMuxedCluster(const MuxedStreamInfo& msi_1,
538 const MuxedStreamInfo& msi_2) {
539 std::vector<MuxedStreamInfo> msi(2);
542 AppendMuxedCluster(msi);
// Three-stream convenience overload.
545 void AppendMuxedCluster(const MuxedStreamInfo& msi_1,
546 const MuxedStreamInfo& msi_2,
547 const MuxedStreamInfo& msi_3) {
548 std::vector<MuxedStreamInfo> msi(3);
552 AppendMuxedCluster(msi);
// Interleaves the blocks of all described tracks by timestamp (via the
// priority queue) and appends the result as one cluster.
555 void AppendMuxedCluster(const std::vector<MuxedStreamInfo> msi) {
556 std::priority_queue<BlockInfo> block_queue;
557 for (size_t i = 0; i < msi.size(); ++i) {
558 std::vector<BlockInfo> track_blocks;
559 ParseBlockDescriptions(msi[i].track_number, msi[i].block_descriptions,
562 for (size_t j = 0; j < track_blocks.size(); ++j)
563 block_queue.push(track_blocks[j]);
566 AppendCluster(kSourceId, GenerateCluster(block_queue, false));
// Appends |length| bytes to |source_id| through the demuxer, applying the
// fixture's pending append window and per-source timestamp offset. Buffered
// range updates are allowed any number of times during the append.
// Fix: "&timestamp_offset_map_" had been mangled to "×tamp_offset_map_"
// by an HTML-entity unescape (`&times` -> ×); the address-of is restored.
569 void AppendData(const std::string& source_id,
570 const uint8* data, size_t length) {
571 EXPECT_CALL(host_, AddBufferedTimeRange(_, _)).Times(AnyNumber());
573 demuxer_->AppendData(source_id, data, length,
574 append_window_start_for_next_append_,
575 append_window_end_for_next_append_,
576 &timestamp_offset_map_[source_id]);
// Appends |data| in default 7-byte chunks to exercise incremental parsing.
579 void AppendDataInPieces(const uint8* data, size_t length) {
580 AppendDataInPieces(data, length, 7);
583 void AppendDataInPieces(const uint8* data, size_t length, size_t piece_size) {
584 const uint8* start = data;
585 const uint8* end = data + length;
586 while (start < end) {
587 size_t append_size = std::min(piece_size,
588 static_cast<size_t>(end - start));
589 AppendData(start, append_size);
590 start += append_size;
// Appends an unencrypted init segment to the default source.
594 void AppendInitSegment(int stream_flags) {
595 AppendInitSegmentWithSourceId(kSourceId, stream_flags);
598 void AppendInitSegmentWithSourceId(const std::string& source_id,
600 AppendInitSegmentWithEncryptedInfo(source_id, stream_flags, false, false);
// Builds an init segment (optionally marking streams encrypted) and appends
// it to |source_id|.
603 void AppendInitSegmentWithEncryptedInfo(const std::string& source_id,
605 bool is_audio_encrypted,
606 bool is_video_encrypted) {
607 scoped_ptr<uint8[]> info_tracks;
608 int info_tracks_size = 0;
609 CreateInitSegment(stream_flags,
610 is_audio_encrypted, is_video_encrypted,
611 &info_tracks, &info_tracks_size);
612 AppendData(source_id, info_tracks.get(), info_tracks_size);
// Appends bytes that are not valid WebM, for error-path tests.
615 void AppendGarbage() {
616 // Fill up an array with gibberish.
617 int garbage_cluster_size = 10;
618 scoped_ptr<uint8[]> garbage_cluster(new uint8[garbage_cluster_size]);
619 for (int i = 0; i < garbage_cluster_size; ++i)
620 garbage_cluster[i] = i;
621 AppendData(garbage_cluster.get(), garbage_cluster_size);
// Init callback target bound by CreateInitDoneCB().
624 void InitDoneCalled(PipelineStatus expected_status,
625 PipelineStatus status) {
626 EXPECT_EQ(status, expected_status);
629 void AppendEmptyCluster(int timecode) {
630 AppendCluster(GenerateEmptyCluster(timecode));
// Returns an init-done callback that also expects SetDuration() on the host
// when |expected_duration| is a real timestamp.
633 PipelineStatusCB CreateInitDoneCB(const base::TimeDelta& expected_duration,
634 PipelineStatus expected_status) {
635 if (expected_duration != kNoTimestamp())
636 EXPECT_CALL(host_, SetDuration(expected_duration));
637 return CreateInitDoneCB(expected_status);
640 PipelineStatusCB CreateInitDoneCB(PipelineStatus expected_status) {
641 return base::Bind(&ChunkDemuxerTest::InitDoneCalled,
642 base::Unretained(this),
// Initializes the demuxer with unencrypted streams per |stream_flags|.
652 bool InitDemuxer(int stream_flags) {
653 return InitDemuxerWithEncryptionInfo(stream_flags, false, false);
656 bool InitDemuxerWithEncryptionInfo(
657 int stream_flags, bool is_audio_encrypted, bool is_video_encrypted) {
// With no streams requested, initialization is expected to fail.
659 PipelineStatus expected_status =
660 (stream_flags != 0) ? PIPELINE_OK : DEMUXER_ERROR_COULD_NOT_OPEN;
662 base::TimeDelta expected_duration = kNoTimestamp();
663 if (expected_status == PIPELINE_OK)
664 expected_duration = kDefaultDuration();
666 EXPECT_CALL(*this, DemuxerOpened());
667 demuxer_->Initialize(
668 &host_, CreateInitDoneCB(expected_duration, expected_status), true);
670 if (AddId(kSourceId, stream_flags) != ChunkDemuxer::kOk)
673 AppendInitSegmentWithEncryptedInfo(
674 kSourceId, stream_flags,
675 is_audio_encrypted, is_video_encrypted);
// Initializes with separate audio and video sources, optionally adding a
// text track to each.
679 bool InitDemuxerAudioAndVideoSourcesText(const std::string& audio_id,
680 const std::string& video_id,
682 EXPECT_CALL(*this, DemuxerOpened());
683 demuxer_->Initialize(
684 &host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
686 if (AddId(audio_id, HAS_AUDIO) != ChunkDemuxer::kOk)
688 if (AddId(video_id, HAS_VIDEO) != ChunkDemuxer::kOk)
691 int audio_flags = HAS_AUDIO;
692 int video_flags = HAS_VIDEO;
695 audio_flags |= HAS_TEXT;
696 video_flags |= HAS_TEXT;
699 AppendInitSegmentWithSourceId(audio_id, audio_flags);
700 AppendInitSegmentWithSourceId(video_id, video_flags);
704 bool InitDemuxerAudioAndVideoSources(const std::string& audio_id,
705 const std::string& video_id) {
706 return InitDemuxerAudioAndVideoSourcesText(audio_id, video_id, false);
709 // Initializes the demuxer with data from 2 files with different
710 // decoder configurations. This is used to test the decoder config change
713 // bear-320x240.webm VideoDecoderConfig returns 320x240 for its natural_size()
714 // bear-640x360.webm VideoDecoderConfig returns 640x360 for its natural_size()
715 // The resulting video stream returns data from each file for the following
717 // bear-320x240.webm : [0-501) [801-2736)
718 // bear-640x360.webm : [527-793)
720 // bear-320x240.webm AudioDecoderConfig returns 3863 for its extra_data_size()
721 // bear-640x360.webm AudioDecoderConfig returns 3935 for its extra_data_size()
722 // The resulting audio stream returns data from each file for the following
724 // bear-320x240.webm : [0-524) [779-2736)
725 // bear-640x360.webm : [527-759)
726 bool InitDemuxerWithConfigChangeData() {
727 scoped_refptr<DecoderBuffer> bear1 = ReadTestDataFile("bear-320x240.webm");
728 scoped_refptr<DecoderBuffer> bear2 = ReadTestDataFile("bear-640x360.webm");
730 EXPECT_CALL(*this, DemuxerOpened());
732 demuxer_->Initialize(
733 &host_, CreateInitDoneCB(base::TimeDelta::FromMilliseconds(2744),
736 if (AddId(kSourceId, HAS_AUDIO | HAS_VIDEO) != ChunkDemuxer::kOk)
739 // Append the whole bear1 file.
740 // TODO(wolenetz/acolwell): Remove this extra SetDuration expectation once
741 // the files are fixed to have the correct duration in their init segments,
742 // and the CreateInitDoneCB() call, above, is fixed to used that duration.
743 // See http://crbug.com/354284.
744 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(2746)));
745 AppendData(bear1->data(), bear1->data_size());
746 // Last audio frame has timestamp 2721 and duration 24 (estimated from max
747 // seen so far for audio track).
748 // Last video frame has timestamp 2703 and duration 33 (from TrackEntry
749 // DefaultDuration for video track).
750 CheckExpectedRanges(kSourceId, "{ [0,2736) }");
752 // Append initialization segment for bear2.
753 // Note: Offsets here and below are derived from
754 // media/test/data/bear-640x360-manifest.js and
755 // media/test/data/bear-320x240-manifest.js which were
756 // generated from media/test/data/bear-640x360.webm and
757 // media/test/data/bear-320x240.webm respectively.
758 AppendData(bear2->data(), 4340);
760 // Append a media segment that goes from [0.527000, 1.014000).
761 AppendData(bear2->data() + 55290, 18785);
762 CheckExpectedRanges(kSourceId, "{ [0,1027) [1201,2736) }");
764 // Append initialization segment for bear1 & fill gap with [779-1197)
766 AppendData(bear1->data(), 4370);
767 AppendData(bear1->data() + 72737, 28183);
768 CheckExpectedRanges(kSourceId, "{ [0,2736) }");
770 MarkEndOfStream(PIPELINE_OK);
// Shuts the demuxer down and drains any tasks it posted.
774 void ShutdownDemuxer() {
776 demuxer_->Shutdown();
777 message_loop_.RunUntilIdle();
// Adds a 1-byte SimpleBlock for |track_num| at |timecode|.
781 void AddSimpleBlock(ClusterBuilder* cb, int track_num, int64 timecode) {
782 uint8 data[] = { 0x00 };
783 cb->AddSimpleBlock(track_num, timecode, 0, data, sizeof(data));
786 scoped_ptr<Cluster> GenerateCluster(int timecode, int block_count) {
787 return GenerateCluster(timecode, timecode, block_count);
// Adds a video BlockGroup whose payload is a real VP8 key/interframe so the
// parser accepts it.
790 void AddVideoBlockGroup(ClusterBuilder* cb, int track_num, int64 timecode,
791 int duration, int flags) {
793 (flags & kWebMFlagKeyframe) != 0 ? kVP8Keyframe : kVP8Interframe;
794 int size = (flags & kWebMFlagKeyframe) != 0 ? sizeof(kVP8Keyframe) :
795 sizeof(kVP8Interframe);
796 cb->AddBlockGroup(track_num, timecode, duration, flags, data, size);
799 scoped_ptr<Cluster> GenerateCluster(int first_audio_timecode,
800 int first_video_timecode,
802 return GenerateCluster(first_audio_timecode, first_video_timecode,
// Generates an interleaved audio/video cluster of |block_count| blocks.
805 scoped_ptr<Cluster> GenerateCluster(int first_audio_timecode,
806 int first_video_timecode,
809 CHECK_GT(block_count, 0);
811 std::priority_queue<BlockInfo> block_queue;
// A single block is emitted as one audio BlockGroup.
813 if (block_count == 1) {
814 block_queue.push(BlockInfo(kAudioTrackNum,
815 first_audio_timecode,
817 kAudioBlockDuration));
818 return GenerateCluster(block_queue, unknown_size);
821 int audio_timecode = first_audio_timecode;
822 int video_timecode = first_video_timecode;
824 // Create simple blocks for everything except the last 2 blocks.
825 // The first video frame must be a keyframe.
826 uint8 video_flag = kWebMFlagKeyframe;
827 for (int i = 0; i < block_count - 2; i++) {
828 if (audio_timecode <= video_timecode) {
829 block_queue.push(BlockInfo(kAudioTrackNum,
833 audio_timecode += kAudioBlockDuration;
837 block_queue.push(BlockInfo(kVideoTrackNum,
841 video_timecode += kVideoBlockDuration;
845 // Make the last 2 blocks BlockGroups so that they don't get delayed by the
846 // block duration calculation logic.
847 block_queue.push(BlockInfo(kAudioTrackNum,
850 kAudioBlockDuration));
851 block_queue.push(BlockInfo(kVideoTrackNum,
854 kVideoBlockDuration));
856 return GenerateCluster(block_queue, unknown_size);
// Generates a cluster for one track covering [timecode, end_timecode) at
// |block_duration| spacing; the final block is a BlockGroup.
859 scoped_ptr<Cluster> GenerateSingleStreamCluster(int timecode,
862 int block_duration) {
863 CHECK_GT(end_timecode, timecode);
865 std::vector<uint8> data(kBlockSize);
868 cb.SetClusterTimecode(timecode);
870 // Create simple blocks for everything except the last block.
871 while (timecode < (end_timecode - block_duration)) {
872 cb.AddSimpleBlock(track_number, timecode, kWebMFlagKeyframe,
873 &data[0], data.size());
874 timecode += block_duration;
// Video gets a real VP8 payload; other tracks use the dummy payload.
877 if (track_number == kVideoTrackNum) {
878 AddVideoBlockGroup(&cb, track_number, timecode, block_duration,
881 cb.AddBlockGroup(track_number, timecode, block_duration,
882 kWebMFlagKeyframe, &data[0], data.size());
// Issues a read on the stream of |type| and drains the message loop so the
// callback runs before returning.
888 void Read(DemuxerStream::Type type, const DemuxerStream::ReadCB& read_cb) {
889 demuxer_->GetStream(type)->Read(read_cb);
890 message_loop_.RunUntilIdle();
893 void ReadAudio(const DemuxerStream::ReadCB& read_cb) {
894 Read(DemuxerStream::AUDIO, read_cb);
897 void ReadVideo(const DemuxerStream::ReadCB& read_cb) {
898 Read(DemuxerStream::VIDEO, read_cb);
// Expects interleaved A/V reads mirroring GenerateCluster()'s block order.
901 void GenerateExpectedReads(int timecode, int block_count) {
902 GenerateExpectedReads(timecode, timecode, block_count);
905 void GenerateExpectedReads(int start_audio_timecode,
906 int start_video_timecode,
908 CHECK_GT(block_count, 0);
// Mirrors GenerateCluster(): a single block is audio-only.
910 if (block_count == 1) {
911 ExpectRead(DemuxerStream::AUDIO, start_audio_timecode);
915 int audio_timecode = start_audio_timecode;
916 int video_timecode = start_video_timecode;
918 for (int i = 0; i < block_count; i++) {
919 if (audio_timecode <= video_timecode) {
920 ExpectRead(DemuxerStream::AUDIO, audio_timecode);
921 audio_timecode += kAudioBlockDuration;
925 ExpectRead(DemuxerStream::VIDEO, video_timecode);
926 video_timecode += kVideoBlockDuration;
// Expects |block_count| sequential reads on one stream.
930 void GenerateSingleStreamExpectedReads(int timecode,
932 DemuxerStream::Type type,
933 int block_duration) {
934 CHECK_GT(block_count, 0);
935 int stream_timecode = timecode;
937 for (int i = 0; i < block_count; i++) {
938 ExpectRead(type, stream_timecode);
939 stream_timecode += block_duration;
943 void GenerateAudioStreamExpectedReads(int timecode, int block_count) {
944 GenerateSingleStreamExpectedReads(
945 timecode, block_count, DemuxerStream::AUDIO, kAudioBlockDuration);
948 void GenerateVideoStreamExpectedReads(int timecode, int block_count) {
949 GenerateSingleStreamExpectedReads(
950 timecode, block_count, DemuxerStream::VIDEO, kVideoBlockDuration);
953 scoped_ptr<Cluster> GenerateEmptyCluster(int timecode) {
955 cb.SetClusterTimecode(timecode);
959 void CheckExpectedRanges(const std::string& expected) {
960 CheckExpectedRanges(kSourceId, expected);
// Formats the demuxer's buffered ranges as "{ [start,end) ... }" (ms) and
// compares against |expected|.
963 void CheckExpectedRanges(const std::string& id,
964 const std::string& expected) {
965 Ranges<base::TimeDelta> r = demuxer_->GetBufferedRanges(id);
967 std::stringstream ss;
969 for (size_t i = 0; i < r.size(); ++i) {
970 ss << "[" << r.start(i).InMilliseconds() << ","
971 << r.end(i).InMilliseconds() << ") ";
974 EXPECT_EQ(expected, ss.str());
977 MOCK_METHOD2(ReadDone, void(DemuxerStream::Status status,
978 const scoped_refptr<DecoderBuffer>&));
// Read callback that copies the result into out-params for inspection.
980 void StoreStatusAndBuffer(DemuxerStream::Status* status_out,
981 scoped_refptr<DecoderBuffer>* buffer_out,
982 DemuxerStream::Status status,
983 const scoped_refptr<DecoderBuffer>& buffer) {
984 *status_out = status;
985 *buffer_out = buffer;
// Reads from |type| until a non-kOk status or EOS buffer, recording the
// timestamp of the last successful (non-EOS) read.
988 void ReadUntilNotOkOrEndOfStream(DemuxerStream::Type type,
989 DemuxerStream::Status* status,
990 base::TimeDelta* last_timestamp) {
991 DemuxerStream* stream = demuxer_->GetStream(type);
992 scoped_refptr<DecoderBuffer> buffer;
994 *last_timestamp = kNoTimestamp();
996 stream->Read(base::Bind(&ChunkDemuxerTest::StoreStatusAndBuffer,
997 base::Unretained(this), status, &buffer));
998 base::MessageLoop::current()->RunUntilIdle();
999 if (*status == DemuxerStream::kOk && !buffer->end_of_stream())
1000 *last_timestamp = buffer->timestamp();
1001 } while (*status == DemuxerStream::kOk && !buffer->end_of_stream());
// Expects the next read on |type| to return an end-of-stream buffer.
1004 void ExpectEndOfStream(DemuxerStream::Type type) {
1005 EXPECT_CALL(*this, ReadDone(DemuxerStream::kOk, IsEndOfStream()));
1006 demuxer_->GetStream(type)->Read(base::Bind(
1007 &ChunkDemuxerTest::ReadDone, base::Unretained(this)));
1008 message_loop_.RunUntilIdle();
// Expects the next read on |type| to succeed with |timestamp_in_ms|.
1011 void ExpectRead(DemuxerStream::Type type, int64 timestamp_in_ms) {
1012 EXPECT_CALL(*this, ReadDone(DemuxerStream::kOk,
1013 HasTimestamp(timestamp_in_ms)));
1014 demuxer_->GetStream(type)->Read(base::Bind(
1015 &ChunkDemuxerTest::ReadDone, base::Unretained(this)));
1016 message_loop_.RunUntilIdle();
// Expects the next read on |type| to report a decoder config change.
1019 void ExpectConfigChanged(DemuxerStream::Type type) {
1020 EXPECT_CALL(*this, ReadDone(DemuxerStream::kConfigChanged, _));
1021 demuxer_->GetStream(type)->Read(base::Bind(
1022 &ChunkDemuxerTest::ReadDone, base::Unretained(this)));
1023 message_loop_.RunUntilIdle();
// Reads buffers from |stream| and checks their timestamps against the
// space-delimited |expected| string; a trailing "P" on a token also
// verifies the buffer's preroll discard padding.
// Fix: "&timestamps" had been mangled to "×tamps" by an HTML-entity
// unescape (`&times` -> ×); the address-of argument is restored.
1026 void CheckExpectedBuffers(DemuxerStream* stream,
1027 const std::string& expected) {
1028 std::vector<std::string> timestamps;
1029 base::SplitString(expected, ' ', &timestamps);
1030 std::stringstream ss;
1031 for (size_t i = 0; i < timestamps.size(); ++i) {
1032 // Initialize status to kAborted since it's possible for Read() to return
1033 // without calling StoreStatusAndBuffer() if it doesn't have any buffers
1035 DemuxerStream::Status status = DemuxerStream::kAborted;
1036 scoped_refptr<DecoderBuffer> buffer;
1037 stream->Read(base::Bind(&ChunkDemuxerTest::StoreStatusAndBuffer,
1038 base::Unretained(this), &status, &buffer));
1039 base::MessageLoop::current()->RunUntilIdle();
1040 if (status != DemuxerStream::kOk || buffer->end_of_stream())
1045 ss << buffer->timestamp().InMilliseconds();
1047 // Handle preroll buffers.
1048 if (EndsWith(timestamps[i], "P", true)) {
1049 ASSERT_EQ(kInfiniteDuration(), buffer->discard_padding().first);
1050 ASSERT_EQ(base::TimeDelta(), buffer->discard_padding().second);
1054 EXPECT_EQ(expected, ss.str());
1057 MOCK_METHOD1(Checkpoint, void(int id));
// Expected per-iteration audio/video timestamps for ParseWebMFile().
1059 struct BufferTimestamps {
// Sentinel: skip the Read() for that stream on this iteration.
1063 static const int kSkip = -1;
1065 // Test parsing a WebM file.
1066 // |filename| - The name of the file in media/test/data to parse.
1067 // |timestamps| - The expected timestamps on the parsed buffers.
1068 // a timestamp of kSkip indicates that a Read() call for that stream
1069 // shouldn't be made on that iteration of the loop. If both streams have
1070 // a kSkip then the loop will terminate.
1071 bool ParseWebMFile(const std::string& filename,
1072 const BufferTimestamps* timestamps,
1073 const base::TimeDelta& duration) {
1074 return ParseWebMFile(filename, timestamps, duration, HAS_AUDIO | HAS_VIDEO);
1077 bool ParseWebMFile(const std::string& filename,
1078 const BufferTimestamps* timestamps,
1079 const base::TimeDelta& duration,
1081 EXPECT_CALL(*this, DemuxerOpened());
1082 demuxer_->Initialize(
1083 &host_, CreateInitDoneCB(duration, PIPELINE_OK), true);
1085 if (AddId(kSourceId, stream_flags) != ChunkDemuxer::kOk)
1088 // Read a WebM file into memory and send the data to the demuxer.
1089 scoped_refptr<DecoderBuffer> buffer = ReadTestDataFile(filename);
1090 AppendDataInPieces(buffer->data(), buffer->data_size(), 512);
1092 // Verify that the timestamps on the first few packets match what we
1095 (timestamps[i].audio_time_ms != kSkip ||
1096 timestamps[i].video_time_ms != kSkip);
1098 bool audio_read_done = false;
1099 bool video_read_done = false;
1101 if (timestamps[i].audio_time_ms != kSkip) {
1102 ReadAudio(base::Bind(&OnReadDone,
1103 base::TimeDelta::FromMilliseconds(
1104 timestamps[i].audio_time_ms),
1106 EXPECT_TRUE(audio_read_done);
1109 if (timestamps[i].video_time_ms != kSkip) {
1110 ReadVideo(base::Bind(&OnReadDone,
1111 base::TimeDelta::FromMilliseconds(
1112 timestamps[i].video_time_ms),
1114 EXPECT_TRUE(video_read_done);
1121 MOCK_METHOD0(DemuxerOpened, void());
1122 // TODO(xhwang): This is a workaround of the issue that move-only parameters
1123 // are not supported in mocked methods. Remove this when the issue is fixed
1124 // (http://code.google.com/p/googletest/issues/detail?id=395) or when we use
1125 // std::string instead of scoped_ptr<uint8[]> (http://crbug.com/130689).
1126 MOCK_METHOD3(NeedKeyMock, void(const std::string& type,
1127 const uint8* init_data, int init_data_size));
1128 // Adapter: unwraps the vector into a raw pointer/size for the mock above.
1128 void DemuxerNeedKey(const std::string& type,
1129 const std::vector<uint8>& init_data) {
1130 const uint8* init_data_ptr = init_data.empty() ? NULL : &init_data[0];
1131 NeedKeyMock(type, init_data_ptr, init_data.size());
1134 // Starts and completes a seek to |seek_time|, expecting PIPELINE_OK.
1134 void Seek(base::TimeDelta seek_time) {
1135 demuxer_->StartWaitingForSeek(seek_time);
1136 demuxer_->Seek(seek_time, NewExpectedStatusCB(PIPELINE_OK));
1137 message_loop_.RunUntilIdle();
1140 // Signals end of stream with |status| and pumps pending tasks.
1140 void MarkEndOfStream(PipelineStatus status) {
1141 demuxer_->MarkEndOfStream(status);
1142 message_loop_.RunUntilIdle();
1145 // Records |timestamp_offset| for |id|'s next append; refuses while the
1145 // demuxer is mid-segment for that id (per the MSE spec's offset rules).
1145 bool SetTimestampOffset(const std::string& id,
1146 base::TimeDelta timestamp_offset) {
1147 if (demuxer_->IsParsingMediaSegment(id))
1150 timestamp_offset_map_[id] = timestamp_offset;
1154 // Message loop the demuxer callbacks run on in these tests.
1154 base::MessageLoop message_loop_;
1155 MockDemuxerHost host_;
1157 scoped_ptr<ChunkDemuxer> demuxer_;
1159 // Append window applied to the next AppendData() call.
1159 base::TimeDelta append_window_start_for_next_append_;
1160 base::TimeDelta append_window_end_for_next_append_;
1162 // Map of source id to timestamp offset to use for the next AppendData()
1163 // operation for that source id.
1164 std::map<std::string, base::TimeDelta> timestamp_offset_map_;
1167 DISALLOW_COPY_AND_ASSIGN(ChunkDemuxerTest);
1170 TEST_F(ChunkDemuxerTest, Init) {
1171 // Test no streams, audio-only, video-only, and audio & video scenarios.
1172 // Audio and video streams can be encrypted or not encrypted.
1173 // The 4 loop bits below enumerate all 16 combinations.
1173 for (int i = 0; i < 16; i++) {
1174 bool has_audio = (i & 0x1) != 0;
1175 bool has_video = (i & 0x2) != 0;
1176 bool is_audio_encrypted = (i & 0x4) != 0;
1177 bool is_video_encrypted = (i & 0x8) != 0;
1179 // No test on invalid combination.
1180 if ((!has_audio && is_audio_encrypted) ||
1181 (!has_video && is_video_encrypted)) {
1187 if (is_audio_encrypted || is_video_encrypted) {
1188 int need_key_count = (is_audio_encrypted ? 1 : 0) +
1189 (is_video_encrypted ? 1 : 0);
1190 EXPECT_CALL(*this, NeedKeyMock(kWebMEncryptInitDataType, NotNull(),
1191 DecryptConfig::kDecryptionKeySize))
1192 .Times(Exactly(need_key_count));
1195 int stream_flags = 0;
1197 stream_flags |= HAS_AUDIO;
1200 stream_flags |= HAS_VIDEO;
1202 ASSERT_TRUE(InitDemuxerWithEncryptionInfo(
1203 stream_flags, is_audio_encrypted, is_video_encrypted));
1205 DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
1207 ASSERT_TRUE(audio_stream);
1209 // Verify the Vorbis audio config advertised by the init segment.
1209 const AudioDecoderConfig& config = audio_stream->audio_decoder_config();
1210 EXPECT_EQ(kCodecVorbis, config.codec());
1211 EXPECT_EQ(32, config.bits_per_channel());
1212 EXPECT_EQ(CHANNEL_LAYOUT_STEREO, config.channel_layout());
1213 EXPECT_EQ(44100, config.samples_per_second());
1214 EXPECT_TRUE(config.extra_data());
1215 EXPECT_GT(config.extra_data_size(), 0u);
1216 EXPECT_EQ(kSampleFormatPlanarF32, config.sample_format());
1217 EXPECT_EQ(is_audio_encrypted,
1218 audio_stream->audio_decoder_config().is_encrypted());
1219 EXPECT_TRUE(static_cast<ChunkDemuxerStream*>(audio_stream)
1220 ->supports_partial_append_window_trimming());
1222 EXPECT_FALSE(audio_stream);
1225 DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
1227 EXPECT_TRUE(video_stream);
1228 EXPECT_EQ(is_video_encrypted,
1229 video_stream->video_decoder_config().is_encrypted());
1230 EXPECT_FALSE(static_cast<ChunkDemuxerStream*>(video_stream)
1231 ->supports_partial_append_window_trimming());
1233 EXPECT_FALSE(video_stream);
1241 // TODO(acolwell): Fold this test into Init tests since the tests are
1242 // almost identical.
1243 TEST_F(ChunkDemuxerTest, InitText) {
1244 // Test with 1 video stream and 1 text streams, and 0 or 1 audio streams.
1245 // No encryption cases handled here.
1246 bool has_video = true;
1247 bool is_audio_encrypted = false;
1248 bool is_video_encrypted = false;
1249 for (int i = 0; i < 2; i++) {
1250 bool has_audio = (i & 0x1) != 0;
1254 DemuxerStream* text_stream = NULL;
1255 TextTrackConfig text_config;
1256 // Capture the text stream/config the demuxer reports to the host.
1256 EXPECT_CALL(host_, AddTextStream(_, _))
1257 .WillOnce(DoAll(SaveArg<0>(&text_stream),
1258 SaveArg<1>(&text_config)));
1260 int stream_flags = HAS_TEXT;
1262 stream_flags |= HAS_AUDIO;
1265 stream_flags |= HAS_VIDEO;
1267 ASSERT_TRUE(InitDemuxerWithEncryptionInfo(
1268 stream_flags, is_audio_encrypted, is_video_encrypted));
1269 ASSERT_TRUE(text_stream);
1270 EXPECT_EQ(DemuxerStream::TEXT, text_stream->type());
1271 EXPECT_EQ(kTextSubtitles, text_config.kind());
1272 EXPECT_FALSE(static_cast<ChunkDemuxerStream*>(text_stream)
1273 ->supports_partial_append_window_trimming());
1275 DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
1277 ASSERT_TRUE(audio_stream);
1279 const AudioDecoderConfig& config = audio_stream->audio_decoder_config();
1280 EXPECT_EQ(kCodecVorbis, config.codec());
1281 EXPECT_EQ(32, config.bits_per_channel());
1282 EXPECT_EQ(CHANNEL_LAYOUT_STEREO, config.channel_layout());
1283 EXPECT_EQ(44100, config.samples_per_second());
1284 EXPECT_TRUE(config.extra_data());
1285 EXPECT_GT(config.extra_data_size(), 0u);
1286 EXPECT_EQ(kSampleFormatPlanarF32, config.sample_format());
1287 EXPECT_EQ(is_audio_encrypted,
1288 audio_stream->audio_decoder_config().is_encrypted());
1289 EXPECT_TRUE(static_cast<ChunkDemuxerStream*>(audio_stream)
1290 ->supports_partial_append_window_trimming());
1292 EXPECT_FALSE(audio_stream);
1295 DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
1297 EXPECT_TRUE(video_stream);
1298 EXPECT_EQ(is_video_encrypted,
1299 video_stream->video_decoder_config().is_encrypted());
1300 EXPECT_FALSE(static_cast<ChunkDemuxerStream*>(video_stream)
1301 ->supports_partial_append_window_trimming());
1303 EXPECT_FALSE(video_stream);
1311 TEST_F(ChunkDemuxerTest, SingleTextTrackIdChange) {
1312 // Test with 1 video stream, 1 audio, and 1 text stream. Send a second init
1313 // segment in which the text track ID changes. Verify appended buffers before
1314 // and after the second init segment map to the same underlying track buffers.
1316 DemuxerStream* text_stream = NULL;
1317 TextTrackConfig text_config;
1318 EXPECT_CALL(host_, AddTextStream(_, _))
1319 .WillOnce(DoAll(SaveArg<0>(&text_stream),
1320 SaveArg<1>(&text_config)));
1321 ASSERT_TRUE(InitDemuxerWithEncryptionInfo(
1322 HAS_TEXT | HAS_AUDIO | HAS_VIDEO, false, false));
1323 DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
1324 DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
1325 ASSERT_TRUE(audio_stream);
1326 ASSERT_TRUE(video_stream);
1327 ASSERT_TRUE(text_stream);
1330 MuxedStreamInfo(kAudioTrackNum, "0K 23K"),
1331 MuxedStreamInfo(kVideoTrackNum, "0K 30"),
1332 MuxedStreamInfo(kTextTrackNum, "10K"));
1333 CheckExpectedRanges(kSourceId, "{ [0,46) }");
1335 scoped_ptr<uint8[]> info_tracks;
1336 int info_tracks_size = 0;
1337 CreateInitSegmentWithAlternateTextTrackNum(HAS_TEXT | HAS_AUDIO | HAS_VIDEO,
1339 &info_tracks, &info_tracks_size);
1340 demuxer_->AppendData(kSourceId, info_tracks.get(), info_tracks_size,
1341 append_window_start_for_next_append_,
1342 append_window_end_for_next_append_,
1343 // Fix: take the map entry's address; '&' was lost to
1343 // entity mangling ("&times" -> U+00D7).
1343 &timestamp_offset_map_[kSourceId]);
1346 MuxedStreamInfo(kAudioTrackNum, "46K 69K"),
1347 MuxedStreamInfo(kVideoTrackNum, "60K"),
1348 MuxedStreamInfo(kAlternateTextTrackNum, "45K"));
1350 CheckExpectedRanges(kSourceId, "{ [0,92) }");
1351 CheckExpectedBuffers(audio_stream, "0 23 46 69");
1352 CheckExpectedBuffers(video_stream, "0 30 60");
1353 CheckExpectedBuffers(text_stream, "10 45");
1358 TEST_F(ChunkDemuxerTest, InitSegmentSetsNeedRandomAccessPointFlag) {
1359 // Tests that non-keyframes following an init segment are allowed
1360 // and dropped, as expected if the initialization segment received
1361 // algorithm correctly sets the needs random access point flag to true for all
1362 // track buffers. Note that the first initialization segment is insufficient
1363 // to fully test this since needs random access point flag initializes to
1366 DemuxerStream* text_stream = NULL;
1367 EXPECT_CALL(host_, AddTextStream(_, _))
1368 .WillOnce(SaveArg<0>(&text_stream));
1369 ASSERT_TRUE(InitDemuxerWithEncryptionInfo(
1370 HAS_TEXT | HAS_AUDIO | HAS_VIDEO, false, false));
1371 DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
1372 DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
1373 ASSERT_TRUE(audio_stream && video_stream && text_stream);
1376 MuxedStreamInfo(kAudioTrackNum, "23K"),
1377 MuxedStreamInfo(kVideoTrackNum, "0 30K"),
1378 MuxedStreamInfo(kTextTrackNum, "25K 40K"));
1379 CheckExpectedRanges(kSourceId, "{ [23,46) }");
1381 // Append a second init segment, then non-keyframes; the leading
1381 // non-keyframes (video 60, in "60 90K") must be dropped.
1381 AppendInitSegment(HAS_TEXT | HAS_AUDIO | HAS_VIDEO);
1383 MuxedStreamInfo(kAudioTrackNum, "46K 69K"),
1384 MuxedStreamInfo(kVideoTrackNum, "60 90K"),
1385 MuxedStreamInfo(kTextTrackNum, "80K 90K"));
1386 CheckExpectedRanges(kSourceId, "{ [23,92) }");
1388 CheckExpectedBuffers(audio_stream, "23 46 69");
1389 CheckExpectedBuffers(video_stream, "30 90");
1390 CheckExpectedBuffers(text_stream, "25 40 80 90");
1393 // Make sure that the demuxer reports an error if Shutdown()
1394 // is called before all the initialization segments are appended.
1395 TEST_F(ChunkDemuxerTest, Shutdown_BeforeAllInitSegmentsAppended) {
1396 EXPECT_CALL(*this, DemuxerOpened());
1397 demuxer_->Initialize(
1398 &host_, CreateInitDoneCB(
1399 kDefaultDuration(), DEMUXER_ERROR_COULD_NOT_OPEN), true);
1401 EXPECT_EQ(AddId("audio", HAS_AUDIO), ChunkDemuxer::kOk);
1402 EXPECT_EQ(AddId("video", HAS_VIDEO), ChunkDemuxer::kOk);
1404 // Only the audio id gets its init segment; "video" is left pending.
1404 AppendInitSegmentWithSourceId("audio", HAS_AUDIO);
1409 // Same scenario, but the incomplete id also carries an inband text track.
1409 TEST_F(ChunkDemuxerTest, Shutdown_BeforeAllInitSegmentsAppendedText) {
1410 EXPECT_CALL(*this, DemuxerOpened());
1411 demuxer_->Initialize(
1412 &host_, CreateInitDoneCB(
1413 kDefaultDuration(), DEMUXER_ERROR_COULD_NOT_OPEN), true);
1415 EXPECT_EQ(AddId("audio", HAS_AUDIO), ChunkDemuxer::kOk);
1416 EXPECT_EQ(AddId("video_and_text", HAS_VIDEO), ChunkDemuxer::kOk);
1418 EXPECT_CALL(host_, AddTextStream(_, _))
1421 AppendInitSegmentWithSourceId("video_and_text", HAS_VIDEO | HAS_TEXT);
1426 // Verifies that all streams waiting for data receive an end of stream
1427 // buffer when Shutdown() is called.
1428 TEST_F(ChunkDemuxerTest, Shutdown_EndOfStreamWhileWaitingForData) {
1429 DemuxerStream* text_stream = NULL;
1430 EXPECT_CALL(host_, AddTextStream(_, _))
1431 .WillOnce(SaveArg<0>(&text_stream));
1432 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));
1434 DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
1435 DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
1437 bool audio_read_done = false;
1438 bool video_read_done = false;
1439 bool text_read_done = false;
1440 audio_stream->Read(base::Bind(&OnReadDone_EOSExpected, &audio_read_done));
1441 video_stream->Read(base::Bind(&OnReadDone_EOSExpected, &video_read_done));
1442 text_stream->Read(base::Bind(&OnReadDone_EOSExpected, &text_read_done));
1443 message_loop_.RunUntilIdle();
1445 // No data appended yet, so all three reads are still pending.
1445 EXPECT_FALSE(audio_read_done);
1446 EXPECT_FALSE(video_read_done);
1447 EXPECT_FALSE(text_read_done);
1451 EXPECT_TRUE(audio_read_done);
1452 EXPECT_TRUE(video_read_done);
1453 EXPECT_TRUE(text_read_done);
1456 // Test that Seek() completes successfully when the first cluster
1458 TEST_F(ChunkDemuxerTest, AppendDataAfterSeek) {
1459 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1460 AppendCluster(kDefaultFirstCluster());
1464 // Checkpoints verify ordering: 1 fires before the append, 2 after.
1464 EXPECT_CALL(*this, Checkpoint(1));
1466 Seek(base::TimeDelta::FromMilliseconds(46));
1468 EXPECT_CALL(*this, Checkpoint(2));
1472 AppendCluster(kDefaultSecondCluster());
1474 message_loop_.RunUntilIdle();
1479 // Test that parsing errors are handled for clusters appended after init.
1480 TEST_F(ChunkDemuxerTest, ErrorWhileParsingClusterAfterInit) {
1481 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1482 AppendCluster(kDefaultFirstCluster());
1484 EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
1488 // Test the case where a Seek() is requested while the parser
1489 // is in the middle of cluster. This is to verify that the parser
1490 // does not reset itself on a seek.
1491 TEST_F(ChunkDemuxerTest, SeekWhileParsingCluster) {
1492 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1496 scoped_ptr<Cluster> cluster_a(GenerateCluster(0, 6));
1498 // Split the cluster into two appends at an arbitrary point near the end.
1499 int first_append_size = cluster_a->size() - 11;
1500 int second_append_size = cluster_a->size() - first_append_size;
1502 // Append the first part of the cluster.
1503 AppendData(cluster_a->data(), first_append_size);
1505 ExpectRead(DemuxerStream::AUDIO, 0);
1506 ExpectRead(DemuxerStream::VIDEO, 0);
1507 ExpectRead(DemuxerStream::AUDIO, kAudioBlockDuration);
1509 // Seek while the parser still holds the tail of cluster_a.
1509 Seek(base::TimeDelta::FromSeconds(5));
1511 // Append the rest of the cluster.
1512 AppendData(cluster_a->data() + first_append_size, second_append_size);
1514 // Append the new cluster and verify that only the blocks
1515 // in the new cluster are returned.
1516 AppendCluster(GenerateCluster(5000, 6));
1517 GenerateExpectedReads(5000, 6);
1520 // Test the case where AppendData() is called before Init().
1521 TEST_F(ChunkDemuxerTest, AppendDataBeforeInit) {
1522 scoped_ptr<uint8[]> info_tracks;
1523 int info_tracks_size = 0;
1524 CreateInitSegment(HAS_AUDIO | HAS_VIDEO,
1525 false, false, &info_tracks, &info_tracks_size);
1526 demuxer_->AppendData(kSourceId, info_tracks.get(), info_tracks_size,
1527 append_window_start_for_next_append_,
1528 append_window_end_for_next_append_,
1529 // Fix: take the map entry's address; '&' was lost to
1529 // entity mangling ("&times" -> U+00D7).
1529 &timestamp_offset_map_[kSourceId]);
1532 // Make sure Read() callbacks are dispatched with the proper data.
1533 TEST_F(ChunkDemuxerTest, Read) {
1534 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1536 AppendCluster(kDefaultFirstCluster());
1538 bool audio_read_done = false;
1539 bool video_read_done = false;
1540 // Expect both streams' first buffers at timestamp 0 ms.
1540 ReadAudio(base::Bind(&OnReadDone,
1541 base::TimeDelta::FromMilliseconds(0),
1543 ReadVideo(base::Bind(&OnReadDone,
1544 base::TimeDelta::FromMilliseconds(0),
1547 EXPECT_TRUE(audio_read_done);
1548 EXPECT_TRUE(video_read_done);
1551 // Appends clusters whose ranges overlap previously appended data and
1551 // verifies the demuxer keeps accepting appends afterwards.
1551 TEST_F(ChunkDemuxerTest, OutOfOrderClusters) {
1552 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1553 AppendCluster(kDefaultFirstCluster());
1554 AppendCluster(GenerateCluster(10, 4));
1556 // Make sure that AppendCluster() does not fail with a cluster that has
1557 // overlaps with the previously appended cluster.
1558 AppendCluster(GenerateCluster(5, 4));
1560 // Verify that AppendData() can still accept more data.
1561 scoped_ptr<Cluster> cluster_c(GenerateCluster(45, 2));
1562 demuxer_->AppendData(kSourceId, cluster_c->data(), cluster_c->size(),
1563 append_window_start_for_next_append_,
1564 append_window_end_for_next_append_,
1565 // Fix: '&' restored (entity mangling "&times" -> U+00D7).
1565 &timestamp_offset_map_[kSourceId]);
1568 // Non-monotonic block timecodes (that stay above the cluster timecode)
1568 // must surface a decode error and make later appends no-ops.
1568 TEST_F(ChunkDemuxerTest, NonMonotonicButAboveClusterTimecode) {
1569 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1570 AppendCluster(kDefaultFirstCluster());
1574 // Test the case where block timecodes are not monotonically
1575 // increasing but stay above the cluster timecode.
1576 cb.SetClusterTimecode(5);
1577 AddSimpleBlock(&cb, kAudioTrackNum, 5);
1578 AddSimpleBlock(&cb, kVideoTrackNum, 10);
1579 AddSimpleBlock(&cb, kAudioTrackNum, 7);
1580 AddSimpleBlock(&cb, kVideoTrackNum, 15);
1582 EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
1583 AppendCluster(cb.Finish());
1585 // Verify that AppendData() ignores data after the error.
1586 scoped_ptr<Cluster> cluster_b(GenerateCluster(20, 2));
1587 demuxer_->AppendData(kSourceId, cluster_b->data(), cluster_b->size(),
1588 append_window_start_for_next_append_,
1589 append_window_end_for_next_append_,
1590 // Fix: '&' restored (entity mangling "&times" -> U+00D7).
1590 &timestamp_offset_map_[kSourceId]);
1593 // Block timecodes that go backwards and dip below the cluster timecode
1593 // must surface a decode error and make later appends no-ops.
1593 TEST_F(ChunkDemuxerTest, BackwardsAndBeforeClusterTimecode) {
1594 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1595 AppendCluster(kDefaultFirstCluster());
1599 // Test timecodes going backwards and including values less than the cluster
1601 cb.SetClusterTimecode(5);
1602 AddSimpleBlock(&cb, kAudioTrackNum, 5);
1603 AddSimpleBlock(&cb, kVideoTrackNum, 5);
1604 AddSimpleBlock(&cb, kAudioTrackNum, 3);
1605 AddSimpleBlock(&cb, kVideoTrackNum, 3);
1607 EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
1608 AppendCluster(cb.Finish());
1610 // Verify that AppendData() ignores data after the error.
1611 scoped_ptr<Cluster> cluster_b(GenerateCluster(6, 2));
1612 demuxer_->AppendData(kSourceId, cluster_b->data(), cluster_b->size(),
1613 append_window_start_for_next_append_,
1614 append_window_end_for_next_append_,
1615 // Fix: '&' restored (entity mangling "&times" -> U+00D7).
1615 &timestamp_offset_map_[kSourceId]);
1619 // Timestamps must increase monotonically per stream, not per cluster:
1619 // the audio track regressing (5 -> 4) is an error even though the
1619 // interleaved video track is increasing.
1619 TEST_F(ChunkDemuxerTest, PerStreamMonotonicallyIncreasingTimestamps) {
1620 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1621 AppendCluster(kDefaultFirstCluster());
1625 // Test monotonic increasing timestamps on a per stream
1627 cb.SetClusterTimecode(5);
1628 AddSimpleBlock(&cb, kAudioTrackNum, 5);
1629 AddSimpleBlock(&cb, kVideoTrackNum, 5);
1630 AddSimpleBlock(&cb, kAudioTrackNum, 4);
1631 AddSimpleBlock(&cb, kVideoTrackNum, 7);
1633 EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
1634 AppendCluster(cb.Finish());
1637 // Test the case where a cluster is passed to AppendCluster() before
1638 // INFO & TRACKS data.
1639 TEST_F(ChunkDemuxerTest, ClusterBeforeInitSegment) {
1640 EXPECT_CALL(*this, DemuxerOpened());
1641 demuxer_->Initialize(
1642 &host_, NewExpectedStatusCB(DEMUXER_ERROR_COULD_NOT_OPEN), true);
1644 ASSERT_EQ(AddId(), ChunkDemuxer::kOk);
1646 AppendCluster(GenerateCluster(0, 1));
1649 // Test cases where we get an MarkEndOfStream() call during initialization.
1650 TEST_F(ChunkDemuxerTest, EOSDuringInit) {
1651 EXPECT_CALL(*this, DemuxerOpened());
1652 demuxer_->Initialize(
1653 &host_, NewExpectedStatusCB(DEMUXER_ERROR_COULD_NOT_OPEN), true);
1654 MarkEndOfStream(PIPELINE_OK);
1657 // End of stream with no appends at all: ranges stay empty throughout.
1657 TEST_F(ChunkDemuxerTest, EndOfStreamWithNoAppend) {
1658 EXPECT_CALL(*this, DemuxerOpened());
1659 demuxer_->Initialize(
1660 &host_, NewExpectedStatusCB(DEMUXER_ERROR_COULD_NOT_OPEN), true);
1662 ASSERT_EQ(AddId(), ChunkDemuxer::kOk);
1664 CheckExpectedRanges("{ }");
1665 MarkEndOfStream(PIPELINE_OK);
1667 CheckExpectedRanges("{ }");
1668 demuxer_->RemoveId(kSourceId);
1672 // EOS after init but with no media data: ranges stay empty.
1672 TEST_F(ChunkDemuxerTest, EndOfStreamWithNoMediaAppend) {
1673 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1675 CheckExpectedRanges("{ }");
1676 MarkEndOfStream(PIPELINE_OK);
1677 CheckExpectedRanges("{ }");
1680 // EOS with a decode error surfaces the error but keeps buffered ranges.
1680 TEST_F(ChunkDemuxerTest, DecodeErrorEndOfStream) {
1681 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1683 AppendCluster(kDefaultFirstCluster());
1684 CheckExpectedRanges(kDefaultFirstClusterRange);
1686 EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
1687 MarkEndOfStream(PIPELINE_ERROR_DECODE);
1688 CheckExpectedRanges(kDefaultFirstClusterRange);
1691 // Same as above with a network error status.
1691 TEST_F(ChunkDemuxerTest, NetworkErrorEndOfStream) {
1692 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1694 AppendCluster(kDefaultFirstCluster());
1695 CheckExpectedRanges(kDefaultFirstClusterRange);
1697 EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_NETWORK));
1698 MarkEndOfStream(PIPELINE_ERROR_NETWORK);
1701 // Helper class to reduce duplicate code when testing end of stream
1703 class EndOfStreamHelper {
1705 explicit EndOfStreamHelper(Demuxer* demuxer)
1706 : demuxer_(demuxer),
1707 audio_read_done_(false),
1708 video_read_done_(false) {
1711 // Request a read on the audio and video streams.
1712 void RequestReads() {
1713 EXPECT_FALSE(audio_read_done_);
1714 EXPECT_FALSE(video_read_done_);
1716 DemuxerStream* audio = demuxer_->GetStream(DemuxerStream::AUDIO);
1717 DemuxerStream* video = demuxer_->GetStream(DemuxerStream::VIDEO);
1719 audio->Read(base::Bind(&OnEndOfStreamReadDone, &audio_read_done_));
1720 video->Read(base::Bind(&OnEndOfStreamReadDone, &video_read_done_));
1721 base::MessageLoop::current()->RunUntilIdle();
1724 // Check to see if |audio_read_done_| and |video_read_done_| variables
1725 // match |expected|.
1726 void CheckIfReadDonesWereCalled(bool expected) {
1727 base::MessageLoop::current()->RunUntilIdle();
1728 EXPECT_EQ(expected, audio_read_done_);
1729 EXPECT_EQ(expected, video_read_done_);
1733 // Read callback: asserts the buffer is an EOS buffer and flags completion.
1733 static void OnEndOfStreamReadDone(
1735 DemuxerStream::Status status,
1736 const scoped_refptr<DecoderBuffer>& buffer) {
1737 EXPECT_EQ(status, DemuxerStream::kOk);
1738 EXPECT_TRUE(buffer->end_of_stream());
1743 bool audio_read_done_;
1744 bool video_read_done_;
1746 DISALLOW_COPY_AND_ASSIGN(EndOfStreamHelper);
1749 // Make sure that all pending reads that we don't have media data for get an
1750 // "end of stream" buffer when MarkEndOfStream() is called.
1751 TEST_F(ChunkDemuxerTest, EndOfStreamWithPendingReads) {
1752 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO))
1754 AppendCluster(GenerateCluster(0, 2));
1756 bool audio_read_done_1 = false;
1757 bool video_read_done_1 = false;
1758 EndOfStreamHelper end_of_stream_helper_1(demuxer_.get());
1759 EndOfStreamHelper end_of_stream_helper_2(demuxer_.get());
1761 ReadAudio(base::Bind(&OnReadDone,
1762 base::TimeDelta::FromMilliseconds(0),
1763 &audio_read_done_1));
1764 ReadVideo(base::Bind(&OnReadDone,
1765 base::TimeDelta::FromMilliseconds(0),
1766 &video_read_done_1));
1767 message_loop_.RunUntilIdle();
1769 EXPECT_TRUE(audio_read_done_1);
1770 EXPECT_TRUE(video_read_done_1);
1772 end_of_stream_helper_1.RequestReads();
1774 EXPECT_CALL(host_, SetDuration(
1775 base::TimeDelta::FromMilliseconds(kVideoBlockDuration)));
1776 MarkEndOfStream(PIPELINE_OK);
1778 end_of_stream_helper_1.CheckIfReadDonesWereCalled(true);
1780 end_of_stream_helper_2.RequestReads();
1781 end_of_stream_helper_2.CheckIfReadDonesWereCalled(true);
1784 // Make sure that all Read() calls after we get an MarkEndOfStream()
1785 // call return an "end of stream" buffer.
1786 TEST_F(ChunkDemuxerTest, ReadsAfterEndOfStream) {
1787 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1789 AppendCluster(GenerateCluster(0, 2));
1791 bool audio_read_done_1 = false;
1792 bool video_read_done_1 = false;
1793 EndOfStreamHelper end_of_stream_helper_1(demuxer_.get());
1794 EndOfStreamHelper end_of_stream_helper_2(demuxer_.get());
1795 EndOfStreamHelper end_of_stream_helper_3(demuxer_.get());
1797 ReadAudio(base::Bind(&OnReadDone,
1798 base::TimeDelta::FromMilliseconds(0),
1799 &audio_read_done_1));
1800 ReadVideo(base::Bind(&OnReadDone,
1801 base::TimeDelta::FromMilliseconds(0),
1802 &video_read_done_1));
1804 end_of_stream_helper_1.RequestReads();
1806 EXPECT_TRUE(audio_read_done_1);
1807 EXPECT_TRUE(video_read_done_1);
1808 // Before EOS is marked, the helper reads are still pending.
1808 end_of_stream_helper_1.CheckIfReadDonesWereCalled(false);
1810 EXPECT_CALL(host_, SetDuration(
1811 base::TimeDelta::FromMilliseconds(kVideoBlockDuration)));
1812 MarkEndOfStream(PIPELINE_OK);
1814 end_of_stream_helper_1.CheckIfReadDonesWereCalled(true);
1816 // Request a few more reads and make sure we immediately get
1817 // end of stream buffers.
1818 end_of_stream_helper_2.RequestReads();
1819 end_of_stream_helper_2.CheckIfReadDonesWereCalled(true);
1821 end_of_stream_helper_3.RequestReads();
1822 end_of_stream_helper_3.CheckIfReadDonesWereCalled(true);
1825 // Verifies both streams can still reach EOS after a seek is canceled by
1825 // a second seek request issued before the first finishes prerolling.
1825 TEST_F(ChunkDemuxerTest, EndOfStreamDuringCanceledSeek) {
1826 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1828 AppendCluster(0, 10);
1829 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(138)));
1830 MarkEndOfStream(PIPELINE_OK);
1832 // Start the first seek.
1833 Seek(base::TimeDelta::FromMilliseconds(20));
1835 // Simulate another seek being requested before the first
1836 // seek has finished prerolling.
1837 base::TimeDelta seek_time2 = base::TimeDelta::FromMilliseconds(30);
1838 demuxer_->CancelPendingSeek(seek_time2);
1840 // Finish second seek.
1843 DemuxerStream::Status status;
1844 base::TimeDelta last_timestamp;
1846 // Make sure audio can reach end of stream.
1847 ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
1848 ASSERT_EQ(status, DemuxerStream::kOk);
1850 // Make sure video can reach end of stream.
1851 ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
1852 ASSERT_EQ(status, DemuxerStream::kOk);
1855 // Verify buffered range change behavior for audio/video/text tracks.
1856 TEST_F(ChunkDemuxerTest, EndOfStreamRangeChanges) {
1857 DemuxerStream* text_stream = NULL;
1859 EXPECT_CALL(host_, AddTextStream(_, _))
1860 .WillOnce(SaveArg<0>(&text_stream));
1861 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));
1864 MuxedStreamInfo(kVideoTrackNum, "0K 33"),
1865 MuxedStreamInfo(kAudioTrackNum, "0K 23K"));
1867 // Check expected ranges and verify that an empty text track does not
1868 // affect the expected ranges.
1869 CheckExpectedRanges(kSourceId, "{ [0,46) }");
1871 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(66)));
1872 MarkEndOfStream(PIPELINE_OK);
1874 // Check expected ranges and verify that an empty text track does not
1875 // affect the expected ranges.
1876 CheckExpectedRanges(kSourceId, "{ [0,66) }");
1878 // Unmark end of stream state and verify that the ranges return to
1879 // their pre-"end of stream" values.
1880 demuxer_->UnmarkEndOfStream();
1881 CheckExpectedRanges(kSourceId, "{ [0,46) }");
1883 // Add text track data and verify that the buffered ranges don't change
1884 // since the intersection of all the tracks doesn't change.
1885 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(200)));
1887 MuxedStreamInfo(kVideoTrackNum, "0K 33"),
1888 MuxedStreamInfo(kAudioTrackNum, "0K 23K"),
1889 MuxedStreamInfo(kTextTrackNum, "0K 100K"));
1890 CheckExpectedRanges(kSourceId, "{ [0,46) }");
1892 // Mark end of stream and verify that text track data is reflected in
1894 MarkEndOfStream(PIPELINE_OK);
1895 CheckExpectedRanges(kSourceId, "{ [0,200) }");
1898 // Make sure AppendData() will accept elements that span multiple calls.
1899 TEST_F(ChunkDemuxerTest, AppendingInPieces) {
1900 EXPECT_CALL(*this, DemuxerOpened());
1901 demuxer_->Initialize(
1902 &host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
1904 ASSERT_EQ(AddId(), ChunkDemuxer::kOk);
1906 scoped_ptr<uint8[]> info_tracks;
1907 int info_tracks_size = 0;
1908 CreateInitSegment(HAS_AUDIO | HAS_VIDEO,
1909 false, false, &info_tracks, &info_tracks_size);
1911 scoped_ptr<Cluster> cluster_a(kDefaultFirstCluster());
1912 scoped_ptr<Cluster> cluster_b(kDefaultSecondCluster());
1914 // Concatenate init segment + both clusters into one contiguous buffer.
1914 size_t buffer_size = info_tracks_size + cluster_a->size() + cluster_b->size();
1915 scoped_ptr<uint8[]> buffer(new uint8[buffer_size]);
1916 uint8* dst = buffer.get();
1917 memcpy(dst, info_tracks.get(), info_tracks_size);
1918 dst += info_tracks_size;
1920 memcpy(dst, cluster_a->data(), cluster_a->size());
1921 dst += cluster_a->size();
1923 memcpy(dst, cluster_b->data(), cluster_b->size());
1924 dst += cluster_b->size();
1926 AppendDataInPieces(buffer.get(), buffer_size);
1928 GenerateExpectedReads(0, 9);
1931 // Parse a real muxed WebM file and verify initial buffer timestamps.
1931 TEST_F(ChunkDemuxerTest, WebMFile_AudioAndVideo) {
1932 struct BufferTimestamps buffer_timestamps[] = {
1941 // TODO(wolenetz/acolwell): Remove this SetDuration expectation and update the
1942 // ParseWebMFile() call's expected duration, below, once the file is fixed to
1943 // have the correct duration in the init segment. See http://crbug.com/354284.
1944 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(2746)));
1946 ASSERT_TRUE(ParseWebMFile("bear-320x240.webm", buffer_timestamps,
1947 base::TimeDelta::FromMilliseconds(2744)));
1950 // Live (unknown-duration) variant of the file above.
1950 TEST_F(ChunkDemuxerTest, WebMFile_LiveAudioAndVideo) {
1951 struct BufferTimestamps buffer_timestamps[] = {
1960 ASSERT_TRUE(ParseWebMFile("bear-320x240-live.webm", buffer_timestamps,
1961 kInfiniteDuration()));
1964 TEST_F(ChunkDemuxerTest, WebMFile_AudioOnly) {
1965 struct BufferTimestamps buffer_timestamps[] = {
1974 // TODO(wolenetz/acolwell): Remove this SetDuration expectation and update the
1975 // ParseWebMFile() call's expected duration, below, once the file is fixed to
1976 // have the correct duration in the init segment. See http://crbug.com/354284.
1977 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(2746)));
1979 ASSERT_TRUE(ParseWebMFile("bear-320x240-audio-only.webm", buffer_timestamps,
1980 base::TimeDelta::FromMilliseconds(2744),
1984 TEST_F(ChunkDemuxerTest, WebMFile_VideoOnly) {
1985 struct BufferTimestamps buffer_timestamps[] = {
1994 // TODO(wolenetz/acolwell): Remove this SetDuration expectation and update the
1995 // ParseWebMFile() call's expected duration, below, once the file is fixed to
1996 // have the correct duration in the init segment. See http://crbug.com/354284.
1997 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(2736)));
1999 ASSERT_TRUE(ParseWebMFile("bear-320x240-video-only.webm", buffer_timestamps,
2000 base::TimeDelta::FromMilliseconds(2703),
2004 // File containing VP8 alt-ref frames.
2004 TEST_F(ChunkDemuxerTest, WebMFile_AltRefFrames) {
2005 struct BufferTimestamps buffer_timestamps[] = {
2014 ASSERT_TRUE(ParseWebMFile("bear-320x240-altref.webm", buffer_timestamps,
2015 base::TimeDelta::FromMilliseconds(2767)));
2018 // Verify that we output buffers before the entire cluster has been parsed.
2019 TEST_F(ChunkDemuxerTest, IncrementalClusterParsing) {
2020 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2021 AppendEmptyCluster(0);
2023 scoped_ptr<Cluster> cluster(GenerateCluster(0, 6));
2025 bool audio_read_done = false;
2026 bool video_read_done = false;
2027 ReadAudio(base::Bind(&OnReadDone,
2028 base::TimeDelta::FromMilliseconds(0),
2030 ReadVideo(base::Bind(&OnReadDone,
2031 base::TimeDelta::FromMilliseconds(0),
2034 // Make sure the reads haven't completed yet.
2035 EXPECT_FALSE(audio_read_done);
2036 EXPECT_FALSE(video_read_done);
2038 // Append data one byte at a time until one or both reads complete.
2040 for (; i < cluster->size() && !(audio_read_done || video_read_done); ++i) {
2041 AppendData(cluster->data() + i, 1);
2042 message_loop_.RunUntilIdle();
2045 EXPECT_TRUE(audio_read_done || video_read_done);
2047 // A read completed before the whole cluster was appended.
2047 EXPECT_LT(i, cluster->size());
2049 audio_read_done = false;
2050 video_read_done = false;
2051 ReadAudio(base::Bind(&OnReadDone,
2052 base::TimeDelta::FromMilliseconds(23),
2054 ReadVideo(base::Bind(&OnReadDone,
2055 base::TimeDelta::FromMilliseconds(33),
2058 // Make sure the reads haven't completed yet.
2059 EXPECT_FALSE(audio_read_done);
2060 EXPECT_FALSE(video_read_done);
2062 // Append the remaining data.
2063 ASSERT_LT(i, cluster->size());
2064 AppendData(cluster->data() + i, cluster->size() - i);
2066 message_loop_.RunUntilIdle();
2068 EXPECT_TRUE(audio_read_done);
2069 EXPECT_TRUE(video_read_done);
2072 // A malformed byte appended during init must fail initialization.
2072 TEST_F(ChunkDemuxerTest, ParseErrorDuringInit) {
2073 EXPECT_CALL(*this, DemuxerOpened());
2074 demuxer_->Initialize(
2075 &host_, CreateInitDoneCB(
2076 kNoTimestamp(), DEMUXER_ERROR_COULD_NOT_OPEN), true);
2078 ASSERT_EQ(AddId(), ChunkDemuxer::kOk);
2081 demuxer_->AppendData(kSourceId, &tmp, 1,
2082 append_window_start_for_next_append_,
2083 append_window_end_for_next_append_,
2084 // Fix: '&' restored (entity mangling "&times" -> U+00D7).
2084 &timestamp_offset_map_[kSourceId]);
// An A/V init segment appended to a source declared as audio-only ("vorbis"
// codec on "audio/webm") must fail initialization with
// DEMUXER_ERROR_COULD_NOT_OPEN (expected by the init-done callback below).
2087 TEST_F(ChunkDemuxerTest, AVHeadersWithAudioOnlyType) {
2088 EXPECT_CALL(*this, DemuxerOpened());
2089 demuxer_->Initialize(
2090 &host_, CreateInitDoneCB(kNoTimestamp(),
2091 DEMUXER_ERROR_COULD_NOT_OPEN), true);
2093 std::vector<std::string> codecs(1);
2094 codecs[0] = "vorbis";
2095 ASSERT_EQ(demuxer_->AddId(kSourceId, "audio/webm", codecs),
2098 AppendInitSegment(HAS_AUDIO | HAS_VIDEO);
// Mirror of the audio-only case: an A/V init segment appended to a source
// declared as video-only ("video/webm") must fail initialization.
2101 TEST_F(ChunkDemuxerTest, AVHeadersWithVideoOnlyType) {
2102 EXPECT_CALL(*this, DemuxerOpened());
2103 demuxer_->Initialize(
2104 &host_, CreateInitDoneCB(kNoTimestamp(),
2105 DEMUXER_ERROR_COULD_NOT_OPEN), true);
// NOTE(review): the codecs[0] assignment (presumably "vp8") is not visible
// here — confirm against the full file.
2107 std::vector<std::string> codecs(1);
2109 ASSERT_EQ(demuxer_->AddId(kSourceId, "video/webm", codecs),
2112 AppendInitSegment(HAS_AUDIO | HAS_VIDEO);
// Appending a second, identical init segment between media clusters should be
// accepted and not disrupt reading across both clusters.
2115 TEST_F(ChunkDemuxerTest, MultipleHeaders) {
2116 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2118 AppendCluster(kDefaultFirstCluster());
2120 // Append another identical initialization segment.
2121 AppendInitSegment(HAS_AUDIO | HAS_VIDEO);
2123 AppendCluster(kDefaultSecondCluster());
2125 GenerateExpectedReads(0, 9);
// Audio and video appended through separate source IDs should each produce
// readable buffers independently.
2128 TEST_F(ChunkDemuxerTest, AddSeparateSourcesForAudioAndVideo) {
2129 std::string audio_id = "audio1";
2130 std::string video_id = "video1";
2131 ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
2133 // Append audio and video data into separate source ids.
2134 AppendCluster(audio_id,
2135 GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
2136 GenerateAudioStreamExpectedReads(0, 4);
2137 AppendCluster(video_id,
2138 GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
2139 GenerateVideoStreamExpectedReads(0, 4);
// Same as AddSeparateSourcesForAudioAndVideo, but with an inband text track
// enabled; the text stream should be surfaced to the host during init.
2142 TEST_F(ChunkDemuxerTest, AddSeparateSourcesForAudioAndVideoText) {
2143 // TODO(matthewjheaney): Here and elsewhere, we need more tests
2144 // for inband text tracks (http://crbug/321455).
2146 std::string audio_id = "audio1";
2147 std::string video_id = "video1";
2149 EXPECT_CALL(host_, AddTextStream(_, _))
2151 ASSERT_TRUE(InitDemuxerAudioAndVideoSourcesText(audio_id, video_id, true));
2153 // Append audio and video data into separate source ids.
2154 AppendCluster(audio_id,
2155 GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
2156 GenerateAudioStreamExpectedReads(0, 4);
2157 AppendCluster(video_id,
2158 GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
2159 GenerateVideoStreamExpectedReads(0, 4);
// Exercise AddId failure modes: exceeding the source-id quota, and adding a
// new id after data has already been appended.
2162 TEST_F(ChunkDemuxerTest, AddIdFailures) {
2163 EXPECT_CALL(*this, DemuxerOpened());
2164 demuxer_->Initialize(
2165 &host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
2167 std::string audio_id = "audio1";
2168 std::string video_id = "video1";
2170 ASSERT_EQ(AddId(audio_id, HAS_AUDIO), ChunkDemuxer::kOk);
2172 // Adding an id with audio/video should fail because we already added audio.
2173 ASSERT_EQ(AddId(), ChunkDemuxer::kReachedIdLimit);
2175 AppendInitSegmentWithSourceId(audio_id, HAS_AUDIO);
2177 // Adding an id after append should fail.
2178 ASSERT_EQ(AddId(video_id, HAS_VIDEO), ChunkDemuxer::kReachedIdLimit);
2181 // Test that Read() calls after a RemoveId() return "end of stream" buffers.
2182 TEST_F(ChunkDemuxerTest, RemoveId) {
2183 std::string audio_id = "audio1";
2184 std::string video_id = "video1";
2185 ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
2187 // Append audio and video data into separate source ids.
2188 AppendCluster(audio_id,
2189 GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
2190 AppendCluster(video_id,
2191 GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
2193 // Read() from audio should return normal buffers.
2194 GenerateAudioStreamExpectedReads(0, 4);
// Remove the audio id; its stream should switch to end-of-stream behavior
// while the remaining video id is unaffected.
2197 demuxer_->RemoveId(audio_id);
2199 // Read() from audio should return "end of stream" buffers.
2200 bool audio_read_done = false;
2201 ReadAudio(base::Bind(&OnReadDone_EOSExpected, &audio_read_done));
2202 message_loop_.RunUntilIdle();
2203 EXPECT_TRUE(audio_read_done);
2205 // Read() from video should still return normal buffers.
2206 GenerateVideoStreamExpectedReads(0, 4);
2209 // Test that removing an ID immediately after adding it does not interfere with
2210 // quota for new IDs in the future.
2211 TEST_F(ChunkDemuxerTest, RemoveAndAddId) {
2212 std::string audio_id_1 = "audio1";
// Use ASSERT_EQ (rather than ASSERT_TRUE on an == expression) so a failure
// prints both values; this matches the AddId checks elsewhere in this file.
2213 ASSERT_EQ(AddId(audio_id_1, HAS_AUDIO), ChunkDemuxer::kOk);
2214 demuxer_->RemoveId(audio_id_1);
2216 std::string audio_id_2 = "audio2";
2217 ASSERT_EQ(AddId(audio_id_2, HAS_AUDIO), ChunkDemuxer::kOk);
// Pending reads issued against an unbuffered seek target should be flushed
// (aborted) when the pending seek is canceled.
2220 TEST_F(ChunkDemuxerTest, SeekCanceled) {
2221 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2223 // Append cluster at the beginning of the stream.
2224 AppendCluster(GenerateCluster(0, 4));
2226 // Seek to an unbuffered region.
2227 Seek(base::TimeDelta::FromSeconds(50));
2229 // Attempt to read in unbuffered area; should not fulfill the read.
2230 bool audio_read_done = false;
2231 bool video_read_done = false;
2232 ReadAudio(base::Bind(&OnReadDone_AbortExpected, &audio_read_done));
2233 ReadVideo(base::Bind(&OnReadDone_AbortExpected, &video_read_done));
2234 EXPECT_FALSE(audio_read_done);
2235 EXPECT_FALSE(video_read_done);
2237 // Now cancel the pending seek, which should flush the reads with empty
// (aborted) buffers.
2239 base::TimeDelta seek_time = base::TimeDelta::FromSeconds(0);
2240 demuxer_->CancelPendingSeek(seek_time);
2241 message_loop_.RunUntilIdle();
2242 EXPECT_TRUE(audio_read_done);
2243 EXPECT_TRUE(video_read_done);
2245 // A seek back to the buffered region should succeed.
2247 GenerateExpectedReads(0, 4);
// Canceling a seek between StartWaitingForSeek() and Seek() should cause
// subsequent reads to be fulfilled immediately with aborted buffers.
2250 TEST_F(ChunkDemuxerTest, SeekCanceledWhileWaitingForSeek) {
2251 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2253 // Append cluster at the beginning of the stream.
2254 AppendCluster(GenerateCluster(0, 4));
2256 // Start waiting for a seek.
2257 base::TimeDelta seek_time1 = base::TimeDelta::FromSeconds(50);
2258 base::TimeDelta seek_time2 = base::TimeDelta::FromSeconds(0);
2259 demuxer_->StartWaitingForSeek(seek_time1);
2261 // Now cancel the upcoming seek to an unbuffered region.
2262 demuxer_->CancelPendingSeek(seek_time2);
2263 demuxer_->Seek(seek_time1, NewExpectedStatusCB(PIPELINE_OK));
2265 // Read requests should be fulfilled with empty buffers.
2266 bool audio_read_done = false;
2267 bool video_read_done = false;
2268 ReadAudio(base::Bind(&OnReadDone_AbortExpected, &audio_read_done));
2269 ReadVideo(base::Bind(&OnReadDone_AbortExpected, &video_read_done));
2270 EXPECT_TRUE(audio_read_done);
2271 EXPECT_TRUE(video_read_done);
2273 // A seek back to the buffered region should succeed.
2275 GenerateExpectedReads(0, 4);
2278 // Test that Seek() successfully seeks to all source IDs.
2279 TEST_F(ChunkDemuxerTest, SeekAudioAndVideoSources) {
2280 std::string audio_id = "audio1";
2281 std::string video_id = "video1";
2282 ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
// Append initial audio and video clusters at time 0 to their respective ids.
2286 GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
2289 GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
2291 // Read() should return buffers at 0.
2292 bool audio_read_done = false;
2293 bool video_read_done = false;
2294 ReadAudio(base::Bind(&OnReadDone,
2295 base::TimeDelta::FromMilliseconds(0),
2297 ReadVideo(base::Bind(&OnReadDone,
2298 base::TimeDelta::FromMilliseconds(0),
2300 EXPECT_TRUE(audio_read_done);
2301 EXPECT_TRUE(video_read_done);
2303 // Seek to 3 (an unbuffered region).
2304 Seek(base::TimeDelta::FromSeconds(3));
2306 audio_read_done = false;
2307 video_read_done = false;
2308 ReadAudio(base::Bind(&OnReadDone,
2309 base::TimeDelta::FromSeconds(3),
2311 ReadVideo(base::Bind(&OnReadDone,
2312 base::TimeDelta::FromSeconds(3),
2314 // Read()s should not return until after data is appended at the Seek point.
2315 EXPECT_FALSE(audio_read_done);
2316 EXPECT_FALSE(video_read_done);
// Append clusters covering the 3s seek point on both source ids.
2318 AppendCluster(audio_id,
2319 GenerateSingleStreamCluster(
2320 3000, 3092, kAudioTrackNum, kAudioBlockDuration));
2321 AppendCluster(video_id,
2322 GenerateSingleStreamCluster(
2323 3000, 3132, kVideoTrackNum, kVideoBlockDuration));
2325 message_loop_.RunUntilIdle();
2327 // Read() should return buffers at 3.
2328 EXPECT_TRUE(audio_read_done);
2329 EXPECT_TRUE(video_read_done);
2332 // Test that Seek() completes successfully when EndOfStream
2333 // is called before data is available for that seek point.
2334 // This scenario might be useful if seeking past the end of stream
2335 // of either audio or video (or both).
2336 TEST_F(ChunkDemuxerTest, EndOfStreamAfterPastEosSeek) {
2337 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2339 AppendCluster(GenerateSingleStreamCluster(0, 120, kAudioTrackNum, 10));
2340 AppendCluster(GenerateSingleStreamCluster(0, 100, kVideoTrackNum, 5));
2342 // Seeking past the end of video.
2343 // Note: audio data is available for that seek point.
2344 bool seek_cb_was_called = false;
2345 base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(110);
2346 demuxer_->StartWaitingForSeek(seek_time);
2347 demuxer_->Seek(seek_time,
2348 base::Bind(OnSeekDone_OKExpected, &seek_cb_was_called));
2349 message_loop_.RunUntilIdle();
2351 EXPECT_FALSE(seek_cb_was_called);
// Marking end of stream should extend the duration to the longest buffered
// stream (audio ends at 120ms) and allow the pending seek to complete.
2353 EXPECT_CALL(host_, SetDuration(
2354 base::TimeDelta::FromMilliseconds(120)));
2355 MarkEndOfStream(PIPELINE_OK);
2356 message_loop_.RunUntilIdle();
2358 EXPECT_TRUE(seek_cb_was_called);
2363 // Test that EndOfStream is ignored if coming during a pending seek
2364 // whose seek time is before some existing ranges.
2365 TEST_F(ChunkDemuxerTest, EndOfStreamDuringPendingSeek) {
2366 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2368 AppendCluster(GenerateSingleStreamCluster(0, 120, kAudioTrackNum, 10));
2369 AppendCluster(GenerateSingleStreamCluster(0, 100, kVideoTrackNum, 5));
2370 AppendCluster(GenerateSingleStreamCluster(200, 300, kAudioTrackNum, 10))
2371 AppendCluster(GenerateSingleStreamCluster(200, 300, kVideoTrackNum, 5));
2373 bool seek_cb_was_called = false;
2374 base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(160);
2375 demuxer_->StartWaitingForSeek(seek_time);
2376 demuxer_->Seek(seek_time,
2377 base::Bind(OnSeekDone_OKExpected, &seek_cb_was_called));
2378 message_loop_.RunUntilIdle();
2380 EXPECT_FALSE(seek_cb_was_called);
2382 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(300)));
2383 MarkEndOfStream(PIPELINE_OK);
2384 message_loop_.RunUntilIdle();
// EOS arrived with the seek target (160ms) still unbuffered, so the seek
// must remain pending.
2386 EXPECT_FALSE(seek_cb_was_called);
2388 demuxer_->UnmarkEndOfStream();
// Appending data covering the 160ms seek point finally completes the seek.
2390 AppendCluster(GenerateSingleStreamCluster(140, 180, kAudioTrackNum, 10));
2391 AppendCluster(GenerateSingleStreamCluster(140, 180, kVideoTrackNum, 5));
2393 message_loop_.RunUntilIdle();
2395 EXPECT_TRUE(seek_cb_was_called);
2400 // Test ranges in an audio-only stream.
2401 TEST_F(ChunkDemuxerTest, GetBufferedRanges_AudioIdOnly) {
2402 EXPECT_CALL(*this, DemuxerOpened());
2403 demuxer_->Initialize(
2404 &host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
2406 ASSERT_EQ(AddId(kSourceId, HAS_AUDIO), ChunkDemuxer::kOk);
2407 AppendInitSegment(HAS_AUDIO);
// A single appended cluster should produce one buffered range, [0,92).
2409 // Test a simple cluster.
2411 GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
2413 CheckExpectedRanges("{ [0,92) }");
2415 // Append a disjoint cluster to check for two separate ranges.
2416 AppendCluster(GenerateSingleStreamCluster(
2417 150, 219, kAudioTrackNum, kAudioBlockDuration));
2419 CheckExpectedRanges("{ [0,92) [150,219) }");
2422 // Test ranges in a video-only stream.
2423 TEST_F(ChunkDemuxerTest, GetBufferedRanges_VideoIdOnly) {
2424 EXPECT_CALL(*this, DemuxerOpened());
2425 demuxer_->Initialize(
2426 &host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
2428 ASSERT_EQ(AddId(kSourceId, HAS_VIDEO), ChunkDemuxer::kOk);
2429 AppendInitSegment(HAS_VIDEO);
// A single appended cluster should produce one buffered range, [0,132).
2431 // Test a simple cluster.
2433 GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
2435 CheckExpectedRanges("{ [0,132) }");
2437 // Append a disjoint cluster to check for two separate ranges.
2438 AppendCluster(GenerateSingleStreamCluster(
2439 200, 299, kVideoTrackNum, kVideoBlockDuration));
2441 CheckExpectedRanges("{ [0,132) [200,299) }");
// Verify buffered-range intersection logic when audio and video ranges
// overlap in various ways (end, front, and complete overlaps).
2444 TEST_F(ChunkDemuxerTest, GetBufferedRanges_AudioVideo) {
2445 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2449 // Buffered Range: 0 -> 23
2450 // Audio block duration is smaller than video block duration,
2451 // so the buffered ranges should correspond to the audio blocks.
2452 AppendCluster(GenerateSingleStreamCluster(
2453 0, kAudioBlockDuration, kAudioTrackNum, kAudioBlockDuration));
2454 AppendCluster(GenerateSingleStreamCluster(
2455 0, kVideoBlockDuration, kVideoTrackNum, kVideoBlockDuration));
2457 CheckExpectedRanges("{ [0,23) }");
2459 // Audio: 300 -> 400
2460 // Video: 320 -> 420
2461 // Buffered Range: 320 -> 400 (end overlap)
2462 AppendCluster(GenerateSingleStreamCluster(300, 400, kAudioTrackNum, 50));
2463 AppendCluster(GenerateSingleStreamCluster(320, 420, kVideoTrackNum, 50));
2465 CheckExpectedRanges("{ [0,23) [320,400) }");
2467 // Audio: 520 -> 590
2468 // Video: 500 -> 570
2469 // Buffered Range: 520 -> 570 (front overlap)
2470 AppendCluster(GenerateSingleStreamCluster(520, 590, kAudioTrackNum, 70));
2471 AppendCluster(GenerateSingleStreamCluster(500, 570, kVideoTrackNum, 70));
2473 CheckExpectedRanges("{ [0,23) [320,400) [520,570) }");
2475 // Audio: 720 -> 750
2476 // Video: 700 -> 770
2477 // Buffered Range: 720 -> 750 (complete overlap, audio)
2478 AppendCluster(GenerateSingleStreamCluster(720, 750, kAudioTrackNum, 30));
2479 AppendCluster(GenerateSingleStreamCluster(700, 770, kVideoTrackNum, 70));
2481 CheckExpectedRanges("{ [0,23) [320,400) [520,570) [720,750) }");
2483 // Audio: 900 -> 970
2484 // Video: 920 -> 950
2485 // Buffered Range: 920 -> 950 (complete overlap, video)
2486 AppendCluster(GenerateSingleStreamCluster(900, 970, kAudioTrackNum, 70));
2487 AppendCluster(GenerateSingleStreamCluster(920, 950, kVideoTrackNum, 30));
2489 CheckExpectedRanges("{ [0,23) [320,400) [520,570) [720,750) [920,950) }");
2491 // Appending within buffered range should not affect buffered ranges.
2492 AppendCluster(GenerateSingleStreamCluster(930, 950, kAudioTrackNum, 20));
2493 CheckExpectedRanges("{ [0,23) [320,400) [520,570) [720,750) [920,950) }");
2495 // Appending to single stream outside buffered ranges should not affect
// the reported (intersected) buffered ranges.
2497 AppendCluster(GenerateSingleStreamCluster(1230, 1240, kVideoTrackNum, 10));
2498 CheckExpectedRanges("{ [0,23) [320,400) [520,570) [720,750) [920,950) }");
// Text-track cues must not contribute to (or subtract from) the reported
// buffered ranges, which are driven by audio/video only.
2501 TEST_F(ChunkDemuxerTest, GetBufferedRanges_AudioVideoText) {
2502 EXPECT_CALL(host_, AddTextStream(_, _));
2503 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));
2505 // Append audio & video data
2507 MuxedStreamInfo(kAudioTrackNum, "0K 23K"),
2508 MuxedStreamInfo(kVideoTrackNum, "0K 33"));
2510 // Verify that a text track with no cues does not result in an empty buffered
// range.
2512 CheckExpectedRanges("{ [0,46) }");
2514 // Add some text cues.
2516 MuxedStreamInfo(kAudioTrackNum, "100K 123K"),
2517 MuxedStreamInfo(kVideoTrackNum, "100K 133"),
2518 MuxedStreamInfo(kTextTrackNum, "100K 200K"));
2520 // Verify that the text cues are not reflected in the buffered ranges.
2521 CheckExpectedRanges("{ [0,46) [100,146) }");
2523 // Remove the buffered ranges.
2524 demuxer_->Remove(kSourceId, base::TimeDelta(),
2525 base::TimeDelta::FromMilliseconds(250));
2526 CheckExpectedRanges("{ }");
2529 // Once MarkEndOfStream() is called, GetBufferedRanges should not cut off any
2530 // over-hanging tails at the end of the ranges as this is likely due to block
2531 // duration differences.
2532 TEST_F(ChunkDemuxerTest, GetBufferedRanges_EndOfStream) {
2533 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2536 MuxedStreamInfo(kAudioTrackNum, "0K 23K"),
2537 MuxedStreamInfo(kVideoTrackNum, "0K 33"));
2539 CheckExpectedRanges("{ [0,46) }");
2541 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(66)));
2542 MarkEndOfStream(PIPELINE_OK);
2544 // Verify that the range extends to the end of the video data.
2545 CheckExpectedRanges("{ [0,66) }");
2547 // Verify that the range reverts to the intersection when end of stream
2548 // has been cancelled.
2549 demuxer_->UnmarkEndOfStream();
2550 CheckExpectedRanges("{ [0,46) }");
2552 // Append and remove data so that the 2 streams' end ranges do not overlap.
2554 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(398)));
2556 MuxedStreamInfo(kAudioTrackNum, "200K 223K"),
2557 MuxedStreamInfo(kVideoTrackNum, "200K 233 266 299 332K 365"));
2559 // At this point, the per-stream ranges are as follows:
2560 // Audio: [0,46) [200,246)
2561 // Video: [0,66) [200,398)
2562 CheckExpectedRanges("{ [0,46) [200,246) }");
2564 demuxer_->Remove(kSourceId, base::TimeDelta::FromMilliseconds(200),
2565 base::TimeDelta::FromMilliseconds(300));
2567 // At this point, the per-stream ranges are as follows:
// Audio: [0,46)
2569 // Video: [0,66) [332,398)
2570 CheckExpectedRanges("{ [0,46) }");
2573 MuxedStreamInfo(kAudioTrackNum, "200K 223K"),
2574 MuxedStreamInfo(kVideoTrackNum, "200K 233"));
2576 // At this point, the per-stream ranges are as follows:
2577 // Audio: [0,46) [200,246)
2578 // Video: [0,66) [200,266) [332,398)
// NOTE: The last ranges on each stream do not overlap in time.
2580 CheckExpectedRanges("{ [0,46) [200,246) }");
2582 MarkEndOfStream(PIPELINE_OK);
2584 // NOTE: The last range on each stream gets extended to the highest
2585 // end timestamp according to the spec. The last audio range gets extended
2586 // from [200,246) to [200,398) which is why the intersection results in the
2587 // middle range getting larger AND the new range appearing.
2588 CheckExpectedRanges("{ [0,46) [200,266) [332,398) }");
// Clusters whose audio and video timecodes start at slightly different times
// should still be seekable and readable in both directions of skew.
2591 TEST_F(ChunkDemuxerTest, DifferentStreamTimecodes) {
2592 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2594 // Create a cluster where the video timecode begins 25ms after the audio.
2595 AppendCluster(GenerateCluster(0, 25, 8));
2597 Seek(base::TimeDelta::FromSeconds(0));
2598 GenerateExpectedReads(0, 25, 8);
2600 // Seek to 5 seconds.
2601 Seek(base::TimeDelta::FromSeconds(5));
2603 // Generate a cluster to fulfill this seek, where audio timecode begins 25ms
// after the video.
2605 AppendCluster(GenerateCluster(5025, 5000, 8));
2606 GenerateExpectedReads(5025, 5000, 8);
// Same skewed-start scenario as above, but with audio and video delivered
// through separate source ids.
2609 TEST_F(ChunkDemuxerTest, DifferentStreamTimecodesSeparateSources) {
2610 std::string audio_id = "audio1";
2611 std::string video_id = "video1";
2612 ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
2614 // Generate two streams where the video stream starts 5ms after the audio
2615 // stream and append them.
2616 AppendCluster(audio_id, GenerateSingleStreamCluster(
2617 25, 4 * kAudioBlockDuration + 25, kAudioTrackNum, kAudioBlockDuration));
2618 AppendCluster(video_id, GenerateSingleStreamCluster(
2619 30, 4 * kVideoBlockDuration + 30, kVideoTrackNum, kVideoBlockDuration));
2621 // Both streams should be able to fulfill a seek to 25.
// (Video reads begin at 30ms, its first buffer at/after the seek point.)
2622 Seek(base::TimeDelta::FromMilliseconds(25));
2623 GenerateAudioStreamExpectedReads(25, 4);
2624 GenerateVideoStreamExpectedReads(30, 4);
// When one stream's data starts far (10s) after the other, a seek to 0
// cannot be fulfilled for both streams and should abort.
2627 TEST_F(ChunkDemuxerTest, DifferentStreamTimecodesOutOfRange) {
2628 std::string audio_id = "audio1";
2629 std::string video_id = "video1";
2630 ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
2632 // Generate two streams where the video stream starts 10s after the audio
2633 // stream and append them.
2634 AppendCluster(audio_id, GenerateSingleStreamCluster(0,
2635 4 * kAudioBlockDuration + 0, kAudioTrackNum, kAudioBlockDuration));
2636 AppendCluster(video_id, GenerateSingleStreamCluster(10000,
2637 4 * kVideoBlockDuration + 10000, kVideoTrackNum, kVideoBlockDuration));
2639 // Should not be able to fulfill a seek to 0.
2640 base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(0);
2641 demuxer_->StartWaitingForSeek(seek_time);
2642 demuxer_->Seek(seek_time,
2643 NewExpectedStatusCB(PIPELINE_ERROR_ABORT));
// Audio still has data at 0; video reports end of stream for this position.
2644 ExpectRead(DemuxerStream::AUDIO, 0);
2645 ExpectEndOfStream(DemuxerStream::VIDEO);
// An empty cluster (no block data) should be accepted and must not break
// subsequent appends or reads.
2648 TEST_F(ChunkDemuxerTest, ClusterWithNoBuffers) {
2649 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2651 // Generate and append an empty cluster beginning at 0.
2652 AppendEmptyCluster(0);
2654 // Sanity check that data can be appended after this cluster correctly.
2655 AppendCluster(GenerateCluster(0, 2));
2656 ExpectRead(DemuxerStream::AUDIO, 0);
2657 ExpectRead(DemuxerStream::VIDEO, 0);
// AddId should prefix-match RFC6381-style codec ids (e.g. "avc1.4D4041");
// acceptance depends on whether proprietary codecs are compiled in.
2660 TEST_F(ChunkDemuxerTest, CodecPrefixMatching) {
2661 ChunkDemuxer::Status expected = ChunkDemuxer::kNotSupported;
2663 #if defined(USE_PROPRIETARY_CODECS)
2664 expected = ChunkDemuxer::kOk;
// NOTE(review): the matching #endif is assumed to close the block here —
// confirm against the full file.
2667 std::vector<std::string> codecs;
2668 codecs.push_back("avc1.4D4041");
2670 EXPECT_EQ(demuxer_->AddId("source_id", "video/mp4", codecs), expected);
2673 // Test codec ID's that are not compliant with RFC6381, but have been
2674 // seen in the wild.
2675 TEST_F(ChunkDemuxerTest, CodecIDsThatAreNotRFC6381Compliant) {
2676 ChunkDemuxer::Status expected = ChunkDemuxer::kNotSupported;
2678 #if defined(USE_PROPRIETARY_CODECS)
2679 expected = ChunkDemuxer::kOk;
// NOTE(review): the matching #endif and the codec_ids array entries are not
// visible here — confirm against the full file.
2681 const char* codec_ids[] = {
2682 // GPAC places leading zeros on the audio object type.
2687 for (size_t i = 0; i < arraysize(codec_ids); ++i) {
2688 std::vector<std::string> codecs;
2689 codecs.push_back(codec_ids[i]);
2691 ChunkDemuxer::Status result =
2692 demuxer_->AddId("source_id", "audio/mp4", codecs);
2694 EXPECT_EQ(result, expected)
2695 << "Fail to add codec_id '" << codec_ids[i] << "'";
// Remove the id so the next iteration can re-add it under the same name.
2697 if (result == ChunkDemuxer::kOk)
2698 demuxer_->RemoveId("source_id");
// End-of-stream state must persist across a seek: both streams should be
// readable to their last timestamps before and after seeking back to 0.
2702 TEST_F(ChunkDemuxerTest, EndOfStreamStillSetAfterSeek) {
2703 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2705 EXPECT_CALL(host_, SetDuration(_))
2706 .Times(AnyNumber());
2708 base::TimeDelta kLastAudioTimestamp = base::TimeDelta::FromMilliseconds(92);
2709 base::TimeDelta kLastVideoTimestamp = base::TimeDelta::FromMilliseconds(99);
2711 AppendCluster(kDefaultFirstCluster());
2712 AppendCluster(kDefaultSecondCluster());
2713 MarkEndOfStream(PIPELINE_OK);
2715 DemuxerStream::Status status;
2716 base::TimeDelta last_timestamp;
2718 // Verify that we can read audio & video to the end w/o problems.
2719 ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
2720 EXPECT_EQ(DemuxerStream::kOk, status);
2721 EXPECT_EQ(kLastAudioTimestamp, last_timestamp);
2723 ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
2724 EXPECT_EQ(DemuxerStream::kOk, status);
2725 EXPECT_EQ(kLastVideoTimestamp, last_timestamp);
2727 // Seek back to 0 and verify that we can read to the end again.
2728 Seek(base::TimeDelta::FromMilliseconds(0));
2730 ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
2731 EXPECT_EQ(DemuxerStream::kOk, status);
2732 EXPECT_EQ(kLastAudioTimestamp, last_timestamp);
2734 ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
2735 EXPECT_EQ(DemuxerStream::kOk, status);
2736 EXPECT_EQ(kLastVideoTimestamp, last_timestamp);
// Buffered ranges queried before any init segment has been appended should
// be empty for every source id.
2739 TEST_F(ChunkDemuxerTest, GetBufferedRangesBeforeInitSegment) {
2740 EXPECT_CALL(*this, DemuxerOpened());
2741 demuxer_->Initialize(&host_, CreateInitDoneCB(PIPELINE_OK), true);
2742 ASSERT_EQ(AddId("audio", HAS_AUDIO), ChunkDemuxer::kOk);
2743 ASSERT_EQ(AddId("video", HAS_VIDEO), ChunkDemuxer::kOk);
2745 CheckExpectedRanges("audio", "{ }");
2746 CheckExpectedRanges("video", "{ }");
2749 // Test that Seek() completes successfully when the first cluster
// is appended and end of stream is marked while the seek is still pending.
2751 TEST_F(ChunkDemuxerTest, EndOfStreamDuringSeek) {
2754 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2756 AppendCluster(kDefaultFirstCluster());
2758 base::TimeDelta seek_time = base::TimeDelta::FromSeconds(0);
2759 demuxer_->StartWaitingForSeek(seek_time);
2761 AppendCluster(kDefaultSecondCluster());
2762 EXPECT_CALL(host_, SetDuration(
2763 base::TimeDelta::FromMilliseconds(kDefaultSecondClusterEndTimestamp)));
2764 MarkEndOfStream(PIPELINE_OK);
2766 demuxer_->Seek(seek_time, NewExpectedStatusCB(PIPELINE_OK));
// All buffered data should be readable, and reads past it should report EOS.
2768 GenerateExpectedReads(0, 4);
2769 GenerateExpectedReads(46, 66, 5);
2771 EndOfStreamHelper end_of_stream_helper(demuxer_.get());
2772 end_of_stream_helper.RequestReads();
2773 end_of_stream_helper.CheckIfReadDonesWereCalled(true);
// Reading through a stream with mid-stream video config changes should
// surface kConfigChanged at each boundary, with the new config retrievable.
2776 TEST_F(ChunkDemuxerTest, ConfigChange_Video) {
2779 ASSERT_TRUE(InitDemuxerWithConfigChangeData());
2781 DemuxerStream::Status status;
2782 base::TimeDelta last_timestamp;
2784 DemuxerStream* video = demuxer_->GetStream(DemuxerStream::VIDEO);
2786 // Fetch initial video config and verify it matches what we expect.
2787 const VideoDecoderConfig& video_config_1 = video->video_decoder_config();
2788 ASSERT_TRUE(video_config_1.IsValidConfig());
2789 EXPECT_EQ(video_config_1.natural_size().width(), 320);
2790 EXPECT_EQ(video_config_1.natural_size().height(), 240);
2792 ExpectRead(DemuxerStream::VIDEO, 0);
2794 ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
2796 ASSERT_EQ(status, DemuxerStream::kConfigChanged);
2797 EXPECT_EQ(last_timestamp.InMilliseconds(), 501);
2799 // Fetch the new decoder config.
2800 const VideoDecoderConfig& video_config_2 = video->video_decoder_config();
2801 ASSERT_TRUE(video_config_2.IsValidConfig());
2802 EXPECT_EQ(video_config_2.natural_size().width(), 640);
2803 EXPECT_EQ(video_config_2.natural_size().height(), 360);
2805 ExpectRead(DemuxerStream::VIDEO, 527);
2807 // Read until the next config change.
2808 ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
2809 ASSERT_EQ(status, DemuxerStream::kConfigChanged);
2810 EXPECT_EQ(last_timestamp.InMilliseconds(), 793);
2812 // Get the new config and verify that it matches the first one.
2813 ASSERT_TRUE(video_config_1.Matches(video->video_decoder_config()));
2815 ExpectRead(DemuxerStream::VIDEO, 801);
2817 // Read until the end of the stream just to make sure there aren't any other
// config changes.
2819 ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
2820 ASSERT_EQ(status, DemuxerStream::kOk);
// Audio analogue of ConfigChange_Video: config changes are surfaced via
// splice frames where buffers from the two configs overlap.
2823 TEST_F(ChunkDemuxerTest, ConfigChange_Audio) {
2826 ASSERT_TRUE(InitDemuxerWithConfigChangeData());
2828 DemuxerStream::Status status;
2829 base::TimeDelta last_timestamp;
2831 DemuxerStream* audio = demuxer_->GetStream(DemuxerStream::AUDIO);
2833 // Fetch initial audio config and verify it matches what we expect.
2834 const AudioDecoderConfig& audio_config_1 = audio->audio_decoder_config();
2835 ASSERT_TRUE(audio_config_1.IsValidConfig());
2836 EXPECT_EQ(audio_config_1.samples_per_second(), 44100);
2837 EXPECT_EQ(audio_config_1.extra_data_size(), 3863u);
2839 ExpectRead(DemuxerStream::AUDIO, 0);
2841 // The first config change seen is from a splice frame representing an overlap
2842 // of buffer from config 1 by buffers from config 2.
2843 ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
2844 ASSERT_EQ(status, DemuxerStream::kConfigChanged);
2845 EXPECT_EQ(last_timestamp.InMilliseconds(), 524);
2847 // Fetch the new decoder config.
2848 const AudioDecoderConfig& audio_config_2 = audio->audio_decoder_config();
2849 ASSERT_TRUE(audio_config_2.IsValidConfig());
2850 EXPECT_EQ(audio_config_2.samples_per_second(), 44100);
2851 EXPECT_EQ(audio_config_2.extra_data_size(), 3935u);
2853 // The next config change is from a splice frame representing an overlap of
2854 // buffers from config 2 by buffers from config 1.
2855 ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
2856 ASSERT_EQ(status, DemuxerStream::kConfigChanged);
2857 EXPECT_EQ(last_timestamp.InMilliseconds(), 782);
2858 ASSERT_TRUE(audio_config_1.Matches(audio->audio_decoder_config()));
2860 // Read until the end of the stream just to make sure there aren't any other
// config changes.
2862 ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
2863 ASSERT_EQ(status, DemuxerStream::kOk);
2864 EXPECT_EQ(last_timestamp.InMilliseconds(), 2744);
// Seeks that cross a config boundary must signal a config change before the
// next buffer; seeks that land back on the current config must not.
2867 TEST_F(ChunkDemuxerTest, ConfigChange_Seek) {
2870 ASSERT_TRUE(InitDemuxerWithConfigChangeData());
2872 DemuxerStream* video = demuxer_->GetStream(DemuxerStream::VIDEO);
2874 // Fetch initial video config and verify it matches what we expect.
2875 const VideoDecoderConfig& video_config_1 = video->video_decoder_config();
2876 ASSERT_TRUE(video_config_1.IsValidConfig());
2877 EXPECT_EQ(video_config_1.natural_size().width(), 320);
2878 EXPECT_EQ(video_config_1.natural_size().height(), 240);
2880 ExpectRead(DemuxerStream::VIDEO, 0);
2882 // Seek to a location with a different config.
2883 Seek(base::TimeDelta::FromMilliseconds(527));
2885 // Verify that the config change is signalled.
2886 ExpectConfigChanged(DemuxerStream::VIDEO);
2888 // Fetch the new decoder config and verify it is what we expect.
2889 const VideoDecoderConfig& video_config_2 = video->video_decoder_config();
2890 ASSERT_TRUE(video_config_2.IsValidConfig());
2891 EXPECT_EQ(video_config_2.natural_size().width(), 640);
2892 EXPECT_EQ(video_config_2.natural_size().height(), 360);
2894 // Verify that Read() will return a buffer now.
2895 ExpectRead(DemuxerStream::VIDEO, 527);
2897 // Seek back to the beginning and verify we get another config change.
2898 Seek(base::TimeDelta::FromMilliseconds(0));
2899 ExpectConfigChanged(DemuxerStream::VIDEO);
2900 ASSERT_TRUE(video_config_1.Matches(video->video_decoder_config()));
2901 ExpectRead(DemuxerStream::VIDEO, 0);
2903 // Seek to a location that requires a config change and then
2904 // seek to a new location that has the same configuration as
2905 // the start of the file without a Read() in the middle.
2906 Seek(base::TimeDelta::FromMilliseconds(527));
2907 Seek(base::TimeDelta::FromMilliseconds(801));
2909 // Verify that no config change is signalled.
2910 ExpectRead(DemuxerStream::VIDEO, 801);
2911 ASSERT_TRUE(video_config_1.Matches(video->video_decoder_config()));
// With a +30s timestamp offset, media appended at time 0 should be buffered
// (and readable after a seek) at 30s.
2914 TEST_F(ChunkDemuxerTest, TimestampPositiveOffset) {
2915 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2917 ASSERT_TRUE(SetTimestampOffset(kSourceId, base::TimeDelta::FromSeconds(30)));
2918 AppendCluster(GenerateCluster(0, 2));
2920 Seek(base::TimeDelta::FromMilliseconds(30000));
2922 GenerateExpectedReads(30000, 2);
// With a -1s timestamp offset, media appended at 1000ms should be buffered
// and readable starting at time 0.
2925 TEST_F(ChunkDemuxerTest, TimestampNegativeOffset) {
2926 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2928 ASSERT_TRUE(SetTimestampOffset(kSourceId, base::TimeDelta::FromSeconds(-1)));
2929 AppendCluster(GenerateCluster(1000, 2));
2931 GenerateExpectedReads(0, 2);
// Timestamp offsets applied independently per source id, both negative
// (rebasing 2500ms data to 0) and positive (rebasing 0 data to 27.3s).
2934 TEST_F(ChunkDemuxerTest, TimestampOffsetSeparateStreams) {
2935 std::string audio_id = "audio1";
2936 std::string video_id = "video1";
2937 ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
// A -2500ms offset rebases data appended at 2500ms so reads start at 0.
2939 ASSERT_TRUE(SetTimestampOffset(
2940 audio_id, base::TimeDelta::FromMilliseconds(-2500)));
2941 ASSERT_TRUE(SetTimestampOffset(
2942 video_id, base::TimeDelta::FromMilliseconds(-2500)));
2943 AppendCluster(audio_id, GenerateSingleStreamCluster(2500,
2944 2500 + kAudioBlockDuration * 4, kAudioTrackNum, kAudioBlockDuration));
2945 AppendCluster(video_id, GenerateSingleStreamCluster(2500,
2946 2500 + kVideoBlockDuration * 4, kVideoTrackNum, kVideoBlockDuration));
2947 GenerateAudioStreamExpectedReads(0, 4);
2948 GenerateVideoStreamExpectedReads(0, 4);
2950 Seek(base::TimeDelta::FromMilliseconds(27300));
// After seeking, a +27300ms offset rebases data appended at 0 to 27.3s.
2952 ASSERT_TRUE(SetTimestampOffset(
2953 audio_id, base::TimeDelta::FromMilliseconds(27300)));
2954 ASSERT_TRUE(SetTimestampOffset(
2955 video_id, base::TimeDelta::FromMilliseconds(27300)));
2956 AppendCluster(audio_id, GenerateSingleStreamCluster(
2957 0, kAudioBlockDuration * 4, kAudioTrackNum, kAudioBlockDuration));
2958 AppendCluster(video_id, GenerateSingleStreamCluster(
2959 0, kVideoBlockDuration * 4, kVideoTrackNum, kVideoBlockDuration));
2960 GenerateVideoStreamExpectedReads(27300, 4);
2961 GenerateAudioStreamExpectedReads(27300, 4);
// IsParsingMediaSegment() should report true after a partial cluster append
// and false again after Abort() resets the parser.
2964 TEST_F(ChunkDemuxerTest, IsParsingMediaSegmentMidMediaSegment) {
2965 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2967 scoped_ptr<Cluster> cluster = GenerateCluster(0, 2);
2968 // Append only part of the cluster data.
2969 AppendData(cluster->data(), cluster->size() - 13);
2971 // Confirm we're in the middle of parsing a media segment.
2972 ASSERT_TRUE(demuxer_->IsParsingMediaSegment(kSourceId));
// NOTE: repaired mis-encoded "&times" HTML entity back to the original
// `&timestamp_offset_map_` argument.
2974 demuxer_->Abort(kSourceId,
2975 append_window_start_for_next_append_,
2976 append_window_end_for_next_append_,
2977 &timestamp_offset_map_[kSourceId]);
2979 // After Abort(), parsing should no longer be in the middle of a media
// segment.
2981 ASSERT_FALSE(demuxer_->IsParsingMediaSegment(kSourceId));
#if defined(USE_PROPRIETARY_CODECS)
#if defined(ENABLE_MPEG2TS_STREAM_PARSER)
// Verifies that Abort() on the MPEG2-TS parser flushes the pending buffers,
// extending the buffered range without changing its start.
TEST_F(ChunkDemuxerTest, EmitBuffersDuringAbort) {
  EXPECT_CALL(*this, DemuxerOpened());
  demuxer_->Initialize(
      &host_, CreateInitDoneCB(kInfiniteDuration(), PIPELINE_OK), true);
  EXPECT_EQ(ChunkDemuxer::kOk, AddIdForMp2tSource(kSourceId));

  // DTS/PTS derived using dvbsnoop -s ts -if bear-1280x720.ts -tssubdecode
  // Video: first PES:
  //        PTS: 126912 (0x0001efc0)  [= 90 kHz-Timestamp: 0:00:01.4101]
  //        DTS: 123909 (0x0001e405)  [= 90 kHz-Timestamp: 0:00:01.3767]
  // Audio: first PES:
  //        PTS: 126000 (0x0001ec30)  [= 90 kHz-Timestamp: 0:00:01.4000]
  //        DTS: 123910 (0x0001e406)  [= 90 kHz-Timestamp: 0:00:01.3767]
  // Video: last PES:
  //        PTS: 370155 (0x0005a5eb)  [= 90 kHz-Timestamp: 0:00:04.1128]
  //        DTS: 367152 (0x00059a30)  [= 90 kHz-Timestamp: 0:00:04.0794]
  // Audio: last PES:
  //        PTS: 353788 (0x000565fc)  [= 90 kHz-Timestamp: 0:00:03.9309]

  scoped_refptr<DecoderBuffer> buffer = ReadTestDataFile("bear-1280x720.ts");
  AppendData(kSourceId, buffer->data(), buffer->data_size());

  // Confirm we're in the middle of parsing a media segment.
  ASSERT_TRUE(demuxer_->IsParsingMediaSegment(kSourceId));

  // Abort on the Mpeg2 TS parser triggers the emission of the last video
  // buffer which is pending in the stream parser.
  Ranges<base::TimeDelta> range_before_abort =
      demuxer_->GetBufferedRanges(kSourceId);
  demuxer_->Abort(kSourceId,
                  append_window_start_for_next_append_,
                  append_window_end_for_next_append_,
                  &timestamp_offset_map_[kSourceId]);
  Ranges<base::TimeDelta> range_after_abort =
      demuxer_->GetBufferedRanges(kSourceId);

  ASSERT_EQ(range_before_abort.size(), 1u);
  ASSERT_EQ(range_after_abort.size(), 1u);
  EXPECT_EQ(range_after_abort.start(0), range_before_abort.start(0));
  EXPECT_GT(range_after_abort.end(0), range_before_abort.end(0));
}
#endif
#endif
#if defined(USE_PROPRIETARY_CODECS)
#if defined(ENABLE_MPEG2TS_STREAM_PARSER)
// Verifies that a pending seek can be satisfied by the buffers the MPEG2-TS
// parser emits when Abort() flushes it.
TEST_F(ChunkDemuxerTest, SeekCompleteDuringAbort) {
  EXPECT_CALL(*this, DemuxerOpened());
  demuxer_->Initialize(
      &host_, CreateInitDoneCB(kInfiniteDuration(), PIPELINE_OK), true);
  EXPECT_EQ(ChunkDemuxer::kOk, AddIdForMp2tSource(kSourceId));

  // DTS/PTS derived using dvbsnoop -s ts -if bear-1280x720.ts -tssubdecode
  // Video: first PES:
  //        PTS: 126912 (0x0001efc0)  [= 90 kHz-Timestamp: 0:00:01.4101]
  //        DTS: 123909 (0x0001e405)  [= 90 kHz-Timestamp: 0:00:01.3767]
  // Audio: first PES:
  //        PTS: 126000 (0x0001ec30)  [= 90 kHz-Timestamp: 0:00:01.4000]
  //        DTS: 123910 (0x0001e406)  [= 90 kHz-Timestamp: 0:00:01.3767]
  // Video: last PES:
  //        PTS: 370155 (0x0005a5eb)  [= 90 kHz-Timestamp: 0:00:04.1128]
  //        DTS: 367152 (0x00059a30)  [= 90 kHz-Timestamp: 0:00:04.0794]
  // Audio: last PES:
  //        PTS: 353788 (0x000565fc)  [= 90 kHz-Timestamp: 0:00:03.9309]

  scoped_refptr<DecoderBuffer> buffer = ReadTestDataFile("bear-1280x720.ts");
  AppendData(kSourceId, buffer->data(), buffer->data_size());

  // Confirm we're in the middle of parsing a media segment.
  ASSERT_TRUE(demuxer_->IsParsingMediaSegment(kSourceId));

  // Seek to a time corresponding to buffers that will be emitted during the
  // abort.
  Seek(base::TimeDelta::FromMilliseconds(4110));

  // Abort on the Mpeg2 TS parser triggers the emission of the last video
  // buffer which is pending in the stream parser.
  demuxer_->Abort(kSourceId,
                  append_window_start_for_next_append_,
                  append_window_end_for_next_append_,
                  &timestamp_offset_map_[kSourceId]);
}
#endif
#endif
3070 TEST_F(ChunkDemuxerTest, WebMIsParsingMediaSegmentDetection) {
3071 const uint8 kBuffer[] = {
3072 0x1F, 0x43, 0xB6, 0x75, 0x83, // CLUSTER (size = 3)
3073 0xE7, 0x81, 0x01, // Cluster TIMECODE (value = 1)
3075 0x1F, 0x43, 0xB6, 0x75, 0xFF, // CLUSTER (size = unknown; really 3 due to:)
3076 0xE7, 0x81, 0x02, // Cluster TIMECODE (value = 2)
3077 /* e.g. put some blocks here... */
3078 0x1A, 0x45, 0xDF, 0xA3, 0x8A, // EBMLHEADER (size = 10, not fully appended)
3081 // This array indicates expected return value of IsParsingMediaSegment()
3082 // following each incrementally appended byte in |kBuffer|.
3083 const bool kExpectedReturnValues[] = {
3084 false, false, false, false, true,
3087 false, false, false, false, true,
3090 true, true, true, true, false,
3093 COMPILE_ASSERT(arraysize(kBuffer) == arraysize(kExpectedReturnValues),
3094 test_arrays_out_of_sync);
3095 COMPILE_ASSERT(arraysize(kBuffer) == sizeof(kBuffer), not_one_byte_per_index);
3097 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3099 for (size_t i = 0; i < sizeof(kBuffer); i++) {
3100 DVLOG(3) << "Appending and testing index " << i;
3101 AppendData(kBuffer + i, 1);
3102 bool expected_return_value = kExpectedReturnValues[i];
3103 EXPECT_EQ(expected_return_value,
3104 demuxer_->IsParsingMediaSegment(kSourceId));
3108 TEST_F(ChunkDemuxerTest, DurationChange) {
3109 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3110 const int kStreamDuration = kDefaultDuration().InMilliseconds();
3112 // Add data leading up to the currently set duration.
3113 AppendCluster(GenerateCluster(kStreamDuration - kAudioBlockDuration,
3114 kStreamDuration - kVideoBlockDuration,
3117 CheckExpectedRanges(kSourceId, "{ [201191,201224) }");
3119 // Add data beginning at the currently set duration and expect a new duration
3120 // to be signaled. Note that the last video block will have a higher end
3121 // timestamp than the last audio block.
3122 const int kNewStreamDurationVideo = kStreamDuration + kVideoBlockDuration;
3123 EXPECT_CALL(host_, SetDuration(
3124 base::TimeDelta::FromMilliseconds(kNewStreamDurationVideo)));
3125 AppendCluster(GenerateCluster(kDefaultDuration().InMilliseconds(), 2));
3127 CheckExpectedRanges(kSourceId, "{ [201191,201247) }");
3129 // Add more data to the end of each media type. Note that the last audio block
3130 // will have a higher end timestamp than the last video block.
3131 const int kFinalStreamDuration = kStreamDuration + kAudioBlockDuration * 3;
3132 EXPECT_CALL(host_, SetDuration(
3133 base::TimeDelta::FromMilliseconds(kFinalStreamDuration)));
3134 AppendCluster(GenerateCluster(kStreamDuration + kAudioBlockDuration,
3135 kStreamDuration + kVideoBlockDuration,
3138 // See that the range has increased appropriately (but not to the full
3139 // duration of 201293, since there is not enough video appended for that).
3140 CheckExpectedRanges(kSourceId, "{ [201191,201290) }");
3143 TEST_F(ChunkDemuxerTest, DurationChangeTimestampOffset) {
3144 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3145 ASSERT_TRUE(SetTimestampOffset(kSourceId, kDefaultDuration()));
3146 EXPECT_CALL(host_, SetDuration(
3147 kDefaultDuration() + base::TimeDelta::FromMilliseconds(
3148 kVideoBlockDuration * 2)));
3149 AppendCluster(GenerateCluster(0, 4));
3152 TEST_F(ChunkDemuxerTest, EndOfStreamTruncateDuration) {
3153 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3155 AppendCluster(kDefaultFirstCluster());
3157 EXPECT_CALL(host_, SetDuration(
3158 base::TimeDelta::FromMilliseconds(kDefaultFirstClusterEndTimestamp)));
3159 MarkEndOfStream(PIPELINE_OK);
3163 TEST_F(ChunkDemuxerTest, ZeroLengthAppend) {
3164 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3165 AppendData(NULL, 0);
3168 TEST_F(ChunkDemuxerTest, AppendAfterEndOfStream) {
3169 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3171 EXPECT_CALL(host_, SetDuration(_))
3172 .Times(AnyNumber());
3174 AppendCluster(kDefaultFirstCluster());
3175 MarkEndOfStream(PIPELINE_OK);
3177 demuxer_->UnmarkEndOfStream();
3179 AppendCluster(kDefaultSecondCluster());
3180 MarkEndOfStream(PIPELINE_OK);
3183 // Test receiving a Shutdown() call before we get an Initialize()
3184 // call. This can happen if video element gets destroyed before
3185 // the pipeline has a chance to initialize the demuxer.
3186 TEST_F(ChunkDemuxerTest, Shutdown_BeforeInitialize) {
3187 demuxer_->Shutdown();
3188 demuxer_->Initialize(
3189 &host_, CreateInitDoneCB(DEMUXER_ERROR_COULD_NOT_OPEN), true);
3190 message_loop_.RunUntilIdle();
3193 // Verifies that signaling end of stream while stalled at a gap
3194 // boundary does not trigger end of stream buffers to be returned.
3195 TEST_F(ChunkDemuxerTest, EndOfStreamWhileWaitingForGapToBeFilled) {
3196 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3198 AppendCluster(0, 10);
3199 AppendCluster(300, 10);
3200 CheckExpectedRanges(kSourceId, "{ [0,132) [300,432) }");
3202 GenerateExpectedReads(0, 10);
3204 bool audio_read_done = false;
3205 bool video_read_done = false;
3206 ReadAudio(base::Bind(&OnReadDone,
3207 base::TimeDelta::FromMilliseconds(138),
3209 ReadVideo(base::Bind(&OnReadDone,
3210 base::TimeDelta::FromMilliseconds(138),
3213 // Verify that the reads didn't complete
3214 EXPECT_FALSE(audio_read_done);
3215 EXPECT_FALSE(video_read_done);
3217 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(438)));
3218 MarkEndOfStream(PIPELINE_OK);
3220 // Verify that the reads still haven't completed.
3221 EXPECT_FALSE(audio_read_done);
3222 EXPECT_FALSE(video_read_done);
3224 demuxer_->UnmarkEndOfStream();
3226 AppendCluster(138, 22);
3228 message_loop_.RunUntilIdle();
3230 CheckExpectedRanges(kSourceId, "{ [0,435) }");
3232 // Verify that the reads have completed.
3233 EXPECT_TRUE(audio_read_done);
3234 EXPECT_TRUE(video_read_done);
3236 // Read the rest of the buffers.
3237 GenerateExpectedReads(161, 171, 20);
3239 // Verify that reads block because the append cleared the end of stream state.
3240 audio_read_done = false;
3241 video_read_done = false;
3242 ReadAudio(base::Bind(&OnReadDone_EOSExpected,
3244 ReadVideo(base::Bind(&OnReadDone_EOSExpected,
3247 // Verify that the reads don't complete.
3248 EXPECT_FALSE(audio_read_done);
3249 EXPECT_FALSE(video_read_done);
3251 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(437)));
3252 MarkEndOfStream(PIPELINE_OK);
3254 EXPECT_TRUE(audio_read_done);
3255 EXPECT_TRUE(video_read_done);
3258 TEST_F(ChunkDemuxerTest, CanceledSeekDuringInitialPreroll) {
3259 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3262 base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(200);
3263 demuxer_->CancelPendingSeek(seek_time);
3265 // Initiate the seek to the new location.
3268 // Append data to satisfy the seek.
3269 AppendCluster(seek_time.InMilliseconds(), 10);
3272 TEST_F(ChunkDemuxerTest, GCDuringSeek) {
3273 ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
3275 demuxer_->SetMemoryLimitsForTesting(5 * kBlockSize);
3277 base::TimeDelta seek_time1 = base::TimeDelta::FromMilliseconds(1000);
3278 base::TimeDelta seek_time2 = base::TimeDelta::FromMilliseconds(500);
3280 // Initiate a seek to |seek_time1|.
3283 // Append data to satisfy the first seek request.
3284 AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
3285 seek_time1.InMilliseconds(), 5);
3286 CheckExpectedRanges(kSourceId, "{ [1000,1115) }");
3288 // Signal that the second seek is starting.
3289 demuxer_->StartWaitingForSeek(seek_time2);
3291 // Append data to satisfy the second seek. This append triggers
3292 // the garbage collection logic since we set the memory limit to
3294 AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
3295 seek_time2.InMilliseconds(), 5);
3297 // Verify that the buffers that cover |seek_time2| do not get
3298 // garbage collected.
3299 CheckExpectedRanges(kSourceId, "{ [500,615) }");
3301 // Complete the seek.
3302 demuxer_->Seek(seek_time2, NewExpectedStatusCB(PIPELINE_OK));
3305 // Append more data and make sure that the blocks for |seek_time2|
3306 // don't get removed.
3308 // NOTE: The current GC algorithm tries to preserve the GOP at the
3309 // current position as well as the last appended GOP. This is
3310 // why there are 2 ranges in the expectations.
3311 AppendSingleStreamCluster(kSourceId, kAudioTrackNum, 700, 5);
3312 CheckExpectedRanges(kSourceId, "{ [500,592) [792,815) }");
3315 TEST_F(ChunkDemuxerTest, AppendWindow_Video) {
3316 ASSERT_TRUE(InitDemuxer(HAS_VIDEO));
3317 DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::VIDEO);
3319 // Set the append window to [50,280).
3320 append_window_start_for_next_append_ = base::TimeDelta::FromMilliseconds(50);
3321 append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(280);
3323 // Append a cluster that starts before and ends after the append window.
3324 AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
3325 "0K 30 60 90 120K 150 180 210 240K 270 300 330K");
3327 // Verify that GOPs that start outside the window are not included
3328 // in the buffer. Also verify that buffers that start inside the
3329 // window and extend beyond the end of the window are not included.
3330 CheckExpectedRanges(kSourceId, "{ [120,270) }");
3331 CheckExpectedBuffers(stream, "120 150 180 210 240");
3333 // Extend the append window to [50,650).
3334 append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(650);
3336 // Append more data and verify that adding buffers start at the next
3338 AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
3339 "360 390 420K 450 480 510 540K 570 600 630K");
3340 CheckExpectedRanges(kSourceId, "{ [120,270) [420,630) }");
3343 TEST_F(ChunkDemuxerTest, AppendWindow_Audio) {
3344 ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
3345 DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::AUDIO);
3347 // Set the append window to [50,280).
3348 append_window_start_for_next_append_ = base::TimeDelta::FromMilliseconds(50);
3349 append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(280);
3351 // Append a cluster that starts before and ends after the append window.
3352 AppendSingleStreamCluster(
3353 kSourceId, kAudioTrackNum,
3354 "0K 30K 60K 90K 120K 150K 180K 210K 240K 270K 300K 330K");
3356 // Verify that frames that end outside the window are not included
3357 // in the buffer. Also verify that buffers that start inside the
3358 // window and extend beyond the end of the window are not included.
3360 // The first 50ms of the range should be truncated since it overlaps
3361 // the start of the append window.
3362 CheckExpectedRanges(kSourceId, "{ [50,280) }");
3364 // The "50P" buffer is the "0" buffer marked for complete discard. The next
3365 // "50" buffer is the "30" buffer marked with 20ms of start discard.
3366 CheckExpectedBuffers(stream, "50P 50 60 90 120 150 180 210 240");
3368 // Extend the append window to [50,650).
3369 append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(650);
3371 // Append more data and verify that a new range is created.
3372 AppendSingleStreamCluster(
3373 kSourceId, kAudioTrackNum,
3374 "360K 390K 420K 450K 480K 510K 540K 570K 600K 630K");
3375 CheckExpectedRanges(kSourceId, "{ [50,280) [360,650) }");
3378 TEST_F(ChunkDemuxerTest, AppendWindow_AudioOverlapStartAndEnd) {
3379 ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
3381 // Set the append window to [10,20).
3382 append_window_start_for_next_append_ = base::TimeDelta::FromMilliseconds(10);
3383 append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(20);
3385 // Append a cluster that starts before and ends after the append window.
3386 AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "0K");
3388 // Verify the append is clipped to the append window.
3389 CheckExpectedRanges(kSourceId, "{ [10,20) }");
3392 TEST_F(ChunkDemuxerTest, AppendWindow_WebMFile_AudioOnly) {
3393 EXPECT_CALL(*this, DemuxerOpened());
3394 demuxer_->Initialize(
3396 CreateInitDoneCB(base::TimeDelta::FromMilliseconds(2744), PIPELINE_OK),
3398 ASSERT_EQ(ChunkDemuxer::kOk, AddId(kSourceId, HAS_AUDIO));
3400 // Set the append window to [50,150).
3401 append_window_start_for_next_append_ = base::TimeDelta::FromMilliseconds(50);
3402 append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(150);
3404 // Read a WebM file into memory and send the data to the demuxer. The chunk
3405 // size has been chosen carefully to ensure the preroll buffer used by the
3406 // partial append window trim must come from a previous Append() call.
3407 scoped_refptr<DecoderBuffer> buffer =
3408 ReadTestDataFile("bear-320x240-audio-only.webm");
3409 AppendDataInPieces(buffer->data(), buffer->data_size(), 128);
3411 DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::AUDIO);
3412 CheckExpectedBuffers(stream, "50P 50 62 86 109 122 125 128");
3415 TEST_F(ChunkDemuxerTest, AppendWindow_AudioConfigUpdateRemovesPreroll) {
3416 EXPECT_CALL(*this, DemuxerOpened());
3417 demuxer_->Initialize(
3419 CreateInitDoneCB(base::TimeDelta::FromMilliseconds(2744), PIPELINE_OK),
3421 ASSERT_EQ(ChunkDemuxer::kOk, AddId(kSourceId, HAS_AUDIO));
3423 // Set the append window such that the first file is completely before the
3425 // TODO(wolenetz/acolwell): Update this duration once the files are fixed to
3426 // have the correct duration in their init segments, and the
3427 // CreateInitDoneCB() call, above, is fixed to used that duration. See
3428 // http://crbug.com/354284.
3429 const base::TimeDelta duration_1 = base::TimeDelta::FromMilliseconds(2746);
3430 append_window_start_for_next_append_ = duration_1;
3432 // Read a WebM file into memory and append the data.
3433 scoped_refptr<DecoderBuffer> buffer =
3434 ReadTestDataFile("bear-320x240-audio-only.webm");
3435 AppendDataInPieces(buffer->data(), buffer->data_size(), 512);
3436 CheckExpectedRanges(kSourceId, "{ }");
3438 DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::AUDIO);
3439 AudioDecoderConfig config_1 = stream->audio_decoder_config();
3441 // Read a second WebM with a different config in and append the data.
3442 scoped_refptr<DecoderBuffer> buffer2 =
3443 ReadTestDataFile("bear-320x240-audio-only-48khz.webm");
3444 EXPECT_CALL(host_, SetDuration(_)).Times(AnyNumber());
3445 ASSERT_TRUE(SetTimestampOffset(kSourceId, duration_1));
3446 AppendDataInPieces(buffer2->data(), buffer2->data_size(), 512);
3447 CheckExpectedRanges(kSourceId, "{ [2746,5519) }");
3450 ExpectConfigChanged(DemuxerStream::AUDIO);
3451 ASSERT_FALSE(config_1.Matches(stream->audio_decoder_config()));
3452 CheckExpectedBuffers(stream, "2746 2767 2789 2810");
3455 TEST_F(ChunkDemuxerTest, AppendWindow_Text) {
3456 DemuxerStream* text_stream = NULL;
3457 EXPECT_CALL(host_, AddTextStream(_, _))
3458 .WillOnce(SaveArg<0>(&text_stream));
3459 ASSERT_TRUE(InitDemuxer(HAS_VIDEO | HAS_TEXT));
3460 DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
3462 // Set the append window to [20,280).
3463 append_window_start_for_next_append_ = base::TimeDelta::FromMilliseconds(20);
3464 append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(280);
3466 // Append a cluster that starts before and ends after the append
3469 MuxedStreamInfo(kVideoTrackNum,
3470 "0K 30 60 90 120K 150 180 210 240K 270 300 330K"),
3471 MuxedStreamInfo(kTextTrackNum, "0K 100K 200K 300K" ));
3473 // Verify that text cues that start outside the window are not included
3474 // in the buffer. Also verify that cues that extend beyond the
3475 // window are not included.
3476 CheckExpectedRanges(kSourceId, "{ [100,270) }");
3477 CheckExpectedBuffers(video_stream, "120 150 180 210 240");
3478 CheckExpectedBuffers(text_stream, "100");
3480 // Extend the append window to [20,650).
3481 append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(650);
3483 // Append more data and verify that a new range is created.
3485 MuxedStreamInfo(kVideoTrackNum,
3486 "360 390 420K 450 480 510 540K 570 600 630K"),
3487 MuxedStreamInfo(kTextTrackNum, "400K 500K 600K 700K" ));
3488 CheckExpectedRanges(kSourceId, "{ [100,270) [400,630) }");
3490 // Seek to the new range and verify that the expected buffers are returned.
3491 Seek(base::TimeDelta::FromMilliseconds(420));
3492 CheckExpectedBuffers(video_stream, "420 450 480 510 540 570 600");
3493 CheckExpectedBuffers(text_stream, "400 500");
3496 TEST_F(ChunkDemuxerTest, StartWaitingForSeekAfterParseError) {
3497 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3498 EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
3500 base::TimeDelta seek_time = base::TimeDelta::FromSeconds(50);
3501 demuxer_->StartWaitingForSeek(seek_time);
3504 TEST_F(ChunkDemuxerTest, Remove_AudioVideoText) {
3505 DemuxerStream* text_stream = NULL;
3506 EXPECT_CALL(host_, AddTextStream(_, _))
3507 .WillOnce(SaveArg<0>(&text_stream));
3508 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));
3510 DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
3511 DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
3514 MuxedStreamInfo(kAudioTrackNum, "0K 20K 40K 60K 80K 100K 120K 140K"),
3515 MuxedStreamInfo(kVideoTrackNum, "0K 30 60 90 120K 150 180"),
3516 MuxedStreamInfo(kTextTrackNum, "0K 100K 200K"));
3518 CheckExpectedBuffers(audio_stream, "0 20 40 60 80 100 120 140");
3519 CheckExpectedBuffers(video_stream, "0 30 60 90 120 150 180");
3520 CheckExpectedBuffers(text_stream, "0 100 200");
3522 // Remove the buffers that were added.
3523 demuxer_->Remove(kSourceId, base::TimeDelta(),
3524 base::TimeDelta::FromMilliseconds(300));
3526 // Verify that all the appended data has been removed.
3527 CheckExpectedRanges(kSourceId, "{ }");
3529 // Append new buffers that are clearly different than the original
3530 // ones and verify that only the new buffers are returned.
3532 MuxedStreamInfo(kAudioTrackNum, "1K 21K 41K 61K 81K 101K 121K 141K"),
3533 MuxedStreamInfo(kVideoTrackNum, "1K 31 61 91 121K 151 181"),
3534 MuxedStreamInfo(kTextTrackNum, "1K 101K 201K"));
3536 Seek(base::TimeDelta());
3537 CheckExpectedBuffers(audio_stream, "1 21 41 61 81 101 121 141");
3538 CheckExpectedBuffers(video_stream, "1 31 61 91 121 151 181");
3539 CheckExpectedBuffers(text_stream, "1 101 201");
3542 TEST_F(ChunkDemuxerTest, Remove_StartAtDuration) {
3543 ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
3544 DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
3546 // Set the duration to something small so that the append that
3547 // follows updates the duration to reflect the end of the appended data.
3548 EXPECT_CALL(host_, SetDuration(
3549 base::TimeDelta::FromMilliseconds(1)));
3550 demuxer_->SetDuration(0.001);
3552 EXPECT_CALL(host_, SetDuration(
3553 base::TimeDelta::FromMilliseconds(160)));
3554 AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
3555 "0K 20K 40K 60K 80K 100K 120K 140K");
3557 CheckExpectedRanges(kSourceId, "{ [0,160) }");
3558 CheckExpectedBuffers(audio_stream, "0 20 40 60 80 100 120 140");
3560 demuxer_->Remove(kSourceId,
3561 base::TimeDelta::FromSecondsD(demuxer_->GetDuration()),
3562 kInfiniteDuration());
3564 Seek(base::TimeDelta());
3565 CheckExpectedRanges(kSourceId, "{ [0,160) }");
3566 CheckExpectedBuffers(audio_stream, "0 20 40 60 80 100 120 140");
3569 // Verifies that a Seek() will complete without text cues for
3570 // the seek point and will return cues after the seek position
3571 // when they are eventually appended.
3572 TEST_F(ChunkDemuxerTest, SeekCompletesWithoutTextCues) {
3573 DemuxerStream* text_stream = NULL;
3574 EXPECT_CALL(host_, AddTextStream(_, _))
3575 .WillOnce(SaveArg<0>(&text_stream));
3576 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));
3578 DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
3579 DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
3581 base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(120);
3582 bool seek_cb_was_called = false;
3583 demuxer_->StartWaitingForSeek(seek_time);
3584 demuxer_->Seek(seek_time,
3585 base::Bind(OnSeekDone_OKExpected, &seek_cb_was_called));
3586 message_loop_.RunUntilIdle();
3588 EXPECT_FALSE(seek_cb_was_called);
3590 bool text_read_done = false;
3591 text_stream->Read(base::Bind(&OnReadDone,
3592 base::TimeDelta::FromMilliseconds(225),
3595 // Append audio & video data so the seek completes.
3597 MuxedStreamInfo(kAudioTrackNum,
3598 "0K 20K 40K 60K 80K 100K 120K 140K 160K 180K 200K"),
3599 MuxedStreamInfo(kVideoTrackNum, "0K 30 60 90 120K 150 180 210"));
3601 message_loop_.RunUntilIdle();
3602 EXPECT_TRUE(seek_cb_was_called);
3603 EXPECT_FALSE(text_read_done);
3605 // Read some audio & video buffers to further verify seek completion.
3606 CheckExpectedBuffers(audio_stream, "120 140");
3607 CheckExpectedBuffers(video_stream, "120 150");
3609 EXPECT_FALSE(text_read_done);
3611 // Append text cues that start after the seek point and verify that
3612 // they are returned by Read() calls.
3614 MuxedStreamInfo(kAudioTrackNum, "220K 240K 260K 280K"),
3615 MuxedStreamInfo(kVideoTrackNum, "240K 270 300 330"),
3616 MuxedStreamInfo(kTextTrackNum, "225K 275K 325K"));
3618 message_loop_.RunUntilIdle();
3619 EXPECT_TRUE(text_read_done);
3621 // NOTE: we start at 275 here because the buffer at 225 was returned
3622 // to the pending read initiated above.
3623 CheckExpectedBuffers(text_stream, "275 325");
3625 // Verify that audio & video streams continue to return expected values.
3626 CheckExpectedBuffers(audio_stream, "160 180");
3627 CheckExpectedBuffers(video_stream, "180 210");
3630 TEST_F(ChunkDemuxerTest, ClusterWithUnknownSize) {
3631 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3633 AppendCluster(GenerateCluster(0, 0, 4, true));
3634 CheckExpectedRanges(kSourceId, "{ [0,46) }");
3636 // A new cluster indicates end of the previous cluster with unknown size.
3637 AppendCluster(GenerateCluster(46, 66, 5, true));
3638 CheckExpectedRanges(kSourceId, "{ [0,115) }");
3641 TEST_F(ChunkDemuxerTest, CuesBetweenClustersWithUnknownSize) {
3642 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3644 // Add two clusters separated by Cues in a single Append() call.
3645 scoped_ptr<Cluster> cluster = GenerateCluster(0, 0, 4, true);
3646 std::vector<uint8> data(cluster->data(), cluster->data() + cluster->size());
3647 data.insert(data.end(), kCuesHeader, kCuesHeader + sizeof(kCuesHeader));
3648 cluster = GenerateCluster(46, 66, 5, true);
3649 data.insert(data.end(), cluster->data(), cluster->data() + cluster->size());
3650 AppendData(&*data.begin(), data.size());
3652 CheckExpectedRanges(kSourceId, "{ [0,115) }");
3655 TEST_F(ChunkDemuxerTest, CuesBetweenClusters) {
3656 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3658 AppendCluster(GenerateCluster(0, 0, 4));
3659 AppendData(kCuesHeader, sizeof(kCuesHeader));
3660 AppendCluster(GenerateCluster(46, 66, 5));
3661 CheckExpectedRanges(kSourceId, "{ [0,115) }");
3664 } // namespace media