1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include <algorithm>
#include <queue>
6
7 #include "base/bind.h"
8 #include "base/message_loop/message_loop.h"
9 #include "base/strings/string_number_conversions.h"
10 #include "base/strings/string_split.h"
11 #include "base/strings/string_util.h"
12 #include "media/base/audio_decoder_config.h"
13 #include "media/base/decoder_buffer.h"
14 #include "media/base/decrypt_config.h"
15 #include "media/base/mock_demuxer_host.h"
16 #include "media/base/test_data_util.h"
17 #include "media/base/test_helpers.h"
18 #include "media/filters/chunk_demuxer.h"
19 #include "media/formats/webm/cluster_builder.h"
20 #include "media/formats/webm/webm_constants.h"
21 #include "media/formats/webm/webm_crypto_helpers.h"
22 #include "testing/gtest/include/gtest/gtest.h"
23
24 using ::testing::AnyNumber;
25 using ::testing::Exactly;
26 using ::testing::InSequence;
27 using ::testing::NotNull;
28 using ::testing::Return;
29 using ::testing::SaveArg;
30 using ::testing::SetArgumentPointee;
31 using ::testing::_;
32
33 namespace media {
34
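// Header bytes for a WebM Tracks element. The 8-byte size field beginning at
// kTracksSizeOffset is patched via WriteInt64() once the size of the contained
// TrackEntry data is known.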
35 const uint8 kTracksHeader[] = {
36   0x16, 0x54, 0xAE, 0x6B,  // Tracks ID
37   0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,  // tracks(size = 0)
38 };
39
40 // WebM Block bytes that represent a VP8 keyframe.
41 const uint8 kVP8Keyframe[] = {
42   0x10, 0x00, 0x00, 0x9d, 0x01, 0x2a, 0x00, 0x10, 0x00, 0x10, 0x00
43 };
44
45 // WebM Block bytes that represent a VP8 interframe.
46 const uint8 kVP8Interframe[] = { 0x11, 0x00, 0x00 };
47
48 static const uint8 kCuesHeader[] = {
49   0x1C, 0x53, 0xBB, 0x6B,  // Cues ID
50   0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,  // cues(size = 0)
51 };
52
53 const int kTracksHeaderSize = sizeof(kTracksHeader);
54 const int kTracksSizeOffset = 4;
55
56 // The size field of the TrackEntry element in test file
57 // "webm_vorbis_track_entry" starts at index 1 and spans 8 bytes.
58 const int kAudioTrackSizeOffset = 1;
59 const int kAudioTrackSizeWidth = 8;
60 const int kAudioTrackEntryHeaderSize =
61     kAudioTrackSizeOffset + kAudioTrackSizeWidth;
62
63 // The size field of the TrackEntry element in test file "webm_vp8_track_entry"
64 // starts at index 1 and spans 8 bytes.
65 const int kVideoTrackSizeOffset = 1;
66 const int kVideoTrackSizeWidth = 8;
67 const int kVideoTrackEntryHeaderSize =
68     kVideoTrackSizeOffset + kVideoTrackSizeWidth;
69
70 const int kVideoTrackNum = 1;
71 const int kAudioTrackNum = 2;
72 const int kTextTrackNum = 3;
73 const int kAlternateTextTrackNum = 4;
74
75 const int kAudioBlockDuration = 23;
76 const int kVideoBlockDuration = 33;
77 const int kTextBlockDuration = 100;
78 const int kBlockSize = 10;
79
80 const char kSourceId[] = "SourceId";
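// The values below are derived from kDefaultFirstCluster() and
// kDefaultSecondCluster() in ChunkDemuxerTest: 46 is the end timestamp of the
// last audio block in the first cluster (23 + 23), while 66 and 132 are the
// end timestamps of the last video blocks in the first and second clusters
// (33 + 33 and 99 + 33).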
81 const char kDefaultFirstClusterRange[] = "{ [0,46) }";
82 const int kDefaultFirstClusterEndTimestamp = 66;
83 const int kDefaultSecondClusterEndTimestamp = 132;
84
85 base::TimeDelta kDefaultDuration() {
86   return base::TimeDelta::FromMilliseconds(201224);
87 }
88
89 // Writes an integer into |buffer| in the form of a vint that spans 8 bytes.
90 // The data pointed to by |buffer| must be at least 8 bytes long.
91 // |number| should be in the range 0 <= number < 0x00FFFFFFFFFFFFFF.
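// For example, WriteInt64(buffer, 0x1234) produces the bytes
// 01 00 00 00 00 00 12 34.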
92 static void WriteInt64(uint8* buffer, int64 number) {
93   DCHECK(number >= 0 && number < 0x00FFFFFFFFFFFFFFLL);
94   buffer[0] = 0x01;
95   int64 tmp = number;
96   for (int i = 7; i > 0; i--) {
97     buffer[i] = tmp & 0xff;
98     tmp >>= 8;
99   }
100 }
101
102 MATCHER_P(HasTimestamp, timestamp_in_ms, "") {
103   return arg.get() && !arg->end_of_stream() &&
104          arg->timestamp().InMilliseconds() == timestamp_in_ms;
105 }
106
107 MATCHER(IsEndOfStream, "") { return arg.get() && arg->end_of_stream(); }
108
109 static void OnReadDone(const base::TimeDelta& expected_time,
110                        bool* called,
111                        DemuxerStream::Status status,
112                        const scoped_refptr<DecoderBuffer>& buffer) {
113   EXPECT_EQ(status, DemuxerStream::kOk);
114   EXPECT_EQ(expected_time, buffer->timestamp());
115   *called = true;
116 }
117
118 static void OnReadDone_AbortExpected(
119     bool* called, DemuxerStream::Status status,
120     const scoped_refptr<DecoderBuffer>& buffer) {
121   EXPECT_EQ(status, DemuxerStream::kAborted);
122   EXPECT_EQ(NULL, buffer.get());
123   *called = true;
124 }
125
126 static void OnReadDone_EOSExpected(bool* called,
127                                    DemuxerStream::Status status,
128                                    const scoped_refptr<DecoderBuffer>& buffer) {
129   EXPECT_EQ(status, DemuxerStream::kOk);
130   EXPECT_TRUE(buffer->end_of_stream());
131   *called = true;
132 }
133
134 static void OnSeekDone_OKExpected(bool* called, PipelineStatus status) {
135   EXPECT_EQ(status, PIPELINE_OK);
136   *called = true;
137 }
138
139 static void LogFunc(const std::string& str) { DVLOG(1) << str; }
140
141 class ChunkDemuxerTest : public ::testing::Test {
142  protected:
143   enum CodecsIndex {
144     AUDIO,
145     VIDEO,
146     MAX_CODECS_INDEX
147   };
148
149   // Default cluster to append first for simple tests.
150   scoped_ptr<Cluster> kDefaultFirstCluster() {
151     return GenerateCluster(0, 4);
152   }
153
154   // Default cluster to append after kDefaultFirstCluster()
155   // has been appended. This cluster starts with blocks that
156   // have timestamps consistent with the end times of the blocks
157   // in kDefaultFirstCluster() so that these two clusters represent
158   // a continuous region.
159   scoped_ptr<Cluster> kDefaultSecondCluster() {
160     return GenerateCluster(46, 66, 5);
161   }
162
163   ChunkDemuxerTest()
164       : append_window_end_for_next_append_(kInfiniteDuration()) {
165     init_segment_received_cb_ =
166         base::Bind(&ChunkDemuxerTest::InitSegmentReceived,
167                    base::Unretained(this));
168     CreateNewDemuxer();
169   }
170
171   void CreateNewDemuxer() {
172     base::Closure open_cb =
173         base::Bind(&ChunkDemuxerTest::DemuxerOpened, base::Unretained(this));
174     Demuxer::NeedKeyCB need_key_cb =
175         base::Bind(&ChunkDemuxerTest::DemuxerNeedKey, base::Unretained(this));
176     demuxer_.reset(
177         new ChunkDemuxer(open_cb, need_key_cb, base::Bind(&LogFunc), true));
178   }
179
180   virtual ~ChunkDemuxerTest() {
181     ShutdownDemuxer();
182   }
183
184   void CreateInitSegment(int stream_flags,
185                          bool is_audio_encrypted,
186                          bool is_video_encrypted,
187                          scoped_ptr<uint8[]>* buffer,
188                          int* size) {
189     CreateInitSegmentInternal(
190         stream_flags, is_audio_encrypted, is_video_encrypted, buffer, false,
191         size);
192   }
193
194   void CreateInitSegmentWithAlternateTextTrackNum(int stream_flags,
195                                                   bool is_audio_encrypted,
196                                                   bool is_video_encrypted,
197                                                   scoped_ptr<uint8[]>* buffer,
198                                                   int* size) {
199     DCHECK(stream_flags & HAS_TEXT);
200     CreateInitSegmentInternal(
201         stream_flags, is_audio_encrypted, is_video_encrypted, buffer, true,
202         size);
203   }
204
205   void CreateInitSegmentInternal(int stream_flags,
206                                  bool is_audio_encrypted,
207                                  bool is_video_encrypted,
208                                  scoped_ptr<uint8[]>* buffer,
209                                  bool use_alternate_text_track_id,
210                                  int* size) {
211     bool has_audio = (stream_flags & HAS_AUDIO) != 0;
212     bool has_video = (stream_flags & HAS_VIDEO) != 0;
213     bool has_text = (stream_flags & HAS_TEXT) != 0;
214     scoped_refptr<DecoderBuffer> ebml_header;
215     scoped_refptr<DecoderBuffer> info;
216     scoped_refptr<DecoderBuffer> audio_track_entry;
217     scoped_refptr<DecoderBuffer> video_track_entry;
218     scoped_refptr<DecoderBuffer> audio_content_encodings;
219     scoped_refptr<DecoderBuffer> video_content_encodings;
220     scoped_refptr<DecoderBuffer> text_track_entry;
221
222     ebml_header = ReadTestDataFile("webm_ebml_element");
223
224     info = ReadTestDataFile("webm_info_element");
225
226     int tracks_element_size = 0;
227
228     if (has_audio) {
229       audio_track_entry = ReadTestDataFile("webm_vorbis_track_entry");
230       tracks_element_size += audio_track_entry->data_size();
231       if (is_audio_encrypted) {
232         audio_content_encodings = ReadTestDataFile("webm_content_encodings");
233         tracks_element_size += audio_content_encodings->data_size();
234       }
235     }
236
237     if (has_video) {
238       video_track_entry = ReadTestDataFile("webm_vp8_track_entry");
239       tracks_element_size += video_track_entry->data_size();
240       if (is_video_encrypted) {
241         video_content_encodings = ReadTestDataFile("webm_content_encodings");
242         tracks_element_size += video_content_encodings->data_size();
243       }
244     }
245
246     if (has_text) {
247       // TODO(matthewjheaney): create an abstraction to do
248       // this (http://crbug.com/321454).
249       // We need it to also handle the creation of multiple text tracks.
250       //
251       // This is the track entry for a text track,
252       // TrackEntry [AE], size=30
253       //   TrackNum [D7], size=1, val=3 (or 4 if use_alternate_text_track_id)
254       //   TrackUID [73] [C5], size=1, value=3 (must remain constant for same
255       //     track, even if TrackNum changes)
256       //   TrackType [83], size=1, val=0x11
257       //   CodecId [86], size=18, val="D_WEBVTT/SUBTITLES"
258       char str[] = "\xAE\x9E\xD7\x81\x03\x73\xC5\x81\x03"
259                    "\x83\x81\x11\x86\x92"
260                    "D_WEBVTT/SUBTITLES";
261       DCHECK_EQ(str[4], kTextTrackNum);
262       if (use_alternate_text_track_id)
263         str[4] = kAlternateTextTrackNum;
264
265       const int len = strlen(str);
266       DCHECK_EQ(len, 32);
267       const uint8* const buf = reinterpret_cast<const uint8*>(str);
268       text_track_entry = DecoderBuffer::CopyFrom(buf, len);
269       tracks_element_size += text_track_entry->data_size();
270     }
271
272     *size = ebml_header->data_size() + info->data_size() +
273         kTracksHeaderSize + tracks_element_size;
274
275     buffer->reset(new uint8[*size]);
276
277     uint8* buf = buffer->get();
278     memcpy(buf, ebml_header->data(), ebml_header->data_size());
279     buf += ebml_header->data_size();
280
281     memcpy(buf, info->data(), info->data_size());
282     buf += info->data_size();
283
284     memcpy(buf, kTracksHeader, kTracksHeaderSize);
285     WriteInt64(buf + kTracksSizeOffset, tracks_element_size);
286     buf += kTracksHeaderSize;
287
288     // TODO(xhwang): Simplify this! Probably have test data files that contain
289     // ContentEncodings directly instead of trying to create one at run-time.
290     if (has_audio) {
291       memcpy(buf, audio_track_entry->data(),
292              audio_track_entry->data_size());
293       if (is_audio_encrypted) {
294         memcpy(buf + audio_track_entry->data_size(),
295                audio_content_encodings->data(),
296                audio_content_encodings->data_size());
297         WriteInt64(buf + kAudioTrackSizeOffset,
298                    audio_track_entry->data_size() +
299                    audio_content_encodings->data_size() -
300                    kAudioTrackEntryHeaderSize);
301         buf += audio_content_encodings->data_size();
302       }
303       buf += audio_track_entry->data_size();
304     }
305
306     if (has_video) {
307       memcpy(buf, video_track_entry->data(),
308              video_track_entry->data_size());
309       if (is_video_encrypted) {
310         memcpy(buf + video_track_entry->data_size(),
311                video_content_encodings->data(),
312                video_content_encodings->data_size());
313         WriteInt64(buf + kVideoTrackSizeOffset,
314                    video_track_entry->data_size() +
315                    video_content_encodings->data_size() -
316                    kVideoTrackEntryHeaderSize);
317         buf += video_content_encodings->data_size();
318       }
319       buf += video_track_entry->data_size();
320     }
321
322     if (has_text) {
323       memcpy(buf, text_track_entry->data(),
324              text_track_entry->data_size());
325       buf += text_track_entry->data_size();
326     }
327   }
328
329   ChunkDemuxer::Status AddId() {
330     return AddId(kSourceId, HAS_AUDIO | HAS_VIDEO);
331   }
332
333   ChunkDemuxer::Status AddId(const std::string& source_id, int stream_flags) {
334     bool has_audio = (stream_flags & HAS_AUDIO) != 0;
335     bool has_video = (stream_flags & HAS_VIDEO) != 0;
336     std::vector<std::string> codecs;
337     std::string type;
338
339     if (has_audio) {
340       codecs.push_back("vorbis");
341       type = "audio/webm";
342     }
343
344     if (has_video) {
345       codecs.push_back("vp8");
346       type = "video/webm";
347     }
348
349     if (!has_audio && !has_video) {
350       return AddId(kSourceId, HAS_AUDIO | HAS_VIDEO);
351     }
352
353     return demuxer_->AddId(source_id, type, codecs);
354   }
355
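  // Adds a source id for an MPEG-2 TS stream that declares AAC ("mp4a.40.2")
  // audio and H.264 ("avc1.640028") video codecs.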
356   ChunkDemuxer::Status AddIdForMp2tSource(const std::string& source_id) {
357     std::vector<std::string> codecs;
358     std::string type = "video/mp2t";
359     codecs.push_back("mp4a.40.2");
360     codecs.push_back("avc1.640028");
361     return demuxer_->AddId(source_id, type, codecs);
362   }
363
364   void AppendData(const uint8* data, size_t length) {
365     AppendData(kSourceId, data, length);
366   }
367
368   void AppendCluster(const std::string& source_id,
369                      scoped_ptr<Cluster> cluster) {
370     AppendData(source_id, cluster->data(), cluster->size());
371   }
372
373   void AppendCluster(scoped_ptr<Cluster> cluster) {
374     AppendCluster(kSourceId, cluster.Pass());
375   }
376
377   void AppendCluster(int timecode, int block_count) {
378     AppendCluster(GenerateCluster(timecode, block_count));
379   }
380
381   void AppendSingleStreamCluster(const std::string& source_id, int track_number,
382                                  int timecode, int block_count) {
383     int block_duration = 0;
384     switch (track_number) {
385       case kVideoTrackNum:
386         block_duration = kVideoBlockDuration;
387         break;
388       case kAudioTrackNum:
389         block_duration = kAudioBlockDuration;
390         break;
391       case kTextTrackNum:  // Fall-through.
392       case kAlternateTextTrackNum:
393         block_duration = kTextBlockDuration;
394         break;
395     }
396     ASSERT_NE(block_duration, 0);
397     int end_timecode = timecode + block_count * block_duration;
398     AppendCluster(source_id,
399                   GenerateSingleStreamCluster(
400                       timecode, end_timecode, track_number, block_duration));
401   }
402
403   struct BlockInfo {
404     BlockInfo()
405         : track_number(0),
406           timestamp_in_ms(0),
407           flags(0),
408           duration(0) {
409     }
410
411     BlockInfo(int tn, int ts, int f, int d)
412         : track_number(tn),
413           timestamp_in_ms(ts),
414           flags(f),
415           duration(d) {
416     }
417
418     int track_number;
419     int timestamp_in_ms;
420     int flags;
421     int duration;
422
423     bool operator< (const BlockInfo& rhs) const {
424       return timestamp_in_ms < rhs.timestamp_in_ms;
425     }
426   };
427
428   // |track_number| - The track number to assign to each parsed block.
429   // |block_descriptions| - A space-delimited string of block info used to
430   //  populate |blocks|. Each block info is a timestamp in milliseconds,
431   //  optionally followed by a 'K' to indicate that the block should be
432   //  marked as a keyframe. For example, "0K 30 60" populates |blocks| with
433   //  3 BlockInfo objects: a keyframe with timestamp 0 and 2 non-keyframes
434   //  at 30ms and 60ms.
435   void ParseBlockDescriptions(int track_number,
436                               const std::string& block_descriptions,
437                               std::vector<BlockInfo>* blocks) {
438     std::vector<std::string> timestamps;
439     base::SplitString(block_descriptions, ' ', &timestamps);
440
441     for (size_t i = 0; i < timestamps.size(); ++i) {
442       std::string timestamp_str = timestamps[i];
443       BlockInfo block_info;
444       block_info.track_number = track_number;
445       block_info.flags = 0;
446       block_info.duration = 0;
447
448       if (EndsWith(timestamp_str, "K", true)) {
449         block_info.flags = kWebMFlagKeyframe;
450         // Strip the trailing "K" from the token.
451         timestamp_str = timestamp_str.substr(0, timestamps[i].length() - 1);
452       }
453       CHECK(base::StringToInt(timestamp_str, &block_info.timestamp_in_ms));
454
455       if (track_number == kTextTrackNum ||
456           track_number == kAlternateTextTrackNum) {
457         block_info.duration = kTextBlockDuration;
458         ASSERT_EQ(kWebMFlagKeyframe, block_info.flags)
459             << "Text block with timestamp " << block_info.timestamp_in_ms
460             << " was not marked as a keyframe."
461             << " All text blocks must be keyframes";
462       }
463
464       if (track_number == kAudioTrackNum)
465         ASSERT_TRUE(block_info.flags & kWebMFlagKeyframe);
466
467       blocks->push_back(block_info);
468     }
469   }
470
471   scoped_ptr<Cluster> GenerateCluster(const std::vector<BlockInfo>& blocks,
472                                       bool unknown_size) {
473     DCHECK_GT(blocks.size(), 0u);
474     ClusterBuilder cb;
475
476     std::vector<uint8> data(10);
477     for (size_t i = 0; i < blocks.size(); ++i) {
478       if (i == 0)
479         cb.SetClusterTimecode(blocks[i].timestamp_in_ms);
480
481       if (blocks[i].duration) {
482         if (blocks[i].track_number == kVideoTrackNum) {
483           AddVideoBlockGroup(&cb,
484                              blocks[i].track_number, blocks[i].timestamp_in_ms,
485                              blocks[i].duration, blocks[i].flags);
486         } else {
487           cb.AddBlockGroup(blocks[i].track_number, blocks[i].timestamp_in_ms,
488                            blocks[i].duration, blocks[i].flags,
489                            &data[0], data.size());
490         }
491       } else {
492         cb.AddSimpleBlock(blocks[i].track_number, blocks[i].timestamp_in_ms,
493                           blocks[i].flags,
494                           &data[0], data.size());
495       }
496     }
497
498     return unknown_size ? cb.FinishWithUnknownSize() : cb.Finish();
499   }
500
501   scoped_ptr<Cluster> GenerateCluster(
502       std::priority_queue<BlockInfo> block_queue,
503       bool unknown_size) {
504     std::vector<BlockInfo> blocks(block_queue.size());
505     for (size_t i = block_queue.size() - 1; !block_queue.empty(); --i) {
506       blocks[i] = block_queue.top();
507       block_queue.pop();
508     }
509
510     return GenerateCluster(blocks, unknown_size);
511   }
512
513   // |block_descriptions| - The block descriptions used to construct the
514   // cluster. See the documentation for ParseBlockDescriptions() for details on
515   // the string format.
516   void AppendSingleStreamCluster(const std::string& source_id, int track_number,
517                                  const std::string& block_descriptions) {
518     std::vector<BlockInfo> blocks;
519     ParseBlockDescriptions(track_number, block_descriptions, &blocks);
520     AppendCluster(source_id, GenerateCluster(blocks, false));
521   }
522
523   struct MuxedStreamInfo {
524     MuxedStreamInfo()
525         : track_number(0),
526           block_descriptions("")
527     {}
528
529     MuxedStreamInfo(int track_num, const char* block_desc)
530         : track_number(track_num),
531           block_descriptions(block_desc) {
532     }
533
534     int track_number;
535     // The block description passed to ParseBlockDescriptions().
536     // See the documentation for that method for details on the string format.
537     const char* block_descriptions;
538   };
539
540   void AppendMuxedCluster(const MuxedStreamInfo& msi_1,
541                           const MuxedStreamInfo& msi_2) {
542     std::vector<MuxedStreamInfo> msi(2);
543     msi[0] = msi_1;
544     msi[1] = msi_2;
545     AppendMuxedCluster(msi);
546   }
547
548   void AppendMuxedCluster(const MuxedStreamInfo& msi_1,
549                           const MuxedStreamInfo& msi_2,
550                           const MuxedStreamInfo& msi_3) {
551     std::vector<MuxedStreamInfo> msi(3);
552     msi[0] = msi_1;
553     msi[1] = msi_2;
554     msi[2] = msi_3;
555     AppendMuxedCluster(msi);
556   }
557
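  // Interleaves the blocks described by each MuxedStreamInfo, ordered by
  // timestamp, into a single cluster and appends it to kSourceId.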
558   void AppendMuxedCluster(const std::vector<MuxedStreamInfo>& msi) {
559     std::priority_queue<BlockInfo> block_queue;
560     for (size_t i = 0; i < msi.size(); ++i) {
561       std::vector<BlockInfo> track_blocks;
562       ParseBlockDescriptions(msi[i].track_number, msi[i].block_descriptions,
563                              &track_blocks);
564
565       for (size_t j = 0; j < track_blocks.size(); ++j)
566         block_queue.push(track_blocks[j]);
567     }
568
569     AppendCluster(kSourceId, GenerateCluster(block_queue, false));
570   }
571
572   void AppendData(const std::string& source_id,
573                   const uint8* data, size_t length) {
574     EXPECT_CALL(host_, AddBufferedTimeRange(_, _)).Times(AnyNumber());
575
576     demuxer_->AppendData(source_id, data, length,
577                          append_window_start_for_next_append_,
578                          append_window_end_for_next_append_,
579                          &timestamp_offset_map_[source_id],
580                          init_segment_received_cb_);
581   }
582
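  // Appends |data| in small pieces so that parsing must span multiple
  // AppendData() calls (the default piece size is 7 bytes).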
583   void AppendDataInPieces(const uint8* data, size_t length) {
584     AppendDataInPieces(data, length, 7);
585   }
586
587   void AppendDataInPieces(const uint8* data, size_t length, size_t piece_size) {
588     const uint8* start = data;
589     const uint8* end = data + length;
590     while (start < end) {
591       size_t append_size = std::min(piece_size,
592                                     static_cast<size_t>(end - start));
593       AppendData(start, append_size);
594       start += append_size;
595     }
596   }
597
598   void AppendInitSegment(int stream_flags) {
599     AppendInitSegmentWithSourceId(kSourceId, stream_flags);
600   }
601
602   void AppendInitSegmentWithSourceId(const std::string& source_id,
603                                      int stream_flags) {
604     AppendInitSegmentWithEncryptedInfo(source_id, stream_flags, false, false);
605   }
606
607   void AppendInitSegmentWithEncryptedInfo(const std::string& source_id,
608                                           int stream_flags,
609                                           bool is_audio_encrypted,
610                                           bool is_video_encrypted) {
611     scoped_ptr<uint8[]> info_tracks;
612     int info_tracks_size = 0;
613     CreateInitSegment(stream_flags,
614                       is_audio_encrypted, is_video_encrypted,
615                       &info_tracks, &info_tracks_size);
616     AppendData(source_id, info_tracks.get(), info_tracks_size);
617   }
618
619   void AppendGarbage() {
620     // Fill up an array with gibberish.
621     int garbage_cluster_size = 10;
622     scoped_ptr<uint8[]> garbage_cluster(new uint8[garbage_cluster_size]);
623     for (int i = 0; i < garbage_cluster_size; ++i)
624       garbage_cluster[i] = i;
625     AppendData(garbage_cluster.get(), garbage_cluster_size);
626   }
627
628   void InitDoneCalled(PipelineStatus expected_status,
629                       PipelineStatus status) {
630     EXPECT_EQ(status, expected_status);
631   }
632
633   void AppendEmptyCluster(int timecode) {
634     AppendCluster(GenerateEmptyCluster(timecode));
635   }
636
637   PipelineStatusCB CreateInitDoneCB(const base::TimeDelta& expected_duration,
638                                     PipelineStatus expected_status) {
639     if (expected_duration != kNoTimestamp())
640       EXPECT_CALL(host_, SetDuration(expected_duration));
641     return CreateInitDoneCB(expected_status);
642   }
643
644   PipelineStatusCB CreateInitDoneCB(PipelineStatus expected_status) {
645     return base::Bind(&ChunkDemuxerTest::InitDoneCalled,
646                       base::Unretained(this),
647                       expected_status);
648   }
649
650   enum StreamFlags {
651     HAS_AUDIO = 1 << 0,
652     HAS_VIDEO = 1 << 1,
653     HAS_TEXT = 1 << 2
654   };
655
656   bool InitDemuxer(int stream_flags) {
657     return InitDemuxerWithEncryptionInfo(stream_flags, false, false);
658   }
659
660   bool InitDemuxerWithEncryptionInfo(
661       int stream_flags, bool is_audio_encrypted, bool is_video_encrypted) {
662
663     PipelineStatus expected_status =
664         (stream_flags != 0) ? PIPELINE_OK : DEMUXER_ERROR_COULD_NOT_OPEN;
665
666     base::TimeDelta expected_duration = kNoTimestamp();
667     if (expected_status == PIPELINE_OK)
668       expected_duration = kDefaultDuration();
669
670     EXPECT_CALL(*this, DemuxerOpened());
671
672     // Add this expectation prior to CreateInitDoneCB() because InSequence
673     // tests require the init segment to be received before the duration is
674     // set. Also, only expect an InitSegmentReceived() callback if the init
675     // segment actually contains a track.
676     if (stream_flags != 0)
677       EXPECT_CALL(*this, InitSegmentReceived());
678
679     demuxer_->Initialize(
680         &host_, CreateInitDoneCB(expected_duration, expected_status), true);
681
682     if (AddId(kSourceId, stream_flags) != ChunkDemuxer::kOk)
683       return false;
684
685     AppendInitSegmentWithEncryptedInfo(
686         kSourceId, stream_flags,
687         is_audio_encrypted, is_video_encrypted);
688     return true;
689   }
690
691   bool InitDemuxerAudioAndVideoSourcesText(const std::string& audio_id,
692                                            const std::string& video_id,
693                                            bool has_text) {
694     EXPECT_CALL(*this, DemuxerOpened());
695     demuxer_->Initialize(
696         &host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
697
698     if (AddId(audio_id, HAS_AUDIO) != ChunkDemuxer::kOk)
699       return false;
700     if (AddId(video_id, HAS_VIDEO) != ChunkDemuxer::kOk)
701       return false;
702
703     int audio_flags = HAS_AUDIO;
704     int video_flags = HAS_VIDEO;
705
706     if (has_text) {
707       audio_flags |= HAS_TEXT;
708       video_flags |= HAS_TEXT;
709     }
710
711     EXPECT_CALL(*this, InitSegmentReceived());
712     AppendInitSegmentWithSourceId(audio_id, audio_flags);
713     EXPECT_CALL(*this, InitSegmentReceived());
714     AppendInitSegmentWithSourceId(video_id, video_flags);
715     return true;
716   }
717
718   bool InitDemuxerAudioAndVideoSources(const std::string& audio_id,
719                                        const std::string& video_id) {
720     return InitDemuxerAudioAndVideoSourcesText(audio_id, video_id, false);
721   }
722
723   // Initializes the demuxer with data from 2 files with different
724   // decoder configurations. This is used to test the decoder config change
725   // logic.
726   //
727   // bear-320x240.webm VideoDecoderConfig returns 320x240 for its natural_size()
728   // bear-640x360.webm VideoDecoderConfig returns 640x360 for its natural_size()
729   // The resulting video stream returns data from each file for the following
730   // time ranges.
731   // bear-320x240.webm : [0-501)       [801-2736)
732   // bear-640x360.webm :       [527-793)
733   //
734   // bear-320x240.webm AudioDecoderConfig returns 3863 for its extra_data_size()
735   // bear-640x360.webm AudioDecoderConfig returns 3935 for its extra_data_size()
736   // The resulting audio stream returns data from each file for the following
737   // time ranges.
738   // bear-320x240.webm : [0-524)       [779-2736)
739   // bear-640x360.webm :       [527-759)
740   bool InitDemuxerWithConfigChangeData() {
741     scoped_refptr<DecoderBuffer> bear1 = ReadTestDataFile("bear-320x240.webm");
742     scoped_refptr<DecoderBuffer> bear2 = ReadTestDataFile("bear-640x360.webm");
743
744     EXPECT_CALL(*this, DemuxerOpened());
745
746     // Add this expectation prior to CreateInitDoneCB() because InSequence
747     // tests require the init segment to be received before the duration is set.
748     EXPECT_CALL(*this, InitSegmentReceived());
749     demuxer_->Initialize(
750         &host_, CreateInitDoneCB(base::TimeDelta::FromMilliseconds(2744),
751                                  PIPELINE_OK), true);
752
753     if (AddId(kSourceId, HAS_AUDIO | HAS_VIDEO) != ChunkDemuxer::kOk)
754       return false;
755
756     // Append the whole bear1 file.
757     // TODO(wolenetz/acolwell): Remove this extra SetDuration expectation once
758     // the files are fixed to have the correct duration in their init segments,
759     // and the CreateInitDoneCB() call, above, is fixed to used that duration.
760     // See http://crbug.com/354284.
761     EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(2746)));
762     AppendData(bear1->data(), bear1->data_size());
763     // Last audio frame has timestamp 2721 and duration 24 (estimated from max
764     // seen so far for audio track).
765     // Last video frame has timestamp 2703 and duration 33 (from TrackEntry
766     // DefaultDuration for video track).
767     CheckExpectedRanges(kSourceId, "{ [0,2736) }");
768
769     // Append initialization segment for bear2.
770     // Note: Offsets here and below are derived from
771     // media/test/data/bear-640x360-manifest.js and
772     // media/test/data/bear-320x240-manifest.js which were
773     // generated from media/test/data/bear-640x360.webm and
774     // media/test/data/bear-320x240.webm respectively.
775     EXPECT_CALL(*this, InitSegmentReceived());
776     AppendData(bear2->data(), 4340);
777
778     // Append a media segment that goes from [0.527000, 1.014000).
779     AppendData(bear2->data() + 55290, 18785);
780     CheckExpectedRanges(kSourceId, "{ [0,1027) [1201,2736) }");
781
782     // Append initialization segment for bear1 & fill gap with [779-1197)
783     // segment.
784     EXPECT_CALL(*this, InitSegmentReceived());
785     AppendData(bear1->data(), 4370);
786     AppendData(bear1->data() + 72737, 28183);
787     CheckExpectedRanges(kSourceId, "{ [0,2736) }");
788
789     MarkEndOfStream(PIPELINE_OK);
790     return true;
791   }
792
793   void ShutdownDemuxer() {
794     if (demuxer_) {
795       demuxer_->Shutdown();
796       message_loop_.RunUntilIdle();
797     }
798   }
799
800   void AddSimpleBlock(ClusterBuilder* cb, int track_num, int64 timecode) {
801     uint8 data[] = { 0x00 };
802     cb->AddSimpleBlock(track_num, timecode, 0, data, sizeof(data));
803   }
804
805   scoped_ptr<Cluster> GenerateCluster(int timecode, int block_count) {
806     return GenerateCluster(timecode, timecode, block_count);
807   }
808
809   void AddVideoBlockGroup(ClusterBuilder* cb, int track_num, int64 timecode,
810                           int duration, int flags) {
811     const uint8* data =
812         (flags & kWebMFlagKeyframe) != 0 ? kVP8Keyframe : kVP8Interframe;
813     int size = (flags & kWebMFlagKeyframe) != 0 ? sizeof(kVP8Keyframe) :
814         sizeof(kVP8Interframe);
815     cb->AddBlockGroup(track_num, timecode, duration, flags, data, size);
816   }
817
818   scoped_ptr<Cluster> GenerateCluster(int first_audio_timecode,
819                                       int first_video_timecode,
820                                       int block_count) {
821     return GenerateCluster(first_audio_timecode, first_video_timecode,
822                            block_count, false);
823   }
824   scoped_ptr<Cluster> GenerateCluster(int first_audio_timecode,
825                                       int first_video_timecode,
826                                       int block_count,
827                                       bool unknown_size) {
828     CHECK_GT(block_count, 0);
829
830     std::priority_queue<BlockInfo> block_queue;
831
832     if (block_count == 1) {
833       block_queue.push(BlockInfo(kAudioTrackNum,
834                                  first_audio_timecode,
835                                  kWebMFlagKeyframe,
836                                  kAudioBlockDuration));
837       return GenerateCluster(block_queue, unknown_size);
838     }
839
840     int audio_timecode = first_audio_timecode;
841     int video_timecode = first_video_timecode;
842
843     // Create simple blocks for everything except the last 2 blocks.
844     // The first video frame must be a keyframe.
845     uint8 video_flag = kWebMFlagKeyframe;
846     for (int i = 0; i < block_count - 2; i++) {
847       if (audio_timecode <= video_timecode) {
848         block_queue.push(BlockInfo(kAudioTrackNum,
849                                    audio_timecode,
850                                    kWebMFlagKeyframe,
851                                    0));
852         audio_timecode += kAudioBlockDuration;
853         continue;
854       }
855
856       block_queue.push(BlockInfo(kVideoTrackNum,
857                                  video_timecode,
858                                  video_flag,
859                                  0));
860       video_timecode += kVideoBlockDuration;
861       video_flag = 0;
862     }
863
864     // Make the last 2 blocks BlockGroups so that they don't get delayed by the
865     // block duration calculation logic.
866     block_queue.push(BlockInfo(kAudioTrackNum,
867                                audio_timecode,
868                                kWebMFlagKeyframe,
869                                kAudioBlockDuration));
870     block_queue.push(BlockInfo(kVideoTrackNum,
871                                video_timecode,
872                                video_flag,
873                                kVideoBlockDuration));
874
875     return GenerateCluster(block_queue, unknown_size);
876   }
877
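  // Generates a cluster of keyframe blocks for a single track covering
  // [timecode, end_timecode). All blocks are SimpleBlocks except the last,
  // which is a BlockGroup carrying an explicit |block_duration|.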
878   scoped_ptr<Cluster> GenerateSingleStreamCluster(int timecode,
879                                                   int end_timecode,
880                                                   int track_number,
881                                                   int block_duration) {
882     CHECK_GT(end_timecode, timecode);
883
884     std::vector<uint8> data(kBlockSize);
885
886     ClusterBuilder cb;
887     cb.SetClusterTimecode(timecode);
888
889     // Create simple blocks for everything except the last block.
890     while (timecode < (end_timecode - block_duration)) {
891       cb.AddSimpleBlock(track_number, timecode, kWebMFlagKeyframe,
892                         &data[0], data.size());
893       timecode += block_duration;
894     }
895
896     if (track_number == kVideoTrackNum) {
897       AddVideoBlockGroup(&cb, track_number, timecode, block_duration,
898                          kWebMFlagKeyframe);
899     } else {
900       cb.AddBlockGroup(track_number, timecode, block_duration,
901                        kWebMFlagKeyframe, &data[0], data.size());
902     }
903
904     return cb.Finish();
905   }
906
907   void Read(DemuxerStream::Type type, const DemuxerStream::ReadCB& read_cb) {
908     demuxer_->GetStream(type)->Read(read_cb);
909     message_loop_.RunUntilIdle();
910   }
911
912   void ReadAudio(const DemuxerStream::ReadCB& read_cb) {
913     Read(DemuxerStream::AUDIO, read_cb);
914   }
915
916   void ReadVideo(const DemuxerStream::ReadCB& read_cb) {
917     Read(DemuxerStream::VIDEO, read_cb);
918   }
919
920   void GenerateExpectedReads(int timecode, int block_count) {
921     GenerateExpectedReads(timecode, timecode, block_count);
922   }
923
924   void GenerateExpectedReads(int start_audio_timecode,
925                              int start_video_timecode,
926                              int block_count) {
927     CHECK_GT(block_count, 0);
928
929     if (block_count == 1) {
930       ExpectRead(DemuxerStream::AUDIO, start_audio_timecode);
931       return;
932     }
933
934     int audio_timecode = start_audio_timecode;
935     int video_timecode = start_video_timecode;
936
937     for (int i = 0; i < block_count; i++) {
938       if (audio_timecode <= video_timecode) {
939         ExpectRead(DemuxerStream::AUDIO, audio_timecode);
940         audio_timecode += kAudioBlockDuration;
941         continue;
942       }
943
944       ExpectRead(DemuxerStream::VIDEO, video_timecode);
945       video_timecode += kVideoBlockDuration;
946     }
947   }
948
949   void GenerateSingleStreamExpectedReads(int timecode,
950                                          int block_count,
951                                          DemuxerStream::Type type,
952                                          int block_duration) {
953     CHECK_GT(block_count, 0);
954     int stream_timecode = timecode;
955
956     for (int i = 0; i < block_count; i++) {
957       ExpectRead(type, stream_timecode);
958       stream_timecode += block_duration;
959     }
960   }
961
962   void GenerateAudioStreamExpectedReads(int timecode, int block_count) {
963     GenerateSingleStreamExpectedReads(
964         timecode, block_count, DemuxerStream::AUDIO, kAudioBlockDuration);
965   }
966
967   void GenerateVideoStreamExpectedReads(int timecode, int block_count) {
968     GenerateSingleStreamExpectedReads(
969         timecode, block_count, DemuxerStream::VIDEO, kVideoBlockDuration);
970   }
971
972   scoped_ptr<Cluster> GenerateEmptyCluster(int timecode) {
973     ClusterBuilder cb;
974     cb.SetClusterTimecode(timecode);
975     return cb.Finish();
976   }
977
978   void CheckExpectedRanges(const std::string& expected) {
979     CheckExpectedRanges(kSourceId, expected);
980   }
981
982   void CheckExpectedRanges(const std::string& id,
983                            const std::string& expected) {
984     CheckExpectedRanges(demuxer_->GetBufferedRanges(id), expected);
985   }
986
987   void CheckExpectedRanges(DemuxerStream::Type type,
988                            const std::string& expected) {
989     ChunkDemuxerStream* stream =
990         static_cast<ChunkDemuxerStream*>(demuxer_->GetStream(type));
991     CheckExpectedRanges(stream->GetBufferedRanges(kDefaultDuration()),
992                         expected);
993   }
994
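  // Formats |r| as a string of millisecond ranges, e.g. "{ [0,46) [60,92) }",
  // and compares it against |expected|.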
995   void CheckExpectedRanges(const Ranges<base::TimeDelta>& r,
996                            const std::string& expected) {
997     std::stringstream ss;
998     ss << "{ ";
999     for (size_t i = 0; i < r.size(); ++i) {
1000       ss << "[" << r.start(i).InMilliseconds() << ","
1001          << r.end(i).InMilliseconds() << ") ";
1002     }
1003     ss << "}";
1004     EXPECT_EQ(expected, ss.str());
1005   }
1006
1007   MOCK_METHOD2(ReadDone, void(DemuxerStream::Status status,
1008                               const scoped_refptr<DecoderBuffer>&));
1009
1010   void StoreStatusAndBuffer(DemuxerStream::Status* status_out,
1011                             scoped_refptr<DecoderBuffer>* buffer_out,
1012                             DemuxerStream::Status status,
1013                             const scoped_refptr<DecoderBuffer>& buffer) {
1014     *status_out = status;
1015     *buffer_out = buffer;
1016   }
1017
1018   void ReadUntilNotOkOrEndOfStream(DemuxerStream::Type type,
1019                                    DemuxerStream::Status* status,
1020                                    base::TimeDelta* last_timestamp) {
1021     DemuxerStream* stream = demuxer_->GetStream(type);
1022     scoped_refptr<DecoderBuffer> buffer;
1023
1024     *last_timestamp = kNoTimestamp();
1025     do {
1026       stream->Read(base::Bind(&ChunkDemuxerTest::StoreStatusAndBuffer,
1027                               base::Unretained(this), status, &buffer));
1028       base::MessageLoop::current()->RunUntilIdle();
1029       if (*status == DemuxerStream::kOk && !buffer->end_of_stream())
1030         *last_timestamp = buffer->timestamp();
1031     } while (*status == DemuxerStream::kOk && !buffer->end_of_stream());
1032   }
1033
1034   void ExpectEndOfStream(DemuxerStream::Type type) {
1035     EXPECT_CALL(*this, ReadDone(DemuxerStream::kOk, IsEndOfStream()));
1036     demuxer_->GetStream(type)->Read(base::Bind(
1037         &ChunkDemuxerTest::ReadDone, base::Unretained(this)));
1038     message_loop_.RunUntilIdle();
1039   }
1040
1041   void ExpectRead(DemuxerStream::Type type, int64 timestamp_in_ms) {
1042     EXPECT_CALL(*this, ReadDone(DemuxerStream::kOk,
1043                                 HasTimestamp(timestamp_in_ms)));
1044     demuxer_->GetStream(type)->Read(base::Bind(
1045         &ChunkDemuxerTest::ReadDone, base::Unretained(this)));
1046     message_loop_.RunUntilIdle();
1047   }
1048
1049   void ExpectConfigChanged(DemuxerStream::Type type) {
1050     EXPECT_CALL(*this, ReadDone(DemuxerStream::kConfigChanged, _));
1051     demuxer_->GetStream(type)->Read(base::Bind(
1052         &ChunkDemuxerTest::ReadDone, base::Unretained(this)));
1053     message_loop_.RunUntilIdle();
1054   }
1055
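  // Reads buffers from |stream| and verifies that their timestamps, in
  // milliseconds, match the space-delimited |expected| string. A timestamp
  // followed by "P" additionally verifies that the buffer is a preroll buffer
  // (one with an infinite front discard padding).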
1056   void CheckExpectedBuffers(DemuxerStream* stream,
1057                             const std::string& expected) {
1058     std::vector<std::string> timestamps;
1059     base::SplitString(expected, ' ', &timestamps);
1060     std::stringstream ss;
1061     for (size_t i = 0; i < timestamps.size(); ++i) {
1062       // Initialize status to kAborted since it's possible for Read() to return
1063       // without calling StoreStatusAndBuffer() if it doesn't have any buffers
1064       // left to return.
1065       DemuxerStream::Status status = DemuxerStream::kAborted;
1066       scoped_refptr<DecoderBuffer> buffer;
1067       stream->Read(base::Bind(&ChunkDemuxerTest::StoreStatusAndBuffer,
1068                               base::Unretained(this), &status, &buffer));
1069       base::MessageLoop::current()->RunUntilIdle();
1070       if (status != DemuxerStream::kOk || buffer->end_of_stream())
1071         break;
1072
1073       if (i > 0)
1074         ss << " ";
1075       ss << buffer->timestamp().InMilliseconds();
1076
1077       // Handle preroll buffers.
1078       if (EndsWith(timestamps[i], "P", true)) {
1079         ASSERT_EQ(kInfiniteDuration(), buffer->discard_padding().first);
1080         ASSERT_EQ(base::TimeDelta(), buffer->discard_padding().second);
1081         ss << "P";
1082       }
1083     }
1084     EXPECT_EQ(expected, ss.str());
1085   }
1086
1087   MOCK_METHOD1(Checkpoint, void(int id));
1088
1089   struct BufferTimestamps {
1090     int video_time_ms;
1091     int audio_time_ms;
1092   };
1093   static const int kSkip = -1;
1094
1095   // Test parsing a WebM file.
1096   // |filename| - The name of the file in media/test/data to parse.
1097   // |timestamps| - The expected timestamps on the parsed buffers.
1098   //    a timestamp of kSkip indicates that a Read() call for that stream
1099   //    shouldn't be made on that iteration of the loop. If both streams have
1100   //    a kSkip then the loop will terminate.
1101   bool ParseWebMFile(const std::string& filename,
1102                      const BufferTimestamps* timestamps,
1103                      const base::TimeDelta& duration) {
1104     return ParseWebMFile(filename, timestamps, duration, HAS_AUDIO | HAS_VIDEO);
1105   }
1106
1107   bool ParseWebMFile(const std::string& filename,
1108                      const BufferTimestamps* timestamps,
1109                      const base::TimeDelta& duration,
1110                      int stream_flags) {
1111     EXPECT_CALL(*this, DemuxerOpened());
1112     demuxer_->Initialize(
1113         &host_, CreateInitDoneCB(duration, PIPELINE_OK), true);
1114
1115     if (AddId(kSourceId, stream_flags) != ChunkDemuxer::kOk)
1116       return false;
1117
1118     // Read a WebM file into memory and send the data to the demuxer.
1119     scoped_refptr<DecoderBuffer> buffer = ReadTestDataFile(filename);
1120     EXPECT_CALL(*this, InitSegmentReceived());
1121     AppendDataInPieces(buffer->data(), buffer->data_size(), 512);
1122
1123     // Verify that the timestamps on the first few packets match what we
1124     // expect.
1125     for (size_t i = 0;
1126          (timestamps[i].audio_time_ms != kSkip ||
1127           timestamps[i].video_time_ms != kSkip);
1128          i++) {
1129       bool audio_read_done = false;
1130       bool video_read_done = false;
1131
1132       if (timestamps[i].audio_time_ms != kSkip) {
1133         ReadAudio(base::Bind(&OnReadDone,
1134                              base::TimeDelta::FromMilliseconds(
1135                                  timestamps[i].audio_time_ms),
1136                              &audio_read_done));
1137         EXPECT_TRUE(audio_read_done);
1138       }
1139
1140       if (timestamps[i].video_time_ms != kSkip) {
1141         ReadVideo(base::Bind(&OnReadDone,
1142                              base::TimeDelta::FromMilliseconds(
1143                                  timestamps[i].video_time_ms),
1144                              &video_read_done));
1145         EXPECT_TRUE(video_read_done);
1146       }
1147     }
1148
1149     return true;
1150   }
1151
1152   MOCK_METHOD0(DemuxerOpened, void());
1153   // TODO(xhwang): This is a workaround for the issue that move-only parameters
1154   // are not supported in mocked methods. Remove this when the issue is fixed
1155   // (http://code.google.com/p/googletest/issues/detail?id=395) or when we use
1156   // std::string instead of scoped_ptr<uint8[]> (http://crbug.com/130689).
1157   MOCK_METHOD3(NeedKeyMock, void(const std::string& type,
1158                                  const uint8* init_data, int init_data_size));
1159   void DemuxerNeedKey(const std::string& type,
1160                       const std::vector<uint8>& init_data) {
1161     const uint8* init_data_ptr = init_data.empty() ? NULL : &init_data[0];
1162     NeedKeyMock(type, init_data_ptr, init_data.size());
1163   }
1164
1165   MOCK_METHOD0(InitSegmentReceived, void(void));
1166
1167   void Seek(base::TimeDelta seek_time) {
1168     demuxer_->StartWaitingForSeek(seek_time);
1169     demuxer_->Seek(seek_time, NewExpectedStatusCB(PIPELINE_OK));
1170     message_loop_.RunUntilIdle();
1171   }
1172
1173   void MarkEndOfStream(PipelineStatus status) {
1174     demuxer_->MarkEndOfStream(status);
1175     message_loop_.RunUntilIdle();
1176   }
1177
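  // Records |timestamp_offset| to be passed to the next AppendData() call for
  // |id|. Returns false if a media segment is currently being parsed for |id|.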
1178   bool SetTimestampOffset(const std::string& id,
1179                           base::TimeDelta timestamp_offset) {
1180     if (demuxer_->IsParsingMediaSegment(id))
1181       return false;
1182
1183     timestamp_offset_map_[id] = timestamp_offset;
1184     return true;
1185   }
1186
1187   base::MessageLoop message_loop_;
1188   MockDemuxerHost host_;
1189
1190   scoped_ptr<ChunkDemuxer> demuxer_;
1191   ChunkDemuxer::InitSegmentReceivedCB init_segment_received_cb_;
1192
1193   base::TimeDelta append_window_start_for_next_append_;
1194   base::TimeDelta append_window_end_for_next_append_;
1195
1196   // Map of source id to timestamp offset to use for the next AppendData()
1197   // operation for that source id.
1198   std::map<std::string, base::TimeDelta> timestamp_offset_map_;
1199
1200  private:
1201   DISALLOW_COPY_AND_ASSIGN(ChunkDemuxerTest);
1202 };
1203
1204 TEST_F(ChunkDemuxerTest, Init) {
1205   // Test no streams, audio-only, video-only, and audio & video scenarios.
1206   // Audio and video streams can be encrypted or not encrypted.
1207   for (int i = 0; i < 16; i++) {
1208     bool has_audio = (i & 0x1) != 0;
1209     bool has_video = (i & 0x2) != 0;
1210     bool is_audio_encrypted = (i & 0x4) != 0;
1211     bool is_video_encrypted = (i & 0x8) != 0;
1212
1213     // Skip invalid combinations.
1214     if ((!has_audio && is_audio_encrypted) ||
1215         (!has_video && is_video_encrypted)) {
1216       continue;
1217     }
1218
1219     CreateNewDemuxer();
1220
1221     if (is_audio_encrypted || is_video_encrypted) {
1222       int need_key_count = (is_audio_encrypted ? 1 : 0) +
1223                            (is_video_encrypted ? 1 : 0);
1224       EXPECT_CALL(*this, NeedKeyMock(kWebMEncryptInitDataType, NotNull(),
1225                                      DecryptConfig::kDecryptionKeySize))
1226           .Times(Exactly(need_key_count));
1227     }
1228
1229     int stream_flags = 0;
1230     if (has_audio)
1231       stream_flags |= HAS_AUDIO;
1232
1233     if (has_video)
1234       stream_flags |= HAS_VIDEO;
1235
1236     ASSERT_TRUE(InitDemuxerWithEncryptionInfo(
1237         stream_flags, is_audio_encrypted, is_video_encrypted));
1238
1239     DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
1240     if (has_audio) {
1241       ASSERT_TRUE(audio_stream);
1242
1243       const AudioDecoderConfig& config = audio_stream->audio_decoder_config();
1244       EXPECT_EQ(kCodecVorbis, config.codec());
1245       EXPECT_EQ(32, config.bits_per_channel());
1246       EXPECT_EQ(CHANNEL_LAYOUT_STEREO, config.channel_layout());
1247       EXPECT_EQ(44100, config.samples_per_second());
1248       EXPECT_TRUE(config.extra_data());
1249       EXPECT_GT(config.extra_data_size(), 0u);
1250       EXPECT_EQ(kSampleFormatPlanarF32, config.sample_format());
1251       EXPECT_EQ(is_audio_encrypted,
1252                 audio_stream->audio_decoder_config().is_encrypted());
1253       EXPECT_TRUE(static_cast<ChunkDemuxerStream*>(audio_stream)
1254                       ->supports_partial_append_window_trimming());
1255     } else {
1256       EXPECT_FALSE(audio_stream);
1257     }
1258
1259     DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
1260     if (has_video) {
1261       EXPECT_TRUE(video_stream);
1262       EXPECT_EQ(is_video_encrypted,
1263                 video_stream->video_decoder_config().is_encrypted());
1264       EXPECT_FALSE(static_cast<ChunkDemuxerStream*>(video_stream)
1265                        ->supports_partial_append_window_trimming());
1266     } else {
1267       EXPECT_FALSE(video_stream);
1268     }
1269
1270     ShutdownDemuxer();
1271     demuxer_.reset();
1272   }
1273 }
1274
1275 // TODO(acolwell): Fold this test into Init tests since the tests are
1276 // almost identical.
1277 TEST_F(ChunkDemuxerTest, InitText) {
1278   // Test with 1 video stream, 1 text stream, and 0 or 1 audio streams.
1279   // No encryption cases handled here.
1280   bool has_video = true;
1281   bool is_audio_encrypted = false;
1282   bool is_video_encrypted = false;
1283   for (int i = 0; i < 2; i++) {
1284     bool has_audio = (i & 0x1) != 0;
1285
1286     CreateNewDemuxer();
1287
1288     DemuxerStream* text_stream = NULL;
1289     TextTrackConfig text_config;
1290     EXPECT_CALL(host_, AddTextStream(_, _))
1291         .WillOnce(DoAll(SaveArg<0>(&text_stream),
1292                         SaveArg<1>(&text_config)));
1293
1294     int stream_flags = HAS_TEXT;
1295     if (has_audio)
1296       stream_flags |= HAS_AUDIO;
1297
1298     if (has_video)
1299       stream_flags |= HAS_VIDEO;
1300
1301     ASSERT_TRUE(InitDemuxerWithEncryptionInfo(
1302         stream_flags, is_audio_encrypted, is_video_encrypted));
1303     ASSERT_TRUE(text_stream);
1304     EXPECT_EQ(DemuxerStream::TEXT, text_stream->type());
1305     EXPECT_EQ(kTextSubtitles, text_config.kind());
1306     EXPECT_FALSE(static_cast<ChunkDemuxerStream*>(text_stream)
1307                      ->supports_partial_append_window_trimming());
1308
1309     DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
1310     if (has_audio) {
1311       ASSERT_TRUE(audio_stream);
1312
1313       const AudioDecoderConfig& config = audio_stream->audio_decoder_config();
1314       EXPECT_EQ(kCodecVorbis, config.codec());
1315       EXPECT_EQ(32, config.bits_per_channel());
1316       EXPECT_EQ(CHANNEL_LAYOUT_STEREO, config.channel_layout());
1317       EXPECT_EQ(44100, config.samples_per_second());
1318       EXPECT_TRUE(config.extra_data());
1319       EXPECT_GT(config.extra_data_size(), 0u);
1320       EXPECT_EQ(kSampleFormatPlanarF32, config.sample_format());
1321       EXPECT_EQ(is_audio_encrypted,
1322                 audio_stream->audio_decoder_config().is_encrypted());
1323       EXPECT_TRUE(static_cast<ChunkDemuxerStream*>(audio_stream)
1324                       ->supports_partial_append_window_trimming());
1325     } else {
1326       EXPECT_FALSE(audio_stream);
1327     }
1328
1329     DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
1330     if (has_video) {
1331       EXPECT_TRUE(video_stream);
1332       EXPECT_EQ(is_video_encrypted,
1333                 video_stream->video_decoder_config().is_encrypted());
1334       EXPECT_FALSE(static_cast<ChunkDemuxerStream*>(video_stream)
1335                        ->supports_partial_append_window_trimming());
1336     } else {
1337       EXPECT_FALSE(video_stream);
1338     }
1339
1340     ShutdownDemuxer();
1341     demuxer_.reset();
1342   }
1343 }
1344
1345 TEST_F(ChunkDemuxerTest, SingleTextTrackIdChange) {
1346   // Test with 1 video stream, 1 audio stream, and 1 text stream. Send a second
1347   // init segment in which the text track ID changes. Verify that buffers appended
1348   // before and after it map to the same underlying track buffers.
1349   CreateNewDemuxer();
1350   DemuxerStream* text_stream = NULL;
1351   TextTrackConfig text_config;
1352   EXPECT_CALL(host_, AddTextStream(_, _))
1353       .WillOnce(DoAll(SaveArg<0>(&text_stream),
1354                       SaveArg<1>(&text_config)));
1355   ASSERT_TRUE(InitDemuxerWithEncryptionInfo(
1356       HAS_TEXT | HAS_AUDIO | HAS_VIDEO, false, false));
1357   DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
1358   DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
1359   ASSERT_TRUE(audio_stream);
1360   ASSERT_TRUE(video_stream);
1361   ASSERT_TRUE(text_stream);
1362
1363   AppendMuxedCluster(
1364       MuxedStreamInfo(kAudioTrackNum, "0K 23K"),
1365       MuxedStreamInfo(kVideoTrackNum, "0K 30"),
1366       MuxedStreamInfo(kTextTrackNum, "10K"));
1367   CheckExpectedRanges(kSourceId, "{ [0,46) }");
1368
1369   scoped_ptr<uint8[]> info_tracks;
1370   int info_tracks_size = 0;
1371   CreateInitSegmentWithAlternateTextTrackNum(HAS_TEXT | HAS_AUDIO | HAS_VIDEO,
1372                                              false, false,
1373                                              &info_tracks, &info_tracks_size);
1374   EXPECT_CALL(*this, InitSegmentReceived());
1375   demuxer_->AppendData(kSourceId, info_tracks.get(), info_tracks_size,
1376                        append_window_start_for_next_append_,
1377                        append_window_end_for_next_append_,
1378                        &timestamp_offset_map_[kSourceId],
1379                        init_segment_received_cb_);
1380
1381   AppendMuxedCluster(
1382       MuxedStreamInfo(kAudioTrackNum, "46K 69K"),
1383       MuxedStreamInfo(kVideoTrackNum, "60K"),
1384       MuxedStreamInfo(kAlternateTextTrackNum, "45K"));
1385
1386   CheckExpectedRanges(kSourceId, "{ [0,92) }");
1387   CheckExpectedBuffers(audio_stream, "0 23 46 69");
1388   CheckExpectedBuffers(video_stream, "0 30 60");
1389   CheckExpectedBuffers(text_stream, "10 45");
1390
1391   ShutdownDemuxer();
1392 }
1393
1394 TEST_F(ChunkDemuxerTest, InitSegmentSetsNeedRandomAccessPointFlag) {
1395   // Tests that non-keyframes following an init segment are accepted but
1396   // dropped, as expected when the initialization segment received algorithm
1397   // correctly sets the needs-random-access-point flag to true for all track
1398   // buffers. Note that the first initialization segment alone is insufficient
1399   // to fully test this, since the needs-random-access-point flag is
1400   // initialized to true.
1401   CreateNewDemuxer();
1402   DemuxerStream* text_stream = NULL;
1403   EXPECT_CALL(host_, AddTextStream(_, _))
1404       .WillOnce(SaveArg<0>(&text_stream));
1405   ASSERT_TRUE(InitDemuxerWithEncryptionInfo(
1406       HAS_TEXT | HAS_AUDIO | HAS_VIDEO, false, false));
1407   DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
1408   DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
1409   ASSERT_TRUE(audio_stream && video_stream && text_stream);
1410
1411   AppendMuxedCluster(
1412       MuxedStreamInfo(kAudioTrackNum, "23K"),
1413       MuxedStreamInfo(kVideoTrackNum, "0 30K"),
1414       MuxedStreamInfo(kTextTrackNum, "25K 40K"));
1415   CheckExpectedRanges(kSourceId, "{ [23,46) }");
1416
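  // Append a second, identical init segment followed by clusters whose leading
  // video block (60) is not a keyframe. If the init segment correctly sets the
  // needs random access point flag, that block is dropped, as verified by the
  // expected buffers below.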
1417   EXPECT_CALL(*this, InitSegmentReceived());
1418   AppendInitSegment(HAS_TEXT | HAS_AUDIO | HAS_VIDEO);
1419   AppendMuxedCluster(
1420       MuxedStreamInfo(kAudioTrackNum, "46K 69K"),
1421       MuxedStreamInfo(kVideoTrackNum, "60 90K"),
1422       MuxedStreamInfo(kTextTrackNum, "80K 90K"));
1423   CheckExpectedRanges(kSourceId, "{ [23,92) }");
1424
1425   CheckExpectedBuffers(audio_stream, "23 46 69");
1426   CheckExpectedBuffers(video_stream, "30 90");
1427   CheckExpectedBuffers(text_stream, "25 40 80 90");
1428 }
1429
1430 // Make sure that the demuxer reports an error if Shutdown()
1431 // is called before all the initialization segments are appended.
1432 TEST_F(ChunkDemuxerTest, Shutdown_BeforeAllInitSegmentsAppended) {
1433   EXPECT_CALL(*this, DemuxerOpened());
1434   demuxer_->Initialize(
1435       &host_, CreateInitDoneCB(
1436           kDefaultDuration(), DEMUXER_ERROR_COULD_NOT_OPEN), true);
1437
1438   EXPECT_EQ(AddId("audio", HAS_AUDIO), ChunkDemuxer::kOk);
1439   EXPECT_EQ(AddId("video", HAS_VIDEO), ChunkDemuxer::kOk);
1440
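  // Append an init segment for the "audio" id only. The "video" id never
  // receives one, so initialization cannot complete before Shutdown().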
1441   EXPECT_CALL(*this, InitSegmentReceived());
1442   AppendInitSegmentWithSourceId("audio", HAS_AUDIO);
1443
1444   ShutdownDemuxer();
1445 }
1446
1447 TEST_F(ChunkDemuxerTest, Shutdown_BeforeAllInitSegmentsAppendedText) {
1448   EXPECT_CALL(*this, DemuxerOpened());
1449   demuxer_->Initialize(
1450       &host_, CreateInitDoneCB(
1451           kDefaultDuration(), DEMUXER_ERROR_COULD_NOT_OPEN), true);
1452
1453   EXPECT_EQ(AddId("audio", HAS_AUDIO), ChunkDemuxer::kOk);
1454   EXPECT_EQ(AddId("video_and_text", HAS_VIDEO), ChunkDemuxer::kOk);
1455
1456   EXPECT_CALL(host_, AddTextStream(_, _))
1457       .Times(Exactly(1));
1458
1459   EXPECT_CALL(*this, InitSegmentReceived());
1460   AppendInitSegmentWithSourceId("video_and_text", HAS_VIDEO | HAS_TEXT);
1461
1462   ShutdownDemuxer();
1463 }
1464
1465 // Verifies that all streams waiting for data receive an end of stream
1466 // buffer when Shutdown() is called.
1467 TEST_F(ChunkDemuxerTest, Shutdown_EndOfStreamWhileWaitingForData) {
1468   DemuxerStream* text_stream = NULL;
1469   EXPECT_CALL(host_, AddTextStream(_, _))
1470       .WillOnce(SaveArg<0>(&text_stream));
1471   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));
1472
1473   DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
1474   DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
1475
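  // Issue a read on each stream. No media data has been appended, so none of
  // the reads should complete yet.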
1476   bool audio_read_done = false;
1477   bool video_read_done = false;
1478   bool text_read_done = false;
1479   audio_stream->Read(base::Bind(&OnReadDone_EOSExpected, &audio_read_done));
1480   video_stream->Read(base::Bind(&OnReadDone_EOSExpected, &video_read_done));
1481   text_stream->Read(base::Bind(&OnReadDone_EOSExpected, &text_read_done));
1482   message_loop_.RunUntilIdle();
1483
1484   EXPECT_FALSE(audio_read_done);
1485   EXPECT_FALSE(video_read_done);
1486   EXPECT_FALSE(text_read_done);
1487
1488   ShutdownDemuxer();
1489
1490   EXPECT_TRUE(audio_read_done);
1491   EXPECT_TRUE(video_read_done);
1492   EXPECT_TRUE(text_read_done);
1493 }
1494
1495 // Test that Seek() completes successfully once the first cluster at the
1496 // seek point arrives.
1497 TEST_F(ChunkDemuxerTest, AppendDataAfterSeek) {
1498   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1499   AppendCluster(kDefaultFirstCluster());
1500
1501   InSequence s;
1502
1503   EXPECT_CALL(*this, Checkpoint(1));
1504
1505   Seek(base::TimeDelta::FromMilliseconds(46));
1506
1507   EXPECT_CALL(*this, Checkpoint(2));
1508
1509   Checkpoint(1);
1510
1511   AppendCluster(kDefaultSecondCluster());
1512
1513   message_loop_.RunUntilIdle();
1514
1515   Checkpoint(2);
1516 }
1517
1518 // Test that parsing errors are handled for clusters appended after init.
1519 TEST_F(ChunkDemuxerTest, ErrorWhileParsingClusterAfterInit) {
1520   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1521   AppendCluster(kDefaultFirstCluster());
1522
1523   EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
1524   AppendGarbage();
1525 }
1526
1527 // Test the case where a Seek() is requested while the parser
1528 // is in the middle of a cluster. This is to verify that the parser
1529 // does not reset itself on a seek.
1530 TEST_F(ChunkDemuxerTest, SeekWhileParsingCluster) {
1531   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1532
1533   InSequence s;
1534
1535   scoped_ptr<Cluster> cluster_a(GenerateCluster(0, 6));
1536
1537   // Split the cluster into two appends at an arbitrary point near the end.
1538   int first_append_size = cluster_a->size() - 11;
1539   int second_append_size = cluster_a->size() - first_append_size;
1540
1541   // Append the first part of the cluster.
1542   AppendData(cluster_a->data(), first_append_size);
1543
1544   ExpectRead(DemuxerStream::AUDIO, 0);
1545   ExpectRead(DemuxerStream::VIDEO, 0);
1546   ExpectRead(DemuxerStream::AUDIO, kAudioBlockDuration);
1547
1548   Seek(base::TimeDelta::FromSeconds(5));
1549
1550   // Append the rest of the cluster.
1551   AppendData(cluster_a->data() + first_append_size, second_append_size);
1552
1553   // Append the new cluster and verify that only the blocks
1554   // in the new cluster are returned.
1555   AppendCluster(GenerateCluster(5000, 6));
1556   GenerateExpectedReads(5000, 6);
1557 }
1558
1559 // Test the case where AppendData() is called before Init().
1560 TEST_F(ChunkDemuxerTest, AppendDataBeforeInit) {
1561   scoped_ptr<uint8[]> info_tracks;
1562   int info_tracks_size = 0;
1563   CreateInitSegment(HAS_AUDIO | HAS_VIDEO,
1564                     false, false, &info_tracks, &info_tracks_size);
1565   demuxer_->AppendData(kSourceId, info_tracks.get(), info_tracks_size,
1566                        append_window_start_for_next_append_,
1567                        append_window_end_for_next_append_,
1568                        &timestamp_offset_map_[kSourceId],
1569                        init_segment_received_cb_);
1570 }
1571
1572 // Make sure Read() callbacks are dispatched with the proper data.
1573 TEST_F(ChunkDemuxerTest, Read) {
1574   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1575
1576   AppendCluster(kDefaultFirstCluster());
1577
1578   bool audio_read_done = false;
1579   bool video_read_done = false;
1580   ReadAudio(base::Bind(&OnReadDone,
1581                        base::TimeDelta::FromMilliseconds(0),
1582                        &audio_read_done));
1583   ReadVideo(base::Bind(&OnReadDone,
1584                        base::TimeDelta::FromMilliseconds(0),
1585                        &video_read_done));
1586
1587   EXPECT_TRUE(audio_read_done);
1588   EXPECT_TRUE(video_read_done);
1589 }
1590
1591 TEST_F(ChunkDemuxerTest, OutOfOrderClusters) {
1592   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1593   AppendCluster(kDefaultFirstCluster());
1594   AppendCluster(GenerateCluster(10, 4));
1595
1596   // Make sure that AppendCluster() does not fail with a cluster that
1597   // overlaps the previously appended cluster.
1598   AppendCluster(GenerateCluster(5, 4));
1599
1600   // Verify that AppendData() can still accept more data.
1601   scoped_ptr<Cluster> cluster_c(GenerateCluster(45, 2));
1602   demuxer_->AppendData(kSourceId, cluster_c->data(), cluster_c->size(),
1603                        append_window_start_for_next_append_,
1604                        append_window_end_for_next_append_,
1605                        &timestamp_offset_map_[kSourceId],
1606                        init_segment_received_cb_);
1607 }
1608
1609 TEST_F(ChunkDemuxerTest, NonMonotonicButAboveClusterTimecode) {
1610   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1611   AppendCluster(kDefaultFirstCluster());
1612
1613   ClusterBuilder cb;
1614
1615   // Test the case where block timecodes are not monotonically
1616   // increasing but stay above the cluster timecode.
1617   cb.SetClusterTimecode(5);
1618   AddSimpleBlock(&cb, kAudioTrackNum, 5);
1619   AddSimpleBlock(&cb, kVideoTrackNum, 10);
1620   AddSimpleBlock(&cb, kAudioTrackNum, 7);
1621   AddSimpleBlock(&cb, kVideoTrackNum, 15);
1622
1623   EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
1624   AppendCluster(cb.Finish());
1625
1626   // Verify that AppendData() ignores data after the error.
1627   scoped_ptr<Cluster> cluster_b(GenerateCluster(20, 2));
1628   demuxer_->AppendData(kSourceId, cluster_b->data(), cluster_b->size(),
1629                        append_window_start_for_next_append_,
1630                        append_window_end_for_next_append_,
1631                        &timestamp_offset_map_[kSourceId],
1632                        init_segment_received_cb_);
1633 }
1634
1635 TEST_F(ChunkDemuxerTest, BackwardsAndBeforeClusterTimecode) {
1636   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1637   AppendCluster(kDefaultFirstCluster());
1638
1639   ClusterBuilder cb;
1640
1641   // Test timecodes going backwards and including values less than the cluster
1642   // timecode.
1643   cb.SetClusterTimecode(5);
1644   AddSimpleBlock(&cb, kAudioTrackNum, 5);
1645   AddSimpleBlock(&cb, kVideoTrackNum, 5);
1646   AddSimpleBlock(&cb, kAudioTrackNum, 3);
1647   AddSimpleBlock(&cb, kVideoTrackNum, 3);
1648
1649   EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
1650   AppendCluster(cb.Finish());
1651
1652   // Verify that AppendData() ignores data after the error.
1653   scoped_ptr<Cluster> cluster_b(GenerateCluster(6, 2));
1654   demuxer_->AppendData(kSourceId, cluster_b->data(), cluster_b->size(),
1655                        append_window_start_for_next_append_,
1656                        append_window_end_for_next_append_,
1657                        &timestamp_offset_map_[kSourceId],
1658                        init_segment_received_cb_);
1659 }
1660
1662 TEST_F(ChunkDemuxerTest, PerStreamMonotonicallyIncreasingTimestamps) {
1663   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1664   AppendCluster(kDefaultFirstCluster());
1665
1666   ClusterBuilder cb;
1667
1668   // Test that an error is reported when timestamps are not monotonically
1669   // increasing on a per-stream basis.
1670   cb.SetClusterTimecode(5);
1671   AddSimpleBlock(&cb, kAudioTrackNum, 5);
1672   AddSimpleBlock(&cb, kVideoTrackNum, 5);
1673   AddSimpleBlock(&cb, kAudioTrackNum, 4);
1674   AddSimpleBlock(&cb, kVideoTrackNum, 7);
1675
1676   EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
1677   AppendCluster(cb.Finish());
1678 }
1679
1680 // Test the case where a cluster is passed to AppendCluster() before
1681 // INFO & TRACKS data.
1682 TEST_F(ChunkDemuxerTest, ClusterBeforeInitSegment) {
1683   EXPECT_CALL(*this, DemuxerOpened());
1684   demuxer_->Initialize(
1685       &host_, NewExpectedStatusCB(DEMUXER_ERROR_COULD_NOT_OPEN), true);
1686
1687   ASSERT_EQ(AddId(), ChunkDemuxer::kOk);
1688
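  // Appending media data before the init segment should fail initialization
  // with DEMUXER_ERROR_COULD_NOT_OPEN.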
1689   AppendCluster(GenerateCluster(0, 1));
1690 }
1691
1692 // Test cases where we get a MarkEndOfStream() call during initialization.
1693 TEST_F(ChunkDemuxerTest, EOSDuringInit) {
1694   EXPECT_CALL(*this, DemuxerOpened());
1695   demuxer_->Initialize(
1696       &host_, NewExpectedStatusCB(DEMUXER_ERROR_COULD_NOT_OPEN), true);
1697   MarkEndOfStream(PIPELINE_OK);
1698 }
1699
1700 TEST_F(ChunkDemuxerTest, EndOfStreamWithNoAppend) {
1701   EXPECT_CALL(*this, DemuxerOpened());
1702   demuxer_->Initialize(
1703       &host_, NewExpectedStatusCB(DEMUXER_ERROR_COULD_NOT_OPEN), true);
1704
1705   ASSERT_EQ(AddId(), ChunkDemuxer::kOk);
1706
1707   CheckExpectedRanges("{ }");
1708   MarkEndOfStream(PIPELINE_OK);
1709   ShutdownDemuxer();
1710   CheckExpectedRanges("{ }");
1711   demuxer_->RemoveId(kSourceId);
1712   demuxer_.reset();
1713 }
1714
1715 TEST_F(ChunkDemuxerTest, EndOfStreamWithNoMediaAppend) {
1716   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1717
1718   CheckExpectedRanges("{ }");
1719   MarkEndOfStream(PIPELINE_OK);
1720   CheckExpectedRanges("{ }");
1721 }
1722
1723 TEST_F(ChunkDemuxerTest, DecodeErrorEndOfStream) {
1724   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1725
1726   AppendCluster(kDefaultFirstCluster());
1727   CheckExpectedRanges(kDefaultFirstClusterRange);
1728
1729   EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
1730   MarkEndOfStream(PIPELINE_ERROR_DECODE);
1731   CheckExpectedRanges(kDefaultFirstClusterRange);
1732 }
1733
1734 TEST_F(ChunkDemuxerTest, NetworkErrorEndOfStream) {
1735   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1736
1737   AppendCluster(kDefaultFirstCluster());
1738   CheckExpectedRanges(kDefaultFirstClusterRange);
1739
1740   EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_NETWORK));
1741   MarkEndOfStream(PIPELINE_ERROR_NETWORK);
1742 }
1743
1744 // Helper class to reduce duplicate code when testing end of stream
1745 // Read() behavior.
1746 class EndOfStreamHelper {
1747  public:
1748   explicit EndOfStreamHelper(Demuxer* demuxer)
1749       : demuxer_(demuxer),
1750         audio_read_done_(false),
1751         video_read_done_(false) {
1752   }
1753
1754   // Request a read on the audio and video streams.
1755   void RequestReads() {
1756     EXPECT_FALSE(audio_read_done_);
1757     EXPECT_FALSE(video_read_done_);
1758
1759     DemuxerStream* audio = demuxer_->GetStream(DemuxerStream::AUDIO);
1760     DemuxerStream* video = demuxer_->GetStream(DemuxerStream::VIDEO);
1761
1762     audio->Read(base::Bind(&OnEndOfStreamReadDone, &audio_read_done_));
1763     video->Read(base::Bind(&OnEndOfStreamReadDone, &video_read_done_));
1764     base::MessageLoop::current()->RunUntilIdle();
1765   }
1766
1767   // Check whether the |audio_read_done_| and |video_read_done_| variables
1768   // match |expected|.
1769   void CheckIfReadDonesWereCalled(bool expected) {
1770     base::MessageLoop::current()->RunUntilIdle();
1771     EXPECT_EQ(expected, audio_read_done_);
1772     EXPECT_EQ(expected, video_read_done_);
1773   }
1774
1775  private:
1776   static void OnEndOfStreamReadDone(
1777       bool* called,
1778       DemuxerStream::Status status,
1779       const scoped_refptr<DecoderBuffer>& buffer) {
1780     EXPECT_EQ(status, DemuxerStream::kOk);
1781     EXPECT_TRUE(buffer->end_of_stream());
1782     *called = true;
1783   }
1784
1785   Demuxer* demuxer_;
1786   bool audio_read_done_;
1787   bool video_read_done_;
1788
1789   DISALLOW_COPY_AND_ASSIGN(EndOfStreamHelper);
1790 };
1791
1792 // Make sure that all pending reads for which we don't have media data get an
1793 // "end of stream" buffer when MarkEndOfStream() is called.
1794 TEST_F(ChunkDemuxerTest, EndOfStreamWithPendingReads) {
1795   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1796
1797   AppendCluster(GenerateCluster(0, 2));
1798
1799   bool audio_read_done_1 = false;
1800   bool video_read_done_1 = false;
1801   EndOfStreamHelper end_of_stream_helper_1(demuxer_.get());
1802   EndOfStreamHelper end_of_stream_helper_2(demuxer_.get());
1803
1804   ReadAudio(base::Bind(&OnReadDone,
1805                        base::TimeDelta::FromMilliseconds(0),
1806                        &audio_read_done_1));
1807   ReadVideo(base::Bind(&OnReadDone,
1808                        base::TimeDelta::FromMilliseconds(0),
1809                        &video_read_done_1));
1810   message_loop_.RunUntilIdle();
1811
1812   EXPECT_TRUE(audio_read_done_1);
1813   EXPECT_TRUE(video_read_done_1);
1814
1815   end_of_stream_helper_1.RequestReads();
1816
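  // Marking end of stream sets the duration to the end of the buffered data
  // and satisfies the pending end-of-stream reads.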
1817   EXPECT_CALL(host_, SetDuration(
1818       base::TimeDelta::FromMilliseconds(kVideoBlockDuration)));
1819   MarkEndOfStream(PIPELINE_OK);
1820
1821   end_of_stream_helper_1.CheckIfReadDonesWereCalled(true);
1822
1823   end_of_stream_helper_2.RequestReads();
1824   end_of_stream_helper_2.CheckIfReadDonesWereCalled(true);
1825 }
1826
1827 // Make sure that all Read() calls after we get a MarkEndOfStream()
1828 // call return an "end of stream" buffer.
1829 TEST_F(ChunkDemuxerTest, ReadsAfterEndOfStream) {
1830   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1831
1832   AppendCluster(GenerateCluster(0, 2));
1833
1834   bool audio_read_done_1 = false;
1835   bool video_read_done_1 = false;
1836   EndOfStreamHelper end_of_stream_helper_1(demuxer_.get());
1837   EndOfStreamHelper end_of_stream_helper_2(demuxer_.get());
1838   EndOfStreamHelper end_of_stream_helper_3(demuxer_.get());
1839
1840   ReadAudio(base::Bind(&OnReadDone,
1841                        base::TimeDelta::FromMilliseconds(0),
1842                        &audio_read_done_1));
1843   ReadVideo(base::Bind(&OnReadDone,
1844                        base::TimeDelta::FromMilliseconds(0),
1845                        &video_read_done_1));
1846
1847   end_of_stream_helper_1.RequestReads();
1848
1849   EXPECT_TRUE(audio_read_done_1);
1850   EXPECT_TRUE(video_read_done_1);
1851   end_of_stream_helper_1.CheckIfReadDonesWereCalled(false);
1852
1853   EXPECT_CALL(host_, SetDuration(
1854       base::TimeDelta::FromMilliseconds(kVideoBlockDuration)));
1855   MarkEndOfStream(PIPELINE_OK);
1856
1857   end_of_stream_helper_1.CheckIfReadDonesWereCalled(true);
1858
1859   // Request a few more reads and make sure we immediately get
1860   // end of stream buffers.
1861   end_of_stream_helper_2.RequestReads();
1862   end_of_stream_helper_2.CheckIfReadDonesWereCalled(true);
1863
1864   end_of_stream_helper_3.RequestReads();
1865   end_of_stream_helper_3.CheckIfReadDonesWereCalled(true);
1866 }
1867
1868 TEST_F(ChunkDemuxerTest, EndOfStreamDuringCanceledSeek) {
1869   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1870
1871   AppendCluster(0, 10);
1872   EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(138)));
1873   MarkEndOfStream(PIPELINE_OK);
1874
1875   // Start the first seek.
1876   Seek(base::TimeDelta::FromMilliseconds(20));
1877
1878   // Simulate another seek being requested before the first
1879   // seek has finished prerolling.
1880   base::TimeDelta seek_time2 = base::TimeDelta::FromMilliseconds(30);
1881   demuxer_->CancelPendingSeek(seek_time2);
1882
1883   // Finish second seek.
1884   Seek(seek_time2);
1885
1886   DemuxerStream::Status status;
1887   base::TimeDelta last_timestamp;
1888
1889   // Make sure audio can reach end of stream.
1890   ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
1891   ASSERT_EQ(status, DemuxerStream::kOk);
1892
1893   // Make sure video can reach end of stream.
1894   ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
1895   ASSERT_EQ(status, DemuxerStream::kOk);
1896 }
1897
1898 // Verify buffered range change behavior for audio/video/text tracks.
1899 TEST_F(ChunkDemuxerTest, EndOfStreamRangeChanges) {
1900   DemuxerStream* text_stream = NULL;
1901
1902   EXPECT_CALL(host_, AddTextStream(_, _))
1903       .WillOnce(SaveArg<0>(&text_stream));
1904   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));
1905
1906   AppendMuxedCluster(
1907       MuxedStreamInfo(kVideoTrackNum, "0K 33"),
1908       MuxedStreamInfo(kAudioTrackNum, "0K 23K"));
1909
1910   // Check expected ranges and verify that an empty text track does not
1911   // affect the expected ranges.
1912   CheckExpectedRanges(kSourceId, "{ [0,46) }");
1913
1914   EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(66)));
1915   MarkEndOfStream(PIPELINE_OK);
1916
1917   // Check that marking end of stream extends the range to the end of the
1918   // video data and that the empty text track still has no effect.
1919   CheckExpectedRanges(kSourceId, "{ [0,66) }");
1920
1921   // Unmark end of stream state and verify that the ranges return to
1922   // their pre-"end of stream" values.
1923   demuxer_->UnmarkEndOfStream();
1924   CheckExpectedRanges(kSourceId, "{ [0,46) }");
1925
1926   // Add text track data and verify that the buffered ranges don't change
1927   // since the intersection of all the tracks doesn't change.
1928   EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(200)));
1929   AppendMuxedCluster(
1930       MuxedStreamInfo(kVideoTrackNum, "0K 33"),
1931       MuxedStreamInfo(kAudioTrackNum, "0K 23K"),
1932       MuxedStreamInfo(kTextTrackNum, "0K 100K"));
1933   CheckExpectedRanges(kSourceId, "{ [0,46) }");
1934
1935   // Mark end of stream and verify that text track data is reflected in
1936   // the new range.
1937   MarkEndOfStream(PIPELINE_OK);
1938   CheckExpectedRanges(kSourceId, "{ [0,200) }");
1939 }
1940
1941 // Make sure AppendData() will accept elements that span multiple calls.
1942 TEST_F(ChunkDemuxerTest, AppendingInPieces) {
1943   EXPECT_CALL(*this, DemuxerOpened());
1944   demuxer_->Initialize(
1945       &host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
1946
1947   ASSERT_EQ(AddId(), ChunkDemuxer::kOk);
1948
1949   scoped_ptr<uint8[]> info_tracks;
1950   int info_tracks_size = 0;
1951   CreateInitSegment(HAS_AUDIO | HAS_VIDEO,
1952                     false, false, &info_tracks, &info_tracks_size);
1953
1954   scoped_ptr<Cluster> cluster_a(kDefaultFirstCluster());
1955   scoped_ptr<Cluster> cluster_b(kDefaultSecondCluster());
1956
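  // Concatenate the init segment and both clusters into a single buffer so it
  // can be appended in arbitrarily sized pieces.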
1957   size_t buffer_size = info_tracks_size + cluster_a->size() + cluster_b->size();
1958   scoped_ptr<uint8[]> buffer(new uint8[buffer_size]);
1959   uint8* dst = buffer.get();
1960   memcpy(dst, info_tracks.get(), info_tracks_size);
1961   dst += info_tracks_size;
1962
1963   memcpy(dst, cluster_a->data(), cluster_a->size());
1964   dst += cluster_a->size();
1965
1966   memcpy(dst, cluster_b->data(), cluster_b->size());
1967   dst += cluster_b->size();
1968
1969   EXPECT_CALL(*this, InitSegmentReceived());
1970   AppendDataInPieces(buffer.get(), buffer_size);
1971
1972   GenerateExpectedReads(0, 9);
1973 }
1974
1975 TEST_F(ChunkDemuxerTest, WebMFile_AudioAndVideo) {
1976   struct BufferTimestamps buffer_timestamps[] = {
1977     {0, 0},
1978     {33, 3},
1979     {67, 6},
1980     {100, 9},
1981     {133, 12},
1982     {kSkip, kSkip},
1983   };
1984
1985   // TODO(wolenetz/acolwell): Remove this SetDuration expectation and update the
1986   // ParseWebMFile() call's expected duration, below, once the file is fixed to
1987   // have the correct duration in the init segment. See http://crbug.com/354284.
1988   EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(2746)));
1989
1990   ASSERT_TRUE(ParseWebMFile("bear-320x240.webm", buffer_timestamps,
1991                             base::TimeDelta::FromMilliseconds(2744)));
1992 }
1993
1994 TEST_F(ChunkDemuxerTest, WebMFile_LiveAudioAndVideo) {
1995   struct BufferTimestamps buffer_timestamps[] = {
1996     {0, 0},
1997     {33, 3},
1998     {67, 6},
1999     {100, 9},
2000     {133, 12},
2001     {kSkip, kSkip},
2002   };
2003
2004   ASSERT_TRUE(ParseWebMFile("bear-320x240-live.webm", buffer_timestamps,
2005                             kInfiniteDuration()));
2006 }
2007
2008 TEST_F(ChunkDemuxerTest, WebMFile_AudioOnly) {
2009   struct BufferTimestamps buffer_timestamps[] = {
2010     {kSkip, 0},
2011     {kSkip, 3},
2012     {kSkip, 6},
2013     {kSkip, 9},
2014     {kSkip, 12},
2015     {kSkip, kSkip},
2016   };
2017
2018   // TODO(wolenetz/acolwell): Remove this SetDuration expectation and update the
2019   // ParseWebMFile() call's expected duration, below, once the file is fixed to
2020   // have the correct duration in the init segment. See http://crbug.com/354284.
2021   EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(2746)));
2022
2023   ASSERT_TRUE(ParseWebMFile("bear-320x240-audio-only.webm", buffer_timestamps,
2024                             base::TimeDelta::FromMilliseconds(2744),
2025                             HAS_AUDIO));
2026 }
2027
2028 TEST_F(ChunkDemuxerTest, WebMFile_VideoOnly) {
2029   struct BufferTimestamps buffer_timestamps[] = {
2030     {0, kSkip},
2031     {33, kSkip},
2032     {67, kSkip},
2033     {100, kSkip},
2034     {133, kSkip},
2035     {kSkip, kSkip},
2036   };
2037
2038   // TODO(wolenetz/acolwell): Remove this SetDuration expectation and update the
2039   // ParseWebMFile() call's expected duration, below, once the file is fixed to
2040   // have the correct duration in the init segment. See http://crbug.com/354284.
2041   EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(2736)));
2042
2043   ASSERT_TRUE(ParseWebMFile("bear-320x240-video-only.webm", buffer_timestamps,
2044                             base::TimeDelta::FromMilliseconds(2703),
2045                             HAS_VIDEO));
2046 }
2047
2048 TEST_F(ChunkDemuxerTest, WebMFile_AltRefFrames) {
2049   struct BufferTimestamps buffer_timestamps[] = {
2050     {0, 0},
2051     {33, 3},
2052     {33, 6},
2053     {67, 9},
2054     {100, 12},
2055     {kSkip, kSkip},
2056   };
2057
2058   ASSERT_TRUE(ParseWebMFile("bear-320x240-altref.webm", buffer_timestamps,
2059                             base::TimeDelta::FromMilliseconds(2767)));
2060 }
2061
2062 // Verify that we output buffers before the entire cluster has been parsed.
2063 TEST_F(ChunkDemuxerTest, IncrementalClusterParsing) {
2064   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2065   AppendEmptyCluster(0);
2066
2067   scoped_ptr<Cluster> cluster(GenerateCluster(0, 6));
2068
2069   bool audio_read_done = false;
2070   bool video_read_done = false;
2071   ReadAudio(base::Bind(&OnReadDone,
2072                        base::TimeDelta::FromMilliseconds(0),
2073                        &audio_read_done));
2074   ReadVideo(base::Bind(&OnReadDone,
2075                        base::TimeDelta::FromMilliseconds(0),
2076                        &video_read_done));
2077
2078   // Make sure the reads haven't completed yet.
2079   EXPECT_FALSE(audio_read_done);
2080   EXPECT_FALSE(video_read_done);
2081
2082   // Append data one byte at a time until one or both reads complete.
2083   int i = 0;
2084   for (; i < cluster->size() && !(audio_read_done || video_read_done); ++i) {
2085     AppendData(cluster->data() + i, 1);
2086     message_loop_.RunUntilIdle();
2087   }
2088
2089   EXPECT_TRUE(audio_read_done || video_read_done);
2090   EXPECT_GT(i, 0);
2091   EXPECT_LT(i, cluster->size());
2092
2093   audio_read_done = false;
2094   video_read_done = false;
2095   ReadAudio(base::Bind(&OnReadDone,
2096                        base::TimeDelta::FromMilliseconds(23),
2097                        &audio_read_done));
2098   ReadVideo(base::Bind(&OnReadDone,
2099                        base::TimeDelta::FromMilliseconds(33),
2100                        &video_read_done));
2101
2102   // Make sure the reads haven't completed yet.
2103   EXPECT_FALSE(audio_read_done);
2104   EXPECT_FALSE(video_read_done);
2105
2106   // Append the remaining data.
2107   ASSERT_LT(i, cluster->size());
2108   AppendData(cluster->data() + i, cluster->size() - i);
2109
2110   message_loop_.RunUntilIdle();
2111
2112   EXPECT_TRUE(audio_read_done);
2113   EXPECT_TRUE(video_read_done);
2114 }
2115
2116 TEST_F(ChunkDemuxerTest, ParseErrorDuringInit) {
2117   EXPECT_CALL(*this, DemuxerOpened());
2118   demuxer_->Initialize(
2119       &host_, CreateInitDoneCB(
2120           kNoTimestamp(), DEMUXER_ERROR_COULD_NOT_OPEN), true);
2121
2122   ASSERT_EQ(AddId(), ChunkDemuxer::kOk);
2123
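  // A single garbage byte is not a valid init segment, so initialization
  // should fail with DEMUXER_ERROR_COULD_NOT_OPEN.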
2124   uint8 tmp = 0;
2125   demuxer_->AppendData(kSourceId, &tmp, 1,
2126                        append_window_start_for_next_append_,
2127                        append_window_end_for_next_append_,
2128                        &timestamp_offset_map_[kSourceId],
2129                        init_segment_received_cb_);
2130 }
2131
2132 TEST_F(ChunkDemuxerTest, AVHeadersWithAudioOnlyType) {
2133   EXPECT_CALL(*this, DemuxerOpened());
2134   demuxer_->Initialize(
2135       &host_, CreateInitDoneCB(kNoTimestamp(),
2136                                DEMUXER_ERROR_COULD_NOT_OPEN), true);
2137
2138   std::vector<std::string> codecs(1);
2139   codecs[0] = "vorbis";
2140   ASSERT_EQ(demuxer_->AddId(kSourceId, "audio/webm", codecs),
2141             ChunkDemuxer::kOk);
2142
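  // Appending an init segment that contains both audio and video tracks to an
  // audio-only source id should fail initialization.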
2143   AppendInitSegment(HAS_AUDIO | HAS_VIDEO);
2144 }
2145
2146 TEST_F(ChunkDemuxerTest, AVHeadersWithVideoOnlyType) {
2147   EXPECT_CALL(*this, DemuxerOpened());
2148   demuxer_->Initialize(
2149       &host_, CreateInitDoneCB(kNoTimestamp(),
2150                                DEMUXER_ERROR_COULD_NOT_OPEN), true);
2151
2152   std::vector<std::string> codecs(1);
2153   codecs[0] = "vp8";
2154   ASSERT_EQ(demuxer_->AddId(kSourceId, "video/webm", codecs),
2155             ChunkDemuxer::kOk);
2156
2157   AppendInitSegment(HAS_AUDIO | HAS_VIDEO);
2158 }
2159
2160 TEST_F(ChunkDemuxerTest, MultipleHeaders) {
2161   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2162
2163   AppendCluster(kDefaultFirstCluster());
2164
2165   // Append another identical initialization segment.
2166   EXPECT_CALL(*this, InitSegmentReceived());
2167   AppendInitSegment(HAS_AUDIO | HAS_VIDEO);
2168
2169   AppendCluster(kDefaultSecondCluster());
2170
2171   GenerateExpectedReads(0, 9);
2172 }
2173
2174 TEST_F(ChunkDemuxerTest, AddSeparateSourcesForAudioAndVideo) {
2175   std::string audio_id = "audio1";
2176   std::string video_id = "video1";
2177   ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
2178
2179   // Append audio and video data into separate source ids.
2180   AppendCluster(audio_id,
2181       GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
2182   GenerateAudioStreamExpectedReads(0, 4);
2183   AppendCluster(video_id,
2184       GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
2185   GenerateVideoStreamExpectedReads(0, 4);
2186 }
2187
2188 TEST_F(ChunkDemuxerTest, AddSeparateSourcesForAudioAndVideoText) {
2189   // TODO(matthewjheaney): Here and elsewhere, we need more tests
2190   // for inband text tracks (http://crbug.com/321455).
2191
2192   std::string audio_id = "audio1";
2193   std::string video_id = "video1";
2194
2195   EXPECT_CALL(host_, AddTextStream(_, _))
2196     .Times(Exactly(2));
2197   ASSERT_TRUE(InitDemuxerAudioAndVideoSourcesText(audio_id, video_id, true));
2198
2199   // Append audio and video data into separate source ids.
2200   AppendCluster(audio_id,
2201       GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
2202   GenerateAudioStreamExpectedReads(0, 4);
2203   AppendCluster(video_id,
2204       GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
2205   GenerateVideoStreamExpectedReads(0, 4);
2206 }
2207
2208 TEST_F(ChunkDemuxerTest, AddIdFailures) {
2209   EXPECT_CALL(*this, DemuxerOpened());
2210   demuxer_->Initialize(
2211       &host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
2212
2213   std::string audio_id = "audio1";
2214   std::string video_id = "video1";
2215
2216   ASSERT_EQ(AddId(audio_id, HAS_AUDIO), ChunkDemuxer::kOk);
2217
2218   // Adding an id with audio/video should fail because we already added audio.
2219   ASSERT_EQ(AddId(), ChunkDemuxer::kReachedIdLimit);
2220
2221   EXPECT_CALL(*this, InitSegmentReceived());
2222   AppendInitSegmentWithSourceId(audio_id, HAS_AUDIO);
2223
2224   // Adding an id after append should fail.
2225   ASSERT_EQ(AddId(video_id, HAS_VIDEO), ChunkDemuxer::kReachedIdLimit);
2226 }
2227
2228 // Test that Read() calls after a RemoveId() return "end of stream" buffers.
2229 TEST_F(ChunkDemuxerTest, RemoveId) {
2230   std::string audio_id = "audio1";
2231   std::string video_id = "video1";
2232   ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
2233
2234   // Append audio and video data into separate source ids.
2235   AppendCluster(audio_id,
2236       GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
2237   AppendCluster(video_id,
2238       GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
2239
2240   // Read() from audio should return normal buffers.
2241   GenerateAudioStreamExpectedReads(0, 4);
2242
2243   // Remove the audio id.
2244   demuxer_->RemoveId(audio_id);
2245
2246   // Read() from audio should return "end of stream" buffers.
2247   bool audio_read_done = false;
2248   ReadAudio(base::Bind(&OnReadDone_EOSExpected, &audio_read_done));
2249   message_loop_.RunUntilIdle();
2250   EXPECT_TRUE(audio_read_done);
2251
2252   // Read() from video should still return normal buffers.
2253   GenerateVideoStreamExpectedReads(0, 4);
2254 }
2255
2256 // Test that removing an ID immediately after adding it does not interfere with
2257 // quota for new IDs in the future.
2258 TEST_F(ChunkDemuxerTest, RemoveAndAddId) {
2259   std::string audio_id_1 = "audio1";
2260   ASSERT_EQ(AddId(audio_id_1, HAS_AUDIO), ChunkDemuxer::kOk);
2261   demuxer_->RemoveId(audio_id_1);
2262
2263   std::string audio_id_2 = "audio2";
2264   ASSERT_EQ(AddId(audio_id_2, HAS_AUDIO), ChunkDemuxer::kOk);
2265 }
2266
2267 TEST_F(ChunkDemuxerTest, SeekCanceled) {
2268   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2269
2270   // Append cluster at the beginning of the stream.
2271   AppendCluster(GenerateCluster(0, 4));
2272
2273   // Seek to an unbuffered region.
2274   Seek(base::TimeDelta::FromSeconds(50));
2275
2276   // Attempt to read in the unbuffered area; the reads should not be fulfilled.
2277   bool audio_read_done = false;
2278   bool video_read_done = false;
2279   ReadAudio(base::Bind(&OnReadDone_AbortExpected, &audio_read_done));
2280   ReadVideo(base::Bind(&OnReadDone_AbortExpected, &video_read_done));
2281   EXPECT_FALSE(audio_read_done);
2282   EXPECT_FALSE(video_read_done);
2283
2284   // Now cancel the pending seek, which should flush the reads with empty
2285   // buffers.
2286   base::TimeDelta seek_time = base::TimeDelta::FromSeconds(0);
2287   demuxer_->CancelPendingSeek(seek_time);
2288   message_loop_.RunUntilIdle();
2289   EXPECT_TRUE(audio_read_done);
2290   EXPECT_TRUE(video_read_done);
2291
2292   // A seek back to the buffered region should succeed.
2293   Seek(seek_time);
2294   GenerateExpectedReads(0, 4);
2295 }
2296
2297 TEST_F(ChunkDemuxerTest, SeekCanceledWhileWaitingForSeek) {
2298   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2299
2300   // Append cluster at the beginning of the stream.
2301   AppendCluster(GenerateCluster(0, 4));
2302
2303   // Start waiting for a seek.
2304   base::TimeDelta seek_time1 = base::TimeDelta::FromSeconds(50);
2305   base::TimeDelta seek_time2 = base::TimeDelta::FromSeconds(0);
2306   demuxer_->StartWaitingForSeek(seek_time1);
2307
2308   // Now cancel the upcoming seek to an unbuffered region.
2309   demuxer_->CancelPendingSeek(seek_time2);
2310   demuxer_->Seek(seek_time1, NewExpectedStatusCB(PIPELINE_OK));
2311
2312   // Read requests should be fulfilled with empty buffers.
2313   bool audio_read_done = false;
2314   bool video_read_done = false;
2315   ReadAudio(base::Bind(&OnReadDone_AbortExpected, &audio_read_done));
2316   ReadVideo(base::Bind(&OnReadDone_AbortExpected, &video_read_done));
2317   EXPECT_TRUE(audio_read_done);
2318   EXPECT_TRUE(video_read_done);
2319
2320   // A seek back to the buffered region should succeed.
2321   Seek(seek_time2);
2322   GenerateExpectedReads(0, 4);
2323 }
2324
2325 // Test that Seek() successfully seeks to all source IDs.
2326 TEST_F(ChunkDemuxerTest, SeekAudioAndVideoSources) {
2327   std::string audio_id = "audio1";
2328   std::string video_id = "video1";
2329   ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
2330
2331   AppendCluster(
2332       audio_id,
2333       GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
2334   AppendCluster(
2335       video_id,
2336       GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
2337
2338   // Read() should return buffers at 0.
2339   bool audio_read_done = false;
2340   bool video_read_done = false;
2341   ReadAudio(base::Bind(&OnReadDone,
2342                        base::TimeDelta::FromMilliseconds(0),
2343                        &audio_read_done));
2344   ReadVideo(base::Bind(&OnReadDone,
2345                        base::TimeDelta::FromMilliseconds(0),
2346                        &video_read_done));
2347   EXPECT_TRUE(audio_read_done);
2348   EXPECT_TRUE(video_read_done);
2349
2350   // Seek to 3 (an unbuffered region).
2351   Seek(base::TimeDelta::FromSeconds(3));
2352
2353   audio_read_done = false;
2354   video_read_done = false;
2355   ReadAudio(base::Bind(&OnReadDone,
2356                        base::TimeDelta::FromSeconds(3),
2357                        &audio_read_done));
2358   ReadVideo(base::Bind(&OnReadDone,
2359                        base::TimeDelta::FromSeconds(3),
2360                        &video_read_done));
2361   // Read()s should not return until after data is appended at the Seek point.
2362   EXPECT_FALSE(audio_read_done);
2363   EXPECT_FALSE(video_read_done);
2364
2365   AppendCluster(audio_id,
2366                 GenerateSingleStreamCluster(
2367                     3000, 3092, kAudioTrackNum, kAudioBlockDuration));
2368   AppendCluster(video_id,
2369                 GenerateSingleStreamCluster(
2370                     3000, 3132, kVideoTrackNum, kVideoBlockDuration));
2371
2372   message_loop_.RunUntilIdle();
2373
2374   // Read() should return buffers at 3.
2375   EXPECT_TRUE(audio_read_done);
2376   EXPECT_TRUE(video_read_done);
2377 }
2378
2379 // Test that Seek() completes successfully when EndOfStream
2380 // is called before data is available for that seek point.
2381 // This scenario can occur when seeking past the end of stream
2382 // of either audio or video (or both).
2383 TEST_F(ChunkDemuxerTest, EndOfStreamAfterPastEosSeek) {
2384   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2385
2386   AppendCluster(GenerateSingleStreamCluster(0, 120, kAudioTrackNum, 10));
2387   AppendCluster(GenerateSingleStreamCluster(0, 100, kVideoTrackNum, 5));
2388
2389   // Seeking past the end of video.
2390   // Note: audio data is available for that seek point.
2391   bool seek_cb_was_called = false;
2392   base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(110);
2393   demuxer_->StartWaitingForSeek(seek_time);
2394   demuxer_->Seek(seek_time,
2395                  base::Bind(OnSeekDone_OKExpected, &seek_cb_was_called));
2396   message_loop_.RunUntilIdle();
2397
2398   EXPECT_FALSE(seek_cb_was_called);
2399
2400   EXPECT_CALL(host_, SetDuration(
2401       base::TimeDelta::FromMilliseconds(120)));
2402   MarkEndOfStream(PIPELINE_OK);
2403   message_loop_.RunUntilIdle();
2404
2405   EXPECT_TRUE(seek_cb_was_called);
2406
2407   ShutdownDemuxer();
2408 }
2409
2410 // Test that EndOfStream is ignored if it arrives during a pending seek
2411 // whose seek time is before some existing ranges.
2412 TEST_F(ChunkDemuxerTest, EndOfStreamDuringPendingSeek) {
2413   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2414
2415   AppendCluster(GenerateSingleStreamCluster(0, 120, kAudioTrackNum, 10));
2416   AppendCluster(GenerateSingleStreamCluster(0, 100, kVideoTrackNum, 5));
2417   AppendCluster(GenerateSingleStreamCluster(200, 300, kAudioTrackNum, 10));
2418   AppendCluster(GenerateSingleStreamCluster(200, 300, kVideoTrackNum, 5));
2419
2420   bool seek_cb_was_called = false;
2421   base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(160);
2422   demuxer_->StartWaitingForSeek(seek_time);
2423   demuxer_->Seek(seek_time,
2424                  base::Bind(OnSeekDone_OKExpected, &seek_cb_was_called));
2425   message_loop_.RunUntilIdle();
2426
2427   EXPECT_FALSE(seek_cb_was_called);
2428
2429   EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(300)));
2430   MarkEndOfStream(PIPELINE_OK);
2431   message_loop_.RunUntilIdle();
2432
2433   EXPECT_FALSE(seek_cb_was_called);
2434
2435   demuxer_->UnmarkEndOfStream();
2436
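  // Append data covering the 160ms seek point so the pending seek can now
  // complete.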
2437   AppendCluster(GenerateSingleStreamCluster(140, 180, kAudioTrackNum, 10));
2438   AppendCluster(GenerateSingleStreamCluster(140, 180, kVideoTrackNum, 5));
2439
2440   message_loop_.RunUntilIdle();
2441
2442   EXPECT_TRUE(seek_cb_was_called);
2443
2444   ShutdownDemuxer();
2445 }
2446
2447 // Test ranges in an audio-only stream.
2448 TEST_F(ChunkDemuxerTest, GetBufferedRanges_AudioIdOnly) {
2449   EXPECT_CALL(*this, DemuxerOpened());
2450   demuxer_->Initialize(
2451       &host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
2452
2453   ASSERT_EQ(AddId(kSourceId, HAS_AUDIO), ChunkDemuxer::kOk);
2454   EXPECT_CALL(*this, InitSegmentReceived());
2455   AppendInitSegment(HAS_AUDIO);
2456
2457   // Test a simple cluster.
2458   AppendCluster(
2459       GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
2460
2461   CheckExpectedRanges("{ [0,92) }");
2462
2463   // Append a disjoint cluster to check for two separate ranges.
2464   AppendCluster(GenerateSingleStreamCluster(
2465       150, 219, kAudioTrackNum, kAudioBlockDuration));
2466
2467   CheckExpectedRanges("{ [0,92) [150,219) }");
2468 }
2469
2470 // Test ranges in a video-only stream.
2471 TEST_F(ChunkDemuxerTest, GetBufferedRanges_VideoIdOnly) {
2472   EXPECT_CALL(*this, DemuxerOpened());
2473   demuxer_->Initialize(
2474       &host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
2475
2476   ASSERT_EQ(AddId(kSourceId, HAS_VIDEO), ChunkDemuxer::kOk);
2477   EXPECT_CALL(*this, InitSegmentReceived());
2478   AppendInitSegment(HAS_VIDEO);
2479
2480   // Test a simple cluster.
2481   AppendCluster(
2482       GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
2483
2484   CheckExpectedRanges("{ [0,132) }");
2485
2486   // Append a disjoint cluster to check for two separate ranges.
2487   AppendCluster(GenerateSingleStreamCluster(
2488       200, 299, kVideoTrackNum, kVideoBlockDuration));
2489
2490   CheckExpectedRanges("{ [0,132) [200,299) }");
2491 }
2492
2493 TEST_F(ChunkDemuxerTest, GetBufferedRanges_AudioVideo) {
2494   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2495
2496   // Audio: 0 -> 23
2497   // Video: 0 -> 33
2498   // Buffered Range: 0 -> 23
2499   // Audio block duration is smaller than video block duration,
2500   // so the buffered ranges should correspond to the audio blocks.
2501   AppendCluster(GenerateSingleStreamCluster(
2502       0, kAudioBlockDuration, kAudioTrackNum, kAudioBlockDuration));
2503   AppendCluster(GenerateSingleStreamCluster(
2504       0, kVideoBlockDuration, kVideoTrackNum, kVideoBlockDuration));
2505
2506   CheckExpectedRanges("{ [0,23) }");
2507
2508   // Audio: 300 -> 400
2509   // Video: 320 -> 420
2510   // Buffered Range: 320 -> 400  (end overlap)
2511   AppendCluster(GenerateSingleStreamCluster(300, 400, kAudioTrackNum, 50));
2512   AppendCluster(GenerateSingleStreamCluster(320, 420, kVideoTrackNum, 50));
2513
2514   CheckExpectedRanges("{ [0,23) [320,400) }");
2515
2516   // Audio: 520 -> 590
2517   // Video: 500 -> 570
2518   // Buffered Range: 520 -> 570  (front overlap)
2519   AppendCluster(GenerateSingleStreamCluster(520, 590, kAudioTrackNum, 70));
2520   AppendCluster(GenerateSingleStreamCluster(500, 570, kVideoTrackNum, 70));
2521
2522   CheckExpectedRanges("{ [0,23) [320,400) [520,570) }");
2523
2524   // Audio: 720 -> 750
2525   // Video: 700 -> 770
2526   // Buffered Range: 720 -> 750  (complete overlap, audio)
2527   AppendCluster(GenerateSingleStreamCluster(720, 750, kAudioTrackNum, 30));
2528   AppendCluster(GenerateSingleStreamCluster(700, 770, kVideoTrackNum, 70));
2529
2530   CheckExpectedRanges("{ [0,23) [320,400) [520,570) [720,750) }");
2531
2532   // Audio: 900 -> 970
2533   // Video: 920 -> 950
2534   // Buffered Range: 920 -> 950  (complete overlap, video)
2535   AppendCluster(GenerateSingleStreamCluster(900, 970, kAudioTrackNum, 70));
2536   AppendCluster(GenerateSingleStreamCluster(920, 950, kVideoTrackNum, 30));
2537
2538   CheckExpectedRanges("{ [0,23) [320,400) [520,570) [720,750) [920,950) }");
2539
2540   // Appending within a buffered range should not affect the buffered ranges.
2541   AppendCluster(GenerateSingleStreamCluster(930, 950, kAudioTrackNum, 20));
2542   CheckExpectedRanges("{ [0,23) [320,400) [520,570) [720,750) [920,950) }");
2543
2544   // Appending to a single stream outside the buffered ranges should not
2545   // affect the combined buffered ranges.
2546   AppendCluster(GenerateSingleStreamCluster(1230, 1240, kVideoTrackNum, 10));
2547   CheckExpectedRanges("{ [0,23) [320,400) [520,570) [720,750) [920,950) }");
2548 }
2549
2550 TEST_F(ChunkDemuxerTest, GetBufferedRanges_AudioVideoText) {
2551   EXPECT_CALL(host_, AddTextStream(_, _));
2552   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));
2553
2554   // Append audio & video data
2555   AppendMuxedCluster(
2556       MuxedStreamInfo(kAudioTrackNum, "0K 23K"),
2557       MuxedStreamInfo(kVideoTrackNum, "0K 33"));
2558
2559   // Verify that a text track with no cues does not result in an empty buffered
2560   // range.
2561   CheckExpectedRanges("{ [0,46) }");
2562
2563   // Add some text cues.
2564   AppendMuxedCluster(
2565       MuxedStreamInfo(kAudioTrackNum, "100K 123K"),
2566       MuxedStreamInfo(kVideoTrackNum, "100K 133"),
2567       MuxedStreamInfo(kTextTrackNum, "100K 200K"));
2568
2569   // Verify that the text cues are not reflected in the buffered ranges.
2570   CheckExpectedRanges("{ [0,46) [100,146) }");
2571
2572   // Remove the buffered ranges.
2573   demuxer_->Remove(kSourceId, base::TimeDelta(),
2574                    base::TimeDelta::FromMilliseconds(250));
2575   CheckExpectedRanges("{ }");
2576 }
2577
2578 // Once MarkEndOfStream() is called, GetBufferedRanges should not cut off any
2579 // over-hanging tails at the end of the ranges, as these are likely due to block
2580 // duration differences.
2581 TEST_F(ChunkDemuxerTest, GetBufferedRanges_EndOfStream) {
2582   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2583
2584   AppendMuxedCluster(
2585       MuxedStreamInfo(kAudioTrackNum, "0K 23K"),
2586       MuxedStreamInfo(kVideoTrackNum, "0K 33"));
2587
2588   CheckExpectedRanges("{ [0,46) }");
2589
2590   EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(66)));
2591   MarkEndOfStream(PIPELINE_OK);
2592
2593   // Verify that the range extends to the end of the video data.
2594   CheckExpectedRanges("{ [0,66) }");
2595
2596   // Verify that the range reverts to the intersection when end of stream
2597   // has been cancelled.
2598   demuxer_->UnmarkEndOfStream();
2599   CheckExpectedRanges("{ [0,46) }");
2600
2601   // Append and remove data so that the 2 streams' end ranges do not overlap.
2602
2603   EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(398)));
2604   AppendMuxedCluster(
2605       MuxedStreamInfo(kAudioTrackNum, "200K 223K"),
2606       MuxedStreamInfo(kVideoTrackNum, "200K 233 266 299 332K 365"));
2607
2608   // At this point, the per-stream ranges are as follows:
2609   // Audio: [0,46) [200,246)
2610   // Video: [0,66) [200,398)
2611   CheckExpectedRanges("{ [0,46) [200,246) }");
2612
2613   demuxer_->Remove(kSourceId, base::TimeDelta::FromMilliseconds(200),
2614                    base::TimeDelta::FromMilliseconds(300));
2615
2616   // At this point, the per-stream ranges are as follows:
2617   // Audio: [0,46)
2618   // Video: [0,66) [332,398)
2619   CheckExpectedRanges("{ [0,46) }");
2620
2621   AppendMuxedCluster(
2622       MuxedStreamInfo(kAudioTrackNum, "200K 223K"),
2623       MuxedStreamInfo(kVideoTrackNum, "200K 233"));
2624
2625   // At this point, the per-stream ranges are as follows:
2626   // Audio: [0,46) [200,246)
2627   // Video: [0,66) [200,266) [332,398)
2628   // NOTE: The last ranges on each stream do not overlap in time.
2629   CheckExpectedRanges("{ [0,46) [200,246) }");
2630
2631   MarkEndOfStream(PIPELINE_OK);
2632
2633   // NOTE: The last range on each stream gets extended to the highest
2634   // end timestamp according to the spec. The last audio range gets extended
2635   // from [200,246) to [200,398), which is why the intersection results in the
2636   // middle range getting larger AND the new range appearing.
2637   CheckExpectedRanges("{ [0,46) [200,266) [332,398) }");
2638 }
2639
2640 TEST_F(ChunkDemuxerTest, DifferentStreamTimecodes) {
2641   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2642
2643   // Create a cluster where the video timecode begins 25ms after the audio.
2644   AppendCluster(GenerateCluster(0, 25, 8));
2645
2646   Seek(base::TimeDelta::FromSeconds(0));
2647   GenerateExpectedReads(0, 25, 8);
2648
2649   // Seek to 5 seconds.
2650   Seek(base::TimeDelta::FromSeconds(5));
2651
2652   // Generate a cluster to fulfill this seek, where audio timecode begins 25ms
2653   // after the video.
2654   AppendCluster(GenerateCluster(5025, 5000, 8));
2655   GenerateExpectedReads(5025, 5000, 8);
2656 }
2657
2658 TEST_F(ChunkDemuxerTest, DifferentStreamTimecodesSeparateSources) {
2659   std::string audio_id = "audio1";
2660   std::string video_id = "video1";
2661   ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
2662
2663   // Generate two streams where the video stream starts 5ms after the audio
2664   // stream and append them.
2665   AppendCluster(audio_id, GenerateSingleStreamCluster(
2666       25, 4 * kAudioBlockDuration + 25, kAudioTrackNum, kAudioBlockDuration));
2667   AppendCluster(video_id, GenerateSingleStreamCluster(
2668       30, 4 * kVideoBlockDuration + 30, kVideoTrackNum, kVideoBlockDuration));
2669
2670   // Both streams should be able to fulfill a seek to 25.
2671   Seek(base::TimeDelta::FromMilliseconds(25));
2672   GenerateAudioStreamExpectedReads(25, 4);
2673   GenerateVideoStreamExpectedReads(30, 4);
2674 }
2675
2676 TEST_F(ChunkDemuxerTest, DifferentStreamTimecodesOutOfRange) {
2677   std::string audio_id = "audio1";
2678   std::string video_id = "video1";
2679   ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
2680
2681   // Generate two streams where the video stream starts 10s after the audio
2682   // stream and append them.
2683   AppendCluster(audio_id, GenerateSingleStreamCluster(0,
2684       4 * kAudioBlockDuration + 0, kAudioTrackNum, kAudioBlockDuration));
2685   AppendCluster(video_id, GenerateSingleStreamCluster(10000,
2686       4 * kVideoBlockDuration + 10000, kVideoTrackNum, kVideoBlockDuration));
2687
2688   // Should not be able to fulfill a seek to 0.
2689   base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(0);
2690   demuxer_->StartWaitingForSeek(seek_time);
2691   demuxer_->Seek(seek_time,
2692                  NewExpectedStatusCB(PIPELINE_ERROR_ABORT));
2693   ExpectRead(DemuxerStream::AUDIO, 0);
2694   ExpectEndOfStream(DemuxerStream::VIDEO);
2695 }
2696
2697 TEST_F(ChunkDemuxerTest, ClusterWithNoBuffers) {
2698   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2699
2700   // Generate and append an empty cluster beginning at 0.
2701   AppendEmptyCluster(0);
2702
2703   // Sanity check that data can be appended after this cluster correctly.
2704   AppendCluster(GenerateCluster(0, 2));
2705   ExpectRead(DemuxerStream::AUDIO, 0);
2706   ExpectRead(DemuxerStream::VIDEO, 0);
2707 }
2708
2709 TEST_F(ChunkDemuxerTest, CodecPrefixMatching) {
2710   ChunkDemuxer::Status expected = ChunkDemuxer::kNotSupported;
2711
2712 #if defined(USE_PROPRIETARY_CODECS)
2713   expected = ChunkDemuxer::kOk;
2714 #endif
2715
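  // The "avc1.4D4041" codec id should only be accepted when proprietary codecs
  // are compiled in.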
2716   std::vector<std::string> codecs;
2717   codecs.push_back("avc1.4D4041");
2718
2719   EXPECT_EQ(demuxer_->AddId("source_id", "video/mp4", codecs), expected);
2720 }
2721
2722 // Test codec IDs that are not compliant with RFC 6381 but have been
2723 // seen in the wild.
2724 TEST_F(ChunkDemuxerTest, CodecIDsThatAreNotRFC6381Compliant) {
2725   ChunkDemuxer::Status expected = ChunkDemuxer::kNotSupported;
2726
2727 #if defined(USE_PROPRIETARY_CODECS)
2728   expected = ChunkDemuxer::kOk;
2729 #endif
2730   const char* codec_ids[] = {
2731     // GPAC places leading zeros on the audio object type.
2732     "mp4a.40.02",
2733     "mp4a.40.05"
2734   };
2735
2736   for (size_t i = 0; i < arraysize(codec_ids); ++i) {
2737     std::vector<std::string> codecs;
2738     codecs.push_back(codec_ids[i]);
2739
2740     ChunkDemuxer::Status result =
2741         demuxer_->AddId("source_id", "audio/mp4", codecs);
2742
2743     EXPECT_EQ(result, expected)
2744         << "Failed to add codec_id '" << codec_ids[i] << "'";
2745
2746     if (result == ChunkDemuxer::kOk)
2747       demuxer_->RemoveId("source_id");
2748   }
2749 }
2750
2751 TEST_F(ChunkDemuxerTest, EndOfStreamStillSetAfterSeek) {
2752   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2753
2754   EXPECT_CALL(host_, SetDuration(_))
2755       .Times(AnyNumber());
2756
2757   base::TimeDelta kLastAudioTimestamp = base::TimeDelta::FromMilliseconds(92);
2758   base::TimeDelta kLastVideoTimestamp = base::TimeDelta::FromMilliseconds(99);
2759
2760   AppendCluster(kDefaultFirstCluster());
2761   AppendCluster(kDefaultSecondCluster());
2762   MarkEndOfStream(PIPELINE_OK);
2763
2764   DemuxerStream::Status status;
2765   base::TimeDelta last_timestamp;
2766
2767   // Verify that we can read audio & video to the end w/o problems.
2768   ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
2769   EXPECT_EQ(DemuxerStream::kOk, status);
2770   EXPECT_EQ(kLastAudioTimestamp, last_timestamp);
2771
2772   ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
2773   EXPECT_EQ(DemuxerStream::kOk, status);
2774   EXPECT_EQ(kLastVideoTimestamp, last_timestamp);
2775
2776   // Seek back to 0 and verify that we can read to the end again.
2777   Seek(base::TimeDelta::FromMilliseconds(0));
2778
2779   ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
2780   EXPECT_EQ(DemuxerStream::kOk, status);
2781   EXPECT_EQ(kLastAudioTimestamp, last_timestamp);
2782
2783   ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
2784   EXPECT_EQ(DemuxerStream::kOk, status);
2785   EXPECT_EQ(kLastVideoTimestamp, last_timestamp);
2786 }
2787
2788 TEST_F(ChunkDemuxerTest, GetBufferedRangesBeforeInitSegment) {
2789   EXPECT_CALL(*this, DemuxerOpened());
2790   demuxer_->Initialize(&host_, CreateInitDoneCB(PIPELINE_OK), true);
2791   ASSERT_EQ(AddId("audio", HAS_AUDIO), ChunkDemuxer::kOk);
2792   ASSERT_EQ(AddId("video", HAS_VIDEO), ChunkDemuxer::kOk);
2793
2794   CheckExpectedRanges("audio", "{ }");
2795   CheckExpectedRanges("video", "{ }");
2796 }
2797
2798 // Test that Seek() completes successfully, and that all buffered data plus
2799 // end of stream is readable, when MarkEndOfStream() is called during the seek.
2800 TEST_F(ChunkDemuxerTest, EndOfStreamDuringSeek) {
2801   InSequence s;
2802
2803   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2804
2805   AppendCluster(kDefaultFirstCluster());
2806
2807   base::TimeDelta seek_time = base::TimeDelta::FromSeconds(0);
2808   demuxer_->StartWaitingForSeek(seek_time);
2809
2810   AppendCluster(kDefaultSecondCluster());
2811   EXPECT_CALL(host_, SetDuration(
2812       base::TimeDelta::FromMilliseconds(kDefaultSecondClusterEndTimestamp)));
2813   MarkEndOfStream(PIPELINE_OK);
2814
2815   demuxer_->Seek(seek_time, NewExpectedStatusCB(PIPELINE_OK));
2816
2817   GenerateExpectedReads(0, 4);
2818   GenerateExpectedReads(46, 66, 5);
2819
2820   EndOfStreamHelper end_of_stream_helper(demuxer_.get());
2821   end_of_stream_helper.RequestReads();
2822   end_of_stream_helper.CheckIfReadDonesWereCalled(true);
2823 }
2824
2825 TEST_F(ChunkDemuxerTest, ConfigChange_Video) {
2826   InSequence s;
2827
2828   ASSERT_TRUE(InitDemuxerWithConfigChangeData());
2829
2830   DemuxerStream::Status status;
2831   base::TimeDelta last_timestamp;
2832
2833   DemuxerStream* video = demuxer_->GetStream(DemuxerStream::VIDEO);
2834
2835   // Fetch initial video config and verify it matches what we expect.
2836   const VideoDecoderConfig& video_config_1 = video->video_decoder_config();
2837   ASSERT_TRUE(video_config_1.IsValidConfig());
2838   EXPECT_EQ(video_config_1.natural_size().width(), 320);
2839   EXPECT_EQ(video_config_1.natural_size().height(), 240);
2840
2841   ExpectRead(DemuxerStream::VIDEO, 0);
2842
2843   ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
2844
2845   ASSERT_EQ(status, DemuxerStream::kConfigChanged);
2846   EXPECT_EQ(last_timestamp.InMilliseconds(), 501);
2847
2848   // Fetch the new decoder config.
2849   const VideoDecoderConfig& video_config_2 = video->video_decoder_config();
2850   ASSERT_TRUE(video_config_2.IsValidConfig());
2851   EXPECT_EQ(video_config_2.natural_size().width(), 640);
2852   EXPECT_EQ(video_config_2.natural_size().height(), 360);
2853
2854   ExpectRead(DemuxerStream::VIDEO, 527);
2855
2856   // Read until the next config change.
2857   ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
2858   ASSERT_EQ(status, DemuxerStream::kConfigChanged);
2859   EXPECT_EQ(last_timestamp.InMilliseconds(), 793);
2860
2861   // Get the new config and verify that it matches the first one.
2862   ASSERT_TRUE(video_config_1.Matches(video->video_decoder_config()));
2863
2864   ExpectRead(DemuxerStream::VIDEO, 801);
2865
2866   // Read until the end of the stream just to make sure there aren't any other
2867   // config changes.
2868   ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
2869   ASSERT_EQ(status, DemuxerStream::kOk);
2870 }
2871
2872 TEST_F(ChunkDemuxerTest, ConfigChange_Audio) {
2873   InSequence s;
2874
2875   ASSERT_TRUE(InitDemuxerWithConfigChangeData());
2876
2877   DemuxerStream::Status status;
2878   base::TimeDelta last_timestamp;
2879
2880   DemuxerStream* audio = demuxer_->GetStream(DemuxerStream::AUDIO);
2881
2882   // Fetch initial audio config and verify it matches what we expect.
2883   const AudioDecoderConfig& audio_config_1 = audio->audio_decoder_config();
2884   ASSERT_TRUE(audio_config_1.IsValidConfig());
2885   EXPECT_EQ(audio_config_1.samples_per_second(), 44100);
2886   EXPECT_EQ(audio_config_1.extra_data_size(), 3863u);
2887
2888   ExpectRead(DemuxerStream::AUDIO, 0);
2889
2890   // The first config change seen is from a splice frame representing an overlap
2891   // of buffers from config 1 by buffers from config 2.
2892   ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
2893   ASSERT_EQ(status, DemuxerStream::kConfigChanged);
2894   EXPECT_EQ(last_timestamp.InMilliseconds(), 524);
2895
2896   // Fetch the new decoder config.
2897   const AudioDecoderConfig& audio_config_2 = audio->audio_decoder_config();
2898   ASSERT_TRUE(audio_config_2.IsValidConfig());
2899   EXPECT_EQ(audio_config_2.samples_per_second(), 44100);
2900   EXPECT_EQ(audio_config_2.extra_data_size(), 3935u);
2901
2902   // The next config change is from a splice frame representing an overlap of
2903   // buffers from config 2 by buffers from config 1.
2904   ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
2905   ASSERT_EQ(status, DemuxerStream::kConfigChanged);
2906   EXPECT_EQ(last_timestamp.InMilliseconds(), 782);
2907   ASSERT_TRUE(audio_config_1.Matches(audio->audio_decoder_config()));
2908
2909   // Read until the end of the stream just to make sure there aren't any other
2910   // config changes.
2911   ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
2912   ASSERT_EQ(status, DemuxerStream::kOk);
2913   EXPECT_EQ(last_timestamp.InMilliseconds(), 2744);
2914 }
2915
2916 TEST_F(ChunkDemuxerTest, ConfigChange_Seek) {
2917   InSequence s;
2918
2919   ASSERT_TRUE(InitDemuxerWithConfigChangeData());
2920
2921   DemuxerStream* video = demuxer_->GetStream(DemuxerStream::VIDEO);
2922
2923   // Fetch initial video config and verify it matches what we expect.
2924   const VideoDecoderConfig& video_config_1 = video->video_decoder_config();
2925   ASSERT_TRUE(video_config_1.IsValidConfig());
2926   EXPECT_EQ(video_config_1.natural_size().width(), 320);
2927   EXPECT_EQ(video_config_1.natural_size().height(), 240);
2928
2929   ExpectRead(DemuxerStream::VIDEO, 0);
2930
2931   // Seek to a location with a different config.
2932   Seek(base::TimeDelta::FromMilliseconds(527));
2933
2934   // Verify that the config change is signaled.
2935   ExpectConfigChanged(DemuxerStream::VIDEO);
2936
2937   // Fetch the new decoder config and verify it is what we expect.
2938   const VideoDecoderConfig& video_config_2 = video->video_decoder_config();
2939   ASSERT_TRUE(video_config_2.IsValidConfig());
2940   EXPECT_EQ(video_config_2.natural_size().width(), 640);
2941   EXPECT_EQ(video_config_2.natural_size().height(), 360);
2942
2943   // Verify that Read() will return a buffer now.
2944   ExpectRead(DemuxerStream::VIDEO, 527);
2945
2946   // Seek back to the beginning and verify we get another config change.
2947   Seek(base::TimeDelta::FromMilliseconds(0));
2948   ExpectConfigChanged(DemuxerStream::VIDEO);
2949   ASSERT_TRUE(video_config_1.Matches(video->video_decoder_config()));
2950   ExpectRead(DemuxerStream::VIDEO, 0);
2951
2952   // Seek to a location that requires a config change and then
2953   // seek to a new location that has the same configuration as
2954   // the start of the file without a Read() in the middle.
2955   Seek(base::TimeDelta::FromMilliseconds(527));
2956   Seek(base::TimeDelta::FromMilliseconds(801));
2957
2958   // Verify that no config change is signaled.
2959   ExpectRead(DemuxerStream::VIDEO, 801);
2960   ASSERT_TRUE(video_config_1.Matches(video->video_decoder_config()));
2961 }
2962
2963 TEST_F(ChunkDemuxerTest, TimestampPositiveOffset) {
2964   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2965
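  // Apply a +30 second offset so blocks generated at time 0 are buffered
  // starting at 30000 ms.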
2966   ASSERT_TRUE(SetTimestampOffset(kSourceId, base::TimeDelta::FromSeconds(30)));
2967   AppendCluster(GenerateCluster(0, 2));
2968
2969   Seek(base::TimeDelta::FromMilliseconds(30000));
2970
2971   GenerateExpectedReads(30000, 2);
2972 }
2973
2974 TEST_F(ChunkDemuxerTest, TimestampNegativeOffset) {
2975   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2976
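  // Apply a -1 second offset so blocks generated at 1000 ms are buffered
  // starting at time 0.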
2977   ASSERT_TRUE(SetTimestampOffset(kSourceId, base::TimeDelta::FromSeconds(-1)));
2978   AppendCluster(GenerateCluster(1000, 2));
2979
2980   GenerateExpectedReads(0, 2);
2981 }
2982
2983 TEST_F(ChunkDemuxerTest, TimestampOffsetSeparateStreams) {
2984   std::string audio_id = "audio1";
2985   std::string video_id = "video1";
2986   ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
2987
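  // Shift both streams back by 2500 ms so media generated at 2500 ms is
  // buffered starting at time 0.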
2988   ASSERT_TRUE(SetTimestampOffset(
2989       audio_id, base::TimeDelta::FromMilliseconds(-2500)));
2990   ASSERT_TRUE(SetTimestampOffset(
2991       video_id, base::TimeDelta::FromMilliseconds(-2500)));
2992   AppendCluster(audio_id, GenerateSingleStreamCluster(2500,
2993       2500 + kAudioBlockDuration * 4, kAudioTrackNum, kAudioBlockDuration));
2994   AppendCluster(video_id, GenerateSingleStreamCluster(2500,
2995       2500 + kVideoBlockDuration * 4, kVideoTrackNum, kVideoBlockDuration));
2996   GenerateAudioStreamExpectedReads(0, 4);
2997   GenerateVideoStreamExpectedReads(0, 4);
2998
2999   Seek(base::TimeDelta::FromMilliseconds(27300));
3000
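  // Now shift both streams forward so media generated at time 0 is buffered
  // starting at 27300 ms, satisfying the seek.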
3001   ASSERT_TRUE(SetTimestampOffset(
3002       audio_id, base::TimeDelta::FromMilliseconds(27300)));
3003   ASSERT_TRUE(SetTimestampOffset(
3004       video_id, base::TimeDelta::FromMilliseconds(27300)));
3005   AppendCluster(audio_id, GenerateSingleStreamCluster(
3006       0, kAudioBlockDuration * 4, kAudioTrackNum, kAudioBlockDuration));
3007   AppendCluster(video_id, GenerateSingleStreamCluster(
3008       0, kVideoBlockDuration * 4, kVideoTrackNum, kVideoBlockDuration));
3009   GenerateVideoStreamExpectedReads(27300, 4);
3010   GenerateAudioStreamExpectedReads(27300, 4);
3011 }
3012
3013 TEST_F(ChunkDemuxerTest, IsParsingMediaSegmentMidMediaSegment) {
3014   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3015
3016   scoped_ptr<Cluster> cluster = GenerateCluster(0, 2);
3017   // Append only part of the cluster data.
3018   AppendData(cluster->data(), cluster->size() - 13);
3019
3020   // Confirm we're in the middle of parsing a media segment.
3021   ASSERT_TRUE(demuxer_->IsParsingMediaSegment(kSourceId));
3022
3023   demuxer_->Abort(kSourceId,
3024                   append_window_start_for_next_append_,
3025                   append_window_end_for_next_append_,
3026                   &timestamp_offset_map_[kSourceId]);
3027
3028   // After Abort(), parsing should no longer be in the middle of a media
3029   // segment.
3030   ASSERT_FALSE(demuxer_->IsParsingMediaSegment(kSourceId));
3031 }
3032
3033 #if defined(USE_PROPRIETARY_CODECS)
3034 #if defined(ENABLE_MPEG2TS_STREAM_PARSER)
3035 TEST_F(ChunkDemuxerTest, EmitBuffersDuringAbort) {
3036   EXPECT_CALL(*this, DemuxerOpened());
3037   demuxer_->Initialize(
3038       &host_, CreateInitDoneCB(kInfiniteDuration(), PIPELINE_OK), true);
3039   EXPECT_EQ(ChunkDemuxer::kOk, AddIdForMp2tSource(kSourceId));
3040
3041   // For info:
3042   // DTS/PTS derived using dvbsnoop -s ts -if bear-1280x720.ts -tssubdecode
3043   // Video: first PES:
3044   //        PTS: 126912 (0x0001efc0)  [= 90 kHz-Timestamp: 0:00:01.4101]
3045   //        DTS: 123909 (0x0001e405)  [= 90 kHz-Timestamp: 0:00:01.3767]
3046   // Audio: first PES:
3047   //        PTS: 126000 (0x0001ec30)  [= 90 kHz-Timestamp: 0:00:01.4000]
3048   //        DTS: 123910 (0x0001e406)  [= 90 kHz-Timestamp: 0:00:01.3767]
3049   // Video: last PES:
3050   //        PTS: 370155 (0x0005a5eb)  [= 90 kHz-Timestamp: 0:00:04.1128]
3051   //        DTS: 367152 (0x00059a30)  [= 90 kHz-Timestamp: 0:00:04.0794]
3052   // Audio: last PES:
3053   //        PTS: 353788 (0x000565fc)  [= 90 kHz-Timestamp: 0:00:03.9309]
3054
3055   scoped_refptr<DecoderBuffer> buffer = ReadTestDataFile("bear-1280x720.ts");
3056   EXPECT_CALL(*this, InitSegmentReceived());
3057   AppendData(kSourceId, buffer->data(), buffer->data_size());
3058
3059   // Confirm we're in the middle of parsing a media segment.
3060   ASSERT_TRUE(demuxer_->IsParsingMediaSegment(kSourceId));
3061
3062   // Aborting the MPEG-2 TS parser triggers the emission of the last video
3063   // buffer that is still pending in the stream parser.
3064   Ranges<base::TimeDelta> range_before_abort =
3065       demuxer_->GetBufferedRanges(kSourceId);
3066   demuxer_->Abort(kSourceId,
3067                   append_window_start_for_next_append_,
3068                   append_window_end_for_next_append_,
3069                   &timestamp_offset_map_[kSourceId]);
3070   Ranges<base::TimeDelta> range_after_abort =
3071       demuxer_->GetBufferedRanges(kSourceId);
3072
3073   ASSERT_EQ(range_before_abort.size(), 1u);
3074   ASSERT_EQ(range_after_abort.size(), 1u);
3075   EXPECT_EQ(range_after_abort.start(0), range_before_abort.start(0));
3076   EXPECT_GT(range_after_abort.end(0), range_before_abort.end(0));
3077 }
3078
3079 TEST_F(ChunkDemuxerTest, SeekCompleteDuringAbort) {
3080   EXPECT_CALL(*this, DemuxerOpened());
3081   demuxer_->Initialize(
3082       &host_, CreateInitDoneCB(kInfiniteDuration(), PIPELINE_OK), true);
3083   EXPECT_EQ(ChunkDemuxer::kOk, AddIdForMp2tSource(kSourceId));
3084
3085   // For info:
3086   // DTS/PTS derived using dvbsnoop -s ts -if bear-1280x720.ts -tssubdecode
3087   // Video: first PES:
3088   //        PTS: 126912 (0x0001efc0)  [= 90 kHz-Timestamp: 0:00:01.4101]
3089   //        DTS: 123909 (0x0001e405)  [= 90 kHz-Timestamp: 0:00:01.3767]
3090   // Audio: first PES:
3091   //        PTS: 126000 (0x0001ec30)  [= 90 kHz-Timestamp: 0:00:01.4000]
3092   //        DTS: 123910 (0x0001e406)  [= 90 kHz-Timestamp: 0:00:01.3767]
3093   // Video: last PES:
3094   //        PTS: 370155 (0x0005a5eb)  [= 90 kHz-Timestamp: 0:00:04.1128]
3095   //        DTS: 367152 (0x00059a30)  [= 90 kHz-Timestamp: 0:00:04.0794]
3096   // Audio: last PES:
3097   //        PTS: 353788 (0x000565fc)  [= 90 kHz-Timestamp: 0:00:03.9309]
3098
3099   scoped_refptr<DecoderBuffer> buffer = ReadTestDataFile("bear-1280x720.ts");
3100   EXPECT_CALL(*this, InitSegmentReceived());
3101   AppendData(kSourceId, buffer->data(), buffer->data_size());
3102
3103   // Confirm we're in the middle of parsing a media segment.
3104   ASSERT_TRUE(demuxer_->IsParsingMediaSegment(kSourceId));
3105
3106   // Seek to a time corresponding to buffers that will be emitted during the
3107   // abort.
3108   Seek(base::TimeDelta::FromMilliseconds(4110));
3109
3110   // Aborting the MPEG-2 TS parser triggers the emission of the last video
3111   // buffer that is still pending in the stream parser.
3112   demuxer_->Abort(kSourceId,
3113                   append_window_start_for_next_append_,
3114                   append_window_end_for_next_append_,
3115                   &timestamp_offset_map_[kSourceId]);
3116 }
3117
3118 #endif
3119 #endif
3120
3121 TEST_F(ChunkDemuxerTest, WebMIsParsingMediaSegmentDetection) {
3122   const uint8 kBuffer[] = {
3123     0x1F, 0x43, 0xB6, 0x75, 0x83,  // CLUSTER (size = 3)
3124     0xE7, 0x81, 0x01,                // Cluster TIMECODE (value = 1)
3125
3126     0x1F, 0x43, 0xB6, 0x75, 0xFF,  // CLUSTER (size = unknown; really 3 due to:)
3127     0xE7, 0x81, 0x02,                // Cluster TIMECODE (value = 2)
3128     /* e.g. put some blocks here... */
3129     0x1A, 0x45, 0xDF, 0xA3, 0x8A,  // EBMLHEADER (size = 10, not fully appended)
3130   };
3131
3132   // This array lists the expected return value of IsParsingMediaSegment()
3133   // following each incrementally appended byte in |kBuffer|.
3134   const bool kExpectedReturnValues[] = {
3135     false, false, false, false, true,
3136     true, true, false,
3137
3138     false, false, false, false, true,
3139     true, true, true,
3140
3141     true, true, true, true, false,
3142   };
3143
3144   COMPILE_ASSERT(arraysize(kBuffer) == arraysize(kExpectedReturnValues),
3145       test_arrays_out_of_sync);
3146   COMPILE_ASSERT(arraysize(kBuffer) == sizeof(kBuffer), not_one_byte_per_index);
3147
3148   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3149
3150   for (size_t i = 0; i < sizeof(kBuffer); i++) {
3151     DVLOG(3) << "Appending and testing index " << i;
3152     AppendData(kBuffer + i, 1);
3153     bool expected_return_value = kExpectedReturnValues[i];
3154     EXPECT_EQ(expected_return_value,
3155               demuxer_->IsParsingMediaSegment(kSourceId));
3156   }
3157 }
3158
3159 TEST_F(ChunkDemuxerTest, DurationChange) {
3160   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3161   const int kStreamDuration = kDefaultDuration().InMilliseconds();
3162
3163   // Add data leading up to the currently set duration.
3164   AppendCluster(GenerateCluster(kStreamDuration - kAudioBlockDuration,
3165                                 kStreamDuration - kVideoBlockDuration,
3166                                 2));
3167
3168   CheckExpectedRanges(kSourceId, "{ [201191,201224) }");
3169
3170   // Add data beginning at the currently set duration and expect a new duration
3171   // to be signaled. Note that the last video block will have a higher end
3172   // timestamp than the last audio block.
3173   const int kNewStreamDurationVideo = kStreamDuration + kVideoBlockDuration;
3174   EXPECT_CALL(host_, SetDuration(
3175       base::TimeDelta::FromMilliseconds(kNewStreamDurationVideo)));
3176   AppendCluster(GenerateCluster(kDefaultDuration().InMilliseconds(), 2));
3177
3178   CheckExpectedRanges(kSourceId, "{ [201191,201247) }");
3179
3180   // Add more data to the end of each media type. Note that the last audio block
3181   // will have a higher end timestamp than the last video block.
3182   const int kFinalStreamDuration = kStreamDuration + kAudioBlockDuration * 3;
3183   EXPECT_CALL(host_, SetDuration(
3184       base::TimeDelta::FromMilliseconds(kFinalStreamDuration)));
3185   AppendCluster(GenerateCluster(kStreamDuration + kAudioBlockDuration,
3186                                 kStreamDuration + kVideoBlockDuration,
3187                                 3));
3188
3189   // See that the range has increased appropriately (but not to the full
3190   // duration of 201293, since there is not enough video appended for that).
3191   CheckExpectedRanges(kSourceId, "{ [201191,201290) }");
3192 }
3193
3194 TEST_F(ChunkDemuxerTest, DurationChangeTimestampOffset) {
3195   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3196   ASSERT_TRUE(SetTimestampOffset(kSourceId, kDefaultDuration()));
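  // Appending at an offset equal to the current duration should extend the
  // duration to the end of the newly appended data.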
3197   EXPECT_CALL(host_, SetDuration(
3198       kDefaultDuration() + base::TimeDelta::FromMilliseconds(
3199           kVideoBlockDuration * 2)));
3200   AppendCluster(GenerateCluster(0, 4));
3201 }
3202
3203 TEST_F(ChunkDemuxerTest, EndOfStreamTruncateDuration) {
3204   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3205
3206   AppendCluster(kDefaultFirstCluster());
3207
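  // Marking the end of stream should truncate the duration down to the end
  // of the buffered data.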
3208   EXPECT_CALL(host_, SetDuration(
3209       base::TimeDelta::FromMilliseconds(kDefaultFirstClusterEndTimestamp)));
3210   MarkEndOfStream(PIPELINE_OK);
3211 }
3212
3214 TEST_F(ChunkDemuxerTest, ZeroLengthAppend) {
3215   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
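  // An append of zero bytes should be handled gracefully.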
3216   AppendData(NULL, 0);
3217 }
3218
3219 TEST_F(ChunkDemuxerTest, AppendAfterEndOfStream) {
3220   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3221
3222   EXPECT_CALL(host_, SetDuration(_))
3223       .Times(AnyNumber());
3224
3225   AppendCluster(kDefaultFirstCluster());
3226   MarkEndOfStream(PIPELINE_OK);
3227
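  // Clear the end of stream state so more data can be appended, then signal
  // end of stream again.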
3228   demuxer_->UnmarkEndOfStream();
3229
3230   AppendCluster(kDefaultSecondCluster());
3231   MarkEndOfStream(PIPELINE_OK);
3232 }
3233
3234 // Test receiving a Shutdown() call before we get an Initialize()
3235 // call. This can happen if video element gets destroyed before
3236 // the pipeline has a chance to initialize the demuxer.
3237 TEST_F(ChunkDemuxerTest, Shutdown_BeforeInitialize) {
3238   demuxer_->Shutdown();
3239   demuxer_->Initialize(
3240       &host_, CreateInitDoneCB(DEMUXER_ERROR_COULD_NOT_OPEN), true);
3241   message_loop_.RunUntilIdle();
3242 }
3243
3244 // Verifies that signaling end of stream while stalled at a gap
3245 // boundary does not trigger end of stream buffers to be returned.
3246 TEST_F(ChunkDemuxerTest, EndOfStreamWhileWaitingForGapToBeFilled) {
3247   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3248
3249   AppendCluster(0, 10);
3250   AppendCluster(300, 10);
3251   CheckExpectedRanges(kSourceId, "{ [0,132) [300,432) }");
3252
3253   GenerateExpectedReads(0, 10);
3254
3255   bool audio_read_done = false;
3256   bool video_read_done = false;
3257   ReadAudio(base::Bind(&OnReadDone,
3258                        base::TimeDelta::FromMilliseconds(138),
3259                        &audio_read_done));
3260   ReadVideo(base::Bind(&OnReadDone,
3261                        base::TimeDelta::FromMilliseconds(138),
3262                        &video_read_done));
3263
3264   // Verify that the reads didn't complete.
3265   EXPECT_FALSE(audio_read_done);
3266   EXPECT_FALSE(video_read_done);
3267
3268   EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(438)));
3269   MarkEndOfStream(PIPELINE_OK);
3270
3271   // Verify that the reads still haven't completed.
3272   EXPECT_FALSE(audio_read_done);
3273   EXPECT_FALSE(video_read_done);
3274
3275   demuxer_->UnmarkEndOfStream();
3276
3277   AppendCluster(138, 22);
3278
3279   message_loop_.RunUntilIdle();
3280
3281   CheckExpectedRanges(kSourceId, "{ [0,435) }");
3282
3283   // Verify that the reads have completed.
3284   EXPECT_TRUE(audio_read_done);
3285   EXPECT_TRUE(video_read_done);
3286
3287   // Read the rest of the buffers.
3288   GenerateExpectedReads(161, 171, 20);
3289
3290   // Verify that reads block because the append cleared the end of stream state.
3291   audio_read_done = false;
3292   video_read_done = false;
3293   ReadAudio(base::Bind(&OnReadDone_EOSExpected,
3294                        &audio_read_done));
3295   ReadVideo(base::Bind(&OnReadDone_EOSExpected,
3296                        &video_read_done));
3297
3298   // Verify that the reads don't complete.
3299   EXPECT_FALSE(audio_read_done);
3300   EXPECT_FALSE(video_read_done);
3301
3302   EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(437)));
3303   MarkEndOfStream(PIPELINE_OK);
3304
3305   EXPECT_TRUE(audio_read_done);
3306   EXPECT_TRUE(video_read_done);
3307 }
3308
3309 TEST_F(ChunkDemuxerTest, CanceledSeekDuringInitialPreroll) {
3310   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3311
3312   // Cancel preroll.
3313   base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(200);
3314   demuxer_->CancelPendingSeek(seek_time);
3315
3316   // Initiate the seek to the new location.
3317   Seek(seek_time);
3318
3319   // Append data to satisfy the seek.
3320   AppendCluster(seek_time.InMilliseconds(), 10);
3321 }
3322
3323 TEST_F(ChunkDemuxerTest, SetMemoryLimitType) {
3324   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3325
3326   // Set different memory limits for audio and video.
3327   demuxer_->SetMemoryLimits(DemuxerStream::AUDIO, 10 * kBlockSize);
3328   demuxer_->SetMemoryLimits(DemuxerStream::VIDEO, 5 * kBlockSize);
3329
3330   base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(1000);
3331
3332   // Append data at the start that can be garbage collected:
3333   AppendSingleStreamCluster(kSourceId, kAudioTrackNum, 0, 10);
3334   AppendSingleStreamCluster(kSourceId, kVideoTrackNum, 0, 5);
3335
3336   CheckExpectedRanges(DemuxerStream::AUDIO, "{ [0,230) }");
3337   CheckExpectedRanges(DemuxerStream::VIDEO, "{ [0,165) }");
3338
3339   // Seek so we can garbage collect the data appended above.
3340   Seek(seek_time);
3341
3342   // Append data at seek_time.
3343   AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
3344                             seek_time.InMilliseconds(), 10);
3345   AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
3346                             seek_time.InMilliseconds(), 5);
3347
3348   // Verify that the old data, and nothing more, has been garbage collected.
3349   CheckExpectedRanges(DemuxerStream::AUDIO, "{ [1000,1230) }");
3350   CheckExpectedRanges(DemuxerStream::VIDEO, "{ [1000,1165) }");
3351 }
3352
3353 TEST_F(ChunkDemuxerTest, GCDuringSeek) {
3354   ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
3355
3356   demuxer_->SetMemoryLimits(DemuxerStream::AUDIO, 5 * kBlockSize);
3357
3358   base::TimeDelta seek_time1 = base::TimeDelta::FromMilliseconds(1000);
3359   base::TimeDelta seek_time2 = base::TimeDelta::FromMilliseconds(500);
3360
3361   // Initiate a seek to |seek_time1|.
3362   Seek(seek_time1);
3363
3364   // Append data to satisfy the first seek request.
3365   AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
3366                             seek_time1.InMilliseconds(), 5);
3367   CheckExpectedRanges(kSourceId, "{ [1000,1115) }");
3368
3369   // Signal that the second seek is starting.
3370   demuxer_->StartWaitingForSeek(seek_time2);
3371
3372   // Append data to satisfy the second seek. This append triggers
3373   // the garbage collection logic since we set the memory limit to
3374   // 5 blocks.
3375   AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
3376                             seek_time2.InMilliseconds(), 5);
3377
3378   // Verify that the buffers that cover |seek_time2| do not get
3379   // garbage collected.
3380   CheckExpectedRanges(kSourceId, "{ [500,615) }");
3381
3382   // Complete the seek.
3383   demuxer_->Seek(seek_time2, NewExpectedStatusCB(PIPELINE_OK));
3384
3386   // Append more data and make sure that the blocks for |seek_time2|
3387   // don't get removed.
3388   //
3389   // NOTE: The current GC algorithm tries to preserve the GOP at the
3390   //  current position as well as the last appended GOP. This is
3391   //  why there are 2 ranges in the expectations.
3392   AppendSingleStreamCluster(kSourceId, kAudioTrackNum, 700, 5);
3393   CheckExpectedRanges(kSourceId, "{ [500,592) [792,815) }");
3394 }
3395
3396 TEST_F(ChunkDemuxerTest, AppendWindow_Video) {
3397   ASSERT_TRUE(InitDemuxer(HAS_VIDEO));
3398   DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::VIDEO);
3399
3400   // Set the append window to [50,280).
3401   append_window_start_for_next_append_ = base::TimeDelta::FromMilliseconds(50);
3402   append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(280);
3403
3404   // Append a cluster that starts before and ends after the append window.
3405   AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
3406                             "0K 30 60 90 120K 150 180 210 240K 270 300 330K");
3407
3408   // Verify that GOPs that start outside the window are not included
3409   // in the buffer. Also verify that buffers that start inside the
3410   // window and extend beyond the end of the window are not included.
3411   CheckExpectedRanges(kSourceId, "{ [120,270) }");
3412   CheckExpectedBuffers(stream, "120 150 180 210 240");
3413
3414   // Extend the append window to [50,650).
3415   append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(650);
3416
3417   // Append more data and verify that the added buffers start at the next
3418   // keyframe.
3419   AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
3420                             "360 390 420K 450 480 510 540K 570 600 630K");
3421   CheckExpectedRanges(kSourceId, "{ [120,270) [420,630) }");
3422 }
3423
3424 TEST_F(ChunkDemuxerTest, AppendWindow_Audio) {
3425   ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
3426   DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::AUDIO);
3427
3428   // Set the append window to [50,280).
3429   append_window_start_for_next_append_ = base::TimeDelta::FromMilliseconds(50);
3430   append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(280);
3431
3432   // Append a cluster that starts before and ends after the append window.
3433   AppendSingleStreamCluster(
3434       kSourceId, kAudioTrackNum,
3435       "0K 30K 60K 90K 120K 150K 180K 210K 240K 270K 300K 330K");
3436
3437   // Verify that frames that end outside the window are not included
3438   // in the buffer. Also verify that buffers that start inside the
3439   // window and extend beyond the end of the window are not included.
3440   //
3441   // The first 50ms of the range should be truncated since it overlaps
3442   // the start of the append window.
3443   CheckExpectedRanges(kSourceId, "{ [50,280) }");
3444
3445   // The "50P" buffer is the "0" buffer marked for complete discard.  The next
3446   // "50" buffer is the "30" buffer marked with 20ms of start discard.
3447   CheckExpectedBuffers(stream, "50P 50 60 90 120 150 180 210 240");
3448
3449   // Extend the append window to [50,650).
3450   append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(650);
3451
3452   // Append more data and verify that a new range is created.
3453   AppendSingleStreamCluster(
3454       kSourceId, kAudioTrackNum,
3455       "360K 390K 420K 450K 480K 510K 540K 570K 600K 630K");
3456   CheckExpectedRanges(kSourceId, "{ [50,280) [360,650) }");
3457 }
3458
3459 TEST_F(ChunkDemuxerTest, AppendWindow_AudioOverlapStartAndEnd) {
3460   ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
3461
3462   // Set the append window to [10,20).
3463   append_window_start_for_next_append_ = base::TimeDelta::FromMilliseconds(10);
3464   append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(20);
3465
3466   // Append a cluster that starts before and ends after the append window.
3467   AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "0K");
3468
3469   // Verify the append is clipped to the append window.
3470   CheckExpectedRanges(kSourceId, "{ [10,20) }");
3471 }
3472
3473 TEST_F(ChunkDemuxerTest, AppendWindow_WebMFile_AudioOnly) {
3474   EXPECT_CALL(*this, DemuxerOpened());
3475   demuxer_->Initialize(
3476       &host_,
3477       CreateInitDoneCB(base::TimeDelta::FromMilliseconds(2744), PIPELINE_OK),
3478       true);
3479   ASSERT_EQ(ChunkDemuxer::kOk, AddId(kSourceId, HAS_AUDIO));
3480
3481   // Set the append window to [50,150).
3482   append_window_start_for_next_append_ = base::TimeDelta::FromMilliseconds(50);
3483   append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(150);
3484
3485   // Read a WebM file into memory and send the data to the demuxer.  The chunk
3486   // size has been chosen carefully to ensure the preroll buffer used by the
3487   // partial append window trim must come from a previous Append() call.
3488   scoped_refptr<DecoderBuffer> buffer =
3489       ReadTestDataFile("bear-320x240-audio-only.webm");
3490   EXPECT_CALL(*this, InitSegmentReceived());
3491   AppendDataInPieces(buffer->data(), buffer->data_size(), 128);
3492
3493   DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::AUDIO);
3494   CheckExpectedBuffers(stream, "50P 50 62 86 109 122 125 128");
3495 }
3496
3497 TEST_F(ChunkDemuxerTest, AppendWindow_AudioConfigUpdateRemovesPreroll) {
3498   EXPECT_CALL(*this, DemuxerOpened());
3499   demuxer_->Initialize(
3500       &host_,
3501       CreateInitDoneCB(base::TimeDelta::FromMilliseconds(2744), PIPELINE_OK),
3502       true);
3503   ASSERT_EQ(ChunkDemuxer::kOk, AddId(kSourceId, HAS_AUDIO));
3504
3505   // Set the append window such that the first file is completely before the
3506   // append window.
3507   // TODO(wolenetz/acolwell): Update this duration once the files are fixed to
3508   // have the correct duration in their init segments, and the
3509   // CreateInitDoneCB() call, above, is fixed to used that duration. See
3510   // http://crbug.com/354284.
3511   const base::TimeDelta duration_1 = base::TimeDelta::FromMilliseconds(2746);
3512   append_window_start_for_next_append_ = duration_1;
3513
3514   // Read a WebM file into memory and append the data.
3515   scoped_refptr<DecoderBuffer> buffer =
3516       ReadTestDataFile("bear-320x240-audio-only.webm");
3517   EXPECT_CALL(*this, InitSegmentReceived());
3518   AppendDataInPieces(buffer->data(), buffer->data_size(), 512);
3519   CheckExpectedRanges(kSourceId, "{ }");
3520
3521   DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::AUDIO);
3522   AudioDecoderConfig config_1 = stream->audio_decoder_config();
3523
3524   // Read in a second WebM file with a different config and append the data.
3525   scoped_refptr<DecoderBuffer> buffer2 =
3526       ReadTestDataFile("bear-320x240-audio-only-48khz.webm");
3527   EXPECT_CALL(*this, InitSegmentReceived());
3528   EXPECT_CALL(host_, SetDuration(_)).Times(AnyNumber());
3529   ASSERT_TRUE(SetTimestampOffset(kSourceId, duration_1));
3530   AppendDataInPieces(buffer2->data(), buffer2->data_size(), 512);
3531   CheckExpectedRanges(kSourceId, "{ [2746,5519) }");
3532
3533   Seek(duration_1);
3534   ExpectConfigChanged(DemuxerStream::AUDIO);
3535   ASSERT_FALSE(config_1.Matches(stream->audio_decoder_config()));
3536   CheckExpectedBuffers(stream, "2746 2767 2789 2810");
3537 }
3538
3539 TEST_F(ChunkDemuxerTest, AppendWindow_Text) {
3540   DemuxerStream* text_stream = NULL;
3541   EXPECT_CALL(host_, AddTextStream(_, _))
3542       .WillOnce(SaveArg<0>(&text_stream));
3543   ASSERT_TRUE(InitDemuxer(HAS_VIDEO | HAS_TEXT));
3544   DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
3545
3546   // Set the append window to [20,280).
3547   append_window_start_for_next_append_ = base::TimeDelta::FromMilliseconds(20);
3548   append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(280);
3549
3550   // Append a cluster that starts before and ends after the append
3551   // window.
3552   AppendMuxedCluster(
3553       MuxedStreamInfo(kVideoTrackNum,
3554                       "0K 30 60 90 120K 150 180 210 240K 270 300 330K"),
3555       MuxedStreamInfo(kTextTrackNum, "0K 100K 200K 300K" ));
3556
3557   // Verify that text cues that start outside the window are not included
3558   // in the buffer. Also verify that cues that extend beyond the
3559   // window are not included.
3560   CheckExpectedRanges(kSourceId, "{ [100,270) }");
3561   CheckExpectedBuffers(video_stream, "120 150 180 210 240");
3562   CheckExpectedBuffers(text_stream, "100");
3563
3564   // Extend the append window to [20,650).
3565   append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(650);
3566
3567   // Append more data and verify that a new range is created.
3568   AppendMuxedCluster(
3569       MuxedStreamInfo(kVideoTrackNum,
3570                       "360 390 420K 450 480 510 540K 570 600 630K"),
3571       MuxedStreamInfo(kTextTrackNum, "400K 500K 600K 700K" ));
3572   CheckExpectedRanges(kSourceId, "{ [100,270) [400,630) }");
3573
3574   // Seek to the new range and verify that the expected buffers are returned.
3575   Seek(base::TimeDelta::FromMilliseconds(420));
3576   CheckExpectedBuffers(video_stream, "420 450 480 510 540 570 600");
3577   CheckExpectedBuffers(text_stream, "400 500");
3578 }
3579
3580 TEST_F(ChunkDemuxerTest, StartWaitingForSeekAfterParseError) {
3581   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
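  // Appending garbage should surface a decode error; a subsequent
  // StartWaitingForSeek() should still be handled without issue.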
3582   EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
3583   AppendGarbage();
3584   base::TimeDelta seek_time = base::TimeDelta::FromSeconds(50);
3585   demuxer_->StartWaitingForSeek(seek_time);
3586 }
3587
3588 TEST_F(ChunkDemuxerTest, Remove_AudioVideoText) {
3589   DemuxerStream* text_stream = NULL;
3590   EXPECT_CALL(host_, AddTextStream(_, _))
3591       .WillOnce(SaveArg<0>(&text_stream));
3592   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));
3593
3594   DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
3595   DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
3596
3597   AppendMuxedCluster(
3598       MuxedStreamInfo(kAudioTrackNum, "0K 20K 40K 60K 80K 100K 120K 140K"),
3599       MuxedStreamInfo(kVideoTrackNum, "0K 30 60 90 120K 150 180"),
3600       MuxedStreamInfo(kTextTrackNum, "0K 100K 200K"));
3601
3602   CheckExpectedBuffers(audio_stream, "0 20 40 60 80 100 120 140");
3603   CheckExpectedBuffers(video_stream, "0 30 60 90 120 150 180");
3604   CheckExpectedBuffers(text_stream, "0 100 200");
3605
3606   // Remove the buffers that were added.
3607   demuxer_->Remove(kSourceId, base::TimeDelta(),
3608                    base::TimeDelta::FromMilliseconds(300));
3609
3610   // Verify that all the appended data has been removed.
3611   CheckExpectedRanges(kSourceId, "{ }");
3612
3613   // Append new buffers that are clearly different from the original
3614   // ones and verify that only the new buffers are returned.
3615   AppendMuxedCluster(
3616       MuxedStreamInfo(kAudioTrackNum, "1K 21K 41K 61K 81K 101K 121K 141K"),
3617       MuxedStreamInfo(kVideoTrackNum, "1K 31 61 91 121K 151 181"),
3618       MuxedStreamInfo(kTextTrackNum, "1K 101K 201K"));
3619
3620   Seek(base::TimeDelta());
3621   CheckExpectedBuffers(audio_stream, "1 21 41 61 81 101 121 141");
3622   CheckExpectedBuffers(video_stream, "1 31 61 91 121 151 181");
3623   CheckExpectedBuffers(text_stream, "1 101 201");
3624 }
3625
3626 TEST_F(ChunkDemuxerTest, Remove_StartAtDuration) {
3627   ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
3628   DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
3629
3630   // Set the duration to something small so that the append that
3631   // follows updates the duration to reflect the end of the appended data.
3632   EXPECT_CALL(host_, SetDuration(
3633       base::TimeDelta::FromMilliseconds(1)));
3634   demuxer_->SetDuration(0.001);
3635
3636   EXPECT_CALL(host_, SetDuration(
3637       base::TimeDelta::FromMilliseconds(160)));
3638   AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
3639                             "0K 20K 40K 60K 80K 100K 120K 140K");
3640
3641   CheckExpectedRanges(kSourceId, "{ [0,160) }");
3642   CheckExpectedBuffers(audio_stream, "0 20 40 60 80 100 120 140");
3643
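  // Removing from the current duration onward should not remove any of the
  // buffered data.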
3644   demuxer_->Remove(kSourceId,
3645                    base::TimeDelta::FromSecondsD(demuxer_->GetDuration()),
3646                    kInfiniteDuration());
3647
3648   Seek(base::TimeDelta());
3649   CheckExpectedRanges(kSourceId, "{ [0,160) }");
3650   CheckExpectedBuffers(audio_stream, "0 20 40 60 80 100 120 140");
3651 }
3652
3653 // Verifies that a Seek() will complete without text cues for
3654 // the seek point and will return cues after the seek position
3655 // when they are eventually appended.
3656 TEST_F(ChunkDemuxerTest, SeekCompletesWithoutTextCues) {
3657   DemuxerStream* text_stream = NULL;
3658   EXPECT_CALL(host_, AddTextStream(_, _))
3659       .WillOnce(SaveArg<0>(&text_stream));
3660   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));
3661
3662   DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
3663   DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
3664
3665   base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(120);
3666   bool seek_cb_was_called = false;
3667   demuxer_->StartWaitingForSeek(seek_time);
3668   demuxer_->Seek(seek_time,
3669                  base::Bind(OnSeekDone_OKExpected, &seek_cb_was_called));
3670   message_loop_.RunUntilIdle();
3671
3672   EXPECT_FALSE(seek_cb_was_called);
3673
3674   bool text_read_done = false;
3675   text_stream->Read(base::Bind(&OnReadDone,
3676                                base::TimeDelta::FromMilliseconds(225),
3677                                &text_read_done));
3678
3679   // Append audio & video data so the seek completes.
3680   AppendMuxedCluster(
3681       MuxedStreamInfo(kAudioTrackNum,
3682                       "0K 20K 40K 60K 80K 100K 120K 140K 160K 180K 200K"),
3683       MuxedStreamInfo(kVideoTrackNum, "0K 30 60 90 120K 150 180 210"));
3684
3685   message_loop_.RunUntilIdle();
3686   EXPECT_TRUE(seek_cb_was_called);
3687   EXPECT_FALSE(text_read_done);
3688
3689   // Read some audio & video buffers to further verify seek completion.
3690   CheckExpectedBuffers(audio_stream, "120 140");
3691   CheckExpectedBuffers(video_stream, "120 150");
3692
3693   EXPECT_FALSE(text_read_done);
3694
3695   // Append text cues that start after the seek point and verify that
3696   // they are returned by Read() calls.
3697   AppendMuxedCluster(
3698       MuxedStreamInfo(kAudioTrackNum, "220K 240K 260K 280K"),
3699       MuxedStreamInfo(kVideoTrackNum, "240K 270 300 330"),
3700       MuxedStreamInfo(kTextTrackNum, "225K 275K 325K"));
3701
3702   message_loop_.RunUntilIdle();
3703   EXPECT_TRUE(text_read_done);
3704
3705   // NOTE: we start at 275 here because the buffer at 225 was returned
3706   // to the pending read initiated above.
3707   CheckExpectedBuffers(text_stream, "275 325");
3708
3709   // Verify that audio & video streams continue to return expected values.
3710   CheckExpectedBuffers(audio_stream, "160 180");
3711   CheckExpectedBuffers(video_stream, "180 210");
3712 }
3713
3714 TEST_F(ChunkDemuxerTest, ClusterWithUnknownSize) {
3715   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3716
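  // Append a cluster with an unknown size and verify that its blocks are
  // still buffered.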
3717   AppendCluster(GenerateCluster(0, 0, 4, true));
3718   CheckExpectedRanges(kSourceId, "{ [0,46) }");
3719
3720   // A new cluster marks the end of the previous cluster with unknown size.
3721   AppendCluster(GenerateCluster(46, 66, 5, true));
3722   CheckExpectedRanges(kSourceId, "{ [0,115) }");
3723 }
3724
3725 TEST_F(ChunkDemuxerTest, CuesBetweenClustersWithUnknownSize) {
3726   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3727
3728   // Add two clusters separated by Cues in a single Append() call.
3729   scoped_ptr<Cluster> cluster = GenerateCluster(0, 0, 4, true);
3730   std::vector<uint8> data(cluster->data(), cluster->data() + cluster->size());
3731   data.insert(data.end(), kCuesHeader, kCuesHeader + sizeof(kCuesHeader));
3732   cluster = GenerateCluster(46, 66, 5, true);
3733   data.insert(data.end(), cluster->data(), cluster->data() + cluster->size());
3734   AppendData(&*data.begin(), data.size());
3735
3736   CheckExpectedRanges(kSourceId, "{ [0,115) }");
3737 }
3738
3739 TEST_F(ChunkDemuxerTest, CuesBetweenClusters) {
3740   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3741
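  // A Cues element appended between two clusters should be ignored, leaving
  // a single continuous buffered range.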
3742   AppendCluster(GenerateCluster(0, 0, 4));
3743   AppendData(kCuesHeader, sizeof(kCuesHeader));
3744   AppendCluster(GenerateCluster(46, 66, 5));
3745   CheckExpectedRanges(kSourceId, "{ [0,115) }");
3746 }
3747
3748 }  // namespace media