1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include <algorithm>
6
7 #include "base/bind.h"
8 #include "base/message_loop/message_loop.h"
9 #include "base/strings/string_number_conversions.h"
10 #include "base/strings/string_split.h"
11 #include "base/strings/string_util.h"
12 #include "media/base/audio_decoder_config.h"
13 #include "media/base/decoder_buffer.h"
14 #include "media/base/decrypt_config.h"
15 #include "media/base/mock_demuxer_host.h"
16 #include "media/base/test_data_util.h"
17 #include "media/base/test_helpers.h"
18 #include "media/filters/chunk_demuxer.h"
19 #include "media/formats/webm/cluster_builder.h"
20 #include "media/formats/webm/webm_constants.h"
21 #include "media/formats/webm/webm_crypto_helpers.h"
22 #include "testing/gtest/include/gtest/gtest.h"
23
24 using ::testing::AnyNumber;
25 using ::testing::Exactly;
26 using ::testing::InSequence;
27 using ::testing::NotNull;
28 using ::testing::Return;
29 using ::testing::SaveArg;
30 using ::testing::SetArgumentPointee;
31 using ::testing::_;
32
33 namespace media {
34
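// A minimal Tracks element header: the 4-byte Tracks ID followed by an 8-byte
// EBML size field that initially encodes zero. CreateInitSegmentInternal()
// copies this header and then patches the size field in the copy (via
// WriteInt64()) once the total size of the TrackEntry payload is known.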
35 const uint8 kTracksHeader[] = {
36   0x16, 0x54, 0xAE, 0x6B,  // Tracks ID
37   0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,  // tracks(size = 0)
38 };
39
40 // WebM Block bytes that represent a VP8 keyframe.
41 const uint8 kVP8Keyframe[] = {
42   0x010, 0x00, 0x00, 0x9d, 0x01, 0x2a, 0x00, 0x10, 0x00, 0x10, 0x00
43 };
44
45 // WebM Block bytes that represent a VP8 interframe.
46 const uint8 kVP8Interframe[] = { 0x11, 0x00, 0x00 };
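// In VP8, the low bit of the first frame-tag byte encodes the frame type
// (0 = keyframe, 1 = interframe), so the leading 0x010 (i.e. 0x10) above
// encodes a keyframe and 0x11 an interframe. The keyframe block also carries
// the 0x9d 0x01 0x2a start code and width/height bytes that follow the frame
// tag in a real VP8 keyframe header.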
47
48 static const uint8 kCuesHeader[] = {
49   0x1C, 0x53, 0xBB, 0x6B,  // Cues ID
50   0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,  // cues(size = 0)
51 };
52
53 const int kTracksHeaderSize = sizeof(kTracksHeader);
54 const int kTracksSizeOffset = 4;
55
56 // The size field of the TrackEntry element in the test file
57 // "webm_vorbis_track_entry" starts at index 1 and spans 8 bytes.
58 const int kAudioTrackSizeOffset = 1;
59 const int kAudioTrackSizeWidth = 8;
60 const int kAudioTrackEntryHeaderSize =
61     kAudioTrackSizeOffset + kAudioTrackSizeWidth;
62
63 // The size field of the TrackEntry element in the test file
64 // "webm_vp8_track_entry" starts at index 1 and spans 8 bytes.
65 const int kVideoTrackSizeOffset = 1;
66 const int kVideoTrackSizeWidth = 8;
67 const int kVideoTrackEntryHeaderSize =
68     kVideoTrackSizeOffset + kVideoTrackSizeWidth;
69
70 const int kVideoTrackNum = 1;
71 const int kAudioTrackNum = 2;
72 const int kTextTrackNum = 3;
73 const int kAlternateTextTrackNum = 4;
74
75 const int kAudioBlockDuration = 23;
76 const int kVideoBlockDuration = 33;
77 const int kTextBlockDuration = 100;
78 const int kBlockSize = 10;
79
80 const char kSourceId[] = "SourceId";
81 const char kDefaultFirstClusterRange[] = "{ [0,46) }";
82 const int kDefaultFirstClusterEndTimestamp = 66;
83 const int kDefaultSecondClusterEndTimestamp = 132;
84
85 base::TimeDelta kDefaultDuration() {
86   return base::TimeDelta::FromMilliseconds(201224);
87 }
88
89 // Writes an integer into |buffer| as a vint (variable-length integer) that
90 // spans 8 bytes. The data pointed to by |buffer| must be at least 8 bytes
91 // long. |number| must be in the range 0 <= number < 0x00FFFFFFFFFFFFFF.
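// For example, WriteInt64(buffer, 0x1234) yields the 8 bytes
// 0x01 0x00 0x00 0x00 0x00 0x00 0x12 0x34: the leading 0x01 marks an 8-byte
// vint and the remaining 7 bytes hold the value in big-endian order.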
92 static void WriteInt64(uint8* buffer, int64 number) {
93   DCHECK(number >= 0 && number < 0x00FFFFFFFFFFFFFFLL);
94   buffer[0] = 0x01;
95   int64 tmp = number;
96   for (int i = 7; i > 0; i--) {
97     buffer[i] = tmp & 0xff;
98     tmp >>= 8;
99   }
100 }
101
102 MATCHER_P(HasTimestamp, timestamp_in_ms, "") {
103   return arg.get() && !arg->end_of_stream() &&
104          arg->timestamp().InMilliseconds() == timestamp_in_ms;
105 }
106
107 MATCHER(IsEndOfStream, "") { return arg.get() && arg->end_of_stream(); }
108
109 static void OnReadDone(const base::TimeDelta& expected_time,
110                        bool* called,
111                        DemuxerStream::Status status,
112                        const scoped_refptr<DecoderBuffer>& buffer) {
113   EXPECT_EQ(status, DemuxerStream::kOk);
114   EXPECT_EQ(expected_time, buffer->timestamp());
115   *called = true;
116 }
117
118 static void OnReadDone_AbortExpected(
119     bool* called, DemuxerStream::Status status,
120     const scoped_refptr<DecoderBuffer>& buffer) {
121   EXPECT_EQ(status, DemuxerStream::kAborted);
122   EXPECT_EQ(NULL, buffer.get());
123   *called = true;
124 }
125
126 static void OnReadDone_EOSExpected(bool* called,
127                                    DemuxerStream::Status status,
128                                    const scoped_refptr<DecoderBuffer>& buffer) {
129   EXPECT_EQ(status, DemuxerStream::kOk);
130   EXPECT_TRUE(buffer->end_of_stream());
131   *called = true;
132 }
133
134 static void OnSeekDone_OKExpected(bool* called, PipelineStatus status) {
135   EXPECT_EQ(status, PIPELINE_OK);
136   *called = true;
137 }
138
139 static void LogFunc(const std::string& str) { DVLOG(1) << str; }
140
141 class ChunkDemuxerTest : public ::testing::Test {
142  protected:
143   enum CodecsIndex {
144     AUDIO,
145     VIDEO,
146     MAX_CODECS_INDEX
147   };
148
149   // Default cluster to append first for simple tests.
150   scoped_ptr<Cluster> kDefaultFirstCluster() {
151     return GenerateCluster(0, 4);
152   }
153
154   // Default cluster to append after kDefaultFirstCluster()
155   // has been appended. This cluster starts with blocks that
156   // have timestamps consistent with the end times of the blocks
157   // in kDefaultFirstCluster() so that these two clusters represent
158   // a continuous region.
159   scoped_ptr<Cluster> kDefaultSecondCluster() {
160     return GenerateCluster(46, 66, 5);
161   }
162
163   ChunkDemuxerTest()
164       : append_window_end_for_next_append_(kInfiniteDuration()) {
165     CreateNewDemuxer();
166   }
167
168   void CreateNewDemuxer() {
169     base::Closure open_cb =
170         base::Bind(&ChunkDemuxerTest::DemuxerOpened, base::Unretained(this));
171     Demuxer::NeedKeyCB need_key_cb =
172         base::Bind(&ChunkDemuxerTest::DemuxerNeedKey, base::Unretained(this));
173     demuxer_.reset(
174         new ChunkDemuxer(open_cb, need_key_cb, base::Bind(&LogFunc), true));
175   }
176
177   virtual ~ChunkDemuxerTest() {
178     ShutdownDemuxer();
179   }
180
181   void CreateInitSegment(int stream_flags,
182                          bool is_audio_encrypted,
183                          bool is_video_encrypted,
184                          scoped_ptr<uint8[]>* buffer,
185                          int* size) {
186     CreateInitSegmentInternal(
187         stream_flags, is_audio_encrypted, is_video_encrypted, buffer, false,
188         size);
189   }
190
191   void CreateInitSegmentWithAlternateTextTrackNum(int stream_flags,
192                                                   bool is_audio_encrypted,
193                                                   bool is_video_encrypted,
194                                                   scoped_ptr<uint8[]>* buffer,
195                                                   int* size) {
196     DCHECK(stream_flags & HAS_TEXT);
197     CreateInitSegmentInternal(
198         stream_flags, is_audio_encrypted, is_video_encrypted, buffer, true,
199         size);
200   }
201
202   void CreateInitSegmentInternal(int stream_flags,
203                                  bool is_audio_encrypted,
204                                  bool is_video_encrypted,
205                                  scoped_ptr<uint8[]>* buffer,
206                                  bool use_alternate_text_track_id,
207                                  int* size) {
208     bool has_audio = (stream_flags & HAS_AUDIO) != 0;
209     bool has_video = (stream_flags & HAS_VIDEO) != 0;
210     bool has_text = (stream_flags & HAS_TEXT) != 0;
211     scoped_refptr<DecoderBuffer> ebml_header;
212     scoped_refptr<DecoderBuffer> info;
213     scoped_refptr<DecoderBuffer> audio_track_entry;
214     scoped_refptr<DecoderBuffer> video_track_entry;
215     scoped_refptr<DecoderBuffer> audio_content_encodings;
216     scoped_refptr<DecoderBuffer> video_content_encodings;
217     scoped_refptr<DecoderBuffer> text_track_entry;
218
219     ebml_header = ReadTestDataFile("webm_ebml_element");
220
221     info = ReadTestDataFile("webm_info_element");
222
223     int tracks_element_size = 0;
224
225     if (has_audio) {
226       audio_track_entry = ReadTestDataFile("webm_vorbis_track_entry");
227       tracks_element_size += audio_track_entry->data_size();
228       if (is_audio_encrypted) {
229         audio_content_encodings = ReadTestDataFile("webm_content_encodings");
230         tracks_element_size += audio_content_encodings->data_size();
231       }
232     }
233
234     if (has_video) {
235       video_track_entry = ReadTestDataFile("webm_vp8_track_entry");
236       tracks_element_size += video_track_entry->data_size();
237       if (is_video_encrypted) {
238         video_content_encodings = ReadTestDataFile("webm_content_encodings");
239         tracks_element_size += video_content_encodings->data_size();
240       }
241     }
242
243     if (has_text) {
244       // TODO(matthewjheaney): create an abstraction to do
245       // this (http://crbug/321454).
246       // We need it to also handle the creation of multiple text tracks.
247       //
248       // This is the track entry for a text track,
249       // TrackEntry [AE], size=30
250       //   TrackNum [D7], size=1, val=3 (or 4 if use_alternate_text_track_id)
251       //   TrackUID [73] [C5], size=1, value=3 (must remain constant for same
252       //     track, even if TrackNum changes)
253       //   TrackType [83], size=1, val=0x11
254       //   CodecId [86], size=18, val="D_WEBVTT/SUBTITLES"
255       char str[] = "\xAE\x9E\xD7\x81\x03\x73\xC5\x81\x03"
256                    "\x83\x81\x11\x86\x92"
257                    "D_WEBVTT/SUBTITLES";
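      // Byte 0 is the TrackEntry ID (0xAE), byte 1 its 30-byte payload size
      // (0x9E), and byte 4 is the TrackNumber payload, which is why the
      // DCHECK below inspects (and the alternate-ID path rewrites) str[4].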
258       DCHECK_EQ(str[4], kTextTrackNum);
259       if (use_alternate_text_track_id)
260         str[4] = kAlternateTextTrackNum;
261
262       const int len = strlen(str);
263       DCHECK_EQ(len, 32);
264       const uint8* const buf = reinterpret_cast<const uint8*>(str);
265       text_track_entry = DecoderBuffer::CopyFrom(buf, len);
266       tracks_element_size += text_track_entry->data_size();
267     }
268
269     *size = ebml_header->data_size() + info->data_size() +
270         kTracksHeaderSize + tracks_element_size;
271
272     buffer->reset(new uint8[*size]);
273
274     uint8* buf = buffer->get();
275     memcpy(buf, ebml_header->data(), ebml_header->data_size());
276     buf += ebml_header->data_size();
277
278     memcpy(buf, info->data(), info->data_size());
279     buf += info->data_size();
280
281     memcpy(buf, kTracksHeader, kTracksHeaderSize);
282     WriteInt64(buf + kTracksSizeOffset, tracks_element_size);
283     buf += kTracksHeaderSize;
284
285     // TODO(xhwang): Simplify this! Probably have test data files that contain
286     // ContentEncodings directly instead of trying to create one at run-time.
287     if (has_audio) {
288       memcpy(buf, audio_track_entry->data(),
289              audio_track_entry->data_size());
290       if (is_audio_encrypted) {
291         memcpy(buf + audio_track_entry->data_size(),
292                audio_content_encodings->data(),
293                audio_content_encodings->data_size());
294         WriteInt64(buf + kAudioTrackSizeOffset,
295                    audio_track_entry->data_size() +
296                    audio_content_encodings->data_size() -
297                    kAudioTrackEntryHeaderSize);
298         buf += audio_content_encodings->data_size();
299       }
300       buf += audio_track_entry->data_size();
301     }
302
303     if (has_video) {
304       memcpy(buf, video_track_entry->data(),
305              video_track_entry->data_size());
306       if (is_video_encrypted) {
307         memcpy(buf + video_track_entry->data_size(),
308                video_content_encodings->data(),
309                video_content_encodings->data_size());
310         WriteInt64(buf + kVideoTrackSizeOffset,
311                    video_track_entry->data_size() +
312                    video_content_encodings->data_size() -
313                    kVideoTrackEntryHeaderSize);
314         buf += video_content_encodings->data_size();
315       }
316       buf += video_track_entry->data_size();
317     }
318
319     if (has_text) {
320       memcpy(buf, text_track_entry->data(),
321              text_track_entry->data_size());
322       buf += text_track_entry->data_size();
323     }
324   }
325
326   ChunkDemuxer::Status AddId() {
327     return AddId(kSourceId, HAS_AUDIO | HAS_VIDEO);
328   }
329
330   ChunkDemuxer::Status AddId(const std::string& source_id, int stream_flags) {
331     bool has_audio = (stream_flags & HAS_AUDIO) != 0;
332     bool has_video = (stream_flags & HAS_VIDEO) != 0;
333     std::vector<std::string> codecs;
334     std::string type;
335
336     if (has_audio) {
337       codecs.push_back("vorbis");
338       type = "audio/webm";
339     }
340
341     if (has_video) {
342       codecs.push_back("vp8");
343       type = "video/webm";
344     }
345
346     if (!has_audio && !has_video) {
347       return AddId(kSourceId, HAS_AUDIO | HAS_VIDEO);
348     }
349
350     return demuxer_->AddId(source_id, type, codecs);
351   }
352
353   ChunkDemuxer::Status AddIdForMp2tSource(const std::string& source_id) {
354     std::vector<std::string> codecs;
355     std::string type = "video/mp2t";
356     codecs.push_back("mp4a.40.2");
357     codecs.push_back("avc1.640028");
358     return demuxer_->AddId(source_id, type, codecs);
359   }
360
361   void AppendData(const uint8* data, size_t length) {
362     AppendData(kSourceId, data, length);
363   }
364
365   void AppendCluster(const std::string& source_id,
366                      scoped_ptr<Cluster> cluster) {
367     AppendData(source_id, cluster->data(), cluster->size());
368   }
369
370   void AppendCluster(scoped_ptr<Cluster> cluster) {
371     AppendCluster(kSourceId, cluster.Pass());
372   }
373
374   void AppendCluster(int timecode, int block_count) {
375     AppendCluster(GenerateCluster(timecode, block_count));
376   }
377
378   void AppendSingleStreamCluster(const std::string& source_id, int track_number,
379                                  int timecode, int block_count) {
380     int block_duration = 0;
381     switch (track_number) {
382       case kVideoTrackNum:
383         block_duration = kVideoBlockDuration;
384         break;
385       case kAudioTrackNum:
386         block_duration = kAudioBlockDuration;
387         break;
388       case kTextTrackNum:  // Fall-through.
389       case kAlternateTextTrackNum:
390         block_duration = kTextBlockDuration;
391         break;
392     }
393     ASSERT_NE(block_duration, 0);
394     int end_timecode = timecode + block_count * block_duration;
395     AppendCluster(source_id,
396                   GenerateSingleStreamCluster(
397                       timecode, end_timecode, track_number, block_duration));
398   }
399
400   struct BlockInfo {
401     BlockInfo()
402         : track_number(0),
403           timestamp_in_ms(0),
404           flags(0),
405           duration(0) {
406     }
407
408     BlockInfo(int tn, int ts, int f, int d)
409         : track_number(tn),
410           timestamp_in_ms(ts),
411           flags(f),
412           duration(d) {
413     }
414
415     int track_number;
416     int timestamp_in_ms;
417     int flags;
418     int duration;
419
420     bool operator< (const BlockInfo& rhs) const {
421       return timestamp_in_ms < rhs.timestamp_in_ms;
422     }
423   };
424
425   // |track_number| - The track number to assign to each parsed block.
426   // |block_descriptions| - A space-delimited string of block info that
427   //  is used to populate |blocks|. Each block info is a timestamp in
428   //  milliseconds, optionally followed by a 'K' to indicate that the block
429   //  should be marked as a keyframe. For example "0K 30 60" should populate
430   //  |blocks| with 3 BlockInfo objects: a keyframe with timestamp 0 and 2
431   //  non-keyframes at 30ms and 60ms.
432   void ParseBlockDescriptions(int track_number,
433                               const std::string block_descriptions,
434                               std::vector<BlockInfo>* blocks) {
435     std::vector<std::string> timestamps;
436     base::SplitString(block_descriptions, ' ', &timestamps);
437
438     for (size_t i = 0; i < timestamps.size(); ++i) {
439       std::string timestamp_str = timestamps[i];
440       BlockInfo block_info;
441       block_info.track_number = track_number;
442       block_info.flags = 0;
443       block_info.duration = 0;
444
445       if (EndsWith(timestamp_str, "K", true)) {
446         block_info.flags = kWebMFlagKeyframe;
447         // Remove the trailing "K" from the token.
448         timestamp_str = timestamp_str.substr(0, timestamps[i].length() - 1);
449       }
450       CHECK(base::StringToInt(timestamp_str, &block_info.timestamp_in_ms));
451
452       if (track_number == kTextTrackNum ||
453           track_number == kAlternateTextTrackNum) {
454         block_info.duration = kTextBlockDuration;
455         ASSERT_EQ(kWebMFlagKeyframe, block_info.flags)
456             << "Text block with timestamp " << block_info.timestamp_in_ms
457             << " was not marked as a keyframe."
458             << " All text blocks must be keyframes";
459       }
460
461       if (track_number == kAudioTrackNum)
462         ASSERT_TRUE(block_info.flags & kWebMFlagKeyframe);
463
464       blocks->push_back(block_info);
465     }
466   }
467
468   scoped_ptr<Cluster> GenerateCluster(const std::vector<BlockInfo>& blocks,
469                                       bool unknown_size) {
470     DCHECK_GT(blocks.size(), 0u);
471     ClusterBuilder cb;
472
473     std::vector<uint8> data(10);
474     for (size_t i = 0; i < blocks.size(); ++i) {
475       if (i == 0)
476         cb.SetClusterTimecode(blocks[i].timestamp_in_ms);
477
478       if (blocks[i].duration) {
479         if (blocks[i].track_number == kVideoTrackNum) {
480           AddVideoBlockGroup(&cb,
481                              blocks[i].track_number, blocks[i].timestamp_in_ms,
482                              blocks[i].duration, blocks[i].flags);
483         } else {
484           cb.AddBlockGroup(blocks[i].track_number, blocks[i].timestamp_in_ms,
485                            blocks[i].duration, blocks[i].flags,
486                            &data[0], data.size());
487         }
488       } else {
489         cb.AddSimpleBlock(blocks[i].track_number, blocks[i].timestamp_in_ms,
490                           blocks[i].flags,
491                           &data[0], data.size());
492       }
493     }
494
495     return unknown_size ? cb.FinishWithUnknownSize() : cb.Finish();
496   }
497
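  // Drains |block_queue| (a max-heap ordered by BlockInfo::operator<, i.e. by
  // timestamp) into |blocks| from back to front so the resulting vector is
  // sorted by ascending timestamp before the cluster is generated.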
498   scoped_ptr<Cluster> GenerateCluster(
499       std::priority_queue<BlockInfo> block_queue,
500       bool unknown_size) {
501     std::vector<BlockInfo> blocks(block_queue.size());
502     for (size_t i = block_queue.size() - 1; !block_queue.empty(); --i) {
503       blocks[i] = block_queue.top();
504       block_queue.pop();
505     }
506
507     return GenerateCluster(blocks, unknown_size);
508   }
509
510   // |block_descriptions| - The block descriptions used to construct the
511   // cluster. See the documentation for ParseBlockDescriptions() for details on
512   // the string format.
513   void AppendSingleStreamCluster(const std::string& source_id, int track_number,
514                                  const std::string& block_descriptions) {
515     std::vector<BlockInfo> blocks;
516     ParseBlockDescriptions(track_number, block_descriptions, &blocks);
517     AppendCluster(source_id, GenerateCluster(blocks, false));
518   }
519
520   struct MuxedStreamInfo {
521     MuxedStreamInfo()
522         : track_number(0),
523           block_descriptions("")
524     {}
525
526     MuxedStreamInfo(int track_num, const char* block_desc)
527         : track_number(track_num),
528           block_descriptions(block_desc) {
529     }
530
531     int track_number;
532     // The block description passed to ParseBlockDescriptions().
533     // See the documentation for that method for details on the string format.
534     const char* block_descriptions;
535   };
536
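  // Appends a single cluster containing the blocks described by each
  // MuxedStreamInfo, interleaved across tracks in timestamp order. For
  // example:
  //   AppendMuxedCluster(MuxedStreamInfo(kAudioTrackNum, "0K 23K"),
  //                      MuxedStreamInfo(kVideoTrackNum, "0K 30"));
  // appends one cluster holding two audio blocks and two video blocks.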
537   void AppendMuxedCluster(const MuxedStreamInfo& msi_1,
538                           const MuxedStreamInfo& msi_2) {
539     std::vector<MuxedStreamInfo> msi(2);
540     msi[0] = msi_1;
541     msi[1] = msi_2;
542     AppendMuxedCluster(msi);
543   }
544
545   void AppendMuxedCluster(const MuxedStreamInfo& msi_1,
546                           const MuxedStreamInfo& msi_2,
547                           const MuxedStreamInfo& msi_3) {
548     std::vector<MuxedStreamInfo> msi(3);
549     msi[0] = msi_1;
550     msi[1] = msi_2;
551     msi[2] = msi_3;
552     AppendMuxedCluster(msi);
553   }
554
555   void AppendMuxedCluster(const std::vector<MuxedStreamInfo> msi) {
556     std::priority_queue<BlockInfo> block_queue;
557     for (size_t i = 0; i < msi.size(); ++i) {
558       std::vector<BlockInfo> track_blocks;
559       ParseBlockDescriptions(msi[i].track_number, msi[i].block_descriptions,
560                              &track_blocks);
561
562       for (size_t j = 0; j < track_blocks.size(); ++j)
563         block_queue.push(track_blocks[j]);
564     }
565
566     AppendCluster(kSourceId, GenerateCluster(block_queue, false));
567   }
568
569   void AppendData(const std::string& source_id,
570                   const uint8* data, size_t length) {
571     EXPECT_CALL(host_, AddBufferedTimeRange(_, _)).Times(AnyNumber());
572
573     demuxer_->AppendData(source_id, data, length,
574                          append_window_start_for_next_append_,
575                          append_window_end_for_next_append_,
576                          &timestamp_offset_map_[source_id]);
577   }
578
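  // Appends |data| to the demuxer in fixed-size pieces (7 bytes unless a
  // |piece_size| is given) so that the parsers are exercised across
  // arbitrarily split appends.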
579   void AppendDataInPieces(const uint8* data, size_t length) {
580     AppendDataInPieces(data, length, 7);
581   }
582
583   void AppendDataInPieces(const uint8* data, size_t length, size_t piece_size) {
584     const uint8* start = data;
585     const uint8* end = data + length;
586     while (start < end) {
587       size_t append_size = std::min(piece_size,
588                                     static_cast<size_t>(end - start));
589       AppendData(start, append_size);
590       start += append_size;
591     }
592   }
593
594   void AppendInitSegment(int stream_flags) {
595     AppendInitSegmentWithSourceId(kSourceId, stream_flags);
596   }
597
598   void AppendInitSegmentWithSourceId(const std::string& source_id,
599                                      int stream_flags) {
600     AppendInitSegmentWithEncryptedInfo(source_id, stream_flags, false, false);
601   }
602
603   void AppendInitSegmentWithEncryptedInfo(const std::string& source_id,
604                                           int stream_flags,
605                                           bool is_audio_encrypted,
606                                           bool is_video_encrypted) {
607     scoped_ptr<uint8[]> info_tracks;
608     int info_tracks_size = 0;
609     CreateInitSegment(stream_flags,
610                       is_audio_encrypted, is_video_encrypted,
611                       &info_tracks, &info_tracks_size);
612     AppendData(source_id, info_tracks.get(), info_tracks_size);
613   }
614
615   void AppendGarbage() {
616     // Fill up an array with gibberish.
617     int garbage_cluster_size = 10;
618     scoped_ptr<uint8[]> garbage_cluster(new uint8[garbage_cluster_size]);
619     for (int i = 0; i < garbage_cluster_size; ++i)
620       garbage_cluster[i] = i;
621     AppendData(garbage_cluster.get(), garbage_cluster_size);
622   }
623
624   void InitDoneCalled(PipelineStatus expected_status,
625                       PipelineStatus status) {
626     EXPECT_EQ(status, expected_status);
627   }
628
629   void AppendEmptyCluster(int timecode) {
630     AppendCluster(GenerateEmptyCluster(timecode));
631   }
632
633   PipelineStatusCB CreateInitDoneCB(const base::TimeDelta& expected_duration,
634                                     PipelineStatus expected_status) {
635     if (expected_duration != kNoTimestamp())
636       EXPECT_CALL(host_, SetDuration(expected_duration));
637     return CreateInitDoneCB(expected_status);
638   }
639
640   PipelineStatusCB CreateInitDoneCB(PipelineStatus expected_status) {
641     return base::Bind(&ChunkDemuxerTest::InitDoneCalled,
642                       base::Unretained(this),
643                       expected_status);
644   }
645
646   enum StreamFlags {
647     HAS_AUDIO = 1 << 0,
648     HAS_VIDEO = 1 << 1,
649     HAS_TEXT = 1 << 2
650   };
651
652   bool InitDemuxer(int stream_flags) {
653     return InitDemuxerWithEncryptionInfo(stream_flags, false, false);
654   }
655
656   bool InitDemuxerWithEncryptionInfo(
657       int stream_flags, bool is_audio_encrypted, bool is_video_encrypted) {
658
659     PipelineStatus expected_status =
660         (stream_flags != 0) ? PIPELINE_OK : DEMUXER_ERROR_COULD_NOT_OPEN;
661
662     base::TimeDelta expected_duration = kNoTimestamp();
663     if (expected_status == PIPELINE_OK)
664       expected_duration = kDefaultDuration();
665
666     EXPECT_CALL(*this, DemuxerOpened());
667     demuxer_->Initialize(
668         &host_, CreateInitDoneCB(expected_duration, expected_status), true);
669
670     if (AddId(kSourceId, stream_flags) != ChunkDemuxer::kOk)
671       return false;
672
673     AppendInitSegmentWithEncryptedInfo(
674         kSourceId, stream_flags,
675         is_audio_encrypted, is_video_encrypted);
676     return true;
677   }
678
679   bool InitDemuxerAudioAndVideoSourcesText(const std::string& audio_id,
680                                            const std::string& video_id,
681                                            bool has_text) {
682     EXPECT_CALL(*this, DemuxerOpened());
683     demuxer_->Initialize(
684         &host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
685
686     if (AddId(audio_id, HAS_AUDIO) != ChunkDemuxer::kOk)
687       return false;
688     if (AddId(video_id, HAS_VIDEO) != ChunkDemuxer::kOk)
689       return false;
690
691     int audio_flags = HAS_AUDIO;
692     int video_flags = HAS_VIDEO;
693
694     if (has_text) {
695       audio_flags |= HAS_TEXT;
696       video_flags |= HAS_TEXT;
697     }
698
699     AppendInitSegmentWithSourceId(audio_id, audio_flags);
700     AppendInitSegmentWithSourceId(video_id, video_flags);
701     return true;
702   }
703
704   bool InitDemuxerAudioAndVideoSources(const std::string& audio_id,
705                                        const std::string& video_id) {
706     return InitDemuxerAudioAndVideoSourcesText(audio_id, video_id, false);
707   }
708
709   // Initializes the demuxer with data from 2 files with different
710   // decoder configurations. This is used to test the decoder config change
711   // logic.
712   //
713   // bear-320x240.webm VideoDecoderConfig returns 320x240 for its natural_size()
714   // bear-640x360.webm VideoDecoderConfig returns 640x360 for its natural_size()
715   // The resulting video stream returns data from each file for the following
716   // time ranges.
717   // bear-320x240.webm : [0-501)       [801-2736)
718   // bear-640x360.webm :       [527-793)
719   //
720   // bear-320x240.webm AudioDecoderConfig returns 3863 for its extra_data_size()
721   // bear-640x360.webm AudioDecoderConfig returns 3935 for its extra_data_size()
722   // The resulting audio stream returns data from each file for the following
723   // time ranges.
724   // bear-320x240.webm : [0-524)       [779-2736)
725   // bear-640x360.webm :       [527-759)
726   bool InitDemuxerWithConfigChangeData() {
727     scoped_refptr<DecoderBuffer> bear1 = ReadTestDataFile("bear-320x240.webm");
728     scoped_refptr<DecoderBuffer> bear2 = ReadTestDataFile("bear-640x360.webm");
729
730     EXPECT_CALL(*this, DemuxerOpened());
731
732     demuxer_->Initialize(
733         &host_, CreateInitDoneCB(base::TimeDelta::FromMilliseconds(2744),
734                                  PIPELINE_OK), true);
735
736     if (AddId(kSourceId, HAS_AUDIO | HAS_VIDEO) != ChunkDemuxer::kOk)
737       return false;
738
739     // Append the whole bear1 file.
740     // TODO(wolenetz/acolwell): Remove this extra SetDuration expectation once
741     // the files are fixed to have the correct duration in their init segments,
742     // and the CreateInitDoneCB() call, above, is fixed to use that duration.
743     // See http://crbug.com/354284.
744     EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(2746)));
745     AppendData(bear1->data(), bear1->data_size());
746     // Last audio frame has timestamp 2721 and duration 24 (estimated from max
747     // seen so far for audio track).
748     // Last video frame has timestamp 2703 and duration 33 (from TrackEntry
749     // DefaultDuration for video track).
750     CheckExpectedRanges(kSourceId, "{ [0,2736) }");
751
752     // Append initialization segment for bear2.
753     // Note: Offsets here and below are derived from
754     // media/test/data/bear-640x360-manifest.js and
755     // media/test/data/bear-320x240-manifest.js which were
756     // generated from media/test/data/bear-640x360.webm and
757     // media/test/data/bear-320x240.webm respectively.
758     AppendData(bear2->data(), 4340);
759
760     // Append a media segment that goes from [0.527000, 1.014000).
761     AppendData(bear2->data() + 55290, 18785);
762     CheckExpectedRanges(kSourceId, "{ [0,1027) [1201,2736) }");
763
764     // Append initialization segment for bear1 & fill gap with [779-1197)
765     // segment.
766     AppendData(bear1->data(), 4370);
767     AppendData(bear1->data() + 72737, 28183);
768     CheckExpectedRanges(kSourceId, "{ [0,2736) }");
769
770     MarkEndOfStream(PIPELINE_OK);
771     return true;
772   }
773
774   void ShutdownDemuxer() {
775     if (demuxer_) {
776       demuxer_->Shutdown();
777       message_loop_.RunUntilIdle();
778     }
779   }
780
781   void AddSimpleBlock(ClusterBuilder* cb, int track_num, int64 timecode) {
782     uint8 data[] = { 0x00 };
783     cb->AddSimpleBlock(track_num, timecode, 0, data, sizeof(data));
784   }
785
786   scoped_ptr<Cluster> GenerateCluster(int timecode, int block_count) {
787     return GenerateCluster(timecode, timecode, block_count);
788   }
789
790   void AddVideoBlockGroup(ClusterBuilder* cb, int track_num, int64 timecode,
791                           int duration, int flags) {
792     const uint8* data =
793         (flags & kWebMFlagKeyframe) != 0 ? kVP8Keyframe : kVP8Interframe;
794     int size = (flags & kWebMFlagKeyframe) != 0 ? sizeof(kVP8Keyframe) :
795         sizeof(kVP8Interframe);
796     cb->AddBlockGroup(track_num, timecode, duration, flags, data, size);
797   }
798
799   scoped_ptr<Cluster> GenerateCluster(int first_audio_timecode,
800                                       int first_video_timecode,
801                                       int block_count) {
802     return GenerateCluster(first_audio_timecode, first_video_timecode,
803                            block_count, false);
804   }
805   scoped_ptr<Cluster> GenerateCluster(int first_audio_timecode,
806                                       int first_video_timecode,
807                                       int block_count,
808                                       bool unknown_size) {
809     CHECK_GT(block_count, 0);
810
811     std::priority_queue<BlockInfo> block_queue;
812
813     if (block_count == 1) {
814       block_queue.push(BlockInfo(kAudioTrackNum,
815                                  first_audio_timecode,
816                                  kWebMFlagKeyframe,
817                                  kAudioBlockDuration));
818       return GenerateCluster(block_queue, unknown_size);
819     }
820
821     int audio_timecode = first_audio_timecode;
822     int video_timecode = first_video_timecode;
823
824     // Create simple blocks for everything except the last 2 blocks.
825     // The first video frame must be a keyframe.
826     uint8 video_flag = kWebMFlagKeyframe;
827     for (int i = 0; i < block_count - 2; i++) {
828       if (audio_timecode <= video_timecode) {
829         block_queue.push(BlockInfo(kAudioTrackNum,
830                                    audio_timecode,
831                                    kWebMFlagKeyframe,
832                                    0));
833         audio_timecode += kAudioBlockDuration;
834         continue;
835       }
836
837       block_queue.push(BlockInfo(kVideoTrackNum,
838                                  video_timecode,
839                                  video_flag,
840                                  0));
841       video_timecode += kVideoBlockDuration;
842       video_flag = 0;
843     }
844
845     // Make the last 2 blocks BlockGroups so that they don't get delayed by the
846     // block duration calculation logic.
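    // (SimpleBlocks carry no explicit duration, so the parser can only infer
    // one from a later block's timestamp and may hold the final buffers back;
    // BlockGroups carry an explicit BlockDuration and can be emitted
    // immediately.)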
847     block_queue.push(BlockInfo(kAudioTrackNum,
848                                audio_timecode,
849                                kWebMFlagKeyframe,
850                                kAudioBlockDuration));
851     block_queue.push(BlockInfo(kVideoTrackNum,
852                                video_timecode,
853                                video_flag,
854                                kVideoBlockDuration));
855
856     return GenerateCluster(block_queue, unknown_size);
857   }
858
859   scoped_ptr<Cluster> GenerateSingleStreamCluster(int timecode,
860                                                   int end_timecode,
861                                                   int track_number,
862                                                   int block_duration) {
863     CHECK_GT(end_timecode, timecode);
864
865     std::vector<uint8> data(kBlockSize);
866
867     ClusterBuilder cb;
868     cb.SetClusterTimecode(timecode);
869
870     // Create simple blocks for everything except the last block.
871     while (timecode < (end_timecode - block_duration)) {
872       cb.AddSimpleBlock(track_number, timecode, kWebMFlagKeyframe,
873                         &data[0], data.size());
874       timecode += block_duration;
875     }
876
877     if (track_number == kVideoTrackNum) {
878       AddVideoBlockGroup(&cb, track_number, timecode, block_duration,
879                          kWebMFlagKeyframe);
880     } else {
881       cb.AddBlockGroup(track_number, timecode, block_duration,
882                        kWebMFlagKeyframe, &data[0], data.size());
883     }
884
885     return cb.Finish();
886   }
887
888   void Read(DemuxerStream::Type type, const DemuxerStream::ReadCB& read_cb) {
889     demuxer_->GetStream(type)->Read(read_cb);
890     message_loop_.RunUntilIdle();
891   }
892
893   void ReadAudio(const DemuxerStream::ReadCB& read_cb) {
894     Read(DemuxerStream::AUDIO, read_cb);
895   }
896
897   void ReadVideo(const DemuxerStream::ReadCB& read_cb) {
898     Read(DemuxerStream::VIDEO, read_cb);
899   }
900
901   void GenerateExpectedReads(int timecode, int block_count) {
902     GenerateExpectedReads(timecode, timecode, block_count);
903   }
904
905   void GenerateExpectedReads(int start_audio_timecode,
906                              int start_video_timecode,
907                              int block_count) {
908     CHECK_GT(block_count, 0);
909
910     if (block_count == 1) {
911       ExpectRead(DemuxerStream::AUDIO, start_audio_timecode);
912       return;
913     }
914
915     int audio_timecode = start_audio_timecode;
916     int video_timecode = start_video_timecode;
917
918     for (int i = 0; i < block_count; i++) {
919       if (audio_timecode <= video_timecode) {
920         ExpectRead(DemuxerStream::AUDIO, audio_timecode);
921         audio_timecode += kAudioBlockDuration;
922         continue;
923       }
924
925       ExpectRead(DemuxerStream::VIDEO, video_timecode);
926       video_timecode += kVideoBlockDuration;
927     }
928   }
929
930   void GenerateSingleStreamExpectedReads(int timecode,
931                                          int block_count,
932                                          DemuxerStream::Type type,
933                                          int block_duration) {
934     CHECK_GT(block_count, 0);
935     int stream_timecode = timecode;
936
937     for (int i = 0; i < block_count; i++) {
938       ExpectRead(type, stream_timecode);
939       stream_timecode += block_duration;
940     }
941   }
942
943   void GenerateAudioStreamExpectedReads(int timecode, int block_count) {
944     GenerateSingleStreamExpectedReads(
945         timecode, block_count, DemuxerStream::AUDIO, kAudioBlockDuration);
946   }
947
948   void GenerateVideoStreamExpectedReads(int timecode, int block_count) {
949     GenerateSingleStreamExpectedReads(
950         timecode, block_count, DemuxerStream::VIDEO, kVideoBlockDuration);
951   }
952
953   scoped_ptr<Cluster> GenerateEmptyCluster(int timecode) {
954     ClusterBuilder cb;
955     cb.SetClusterTimecode(timecode);
956     return cb.Finish();
957   }
958
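  // Asserts that the buffered ranges reported by the demuxer, formatted as
  // "{ [start1,end1) [start2,end2) ... }" in milliseconds, equal |expected|
  // (e.g. kDefaultFirstClusterRange is "{ [0,46) }").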
959   void CheckExpectedRanges(const std::string& expected) {
960     CheckExpectedRanges(kSourceId, expected);
961   }
962
963   void CheckExpectedRanges(const std::string& id,
964                            const std::string& expected) {
965     Ranges<base::TimeDelta> r = demuxer_->GetBufferedRanges(id);
966
967     std::stringstream ss;
968     ss << "{ ";
969     for (size_t i = 0; i < r.size(); ++i) {
970       ss << "[" << r.start(i).InMilliseconds() << ","
971          << r.end(i).InMilliseconds() << ") ";
972     }
973     ss << "}";
974     EXPECT_EQ(expected, ss.str());
975   }
976
977   MOCK_METHOD2(ReadDone, void(DemuxerStream::Status status,
978                               const scoped_refptr<DecoderBuffer>&));
979
980   void StoreStatusAndBuffer(DemuxerStream::Status* status_out,
981                             scoped_refptr<DecoderBuffer>* buffer_out,
982                             DemuxerStream::Status status,
983                             const scoped_refptr<DecoderBuffer>& buffer) {
984     *status_out = status;
985     *buffer_out = buffer;
986   }
987
988   void ReadUntilNotOkOrEndOfStream(DemuxerStream::Type type,
989                                    DemuxerStream::Status* status,
990                                    base::TimeDelta* last_timestamp) {
991     DemuxerStream* stream = demuxer_->GetStream(type);
992     scoped_refptr<DecoderBuffer> buffer;
993
994     *last_timestamp = kNoTimestamp();
995     do {
996       stream->Read(base::Bind(&ChunkDemuxerTest::StoreStatusAndBuffer,
997                               base::Unretained(this), status, &buffer));
998       base::MessageLoop::current()->RunUntilIdle();
999       if (*status == DemuxerStream::kOk && !buffer->end_of_stream())
1000         *last_timestamp = buffer->timestamp();
1001     } while (*status == DemuxerStream::kOk && !buffer->end_of_stream());
1002   }
1003
1004   void ExpectEndOfStream(DemuxerStream::Type type) {
1005     EXPECT_CALL(*this, ReadDone(DemuxerStream::kOk, IsEndOfStream()));
1006     demuxer_->GetStream(type)->Read(base::Bind(
1007         &ChunkDemuxerTest::ReadDone, base::Unretained(this)));
1008     message_loop_.RunUntilIdle();
1009   }
1010
1011   void ExpectRead(DemuxerStream::Type type, int64 timestamp_in_ms) {
1012     EXPECT_CALL(*this, ReadDone(DemuxerStream::kOk,
1013                                 HasTimestamp(timestamp_in_ms)));
1014     demuxer_->GetStream(type)->Read(base::Bind(
1015         &ChunkDemuxerTest::ReadDone, base::Unretained(this)));
1016     message_loop_.RunUntilIdle();
1017   }
1018
1019   void ExpectConfigChanged(DemuxerStream::Type type) {
1020     EXPECT_CALL(*this, ReadDone(DemuxerStream::kConfigChanged, _));
1021     demuxer_->GetStream(type)->Read(base::Bind(
1022         &ChunkDemuxerTest::ReadDone, base::Unretained(this)));
1023     message_loop_.RunUntilIdle();
1024   }
1025
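  // Reads buffers from |stream| until a non-kOk status or an end-of-stream
  // buffer is returned, and checks their timestamps (in milliseconds) against
  // the space-delimited |expected| string. A "P" suffix on an entry
  // additionally asserts that the buffer carries preroll-style discard
  // padding (an infinite front padding and no back padding).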
1026   void CheckExpectedBuffers(DemuxerStream* stream,
1027                             const std::string& expected) {
1028     std::vector<std::string> timestamps;
1029     base::SplitString(expected, ' ', &timestamps);
1030     std::stringstream ss;
1031     for (size_t i = 0; i < timestamps.size(); ++i) {
1032       // Initialize status to kAborted since it's possible for Read() to return
1033       // without calling StoreStatusAndBuffer() if it doesn't have any buffers
1034       // left to return.
1035       DemuxerStream::Status status = DemuxerStream::kAborted;
1036       scoped_refptr<DecoderBuffer> buffer;
1037       stream->Read(base::Bind(&ChunkDemuxerTest::StoreStatusAndBuffer,
1038                               base::Unretained(this), &status, &buffer));
1039       base::MessageLoop::current()->RunUntilIdle();
1040       if (status != DemuxerStream::kOk || buffer->end_of_stream())
1041         break;
1042
1043       if (i > 0)
1044         ss << " ";
1045       ss << buffer->timestamp().InMilliseconds();
1046
1047       // Handle preroll buffers.
1048       if (EndsWith(timestamps[i], "P", true)) {
1049         ASSERT_EQ(kInfiniteDuration(), buffer->discard_padding().first);
1050         ASSERT_EQ(base::TimeDelta(), buffer->discard_padding().second);
1051         ss << "P";
1052       }
1053     }
1054     EXPECT_EQ(expected, ss.str());
1055   }
1056
1057   MOCK_METHOD1(Checkpoint, void(int id));
1058
1059   struct BufferTimestamps {
1060     int video_time_ms;
1061     int audio_time_ms;
1062   };
1063   static const int kSkip = -1;
1064
1065   // Test parsing a WebM file.
1066   // |filename| - The name of the file in media/test/data to parse.
1067   // |timestamps| - The expected timestamps on the parsed buffers.
1068   //    A timestamp of kSkip indicates that a Read() call for that stream
1069   //    shouldn't be made on that iteration of the loop. If both streams have
1070   //    kSkip, the loop terminates.
1071   bool ParseWebMFile(const std::string& filename,
1072                      const BufferTimestamps* timestamps,
1073                      const base::TimeDelta& duration) {
1074     return ParseWebMFile(filename, timestamps, duration, HAS_AUDIO | HAS_VIDEO);
1075   }
1076
1077   bool ParseWebMFile(const std::string& filename,
1078                      const BufferTimestamps* timestamps,
1079                      const base::TimeDelta& duration,
1080                      int stream_flags) {
1081     EXPECT_CALL(*this, DemuxerOpened());
1082     demuxer_->Initialize(
1083         &host_, CreateInitDoneCB(duration, PIPELINE_OK), true);
1084
1085     if (AddId(kSourceId, stream_flags) != ChunkDemuxer::kOk)
1086       return false;
1087
1088     // Read a WebM file into memory and send the data to the demuxer.
1089     scoped_refptr<DecoderBuffer> buffer = ReadTestDataFile(filename);
1090     AppendDataInPieces(buffer->data(), buffer->data_size(), 512);
1091
1092     // Verify that the timestamps on the first few packets match what we
1093     // expect.
1094     for (size_t i = 0;
1095          (timestamps[i].audio_time_ms != kSkip ||
1096           timestamps[i].video_time_ms != kSkip);
1097          i++) {
1098       bool audio_read_done = false;
1099       bool video_read_done = false;
1100
1101       if (timestamps[i].audio_time_ms != kSkip) {
1102         ReadAudio(base::Bind(&OnReadDone,
1103                              base::TimeDelta::FromMilliseconds(
1104                                  timestamps[i].audio_time_ms),
1105                              &audio_read_done));
1106         EXPECT_TRUE(audio_read_done);
1107       }
1108
1109       if (timestamps[i].video_time_ms != kSkip) {
1110         ReadVideo(base::Bind(&OnReadDone,
1111                              base::TimeDelta::FromMilliseconds(
1112                                  timestamps[i].video_time_ms),
1113                              &video_read_done));
1114         EXPECT_TRUE(video_read_done);
1115       }
1116     }
1117
1118     return true;
1119   }
1120
1121   MOCK_METHOD0(DemuxerOpened, void());
1122   // TODO(xhwang): This is a workaround of the issue that move-only parameters
1123   // are not supported in mocked methods. Remove this when the issue is fixed
1124   // (http://code.google.com/p/googletest/issues/detail?id=395) or when we use
1125   // std::string instead of scoped_ptr<uint8[]> (http://crbug.com/130689).
1126   MOCK_METHOD3(NeedKeyMock, void(const std::string& type,
1127                                  const uint8* init_data, int init_data_size));
1128   void DemuxerNeedKey(const std::string& type,
1129                       const std::vector<uint8>& init_data) {
1130     const uint8* init_data_ptr = init_data.empty() ? NULL : &init_data[0];
1131     NeedKeyMock(type, init_data_ptr, init_data.size());
1132   }
1133
1134   void Seek(base::TimeDelta seek_time) {
1135     demuxer_->StartWaitingForSeek(seek_time);
1136     demuxer_->Seek(seek_time, NewExpectedStatusCB(PIPELINE_OK));
1137     message_loop_.RunUntilIdle();
1138   }
1139
1140   void MarkEndOfStream(PipelineStatus status) {
1141     demuxer_->MarkEndOfStream(status);
1142     message_loop_.RunUntilIdle();
1143   }
1144
1145   bool SetTimestampOffset(const std::string& id,
1146                           base::TimeDelta timestamp_offset) {
1147     if (demuxer_->IsParsingMediaSegment(id))
1148       return false;
1149
1150     timestamp_offset_map_[id] = timestamp_offset;
1151     return true;
1152   }
1153
1154   base::MessageLoop message_loop_;
1155   MockDemuxerHost host_;
1156
1157   scoped_ptr<ChunkDemuxer> demuxer_;
1158
1159   base::TimeDelta append_window_start_for_next_append_;
1160   base::TimeDelta append_window_end_for_next_append_;
1161
1162   // Map of source id to timestamp offset to use for the next AppendData()
1163   // operation for that source id.
1164   std::map<std::string, base::TimeDelta> timestamp_offset_map_;
1165
1166  private:
1167   DISALLOW_COPY_AND_ASSIGN(ChunkDemuxerTest);
1168 };
1169
1170 TEST_F(ChunkDemuxerTest, Init) {
1171   // Test no streams, audio-only, video-only, and audio & video scenarios.
1172   // Audio and video streams can be encrypted or not encrypted.
1173   for (int i = 0; i < 16; i++) {
1174     bool has_audio = (i & 0x1) != 0;
1175     bool has_video = (i & 0x2) != 0;
1176     bool is_audio_encrypted = (i & 0x4) != 0;
1177     bool is_video_encrypted = (i & 0x8) != 0;
1178
1179     // No test on invalid combination.
1180     if ((!has_audio && is_audio_encrypted) ||
1181         (!has_video && is_video_encrypted)) {
1182       continue;
1183     }
1184
1185     CreateNewDemuxer();
1186
1187     if (is_audio_encrypted || is_video_encrypted) {
1188       int need_key_count = (is_audio_encrypted ? 1 : 0) +
1189                            (is_video_encrypted ? 1 : 0);
1190       EXPECT_CALL(*this, NeedKeyMock(kWebMEncryptInitDataType, NotNull(),
1191                                      DecryptConfig::kDecryptionKeySize))
1192           .Times(Exactly(need_key_count));
1193     }
1194
1195     int stream_flags = 0;
1196     if (has_audio)
1197       stream_flags |= HAS_AUDIO;
1198
1199     if (has_video)
1200       stream_flags |= HAS_VIDEO;
1201
1202     ASSERT_TRUE(InitDemuxerWithEncryptionInfo(
1203         stream_flags, is_audio_encrypted, is_video_encrypted));
1204
1205     DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
1206     if (has_audio) {
1207       ASSERT_TRUE(audio_stream);
1208
1209       const AudioDecoderConfig& config = audio_stream->audio_decoder_config();
1210       EXPECT_EQ(kCodecVorbis, config.codec());
1211       EXPECT_EQ(32, config.bits_per_channel());
1212       EXPECT_EQ(CHANNEL_LAYOUT_STEREO, config.channel_layout());
1213       EXPECT_EQ(44100, config.samples_per_second());
1214       EXPECT_TRUE(config.extra_data());
1215       EXPECT_GT(config.extra_data_size(), 0u);
1216       EXPECT_EQ(kSampleFormatPlanarF32, config.sample_format());
1217       EXPECT_EQ(is_audio_encrypted,
1218                 audio_stream->audio_decoder_config().is_encrypted());
1219       EXPECT_TRUE(static_cast<ChunkDemuxerStream*>(audio_stream)
1220                       ->supports_partial_append_window_trimming());
1221     } else {
1222       EXPECT_FALSE(audio_stream);
1223     }
1224
1225     DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
1226     if (has_video) {
1227       EXPECT_TRUE(video_stream);
1228       EXPECT_EQ(is_video_encrypted,
1229                 video_stream->video_decoder_config().is_encrypted());
1230       EXPECT_FALSE(static_cast<ChunkDemuxerStream*>(video_stream)
1231                        ->supports_partial_append_window_trimming());
1232     } else {
1233       EXPECT_FALSE(video_stream);
1234     }
1235
1236     ShutdownDemuxer();
1237     demuxer_.reset();
1238   }
1239 }
1240
1241 // TODO(acolwell): Fold this test into Init tests since the tests are
1242 // almost identical.
1243 TEST_F(ChunkDemuxerTest, InitText) {
1244   // Test with 1 video stream and 1 text stream, and 0 or 1 audio streams.
1245   // No encryption cases handled here.
1246   bool has_video = true;
1247   bool is_audio_encrypted = false;
1248   bool is_video_encrypted = false;
1249   for (int i = 0; i < 2; i++) {
1250     bool has_audio = (i & 0x1) != 0;
1251
1252     CreateNewDemuxer();
1253
1254     DemuxerStream* text_stream = NULL;
1255     TextTrackConfig text_config;
1256     EXPECT_CALL(host_, AddTextStream(_, _))
1257         .WillOnce(DoAll(SaveArg<0>(&text_stream),
1258                         SaveArg<1>(&text_config)));
1259
1260     int stream_flags = HAS_TEXT;
1261     if (has_audio)
1262       stream_flags |= HAS_AUDIO;
1263
1264     if (has_video)
1265       stream_flags |= HAS_VIDEO;
1266
1267     ASSERT_TRUE(InitDemuxerWithEncryptionInfo(
1268         stream_flags, is_audio_encrypted, is_video_encrypted));
1269     ASSERT_TRUE(text_stream);
1270     EXPECT_EQ(DemuxerStream::TEXT, text_stream->type());
1271     EXPECT_EQ(kTextSubtitles, text_config.kind());
1272     EXPECT_FALSE(static_cast<ChunkDemuxerStream*>(text_stream)
1273                      ->supports_partial_append_window_trimming());
1274
1275     DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
1276     if (has_audio) {
1277       ASSERT_TRUE(audio_stream);
1278
1279       const AudioDecoderConfig& config = audio_stream->audio_decoder_config();
1280       EXPECT_EQ(kCodecVorbis, config.codec());
1281       EXPECT_EQ(32, config.bits_per_channel());
1282       EXPECT_EQ(CHANNEL_LAYOUT_STEREO, config.channel_layout());
1283       EXPECT_EQ(44100, config.samples_per_second());
1284       EXPECT_TRUE(config.extra_data());
1285       EXPECT_GT(config.extra_data_size(), 0u);
1286       EXPECT_EQ(kSampleFormatPlanarF32, config.sample_format());
1287       EXPECT_EQ(is_audio_encrypted,
1288                 audio_stream->audio_decoder_config().is_encrypted());
1289       EXPECT_TRUE(static_cast<ChunkDemuxerStream*>(audio_stream)
1290                       ->supports_partial_append_window_trimming());
1291     } else {
1292       EXPECT_FALSE(audio_stream);
1293     }
1294
1295     DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
1296     if (has_video) {
1297       EXPECT_TRUE(video_stream);
1298       EXPECT_EQ(is_video_encrypted,
1299                 video_stream->video_decoder_config().is_encrypted());
1300       EXPECT_FALSE(static_cast<ChunkDemuxerStream*>(video_stream)
1301                        ->supports_partial_append_window_trimming());
1302     } else {
1303       EXPECT_FALSE(video_stream);
1304     }
1305
1306     ShutdownDemuxer();
1307     demuxer_.reset();
1308   }
1309 }
1310
1311 TEST_F(ChunkDemuxerTest, SingleTextTrackIdChange) {
1312   // Test with 1 video stream, 1 audio, and 1 text stream. Send a second init
1313   // segment in which the text track ID changes. Verify appended buffers before
1314   // and after the second init segment map to the same underlying track buffers.
1315   CreateNewDemuxer();
1316   DemuxerStream* text_stream = NULL;
1317   TextTrackConfig text_config;
1318   EXPECT_CALL(host_, AddTextStream(_, _))
1319       .WillOnce(DoAll(SaveArg<0>(&text_stream),
1320                       SaveArg<1>(&text_config)));
1321   ASSERT_TRUE(InitDemuxerWithEncryptionInfo(
1322       HAS_TEXT | HAS_AUDIO | HAS_VIDEO, false, false));
1323   DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
1324   DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
1325   ASSERT_TRUE(audio_stream);
1326   ASSERT_TRUE(video_stream);
1327   ASSERT_TRUE(text_stream);
1328
1329   AppendMuxedCluster(
1330       MuxedStreamInfo(kAudioTrackNum, "0K 23K"),
1331       MuxedStreamInfo(kVideoTrackNum, "0K 30"),
1332       MuxedStreamInfo(kTextTrackNum, "10K"));
1333   CheckExpectedRanges(kSourceId, "{ [0,46) }");
1334
1335   scoped_ptr<uint8[]> info_tracks;
1336   int info_tracks_size = 0;
1337   CreateInitSegmentWithAlternateTextTrackNum(HAS_TEXT | HAS_AUDIO | HAS_VIDEO,
1338                                              false, false,
1339                                              &info_tracks, &info_tracks_size);
1340   demuxer_->AppendData(kSourceId, info_tracks.get(), info_tracks_size,
1341                        append_window_start_for_next_append_,
1342                        append_window_end_for_next_append_,
1343                        &timestamp_offset_map_[kSourceId]);
1344
1345   AppendMuxedCluster(
1346       MuxedStreamInfo(kAudioTrackNum, "46K 69K"),
1347       MuxedStreamInfo(kVideoTrackNum, "60K"),
1348       MuxedStreamInfo(kAlternateTextTrackNum, "45K"));
1349
1350   CheckExpectedRanges(kSourceId, "{ [0,92) }");
1351   CheckExpectedBuffers(audio_stream, "0 23 46 69");
1352   CheckExpectedBuffers(video_stream, "0 30 60");
1353   CheckExpectedBuffers(text_stream, "10 45");
1354
1355   ShutdownDemuxer();
1356 }
1357
1358 TEST_F(ChunkDemuxerTest, InitSegmentSetsNeedRandomAccessPointFlag) {
1359   // Tests that non-keyframes following an init segment are allowed and
1360   // dropped, as expected if the "initialization segment received" algorithm
1361   // correctly sets the "needs random access point" flag to true for all
1362   // track buffers. Note that the first initialization segment alone is
1363   // insufficient to fully test this, since the "needs random access point"
1364   // flag initializes to true.
1365   CreateNewDemuxer();
1366   DemuxerStream* text_stream = NULL;
1367   EXPECT_CALL(host_, AddTextStream(_, _))
1368       .WillOnce(SaveArg<0>(&text_stream));
1369   ASSERT_TRUE(InitDemuxerWithEncryptionInfo(
1370       HAS_TEXT | HAS_AUDIO | HAS_VIDEO, false, false));
1371   DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
1372   DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
1373   ASSERT_TRUE(audio_stream && video_stream && text_stream);
1374
1375   AppendMuxedCluster(
1376       MuxedStreamInfo(kAudioTrackNum, "23K"),
1377       MuxedStreamInfo(kVideoTrackNum, "0 30K"),
1378       MuxedStreamInfo(kTextTrackNum, "25K 40K"));
1379   CheckExpectedRanges(kSourceId, "{ [23,46) }");
1380
1381   AppendInitSegment(HAS_TEXT | HAS_AUDIO | HAS_VIDEO);
1382   AppendMuxedCluster(
1383       MuxedStreamInfo(kAudioTrackNum, "46K 69K"),
1384       MuxedStreamInfo(kVideoTrackNum, "60 90K"),
1385       MuxedStreamInfo(kTextTrackNum, "80K 90K"));
1386   CheckExpectedRanges(kSourceId, "{ [23,92) }");
1387
1388   CheckExpectedBuffers(audio_stream, "23 46 69");
1389   CheckExpectedBuffers(video_stream, "30 90");
1390   CheckExpectedBuffers(text_stream, "25 40 80 90");
1391 }
1392
1393 // Make sure that the demuxer reports an error if Shutdown()
1394 // is called before all the initialization segments are appended.
1395 TEST_F(ChunkDemuxerTest, Shutdown_BeforeAllInitSegmentsAppended) {
1396   EXPECT_CALL(*this, DemuxerOpened());
1397   demuxer_->Initialize(
1398       &host_, CreateInitDoneCB(
1399           kDefaultDuration(), DEMUXER_ERROR_COULD_NOT_OPEN), true);
1400
1401   EXPECT_EQ(AddId("audio", HAS_AUDIO), ChunkDemuxer::kOk);
1402   EXPECT_EQ(AddId("video", HAS_VIDEO), ChunkDemuxer::kOk);
1403
1404   AppendInitSegmentWithSourceId("audio", HAS_AUDIO);
1405
1406   ShutdownDemuxer();
1407 }
1408
1409 TEST_F(ChunkDemuxerTest, Shutdown_BeforeAllInitSegmentsAppendedText) {
1410   EXPECT_CALL(*this, DemuxerOpened());
1411   demuxer_->Initialize(
1412       &host_, CreateInitDoneCB(
1413           kDefaultDuration(), DEMUXER_ERROR_COULD_NOT_OPEN), true);
1414
1415   EXPECT_EQ(AddId("audio", HAS_AUDIO), ChunkDemuxer::kOk);
1416   EXPECT_EQ(AddId("video_and_text", HAS_VIDEO), ChunkDemuxer::kOk);
1417
1418   EXPECT_CALL(host_, AddTextStream(_, _))
1419       .Times(Exactly(1));
1420
1421   AppendInitSegmentWithSourceId("video_and_text", HAS_VIDEO | HAS_TEXT);
1422
1423   ShutdownDemuxer();
1424 }
1425
1426 // Verifies that all streams waiting for data receive an end of stream
1427 // buffer when Shutdown() is called.
1428 TEST_F(ChunkDemuxerTest, Shutdown_EndOfStreamWhileWaitingForData) {
1429   DemuxerStream* text_stream = NULL;
1430   EXPECT_CALL(host_, AddTextStream(_, _))
1431       .WillOnce(SaveArg<0>(&text_stream));
1432   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));
1433
1434   DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
1435   DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
1436
1437   bool audio_read_done = false;
1438   bool video_read_done = false;
1439   bool text_read_done = false;
1440   audio_stream->Read(base::Bind(&OnReadDone_EOSExpected, &audio_read_done));
1441   video_stream->Read(base::Bind(&OnReadDone_EOSExpected, &video_read_done));
1442   text_stream->Read(base::Bind(&OnReadDone_EOSExpected, &text_read_done));
1443   message_loop_.RunUntilIdle();
1444
1445   EXPECT_FALSE(audio_read_done);
1446   EXPECT_FALSE(video_read_done);
1447   EXPECT_FALSE(text_read_done);
1448
1449   ShutdownDemuxer();
1450
1451   EXPECT_TRUE(audio_read_done);
1452   EXPECT_TRUE(video_read_done);
1453   EXPECT_TRUE(text_read_done);
1454 }
1455
1456 // Test that Seek() completes successfully when the first cluster
1457 // arrives.
1458 TEST_F(ChunkDemuxerTest, AppendDataAfterSeek) {
1459   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1460   AppendCluster(kDefaultFirstCluster());
1461
1462   InSequence s;
1463
1464   EXPECT_CALL(*this, Checkpoint(1));
1465
1466   Seek(base::TimeDelta::FromMilliseconds(46));
1467
1468   EXPECT_CALL(*this, Checkpoint(2));
1469
1470   Checkpoint(1);
1471
1472   AppendCluster(kDefaultSecondCluster());
1473
1474   message_loop_.RunUntilIdle();
1475
1476   Checkpoint(2);
1477 }
1478
1479 // Test that parsing errors are handled for clusters appended after init.
1480 TEST_F(ChunkDemuxerTest, ErrorWhileParsingClusterAfterInit) {
1481   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1482   AppendCluster(kDefaultFirstCluster());
1483
1484   EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
1485   AppendGarbage();
1486 }
1487
1488 // Test the case where a Seek() is requested while the parser
1489 // is in the middle of a cluster. This is to verify that the parser
1490 // does not reset itself on a seek.
1491 TEST_F(ChunkDemuxerTest, SeekWhileParsingCluster) {
1492   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1493
1494   InSequence s;
1495
1496   scoped_ptr<Cluster> cluster_a(GenerateCluster(0, 6));
1497
1498   // Split the cluster into two appends at an arbitrary point near the end.
1499   int first_append_size = cluster_a->size() - 11;
1500   int second_append_size = cluster_a->size() - first_append_size;
1501
1502   // Append the first part of the cluster.
1503   AppendData(cluster_a->data(), first_append_size);
1504
1505   ExpectRead(DemuxerStream::AUDIO, 0);
1506   ExpectRead(DemuxerStream::VIDEO, 0);
1507   ExpectRead(DemuxerStream::AUDIO, kAudioBlockDuration);
1508
1509   Seek(base::TimeDelta::FromSeconds(5));
1510
1511   // Append the rest of the cluster.
1512   AppendData(cluster_a->data() + first_append_size, second_append_size);
1513
1514   // Append the new cluster and verify that only the blocks
1515   // in the new cluster are returned.
1516   AppendCluster(GenerateCluster(5000, 6));
1517   GenerateExpectedReads(5000, 6);
1518 }
1519
1520 // Test the case where AppendData() is called before Init().
1521 TEST_F(ChunkDemuxerTest, AppendDataBeforeInit) {
1522   scoped_ptr<uint8[]> info_tracks;
1523   int info_tracks_size = 0;
1524   CreateInitSegment(HAS_AUDIO | HAS_VIDEO,
1525                     false, false, &info_tracks, &info_tracks_size);
1526   demuxer_->AppendData(kSourceId, info_tracks.get(), info_tracks_size,
1527                        append_window_start_for_next_append_,
1528                        append_window_end_for_next_append_,
1529                        &timestamp_offset_map_[kSourceId]);
1530 }
1531
1532 // Make sure Read() callbacks are dispatched with the proper data.
1533 TEST_F(ChunkDemuxerTest, Read) {
1534   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1535
1536   AppendCluster(kDefaultFirstCluster());
1537
1538   bool audio_read_done = false;
1539   bool video_read_done = false;
1540   ReadAudio(base::Bind(&OnReadDone,
1541                        base::TimeDelta::FromMilliseconds(0),
1542                        &audio_read_done));
1543   ReadVideo(base::Bind(&OnReadDone,
1544                        base::TimeDelta::FromMilliseconds(0),
1545                        &video_read_done));
1546
1547   EXPECT_TRUE(audio_read_done);
1548   EXPECT_TRUE(video_read_done);
1549 }
1550
1551 TEST_F(ChunkDemuxerTest, OutOfOrderClusters) {
1552   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1553   AppendCluster(kDefaultFirstCluster());
1554   AppendCluster(GenerateCluster(10, 4));
1555
1556   // Make sure that AppendCluster() does not fail with a cluster that
1557   // overlaps with the previously appended cluster.
1558   AppendCluster(GenerateCluster(5, 4));
1559
1560   // Verify that AppendData() can still accept more data.
1561   scoped_ptr<Cluster> cluster_c(GenerateCluster(45, 2));
1562   demuxer_->AppendData(kSourceId, cluster_c->data(), cluster_c->size(),
1563                        append_window_start_for_next_append_,
1564                        append_window_end_for_next_append_,
1565                        &timestamp_offset_map_[kSourceId]);
1566 }
1567
1568 TEST_F(ChunkDemuxerTest, NonMonotonicButAboveClusterTimecode) {
1569   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1570   AppendCluster(kDefaultFirstCluster());
1571
1572   ClusterBuilder cb;
1573
1574   // Test the case where block timecodes are not monotonically
1575   // increasing but stay above the cluster timecode.
1576   cb.SetClusterTimecode(5);
1577   AddSimpleBlock(&cb, kAudioTrackNum, 5);
1578   AddSimpleBlock(&cb, kVideoTrackNum, 10);
1579   AddSimpleBlock(&cb, kAudioTrackNum, 7);
1580   AddSimpleBlock(&cb, kVideoTrackNum, 15);
1581
1582   EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
1583   AppendCluster(cb.Finish());
1584
1585   // Verify that AppendData() ignores data after the error.
1586   scoped_ptr<Cluster> cluster_b(GenerateCluster(20, 2));
1587   demuxer_->AppendData(kSourceId, cluster_b->data(), cluster_b->size(),
1588                        append_window_start_for_next_append_,
1589                        append_window_end_for_next_append_,
1590                        &timestamp_offset_map_[kSourceId]);
1591 }
1592
1593 TEST_F(ChunkDemuxerTest, BackwardsAndBeforeClusterTimecode) {
1594   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1595   AppendCluster(kDefaultFirstCluster());
1596
1597   ClusterBuilder cb;
1598
1599   // Test timecodes going backwards and including values less than the cluster
1600   // timecode.
1601   cb.SetClusterTimecode(5);
1602   AddSimpleBlock(&cb, kAudioTrackNum, 5);
1603   AddSimpleBlock(&cb, kVideoTrackNum, 5);
1604   AddSimpleBlock(&cb, kAudioTrackNum, 3);
1605   AddSimpleBlock(&cb, kVideoTrackNum, 3);
1606
1607   EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
1608   AppendCluster(cb.Finish());
1609
1610   // Verify that AppendData() ignores data after the error.
1611   scoped_ptr<Cluster> cluster_b(GenerateCluster(6, 2));
1612   demuxer_->AppendData(kSourceId, cluster_b->data(), cluster_b->size(),
1613                        append_window_start_for_next_append_,
1614                        append_window_end_for_next_append_,
1615                        &timestamp_offset_map_[kSourceId]);
1616 }
1617
1618
1619 TEST_F(ChunkDemuxerTest, PerStreamMonotonicallyIncreasingTimestamps) {
1620   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1621   AppendCluster(kDefaultFirstCluster());
1622
1623   ClusterBuilder cb;
1624
1625   // Test that block timestamps must be monotonically increasing on a
1626   // per-stream basis.
1627   cb.SetClusterTimecode(5);
1628   AddSimpleBlock(&cb, kAudioTrackNum, 5);
1629   AddSimpleBlock(&cb, kVideoTrackNum, 5);
1630   AddSimpleBlock(&cb, kAudioTrackNum, 4);
1631   AddSimpleBlock(&cb, kVideoTrackNum, 7);
1632
1633   EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
1634   AppendCluster(cb.Finish());
1635 }
1636
1637 // Test the case where a cluster is passed to AppendCluster() before
1638 // INFO & TRACKS data.
1639 TEST_F(ChunkDemuxerTest, ClusterBeforeInitSegment) {
1640   EXPECT_CALL(*this, DemuxerOpened());
1641   demuxer_->Initialize(
1642       &host_, NewExpectedStatusCB(DEMUXER_ERROR_COULD_NOT_OPEN), true);
1643
1644   ASSERT_EQ(AddId(), ChunkDemuxer::kOk);
1645
1646   AppendCluster(GenerateCluster(0, 1));
1647 }
1648
1649 // Test cases where we get a MarkEndOfStream() call during initialization.
1650 TEST_F(ChunkDemuxerTest, EOSDuringInit) {
1651   EXPECT_CALL(*this, DemuxerOpened());
1652   demuxer_->Initialize(
1653       &host_, NewExpectedStatusCB(DEMUXER_ERROR_COULD_NOT_OPEN), true);
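  // With no data appended, marking end of stream during initialization is
  // expected to fail the init with the DEMUXER_ERROR_COULD_NOT_OPEN status
  // wired into the callback above.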
1654   MarkEndOfStream(PIPELINE_OK);
1655 }
1656
1657 TEST_F(ChunkDemuxerTest, EndOfStreamWithNoAppend) {
1658   EXPECT_CALL(*this, DemuxerOpened());
1659   demuxer_->Initialize(
1660       &host_, NewExpectedStatusCB(DEMUXER_ERROR_COULD_NOT_OPEN), true);
1661
1662   ASSERT_EQ(AddId(), ChunkDemuxer::kOk);
1663
1664   CheckExpectedRanges("{ }");
1665   MarkEndOfStream(PIPELINE_OK);
1666   ShutdownDemuxer();
1667   CheckExpectedRanges("{ }");
1668   demuxer_->RemoveId(kSourceId);
1669   demuxer_.reset();
1670 }
1671
1672 TEST_F(ChunkDemuxerTest, EndOfStreamWithNoMediaAppend) {
1673   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1674
1675   CheckExpectedRanges("{ }");
1676   MarkEndOfStream(PIPELINE_OK);
1677   CheckExpectedRanges("{ }");
1678 }
1679
1680 TEST_F(ChunkDemuxerTest, DecodeErrorEndOfStream) {
1681   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1682
1683   AppendCluster(kDefaultFirstCluster());
1684   CheckExpectedRanges(kDefaultFirstClusterRange);
1685
1686   EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
1687   MarkEndOfStream(PIPELINE_ERROR_DECODE);
1688   CheckExpectedRanges(kDefaultFirstClusterRange);
1689 }
1690
1691 TEST_F(ChunkDemuxerTest, NetworkErrorEndOfStream) {
1692   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1693
1694   AppendCluster(kDefaultFirstCluster());
1695   CheckExpectedRanges(kDefaultFirstClusterRange);
1696
1697   EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_NETWORK));
1698   MarkEndOfStream(PIPELINE_ERROR_NETWORK);
1699 }
1700
1701 // Helper class to reduce duplicate code when testing end of stream
1702 // Read() behavior.
1703 class EndOfStreamHelper {
1704  public:
1705   explicit EndOfStreamHelper(Demuxer* demuxer)
1706       : demuxer_(demuxer),
1707         audio_read_done_(false),
1708         video_read_done_(false) {
1709   }
1710
1711   // Request a read on the audio and video streams.
1712   void RequestReads() {
1713     EXPECT_FALSE(audio_read_done_);
1714     EXPECT_FALSE(video_read_done_);
1715
1716     DemuxerStream* audio = demuxer_->GetStream(DemuxerStream::AUDIO);
1717     DemuxerStream* video = demuxer_->GetStream(DemuxerStream::VIDEO);
1718
1719     audio->Read(base::Bind(&OnEndOfStreamReadDone, &audio_read_done_));
1720     video->Read(base::Bind(&OnEndOfStreamReadDone, &video_read_done_));
1721     base::MessageLoop::current()->RunUntilIdle();
1722   }
1723
1724   // Check whether the |audio_read_done_| and |video_read_done_| flags
1725   // match |expected|.
1726   void CheckIfReadDonesWereCalled(bool expected) {
1727     base::MessageLoop::current()->RunUntilIdle();
1728     EXPECT_EQ(expected, audio_read_done_);
1729     EXPECT_EQ(expected, video_read_done_);
1730   }
1731
1732  private:
1733   static void OnEndOfStreamReadDone(
1734       bool* called,
1735       DemuxerStream::Status status,
1736       const scoped_refptr<DecoderBuffer>& buffer) {
1737     EXPECT_EQ(status, DemuxerStream::kOk);
1738     EXPECT_TRUE(buffer->end_of_stream());
1739     *called = true;
1740   }
1741
1742   Demuxer* demuxer_;
1743   bool audio_read_done_;
1744   bool video_read_done_;
1745
1746   DISALLOW_COPY_AND_ASSIGN(EndOfStreamHelper);
1747 };
1748
1749 // Make sure that all pending reads for which no media data is available get an
1750 // "end of stream" buffer when MarkEndOfStream() is called.
1751 TEST_F(ChunkDemuxerTest, EndOfStreamWithPendingReads) {
1752   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1753
1754   AppendCluster(GenerateCluster(0, 2));
1755
1756   bool audio_read_done_1 = false;
1757   bool video_read_done_1 = false;
1758   EndOfStreamHelper end_of_stream_helper_1(demuxer_.get());
1759   EndOfStreamHelper end_of_stream_helper_2(demuxer_.get());
1760
1761   ReadAudio(base::Bind(&OnReadDone,
1762                        base::TimeDelta::FromMilliseconds(0),
1763                        &audio_read_done_1));
1764   ReadVideo(base::Bind(&OnReadDone,
1765                        base::TimeDelta::FromMilliseconds(0),
1766                        &video_read_done_1));
1767   message_loop_.RunUntilIdle();
1768
1769   EXPECT_TRUE(audio_read_done_1);
1770   EXPECT_TRUE(video_read_done_1);
1771
1772   end_of_stream_helper_1.RequestReads();
1773
1774   EXPECT_CALL(host_, SetDuration(
1775       base::TimeDelta::FromMilliseconds(kVideoBlockDuration)));
1776   MarkEndOfStream(PIPELINE_OK);
1777
1778   end_of_stream_helper_1.CheckIfReadDonesWereCalled(true);
1779
1780   end_of_stream_helper_2.RequestReads();
1781   end_of_stream_helper_2.CheckIfReadDonesWereCalled(true);
1782 }
1783
1784 // Make sure that all Read() calls after we get a MarkEndOfStream()
1785 // call return an "end of stream" buffer.
1786 TEST_F(ChunkDemuxerTest, ReadsAfterEndOfStream) {
1787   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1788
1789   AppendCluster(GenerateCluster(0, 2));
1790
1791   bool audio_read_done_1 = false;
1792   bool video_read_done_1 = false;
1793   EndOfStreamHelper end_of_stream_helper_1(demuxer_.get());
1794   EndOfStreamHelper end_of_stream_helper_2(demuxer_.get());
1795   EndOfStreamHelper end_of_stream_helper_3(demuxer_.get());
1796
1797   ReadAudio(base::Bind(&OnReadDone,
1798                        base::TimeDelta::FromMilliseconds(0),
1799                        &audio_read_done_1));
1800   ReadVideo(base::Bind(&OnReadDone,
1801                        base::TimeDelta::FromMilliseconds(0),
1802                        &video_read_done_1));
1803
1804   end_of_stream_helper_1.RequestReads();
1805
1806   EXPECT_TRUE(audio_read_done_1);
1807   EXPECT_TRUE(video_read_done_1);
1808   end_of_stream_helper_1.CheckIfReadDonesWereCalled(false);
1809
1810   EXPECT_CALL(host_, SetDuration(
1811       base::TimeDelta::FromMilliseconds(kVideoBlockDuration)));
1812   MarkEndOfStream(PIPELINE_OK);
1813
1814   end_of_stream_helper_1.CheckIfReadDonesWereCalled(true);
1815
1816   // Request a few more reads and make sure we immediately get
1817   // end of stream buffers.
1818   end_of_stream_helper_2.RequestReads();
1819   end_of_stream_helper_2.CheckIfReadDonesWereCalled(true);
1820
1821   end_of_stream_helper_3.RequestReads();
1822   end_of_stream_helper_3.CheckIfReadDonesWereCalled(true);
1823 }
1824
1825 TEST_F(ChunkDemuxerTest, EndOfStreamDuringCanceledSeek) {
1826   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1827
1828   AppendCluster(0, 10);
1829   EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(138)));
1830   MarkEndOfStream(PIPELINE_OK);
1831
1832   // Start the first seek.
1833   Seek(base::TimeDelta::FromMilliseconds(20));
1834
1835   // Simulate another seek being requested before the first
1836   // seek has finished prerolling.
1837   base::TimeDelta seek_time2 = base::TimeDelta::FromMilliseconds(30);
1838   demuxer_->CancelPendingSeek(seek_time2);
1839
1840   // Finish second seek.
1841   Seek(seek_time2);
1842
1843   DemuxerStream::Status status;
1844   base::TimeDelta last_timestamp;
1845
1846   // Make sure audio can reach end of stream.
1847   ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
1848   ASSERT_EQ(status, DemuxerStream::kOk);
1849
1850   // Make sure video can reach end of stream.
1851   ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
1852   ASSERT_EQ(status, DemuxerStream::kOk);
1853 }
1854
1855 // Verify buffered range change behavior for audio/video/text tracks.
1856 TEST_F(ChunkDemuxerTest, EndOfStreamRangeChanges) {
1857   DemuxerStream* text_stream = NULL;
1858
1859   EXPECT_CALL(host_, AddTextStream(_, _))
1860       .WillOnce(SaveArg<0>(&text_stream));
1861   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));
1862
1863   AppendMuxedCluster(
1864       MuxedStreamInfo(kVideoTrackNum, "0K 33"),
1865       MuxedStreamInfo(kAudioTrackNum, "0K 23K"));
1866
1867   // Check expected ranges and verify that an empty text track does not
1868   // affect the expected ranges.
1869   CheckExpectedRanges(kSourceId, "{ [0,46) }");
1870
1871   EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(66)));
1872   MarkEndOfStream(PIPELINE_OK);
1873
1874   // Check the expected ranges after marking end of stream and verify that
1875   // the empty text track still does not affect them.
1876   CheckExpectedRanges(kSourceId, "{ [0,66) }");
1877
1878   // Unmark end of stream state and verify that the ranges return to
1879   // their pre-"end of stream" values.
1880   demuxer_->UnmarkEndOfStream();
1881   CheckExpectedRanges(kSourceId, "{ [0,46) }");
1882
1883   // Add text track data and verify that the buffered ranges don't change
1884   // since the intersection of all the tracks doesn't change.
1885   EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(200)));
1886   AppendMuxedCluster(
1887       MuxedStreamInfo(kVideoTrackNum, "0K 33"),
1888       MuxedStreamInfo(kAudioTrackNum, "0K 23K"),
1889       MuxedStreamInfo(kTextTrackNum, "0K 100K"));
1890   CheckExpectedRanges(kSourceId, "{ [0,46) }");
1891
1892   // Mark end of stream and verify that text track data is reflected in
1893   // the new range.
1894   MarkEndOfStream(PIPELINE_OK);
1895   CheckExpectedRanges(kSourceId, "{ [0,200) }");
1896 }
1897
1898 // Make sure AppendData() will accept elements that span multiple calls.
1899 TEST_F(ChunkDemuxerTest, AppendingInPieces) {
1900   EXPECT_CALL(*this, DemuxerOpened());
1901   demuxer_->Initialize(
1902       &host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
1903
1904   ASSERT_EQ(AddId(), ChunkDemuxer::kOk);
1905
1906   scoped_ptr<uint8[]> info_tracks;
1907   int info_tracks_size = 0;
1908   CreateInitSegment(HAS_AUDIO | HAS_VIDEO,
1909                     false, false, &info_tracks, &info_tracks_size);
1910
1911   scoped_ptr<Cluster> cluster_a(kDefaultFirstCluster());
1912   scoped_ptr<Cluster> cluster_b(kDefaultSecondCluster());
1913
1914   size_t buffer_size = info_tracks_size + cluster_a->size() + cluster_b->size();
1915   scoped_ptr<uint8[]> buffer(new uint8[buffer_size]);
1916   uint8* dst = buffer.get();
1917   memcpy(dst, info_tracks.get(), info_tracks_size);
1918   dst += info_tracks_size;
1919
1920   memcpy(dst, cluster_a->data(), cluster_a->size());
1921   dst += cluster_a->size();
1922
1923   memcpy(dst, cluster_b->data(), cluster_b->size());
1924   dst += cluster_b->size();
1925
1926   AppendDataInPieces(buffer.get(), buffer_size);
1927
1928   GenerateExpectedReads(0, 9);
1929 }
1930
1931 TEST_F(ChunkDemuxerTest, WebMFile_AudioAndVideo) {
1932   struct BufferTimestamps buffer_timestamps[] = {
1933     {0, 0},
1934     {33, 3},
1935     {67, 6},
1936     {100, 9},
1937     {133, 12},
1938     {kSkip, kSkip},
1939   };
1940
1941   // TODO(wolenetz/acolwell): Remove this SetDuration expectation and update the
1942   // ParseWebMFile() call's expected duration, below, once the file is fixed to
1943   // have the correct duration in the init segment. See http://crbug.com/354284.
1944   EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(2746)));
1945
1946   ASSERT_TRUE(ParseWebMFile("bear-320x240.webm", buffer_timestamps,
1947                             base::TimeDelta::FromMilliseconds(2744)));
1948 }
1949
1950 TEST_F(ChunkDemuxerTest, WebMFile_LiveAudioAndVideo) {
1951   struct BufferTimestamps buffer_timestamps[] = {
1952     {0, 0},
1953     {33, 3},
1954     {67, 6},
1955     {100, 9},
1956     {133, 12},
1957     {kSkip, kSkip},
1958   };
1959
1960   ASSERT_TRUE(ParseWebMFile("bear-320x240-live.webm", buffer_timestamps,
1961                             kInfiniteDuration()));
1962 }
1963
1964 TEST_F(ChunkDemuxerTest, WebMFile_AudioOnly) {
1965   struct BufferTimestamps buffer_timestamps[] = {
1966     {kSkip, 0},
1967     {kSkip, 3},
1968     {kSkip, 6},
1969     {kSkip, 9},
1970     {kSkip, 12},
1971     {kSkip, kSkip},
1972   };
1973
1974   // TODO(wolenetz/acolwell): Remove this SetDuration expectation and update the
1975   // ParseWebMFile() call's expected duration, below, once the file is fixed to
1976   // have the correct duration in the init segment. See http://crbug.com/354284.
1977   EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(2746)));
1978
1979   ASSERT_TRUE(ParseWebMFile("bear-320x240-audio-only.webm", buffer_timestamps,
1980                             base::TimeDelta::FromMilliseconds(2744),
1981                             HAS_AUDIO));
1982 }
1983
1984 TEST_F(ChunkDemuxerTest, WebMFile_VideoOnly) {
1985   struct BufferTimestamps buffer_timestamps[] = {
1986     {0, kSkip},
1987     {33, kSkip},
1988     {67, kSkip},
1989     {100, kSkip},
1990     {133, kSkip},
1991     {kSkip, kSkip},
1992   };
1993
1994   // TODO(wolenetz/acolwell): Remove this SetDuration expectation and update the
1995   // ParseWebMFile() call's expected duration, below, once the file is fixed to
1996   // have the correct duration in the init segment. See http://crbug.com/354284.
1997   EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(2736)));
1998
1999   ASSERT_TRUE(ParseWebMFile("bear-320x240-video-only.webm", buffer_timestamps,
2000                             base::TimeDelta::FromMilliseconds(2703),
2001                             HAS_VIDEO));
2002 }
2003
2004 TEST_F(ChunkDemuxerTest, WebMFile_AltRefFrames) {
2005   struct BufferTimestamps buffer_timestamps[] = {
2006     {0, 0},
2007     {33, 3},
2008     {33, 6},
2009     {67, 9},
2010     {100, 12},
2011     {kSkip, kSkip},
2012   };
2013
2014   ASSERT_TRUE(ParseWebMFile("bear-320x240-altref.webm", buffer_timestamps,
2015                             base::TimeDelta::FromMilliseconds(2767)));
2016 }
2017
2018 // Verify that we output buffers before the entire cluster has been parsed.
2019 TEST_F(ChunkDemuxerTest, IncrementalClusterParsing) {
2020   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2021   AppendEmptyCluster(0);
2022
2023   scoped_ptr<Cluster> cluster(GenerateCluster(0, 6));
2024
2025   bool audio_read_done = false;
2026   bool video_read_done = false;
2027   ReadAudio(base::Bind(&OnReadDone,
2028                        base::TimeDelta::FromMilliseconds(0),
2029                        &audio_read_done));
2030   ReadVideo(base::Bind(&OnReadDone,
2031                        base::TimeDelta::FromMilliseconds(0),
2032                        &video_read_done));
2033
2034   // Make sure the reads haven't completed yet.
2035   EXPECT_FALSE(audio_read_done);
2036   EXPECT_FALSE(video_read_done);
2037
2038   // Append data one byte at a time until one or both reads complete.
2039   int i = 0;
2040   for (; i < cluster->size() && !(audio_read_done || video_read_done); ++i) {
2041     AppendData(cluster->data() + i, 1);
2042     message_loop_.RunUntilIdle();
2043   }
2044
2045   EXPECT_TRUE(audio_read_done || video_read_done);
2046   EXPECT_GT(i, 0);
2047   EXPECT_LT(i, cluster->size());
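  // At this point at least one read has completed even though only part of
  // the cluster has been appended, which is the incremental parsing behavior
  // under test.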
2048
2049   audio_read_done = false;
2050   video_read_done = false;
2051   ReadAudio(base::Bind(&OnReadDone,
2052                        base::TimeDelta::FromMilliseconds(23),
2053                        &audio_read_done));
2054   ReadVideo(base::Bind(&OnReadDone,
2055                        base::TimeDelta::FromMilliseconds(33),
2056                        &video_read_done));
2057
2058   // Make sure the reads haven't completed yet.
2059   EXPECT_FALSE(audio_read_done);
2060   EXPECT_FALSE(video_read_done);
2061
2062   // Append the remaining data.
2063   ASSERT_LT(i, cluster->size());
2064   AppendData(cluster->data() + i, cluster->size() - i);
2065
2066   message_loop_.RunUntilIdle();
2067
2068   EXPECT_TRUE(audio_read_done);
2069   EXPECT_TRUE(video_read_done);
2070 }
2071
2072 TEST_F(ChunkDemuxerTest, ParseErrorDuringInit) {
2073   EXPECT_CALL(*this, DemuxerOpened());
2074   demuxer_->Initialize(
2075       &host_, CreateInitDoneCB(
2076           kNoTimestamp(), DEMUXER_ERROR_COULD_NOT_OPEN), true);
2077
2078   ASSERT_EQ(AddId(), ChunkDemuxer::kOk);
2079
2080   uint8 tmp = 0;
2081   demuxer_->AppendData(kSourceId, &tmp, 1,
2082                        append_window_start_for_next_append_,
2083                        append_window_end_for_next_append_,
2084                        &timestamp_offset_map_[kSourceId]);
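  // The single garbage byte cannot be parsed as a valid WebM header, so
  // initialization is expected to complete with the DEMUXER_ERROR_COULD_NOT_OPEN
  // status wired into the init-done callback above.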
2085 }
2086
2087 TEST_F(ChunkDemuxerTest, AVHeadersWithAudioOnlyType) {
2088   EXPECT_CALL(*this, DemuxerOpened());
2089   demuxer_->Initialize(
2090       &host_, CreateInitDoneCB(kNoTimestamp(),
2091                                DEMUXER_ERROR_COULD_NOT_OPEN), true);
2092
2093   std::vector<std::string> codecs(1);
2094   codecs[0] = "vorbis";
2095   ASSERT_EQ(demuxer_->AddId(kSourceId, "audio/webm", codecs),
2096             ChunkDemuxer::kOk);
2097
2098   AppendInitSegment(HAS_AUDIO | HAS_VIDEO);
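  // The init segment carries both audio and video tracks, but the source was
  // added as "audio/webm" with only a "vorbis" codec, so initialization is
  // expected to fail with DEMUXER_ERROR_COULD_NOT_OPEN (see the callback above).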
2099 }
2100
2101 TEST_F(ChunkDemuxerTest, AVHeadersWithVideoOnlyType) {
2102   EXPECT_CALL(*this, DemuxerOpened());
2103   demuxer_->Initialize(
2104       &host_, CreateInitDoneCB(kNoTimestamp(),
2105                                DEMUXER_ERROR_COULD_NOT_OPEN), true);
2106
2107   std::vector<std::string> codecs(1);
2108   codecs[0] = "vp8";
2109   ASSERT_EQ(demuxer_->AddId(kSourceId, "video/webm", codecs),
2110             ChunkDemuxer::kOk);
2111
2112   AppendInitSegment(HAS_AUDIO | HAS_VIDEO);
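  // As above: the unexpected audio track does not match the "vp8"-only
  // "video/webm" source, so initialization is expected to fail.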
2113 }
2114
2115 TEST_F(ChunkDemuxerTest, MultipleHeaders) {
2116   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2117
2118   AppendCluster(kDefaultFirstCluster());
2119
2120   // Append another identical initialization segment.
2121   AppendInitSegment(HAS_AUDIO | HAS_VIDEO);
2122
2123   AppendCluster(kDefaultSecondCluster());
2124
2125   GenerateExpectedReads(0, 9);
2126 }
2127
2128 TEST_F(ChunkDemuxerTest, AddSeparateSourcesForAudioAndVideo) {
2129   std::string audio_id = "audio1";
2130   std::string video_id = "video1";
2131   ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
2132
2133   // Append audio and video data into separate source ids.
2134   AppendCluster(audio_id,
2135       GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
2136   GenerateAudioStreamExpectedReads(0, 4);
2137   AppendCluster(video_id,
2138       GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
2139   GenerateVideoStreamExpectedReads(0, 4);
2140 }
2141
2142 TEST_F(ChunkDemuxerTest, AddSeparateSourcesForAudioAndVideoText) {
2143   // TODO(matthewjheaney): Here and elsewhere, we need more tests
2144   // for inband text tracks (http://crbug/321455).
2145
2146   std::string audio_id = "audio1";
2147   std::string video_id = "video1";
2148
2149   EXPECT_CALL(host_, AddTextStream(_, _))
2150     .Times(Exactly(2));
2151   ASSERT_TRUE(InitDemuxerAudioAndVideoSourcesText(audio_id, video_id, true));
2152
2153   // Append audio and video data into separate source ids.
2154   AppendCluster(audio_id,
2155       GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
2156   GenerateAudioStreamExpectedReads(0, 4);
2157   AppendCluster(video_id,
2158       GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
2159   GenerateVideoStreamExpectedReads(0, 4);
2160 }
2161
2162 TEST_F(ChunkDemuxerTest, AddIdFailures) {
2163   EXPECT_CALL(*this, DemuxerOpened());
2164   demuxer_->Initialize(
2165       &host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
2166
2167   std::string audio_id = "audio1";
2168   std::string video_id = "video1";
2169
2170   ASSERT_EQ(AddId(audio_id, HAS_AUDIO), ChunkDemuxer::kOk);
2171
2172   // Adding an id with audio/video should fail because we already added audio.
2173   ASSERT_EQ(AddId(), ChunkDemuxer::kReachedIdLimit);
2174
2175   AppendInitSegmentWithSourceId(audio_id, HAS_AUDIO);
2176
2177   // Adding an id after append should fail.
2178   ASSERT_EQ(AddId(video_id, HAS_VIDEO), ChunkDemuxer::kReachedIdLimit);
2179 }
2180
2181 // Test that Read() calls after a RemoveId() return "end of stream" buffers.
2182 TEST_F(ChunkDemuxerTest, RemoveId) {
2183   std::string audio_id = "audio1";
2184   std::string video_id = "video1";
2185   ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
2186
2187   // Append audio and video data into separate source ids.
2188   AppendCluster(audio_id,
2189       GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
2190   AppendCluster(video_id,
2191       GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
2192
2193   // Read() from audio should return normal buffers.
2194   GenerateAudioStreamExpectedReads(0, 4);
2195
2196   // Remove the audio id.
2197   demuxer_->RemoveId(audio_id);
2198
2199   // Read() from audio should return "end of stream" buffers.
2200   bool audio_read_done = false;
2201   ReadAudio(base::Bind(&OnReadDone_EOSExpected, &audio_read_done));
2202   message_loop_.RunUntilIdle();
2203   EXPECT_TRUE(audio_read_done);
2204
2205   // Read() from video should still return normal buffers.
2206   GenerateVideoStreamExpectedReads(0, 4);
2207 }
2208
2209 // Test that removing an ID immediately after adding it does not interfere with
2210 // quota for new IDs in the future.
2211 TEST_F(ChunkDemuxerTest, RemoveAndAddId) {
2212   std::string audio_id_1 = "audio1";
2213   ASSERT_TRUE(AddId(audio_id_1, HAS_AUDIO) == ChunkDemuxer::kOk);
2214   demuxer_->RemoveId(audio_id_1);
2215
2216   std::string audio_id_2 = "audio2";
2217   ASSERT_TRUE(AddId(audio_id_2, HAS_AUDIO) == ChunkDemuxer::kOk);
2218 }
2219
2220 TEST_F(ChunkDemuxerTest, SeekCanceled) {
2221   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2222
2223   // Append cluster at the beginning of the stream.
2224   AppendCluster(GenerateCluster(0, 4));
2225
2226   // Seek to an unbuffered region.
2227   Seek(base::TimeDelta::FromSeconds(50));
2228
2229   // Attempt to read in the unbuffered area; the reads should not be fulfilled.
2230   bool audio_read_done = false;
2231   bool video_read_done = false;
2232   ReadAudio(base::Bind(&OnReadDone_AbortExpected, &audio_read_done));
2233   ReadVideo(base::Bind(&OnReadDone_AbortExpected, &video_read_done));
2234   EXPECT_FALSE(audio_read_done);
2235   EXPECT_FALSE(video_read_done);
2236
2237   // Now cancel the pending seek, which should flush the reads with empty
2238   // buffers.
2239   base::TimeDelta seek_time = base::TimeDelta::FromSeconds(0);
2240   demuxer_->CancelPendingSeek(seek_time);
2241   message_loop_.RunUntilIdle();
2242   EXPECT_TRUE(audio_read_done);
2243   EXPECT_TRUE(video_read_done);
2244
2245   // A seek back to the buffered region should succeed.
2246   Seek(seek_time);
2247   GenerateExpectedReads(0, 4);
2248 }
2249
2250 TEST_F(ChunkDemuxerTest, SeekCanceledWhileWaitingForSeek) {
2251   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2252
2253   // Append cluster at the beginning of the stream.
2254   AppendCluster(GenerateCluster(0, 4));
2255
2256   // Start waiting for a seek.
2257   base::TimeDelta seek_time1 = base::TimeDelta::FromSeconds(50);
2258   base::TimeDelta seek_time2 = base::TimeDelta::FromSeconds(0);
2259   demuxer_->StartWaitingForSeek(seek_time1);
2260
2261   // Now cancel the upcoming seek to an unbuffered region.
2262   demuxer_->CancelPendingSeek(seek_time2);
2263   demuxer_->Seek(seek_time1, NewExpectedStatusCB(PIPELINE_OK));
2264
2265   // Read requests should be fulfilled with empty buffers.
2266   bool audio_read_done = false;
2267   bool video_read_done = false;
2268   ReadAudio(base::Bind(&OnReadDone_AbortExpected, &audio_read_done));
2269   ReadVideo(base::Bind(&OnReadDone_AbortExpected, &video_read_done));
2270   EXPECT_TRUE(audio_read_done);
2271   EXPECT_TRUE(video_read_done);
2272
2273   // A seek back to the buffered region should succeed.
2274   Seek(seek_time2);
2275   GenerateExpectedReads(0, 4);
2276 }
2277
2278 // Test that Seek() successfully seeks to all source IDs.
2279 TEST_F(ChunkDemuxerTest, SeekAudioAndVideoSources) {
2280   std::string audio_id = "audio1";
2281   std::string video_id = "video1";
2282   ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
2283
2284   AppendCluster(
2285       audio_id,
2286       GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
2287   AppendCluster(
2288       video_id,
2289       GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
2290
2291   // Read() should return buffers at 0.
2292   bool audio_read_done = false;
2293   bool video_read_done = false;
2294   ReadAudio(base::Bind(&OnReadDone,
2295                        base::TimeDelta::FromMilliseconds(0),
2296                        &audio_read_done));
2297   ReadVideo(base::Bind(&OnReadDone,
2298                        base::TimeDelta::FromMilliseconds(0),
2299                        &video_read_done));
2300   EXPECT_TRUE(audio_read_done);
2301   EXPECT_TRUE(video_read_done);
2302
2303   // Seek to 3 (an unbuffered region).
2304   Seek(base::TimeDelta::FromSeconds(3));
2305
2306   audio_read_done = false;
2307   video_read_done = false;
2308   ReadAudio(base::Bind(&OnReadDone,
2309                        base::TimeDelta::FromSeconds(3),
2310                        &audio_read_done));
2311   ReadVideo(base::Bind(&OnReadDone,
2312                        base::TimeDelta::FromSeconds(3),
2313                        &video_read_done));
2314   // Read()s should not return until after data is appended at the Seek point.
2315   EXPECT_FALSE(audio_read_done);
2316   EXPECT_FALSE(video_read_done);
2317
2318   AppendCluster(audio_id,
2319                 GenerateSingleStreamCluster(
2320                     3000, 3092, kAudioTrackNum, kAudioBlockDuration));
2321   AppendCluster(video_id,
2322                 GenerateSingleStreamCluster(
2323                     3000, 3132, kVideoTrackNum, kVideoBlockDuration));
2324
2325   message_loop_.RunUntilIdle();
2326
2327   // Read() should return buffers at 3.
2328   EXPECT_TRUE(audio_read_done);
2329   EXPECT_TRUE(video_read_done);
2330 }
2331
2332 // Test that Seek() completes successfully when EndOfStream
2333 // is called before data is available for that seek point.
2334 // This scenario can occur when seeking past the end of stream
2335 // of either audio or video (or both).
2336 TEST_F(ChunkDemuxerTest, EndOfStreamAfterPastEosSeek) {
2337   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2338
2339   AppendCluster(GenerateSingleStreamCluster(0, 120, kAudioTrackNum, 10));
2340   AppendCluster(GenerateSingleStreamCluster(0, 100, kVideoTrackNum, 5));
2341
2342   // Seeking past the end of video.
2343   // Note: audio data is available for that seek point.
2344   bool seek_cb_was_called = false;
2345   base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(110);
2346   demuxer_->StartWaitingForSeek(seek_time);
2347   demuxer_->Seek(seek_time,
2348                  base::Bind(OnSeekDone_OKExpected, &seek_cb_was_called));
2349   message_loop_.RunUntilIdle();
2350
2351   EXPECT_FALSE(seek_cb_was_called);
2352
2353   EXPECT_CALL(host_, SetDuration(
2354       base::TimeDelta::FromMilliseconds(120)));
2355   MarkEndOfStream(PIPELINE_OK);
2356   message_loop_.RunUntilIdle();
2357
2358   EXPECT_TRUE(seek_cb_was_called);
2359
2360   ShutdownDemuxer();
2361 }
2362
2363 // Test that EndOfStream is ignored if it arrives during a pending seek
2364 // whose seek time precedes some existing ranges.
2365 TEST_F(ChunkDemuxerTest, EndOfStreamDuringPendingSeek) {
2366   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2367
2368   AppendCluster(GenerateSingleStreamCluster(0, 120, kAudioTrackNum, 10));
2369   AppendCluster(GenerateSingleStreamCluster(0, 100, kVideoTrackNum, 5));
2370   AppendCluster(GenerateSingleStreamCluster(200, 300, kAudioTrackNum, 10));
2371   AppendCluster(GenerateSingleStreamCluster(200, 300, kVideoTrackNum, 5));
2372
2373   bool seek_cb_was_called = false;
2374   base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(160);
2375   demuxer_->StartWaitingForSeek(seek_time);
2376   demuxer_->Seek(seek_time,
2377                  base::Bind(OnSeekDone_OKExpected, &seek_cb_was_called));
2378   message_loop_.RunUntilIdle();
2379
2380   EXPECT_FALSE(seek_cb_was_called);
2381
2382   EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(300)));
2383   MarkEndOfStream(PIPELINE_OK);
2384   message_loop_.RunUntilIdle();
2385
2386   EXPECT_FALSE(seek_cb_was_called);
2387
2388   demuxer_->UnmarkEndOfStream();
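  // Unmarking end of stream reopens the source for appending; the clusters
  // appended below cover the 160ms seek target, which is what finally allows
  // the pending seek callback to fire.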
2389
2390   AppendCluster(GenerateSingleStreamCluster(140, 180, kAudioTrackNum, 10));
2391   AppendCluster(GenerateSingleStreamCluster(140, 180, kVideoTrackNum, 5));
2392
2393   message_loop_.RunUntilIdle();
2394
2395   EXPECT_TRUE(seek_cb_was_called);
2396
2397   ShutdownDemuxer();
2398 }
2399
2400 // Test ranges in an audio-only stream.
2401 TEST_F(ChunkDemuxerTest, GetBufferedRanges_AudioIdOnly) {
2402   EXPECT_CALL(*this, DemuxerOpened());
2403   demuxer_->Initialize(
2404       &host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
2405
2406   ASSERT_EQ(AddId(kSourceId, HAS_AUDIO), ChunkDemuxer::kOk);
2407   AppendInitSegment(HAS_AUDIO);
2408
2409   // Test a simple cluster.
2410   AppendCluster(
2411       GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
2412
2413   CheckExpectedRanges("{ [0,92) }");
2414
2415   // Append a disjoint cluster to check for two separate ranges.
2416   AppendCluster(GenerateSingleStreamCluster(
2417       150, 219, kAudioTrackNum, kAudioBlockDuration));
2418
2419   CheckExpectedRanges("{ [0,92) [150,219) }");
2420 }
2421
2422 // Test ranges in a video-only stream.
2423 TEST_F(ChunkDemuxerTest, GetBufferedRanges_VideoIdOnly) {
2424   EXPECT_CALL(*this, DemuxerOpened());
2425   demuxer_->Initialize(
2426       &host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
2427
2428   ASSERT_EQ(AddId(kSourceId, HAS_VIDEO), ChunkDemuxer::kOk);
2429   AppendInitSegment(HAS_VIDEO);
2430
2431   // Test a simple cluster.
2432   AppendCluster(
2433       GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
2434
2435   CheckExpectedRanges("{ [0,132) }");
2436
2437   // Append a disjoint cluster to check for two separate ranges.
2438   AppendCluster(GenerateSingleStreamCluster(
2439       200, 299, kVideoTrackNum, kVideoBlockDuration));
2440
2441   CheckExpectedRanges("{ [0,132) [200,299) }");
2442 }
2443
2444 TEST_F(ChunkDemuxerTest, GetBufferedRanges_AudioVideo) {
2445   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2446
2447   // Audio: 0 -> 23
2448   // Video: 0 -> 33
2449   // Buffered Range: 0 -> 23
2450   // Audio block duration is smaller than video block duration,
2451   // so the buffered ranges should correspond to the audio blocks.
2452   AppendCluster(GenerateSingleStreamCluster(
2453       0, kAudioBlockDuration, kAudioTrackNum, kAudioBlockDuration));
2454   AppendCluster(GenerateSingleStreamCluster(
2455       0, kVideoBlockDuration, kVideoTrackNum, kVideoBlockDuration));
2456
2457   CheckExpectedRanges("{ [0,23) }");
2458
2459   // Audio: 300 -> 400
2460   // Video: 320 -> 420
2461   // Buffered Range: 320 -> 400  (end overlap)
2462   AppendCluster(GenerateSingleStreamCluster(300, 400, kAudioTrackNum, 50));
2463   AppendCluster(GenerateSingleStreamCluster(320, 420, kVideoTrackNum, 50));
2464
2465   CheckExpectedRanges("{ [0,23) [320,400) }");
2466
2467   // Audio: 520 -> 590
2468   // Video: 500 -> 570
2469   // Buffered Range: 520 -> 570  (front overlap)
2470   AppendCluster(GenerateSingleStreamCluster(520, 590, kAudioTrackNum, 70));
2471   AppendCluster(GenerateSingleStreamCluster(500, 570, kVideoTrackNum, 70));
2472
2473   CheckExpectedRanges("{ [0,23) [320,400) [520,570) }");
2474
2475   // Audio: 720 -> 750
2476   // Video: 700 -> 770
2477   // Buffered Range: 720 -> 750  (complete overlap, audio)
2478   AppendCluster(GenerateSingleStreamCluster(720, 750, kAudioTrackNum, 30));
2479   AppendCluster(GenerateSingleStreamCluster(700, 770, kVideoTrackNum, 70));
2480
2481   CheckExpectedRanges("{ [0,23) [320,400) [520,570) [720,750) }");
2482
2483   // Audio: 900 -> 970
2484   // Video: 920 -> 950
2485   // Buffered Range: 920 -> 950  (complete overlap, video)
2486   AppendCluster(GenerateSingleStreamCluster(900, 970, kAudioTrackNum, 70));
2487   AppendCluster(GenerateSingleStreamCluster(920, 950, kVideoTrackNum, 30));
2488
2489   CheckExpectedRanges("{ [0,23) [320,400) [520,570) [720,750) [920,950) }");
2490
2491   // Appending within an existing buffered range should not affect the ranges.
2492   AppendCluster(GenerateSingleStreamCluster(930, 950, kAudioTrackNum, 20));
2493   CheckExpectedRanges("{ [0,23) [320,400) [520,570) [720,750) [920,950) }");
2494
2495   // Appending to a single stream outside the buffered ranges should not
2496   // affect the buffered ranges.
2497   AppendCluster(GenerateSingleStreamCluster(1230, 1240, kVideoTrackNum, 10));
2498   CheckExpectedRanges("{ [0,23) [320,400) [520,570) [720,750) [920,950) }");
2499 }
2500
2501 TEST_F(ChunkDemuxerTest, GetBufferedRanges_AudioVideoText) {
2502   EXPECT_CALL(host_, AddTextStream(_, _));
2503   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));
2504
2505   // Append audio & video data
2506   AppendMuxedCluster(
2507       MuxedStreamInfo(kAudioTrackNum, "0K 23K"),
2508       MuxedStreamInfo(kVideoTrackNum, "0K 33"));
2509
2510   // Verify that a text track with no cues does not result in an empty buffered
2511   // range.
2512   CheckExpectedRanges("{ [0,46) }");
2513
2514   // Add some text cues.
2515   AppendMuxedCluster(
2516       MuxedStreamInfo(kAudioTrackNum, "100K 123K"),
2517       MuxedStreamInfo(kVideoTrackNum, "100K 133"),
2518       MuxedStreamInfo(kTextTrackNum, "100K 200K"));
2519
2520   // Verify that the text cues are not reflected in the buffered ranges.
2521   CheckExpectedRanges("{ [0,46) [100,146) }");
2522
2523   // Remove the buffered ranges.
2524   demuxer_->Remove(kSourceId, base::TimeDelta(),
2525                    base::TimeDelta::FromMilliseconds(250));
2526   CheckExpectedRanges("{ }");
2527 }
2528
2529 // Once MarkEndOfStream() is called, GetBufferedRanges should not cut off any
2530 // overhanging tails at the end of the ranges, since these are likely due to
2531 // block duration differences.
2532 TEST_F(ChunkDemuxerTest, GetBufferedRanges_EndOfStream) {
2533   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2534
2535   AppendMuxedCluster(
2536       MuxedStreamInfo(kAudioTrackNum, "0K 23K"),
2537       MuxedStreamInfo(kVideoTrackNum, "0K 33"));
2538
2539   CheckExpectedRanges("{ [0,46) }");
2540
2541   EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(66)));
2542   MarkEndOfStream(PIPELINE_OK);
2543
2544   // Verify that the range extends to the end of the video data.
2545   CheckExpectedRanges("{ [0,66) }");
2546
2547   // Verify that the range reverts to the intersection when end of stream
2548   // has been cancelled.
2549   demuxer_->UnmarkEndOfStream();
2550   CheckExpectedRanges("{ [0,46) }");
2551
2552   // Append and remove data so that the 2 streams' end ranges do not overlap.
2553
2554   EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(398)));
2555   AppendMuxedCluster(
2556       MuxedStreamInfo(kAudioTrackNum, "200K 223K"),
2557       MuxedStreamInfo(kVideoTrackNum, "200K 233 266 299 332K 365"));
2558
2559   // At this point, the per-stream ranges are as follows:
2560   // Audio: [0,46) [200,246)
2561   // Video: [0,66) [200,398)
2562   CheckExpectedRanges("{ [0,46) [200,246) }");
2563
2564   demuxer_->Remove(kSourceId, base::TimeDelta::FromMilliseconds(200),
2565                    base::TimeDelta::FromMilliseconds(300));
2566
2567   // At this point, the per-stream ranges are as follows:
2568   // Audio: [0,46)
2569   // Video: [0,66) [332,398)
2570   CheckExpectedRanges("{ [0,46) }");
2571
2572   AppendMuxedCluster(
2573       MuxedStreamInfo(kAudioTrackNum, "200K 223K"),
2574       MuxedStreamInfo(kVideoTrackNum, "200K 233"));
2575
2576   // At this point, the per-stream ranges are as follows:
2577   // Audio: [0,46) [200,246)
2578   // Video: [0,66) [200,266) [332,398)
2579   // NOTE: The last ranges of the two streams do not overlap in time.
2580   CheckExpectedRanges("{ [0,46) [200,246) }");
2581
2582   MarkEndOfStream(PIPELINE_OK);
2583
2584   // NOTE: The last range on each stream gets extended to the highest
2585   // end timestamp according to the spec. The last audio range gets extended
2586   // from [200,246) to [200,398) which is why the intersection results in the
2587   // middle range getting larger AND the new range appearing.
2588   CheckExpectedRanges("{ [0,46) [200,266) [332,398) }");
2589 }
2590
2591 TEST_F(ChunkDemuxerTest, DifferentStreamTimecodes) {
2592   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2593
2594   // Create a cluster where the video timecode begins 25ms after the audio.
2595   AppendCluster(GenerateCluster(0, 25, 8));
2596
2597   Seek(base::TimeDelta::FromSeconds(0));
2598   GenerateExpectedReads(0, 25, 8);
2599
2600   // Seek to 5 seconds.
2601   Seek(base::TimeDelta::FromSeconds(5));
2602
2603   // Generate a cluster to fulfill this seek, where audio timecode begins 25ms
2604   // after the video.
2605   AppendCluster(GenerateCluster(5025, 5000, 8));
2606   GenerateExpectedReads(5025, 5000, 8);
2607 }
2608
2609 TEST_F(ChunkDemuxerTest, DifferentStreamTimecodesSeparateSources) {
2610   std::string audio_id = "audio1";
2611   std::string video_id = "video1";
2612   ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
2613
2614   // Generate two streams where the video stream starts 5ms after the audio
2615   // stream and append them.
2616   AppendCluster(audio_id, GenerateSingleStreamCluster(
2617       25, 4 * kAudioBlockDuration + 25, kAudioTrackNum, kAudioBlockDuration));
2618   AppendCluster(video_id, GenerateSingleStreamCluster(
2619       30, 4 * kVideoBlockDuration + 30, kVideoTrackNum, kVideoBlockDuration));
2620
2621   // Both streams should be able to fulfill a seek to 25.
2622   Seek(base::TimeDelta::FromMilliseconds(25));
2623   GenerateAudioStreamExpectedReads(25, 4);
2624   GenerateVideoStreamExpectedReads(30, 4);
2625 }
2626
2627 TEST_F(ChunkDemuxerTest, DifferentStreamTimecodesOutOfRange) {
2628   std::string audio_id = "audio1";
2629   std::string video_id = "video1";
2630   ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
2631
2632   // Generate two streams where the video stream starts 10s after the audio
2633   // stream and append them.
2634   AppendCluster(audio_id, GenerateSingleStreamCluster(0,
2635       4 * kAudioBlockDuration + 0, kAudioTrackNum, kAudioBlockDuration));
2636   AppendCluster(video_id, GenerateSingleStreamCluster(10000,
2637       4 * kVideoBlockDuration + 10000, kVideoTrackNum, kVideoBlockDuration));
2638
2639   // Should not be able to fulfill a seek to 0.
2640   base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(0);
2641   demuxer_->StartWaitingForSeek(seek_time);
2642   demuxer_->Seek(seek_time,
2643                  NewExpectedStatusCB(PIPELINE_ERROR_ABORT));
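  // Audio can still deliver its buffer at time 0, but video, which has no data
  // anywhere near 0, is expected to report end of stream immediately, as
  // checked below.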
2644   ExpectRead(DemuxerStream::AUDIO, 0);
2645   ExpectEndOfStream(DemuxerStream::VIDEO);
2646 }
2647
2648 TEST_F(ChunkDemuxerTest, ClusterWithNoBuffers) {
2649   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2650
2651   // Generate and append an empty cluster beginning at 0.
2652   AppendEmptyCluster(0);
2653
2654   // Sanity check that data can be appended after this cluster correctly.
2655   AppendCluster(GenerateCluster(0, 2));
2656   ExpectRead(DemuxerStream::AUDIO, 0);
2657   ExpectRead(DemuxerStream::VIDEO, 0);
2658 }
2659
2660 TEST_F(ChunkDemuxerTest, CodecPrefixMatching) {
2661   ChunkDemuxer::Status expected = ChunkDemuxer::kNotSupported;
2662
2663 #if defined(USE_PROPRIETARY_CODECS)
2664   expected = ChunkDemuxer::kOk;
2665 #endif
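  // "avc1.4D4041" is an H.264/AVC codec string, so it should only be accepted
  // when proprietary codecs are compiled in. Per the test name, acceptance is
  // expected to be based on matching the "avc1" prefix rather than the full id.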
2666
2667   std::vector<std::string> codecs;
2668   codecs.push_back("avc1.4D4041");
2669
2670   EXPECT_EQ(demuxer_->AddId("source_id", "video/mp4", codecs), expected);
2671 }
2672
2673 // Test codec IDs that are not compliant with RFC 6381 but have been
2674 // seen in the wild.
2675 TEST_F(ChunkDemuxerTest, CodecIDsThatAreNotRFC6381Compliant) {
2676   ChunkDemuxer::Status expected = ChunkDemuxer::kNotSupported;
2677
2678 #if defined(USE_PROPRIETARY_CODECS)
2679   expected = ChunkDemuxer::kOk;
2680 #endif
2681   const char* codec_ids[] = {
2682     // GPAC places leading zeros on the audio object type.
2683     "mp4a.40.02",
2684     "mp4a.40.05"
2685   };
2686
2687   for (size_t i = 0; i < arraysize(codec_ids); ++i) {
2688     std::vector<std::string> codecs;
2689     codecs.push_back(codec_ids[i]);
2690
2691     ChunkDemuxer::Status result =
2692         demuxer_->AddId("source_id", "audio/mp4", codecs);
2693
2694     EXPECT_EQ(result, expected)
2695         << "Fail to add codec_id '" << codec_ids[i] << "'";
2696
2697     if (result == ChunkDemuxer::kOk)
2698       demuxer_->RemoveId("source_id");
2699   }
2700 }
2701
2702 TEST_F(ChunkDemuxerTest, EndOfStreamStillSetAfterSeek) {
2703   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2704
2705   EXPECT_CALL(host_, SetDuration(_))
2706       .Times(AnyNumber());
2707
2708   base::TimeDelta kLastAudioTimestamp = base::TimeDelta::FromMilliseconds(92);
2709   base::TimeDelta kLastVideoTimestamp = base::TimeDelta::FromMilliseconds(99);
2710
2711   AppendCluster(kDefaultFirstCluster());
2712   AppendCluster(kDefaultSecondCluster());
2713   MarkEndOfStream(PIPELINE_OK);
2714
2715   DemuxerStream::Status status;
2716   base::TimeDelta last_timestamp;
2717
2718   // Verify that we can read audio & video to the end w/o problems.
2719   ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
2720   EXPECT_EQ(DemuxerStream::kOk, status);
2721   EXPECT_EQ(kLastAudioTimestamp, last_timestamp);
2722
2723   ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
2724   EXPECT_EQ(DemuxerStream::kOk, status);
2725   EXPECT_EQ(kLastVideoTimestamp, last_timestamp);
2726
2727   // Seek back to 0 and verify that we can read to the end again.
2728   Seek(base::TimeDelta::FromMilliseconds(0));
2729
2730   ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
2731   EXPECT_EQ(DemuxerStream::kOk, status);
2732   EXPECT_EQ(kLastAudioTimestamp, last_timestamp);
2733
2734   ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
2735   EXPECT_EQ(DemuxerStream::kOk, status);
2736   EXPECT_EQ(kLastVideoTimestamp, last_timestamp);
2737 }
2738
2739 TEST_F(ChunkDemuxerTest, GetBufferedRangesBeforeInitSegment) {
2740   EXPECT_CALL(*this, DemuxerOpened());
2741   demuxer_->Initialize(&host_, CreateInitDoneCB(PIPELINE_OK), true);
2742   ASSERT_EQ(AddId("audio", HAS_AUDIO), ChunkDemuxer::kOk);
2743   ASSERT_EQ(AddId("video", HAS_VIDEO), ChunkDemuxer::kOk);
2744
2745   CheckExpectedRanges("audio", "{ }");
2746   CheckExpectedRanges("video", "{ }");
2747 }
2748
2749 // Test that Seek() completes successfully, and that subsequent reads reach
2750 // end of stream, when MarkEndOfStream() is called during the seek.
2751 TEST_F(ChunkDemuxerTest, EndOfStreamDuringSeek) {
2752   InSequence s;
2753
2754   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2755
2756   AppendCluster(kDefaultFirstCluster());
2757
2758   base::TimeDelta seek_time = base::TimeDelta::FromSeconds(0);
2759   demuxer_->StartWaitingForSeek(seek_time);
2760
2761   AppendCluster(kDefaultSecondCluster());
2762   EXPECT_CALL(host_, SetDuration(
2763       base::TimeDelta::FromMilliseconds(kDefaultSecondClusterEndTimestamp)));
2764   MarkEndOfStream(PIPELINE_OK);
2765
2766   demuxer_->Seek(seek_time, NewExpectedStatusCB(PIPELINE_OK));
2767
2768   GenerateExpectedReads(0, 4);
2769   GenerateExpectedReads(46, 66, 5);
2770
2771   EndOfStreamHelper end_of_stream_helper(demuxer_.get());
2772   end_of_stream_helper.RequestReads();
2773   end_of_stream_helper.CheckIfReadDonesWereCalled(true);
2774 }
2775
2776 TEST_F(ChunkDemuxerTest, ConfigChange_Video) {
2777   InSequence s;
2778
2779   ASSERT_TRUE(InitDemuxerWithConfigChangeData());
2780
2781   DemuxerStream::Status status;
2782   base::TimeDelta last_timestamp;
2783
2784   DemuxerStream* video = demuxer_->GetStream(DemuxerStream::VIDEO);
2785
2786   // Fetch initial video config and verify it matches what we expect.
2787   const VideoDecoderConfig& video_config_1 = video->video_decoder_config();
2788   ASSERT_TRUE(video_config_1.IsValidConfig());
2789   EXPECT_EQ(video_config_1.natural_size().width(), 320);
2790   EXPECT_EQ(video_config_1.natural_size().height(), 240);
2791
2792   ExpectRead(DemuxerStream::VIDEO, 0);
2793
2794   ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
2795
2796   ASSERT_EQ(status, DemuxerStream::kConfigChanged);
2797   EXPECT_EQ(last_timestamp.InMilliseconds(), 501);
2798
2799   // Fetch the new decoder config.
2800   const VideoDecoderConfig& video_config_2 = video->video_decoder_config();
2801   ASSERT_TRUE(video_config_2.IsValidConfig());
2802   EXPECT_EQ(video_config_2.natural_size().width(), 640);
2803   EXPECT_EQ(video_config_2.natural_size().height(), 360);
2804
2805   ExpectRead(DemuxerStream::VIDEO, 527);
2806
2807   // Read until the next config change.
2808   ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
2809   ASSERT_EQ(status, DemuxerStream::kConfigChanged);
2810   EXPECT_EQ(last_timestamp.InMilliseconds(), 793);
2811
2812   // Get the new config and verify that it matches the first one.
2813   ASSERT_TRUE(video_config_1.Matches(video->video_decoder_config()));
2814
2815   ExpectRead(DemuxerStream::VIDEO, 801);
2816
2817   // Read until the end of the stream just to make sure there aren't any other
2818   // config changes.
2819   ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
2820   ASSERT_EQ(status, DemuxerStream::kOk);
2821 }
2822
2823 TEST_F(ChunkDemuxerTest, ConfigChange_Audio) {
2824   InSequence s;
2825
2826   ASSERT_TRUE(InitDemuxerWithConfigChangeData());
2827
2828   DemuxerStream::Status status;
2829   base::TimeDelta last_timestamp;
2830
2831   DemuxerStream* audio = demuxer_->GetStream(DemuxerStream::AUDIO);
2832
2833   // Fetch initial audio config and verify it matches what we expect.
2834   const AudioDecoderConfig& audio_config_1 = audio->audio_decoder_config();
2835   ASSERT_TRUE(audio_config_1.IsValidConfig());
2836   EXPECT_EQ(audio_config_1.samples_per_second(), 44100);
2837   EXPECT_EQ(audio_config_1.extra_data_size(), 3863u);
2838
2839   ExpectRead(DemuxerStream::AUDIO, 0);
2840
2841   // The first config change seen is from a splice frame representing an overlap
2842   // of buffers from config 1 by buffers from config 2.
2843   ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
2844   ASSERT_EQ(status, DemuxerStream::kConfigChanged);
2845   EXPECT_EQ(last_timestamp.InMilliseconds(), 524);
2846
2847   // Fetch the new decoder config.
2848   const AudioDecoderConfig& audio_config_2 = audio->audio_decoder_config();
2849   ASSERT_TRUE(audio_config_2.IsValidConfig());
2850   EXPECT_EQ(audio_config_2.samples_per_second(), 44100);
2851   EXPECT_EQ(audio_config_2.extra_data_size(), 3935u);
2852
2853   // The next config change is from a splice frame representing an overlap of
2854   // buffers from config 2 by buffers from config 1.
2855   ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
2856   ASSERT_EQ(status, DemuxerStream::kConfigChanged);
2857   EXPECT_EQ(last_timestamp.InMilliseconds(), 782);
2858   ASSERT_TRUE(audio_config_1.Matches(audio->audio_decoder_config()));
2859
2860   // Read until the end of the stream just to make sure there aren't any other
2861   // config changes.
2862   ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
2863   ASSERT_EQ(status, DemuxerStream::kOk);
2864   EXPECT_EQ(last_timestamp.InMilliseconds(), 2744);
2865 }
2866
2867 TEST_F(ChunkDemuxerTest, ConfigChange_Seek) {
2868   InSequence s;
2869
2870   ASSERT_TRUE(InitDemuxerWithConfigChangeData());
2871
2872   DemuxerStream* video = demuxer_->GetStream(DemuxerStream::VIDEO);
2873
2874   // Fetch initial video config and verify it matches what we expect.
2875   const VideoDecoderConfig& video_config_1 = video->video_decoder_config();
2876   ASSERT_TRUE(video_config_1.IsValidConfig());
2877   EXPECT_EQ(video_config_1.natural_size().width(), 320);
2878   EXPECT_EQ(video_config_1.natural_size().height(), 240);
2879
2880   ExpectRead(DemuxerStream::VIDEO, 0);
2881
2882   // Seek to a location with a different config.
2883   Seek(base::TimeDelta::FromMilliseconds(527));
2884
2885   // Verify that the config change is signaled.
2886   ExpectConfigChanged(DemuxerStream::VIDEO);
2887
2888   // Fetch the new decoder config and verify it is what we expect.
2889   const VideoDecoderConfig& video_config_2 = video->video_decoder_config();
2890   ASSERT_TRUE(video_config_2.IsValidConfig());
2891   EXPECT_EQ(video_config_2.natural_size().width(), 640);
2892   EXPECT_EQ(video_config_2.natural_size().height(), 360);
2893
2894   // Verify that Read() will return a buffer now.
2895   ExpectRead(DemuxerStream::VIDEO, 527);
2896
2897   // Seek back to the beginning and verify we get another config change.
2898   Seek(base::TimeDelta::FromMilliseconds(0));
2899   ExpectConfigChanged(DemuxerStream::VIDEO);
2900   ASSERT_TRUE(video_config_1.Matches(video->video_decoder_config()));
2901   ExpectRead(DemuxerStream::VIDEO, 0);
2902
2903   // Seek to a location that requires a config change and then
2904   // seek to a new location that has the same configuration as
2905   // the start of the file without a Read() in the middle.
2906   Seek(base::TimeDelta::FromMilliseconds(527));
2907   Seek(base::TimeDelta::FromMilliseconds(801));
2908
2909   // Verify that no config change is signaled.
2910   ExpectRead(DemuxerStream::VIDEO, 801);
2911   ASSERT_TRUE(video_config_1.Matches(video->video_decoder_config()));
2912 }
2913
2914 TEST_F(ChunkDemuxerTest, TimestampPositiveOffset) {
2915   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2916
2917   ASSERT_TRUE(SetTimestampOffset(kSourceId, base::TimeDelta::FromSeconds(30)));
2918   AppendCluster(GenerateCluster(0, 2));
2919
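       // With the +30 s offset, the cluster appended at media time 0 surfaces at
       // 30000 ms, which is where the seek and reads below expect it.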
2920   Seek(base::TimeDelta::FromMilliseconds(30000));
2921
2922   GenerateExpectedReads(30000, 2);
2923 }
2924
2925 TEST_F(ChunkDemuxerTest, TimestampNegativeOffset) {
2926   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2927
2928   ASSERT_TRUE(SetTimestampOffset(kSourceId, base::TimeDelta::FromSeconds(-1)));
2929   AppendCluster(GenerateCluster(1000, 2));
2930
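       // With the -1 s offset, the cluster appended at 1000 ms surfaces at 0,
       // so the reads below start from the beginning without a seek.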
2931   GenerateExpectedReads(0, 2);
2932 }
2933
2934 TEST_F(ChunkDemuxerTest, TimestampOffsetSeparateStreams) {
2935   std::string audio_id = "audio1";
2936   std::string video_id = "video1";
2937   ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
2938
2939   ASSERT_TRUE(SetTimestampOffset(
2940       audio_id, base::TimeDelta::FromMilliseconds(-2500)));
2941   ASSERT_TRUE(SetTimestampOffset(
2942       video_id, base::TimeDelta::FromMilliseconds(-2500)));
2943   AppendCluster(audio_id, GenerateSingleStreamCluster(2500,
2944       2500 + kAudioBlockDuration * 4, kAudioTrackNum, kAudioBlockDuration));
2945   AppendCluster(video_id, GenerateSingleStreamCluster(2500,
2946       2500 + kVideoBlockDuration * 4, kVideoTrackNum, kVideoBlockDuration));
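       // Both -2500 ms offsets shift the clusters appended at 2500 ms back to a
       // start time of 0 in their respective streams.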
2947   GenerateAudioStreamExpectedReads(0, 4);
2948   GenerateVideoStreamExpectedReads(0, 4);
2949
2950   Seek(base::TimeDelta::FromMilliseconds(27300));
2951
2952   ASSERT_TRUE(SetTimestampOffset(
2953       audio_id, base::TimeDelta::FromMilliseconds(27300)));
2954   ASSERT_TRUE(SetTimestampOffset(
2955       video_id, base::TimeDelta::FromMilliseconds(27300)));
2956   AppendCluster(audio_id, GenerateSingleStreamCluster(
2957       0, kAudioBlockDuration * 4, kAudioTrackNum, kAudioBlockDuration));
2958   AppendCluster(video_id, GenerateSingleStreamCluster(
2959       0, kVideoBlockDuration * 4, kVideoTrackNum, kVideoBlockDuration));
2960   GenerateVideoStreamExpectedReads(27300, 4);
2961   GenerateAudioStreamExpectedReads(27300, 4);
2962 }
2963
2964 TEST_F(ChunkDemuxerTest, IsParsingMediaSegmentMidMediaSegment) {
2965   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2966
2967   scoped_ptr<Cluster> cluster = GenerateCluster(0, 2);
2968   // Append only part of the cluster data.
2969   AppendData(cluster->data(), cluster->size() - 13);
2970
2971   // Confirm we're in the middle of parsing a media segment.
2972   ASSERT_TRUE(demuxer_->IsParsingMediaSegment(kSourceId));
2973
2974   demuxer_->Abort(kSourceId,
2975                   append_window_start_for_next_append_,
2976                   append_window_end_for_next_append_,
2977                   &timestamp_offset_map_[kSourceId]);
2978
2979   // After Abort(), parsing should no longer be in the middle of a media
2980   // segment.
2981   ASSERT_FALSE(demuxer_->IsParsingMediaSegment(kSourceId));
2982 }
2983
2984 #if defined(USE_PROPRIETARY_CODECS)
2985 #if defined(ENABLE_MPEG2TS_STREAM_PARSER)
2986 TEST_F(ChunkDemuxerTest, EmitBuffersDuringAbort) {
2987   EXPECT_CALL(*this, DemuxerOpened());
2988   demuxer_->Initialize(
2989       &host_, CreateInitDoneCB(kInfiniteDuration(), PIPELINE_OK), true);
2990   EXPECT_EQ(ChunkDemuxer::kOk, AddIdForMp2tSource(kSourceId));
2991
2992   // For info:
2993   // DTS/PTS derived using dvbsnoop -s ts -if bear-1280x720.ts -tssubdecode
2994   // Video: first PES:
2995   //        PTS: 126912 (0x0001efc0)  [= 90 kHz-Timestamp: 0:00:01.4101]
2996   //        DTS: 123909 (0x0001e405)  [= 90 kHz-Timestamp: 0:00:01.3767]
2997   // Audio: first PES:
2998   //        PTS: 126000 (0x0001ec30)  [= 90 kHz-Timestamp: 0:00:01.4000]
2999   //        DTS: 123910 (0x0001e406)  [= 90 kHz-Timestamp: 0:00:01.3767]
3000   // Video: last PES:
3001   //        PTS: 370155 (0x0005a5eb)  [= 90 kHz-Timestamp: 0:00:04.1128]
3002   //        DTS: 367152 (0x00059a30)  [= 90 kHz-Timestamp: 0:00:04.0794]
3003   // Audio: last PES:
3004   //        PTS: 353788 (0x000565fc)  [= 90 kHz-Timestamp: 0:00:03.9309]
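       //
       // For reference, a 90 kHz tick count converts to a timestamp by dividing
       // by 90000; e.g. 126912 / 90000 is roughly 1.4101 s, matching the first
       // video PTS above. A minimal sketch of that conversion, written as a
       // hypothetical helper rather than anything this test calls:
       //   base::TimeDelta TimeFrom90kHzTicks(int64 ticks) {
       //     return base::TimeDelta::FromMicroseconds(ticks * 1000000 / 90000);
       //   }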
3005
3006   scoped_refptr<DecoderBuffer> buffer = ReadTestDataFile("bear-1280x720.ts");
3007   AppendData(kSourceId, buffer->data(), buffer->data_size());
3008
3009   // Confirm we're in the middle of parsing a media segment.
3010   ASSERT_TRUE(demuxer_->IsParsingMediaSegment(kSourceId));
3011
3012   // Aborting the MPEG-2 TS parser triggers the emission of the last video
3013   // buffer that is pending in the stream parser.
3014   Ranges<base::TimeDelta> range_before_abort =
3015       demuxer_->GetBufferedRanges(kSourceId);
3016   demuxer_->Abort(kSourceId,
3017                   append_window_start_for_next_append_,
3018                   append_window_end_for_next_append_,
3019                   &timestamp_offset_map_[kSourceId]);
3020   Ranges<base::TimeDelta> range_after_abort =
3021       demuxer_->GetBufferedRanges(kSourceId);
3022
3023   ASSERT_EQ(range_before_abort.size(), 1u);
3024   ASSERT_EQ(range_after_abort.size(), 1u);
3025   EXPECT_EQ(range_after_abort.start(0), range_before_abort.start(0));
3026   EXPECT_GT(range_after_abort.end(0), range_before_abort.end(0));
3027 }
3028
3029 TEST_F(ChunkDemuxerTest, SeekCompleteDuringAbort) {
3030   EXPECT_CALL(*this, DemuxerOpened());
3031   demuxer_->Initialize(
3032       &host_, CreateInitDoneCB(kInfiniteDuration(), PIPELINE_OK), true);
3033   EXPECT_EQ(ChunkDemuxer::kOk, AddIdForMp2tSource(kSourceId));
3034
3035   // For info:
3036   // DTS/PTS derived using dvbsnoop -s ts -if bear-1280x720.ts -tssubdecode
3037   // Video: first PES:
3038   //        PTS: 126912 (0x0001efc0)  [= 90 kHz-Timestamp: 0:00:01.4101]
3039   //        DTS: 123909 (0x0001e405)  [= 90 kHz-Timestamp: 0:00:01.3767]
3040   // Audio: first PES:
3041   //        PTS: 126000 (0x0001ec30)  [= 90 kHz-Timestamp: 0:00:01.4000]
3042   //        DTS: 123910 (0x0001e406)  [= 90 kHz-Timestamp: 0:00:01.3767]
3043   // Video: last PES:
3044   //        PTS: 370155 (0x0005a5eb)  [= 90 kHz-Timestamp: 0:00:04.1128]
3045   //        DTS: 367152 (0x00059a30)  [= 90 kHz-Timestamp: 0:00:04.0794]
3046   // Audio: last PES:
3047   //        PTS: 353788 (0x000565fc)  [= 90 kHz-Timestamp: 0:00:03.9309]
3048
3049   scoped_refptr<DecoderBuffer> buffer = ReadTestDataFile("bear-1280x720.ts");
3050   AppendData(kSourceId, buffer->data(), buffer->data_size());
3051
3052   // Confirm we're in the middle of parsing a media segment.
3053   ASSERT_TRUE(demuxer_->IsParsingMediaSegment(kSourceId));
3054
3055   // Seek to a time corresponding to buffers that will be emitted during the
3056   // abort.
3057   Seek(base::TimeDelta::FromMilliseconds(4110));
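       // (4110 ms is just below the last video PTS listed above:
       // 370155 / 90 is roughly 4112.8 ms.)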
3058
3059   // Aborting the MPEG-2 TS parser triggers the emission of the last video
3060   // buffer that is pending in the stream parser.
3061   demuxer_->Abort(kSourceId,
3062                   append_window_start_for_next_append_,
3063                   append_window_end_for_next_append_,
3064                   &timestamp_offset_map_[kSourceId]);
3065 }
3066
3067 #endif
3068 #endif
3069
3070 TEST_F(ChunkDemuxerTest, WebMIsParsingMediaSegmentDetection) {
3071   const uint8 kBuffer[] = {
3072     0x1F, 0x43, 0xB6, 0x75, 0x83,  // CLUSTER (size = 3)
3073     0xE7, 0x81, 0x01,                // Cluster TIMECODE (value = 1)
3074
3075     0x1F, 0x43, 0xB6, 0x75, 0xFF,  // CLUSTER (size = unknown; really 3 due to:)
3076     0xE7, 0x81, 0x02,                // Cluster TIMECODE (value = 2)
3077     /* e.g. put some blocks here... */
3078     0x1A, 0x45, 0xDF, 0xA3, 0x8A,  // EBMLHEADER (size = 10, not fully appended)
3079   };
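       // The size byte following each element ID above is EBML-coded: a leading
       // 1 bit in the first size byte marks a one-byte length, so 0x83 decodes
       // to 3, 0x81 to 1 and 0x8A to 10, while 0xFF (all remaining bits set)
       // means "unknown size". A minimal sketch of the one-byte case, as a
       // hypothetical helper rather than one used here:
       //   int DecodeOneByteEbmlSize(uint8 size_byte) { return size_byte & 0x7F; }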
3080
3081   // This array indicates the expected return value of IsParsingMediaSegment()
3082   // following each incrementally appended byte in |kBuffer|.
3083   const bool kExpectedReturnValues[] = {
3084     false, false, false, false, true,
3085     true, true, false,
3086
3087     false, false, false, false, true,
3088     true, true, true,
3089
3090     true, true, true, true, false,
3091   };
3092
3093   COMPILE_ASSERT(arraysize(kBuffer) == arraysize(kExpectedReturnValues),
3094       test_arrays_out_of_sync);
3095   COMPILE_ASSERT(arraysize(kBuffer) == sizeof(kBuffer), not_one_byte_per_index);
3096
3097   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3098
3099   for (size_t i = 0; i < sizeof(kBuffer); i++) {
3100     DVLOG(3) << "Appending and testing index " << i;
3101     AppendData(kBuffer + i, 1);
3102     bool expected_return_value = kExpectedReturnValues[i];
3103     EXPECT_EQ(expected_return_value,
3104               demuxer_->IsParsingMediaSegment(kSourceId));
3105   }
3106 }
3107
3108 TEST_F(ChunkDemuxerTest, DurationChange) {
3109   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3110   const int kStreamDuration = kDefaultDuration().InMilliseconds();
3111
3112   // Add data leading up to the currently set duration.
3113   AppendCluster(GenerateCluster(kStreamDuration - kAudioBlockDuration,
3114                                 kStreamDuration - kVideoBlockDuration,
3115                                 2));
3116
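       // The appended audio and video blocks end exactly at the old duration of
       // 201224 ms (video starting at 201191, audio at 201201).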
3117   CheckExpectedRanges(kSourceId, "{ [201191,201224) }");
3118
3119   // Add data beginning at the currently set duration and expect a new duration
3120   // to be signaled. Note that the last video block will have a higher end
3121   // timestamp than the last audio block.
3122   const int kNewStreamDurationVideo = kStreamDuration + kVideoBlockDuration;
3123   EXPECT_CALL(host_, SetDuration(
3124       base::TimeDelta::FromMilliseconds(kNewStreamDurationVideo)));
3125   AppendCluster(GenerateCluster(kDefaultDuration().InMilliseconds(), 2));
3126
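       // The buffered range ends at the audio end (201224 + 23 = 201247); the
       // video runs on to 201257, which is what the new duration above reflects.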
3127   CheckExpectedRanges(kSourceId, "{ [201191,201247) }");
3128
3129   // Add more data to the end of each media type. Note that the last audio block
3130   // will have a higher end timestamp than the last video block.
3131   const int kFinalStreamDuration = kStreamDuration + kAudioBlockDuration * 3;
3132   EXPECT_CALL(host_, SetDuration(
3133       base::TimeDelta::FromMilliseconds(kFinalStreamDuration)));
3134   AppendCluster(GenerateCluster(kStreamDuration + kAudioBlockDuration,
3135                                 kStreamDuration + kVideoBlockDuration,
3136                                 3));
3137
3138   // See that the range has increased appropriately (but not to the full
3139   // duration of 201293, since there is not enough video appended for that).
3140   CheckExpectedRanges(kSourceId, "{ [201191,201290) }");
3141 }
3142
3143 TEST_F(ChunkDemuxerTest, DurationChangeTimestampOffset) {
3144   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3145   ASSERT_TRUE(SetTimestampOffset(kSourceId, kDefaultDuration()));
3146   EXPECT_CALL(host_, SetDuration(
3147       kDefaultDuration() + base::TimeDelta::FromMilliseconds(
3148           kVideoBlockDuration * 2)));
3149   AppendCluster(GenerateCluster(0, 4));
3150 }
3151
3152 TEST_F(ChunkDemuxerTest, EndOfStreamTruncateDuration) {
3153   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3154
3155   AppendCluster(kDefaultFirstCluster());
3156
3157   EXPECT_CALL(host_, SetDuration(
3158       base::TimeDelta::FromMilliseconds(kDefaultFirstClusterEndTimestamp)));
3159   MarkEndOfStream(PIPELINE_OK);
3160 }
3161
3162
3163 TEST_F(ChunkDemuxerTest, ZeroLengthAppend) {
3164   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3165   AppendData(NULL, 0);
3166 }
3167
3168 TEST_F(ChunkDemuxerTest, AppendAfterEndOfStream) {
3169   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3170
3171   EXPECT_CALL(host_, SetDuration(_))
3172       .Times(AnyNumber());
3173
3174   AppendCluster(kDefaultFirstCluster());
3175   MarkEndOfStream(PIPELINE_OK);
3176
3177   demuxer_->UnmarkEndOfStream();
3178
3179   AppendCluster(kDefaultSecondCluster());
3180   MarkEndOfStream(PIPELINE_OK);
3181 }
3182
3183 // Test receiving a Shutdown() call before we get an Initialize()
3184 // call. This can happen if video element gets destroyed before
3185 // the pipeline has a chance to initialize the demuxer.
3186 TEST_F(ChunkDemuxerTest, Shutdown_BeforeInitialize) {
3187   demuxer_->Shutdown();
3188   demuxer_->Initialize(
3189       &host_, CreateInitDoneCB(DEMUXER_ERROR_COULD_NOT_OPEN), true);
3190   message_loop_.RunUntilIdle();
3191 }
3192
3193 // Verifies that signaling end of stream while stalled at a gap
3194 // boundary does not trigger end of stream buffers to be returned.
3195 TEST_F(ChunkDemuxerTest, EndOfStreamWhileWaitingForGapToBeFilled) {
3196   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3197
3198   AppendCluster(0, 10);
3199   AppendCluster(300, 10);
3200   CheckExpectedRanges(kSourceId, "{ [0,132) [300,432) }");
3201
3202   GenerateExpectedReads(0, 10);
3203
3204   bool audio_read_done = false;
3205   bool video_read_done = false;
3206   ReadAudio(base::Bind(&OnReadDone,
3207                        base::TimeDelta::FromMilliseconds(138),
3208                        &audio_read_done));
3209   ReadVideo(base::Bind(&OnReadDone,
3210                        base::TimeDelta::FromMilliseconds(138),
3211                        &video_read_done));
3212
3213   // Verify that the reads didn't complete.
3214   EXPECT_FALSE(audio_read_done);
3215   EXPECT_FALSE(video_read_done);
3216
3217   EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(438)));
3218   MarkEndOfStream(PIPELINE_OK);
3219
3220   // Verify that the reads still haven't completed.
3221   EXPECT_FALSE(audio_read_done);
3222   EXPECT_FALSE(video_read_done);
3223
3224   demuxer_->UnmarkEndOfStream();
3225
3226   AppendCluster(138, 22);
3227
3228   message_loop_.RunUntilIdle();
3229
3230   CheckExpectedRanges(kSourceId, "{ [0,435) }");
3231
3232   // Verify that the reads have completed.
3233   EXPECT_TRUE(audio_read_done);
3234   EXPECT_TRUE(video_read_done);
3235
3236   // Read the rest of the buffers.
3237   GenerateExpectedReads(161, 171, 20);
3238
3239   // Verify that reads block because the end of stream state was cleared.
3240   audio_read_done = false;
3241   video_read_done = false;
3242   ReadAudio(base::Bind(&OnReadDone_EOSExpected,
3243                        &audio_read_done));
3244   ReadVideo(base::Bind(&OnReadDone_EOSExpected,
3245                        &video_read_done));
3246
3247   // Verify that the reads don't complete.
3248   EXPECT_FALSE(audio_read_done);
3249   EXPECT_FALSE(video_read_done);
3250
3251   EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(437)));
3252   MarkEndOfStream(PIPELINE_OK);
3253
3254   EXPECT_TRUE(audio_read_done);
3255   EXPECT_TRUE(video_read_done);
3256 }
3257
3258 TEST_F(ChunkDemuxerTest, CanceledSeekDuringInitialPreroll) {
3259   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3260
3261   // Cancel preroll.
3262   base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(200);
3263   demuxer_->CancelPendingSeek(seek_time);
3264
3265   // Initiate the seek to the new location.
3266   Seek(seek_time);
3267
3268   // Append data to satisfy the seek.
3269   AppendCluster(seek_time.InMilliseconds(), 10);
3270 }
3271
3272 TEST_F(ChunkDemuxerTest, GCDuringSeek) {
3273   ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
3274
3275   demuxer_->SetMemoryLimitsForTesting(5 * kBlockSize);
3276
3277   base::TimeDelta seek_time1 = base::TimeDelta::FromMilliseconds(1000);
3278   base::TimeDelta seek_time2 = base::TimeDelta::FromMilliseconds(500);
3279
3280   // Initiate a seek to |seek_time1|.
3281   Seek(seek_time1);
3282
3283   // Append data to satisfy the first seek request.
3284   AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
3285                             seek_time1.InMilliseconds(), 5);
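       // Five 23 ms audio blocks starting at 1000 ms should produce the 115 ms
       // range checked below.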
3286   CheckExpectedRanges(kSourceId, "{ [1000,1115) }");
3287
3288   // Signal that the second seek is starting.
3289   demuxer_->StartWaitingForSeek(seek_time2);
3290
3291   // Append data to satisfy the second seek. This append triggers
3292   // the garbage collection logic since we set the memory limit to
3293   // 5 blocks.
3294   AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
3295                             seek_time2.InMilliseconds(), 5);
3296
3297   // Verify that the buffers that cover |seek_time2| do not get
3298   // garbage collected.
3299   CheckExpectedRanges(kSourceId, "{ [500,615) }");
3300
3301   // Complete the seek.
3302   demuxer_->Seek(seek_time2, NewExpectedStatusCB(PIPELINE_OK));
3303
3304
3305   // Append more data and make sure that the blocks for |seek_time2|
3306   // don't get removed.
3307   //
3308   // NOTE: The current GC algorithm tries to preserve the GOP at the
3309   //  current position as well as the last appended GOP. This is
3310   //  why there are 2 ranges in the expectations.
3311   AppendSingleStreamCluster(kSourceId, kAudioTrackNum, 700, 5);
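       // (Each audio block is 23 ms, so [500,592) holds the four blocks kept
       // around the seek position and [792,815) the last appended block.)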
3312   CheckExpectedRanges(kSourceId, "{ [500,592) [792,815) }");
3313 }
3314
3315 TEST_F(ChunkDemuxerTest, AppendWindow_Video) {
3316   ASSERT_TRUE(InitDemuxer(HAS_VIDEO));
3317   DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::VIDEO);
3318
3319   // Set the append window to [50,280).
3320   append_window_start_for_next_append_ = base::TimeDelta::FromMilliseconds(50);
3321   append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(280);
3322
3323   // Append a cluster that starts before and ends after the append window.
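       // (In these shorthand strings each number is a buffer timestamp in
       // milliseconds and a trailing "K" marks a keyframe.)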
3324   AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
3325                             "0K 30 60 90 120K 150 180 210 240K 270 300 330K");
3326
3327   // Verify that GOPs that start outside the window are not included
3328   // in the buffer. Also verify that buffers that start inside the
3329   // window and extend beyond the end of the window are not included.
3330   CheckExpectedRanges(kSourceId, "{ [120,270) }");
3331   CheckExpectedBuffers(stream, "120 150 180 210 240");
3332
3333   // Extend the append window to [50,650).
3334   append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(650);
3335
3336   // Append more data and verify that the added buffers start at the next
3337   // keyframe.
3338   AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
3339                             "360 390 420K 450 480 510 540K 570 600 630K");
3340   CheckExpectedRanges(kSourceId, "{ [120,270) [420,630) }");
3341 }
3342
3343 TEST_F(ChunkDemuxerTest, AppendWindow_Audio) {
3344   ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
3345   DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::AUDIO);
3346
3347   // Set the append window to [50,280).
3348   append_window_start_for_next_append_ = base::TimeDelta::FromMilliseconds(50);
3349   append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(280);
3350
3351   // Append a cluster that starts before and ends after the append window.
3352   AppendSingleStreamCluster(
3353       kSourceId, kAudioTrackNum,
3354       "0K 30K 60K 90K 120K 150K 180K 210K 240K 270K 300K 330K");
3355
3356   // Verify that frames that end outside the window are not included
3357   // in the buffer. Also verify that buffers that start inside the
3358   // window and extend beyond the end of the window are not included.
3359   //
3360   // The first 50ms of the range should be truncated since it overlaps
3361   // the start of the append window.
3362   CheckExpectedRanges(kSourceId, "{ [50,280) }");
3363
3364   // The "50P" buffer is the "0" buffer marked for complete discard.  The next
3365   // "50" buffer is the "30" buffer marked with 20ms of start discard.
3366   CheckExpectedBuffers(stream, "50P 50 60 90 120 150 180 210 240");
3367
3368   // Extend the append window to [50,650).
3369   append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(650);
3370
3371   // Append more data and verify that a new range is created.
3372   AppendSingleStreamCluster(
3373       kSourceId, kAudioTrackNum,
3374       "360K 390K 420K 450K 480K 510K 540K 570K 600K 630K");
3375   CheckExpectedRanges(kSourceId, "{ [50,280) [360,650) }");
3376 }
3377
3378 TEST_F(ChunkDemuxerTest, AppendWindow_AudioOverlapStartAndEnd) {
3379   ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
3380
3381   // Set the append window to [10,20).
3382   append_window_start_for_next_append_ = base::TimeDelta::FromMilliseconds(10);
3383   append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(20);
3384
3385   // Append a cluster that starts before and ends after the append window.
3386   AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "0K");
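       // The single audio buffer starts at 0 and, given its default block
       // duration, extends past 20 ms, so it overlaps both edges of the window.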
3387
3388   // Verify the append is clipped to the append window.
3389   CheckExpectedRanges(kSourceId, "{ [10,20) }");
3390 }
3391
3392 TEST_F(ChunkDemuxerTest, AppendWindow_WebMFile_AudioOnly) {
3393   EXPECT_CALL(*this, DemuxerOpened());
3394   demuxer_->Initialize(
3395       &host_,
3396       CreateInitDoneCB(base::TimeDelta::FromMilliseconds(2744), PIPELINE_OK),
3397       true);
3398   ASSERT_EQ(ChunkDemuxer::kOk, AddId(kSourceId, HAS_AUDIO));
3399
3400   // Set the append window to [50,150).
3401   append_window_start_for_next_append_ = base::TimeDelta::FromMilliseconds(50);
3402   append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(150);
3403
3404   // Read a WebM file into memory and send the data to the demuxer.  The chunk
3405   // size has been chosen carefully to ensure that the preroll buffer used
3406   // by the partial append window trim comes from a previous Append() call.
3407   scoped_refptr<DecoderBuffer> buffer =
3408       ReadTestDataFile("bear-320x240-audio-only.webm");
3409   AppendDataInPieces(buffer->data(), buffer->data_size(), 128);
3410
3411   DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::AUDIO);
3412   CheckExpectedBuffers(stream, "50P 50 62 86 109 122 125 128");
3413 }
3414
3415 TEST_F(ChunkDemuxerTest, AppendWindow_AudioConfigUpdateRemovesPreroll) {
3416   EXPECT_CALL(*this, DemuxerOpened());
3417   demuxer_->Initialize(
3418       &host_,
3419       CreateInitDoneCB(base::TimeDelta::FromMilliseconds(2744), PIPELINE_OK),
3420       true);
3421   ASSERT_EQ(ChunkDemuxer::kOk, AddId(kSourceId, HAS_AUDIO));
3422
3423   // Set the append window such that the first file is completely before the
3424   // append window.
3425   // TODO(wolenetz/acolwell): Update this duration once the files are fixed to
3426   // have the correct duration in their init segments, and the
3427   // CreateInitDoneCB() call, above, is fixed to used that duration. See
3428   // http://crbug.com/354284.
3429   const base::TimeDelta duration_1 = base::TimeDelta::FromMilliseconds(2746);
3430   append_window_start_for_next_append_ = duration_1;
3431
3432   // Read a WebM file into memory and append the data.
3433   scoped_refptr<DecoderBuffer> buffer =
3434       ReadTestDataFile("bear-320x240-audio-only.webm");
3435   AppendDataInPieces(buffer->data(), buffer->data_size(), 512);
3436   CheckExpectedRanges(kSourceId, "{ }");
3437
3438   DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::AUDIO);
3439   AudioDecoderConfig config_1 = stream->audio_decoder_config();
3440
3441   // Read in a second WebM with a different config and append the data.
3442   scoped_refptr<DecoderBuffer> buffer2 =
3443       ReadTestDataFile("bear-320x240-audio-only-48khz.webm");
3444   EXPECT_CALL(host_, SetDuration(_)).Times(AnyNumber());
3445   ASSERT_TRUE(SetTimestampOffset(kSourceId, duration_1));
3446   AppendDataInPieces(buffer2->data(), buffer2->data_size(), 512);
3447   CheckExpectedRanges(kSourceId, "{ [2746,5519) }");
3448
3449   Seek(duration_1);
3450   ExpectConfigChanged(DemuxerStream::AUDIO);
3451   ASSERT_FALSE(config_1.Matches(stream->audio_decoder_config()));
3452   CheckExpectedBuffers(stream, "2746 2767 2789 2810");
3453 }
3454
3455 TEST_F(ChunkDemuxerTest, AppendWindow_Text) {
3456   DemuxerStream* text_stream = NULL;
3457   EXPECT_CALL(host_, AddTextStream(_, _))
3458       .WillOnce(SaveArg<0>(&text_stream));
3459   ASSERT_TRUE(InitDemuxer(HAS_VIDEO | HAS_TEXT));
3460   DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
3461
3462   // Set the append window to [20,280).
3463   append_window_start_for_next_append_ = base::TimeDelta::FromMilliseconds(20);
3464   append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(280);
3465
3466   // Append a cluster that starts before and ends after the append
3467   // window.
3468   AppendMuxedCluster(
3469       MuxedStreamInfo(kVideoTrackNum,
3470                       "0K 30 60 90 120K 150 180 210 240K 270 300 330K"),
3471       MuxedStreamInfo(kTextTrackNum, "0K 100K 200K 300K"));
3472
3473   // Verify that text cues that start outside the window are not included
3474   // in the buffer. Also verify that cues that extend beyond the
3475   // window are not included.
3476   CheckExpectedRanges(kSourceId, "{ [100,270) }");
3477   CheckExpectedBuffers(video_stream, "120 150 180 210 240");
3478   CheckExpectedBuffers(text_stream, "100");
3479
3480   // Extend the append window to [20,650).
3481   append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(650);
3482
3483   // Append more data and verify that a new range is created.
3484   AppendMuxedCluster(
3485       MuxedStreamInfo(kVideoTrackNum,
3486                       "360 390 420K 450 480 510 540K 570 600 630K"),
3487       MuxedStreamInfo(kTextTrackNum, "400K 500K 600K 700K"));
3488   CheckExpectedRanges(kSourceId, "{ [100,270) [400,630) }");
3489
3490   // Seek to the new range and verify that the expected buffers are returned.
3491   Seek(base::TimeDelta::FromMilliseconds(420));
3492   CheckExpectedBuffers(video_stream, "420 450 480 510 540 570 600");
3493   CheckExpectedBuffers(text_stream, "400 500");
3494 }
3495
3496 TEST_F(ChunkDemuxerTest, StartWaitingForSeekAfterParseError) {
3497   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3498   EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
3499   AppendGarbage();
3500   base::TimeDelta seek_time = base::TimeDelta::FromSeconds(50);
3501   demuxer_->StartWaitingForSeek(seek_time);
3502 }
3503
3504 TEST_F(ChunkDemuxerTest, Remove_AudioVideoText) {
3505   DemuxerStream* text_stream = NULL;
3506   EXPECT_CALL(host_, AddTextStream(_, _))
3507       .WillOnce(SaveArg<0>(&text_stream));
3508   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));
3509
3510   DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
3511   DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
3512
3513   AppendMuxedCluster(
3514       MuxedStreamInfo(kAudioTrackNum, "0K 20K 40K 60K 80K 100K 120K 140K"),
3515       MuxedStreamInfo(kVideoTrackNum, "0K 30 60 90 120K 150 180"),
3516       MuxedStreamInfo(kTextTrackNum, "0K 100K 200K"));
3517
3518   CheckExpectedBuffers(audio_stream, "0 20 40 60 80 100 120 140");
3519   CheckExpectedBuffers(video_stream, "0 30 60 90 120 150 180");
3520   CheckExpectedBuffers(text_stream, "0 100 200");
3521
3522   // Remove the buffers that were added.
3523   demuxer_->Remove(kSourceId, base::TimeDelta(),
3524                    base::TimeDelta::FromMilliseconds(300));
3525
3526   // Verify that all the appended data has been removed.
3527   CheckExpectedRanges(kSourceId, "{ }");
3528
3529   // Append new buffers that are clearly different than the original
3530   // ones and verify that only the new buffers are returned.
3531   AppendMuxedCluster(
3532       MuxedStreamInfo(kAudioTrackNum, "1K 21K 41K 61K 81K 101K 121K 141K"),
3533       MuxedStreamInfo(kVideoTrackNum, "1K 31 61 91 121K 151 181"),
3534       MuxedStreamInfo(kTextTrackNum, "1K 101K 201K"));
3535
3536   Seek(base::TimeDelta());
3537   CheckExpectedBuffers(audio_stream, "1 21 41 61 81 101 121 141");
3538   CheckExpectedBuffers(video_stream, "1 31 61 91 121 151 181");
3539   CheckExpectedBuffers(text_stream, "1 101 201");
3540 }
3541
3542 TEST_F(ChunkDemuxerTest, Remove_StartAtDuration) {
3543   ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
3544   DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
3545
3546   // Set the duration to something small so that the append that
3547   // follows updates the duration to reflect the end of the appended data.
3548   EXPECT_CALL(host_, SetDuration(
3549       base::TimeDelta::FromMilliseconds(1)));
3550   demuxer_->SetDuration(0.001);
3551
3552   EXPECT_CALL(host_, SetDuration(
3553       base::TimeDelta::FromMilliseconds(160)));
3554   AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
3555                             "0K 20K 40K 60K 80K 100K 120K 140K");
3556
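       // The last appended buffer starts at 140 ms and, with the 20 ms spacing,
       // ends at 160 ms, which becomes the new duration signaled above.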
3557   CheckExpectedRanges(kSourceId, "{ [0,160) }");
3558   CheckExpectedBuffers(audio_stream, "0 20 40 60 80 100 120 140");
3559
3560   demuxer_->Remove(kSourceId,
3561                    base::TimeDelta::FromSecondsD(demuxer_->GetDuration()),
3562                    kInfiniteDuration());
3563
3564   Seek(base::TimeDelta());
3565   CheckExpectedRanges(kSourceId, "{ [0,160) }");
3566   CheckExpectedBuffers(audio_stream, "0 20 40 60 80 100 120 140");
3567 }
3568
3569 // Verifies that a Seek() will complete without text cues for
3570 // the seek point and will return cues after the seek position
3571 // when they are eventually appended.
3572 TEST_F(ChunkDemuxerTest, SeekCompletesWithoutTextCues) {
3573   DemuxerStream* text_stream = NULL;
3574   EXPECT_CALL(host_, AddTextStream(_, _))
3575       .WillOnce(SaveArg<0>(&text_stream));
3576   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));
3577
3578   DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
3579   DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
3580
3581   base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(120);
3582   bool seek_cb_was_called = false;
3583   demuxer_->StartWaitingForSeek(seek_time);
3584   demuxer_->Seek(seek_time,
3585                  base::Bind(OnSeekDone_OKExpected, &seek_cb_was_called));
3586   message_loop_.RunUntilIdle();
3587
3588   EXPECT_FALSE(seek_cb_was_called);
3589
3590   bool text_read_done = false;
3591   text_stream->Read(base::Bind(&OnReadDone,
3592                                base::TimeDelta::FromMilliseconds(225),
3593                                &text_read_done));
3594
3595   // Append audio & video data so the seek completes.
3596   AppendMuxedCluster(
3597       MuxedStreamInfo(kAudioTrackNum,
3598                       "0K 20K 40K 60K 80K 100K 120K 140K 160K 180K 200K"),
3599       MuxedStreamInfo(kVideoTrackNum, "0K 30 60 90 120K 150 180 210"));
3600
3601   message_loop_.RunUntilIdle();
3602   EXPECT_TRUE(seek_cb_was_called);
3603   EXPECT_FALSE(text_read_done);
3604
3605   // Read some audio & video buffers to further verify seek completion.
3606   CheckExpectedBuffers(audio_stream, "120 140");
3607   CheckExpectedBuffers(video_stream, "120 150");
3608
3609   EXPECT_FALSE(text_read_done);
3610
3611   // Append text cues that start after the seek point and verify that
3612   // they are returned by Read() calls.
3613   AppendMuxedCluster(
3614       MuxedStreamInfo(kAudioTrackNum, "220K 240K 260K 280K"),
3615       MuxedStreamInfo(kVideoTrackNum, "240K 270 300 330"),
3616       MuxedStreamInfo(kTextTrackNum, "225K 275K 325K"));
3617
3618   message_loop_.RunUntilIdle();
3619   EXPECT_TRUE(text_read_done);
3620
3621   // NOTE: we start at 275 here because the buffer at 225 was returned
3622   // to the pending read initiated above.
3623   CheckExpectedBuffers(text_stream, "275 325");
3624
3625   // Verify that audio & video streams continue to return expected values.
3626   CheckExpectedBuffers(audio_stream, "160 180");
3627   CheckExpectedBuffers(video_stream, "180 210");
3628 }
3629
3630 TEST_F(ChunkDemuxerTest, ClusterWithUnknownSize) {
3631   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3632
3633   AppendCluster(GenerateCluster(0, 0, 4, true));
3634   CheckExpectedRanges(kSourceId, "{ [0,46) }");
3635
3636   // A new cluster marks the end of the previous cluster with unknown size.
3637   AppendCluster(GenerateCluster(46, 66, 5, true));
3638   CheckExpectedRanges(kSourceId, "{ [0,115) }");
3639 }
3640
3641 TEST_F(ChunkDemuxerTest, CuesBetweenClustersWithUnknownSize) {
3642   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3643
3644   // Add two clusters separated by Cues in a single Append() call.
3645   scoped_ptr<Cluster> cluster = GenerateCluster(0, 0, 4, true);
3646   std::vector<uint8> data(cluster->data(), cluster->data() + cluster->size());
3647   data.insert(data.end(), kCuesHeader, kCuesHeader + sizeof(kCuesHeader));
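       // The Cues element is not a valid Cluster child, so it ends the preceding
       // unknown-size cluster just as a new cluster would.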
3648   cluster = GenerateCluster(46, 66, 5, true);
3649   data.insert(data.end(), cluster->data(), cluster->data() + cluster->size());
3650   AppendData(&*data.begin(), data.size());
3651
3652   CheckExpectedRanges(kSourceId, "{ [0,115) }");
3653 }
3654
3655 TEST_F(ChunkDemuxerTest, CuesBetweenClusters) {
3656   ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3657
3658   AppendCluster(GenerateCluster(0, 0, 4));
3659   AppendData(kCuesHeader, sizeof(kCuesHeader));
3660   AppendCluster(GenerateCluster(46, 66, 5));
3661   CheckExpectedRanges(kSourceId, "{ [0,115) }");
3662 }
3663
3664 }  // namespace media