Revert "[M120 Migration]Fix for crash during chrome exit"
[platform/framework/web/chromium-efl.git] / media / filters / ffmpeg_demuxer.cc
1 // Copyright 2012 The Chromium Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "media/filters/ffmpeg_demuxer.h"
6
7 #include <algorithm>
8 #include <memory>
9 #include <set>
10 #include <utility>
11
12 #include "base/base64.h"
13 #include "base/feature_list.h"
14 #include "base/functional/bind.h"
15 #include "base/functional/callback_helpers.h"
16 #include "base/memory/ptr_util.h"
17 #include "base/metrics/histogram_functions.h"
18 #include "base/metrics/histogram_macros.h"
19 #include "base/numerics/safe_conversions.h"
20 #include "base/strings/string_number_conversions.h"
21 #include "base/strings/string_util.h"
22 #include "base/sys_byteorder.h"
23 #include "base/task/bind_post_task.h"
24 #include "base/task/sequenced_task_runner.h"
25 #include "base/task/thread_pool.h"
26 #include "base/time/time.h"
27 #include "base/trace_event/trace_event.h"
28 #include "build/build_config.h"
29 #include "media/base/decrypt_config.h"
30 #include "media/base/demuxer.h"
31 #include "media/base/demuxer_memory_limit.h"
32 #include "media/base/limits.h"
33 #include "media/base/media_switches.h"
34 #include "media/base/media_tracks.h"
35 #include "media/base/media_types.h"
36 #include "media/base/sample_rates.h"
37 #include "media/base/supported_types.h"
38 #include "media/base/timestamp_constants.h"
39 #include "media/base/video_codecs.h"
40 #include "media/base/webvtt_util.h"
41 #include "media/ffmpeg/ffmpeg_common.h"
42 #include "media/filters/ffmpeg_aac_bitstream_converter.h"
43 #include "media/filters/ffmpeg_bitstream_converter.h"
44 #include "media/filters/ffmpeg_glue.h"
45 #include "media/filters/ffmpeg_h264_to_annex_b_bitstream_converter.h"
46 #include "media/formats/mpeg/mpeg1_audio_stream_parser.h"
47 #include "media/formats/webm/webm_crypto_helpers.h"
48 #include "media/media_buildflags.h"
49 #include "third_party/ffmpeg/ffmpeg_features.h"
50 #include "third_party/ffmpeg/libavcodec/packet.h"
51
52 #if BUILDFLAG(ENABLE_PLATFORM_HEVC)
53 #include "media/filters/ffmpeg_h265_to_annex_b_bitstream_converter.h"
54 #endif
55
56 namespace media {
57
58 namespace {
59
60 constexpr int64_t kInvalidPTSMarker = static_cast<int64_t>(0x8000000000000000);
61
62 void SetAVStreamDiscard(AVStream* stream, AVDiscard discard) {
63   DCHECK(stream);
64   stream->discard = discard;
65 }
66
67 int AVSeekFrame(AVFormatContext* s, int stream_index, int64_t timestamp) {
68   // Seek to a timestamp <= to the desired timestamp.
69   int result = av_seek_frame(s, stream_index, timestamp, AVSEEK_FLAG_BACKWARD);
70   if (result >= 0) {
71     return result;
72   }
73
74   // Seek to the nearest keyframe, wherever that may be.
75   return av_seek_frame(s, stream_index, timestamp, 0);
76 }
77
78 }  // namespace
79
80 static base::Time ExtractTimelineOffset(
81     container_names::MediaContainerName container,
82     const AVFormatContext* format_context) {
83   if (container == container_names::MediaContainerName::kContainerWEBM) {
84     const AVDictionaryEntry* entry =
85         av_dict_get(format_context->metadata, "creation_time", nullptr, 0);
86
87     base::Time timeline_offset;
88
89     // FFmpegDemuxerTests assume base::Time::FromUTCString() is used here.
90     if (entry != nullptr && entry->value != nullptr &&
91         base::Time::FromUTCString(entry->value, &timeline_offset)) {
92       return timeline_offset;
93     }
94   }
95
96   return base::Time();
97 }
98
99 static base::TimeDelta FramesToTimeDelta(int frames, double sample_rate) {
100   return base::Microseconds(frames * base::Time::kMicrosecondsPerSecond /
101                             sample_rate);
102 }
103
// Computes the PTS start time for |stream|. Starts from the container's
// declared |start_time| (default zero) and may be lowered by the first DTS
// for codecs where PTS == DTS is known to hold.
static base::TimeDelta ExtractStartTime(AVStream* stream) {
  // The default start time is zero.
  base::TimeDelta start_time;

  // First try to use  the |start_time| value as is.
  if (stream->start_time != kNoFFmpegTimestamp)
    start_time = ConvertFromTimeBase(stream->time_base, stream->start_time);

  // Next try to use the first DTS value, for codecs where we know PTS == DTS
  // (excludes all H26x codecs). The start time must be returned in PTS.
  if (av_stream_get_first_dts(stream) != kNoFFmpegTimestamp &&
      stream->codecpar->codec_id != AV_CODEC_ID_HEVC &&
      stream->codecpar->codec_id != AV_CODEC_ID_H264 &&
      stream->codecpar->codec_id != AV_CODEC_ID_MPEG4) {
    const base::TimeDelta first_pts =
        ConvertFromTimeBase(stream->time_base, av_stream_get_first_dts(stream));
    // Only lower the result: the returned value must never exceed the first
    // presentation timestamp.
    if (first_pts < start_time)
      start_time = first_pts;
  }

  return start_time;
}
126
// Record audio decoder config UMA stats corresponding to a src= playback.
// Logs |audio_config|'s codec enum to the "Media.AudioCodec" histogram.
static void RecordAudioCodecStats(const AudioDecoderConfig& audio_config) {
  base::UmaHistogramEnumeration("Media.AudioCodec", audio_config.codec());
}
131
132 // Record video decoder config UMA stats corresponding to a src= playback.
133 static void RecordVideoCodecStats(container_names::MediaContainerName container,
134                                   const VideoDecoderConfig& video_config,
135                                   AVColorRange color_range,
136                                   MediaLog* media_log) {
137   // TODO(xhwang): Fix these misleading metric names. They should be something
138   // like "Media.SRC.Xxxx". See http://crbug.com/716183.
139   base::UmaHistogramEnumeration("Media.VideoCodec", video_config.codec());
140   if (container == container_names::MediaContainerName::kContainerMOV) {
141     base::UmaHistogramEnumeration("Media.SRC.VideoCodec.MP4",
142                                   video_config.codec());
143   } else if (container == container_names::MediaContainerName::kContainerWEBM) {
144     base::UmaHistogramEnumeration("Media.SRC.VideoCodec.WebM",
145                                   video_config.codec());
146   }
147 }
148
149 static const char kCodecNone[] = "none";
150
151 static const char* GetCodecName(enum AVCodecID id) {
152   const AVCodecDescriptor* codec_descriptor = avcodec_descriptor_get(id);
153   // If the codec name can't be determined, return none for tracking.
154   return codec_descriptor ? codec_descriptor->name : kCodecNone;
155 }
156
157 static base::Value GetTimeValue(base::TimeDelta value) {
158   if (value == kInfiniteDuration)
159     return base::Value("kInfiniteDuration");
160   if (value == kNoTimestamp)
161     return base::Value("kNoTimestamp");
162   return base::Value(value.InSecondsF());
163 }
164
// Teaches MediaLog to serialize base::TimeDelta for the kMaxDuration
// property via GetTimeValue(), so sentinel values render symbolically.
template <>
struct MediaLogPropertyTypeSupport<MediaLogProperty::kMaxDuration,
                                   base::TimeDelta> {
  static base::Value Convert(base::TimeDelta t) { return GetTimeValue(t); }
};

// Same as above, for the kStartTime property.
template <>
struct MediaLogPropertyTypeSupport<MediaLogProperty::kStartTime,
                                   base::TimeDelta> {
  static base::Value Convert(base::TimeDelta t) { return GetTimeValue(t); }
};
176
177 static int ReadFrameAndDiscardEmpty(AVFormatContext* context,
178                                     AVPacket* packet) {
179   // Skip empty packets in a tight loop to avoid timing out fuzzers.
180   int result;
181   bool drop_packet;
182   do {
183     result = av_read_frame(context, packet);
184     drop_packet = (!packet->data || !packet->size) && result >= 0;
185     if (drop_packet) {
186       av_packet_unref(packet);
187       DLOG(WARNING) << "Dropping empty packet, size: " << packet->size
188                     << ", data: " << static_cast<void*>(packet->data);
189     }
190   } while (drop_packet);
191
192   return result;
193 }
194
195 std::unique_ptr<FFmpegDemuxerStream> FFmpegDemuxerStream::Create(
196     FFmpegDemuxer* demuxer,
197     AVStream* stream,
198     MediaLog* media_log) {
199   if (!demuxer || !stream)
200     return nullptr;
201
202   std::unique_ptr<FFmpegDemuxerStream> demuxer_stream;
203   std::unique_ptr<AudioDecoderConfig> audio_config;
204   std::unique_ptr<VideoDecoderConfig> video_config;
205
206   if (stream->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
207     audio_config = std::make_unique<AudioDecoderConfig>();
208
209     // TODO(chcunningham): Change AVStreamToAudioDecoderConfig to check
210     // IsValidConfig internally and return a null scoped_ptr if not valid.
211     if (!AVStreamToAudioDecoderConfig(stream, audio_config.get()) ||
212         !audio_config->IsValidConfig() ||
213         !IsSupportedAudioType(AudioType::FromDecoderConfig(*audio_config))) {
214       MEDIA_LOG(DEBUG, media_log) << "Warning, FFmpegDemuxer failed to create "
215                                      "a valid/supported audio decoder "
216                                      "configuration from muxed stream, config:"
217                                   << audio_config->AsHumanReadableString();
218       return nullptr;
219     }
220
221     MEDIA_LOG(INFO, media_log) << "FFmpegDemuxer: created audio stream, config "
222                                << audio_config->AsHumanReadableString();
223   } else if (stream->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
224     video_config = std::make_unique<VideoDecoderConfig>();
225
226     // TODO(chcunningham): Change AVStreamToVideoDecoderConfig to check
227     // IsValidConfig internally and return a null scoped_ptr if not valid.
228     if (!AVStreamToVideoDecoderConfig(stream, video_config.get()) ||
229         !video_config->IsValidConfig() ||
230         !IsSupportedVideoType(VideoType::FromDecoderConfig(*video_config))) {
231       MEDIA_LOG(DEBUG, media_log) << "Warning, FFmpegDemuxer failed to create "
232                                      "a valid/supported video decoder "
233                                      "configuration from muxed stream, config:"
234                                   << video_config->AsHumanReadableString();
235       return nullptr;
236     }
237
238     MEDIA_LOG(INFO, media_log) << "FFmpegDemuxer: created video stream, config "
239                                << video_config->AsHumanReadableString();
240   }
241
242   return base::WrapUnique(
243       new FFmpegDemuxerStream(demuxer, stream, std::move(audio_config),
244                               std::move(video_config), media_log));
245 }
246
247 static void UnmarkEndOfStreamAndClearError(AVFormatContext* format_context) {
248   format_context->pb->eof_reached = 0;
249   format_context->pb->error = 0;
250 }
251
252 //
253 // FFmpegDemuxerStream
254 //
255 FFmpegDemuxerStream::FFmpegDemuxerStream(
256     FFmpegDemuxer* demuxer,
257     AVStream* stream,
258     std::unique_ptr<AudioDecoderConfig> audio_config,
259     std::unique_ptr<VideoDecoderConfig> video_config,
260     MediaLog* media_log)
261     : demuxer_(demuxer),
262       task_runner_(base::SequencedTaskRunner::GetCurrentDefault()),
263       stream_(stream),
264       start_time_(kNoTimestamp),
265       audio_config_(audio_config.release()),
266       video_config_(video_config.release()),
267       media_log_(media_log),
268       end_of_stream_(false),
269       last_packet_timestamp_(kNoTimestamp),
270       last_packet_duration_(kNoTimestamp),
271       is_enabled_(true),
272       waiting_for_keyframe_(false),
273       aborted_(false),
274       fixup_negative_timestamps_(false),
275       fixup_chained_ogg_(false),
276       num_discarded_packet_warnings_(0),
277       last_packet_pos_(AV_NOPTS_VALUE),
278       last_packet_dts_(AV_NOPTS_VALUE) {
279   DCHECK(demuxer_);
280
281   bool is_encrypted = false;
282
283   // Determine our media format.
284   switch (stream->codecpar->codec_type) {
285     case AVMEDIA_TYPE_AUDIO:
286       DCHECK(audio_config_.get() && !video_config_.get());
287       type_ = AUDIO;
288       is_encrypted = audio_config_->is_encrypted();
289       break;
290     case AVMEDIA_TYPE_VIDEO:
291       DCHECK(video_config_.get() && !audio_config_.get());
292       type_ = VIDEO;
293       is_encrypted = video_config_->is_encrypted();
294       break;
295     default:
296       NOTREACHED();
297       break;
298   }
299
300   // Calculate the duration.
301   duration_ = ConvertStreamTimestamp(stream->time_base, stream->duration);
302
303   if (is_encrypted) {
304     AVDictionaryEntry* key =
305         av_dict_get(stream->metadata, "enc_key_id", nullptr, 0);
306     DCHECK(key);
307     DCHECK(key->value);
308     if (!key || !key->value)
309       return;
310     base::StringPiece base64_key_id(key->value);
311     std::string enc_key_id;
312     base::Base64Decode(base64_key_id, &enc_key_id);
313     DCHECK(!enc_key_id.empty());
314     if (enc_key_id.empty())
315       return;
316
317     encryption_key_id_.assign(enc_key_id);
318     demuxer_->OnEncryptedMediaInitData(EmeInitDataType::WEBM, enc_key_id);
319   }
320 }
321
// Stop() must run before destruction: it clears |demuxer_|, answers any
// pending read, and empties the buffer queue — all verified here.
FFmpegDemuxerStream::~FFmpegDemuxerStream() {
  DCHECK(!demuxer_);
  DCHECK(!read_cb_);
  DCHECK(buffer_queue_.IsEmpty());
}
327
// Validates, fixes up, and converts one demuxed FFmpeg packet into a
// DecoderBuffer, then pushes it onto |buffer_queue_| and satisfies any
// pending read. May silently drop the packet (out-of-order audio, non-
// keyframe while waiting for a keyframe, invalid MP3 data) or raise
// DEMUXER_ERROR_COULD_NOT_PARSE for unfixable timestamps.
void FFmpegDemuxerStream::EnqueuePacket(ScopedAVPacket packet) {
  DCHECK(task_runner_->RunsTasksInCurrentSequence());
  DCHECK(packet->size);
  DCHECK(packet->data);

  const bool is_audio = type() == AUDIO;

  // dts == pts when dts is not present.
  int64_t packet_dts =
      packet->dts == AV_NOPTS_VALUE ? packet->pts : packet->dts;

  // Chained ogg files have non-monotonically increasing position and time stamp
  // values, which prevents us from using them to determine if a packet should
  // be dropped. Since chained ogg is only allowed on single track audio only
  // opus/vorbis media, and dropping packets is only necessary for multi-track
  // video-and-audio streams, we can just disable dropping when we detect
  // chained ogg.
  // For similar reasons, we only want to allow packet drops for audio streams;
  // video frame dropping is handled by the renderer when correcting for a/v
  // sync.
  if (is_audio && !fixup_chained_ogg_ && last_packet_pos_ != AV_NOPTS_VALUE) {
    // Some containers have unknown position...
    if (packet->pos == -1)
      packet->pos = last_packet_pos_;

    if (packet->pos < last_packet_pos_) {
      DVLOG(3) << "Dropped packet with out of order position (" << packet->pos
               << " < " << last_packet_pos_ << ")";
      return;
    }
    if (last_packet_dts_ != AV_NOPTS_VALUE && packet->pos == last_packet_pos_ &&
        packet_dts <= last_packet_dts_) {
      DVLOG(3) << "Dropped packet with out of order display timestamp ("
               << packet_dts << " < " << last_packet_dts_ << ")";
      return;
    }
  }

  if (!demuxer_ || end_of_stream_) {
    NOTREACHED() << "Attempted to enqueue packet on a stopped stream";
    return;
  }

  last_packet_pos_ = packet->pos;
  last_packet_dts_ = packet_dts;

  // After a seek, discard everything until the first keyframe arrives.
  if (waiting_for_keyframe_) {
    if (packet->flags & AV_PKT_FLAG_KEY) {
      waiting_for_keyframe_ = false;
    } else {
      DVLOG(1) << "Dropped non-keyframe pts=" << packet->pts;
      return;
    }
  }

#if BUILDFLAG(USE_PROPRIETARY_CODECS)
  // Convert the packet if there is a bitstream filter.
  if (bitstream_converter_ &&
      !bitstream_converter_->ConvertPacket(packet.get())) {
    DVLOG(1) << "Format conversion failed.";
  }
#endif

  scoped_refptr<DecoderBuffer> buffer;

    // NOTE(review): the extra indentation through the decrypt/skip-samples
    // section below appears to be a leftover from a removed enclosing scope;
    // kept byte-identical since this change only adds comments.
    size_t side_data_size = 0;
    uint8_t* side_data = av_packet_get_side_data(
        packet.get(), AV_PKT_DATA_MATROSKA_BLOCKADDITIONAL, &side_data_size);

    std::unique_ptr<DecryptConfig> decrypt_config;
    int data_offset = 0;
    if ((type() == DemuxerStream::AUDIO && audio_config_->is_encrypted()) ||
        (type() == DemuxerStream::VIDEO && video_config_->is_encrypted())) {
      if (!WebMCreateDecryptConfig(
              packet->data, packet->size,
              reinterpret_cast<const uint8_t*>(encryption_key_id_.data()),
              encryption_key_id_.size(), &decrypt_config, &data_offset)) {
        MEDIA_LOG(ERROR, media_log_) << "Creation of DecryptConfig failed.";
      }
    }

    // FFmpeg may return garbage packets for MP3 stream containers, so we need
    // to drop these to avoid decoder errors. The ffmpeg team maintains that
    // this behavior isn't ideal, but have asked for a significant refactoring
    // of the AVParser infrastructure to fix this, which is overkill for now.
    // See http://crbug.com/794782.
    //
    // This behavior may also occur with ADTS streams, but is rarer in practice
    // because ffmpeg's ADTS demuxer does more validation on the packets, so
    // when invalid data is received, av_read_frame() fails and playback ends.
    if (is_audio && demuxer_->container() ==
                        container_names::MediaContainerName::kContainerMP3) {
      DCHECK(!data_offset);  // Only set for containers supporting encryption...

      // MP3 packets may be zero-padded according to ffmpeg, so trim until we
      // have the packet; adjust |data_offset| too so this work isn't repeated.
      uint8_t* packet_end = packet->data + packet->size;
      uint8_t* header_start = packet->data;
      while (header_start < packet_end && !*header_start) {
        ++header_start;
        ++data_offset;
      }

      if (packet_end - header_start < MPEG1AudioStreamParser::kHeaderSize ||
          !MPEG1AudioStreamParser::ParseHeader(nullptr, nullptr, header_start,
                                               nullptr)) {
        LIMITED_MEDIA_LOG(INFO, media_log_, num_discarded_packet_warnings_, 5)
            << "Discarding invalid MP3 packet, ts: "
            << ConvertStreamTimestamp(stream_->time_base, packet->pts)
            << ", duration: "
            << ConvertStreamTimestamp(stream_->time_base, packet->duration);
        return;
      }
    }

    // If a packet is returned by FFmpeg's av_parser_parse2() the packet will
    // reference inner memory of FFmpeg.  As such we should transfer the packet
    // into memory we control.
    if (side_data_size > 0) {
      buffer = DecoderBuffer::CopyFrom(packet->data + data_offset,
                                       packet->size - data_offset);
      buffer->WritableSideData().alpha_data.assign(side_data,
                                                   side_data + side_data_size);
    } else {
      buffer = DecoderBuffer::CopyFrom(packet->data + data_offset,
                                       packet->size - data_offset);
    }

    size_t skip_samples_size = 0;
    const uint32_t* skip_samples_ptr =
        reinterpret_cast<const uint32_t*>(av_packet_get_side_data(
            packet.get(), AV_PKT_DATA_SKIP_SAMPLES, &skip_samples_size));
    const int kSkipSamplesValidSize = 10;
    const int kSkipEndSamplesOffset = 1;
    if (skip_samples_size >= kSkipSamplesValidSize) {
      // Because FFmpeg rolls codec delay and skip samples into one we can only
      // allow front discard padding on the first buffer.  Otherwise the discard
      // helper can't figure out which data to discard.  See AudioDiscardHelper.
      int discard_front_samples = base::ByteSwapToLE32(*skip_samples_ptr);
      if (last_packet_timestamp_ != kNoTimestamp && discard_front_samples) {
        DLOG(ERROR) << "Skip samples are only allowed for the first packet.";
        discard_front_samples = 0;
      }

      if (discard_front_samples < 0) {
        // See https://crbug.com/1189939 and https://trac.ffmpeg.org/ticket/9622
        DLOG(ERROR) << "Negative skip samples are not allowed.";
        discard_front_samples = 0;
      }

      const int discard_end_samples =
          base::ByteSwapToLE32(*(skip_samples_ptr + kSkipEndSamplesOffset));

      if (discard_front_samples || discard_end_samples) {
        DCHECK(is_audio);
        const int samples_per_second =
            audio_decoder_config().samples_per_second();
        buffer->set_discard_padding(std::make_pair(
            FramesToTimeDelta(discard_front_samples, samples_per_second),
            FramesToTimeDelta(discard_end_samples, samples_per_second)));
      }
    }

    if (decrypt_config)
      buffer->set_decrypt_config(std::move(decrypt_config));

  if (packet->duration >= 0) {
    buffer->set_duration(
        ConvertStreamTimestamp(stream_->time_base, packet->duration));
  } else {
    // TODO(wolenetz): Remove when FFmpeg stops returning negative durations.
    // https://crbug.com/394418
    DVLOG(1) << "FFmpeg returned a buffer with a negative duration! "
             << packet->duration;
    buffer->set_duration(kNoTimestamp);
  }

  // Note: If pts is kNoFFmpegTimestamp, stream_timestamp will be kNoTimestamp.
  const base::TimeDelta stream_timestamp =
      ConvertStreamTimestamp(stream_->time_base, packet->pts);

  if (stream_timestamp == kNoTimestamp ||
      stream_timestamp == kInfiniteDuration) {
    MEDIA_LOG(ERROR, media_log_) << "FFmpegDemuxer: PTS is not defined";
    demuxer_->NotifyDemuxerError(DEMUXER_ERROR_COULD_NOT_PARSE);
    return;
  }

  // If this file has negative timestamps don't rebase any other stream types
  // against the negative starting time.
  base::TimeDelta start_time = demuxer_->start_time();
  if (!is_audio && start_time.is_negative()) {
    start_time = base::TimeDelta();
  }

  // Don't rebase timestamps for positive start times, the HTML Media Spec
  // details this in section "4.8.10.6 Offsets into the media resource." We
  // will still need to rebase timestamps before seeking with FFmpeg though.
  if (start_time.is_positive())
    start_time = base::TimeDelta();

  buffer->set_timestamp(stream_timestamp - start_time);

  // If the packet is marked for complete discard and it doesn't already have
  // any discard padding set, mark the DecoderBuffer for complete discard. We
  // don't want to overwrite any existing discard padding since the discard
  // padding may refer to frames beyond this packet.
  if (packet->flags & AV_PKT_FLAG_DISCARD &&
      buffer->discard_padding() == DecoderBuffer::DiscardPadding()) {
    buffer->set_discard_padding(
        std::make_pair(kInfiniteDuration, base::TimeDelta()));
  }

  if (is_audio) {
    // Fixup negative timestamps where the before-zero portion is completely
    // discarded after decoding.
    if (buffer->timestamp().is_negative()) {
      // Discard padding may also remove samples after zero.
      auto fixed_ts = buffer->discard_padding().first + buffer->timestamp();

      // Allow for rounding error in the discard padding calculations.
      if (fixed_ts == base::Microseconds(-1)) {
        fixed_ts = base::TimeDelta();
      }

      if (fixed_ts >= base::TimeDelta()) {
        buffer->set_timestamp(fixed_ts);
      }
    }

    // Only allow negative timestamps past if we know they'll be fixed up by the
    // code paths below; otherwise they should be treated as a parse error.
    if ((!fixup_chained_ogg_ || last_packet_timestamp_ == kNoTimestamp) &&
        buffer->timestamp().is_negative()) {
      MEDIA_LOG(ERROR, media_log_)
          << "FFmpegDemuxer: unfixable negative timestamp.";
      demuxer_->NotifyDemuxerError(DEMUXER_ERROR_COULD_NOT_PARSE);
      return;
    }

    // If enabled, and no codec delay is present, mark audio packets with
    // negative timestamps for post-decode discard. If codec delay is present,
    // discard is handled by the decoder using that value.
    if (fixup_negative_timestamps_ && stream_timestamp.is_negative() &&
        buffer->duration() != kNoTimestamp &&
        !audio_decoder_config().codec_delay()) {
      if ((stream_timestamp + buffer->duration()).is_negative()) {
        DCHECK_EQ(buffer->discard_padding().second, base::TimeDelta());

        // Discard the entire packet if it's entirely before zero, but don't
        // override the discard padding if it refers to frames beyond this
        // packet.
        if (buffer->discard_padding().first <= buffer->duration()) {
          buffer->set_discard_padding(
              std::make_pair(kInfiniteDuration, base::TimeDelta()));
        }
      } else {
        // Only discard part of the frame if it overlaps zero.
        buffer->set_discard_padding(std::make_pair(
            std::max(-stream_timestamp, buffer->discard_padding().first),
            buffer->discard_padding().second));
      }
    }
  }

  if (last_packet_timestamp_ != kNoTimestamp) {
    // FFmpeg doesn't support chained ogg correctly.  Instead of guaranteeing
    // continuity across links in the chain it uses the timestamp information
    // from each link directly.  Doing so can lead to timestamps which appear to
    // go backwards in time.
    //
    // If the new link starts with a negative timestamp or a timestamp less than
    // the original (positive) |start_time|, we will get a negative timestamp
    // here.
    //
    // Fixing chained ogg is non-trivial, so for now just reuse the last good
    // timestamp.  The decoder will rewrite the timestamps to be sample accurate
    // later.  See http://crbug.com/396864.
    //
    // Note: This will not work with codecs that have out of order frames like
    // H.264 with b-frames, but luckily you can't put those in ogg files...
    if (fixup_chained_ogg_ && buffer->timestamp() < last_packet_timestamp_) {
      buffer->set_timestamp(last_packet_timestamp_ +
                            (last_packet_duration_ != kNoTimestamp
                                 ? last_packet_duration_
                                 : base::Microseconds(1)));
    }

    if (is_audio) {
      // The demuxer should always output positive timestamps.
      DCHECK_GE(buffer->timestamp(), base::TimeDelta());
    }

    if (last_packet_timestamp_ < buffer->timestamp()) {
      buffered_ranges_.Add(last_packet_timestamp_, buffer->timestamp());
      demuxer_->NotifyBufferingChanged();
    }
  }

  if (packet->flags & AV_PKT_FLAG_KEY)
    buffer->set_is_key_frame(true);

  // One last sanity check on the packet timestamps in case any of the above
  // calculations have pushed the values to the limits.
  if (buffer->timestamp() == kNoTimestamp ||
      buffer->timestamp() == kInfiniteDuration) {
    MEDIA_LOG(ERROR, media_log_) << "FFmpegDemuxer: PTS is not defined";
    demuxer_->NotifyDemuxerError(DEMUXER_ERROR_COULD_NOT_PARSE);
    return;
  }

  last_packet_timestamp_ = buffer->timestamp();
  last_packet_duration_ = buffer->duration();

  // Grow the reported duration if packets land past the container-declared
  // duration.
  const base::TimeDelta new_duration = last_packet_timestamp_;
  if (new_duration > duration_ || duration_ == kNoTimestamp)
    duration_ = new_duration;

  buffer_queue_.Push(std::move(buffer));
  SatisfyPendingRead();
}
649
// Marks the stream as ended and services any pending read.
void FFmpegDemuxerStream::SetEndOfStream() {
  DCHECK(task_runner_->RunsTasksInCurrentSequence());
  end_of_stream_ = true;
  SatisfyPendingRead();
}
655
656 void FFmpegDemuxerStream::FlushBuffers(bool preserve_packet_position) {
657   DCHECK(task_runner_->RunsTasksInCurrentSequence());
658   DCHECK(preserve_packet_position || !read_cb_)
659       << "There should be no pending read";
660
661   // H264 and AAC require that we resend the header after flush.
662   // Reset bitstream for converter to do so.
663   // This is related to chromium issue 140371 (http://crbug.com/140371).
664   ResetBitstreamConverter();
665
666   if (!preserve_packet_position) {
667     last_packet_pos_ = AV_NOPTS_VALUE;
668     last_packet_dts_ = AV_NOPTS_VALUE;
669   }
670
671   buffer_queue_.Clear();
672   end_of_stream_ = false;
673   last_packet_timestamp_ = kNoTimestamp;
674   last_packet_duration_ = kNoTimestamp;
675   aborted_ = false;
676 }
677
678 void FFmpegDemuxerStream::Abort() {
679   aborted_ = true;
680   if (read_cb_)
681     std::move(read_cb_).Run(DemuxerStream::kAborted, {});
682 }
683
684 void FFmpegDemuxerStream::Stop() {
685   DCHECK(task_runner_->RunsTasksInCurrentSequence());
686   buffer_queue_.Clear();
687   demuxer_ = nullptr;
688   stream_ = nullptr;
689   end_of_stream_ = true;
690   if (read_cb_) {
691     std::move(read_cb_).Run(DemuxerStream::kOk,
692                             {DecoderBuffer::CreateEOSBuffer()});
693   }
694 }
695
// Returns whether this stream carries AUDIO or VIDEO packets.
DemuxerStream::Type FFmpegDemuxerStream::type() const {
  DCHECK(task_runner_->RunsTasksInCurrentSequence());
  return type_;
}
700
// Returns the stream's liveness classification.
StreamLiveness FFmpegDemuxerStream::liveness() const {
  DCHECK(task_runner_->RunsTasksInCurrentSequence());
  return liveness_;
}
705
// Services a demuxer read of up to |count| buffers. The callback is bounced
// back through the current sequence. Stopped and disabled streams answer
// immediately with EOS; aborted streams answer with kAborted; otherwise the
// read is satisfied from the buffer queue when data is available.
void FFmpegDemuxerStream::Read(uint32_t count, ReadCB read_cb) {
  DCHECK(task_runner_->RunsTasksInCurrentSequence());
  CHECK(!read_cb_) << "Overlapping reads are not supported";
  read_cb_ = base::BindPostTaskToCurrentDefault(std::move(read_cb));
  requested_buffer_count_ = static_cast<size_t>(count);
  DVLOG(3) << __func__
           << " requested_buffer_count_ = " << requested_buffer_count_;
  // Don't accept any additional reads if we've been told to stop.
  // The |demuxer_| may have been destroyed in the pipeline thread.
  //
  // TODO(scherkus): it would be cleaner to reply with an error message.
  if (!demuxer_) {
    std::move(read_cb_).Run(DemuxerStream::kOk,
                            {DecoderBuffer::CreateEOSBuffer()});
    return;
  }

  if (!is_enabled_) {
    DVLOG(1) << "Read from disabled stream, returning EOS";
    std::move(read_cb_).Run(kOk, {DecoderBuffer::CreateEOSBuffer()});
    return;
  }

  if (aborted_) {
    std::move(read_cb_).Run(kAborted, {});
    return;
  }

  SatisfyPendingRead();
}
736
// Turns on bitstream conversion for this stream. Without proprietary codec
// support there is no converter to create, so only an error is logged.
void FFmpegDemuxerStream::EnableBitstreamConverter() {
  DCHECK(task_runner_->RunsTasksInCurrentSequence());

#if BUILDFLAG(USE_PROPRIETARY_CODECS)
  InitBitstreamConverter();
#else
  DLOG(ERROR) << "Proprietary codecs not enabled and stream requires bitstream "
                 "conversion. Playback will likely fail.";
#endif
}
747
// Recreates the bitstream converter (only if one already exists) so stream
// headers are re-emitted after a flush; see FlushBuffers().
void FFmpegDemuxerStream::ResetBitstreamConverter() {
#if BUILDFLAG(USE_PROPRIETARY_CODECS)
  if (bitstream_converter_)
    InitBitstreamConverter();
#endif  // BUILDFLAG(USE_PROPRIETARY_CODECS)
}
754
// Instantiates the codec-appropriate bitstream converter for this stream
// (H.264/HEVC to Annex B, AAC to ADTS). No-op for other codecs or when
// proprietary codecs are disabled.
void FFmpegDemuxerStream::InitBitstreamConverter() {
#if BUILDFLAG(USE_PROPRIETARY_CODECS)
  switch (stream_->codecpar->codec_id) {
    case AV_CODEC_ID_H264:
      // Clear |extra_data| so that future (fallback) decoders will know that
      // conversion is forcibly enabled on this stream.
      //
      // TODO(sandersd): Ideally we would convert |extra_data| to concatenated
      // SPS/PPS data, but it's too late to be useful because Initialize() was
      // already called on GpuVideoDecoder, which is the only path that would
      // consume that data.
      if (video_config_)
        video_config_->SetExtraData(std::vector<uint8_t>());
      bitstream_converter_ =
          std::make_unique<FFmpegH264ToAnnexBBitstreamConverter>(
              stream_->codecpar);
      break;
#if BUILDFLAG(ENABLE_PLATFORM_HEVC)
    case AV_CODEC_ID_HEVC:
      bitstream_converter_ =
          std::make_unique<FFmpegH265ToAnnexBBitstreamConverter>(
              stream_->codecpar);
      break;
#endif
    case AV_CODEC_ID_AAC:
      // FFmpeg doesn't understand xHE-AAC profiles yet, which can't be put in
      // ADTS anyways, so skip bitstream conversion when the profile is
      // unknown.
      if (audio_config_->profile() != AudioCodecProfile::kXHE_AAC) {
        bitstream_converter_ =
            std::make_unique<FFmpegAACBitstreamConverter>(stream_->codecpar);
      }
      break;
    default:
      break;
  }
#endif  // BUILDFLAG(USE_PROPRIETARY_CODECS)
}
793
794 bool FFmpegDemuxerStream::SupportsConfigChanges() { return false; }
795
// Returns a copy of the stream's audio decoder config. Only valid on the
// media sequence, for AUDIO streams whose config was successfully created.
AudioDecoderConfig FFmpegDemuxerStream::audio_decoder_config() {
  DCHECK(task_runner_->RunsTasksInCurrentSequence());
  DCHECK_EQ(type_, AUDIO);
  DCHECK(audio_config_.get());
  return *audio_config_;
}
802
// Returns a copy of the stream's video decoder config. Only valid on the
// media sequence, for VIDEO streams whose config was successfully created.
VideoDecoderConfig FFmpegDemuxerStream::video_decoder_config() {
  DCHECK(task_runner_->RunsTasksInCurrentSequence());
  DCHECK_EQ(type_, VIDEO);
  DCHECK(video_config_.get());
  return *video_config_;
}
809
// Whether this stream is currently enabled (see SetEnabled()).
bool FFmpegDemuxerStream::IsEnabled() const {
  DCHECK(task_runner_->RunsTasksInCurrentSequence());
  return is_enabled_;
}
814
815 void FFmpegDemuxerStream::SetEnabled(bool enabled, base::TimeDelta timestamp) {
816   DCHECK(task_runner_->RunsTasksInCurrentSequence());
817   DCHECK(demuxer_);
818   DCHECK(demuxer_->ffmpeg_task_runner());
819   if (enabled == is_enabled_)
820     return;
821
822   is_enabled_ = enabled;
823   demuxer_->ffmpeg_task_runner()->PostTask(
824       FROM_HERE, base::BindOnce(&SetAVStreamDiscard, av_stream(),
825                                 enabled ? AVDISCARD_DEFAULT : AVDISCARD_ALL));
826   if (is_enabled_) {
827     waiting_for_keyframe_ = true;
828   }
829   if (!is_enabled_ && read_cb_) {
830     DVLOG(1) << "Read from disabled stream, returning EOS";
831     std::move(read_cb_).Run(kOk, {DecoderBuffer::CreateEOSBuffer()});
832   }
833 }
834
// Records the stream's liveness. Callers may only transition away from
// kUnknown once, which the DCHECK below enforces.
void FFmpegDemuxerStream::SetLiveness(StreamLiveness liveness) {
  DCHECK(task_runner_->RunsTasksInCurrentSequence());
  DCHECK_EQ(liveness_, StreamLiveness::kUnknown);
  liveness_ = liveness;
}
840
// Returns the time ranges for which this stream currently has buffered data.
Ranges<base::TimeDelta> FFmpegDemuxerStream::GetBufferedRanges() const {
  return buffered_ranges_;
}
844
845 void FFmpegDemuxerStream::SatisfyPendingRead() {
846   DCHECK(task_runner_->RunsTasksInCurrentSequence());
847   if (read_cb_) {
848     if (!buffer_queue_.IsEmpty()) {
849       DemuxerStream::DecoderBufferVector output_buffers;
850
851       for (size_t i = 0;
852            i < std::min(requested_buffer_count_, buffer_queue_.queue_size());
853            ++i) {
854         output_buffers.emplace_back(buffer_queue_.Pop());
855       }
856       DVLOG(3) << __func__ << " Status:kOk, return output_buffers.size = "
857                << output_buffers.size();
858       std::move(read_cb_).Run(DemuxerStream::kOk, std::move(output_buffers));
859     } else if (end_of_stream_) {
860       std::move(read_cb_).Run(DemuxerStream::kOk,
861                               {DecoderBuffer::CreateEOSBuffer()});
862     }
863   }
864   // Have capacity? Ask for more!
865   if (HasAvailableCapacity() && !end_of_stream_) {
866     demuxer_->NotifyCapacityAvailable();
867   }
868 }
869
870 bool FFmpegDemuxerStream::HasAvailableCapacity() {
871   // Try to have two second's worth of encoded data per stream.
872   const base::TimeDelta kCapacity = base::Seconds(2);
873   return buffer_queue_.IsEmpty() || buffer_queue_.Duration() < kCapacity;
874 }
875
// Bytes of encoded data currently held in this stream's buffer queue.
size_t FFmpegDemuxerStream::MemoryUsage() const {
  return buffer_queue_.data_size();
}
879
880 std::string FFmpegDemuxerStream::GetMetadata(const char* key) const {
881   const AVDictionaryEntry* entry =
882       av_dict_get(stream_->metadata, key, nullptr, 0);
883   return (entry == nullptr || entry->value == nullptr) ? "" : entry->value;
884 }
885
886 // static
887 base::TimeDelta FFmpegDemuxerStream::ConvertStreamTimestamp(
888     const AVRational& time_base,
889     int64_t timestamp) {
890   if (timestamp == kNoFFmpegTimestamp)
891     return kNoTimestamp;
892
893   return ConvertFromTimeBase(time_base, timestamp);
894 }
895
896 //
897 // FFmpegDemuxer
898 //
// Constructs the demuxer. |task_runner| is the media sequence this object
// runs on; |data_source| must outlive use until Stop(); blocking FFmpeg work
// is funneled onto a dedicated MayBlock thread-pool sequence.
FFmpegDemuxer::FFmpegDemuxer(
    const scoped_refptr<base::SequencedTaskRunner>& task_runner,
    DataSource* data_source,
    const EncryptedMediaInitDataCB& encrypted_media_init_data_cb,
    MediaTracksUpdatedCB media_tracks_updated_cb,
    MediaLog* media_log,
    bool is_local_file)
    : task_runner_(task_runner),
      // FFmpeg has no asynchronous API, so we use base::WaitableEvents inside
      // the BlockingUrlProtocol to handle hops to the render thread for network
      // reads and seeks.
      blocking_task_runner_(base::ThreadPool::CreateSequencedTaskRunner(
          {base::MayBlock(), base::TaskPriority::USER_BLOCKING})),
      data_source_(data_source),
      media_log_(media_log),
      encrypted_media_init_data_cb_(encrypted_media_init_data_cb),
      media_tracks_updated_cb_(std::move(media_tracks_updated_cb)),
      is_local_file_(is_local_file) {
  DCHECK(task_runner_.get());
  DCHECK(data_source_);
  DCHECK(media_tracks_updated_cb_);
}
921
// Destructor. Expects Stop() to have already run (no pending callbacks, no
// live WeakPtrs) and hands FFmpeg-facing members to the blocking sequence
// for ordered destruction.
FFmpegDemuxer::~FFmpegDemuxer() {
  DCHECK(!init_cb_);
  DCHECK(!pending_seek_cb_);

  // NOTE: This class is not destroyed on |task_runner|, so we must ensure that
  // there are no outstanding WeakPtrs by the time we reach here.
  DCHECK(!weak_factory_.HasWeakPtrs());

  // There may be outstanding tasks in the blocking pool which are trying to use
  // these members, so release them in sequence with any outstanding calls. The
  // earlier call to Abort() on |data_source_| prevents further access to it.
  blocking_task_runner_->DeleteSoon(FROM_HERE, url_protocol_.release());
  blocking_task_runner_->DeleteSoon(FROM_HERE, glue_.release());
}
936
// Name used to tag MEDIA_LOG messages emitted by this demuxer.
std::string FFmpegDemuxer::GetDisplayName() const {
  return "FFmpegDemuxer";
}
940
// Identifies this implementation to the pipeline.
DemuxerType FFmpegDemuxer::GetDemuxerType() const {
  return DemuxerType::kFFmpegDemuxer;
}
944
// Begins asynchronous initialization: wires up the blocking URL protocol and
// FFmpegGlue, tweaks AVFormatContext probing options, then opens the context
// on the blocking sequence. |init_cb| is eventually resolved via
// OnOpenContextDone() and later steps.
void FFmpegDemuxer::Initialize(DemuxerHost* host,
                               PipelineStatusCallback init_cb) {
  DCHECK(task_runner_->RunsTasksInCurrentSequence());
  host_ = host;
  weak_this_ = cancel_pending_seek_factory_.GetWeakPtr();
  init_cb_ = std::move(init_cb);

  // Give a WeakPtr to BlockingUrlProtocol since we'll need to release it on the
  // blocking thread pool.
  url_protocol_ = std::make_unique<BlockingUrlProtocol>(
      data_source_, base::BindPostTaskToCurrentDefault(base::BindRepeating(
                        &FFmpegDemuxer::OnDataSourceError, weak_this_)));
  glue_ = std::make_unique<FFmpegGlue>(url_protocol_.get());
  AVFormatContext* format_context = glue_->format_context();

  // Disable ID3v1 tag reading to avoid costly seeks to end of file for data we
  // don't use.  FFmpeg will only read ID3v1 tags if no other metadata is
  // available, so add a metadata entry to ensure some is always present.
  av_dict_set(&format_context->metadata, "skip_id3v1_tags", "", 0);

  // Ensure ffmpeg doesn't give up too early while looking for stream params;
  // this does not increase the amount of data downloaded.  The default value
  // is 5 AV_TIME_BASE units (1 second each), which prevents some oddly muxed
  // streams from being detected properly; this value was chosen arbitrarily.
  format_context->max_analyze_duration = 60 * AV_TIME_BASE;

  // Open the AVFormatContext using our glue layer.
  blocking_task_runner_->PostTaskAndReplyWithResult(
      FROM_HERE,
      base::BindOnce(&FFmpegGlue::OpenContext, base::Unretained(glue_.get()),
                     is_local_file_),
      base::BindOnce(&FFmpegDemuxer::OnOpenContextDone,
                     weak_factory_.GetWeakPtr()));
}
979
// Aborts every outstanding stream read and the data source itself, undoes
// any EOF state the aborted read may have left in FFmpeg, and resolves a
// pending seek. Safe no-op after Stop().
void FFmpegDemuxer::AbortPendingReads() {
  DCHECK(task_runner_->RunsTasksInCurrentSequence());

  // If Stop() has been called, then drop this call.
  if (stopped_)
    return;

  // This should only be called after the demuxer has been initialized.
  DCHECK_GT(streams_.size(), 0u);

  // Abort all outstanding reads.
  for (const auto& stream : streams_) {
    if (stream)
      stream->Abort();
  }

  // It's important to invalidate read/seek completion callbacks to avoid any
  // errors that occur because of the data source abort.
  weak_factory_.InvalidateWeakPtrs();
  data_source_->Abort();

  // Aborting the read may cause EOF to be marked, undo this.
  blocking_task_runner_->PostTask(
      FROM_HERE,
      base::BindOnce(&UnmarkEndOfStreamAndClearError, glue_->format_context()));
  pending_read_ = false;

  // TODO(dalecurtis): We probably should report PIPELINE_ERROR_ABORT here
  // instead to avoid any preroll work that may be started upon return, but
  // currently the PipelineImpl does not know how to handle this.
  if (pending_seek_cb_)
    RunPendingSeekCB(PIPELINE_OK);
}
1013
// Shuts the demuxer down: fails any pending init/seek, stops the data source
// and URL protocol (in that specific order, see below), stops all streams,
// and invalidates WeakPtrs so in-flight tasks become no-ops. After this the
// object may be destroyed on another thread.
void FFmpegDemuxer::Stop() {
  DCHECK(task_runner_->RunsTasksInCurrentSequence());

  if (init_cb_)
    RunInitCB(PIPELINE_ERROR_ABORT);
  if (pending_seek_cb_)
    RunPendingSeekCB(PIPELINE_ERROR_ABORT);

  // The order of Stop() and Abort() is important here.  If Abort() is called
  // first, control may pass into FFmpeg where it can destruct buffers that are
  // in the process of being fulfilled by the DataSource.
  data_source_->Stop();
  url_protocol_->Abort();

  for (const auto& stream : streams_) {
    if (stream)
      stream->Stop();
  }

  data_source_ = nullptr;

  // Invalidate WeakPtrs on |task_runner_|, destruction may happen on another
  // thread. We don't need to wait for any outstanding tasks since they will all
  // fail to return after invalidating WeakPtrs.
  stopped_ = true;
  weak_factory_.InvalidateWeakPtrs();
  cancel_pending_seek_factory_.InvalidateWeakPtrs();
}
1042
// Intentionally a no-op: this demuxer needs no advance notice of a seek;
// cancellation is handled entirely by CancelPendingSeek().
void FFmpegDemuxer::StartWaitingForSeek(base::TimeDelta seek_time) {}
1044
// Cancels an in-flight seek by aborting outstanding reads. Callable from any
// thread; hops to |task_runner_| when invoked elsewhere.
void FFmpegDemuxer::CancelPendingSeek(base::TimeDelta seek_time) {
  if (task_runner_->RunsTasksInCurrentSequence()) {
    AbortPendingReads();
  } else {
    // Don't use GetWeakPtr() here since we are on the wrong thread.
    task_runner_->PostTask(
        FROM_HERE,
        base::BindOnce(&FFmpegDemuxer::AbortPendingReads, weak_this_));
  }
}
1055
// Starts an asynchronous seek to |time|; |cb| is stored and resolved in
// OnSeekFrameDone(). Only one seek may be outstanding at a time.
void FFmpegDemuxer::Seek(base::TimeDelta time, PipelineStatusCallback cb) {
  DCHECK(task_runner_->RunsTasksInCurrentSequence());
  DCHECK(!pending_seek_cb_);
  TRACE_EVENT_ASYNC_BEGIN0("media", "FFmpegDemuxer::Seek", this);
  pending_seek_cb_ = std::move(cb);
  SeekInternal(time, base::BindOnce(&FFmpegDemuxer::OnSeekFrameDone,
                                    weak_factory_.GetWeakPtr()));
}
1064
// This demuxer always reports itself as seekable.
bool FFmpegDemuxer::IsSeekable() const {
  return true;
}
1068
// Performs the actual seek: adjusts |time| for negative/clamped start times
// and Opus preroll, picks the stream to drive the seek, and posts
// av_seek_frame() (via AVSeekFrame) to the blocking sequence. |seek_cb|
// receives FFmpeg's result code.
void FFmpegDemuxer::SeekInternal(base::TimeDelta time,
                                 base::OnceCallback<void(int)> seek_cb) {
  DCHECK(task_runner_->RunsTasksInCurrentSequence());

  // FFmpeg requires seeks to be adjusted according to the lowest starting time.
  // Since EnqueuePacket() rebased negative timestamps by the start time, we
  // must correct the shift here.
  //
  // Additionally, to workaround limitations in how we expose seekable ranges to
  // Blink (http://crbug.com/137275), we also want to clamp seeks before the
  // start time to the start time.
  base::TimeDelta seek_time;
  if (start_time_.is_negative()) {
    seek_time = time + start_time_;
  } else {
    seek_time = std::max(start_time_, time);
  }

  // When seeking in an opus stream we need to ensure we deliver enough data to
  // satisfy the seek preroll; otherwise the audio at the actual seek time will
  // not be entirely accurate.
  FFmpegDemuxerStream* audio_stream =
      GetFirstEnabledFFmpegStream(DemuxerStream::AUDIO);
  if (audio_stream) {
    const AudioDecoderConfig& config = audio_stream->audio_decoder_config();
    if (config.codec() == AudioCodec::kOpus)
      seek_time = std::max(start_time_, seek_time - config.seek_preroll());
  }

  // Choose the seeking stream based on whether it contains the seek time, if
  // no match can be found prefer the preferred stream.
  //
  // TODO(dalecurtis): Currently FFmpeg does not ensure that all streams in a
  // given container will demux all packets after the seek point.  Instead it
  // only guarantees that all packets after the file position of the seek will
  // be demuxed.  It's an open question whether FFmpeg should fix this:
  // http://lists.ffmpeg.org/pipermail/ffmpeg-devel/2014-June/159212.html
  // Tracked by http://crbug.com/387996.
  FFmpegDemuxerStream* demux_stream = FindPreferredStreamForSeeking(seek_time);
  DCHECK(demux_stream);
  const AVStream* seeking_stream = demux_stream->av_stream();
  DCHECK(seeking_stream);

  blocking_task_runner_->PostTaskAndReplyWithResult(
      FROM_HERE,
      base::BindOnce(&AVSeekFrame, glue_->format_context(),
                     seeking_stream->index,
                     ConvertToTimeBase(seeking_stream->time_base, seek_time)),
      std::move(seek_cb));
}
1119
// Wall-clock offset of the media timeline, extracted from the container
// during initialization (may be null if none was found).
base::Time FFmpegDemuxer::GetTimelineOffset() const {
  return timeline_offset_;
}
1123
1124 std::vector<DemuxerStream*> FFmpegDemuxer::GetAllStreams() {
1125   DCHECK(task_runner_->RunsTasksInCurrentSequence());
1126   std::vector<DemuxerStream*> result;
1127   // Put enabled streams at the beginning of the list so that
1128   // MediaResource::GetFirstStream returns the enabled stream if there is one.
1129   // TODO(servolk): Revisit this after media track switching is supported.
1130   for (const auto& stream : streams_) {
1131     if (stream && stream->IsEnabled())
1132       result.push_back(stream.get());
1133   }
1134   // And include disabled streams at the end of the list.
1135   for (const auto& stream : streams_) {
1136     if (stream && !stream->IsEnabled())
1137       result.push_back(stream.get());
1138   }
1139   return result;
1140 }
1141
1142 FFmpegDemuxerStream* FFmpegDemuxer::GetFirstEnabledFFmpegStream(
1143     DemuxerStream::Type type) const {
1144   for (const auto& stream : streams_) {
1145     if (stream && stream->type() == type && stream->IsEnabled()) {
1146       return stream.get();
1147     }
1148   }
1149   return nullptr;
1150 }
1151
1152 base::TimeDelta FFmpegDemuxer::GetStartTime() const {
1153   return std::max(start_time_, base::TimeDelta());
1154 }
1155
1156 int64_t FFmpegDemuxer::GetMemoryUsage() const {
1157   int64_t allocation_size = 0;
1158   for (const auto& stream : streams_) {
1159     if (stream)
1160       allocation_size += stream->MemoryUsage();
1161   }
1162   return allocation_size;
1163 }
1164
// Container type as detected by FFmpegGlue, for UMA reporting.
absl::optional<container_names::MediaContainerName>
FFmpegDemuxer::GetContainerForMetrics() const {
  return container();
}
1169
1170 void FFmpegDemuxer::OnEncryptedMediaInitData(
1171     EmeInitDataType init_data_type,
1172     const std::string& encryption_key_id) {
1173   std::vector<uint8_t> key_id_local(encryption_key_id.begin(),
1174                                     encryption_key_id.end());
1175   encrypted_media_init_data_cb_.Run(init_data_type, key_id_local);
1176 }
1177
// Called when a stream frees buffer capacity; attempts to continue demuxing.
void FFmpegDemuxer::NotifyCapacityAvailable() {
  DCHECK(task_runner_->RunsTasksInCurrentSequence());
  ReadFrameIfNeeded();
}
1182
1183 void FFmpegDemuxer::NotifyBufferingChanged() {
1184   DCHECK(task_runner_->RunsTasksInCurrentSequence());
1185   Ranges<base::TimeDelta> buffered;
1186   bool initialized_buffered_ranges = false;
1187   for (const auto& stream : streams_) {
1188     if (!stream)
1189       continue;
1190     if (initialized_buffered_ranges) {
1191       buffered = buffered.IntersectionWith(stream->GetBufferedRanges());
1192     } else {
1193       buffered = stream->GetBufferedRanges();
1194       initialized_buffered_ranges = true;
1195     }
1196   }
1197   host_->OnBufferedTimeRangesChanged(buffered);
1198 }
1199
1200 // Helper for calculating the bitrate of the media based on information stored
1201 // in |format_context| or failing that the size and duration of the media.
1202 //
1203 // Returns 0 if a bitrate could not be determined.
1204 static int CalculateBitrate(AVFormatContext* format_context,
1205                             const base::TimeDelta& duration,
1206                             int64_t filesize_in_bytes) {
1207   // If there is a bitrate set on the container, use it.
1208   if (format_context->bit_rate > 0)
1209     return format_context->bit_rate;
1210
1211   // Then try to sum the bitrates individually per stream.
1212   int bitrate = 0;
1213   for (size_t i = 0; i < format_context->nb_streams; ++i) {
1214     AVCodecParameters* codec_parameters = format_context->streams[i]->codecpar;
1215     bitrate += codec_parameters->bit_rate;
1216   }
1217   if (bitrate > 0)
1218     return bitrate;
1219
1220   // See if we can approximate the bitrate as long as we have a filesize and
1221   // valid duration.
1222   if (duration <= base::TimeDelta() || duration == kInfiniteDuration ||
1223       !filesize_in_bytes)
1224     return 0;
1225
1226   // Don't multiply by 8 first; it will overflow if (filesize_in_bytes >= 2^60).
1227   return base::ClampRound(filesize_in_bytes * duration.ToHz() * 8);
1228 }
1229
// Reply from FFmpegGlue::OpenContext on the media sequence. Fails init if
// the demuxer was stopped or the open failed; on platforms with an HLS
// demuxer, hands HLS content off via DEMUXER_ERROR_DETECTED_HLS; otherwise
// proceeds to avformat_find_stream_info() on the blocking sequence.
void FFmpegDemuxer::OnOpenContextDone(bool result) {
  DCHECK(task_runner_->RunsTasksInCurrentSequence());
  if (stopped_) {
    MEDIA_LOG(ERROR, media_log_) << GetDisplayName() << ": bad state";
    RunInitCB(PIPELINE_ERROR_ABORT);
    return;
  }

#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(ENABLE_HLS_DEMUXER) || defined(TIZEN_MULTIMEDIA)
  if (glue_->detected_hls()) {
    MEDIA_LOG(INFO, media_log_)
        << GetDisplayName() << ": detected HLS manifest";
    RunInitCB(DEMUXER_ERROR_DETECTED_HLS);
    return;
  }
#endif

  if (!result) {
    MEDIA_LOG(ERROR, media_log_) << GetDisplayName() << ": open context failed";
    RunInitCB(DEMUXER_ERROR_COULD_NOT_OPEN);
    return;
  }

  // Fully initialize AVFormatContext by parsing the stream a little.
  blocking_task_runner_->PostTaskAndReplyWithResult(
      FROM_HERE,
      base::BindOnce(&avformat_find_stream_info, glue_->format_context(),
                     static_cast<AVDictionary**>(nullptr)),
      base::BindOnce(&FFmpegDemuxer::OnFindStreamInfoDone,
                     weak_factory_.GetWeakPtr()));
}
1261
1262 void FFmpegDemuxer::OnFindStreamInfoDone(int result) {
1263   DCHECK(task_runner_->RunsTasksInCurrentSequence());
1264   if (stopped_ || !data_source_) {
1265     MEDIA_LOG(ERROR, media_log_) << GetDisplayName() << ": bad state";
1266     RunInitCB(PIPELINE_ERROR_ABORT);
1267     return;
1268   }
1269
1270   if (result < 0) {
1271     MEDIA_LOG(ERROR, media_log_) << GetDisplayName()
1272                                  << ": find stream info failed";
1273     RunInitCB(DEMUXER_ERROR_COULD_NOT_PARSE);
1274     return;
1275   }
1276
1277   // Create demuxer stream entries for each possible AVStream. Each stream
1278   // is examined to determine if it is supported or not (is the codec enabled
1279   // for it in this release?). Unsupported streams are skipped, allowing for
1280   // partial playback. At least one audio or video stream must be playable.
1281   AVFormatContext* format_context = glue_->format_context();
1282   streams_.resize(format_context->nb_streams);
1283
1284   std::unique_ptr<MediaTracks> media_tracks(new MediaTracks());
1285
1286   DCHECK(track_id_to_demux_stream_map_.empty());
1287
1288   // If available, |start_time_| will be set to the lowest stream start time.
1289   start_time_ = kInfiniteDuration;
1290
1291   base::TimeDelta max_duration;
1292   int supported_audio_track_count = 0;
1293   int supported_video_track_count = 0;
1294   bool has_opus_or_vorbis_audio = false;
1295   bool needs_negative_timestamp_fixup = false;
1296   for (size_t i = 0; i < format_context->nb_streams; ++i) {
1297     AVStream* stream = format_context->streams[i];
1298     const AVCodecParameters* codec_parameters = stream->codecpar;
1299     const AVMediaType codec_type = codec_parameters->codec_type;
1300     const AVCodecID codec_id = codec_parameters->codec_id;
1301     // Skip streams which are not properly detected.
1302     if (codec_id == AV_CODEC_ID_NONE) {
1303       stream->discard = AVDISCARD_ALL;
1304       continue;
1305     }
1306
1307     if (codec_type == AVMEDIA_TYPE_AUDIO) {
1308       // Log the codec detected, whether it is supported or not, and whether or
1309       // not we have already detected a supported codec in another stream.
1310       const int32_t codec_hash = HashCodecName(GetCodecName(codec_id));
1311       base::UmaHistogramSparse("Media.DetectedAudioCodecHash", codec_hash);
1312       if (is_local_file_) {
1313         base::UmaHistogramSparse("Media.DetectedAudioCodecHash.Local",
1314                                  codec_hash);
1315       }
1316     } else if (codec_type == AVMEDIA_TYPE_VIDEO) {
1317       // Log the codec detected, whether it is supported or not, and whether or
1318       // not we have already detected a supported codec in another stream.
1319       const int32_t codec_hash = HashCodecName(GetCodecName(codec_id));
1320       base::UmaHistogramSparse("Media.DetectedVideoCodecHash", codec_hash);
1321       if (is_local_file_) {
1322         base::UmaHistogramSparse("Media.DetectedVideoCodecHash.Local",
1323                                  codec_hash);
1324       }
1325
1326 #if BUILDFLAG(ENABLE_PLATFORM_HEVC)
1327       if (codec_id == AV_CODEC_ID_HEVC) {
1328         // If ffmpeg is built without HEVC parser/decoder support, it will be
1329         // able to demux HEVC based solely on container-provided information,
1330         // but unable to get some of the parameters without parsing the stream
1331         // (e.g. coded size needs to be read from SPS, pixel format is typically
1332         // deduced from decoder config in hvcC box). These are not really needed
1333         // when using external decoder (e.g. hardware decoder), so override them
1334         // to make sure this translates into a valid VideoDecoderConfig. Coded
1335         // size is overridden in AVStreamToVideoDecoderConfig().
1336         if (stream->codecpar->format == AV_PIX_FMT_NONE)
1337           stream->codecpar->format = AV_PIX_FMT_YUV420P;
1338       }
1339 #endif
1340     } else if (codec_type == AVMEDIA_TYPE_SUBTITLE) {
1341       stream->discard = AVDISCARD_ALL;
1342       continue;
1343     } else {
1344       stream->discard = AVDISCARD_ALL;
1345       continue;
1346     }
1347
1348     // Skip disabled tracks. The mov demuxer translates MOV_TKHD_FLAG_ENABLED to
1349     // AV_DISPOSITION_DEFAULT.
1350     if (container() == container_names::MediaContainerName::kContainerMOV &&
1351         !(stream->disposition & AV_DISPOSITION_DEFAULT)) {
1352       stream->discard = AVDISCARD_ALL;
1353       continue;
1354     }
1355
1356     // Attempt to create a FFmpegDemuxerStream from the AVStream. This will
1357     // return nullptr if the AVStream is invalid. Validity checks will verify
1358     // things like: codec, channel layout, sample/pixel format, etc...
1359     std::unique_ptr<FFmpegDemuxerStream> demuxer_stream =
1360         FFmpegDemuxerStream::Create(this, stream, media_log_);
1361     if (demuxer_stream.get()) {
1362       streams_[i] = std::move(demuxer_stream);
1363     } else {
1364       if (codec_type == AVMEDIA_TYPE_AUDIO) {
1365         MEDIA_LOG(INFO, media_log_)
1366             << GetDisplayName()
1367             << ": skipping invalid or unsupported audio track";
1368       } else if (codec_type == AVMEDIA_TYPE_VIDEO) {
1369         MEDIA_LOG(INFO, media_log_)
1370             << GetDisplayName()
1371             << ": skipping invalid or unsupported video track";
1372       }
1373
1374       // This AVStream does not successfully convert.
1375       continue;
1376     }
1377
1378     StreamParser::TrackId track_id =
1379         static_cast<StreamParser::TrackId>(media_tracks->tracks().size() + 1);
1380     auto track_label =
1381         MediaTrack::Label(streams_[i]->GetMetadata("handler_name"));
1382     auto track_language =
1383         MediaTrack::Language(streams_[i]->GetMetadata("language"));
1384
1385     // Some metadata is named differently in FFmpeg for webm files.
1386     if (glue_->container() ==
1387         container_names::MediaContainerName::kContainerWEBM) {
1388       track_label = MediaTrack::Label(streams_[i]->GetMetadata("title"));
1389     }
1390
1391     if (codec_type == AVMEDIA_TYPE_AUDIO) {
1392       ++supported_audio_track_count;
1393       streams_[i]->SetEnabled(supported_audio_track_count == 1,
1394                               base::TimeDelta());
1395     } else if (codec_type == AVMEDIA_TYPE_VIDEO) {
1396       ++supported_video_track_count;
1397       streams_[i]->SetEnabled(supported_video_track_count == 1,
1398                               base::TimeDelta());
1399     }
1400
1401     // TODO(chcunningham): Remove the IsValidConfig() checks below. If the
1402     // config isn't valid we shouldn't have created a demuxer stream nor
1403     // an entry in |media_tracks|, so the check should always be true.
1404     if ((codec_type == AVMEDIA_TYPE_AUDIO &&
1405          media_tracks->getAudioConfig(track_id).IsValidConfig()) ||
1406         (codec_type == AVMEDIA_TYPE_VIDEO &&
1407          media_tracks->getVideoConfig(track_id).IsValidConfig())) {
1408       MEDIA_LOG(INFO, media_log_)
1409           << GetDisplayName()
1410           << ": skipping duplicate media stream id=" << track_id;
1411       continue;
1412     }
1413
1414     // Note when we find our audio/video stream (we only want one of each) and
1415     // record src= playback UMA stats for the stream's decoder config.
1416     MediaTrack* media_track = nullptr;
1417     if (codec_type == AVMEDIA_TYPE_AUDIO) {
1418       AudioDecoderConfig audio_config = streams_[i]->audio_decoder_config();
1419       RecordAudioCodecStats(audio_config);
1420
1421       media_track = media_tracks->AddAudioTrack(audio_config, track_id,
1422                                                 MediaTrack::Kind("main"),
1423                                                 track_label, track_language);
1424       media_track->set_id(MediaTrack::Id(base::NumberToString(track_id)));
1425       DCHECK(track_id_to_demux_stream_map_.find(media_track->id()) ==
1426              track_id_to_demux_stream_map_.end());
1427       track_id_to_demux_stream_map_[media_track->id()] = streams_[i].get();
1428     } else if (codec_type == AVMEDIA_TYPE_VIDEO) {
1429       VideoDecoderConfig video_config = streams_[i]->video_decoder_config();
1430
1431       RecordVideoCodecStats(glue_->container(), video_config,
1432                             stream->codecpar->color_range, media_log_);
1433
1434       media_track = media_tracks->AddVideoTrack(video_config, track_id,
1435                                                 MediaTrack::Kind("main"),
1436                                                 track_label, track_language);
1437       media_track->set_id(MediaTrack::Id(base::NumberToString(track_id)));
1438       DCHECK(track_id_to_demux_stream_map_.find(media_track->id()) ==
1439              track_id_to_demux_stream_map_.end());
1440       track_id_to_demux_stream_map_[media_track->id()] = streams_[i].get();
1441     }
1442
1443     max_duration = std::max(max_duration, streams_[i]->duration());
1444
1445     base::TimeDelta start_time = ExtractStartTime(stream);
1446
1447     // Note: This value is used for seeking, so we must take the true value and
1448     // not the one possibly clamped to zero below.
1449     if (start_time != kNoTimestamp && start_time < start_time_)
1450       start_time_ = start_time;
1451
1452     const bool is_opus_or_vorbis =
1453         codec_id == AV_CODEC_ID_OPUS || codec_id == AV_CODEC_ID_VORBIS;
1454     if (!has_opus_or_vorbis_audio)
1455       has_opus_or_vorbis_audio = is_opus_or_vorbis;
1456
1457     if (codec_type == AVMEDIA_TYPE_AUDIO && start_time.is_negative() &&
1458         is_opus_or_vorbis) {
1459       needs_negative_timestamp_fixup = true;
1460
1461       // Fixup the seeking information to avoid selecting the audio stream
1462       // simply because it has a lower starting time.
1463       start_time = base::TimeDelta();
1464     }
1465
1466     streams_[i]->set_start_time(start_time);
1467   }
1468
1469   if (media_tracks->tracks().empty()) {
1470     MEDIA_LOG(ERROR, media_log_) << GetDisplayName()
1471                                  << ": no supported streams";
1472     RunInitCB(DEMUXER_ERROR_NO_SUPPORTED_STREAMS);
1473     return;
1474   }
1475
1476   if (format_context->duration != kNoFFmpegTimestamp) {
1477     // If there is a duration value in the container use that to find the
1478     // maximum between it and the duration from A/V streams.
1479     const AVRational av_time_base = {1, AV_TIME_BASE};
1480     max_duration =
1481         std::max(max_duration,
1482                  ConvertFromTimeBase(av_time_base, format_context->duration));
1483   } else {
1484     // The duration is unknown, in which case this is likely a live stream.
1485     max_duration = kInfiniteDuration;
1486   }
1487
1488   // Chained ogg is only allowed on single track audio only opus/vorbis media.
1489   const bool needs_chained_ogg_fixup =
1490       glue_->container() ==
1491           container_names::MediaContainerName::kContainerOgg &&
1492       supported_audio_track_count == 1 && !supported_video_track_count &&
1493       has_opus_or_vorbis_audio;
1494
1495   // FFmpeg represents audio data marked as before the beginning of stream as
1496   // having negative timestamps.  This data must be discarded after it has been
1497   // decoded, not before since it is used to warmup the decoder.  There are
1498   // currently two known cases for this: vorbis in ogg and opus.
1499   //
1500   // For API clarity, it was decided that the rest of the media pipeline should
1501   // not be exposed to negative timestamps.  Which means we need to rebase these
1502   // negative timestamps and mark them for discard post decoding.
1503   //
1504   // Post-decode frame dropping for packets with negative timestamps is outlined
1505   // in section A.2 in the Ogg Vorbis spec:
1506   // http://xiph.org/vorbis/doc/Vorbis_I_spec.html
1507   //
1508   // FFmpeg's use of negative timestamps for opus pre-skip is nonstandard, but
1509   // for more information on pre-skip see section 4.2 of the Ogg Opus spec:
1510   // https://tools.ietf.org/html/draft-ietf-codec-oggopus-08#section-4.2
1511   if (needs_negative_timestamp_fixup || needs_chained_ogg_fixup) {
1512     for (auto& stream : streams_) {
1513       if (!stream)
1514         continue;
1515       if (needs_negative_timestamp_fixup)
1516         stream->enable_negative_timestamp_fixups();
1517       if (needs_chained_ogg_fixup)
1518         stream->enable_chained_ogg_fixups();
1519     }
1520   }
1521
1522   // If no start time could be determined, default to zero.
1523   if (start_time_ == kInfiniteDuration)
1524     start_time_ = base::TimeDelta();
1525
1526   // MPEG-4 B-frames cause grief for a simple container like AVI. Enable PTS
1527   // generation so we always get timestamps, see http://crbug.com/169570
1528   if (glue_->container() ==
1529       container_names::MediaContainerName::kContainerAVI) {
1530     format_context->flags |= AVFMT_FLAG_GENPTS;
1531   }
1532
1533   // FFmpeg will incorrectly adjust the start time of MP3 files into the future
1534   // based on discard samples. We were unable to fix this upstream without
1535   // breaking ffmpeg functionality. https://crbug.com/1062037
1536   if (glue_->container() ==
1537       container_names::MediaContainerName::kContainerMP3) {
1538     start_time_ = base::TimeDelta();
1539   }
1540
1541   // For testing purposes, don't overwrite the timeline offset if set already.
1542   if (timeline_offset_.is_null()) {
1543     timeline_offset_ =
1544         ExtractTimelineOffset(glue_->container(), format_context);
1545   }
1546
1547   // Since we're shifting the externally visible start time to zero, we need to
1548   // adjust the timeline offset to compensate.
1549   if (!timeline_offset_.is_null() && start_time_.is_negative())
1550     timeline_offset_ += start_time_;
1551
1552   if (max_duration == kInfiniteDuration && !timeline_offset_.is_null()) {
1553     SetLiveness(StreamLiveness::kLive);
1554   } else if (max_duration != kInfiniteDuration) {
1555     SetLiveness(StreamLiveness::kRecorded);
1556   } else {
1557     SetLiveness(StreamLiveness::kUnknown);
1558   }
1559
1560   // Good to go: set the duration and bitrate and notify we're done
1561   // initializing.
1562   host_->SetDuration(max_duration);
1563   duration_ = max_duration;
1564   duration_known_ = (max_duration != kInfiniteDuration);
1565
1566   int64_t filesize_in_bytes = 0;
1567   url_protocol_->GetSize(&filesize_in_bytes);
1568   bitrate_ = CalculateBitrate(format_context, max_duration, filesize_in_bytes);
1569   if (bitrate_ > 0)
1570     data_source_->SetBitrate(bitrate_);
1571
1572   LogMetadata(format_context, max_duration);
1573   media_tracks_updated_cb_.Run(std::move(media_tracks));
1574
1575   RunInitCB(PIPELINE_OK);
1576 }
1577
1578 void FFmpegDemuxer::LogMetadata(AVFormatContext* avctx,
1579                                 base::TimeDelta max_duration) {
1580   std::vector<AudioDecoderConfig> audio_tracks;
1581   std::vector<VideoDecoderConfig> video_tracks;
1582
1583   DCHECK_EQ(avctx->nb_streams, streams_.size());
1584
1585   for (auto const& stream : streams_) {
1586     if (!stream)
1587       continue;
1588     if (stream->type() == DemuxerStream::AUDIO) {
1589       audio_tracks.push_back(stream->audio_decoder_config());
1590     } else if (stream->type() == DemuxerStream::VIDEO) {
1591       video_tracks.push_back(stream->video_decoder_config());
1592     }
1593   }
1594   media_log_->SetProperty<MediaLogProperty::kAudioTracks>(audio_tracks);
1595   media_log_->SetProperty<MediaLogProperty::kVideoTracks>(video_tracks);
1596   media_log_->SetProperty<MediaLogProperty::kMaxDuration>(max_duration);
1597   media_log_->SetProperty<MediaLogProperty::kStartTime>(start_time_);
1598   media_log_->SetProperty<MediaLogProperty::kBitrate>(bitrate_);
1599 }
1600
1601 FFmpegDemuxerStream* FFmpegDemuxer::FindStreamWithLowestStartTimestamp(
1602     bool enabled) {
1603   FFmpegDemuxerStream* lowest_start_time_stream = nullptr;
1604   for (const auto& stream : streams_) {
1605     if (!stream || stream->IsEnabled() != enabled)
1606       continue;
1607     if (av_stream_get_first_dts(stream->av_stream()) == kInvalidPTSMarker)
1608       continue;
1609     if (!lowest_start_time_stream ||
1610         stream->start_time() < lowest_start_time_stream->start_time()) {
1611       lowest_start_time_stream = stream.get();
1612     }
1613   }
1614   return lowest_start_time_stream;
1615 }
1616
1617 FFmpegDemuxerStream* FFmpegDemuxer::FindPreferredStreamForSeeking(
1618     base::TimeDelta seek_time) {
1619   // If we have a selected/enabled video stream and its start time is lower
1620   // than the |seek_time| or unknown, then always prefer it for seeking.
1621   for (const auto& stream : streams_) {
1622     if (!stream)
1623       continue;
1624
1625     if (stream->type() != DemuxerStream::VIDEO)
1626       continue;
1627
1628     if (av_stream_get_first_dts(stream->av_stream()) == kInvalidPTSMarker)
1629       continue;
1630
1631     if (!stream->IsEnabled())
1632       continue;
1633
1634     if (stream->start_time() <= seek_time)
1635       return stream.get();
1636   }
1637
1638   // If video stream is not present or |seek_time| is lower than the video start
1639   // time, then try to find an enabled stream with the lowest start time.
1640   FFmpegDemuxerStream* lowest_start_time_enabled_stream =
1641       FindStreamWithLowestStartTimestamp(true);
1642   if (lowest_start_time_enabled_stream &&
1643       lowest_start_time_enabled_stream->start_time() <= seek_time) {
1644     return lowest_start_time_enabled_stream;
1645   }
1646
1647   // If there's no enabled streams to consider from, try a disabled stream with
1648   // the lowest known start time.
1649   FFmpegDemuxerStream* lowest_start_time_disabled_stream =
1650       FindStreamWithLowestStartTimestamp(false);
1651   if (lowest_start_time_disabled_stream &&
1652       lowest_start_time_disabled_stream->start_time() <= seek_time) {
1653     return lowest_start_time_disabled_stream;
1654   }
1655
1656   // Otherwise fall back to any other stream.
1657   for (const auto& stream : streams_) {
1658     if (stream)
1659       return stream.get();
1660   }
1661
1662   NOTREACHED_NORETURN();
1663 }
1664
1665 void FFmpegDemuxer::OnSeekFrameDone(int result) {
1666   DCHECK(task_runner_->RunsTasksInCurrentSequence());
1667   DCHECK(pending_seek_cb_);
1668
1669   if (stopped_) {
1670     MEDIA_LOG(ERROR, media_log_) << GetDisplayName() << ": bad state";
1671     RunPendingSeekCB(PIPELINE_ERROR_ABORT);
1672     return;
1673   }
1674
1675   if (result < 0) {
1676     MEDIA_LOG(ERROR, media_log_) << GetDisplayName() << ": demuxer seek failed";
1677     RunPendingSeekCB(PIPELINE_ERROR_READ);
1678     return;
1679   }
1680
1681   // Tell streams to flush buffers due to seeking.
1682   for (const auto& stream : streams_) {
1683     if (stream)
1684       stream->FlushBuffers(false);
1685   }
1686
1687   // Resume reading until capacity.
1688   ReadFrameIfNeeded();
1689
1690   // Notify we're finished seeking.
1691   RunPendingSeekCB(PIPELINE_OK);
1692 }
1693
1694 void FFmpegDemuxer::FindAndEnableProperTracks(
1695     const std::vector<MediaTrack::Id>& track_ids,
1696     base::TimeDelta curr_time,
1697     DemuxerStream::Type track_type,
1698     TrackChangeCB change_completed_cb) {
1699   DCHECK(task_runner_->RunsTasksInCurrentSequence());
1700
1701   std::set<FFmpegDemuxerStream*> enabled_streams;
1702   for (const auto& id : track_ids) {
1703     auto it = track_id_to_demux_stream_map_.find(id);
1704     if (it == track_id_to_demux_stream_map_.end())
1705       continue;
1706     FFmpegDemuxerStream* stream = it->second;
1707     DCHECK_EQ(track_type, stream->type());
1708     // TODO(servolk): Remove after multiple enabled audio tracks are supported
1709     // by the media::RendererImpl.
1710     if (!enabled_streams.empty()) {
1711       MEDIA_LOG(INFO, media_log_)
1712           << "Only one enabled audio track is supported, ignoring track " << id;
1713       continue;
1714     }
1715     enabled_streams.insert(stream);
1716     stream->SetEnabled(true, curr_time);
1717   }
1718
1719   // First disable all streams that need to be disabled and then enable streams
1720   // that are enabled.
1721   for (const auto& stream : streams_) {
1722     if (stream && stream->type() == track_type &&
1723         enabled_streams.find(stream.get()) == enabled_streams.end()) {
1724       DVLOG(1) << __func__ << ": disabling stream " << stream.get();
1725       stream->SetEnabled(false, curr_time);
1726     }
1727   }
1728
1729   std::vector<DemuxerStream*> streams(enabled_streams.begin(),
1730                                       enabled_streams.end());
1731   std::move(change_completed_cb).Run(track_type, streams);
1732 }
1733
// Audio track-change entry point: forwards to FindAndEnableProperTracks()
// with DemuxerStream::AUDIO. Unlike the video path
// (OnSelectedVideoTrackChanged), no seek is issued here.
void FFmpegDemuxer::OnEnabledAudioTracksChanged(
    const std::vector<MediaTrack::Id>& track_ids,
    base::TimeDelta curr_time,
    TrackChangeCB change_completed_cb) {
  FindAndEnableProperTracks(track_ids, curr_time, DemuxerStream::AUDIO,
                            std::move(change_completed_cb));
}
1741
1742 void FFmpegDemuxer::OnVideoSeekedForTrackChange(
1743     DemuxerStream* video_stream,
1744     base::OnceClosure seek_completed_cb,
1745     int result) {
1746   static_cast<FFmpegDemuxerStream*>(video_stream)->FlushBuffers(true);
1747   // TODO(crbug.com/1424380): Report seek failures for track changes too.
1748   std::move(seek_completed_cb).Run();
1749 }
1750
1751 void FFmpegDemuxer::SeekOnVideoTrackChange(
1752     base::TimeDelta seek_to_time,
1753     TrackChangeCB seek_completed_cb,
1754     DemuxerStream::Type stream_type,
1755     const std::vector<DemuxerStream*>& streams) {
1756   DCHECK_EQ(stream_type, DemuxerStream::VIDEO);
1757   if (streams.size() != 1u) {
1758     // If FFmpegDemuxer::FindAndEnableProperTracks() was not able to find the
1759     // selected streams in the ID->DemuxerStream map, then its possible for
1760     // this vector to be empty. If that's the case, we don't want to bother
1761     // with seeking, and just call the callback immediately.
1762     std::move(seek_completed_cb).Run(stream_type, streams);
1763     return;
1764   }
1765   SeekInternal(seek_to_time,
1766                base::BindOnce(&FFmpegDemuxer::OnVideoSeekedForTrackChange,
1767                               weak_factory_.GetWeakPtr(), streams[0],
1768                               base::BindOnce(std::move(seek_completed_cb),
1769                                              DemuxerStream::VIDEO, streams)));
1770 }
1771
1772 void FFmpegDemuxer::OnSelectedVideoTrackChanged(
1773     const std::vector<MediaTrack::Id>& track_ids,
1774     base::TimeDelta curr_time,
1775     TrackChangeCB change_completed_cb) {
1776   // Find tracks -> Seek track -> run callback.
1777   FindAndEnableProperTracks(
1778       track_ids, curr_time, DemuxerStream::VIDEO,
1779       track_ids.empty() ? std::move(change_completed_cb)
1780                         : base::BindOnce(&FFmpegDemuxer::SeekOnVideoTrackChange,
1781                                          weak_factory_.GetWeakPtr(), curr_time,
1782                                          std::move(change_completed_cb)));
1783 }
1784
1785 void FFmpegDemuxer::ReadFrameIfNeeded() {
1786   DCHECK(task_runner_->RunsTasksInCurrentSequence());
1787
1788   // Make sure we have work to do before reading.
1789   if (stopped_ || !StreamsHaveAvailableCapacity() || pending_read_ ||
1790       pending_seek_cb_) {
1791     return;
1792   }
1793
1794   // Allocate and read an AVPacket from the media. Save |packet_ptr| since
1795   // evaluation order of packet.get() and std::move(&packet) is
1796   // undefined.
1797   auto packet = ScopedAVPacket::Allocate();
1798   AVPacket* packet_ptr = packet.get();
1799
1800   pending_read_ = true;
1801   blocking_task_runner_->PostTaskAndReplyWithResult(
1802       FROM_HERE,
1803       base::BindOnce(&ReadFrameAndDiscardEmpty, glue_->format_context(),
1804                      packet_ptr),
1805       base::BindOnce(&FFmpegDemuxer::OnReadFrameDone,
1806                      weak_factory_.GetWeakPtr(), std::move(packet)));
1807 }
1808
1809 void FFmpegDemuxer::OnReadFrameDone(ScopedAVPacket packet, int result) {
1810   DCHECK(task_runner_->RunsTasksInCurrentSequence());
1811   DCHECK(pending_read_);
1812   pending_read_ = false;
1813
1814   if (stopped_ || pending_seek_cb_)
1815     return;
1816
1817   // Consider the stream as ended if:
1818   // - either underlying ffmpeg returned an error
1819   // - or FFMpegDemuxer reached the maximum allowed memory usage.
1820   if (result < 0 || IsMaxMemoryUsageReached()) {
1821     if (result < 0) {
1822       MEDIA_LOG(DEBUG, media_log_)
1823           << GetDisplayName()
1824           << ": av_read_frame(): " << AVErrorToString(result);
1825     } else {
1826       MEDIA_LOG(DEBUG, media_log_)
1827           << GetDisplayName() << ": memory limit exceeded";
1828     }
1829
1830     // Update the duration based on the highest elapsed time across all streams.
1831     base::TimeDelta max_duration;
1832     for (const auto& stream : streams_) {
1833       if (!stream)
1834         continue;
1835
1836       base::TimeDelta duration = stream->duration();
1837       if (duration != kNoTimestamp && duration > max_duration)
1838         max_duration = duration;
1839     }
1840
1841     if (duration_ == kInfiniteDuration || max_duration > duration_) {
1842       host_->SetDuration(max_duration);
1843       duration_known_ = true;
1844       duration_ = max_duration;
1845     }
1846
1847     // If we have reached the end of stream, tell the downstream filters about
1848     // the event.
1849     StreamHasEnded();
1850     return;
1851   }
1852
1853   // Queue the packet with the appropriate stream; we must defend against ffmpeg
1854   // giving us a bad stream index.  See http://crbug.com/698549 for example.
1855   if (packet->stream_index >= 0 &&
1856       static_cast<size_t>(packet->stream_index) < streams_.size()) {
1857     // This is ensured by ReadFrameAndDiscardEmpty.
1858     DCHECK(packet->data);
1859     DCHECK(packet->size);
1860
1861     if (auto& demuxer_stream = streams_[packet->stream_index]) {
1862       if (demuxer_stream->IsEnabled())
1863         demuxer_stream->EnqueuePacket(std::move(packet));
1864
1865       // If duration estimate was incorrect, update it and tell higher layers.
1866       if (duration_known_) {
1867         const base::TimeDelta duration = demuxer_stream->duration();
1868         if (duration != kNoTimestamp && duration > duration_) {
1869           duration_ = duration;
1870           host_->SetDuration(duration_);
1871         }
1872       }
1873     }
1874   }
1875
1876   // Keep reading until we've reached capacity.
1877   ReadFrameIfNeeded();
1878 }
1879
1880 bool FFmpegDemuxer::StreamsHaveAvailableCapacity() {
1881   DCHECK(task_runner_->RunsTasksInCurrentSequence());
1882   for (const auto& stream : streams_) {
1883     if (stream && stream->IsEnabled() && stream->HasAvailableCapacity())
1884       return true;
1885   }
1886   return false;
1887 }
1888
1889 bool FFmpegDemuxer::IsMaxMemoryUsageReached() const {
1890   DCHECK(task_runner_->RunsTasksInCurrentSequence());
1891
1892   size_t memory_left =
1893       GetDemuxerMemoryLimit(Demuxer::DemuxerTypes::kFFmpegDemuxer);
1894   for (const auto& stream : streams_) {
1895     if (!stream)
1896       continue;
1897
1898     size_t stream_memory_usage = stream->MemoryUsage();
1899     if (stream_memory_usage > memory_left)
1900       return true;
1901     memory_left -= stream_memory_usage;
1902   }
1903   return false;
1904 }
1905
1906 void FFmpegDemuxer::StreamHasEnded() {
1907   DCHECK(task_runner_->RunsTasksInCurrentSequence());
1908   for (const auto& stream : streams_) {
1909     if (stream)
1910       stream->SetEndOfStream();
1911   }
1912 }
1913
// Error callback from the data source: logs the failure and surfaces it to
// the pipeline host as a read error.
void FFmpegDemuxer::OnDataSourceError() {
  MEDIA_LOG(ERROR, media_log_) << GetDisplayName() << ": data source error";
  host_->OnDemuxerError(PIPELINE_ERROR_READ);
}
1918
1919 void FFmpegDemuxer::NotifyDemuxerError(PipelineStatus status) {
1920   MEDIA_LOG(ERROR, media_log_) << GetDisplayName()
1921                                << ": demuxer error: " << status;
1922   host_->OnDemuxerError(status);
1923 }
1924
1925 void FFmpegDemuxer::SetLiveness(StreamLiveness liveness) {
1926   DCHECK(task_runner_->RunsTasksInCurrentSequence());
1927   for (const auto& stream : streams_) {
1928     if (stream)
1929       stream->SetLiveness(liveness);
1930   }
1931 }
1932
// Resolves the stored initialization callback with |status|, closing the
// matching "FFmpegDemuxer::Initialize" async trace event. Must only be
// called while |init_cb_| is still pending (enforced by the DCHECK).
void FFmpegDemuxer::RunInitCB(PipelineStatus status) {
  DCHECK(task_runner_->RunsTasksInCurrentSequence());
  DCHECK(init_cb_);
  TRACE_EVENT_ASYNC_END1("media", "FFmpegDemuxer::Initialize", this, "status",
                         PipelineStatusToString(status));
  std::move(init_cb_).Run(status);
}
1940
// Resolves the pending seek callback with |status|, closing the matching
// "FFmpegDemuxer::Seek" async trace event. Must only be called while
// |pending_seek_cb_| is still pending (enforced by the DCHECK).
void FFmpegDemuxer::RunPendingSeekCB(PipelineStatus status) {
  DCHECK(task_runner_->RunsTasksInCurrentSequence());
  DCHECK(pending_seek_cb_);
  TRACE_EVENT_ASYNC_END1("media", "FFmpegDemuxer::Seek", this, "status",
                         PipelineStatusToString(status));
  std::move(pending_seek_cb_).Run(status);
}
1948
1949 }  // namespace media