1 // Copyright 2012 The Chromium Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "media/filters/ffmpeg_demuxer.h"
12 #include "base/base64.h"
13 #include "base/feature_list.h"
14 #include "base/functional/bind.h"
15 #include "base/functional/callback_helpers.h"
16 #include "base/memory/ptr_util.h"
17 #include "base/metrics/histogram_functions.h"
18 #include "base/metrics/histogram_macros.h"
19 #include "base/numerics/safe_conversions.h"
20 #include "base/strings/string_number_conversions.h"
21 #include "base/strings/string_util.h"
22 #include "base/sys_byteorder.h"
23 #include "base/task/bind_post_task.h"
24 #include "base/task/sequenced_task_runner.h"
25 #include "base/task/thread_pool.h"
26 #include "base/time/time.h"
27 #include "base/trace_event/trace_event.h"
28 #include "build/build_config.h"
29 #include "media/base/decrypt_config.h"
30 #include "media/base/demuxer.h"
31 #include "media/base/demuxer_memory_limit.h"
32 #include "media/base/limits.h"
33 #include "media/base/media_switches.h"
34 #include "media/base/media_tracks.h"
35 #include "media/base/media_types.h"
36 #include "media/base/sample_rates.h"
37 #include "media/base/supported_types.h"
38 #include "media/base/timestamp_constants.h"
39 #include "media/base/video_codecs.h"
40 #include "media/base/webvtt_util.h"
41 #include "media/ffmpeg/ffmpeg_common.h"
42 #include "media/filters/ffmpeg_aac_bitstream_converter.h"
43 #include "media/filters/ffmpeg_bitstream_converter.h"
44 #include "media/filters/ffmpeg_glue.h"
45 #include "media/filters/ffmpeg_h264_to_annex_b_bitstream_converter.h"
46 #include "media/formats/mpeg/mpeg1_audio_stream_parser.h"
47 #include "media/formats/webm/webm_crypto_helpers.h"
48 #include "media/media_buildflags.h"
49 #include "third_party/ffmpeg/ffmpeg_features.h"
50 #include "third_party/ffmpeg/libavcodec/packet.h"
52 #if BUILDFLAG(ENABLE_PLATFORM_HEVC)
53 #include "media/filters/ffmpeg_h265_to_annex_b_bitstream_converter.h"
60 constexpr int64_t kInvalidPTSMarker = static_cast<int64_t>(0x8000000000000000);
62 void SetAVStreamDiscard(AVStream* stream, AVDiscard discard) {
64 stream->discard = discard;
67 int AVSeekFrame(AVFormatContext* s, int stream_index, int64_t timestamp) {
68 // Seek to a timestamp <= to the desired timestamp.
69 int result = av_seek_frame(s, stream_index, timestamp, AVSEEK_FLAG_BACKWARD);
74 // Seek to the nearest keyframe, wherever that may be.
75 return av_seek_frame(s, stream_index, timestamp, 0);
80 static base::Time ExtractTimelineOffset(
81 container_names::MediaContainerName container,
82 const AVFormatContext* format_context) {
83 if (container == container_names::MediaContainerName::kContainerWEBM) {
84 const AVDictionaryEntry* entry =
85 av_dict_get(format_context->metadata, "creation_time", nullptr, 0);
87 base::Time timeline_offset;
89 // FFmpegDemuxerTests assume base::Time::FromUTCString() is used here.
90 if (entry != nullptr && entry->value != nullptr &&
91 base::Time::FromUTCString(entry->value, &timeline_offset)) {
92 return timeline_offset;
99 static base::TimeDelta FramesToTimeDelta(int frames, double sample_rate) {
100 return base::Microseconds(frames * base::Time::kMicrosecondsPerSecond /
104 static base::TimeDelta ExtractStartTime(AVStream* stream) {
105 // The default start time is zero.
106 base::TimeDelta start_time;
108 // First try to use the |start_time| value as is.
109 if (stream->start_time != kNoFFmpegTimestamp)
110 start_time = ConvertFromTimeBase(stream->time_base, stream->start_time);
112 // Next try to use the first DTS value, for codecs where we know PTS == DTS
113 // (excludes all H26x codecs). The start time must be returned in PTS.
114 if (av_stream_get_first_dts(stream) != kNoFFmpegTimestamp &&
115 stream->codecpar->codec_id != AV_CODEC_ID_HEVC &&
116 stream->codecpar->codec_id != AV_CODEC_ID_H264 &&
117 stream->codecpar->codec_id != AV_CODEC_ID_MPEG4) {
118 const base::TimeDelta first_pts =
119 ConvertFromTimeBase(stream->time_base, av_stream_get_first_dts(stream));
120 if (first_pts < start_time)
121 start_time = first_pts;
127 // Record audio decoder config UMA stats corresponding to a src= playback.
128 static void RecordAudioCodecStats(const AudioDecoderConfig& audio_config) {
129 base::UmaHistogramEnumeration("Media.AudioCodec", audio_config.codec());
132 // Record video decoder config UMA stats corresponding to a src= playback.
133 static void RecordVideoCodecStats(container_names::MediaContainerName container,
134 const VideoDecoderConfig& video_config,
135 AVColorRange color_range,
136 MediaLog* media_log) {
137 // TODO(xhwang): Fix these misleading metric names. They should be something
138 // like "Media.SRC.Xxxx". See http://crbug.com/716183.
139 base::UmaHistogramEnumeration("Media.VideoCodec", video_config.codec());
140 if (container == container_names::MediaContainerName::kContainerMOV) {
141 base::UmaHistogramEnumeration("Media.SRC.VideoCodec.MP4",
142 video_config.codec());
143 } else if (container == container_names::MediaContainerName::kContainerWEBM) {
144 base::UmaHistogramEnumeration("Media.SRC.VideoCodec.WebM",
145 video_config.codec());
149 static const char kCodecNone[] = "none";
151 static const char* GetCodecName(enum AVCodecID id) {
152 const AVCodecDescriptor* codec_descriptor = avcodec_descriptor_get(id);
153 // If the codec name can't be determined, return none for tracking.
154 return codec_descriptor ? codec_descriptor->name : kCodecNone;
157 static base::Value GetTimeValue(base::TimeDelta value) {
158 if (value == kInfiniteDuration)
159 return base::Value("kInfiniteDuration");
160 if (value == kNoTimestamp)
161 return base::Value("kNoTimestamp");
162 return base::Value(value.InSecondsF());
166 struct MediaLogPropertyTypeSupport<MediaLogProperty::kMaxDuration,
168 static base::Value Convert(base::TimeDelta t) { return GetTimeValue(t); }
172 struct MediaLogPropertyTypeSupport<MediaLogProperty::kStartTime,
174 static base::Value Convert(base::TimeDelta t) { return GetTimeValue(t); }
177 static int ReadFrameAndDiscardEmpty(AVFormatContext* context,
179 // Skip empty packets in a tight loop to avoid timing out fuzzers.
183 result = av_read_frame(context, packet);
184 drop_packet = (!packet->data || !packet->size) && result >= 0;
186 av_packet_unref(packet);
187 DLOG(WARNING) << "Dropping empty packet, size: " << packet->size
188 << ", data: " << static_cast<void*>(packet->data);
190 } while (drop_packet);
195 std::unique_ptr<FFmpegDemuxerStream> FFmpegDemuxerStream::Create(
196 FFmpegDemuxer* demuxer,
198 MediaLog* media_log) {
199 if (!demuxer || !stream)
202 std::unique_ptr<FFmpegDemuxerStream> demuxer_stream;
203 std::unique_ptr<AudioDecoderConfig> audio_config;
204 std::unique_ptr<VideoDecoderConfig> video_config;
206 if (stream->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
207 audio_config = std::make_unique<AudioDecoderConfig>();
209 // TODO(chcunningham): Change AVStreamToAudioDecoderConfig to check
210 // IsValidConfig internally and return a null scoped_ptr if not valid.
211 if (!AVStreamToAudioDecoderConfig(stream, audio_config.get()) ||
212 !audio_config->IsValidConfig() ||
213 !IsSupportedAudioType(AudioType::FromDecoderConfig(*audio_config))) {
214 MEDIA_LOG(DEBUG, media_log) << "Warning, FFmpegDemuxer failed to create "
215 "a valid/supported audio decoder "
216 "configuration from muxed stream, config:"
217 << audio_config->AsHumanReadableString();
221 MEDIA_LOG(INFO, media_log) << "FFmpegDemuxer: created audio stream, config "
222 << audio_config->AsHumanReadableString();
223 } else if (stream->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
224 video_config = std::make_unique<VideoDecoderConfig>();
226 // TODO(chcunningham): Change AVStreamToVideoDecoderConfig to check
227 // IsValidConfig internally and return a null scoped_ptr if not valid.
228 if (!AVStreamToVideoDecoderConfig(stream, video_config.get()) ||
229 !video_config->IsValidConfig() ||
230 !IsSupportedVideoType(VideoType::FromDecoderConfig(*video_config))) {
231 MEDIA_LOG(DEBUG, media_log) << "Warning, FFmpegDemuxer failed to create "
232 "a valid/supported video decoder "
233 "configuration from muxed stream, config:"
234 << video_config->AsHumanReadableString();
238 MEDIA_LOG(INFO, media_log) << "FFmpegDemuxer: created video stream, config "
239 << video_config->AsHumanReadableString();
242 return base::WrapUnique(
243 new FFmpegDemuxerStream(demuxer, stream, std::move(audio_config),
244 std::move(video_config), media_log));
247 static void UnmarkEndOfStreamAndClearError(AVFormatContext* format_context) {
248 format_context->pb->eof_reached = 0;
249 format_context->pb->error = 0;
253 // FFmpegDemuxerStream
255 FFmpegDemuxerStream::FFmpegDemuxerStream(
256 FFmpegDemuxer* demuxer,
258 std::unique_ptr<AudioDecoderConfig> audio_config,
259 std::unique_ptr<VideoDecoderConfig> video_config,
262 task_runner_(base::SequencedTaskRunner::GetCurrentDefault()),
264 start_time_(kNoTimestamp),
265 audio_config_(audio_config.release()),
266 video_config_(video_config.release()),
267 media_log_(media_log),
268 end_of_stream_(false),
269 last_packet_timestamp_(kNoTimestamp),
270 last_packet_duration_(kNoTimestamp),
272 waiting_for_keyframe_(false),
274 fixup_negative_timestamps_(false),
275 fixup_chained_ogg_(false),
276 num_discarded_packet_warnings_(0),
277 last_packet_pos_(AV_NOPTS_VALUE),
278 last_packet_dts_(AV_NOPTS_VALUE) {
281 bool is_encrypted = false;
283 // Determine our media format.
284 switch (stream->codecpar->codec_type) {
285 case AVMEDIA_TYPE_AUDIO:
286 DCHECK(audio_config_.get() && !video_config_.get());
288 is_encrypted = audio_config_->is_encrypted();
290 case AVMEDIA_TYPE_VIDEO:
291 DCHECK(video_config_.get() && !audio_config_.get());
293 is_encrypted = video_config_->is_encrypted();
300 // Calculate the duration.
301 duration_ = ConvertStreamTimestamp(stream->time_base, stream->duration);
304 AVDictionaryEntry* key =
305 av_dict_get(stream->metadata, "enc_key_id", nullptr, 0);
308 if (!key || !key->value)
310 base::StringPiece base64_key_id(key->value);
311 std::string enc_key_id;
312 base::Base64Decode(base64_key_id, &enc_key_id);
313 DCHECK(!enc_key_id.empty());
314 if (enc_key_id.empty())
317 encryption_key_id_.assign(enc_key_id);
318 demuxer_->OnEncryptedMediaInitData(EmeInitDataType::WEBM, enc_key_id);
322 FFmpegDemuxerStream::~FFmpegDemuxerStream() {
325 DCHECK(buffer_queue_.IsEmpty());
328 void FFmpegDemuxerStream::EnqueuePacket(ScopedAVPacket packet) {
329 DCHECK(task_runner_->RunsTasksInCurrentSequence());
330 DCHECK(packet->size);
331 DCHECK(packet->data);
333 const bool is_audio = type() == AUDIO;
335 // dts == pts when dts is not present.
337 packet->dts == AV_NOPTS_VALUE ? packet->pts : packet->dts;
339 // Chained ogg files have non-monotonically increasing position and time stamp
340 // values, which prevents us from using them to determine if a packet should
341 // be dropped. Since chained ogg is only allowed on single track audio only
342 // opus/vorbis media, and dropping packets is only necessary for multi-track
343 // video-and-audio streams, we can just disable dropping when we detect
345 // For similar reasons, we only want to allow packet drops for audio streams;
346 // video frame dropping is handled by the renderer when correcting for a/v
348 if (is_audio && !fixup_chained_ogg_ && last_packet_pos_ != AV_NOPTS_VALUE) {
349 // Some containers have unknown position...
350 if (packet->pos == -1)
351 packet->pos = last_packet_pos_;
353 if (packet->pos < last_packet_pos_) {
354 DVLOG(3) << "Dropped packet with out of order position (" << packet->pos
355 << " < " << last_packet_pos_ << ")";
358 if (last_packet_dts_ != AV_NOPTS_VALUE && packet->pos == last_packet_pos_ &&
359 packet_dts <= last_packet_dts_) {
360 DVLOG(3) << "Dropped packet with out of order display timestamp ("
361 << packet_dts << " < " << last_packet_dts_ << ")";
366 if (!demuxer_ || end_of_stream_) {
367 NOTREACHED() << "Attempted to enqueue packet on a stopped stream";
371 last_packet_pos_ = packet->pos;
372 last_packet_dts_ = packet_dts;
374 if (waiting_for_keyframe_) {
375 if (packet->flags & AV_PKT_FLAG_KEY) {
376 waiting_for_keyframe_ = false;
378 DVLOG(1) << "Dropped non-keyframe pts=" << packet->pts;
383 #if BUILDFLAG(USE_PROPRIETARY_CODECS)
384 // Convert the packet if there is a bitstream filter.
385 if (bitstream_converter_ &&
386 !bitstream_converter_->ConvertPacket(packet.get())) {
387 DVLOG(1) << "Format conversion failed.";
391 scoped_refptr<DecoderBuffer> buffer;
393 size_t side_data_size = 0;
394 uint8_t* side_data = av_packet_get_side_data(
395 packet.get(), AV_PKT_DATA_MATROSKA_BLOCKADDITIONAL, &side_data_size);
397 std::unique_ptr<DecryptConfig> decrypt_config;
399 if ((type() == DemuxerStream::AUDIO && audio_config_->is_encrypted()) ||
400 (type() == DemuxerStream::VIDEO && video_config_->is_encrypted())) {
401 if (!WebMCreateDecryptConfig(
402 packet->data, packet->size,
403 reinterpret_cast<const uint8_t*>(encryption_key_id_.data()),
404 encryption_key_id_.size(), &decrypt_config, &data_offset)) {
405 MEDIA_LOG(ERROR, media_log_) << "Creation of DecryptConfig failed.";
409 // FFmpeg may return garbage packets for MP3 stream containers, so we need
410 // to drop these to avoid decoder errors. The ffmpeg team maintains that
411 // this behavior isn't ideal, but have asked for a significant refactoring
412 // of the AVParser infrastructure to fix this, which is overkill for now.
413 // See http://crbug.com/794782.
415 // This behavior may also occur with ADTS streams, but is rarer in practice
416 // because ffmpeg's ADTS demuxer does more validation on the packets, so
417 // when invalid data is received, av_read_frame() fails and playback ends.
418 if (is_audio && demuxer_->container() ==
419 container_names::MediaContainerName::kContainerMP3) {
420 DCHECK(!data_offset); // Only set for containers supporting encryption...
422 // MP3 packets may be zero-padded according to ffmpeg, so trim until we
423 // have the packet; adjust |data_offset| too so this work isn't repeated.
424 uint8_t* packet_end = packet->data + packet->size;
425 uint8_t* header_start = packet->data;
426 while (header_start < packet_end && !*header_start) {
431 if (packet_end - header_start < MPEG1AudioStreamParser::kHeaderSize ||
432 !MPEG1AudioStreamParser::ParseHeader(nullptr, nullptr, header_start,
434 LIMITED_MEDIA_LOG(INFO, media_log_, num_discarded_packet_warnings_, 5)
435 << "Discarding invalid MP3 packet, ts: "
436 << ConvertStreamTimestamp(stream_->time_base, packet->pts)
438 << ConvertStreamTimestamp(stream_->time_base, packet->duration);
443 // If a packet is returned by FFmpeg's av_parser_parse2() the packet will
444 // reference inner memory of FFmpeg. As such we should transfer the packet
445 // into memory we control.
446 if (side_data_size > 0) {
447 buffer = DecoderBuffer::CopyFrom(packet->data + data_offset,
448 packet->size - data_offset);
449 buffer->WritableSideData().alpha_data.assign(side_data,
450 side_data + side_data_size);
452 buffer = DecoderBuffer::CopyFrom(packet->data + data_offset,
453 packet->size - data_offset);
456 size_t skip_samples_size = 0;
457 const uint32_t* skip_samples_ptr =
458 reinterpret_cast<const uint32_t*>(av_packet_get_side_data(
459 packet.get(), AV_PKT_DATA_SKIP_SAMPLES, &skip_samples_size));
460 const int kSkipSamplesValidSize = 10;
461 const int kSkipEndSamplesOffset = 1;
462 if (skip_samples_size >= kSkipSamplesValidSize) {
463 // Because FFmpeg rolls codec delay and skip samples into one we can only
464 // allow front discard padding on the first buffer. Otherwise the discard
465 // helper can't figure out which data to discard. See AudioDiscardHelper.
466 int discard_front_samples = base::ByteSwapToLE32(*skip_samples_ptr);
467 if (last_packet_timestamp_ != kNoTimestamp && discard_front_samples) {
468 DLOG(ERROR) << "Skip samples are only allowed for the first packet.";
469 discard_front_samples = 0;
472 if (discard_front_samples < 0) {
473 // See https://crbug.com/1189939 and https://trac.ffmpeg.org/ticket/9622
474 DLOG(ERROR) << "Negative skip samples are not allowed.";
475 discard_front_samples = 0;
478 const int discard_end_samples =
479 base::ByteSwapToLE32(*(skip_samples_ptr + kSkipEndSamplesOffset));
481 if (discard_front_samples || discard_end_samples) {
483 const int samples_per_second =
484 audio_decoder_config().samples_per_second();
485 buffer->set_discard_padding(std::make_pair(
486 FramesToTimeDelta(discard_front_samples, samples_per_second),
487 FramesToTimeDelta(discard_end_samples, samples_per_second)));
492 buffer->set_decrypt_config(std::move(decrypt_config));
494 if (packet->duration >= 0) {
495 buffer->set_duration(
496 ConvertStreamTimestamp(stream_->time_base, packet->duration));
498 // TODO(wolenetz): Remove when FFmpeg stops returning negative durations.
499 // https://crbug.com/394418
500 DVLOG(1) << "FFmpeg returned a buffer with a negative duration! "
502 buffer->set_duration(kNoTimestamp);
505 // Note: If pts is kNoFFmpegTimestamp, stream_timestamp will be kNoTimestamp.
506 const base::TimeDelta stream_timestamp =
507 ConvertStreamTimestamp(stream_->time_base, packet->pts);
509 if (stream_timestamp == kNoTimestamp ||
510 stream_timestamp == kInfiniteDuration) {
511 MEDIA_LOG(ERROR, media_log_) << "FFmpegDemuxer: PTS is not defined";
512 demuxer_->NotifyDemuxerError(DEMUXER_ERROR_COULD_NOT_PARSE);
516 // If this file has negative timestamps don't rebase any other stream types
517 // against the negative starting time.
518 base::TimeDelta start_time = demuxer_->start_time();
519 if (!is_audio && start_time.is_negative()) {
520 start_time = base::TimeDelta();
523 // Don't rebase timestamps for positive start times, the HTML Media Spec
524 // details this in section "4.8.10.6 Offsets into the media resource." We
525 // will still need to rebase timestamps before seeking with FFmpeg though.
526 if (start_time.is_positive())
527 start_time = base::TimeDelta();
529 buffer->set_timestamp(stream_timestamp - start_time);
531 // If the packet is marked for complete discard and it doesn't already have
532 // any discard padding set, mark the DecoderBuffer for complete discard. We
533 // don't want to overwrite any existing discard padding since the discard
534 // padding may refer to frames beyond this packet.
535 if (packet->flags & AV_PKT_FLAG_DISCARD &&
536 buffer->discard_padding() == DecoderBuffer::DiscardPadding()) {
537 buffer->set_discard_padding(
538 std::make_pair(kInfiniteDuration, base::TimeDelta()));
542 // Fixup negative timestamps where the before-zero portion is completely
543 // discarded after decoding.
544 if (buffer->timestamp().is_negative()) {
545 // Discard padding may also remove samples after zero.
546 auto fixed_ts = buffer->discard_padding().first + buffer->timestamp();
548 // Allow for rounding error in the discard padding calculations.
549 if (fixed_ts == base::Microseconds(-1)) {
550 fixed_ts = base::TimeDelta();
553 if (fixed_ts >= base::TimeDelta()) {
554 buffer->set_timestamp(fixed_ts);
558 // Only allow negative timestamps past if we know they'll be fixed up by the
559 // code paths below; otherwise they should be treated as a parse error.
560 if ((!fixup_chained_ogg_ || last_packet_timestamp_ == kNoTimestamp) &&
561 buffer->timestamp().is_negative()) {
562 MEDIA_LOG(ERROR, media_log_)
563 << "FFmpegDemuxer: unfixable negative timestamp.";
564 demuxer_->NotifyDemuxerError(DEMUXER_ERROR_COULD_NOT_PARSE);
568 // If enabled, and no codec delay is present, mark audio packets with
569 // negative timestamps for post-decode discard. If codec delay is present,
570 // discard is handled by the decoder using that value.
571 if (fixup_negative_timestamps_ && stream_timestamp.is_negative() &&
572 buffer->duration() != kNoTimestamp &&
573 !audio_decoder_config().codec_delay()) {
574 if ((stream_timestamp + buffer->duration()).is_negative()) {
575 DCHECK_EQ(buffer->discard_padding().second, base::TimeDelta());
577 // Discard the entire packet if it's entirely before zero, but don't
578 // override the discard padding if it refers to frames beyond this
580 if (buffer->discard_padding().first <= buffer->duration()) {
581 buffer->set_discard_padding(
582 std::make_pair(kInfiniteDuration, base::TimeDelta()));
585 // Only discard part of the frame if it overlaps zero.
586 buffer->set_discard_padding(std::make_pair(
587 std::max(-stream_timestamp, buffer->discard_padding().first),
588 buffer->discard_padding().second));
593 if (last_packet_timestamp_ != kNoTimestamp) {
594 // FFmpeg doesn't support chained ogg correctly. Instead of guaranteeing
595 // continuity across links in the chain it uses the timestamp information
596 // from each link directly. Doing so can lead to timestamps which appear to
597 // go backwards in time.
599 // If the new link starts with a negative timestamp or a timestamp less than
600 // the original (positive) |start_time|, we will get a negative timestamp
603 // Fixing chained ogg is non-trivial, so for now just reuse the last good
604 // timestamp. The decoder will rewrite the timestamps to be sample accurate
605 // later. See http://crbug.com/396864.
607 // Note: This will not work with codecs that have out of order frames like
608 // H.264 with b-frames, but luckily you can't put those in ogg files...
609 if (fixup_chained_ogg_ && buffer->timestamp() < last_packet_timestamp_) {
610 buffer->set_timestamp(last_packet_timestamp_ +
611 (last_packet_duration_ != kNoTimestamp
612 ? last_packet_duration_
613 : base::Microseconds(1)));
617 // The demuxer should always output positive timestamps.
618 DCHECK_GE(buffer->timestamp(), base::TimeDelta());
621 if (last_packet_timestamp_ < buffer->timestamp()) {
622 buffered_ranges_.Add(last_packet_timestamp_, buffer->timestamp());
623 demuxer_->NotifyBufferingChanged();
627 if (packet->flags & AV_PKT_FLAG_KEY)
628 buffer->set_is_key_frame(true);
630 // One last sanity check on the packet timestamps in case any of the above
631 // calculations have pushed the values to the limits.
632 if (buffer->timestamp() == kNoTimestamp ||
633 buffer->timestamp() == kInfiniteDuration) {
634 MEDIA_LOG(ERROR, media_log_) << "FFmpegDemuxer: PTS is not defined";
635 demuxer_->NotifyDemuxerError(DEMUXER_ERROR_COULD_NOT_PARSE);
639 last_packet_timestamp_ = buffer->timestamp();
640 last_packet_duration_ = buffer->duration();
642 const base::TimeDelta new_duration = last_packet_timestamp_;
643 if (new_duration > duration_ || duration_ == kNoTimestamp)
644 duration_ = new_duration;
646 buffer_queue_.Push(std::move(buffer));
647 SatisfyPendingRead();
650 void FFmpegDemuxerStream::SetEndOfStream() {
651 DCHECK(task_runner_->RunsTasksInCurrentSequence());
652 end_of_stream_ = true;
653 SatisfyPendingRead();
656 void FFmpegDemuxerStream::FlushBuffers(bool preserve_packet_position) {
657 DCHECK(task_runner_->RunsTasksInCurrentSequence());
658 DCHECK(preserve_packet_position || !read_cb_)
659 << "There should be no pending read";
661 // H264 and AAC require that we resend the header after flush.
662 // Reset bitstream for converter to do so.
663 // This is related to chromium issue 140371 (http://crbug.com/140371).
664 ResetBitstreamConverter();
666 if (!preserve_packet_position) {
667 last_packet_pos_ = AV_NOPTS_VALUE;
668 last_packet_dts_ = AV_NOPTS_VALUE;
671 buffer_queue_.Clear();
672 end_of_stream_ = false;
673 last_packet_timestamp_ = kNoTimestamp;
674 last_packet_duration_ = kNoTimestamp;
678 void FFmpegDemuxerStream::Abort() {
681 std::move(read_cb_).Run(DemuxerStream::kAborted, {});
684 void FFmpegDemuxerStream::Stop() {
685 DCHECK(task_runner_->RunsTasksInCurrentSequence());
686 buffer_queue_.Clear();
689 end_of_stream_ = true;
691 std::move(read_cb_).Run(DemuxerStream::kOk,
692 {DecoderBuffer::CreateEOSBuffer()});
696 DemuxerStream::Type FFmpegDemuxerStream::type() const {
697 DCHECK(task_runner_->RunsTasksInCurrentSequence());
701 StreamLiveness FFmpegDemuxerStream::liveness() const {
702 DCHECK(task_runner_->RunsTasksInCurrentSequence());
706 void FFmpegDemuxerStream::Read(uint32_t count, ReadCB read_cb) {
707 DCHECK(task_runner_->RunsTasksInCurrentSequence());
708 CHECK(!read_cb_) << "Overlapping reads are not supported";
709 read_cb_ = base::BindPostTaskToCurrentDefault(std::move(read_cb));
710 requested_buffer_count_ = static_cast<size_t>(count);
712 << " requested_buffer_count_ = " << requested_buffer_count_;
713 // Don't accept any additional reads if we've been told to stop.
714 // The |demuxer_| may have been destroyed in the pipeline thread.
716 // TODO(scherkus): it would be cleaner to reply with an error message.
718 std::move(read_cb_).Run(DemuxerStream::kOk,
719 {DecoderBuffer::CreateEOSBuffer()});
724 DVLOG(1) << "Read from disabled stream, returning EOS";
725 std::move(read_cb_).Run(kOk, {DecoderBuffer::CreateEOSBuffer()});
730 std::move(read_cb_).Run(kAborted, {});
734 SatisfyPendingRead();
737 void FFmpegDemuxerStream::EnableBitstreamConverter() {
738 DCHECK(task_runner_->RunsTasksInCurrentSequence());
740 #if BUILDFLAG(USE_PROPRIETARY_CODECS)
741 InitBitstreamConverter();
743 DLOG(ERROR) << "Proprietary codecs not enabled and stream requires bitstream "
744 "conversion. Playback will likely fail.";
748 void FFmpegDemuxerStream::ResetBitstreamConverter() {
749 #if BUILDFLAG(USE_PROPRIETARY_CODECS)
750 if (bitstream_converter_)
751 InitBitstreamConverter();
752 #endif // BUILDFLAG(USE_PROPRIETARY_CODECS)
755 void FFmpegDemuxerStream::InitBitstreamConverter() {
756 #if BUILDFLAG(USE_PROPRIETARY_CODECS)
757 switch (stream_->codecpar->codec_id) {
758 case AV_CODEC_ID_H264:
759 // Clear |extra_data| so that future (fallback) decoders will know that
760 // conversion is forcibly enabled on this stream.
762 // TODO(sandersd): Ideally we would convert |extra_data| to concatenated
763 // SPS/PPS data, but it's too late to be useful because Initialize() was
764 // already called on GpuVideoDecoder, which is the only path that would
765 // consume that data.
767 video_config_->SetExtraData(std::vector<uint8_t>());
768 bitstream_converter_ =
769 std::make_unique<FFmpegH264ToAnnexBBitstreamConverter>(
772 #if BUILDFLAG(ENABLE_PLATFORM_HEVC)
773 case AV_CODEC_ID_HEVC:
774 bitstream_converter_ =
775 std::make_unique<FFmpegH265ToAnnexBBitstreamConverter>(
779 case AV_CODEC_ID_AAC:
780 // FFmpeg doesn't understand xHE-AAC profiles yet, which can't be put in
781 // ADTS anyways, so skip bitstream conversion when the profile is
783 if (audio_config_->profile() != AudioCodecProfile::kXHE_AAC) {
784 bitstream_converter_ =
785 std::make_unique<FFmpegAACBitstreamConverter>(stream_->codecpar);
791 #endif // BUILDFLAG(USE_PROPRIETARY_CODECS)
794 bool FFmpegDemuxerStream::SupportsConfigChanges() { return false; }
796 AudioDecoderConfig FFmpegDemuxerStream::audio_decoder_config() {
797 DCHECK(task_runner_->RunsTasksInCurrentSequence());
798 DCHECK_EQ(type_, AUDIO);
799 DCHECK(audio_config_.get());
800 return *audio_config_;
803 VideoDecoderConfig FFmpegDemuxerStream::video_decoder_config() {
804 DCHECK(task_runner_->RunsTasksInCurrentSequence());
805 DCHECK_EQ(type_, VIDEO);
806 DCHECK(video_config_.get());
807 return *video_config_;
810 bool FFmpegDemuxerStream::IsEnabled() const {
811 DCHECK(task_runner_->RunsTasksInCurrentSequence());
815 void FFmpegDemuxerStream::SetEnabled(bool enabled, base::TimeDelta timestamp) {
816 DCHECK(task_runner_->RunsTasksInCurrentSequence());
818 DCHECK(demuxer_->ffmpeg_task_runner());
819 if (enabled == is_enabled_)
822 is_enabled_ = enabled;
823 demuxer_->ffmpeg_task_runner()->PostTask(
824 FROM_HERE, base::BindOnce(&SetAVStreamDiscard, av_stream(),
825 enabled ? AVDISCARD_DEFAULT : AVDISCARD_ALL));
827 waiting_for_keyframe_ = true;
829 if (!is_enabled_ && read_cb_) {
830 DVLOG(1) << "Read from disabled stream, returning EOS";
831 std::move(read_cb_).Run(kOk, {DecoderBuffer::CreateEOSBuffer()});
835 void FFmpegDemuxerStream::SetLiveness(StreamLiveness liveness) {
836 DCHECK(task_runner_->RunsTasksInCurrentSequence());
837 DCHECK_EQ(liveness_, StreamLiveness::kUnknown);
838 liveness_ = liveness;
841 Ranges<base::TimeDelta> FFmpegDemuxerStream::GetBufferedRanges() const {
842 return buffered_ranges_;
845 void FFmpegDemuxerStream::SatisfyPendingRead() {
846 DCHECK(task_runner_->RunsTasksInCurrentSequence());
848 if (!buffer_queue_.IsEmpty()) {
849 DemuxerStream::DecoderBufferVector output_buffers;
852 i < std::min(requested_buffer_count_, buffer_queue_.queue_size());
854 output_buffers.emplace_back(buffer_queue_.Pop());
856 DVLOG(3) << __func__ << " Status:kOk, return output_buffers.size = "
857 << output_buffers.size();
858 std::move(read_cb_).Run(DemuxerStream::kOk, std::move(output_buffers));
859 } else if (end_of_stream_) {
860 std::move(read_cb_).Run(DemuxerStream::kOk,
861 {DecoderBuffer::CreateEOSBuffer()});
864 // Have capacity? Ask for more!
865 if (HasAvailableCapacity() && !end_of_stream_) {
866 demuxer_->NotifyCapacityAvailable();
870 bool FFmpegDemuxerStream::HasAvailableCapacity() {
871 // Try to have two second's worth of encoded data per stream.
872 const base::TimeDelta kCapacity = base::Seconds(2);
873 return buffer_queue_.IsEmpty() || buffer_queue_.Duration() < kCapacity;
876 size_t FFmpegDemuxerStream::MemoryUsage() const {
877 return buffer_queue_.data_size();
880 std::string FFmpegDemuxerStream::GetMetadata(const char* key) const {
881 const AVDictionaryEntry* entry =
882 av_dict_get(stream_->metadata, key, nullptr, 0);
883 return (entry == nullptr || entry->value == nullptr) ? "" : entry->value;
887 base::TimeDelta FFmpegDemuxerStream::ConvertStreamTimestamp(
888 const AVRational& time_base,
890 if (timestamp == kNoFFmpegTimestamp)
893 return ConvertFromTimeBase(time_base, timestamp);
899 FFmpegDemuxer::FFmpegDemuxer(
900 const scoped_refptr<base::SequencedTaskRunner>& task_runner,
901 DataSource* data_source,
902 const EncryptedMediaInitDataCB& encrypted_media_init_data_cb,
903 MediaTracksUpdatedCB media_tracks_updated_cb,
906 : task_runner_(task_runner),
907 // FFmpeg has no asynchronous API, so we use base::WaitableEvents inside
908 // the BlockingUrlProtocol to handle hops to the render thread for network
910 blocking_task_runner_(base::ThreadPool::CreateSequencedTaskRunner(
911 {base::MayBlock(), base::TaskPriority::USER_BLOCKING})),
912 data_source_(data_source),
913 media_log_(media_log),
914 encrypted_media_init_data_cb_(encrypted_media_init_data_cb),
915 media_tracks_updated_cb_(std::move(media_tracks_updated_cb)),
916 is_local_file_(is_local_file) {
917 DCHECK(task_runner_.get());
918 DCHECK(data_source_);
919 DCHECK(media_tracks_updated_cb_);
922 FFmpegDemuxer::~FFmpegDemuxer() {
924 DCHECK(!pending_seek_cb_);
926 // NOTE: This class is not destroyed on |task_runner|, so we must ensure that
927 // there are no outstanding WeakPtrs by the time we reach here.
928 DCHECK(!weak_factory_.HasWeakPtrs());
930 // There may be outstanding tasks in the blocking pool which are trying to use
931 // these members, so release them in sequence with any outstanding calls. The
932 // earlier call to Abort() on |data_source_| prevents further access to it.
933 blocking_task_runner_->DeleteSoon(FROM_HERE, url_protocol_.release());
934 blocking_task_runner_->DeleteSoon(FROM_HERE, glue_.release());
937 std::string FFmpegDemuxer::GetDisplayName() const {
938 return "FFmpegDemuxer";
941 DemuxerType FFmpegDemuxer::GetDemuxerType() const {
942 return DemuxerType::kFFmpegDemuxer;
// Begins asynchronous initialization: wires the data source into FFmpeg via
// BlockingUrlProtocol/FFmpegGlue, then opens the AVFormatContext on the
// blocking pool. Continues in OnOpenContextDone(); |init_cb| is invoked via
// RunInitCB() once initialization succeeds or fails.
void FFmpegDemuxer::Initialize(DemuxerHost* host,
                               PipelineStatusCallback init_cb) {
  DCHECK(task_runner_->RunsTasksInCurrentSequence());
  weak_this_ = cancel_pending_seek_factory_.GetWeakPtr();
  init_cb_ = std::move(init_cb);
  // Give a WeakPtr to BlockingUrlProtocol since we'll need to release it on the
  // blocking thread pool.
  url_protocol_ = std::make_unique<BlockingUrlProtocol>(
      data_source_, base::BindPostTaskToCurrentDefault(base::BindRepeating(
                        &FFmpegDemuxer::OnDataSourceError, weak_this_)));
  glue_ = std::make_unique<FFmpegGlue>(url_protocol_.get());
  AVFormatContext* format_context = glue_->format_context();
  // Disable ID3v1 tag reading to avoid costly seeks to end of file for data we
  // don't use. FFmpeg will only read ID3v1 tags if no other metadata is
  // available, so add a metadata entry to ensure some is always present.
  av_dict_set(&format_context->metadata, "skip_id3v1_tags", "", 0);
  // Ensure ffmpeg doesn't give up too early while looking for stream params;
  // this does not increase the amount of data downloaded. The default value
  // is 5 AV_TIME_BASE units (1 second each), which prevents some oddly muxed
  // streams from being detected properly; this value was chosen arbitrarily.
  format_context->max_analyze_duration = 60 * AV_TIME_BASE;
  // Open the AVFormatContext using our glue layer. base::Unretained is safe
  // because |glue_| is released to the blocking task runner in the destructor.
  blocking_task_runner_->PostTaskAndReplyWithResult(
      base::BindOnce(&FFmpegGlue::OpenContext, base::Unretained(glue_.get()),
      base::BindOnce(&FFmpegDemuxer::OnOpenContextDone,
                     weak_factory_.GetWeakPtr()));
// Aborts all in-flight stream reads and the data source, invalidates read/seek
// completion WeakPtrs, and completes any pending seek with PIPELINE_OK.
void FFmpegDemuxer::AbortPendingReads() {
  DCHECK(task_runner_->RunsTasksInCurrentSequence());
  // If Stop() has been called, then drop this call.
  // This should only be called after the demuxer has been initialized.
  DCHECK_GT(streams_.size(), 0u);
  // Abort all outstanding reads.
  for (const auto& stream : streams_) {
  // It's important to invalidate read/seek completion callbacks to avoid any
  // errors that occur because of the data source abort.
  weak_factory_.InvalidateWeakPtrs();
  data_source_->Abort();
  // Aborting the read may cause EOF to be marked, undo this.
  blocking_task_runner_->PostTask(
      base::BindOnce(&UnmarkEndOfStreamAndClearError, glue_->format_context()));
  pending_read_ = false;
  // TODO(dalecurtis): We probably should report PIPELINE_ERROR_ABORT here
  // instead to avoid any preroll work that may be started upon return, but
  // currently the PipelineImpl does not know how to handle this.
  if (pending_seek_cb_)
    RunPendingSeekCB(PIPELINE_OK);
// Shuts the demuxer down: fails any pending init/seek callbacks with
// PIPELINE_ERROR_ABORT, stops the data source and URL protocol (in that
// order), and invalidates all WeakPtrs so outstanding tasks become no-ops.
void FFmpegDemuxer::Stop() {
  DCHECK(task_runner_->RunsTasksInCurrentSequence());
  RunInitCB(PIPELINE_ERROR_ABORT);
  if (pending_seek_cb_)
    RunPendingSeekCB(PIPELINE_ERROR_ABORT);
  // The order of Stop() and Abort() is important here. If Abort() is called
  // first, control may pass into FFmpeg where it can destruct buffers that are
  // in the process of being fulfilled by the DataSource.
  data_source_->Stop();
  url_protocol_->Abort();
  for (const auto& stream : streams_) {
  // |data_source_| must not be used after Stop(); clear the pointer.
  data_source_ = nullptr;
  // Invalidate WeakPtrs on |task_runner_|, destruction may happen on another
  // thread. We don't need to wait for any outstanding tasks since they will all
  // fail to return after invalidating WeakPtrs.
  weak_factory_.InvalidateWeakPtrs();
  cancel_pending_seek_factory_.InvalidateWeakPtrs();
1043 void FFmpegDemuxer::StartWaitingForSeek(base::TimeDelta seek_time) {}
// Cancels an in-flight seek by aborting pending reads. May be called from any
// thread; hops to |task_runner_| when needed.
void FFmpegDemuxer::CancelPendingSeek(base::TimeDelta seek_time) {
  if (task_runner_->RunsTasksInCurrentSequence()) {
    AbortPendingReads();
    // Don't use GetWeakPtr() here since we are on the wrong thread; |weak_this_|
    // was created up front on |task_runner_| for exactly this purpose.
    task_runner_->PostTask(
        base::BindOnce(&FFmpegDemuxer::AbortPendingReads, weak_this_));
// Starts an asynchronous seek to |time|. |cb| is stored as |pending_seek_cb_|
// and fired from OnSeekFrameDone() (or aborted by Stop()/AbortPendingReads()).
void FFmpegDemuxer::Seek(base::TimeDelta time, PipelineStatusCallback cb) {
  DCHECK(task_runner_->RunsTasksInCurrentSequence());
  DCHECK(!pending_seek_cb_);
  TRACE_EVENT_ASYNC_BEGIN0("media", "FFmpegDemuxer::Seek", this);
  pending_seek_cb_ = std::move(cb);
  SeekInternal(time, base::BindOnce(&FFmpegDemuxer::OnSeekFrameDone,
                                    weak_factory_.GetWeakPtr()));
1065 bool FFmpegDemuxer::IsSeekable() const {
// Shared seek implementation used by Seek() and track changes. Adjusts |time|
// for negative start times and Opus preroll, picks the stream to seek on, and
// posts av_seek_frame() (via AVSeekFrame) to the blocking pool; |seek_cb|
// receives the raw FFmpeg result code.
void FFmpegDemuxer::SeekInternal(base::TimeDelta time,
                                 base::OnceCallback<void(int)> seek_cb) {
  DCHECK(task_runner_->RunsTasksInCurrentSequence());
  // FFmpeg requires seeks to be adjusted according to the lowest starting time.
  // Since EnqueuePacket() rebased negative timestamps by the start time, we
  // must correct the shift here.
  // Additionally, to workaround limitations in how we expose seekable ranges to
  // Blink (http://crbug.com/137275), we also want to clamp seeks before the
  // start time to the start time.
  base::TimeDelta seek_time;
  if (start_time_.is_negative()) {
    seek_time = time + start_time_;
    seek_time = std::max(start_time_, time);
  // When seeking in an opus stream we need to ensure we deliver enough data to
  // satisfy the seek preroll; otherwise the audio at the actual seek time will
  // not be entirely accurate.
  FFmpegDemuxerStream* audio_stream =
      GetFirstEnabledFFmpegStream(DemuxerStream::AUDIO);
    const AudioDecoderConfig& config = audio_stream->audio_decoder_config();
    if (config.codec() == AudioCodec::kOpus)
      seek_time = std::max(start_time_, seek_time - config.seek_preroll());
  // Choose the seeking stream based on whether it contains the seek time, if
  // no match can be found prefer the preferred stream.
  // TODO(dalecurtis): Currently FFmpeg does not ensure that all streams in a
  // given container will demux all packets after the seek point. Instead it
  // only guarantees that all packets after the file position of the seek will
  // be demuxed. It's an open question whether FFmpeg should fix this:
  // http://lists.ffmpeg.org/pipermail/ffmpeg-devel/2014-June/159212.html
  // Tracked by http://crbug.com/387996.
  FFmpegDemuxerStream* demux_stream = FindPreferredStreamForSeeking(seek_time);
  DCHECK(demux_stream);
  const AVStream* seeking_stream = demux_stream->av_stream();
  DCHECK(seeking_stream);
  // Seek time must be expressed in the chosen stream's own time base.
  blocking_task_runner_->PostTaskAndReplyWithResult(
      base::BindOnce(&AVSeekFrame, glue_->format_context(),
                     seeking_stream->index,
                     ConvertToTimeBase(seeking_stream->time_base, seek_time)),
      std::move(seek_cb));
// Returns the wall-clock origin of the media timeline (set during init).
base::Time FFmpegDemuxer::GetTimelineOffset() const {
  return timeline_offset_;
// Returns every non-null stream, enabled streams first so that
// MediaResource::GetFirstStream() prefers an enabled one.
std::vector<DemuxerStream*> FFmpegDemuxer::GetAllStreams() {
  DCHECK(task_runner_->RunsTasksInCurrentSequence());
  std::vector<DemuxerStream*> result;
  // Put enabled streams at the beginning of the list so that
  // MediaResource::GetFirstStream returns the enabled stream if there is one.
  // TODO(servolk): Revisit this after media track switching is supported.
  for (const auto& stream : streams_) {
    if (stream && stream->IsEnabled())
      result.push_back(stream.get());
  // And include disabled streams at the end of the list.
  for (const auto& stream : streams_) {
    if (stream && !stream->IsEnabled())
      result.push_back(stream.get());
// Returns the first enabled stream of |type|, or (per the loop's fallthrough)
// null when none matches.
FFmpegDemuxerStream* FFmpegDemuxer::GetFirstEnabledFFmpegStream(
    DemuxerStream::Type type) const {
  for (const auto& stream : streams_) {
    if (stream && stream->type() == type && stream->IsEnabled()) {
      return stream.get();
// Externally visible start time; negative internal start times are clamped to
// zero (negative timestamps are rebased by EnqueuePacket()).
base::TimeDelta FFmpegDemuxer::GetStartTime() const {
  return std::max(start_time_, base::TimeDelta());
// Sums buffered-packet memory usage across all streams.
int64_t FFmpegDemuxer::GetMemoryUsage() const {
  int64_t allocation_size = 0;
  for (const auto& stream : streams_) {
    allocation_size += stream->MemoryUsage();
  return allocation_size;
// Reports the detected container type for UMA metrics.
absl::optional<container_names::MediaContainerName>
FFmpegDemuxer::GetContainerForMetrics() const {
// Forwards encrypted-media init data (the key ID bytes) to the client callback.
void FFmpegDemuxer::OnEncryptedMediaInitData(
    EmeInitDataType init_data_type,
    const std::string& encryption_key_id) {
  // The callback takes a byte vector, so copy the string's bytes across.
  std::vector<uint8_t> key_id_local(encryption_key_id.begin(),
                                    encryption_key_id.end());
  encrypted_media_init_data_cb_.Run(init_data_type, key_id_local);
// Called when a stream frees buffer space; resumes demuxing if needed.
void FFmpegDemuxer::NotifyCapacityAvailable() {
  DCHECK(task_runner_->RunsTasksInCurrentSequence());
  ReadFrameIfNeeded();
// Recomputes the buffered time ranges as the intersection of all streams'
// ranges and reports the result to the host.
void FFmpegDemuxer::NotifyBufferingChanged() {
  DCHECK(task_runner_->RunsTasksInCurrentSequence());
  Ranges<base::TimeDelta> buffered;
  bool initialized_buffered_ranges = false;
  for (const auto& stream : streams_) {
    if (initialized_buffered_ranges) {
      buffered = buffered.IntersectionWith(stream->GetBufferedRanges());
      buffered = stream->GetBufferedRanges();
      initialized_buffered_ranges = true;
  host_->OnBufferedTimeRangesChanged(buffered);
// Helper for calculating the bitrate of the media based on information stored
// in |format_context| or failing that the size and duration of the media.
//
// Returns 0 if a bitrate could not be determined.
static int CalculateBitrate(AVFormatContext* format_context,
                            const base::TimeDelta& duration,
                            int64_t filesize_in_bytes) {
  // If there is a bitrate set on the container, use it.
  if (format_context->bit_rate > 0)
    return format_context->bit_rate;
  // Then try to sum the bitrates individually per stream.
  for (size_t i = 0; i < format_context->nb_streams; ++i) {
    AVCodecParameters* codec_parameters = format_context->streams[i]->codecpar;
    bitrate += codec_parameters->bit_rate;
  // See if we can approximate the bitrate as long as we have a filesize and
  // a finite, positive duration; otherwise bail out.
  if (duration <= base::TimeDelta() || duration == kInfiniteDuration ||
  // Don't multiply by 8 first; it will overflow if (filesize_in_bytes >= 2^60).
  return base::ClampRound(filesize_in_bytes * duration.ToHz() * 8);
// Completion callback for FFmpegGlue::OpenContext(). On success, kicks off
// avformat_find_stream_info() on the blocking pool (continues in
// OnFindStreamInfoDone()); on failure or HLS detection, fails initialization.
void FFmpegDemuxer::OnOpenContextDone(bool result) {
  DCHECK(task_runner_->RunsTasksInCurrentSequence());
  MEDIA_LOG(ERROR, media_log_) << GetDisplayName() << ": bad state";
  RunInitCB(PIPELINE_ERROR_ABORT);
#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(ENABLE_HLS_DEMUXER) || defined(TIZEN_MULTIMEDIA)
  // HLS is handled by a dedicated demuxer; report it so the pipeline can
  // switch over rather than trying to demux the manifest here.
  if (glue_->detected_hls()) {
    MEDIA_LOG(INFO, media_log_)
        << GetDisplayName() << ": detected HLS manifest";
    RunInitCB(DEMUXER_ERROR_DETECTED_HLS);
  MEDIA_LOG(ERROR, media_log_) << GetDisplayName() << ": open context failed";
  RunInitCB(DEMUXER_ERROR_COULD_NOT_OPEN);
  // Fully initialize AVFormatContext by parsing the stream a little.
  blocking_task_runner_->PostTaskAndReplyWithResult(
      base::BindOnce(&avformat_find_stream_info, glue_->format_context(),
                     static_cast<AVDictionary**>(nullptr)),
      base::BindOnce(&FFmpegDemuxer::OnFindStreamInfoDone,
                     weak_factory_.GetWeakPtr()));
// Completion callback for avformat_find_stream_info(). Builds the
// FFmpegDemuxerStream list and MediaTracks from the AVFormatContext, computes
// |start_time_|, duration, bitrate, and liveness, applies container-specific
// fixups (chained ogg, negative timestamps, AVI PTS, MP3 start time), and
// finally signals the init callback. This runs once per initialization on
// |task_runner_|.
void FFmpegDemuxer::OnFindStreamInfoDone(int result) {
  DCHECK(task_runner_->RunsTasksInCurrentSequence());
  if (stopped_ || !data_source_) {
    MEDIA_LOG(ERROR, media_log_) << GetDisplayName() << ": bad state";
    RunInitCB(PIPELINE_ERROR_ABORT);
  MEDIA_LOG(ERROR, media_log_) << GetDisplayName()
                               << ": find stream info failed";
  RunInitCB(DEMUXER_ERROR_COULD_NOT_PARSE);
  // Create demuxer stream entries for each possible AVStream. Each stream
  // is examined to determine if it is supported or not (is the codec enabled
  // for it in this release?). Unsupported streams are skipped, allowing for
  // partial playback. At least one audio or video stream must be playable.
  AVFormatContext* format_context = glue_->format_context();
  streams_.resize(format_context->nb_streams);
  std::unique_ptr<MediaTracks> media_tracks(new MediaTracks());
  DCHECK(track_id_to_demux_stream_map_.empty());
  // If available, |start_time_| will be set to the lowest stream start time.
  start_time_ = kInfiniteDuration;
  base::TimeDelta max_duration;
  int supported_audio_track_count = 0;
  int supported_video_track_count = 0;
  bool has_opus_or_vorbis_audio = false;
  bool needs_negative_timestamp_fixup = false;
  for (size_t i = 0; i < format_context->nb_streams; ++i) {
    AVStream* stream = format_context->streams[i];
    const AVCodecParameters* codec_parameters = stream->codecpar;
    const AVMediaType codec_type = codec_parameters->codec_type;
    const AVCodecID codec_id = codec_parameters->codec_id;
    // Skip streams which are not properly detected.
    if (codec_id == AV_CODEC_ID_NONE) {
      stream->discard = AVDISCARD_ALL;
    if (codec_type == AVMEDIA_TYPE_AUDIO) {
      // Log the codec detected, whether it is supported or not, and whether or
      // not we have already detected a supported codec in another stream.
      const int32_t codec_hash = HashCodecName(GetCodecName(codec_id));
      base::UmaHistogramSparse("Media.DetectedAudioCodecHash", codec_hash);
      if (is_local_file_) {
        base::UmaHistogramSparse("Media.DetectedAudioCodecHash.Local",
    } else if (codec_type == AVMEDIA_TYPE_VIDEO) {
      // Log the codec detected, whether it is supported or not, and whether or
      // not we have already detected a supported codec in another stream.
      const int32_t codec_hash = HashCodecName(GetCodecName(codec_id));
      base::UmaHistogramSparse("Media.DetectedVideoCodecHash", codec_hash);
      if (is_local_file_) {
        base::UmaHistogramSparse("Media.DetectedVideoCodecHash.Local",
#if BUILDFLAG(ENABLE_PLATFORM_HEVC)
      if (codec_id == AV_CODEC_ID_HEVC) {
        // If ffmpeg is built without HEVC parser/decoder support, it will be
        // able to demux HEVC based solely on container-provided information,
        // but unable to get some of the parameters without parsing the stream
        // (e.g. coded size needs to be read from SPS, pixel format is typically
        // deduced from decoder config in hvcC box). These are not really needed
        // when using external decoder (e.g. hardware decoder), so override them
        // to make sure this translates into a valid VideoDecoderConfig. Coded
        // size is overridden in AVStreamToVideoDecoderConfig().
        if (stream->codecpar->format == AV_PIX_FMT_NONE)
          stream->codecpar->format = AV_PIX_FMT_YUV420P;
    } else if (codec_type == AVMEDIA_TYPE_SUBTITLE) {
      stream->discard = AVDISCARD_ALL;
      stream->discard = AVDISCARD_ALL;
    // Skip disabled tracks. The mov demuxer translates MOV_TKHD_FLAG_ENABLED to
    // AV_DISPOSITION_DEFAULT.
    if (container() == container_names::MediaContainerName::kContainerMOV &&
        !(stream->disposition & AV_DISPOSITION_DEFAULT)) {
      stream->discard = AVDISCARD_ALL;
    // Attempt to create a FFmpegDemuxerStream from the AVStream. This will
    // return nullptr if the AVStream is invalid. Validity checks will verify
    // things like: codec, channel layout, sample/pixel format, etc...
    std::unique_ptr<FFmpegDemuxerStream> demuxer_stream =
        FFmpegDemuxerStream::Create(this, stream, media_log_);
    if (demuxer_stream.get()) {
      streams_[i] = std::move(demuxer_stream);
      if (codec_type == AVMEDIA_TYPE_AUDIO) {
        MEDIA_LOG(INFO, media_log_)
            << ": skipping invalid or unsupported audio track";
      } else if (codec_type == AVMEDIA_TYPE_VIDEO) {
        MEDIA_LOG(INFO, media_log_)
            << ": skipping invalid or unsupported video track";
      // This AVStream does not successfully convert.
    // Track IDs are 1-based positions in |media_tracks|.
    StreamParser::TrackId track_id =
        static_cast<StreamParser::TrackId>(media_tracks->tracks().size() + 1);
        MediaTrack::Label(streams_[i]->GetMetadata("handler_name"));
    auto track_language =
        MediaTrack::Language(streams_[i]->GetMetadata("language"));
    // Some metadata is named differently in FFmpeg for webm files.
    if (glue_->container() ==
        container_names::MediaContainerName::kContainerWEBM) {
      track_label = MediaTrack::Label(streams_[i]->GetMetadata("title"));
    // Only the first supported track of each type starts out enabled.
    if (codec_type == AVMEDIA_TYPE_AUDIO) {
      ++supported_audio_track_count;
      streams_[i]->SetEnabled(supported_audio_track_count == 1,
    } else if (codec_type == AVMEDIA_TYPE_VIDEO) {
      ++supported_video_track_count;
      streams_[i]->SetEnabled(supported_video_track_count == 1,
    // TODO(chcunningham): Remove the IsValidConfig() checks below. If the
    // config isn't valid we shouldn't have created a demuxer stream nor
    // an entry in |media_tracks|, so the check should always be true.
    if ((codec_type == AVMEDIA_TYPE_AUDIO &&
         media_tracks->getAudioConfig(track_id).IsValidConfig()) ||
        (codec_type == AVMEDIA_TYPE_VIDEO &&
         media_tracks->getVideoConfig(track_id).IsValidConfig())) {
      MEDIA_LOG(INFO, media_log_)
          << ": skipping duplicate media stream id=" << track_id;
    // Note when we find our audio/video stream (we only want one of each) and
    // record src= playback UMA stats for the stream's decoder config.
    MediaTrack* media_track = nullptr;
    if (codec_type == AVMEDIA_TYPE_AUDIO) {
      AudioDecoderConfig audio_config = streams_[i]->audio_decoder_config();
      RecordAudioCodecStats(audio_config);
      media_track = media_tracks->AddAudioTrack(audio_config, track_id,
                                                MediaTrack::Kind("main"),
                                                track_label, track_language);
      media_track->set_id(MediaTrack::Id(base::NumberToString(track_id)));
      DCHECK(track_id_to_demux_stream_map_.find(media_track->id()) ==
             track_id_to_demux_stream_map_.end());
      track_id_to_demux_stream_map_[media_track->id()] = streams_[i].get();
    } else if (codec_type == AVMEDIA_TYPE_VIDEO) {
      VideoDecoderConfig video_config = streams_[i]->video_decoder_config();
      RecordVideoCodecStats(glue_->container(), video_config,
                            stream->codecpar->color_range, media_log_);
      media_track = media_tracks->AddVideoTrack(video_config, track_id,
                                                MediaTrack::Kind("main"),
                                                track_label, track_language);
      media_track->set_id(MediaTrack::Id(base::NumberToString(track_id)));
      DCHECK(track_id_to_demux_stream_map_.find(media_track->id()) ==
             track_id_to_demux_stream_map_.end());
      track_id_to_demux_stream_map_[media_track->id()] = streams_[i].get();
    max_duration = std::max(max_duration, streams_[i]->duration());
    base::TimeDelta start_time = ExtractStartTime(stream);
    // Note: This value is used for seeking, so we must take the true value and
    // not the one possibly clamped to zero below.
    if (start_time != kNoTimestamp && start_time < start_time_)
      start_time_ = start_time;
    const bool is_opus_or_vorbis =
        codec_id == AV_CODEC_ID_OPUS || codec_id == AV_CODEC_ID_VORBIS;
    if (!has_opus_or_vorbis_audio)
      has_opus_or_vorbis_audio = is_opus_or_vorbis;
    if (codec_type == AVMEDIA_TYPE_AUDIO && start_time.is_negative() &&
        is_opus_or_vorbis) {
      needs_negative_timestamp_fixup = true;
      // Fixup the seeking information to avoid selecting the audio stream
      // simply because it has a lower starting time.
      start_time = base::TimeDelta();
    streams_[i]->set_start_time(start_time);
  // At least one playable track is required for successful initialization.
  if (media_tracks->tracks().empty()) {
    MEDIA_LOG(ERROR, media_log_) << GetDisplayName()
                                 << ": no supported streams";
    RunInitCB(DEMUXER_ERROR_NO_SUPPORTED_STREAMS);
  if (format_context->duration != kNoFFmpegTimestamp) {
    // If there is a duration value in the container use that to find the
    // maximum between it and the duration from A/V streams.
    const AVRational av_time_base = {1, AV_TIME_BASE};
        std::max(max_duration,
                 ConvertFromTimeBase(av_time_base, format_context->duration));
    // The duration is unknown, in which case this is likely a live stream.
    max_duration = kInfiniteDuration;
  // Chained ogg is only allowed on single track audio only opus/vorbis media.
  const bool needs_chained_ogg_fixup =
      glue_->container() ==
          container_names::MediaContainerName::kContainerOgg &&
      supported_audio_track_count == 1 && !supported_video_track_count &&
      has_opus_or_vorbis_audio;
  // FFmpeg represents audio data marked as before the beginning of stream as
  // having negative timestamps. This data must be discarded after it has been
  // decoded, not before since it is used to warmup the decoder. There are
  // currently two known cases for this: vorbis in ogg and opus.
  //
  // For API clarity, it was decided that the rest of the media pipeline should
  // not be exposed to negative timestamps. Which means we need to rebase these
  // negative timestamps and mark them for discard post decoding.
  //
  // Post-decode frame dropping for packets with negative timestamps is outlined
  // in section A.2 in the Ogg Vorbis spec:
  // http://xiph.org/vorbis/doc/Vorbis_I_spec.html
  //
  // FFmpeg's use of negative timestamps for opus pre-skip is nonstandard, but
  // for more information on pre-skip see section 4.2 of the Ogg Opus spec:
  // https://tools.ietf.org/html/draft-ietf-codec-oggopus-08#section-4.2
  if (needs_negative_timestamp_fixup || needs_chained_ogg_fixup) {
    for (auto& stream : streams_) {
      if (needs_negative_timestamp_fixup)
        stream->enable_negative_timestamp_fixups();
      if (needs_chained_ogg_fixup)
        stream->enable_chained_ogg_fixups();
  // If no start time could be determined, default to zero.
  if (start_time_ == kInfiniteDuration)
    start_time_ = base::TimeDelta();
  // MPEG-4 B-frames cause grief for a simple container like AVI. Enable PTS
  // generation so we always get timestamps, see http://crbug.com/169570
  if (glue_->container() ==
      container_names::MediaContainerName::kContainerAVI) {
    format_context->flags |= AVFMT_FLAG_GENPTS;
  // FFmpeg will incorrectly adjust the start time of MP3 files into the future
  // based on discard samples. We were unable to fix this upstream without
  // breaking ffmpeg functionality. https://crbug.com/1062037
  if (glue_->container() ==
      container_names::MediaContainerName::kContainerMP3) {
    start_time_ = base::TimeDelta();
  // For testing purposes, don't overwrite the timeline offset if set already.
  if (timeline_offset_.is_null()) {
        ExtractTimelineOffset(glue_->container(), format_context);
  // Since we're shifting the externally visible start time to zero, we need to
  // adjust the timeline offset to compensate.
  if (!timeline_offset_.is_null() && start_time_.is_negative())
    timeline_offset_ += start_time_;
  if (max_duration == kInfiniteDuration && !timeline_offset_.is_null()) {
    SetLiveness(StreamLiveness::kLive);
  } else if (max_duration != kInfiniteDuration) {
    SetLiveness(StreamLiveness::kRecorded);
    SetLiveness(StreamLiveness::kUnknown);
  // Good to go: set the duration and bitrate and notify we're done
  // initializing.
  host_->SetDuration(max_duration);
  duration_ = max_duration;
  duration_known_ = (max_duration != kInfiniteDuration);
  int64_t filesize_in_bytes = 0;
  url_protocol_->GetSize(&filesize_in_bytes);
  bitrate_ = CalculateBitrate(format_context, max_duration, filesize_in_bytes);
  data_source_->SetBitrate(bitrate_);
  LogMetadata(format_context, max_duration);
  media_tracks_updated_cb_.Run(std::move(media_tracks));
  RunInitCB(PIPELINE_OK);
// Records the final track configs, duration, start time, and bitrate into the
// media log for debugging (chrome://media-internals).
void FFmpegDemuxer::LogMetadata(AVFormatContext* avctx,
                                base::TimeDelta max_duration) {
  std::vector<AudioDecoderConfig> audio_tracks;
  std::vector<VideoDecoderConfig> video_tracks;
  DCHECK_EQ(avctx->nb_streams, streams_.size());
  for (auto const& stream : streams_) {
    if (stream->type() == DemuxerStream::AUDIO) {
      audio_tracks.push_back(stream->audio_decoder_config());
    } else if (stream->type() == DemuxerStream::VIDEO) {
      video_tracks.push_back(stream->video_decoder_config());
  media_log_->SetProperty<MediaLogProperty::kAudioTracks>(audio_tracks);
  media_log_->SetProperty<MediaLogProperty::kVideoTracks>(video_tracks);
  media_log_->SetProperty<MediaLogProperty::kMaxDuration>(max_duration);
  media_log_->SetProperty<MediaLogProperty::kStartTime>(start_time_);
  media_log_->SetProperty<MediaLogProperty::kBitrate>(bitrate_);
// Returns the stream (matching |enabled|) with the lowest start time whose
// first DTS is known, or null if none qualifies.
FFmpegDemuxerStream* FFmpegDemuxer::FindStreamWithLowestStartTimestamp(
  FFmpegDemuxerStream* lowest_start_time_stream = nullptr;
  for (const auto& stream : streams_) {
    if (!stream || stream->IsEnabled() != enabled)
    // Streams with no known first DTS can't be compared meaningfully.
    if (av_stream_get_first_dts(stream->av_stream()) == kInvalidPTSMarker)
    if (!lowest_start_time_stream ||
        stream->start_time() < lowest_start_time_stream->start_time()) {
      lowest_start_time_stream = stream.get();
  return lowest_start_time_stream;
// Picks the stream av_seek_frame() should target for |seek_time|: an enabled
// video stream covering the time, else the enabled (then disabled) stream with
// the lowest start time, else any stream at all.
FFmpegDemuxerStream* FFmpegDemuxer::FindPreferredStreamForSeeking(
    base::TimeDelta seek_time) {
  // If we have a selected/enabled video stream and its start time is lower
  // than the |seek_time| or unknown, then always prefer it for seeking.
  for (const auto& stream : streams_) {
    if (stream->type() != DemuxerStream::VIDEO)
    if (av_stream_get_first_dts(stream->av_stream()) == kInvalidPTSMarker)
    if (!stream->IsEnabled())
    if (stream->start_time() <= seek_time)
      return stream.get();
  // If video stream is not present or |seek_time| is lower than the video start
  // time, then try to find an enabled stream with the lowest start time.
  FFmpegDemuxerStream* lowest_start_time_enabled_stream =
      FindStreamWithLowestStartTimestamp(true);
  if (lowest_start_time_enabled_stream &&
      lowest_start_time_enabled_stream->start_time() <= seek_time) {
    return lowest_start_time_enabled_stream;
  // If there's no enabled streams to consider from, try a disabled stream with
  // the lowest known start time.
  FFmpegDemuxerStream* lowest_start_time_disabled_stream =
      FindStreamWithLowestStartTimestamp(false);
  if (lowest_start_time_disabled_stream &&
      lowest_start_time_disabled_stream->start_time() <= seek_time) {
    return lowest_start_time_disabled_stream;
  // Otherwise fall back to any other stream.
  for (const auto& stream : streams_) {
      return stream.get();
  // Initialization guarantees at least one stream exists, so falling off the
  // loops above is a logic error.
  NOTREACHED_NORETURN();
// Completion callback for the blocking-pool AVSeekFrame call. On success,
// flushes stream buffers, resumes reading, and fires |pending_seek_cb_|.
void FFmpegDemuxer::OnSeekFrameDone(int result) {
  DCHECK(task_runner_->RunsTasksInCurrentSequence());
  DCHECK(pending_seek_cb_);
  MEDIA_LOG(ERROR, media_log_) << GetDisplayName() << ": bad state";
  RunPendingSeekCB(PIPELINE_ERROR_ABORT);
  MEDIA_LOG(ERROR, media_log_) << GetDisplayName() << ": demuxer seek failed";
  RunPendingSeekCB(PIPELINE_ERROR_READ);
  // Tell streams to flush buffers due to seeking.
  for (const auto& stream : streams_) {
    stream->FlushBuffers(false);
  // Resume reading until capacity.
  ReadFrameIfNeeded();
  // Notify we're finished seeking.
  RunPendingSeekCB(PIPELINE_OK);
// Enables the streams matching |track_ids| (at most one, see TODO below) and
// disables every other stream of |track_type|, then reports the enabled set
// via |change_completed_cb|.
void FFmpegDemuxer::FindAndEnableProperTracks(
    const std::vector<MediaTrack::Id>& track_ids,
    base::TimeDelta curr_time,
    DemuxerStream::Type track_type,
    TrackChangeCB change_completed_cb) {
  DCHECK(task_runner_->RunsTasksInCurrentSequence());
  std::set<FFmpegDemuxerStream*> enabled_streams;
  for (const auto& id : track_ids) {
    auto it = track_id_to_demux_stream_map_.find(id);
    if (it == track_id_to_demux_stream_map_.end())
    FFmpegDemuxerStream* stream = it->second;
    DCHECK_EQ(track_type, stream->type());
    // TODO(servolk): Remove after multiple enabled audio tracks are supported
    // by the media::RendererImpl.
    if (!enabled_streams.empty()) {
      MEDIA_LOG(INFO, media_log_)
          << "Only one enabled audio track is supported, ignoring track " << id;
    enabled_streams.insert(stream);
    stream->SetEnabled(true, curr_time);
  // First disable all streams that need to be disabled and then enable streams
  // that are enabled.
  for (const auto& stream : streams_) {
    if (stream && stream->type() == track_type &&
        enabled_streams.find(stream.get()) == enabled_streams.end()) {
      DVLOG(1) << __func__ << ": disabling stream " << stream.get();
      stream->SetEnabled(false, curr_time);
  std::vector<DemuxerStream*> streams(enabled_streams.begin(),
                                      enabled_streams.end());
  std::move(change_completed_cb).Run(track_type, streams);
// Audio track selection entry point; delegates to FindAndEnableProperTracks().
void FFmpegDemuxer::OnEnabledAudioTracksChanged(
    const std::vector<MediaTrack::Id>& track_ids,
    base::TimeDelta curr_time,
    TrackChangeCB change_completed_cb) {
  FindAndEnableProperTracks(track_ids, curr_time, DemuxerStream::AUDIO,
                            std::move(change_completed_cb));
// Called after the track-change seek finishes: flushes the newly selected
// video stream (preserving config changes) and signals completion.
void FFmpegDemuxer::OnVideoSeekedForTrackChange(
    DemuxerStream* video_stream,
    base::OnceClosure seek_completed_cb,
  static_cast<FFmpegDemuxerStream*>(video_stream)->FlushBuffers(true);
  // TODO(crbug.com/1424380): Report seek failures for track changes too.
  std::move(seek_completed_cb).Run();
// Seeks to |seek_to_time| after a video track switch so the new track starts
// at the current position; runs |seek_completed_cb| when done (or immediately
// when there is not exactly one selected stream).
void FFmpegDemuxer::SeekOnVideoTrackChange(
    base::TimeDelta seek_to_time,
    TrackChangeCB seek_completed_cb,
    DemuxerStream::Type stream_type,
    const std::vector<DemuxerStream*>& streams) {
  DCHECK_EQ(stream_type, DemuxerStream::VIDEO);
  if (streams.size() != 1u) {
    // If FFmpegDemuxer::FindAndEnableProperTracks() was not able to find the
    // selected streams in the ID->DemuxerStream map, then its possible for
    // this vector to be empty. If that's the case, we don't want to bother
    // with seeking, and just call the callback immediately.
    std::move(seek_completed_cb).Run(stream_type, streams);
  SeekInternal(seek_to_time,
               base::BindOnce(&FFmpegDemuxer::OnVideoSeekedForTrackChange,
                              weak_factory_.GetWeakPtr(), streams[0],
                              base::BindOnce(std::move(seek_completed_cb),
                                             DemuxerStream::VIDEO, streams)));
// Video track selection entry point: enables the requested tracks and, if any
// were selected, seeks to |curr_time| before completing.
void FFmpegDemuxer::OnSelectedVideoTrackChanged(
    const std::vector<MediaTrack::Id>& track_ids,
    base::TimeDelta curr_time,
    TrackChangeCB change_completed_cb) {
  // Find tracks -> Seek track -> run callback.
  FindAndEnableProperTracks(
      track_ids, curr_time, DemuxerStream::VIDEO,
      track_ids.empty() ? std::move(change_completed_cb)
                        : base::BindOnce(&FFmpegDemuxer::SeekOnVideoTrackChange,
                                         weak_factory_.GetWeakPtr(), curr_time,
                                         std::move(change_completed_cb)));
// Issues a single av_read_frame() (via ReadFrameAndDiscardEmpty) on the
// blocking pool when a read is warranted; at most one read is in flight
// (guarded by |pending_read_|). Continues in OnReadFrameDone().
void FFmpegDemuxer::ReadFrameIfNeeded() {
  DCHECK(task_runner_->RunsTasksInCurrentSequence());
  // Make sure we have work to do before reading.
  if (stopped_ || !StreamsHaveAvailableCapacity() || pending_read_ ||
  // Allocate and read an AVPacket from the media. Save |packet_ptr| since
  // evaluation order of packet.get() and std::move(&packet) is
  // unspecified across the two BindOnce arguments below.
  auto packet = ScopedAVPacket::Allocate();
  AVPacket* packet_ptr = packet.get();
  pending_read_ = true;
  blocking_task_runner_->PostTaskAndReplyWithResult(
      base::BindOnce(&ReadFrameAndDiscardEmpty, glue_->format_context(),
      base::BindOnce(&FFmpegDemuxer::OnReadFrameDone,
                     weak_factory_.GetWeakPtr(), std::move(packet)));
// Completion callback for a blocking-pool frame read. Handles end-of-stream /
// memory-limit conditions, routes the packet to its stream, updates the
// duration estimate when packets exceed it, and keeps reading until capacity.
void FFmpegDemuxer::OnReadFrameDone(ScopedAVPacket packet, int result) {
  DCHECK(task_runner_->RunsTasksInCurrentSequence());
  DCHECK(pending_read_);
  pending_read_ = false;
  if (stopped_ || pending_seek_cb_)
  // Consider the stream as ended if:
  // - either underlying ffmpeg returned an error
  // - or FFMpegDemuxer reached the maximum allowed memory usage.
  if (result < 0 || IsMaxMemoryUsageReached()) {
      MEDIA_LOG(DEBUG, media_log_)
          << ": av_read_frame(): " << AVErrorToString(result);
      MEDIA_LOG(DEBUG, media_log_)
          << GetDisplayName() << ": memory limit exceeded";
    // Update the duration based on the highest elapsed time across all streams.
    base::TimeDelta max_duration;
    for (const auto& stream : streams_) {
      base::TimeDelta duration = stream->duration();
      if (duration != kNoTimestamp && duration > max_duration)
        max_duration = duration;
    if (duration_ == kInfiniteDuration || max_duration > duration_) {
      host_->SetDuration(max_duration);
      duration_known_ = true;
      duration_ = max_duration;
  // If we have reached the end of stream, tell the downstream filters about
  // Queue the packet with the appropriate stream; we must defend against ffmpeg
  // giving us a bad stream index. See http://crbug.com/698549 for example.
  if (packet->stream_index >= 0 &&
      static_cast<size_t>(packet->stream_index) < streams_.size()) {
    // This is ensured by ReadFrameAndDiscardEmpty.
    DCHECK(packet->data);
    DCHECK(packet->size);
    if (auto& demuxer_stream = streams_[packet->stream_index]) {
      if (demuxer_stream->IsEnabled())
        demuxer_stream->EnqueuePacket(std::move(packet));
      // If duration estimate was incorrect, update it and tell higher layers.
      if (duration_known_) {
        const base::TimeDelta duration = demuxer_stream->duration();
        if (duration != kNoTimestamp && duration > duration_) {
          duration_ = duration;
          host_->SetDuration(duration_);
  // Keep reading until we've reached capacity.
  ReadFrameIfNeeded();
// Returns whether at least one enabled stream can still buffer more packets,
// i.e. whether ReadFrameIfNeeded() should keep issuing reads.
// NOTE(review): the `return true;` / `return false;` lines are missing from
// this excerpt (original lines between 1883 and 1889) — the visible loop body
// implies "true on first stream with capacity, false otherwise".
1880 bool FFmpegDemuxer::StreamsHaveAvailableCapacity() {
1881 DCHECK(task_runner_->RunsTasksInCurrentSequence());
// streams_ may contain null entries, hence the `stream &&` guard.
1882 for (const auto& stream : streams_) {
1883 if (stream && stream->IsEnabled() && stream->HasAvailableCapacity())
// Returns whether the combined buffered-packet memory of all streams has hit
// the demuxer-wide limit. Works by subtracting each stream's usage from the
// remaining budget instead of summing usages, which avoids size_t overflow.
// NOTE(review): the `return true;` inside the loop, the final `return false;`
// and a null-stream guard appear to be missing from this excerpt (gaps at
// original lines 1895-1897 and 1900+).
1889 bool FFmpegDemuxer::IsMaxMemoryUsageReached() const {
1890 DCHECK(task_runner_->RunsTasksInCurrentSequence());
// Total budget for this demuxer type, decremented per stream below.
1892 size_t memory_left =
1893 GetDemuxerMemoryLimit(Demuxer::DemuxerTypes::kFFmpegDemuxer);
1894 for (const auto& stream : streams_) {
1898 size_t stream_memory_usage = stream->MemoryUsage();
// Budget exceeded by this stream alone (limit reached).
1899 if (stream_memory_usage > memory_left)
1901 memory_left -= stream_memory_usage;
// Signals end-of-stream to every stream so downstream filters drain their
// queues and report EOS.
// NOTE(review): a null-stream guard line appears to be sampled out between
// original lines 1908 and 1910; `stream->SetEndOfStream()` on a null entry
// would crash, so presumably the loop skips null streams — confirm.
1906 void FFmpegDemuxer::StreamHasEnded() {
1907 DCHECK(task_runner_->RunsTasksInCurrentSequence());
1908 for (const auto& stream : streams_) {
1910 stream->SetEndOfStream();
// Called when the underlying DataSource reports a read failure; logs and
// surfaces PIPELINE_ERROR_READ to the pipeline host.
1914 void FFmpegDemuxer::OnDataSourceError() {
1915 MEDIA_LOG(ERROR, media_log_) << GetDisplayName() << ": data source error";
1916 host_->OnDemuxerError(PIPELINE_ERROR_READ);
// Logs |status| and forwards it to the pipeline host as a demuxer error.
1919 void FFmpegDemuxer::NotifyDemuxerError(PipelineStatus status) {
1920 MEDIA_LOG(ERROR, media_log_) << GetDisplayName()
1921 << ": demuxer error: " << status;
1922 host_->OnDemuxerError(status);
// Propagates the liveness classification (live vs. recorded) to all streams.
// NOTE(review): a null-stream guard appears to be sampled out of this excerpt
// between original lines 1927 and 1929 — confirm against the full source.
1925 void FFmpegDemuxer::SetLiveness(StreamLiveness liveness) {
1926 DCHECK(task_runner_->RunsTasksInCurrentSequence());
1927 for (const auto& stream : streams_) {
1929 stream->SetLiveness(liveness);
// Completes initialization: closes the async "Initialize" trace event and
// runs the stored init callback exactly once (std::move empties init_cb_).
// NOTE(review): a DCHECK(init_cb_) line appears to be sampled out between
// original lines 1934 and 1936.
1933 void FFmpegDemuxer::RunInitCB(PipelineStatus status) {
1934 DCHECK(task_runner_->RunsTasksInCurrentSequence());
1936 TRACE_EVENT_ASYNC_END1("media", "FFmpegDemuxer::Initialize", this, "status",
1937 PipelineStatusToString(status));
1938 std::move(init_cb_).Run(status);
// Completes a pending seek: closes the async "Seek" trace event and runs the
// stored seek callback exactly once (std::move empties pending_seek_cb_,
// which other code — e.g. OnReadFrameDone — tests to detect an in-flight
// seek).
1941 void FFmpegDemuxer::RunPendingSeekCB(PipelineStatus status) {
1942 DCHECK(task_runner_->RunsTasksInCurrentSequence());
1943 DCHECK(pending_seek_cb_);
1944 TRACE_EVENT_ASYNC_END1("media", "FFmpegDemuxer::Seek", this, "status",
1945 PipelineStatusToString(status));
1946 std::move(pending_seek_cb_).Run(status);
1949 } // namespace media