#include <CoreVideo/CoreVideo.h>
#include <OpenGL/CGLIOSurface.h>
+#include <OpenGL/gl.h>
#include "base/bind.h"
+#include "base/callback_helpers.h"
#include "base/command_line.h"
#include "base/sys_byteorder.h"
#include "base/thread_task_runner_handle.h"
#include "content/public/common/content_switches.h"
#include "media/filters/h264_parser.h"
#include "ui/gl/scoped_binders.h"
-#include "ui/gl/scoped_cgl.h"
using content_common_gpu_media::kModuleVt;
using content_common_gpu_media::InitializeStubs;
using content_common_gpu_media::IsVtInitialized;
using content_common_gpu_media::StubPathMap;
+// Logs the name of the failing call together with its OSStatus/CGLError and
+// reports PLATFORM_FAILURE to the client. Expands to a single statement; only
+// usable inside VTVideoDecodeAccelerator methods (it calls NotifyError()).
+#define NOTIFY_STATUS(name, status) \
+ do { \
+ LOG(ERROR) << name << " failed with status " << status; \
+ NotifyError(PLATFORM_FAILURE); \
+ } while (0)
+
namespace content {
// Size of NALU length headers in AVCC/MPEG-4 format (can be 1, 2, or 4).
CVImageBufferRef image_buffer,
CMTime presentation_time_stamp,
CMTime presentation_duration) {
- // TODO(sandersd): Implement flush-before-delete to guarantee validity.
VTVideoDecodeAccelerator* vda =
reinterpret_cast<VTVideoDecodeAccelerator*>(decompression_output_refcon);
int32_t bitstream_id = reinterpret_cast<intptr_t>(source_frame_refcon);
+// Out-of-line empty destructor; DecodedFrame's members (including the
+// retained image buffer — see Output()) are released implicitly via RAII.
VTVideoDecodeAccelerator::DecodedFrame::~DecodedFrame() {
}
-VTVideoDecodeAccelerator::VTVideoDecodeAccelerator(CGLContextObj cgl_context)
+// PendingAction pairs a deferred client action (flush/reset/destroy) with the
+// bitstream ID after which the action may complete.
+VTVideoDecodeAccelerator::PendingAction::PendingAction(
+ Action action,
+ int32_t bitstream_id)
+ : action(action),
+ bitstream_id(bitstream_id) {
+}
+
+VTVideoDecodeAccelerator::PendingAction::~PendingAction() {
+}
+
+// |make_context_current| is run before issuing GL calls in SendPictures();
+// it must be non-null.
+VTVideoDecodeAccelerator::VTVideoDecodeAccelerator(
+ CGLContextObj cgl_context,
+ const base::Callback<bool(void)>& make_context_current)
: cgl_context_(cgl_context),
+ make_context_current_(make_context_current),
client_(NULL),
+ has_error_(false),
format_(NULL),
session_(NULL),
gpu_task_runner_(base::ThreadTaskRunnerHandle::Get()),
weak_this_factory_(this),
decoder_thread_("VTDecoderThread") {
+ DCHECK(!make_context_current_.is_null());
callback_.decompressionOutputCallback = OutputThunk;
callback_.decompressionOutputRefCon = this;
}
return true;
}
-// TODO(sandersd): Proper error reporting instead of CHECKs.
-void VTVideoDecodeAccelerator::ConfigureDecoder(
+// Builds a CMFormatDescription from the supplied SPS/SPSExt/PPS NALUs and
+// (re)creates the VTDecompressionSession if the existing one cannot accept
+// the new format. Returns true on success; on failure reports the error via
+// NOTIFY_STATUS()/NotifyError() and returns false.
+bool VTVideoDecodeAccelerator::ConfigureDecoder(
const std::vector<const uint8_t*>& nalu_data_ptrs,
const std::vector<size_t>& nalu_data_sizes) {
DCHECK(decoder_thread_.message_loop_proxy()->BelongsToCurrentThread());
+
// Construct a new format description from the parameter sets.
// TODO(sandersd): Replace this with custom code to support OS X < 10.9.
format_.reset();
- CHECK(!CMVideoFormatDescriptionCreateFromH264ParameterSets(
+ OSStatus status = CMVideoFormatDescriptionCreateFromH264ParameterSets(
kCFAllocatorDefault,
nalu_data_ptrs.size(), // parameter_set_count
&nalu_data_ptrs.front(), // &parameter_set_pointers
&nalu_data_sizes.front(), // &parameter_set_sizes
kNALUHeaderLength, // nal_unit_header_length
- format_.InitializeInto()));
- CMVideoDimensions coded_dimensions =
- CMVideoFormatDescriptionGetDimensions(format_);
+ format_.InitializeInto());
+ if (status) {
+ NOTIFY_STATUS("CMVideoFormatDescriptionCreateFromH264ParameterSets()",
+ status);
+ return false;
+ }
+
+ // If the session is compatible, there's nothing to do.
+ if (session_ &&
+ VTDecompressionSessionCanAcceptFormatDescription(session_, format_)) {
+ return true;
+ }
// Prepare VideoToolbox configuration dictionaries.
base::ScopedCFTypeRef<CFMutableDictionaryRef> decoder_config(
&kCFTypeDictionaryKeyCallBacks,
&kCFTypeDictionaryValueCallBacks));
+ CMVideoDimensions coded_dimensions =
+ CMVideoFormatDescriptionGetDimensions(format_);
#define CFINT(i) CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &i)
// TODO(sandersd): RGBA option for 4:4:4 video.
int32_t pixel_format = kCVPixelFormatType_422YpCbCr8;
CFDictionarySetValue(
image_config, kCVPixelBufferOpenGLCompatibilityKey, kCFBooleanTrue);
- // TODO(sandersd): Check if the session is already compatible.
- // TODO(sandersd): Flush.
+ // TODO(sandersd): Does the old session need to be flushed first?
session_.reset();
- CHECK(!VTDecompressionSessionCreate(
+ status = VTDecompressionSessionCreate(
kCFAllocatorDefault,
format_, // video_format_description
decoder_config, // video_decoder_specification
image_config, // destination_image_buffer_attributes
&callback_, // output_callback
- session_.InitializeInto()));
-
- // If the size has changed, trigger a request for new picture buffers.
- gfx::Size new_coded_size(coded_dimensions.width, coded_dimensions.height);
- if (coded_size_ != new_coded_size) {
- coded_size_ = new_coded_size;
- gpu_task_runner_->PostTask(FROM_HERE, base::Bind(
- &VTVideoDecodeAccelerator::SizeChangedTask,
- weak_this_factory_.GetWeakPtr(),
- coded_size_));;
+ session_.InitializeInto());
+ if (status) {
+ NOTIFY_STATUS("VTDecompressionSessionCreate()", status);
+ return false;
}
+
+ return true;
}
void VTVideoDecodeAccelerator::Decode(const media::BitstreamBuffer& bitstream) {
DCHECK(CalledOnValidThread());
- // TODO(sandersd): Test what happens if bitstream buffers are passed to VT out
- // of order.
+ // Not actually a requirement of the VDA API, but we're lazy and use negative
+ // values as flags internally. Revisit that if this actually happens.
+ if (bitstream.id() < 0) {
+ LOG(ERROR) << "Negative bitstream ID";
+ NotifyError(INVALID_ARGUMENT);
+ client_->NotifyEndOfBitstreamBuffer(bitstream.id());
+ return;
+ }
+ // Record the buffer as pending on the GPU thread before handing it to the
+ // decoder thread; DropBitstream()/SendPictures() pop it later.
+ pending_bitstream_ids_.push(bitstream.id());
decoder_thread_.message_loop_proxy()->PostTask(FROM_HERE, base::Bind(
&VTVideoDecodeAccelerator::DecodeTask, base::Unretained(this),
bitstream));
}
-// TODO(sandersd): Proper error reporting instead of CHECKs.
+// Runs on |decoder_thread_|: translates the Annex B bitstream to AVCC,
+// reconfigures the session on new parameter sets, and submits the frame to
+// VideoToolbox. Every early return drops the buffer via |drop_bitstream|.
void VTVideoDecodeAccelerator::DecodeTask(
- const media::BitstreamBuffer bitstream) {
+ const media::BitstreamBuffer& bitstream) {
DCHECK(decoder_thread_.message_loop_proxy()->BelongsToCurrentThread());
+ // Once we have a bitstream buffer, we must either decode it or drop it.
+ // This construct ensures that the buffer is always dropped unless we call
+ // drop_bitstream.Release().
+ base::ScopedClosureRunner drop_bitstream(base::Bind(
+ &VTVideoDecodeAccelerator::DropBitstream, base::Unretained(this),
+ bitstream.id()));
+
// Map the bitstream buffer.
base::SharedMemory memory(bitstream.handle(), true);
size_t size = bitstream.size();
- CHECK(memory.Map(size));
+ if (!memory.Map(size)) {
+ LOG(ERROR) << "Failed to map bitstream buffer";
+ NotifyError(PLATFORM_FAILURE);
+ return;
+ }
const uint8_t* buf = static_cast<uint8_t*>(memory.memory());
// NALUs are stored with Annex B format in the bitstream buffer (start codes),
//
// 1. Locate relevant NALUs and compute the size of the translated data.
// Also record any parameter sets for VideoToolbox initialization.
+ bool config_changed = false;
size_t data_size = 0;
std::vector<media::H264NALU> nalus;
- std::vector<const uint8_t*> config_nalu_data_ptrs;
- std::vector<size_t> config_nalu_data_sizes;
parser_.SetStream(buf, size);
media::H264NALU nalu;
while (true) {
media::H264Parser::Result result = parser_.AdvanceToNextNALU(&nalu);
if (result == media::H264Parser::kEOStream)
break;
- CHECK_EQ(result, media::H264Parser::kOk);
- // TODO(sandersd): Check that these are only at the start.
- if (nalu.nal_unit_type == media::H264NALU::kSPS ||
- nalu.nal_unit_type == media::H264NALU::kPPS ||
- nalu.nal_unit_type == media::H264NALU::kSPSExt) {
- DVLOG(2) << "Parameter set " << nalu.nal_unit_type;
- config_nalu_data_ptrs.push_back(nalu.data);
- config_nalu_data_sizes.push_back(nalu.size);
- } else {
- nalus.push_back(nalu);
- data_size += kNALUHeaderLength + nalu.size;
+ if (result != media::H264Parser::kOk) {
+ LOG(ERROR) << "Failed to find H.264 NALU";
+ NotifyError(PLATFORM_FAILURE);
+ return;
+ }
+ // TODO(sandersd): Strict ordering rules.
+ switch (nalu.nal_unit_type) {
+ case media::H264NALU::kSPS:
+ last_sps_.assign(nalu.data, nalu.data + nalu.size);
+ last_spsext_.clear();
+ config_changed = true;
+ break;
+ case media::H264NALU::kSPSExt:
+ // TODO(sandersd): Check that the previous NALU was an SPS.
+ last_spsext_.assign(nalu.data, nalu.data + nalu.size);
+ config_changed = true;
+ break;
+ case media::H264NALU::kPPS:
+ last_pps_.assign(nalu.data, nalu.data + nalu.size);
+ config_changed = true;
+ break;
+ default:
+ nalus.push_back(nalu);
+ data_size += kNALUHeaderLength + nalu.size;
+ break;
}
}
// 2. Initialize VideoToolbox.
- // TODO(sandersd): Reinitialize when there are new parameter sets.
- if (!session_)
- ConfigureDecoder(config_nalu_data_ptrs, config_nalu_data_sizes);
+ // TODO(sandersd): Check if the new configuration is identical before
+ // reconfiguring.
+ if (config_changed) {
+ if (last_sps_.size() == 0 || last_pps_.size() == 0) {
+ LOG(ERROR) << "Invalid configuration data";
+ NotifyError(INVALID_ARGUMENT);
+ return;
+ }
+ // TODO(sandersd): Check that the SPS and PPS IDs match.
+ std::vector<const uint8_t*> nalu_data_ptrs;
+ std::vector<size_t> nalu_data_sizes;
+ nalu_data_ptrs.push_back(&last_sps_.front());
+ nalu_data_sizes.push_back(last_sps_.size());
+ if (last_spsext_.size() != 0) {
+ nalu_data_ptrs.push_back(&last_spsext_.front());
+ nalu_data_sizes.push_back(last_spsext_.size());
+ }
+ nalu_data_ptrs.push_back(&last_pps_.front());
+ nalu_data_sizes.push_back(last_pps_.size());
+
+ // If ConfigureDecoder() fails, it already called NotifyError().
+ if (!ConfigureDecoder(nalu_data_ptrs, nalu_data_sizes))
+ return;
+ }
+
+ // If there are no non-configuration units, immediately return an empty
+ // (ie. dropped) frame. It is an error to create a MemoryBlock with zero
+ // size.
+ if (!data_size)
+ return;
+
+ // If the session is not configured, fail.
+ if (!session_) {
+ LOG(ERROR) << "Image slice without configuration data";
+ NotifyError(INVALID_ARGUMENT);
+ return;
+ }
// 3. Allocate a memory-backed CMBlockBuffer for the translated data.
+ // TODO(sandersd): Check that the slice's PPS matches the current PPS.
base::ScopedCFTypeRef<CMBlockBufferRef> data;
- CHECK(!CMBlockBufferCreateWithMemoryBlock(
+ OSStatus status = CMBlockBufferCreateWithMemoryBlock(
kCFAllocatorDefault,
NULL, // &memory_block
data_size, // block_length
0, // offset_to_data
data_size, // data_length
0, // flags
- data.InitializeInto()));
+ data.InitializeInto());
+ if (status) {
+ NOTIFY_STATUS("CMBlockBufferCreateWithMemoryBlock()", status);
+ return;
+ }
// 4. Copy NALU data, inserting length headers.
size_t offset = 0;
for (size_t i = 0; i < nalus.size(); i++) {
media::H264NALU& nalu = nalus[i];
uint32_t header = base::HostToNet32(static_cast<uint32_t>(nalu.size));
- CHECK(!CMBlockBufferReplaceDataBytes(
- &header, data, offset, kNALUHeaderLength));
+ status = CMBlockBufferReplaceDataBytes(
+ &header, data, offset, kNALUHeaderLength);
+ if (status) {
+ NOTIFY_STATUS("CMBlockBufferReplaceDataBytes()", status);
+ return;
+ }
offset += kNALUHeaderLength;
- CHECK(!CMBlockBufferReplaceDataBytes(nalu.data, data, offset, nalu.size));
+ status = CMBlockBufferReplaceDataBytes(nalu.data, data, offset, nalu.size);
+ if (status) {
+ NOTIFY_STATUS("CMBlockBufferReplaceDataBytes()", status);
+ return;
+ }
offset += nalu.size;
}
// 5. Package the data for VideoToolbox and request decoding.
base::ScopedCFTypeRef<CMSampleBufferRef> frame;
- CHECK(!CMSampleBufferCreate(
+ status = CMSampleBufferCreate(
kCFAllocatorDefault,
data, // data_buffer
true, // data_ready
NULL, // &sample_timing_array
0, // num_sample_size_entries
NULL, // &sample_size_array
- frame.InitializeInto()));
+ frame.InitializeInto());
+ if (status) {
+ NOTIFY_STATUS("CMSampleBufferCreate()", status);
+ return;
+ }
// Asynchronous Decompression allows for parallel submission of frames
// (without it, DecodeFrame() does not return until the frame has been
kVTDecodeFrame_EnableAsynchronousDecompression;
intptr_t bitstream_id = bitstream.id();
- CHECK(!VTDecompressionSessionDecodeFrame(
+ status = VTDecompressionSessionDecodeFrame(
session_,
frame, // sample_buffer
decode_flags, // decode_flags
reinterpret_cast<void*>(bitstream_id), // source_frame_refcon
- NULL)); // &info_flags_out
+ NULL); // &info_flags_out
+ if (status) {
+ NOTIFY_STATUS("VTDecompressionSessionDecodeFrame()", status);
+ return;
+ }
+
+ // Now that the bitstream is decoding, don't drop it.
+ (void)drop_bitstream.Release();
}
// This method may be called on any VideoToolbox thread.
-// TODO(sandersd): Proper error reporting instead of CHECKs.
void VTVideoDecodeAccelerator::Output(
int32_t bitstream_id,
OSStatus status,
CVImageBufferRef image_buffer) {
- CHECK(!status);
- CHECK_EQ(CFGetTypeID(image_buffer), CVPixelBufferGetTypeID());
- CFRetain(image_buffer);
+ if (status) {
+ // TODO(sandersd): Handle dropped frames.
+ NOTIFY_STATUS("Decoding", status);
+ image_buffer = NULL;
+ } else if (CFGetTypeID(image_buffer) != CVPixelBufferGetTypeID()) {
+ LOG(ERROR) << "Decoded frame is not a CVPixelBuffer";
+ NotifyError(PLATFORM_FAILURE);
+ image_buffer = NULL;
+ } else {
+ // Retain across the thread hop; a NULL |image_buffer| marks the frame as
+ // dropped (see ProcessDroppedFrames()).
+ CFRetain(image_buffer);
+ }
gpu_task_runner_->PostTask(FROM_HERE, base::Bind(
&VTVideoDecodeAccelerator::OutputTask,
weak_this_factory_.GetWeakPtr(),
+// Runs on the GPU thread: queues the decoded (or dropped) frame and tries to
+// make progress on output and pending actions.
void VTVideoDecodeAccelerator::OutputTask(DecodedFrame frame) {
DCHECK(CalledOnValidThread());
decoded_frames_.push(frame);
- SendPictures();
-}
-
-void VTVideoDecodeAccelerator::SizeChangedTask(gfx::Size coded_size) {
- DCHECK(CalledOnValidThread());
- texture_size_ = coded_size;
- // TODO(sandersd): Dismiss existing picture buffers.
- client_->ProvidePictureBuffers(
- kNumPictureBuffers, texture_size_, GL_TEXTURE_RECTANGLE_ARB);
+ ProcessDecodedFrames();
}
+// Registers client-provided picture buffers as both assigned and available.
void VTVideoDecodeAccelerator::AssignPictureBuffers(
DCHECK(CalledOnValidThread());
for (size_t i = 0; i < pictures.size(); i++) {
- CHECK(!texture_ids_.count(pictures[i].id()));
- available_picture_ids_.push(pictures[i].id());
+ DCHECK(!texture_ids_.count(pictures[i].id()));
+ assigned_picture_ids_.insert(pictures[i].id());
+ available_picture_ids_.push_back(pictures[i].id());
texture_ids_[pictures[i].id()] = pictures[i].texture_id();
}
- // Pictures are not marked as uncleared until this method returns. They will
- // become broken if they are used before that happens.
+ // Pictures are not marked as uncleared until after this method returns, and
+ // they will be broken if they are used before that happens. So, schedule
+ // future work after that happens.
gpu_task_runner_->PostTask(FROM_HERE, base::Bind(
- &VTVideoDecodeAccelerator::SendPictures,
+ &VTVideoDecodeAccelerator::ProcessDecodedFrames,
weak_this_factory_.GetWeakPtr()));
}
DCHECK(CalledOnValidThread());
+ // NOTE(review): the enclosing method header is elided from this hunk —
+ // appears to be ReusePictureBuffer(); verify against the full file.
DCHECK_EQ(CFGetRetainCount(picture_bindings_[picture_id]), 1);
picture_bindings_.erase(picture_id);
- available_picture_ids_.push(picture_id);
- SendPictures();
+ // Don't put the picture back in the available list if it has been dismissed.
+ if (assigned_picture_ids_.count(picture_id) != 0) {
+ available_picture_ids_.push_back(picture_id);
+ ProcessDecodedFrames();
+ }
}
-// TODO(sandersd): Proper error reporting instead of CHECKs.
-void VTVideoDecodeAccelerator::SendPictures() {
+// Notifies the client that a queued action finished. For ACTION_DESTROY,
+// |this| is deleted — callers must not touch members afterward.
+void VTVideoDecodeAccelerator::CompleteAction(Action action) {
DCHECK(CalledOnValidThread());
- if (available_picture_ids_.empty() || decoded_frames_.empty())
- return;
- gfx::ScopedCGLSetCurrentContext scoped_set_current_context(cgl_context_);
- glEnable(GL_TEXTURE_RECTANGLE_ARB);
+ switch (action) {
+ case ACTION_FLUSH:
+ client_->NotifyFlushDone();
+ break;
+ case ACTION_RESET:
+ client_->NotifyResetDone();
+ break;
+ case ACTION_DESTROY:
+ delete this;
+ break;
+ }
+}
- while (!available_picture_ids_.empty() && !decoded_frames_.empty()) {
- int32_t picture_id = available_picture_ids_.front();
- available_picture_ids_.pop();
- DecodedFrame frame = decoded_frames_.front();
+// Completes every queued action waiting on |bitstream_id|, in FIFO order.
+void VTVideoDecodeAccelerator::CompleteActions(int32_t bitstream_id) {
+ DCHECK(CalledOnValidThread());
+ while (!pending_actions_.empty() &&
+ pending_actions_.front().bitstream_id == bitstream_id) {
+ CompleteAction(pending_actions_.front().action);
+ pending_actions_.pop();
+ }
+}
+
+// GPU-thread work loop: sends or drops decoded frames according to the next
+// pending action, completing actions as their bitstream IDs are reached.
+void VTVideoDecodeAccelerator::ProcessDecodedFrames() {
+ DCHECK(CalledOnValidThread());
+
+ while (!decoded_frames_.empty()) {
+ if (pending_actions_.empty()) {
+ // No pending actions; send frames normally.
+ if (!has_error_)
+ SendPictures(pending_bitstream_ids_.back());
+ return;
+ }
+
+ int32_t next_action_bitstream_id = pending_actions_.front().bitstream_id;
+ int32_t last_sent_bitstream_id = -1;
+ switch (pending_actions_.front().action) {
+ case ACTION_FLUSH:
+ // Send frames normally.
+ if (has_error_)
+ return;
+ last_sent_bitstream_id = SendPictures(next_action_bitstream_id);
+ break;
+
+ case ACTION_RESET:
+ // Drop decoded frames.
+ if (has_error_)
+ return;
+ while (!decoded_frames_.empty() &&
+ last_sent_bitstream_id != next_action_bitstream_id) {
+ last_sent_bitstream_id = decoded_frames_.front().bitstream_id;
+ decoded_frames_.pop();
+ DCHECK_EQ(pending_bitstream_ids_.front(), last_sent_bitstream_id);
+ pending_bitstream_ids_.pop();
+ client_->NotifyEndOfBitstreamBuffer(last_sent_bitstream_id);
+ }
+ break;
+
+ case ACTION_DESTROY:
+ // Drop decoded frames, without bookkeeping.
+ while (!decoded_frames_.empty()) {
+ last_sent_bitstream_id = decoded_frames_.front().bitstream_id;
+ decoded_frames_.pop();
+ }
+
+ // Handle completing the action specially, as it is important not to
+ // access |this| after calling CompleteAction().
+ if (last_sent_bitstream_id == next_action_bitstream_id)
+ CompleteAction(ACTION_DESTROY);
+
+ // Either |this| was deleted or no more progress can be made.
+ return;
+ }
+
+ // If we ran out of buffers (or pictures), no more progress can be made
+ // until more frames are decoded.
+ if (last_sent_bitstream_id != next_action_bitstream_id)
+ return;
+
+ // Complete all actions pending for this |bitstream_id|, then loop to see
+ // if progress can be made on the next action.
+ CompleteActions(next_action_bitstream_id);
+ }
+}
+
+// Pops leading frames that have no image (dropped frames), notifying the
+// client for each, and returns the ID of the last one handled (or the value
+// passed in if none were dropped).
+int32_t VTVideoDecodeAccelerator::ProcessDroppedFrames(
+ int32_t last_sent_bitstream_id,
+ int32_t up_to_bitstream_id) {
+ DCHECK(CalledOnValidThread());
+ // Drop frames as long as there is a frame, we have not reached the next
+ // action, and the next frame has no image.
+ while (!decoded_frames_.empty() &&
+ last_sent_bitstream_id != up_to_bitstream_id &&
+ decoded_frames_.front().image_buffer.get() == NULL) {
+ const DecodedFrame& frame = decoded_frames_.front();
+ DCHECK_EQ(pending_bitstream_ids_.front(), frame.bitstream_id);
+ client_->NotifyEndOfBitstreamBuffer(frame.bitstream_id);
+ last_sent_bitstream_id = frame.bitstream_id;
decoded_frames_.pop();
- IOSurfaceRef surface = CVPixelBufferGetIOSurface(frame.image_buffer);
+ pending_bitstream_ids_.pop();
+ }
+ return last_sent_bitstream_id;
+}
+
+// TODO(sandersd): If GpuVideoDecoder didn't specifically check the size of
+// textures, this would be unnecessary, as the size is actually a property of
+// the texture binding, not the texture. We rebind every frame, so the size
+// passed to ProvidePictureBuffers() is meaningless.
+// Dismisses all picture buffers and requests new ones when the next frame's
+// dimensions differ from |picture_size_|.
+void VTVideoDecodeAccelerator::ProcessSizeChangeIfNeeded() {
+ DCHECK(CalledOnValidThread());
+ DCHECK(!decoded_frames_.empty());
+
+ // Find the size of the next image.
+ const DecodedFrame& frame = decoded_frames_.front();
+ CVImageBufferRef image_buffer = frame.image_buffer.get();
+ size_t width = CVPixelBufferGetWidth(image_buffer);
+ size_t height = CVPixelBufferGetHeight(image_buffer);
+ gfx::Size image_size(width, height);
+
+ if (picture_size_ != image_size) {
+ // Dismiss all assigned picture buffers.
+ for (int32_t picture_id : assigned_picture_ids_)
+ client_->DismissPictureBuffer(picture_id);
+ assigned_picture_ids_.clear();
+ available_picture_ids_.clear();
+
+ // Request new pictures.
+ client_->ProvidePictureBuffers(
+ kNumPictureBuffers, image_size, GL_TEXTURE_RECTANGLE_ARB);
+ picture_size_ = image_size;
+ }
+}
+
+// Binds decoded IOSurfaces to client textures and delivers pictures until we
+// run out of frames, run out of picture buffers, hit an error, or reach
+// |up_to_bitstream_id|. Returns the last bitstream ID consumed (-1 if none).
+int32_t VTVideoDecodeAccelerator::SendPictures(int32_t up_to_bitstream_id) {
+ DCHECK(CalledOnValidThread());
+ DCHECK(!decoded_frames_.empty());
+
+ // TODO(sandersd): Store the actual last sent bitstream ID?
+ int32_t last_sent_bitstream_id = -1;
+
+ last_sent_bitstream_id =
+ ProcessDroppedFrames(last_sent_bitstream_id, up_to_bitstream_id);
+ if (last_sent_bitstream_id == up_to_bitstream_id || decoded_frames_.empty())
+ return last_sent_bitstream_id;
+
+ ProcessSizeChangeIfNeeded();
+ if (available_picture_ids_.empty())
+ return last_sent_bitstream_id;
+
+ if (!make_context_current_.Run()) {
+ LOG(ERROR) << "Failed to make GL context current";
+ NotifyError(PLATFORM_FAILURE);
+ return last_sent_bitstream_id;
+ }
+
+ glEnable(GL_TEXTURE_RECTANGLE_ARB);
+ while (!available_picture_ids_.empty() && !has_error_) {
+ DCHECK_NE(last_sent_bitstream_id, up_to_bitstream_id);
+ DCHECK(!decoded_frames_.empty());
+
+ // We don't pop |frame| or |picture_id| until they are consumed, which may
+ // not happen if an error occurs. Conveniently, this also removes some
+ // refcounting.
+ const DecodedFrame& frame = decoded_frames_.front();
+ DCHECK_EQ(pending_bitstream_ids_.front(), frame.bitstream_id);
+ int32_t picture_id = available_picture_ids_.back();
+
+ CVImageBufferRef image_buffer = frame.image_buffer.get();
+ IOSurfaceRef surface = CVPixelBufferGetIOSurface(image_buffer);
gfx::ScopedTextureBinder
texture_binder(GL_TEXTURE_RECTANGLE_ARB, texture_ids_[picture_id]);
- CHECK(!CGLTexImageIOSurface2D(
+ CGLError status = CGLTexImageIOSurface2D(
cgl_context_, // ctx
GL_TEXTURE_RECTANGLE_ARB, // target
GL_RGB, // internal_format
- texture_size_.width(), // width
- texture_size_.height(), // height
+ picture_size_.width(), // width
+ picture_size_.height(), // height
GL_YCBCR_422_APPLE, // format
GL_UNSIGNED_SHORT_8_8_APPLE, // type
surface, // io_surface
- 0)); // plane
+ 0); // plane
+ if (status != kCGLNoError) {
+ NOTIFY_STATUS("CGLTexImageIOSurface2D()", status);
+ break;
+ }
picture_bindings_[picture_id] = frame.image_buffer;
client_->PictureReady(media::Picture(
- picture_id, frame.bitstream_id, gfx::Rect(texture_size_)));
+ picture_id, frame.bitstream_id, gfx::Rect(picture_size_)));
+ available_picture_ids_.pop_back();
client_->NotifyEndOfBitstreamBuffer(frame.bitstream_id);
- }
+ last_sent_bitstream_id = frame.bitstream_id;
+ decoded_frames_.pop();
+ pending_bitstream_ids_.pop();
+
+ last_sent_bitstream_id =
+ ProcessDroppedFrames(last_sent_bitstream_id, up_to_bitstream_id);
+ if (last_sent_bitstream_id == up_to_bitstream_id || decoded_frames_.empty())
+ break;
+ ProcessSizeChangeIfNeeded();
+ }
glDisable(GL_TEXTURE_RECTANGLE_ARB);
+
+ return last_sent_bitstream_id;
+}
+
+// Runs on |decoder_thread_|: forces VideoToolbox to emit any delayed frames
+// so that queued actions can eventually complete.
+void VTVideoDecodeAccelerator::FlushTask() {
+ DCHECK(decoder_thread_.message_loop_proxy()->BelongsToCurrentThread());
+ OSStatus status = VTDecompressionSessionFinishDelayedFrames(session_);
+ if (status)
+ NOTIFY_STATUS("VTDecompressionSessionFinishDelayedFrames()", status);
+}
+
+// Completes |action| immediately if nothing is pending; otherwise queues it
+// against the most recent pending bitstream ID.
+void VTVideoDecodeAccelerator::QueueAction(Action action) {
+ DCHECK(CalledOnValidThread());
+ if (pending_bitstream_ids_.empty()) {
+ // If there are no pending frames, all actions complete immediately.
+ CompleteAction(action);
+ } else {
+ // Otherwise, queue the action.
+ pending_actions_.push(PendingAction(action, pending_bitstream_ids_.back()));
+
+ // Request a flush to make sure the action will eventually complete.
+ decoder_thread_.message_loop_proxy()->PostTask(FROM_HERE, base::Bind(
+ &VTVideoDecodeAccelerator::FlushTask, base::Unretained(this)));
+
+ // See if we can make progress now that there is a new pending action.
+ ProcessDecodedFrames();
+ }
+}
+
+// May be called from any thread; bounces to the GPU thread before setting
+// |has_error_| and notifying the client.
+void VTVideoDecodeAccelerator::NotifyError(Error error) {
+ if (!CalledOnValidThread()) {
+ gpu_task_runner_->PostTask(FROM_HERE, base::Bind(
+ &VTVideoDecodeAccelerator::NotifyError,
+ weak_this_factory_.GetWeakPtr(),
+ error));
+ return;
+ }
+ has_error_ = true;
+ client_->NotifyError(error);
+}
+
+// Runs on |decoder_thread_|: reports a bitstream buffer as dropped by queuing
+// a DecodedFrame with a NULL image buffer on the GPU thread.
+void VTVideoDecodeAccelerator::DropBitstream(int32_t bitstream_id) {
+ DCHECK(decoder_thread_.message_loop_proxy()->BelongsToCurrentThread());
+ gpu_task_runner_->PostTask(FROM_HERE, base::Bind(
+ &VTVideoDecodeAccelerator::OutputTask,
+ weak_this_factory_.GetWeakPtr(),
+ DecodedFrame(bitstream_id, NULL)));
}
+// VDA entry point: flush completes once all pending frames are delivered.
void VTVideoDecodeAccelerator::Flush() {
DCHECK(CalledOnValidThread());
- // TODO(sandersd): Trigger flush, sending frames.
+ QueueAction(ACTION_FLUSH);
}
+// VDA entry point: reset completes once all pending frames are dropped.
void VTVideoDecodeAccelerator::Reset() {
DCHECK(CalledOnValidThread());
- // TODO(sandersd): Trigger flush, discarding frames.
+ QueueAction(ACTION_RESET);
}
+// VDA entry point: cancels queued actions, returns all buffers, then queues
+// ACTION_DESTROY, which deletes |this| (possibly synchronously).
void VTVideoDecodeAccelerator::Destroy() {
DCHECK(CalledOnValidThread());
- // TODO(sandersd): Trigger flush, discarding frames, and wait for them.
- delete this;
+ // Drop any other pending actions.
+ while (!pending_actions_.empty())
+ pending_actions_.pop();
+ // Return all bitstream buffers.
+ while (!pending_bitstream_ids_.empty()) {
+ client_->NotifyEndOfBitstreamBuffer(pending_bitstream_ids_.front());
+ pending_bitstream_ids_.pop();
+ }
+ QueueAction(ACTION_DESTROY);
}
bool VTVideoDecodeAccelerator::CanDecodeOnIOThread() {