1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "content/common/gpu/media/gpu_video_decode_accelerator.h"
10 #include "base/command_line.h"
11 #include "base/logging.h"
12 #include "base/message_loop/message_loop_proxy.h"
13 #include "base/stl_util.h"
15 #include "content/common/gpu/gpu_channel.h"
16 #include "content/common/gpu/gpu_messages.h"
17 #include "content/public/common/content_switches.h"
18 #include "gpu/command_buffer/common/command_buffer.h"
19 #include "ipc/ipc_message_macros.h"
20 #include "ipc/ipc_message_utils.h"
21 #include "media/base/limits.h"
22 #include "ui/gl/gl_context.h"
23 #include "ui/gl/gl_surface_egl.h"
26 #include "base/win/windows_version.h"
27 #include "content/common/gpu/media/dxva_video_decode_accelerator.h"
28 #elif defined(OS_CHROMEOS) && defined(ARCH_CPU_ARMEL) && defined(USE_X11)
29 #include "content/common/gpu/media/v4l2_video_decode_accelerator.h"
30 #include "content/common/gpu/media/v4l2_video_device.h"
31 #elif defined(ARCH_CPU_X86_FAMILY) && defined(USE_X11)
32 #include "content/common/gpu/media/vaapi_video_decode_accelerator.h"
33 #include "ui/gl/gl_context_glx.h"
34 #include "ui/gl/gl_implementation.h"
35 #elif defined(OS_ANDROID)
36 #include "content/common/gpu/media/android_video_decode_accelerator.h"
39 #include "ui/gfx/size.h"
// Makes the GL context of |stub|'s command-buffer decoder current.
// Bound into |make_context_current_| with a WeakPtr so it degrades
// gracefully once the stub has been destroyed.
// NOTE(review): the early-return paths after each DLOG are elided in this
// excerpt; presumably the function returns false on both failures.
43 static bool MakeDecoderContextCurrent(
44 const base::WeakPtr<GpuCommandBufferStub> stub) {
46 DLOG(ERROR) << "Stub is gone; won't MakeCurrent().";
50 if (!stub->decoder()->MakeCurrent()) {
51 DLOG(ERROR) << "Failed to MakeCurrent()";
58 // A helper class that works like AutoLock but only acquires the lock when
// DCHECKs are enabled (rest of the original comment is elided here).
// Used to guard |uncleared_textures_| bookkeeping, which only exists for
// debug verification.
62 explicit DebugAutoLock(base::Lock& lock) : lock_(lock) {
// Destructor path: assert we still hold the lock before releasing.
69 lock_.AssertAcquired();
76 DISALLOW_COPY_AND_ASSIGN(DebugAutoLock);
// IPC filter installed on the GPU channel's IO thread when the VDA supports
// decoding on the IO thread. It intercepts Decode messages addressed to
// |host_route_id_| and forwards them to |owner_| without a hop to the child
// thread; all other messages fall through to normal routing.
79 class GpuVideoDecodeAccelerator::MessageFilter
80 : public IPC::ChannelProxy::MessageFilter {
82 MessageFilter(GpuVideoDecodeAccelerator* owner, int32 host_route_id)
83 : owner_(owner), host_route_id_(host_route_id) {}
// Drop the channel pointer on error/close so SendOnIOThread cannot use a
// dead channel.
85 virtual void OnChannelError() OVERRIDE { channel_ = NULL; }
87 virtual void OnChannelClosing() OVERRIDE { channel_ = NULL; }
89 virtual void OnFilterAdded(IPC::Channel* channel) OVERRIDE {
93 virtual void OnFilterRemoved() OVERRIDE {
94 // This will delete |owner_| and |this|.
95 owner_->OnFilterRemoved();
98 virtual bool OnMessageReceived(const IPC::Message& msg) OVERRIDE {
// Only handle messages for our route; others are not ours to consume.
99 if (msg.routing_id() != host_route_id_)
102 IPC_BEGIN_MESSAGE_MAP(MessageFilter, msg)
103 IPC_MESSAGE_FORWARD(AcceleratedVideoDecoderMsg_Decode, owner_,
104 GpuVideoDecodeAccelerator::OnDecode)
105 IPC_MESSAGE_UNHANDLED(return false;)
106 IPC_END_MESSAGE_MAP()
// Sends |message| directly over the channel from the IO thread. Sync
// messages are disallowed here because this bypasses the normal sender.
110 bool SendOnIOThread(IPC::Message* message) {
111 DCHECK(!message->is_sync());
// NOTE(review): a null-|channel_| guard between these lines appears to be
// elided in this excerpt.
116 return channel_->Send(message);
120 virtual ~MessageFilter() {}
// Raw back-pointer; |owner_| outlives the filter (see OnFilterRemoved).
123 GpuVideoDecodeAccelerator* owner_;
124 int32 host_route_id_;
125 // The channel to which this filter was added.
126 IPC::Channel* channel_;
// Constructed on the child (main GPU) thread. Registers this object as an
// IPC route on the stub's channel and as a destruction observer of the stub,
// and prepares the MakeCurrent callback used by the platform VDAs.
129 GpuVideoDecodeAccelerator::GpuVideoDecodeAccelerator(
131 GpuCommandBufferStub* stub,
132 const scoped_refptr<base::MessageLoopProxy>& io_message_loop)
133 : init_done_msg_(NULL),
134 host_route_id_(host_route_id),
// Manual-reset event, initially unsignaled; signaled by OnFilterRemoved()
// and waited on in OnWillDestroyStub().
137 filter_removed_(true, false),
138 io_message_loop_(io_message_loop),
139 weak_factory_for_io_(this) {
141 stub_->AddDestructionObserver(this);
142 stub_->channel()->AddRoute(host_route_id_, this);
// Remember the constructing (child) thread's loop for thread checks later.
143 child_message_loop_ = base::MessageLoopProxy::current();
144 make_context_current_ =
145 base::Bind(&MakeDecoderContextCurrent, stub_->AsWeakPtr());
148 GpuVideoDecodeAccelerator::~GpuVideoDecodeAccelerator() {
149 // This class can only be self-deleted from OnWillDestroyStub(), which means
150 // the VDA has already been destroyed in there.
151 CHECK(!video_decode_accelerator_.get());
// Child-thread IPC dispatch: routes decoder messages to the On* handlers.
// Bails out early if the VDA no longer exists (e.g. after an init failure).
154 bool GpuVideoDecodeAccelerator::OnMessageReceived(const IPC::Message& msg) {
155 if (!video_decode_accelerator_)
159 IPC_BEGIN_MESSAGE_MAP(GpuVideoDecodeAccelerator, msg)
160 IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Decode, OnDecode)
161 IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_AssignPictureBuffers,
162 OnAssignPictureBuffers)
163 IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_ReusePictureBuffer,
164 OnReusePictureBuffer)
165 IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Flush, OnFlush)
166 IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Reset, OnReset)
167 IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Destroy, OnDestroy)
168 IPC_MESSAGE_UNHANDLED(handled = false)
169 IPC_END_MESSAGE_MAP()
// VDA client callback: asks the renderer-side host to allocate
// |requested_num_of_buffers| picture buffers of |dimensions| for
// |texture_target|. Rejects dimensions outside media::limits as a
// platform failure. Caches the dimensions/target for later validation in
// OnAssignPictureBuffers().
173 void GpuVideoDecodeAccelerator::ProvidePictureBuffers(
174 uint32 requested_num_of_buffers,
175 const gfx::Size& dimensions,
176 uint32 texture_target) {
177 if (dimensions.width() > media::limits::kMaxDimension ||
178 dimensions.height() > media::limits::kMaxDimension ||
179 dimensions.GetArea() > media::limits::kMaxCanvas) {
180 NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE);
183 if (!Send(new AcceleratedVideoDecoderHostMsg_ProvidePictureBuffers(
185 requested_num_of_buffers,
188 DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_ProvidePictureBuffers) "
191 texture_dimensions_ = dimensions;
192 texture_target_ = texture_target;
// VDA client callback: tells the host the picture buffer is no longer in
// use, and drops any debug "uncleared texture" bookkeeping for it.
195 void GpuVideoDecodeAccelerator::DismissPictureBuffer(
196 int32 picture_buffer_id) {
197 // Notify client that picture buffer is now unused.
198 if (!Send(new AcceleratedVideoDecoderHostMsg_DismissPictureBuffer(
199 host_route_id_, picture_buffer_id))) {
200 DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_DismissPictureBuffer) "
// Lock only taken when DCHECKs are on (see DebugAutoLock).
203 DebugAutoLock auto_lock(debug_uncleared_textures_lock_);
204 uncleared_textures_.erase(picture_buffer_id);
// VDA client callback, invoked on either thread. On the child thread it
// marks the texture cleared; on the IO thread it debug-checks the texture
// was already cleared. In both cases it forwards the picture to the host.
207 void GpuVideoDecodeAccelerator::PictureReady(
208 const media::Picture& picture) {
209 // VDA may call PictureReady on IO thread. SetTextureCleared should run on
210 // the child thread. VDA is responsible to call PictureReady on the child
211 // thread when a picture buffer is delivered the first time.
212 if (child_message_loop_->BelongsToCurrentThread()) {
213 SetTextureCleared(picture);
215 DCHECK(io_message_loop_->BelongsToCurrentThread());
216 if (DCHECK_IS_ON()) {
217 DebugAutoLock auto_lock(debug_uncleared_textures_lock_);
// On the IO thread the buffer must already have been cleared on a prior
// child-thread delivery; otherwise the VDA broke its contract above.
218 DCHECK_EQ(0u, uncleared_textures_.count(picture.picture_buffer_id()));
222 if (!Send(new AcceleratedVideoDecoderHostMsg_PictureReady(
224 picture.picture_buffer_id(),
225 picture.bitstream_buffer_id()))) {
226 DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_PictureReady) failed";
// VDA client callback: reports |error| to the host. If initialization is
// still pending (|init_done_msg_| held), the pending synchronous reply is
// completed first so the caller is not left hanging.
230 void GpuVideoDecodeAccelerator::NotifyError(
231 media::VideoDecodeAccelerator::Error error) {
232 if (init_done_msg_) {
233 // If we get an error while we're initializing, NotifyInitializeDone won't
234 // be called, so we need to send the reply (with an error) here.
235 GpuCommandBufferMsg_CreateVideoDecoder::WriteReplyParams(
237 if (!Send(init_done_msg_))
238 DLOG(ERROR) << "Send(init_done_msg_) failed";
// Send() took ownership; clear so NotifyInitializeDone can't double-send.
239 init_done_msg_ = NULL;
242 if (!Send(new AcceleratedVideoDecoderHostMsg_ErrorNotification(
243 host_route_id_, error))) {
244 DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_ErrorNotification) "
// Creates the platform-specific VDA (DXVA on Windows, V4L2 on CrOS/ARM,
// VA-API on x86/X11, MediaCodec-based on Android), installs the IO-thread
// MessageFilter when the VDA can decode on the IO thread, and kicks off
// VDA initialization for |profile|. All failure paths report
// PLATFORM_FAILURE via NotifyError(), which also answers |init_done_msg|.
249 void GpuVideoDecodeAccelerator::Initialize(
250 const media::VideoCodecProfile profile,
251 IPC::Message* init_done_msg) {
252 DCHECK(!video_decode_accelerator_.get());
253 DCHECK(!init_done_msg_);
254 DCHECK(init_done_msg);
// Held until NotifyInitializeDone()/NotifyError() replies to the host.
255 init_done_msg_ = init_done_msg;
258 // Ensure we will be able to get a GL context at all before initializing
260 if (!make_context_current_.Run()) {
261 NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE);
// Windows: DXVA decode requires Win7+.
267 if (base::win::GetVersion() < base::win::VERSION_WIN7) {
268 NOTIMPLEMENTED() << "HW video decode acceleration not available.";
269 NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE);
272 DVLOG(0) << "Initializing DXVA HW decoder for windows.";
273 video_decode_accelerator_.reset(new DXVAVideoDecodeAccelerator(
274 this, make_context_current_));
275 #elif defined(OS_CHROMEOS) && defined(ARCH_CPU_ARMEL) && defined(USE_X11)
276 scoped_ptr<V4L2Device> device = V4L2Device::Create();
// NOTE(review): the null-device check guarding this error appears elided.
278 NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE);
281 video_decode_accelerator_.reset(
282 new V4L2VideoDecodeAccelerator(gfx::GLSurfaceEGL::GetHardwareDisplay(),
284 weak_factory_for_io_.GetWeakPtr(),
285 make_context_current_,
288 #elif defined(ARCH_CPU_X86_FAMILY) && defined(USE_X11)
// VA-API path needs desktop GL (GLX) to share the decoded surfaces.
289 if (gfx::GetGLImplementation() != gfx::kGLImplementationDesktopGL) {
290 VLOG(1) << "HW video decode acceleration not available without "
292 NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE);
295 gfx::GLContextGLX* glx_context =
296 static_cast<gfx::GLContextGLX*>(stub_->decoder()->GetGLContext());
297 video_decode_accelerator_.reset(new VaapiVideoDecodeAccelerator(
298 glx_context->display(), this, make_context_current_));
299 #elif defined(OS_ANDROID)
300 video_decode_accelerator_.reset(new AndroidVideoDecodeAccelerator(
302 stub_->decoder()->AsWeakPtr(),
303 make_context_current_));
305 NOTIMPLEMENTED() << "HW video decode acceleration not available.";
306 NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE);
// If the VDA can accept Decode() on the IO thread, install the filter so
// Decode IPCs skip the child-thread hop.
310 if (video_decode_accelerator_->CanDecodeOnIOThread()) {
311 filter_ = new MessageFilter(this, host_route_id_);
312 stub_->channel()->AddFilter(filter_.get());
315 if (!video_decode_accelerator_->Initialize(profile))
316 NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE);
319 // Runs on IO thread if video_decode_accelerator_->CanDecodeOnIOThread() is
320 // true, otherwise on the main thread.
// Validates |id| and hands the shared-memory bitstream buffer to the VDA.
// NotifyError must run on the child thread, hence the PostTask when we are
// on the IO thread.
321 void GpuVideoDecodeAccelerator::OnDecode(
322 base::SharedMemoryHandle handle, int32 id, uint32 size) {
323 DCHECK(video_decode_accelerator_.get());
// NOTE(review): the `if (id < 0)` guard for this error path appears elided.
325 DLOG(ERROR) << "BitstreamBuffer id " << id << " out of range";
326 if (child_message_loop_->BelongsToCurrentThread()) {
327 NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
329 child_message_loop_->PostTask(
// Unretained is safe only because |this| outlives the child loop until
// OnWillDestroyStub() — presumably guaranteed by the destruction protocol.
331 base::Bind(&GpuVideoDecodeAccelerator::NotifyError,
332 base::Unretained(this),
333 media::VideoDecodeAccelerator::INVALID_ARGUMENT));
337 video_decode_accelerator_->Decode(media::BitstreamBuffer(id, handle, size));
// Validates the host-supplied (buffer id, texture id) pairs against the
// texture manager and the dimensions/target cached in
// ProvidePictureBuffers(), translates client texture ids to service ids,
// then hands the resulting media::PictureBuffers to the VDA. Also records
// each TextureRef in |uncleared_textures_| so SetTextureCleared() can mark
// level 0 cleared on first delivery. Any validation failure reports
// INVALID_ARGUMENT (or PLATFORM_FAILURE for translation failure).
340 void GpuVideoDecodeAccelerator::OnAssignPictureBuffers(
341 const std::vector<int32>& buffer_ids,
342 const std::vector<uint32>& texture_ids) {
343 if (buffer_ids.size() != texture_ids.size()) {
344 NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
348 gpu::gles2::GLES2Decoder* command_decoder = stub_->decoder();
349 gpu::gles2::TextureManager* texture_manager =
350 command_decoder->GetContextGroup()->texture_manager();
352 std::vector<media::PictureBuffer> buffers;
// Keep refs alive so the textures can't be destroyed before we record them.
353 std::vector<scoped_refptr<gpu::gles2::TextureRef> > textures;
354 for (uint32 i = 0; i < buffer_ids.size(); ++i) {
355 if (buffer_ids[i] < 0) {
356 DLOG(ERROR) << "Buffer id " << buffer_ids[i] << " out of range";
357 NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
360 gpu::gles2::TextureRef* texture_ref = texture_manager->GetTexture(
363 DLOG(ERROR) << "Failed to find texture id " << texture_ids[i];
364 NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
367 gpu::gles2::Texture* info = texture_ref->texture();
368 if (info->target() != texture_target_) {
369 DLOG(ERROR) << "Texture target mismatch for texture id "
371 NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
374 if (texture_target_ == GL_TEXTURE_EXTERNAL_OES) {
375 // GL_TEXTURE_EXTERNAL_OES textures have their dimensions defined by the
376 // underlying EGLImage. Use |texture_dimensions_| for this size.
377 texture_manager->SetLevelInfo(texture_ref,
378 GL_TEXTURE_EXTERNAL_OES,
381 texture_dimensions_.width(),
382 texture_dimensions_.height(),
389 // For other targets, texture dimensions should already be defined.
390 GLsizei width = 0, height = 0;
391 info->GetLevelSize(texture_target_, 0, &width, &height);
392 if (width != texture_dimensions_.width() ||
393 height != texture_dimensions_.height()) {
394 DLOG(ERROR) << "Size mismatch for texture id " << texture_ids[i];
395 NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
// Map the client-visible texture id to the service-side GL texture id the
// VDA will actually render into.
399 uint32 service_texture_id;
400 if (!command_decoder->GetServiceTextureId(
401 texture_ids[i], &service_texture_id)) {
402 DLOG(ERROR) << "Failed to translate texture!";
403 NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE);
406 buffers.push_back(media::PictureBuffer(
407 buffer_ids[i], texture_dimensions_, service_texture_id));
408 textures.push_back(texture_ref);
410 video_decode_accelerator_->AssignPictureBuffers(buffers);
// Debug-only bookkeeping (lock is a no-op unless DCHECKs are on).
411 DebugAutoLock auto_lock(debug_uncleared_textures_lock_);
412 for (uint32 i = 0; i < buffer_ids.size(); ++i)
413 uncleared_textures_[buffer_ids[i]] = textures[i];
// IPC handler: returns a picture buffer to the VDA for reuse.
416 void GpuVideoDecodeAccelerator::OnReusePictureBuffer(
417 int32 picture_buffer_id) {
418 DCHECK(video_decode_accelerator_.get());
419 video_decode_accelerator_->ReusePictureBuffer(picture_buffer_id);
// IPC handler: asks the VDA to flush; completion arrives via NotifyFlushDone.
422 void GpuVideoDecodeAccelerator::OnFlush() {
423 DCHECK(video_decode_accelerator_.get());
424 video_decode_accelerator_->Flush();
// IPC handler: asks the VDA to reset; completion arrives via NotifyResetDone.
427 void GpuVideoDecodeAccelerator::OnReset() {
428 DCHECK(video_decode_accelerator_.get());
429 video_decode_accelerator_->Reset();
// IPC handler for host-initiated teardown. (The body after the DCHECK is
// elided in this excerpt — presumably it triggers the stub-destruction
// path; verify against the full file.)
432 void GpuVideoDecodeAccelerator::OnDestroy() {
433 DCHECK(video_decode_accelerator_.get());
// Runs on the IO thread once the MessageFilter has been removed from the
// channel. Invalidates IO-thread weak pointers (cancelling pending IO-thread
// callbacks) and unblocks OnWillDestroyStub(), which is waiting on
// |filter_removed_|.
437 void GpuVideoDecodeAccelerator::OnFilterRemoved() {
438 // We're destroying; cancel all callbacks.
439 weak_factory_for_io_.InvalidateWeakPtrs();
440 filter_removed_.Signal();
// VDA client callback: tells the host the bitstream buffer has been consumed.
443 void GpuVideoDecodeAccelerator::NotifyEndOfBitstreamBuffer(
444 int32 bitstream_buffer_id) {
445 if (!Send(new AcceleratedVideoDecoderHostMsg_BitstreamBufferProcessed(
446 host_route_id_, bitstream_buffer_id))) {
448 << "Send(AcceleratedVideoDecoderHostMsg_BitstreamBufferProcessed) "
// VDA client callback: completes the pending CreateVideoDecoder reply held
// in |init_done_msg_| with our route id. NOTE(review): no null check on
// |init_done_msg_| here — presumably the VDA guarantees exactly one of
// NotifyInitializeDone/NotifyError per Initialize; verify.
453 void GpuVideoDecodeAccelerator::NotifyInitializeDone() {
454 GpuCommandBufferMsg_CreateVideoDecoder::WriteReplyParams(
455 init_done_msg_, host_route_id_);
456 if (!Send(init_done_msg_))
457 DLOG(ERROR) << "Send(init_done_msg_) failed";
// Send() took ownership of the message; drop our pointer.
458 init_done_msg_ = NULL;
// VDA client callback: forwards flush completion to the host.
461 void GpuVideoDecodeAccelerator::NotifyFlushDone() {
462 if (!Send(new AcceleratedVideoDecoderHostMsg_FlushDone(host_route_id_)))
463 DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_FlushDone) failed";
// VDA client callback: forwards reset completion to the host.
466 void GpuVideoDecodeAccelerator::NotifyResetDone() {
467 if (!Send(new AcceleratedVideoDecoderHostMsg_ResetDone(host_route_id_)))
468 DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_ResetDone) failed";
// Stub destruction observer. Orchestrates the teardown ordering: remove the
// IO-thread filter, block until its removal is signaled, unregister the
// route/observer, then destroy the VDA while the GL context is still usable.
471 void GpuVideoDecodeAccelerator::OnWillDestroyStub() {
472 // The stub is going away, so we have to stop and destroy VDA here, before
473 // returning, because the VDA may need the GL context to run and/or do its
474 // cleanup. We cannot destroy the VDA before the IO thread message filter is
475 // removed however, since we cannot service incoming messages with VDA gone.
476 // We cannot simply check for existence of VDA on IO thread though, because
477 // we don't want to synchronize the IO thread with the ChildThread.
478 // So we have to wait for the RemoveFilter callback here instead and remove
479 // the VDA after it arrives and before returning.
// NOTE(review): a `if (filter_)` guard around these two lines appears
// elided in this excerpt.
481 stub_->channel()->RemoveFilter(filter_.get());
482 filter_removed_.Wait();
485 stub_->channel()->RemoveRoute(host_route_id_);
486 stub_->RemoveDestructionObserver(this);
// release() before Destroy(): the VDA deletes itself inside Destroy(), so
// the scoped_ptr must not also delete it (see CHECK in the destructor).
488 if (video_decode_accelerator_)
489 video_decode_accelerator_.release()->Destroy();
// Sends |message| (taking ownership) on whichever thread we are on: via the
// filter when called on the IO thread, otherwise via the stub's channel on
// the child thread.
494 bool GpuVideoDecodeAccelerator::Send(IPC::Message* message) {
495 if (filter_.get() && io_message_loop_->BelongsToCurrentThread())
496 return filter_->SendOnIOThread(message);
497 DCHECK(child_message_loop_->BelongsToCurrentThread());
498 return stub_->channel()->Send(message);
// Child-thread only. On the first delivery of a picture buffer, marks level
// 0 of its texture as cleared in the texture manager and removes it from
// the |uncleared_textures_| bookkeeping; subsequent deliveries are no-ops.
501 void GpuVideoDecodeAccelerator::SetTextureCleared(
502 const media::Picture& picture) {
503 DCHECK(child_message_loop_->BelongsToCurrentThread());
504 DebugAutoLock auto_lock(debug_uncleared_textures_lock_);
505 std::map<int32, scoped_refptr<gpu::gles2::TextureRef> >::iterator it;
506 it = uncleared_textures_.find(picture.picture_buffer_id());
507 if (it == uncleared_textures_.end())
508 return; // the texture has been cleared
510 scoped_refptr<gpu::gles2::TextureRef> texture_ref = it->second;
511 GLenum target = texture_ref->texture()->target();
512 gpu::gles2::TextureManager* texture_manager =
513 stub_->decoder()->GetContextGroup()->texture_manager();
514 DCHECK(!texture_ref->texture()->IsLevelCleared(target, 0));
515 texture_manager->SetLevelCleared(texture_ref, target, 0, true);
516 uncleared_textures_.erase(it);
519 } // namespace content