1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "content/common/gpu/media/gpu_video_decode_accelerator.h"
10 #include "base/command_line.h"
11 #include "base/logging.h"
12 #include "base/message_loop/message_loop_proxy.h"
13 #include "base/stl_util.h"
15 #include "content/common/gpu/gpu_channel.h"
16 #include "content/common/gpu/gpu_messages.h"
17 #include "content/public/common/content_switches.h"
18 #include "gpu/command_buffer/common/command_buffer.h"
19 #include "ipc/ipc_message_macros.h"
20 #include "ipc/ipc_message_utils.h"
21 #include "ipc/message_filter.h"
22 #include "media/base/limits.h"
23 #include "ui/gl/gl_context.h"
24 #include "ui/gl/gl_surface_egl.h"
27 #include "base/win/windows_version.h"
28 #include "content/common/gpu/media/dxva_video_decode_accelerator.h"
29 #elif defined(OS_CHROMEOS) && defined(ARCH_CPU_ARMEL) && defined(USE_X11)
30 #include "content/common/gpu/media/v4l2_video_decode_accelerator.h"
31 #include "content/common/gpu/media/v4l2_video_device.h"
32 #elif defined(ARCH_CPU_X86_FAMILY) && (defined(USE_X11) || defined(USE_OZONE))
33 #include "content/common/gpu/media/vaapi_video_decode_accelerator.h"
35 #include "ui/gl/gl_context_glx.h"
36 #include "ui/gl/gl_implementation.h"
38 #elif defined(OS_ANDROID)
39 #include "content/common/gpu/media/android_video_decode_accelerator.h"
42 #include "ui/gfx/size.h"
// Makes the stub's GL context current so the VDA can issue GL work.
// Bound (with a WeakPtr to the stub) into |make_context_current_|.
// NOTE(review): this excerpt elides the weak-pointer check and the
// return statements; presumably it returns false on either failure
// path below and true otherwise — confirm against the full file.
46 static bool MakeDecoderContextCurrent(
47 const base::WeakPtr<GpuCommandBufferStub> stub) {
// Stub was destroyed out from under us (weak pointer went null).
49 DLOG(ERROR) << "Stub is gone; won't MakeCurrent().";
53 if (!stub->decoder()->MakeCurrent()) {
54 DLOG(ERROR) << "Failed to MakeCurrent()";
61 // DebugAutoLock works like AutoLock but only acquires the lock when
// DCHECKs are enabled; in the other build configuration it compiles to a
// no-op so release builds pay no locking cost for debug-only state.
// NOTE(review): the #if/#else preprocessor lines are elided from this
// excerpt — the typedef and the no-op class below come from opposite
// branches of that conditional; confirm which branch is which.
64 typedef base::AutoLock DebugAutoLock;
// No-op variant: accepts the lock but never acquires it.
68 explicit DebugAutoLock(base::Lock&) {}
// IPC filter installed on the GPU channel's IO thread so that Decode
// messages can be handled there (bypassing the child thread) when the
// VDA reports CanDecodeOnIOThread(). Non-Decode messages for this route
// fall through to the regular routed dispatch on the child thread.
72 class GpuVideoDecodeAccelerator::MessageFilter : public IPC::MessageFilter {
74 MessageFilter(GpuVideoDecodeAccelerator* owner, int32 host_route_id)
75 : owner_(owner), host_route_id_(host_route_id) {}
// Drop the channel pointer once the channel dies so SendOnIOThread
// cannot use a stale IPC::Channel.
77 virtual void OnChannelError() OVERRIDE { channel_ = NULL; }
79 virtual void OnChannelClosing() OVERRIDE { channel_ = NULL; }
// Remember the channel we were added to; used by SendOnIOThread().
81 virtual void OnFilterAdded(IPC::Channel* channel) OVERRIDE {
85 virtual void OnFilterRemoved() OVERRIDE {
86 // This will delete |owner_| and |this|.
87 owner_->OnFilterRemoved();
// Handles only messages addressed to our route; everything else (and
// any message type other than Decode) returns false so the normal
// routing path processes it on the child thread.
90 virtual bool OnMessageReceived(const IPC::Message& msg) OVERRIDE {
91 if (msg.routing_id() != host_route_id_)
94 IPC_BEGIN_MESSAGE_MAP(MessageFilter, msg)
95 IPC_MESSAGE_FORWARD(AcceleratedVideoDecoderMsg_Decode, owner_,
96 GpuVideoDecodeAccelerator::OnDecode)
97 IPC_MESSAGE_UNHANDLED(return false;)
// Sends |message| directly on the IO thread. Only valid for async
// messages; sync sends from the IO thread would deadlock.
102 bool SendOnIOThread(IPC::Message* message) {
103 DCHECK(!message->is_sync());
// NOTE(review): the null-check/delete path for a dead |channel_| is
// elided from this excerpt — confirm |channel_| is tested before use.
108 return channel_->Send(message);
112 virtual ~MessageFilter() {}
// Raw pointer: |owner_| outlives the filter (it waits for
// OnFilterRemoved() before self-deleting — see OnWillDestroyStub()).
115 GpuVideoDecodeAccelerator* owner_;
116 int32 host_route_id_;
117 // The channel to which this filter was added.
118 IPC::Channel* channel_;
// Constructed on the child (GPU main) thread. Registers as a destruction
// observer of |stub| so teardown is driven by OnWillDestroyStub(), and
// captures the current loop as the child-thread loop for later thread
// checks and task posting.
121 GpuVideoDecodeAccelerator::GpuVideoDecodeAccelerator(
123 GpuCommandBufferStub* stub,
124 const scoped_refptr<base::MessageLoopProxy>& io_message_loop)
125 : host_route_id_(host_route_id),
// WaitableEvent(manual_reset=true, initially_signaled=false): signaled
// by OnFilterRemoved(), waited on in OnWillDestroyStub().
128 filter_removed_(true, false),
129 io_message_loop_(io_message_loop),
130 weak_factory_for_io_(this) {
132 stub_->AddDestructionObserver(this);
133 child_message_loop_ = base::MessageLoopProxy::current();
// Weak stub pointer keeps the callback safe if the stub dies first.
134 make_context_current_ =
135 base::Bind(&MakeDecoderContextCurrent, stub_->AsWeakPtr());
// Destructor is only reached via self-deletion from OnWillDestroyStub(),
// by which point the VDA must already have been handed off/destroyed.
138 GpuVideoDecodeAccelerator::~GpuVideoDecodeAccelerator() {
139 // This class can only be self-deleted from OnWillDestroyStub(), which means
140 // the VDA has already been destroyed in there.
141 DCHECK(!video_decode_accelerator_);
// Child-thread dispatch for all AcceleratedVideoDecoderMsg_* messages
// routed to this object. Decode may instead arrive on the IO thread via
// MessageFilter when the VDA supports it. Returns whether |msg| was
// handled (per the elided `handled` bookkeeping below).
144 bool GpuVideoDecodeAccelerator::OnMessageReceived(const IPC::Message& msg) {
// No VDA yet (Initialize not run or it failed): nothing can be handled.
145 if (!video_decode_accelerator_)
149 IPC_BEGIN_MESSAGE_MAP(GpuVideoDecodeAccelerator, msg)
150 IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Decode, OnDecode)
151 IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_AssignPictureBuffers,
152 OnAssignPictureBuffers)
153 IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_ReusePictureBuffer,
154 OnReusePictureBuffer)
155 IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Flush, OnFlush)
156 IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Reset, OnReset)
157 IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Destroy, OnDestroy)
158 IPC_MESSAGE_UNHANDLED(handled = false)
159 IPC_END_MESSAGE_MAP()
// media::VideoDecodeAccelerator::Client implementation: asks the host
// (renderer) process to allocate |requested_num_of_buffers| textures of
// |dimensions| for target |texture_target|. The dimensions/target are
// cached so OnAssignPictureBuffers() can validate the textures it gets
// back.
163 void GpuVideoDecodeAccelerator::ProvidePictureBuffers(
164 uint32 requested_num_of_buffers,
165 const gfx::Size& dimensions,
166 uint32 texture_target) {
// Reject absurd sizes before asking the host to allocate them.
167 if (dimensions.width() > media::limits::kMaxDimension ||
168 dimensions.height() > media::limits::kMaxDimension ||
169 dimensions.GetArea() > media::limits::kMaxCanvas) {
170 NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE);
173 if (!Send(new AcceleratedVideoDecoderHostMsg_ProvidePictureBuffers(
175 requested_num_of_buffers,
178 DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_ProvidePictureBuffers) "
// Remember what we asked for; OnAssignPictureBuffers checks against it.
181 texture_dimensions_ = dimensions;
182 texture_target_ = texture_target;
// Client callback: tells the host that |picture_buffer_id| is no longer
// in use, and drops our uncleared-texture bookkeeping for it.
185 void GpuVideoDecodeAccelerator::DismissPictureBuffer(
186 int32 picture_buffer_id) {
187 // Notify client that picture buffer is now unused.
188 if (!Send(new AcceleratedVideoDecoderHostMsg_DismissPictureBuffer(
189 host_route_id_, picture_buffer_id))) {
190 DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_DismissPictureBuffer) "
// Lock protects |uncleared_textures_| against the IO thread.
193 DebugAutoLock auto_lock(debug_uncleared_textures_lock_);
194 uncleared_textures_.erase(picture_buffer_id);
// Client callback: forwards a decoded picture to the host. May be called
// on either the child or IO thread; the first delivery of each buffer
// must happen on the child thread so the texture can be marked cleared.
197 void GpuVideoDecodeAccelerator::PictureReady(
198 const media::Picture& picture) {
199 // VDA may call PictureReady on IO thread. SetTextureCleared should run on
200 // the child thread. VDA is responsible to call PictureReady on the child
201 // thread when a picture buffer is delivered the first time.
202 if (child_message_loop_->BelongsToCurrentThread()) {
203 SetTextureCleared(picture);
// IO-thread path: legal only for buffers already cleared (first
// delivery happened earlier on the child thread) — DCHECK enforces it.
205 DCHECK(io_message_loop_->BelongsToCurrentThread());
206 DebugAutoLock auto_lock(debug_uncleared_textures_lock_);
207 DCHECK_EQ(0u, uncleared_textures_.count(picture.picture_buffer_id()));
210 if (!Send(new AcceleratedVideoDecoderHostMsg_PictureReady(
212 picture.picture_buffer_id(),
213 picture.bitstream_buffer_id()))) {
214 DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_PictureReady) failed";
// Client callback: reports a fatal/recoverable decoder error to the host.
218 void GpuVideoDecodeAccelerator::NotifyError(
219 media::VideoDecodeAccelerator::Error error) {
220 if (!Send(new AcceleratedVideoDecoderHostMsg_ErrorNotification(
221 host_route_id_, error))) {
222 DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_ErrorNotification) "
// Creates the platform-specific VDA for |profile|, wires up IPC routing
// (and the IO-thread filter when the VDA can decode off the child
// thread), and replies to |init_done_msg| with success/failure. Each
// early-failure path replies false; only the final line replies true.
// NOTE(review): the #if defined(OS_WIN) guard and several early returns
// are elided from this excerpt — the platform branches below are
// selected by preprocessor conditionals, not runtime checks.
227 void GpuVideoDecodeAccelerator::Initialize(
228 const media::VideoCodecProfile profile,
229 IPC::Message* init_done_msg) {
230 DCHECK(!video_decode_accelerator_.get());
// Route must exist before the VDA can send host-bound messages.
232 if (!stub_->channel()->AddRoute(host_route_id_, this)) {
233 DLOG(ERROR) << "GpuVideoDecodeAccelerator::Initialize(): "
234 "failed to add route";
235 SendCreateDecoderReply(init_done_msg, false);
239 // Ensure we will be able to get a GL context at all before initializing
241 if (!make_context_current_.Run()) {
242 SendCreateDecoderReply(init_done_msg, false);
// Windows: DXVA requires Win7+.
248 if (base::win::GetVersion() < base::win::VERSION_WIN7) {
249 NOTIMPLEMENTED() << "HW video decode acceleration not available.";
250 SendCreateDecoderReply(init_done_msg, false);
253 DVLOG(0) << "Initializing DXVA HW decoder for windows.";
254 video_decode_accelerator_.reset(
255 new DXVAVideoDecodeAccelerator(make_context_current_));
// Chrome OS / ARM: V4L2-based decoder (requires a V4L2 device node).
256 #elif defined(OS_CHROMEOS) && defined(ARCH_CPU_ARMEL) && defined(USE_X11)
257 scoped_ptr<V4L2Device> device = V4L2Device::Create(V4L2Device::kDecoder);
259 SendCreateDecoderReply(init_done_msg, false);
262 video_decode_accelerator_.reset(new V4L2VideoDecodeAccelerator(
263 gfx::GLSurfaceEGL::GetHardwareDisplay(),
264 stub_->decoder()->GetGLContext()->GetHandle(),
// IO-thread weak pointer: invalidated in OnFilterRemoved().
265 weak_factory_for_io_.GetWeakPtr(),
266 make_context_current_,
// x86 Ozone: VA-API without an X display.
269 #elif defined(ARCH_CPU_X86_FAMILY) && defined(USE_OZONE)
270 video_decode_accelerator_.reset(new VaapiVideoDecodeAccelerator(
271 make_context_current_));
// x86 X11: VA-API needs desktop GL and the GLX context's display.
272 #elif defined(ARCH_CPU_X86_FAMILY) && defined(USE_X11)
273 if (gfx::GetGLImplementation() != gfx::kGLImplementationDesktopGL) {
274 VLOG(1) << "HW video decode acceleration not available without "
276 SendCreateDecoderReply(init_done_msg, false);
279 gfx::GLContextGLX* glx_context =
280 static_cast<gfx::GLContextGLX*>(stub_->decoder()->GetGLContext());
281 video_decode_accelerator_.reset(new VaapiVideoDecodeAccelerator(
282 glx_context->display(), make_context_current_));
283 #elif defined(OS_ANDROID)
284 video_decode_accelerator_.reset(new AndroidVideoDecodeAccelerator(
285 stub_->decoder()->AsWeakPtr(),
286 make_context_current_));
// No supported platform matched at compile time.
288 NOTIMPLEMENTED() << "HW video decode acceleration not available.";
289 SendCreateDecoderReply(init_done_msg, false);
// Install the IO-thread filter only when the VDA can consume Decode
// messages there; otherwise all messages stay on the child thread.
293 if (video_decode_accelerator_->CanDecodeOnIOThread()) {
294 filter_ = new MessageFilter(this, host_route_id_);
295 stub_->channel()->AddFilter(filter_.get());
298 if (!video_decode_accelerator_->Initialize(profile, this)) {
299 SendCreateDecoderReply(init_done_msg, false);
303 SendCreateDecoderReply(init_done_msg, true);
306 // Runs on IO thread if video_decode_accelerator_->CanDecodeOnIOThread() is
307 // true, otherwise on the main thread.
// Feeds one shared-memory bitstream buffer to the VDA. |id| must be
// non-negative (the elided check below guards it); errors are reported
// on the child thread regardless of which thread we're on.
308 void GpuVideoDecodeAccelerator::OnDecode(
309 base::SharedMemoryHandle handle, int32 id, uint32 size) {
310 DCHECK(video_decode_accelerator_.get());
// NOTE(review): the `if (id < 0)` guard is elided from this excerpt.
312 DLOG(ERROR) << "BitstreamBuffer id " << id << " out of range";
313 if (child_message_loop_->BelongsToCurrentThread()) {
314 NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
// On the IO thread: hop to the child thread to report the error.
// Unretained is safe because teardown waits for the filter removal
// before deleting |this|.
316 child_message_loop_->PostTask(
318 base::Bind(&GpuVideoDecodeAccelerator::NotifyError,
319 base::Unretained(this),
320 media::VideoDecodeAccelerator::INVALID_ARGUMENT));
324 video_decode_accelerator_->Decode(media::BitstreamBuffer(id, handle, size));
// Validates the textures the host allocated in response to
// ProvidePictureBuffers() (existence, target, size), translates client
// texture ids to service ids, hands the resulting PictureBuffers to the
// VDA, and records each TextureRef as "uncleared" until its first
// PictureReady on the child thread marks it cleared.
327 void GpuVideoDecodeAccelerator::OnAssignPictureBuffers(
328 const std::vector<int32>& buffer_ids,
329 const std::vector<uint32>& texture_ids) {
// The two vectors must be parallel arrays.
330 if (buffer_ids.size() != texture_ids.size()) {
331 NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
335 gpu::gles2::GLES2Decoder* command_decoder = stub_->decoder();
336 gpu::gles2::TextureManager* texture_manager =
337 command_decoder->GetContextGroup()->texture_manager();
339 std::vector<media::PictureBuffer> buffers;
// Keep refs alive alongside |buffers| until both vectors are committed.
340 std::vector<scoped_refptr<gpu::gles2::TextureRef> > textures;
341 for (uint32 i = 0; i < buffer_ids.size(); ++i) {
342 if (buffer_ids[i] < 0) {
343 DLOG(ERROR) << "Buffer id " << buffer_ids[i] << " out of range";
344 NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
347 gpu::gles2::TextureRef* texture_ref = texture_manager->GetTexture(
350 DLOG(ERROR) << "Failed to find texture id " << texture_ids[i];
351 NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
354 gpu::gles2::Texture* info = texture_ref->texture();
// Target must match what ProvidePictureBuffers() requested.
355 if (info->target() != texture_target_) {
356 DLOG(ERROR) << "Texture target mismatch for texture id "
358 NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
361 if (texture_target_ == GL_TEXTURE_EXTERNAL_OES) {
362 // GL_TEXTURE_EXTERNAL_OES textures have their dimensions defined by the
363 // underlying EGLImage. Use |texture_dimensions_| for this size.
364 texture_manager->SetLevelInfo(texture_ref,
365 GL_TEXTURE_EXTERNAL_OES,
368 texture_dimensions_.width(),
369 texture_dimensions_.height(),
376 // For other targets, texture dimensions should already be defined.
377 GLsizei width = 0, height = 0;
378 info->GetLevelSize(texture_target_, 0, &width, &height);
379 if (width != texture_dimensions_.width() ||
380 height != texture_dimensions_.height()) {
381 DLOG(ERROR) << "Size mismatch for texture id " << texture_ids[i];
382 NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
// Map the client-side texture id to the GL service id the VDA binds.
386 uint32 service_texture_id;
387 if (!command_decoder->GetServiceTextureId(
388 texture_ids[i], &service_texture_id)) {
389 DLOG(ERROR) << "Failed to translate texture!";
390 NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE);
393 buffers.push_back(media::PictureBuffer(
394 buffer_ids[i], texture_dimensions_, service_texture_id));
395 textures.push_back(texture_ref);
397 video_decode_accelerator_->AssignPictureBuffers(buffers);
// Record all textures as uncleared until first delivery; lock guards
// against concurrent IO-thread access.
398 DebugAutoLock auto_lock(debug_uncleared_textures_lock_);
399 for (uint32 i = 0; i < buffer_ids.size(); ++i)
400 uncleared_textures_[buffer_ids[i]] = textures[i];
// Host is done displaying |picture_buffer_id|; return it to the VDA.
403 void GpuVideoDecodeAccelerator::OnReusePictureBuffer(
404 int32 picture_buffer_id) {
405 DCHECK(video_decode_accelerator_.get());
406 video_decode_accelerator_->ReusePictureBuffer(picture_buffer_id);
// Host requested a flush; completion is reported via NotifyFlushDone().
409 void GpuVideoDecodeAccelerator::OnFlush() {
410 DCHECK(video_decode_accelerator_.get());
411 video_decode_accelerator_->Flush();
// Host requested a reset; completion is reported via NotifyResetDone().
414 void GpuVideoDecodeAccelerator::OnReset() {
415 DCHECK(video_decode_accelerator_.get());
416 video_decode_accelerator_->Reset();
// Host requested teardown of this decoder.
// NOTE(review): the body after the DCHECK is elided from this excerpt;
// presumably it triggers stub/self destruction — confirm in full file.
419 void GpuVideoDecodeAccelerator::OnDestroy() {
420 DCHECK(video_decode_accelerator_.get());
// Runs on the IO thread when MessageFilter is removed from the channel.
// Cancels pending IO-thread callbacks, then unblocks the child thread
// waiting in OnWillDestroyStub() (which owns the rest of teardown).
424 void GpuVideoDecodeAccelerator::OnFilterRemoved() {
425 // We're destroying; cancel all callbacks.
426 weak_factory_for_io_.InvalidateWeakPtrs();
427 filter_removed_.Signal();
// Client callback: tells the host that |bitstream_buffer_id| has been
// fully consumed and its shared memory can be recycled.
430 void GpuVideoDecodeAccelerator::NotifyEndOfBitstreamBuffer(
431 int32 bitstream_buffer_id) {
432 if (!Send(new AcceleratedVideoDecoderHostMsg_BitstreamBufferProcessed(
433 host_route_id_, bitstream_buffer_id))) {
435 << "Send(AcceleratedVideoDecoderHostMsg_BitstreamBufferProcessed) "
// Client callback: flush requested in OnFlush() has completed.
440 void GpuVideoDecodeAccelerator::NotifyFlushDone() {
441 if (!Send(new AcceleratedVideoDecoderHostMsg_FlushDone(host_route_id_)))
442 DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_FlushDone) failed";
// Client callback: reset requested in OnReset() has completed.
445 void GpuVideoDecodeAccelerator::NotifyResetDone() {
446 if (!Send(new AcceleratedVideoDecoderHostMsg_ResetDone(host_route_id_)))
447 DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_ResetDone) failed";
// GpuCommandBufferStub::DestructionObserver implementation; the single
// teardown path for this object (see dtor DCHECK). Synchronizes with the
// IO thread via |filter_removed_| before destroying the VDA, then lets
// the VDA self-destruct via Destroy() (hence release(), not reset()).
450 void GpuVideoDecodeAccelerator::OnWillDestroyStub() {
451 // The stub is going away, so we have to stop and destroy VDA here, before
452 // returning, because the VDA may need the GL context to run and/or do its
453 // cleanup. We cannot destroy the VDA before the IO thread message filter is
454 // removed however, since we cannot service incoming messages with VDA gone.
455 // We cannot simply check for existence of VDA on IO thread though, because
456 // we don't want to synchronize the IO thread with the ChildThread.
457 // So we have to wait for the RemoveFilter callback here instead and remove
458 // the VDA after it arrives and before returning.
// NOTE(review): the `if (filter_)` guard around this pair is elided from
// this excerpt; the wait is only meaningful when a filter was installed.
460 stub_->channel()->RemoveFilter(filter_.get());
461 filter_removed_.Wait();
464 stub_->channel()->RemoveRoute(host_route_id_);
465 stub_->RemoveDestructionObserver(this);
// Destroy() deletes the VDA on its own terms; release ownership first.
467 if (video_decode_accelerator_)
468 video_decode_accelerator_.release()->Destroy();
// Marks the texture backing |picture|'s buffer as cleared in the
// TextureManager on its first delivery, so the GL service treats its
// contents as defined. Child thread only; no-op if already cleared.
473 void GpuVideoDecodeAccelerator::SetTextureCleared(
474 const media::Picture& picture) {
475 DCHECK(child_message_loop_->BelongsToCurrentThread());
476 DebugAutoLock auto_lock(debug_uncleared_textures_lock_);
477 std::map<int32, scoped_refptr<gpu::gles2::TextureRef> >::iterator it;
478 it = uncleared_textures_.find(picture.picture_buffer_id());
479 if (it == uncleared_textures_.end())
480 return; // the texture has been cleared
// Hold a local ref so the texture survives the erase below.
482 scoped_refptr<gpu::gles2::TextureRef> texture_ref = it->second;
483 GLenum target = texture_ref->texture()->target();
484 gpu::gles2::TextureManager* texture_manager =
485 stub_->decoder()->GetContextGroup()->texture_manager();
486 DCHECK(!texture_ref->texture()->IsLevelCleared(target, 0));
487 texture_manager->SetLevelCleared(texture_ref, target, 0, true);
488 uncleared_textures_.erase(it);
// Sends |message| on whichever thread we're on: directly via the filter
// when called on the IO thread (and a filter exists), otherwise via the
// stub's channel on the child thread. Returns false on send failure.
491 bool GpuVideoDecodeAccelerator::Send(IPC::Message* message) {
492 if (filter_.get() && io_message_loop_->BelongsToCurrentThread())
493 return filter_->SendOnIOThread(message);
494 DCHECK(child_message_loop_->BelongsToCurrentThread());
495 return stub_->channel()->Send(message);
// Writes |succeeded| into the synchronous CreateVideoDecoder reply
// message and sends it (send call elided from this excerpt).
498 void GpuVideoDecodeAccelerator::SendCreateDecoderReply(IPC::Message* message,
500 GpuCommandBufferMsg_CreateVideoDecoder::WriteReplyParams(message, succeeded);
504 } // namespace content