31c3029c444b6ee3676de8227d7e4a2a5d645ef6
[platform/framework/web/crosswalk.git] / src / content / common / gpu / media / gpu_video_decode_accelerator.cc
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "content/common/gpu/media/gpu_video_decode_accelerator.h"
6
7 #include <vector>
8
9 #include "base/bind.h"
10 #include "base/command_line.h"
11 #include "base/logging.h"
12 #include "base/message_loop/message_loop_proxy.h"
13 #include "base/stl_util.h"
14
15 #include "content/common/gpu/gpu_channel.h"
16 #include "content/common/gpu/gpu_messages.h"
17 #include "content/public/common/content_switches.h"
18 #include "gpu/command_buffer/common/command_buffer.h"
19 #include "ipc/ipc_message_macros.h"
20 #include "ipc/ipc_message_utils.h"
21 #include "media/base/limits.h"
22 #include "ui/gl/gl_context.h"
23 #include "ui/gl/gl_surface_egl.h"
24
25 #if defined(OS_WIN)
26 #include "base/win/windows_version.h"
27 #include "content/common/gpu/media/dxva_video_decode_accelerator.h"
28 #elif defined(OS_CHROMEOS) && defined(ARCH_CPU_ARMEL) && defined(USE_X11)
29 #include "content/common/gpu/media/v4l2_video_decode_accelerator.h"
30 #include "content/common/gpu/media/v4l2_video_device.h"
31 #elif defined(ARCH_CPU_X86_FAMILY) && defined(USE_X11)
32 #include "content/common/gpu/media/vaapi_video_decode_accelerator.h"
33 #include "ui/gl/gl_context_glx.h"
34 #include "ui/gl/gl_implementation.h"
35 #elif defined(OS_ANDROID)
36 #include "content/common/gpu/media/android_video_decode_accelerator.h"
37 #endif
38
39 #include "ui/gfx/size.h"
40
41 namespace content {
42
43 static bool MakeDecoderContextCurrent(
44     const base::WeakPtr<GpuCommandBufferStub> stub) {
45   if (!stub.get()) {
46     DLOG(ERROR) << "Stub is gone; won't MakeCurrent().";
47     return false;
48   }
49
50   if (!stub->decoder()->MakeCurrent()) {
51     DLOG(ERROR) << "Failed to MakeCurrent()";
52     return false;
53   }
54
55   return true;
56 }
57
// DebugAutoLock works like AutoLock but only acquires the lock when
// DCHECK is on.  In non-DCHECK builds the stand-in class below does nothing,
// so the guarded lock is never actually taken in release builds.
#if DCHECK_IS_ON
typedef base::AutoLock DebugAutoLock;
#else
class DebugAutoLock {
 public:
  explicit DebugAutoLock(base::Lock&) {}
};
#endif
68
69 class GpuVideoDecodeAccelerator::MessageFilter
70     : public IPC::ChannelProxy::MessageFilter {
71  public:
72   MessageFilter(GpuVideoDecodeAccelerator* owner, int32 host_route_id)
73       : owner_(owner), host_route_id_(host_route_id) {}
74
75   virtual void OnChannelError() OVERRIDE { channel_ = NULL; }
76
77   virtual void OnChannelClosing() OVERRIDE { channel_ = NULL; }
78
79   virtual void OnFilterAdded(IPC::Channel* channel) OVERRIDE {
80     channel_ = channel;
81   }
82
83   virtual void OnFilterRemoved() OVERRIDE {
84     // This will delete |owner_| and |this|.
85     owner_->OnFilterRemoved();
86   }
87
88   virtual bool OnMessageReceived(const IPC::Message& msg) OVERRIDE {
89     if (msg.routing_id() != host_route_id_)
90       return false;
91
92     IPC_BEGIN_MESSAGE_MAP(MessageFilter, msg)
93       IPC_MESSAGE_FORWARD(AcceleratedVideoDecoderMsg_Decode, owner_,
94                           GpuVideoDecodeAccelerator::OnDecode)
95       IPC_MESSAGE_UNHANDLED(return false;)
96     IPC_END_MESSAGE_MAP()
97     return true;
98   }
99
100   bool SendOnIOThread(IPC::Message* message) {
101     DCHECK(!message->is_sync());
102     if (!channel_) {
103       delete message;
104       return false;
105     }
106     return channel_->Send(message);
107   }
108
109  protected:
110   virtual ~MessageFilter() {}
111
112  private:
113   GpuVideoDecodeAccelerator* owner_;
114   int32 host_route_id_;
115   // The channel to which this filter was added.
116   IPC::Channel* channel_;
117 };
118
// Constructed on the child (GPU main) thread.  Registers |this| as the IPC
// route for |host_route_id| and as a destruction observer of |stub|, so
// teardown is driven by OnWillDestroyStub().
GpuVideoDecodeAccelerator::GpuVideoDecodeAccelerator(
    int32 host_route_id,
    GpuCommandBufferStub* stub,
    const scoped_refptr<base::MessageLoopProxy>& io_message_loop)
    : init_done_msg_(NULL),
      host_route_id_(host_route_id),
      stub_(stub),
      texture_target_(0),
      // Manual-reset event, initially not signaled; set by OnFilterRemoved().
      filter_removed_(true, false),
      io_message_loop_(io_message_loop),
      weak_factory_for_io_(this) {
  DCHECK(stub_);
  stub_->AddDestructionObserver(this);
  stub_->channel()->AddRoute(host_route_id_, this);
  // Remember the constructing thread's loop for later thread checks and for
  // posting NotifyError() back from the IO thread.
  child_message_loop_ = base::MessageLoopProxy::current();
  make_context_current_ =
      base::Bind(&MakeDecoderContextCurrent, stub_->AsWeakPtr());
}
137
138 GpuVideoDecodeAccelerator::~GpuVideoDecodeAccelerator() {
139   // This class can only be self-deleted from OnWillDestroyStub(), which means
140   // the VDA has already been destroyed in there.
141   CHECK(!video_decode_accelerator_.get());
142 }
143
// Dispatches child-thread IPC for this route.  (Decode may also be handled
// on the IO thread by MessageFilter when the VDA supports that.)  Returns
// true if the message was consumed.
bool GpuVideoDecodeAccelerator::OnMessageReceived(const IPC::Message& msg) {
  // Every handler below requires a live VDA; ignore messages arriving before
  // Initialize() created one (or after it was destroyed).
  if (!video_decode_accelerator_)
    return false;

  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(GpuVideoDecodeAccelerator, msg)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Decode, OnDecode)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_AssignPictureBuffers,
                        OnAssignPictureBuffers)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_ReusePictureBuffer,
                        OnReusePictureBuffer)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Flush, OnFlush)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Reset, OnReset)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Destroy, OnDestroy)
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()
  return handled;
}
162
163 void GpuVideoDecodeAccelerator::ProvidePictureBuffers(
164     uint32 requested_num_of_buffers,
165     const gfx::Size& dimensions,
166     uint32 texture_target) {
167   if (dimensions.width() > media::limits::kMaxDimension ||
168       dimensions.height() > media::limits::kMaxDimension ||
169       dimensions.GetArea() > media::limits::kMaxCanvas) {
170     NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE);
171     return;
172   }
173   if (!Send(new AcceleratedVideoDecoderHostMsg_ProvidePictureBuffers(
174            host_route_id_,
175            requested_num_of_buffers,
176            dimensions,
177            texture_target))) {
178     DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_ProvidePictureBuffers) "
179                 << "failed";
180   }
181   texture_dimensions_ = dimensions;
182   texture_target_ = texture_target;
183 }
184
185 void GpuVideoDecodeAccelerator::DismissPictureBuffer(
186     int32 picture_buffer_id) {
187   // Notify client that picture buffer is now unused.
188   if (!Send(new AcceleratedVideoDecoderHostMsg_DismissPictureBuffer(
189           host_route_id_, picture_buffer_id))) {
190     DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_DismissPictureBuffer) "
191                 << "failed";
192   }
193   DebugAutoLock auto_lock(debug_uncleared_textures_lock_);
194   uncleared_textures_.erase(picture_buffer_id);
195 }
196
void GpuVideoDecodeAccelerator::PictureReady(
    const media::Picture& picture) {
  // VDA may call PictureReady on IO thread. SetTextureCleared should run on
  // the child thread. VDA is responsible to call PictureReady on the child
  // thread when a picture buffer is delivered the first time.
  if (child_message_loop_->BelongsToCurrentThread()) {
    SetTextureCleared(picture);
  } else {
    // On the IO thread the buffer must have been delivered (and its texture
    // cleared) at least once already; verify that invariant under the debug
    // lock.
    DCHECK(io_message_loop_->BelongsToCurrentThread());
    DebugAutoLock auto_lock(debug_uncleared_textures_lock_);
    DCHECK_EQ(0u, uncleared_textures_.count(picture.picture_buffer_id()));
  }

  if (!Send(new AcceleratedVideoDecoderHostMsg_PictureReady(
          host_route_id_,
          picture.picture_buffer_id(),
          picture.bitstream_buffer_id()))) {
    DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_PictureReady) failed";
  }
}
217
218 void GpuVideoDecodeAccelerator::NotifyError(
219     media::VideoDecodeAccelerator::Error error) {
220   if (init_done_msg_) {
221     // If we get an error while we're initializing, NotifyInitializeDone won't
222     // be called, so we need to send the reply (with an error) here.
223     GpuCommandBufferMsg_CreateVideoDecoder::WriteReplyParams(
224         init_done_msg_, -1);
225     if (!Send(init_done_msg_))
226       DLOG(ERROR) << "Send(init_done_msg_) failed";
227     init_done_msg_ = NULL;
228     return;
229   }
230   if (!Send(new AcceleratedVideoDecoderHostMsg_ErrorNotification(
231           host_route_id_, error))) {
232     DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_ErrorNotification) "
233                 << "failed";
234   }
235 }
236
// Creates the platform-specific VDA and kicks off its initialization.  The
// pending CreateVideoDecoder reply |init_done_msg| is answered later: by
// NotifyInitializeDone() on success, or by NotifyError() on failure.
void GpuVideoDecodeAccelerator::Initialize(
    const media::VideoCodecProfile profile,
    IPC::Message* init_done_msg) {
  DCHECK(!video_decode_accelerator_.get());
  DCHECK(!init_done_msg_);
  DCHECK(init_done_msg);
  init_done_msg_ = init_done_msg;

#if !defined(OS_WIN)
  // Ensure we will be able to get a GL context at all before initializing
  // non-Windows VDAs.
  if (!make_context_current_.Run()) {
    NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE);
    return;
  }
#endif

#if defined(OS_WIN)
  // DXVA-based decode requires Windows 7 or newer.
  if (base::win::GetVersion() < base::win::VERSION_WIN7) {
    NOTIMPLEMENTED() << "HW video decode acceleration not available.";
    NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE);
    return;
  }
  DVLOG(0) << "Initializing DXVA HW decoder for windows.";
  video_decode_accelerator_.reset(
      new DXVAVideoDecodeAccelerator(make_context_current_));
#elif defined(OS_CHROMEOS) && defined(ARCH_CPU_ARMEL) && defined(USE_X11)
  // ARM Chrome OS: V4L2-based decoder; requires a usable V4L2 device.
  scoped_ptr<V4L2Device> device = V4L2Device::Create();
  if (!device.get()) {
    NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE);
    return;
  }
  video_decode_accelerator_.reset(
      new V4L2VideoDecodeAccelerator(gfx::GLSurfaceEGL::GetHardwareDisplay(),
                                     weak_factory_for_io_.GetWeakPtr(),
                                     make_context_current_,
                                     device.Pass(),
                                     io_message_loop_));
#elif defined(ARCH_CPU_X86_FAMILY) && defined(USE_X11)
  // x86 Linux/Chrome OS: VA-API decoder, which needs a GLX (DesktopGL)
  // context to share pictures with the compositor.
  if (gfx::GetGLImplementation() != gfx::kGLImplementationDesktopGL) {
    VLOG(1) << "HW video decode acceleration not available without "
               "DesktopGL (GLX).";
    NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE);
    return;
  }
  gfx::GLContextGLX* glx_context =
      static_cast<gfx::GLContextGLX*>(stub_->decoder()->GetGLContext());
  video_decode_accelerator_.reset(new VaapiVideoDecodeAccelerator(
      glx_context->display(), make_context_current_));
#elif defined(OS_ANDROID)
  video_decode_accelerator_.reset(new AndroidVideoDecodeAccelerator(
      stub_->decoder()->AsWeakPtr(),
      make_context_current_));
#else
  NOTIMPLEMENTED() << "HW video decode acceleration not available.";
  NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE);
  return;
#endif

  // If the VDA can take Decode() calls on the IO thread, install the filter
  // so AcceleratedVideoDecoderMsg_Decode bypasses the child thread entirely.
  if (video_decode_accelerator_->CanDecodeOnIOThread()) {
    filter_ = new MessageFilter(this, host_route_id_);
    stub_->channel()->AddFilter(filter_.get());
  }

  if (!video_decode_accelerator_->Initialize(profile, this))
    NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE);
}
304
// Runs on IO thread if video_decode_accelerator_->CanDecodeOnIOThread() is
// true, otherwise on the main thread.
void GpuVideoDecodeAccelerator::OnDecode(
    base::SharedMemoryHandle handle, int32 id, uint32 size) {
  DCHECK(video_decode_accelerator_.get());
  // Negative bitstream buffer ids are invalid; report INVALID_ARGUMENT.
  if (id < 0) {
    DLOG(ERROR) << "BitstreamBuffer id " << id << " out of range";
    // NotifyError must run on the child thread, so bounce there when called
    // on the IO thread.  NOTE(review): the posted task uses
    // base::Unretained(this); this appears to rely on the filter-removal
    // handshake in OnWillDestroyStub() draining IO-thread work before
    // deletion — verify the posted child-thread task cannot outlive |this|.
    if (child_message_loop_->BelongsToCurrentThread()) {
      NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
    } else {
      child_message_loop_->PostTask(
          FROM_HERE,
          base::Bind(&GpuVideoDecodeAccelerator::NotifyError,
                     base::Unretained(this),
                     media::VideoDecodeAccelerator::INVALID_ARGUMENT));
    }
    return;
  }
  video_decode_accelerator_->Decode(media::BitstreamBuffer(id, handle, size));
}
325
// Validates the client-supplied texture ids against the size/target recorded
// in ProvidePictureBuffers(), then hands the resulting PictureBuffers to the
// VDA and remembers the textures so they can be marked cleared on first
// delivery (see SetTextureCleared()).
void GpuVideoDecodeAccelerator::OnAssignPictureBuffers(
    const std::vector<int32>& buffer_ids,
    const std::vector<uint32>& texture_ids) {
  // The two vectors are parallel arrays; a length mismatch is a bad message.
  if (buffer_ids.size() != texture_ids.size()) {
    NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
    return;
  }

  gpu::gles2::GLES2Decoder* command_decoder = stub_->decoder();
  gpu::gles2::TextureManager* texture_manager =
      command_decoder->GetContextGroup()->texture_manager();

  std::vector<media::PictureBuffer> buffers;
  std::vector<scoped_refptr<gpu::gles2::TextureRef> > textures;
  for (uint32 i = 0; i < buffer_ids.size(); ++i) {
    if (buffer_ids[i] < 0) {
      DLOG(ERROR) << "Buffer id " << buffer_ids[i] << " out of range";
      NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
      return;
    }
    gpu::gles2::TextureRef* texture_ref = texture_manager->GetTexture(
        texture_ids[i]);
    if (!texture_ref) {
      DLOG(ERROR) << "Failed to find texture id " << texture_ids[i];
      NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
      return;
    }
    gpu::gles2::Texture* info = texture_ref->texture();
    // The texture must match the target requested in ProvidePictureBuffers().
    if (info->target() != texture_target_) {
      DLOG(ERROR) << "Texture target mismatch for texture id "
                  << texture_ids[i];
      NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
      return;
    }
    if (texture_target_ == GL_TEXTURE_EXTERNAL_OES) {
      // GL_TEXTURE_EXTERNAL_OES textures have their dimensions defined by the
      // underlying EGLImage.  Use |texture_dimensions_| for this size.
      texture_manager->SetLevelInfo(texture_ref,
                                    GL_TEXTURE_EXTERNAL_OES,
                                    0,
                                    0,
                                    texture_dimensions_.width(),
                                    texture_dimensions_.height(),
                                    1,
                                    0,
                                    0,
                                    0,
                                    false);
    } else {
      // For other targets, texture dimensions should already be defined.
      GLsizei width = 0, height = 0;
      info->GetLevelSize(texture_target_, 0, &width, &height);
      if (width != texture_dimensions_.width() ||
          height != texture_dimensions_.height()) {
        DLOG(ERROR) << "Size mismatch for texture id " << texture_ids[i];
        NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
        return;
      }
    }
    // Translate the client texture id to the service (driver-level) id the
    // VDA will render into.
    uint32 service_texture_id;
    if (!command_decoder->GetServiceTextureId(
            texture_ids[i], &service_texture_id)) {
      DLOG(ERROR) << "Failed to translate texture!";
      NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE);
      return;
    }
    buffers.push_back(media::PictureBuffer(
        buffer_ids[i], texture_dimensions_, service_texture_id));
    textures.push_back(texture_ref);
  }
  video_decode_accelerator_->AssignPictureBuffers(buffers);
  // Hold refs to the textures until SetTextureCleared() marks them cleared.
  DebugAutoLock auto_lock(debug_uncleared_textures_lock_);
  for (uint32 i = 0; i < buffer_ids.size(); ++i)
    uncleared_textures_[buffer_ids[i]] = textures[i];
}
401
402 void GpuVideoDecodeAccelerator::OnReusePictureBuffer(
403     int32 picture_buffer_id) {
404   DCHECK(video_decode_accelerator_.get());
405   video_decode_accelerator_->ReusePictureBuffer(picture_buffer_id);
406 }
407
408 void GpuVideoDecodeAccelerator::OnFlush() {
409   DCHECK(video_decode_accelerator_.get());
410   video_decode_accelerator_->Flush();
411 }
412
413 void GpuVideoDecodeAccelerator::OnReset() {
414   DCHECK(video_decode_accelerator_.get());
415   video_decode_accelerator_->Reset();
416 }
417
418 void GpuVideoDecodeAccelerator::OnDestroy() {
419   DCHECK(video_decode_accelerator_.get());
420   OnWillDestroyStub();
421 }
422
// Called when the MessageFilter has been removed from the channel (removal is
// requested by OnWillDestroyStub(); presumably this runs on the IO thread —
// see MessageFilter::OnFilterRemoved()).
void GpuVideoDecodeAccelerator::OnFilterRemoved() {
  // We're destroying; cancel all callbacks.  The invalidation must happen
  // before signaling, since OnWillDestroyStub() proceeds to destroy the VDA
  // as soon as |filter_removed_| is set.
  weak_factory_for_io_.InvalidateWeakPtrs();
  filter_removed_.Signal();
}
428
429 void GpuVideoDecodeAccelerator::NotifyEndOfBitstreamBuffer(
430     int32 bitstream_buffer_id) {
431   if (!Send(new AcceleratedVideoDecoderHostMsg_BitstreamBufferProcessed(
432           host_route_id_, bitstream_buffer_id))) {
433     DLOG(ERROR)
434         << "Send(AcceleratedVideoDecoderHostMsg_BitstreamBufferProcessed) "
435         << "failed";
436   }
437 }
438
439 void GpuVideoDecodeAccelerator::NotifyInitializeDone() {
440   GpuCommandBufferMsg_CreateVideoDecoder::WriteReplyParams(
441       init_done_msg_, host_route_id_);
442   if (!Send(init_done_msg_))
443     DLOG(ERROR) << "Send(init_done_msg_) failed";
444   init_done_msg_ = NULL;
445 }
446
447 void GpuVideoDecodeAccelerator::NotifyFlushDone() {
448   if (!Send(new AcceleratedVideoDecoderHostMsg_FlushDone(host_route_id_)))
449     DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_FlushDone) failed";
450 }
451
452 void GpuVideoDecodeAccelerator::NotifyResetDone() {
453   if (!Send(new AcceleratedVideoDecoderHostMsg_ResetDone(host_route_id_)))
454     DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_ResetDone) failed";
455 }
456
void GpuVideoDecodeAccelerator::OnWillDestroyStub() {
  // The stub is going away, so we have to stop and destroy VDA here, before
  // returning, because the VDA may need the GL context to run and/or do its
  // cleanup. We cannot destroy the VDA before the IO thread message filter is
  // removed however, since we cannot service incoming messages with VDA gone.
  // We cannot simply check for existence of VDA on IO thread though, because
  // we don't want to synchronize the IO thread with the ChildThread.
  // So we have to wait for the RemoveFilter callback here instead and remove
  // the VDA after it arrives and before returning.
  if (filter_.get()) {
    stub_->channel()->RemoveFilter(filter_.get());
    // Blocks until MessageFilter::OnFilterRemoved() signals via
    // OnFilterRemoved() on this object.
    filter_removed_.Wait();
  }

  stub_->channel()->RemoveRoute(host_route_id_);
  stub_->RemoveDestructionObserver(this);

  // Destroy() is the VDA's self-deleting teardown; release() first so the
  // scoped_ptr does not also delete it.
  if (video_decode_accelerator_)
    video_decode_accelerator_.release()->Destroy();

  // Self-deletion: this is the only place |this| is destroyed (the destructor
  // CHECKs that the VDA is already gone).
  delete this;
}
479
480 bool GpuVideoDecodeAccelerator::Send(IPC::Message* message) {
481   if (filter_.get() && io_message_loop_->BelongsToCurrentThread())
482     return filter_->SendOnIOThread(message);
483   DCHECK(child_message_loop_->BelongsToCurrentThread());
484   return stub_->channel()->Send(message);
485 }
486
487 void GpuVideoDecodeAccelerator::SetTextureCleared(
488     const media::Picture& picture) {
489   DCHECK(child_message_loop_->BelongsToCurrentThread());
490   DebugAutoLock auto_lock(debug_uncleared_textures_lock_);
491   std::map<int32, scoped_refptr<gpu::gles2::TextureRef> >::iterator it;
492   it = uncleared_textures_.find(picture.picture_buffer_id());
493   if (it == uncleared_textures_.end())
494     return;  // the texture has been cleared
495
496   scoped_refptr<gpu::gles2::TextureRef> texture_ref = it->second;
497   GLenum target = texture_ref->texture()->target();
498   gpu::gles2::TextureManager* texture_manager =
499       stub_->decoder()->GetContextGroup()->texture_manager();
500   DCHECK(!texture_ref->texture()->IsLevelCleared(target, 0));
501   texture_manager->SetLevelCleared(texture_ref, target, 0, true);
502   uncleared_textures_.erase(it);
503 }
504
505 }  // namespace content