Upstream version 5.34.92.0
[platform/framework/web/crosswalk.git] / src / content / common / gpu / media / gpu_video_decode_accelerator.cc
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "content/common/gpu/media/gpu_video_decode_accelerator.h"
6
7 #include <vector>
8
9 #include "base/bind.h"
10 #include "base/command_line.h"
11 #include "base/logging.h"
12 #include "base/message_loop/message_loop_proxy.h"
13 #include "base/stl_util.h"
14
15 #include "content/common/gpu/gpu_channel.h"
16 #include "content/common/gpu/gpu_messages.h"
17 #include "content/public/common/content_switches.h"
18 #include "gpu/command_buffer/common/command_buffer.h"
19 #include "ipc/ipc_message_macros.h"
20 #include "ipc/ipc_message_utils.h"
21 #include "media/base/limits.h"
22 #include "ui/gl/gl_context.h"
23 #include "ui/gl/gl_surface_egl.h"
24
25 #if defined(OS_WIN)
26 #include "base/win/windows_version.h"
27 #include "content/common/gpu/media/dxva_video_decode_accelerator.h"
28 #elif defined(OS_CHROMEOS) && defined(ARCH_CPU_ARMEL) && defined(USE_X11)
29 #include "content/common/gpu/media/v4l2_video_decode_accelerator.h"
30 #include "content/common/gpu/media/v4l2_video_device.h"
31 #elif defined(ARCH_CPU_X86_FAMILY) && defined(USE_X11)
32 #include "content/common/gpu/media/vaapi_video_decode_accelerator.h"
33 #include "ui/gl/gl_context_glx.h"
34 #include "ui/gl/gl_implementation.h"
35 #elif defined(OS_ANDROID)
36 #include "content/common/gpu/media/android_video_decode_accelerator.h"
37 #endif
38
39 #include "ui/gfx/size.h"
40
41 namespace content {
42
43 static bool MakeDecoderContextCurrent(
44     const base::WeakPtr<GpuCommandBufferStub> stub) {
45   if (!stub.get()) {
46     DLOG(ERROR) << "Stub is gone; won't MakeCurrent().";
47     return false;
48   }
49
50   if (!stub->decoder()->MakeCurrent()) {
51     DLOG(ERROR) << "Failed to MakeCurrent()";
52     return false;
53   }
54
55   return true;
56 }
57
58 // A helper class that works like AutoLock but only acquires the lock when
59 // DCHECK is on.
60 class DebugAutoLock {
61  public:
62   explicit DebugAutoLock(base::Lock& lock) : lock_(lock) {
63     if (DCHECK_IS_ON())
64       lock_.Acquire();
65   }
66
67   ~DebugAutoLock() {
68     if (DCHECK_IS_ON()) {
69       lock_.AssertAcquired();
70       lock_.Release();
71     }
72   }
73
74  private:
75   base::Lock& lock_;
76   DISALLOW_COPY_AND_ASSIGN(DebugAutoLock);
77 };
78
79 class GpuVideoDecodeAccelerator::MessageFilter
80     : public IPC::ChannelProxy::MessageFilter {
81  public:
82   MessageFilter(GpuVideoDecodeAccelerator* owner, int32 host_route_id)
83       : owner_(owner), host_route_id_(host_route_id) {}
84
85   virtual void OnChannelError() OVERRIDE { channel_ = NULL; }
86
87   virtual void OnChannelClosing() OVERRIDE { channel_ = NULL; }
88
89   virtual void OnFilterAdded(IPC::Channel* channel) OVERRIDE {
90     channel_ = channel;
91   }
92
93   virtual void OnFilterRemoved() OVERRIDE {
94     // This will delete |owner_| and |this|.
95     owner_->OnFilterRemoved();
96   }
97
98   virtual bool OnMessageReceived(const IPC::Message& msg) OVERRIDE {
99     if (msg.routing_id() != host_route_id_)
100       return false;
101
102     IPC_BEGIN_MESSAGE_MAP(MessageFilter, msg)
103       IPC_MESSAGE_FORWARD(AcceleratedVideoDecoderMsg_Decode, owner_,
104                           GpuVideoDecodeAccelerator::OnDecode)
105       IPC_MESSAGE_UNHANDLED(return false;)
106     IPC_END_MESSAGE_MAP()
107     return true;
108   }
109
110   bool SendOnIOThread(IPC::Message* message) {
111     DCHECK(!message->is_sync());
112     if (!channel_) {
113       delete message;
114       return false;
115     }
116     return channel_->Send(message);
117   }
118
119  protected:
120   virtual ~MessageFilter() {}
121
122  private:
123   GpuVideoDecodeAccelerator* owner_;
124   int32 host_route_id_;
125   // The channel to which this filter was added.
126   IPC::Channel* channel_;
127 };
128
// Registers |this| as the IPC route for |host_route_id| and as a destruction
// observer of |stub| so teardown happens before the stub's GL context dies.
GpuVideoDecodeAccelerator::GpuVideoDecodeAccelerator(
    int32 host_route_id,
    GpuCommandBufferStub* stub,
    const scoped_refptr<base::MessageLoopProxy>& io_message_loop)
    : init_done_msg_(NULL),
      host_route_id_(host_route_id),
      stub_(stub),
      texture_target_(0),
      filter_removed_(true, false),  // Manual-reset event, initially unsignaled.
      io_message_loop_(io_message_loop),
      weak_factory_for_io_(this) {
  DCHECK(stub_);
  // See OnWillDestroyStub(): we must destroy the VDA before the stub goes.
  stub_->AddDestructionObserver(this);
  // Route incoming AcceleratedVideoDecoderMsg_* messages to OnMessageReceived.
  stub_->channel()->AddRoute(host_route_id_, this);
  // Constructed on the child thread; remember its loop for later hops.
  child_message_loop_ = base::MessageLoopProxy::current();
  make_context_current_ =
      base::Bind(&MakeDecoderContextCurrent, stub_->AsWeakPtr());
}
147
GpuVideoDecodeAccelerator::~GpuVideoDecodeAccelerator() {
  // This class can only be self-deleted from OnWillDestroyStub(), which means
  // the VDA has already been destroyed in there. A live VDA here would
  // indicate deletion through an unexpected path.
  CHECK(!video_decode_accelerator_.get());
}
153
// Dispatches child-thread IPC for this route to the handlers below. Returns
// false (unhandled) for messages arriving before Initialize() created the
// VDA or after it was destroyed.
bool GpuVideoDecodeAccelerator::OnMessageReceived(const IPC::Message& msg) {
  if (!video_decode_accelerator_)
    return false;

  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(GpuVideoDecodeAccelerator, msg)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Decode, OnDecode)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_AssignPictureBuffers,
                        OnAssignPictureBuffers)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_ReusePictureBuffer,
                        OnReusePictureBuffer)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Flush, OnFlush)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Reset, OnReset)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Destroy, OnDestroy)
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()
  return handled;
}
172
173 void GpuVideoDecodeAccelerator::ProvidePictureBuffers(
174     uint32 requested_num_of_buffers,
175     const gfx::Size& dimensions,
176     uint32 texture_target) {
177   if (dimensions.width() > media::limits::kMaxDimension ||
178       dimensions.height() > media::limits::kMaxDimension ||
179       dimensions.GetArea() > media::limits::kMaxCanvas) {
180     NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE);
181     return;
182   }
183   if (!Send(new AcceleratedVideoDecoderHostMsg_ProvidePictureBuffers(
184            host_route_id_,
185            requested_num_of_buffers,
186            dimensions,
187            texture_target))) {
188     DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_ProvidePictureBuffers) "
189                 << "failed";
190   }
191   texture_dimensions_ = dimensions;
192   texture_target_ = texture_target;
193 }
194
195 void GpuVideoDecodeAccelerator::DismissPictureBuffer(
196     int32 picture_buffer_id) {
197   // Notify client that picture buffer is now unused.
198   if (!Send(new AcceleratedVideoDecoderHostMsg_DismissPictureBuffer(
199           host_route_id_, picture_buffer_id))) {
200     DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_DismissPictureBuffer) "
201                 << "failed";
202   }
203   DebugAutoLock auto_lock(debug_uncleared_textures_lock_);
204   uncleared_textures_.erase(picture_buffer_id);
205 }
206
// VDA callback: forwards a decoded picture to the client, first marking the
// backing texture cleared if this is a child-thread delivery.
void GpuVideoDecodeAccelerator::PictureReady(
    const media::Picture& picture) {
  // VDA may call PictureReady on IO thread. SetTextureCleared should run on
  // the child thread. VDA is responsible to call PictureReady on the child
  // thread when a picture buffer is delivered the first time.
  if (child_message_loop_->BelongsToCurrentThread()) {
    SetTextureCleared(picture);
  } else {
    DCHECK(io_message_loop_->BelongsToCurrentThread());
    // On the IO thread the texture must already have been cleared by an
    // earlier child-thread delivery; verify in debug builds only.
    if (DCHECK_IS_ON()) {
      DebugAutoLock auto_lock(debug_uncleared_textures_lock_);
      DCHECK_EQ(0u, uncleared_textures_.count(picture.picture_buffer_id()));
    }
  }

  if (!Send(new AcceleratedVideoDecoderHostMsg_PictureReady(
          host_route_id_,
          picture.picture_buffer_id(),
          picture.bitstream_buffer_id()))) {
    DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_PictureReady) failed";
  }
}
229
230 void GpuVideoDecodeAccelerator::NotifyError(
231     media::VideoDecodeAccelerator::Error error) {
232   if (init_done_msg_) {
233     // If we get an error while we're initializing, NotifyInitializeDone won't
234     // be called, so we need to send the reply (with an error) here.
235     GpuCommandBufferMsg_CreateVideoDecoder::WriteReplyParams(
236         init_done_msg_, -1);
237     if (!Send(init_done_msg_))
238       DLOG(ERROR) << "Send(init_done_msg_) failed";
239     init_done_msg_ = NULL;
240     return;
241   }
242   if (!Send(new AcceleratedVideoDecoderHostMsg_ErrorNotification(
243           host_route_id_, error))) {
244     DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_ErrorNotification) "
245                 << "failed";
246   }
247 }
248
// Creates the platform-specific VDA and initializes it for |profile|. The
// reply in |init_done_msg| is sent later by NotifyInitializeDone() on
// success, or by NotifyError() (with -1) on failure.
void GpuVideoDecodeAccelerator::Initialize(
    const media::VideoCodecProfile profile,
    IPC::Message* init_done_msg) {
  DCHECK(!video_decode_accelerator_.get());
  DCHECK(!init_done_msg_);
  DCHECK(init_done_msg);
  init_done_msg_ = init_done_msg;

#if !defined(OS_WIN)
  // Ensure we will be able to get a GL context at all before initializing
  // non-Windows VDAs.
  if (!make_context_current_.Run()) {
    NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE);
    return;
  }
#endif

  // Select the platform VDA implementation.
#if defined(OS_WIN)
  // DXVA decode is only supported on Win7+.
  if (base::win::GetVersion() < base::win::VERSION_WIN7) {
    NOTIMPLEMENTED() << "HW video decode acceleration not available.";
    NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE);
    return;
  }
  DVLOG(0) << "Initializing DXVA HW decoder for windows.";
  video_decode_accelerator_.reset(new DXVAVideoDecodeAccelerator(
      this, make_context_current_));
#elif defined(OS_CHROMEOS) && defined(ARCH_CPU_ARMEL) && defined(USE_X11)
  scoped_ptr<V4L2Device> device = V4L2Device::Create();
  if (!device.get()) {
    NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE);
    return;
  }
  video_decode_accelerator_.reset(
      new V4L2VideoDecodeAccelerator(gfx::GLSurfaceEGL::GetHardwareDisplay(),
                                     this,
                                     weak_factory_for_io_.GetWeakPtr(),
                                     make_context_current_,
                                     device.Pass(),
                                     io_message_loop_));
#elif defined(ARCH_CPU_X86_FAMILY) && defined(USE_X11)
  // VAAPI needs a GLX context, which only exists with desktop GL.
  if (gfx::GetGLImplementation() != gfx::kGLImplementationDesktopGL) {
    VLOG(1) << "HW video decode acceleration not available without "
               "DesktopGL (GLX).";
    NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE);
    return;
  }
  gfx::GLContextGLX* glx_context =
      static_cast<gfx::GLContextGLX*>(stub_->decoder()->GetGLContext());
  video_decode_accelerator_.reset(new VaapiVideoDecodeAccelerator(
      glx_context->display(), this, make_context_current_));
#elif defined(OS_ANDROID)
  video_decode_accelerator_.reset(new AndroidVideoDecodeAccelerator(
      this,
      stub_->decoder()->AsWeakPtr(),
      make_context_current_));
#else
  NOTIMPLEMENTED() << "HW video decode acceleration not available.";
  NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE);
  return;
#endif

  // If the VDA can accept Decode() on the IO thread, install a filter so
  // AcceleratedVideoDecoderMsg_Decode bypasses the child thread.
  if (video_decode_accelerator_->CanDecodeOnIOThread()) {
    filter_ = new MessageFilter(this, host_route_id_);
    stub_->channel()->AddFilter(filter_.get());
  }

  if (!video_decode_accelerator_->Initialize(profile))
    NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE);
}
318
319 // Runs on IO thread if video_decode_accelerator_->CanDecodeOnIOThread() is
320 // true, otherwise on the main thread.
321 void GpuVideoDecodeAccelerator::OnDecode(
322     base::SharedMemoryHandle handle, int32 id, uint32 size) {
323   DCHECK(video_decode_accelerator_.get());
324   if (id < 0) {
325     DLOG(ERROR) << "BitstreamBuffer id " << id << " out of range";
326     if (child_message_loop_->BelongsToCurrentThread()) {
327       NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
328     } else {
329       child_message_loop_->PostTask(
330           FROM_HERE,
331           base::Bind(&GpuVideoDecodeAccelerator::NotifyError,
332                      base::Unretained(this),
333                      media::VideoDecodeAccelerator::INVALID_ARGUMENT));
334     }
335     return;
336   }
337   video_decode_accelerator_->Decode(media::BitstreamBuffer(id, handle, size));
338 }
339
// Validates the client-supplied (buffer id, texture id) pairs against what
// ProvidePictureBuffers() requested, then hands the resulting picture
// buffers to the VDA and records their textures as not-yet-cleared.
void GpuVideoDecodeAccelerator::OnAssignPictureBuffers(
    const std::vector<int32>& buffer_ids,
    const std::vector<uint32>& texture_ids) {
  // Ids are matched pairwise, so the two vectors must be the same length.
  if (buffer_ids.size() != texture_ids.size()) {
    NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
    return;
  }

  gpu::gles2::GLES2Decoder* command_decoder = stub_->decoder();
  gpu::gles2::TextureManager* texture_manager =
      command_decoder->GetContextGroup()->texture_manager();

  // Validate every pair before handing anything to the VDA; any failure
  // aborts the whole assignment with an error.
  std::vector<media::PictureBuffer> buffers;
  std::vector<scoped_refptr<gpu::gles2::TextureRef> > textures;
  for (uint32 i = 0; i < buffer_ids.size(); ++i) {
    if (buffer_ids[i] < 0) {
      DLOG(ERROR) << "Buffer id " << buffer_ids[i] << " out of range";
      NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
      return;
    }
    gpu::gles2::TextureRef* texture_ref = texture_manager->GetTexture(
        texture_ids[i]);
    if (!texture_ref) {
      DLOG(ERROR) << "Failed to find texture id " << texture_ids[i];
      NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
      return;
    }
    gpu::gles2::Texture* info = texture_ref->texture();
    // The texture must use the target we asked for in ProvidePictureBuffers.
    if (info->target() != texture_target_) {
      DLOG(ERROR) << "Texture target mismatch for texture id "
                  << texture_ids[i];
      NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
      return;
    }
    if (texture_target_ == GL_TEXTURE_EXTERNAL_OES) {
      // GL_TEXTURE_EXTERNAL_OES textures have their dimensions defined by the
      // underlying EGLImage.  Use |texture_dimensions_| for this size.
      texture_manager->SetLevelInfo(texture_ref,
                                    GL_TEXTURE_EXTERNAL_OES,
                                    0,
                                    0,
                                    texture_dimensions_.width(),
                                    texture_dimensions_.height(),
                                    1,
                                    0,
                                    0,
                                    0,
                                    false);
    } else {
      // For other targets, texture dimensions should already be defined.
      GLsizei width = 0, height = 0;
      info->GetLevelSize(texture_target_, 0, &width, &height);
      if (width != texture_dimensions_.width() ||
          height != texture_dimensions_.height()) {
        DLOG(ERROR) << "Size mismatch for texture id " << texture_ids[i];
        NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
        return;
      }
    }
    // Translate the client texture id into the service id the VDA renders to.
    uint32 service_texture_id;
    if (!command_decoder->GetServiceTextureId(
            texture_ids[i], &service_texture_id)) {
      DLOG(ERROR) << "Failed to translate texture!";
      NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE);
      return;
    }
    buffers.push_back(media::PictureBuffer(
        buffer_ids[i], texture_dimensions_, service_texture_id));
    textures.push_back(texture_ref);
  }
  video_decode_accelerator_->AssignPictureBuffers(buffers);
  // Track the textures until first delivery; see SetTextureCleared().
  DebugAutoLock auto_lock(debug_uncleared_textures_lock_);
  for (uint32 i = 0; i < buffer_ids.size(); ++i)
    uncleared_textures_[buffer_ids[i]] = textures[i];
}
415
// The client is done displaying this picture buffer; return it to the VDA.
void GpuVideoDecodeAccelerator::OnReusePictureBuffer(
    int32 picture_buffer_id) {
  DCHECK(video_decode_accelerator_.get());
  video_decode_accelerator_->ReusePictureBuffer(picture_buffer_id);
}
421
// Forwards a client flush request to the VDA; NotifyFlushDone() replies.
void GpuVideoDecodeAccelerator::OnFlush() {
  DCHECK(video_decode_accelerator_.get());
  video_decode_accelerator_->Flush();
}
426
// Forwards a client reset request to the VDA; NotifyResetDone() replies.
void GpuVideoDecodeAccelerator::OnReset() {
  DCHECK(video_decode_accelerator_.get());
  video_decode_accelerator_->Reset();
}
431
// Client-requested teardown: reuses the stub-destruction path, which
// destroys the VDA and deletes |this|.
void GpuVideoDecodeAccelerator::OnDestroy() {
  DCHECK(video_decode_accelerator_.get());
  OnWillDestroyStub();
}
436
// Runs on the IO thread after the filter is removed from the channel.
void GpuVideoDecodeAccelerator::OnFilterRemoved() {
  // We're destroying; cancel all callbacks.
  weak_factory_for_io_.InvalidateWeakPtrs();
  // Unblocks OnWillDestroyStub(), which waits on the child thread; the
  // invalidation above must happen before this signal.
  filter_removed_.Signal();
}
442
443 void GpuVideoDecodeAccelerator::NotifyEndOfBitstreamBuffer(
444     int32 bitstream_buffer_id) {
445   if (!Send(new AcceleratedVideoDecoderHostMsg_BitstreamBufferProcessed(
446           host_route_id_, bitstream_buffer_id))) {
447     DLOG(ERROR)
448         << "Send(AcceleratedVideoDecoderHostMsg_BitstreamBufferProcessed) "
449         << "failed";
450   }
451 }
452
453 void GpuVideoDecodeAccelerator::NotifyInitializeDone() {
454   GpuCommandBufferMsg_CreateVideoDecoder::WriteReplyParams(
455       init_done_msg_, host_route_id_);
456   if (!Send(init_done_msg_))
457     DLOG(ERROR) << "Send(init_done_msg_) failed";
458   init_done_msg_ = NULL;
459 }
460
461 void GpuVideoDecodeAccelerator::NotifyFlushDone() {
462   if (!Send(new AcceleratedVideoDecoderHostMsg_FlushDone(host_route_id_)))
463     DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_FlushDone) failed";
464 }
465
466 void GpuVideoDecodeAccelerator::NotifyResetDone() {
467   if (!Send(new AcceleratedVideoDecoderHostMsg_ResetDone(host_route_id_)))
468     DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_ResetDone) failed";
469 }
470
void GpuVideoDecodeAccelerator::OnWillDestroyStub() {
  // The stub is going away, so we have to stop and destroy VDA here, before
  // returning, because the VDA may need the GL context to run and/or do its
  // cleanup. We cannot destroy the VDA before the IO thread message filter is
  // removed however, since we cannot service incoming messages with VDA gone.
  // We cannot simply check for existence of VDA on IO thread though, because
  // we don't want to synchronize the IO thread with the ChildThread.
  // So we have to wait for the RemoveFilter callback here instead and remove
  // the VDA after it arrives and before returning.
  if (filter_.get()) {
    stub_->channel()->RemoveFilter(filter_.get());
    // Blocks until OnFilterRemoved() signals from the IO thread.
    filter_removed_.Wait();
  }

  stub_->channel()->RemoveRoute(host_route_id_);
  stub_->RemoveDestructionObserver(this);

  // Release ownership before Destroy(); the destructor CHECKs that
  // |video_decode_accelerator_| is already empty.
  if (video_decode_accelerator_)
    video_decode_accelerator_.release()->Destroy();

  delete this;
}
493
494 bool GpuVideoDecodeAccelerator::Send(IPC::Message* message) {
495   if (filter_.get() && io_message_loop_->BelongsToCurrentThread())
496     return filter_->SendOnIOThread(message);
497   DCHECK(child_message_loop_->BelongsToCurrentThread());
498   return stub_->channel()->Send(message);
499 }
500
501 void GpuVideoDecodeAccelerator::SetTextureCleared(
502     const media::Picture& picture) {
503   DCHECK(child_message_loop_->BelongsToCurrentThread());
504   DebugAutoLock auto_lock(debug_uncleared_textures_lock_);
505   std::map<int32, scoped_refptr<gpu::gles2::TextureRef> >::iterator it;
506   it = uncleared_textures_.find(picture.picture_buffer_id());
507   if (it == uncleared_textures_.end())
508     return;  // the texture has been cleared
509
510   scoped_refptr<gpu::gles2::TextureRef> texture_ref = it->second;
511   GLenum target = texture_ref->texture()->target();
512   gpu::gles2::TextureManager* texture_manager =
513       stub_->decoder()->GetContextGroup()->texture_manager();
514   DCHECK(!texture_ref->texture()->IsLevelCleared(target, 0));
515   texture_manager->SetLevelCleared(texture_ref, target, 0, true);
516   uncleared_textures_.erase(it);
517 }
518
519 }  // namespace content