Upstream version 9.38.198.0
[platform/framework/web/crosswalk.git] / src / content / common / gpu / media / gpu_video_decode_accelerator.cc
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "content/common/gpu/media/gpu_video_decode_accelerator.h"
6
7 #include <vector>
8
9 #include "base/bind.h"
10 #include "base/command_line.h"
11 #include "base/logging.h"
12 #include "base/message_loop/message_loop_proxy.h"
13 #include "base/stl_util.h"
14
15 #include "content/common/gpu/gpu_channel.h"
16 #include "content/common/gpu/gpu_messages.h"
17 #include "content/public/common/content_switches.h"
18 #include "gpu/command_buffer/common/command_buffer.h"
19 #include "ipc/ipc_message_macros.h"
20 #include "ipc/ipc_message_utils.h"
21 #include "ipc/message_filter.h"
22 #include "media/base/limits.h"
23 #include "ui/gl/gl_context.h"
24 #include "ui/gl/gl_surface_egl.h"
25
26 #if defined(OS_WIN)
27 #include "base/win/windows_version.h"
28 #include "content/common/gpu/media/dxva_video_decode_accelerator.h"
29 #elif defined(OS_MACOSX)
30 #include "content/common/gpu/media/vt_video_decode_accelerator.h"
31 #elif defined(OS_CHROMEOS) && defined(ARCH_CPU_ARMEL) && defined(USE_X11)
32 #include "content/common/gpu/media/v4l2_video_decode_accelerator.h"
33 #include "content/common/gpu/media/v4l2_video_device.h"
34 #elif defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY) && defined(USE_X11)
35 #include "content/common/gpu/media/vaapi_video_decode_accelerator.h"
36 #include "ui/gl/gl_context_glx.h"
37 #include "ui/gl/gl_implementation.h"
38 #elif defined(USE_OZONE)
39 #include "media/ozone/media_ozone_platform.h"
40 #elif defined(OS_ANDROID)
41 #include "content/common/gpu/media/android_video_decode_accelerator.h"
42 #endif
43
44 #include "ui/gfx/size.h"
45
46 namespace content {
47
48 static bool MakeDecoderContextCurrent(
49     const base::WeakPtr<GpuCommandBufferStub> stub) {
50   if (!stub) {
51     DLOG(ERROR) << "Stub is gone; won't MakeCurrent().";
52     return false;
53   }
54
55   if (!stub->decoder()->MakeCurrent()) {
56     DLOG(ERROR) << "Failed to MakeCurrent()";
57     return false;
58   }
59
60   return true;
61 }
62
// DebugAutoLock works like AutoLock but only acquires the lock when
// DCHECK is on. In release builds it compiles to a no-op, so the
// |debug_uncleared_textures_lock_| bookkeeping costs nothing there.
#if DCHECK_IS_ON
typedef base::AutoLock DebugAutoLock;
#else
class DebugAutoLock {
 public:
  // Mirrors base::AutoLock's interface (takes the lock by reference) but
  // never actually acquires it.
  explicit DebugAutoLock(base::Lock&) {}
};
#endif
73
74 class GpuVideoDecodeAccelerator::MessageFilter : public IPC::MessageFilter {
75  public:
76   MessageFilter(GpuVideoDecodeAccelerator* owner, int32 host_route_id)
77       : owner_(owner), host_route_id_(host_route_id) {}
78
79   virtual void OnChannelError() OVERRIDE { sender_ = NULL; }
80
81   virtual void OnChannelClosing() OVERRIDE { sender_ = NULL; }
82
83   virtual void OnFilterAdded(IPC::Sender* sender) OVERRIDE {
84     sender_ = sender;
85   }
86
87   virtual void OnFilterRemoved() OVERRIDE {
88     // This will delete |owner_| and |this|.
89     owner_->OnFilterRemoved();
90   }
91
92   virtual bool OnMessageReceived(const IPC::Message& msg) OVERRIDE {
93     if (msg.routing_id() != host_route_id_)
94       return false;
95
96     IPC_BEGIN_MESSAGE_MAP(MessageFilter, msg)
97       IPC_MESSAGE_FORWARD(AcceleratedVideoDecoderMsg_Decode, owner_,
98                           GpuVideoDecodeAccelerator::OnDecode)
99       IPC_MESSAGE_UNHANDLED(return false;)
100     IPC_END_MESSAGE_MAP()
101     return true;
102   }
103
104   bool SendOnIOThread(IPC::Message* message) {
105     DCHECK(!message->is_sync());
106     if (!sender_) {
107       delete message;
108       return false;
109     }
110     return sender_->Send(message);
111   }
112
113  protected:
114   virtual ~MessageFilter() {}
115
116  private:
117   GpuVideoDecodeAccelerator* owner_;
118   int32 host_route_id_;
119   // The sender to which this filter was added.
120   IPC::Sender* sender_;
121 };
122
// Constructed on the child (GPU main) thread. Registers as a destruction
// observer of |stub| so the VDA can be destroyed before the stub's GL context
// goes away.
GpuVideoDecodeAccelerator::GpuVideoDecodeAccelerator(
    int32 host_route_id,
    GpuCommandBufferStub* stub,
    const scoped_refptr<base::MessageLoopProxy>& io_message_loop)
    : host_route_id_(host_route_id),
      stub_(stub),
      texture_target_(0),
      filter_removed_(true, false),  // presumably (manual_reset, signaled) — see base::WaitableEvent
      io_message_loop_(io_message_loop),
      weak_factory_for_io_(this) {
  DCHECK(stub_);
  stub_->AddDestructionObserver(this);
  // Remember the construction thread; NotifyError/SetTextureCleared must be
  // bounced back to it (see OnDecode and PictureReady).
  child_message_loop_ = base::MessageLoopProxy::current();
  make_context_current_ =
      base::Bind(&MakeDecoderContextCurrent, stub_->AsWeakPtr());
}
139
GpuVideoDecodeAccelerator::~GpuVideoDecodeAccelerator() {
  // This class can only be self-deleted from OnWillDestroyStub(), which means
  // the VDA has already been destroyed in there.
  DCHECK(!video_decode_accelerator_);
}
145
// Dispatches incoming host messages on the child thread. Returns false
// (unhandled) once the VDA is gone so late messages are simply dropped.
bool GpuVideoDecodeAccelerator::OnMessageReceived(const IPC::Message& msg) {
  if (!video_decode_accelerator_)
    return false;

  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(GpuVideoDecodeAccelerator, msg)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Decode, OnDecode)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_AssignPictureBuffers,
                        OnAssignPictureBuffers)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_ReusePictureBuffer,
                        OnReusePictureBuffer)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Flush, OnFlush)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Reset, OnReset)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Destroy, OnDestroy)
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()
  return handled;
}
164
165 void GpuVideoDecodeAccelerator::ProvidePictureBuffers(
166     uint32 requested_num_of_buffers,
167     const gfx::Size& dimensions,
168     uint32 texture_target) {
169   if (dimensions.width() > media::limits::kMaxDimension ||
170       dimensions.height() > media::limits::kMaxDimension ||
171       dimensions.GetArea() > media::limits::kMaxCanvas) {
172     NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE);
173     return;
174   }
175   if (!Send(new AcceleratedVideoDecoderHostMsg_ProvidePictureBuffers(
176            host_route_id_,
177            requested_num_of_buffers,
178            dimensions,
179            texture_target))) {
180     DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_ProvidePictureBuffers) "
181                 << "failed";
182   }
183   texture_dimensions_ = dimensions;
184   texture_target_ = texture_target;
185 }
186
187 void GpuVideoDecodeAccelerator::DismissPictureBuffer(
188     int32 picture_buffer_id) {
189   // Notify client that picture buffer is now unused.
190   if (!Send(new AcceleratedVideoDecoderHostMsg_DismissPictureBuffer(
191           host_route_id_, picture_buffer_id))) {
192     DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_DismissPictureBuffer) "
193                 << "failed";
194   }
195   DebugAutoLock auto_lock(debug_uncleared_textures_lock_);
196   uncleared_textures_.erase(picture_buffer_id);
197 }
198
// VDA callback: a decoded picture is ready; forwards it to the host.
void GpuVideoDecodeAccelerator::PictureReady(
    const media::Picture& picture) {
  // VDA may call PictureReady on IO thread. SetTextureCleared should run on
  // the child thread. VDA is responsible to call PictureReady on the child
  // thread when a picture buffer is delivered the first time.
  if (child_message_loop_->BelongsToCurrentThread()) {
    SetTextureCleared(picture);
  } else {
    // On the IO thread we cannot touch the texture manager, so the texture
    // must already have been cleared by an earlier child-thread delivery.
    DCHECK(io_message_loop_->BelongsToCurrentThread());
    DebugAutoLock auto_lock(debug_uncleared_textures_lock_);
    DCHECK_EQ(0u, uncleared_textures_.count(picture.picture_buffer_id()));
  }

  if (!Send(new AcceleratedVideoDecoderHostMsg_PictureReady(
          host_route_id_,
          picture.picture_buffer_id(),
          picture.bitstream_buffer_id()))) {
    DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_PictureReady) failed";
  }
}
219
220 void GpuVideoDecodeAccelerator::NotifyError(
221     media::VideoDecodeAccelerator::Error error) {
222   if (!Send(new AcceleratedVideoDecoderHostMsg_ErrorNotification(
223           host_route_id_, error))) {
224     DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_ErrorNotification) "
225                 << "failed";
226   }
227 }
228
229 void GpuVideoDecodeAccelerator::Initialize(
230     const media::VideoCodecProfile profile,
231     IPC::Message* init_done_msg) {
232   DCHECK(!video_decode_accelerator_.get());
233
234   if (!stub_->channel()->AddRoute(host_route_id_, this)) {
235     DLOG(ERROR) << "GpuVideoDecodeAccelerator::Initialize(): "
236                    "failed to add route";
237     SendCreateDecoderReply(init_done_msg, false);
238   }
239
240 #if !defined(OS_WIN)
241   // Ensure we will be able to get a GL context at all before initializing
242   // non-Windows VDAs.
243   if (!make_context_current_.Run()) {
244     SendCreateDecoderReply(init_done_msg, false);
245     return;
246   }
247 #endif
248
249 #if defined(OS_WIN)
250   if (base::win::GetVersion() < base::win::VERSION_WIN7) {
251     NOTIMPLEMENTED() << "HW video decode acceleration not available.";
252     SendCreateDecoderReply(init_done_msg, false);
253     return;
254   }
255   DVLOG(0) << "Initializing DXVA HW decoder for windows.";
256   video_decode_accelerator_.reset(
257       new DXVAVideoDecodeAccelerator(make_context_current_));
258 #elif defined(OS_MACOSX)
259   video_decode_accelerator_.reset(new VTVideoDecodeAccelerator(
260       static_cast<CGLContextObj>(
261           stub_->decoder()->GetGLContext()->GetHandle())));
262 #elif defined(OS_CHROMEOS) && defined(ARCH_CPU_ARMEL) && defined(USE_X11)
263   scoped_ptr<V4L2Device> device = V4L2Device::Create(V4L2Device::kDecoder);
264   if (!device.get()) {
265     SendCreateDecoderReply(init_done_msg, false);
266     return;
267   }
268   video_decode_accelerator_.reset(new V4L2VideoDecodeAccelerator(
269       gfx::GLSurfaceEGL::GetHardwareDisplay(),
270       stub_->decoder()->GetGLContext()->GetHandle(),
271       weak_factory_for_io_.GetWeakPtr(),
272       make_context_current_,
273       device.Pass(),
274       io_message_loop_));
275 #elif defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY) && defined(USE_X11)
276   if (gfx::GetGLImplementation() != gfx::kGLImplementationDesktopGL) {
277     VLOG(1) << "HW video decode acceleration not available without "
278                "DesktopGL (GLX).";
279     SendCreateDecoderReply(init_done_msg, false);
280     return;
281   }
282   gfx::GLContextGLX* glx_context =
283       static_cast<gfx::GLContextGLX*>(stub_->decoder()->GetGLContext());
284   video_decode_accelerator_.reset(new VaapiVideoDecodeAccelerator(
285       glx_context->display(), make_context_current_));
286 #elif defined(USE_OZONE)
287   media::MediaOzonePlatform* platform =
288       media::MediaOzonePlatform::GetInstance();
289   video_decode_accelerator_.reset(platform->CreateVideoDecodeAccelerator(
290       make_context_current_));
291   if (!video_decode_accelerator_) {
292     SendCreateDecoderReply(init_done_msg, false);
293     return;
294   }
295 #elif defined(OS_ANDROID)
296   video_decode_accelerator_.reset(new AndroidVideoDecodeAccelerator(
297       stub_->decoder()->AsWeakPtr(),
298       make_context_current_));
299 #else
300   NOTIMPLEMENTED() << "HW video decode acceleration not available.";
301   SendCreateDecoderReply(init_done_msg, false);
302   return;
303 #endif
304
305   if (video_decode_accelerator_->CanDecodeOnIOThread()) {
306     filter_ = new MessageFilter(this, host_route_id_);
307     stub_->channel()->AddFilter(filter_.get());
308   }
309
310   if (!video_decode_accelerator_->Initialize(profile, this)) {
311     SendCreateDecoderReply(init_done_msg, false);
312     return;
313   }
314
315   SendCreateDecoderReply(init_done_msg, true);
316 }
317
318 // Runs on IO thread if video_decode_accelerator_->CanDecodeOnIOThread() is
319 // true, otherwise on the main thread.
320 void GpuVideoDecodeAccelerator::OnDecode(
321     base::SharedMemoryHandle handle, int32 id, uint32 size) {
322   DCHECK(video_decode_accelerator_.get());
323   if (id < 0) {
324     DLOG(ERROR) << "BitstreamBuffer id " << id << " out of range";
325     if (child_message_loop_->BelongsToCurrentThread()) {
326       NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
327     } else {
328       child_message_loop_->PostTask(
329           FROM_HERE,
330           base::Bind(&GpuVideoDecodeAccelerator::NotifyError,
331                      base::Unretained(this),
332                      media::VideoDecodeAccelerator::INVALID_ARGUMENT));
333     }
334     return;
335   }
336   video_decode_accelerator_->Decode(media::BitstreamBuffer(id, handle, size));
337 }
338
// Host handler: pairs each buffer id with a client texture, validates the
// textures against the target/size requested in ProvidePictureBuffers(), and
// hands the resulting PictureBuffers to the VDA. Runs on the child thread.
void GpuVideoDecodeAccelerator::OnAssignPictureBuffers(
    const std::vector<int32>& buffer_ids,
    const std::vector<uint32>& texture_ids) {
  if (buffer_ids.size() != texture_ids.size()) {
    NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
    return;
  }

  gpu::gles2::GLES2Decoder* command_decoder = stub_->decoder();
  gpu::gles2::TextureManager* texture_manager =
      command_decoder->GetContextGroup()->texture_manager();

  std::vector<media::PictureBuffer> buffers;
  std::vector<scoped_refptr<gpu::gles2::TextureRef> > textures;
  for (uint32 i = 0; i < buffer_ids.size(); ++i) {
    if (buffer_ids[i] < 0) {
      DLOG(ERROR) << "Buffer id " << buffer_ids[i] << " out of range";
      NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
      return;
    }
    gpu::gles2::TextureRef* texture_ref = texture_manager->GetTexture(
        texture_ids[i]);
    if (!texture_ref) {
      DLOG(ERROR) << "Failed to find texture id " << texture_ids[i];
      NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
      return;
    }
    gpu::gles2::Texture* info = texture_ref->texture();
    // Target must match what the VDA asked for in ProvidePictureBuffers().
    if (info->target() != texture_target_) {
      DLOG(ERROR) << "Texture target mismatch for texture id "
                  << texture_ids[i];
      NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
      return;
    }
    if (texture_target_ == GL_TEXTURE_EXTERNAL_OES ||
        texture_target_ == GL_TEXTURE_RECTANGLE) {
      // These textures have their dimensions defined by the underlying storage.
      // Use |texture_dimensions_| for this size.
      // NOTE(review): zero internal_format/format/type and cleared=false are
      // passed here — presumably "unspecified" per TextureManager; confirm
      // against SetLevelInfo's contract.
      texture_manager->SetLevelInfo(texture_ref,
                                    texture_target_,
                                    0,
                                    0,
                                    texture_dimensions_.width(),
                                    texture_dimensions_.height(),
                                    1,
                                    0,
                                    0,
                                    0,
                                    false);
    } else {
      // For other targets, texture dimensions should already be defined.
      GLsizei width = 0, height = 0;
      info->GetLevelSize(texture_target_, 0, &width, &height);
      if (width != texture_dimensions_.width() ||
          height != texture_dimensions_.height()) {
        DLOG(ERROR) << "Size mismatch for texture id " << texture_ids[i];
        NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
        return;
      }
    }
    // Translate the client texture id into the service-side GL texture id the
    // VDA will render into.
    uint32 service_texture_id;
    if (!command_decoder->GetServiceTextureId(
            texture_ids[i], &service_texture_id)) {
      DLOG(ERROR) << "Failed to translate texture!";
      NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE);
      return;
    }
    buffers.push_back(media::PictureBuffer(
        buffer_ids[i], texture_dimensions_, service_texture_id));
    textures.push_back(texture_ref);
  }
  video_decode_accelerator_->AssignPictureBuffers(buffers);
  // Remember each texture so SetTextureCleared() can mark it cleared after
  // its first picture is delivered on the child thread.
  DebugAutoLock auto_lock(debug_uncleared_textures_lock_);
  for (uint32 i = 0; i < buffer_ids.size(); ++i)
    uncleared_textures_[buffer_ids[i]] = textures[i];
}
415
// Host handler: the client is done displaying this buffer; return it to the
// VDA for reuse.
void GpuVideoDecodeAccelerator::OnReusePictureBuffer(
    int32 picture_buffer_id) {
  DCHECK(video_decode_accelerator_.get());
  video_decode_accelerator_->ReusePictureBuffer(picture_buffer_id);
}
421
// Host handler: forwards a Flush request; completion is reported back via
// NotifyFlushDone().
void GpuVideoDecodeAccelerator::OnFlush() {
  DCHECK(video_decode_accelerator_.get());
  video_decode_accelerator_->Flush();
}
426
// Host handler: forwards a Reset request; completion is reported back via
// NotifyResetDone().
void GpuVideoDecodeAccelerator::OnReset() {
  DCHECK(video_decode_accelerator_.get());
  video_decode_accelerator_->Reset();
}
431
// Host handler: client asked to destroy the decoder. Reuses the stub-teardown
// path, which destroys the VDA and deletes |this|.
void GpuVideoDecodeAccelerator::OnDestroy() {
  DCHECK(video_decode_accelerator_.get());
  OnWillDestroyStub();
}
436
// Called on the IO thread once the MessageFilter has been removed from the
// channel. Cancels pending IO-thread callbacks and unblocks the child thread,
// which is waiting on |filter_removed_| in OnWillDestroyStub().
void GpuVideoDecodeAccelerator::OnFilterRemoved() {
  // We're destroying; cancel all callbacks.
  weak_factory_for_io_.InvalidateWeakPtrs();
  filter_removed_.Signal();
}
442
443 void GpuVideoDecodeAccelerator::NotifyEndOfBitstreamBuffer(
444     int32 bitstream_buffer_id) {
445   if (!Send(new AcceleratedVideoDecoderHostMsg_BitstreamBufferProcessed(
446           host_route_id_, bitstream_buffer_id))) {
447     DLOG(ERROR)
448         << "Send(AcceleratedVideoDecoderHostMsg_BitstreamBufferProcessed) "
449         << "failed";
450   }
451 }
452
453 void GpuVideoDecodeAccelerator::NotifyFlushDone() {
454   if (!Send(new AcceleratedVideoDecoderHostMsg_FlushDone(host_route_id_)))
455     DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_FlushDone) failed";
456 }
457
458 void GpuVideoDecodeAccelerator::NotifyResetDone() {
459   if (!Send(new AcceleratedVideoDecoderHostMsg_ResetDone(host_route_id_)))
460     DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_ResetDone) failed";
461 }
462
// Tears down the decoder while the stub's GL context is still available.
// Deletes |this| before returning.
void GpuVideoDecodeAccelerator::OnWillDestroyStub() {
  // The stub is going away, so we have to stop and destroy VDA here, before
  // returning, because the VDA may need the GL context to run and/or do its
  // cleanup. We cannot destroy the VDA before the IO thread message filter is
  // removed however, since we cannot service incoming messages with VDA gone.
  // We cannot simply check for existence of VDA on IO thread though, because
  // we don't want to synchronize the IO thread with the ChildThread.
  // So we have to wait for the RemoveFilter callback here instead and remove
  // the VDA after it arrives and before returning.
  if (filter_.get()) {
    stub_->channel()->RemoveFilter(filter_.get());
    // Blocks the child thread until OnFilterRemoved() signals from the IO
    // thread.
    filter_removed_.Wait();
  }

  stub_->channel()->RemoveRoute(host_route_id_);
  stub_->RemoveDestructionObserver(this);

  video_decode_accelerator_.reset();
  delete this;
}
483
484 void GpuVideoDecodeAccelerator::SetTextureCleared(
485     const media::Picture& picture) {
486   DCHECK(child_message_loop_->BelongsToCurrentThread());
487   DebugAutoLock auto_lock(debug_uncleared_textures_lock_);
488   std::map<int32, scoped_refptr<gpu::gles2::TextureRef> >::iterator it;
489   it = uncleared_textures_.find(picture.picture_buffer_id());
490   if (it == uncleared_textures_.end())
491     return;  // the texture has been cleared
492
493   scoped_refptr<gpu::gles2::TextureRef> texture_ref = it->second;
494   GLenum target = texture_ref->texture()->target();
495   gpu::gles2::TextureManager* texture_manager =
496       stub_->decoder()->GetContextGroup()->texture_manager();
497   DCHECK(!texture_ref->texture()->IsLevelCleared(target, 0));
498   texture_manager->SetLevelCleared(texture_ref, target, 0, true);
499   uncleared_textures_.erase(it);
500 }
501
502 bool GpuVideoDecodeAccelerator::Send(IPC::Message* message) {
503   if (filter_.get() && io_message_loop_->BelongsToCurrentThread())
504     return filter_->SendOnIOThread(message);
505   DCHECK(child_message_loop_->BelongsToCurrentThread());
506   return stub_->channel()->Send(message);
507 }
508
// Writes |succeeded| into the synchronous CreateVideoDecoder reply |message|
// and sends it. Ownership of |message| passes to Send().
void GpuVideoDecodeAccelerator::SendCreateDecoderReply(IPC::Message* message,
                                                       bool succeeded) {
  GpuCommandBufferMsg_CreateVideoDecoder::WriteReplyParams(message, succeeded);
  Send(message);
}
514
515 }  // namespace content