// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/common/gpu/media/gpu_video_decode_accelerator.h"

#include <vector>

#include "base/bind.h"
#include "base/command_line.h"
#include "base/logging.h"
#include "base/message_loop/message_loop_proxy.h"
#include "base/stl_util.h"

#include "content/common/gpu/gpu_channel.h"
#include "content/common/gpu/gpu_messages.h"
#include "content/public/common/content_switches.h"
#include "gpu/command_buffer/common/command_buffer.h"
#include "ipc/ipc_message_macros.h"
#include "ipc/ipc_message_utils.h"
#include "ipc/message_filter.h"
#include "media/base/limits.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_surface_egl.h"

#if defined(OS_WIN)
#include "base/win/windows_version.h"
#include "content/common/gpu/media/dxva_video_decode_accelerator.h"
#elif defined(OS_MACOSX)
#include "content/common/gpu/media/vt_video_decode_accelerator.h"
#elif defined(OS_CHROMEOS) && defined(ARCH_CPU_ARMEL) && defined(USE_X11)
#include "content/common/gpu/media/v4l2_video_decode_accelerator.h"
#include "content/common/gpu/media/v4l2_video_device.h"
#elif defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY) && defined(USE_X11)
#include "content/common/gpu/media/vaapi_video_decode_accelerator.h"
#include "ui/gl/gl_context_glx.h"
#include "ui/gl/gl_implementation.h"
#elif defined(USE_OZONE)
#include "media/ozone/media_ozone_platform.h"
#elif defined(OS_ANDROID)
#include "content/common/gpu/media/android_video_decode_accelerator.h"
#endif

#include "ui/gfx/size.h"

namespace content {

static bool MakeDecoderContextCurrent(
    const base::WeakPtr<GpuCommandBufferStub> stub) {
  if (!stub) {
    DLOG(ERROR) << "Stub is gone; won't MakeCurrent().";
    return false;
  }

  if (!stub->decoder()->MakeCurrent()) {
    DLOG(ERROR) << "Failed to MakeCurrent()";
    return false;
  }

  return true;
}

// DebugAutoLock works like AutoLock but only acquires the lock when
// DCHECK is on.
#if DCHECK_IS_ON
typedef base::AutoLock DebugAutoLock;
#else
class DebugAutoLock {
 public:
  explicit DebugAutoLock(base::Lock&) {}
};
#endif

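// Receives AcceleratedVideoDecoderMsg_Decode messages on the IO thread and
// forwards them directly to the owning GpuVideoDecodeAccelerator, so decode
// requests do not have to wait on the GPU child (main) thread. All other
// messages are left unhandled here and reach the regular route on the child
// thread.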
class GpuVideoDecodeAccelerator::MessageFilter : public IPC::MessageFilter {
 public:
  MessageFilter(GpuVideoDecodeAccelerator* owner, int32 host_route_id)
      : owner_(owner), host_route_id_(host_route_id), sender_(NULL) {}

  virtual void OnChannelError() OVERRIDE { sender_ = NULL; }

  virtual void OnChannelClosing() OVERRIDE { sender_ = NULL; }

  virtual void OnFilterAdded(IPC::Sender* sender) OVERRIDE {
    sender_ = sender;
  }

  virtual void OnFilterRemoved() OVERRIDE {
    // This will delete |owner_| and |this|.
    owner_->OnFilterRemoved();
  }

  virtual bool OnMessageReceived(const IPC::Message& msg) OVERRIDE {
    if (msg.routing_id() != host_route_id_)
      return false;

    IPC_BEGIN_MESSAGE_MAP(MessageFilter, msg)
      IPC_MESSAGE_FORWARD(AcceleratedVideoDecoderMsg_Decode, owner_,
                          GpuVideoDecodeAccelerator::OnDecode)
      IPC_MESSAGE_UNHANDLED(return false;)
    IPC_END_MESSAGE_MAP()
    return true;
  }

  bool SendOnIOThread(IPC::Message* message) {
    DCHECK(!message->is_sync());
    if (!sender_) {
      delete message;
      return false;
    }
    return sender_->Send(message);
  }

 protected:
  virtual ~MessageFilter() {}

 private:
  GpuVideoDecodeAccelerator* owner_;
  int32 host_route_id_;
  // The sender to which this filter was added.
  IPC::Sender* sender_;
};

GpuVideoDecodeAccelerator::GpuVideoDecodeAccelerator(
    int32 host_route_id,
    GpuCommandBufferStub* stub,
    const scoped_refptr<base::MessageLoopProxy>& io_message_loop)
    : host_route_id_(host_route_id),
      stub_(stub),
      texture_target_(0),
      filter_removed_(true, false),
      io_message_loop_(io_message_loop),
      weak_factory_for_io_(this) {
  DCHECK(stub_);
  stub_->AddDestructionObserver(this);
  child_message_loop_ = base::MessageLoopProxy::current();
  make_context_current_ =
      base::Bind(&MakeDecoderContextCurrent, stub_->AsWeakPtr());
}

GpuVideoDecodeAccelerator::~GpuVideoDecodeAccelerator() {
  // This class can only be self-deleted from OnWillDestroyStub(), which means
  // the VDA has already been destroyed in there.
  DCHECK(!video_decode_accelerator_);
}

bool GpuVideoDecodeAccelerator::OnMessageReceived(const IPC::Message& msg) {
  if (!video_decode_accelerator_)
    return false;

  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(GpuVideoDecodeAccelerator, msg)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Decode, OnDecode)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_AssignPictureBuffers,
                        OnAssignPictureBuffers)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_ReusePictureBuffer,
                        OnReusePictureBuffer)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Flush, OnFlush)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Reset, OnReset)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Destroy, OnDestroy)
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()
  return handled;
}

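// Forwards the VDA's request for picture buffers to the renderer-side client
// and remembers the dimensions and texture target so OnAssignPictureBuffers()
// can validate the textures it gets back.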
void GpuVideoDecodeAccelerator::ProvidePictureBuffers(
    uint32 requested_num_of_buffers,
    const gfx::Size& dimensions,
    uint32 texture_target) {
  if (dimensions.width() > media::limits::kMaxDimension ||
      dimensions.height() > media::limits::kMaxDimension ||
      dimensions.GetArea() > media::limits::kMaxCanvas) {
    NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE);
    return;
  }
  if (!Send(new AcceleratedVideoDecoderHostMsg_ProvidePictureBuffers(
           host_route_id_,
           requested_num_of_buffers,
           dimensions,
           texture_target))) {
    DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_ProvidePictureBuffers) "
                << "failed";
  }
  texture_dimensions_ = dimensions;
  texture_target_ = texture_target;
}

void GpuVideoDecodeAccelerator::DismissPictureBuffer(
    int32 picture_buffer_id) {
  // Notify client that picture buffer is now unused.
  if (!Send(new AcceleratedVideoDecoderHostMsg_DismissPictureBuffer(
          host_route_id_, picture_buffer_id))) {
    DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_DismissPictureBuffer) "
                << "failed";
  }
  DebugAutoLock auto_lock(debug_uncleared_textures_lock_);
  uncleared_textures_.erase(picture_buffer_id);
}

void GpuVideoDecodeAccelerator::PictureReady(
    const media::Picture& picture) {
  // The VDA may call PictureReady() on the IO thread, but SetTextureCleared()
  // must run on the child thread. The VDA is responsible for calling
  // PictureReady() on the child thread the first time a picture buffer is
  // delivered.
  if (child_message_loop_->BelongsToCurrentThread()) {
    SetTextureCleared(picture);
  } else {
    DCHECK(io_message_loop_->BelongsToCurrentThread());
    DebugAutoLock auto_lock(debug_uncleared_textures_lock_);
    DCHECK_EQ(0u, uncleared_textures_.count(picture.picture_buffer_id()));
  }

  if (!Send(new AcceleratedVideoDecoderHostMsg_PictureReady(
          host_route_id_,
          picture.picture_buffer_id(),
          picture.bitstream_buffer_id(),
          picture.visible_rect()))) {
    DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_PictureReady) failed";
  }
}

void GpuVideoDecodeAccelerator::NotifyError(
    media::VideoDecodeAccelerator::Error error) {
  if (!Send(new AcceleratedVideoDecoderHostMsg_ErrorNotification(
          host_route_id_, error))) {
    DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_ErrorNotification) "
                << "failed";
  }
}

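// Creates the platform-specific VideoDecodeAccelerator, hooks up the IO
// thread message filter when the VDA can decode off the main thread, and
// replies to the renderer's CreateVideoDecoder request.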
void GpuVideoDecodeAccelerator::Initialize(
    const media::VideoCodecProfile profile,
    IPC::Message* init_done_msg) {
  DCHECK(!video_decode_accelerator_.get());

  if (!stub_->channel()->AddRoute(host_route_id_, this)) {
    DLOG(ERROR) << "GpuVideoDecodeAccelerator::Initialize(): "
                   "failed to add route";
    SendCreateDecoderReply(init_done_msg, false);
    return;
  }

#if !defined(OS_WIN)
  // Ensure we will be able to get a GL context at all before initializing
  // non-Windows VDAs.
  if (!make_context_current_.Run()) {
    SendCreateDecoderReply(init_done_msg, false);
    return;
  }
#endif

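  // The concrete VDA implementation is selected at compile time: DXVA on
  // Windows, VideoToolbox on Mac, V4L2 or VA-API on Chrome OS depending on
  // the CPU architecture, the Ozone platform hook, or MediaCodec on Android.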
#if defined(OS_WIN)
  if (base::win::GetVersion() < base::win::VERSION_WIN7) {
    NOTIMPLEMENTED() << "HW video decode acceleration not available.";
    SendCreateDecoderReply(init_done_msg, false);
    return;
  }
  DVLOG(0) << "Initializing DXVA HW decoder for windows.";
  video_decode_accelerator_.reset(
      new DXVAVideoDecodeAccelerator(make_context_current_));
#elif defined(OS_MACOSX)
  video_decode_accelerator_.reset(new VTVideoDecodeAccelerator(
      static_cast<CGLContextObj>(
          stub_->decoder()->GetGLContext()->GetHandle())));
#elif defined(OS_CHROMEOS) && defined(ARCH_CPU_ARMEL) && defined(USE_X11)
  scoped_ptr<V4L2Device> device = V4L2Device::Create(V4L2Device::kDecoder);
  if (!device.get()) {
    SendCreateDecoderReply(init_done_msg, false);
    return;
  }
  video_decode_accelerator_.reset(new V4L2VideoDecodeAccelerator(
      gfx::GLSurfaceEGL::GetHardwareDisplay(),
      stub_->decoder()->GetGLContext()->GetHandle(),
      weak_factory_for_io_.GetWeakPtr(),
      make_context_current_,
      device.Pass(),
      io_message_loop_));
#elif defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY) && defined(USE_X11)
  if (gfx::GetGLImplementation() != gfx::kGLImplementationDesktopGL) {
    VLOG(1) << "HW video decode acceleration not available without "
               "DesktopGL (GLX).";
    SendCreateDecoderReply(init_done_msg, false);
    return;
  }
  gfx::GLContextGLX* glx_context =
      static_cast<gfx::GLContextGLX*>(stub_->decoder()->GetGLContext());
  video_decode_accelerator_.reset(new VaapiVideoDecodeAccelerator(
      glx_context->display(), make_context_current_));
#elif defined(USE_OZONE)
  media::MediaOzonePlatform* platform =
      media::MediaOzonePlatform::GetInstance();
  video_decode_accelerator_.reset(platform->CreateVideoDecodeAccelerator(
      make_context_current_));
  if (!video_decode_accelerator_) {
    SendCreateDecoderReply(init_done_msg, false);
    return;
  }
#elif defined(OS_ANDROID)
  video_decode_accelerator_.reset(new AndroidVideoDecodeAccelerator(
      stub_->decoder()->AsWeakPtr(),
      make_context_current_));
#else
  NOTIMPLEMENTED() << "HW video decode acceleration not available.";
  SendCreateDecoderReply(init_done_msg, false);
  return;
#endif

  if (video_decode_accelerator_->CanDecodeOnIOThread()) {
    filter_ = new MessageFilter(this, host_route_id_);
    stub_->channel()->AddFilter(filter_.get());
  }

  if (!video_decode_accelerator_->Initialize(profile, this)) {
    SendCreateDecoderReply(init_done_msg, false);
    return;
  }

  SendCreateDecoderReply(init_done_msg, true);
}

// Runs on IO thread if video_decode_accelerator_->CanDecodeOnIOThread() is
// true, otherwise on the main thread.
void GpuVideoDecodeAccelerator::OnDecode(
    base::SharedMemoryHandle handle, int32 id, uint32 size) {
  DCHECK(video_decode_accelerator_.get());
  if (id < 0) {
    DLOG(ERROR) << "BitstreamBuffer id " << id << " out of range";
    if (child_message_loop_->BelongsToCurrentThread()) {
      NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
    } else {
      child_message_loop_->PostTask(
          FROM_HERE,
          base::Bind(&GpuVideoDecodeAccelerator::NotifyError,
                     base::Unretained(this),
                     media::VideoDecodeAccelerator::INVALID_ARGUMENT));
    }
    return;
  }
  video_decode_accelerator_->Decode(media::BitstreamBuffer(id, handle, size));
}

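// Validates the picture buffer / texture id pairs supplied by the renderer,
// checks each texture against the target and dimensions requested in
// ProvidePictureBuffers(), and hands the resulting PictureBuffers to the VDA.
// The textures are tracked as uncleared until the first picture is delivered
// in them (see SetTextureCleared()).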
void GpuVideoDecodeAccelerator::OnAssignPictureBuffers(
    const std::vector<int32>& buffer_ids,
    const std::vector<uint32>& texture_ids) {
  if (buffer_ids.size() != texture_ids.size()) {
    NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
    return;
  }

  gpu::gles2::GLES2Decoder* command_decoder = stub_->decoder();
  gpu::gles2::TextureManager* texture_manager =
      command_decoder->GetContextGroup()->texture_manager();

  std::vector<media::PictureBuffer> buffers;
  std::vector<scoped_refptr<gpu::gles2::TextureRef> > textures;
  for (uint32 i = 0; i < buffer_ids.size(); ++i) {
    if (buffer_ids[i] < 0) {
      DLOG(ERROR) << "Buffer id " << buffer_ids[i] << " out of range";
      NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
      return;
    }
    gpu::gles2::TextureRef* texture_ref = texture_manager->GetTexture(
        texture_ids[i]);
    if (!texture_ref) {
      DLOG(ERROR) << "Failed to find texture id " << texture_ids[i];
      NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
      return;
    }
    gpu::gles2::Texture* info = texture_ref->texture();
    if (info->target() != texture_target_) {
      DLOG(ERROR) << "Texture target mismatch for texture id "
                  << texture_ids[i];
      NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
      return;
    }
    if (texture_target_ == GL_TEXTURE_EXTERNAL_OES ||
        texture_target_ == GL_TEXTURE_RECTANGLE) {
      // These textures have their dimensions defined by the underlying
      // storage. Use |texture_dimensions_| for this size.
      texture_manager->SetLevelInfo(texture_ref,
                                    texture_target_,
                                    0,
                                    0,
                                    texture_dimensions_.width(),
                                    texture_dimensions_.height(),
                                    1,
                                    0,
                                    0,
                                    0,
                                    false);
    } else {
      // For other targets, texture dimensions should already be defined.
      GLsizei width = 0, height = 0;
      info->GetLevelSize(texture_target_, 0, &width, &height);
      if (width != texture_dimensions_.width() ||
          height != texture_dimensions_.height()) {
        DLOG(ERROR) << "Size mismatch for texture id " << texture_ids[i];
        NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
        return;
      }
    }
    uint32 service_texture_id;
    if (!command_decoder->GetServiceTextureId(
            texture_ids[i], &service_texture_id)) {
      DLOG(ERROR) << "Failed to translate texture!";
      NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE);
      return;
    }
    buffers.push_back(media::PictureBuffer(
        buffer_ids[i], texture_dimensions_, service_texture_id));
    textures.push_back(texture_ref);
  }
  video_decode_accelerator_->AssignPictureBuffers(buffers);
  DebugAutoLock auto_lock(debug_uncleared_textures_lock_);
  for (uint32 i = 0; i < buffer_ids.size(); ++i)
    uncleared_textures_[buffer_ids[i]] = textures[i];
}

void GpuVideoDecodeAccelerator::OnReusePictureBuffer(
    int32 picture_buffer_id) {
  DCHECK(video_decode_accelerator_.get());
  video_decode_accelerator_->ReusePictureBuffer(picture_buffer_id);
}

void GpuVideoDecodeAccelerator::OnFlush() {
  DCHECK(video_decode_accelerator_.get());
  video_decode_accelerator_->Flush();
}

void GpuVideoDecodeAccelerator::OnReset() {
  DCHECK(video_decode_accelerator_.get());
  video_decode_accelerator_->Reset();
}

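// Tears the decoder down in response to an explicit Destroy message from the
// renderer; OnWillDestroyStub() does the actual cleanup and deletes |this|.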
void GpuVideoDecodeAccelerator::OnDestroy() {
  DCHECK(video_decode_accelerator_.get());
  OnWillDestroyStub();
}

void GpuVideoDecodeAccelerator::OnFilterRemoved() {
  // We're destroying; cancel all callbacks.
  weak_factory_for_io_.InvalidateWeakPtrs();
  filter_removed_.Signal();
}

void GpuVideoDecodeAccelerator::NotifyEndOfBitstreamBuffer(
    int32 bitstream_buffer_id) {
  if (!Send(new AcceleratedVideoDecoderHostMsg_BitstreamBufferProcessed(
          host_route_id_, bitstream_buffer_id))) {
    DLOG(ERROR)
        << "Send(AcceleratedVideoDecoderHostMsg_BitstreamBufferProcessed) "
        << "failed";
  }
}

void GpuVideoDecodeAccelerator::NotifyFlushDone() {
  if (!Send(new AcceleratedVideoDecoderHostMsg_FlushDone(host_route_id_)))
    DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_FlushDone) failed";
}

void GpuVideoDecodeAccelerator::NotifyResetDone() {
  if (!Send(new AcceleratedVideoDecoderHostMsg_ResetDone(host_route_id_)))
    DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_ResetDone) failed";
}

void GpuVideoDecodeAccelerator::OnWillDestroyStub() {
  // The stub is going away, so we have to stop and destroy the VDA here,
  // before returning, because the VDA may need the GL context to run and/or
  // do its cleanup. We cannot destroy the VDA before the IO thread message
  // filter is removed, however, since we cannot service incoming messages
  // with the VDA gone. We also cannot simply check for the existence of the
  // VDA on the IO thread, because we don't want to synchronize the IO thread
  // with the ChildThread. So we wait for the RemoveFilter callback here
  // instead, and destroy the VDA after it arrives and before returning.
  if (filter_.get()) {
    stub_->channel()->RemoveFilter(filter_.get());
    filter_removed_.Wait();
  }

  stub_->channel()->RemoveRoute(host_route_id_);
  stub_->RemoveDestructionObserver(this);

  video_decode_accelerator_.reset();
  delete this;
}

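// Called on the child thread the first time a decoded picture is delivered
// in a given picture buffer: marks level 0 of the backing texture as cleared
// in the TextureManager and drops the buffer from |uncleared_textures_|.
// Does nothing if the texture has already been cleared.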
void GpuVideoDecodeAccelerator::SetTextureCleared(
    const media::Picture& picture) {
  DCHECK(child_message_loop_->BelongsToCurrentThread());
  DebugAutoLock auto_lock(debug_uncleared_textures_lock_);
  std::map<int32, scoped_refptr<gpu::gles2::TextureRef> >::iterator it;
  it = uncleared_textures_.find(picture.picture_buffer_id());
  if (it == uncleared_textures_.end())
    return;  // The texture has already been cleared.

  scoped_refptr<gpu::gles2::TextureRef> texture_ref = it->second;
  GLenum target = texture_ref->texture()->target();
  gpu::gles2::TextureManager* texture_manager =
      stub_->decoder()->GetContextGroup()->texture_manager();
  DCHECK(!texture_ref->texture()->IsLevelCleared(target, 0));
  texture_manager->SetLevelCleared(texture_ref, target, 0, true);
  uncleared_textures_.erase(it);
}

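// Sends |message| via the IO thread filter when called on the IO thread,
// otherwise via the GPU channel on the child thread.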
bool GpuVideoDecodeAccelerator::Send(IPC::Message* message) {
  if (filter_.get() && io_message_loop_->BelongsToCurrentThread())
    return filter_->SendOnIOThread(message);
  DCHECK(child_message_loop_->BelongsToCurrentThread());
  return stub_->channel()->Send(message);
}

void GpuVideoDecodeAccelerator::SendCreateDecoderReply(IPC::Message* message,
                                                       bool succeeded) {
  GpuCommandBufferMsg_CreateVideoDecoder::WriteReplyParams(message, succeeded);
  Send(message);
}

}  // namespace content