Upstream version 7.35.139.0
[platform/framework/web/crosswalk.git] / src / content / common / gpu / media / gpu_video_decode_accelerator.cc
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "content/common/gpu/media/gpu_video_decode_accelerator.h"
6
7 #include <vector>
8
9 #include "base/bind.h"
10 #include "base/command_line.h"
11 #include "base/logging.h"
12 #include "base/message_loop/message_loop_proxy.h"
13 #include "base/stl_util.h"
14
15 #include "content/common/gpu/gpu_channel.h"
16 #include "content/common/gpu/gpu_messages.h"
17 #include "content/public/common/content_switches.h"
18 #include "gpu/command_buffer/common/command_buffer.h"
19 #include "ipc/ipc_message_macros.h"
20 #include "ipc/ipc_message_utils.h"
21 #include "media/base/limits.h"
22 #include "ui/gl/gl_context.h"
23 #include "ui/gl/gl_surface_egl.h"
24
25 #if defined(OS_WIN)
26 #include "base/win/windows_version.h"
27 #include "content/common/gpu/media/dxva_video_decode_accelerator.h"
28 #elif defined(OS_CHROMEOS) && defined(ARCH_CPU_ARMEL) && defined(USE_X11)
29 #include "content/common/gpu/media/v4l2_video_decode_accelerator.h"
30 #include "content/common/gpu/media/v4l2_video_device.h"
31 #elif defined(ARCH_CPU_X86_FAMILY) && (defined(USE_X11) || defined(USE_OZONE))
32 #include "content/common/gpu/media/vaapi_video_decode_accelerator.h"
33 #if defined(USE_X11)
34 #include "ui/gl/gl_context_glx.h"
35 #include "ui/gl/gl_implementation.h"
36 #endif
37 #elif defined(OS_ANDROID)
38 #include "content/common/gpu/media/android_video_decode_accelerator.h"
39 #endif
40
41 #include "ui/gfx/size.h"
42
43 namespace content {
44
45 static bool MakeDecoderContextCurrent(
46     const base::WeakPtr<GpuCommandBufferStub> stub) {
47   if (!stub.get()) {
48     DLOG(ERROR) << "Stub is gone; won't MakeCurrent().";
49     return false;
50   }
51
52   if (!stub->decoder()->MakeCurrent()) {
53     DLOG(ERROR) << "Failed to MakeCurrent()";
54     return false;
55   }
56
57   return true;
58 }
59
// DebugAutoLock works like AutoLock but only acquires the lock when
// DCHECK is on.
#if DCHECK_IS_ON
typedef base::AutoLock DebugAutoLock;
#else
// Release builds: a no-op stand-in. The constructor takes the lock by
// reference purely to keep call sites identical, but never acquires it —
// the guarded state (|uncleared_textures_|) is only cross-checked in
// debug builds.
class DebugAutoLock {
 public:
  explicit DebugAutoLock(base::Lock&) {}
};
#endif
70
71 class GpuVideoDecodeAccelerator::MessageFilter
72     : public IPC::ChannelProxy::MessageFilter {
73  public:
74   MessageFilter(GpuVideoDecodeAccelerator* owner, int32 host_route_id)
75       : owner_(owner), host_route_id_(host_route_id) {}
76
77   virtual void OnChannelError() OVERRIDE { channel_ = NULL; }
78
79   virtual void OnChannelClosing() OVERRIDE { channel_ = NULL; }
80
81   virtual void OnFilterAdded(IPC::Channel* channel) OVERRIDE {
82     channel_ = channel;
83   }
84
85   virtual void OnFilterRemoved() OVERRIDE {
86     // This will delete |owner_| and |this|.
87     owner_->OnFilterRemoved();
88   }
89
90   virtual bool OnMessageReceived(const IPC::Message& msg) OVERRIDE {
91     if (msg.routing_id() != host_route_id_)
92       return false;
93
94     IPC_BEGIN_MESSAGE_MAP(MessageFilter, msg)
95       IPC_MESSAGE_FORWARD(AcceleratedVideoDecoderMsg_Decode, owner_,
96                           GpuVideoDecodeAccelerator::OnDecode)
97       IPC_MESSAGE_UNHANDLED(return false;)
98     IPC_END_MESSAGE_MAP()
99     return true;
100   }
101
102   bool SendOnIOThread(IPC::Message* message) {
103     DCHECK(!message->is_sync());
104     if (!channel_) {
105       delete message;
106       return false;
107     }
108     return channel_->Send(message);
109   }
110
111  protected:
112   virtual ~MessageFilter() {}
113
114  private:
115   GpuVideoDecodeAccelerator* owner_;
116   int32 host_route_id_;
117   // The channel to which this filter was added.
118   IPC::Channel* channel_;
119 };
120
// Constructed on the child (main GPU) thread. Registers itself as a route
// on |stub|'s channel and as a destruction observer of |stub|; teardown
// happens in OnWillDestroyStub().
GpuVideoDecodeAccelerator::GpuVideoDecodeAccelerator(
    int32 host_route_id,
    GpuCommandBufferStub* stub,
    const scoped_refptr<base::MessageLoopProxy>& io_message_loop)
    : init_done_msg_(NULL),
      host_route_id_(host_route_id),
      stub_(stub),
      texture_target_(0),
      // Manual-reset event, initially unsignaled; signaled by
      // OnFilterRemoved() and waited on in OnWillDestroyStub().
      filter_removed_(true, false),
      io_message_loop_(io_message_loop),
      weak_factory_for_io_(this) {
  DCHECK(stub_);
  stub_->AddDestructionObserver(this);
  stub_->channel()->AddRoute(host_route_id_, this);
  // Remember the construction thread so replies and texture updates can be
  // posted back to it.
  child_message_loop_ = base::MessageLoopProxy::current();
  make_context_current_ =
      base::Bind(&MakeDecoderContextCurrent, stub_->AsWeakPtr());
}
139
// Only reachable via the |delete this| in OnWillDestroyStub().
GpuVideoDecodeAccelerator::~GpuVideoDecodeAccelerator() {
  // This class can only be self-deleted from OnWillDestroyStub(), which means
  // the VDA has already been destroyed in there.
  CHECK(!video_decode_accelerator_.get());
}
145
// Child-thread dispatch of AcceleratedVideoDecoderMsg_* IPCs for this
// route. Returns false (unhandled) once the VDA has been destroyed, so
// stray messages during teardown are dropped.
bool GpuVideoDecodeAccelerator::OnMessageReceived(const IPC::Message& msg) {
  if (!video_decode_accelerator_)
    return false;

  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(GpuVideoDecodeAccelerator, msg)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Decode, OnDecode)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_AssignPictureBuffers,
                        OnAssignPictureBuffers)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_ReusePictureBuffer,
                        OnReusePictureBuffer)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Flush, OnFlush)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Reset, OnReset)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Destroy, OnDestroy)
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()
  return handled;
}
164
165 void GpuVideoDecodeAccelerator::ProvidePictureBuffers(
166     uint32 requested_num_of_buffers,
167     const gfx::Size& dimensions,
168     uint32 texture_target) {
169   if (dimensions.width() > media::limits::kMaxDimension ||
170       dimensions.height() > media::limits::kMaxDimension ||
171       dimensions.GetArea() > media::limits::kMaxCanvas) {
172     NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE);
173     return;
174   }
175   if (!Send(new AcceleratedVideoDecoderHostMsg_ProvidePictureBuffers(
176            host_route_id_,
177            requested_num_of_buffers,
178            dimensions,
179            texture_target))) {
180     DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_ProvidePictureBuffers) "
181                 << "failed";
182   }
183   texture_dimensions_ = dimensions;
184   texture_target_ = texture_target;
185 }
186
187 void GpuVideoDecodeAccelerator::DismissPictureBuffer(
188     int32 picture_buffer_id) {
189   // Notify client that picture buffer is now unused.
190   if (!Send(new AcceleratedVideoDecoderHostMsg_DismissPictureBuffer(
191           host_route_id_, picture_buffer_id))) {
192     DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_DismissPictureBuffer) "
193                 << "failed";
194   }
195   DebugAutoLock auto_lock(debug_uncleared_textures_lock_);
196   uncleared_textures_.erase(picture_buffer_id);
197 }
198
// VDA callback: forwards a decoded picture to the client. May be invoked
// on either the child thread or the IO thread.
void GpuVideoDecodeAccelerator::PictureReady(
    const media::Picture& picture) {
  // VDA may call PictureReady on IO thread. SetTextureCleared should run on
  // the child thread. VDA is responsible to call PictureReady on the child
  // thread when a picture buffer is delivered the first time.
  if (child_message_loop_->BelongsToCurrentThread()) {
    SetTextureCleared(picture);
  } else {
    DCHECK(io_message_loop_->BelongsToCurrentThread());
    // On the IO thread the texture must already have been cleared on a
    // prior child-thread delivery; verify that (debug builds only).
    DebugAutoLock auto_lock(debug_uncleared_textures_lock_);
    DCHECK_EQ(0u, uncleared_textures_.count(picture.picture_buffer_id()));
  }

  if (!Send(new AcceleratedVideoDecoderHostMsg_PictureReady(
          host_route_id_,
          picture.picture_buffer_id(),
          picture.bitstream_buffer_id()))) {
    DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_PictureReady) failed";
  }
}
219
// VDA callback: reports a decode error to the client. If initialization is
// still pending (|init_done_msg_| unsent), the error is delivered by
// completing the Initialize reply with a failure code instead.
void GpuVideoDecodeAccelerator::NotifyError(
    media::VideoDecodeAccelerator::Error error) {
  if (init_done_msg_) {
    // If we get an error while we're initializing, NotifyInitializeDone won't
    // be called, so we need to send the reply (with an error) here.
    // -1 is the failure sentinel route id for the CreateVideoDecoder reply.
    GpuCommandBufferMsg_CreateVideoDecoder::WriteReplyParams(
        init_done_msg_, -1);
    if (!Send(init_done_msg_))
      DLOG(ERROR) << "Send(init_done_msg_) failed";
    // Send() took ownership; clear so the reply is never sent twice.
    init_done_msg_ = NULL;
    return;
  }
  if (!Send(new AcceleratedVideoDecoderHostMsg_ErrorNotification(
          host_route_id_, error))) {
    DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_ErrorNotification) "
                << "failed";
  }
}
238
// Creates the platform-specific VDA for |profile| and starts initializing
// it. |init_done_msg| is the pending IPC reply; it is answered either from
// NotifyInitializeDone() on success or from NotifyError() on failure.
void GpuVideoDecodeAccelerator::Initialize(
    const media::VideoCodecProfile profile,
    IPC::Message* init_done_msg) {
  DCHECK(!video_decode_accelerator_.get());
  DCHECK(!init_done_msg_);
  DCHECK(init_done_msg);
  init_done_msg_ = init_done_msg;

#if !defined(OS_WIN)
  // Ensure we will be able to get a GL context at all before initializing
  // non-Windows VDAs.
  if (!make_context_current_.Run()) {
    NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE);
    return;
  }
#endif

#if defined(OS_WIN)
  // DXVA decode requires Windows 7 or later.
  if (base::win::GetVersion() < base::win::VERSION_WIN7) {
    NOTIMPLEMENTED() << "HW video decode acceleration not available.";
    NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE);
    return;
  }
  DVLOG(0) << "Initializing DXVA HW decoder for windows.";
  video_decode_accelerator_.reset(
      new DXVAVideoDecodeAccelerator(make_context_current_));
#elif defined(OS_CHROMEOS) && defined(ARCH_CPU_ARMEL) && defined(USE_X11)
  // ARM Chrome OS: V4L2-based decoder; needs a usable V4L2 device node.
  scoped_ptr<V4L2Device> device = V4L2Device::Create();
  if (!device.get()) {
    NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE);
    return;
  }
  video_decode_accelerator_.reset(
      new V4L2VideoDecodeAccelerator(gfx::GLSurfaceEGL::GetHardwareDisplay(),
                                     weak_factory_for_io_.GetWeakPtr(),
                                     make_context_current_,
                                     device.Pass(),
                                     io_message_loop_));
#elif defined(ARCH_CPU_X86_FAMILY) && defined(USE_OZONE)
  video_decode_accelerator_.reset(new VaapiVideoDecodeAccelerator(
      make_context_current_));
#elif defined(ARCH_CPU_X86_FAMILY) && defined(USE_X11)
  // VA-API over GLX requires desktop GL; EGL/GLES paths are unsupported here.
  if (gfx::GetGLImplementation() != gfx::kGLImplementationDesktopGL) {
    VLOG(1) << "HW video decode acceleration not available without "
               "DesktopGL (GLX).";
    NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE);
    return;
  }
  gfx::GLContextGLX* glx_context =
      static_cast<gfx::GLContextGLX*>(stub_->decoder()->GetGLContext());
  video_decode_accelerator_.reset(new VaapiVideoDecodeAccelerator(
      glx_context->display(), make_context_current_));
#elif defined(OS_ANDROID)
  video_decode_accelerator_.reset(new AndroidVideoDecodeAccelerator(
      stub_->decoder()->AsWeakPtr(),
      make_context_current_));
#else
  NOTIMPLEMENTED() << "HW video decode acceleration not available.";
  NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE);
  return;
#endif

  // If the VDA can decode off the child thread, install the IO-thread
  // message filter so Decode IPCs bypass the child thread entirely.
  if (video_decode_accelerator_->CanDecodeOnIOThread()) {
    filter_ = new MessageFilter(this, host_route_id_);
    stub_->channel()->AddFilter(filter_.get());
  }

  if (!video_decode_accelerator_->Initialize(profile, this))
    NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE);
}
309
// Runs on IO thread if video_decode_accelerator_->CanDecodeOnIOThread() is
// true, otherwise on the main thread.
// Validates the bitstream buffer id and hands the buffer to the VDA.
void GpuVideoDecodeAccelerator::OnDecode(
    base::SharedMemoryHandle handle, int32 id, uint32 size) {
  DCHECK(video_decode_accelerator_.get());
  if (id < 0) {
    DLOG(ERROR) << "BitstreamBuffer id " << id << " out of range";
    // NotifyError must run on the child thread; post it over when this
    // handler was invoked from the IO-thread filter.
    if (child_message_loop_->BelongsToCurrentThread()) {
      NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
    } else {
      // base::Unretained is safe here only because teardown waits for the
      // IO-thread filter removal before deleting |this| — TODO confirm the
      // posted task cannot outlive OnWillDestroyStub().
      child_message_loop_->PostTask(
          FROM_HERE,
          base::Bind(&GpuVideoDecodeAccelerator::NotifyError,
                     base::Unretained(this),
                     media::VideoDecodeAccelerator::INVALID_ARGUMENT));
    }
    return;
  }
  video_decode_accelerator_->Decode(media::BitstreamBuffer(id, handle, size));
}
330
// Client reply to ProvidePictureBuffers(): pairs each buffer id with a GL
// texture. Validates ids, texture targets and sizes against the values
// remembered in ProvidePictureBuffers(), then hands the buffers to the VDA
// and records the texture refs so they can be marked cleared on first use.
void GpuVideoDecodeAccelerator::OnAssignPictureBuffers(
    const std::vector<int32>& buffer_ids,
    const std::vector<uint32>& texture_ids) {
  if (buffer_ids.size() != texture_ids.size()) {
    NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
    return;
  }

  gpu::gles2::GLES2Decoder* command_decoder = stub_->decoder();
  gpu::gles2::TextureManager* texture_manager =
      command_decoder->GetContextGroup()->texture_manager();

  std::vector<media::PictureBuffer> buffers;
  std::vector<scoped_refptr<gpu::gles2::TextureRef> > textures;
  for (uint32 i = 0; i < buffer_ids.size(); ++i) {
    if (buffer_ids[i] < 0) {
      DLOG(ERROR) << "Buffer id " << buffer_ids[i] << " out of range";
      NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
      return;
    }
    gpu::gles2::TextureRef* texture_ref = texture_manager->GetTexture(
        texture_ids[i]);
    if (!texture_ref) {
      DLOG(ERROR) << "Failed to find texture id " << texture_ids[i];
      NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
      return;
    }
    gpu::gles2::Texture* info = texture_ref->texture();
    // The texture's target must match what ProvidePictureBuffers() asked for.
    if (info->target() != texture_target_) {
      DLOG(ERROR) << "Texture target mismatch for texture id "
                  << texture_ids[i];
      NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
      return;
    }
    if (texture_target_ == GL_TEXTURE_EXTERNAL_OES) {
      // GL_TEXTURE_EXTERNAL_OES textures have their dimensions defined by the
      // underlying EGLImage.  Use |texture_dimensions_| for this size.
      texture_manager->SetLevelInfo(texture_ref,
                                    GL_TEXTURE_EXTERNAL_OES,
                                    0,
                                    0,
                                    texture_dimensions_.width(),
                                    texture_dimensions_.height(),
                                    1,
                                    0,
                                    0,
                                    0,
                                    false);
    } else {
      // For other targets, texture dimensions should already be defined.
      GLsizei width = 0, height = 0;
      info->GetLevelSize(texture_target_, 0, &width, &height);
      if (width != texture_dimensions_.width() ||
          height != texture_dimensions_.height()) {
        DLOG(ERROR) << "Size mismatch for texture id " << texture_ids[i];
        NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
        return;
      }
    }
    // Translate the client texture id into the service-side id the VDA uses.
    uint32 service_texture_id;
    if (!command_decoder->GetServiceTextureId(
            texture_ids[i], &service_texture_id)) {
      DLOG(ERROR) << "Failed to translate texture!";
      NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE);
      return;
    }
    buffers.push_back(media::PictureBuffer(
        buffer_ids[i], texture_dimensions_, service_texture_id));
    textures.push_back(texture_ref);
  }
  video_decode_accelerator_->AssignPictureBuffers(buffers);
  // Remember the refs so SetTextureCleared() can mark each texture cleared
  // the first time a picture is delivered on the child thread.
  DebugAutoLock auto_lock(debug_uncleared_textures_lock_);
  for (uint32 i = 0; i < buffer_ids.size(); ++i)
    uncleared_textures_[buffer_ids[i]] = textures[i];
}
406
// IPC handler: client has consumed the picture; return the buffer to the VDA.
void GpuVideoDecodeAccelerator::OnReusePictureBuffer(
    int32 picture_buffer_id) {
  DCHECK(video_decode_accelerator_.get());
  video_decode_accelerator_->ReusePictureBuffer(picture_buffer_id);
}
412
// IPC handler: forwards a flush request; VDA answers via NotifyFlushDone().
void GpuVideoDecodeAccelerator::OnFlush() {
  DCHECK(video_decode_accelerator_.get());
  video_decode_accelerator_->Flush();
}
417
// IPC handler: forwards a reset request; VDA answers via NotifyResetDone().
void GpuVideoDecodeAccelerator::OnReset() {
  DCHECK(video_decode_accelerator_.get());
  video_decode_accelerator_->Reset();
}
422
// IPC handler: client-initiated teardown. Reuses the stub-destruction path,
// which destroys the VDA and ends with |delete this|.
void GpuVideoDecodeAccelerator::OnDestroy() {
  DCHECK(video_decode_accelerator_.get());
  OnWillDestroyStub();
}
427
// Invoked by MessageFilter::OnFilterRemoved() once the filter is off the
// channel. Cancels pending IO-thread callbacks and unblocks the child
// thread waiting in OnWillDestroyStub().
void GpuVideoDecodeAccelerator::OnFilterRemoved() {
  // We're destroying; cancel all callbacks.
  weak_factory_for_io_.InvalidateWeakPtrs();
  filter_removed_.Signal();
}
433
434 void GpuVideoDecodeAccelerator::NotifyEndOfBitstreamBuffer(
435     int32 bitstream_buffer_id) {
436   if (!Send(new AcceleratedVideoDecoderHostMsg_BitstreamBufferProcessed(
437           host_route_id_, bitstream_buffer_id))) {
438     DLOG(ERROR)
439         << "Send(AcceleratedVideoDecoderHostMsg_BitstreamBufferProcessed) "
440         << "failed";
441   }
442 }
443
444 void GpuVideoDecodeAccelerator::NotifyInitializeDone() {
445   GpuCommandBufferMsg_CreateVideoDecoder::WriteReplyParams(
446       init_done_msg_, host_route_id_);
447   if (!Send(init_done_msg_))
448     DLOG(ERROR) << "Send(init_done_msg_) failed";
449   init_done_msg_ = NULL;
450 }
451
452 void GpuVideoDecodeAccelerator::NotifyFlushDone() {
453   if (!Send(new AcceleratedVideoDecoderHostMsg_FlushDone(host_route_id_)))
454     DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_FlushDone) failed";
455 }
456
457 void GpuVideoDecodeAccelerator::NotifyResetDone() {
458   if (!Send(new AcceleratedVideoDecoderHostMsg_ResetDone(host_route_id_)))
459     DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_ResetDone) failed";
460 }
461
// Teardown entry point (also called from OnDestroy()). Synchronizes with
// the IO thread, destroys the VDA while a GL context is still available,
// then self-deletes.
void GpuVideoDecodeAccelerator::OnWillDestroyStub() {
  // The stub is going away, so we have to stop and destroy VDA here, before
  // returning, because the VDA may need the GL context to run and/or do its
  // cleanup. We cannot destroy the VDA before the IO thread message filter is
  // removed however, since we cannot service incoming messages with VDA gone.
  // We cannot simply check for existence of VDA on IO thread though, because
  // we don't want to synchronize the IO thread with the ChildThread.
  // So we have to wait for the RemoveFilter callback here instead and remove
  // the VDA after it arrives and before returning.
  if (filter_.get()) {
    stub_->channel()->RemoveFilter(filter_.get());
    // Blocks until OnFilterRemoved() signals from the IO thread.
    filter_removed_.Wait();
  }

  stub_->channel()->RemoveRoute(host_route_id_);
  stub_->RemoveDestructionObserver(this);

  if (video_decode_accelerator_)
    // release() hands ownership to Destroy(), which per the VDA contract is
    // responsible for deleting the decoder — presumably after async cleanup;
    // confirm against the VideoDecodeAccelerator interface.
    video_decode_accelerator_.release()->Destroy();

  delete this;
}
484
485 bool GpuVideoDecodeAccelerator::Send(IPC::Message* message) {
486   if (filter_.get() && io_message_loop_->BelongsToCurrentThread())
487     return filter_->SendOnIOThread(message);
488   DCHECK(child_message_loop_->BelongsToCurrentThread());
489   return stub_->channel()->Send(message);
490 }
491
// Marks the texture backing |picture|'s buffer as cleared in the texture
// manager the first time the buffer is delivered; no-op afterwards. Must
// run on the child thread (the texture manager is not thread-safe here).
void GpuVideoDecodeAccelerator::SetTextureCleared(
    const media::Picture& picture) {
  DCHECK(child_message_loop_->BelongsToCurrentThread());
  DebugAutoLock auto_lock(debug_uncleared_textures_lock_);
  std::map<int32, scoped_refptr<gpu::gles2::TextureRef> >::iterator it;
  it = uncleared_textures_.find(picture.picture_buffer_id());
  if (it == uncleared_textures_.end())
    return;  // the texture has been cleared

  scoped_refptr<gpu::gles2::TextureRef> texture_ref = it->second;
  GLenum target = texture_ref->texture()->target();
  gpu::gles2::TextureManager* texture_manager =
      stub_->decoder()->GetContextGroup()->texture_manager();
  DCHECK(!texture_ref->texture()->IsLevelCleared(target, 0));
  texture_manager->SetLevelCleared(texture_ref, target, 0, true);
  uncleared_textures_.erase(it);
}
509
510 }  // namespace content