1 From c9f2fa16578bc20c83247e72608b5d6ca4dff6ba Mon Sep 17 00:00:00 2001
2 From: "qing.zhang" <qing.zhang@intel.com>
3 Date: Thu, 7 Nov 2013 08:59:38 -0500
4 Subject: [PATCH] [Tizen] Enabling Hardware Acceleration with Libva and EGL in
5      VDA for Tizen Mobile on Chromium v31+.
6
7 Why we need to maintain this patch on our side:
8 ===========================================
9 1) Upstream has confirmed that VAVDA will remain restricted to
10    CrOS/X86 for dev & testing only and is not on the Chromium road map.
11 2) CrOS/X86 has no plan to support an EGL backend; this was
12    finalized in June 2012 and is a matter for the CrOS graphics team.
13
14 So upstream has no plan to leverage VAVDA with an EGL graphics
15    backend on any X86 platform.
16
17 3) The Tizen mobile driver only supports EGL as the texture
18    backend. The video hardware acceleration in xwalk has to
19    rely on EGL, not GLX, to bind decoded pixmaps.
20 ===========================================
21 That's why we enable EGL specifically for VAVDA in the Tizen port.
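
For reference, the binding path this patch relies on is the standard
EGLImage-from-native-pixmap route (the same calls appear in
TFPPicture::Initialize()/Bind() below); a minimal sketch, assuming an
X11-backed EGL display, the EGL_KHR_image_pixmap and GL_OES_EGL_image
extensions, and placeholder names egl_display/x_pixmap/texture_id for
objects the decoder already owns:

    // Sketch only: expose the contents of an X Pixmap through a GL texture
    // via EGL, since the Tizen mobile driver has no GLX texture-from-pixmap.
    EGLint attrs[] = { EGL_IMAGE_PRESERVED_KHR, EGL_TRUE, EGL_NONE };
    EGLImageKHR image = eglCreateImageKHR(egl_display, EGL_NO_CONTEXT,
                                          EGL_NATIVE_PIXMAP_KHR,
                                          (EGLClientBuffer)x_pixmap, attrs);
    glBindTexture(GL_TEXTURE_2D, texture_id);
    glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, image);
    // libva then renders each decoded VASurface into x_pixmap (vaPutSurface),
    // and the bound texture picks up the result for compositing.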
22 ---
23  .../gpu/media/gpu_video_decode_accelerator.cc      |   8 +
24  .../media/vaapi_video_decode_accelerator_tizen.cc  | 908 +++++++++++++++++++++
25  .../media/vaapi_video_decode_accelerator_tizen.h   | 273 +++++++
26  content/content_common.gypi                        |  26 +
27  4 files changed, 1215 insertions(+)
28  create mode 100644 content/common/gpu/media/vaapi_video_decode_accelerator_tizen.cc
29  create mode 100644 content/common/gpu/media/vaapi_video_decode_accelerator_tizen.h
30
31 diff --git a/content/common/gpu/media/gpu_video_decode_accelerator.cc b/content/common/gpu/media/gpu_video_decode_accelerator.cc
32 index bd1dc5f..c5a6df2 100644
33 --- a/content/common/gpu/media/gpu_video_decode_accelerator.cc
34 +++ b/content/common/gpu/media/gpu_video_decode_accelerator.cc
35 @@ -32,6 +32,8 @@
36  #include "content/common/gpu/media/vaapi_video_decode_accelerator.h"
37  #include "ui/gl/gl_context_glx.h"
38  #include "ui/gl/gl_implementation.h"
39 +#elif defined(OS_TIZEN_MOBILE) && defined(ARCH_CPU_X86_FAMILY)
40 +#include "content/common/gpu/media/vaapi_video_decode_accelerator_tizen.h"
41  #elif defined(OS_ANDROID)
42  #include "content/common/gpu/media/android_video_decode_accelerator.h"
43  #endif
44 @@ -296,6 +298,12 @@ void GpuVideoDecodeAccelerator::Initialize(
45        static_cast<gfx::GLContextGLX*>(stub_->decoder()->GetGLContext());
46    video_decode_accelerator_.reset(new VaapiVideoDecodeAccelerator(
47        glx_context->display(), this, make_context_current_));
48 +#elif defined(OS_TIZEN_MOBILE) && defined(ARCH_CPU_X86_FAMILY)
49 +  video_decode_accelerator_.reset(new VaapiVideoDecodeAccelerator(
50 +      gfx::GLSurfaceEGL::GetHardwareDisplay(),
51 +      stub_->decoder()->GetGLContext()->GetHandle(),
52 +      this,
53 +      make_context_current_));
54  #elif defined(OS_ANDROID)
55    video_decode_accelerator_.reset(new AndroidVideoDecodeAccelerator(
56        this,
57 diff --git a/content/common/gpu/media/vaapi_video_decode_accelerator_tizen.cc b/content/common/gpu/media/vaapi_video_decode_accelerator_tizen.cc
58 new file mode 100644
59 index 0000000..cfff457
60 --- /dev/null
61 +++ b/content/common/gpu/media/vaapi_video_decode_accelerator_tizen.cc
62 @@ -0,0 +1,908 @@
63 +// Copyright (c) 2013 Intel Corporation. All rights reserved.
64 +// Use of this source code is governed by a BSD-style license that can be
65 +// found in the LICENSE file.
66 +
67 +#include "base/bind.h"
68 +#include "base/debug/trace_event.h"
69 +#include "base/logging.h"
70 +#include "base/metrics/histogram.h"
71 +#include "base/stl_util.h"
72 +#include "base/strings/string_util.h"
73 +#include "base/synchronization/waitable_event.h"
74 +#include "content/child/child_thread.h"
75 +#include "content/common/gpu/gpu_channel.h"
76 +#include "content/common/gpu/media/vaapi_video_decode_accelerator_tizen.h"
77 +#include "media/base/bind_to_current_loop.h"
78 +#include "media/video/picture.h"
79 +#include "ui/gl/gl_bindings.h"
80 +#include "ui/gl/scoped_binders.h"
81 +
82 +static void ReportToUMA(
83 +    content::VaapiH264Decoder::VAVDAH264DecoderFailure failure) {
84 +  UMA_HISTOGRAM_ENUMERATION(
85 +      "Media.VAVDAH264.DecoderFailure",
86 +      failure,
87 +      content::VaapiH264Decoder::VAVDA_H264_DECODER_FAILURES_MAX);
88 +}
89 +
90 +namespace content {
91 +
92 +#define RETURN_AND_NOTIFY_ON_FAILURE(result, log, error_code, ret)  \
93 +  do {                                                              \
94 +    if (!(result)) {                                                \
95 +      DVLOG(1) << log;                                              \
96 +      NotifyError(error_code);                                      \
97 +      return ret;                                                   \
98 +    }                                                               \
99 +  } while (0)
100 +
101 +VaapiVideoDecodeAccelerator::InputBuffer::InputBuffer() : id(0), size(0) {
102 +}
103 +
104 +VaapiVideoDecodeAccelerator::InputBuffer::~InputBuffer() {
105 +}
106 +
107 +void VaapiVideoDecodeAccelerator::NotifyError(Error error) {
108 +  if (message_loop_ != base::MessageLoop::current()) {
109 +    DCHECK_EQ(decoder_thread_.message_loop(), base::MessageLoop::current());
110 +    message_loop_->PostTask(FROM_HERE, base::Bind(
111 +        &VaapiVideoDecodeAccelerator::NotifyError, weak_this_, error));
112 +    return;
113 +  }
114 +
115 +  // Post Cleanup() as a task so we don't recursively acquire lock_.
116 +  message_loop_->PostTask(FROM_HERE, base::Bind(
117 +      &VaapiVideoDecodeAccelerator::Cleanup, weak_this_));
118 +
119 +  DVLOG(1) << "Notifying of error " << error;
120 +  if (client_) {
121 +    client_->NotifyError(error);
122 +    client_ptr_factory_.InvalidateWeakPtrs();
123 +  }
124 +}
125 +
126 +// A TFPPicture allocates an X Pixmap and binds it to the texture passed in a
127 +// PictureBuffer from the client. TFPPictures are created as a consequence of
128 +// receiving a set of PictureBuffers from the client and released at the end
129 +// of decode (or when a new set of PictureBuffers is required).
130 +//
131 +// TFPPictures are used for output: the contents of VASurfaces passed from the
132 +// decoder are put into the associated pixmap memory and sent to the client.
133 +class VaapiVideoDecodeAccelerator::TFPPicture {
134 + public:
135 +  ~TFPPicture();
136 +
137 +  static linked_ptr<TFPPicture> Create(
138 +      const base::Callback<bool(void)>& make_context_current,
139 +      EGLDisplay egl_display,
140 +      Display* x_display,
141 +      int32 picture_buffer_id,
142 +      uint32 texture_id,
143 +      gfx::Size size);
144 +  int32 picture_buffer_id() {
145 +    return picture_buffer_id_;
146 +  }
147 +
148 +  uint32 texture_id() {
149 +    return texture_id_;
150 +  }
151 +
152 +  gfx::Size size() {
153 +    return size_;
154 +  }
155 +
156 +  int x_pixmap() {
157 +    return x_pixmap_;
158 +  }
159 +
160 +  // Bind texture to pixmap. Needs to be called every frame.
161 +  bool Bind();
162 +
163 + private:
164 +  TFPPicture(const base::Callback<bool(void)>& make_context_current,
165 +             Display* x_display,
166 +             int32 picture_buffer_id,
167 +             uint32 texture_id,
168 +             gfx::Size size);
169 +
170 +  bool Initialize(EGLDisplay egl_display);
171 +
172 +  base::Callback<bool(void)> make_context_current_;
173 +
174 +  Display* x_display_;
175 +
176 +  // Output id for the client.
177 +  int32 picture_buffer_id_;
178 +  uint32 texture_id_;
179 +
180 +  gfx::Size size_;
181 +
182 +  // Pixmaps bound to this texture.
183 +  Pixmap x_pixmap_;
184 +  EGLDisplay egl_display_;
185 +  EGLImageKHR egl_image_;
186 +
187 +  DISALLOW_COPY_AND_ASSIGN(TFPPicture);
188 +};
189 +
190 +VaapiVideoDecodeAccelerator::TFPPicture::TFPPicture(
191 +    const base::Callback<bool(void)>& make_context_current,
192 +    Display* x_display,
193 +    int32 picture_buffer_id,
194 +    uint32 texture_id,
195 +    gfx::Size size)
196 +    : make_context_current_(make_context_current),
197 +      x_display_(x_display),
198 +      picture_buffer_id_(picture_buffer_id),
199 +      texture_id_(texture_id),
200 +      size_(size),
201 +      x_pixmap_(0),
202 +      egl_image_(0) {
203 +  DCHECK(!make_context_current_.is_null());
204 +}
205 +
206 +linked_ptr<VaapiVideoDecodeAccelerator::TFPPicture>
207 +VaapiVideoDecodeAccelerator::TFPPicture::Create(
208 +    const base::Callback<bool(void)>& make_context_current,
209 +    EGLDisplay egl_display,
210 +    Display* x_display,
211 +    int32 picture_buffer_id,
212 +    uint32 texture_id,
213 +    gfx::Size size) {
214 +
215 +  linked_ptr<TFPPicture> tfp_picture(
216 +    new TFPPicture(make_context_current, x_display,
217 +                     picture_buffer_id, texture_id, size));
218 +
219 +  if (!tfp_picture->Initialize(egl_display))
220 +    tfp_picture.reset();
221 +
222 +  return tfp_picture;
223 +}
224 +
225 +bool VaapiVideoDecodeAccelerator::TFPPicture::Initialize(
226 +    EGLDisplay egl_display) {
227 +  // Check for NULL prevents unittests from crashing on nonexistent ChildThread.
228 +  DCHECK(ChildThread::current() == NULL ||
229 +      ChildThread::current()->message_loop() == base::MessageLoop::current());
230 +
231 +  if (!make_context_current_.Run())
232 +    return false;
233 +
234 +  XWindowAttributes win_attr;
235 +  int screen = DefaultScreen(x_display_);
236 +  XGetWindowAttributes(x_display_, RootWindow(x_display_, screen), &win_attr);
237 +  // TODO(posciak): pass the depth required by libva, not the RootWindow's depth
238 +  x_pixmap_ = XCreatePixmap(x_display_, RootWindow(x_display_, screen),
239 +                            size_.width(), size_.height(), win_attr.depth);
240 +  if (!x_pixmap_) {
241 +    DVLOG(1) << "Failed creating an X Pixmap for TFP";
242 +    return false;
243 +  }
244 +
245 +  egl_display_ = egl_display;
246 +  EGLint image_attrs[] = { EGL_IMAGE_PRESERVED_KHR, 1, EGL_NONE };
247 +
248 +  egl_image_ = eglCreateImageKHR(egl_display_,
249 +                                EGL_NO_CONTEXT,
250 +                                EGL_NATIVE_PIXMAP_KHR,
251 +                                (EGLClientBuffer)x_pixmap_,
252 +                                image_attrs);
253 +  if (!egl_image_) {
254 +    DVLOG(1) << "Failed creating an EGLImage from the X Pixmap";
255 +    return false;
256 +  }
257 +
258 +  return true;
259 +}
260 +VaapiVideoDecodeAccelerator::TFPPicture::~TFPPicture() {
261 +  // Check for NULL prevents unittests from crashing on non-existing ChildThread.
262 +  DCHECK(ChildThread::current() == NULL ||
263 +      ChildThread::current()->message_loop() == base::MessageLoop::current());
264 +
265 +  // Unbind surface from texture and deallocate resources.
266 +  if (make_context_current_.Run()) {
267 +      eglDestroyImageKHR(egl_display_, egl_image_);
268 +  }
269 +
270 +  if (x_pixmap_)
271 +    XFreePixmap(x_display_, x_pixmap_);
272 +  XSync(x_display_, False);  // Needed to work around buggy vdpau-driver.
273 +}
274 +
275 +bool VaapiVideoDecodeAccelerator::TFPPicture::Bind() {
276 +  DCHECK(x_pixmap_);
277 +  DCHECK(egl_image_);
278 +
279 +  // Check for NULL prevents unittests from crashing on nonexistent ChildThread.
280 +  DCHECK(ChildThread::current() == NULL ||
281 +      ChildThread::current()->message_loop() == base::MessageLoop::current());
282 +
283 +  if (!make_context_current_.Run())
284 +    return false;
285 +
286 +  gfx::ScopedTextureBinder texture_binder(GL_TEXTURE_2D, texture_id_);
287 +  glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, egl_image_);
288 +
289 +  return true;
290 +}
291 +
292 +VaapiVideoDecodeAccelerator::TFPPicture*
293 +    VaapiVideoDecodeAccelerator::TFPPictureById(int32 picture_buffer_id) {
294 +  TFPPictures::iterator it = tfp_pictures_.find(picture_buffer_id);
295 +  if (it == tfp_pictures_.end()) {
296 +    DVLOG(1) << "Picture id " << picture_buffer_id << " does not exist";
297 +    return NULL;
298 +  }
299 +
300 +  return it->second.get();
301 +}
302 +
303 +VaapiVideoDecodeAccelerator::VaapiVideoDecodeAccelerator(
304 +    EGLDisplay egl_display, EGLContext egl_context,
305 +    Client* client,
306 +    const base::Callback<bool(void)>& make_context_current)
307 +    : x_display_(0),
308 +      egl_display_(egl_display),
309 +      egl_context_(egl_context),
310 +      make_context_current_(make_context_current),
311 +      state_(kUninitialized),
312 +      input_ready_(&lock_),
313 +      surfaces_available_(&lock_),
314 +      message_loop_(base::MessageLoop::current()),
315 +      weak_this_(base::AsWeakPtr(this)),
316 +      client_ptr_factory_(client),
317 +      client_(client_ptr_factory_.GetWeakPtr()),
318 +      decoder_thread_("VaapiDecoderThread"),
319 +      num_frames_at_client_(0),
320 +      num_stream_bufs_at_decoder_(0),
321 +      finish_flush_pending_(false),
322 +      awaiting_va_surfaces_recycle_(false),
323 +      requested_num_pics_(0) {
324 +  DCHECK(client);
325 +}
326 +VaapiVideoDecodeAccelerator::~VaapiVideoDecodeAccelerator() {
327 +  DCHECK_EQ(message_loop_, base::MessageLoop::current());
328 +}
329 +
330 +class ScopedPtrXFree {
331 + public:
332 +  void operator()(void* x) const {
333 +    ::XFree(x);
334 +  }
335 +};
336 +
337 +bool VaapiVideoDecodeAccelerator::Initialize(
338 +    media::VideoCodecProfile profile) {
339 +  DCHECK_EQ(message_loop_, base::MessageLoop::current());
340 +
341 +  base::AutoLock auto_lock(lock_);
342 +  DCHECK_EQ(state_, kUninitialized);
343 +  DVLOG(2) << "Initializing VAVDA, profile: " << profile;
344 +
345 +  if (!make_context_current_.Run())
346 +    return false;
347 +
348 +  x_display_ = base::MessagePumpForUI::GetDefaultXDisplay();
349 +
350 +  vaapi_wrapper_ = VaapiWrapper::Create(
351 +      profile, x_display_,
352 +      base::Bind(&ReportToUMA, content::VaapiH264Decoder::VAAPI_ERROR));
353 +
354 +  if (!vaapi_wrapper_.get()) {
355 +    DVLOG(1) << "Failed initializing VAAPI";
356 +    return false;
357 +  }
358 +
359 +  decoder_.reset(
360 +      new VaapiH264Decoder(
361 +          vaapi_wrapper_.get(),
362 +          media::BindToCurrentLoop(base::Bind(
363 +              &VaapiVideoDecodeAccelerator::SurfaceReady, weak_this_)),
364 +          base::Bind(&ReportToUMA)));
365 +
366 +  CHECK(decoder_thread_.Start());
367 +
368 +  state_ = kIdle;
369 +
370 +  message_loop_->PostTask(FROM_HERE, base::Bind(
371 +      &Client::NotifyInitializeDone, client_));
372 +  return true;
373 +}
374 +
375 +void VaapiVideoDecodeAccelerator::SurfaceReady(
376 +    int32 input_id,
377 +    const scoped_refptr<VASurface>& va_surface) {
378 +  DCHECK_EQ(message_loop_, base::MessageLoop::current());
379 +  DCHECK(!awaiting_va_surfaces_recycle_);
380 +
381 +  // Drop any requests to output if we are resetting or being destroyed.
382 +  if (state_ == kResetting || state_ == kDestroying)
383 +    return;
384 +
385 +  pending_output_cbs_.push(
386 +      base::Bind(&VaapiVideoDecodeAccelerator::OutputPicture,
387 +                 weak_this_, va_surface, input_id));
388 +
389 +  TryOutputSurface();
390 +}
391 +
392 +void VaapiVideoDecodeAccelerator::OutputPicture(
393 +    const scoped_refptr<VASurface>& va_surface,
394 +    int32 input_id,
395 +    TFPPicture* tfp_picture) {
396 +  DCHECK_EQ(message_loop_, base::MessageLoop::current());
397 +
398 +  int32 output_id = tfp_picture->picture_buffer_id();
399 +
400 +  TRACE_EVENT2("Video Decoder", "VAVDA::OutputSurface",
401 +               "input_id", input_id,
402 +               "output_id", output_id);
403 +
404 +  DVLOG(3) << "Outputting VASurface " << va_surface->id()
405 +           << " into pixmap bound to picture buffer id " << output_id;
406 +
407 +  RETURN_AND_NOTIFY_ON_FAILURE(tfp_picture->Bind(),
408 +                               "Failed binding texture to pixmap",
409 +                               PLATFORM_FAILURE, );
410 +
411 +  RETURN_AND_NOTIFY_ON_FAILURE(
412 +      vaapi_wrapper_->PutSurfaceIntoPixmap(va_surface->id(),
413 +                                           tfp_picture->x_pixmap(),
414 +                                           tfp_picture->size()),
415 +      "Failed putting surface into pixmap", PLATFORM_FAILURE, );
416 +
417 +  // Notify the client a picture is ready to be displayed.
418 +  ++num_frames_at_client_;
419 +  TRACE_COUNTER1("Video Decoder", "Textures at client", num_frames_at_client_);
420 +  DVLOG(4) << "Notifying output picture id " << output_id
421 +           << " for input "<< input_id << " is ready";
422 +  client_->PictureReady(media::Picture(output_id, input_id));
423 +}
424 +
425 +void VaapiVideoDecodeAccelerator::TryOutputSurface() {
426 +  DCHECK_EQ(message_loop_, base::MessageLoop::current());
427 +
428 +  // Handle Destroy() arriving while pictures are queued for output.
429 +  if (!client_)
430 +    return;
431 +
432 +  if (pending_output_cbs_.empty() || output_buffers_.empty())
433 +    return;
434 +
435 +  OutputCB output_cb = pending_output_cbs_.front();
436 +  pending_output_cbs_.pop();
437 +
438 +  TFPPicture* tfp_picture = TFPPictureById(output_buffers_.front());
439 +  DCHECK(tfp_picture);
440 +  output_buffers_.pop();
441 +
442 +  output_cb.Run(tfp_picture);
443 +
444 +  if (finish_flush_pending_ && pending_output_cbs_.empty())
445 +    FinishFlush();
446 +}
447 +
448 +void VaapiVideoDecodeAccelerator::MapAndQueueNewInputBuffer(
449 +    const media::BitstreamBuffer& bitstream_buffer) {
450 +  DCHECK_EQ(message_loop_, base::MessageLoop::current());
451 +  TRACE_EVENT1("Video Decoder", "MapAndQueueNewInputBuffer", "input_id",
452 +      bitstream_buffer.id());
453 +
454 +  DVLOG(4) << "Mapping new input buffer id: " << bitstream_buffer.id()
455 +           << " size: " << (int)bitstream_buffer.size();
456 +
457 +  scoped_ptr<base::SharedMemory> shm(
458 +      new base::SharedMemory(bitstream_buffer.handle(), true));
459 +  RETURN_AND_NOTIFY_ON_FAILURE(shm->Map(bitstream_buffer.size()),
460 +                              "Failed to map input buffer", UNREADABLE_INPUT,);
461 +
462 +  base::AutoLock auto_lock(lock_);
463 +
464 +  // Set up a new input buffer and queue it for later.
465 +  linked_ptr<InputBuffer> input_buffer(new InputBuffer());
466 +  input_buffer->shm.reset(shm.release());
467 +  input_buffer->id = bitstream_buffer.id();
468 +  input_buffer->size = bitstream_buffer.size();
469 +
470 +  ++num_stream_bufs_at_decoder_;
471 +  TRACE_COUNTER1("Video Decoder", "Stream buffers at decoder",
472 +                 num_stream_bufs_at_decoder_);
473 +
474 +  input_buffers_.push(input_buffer);
475 +  input_ready_.Signal();
476 +}
477 +
478 +bool VaapiVideoDecodeAccelerator::GetInputBuffer_Locked() {
479 +  DCHECK_EQ(decoder_thread_.message_loop(), base::MessageLoop::current());
480 +  lock_.AssertAcquired();
481 +
482 +  if (curr_input_buffer_.get())
483 +    return true;
484 +
485 +  // Will only wait if it is expected that in current state new buffers will
486 +  // be queued from the client via Decode(). The state can change during wait.
487 +  while (input_buffers_.empty() && (state_ == kDecoding || state_ == kIdle)) {
488 +    input_ready_.Wait();
489 +  }
490 +
491 +  // We could have got woken up in a different state or never got to sleep
492 +  // due to current state; check for that.
493 +  switch (state_) {
494 +    case kFlushing:
495 +      // Here we are only interested in finishing up decoding buffers that are
496 +      // already queued up. Otherwise will stop decoding.
497 +      if (input_buffers_.empty())
498 +        return false;
499 +      // else fallthrough
500 +    case kDecoding:
501 +    case kIdle:
502 +      DCHECK(!input_buffers_.empty());
503 +
504 +      curr_input_buffer_ = input_buffers_.front();
505 +      input_buffers_.pop();
506 +
507 +      DVLOG(4) << "New current bitstream buffer, id: "
508 +               << curr_input_buffer_->id
509 +               << " size: " << curr_input_buffer_->size;
510 +
511 +      decoder_->SetStream(
512 +          static_cast<uint8*>(curr_input_buffer_->shm->memory()),
513 +          curr_input_buffer_->size, curr_input_buffer_->id);
514 +      return true;
515 +
516 +    default:
517 +      // We got woken up due to being destroyed/reset, ignore any already
518 +      // queued inputs.
519 +      return false;
520 +  }
521 +}
522 +
523 +void VaapiVideoDecodeAccelerator::ReturnCurrInputBuffer_Locked() {
524 +  lock_.AssertAcquired();
525 +  DCHECK_EQ(decoder_thread_.message_loop(), base::MessageLoop::current());
526 +  DCHECK(curr_input_buffer_.get());
527 +
528 +  int32 id = curr_input_buffer_->id;
529 +  curr_input_buffer_.reset();
530 +  DVLOG(4) << "End of input buffer " << id;
531 +  message_loop_->PostTask(FROM_HERE, base::Bind(
532 +      &Client::NotifyEndOfBitstreamBuffer, client_, id));
533 +
534 +  --num_stream_bufs_at_decoder_;
535 +  TRACE_COUNTER1("Video Decoder", "Stream buffers at decoder",
536 +                 num_stream_bufs_at_decoder_);
537 +}
538 +
539 +bool VaapiVideoDecodeAccelerator::FeedDecoderWithOutputSurfaces_Locked() {
540 +  lock_.AssertAcquired();
541 +  DCHECK_EQ(decoder_thread_.message_loop(), base::MessageLoop::current());
542 +
543 +  while (available_va_surfaces_.empty() &&
544 +         (state_ == kDecoding || state_ == kFlushing || state_ == kIdle)) {
545 +    surfaces_available_.Wait();
546 +  }
547 +
548 +  if (state_ != kDecoding && state_ != kFlushing && state_ != kIdle)
549 +    return false;
550 +
551 +  VASurface::ReleaseCB va_surface_release_cb =
552 +      media::BindToCurrentLoop(base::Bind(
553 +          &VaapiVideoDecodeAccelerator::RecycleVASurfaceID, weak_this_));
554 +
555 +  while (!available_va_surfaces_.empty()) {
556 +    scoped_refptr<VASurface> va_surface(
557 +        new VASurface(available_va_surfaces_.front(), va_surface_release_cb));
558 +    available_va_surfaces_.pop_front();
559 +    decoder_->ReuseSurface(va_surface);
560 +  }
561 +
562 +  return true;
563 +}
564 +
565 +void VaapiVideoDecodeAccelerator::DecodeTask() {
566 +  DCHECK_EQ(decoder_thread_.message_loop(), base::MessageLoop::current());
567 +  TRACE_EVENT0("Video Decoder", "VAVDA::DecodeTask");
568 +  base::AutoLock auto_lock(lock_);
569 +
570 +  if (state_ != kDecoding)
571 +    return;
572 +
573 +  // Main decode task.
574 +  DVLOG(4) << "Decode task";
575 +
576 +  // Try to decode what stream data is (still) in the decoder until we run out
577 +  // of it.
578 +  while (GetInputBuffer_Locked()) {
579 +    DCHECK(curr_input_buffer_.get());
580 +
581 +    VaapiH264Decoder::DecResult res;
582 +    {
583 +      // We are OK releasing the lock here, as decoder never calls our methods
584 +      // directly and we will reacquire the lock before looking at state again.
585 +      // This is the main decode function of the decoder and while keeping
586 +      // the lock for its duration would be fine, it would defeat the purpose
587 +      // of having a separate decoder thread.
588 +      base::AutoUnlock auto_unlock(lock_);
589 +      res = decoder_->Decode();
590 +    }
591 +
592 +    switch (res) {
593 +      case VaapiH264Decoder::kAllocateNewSurfaces:
594 +        DVLOG(1) << "Decoder requesting a new set of surfaces";
595 +        message_loop_->PostTask(FROM_HERE, base::Bind(
596 +            &VaapiVideoDecodeAccelerator::InitiateSurfaceSetChange, weak_this_,
597 +                decoder_->GetRequiredNumOfPictures(),
598 +                decoder_->GetPicSize()));
599 +        // We'll get rescheduled once ProvidePictureBuffers() finishes.
600 +        return;
601 +
602 +      case VaapiH264Decoder::kRanOutOfStreamData:
603 +        ReturnCurrInputBuffer_Locked();
604 +        break;
605 +
606 +      case VaapiH264Decoder::kRanOutOfSurfaces:
607 +        // No more output buffers in the decoder, try getting more or go to
608 +        // sleep waiting for them.
609 +        if (!FeedDecoderWithOutputSurfaces_Locked())
610 +          return;
611 +
612 +        break;
613 +
614 +      case VaapiH264Decoder::kDecodeError:
615 +        RETURN_AND_NOTIFY_ON_FAILURE(false, "Error decoding stream",
616 +                                     PLATFORM_FAILURE, );
617 +        return;
618 +    }
619 +  }
620 +}
621 +
622 +void VaapiVideoDecodeAccelerator::InitiateSurfaceSetChange(size_t num_pics,
623 +                                                           gfx::Size size) {
624 +  DCHECK_EQ(message_loop_, base::MessageLoop::current());
625 +  DCHECK(!awaiting_va_surfaces_recycle_);
626 +
627 +  // At this point the decoder has stopped running and has already posted to
628 +  // our loop any remaining output request callbacks, which executed before we
629 +  // got here. Some of them might still be pending though, because we might
630 +  // not have had enough TFPPictures to output surfaces to. Initiate a wait
631 +  // cycle, which will wait for the client to return enough PictureBuffers to
632 +  // us, so that we can finish all pending output callbacks, releasing surfaces.
633 +  DVLOG(1) << "Initiating surface set change";
634 +  awaiting_va_surfaces_recycle_ = true;
635 +
636 +  requested_num_pics_ = num_pics;
637 +  requested_pic_size_ = size;
638 +
639 +  TryFinishSurfaceSetChange();
640 +}
641 +
642 +void VaapiVideoDecodeAccelerator::TryFinishSurfaceSetChange() {
643 +  DCHECK_EQ(message_loop_, base::MessageLoop::current());
644 +
645 +  if (!awaiting_va_surfaces_recycle_)
646 +    return;
647 +
648 +  if (!pending_output_cbs_.empty() ||
649 +      tfp_pictures_.size() != available_va_surfaces_.size()) {
650 +    // Either:
651 +    // 1. Not all pending output callbacks have been executed yet.
652 +    // Wait for the client to return enough pictures and retry later.
653 +    // 2. The above happened and all surface release callbacks have been posted
654 +    // as the result, but not all have executed yet. Post ourselves after them
655 +    // to let them release surfaces.
656 +    DVLOG(2) << "Awaiting pending output/surface release callbacks to finish";
657 +    message_loop_->PostTask(FROM_HERE, base::Bind(
658 +        &VaapiVideoDecodeAccelerator::TryFinishSurfaceSetChange, weak_this_));
659 +    return;
660 +  }
661 +
662 +  // All surfaces released, destroy them and dismiss all PictureBuffers.
663 +  awaiting_va_surfaces_recycle_ = false;
664 +  available_va_surfaces_.clear();
665 +  vaapi_wrapper_->DestroySurfaces();
666 +
667 +  for (TFPPictures::iterator iter = tfp_pictures_.begin();
668 +       iter != tfp_pictures_.end(); ++iter) {
669 +    DVLOG(2) << "Dismissing picture id: " << iter->first;
670 +    client_->DismissPictureBuffer(iter->first);
671 +  }
672 +  tfp_pictures_.clear();
673 +
674 +  // And ask for a new set as requested.
675 +  DVLOG(1) << "Requesting " << requested_num_pics_ << " pictures of size: "
676 +           << requested_pic_size_.ToString();
677 +
678 +  message_loop_->PostTask(FROM_HERE, base::Bind(
679 +      &Client::ProvidePictureBuffers, client_,
680 +      requested_num_pics_, requested_pic_size_, GL_TEXTURE_2D));
681 +}
682 +
683 +void VaapiVideoDecodeAccelerator::Decode(
684 +    const media::BitstreamBuffer& bitstream_buffer) {
685 +  DCHECK_EQ(message_loop_, base::MessageLoop::current());
686 +
687 +  TRACE_EVENT1("Video Decoder", "VAVDA::Decode", "Buffer id",
688 +               bitstream_buffer.id());
689 +
690 +  // We got a new input buffer from the client, map it and queue for later use.
691 +  MapAndQueueNewInputBuffer(bitstream_buffer);
692 +
693 +  base::AutoLock auto_lock(lock_);
694 +  switch (state_) {
695 +    case kIdle:
696 +      state_ = kDecoding;
697 +      decoder_thread_.message_loop()->PostTask(FROM_HERE, base::Bind(
698 +          &VaapiVideoDecodeAccelerator::DecodeTask,
699 +          base::Unretained(this)));
700 +      break;
701 +
702 +    case kDecoding:
703 +      // Decoder already running.
704 +    case kResetting:
705 +      // When resetting, allow accumulating bitstream buffers, so that
706 +      // the client can queue after-seek-buffers while we are finishing with
707 +      // the before-seek one.
708 +      break;
709 +
710 +    default:
711 +      RETURN_AND_NOTIFY_ON_FAILURE(false,
712 +          "Decode request from client in invalid state: " << state_,
713 +          PLATFORM_FAILURE, );
714 +      break;
715 +  }
716 +}
717 +
718 +void VaapiVideoDecodeAccelerator::RecycleVASurfaceID(
719 +    VASurfaceID va_surface_id) {
720 +  DCHECK_EQ(message_loop_, base::MessageLoop::current());
721 +  base::AutoLock auto_lock(lock_);
722 +
723 +  available_va_surfaces_.push_back(va_surface_id);
724 +  surfaces_available_.Signal();
725 +}
726 +
727 +void VaapiVideoDecodeAccelerator::AssignPictureBuffers(
728 +    const std::vector<media::PictureBuffer>& buffers) {
729 +  DCHECK_EQ(message_loop_, base::MessageLoop::current());
730 +
731 +  base::AutoLock auto_lock(lock_);
732 +  DCHECK(tfp_pictures_.empty());
733 +
734 +  while (!output_buffers_.empty())
735 +    output_buffers_.pop();
736 +
737 +  RETURN_AND_NOTIFY_ON_FAILURE(
738 +      buffers.size() == requested_num_pics_,
739 +      "Got an invalid number of picture buffers. (Got " << buffers.size()
740 +      << ", requested " << requested_num_pics_ << ")", INVALID_ARGUMENT, );
741 +  DCHECK(requested_pic_size_ == buffers[0].size());
742 +
743 +  std::vector<VASurfaceID> va_surface_ids;
744 +  RETURN_AND_NOTIFY_ON_FAILURE(
745 +      vaapi_wrapper_->CreateSurfaces(requested_pic_size_,
746 +                                     buffers.size(),
747 +                                     &va_surface_ids),
748 +      "Failed creating VA Surfaces", PLATFORM_FAILURE, );
749 +  DCHECK_EQ(va_surface_ids.size(), buffers.size());
750 +
751 +  for (size_t i = 0; i < buffers.size(); ++i) {
752 +    DVLOG(2) << "Assigning picture id: " << buffers[i].id()
753 +             << " to texture id: " << buffers[i].texture_id()
754 +             << " VASurfaceID: " << va_surface_ids[i];
755 +
756 +    linked_ptr<TFPPicture> tfp_picture(
757 +        TFPPicture::Create(make_context_current_, egl_display_, x_display_,
758 +                           buffers[i].id(), buffers[i].texture_id(),
759 +                           requested_pic_size_));
760 +
761 +    RETURN_AND_NOTIFY_ON_FAILURE(
762 +        tfp_picture.get(), "Failed assigning picture buffer to a texture.",
763 +        PLATFORM_FAILURE, );
764 +
765 +    bool inserted = tfp_pictures_.insert(std::make_pair(
766 +        buffers[i].id(), tfp_picture)).second;
767 +    DCHECK(inserted);
768 +
769 +    output_buffers_.push(buffers[i].id());
770 +    available_va_surfaces_.push_back(va_surface_ids[i]);
771 +    surfaces_available_.Signal();
772 +  }
773 +
774 +  state_ = kDecoding;
775 +  decoder_thread_.message_loop()->PostTask(FROM_HERE, base::Bind(
776 +      &VaapiVideoDecodeAccelerator::DecodeTask, base::Unretained(this)));
777 +}
778 +
779 +void VaapiVideoDecodeAccelerator::ReusePictureBuffer(int32 picture_buffer_id) {
780 +  DCHECK_EQ(message_loop_, base::MessageLoop::current());
781 +  TRACE_EVENT1("Video Decoder", "VAVDA::ReusePictureBuffer", "Picture id",
782 +               picture_buffer_id);
783 +
784 +  --num_frames_at_client_;
785 +  TRACE_COUNTER1("Video Decoder", "Textures at client", num_frames_at_client_);
786 +
787 +  output_buffers_.push(picture_buffer_id);
788 +  TryOutputSurface();
789 +}
790 +
791 +void VaapiVideoDecodeAccelerator::FlushTask() {
792 +  DCHECK_EQ(decoder_thread_.message_loop(), base::MessageLoop::current());
793 +  DVLOG(1) << "Flush task";
794 +
795 +  // First flush all the pictures that haven't been outputted, notifying the
796 +  // client to output them.
797 +  bool res = decoder_->Flush();
798 +  RETURN_AND_NOTIFY_ON_FAILURE(res, "Failed flushing the decoder.",
799 +                               PLATFORM_FAILURE, );
800 +
801 +  // Put the decoder in idle state, ready to resume.
802 +  decoder_->Reset();
803 +
804 +  message_loop_->PostTask(FROM_HERE, base::Bind(
805 +      &VaapiVideoDecodeAccelerator::FinishFlush, weak_this_));
806 +}
807 +
808 +void VaapiVideoDecodeAccelerator::Flush() {
809 +  DCHECK_EQ(message_loop_, base::MessageLoop::current());
810 +  DVLOG(1) << "Got flush request";
811 +
812 +  base::AutoLock auto_lock(lock_);
813 +  state_ = kFlushing;
814 +  // Queue a flush task after all existing decoding tasks to clean up.
815 +  decoder_thread_.message_loop()->PostTask(FROM_HERE, base::Bind(
816 +      &VaapiVideoDecodeAccelerator::FlushTask, base::Unretained(this)));
817 +
818 +  input_ready_.Signal();
819 +  surfaces_available_.Signal();
820 +}
821 +
822 +void VaapiVideoDecodeAccelerator::FinishFlush() {
823 +  DCHECK_EQ(message_loop_, base::MessageLoop::current());
824 +
825 +  finish_flush_pending_ = false;
826 +
827 +  base::AutoLock auto_lock(lock_);
828 +  if (state_ != kFlushing) {
829 +    DCHECK_EQ(state_, kDestroying);
830 +    return;  // We could've gotten destroyed already.
831 +  }
832 +
833 +  // Still waiting for textures from client to finish outputting all pending
834 +  // frames. Try again later.
835 +  if (!pending_output_cbs_.empty()) {
836 +    finish_flush_pending_ = true;
837 +    return;
838 +  }
839 +
840 +  state_ = kIdle;
841 +
842 +  message_loop_->PostTask(FROM_HERE, base::Bind(
843 +      &Client::NotifyFlushDone, client_));
844 +
845 +  DVLOG(1) << "Flush finished";
846 +}
847 +
848 +void VaapiVideoDecodeAccelerator::ResetTask() {
849 +  DCHECK_EQ(decoder_thread_.message_loop(), base::MessageLoop::current());
850 +  DVLOG(1) << "ResetTask";
851 +
852 +  // All the decoding tasks from before the reset request from client are done
853 +  // by now, as this task was scheduled after them and client is expected not
854 +  // to call Decode() after Reset() and before NotifyResetDone.
855 +  decoder_->Reset();
856 +
857 +  base::AutoLock auto_lock(lock_);
858 +
859 +  // Return current input buffer, if present.
860 +  if (curr_input_buffer_.get())
861 +    ReturnCurrInputBuffer_Locked();
862 +
863 +  // And let client know that we are done with reset.
864 +  message_loop_->PostTask(FROM_HERE, base::Bind(
865 +      &VaapiVideoDecodeAccelerator::FinishReset, weak_this_));
866 +}
867 +
868 +void VaapiVideoDecodeAccelerator::Reset() {
869 +  DCHECK_EQ(message_loop_, base::MessageLoop::current());
870 +  DVLOG(1) << "Got reset request";
871 +
872 +  // This will make any new decode tasks exit early.
873 +  base::AutoLock auto_lock(lock_);
874 +  state_ = kResetting;
875 +  finish_flush_pending_ = false;
876 +
877 +  // Drop all remaining input buffers, if present.
878 +  while (!input_buffers_.empty()) {
879 +    message_loop_->PostTask(FROM_HERE, base::Bind(
880 +        &Client::NotifyEndOfBitstreamBuffer, client_,
881 +        input_buffers_.front()->id));
882 +    input_buffers_.pop();
883 +  }
884 +
885 +  decoder_thread_.message_loop()->PostTask(FROM_HERE, base::Bind(
886 +      &VaapiVideoDecodeAccelerator::ResetTask, base::Unretained(this)));
887 +
888 +  input_ready_.Signal();
889 +  surfaces_available_.Signal();
890 +}
891 +
892 +void VaapiVideoDecodeAccelerator::FinishReset() {
893 +  DCHECK_EQ(message_loop_, base::MessageLoop::current());
894 +  DVLOG(1) << "FinishReset";
895 +  base::AutoLock auto_lock(lock_);
896 +
897 +  if (state_ != kResetting) {
898 +    DCHECK(state_ == kDestroying || state_ == kUninitialized) << state_;
899 +    return;  // We could've gotten destroyed already.
900 +  }
901 +
902 +  // Drop pending outputs.
903 +  while (!pending_output_cbs_.empty())
904 +    pending_output_cbs_.pop();
905 +
906 +  if (awaiting_va_surfaces_recycle_) {
907 +    // Decoder requested a new surface set while we were waiting for it to
908 +    // finish the last DecodeTask, running at the time of Reset().
909 +    // Let the surface set change finish first before resetting.
910 +    message_loop_->PostTask(FROM_HERE, base::Bind(
911 +        &VaapiVideoDecodeAccelerator::FinishReset, weak_this_));
912 +    return;
913 +  }
914 +
915 +  num_stream_bufs_at_decoder_ = 0;
916 +  state_ = kIdle;
917 +
918 +  message_loop_->PostTask(FROM_HERE, base::Bind(
919 +      &Client::NotifyResetDone, client_));
920 +
921 +  // The client might have given us new buffers via Decode() while we were
922 +  // resetting and might be waiting for our move, and not call Decode() anymore
923 +  // until we return something. Post a DecodeTask() so that we won't
924 +  // sleep forever waiting for Decode() in that case. Having two of them
925 +  // in the pipe is harmless, the additional one will return as soon as it sees
926 +  // that we are back in kDecoding state.
927 +  if (!input_buffers_.empty()) {
928 +    state_ = kDecoding;
929 +    decoder_thread_.message_loop()->PostTask(FROM_HERE, base::Bind(
930 +      &VaapiVideoDecodeAccelerator::DecodeTask,
931 +      base::Unretained(this)));
932 +  }
933 +
934 +  DVLOG(1) << "Reset finished";
935 +}
936 +
937 +void VaapiVideoDecodeAccelerator::Cleanup() {
938 +  DCHECK_EQ(message_loop_, base::MessageLoop::current());
939 +
940 +  if (state_ == kUninitialized || state_ == kDestroying)
941 +    return;
942 +
943 +  DVLOG(1) << "Destroying VAVDA";
944 +  base::AutoLock auto_lock(lock_);
945 +  state_ = kDestroying;
946 +
947 +  client_ptr_factory_.InvalidateWeakPtrs();
948 +
949 +  {
950 +    base::AutoUnlock auto_unlock(lock_);
951 +    // Post a dummy task to the decoder_thread_ to ensure it is drained.
952 +    base::WaitableEvent waiter(false, false);
953 +    decoder_thread_.message_loop()->PostTask(FROM_HERE, base::Bind(
954 +        &base::WaitableEvent::Signal, base::Unretained(&waiter)));
955 +    input_ready_.Signal();
956 +    surfaces_available_.Signal();
957 +    waiter.Wait();
958 +    decoder_thread_.Stop();
959 +  }
960 +
961 +  state_ = kUninitialized;
962 +}
963 +
964 +void VaapiVideoDecodeAccelerator::Destroy() {
965 +  DCHECK_EQ(message_loop_, base::MessageLoop::current());
966 +  Cleanup();
967 +  delete this;
968 +}
969 +
970 +}  // namespace content
971 diff --git a/content/common/gpu/media/vaapi_video_decode_accelerator_tizen.h b/content/common/gpu/media/vaapi_video_decode_accelerator_tizen.h
972 new file mode 100644
973 index 0000000..d41cf38
974 --- /dev/null
975 +++ b/content/common/gpu/media/vaapi_video_decode_accelerator_tizen.h
976 @@ -0,0 +1,273 @@
977 +// Copyright (c) 2013 Intel Corporation. All rights reserved.
978 +// Use of this source code is governed by a BSD-style license that can be
979 +// found in the LICENSE file.
980 +//
981 +// This file contains an implementation of VideoDecodeAccelerator
982 +// that utilizes the hardware video decoder present on Intel CPUs on Tizen.
983 +
984 +#ifndef CONTENT_COMMON_GPU_MEDIA_VAAPI_VIDEO_DECODE_ACCELERATOR_TIZEN_H_
985 +#define CONTENT_COMMON_GPU_MEDIA_VAAPI_VIDEO_DECODE_ACCELERATOR_TIZEN_H_
986 +
987 +#include <map>
988 +#include <queue>
989 +#include <utility>
990 +#include <vector>
991 +
992 +#include "base/logging.h"
993 +#include "base/memory/linked_ptr.h"
994 +#include "base/memory/shared_memory.h"
995 +#include "base/memory/weak_ptr.h"
996 +#include "base/message_loop/message_loop.h"
997 +#include "base/synchronization/condition_variable.h"
998 +#include "base/synchronization/lock.h"
999 +#include "base/threading/non_thread_safe.h"
1000 +#include "base/threading/thread.h"
1001 +#include "content/common/content_export.h"
1002 +#include "content/common/gpu/media/vaapi_h264_decoder.h"
1003 +#include "content/common/gpu/media/vaapi_wrapper.h"
1004 +#include "content/common/gpu/media/video_decode_accelerator_impl.h"
1005 +#include "media/base/bitstream_buffer.h"
1006 +#include "media/video/picture.h"
1007 +#include "media/video/video_decode_accelerator.h"
1008 +#include "ui/gl/gl_bindings.h"
1009 +
1010 +namespace content {
1011 +
1012 +// Class to provide video decode acceleration for Intel systems with hardware
1013 +// support for it, and on which libva is available.
1014 +// Decoding tasks are performed in a separate decoding thread.
1015 +//
1016 +// Threading/life-cycle: this object is created & destroyed on the GPU
1017 +// ChildThread.  A few methods on it are called on the decoder thread which is
1018 +// stopped during |this->Destroy()|, so any tasks posted to the decoder thread
1019 +// can assume |*this| is still alive.  See |weak_this_| below for more details.
1020 +class CONTENT_EXPORT VaapiVideoDecodeAccelerator
1021 +    : public VideoDecodeAcceleratorImpl {
1022 + public:
1023 +  VaapiVideoDecodeAccelerator(
1024 +      EGLDisplay egl_display, EGLContext egl_context,
1025 +      Client* client,
1026 +      const base::Callback<bool(void)>& make_context_current);
1027 +  virtual ~VaapiVideoDecodeAccelerator();
1028 +
1029 +  // media::VideoDecodeAccelerator implementation.
1030 +  virtual bool Initialize(media::VideoCodecProfile profile) OVERRIDE;
1031 +  virtual void Decode(const media::BitstreamBuffer& bitstream_buffer) OVERRIDE;
1032 +  virtual void AssignPictureBuffers(
1033 +      const std::vector<media::PictureBuffer>& buffers) OVERRIDE;
1034 +  virtual void ReusePictureBuffer(int32 picture_buffer_id) OVERRIDE;
1035 +  virtual void Flush() OVERRIDE;
1036 +  virtual void Reset() OVERRIDE;
1037 +  virtual void Destroy() OVERRIDE;
1038 +
1039 + private:
1040 +  // Notify the client that |output_id| is ready for displaying.
1041 +  void NotifyPictureReady(int32 input_id, int32 output_id);
1042 +
1043 +  // Notify the client that an error has occurred and decoding cannot continue.
1044 +  void NotifyError(Error error);
1045 +
1046 +  // Map the received input buffer into this process' address space and
1047 +  // queue it for decode.
1048 +  void MapAndQueueNewInputBuffer(
1049 +      const media::BitstreamBuffer& bitstream_buffer);
1050 +
1051 +  // Get a new input buffer from the queue and set it up in decoder. This will
1052 +  // sleep if no input buffers are available. Return true if a new buffer has
1053 +  // been set up, false if an early exit has been requested (due to initiated
1054 +  // reset/flush/destroy).
1055 +  bool GetInputBuffer_Locked();
1056 +
1057 +  // Signal the client that the current buffer has been read and can be
1058 +  // returned. Will also release the mapping.
1059 +  void ReturnCurrInputBuffer_Locked();
1060 +
1061 +  // Pass one or more output buffers to the decoder. This will sleep
1062 +  // if no buffers are available. Return true if buffers have been set up or
1063 +  // false if an early exit has been requested (due to initiated
1064 +  // reset/flush/destroy).
1065 +  bool FeedDecoderWithOutputSurfaces_Locked();
1066 +
1067 +  // Continue decoding given input buffers and sleep waiting for input/output
1068 +  // as needed. Will exit if a new set of surfaces or reset/flush/destroy
1069 +  // is requested.
1070 +  void DecodeTask();
1071 +
1072 +  // Scheduled after receiving a flush request and executed after the current
1073 +  // decoding task finishes decoding pending inputs. Makes the decoder return
1074 +  // all remaining output pictures and puts it in an idle state, ready
1075 +  // to resume if needed and schedules a FinishFlush.
1076 +  void FlushTask();
1077 +
1078 +  // Scheduled by the FlushTask after decoder is flushed to put VAVDA into idle
1079 +  // state and notify the client that flushing has been finished.
1080 +  void FinishFlush();
1081 +
1082 +  // Scheduled after receiving a reset request and executed after the current
1083 +  // decoding task finishes decoding the current frame. Puts the decoder into
1084 +  // an idle state, ready to resume if needed, discarding decoded but not yet
1085 +  // outputted pictures (decoder keeps ownership of their associated picture
1086 +  // buffers). Schedules a FinishReset afterwards.
1087 +  void ResetTask();
1088 +
1089 +  // Scheduled by ResetTask after it's done putting VAVDA into an idle state.
1090 +  // Drops remaining input buffers and notifies the client that reset has been
1091 +  // finished.
1092 +  void FinishReset();
1093 +
1094 +  // Helper for Destroy(), doing all the actual work except for deleting self.
1095 +  void Cleanup();
1096 +
1097 +  // Get a usable framebuffer configuration for use in binding textures
1098 +  // or return false on failure.
1099 +  bool InitializeFBConfig();
1100 +
1101 +  // Callback for the decoder to execute when it wants us to output given
1102 +  // |va_surface|.
1103 +  void SurfaceReady(int32 input_id, const scoped_refptr<VASurface>& va_surface);
1104 +
1105 +  // Represents a texture bound to an X Pixmap for output purposes.
1106 +  class TFPPicture;
1107 +
1108 +  // Callback to be executed once we have a |va_surface| to be output and
1109 +  // an available |tfp_picture| to use for output.
1110 +  // Puts contents of |va_surface| into given |tfp_picture|, releases the
1111 +  // surface and passes the resulting picture to client for output.
1112 +  void OutputPicture(const scoped_refptr<VASurface>& va_surface,
1113 +                     int32 input_id,
1114 +                     TFPPicture* tfp_picture);
1115 +
1116 +  // Try to OutputPicture() if we have both a ready surface and picture.
1117 +  void TryOutputSurface();
1118 +
1119 +  // Called when a VASurface is no longer in use by the decoder or is not being
1120 +  // synced/waiting to be synced to a picture. Returns it to available surfaces
1121 +  // pool.
1122 +  void RecycleVASurfaceID(VASurfaceID va_surface_id);
1123 +
1124 +  // Initiate wait cycle for surfaces to be released before we release them
1125 +  // and allocate new ones, as requested by the decoder.
1126 +  void InitiateSurfaceSetChange(size_t num_pics, gfx::Size size);
1127 +  // Check if the surfaces have been released or post ourselves for later.
1128 +  void TryFinishSurfaceSetChange();
1129 +
1130 +  // Client-provided X/EGL state.
1131 +  Display* x_display_;
1132 +  EGLDisplay egl_display_;
1133 +  EGLContext egl_context_;
1134 +  base::Callback<bool(void)> make_context_current_;
1135 +
1136 +  // VAVDA state.
1137 +  enum State {
1138 +    // Initialize() not called yet or failed.
1139 +    kUninitialized,
1140 +    // DecodeTask running.
1141 +    kDecoding,
1142 +    // Resetting, waiting for decoder to finish current task and cleanup.
1143 +    kResetting,
1144 +    // Flushing, waiting for decoder to finish current task and cleanup.
1145 +    kFlushing,
1146 +    // Idle, decoder in state ready to start/resume decoding.
1147 +    kIdle,
1148 +    // Destroying, waiting for the decoder to finish current task.
1149 +    kDestroying,
1150 +  };
1151 +
1152 +  // Protects input buffer and surface queues and state_.
1153 +  base::Lock lock_;
1154 +  State state_;
1155 +
1156 +  // An input buffer awaiting consumption, provided by the client.
1157 +  struct InputBuffer {
1158 +    InputBuffer();
1159 +    ~InputBuffer();
1160 +
1161 +    int32 id;
1162 +    size_t size;
1163 +    scoped_ptr<base::SharedMemory> shm;
1164 +  };
1165 +
1166 +  // Queue for incoming input buffers.
1167 +  typedef std::queue<linked_ptr<InputBuffer> > InputBuffers;
1168 +  InputBuffers input_buffers_;
1169 +  // Signalled when input buffers are queued onto the input_buffers_ queue.
1170 +  base::ConditionVariable input_ready_;
1171 +
1172 +  // Current input buffer at decoder.
1173 +  linked_ptr<InputBuffer> curr_input_buffer_;
1174 +
1175 +  // Queue for incoming output buffers (texture ids).
1176 +  typedef std::queue<int32> OutputBuffers;
1177 +  OutputBuffers output_buffers_;
1178 +
1179 +  typedef std::map<int32, linked_ptr<TFPPicture> > TFPPictures;
1180 +  // All allocated TFPPictures, regardless of their current state. TFPPictures
1181 +  // are allocated once and destroyed at the end of decode.
1182 +  TFPPictures tfp_pictures_;
1183 +
1184 +  // Return a TFPPicture associated with given client-provided id.
1185 +  TFPPicture* TFPPictureById(int32 picture_buffer_id);
1186 +
1187 +  // VA Surfaces no longer in use that can be passed back to the decoder for
1188 +  // reuse, once it requests them.
1189 +  std::list<VASurfaceID> available_va_surfaces_;
1190 +  // Signalled when output surfaces are queued onto the available_va_surfaces_
1191 +  // queue.
1192 +  base::ConditionVariable surfaces_available_;
1193 +
1194 +  // Pending output requests from the decoder. When it indicates that we should
1195 +  // output a surface and we have an available TFPPicture (i.e. texture) ready
1196 +  // to use, we'll execute the callback passing the TFPPicture. The callback
1197 +  // will put the contents of the surface into the picture and return it to
1198 +  // the client, releasing the surface as well.
1199 +  // If we don't have any available TFPPictures at the time when the decoder
1200 +  // requests output, we'll store the request on pending_output_cbs_ queue for
1201 +  // later and run it once the client gives us more textures
1202 +  // via ReusePictureBuffer().
1203 +  typedef base::Callback<void(TFPPicture*)> OutputCB;
1204 +  std::queue<OutputCB> pending_output_cbs_;
1205 +
1206 +  // ChildThread's message loop
1207 +  base::MessageLoop* message_loop_;
1208 +
1209 +  // WeakPtr<> pointing to |this| for use in posting tasks from the decoder
1210 +  // thread back to the ChildThread.  Because the decoder thread is a member of
1211 +  // this class, any task running on the decoder thread is guaranteed that this
1212 +  // object is still alive.  As a result, tasks posted from ChildThread to
1213 +  // decoder thread should use base::Unretained(this), and tasks posted from the
1214 +  // decoder thread to the ChildThread should use |weak_this_|.
1215 +  base::WeakPtr<VaapiVideoDecodeAccelerator> weak_this_;
1216 +
1217 +  // To expose client callbacks from VideoDecodeAccelerator.
1218 +  // NOTE: all calls to these objects *MUST* be executed on message_loop_.
1219 +  base::WeakPtrFactory<Client> client_ptr_factory_;
1220 +  base::WeakPtr<Client> client_;
1221 +
1222 +  scoped_ptr<VaapiWrapper> vaapi_wrapper_;
1223 +
1224 +  // Comes after vaapi_wrapper_ to ensure its destructor is executed before
1225 +  // vaapi_wrapper_ is destroyed.
1226 +  scoped_ptr<VaapiH264Decoder> decoder_;
1227 +  base::Thread decoder_thread_;
1228 +
1229 +  int num_frames_at_client_;
1230 +  int num_stream_bufs_at_decoder_;
1231 +
1232 +  // Whether we are waiting for any pending_output_cbs_ to be run before
1233 +  // NotifyingFlushDone.
1234 +  bool finish_flush_pending_;
1235 +
1236 +  // Decoder requested a new surface set and we are waiting for all the surfaces
1237 +  // to be returned before we can free them.
1238 +  bool awaiting_va_surfaces_recycle_;
1239 +
1240 +  // Last requested number/resolution of output picture buffers.
1241 +  size_t requested_num_pics_;
1242 +  gfx::Size requested_pic_size_;
1243 +
1244 +  DISALLOW_COPY_AND_ASSIGN(VaapiVideoDecodeAccelerator);
1245 +};
1246 +
1247 +}  // namespace content
1248 +
1249 +#endif  // CONTENT_COMMON_GPU_MEDIA_VAAPI_VIDEO_DECODE_ACCELERATOR_TIZEN_H_
1250 diff --git a/content/content_common.gypi b/content/content_common.gypi
1251 index 9d6cb61..3f53dd5 100644
1252 --- a/content/content_common.gypi
1253 +++ b/content/content_common.gypi
1254 @@ -583,6 +583,32 @@
1255          '<(DEPTH)/third_party/libva',
1256        ],
1257      }],
1258 +    ['target_arch != "arm" and tizen_mobile == 1 and use_x11 == 1', {
1259 +      'dependencies': [
1260 +        '../media/media.gyp:media',
1261 +      ],
1262 +      'sources': [
1263 +        'common/gpu/media/h264_dpb.cc',
1264 +        'common/gpu/media/h264_dpb.h',
1265 +        'common/gpu/media/va_surface.h',
1266 +        'common/gpu/media/vaapi_h264_decoder.cc',
1267 +        'common/gpu/media/vaapi_h264_decoder.h',
1268 +        'common/gpu/media/vaapi_video_decode_accelerator_tizen.cc',
1269 +        'common/gpu/media/vaapi_video_decode_accelerator_tizen.h',
1270 +        'common/gpu/media/vaapi_wrapper.cc',
1271 +        'common/gpu/media/vaapi_wrapper.h',
1272 +      ],
1273 +      'include_dirs': [
1274 +        '<(DEPTH)/third_party/libva',
1275 +        '<(DEPTH)/third_party/khronos',
1276 +      ],
1277 +      'link_settings': {
1278 +        'libraries': [
1279 +          '-lEGL',
1280 +          '-lGLESv2',
1281 +        ],
1282 +      },
1283 +    }],
1284      ['OS=="win"', {
1285        'dependencies': [
1286          '../media/media.gyp:media',
1287 diff --git a/content/gpu/gpu_main.cc b/content/gpu/gpu_main.cc
1288 index 9e29e03..2c04d40 100644
1289 --- a/content/gpu/gpu_main.cc
1290 +++ b/content/gpu/gpu_main.cc
1291 @@ -42,7 +42,8 @@
1292  #include "sandbox/win/src/sandbox.h"
1293  #elif defined(OS_CHROMEOS) && defined(ARCH_CPU_ARMEL) && defined(USE_X11)
1294  #include "content/common/gpu/media/exynos_video_decode_accelerator.h"
1295 -#elif defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY) && defined(USE_X11)
1296 +#elif (defined(OS_CHROMEOS) || defined(OS_TIZEN_MOBILE)) && \
1297 +    defined(ARCH_CPU_X86_FAMILY) && defined(USE_X11)
1298  #include "content/common/gpu/media/vaapi_wrapper.h"
1299  #endif
1300  
1301 @@ -360,7 +361,8 @@ bool WarmUpSandbox(const CommandLine& command_line) {
1302  
1303  #if defined(OS_CHROMEOS) && defined(ARCH_CPU_ARMEL) && defined(USE_X11)
1304    ExynosVideoDecodeAccelerator::PreSandboxInitialization();
1305 -#elif defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY) && defined(USE_X11)
1306 +#elif (defined(OS_CHROMEOS) || defined(OS_TIZEN_MOBILE)) && \
1307 +    defined(ARCH_CPU_X86_FAMILY) && defined(USE_X11)
1308    VaapiWrapper::PreSandboxInitialization();
1309  #endif
1310  
1311 -- 
1312 1.8.3.2
1313