--- /dev/null
+From c9f2fa16578bc20c83247e72608b5d6ca4dff6ba Mon Sep 17 00:00:00 2001
+From: "qing.zhang" <qing.zhang@intel.com>
+Date: Thu, 7 Nov 2013 08:59:38 -0500
+Subject: [PATCH] [Tizen] Enabling Hardware Acceleration with Libva and EGL in
+ VDA for Tizen Mobile within chromium v31+.
+
+Why we need to maintain it on our side:
+===========================================
+1) Upstream has confirmed that VAVDA will continue to be restricted to
+ CrOS/X86 for development & testing only; it is not on the Chromium road map.
+2) There is no plan to extend the CrOS/X86 VAVDA with an EGL backend; this
+ was finalized in June 2012 and is to be addressed by the CrOS graphics team.
+
+So, upstream has no plan to leverage VAVDA with an EGL graphics
+ backend on any X86 platform.
+
+3) The Tizen Mobile driver only supports EGL as the texture
+ backend. The video HW acceleration in xwalk therefore has to
+ rely on EGL, not GLX, to bind the decoded pixmap.
+===========================================
+That's why we enable EGL-specific support for VAVDA in the Tizen port.
+---
+ .../gpu/media/gpu_video_decode_accelerator.cc | 8 +
+ .../media/vaapi_video_decode_accelerator_tizen.cc | 908 ++++++++++++++++++++
+ .../media/vaapi_video_decode_accelerator_tizen.h | 273 ++++++
+ content/content_common.gypi | 26 +
+ content/gpu/gpu_main.cc | 6 +-
+ 5 files changed, 1219 insertions(+), 2 deletions(-)
+ create mode 100644 content/common/gpu/media/vaapi_video_decode_accelerator_tizen.cc
+ create mode 100644 content/common/gpu/media/vaapi_video_decode_accelerator_tizen.h
+
+diff --git a/content/common/gpu/media/gpu_video_decode_accelerator.cc b/content/common/gpu/media/gpu_video_decode_accelerator.cc
+index c5de2df..04f804a 100644
+--- a/content/common/gpu/media/gpu_video_decode_accelerator.cc
++++ b/content/common/gpu/media/gpu_video_decode_accelerator.cc
+@@ -29,6 +29,8 @@
+ #elif defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY) && defined(USE_X11)
+ #include "ui/gl/gl_context_glx.h"
+ #include "content/common/gpu/media/vaapi_video_decode_accelerator.h"
++#elif defined(OS_TIZEN_MOBILE) && defined(ARCH_CPU_X86_FAMILY)
++#include "content/common/gpu/media/vaapi_video_decode_accelerator_tizen.h"
+ #elif defined(OS_ANDROID)
+ #include "content/common/gpu/media/android_video_decode_accelerator.h"
+ #endif
+@@ -242,6 +244,12 @@ void GpuVideoDecodeAccelerator::Initialize(
+ video_decode_accelerator_.reset(new VaapiVideoDecodeAccelerator(
+ glx_context->display(), glx_context_handle, this,
+ make_context_current_));
++#elif defined(OS_TIZEN_MOBILE) && defined(ARCH_CPU_X86_FAMILY)
++ video_decode_accelerator_.reset(new VaapiVideoDecodeAccelerator(
++ gfx::GLSurfaceEGL::GetHardwareDisplay(),
++ stub_->decoder()->GetGLContext()->GetHandle(),
++ this,
++ make_context_current_));
+ #elif defined(OS_ANDROID)
+ video_decode_accelerator_.reset(new AndroidVideoDecodeAccelerator(
+ this,
+diff --git a/content/common/gpu/media/vaapi_video_decode_accelerator_tizen.cc b/content/common/gpu/media/vaapi_video_decode_accelerator_tizen.cc
+new file mode 100644
+index 0000000..8ca18b2
+--- /dev/null
++++ b/content/common/gpu/media/vaapi_video_decode_accelerator_tizen.cc
+@@ -0,0 +1,908 @@
++// Copyright (c) 2013 Intel Corporation. All rights reserved.
++// Use of this source code is governed by a BSD-style license that can be
++// found in the LICENSE file.
++
++#include "base/bind.h"
++#include "base/debug/trace_event.h"
++#include "base/logging.h"
++#include "base/metrics/histogram.h"
++#include "base/stl_util.h"
++#include "base/strings/string_util.h"
++#include "base/synchronization/waitable_event.h"
++#include "content/child/child_thread.h"
++#include "content/common/gpu/gpu_channel.h"
++#include "content/common/gpu/media/vaapi_video_decode_accelerator_tizen.h"
++#include "media/base/bind_to_loop.h"
++#include "media/video/picture.h"
++#include "ui/gl/gl_bindings.h"
++#include "ui/gl/scoped_binders.h"
++
++static void ReportToUMA(
++ content::VaapiH264Decoder::VAVDAH264DecoderFailure failure) {
++ UMA_HISTOGRAM_ENUMERATION(
++ "Media.VAVDAH264.DecoderFailure",
++ failure,
++ content::VaapiH264Decoder::VAVDA_H264_DECODER_FAILURES_MAX);
++}
++
++namespace content {
++
++#define RETURN_AND_NOTIFY_ON_FAILURE(result, log, error_code, ret) \
++ do { \
++ if (!(result)) { \
++ DVLOG(1) << log; \
++ NotifyError(error_code); \
++ return ret; \
++ } \
++ } while (0)
++
++VaapiVideoDecodeAccelerator::InputBuffer::InputBuffer() : id(0), size(0) {
++}
++
++VaapiVideoDecodeAccelerator::InputBuffer::~InputBuffer() {
++}
++
++void VaapiVideoDecodeAccelerator::NotifyError(Error error) {
++ if (message_loop_ != base::MessageLoop::current()) {
++ DCHECK_EQ(decoder_thread_.message_loop(), base::MessageLoop::current());
++ message_loop_->PostTask(FROM_HERE, base::Bind(
++ &VaapiVideoDecodeAccelerator::NotifyError, weak_this_, error));
++ return;
++ }
++
++ // Post Cleanup() as a task so we don't recursively acquire lock_.
++ message_loop_->PostTask(FROM_HERE, base::Bind(
++ &VaapiVideoDecodeAccelerator::Cleanup, weak_this_));
++
++ DVLOG(1) << "Notifying of error " << error;
++ if (client_) {
++ client_->NotifyError(error);
++ client_ptr_factory_.InvalidateWeakPtrs();
++ }
++}
++
++// TFPPicture allocates X Pixmaps and binds them to textures passed
++// in PictureBuffers from clients to them. TFPPictures are created as
++// a consequence of receiving a set of PictureBuffers from clients and released
++// at the end of decode (or when a new set of PictureBuffers is required).
++//
++// TFPPictures are used for output, contents of VASurfaces passed from decoder
++// are put into the associated pixmap memory and sent to client.
++class VaapiVideoDecodeAccelerator::TFPPicture {
++ public:
++ ~TFPPicture();
++
++ static linked_ptr<TFPPicture> Create(
++ const base::Callback<bool(void)>& make_context_current,
++ EGLDisplay egl_display,
++ Display* x_display,
++ int32 picture_buffer_id,
++ uint32 texture_id,
++ gfx::Size size);
++ int32 picture_buffer_id() {
++ return picture_buffer_id_;
++ }
++
++ uint32 texture_id() {
++ return texture_id_;
++ }
++
++ gfx::Size size() {
++ return size_;
++ }
++
++ int x_pixmap() {
++ return x_pixmap_;
++ }
++
++ // Bind texture to pixmap. Needs to be called every frame.
++ bool Bind();
++
++ private:
++ TFPPicture(const base::Callback<bool(void)>& make_context_current,
++ Display* x_display,
++ int32 picture_buffer_id,
++ uint32 texture_id,
++ gfx::Size size);
++
++ bool Initialize(EGLDisplay egl_display);
++
++ base::Callback<bool(void)> make_context_current_;
++
++ Display* x_display_;
++
++ // Output id for the client.
++ int32 picture_buffer_id_;
++ uint32 texture_id_;
++
++ gfx::Size size_;
++
++ // Pixmaps bound to this texture.
++ Pixmap x_pixmap_;
++ EGLDisplay egl_display_;
++ EGLImageKHR egl_image_;
++
++ DISALLOW_COPY_AND_ASSIGN(TFPPicture);
++};
++
++VaapiVideoDecodeAccelerator::TFPPicture::TFPPicture(
++ const base::Callback<bool(void)>& make_context_current,
++ Display* x_display,
++ int32 picture_buffer_id,
++ uint32 texture_id,
++ gfx::Size size)
++ : make_context_current_(make_context_current),
++ x_display_(x_display),
++ picture_buffer_id_(picture_buffer_id),
++ texture_id_(texture_id),
++ size_(size),
++ x_pixmap_(0),
++ egl_image_(0) {
++ DCHECK(!make_context_current_.is_null());
++};
++
++linked_ptr<VaapiVideoDecodeAccelerator::TFPPicture>
++VaapiVideoDecodeAccelerator::TFPPicture::Create(
++ const base::Callback<bool(void)>& make_context_current,
++ EGLDisplay egl_display,
++ Display* x_display,
++ int32 picture_buffer_id,
++ uint32 texture_id,
++ gfx::Size size) {
++
++ linked_ptr<TFPPicture> tfp_picture(
++ new TFPPicture(make_context_current, x_display,
++ picture_buffer_id, texture_id, size));
++
++ if (!tfp_picture->Initialize(egl_display))
++ tfp_picture.reset();
++
++ return tfp_picture;
++}
++
++bool VaapiVideoDecodeAccelerator::TFPPicture::Initialize(
++ EGLDisplay egl_display) {
++ // Check for NULL prevents unittests from crashing on nonexistent ChildThread.
++ DCHECK(ChildThread::current() == NULL ||
++ ChildThread::current()->message_loop() == base::MessageLoop::current());
++
++ if (!make_context_current_.Run())
++ return false;
++
++ XWindowAttributes win_attr;
++ int screen = DefaultScreen(x_display_);
++ XGetWindowAttributes(x_display_, RootWindow(x_display_, screen), &win_attr);
++ //TODO(posciak): pass the depth required by libva, not the RootWindow's depth
++ x_pixmap_ = XCreatePixmap(x_display_, RootWindow(x_display_, screen),
++ size_.width(), size_.height(), win_attr.depth);
++ if (!x_pixmap_) {
++ DVLOG(1) << "Failed creating an X Pixmap for TFP";
++ return false;
++ }
++
++ egl_display_ = egl_display;
++ EGLint image_attrs[] = { EGL_IMAGE_PRESERVED_KHR, 1 , EGL_NONE };
++
++ egl_image_ = eglCreateImageKHR(egl_display_,
++ EGL_NO_CONTEXT,
++ EGL_NATIVE_PIXMAP_KHR,
++ (EGLClientBuffer)x_pixmap_,
++ image_attrs);
++ if (!egl_image_) {
++ DVLOG(1) << "Failed creating a EGLImage from Pixmap for KHR";
++ return false;
++ }
++
++ return true;
++}
++VaapiVideoDecodeAccelerator::TFPPicture::~TFPPicture() {
++ // Check for NULL prevents unittests from crashing on non-existing ChildThread.
++ DCHECK(ChildThread::current() == NULL ||
++ ChildThread::current()->message_loop() == base::MessageLoop::current());
++
++ // Unbind surface from texture and deallocate resources.
++ if (make_context_current_.Run()) {
++ eglDestroyImageKHR(egl_display_, egl_image_);
++ }
++
++ if (x_pixmap_)
++ XFreePixmap(x_display_, x_pixmap_);
++ XSync(x_display_, False); // Needed to work around buggy vdpau-driver.
++}
++
++bool VaapiVideoDecodeAccelerator::TFPPicture::Bind() {
++ DCHECK(x_pixmap_);
++ DCHECK(egl_image_);
++
++ // Check for NULL prevents unittests from crashing on nonexistent ChildThread.
++ DCHECK(ChildThread::current() == NULL ||
++ ChildThread::current()->message_loop() == base::MessageLoop::current());
++
++ if (!make_context_current_.Run())
++ return false;
++
++ gfx::ScopedTextureBinder texture_binder(GL_TEXTURE_2D, texture_id_);
++ glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, egl_image_);
++
++ return true;
++}
++
++VaapiVideoDecodeAccelerator::TFPPicture*
++ VaapiVideoDecodeAccelerator::TFPPictureById(int32 picture_buffer_id) {
++ TFPPictures::iterator it = tfp_pictures_.find(picture_buffer_id);
++ if (it == tfp_pictures_.end()) {
++ DVLOG(1) << "Picture id " << picture_buffer_id << " does not exist";
++ return NULL;
++ }
++
++ return it->second.get();
++}
++
++VaapiVideoDecodeAccelerator::VaapiVideoDecodeAccelerator(
++ EGLDisplay egl_display, EGLContext egl_context,
++ Client* client,
++ const base::Callback<bool(void)>& make_context_current)
++ : x_display_(0),
++ egl_display_(egl_display),
++ egl_context_(egl_context),
++ make_context_current_(make_context_current),
++ state_(kUninitialized),
++ input_ready_(&lock_),
++ surfaces_available_(&lock_),
++ message_loop_(base::MessageLoop::current()),
++ weak_this_(base::AsWeakPtr(this)),
++ client_ptr_factory_(client),
++ client_(client_ptr_factory_.GetWeakPtr()),
++ decoder_thread_("VaapiDecoderThread"),
++ num_frames_at_client_(0),
++ num_stream_bufs_at_decoder_(0),
++ finish_flush_pending_(false),
++ awaiting_va_surfaces_recycle_(false),
++ requested_num_pics_(0) {
++ DCHECK(client);
++}
++VaapiVideoDecodeAccelerator::~VaapiVideoDecodeAccelerator() {
++ DCHECK_EQ(message_loop_, base::MessageLoop::current());
++}
++
++class ScopedPtrXFree {
++ public:
++ void operator()(void* x) const {
++ ::XFree(x);
++ }
++};
++
++bool VaapiVideoDecodeAccelerator::Initialize(
++ media::VideoCodecProfile profile) {
++ DCHECK_EQ(message_loop_, base::MessageLoop::current());
++
++ base::AutoLock auto_lock(lock_);
++ DCHECK_EQ(state_, kUninitialized);
++ DVLOG(2) << "Initializing VAVDA, profile: " << profile;
++
++ if (!make_context_current_.Run())
++ return false;
++
++ x_display_ = base::MessagePumpForUI::GetDefaultXDisplay();
++
++ vaapi_wrapper_ = VaapiWrapper::Create(
++ profile, x_display_,
++ base::Bind(&ReportToUMA, content::VaapiH264Decoder::VAAPI_ERROR));
++
++ if (!vaapi_wrapper_.get()) {
++ DVLOG(1) << "Failed initializing VAAPI";
++ return false;
++ }
++
++ decoder_.reset(
++ new VaapiH264Decoder(
++ vaapi_wrapper_.get(),
++ media::BindToLoop(message_loop_->message_loop_proxy(), base::Bind(
++ &VaapiVideoDecodeAccelerator::SurfaceReady, weak_this_)),
++ base::Bind(&ReportToUMA)));
++
++ CHECK(decoder_thread_.Start());
++
++ state_ = kIdle;
++
++ message_loop_->PostTask(FROM_HERE, base::Bind(
++ &Client::NotifyInitializeDone, client_));
++ return true;
++}
++
++void VaapiVideoDecodeAccelerator::SurfaceReady(
++ int32 input_id,
++ const scoped_refptr<VASurface>& va_surface) {
++ DCHECK_EQ(message_loop_, base::MessageLoop::current());
++ DCHECK(!awaiting_va_surfaces_recycle_);
++
++ // Drop any requests to output if we are resetting or being destroyed.
++ if (state_ == kResetting || state_ == kDestroying)
++ return;
++
++ pending_output_cbs_.push(
++ base::Bind(&VaapiVideoDecodeAccelerator::OutputPicture,
++ weak_this_, va_surface, input_id));
++
++ TryOutputSurface();
++}
++
++void VaapiVideoDecodeAccelerator::OutputPicture(
++ const scoped_refptr<VASurface>& va_surface,
++ int32 input_id,
++ TFPPicture* tfp_picture) {
++ DCHECK_EQ(message_loop_, base::MessageLoop::current());
++
++ int32 output_id = tfp_picture->picture_buffer_id();
++
++ TRACE_EVENT2("Video Decoder", "VAVDA::OutputSurface",
++ "input_id", input_id,
++ "output_id", output_id);
++
++ DVLOG(3) << "Outputting VASurface " << va_surface->id()
++ << " into pixmap bound to picture buffer id " << output_id;
++
++ RETURN_AND_NOTIFY_ON_FAILURE(tfp_picture->Bind(),
++ "Failed binding texture to pixmap",
++ PLATFORM_FAILURE, );
++
++ RETURN_AND_NOTIFY_ON_FAILURE(
++ vaapi_wrapper_->PutSurfaceIntoPixmap(va_surface->id(),
++ tfp_picture->x_pixmap(),
++ tfp_picture->size()),
++ "Failed putting surface into pixmap", PLATFORM_FAILURE, );
++
++ // Notify the client a picture is ready to be displayed.
++ ++num_frames_at_client_;
++ TRACE_COUNTER1("Video Decoder", "Textures at client", num_frames_at_client_);
++ DVLOG(4) << "Notifying output picture id " << output_id
++ << " for input "<< input_id << " is ready";
++ client_->PictureReady(media::Picture(output_id, input_id));
++}
++
++void VaapiVideoDecodeAccelerator::TryOutputSurface() {
++ DCHECK_EQ(message_loop_, base::MessageLoop::current());
++
++ // Handle Destroy() arriving while pictures are queued for output.
++ if (!client_)
++ return;
++
++ if (pending_output_cbs_.empty() || output_buffers_.empty())
++ return;
++
++ OutputCB output_cb = pending_output_cbs_.front();
++ pending_output_cbs_.pop();
++
++ TFPPicture* tfp_picture = TFPPictureById(output_buffers_.front());
++ DCHECK(tfp_picture);
++ output_buffers_.pop();
++
++ output_cb.Run(tfp_picture);
++
++ if (finish_flush_pending_ && pending_output_cbs_.empty())
++ FinishFlush();
++}
++
++void VaapiVideoDecodeAccelerator::MapAndQueueNewInputBuffer(
++ const media::BitstreamBuffer& bitstream_buffer) {
++ DCHECK_EQ(message_loop_, base::MessageLoop::current());
++ TRACE_EVENT1("Video Decoder", "MapAndQueueNewInputBuffer", "input_id",
++ bitstream_buffer.id());
++
++ DVLOG(4) << "Mapping new input buffer id: " << bitstream_buffer.id()
++ << " size: " << (int)bitstream_buffer.size();
++
++ scoped_ptr<base::SharedMemory> shm(
++ new base::SharedMemory(bitstream_buffer.handle(), true));
++ RETURN_AND_NOTIFY_ON_FAILURE(shm->Map(bitstream_buffer.size()),
++ "Failed to map input buffer", UNREADABLE_INPUT,);
++
++ base::AutoLock auto_lock(lock_);
++
++ // Set up a new input buffer and queue it for later.
++ linked_ptr<InputBuffer> input_buffer(new InputBuffer());
++ input_buffer->shm.reset(shm.release());
++ input_buffer->id = bitstream_buffer.id();
++ input_buffer->size = bitstream_buffer.size();
++
++ ++num_stream_bufs_at_decoder_;
++ TRACE_COUNTER1("Video Decoder", "Stream buffers at decoder",
++ num_stream_bufs_at_decoder_);
++
++ input_buffers_.push(input_buffer);
++ input_ready_.Signal();
++}
++
++bool VaapiVideoDecodeAccelerator::GetInputBuffer_Locked() {
++ DCHECK_EQ(decoder_thread_.message_loop(), base::MessageLoop::current());
++ lock_.AssertAcquired();
++
++ if (curr_input_buffer_.get())
++ return true;
++
++ // Will only wait if it is expected that in current state new buffers will
++ // be queued from the client via Decode(). The state can change during wait.
++ while (input_buffers_.empty() && (state_ == kDecoding || state_ == kIdle)) {
++ input_ready_.Wait();
++ }
++
++ // We could have got woken up in a different state or never got to sleep
++ // due to current state; check for that.
++ switch (state_) {
++ case kFlushing:
++ // Here we are only interested in finishing up decoding buffers that are
++ // already queued up. Otherwise will stop decoding.
++ if (input_buffers_.empty())
++ return false;
++ // else fallthrough
++ case kDecoding:
++ case kIdle:
++ DCHECK(!input_buffers_.empty());
++
++ curr_input_buffer_ = input_buffers_.front();
++ input_buffers_.pop();
++
++ DVLOG(4) << "New current bitstream buffer, id: "
++ << curr_input_buffer_->id
++ << " size: " << curr_input_buffer_->size;
++
++ decoder_->SetStream(
++ static_cast<uint8*>(curr_input_buffer_->shm->memory()),
++ curr_input_buffer_->size, curr_input_buffer_->id);
++ return true;
++
++ default:
++ // We got woken up due to being destroyed/reset, ignore any already
++ // queued inputs.
++ return false;
++ }
++}
++
++void VaapiVideoDecodeAccelerator::ReturnCurrInputBuffer_Locked() {
++ lock_.AssertAcquired();
++ DCHECK_EQ(decoder_thread_.message_loop(), base::MessageLoop::current());
++ DCHECK(curr_input_buffer_.get());
++
++ int32 id = curr_input_buffer_->id;
++ curr_input_buffer_.reset();
++ DVLOG(4) << "End of input buffer " << id;
++ message_loop_->PostTask(FROM_HERE, base::Bind(
++ &Client::NotifyEndOfBitstreamBuffer, client_, id));
++
++ --num_stream_bufs_at_decoder_;
++ TRACE_COUNTER1("Video Decoder", "Stream buffers at decoder",
++ num_stream_bufs_at_decoder_);
++}
++
++bool VaapiVideoDecodeAccelerator::FeedDecoderWithOutputSurfaces_Locked() {
++ lock_.AssertAcquired();
++ DCHECK_EQ(decoder_thread_.message_loop(), base::MessageLoop::current());
++
++ while (available_va_surfaces_.empty() &&
++ (state_ == kDecoding || state_ == kFlushing || state_ == kIdle)) {
++ surfaces_available_.Wait();
++ }
++
++ if (state_ != kDecoding && state_ != kFlushing && state_ != kIdle)
++ return false;
++
++ VASurface::ReleaseCB va_surface_release_cb =
++ media::BindToLoop(message_loop_->message_loop_proxy(), base::Bind(
++ &VaapiVideoDecodeAccelerator::RecycleVASurfaceID, weak_this_));
++
++ while (!available_va_surfaces_.empty()) {
++ scoped_refptr<VASurface> va_surface(
++ new VASurface(available_va_surfaces_.front(), va_surface_release_cb));
++ available_va_surfaces_.pop_front();
++ decoder_->ReuseSurface(va_surface);
++ }
++
++ return true;
++}
++
++void VaapiVideoDecodeAccelerator::DecodeTask() {
++ DCHECK_EQ(decoder_thread_.message_loop(), base::MessageLoop::current());
++ TRACE_EVENT0("Video Decoder", "VAVDA::DecodeTask");
++ base::AutoLock auto_lock(lock_);
++
++ if (state_ != kDecoding)
++ return;
++
++ // Main decode task.
++ DVLOG(4) << "Decode task";
++
++ // Try to decode what stream data is (still) in the decoder until we run out
++ // of it.
++ while (GetInputBuffer_Locked()) {
++ DCHECK(curr_input_buffer_.get());
++
++ VaapiH264Decoder::DecResult res;
++ {
++ // We are OK releasing the lock here, as decoder never calls our methods
++ // directly and we will reacquire the lock before looking at state again.
++ // This is the main decode function of the decoder and while keeping
++ // the lock for its duration would be fine, it would defeat the purpose
++ // of having a separate decoder thread.
++ base::AutoUnlock auto_unlock(lock_);
++ res = decoder_->Decode();
++ }
++
++ switch (res) {
++ case VaapiH264Decoder::kAllocateNewSurfaces:
++ DVLOG(1) << "Decoder requesting a new set of surfaces";
++ message_loop_->PostTask(FROM_HERE, base::Bind(
++ &VaapiVideoDecodeAccelerator::InitiateSurfaceSetChange, weak_this_,
++ decoder_->GetRequiredNumOfPictures(),
++ decoder_->GetPicSize()));
++ // We'll get rescheduled once ProvidePictureBuffers() finishes.
++ return;
++
++ case VaapiH264Decoder::kRanOutOfStreamData:
++ ReturnCurrInputBuffer_Locked();
++ break;
++
++ case VaapiH264Decoder::kRanOutOfSurfaces:
++ // No more output buffers in the decoder, try getting more or go to
++ // sleep waiting for them.
++ if (!FeedDecoderWithOutputSurfaces_Locked())
++ return;
++
++ break;
++
++ case VaapiH264Decoder::kDecodeError:
++ RETURN_AND_NOTIFY_ON_FAILURE(false, "Error decoding stream",
++ PLATFORM_FAILURE, );
++ return;
++ }
++ }
++}
++
++void VaapiVideoDecodeAccelerator::InitiateSurfaceSetChange(size_t num_pics,
++ gfx::Size size) {
++ DCHECK_EQ(message_loop_, base::MessageLoop::current());
++ DCHECK(!awaiting_va_surfaces_recycle_);
++
++ // At this point decoder has stopped running and has already posted onto our
++ // loop any remaining output request callbacks, which executed before we got
++ // here. Some of them might have been pended though, because we might not
++ // have had enough TFPictures to output surfaces to. Initiate a wait cycle,
++ // which will wait for client to return enough PictureBuffers to us, so that
++ // we can finish all pending output callbacks, releasing associated surfaces.
++ DVLOG(1) << "Initiating surface set change";
++ awaiting_va_surfaces_recycle_ = true;
++
++ requested_num_pics_ = num_pics;
++ requested_pic_size_ = size;
++
++ TryFinishSurfaceSetChange();
++}
++
++void VaapiVideoDecodeAccelerator::TryFinishSurfaceSetChange() {
++ DCHECK_EQ(message_loop_, base::MessageLoop::current());
++
++ if (!awaiting_va_surfaces_recycle_)
++ return;
++
++ if (!pending_output_cbs_.empty() ||
++ tfp_pictures_.size() != available_va_surfaces_.size()) {
++ // Either:
++ // 1. Not all pending pending output callbacks have been executed yet.
++ // Wait for the client to return enough pictures and retry later.
++ // 2. The above happened and all surface release callbacks have been posted
++ // as the result, but not all have executed yet. Post ourselves after them
++ // to let them release surfaces.
++ DVLOG(2) << "Awaiting pending output/surface release callbacks to finish";
++ message_loop_->PostTask(FROM_HERE, base::Bind(
++ &VaapiVideoDecodeAccelerator::TryFinishSurfaceSetChange, weak_this_));
++ return;
++ }
++
++ // All surfaces released, destroy them and dismiss all PictureBuffers.
++ awaiting_va_surfaces_recycle_ = false;
++ available_va_surfaces_.clear();
++ vaapi_wrapper_->DestroySurfaces();
++
++ for (TFPPictures::iterator iter = tfp_pictures_.begin();
++ iter != tfp_pictures_.end(); ++iter) {
++ DVLOG(2) << "Dismissing picture id: " << iter->first;
++ client_->DismissPictureBuffer(iter->first);
++ }
++ tfp_pictures_.clear();
++
++ // And ask for a new set as requested.
++ DVLOG(1) << "Requesting " << requested_num_pics_ << " pictures of size: "
++ << requested_pic_size_.ToString();
++
++ message_loop_->PostTask(FROM_HERE, base::Bind(
++ &Client::ProvidePictureBuffers, client_,
++ requested_num_pics_, requested_pic_size_, GL_TEXTURE_2D));
++}
++
++void VaapiVideoDecodeAccelerator::Decode(
++ const media::BitstreamBuffer& bitstream_buffer) {
++ DCHECK_EQ(message_loop_, base::MessageLoop::current());
++
++ TRACE_EVENT1("Video Decoder", "VAVDA::Decode", "Buffer id",
++ bitstream_buffer.id());
++
++ // We got a new input buffer from the client, map it and queue for later use.
++ MapAndQueueNewInputBuffer(bitstream_buffer);
++
++ base::AutoLock auto_lock(lock_);
++ switch (state_) {
++ case kIdle:
++ state_ = kDecoding;
++ decoder_thread_.message_loop()->PostTask(FROM_HERE, base::Bind(
++ &VaapiVideoDecodeAccelerator::DecodeTask,
++ base::Unretained(this)));
++ break;
++
++ case kDecoding:
++ // Decoder already running.
++ case kResetting:
++ // When resetting, allow accumulating bitstream buffers, so that
++ // the client can queue after-seek-buffers while we are finishing with
++ // the before-seek one.
++ break;
++
++ default:
++ RETURN_AND_NOTIFY_ON_FAILURE(false,
++ "Decode request from client in invalid state: " << state_,
++ PLATFORM_FAILURE, );
++ break;
++ }
++}
++
++void VaapiVideoDecodeAccelerator::RecycleVASurfaceID(
++ VASurfaceID va_surface_id) {
++ DCHECK_EQ(message_loop_, base::MessageLoop::current());
++ base::AutoLock auto_lock(lock_);
++
++ available_va_surfaces_.push_back(va_surface_id);
++ surfaces_available_.Signal();
++}
++
++void VaapiVideoDecodeAccelerator::AssignPictureBuffers(
++ const std::vector<media::PictureBuffer>& buffers) {
++ DCHECK_EQ(message_loop_, base::MessageLoop::current());
++
++ base::AutoLock auto_lock(lock_);
++ DCHECK(tfp_pictures_.empty());
++
++ while (!output_buffers_.empty())
++ output_buffers_.pop();
++
++ RETURN_AND_NOTIFY_ON_FAILURE(
++ buffers.size() == requested_num_pics_,
++ "Got an invalid number of picture buffers. (Got " << buffers.size()
++ << ", requested " << requested_num_pics_ << ")", INVALID_ARGUMENT, );
++ DCHECK(requested_pic_size_ == buffers[0].size());
++
++ std::vector<VASurfaceID> va_surface_ids;
++ RETURN_AND_NOTIFY_ON_FAILURE(
++ vaapi_wrapper_->CreateSurfaces(requested_pic_size_,
++ buffers.size(),
++ &va_surface_ids),
++ "Failed creating VA Surfaces", PLATFORM_FAILURE, );
++ DCHECK_EQ(va_surface_ids.size(), buffers.size());
++
++ for (size_t i = 0; i < buffers.size(); ++i) {
++ DVLOG(2) << "Assigning picture id: " << buffers[i].id()
++ << " to texture id: " << buffers[i].texture_id()
++ << " VASurfaceID: " << va_surface_ids[i];
++
++ linked_ptr<TFPPicture> tfp_picture(
++ TFPPicture::Create(make_context_current_, egl_display_, x_display_,
++ buffers[i].id(), buffers[i].texture_id(),
++ requested_pic_size_));
++
++ RETURN_AND_NOTIFY_ON_FAILURE(
++ tfp_picture.get(), "Failed assigning picture buffer to a texture.",
++ PLATFORM_FAILURE, );
++
++ bool inserted = tfp_pictures_.insert(std::make_pair(
++ buffers[i].id(), tfp_picture)).second;
++ DCHECK(inserted);
++
++ output_buffers_.push(buffers[i].id());
++ available_va_surfaces_.push_back(va_surface_ids[i]);
++ surfaces_available_.Signal();
++ }
++
++ state_ = kDecoding;
++ decoder_thread_.message_loop()->PostTask(FROM_HERE, base::Bind(
++ &VaapiVideoDecodeAccelerator::DecodeTask, base::Unretained(this)));
++}
++
++void VaapiVideoDecodeAccelerator::ReusePictureBuffer(int32 picture_buffer_id) {
++ DCHECK_EQ(message_loop_, base::MessageLoop::current());
++ TRACE_EVENT1("Video Decoder", "VAVDA::ReusePictureBuffer", "Picture id",
++ picture_buffer_id);
++
++ --num_frames_at_client_;
++ TRACE_COUNTER1("Video Decoder", "Textures at client", num_frames_at_client_);
++
++ output_buffers_.push(picture_buffer_id);
++ TryOutputSurface();
++}
++
++void VaapiVideoDecodeAccelerator::FlushTask() {
++ DCHECK_EQ(decoder_thread_.message_loop(), base::MessageLoop::current());
++ DVLOG(1) << "Flush task";
++
++ // First flush all the pictures that haven't been outputted, notifying the
++ // client to output them.
++ bool res = decoder_->Flush();
++ RETURN_AND_NOTIFY_ON_FAILURE(res, "Failed flushing the decoder.",
++ PLATFORM_FAILURE, );
++
++ // Put the decoder in idle state, ready to resume.
++ decoder_->Reset();
++
++ message_loop_->PostTask(FROM_HERE, base::Bind(
++ &VaapiVideoDecodeAccelerator::FinishFlush, weak_this_));
++}
++
++void VaapiVideoDecodeAccelerator::Flush() {
++ DCHECK_EQ(message_loop_, base::MessageLoop::current());
++ DVLOG(1) << "Got flush request";
++
++ base::AutoLock auto_lock(lock_);
++ state_ = kFlushing;
++ // Queue a flush task after all existing decoding tasks to clean up.
++ decoder_thread_.message_loop()->PostTask(FROM_HERE, base::Bind(
++ &VaapiVideoDecodeAccelerator::FlushTask, base::Unretained(this)));
++
++ input_ready_.Signal();
++ surfaces_available_.Signal();
++}
++
++void VaapiVideoDecodeAccelerator::FinishFlush() {
++ DCHECK_EQ(message_loop_, base::MessageLoop::current());
++
++ finish_flush_pending_ = false;
++
++ base::AutoLock auto_lock(lock_);
++ if (state_ != kFlushing) {
++ DCHECK_EQ(state_, kDestroying);
++ return; // We could've gotten destroyed already.
++ }
++
++ // Still waiting for textures from client to finish outputting all pending
++ // frames. Try again later.
++ if (!pending_output_cbs_.empty()) {
++ finish_flush_pending_ = true;
++ return;
++ }
++
++ state_ = kIdle;
++
++ message_loop_->PostTask(FROM_HERE, base::Bind(
++ &Client::NotifyFlushDone, client_));
++
++ DVLOG(1) << "Flush finished";
++}
++
++void VaapiVideoDecodeAccelerator::ResetTask() {
++ DCHECK_EQ(decoder_thread_.message_loop(), base::MessageLoop::current());
++ DVLOG(1) << "ResetTask";
++
++ // All the decoding tasks from before the reset request from client are done
++ // by now, as this task was scheduled after them and client is expected not
++ // to call Decode() after Reset() and before NotifyResetDone.
++ decoder_->Reset();
++
++ base::AutoLock auto_lock(lock_);
++
++ // Return current input buffer, if present.
++ if (curr_input_buffer_.get())
++ ReturnCurrInputBuffer_Locked();
++
++ // And let client know that we are done with reset.
++ message_loop_->PostTask(FROM_HERE, base::Bind(
++ &VaapiVideoDecodeAccelerator::FinishReset, weak_this_));
++}
++
++void VaapiVideoDecodeAccelerator::Reset() {
++ DCHECK_EQ(message_loop_, base::MessageLoop::current());
++ DVLOG(1) << "Got reset request";
++
++ // This will make any new decode tasks exit early.
++ base::AutoLock auto_lock(lock_);
++ state_ = kResetting;
++ finish_flush_pending_ = false;
++
++ // Drop all remaining input buffers, if present.
++ while (!input_buffers_.empty()) {
++ message_loop_->PostTask(FROM_HERE, base::Bind(
++ &Client::NotifyEndOfBitstreamBuffer, client_,
++ input_buffers_.front()->id));
++ input_buffers_.pop();
++ }
++
++ decoder_thread_.message_loop()->PostTask(FROM_HERE, base::Bind(
++ &VaapiVideoDecodeAccelerator::ResetTask, base::Unretained(this)));
++
++ input_ready_.Signal();
++ surfaces_available_.Signal();
++}
++
++void VaapiVideoDecodeAccelerator::FinishReset() {
++ DCHECK_EQ(message_loop_, base::MessageLoop::current());
++ DVLOG(1) << "FinishReset";
++ base::AutoLock auto_lock(lock_);
++
++ if (state_ != kResetting) {
++ DCHECK(state_ == kDestroying || state_ == kUninitialized) << state_;
++ return; // We could've gotten destroyed already.
++ }
++
++ // Drop pending outputs.
++ while (!pending_output_cbs_.empty())
++ pending_output_cbs_.pop();
++
++ if (awaiting_va_surfaces_recycle_) {
++ // Decoder requested a new surface set while we were waiting for it to
++ // finish the last DecodeTask, running at the time of Reset().
++ // Let the surface set change finish first before resetting.
++ message_loop_->PostTask(FROM_HERE, base::Bind(
++ &VaapiVideoDecodeAccelerator::FinishReset, weak_this_));
++ return;
++ }
++
++ num_stream_bufs_at_decoder_ = 0;
++ state_ = kIdle;
++
++ message_loop_->PostTask(FROM_HERE, base::Bind(
++ &Client::NotifyResetDone, client_));
++
++ // The client might have given us new buffers via Decode() while we were
++ // resetting and might be waiting for our move, and not call Decode() anymore
++ // until we return something. Post a DecodeTask() so that we won't
++ // sleep forever waiting for Decode() in that case. Having two of them
++ // in the pipe is harmless, the additional one will return as soon as it sees
++ // that we are back in kDecoding state.
++ if (!input_buffers_.empty()) {
++ state_ = kDecoding;
++ decoder_thread_.message_loop()->PostTask(FROM_HERE, base::Bind(
++ &VaapiVideoDecodeAccelerator::DecodeTask,
++ base::Unretained(this)));
++ }
++
++ DVLOG(1) << "Reset finished";
++}
++
++void VaapiVideoDecodeAccelerator::Cleanup() {
++ DCHECK_EQ(message_loop_, base::MessageLoop::current());
++
++ if (state_ == kUninitialized || state_ == kDestroying)
++ return;
++
++ DVLOG(1) << "Destroying VAVDA";
++ base::AutoLock auto_lock(lock_);
++ state_ = kDestroying;
++
++ client_ptr_factory_.InvalidateWeakPtrs();
++
++ {
++ base::AutoUnlock auto_unlock(lock_);
++ // Post a dummy task to the decoder_thread_ to ensure it is drained.
++ base::WaitableEvent waiter(false, false);
++ decoder_thread_.message_loop()->PostTask(FROM_HERE, base::Bind(
++ &base::WaitableEvent::Signal, base::Unretained(&waiter)));
++ input_ready_.Signal();
++ surfaces_available_.Signal();
++ waiter.Wait();
++ decoder_thread_.Stop();
++ }
++
++ state_ = kUninitialized;
++}
++
++void VaapiVideoDecodeAccelerator::Destroy() {
++ DCHECK_EQ(message_loop_, base::MessageLoop::current());
++ Cleanup();
++ delete this;
++}
++
++} // namespace content
+diff --git a/content/common/gpu/media/vaapi_video_decode_accelerator_tizen.h b/content/common/gpu/media/vaapi_video_decode_accelerator_tizen.h
+new file mode 100644
+index 0000000..d41cf38
+--- /dev/null
++++ b/content/common/gpu/media/vaapi_video_decode_accelerator_tizen.h
+@@ -0,0 +1,273 @@
++// Copyright (c) 2013 Intel Corporation. All rights reserved.
++// Use of this source code is governed by a BSD-style license that can be
++// found in the LICENSE file.
++//
++// This file contains an implementation of VideoDecoderAccelerator
++// that utilizes hardware video decoder present on Intel CPUs for Tizen.
++
++#ifndef CONTENT_COMMON_GPU_MEDIA_VAAPI_VIDEO_DECODE_ACCELERATOR_H_
++#define CONTENT_COMMON_GPU_MEDIA_VAAPI_VIDEO_DECODE_ACCELERATOR_H_
++
++#include <map>
++#include <queue>
++#include <utility>
++#include <vector>
++
++#include "base/logging.h"
++#include "base/memory/linked_ptr.h"
++#include "base/memory/shared_memory.h"
++#include "base/memory/weak_ptr.h"
++#include "base/message_loop/message_loop.h"
++#include "base/synchronization/condition_variable.h"
++#include "base/synchronization/lock.h"
++#include "base/threading/non_thread_safe.h"
++#include "base/threading/thread.h"
++#include "content/common/content_export.h"
++#include "content/common/gpu/media/vaapi_h264_decoder.h"
++#include "content/common/gpu/media/vaapi_wrapper.h"
++#include "content/common/gpu/media/video_decode_accelerator_impl.h"
++#include "media/base/bitstream_buffer.h"
++#include "media/video/picture.h"
++#include "media/video/video_decode_accelerator.h"
++#include "ui/gl/gl_bindings.h"
++
++namespace content {
++
++// Class to provide video decode acceleration for Intel systems with hardware
++// support for it, and on which libva is available.
++// Decoding tasks are performed in a separate decoding thread.
++//
++// Threading/life-cycle: this object is created & destroyed on the GPU
++// ChildThread. A few methods on it are called on the decoder thread which is
++// stopped during |this->Destroy()|, so any tasks posted to the decoder thread
++// can assume |*this| is still alive. See |weak_this_| below for more details.
++class CONTENT_EXPORT VaapiVideoDecodeAccelerator
++ : public VideoDecodeAcceleratorImpl {
++ public:
++ VaapiVideoDecodeAccelerator(
++ EGLDisplay egl_display, EGLContext egl_context,
++ Client* client,
++ const base::Callback<bool(void)>& make_context_current);
++ virtual ~VaapiVideoDecodeAccelerator();
++
++ // media::VideoDecodeAccelerator implementation.
++ virtual bool Initialize(media::VideoCodecProfile profile) OVERRIDE;
++ virtual void Decode(const media::BitstreamBuffer& bitstream_buffer) OVERRIDE;
++ virtual void AssignPictureBuffers(
++ const std::vector<media::PictureBuffer>& buffers) OVERRIDE;
++ virtual void ReusePictureBuffer(int32 picture_buffer_id) OVERRIDE;
++ virtual void Flush() OVERRIDE;
++ virtual void Reset() OVERRIDE;
++ virtual void Destroy() OVERRIDE;
++
++private:
++ // Notify the client that |output_id| is ready for displaying.
++ void NotifyPictureReady(int32 input_id, int32 output_id);
++
++ // Notify the client that an error has occurred and decoding cannot continue.
++ void NotifyError(Error error);
++
++ // Map the received input buffer into this process' address space and
++ // queue it for decode.
++ void MapAndQueueNewInputBuffer(
++ const media::BitstreamBuffer& bitstream_buffer);
++
++ // Get a new input buffer from the queue and set it up in decoder. This will
++ // sleep if no input buffers are available. Return true if a new buffer has
++ // been set up, false if an early exit has been requested (due to initiated
++ // reset/flush/destroy).
++ bool GetInputBuffer_Locked();
++
++ // Signal the client that the current buffer has been read and can be
++ // returned. Will also release the mapping.
++ void ReturnCurrInputBuffer_Locked();
++
++ // Pass one or more output buffers to the decoder. This will sleep
++ // if no buffers are available. Return true if buffers have been set up or
++ // false if an early exit has been requested (due to initiated
++ // reset/flush/destroy).
++ bool FeedDecoderWithOutputSurfaces_Locked();
++
++ // Continue decoding given input buffers and sleep waiting for input/output
++ // as needed. Will exit if a new set of surfaces or reset/flush/destroy
++ // is requested.
++ void DecodeTask();
++
++ // Scheduled after receiving a flush request and executed after the current
++ // decoding task finishes decoding pending inputs. Makes the decoder return
++ // all remaining output pictures and puts it in an idle state, ready
++ // to resume if needed and schedules a FinishFlush.
++ void FlushTask();
++
++ // Scheduled by the FlushTask after decoder is flushed to put VAVDA into idle
++ // state and notify the client that flushing has been finished.
++ void FinishFlush();
++
++ // Scheduled after receiving a reset request and executed after the current
++ // decoding task finishes decoding the current frame. Puts the decoder into
++ // an idle state, ready to resume if needed, discarding decoded but not yet
++ // outputted pictures (decoder keeps ownership of their associated picture
++ // buffers). Schedules a FinishReset afterwards.
++ void ResetTask();
++
++ // Scheduled by ResetTask after it's done putting VAVDA into an idle state.
++ // Drops remaining input buffers and notifies the client that reset has been
++ // finished.
++ void FinishReset();
++
++ // Helper for Destroy(), doing all the actual work except for deleting self.
++ void Cleanup();
++
++ // Get a usable framebuffer configuration for use in binding textures
++ // or return false on failure.
++ bool InitializeFBConfig();
++
++ // Callback for the decoder to execute when it wants us to output given
++ // |va_surface|.
++ void SurfaceReady(int32 input_id, const scoped_refptr<VASurface>& va_surface);
++
++ // Represents a texture bound to an X Pixmap for output purposes.
++ class TFPPicture;
++
++ // Callback to be executed once we have a |va_surface| to be output and
++ // an available |tfp_picture| to use for output.
++ // Puts contents of |va_surface| into given |tfp_picture|, releases the
++ // surface and passes the resulting picture to client for output.
++ void OutputPicture(const scoped_refptr<VASurface>& va_surface,
++ int32 input_id,
++ TFPPicture* tfp_picture);
++
++ // Try to OutputPicture() if we have both a ready surface and picture.
++ void TryOutputSurface();
++
++ // Called when a VASurface is no longer in use by the decoder or is not being
++ // synced/waiting to be synced to a picture. Returns it to available surfaces
++ // pool.
++ void RecycleVASurfaceID(VASurfaceID va_surface_id);
++
++ // Initiate wait cycle for surfaces to be released before we release them
++ // and allocate new ones, as requested by the decoder.
++ void InitiateSurfaceSetChange(size_t num_pics, gfx::Size size);
++ // Check if the surfaces have been released or post ourselves for later.
++ void TryFinishSurfaceSetChange();
++
++ // Client-provided X/EGL state.
++ Display* x_display_;
++ EGLDisplay egl_display_;
++ EGLContext egl_context_;
++ base::Callback<bool(void)> make_context_current_;
++
++ // VAVDA state.
++ enum State {
++ // Initialize() not called yet or failed.
++ kUninitialized,
++ // DecodeTask running.
++ kDecoding,
++ // Resetting, waiting for decoder to finish current task and cleanup.
++ kResetting,
++ // Flushing, waiting for decoder to finish current task and cleanup.
++ kFlushing,
++ // Idle, decoder in state ready to start/resume decoding.
++ kIdle,
++ // Destroying, waiting for the decoder to finish current task.
++ kDestroying,
++ };
++
++ // Protects input buffer and surface queues and state_.
++ base::Lock lock_;
++ State state_;
++
++ // An input buffer awaiting consumption, provided by the client.
++ struct InputBuffer {
++ InputBuffer();
++ ~InputBuffer();
++
++ int32 id;
++ size_t size;
++ scoped_ptr<base::SharedMemory> shm;
++ };
++
++ // Queue for incoming input buffers.
++ typedef std::queue<linked_ptr<InputBuffer> > InputBuffers;
++ InputBuffers input_buffers_;
++ // Signalled when input buffers are queued onto the input_buffers_ queue.
++ base::ConditionVariable input_ready_;
++
++ // Current input buffer at decoder.
++ linked_ptr<InputBuffer> curr_input_buffer_;
++
++ // Queue for incoming output buffers (texture ids).
++ typedef std::queue<int32> OutputBuffers;
++ OutputBuffers output_buffers_;
++
++ typedef std::map<int32, linked_ptr<TFPPicture> > TFPPictures;
++ // All allocated TFPPictures, regardless of their current state. TFPPictures
++ // are allocated once and destroyed at the end of decode.
++ TFPPictures tfp_pictures_;
++
++ // Return a TFPPicture associated with given client-provided id.
++ TFPPicture* TFPPictureById(int32 picture_buffer_id);
++
++ // VA Surfaces no longer in use that can be passed back to the decoder for
++ // reuse, once it requests them.
++ std::list<VASurfaceID> available_va_surfaces_;
++ // Signalled when output surfaces are queued onto the available_va_surfaces_
++ // queue.
++ base::ConditionVariable surfaces_available_;
++
++ // Pending output requests from the decoder. When it indicates that we should
++ // output a surface and we have an available TFPPicture (i.e. texture) ready
++ // to use, we'll execute the callback passing the TFPPicture. The callback
++ // will put the contents of the surface into the picture and return it to
++ // the client, releasing the surface as well.
++ // If we don't have any available TFPPictures at the time when the decoder
++ // requests output, we'll store the request on pending_output_cbs_ queue for
++ // later and run it once the client gives us more textures
++ // via ReusePictureBuffer().
++ typedef base::Callback<void(TFPPicture*)> OutputCB;
++ std::queue<OutputCB> pending_output_cbs_;
++
++ // ChildThread's message loop
++ base::MessageLoop* message_loop_;
++
++ // WeakPtr<> pointing to |this| for use in posting tasks from the decoder
++ // thread back to the ChildThread. Because the decoder thread is a member of
++ // this class, any task running on the decoder thread is guaranteed that this
++ // object is still alive. As a result, tasks posted from ChildThread to
++ // decoder thread should use base::Unretained(this), and tasks posted from the
++ // decoder thread to the ChildThread should use |weak_this_|.
++ base::WeakPtr<VaapiVideoDecodeAccelerator> weak_this_;
++
++ // To expose client callbacks from VideoDecodeAccelerator.
++ // NOTE: all calls to these objects *MUST* be executed on message_loop_.
++ base::WeakPtrFactory<Client> client_ptr_factory_;
++ base::WeakPtr<Client> client_;
++
++ scoped_ptr<VaapiWrapper> vaapi_wrapper_;
++
++ // Comes after vaapi_wrapper_ to ensure its destructor is executed before
++ // vaapi_wrapper_ is destroyed.
++ scoped_ptr<VaapiH264Decoder> decoder_;
++ base::Thread decoder_thread_;
++
++ int num_frames_at_client_;
++ int num_stream_bufs_at_decoder_;
++
++ // Whether we are waiting for any pending_output_cbs_ to be run before
++ // NotifyingFlushDone.
++ bool finish_flush_pending_;
++
++ // Decoder requested a new surface set and we are waiting for all the surfaces
++ // to be returned before we can free them.
++ bool awaiting_va_surfaces_recycle_;
++
++ // Last requested number/resolution of output picture buffers.
++ size_t requested_num_pics_;
++ gfx::Size requested_pic_size_;
++
++ DISALLOW_COPY_AND_ASSIGN(VaapiVideoDecodeAccelerator);
++};
++
++} // namespace content
++
++#endif // CONTENT_COMMON_GPU_MEDIA_VAAPI_VIDEO_DECODE_ACCELERATOR_H_
+diff --git a/content/content_common.gypi b/content/content_common.gypi
+index 8c45574..7345d9b 100644
+--- a/content/content_common.gypi
++++ b/content/content_common.gypi
+@@ -537,6 +537,32 @@
+ '<(DEPTH)/third_party/libva',
+ ],
+ }],
++ ['target_arch != "arm" and tizen_mobile == 1 and use_x11 == 1', {
++ 'dependencies': [
++ '../media/media.gyp:media',
++ ],
++ 'sources': [
++ 'common/gpu/media/h264_dpb.cc',
++ 'common/gpu/media/h264_dpb.h',
++ 'common/gpu/media/va_surface.h',
++ 'common/gpu/media/vaapi_h264_decoder.cc',
++ 'common/gpu/media/vaapi_h264_decoder.h',
++ 'common/gpu/media/vaapi_video_decode_accelerator_tizen.cc',
++ 'common/gpu/media/vaapi_video_decode_accelerator_tizen.h',
++ 'common/gpu/media/vaapi_wrapper.cc',
++ 'common/gpu/media/vaapi_wrapper.h',
++ ],
++ 'include_dirs': [
++ '<(DEPTH)/third_party/libva',
++ '<(DEPTH)/third_party/khronos',
++ ],
++ 'link_settings': {
++ 'libraries': [
++ '-lEGL',
++ '-lGLESv2',
++ ],
++ },
++ }],
+ ['OS=="win"', {
+ 'dependencies': [
+ '../media/media.gyp:media',
+diff --git a/content/gpu/gpu_main.cc b/content/gpu/gpu_main.cc
+index 9e29e03..2c04d40 100644
+--- a/content/gpu/gpu_main.cc
++++ b/content/gpu/gpu_main.cc
+@@ -42,7 +42,8 @@
+ #include "sandbox/win/src/sandbox.h"
+ #elif defined(OS_CHROMEOS) && defined(ARCH_CPU_ARMEL) && defined(USE_X11)
+ #include "content/common/gpu/media/exynos_video_decode_accelerator.h"
+-#elif defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY) && defined(USE_X11)
++#elif (defined(OS_CHROMEOS) || defined(OS_TIZEN_MOBILE)) &&\
++ defined(ARCH_CPU_X86_FAMILY) && defined(USE_X11)
+ #include "content/common/gpu/media/vaapi_wrapper.h"
+ #endif
+
+@@ -360,7 +361,8 @@ bool WarmUpSandbox(const CommandLine& command_line) {
+
+ #if defined(OS_CHROMEOS) && defined(ARCH_CPU_ARMEL) && defined(USE_X11)
+ ExynosVideoDecodeAccelerator::PreSandboxInitialization();
+-#elif defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY) && defined(USE_X11)
++#elif (defined(OS_CHROMEOS) || defined(OS_TIZEN_MOBILE)) &&\
++ defined(ARCH_CPU_X86_FAMILY) && defined(USE_X11)
+ VaapiWrapper::PreSandboxInitialization();
+ #endif
+
+--
+1.7.9.5
+
--- /dev/null
+Author: Sudarsana Nagineni <sudarsana.nagineni@intel.com>
+
+This patch includes Chromium side changes needed for integrating
+WebMediaPlayer with the Tizen Audio Session Manager.
+
+Also, it has changes that are needed for generating audio-session-manager
+stubs and load the library dynamically in the Browser process.
+
+audio-session-manager is using an __attribute__((constructor)) to initialize
+code and setup signal handlers at startup. So, linking with this library
+is causing a sandbox violation by executing the code in Renderer Process,
+since the Renderer Process and the Browser Process are linked with the
+same libraries.
+
+To prevent the problem, we load the audio-session-manager dynamically in
+the Browser process.
+
+diff --git src/build/linux/system.gyp src/build/linux/system.gyp
+index 68e4d36..3ae6ab3 100644
+--- src/build/linux/system.gyp
++++ src/build/linux/system.gyp
+@@ -892,5 +892,19 @@
+ }],
+ ],
+ },
++ {
++ 'target_name': 'audio_session_manager',
++ 'type': 'none',
++ 'toolsets': ['host', 'target'],
++ 'conditions': [
++ ['tizen_mobile == 1', {
++ 'direct_dependent_settings': {
++ 'cflags': [
++ '<!@(<(pkg-config) --cflags audio-session-mgr)',
++ ],
++ },
++ }],
++ ],
++ },
+ ],
+ }
+diff --git src/content/browser/renderer_host/render_view_host_impl.cc src/content/browser/renderer_host/render_view_host_impl.cc
+index 68204fb..a3e654b 100644
+--- src/content/browser/renderer_host/render_view_host_impl.cc
++++ src/content/browser/renderer_host/render_view_host_impl.cc
+@@ -83,6 +83,8 @@
+ #include "content/browser/renderer_host/popup_menu_helper_mac.h"
+ #elif defined(OS_ANDROID)
+ #include "content/browser/media/android/browser_media_player_manager.h"
++#elif defined(OS_TIZEN_MOBILE)
++#include "xwalk/tizen/browser/browser_mediaplayer_manager.h"
+ #endif
+
+ using base::TimeDelta;
+@@ -208,6 +210,8 @@ RenderViewHostImpl::RenderViewHostImpl(
+
+ #if defined(OS_ANDROID)
+ media_player_manager_.reset(BrowserMediaPlayerManager::Create(this));
++#elif defined(OS_TIZEN_MOBILE)
++ media_player_manager_.reset(tizen::BrowserMediaPlayerManager::Create(this));
+ #endif
+ }
+
+diff --git src/content/browser/renderer_host/render_view_host_impl.h src/content/browser/renderer_host/render_view_host_impl.h
+index c4bfb78..cc060bb 100644
+--- src/content/browser/renderer_host/render_view_host_impl.h
++++ src/content/browser/renderer_host/render_view_host_impl.h
+@@ -43,6 +43,12 @@ struct ViewMsg_Navigate_Params;
+ struct ViewMsg_PostMessage_Params;
+ struct ViewMsg_StopFinding_Params;
+
++#if defined(OS_TIZEN_MOBILE)
++namespace tizen {
++class BrowserMediaPlayerManager;
++}
++#endif
++
+ namespace base {
+ class ListValue;
+ }
+@@ -715,6 +721,8 @@ class CONTENT_EXPORT RenderViewHostImpl
+ #if defined(OS_ANDROID)
+ // Manages all the android mediaplayer objects and handling IPCs for video.
+ scoped_ptr<BrowserMediaPlayerManager> media_player_manager_;
++#elif defined(OS_TIZEN_MOBILE)
++ scoped_ptr<tizen::BrowserMediaPlayerManager> media_player_manager_;
+ #endif
+
+ DISALLOW_COPY_AND_ASSIGN(RenderViewHostImpl);
+diff --git src/content/common/content_message_generator.h src/content/common/content_message_generator.h
+index b0af11d..54106a2 100644
+--- src/content/common/content_message_generator.h
++++ src/content/common/content_message_generator.h
+@@ -56,3 +56,6 @@
+ #include "content/common/view_messages.h"
+ #include "content/common/websocket_messages.h"
+ #include "content/common/worker_messages.h"
++#if defined(OS_TIZEN_MOBILE)
++#include "xwalk/tizen/common/media_player_messages.h"
++#endif
+diff --git src/content/content_browser.gypi src/content/content_browser.gypi
+index 1e3485c..6336e49 100644
+--- src/content/content_browser.gypi
++++ src/content/content_browser.gypi
+@@ -1558,5 +1558,69 @@
+ '../third_party/speex/speex.gyp:libspeex',
+ ],
+ }],
++ ['tizen_mobile == 1', {
++ 'sources': [
++ '<(DEPTH)/xwalk/tizen/browser/audio_session_manager.cc',
++ '<(DEPTH)/xwalk/tizen/browser/audio_session_manager.h',
++ '<(DEPTH)/xwalk/tizen/browser/audio_session_manager_init.cc',
++ '<(DEPTH)/xwalk/tizen/browser/audio_session_manager_init.h',
++ '<(DEPTH)/xwalk/tizen/browser/browser_mediaplayer_manager.cc',
++ '<(DEPTH)/xwalk/tizen/browser/browser_mediaplayer_manager.h',
++ ],
++ 'variables': {
++ 'generate_stubs_script': '../tools/generate_stubs/generate_stubs.py',
++ 'extra_header': '../xwalk/tizen/browser/audio_session_manager_stub_headers.fragment',
++ 'sig_files': ['../xwalk/tizen/browser/audio_session_manager.sigs'],
++ 'outfile_type': 'posix_stubs',
++ 'stubs_filename_root': 'audio_session_manager_stubs',
++ 'project_path': 'xwalk/tizen/browser',
++ 'intermediate_dir': '<(INTERMEDIATE_DIR)',
++ 'output_root': '<(SHARED_INTERMEDIATE_DIR)/audio_session_manager',
++ },
++ 'include_dirs': [
++ '<(output_root)',
++ ],
++ 'actions': [
++ {
++ 'action_name': 'generate_stubs',
++ 'inputs': [
++ '<(generate_stubs_script)',
++ '<(extra_header)',
++ '<@(sig_files)',
++ ],
++ 'outputs': [
++ '<(intermediate_dir)/<(stubs_filename_root).cc',
++ '<(output_root)/<(project_path)/<(stubs_filename_root).h',
++ ],
++ 'action': ['python',
++ '<(generate_stubs_script)',
++ '-i', '<(intermediate_dir)',
++ '-o', '<(output_root)/<(project_path)',
++ '-t', '<(outfile_type)',
++ '-e', '<(extra_header)',
++ '-s', '<(stubs_filename_root)',
++ '-p', '<(project_path)',
++ '<@(_inputs)',
++ ],
++ 'process_outputs_as_sources': 1,
++ 'message': 'Generating audio session manager stubs for dynamic loading',
++ },
++ ],
++ 'conditions': [
++ ['OS=="linux" or OS=="solaris"', {
++ 'link_settings': {
++ 'libraries': [
++ '-ldl',
++ ],
++ },
++ }],
++ ],
++ 'dependencies': [
++ '../build/linux/system.gyp:audio_session_manager',
++ ],
++ 'export_dependent_settings': [
++ '../build/linux/system.gyp:audio_session_manager',
++ ],
++ }],
+ ],
+ }
+diff --git src/content/content_common.gypi src/content/content_common.gypi
+index 8c45574..10b0e2b 100644
+--- src/content/content_common.gypi
++++ src/content/content_common.gypi
+@@ -590,5 +590,10 @@
+ },
+ ]
+ }],
++ ['tizen_mobile == 1', {
++ 'sources': [
++ '<(DEPTH)/xwalk/tizen/common/media_player_messages.h',
++ ],
++ }],
+ ],
+ }
+diff --git src/content/content_renderer.gypi src/content/content_renderer.gypi
+index 46b7468..4cab17f 100644
+--- /src/content/content_renderer.gypi
++++ src/content/content_renderer.gypi
+@@ -722,6 +722,14 @@
+ }],
+ ],
+ }],
++ ['tizen_mobile == 1', {
++ 'sources': [
++ '<(DEPTH)/xwalk/tizen/renderer/mediaplayer_impl.cc',
++ '<(DEPTH)/xwalk/tizen/renderer/mediaplayer_impl.h',
++ '<(DEPTH)/xwalk/tizen/renderer/renderer_mediaplayer_manager.cc',
++ '<(DEPTH)/xwalk/tizen/renderer/renderer_mediaplayer_manager.h',
++ ],
++ }],
+ ],
+ 'target_conditions': [
+ ['OS=="android"', {
+diff --git src/content/renderer/render_view_impl.cc src/content/renderer/render_view_impl.cc
+index 6827468..549c8fb 100644
+--- src/content/renderer/render_view_impl.cc
++++ src/content/renderer/render_view_impl.cc
+@@ -255,6 +255,11 @@
+ #include "content/renderer/media/rtc_peer_connection_handler.h"
+ #endif
+
++#if defined(OS_TIZEN_MOBILE)
++#include "xwalk/tizen/renderer/mediaplayer_impl.h"
++#include "xwalk/tizen/renderer/renderer_mediaplayer_manager.h"
++#endif
++
+ using WebKit::WebAXObject;
+ using WebKit::WebApplicationCacheHost;
+ using WebKit::WebApplicationCacheHostClient;
+@@ -865,6 +870,8 @@ RenderViewImpl::RenderViewImpl(RenderViewImplParams* params)
+ body_background_color_(SK_ColorWHITE),
+ expected_content_intent_id_(0),
+ media_player_manager_(NULL),
++#elif defined(OS_TIZEN_MOBILE)
++ media_player_manager_(NULL),
+ #endif
+ #if defined(OS_WIN)
+ focused_plugin_id_(-1),
+@@ -986,6 +993,8 @@ void RenderViewImpl::Initialize(RenderViewImplParams* params) {
+ #if defined(OS_ANDROID)
+ media_player_manager_ = new RendererMediaPlayerManager(this);
+ new JavaBridgeDispatcher(this);
++#elif defined(OS_TIZEN_MOBILE)
++ media_player_manager_ = new tizen::RendererMediaPlayerManager(this);
+ #endif
+
+ // The next group of objects all implement RenderViewObserver, so are deleted
+@@ -3163,6 +3172,13 @@ WebMediaPlayer* RenderViewImpl::createMediaPlayer(
+ sink,
+ RenderThreadImpl::current()->GetGpuFactories(),
+ new RenderMediaLog());
++
++#if defined(OS_TIZEN_MOBILE)
++ tizen::MediaPlayerImpl* media_player = new tizen::MediaPlayerImpl(
++ frame, client, AsWeakPtr(), media_player_manager_, params);
++ return media_player;
++#endif
++
+ return new WebMediaPlayerImpl(frame, client, AsWeakPtr(), params);
+ }
+
+diff --git src/content/renderer/render_view_impl.h src/content/renderer/render_view_impl.h
+index 2b18b3c..314a1c0 100644
+--- src/content/renderer/render_view_impl.h
++++ src/content/renderer/render_view_impl.h
+@@ -129,6 +129,12 @@ namespace webkit_glue {
+ class WebURLResponseExtraDataImpl;
+ }
+
++#if defined(OS_TIZEN_MOBILE)
++namespace tizen {
++class RendererMediaPlayerManager;
++}
++#endif
++
+ namespace content {
+ class BrowserPluginManager;
+ class DeviceOrientationDispatcher;
+@@ -1431,6 +1437,10 @@ class CONTENT_EXPORT RenderViewImpl
+
+ // A date/time picker object for date and time related input elements.
+ scoped_ptr<RendererDateTimePicker> date_time_picker_client_;
++#elif defined(OS_TIZEN_MOBILE)
++ // The media player manager for managing all the media players on this view
++ // and for communicating with the audio session manager in browser process.
++ tizen::RendererMediaPlayerManager* media_player_manager_;
+ #endif
+
+ // Plugins -------------------------------------------------------------------
--- /dev/null
+Name: crosswalk
+Version: 4.32.73.0
+Release: 0
+Summary: Crosswalk is an app runtime based on Chromium
+# License: (BSD-3-Clause and LGPL-2.1+)
+License: BSD-3-Clause
+Group: Web Framework/Web Run Time
+Url: https://github.com/otcshare/crosswalk
+Source: %{name}.tar
+Source1: xwalk
+Source2: org.crosswalkproject.Runtime1.service
+Source3: xwalk.service
+Source1001: crosswalk.manifest
+Source1002: %{name}.xml.in
+Source1003: %{name}.png
+Patch1: %{name}-do-not-look-for-gtk2-when-using-aura.patch
+Patch2: %{name}-look-for-pvr-libGLESv2.so.patch
+Patch3: %{name}-include-tizen-ime-files.patch
+Patch4: %{name}-disable-ffmpeg-pragmas.patch
+Patch5: Chromium-Fix-gcc-4.5.3-uninitialized-warnings.patch
+Patch6: Blink-Fix-gcc-4.5.3-uninitialized-warnings.patch
+Patch7: %{name}-tizen-audio-session-manager.patch
+Patch8: %{name}-mesa-ozone-typedefs.patch
+
+BuildRequires: bison
+BuildRequires: bzip2-devel
+BuildRequires: expat-devel
+BuildRequires: flex
+BuildRequires: gperf
+BuildRequires: libcap-devel
+BuildRequires: python
+BuildRequires: python-xml
+BuildRequires: perl
+BuildRequires: which
+BuildRequires: pkgconfig(alsa)
+BuildRequires: pkgconfig(appcore-common)
+BuildRequires: pkgconfig(appcore-efl)
+BuildRequires: pkgconfig(aul)
+BuildRequires: pkgconfig(audio-session-mgr)
+BuildRequires: pkgconfig(cairo)
+BuildRequires: pkgconfig(capi-appfw-application)
+BuildRequires: pkgconfig(capi-location-manager)
+BuildRequires: pkgconfig(dbus-1)
+BuildRequires: pkgconfig(fontconfig)
+BuildRequires: pkgconfig(freetype2)
+BuildRequires: pkgconfig(gles20)
+BuildRequires: pkgconfig(glib-2.0)
+BuildRequires: pkgconfig(haptic)
+BuildRequires: pkgconfig(icu-i18n)
+BuildRequires: pkgconfig(libdrm)
+BuildRequires: pkgconfig(libexif)
+BuildRequires: pkgconfig(libpci)
+BuildRequires: pkgconfig(libpulse)
+BuildRequires: pkgconfig(libudev)
+BuildRequires: pkgconfig(libxml-2.0)
+BuildRequires: pkgconfig(libxslt)
+BuildRequires: pkgconfig(pango)
+BuildRequires: pkgconfig(pkgmgr-info)
+BuildRequires: pkgconfig(pkgmgr-parser)
+BuildRequires: pkgconfig(nspr)
+BuildRequires: pkgconfig(sensor)
+BuildRequires: pkgconfig(vconf)
+BuildRequires: pkgconfig(x11)
+BuildRequires: pkgconfig(xcomposite)
+BuildRequires: pkgconfig(xcursor)
+BuildRequires: pkgconfig(xdamage)
+BuildRequires: pkgconfig(xext)
+BuildRequires: pkgconfig(xfixes)
+BuildRequires: pkgconfig(xi)
+BuildRequires: pkgconfig(xrandr)
+BuildRequires: pkgconfig(xrender)
+BuildRequires: pkgconfig(xscrnsaver)
+BuildRequires: pkgconfig(xt)
+BuildRequires: pkgconfig(xtst)
+
+# Depending on the Tizen version and profile we are building for, we have
+# different dependencies, patches and gyp options to pass. Checking for
+# specific profiles is not very future-proof. We therefore try to check for
+# either specific features that may be enabled in the current profile (such as
+# Wayland support) or for a certain Tizen major version (the differences between
+# Tizen 2 and Tizen 3 are big enough that we need completely different patches
+# and build dependencies, for example).
+%bcond_with wayland
+
+%if "%{tizen}" < "3.0"
+BuildRequires: gst-plugins-atomisp-devel
+BuildRequires: pkgconfig(openssl)
+%else
+BuildRequires: pkgconfig(nss)
+%endif
+
+%if %{with wayland}
+BuildRequires: pkgconfig(wayland-client)
+BuildRequires: pkgconfig(wayland-cursor)
+BuildRequires: pkgconfig(wayland-egl)
+BuildRequires: pkgconfig(xkbcommon)
+%else
+BuildRequires: pkgconfig(scim)
+%endif
+
+%description
+Crosswalk is an app runtime based on Chromium. It is an open source project started by the Intel Open Source Technology Center (http://www.01.org).
+
+%package emulator-support
+Summary: Support files necessary for running Crosswalk on the Tizen emulator
+# License: (BSD-3-Clause and LGPL-2.1+)
+License: BSD-3-Clause
+Group: Web Framework/Web Run Time
+Url: https://github.com/otcshare/crosswalk
+
+%description emulator-support
+This package contains additional support files that are needed for running Crosswalk on the Tizen emulator.
+
+%define _manifestdir /usr/share/packages
+%define _desktop_icondir /usr/share/icons/default/small
+%define _dbusservicedir /usr/share/dbus-1/services
+%define _systemduserservicedir /usr/lib/systemd/user
+
+%prep
+%setup -q -n crosswalk
+
+cp %{SOURCE1001} .
+cp %{SOURCE1002} .
+cp %{SOURCE1003} .
+sed "s/@VERSION@/%{version}/g" %{name}.xml.in > %{name}.xml
+
+cp -a src/AUTHORS AUTHORS.chromium
+cp -a src/LICENSE LICENSE.chromium
+cp -a src/xwalk/AUTHORS AUTHORS.xwalk
+cp -a src/xwalk/LICENSE LICENSE.xwalk
+
+%patch1
+%patch7
+
+%if "%{tizen}" < "3.0"
+%patch2
+%patch3
+%patch4
+%patch5 -p1
+%patch6 -p1
+%endif
+
+%if %{with wayland}
+%patch8
+%endif
+
+%build
+
+# For ffmpeg on ia32. The original CFLAGS set by the gyp and config files in
+# src/third_party/ffmpeg already pass -O2 -fomit-frame-pointer, but Tizen's
+# CFLAGS end up appending -fno-omit-frame-pointer. See http://crbug.com/37246
+export CFLAGS=`echo $CFLAGS | sed s,-fno-omit-frame-pointer,,g`
+
+# Building the RPM in the GBS chroot fails with errors such as
+# /usr/lib/gcc/i586-tizen-linux/4.7/../../../../i586-tizen-linux/bin/ld:
+# failed to set dynamic section sizes: Memory exhausted
+# For now, work around it by passing a GNU ld-specific flag that optimizes the
+# linker for memory usage.
+export LDFLAGS="${LDFLAGS} -Wl,--no-keep-memory"
+
+# Support building in a non-standard directory, possibly outside %{_builddir}.
+# Since the build root is erased every time a new build is performed, one way
+# to avoid losing the build directory is to specify a location outside the
+# build root to the BUILDDIR_NAME definition, such as "/var/tmp/xwalk-build"
+# (remember all paths are still inside the chroot):
+# gbs build --define 'BUILDDIR_NAME /some/path'
+#
+# The --depth and --generator-output combo is used to put all the Makefiles
+# inside the build directory, and (this is the important part) keep file lists
+# (generated with <|() in gyp) in the build directory as well, otherwise they
+# will be in the source directory, erased every time and trigger an almost full
+# Blink rebuild (among other smaller targets).
+# We cannot always pass those flags, though, because gyp's make generator does
+# not work if the --generator-output is the top-level source directory.
+BUILDDIR_NAME="%{?BUILDDIR_NAME}"
+if [ -z "${BUILDDIR_NAME}" ]; then
+ BUILDDIR_NAME="."
+else
+ GYP_EXTRA_FLAGS="--depth=. --generator-output=${BUILDDIR_NAME}"
+fi
+
+# Tizen 2's NSS is too old for Chromium, so we have to use the OpenSSL backend.
+%if "%{tizen}" < "3.0"
+GYP_EXTRA_FLAGS="${GYP_EXTRA_FLAGS} -Dtizen_mobile=1 -Duse_openssl=1"
+%endif
+
+%if %{with wayland}
+GYP_EXTRA_FLAGS="${GYP_EXTRA_FLAGS} -Duse_ash=1 -Duse_ozone=1"
+%endif
+
+# Change src/ so that we can pass "." to --depth below, otherwise we would need
+# to pass "src" to it, but this confuses the gyp make generator, that expects
+# to be called from the root source directory.
+cd src
+
+# --no-parallel is added because the chroot does not mount /dev/shm, which
+# would cause a python multiprocessing.SemLock error.
+export GYP_GENERATORS='make'
+./xwalk/gyp_xwalk xwalk/xwalk.gyp \
+--no-parallel \
+${GYP_EXTRA_FLAGS} \
+-Dchromeos=0 \
+-Ddisable_nacl=1 \
+-Dpython_ver=2.7 \
+-Duse_aura=1 \
+-Duse_cups=0 \
+-Duse_gconf=0 \
+-Duse_kerberos=0 \
+-Duse_system_bzip2=1 \
+-Duse_system_icu=1 \
+-Duse_system_libexif=1 \
+-Duse_system_libxml=1 \
+-Duse_system_nspr=1 \
+-Denable_xi21_mt=1 \
+-Duse_xi2_mt=0
+
+make %{?_smp_mflags} -C "${BUILDDIR_NAME}" BUILDTYPE=Release xwalk xwalkctl xwalk_launcher xwalk-pkg-helper
+
+%install
+# Support building in a non-standard directory, possibly outside %{_builddir}.
+# Since the build root is erased every time a new build is performed, one way
+# to avoid losing the build directory is to specify a location outside the
+# build root to the BUILDDIR_NAME definition, such as "/var/tmp/xwalk-build"
+# (remember all paths are still inside the chroot):
+# gbs build --define 'BUILDDIR_NAME /some/path'
+BUILDDIR_NAME="%{?BUILDDIR_NAME}"
+if [ -z "${BUILDDIR_NAME}" ]; then
+ BUILDDIR_NAME="."
+fi
+
+# Since BUILDDIR_NAME can be either a relative path or an absolute one, we need
+# to cd into src/ so that it means the same thing in the build and install
+# stages: during the former, a relative location refers to a place inside src/,
+# whereas during the latter a relative location by default would refer to a
+# place one directory above src/. If BUILDDIR_NAME is an absolute path, this is
+# irrelevant anyway.
+cd src
+
+# Binaries.
+install -p -D %{SOURCE1} %{buildroot}%{_bindir}/xwalk
+install -p -D %{SOURCE2} %{buildroot}%{_dbusservicedir}/org.crosswalkproject.Runtime1.service
+install -p -D %{SOURCE3} %{buildroot}%{_systemduserservicedir}/xwalk.service
+install -p -D ${BUILDDIR_NAME}/out/Release/xwalk %{buildroot}%{_libdir}/xwalk/xwalk
+install -p -D ${BUILDDIR_NAME}/out/Release/xwalkctl %{buildroot}%{_bindir}/xwalkctl
+install -p -D ${BUILDDIR_NAME}/out/Release/xwalk-launcher %{buildroot}%{_bindir}/xwalk-launcher
+install -p -D ${BUILDDIR_NAME}/out/Release/xwalk-pkg-helper %{buildroot}%{_bindir}/xwalk-pkg-helper
+
+# Supporting libraries and resources.
+install -p -D ${BUILDDIR_NAME}/out/Release/libffmpegsumo.so %{buildroot}%{_libdir}/xwalk/libffmpegsumo.so
+install -p -D ${BUILDDIR_NAME}/out/Release/libosmesa.so %{buildroot}%{_libdir}/xwalk/libosmesa.so
+install -p -D ${BUILDDIR_NAME}/out/Release/xwalk.pak %{buildroot}%{_libdir}/xwalk/xwalk.pak
+
+# Register xwalk to the package manager.
+install -p -D ../%{name}.xml %{buildroot}%{_manifestdir}/%{name}.xml
+install -p -D ../%{name}.png %{buildroot}%{_desktop_icondir}/%{name}.png
+
+%files
+%manifest %{name}.manifest
+# %license AUTHORS.chromium AUTHORS.xwalk LICENSE.chromium LICENSE.xwalk
+%{_bindir}/xwalk
+%{_bindir}/xwalkctl
+%{_bindir}/xwalk-launcher
+%{_bindir}/xwalk-pkg-helper
+%{_libdir}/xwalk/libffmpegsumo.so
+%{_libdir}/xwalk/xwalk
+%{_libdir}/xwalk/xwalk.pak
+%{_manifestdir}/%{name}.xml
+%{_desktop_icondir}/%{name}.png
+%{_dbusservicedir}/org.crosswalkproject.Runtime1.service
+%{_systemduserservicedir}/xwalk.service
+
+%files emulator-support
+%{_libdir}/xwalk/libosmesa.so