Update To 11.40.268.0
[platform/framework/web/crosswalk.git] src/gpu/command_buffer/service/async_pixel_transfer_manager_share_group.cc
// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "gpu/command_buffer/service/async_pixel_transfer_manager_share_group.h"

#include <list>

#include "base/bind.h"
#include "base/debug/trace_event.h"
#include "base/debug/trace_event_synthetic_delay.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
#include "base/synchronization/cancellation_flag.h"
#include "base/synchronization/lock.h"
#include "base/synchronization/waitable_event.h"
#include "base/threading/thread.h"
#include "base/threading/thread_checker.h"
#include "gpu/command_buffer/service/async_pixel_transfer_delegate.h"
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_surface.h"
#include "ui/gl/gpu_preference.h"
#include "ui/gl/scoped_binders.h"

namespace gpu {

namespace {

const char kAsyncTransferThreadName[] = "AsyncTransferThread";

void PerformNotifyCompletion(
    AsyncMemoryParams mem_params,
    scoped_refptr<AsyncPixelTransferCompletionObserver> observer) {
  TRACE_EVENT0("gpu", "PerformNotifyCompletion");
  observer->DidComplete(mem_params);
}

// TODO(backer): Factor out common thread scheduling logic from the EGL and
// ShareGroup implementations. http://crbug.com/239889
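// Dedicated upload thread. It owns a small offscreen GLSurface and a GLContext
// created in the parent context's share group, so textures uploaded on this
// thread are visible to the parent context.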
class TransferThread : public base::Thread {
 public:
  TransferThread()
      : base::Thread(kAsyncTransferThreadName),
        initialized_(false) {
    Start();
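    // Where supported, run uploads at background priority so they are less
    // likely to compete with latency-sensitive work.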
#if defined(OS_ANDROID) || defined(OS_LINUX)
    SetPriority(base::kThreadPriority_Background);
#endif
  }

  ~TransferThread() override {
    // The only instance of this class was declared leaky.
    NOTREACHED();
  }

  void InitializeOnMainThread(gfx::GLContext* parent_context) {
    TRACE_EVENT0("gpu", "TransferThread::InitializeOnMainThread");
    if (initialized_)
      return;

    base::WaitableEvent wait_for_init(true, false);
    message_loop_proxy()->PostTask(
      FROM_HERE,
      base::Bind(&TransferThread::InitializeOnTransferThread,
                 base::Unretained(this),
                 base::Unretained(parent_context),
                 &wait_for_init));
    wait_for_init.Wait();
  }

  void CleanUp() override {
    surface_ = NULL;
    context_ = NULL;
  }

 private:
  bool initialized_;

  scoped_refptr<gfx::GLSurface> surface_;
  scoped_refptr<gfx::GLContext> context_;

  void InitializeOnTransferThread(gfx::GLContext* parent_context,
                                  base::WaitableEvent* caller_wait) {
    TRACE_EVENT0("gpu", "InitializeOnTransferThread");

    if (!parent_context) {
      LOG(ERROR) << "No parent context provided.";
      caller_wait->Signal();
      return;
    }

    surface_ = gfx::GLSurface::CreateOffscreenGLSurface(gfx::Size(1, 1));
    if (!surface_.get()) {
      LOG(ERROR) << "Unable to create GLSurface";
      caller_wait->Signal();
      return;
    }

    // TODO(backer): This is coded for integrated GPUs. For discrete GPUs
    // we would probably want to use a PBO texture upload for a true async
    // upload (that would hopefully be optimized as a DMA transfer by the
    // driver).
    context_ = gfx::GLContext::CreateGLContext(parent_context->share_group(),
                                               surface_.get(),
                                               gfx::PreferIntegratedGpu);
    if (!context_.get()) {
      LOG(ERROR) << "Unable to create GLContext.";
      caller_wait->Signal();
      return;
    }

    context_->MakeCurrent(surface_.get());
    initialized_ = true;
    caller_wait->Signal();
  }

  DISALLOW_COPY_AND_ASSIGN(TransferThread);
};

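// A single upload thread is created lazily, shared by every manager in the
// process, and intentionally leaked at shutdown.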
base::LazyInstance<TransferThread>::Leaky
    g_transfer_thread = LAZY_INSTANCE_INITIALIZER;

base::MessageLoopProxy* transfer_message_loop_proxy() {
  return g_transfer_thread.Pointer()->message_loop_proxy().get();
}

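// A ref-counted wrapper around a single upload closure. The closure may be run
// either on the main thread via TryRun() (when the texture is already bound)
// or on the upload thread via BindAndRun() (which binds the texture itself).
// A lock ensures it runs at most once, and |task_pending_| lets callers poll
// or block on completion.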
class PendingTask : public base::RefCountedThreadSafe<PendingTask> {
 public:
  explicit PendingTask(const base::Closure& task)
      : task_(task), task_pending_(true, false) {}

  bool TryRun() {
    // This is meant to be called on the main thread where the texture
    // is already bound.
    DCHECK(checker_.CalledOnValidThread());
    if (task_lock_.Try()) {
      // Only run once.
      if (!task_.is_null())
        task_.Run();
      task_.Reset();

      task_lock_.Release();
      task_pending_.Signal();
      return true;
    }
    return false;
  }

  void BindAndRun(GLuint texture_id) {
    // This is meant to be called on the upload thread where we don't have to
    // restore the previous texture binding.
    DCHECK(!checker_.CalledOnValidThread());
    base::AutoLock locked(task_lock_);
    if (!task_.is_null()) {
      glBindTexture(GL_TEXTURE_2D, texture_id);
      task_.Run();
      task_.Reset();
      glBindTexture(GL_TEXTURE_2D, 0);
      // Flush for synchronization between threads.
      glFlush();
      task_pending_.Signal();
    }
  }

  void Cancel() {
    base::AutoLock locked(task_lock_);
    task_.Reset();
    task_pending_.Signal();
  }

  bool TaskIsInProgress() {
    return !task_pending_.IsSignaled();
  }

  void WaitForTask() {
    task_pending_.Wait();
  }

 private:
  friend class base::RefCountedThreadSafe<PendingTask>;

  virtual ~PendingTask() {}

  base::ThreadChecker checker_;

  base::Lock task_lock_;
  base::Closure task_;
  base::WaitableEvent task_pending_;

  DISALLOW_COPY_AND_ASSIGN(PendingTask);
};

// Class which holds async pixel transfers state.
// The texture_id is accessed by either thread, but everything
// else accessed only on the main thread.
class TransferStateInternal
    : public base::RefCountedThreadSafe<TransferStateInternal> {
 public:
  TransferStateInternal(GLuint texture_id,
                        const AsyncTexImage2DParams& define_params)
      : texture_id_(texture_id), define_params_(define_params) {}

  bool TransferIsInProgress() {
    return pending_upload_task_.get() &&
           pending_upload_task_->TaskIsInProgress();
  }

  void BindTransfer() {
    TRACE_EVENT2("gpu", "BindAsyncTransfer",
                 "width", define_params_.width,
                 "height", define_params_.height);
    DCHECK(texture_id_);

    glBindTexture(GL_TEXTURE_2D, texture_id_);
    bind_callback_.Run();
  }

  void WaitForTransferCompletion() {
    TRACE_EVENT0("gpu", "WaitForTransferCompletion");
    DCHECK(pending_upload_task_.get());
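    // Run the task on this thread if the upload thread has not claimed it
    // yet; otherwise block until the upload thread finishes it.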
    if (!pending_upload_task_->TryRun()) {
      pending_upload_task_->WaitForTask();
    }
    pending_upload_task_ = NULL;
  }

  void CancelUpload() {
    TRACE_EVENT0("gpu", "CancelUpload");
    if (pending_upload_task_.get())
      pending_upload_task_->Cancel();
    pending_upload_task_ = NULL;
  }

  void ScheduleAsyncTexImage2D(
      const AsyncTexImage2DParams tex_params,
      const AsyncMemoryParams mem_params,
      scoped_refptr<AsyncPixelTransferUploadStats> texture_upload_stats,
      const base::Closure& bind_callback) {
    TRACE_EVENT_SYNTHETIC_DELAY_BEGIN("gpu.AsyncTexImage");
    pending_upload_task_ = new PendingTask(base::Bind(
        &TransferStateInternal::PerformAsyncTexImage2D,
        this,
        tex_params,
        mem_params,
        texture_upload_stats));
    transfer_message_loop_proxy()->PostTask(
        FROM_HERE,
        base::Bind(
            &PendingTask::BindAndRun, pending_upload_task_, texture_id_));

    // Save the late bind callback, so we can notify the client when it is
    // bound.
    bind_callback_ = bind_callback;
  }

  void ScheduleAsyncTexSubImage2D(
      AsyncTexSubImage2DParams tex_params,
      AsyncMemoryParams mem_params,
      scoped_refptr<AsyncPixelTransferUploadStats> texture_upload_stats) {
    TRACE_EVENT_SYNTHETIC_DELAY_BEGIN("gpu.AsyncTexImage");
    pending_upload_task_ = new PendingTask(base::Bind(
        &TransferStateInternal::PerformAsyncTexSubImage2D,
        this,
        tex_params,
        mem_params,
        texture_upload_stats));
    transfer_message_loop_proxy()->PostTask(
        FROM_HERE,
        base::Bind(
            &PendingTask::BindAndRun, pending_upload_task_, texture_id_));
  }

 private:
  friend class base::RefCountedThreadSafe<TransferStateInternal>;

  virtual ~TransferStateInternal() {
  }

  void PerformAsyncTexImage2D(
      AsyncTexImage2DParams tex_params,
      AsyncMemoryParams mem_params,
      scoped_refptr<AsyncPixelTransferUploadStats> texture_upload_stats) {
    TRACE_EVENT2("gpu",
                 "PerformAsyncTexImage",
                 "width",
                 tex_params.width,
                 "height",
                 tex_params.height);
    DCHECK_EQ(0, tex_params.level);

    base::TimeTicks begin_time;
    if (texture_upload_stats.get())
      begin_time = base::TimeTicks::HighResNow();

    void* data = mem_params.GetDataAddress();

    {
      TRACE_EVENT0("gpu", "glTexImage2D");
      glTexImage2D(GL_TEXTURE_2D,
                   tex_params.level,
                   tex_params.internal_format,
                   tex_params.width,
                   tex_params.height,
                   tex_params.border,
                   tex_params.format,
                   tex_params.type,
                   data);
      TRACE_EVENT_SYNTHETIC_DELAY_END("gpu.AsyncTexImage");
    }

    if (texture_upload_stats.get()) {
      texture_upload_stats->AddUpload(base::TimeTicks::HighResNow() -
                                      begin_time);
    }
  }

  void PerformAsyncTexSubImage2D(
      AsyncTexSubImage2DParams tex_params,
      AsyncMemoryParams mem_params,
      scoped_refptr<AsyncPixelTransferUploadStats> texture_upload_stats) {
    TRACE_EVENT2("gpu",
                 "PerformAsyncTexSubImage2D",
                 "width",
                 tex_params.width,
                 "height",
                 tex_params.height);
    DCHECK_EQ(0, tex_params.level);

    base::TimeTicks begin_time;
    if (texture_upload_stats.get())
      begin_time = base::TimeTicks::HighResNow();

    void* data = mem_params.GetDataAddress();
    {
      TRACE_EVENT0("gpu", "glTexSubImage2D");
      glTexSubImage2D(GL_TEXTURE_2D,
                      tex_params.level,
                      tex_params.xoffset,
                      tex_params.yoffset,
                      tex_params.width,
                      tex_params.height,
                      tex_params.format,
                      tex_params.type,
                      data);
      TRACE_EVENT_SYNTHETIC_DELAY_END("gpu.AsyncTexImage");
    }

    if (texture_upload_stats.get()) {
      texture_upload_stats->AddUpload(base::TimeTicks::HighResNow() -
                                      begin_time);
    }
  }

  scoped_refptr<PendingTask> pending_upload_task_;

  GLuint texture_id_;

  // Definition params for texture that needs binding.
  AsyncTexImage2DParams define_params_;

  // Callback to invoke when AsyncTexImage2D is complete
  // and the client can safely use the texture. This occurs
  // during BindCompletedAsyncTransfers().
  base::Closure bind_callback_;
};

}  // namespace

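// Per-texture delegate. Transfer state is ref-counted so that a task already
// posted to the upload thread keeps its state alive; the destructor cancels
// any upload that has not yet run.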
class AsyncPixelTransferDelegateShareGroup
    : public AsyncPixelTransferDelegate,
      public base::SupportsWeakPtr<AsyncPixelTransferDelegateShareGroup> {
 public:
  AsyncPixelTransferDelegateShareGroup(
      AsyncPixelTransferManagerShareGroup::SharedState* shared_state,
      GLuint texture_id,
      const AsyncTexImage2DParams& define_params);
  ~AsyncPixelTransferDelegateShareGroup() override;

  void BindTransfer() { state_->BindTransfer(); }

  // Implement AsyncPixelTransferDelegate:
  void AsyncTexImage2D(const AsyncTexImage2DParams& tex_params,
                       const AsyncMemoryParams& mem_params,
                       const base::Closure& bind_callback) override;
  void AsyncTexSubImage2D(const AsyncTexSubImage2DParams& tex_params,
                          const AsyncMemoryParams& mem_params) override;
  bool TransferIsInProgress() override;
  void WaitForTransferCompletion() override;

 private:
  // A raw pointer is safe because the SharedState is owned by the Manager,
  // which owns this Delegate.
  AsyncPixelTransferManagerShareGroup::SharedState* shared_state_;
  scoped_refptr<TransferStateInternal> state_;

  DISALLOW_COPY_AND_ASSIGN(AsyncPixelTransferDelegateShareGroup);
};

AsyncPixelTransferDelegateShareGroup::AsyncPixelTransferDelegateShareGroup(
    AsyncPixelTransferManagerShareGroup::SharedState* shared_state,
    GLuint texture_id,
    const AsyncTexImage2DParams& define_params)
    : shared_state_(shared_state),
      state_(new TransferStateInternal(texture_id, define_params)) {}

AsyncPixelTransferDelegateShareGroup::~AsyncPixelTransferDelegateShareGroup() {
  TRACE_EVENT0("gpu", " ~AsyncPixelTransferDelegateShareGroup");
  state_->CancelUpload();
}

bool AsyncPixelTransferDelegateShareGroup::TransferIsInProgress() {
  return state_->TransferIsInProgress();
}

void AsyncPixelTransferDelegateShareGroup::WaitForTransferCompletion() {
  if (state_->TransferIsInProgress()) {
    state_->WaitForTransferCompletion();
    DCHECK(!state_->TransferIsInProgress());
  }

  // Fast track the BindTransfer, if applicable.
  for (AsyncPixelTransferManagerShareGroup::SharedState::TransferQueue::iterator
           iter = shared_state_->pending_allocations.begin();
       iter != shared_state_->pending_allocations.end();
       ++iter) {
    if (iter->get() != this)
      continue;

    shared_state_->pending_allocations.erase(iter);
    BindTransfer();
    break;
  }
}

void AsyncPixelTransferDelegateShareGroup::AsyncTexImage2D(
    const AsyncTexImage2DParams& tex_params,
    const AsyncMemoryParams& mem_params,
    const base::Closure& bind_callback) {
  DCHECK(!state_->TransferIsInProgress());
  DCHECK_EQ(static_cast<GLenum>(GL_TEXTURE_2D), tex_params.target);
  DCHECK_EQ(tex_params.level, 0);

  shared_state_->pending_allocations.push_back(AsWeakPtr());
  state_->ScheduleAsyncTexImage2D(tex_params,
                                  mem_params,
                                  shared_state_->texture_upload_stats,
                                  bind_callback);
}

void AsyncPixelTransferDelegateShareGroup::AsyncTexSubImage2D(
    const AsyncTexSubImage2DParams& tex_params,
    const AsyncMemoryParams& mem_params) {
  TRACE_EVENT2("gpu", "AsyncTexSubImage2D",
               "width", tex_params.width,
               "height", tex_params.height);
  DCHECK(!state_->TransferIsInProgress());
  DCHECK_EQ(static_cast<GLenum>(GL_TEXTURE_2D), tex_params.target);
  DCHECK_EQ(tex_params.level, 0);

  state_->ScheduleAsyncTexSubImage2D(
      tex_params, mem_params, shared_state_->texture_upload_stats);
}

AsyncPixelTransferManagerShareGroup::SharedState::SharedState()
    // TODO(reveman): Skip this if --enable-gpu-benchmarking is not present.
    : texture_upload_stats(new AsyncPixelTransferUploadStats) {}

AsyncPixelTransferManagerShareGroup::SharedState::~SharedState() {}

AsyncPixelTransferManagerShareGroup::AsyncPixelTransferManagerShareGroup(
    gfx::GLContext* context) {
  g_transfer_thread.Pointer()->InitializeOnMainThread(context);
}

AsyncPixelTransferManagerShareGroup::~AsyncPixelTransferManagerShareGroup() {}

void AsyncPixelTransferManagerShareGroup::BindCompletedAsyncTransfers() {
  scoped_ptr<gfx::ScopedTextureBinder> texture_binder;

  while (!shared_state_.pending_allocations.empty()) {
    if (!shared_state_.pending_allocations.front().get()) {
      shared_state_.pending_allocations.pop_front();
      continue;
    }
    AsyncPixelTransferDelegateShareGroup* delegate =
        shared_state_.pending_allocations.front().get();
    // Terminate early, as all transfers finish in order, currently.
    if (delegate->TransferIsInProgress())
      break;

    if (!texture_binder)
      texture_binder.reset(new gfx::ScopedTextureBinder(GL_TEXTURE_2D, 0));

    // Used to set tex info from the gles2 cmd decoder once upload has
    // finished (it'll bind the texture and call a callback).
    delegate->BindTransfer();

    shared_state_.pending_allocations.pop_front();
  }
}

void AsyncPixelTransferManagerShareGroup::AsyncNotifyCompletion(
    const AsyncMemoryParams& mem_params,
    AsyncPixelTransferCompletionObserver* observer) {
  // Post a PerformNotifyCompletion task to the upload thread. This task
  // will run after all async transfers are complete.
  transfer_message_loop_proxy()->PostTask(
      FROM_HERE,
      base::Bind(&PerformNotifyCompletion,
                 mem_params,
                 make_scoped_refptr(observer)));
}

uint32 AsyncPixelTransferManagerShareGroup::GetTextureUploadCount() {
  return shared_state_.texture_upload_stats->GetStats(NULL);
}

base::TimeDelta
AsyncPixelTransferManagerShareGroup::GetTotalTextureUploadTime() {
  base::TimeDelta total_texture_upload_time;
  shared_state_.texture_upload_stats->GetStats(&total_texture_upload_time);
  return total_texture_upload_time;
}

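// Uploads are driven entirely by the dedicated transfer thread, so there is
// no deferred work for the decoder to pump here.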
void AsyncPixelTransferManagerShareGroup::ProcessMorePendingTransfers() {
}

bool AsyncPixelTransferManagerShareGroup::NeedsProcessMorePendingTransfers() {
  return false;
}

void AsyncPixelTransferManagerShareGroup::WaitAllAsyncTexImage2D() {
  if (shared_state_.pending_allocations.empty())
    return;

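  // Transfers are expected to complete in order on the single upload thread,
  // so waiting on the most recently scheduled one waits for all of them.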
  AsyncPixelTransferDelegateShareGroup* delegate =
      shared_state_.pending_allocations.back().get();
  if (delegate)
    delegate->WaitForTransferCompletion();
}

AsyncPixelTransferDelegate*
AsyncPixelTransferManagerShareGroup::CreatePixelTransferDelegateImpl(
    gles2::TextureRef* ref,
    const AsyncTexImage2DParams& define_params) {
  return new AsyncPixelTransferDelegateShareGroup(
      &shared_state_, ref->service_id(), define_params);
}

}  // namespace gpu