1 // Copyright 2013 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "gpu/command_buffer/service/async_pixel_transfer_manager_share_group.h"
10 #include "base/debug/trace_event.h"
11 #include "base/lazy_instance.h"
12 #include "base/logging.h"
13 #include "base/memory/ref_counted.h"
14 #include "base/memory/weak_ptr.h"
15 #include "base/synchronization/cancellation_flag.h"
16 #include "base/synchronization/lock.h"
17 #include "base/synchronization/waitable_event.h"
18 #include "base/threading/thread.h"
19 #include "base/threading/thread_checker.h"
20 #include "gpu/command_buffer/service/async_pixel_transfer_delegate.h"
21 #include "gpu/command_buffer/service/safe_shared_memory_pool.h"
22 #include "ui/gl/gl_bindings.h"
23 #include "ui/gl/gl_context.h"
24 #include "ui/gl/gl_surface.h"
25 #include "ui/gl/gpu_preference.h"
26 #include "ui/gl/scoped_binders.h"
// Thread name for the singleton upload thread (see TransferThread below).
32 const char kAsyncTransferThreadName[] = "AsyncTransferThread";
// Runs on the transfer thread. Rewrites |mem_params| so its shared_memory
// field points at the pool-pinned handle held by |safe_shared_memory| (so the
// observer never sees memory the caller may have freed), then invokes the
// observer's DidComplete() with the safe params.
34 void PerformNotifyCompletion(
35 AsyncMemoryParams mem_params,
36 ScopedSafeSharedMemory* safe_shared_memory,
37 scoped_refptr<AsyncPixelTransferCompletionObserver> observer) {
38 TRACE_EVENT0("gpu", "PerformNotifyCompletion");
39 AsyncMemoryParams safe_mem_params = mem_params;
40 safe_mem_params.shared_memory = safe_shared_memory->shared_memory();
41 observer->DidComplete(safe_mem_params);
44 // TODO(backer): Factor out common thread scheduling logic from the EGL and
45 // ShareGroup implementations. http://crbug.com/239889
// Background thread that owns a small offscreen GL surface plus a GL context
// in the parent context's share group, and performs texture uploads there.
// Instantiated only through the leaky LazyInstance g_transfer_thread below.
46 class TransferThread : public base::Thread {
49 : base::Thread(kAsyncTransferThreadName),
// Uploads are best-effort background work on these platforms.
// NOTE(review): the matching #endif is elided in this excerpt.
52 #if defined(OS_ANDROID) || defined(OS_LINUX)
53 SetPriority(base::kThreadPriority_Background);
57 virtual ~TransferThread() {
58 // The only instance of this class was declared leaky.
// Posts InitializeOnTransferThread() to this thread and blocks the calling
// (main) thread on |wait_for_init| until it has signaled.
62 void InitializeOnMainThread(gfx::GLContext* parent_context) {
63 TRACE_EVENT0("gpu", "TransferThread::InitializeOnMainThread");
// Manual-reset (true), initially unsignaled (false); signaled on every
// exit path of InitializeOnTransferThread below.
67 base::WaitableEvent wait_for_init(true, false);
68 message_loop_proxy()->PostTask(
70 base::Bind(&TransferThread::InitializeOnTransferThread,
71 base::Unretained(this),
72 base::Unretained(parent_context),
77 virtual void CleanUp() OVERRIDE {
// Pool that pins shared memory for the lifetime of in-flight uploads.
82 SafeSharedMemoryPool* safe_shared_memory_pool() {
83 return &safe_shared_memory_pool_;
// GL objects created and used only on this transfer thread.
89 scoped_refptr<gfx::GLSurface> surface_;
90 scoped_refptr<gfx::GLContext> context_;
91 SafeSharedMemoryPool safe_shared_memory_pool_;
// Runs on the transfer thread. Creates the offscreen surface and a
// share-group context, makes them current, and signals |caller_wait| on
// every path (including the error paths) so the main thread never hangs.
93 void InitializeOnTransferThread(gfx::GLContext* parent_context,
94 base::WaitableEvent* caller_wait) {
95 TRACE_EVENT0("gpu", "InitializeOnTransferThread");
97 if (!parent_context) {
98 LOG(ERROR) << "No parent context provided.";
99 caller_wait->Signal();
103 surface_ = gfx::GLSurface::CreateOffscreenGLSurface(gfx::Size(1, 1));
104 if (!surface_.get()) {
105 LOG(ERROR) << "Unable to create GLSurface";
106 caller_wait->Signal();
110 // TODO(backer): This is coded for integrated GPUs. For discrete GPUs
111 // we would probably want to use a PBO texture upload for a true async
112 // upload (that would hopefully be optimized as a DMA transfer by the
// Share-group context: textures uploaded here become visible to the
// parent context's share group.
114 context_ = gfx::GLContext::CreateGLContext(parent_context->share_group(),
116 gfx::PreferIntegratedGpu);
117 if (!context_.get()) {
118 LOG(ERROR) << "Unable to create GLContext.";
119 caller_wait->Signal();
123 context_->MakeCurrent(surface_.get());
125 caller_wait->Signal();
128 DISALLOW_COPY_AND_ASSIGN(TransferThread);
// Process-wide transfer thread. Leaky: intentionally never destroyed (see
// the ~TransferThread comment above).
131 base::LazyInstance<TransferThread>::Leaky
132 g_transfer_thread = LAZY_INSTANCE_INITIALIZER;
// Shorthand for posting tasks to the transfer thread's message loop.
134 base::MessageLoopProxy* transfer_message_loop_proxy() {
135 return g_transfer_thread.Pointer()->message_loop_proxy().get();
// Shorthand for the transfer thread's shared-memory pinning pool.
138 SafeSharedMemoryPool* safe_shared_memory_pool() {
139 return g_transfer_thread.Pointer()->safe_shared_memory_pool();
// Ref-counted wrapper around a single upload closure that may be executed
// either on the main thread (TryRun) or on the upload thread (BindAndRun),
// or dropped entirely (Cancel). |task_pending_| is signaled once the task
// has run or been cancelled, allowing WaitForTask()/TaskIsInProgress().
142 class PendingTask : public base::RefCountedThreadSafe<PendingTask> {
144 explicit PendingTask(const base::Closure& task)
// |task_pending_|: manual-reset (true), initially unsignaled (false).
145 : task_(task), task_pending_(true, false) {}
148 // This is meant to be called on the main thread where the texture
// Non-blocking: Try() fails (and we bail without running) if the upload
// thread currently holds |task_lock_| in BindAndRun below.
150 DCHECK(checker_.CalledOnValidThread());
151 if (task_lock_.Try()) {
153 if (!task_.is_null())
157 task_lock_.Release();
158 task_pending_.Signal();
164 void BindAndRun(GLuint texture_id) {
165 // This is meant to be called on the upload thread where we don't have to
166 // restore the previous texture binding.
167 DCHECK(!checker_.CalledOnValidThread());
168 base::AutoLock locked(task_lock_);
169 if (!task_.is_null()) {
170 glBindTexture(GL_TEXTURE_2D, texture_id);
173 glBindTexture(GL_TEXTURE_2D, 0);
174 // Flush for synchronization between threads.
176 task_pending_.Signal();
// Cancel path: under the lock the task is dropped, then the event is
// signaled so waiters are released.
181 base::AutoLock locked(task_lock_);
183 task_pending_.Signal();
// True until the task has either run or been cancelled.
186 bool TaskIsInProgress() {
187 return !task_pending_.IsSignaled();
// WaitForTask: blocks until the task has run or been cancelled.
191 task_pending_.Wait();
195 friend class base::RefCountedThreadSafe<PendingTask>;
197 virtual ~PendingTask() {}
// Bound to the thread that constructed this task (the main thread).
199 base::ThreadChecker checker_;
// Guards |task_| against concurrent TryRun / BindAndRun / Cancel.
201 base::Lock task_lock_;
203 base::WaitableEvent task_pending_;
205 DISALLOW_COPY_AND_ASSIGN(PendingTask);
208 // Class which holds async pixel transfers state.
209 // The texture_id is accessed by either thread, but everything
210 // else accessed only on the main thread.
211 class TransferStateInternal
212 : public base::RefCountedThreadSafe<TransferStateInternal> {
214 TransferStateInternal(GLuint texture_id,
215 const AsyncTexImage2DParams& define_params)
216 : texture_id_(texture_id), define_params_(define_params) {}
// True while a scheduled upload task has neither run nor been cancelled.
218 bool TransferIsInProgress() {
219 return pending_upload_task_.get() &&
220 pending_upload_task_->TaskIsInProgress();
// Binds the now-defined texture on the caller's (decoder's) context and
// runs |bind_callback_| so the client can start using the texture.
223 void BindTransfer() {
224 TRACE_EVENT2("gpu", "BindAsyncTransfer",
225 "width", define_params_.width,
226 "height", define_params_.height);
229 glBindTexture(GL_TEXTURE_2D, texture_id_);
230 bind_callback_.Run();
// Completes the pending upload synchronously: runs it inline on this
// thread when TryRun succeeds, otherwise blocks until the transfer thread
// has finished it. Clears |pending_upload_task_| afterwards.
233 void WaitForTransferCompletion() {
234 TRACE_EVENT0("gpu", "WaitForTransferCompletion");
235 DCHECK(pending_upload_task_.get());
236 if (!pending_upload_task_->TryRun()) {
237 pending_upload_task_->WaitForTask();
239 pending_upload_task_ = NULL;
// Drops any not-yet-run upload task (safe to call with none pending).
242 void CancelUpload() {
243 TRACE_EVENT0("gpu", "CancelUpload");
244 if (pending_upload_task_.get())
245 pending_upload_task_->Cancel();
246 pending_upload_task_ = NULL;
// Packages a glTexImage2D upload as a PendingTask and posts it to the
// transfer thread. |bind_callback| is stashed for later BindTransfer().
249 void ScheduleAsyncTexImage2D(
250 const AsyncTexImage2DParams tex_params,
251 const AsyncMemoryParams mem_params,
252 scoped_refptr<AsyncPixelTransferUploadStats> texture_upload_stats,
253 const base::Closure& bind_callback) {
254 pending_upload_task_ = new PendingTask(base::Bind(
255 &TransferStateInternal::PerformAsyncTexImage2D,
259 // Duplicate the shared memory so there is no way we can get
260 // a use-after-free of the raw pixels.
261 base::Owned(new ScopedSafeSharedMemory(safe_shared_memory_pool(),
262 mem_params.shared_memory,
263 mem_params.shm_size)),
264 texture_upload_stats));
265 transfer_message_loop_proxy()->PostTask(
268 &PendingTask::BindAndRun, pending_upload_task_, texture_id_));
270 // Save the late bind callback, so we can notify the client when it is
272 bind_callback_ = bind_callback;
// Same scheduling path for glTexSubImage2D; no bind callback because the
// texture was already defined by a prior AsyncTexImage2D.
275 void ScheduleAsyncTexSubImage2D(
276 AsyncTexSubImage2DParams tex_params,
277 AsyncMemoryParams mem_params,
278 scoped_refptr<AsyncPixelTransferUploadStats> texture_upload_stats) {
279 pending_upload_task_ = new PendingTask(base::Bind(
280 &TransferStateInternal::PerformAsyncTexSubImage2D,
284 base::Owned(new ScopedSafeSharedMemory(safe_shared_memory_pool(),
285 mem_params.shared_memory,
286 mem_params.shm_size)),
287 texture_upload_stats));
288 transfer_message_loop_proxy()->PostTask(
291 &PendingTask::BindAndRun, pending_upload_task_, texture_id_));
295 friend class base::RefCountedThreadSafe<TransferStateInternal>;
297 virtual ~TransferStateInternal() {
// Worker run via PendingTask (usually on the transfer thread, or inline
// via TryRun): defines the texture with glTexImage2D and records timing
// into |texture_upload_stats| when stats collection is enabled.
300 void PerformAsyncTexImage2D(
301 AsyncTexImage2DParams tex_params,
302 AsyncMemoryParams mem_params,
303 ScopedSafeSharedMemory* safe_shared_memory,
304 scoped_refptr<AsyncPixelTransferUploadStats> texture_upload_stats) {
306 "PerformAsyncTexImage",
311 DCHECK_EQ(0, tex_params.level);
313 base::TimeTicks begin_time;
314 if (texture_upload_stats.get())
315 begin_time = base::TimeTicks::HighResNow();
// Resolve the pixel pointer inside the pinned shared-memory block.
318 AsyncPixelTransferDelegate::GetAddress(safe_shared_memory, mem_params);
321 TRACE_EVENT0("gpu", "glTexImage2D");
322 glTexImage2D(GL_TEXTURE_2D,
324 tex_params.internal_format,
333 if (texture_upload_stats.get()) {
334 texture_upload_stats->AddUpload(base::TimeTicks::HighResNow() -
// Worker run via PendingTask: updates existing texel data with
// glTexSubImage2D, with the same stats bookkeeping as above.
339 void PerformAsyncTexSubImage2D(
340 AsyncTexSubImage2DParams tex_params,
341 AsyncMemoryParams mem_params,
342 ScopedSafeSharedMemory* safe_shared_memory,
343 scoped_refptr<AsyncPixelTransferUploadStats> texture_upload_stats) {
345 "PerformAsyncTexSubImage2D",
350 DCHECK_EQ(0, tex_params.level);
352 base::TimeTicks begin_time;
353 if (texture_upload_stats.get())
354 begin_time = base::TimeTicks::HighResNow();
357 AsyncPixelTransferDelegate::GetAddress(safe_shared_memory, mem_params);
360 TRACE_EVENT0("gpu", "glTexSubImage2D");
361 glTexSubImage2D(GL_TEXTURE_2D,
372 if (texture_upload_stats.get()) {
373 texture_upload_stats->AddUpload(base::TimeTicks::HighResNow() -
// At most one upload in flight per texture at a time.
378 scoped_refptr<PendingTask> pending_upload_task_;
382 // Definition params for texture that needs binding.
383 AsyncTexImage2DParams define_params_;
385 // Callback to invoke when AsyncTexImage2D is complete
386 // and the client can safely use the texture. This occurs
387 // during BindCompletedAsyncTransfers().
388 base::Closure bind_callback_;
// Per-texture delegate. Thin wrapper that forwards the AsyncPixelTransfer
// API onto a ref-counted TransferStateInternal, and registers itself (as a
// weak pointer) on the manager's SharedState pending-allocation queue.
393 class AsyncPixelTransferDelegateShareGroup
394 : public AsyncPixelTransferDelegate,
395 public base::SupportsWeakPtr<AsyncPixelTransferDelegateShareGroup> {
397 AsyncPixelTransferDelegateShareGroup(
398 AsyncPixelTransferManagerShareGroup::SharedState* shared_state,
400 const AsyncTexImage2DParams& define_params);
401 virtual ~AsyncPixelTransferDelegateShareGroup();
// Called from BindCompletedAsyncTransfers()/WaitForTransferCompletion().
403 void BindTransfer() { state_->BindTransfer(); }
405 // Implement AsyncPixelTransferDelegate:
406 virtual void AsyncTexImage2D(
407 const AsyncTexImage2DParams& tex_params,
408 const AsyncMemoryParams& mem_params,
409 const base::Closure& bind_callback) OVERRIDE;
410 virtual void AsyncTexSubImage2D(
411 const AsyncTexSubImage2DParams& tex_params,
412 const AsyncMemoryParams& mem_params) OVERRIDE;
413 virtual bool TransferIsInProgress() OVERRIDE;
414 virtual void WaitForTransferCompletion() OVERRIDE;
417 // A raw pointer is safe because the SharedState is owned by the Manager,
418 // which owns this Delegate.
419 AsyncPixelTransferManagerShareGroup::SharedState* shared_state_;
420 scoped_refptr<TransferStateInternal> state_;
422 DISALLOW_COPY_AND_ASSIGN(AsyncPixelTransferDelegateShareGroup);
// Creates the per-texture transfer state.
// NOTE(review): the |texture_id| parameter line is elided in this excerpt;
// it is referenced in the initializer list below.
425 AsyncPixelTransferDelegateShareGroup::AsyncPixelTransferDelegateShareGroup(
426 AsyncPixelTransferManagerShareGroup::SharedState* shared_state,
428 const AsyncTexImage2DParams& define_params)
429 : shared_state_(shared_state),
430 state_(new TransferStateInternal(texture_id, define_params)) {}
// Cancels any in-flight upload so the transfer thread drops the task instead
// of touching a texture whose delegate is gone.
432 AsyncPixelTransferDelegateShareGroup::~AsyncPixelTransferDelegateShareGroup() {
433 TRACE_EVENT0("gpu", " ~AsyncPixelTransferDelegateShareGroup");
434 state_->CancelUpload();
// Forwards to the shared transfer state.
437 bool AsyncPixelTransferDelegateShareGroup::TransferIsInProgress() {
438 return state_->TransferIsInProgress();
// Blocks until this delegate's upload is done, then removes this delegate
// from the manager's pending-allocation queue so BindCompletedAsyncTransfers
// won't process it again.
// NOTE(review): the loop increment, the BindTransfer() call, and the loop
// body's closing braces are elided in this excerpt.
441 void AsyncPixelTransferDelegateShareGroup::WaitForTransferCompletion() {
442 if (state_->TransferIsInProgress()) {
443 state_->WaitForTransferCompletion();
444 DCHECK(!state_->TransferIsInProgress());
447 // Fast track the BindTransfer, if applicable.
448 for (AsyncPixelTransferManagerShareGroup::SharedState::TransferQueue::iterator
449 iter = shared_state_->pending_allocations.begin();
450 iter != shared_state_->pending_allocations.end();
452 if (iter->get() != this)
455 shared_state_->pending_allocations.erase(iter);
// Queues an async glTexImage2D. Registers this delegate (weakly) with the
// shared pending-allocation queue so the manager can later run
// |bind_callback| from BindCompletedAsyncTransfers(). Preconditions
// (DCHECKs): valid shared memory, in-bounds data range, no transfer already
// in flight, 2D target, mip level 0.
461 void AsyncPixelTransferDelegateShareGroup::AsyncTexImage2D(
462 const AsyncTexImage2DParams& tex_params,
463 const AsyncMemoryParams& mem_params,
464 const base::Closure& bind_callback) {
465 DCHECK(mem_params.shared_memory);
466 DCHECK_LE(mem_params.shm_data_offset + mem_params.shm_data_size,
467 mem_params.shm_size);
468 DCHECK(!state_->TransferIsInProgress());
469 DCHECK_EQ(static_cast<GLenum>(GL_TEXTURE_2D), tex_params.target);
470 DCHECK_EQ(tex_params.level, 0);
// Weak pointer: if this delegate dies first, the queue entry goes null and
// is skipped by BindCompletedAsyncTransfers().
472 shared_state_->pending_allocations.push_back(AsWeakPtr());
473 state_->ScheduleAsyncTexImage2D(tex_params,
475 shared_state_->texture_upload_stats,
// Queues an async glTexSubImage2D. No bind callback / pending-allocation
// entry: the texture was already defined. Same DCHECK preconditions as
// AsyncTexImage2D above.
479 void AsyncPixelTransferDelegateShareGroup::AsyncTexSubImage2D(
480 const AsyncTexSubImage2DParams& tex_params,
481 const AsyncMemoryParams& mem_params) {
482 TRACE_EVENT2("gpu", "AsyncTexSubImage2D",
483 "width", tex_params.width,
484 "height", tex_params.height);
485 DCHECK(!state_->TransferIsInProgress());
486 DCHECK(mem_params.shared_memory);
487 DCHECK_LE(mem_params.shm_data_offset + mem_params.shm_data_size,
488 mem_params.shm_size);
489 DCHECK_EQ(static_cast<GLenum>(GL_TEXTURE_2D), tex_params.target);
490 DCHECK_EQ(tex_params.level, 0);
492 state_->ScheduleAsyncTexSubImage2D(
493 tex_params, mem_params, shared_state_->texture_upload_stats);
// SharedState: upload-stats collector plus (per the class declaration) the
// pending-allocation queue, shared by the manager and all its delegates.
496 AsyncPixelTransferManagerShareGroup::SharedState::SharedState()
497 // TODO(reveman): Skip this if --enable-gpu-benchmarking is not present.
498 : texture_upload_stats(new AsyncPixelTransferUploadStats) {}
500 AsyncPixelTransferManagerShareGroup::SharedState::~SharedState() {}
// Lazily spins up (and, on first use, GL-initializes) the shared transfer
// thread against the decoder's context. Blocks until initialization is done
// (see TransferThread::InitializeOnMainThread).
502 AsyncPixelTransferManagerShareGroup::AsyncPixelTransferManagerShareGroup(
503 gfx::GLContext* context) {
504 g_transfer_thread.Pointer()->InitializeOnMainThread(context);
// The transfer thread itself is leaky and deliberately not torn down here.
507 AsyncPixelTransferManagerShareGroup::~AsyncPixelTransferManagerShareGroup() {}
// Drains the pending-allocation queue in FIFO order: skips entries whose
// delegate has been destroyed (null weak pointer), stops at the first
// still-in-progress transfer, and runs BindTransfer() for each completed one.
// NOTE(review): several lines (continue/break statements, loop braces) are
// elided in this excerpt.
509 void AsyncPixelTransferManagerShareGroup::BindCompletedAsyncTransfers() {
// Lazily created on the first completed transfer; restores the previous
// GL_TEXTURE_2D binding when this function returns.
510 scoped_ptr<gfx::ScopedTextureBinder> texture_binder;
512 while (!shared_state_.pending_allocations.empty()) {
513 if (!shared_state_.pending_allocations.front().get()) {
514 shared_state_.pending_allocations.pop_front();
517 AsyncPixelTransferDelegateShareGroup* delegate =
518 shared_state_.pending_allocations.front().get();
519 // Terminate early, as all transfers finish in order, currently.
520 if (delegate->TransferIsInProgress())
524 texture_binder.reset(new gfx::ScopedTextureBinder(GL_TEXTURE_2D, 0));
526 // Used to set tex info from the gles2 cmd decoder once upload has
527 // finished (it'll bind the texture and call a callback).
528 delegate->BindTransfer();
530 shared_state_.pending_allocations.pop_front();
// Notifies |observer| once all previously queued uploads are done, by
// posting PerformNotifyCompletion behind them on the (serial) transfer
// thread. The shared memory is pinned via ScopedSafeSharedMemory (owned by
// the posted task) and the observer is ref-counted across threads.
534 void AsyncPixelTransferManagerShareGroup::AsyncNotifyCompletion(
535 const AsyncMemoryParams& mem_params,
536 AsyncPixelTransferCompletionObserver* observer) {
537 DCHECK(mem_params.shared_memory);
538 DCHECK_LE(mem_params.shm_data_offset + mem_params.shm_data_size,
539 mem_params.shm_size);
540 // Post a PerformNotifyCompletion task to the upload thread. This task
541 // will run after all async transfers are complete.
542 transfer_message_loop_proxy()->PostTask(
544 base::Bind(&PerformNotifyCompletion,
547 new ScopedSafeSharedMemory(safe_shared_memory_pool(),
548 mem_params.shared_memory,
549 mem_params.shm_size)),
550 make_scoped_refptr(observer)));
// Number of uploads recorded so far; passing NULL skips the time total.
553 uint32 AsyncPixelTransferManagerShareGroup::GetTextureUploadCount() {
554 return shared_state_.texture_upload_stats->GetStats(NULL);
// Total wall time spent in uploads.
// NOTE(review): the base::TimeDelta return-type line preceding this one is
// elided in this excerpt.
558 AsyncPixelTransferManagerShareGroup::GetTotalTextureUploadTime() {
559 base::TimeDelta total_texture_upload_time;
560 shared_state_.texture_upload_stats->GetStats(&total_texture_upload_time);
561 return total_texture_upload_time;
// Deferred-processing hooks of the AsyncPixelTransferManager interface.
// NOTE(review): the bodies are elided in this excerpt; this implementation
// processes transfers on its own thread, so these appear to be no-op stubs
// — confirm against the interface header.
564 void AsyncPixelTransferManagerShareGroup::ProcessMorePendingTransfers() {
567 bool AsyncPixelTransferManagerShareGroup::NeedsProcessMorePendingTransfers() {
// Factory for per-texture delegates; caller (the base manager) takes
// ownership of the returned object. The delegate keeps only the texture's
// GL service id, not the TextureRef.
571 AsyncPixelTransferDelegate*
572 AsyncPixelTransferManagerShareGroup::CreatePixelTransferDelegateImpl(
573 gles2::TextureRef* ref,
574 const AsyncTexImage2DParams& define_params) {
575 return new AsyncPixelTransferDelegateShareGroup(
576 &shared_state_, ref->service_id(), define_params);