1 // Copyright 2013 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "gpu/command_buffer/service/in_process_command_buffer.h"
10 #include <GLES2/gl2.h>
11 #ifndef GL_GLEXT_PROTOTYPES
12 #define GL_GLEXT_PROTOTYPES 1
14 #include <GLES2/gl2ext.h>
15 #include <GLES2/gl2extchromium.h>
17 #include "base/bind.h"
18 #include "base/bind_helpers.h"
19 #include "base/lazy_instance.h"
20 #include "base/logging.h"
21 #include "base/memory/weak_ptr.h"
22 #include "base/message_loop/message_loop_proxy.h"
23 #include "base/sequence_checker.h"
24 #include "base/threading/thread.h"
25 #include "gpu/command_buffer/service/command_buffer_service.h"
26 #include "gpu/command_buffer/service/context_group.h"
27 #include "gpu/command_buffer/service/gl_context_virtual.h"
28 #include "gpu/command_buffer/service/gpu_control_service.h"
29 #include "gpu/command_buffer/service/gpu_scheduler.h"
30 #include "gpu/command_buffer/service/image_manager.h"
31 #include "gpu/command_buffer/service/transfer_buffer_manager.h"
32 #include "ui/gfx/size.h"
33 #include "ui/gl/gl_context.h"
34 #include "ui/gl/gl_image.h"
35 #include "ui/gl/gl_share_group.h"
37 #if defined(OS_ANDROID)
38 #include "gpu/command_buffer/service/stream_texture_manager_in_process_android.h"
39 #include "ui/gl/android/surface_texture.h"
// File-local state shared by all InProcessCommandBuffer instances.

// All contexts created with share_resources = true; used to propagate
// context-lost state to every sharing context (see OnContextLost()).
static base::LazyInstance<std::set<InProcessCommandBuffer*> >
    g_all_shared_contexts = LAZY_INSTANCE_INITIALIZER;
// When true, contexts are created through GLContextVirtual on top of a
// single shared real context (see InitializeOnGpuThread()).
static bool g_use_virtualized_gl_context = false;
// When true, GPU work goes through the global GpuCommandQueue instead of a
// dedicated thread (see SetScheduleCallback()).
static bool g_uses_explicit_scheduling = false;
// Factory injected via SetGpuMemoryBufferFactory(); may be NULL.
static GpuMemoryBufferFactory* g_gpu_memory_buffer_factory = NULL;
// Runs |task| and signals |completion| so the waiting client thread can
// resume. NOTE(review): the template header, a result out-parameter, and the
// body are outside this view — confirm against the full file.
static void RunTaskWithResult(base::Callback<T(void)> task,
                              base::WaitableEvent* completion) {
// Ref-counted base::Thread shared by all ThreadClient scheduler clients; it
// stays alive as long as any client holds a reference.
class GpuInProcessThread
    : public base::Thread,
      public base::RefCountedThreadSafe<GpuInProcessThread> {
  // Private destructor: lifetime is managed by RefCountedThreadSafe.
  friend class base::RefCountedThreadSafe<GpuInProcessThread>;
  virtual ~GpuInProcessThread();

  DISALLOW_COPY_AND_ASSIGN(GpuInProcessThread);
// Named "GpuThread" for debugging/tracing. Body (presumably a Start() call)
// is outside this view.
GpuInProcessThread::GpuInProcessThread() : base::Thread("GpuThread") {
// Destructor body is outside this view; presumably stops the thread — confirm.
GpuInProcessThread::~GpuInProcessThread() {
// Used with explicit scheduling when there is no dedicated GPU thread.
class GpuCommandQueue {
  // Enqueues |task| and notifies the schedule callback that work is pending.
  void QueueTask(const base::Closure& task);
  // Installs the callback run whenever new work is queued; may be set once.
  void SetScheduleCallback(const base::Closure& callback);

  // Guards |tasks_|.
  base::Lock tasks_lock_;
  // FIFO of pending GPU work.
  std::queue<base::Closure> tasks_;
  // Invoked from QueueTask() to request a pump via RunTasks().
  base::Closure schedule_callback_;

  DISALLOW_COPY_AND_ASSIGN(GpuCommandQueue);
// Members are default-constructed; nothing else to set up.
GpuCommandQueue::GpuCommandQueue() {}
// All queued work must have been drained (via RunTasks()) before teardown.
GpuCommandQueue::~GpuCommandQueue() {
  base::AutoLock lock(tasks_lock_);
  DCHECK(tasks_.empty());
// Queues |task| for later execution and pings the schedule callback so the
// embedder knows to call RunTasks(). (The tasks_.push line is outside this
// view.)
void GpuCommandQueue::QueueTask(const base::Closure& task) {
  base::AutoLock lock(tasks_lock_);
  // A callback must have been installed via SetScheduleCallback() first.
  DCHECK(!schedule_callback_.is_null());
  schedule_callback_.Run();
// Drains the queue on the calling thread. The lock is taken around each
// queue access (not across task execution) so running tasks may queue more
// work; the count is re-read after each task. Several loop-control lines
// are outside this view.
void GpuCommandQueue::RunTasks() {
  base::AutoLock lock(tasks_lock_);
  num_tasks = tasks_.size();
  // Re-acquire per iteration to pick up tasks queued while running.
  base::AutoLock lock(tasks_lock_);
  task = tasks_.front();
  num_tasks = tasks_.size();
// Installs |callback|; the DCHECK enforces it is only set once.
void GpuCommandQueue::SetScheduleCallback(const base::Closure& callback) {
  DCHECK(schedule_callback_.is_null());
  schedule_callback_ = callback;
// Lazily constructed global queue used when explicit scheduling is enabled.
static base::LazyInstance<GpuCommandQueue> g_gpu_queue =
    LAZY_INSTANCE_INITIALIZER;
// Shared plumbing for the two SchedulerClient flavors; tracks all live
// clients so thread-based clients can share one GpuInProcessThread.
class SchedulerClientBase : public InProcessCommandBuffer::SchedulerClient {
  // |need_thread| selects whether this client uses the shared GPU thread.
  explicit SchedulerClientBase(bool need_thread);
  virtual ~SchedulerClientBase();

  // True if any SchedulerClientBase instance is currently alive.
  static bool HasClients();

  // Shared GPU thread; null for queue-based clients.
  scoped_refptr<GpuInProcessThread> thread_;

  // Set of live clients, guarded by |all_clients_lock_|.
  static base::LazyInstance<std::set<SchedulerClientBase*> > all_clients_;
  static base::LazyInstance<base::Lock> all_clients_lock_;
// Definitions of SchedulerClientBase's static members.
base::LazyInstance<std::set<SchedulerClientBase*> >
    SchedulerClientBase::all_clients_ = LAZY_INSTANCE_INITIALIZER;
base::LazyInstance<base::Lock> SchedulerClientBase::all_clients_lock_ =
    LAZY_INSTANCE_INITIALIZER;
165 SchedulerClientBase::SchedulerClientBase(bool need_thread) {
166 base::AutoLock(all_clients_lock_.Get());
168 if (!all_clients_.Get().empty()) {
169 SchedulerClientBase* other = *all_clients_.Get().begin();
170 thread_ = other->thread_;
171 DCHECK(thread_.get());
173 thread_ = new GpuInProcessThread;
176 all_clients_.Get().insert(this);
179 SchedulerClientBase::~SchedulerClientBase() {
180 base::AutoLock(all_clients_lock_.Get());
181 all_clients_.Get().erase(this);
184 bool SchedulerClientBase::HasClients() {
185 base::AutoLock(all_clients_lock_.Get());
186 return !all_clients_.Get().empty();
// A client that talks to the GPU thread
class ThreadClient : public SchedulerClientBase {
  // Posts |task| to the shared GPU thread's message loop.
  virtual void QueueTask(const base::Closure& task) OVERRIDE;
  // Posts |callback| to the GPU thread after a short delay.
  virtual void ScheduleIdleWork(const base::Closure& callback) OVERRIDE;
// need_thread = true, so the base class guarantees |thread_| is set.
ThreadClient::ThreadClient() : SchedulerClientBase(true) {
  DCHECK(thread_.get());
// Runs |task| asynchronously on the shared GPU thread.
void ThreadClient::QueueTask(const base::Closure& task) {
  thread_->message_loop()->PostTask(FROM_HERE, task);
// Schedules |callback| after a 5 ms delay, letting already-posted command
// work run first.
void ThreadClient::ScheduleIdleWork(const base::Closure& callback) {
  thread_->message_loop()->PostDelayedTask(
      FROM_HERE, callback, base::TimeDelta::FromMilliseconds(5));
// A client that talks to the GpuCommandQueue
class QueueClient : public SchedulerClientBase {
  // Forwards |task| to the global GpuCommandQueue.
  virtual void QueueTask(const base::Closure& task) OVERRIDE;
  // Currently does nothing; see the TODO in the definition.
  virtual void ScheduleIdleWork(const base::Closure& callback) OVERRIDE;
// need_thread = false: queue clients never own a GPU thread.
QueueClient::QueueClient() : SchedulerClientBase(false) {
  DCHECK(!thread_.get());
// Hands |task| to the global queue; the embedder later pumps it via
// InProcessCommandBuffer::ProcessGpuWorkOnCurrentThread().
void QueueClient::QueueTask(const base::Closure& task) {
  g_gpu_queue.Get().QueueTask(task);
// No idle work is scheduled in explicit-scheduling mode.
void QueueClient::ScheduleIdleWork(const base::Closure& callback) {
  // TODO(sievers): Should this do anything?
230 static scoped_ptr<InProcessCommandBuffer::SchedulerClient>
231 CreateSchedulerClient() {
232 scoped_ptr<InProcessCommandBuffer::SchedulerClient> client;
233 if (g_uses_explicit_scheduling)
234 client.reset(new QueueClient);
236 client.reset(new ThreadClient);
238 return client.Pass();
  // RAII helper: signals |event_| when it goes out of scope. (The class
  // header lies outside this view.)
  ScopedEvent(base::WaitableEvent* event) : event_(event) {}
  ~ScopedEvent() { event_->Signal(); }

  // Not owned; must outlive this object.
  base::WaitableEvent* event_;
}  // anonymous namespace
// The initializer list establishes safe defaults; real setup happens in
// Initialize()/InitializeOnGpuThread().
InProcessCommandBuffer::InProcessCommandBuffer()
    : context_lost_(false),
      last_put_offset_(-1),
      supports_gpu_memory_buffer_(false),
      // WaitableEvent(manual_reset = false, initially_signaled = false).
      flush_event_(false, false),
      queue_(CreateSchedulerClient()),
      gpu_thread_weak_ptr_factory_(this) {}
// Destructor body lies outside this view; see Destroy() for teardown.
InProcessCommandBuffer::~InProcessCommandBuffer() {
// True when the context is already known lost (or never created), or when
// the current command buffer state reports an error.
bool InProcessCommandBuffer::IsContextLost() {
  CheckSequencedThread();
  if (context_lost_ || !command_buffer_) {
  CommandBuffer::State state = GetState();
  return error::IsError(state.error);
// GPU-thread handler for decoder resize requests; only valid for onscreen
// surfaces.
void InProcessCommandBuffer::OnResizeView(gfx::Size size, float scale_factor) {
  CheckSequencedThread();
  DCHECK(!surface_->IsOffscreen());
  surface_->Resize(size);
// Makes the decoder's GL context current. On failure the context is treated
// as lost: the lost reason and a kLostContext parse error are recorded on
// the command buffer.
bool InProcessCommandBuffer::MakeCurrent() {
  CheckSequencedThread();
  command_buffer_lock_.AssertAcquired();
  if (!context_lost_ && decoder_->MakeCurrent())
  DLOG(ERROR) << "Context lost because MakeCurrent failed.";
  command_buffer_->SetContextLostReason(decoder_->GetContextLostReason());
  command_buffer_->SetParseError(gpu::error::kLostContext);
// Runs on the GPU thread (with the command buffer lock held) after the put
// pointer advances; drives the scheduler to process new commands.
void InProcessCommandBuffer::PumpCommands() {
  CheckSequencedThread();
  command_buffer_lock_.AssertAcquired();
  gpu_scheduler_->PutChanged();
// Scheduler callback fired when the client switches ring buffers; forwards
// the new buffer id to the command buffer.
bool InProcessCommandBuffer::GetBufferChanged(int32 transfer_buffer_id) {
  CheckSequencedThread();
  command_buffer_lock_.AssertAcquired();
  command_buffer_->SetGetBuffer(transfer_buffer_id);
// Client-thread entry point: records sharing parameters, then bounces the
// real work to InitializeOnGpuThread() and blocks on |completion| for its
// boolean result.
bool InProcessCommandBuffer::Initialize(
    scoped_refptr<gfx::GLSurface> surface,
    bool share_resources,
    gfx::AcceleratedWidget window,
    const gfx::Size& size,
    const std::vector<int32>& attribs,
    gfx::GpuPreference gpu_preference,
    const base::Closure& context_lost_callback,
    unsigned int share_group_id) {
  share_resources_ = share_resources;
  // Wrapped so the callback runs (and is destroyed) on this thread.
  context_lost_callback_ = WrapCallback(context_lost_callback);
  share_group_id_ = share_group_id;
  // GPU thread must be the same as client thread due to GLSurface not being
  // thread-transferable (comment truncated in this view — confirm upstream).
  sequence_checker_.reset(new base::SequenceChecker);
  base::Callback<bool(void)> init_task =
      base::Bind(&InProcessCommandBuffer::InitializeOnGpuThread,
                 base::Unretained(this),
  // Manual-reset event signaled by RunTaskWithResult when init finishes.
  base::WaitableEvent completion(true, false);
      base::Bind(&RunTaskWithResult<bool>, init_task, &result, &completion));
// GPU-thread initialization: builds the command buffer service, scheduler,
// decoder, surface, and context, failing (after DestroyOnGpuThread cleanup)
// at the first unrecoverable step. Several control-flow lines are outside
// this view.
bool InProcessCommandBuffer::InitializeOnGpuThread(
    gfx::AcceleratedWidget window,
    const gfx::Size& size,
    const std::vector<int32>& attribs,
    gfx::GpuPreference gpu_preference) {
  CheckSequencedThread();
  // Weak pointers bound into GPU-thread callbacks below; invalidated in
  // DestroyOnGpuThread().
  gpu_thread_weak_ptr_ = gpu_thread_weak_ptr_factory_.GetWeakPtr();
  // Use one share group for all contexts.
  CR_DEFINE_STATIC_LOCAL(scoped_refptr<gfx::GLShareGroup>, share_group,
                         (new gfx::GLShareGroup));
  DCHECK(size.width() >= 0 && size.height() >= 0);

  // Transfer buffer bookkeeping shared with the client side.
  TransferBufferManager* manager = new TransferBufferManager();
  transfer_buffer_manager_.reset(manager);
  manager->Initialize();

  scoped_ptr<CommandBufferService> command_buffer(
      new CommandBufferService(transfer_buffer_manager_.get()));
  // Weak-ptr binding: callbacks are dropped once this object is destroyed.
  command_buffer->SetPutOffsetChangeCallback(base::Bind(
      &InProcessCommandBuffer::PumpCommands, gpu_thread_weak_ptr_));
  command_buffer->SetParseErrorCallback(base::Bind(
      &InProcessCommandBuffer::OnContextLost, gpu_thread_weak_ptr_));
  if (!command_buffer->Initialize()) {
    LOG(ERROR) << "Could not initialize command buffer.";
    DestroyOnGpuThread();

  // Find another context in the same share group, if any, so state (context
  // group, stream textures, lost-ness) can be shared with it.
  InProcessCommandBuffer* context_group = NULL;
  if (share_resources_ && !g_all_shared_contexts.Get().empty()) {
    DCHECK(share_group_id_);
    for (std::set<InProcessCommandBuffer*>::iterator it =
             g_all_shared_contexts.Get().begin();
         it != g_all_shared_contexts.Get().end();
      if ((*it)->share_group_id_ == share_group_id_) {
        DCHECK(context_group->share_resources_);
        // Inherit lost-ness from the group we are joining.
        context_lost_ = context_group->IsContextLost();
    share_group = new gfx::GLShareGroup;

  StreamTextureManager* stream_texture_manager = NULL;
#if defined(OS_ANDROID)
  // Android only: share the stream texture manager within the group.
  stream_texture_manager = stream_texture_manager_ =
      context_group ? context_group->stream_texture_manager_.get()
                    : new StreamTextureManagerInProcess;

  bool bind_generates_resource = false;
  // Reuse the sharing context group's ContextGroup, or build a fresh one.
  decoder_.reset(gles2::GLES2Decoder::Create(
      context_group ? context_group->decoder_->GetContextGroup()
                    : new gles2::ContextGroup(NULL,
                                              stream_texture_manager,
                                              bind_generates_resource)));

  gpu_scheduler_.reset(
      new GpuScheduler(command_buffer.get(), decoder_.get(), decoder_.get()));
  command_buffer->SetGetBufferChangeCallback(base::Bind(
      &GpuScheduler::SetGetBuffer, base::Unretained(gpu_scheduler_.get())));
  command_buffer_ = command_buffer.Pass();
  decoder_->set_engine(gpu_scheduler_.get());

  // Offscreen vs. onscreen surface creation (selection branch not visible).
    surface_ = gfx::GLSurface::CreateOffscreenGLSurface(size);
    surface_ = gfx::GLSurface::CreateViewGLSurface(window);
  if (!surface_.get()) {
    LOG(ERROR) << "Could not create GLSurface.";
    DestroyOnGpuThread();

  if (g_use_virtualized_gl_context) {
    // All virtual contexts ride on one real context stored in the share
    // group; create it lazily on first use.
    context_ = share_group->GetSharedContext();
    if (!context_.get()) {
      context_ = gfx::GLContext::CreateGLContext(
          share_group.get(), surface_.get(), gpu_preference);
      share_group->SetSharedContext(context_.get());
    context_ = new GLContextVirtual(
        share_group.get(), context_.get(), decoder_->AsWeakPtr());
    if (context_->Initialize(surface_.get(), gpu_preference)) {
      VLOG(1) << "Created virtual GL context.";
    context_ = gfx::GLContext::CreateGLContext(
        share_group.get(), surface_.get(), gpu_preference);
  if (!context_.get()) {
    LOG(ERROR) << "Could not create GLContext.";
    DestroyOnGpuThread();

  if (!context_->MakeCurrent(surface_.get())) {
    LOG(ERROR) << "Could not make context current.";
    DestroyOnGpuThread();

  // Features that have no meaning for an in-process command buffer.
  gles2::DisallowedFeatures disallowed_features;
  disallowed_features.swap_buffer_complete_callback = true;
  disallowed_features.gpu_memory_manager = true;
  if (!decoder_->Initialize(surface_,
    LOG(ERROR) << "Could not initialize decoder.";
    DestroyOnGpuThread();

  // GpuControl service for mailboxes, queries, and GpuMemoryBuffers.
      new GpuControlService(decoder_->GetContextGroup()->image_manager(),
                            g_gpu_memory_buffer_factory,
                            decoder_->GetContextGroup()->mailbox_manager(),
                            decoder_->GetQueryManager()));
  supports_gpu_memory_buffer_ = gpu_control_->SupportsGpuMemoryBuffer();
  decoder_->SetResizeCallback(base::Bind(
      &InProcessCommandBuffer::OnResizeView, gpu_thread_weak_ptr_));

  if (share_resources_) {
    // Register for cross-context lost-state propagation.
    g_all_shared_contexts.Pointer()->insert(this);
// Client-thread entry point: posts DestroyOnGpuThread() and blocks on
// |completion| until teardown finishes.
void InProcessCommandBuffer::Destroy() {
  CheckSequencedThread();
  // Manual-reset event signaled by RunTaskWithResult.
  base::WaitableEvent completion(true, false);
  base::Callback<bool(void)> destroy_task = base::Bind(
      &InProcessCommandBuffer::DestroyOnGpuThread, base::Unretained(this));
      base::Bind(&RunTaskWithResult<bool>, destroy_task, &result, &completion));
// GPU-thread teardown: invalidates weak pointers first so any still-queued
// callbacks become no-ops, then releases GL objects — with the context made
// current when possible so the decoder can delete them properly.
bool InProcessCommandBuffer::DestroyOnGpuThread() {
  CheckSequencedThread();
  gpu_thread_weak_ptr_factory_.InvalidateWeakPtrs();
  command_buffer_.reset();
  // Clean up GL resources if possible.
  bool have_context = context_ && context_->MakeCurrent(surface_);
    decoder_->Destroy(have_context);
    g_all_shared_contexts.Pointer()->erase(this);
// Debug-only check that the caller is on the sequence recorded in
// Initialize(); a null checker (pre-initialization) passes trivially.
void InProcessCommandBuffer::CheckSequencedThread() {
  DCHECK(!sequence_checker_ ||
         sequence_checker_->CalledOnValidSequencedThread());
// Parse-error handler: notifies the client once, then marks this context —
// and, when sharing resources, every context in the global share set — as
// lost.
void InProcessCommandBuffer::OnContextLost() {
  CheckSequencedThread();
  if (!context_lost_callback_.is_null()) {
    context_lost_callback_.Run();
    // One-shot: never notify the client twice.
    context_lost_callback_.Reset();
  context_lost_ = true;
  if (share_resources_) {
    // Propagate lost-ness to all sharing contexts.
    for (std::set<InProcessCommandBuffer*>::iterator it =
             g_all_shared_contexts.Get().begin();
         it != g_all_shared_contexts.Get().end();
      (*it)->context_lost_ = true;
// Refreshes |last_state_| from the GPU-thread-updated snapshot, using an
// unsigned-subtraction window of 0x80000000 to tolerate generation-counter
// wraparound (only newer snapshots are taken).
CommandBuffer::State InProcessCommandBuffer::GetStateFast() {
  CheckSequencedThread();
  base::AutoLock lock(state_after_last_flush_lock_);
  if (state_after_last_flush_.generation - last_state_.generation < 0x80000000U)
    last_state_ = state_after_last_flush_;
// CommandBuffer interface: returns the freshest cached state.
CommandBuffer::State InProcessCommandBuffer::GetState() {
  CheckSequencedThread();
  return GetStateFast();
// Returns the cached state without refreshing it (return statement lies
// outside this view).
CommandBuffer::State InProcessCommandBuffer::GetLastState() {
  CheckSequencedThread();
// Returns the most recent token (a refresh call appears to precede the
// return but lies outside this view).
int32 InProcessCommandBuffer::GetLastToken() {
  CheckSequencedThread();
  return last_state_.token;
// GPU-thread flush: executes commands up to |put_offset|, snapshots the
// resulting state for GetStateFast(), and — via the ScopedEvent — signals
// |flush_event_| on every exit path so FlushSync() waiters wake up.
void InProcessCommandBuffer::FlushOnGpuThread(int32 put_offset) {
  CheckSequencedThread();
  ScopedEvent handle_flush(&flush_event_);
  base::AutoLock lock(command_buffer_lock_);
  command_buffer_->Flush(put_offset);
    // Update state before signaling the flush event.
    base::AutoLock lock(state_after_last_flush_lock_);
    state_after_last_flush_ = command_buffer_->GetState();
  // Error state and context_lost_ must agree.
  DCHECK((!error::IsError(state_after_last_flush_.error) && !context_lost_) ||
         (error::IsError(state_after_last_flush_.error) && context_lost_));
  // If we've processed all pending commands but still have pending queries,
  // pump idle work until the query is passed.
  if (put_offset == state_after_last_flush_.get_offset &&
      gpu_scheduler_->HasMoreWork()) {
    queue_->ScheduleIdleWork(
        base::Bind(&InProcessCommandBuffer::ScheduleMoreIdleWork,
                   gpu_thread_weak_ptr_));
// Performs one round of scheduler idle work and re-posts itself until the
// scheduler reports none is left.
void InProcessCommandBuffer::ScheduleMoreIdleWork() {
  CheckSequencedThread();
  base::AutoLock lock(command_buffer_lock_);
  if (gpu_scheduler_->HasMoreWork()) {
    gpu_scheduler_->PerformIdleWork();
    queue_->ScheduleIdleWork(
        base::Bind(&InProcessCommandBuffer::ScheduleMoreIdleWork,
                   gpu_thread_weak_ptr_));
// Client-thread flush: skips work when already in an error state or when
// the put offset has not moved, otherwise queues FlushOnGpuThread (the
// early-return statements and the QueueTask call lie outside this view).
void InProcessCommandBuffer::Flush(int32 put_offset) {
  CheckSequencedThread();
  if (last_state_.error != gpu::error::kNoError)
  if (last_put_offset_ == put_offset)
  last_put_offset_ = put_offset;
  base::Closure task = base::Bind(&InProcessCommandBuffer::FlushOnGpuThread,
                                  gpu_thread_weak_ptr_,
// Blocking flush: spins (presumably waiting on |flush_event_| — loop body
// outside this view) until the get offset advances past |last_known_get| or
// an error occurs, then returns the state.
CommandBuffer::State InProcessCommandBuffer::FlushSync(int32 put_offset,
                                                       int32 last_known_get) {
  CheckSequencedThread();
  if (put_offset == last_known_get || last_state_.error != gpu::error::kNoError)
  while (last_known_get == last_state_.get_offset &&
         last_state_.error == gpu::error::kNoError) {
// Switches the ring buffer to |shm_id|, resets the put offset, and
// refreshes the cached state snapshot.
void InProcessCommandBuffer::SetGetBuffer(int32 shm_id) {
  CheckSequencedThread();
  if (last_state_.error != gpu::error::kNoError)
  base::AutoLock lock(command_buffer_lock_);
  command_buffer_->SetGetBuffer(shm_id);
  last_put_offset_ = 0;
  base::AutoLock lock(state_after_last_flush_lock_);
  state_after_last_flush_ = command_buffer_->GetState();
// Allocates a transfer buffer synchronously under the command buffer lock
// (the |id| out-parameter line of the signature lies outside this view).
gpu::Buffer InProcessCommandBuffer::CreateTransferBuffer(size_t size,
  CheckSequencedThread();
  base::AutoLock lock(command_buffer_lock_);
  return command_buffer_->CreateTransferBuffer(size, id);
// Queues asynchronous destruction of buffer |id| on the GPU thread.
// NOTE(review): Unretained(command_buffer_.get()) relies on queued tasks
// draining before the command buffer is reset — confirm against Destroy().
void InProcessCommandBuffer::DestroyTransferBuffer(int32 id) {
  CheckSequencedThread();
  base::Closure task = base::Bind(&CommandBuffer::DestroyTransferBuffer,
                                  base::Unretained(command_buffer_.get()),
// Returns a default (null) Buffer; this accessor is not meaningfully
// supported by the in-process implementation.
gpu::Buffer InProcessCommandBuffer::GetTransferBuffer(int32 id) {
  return gpu::Buffer();
// Cached from GpuControlService during InitializeOnGpuThread().
bool InProcessCommandBuffer::SupportsGpuMemoryBuffer() {
  return supports_gpu_memory_buffer_;
// Synchronously creates a GpuMemoryBuffer via GpuControl under the command
// buffer lock (some signature/argument lines lie outside this view).
gfx::GpuMemoryBuffer* InProcessCommandBuffer::CreateGpuMemoryBuffer(
    unsigned internalformat,
  CheckSequencedThread();
  base::AutoLock lock(command_buffer_lock_);
  return gpu_control_->CreateGpuMemoryBuffer(width,
// Queues asynchronous destruction of GpuMemoryBuffer |id| on the GPU thread.
void InProcessCommandBuffer::DestroyGpuMemoryBuffer(int32 id) {
  CheckSequencedThread();
  base::Closure task = base::Bind(&GpuControl::DestroyGpuMemoryBuffer,
                                  base::Unretained(gpu_control_.get()),
// Fills |names| with |num| fresh mailbox names, synchronously, under the
// command buffer lock.
bool InProcessCommandBuffer::GenerateMailboxNames(
    unsigned num, std::vector<gpu::Mailbox>* names) {
  CheckSequencedThread();
  base::AutoLock lock(command_buffer_lock_);
  return gpu_control_->GenerateMailboxNames(num, names);
// Sync-point insertion (body lies outside this view).
uint32 InProcessCommandBuffer::InsertSyncPoint() {
// In-process there is no cross-process sync-point wait: the callback is
// simply queued behind previously queued GPU work.
void InProcessCommandBuffer::SignalSyncPoint(unsigned sync_point,
                                             const base::Closure& callback) {
  CheckSequencedThread();
  QueueTask(WrapCallback(callback));
// Asks GpuControl (on the GPU thread) to run |callback| when the query
// completes; WrapCallback bounces it back to the client thread.
void InProcessCommandBuffer::SignalQuery(unsigned query,
                                         const base::Closure& callback) {
  CheckSequencedThread();
  QueueTask(base::Bind(&GpuControl::SignalQuery,
                       base::Unretained(gpu_control_.get()),
                       WrapCallback(callback)));
// Appears to be a no-op for the in-process implementation (body not visible
// in this view — confirm).
void InProcessCommandBuffer::SendManagedMemoryStats(
    const gpu::ManagedMemoryStats& stats) {
// Returns the error recorded in the last cached state snapshot.
gpu::error::Error InProcessCommandBuffer::GetLastError() {
  CheckSequencedThread();
  return last_state_.error;
// CommandBuffer-interface stub; the multi-argument Initialize() above is
// the real entry point (body lies outside this view).
bool InProcessCommandBuffer::Initialize() {
747 void InProcessCommandBuffer::SetGetOffset(int32 get_offset) { NOTREACHED(); }
749 void InProcessCommandBuffer::SetToken(int32 token) { NOTREACHED(); }
// Service-side mutator; presumably NOTREACHED like its siblings above —
// body lies outside this view, confirm.
void InProcessCommandBuffer::SetParseError(gpu::error::Error error) {
// Service-side mutator; presumably NOTREACHED like its siblings above —
// body lies outside this view, confirm.
void InProcessCommandBuffer::SetContextLostReason(
    gpu::error::ContextLostReason reason) {
// Runs |callback| on |loop|, posting only when invoked from a different
// thread (the same-thread branch lies outside this view).
void PostCallback(const scoped_refptr<base::MessageLoopProxy>& loop,
                  const base::Closure& callback) {
  if (!loop->BelongsToCurrentThread()) {
    loop->PostTask(FROM_HERE, callback);
// Takes ownership of |callback| so it is destroyed on the thread that runs
// it (the Run() call lies outside this view).
void RunOnTargetThread(scoped_ptr<base::Closure> callback) {
  DCHECK(callback.get());
776 } // anonymous namespace
// Wraps |callback| so that, no matter which thread invokes the result, the
// original callback runs — and is destroyed — on the thread that called
// WrapCallback() (i.e. the current message loop).
base::Closure InProcessCommandBuffer::WrapCallback(
    const base::Closure& callback) {
  // Make sure the callback gets deleted on the target thread by passing
  // ownership into the closure (comment continuation outside this view).
  scoped_ptr<base::Closure> scoped_callback(new base::Closure(callback));
  base::Closure callback_on_client_thread =
      base::Bind(&RunOnTargetThread, base::Passed(&scoped_callback));
  base::Closure wrapped_callback =
      base::Bind(&PostCallback, base::MessageLoopProxy::current(),
                 callback_on_client_thread);
  return wrapped_callback;
#if defined(OS_ANDROID)
// Android-only: looks up the SurfaceTexture backing |stream_id|.
scoped_refptr<gfx::SurfaceTexture>
InProcessCommandBuffer::GetSurfaceTexture(uint32 stream_id) {
  DCHECK(stream_texture_manager_);
  return stream_texture_manager_->GetSurfaceTexture(stream_id);
// Turns on process-wide virtualized-context mode; the flag is read during
// InitializeOnGpuThread().
void InProcessCommandBuffer::EnableVirtualizedContext() {
  g_use_virtualized_gl_context = true;
// Switches the process to explicit scheduling. Must be called at most once
// and before any scheduler clients exist (both enforced by the DCHECKs).
void InProcessCommandBuffer::SetScheduleCallback(
    const base::Closure& callback) {
  DCHECK(!g_uses_explicit_scheduling);
  DCHECK(!SchedulerClientBase::HasClients());
  g_uses_explicit_scheduling = true;
  g_gpu_queue.Get().SetScheduleCallback(callback);
// Embedder hook for explicit scheduling: drains the global GPU command
// queue on the calling thread.
void InProcessCommandBuffer::ProcessGpuWorkOnCurrentThread() {
  g_gpu_queue.Get().RunTasks();
// Injects the factory later handed to GpuControlService for creating
// GpuMemoryBuffers.
void InProcessCommandBuffer::SetGpuMemoryBufferFactory(
    GpuMemoryBufferFactory* factory) {
  g_gpu_memory_buffer_factory = factory;