1 // Copyright 2013 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "gpu/command_buffer/service/in_process_command_buffer.h"
11 #include <GLES2/gl2.h>
12 #ifndef GL_GLEXT_PROTOTYPES
13 #define GL_GLEXT_PROTOTYPES 1
15 #include <GLES2/gl2ext.h>
16 #include <GLES2/gl2extchromium.h>
18 #include "base/bind.h"
19 #include "base/bind_helpers.h"
20 #include "base/lazy_instance.h"
21 #include "base/logging.h"
22 #include "base/memory/weak_ptr.h"
23 #include "base/message_loop/message_loop_proxy.h"
24 #include "base/sequence_checker.h"
25 #include "base/synchronization/condition_variable.h"
26 #include "base/threading/thread.h"
27 #include "gpu/command_buffer/service/command_buffer_service.h"
28 #include "gpu/command_buffer/service/context_group.h"
29 #include "gpu/command_buffer/service/gl_context_virtual.h"
30 #include "gpu/command_buffer/service/gpu_control_service.h"
31 #include "gpu/command_buffer/service/gpu_scheduler.h"
32 #include "gpu/command_buffer/service/image_manager.h"
33 #include "gpu/command_buffer/service/mailbox_manager.h"
34 #include "gpu/command_buffer/service/transfer_buffer_manager.h"
35 #include "ui/gfx/size.h"
36 #include "ui/gl/gl_context.h"
37 #include "ui/gl/gl_image.h"
38 #include "ui/gl/gl_share_group.h"
40 #if defined(OS_ANDROID)
41 #include "gpu/command_buffer/service/stream_texture_manager_in_process_android.h"
42 #include "ui/gl/android/surface_texture.h"
// Process-wide factory used to service GPU memory buffer requests; installed
// once via InProcessCommandBuffer::SetGpuMemoryBufferFactory (end of file).
49 static GpuMemoryBufferFactory* g_gpu_memory_buffer_factory = NULL;
// Runs |task| on the GPU thread and signals |completion| so the client thread
// blocked on it can resume. NOTE(review): the template header, the result
// out-parameter line and the body are elided in this listing — confirm
// against the full file that the result is written before Signal().
52 static void RunTaskWithResult(base::Callback<T(void)> task,
54 base::WaitableEvent* completion) {
// Default Service implementation: a dedicated in-process GPU thread shared by
// all InProcessCommandBuffer clients that did not supply their own Service.
// Ref-counted so it lives as long as any client holds a reference.
59 class GpuInProcessThread
60     : public base::Thread,
61       public InProcessCommandBuffer::Service,
62       public base::RefCountedThreadSafe<GpuInProcessThread> {
// Forward AddRef/Release to the RefCountedThreadSafe base so Service's
// (presumably virtual) ref-counting interface is satisfied.
66   virtual void AddRef() const OVERRIDE {
67     base::RefCountedThreadSafe<GpuInProcessThread>::AddRef();
69   virtual void Release() const OVERRIDE {
70     base::RefCountedThreadSafe<GpuInProcessThread>::Release();
// Service interface: post work to this thread's message loop.
73   virtual void ScheduleTask(const base::Closure& task) OVERRIDE;
74   virtual void ScheduleIdleWork(const base::Closure& callback) OVERRIDE;
// This service runs real GL contexts, not virtualized ones.
75   virtual bool UseVirtualizedGLContexts() OVERRIDE { return false; }
// Destructor is private; deletion happens only through Release().
78   virtual ~GpuInProcessThread();
79   friend class base::RefCountedThreadSafe<GpuInProcessThread>;
81   DISALLOW_COPY_AND_ASSIGN(GpuInProcessThread);
// Starts the thread immediately on construction (Start() call is elided in
// this listing — confirm against the full file).
84 GpuInProcessThread::GpuInProcessThread() : base::Thread("GpuThread") {
88 GpuInProcessThread::~GpuInProcessThread() {
// Post |task| to the GPU thread's message loop for asynchronous execution.
92 void GpuInProcessThread::ScheduleTask(const base::Closure& task) {
93   message_loop()->PostTask(FROM_HERE, task);
// Idle work (query polling etc.) is delayed by 5 ms so it does not starve
// regular scheduled tasks.
96 void GpuInProcessThread::ScheduleIdleWork(const base::Closure& callback) {
97   message_loop()->PostDelayedTask(
98       FROM_HERE, callback, base::TimeDelta::FromMilliseconds(5));
// Set of live clients using the default GpuInProcessThread service, guarded
// by default_thread_clients_lock_ below.
101 base::LazyInstance<std::set<InProcessCommandBuffer*> > default_thread_clients_ =
102     LAZY_INSTANCE_INITIALIZER;
103 base::LazyInstance<base::Lock> default_thread_clients_lock_ =
104     LAZY_INSTANCE_INITIALIZER;
// RAII helper: signals |event| when the scope exits, regardless of how.
// NOTE(review): the class header line is elided in this listing.
108   ScopedEvent(base::WaitableEvent* event) : event_(event) {}
109   ~ScopedEvent() { event_->Signal(); }
112   base::WaitableEvent* event_;
// Tracks sync points for the in-process case: a sync point is "pending" from
// GenerateSyncPoint() until RetireSyncPoint(); waiters block on a condition
// variable until retirement.
115 class SyncPointManager {
120   uint32 GenerateSyncPoint();
121   void RetireSyncPoint(uint32 sync_point);
123   bool IsSyncPointPassed(uint32 sync_point);
124   void WaitSyncPoint(uint32 sync_point);
127   // This lock protects access to pending_sync_points_ and next_sync_point_ and
128   // is used with the ConditionVariable to signal when a sync point is retired.
130   std::set<uint32> pending_sync_points_;
131   uint32 next_sync_point_;
132   base::ConditionVariable cond_var_;
// Sync points start at 1; 0 is never a valid sync point.
135 SyncPointManager::SyncPointManager() : next_sync_point_(1), cond_var_(&lock_) {}
// All sync points must have been retired before destruction.
137 SyncPointManager::~SyncPointManager() {
138   DCHECK_EQ(pending_sync_points_.size(), 0U);
// Allocates the next sync point id and marks it pending. The returned id is
// unique until uint32 wrap-around (line with the return is elided here).
141 uint32 SyncPointManager::GenerateSyncPoint() {
142   base::AutoLock lock(lock_);
143   uint32 sync_point = next_sync_point_++;
144   DCHECK_EQ(pending_sync_points_.count(sync_point), 0U);
145   pending_sync_points_.insert(sync_point);
// Removes |sync_point| from the pending set and wakes every waiter; each
// waiter rechecks its own sync point, hence Broadcast() rather than Signal().
149 void SyncPointManager::RetireSyncPoint(uint32 sync_point) {
150   base::AutoLock lock(lock_);
151   DCHECK(pending_sync_points_.count(sync_point));
152   pending_sync_points_.erase(sync_point);
153   cond_var_.Broadcast();
// A sync point is "passed" once it is no longer pending (including ids that
// were never generated).
156 bool SyncPointManager::IsSyncPointPassed(uint32 sync_point) {
157   base::AutoLock lock(lock_);
158   return pending_sync_points_.count(sync_point) == 0;
// Blocks the calling thread until |sync_point| is retired. The cond_var_
// Wait() call inside the loop is elided in this listing.
161 void SyncPointManager::WaitSyncPoint(uint32 sync_point) {
162   base::AutoLock lock(lock_);
163   while (pending_sync_points_.count(sync_point)) {
// Single process-wide sync point manager shared by all command buffers.
168 base::LazyInstance<SyncPointManager> g_sync_point_manager =
169     LAZY_INSTANCE_INITIALIZER;
// Free-function adapter used as the decoder's wait-sync-point callback
// (bound in InitializeOnGpuThread). Blocks until the sync point retires;
// the return statement is elided in this listing.
171 bool WaitSyncPoint(uint32 sync_point) {
172   g_sync_point_manager.Get().WaitSyncPoint(sync_point);
176 } // anonymous namespace
178 InProcessCommandBuffer::Service::Service() {}
180 InProcessCommandBuffer::Service::~Service() {}
// Returns the shared default service: reuses the service of any existing
// default-thread client, otherwise spins up a fresh GpuInProcessThread.
// Must be called under no other lock; takes default_thread_clients_lock_.
182 scoped_refptr<InProcessCommandBuffer::Service>
183 InProcessCommandBuffer::GetDefaultService() {
184   base::AutoLock lock(default_thread_clients_lock_.Get());
185   scoped_refptr<Service> service;
186   if (!default_thread_clients_.Get().empty()) {
187     InProcessCommandBuffer* other = *default_thread_clients_.Get().begin();
188     service = other->service_;
189     DCHECK(service.get());
// else-branch: no existing client, create a new GPU thread (braces and the
// return are elided in this listing).
191     service = new GpuInProcessThread;
// Construction registers this client in the default-thread client set only
// when no explicit service was supplied (the surrounding if is elided in
// this listing — confirm against the full file).
196 InProcessCommandBuffer::InProcessCommandBuffer(
197     const scoped_refptr<Service>& service)
198     : context_lost_(false),
199       last_put_offset_(-1),
200       flush_event_(false, false),
201       service_(service.get() ? service : GetDefaultService()),
202       gpu_thread_weak_ptr_factory_(this) {
204     base::AutoLock lock(default_thread_clients_lock_.Get());
205     default_thread_clients_.Get().insert(this);
// Unregister from the default-thread client set on destruction.
209 InProcessCommandBuffer::~InProcessCommandBuffer() {
211   base::AutoLock lock(default_thread_clients_lock_.Get());
212   default_thread_clients_.Get().erase(this);
// Runs on the GPU thread when the decoder requests a resize of an onscreen
// surface (bound as the resize callback for non-offscreen init).
215 void InProcessCommandBuffer::OnResizeView(gfx::Size size, float scale_factor) {
216   CheckSequencedThread();
217   DCHECK(!surface_->IsOffscreen());
218   surface_->Resize(size);
// Makes the decoder's context current; on failure marks the command buffer
// lost. The early "return true" on success is elided in this listing.
// Requires command_buffer_lock_ to be held.
221 bool InProcessCommandBuffer::MakeCurrent() {
222   CheckSequencedThread();
223   command_buffer_lock_.AssertAcquired();
225   if (!context_lost_ && decoder_->MakeCurrent())
227   DLOG(ERROR) << "Context lost because MakeCurrent failed.";
228   command_buffer_->SetContextLostReason(decoder_->GetContextLostReason());
229   command_buffer_->SetParseError(gpu::error::kLostContext);
// Put-offset-changed callback: drives the scheduler to process new commands.
// Requires command_buffer_lock_ (the MakeCurrent guard is elided here).
233 void InProcessCommandBuffer::PumpCommands() {
234   CheckSequencedThread();
235   command_buffer_lock_.AssertAcquired();
240   gpu_scheduler_->PutChanged();
// Get-buffer-changed callback; the return statement is elided in this
// listing.
243 bool InProcessCommandBuffer::GetBufferChanged(int32 transfer_buffer_id) {
244   CheckSequencedThread();
245   command_buffer_lock_.AssertAcquired();
246   command_buffer_->SetGetBuffer(transfer_buffer_id);
// Client-thread entry point: packages the parameters, hops to the GPU thread
// to run InitializeOnGpuThread, and blocks on |completion| until it finishes.
// Several lines (is_offscreen parameter, QueueTask call, completion.Wait(),
// return) are elided in this listing — confirm against the full file.
250 bool InProcessCommandBuffer::Initialize(
251     scoped_refptr<gfx::GLSurface> surface,
253     gfx::AcceleratedWidget window,
254     const gfx::Size& size,
255     const std::vector<int32>& attribs,
256     gfx::GpuPreference gpu_preference,
257     const base::Closure& context_lost_callback,
258     InProcessCommandBuffer* share_group) {
// Sharing is only possible between contexts driven by the same service.
259   DCHECK(!share_group || service_ == share_group->service_);
260   context_lost_callback_ = WrapCallback(context_lost_callback);
263   // GPU thread must be the same as client thread due to GLSurface not being
265   sequence_checker_.reset(new base::SequenceChecker);
269   gpu::Capabilities capabilities;
270   InitializeOnGpuThreadParams params(is_offscreen,
278   base::Callback<bool(void)> init_task =
279       base::Bind(&InProcessCommandBuffer::InitializeOnGpuThread,
280                  base::Unretained(this),
// Manual-reset event; signaled by RunTaskWithResult once init completes.
283   base::WaitableEvent completion(true, false);
286       base::Bind(&RunTaskWithResult<bool>, init_task, &result, &completion));
290     capabilities_ = capabilities;
// GPU-thread half of initialization: builds the command buffer service,
// decoder, scheduler, surface and context, then wires the callbacks.
// Every failure path logs, calls DestroyOnGpuThread() and (per the elided
// lines) returns false.
294 bool InProcessCommandBuffer::InitializeOnGpuThread(
295     const InitializeOnGpuThreadParams& params) {
296   CheckSequencedThread();
297   gpu_thread_weak_ptr_ = gpu_thread_weak_ptr_factory_.GetWeakPtr();
299   DCHECK(params.size.width() >= 0 && params.size.height() >= 0);
301   TransferBufferManager* manager = new TransferBufferManager();
302   transfer_buffer_manager_.reset(manager);
303   manager->Initialize();
305   scoped_ptr<CommandBufferService> command_buffer(
306       new CommandBufferService(transfer_buffer_manager_.get()));
// Weak pointers: callbacks become no-ops once this object is destroyed.
307   command_buffer->SetPutOffsetChangeCallback(base::Bind(
308       &InProcessCommandBuffer::PumpCommands, gpu_thread_weak_ptr_));
309   command_buffer->SetParseErrorCallback(base::Bind(
310       &InProcessCommandBuffer::OnContextLost, gpu_thread_weak_ptr_));
312   if (!command_buffer->Initialize()) {
313     LOG(ERROR) << "Could not initialize command buffer.";
314     DestroyOnGpuThread();
// Share GL resources with the supplied context group, or start a new group.
318   gl_share_group_ = params.context_group
319                         ? params.context_group->gl_share_group_.get()
320                         : new gfx::GLShareGroup;
322 #if defined(OS_ANDROID)
323   stream_texture_manager_.reset(new StreamTextureManagerInProcess);
326   bool bind_generates_resource = false;
327   decoder_.reset(gles2::GLES2Decoder::Create(
328       params.context_group ? params.context_group->decoder_->GetContextGroup()
329                            : new gles2::ContextGroup(NULL,
333                                                      bind_generates_resource)));
// The decoder serves as both the command handler and the decoder for the
// scheduler.
335   gpu_scheduler_.reset(
336       new GpuScheduler(command_buffer.get(), decoder_.get(), decoder_.get()));
337   command_buffer->SetGetBufferChangeCallback(base::Bind(
338       &GpuScheduler::SetGetBuffer, base::Unretained(gpu_scheduler_.get())));
339   command_buffer_ = command_buffer.Pass();
341   decoder_->set_engine(gpu_scheduler_.get());
// Surface: offscreen pbuffer-like surface, or a view surface for |window|.
344   if (params.is_offscreen)
345     surface_ = gfx::GLSurface::CreateOffscreenGLSurface(params.size);
347     surface_ = gfx::GLSurface::CreateViewGLSurface(params.window);
350   if (!surface_.get()) {
351     LOG(ERROR) << "Could not create GLSurface.";
352     DestroyOnGpuThread();
// Virtualized contexts: one real shared context per share group, wrapped in
// a GLContextVirtual per decoder.
356   if (service_->UseVirtualizedGLContexts()) {
357     context_ = gl_share_group_->GetSharedContext();
358     if (!context_.get()) {
359       context_ = gfx::GLContext::CreateGLContext(
360           gl_share_group_.get(), surface_.get(), params.gpu_preference);
361       gl_share_group_->SetSharedContext(context_.get());
364     context_ = new GLContextVirtual(
365         gl_share_group_.get(), context_.get(), decoder_->AsWeakPtr());
366     if (context_->Initialize(surface_.get(), params.gpu_preference)) {
367       VLOG(1) << "Created virtual GL context.";
// Non-virtualized path (else-branch braces elided in this listing).
372     context_ = gfx::GLContext::CreateGLContext(
373         gl_share_group_.get(), surface_.get(), params.gpu_preference);
376   if (!context_.get()) {
377     LOG(ERROR) << "Could not create GLContext.";
378     DestroyOnGpuThread();
382   if (!context_->MakeCurrent(surface_.get())) {
383     LOG(ERROR) << "Could not make context current.";
384     DestroyOnGpuThread();
// Memory management is handled by the client in the in-process case, so the
// GPU memory manager feature is disallowed for the decoder.
388   gles2::DisallowedFeatures disallowed_features;
389   disallowed_features.gpu_memory_manager = true;
390   if (!decoder_->Initialize(surface_,
396     LOG(ERROR) << "Could not initialize decoder.";
397     DestroyOnGpuThread();
// GpuControl implementation (the gpu_control_.reset( line is elided).
402       new GpuControlService(decoder_->GetContextGroup()->image_manager(),
403                             g_gpu_memory_buffer_factory,
404                             decoder_->GetContextGroup()->mailbox_manager(),
405                             decoder_->GetQueryManager(),
406                             decoder_->GetCapabilities()));
// Report capabilities back to the client thread via the params out-pointer.
408   *params.capabilities = gpu_control_->GetCapabilities();
410   if (!params.is_offscreen) {
411     decoder_->SetResizeCallback(base::Bind(
412         &InProcessCommandBuffer::OnResizeView, gpu_thread_weak_ptr_));
414   decoder_->SetWaitSyncPointCallback(base::Bind(&WaitSyncPoint));
// Client-thread teardown: runs DestroyOnGpuThread on the GPU thread and
// blocks until it completes (QueueTask and completion.Wait() lines are
// elided in this listing).
419 void InProcessCommandBuffer::Destroy() {
420   CheckSequencedThread();
422   base::WaitableEvent completion(true, false);
424   base::Callback<bool(void)> destroy_task = base::Bind(
425       &InProcessCommandBuffer::DestroyOnGpuThread, base::Unretained(this));
427       base::Bind(&RunTaskWithResult<bool>, destroy_task, &result, &completion));
// GPU-thread teardown. Invalidating weak pointers first guarantees no
// pending callback runs against a half-destroyed object.
431 bool InProcessCommandBuffer::DestroyOnGpuThread() {
432   CheckSequencedThread();
433   gpu_thread_weak_ptr_factory_.InvalidateWeakPtrs();
434   command_buffer_.reset();
435   // Clean up GL resources if possible.
436   bool have_context = context_ && context_->MakeCurrent(surface_);
438     decoder_->Destroy(have_context);
443   gl_share_group_ = NULL;
444 #if defined(OS_ANDROID)
445   stream_texture_manager_.reset();
// Asserts all calls stay on the sequence chosen at Initialize() time; a null
// checker (pre-initialization) disables the check.
451 void InProcessCommandBuffer::CheckSequencedThread() {
452   DCHECK(!sequence_checker_ ||
453          sequence_checker_->CalledOnValidSequencedThread());
// Invokes the client's context-lost callback exactly once (Reset() clears
// it) and latches context_lost_.
456 void InProcessCommandBuffer::OnContextLost() {
457   CheckSequencedThread();
458   if (!context_lost_callback_.is_null()) {
459     context_lost_callback_.Run();
460     context_lost_callback_.Reset();
463   context_lost_ = true;
// Copies the post-flush state into last_state_ only if it is newer; the
// unsigned subtraction against 0x80000000U handles generation wrap-around.
// The return statement is elided in this listing.
466 CommandBuffer::State InProcessCommandBuffer::GetStateFast() {
467   CheckSequencedThread();
468   base::AutoLock lock(state_after_last_flush_lock_);
469   if (state_after_last_flush_.generation - last_state_.generation < 0x80000000U)
470     last_state_ = state_after_last_flush_;
// GetState refreshes from the latest flushed state; GetLastState returns the
// cached copy (its return line is elided in this listing).
474 CommandBuffer::State InProcessCommandBuffer::GetState() {
475   CheckSequencedThread();
476   return GetStateFast();
479 CommandBuffer::State InProcessCommandBuffer::GetLastState() {
480   CheckSequencedThread();
// Refreshes state (GetStateFast call elided) before reading the token.
484 int32 InProcessCommandBuffer::GetLastToken() {
485   CheckSequencedThread();
487   return last_state_.token;
// GPU-thread side of Flush: processes commands up to |put_offset| and
// signals flush_event_ on every exit path via ScopedEvent.
490 void InProcessCommandBuffer::FlushOnGpuThread(int32 put_offset) {
491   CheckSequencedThread();
492   ScopedEvent handle_flush(&flush_event_);
493   base::AutoLock lock(command_buffer_lock_);
494   command_buffer_->Flush(put_offset);
496     // Update state before signaling the flush event.
// NOTE(review): this second |lock| must live in a nested scope whose braces
// are elided in this listing — otherwise it would shadow the lock above.
497     base::AutoLock lock(state_after_last_flush_lock_);
498     state_after_last_flush_ = command_buffer_->GetState();
// Error state and context_lost_ must agree: both set or both clear.
500   DCHECK((!error::IsError(state_after_last_flush_.error) && !context_lost_) ||
501          (error::IsError(state_after_last_flush_.error) && context_lost_));
503   // If we've processed all pending commands but still have pending queries,
504   // pump idle work until the query is passed.
505   if (put_offset == state_after_last_flush_.get_offset &&
506       gpu_scheduler_->HasMoreWork()) {
507     service_->ScheduleIdleWork(
508         base::Bind(&InProcessCommandBuffer::ScheduleMoreIdleWork,
509                    gpu_thread_weak_ptr_));
// Performs one round of idle work and reschedules itself while the
// scheduler still has work (e.g. unfinished queries).
513 void InProcessCommandBuffer::ScheduleMoreIdleWork() {
514   CheckSequencedThread();
515   base::AutoLock lock(command_buffer_lock_);
516   if (gpu_scheduler_->HasMoreWork()) {
517     gpu_scheduler_->PerformIdleWork();
518     service_->ScheduleIdleWork(
519         base::Bind(&InProcessCommandBuffer::ScheduleMoreIdleWork,
520                    gpu_thread_weak_ptr_));
// Client-thread Flush: no-op on error or when the put offset is unchanged;
// otherwise posts FlushOnGpuThread (the QueueTask call, the put_offset bind
// argument and early returns are elided in this listing).
524 void InProcessCommandBuffer::Flush(int32 put_offset) {
525   CheckSequencedThread();
526   if (last_state_.error != gpu::error::kNoError)
529   if (last_put_offset_ == put_offset)
532   last_put_offset_ = put_offset;
533   base::Closure task = base::Bind(&InProcessCommandBuffer::FlushOnGpuThread,
534                                   gpu_thread_weak_ptr_,
// Spin-wait loops: each iteration presumably blocks on flush_event_ /
// refreshes state via GetStateFast (loop bodies elided in this listing).
539 void InProcessCommandBuffer::WaitForTokenInRange(int32 start, int32 end) {
540   CheckSequencedThread();
541   while (!InRange(start, end, GetLastToken()) &&
542          last_state_.error == gpu::error::kNoError)
546 void InProcessCommandBuffer::WaitForGetOffsetInRange(int32 start, int32 end) {
547   CheckSequencedThread();
550   while (!InRange(start, end, last_state_.get_offset) &&
551          last_state_.error == gpu::error::kNoError) {
// Switches the ring-buffer shared memory and resets the put offset; then
// re-snapshots the state (the nested scope around the second lock is elided
// in this listing).
557 void InProcessCommandBuffer::SetGetBuffer(int32 shm_id) {
558   CheckSequencedThread();
559   if (last_state_.error != gpu::error::kNoError)
563   base::AutoLock lock(command_buffer_lock_);
564   command_buffer_->SetGetBuffer(shm_id);
565   last_put_offset_ = 0;
568   base::AutoLock lock(state_after_last_flush_lock_);
569   state_after_last_flush_ = command_buffer_->GetState();
// Transfer buffers are created synchronously under the command buffer lock.
573 scoped_refptr<Buffer> InProcessCommandBuffer::CreateTransferBuffer(size_t size,
575   CheckSequencedThread();
576   base::AutoLock lock(command_buffer_lock_);
577   return command_buffer_->CreateTransferBuffer(size, id);
// Destruction is posted to the GPU thread (the QueueTask/id-bind lines are
// elided). Unretained is safe only while the GPU thread outlives the task.
580 void InProcessCommandBuffer::DestroyTransferBuffer(int32 id) {
581   CheckSequencedThread();
582   base::Closure task = base::Bind(&CommandBuffer::DestroyTransferBuffer,
583                                   base::Unretained(command_buffer_.get()),
// Not implemented for the in-process case (body elided; presumably
// NOTREACHED — confirm against the full file).
589 scoped_refptr<gpu::Buffer> InProcessCommandBuffer::GetTransferBuffer(int32 id) {
// Capabilities captured once during Initialize().
594 gpu::Capabilities InProcessCommandBuffer::GetCapabilities() {
595   return capabilities_;
// Synchronous creation under the command buffer lock; remaining parameters
// (width/height/usage/id out-param) are elided in this listing.
598 gfx::GpuMemoryBuffer* InProcessCommandBuffer::CreateGpuMemoryBuffer(
601     unsigned internalformat,
603   CheckSequencedThread();
604   base::AutoLock lock(command_buffer_lock_);
605   return gpu_control_->CreateGpuMemoryBuffer(width,
// Destruction is posted to the GPU thread, mirroring DestroyTransferBuffer.
611 void InProcessCommandBuffer::DestroyGpuMemoryBuffer(int32 id) {
612   CheckSequencedThread();
613   base::Closure task = base::Bind(&GpuControl::DestroyGpuMemoryBuffer,
614                                   base::Unretained(gpu_control_.get()),
// Generates a sync point immediately on the client thread, then queues its
// retirement behind all previously queued GPU work (return line elided).
620 uint32 InProcessCommandBuffer::InsertSyncPoint() {
621   uint32 sync_point = g_sync_point_manager.Get().GenerateSyncPoint();
622   QueueTask(base::Bind(&InProcessCommandBuffer::RetireSyncPointOnGpuThread,
623                        base::Unretained(this),
// GPU-thread retirement: pushes texture updates into the mailbox manager
// first (when it synchronizes via sync points) so consumers observe them.
628 void InProcessCommandBuffer::RetireSyncPointOnGpuThread(uint32 sync_point) {
629   gles2::MailboxManager* mailbox_manager =
630       decoder_->GetContextGroup()->mailbox_manager();
631   if (mailbox_manager->UsesSync() && MakeCurrent())
632     mailbox_manager->PushTextureUpdates();
633   g_sync_point_manager.Get().RetireSyncPoint(sync_point);
// Runs |callback| (wrapped to the client thread) once |sync_point| passes.
636 void InProcessCommandBuffer::SignalSyncPoint(unsigned sync_point,
637                                              const base::Closure& callback) {
638   CheckSequencedThread();
639   QueueTask(base::Bind(&InProcessCommandBuffer::SignalSyncPointOnGpuThread,
640                        base::Unretained(this),
642                        WrapCallback(callback)));
// Polls: runs the callback if passed (callback.Run() line elided), otherwise
// reposts itself as idle work until the sync point retires.
645 void InProcessCommandBuffer::SignalSyncPointOnGpuThread(
647     const base::Closure& callback) {
648   if (g_sync_point_manager.Get().IsSyncPointPassed(sync_point)) {
651   service_->ScheduleIdleWork(
652       base::Bind(&InProcessCommandBuffer::SignalSyncPointOnGpuThread,
653                  gpu_thread_weak_ptr_,
// Delegates query signaling to the GpuControl service on the GPU thread.
659 void InProcessCommandBuffer::SignalQuery(unsigned query,
660                                          const base::Closure& callback) {
661   CheckSequencedThread();
662   QueueTask(base::Bind(&GpuControl::SignalQuery,
663                        base::Unretained(gpu_control_.get()),
665                        WrapCallback(callback)));
// Surface visibility and memory stats are irrelevant in-process: no-ops.
668 void InProcessCommandBuffer::SetSurfaceVisible(bool visible) {}
670 void InProcessCommandBuffer::SendManagedMemoryStats(
671     const gpu::ManagedMemoryStats& stats) {
// Echo: runs |callback| on the client thread after all queued GPU work.
674 void InProcessCommandBuffer::Echo(const base::Closure& callback) {
675   QueueTask(WrapCallback(callback));
// Synchronously creates a stream texture on the GPU thread and waits for the
// resulting stream id (QueueTask/Wait/return lines elided in this listing).
678 uint32 InProcessCommandBuffer::CreateStreamTexture(uint32 texture_id) {
679   base::WaitableEvent completion(true, false);
680   uint32 stream_id = 0;
681   base::Callback<uint32(void)> task =
682       base::Bind(&InProcessCommandBuffer::CreateStreamTextureOnGpuThread,
683                  base::Unretained(this),
686       base::Bind(&RunTaskWithResult<uint32>, task, &stream_id, &completion));
// Android-only implementation; the non-Android fallback (#else / return 0?)
// is elided in this listing.
691 uint32 InProcessCommandBuffer::CreateStreamTextureOnGpuThread(
692     uint32 client_texture_id) {
693 #if defined(OS_ANDROID)
694   return stream_texture_manager_->CreateStreamTexture(
695       client_texture_id, decoder_->GetContextGroup()->texture_manager());
// Returns the cached error from the most recently observed state.
701 gpu::error::Error InProcessCommandBuffer::GetLastError() {
702   CheckSequencedThread();
703   return last_state_.error;
// The service-side CommandBuffer interface is not used by clients of this
// class; these overrides exist only to satisfy the interface and must never
// be called (bodies with NOTREACHED are partially elided in this listing).
706 bool InProcessCommandBuffer::Initialize() {
711 void InProcessCommandBuffer::SetGetOffset(int32 get_offset) { NOTREACHED(); }
713 void InProcessCommandBuffer::SetToken(int32 token) { NOTREACHED(); }
715 void InProcessCommandBuffer::SetParseError(gpu::error::Error error) {
719 void InProcessCommandBuffer::SetContextLostReason(
720     gpu::error::ContextLostReason reason) {
// Runs |callback| on |loop|'s thread: posts if called from another thread,
// otherwise (per the elided else-branch) presumably runs it directly.
726 void PostCallback(const scoped_refptr<base::MessageLoopProxy>& loop,
727                   const base::Closure& callback) {
728   if (!loop->BelongsToCurrentThread()) {
729     loop->PostTask(FROM_HERE, callback);
// Takes ownership of the heap-allocated closure so it is destroyed on the
// thread that runs it (the callback->Run() line is elided in this listing).
735 void RunOnTargetThread(scoped_ptr<base::Closure> callback) {
736   DCHECK(callback.get());
740 } // anonymous namespace
// Wraps |callback| so that, no matter which thread invokes the wrapper, the
// callback both runs and is destroyed on the calling (client) thread's
// message loop. The heap copy plus base::Passed ensures single ownership.
742 base::Closure InProcessCommandBuffer::WrapCallback(
743     const base::Closure& callback) {
744   // Make sure the callback gets deleted on the target thread by passing
746   scoped_ptr<base::Closure> scoped_callback(new base::Closure(callback));
747   base::Closure callback_on_client_thread =
748       base::Bind(&RunOnTargetThread, base::Passed(&scoped_callback));
749   base::Closure wrapped_callback =
750       base::Bind(&PostCallback, base::MessageLoopProxy::current(),
751                  callback_on_client_thread);
752   return wrapped_callback;
755 #if defined(OS_ANDROID)
// Android-only: exposes the SurfaceTexture backing a stream texture created
// via CreateStreamTexture().
756 scoped_refptr<gfx::SurfaceTexture>
757 InProcessCommandBuffer::GetSurfaceTexture(uint32 stream_id) {
758   DCHECK(stream_texture_manager_);
759   return stream_texture_manager_->GetSurfaceTexture(stream_id);
// Installs the process-wide factory read by InitializeOnGpuThread when
// constructing the GpuControlService. Call before any buffer is initialized.
764 void InProcessCommandBuffer::SetGpuMemoryBufferFactory(
765     GpuMemoryBufferFactory* factory) {
766   g_gpu_memory_buffer_factory = factory;